Diffstat
-rw-r--r--Documentation/ABI/testing/sysfs-driver-panfrost-profiling10
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-opal-powercap4
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-opal-psr4
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups4
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info10
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs2
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-fadump18
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-damon6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage18
-rw-r--r--Documentation/ABI/testing/sysfs-platform-asus-wmi26
-rw-r--r--Documentation/Makefile15
-rw-r--r--Documentation/PCI/msi-howto.rst2
-rw-r--r--Documentation/PCI/pci.rst2
-rw-r--r--Documentation/PCI/pcieaer-howto.rst2
-rw-r--r--Documentation/RCU/whatisRCU.rst6
-rw-r--r--Documentation/admin-guide/blockdev/zram.rst5
-rw-r--r--Documentation/admin-guide/cgroup-v1/cgroups.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/cpusets.rst7
-rw-r--r--Documentation/admin-guide/cgroup-v1/memcg_test.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst8
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst42
-rw-r--r--Documentation/admin-guide/device-mapper/dm-crypt.rst5
-rw-r--r--Documentation/admin-guide/hw-vuln/core-scheduling.rst4
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst44
-rw-r--r--Documentation/admin-guide/hw-vuln/srso.rst2
-rw-r--r--Documentation/admin-guide/kdump/kdump.rst8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt80
-rw-r--r--Documentation/admin-guide/media/ipu6-isys.rst161
-rw-r--r--Documentation/admin-guide/media/ipu6_isys_graph.svg548
-rw-r--r--Documentation/admin-guide/media/mgb4.rst35
-rw-r--r--Documentation/admin-guide/media/v4l-drivers.rst1
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst32
-rw-r--r--Documentation/admin-guide/mm/hugetlbpage.rst7
-rw-r--r--Documentation/admin-guide/mm/ksm.rst2
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst35
-rw-r--r--Documentation/admin-guide/mm/zswap.rst33
-rw-r--r--Documentation/admin-guide/perf/hisi-pmu.rst1
-rw-r--r--Documentation/admin-guide/perf/hns3-pmu.rst8
-rw-r--r--Documentation/admin-guide/perf/qcom_l2_pmu.rst2
-rw-r--r--Documentation/admin-guide/perf/qcom_l3_pmu.rst2
-rw-r--r--Documentation/admin-guide/perf/thunderx2-pmu.rst2
-rw-r--r--Documentation/admin-guide/perf/xgene-pmu.rst2
-rw-r--r--Documentation/admin-guide/reporting-regressions.rst10
-rw-r--r--Documentation/admin-guide/sysctl/net.rst1
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst16
-rw-r--r--Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst597
-rw-r--r--Documentation/arch/m68k/buddha-driver.rst2
-rw-r--r--Documentation/arch/powerpc/dexcr.rst141
-rw-r--r--Documentation/arch/powerpc/firmware-assisted-dump.rst91
-rw-r--r--Documentation/arch/riscv/cmodx.rst98
-rw-r--r--Documentation/arch/riscv/hwprobe.rst4
-rw-r--r--Documentation/arch/riscv/index.rst1
-rw-r--r--Documentation/arch/s390/index.rst1
-rw-r--r--Documentation/arch/s390/mm.rst111
-rw-r--r--Documentation/arch/s390/vfio-ap.rst32
-rw-r--r--Documentation/arch/sparc/oradax/dax-hv-api.txt2
-rw-r--r--Documentation/arch/x86/resctrl.rst8
-rw-r--r--Documentation/arch/x86/xstate.rst2
-rw-r--r--Documentation/atomic_t.txt4
-rw-r--r--Documentation/bpf/standardization/instruction-set.rst114
-rw-r--r--Documentation/conf.py2
-rw-r--r--Documentation/core-api/dma-api-howto.rst24
-rw-r--r--Documentation/core-api/entry.rst2
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/printk-index.rst4
-rw-r--r--Documentation/core-api/swiotlb.rst321
-rw-r--r--Documentation/core-api/workqueue.rst6
-rw-r--r--Documentation/dev-tools/checkpatch.rst14
-rw-r--r--Documentation/dev-tools/kcsan.rst10
-rw-r--r--Documentation/dev-tools/kselftest.rst2
-rw-r--r--Documentation/dev-tools/testing-overview.rst2
-rw-r--r--Documentation/devicetree/bindings/Makefile36
-rw-r--r--Documentation/devicetree/bindings/access-controllers/access-controllers.yaml84
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-sdram-controller.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml13
-rw-r--r--Documentation/devicetree/bindings/arm/apm/scu.txt17
-rw-r--r--Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml30
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml22
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml5
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt32
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml18
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml48
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml25
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-da850.txt18
-rw-r--r--Documentation/devicetree/bindings/ata/fsl,imx-pata.yaml42
-rw-r--r--Documentation/devicetree/bindings/ata/imx-pata.txt16
-rw-r--r--Documentation/devicetree/bindings/ata/ti,da850-ahci.yaml39
-rw-r--r--Documentation/devicetree/bindings/bus/st,stm32-etzpc.yaml96
-rw-r--r--Documentation/devicetree/bindings/bus/st,stm32mp25-rifsc.yaml105
-rw-r--r--Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml31
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-clock.yaml9
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml9
-rw-r--r--Documentation/devicetree/bindings/clock/google,gs101-clock.yaml55
-rw-r--r--Documentation/devicetree/bindings/clock/keystone-gate.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/keystone-pll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml56
-rw-r--r--Documentation/devicetree/bindings/clock/nxp,imx95-display-master-csr.yaml64
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,hfpll.txt63
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,hfpll.yaml69
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml18
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s3c6400-clock.yaml57
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt76
-rw-r--r--Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32mp25-rcc.yaml172
-rw-r--r--Documentation/devicetree/bindings/clock/ti/adpll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/apll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/autoidle.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/clockdomain.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/composite.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/divider.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/dpll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/fapll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/gate.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/interface.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/mux.txt2
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml2
-rw-r--r--Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-aes.yaml52
-rw-r--r--Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-hash.yaml52
-rw-r--r--Documentation/devicetree/bindings/crypto/omap-sham.txt28
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml1
-rw-r--r--Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml4
-rw-r--r--Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml4
-rw-r--r--Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml30
-rw-r--r--Documentation/devicetree/bindings/crypto/ti,omap-sham.yaml56
-rw-r--r--Documentation/devicetree/bindings/display/atmel,lcdc-display.yaml103
-rw-r--r--Documentation/devicetree/bindings/display/atmel,lcdc.txt87
-rw-r--r--Documentation/devicetree/bindings/display/atmel,lcdc.yaml70
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml55
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml39
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dp.txt112
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/msm/dp-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml9
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml9
-rw-r--r--Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/fascontek,fs035vg158.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/jdi,lpm102a188a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/leadtek,ltk035c5444t.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml62
-rw-r--r--Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml29
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml47
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml17
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml89
-rw-r--r--Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,lq101r1sx01.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos5-dp.yaml163
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml11
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml139
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt95
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml42
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/st,stm32-dma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml4
-rw-r--r--Documentation/devicetree/bindings/dts-coding-style.rst2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml5
-rw-r--r--Documentation/devicetree/bindings/firmware/arm,scmi.yaml54
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml17
-rw-r--r--Documentation/devicetree/bindings/gpio/raspberrypi,firmware-gpio.txt30
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml147
-rw-r--r--Documentation/devicetree/bindings/hwmon/adc128d818.txt38
-rw-r--r--Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml4
-rw-r--r--Documentation/devicetree/bindings/hwmon/as370.txt11
-rw-r--r--Documentation/devicetree/bindings/hwmon/ibm,opal-sensor.yaml37
-rw-r--r--Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt25
-rw-r--r--Documentation/devicetree/bindings/hwmon/ibmpowernv.txt23
-rw-r--r--Documentation/devicetree/bindings/hwmon/lm87.txt30
-rw-r--r--Documentation/devicetree/bindings/hwmon/max6650.txt28
-rw-r--r--Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml70
-rw-r--r--Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml49
-rw-r--r--Documentation/devicetree/bindings/hwmon/pwm-fan.txt1
-rw-r--r--Documentation/devicetree/bindings/hwmon/st,stts751.yaml41
-rw-r--r--Documentation/devicetree/bindings/hwmon/stts751.txt15
-rw-r--r--Documentation/devicetree/bindings/hwmon/syna,as370.yaml32
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml63
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,lm87.yaml69
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-pnx.txt34
-rw-r--r--Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml46
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml19
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.yaml19
-rw-r--r--Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt21
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml8
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mediatek,mt6577-sysirq.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml172
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml172
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml17
-rw-r--r--Documentation/devicetree/bindings/iommu/qcom,tbu.yaml69
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/arm,mhuv3.yaml224
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/amphion,vpu.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/brcm,bcm2835-unicam.yaml127
-rw-r--r--Documentation/devicetree/bindings/media/cec/st,stm32-cec.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/i2c/galaxycore,gc0308.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml35
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml (renamed from Documentation/devicetree/bindings/media/i2c/ov8856.yaml)2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml5
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml11
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml512
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32mp25-video-codec.yaml4
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/samsung,s5pv210-dmc.yaml33
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/actions,atc260x.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml14
-rw-r--r--Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml16
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,cru.yaml8
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt16
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt18
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,misc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/iqs62x.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml10
-rw-r--r--Documentation/devicetree/bindings/mfd/lp873x.txt67
-rw-r--r--Documentation/devicetree/bindings/mfd/max77650.yaml8
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77686.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77693.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/ricoh,rn5t618.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk816.yaml274
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml13
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml12
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stmfx.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stpmic1.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/stericsson,ab8500.yaml48
-rw-r--r--Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml40
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml16
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,lp8732.yaml112
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,tps65086.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,tps6594.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,twl.yaml72
-rw-r--r--Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/arm,pl18x.yaml4
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml3
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml39
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd.yaml4
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/binman.yaml53
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/partition.yaml72
-rw-r--r--Documentation/devicetree/bindings/mtd/samsung,s5pv210-onenand.yaml65
-rw-r--r--Documentation/devicetree/bindings/net/airoha,en8811h.yaml56
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml55
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml33
-rw-r--r--Documentation/devicetree/bindings/net/can/bosch,m_can.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml22
-rw-r--r--Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml169
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml101
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml95
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ethqos.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/renesas,etheravb.yaml12
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ethertsn.yaml33
-rw-r--r--Documentation/devicetree/bindings/net/renesas,rzn1-gmac.yaml66
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/sff,sfp.yaml12
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml20
-rw-r--r--Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml28
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml11
-rw-r--r--Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml35
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml3
-rw-r--r--Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml87
-rw-r--r--Documentation/devicetree/bindings/pci/amlogic,axg-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/apple,pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/faraday,ftpci100.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,layerscape-pcie-ep.yaml102
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml167
-rw-r--r--Documentation/devicetree/bindings/pci/host-generic-pci.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt79
-rw-r--r--Documentation/devicetree/bindings/pci/loongson.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml7
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml22
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci-host.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml22
-rw-r--r--Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/versatile.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/xlnx,axi-pcie-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml8
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8mp-hdmi-phy.yaml62
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml80
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml148
-rw-r--r--Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml28
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml19
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml5
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml10
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml113
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml38
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml52
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml21
-rw-r--r--Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml60
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml3
-rw-r--r--Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/marvell,pxa-pwm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml5
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/snps,dw-apb-timers-pwm2.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/allwinner,sun20i-d1-system-ldos.yaml37
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml7
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml4
-rw-r--r--Documentation/devicetree/bindings/regulator/ti,tps62864.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml6
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml6
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml6
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml279
-rw-r--r--Documentation/devicetree/bindings/riscv/starfive.yaml1
-rw-r--r--Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml49
-rw-r--r--Documentation/devicetree/bindings/rng/st,stm32-rng.yaml4
-rw-r--r--Documentation/devicetree/bindings/rtc/twl-rtc.txt11
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml14
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml4
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml51
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas.yaml8
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/tegra/nvidia,tegra20-pmc.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcbsp.txt50
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcbsp.yaml113
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,audmix.txt50
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,audmix.yaml83
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.txt68
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.yaml118
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml14
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,imx-audio-spdif.yaml66
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,sai.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.yaml35
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,ssi.txt87
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,ssi.yaml194
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-asoc-card.txt117
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml197
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-spdif.txt36
-rw-r--r--Documentation/devicetree/bindings/sound/mediatek,mt2701-wm8960.yaml54
-rw-r--r--Documentation/devicetree/bindings/sound/mt2701-wm8960.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml131
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml120
-rw-r--r--Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml139
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml134
-rw-r--r--Documentation/devicetree/bindings/sound/nuvoton,nau8325.yaml80
-rw-r--r--Documentation/devicetree/bindings/sound/nuvoton,nau8821.yaml7
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.txt36
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.yaml82
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt12
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-das.yaml36
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt27
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.yaml67
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,sm8250.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml98
-rw-r--r--Documentation/devicetree/bindings/sound/rt5645.txt6
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm1681.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm1681.yaml43
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm6240.yaml177
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8776.yaml41
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8974.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8974.yaml41
-rw-r--r--Documentation/devicetree/bindings/sound/wm8776.txt18
-rw-r--r--Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml65
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml8
-rw-r--r--Documentation/devicetree/bindings/spi/marvell,armada-3700-spi.yaml55
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-armada-3700.txt25
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml4
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32-spi.yaml4
-rw-r--r--Documentation/devicetree/bindings/spi/ti,qspi.yaml96
-rw-r--r--Documentation/devicetree/bindings/spi/ti_qspi.txt53
-rw-r--r--Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml12
-rw-r--r--Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml24
-rw-r--r--Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml6
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-lmh.yaml12
-rw-r--r--Documentation/devicetree/bindings/thermal/st,stih407-thermal.yaml58
-rw-r--r--Documentation/devicetree/bindings/thermal/st-thermal.txt32
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml4
-rw-r--r--Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml6
-rw-r--r--Documentation/devicetree/bindings/ufs/qcom,ufs.yaml38
-rw-r--r--Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml38
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml10
-rw-r--r--Documentation/devicetree/bindings/watchdog/aspeed,ast2400-wdt.yaml142
-rw-r--r--Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt73
-rw-r--r--Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt10
-rw-r--r--Documentation/doc-guide/parse-headers.rst2
-rw-r--r--Documentation/driver-api/crypto/iaa/iaa-crypto.rst100
-rw-r--r--Documentation/driver-api/dma-buf.rst2
-rw-r--r--Documentation/driver-api/driver-model/devres.rst1
-rw-r--r--Documentation/driver-api/eisa.rst4
-rw-r--r--Documentation/driver-api/gpio/driver.rst28
-rw-r--r--Documentation/driver-api/gpio/legacy.rst16
-rw-r--r--Documentation/driver-api/media/drivers/index.rst1
-rw-r--r--Documentation/driver-api/media/drivers/ipu6.rst205
-rw-r--r--Documentation/driver-api/mtd/nand_ecc.rst2
-rw-r--r--Documentation/driver-api/scsi.rst17
-rw-r--r--Documentation/driver-api/usb/usb.rst2
-rw-r--r--Documentation/driver-api/vfio.rst2
-rw-r--r--Documentation/driver-api/virtio/writing_virtio_drivers.rst1
-rw-r--r--Documentation/driver-api/wbrf.rst2
-rw-r--r--Documentation/filesystems/api-summary.rst3
-rw-r--r--Documentation/filesystems/bcachefs/CodingStyle.rst186
-rw-r--r--Documentation/filesystems/bcachefs/index.rst12
-rw-r--r--Documentation/filesystems/buffer.rst12
-rw-r--r--Documentation/filesystems/directory-locking.rst4
-rw-r--r--Documentation/filesystems/efivarfs.rst2
-rw-r--r--Documentation/filesystems/f2fs.rst29
-rw-r--r--Documentation/filesystems/index.rst2
-rw-r--r--Documentation/filesystems/porting.rst11
-rw-r--r--Documentation/filesystems/proc.rst33
-rw-r--r--Documentation/filesystems/xfs/xfs-online-fsck-design.rst636
-rw-r--r--Documentation/firmware-guide/acpi/namespace.rst4
-rw-r--r--Documentation/gpu/amdgpu/debugging.rst80
-rw-r--r--Documentation/gpu/amdgpu/display/display-contributing.rst2
-rw-r--r--Documentation/gpu/amdgpu/index.rst1
-rw-r--r--Documentation/gpu/driver-uapi.rst5
-rw-r--r--Documentation/gpu/drm-kms.rst22
-rw-r--r--Documentation/gpu/i915.rst9
-rw-r--r--Documentation/gpu/panfrost.rst9
-rw-r--r--Documentation/gpu/rfc/i915_vm_bind.h11
-rw-r--r--Documentation/hid/hid-bpf.rst2
-rw-r--r--Documentation/hid/intel-ish-hid.rst137
-rw-r--r--Documentation/hwmon/adm1275.rst14
-rw-r--r--Documentation/hwmon/adp1050.rst64
-rw-r--r--Documentation/hwmon/aquacomputer_d5next.rst9
-rw-r--r--Documentation/hwmon/emc1403.rst17
-rw-r--r--Documentation/hwmon/index.rst2
-rw-r--r--Documentation/hwmon/lm70.rst2
-rw-r--r--Documentation/hwmon/nzxt-kraken3.rst19
-rw-r--r--Documentation/hwmon/pmbus.rst2
-rw-r--r--Documentation/hwmon/xdp710.rst83
-rw-r--r--Documentation/index.rst2
-rw-r--r--Documentation/kbuild/kconfig-language.rst3
-rw-r--r--Documentation/kbuild/llvm.rst2
-rw-r--r--Documentation/kbuild/makefiles.rst12
-rw-r--r--Documentation/litmus-tests/README45
-rw-r--r--Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-1.litmus35
-rw-r--r--Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-2.litmus30
-rw-r--r--Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-1.litmus34
-rw-r--r--Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-2.litmus30
-rw-r--r--Documentation/mm/allocation-profiling.rst100
-rw-r--r--Documentation/mm/arch_pgtable_helpers.rst6
-rw-r--r--Documentation/mm/damon/design.rst44
-rw-r--r--Documentation/mm/damon/maintainer-profile.rst13
-rw-r--r--Documentation/mm/index.rst1
-rw-r--r--Documentation/mm/page_frags.rst2
-rw-r--r--Documentation/mm/page_owner.rst73
-rw-r--r--Documentation/mm/page_table_check.rst9
-rw-r--r--Documentation/mm/slub.rst2
-rw-r--r--Documentation/mm/transhuge.rst12
-rw-r--r--Documentation/mm/vmemmap_dedup.rst22
-rw-r--r--Documentation/netlink/genetlink-c.yaml2
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml2
-rw-r--r--Documentation/netlink/genetlink.yaml2
-rw-r--r--Documentation/netlink/netlink-raw.yaml2
-rw-r--r--Documentation/netlink/specs/ethtool.yaml55
-rw-r--r--Documentation/netlink/specs/netdev.yaml119
-rw-r--r--Documentation/netlink/specs/nfsd.yaml110
-rw-r--r--Documentation/netlink/specs/nftables.yaml1264
-rw-r--r--Documentation/netlink/specs/nlctrl.yaml6
-rw-r--r--Documentation/netlink/specs/rt_link.yaml489
-rw-r--r--Documentation/netlink/specs/tc.yaml72
-rw-r--r--Documentation/netlink/specs/team.yaml204
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst11
-rw-r--r--Documentation/networking/devlink/devlink-eswitch-attr.rst76
-rw-r--r--Documentation/networking/devlink/devlink-info.rst5
-rw-r--r--Documentation/networking/devlink/devlink-port.rst33
-rw-r--r--Documentation/networking/devlink/hns3.rst5
-rw-r--r--Documentation/networking/devlink/ice.rst47
-rw-r--r--Documentation/networking/devlink/index.rst1
-rw-r--r--Documentation/networking/devlink/nfp.rst5
-rw-r--r--Documentation/networking/dns_resolver.rst4
-rw-r--r--Documentation/networking/ethtool-netlink.rst29
-rw-r--r--Documentation/networking/filter.rst4
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.rst4
-rw-r--r--Documentation/networking/pse-pd/index.rst10
-rw-r--r--Documentation/networking/pse-pd/introduction.rst73
-rw-r--r--Documentation/networking/pse-pd/pse-pi.rst301
-rw-r--r--Documentation/networking/representors.rst1
-rw-r--r--Documentation/networking/xfrm_proc.rst6
-rw-r--r--Documentation/power/pci.rst2
-rw-r--r--Documentation/process/changes.rst11
-rw-r--r--Documentation/process/coding-style.rst23
-rw-r--r--Documentation/process/embargoed-hardware-issues.rst2
-rw-r--r--Documentation/process/handling-regressions.rst2
-rw-r--r--Documentation/process/maintainer-tip.rst18
-rw-r--r--Documentation/process/stable-kernel-rules.rst236
-rw-r--r--Documentation/rust/arch-support.rst3
-rw-r--r--Documentation/rust/general-information.rst57
-rw-r--r--Documentation/rust/testing.rst25
-rw-r--r--Documentation/scheduler/sched-domains.rst12
-rw-r--r--Documentation/scheduler/sched-stats.rst37
-rw-r--r--Documentation/scsi/scsi_mid_low_api.rst22
-rw-r--r--Documentation/security/SCTP.rst2
-rw-r--r--Documentation/security/keys/trusted-encrypted.rst53
-rw-r--r--Documentation/security/snp-tdx-threat-model.rst2
-rw-r--r--Documentation/security/tpm/index.rst2
-rw-r--r--Documentation/security/tpm/tpm-security.rst216
-rw-r--r--Documentation/security/tpm/tpm_tis.rst46
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst4
-rw-r--r--Documentation/sound/soc/dapm-graph.svg375
-rw-r--r--Documentation/sound/soc/dapm.rst167
-rwxr-xr-xDocumentation/sphinx/kernel_include.py1
-rw-r--r--Documentation/sphinx/kerneldoc-preamble.sty9
-rw-r--r--Documentation/spi/index.rst1
-rw-r--r--Documentation/spi/pxa2xx.rst211
-rw-r--r--Documentation/spi/spi-summary.rst5
-rw-r--r--Documentation/tee/index.rst1
-rw-r--r--Documentation/tee/ts-tee.rst71
-rw-r--r--Documentation/timers/no_hz.rst7
-rw-r--r--Documentation/tools/rtla/common_options.rst11
-rw-r--r--Documentation/tools/rtla/common_osnoise_options.rst4
-rw-r--r--Documentation/tools/rtla/common_timerlat_options.rst10
-rw-r--r--Documentation/trace/fprobetrace.rst4
-rw-r--r--Documentation/trace/ftrace.rst2
-rw-r--r--Documentation/trace/index.rst1
-rw-r--r--Documentation/trace/kprobes.rst1
-rw-r--r--Documentation/trace/kprobetrace.rst10
-rw-r--r--Documentation/trace/ring-buffer-map.rst106
-rw-r--r--Documentation/trace/tracepoints.rst2
-rw-r--r--Documentation/translations/it_IT/doc-guide/kernel-doc.rst10
-rw-r--r--Documentation/translations/it_IT/index.rst2
-rw-r--r--Documentation/translations/it_IT/process/2.Process.rst9
-rw-r--r--Documentation/translations/it_IT/process/4.Coding.rst4
-rw-r--r--Documentation/translations/it_IT/process/7.AdvancedTopics.rst19
-rw-r--r--Documentation/translations/it_IT/process/changes.rst36
-rw-r--r--Documentation/translations/it_IT/process/coding-style.rst12
-rw-r--r--Documentation/translations/it_IT/process/deprecated.rst2
-rw-r--r--Documentation/translations/it_IT/process/howto.rst6
-rw-r--r--Documentation/translations/it_IT/process/index.rst68
-rw-r--r--Documentation/translations/it_IT/process/security-bugs.rst (renamed from Documentation/translations/it_IT/admin-guide/security-bugs.rst)0
-rw-r--r--Documentation/translations/it_IT/process/stable-kernel-rules.rst2
-rw-r--r--Documentation/translations/it_IT/process/submitting-patches.rst27
-rw-r--r--Documentation/translations/ja_JP/process/howto.rst2
-rw-r--r--Documentation/translations/sp_SP/index.rst2
-rw-r--r--Documentation/translations/sp_SP/memory-barriers.txt4
-rw-r--r--Documentation/translations/sp_SP/process/1.Intro.rst302
-rw-r--r--Documentation/translations/sp_SP/process/2.Process.rst542
-rw-r--r--Documentation/translations/sp_SP/process/3.Early-stage.rst11
-rw-r--r--Documentation/translations/sp_SP/process/4.Coding.rst11
-rw-r--r--Documentation/translations/sp_SP/process/5.Posting.rst11
-rw-r--r--Documentation/translations/sp_SP/process/6.Followthrough.rst11
-rw-r--r--Documentation/translations/sp_SP/process/7.AdvancedTopics.rst11
-rw-r--r--Documentation/translations/sp_SP/process/8.Conclusion.rst11
-rw-r--r--Documentation/translations/sp_SP/process/code-of-conduct.rst2
-rw-r--r--Documentation/translations/sp_SP/process/coding-style.rst2
-rw-r--r--Documentation/translations/sp_SP/process/development-process.rst27
-rw-r--r--Documentation/translations/sp_SP/process/email-clients.rst2
-rw-r--r--Documentation/translations/sp_SP/process/howto.rst2
-rw-r--r--Documentation/translations/sp_SP/process/index.rst1
-rw-r--r--Documentation/translations/sp_SP/process/kernel-docs.rst2
-rw-r--r--Documentation/translations/sp_SP/process/kernel-enforcement-statement.rst2
-rw-r--r--Documentation/translations/sp_SP/process/magic-number.rst2
-rw-r--r--Documentation/translations/sp_SP/process/programming-language.rst2
-rw-r--r--Documentation/translations/sp_SP/process/submitting-patches.rst30
-rw-r--r--Documentation/translations/zh_CN/PCI/msi-howto.rst2
-rw-r--r--Documentation/translations/zh_CN/PCI/pci.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/cachetlb.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/workqueue.rst398
-rw-r--r--Documentation/translations/zh_CN/dev-tools/index.rst6
-rw-r--r--Documentation/translations/zh_CN/dev-tools/kcov.rst359
-rw-r--r--Documentation/translations/zh_CN/dev-tools/kmemleak.rst229
-rw-r--r--Documentation/translations/zh_CN/dev-tools/ubsan.rst91
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/legacy.rst16
-rw-r--r--Documentation/translations/zh_CN/index.rst4
-rw-r--r--Documentation/translations/zh_CN/mm/page_frags.rst2
-rw-r--r--Documentation/translations/zh_CN/process/cve.rst89
-rw-r--r--Documentation/translations/zh_CN/process/index.rst1
-rw-r--r--Documentation/translations/zh_CN/process/submitting-patches.rst8
-rw-r--r--Documentation/translations/zh_CN/rust/arch-support.rst14
-rw-r--r--Documentation/translations/zh_CN/rust/coding-guidelines.rst12
-rw-r--r--Documentation/translations/zh_CN/rust/general-information.rst2
-rw-r--r--Documentation/translations/zh_CN/rust/quick-start.rst50
-rw-r--r--Documentation/translations/zh_CN/scheduler/sched-domains.rst10
-rw-r--r--Documentation/translations/zh_CN/scheduler/sched-stats.rst30
-rw-r--r--Documentation/translations/zh_TW/gpio.txt17
-rw-r--r--Documentation/translations/zh_TW/process/submit-checklist.rst2
-rw-r--r--Documentation/translations/zh_TW/process/submitting-patches.rst8
-rw-r--r--Documentation/userspace-api/gpio/gpio-v2-get-line-ioctl.rst2
-rw-r--r--Documentation/userspace-api/landlock.rst78
-rw-r--r--Documentation/userspace-api/media/cec/cec-func-open.rst4
-rw-r--r--Documentation/userspace-api/media/dvb/frontend_f_open.rst2
-rw-r--r--Documentation/userspace-api/media/glossary.rst12
-rw-r--r--Documentation/userspace-api/media/v4l/dev-meta.rst21
-rw-r--r--Documentation/userspace-api/media/v4l/dev-subdev.rst31
-rw-r--r--Documentation/userspace-api/media/v4l/func-open.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/meta-formats.rst3
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-generic.rst340
-rw-r--r--Documentation/userspace-api/media/v4l/mmap.rst2
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst48
-rw-r--r--Documentation/userspace-api/media/v4l/subdev-formats.rst269
-rw-r--r--Documentation/userspace-api/media/v4l/user-func.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst7
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-remove-bufs.rst86
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-subdev-g-crop.rst6
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst51
-rw-r--r--Documentation/userspace-api/media/videodev2.h.rst.exceptions1
-rw-r--r--Documentation/userspace-api/netlink/genetlink-legacy.rst22
-rw-r--r--Documentation/virt/kvm/api.rst19
-rw-r--r--Documentation/virt/kvm/arm/fw-pseudo-registers.rst138
-rw-r--r--Documentation/virt/kvm/arm/hypercalls.rst180
-rw-r--r--Documentation/virt/kvm/arm/index.rst1
-rw-r--r--Documentation/virt/kvm/arm/ptp_kvm.rst38
-rw-r--r--Documentation/virt/kvm/x86/amd-memory-encryption.rst101
-rw-r--r--Documentation/virt/kvm/x86/msr.rst19
-rw-r--r--Documentation/wmi/devices/msi-wmi-platform.rst194
-rw-r--r--Documentation/wmi/driver-development-guide.rst178
-rw-r--r--Documentation/wmi/index.rst1
-rw-r--r--MAINTAINERS802
-rw-r--r--Makefile46
-rw-r--r--arch/Kconfig46
-rw-r--r--arch/alpha/Kconfig175
-rw-r--r--arch/alpha/Makefile8
-rw-r--r--arch/alpha/include/asm/core_apecs.h534
-rw-r--r--arch/alpha/include/asm/core_lca.h378
-rw-r--r--arch/alpha/include/asm/core_t2.h8
-rw-r--r--arch/alpha/include/asm/dma-mapping.h4
-rw-r--r--arch/alpha/include/asm/dma.h9
-rw-r--r--arch/alpha/include/asm/elf.h4
-rw-r--r--arch/alpha/include/asm/io.h26
-rw-r--r--arch/alpha/include/asm/irq.h10
-rw-r--r--arch/alpha/include/asm/jensen.h363
-rw-r--r--arch/alpha/include/asm/machvec.h9
-rw-r--r--arch/alpha/include/asm/mmu_context.h45
-rw-r--r--arch/alpha/include/asm/special_insns.h5
-rw-r--r--arch/alpha/include/asm/tlbflush.h37
-rw-r--r--arch/alpha/include/asm/uaccess.h80
-rw-r--r--arch/alpha/include/asm/vga.h2
-rw-r--r--arch/alpha/include/uapi/asm/compiler.h18
-rw-r--r--arch/alpha/kernel/Makefile25
-rw-r--r--arch/alpha/kernel/asm-offsets.c21
-rw-r--r--arch/alpha/kernel/bugs.c1
-rw-r--r--arch/alpha/kernel/console.c1
-rw-r--r--arch/alpha/kernel/core_apecs.c420
-rw-r--r--arch/alpha/kernel/core_cia.c6
-rw-r--r--arch/alpha/kernel/core_irongate.c1
-rw-r--r--arch/alpha/kernel/core_lca.c517
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/core_wildfire.c8
-rw-r--r--arch/alpha/kernel/entry.S1
-rw-r--r--arch/alpha/kernel/io.c19
-rw-r--r--arch/alpha/kernel/irq.c1
-rw-r--r--arch/alpha/kernel/irq_i8259.c4
-rw-r--r--arch/alpha/kernel/machvec_impl.h25
-rw-r--r--arch/alpha/kernel/osf_sys.c5
-rw-r--r--arch/alpha/kernel/pci-noop.c113
-rw-r--r--arch/alpha/kernel/pci_impl.h4
-rw-r--r--arch/alpha/kernel/pci_iommu.c2
-rw-r--r--arch/alpha/kernel/perf_event.c2
-rw-r--r--arch/alpha/kernel/proto.h44
-rw-r--r--arch/alpha/kernel/setup.c109
-rw-r--r--arch/alpha/kernel/smc37c669.c6
-rw-r--r--arch/alpha/kernel/smc37c93x.c2
-rw-r--r--arch/alpha/kernel/smp.c1
-rw-r--r--arch/alpha/kernel/srmcons.c2
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c87
-rw-r--r--arch/alpha/kernel/sys_eb64p.c238
-rw-r--r--arch/alpha/kernel/sys_jensen.c237
-rw-r--r--arch/alpha/kernel/sys_mikasa.c57
-rw-r--r--arch/alpha/kernel/sys_nautilus.c8
-rw-r--r--arch/alpha/kernel/sys_noritake.c60
-rw-r--r--arch/alpha/kernel/sys_sable.c294
-rw-r--r--arch/alpha/kernel/sys_sio.c486
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/alpha/kernel/traps.c64
-rw-r--r--arch/alpha/lib/Makefile14
-rw-r--r--arch/alpha/lib/checksum.c2
-rw-r--r--arch/alpha/lib/fpreg.c2
-rw-r--r--arch/alpha/lib/memcpy.c3
-rw-r--r--arch/alpha/lib/stycpy.S11
-rw-r--r--arch/alpha/lib/styncpy.S11
-rw-r--r--arch/alpha/math-emu/math.c7
-rw-r--r--arch/alpha/mm/init.c2
-rw-r--r--arch/arc/Kbuild1
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/boot/Makefile4
-rw-r--r--arch/arc/boot/dts/Makefile3
-rw-r--r--arch/arc/boot/dts/axc003.dtsi4
-rw-r--r--arch/arc/boot/dts/hsdk.dts1
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi2
-rw-r--r--arch/arc/include/asm/cachetype.h9
-rw-r--r--arch/arc/include/asm/dsp.h2
-rw-r--r--arch/arc/include/asm/entry-compact.h10
-rw-r--r--arch/arc/include/asm/entry.h4
-rw-r--r--arch/arc/include/asm/fb.h8
-rw-r--r--arch/arc/include/asm/irq.h2
-rw-r--r--arch/arc/include/asm/irqflags-compact.h2
-rw-r--r--arch/arc/include/asm/mmu-arcv2.h2
-rw-r--r--arch/arc/include/asm/mmu_context.h2
-rw-r--r--arch/arc/include/asm/pgtable-bits-arcv2.h2
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/shmparam.h2
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/include/asm/thread_info.h2
-rw-r--r--arch/arc/include/uapi/asm/swab.h2
-rw-r--r--arch/arc/kernel/entry-arcv2.S8
-rw-r--r--arch/arc/kernel/entry.S4
-rw-r--r--arch/arc/kernel/head.S2
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/kprobes.c7
-rw-r--r--arch/arc/kernel/perf_event.c2
-rw-r--r--arch/arc/kernel/setup.c2
-rw-r--r--arch/arc/kernel/signal.c7
-rw-r--r--arch/arc/kernel/traps.c2
-rw-r--r--arch/arc/kernel/vmlinux.lds.S4
-rw-r--r--arch/arc/mm/dma.c3
-rw-r--r--arch/arc/mm/mmap.c4
-rw-r--r--arch/arc/mm/tlb.c4
-rw-r--r--arch/arc/mm/tlbex.S8
-rw-r--r--arch/arc/net/Makefile6
-rw-r--r--arch/arc/net/bpf_jit.h164
-rw-r--r--arch/arc/net/bpf_jit_arcv2.c3005
-rw-r--r--arch/arc/net/bpf_jit_core.c1425
-rw-r--r--arch/arm/Kbuild2
-rw-r--r--arch/arm/Kconfig25
-rw-r--r--arch/arm/boot/Makefile3
-rw-r--r--arch/arm/boot/bootp/Makefile1
-rw-r--r--arch/arm/boot/compressed/Makefile7
-rw-r--r--arch/arm/boot/dts/allwinner/Makefile1
-rw-r--r--arch/arm/boot/dts/allwinner/sun5i-a13-pocketbook-614-plus.dts218
-rw-r--r--arch/arm/boot/dts/allwinner/sun5i-a13.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sun5i-gr8-chip-pro.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun5i-r8-chip.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun6i-a31-hummingbird.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun6i-a31.dtsi16
-rw-r--r--arch/arm/boot/dts/allwinner/sun6i-a31s-sinovoip-bpi-m2.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-bananapi-m1-plus.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-cubietruck.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-hummingbird.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som-evb-emmc.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb-emmc.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-olinuxino-lime2.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20-wits-pro-a20-dkt.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun7i-a20.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a23-a33.dtsi14
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2407pxe03.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2809pxe04.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a33-ga10h-v1.1.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a33-inet-d978-rev2.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a33.dtsi10
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a83t-bananapi-m3.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a83t-cubietruck-plus.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a83t-tbs-a711.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi8
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h2-plus-bananapi-m2-zero.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-r1.dts5
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-zero.dts6
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-beelink-x2.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-duo2.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-m1-plus.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-r1.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-2.dts4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-lite.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-pc-plus.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-zero-plus2.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-q8-common.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-r16-bananapi-m2m.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-r16-parrot.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-r40-bananapi-m2-ultra.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-r40-oka40i-c.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-s3-pinecube.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-v40-bananapi-m2-berry.dts2
-rw-r--r--arch/arm/boot/dts/allwinner/sun9i-a80.dtsi4
-rw-r--r--arch/arm/boot/dts/allwinner/sunxi-bananapi-m2-plus.dtsi2
-rw-r--r--arch/arm/boot/dts/allwinner/sunxi-h3-h5-emlid-neutis.dtsi2
-rw-r--r--arch/arm/boot/dts/allwinner/sunxi-h3-h5.dtsi4
-rw-r--r--arch/arm/boot/dts/aspeed/Makefile9
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts1
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c246d4i.dts9
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c256d4i.dts322
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-romed8hm3.dts13
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts324
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts360
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-asus-x4tf.dts581
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-delta-ahe50dc.dts4
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-cloudripper.dts544
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts4
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts648
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva-cmc.dts265
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts543
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts15
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemitev2.dts2
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts8
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts1623
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g6.dtsi4
-rw-r--r--arch/arm/boot/dts/aspeed/ibm-power10-dual.dtsi2
-rw-r--r--arch/arm/boot/dts/broadcom/Makefile2
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711-rpi-4-b.dts29
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711-rpi-cm4-io.dts33
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi34
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711.dtsi12
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi7
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2835-rpi.dtsi23
-rw-r--r--arch/arm/boot/dts/broadcom/bcm283x.dtsi24
-rw-r--r--arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts150
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dts13
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dtsi133
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts156
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac88u.dts65
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts8
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g5ek.dts8
-rw-r--r--arch/arm/boot/dts/nvidia/tegra20-colibri.dtsi4
-rw-r--r--arch/arm/boot/dts/nvidia/tegra20-paz00.dts43
-rw-r--r--arch/arm/boot/dts/nxp/imx/Makefile3
-rw-r--r--arch/arm/boot/dts/nxp/imx/e60k02.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/e70k02.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-som.dtsi78
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-kp-ddc.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-kp.dtsi10
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-mba53.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-ppd.dts6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi8
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-mamoj.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-ba16.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-bosch-acc.dts10
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-kp.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-novena.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-pistachio.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-var-dt6customboard.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi9
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-nit6xlite.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_max.dtsi9
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_som2.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6x.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi7
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabrelite.dtsi9
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-savageboard.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-udoo.dtsi25
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sl-tolino-shine2hd.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sll.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx-nitrogen6sx.dts6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi8
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx-softing-vining-2000.dts12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx.dtsi7
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6uldev.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-43.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-emmc.dts19
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-nand.dts19
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi424
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi155
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-master.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slave.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slavext.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-uti260b.dts566
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7s-warp.dts1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7s.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom/Makefile3
-rw-r--r--arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts359
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8064.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8084.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi45
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi30
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8916-smp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi818
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts813
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-kltechn.dts16
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts574
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi539
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-leo.dts44
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx55.dtsi10
-rw-r--r--arch/arm/boot/dts/renesas/r7s72100.dtsi8
-rw-r--r--arch/arm/boot/dts/renesas/r8a73a4.dtsi37
-rw-r--r--arch/arm/boot/dts/renesas/r8a7742.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7743.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7744.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7745.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a77470.dtsi44
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792.dtsi59
-rw-r--r--arch/arm/boot/dts/renesas/r8a7793.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794.dtsi58
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032.dtsi1
-rw-r--r--arch/arm/boot/dts/samsung/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/samsung/exynos4.dtsi3
-rw-r--r--arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts2
-rw-r--r--arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi6
-rw-r--r--arch/arm/boot/dts/samsung/exynos4412-origen.dts2
-rw-r--r--arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts12
-rw-r--r--arch/arm/boot/dts/samsung/exynos5250.dtsi3
-rw-r--r--arch/arm/boot/dts/samsung/exynos5420.dtsi3
-rw-r--r--arch/arm/boot/dts/samsung/exynos5800-peach-pi.dts2
-rw-r--r--arch/arm/boot/dts/samsung/s5pv210.dtsi6
-rw-r--r--arch/arm/boot/dts/st/stm32f746.dtsi17
-rw-r--r--arch/arm/boot/dts/st/stm32f769.dtsi17
-rw-r--r--arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi57
-rw-r--r--arch/arm/boot/dts/st/stm32mp131.dtsi1162
-rw-r--r--arch/arm/boot/dts/st/stm32mp133.dtsi51
-rw-r--r--arch/arm/boot/dts/st/stm32mp135.dtsi11
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dk.dts53
-rw-r--r--arch/arm/boot/dts/st/stm32mp13xc.dtsi19
-rw-r--r--arch/arm/boot/dts/st/stm32mp13xf.dtsi19
-rw-r--r--arch/arm/boot/dts/st/stm32mp151.dtsi2801
-rw-r--r--arch/arm/boot/dts/st/stm32mp153.dtsi52
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-ed1.dts12
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xc.dtsi19
-rw-r--r--arch/arm/boot/dts/ti/keystone/keystone-k2g.dtsi5
-rw-r--r--arch/arm/boot/dts/ti/omap/am33xx.dtsi8
-rw-r--r--arch/arm/boot/dts/ti/omap/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/omap/dra76x.dtsi63
-rw-r--r--arch/arm/boot/dts/ti/omap/dra7xx-clocks.dtsi270
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3-n900.dts2
-rw-r--r--arch/arm/configs/collie_defconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/keystone_defconfig2
-rw-r--r--arch/arm/configs/lpc18xx_defconfig2
-rw-r--r--arch/arm/configs/moxart_defconfig2
-rw-r--r--arch/arm/configs/mps2_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig3
-rw-r--r--arch/arm/configs/stm32_defconfig2
-rw-r--r--arch/arm/configs/sunxi_defconfig1
-rw-r--r--arch/arm/include/asm/assembler.h1
-rw-r--r--arch/arm/include/asm/fb.h6
-rw-r--r--arch/arm/include/asm/glue-cache.h28
-rw-r--r--arch/arm/include/asm/hugetlb.h6
-rw-r--r--arch/arm/include/asm/hw_breakpoint.h1
-rw-r--r--arch/arm/include/asm/mman.h14
-rw-r--r--arch/arm/include/asm/pgtable-2level.h5
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h27
-rw-r--r--arch/arm/include/asm/pgtable-3level.h5
-rw-r--r--arch/arm/include/asm/proc-fns.h12
-rw-r--r--arch/arm/include/asm/ptrace.h1
-rw-r--r--arch/arm/include/asm/topology.h6
-rw-r--r--arch/arm/include/asm/uaccess-asm.h58
-rw-r--r--arch/arm/include/asm/uaccess.h45
-rw-r--r--arch/arm/kernel/asm-offsets.c1
-rw-r--r--arch/arm/kernel/entry-ftrace.S4
-rw-r--r--arch/arm/kernel/hw_breakpoint.c43
-rw-r--r--arch/arm/kernel/irq.c1
-rw-r--r--arch/arm/kernel/module.c34
-rw-r--r--arch/arm/kernel/sleep.S4
-rw-r--r--arch/arm/kernel/suspend.c8
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kernel/traps.c1
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S20
-rw-r--r--arch/arm/lib/delay-loop.S16
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c4
-rw-r--r--arch/arm/mach-imx/mmdc.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c23
-rw-r--r--arch/arm/mach-orion5x/board-d2net.c16
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c63
-rw-r--r--arch/arm/mach-orion5x/mv2120-setup.c29
-rw-r--r--arch/arm/mach-orion5x/net2big-setup.c21
-rw-r--r--arch/arm/mach-orion5x/ts409-setup.c25
-rw-r--r--arch/arm/mach-pxa/devices.c18
-rw-r--r--arch/arm/mach-pxa/spitz.c35
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c22
-rw-r--r--arch/arm/mach-s3c/Makefile2
-rw-r--r--arch/arm/mach-sa1100/h3600.c47
-rw-r--r--arch/arm/mach-stm32/Kconfig1
-rw-r--r--arch/arm/mm/Makefile4
-rw-r--r--arch/arm/mm/cache-b15-rac.c1
-rw-r--r--arch/arm/mm/cache-fa.S47
-rw-r--r--arch/arm/mm/cache-nop.S61
-rw-r--r--arch/arm/mm/cache-v4.S55
-rw-r--r--arch/arm/mm/cache-v4wb.S49
-rw-r--r--arch/arm/mm/cache-v4wt.S55
-rw-r--r--arch/arm/mm/cache-v6.S51
-rw-r--r--arch/arm/mm/cache-v7.S76
-rw-r--r--arch/arm/mm/cache-v7m.S55
-rw-r--r--arch/arm/mm/cache.c663
-rw-r--r--arch/arm/mm/dma-mapping-nommu.c3
-rw-r--r--arch/arm/mm/dma-mapping.c16
-rw-r--r--arch/arm/mm/fault.c61
-rw-r--r--arch/arm/mm/hugetlbpage.c34
-rw-r--r--arch/arm/mm/init.c45
-rw-r--r--arch/arm/mm/mmap.c5
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/proc-arm1020.S69
-rw-r--r--arch/arm/mm/proc-arm1020e.S70
-rw-r--r--arch/arm/mm/proc-arm1022.S69
-rw-r--r--arch/arm/mm/proc-arm1026.S70
-rw-r--r--arch/arm/mm/proc-arm720.S25
-rw-r--r--arch/arm/mm/proc-arm740.S26
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S34
-rw-r--r--arch/arm/mm/proc-arm920.S76
-rw-r--r--arch/arm/mm/proc-arm922.S69
-rw-r--r--arch/arm/mm/proc-arm925.S66
-rw-r--r--arch/arm/mm/proc-arm926.S75
-rw-r--r--arch/arm/mm/proc-arm940.S69
-rw-r--r--arch/arm/mm/proc-arm946.S65
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S26
-rw-r--r--arch/arm/mm/proc-fa526.S24
-rw-r--r--arch/arm/mm/proc-feroceon.S105
-rw-r--r--arch/arm/mm/proc-macros.S33
-rw-r--r--arch/arm/mm/proc-mohawk.S74
-rw-r--r--arch/arm/mm/proc-sa110.S23
-rw-r--r--arch/arm/mm/proc-sa1100.S31
-rw-r--r--arch/arm/mm/proc-v6.S31
-rw-r--r--arch/arm/mm/proc-v7-2level.S8
-rw-r--r--arch/arm/mm/proc-v7-3level.S8
-rw-r--r--arch/arm/mm/proc-v7.S66
-rw-r--r--arch/arm/mm/proc-v7m.S41
-rw-r--r--arch/arm/mm/proc-xsc3.S75
-rw-r--r--arch/arm/mm/proc-xscale.S125
-rw-r--r--arch/arm/mm/proc.c500
-rw-r--r--arch/arm/mm/tlb-fa.S12
-rw-r--r--arch/arm/mm/tlb-v4.S15
-rw-r--r--arch/arm/mm/tlb-v4wb.S12
-rw-r--r--arch/arm/mm/tlb-v4wbi.S12
-rw-r--r--arch/arm/mm/tlb-v6.S12
-rw-r--r--arch/arm/mm/tlb-v7.S14
-rw-r--r--arch/arm/mm/tlb.c84
-rw-r--r--arch/arm/net/bpf_jit_32.c81
-rw-r--r--arch/arm/plat-orion/Makefile2
-rw-r--r--arch/arm/tools/Makefile2
-rw-r--r--arch/arm/vdso/Makefile9
-rw-r--r--arch/arm64/Kconfig6
-rw-r--r--arch/arm64/Kconfig.platforms9
-rw-r--r--arch/arm64/Makefile11
-rw-r--r--arch/arm64/boot/.gitignore1
-rw-r--r--arch/arm64/boot/Makefile6
-rw-r--r--arch/arm64/boot/dts/actions/s700-cubieboard7.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi16
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi18
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts183
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts6
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi115
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi19
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts7
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts327
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts36
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-plus.dts53
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi2
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v2.dtsi35
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v3.dtsi12
-rw-r--r--arch/arm64/boot/dts/amd/elba-16core.dtsi2
-rw-r--r--arch/arm64/boot/dts/amd/elba-asic-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/amd/elba-asic.dts2
-rw-r--r--arch/arm64/boot/dts/amd/elba-flash-parts.dtsi2
-rw-r--r--arch/arm64/boot/dts/amd/elba.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile7
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4-a113l2-ba400.dts42
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi66
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi40
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5-a113x2-av400.dts42
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi40
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-t7-reset.h197
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi70
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts384
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3-ts050.dtso108
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi13
-rw-r--r--arch/arm64/boot/dts/apm/apm-merlin.dts2
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts2
-rw-r--r--arch/arm64/boot/dts/apm/apm-shadowcat.dtsi14
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi15
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/juno-scmi.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts1
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi2
-rw-r--r--arch/arm64/boot/dts/cavium/thunder-88xx.dtsi22
-rw-r--r--arch/arm64/boot/dts/cavium/thunder2-99xx.dts2
-rw-r--r--arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi3
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi5
-rw-r--r--arch/arm64/boot/dts/exynos/exynos850.dtsi26
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov9.dtsi12
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-oriole.dts46
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi919
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile7
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3.dts18
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi20
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi108
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2162a-clearfog.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2162a-sr-som.dtsi9
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi285
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-cm40.dtsi91
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi40
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri-aster.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri-eval-v3.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri-iris-v2.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri-iris.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-evk.dts37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-var-som-symphony.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi40
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin-yavia.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi26
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi9
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-ddr3l-evk.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi45
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-debix-som-a-bmb-08.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts45
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s.dtsi46
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-navqp.dts424
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts38
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi38
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi38
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso13
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi56
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi53
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi170
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts86
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts140
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8ulp-evk.dts84
-rw-r--r--arch/arm64/boot/dts/freescale/imx8ulp.dtsi94
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts397
-rw-r--r--arch/arm64/boot/dts/freescale/imx93.dtsi140
-rw-r--r--arch/arm64/boot/dts/freescale/mba8mx.dtsi14
-rw-r--r--arch/arm64/boot/dts/freescale/s32g2.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-evb.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/s32g3.dtsi233
-rw-r--r--arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts45
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi43
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts7
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05-d02.dts4
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05.dtsi12
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06.dtsi88
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi84
-rw-r--r--arch/arm64/boot/dts/intel/keembay-soc.dtsi2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi2
-rw-r--r--arch/arm64/boot/dts/lg/lg1312-ref.dts2
-rw-r--r--arch/arm64/boot/dts/lg/lg1313-ref.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts98
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap80x.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-crb.dtsi13
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-db.dtsi18
-rw-r--r--arch/arm64/boot/dts/marvell/cn9131-db.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/cn9132-db.dtsi12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi34
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi36
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516.dtsi2
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5.dtsi4
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi149
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi35
-rw-r--r--arch/arm64/boot/dts/nuvoton/nuvoton-npcm845-evb.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132-norrin.dts4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi41
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi83
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi30
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi67
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150l.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/qcm2290.dtsi58
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-idp.dts189
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts251
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qrb2210-rb1.dts61
-rw-r--r--arch/arm64/boot/dts/qcom/qrb4210-rb2.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sa8155p-adp.dts32
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts40
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi28
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts59
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi55
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts107
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi198
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi9
-rw-r--r--arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts31
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts64
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi123
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-hdk.dts16
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi36
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350-hdk.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi26
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450-hdk.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450-qrd.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi39
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts779
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi52
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-mtp.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-qrd.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi265
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-crd.dts47
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi51
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-qcp.dts27
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi50
-rw-r--r--arch/arm64/boot/dts/realtek/rtd129x.dtsi2
-rw-r--r--arch/arm64/boot/dts/realtek/rtd139x.dtsi2
-rw-r--r--arch/arm64/boot/dts/realtek/rtd16xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso214
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts27
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi537
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043u.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2ul-smarc.dtsi58
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi56
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3326-gameforce-chi.dts809
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi37
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi53
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353ps.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353vs.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rgb30.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-powkiddy-x55.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts726
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz-blade.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz-model-a.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts404
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso137
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts528
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi41
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts721
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts161
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi558
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts66
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts409
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts24
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts65
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588.dtsi72
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts73
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts685
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts101
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts22
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s.dtsi377
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts4
-rw-r--r--arch/arm64/boot/dts/sprd/sc9860.dtsi58
-rw-r--r--arch/arm64/boot/dts/sprd/sc9863a.dtsi2
-rw-r--r--arch/arm64/boot/dts/sprd/sharkl3.dtsi18
-rw-r--r--arch/arm64/boot/dts/sprd/sp9860g-1h10.dts30
-rw-r--r--arch/arm64/boot/dts/sprd/whale2.dtsi2
-rw-r--r--arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi82
-rw-r--r--arch/arm64/boot/dts/st/stm32mp251.dtsi535
-rw-r--r--arch/arm64/boot/dts/st/stm32mp253.dtsi7
-rw-r--r--arch/arm64/boot/dts/st/stm32mp255.dtsi33
-rw-r--r--arch/arm64/boot/dts/st/stm32mp257f-ev1.dts34
-rw-r--r--arch/arm64/boot/dts/synaptics/berlin4ct.dtsi2
-rw-r--r--arch/arm64/boot/dts/tesla/fsd.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/Makefile11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi32
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-mallow.dtsi22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi47
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts35
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts127
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-main.dtsi23
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7-sk.dts21
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-main.dtsi55
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts72
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-gpio-fan.dtso50
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi32
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi56
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi7
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts13
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm.dts8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi9
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4.dtsi1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi2
-rw-r--r--arch/arm64/configs/defconfig23
-rw-r--r--arch/arm64/configs/hardening.config1
-rw-r--r--arch/arm64/crypto/aes-ce.S34
-rw-r--r--arch/arm64/crypto/aes-neon.S20
-rw-r--r--arch/arm64/include/asm/assembler.h13
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/el2_setup.h9
-rw-r--r--arch/arm64/include/asm/esr.h12
-rw-r--r--arch/arm64/include/asm/fb.h10
-rw-r--r--arch/arm64/include/asm/hugetlb.h6
-rw-r--r--arch/arm64/include/asm/insn.h8
-rw-r--r--arch/arm64/include/asm/io.h132
-rw-r--r--arch/arm64/include/asm/irqflags.h1
-rw-r--r--arch/arm64/include/asm/jump_label.h28
-rw-r--r--arch/arm64/include/asm/kvm_asm.h8
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h16
-rw-r--r--arch/arm64/include/asm/kvm_host.h156
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h4
-rw-r--r--arch/arm64/include/asm/kvm_nested.h13
-rw-r--r--arch/arm64/include/asm/kvm_ptrauth.h21
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h19
-rw-r--r--arch/arm64/include/asm/pgtable.h194
-rw-r--r--arch/arm64/include/asm/sysreg.h24
-rw-r--r--arch/arm64/include/asm/tlbflush.h53
-rw-r--r--arch/arm64/include/asm/topology.h6
-rw-r--r--arch/arm64/include/asm/virt.h12
-rw-r--r--arch/arm64/kernel/acpi.c10
-rw-r--r--arch/arm64/kernel/efi.c1
-rw-r--r--arch/arm64/kernel/head.S36
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm64/kernel/io.c42
-rw-r--r--arch/arm64/kernel/module.c126
-rw-r--r--arch/arm64/kernel/perf_callchain.c118
-rw-r--r--arch/arm64/kernel/pi/Makefile6
-rw-r--r--arch/arm64/kernel/pi/idreg-override.c6
-rw-r--r--arch/arm64/kernel/probes/kprobes.c7
-rw-r--r--arch/arm64/kernel/ptrace.c5
-rw-r--r--arch/arm64/kernel/setup.c11
-rw-r--r--arch/arm64/kernel/smp.c7
-rw-r--r--arch/arm64/kernel/stacktrace.c120
-rw-r--r--arch/arm64/kernel/vdso/Makefile10
-rw-r--r--arch/arm64/kernel/vdso32/Makefile2
-rw-r--r--arch/arm64/kvm/Makefile5
-rw-r--r--arch/arm64/kvm/arm.c222
-rw-r--r--arch/arm64/kvm/emulate-nested.c66
-rw-r--r--arch/arm64/kvm/fpsimd.c69
-rw-r--r--arch/arm64/kvm/handle_exit.c36
-rw-r--r--arch/arm64/kvm/hyp/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/debug-sr.h8
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h86
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/pkvm.h6
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile13
-rw-r--r--arch/arm64/kvm/hyp/nvhe/debug-sr.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c1
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c27
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c14
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c18
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c118
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c44
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c27
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c109
-rw-r--r--arch/arm64/kvm/hyp/vhe/sysreg-sr.c4
-rw-r--r--arch/arm64/kvm/hyp/vhe/tlb.c29
-rw-r--r--arch/arm64/kvm/mmio.c12
-rw-r--r--arch/arm64/kvm/mmu.c44
-rw-r--r--arch/arm64/kvm/nested.c8
-rw-r--r--arch/arm64/kvm/pauth.c206
-rw-r--r--arch/arm64/kvm/pkvm.c2
-rw-r--r--arch/arm64/kvm/pmu.c2
-rw-r--r--arch/arm64/kvm/reset.c1
-rw-r--r--arch/arm64/kvm/sys_regs.c69
-rw-r--r--arch/arm64/kvm/vgic/vgic-debug.c82
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c90
-rw-r--r--arch/arm64/kvm/vgic/vgic-its.c352
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c8
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-v2.c9
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c23
-rw-r--r--arch/arm64/kvm/vgic/vgic.c17
-rw-r--r--arch/arm64/kvm/vgic/vgic.h8
-rw-r--r--arch/arm64/lib/insn.c11
-rw-r--r--arch/arm64/mm/contpte.c29
-rw-r--r--arch/arm64/mm/dma-mapping.c13
-rw-r--r--arch/arm64/mm/fault.c56
-rw-r--r--arch/arm64/mm/hugetlbpage.c23
-rw-r--r--arch/arm64/mm/init.c140
-rw-r--r--arch/arm64/mm/mmu.c101
-rw-r--r--arch/arm64/mm/mteswap.c45
-rw-r--r--arch/arm64/mm/pageattr.c3
-rw-r--r--arch/arm64/mm/proc.S10
-rw-r--r--arch/arm64/net/bpf_jit.h8
-rw-r--r--arch/arm64/net/bpf_jit_comp.c199
-rw-r--r--arch/csky/Kconfig1
-rw-r--r--arch/csky/abiv1/mmap.c12
-rw-r--r--arch/csky/boot/dts/Makefile4
-rw-r--r--arch/csky/include/asm/cmpxchg.h10
-rw-r--r--arch/csky/kernel/probes/ftrace.c3
-rw-r--r--arch/csky/kernel/vdso/Makefile8
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S1
-rw-r--r--arch/loongarch/Kconfig19
-rw-r--r--arch/loongarch/Makefile2
-rw-r--r--arch/loongarch/boot/dts/loongson-2k0500.dtsi86
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000-ref.dts4
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000.dtsi8
-rw-r--r--arch/loongarch/boot/dts/loongson-2k2000-ref.dts33
-rw-r--r--arch/loongarch/boot/dts/loongson-2k2000.dtsi73
-rw-r--r--arch/loongarch/configs/loongson3_defconfig25
-rw-r--r--arch/loongarch/include/asm/Kbuild1
-rw-r--r--arch/loongarch/include/asm/acpi.h1
-rw-r--r--arch/loongarch/include/asm/addrspace.h1
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h6
-rw-r--r--arch/loongarch/include/asm/crash_reserve.h (renamed from arch/loongarch/include/asm/crash_core.h)4
-rw-r--r--arch/loongarch/include/asm/hardirq.h6
-rw-r--r--arch/loongarch/include/asm/inst.h2
-rw-r--r--arch/loongarch/include/asm/io.h20
-rw-r--r--arch/loongarch/include/asm/irq.h11
-rw-r--r--arch/loongarch/include/asm/kfence.h10
-rw-r--r--arch/loongarch/include/asm/kvm_host.h34
-rw-r--r--arch/loongarch/include/asm/kvm_para.h161
-rw-r--r--arch/loongarch/include/asm/kvm_vcpu.h11
-rw-r--r--arch/loongarch/include/asm/loongarch.h12
-rw-r--r--arch/loongarch/include/asm/page.h26
-rw-r--r--arch/loongarch/include/asm/paravirt.h30
-rw-r--r--arch/loongarch/include/asm/paravirt_api_clock.h1
-rw-r--r--arch/loongarch/include/asm/perf_event.h7
-rw-r--r--arch/loongarch/include/asm/smp.h28
-rw-r--r--arch/loongarch/include/asm/tlb.h2
-rw-r--r--arch/loongarch/include/asm/video.h (renamed from arch/loongarch/include/asm/fb.h)8
-rw-r--r--arch/loongarch/include/uapi/asm/kvm.h4
-rw-r--r--arch/loongarch/kernel/Makefile1
-rw-r--r--arch/loongarch/kernel/dma.c9
-rw-r--r--arch/loongarch/kernel/ftrace_dyn.c3
-rw-r--r--arch/loongarch/kernel/irq.c24
-rw-r--r--arch/loongarch/kernel/machine_kexec.c2
-rw-r--r--arch/loongarch/kernel/module.c6
-rw-r--r--arch/loongarch/kernel/paravirt.c151
-rw-r--r--arch/loongarch/kernel/perf_event.c16
-rw-r--r--arch/loongarch/kernel/smp.c52
-rw-r--r--arch/loongarch/kernel/time.c12
-rw-r--r--arch/loongarch/kvm/Makefile2
-rw-r--r--arch/loongarch/kvm/exit.c151
-rw-r--r--arch/loongarch/kvm/mmu.c32
-rw-r--r--arch/loongarch/kvm/trace.h20
-rw-r--r--arch/loongarch/kvm/vcpu.c105
-rw-r--r--arch/loongarch/kvm/vm.c11
-rw-r--r--arch/loongarch/lib/Makefile2
-rw-r--r--arch/loongarch/lib/tishift.S56
-rw-r--r--arch/loongarch/mm/fault.c4
-rw-r--r--arch/loongarch/mm/hugetlbpage.c12
-rw-r--r--arch/loongarch/mm/init.c21
-rw-r--r--arch/loongarch/mm/mmap.c7
-rw-r--r--arch/loongarch/mm/pgtable.c4
-rw-r--r--arch/loongarch/mm/tlbex.S9
-rw-r--r--arch/loongarch/net/bpf_jit.c22
-rw-r--r--arch/loongarch/power/suspend.c4
-rw-r--r--arch/loongarch/vdso/Makefile9
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/amiga/config.c2
-rw-r--r--arch/m68k/configs/amiga_defconfig4
-rw-r--r--arch/m68k/configs/apollo_defconfig4
-rw-r--r--arch/m68k/configs/atari_defconfig4
-rw-r--r--arch/m68k/configs/bvme6000_defconfig4
-rw-r--r--arch/m68k/configs/hp300_defconfig4
-rw-r--r--arch/m68k/configs/mac_defconfig4
-rw-r--r--arch/m68k/configs/multi_defconfig4
-rw-r--r--arch/m68k/configs/mvme147_defconfig4
-rw-r--r--arch/m68k/configs/mvme16x_defconfig4
-rw-r--r--arch/m68k/configs/q40_defconfig4
-rw-r--r--arch/m68k/configs/sun3_defconfig4
-rw-r--r--arch/m68k/configs/sun3x_defconfig4
-rw-r--r--arch/m68k/include/asm/pgtable.h2
-rw-r--r--arch/m68k/include/asm/thread_info.h9
-rw-r--r--arch/m68k/include/asm/video.h (renamed from arch/m68k/include/asm/fb.h)8
-rw-r--r--arch/m68k/include/uapi/asm/ptrace.h2
-rw-r--r--arch/m68k/kernel/entry.S4
-rw-r--r--arch/m68k/mac/misc.c36
-rw-r--r--arch/microblaze/configs/mmu_defconfig2
-rw-r--r--arch/microblaze/kernel/Makefile1
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c2
-rw-r--r--arch/mips/Kconfig20
-rw-r--r--arch/mips/bcm47xx/prom.c3
-rw-r--r--arch/mips/boot/compressed/Makefile6
-rw-r--r--arch/mips/boot/dts/ralink/mt7621.dtsi430
-rw-r--r--arch/mips/configs/rs90_defconfig2
-rw-r--r--arch/mips/dec/setup.c2
-rw-r--r--arch/mips/include/asm/asm.h3
-rw-r--r--arch/mips/include/asm/pgtable-32.h2
-rw-r--r--arch/mips/include/asm/pgtable-64.h6
-rw-r--r--arch/mips/include/asm/ptrace.h2
-rw-r--r--arch/mips/include/asm/setup.h6
-rw-r--r--arch/mips/include/asm/stackframe.h19
-rw-r--r--arch/mips/include/asm/video.h (renamed from arch/mips/include/asm/fb.h)12
-rw-r--r--arch/mips/jazz/jazzdma.c2
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/module.c10
-rw-r--r--arch/mips/kernel/ptrace.c15
-rw-r--r--arch/mips/kernel/scall32-o32.S23
-rw-r--r--arch/mips/kernel/scall64-n32.S3
-rw-r--r--arch/mips/kernel/scall64-n64.S3
-rw-r--r--arch/mips/kernel/scall64-o32.S33
-rw-r--r--arch/mips/kernel/syscalls/Makefile2
-rw-r--r--arch/mips/kvm/mmu.c30
-rw-r--r--arch/mips/mm/dma-noncoherent.c3
-rw-r--r--arch/mips/mm/fault.c4
-rw-r--r--arch/mips/mm/hugetlbpage.c10
-rw-r--r--arch/mips/mm/init.c23
-rw-r--r--arch/mips/mm/mmap.c3
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/net/bpf_jit_comp.c3
-rwxr-xr-x[-rw-r--r--]arch/mips/pci/pcie-octeon.c6
-rw-r--r--arch/mips/rb532/gpio.c2
-rw-r--r--arch/mips/rb532/prom.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c15
-rw-r--r--arch/mips/vdso/Makefile11
-rw-r--r--arch/nios2/boot/dts/Makefile3
-rw-r--r--arch/nios2/include/asm/pgtable.h5
-rw-r--r--arch/nios2/kernel/module.c20
-rw-r--r--arch/nios2/kernel/prom.c6
-rw-r--r--arch/nios2/mm/init.c21
-rw-r--r--arch/openrisc/Kconfig9
-rw-r--r--arch/openrisc/include/asm/fpu.h22
-rw-r--r--arch/openrisc/include/asm/processor.h1
-rw-r--r--arch/openrisc/include/asm/ptrace.h3
-rw-r--r--arch/openrisc/include/uapi/asm/elf.h75
-rw-r--r--arch/openrisc/kernel/entry.S15
-rw-r--r--arch/openrisc/kernel/module.c18
-rw-r--r--arch/openrisc/kernel/process.c13
-rw-r--r--arch/openrisc/kernel/ptrace.c18
-rw-r--r--arch/openrisc/kernel/signal.c36
-rw-r--r--arch/openrisc/kernel/traps.c144
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/boot/compressed/Makefile4
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig2
-rw-r--r--arch/parisc/include/asm/cmpxchg.h22
-rw-r--r--arch/parisc/include/asm/fb.h14
-rw-r--r--arch/parisc/include/asm/mman.h14
-rw-r--r--arch/parisc/include/asm/page.h1
-rw-r--r--arch/parisc/include/asm/signal.h12
-rw-r--r--arch/parisc/include/asm/video.h16
-rw-r--r--arch/parisc/include/uapi/asm/signal.h10
-rw-r--r--arch/parisc/kernel/ftrace.c3
-rw-r--r--arch/parisc/kernel/module.c12
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c6
-rw-r--r--arch/parisc/kernel/vdso32/Makefile9
-rw-r--r--arch/parisc/kernel/vdso64/Makefile4
-rw-r--r--arch/parisc/lib/bitops.c52
-rw-r--r--arch/parisc/math-emu/driver.c6
-rw-r--r--arch/parisc/mm/hugetlbpage.c11
-rw-r--r--arch/parisc/mm/init.c23
-rw-r--r--arch/parisc/net/bpf_jit_core.c8
-rw-r--r--arch/parisc/video/Makefile2
-rw-r--r--arch/parisc/video/video-sti.c (renamed from arch/parisc/video/fbdev.c)9
-rw-r--r--arch/powerpc/Kbuild3
-rw-r--r--arch/powerpc/Kconfig9
-rw-r--r--arch/powerpc/Makefile6
-rw-r--r--arch/powerpc/boot/Makefile10
-rw-r--r--arch/powerpc/boot/decompress.c2
-rw-r--r--arch/powerpc/boot/dts/Makefile3
-rw-r--r--arch/powerpc/boot/dts/acadia.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/Makefile3
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9132qds.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/c293pcie.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/c293si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi14
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts16
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts16
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-post.dtsi5
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-post.dtsi5
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-post.dtsi7
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi17
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-power.dtsi19
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1024rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1042rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi2
-rw-r--r--arch/powerpc/boot/main.c2
-rw-r--r--arch/powerpc/boot/ps3.c2
-rw-r--r--arch/powerpc/configs/adder875_defconfig2
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig2
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig2
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig2
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig2
-rw-r--r--arch/powerpc/crypto/chacha-p10-glue.c8
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-4k.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h25
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h27
-rw-r--r--arch/powerpc/include/asm/cpu_has_feature.h2
-rw-r--r--arch/powerpc/include/asm/cputime.h13
-rw-r--r--arch/powerpc/include/asm/eeh.h2
-rw-r--r--arch/powerpc/include/asm/elf.h2
-rw-r--r--arch/powerpc/include/asm/fadump-internal.h36
-rw-r--r--arch/powerpc/include/asm/fadump.h2
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/interrupt.h10
-rw-r--r--arch/powerpc/include/asm/io.h28
-rw-r--r--arch/powerpc/include/asm/kasan.h2
-rw-r--r--arch/powerpc/include/asm/kexec.h15
-rw-r--r--arch/powerpc/include/asm/kexec_ranges.h20
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h1
-rw-r--r--arch/powerpc/include/asm/mmu.h6
-rw-r--r--arch/powerpc/include/asm/module.h5
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h10
-rw-r--r--arch/powerpc/include/asm/opal-api.h4
-rw-r--r--arch/powerpc/include/asm/percpu.h10
-rw-r--r--arch/powerpc/include/asm/plpks.h5
-rw-r--r--arch/powerpc/include/asm/pmac_feature.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h4
-rw-r--r--arch/powerpc/include/asm/processor.h13
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/uninorth.h2
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h29
-rw-r--r--arch/powerpc/include/asm/video.h (renamed from arch/powerpc/include/asm/fb.h)8
-rw-r--r--arch/powerpc/include/uapi/asm/bootx.h2
-rw-r--r--arch/powerpc/kernel/Makefile7
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S4
-rw-r--r--arch/powerpc/kernel/dexcr.c124
-rw-r--r--arch/powerpc/kernel/dma-iommu.c2
-rw-r--r--arch/powerpc/kernel/eeh.c11
-rw-r--r--arch/powerpc/kernel/eeh_driver.c13
-rw-r--r--arch/powerpc/kernel/eeh_pe.c8
-rw-r--r--arch/powerpc/kernel/fadump.c547
-rw-r--r--arch/powerpc/kernel/head_8xx.S4
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S6
-rw-r--r--arch/powerpc/kernel/iommu.c8
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c3
-rw-r--r--arch/powerpc/kernel/kprobes.c22
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/module.c40
-rw-r--r--arch/powerpc/kernel/pci-common.c2
-rw-r--r--arch/powerpc/kernel/process.c29
-rw-r--r--arch/powerpc/kernel/prom.c23
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-tm.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c7
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c4
-rw-r--r--arch/powerpc/kernel/time.c22
-rw-r--r--arch/powerpc/kernel/vdso/Makefile16
-rw-r--r--arch/powerpc/kexec/Makefile4
-rw-r--r--arch/powerpc/kexec/core_64.c91
-rw-r--r--arch/powerpc/kexec/crash.c195
-rw-r--r--arch/powerpc/kexec/elf_64.c3
-rw-r--r--arch/powerpc/kexec/file_load_64.c314
-rw-r--r--arch/powerpc/kexec/ranges.c312
-rw-r--r--arch/powerpc/kvm/book3s.c9
-rw-r--r--arch/powerpc/kvm/book3s.h1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c7
-rw-r--r--arch/powerpc/kvm/book3s_xive.c2
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c6
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/code-patching.c33
-rw-r--r--arch/powerpc/lib/feature-fixups.c8
-rw-r--r--arch/powerpc/lib/test-code-patching.c92
-rw-r--r--arch/powerpc/mm/Makefile2
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c2
-rw-r--r--arch/powerpc/mm/book3s64/Makefile2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c1
-rw-r--r--arch/powerpc/mm/book3s64/slice.c20
-rw-r--r--arch/powerpc/mm/cacheflush.c2
-rw-r--r--arch/powerpc/mm/fault.c33
-rw-r--r--arch/powerpc/mm/kasan/init_book3e_64.c2
-rw-r--r--arch/powerpc/mm/kasan/init_book3s_64.c2
-rw-r--r--arch/powerpc/mm/mem.c67
-rw-r--r--arch/powerpc/mm/nohash/Makefile2
-rw-r--r--arch/powerpc/mm/nohash/kaslr_booke.c2
-rw-r--r--arch/powerpc/mm/pgtable_64.c6
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c137
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c77
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c2
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_sleep.S6
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend-asm.S6
-rw-r--r--arch/powerpc/platforms/85xx/smp.c9
-rw-r--r--arch/powerpc/platforms/cell/iommu.c17
-rw-r--r--arch/powerpc/platforms/cell/smp.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c20
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/maple/pci.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c2
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S2
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.c35
-rw-r--r--arch/powerpc/platforms/powernv/pci-sriov.c4
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c2
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c61
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c4
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c8
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c6
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c45
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c43
-rw-r--r--arch/powerpc/platforms/pseries/pci.c27
-rw-r--r--arch/powerpc/platforms/pseries/plpks.c10
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.c322
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.h29
-rw-r--r--arch/powerpc/platforms/pseries/vas.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c10
-rw-r--r--arch/powerpc/purgatory/Makefile3
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c4
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c6
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/xive/common.c4
-rw-r--r--arch/powerpc/sysdev/xive/native.c2
-rw-r--r--arch/powerpc/xmon/Makefile2
-rw-r--r--arch/powerpc/xmon/xmon.c6
-rw-r--r--arch/riscv/Kconfig26
-rw-r--r--arch/riscv/Kconfig.errata8
-rw-r--r--arch/riscv/Kconfig.socs22
-rw-r--r--arch/riscv/Makefile28
-rw-r--r--arch/riscv/boot/Makefile2
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts32
-rw-r--r--arch/riscv/boot/dts/renesas/r9a07g043f.dtsi75
-rw-r--r--arch/riscv/boot/dts/renesas/rzfive-smarc-som.dtsi16
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts20
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1800b.dtsi9
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1812h.dtsi4
-rw-r--r--arch/riscv/boot/dts/sophgo/cv18xx.dtsi132
-rw-r--r--arch/riscv/boot/dts/starfive/Makefile1
-rw-r--r--arch/riscv/boot/dts/starfive/jh7100.dtsi2
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi599
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts30
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi683
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110.dtsi2
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi54
-rw-r--r--arch/riscv/configs/defconfig8
-rw-r--r--arch/riscv/configs/nommu_k210_defconfig5
-rw-r--r--arch/riscv/configs/nommu_k210_sdcard_defconfig5
-rw-r--r--arch/riscv/configs/nommu_virt_defconfig4
-rw-r--r--arch/riscv/errata/sifive/errata.c5
-rw-r--r--arch/riscv/errata/thead/errata.c24
-rw-r--r--arch/riscv/include/asm/atomic.h164
-rw-r--r--arch/riscv/include/asm/cache.h2
-rw-r--r--arch/riscv/include/asm/cacheflush.h7
-rw-r--r--arch/riscv/include/asm/cmpxchg.h422
-rw-r--r--arch/riscv/include/asm/csr.h5
-rw-r--r--arch/riscv/include/asm/errata_list.h32
-rw-r--r--arch/riscv/include/asm/hugetlb.h6
-rw-r--r--arch/riscv/include/asm/irqflags.h1
-rw-r--r--arch/riscv/include/asm/kvm_host.h21
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_pmu.h16
-rw-r--r--arch/riscv/include/asm/mmu.h5
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/patch.h1
-rw-r--r--arch/riscv/include/asm/pgalloc.h32
-rw-r--r--arch/riscv/include/asm/pgtable.h12
-rw-r--r--arch/riscv/include/asm/processor.h10
-rw-r--r--arch/riscv/include/asm/sbi.h42
-rw-r--r--arch/riscv/include/asm/signal.h12
-rw-r--r--arch/riscv/include/asm/smp.h15
-rw-r--r--arch/riscv/include/asm/suspend.h1
-rw-r--r--arch/riscv/include/asm/switch_to.h23
-rw-r--r--arch/riscv/include/asm/syscall_wrapper.h3
-rw-r--r--arch/riscv/include/asm/tlbflush.h52
-rw-r--r--arch/riscv/include/asm/uaccess.h4
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h2
-rw-r--r--arch/riscv/include/uapi/asm/hwprobe.h3
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h1
-rw-r--r--arch/riscv/kernel/compat_vdso/Makefile10
-rw-r--r--arch/riscv/kernel/elf_kexec.c1
-rw-r--r--arch/riscv/kernel/ftrace.c44
-rw-r--r--arch/riscv/kernel/module.c12
-rw-r--r--arch/riscv/kernel/paravirt.c6
-rw-r--r--arch/riscv/kernel/patch.c25
-rw-r--r--arch/riscv/kernel/pi/Makefile6
-rw-r--r--arch/riscv/kernel/probes/ftrace.c3
-rw-r--r--arch/riscv/kernel/probes/kprobes.c11
-rw-r--r--arch/riscv/kernel/process.c5
-rw-r--r--arch/riscv/kernel/sbi-ipi.c11
-rw-r--r--arch/riscv/kernel/signal.c15
-rw-r--r--arch/riscv/kernel/smp.c11
-rw-r--r--arch/riscv/kernel/smpboot.c7
-rw-r--r--arch/riscv/kernel/suspend.c3
-rw-r--r--arch/riscv/kernel/sys_hwprobe.c1
-rw-r--r--arch/riscv/kernel/sys_riscv.c1
-rw-r--r--arch/riscv/kernel/traps.c2
-rw-r--r--arch/riscv/kernel/traps_misaligned.c106
-rw-r--r--arch/riscv/kernel/vdso/Makefile9
-rw-r--r--arch/riscv/kvm/Makefile2
-rw-r--r--arch/riscv/kvm/aia.c5
-rw-r--r--arch/riscv/kvm/aia_aplic.c37
-rw-r--r--arch/riscv/kvm/main.c18
-rw-r--r--arch/riscv/kvm/mmu.c20
-rw-r--r--arch/riscv/kvm/vcpu.c85
-rw-r--r--arch/riscv/kvm/vcpu_exit.c4
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c8
-rw-r--r--arch/riscv/kvm/vcpu_pmu.c260
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c7
-rw-r--r--arch/riscv/kvm/vcpu_sbi_hsm.c42
-rw-r--r--arch/riscv/kvm/vcpu_sbi_pmu.c17
-rw-r--r--arch/riscv/kvm/vcpu_sbi_sta.c4
-rw-r--r--arch/riscv/kvm/vm.c1
-rw-r--r--arch/riscv/mm/Makefile5
-rw-r--r--arch/riscv/mm/cacheflush.c120
-rw-r--r--arch/riscv/mm/context.c42
-rw-r--r--arch/riscv/mm/dma-noncoherent.c3
-rw-r--r--arch/riscv/mm/fault.c5
-rw-r--r--arch/riscv/mm/hugetlbpage.c10
-rw-r--r--arch/riscv/mm/init.c57
-rw-r--r--arch/riscv/mm/tlbflush.c79
-rw-r--r--arch/riscv/net/bpf_jit.h6
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c295
-rw-r--r--arch/riscv/net/bpf_jit_core.c15
-rw-r--r--arch/riscv/purgatory/Makefile8
-rw-r--r--arch/s390/Kconfig67
-rw-r--r--arch/s390/Makefile15
-rw-r--r--arch/s390/boot/Makefile9
-rw-r--r--arch/s390/boot/boot.h14
-rw-r--r--arch/s390/boot/decompressor.c15
-rw-r--r--arch/s390/boot/decompressor.h8
-rw-r--r--arch/s390/boot/kaslr.c2
-rw-r--r--arch/s390/boot/pgm_check_info.c4
-rw-r--r--arch/s390/boot/startup.c264
-rw-r--r--arch/s390/boot/vmem.c108
-rw-r--r--arch/s390/boot/vmlinux.lds.S28
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig1
-rw-r--r--arch/s390/crypto/paes_s390.c15
-rw-r--r--arch/s390/include/asm/alternative-asm.h1
-rw-r--r--arch/s390/include/asm/alternative.h1
-rw-r--r--arch/s390/include/asm/ap.h30
-rw-r--r--arch/s390/include/asm/asm-prototypes.h1
-rw-r--r--arch/s390/include/asm/atomic.h44
-rw-r--r--arch/s390/include/asm/atomic_ops.h22
-rw-r--r--arch/s390/include/asm/chsc.h15
-rw-r--r--arch/s390/include/asm/cpacf.h109
-rw-r--r--arch/s390/include/asm/dwarf.h1
-rw-r--r--arch/s390/include/asm/extmem.h7
-rw-r--r--arch/s390/include/asm/ftrace.h8
-rw-r--r--arch/s390/include/asm/gmap.h2
-rw-r--r--arch/s390/include/asm/hugetlb.h6
-rw-r--r--arch/s390/include/asm/io.h15
-rw-r--r--arch/s390/include/asm/mmu.h5
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/nospec-branch.h20
-rw-r--r--arch/s390/include/asm/nospec-insn.h13
-rw-r--r--arch/s390/include/asm/os_info.h29
-rw-r--r--arch/s390/include/asm/page.h50
-rw-r--r--arch/s390/include/asm/pgtable.h39
-rw-r--r--arch/s390/include/asm/physmem_info.h4
-rw-r--r--arch/s390/include/asm/preempt.h36
-rw-r--r--arch/s390/include/asm/processor.h17
-rw-r--r--arch/s390/include/asm/setup.h14
-rw-r--r--arch/s390/include/asm/stacktrace.h12
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h7
-rw-r--r--arch/s390/include/asm/vtime.h2
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/alternative.c7
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/cert_store.c1
-rw-r--r--arch/s390/kernel/crash_dump.c41
-rw-r--r--arch/s390/kernel/entry.S27
-rw-r--r--arch/s390/kernel/ftrace.c7
-rw-r--r--arch/s390/kernel/idle.c10
-rw-r--r--arch/s390/kernel/ipl.c326
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/kprobes.c71
-rw-r--r--arch/s390/kernel/kprobes.h9
-rw-r--r--arch/s390/kernel/kprobes_insn_page.S22
-rw-r--r--arch/s390/kernel/module.c42
-rw-r--r--arch/s390/kernel/nmi.c1
-rw-r--r--arch/s390/kernel/nospec-branch.c4
-rw-r--r--arch/s390/kernel/os_info.c29
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c11
-rw-r--r--arch/s390/kernel/perf_event.c34
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c10
-rw-r--r--arch/s390/kernel/perf_pai_ext.c10
-rw-r--r--arch/s390/kernel/process.c5
-rw-r--r--arch/s390/kernel/setup.c8
-rw-r--r--arch/s390/kernel/stacktrace.c108
-rw-r--r--arch/s390/kernel/syscalls/Makefile4
-rw-r--r--arch/s390/kernel/uv.c51
-rw-r--r--arch/s390/kernel/vdso.c13
-rw-r--r--arch/s390/kernel/vdso32/Makefile16
-rw-r--r--arch/s390/kernel/vdso64/Makefile16
-rw-r--r--arch/s390/kernel/vdso64/vdso_user_wrapper.S19
-rw-r--r--arch/s390/kernel/vmcore_info.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S38
-rw-r--r--arch/s390/kernel/vtime.c10
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/vsie.c5
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/expoline.S (renamed from arch/s390/lib/expoline/expoline.S)0
-rw-r--r--arch/s390/lib/expoline/Makefile3
-rw-r--r--arch/s390/mm/fault.c5
-rw-r--r--arch/s390/mm/gmap.c167
-rw-r--r--arch/s390/mm/hugetlbpage.c21
-rw-r--r--arch/s390/mm/init.c30
-rw-r--r--arch/s390/mm/mmap.c9
-rw-r--r--arch/s390/mm/vmem.c5
-rw-r--r--arch/s390/net/bpf_jit_comp.c60
-rw-r--r--arch/s390/pci/pci.c6
-rw-r--r--arch/s390/pci/pci_mmio.c4
-rw-r--r--arch/s390/pci/pci_sysfs.c4
-rw-r--r--arch/s390/purgatory/Makefile8
-rw-r--r--arch/s390/tools/relocs.c2
-rw-r--r--arch/sh/Kconfig11
-rw-r--r--arch/sh/boards/board-sh7757lcr.c2
-rw-r--r--arch/sh/boards/board-sh7785lcr.c2
-rw-r--r--arch/sh/boards/mach-dreamcast/setup.c3
-rw-r--r--arch/sh/boards/mach-highlander/pinmux-r7785rp.c1
-rw-r--r--arch/sh/boards/mach-sh03/rtc.c2
-rw-r--r--arch/sh/boards/of-generic.c2
-rw-r--r--arch/sh/boot/compressed/Makefile5
-rw-r--r--arch/sh/boot/compressed/cache.c13
-rw-r--r--arch/sh/boot/compressed/misc.c9
-rw-r--r--arch/sh/boot/compressed/misc.h9
-rw-r--r--arch/sh/boot/dts/j2_mimas_v2.dts2
-rw-r--r--arch/sh/configs/apsh4a3a_defconfig1
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig1
-rw-r--r--arch/sh/configs/edosk7705_defconfig3
-rw-r--r--arch/sh/configs/hp6xx_defconfig1
-rw-r--r--arch/sh/configs/landisk_defconfig1
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig1
-rw-r--r--arch/sh/configs/rsk7264_defconfig1
-rw-r--r--arch/sh/configs/rsk7269_defconfig1
-rw-r--r--arch/sh/configs/se7619_defconfig3
-rw-r--r--arch/sh/configs/se7705_defconfig1
-rw-r--r--arch/sh/configs/se7712_defconfig2
-rw-r--r--arch/sh/configs/se7721_defconfig2
-rw-r--r--arch/sh/configs/se7722_defconfig1
-rw-r--r--arch/sh/configs/se7750_defconfig1
-rw-r--r--arch/sh/configs/secureedge5410_defconfig1
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig1
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig1
-rw-r--r--arch/sh/configs/sh7785lcr_defconfig1
-rw-r--r--arch/sh/configs/shmin_defconfig2
-rw-r--r--arch/sh/configs/urquell_defconfig1
-rw-r--r--arch/sh/drivers/dma/dma-api.c143
-rw-r--r--arch/sh/drivers/push-switch.c6
-rw-r--r--arch/sh/include/asm/cacheflush.h12
-rw-r--r--arch/sh/include/asm/dma.h7
-rw-r--r--arch/sh/include/asm/fb.h7
-rw-r--r--arch/sh/include/asm/fpu.h3
-rw-r--r--arch/sh/include/asm/ftrace.h10
-rw-r--r--arch/sh/include/asm/hugetlb.h6
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h2
-rw-r--r--arch/sh/include/asm/setup.h1
-rw-r--r--arch/sh/include/asm/syscalls.h1
-rw-r--r--arch/sh/include/asm/tlb.h4
-rw-r--r--arch/sh/kernel/cpu/sh2a/opcode_helper.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c1
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c14
-rw-r--r--arch/sh/kernel/dwarf.c2
-rw-r--r--arch/sh/kernel/kprobes.c13
-rw-r--r--arch/sh/kernel/return_address.c2
-rw-r--r--arch/sh/kernel/smp.c6
-rw-r--r--arch/sh/kernel/traps.c10
-rw-r--r--arch/sh/kernel/traps_32.c5
-rw-r--r--arch/sh/kernel/vsyscall/Makefile4
-rw-r--r--arch/sh/lib/checksum.S67
-rw-r--r--arch/sh/math-emu/math.c2
-rw-r--r--arch/sh/mm/cache-sh4.c7
-rw-r--r--arch/sh/mm/cache-shx3.c1
-rw-r--r--arch/sh/mm/cache.c16
-rw-r--r--arch/sh/mm/hugetlbpage.c10
-rw-r--r--arch/sh/mm/mmap.c5
-rw-r--r--arch/sh/mm/nommu.c2
-rw-r--r--arch/sh/mm/pgtable.c4
-rw-r--r--arch/sh/mm/tlbex_32.c1
-rw-r--r--arch/sparc/Makefile4
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h20
-rw-r--r--arch/sparc/include/asm/pgtable_32.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h3
-rw-r--r--arch/sparc/include/asm/video.h (renamed from arch/sparc/include/asm/fb.h)15
-rw-r--r--arch/sparc/kernel/module.c30
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c3
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c20
-rw-r--r--arch/sparc/lib/atomic32.c45
-rw-r--r--arch/sparc/mm/Makefile2
-rw-r--r--arch/sparc/mm/execmem.c21
-rw-r--r--arch/sparc/mm/hugetlbpage.c21
-rw-r--r--arch/sparc/mm/tlb.c7
-rw-r--r--arch/sparc/net/bpf_jit_comp_32.c8
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c6
-rw-r--r--arch/sparc/vdso/Makefile4
-rw-r--r--arch/sparc/video/Makefile2
-rw-r--r--arch/sparc/video/fbdev.c26
-rw-r--r--arch/sparc/video/video-common.c25
-rw-r--r--arch/um/include/asm/Kbuild2
-rw-r--r--arch/um/include/asm/cpufeature.h3
-rw-r--r--arch/um/include/shared/um_malloc.h3
-rw-r--r--arch/um/kernel/Makefile2
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig84
-rw-r--r--arch/x86/Kconfig.assembler10
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/Makefile18
-rw-r--r--arch/x86/boot/compressed/Makefile11
-rw-r--r--arch/x86/boot/compressed/efi_mixed.S20
-rw-r--r--arch/x86/boot/compressed/head_64.S5
-rw-r--r--arch/x86/boot/compressed/sev.c197
-rw-r--r--arch/x86/boot/main.c4
-rw-r--r--arch/x86/boot/printf.c3
-rw-r--r--arch/x86/coco/core.c93
-rw-r--r--arch/x86/configs/hardening.config3
-rw-r--r--arch/x86/crypto/Makefile3
-rw-r--r--arch/x86/crypto/aes-xts-avx-x86_64.S845
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S467
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c400
-rw-r--r--arch/x86/crypto/nh-avx2-x86_64.S1
-rw-r--r--arch/x86/crypto/sha256-avx2-asm.S1
-rw-r--r--arch/x86/crypto/sha256_ni_asm.S253
-rw-r--r--arch/x86/crypto/sha512-avx2-asm.S1
-rw-r--r--arch/x86/entry/Makefile2
-rw-r--r--arch/x86/entry/common.c75
-rw-r--r--arch/x86/entry/entry_64.S61
-rw-r--r--arch/x86/entry/entry_64_compat.S17
-rw-r--r--arch/x86/entry/entry_fred.c12
-rw-r--r--arch/x86/entry/syscall_32.c21
-rw-r--r--arch/x86/entry/syscall_64.c19
-rw-r--r--arch/x86/entry/syscall_x32.c10
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/entry/thunk.S (renamed from arch/x86/entry/thunk_64.S)0
-rw-r--r--arch/x86/entry/thunk_32.S18
-rw-r--r--arch/x86/entry/vdso/Makefile27
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c28
-rw-r--r--arch/x86/events/amd/core.c76
-rw-r--r--arch/x86/events/amd/lbr.c19
-rw-r--r--arch/x86/events/core.c1
-rw-r--r--arch/x86/events/intel/cstate.c150
-rw-r--r--arch/x86/events/intel/ds.c8
-rw-r--r--arch/x86/events/intel/lbr.c4
-rw-r--r--arch/x86/events/intel/pt.c12
-rw-r--r--arch/x86/events/intel/uncore.c100
-rw-r--r--arch/x86/events/intel/uncore_nhmex.c3
-rw-r--r--arch/x86/events/intel/uncore_snbep.c5
-rw-r--r--arch/x86/events/msr.c132
-rw-r--r--arch/x86/events/perf_event.h13
-rw-r--r--arch/x86/events/rapl.c21
-rw-r--r--arch/x86/hyperv/hv_apic.c16
-rw-r--r--arch/x86/hyperv/hv_proc.c22
-rw-r--r--arch/x86/hyperv/hv_vtl.c1
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/alternative.h32
-rw-r--r--arch/x86/include/asm/apic.h11
-rw-r--r--arch/x86/include/asm/asm-prototypes.h1
-rw-r--r--arch/x86/include/asm/asm.h3
-rw-r--r--arch/x86/include/asm/atomic.h12
-rw-r--r--arch/x86/include/asm/atomic64_32.h81
-rw-r--r--arch/x86/include/asm/atomic64_64.h12
-rw-r--r--arch/x86/include/asm/barrier.h3
-rw-r--r--arch/x86/include/asm/boot.h5
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h205
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h8
-rw-r--r--arch/x86/include/asm/coco.h3
-rw-r--r--arch/x86/include/asm/cpu_device_id.h101
-rw-r--r--arch/x86/include/asm/cpufeature.h24
-rw-r--r--arch/x86/include/asm/cpufeatures.h15
-rw-r--r--arch/x86/include/asm/crash_reserve.h2
-rw-r--r--arch/x86/include/asm/disabled-features.h3
-rw-r--r--arch/x86/include/asm/e820/api.h1
-rw-r--r--arch/x86/include/asm/extable_fixup_types.h2
-rw-r--r--arch/x86/include/asm/fb.h19
-rw-r--r--arch/x86/include/asm/fpu/api.h3
-rw-r--r--arch/x86/include/asm/hardirq.h6
-rw-r--r--arch/x86/include/asm/ia32.h11
-rw-r--r--arch/x86/include/asm/ia32_unistd.h12
-rw-r--r--arch/x86/include/asm/idtentry.h6
-rw-r--r--arch/x86/include/asm/inat.h17
-rw-r--r--arch/x86/include/asm/insn.h32
-rw-r--r--arch/x86/include/asm/intel-family.h84
-rw-r--r--arch/x86/include/asm/io.h18
-rw-r--r--arch/x86/include/asm/irq_remapping.h7
-rw-r--r--arch/x86/include/asm/irq_stack.h2
-rw-r--r--arch/x86/include/asm/irq_vectors.h8
-rw-r--r--arch/x86/include/asm/kexec.h13
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h64
-rw-r--r--arch/x86/include/asm/mce.h2
-rw-r--r--arch/x86/include/asm/mpspec.h6
-rw-r--r--arch/x86/include/asm/msr-index.h18
-rw-r--r--arch/x86/include/asm/nospec-branch.h38
-rw-r--r--arch/x86/include/asm/page_types.h8
-rw-r--r--arch/x86/include/asm/percpu.h157
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/pgtable.h20
-rw-r--r--arch/x86/include/asm/pgtable_64.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h5
-rw-r--r--arch/x86/include/asm/posted_intr.h118
-rw-r--r--arch/x86/include/asm/processor.h33
-rw-r--r--arch/x86/include/asm/prom.h9
-rw-r--r--arch/x86/include/asm/qspinlock.h13
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h7
-rw-r--r--arch/x86/include/asm/required-features.h3
-rw-r--r--arch/x86/include/asm/seccomp.h2
-rw-r--r--arch/x86/include/asm/sev-common.h8
-rw-r--r--arch/x86/include/asm/sev.h12
-rw-r--r--arch/x86/include/asm/sparsemem.h2
-rw-r--r--arch/x86/include/asm/special_insns.h8
-rw-r--r--arch/x86/include/asm/string_64.h45
-rw-r--r--arch/x86/include/asm/syscall.h11
-rw-r--r--arch/x86/include/asm/text-patching.h2
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h44
-rw-r--r--arch/x86/include/asm/video.h21
-rw-r--r--arch/x86/include/asm/vm86.h2
-rw-r--r--arch/x86/include/asm/vmx.h13
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/include/uapi/asm/kvm.h45
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/alternative.c133
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/amd_nb.c1
-rw-r--r--arch/x86/kernel/apic/apic.c68
-rw-r--r--arch/x86/kernel/apic/msi.c5
-rw-r--r--arch/x86/kernel/apic/vector.c5
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c7
-rw-r--r--arch/x86/kernel/callthunks.c11
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c68
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c17
-rw-r--r--arch/x86/kernel/cpu/bugs.c200
-rw-r--r--arch/x86/kernel/cpu/common.c244
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c9
-rw-r--r--arch/x86/kernel/cpu/intel.c1
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c12
-rw-r--r--arch/x86/kernel/cpu/match.c5
-rw-r--r--arch/x86/kernel/cpu/mce/core.c28
-rw-r--r--arch/x86/kernel/cpu/mce/genpool.c40
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c21
-rw-r--r--arch/x86/kernel/cpu/mce/severity.c27
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c65
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c40
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h8
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c11
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c24
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c12
-rw-r--r--arch/x86/kernel/cpu/resctrl/trace.h (renamed from arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h)24
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/driver.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c1
-rw-r--r--arch/x86/kernel/cpu/topology.c7
-rw-r--r--arch/x86/kernel/cpu/topology_amd.c86
-rw-r--r--arch/x86/kernel/cpu/topology_ext.c15
-rw-r--r--arch/x86/kernel/crash.c32
-rw-r--r--arch/x86/kernel/devicetree.c24
-rw-r--r--arch/x86/kernel/dumpstack.c4
-rw-r--r--arch/x86/kernel/e820.c7
-rw-r--r--arch/x86/kernel/eisa.c3
-rw-r--r--arch/x86/kernel/fpu/core.c4
-rw-r--r--arch/x86/kernel/fpu/xstate.c5
-rw-r--r--arch/x86/kernel/fpu/xstate.h2
-rw-r--r--arch/x86/kernel/ftrace.c16
-rw-r--r--arch/x86/kernel/head_32.S13
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/idt.c3
-rw-r--r--arch/x86/kernel/irq.c172
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/kprobes/core.c4
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c3
-rw-r--r--arch/x86/kernel/kvm.c13
-rw-r--r--arch/x86/kernel/module.c51
-rw-r--r--arch/x86/kernel/nmi.c24
-rw-r--r--arch/x86/kernel/probe_roms.c10
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/rtc.c1
-rw-r--r--arch/x86/kernel/setup.c47
-rw-r--r--arch/x86/kernel/sev-shared.c6
-rw-r--r--arch/x86/kernel/sev.c51
-rw-r--r--arch/x86/kernel/shstk.c4
-rw-r--r--arch/x86/kernel/signal_32.c2
-rw-r--r--arch/x86/kernel/signal_64.c6
-rw-r--r--arch/x86/kernel/smpboot.c28
-rw-r--r--arch/x86/kernel/sys_x86_64.c42
-rw-r--r--arch/x86/kernel/topology.c43
-rw-r--r--arch/x86/kernel/tsc.c8
-rw-r--r--arch/x86/kernel/tsc_msr.c14
-rw-r--r--arch/x86/kernel/tsc_sync.c6
-rw-r--r--arch/x86/kernel/vmlinux.lds.S10
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/Kconfig14
-rw-r--r--arch/x86/kvm/Makefile14
-rw-r--r--arch/x86/kvm/cpuid.c86
-rw-r--r--arch/x86/kvm/cpuid.h10
-rw-r--r--arch/x86/kvm/kvm_emulate.h1
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/mmu.h7
-rw-r--r--arch/x86/kvm/mmu/mmu.c306
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h28
-rw-r--r--arch/x86/kvm/mmu/mmutrace.h2
-rw-r--r--arch/x86/kvm/mmu/page_track.c2
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h28
-rw-r--r--arch/x86/kvm/mmu/spte.c40
-rw-r--r--arch/x86/kvm/mmu/spte.h26
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c190
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h1
-rw-r--r--arch/x86/kvm/pmu.c16
-rw-r--r--arch/x86/kvm/reverse_cpuid.h5
-rw-r--r--arch/x86/kvm/svm/sev.c405
-rw-r--r--arch/x86/kvm/svm/svm.c51
-rw-r--r--arch/x86/kvm/svm/svm.h59
-rw-r--r--arch/x86/kvm/svm/vmenter.S97
-rw-r--r--arch/x86/kvm/trace.h14
-rw-r--r--arch/x86/kvm/vmx/main.c167
-rw-r--r--arch/x86/kvm/vmx/nested.c30
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c2
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c4
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h93
-rw-r--r--arch/x86/kvm/vmx/vmcs.h5
-rw-r--r--arch/x86/kvm/vmx/vmenter.S2
-rw-r--r--arch/x86/kvm/vmx/vmx.c484
-rw-r--r--arch/x86/kvm/vmx/vmx.h14
-rw-r--r--arch/x86/kvm/vmx/x86_ops.h124
-rw-r--r--arch/x86/kvm/x86.c266
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/copy_mc.c21
-rw-r--r--arch/x86/lib/insn.c29
-rw-r--r--arch/x86/lib/iomap_copy_64.S15
-rw-r--r--arch/x86/lib/retpoline.S22
-rw-r--r--arch/x86/lib/x86-opcode-map.txt315
-rw-r--r--arch/x86/math-emu/fpu_etc.c9
-rw-r--r--arch/x86/math-emu/fpu_trig.c6
-rw-r--r--arch/x86/math-emu/reg_constant.c7
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/extable.c9
-rw-r--r--arch/x86/mm/fault.c64
-rw-r--r--arch/x86/mm/hugetlbpage.c35
-rw-r--r--arch/x86/mm/ident_map.c23
-rw-r--r--arch/x86/mm/init.c92
-rw-r--r--arch/x86/mm/mem_encrypt.c7
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c18
-rw-r--r--arch/x86/mm/mmap.c4
-rw-r--r--arch/x86/mm/numa.c4
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/pat/memtype.c73
-rw-r--r--arch/x86/mm/pat/set_memory.c68
-rw-r--r--arch/x86/mm/pgtable.c6
-rw-r--r--arch/x86/net/bpf_jit_comp.c184
-rw-r--r--arch/x86/net/bpf_jit_comp32.c3
-rw-r--r--arch/x86/pci/ce4100.c6
-rw-r--r--arch/x86/pci/mmconfig-shared.c40
-rw-r--r--arch/x86/pci/olpc.c3
-rw-r--r--arch/x86/platform/ce4100/ce4100.c1
-rw-r--r--arch/x86/platform/iris/iris.c5
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-pm.c7
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c5
-rw-r--r--arch/x86/purgatory/Makefile12
-rw-r--r--arch/x86/realmode/rm/Makefile11
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk15
-rw-r--r--arch/x86/tools/relocs.c371
-rw-r--r--arch/x86/um/vdso/Makefile9
-rw-r--r--arch/x86/video/Makefile3
-rw-r--r--arch/x86/video/video-common.c (renamed from arch/x86/video/fbdev.c)21
-rw-r--r--arch/x86/virt/Makefile2
-rw-r--r--arch/x86/virt/svm/sev.c62
-rw-r--r--arch/x86/virt/vmx/tdx/tdx.c1
-rw-r--r--arch/x86/xen/enlighten.c3
-rw-r--r--arch/x86/xen/enlighten_pv.c11
-rw-r--r--arch/x86/xen/smp_pv.c4
-rw-r--r--arch/x86/xen/xen-head.S2
-rw-r--r--arch/xtensa/boot/dts/Makefile3
-rw-r--r--arch/xtensa/include/asm/cacheflush.h24
-rw-r--r--arch/xtensa/include/asm/processor.h8
-rw-r--r--arch/xtensa/include/asm/ptrace.h2
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/stacktrace.c3
-rw-r--r--arch/xtensa/mm/cache.c6
-rw-r--r--arch/xtensa/mm/tlb.c11
-rw-r--r--arch/xtensa/platforms/iss/console.c6
-rw-r--r--block/Kconfig16
-rw-r--r--block/Makefile1
-rw-r--r--block/bdev.c214
-rw-r--r--block/bio.c51
-rw-r--r--block/blk-cgroup-rwstat.c18
-rw-r--r--block/blk-cgroup.c18
-rw-r--r--block/blk-cgroup.h2
-rw-r--r--block/blk-core.c46
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-iocost.c14
-rw-r--r--block/blk-lib.c68
-rw-r--r--block/blk-merge.c27
-rw-r--r--block/blk-mq-debugfs-zoned.c22
-rw-r--r--block/blk-mq-debugfs.c3
-rw-r--r--block/blk-mq-debugfs.h6
-rw-r--r--block/blk-mq.c191
-rw-r--r--block/blk-mq.h31
-rw-r--r--block/blk-settings.c309
-rw-r--r--block/blk-stat.c3
-rw-r--r--block/blk-sysfs.c10
-rw-r--r--block/blk-throttle.c1019
-rw-r--r--block/blk-throttle.h46
-rw-r--r--block/blk-zoned.c1512
-rw-r--r--block/blk.h100
-rw-r--r--block/bsg-lib.c6
-rw-r--r--block/early-lookup.c2
-rw-r--r--block/elevator.c46
-rw-r--r--block/elevator.h1
-rw-r--r--block/fops.c36
-rw-r--r--block/genhd.c55
-rw-r--r--block/ioctl.c88
-rw-r--r--block/mq-deadline.c204
-rw-r--r--block/partitions/cmdline.c49
-rw-r--r--block/partitions/core.c25
-rw-r--r--block/partitions/ldm.c6
-rw-r--r--certs/Makefile4
-rw-r--r--crypto/Kconfig20
-rw-r--r--crypto/Makefile5
-rw-r--r--crypto/acompress.c33
-rw-r--r--crypto/aead.c87
-rw-r--r--crypto/af_alg.c15
-rw-r--r--crypto/ahash.c63
-rw-r--r--crypto/akcipher.c31
-rw-r--r--crypto/algboss.c3
-rw-r--r--crypto/algif_hash.c10
-rw-r--r--crypto/api.c8
-rw-r--r--crypto/asymmetric_keys/Kconfig17
-rw-r--r--crypto/asymmetric_keys/Makefile2
-rw-r--r--crypto/asymmetric_keys/mscode_parser.c3
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c4
-rw-r--r--crypto/asymmetric_keys/public_key.c17
-rw-r--r--crypto/asymmetric_keys/selftest.c219
-rw-r--r--crypto/asymmetric_keys/selftest.h22
-rw-r--r--crypto/asymmetric_keys/selftest_ecdsa.c88
-rw-r--r--crypto/asymmetric_keys/selftest_rsa.c171
-rw-r--r--crypto/asymmetric_keys/signature.c2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c53
-rw-r--r--crypto/asymmetric_keys/x509_parser.h3
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c31
-rw-r--r--crypto/bpf_crypto_skcipher.c82
-rw-r--r--crypto/cipher.c3
-rw-r--r--crypto/compress.h3
-rw-r--r--crypto/crypto_user.c (renamed from crypto/crypto_user_base.c)10
-rw-r--r--crypto/crypto_user_stat.c176
-rw-r--r--crypto/ecc.c122
-rw-r--r--crypto/ecc_curve_defs.h49
-rw-r--r--crypto/ecdh.c11
-rw-r--r--crypto/ecdsa.c66
-rw-r--r--crypto/ecrdsa.c1
-rw-r--r--crypto/ecrdsa_defs.h5
-rw-r--r--crypto/fips.c1
-rw-r--r--crypto/hash.h30
-rw-r--r--crypto/jitterentropy-kcapi.c3
-rw-r--r--crypto/jitterentropy.c4
-rw-r--r--crypto/kpp.c30
-rw-r--r--crypto/lskcipher.c73
-rw-r--r--crypto/rng.c44
-rw-r--r--crypto/scompress.c3
-rw-r--r--crypto/shash.c75
-rw-r--r--crypto/sig.c13
-rw-r--r--crypto/skcipher.c86
-rw-r--r--crypto/skcipher.h10
-rw-r--r--crypto/testmgr.c7
-rw-r--r--crypto/testmgr.h226
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c2
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c40
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h3
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h6
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c11
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c6
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c8
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c8
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c1
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c14
-rw-r--r--drivers/accel/qaic/Makefile5
-rw-r--r--drivers/accel/qaic/qaic.h9
-rw-r--r--drivers/accel/qaic/qaic_data.c9
-rw-r--r--drivers/accel/qaic/qaic_debugfs.c338
-rw-r--r--drivers/accel/qaic/qaic_debugfs.h20
-rw-r--r--drivers/accel/qaic/qaic_drv.c26
-rw-r--r--drivers/accel/qaic/sahara.c449
-rw-r--r--drivers/accel/qaic/sahara.h10
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile8
-rw-r--r--drivers/acpi/acpi_ipmi.c23
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acobject.h107
-rw-r--r--drivers/acpi/acpica/dbnames.c8
-rw-r--r--drivers/acpi/acpica/evgpeinit.c1
-rw-r--r--drivers/acpi/acpica/tbfadt.c30
-rw-r--r--drivers/acpi/acpica/tbutils.c7
-rw-r--r--drivers/acpi/acpica/utdebug.c5
-rw-r--r--drivers/acpi/apei/einj-core.c10
-rw-r--r--drivers/acpi/apei/ghes.c84
-rw-r--r--drivers/acpi/arm64/dma.c17
-rw-r--r--drivers/acpi/arm64/iort.c20
-rw-r--r--drivers/acpi/bus.c25
-rw-r--r--drivers/acpi/cppc_acpi.c61
-rw-r--r--drivers/acpi/dock.c48
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c1
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c6
-rw-r--r--drivers/acpi/ec.c25
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/nhlt.c289
-rw-r--r--drivers/acpi/numa/srat.c82
-rw-r--r--drivers/acpi/platform_profile.c39
-rw-r--r--drivers/acpi/property.c11
-rw-r--r--drivers/acpi/resource.c25
-rw-r--r--drivers/acpi/scan.c38
-rw-r--r--drivers/acpi/thermal.c22
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/acpi/x86/Makefile8
-rw-r--r--drivers/acpi/x86/blacklist.c (renamed from drivers/acpi/blacklist.c)2
-rw-r--r--drivers/acpi/x86/cmos_rtc.c (renamed from drivers/acpi/acpi_cmos_rtc.c)2
-rw-r--r--drivers/acpi/x86/lpss.c (renamed from drivers/acpi/acpi_lpss.c)5
-rw-r--r--drivers/acpi/x86/s2idle.c8
-rw-r--r--drivers/acpi/x86/utils.c29
-rw-r--r--drivers/amba/bus.c11
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/ata/Kconfig28
-rw-r--r--drivers/ata/ahci.c85
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_st.c1
-rw-r--r--drivers/ata/libata-core.c110
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-sata.c171
-rw-r--r--drivers/ata/libata-scsi.c35
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/libata.h11
-rw-r--r--drivers/ata/pata_cs5520.c6
-rw-r--r--drivers/ata/pata_legacy.c8
-rw-r--r--drivers/ata/pata_macio.c14
-rw-r--r--drivers/ata/sata_gemini.c5
-rw-r--r--drivers/ata/sata_mv.c65
-rw-r--r--drivers/ata/sata_nv.c24
-rw-r--r--drivers/ata/sata_sil24.c2
-rw-r--r--drivers/ata/sata_sx4.c6
-rw-r--r--drivers/atm/fore200e.c3
-rw-r--r--drivers/atm/fore200e.h1
-rw-r--r--drivers/auxdisplay/Kconfig346
-rw-r--r--drivers/auxdisplay/Makefile10
-rw-r--r--drivers/auxdisplay/charlcd.c3
-rw-r--r--drivers/auxdisplay/seg-led-gpio.c6
-rw-r--r--drivers/base/arch_topology.c26
-rw-r--r--drivers/base/core.c26
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/devcoredump.c23
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/main.c6
-rw-r--r--drivers/base/power/wakeup.c11
-rw-r--r--drivers/base/regmap/internal.h14
-rw-r--r--drivers/base/regmap/regcache-maple.c8
-rw-r--r--drivers/base/regmap/regmap-i3c.c2
-rw-r--r--drivers/base/regmap/regmap-kunit.c990
-rw-r--r--drivers/base/regmap/regmap-mdio.c2
-rw-r--r--drivers/base/regmap/regmap-ram.c5
-rw-r--r--drivers/base/regmap/regmap-raw-ram.c5
-rw-r--r--drivers/base/regmap/regmap-sdw-mbq.c2
-rw-r--r--drivers/base/regmap/regmap-sdw.c2
-rw-r--r--drivers/base/regmap/regmap-spi.c1
-rw-r--r--drivers/base/regmap/regmap.c37
-rw-r--r--drivers/bcma/host_soc.c6
-rw-r--r--drivers/block/brd.c40
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/null_blk/main.c47
-rw-r--r--drivers/block/null_blk/null_blk.h2
-rw-r--r--drivers/block/null_blk/zoned.c358
-rw-r--r--drivers/block/pktcdvd.c7
-rw-r--r--drivers/block/ublk_drv.c8
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/zram/zram_drv.c60
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/Kconfig11
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c25
-rw-r--r--drivers/bluetooth/btintel.c88
-rw-r--r--drivers/bluetooth/btintel.h51
-rw-r--r--drivers/bluetooth/btintel_pcie.c1357
-rw-r--r--drivers/bluetooth/btintel_pcie.h430
-rw-r--r--drivers/bluetooth/btmrvl_main.c9
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c1
-rw-r--r--drivers/bluetooth/btmtk.c7
-rw-r--r--drivers/bluetooth/btmtksdio.c1
-rw-r--r--drivers/bluetooth/btqca.c187
-rw-r--r--drivers/bluetooth/btqca.h65
-rw-r--r--drivers/bluetooth/btqcomsmd.c6
-rw-r--r--drivers/bluetooth/btrsi.c1
-rw-r--r--drivers/bluetooth/btrtl.c7
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btusb.c66
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/bluetooth/hci_bcm4377.c1
-rw-r--r--drivers/bluetooth/hci_intel.c25
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_qca.c46
-rw-r--r--drivers/bluetooth/hci_serdev.c5
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bluetooth/hci_vhci.c10
-rw-r--r--drivers/bluetooth/virtio_bt.c2
-rw-r--r--drivers/bus/Kconfig10
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/brcmstb_gisb.c1
-rw-r--r--drivers/bus/mhi/host/internal.h4
-rw-r--r--drivers/bus/mhi/host/pm.c42
-rw-r--r--drivers/bus/stm32_etzpc.c141
-rw-r--r--drivers/bus/stm32_firewall.c294
-rw-r--r--drivers/bus/stm32_firewall.h83
-rw-r--r--drivers/bus/stm32_rifsc.c252
-rw-r--r--drivers/bus/ti-sysc.c165
-rw-r--r--drivers/cache/sifive_ccache.c74
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/hw_random/mxc-rnga.c9
-rw-r--r--drivers/char/hw_random/nomadik-rng.c1
-rw-r--r--drivers/char/hw_random/stm32-rng.c18
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/tpm/Kconfig17
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/eventlog/acpi.c1
-rw-r--r--drivers/char/tpm/tpm-buf.c252
-rw-r--r--drivers/char/tpm/tpm-chip.c6
-rw-r--r--drivers/char/tpm/tpm-interface.c26
-rw-r--r--drivers/char/tpm/tpm-sysfs.c18
-rw-r--r--drivers/char/tpm/tpm.h14
-rw-r--r--drivers/char/tpm/tpm2-cmd.c53
-rw-r--r--drivers/char/tpm/tpm2-sessions.c1286
-rw-r--r--drivers/char/tpm/tpm2-space.c11
-rw-r--r--drivers/char/tpm/tpm_infineon.c14
-rw-r--r--drivers/char/tpm/tpm_tis_core.c19
-rw-r--r--drivers/clk/Kconfig5
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/bcm/clk-bcm2711-dvp.c3
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c2
-rw-r--r--drivers/clk/clk-en7523.c191
-rw-r--r--drivers/clk/clk-gemini.c2
-rw-r--r--drivers/clk/clk-highbank.c1
-rw-r--r--drivers/clk/clk-loongson2.c548
-rw-r--r--drivers/clk/clk-renesas-pcie.c10
-rw-r--r--drivers/clk/clk-scmi.c249
-rw-r--r--drivers/clk/clk.c173
-rw-r--r--drivers/clk/clkdev.c35
-rw-r--r--drivers/clk/imx/Kconfig7
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c155
-rw-r--r--drivers/clk/imx/clk-imx95-blk-ctl.c438
-rw-r--r--drivers/clk/mediatek/clk-mt7988-infracfg.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c2
-rw-r--r--drivers/clk/mediatek/clk-mtk.c15
-rw-r--r--drivers/clk/mediatek/clk-pllfh.c2
-rw-r--r--drivers/clk/meson/Kconfig5
-rw-r--r--drivers/clk/meson/Makefile1
-rw-r--r--drivers/clk/meson/a1-peripherals.c1
-rw-r--r--drivers/clk/meson/a1-pll.c1
-rw-r--r--drivers/clk/meson/axg-aoclk.c2
-rw-r--r--drivers/clk/meson/axg-audio.c2
-rw-r--r--drivers/clk/meson/axg.c2
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c2
-rw-r--r--drivers/clk/meson/clk-dualdiv.c2
-rw-r--r--drivers/clk/meson/clk-mpll.c2
-rw-r--r--drivers/clk/meson/clk-phase.c2
-rw-r--r--drivers/clk/meson/clk-pll.c6
-rw-r--r--drivers/clk/meson/clk-regmap.c2
-rw-r--r--drivers/clk/meson/g12a-aoclk.c2
-rw-r--r--drivers/clk/meson/g12a.c78
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c2
-rw-r--r--drivers/clk/meson/gxbb.c2
-rw-r--r--drivers/clk/meson/meson-aoclk.c2
-rw-r--r--drivers/clk/meson/meson-eeclk.c2
-rw-r--r--drivers/clk/meson/s4-peripherals.c2
-rw-r--r--drivers/clk/meson/s4-pll.c2
-rw-r--r--drivers/clk/meson/sclk-div.c2
-rw-r--r--drivers/clk/meson/vclk.c141
-rw-r--r--drivers/clk/meson/vclk.h51
-rw-r--r--drivers/clk/meson/vid-pll-div.c2
-rw-r--r--drivers/clk/microchip/clk-mpfs.c92
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c1
-rw-r--r--drivers/clk/qcom/Kconfig2
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c75
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c24
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h5
-rw-r--r--drivers/clk/qcom/clk-cbf-8996.c13
-rw-r--r--drivers/clk/qcom/clk-rcg.h24
-rw-r--r--drivers/clk/qcom/clk-rcg2.c166
-rw-r--r--drivers/clk/qcom/clk-rpm.c1
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c1
-rw-r--r--drivers/clk/qcom/common.c18
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c11
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c20
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c20
-rw-r--r--drivers/clk/qcom/dispcc-sm8650.c20
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c120
-rw-r--r--drivers/clk/qcom/gcc-msm8917.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c1
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c61
-rw-r--r--drivers/clk/qcom/gdsc.c11
-rw-r--r--drivers/clk/qcom/hfpll.c6
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c8
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c2
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c27
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c2
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a779h0-cpg-mssr.c21
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c13
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c2
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c41
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c199
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h67
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c5
-rw-r--r--drivers/clk/rockchip/rst-rk3588.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-arm64.c56
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c13
-rw-r--r--drivers/clk/samsung/clk-exynos850.c440
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c8
-rw-r--r--drivers/clk/samsung/clk-gs101.c1192
-rw-r--r--drivers/clk/samsung/clk.c11
-rw-r--r--drivers/clk/samsung/clk.h15
-rw-r--r--drivers/clk/sophgo/Kconfig11
-rw-r--r--drivers/clk/sophgo/Makefile7
-rw-r--r--drivers/clk/sophgo/clk-cv1800.c1537
-rw-r--r--drivers/clk/sophgo/clk-cv1800.h123
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-common.c66
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-common.h81
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-ip.c887
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-ip.h261
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-pll.c419
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-pll.h118
-rw-r--r--drivers/clk/stm32/Kconfig7
-rw-r--r--drivers/clk/stm32/Makefile1
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c11
-rw-r--r--drivers/clk/stm32/clk-stm32mp13.c72
-rw-r--r--drivers/clk/stm32/clk-stm32mp25.c1875
-rw-r--r--drivers/clk/stm32/reset-stm32.c59
-rw-r--r--drivers/clk/stm32/reset-stm32.h7
-rw-r--r--drivers/clk/stm32/stm32mp25_rcc.h712
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c20
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c19
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h3
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c21
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.h2
-rw-r--r--drivers/clk/ti/dpll.c10
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/renesas-ostm.c3
-rw-r--r--drivers/clocksource/timer-clint.c2
-rw-r--r--drivers/clocksource/timer-ti-dm.c1
-rw-r--r--drivers/comedi/drivers/vmk80xx.c35
-rw-r--r--drivers/cpufreq/amd-pstate.c287
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c5
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c14
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c10
-rw-r--r--drivers/cpufreq/cpufreq-dt.c21
-rw-r--r--drivers/cpufreq/cpufreq.c47
-rw-r--r--drivers/cpufreq/freq_table.c12
-rw-r--r--drivers/cpufreq/intel_pstate.c173
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c10
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c8
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c4
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c209
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c19
-rw-r--r--drivers/cpufreq/ti-cpufreq.c4
-rw-r--r--drivers/cpuidle/coupled.c13
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c5
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c3
-rw-r--r--drivers/cpuidle/cpuidle-psci.c5
-rw-r--r--drivers/cpuidle/cpuidle-psci.h20
-rw-r--r--drivers/cpuidle/governors/ladder.c1
-rw-r--r--drivers/crypto/Kconfig26
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/atmel-i2c.c30
-rw-r--r--drivers/crypto/atmel-i2c.h8
-rw-r--r--drivers/crypto/atmel-sha204a.c68
-rw-r--r--drivers/crypto/bcm/spu2.c2
-rw-r--r--drivers/crypto/caam/ctrl.c19
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/ccp/sp-platform.c14
-rw-r--r--drivers/crypto/hisilicon/debugfs.c65
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c23
-rw-r--r--drivers/crypto/hisilicon/qm.c8
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c4
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c32
-rw-r--r--drivers/crypto/hisilicon/sgl.c5
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c24
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto.h16
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c33
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_stats.c183
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_stats.h8
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c5
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c7
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c1
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c1
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c1
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h88
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c19
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c101
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h86
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c97
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h76
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c231
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h188
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c380
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h127
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c1010
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c318
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h89
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c12
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_asym_algs.c66
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_mig_dev.c130
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c1
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c1
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c4
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c9
-rw-r--r--drivers/crypto/mxs-dcp.c104
-rw-r--r--drivers/crypto/nx/nx-842.c6
-rw-r--r--drivers/crypto/nx/nx-842.h10
-rw-r--r--drivers/crypto/sahara.c16
-rw-r--r--drivers/crypto/starfive/Kconfig4
-rw-r--r--drivers/crypto/starfive/jh7110-aes.c597
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.c43
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.h10
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c275
-rw-r--r--drivers/crypto/starfive/jh7110-rsa.c14
-rw-r--r--drivers/crypto/stm32/stm32-hash.c570
-rw-r--r--drivers/crypto/tegra/Makefile9
-rw-r--r--drivers/crypto/tegra/tegra-se-aes.c1933
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c1060
-rw-r--r--drivers/crypto/tegra/tegra-se-key.c156
-rw-r--r--drivers/crypto/tegra/tegra-se-main.c437
-rw-r--r--drivers/crypto/tegra/tegra-se.h560
-rw-r--r--drivers/cxl/Kconfig14
-rw-r--r--drivers/cxl/acpi.c106
-rw-r--r--drivers/cxl/core/cdat.c152
-rw-r--r--drivers/cxl/core/core.h14
-rw-r--r--drivers/cxl/core/hdm.c13
-rw-r--r--drivers/cxl/core/mbox.c87
-rw-r--r--drivers/cxl/core/memdev.c44
-rw-r--r--drivers/cxl/core/pci.c35
-rw-r--r--drivers/cxl/core/port.c127
-rw-r--r--drivers/cxl/core/region.c177
-rw-r--r--drivers/cxl/core/regs.c7
-rw-r--r--drivers/cxl/core/trace.c91
-rw-r--r--drivers/cxl/core/trace.h50
-rw-r--r--drivers/cxl/cxl.h17
-rw-r--r--drivers/cxl/cxlmem.h5
-rw-r--r--drivers/cxl/cxlpci.h1
-rw-r--r--drivers/cxl/pci.c95
-rw-r--r--drivers/cxl/pmem.c2
-rw-r--r--drivers/dax/bus.c69
-rw-r--r--drivers/dax/device.c8
-rw-r--r--drivers/dax/kmem.c30
-rw-r--r--drivers/devfreq/event/exynos-nocp.c6
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c6
-rw-r--r--drivers/devfreq/exynos-bus.c9
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c6
-rw-r--r--drivers/devfreq/rk3399_dmc.c6
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c6
-rw-r--r--drivers/dma-buf/dma-buf.c56
-rw-r--r--drivers/dma-buf/st-dma-fence-chain.c6
-rw-r--r--drivers/dma/Makefile6
-rw-r--r--drivers/dma/amba-pl08x.c4
-rw-r--r--drivers/dma/dma-axi-dmac.c78
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c38
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h3
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c14
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h5
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c113
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h61
-rw-r--r--drivers/dma/fsl-edma-common.c25
-rw-r--r--drivers/dma/fsl-edma-common.h110
-rw-r--r--drivers/dma/fsl-edma-main.c50
-rw-r--r--drivers/dma/fsl-edma-trace.c4
-rw-r--r--drivers/dma/fsl-edma-trace.h132
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/idxd/cdev.c100
-rw-r--r--drivers/dma/idxd/debugfs.c4
-rw-r--r--drivers/dma/idxd/device.c8
-rw-r--r--drivers/dma/idxd/idxd.h5
-rw-r--r--drivers/dma/idxd/init.c6
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/idxd/perfmon.c9
-rw-r--r--drivers/dma/idxd/registers.h3
-rw-r--r--drivers/dma/idxd/sysfs.c27
-rw-r--r--drivers/dma/imx-sdma.c97
-rw-r--r--drivers/dma/mcf-edma-main.c4
-rw-r--r--drivers/dma/owl-dma.c4
-rw-r--r--drivers/dma/pch_dma.c5
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/qcom/hidma.c11
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c109
-rw-r--r--drivers/dma/tegra186-gpc-dma.c3
-rw-r--r--drivers/dma/virt-dma.h10
-rw-r--r--drivers/dma/xilinx/xdma-regs.h3
-rw-r--r--drivers/dma/xilinx/xdma.c43
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c23
-rw-r--r--drivers/dpll/Kconfig2
-rw-r--r--drivers/dpll/dpll_core.c60
-rw-r--r--drivers/edac/altera_edac.c8
-rw-r--r--drivers/edac/amd64_edac.h1
-rw-r--r--drivers/edac/amd8111_edac.c3
-rw-r--r--drivers/edac/armada_xp_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/edac_device.c53
-rw-r--r--drivers/edac/edac_device.h22
-rw-r--r--drivers/edac/edac_device_sysfs.c22
-rw-r--r--drivers/edac/edac_mc_sysfs.c47
-rw-r--r--drivers/edac/edac_pci.h5
-rw-r--r--drivers/edac/highbank_l2_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/octeon_edac-l2c.c2
-rw-r--r--drivers/edac/octeon_edac-pc.c2
-rw-r--r--drivers/edac/qcom_edac.c1
-rw-r--r--drivers/edac/sifive_edac.c3
-rw-r--r--drivers/edac/skx_common.c2
-rw-r--r--drivers/edac/synopsys_edac.c50
-rw-r--r--drivers/edac/thunderx_edac.c6
-rw-r--r--drivers/edac/versal_edac.c12
-rw-r--r--drivers/edac/xgene_edac.c10
-rw-r--r--drivers/edac/zynqmp_edac.c2
-rw-r--r--drivers/eisa/Kconfig9
-rw-r--r--drivers/eisa/virtual_root.c2
-rw-r--r--drivers/firewire/.kunitconfig1
-rw-r--r--drivers/firewire/Kconfig16
-rw-r--r--drivers/firewire/Makefile6
-rw-r--r--drivers/firewire/core-card.c7
-rw-r--r--drivers/firewire/core-cdev.c7
-rw-r--r--drivers/firewire/core-topology.c3
-rw-r--r--drivers/firewire/core-trace.c5
-rw-r--r--drivers/firewire/core-transaction.c251
-rw-r--r--drivers/firewire/core.h21
-rw-r--r--drivers/firewire/nosy.c6
-rw-r--r--drivers/firewire/ohci.c139
-rw-r--r--drivers/firewire/packet-header-definitions.h234
-rw-r--r--drivers/firewire/packet-serdes-test.c582
-rw-r--r--drivers/firewire/sbp2.c13
-rw-r--r--drivers/firmware/arm_ffa/driver.c189
-rw-r--r--drivers/firmware/arm_scmi/Makefile3
-rw-r--r--drivers/firmware/arm_scmi/common.h11
-rw-r--r--drivers/firmware/arm_scmi/driver.c269
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c3
-rw-r--r--drivers/firmware/arm_scmi/notify.c30
-rw-r--r--drivers/firmware/arm_scmi/perf.c15
-rw-r--r--drivers/firmware/arm_scmi/pinctrl.c916
-rw-r--r--drivers/firmware/arm_scmi/powercap.c2
-rw-r--r--drivers/firmware/arm_scmi/protocols.h18
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c7
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c54
-rw-r--r--drivers/firmware/dmi-id.c7
-rw-r--r--drivers/firmware/dmi_scan.c17
-rw-r--r--drivers/firmware/efi/efi-pstore.c10
-rw-r--r--drivers/firmware/efi/libstub/Makefile11
-rw-r--r--drivers/firmware/efi/libstub/fdt.c4
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c29
-rw-r--r--drivers/firmware/efi/unaccepted_memory.c4
-rw-r--r--drivers/firmware/efi/vars.c2
-rw-r--r--drivers/firmware/google/cbmem.c1
-rw-r--r--drivers/firmware/google/coreboot_table.c6
-rw-r--r--drivers/firmware/google/coreboot_table.h6
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c8
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c137
-rw-r--r--drivers/firmware/qcom/qcom_scm.c118
-rw-r--r--drivers/firmware/raspberrypi.c7
-rw-r--r--drivers/firmware/smccc/smccc.c1
-rw-r--r--drivers/firmware/ti_sci.c24
-rw-r--r--drivers/fpga/dfl-pci.c3
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c3
-rw-r--r--drivers/gpio/Kconfig27
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-brcmstb.c21
-rw-r--r--drivers/gpio/gpio-cros-ec.c8
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-graniterapids.c383
-rw-r--r--drivers/gpio/gpio-lpc32xx.c1
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c10
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c2
-rw-r--r--drivers/gpio/gpio-regmap.c4
-rw-r--r--drivers/gpio/gpio-sch.c35
-rw-r--r--drivers/gpio/gpio-tangier.c9
-rw-r--r--drivers/gpio/gpio-tegra186.c20
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c88
-rw-r--r--drivers/gpio/gpiolib-cdev.c76
-rw-r--r--drivers/gpio/gpiolib-legacy.c49
-rw-r--r--drivers/gpio/gpiolib-of.c23
-rw-r--r--drivers/gpio/gpiolib-swnode.c44
-rw-r--r--drivers/gpio/gpiolib-sysfs.c2
-rw-r--r--drivers/gpio/gpiolib.c117
-rw-r--r--drivers/gpio/gpiolib.h19
-rw-r--r--drivers/gpu/drm/Kconfig59
-rw-r--r--drivers/gpu/drm/Makefile30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c360
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c506
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c416
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c5
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c117
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c82
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1017
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c214
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h225
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c179
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane_priv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state_priv.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c207
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c269
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c161
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/Makefile77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c)14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c112
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c183
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile25
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c182
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c)38
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h18
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h221
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c2
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h20
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h19
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h60
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h27
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h24
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h511
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h1106
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h3
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h38
-rw-r--r--drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h13
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c233
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h6
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h41
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c39
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h1836
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h55
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h140
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h164
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c121
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c187
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c374
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c1796
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c67
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c5
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c1
-rw-r--r--drivers/gpu/drm/ast/Makefile10
-rw-r--r--drivers/gpu/drm/ast/ast_ddc.c (renamed from drivers/gpu/drm/ast/ast_i2c.c)130
-rw-r--r--drivers/gpu/drm/ast/ast_ddc.h11
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h39
-rw-r--r--drivers/gpu/drm/ast/ast_main.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c165
-rw-r--r--drivers/gpu/drm/bridge/Kconfig14
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c20
-rw-r--r--drivers/gpu/drm/bridge/analogix/Kconfig2
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c15
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig4
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c6
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c6
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c1
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c25
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c6
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c6
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c6
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c229
-rw-r--r--drivers/gpu/drm/bridge/panel.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c31
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c104
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c21
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c17
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c1
-rw-r--r--drivers/gpu/drm/ci/test.yml6
-rw-r--r--drivers/gpu/drm/display/Kconfig52
-rw-r--r--drivers/gpu/drm/display/Makefile6
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c48
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper_internal.h2
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c42
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology_internal.h4
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c17
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c24
-rw-r--r--drivers/gpu/drm/drm_buddy.c428
-rw-r--r--drivers/gpu/drm/drm_client.c105
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c132
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c38
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c100
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h15
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h13
-rw-r--r--drivers/gpu/drm/drm_displayid.c7
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h (renamed from include/drm/drm_displayid.h)6
-rw-r--r--drivers/gpu/drm/drm_drv.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c268
-rw-r--r--drivers/gpu/drm/drm_eld.c4
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c45
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c5
-rw-r--r--drivers/gpu/drm/drm_gem.c34
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c7
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c101
-rw-r--r--drivers/gpu/drm/drm_internal.h10
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c45
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_modes.c40
-rw-r--r--drivers/gpu/drm/drm_panic.c585
-rw-r--r--drivers/gpu/drm/drm_plane.c56
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_print.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c95
-rw-r--r--drivers/gpu/drm/drm_sysfs.c20
-rw-r--r--drivers/gpu/drm/drm_vblank.c58
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/mmu.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c80
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c12
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug4
-rw-r--r--drivers/gpu/drm/i915/Makefile11
-rw-r--r--drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h273
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c113
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio_regs.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c312
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c242
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_regs.h42
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h117
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c353
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c389
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c257
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h186
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c713
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c126
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c107
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reg_defs.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h102
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c185
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c264
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c248
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c368
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c596
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c613
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h82
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h120
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_fixed.h (renamed from drivers/gpu/drm/i915/i915_fixed.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_regs.h348
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h41
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c47
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c322
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h309
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c470
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c22
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_regs.h327
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c27
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c43
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c227
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c22
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c95
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c80
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c13
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h26
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c89
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c8
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c66
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c19
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1407
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c6
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c52
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c60
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c2
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c1
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c21
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c14
-rw-r--r--drivers/gpu/drm/i915/intel_step.c80
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c380
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c36
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c1
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.h5
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c12
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c12
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h3
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c21
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h5
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c10
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c22
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c9
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c13
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/Makefile12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_crtc.c)218
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h28
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c)51
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h)9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c34
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c33
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_gem.c)69
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_gem.h)23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_padding.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_plane.c)26
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_plane.h)4
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c70
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c7
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c18
-rw-r--r--drivers/gpu/drm/msm/.gitignore1
-rw-r--r--drivers/gpu/drm/msm/Kconfig10
-rw-r--r--drivers/gpu/drm/msm/Makefile114
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h3251
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c (renamed from drivers/gpu/drm/msm/msm_gpummu.c)45
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h3268
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h4379
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h5572
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h11858
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h422
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c19
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c85
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h539
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h1446
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h2803
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c660
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h124
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c42
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c91
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h1181
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c129
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h1979
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c125
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h111
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c630
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.h77
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c39
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c71
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h52
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c23
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c59
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h38
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c107
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c26
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c14
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c26
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h7
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h790
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c79
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h227
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h309
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h237
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h384
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h286
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h483
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h131
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h1399
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h61
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c16
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c20
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h12
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c4
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h4
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h5
-rw-r--r--drivers/gpu/drm/msm/registers/.gitignore4
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a2xx.xml1865
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a3xx.xml1751
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a4xx.xml2409
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a5xx.xml3039
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml5011
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml228
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_common.xml400
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml2268
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml390
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml102
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml135
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml100
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml180
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml134
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml230
-rw-r--r--drivers/gpu/drm/msm/registers/display/edp.xml239
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml1015
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdp4.xml504
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdp5.xml806
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdp_common.xml90
-rw-r--r--drivers/gpu/drm/msm/registers/display/msm.xml32
-rw-r--r--drivers/gpu/drm/msm/registers/display/sfpb.xml17
-rw-r--r--drivers/gpu/drm/msm/registers/freedreno_copyright.xml40
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py970
-rw-r--r--drivers/gpu/drm/msm/registers/rules-fd.xsd404
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c7
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c40
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig38
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c164
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c13
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c228
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c1
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c1112
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c320
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c6
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c35
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c344
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c44
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c285
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c123
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c87
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c6
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c18
-rw-r--r--drivers/gpu/drm/panfrost/Makefile2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_debugfs.c21
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_debugfs.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c50
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c13
-rw-r--r--drivers/gpu/drm/panthor/Kconfig23
-rw-r--r--drivers/gpu/drm/panthor/Makefile14
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c283
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h21
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c561
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h357
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c1488
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c1363
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h503
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c230
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h142
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c482
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h52
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c597
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h39
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c2774
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h102
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h239
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c3528
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h50
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c4
-rw-r--r--drivers/gpu/drm/radeon/pptable.h10
-rw-r--r--drivers/gpu/drm/radeon/r100.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c1
-rw-r--r--drivers/gpu/drm/radeon/r420.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/rs400.c1
-rw-r--r--drivers/gpu/drm/radeon/rv515.c1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c34
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c12
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c18
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c207
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c6
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c235
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c38
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c7
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c33
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h30
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c9
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c52
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c94
-rw-r--r--drivers/gpu/drm/v3d/v3d_sysfs.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c48
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c110
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c632
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h75
-rw-r--r--drivers/gpu/drm/xe/Kconfig2
-rw-r--r--drivers/gpu/drm/xe/Makefile27
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h200
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h10
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h60
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h26
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c16
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c24
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c39
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c240
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c7
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h18
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h3
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h65
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gtt_defs.h37
-rw-r--r--drivers/gpu/drm/xe/regs/xe_guc_regs.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h19
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h3
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c57
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c136
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c27
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c96
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c1
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c6
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c192
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h74
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c4
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c24
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c47
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h6
-rw-r--r--drivers/gpu/drm/xe/xe_device.c237
-rw-r--r--drivers/gpu/drm/xe/xe_device.h13
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c16
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c7
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c8
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c93
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c76
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c136
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c100
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c242
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c43
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c39
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c52
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c1977
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h56
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h54
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c257
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h35
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c418
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c44
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c115
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h11
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c122
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c139
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c140
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c9
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c279
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c134
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.h51
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c116
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c232
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c253
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c13
-rw-r--r--drivers/gpu/drm/xe/xe_huc_debugfs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c46
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c155
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h7
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c270
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c3
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c6
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c194
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c9
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c16
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c144
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h82
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c66
-rw-r--r--drivers/gpu/drm/xe/xe_module.c7
-rw-r--r--drivers/gpu/drm/xe/xe_module.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c23
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c117
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h6
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c327
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h13
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c38
-rw-r--r--drivers/gpu/drm/xe/xe_query.c55
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c22
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c33
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c62
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c104
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h46
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c7
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h1
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h6
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c15
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c5
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c18
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h1
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c10
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c11
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c53
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.h8
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c347
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c20
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c134
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules11
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c231
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.h17
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h8
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c85
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c8
-rw-r--r--drivers/gpu/host1x/dev.c24
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/Makefile2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c5
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c17
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c2
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c226
-rw-r--r--drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c185
-rw-r--r--drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c58
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c290
-rw-r--r--drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c59
-rw-r--r--drivers/hid/bpf/progs/Makefile91
-rw-r--r--drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c133
-rw-r--r--drivers/hid/bpf/progs/README102
-rw-r--r--drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c173
-rw-r--r--drivers/hid/bpf/progs/XPPen__Artist24.bpf.c229
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c274
-rw-r--r--drivers/hid/bpf/progs/hid_bpf.h15
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h168
-rw-r--r--drivers/hid/hid-asus.c132
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-corsair.c4
-rw-r--r--drivers/hid/hid-debug.c3437
-rw-r--r--drivers/hid/hid-google-hammer.c5
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-kye.c75
-rw-r--r--drivers/hid/hid-lenovo.c23
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c14
-rw-r--r--drivers/hid/hid-mcp2221.c2
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-nintendo.c65
-rw-r--r--drivers/hid/hid-picolcd_core.c6
-rw-r--r--drivers/hid/hid-picolcd_fb.c8
-rw-r--r--drivers/hid/hid-playstation.c138
-rw-r--r--drivers/hid/hid-roccat-isku.c2
-rw-r--r--drivers/hid/hid-roccat-kone.c12
-rw-r--r--drivers/hid/hid-roccat-koneplus.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c10
-rw-r--r--drivers/hid/hid-roccat-pyra.c6
-rw-r--r--drivers/hid/hid-sensor-custom.c17
-rw-r--r--drivers/hid/hid-sony.c7
-rw-r--r--drivers/hid/hid-steam.c155
-rw-r--r--drivers/hid/hid-uclogic-params.c3
-rw-r--r--drivers/hid/hid-winwing.c226
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c82
-rw-r--r--drivers/hid/intel-ish-hid/Makefile3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h45
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c23
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c80
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c21
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c8
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h28
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.c275
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.h226
-rw-r--r--drivers/hid/surface-hid/surface_kbd.c5
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c6
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c6
-rw-r--r--drivers/hte/hte-tegra194-test.c6
-rw-r--r--drivers/hv/channel.c29
-rw-r--r--drivers/hv/connection.c29
-rw-r--r--drivers/hv/hv_common.c6
-rw-r--r--drivers/hv/vmbus_drv.c94
-rw-r--r--drivers/hwmon/Kconfig38
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c16
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adc128d818.c59
-rw-r--r--drivers/hwmon/adm1026.c2
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1177.c2
-rw-r--r--drivers/hwmon/adt7410.c4
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/adt7470.c2
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c51
-rw-r--r--drivers/hwmon/asb100.c2
-rw-r--r--drivers/hwmon/aspeed-g6-pwm-tach.c27
-rw-r--r--drivers/hwmon/atxp1.c2
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/corsair-cpro.c45
-rw-r--r--drivers/hwmon/da9052-hwmon.c38
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c15
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc1403.c838
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc2305.c8
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/ftsteutates.c2
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/g762.c4
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hs3001.c2
-rw-r--r--drivers/hwmon/ina209.c2
-rw-r--r--drivers/hwmon/ina238.c2
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/it87.c127
-rw-r--r--drivers/hwmon/jc42.c4
-rw-r--r--drivers/hwmon/lenovo-ec-sensors.c602
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/lm70.c4
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c4
-rw-r--r--drivers/hwmon/lm95245.c4
-rw-r--r--drivers/hwmon/ltc2945.c2
-rw-r--r--drivers/hwmon/ltc2947-i2c.c2
-rw-r--r--drivers/hwmon/ltc2990.c2
-rw-r--r--drivers/hwmon/ltc2991.c2
-rw-r--r--drivers/hwmon/ltc2992.c2
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4215.c2
-rw-r--r--drivers/hwmon/ltc4222.c2
-rw-r--r--drivers/hwmon/ltc4245.c2
-rw-r--r--drivers/hwmon/ltc4260.c2
-rw-r--r--drivers/hwmon/ltc4261.c2
-rw-r--r--drivers/hwmon/max127.c2
-rw-r--r--drivers/hwmon/max1619.c2
-rw-r--r--drivers/hwmon/max31730.c2
-rw-r--r--drivers/hwmon/max31790.c10
-rw-r--r--drivers/hwmon/max6620.c2
-rw-r--r--drivers/hwmon/max6621.c2
-rw-r--r--drivers/hwmon/max6639.c341
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/mc34vr500.c2
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c2
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c7
-rw-r--r--drivers/hwmon/nzxt-kraken3.c58
-rw-r--r--drivers/hwmon/pcf8591.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig23
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1266.c2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c7
-rw-r--r--drivers/hwmon/pmbus/adp1050.c56
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c2
-rw-r--r--drivers/hwmon/pmbus/ir35221.c2
-rw-r--r--drivers/hwmon/pmbus/ir36021.c2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c8
-rw-r--r--drivers/hwmon/pmbus/irps5401.c2
-rw-r--r--drivers/hwmon/pmbus/lt7182s.c2
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c2
-rw-r--r--drivers/hwmon/pmbus/max15301.c4
-rw-r--r--drivers/hwmon/pmbus/max16064.c2
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/max31785.c6
-rw-r--r--drivers/hwmon/pmbus/max8688.c2
-rw-r--r--drivers/hwmon/pmbus/mp2888.c2
-rw-r--r--drivers/hwmon/pmbus/mp2975.c136
-rw-r--r--drivers/hwmon/pmbus/mp5990.c2
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c2
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c2
-rw-r--r--drivers/hwmon/pmbus/pm6764tr.c2
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c6
-rw-r--r--drivers/hwmon/pmbus/stpddc60.c4
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/pmbus/tps40422.c2
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c6
-rw-r--r--drivers/hwmon/pmbus/xdp710.c131
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c6
-rw-r--r--drivers/hwmon/pmbus/xdpe152c4.c4
-rw-r--r--drivers/hwmon/pt5161l.c2
-rw-r--r--drivers/hwmon/pwm-fan.c45
-rw-r--r--drivers/hwmon/sbrmi.c2
-rw-r--r--drivers/hwmon/sbtsi_temp.c2
-rw-r--r--drivers/hwmon/sht21.c2
-rw-r--r--drivers/hwmon/sht4x.c2
-rw-r--r--drivers/hwmon/smsc47m192.c2
-rw-r--r--drivers/hwmon/stts751.c3
-rw-r--r--drivers/hwmon/tc654.c4
-rw-r--r--drivers/hwmon/tc74.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c2
-rw-r--r--drivers/hwmon/w83791d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwmon/w83l785ts.c2
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c1
-rw-r--r--drivers/hwtracing/intel_th/core.c6
-rw-r--r--drivers/i2c/busses/Kconfig49
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c8
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c4
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c8
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c9
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c6
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c12
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c8
-rw-r--r--drivers/i2c/busses/i2c-i801.c48
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c5
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c25
-rw-r--r--drivers/i2c/busses/i2c-ismt.c1
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c22
-rw-r--r--drivers/i2c/busses/i2c-mpc.c11
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c8
-rw-r--r--drivers/i2c/busses/i2c-ocores.c21
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c141
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h53
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-pxa.c16
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c10
-rw-r--r--drivers/i2c/busses/i2c-qup.c4
-rw-r--r--drivers/i2c/busses/i2c-riic.c125
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c14
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c1
-rw-r--r--drivers/i2c/busses/i2c-st.c11
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c8
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c26
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c13
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c1
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c4
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.c256
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.h85
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c148
-rw-r--r--drivers/i2c/busses/i2c-viai2c-zhaoxin.c298
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c1
-rw-r--r--drivers/i2c/busses/i2c-wmt.c421
-rw-r--r--drivers/i2c/i2c-core-acpi.c19
-rw-r--r--drivers/i2c/i2c-core-base.c12
-rw-r--r--drivers/i2c/i2c-mux.c24
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c2
-rw-r--r--drivers/iio/accel/mxc4005.c92
-rw-r--r--drivers/iio/addac/ad74115.c40
-rw-r--r--drivers/iio/frequency/admv1013.c40
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c2
-rw-r--r--drivers/iio/imu/adis16475.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c1
-rw-r--r--drivers/iio/pressure/bmp280-spi.c13
-rw-r--r--drivers/iio/pressure/bmp280.h1
-rw-r--r--drivers/infiniband/core/addr.c12
-rw-r--r--drivers/infiniband/core/cm.c11
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/core/device.c10
-rw-r--r--drivers/infiniband/core/lag.c3
-rw-r--r--drivers/infiniband/core/nldev.c23
-rw-r--r--drivers/infiniband/core/restrack.c63
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c3
-rw-r--r--drivers/infiniband/core/user_mad.c21
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c3
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c3
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h1
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c11
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c19
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h13
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c99
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c54
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c15
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c105
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h16
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c20
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h2
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c9
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c30
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c154
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c29
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c12
-rw-r--r--drivers/infiniband/hw/irdma/cm.c3
-rw-r--r--drivers/infiniband/hw/mana/cq.c111
-rw-r--r--drivers/infiniband/hw/mana/device.c51
-rw-r--r--drivers/infiniband/hw/mana/main.c328
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h147
-rw-r--r--drivers/infiniband/hw/mana/mr.c2
-rw-r--r--drivers/infiniband/hw/mana/qp.c126
-rw-r--r--drivers/infiniband/hw/mana/wq.c31
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c35
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c29
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c69
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c46
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c82
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c8
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/atlas_btns.c1
-rw-r--r--drivers/input/mouse/amimouse.c8
-rw-r--r--drivers/input/serio/ambakmi.c1
-rw-r--r--drivers/input/serio/i8042-io.h5
-rw-r--r--drivers/interconnect/core.c8
-rw-r--r--drivers/interconnect/qcom/x1e80100.c26
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/amd/Kconfig3
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h60
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h33
-rw-r--r--drivers/iommu/amd/init.c202
-rw-r--r--drivers/iommu/amd/io_pgtable.c13
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c18
-rw-r--r--drivers/iommu/amd/iommu.c334
-rw-r--r--drivers/iommu/amd/pasid.c198
-rw-r--r--drivers/iommu/amd/ppr.c288
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c168
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c468
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c582
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h62
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c4
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c496
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c8
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c20
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h3
-rw-r--r--drivers/iommu/dma-iommu.c82
-rw-r--r--drivers/iommu/dma-iommu.h14
-rw-r--r--drivers/iommu/exynos-iommu.c14
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cache.c419
-rw-r--r--drivers/iommu/intel/debugfs.c7
-rw-r--r--drivers/iommu/intel/dmar.c26
-rw-r--r--drivers/iommu/intel/iommu.c394
-rw-r--r--drivers/iommu/intel/iommu.h88
-rw-r--r--drivers/iommu/intel/irq_remapping.c148
-rw-r--r--drivers/iommu/intel/nested.c69
-rw-r--r--drivers/iommu/intel/pasid.c18
-rw-r--r--drivers/iommu/intel/perf.h1
-rw-r--r--drivers/iommu/intel/perfmon.c2
-rw-r--r--drivers/iommu/intel/svm.c383
-rw-r--r--drivers/iommu/intel/trace.h97
-rw-r--r--drivers/iommu/io-pgtable-arm.c15
-rw-r--r--drivers/iommu/io-pgtable-dart.c37
-rw-r--r--drivers/iommu/iommu-pages.h186
-rw-r--r--drivers/iommu/iommu-sva.c16
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/irq_remapping.c10
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--drivers/iommu/mtk_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/iommu/rockchip-iommu.c14
-rw-r--r--drivers/iommu/s390-iommu.c6
-rw-r--r--drivers/iommu/sun50i-iommu.c7
-rw-r--r--drivers/iommu/tegra-smmu.c18
-rw-r--r--drivers/iommu/virtio-iommu.c10
-rw-r--r--drivers/irqchip/Kconfig27
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-alpine-msi.c2
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c6
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c17
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c34
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c12
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c76
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c28
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c323
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.c211
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.h52
-rw-r--r--drivers/irqchip/irq-riscv-aplic-msi.c257
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c201
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c375
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c865
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h108
-rw-r--r--drivers/irqchip/irq-sifive-plic.c7
-rw-r--r--drivers/irqchip/irq-stm32-exti.c139
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c1
-rw-r--r--drivers/irqchip/irq-tb10x.c1
-rw-r--r--drivers/isdn/capi/Makefile3
-rw-r--r--drivers/isdn/capi/kcapi.c7
-rw-r--r--drivers/isdn/mISDN/socket.c10
-rw-r--r--drivers/macintosh/Kconfig2
-rw-r--r--drivers/macintosh/macio-adb.c24
-rw-r--r--drivers/macintosh/via-macii.c11
-rw-r--r--drivers/mailbox/Kconfig21
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/arm_mhuv3.c1103
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c21
-rw-r--r--drivers/mailbox/imx-mailbox.c16
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c3
-rw-r--r--drivers/mailbox/omap-mailbox.c519
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c412
-rw-r--r--drivers/md/bcache/bset.c44
-rw-r--r--drivers/md/bcache/bset.h28
-rw-r--r--drivers/md/bcache/btree.c40
-rw-r--r--drivers/md/bcache/super.c21
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/writeback.c10
-rw-r--r--drivers/md/dm-bio-prison-v2.c3
-rw-r--r--drivers/md/dm-cache-target.c17
-rw-r--r--drivers/md/dm-clone-metadata.c5
-rw-r--r--drivers/md/dm-clone-target.c18
-rw-r--r--drivers/md/dm-core.h2
-rw-r--r--drivers/md/dm-crypt.c73
-rw-r--r--drivers/md/dm-delay.c60
-rw-r--r--drivers/md/dm-era-target.c3
-rw-r--r--drivers/md/dm-integrity.c3
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-snap.c2
-rw-r--r--drivers/md/dm-table.c30
-rw-r--r--drivers/md/dm-target.c1
-rw-r--r--drivers/md/dm-thin.c16
-rw-r--r--drivers/md/dm-vdo/Makefile2
-rw-r--r--drivers/md/dm-vdo/data-vio.c3
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c4
-rw-r--r--drivers/md/dm-vdo/flush.c3
-rw-r--r--drivers/md/dm-vdo/indexer/io-factory.c2
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c35
-rw-r--r--drivers/md/dm-zero.c1
-rw-r--r--drivers/md/dm-zone.c501
-rw-r--r--drivers/md/dm-zoned-target.c1
-rw-r--r--drivers/md/dm.c84
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md-bitmap.c6
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/md.h3
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid5.c15
-rw-r--r--drivers/media/cec/core/cec-adap.c24
-rw-r--r--drivers/media/cec/core/cec-api.c5
-rw-r--r--drivers/media/cec/core/cec-core.c4
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c9
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c1
-rw-r--r--drivers/media/common/saa7146/saa7146_hlp.c8
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c231
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c34
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/af9013.c2
-rw-r--r--drivers/media/dvb-frontends/as102_fe_types.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2880/Kconfig2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c58
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c7
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c22
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/si2165.c6
-rw-r--r--drivers/media/dvb-frontends/si2168.c2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c9
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c4
-rw-r--r--drivers/media/i2c/Kconfig2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c16
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c16
-rw-r--r--drivers/media/i2c/adv7604.c20
-rw-r--r--drivers/media/i2c/adv7842.c25
-rw-r--r--drivers/media/i2c/dw9714.c6
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c4
-rw-r--r--drivers/media/i2c/hi556.c105
-rw-r--r--drivers/media/i2c/imx214.c1
-rw-r--r--drivers/media/i2c/imx219.c77
-rw-r--r--drivers/media/i2c/imx335.c637
-rw-r--r--drivers/media/i2c/max9271.h5
-rw-r--r--drivers/media/i2c/max9286.c2
-rw-r--r--drivers/media/i2c/ov2680.c103
-rw-r--r--drivers/media/i2c/ov2740.c20
-rw-r--r--drivers/media/i2c/ov4689.c673
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/st-mipid02.c2
-rw-r--r--drivers/media/i2c/tc358743.c25
-rw-r--r--drivers/media/i2c/tc358746.c3
-rw-r--r--drivers/media/i2c/tda1997x.c14
-rw-r--r--drivers/media/i2c/ths7303.c10
-rw-r--r--drivers/media/i2c/ths8200.c14
-rw-r--r--drivers/media/i2c/tvp7002.c32
-rw-r--r--drivers/media/mc/mc-devnode.c5
-rw-r--r--drivers/media/mc/mc-entity.c6
-rw-r--r--drivers/media/mmc/siano/smssdio.c25
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c12
-rw-r--r--drivers/media/pci/intel/Kconfig4
-rw-r--r--drivers/media/pci/intel/Makefile1
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c66
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c29
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/intel/ipu6/Kconfig18
-rw-r--r--drivers/media/pci/intel/ipu6/Makefile23
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c165
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h58
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c917
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h92
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-cpd.c362
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-cpd.h105
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c502
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h19
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-com.c413
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-com.h47
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-isys.c487
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-fw-isys.h596
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c663
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h82
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c536
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c242
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c720
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c810
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h78
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c403
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h59
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c1420
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h141
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c1367
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h206
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.c846
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-mmu.h73
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h226
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h172
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-platform-regs.h179
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c856
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.h342
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c20
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c4
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c9
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c16
-rw-r--r--drivers/media/pci/ttpci/budget-av.c573
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c495
-rw-r--r--drivers/media/pci/ttpci/budget-core.c38
-rw-r--r--drivers/media/pci/ttpci/budget.c173
-rw-r--r--drivers/media/pci/ttpci/budget.h21
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/broadcom/Kconfig23
-rw-r--r--drivers/media/platform/broadcom/Makefile3
-rw-r--r--drivers/media/platform/broadcom/bcm2835-unicam-regs.h246
-rw-r--r--drivers/media/platform/broadcom/bcm2835-unicam.c2739
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c26
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c17
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c13
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c13
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c125
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.h4
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c10
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c6
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c23
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c42
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c12
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h15
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c15
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c6
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c34
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c108
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-17x.c (renamed from drivers/media/platform/qcom/camss/camss-vfe-170.c)0
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c25
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c307
-rw-r--r--drivers/media/platform/qcom/camss/camss.h1
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c9
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h2
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Kconfig1
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Makefile7
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c3
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h5
-rw-r--r--drivers/media/platform/st/sti/hva/hva-hw.c3
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c7
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig1
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c4
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c5
-rw-r--r--drivers/media/platform/verisilicon/hantro_h1_regs.h4
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c1
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/rc/gpio-ir-recv.c1
-rw-r--r--drivers/media/rc/imon.c12
-rw-r--r--drivers/media/rc/ir-spi.c41
-rw-r--r--drivers/media/rc/lirc_dev.c18
-rw-r--r--drivers/media/rc/mtk-cir.c1
-rw-r--r--drivers/media/rc/serial_ir.c1
-rw-r--r--drivers/media/rc/st_rc.c1
-rw-r--r--drivers/media/rc/sunxi-cir.c1
-rw-r--r--drivers/media/spi/cxd2880-spi.c2
-rw-r--r--drivers/media/spi/gs1662.c27
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c1
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c3
-rw-r--r--drivers/media/test-drivers/visl/visl-video.c1
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c9
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c4
-rw-r--r--drivers/media/tuners/xc5000.c39
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c17
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c10
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c18
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c462
-rw-r--r--drivers/media/usb/go7007/go7007-fw.c4
-rw-r--r--drivers/media/usb/gspca/cpia1.c6
-rw-r--r--drivers/media/usb/s2255/s2255drv.c20
-rw-r--r--drivers/media/usb/siano/smsusb.c20
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c30
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c26
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c54
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c33
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c24
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c73
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c15
-rw-r--r--drivers/media/v4l2-core/v4l2-spi.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c113
-rw-r--r--drivers/memory/brcmstb_memc.c1
-rw-r--r--drivers/memory/mtk-smi.c2
-rw-r--r--drivers/memory/pl353-smc.c1
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c9
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/message/fusion/mptfc.c1
-rw-r--r--drivers/message/fusion/mptsas.c15
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptspi.c1
-rw-r--r--drivers/mfd/Kconfig16
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c90
-rw-r--r--drivers/mfd/cs42l43.c36
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c1
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c1
-rw-r--r--drivers/mfd/kempld-core.c227
-rw-r--r--drivers/mfd/ocelot-spi.c5
-rw-r--r--drivers/mfd/rk8xx-core.c104
-rw-r--r--drivers/mfd/rk8xx-i2c.c45
-rw-r--r--drivers/mfd/rohm-bd71828.c36
-rw-r--r--drivers/mfd/rsmu_i2c.c107
-rw-r--r--drivers/mfd/rsmu_spi.c8
-rw-r--r--drivers/mfd/ssbi.c1
-rw-r--r--drivers/mfd/timberdale.c1
-rw-r--r--drivers/mfd/tps6594-core.c253
-rw-r--r--drivers/mfd/tps6594-i2c.c20
-rw-r--r--drivers/mfd/tps6594-spi.c20
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/eeprom/at24.c18
-rw-r--r--drivers/misc/lkdtm/Makefile6
-rw-r--r--drivers/misc/lkdtm/perms.c2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c17
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c7
-rw-r--r--drivers/misc/mei/vsc-tp.c84
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c4
-rw-r--r--drivers/misc/tps6594-pfsm.c48
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c3
-rw-r--r--drivers/mmc/core/block.c12
-rw-r--r--drivers/mmc/core/debugfs.c7
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/core/sd_ops.c83
-rw-r--r--drivers/mmc/core/sdio_bus.c9
-rw-r--r--drivers/mmc/core/slot-gpio.c25
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/atmel-mci.c309
-rw-r--r--drivers/mmc/host/cqhci-core.c11
-rw-r--r--drivers/mmc/host/cqhci.h4
-rw-r--r--drivers/mmc/host/davinci_mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c1
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c1
-rw-r--r--drivers/mmc/host/moxart-mmc.c1
-rw-r--r--drivers/mmc/host/mtk-sd.c1
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c3
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c9
-rw-r--r--drivers/mmc/host/sdhci-acpi.c61
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c2
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c334
-rw-r--r--drivers/mmc/host/sdhci-omap.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c46
-rw-r--r--drivers/mmc/host/sdhci-s3c.c35
-rw-r--r--drivers/mmc/host/sdhci-sprd.c3
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdhci_am654.c182
-rw-r--r--drivers/mtd/devices/block2mtd.c8
-rw-r--r--drivers/mtd/devices/mchp23k256.c1
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/mtdcore.c11
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c5
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c7
-rw-r--r--drivers/mtd/parsers/redboot.c2
-rw-r--r--drivers/mtd/spi-nor/core.c4
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/Kconfig16
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/arcnet/arcdevice.h3
-rw-r--r--drivers/net/arcnet/arcnet.c11
-rw-r--r--drivers/net/bareudp.c19
-rw-r--r--drivers/net/bonding/bond_main.c25
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c25
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c2
-rw-r--r--drivers/net/can/cc770/Kconfig1
-rw-r--r--drivers/net/can/dev/dev.c2
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c208
-rw-r--r--drivers/net/dsa/b53/b53_priv.h12
-rw-r--r--drivers/net/dsa/bcm_sf2.c49
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c25
-rw-r--r--drivers/net/dsa/lan9303-core.c38
-rw-r--r--drivers/net/dsa/lantiq_gswip.c39
-rw-r--r--drivers/net/dsa/microchip/Kconfig2
-rw-r--r--drivers/net/dsa/microchip/Makefile2
-rw-r--r--drivers/net/dsa/microchip/ksz8.h9
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c251
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h10
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c6
-rw-r--r--drivers/net/dsa/microchip/ksz9477_tc_flower.c3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c224
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h16
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c819
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.h23
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c8
-rw-r--r--drivers/net/dsa/mt7530-mdio.c28
-rw-r--r--drivers/net/dsa/mt7530.c664
-rw-r--r--drivers/net/dsa/mt7530.h293
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c222
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c89
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/qca/ar9331.c37
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c49
-rw-r--r--drivers/net/dsa/realtek/realtek.h2
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c32
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c392
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.c8
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c47
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c3
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c39
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c255
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h27
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c26
-rw-r--r--drivers/net/ethernet/3com/3c515.c3
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/Kconfig4
-rw-r--r--drivers/net/ethernet/8390/Kconfig6
-rw-r--r--drivers/net/ethernet/8390/etherh.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c17
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c74
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c4
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c7
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h1
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c13
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h5
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c64
-rw-r--r--drivers/net/ethernet/broadcom/b44.c18
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c774
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c241
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c175
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c32
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c32
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c2
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c1
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/Makefile2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c20
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h5
-rw-r--r--drivers/net/ethernet/cortina/gemini.c14
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c37
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig2
-rw-r--r--drivers/net/ethernet/fungible/funeth/Makefile2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h97
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c229
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h50
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h6
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c162
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c619
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c138
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c140
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c31
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c646
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h643
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c433
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c129
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h94
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c60
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h50
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c10
-rw-r--r--drivers/net/ethernet/intel/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/Makefile3
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c42
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c182
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c253
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c244
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c1010
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c174
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h88
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c253
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c140
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c84
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c553
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h146
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h90
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c17
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile7
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c (renamed from drivers/net/ethernet/intel/ice/ice_devlink.c)561
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.h (renamed from drivers/net/ethernet/intel/ice/ice_devlink.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c430
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c226
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c369
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c140
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h320
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c101
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c237
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c141
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c300
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c143
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c122
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h24
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c64
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h73
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c38
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c188
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c10
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile6
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c150
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile6
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c124
-rw-r--r--drivers/net/ethernet/jme.c2
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c44
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c81
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c83
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c15
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c259
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h31
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dim.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c392
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c326
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c539
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c254
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c540
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c12
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c42
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c4
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c21
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c31
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c12
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c14
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h68
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c235
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c147
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c125
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c16
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h4
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h2
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig3
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c20
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c7
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c14
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c146
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h6
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c35
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c55
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c114
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c7
-rw-r--r--drivers/net/ethernet/sis/Kconfig4
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/smsc/Kconfig2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c107
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c58
-rw-r--r--drivers/net/ethernet/sun/cassini.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c16
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig17
-rw-r--r--drivers/net/ethernet/ti/Makefile9
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c722
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c19
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c112
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h11
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c113
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c1252
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c14
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h56
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c105
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c1197
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h88
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c1181
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c46
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.h6
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c66
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c31
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c23
-rw-r--r--drivers/net/ethernet/xircom/Kconfig2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/fjes/fjes_main.c3
-rw-r--r--drivers/net/geneve.c50
-rw-r--r--drivers/net/gtp.c868
-rw-r--r--drivers/net/hamradio/Kconfig6
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c5
-rw-r--r--drivers/net/ipa/gsi.c30
-rw-r--r--drivers/net/ipa/gsi.h12
-rw-r--r--drivers/net/ipa/gsi_private.h7
-rw-r--r--drivers/net/ipa/gsi_reg.c6
-rw-r--r--drivers/net/ipa/gsi_trans.c12
-rw-r--r--drivers/net/ipa/gsi_trans.h9
-rw-r--r--drivers/net/ipa/ipa.h15
-rw-r--r--drivers/net/ipa/ipa_cmd.c13
-rw-r--r--drivers/net/ipa/ipa_cmd.h18
-rw-r--r--drivers/net/ipa/ipa_data.h4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c19
-rw-r--r--drivers/net/ipa/ipa_endpoint.h10
-rw-r--r--drivers/net/ipa/ipa_gsi.c7
-rw-r--r--drivers/net/ipa/ipa_interrupt.c54
-rw-r--r--drivers/net/ipa/ipa_interrupt.h6
-rw-r--r--drivers/net/ipa/ipa_main.c43
-rw-r--r--drivers/net/ipa/ipa_mem.c15
-rw-r--r--drivers/net/ipa/ipa_mem.h4
-rw-r--r--drivers/net/ipa/ipa_modem.c14
-rw-r--r--drivers/net/ipa/ipa_modem.h5
-rw-r--r--drivers/net/ipa/ipa_power.c27
-rw-r--r--drivers/net/ipa/ipa_power.h19
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_qmi.h4
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c3
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h3
-rw-r--r--drivers/net/ipa/ipa_reg.c4
-rw-r--r--drivers/net/ipa/ipa_reg.h6
-rw-r--r--drivers/net/ipa/ipa_resource.c3
-rw-r--r--drivers/net/ipa/ipa_smp2p.c10
-rw-r--r--drivers/net/ipa/ipa_sysfs.c7
-rw-r--r--drivers/net/ipa/ipa_sysfs.h4
-rw-r--r--drivers/net/ipa/ipa_table.c27
-rw-r--r--drivers/net/ipa/ipa_table.h7
-rw-r--r--drivers/net/ipa/ipa_uc.c10
-rw-r--r--drivers/net/ipa/ipa_uc.h3
-rw-r--r--drivers/net/ipa/ipa_version.h22
-rw-r--r--drivers/net/ipa/reg.h8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c8
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c6
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.0.c6
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.5.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c1
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macsec.c48
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/mdio-gpio.c3
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netdevsim/ethtool.c11
-rw-r--r--drivers/net/netdevsim/netdev.c335
-rw-r--r--drivers/net/netdevsim/netdevsim.h10
-rw-r--r--drivers/net/ntb_netdev.c4
-rw-r--r--drivers/net/pcs/pcs-lynx.c5
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c28
-rw-r--r--drivers/net/pfcp.c301
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/air_en8811h.c1090
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c21
-rw-r--r--drivers/net/phy/dp83822.c37
-rw-r--r--drivers/net/phy/dp83869.c3
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c119
-rw-r--r--drivers/net/phy/marvell.c397
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c43
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/phy/micrel.c597
-rw-r--r--drivers/net/phy/phylink.c28
-rw-r--r--drivers/net/phy/qcom/at803x.c7
-rw-r--r--drivers/net/phy/realtek.c324
-rw-r--r--drivers/net/phy/sfp-bus.c5
-rw-r--r--drivers/net/phy/sfp.c27
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/pse-pd/Kconfig22
-rw-r--r--drivers/net/pse-pd/Makefile2
-rw-r--r--drivers/net/pse-pd/pd692x0.c1223
-rw-r--r--drivers/net/pse-pd/pse_core.c523
-rw-r--r--drivers/net/pse-pd/pse_regulator.c49
-rw-r--r--drivers/net/pse-pd/tps23881.c820
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team_core.c (renamed from drivers/net/team/team.c)65
-rw-r--r--drivers/net/team/team_nl.c59
-rw-r--r--drivers/net/team/team_nl.h29
-rw-r--r--drivers/net/tun.c20
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c62
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/lan78xx.c44
-rw-r--r--drivers/net/usb/qmi_wwan.c16
-rw-r--r--drivers/net/usb/r8152.c6
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/smsc95xx.c15
-rw-r--r--drivers/net/usb/sr9700.c10
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c1482
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/vsockmon.c2
-rw-r--r--drivers/net/vxlan/vxlan_core.c73
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c6
-rw-r--r--drivers/net/wireguard/main.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c14
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c127
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c182
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.c149
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c21
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c104
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h78
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c394
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c123
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h95
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c90
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h30
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c121
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c236
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c12
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1141
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c92
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c43
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c109
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c55
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c197
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h101
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c32
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c103
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c861
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c359
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c431
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h246
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c152
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c595
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c435
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c94
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c71
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c41
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c17
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c43
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c6
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c)67
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8710b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c)45
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c)41
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c)76
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h)0
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h)0
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c1061
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c370
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c1225
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c856
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h)162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c359
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c516
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c1072
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h91
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c375
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c1168
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c918
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h59
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c375
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c515
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h433
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig22
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c2109
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.h102
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c902
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c673
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h269
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c721
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h518
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig15
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile12
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c47
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c120
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h71
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1962
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h108
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c35
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h361
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c436
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h497
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c50
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c28
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c156
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c2706
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c161
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c716
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h57
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c31
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c54
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c20
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c8
-rw-r--r--drivers/net/xen-netback/common.h5
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c12
-rw-r--r--drivers/net/xen-netfront.c3
-rw-r--r--drivers/nfc/nfcmrvl/spi.c1
-rw-r--r--drivers/nfc/st95hf/core.c28
-rw-r--r--drivers/nfc/trf7970a.c42
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c2
-rw-r--r--drivers/nvdimm/btt.c12
-rw-r--r--drivers/nvdimm/core.c30
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvme/host/core.c66
-rw-r--r--drivers/nvme/host/fc.c4
-rw-r--r--drivers/nvme/host/ioctl.c15
-rw-r--r--drivers/nvme/host/multipath.c3
-rw-r--r--drivers/nvme/host/nvme.h38
-rw-r--r--drivers/nvme/host/pci.c22
-rw-r--r--drivers/nvme/host/tcp.c10
-rw-r--r--drivers/nvme/host/zns.c33
-rw-r--r--drivers/nvme/target/auth.c10
-rw-r--r--drivers/nvme/target/configfs.c59
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fc.c17
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c16
-rw-r--r--drivers/nvme/target/tcp.c11
-rw-r--r--drivers/nvme/target/zns.c10
-rw-r--r--drivers/of/address.c113
-rw-r--r--drivers/of/base.c34
-rw-r--r--drivers/of/device.c42
-rw-r--r--drivers/of/dynamic.c49
-rw-r--r--drivers/of/module.c15
-rw-r--r--drivers/of/of_private.h1
-rw-r--r--drivers/of/of_reserved_mem.c22
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/of/property.c91
-rw-r--r--drivers/of/resolver.c35
-rw-r--r--drivers/of/unittest.c14
-rw-r--r--drivers/opp/core.c31
-rw-r--r--drivers/opp/of.c17
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/pci/access.c44
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c9
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c10
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c11
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c15
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c238
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c11
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h14
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c18
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c28
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c15
-rw-r--r--drivers/pci/controller/pcie-mt7621.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c10
-rw-r--r--drivers/pci/doe.c12
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c80
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c9
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c22
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c9
-rw-r--r--drivers/pci/hotplug/TODO12
-rw-r--r--drivers/pci/msi/api.c58
-rw-r--r--drivers/pci/msi/irqdomain.c59
-rw-r--r--drivers/pci/msi/msi.c15
-rw-r--r--drivers/pci/of_property.c2
-rw-r--r--drivers/pci/pci.c143
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aspm.c186
-rw-r--r--drivers/pci/pcie/edr.c28
-rw-r--r--drivers/pci/pcie/err.c12
-rw-r--r--drivers/pci/pcie/portdrv.c8
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/pci/quirks.c28
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c11
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c1
-rw-r--r--drivers/perf/arm-cci.c1
-rw-r--r--drivers/perf/arm-ccn.c1
-rw-r--r--drivers/perf/arm-cmn.c11
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c9
-rw-r--r--drivers/perf/arm_dmc620_pmu.c1
-rw-r--r--drivers/perf/arm_dsu_pmu.c20
-rw-r--r--drivers/perf/arm_pmu_platform.c1
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c1
-rw-r--r--drivers/perf/arm_spe_pmu.c1
-rw-r--r--drivers/perf/cxl_pmu.c2
-rw-r--r--drivers/perf/dwc_pcie_pmu.c10
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c1
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c24
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c7
-rw-r--r--drivers/perf/hisilicon/hns3_pmu.c17
-rw-r--r--drivers/perf/qcom_l2_pmu.c9
-rw-r--r--drivers/perf/qcom_l3_pmu.c1
-rw-r--r--drivers/perf/riscv_pmu.c7
-rw-r--r--drivers/perf/riscv_pmu_legacy.c1
-rw-r--r--drivers/perf/riscv_pmu_sbi.c318
-rw-r--r--drivers/perf/thunderx2_pmu.c30
-rw-r--r--drivers/perf/xgene_pmu.c1
-rw-r--r--drivers/phy/freescale/Kconfig6
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c718
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c9
-rw-r--r--drivers/phy/mediatek/Kconfig11
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-xfi-tphy.c451
-rw-r--r--drivers/phy/phy-core.c26
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c373
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c107
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c144
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c47
-rw-r--r--drivers/phy/rockchip/Kconfig13
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c40
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c80
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c1608
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/samsung/phy-exynos7-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-exynosautov9-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-fsd-ufs.c1
-rw-r--r--drivers/phy/samsung/phy-gs101-ufs.c182
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c28
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h6
-rw-r--r--drivers/phy/ti/phy-tusb1210.c23
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c6
-rw-r--r--drivers/pinctrl/Kconfig15
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/Makefile2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c34
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c83
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8ulp.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c78
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6779.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c40
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c3
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c131
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c2
-rw-r--r--drivers/pinctrl/pinctrl-loongson2.c1
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c2
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c69
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c571
-rw-r--r--drivers/pinctrl/pinctrl-single.c50
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c277
-rw-r--r--drivers/pinctrl/pinmux.c26
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c55
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.h15
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c4
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd1315e.c1
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd1319d.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c136
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c18
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c112
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c95
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/platform/Kconfig2
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/arm64/Kconfig35
-rw-r--r--drivers/platform/arm64/Makefile8
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c562
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/cros_ec.c16
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c9
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c9
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c81
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c72
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c9
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c28
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c9
-rw-r--r--drivers/platform/chrome/cros_hps_i2c.c4
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c11
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c9
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c9
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig1
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c9
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c9
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c9
-rw-r--r--drivers/platform/surface/aggregator/core.c42
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c43
-rw-r--r--drivers/platform/surface/surface_platform_profile.c88
-rw-r--r--drivers/platform/x86/Kconfig58
-rw-r--r--drivers/platform/x86/Makefile9
-rw-r--r--drivers/platform/x86/acer-wmi.c9
-rw-r--r--drivers/platform/x86/amd/hsmp.c2
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig15
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile1
-rw-r--r--drivers/platform/x86/amd/pmc/mp2_stb.c280
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c9
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c5
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h15
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c7
-rw-r--r--drivers/platform/x86/amd/pmf/core.c2
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c51
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h3
-rw-r--r--drivers/platform/x86/asus-laptop.c45
-rw-r--r--drivers/platform/x86/asus-wmi.c417
-rw-r--r--drivers/platform/x86/classmate-laptop.c9
-rw-r--r--drivers/platform/x86/dell/Kconfig15
-rw-r--r--drivers/platform/x86/dell/Makefile1
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.c1
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c398
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c18
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c12
-rw-r--r--drivers/platform/x86/huawei-wmi.c8
-rw-r--r--drivers/platform/x86/ideapad-laptop.c140
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c1
-rw-r--r--drivers/platform/x86/intel/hid.c9
-rw-r--r--drivers/platform/x86/intel/ifs/load.c2
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c27
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c38
-rw-r--r--drivers/platform/x86/intel/pmc/core.h9
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c477
-rw-r--r--drivers/platform/x86/intel/rst.c1
-rw-r--r--drivers/platform/x86/intel/sdsi.c118
-rw-r--r--drivers/platform/x86/intel/smartconnect.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c383
-rw-r--r--drivers/platform/x86/intel/tpmi.c39
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c11
-rw-r--r--drivers/platform/x86/intel/vbtn.c14
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c127
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c338
-rw-r--r--drivers/platform/x86/lg-laptop.c3
-rw-r--r--drivers/platform/x86/meegopad_anx7428.c150
-rw-r--r--drivers/platform/x86/msi-laptop.c20
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c428
-rw-r--r--drivers/platform/x86/p2sb.c44
-rw-r--r--drivers/platform/x86/quickstart.c246
-rw-r--r--drivers/platform/x86/samsung-laptop.c10
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/think-lmi.c13
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c873
-rw-r--r--drivers/platform/x86/toshiba_acpi.c41
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c1
-rw-r--r--drivers/platform/x86/toshiba_haps.c1
-rw-r--r--drivers/platform/x86/uv_sysfs.c21
-rw-r--r--drivers/platform/x86/wireless-hotkey.c1
-rw-r--r--drivers/platform/x86/wmi.c62
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c16
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c18
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c224
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c133
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h3
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c30
-rw-r--r--drivers/pmdomain/core.c21
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h14
-rw-r--r--drivers/pmdomain/mediatek/mtk-scpsys.c1
-rw-r--r--drivers/pmdomain/renesas/Makefile4
-rw-r--r--drivers/pmdomain/renesas/r8a77960-sysc.c49
-rw-r--r--drivers/pmdomain/renesas/r8a77961-sysc.c (renamed from drivers/pmdomain/renesas/r8a7796-sysc.c)28
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c70
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.h9
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c20
-rw-r--r--drivers/pnp/isapnp/Kconfig2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c118
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c11
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c11
-rw-r--r--drivers/power/supply/mt6360_charger.c2
-rw-r--r--drivers/power/supply/power_supply_sysfs.c20
-rw-r--r--drivers/power/supply/rt9455_charger.c2
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/test_power.c36
-rw-r--r--drivers/powercap/dtpm_cpu.c8
-rw-r--r--drivers/powercap/intel_rapl_common.c607
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c3
-rw-r--r--drivers/pps/clients/pps_parport.c6
-rw-r--r--drivers/ptp/ptp_clockmatrix.c6
-rw-r--r--drivers/ptp/ptp_dte.c6
-rw-r--r--drivers/ptp/ptp_idt82p33.c6
-rw-r--r--drivers/ptp/ptp_ines.c5
-rw-r--r--drivers/ptp/ptp_ocp.c6
-rw-r--r--drivers/ptp/ptp_qoriq.c5
-rw-r--r--drivers/ptp/ptp_vmw.c1
-rw-r--r--drivers/pwm/Kconfig4
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c606
-rw-r--r--drivers/pwm/pwm-bcm2835.c30
-rw-r--r--drivers/pwm/pwm-dwc-core.c1
-rw-r--r--drivers/pwm/pwm-dwc.c88
-rw-r--r--drivers/pwm/pwm-dwc.h6
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/pwm/pwm-meson.c213
-rw-r--r--drivers/pwm/pwm-pca9685.c4
-rw-r--r--drivers/pwm/pwm-sti.c159
-rw-r--r--drivers/pwm/pwm-stm32.c60
-rw-r--r--drivers/pwm/sysfs.c545
-rw-r--r--drivers/rapidio/Kconfig17
-rw-r--r--drivers/ras/amd/fmpm.c57
-rw-r--r--drivers/ras/debugfs.h4
-rw-r--r--drivers/regulator/Kconfig12
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/axp20x-regulator.c94
-rw-r--r--drivers/regulator/core.c31
-rw-r--r--drivers/regulator/da9121-regulator.c4
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/devres.c59
-rw-r--r--drivers/regulator/irq_helpers.c3
-rw-r--r--drivers/regulator/isl9305.c2
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/mt6311-regulator.c2
-rw-r--r--drivers/regulator/mt6360-regulator.c32
-rw-r--r--drivers/regulator/pca9450-regulator.c196
-rw-r--r--drivers/regulator/pf8x00-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c1
-rw-r--r--drivers/regulator/rk808-regulator.c218
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c2
-rw-r--r--drivers/regulator/rtmv20-regulator.c2
-rw-r--r--drivers/regulator/rtq2208-regulator.c107
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/regulator/sun20i-regulator.c157
-rw-r--r--drivers/regulator/tps51632-regulator.c2
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps65132-regulator.c7
-rw-r--r--drivers/regulator/tps6594-regulator.c334
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c1
-rw-r--r--drivers/remoteproc/mtk_common.h11
-rw-r--r--drivers/remoteproc/mtk_scp.c241
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c7
-rw-r--r--drivers/remoteproc/remoteproc_internal.h2
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c58
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c329
-rw-r--r--drivers/reset/Kconfig4
-rw-r--r--drivers/reset/reset-mpfs.c95
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c1
-rw-r--r--drivers/rpmsg/rpmsg_char.c2
-rw-r--r--drivers/rpmsg/rpmsg_core.c16
-rw-r--r--drivers/rpmsg/rpmsg_ctrl.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/block/dasd_ioctl.c2
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/raw3270.c6
-rw-r--r--drivers/s390/char/vmlogrdr.c20
-rw-r--r--drivers/s390/cio/airq.c1
-rw-r--r--drivers/s390/cio/chp.c141
-rw-r--r--drivers/s390/cio/chp.h2
-rw-r--r--drivers/s390/cio/chsc.c122
-rw-r--r--drivers/s390/cio/chsc.h5
-rw-r--r--drivers/s390/cio/cio.c1
-rw-r--r--drivers/s390/cio/cio_inject.c2
-rw-r--r--drivers/s390/cio/css.c14
-rw-r--r--drivers/s390/cio/css.h13
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device_fsm.c5
-rw-r--r--drivers/s390/cio/device_ops.c8
-rw-r--r--drivers/s390/cio/idset.c12
-rw-r--r--drivers/s390/cio/qdio_main.c28
-rw-r--r--drivers/s390/cio/trace.h2
-rw-r--r--drivers/s390/crypto/Makefile2
-rw-r--r--drivers/s390/crypto/ap_bus.c248
-rw-r--r--drivers/s390/crypto/ap_bus.h22
-rw-r--r--drivers/s390/crypto/ap_queue.c4
-rw-r--r--drivers/s390/crypto/pkey_api.c109
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c224
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c9
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c18
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c54
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ism_drv.c39
-rw-r--r--drivers/s390/net/netiucv.c20
-rw-r--r--drivers/s390/net/qeth_core.h9
-rw-r--r--drivers/s390/net/qeth_core_main.c99
-rw-r--r--drivers/s390/net/smsgiucv_app.c21
-rw-r--r--drivers/scsi/FlashPoint.c1
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/a3000.c8
-rw-r--r--drivers/scsi/a4000t.c8
-rw-r--r--drivers/scsi/aha152x.c8
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx75
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx97
-rw-r--r--drivers/scsi/aic7xxx/Makefile12
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c29
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/atari_scsi.c8
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c6
-rw-r--r--drivers/scsi/ch.c20
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c6
-rw-r--r--drivers/scsi/cxlflash/main.c35
-rw-r--r--drivers/scsi/cxlflash/superpipe.c40
-rw-r--r--drivers/scsi/cxlflash/superpipe.h11
-rw-r--r--drivers/scsi/cxlflash/vlun.c9
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c9
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c20
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c26
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c41
-rw-r--r--drivers/scsi/hosts.c13
-rw-r--r--drivers/scsi/hpsa.c4
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c5
-rw-r--r--drivers/scsi/imm.c12
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/isci/init.c29
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c84
-rw-r--r--drivers/scsi/libsas/sas_expander.c91
-rw-r--r--drivers/scsi/libsas/sas_internal.h15
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h64
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c132
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c75
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c94
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h32
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c315
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c10
-rw-r--r--drivers/scsi/mac_scsi.c8
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid113
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c33
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h3
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h20
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h20
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h15
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c35
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c42
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c86
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c26
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c18
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c18
-rw-r--r--drivers/scsi/mvsas/mv_init.c26
-rw-r--r--drivers/scsi/myrb.c20
-rw-r--r--drivers/scsi/myrs.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c21
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h1
-rw-r--r--drivers/scsi/pmcraid.c33
-rw-r--r--drivers/scsi/ppa.c8
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c6
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c12
-rw-r--r--drivers/scsi/qla2xxx/Kconfig42
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c128
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c14
-rw-r--r--drivers/scsi/scsi_debugfs.c56
-rw-r--r--drivers/scsi/scsi_devinfo.c18
-rw-r--r--drivers/scsi/scsi_lib.c48
-rw-r--r--drivers/scsi/scsi_scan.c108
-rw-r--r--drivers/scsi/scsi_sysfs.c5
-rw-r--r--drivers/scsi/scsi_transport_fc.c15
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/scsicam.c2
-rw-r--r--drivers/scsi/sd.c38
-rw-r--r--drivers/scsi/sd.h19
-rw-r--r--drivers/scsi/sd_zbc.c335
-rw-r--r--drivers/scsi/ses.c1
-rw-r--r--drivers/scsi/sg.c38
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/scsi/snic/snic_attrs.c11
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/wd33c93.c4
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c6
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/canaan/Kconfig4
-rw-r--r--drivers/soc/hisilicon/Kconfig2
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c6
-rw-r--r--drivers/soc/mediatek/Kconfig1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c163
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c41
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c14
-rw-r--r--drivers/soc/mediatek/mtk-svs.c7
-rw-r--r--drivers/soc/pxa/ssp.c2
-rw-r--r--drivers/soc/qcom/cmd-db.c41
-rw-r--r--drivers/soc/qcom/icc-bwmon.c8
-rw-r--r--drivers/soc/qcom/pmic_glink.c26
-rw-r--r--drivers/soc/qcom/pmic_pdcharger_ulog.c4
-rw-r--r--drivers/soc/qcom/qcom_stats.c4
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c5
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/renesas/Kconfig6
-rw-r--r--drivers/soc/renesas/renesas-soc.c20
-rw-r--r--drivers/soc/samsung/exynos-asv.c10
-rw-r--r--drivers/soc/tegra/pmc.c2
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c7
-rw-r--r--drivers/soundwire/amd_init.c36
-rw-r--r--drivers/soundwire/amd_init.h8
-rw-r--r--drivers/soundwire/amd_manager.c28
-rw-r--r--drivers/soundwire/amd_manager.h3
-rw-r--r--drivers/soundwire/bus.c14
-rw-r--r--drivers/soundwire/bus_type.c5
-rw-r--r--drivers/soundwire/cadence_master.c36
-rw-r--r--drivers/soundwire/intel.c68
-rw-r--r--drivers/soundwire/intel.h7
-rw-r--r--drivers/soundwire/intel_ace2x.c117
-rw-r--r--drivers/soundwire/intel_auxdevice.c41
-rw-r--r--drivers/soundwire/intel_auxdevice.h1
-rw-r--r--drivers/soundwire/intel_init.c14
-rw-r--r--drivers/soundwire/qcom.c28
-rw-r--r--drivers/soundwire/sysfs_local.h4
-rw-r--r--drivers/soundwire/sysfs_slave.c64
-rw-r--r--drivers/soundwire/sysfs_slave_dpn.c3
-rw-r--r--drivers/spi/Kconfig22
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-airoha-snfi.c1129
-rw-r--r--drivers/spi/spi-altera-platform.c1
-rw-r--r--drivers/spi/spi-amd.c112
-rw-r--r--drivers/spi/spi-armada-3700.c8
-rw-r--r--drivers/spi/spi-atmel.c8
-rw-r--r--drivers/spi/spi-au1550.c29
-rw-r--r--drivers/spi/spi-axi-spi-engine.c2
-rw-r--r--drivers/spi/spi-bitbang.c23
-rw-r--r--drivers/spi/spi-cadence-quadspi.c109
-rw-r--r--drivers/spi/spi-cadence-xspi.c8
-rw-r--r--drivers/spi/spi-coldfire-qspi.c1
-rw-r--r--drivers/spi/spi-cs42l43.c127
-rw-r--r--drivers/spi/spi-dw-core.c20
-rw-r--r--drivers/spi/spi-dw-mmio.c13
-rw-r--r--drivers/spi/spi-dw.h2
-rw-r--r--drivers/spi/spi-fsl-cpm.c14
-rw-r--r--drivers/spi/spi-fsl-cpm.h5
-rw-r--r--drivers/spi/spi-fsl-dspi.c1
-rw-r--r--drivers/spi/spi-fsl-lpspi.c28
-rw-r--r--drivers/spi/spi-fsl-spi.c7
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c2
-rw-r--r--drivers/spi/spi-imx.c20
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c1
-rw-r--r--drivers/spi/spi-mt65xx.c32
-rw-r--r--drivers/spi/spi-mt7621.c95
-rw-r--r--drivers/spi/spi-mux.c2
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c95
-rw-r--r--drivers/spi/spi-pci1xxxx.c2
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/spi/spi-pic32.c6
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c38
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c10
-rw-r--r--drivers/spi/spi-pxa2xx.c223
-rw-r--r--drivers/spi/spi-pxa2xx.h42
-rw-r--r--drivers/spi/spi-rspi.c12
-rw-r--r--drivers/spi/spi-s3c64xx.c11
-rw-r--r--drivers/spi/spi-stm32.c14
-rw-r--r--drivers/spi/spi-sun4i.c9
-rw-r--r--drivers/spi/spi-sun6i.c17
-rw-r--r--drivers/spi/spi-xlp.c8
-rw-r--r--drivers/spi/spi.c127
-rw-r--r--drivers/ssb/main.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c26
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig15
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile5
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c955
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c14
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h1
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h81
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h6
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h41
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h132
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c281
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c123
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c493
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c102
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c164
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c183
-rw-r--r--drivers/staging/media/atomisp/pci/bits.h4
-rw-r--r--drivers/staging/media/atomisp/pci/defs.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h1
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h19
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c2
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css.h1
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h17
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_tpg.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h1
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h15
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c20
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c28
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c138
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h1
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c127
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h12
-rw-r--r--drivers/staging/media/imx/Kconfig1
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c4
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c1
-rw-r--r--drivers/staging/media/ipu3/ipu3.c10
-rw-r--r--drivers/staging/media/max96712/max96712.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c12
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c10
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c3
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c10
-rw-r--r--drivers/staging/media/tegra-video/vi.c12
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rts5208/rtsx.c24
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/tee/Kconfig1
-rw-r--r--drivers/tee/Makefile1
-rw-r--r--drivers/tee/amdtee/amdtee_private.h2
-rw-r--r--drivers/tee/amdtee/call.c2
-rw-r--r--drivers/tee/amdtee/core.c3
-rw-r--r--drivers/tee/amdtee/shm_pool.c2
-rw-r--r--drivers/tee/optee/call.c2
-rw-r--r--drivers/tee/optee/core.c66
-rw-r--r--drivers/tee/optee/device.c2
-rw-r--r--drivers/tee/optee/ffa_abi.c8
-rw-r--r--drivers/tee/optee/notif.c2
-rw-r--r--drivers/tee/optee/optee_private.h14
-rw-r--r--drivers/tee/optee/rpc.c2
-rw-r--r--drivers/tee/optee/smc_abi.c17
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/tee/tee_private.h35
-rw-r--r--drivers/tee/tee_shm.c67
-rw-r--r--drivers/tee/tee_shm_pool.c2
-rw-r--r--drivers/tee/tstee/Kconfig11
-rw-r--r--drivers/tee/tstee/Makefile3
-rw-r--r--drivers/tee/tstee/core.c480
-rw-r--r--drivers/tee/tstee/tstee_private.h92
-rw-r--r--drivers/thermal/amlogic_thermal.c10
-rw-r--r--drivers/thermal/armada_thermal.c9
-rw-r--r--drivers/thermal/cpufreq_cooling.c3
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c97
-rw-r--r--drivers/thermal/gov_fair_share.c82
-rw-r--r--drivers/thermal/gov_power_allocator.c59
-rw-r--r--drivers/thermal/gov_step_wise.c98
-rw-r--r--drivers/thermal/gov_user_space.c10
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c1
-rw-r--r--drivers/thermal/intel/intel_hfi.c113
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c1
-rw-r--r--drivers/thermal/k3_bandgap.c1
-rw-r--r--drivers/thermal/loongson2_thermal.c117
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c443
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c1
-rw-r--r--drivers/thermal/qcom/tsens-v2.c1
-rw-r--r--drivers/thermal/qcom/tsens.c33
-rw-r--r--drivers/thermal/qcom/tsens.h5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c165
-rw-r--r--drivers/thermal/thermal_core.c187
-rw-r--r--drivers/thermal/thermal_core.h121
-rw-r--r--drivers/thermal/thermal_debugfs.c171
-rw-r--r--drivers/thermal/thermal_debugfs.h8
-rw-r--r--drivers/thermal/thermal_helpers.c8
-rw-r--r--drivers/thermal/thermal_netlink.c68
-rw-r--r--drivers/thermal/thermal_netlink.h26
-rw-r--r--drivers/thermal/thermal_sysfs.c20
-rw-r--r--drivers/thermal/thermal_trace.h2
-rw-r--r--drivers/thermal/thermal_trace_ipa.h2
-rw-r--r--drivers/thermal/thermal_trip.c53
-rw-r--r--drivers/thunderbolt/switch.c48
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/thunderbolt/tb.h3
-rw-r--r--drivers/thunderbolt/usb4.c13
-rw-r--r--drivers/tty/hvc/hvc_iucv.c15
-rw-r--r--drivers/tty/serial/8250/8250.h3
-rw-r--r--drivers/tty/serial/8250/8250_alpha.c21
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_dw.c6
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c8
-rw-r--r--drivers/tty/serial/8250/Makefile2
-rw-r--r--drivers/tty/serial/kgdboc.c30
-rw-r--r--drivers/tty/serial/mxs-auart.c8
-rw-r--r--drivers/tty/serial/pmac_zilog.c14
-rw-r--r--drivers/tty/serial/serial_base.h4
-rw-r--r--drivers/tty/serial/serial_core.c23
-rw-r--r--drivers/tty/serial/serial_port.c34
-rw-r--r--drivers/tty/serial/stm32-usart.c13
-rw-r--r--drivers/tty/vt/vc_screen.c2
-rw-r--r--drivers/ufs/core/ufs-mcq.c5
-rw-r--r--drivers/ufs/core/ufs_bsg.c3
-rw-r--r--drivers/ufs/core/ufshcd.c379
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c2
-rw-r--r--drivers/ufs/host/ufs-exynos.c205
-rw-r--r--drivers/ufs/host/ufs-exynos.h24
-rw-r--r--drivers/ufs/host/ufs-mediatek-sip.h94
-rw-r--r--drivers/ufs/host/ufs-mediatek.c131
-rw-r--r--drivers/ufs/host/ufs-mediatek.h90
-rw-r--r--drivers/ufs/host/ufs-qcom.c39
-rw-r--r--drivers/ufs/host/ufs-qcom.h12
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c4
-rw-r--r--drivers/uio/uio_hv_generic.c12
-rw-r--r--drivers/uio/uio_pruss.c2
-rw-r--r--drivers/usb/core/hcd-pci.c3
-rw-r--r--drivers/usb/core/hub.c28
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/port.c50
-rw-r--r--drivers/usb/core/sysfs.c16
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c72
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc2/hcd.c49
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c19
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/core.c92
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c12
-rw-r--r--drivers/usb/dwc3/host.c38
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/function/f_fs.c38
-rw-r--r--drivers/usb/gadget/function/f_ncm.c4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c4
-rw-r--r--drivers/usb/gadget/udc/core.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c5
-rw-r--r--drivers/usb/host/ohci-hcd.c8
-rw-r--r--drivers/usb/host/xhci-plat.h4
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci-rzv2m.c1
-rw-r--r--drivers/usb/host/xhci-trace.h12
-rw-r--r--drivers/usb/image/microtek.c8
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c6
-rw-r--r--drivers/usb/misc/usb-ljca.c22
-rw-r--r--drivers/usb/phy/phy-generic.c7
-rw-r--r--drivers/usb/serial/option.c40
-rw-r--r--drivers/usb/storage/scsiglue.c57
-rw-r--r--drivers/usb/storage/uas.c57
-rw-r--r--drivers/usb/storage/usb.c10
-rw-r--r--drivers/usb/typec/class.c7
-rw-r--r--drivers/usb/typec/mux/it5205.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c8
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c52
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c92
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h5
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c75
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c14
-rw-r--r--drivers/vdpa/vdpa.c6
-rw-r--r--drivers/vfio/cdx/Makefile2
-rw-r--r--drivers/vfio/cdx/intr.c217
-rw-r--r--drivers/vfio/cdx/main.c63
-rw-r--r--drivers/vfio/cdx/private.h18
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/pds/dirty.c1
-rw-r--r--drivers/vfio/pci/qat/Kconfig12
-rw-r--r--drivers/vfio/pci/qat/Makefile3
-rw-r--r--drivers/vfio/pci/qat/main.c702
-rw-r--r--drivers/vfio/pci/vfio_pci.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c81
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c61
-rw-r--r--drivers/vfio/platform/vfio_amba.c1
-rw-r--r--drivers/vfio/vfio_iommu_type1.c4
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/vhost/vhost.c26
-rw-r--r--drivers/video/fbdev/Kconfig29
-rw-r--r--drivers/video/fbdev/au1200fb.c2
-rw-r--r--drivers/video/fbdev/core/Kconfig6
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c2
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c2
-rw-r--r--drivers/video/fbdev/offb.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c3
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/sis/init301.c3
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/hdmi.c10
-rw-r--r--drivers/video/logo/pnmtologo.c2
-rw-r--r--drivers/virt/Kconfig1
-rw-r--r--drivers/virt/acrn/ioreq.c2
-rw-r--r--drivers/virt/acrn/mm.c61
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c28
-rw-r--r--drivers/virt/vmgenid.c152
-rw-r--r--drivers/virtio/Kconfig10
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c14
-rw-r--r--drivers/virtio/virtio_debug.c114
-rw-r--r--drivers/virtio/virtio_mem.c1
-rw-r--r--drivers/virtio/virtio_ring.c7
-rw-r--r--drivers/watchdog/Kconfig69
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bd9576_wdt.c12
-rw-r--r--drivers/watchdog/cpu5wdt.c2
-rw-r--r--drivers/watchdog/lenovo_se10_wdt.c308
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/octeon-wdt-main.c6
-rw-r--r--drivers/watchdog/rti_wdt.c34
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/xen/grant-dma-ops.c2
-rw-r--r--drivers/xen/pvcalls-back.c6
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/zorro/zorro.c14
-rw-r--r--fs/9p/fid.h3
-rw-r--r--fs/9p/v9fs.h11
-rw-r--r--fs/9p/vfs_addr.c60
-rw-r--r--fs/9p/vfs_file.c2
-rw-r--r--fs/9p/vfs_inode.c60
-rw-r--r--fs/9p/vfs_inode_dotl.c34
-rw-r--r--fs/9p/vfs_super.c19
-rw-r--r--fs/afs/file.c8
-rw-r--r--fs/afs/internal.h6
-rw-r--r--fs/afs/rotate.c8
-rw-r--r--fs/afs/validation.c4
-rw-r--r--fs/afs/write.c189
-rw-r--r--fs/aio.c97
-rw-r--r--fs/anon_inodes.c33
-rw-r--r--fs/backing-file.c23
-rw-r--r--fs/bcachefs/Makefile3
-rw-r--r--fs/bcachefs/acl.c67
-rw-r--r--fs/bcachefs/alloc_background.c386
-rw-r--r--fs/bcachefs/alloc_background.h117
-rw-r--r--fs/bcachefs/alloc_foreground.c308
-rw-r--r--fs/bcachefs/alloc_foreground.h15
-rw-r--r--fs/bcachefs/alloc_types.h10
-rw-r--r--fs/bcachefs/backpointers.c330
-rw-r--r--fs/bcachefs/backpointers.h92
-rw-r--r--fs/bcachefs/bcachefs.h44
-rw-r--r--fs/bcachefs/bcachefs_format.h47
-rw-r--r--fs/bcachefs/bkey.c15
-rw-r--r--fs/bcachefs/bkey.h39
-rw-r--r--fs/bcachefs/bkey_methods.c28
-rw-r--r--fs/bcachefs/bkey_methods.h73
-rw-r--r--fs/bcachefs/bkey_sort.c79
-rw-r--r--fs/bcachefs/bkey_sort.h4
-rw-r--r--fs/bcachefs/bset.c43
-rw-r--r--fs/bcachefs/bset.h8
-rw-r--r--fs/bcachefs/btree_cache.c227
-rw-r--r--fs/bcachefs/btree_cache.h5
-rw-r--r--fs/bcachefs/btree_gc.c1439
-rw-r--r--fs/bcachefs/btree_gc.h44
-rw-r--r--fs/bcachefs/btree_io.c154
-rw-r--r--fs/bcachefs/btree_io.h2
-rw-r--r--fs/bcachefs/btree_iter.c399
-rw-r--r--fs/bcachefs/btree_iter.h92
-rw-r--r--fs/bcachefs/btree_journal_iter.c126
-rw-r--r--fs/bcachefs/btree_journal_iter.h10
-rw-r--r--fs/bcachefs/btree_key_cache.c144
-rw-r--r--fs/bcachefs/btree_key_cache_types.h8
-rw-r--r--fs/bcachefs/btree_locking.c207
-rw-r--r--fs/bcachefs/btree_locking.h4
-rw-r--r--fs/bcachefs/btree_node_scan.c524
-rw-r--r--fs/bcachefs/btree_node_scan.h11
-rw-r--r--fs/bcachefs/btree_node_scan_types.h31
-rw-r--r--fs/bcachefs/btree_trans_commit.c109
-rw-r--r--fs/bcachefs/btree_types.h143
-rw-r--r--fs/bcachefs/btree_update.c101
-rw-r--r--fs/bcachefs/btree_update.h14
-rw-r--r--fs/bcachefs/btree_update_interior.c482
-rw-r--r--fs/bcachefs/btree_update_interior.h36
-rw-r--r--fs/bcachefs/btree_write_buffer.c36
-rw-r--r--fs/bcachefs/buckets.c695
-rw-r--r--fs/bcachefs/buckets.h79
-rw-r--r--fs/bcachefs/chardev.c170
-rw-r--r--fs/bcachefs/checksum.c41
-rw-r--r--fs/bcachefs/checksum.h5
-rw-r--r--fs/bcachefs/compress.h8
-rw-r--r--fs/bcachefs/data_update.c83
-rw-r--r--fs/bcachefs/debug.c155
-rw-r--r--fs/bcachefs/dirent.c97
-rw-r--r--fs/bcachefs/dirent.h8
-rw-r--r--fs/bcachefs/disk_groups.c11
-rw-r--r--fs/bcachefs/ec.c417
-rw-r--r--fs/bcachefs/ec.h9
-rw-r--r--fs/bcachefs/errcode.h4
-rw-r--r--fs/bcachefs/error.c65
-rw-r--r--fs/bcachefs/error.h6
-rw-r--r--fs/bcachefs/extent_update.c2
-rw-r--r--fs/bcachefs/extents.c207
-rw-r--r--fs/bcachefs/extents.h37
-rw-r--r--fs/bcachefs/eytzinger.c305
-rw-r--r--fs/bcachefs/eytzinger.h81
-rw-r--r--fs/bcachefs/fs-common.c38
-rw-r--r--fs/bcachefs/fs-io-buffered.c14
-rw-r--r--fs/bcachefs/fs-io-direct.c25
-rw-r--r--fs/bcachefs/fs-io-pagecache.c2
-rw-r--r--fs/bcachefs/fs-io.c25
-rw-r--r--fs/bcachefs/fs-ioctl.c2
-rw-r--r--fs/bcachefs/fs.c124
-rw-r--r--fs/bcachefs/fsck.c472
-rw-r--r--fs/bcachefs/inode.c66
-rw-r--r--fs/bcachefs/inode.h23
-rw-r--r--fs/bcachefs/io_misc.c12
-rw-r--r--fs/bcachefs/io_read.c68
-rw-r--r--fs/bcachefs/io_write.c125
-rw-r--r--fs/bcachefs/io_write_types.h1
-rw-r--r--fs/bcachefs/journal.c139
-rw-r--r--fs/bcachefs/journal.h6
-rw-r--r--fs/bcachefs/journal_io.c240
-rw-r--r--fs/bcachefs/journal_io.h5
-rw-r--r--fs/bcachefs/journal_reclaim.c10
-rw-r--r--fs/bcachefs/journal_sb.c10
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c80
-rw-r--r--fs/bcachefs/journal_seq_blacklist.h2
-rw-r--r--fs/bcachefs/journal_types.h16
-rw-r--r--fs/bcachefs/logged_ops.c9
-rw-r--r--fs/bcachefs/lru.c4
-rw-r--r--fs/bcachefs/lru.h2
-rw-r--r--fs/bcachefs/mean_and_variance_test.c28
-rw-r--r--fs/bcachefs/migrate.c8
-rw-r--r--fs/bcachefs/move.c86
-rw-r--r--fs/bcachefs/movinggc.c4
-rw-r--r--fs/bcachefs/opts.c33
-rw-r--r--fs/bcachefs/opts.h28
-rw-r--r--fs/bcachefs/printbuf.c232
-rw-r--r--fs/bcachefs/printbuf.h53
-rw-r--r--fs/bcachefs/quota.c131
-rw-r--r--fs/bcachefs/quota.h4
-rw-r--r--fs/bcachefs/rebalance.c10
-rw-r--r--fs/bcachefs/recovery.c554
-rw-r--r--fs/bcachefs/recovery.h32
-rw-r--r--fs/bcachefs/recovery_passes.c245
-rw-r--r--fs/bcachefs/recovery_passes.h17
-rw-r--r--fs/bcachefs/recovery_passes_types.h (renamed from fs/bcachefs/recovery_types.h)11
-rw-r--r--fs/bcachefs/reflink.c75
-rw-r--r--fs/bcachefs/reflink.h16
-rw-r--r--fs/bcachefs/replicas.c39
-rw-r--r--fs/bcachefs/sb-clean.c35
-rw-r--r--fs/bcachefs/sb-counters.c20
-rw-r--r--fs/bcachefs/sb-downgrade.c32
-rw-r--r--fs/bcachefs/sb-errors.c2
-rw-r--r--fs/bcachefs/sb-errors_types.h12
-rw-r--r--fs/bcachefs/sb-members.c196
-rw-r--r--fs/bcachefs/sb-members.h186
-rw-r--r--fs/bcachefs/sb-members_types.h21
-rw-r--r--fs/bcachefs/snapshot.c274
-rw-r--r--fs/bcachefs/snapshot.h103
-rw-r--r--fs/bcachefs/str_hash.h70
-rw-r--r--fs/bcachefs/subvolume.c94
-rw-r--r--fs/bcachefs/subvolume.h10
-rw-r--r--fs/bcachefs/subvolume_types.h2
-rw-r--r--fs/bcachefs/super-io.c182
-rw-r--r--fs/bcachefs/super-io.h3
-rw-r--r--fs/bcachefs/super.c149
-rw-r--r--fs/bcachefs/super_types.h13
-rw-r--r--fs/bcachefs/sysfs.c195
-rw-r--r--fs/bcachefs/tests.c18
-rw-r--r--fs/bcachefs/thread_with_file.c15
-rw-r--r--fs/bcachefs/thread_with_file.h3
-rw-r--r--fs/bcachefs/trace.h97
-rw-r--r--fs/bcachefs/util.c204
-rw-r--r--fs/bcachefs/util.h27
-rw-r--r--fs/bcachefs/xattr.c47
-rw-r--r--fs/bcachefs/xattr.h2
-rw-r--r--fs/binfmt_elf.c12
-rw-r--r--fs/binfmt_elf_fdpic.c87
-rw-r--r--fs/btrfs/backref.c60
-rw-r--r--fs/btrfs/block-group.c3
-rw-r--r--fs/btrfs/block-rsv.c11
-rw-r--r--fs/btrfs/btrfs_inode.h10
-rw-r--r--fs/btrfs/compression.c119
-rw-r--r--fs/btrfs/compression.h42
-rw-r--r--fs/btrfs/ctree.c51
-rw-r--r--fs/btrfs/defrag.c2
-rw-r--r--fs/btrfs/delayed-inode.c5
-rw-r--r--fs/btrfs/delayed-ref.c345
-rw-r--r--fs/btrfs/delayed-ref.h148
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c161
-rw-r--r--fs/btrfs/export.c8
-rw-r--r--fs/btrfs/extent-io-tree.c58
-rw-r--r--fs/btrfs/extent-tree.c372
-rw-r--r--fs/btrfs/extent_io.c255
-rw-r--r--fs/btrfs/extent_io.h11
-rw-r--r--fs/btrfs/extent_map.c334
-rw-r--r--fs/btrfs/extent_map.h67
-rw-r--r--fs/btrfs/file-item.c88
-rw-r--r--fs/btrfs/file-item.h3
-rw-r--r--fs/btrfs/file.c331
-rw-r--r--fs/btrfs/free-space-cache.c8
-rw-r--r--fs/btrfs/fs.h5
-rw-r--r--fs/btrfs/inode-item.c16
-rw-r--r--fs/btrfs/inode.c954
-rw-r--r--fs/btrfs/ioctl.c156
-rw-r--r--fs/btrfs/locking.c26
-rw-r--r--fs/btrfs/locking.h18
-rw-r--r--fs/btrfs/lzo.c89
-rw-r--r--fs/btrfs/messages.c2
-rw-r--r--fs/btrfs/ordered-data.c9
-rw-r--r--fs/btrfs/ordered-data.h1
-rw-r--r--fs/btrfs/props.c2
-rw-r--r--fs/btrfs/qgroup.c104
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/ref-verify.c8
-rw-r--r--fs/btrfs/reflink.c56
-rw-r--r--fs/btrfs/relocation.c415
-rw-r--r--fs/btrfs/root-tree.c13
-rw-r--r--fs/btrfs/root-tree.h2
-rw-r--r--fs/btrfs/scrub.c30
-rw-r--r--fs/btrfs/send.c74
-rw-r--r--fs/btrfs/super.c33
-rw-r--r--fs/btrfs/sysfs.c8
-rw-r--r--fs/btrfs/tests/btrfs-tests.c3
-rw-r--r--fs/btrfs/tests/extent-map-tests.c221
-rw-r--r--fs/btrfs/transaction.c95
-rw-r--r--fs/btrfs/tree-checker.c32
-rw-r--r--fs/btrfs/tree-checker.h1
-rw-r--r--fs/btrfs/tree-log.c46
-rw-r--r--fs/btrfs/tree-mod-log.c2
-rw-r--r--fs/btrfs/volumes.c58
-rw-r--r--fs/btrfs/volumes.h10
-rw-r--r--fs/btrfs/xattr.c10
-rw-r--r--fs/btrfs/zlib.c112
-rw-r--r--fs/btrfs/zoned.c16
-rw-r--r--fs/btrfs/zstd.c80
-rw-r--r--fs/buffer.c191
-rw-r--r--fs/cachefiles/io.c76
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/ceph/addr.c28
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ceph/mds_client.h3
-rw-r--r--fs/coredump.c17
-rw-r--r--fs/cramfs/inode.c4
-rw-r--r--fs/crypto/hooks.c32
-rw-r--r--fs/crypto/inline_crypt.c6
-rw-r--r--fs/dax.c14
-rw-r--r--fs/dcache.c17
-rw-r--r--fs/debugfs/inode.c198
-rw-r--r--fs/direct-io.c1
-rw-r--r--fs/dlm/ast.c218
-rw-r--r--fs/dlm/ast.h13
-rw-r--r--fs/dlm/config.c8
-rw-r--r--fs/dlm/config.h2
-rw-r--r--fs/dlm/debug_fs.c327
-rw-r--r--fs/dlm/dir.c157
-rw-r--r--fs/dlm/dir.h3
-rw-r--r--fs/dlm/dlm_internal.h129
-rw-r--r--fs/dlm/lock.c1068
-rw-r--r--fs/dlm/lock.h12
-rw-r--r--fs/dlm/lockspace.c212
-rw-r--r--fs/dlm/lowcomms.c62
-rw-r--r--fs/dlm/lowcomms.h5
-rw-r--r--fs/dlm/member.c25
-rw-r--r--fs/dlm/memory.c18
-rw-r--r--fs/dlm/memory.h4
-rw-r--r--fs/dlm/midcomms.c67
-rw-r--r--fs/dlm/midcomms.h3
-rw-r--r--fs/dlm/rcom.c33
-rw-r--r--fs/dlm/recover.c149
-rw-r--r--fs/dlm/recover.h10
-rw-r--r--fs/dlm/recoverd.c142
-rw-r--r--fs/dlm/requestqueue.c43
-rw-r--r--fs/dlm/user.c135
-rw-r--r--fs/ecryptfs/crypto.c4
-rw-r--r--fs/ecryptfs/keystore.c4
-rw-r--r--fs/ecryptfs/main.c26
-rw-r--r--fs/efivarfs/internal.h5
-rw-r--r--fs/efivarfs/vars.c5
-rw-r--r--fs/erofs/Kconfig15
-rw-r--r--fs/erofs/Makefile5
-rw-r--r--fs/erofs/compress.h4
-rw-r--r--fs/erofs/data.c12
-rw-r--r--fs/erofs/decompressor.c15
-rw-r--r--fs/erofs/decompressor_zstd.c279
-rw-r--r--fs/erofs/dir.c4
-rw-r--r--fs/erofs/erofs_fs.h15
-rw-r--r--fs/erofs/fscache.c2
-rw-r--r--fs/erofs/internal.h39
-rw-r--r--fs/erofs/namei.c6
-rw-r--r--fs/erofs/pcpubuf.c148
-rw-r--r--fs/erofs/super.c161
-rw-r--r--fs/erofs/xattr.c37
-rw-r--r--fs/erofs/zdata.c6
-rw-r--r--fs/erofs/zmap.c24
-rw-r--r--fs/erofs/zutil.c (renamed from fs/erofs/utils.c)206
-rw-r--r--fs/eventpoll.c38
-rw-r--r--fs/exec.c20
-rw-r--r--fs/exfat/dir.c2
-rw-r--r--fs/exfat/file.c7
-rw-r--r--fs/ext2/Kconfig1
-rw-r--r--fs/ext2/dir.c1
-rw-r--r--fs/ext2/file.c8
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext4/acl.h5
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h9
-rw-r--r--fs/ext4/ext4_jbd2.c2
-rw-r--r--fs/ext4/extents.c3
-rw-r--r--fs/ext4/file.c9
-rw-r--r--fs/ext4/inode.c11
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/mballoc-test.c76
-rw-r--r--fs/ext4/mballoc.c322
-rw-r--r--fs/ext4/mballoc.h14
-rw-r--r--fs/ext4/move_extent.c4
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/page-io.c3
-rw-r--r--fs/ext4/readpage.c1
-rw-r--r--fs/ext4/super.c74
-rw-r--r--fs/ext4/sysfs.c174
-rw-r--r--fs/ext4/xattr.c145
-rw-r--r--fs/f2fs/checkpoint.c13
-rw-r--r--fs/f2fs/compress.c96
-rw-r--r--fs/f2fs/data.c236
-rw-r--r--fs/f2fs/f2fs.h57
-rw-r--r--fs/f2fs/file.c259
-rw-r--r--fs/f2fs/gc.c11
-rw-r--r--fs/f2fs/gc.h1
-rw-r--r--fs/f2fs/inline.c36
-rw-r--r--fs/f2fs/inode.c22
-rw-r--r--fs/f2fs/node.c20
-rw-r--r--fs/f2fs/recovery.c3
-rw-r--r--fs/f2fs/segment.c132
-rw-r--r--fs/f2fs/super.c82
-rw-r--r--fs/f2fs/sysfs.c21
-rw-r--r--fs/fat/dir.c12
-rw-r--r--fs/fcntl.c20
-rw-r--r--fs/fhandle.c6
-rw-r--r--fs/file.c19
-rw-r--r--fs/freevxfs/vxfs_super.c69
-rw-r--r--fs/fs-writeback.c57
-rw-r--r--fs/fuse/cuse.c4
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/dir.c1
-rw-r--r--fs/fuse/file.c22
-rw-r--r--fs/fuse/fuse_i.h7
-rw-r--r--fs/fuse/inode.c1
-rw-r--r--fs/fuse/ioctl.c60
-rw-r--r--fs/fuse/iomode.c60
-rw-r--r--fs/fuse/passthrough.c2
-rw-r--r--fs/fuse/virtio_fs.c75
-rw-r--r--fs/gfs2/aops.c4
-rw-r--r--fs/gfs2/bmap.c7
-rw-r--r--fs/gfs2/dir.c31
-rw-r--r--fs/gfs2/file.c59
-rw-r--r--fs/gfs2/glock.c194
-rw-r--r--fs/gfs2/glock.h3
-rw-r--r--fs/gfs2/glops.c37
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/gfs2/lock_dlm.c40
-rw-r--r--fs/gfs2/log.c5
-rw-r--r--fs/gfs2/meta_io.c16
-rw-r--r--fs/gfs2/ops_fstype.c51
-rw-r--r--fs/gfs2/rgrp.c10
-rw-r--r--fs/gfs2/super.c28
-rw-r--r--fs/gfs2/sys.c4
-rw-r--r--fs/gfs2/util.c63
-rw-r--r--fs/gfs2/util.h6
-rw-r--r--fs/gfs2/xattr.c28
-rw-r--r--fs/hfsplus/xattr.c22
-rw-r--r--fs/hugetlbfs/inode.c16
-rw-r--r--fs/internal.h3
-rw-r--r--fs/ioctl.c7
-rw-r--r--fs/iomap/Makefile2
-rw-r--r--fs/iomap/buffered-io.c119
-rw-r--r--fs/isofs/Makefile7
-rw-r--r--fs/isofs/compress.c4
-rw-r--r--fs/isofs/inode.c473
-rw-r--r--fs/jbd2/checkpoint.c24
-rw-r--r--fs/jbd2/commit.c3
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jffs2/xattr.c3
-rw-r--r--fs/jfs/jfs_logmgr.c4
-rw-r--r--fs/kernfs/file.c9
-rw-r--r--fs/libfs.c55
-rw-r--r--fs/lockd/host.c1
-rw-r--r--fs/minix/inode.c48
-rw-r--r--fs/namei.c32
-rw-r--r--fs/netfs/Makefile3
-rw-r--r--fs/netfs/buffered_read.c40
-rw-r--r--fs/netfs/buffered_write.c850
-rw-r--r--fs/netfs/direct_read.c3
-rw-r--r--fs/netfs/direct_write.c56
-rw-r--r--fs/netfs/fscache_io.c14
-rw-r--r--fs/netfs/internal.h55
-rw-r--r--fs/netfs/io.c162
-rw-r--r--fs/netfs/main.c55
-rw-r--r--fs/netfs/misc.c10
-rw-r--r--fs/netfs/objects.c81
-rw-r--r--fs/netfs/output.c478
-rw-r--r--fs/netfs/stats.c17
-rw-r--r--fs/netfs/write_collect.c808
-rw-r--r--fs/netfs/write_issue.c684
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/fscache.h6
-rw-r--r--fs/nfs/inode.c7
-rw-r--r--fs/nfs/iostat.h5
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nfsd/export.c16
-rw-r--r--fs/nfsd/filecache.c4
-rw-r--r--fs/nfsd/netlink.c66
-rw-r--r--fs/nfsd/netlink.h10
-rw-r--r--fs/nfsd/netns.h1
-rw-r--r--fs/nfsd/nfs4callback.c53
-rw-r--r--fs/nfsd/nfs4proc.c79
-rw-r--r--fs/nfsd/nfs4state.c231
-rw-r--r--fs/nfsd/nfs4xdr.c132
-rw-r--r--fs/nfsd/nfsctl.c526
-rw-r--r--fs/nfsd/nfsd.h3
-rw-r--r--fs/nfsd/nfsfh.c4
-rw-r--r--fs/nfsd/nfssvc.c11
-rw-r--r--fs/nfsd/state.h8
-rw-r--r--fs/nfsd/stats.c42
-rw-r--r--fs/nfsd/stats.h5
-rw-r--r--fs/nfsd/trace.h100
-rw-r--r--fs/nfsd/vfs.c5
-rw-r--r--fs/nfsd/vfs.h8
-rw-r--r--fs/nfsd/xdr4.h24
-rw-r--r--fs/nilfs2/btree.c23
-rw-r--r--fs/nilfs2/dir.c3
-rw-r--r--fs/nilfs2/gcinode.c1
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/nilfs2/nilfs.h4
-rw-r--r--fs/nilfs2/recovery.c5
-rw-r--r--fs/nilfs2/segment.c10
-rw-r--r--fs/nilfs2/super.c388
-rw-r--r--fs/nilfs2/the_nilfs.c25
-rw-r--r--fs/nilfs2/the_nilfs.h6
-rw-r--r--fs/notify/dnotify/dnotify.c4
-rw-r--r--fs/notify/fanotify/fanotify_user.c141
-rw-r--r--fs/notify/fdinfo.c20
-rw-r--r--fs/notify/fsnotify.c27
-rw-r--r--fs/notify/fsnotify.h39
-rw-r--r--fs/notify/inotify/inotify_user.c2
-rw-r--r--fs/notify/mark.c174
-rw-r--r--fs/ntfs3/Kconfig9
-rw-r--r--fs/ntfs3/bitmap.c4
-rw-r--r--fs/ntfs3/dir.c7
-rw-r--r--fs/ntfs3/file.c8
-rw-r--r--fs/ntfs3/fsntfs.c2
-rw-r--r--fs/ntfs3/index.c11
-rw-r--r--fs/ntfs3/inode.c20
-rw-r--r--fs/ntfs3/ntfs_fs.h8
-rw-r--r--fs/ntfs3/super.c67
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/cluster/tcp.c5
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c12
-rw-r--r--fs/ocfs2/export.c12
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/ioctl.c1
-rw-r--r--fs/ocfs2/localalloc.c34
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/ocfs2/ocfs2_fs.h3
-rw-r--r--fs/ocfs2/refcounttree.c2
-rw-r--r--fs/ocfs2/reservations.c2
-rw-r--r--fs/ocfs2/resize.c8
-rw-r--r--fs/ocfs2/suballoc.c117
-rw-r--r--fs/ocfs2/suballoc.h6
-rw-r--r--fs/open.c11
-rw-r--r--fs/openpromfs/inode.c8
-rw-r--r--fs/orangefs/dcache.c4
-rw-r--r--fs/orangefs/namei.c26
-rw-r--r--fs/orangefs/super.c20
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/dir.c152
-rw-r--r--fs/overlayfs/file.c3
-rw-r--r--fs/overlayfs/inode.c1
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/overlayfs/params.c4
-rw-r--r--fs/overlayfs/super.c2
-rw-r--r--fs/overlayfs/util.c2
-rw-r--r--fs/pidfs.c28
-rw-r--r--fs/proc/Makefile2
-rw-r--r--fs/proc/bootconfig.c12
-rw-r--r--fs/proc/fd.c46
-rw-r--r--fs/proc/inode.c10
-rw-r--r--fs/proc/meminfo.c3
-rw-r--r--fs/proc/page.c76
-rw-r--r--fs/proc/proc_net.c1
-rw-r--r--fs/proc/proc_sysctl.c21
-rw-r--r--fs/proc/task_mmu.c117
-rw-r--r--fs/proc/vmcore.c7
-rw-r--r--fs/qnx6/inode.c117
-rw-r--r--fs/quota/dquot.c33
-rw-r--r--fs/ramfs/file-mmu.c2
-rw-r--r--fs/read_write.c15
-rw-r--r--fs/reiserfs/README16
-rw-r--r--fs/reiserfs/inode.c16
-rw-r--r--fs/reiserfs/item_ops.c13
-rw-r--r--fs/reiserfs/journal.c7
-rw-r--r--fs/remap_range.c4
-rw-r--r--fs/romfs/super.c2
-rw-r--r--fs/seq_file.c13
-rw-r--r--fs/signalfd.c44
-rw-r--r--fs/smb/client/Kconfig1
-rw-r--r--fs/smb/client/cached_dir.c6
-rw-r--r--fs/smb/client/cifs_debug.c6
-rw-r--r--fs/smb/client/cifsfs.c138
-rw-r--r--fs/smb/client/cifsfs.h11
-rw-r--r--fs/smb/client/cifsglob.h89
-rw-r--r--fs/smb/client/cifspdu.h4
-rw-r--r--fs/smb/client/cifsproto.h41
-rw-r--r--fs/smb/client/cifssmb.c126
-rw-r--r--fs/smb/client/connect.c180
-rw-r--r--fs/smb/client/dfs.c51
-rw-r--r--fs/smb/client/dfs.h33
-rw-r--r--fs/smb/client/dfs_cache.c53
-rw-r--r--fs/smb/client/dir.c22
-rw-r--r--fs/smb/client/file.c2855
-rw-r--r--fs/smb/client/fs_context.c39
-rw-r--r--fs/smb/client/fs_context.h16
-rw-r--r--fs/smb/client/fscache.c145
-rw-r--r--fs/smb/client/fscache.h54
-rw-r--r--fs/smb/client/inode.c51
-rw-r--r--fs/smb/client/ioctl.c6
-rw-r--r--fs/smb/client/misc.c20
-rw-r--r--fs/smb/client/smb1ops.c4
-rw-r--r--fs/smb/client/smb2misc.c14
-rw-r--r--fs/smb/client/smb2ops.c124
-rw-r--r--fs/smb/client/smb2pdu.c207
-rw-r--r--fs/smb/client/smb2pdu.h14
-rw-r--r--fs/smb/client/smb2proto.h5
-rw-r--r--fs/smb/client/smb2transport.c4
-rw-r--r--fs/smb/client/trace.h240
-rw-r--r--fs/smb/client/transport.c24
-rw-r--r--fs/smb/common/smb2pdu.h35
-rw-r--r--fs/smb/server/ksmbd_netlink.h38
-rw-r--r--fs/smb/server/mgmt/share_config.c7
-rw-r--r--fs/smb/server/oplock.c65
-rw-r--r--fs/smb/server/server.c13
-rw-r--r--fs/smb/server/smb2ops.c10
-rw-r--r--fs/smb/server/smb2pdu.c24
-rw-r--r--fs/smb/server/smb2pdu.h18
-rw-r--r--fs/smb/server/smb_common.c4
-rw-r--r--fs/smb/server/transport_ipc.c37
-rw-r--r--fs/smb/server/transport_tcp.c4
-rw-r--r--fs/smb/server/vfs.c5
-rw-r--r--fs/smb/server/vfs_cache.c28
-rw-r--r--fs/smb/server/vfs_cache.h2
-rw-r--r--fs/splice.c4
-rw-r--r--fs/squashfs/file.c6
-rw-r--r--fs/squashfs/file_direct.c3
-rw-r--r--fs/squashfs/inode.c5
-rw-r--r--fs/squashfs/namei.c14
-rw-r--r--fs/squashfs/symlink.c35
-rw-r--r--fs/stat.c1
-rw-r--r--fs/super.c25
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/timerfd.c36
-rw-r--r--fs/tracefs/event_inode.c169
-rw-r--r--fs/tracefs/inode.c288
-rw-r--r--fs/tracefs/internal.h14
-rw-r--r--fs/udf/file.c20
-rw-r--r--fs/udf/inode.c65
-rw-r--r--fs/udf/super.c8
-rw-r--r--fs/udf/symlink.c34
-rw-r--r--fs/udf/udftime.c11
-rw-r--r--fs/unicode/Makefile14
-rw-r--r--fs/userfaultfd.c53
-rw-r--r--fs/vboxsf/file.c1
-rw-r--r--fs/vboxsf/super.c9
-rw-r--r--fs/vboxsf/utils.c3
-rw-r--r--fs/verity/init.c7
-rw-r--r--fs/xfs/Makefile22
-rw-r--r--fs/xfs/libxfs/xfs_ag.c12
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c24
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr.c233
-rw-r--r--fs/xfs/libxfs/xfs_attr.h46
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c154
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h4
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c102
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.h8
-rw-r--r--fs/xfs/libxfs/xfs_attr_sf.h1
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c377
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h13
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c189
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h34
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h37
-rw-r--r--fs/xfs/libxfs/xfs_defer.c12
-rw-r--r--fs/xfs/libxfs/xfs_defer.h10
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c281
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h23
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c42
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c18
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c100
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c44
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h15
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h4
-rw-r--r--fs/xfs/libxfs/xfs_exchmaps.c1235
-rw-r--r--fs/xfs/libxfs/xfs_exchmaps.h124
-rw-r--r--fs/xfs/libxfs/xfs_format.h34
-rw-r--r--fs/xfs/libxfs/xfs_fs.h158
-rw-r--r--fs/xfs/libxfs/xfs_health.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c56
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h5
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c8
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c57
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h6
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h89
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h4
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c46
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h6
-rw-r--r--fs/xfs/libxfs/xfs_parent.c379
-rw-r--r--fs/xfs/libxfs/xfs_parent.h110
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c57
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h17
-rw-r--r--fs/xfs/libxfs/xfs_sb.c49
-rw-r--r--fs/xfs/libxfs/xfs_sb.h5
-rw-r--r--fs/xfs/libxfs/xfs_shared.h6
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c54
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.h8
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c326
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.c121
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.h29
-rw-r--r--fs/xfs/scrub/agheader.c43
-rw-r--r--fs/xfs/scrub/agheader_repair.c879
-rw-r--r--fs/xfs/scrub/agino_bitmap.h49
-rw-r--r--fs/xfs/scrub/alloc_repair.c2
-rw-r--r--fs/xfs/scrub/attr.c214
-rw-r--r--fs/xfs/scrub/attr.h7
-rw-r--r--fs/xfs/scrub/attr_repair.c1663
-rw-r--r--fs/xfs/scrub/attr_repair.h15
-rw-r--r--fs/xfs/scrub/bitmap.c22
-rw-r--r--fs/xfs/scrub/common.c45
-rw-r--r--fs/xfs/scrub/common.h27
-rw-r--r--fs/xfs/scrub/dab_bitmap.h37
-rw-r--r--fs/xfs/scrub/dabtree.c24
-rw-r--r--fs/xfs/scrub/dabtree.h3
-rw-r--r--fs/xfs/scrub/dir.c377
-rw-r--r--fs/xfs/scrub/dir_repair.c1958
-rw-r--r--fs/xfs/scrub/dirtree.c985
-rw-r--r--fs/xfs/scrub/dirtree.h178
-rw-r--r--fs/xfs/scrub/dirtree_repair.c821
-rw-r--r--fs/xfs/scrub/findparent.c454
-rw-r--r--fs/xfs/scrub/findparent.h56
-rw-r--r--fs/xfs/scrub/fscounters.c14
-rw-r--r--fs/xfs/scrub/fscounters.h1
-rw-r--r--fs/xfs/scrub/fscounters_repair.c12
-rw-r--r--fs/xfs/scrub/health.c1
-rw-r--r--fs/xfs/scrub/ino_bitmap.h37
-rw-r--r--fs/xfs/scrub/inode.c19
-rw-r--r--fs/xfs/scrub/inode_repair.c153
-rw-r--r--fs/xfs/scrub/iscan.c67
-rw-r--r--fs/xfs/scrub/iscan.h16
-rw-r--r--fs/xfs/scrub/listxattr.c320
-rw-r--r--fs/xfs/scrub/listxattr.h19
-rw-r--r--fs/xfs/scrub/nlinks.c133
-rw-r--r--fs/xfs/scrub/nlinks.h7
-rw-r--r--fs/xfs/scrub/nlinks_repair.c186
-rw-r--r--fs/xfs/scrub/orphanage.c627
-rw-r--r--fs/xfs/scrub/orphanage.h86
-rw-r--r--fs/xfs/scrub/parent.c700
-rw-r--r--fs/xfs/scrub/parent_repair.c1612
-rw-r--r--fs/xfs/scrub/quota_repair.c6
-rw-r--r--fs/xfs/scrub/readdir.c140
-rw-r--r--fs/xfs/scrub/readdir.h3
-rw-r--r--fs/xfs/scrub/reap.c445
-rw-r--r--fs/xfs/scrub/reap.h21
-rw-r--r--fs/xfs/scrub/repair.c127
-rw-r--r--fs/xfs/scrub/repair.h31
-rw-r--r--fs/xfs/scrub/rmap_repair.c24
-rw-r--r--fs/xfs/scrub/rtbitmap_repair.c2
-rw-r--r--fs/xfs/scrub/rtsummary.c33
-rw-r--r--fs/xfs/scrub/rtsummary.h37
-rw-r--r--fs/xfs/scrub/rtsummary_repair.c175
-rw-r--r--fs/xfs/scrub/scrub.c310
-rw-r--r--fs/xfs/scrub/scrub.h91
-rw-r--r--fs/xfs/scrub/stats.c1
-rw-r--r--fs/xfs/scrub/symlink.c13
-rw-r--r--fs/xfs/scrub/symlink_repair.c509
-rw-r--r--fs/xfs/scrub/tempexch.h22
-rw-r--r--fs/xfs/scrub/tempfile.c851
-rw-r--r--fs/xfs/scrub/tempfile.h48
-rw-r--r--fs/xfs/scrub/trace.c6
-rw-r--r--fs/xfs/scrub/trace.h1307
-rw-r--r--fs/xfs/scrub/xfarray.c27
-rw-r--r--fs/xfs/scrub/xfarray.h6
-rw-r--r--fs/xfs/scrub/xfblob.c168
-rw-r--r--fs/xfs/scrub/xfblob.h50
-rw-r--r--fs/xfs/scrub/xfile.c14
-rw-r--r--fs/xfs/scrub/xfile.h6
-rw-r--r--fs/xfs/scrub/xfs_scrub.h6
-rw-r--r--fs/xfs/xfs_acl.c17
-rw-r--r--fs/xfs/xfs_aops.c67
-rw-r--r--fs/xfs/xfs_attr_item.c554
-rw-r--r--fs/xfs/xfs_attr_item.h10
-rw-r--r--fs/xfs/xfs_attr_list.c120
-rw-r--r--fs/xfs/xfs_bmap_item.c4
-rw-r--r--fs/xfs/xfs_bmap_util.c67
-rw-r--r--fs/xfs/xfs_bmap_util.h2
-rw-r--r--fs/xfs/xfs_buf.c7
-rw-r--r--fs/xfs/xfs_dir2_readdir.c25
-rw-r--r--fs/xfs/xfs_discard.c153
-rw-r--r--fs/xfs/xfs_dquot.c47
-rw-r--r--fs/xfs/xfs_dquot.h1
-rw-r--r--fs/xfs/xfs_error.c3
-rw-r--r--fs/xfs/xfs_exchmaps_item.c614
-rw-r--r--fs/xfs/xfs_exchmaps_item.h64
-rw-r--r--fs/xfs/xfs_exchrange.c804
-rw-r--r--fs/xfs/xfs_exchrange.h38
-rw-r--r--fs/xfs/xfs_export.c4
-rw-r--r--fs/xfs/xfs_export.h2
-rw-r--r--fs/xfs/xfs_extent_busy.c80
-rw-r--r--fs/xfs/xfs_file.c100
-rw-r--r--fs/xfs/xfs_file.h15
-rw-r--r--fs/xfs/xfs_fsmap.c4
-rw-r--r--fs/xfs/xfs_fsops.c29
-rw-r--r--fs/xfs/xfs_fsops.h2
-rw-r--r--fs/xfs/xfs_handle.c952
-rw-r--r--fs/xfs/xfs_handle.h33
-rw-r--r--fs/xfs/xfs_health.c1
-rw-r--r--fs/xfs/xfs_icache.c12
-rw-r--r--fs/xfs/xfs_inode.c511
-rw-r--r--fs/xfs/xfs_inode.h41
-rw-r--r--fs/xfs/xfs_ioctl.c625
-rw-r--r--fs/xfs/xfs_ioctl.h28
-rw-r--r--fs/xfs/xfs_ioctl32.c1
-rw-r--r--fs/xfs/xfs_iomap.c105
-rw-r--r--fs/xfs/xfs_iops.c23
-rw-r--r--fs/xfs/xfs_iops.h7
-rw-r--r--fs/xfs/xfs_itable.c8
-rw-r--r--fs/xfs/xfs_iwalk.c4
-rw-r--r--fs/xfs/xfs_linux.h5
-rw-r--r--fs/xfs/xfs_log.c28
-rw-r--r--fs/xfs/xfs_log.h2
-rw-r--r--fs/xfs/xfs_log_cil.c2
-rw-r--r--fs/xfs/xfs_log_priv.h8
-rw-r--r--fs/xfs/xfs_log_recover.c85
-rw-r--r--fs/xfs/xfs_mount.c109
-rw-r--r--fs/xfs/xfs_mount.h88
-rw-r--r--fs/xfs/xfs_qm.c4
-rw-r--r--fs/xfs/xfs_qm.h2
-rw-r--r--fs/xfs/xfs_quota.h23
-rw-r--r--fs/xfs/xfs_reflink.c48
-rw-r--r--fs/xfs/xfs_rtalloc.c29
-rw-r--r--fs/xfs/xfs_super.c82
-rw-r--r--fs/xfs/xfs_symlink.c91
-rw-r--r--fs/xfs/xfs_trace.c3
-rw-r--r--fs/xfs/xfs_trace.h440
-rw-r--r--fs/xfs/xfs_trans.c72
-rw-r--r--fs/xfs/xfs_trans.h9
-rw-r--r--fs/xfs/xfs_trans_dquot.c15
-rw-r--r--fs/xfs/xfs_xattr.c92
-rw-r--r--fs/xfs/xfs_xattr.h3
-rw-r--r--fs/zonefs/super.c2
-rw-r--r--include/acpi/acpi_bus.h49
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl1.h8
-rw-r--r--include/acpi/actbl2.h516
-rw-r--r--include/acpi/actbl3.h18
-rw-r--r--include/acpi/nhlt.h181
-rw-r--r--include/acpi/platform/aclinuxex.h19
-rw-r--r--include/asm-generic/Kbuild2
-rw-r--r--include/asm-generic/barrier.h8
-rw-r--r--include/asm-generic/bitops/__ffs.h4
-rw-r--r--include/asm-generic/bitops/__fls.h4
-rw-r--r--include/asm-generic/bitops/builtin-__ffs.h2
-rw-r--r--include/asm-generic/bitops/builtin-__fls.h2
-rw-r--r--include/asm-generic/bug.h7
-rw-r--r--include/asm-generic/codetag.lds.h14
-rw-r--r--include/asm-generic/export.h11
-rw-r--r--include/asm-generic/hyperv-tlfs.h19
-rw-r--r--include/asm-generic/io.h2
-rw-r--r--include/asm-generic/mshyperv.h14
-rw-r--r--include/asm-generic/page.h103
-rw-r--r--include/asm-generic/pgalloc.h35
-rw-r--r--include/asm-generic/sections.h5
-rw-r--r--include/asm-generic/video.h (renamed from include/asm-generic/fb.h)17
-rw-r--r--include/asm-generic/vmlinux.lds.h30
-rw-r--r--include/asm-generic/vtime.h1
-rw-r--r--include/crypto/acompress.h73
-rw-r--r--include/crypto/aead.h21
-rw-r--r--include/crypto/aes.h5
-rw-r--r--include/crypto/akcipher.h78
-rw-r--r--include/crypto/algapi.h3
-rw-r--r--include/crypto/ecc_curve.h2
-rw-r--r--include/crypto/ecdh.h1
-rw-r--r--include/crypto/hash.h29
-rw-r--r--include/crypto/if_alg.h3
-rw-r--r--include/crypto/internal/acompress.h6
-rw-r--r--include/crypto/internal/cryptouser.h16
-rw-r--r--include/crypto/internal/ecc.h16
-rw-r--r--include/crypto/internal/scompress.h1
-rw-r--r--include/crypto/kpp.h58
-rw-r--r--include/crypto/rng.h51
-rw-r--r--include/crypto/skcipher.h32
-rw-r--r--include/drm/amd_asic_type.h3
-rw-r--r--include/drm/bridge/samsung-dsim.h4
-rw-r--r--include/drm/display/drm_dp.h11
-rw-r--r--include/drm/display/drm_dp_helper.h51
-rw-r--r--include/drm/display/drm_dp_mst_helper.h31
-rw-r--r--include/drm/display/drm_dsc.h3
-rw-r--r--include/drm/drm_buddy.h16
-rw-r--r--include/drm/drm_client.h10
-rw-r--r--include/drm/drm_debugfs_crc.h8
-rw-r--r--include/drm/drm_edid.h45
-rw-r--r--include/drm/drm_encoder_slave.h91
-rw-r--r--include/drm/drm_fb_dma_helper.h5
-rw-r--r--include/drm/drm_format_helper.h1
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_shmem_helper.h7
-rw-r--r--include/drm/drm_gem_vram_helper.h1
-rw-r--r--include/drm/drm_kunit_helpers.h2
-rw-r--r--include/drm/drm_lease.h2
-rw-r--r--include/drm/drm_mipi_dsi.h15
-rw-r--r--include/drm/drm_mode_config.h15
-rw-r--r--include/drm/drm_modeset_helper_vtables.h39
-rw-r--r--include/drm/drm_of.h1
-rw-r--r--include/drm/drm_panic.h152
-rw-r--r--include/drm/drm_plane.h10
-rw-r--r--include/drm/drm_print.h4
-rw-r--r--include/drm/drm_probe_helper.h6
-rw-r--r--include/drm/drm_suballoc.h2
-rw-r--r--include/drm/drm_vblank.h1
-rw-r--r--include/drm/gma_drm.h13
-rw-r--r--include/drm/i2c/ch7006.h1
-rw-r--r--include/drm/i2c/sil164.h1
-rw-r--r--include/drm/i915_component.h2
-rw-r--r--include/drm/i915_gsc_proxy_mei_interface.h4
-rw-r--r--include/drm/i915_hdcp_interface.h18
-rw-r--r--include/drm/i915_pciids.h4
-rw-r--r--include/drm/i915_pxp_tee_interface.h27
-rw-r--r--include/drm/ttm/ttm_bo.h17
-rw-r--r--include/drm/ttm/ttm_caching.h2
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h7
-rw-r--r--include/drm/ttm/ttm_kmap_iter.h4
-rw-r--r--include/drm/ttm/ttm_pool.h5
-rw-r--r--include/drm/ttm/ttm_resource.h6
-rw-r--r--include/drm/xe_pciids.h7
-rw-r--r--include/dt-bindings/arm/mhuv3-dt.h13
-rw-r--r--include/dt-bindings/arm/qcom,ids.h1
-rw-r--r--include/dt-bindings/clock/google,gs101.h116
-rw-r--r--include/dt-bindings/clock/loongson,ls2k-clk.h54
-rw-r--r--include/dt-bindings/clock/nxp,imx95-clock.h28
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h4
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h70
-rw-r--r--include/dt-bindings/clock/rk3568-cru.h1
-rw-r--r--include/dt-bindings/leds/common.h1
-rw-r--r--include/dt-bindings/phy/phy-qcom-qmp.h4
-rw-r--r--include/dt-bindings/pinctrl/samsung.h95
-rw-r--r--include/dt-bindings/reset/rockchip,rk3588-cru.h2
-rw-r--r--include/dt-bindings/reset/st,stm32mp25-rcc.h2
-rw-r--r--include/dt-bindings/thermal/mediatek,lvts-thermal.h26
-rw-r--r--include/keys/trusted_dcp.h11
-rw-r--r--include/keys/trusted_tpm.h2
-rw-r--r--include/kunit/test.h24
-rw-r--r--include/kunit/try-catch.h3
-rw-r--r--include/kvm/arm_pmu.h2
-rw-r--r--include/kvm/arm_vgic.h16
-rw-r--r--include/linux/acpi.h21
-rw-r--r--include/linux/acpi_iort.h4
-rw-r--r--include/linux/alloc_tag.h216
-rw-r--r--include/linux/amba/bus.h11
-rw-r--r--include/linux/amd-pstate.h20
-rw-r--r--include/linux/anon_inodes.h5
-rw-r--r--include/linux/arch_topology.h8
-rw-r--r--include/linux/arm_ffa.h27
-rw-r--r--include/linux/backing-file.h3
-rw-r--r--include/linux/bio.h11
-rw-r--r--include/linux/bitmap.h95
-rw-r--r--include/linux/bitops.h48
-rw-r--r--include/linux/blk-mq.h85
-rw-r--r--include/linux/blk_types.h55
-rw-r--r--include/linux/blkdev.h196
-rw-r--r--include/linux/bootconfig.h8
-rw-r--r--include/linux/bpf.h80
-rw-r--r--include/linux/bpf_crypto.h24
-rw-r--r--include/linux/bpf_verifier.h11
-rw-r--r--include/linux/bpfptr.h5
-rw-r--r--include/linux/bsg-lib.h3
-rw-r--r--include/linux/btf_ids.h2
-rw-r--r--include/linux/buffer_head.h52
-rw-r--r--include/linux/bus/stm32_firewall_device.h142
-rw-r--r--include/linux/cc_platform.h12
-rw-r--r--include/linux/ceph/ceph_debug.h18
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk.h5
-rw-r--r--include/linux/closure.h12
-rw-r--r--include/linux/cmpxchg-emu.h15
-rw-r--r--include/linux/codetag.h81
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h13
-rw-r--r--include/linux/compiler_types.h29
-rw-r--r--include/linux/coredump.h2
-rw-r--r--include/linux/cpu.h11
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h73
-rw-r--r--include/linux/cpuset.h3
-rw-r--r--include/linux/crash_core.h15
-rw-r--r--include/linux/cxl-event.h39
-rw-r--r--include/linux/damon.h2
-rw-r--r--include/linux/dev_printk.h25
-rw-r--r--include/linux/devcoredump.h5
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/devm-helpers.h4
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h18
-rw-r--r--include/linux/dma-fence-chain.h6
-rw-r--r--include/linux/dma-map-ops.h20
-rw-r--r--include/linux/dma-mapping.h105
-rw-r--r--include/linux/dma/imx-dma.h1
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/dynamic_queue_limits.h50
-rw-r--r--include/linux/efi.h9
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/energy_model.h6
-rw-r--r--include/linux/etherdevice.h37
-rw-r--r--include/linux/ethtool.h27
-rw-r--r--include/linux/evm.h8
-rw-r--r--include/linux/execmem.h132
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/fb.h6
-rw-r--r--include/linux/fdtable.h15
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h52
-rw-r--r--include/linux/find.h29
-rw-r--r--include/linux/firewire.h3
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h4
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h55
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h10
-rw-r--r--include/linux/fortify-string.h14
-rw-r--r--include/linux/fprobe.h18
-rw-r--r--include/linux/framer/framer.h4
-rw-r--r--include/linux/fs.h131
-rw-r--r--include/linux/fs_parser.h4
-rw-r--r--include/linux/fscache.h22
-rw-r--r--include/linux/fsnotify.h21
-rw-r--r--include/linux/fsnotify_backend.h97
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/genetlink.h19
-rw-r--r--include/linux/genl_magic_struct.h2
-rw-r--r--include/linux/gfp.h126
-rw-r--r--include/linux/gfp_types.h13
-rw-r--r--include/linux/gpio.h21
-rw-r--r--include/linux/gpio/driver.h21
-rw-r--r--include/linux/gpio/property.h5
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/hid_bpf.h9
-rw-r--r--include/linux/huge_mm.h129
-rw-r--r--include/linux/hugetlb.h103
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/i2c-mux.h3
-rw-r--r--include/linux/ieee80211.h30
-rw-r--r--include/linux/instrumented.h35
-rw-r--r--include/linux/integrity.h34
-rw-r--r--include/linux/intel_rapl.h32
-rw-r--r--include/linux/intel_tpmi.h12
-rw-r--r--include/linux/interrupt.h3
-rw-r--r--include/linux/io.h9
-rw-r--r--include/linux/io_uring.h6
-rw-r--r--include/linux/io_uring/cmd.h24
-rw-r--r--include/linux/io_uring/net.h18
-rw-r--r--include/linux/io_uring_types.h22
-rw-r--r--include/linux/iommu.h16
-rw-r--r--include/linux/iova.h5
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/riscv-aplic.h145
-rw-r--r--include/linux/irqchip/riscv-imsic.h87
-rw-r--r--include/linux/irqdesc.h16
-rw-r--r--include/linux/irqdomain_defs.h1
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/jbd2.h20
-rw-r--r--include/linux/jump_label.h3
-rw-r--r--include/linux/kernel_stat.h8
-rw-r--r--include/linux/kexec.h17
-rw-r--r--include/linux/kfifo.h9
-rw-r--r--include/linux/kmsan-checks.h15
-rw-r--r--include/linux/kprobes.h7
-rw-r--r--include/linux/ksm.h35
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/libata.h17
-rw-r--r--include/linux/linkmode.h27
-rw-r--r--include/linux/livepatch.h6
-rw-r--r--include/linux/lockdep.h5
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/marvell_phy.h3
-rw-r--r--include/linux/math64.h8
-rw-r--r--include/linux/memcontrol.h151
-rw-r--r--include/linux/memory-tiers.h13
-rw-r--r--include/linux/mempolicy.h5
-rw-r--r--include/linux/mempool.h73
-rw-r--r--include/linux/mfd/axp20x.h98
-rw-r--r--include/linux/mfd/intel-m10-bmc.h1
-rw-r--r--include/linux/mfd/rk808.h144
-rw-r--r--include/linux/mfd/rohm-bd71828.h5
-rw-r--r--include/linux/mfd/tps6594.h351
-rw-r--r--include/linux/mhi.h18
-rw-r--r--include/linux/mlx5/cq.h7
-rw-r--r--include/linux/mlx5/device.h8
-rw-r--r--include/linux/mlx5/driver.h10
-rw-r--r--include/linux/mlx5/mlx5_ifc.h63
-rw-r--r--include/linux/mm.h235
-rw-r--r--include/linux/mm_types.h35
-rw-r--r--include/linux/mman.h8
-rw-r--r--include/linux/mmap_lock.h10
-rw-r--r--include/linux/mmc/host.h4
-rw-r--r--include/linux/mmc/sdio_func.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmc/slot-gpio.h6
-rw-r--r--include/linux/mmu_notifier.h44
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/moduleloader.h15
-rw-r--r--include/linux/msi.h8
-rw-r--r--include/linux/msi_api.h1
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/net.h4
-rw-r--r--include/linux/net/intel/libie/rx.h50
-rw-r--r--include/linux/netdevice.h55
-rw-r--r--include/linux/netfs.h198
-rw-r--r--include/linux/nfs4.h6
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/numa.h7
-rw-r--r--include/linux/objpool.h105
-rw-r--r--include/linux/of_reserved_mem.h1
-rw-r--r--include/linux/oid_registry.h5
-rw-r--r--include/linux/omap-mailbox.h13
-rw-r--r--include/linux/page-flags.h204
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/page_ext.h5
-rw-r--r--include/linux/page_idle.h68
-rw-r--r--include/linux/page_ref.h11
-rw-r--r--include/linux/pageblock-flags.h8
-rw-r--r--include/linux/pagemap.h24
-rw-r--r--include/linux/pagevec.h4
-rw-r--r--include/linux/papr_scm.h49
-rw-r--r--include/linux/part_stat.h2
-rw-r--r--include/linux/pci-epc.h7
-rw-r--r--include/linux/pci.h74
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/pds/pds_common.h2
-rw-r--r--include/linux/peci.h1
-rw-r--r--include/linux/percpu.h30
-rw-r--r--include/linux/perf/riscv_pmu.h8
-rw-r--r--include/linux/perf_event.h37
-rw-r--r--include/linux/pgalloc_tag.h132
-rw-r--r--include/linux/pgtable.h119
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/phy/phy-dp.h3
-rw-r--r--include/linux/phylink.h42
-rw-r--r--include/linux/platform_data/davinci_asp.h15
-rw-r--r--include/linux/platform_data/i2c-mux-gpio.h2
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h3
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h6
-rw-r--r--include/linux/platform_profile.h1
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/pm_wakeup.h12
-rw-r--r--include/linux/power/bq27xxx_battery.h8
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/profile.h5
-rw-r--r--include/linux/pse-pd/pse.h83
-rw-r--r--include/linux/ptr_ring.h28
-rw-r--r--include/linux/pwm.h36
-rw-r--r--include/linux/pxa2xx_ssp.h2
-rw-r--r--include/linux/qat/qat_mig_dev.h31
-rw-r--r--include/linux/randomize_kstack.h2
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/rcupdate_wait.h18
-rw-r--r--include/linux/regmap.h70
-rw-r--r--include/linux/regulator/consumer.h9
-rw-r--r--include/linux/regulator/pca9450.h1
-rw-r--r--include/linux/remoteproc/mtk_scp.h1
-rw-r--r--include/linux/rhashtable-types.h11
-rw-r--r--include/linux/rhashtable.h10
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/rmap.h50
-rw-r--r--include/linux/rtnetlink.h3
-rw-r--r--include/linux/rwbase_rt.h4
-rw-r--r--include/linux/rwsem.h6
-rw-r--r--include/linux/sched.h29
-rw-r--r--include/linux/sched/coredump.h5
-rw-r--r--include/linux/sched/idle.h2
-rw-r--r--include/linux/sched/mm.h22
-rw-r--r--include/linux/sched/topology.h10
-rw-r--r--include/linux/scmi_protocol.h86
-rw-r--r--include/linux/secretmem.h21
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/sfp.h4
-rw-r--r--include/linux/shm.h5
-rw-r--r--include/linux/shmem_fs.h9
-rw-r--r--include/linux/skb_array.h19
-rw-r--r--include/linux/skbuff.h178
-rw-r--r--include/linux/skbuff_ref.h75
-rw-r--r--include/linux/skmsg.h14
-rw-r--r--include/linux/slab.h193
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h139
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/sockptr.h35
-rw-r--r--include/linux/soundwire/sdw.h19
-rw-r--r--include/linux/soundwire/sdw_intel.h11
-rw-r--r--include/linux/soundwire/sdw_registers.h2
-rw-r--r--include/linux/spi/pxa2xx_spi.h56
-rw-r--r--include/linux/spi/rspi.h18
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/spi/xilinx_spi.h14
-rw-r--r--include/linux/srcutiny.h2
-rw-r--r--include/linux/ssb/ssb.h8
-rw-r--r--include/linux/stackdepot.h7
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/string.h66
-rw-r--r--include/linux/sunrpc/svc_rdma.h13
-rw-r--r--include/linux/sunrpc/svc_xprt.h5
-rw-r--r--include/linux/swap.h37
-rw-r--r--include/linux/swapops.h65
-rw-r--r--include/linux/swiotlb.h2
-rw-r--r--include/linux/sysctl.h27
-rw-r--r--include/linux/tcp.h6
-rw-r--r--include/linux/tee_core.h306
-rw-r--r--include/linux/tee_drv.h285
-rw-r--r--include/linux/thermal.h109
-rw-r--r--include/linux/threads.h4
-rw-r--r--include/linux/timecounter.h11
-rw-r--r--include/linux/timekeeping.h49
-rw-r--r--include/linux/timer.h12
-rw-r--r--include/linux/timerqueue.h5
-rw-r--r--include/linux/tpm.h316
-rw-r--r--include/linux/trace_events.h36
-rw-r--r--include/linux/trace_recursion.h2
-rw-r--r--include/linux/tracefs.h3
-rw-r--r--include/linux/tty.h14
-rw-r--r--include/linux/u64_stats_sync.h9
-rw-r--r--include/linux/udp.h32
-rw-r--r--include/linux/uio.h10
-rw-r--r--include/linux/virtio.h42
-rw-r--r--include/linux/vmalloc.h60
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/linux/vtime.h5
-rw-r--r--include/linux/wordpart.h7
-rw-r--r--include/linux/workqueue.h54
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/xarray.h16
-rw-r--r--include/linux/zpool.h4
-rw-r--r--include/linux/zswap.h2
-rw-r--r--include/media/cec.h4
-rw-r--r--include/media/ipu6-pci-table.h28
-rw-r--r--include/media/media-device.h6
-rw-r--r--include/media/v4l2-async.h4
-rw-r--r--include/media/v4l2-device.h7
-rw-r--r--include/media/v4l2-ioctl.h4
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/media/v4l2-subdev.h70
-rw-r--r--include/media/videobuf2-core.h52
-rw-r--r--include/media/videobuf2-v4l2.h2
-rw-r--r--include/net/9p/client.h2
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/af_unix.h36
-rw-r--r--include/net/ax25.h5
-rw-r--r--include/net/bluetooth/bluetooth.h11
-rw-r--r--include/net/bluetooth/hci.h145
-rw-r--r--include/net/bluetooth/hci_core.h77
-rw-r--r--include/net/bluetooth/l2cap.h33
-rw-r--r--include/net/cfg80211.h142
-rw-r--r--include/net/cipso_ipv4.h6
-rw-r--r--include/net/devlink.h21
-rw-r--r--include/net/dsa.h38
-rw-r--r--include/net/dscp.h76
-rw-r--r--include/net/dst_cache.h2
-rw-r--r--include/net/dst_metadata.h10
-rw-r--r--include/net/espintcp.h2
-rw-r--r--include/net/flow_dissector.h2
-rw-r--r--include/net/flow_offload.h57
-rw-r--r--include/net/genetlink.h10
-rw-r--r--include/net/gre.h70
-rw-r--r--include/net/gro.h91
-rw-r--r--include/net/gtp.h5
-rw-r--r--include/net/hotdata.h3
-rw-r--r--include/net/ieee8021q.h57
-rw-r--r--include/net/inet_common.h4
-rw-r--r--include/net/inet_connection_sock.h10
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_fib.h8
-rw-r--r--include/net/ip6_route.h11
-rw-r--r--include/net/ip6_tunnel.h4
-rw-r--r--include/net/ip_tunnels.h172
-rw-r--r--include/net/iucv/iucv.h7
-rw-r--r--include/net/libeth/rx.h242
-rw-r--r--include/net/mac80211.h85
-rw-r--r--include/net/macsec.h2
-rw-r--r--include/net/mana/mana.h2
-rw-r--r--include/net/mptcp.h3
-rw-r--r--include/net/netdev_queues.h61
-rw-r--r--include/net/netfilter/nf_flow_table.h12
-rw-r--r--include/net/netfilter/nf_tables.h18
-rw-r--r--include/net/netlabel.h28
-rw-r--r--include/net/netlink.h46
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/page_pool/helpers.h34
-rw-r--r--include/net/page_pool/types.h29
-rw-r--r--include/net/pfcp.h90
-rw-r--r--include/net/pkt_cls.h9
-rw-r--r--include/net/proto_memory.h83
-rw-r--r--include/net/red.h12
-rw-r--r--include/net/request_sock.h9
-rw-r--r--include/net/route.h22
-rw-r--r--include/net/rps.h28
-rw-r--r--include/net/rstreason.h182
-rw-r--r--include/net/sch_generic.h6
-rw-r--r--include/net/scm.h10
-rw-r--r--include/net/smc.h24
-rw-r--r--include/net/sock.h106
-rw-r--r--include/net/tcp.h68
-rw-r--r--include/net/tcx.h5
-rw-r--r--include/net/timewait_sock.h9
-rw-r--r--include/net/tls.h5
-rw-r--r--include/net/udp.h9
-rw-r--r--include/net/udp_tunnel.h4
-rw-r--r--include/net/xdp_sock.h2
-rw-r--r--include/net/xdp_sock_drv.h7
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/net/xsk_buff_pool.h14
-rw-r--r--include/rdma/rdmavt_qp.h1
-rw-r--r--include/rdma/restrack.h7
-rw-r--r--include/scsi/iser.h2
-rw-r--r--include/scsi/libfc.h18
-rw-r--r--include/scsi/libfcoe.h25
-rw-r--r--include/scsi/libsas.h32
-rw-r--r--include/scsi/sas_ata.h6
-rw-r--r--include/scsi/scsi.h12
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_driver.h5
-rw-r--r--include/scsi/scsi_host.h10
-rw-r--r--include/scsi/scsi_transport.h2
-rw-r--r--include/scsi/scsi_transport_fc.h6
-rw-r--r--include/scsi/scsi_transport_srp.h4
-rw-r--r--include/soc/fsl/dcp.h20
-rw-r--r--include/soc/microchip/mpfs.h10
-rw-r--r--include/soc/qcom/cmd-db.h10
-rw-r--r--include/sound/control.h23
-rw-r--r--include/sound/cs35l41.h5
-rw-r--r--include/sound/cs35l56.h2
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/emu10k1.h40
-rw-r--r--include/sound/hda-mlink.h2
-rw-r--r--include/sound/hda_codec.h11
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/sound/hdaudio_ext.h3
-rw-r--r--include/sound/intel-nhlt.h10
-rw-r--r--include/sound/pcm.h5
-rw-r--r--include/sound/soc-acpi-intel-match.h2
-rw-r--r--include/sound/soc-acpi-intel-ssp-common.h (renamed from sound/soc/intel/boards/sof_ssp_common.h)29
-rw-r--r--include/sound/soc-acpi.h14
-rw-r--r--include/sound/soc-jack.h2
-rw-r--r--include/sound/soc.h54
-rw-r--r--include/sound/sof.h4
-rw-r--r--include/sound/sof/channel_map.h2
-rw-r--r--include/sound/sof/control.h2
-rw-r--r--include/sound/sof/dai-intel.h2
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/debug.h2
-rw-r--r--include/sound/sof/ext_manifest.h2
-rw-r--r--include/sound/sof/ext_manifest4.h2
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/info.h2
-rw-r--r--include/sound/sof/ipc4/header.h2
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h2
-rw-r--r--include/sound/sof/topology.h2
-rw-r--r--include/sound/sof/trace.h2
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/sound/soundfont.h2
-rw-r--r--include/sound/tas2781-tlv.h2
-rw-r--r--include/trace/bpf_probe.h3
-rw-r--r--include/trace/events/asoc.h2
-rw-r--r--include/trace/events/bpf_test_run.h17
-rw-r--r--include/trace/events/btrfs.h158
-rw-r--r--include/trace/events/cgroup.h92
-rw-r--r--include/trace/events/dlm.h46
-rw-r--r--include/trace/events/f2fs.h42
-rw-r--r--include/trace/events/firewire.h348
-rw-r--r--include/trace/events/fs_dax.h16
-rw-r--r--include/trace/events/huge_memory.h12
-rw-r--r--include/trace/events/hw_pressure.h (renamed from include/trace/events/thermal_pressure.h)14
-rw-r--r--include/trace/events/icmp.h67
-rw-r--r--include/trace/events/intel_ifs.h2
-rw-r--r--include/trace/events/kvm.h15
-rw-r--r--include/trace/events/mce.h25
-rw-r--r--include/trace/events/mdio.h2
-rw-r--r--include/trace/events/mmflags.h3
-rw-r--r--include/trace/events/net_probe_common.h71
-rw-r--r--include/trace/events/netfs.h250
-rw-r--r--include/trace/events/nilfs2.h6
-rw-r--r--include/trace/events/page_ref.h4
-rw-r--r--include/trace/events/rcu.h27
-rw-r--r--include/trace/events/rpcgss.h4
-rw-r--r--include/trace/events/sched.h37
-rw-r--r--include/trace/events/scmi.h6
-rw-r--r--include/trace/events/sock.h37
-rw-r--r--include/trace/events/sof.h2
-rw-r--r--include/trace/events/sof_intel.h2
-rw-r--r--include/trace/events/tcp.h134
-rw-r--r--include/trace/events/udp.h29
-rw-r--r--include/trace/events/workqueue.h4
-rw-r--r--include/trace/misc/nfs.h2
-rw-r--r--include/uapi/drm/drm_mode.h11
-rw-r--r--include/uapi/drm/etnaviv_drm.h5
-rw-r--r--include/uapi/drm/i915_drm.h31
-rw-r--r--include/uapi/drm/nouveau_drm.h22
-rw-r--r--include/uapi/drm/panthor_drm.h945
-rw-r--r--include/uapi/drm/xe_drm.h25
-rw-r--r--include/uapi/linux/bpf.h44
-rw-r--r--include/uapi/linux/cryptouser.h30
-rw-r--r--include/uapi/linux/cxl_mem.h3
-rw-r--r--include/uapi/linux/devlink.h1
-rw-r--r--include/uapi/linux/dvb/frontend.h2
-rw-r--r--include/uapi/linux/ethtool.h55
-rw-r--r--include/uapi/linux/ethtool_netlink.h32
-rw-r--r--include/uapi/linux/fcntl.h14
-rw-r--r--include/uapi/linux/gtp.h3
-rw-r--r--include/uapi/linux/icmpv6.h1
-rw-r--r--include/uapi/linux/if_link.h3
-rw-r--r--include/uapi/linux/if_team.h116
-rw-r--r--include/uapi/linux/if_tunnel.h36
-rw-r--r--include/uapi/linux/io_uring.h38
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h17
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/landlock.h38
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/media-bus-format.h9
-rw-r--r--include/uapi/linux/mptcp.h4
-rw-r--r--include/uapi/linux/netdev.h21
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nfsd_netlink.h47
-rw-r--r--include/uapi/linux/nl80211.h236
-rw-r--r--include/uapi/linux/papr_pdsm.h (renamed from arch/powerpc/include/uapi/asm/papr_pdsm.h)0
-rw-r--r--include/uapi/linux/pci_regs.h6
-rw-r--r--include/uapi/linux/pkt_cls.h14
-rw-r--r--include/uapi/linux/prctl.h22
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/stat.h4
-rw-r--r--include/uapi/linux/stddef.h8
-rw-r--r--include/uapi/linux/tcp.h2
-rw-r--r--include/uapi/linux/tee.h1
-rw-r--r--include/uapi/linux/trace_mmap.h48
-rw-r--r--include/uapi/linux/udp.h2
-rw-r--r--include/uapi/linux/v4l2-mediabus.h18
-rw-r--r--include/uapi/linux/v4l2-subdev.h14
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--include/uapi/linux/vhost.h15
-rw-r--r--include/uapi/linux/videodev2.h45
-rw-r--r--include/uapi/linux/virtio_bt.h1
-rw-r--r--include/uapi/linux/virtio_net.h143
-rw-r--r--include/uapi/linux/xfrm.h8
-rw-r--r--include/uapi/rdma/efa-abi.h7
-rw-r--r--include/uapi/rdma/hns-abi.h9
-rw-r--r--include/uapi/rdma/mana-abi.h12
-rw-r--r--include/uapi/rdma/rdma_netlink.h6
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h10
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h4
-rw-r--r--include/uapi/sound/asoc.h56
-rw-r--r--include/uapi/sound/intel/avs/tokens.h2
-rw-r--r--include/uapi/sound/skl-tplg-interface.h74
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/fw.h2
-rw-r--r--include/uapi/sound/sof/header.h2
-rw-r--r--include/uapi/sound/sof/tokens.h2
-rw-r--r--include/ufs/ufshcd.h4
-rw-r--r--include/ufs/ufshci.h13
-rw-r--r--include/vdso/datapage.h12
-rw-r--r--include/vdso/math64.h38
-rw-r--r--include/video/omapfb_dss.h3
-rw-r--r--init/Kconfig32
-rw-r--r--init/Makefile5
-rw-r--r--init/do_mounts.c3
-rw-r--r--init/do_mounts_initrd.c1
-rw-r--r--init/init_task.c3
-rw-r--r--init/initramfs.c4
-rw-r--r--init/main.c44
-rw-r--r--io_uring/Makefile15
-rw-r--r--io_uring/alloc_cache.h59
-rw-r--r--io_uring/cancel.c4
-rw-r--r--io_uring/fdinfo.c4
-rw-r--r--io_uring/filetable.c4
-rw-r--r--io_uring/futex.c30
-rw-r--r--io_uring/futex.h5
-rw-r--r--io_uring/io-wq.c67
-rw-r--r--io_uring/io_uring.c712
-rw-r--r--io_uring/io_uring.h33
-rw-r--r--io_uring/kbuf.c416
-rw-r--r--io_uring/kbuf.h72
-rw-r--r--io_uring/memmap.c336
-rw-r--r--io_uring/memmap.h25
-rw-r--r--io_uring/msg_ring.c12
-rw-r--r--io_uring/net.c875
-rw-r--r--io_uring/net.h29
-rw-r--r--io_uring/nop.c26
-rw-r--r--io_uring/notif.c108
-rw-r--r--io_uring/notif.h13
-rw-r--r--io_uring/opdef.c65
-rw-r--r--io_uring/opdef.h9
-rw-r--r--io_uring/poll.c15
-rw-r--r--io_uring/poll.h9
-rw-r--r--io_uring/refs.h7
-rw-r--r--io_uring/register.c3
-rw-r--r--io_uring/rsrc.c47
-rw-r--r--io_uring/rsrc.h13
-rw-r--r--io_uring/rw.c605
-rw-r--r--io_uring/rw.h25
-rw-r--r--io_uring/sqpoll.c8
-rw-r--r--io_uring/timeout.c9
-rw-r--r--io_uring/uring_cmd.c122
-rw-r--r--io_uring/uring_cmd.h8
-rw-r--r--io_uring/waitid.c2
-rw-r--r--ipc/ipc_sysctl.c8
-rw-r--r--ipc/mq_sysctl.c8
-rw-r--r--ipc/shm.c10
-rw-r--r--kernel/acct.c1
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/bpf/Kconfig4
-rw-r--r--kernel/bpf/Makefile5
-rw-r--r--kernel/bpf/arena.c31
-rw-r--r--kernel/bpf/arraymap.c54
-rw-r--r--kernel/bpf/bloom_filter.c13
-rw-r--r--kernel/bpf/bpf_local_storage.c2
-rw-r--r--kernel/bpf/bpf_struct_ops.c10
-rw-r--r--kernel/bpf/btf.c27
-rw-r--r--kernel/bpf/cgroup.c2
-rw-r--r--kernel/bpf/core.c90
-rw-r--r--kernel/bpf/cpumask.c1
-rw-r--r--kernel/bpf/crypto.c385
-rw-r--r--kernel/bpf/disasm.c14
-rw-r--r--kernel/bpf/hashtab.c64
-rw-r--r--kernel/bpf/helpers.c364
-rw-r--r--kernel/bpf/log.c4
-rw-r--r--kernel/bpf/lpm_trie.c31
-rw-r--r--kernel/bpf/map_in_map.c4
-rw-r--r--kernel/bpf/memalloc.c6
-rw-r--r--kernel/bpf/syscall.c91
-rw-r--r--kernel/bpf/sysfs_btf.c6
-rw-r--r--kernel/bpf/trampoline.c20
-rw-r--r--kernel/bpf/verifier.c717
-rw-r--r--kernel/cgroup/cgroup-v1.c1
-rw-r--r--kernel/cgroup/cgroup.c3
-rw-r--r--kernel/cgroup/cpuset.c158
-rw-r--r--kernel/cgroup/legacy_freezer.c5
-rw-r--r--kernel/cgroup/pids.c2
-rw-r--r--kernel/cgroup/rstat.c118
-rw-r--r--kernel/configs/hardening.config19
-rw-r--r--kernel/context_tracking.c2
-rw-r--r--kernel/cpu.c61
-rw-r--r--kernel/crash_core.c31
-rw-r--r--kernel/crash_reserve.c11
-rw-r--r--kernel/debug/kdb/kdb_io.c153
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/delayacct.c1
-rw-r--r--kernel/dma/Kconfig5
-rw-r--r--kernel/dma/mapping.c73
-rw-r--r--kernel/dma/swiotlb.c154
-rw-r--r--kernel/events/core.c277
-rw-r--r--kernel/events/ring_buffer.c4
-rw-r--r--kernel/events/uprobes.c28
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c35
-rw-r--r--kernel/futex/core.c2
-rw-r--r--kernel/hung_task.c1
-rw-r--r--kernel/irq/Kconfig4
-rw-r--r--kernel/irq/cpuhotplug.c27
-rw-r--r--kernel/irq/internals.h9
-rw-r--r--kernel/irq/irqdesc.c65
-rw-r--r--kernel/irq/irqdomain.c5
-rw-r--r--kernel/irq/manage.c37
-rw-r--r--kernel/irq/msi.c2
-rw-r--r--kernel/irq/proc.c9
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/jump_label.c53
-rw-r--r--kernel/kallsyms.c6
-rw-r--r--kernel/kallsyms_internal.h30
-rw-r--r--kernel/kallsyms_selftest.c2
-rw-r--r--kernel/kcov.c3
-rw-r--r--kernel/kcsan/kcsan_test.c17
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/kexec_core.c1
-rw-r--r--kernel/kexec_file.c5
-rw-r--r--kernel/kprobes.c88
-rw-r--r--kernel/ksysfs.c4
-rw-r--r--kernel/kthread.c1
-rw-r--r--kernel/latencytop.c1
-rw-r--r--kernel/livepatch/core.c4
-rw-r--r--kernel/livepatch/patch.c4
-rw-r--r--kernel/livepatch/transition.c54
-rw-r--r--kernel/locking/lock_events.h4
-rw-r--r--kernel/locking/qspinlock.c13
-rw-r--r--kernel/locking/qspinlock_paravirt.h49
-rw-r--r--kernel/module/Kconfig8
-rw-r--r--kernel/module/kallsyms.c2
-rw-r--r--kernel/module/main.c125
-rw-r--r--kernel/padata.c8
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/pid_namespace.c1
-rw-r--r--kernel/pid_sysctl.h1
-rw-r--r--kernel/power/energy_model.c106
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/suspend.c6
-rw-r--r--kernel/power/swap.c7
-rw-r--r--kernel/printk/printk.c32
-rw-r--r--kernel/printk/sysctl.c1
-rw-r--r--kernel/profile.c43
-rw-r--r--kernel/rcu/Kconfig8
-rw-r--r--kernel/rcu/rcu.h20
-rw-r--r--kernel/rcu/rcutorture.c85
-rw-r--r--kernel/rcu/srcutiny.c31
-rw-r--r--kernel/rcu/srcutree.c5
-rw-r--r--kernel/rcu/sync.c8
-rw-r--r--kernel/rcu/tasks.h44
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tree.c430
-rw-r--r--kernel/rcu/tree.h24
-rw-r--r--kernel/rcu/tree_exp.h2
-rw-r--r--kernel/rcu/tree_plugin.h4
-rw-r--r--kernel/rcu/tree_stall.h11
-rw-r--r--kernel/rcu/update.c4
-rw-r--r--kernel/reboot.c1
-rw-r--r--kernel/regset.c6
-rw-r--r--kernel/sched/autogroup.c1
-rw-r--r--kernel/sched/core.c17
-rw-r--r--kernel/sched/cputime.c13
-rw-r--r--kernel/sched/deadline.c1
-rw-r--r--kernel/sched/debug.c1
-rw-r--r--kernel/sched/fair.c547
-rw-r--r--kernel/sched/isolation.c18
-rw-r--r--kernel/sched/loadavg.c2
-rw-r--r--kernel/sched/pelt.c22
-rw-r--r--kernel/sched/pelt.h16
-rw-r--r--kernel/sched/rt.c1
-rw-r--r--kernel/sched/sched.h91
-rw-r--r--kernel/sched/stats.c5
-rw-r--r--kernel/sched/topology.c65
-rw-r--r--kernel/seccomp.c5
-rw-r--r--kernel/signal.c1
-rw-r--r--kernel/softirq.c12
-rw-r--r--kernel/stackleak.c7
-rw-r--r--kernel/sys.c29
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/time/Kconfig2
-rw-r--r--kernel/time/clockevents.c2
-rw-r--r--kernel/time/clocksource.c44
-rw-r--r--kernel/time/hrtimer.c41
-rw-r--r--kernel/time/posix-clock.c16
-rw-r--r--kernel/time/tick-common.c17
-rw-r--r--kernel/time/tick-sched.c54
-rw-r--r--kernel/time/tick-sched.h2
-rw-r--r--kernel/time/timekeeping.c96
-rw-r--r--kernel/time/timer.c25
-rw-r--r--kernel/time/timer_migration.c36
-rw-r--r--kernel/time/vsyscall.c6
-rw-r--r--kernel/trace/Kconfig19
-rw-r--r--kernel/trace/blktrace.c3
-rw-r--r--kernel/trace/bpf_trace.c172
-rw-r--r--kernel/trace/fgraph.c11
-rw-r--r--kernel/trace/ftrace.c73
-rw-r--r--kernel/trace/rethook.c4
-rw-r--r--kernel/trace/ring_buffer.c477
-rw-r--r--kernel/trace/trace.c106
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_benchmark.c2
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--kernel/trace/trace_events_user.c77
-rw-r--r--kernel/trace/trace_fprobe.c6
-rw-r--r--kernel/trace/trace_kprobe.c26
-rw-r--r--kernel/trace/trace_probe.c69
-rw-r--r--kernel/trace/trace_probe.h2
-rw-r--r--kernel/trace/trace_uprobe.c103
-rw-r--r--kernel/ucount.c7
-rw-r--r--kernel/umh.c1
-rw-r--r--kernel/user.c2
-rw-r--r--kernel/utsname_sysctl.c1
-rw-r--r--kernel/vmcore_info.c8
-rw-r--r--kernel/watchdog.c226
-rw-r--r--kernel/watchdog_perf.c47
-rw-r--r--kernel/workqueue.c474
-rw-r--r--lib/Kconfig2
-rw-r--r--lib/Kconfig.debug63
-rw-r--r--lib/Kconfig.kgdb1
-rw-r--r--lib/Makefile12
-rw-r--r--lib/alloc_tag.c246
-rw-r--r--lib/bootconfig.c22
-rwxr-xr-xlib/build_OID_registry5
-rw-r--r--lib/buildid.c4
-rw-r--r--lib/checksum_kunit.c5
-rw-r--r--lib/closure.c37
-rw-r--r--lib/cmpxchg-emu.c45
-rw-r--r--lib/codetag.c283
-rw-r--r--lib/crypto/Kconfig5
-rw-r--r--lib/crypto/Makefile3
-rw-r--r--lib/crypto/aescfb.c257
-rw-r--r--lib/devres.c26
-rw-r--r--lib/dim/Makefile4
-rw-r--r--lib/dim/dim.c3
-rw-r--r--lib/dynamic_debug.c6
-rw-r--r--lib/dynamic_queue_limits.c13
-rw-r--r--lib/find_bit.c14
-rw-r--r--lib/fortify_kunit.c222
-rw-r--r--lib/iomap_copy.c13
-rw-r--r--lib/kfifo.c8
-rw-r--r--lib/kunit/Kconfig11
-rw-r--r--lib/kunit/device.c2
-rw-r--r--lib/kunit/kunit-test.c45
-rw-r--r--lib/kunit/string-stream-test.c12
-rw-r--r--lib/kunit/test.c3
-rw-r--r--lib/kunit/try-catch.c40
-rw-r--r--lib/kunit_iov_iter.c18
-rw-r--r--lib/maple_tree.c16
-rw-r--r--lib/math/prime_numbers.c2
-rw-r--r--lib/memcpy_kunit.c53
-rw-r--r--lib/objpool.c112
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/rhashtable.c22
-rw-r--r--lib/sbitmap.c8
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/slub_kunit.c2
-rw-r--r--lib/stackdepot.c8
-rw-r--r--lib/strcat_kunit.c104
-rw-r--r--lib/string_kunit.c461
-rw-r--r--lib/strscpy_kunit.c142
-rw-r--r--lib/test_bitmap.c207
-rw-r--r--lib/test_bitops.c28
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_hexdump.c2
-rw-r--r--lib/test_hmm.c8
-rw-r--r--lib/test_ubsan.c2
-rw-r--r--lib/test_xarray.c120
-rw-r--r--lib/ubsan.c18
-rw-r--r--lib/ubsan.h43
-rw-r--r--lib/usercopy.c9
-rw-r--r--lib/vdso/Kconfig7
-rw-r--r--lib/vdso/gettimeofday.c55
-rw-r--r--lib/vsprintf.c4
-rw-r--r--lib/xarray.c75
-rw-r--r--mm/Kconfig18
-rw-r--r--mm/Makefile5
-rw-r--r--mm/backing-dev.c200
-rw-r--r--mm/cma.c4
-rw-r--r--mm/compaction.c8
-rw-r--r--mm/damon/core.c10
-rw-r--r--mm/damon/paddr.c84
-rw-r--r--mm/damon/sysfs-schemes.c1
-rw-r--r--mm/debug.c25
-rw-r--r--mm/debug_page_alloc.c12
-rw-r--r--mm/debug_vm_pgtable.c2
-rw-r--r--mm/execmem.c143
-rw-r--r--mm/filemap.c191
-rw-r--r--mm/folio-compat.c6
-rw-r--r--mm/gup.c882
-rw-r--r--mm/hmm.c9
-rw-r--r--mm/huge_memory.c432
-rw-r--r--mm/hugetlb.c466
-rw-r--r--mm/hugetlb_cgroup.c2
-rw-r--r--mm/hugetlb_vmemmap.c1
-rw-r--r--mm/hwpoison-inject.c11
-rw-r--r--mm/internal.h215
-rw-r--r--mm/kasan/hw_tags.c1
-rw-r--r--mm/kfence/core.c14
-rw-r--r--mm/kfence/kfence.h4
-rw-r--r--mm/khugepaged.c335
-rw-r--r--mm/kmemleak.c6
-rw-r--r--mm/kmsan/hooks.c11
-rw-r--r--mm/ksm.c295
-rw-r--r--mm/madvise.c245
-rw-r--r--mm/memcontrol.c490
-rw-r--r--mm/memory-failure.c195
-rw-r--r--mm/memory-tiers.c123
-rw-r--r--mm/memory.c312
-rw-r--r--mm/memory_hotplug.c5
-rw-r--r--mm/mempolicy.c104
-rw-r--r--mm/mempool.c36
-rw-r--r--mm/memremap.c40
-rw-r--r--mm/migrate.c43
-rw-r--r--mm/migrate_device.c49
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mm_init.c218
-rw-r--r--mm/mmap.c237
-rw-r--r--mm/mmu_notifier.c17
-rw-r--r--mm/mprotect.c3
-rw-r--r--mm/mremap.c2
-rw-r--r--mm/nommu.c84
-rw-r--r--mm/oom_kill.c1
-rw-r--r--mm/page-writeback.c81
-rw-r--r--mm/page_alloc.c842
-rw-r--r--mm/page_ext.c15
-rw-r--r--mm/page_io.c3
-rw-r--r--mm/page_isolation.c121
-rw-r--r--mm/page_owner.c227
-rw-r--r--mm/page_table_check.c30
-rw-r--r--mm/page_vma_mapped.c22
-rw-r--r--mm/percpu-internal.h26
-rw-r--r--mm/percpu-vm.c4
-rw-r--r--mm/percpu.c118
-rw-r--r--mm/pgtable-generic.c2
-rw-r--r--mm/readahead.c12
-rw-r--r--mm/rmap.c111
-rw-r--r--mm/shmem.c22
-rw-r--r--mm/shmem_quota.c10
-rw-r--r--mm/show_mem.c26
-rw-r--r--mm/slab.h63
-rw-r--r--mm/slab_common.c33
-rw-r--r--mm/slub.c638
-rw-r--r--mm/sparse.c28
-rw-r--r--mm/swap.c64
-rw-r--r--mm/swap_slots.c8
-rw-r--r--mm/swap_state.c10
-rw-r--r--mm/swapfile.c397
-rw-r--r--mm/truncate.c36
-rw-r--r--mm/userfaultfd.c69
-rw-r--r--mm/util.c40
-rw-r--r--mm/vmalloc.c216
-rw-r--r--mm/vmscan.c52
-rw-r--r--mm/vmstat.c3
-rw-r--r--mm/workingset.c7
-rw-r--r--mm/z3fold.c10
-rw-r--r--mm/zbud.c10
-rw-r--r--mm/zpool.c10
-rw-r--r--mm/zsmalloc.c6
-rw-r--r--mm/zswap.c451
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/8021q/vlan_netlink.c10
-rw-r--r--net/9p/Kconfig1
-rw-r--r--net/9p/client.c59
-rw-r--r--net/9p/trans_fd.c1
-rw-r--r--net/Kconfig6
-rw-r--r--net/appletalk/ddp.c19
-rw-r--r--net/appletalk/sysctl_net_atalk.c1
-rw-r--r--net/atm/clip.c4
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/svc.c8
-rw-r--r--net/ax25/Kconfig2
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/ax25/ax25_dev.c53
-rw-r--r--net/ax25/sysctl_net_ax25.c5
-rw-r--r--net/batman-adv/main.c2
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/netlink.c1
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/translation-table.c49
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bluetooth/hci_conn.c156
-rw-r--r--net/bluetooth/hci_core.c179
-rw-r--r--net/bluetooth/hci_debugfs.c48
-rw-r--r--net/bluetooth/hci_event.c290
-rw-r--r--net/bluetooth/hci_request.c4
-rw-r--r--net/bluetooth/hci_request.h4
-rw-r--r--net/bluetooth/hci_sock.c26
-rw-r--r--net/bluetooth/hci_sync.c232
-rw-r--r--net/bluetooth/iso.c201
-rw-r--r--net/bluetooth/l2cap_core.c167
-rw-r--r--net/bluetooth/l2cap_sock.c154
-rw-r--r--net/bluetooth/mgmt.c108
-rw-r--r--net/bluetooth/msft.c2
-rw-r--r--net/bluetooth/msft.h4
-rw-r--r--net/bluetooth/rfcomm/sock.c20
-rw-r--r--net/bluetooth/sco.c44
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c59
-rw-r--r--net/bpf/test_run.c8
-rw-r--r--net/bridge/br_device.c10
-rw-r--r--net/bridge/br_forward.c9
-rw-r--r--net/bridge/br_input.c15
-rw-r--r--net/bridge/br_mst.c16
-rw-r--r--net/bridge/br_netfilter_hooks.c12
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/br_vlan_tunnel.c9
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c14
-rw-r--r--net/caif/cfctrl.c8
-rw-r--r--net/core/Makefile3
-rw-r--r--net/core/bpf_sk_storage.c23
-rw-r--r--net/core/datagram.c19
-rw-r--r--net/core/dev.c470
-rw-r--r--net/core/dev.h24
-rw-r--r--net/core/dev_addr_lists_test.c14
-rw-r--r--net/core/drop_monitor.c20
-rw-r--r--net/core/dst_cache.c11
-rw-r--r--net/core/fib_rules.c17
-rw-r--r--net/core/filter.c90
-rw-r--r--net/core/flow_dissector.c20
-rw-r--r--net/core/gro.c35
-rw-r--r--net/core/hotdata.c7
-rw-r--r--net/core/ieee8021q_helpers.c242
-rw-r--r--net/core/neighbour.c79
-rw-r--r--net/core/net-procfs.c3
-rw-r--r--net/core/net-sysfs.c16
-rw-r--r--net/core/net_namespace.c18
-rw-r--r--net/core/net_test.c (renamed from net/core/gso_test.c)129
-rw-r--r--net/core/netdev-genl-gen.c1
-rw-r--r--net/core/netdev-genl.c77
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/page_pool.c124
-rw-r--r--net/core/rtnetlink.c168
-rw-r--r--net/core/scm.c12
-rw-r--r--net/core/skbuff.c193
-rw-r--r--net/core/skmsg.c5
-rw-r--r--net/core/sock.c23
-rw-r--r--net/core/sock_map.c269
-rw-r--r--net/core/sysctl_net_core.c22
-rw-r--r--net/dccp/ccids/ccid2.c1
-rw-r--r--net/dccp/ipv4.c12
-rw-r--r--net/dccp/ipv6.c16
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/dccp/output.c2
-rw-r--r--net/dccp/sysctl.c2
-rw-r--r--net/devlink/core.c6
-rw-r--r--net/devlink/dev.c14
-rw-r--r--net/devlink/param.c7
-rw-r--r--net/devlink/port.c53
-rw-r--r--net/dsa/devlink.c3
-rw-r--r--net/dsa/dsa.c10
-rw-r--r--net/dsa/port.c175
-rw-r--r--net/dsa/user.c107
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ethtool/pse-pd.c60
-rw-r--r--net/ethtool/tsinfo.c52
-rw-r--r--net/handshake/tlshd.c1
-rw-r--r--net/hsr/hsr_device.c78
-rw-r--r--net/hsr/hsr_device.h4
-rw-r--r--net/hsr/hsr_forward.c85
-rw-r--r--net/hsr/hsr_framereg.c52
-rw-r--r--net/hsr/hsr_framereg.h4
-rw-r--r--net/hsr/hsr_main.c2
-rw-r--r--net/hsr/hsr_main.h7
-rw-r--r--net/hsr/hsr_netlink.c30
-rw-r--r--net/hsr/hsr_slave.c4
-rw-r--r--net/ieee802154/6lowpan/reassembly.c8
-rw-r--r--net/ipv4/af_inet.c59
-rw-r--r--net/ipv4/arp.c204
-rw-r--r--net/ipv4/bpf_tcp_ca.c6
-rw-r--r--net/ipv4/cipso_ipv4.c7
-rw-r--r--net/ipv4/devinet.c27
-rw-r--r--net/ipv4/esp4.c15
-rw-r--r--net/ipv4/fib_frontend.c5
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fou_bpf.c2
-rw-r--r--net/ipv4/gre_demux.c2
-rw-r--r--net/ipv4/icmp.c42
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/inet_connection_sock.c51
-rw-r--r--net/ipv4/inet_fragment.c74
-rw-r--r--net/ipv4/inet_hashtables.c3
-rw-r--r--net/ipv4/inet_timewait_sock.c16
-rw-r--r--net/ipv4/ip_fragment.c6
-rw-r--r--net/ipv4/ip_gre.c151
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c10
-rw-r--r--net/ipv4/ip_tunnel.c119
-rw-r--r--net/ipv4/ip_tunnel_core.c82
-rw-r--r--net/ipv4/ip_vti.c41
-rw-r--r--net/ipv4/ipip.c33
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/netfilter/arp_tables.c8
-rw-r--r--net/ipv4/netfilter/ip_tables.c8
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/nexthop.c4
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/route.c55
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c9
-rw-r--r--net/ipv4/tcp.c77
-rw-r--r--net/ipv4/tcp_ao.c3
-rw-r--r--net/ipv4/tcp_bbr.c6
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_dctcp.c4
-rw-r--r--net/ipv4/tcp_input.c80
-rw-r--r--net/ipv4/tcp_ipv4.c60
-rw-r--r--net/ipv4/tcp_metrics.c7
-rw-r--r--net/ipv4/tcp_minisocks.c14
-rw-r--r--net/ipv4/tcp_offload.c238
-rw-r--r--net/ipv4/tcp_output.c143
-rw-r--r--net/ipv4/tcp_timer.c13
-rw-r--r--net/ipv4/udp.c74
-rw-r--r--net/ipv4/udp_offload.c54
-rw-r--r--net/ipv4/udp_tunnel_core.c5
-rw-r--r--net/ipv4/xfrm4_input.c19
-rw-r--r--net/ipv4/xfrm4_policy.c5
-rw-r--r--net/ipv6/addrconf.c25
-rw-r--r--net/ipv6/addrlabel.c18
-rw-r--r--net/ipv6/anycast.c5
-rw-r--r--net/ipv6/esp6.c15
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/icmp.c9
-rw-r--r--net/ipv6/ila/ila_lwt.c4
-rw-r--r--net/ipv6/inet6_hashtables.c4
-rw-r--r--net/ipv6/ip6_fib.c72
-rw-r--r--net/ipv6/ip6_gre.c113
-rw-r--r--net/ipv6/ip6_offload.c17
-rw-r--r--net/ipv6/ip6_output.c22
-rw-r--r--net/ipv6/ip6_tunnel.c18
-rw-r--r--net/ipv6/ip6_vti.c14
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c8
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c5
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c6
-rw-r--r--net/ipv6/route.c38
-rw-r--r--net/ipv6/seg6.c5
-rw-r--r--net/ipv6/sit.c38
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c14
-rw-r--r--net/ipv6/tcp_ipv6.c49
-rw-r--r--net/ipv6/tcpv6_offload.c123
-rw-r--r--net/ipv6/udp.c44
-rw-r--r--net/ipv6/udp_offload.c11
-rw-r--r--net/ipv6/xfrm6_input.c26
-rw-r--r--net/ipv6/xfrm6_policy.c5
-rw-r--r--net/iucv/af_iucv.c6
-rw-r--r--net/iucv/iucv.c64
-rw-r--r--net/l2tp/l2tp_core.c81
-rw-r--r--net/l2tp/l2tp_eth.c3
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/llc/af_llc.c7
-rw-r--r--net/llc/sysctl_net_llc.c8
-rw-r--r--net/mac80211/cfg.c171
-rw-r--r--net/mac80211/chan.c142
-rw-r--r--net/mac80211/debug.h2
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/drop.h3
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ieee80211_i.h29
-rw-r--r--net/mac80211/iface.c9
-rw-r--r--net/mac80211/link.c28
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh.h36
-rw-r--r--net/mac80211/mesh_pathtbl.c31
-rw-r--r--net/mac80211/mlme.c181
-rw-r--r--net/mac80211/offchannel.c12
-rw-r--r--net/mac80211/rate.c6
-rw-r--r--net/mac80211/rx.c28
-rw-r--r--net/mac80211/scan.c17
-rw-r--r--net/mac80211/spectmgmt.c18
-rw-r--r--net/mac80211/sta_info.h4
-rw-r--r--net/mac80211/status.c22
-rw-r--r--net/mac80211/tx.c19
-rw-r--r--net/mac80211/util.c21
-rw-r--r--net/mac80211/wpa.c12
-rw-r--r--net/mpls/af_mpls.c78
-rw-r--r--net/mpls/mpls_iptunnel.c4
-rw-r--r--net/mptcp/ctrl.c71
-rw-r--r--net/mptcp/mib.h2
-rw-r--r--net/mptcp/options.c1
-rw-r--r--net/mptcp/pm_netlink.c1
-rw-r--r--net/mptcp/pm_userspace.c1
-rw-r--r--net/mptcp/protocol.c35
-rw-r--r--net/mptcp/protocol.h49
-rw-r--r--net/mptcp/sched.c22
-rw-r--r--net/mptcp/sockopt.c90
-rw-r--r--net/mptcp/subflow.c93
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c36
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c8
-rw-r--r--net/netfilter/nf_flow_table_core.c8
-rw-r--r--net/netfilter/nf_flow_table_inet.c3
-rw-r--r--net/netfilter/nf_flow_table_ip.c18
-rw-r--r--net/netfilter/nf_log.c5
-rw-r--r--net/netfilter/nf_tables_api.c190
-rw-r--r--net/netfilter/nfnetlink.c5
-rw-r--r--net/netfilter/nft_chain_filter.c10
-rw-r--r--net/netfilter/nft_connlimit.c4
-rw-r--r--net/netfilter/nft_counter.c4
-rw-r--r--net/netfilter/nft_dynset.c2
-rw-r--r--net/netfilter/nft_last.c4
-rw-r--r--net/netfilter/nft_limit.c14
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_quota.c4
-rw-r--r--net/netfilter/nft_rt.c4
-rw-r--r--net/netfilter/nft_set_bitmap.c4
-rw-r--r--net/netfilter/nft_set_hash.c8
-rw-r--r--net/netfilter/nft_set_pipapo.c273
-rw-r--r--net/netfilter/nft_set_pipapo.h2
-rw-r--r--net/netfilter/nft_set_rbtree.c4
-rw-r--r--net/netfilter/nft_tunnel.c44
-rw-r--r--net/netlabel/netlabel_kapi.c31
-rw-r--r--net/netlink/af_netlink.c137
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/netlink/genetlink.h11
-rw-r--r--net/netrom/af_netrom.c6
-rw-r--r--net/netrom/nr_route.c19
-rw-r--r--net/netrom/sysctl_net_netrom.c1
-rw-r--r--net/nfc/llcp_sock.c16
-rw-r--r--net/nfc/nci/core.c6
-rw-r--r--net/nfc/netlink.c6
-rw-r--r--net/nsh/nsh.c14
-rw-r--r--net/openvswitch/conntrack.c9
-rw-r--r--net/openvswitch/datapath.c1
-rw-r--r--net/openvswitch/flow.c3
-rw-r--r--net/openvswitch/flow_netlink.c61
-rw-r--r--net/openvswitch/meter.h1
-rw-r--r--net/openvswitch/vport-netdev.c7
-rw-r--r--net/packet/af_packet.c29
-rw-r--r--net/phonet/pep.c12
-rw-r--r--net/phonet/pn_netlink.c19
-rw-r--r--net/phonet/socket.c7
-rw-r--r--net/phonet/sysctl.c1
-rw-r--r--net/psample/psample.c26
-rw-r--r--net/qrtr/mhi.c46
-rw-r--r--net/qrtr/ns.c27
-rw-r--r--net/rds/ib_sysctl.c1
-rw-r--r--net/rds/rdma.c2
-rw-r--r--net/rds/sysctl.c1
-rw-r--r--net/rds/tcp.c1
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rfkill/rfkill-gpio.c6
-rw-r--r--net/rose/af_rose.c6
-rw-r--r--net/rose/sysctl_net_rose.c1
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/call_object.c7
-rw-r--r--net/rxrpc/conn_object.c9
-rw-r--r--net/rxrpc/input.c49
-rw-r--r--net/rxrpc/insecure.c2
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/rxrpc/sysctl.c1
-rw-r--r--net/rxrpc/txbuf.c10
-rw-r--r--net/sched/act_skbmod.c10
-rw-r--r--net/sched/act_tunnel_key.c36
-rw-r--r--net/sched/cls_api.c41
-rw-r--r--net/sched/cls_flower.c134
-rw-r--r--net/sched/sch_api.c5
-rw-r--r--net/sched/sch_cake.c112
-rw-r--r--net/sched/sch_cbs.c20
-rw-r--r--net/sched/sch_choke.c21
-rw-r--r--net/sched/sch_codel.c29
-rw-r--r--net/sched/sch_etf.c10
-rw-r--r--net/sched/sch_ets.c25
-rw-r--r--net/sched/sch_fifo.c13
-rw-r--r--net/sched/sch_fq.c108
-rw-r--r--net/sched/sch_fq_codel.c57
-rw-r--r--net/sched/sch_fq_pie.c61
-rw-r--r--net/sched/sch_generic.c16
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_hhf.c35
-rw-r--r--net/sched/sch_htb.c22
-rw-r--r--net/sched/sch_mqprio.c6
-rw-r--r--net/sched/sch_pie.c39
-rw-r--r--net/sched/sch_sfq.c13
-rw-r--r--net/sched/sch_skbprio.c8
-rw-r--r--net/sched/sch_taprio.c5
-rw-r--r--net/sched/sch_teql.c4
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/sctp/sm_statefuns.c1
-rw-r--r--net/sctp/socket.c17
-rw-r--r--net/sctp/sysctl.c12
-rw-r--r--net/smc/Kconfig13
-rw-r--r--net/smc/Makefile1
-rw-r--r--net/smc/af_smc.c40
-rw-r--r--net/smc/smc_cdc.c36
-rw-r--r--net/smc/smc_clc.c6
-rw-r--r--net/smc/smc_clc.h26
-rw-r--r--net/smc/smc_core.c61
-rw-r--r--net/smc/smc_core.h1
-rw-r--r--net/smc/smc_ib.c19
-rw-r--r--net/smc/smc_ism.c88
-rw-r--r--net/smc/smc_ism.h10
-rw-r--r--net/smc/smc_loopback.c427
-rw-r--r--net/smc/smc_loopback.h61
-rw-r--r--net/smc/smc_rx.c4
-rw-r--r--net/smc/smc_sysctl.c8
-rw-r--r--net/socket.c17
-rw-r--r--net/sunrpc/auth_gss/auth_gss_internal.h6
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c14
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c10
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/svc_xprt.c168
-rw-r--r--net/sunrpc/svcsock.c10
-rw-r--r--net/sunrpc/sysctl.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c86
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c5
-rw-r--r--net/sunrpc/xprtrdma/transport.c1
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/switchdev/switchdev.c99
-rw-r--r--net/sysctl_net.c3
-rw-r--r--net/tipc/msg.c8
-rw-r--r--net/tipc/socket.c18
-rw-r--r--net/tipc/sysctl.c1
-rw-r--r--net/tipc/udp_media.c2
-rw-r--r--net/tls/Kconfig1
-rw-r--r--net/tls/tls.h2
-rw-r--r--net/tls/tls_device.c1
-rw-r--r--net/tls/tls_device_fallback.c1
-rw-r--r--net/tls/tls_strp.c7
-rw-r--r--net/tls/tls_sw.c8
-rw-r--r--net/unix/af_unix.c119
-rw-r--r--net/unix/garbage.c598
-rw-r--r--net/unix/sysctl_net_unix.c3
-rw-r--r--net/vmw_vsock/af_vsock.c6
-rw-r--r--net/vmw_vsock/virtio_transport.c3
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/nl80211.c29
-rw-r--r--net/wireless/reg.c18
-rw-r--r--net/wireless/reg.h13
-rw-r--r--net/wireless/scan.c54
-rw-r--r--net/wireless/sme.c1
-rw-r--r--net/wireless/trace.h12
-rw-r--r--net/wireless/wext-core.c7
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/x25/sysctl_net_x25.c1
-rw-r--r--net/xdp/xsk.c2
-rw-r--r--net/xdp/xsk_buff_pool.c29
-rw-r--r--net/xfrm/xfrm_compat.c7
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_input.c19
-rw-r--r--net/xfrm/xfrm_interface_core.c2
-rw-r--r--net/xfrm/xfrm_policy.c11
-rw-r--r--net/xfrm/xfrm_proc.c2
-rw-r--r--net/xfrm/xfrm_replay.c3
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--net/xfrm/xfrm_sysctl.c7
-rw-r--r--net/xfrm/xfrm_user.c162
-rw-r--r--rust/Makefile23
-rw-r--r--rust/alloc/README.md36
-rw-r--r--rust/alloc/alloc.rs452
-rw-r--r--rust/alloc/boxed.rs2463
-rw-r--r--rust/alloc/collections/mod.rs160
-rw-r--r--rust/alloc/lib.rs288
-rw-r--r--rust/alloc/raw_vec.rs611
-rw-r--r--rust/alloc/slice.rs890
-rw-r--r--rust/alloc/vec/drain.rs255
-rw-r--r--rust/alloc/vec/extract_if.rs115
-rw-r--r--rust/alloc/vec/into_iter.rs454
-rw-r--r--rust/alloc/vec/is_zero.rs204
-rw-r--r--rust/alloc/vec/mod.rs3683
-rw-r--r--rust/alloc/vec/partial_eq.rs49
-rw-r--r--rust/alloc/vec/set_len_on_drop.rs35
-rw-r--r--rust/alloc/vec/spec_extend.rs119
-rw-r--r--rust/bindings/bindings_helper.h3
-rw-r--r--rust/helpers.c10
-rw-r--r--rust/kernel/alloc.rs73
-rw-r--r--rust/kernel/alloc/allocator.rs (renamed from rust/kernel/allocator.rs)19
-rw-r--r--rust/kernel/alloc/box_ext.rs56
-rw-r--r--rust/kernel/alloc/vec_ext.rs182
-rw-r--r--rust/kernel/error.rs14
-rw-r--r--rust/kernel/init.rs74
-rw-r--r--rust/kernel/init/macros.rs47
-rw-r--r--rust/kernel/lib.rs15
-rw-r--r--rust/kernel/net/phy.rs6
-rw-r--r--rust/kernel/prelude.rs2
-rw-r--r--rust/kernel/print.rs5
-rw-r--r--rust/kernel/std_vendor.rs7
-rw-r--r--rust/kernel/str.rs98
-rw-r--r--rust/kernel/sync.rs6
-rw-r--r--rust/kernel/sync/arc.rs189
-rw-r--r--rust/kernel/sync/condvar.rs3
-rw-r--r--rust/kernel/sync/lock.rs2
-rw-r--r--rust/kernel/sync/lock/mutex.rs4
-rw-r--r--rust/kernel/sync/lock/spinlock.rs4
-rw-r--r--rust/kernel/task.rs2
-rw-r--r--rust/kernel/time.rs63
-rw-r--r--rust/kernel/types.rs6
-rw-r--r--rust/kernel/workqueue.rs54
-rw-r--r--rust/macros/helpers.rs122
-rw-r--r--rust/macros/lib.rs12
-rw-r--r--rust/macros/module.rs190
-rw-r--r--rust/macros/pin_data.rs2
-rw-r--r--rust/macros/zeroable.rs1
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/hid/Makefile2
-rw-r--r--samples/kfifo/dma-example.c3
-rw-r--r--samples/landlock/sandboxer.c18
-rw-r--r--samples/rust/rust_minimal.rs6
-rw-r--r--samples/rust/rust_print.rs4
-rw-r--r--scripts/Kbuild.include3
-rw-r--r--scripts/Makefile2
-rw-r--r--scripts/Makefile.asm-generic6
-rw-r--r--scripts/Makefile.btf15
-rw-r--r--scripts/Makefile.build40
-rw-r--r--scripts/Makefile.clean2
-rw-r--r--scripts/Makefile.debug8
-rw-r--r--scripts/Makefile.extrawarn10
-rw-r--r--scripts/Makefile.host4
-rw-r--r--scripts/Makefile.lib50
-rw-r--r--scripts/Makefile.modfinal4
-rw-r--r--scripts/Makefile.modpost4
-rw-r--r--scripts/Makefile.vdsoinst9
-rw-r--r--scripts/Makefile.vmlinux3
-rwxr-xr-xscripts/bpf_doc.py4
-rwxr-xr-xscripts/check-variable-fonts.sh115
-rwxr-xr-xscripts/checkpatch.pl6
-rw-r--r--scripts/coccinelle/api/pm_runtime.cocci2
-rw-r--r--scripts/coccinelle/misc/minmax.cocci32
-rw-r--r--scripts/dtc/Makefile6
-rw-r--r--scripts/gcc-plugins/stackleak_plugin.c2
-rw-r--r--scripts/gdb/linux/Makefile2
-rw-r--r--scripts/gdb/linux/cpus.py11
-rw-r--r--scripts/gdb/linux/interrupts.py6
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/gdb/linux/utils.py2
-rwxr-xr-xscripts/generate_rust_analyzer.py2
-rw-r--r--scripts/generate_rust_target.rs15
-rw-r--r--scripts/genksyms/Makefile4
-rwxr-xr-xscripts/headers_install.sh1
-rw-r--r--scripts/kallsyms.c13
-rw-r--r--scripts/kconfig/Makefile8
-rw-r--r--scripts/kconfig/conf.c21
-rw-r--r--scripts/kconfig/confdata.c46
-rw-r--r--scripts/kconfig/expr.h6
-rw-r--r--scripts/kconfig/gconf.c80
-rw-r--r--scripts/kconfig/lexer.l1
-rw-r--r--scripts/kconfig/lkc.h15
-rw-r--r--scripts/kconfig/lkc_proto.h1
-rw-r--r--scripts/kconfig/lxdialog/checklist.c2
-rw-r--r--scripts/kconfig/lxdialog/dialog.h12
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c2
-rw-r--r--scripts/kconfig/lxdialog/menubox.c2
-rw-r--r--scripts/kconfig/lxdialog/textbox.c2
-rw-r--r--scripts/kconfig/lxdialog/util.c11
-rw-r--r--scripts/kconfig/lxdialog/yesno.c2
-rw-r--r--scripts/kconfig/mconf.c108
-rw-r--r--scripts/kconfig/menu.c73
-rw-r--r--scripts/kconfig/nconf.c118
-rw-r--r--scripts/kconfig/parser.y78
-rw-r--r--scripts/kconfig/symbol.c73
-rw-r--r--scripts/kconfig/tests/choice/Kconfig26
-rw-r--r--scripts/kconfig/tests/choice/__init__.py2
-rw-r--r--scripts/kconfig/tests/choice/allmod_expected_config4
-rw-r--r--scripts/kconfig/tests/choice/allyes_expected_config4
-rw-r--r--scripts/kconfig/tests/choice/oldask0_expected_stdout2
-rw-r--r--scripts/kconfig/tests/choice/oldask1_config1
-rw-r--r--scripts/kconfig/tests/choice/oldask1_expected_stdout6
-rwxr-xr-xscripts/kernel-doc9
-rwxr-xr-xscripts/make_fit.py290
-rwxr-xr-xscripts/min-tool-version.sh2
-rw-r--r--scripts/mod/Makefile1
-rw-r--r--scripts/mod/modpost.c12
-rw-r--r--scripts/module.lds.S8
-rwxr-xr-xscripts/package/buildtar34
-rwxr-xr-xscripts/sphinx-pre-install5
-rw-r--r--scripts/unifdef.c12
-rw-r--r--security/Kconfig.hardening15
-rw-r--r--security/apparmor/lsm.c1
-rw-r--r--security/integrity/evm/evm.h8
-rw-r--r--security/integrity/evm/evm_crypto.c25
-rw-r--r--security/integrity/evm/evm_main.c92
-rw-r--r--security/integrity/ima/ima.h12
-rw-r--r--security/integrity/ima/ima_api.c32
-rw-r--r--security/integrity/ima/ima_appraise.c4
-rw-r--r--security/integrity/ima/ima_crypto.c7
-rw-r--r--security/integrity/ima/ima_fs.c134
-rw-r--r--security/integrity/ima/ima_iint.c2
-rw-r--r--security/integrity/ima/ima_init.c6
-rw-r--r--security/integrity/ima/ima_kexec.c1
-rw-r--r--security/integrity/ima/ima_main.c44
-rw-r--r--security/integrity/ima/ima_template_lib.c27
-rw-r--r--security/integrity/integrity.h12
-rw-r--r--security/keys/gc.c8
-rw-r--r--security/keys/key.c35
-rw-r--r--security/keys/keyctl.c11
-rw-r--r--security/keys/sysctl.c1
-rw-r--r--security/keys/trusted-keys/Kconfig18
-rw-r--r--security/keys/trusted-keys/Makefile2
-rw-r--r--security/keys/trusted-keys/trusted_core.c6
-rw-r--r--security/keys/trusted-keys/trusted_dcp.c332
-rw-r--r--security/keys/trusted-keys/trusted_tpm1.c23
-rw-r--r--security/keys/trusted-keys/trusted_tpm2.c161
-rw-r--r--security/landlock/fs.c225
-rw-r--r--security/landlock/limits.h2
-rw-r--r--security/landlock/syscalls.c2
-rw-r--r--security/loadpin/loadpin.c1
-rw-r--r--security/security.c9
-rw-r--r--security/selinux/hooks.c60
-rw-r--r--security/selinux/netlabel.c5
-rw-r--r--security/selinux/selinuxfs.c48
-rw-r--r--security/selinux/ss/conditional.c18
-rw-r--r--security/selinux/ss/conditional.h2
-rw-r--r--security/selinux/ss/ebitmap.c50
-rw-r--r--security/selinux/ss/ebitmap.h38
-rw-r--r--security/selinux/ss/hashtab.c10
-rw-r--r--security/selinux/ss/hashtab.h4
-rw-r--r--security/selinux/ss/policydb.c24
-rw-r--r--security/selinux/ss/services.c3
-rw-r--r--security/selinux/ss/symtab.c22
-rw-r--r--security/selinux/xfrm.c7
-rw-r--r--security/smack/smack_lsm.c5
-rw-r--r--security/tomoyo/Makefile2
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/Makefile2
-rw-r--r--sound/ac97/bus.c1
-rw-r--r--sound/ac97_bus.c1
-rw-r--r--sound/aoa/codecs/Makefile6
-rw-r--r--sound/aoa/core/Makefile2
-rw-r--r--sound/aoa/fabrics/Makefile2
-rw-r--r--sound/aoa/soundbus/Makefile2
-rw-r--r--sound/aoa/soundbus/i2sbus/Makefile2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c2
-rw-r--r--sound/aoa/soundbus/i2sbus/pcm.c10
-rw-r--r--sound/arm/Makefile4
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c4
-rw-r--r--sound/atmel/Makefile2
-rw-r--r--sound/core/Makefile18
-rw-r--r--sound/core/control_led.c15
-rw-r--r--sound/core/init.c11
-rw-r--r--sound/core/oss/Makefile2
-rw-r--r--sound/core/pcm_dmaengine.c1
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/Makefile14
-rw-r--r--sound/core/seq/oss/Makefile2
-rw-r--r--sound/core/seq/seq_dummy.c24
-rw-r--r--sound/core/seq/seq_ump_convert.c2
-rw-r--r--sound/core/sound_kunit.c11
-rw-r--r--sound/drivers/Makefile18
-rw-r--r--sound/drivers/aloop.c9
-rw-r--r--sound/drivers/mpu401/Makefile4
-rw-r--r--sound/drivers/opl3/Makefile2
-rw-r--r--sound/drivers/opl4/Makefile4
-rw-r--r--sound/drivers/pcmtest.c1
-rw-r--r--sound/drivers/pcsp/Makefile2
-rw-r--r--sound/drivers/vx/Makefile2
-rw-r--r--sound/firewire/Makefile4
-rw-r--r--sound/firewire/amdtp-stream.c10
-rw-r--r--sound/firewire/bebob/Makefile2
-rw-r--r--sound/firewire/dice/Makefile2
-rw-r--r--sound/firewire/digi00x/Makefile2
-rw-r--r--sound/firewire/fireface/Makefile2
-rw-r--r--sound/firewire/fireworks/Makefile2
-rw-r--r--sound/firewire/motu/Makefile2
-rw-r--r--sound/firewire/oxfw/Makefile2
-rw-r--r--sound/firewire/tascam/Makefile2
-rw-r--r--sound/hda/Kconfig1
-rw-r--r--sound/hda/Makefile8
-rw-r--r--sound/hda/ext/Makefile2
-rw-r--r--sound/hda/hdac_controller.c127
-rw-r--r--sound/hda/intel-dsp-config.c43
-rw-r--r--sound/hda/intel-nhlt.c26
-rw-r--r--sound/hda/intel-sdw-acpi.c2
-rw-r--r--sound/i2c/Makefile6
-rw-r--r--sound/i2c/other/Makefile10
-rw-r--r--sound/isa/Makefile18
-rw-r--r--sound/isa/ad1816a/Makefile2
-rw-r--r--sound/isa/ad1848/Makefile2
-rw-r--r--sound/isa/cs423x/Makefile4
-rw-r--r--sound/isa/es1688/Makefile4
-rw-r--r--sound/isa/galaxy/Makefile4
-rw-r--r--sound/isa/gus/Makefile12
-rw-r--r--sound/isa/msnd/Makefile6
-rw-r--r--sound/isa/opti9xx/Makefile8
-rw-r--r--sound/isa/sb/Makefile18
-rw-r--r--sound/isa/sb/emu8000_patch.c13
-rw-r--r--sound/isa/wavefront/Makefile2
-rw-r--r--sound/isa/wss/Makefile2
-rw-r--r--sound/mips/Makefile4
-rw-r--r--sound/oss/dmasound/dmasound_atari.c2
-rw-r--r--sound/oss/dmasound/dmasound_paula.c9
-rw-r--r--sound/parisc/Makefile2
-rw-r--r--sound/pci/Makefile48
-rw-r--r--sound/pci/ali5451/Makefile2
-rw-r--r--sound/pci/asihpi/Makefile2
-rw-r--r--sound/pci/au88x0/Makefile6
-rw-r--r--sound/pci/aw2/Makefile2
-rw-r--r--sound/pci/ca0106/Makefile2
-rw-r--r--sound/pci/ctxfi/Makefile2
-rw-r--r--sound/pci/echoaudio/Makefile28
-rw-r--r--sound/pci/emu10k1/Makefile6
-rw-r--r--sound/pci/emu10k1/emu10k1.c3
-rw-r--r--sound/pci/emu10k1/emu10k1_callback.c10
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c225
-rw-r--r--sound/pci/emu10k1/emu10k1_patch.c209
-rw-r--r--sound/pci/emu10k1/emumixer.c18
-rw-r--r--sound/pci/emu10k1/emuproc.c9
-rw-r--r--sound/pci/emu10k1/io.c102
-rw-r--r--sound/pci/emu10k1/memory.c55
-rw-r--r--sound/pci/hda/Kconfig2
-rw-r--r--sound/pci/hda/Makefile52
-rw-r--r--sound/pci/hda/cirrus_scodec_test.c1
-rw-r--r--sound/pci/hda/cs35l41_hda.c417
-rw-r--r--sound/pci/hda/cs35l41_hda.h6
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c26
-rw-r--r--sound/pci/hda/cs35l56_hda.c20
-rw-r--r--sound/pci/hda/cs35l56_hda_i2c.c13
-rw-r--r--sound/pci/hda/cs35l56_hda_spi.c13
-rw-r--r--sound/pci/hda/hda_codec.c37
-rw-r--r--sound/pci/hda/hda_component.c16
-rw-r--r--sound/pci/hda/hda_component.h7
-rw-r--r--sound/pci/hda/hda_controller.c11
-rw-r--r--sound/pci/hda/hda_controller.h1
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.c69
-rw-r--r--sound/pci/hda/hda_generic.c4
-rw-r--r--sound/pci/hda/hda_generic.h2
-rw-r--r--sound/pci/hda/hda_intel.c55
-rw-r--r--sound/pci/hda/hda_intel_trace.h2
-rw-r--r--sound/pci/hda/hda_sysfs.c4
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c4
-rw-r--r--sound/pci/hda/patch_cirrus.c4
-rw-r--r--sound/pci/hda/patch_conexant.c4
-rw-r--r--sound/pci/hda/patch_cs8409.c8
-rw-r--r--sound/pci/hda/patch_hdmi.c11
-rw-r--r--sound/pci/hda/patch_realtek.c224
-rw-r--r--sound/pci/hda/patch_sigmatel.c8
-rw-r--r--sound/pci/hda/patch_via.c6
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c122
-rw-r--r--sound/pci/ice1712/Makefile6
-rw-r--r--sound/pci/korg1212/Makefile2
-rw-r--r--sound/pci/lx6464es/Makefile2
-rw-r--r--sound/pci/mixart/Makefile2
-rw-r--r--sound/pci/nm256/Makefile2
-rw-r--r--sound/pci/oxygen/Makefile8
-rw-r--r--sound/pci/pcxhr/Makefile2
-rw-r--r--sound/pci/riptide/Makefile2
-rw-r--r--sound/pci/rme9652/Makefile6
-rw-r--r--sound/pci/trident/Makefile2
-rw-r--r--sound/pci/vx222/Makefile2
-rw-r--r--sound/pci/ymfpci/Makefile2
-rw-r--r--sound/pcmcia/pdaudiocf/Makefile2
-rw-r--r--sound/pcmcia/vx/Makefile2
-rw-r--r--sound/ppc/Makefile2
-rw-r--r--sound/sh/Makefile4
-rw-r--r--sound/sh/aica.c17
-rw-r--r--sound/soc/Kconfig8
-rw-r--r--sound/soc/Makefile20
-rw-r--r--sound/soc/adi/Makefile4
-rw-r--r--sound/soc/amd/Kconfig21
-rw-r--r--sound/soc/amd/Makefile12
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c2
-rw-r--r--sound/soc/amd/acp/Makefile26
-rw-r--r--sound/soc/amd/acp/acp-legacy-common.c96
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c2
-rw-r--r--sound/soc/amd/acp/acp-pci.c16
-rw-r--r--sound/soc/amd/acp/amd.h10
-rw-r--r--sound/soc/amd/acp/chip_offset_byte.h1
-rw-r--r--sound/soc/amd/acp3x-rt5682-max9836.c2
-rw-r--r--sound/soc/amd/ps/Makefile8
-rw-r--r--sound/soc/amd/ps/ps-sdw-dma.c2
-rw-r--r--sound/soc/amd/raven/Makefile6
-rw-r--r--sound/soc/amd/renoir/Makefile6
-rw-r--r--sound/soc/amd/rpl/Makefile2
-rw-r--r--sound/soc/amd/vangogh/Makefile8
-rw-r--r--sound/soc/amd/yc/Makefile6
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/apple/Makefile2
-rw-r--r--sound/soc/atmel/Makefile30
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c3
-rw-r--r--sound/soc/au1x/Makefile16
-rw-r--r--sound/soc/bcm/Makefile6
-rw-r--r--sound/soc/cirrus/Makefile6
-rw-r--r--sound/soc/codecs/Kconfig25
-rw-r--r--sound/soc/codecs/Makefile784
-rw-r--r--sound/soc/codecs/ab8500-codec.c1
-rw-r--r--sound/soc/codecs/adau1372-i2c.c2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/adau1701.c8
-rw-r--r--sound/soc/codecs/adau7118-i2c.c2
-rw-r--r--sound/soc/codecs/adav803.c2
-rw-r--r--sound/soc/codecs/ak4118.c2
-rw-r--r--sound/soc/codecs/ak4535.c2
-rw-r--r--sound/soc/codecs/ak4641.c2
-rw-r--r--sound/soc/codecs/ak4671.c2
-rw-r--r--sound/soc/codecs/cs-amp-lib.c5
-rw-r--r--sound/soc/codecs/cs35l32.c2
-rw-r--r--sound/soc/codecs/cs35l33.c2
-rw-r--r--sound/soc/codecs/cs35l34.c2
-rw-r--r--sound/soc/codecs/cs35l35.c2
-rw-r--r--sound/soc/codecs/cs35l36.c2
-rw-r--r--sound/soc/codecs/cs35l41-i2c.c8
-rw-r--r--sound/soc/codecs/cs35l41.c33
-rw-r--r--sound/soc/codecs/cs35l45-i2c.c2
-rw-r--r--sound/soc/codecs/cs35l56-i2c.c2
-rw-r--r--sound/soc/codecs/cs35l56-sdw.c2
-rw-r--r--sound/soc/codecs/cs35l56-shared.c86
-rw-r--r--sound/soc/codecs/cs35l56.c40
-rw-r--r--sound/soc/codecs/cs4265.c2
-rw-r--r--sound/soc/codecs/cs4270.c2
-rw-r--r--sound/soc/codecs/cs4271-i2c.c2
-rw-r--r--sound/soc/codecs/cs42l42-i2c.c2
-rw-r--r--sound/soc/codecs/cs42l43.c12
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c2
-rw-r--r--sound/soc/codecs/cs42l52.c2
-rw-r--r--sound/soc/codecs/cs42l56.c2
-rw-r--r--sound/soc/codecs/cs42l73.c2
-rw-r--r--sound/soc/codecs/cs43130.c8
-rw-r--r--sound/soc/codecs/cs4341.c2
-rw-r--r--sound/soc/codecs/cs4349.c2
-rw-r--r--sound/soc/codecs/cs53l30.c2
-rw-r--r--sound/soc/codecs/cx2072x.c4
-rw-r--r--sound/soc/codecs/da7210.c2
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/da7219-aad.c6
-rw-r--r--sound/soc/codecs/da732x.c2
-rw-r--r--sound/soc/codecs/da9055.c2
-rw-r--r--sound/soc/codecs/es8316.c2
-rw-r--r--sound/soc/codecs/es8326.c80
-rw-r--r--sound/soc/codecs/es8326.h2
-rw-r--r--sound/soc/codecs/es8328-i2c.c4
-rw-r--r--sound/soc/codecs/hda-dai.c2
-rw-r--r--sound/soc/codecs/hda.c2
-rw-r--r--sound/soc/codecs/hda.h2
-rw-r--r--sound/soc/codecs/hdac_hda.c44
-rw-r--r--sound/soc/codecs/isabelle.c2
-rw-r--r--sound/soc/codecs/lm4857.c2
-rw-r--r--sound/soc/codecs/lm49453.c2
-rw-r--r--sound/soc/codecs/max9768.c2
-rw-r--r--sound/soc/codecs/max98371.c2
-rw-r--r--sound/soc/codecs/max98373-i2c.c2
-rw-r--r--sound/soc/codecs/max98373-sdw.c1
-rw-r--r--sound/soc/codecs/max98388.c2
-rw-r--r--sound/soc/codecs/max98390.c2
-rw-r--r--sound/soc/codecs/max9850.c2
-rw-r--r--sound/soc/codecs/max98520.c2
-rw-r--r--sound/soc/codecs/max9867.c2
-rw-r--r--sound/soc/codecs/max9877.c2
-rw-r--r--sound/soc/codecs/max98925.c2
-rw-r--r--sound/soc/codecs/max98926.c2
-rw-r--r--sound/soc/codecs/max98927.c2
-rw-r--r--sound/soc/codecs/ml26124.c2
-rw-r--r--sound/soc/codecs/mt6660.c2
-rw-r--r--sound/soc/codecs/nau8325.c900
-rw-r--r--sound/soc/codecs/nau8325.h391
-rw-r--r--sound/soc/codecs/nau8540.c2
-rw-r--r--sound/soc/codecs/nau8810.c6
-rw-r--r--sound/soc/codecs/nau8821.c21
-rw-r--r--sound/soc/codecs/nau8821.h1
-rw-r--r--sound/soc/codecs/nau8822.c2
-rw-r--r--sound/soc/codecs/nau8822.h1
-rw-r--r--sound/soc/codecs/nau8824.c2
-rw-r--r--sound/soc/codecs/nau8825.c2
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/pcm1789-i2c.c2
-rw-r--r--sound/soc/codecs/pcm179x-i2c.c2
-rw-r--r--sound/soc/codecs/pcm6240.c2217
-rw-r--r--sound/soc/codecs/pcm6240.h252
-rw-r--r--sound/soc/codecs/rk3308_codec.c974
-rw-r--r--sound/soc/codecs/rk3308_codec.h579
-rw-r--r--sound/soc/codecs/rt1011.c2
-rw-r--r--sound/soc/codecs/rt1015.c2
-rw-r--r--sound/soc/codecs/rt1016.c2
-rw-r--r--sound/soc/codecs/rt1017-sdca-sdw.c1
-rw-r--r--sound/soc/codecs/rt1019.c2
-rw-r--r--sound/soc/codecs/rt1305.c4
-rw-r--r--sound/soc/codecs/rt1308-sdw.c1
-rw-r--r--sound/soc/codecs/rt1308.c2
-rw-r--r--sound/soc/codecs/rt1316-sdw.c9
-rw-r--r--sound/soc/codecs/rt1318-sdw.c9
-rw-r--r--sound/soc/codecs/rt274.c2
-rw-r--r--sound/soc/codecs/rt286.c4
-rw-r--r--sound/soc/codecs/rt298.c2
-rw-r--r--sound/soc/codecs/rt5514.c2
-rw-r--r--sound/soc/codecs/rt5616.c2
-rw-r--r--sound/soc/codecs/rt5631.c4
-rw-r--r--sound/soc/codecs/rt5640.c6
-rw-r--r--sound/soc/codecs/rt5645.c29
-rw-r--r--sound/soc/codecs/rt5651.c2
-rw-r--r--sound/soc/codecs/rt5659.c4
-rw-r--r--sound/soc/codecs/rt5660.c2
-rw-r--r--sound/soc/codecs/rt5663.c2
-rw-r--r--sound/soc/codecs/rt5665.c2
-rw-r--r--sound/soc/codecs/rt5668.c2
-rw-r--r--sound/soc/codecs/rt5670.c6
-rw-r--r--sound/soc/codecs/rt5682-i2c.c2
-rw-r--r--sound/soc/codecs/rt5682-sdw.c17
-rw-r--r--sound/soc/codecs/rt5682s.c2
-rw-r--r--sound/soc/codecs/rt700-sdw.c1
-rw-r--r--sound/soc/codecs/rt700.c16
-rw-r--r--sound/soc/codecs/rt711-sdca-sdw.c7
-rw-r--r--sound/soc/codecs/rt711-sdca.c18
-rw-r--r--sound/soc/codecs/rt711-sdw.c9
-rw-r--r--sound/soc/codecs/rt711.c16
-rw-r--r--sound/soc/codecs/rt712-sdca-dmic.c25
-rw-r--r--sound/soc/codecs/rt712-sdca-sdw.c8
-rw-r--r--sound/soc/codecs/rt712-sdca.c20
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c5
-rw-r--r--sound/soc/codecs/rt715-sdca.c58
-rw-r--r--sound/soc/codecs/rt715-sdw.c6
-rw-r--r--sound/soc/codecs/rt715.c24
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c5
-rw-r--r--sound/soc/codecs/rt722-sdca.c48
-rw-r--r--sound/soc/codecs/rt722-sdca.h3
-rw-r--r--sound/soc/codecs/sdw-mockup.c1
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sigmadsp.c1
-rw-r--r--sound/soc/codecs/sma1303.c2
-rw-r--r--sound/soc/codecs/src4xxx-i2c.c2
-rw-r--r--sound/soc/codecs/ssm2518.c2
-rw-r--r--sound/soc/codecs/ssm4567.c2
-rw-r--r--sound/soc/codecs/sta32x.c6
-rw-r--r--sound/soc/codecs/sta350.c2
-rw-r--r--sound/soc/codecs/sta529.c2
-rw-r--r--sound/soc/codecs/tas2552.c2
-rw-r--r--sound/soc/codecs/tas2764.c2
-rw-r--r--sound/soc/codecs/tas2770.c2
-rw-r--r--sound/soc/codecs/tas2780.c5
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c6
-rw-r--r--sound/soc/codecs/tas5086.c2
-rw-r--r--sound/soc/codecs/tas6424.c2
-rw-r--r--sound/soc/codecs/tda7419.c2
-rw-r--r--sound/soc/codecs/tfa9879.c2
-rw-r--r--sound/soc/codecs/tlv320aic23-i2c.c2
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c1
-rw-r--r--sound/soc/codecs/tlv320aic3x-spi.c1
-rw-r--r--sound/soc/codecs/ts3a227e.c2
-rw-r--r--sound/soc/codecs/tscs42xx.c4
-rw-r--r--sound/soc/codecs/tscs454.c2
-rw-r--r--sound/soc/codecs/uda1380.c2
-rw-r--r--sound/soc/codecs/wcd934x.c1
-rw-r--r--sound/soc/codecs/wm1250-ev1.c2
-rw-r--r--sound/soc/codecs/wm2000.c2
-rw-r--r--sound/soc/codecs/wm2200.c2
-rw-r--r--sound/soc/codecs/wm5100.c2
-rw-r--r--sound/soc/codecs/wm8510.c2
-rw-r--r--sound/soc/codecs/wm8523.c2
-rw-r--r--sound/soc/codecs/wm8711.c2
-rw-r--r--sound/soc/codecs/wm8728.c2
-rw-r--r--sound/soc/codecs/wm8731-i2c.c2
-rw-r--r--sound/soc/codecs/wm8737.c2
-rw-r--r--sound/soc/codecs/wm8741.c2
-rw-r--r--sound/soc/codecs/wm8750.c4
-rw-r--r--sound/soc/codecs/wm8753.c2
-rw-r--r--sound/soc/codecs/wm8804-i2c.c2
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm8940.c2
-rw-r--r--sound/soc/codecs/wm8955.c2
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8961.c2
-rw-r--r--sound/soc/codecs/wm8962.c14
-rw-r--r--sound/soc/codecs/wm8971.c2
-rw-r--r--sound/soc/codecs/wm8974.c2
-rw-r--r--sound/soc/codecs/wm8978.c2
-rw-r--r--sound/soc/codecs/wm8983.c2
-rw-r--r--sound/soc/codecs/wm8988.c2
-rw-r--r--sound/soc/codecs/wm8990.c2
-rw-r--r--sound/soc/codecs/wm8991.c2
-rw-r--r--sound/soc/codecs/wm8993.c14
-rw-r--r--sound/soc/codecs/wm8994.c8
-rw-r--r--sound/soc/codecs/wm8995.c2
-rw-r--r--sound/soc/codecs/wm8996.c16
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9090.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c36
-rw-r--r--sound/soc/codecs/wsa881x.c1
-rw-r--r--sound/soc/fsl/Makefile52
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c10
-rw-r--r--sound/soc/fsl/fsl_rpmsg.c43
-rw-r--r--sound/soc/fsl/fsl_ssi.c4
-rw-r--r--sound/soc/fsl/imx-audio-rpmsg.c21
-rw-r--r--sound/soc/fsl/imx-card.c6
-rw-r--r--sound/soc/fsl/imx-es8328.c17
-rw-r--r--sound/soc/fsl/imx-hdmi.c2
-rw-r--r--sound/soc/fsl/imx-pcm-rpmsg.c19
-rw-r--r--sound/soc/fsl/imx-rpmsg.c28
-rw-r--r--sound/soc/generic/Makefile12
-rw-r--r--sound/soc/generic/audio-graph-card2.c4
-rw-r--r--sound/soc/generic/simple-card-utils.c2
-rw-r--r--sound/soc/img/img-i2s-in.c2
-rw-r--r--sound/soc/img/img-i2s-out.c2
-rw-r--r--sound/soc/intel/Kconfig1
-rw-r--r--sound/soc/intel/atom/Makefile2
-rw-r--r--sound/soc/intel/atom/sst/Makefile6
-rw-r--r--sound/soc/intel/avs/Makefile10
-rw-r--r--sound/soc/intel/avs/apl.c22
-rw-r--r--sound/soc/intel/avs/avs.h14
-rw-r--r--sound/soc/intel/avs/board_selection.c9
-rw-r--r--sound/soc/intel/avs/boards/Makefile34
-rw-r--r--sound/soc/intel/avs/boards/da7219.c3
-rw-r--r--sound/soc/intel/avs/boards/dmic.c3
-rw-r--r--sound/soc/intel/avs/boards/es8336.c7
-rw-r--r--sound/soc/intel/avs/boards/hdaudio.c8
-rw-r--r--sound/soc/intel/avs/boards/i2s_test.c82
-rw-r--r--sound/soc/intel/avs/boards/max98357a.c3
-rw-r--r--sound/soc/intel/avs/boards/max98373.c3
-rw-r--r--sound/soc/intel/avs/boards/max98927.c3
-rw-r--r--sound/soc/intel/avs/boards/nau8825.c7
-rw-r--r--sound/soc/intel/avs/boards/probe.c3
-rw-r--r--sound/soc/intel/avs/boards/rt274.c8
-rw-r--r--sound/soc/intel/avs/boards/rt286.c9
-rw-r--r--sound/soc/intel/avs/boards/rt298.c9
-rw-r--r--sound/soc/intel/avs/boards/rt5514.c3
-rw-r--r--sound/soc/intel/avs/boards/rt5663.c5
-rw-r--r--sound/soc/intel/avs/boards/rt5682.c5
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c8
-rw-r--r--sound/soc/intel/avs/cldma.c46
-rw-r--r--sound/soc/intel/avs/cldma.h3
-rw-r--r--sound/soc/intel/avs/cnl.c93
-rw-r--r--sound/soc/intel/avs/control.c2
-rw-r--r--sound/soc/intel/avs/control.h2
-rw-r--r--sound/soc/intel/avs/core.c101
-rw-r--r--sound/soc/intel/avs/debugfs.c2
-rw-r--r--sound/soc/intel/avs/dsp.c2
-rw-r--r--sound/soc/intel/avs/icl.c19
-rw-r--r--sound/soc/intel/avs/ipc.c50
-rw-r--r--sound/soc/intel/avs/loader.c8
-rw-r--r--sound/soc/intel/avs/messages.c2
-rw-r--r--sound/soc/intel/avs/messages.h49
-rw-r--r--sound/soc/intel/avs/path.c47
-rw-r--r--sound/soc/intel/avs/path.h2
-rw-r--r--sound/soc/intel/avs/pcm.c274
-rw-r--r--sound/soc/intel/avs/probes.c16
-rw-r--r--sound/soc/intel/avs/registers.h2
-rw-r--r--sound/soc/intel/avs/skl.c75
-rw-r--r--sound/soc/intel/avs/sysfs.c2
-rw-r--r--sound/soc/intel/avs/tgl.c5
-rw-r--r--sound/soc/intel/avs/topology.c4
-rw-r--r--sound/soc/intel/avs/topology.h2
-rw-r--r--sound/soc/intel/avs/trace.c2
-rw-r--r--sound/soc/intel/avs/utils.c10
-rw-r--r--sound/soc/intel/avs/utils.h2
-rw-r--r--sound/soc/intel/boards/Kconfig70
-rw-r--r--sound/soc/intel/boards/Makefile98
-rw-r--r--sound/soc/intel/boards/bdw_rt286.c2
-rw-r--r--sound/soc/intel/boards/bridge_cs35l56.c137
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c182
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c14
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c609
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c691
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c2
-rw-r--r--sound/soc/intel/boards/hsw_rt5640.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c1
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c4
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c1
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c4
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c3
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h1
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c50
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c3
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c4
-rw-r--r--sound/soc/intel/boards/skl_rt286.c5
-rw-r--r--sound/soc/intel/boards/sof_board_helpers.c163
-rw-r--r--sound/soc/intel/boards/sof_board_helpers.h108
-rw-r--r--sound/soc/intel/boards/sof_cirrus_common.h2
-rw-r--r--sound/soc/intel/boards/sof_cs42l42.c89
-rw-r--r--sound/soc/intel/boards/sof_da7219.c519
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c137
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.h9
-rw-r--r--sound/soc/intel/boards/sof_nau8825.c106
-rw-r--r--sound/soc/intel/boards/sof_nuvoton_common.h2
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.c330
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.h7
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c428
-rw-r--r--sound/soc/intel/boards/sof_sdw.c1369
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h98
-rw-r--r--sound/soc/intel/boards/sof_sdw_cs42l42.c27
-rw-r--r--sound/soc/intel/boards/sof_sdw_cs42l43.c66
-rw-r--r--sound/soc/intel/boards/sof_sdw_cs_amp.c16
-rw-r--r--sound/soc/intel/boards/sof_sdw_maxim.c29
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt5682.c27
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt700.c29
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt711.c28
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt712_sdca.c49
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715.c26
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715_sdca.c26
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt722_sdca.c60
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt_amp.c43
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt_dmic.c54
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c49
-rw-r--r--sound/soc/intel/boards/sof_ssp_amp.c270
-rw-r--r--sound/soc/intel/boards/sof_ssp_common.c122
-rw-r--r--sound/soc/intel/catpt/Makefile2
-rw-r--r--sound/soc/intel/catpt/core.h2
-rw-r--r--sound/soc/intel/catpt/device.c2
-rw-r--r--sound/soc/intel/catpt/dsp.c2
-rw-r--r--sound/soc/intel/catpt/ipc.c2
-rw-r--r--sound/soc/intel/catpt/loader.c2
-rw-r--r--sound/soc/intel/catpt/messages.c2
-rw-r--r--sound/soc/intel/catpt/messages.h2
-rw-r--r--sound/soc/intel/catpt/pcm.c2
-rw-r--r--sound/soc/intel/catpt/registers.h2
-rw-r--r--sound/soc/intel/catpt/sysfs.c2
-rw-r--r--sound/soc/intel/catpt/trace.h2
-rw-r--r--sound/soc/intel/common/Makefile8
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c148
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c24
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c12
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c104
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c244
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-rpl-match.c97
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ssp-common.c159
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c85
-rw-r--r--sound/soc/intel/common/sst-dsp-priv.h2
-rw-r--r--sound/soc/intel/common/sst-dsp.c2
-rw-r--r--sound/soc/intel/common/sst-dsp.h2
-rw-r--r--sound/soc/intel/common/sst-ipc.c2
-rw-r--r--sound/soc/intel/common/sst-ipc.h2
-rw-r--r--sound/soc/intel/keembay/Makefile2
-rw-r--r--sound/soc/intel/skylake/Makefile6
-rw-r--r--sound/soc/intel/skylake/skl-topology.c169
-rw-r--r--sound/soc/jz4740/Makefile2
-rw-r--r--sound/soc/kirkwood/Makefile4
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c5
-rw-r--r--sound/soc/loongson/Makefile4
-rw-r--r--sound/soc/loongson/loongson_card.c2
-rw-r--r--sound/soc/loongson/loongson_dma.c2
-rw-r--r--sound/soc/loongson/loongson_i2s_pci.c1
-rw-r--r--sound/soc/mediatek/Kconfig24
-rw-r--r--sound/soc/mediatek/common/Makefile4
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c18
-rw-r--r--sound/soc/mediatek/common/mtk-dai-adda-common.c70
-rw-r--r--sound/soc/mediatek/common/mtk-dai-adda-common.h45
-rw-r--r--sound/soc/mediatek/common/mtk-dsp-sof-common.c15
-rw-r--r--sound/soc/mediatek/common/mtk-dsp-sof-common.h1
-rw-r--r--sound/soc/mediatek/common/mtk-soc-card.h7
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.c205
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.h42
-rw-r--r--sound/soc/mediatek/mt2701/Makefile2
-rw-r--r--sound/soc/mediatek/mt6797/Makefile2
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c14
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-adda.c85
-rw-r--r--sound/soc/mediatek/mt7986/Makefile2
-rw-r--r--sound/soc/mediatek/mt7986/mt7986-afe-pcm.c18
-rw-r--r--sound/soc/mediatek/mt8183/Makefile2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c14
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-adda.c90
-rw-r--r--sound/soc/mediatek/mt8186/Makefile5
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-pcm.c14
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-adda.c92
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c1189
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366.c (renamed from sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c)580
-rw-r--r--sound/soc/mediatek/mt8188/Makefile2
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-afe-pcm.c29
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-dai-adda.c89
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-mt6359.c209
-rw-r--r--sound/soc/mediatek/mt8192/Makefile2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c114
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-adda.c90
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-tdm.c4
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c301
-rw-r--r--sound/soc/mediatek/mt8195/Makefile2
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-pcm.c31
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-dai-adda.c90
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c491
-rw-r--r--sound/soc/meson/Kconfig1
-rw-r--r--sound/soc/meson/Makefile50
-rw-r--r--sound/soc/meson/aiu-fifo-i2s.c2
-rw-r--r--sound/soc/meson/aiu-fifo-spdif.c2
-rw-r--r--sound/soc/meson/aiu-fifo.c2
-rw-r--r--sound/soc/meson/aiu-fifo.h2
-rw-r--r--sound/soc/meson/axg-card.c1
-rw-r--r--sound/soc/meson/axg-fifo.c33
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c40
-rw-r--r--sound/soc/meson/axg-tdm-interface.c38
-rw-r--r--sound/soc/meson/axg-tdm.h5
-rw-r--r--sound/soc/mxs/Makefile6
-rw-r--r--sound/soc/pxa/Makefile12
-rw-r--r--sound/soc/qcom/Makefile38
-rw-r--r--sound/soc/qcom/apq8016_sbc.c4
-rw-r--r--sound/soc/qcom/common.c2
-rw-r--r--sound/soc/qcom/qdsp6/Makefile4
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6dsp-common.c2
-rw-r--r--sound/soc/qcom/sc7180.c10
-rw-r--r--sound/soc/qcom/sc7280.c12
-rw-r--r--sound/soc/qcom/sc8280xp.c10
-rw-r--r--sound/soc/qcom/sdw.c8
-rw-r--r--sound/soc/qcom/sm8250.c10
-rw-r--r--sound/soc/qcom/x1e80100.c8
-rw-r--r--sound/soc/rockchip/Makefile16
-rw-r--r--sound/soc/samsung/Makefile38
-rw-r--r--sound/soc/samsung/i2s.c1
-rw-r--r--sound/soc/samsung/midas_wm1811.c2
-rw-r--r--sound/soc/sh/Makefile16
-rw-r--r--sound/soc/sh/rcar/Makefile2
-rw-r--r--sound/soc/sh/rcar/cmd.c6
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/sh/rcar/ctu.c6
-rw-r--r--sound/soc/sh/rcar/dma.c6
-rw-r--r--sound/soc/sh/rcar/dvc.c6
-rw-r--r--sound/soc/sh/rcar/gen.c519
-rw-r--r--sound/soc/sh/rcar/mix.c6
-rw-r--r--sound/soc/sh/rcar/rsnd.h22
-rw-r--r--sound/soc/sh/rcar/src.c12
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/sh/rcar/ssiu.c2
-rw-r--r--sound/soc/soc-card-test.c186
-rw-r--r--sound/soc/soc-card.c21
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/soc-dai.c2
-rw-r--r--sound/soc/soc-dapm.c54
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c4
-rw-r--r--sound/soc/soc-jack.c23
-rw-r--r--sound/soc/soc-ops.c2
-rw-r--r--sound/soc/soc-pcm.c11
-rw-r--r--sound/soc/soc-topology-test.c3
-rw-r--r--sound/soc/soc-topology.c252
-rw-r--r--sound/soc/sof/Makefile30
-rw-r--r--sound/soc/sof/amd/Makefile20
-rw-r--r--sound/soc/sof/amd/acp-common.c2
-rw-r--r--sound/soc/sof/amd/acp-loader.c2
-rw-r--r--sound/soc/sof/amd/acp.c8
-rw-r--r--sound/soc/sof/amd/acp.h2
-rw-r--r--sound/soc/sof/control.c2
-rw-r--r--sound/soc/sof/core.c20
-rw-r--r--sound/soc/sof/debug.c60
-rw-r--r--sound/soc/sof/fw-file-profile.c2
-rw-r--r--sound/soc/sof/imx/Makefile8
-rw-r--r--sound/soc/sof/imx/imx-common.c24
-rw-r--r--sound/soc/sof/imx/imx-common.h9
-rw-r--r--sound/soc/sof/imx/imx8.c45
-rw-r--r--sound/soc/sof/imx/imx8m.c42
-rw-r--r--sound/soc/sof/imx/imx8ulp.c44
-rw-r--r--sound/soc/sof/intel/Kconfig25
-rw-r--r--sound/soc/sof/intel/Makefile35
-rw-r--r--sound/soc/sof/intel/apl.c5
-rw-r--r--sound/soc/sof/intel/atom.c2
-rw-r--r--sound/soc/sof/intel/atom.h2
-rw-r--r--sound/soc/sof/intel/bdw.c4
-rw-r--r--sound/soc/sof/intel/byt.c6
-rw-r--r--sound/soc/sof/intel/cnl.c17
-rw-r--r--sound/soc/sof/intel/ext_manifest.h2
-rw-r--r--sound/soc/sof/intel/hda-bus.c9
-rw-r--r--sound/soc/sof/intel/hda-codec.c17
-rw-r--r--sound/soc/sof/intel/hda-common-ops.c8
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c21
-rw-r--r--sound/soc/sof/intel/hda-dai-ops.c54
-rw-r--r--sound/soc/sof/intel/hda-dai.c132
-rw-r--r--sound/soc/sof/intel/hda-dsp.c524
-rw-r--r--sound/soc/sof/intel/hda-ipc.c117
-rw-r--r--sound/soc/sof/intel/hda-ipc.h2
-rw-r--r--sound/soc/sof/intel/hda-loader-skl.c2
-rw-r--r--sound/soc/sof/intel/hda-loader.c122
-rw-r--r--sound/soc/sof/intel/hda-mlink.c2
-rw-r--r--sound/soc/sof/intel/hda-pcm.c37
-rw-r--r--sound/soc/sof/intel/hda-probes.c4
-rw-r--r--sound/soc/sof/intel/hda-stream.c109
-rw-r--r--sound/soc/sof/intel/hda-trace.c5
-rw-r--r--sound/soc/sof/intel/hda.c830
-rw-r--r--sound/soc/sof/intel/hda.h53
-rw-r--r--sound/soc/sof/intel/icl.c6
-rw-r--r--sound/soc/sof/intel/lnl.c69
-rw-r--r--sound/soc/sof/intel/lnl.h15
-rw-r--r--sound/soc/sof/intel/mtl.c101
-rw-r--r--sound/soc/sof/intel/mtl.h60
-rw-r--r--sound/soc/sof/intel/pci-apl.c3
-rw-r--r--sound/soc/sof/intel/pci-cnl.c3
-rw-r--r--sound/soc/sof/intel/pci-icl.c4
-rw-r--r--sound/soc/sof/intel/pci-lnl.c8
-rw-r--r--sound/soc/sof/intel/pci-mtl.c3
-rw-r--r--sound/soc/sof/intel/pci-skl.c3
-rw-r--r--sound/soc/sof/intel/pci-tgl.c4
-rw-r--r--sound/soc/sof/intel/pci-tng.c4
-rw-r--r--sound/soc/sof/intel/shim.h5
-rw-r--r--sound/soc/sof/intel/skl.c2
-rw-r--r--sound/soc/sof/intel/telemetry.c3
-rw-r--r--sound/soc/sof/intel/telemetry.h2
-rw-r--r--sound/soc/sof/intel/tgl.c27
-rw-r--r--sound/soc/sof/intel/tracepoints.c5
-rw-r--r--sound/soc/sof/iomem-utils.c2
-rw-r--r--sound/soc/sof/ipc.c2
-rw-r--r--sound/soc/sof/ipc3-control.c2
-rw-r--r--sound/soc/sof/ipc3-dtrace.c2
-rw-r--r--sound/soc/sof/ipc3-loader.c2
-rw-r--r--sound/soc/sof/ipc3-pcm.c3
-rw-r--r--sound/soc/sof/ipc3-priv.h8
-rw-r--r--sound/soc/sof/ipc3-topology.c2
-rw-r--r--sound/soc/sof/ipc3.c2
-rw-r--r--sound/soc/sof/ipc4-control.c2
-rw-r--r--sound/soc/sof/ipc4-fw-reg.h2
-rw-r--r--sound/soc/sof/ipc4-loader.c10
-rw-r--r--sound/soc/sof/ipc4-mtrace.c13
-rw-r--r--sound/soc/sof/ipc4-pcm.c300
-rw-r--r--sound/soc/sof/ipc4-priv.h18
-rw-r--r--sound/soc/sof/ipc4-telemetry.c2
-rw-r--r--sound/soc/sof/ipc4-telemetry.h2
-rw-r--r--sound/soc/sof/ipc4-topology.c401
-rw-r--r--sound/soc/sof/ipc4-topology.h5
-rw-r--r--sound/soc/sof/ipc4.c2
-rw-r--r--sound/soc/sof/loader.c2
-rw-r--r--sound/soc/sof/mediatek/mt8186/Makefile2
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c2
-rw-r--r--sound/soc/sof/mediatek/mt8195/Makefile2
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c2
-rw-r--r--sound/soc/sof/nocodec.c2
-rw-r--r--sound/soc/sof/ops.c2
-rw-r--r--sound/soc/sof/ops.h26
-rw-r--r--sound/soc/sof/pcm.c83
-rw-r--r--sound/soc/sof/pm.c2
-rw-r--r--sound/soc/sof/sof-acpi-dev.c2
-rw-r--r--sound/soc/sof/sof-acpi-dev.h2
-rw-r--r--sound/soc/sof/sof-audio.c31
-rw-r--r--sound/soc/sof/sof-audio.h15
-rw-r--r--sound/soc/sof/sof-client-ipc-flood-test.c19
-rw-r--r--sound/soc/sof/sof-client-ipc-kernel-injector.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-msg-injector.c2
-rw-r--r--sound/soc/sof/sof-client-probes-ipc3.c2
-rw-r--r--sound/soc/sof/sof-client-probes-ipc4.c2
-rw-r--r--sound/soc/sof/sof-client-probes.c2
-rw-r--r--sound/soc/sof/sof-client.c2
-rw-r--r--sound/soc/sof/sof-pci-dev.c2
-rw-r--r--sound/soc/sof/sof-pci-dev.h2
-rw-r--r--sound/soc/sof/sof-priv.h26
-rw-r--r--sound/soc/sof/sof-utils.c2
-rw-r--r--sound/soc/sof/sof-utils.h2
-rw-r--r--sound/soc/sof/stream-ipc.c2
-rw-r--r--sound/soc/sof/topology.c9
-rw-r--r--sound/soc/sof/trace.c2
-rw-r--r--sound/soc/sof/xtensa/Makefile2
-rw-r--r--sound/soc/sof/xtensa/core.c2
-rw-r--r--sound/soc/spear/Makefile6
-rw-r--r--sound/soc/sprd/Makefile2
-rw-r--r--sound/soc/sti/Makefile2
-rw-r--r--sound/soc/stm/Makefile8
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c33
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c73
-rw-r--r--sound/soc/sunxi/sun50i-dmic.c36
-rw-r--r--sound/soc/sunxi/sun8i-codec.c346
-rw-r--r--sound/soc/tegra/Makefile46
-rw-r--r--sound/soc/tegra/tegra186_dspk.c7
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.c2
-rw-r--r--sound/soc/tegra/tegra_pcm.c6
-rw-r--r--sound/soc/ti/Makefile36
-rw-r--r--sound/soc/ti/davinci-i2s.c278
-rw-r--r--sound/soc/ti/davinci-mcasp.c12
-rw-r--r--sound/soc/ti/omap-hdmi.c2
-rw-r--r--sound/soc/uniphier/Makefile8
-rw-r--r--sound/soc/uniphier/aio-dma.c2
-rw-r--r--sound/soc/ux500/Makefile6
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c1
-rw-r--r--sound/soc/xilinx/Makefile6
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c2
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c1
-rw-r--r--sound/soc/xtensa/Makefile2
-rw-r--r--sound/sparc/Makefile6
-rw-r--r--sound/spi/Makefile2
-rw-r--r--sound/synth/Makefile2
-rw-r--r--sound/synth/emux/Makefile2
-rw-r--r--sound/synth/emux/emux.c6
-rw-r--r--sound/synth/emux/emux_hwdep.c3
-rw-r--r--sound/synth/emux/emux_oss.c3
-rw-r--r--sound/synth/emux/emux_proc.c1
-rw-r--r--sound/synth/emux/emux_seq.c6
-rw-r--r--sound/synth/emux/soundfont.c73
-rw-r--r--sound/usb/6fire/Makefile2
-rw-r--r--sound/usb/Makefile4
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/hiface/Makefile2
-rw-r--r--sound/usb/line6/driver.c6
-rw-r--r--sound/usb/misc/Makefile2
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/mixer_scarlett2.c2843
-rw-r--r--sound/usb/quirks-table.h38
-rw-r--r--sound/usb/quirks.c74
-rw-r--r--sound/usb/usx2y/Makefile4
-rw-r--r--sound/virtio/Makefile2
-rw-r--r--sound/x86/Makefile2
-rw-r--r--sound/xen/Makefile2
-rw-r--r--tools/Makefile13
-rw-r--r--tools/arch/arm64/include/asm/cputype.h4
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h24
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h15
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h45
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h315
-rw-r--r--tools/arch/x86/dell-uart-backlight-emulator/.gitignore1
-rw-r--r--tools/arch/x86/dell-uart-backlight-emulator/Makefile19
-rw-r--r--tools/arch/x86/dell-uart-backlight-emulator/README46
-rw-r--r--tools/arch/x86/dell-uart-backlight-emulator/dell-uart-backlight-emulator.c163
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h22
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h11
-rw-r--r--tools/arch/x86/include/asm/inat.h17
-rw-r--r--tools/arch/x86/include/asm/insn.h32
-rw-r--r--tools/arch/x86/include/asm/msr-index.h83
-rw-r--r--tools/arch/x86/include/asm/required-features.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h308
-rw-r--r--tools/arch/x86/intel_sdsi/intel_sdsi.c108
-rw-r--r--tools/arch/x86/lib/insn.c29
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt315
-rw-r--r--tools/arch/x86/tools/gen-insn-attr-x86.awk15
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile6
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst104
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst219
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst115
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst338
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-iter.rst60
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-link.rst73
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst232
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst112
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst436
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst81
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst60
-rw-r--r--tools/bpf/bpftool/Documentation/common_options.rst26
-rw-r--r--tools/bpf/bpftool/Makefile16
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool61
-rw-r--r--tools/bpf/bpftool/common.c96
-rw-r--r--tools/bpf/bpftool/feature.c3
-rw-r--r--tools/bpf/bpftool/gen.c7
-rw-r--r--tools/bpf/bpftool/iter.c2
-rw-r--r--tools/bpf/bpftool/link.c9
-rw-r--r--tools/bpf/bpftool/main.h3
-rw-r--r--tools/bpf/bpftool/pids.c19
-rw-r--r--tools/bpf/bpftool/prog.c7
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c4
-rw-r--r--tools/bpf/bpftool/struct_ops.c2
-rw-r--r--tools/cgroup/memcg_slabinfo.py5
-rw-r--r--tools/hv/hv_kvp_daemon.c213
-rw-r--r--tools/include/asm-generic/bitops/__ffs.h4
-rw-r--r--tools/include/asm-generic/bitops/__fls.h10
-rw-r--r--tools/include/asm-generic/bitops/fls.h8
-rw-r--r--tools/include/linux/align.h12
-rw-r--r--tools/include/linux/bitmap.h9
-rw-r--r--tools/include/linux/bitops.h4
-rw-r--r--tools/include/linux/bits.h8
-rw-r--r--tools/include/linux/btf_ids.h2
-rw-r--r--tools/include/linux/compiler.h4
-rw-r--r--tools/include/linux/filter.h18
-rw-r--r--tools/include/linux/kernel.h1
-rw-r--r--tools/include/linux/mm.h10
-rw-r--r--tools/include/linux/panic.h19
-rw-r--r--tools/include/linux/rbtree_augmented.h4
-rw-r--r--tools/include/nolibc/stdlib.h2
-rw-r--r--tools/include/nolibc/string.h46
-rw-r--r--tools/include/nolibc/sys.h27
-rw-r--r--tools/include/uapi/asm-generic/bitsperlong.h4
-rw-r--r--tools/include/uapi/asm-generic/fcntl.h221
-rw-r--r--tools/include/uapi/drm/i915_drm.h16
-rw-r--r--tools/include/uapi/linux/bits.h15
-rw-r--r--tools/include/uapi/linux/bpf.h44
-rw-r--r--tools/include/uapi/linux/ethtool.h104
-rw-r--r--tools/include/uapi/linux/kvm.h691
-rw-r--r--tools/include/uapi/linux/memfd.h39
-rw-r--r--tools/include/uapi/linux/netdev.h21
-rw-r--r--tools/include/uapi/linux/openat2.h43
-rw-r--r--tools/include/uapi/linux/userfaultfd.h386
-rw-r--r--tools/lib/bpf/bpf.c19
-rw-r--r--tools/lib/bpf/bpf.h9
-rw-r--r--tools/lib/bpf/bpf_core_read.h3
-rw-r--r--tools/lib/bpf/bpf_helpers.h21
-rw-r--r--tools/lib/bpf/bpf_tracing.h70
-rw-r--r--tools/lib/bpf/btf_dump.c5
-rw-r--r--tools/lib/bpf/features.c2
-rw-r--r--tools/lib/bpf/libbpf.c271
-rw-r--r--tools/lib/bpf/libbpf.h29
-rw-r--r--tools/lib/bpf/libbpf.map9
-rw-r--r--tools/lib/bpf/libbpf_internal.h5
-rw-r--r--tools/lib/bpf/libbpf_probes.c6
-rw-r--r--tools/lib/bpf/libbpf_version.h2
-rw-r--r--tools/lib/bpf/ringbuf.c53
-rw-r--r--tools/lib/bpf/str_error.c16
-rw-r--r--tools/lib/bpf/usdt.bpf.h24
-rw-r--r--tools/lib/perf/cpumap.c33
-rw-r--r--tools/lib/perf/include/perf/cpumap.h16
-rw-r--r--tools/lib/perf/libperf.map4
-rw-r--r--tools/lib/perf/mmap.c2
-rw-r--r--tools/lib/rbtree.c2
-rw-r--r--tools/lib/subcmd/parse-options.c44
-rw-r--r--tools/lib/subcmd/run-command.c70
-rw-r--r--tools/lib/subcmd/run-command.h3
-rwxr-xr-xtools/net/ynl/cli.py34
-rwxr-xr-xtools/net/ynl/ethtool.py19
-rw-r--r--tools/net/ynl/lib/nlspec.py2
-rw-r--r--tools/net/ynl/lib/ynl.h12
-rw-r--r--tools/net/ynl/lib/ynl.py163
-rw-r--r--tools/net/ynl/samples/netdev.c2
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py29
-rwxr-xr-xtools/net/ynl/ynl-gen-rst.py62
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/perf/Build14
-rw-r--r--tools/perf/Documentation/perf-arm-spe.txt12
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Documentation/perf-report.txt9
-rw-r--r--tools/perf/Documentation/perf-sched.txt36
-rw-r--r--tools/perf/Documentation/perf-script.txt7
-rw-r--r--tools/perf/Documentation/perf-test.txt13
-rw-r--r--tools/perf/Makefile.config25
-rw-r--r--tools/perf/Makefile.perf100
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c381
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c4
-rw-r--r--tools/perf/arch/arm64/util/header.c13
-rw-r--r--tools/perf/arch/riscv/util/header.c2
-rw-r--r--tools/perf/arch/x86/Build14
-rw-r--r--tools/perf/arch/x86/tests/Build14
-rwxr-xr-xtools/perf/arch/x86/tests/gen-insn-x86-dat.sh2
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c4
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c10
-rw-r--r--tools/perf/bench/bench.h2
-rw-r--r--tools/perf/bench/inject-buildid.c2
-rw-r--r--tools/perf/bench/uprobe.c22
-rw-r--r--tools/perf/builtin-annotate.c128
-rw-r--r--tools/perf/builtin-bench.c2
-rw-r--r--tools/perf/builtin-buildid-cache.c2
-rw-r--r--tools/perf/builtin-buildid-list.c18
-rw-r--r--tools/perf/builtin-c2c.c21
-rw-r--r--tools/perf/builtin-daemon.c4
-rw-r--r--tools/perf/builtin-inject.c96
-rw-r--r--tools/perf/builtin-kallsyms.c2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-kwork.c2
-rw-r--r--tools/perf/builtin-list.c24
-rw-r--r--tools/perf/builtin-lock.c18
-rw-r--r--tools/perf/builtin-mem.c4
-rw-r--r--tools/perf/builtin-probe.c2
-rw-r--r--tools/perf/builtin-record.c14
-rw-r--r--tools/perf/builtin-report.c18
-rw-r--r--tools/perf/builtin-sched.c13
-rw-r--r--tools/perf/builtin-script.c86
-rw-r--r--tools/perf/builtin-stat.c52
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/builtin-trace.c35
-rw-r--r--tools/perf/builtin.h4
-rwxr-xr-xtools/perf/check-headers.sh23
-rwxr-xr-xtools/perf/perf-archive.sh2
-rw-r--r--tools/perf/perf-completion.sh23
-rw-r--r--tools/perf/perf.c23
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/transaction.json28
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/branch-prediction.json93
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/decode.json115
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/execution.json174
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/floating-point.json812
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/inst-cache.json72
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/l2-cache.json266
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/l3-cache.json177
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/load-store.json451
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/memory-controller.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/pipeline.json99
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen5/recommended.json345
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json85
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json112
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json95
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json64
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv21
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json119
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json112
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json85
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json11
-rwxr-xr-xtools/perf/scripts/python/parallel-perf.py988
-rw-r--r--tools/perf/tests/bitmap.c13
-rw-r--r--tools/perf/tests/builtin-test.c62
-rw-r--r--tools/perf/tests/code-reading.c18
-rw-r--r--tools/perf/tests/config-fragments/config3
-rw-r--r--tools/perf/tests/dso-data.c67
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c4
-rw-r--r--tools/perf/tests/hists_common.c6
-rw-r--r--tools/perf/tests/hists_cumulate.c4
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/maps.c4
-rw-r--r--tools/perf/tests/mem.c11
-rw-r--r--tools/perf/tests/parse-events.c58
-rw-r--r--tools/perf/tests/pmu-events.c4
-rw-r--r--tools/perf/tests/pmu.c467
-rwxr-xr-xtools/perf/tests/shell/annotate.sh83
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh1
-rw-r--r--tools/perf/tests/shell/lib/stat_output.sh2
-rwxr-xr-xtools/perf/tests/shell/script.sh26
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh75
-rwxr-xr-xtools/perf/tests/shell/test_arm_callgraph_fp.sh4
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh2
-rw-r--r--tools/perf/tests/symbols.c8
-rw-r--r--tools/perf/tests/topology.c46
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c6
-rw-r--r--tools/perf/tests/workloads/datasym.c16
-rw-r--r--tools/perf/trace/beauty/Build15
-rw-r--r--tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h (renamed from tools/arch/x86/include/asm/irq_vectors.h)2
-rw-r--r--tools/perf/trace/beauty/arch/x86/include/uapi/asm/prctl.h (renamed from tools/arch/x86/include/uapi/asm/prctl.h)0
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh8
-rw-r--r--tools/perf/trace/beauty/beauty.h7
-rw-r--r--tools/perf/trace/beauty/clone.c46
-rwxr-xr-xtools/perf/trace/beauty/clone.sh17
-rw-r--r--tools/perf/trace/beauty/fcntl.c2
-rw-r--r--tools/perf/trace/beauty/flock.c2
-rw-r--r--tools/perf/trace/beauty/fs_at_flags.c58
-rwxr-xr-xtools/perf/trace/beauty/fs_at_flags.sh21
-rwxr-xr-xtools/perf/trace/beauty/fsconfig.sh6
-rw-r--r--tools/perf/trace/beauty/fsmount.c9
-rwxr-xr-xtools/perf/trace/beauty/fsmount.sh6
-rwxr-xr-xtools/perf/trace/beauty/fspick.sh6
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fcntl.h (renamed from tools/include/uapi/linux/fcntl.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fs.h (renamed from tools/include/uapi/linux/fs.h)30
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/mount.h (renamed from tools/include/uapi/linux/mount.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h (renamed from tools/include/uapi/linux/prctl.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/sched.h (renamed from tools/include/uapi/linux/sched.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/stat.h195
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/usbdevice_fs.h (renamed from tools/include/uapi/linux/usbdevice_fs.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/vhost.h (renamed from tools/include/uapi/linux/vhost.h)20
-rw-r--r--tools/perf/trace/beauty/include/uapi/sound/asound.h (renamed from tools/include/uapi/sound/asound.h)4
-rwxr-xr-xtools/perf/trace/beauty/mount_flags.sh6
-rwxr-xr-xtools/perf/trace/beauty/move_mount_flags.sh6
-rw-r--r--tools/perf/trace/beauty/prctl.c2
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh6
-rwxr-xr-xtools/perf/trace/beauty/rename_flags.sh2
-rwxr-xr-xtools/perf/trace/beauty/sndrv_ctl_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/sndrv_pcm_ioctl.sh4
-rw-r--r--tools/perf/trace/beauty/statx.c67
-rwxr-xr-xtools/perf/trace/beauty/statx_mask.sh23
-rw-r--r--tools/perf/trace/beauty/sync_file_range.c11
-rwxr-xr-xtools/perf/trace/beauty/sync_file_range.sh2
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh6
-rwxr-xr-xtools/perf/trace/beauty/usbdevfs_ioctl.sh6
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh6
-rwxr-xr-xtools/perf/trace/beauty/x86_arch_prctl.sh4
-rw-r--r--tools/perf/ui/browser.c6
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/Build1
-rw-r--r--tools/perf/ui/browsers/annotate-data.c313
-rw-r--r--tools/perf/ui/browsers/annotate.c23
-rw-r--r--tools/perf/ui/browsers/hists.c39
-rw-r--r--tools/perf/ui/browsers/map.c4
-rw-r--r--tools/perf/ui/hist.c92
-rw-r--r--tools/perf/util/Build16
-rw-r--r--tools/perf/util/annotate-data.c1648
-rw-r--r--tools/perf/util/annotate-data.h74
-rw-r--r--tools/perf/util/annotate.c2233
-rw-r--r--tools/perf/util/annotate.h129
-rw-r--r--tools/perf/util/auxtrace.c19
-rw-r--r--tools/perf/util/auxtrace.h1
-rw-r--r--tools/perf/util/block-info.c24
-rw-r--r--tools/perf/util/block-info.h15
-rw-r--r--tools/perf/util/bpf-event.c8
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c5
-rw-r--r--tools/perf/util/bpf_kwork.c16
-rw-r--r--tools/perf/util/bpf_kwork_top.c12
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c21
-rw-r--r--tools/perf/util/bpf_skel/bench_uprobe.bpf.c16
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c5
-rw-r--r--tools/perf/util/build-id.c136
-rw-r--r--tools/perf/util/build-id.h2
-rw-r--r--tools/perf/util/callchain.c4
-rw-r--r--tools/perf/util/cgroup.c4
-rw-r--r--tools/perf/util/comm.c207
-rw-r--r--tools/perf/util/cpumap.c14
-rw-r--r--tools/perf/util/cpumap.h2
-rw-r--r--tools/perf/util/cs-etm.c5
-rw-r--r--tools/perf/util/data-convert-json.c2
-rw-r--r--tools/perf/util/db-export.c6
-rw-r--r--tools/perf/util/debug.c3
-rw-r--r--tools/perf/util/debug.h1
-rw-r--r--tools/perf/util/disasm.c1837
-rw-r--r--tools/perf/util/disasm.h112
-rw-r--r--tools/perf/util/dlfilter.c12
-rw-r--r--tools/perf/util/dso.c472
-rw-r--r--tools/perf/util/dso.h560
-rw-r--r--tools/perf/util/dsos.c529
-rw-r--r--tools/perf/util/dsos.h40
-rw-r--r--tools/perf/util/dump-insn.h1
-rw-r--r--tools/perf/util/dwarf-aux.c442
-rw-r--r--tools/perf/util/dwarf-aux.h41
-rw-r--r--tools/perf/util/event.c8
-rw-r--r--tools/perf/util/evlist.c3
-rw-r--r--tools/perf/util/evsel.c20
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/genelf.h3
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/help-unknown-cmd.c51
-rw-r--r--tools/perf/util/hist.c78
-rw-r--r--tools/perf/util/hist.h217
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c24
-rw-r--r--tools/perf/util/machine.c227
-rw-r--r--tools/perf/util/machine.h4
-rw-r--r--tools/perf/util/map.c91
-rw-r--r--tools/perf/util/map.h3
-rw-r--r--tools/perf/util/maps.c44
-rw-r--r--tools/perf/util/mem-events.c36
-rw-r--r--tools/perf/util/mem-events.h29
-rw-r--r--tools/perf/util/mem-info.c35
-rw-r--r--tools/perf/util/mem-info.h54
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/metricgroup.h1
-rw-r--r--tools/perf/util/parse-events.c522
-rw-r--r--tools/perf/util/parse-events.h60
-rw-r--r--tools/perf/util/parse-events.l200
-rw-r--r--tools/perf/util/parse-events.y263
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c26
-rw-r--r--tools/perf/util/pmu.c291
-rw-r--r--tools/perf/util/pmu.h16
-rw-r--r--tools/perf/util/pmus.c110
-rw-r--r--tools/perf/util/pmus.h3
-rw-r--r--tools/perf/util/print-events.c55
-rw-r--r--tools/perf/util/print_insn.c75
-rw-r--r--tools/perf/util/print_insn.h8
-rw-r--r--tools/perf/util/probe-event.c32
-rw-r--r--tools/perf/util/probe-finder.c4
-rw-r--r--tools/perf/util/python.c10
-rw-r--r--tools/perf/util/record.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c45
-rw-r--r--tools/perf/util/session.c26
-rw-r--r--tools/perf/util/session.h2
-rw-r--r--tools/perf/util/sort.c116
-rw-r--r--tools/perf/util/sort.h190
-rw-r--r--tools/perf/util/srcline.c65
-rw-r--r--tools/perf/util/stat-display.c3
-rw-r--r--tools/perf/util/stat.c2
-rw-r--r--tools/perf/util/stat.h1
-rw-r--r--tools/perf/util/svghelper.c20
-rw-r--r--tools/perf/util/symbol-elf.c145
-rw-r--r--tools/perf/util/symbol-minimal.c4
-rw-r--r--tools/perf/util/symbol.c261
-rw-r--r--tools/perf/util/symbol.h12
-rw-r--r--tools/perf/util/symbol_fprintf.c4
-rw-r--r--tools/perf/util/synthetic-events.c24
-rw-r--r--tools/perf/util/thread.c18
-rw-r--r--tools/perf/util/tracepoint.c56
-rw-r--r--tools/perf/util/tracepoint.h3
-rw-r--r--tools/perf/util/unwind-libunwind-local.c18
-rw-r--r--tools/perf/util/unwind-libunwind.c2
-rw-r--r--tools/perf/util/values.h1
-rw-r--r--tools/perf/util/vdso.c56
-rw-r--r--tools/power/acpi/tools/pfrut/pfrut.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c25
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-mbox.c3
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c10
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c1
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c30
-rw-r--r--tools/power/x86/intel-speed-select/isst.h2
-rw-r--r--tools/power/x86/turbostat/Makefile27
-rw-r--r--tools/power/x86/turbostat/turbostat.822
-rw-r--r--tools/power/x86/turbostat/turbostat.c3050
-rwxr-xr-xtools/sound/dapm-graph303
-rw-r--r--tools/testing/cxl/test/cxl.c17
-rw-r--r--tools/testing/cxl/test/mem.c19
-rw-r--r--tools/testing/kunit/configs/all_tests.config3
-rw-r--r--tools/testing/kunit/qemu_configs/riscv.py2
-rw-r--r--tools/testing/nvdimm/test/ndtest.c7
-rw-r--r--tools/testing/nvdimm/test/ndtest.h31
-rw-r--r--tools/testing/radix-tree/linux/kernel.h2
-rw-r--r--tools/testing/selftests/Makefile13
-rw-r--r--tools/testing/selftests/alsa/conf.c2
-rw-r--r--tools/testing/selftests/arm64/abi/tpidr2.c2
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c4
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch642
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x1
-rw-r--r--tools/testing/selftests/bpf/Makefile65
-rw-r--r--tools/testing/selftests/bpf/bench.c39
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_crypto.c185
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_create.c2
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c433
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_trigger.sh22
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_uprobes.sh2
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_list.h4
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h71
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h3
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h241
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c263
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h28
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c5
-rw-r--r--tools/testing/selftests/bpf/config7
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c243
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_atomics.c186
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_htab.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_list.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c133
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c38
-rw-r--r--tools/testing/selftests/bpf/prog_tests/crypto_sanity.c197
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c34
-rw-r--r--tools/testing/selftests/bpf/prog_tests/empty_skb.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c132
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c322
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_attach.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c214
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_skip.c137
-rw-r--r--tools/testing/selftests/bpf/prog_tests/preempt_lock.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_addr.c2353
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c171
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c38
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tunnel.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_vprintk.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier_kfunc_prog_types.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/wq.c40
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c16
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c178
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c2
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bench_local_storage_create.c5
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/bind_prog.h19
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cc_cubic.c189
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c74
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c62
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp_release.c10
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h52
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c12
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/connect_unix_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_failure.c3
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_basic.c68
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_bench.c109
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_common.h66
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c169
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_success.c15
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c12
-rw-r--r--tools/testing/selftests/bpf/progs/fib_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_multi_maps.c49
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c2
-rw-r--r--tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c79
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c58
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c20
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup.c8
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c10
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sock.c4
-rw-r--r--tools/testing/selftests/bpf/progs/mptcpify.c4
-rw-r--r--tools/testing/selftests/bpf/progs/preempt_lock.c132
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c57
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c2
-rw-r--r--tools/testing/selftests/bpf/progs/sock_addr_kern.c65
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c16
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c19
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c36
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c22
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c12
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c121
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c2
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_update.c18
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c20
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_rtt.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_access_variable_array.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_cookie.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_redirect.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_skip.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_n.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c2
-rw-r--r--tools/testing/selftests/bpf/progs/timer.c3
-rw-r--r--tools/testing/selftests/bpf/progs/timer_failure.c2
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim.c2
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim_reject.c2
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c107
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c10
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c68
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c7
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_restricted.c8
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c9
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c122
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock_addr.c331
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c89
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c180
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c144
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp5
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c1434
-rwxr-xr-xtools/testing/selftests/bpf/test_sock_addr.sh58
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c12
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh13
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c117
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c16
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c109
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h9
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.c2
-rw-r--r--tools/testing/selftests/bpf/veristat.c5
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c16
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c123
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h12
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c12
-rw-r--r--tools/testing/selftests/capabilities/validate_cap.c7
-rw-r--r--tools/testing/selftests/cgroup/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c8
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h4
-rw-r--r--tools/testing/selftests/cgroup/test_core.c7
-rw-r--r--tools/testing/selftests/cgroup/test_cpu.c6
-rw-r--r--tools/testing/selftests/cgroup/test_cpuset.c2
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_v1_hp.sh46
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c2
-rw-r--r--tools/testing/selftests/cgroup/test_hugetlb_memcg.c2
-rw-r--r--tools/testing/selftests/cgroup/test_kill.c2
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c6
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c6
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c138
-rw-r--r--tools/testing/selftests/clone3/clone3.c7
-rw-r--r--tools/testing/selftests/clone3/clone3_clear_sighand.c2
-rw-r--r--tools/testing/selftests/clone3/clone3_set_tid.c121
-rw-r--r--tools/testing/selftests/core/close_range_test.c55
-rwxr-xr-xtools/testing/selftests/cpufreq/cpufreq.sh3
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh47
-rwxr-xr-xtools/testing/selftests/cpufreq/module.sh6
-rw-r--r--tools/testing/selftests/damon/Makefile13
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py179
-rw-r--r--tools/testing/selftests/damon/damos_quota_goal.py77
-rw-r--r--tools/testing/selftests/dmabuf-heaps/config3
-rw-r--r--tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c247
-rw-r--r--tools/testing/selftests/drivers/net/Makefile11
-rw-r--r--tools/testing/selftests/drivers/net/README.rst136
-rw-r--r--tools/testing/selftests/drivers/net/config2
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile28
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/csum.py122
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devlink_port_split.py (renamed from tools/testing/selftests/net/devlink_port_split.py)0
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool.sh (renamed from tools/testing/selftests/net/forwarding/ethtool.sh)20
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_extended_state.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_extended_state.sh)5
-rw-r--r--tools/testing/selftests/drivers/net/hw/ethtool_lib.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_lib.sh)0
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_mm.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_mm.sh)3
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_rmon.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_rmon.sh)8
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/hw_stats_l3.sh (renamed from tools/testing/selftests/net/forwarding/hw_stats_l3.sh)20
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/hw_stats_l3_gre.sh (renamed from tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh)8
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py16
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/loopback.sh (renamed from tools/testing/selftests/net/forwarding/loopback.sh)5
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/pp_alloc_fail.py129
-rw-r--r--tools/testing/selftests/drivers/net/hw/settings1
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/__init__.py19
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py224
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py41
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote.py15
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote_netns.py21
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote_ssh.py39
-rwxr-xr-xtools/testing/selftests/drivers/net/microchip/ksz9477_qos.sh668
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh14
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh1
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/settings1
-rwxr-xr-xtools/testing/selftests/drivers/net/ping.py51
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py66
-rwxr-xr-xtools/testing/selftests/drivers/net/stats.py144
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/Makefile15
-rwxr-xr-xtools/testing/selftests/drivers/net/virtio_net/basic_features.sh131
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/config2
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/virtio_net_common.sh99
-rw-r--r--tools/testing/selftests/exec/Makefile4
-rwxr-xr-xtools/testing/selftests/exec/binfmt_script.py10
-rw-r--r--tools/testing/selftests/exec/execveat.c12
-rw-r--r--tools/testing/selftests/exec/load_address.c34
-rw-r--r--tools/testing/selftests/exec/recursion-depth.c53
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c1
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest8
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest-ktap2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_args_vfs.tc41
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_vfs.tc40
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc2
-rw-r--r--tools/testing/selftests/hid/config.common1
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c112
-rw-r--r--tools/testing/selftests/hid/progs/hid.c46
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h6
-rw-r--r--tools/testing/selftests/hid/tests/base.py92
-rw-r--r--tools/testing/selftests/hid/tests/base_device.py421
-rw-r--r--tools/testing/selftests/hid/tests/base_gamepad.py238
-rw-r--r--tools/testing/selftests/hid/tests/test_gamepad.py457
-rw-r--r--tools/testing/selftests/hid/tests/test_tablet.py723
-rw-r--r--tools/testing/selftests/iommu/config2
-rw-r--r--tools/testing/selftests/ipc/msgque.c11
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c2
-rw-r--r--tools/testing/selftests/kselftest.h80
-rw-r--r--tools/testing/selftests/kselftest/ktap_helpers.sh4
-rwxr-xr-xtools/testing/selftests/kselftest_deps.sh1
-rw-r--r--tools/testing/selftests/kselftest_harness.h138
-rw-r--r--tools/testing/selftests/kvm/Makefile9
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c15
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c5
-rw-r--r--tools/testing/selftests/kvm/aarch64/psci_test.c4
-rw-r--r--tools/testing/selftests/kvm/aarch64/set_id_regs.c123
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_init.c50
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c15
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_lpi_stress.c410
-rw-r--r--tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c6
-rw-r--r--tools/testing/selftests/kvm/arch_timer.c4
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c94
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c15
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c26
-rw-r--r--tools/testing/selftests/kvm/guest_memfd_test.c4
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c1
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c3
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic.h21
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic_v3.h586
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic_v3_its.h19
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h21
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/ucall.h2
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/vgic.h5
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1111
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h1135
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_types.h20
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h1
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h49
-rw-r--r--tools/testing/selftests/kvm/include/riscv/sbi.h141
-rw-r--r--tools/testing/selftests/kvm/include/riscv/ucall.h1
-rw-r--r--tools/testing/selftests/kvm/include/s390x/ucall.h2
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h19
-rw-r--r--tools/testing/selftests/kvm/include/userfaultfd_util.h19
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h28
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h22
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/sev.h19
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/ucall.h2
-rw-r--r--tools/testing/selftests/kvm/kvm_binary_stats_test.c2
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c2
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic.c18
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_private.h4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3.c99
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3_its.c248
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c38
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c3
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c21
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c13
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c13
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c2
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c5
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c156
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c316
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/sev.c45
-rw-r--r--tools/testing/selftests/kvm/max_guest_memory_test.c17
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c3
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c8
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c82
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c4
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c681
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c48
-rw-r--r--tools/testing/selftests/kvm/s390x/cmma_test.c3
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c1
-rw-r--r--tools/testing/selftests/kvm/s390x/sync_regs_test.c2
-rw-r--r--tools/testing/selftests/kvm/s390x/tprot.c1
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c23
-rw-r--r--tools/testing/selftests/kvm/steal_time.c53
-rw-r--r--tools/testing/selftests/kvm/x86_64/amx_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hwcr_msr_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_ipi.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_pv_test.c42
-rw-r--r--tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c3
-rwxr-xr-xtools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh13
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c61
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_counters_test.c28
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_sregs_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_init2_tests.c152
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_smoke_test.c96
-rw-r--r--tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/ucna_injection_test.c7
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c15
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c63
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_state_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c59
-rw-r--r--tools/testing/selftests/kvm/x86_64/xss_msr_test.c2
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/fs_test.c570
-rw-r--r--tools/testing/selftests/lib.mk38
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_multi_thread.c2
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_single_thread.c2
-rw-r--r--tools/testing/selftests/memfd/fuse_test.c2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c2
-rw-r--r--tools/testing/selftests/mm/Makefile8
-rw-r--r--tools/testing/selftests/mm/compaction_test.c6
-rw-r--r--tools/testing/selftests/mm/cow.c108
-rw-r--r--tools/testing/selftests/mm/gup_longterm.c18
-rw-r--r--tools/testing/selftests/mm/gup_test.c6
-rw-r--r--tools/testing/selftests/mm/hugetlb_madv_vs_map.c16
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c137
-rw-r--r--tools/testing/selftests/mm/madv_populate.c2
-rw-r--r--tools/testing/selftests/mm/mdwe_test.c1
-rw-r--r--tools/testing/selftests/mm/memfd_secret.c51
-rw-r--r--tools/testing/selftests/mm/mkdirty.c2
-rw-r--r--tools/testing/selftests/mm/mlock2-tests.c15
-rw-r--r--tools/testing/selftests/mm/mremap_test.c204
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c4
-rw-r--r--tools/testing/selftests/mm/protection_keys.c34
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh15
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c4
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c4
-rw-r--r--tools/testing/selftests/mm/uffd-common.c3
-rw-r--r--tools/testing/selftests/mm/uffd-common.h2
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c13
-rw-r--r--tools/testing/selftests/mm/virtual_address_range.c78
-rw-r--r--tools/testing/selftests/mm/vm_util.h2
-rw-r--r--tools/testing/selftests/net/.gitignore3
-rw-r--r--tools/testing/selftests/net/Makefile58
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/scm_rights.c286
-rwxr-xr-xtools/testing/selftests/net/amt.sh12
-rw-r--r--tools/testing/selftests/net/bind_wildcard.c783
-rw-r--r--tools/testing/selftests/net/bpf.mk53
-rwxr-xr-xtools/testing/selftests/net/bpf_offload.py (renamed from tools/testing/selftests/bpf/test_offload.py)142
-rw-r--r--tools/testing/selftests/net/cmsg_sender.c52
-rwxr-xr-xtools/testing/selftests/net/cmsg_time.sh7
-rw-r--r--tools/testing/selftests/net/config1
-rw-r--r--tools/testing/selftests/net/epoll_busy_poll.c320
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh46
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile9
-rw-r--r--tools/testing/selftests/net/forwarding/README33
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mld.sh6
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample53
-rw-r--r--tools/testing/selftests/net/forwarding/ipip_lib.sh1
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh383
-rwxr-xr-xtools/testing/selftests/net/forwarding/lib_sh_test.sh208
-rwxr-xr-xtools/testing/selftests/net/forwarding/local_termination.sh30
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh35
-rw-r--r--tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh12
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh35
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_nh.sh14
-rw-r--r--tools/testing/selftests/net/forwarding/sch_ets_tests.sh19
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_red.sh10
-rw-r--r--tools/testing/selftests/net/forwarding/sch_tbf_core.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_tunnel_key.sh2
-rw-r--r--tools/testing/selftests/net/gro.c141
-rw-r--r--tools/testing/selftests/net/hsr/Makefile3
-rw-r--r--tools/testing/selftests/net/hsr/hsr_common.sh84
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_ping.sh106
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_redbox.sh121
-rw-r--r--tools/testing/selftests/net/ip_local_port_range.c2
-rw-r--r--tools/testing/selftests/net/lib.sh70
-rw-r--r--tools/testing/selftests/net/lib/.gitignore2
-rw-r--r--tools/testing/selftests/net/lib/Makefile15
-rw-r--r--tools/testing/selftests/net/lib/csum.c (renamed from tools/testing/selftests/net/csum.c)18
-rw-r--r--tools/testing/selftests/net/lib/py/__init__.py8
-rw-r--r--tools/testing/selftests/net/lib/py/consts.py9
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py159
-rw-r--r--tools/testing/selftests/net/lib/py/netns.py31
-rw-r--r--tools/testing/selftests/net/lib/py/nsim.py134
-rw-r--r--tools/testing/selftests/net/lib/py/utils.py102
-rw-r--r--tools/testing/selftests/net/lib/py/ynl.py49
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh53
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh11
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh157
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh135
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh34
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh295
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c2
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh20
-rw-r--r--tools/testing/selftests/net/nat6to4.bpf.c (renamed from tools/testing/selftests/net/nat6to4.c)0
-rw-r--r--tools/testing/selftests/net/netfilter/.gitignore (renamed from tools/testing/selftests/netfilter/.gitignore)4
-rw-r--r--tools/testing/selftests/net/netfilter/Makefile52
-rw-r--r--tools/testing/selftests/net/netfilter/audit_logread.c (renamed from tools/testing/selftests/netfilter/audit_logread.c)0
-rwxr-xr-xtools/testing/selftests/net/netfilter/br_netfilter.sh171
-rwxr-xr-xtools/testing/selftests/net/netfilter/bridge_brouter.sh122
-rw-r--r--tools/testing/selftests/net/netfilter/config89
-rw-r--r--tools/testing/selftests/net/netfilter/connect_close.c (renamed from tools/testing/selftests/netfilter/connect_close.c)0
-rw-r--r--tools/testing/selftests/net/netfilter/conntrack_dump_flush.c (renamed from tools/testing/selftests/netfilter/conntrack_dump_flush.c)10
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_icmp_related.sh (renamed from tools/testing/selftests/netfilter/conntrack_icmp_related.sh)179
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_ipip_mtu.sh (renamed from tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh)118
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_sctp_collision.sh87
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_tcp_unreplied.sh164
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_vrf.sh (renamed from tools/testing/selftests/netfilter/conntrack_vrf.sh)121
-rwxr-xr-xtools/testing/selftests/net/netfilter/ipvs.sh211
-rw-r--r--tools/testing/selftests/net/netfilter/lib.sh10
-rwxr-xr-xtools/testing/selftests/net/netfilter/nf_conntrack_packetdrill.sh71
-rwxr-xr-xtools/testing/selftests/net/netfilter/nf_nat_edemux.sh97
-rw-r--r--tools/testing/selftests/net/netfilter/nf_queue.c (renamed from tools/testing/selftests/netfilter/nf-queue.c)0
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_audit.sh (renamed from tools/testing/selftests/netfilter/nft_audit.sh)31
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_concat_range.sh (renamed from tools/testing/selftests/netfilter/nft_concat_range.sh)213
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_concat_range_perf.sh9
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_conntrack_helper.sh171
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_fib.sh234
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_flowtable.sh (renamed from tools/testing/selftests/netfilter/nft_flowtable.sh)371
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_meta.sh (renamed from tools/testing/selftests/netfilter/nft_meta.sh)4
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_nat.sh (renamed from tools/testing/selftests/netfilter/nft_nat.sh)480
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_nat_zones.sh (renamed from tools/testing/selftests/netfilter/nft_nat_zones.sh)194
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh417
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_synproxy.sh96
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_zones_many.sh (renamed from tools/testing/selftests/netfilter/nft_zones_many.sh)97
-rwxr-xr-xtools/testing/selftests/net/netfilter/packetdrill/common.sh33
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_ack_loss_stall.pkt118
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_inexact_rst.pkt62
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_rst_invalid.pkt59
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt44
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_synack_old.pkt51
-rw-r--r--tools/testing/selftests/net/netfilter/packetdrill/conntrack_synack_reuse.pkt34
-rwxr-xr-xtools/testing/selftests/net/netfilter/rpath.sh (renamed from tools/testing/selftests/netfilter/rpath.sh)10
-rw-r--r--tools/testing/selftests/net/netfilter/sctp_collision.c (renamed from tools/testing/selftests/netfilter/sctp_collision.c)0
-rw-r--r--tools/testing/selftests/net/netfilter/settings1
-rwxr-xr-xtools/testing/selftests/net/netfilter/xt_string.sh (renamed from tools/testing/selftests/netfilter/xt_string.sh)89
-rwxr-xr-xtools/testing/selftests/net/nl_netdev.py98
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py16
-rw-r--r--tools/testing/selftests/net/reuseaddr_conflict.c2
-rw-r--r--tools/testing/selftests/net/sample_map_ret0.bpf.c (renamed from tools/testing/selftests/bpf/progs/sample_map_ret0.c)2
-rw-r--r--tools/testing/selftests/net/sample_ret0.bpf.c (renamed from tools/testing/selftests/bpf/progs/sample_ret0.c)3
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/proc.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/setup.c12
-rw-r--r--tools/testing/selftests/net/tcp_ao/rst.c23
-rw-r--r--tools/testing/selftests/net/tcp_ao/setsockopt-closed.c2
-rwxr-xr-xtools/testing/selftests/net/test_bridge_neigh_suppress.sh14
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_mdb.sh205
-rw-r--r--tools/testing/selftests/net/tls.c34
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh8
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh12
-rw-r--r--tools/testing/selftests/net/udpgso.c2
-rwxr-xr-xtools/testing/selftests/net/veth.sh2
-rw-r--r--tools/testing/selftests/net/xdp_dummy.bpf.c (renamed from tools/testing/selftests/net/xdp_dummy.c)0
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh4
-rw-r--r--tools/testing/selftests/netfilter/Makefile21
-rwxr-xr-xtools/testing/selftests/netfilter/bridge_brouter.sh146
-rw-r--r--tools/testing/selftests/netfilter/bridge_netfilter.sh188
-rw-r--r--tools/testing/selftests/netfilter/config9
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_sctp_collision.sh89
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh167
-rwxr-xr-xtools/testing/selftests/netfilter/ipvs.sh228
-rwxr-xr-xtools/testing/selftests/netfilter/nf_nat_edemux.sh127
-rwxr-xr-xtools/testing/selftests/netfilter/nft_conntrack_helper.sh197
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh273
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh449
-rwxr-xr-xtools/testing/selftests/netfilter/nft_synproxy.sh117
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh151
-rw-r--r--tools/testing/selftests/netfilter/settings1
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c82
-rw-r--r--tools/testing/selftests/perf_events/.gitignore1
-rw-r--r--tools/testing/selftests/perf_events/Makefile2
-rw-r--r--tools/testing/selftests/perf_events/watermark_signal.c146
-rw-r--r--tools/testing/selftests/pidfd/config2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_open_test.c4
-rw-r--r--tools/testing/selftests/pidfd/pidfd_poll_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c2
-rwxr-xr-xtools/testing/selftests/power_supply/test_power_supply_properties.sh2
-rw-r--r--tools/testing/selftests/powerpc/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile21
-rw-r--r--tools/testing/selftests/powerpc/dexcr/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/dexcr/Makefile9
-rw-r--r--tools/testing/selftests/powerpc/dexcr/chdexcr.c112
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr.c40
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr.h57
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr_test.c215
-rw-r--r--tools/testing/selftests/powerpc/dexcr/hashchk_test.c8
-rw-r--r--tools/testing/selftests/powerpc/dexcr/lsdexcr.c103
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/eeh/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/flags.mk12
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/mce/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile44
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile21
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh6
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE095
-rw-r--r--tools/testing/selftests/resctrl/Makefile4
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c8
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c8
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c10
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c10
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h9
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c26
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c8
-rw-r--r--tools/testing/selftests/ring-buffer/.gitignore1
-rw-r--r--tools/testing/selftests/ring-buffer/Makefile8
-rw-r--r--tools/testing/selftests/ring-buffer/config2
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c294
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.h10
-rw-r--r--tools/testing/selftests/seccomp/settings2
-rw-r--r--tools/testing/selftests/sync/sync_test.c3
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c14
-rw-r--r--tools/testing/selftests/timers/adjtick.c4
-rw-r--r--tools/testing/selftests/timers/alarmtimer-suspend.c4
-rw-r--r--tools/testing/selftests/timers/change_skew.c4
-rw-r--r--tools/testing/selftests/timers/freq-step.c4
-rw-r--r--tools/testing/selftests/timers/leap-a-day.c10
-rw-r--r--tools/testing/selftests/timers/leapcrash.c4
-rw-r--r--tools/testing/selftests/timers/mqueue-lat.c4
-rw-r--r--tools/testing/selftests/timers/posix_timers.c117
-rw-r--r--tools/testing/selftests/timers/raw_skew.c6
-rw-r--r--tools/testing/selftests/timers/set-2038.c4
-rw-r--r--tools/testing/selftests/timers/set-tai.c4
-rw-r--r--tools/testing/selftests/timers/set-timer-lat.c4
-rw-r--r--tools/testing/selftests/timers/set-tz.c4
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c4
-rw-r--r--tools/testing/selftests/timers/threadtest.c2
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c79
-rw-r--r--tools/testing/selftests/tty/tty_tstamp_update.c48
-rwxr-xr-xtools/testing/selftests/turbostat/defcolumns.py60
-rw-r--r--tools/testing/selftests/user_events/ftrace_test.c8
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h6
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c16
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c26
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv32.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv64.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config1
-rw-r--r--tools/testing/selftests/x86/amx.c27
-rw-r--r--tools/testing/selftests/x86/lam.c4
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c43
-rw-r--r--tools/testing/selftests/x86/test_shadow_stack.c67
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c506
-rw-r--r--tools/tracing/rtla/Makefile.config2
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c55
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c55
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c109
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c294
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c250
-rw-r--r--tools/tracing/rtla/src/trace.c15
-rw-r--r--tools/tracing/rtla/src/trace.h1
-rw-r--r--tools/workqueue/wq_monitor.py9
-rw-r--r--tools/writeback/wb_monitor.py172
-rw-r--r--usr/Makefile2
-rw-r--r--usr/include/Makefile2
-rw-r--r--virt/kvm/kvm_main.c116
-rw-r--r--virt/kvm/kvm_mm.h6
-rw-r--r--virt/kvm/pfncache.c50
-rw-r--r--virt/kvm/vfio.c2
11490 files changed, 466435 insertions, 223784 deletions
diff --git a/.mailmap b/.mailmap
index 2216b5d5c84e..df56e43cb57f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -20,6 +20,7 @@ Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Adrian Bunk <bunk@stusta.de>
+Ajay Kaher <ajay.kaher@broadcom.com> <akaher@vmware.com>
Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
@@ -36,6 +37,17 @@ Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
+Alex Elder <elder@kernel.org>
+Alex Elder <elder@kernel.org> <aelder@sgi.com>
+Alex Elder <elder@kernel.org> <alex.elder@linaro.org>
+Alex Elder <elder@kernel.org> <alex.elder@linary.org>
+Alex Elder <elder@kernel.org> <elder@dreamhost.com>
+Alex Elder <elder@kernel.org> <elder@dreawmhost.com>
+Alex Elder <elder@kernel.org> <elder@ieee.org>
+Alex Elder <elder@kernel.org> <elder@inktank.com>
+Alex Elder <elder@kernel.org> <elder@linaro.org>
+Alex Elder <elder@kernel.org> <elder@newdream.net>
Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
@@ -85,6 +97,11 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
+Barry Song <baohua@kernel.org> <21cnbao@gmail.com>
+Barry Song <baohua@kernel.org> <v-songbaohua@oppo.com>
+Barry Song <baohua@kernel.org> <song.bao.hua@hisilicon.com>
+Barry Song <baohua@kernel.org> <Baohua.Song@csr.com>
+Barry Song <baohua@kernel.org> <barry.song@analog.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
@@ -96,6 +113,8 @@ Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
@@ -110,9 +129,11 @@ Brendan Higgins <brendan.higgins@linux.dev> <brendanhiggins@google.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
+Bryan Tan <bryan-bt.tan@broadcom.com> <bryantan@vmware.com>
Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
+Carlos Bilbao <carlos.bilbao.osdev@gmail.com> <carlos.bilbao@amd.com>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
@@ -289,6 +310,7 @@ Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
+John Garry <john.g.garry@oracle.com> <john.garry@huawei.com>
John Keeping <john@keeping.me.uk> <john@metanate.com>
John Moon <john@jmoon.dev> <quic_johmoo@quicinc.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
@@ -340,7 +362,8 @@ Lee Jones <lee@kernel.org> <joneslee@google.com>
Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
Lee Jones <lee@kernel.org> <lee@ubuntu.com>
-Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@nxp.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@intel.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -442,9 +465,11 @@ Mythri P K <mythripk@ti.com>
Nadav Amit <nadav.amit@gmail.com> <namit@vmware.com>
Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
-Naoya Horiguchi <naoya.horiguchi@nec.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
-Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
+Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <quic_neeraju@quicinc.com>
+Neeraj Upadhyay <neeraj.upadhyay@kernel.org> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
@@ -495,9 +520,11 @@ Praveen BP <praveenbp@ti.com>
Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
+Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
-Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
@@ -519,6 +546,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Genoud <richard.genoud@bootlin.com> <richard.genoud@gmail.com>
Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
@@ -527,6 +555,7 @@ Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Ronak Doshi <ronak.doshi@broadcom.com> <doshir@vmware.com>
Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
@@ -649,6 +678,7 @@ Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
+Vishnu Dasa <vishnu.dasa@broadcom.com> <vdasa@vmware.com>
Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
diff --git a/CREDITS b/CREDITS
index c55c5a0ee4ff..0107047f807b 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3146,6 +3146,10 @@ S: Triftstra=DFe 55
S: 13353 Berlin
S: Germany
+N: Gustavo Pimentel
+E: gustavo.pimentel@synopsys.com
+D: PCI driver for Synopsys DesignWare
+
N: Emanuel Pirker
E: epirker@edu.uni-klu.ac.at
D: AIC5800 IEEE 1394, RAW I/O on 1394
diff --git a/Documentation/ABI/removed/sysfs-firmware-efi-vars b/Documentation/ABI/removed/sysfs-firmware-efi-vars
new file mode 100644
index 000000000000..8d97368b149b
--- /dev/null
+++ b/Documentation/ABI/removed/sysfs-firmware-efi-vars
@@ -0,0 +1,12 @@
+What: /sys/firmware/efi/vars
+Date: April 2004, removed March 2023
+Description:
+ This directory exposed interfaces for interacting with
+ EFI variables. For more information on EFI variables,
+ see 'Variable Services' in the UEFI specification
+ (section 7.2 in specification version 2.3 Errata D).
+
+ The 'efivars' sysfs interface was removed in March of 2023,
+ after being considered deprecated no later than September
+ of 2020. Its functionality has been replaced by the
+ 'efivarfs' filesystem.
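
For illustration only, a minimal Python sketch of reading one variable through the efivarfs replacement mentioned above. The variable name and GUID are example values, and the sketch assumes the efivarfs layout in which the first four bytes of each file carry the attribute flags and the rest is the payload.

import struct

# Example variable; any <Name>-<vendor GUID> file under efivarfs works the same way.
path = "/sys/firmware/efi/efivars/BootCurrent-8be4df61-93ca-11d2-aa0d-00e098032b8c"

with open(path, "rb") as f:
    raw = f.read()

attributes = struct.unpack_from("<I", raw, 0)[0]   # leading 4 bytes: attribute flags
data = raw[4:]                                     # remainder: the variable payload
print("attributes=0x%08x, %d data bytes" % (attributes, len(data)))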
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 1fe9a553c37b..831f19a32e08 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -101,6 +101,16 @@ Description:
devices that support receiving integrity metadata.
+What: /sys/block/<disk>/partscan
+Date: May 2024
+Contact: Christoph Hellwig <hch@lst.de>
+Description:
+ The /sys/block/<disk>/partscan file reports whether partition
+ scanning is enabled for the disk. It returns "1" if partition
+ scanning is enabled, or "0" if not. The value type is a 32-bit
+ unsigned integer, but only "0" and "1" are valid values.
+
+
What: /sys/block/<disk>/<partition>/alignment_offset
Date: April 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
@@ -584,18 +594,6 @@ Description:
the data. If no such restriction exists, this file will contain
'0'. This file is writable for testing purposes.
-
-What: /sys/block/<disk>/queue/throttle_sample_time
-Date: March 2017
-Contact: linux-block@vger.kernel.org
-Description:
- [RW] This is the time window that blk-throttle samples data, in
- millisecond. blk-throttle makes decision based on the
- samplings. Lower time means cgroups have more smooth throughput,
- but higher CPU overhead. This exists only when
- CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
-
-
What: /sys/block/<disk>/queue/virt_boundary_mask
Date: April 2021
Contact: linux-block@vger.kernel.org
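
As a small aside on the partscan attribute documented above, a minimal Python sketch (assuming the usual /sys/block layout) that reports the flag for every disk exposing it:

import glob

for path in sorted(glob.glob("/sys/block/*/partscan")):
    disk = path.split("/")[3]                 # /sys/block/<disk>/partscan
    with open(path) as f:
        enabled = f.read().strip() == "1"
    print("%s: partition scanning %s" % (disk, "enabled" if enabled else "disabled"))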
diff --git a/Documentation/ABI/stable/sysfs-firmware-efi-vars b/Documentation/ABI/stable/sysfs-firmware-efi-vars
deleted file mode 100644
index 46ccd233e359..000000000000
--- a/Documentation/ABI/stable/sysfs-firmware-efi-vars
+++ /dev/null
@@ -1,79 +0,0 @@
-What: /sys/firmware/efi/vars
-Date: April 2004
-Contact: Matt Domsch <Matt_Domsch@dell.com>
-Description:
- This directory exposes interfaces for interactive with
- EFI variables. For more information on EFI variables,
- see 'Variable Services' in the UEFI specification
- (section 7.2 in specification version 2.3 Errata D).
-
- In summary, EFI variables are named, and are classified
- into separate namespaces through the use of a vendor
- GUID. They also have an arbitrary binary value
- associated with them.
-
- The efivars module enumerates these variables and
- creates a separate directory for each one found. Each
- directory has a name of the form "<key>-<vendor guid>"
- and contains the following files:
-
- =============== ========================================
- attributes: A read-only text file enumerating the
- EFI variable flags. Potential values
- include:
-
- EFI_VARIABLE_NON_VOLATILE
- EFI_VARIABLE_BOOTSERVICE_ACCESS
- EFI_VARIABLE_RUNTIME_ACCESS
- EFI_VARIABLE_HARDWARE_ERROR_RECORD
- EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS
-
- See the EFI documentation for an
- explanation of each of these variables.
-
- data: A read-only binary file that can be read
- to attain the value of the EFI variable
-
- guid: The vendor GUID of the variable. This
- should always match the GUID in the
- variable's name.
-
- raw_var: A binary file that can be read to obtain
- a structure that contains everything
- there is to know about the variable.
- For structure definition see "struct
- efi_variable" in the kernel sources.
-
- This file can also be written to in
- order to update the value of a variable.
- For this to work however, all fields of
- the "struct efi_variable" passed must
- match byte for byte with the structure
- read out of the file, save for the value
- portion.
-
- **Note** the efi_variable structure
- read/written with this file contains a
- 'long' type that may change widths
- depending on your underlying
- architecture.
-
- size: As ASCII representation of the size of
- the variable's value.
- =============== ========================================
-
-
- In addition, two other magic binary files are provided
- in the top-level directory and are used for adding and
- removing variables:
-
- =============== ========================================
- new_var: Takes a "struct efi_variable" and
- instructs the EFI firmware to create a
- new variable.
-
- del_var: Takes a "struct efi_variable" and
- instructs the EFI firmware to remove any
- variable that has a matching vendor GUID
- and variable key name.
- =============== ========================================
diff --git a/Documentation/ABI/testing/debugfs-msi-wmi-platform b/Documentation/ABI/testing/debugfs-msi-wmi-platform
new file mode 100644
index 000000000000..71f9992168d8
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-msi-wmi-platform
@@ -0,0 +1,14 @@
+What: /sys/kernel/debug/msi-wmi-platform-<wmi_device_name>/*
+Date: April 2024
+KernelVersion: 6.10
+Contact: Armin Wolf <W_Armin@gmx.de>
+Description:
+ This file allows executing the associated WMI method of the same name.
+
+ To start the execution, write a buffer containing the method arguments
+ at file offset 0. Partial writes or writes at a different offset are not
+ supported.
+
+ The buffer returned by the WMI method can then be read from the file.
+
+ See Documentation/wmi/devices/msi-wmi-platform.rst for details.
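As a rough usage sketch of the interface described above (the "Get_EC" method file below is a hypothetical placeholder, and the <wmi_device_name> part is kept as in the ABI entry), invoking a method could look like::

    # write the method arguments as a single buffer at offset 0
    printf '\001\000\000\000' > /sys/kernel/debug/msi-wmi-platform-<wmi_device_name>/Get_EC
    # read back the buffer returned by the WMI method
    hexdump -C /sys/kernel/debug/msi-wmi-platform-<wmi_device_name>/Get_EC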
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
index 77de58d03822..e7efeab2ee83 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -37,6 +37,12 @@ Description: Per-pmu performance monitoring events specific to the running syste
performance monitoring event supported by the <pmu>. The name
of the file is the name of the event.
+ As performance monitoring event names are case
+ insensitive in the perf tool, the perf tool only looks
+ for lower- or upper-case event names in sysfs to avoid
+ scanning the directory. It is therefore required that
+ the event name here be either all lower case or all
+ upper case.
+
File contents:
<term>[=<value>][,<term>[=<value>]]...
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 710d47be11e0..e7e160954e79 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -423,7 +423,7 @@ What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/occ_reset
Date: March 2016
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
- Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+ Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: POWERNV CPUFreq driver's frequency throttle stats directory and
attributes
@@ -473,7 +473,7 @@ What: /sys/devices/system/cpu/cpufreq/policyX/throttle_stats
/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/occ_reset
Date: March 2016
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
- Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+ Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: POWERNV CPUFreq driver's frequency throttle stats directory and
attributes
@@ -608,7 +608,7 @@ Description: Umwait control
What: /sys/devices/system/cpu/svm
Date: August 2019
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
- Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+ Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Secure Virtual Machine
If 1, it means the system is using the Protected Execution
@@ -617,7 +617,7 @@ Description: Secure Virtual Machine
What: /sys/devices/system/cpu/cpuX/purr
Date: Apr 2005
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: PURR ticks for this CPU since the system boot.
The Processor Utilization Resources Register (PURR) is
@@ -628,7 +628,7 @@ Description: PURR ticks for this CPU since the system boot.
What: /sys/devices/system/cpu/cpuX/spurr
Date: Dec 2006
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: SPURR ticks for this CPU since the system boot.
The Scaled Processor Utilization Resources Register
@@ -640,7 +640,7 @@ Description: SPURR ticks for this CPU since the system boot.
What: /sys/devices/system/cpu/cpuX/idle_purr
Date: Apr 2020
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: PURR ticks for cpuX when it was idle.
This sysfs interface exposes the number of PURR ticks
@@ -648,7 +648,7 @@ Description: PURR ticks for cpuX when it was idle.
What: /sys/devices/system/cpu/cpuX/idle_spurr
Date: Apr 2020
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: SPURR ticks for cpuX when it was idle.
This sysfs interface exposes the number of SPURR ticks
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 023fd82de3f7..d792a56f59ac 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -10,7 +10,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
power limit is disabled, writing 0 disables the
limit. Writing values > 0 and <= TDP will enable the power limit.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
Date: September 2023
@@ -18,53 +18,93 @@ KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
Description: RO. Card default power limit (default TDP setting).
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Card reactive critical (I1) power limit in microwatts.
+Description: RO. Card energy input of device in microjoules.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
+Date: October 2023
+KernelVersion: 6.6
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Card sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_max
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package reactive sustained (PL1) power limit in microwatts.
+
+ The power controller will throttle the operating frequency
+ if the power averaged over a window (typically seconds)
+ exceeds this limit. A read value of 0 means that the PL1
+ power limit is disabled, writing 0 disables the
+ limit. Writing values > 0 and <= TDP will enable the power limit.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_rated_max
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RO. Package default power limit (default TDP setting).
- Card reactive critical (I1) power limit in microwatts is exposed
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_crit
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package reactive critical (I1) power limit in microwatts.
+
+ Package reactive critical (I1) power limit in microwatts is exposed
for client products. The power controller will throttle the
operating frequency if the power averaged over a window exceeds
this limit.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr2_crit
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Card reactive critical (I1) power limit in milliamperes.
+Description: RW. Package reactive critical (I1) power limit in milliamperes.
- Card reactive critical (I1) power limit in milliamperes is
+ Package reactive critical (I1) power limit in milliamperes is
exposed for server products. The power controller will throttle
the operating frequency if the power averaged over a window
exceeds this limit.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy2_input
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RO. Current Voltage in millivolt.
+Description: RO. Package energy input of device in microjoules.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_max_interval
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RO. Energy input of device in microjoules.
+Description: RW. Package sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
-Date: October 2023
-KernelVersion: 6.6
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in1_input
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
- milliseconds over which sustained power is averaged.
+Description: RO. Package current voltage in millivolt.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
diff --git a/Documentation/ABI/testing/sysfs-driver-panfrost-profiling b/Documentation/ABI/testing/sysfs-driver-panfrost-profiling
new file mode 100644
index 000000000000..7597c420e54b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-panfrost-profiling
@@ -0,0 +1,10 @@
+What: /sys/bus/platform/drivers/panfrost/.../profiling
+Date: February 2024
+KernelVersion: 6.8.0
+Contact: Adrian Larumbe <adrian.larumbe@collabora.com>
+Description:
+ Get/set drm fdinfo's engine and cycles profiling status.
+ Valid values are:
+ 0: Don't enable fdinfo job profiling sources.
+ 1: Enable fdinfo job profiling sources; this enables both the GPU's
+ timestamp and cycle counter registers.
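For illustration only (the device path component below is a hypothetical stand-in for the "..." placeholder, and the process/fd identifiers will vary), enabling profiling and checking a GPU client's fdinfo could look like::

    echo 1 > /sys/bus/platform/drivers/panfrost/1f00000.gpu/profiling
    # the DRM fdinfo of a process using the GPU should now report
    # engine time and cycle counts
    cat /proc/<pid-of-gpu-client>/fdinfo/<drm-fd>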
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-powercap b/Documentation/ABI/testing/sysfs-firmware-opal-powercap
index c9b66ec4f165..d2d12ee89288 100644
--- a/Documentation/ABI/testing/sysfs-firmware-opal-powercap
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-powercap
@@ -1,6 +1,6 @@
What: /sys/firmware/opal/powercap
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Powercap directory for Powernv (P8, P9) servers
Each folder in this directory contains a
@@ -11,7 +11,7 @@ What: /sys/firmware/opal/powercap/system-powercap
/sys/firmware/opal/powercap/system-powercap/powercap-max
/sys/firmware/opal/powercap/system-powercap/powercap-current
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: System powercap directory and attributes applicable for
Powernv (P8, P9) servers
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-psr b/Documentation/ABI/testing/sysfs-firmware-opal-psr
index cc2ece70e365..1e55b56a0f89 100644
--- a/Documentation/ABI/testing/sysfs-firmware-opal-psr
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-psr
@@ -1,6 +1,6 @@
What: /sys/firmware/opal/psr
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Power-Shift-Ratio directory for Powernv P9 servers
Power-Shift-Ratio allows to provide hints the firmware
@@ -10,7 +10,7 @@ Description: Power-Shift-Ratio directory for Powernv P9 servers
What: /sys/firmware/opal/psr/cpu_to_gpu_X
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: PSR sysfs attributes for Powernv P9 servers
Power-Shift-Ratio between CPU and GPU for a given chip
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups b/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups
index 3a2dfe542e8c..fcb1fb4795b6 100644
--- a/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups
@@ -1,6 +1,6 @@
What: /sys/firmware/opal/sensor_groups
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Sensor groups directory for POWER9 powernv servers
Each folder in this directory contains a sensor group
@@ -11,7 +11,7 @@ Description: Sensor groups directory for POWER9 powernv servers
What: /sys/firmware/opal/sensor_groups/<sensor_group_name>/clear
Date: August 2017
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Sysfs file to clear the min-max of all the sensors
belonging to the group.
diff --git a/Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info b/Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info
index 141a6b371469..f5cefb81ac9d 100644
--- a/Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info
+++ b/Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info
@@ -1,6 +1,6 @@
What: /sys/firmware/papr/energy_scale_info
Date: February 2022
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Directory hosting a set of platform attributes like
energy/frequency on Linux running as a PAPR guest.
@@ -10,20 +10,20 @@ Description: Directory hosting a set of platform attributes like
What: /sys/firmware/papr/energy_scale_info/<id>
Date: February 2022
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Energy, frequency attributes directory for POWERVM servers
What: /sys/firmware/papr/energy_scale_info/<id>/desc
Date: February 2022
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: String description of the energy attribute of <id>
What: /sys/firmware/papr/energy_scale_info/<id>/value
Date: February 2022
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: Numeric value of the energy attribute of <id>
What: /sys/firmware/papr/energy_scale_info/<id>/value_desc
Date: February 2022
-Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Contact: Linux for PowerPC mailing list <linuxppc-dev@lists.ozlabs.org>
Description: String value of the energy attribute of <id>
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 1a4d83953379..cad6c3dc1f9c 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -331,7 +331,7 @@ Date: January 2018
Contact: Jaegeuk Kim <jaegeuk@kernel.org>
Description: This indicates how many GC can be failed for the pinned
file. If it exceeds this, F2FS doesn't guarantee its pinning
- state. 2048 trials is set by default.
+ state. The default is 2048 trials and the maximum is 65535.
What: /sys/fs/f2fs/<disk>/extension_list
Date: February 2018
diff --git a/Documentation/ABI/testing/sysfs-kernel-fadump b/Documentation/ABI/testing/sysfs-kernel-fadump
index 8f7a64a81783..2f9daa7ca55b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-fadump
+++ b/Documentation/ABI/testing/sysfs-kernel-fadump
@@ -38,3 +38,21 @@ Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Provide information about the amount of memory reserved by
FADump to save the crash dump in bytes.
+
+What: /sys/kernel/fadump/hotplug_ready
+Date: Apr 2024
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ The kdump udev rule re-registers fadump on memory add/remove events,
+ primarily to update the elfcorehdr. This sysfs file indicates to the
+ kdump udev rule that fadump re-registration is not required on
+ memory add/remove events, because the elfcorehdr is now prepared in
+ the second/fadump kernel.
+User: kexec-tools
+
+What: /sys/kernel/fadump/bootargs_append
+Date: May 2024
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ This is a special sysfs file used to set up additional
+ parameters to be passed to the capture kernel.
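A minimal sketch of how these two files could be used on a fadump-enabled system (the appended parameter is an arbitrary example)::

    # 1 means the elfcorehdr is prepared in the second/fadump kernel,
    # so the kdump udev rule can skip fadump re-registration on
    # memory add/remove events
    cat /sys/kernel/fadump/hotplug_ready

    # pass extra parameters to the capture kernel
    echo "nr_cpus=1" > /sys/kernel/fadump/bootargs_append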
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index dad4d5ffd786..cef6e1d20b18 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -314,9 +314,9 @@ Date: Dec 2022
Contact: SeongJae Park <sj@kernel.org>
Description: Writing to and reading from this file sets and gets the type of
the memory of the interest. 'anon' for anonymous pages,
- 'memcg' for specific memory cgroup, 'addr' for address range
- (an open-ended interval), or 'target' for DAMON monitoring
- target can be written and read.
+ 'memcg' for specific memory cgroup, 'young' for young pages,
+ 'addr' for address range (an open-ended interval), or 'target'
+ for DAMON monitoring target can be written and read.
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/memcg_path
Date: Dec 2022
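As a hypothetical sketch (the kdamond/context/scheme/filter indexes are placeholders and assume the filter directory has already been allocated via nr_filters), selecting the new 'young' filter type could look like::

    cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/filters/0
    echo young > type
    echo Y > matching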
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage
new file mode 100644
index 000000000000..7bfbb9cc2c11
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage
@@ -0,0 +1,18 @@
+What: /sys/kernel/mm/transparent_hugepage/
+Date: April 2024
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description:
+ /sys/kernel/mm/transparent_hugepage/ contains a number of files and
+ subdirectories,
+
+ - defrag
+ - enabled
+ - hpage_pmd_size
+ - khugepaged
+ - shmem_enabled
+ - use_zero_page
+ - subdirectories of the form hugepages-<size>kB, where <size>
+ is the page size of the hugepages supported by the kernel/CPU
+ combination.
+
+ See Documentation/admin-guide/mm/transhuge.rst for details.
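To see this layout on a running system (the exact set of hugepages-<size>kB subdirectories depends on the kernel/CPU combination)::

    ls /sys/kernel/mm/transparent_hugepage/
    cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
    ls -d /sys/kernel/mm/transparent_hugepage/hugepages-*kB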
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
index 8a7e25bde085..28144371a0f1 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
+++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
@@ -126,6 +126,14 @@ Description:
Change the mini-LED mode:
* 0 - Single-zone,
* 1 - Multi-zone
+ * 2 - Multi-zone strong (available on newer generation mini-LED)
+
+What: /sys/devices/platform/<platform>/available_mini_led_mode
+Date: Apr 2024
+KernelVersion: 6.10
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ List the available mini-LED modes.
What: /sys/devices/platform/<platform>/ppt_pl1_spl
Date: Jun 2023
@@ -186,3 +194,21 @@ Contact: "Luke Jones" <luke@ljones.dev>
Description:
Set the target temperature limit of the Nvidia dGPU:
* min=75, max=87
+
+What: /sys/devices/platform/<platform>/boot_sound
+Date: Apr 2024
+KernelVersion: 6.10
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set if the BIOS POST sound is played on boot.
+ * 0 - False,
+ * 1 - True
+
+What: /sys/devices/platform/<platform>/mcu_powersave
+Date: Apr 2024
+KernelVersion: 6.10
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set if the MCU can go into low-power mode on system sleep.
+ * 0 - False,
+ * 1 - True
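For example, with asus-nb-wmi as a stand-in for the <platform> placeholder and using the 0/1 semantics described above::

    # query which mini-LED modes the firmware exposes
    cat /sys/devices/platform/asus-nb-wmi/available_mini_led_mode
    # silence the BIOS POST sound; allow the MCU to power down in sleep
    echo 0 > /sys/devices/platform/asus-nb-wmi/boot_sound
    echo 1 > /sys/devices/platform/asus-nb-wmi/mcu_powersave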
diff --git a/Documentation/Makefile b/Documentation/Makefile
index b68f8c816897..fa71602ec961 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -28,6 +28,10 @@ BUILDDIR = $(obj)/output
PDFLATEX = xelatex
LATEXOPTS = -interaction=batchmode -no-shell-escape
+# For denylisting "variable font" files
+# Can be overridden by setting as an env variable
+FONTS_CONF_DENY_VF ?= $(HOME)/deny-vf
+
ifeq ($(findstring 1, $(KBUILD_VERBOSE)),)
SPHINXOPTS += "-q"
endif
@@ -76,22 +80,22 @@ loop_cmd = $(echo-cmd) $(cmd_$(1)) || exit;
# * dest folder relative to $(BUILDDIR) and
# * cache folder relative to $(BUILDDIR)/.doctrees
# $4 dest subfolder e.g. "man" for man pages at userspace-api/media/man
-# $5 reST source folder relative to $(srctree)/$(src),
+# $5 reST source folder relative to $(src),
# e.g. "userspace-api/media" for the linux-tv book-set at ./Documentation/userspace-api/media
quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
PYTHONDONTWRITEBYTECODE=1 \
- BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+ BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(src)/$5/$(SPHINX_CONF)) \
$(PYTHON3) $(srctree)/scripts/jobserver-exec \
$(CONFIG_SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \
- -c $(abspath $(srctree)/$(src)) \
+ -c $(abspath $(src)) \
-d $(abspath $(BUILDDIR)/.doctrees/$3) \
-D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) \
$(ALLSPHINXOPTS) \
- $(abspath $(srctree)/$(src)/$5) \
+ $(abspath $(src)/$5) \
$(abspath $(BUILDDIR)/$3/$4) && \
if [ "x$(DOCS_CSS)" != "x" ]; then \
cp $(if $(patsubst /%,,$(DOCS_CSS)),$(abspath $(srctree)/$(DOCS_CSS)),$(DOCS_CSS)) $(BUILDDIR)/$3/_static/; \
@@ -151,10 +155,11 @@ pdfdocs:
else # HAVE_PDFLATEX
+pdfdocs: DENY_VF = XDG_CONFIG_HOME=$(FONTS_CONF_DENY_VF)
pdfdocs: latexdocs
@$(srctree)/scripts/sphinx-pre-install --version-check
$(foreach var,$(SPHINXDIRS), \
- $(MAKE) PDFLATEX="$(PDFLATEX)" LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex || exit; \
+ $(MAKE) PDFLATEX="$(PDFLATEX)" LATEXOPTS="$(LATEXOPTS)" $(DENY_VF) -C $(BUILDDIR)/$(var)/latex || sh $(srctree)/scripts/check-variable-fonts.sh || exit; \
mkdir -p $(BUILDDIR)/$(var)/pdf; \
mv $(subst .tex,.pdf,$(wildcard $(BUILDDIR)/$(var)/latex/*.tex)) $(BUILDDIR)/$(var)/pdf/; \
)
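As a sketch, a PDF build that points fontconfig at a different variable-font deny-list directory could be invoked as follows (the directory name is an arbitrary example)::

    make FONTS_CONF_DENY_VF=$HOME/fonts-deny-vf pdfdocs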
diff --git a/Documentation/PCI/msi-howto.rst b/Documentation/PCI/msi-howto.rst
index 783d30b7bb42..0692c9aec66f 100644
--- a/Documentation/PCI/msi-howto.rst
+++ b/Documentation/PCI/msi-howto.rst
@@ -103,7 +103,7 @@ min_vecs argument set to this limit, and the PCI core will return -ENOSPC
if it can't meet the minimum number of vectors.
The flags argument is used to specify which type of interrupt can be used
-by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+by the device and the driver (PCI_IRQ_INTX, PCI_IRQ_MSI, PCI_IRQ_MSIX).
A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst
index cced568d78e9..dd7b1c0c21da 100644
--- a/Documentation/PCI/pci.rst
+++ b/Documentation/PCI/pci.rst
@@ -335,7 +335,7 @@ causes the PCI support to program CPU vector data into the PCI device
capability registers. Many architectures, chip-sets, or BIOSes do NOT
support MSI or MSI-X and a call to pci_alloc_irq_vectors with just
the PCI_IRQ_MSI and PCI_IRQ_MSIX flags will fail, so try to always
-specify PCI_IRQ_LEGACY as well.
+specify PCI_IRQ_INTX as well.
Drivers that have different interrupt handlers for MSI/MSI-X and
legacy INTx should chose the right one based on the msi_enabled
diff --git a/Documentation/PCI/pcieaer-howto.rst b/Documentation/PCI/pcieaer-howto.rst
index e00d63971695..f013f3b27c82 100644
--- a/Documentation/PCI/pcieaer-howto.rst
+++ b/Documentation/PCI/pcieaer-howto.rst
@@ -241,7 +241,7 @@ After reboot with new kernel or insert the module, a device file named
Then, you need a user space tool named aer-inject, which can be gotten
from:
- https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ https://github.com/intel/aer-inject.git
More information about aer-inject can be found in the document in
its source code.
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 872ac665223f..94838c65c7d9 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -427,7 +427,7 @@ their assorted primitives.
This section shows a simple use of the core RCU API to protect a
global pointer to a dynamically allocated structure. More-typical
-uses of RCU may be found in listRCU.rst, arrayRCU.rst, and NMI-RCU.rst.
+uses of RCU may be found in listRCU.rst and NMI-RCU.rst.
::
struct foo {
@@ -510,8 +510,8 @@ So, to sum up:
data item.
See checklist.rst for additional rules to follow when using RCU.
-And again, more-typical uses of RCU may be found in listRCU.rst,
-arrayRCU.rst, and NMI-RCU.rst.
+And again, more-typical uses of RCU may be found in listRCU.rst
+and NMI-RCU.rst.
.. _4_whatisRCU:
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index ee2b0030d416..091e8bb38887 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -466,6 +466,11 @@ of equal or greater size:::
#recompress idle pages larger than 2000 bytes
echo "type=idle threshold=2000" > /sys/block/zramX/recompress
+It is also possible to limit the number of pages zram re-compression will
+attempt to recompress:::
+
+ echo "type=huge_idle max_pages=42" > /sys/block/zramX/recompress
+
Recompression of idle pages requires memory tracking.
During re-compression for every page, that matches re-compression criteria,
diff --git a/Documentation/admin-guide/cgroup-v1/cgroups.rst b/Documentation/admin-guide/cgroup-v1/cgroups.rst
index 9343148ee993..a3e2edb3d274 100644
--- a/Documentation/admin-guide/cgroup-v1/cgroups.rst
+++ b/Documentation/admin-guide/cgroup-v1/cgroups.rst
@@ -570,7 +570,7 @@ visible to cgroup_for_each_child/descendant_*() iterators. The
subsystem may choose to fail creation by returning -errno. This
callback can be used to implement reliable state sharing and
propagation along the hierarchy. See the comment on
-cgroup_for_each_descendant_pre() for details.
+cgroup_for_each_live_descendant_pre() for details.
``void css_offline(struct cgroup *cgrp);``
(cgroup_mutex held by caller)
diff --git a/Documentation/admin-guide/cgroup-v1/cpusets.rst b/Documentation/admin-guide/cgroup-v1/cpusets.rst
index 7d3415eea05d..f401af5e2f09 100644
--- a/Documentation/admin-guide/cgroup-v1/cpusets.rst
+++ b/Documentation/admin-guide/cgroup-v1/cpusets.rst
@@ -568,7 +568,7 @@ on the next tick. For some applications in special situation, waiting
The 'cpuset.sched_relax_domain_level' file allows you to request changing
this searching range as you like. This file takes int value which
-indicates size of searching range in levels ideally as follows,
+indicates size of searching range in levels approximately as follows,
otherwise initial value -1 that indicates the cpuset has no request.
====== ===========================================================
@@ -581,6 +581,11 @@ otherwise initial value -1 that indicates the cpuset has no request.
5 search system wide [on NUMA system]
====== ===========================================================
+Not all levels can be present and values can change depending on the
+system architecture and kernel configuration. Check
+/sys/kernel/debug/sched/domains/cpu*/domain*/ for system-specific
+details.
+
The system default is architecture dependent. The system default
can be changed using the relax_domain_level= boot parameter.
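An illustrative sketch (the cgroup mount point and cpuset name are assumptions, and the debugfs files require a kernel with scheduler debugging enabled)::

    echo 2 > /sys/fs/cgroup/cpuset/mygroup/cpuset.sched_relax_domain_level
    # cross-check which domain levels actually exist on this system
    ls /sys/kernel/debug/sched/domains/cpu0/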
diff --git a/Documentation/admin-guide/cgroup-v1/memcg_test.rst b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
index 1f128458ddea..9f8e27355cba 100644
--- a/Documentation/admin-guide/cgroup-v1/memcg_test.rst
+++ b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
@@ -102,7 +102,7 @@ Under below explanation, we assume CONFIG_SWAP=y.
The logic is very clear. (About migration, see below)
Note:
- __remove_from_page_cache() is called by remove_from_page_cache()
+ __filemap_remove_folio() is called by filemap_remove_folio()
and __remove_mapping().
6. Shmem(tmpfs) Page Cache
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index ca7d9402f6be..9cde26d33843 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -300,14 +300,14 @@ When oom event notifier is registered, event will be delivered.
Lock order is as follows::
- Page lock (PG_locked bit of page->flags)
+ folio_lock
mm->page_table_lock or split pte_lock
folio_memcg_lock (memcg->move_lock)
mapping->i_pages lock
lruvec->lru_lock.
Per-node-per-memcgroup LRU (cgroup's private LRU) is guarded by
-lruvec->lru_lock; PG_lru bit of page->flags is cleared before
+lruvec->lru_lock; the folio LRU flag is cleared before
isolating a page from its LRU under lruvec->lru_lock.
.. _cgroup-v1-memory-kernel-extension:
@@ -802,8 +802,8 @@ a page or a swap can be moved only when it is charged to the task's current
| | anonymous pages, file pages (and swaps) in the range mmapped by the task |
| | will be moved even if the task hasn't done page fault, i.e. they might |
| | not be the task's "RSS", but other task's "RSS" that maps the same file. |
-| | And mapcount of the page is ignored (the page can be moved even if |
-| | page_mapcount(page) > 1). You must enable Swap Extension (see 2.4) to |
+| | The mapcount of the page is ignored (the page can be moved independent |
+| | of the mapcount). You must enable Swap Extension (see 2.4) to |
| | enable move of swap charges. |
+---+--------------------------------------------------------------------------+
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 17e6e9565156..8fbb0519d556 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1058,12 +1058,15 @@ cpufreq governor about the minimum desired frequency which should always be
provided by a CPU, as well as the maximum desired frequency, which should not
be exceeded by a CPU.
-WARNING: cgroup2 doesn't yet support control of realtime processes and
-the cpu controller can only be enabled when all RT processes are in
-the root cgroup. Be aware that system management software may already
-have placed RT processes into nonroot cgroups during the system boot
-process, and these processes may need to be moved to the root cgroup
-before the cpu controller can be enabled.
+WARNING: cgroup2 doesn't yet support control of realtime processes. For
+a kernel built with the CONFIG_RT_GROUP_SCHED option enabled for group
+scheduling of realtime processes, the cpu controller can only be enabled
+when all RT processes are in the root cgroup. This limitation does
+not apply if CONFIG_RT_GROUP_SCHED is disabled. Be aware that system
+management software may already have placed RT processes into nonroot
+cgroups during the system boot process, and these processes may need
+to be moved to the root cgroup before the cpu controller can be enabled
+with a CONFIG_RT_GROUP_SCHED enabled kernel.
CPU Interface Files
@@ -1432,7 +1435,7 @@ PAGE_SIZE multiple when read back.
sec_pagetables
Amount of memory allocated for secondary page tables,
this currently includes KVM mmu allocations on x86
- and arm64.
+ and arm64, and IOMMU page tables.
percpu (npn)
Amount of memory used for storing per-cpu kernel
@@ -1572,6 +1575,15 @@ PAGE_SIZE multiple when read back.
pglazyfreed (npn)
Amount of reclaimed lazyfree pages
+ zswpin
+ Number of pages moved in to memory from zswap.
+
+ zswpout
+ Number of pages moved out of memory to zswap.
+
+ zswpwb
+ Number of pages written from zswap to swap.
+
thp_fault_alloc (npn)
Number of transparent hugepages which were allocated to satisfy
a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE
@@ -2181,11 +2193,25 @@ PID Interface Files
Hard limit of number of processes.
pids.current
- A read-only single value file which exists on all cgroups.
+ A read-only single value file which exists on non-root cgroups.
The number of processes currently in the cgroup and its
descendants.
+ pids.peak
+ A read-only single value file which exists on non-root cgroups.
+
+ The maximum value that the number of processes in the cgroup and its
+ descendants has ever reached.
+
+ pids.events
+ A read-only flat-keyed file which exists on non-root cgroups. The
+ following entries are defined. Unless specified otherwise, a value
+ change in this file generates a file modified event.
+
+ max
+ Number of times fork failed because limit was hit.
+
Organisational operations are not blocked by cgroup policies, so it is
possible to have pids.current > pids.max. This can be done by either
setting the limit to be smaller than pids.current, or attaching enough
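A brief sketch of reading the new pids files (the cgroup name is an example; values are system-dependent)::

    cat /sys/fs/cgroup/mygroup/pids.peak
    cat /sys/fs/cgroup/mygroup/pids.events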
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index aa2d04d95df6..41f5f57f00eb 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -113,6 +113,11 @@ same_cpu_crypt
The default is to use an unbound workqueue so that encryption work
is automatically balanced between available CPUs.
+high_priority
+ Set dm-crypt workqueues and the writer thread to high priority. This
+ improves throughput and latency of dm-crypt while degrading general
+ responsiveness of the system.
+
submit_from_crypt_cpus
Disable offloading writes to a separate thread after encryption.
There are some situations where offloading write bios from the
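A hedged sketch of passing the new option in a crypt target line (the device, size and all-zero key below are placeholders); as with the other options in this file, the optional-parameter count precedes the parameter list::

    dmsetup create cryptdev --table "0 2097152 crypt aes-xts-plain64 \
        0000000000000000000000000000000000000000000000000000000000000000 \
        0 /dev/sdb 0 1 high_priority"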
diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
index cf1eeefdfc32..a92e10ec402e 100644
--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
@@ -67,8 +67,8 @@ arg4:
will be performed for all tasks in the task group of ``pid``.
arg5:
- userspace pointer to an unsigned long for storing the cookie returned by
- ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+ userspace pointer to an unsigned long long for storing the cookie returned
+ by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
In order for a process to push a cookie to, or pull a cookie from a process, it
is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index cce768afec6b..25a04cda4c2c 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -138,11 +138,10 @@ associated with the source address of the indirect branch. Specifically,
the BHB might be shared across privilege levels even in the presence of
Enhanced IBRS.
-Currently the only known real-world BHB attack vector is via
-unprivileged eBPF. Therefore, it's highly recommended to not enable
-unprivileged eBPF, especially when eIBRS is used (without retpolines).
-For a full mitigation against BHB attacks, it's recommended to use
-retpolines (or eIBRS combined with retpolines).
+Previously the only known real-world BHB attack vector was via unprivileged
+eBPF. Further research has found attacks that don't require unprivileged eBPF.
+For a full mitigation against BHB attacks it is recommended to set BHI_DIS_S or
+use the BHB clearing sequence.
Attack scenarios
----------------
@@ -430,6 +429,23 @@ The possible values in this file are:
'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
=========================== =======================================================
+ - Branch History Injection (BHI) protection status:
+
+.. list-table::
+
+ * - BHI: Not affected
+ - System is not affected
+ * - BHI: Retpoline
+ - System is protected by retpoline
+ * - BHI: BHI_DIS_S
+ - System is protected by BHI_DIS_S
+ * - BHI: SW loop, KVM SW loop
+ - System is protected by software clearing sequence
+ * - BHI: Vulnerable
+ - System is vulnerable to BHI
+ * - BHI: Vulnerable, KVM: SW loop
+ - System is vulnerable; KVM is protected by software clearing sequence
+
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
@@ -484,7 +500,11 @@ Spectre variant 2
Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
- Spectre v2 variant attacks.
+ some Spectre v2 variant attacks. The BHB can still influence the choice of
+ indirect branch predictor entry, and although branch predictor entries are
+ isolated between modes when eIBRS is enabled, the BHB itself is not isolated
+ between modes. Systems which support BHI_DIS_S will set it to protect against
+ BHI attacks.
On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables
@@ -638,6 +658,18 @@ kernel command line.
spectre_v2=off. Spectre variant 1 mitigations
cannot be disabled.
+ spectre_bhi=
+
+ [X86] Control mitigation of Branch History Injection
+ (BHI) vulnerability. This setting affects the deployment
+ of the HW BHI control and the SW BHB clearing sequence.
+
+ on
+ (default) Enable the HW or SW mitigation as
+ needed.
+ off
+ Disable the mitigation.
+
For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
Mitigation selection guide
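To check which of the BHI states listed above applies on a given system, the existing sysfs vulnerabilities interface can be queried; the reported string is system-dependent::

    cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
    # the output includes a "BHI: ..." token matching one of the
    # table entries above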
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index e715bfc09879..4bd3ce3ba171 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -135,7 +135,7 @@ and does not want to suffer the performance impact, one can always
disable the mitigation with spec_rstack_overflow=off.
Similarly, 'Mitigation: IBPB' is another full mitigation type employing
-an indrect branch prediction barrier after having applied the required
+an indirect branch prediction barrier after having applied the required
microcode patch for one's system. This mitigation comes also at
a performance cost.
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 0302a93b1d40..5376890adbeb 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -136,10 +136,6 @@ System kernel config options
CONFIG_KEXEC_CORE=y
- Subsequently, CRASH_CORE is selected by KEXEC_CORE::
-
- CONFIG_CRASH_CORE=y
-
2) Enable "sysfs file system support" in "Filesystem" -> "Pseudo
filesystems." This is usually enabled by default::
@@ -168,6 +164,10 @@ Dump-capture kernel config options (Arch Independent)
CONFIG_CRASH_DUMP=y
+ And this will select VMCORE_INFO and CRASH_RESERVE::
+ CONFIG_VMCORE_INFO=y
+ CONFIG_CRASH_RESERVE=y
+
2) Enable "/proc/vmcore support" under "Filesystems" -> "Pseudo filesystems"::
CONFIG_PROC_VMCORE=y
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index bb884c14b2f6..ef25e06ec08c 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -431,6 +431,9 @@
arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
Format: <io>,<irq>,<nodeID>
+ arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
+ 32 bit applications.
+
arm64.nobti [ARM64] Unconditionally disable Branch Target
Identification support
@@ -2148,6 +2151,12 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
+ init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
+ it was mlock'ed and not explicitly munlock'ed
+ afterwards.
+ Format: 0 | 1
+ Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
+
init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can
@@ -2251,6 +2260,8 @@
no_x2apic_optout
BIOS x2APIC opt-out request will be ignored
nopost disable Interrupt Posting
+ posted_msi
+ enable MSIs delivered as posted interrupts
iomem= Disable strict checking of access to MMIO memory
strict regions from userspace.
@@ -3423,6 +3434,9 @@
arch-independent options, each of which is an
aggregation of existing arch-specific options.
+ Note, "mitigations" is supported if and only if the
+ kernel was built with CPU_MITIGATIONS=y.
+
off
Disable all optional CPU mitigations. This
improves system performance, but it may also
@@ -3444,6 +3458,7 @@
retbleed=off [X86]
spec_rstack_overflow=off [X86]
spec_store_bypass_disable=off [X86,PPC]
+ spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
@@ -3772,10 +3787,12 @@
Format: [state][,regs][,debounce][,die]
nmi_watchdog= [KNL,BUGS=X86] Debugging features for SMP kernels
- Format: [panic,][nopanic,][num]
+ Format: [panic,][nopanic,][rNNN,][num]
Valid num: 0 or 1
0 - turn hardlockup detector in nmi_watchdog off
1 - turn hardlockup detector in nmi_watchdog on
+ rNNN - configure the watchdog with raw perf event 0xNNN
+
When panic is specified, panic when an NMI watchdog
timeout occurs (or 'nopanic' to not panic on an NMI
watchdog, if CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is set)
@@ -4169,13 +4186,11 @@
page_alloc.shuffle=
[KNL] Boolean flag to control whether the page allocator
- should randomize its free lists. The randomization may
- be automatically enabled if the kernel detects it is
- running on a platform with a direct-mapped memory-side
- cache, and this parameter can be used to
- override/disable that behavior. The state of the flag
- can be read from sysfs at:
+ should randomize its free lists. This parameter can be
+ used to enable/disable page randomization. The state of
+ the flag can be read from sysfs at:
/sys/module/page_alloc/parameters/shuffle.
+ This parameter is only available if CONFIG_SHUFFLE_PAGE_ALLOCATOR=y.
page_owner= [KNL,EARLY] Boot-time page_owner enabling option.
Storage of the information about who allocated
@@ -4590,9 +4605,10 @@
norid [S390] ignore the RID field and force use of
one PCI domain per PCI function
- pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
+ pcie_aspm= [PCIE] Forcibly enable or ignore PCIe Active State Power
Management.
- off Disable ASPM.
+ off Don't touch ASPM configuration at all. Leave any
+ configuration done by firmware unchanged.
force Enable ASPM even on devices that claim not to support it.
WARNING: Forcing ASPM on may cause system lockups.
@@ -4780,7 +4796,9 @@
prot_virt= [S390] enable hosting protected virtual machines
isolated from the hypervisor (if hardware supports
- that).
+ that). If enabled, the default kernel base address
+ might be overridden even when Kernel Address Space
+ Layout Randomization is disabled.
Format: <bool>
psi= [KNL] Enable or disable pressure stall information
@@ -5091,6 +5109,20 @@
delay, memory pressure or callback list growing too
big.
+ rcutree.rcu_normal_wake_from_gp= [KNL]
+ Reduces the latency of a synchronize_rcu() call. This approach
+ maintains its own tracking of synchronize_rcu() callers, so it
+ does not interact with regular callbacks because it does not
+ use a call_rcu[_hurry]() path. Please note that this applies
+ only to normal grace periods.
+
+ How to enable it:
+
+ echo 1 > /sys/module/rcutree/parameters/rcu_normal_wake_from_gp
+ or pass a boot parameter "rcutree.rcu_normal_wake_from_gp=1"
+
+ Default is 0.
+
rcuscale.gp_async= [KNL]
Measure performance of asynchronous
grace-period primitives such as call_rcu().
@@ -5807,6 +5839,7 @@
but is useful for debugging and performance tuning.
sched_thermal_decay_shift=
+ [Deprecated]
[KNL, SMP] Set a decay shift for scheduler thermal
pressure signal. Thermal pressure signal follows the
default decay period of other scheduler pelt
@@ -6063,6 +6096,15 @@
sonypi.*= [HW] Sony Programmable I/O Control Device driver
See Documentation/admin-guide/laptops/sonypi.rst
+ spectre_bhi= [X86] Control mitigation of Branch History Injection
+ (BHI) vulnerability. This setting affects the
+ deployment of the HW BHI control and the SW BHB
+ clearing sequence.
+
+ on - (default) Enable the HW or SW mitigation
+ as needed.
+ off - Disable the mitigation.
+
spectre_v2= [X86,EARLY] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
The default operation protects the kernel from
@@ -6599,7 +6641,7 @@
To turn off having tracepoints sent to printk,
echo 0 > /proc/sys/kernel/tracepoint_printk
Note, echoing 1 into this file without the
- tracepoint_printk kernel cmdline option has no effect.
+ tp_printk kernel cmdline option has no effect.
The tp_printk_stop_on_boot (see below) can also be used
to stop the printing of events to console at
@@ -6735,6 +6777,7 @@
- "tpm"
- "tee"
- "caam"
+ - "dcp"
If not specified then it defaults to iterating through
the trust source list starting with TPM and assigns the
first trust source as a backend which is initialized
@@ -6750,6 +6793,18 @@
If not specified, "default" is used. In this case,
the RNG's choice is left to each individual trust source.
+ trusted.dcp_use_otp_key
+ This is intended to be used in combination with
+ trusted.source=dcp and will select the DCP OTP key
+ instead of the DCP UNIQUE key for blob encryption.
+
+ trusted.dcp_skip_zk_test
+ This is intended to be used in combination with
+ trusted.source=dcp and will disable the check whether the
+ blob key is all zeros. This is helpful for situations where
+ having this key zeroed is acceptable, e.g. in testing
+ scenarios.
+
tsc= Disable clocksource stability checks for TSC.
Format: <string>
[x86] reliable: mark tsc clocksource as reliable, this
@@ -7310,7 +7365,7 @@
This can be changed after boot by writing to the
matching /sys/module/workqueue/parameters file. All
workqueues with the "default" affinity scope will be
- updated accordignly.
+ updated accordingly.
workqueue.debug_force_rr_cpu
Workqueue used to implicitly guarantee that work
@@ -7454,4 +7509,3 @@
memory, and other data can't be written using
xmon commands.
off xmon is disabled.
-
diff --git a/Documentation/admin-guide/media/ipu6-isys.rst b/Documentation/admin-guide/media/ipu6-isys.rst
new file mode 100644
index 000000000000..0721e920b5e6
--- /dev/null
+++ b/Documentation/admin-guide/media/ipu6-isys.rst
@@ -0,0 +1,161 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: <isonum.txt>
+
+========================================================
+Intel Image Processing Unit 6 (IPU6) Input System driver
+========================================================
+
+Copyright |copy| 2023--2024 Intel Corporation
+
+Introduction
+============
+
+This file documents the Intel IPU6 (6th generation Image Processing Unit)
+Input System (MIPI CSI2 receiver) drivers located under
+drivers/media/pci/intel/ipu6.
+
+The Intel IPU6 can be found in certain Intel SoCs but not in all SKUs:
+
+* Tiger Lake
+* Jasper Lake
+* Alder Lake
+* Raptor Lake
+* Meteor Lake
+
+Intel IPU6 is made up of two components - Input System (ISYS) and Processing
+System (PSYS).
+
+The Input System mainly works as a MIPI CSI-2 receiver which receives and
+processes the image data from the sensors and outputs the frames to memory.
+
+There are two driver modules - intel-ipu6 and intel-ipu6-isys. intel-ipu6 is the
+IPU6 common driver which does PCI configuration, firmware loading and parsing,
+firmware authentication, DMA mapping and IPU-MMU (internal Memory Mapping Unit)
+configuration. intel-ipu6-isys implements the V4L2, Media Controller and V4L2
+sub-device interfaces. The IPU6 ISYS driver supports camera sensors connected
+to the IPU6 ISYS through V4L2 sub-device sensor drivers.
+
+.. Note:: See Documentation/driver-api/media/drivers/ipu6.rst for more
+ information about the IPU6 hardware.
+
+Input system driver
+===================
+
+The Input System driver mainly configures CSI-2 D-PHY, constructs the firmware
+stream configuration, sends commands to the firmware, gets responses from the
+hardware and firmware, and then returns buffers to userspace. The ISYS is
+represented as several V4L2 sub-devices as well as video nodes.
+
+.. kernel-figure:: ipu6_isys_graph.svg
+ :alt: ipu6 isys media graph with multiple streams support
+
+ IPU6 ISYS media graph with multiple streams support
+
+The graph has been produced using the following command:
+
+.. code-block:: none
+
+ fdp -Gsplines=true -Tsvg < dot > dot.svg
+
+Capturing frames with IPU6 ISYS
+-------------------------------
+
+IPU6 ISYS is used to capture frames from the camera sensors connected to the
+CSI-2 ports. The supported input formats of the ISYS are listed in the table below:
+
+.. tabularcolumns:: |p{0.8cm}|p{4.0cm}|p{4.0cm}|
+
+.. flat-table::
+ :header-rows: 1
+
+ * - IPU6 ISYS supported input formats
+
+ * - RGB565, RGB888
+
+ * - UYVY8, YUYV8
+
+ * - RAW8, RAW10, RAW12
+
+.. _ipu6_isys_capture_examples:
+
+Examples
+~~~~~~~~
+
+Here is an example of IPU6 ISYS raw capture on a Dell XPS 9315 laptop. On this
+machine, the ov01a10 sensor is connected to IPU6 ISYS CSI-2 port 2 and can
+generate images in SBGGR10 format at a resolution of 1280x800.
+
+Using the media controller APIs, we can configure ov01a10 sensor by
+media-ctl [#f1]_ and yavta [#f2]_ to transmit frames to IPU6 ISYS.
+
+.. code-block:: none
+
+ # Example 1 capture frame from ov01a10 camera sensor
+ # This example assumes /dev/media0 as the IPU ISYS media device
+ export MDEV=/dev/media0
+
+ # Establish the link for the media devices using media-ctl
+ media-ctl -d $MDEV -l "\"ov01a10 3-0036\":0 -> \"Intel IPU6 CSI2 2\":0[1]"
+
+ # Set the format for the media devices
+ media-ctl -d $MDEV -V "ov01a10:0 [fmt:SBGGR10/1280x800]"
+ media-ctl -d $MDEV -V "Intel IPU6 CSI2 2:0 [fmt:SBGGR10/1280x800]"
+ media-ctl -d $MDEV -V "Intel IPU6 CSI2 2:1 [fmt:SBGGR10/1280x800]"
+
+Once the media pipeline is configured, the desired sensor-specific settings
+(such as exposure and gain) can be set using the yavta tool.
+
+For example:
+
+.. code-block:: none
+
+ # and that ov01a10 sensor is connected to i2c bus 3 with address 0x36
+ export SDEV=$(media-ctl -d $MDEV -e "ov01a10 3-0036")
+
+ yavta -w 0x009e0903 400 $SDEV
+ yavta -w 0x009e0913 1000 $SDEV
+ yavta -w 0x009e0911 2000 $SDEV
+
+Once the desired sensor settings are applied, frames can be captured as below.
+
+For example:
+
+.. code-block:: none
+
+ yavta --data-prefix -u -c10 -n5 -I -s 1280x800 --file=/tmp/frame-#.bin \
+ -f SBGGR10 $(media-ctl -d $MDEV -e "Intel IPU6 ISYS Capture 0")
+
+With the above command, 10 frames are captured at 1280x800 resolution with
+SBGGR10 format. The captured frames are available as /tmp/frame-#.bin files.
+
+Here is another example of IPU6 ISYS RAW and metadata capture from camera
+sensor ov2740 on Lenovo X1 Yoga laptop.
+
+.. code-block:: none
+
+ media-ctl -l "\"ov2740 14-0036\":0 -> \"Intel IPU6 CSI2 1\":0[1]"
+ media-ctl -l "\"Intel IPU6 CSI2 1\":1 -> \"Intel IPU6 ISYS Capture 0\":0[5]"
+ media-ctl -l "\"Intel IPU6 CSI2 1\":2 -> \"Intel IPU6 ISYS Capture 1\":0[5]"
+
+ # set routing
+ media-ctl -v -R "\"Intel IPU6 CSI2 1\" [0/0->1/0[1],0/1->2/1[1]]"
+
+ media-ctl -v "\"Intel IPU6 CSI2 1\":0/0 [fmt:SGRBG10/1932x1092]"
+ media-ctl -v "\"Intel IPU6 CSI2 1\":0/1 [fmt:GENERIC_8/97x1]"
+ media-ctl -v "\"Intel IPU6 CSI2 1\":1/0 [fmt:SGRBG10/1932x1092]"
+ media-ctl -v "\"Intel IPU6 CSI2 1\":2/1 [fmt:GENERIC_8/97x1]"
+
+ CAPTURE_DEV=$(media-ctl -e "Intel IPU6 ISYS Capture 0")
+ ./yavta --data-prefix -c100 -n5 -I -s1932x1092 --file=/tmp/frame-#.bin \
+ -f SGRBG10 ${CAPTURE_DEV}
+
+ CAPTURE_META=$(media-ctl -e "Intel IPU6 ISYS Capture 1")
+ ./yavta --data-prefix -c100 -n5 -I -s97x1 -B meta-capture \
+ --file=/tmp/meta-#.bin -f GENERIC_8 ${CAPTURE_META}
+
+References
+==========
+
+.. [#f1] https://git.ideasonboard.org/media-ctl.git
+.. [#f2] https://git.ideasonboard.org/yavta.git
diff --git a/Documentation/admin-guide/media/ipu6_isys_graph.svg b/Documentation/admin-guide/media/ipu6_isys_graph.svg
new file mode 100644
index 000000000000..c8539ef320d2
--- /dev/null
+++ b/Documentation/admin-guide/media/ipu6_isys_graph.svg
@@ -0,0 +1,548 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by graphviz version 2.43.0 (0)
+ -->
+<!-- Title: board Pages: 1 -->
+<svg width="1703pt" height="1473pt"
+ viewBox="0.00 0.00 1703.00 1473.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 1469)">
+<title>board</title>
+<polygon fill="white" stroke="transparent" points="-4,4 -4,-1469 1699,-1469 1699,4 -4,4"/>
+<!-- n00000001 -->
+<g id="node1" class="node">
+<title>n00000001</title>
+<polygon fill="yellow" stroke="black" points="832.99,-750.08 629.99,-750.08 629.99,-712.08 832.99,-712.08 832.99,-750.08"/>
+<text text-anchor="middle" x="731.49" y="-734.88" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 0</text>
+<text text-anchor="middle" x="731.49" y="-719.88" font-family="Times,serif" font-size="14.00">/dev/video0</text>
+</g>
+<!-- n00000005 -->
+<g id="node2" class="node">
+<title>n00000005</title>
+<polygon fill="yellow" stroke="black" points="1396.59,-771.88 1193.59,-771.88 1193.59,-733.88 1396.59,-733.88 1396.59,-771.88"/>
+<text text-anchor="middle" x="1295.09" y="-756.68" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 1</text>
+<text text-anchor="middle" x="1295.09" y="-741.68" font-family="Times,serif" font-size="14.00">/dev/video1</text>
+</g>
+<!-- n00000009 -->
+<g id="node3" class="node">
+<title>n00000009</title>
+<polygon fill="yellow" stroke="black" points="1118.52,-690.47 915.52,-690.47 915.52,-652.47 1118.52,-652.47 1118.52,-690.47"/>
+<text text-anchor="middle" x="1017.02" y="-675.27" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 2</text>
+<text text-anchor="middle" x="1017.02" y="-660.27" font-family="Times,serif" font-size="14.00">/dev/video2</text>
+</g>
+<!-- n0000000d -->
+<g id="node4" class="node">
+<title>n0000000d</title>
+<polygon fill="yellow" stroke="black" points="1105.89,-838.84 902.89,-838.84 902.89,-800.84 1105.89,-800.84 1105.89,-838.84"/>
+<text text-anchor="middle" x="1004.39" y="-823.64" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 3</text>
+<text text-anchor="middle" x="1004.39" y="-808.64" font-family="Times,serif" font-size="14.00">/dev/video3</text>
+</g>
+<!-- n00000011 -->
+<g id="node5" class="node">
+<title>n00000011</title>
+<polygon fill="yellow" stroke="black" points="1279.22,-992.95 1076.22,-992.95 1076.22,-954.95 1279.22,-954.95 1279.22,-992.95"/>
+<text text-anchor="middle" x="1177.72" y="-977.75" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 4</text>
+<text text-anchor="middle" x="1177.72" y="-962.75" font-family="Times,serif" font-size="14.00">/dev/video4</text>
+</g>
+<!-- n00000015 -->
+<g id="node6" class="node">
+<title>n00000015</title>
+<polygon fill="yellow" stroke="black" points="939.18,-984.91 736.18,-984.91 736.18,-946.91 939.18,-946.91 939.18,-984.91"/>
+<text text-anchor="middle" x="837.68" y="-969.71" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 5</text>
+<text text-anchor="middle" x="837.68" y="-954.71" font-family="Times,serif" font-size="14.00">/dev/video5</text>
+</g>
+<!-- n00000019 -->
+<g id="node7" class="node">
+<title>n00000019</title>
+<polygon fill="yellow" stroke="black" points="957.87,-527.99 754.87,-527.99 754.87,-489.99 957.87,-489.99 957.87,-527.99"/>
+<text text-anchor="middle" x="856.37" y="-512.79" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 6</text>
+<text text-anchor="middle" x="856.37" y="-497.79" font-family="Times,serif" font-size="14.00">/dev/video6</text>
+</g>
+<!-- n0000001d -->
+<g id="node8" class="node">
+<title>n0000001d</title>
+<polygon fill="yellow" stroke="black" points="1291.02,-542.15 1088.02,-542.15 1088.02,-504.15 1291.02,-504.15 1291.02,-542.15"/>
+<text text-anchor="middle" x="1189.52" y="-526.95" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 7</text>
+<text text-anchor="middle" x="1189.52" y="-511.95" font-family="Times,serif" font-size="14.00">/dev/video7</text>
+</g>
+<!-- n00000021 -->
+<g id="node9" class="node">
+<title>n00000021</title>
+<polygon fill="yellow" stroke="black" points="202.74,-611.46 -0.26,-611.46 -0.26,-573.46 202.74,-573.46 202.74,-611.46"/>
+<text text-anchor="middle" x="101.24" y="-596.26" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 8</text>
+<text text-anchor="middle" x="101.24" y="-581.26" font-family="Times,serif" font-size="14.00">/dev/video8</text>
+</g>
+<!-- n00000025 -->
+<g id="node10" class="node">
+<title>n00000025</title>
+<polygon fill="yellow" stroke="black" points="764.86,-637.89 561.86,-637.89 561.86,-599.89 764.86,-599.89 764.86,-637.89"/>
+<text text-anchor="middle" x="663.36" y="-622.69" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 9</text>
+<text text-anchor="middle" x="663.36" y="-607.69" font-family="Times,serif" font-size="14.00">/dev/video9</text>
+</g>
+<!-- n00000029 -->
+<g id="node11" class="node">
+<title>n00000029</title>
+<polygon fill="yellow" stroke="black" points="358.62,-519.5 146.62,-519.5 146.62,-481.5 358.62,-481.5 358.62,-519.5"/>
+<text text-anchor="middle" x="252.62" y="-504.3" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 10</text>
+<text text-anchor="middle" x="252.62" y="-489.3" font-family="Times,serif" font-size="14.00">/dev/video10</text>
+</g>
+<!-- n0000002d -->
+<g id="node12" class="node">
+<title>n0000002d</title>
+<polygon fill="yellow" stroke="black" points="481.4,-662.59 269.4,-662.59 269.4,-624.59 481.4,-624.59 481.4,-662.59"/>
+<text text-anchor="middle" x="375.4" y="-647.39" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 11</text>
+<text text-anchor="middle" x="375.4" y="-632.39" font-family="Times,serif" font-size="14.00">/dev/video11</text>
+</g>
+<!-- n00000031 -->
+<g id="node13" class="node">
+<title>n00000031</title>
+<polygon fill="yellow" stroke="black" points="637.17,-837.47 425.17,-837.47 425.17,-799.47 637.17,-799.47 637.17,-837.47"/>
+<text text-anchor="middle" x="531.17" y="-822.27" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 12</text>
+<text text-anchor="middle" x="531.17" y="-807.27" font-family="Times,serif" font-size="14.00">/dev/video12</text>
+</g>
+<!-- n00000035 -->
+<g id="node14" class="node">
+<title>n00000035</title>
+<polygon fill="yellow" stroke="black" points="337.75,-833.67 125.75,-833.67 125.75,-795.67 337.75,-795.67 337.75,-833.67"/>
+<text text-anchor="middle" x="231.75" y="-818.47" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 13</text>
+<text text-anchor="middle" x="231.75" y="-803.47" font-family="Times,serif" font-size="14.00">/dev/video13</text>
+</g>
+<!-- n00000039 -->
+<g id="node15" class="node">
+<title>n00000039</title>
+<polygon fill="yellow" stroke="black" points="393.07,-317.96 181.07,-317.96 181.07,-279.96 393.07,-279.96 393.07,-317.96"/>
+<text text-anchor="middle" x="287.07" y="-302.76" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 14</text>
+<text text-anchor="middle" x="287.07" y="-287.76" font-family="Times,serif" font-size="14.00">/dev/video14</text>
+</g>
+<!-- n0000003d -->
+<g id="node16" class="node">
+<title>n0000003d</title>
+<polygon fill="yellow" stroke="black" points="701.46,-391.04 489.46,-391.04 489.46,-353.04 701.46,-353.04 701.46,-391.04"/>
+<text text-anchor="middle" x="595.46" y="-375.84" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 15</text>
+<text text-anchor="middle" x="595.46" y="-360.84" font-family="Times,serif" font-size="14.00">/dev/video15</text>
+</g>
+<!-- n00000041 -->
+<g id="node17" class="node">
+<title>n00000041</title>
+<polygon fill="yellow" stroke="black" points="212.45,-1228.8 0.45,-1228.8 0.45,-1190.8 212.45,-1190.8 212.45,-1228.8"/>
+<text text-anchor="middle" x="106.45" y="-1213.6" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 16</text>
+<text text-anchor="middle" x="106.45" y="-1198.6" font-family="Times,serif" font-size="14.00">/dev/video16</text>
+</g>
+<!-- n00000045 -->
+<g id="node18" class="node">
+<title>n00000045</title>
+<polygon fill="yellow" stroke="black" points="784.86,-1252.38 572.86,-1252.38 572.86,-1214.38 784.86,-1214.38 784.86,-1252.38"/>
+<text text-anchor="middle" x="678.86" y="-1237.18" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 17</text>
+<text text-anchor="middle" x="678.86" y="-1222.18" font-family="Times,serif" font-size="14.00">/dev/video17</text>
+</g>
+<!-- n00000049 -->
+<g id="node19" class="node">
+<title>n00000049</title>
+<polygon fill="yellow" stroke="black" points="503.14,-1169.96 291.14,-1169.96 291.14,-1131.96 503.14,-1131.96 503.14,-1169.96"/>
+<text text-anchor="middle" x="397.14" y="-1154.76" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 18</text>
+<text text-anchor="middle" x="397.14" y="-1139.76" font-family="Times,serif" font-size="14.00">/dev/video18</text>
+</g>
+<!-- n0000004d -->
+<g id="node20" class="node">
+<title>n0000004d</title>
+<polygon fill="yellow" stroke="black" points="492.62,-1319.4 280.62,-1319.4 280.62,-1281.4 492.62,-1281.4 492.62,-1319.4"/>
+<text text-anchor="middle" x="386.62" y="-1304.2" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 19</text>
+<text text-anchor="middle" x="386.62" y="-1289.2" font-family="Times,serif" font-size="14.00">/dev/video19</text>
+</g>
+<!-- n00000051 -->
+<g id="node21" class="node">
+<title>n00000051</title>
+<polygon fill="yellow" stroke="black" points="680.74,-1464.66 468.74,-1464.66 468.74,-1426.66 680.74,-1426.66 680.74,-1464.66"/>
+<text text-anchor="middle" x="574.74" y="-1449.46" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 20</text>
+<text text-anchor="middle" x="574.74" y="-1434.46" font-family="Times,serif" font-size="14.00">/dev/video20</text>
+</g>
+<!-- n00000055 -->
+<g id="node22" class="node">
+<title>n00000055</title>
+<polygon fill="yellow" stroke="black" points="302.42,-1452.56 90.42,-1452.56 90.42,-1414.56 302.42,-1414.56 302.42,-1452.56"/>
+<text text-anchor="middle" x="196.42" y="-1437.36" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 21</text>
+<text text-anchor="middle" x="196.42" y="-1422.36" font-family="Times,serif" font-size="14.00">/dev/video21</text>
+</g>
+<!-- n00000059 -->
+<g id="node23" class="node">
+<title>n00000059</title>
+<polygon fill="yellow" stroke="black" points="319.89,-1018.32 107.89,-1018.32 107.89,-980.32 319.89,-980.32 319.89,-1018.32"/>
+<text text-anchor="middle" x="213.89" y="-1003.12" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 22</text>
+<text text-anchor="middle" x="213.89" y="-988.12" font-family="Times,serif" font-size="14.00">/dev/video22</text>
+</g>
+<!-- n0000005d -->
+<g id="node24" class="node">
+<title>n0000005d</title>
+<polygon fill="yellow" stroke="black" points="692.62,-1031.39 480.62,-1031.39 480.62,-993.39 692.62,-993.39 692.62,-1031.39"/>
+<text text-anchor="middle" x="586.62" y="-1016.19" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 23</text>
+<text text-anchor="middle" x="586.62" y="-1001.19" font-family="Times,serif" font-size="14.00">/dev/video23</text>
+</g>
+<!-- n00000061 -->
+<g id="node25" class="node">
+<title>n00000061</title>
+<polygon fill="yellow" stroke="black" points="1122.45,-248.8 910.45,-248.8 910.45,-210.8 1122.45,-210.8 1122.45,-248.8"/>
+<text text-anchor="middle" x="1016.45" y="-233.6" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 24</text>
+<text text-anchor="middle" x="1016.45" y="-218.6" font-family="Times,serif" font-size="14.00">/dev/video24</text>
+</g>
+<!-- n00000065 -->
+<g id="node26" class="node">
+<title>n00000065</title>
+<polygon fill="yellow" stroke="black" points="1694.86,-272.38 1482.86,-272.38 1482.86,-234.38 1694.86,-234.38 1694.86,-272.38"/>
+<text text-anchor="middle" x="1588.86" y="-257.18" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 25</text>
+<text text-anchor="middle" x="1588.86" y="-242.18" font-family="Times,serif" font-size="14.00">/dev/video25</text>
+</g>
+<!-- n00000069 -->
+<g id="node27" class="node">
+<title>n00000069</title>
+<polygon fill="yellow" stroke="black" points="1413.14,-189.96 1201.14,-189.96 1201.14,-151.96 1413.14,-151.96 1413.14,-189.96"/>
+<text text-anchor="middle" x="1307.14" y="-174.76" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 26</text>
+<text text-anchor="middle" x="1307.14" y="-159.76" font-family="Times,serif" font-size="14.00">/dev/video26</text>
+</g>
+<!-- n0000006d -->
+<g id="node28" class="node">
+<title>n0000006d</title>
+<polygon fill="yellow" stroke="black" points="1402.62,-339.4 1190.62,-339.4 1190.62,-301.4 1402.62,-301.4 1402.62,-339.4"/>
+<text text-anchor="middle" x="1296.62" y="-324.2" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 27</text>
+<text text-anchor="middle" x="1296.62" y="-309.2" font-family="Times,serif" font-size="14.00">/dev/video27</text>
+</g>
+<!-- n00000071 -->
+<g id="node29" class="node">
+<title>n00000071</title>
+<polygon fill="yellow" stroke="black" points="1590.74,-484.66 1378.74,-484.66 1378.74,-446.66 1590.74,-446.66 1590.74,-484.66"/>
+<text text-anchor="middle" x="1484.74" y="-469.46" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 28</text>
+<text text-anchor="middle" x="1484.74" y="-454.46" font-family="Times,serif" font-size="14.00">/dev/video28</text>
+</g>
+<!-- n00000075 -->
+<g id="node30" class="node">
+<title>n00000075</title>
+<polygon fill="yellow" stroke="black" points="1212.42,-472.56 1000.42,-472.56 1000.42,-434.56 1212.42,-434.56 1212.42,-472.56"/>
+<text text-anchor="middle" x="1106.42" y="-457.36" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 29</text>
+<text text-anchor="middle" x="1106.42" y="-442.36" font-family="Times,serif" font-size="14.00">/dev/video29</text>
+</g>
+<!-- n00000079 -->
+<g id="node31" class="node">
+<title>n00000079</title>
+<polygon fill="yellow" stroke="black" points="1229.89,-38.32 1017.89,-38.32 1017.89,-0.32 1229.89,-0.32 1229.89,-38.32"/>
+<text text-anchor="middle" x="1123.89" y="-23.12" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 30</text>
+<text text-anchor="middle" x="1123.89" y="-8.12" font-family="Times,serif" font-size="14.00">/dev/video30</text>
+</g>
+<!-- n0000007d -->
+<g id="node32" class="node">
+<title>n0000007d</title>
+<polygon fill="yellow" stroke="black" points="1602.62,-51.39 1390.62,-51.39 1390.62,-13.39 1602.62,-13.39 1602.62,-51.39"/>
+<text text-anchor="middle" x="1496.62" y="-36.19" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 31</text>
+<text text-anchor="middle" x="1496.62" y="-21.19" font-family="Times,serif" font-size="14.00">/dev/video31</text>
+</g>
+<!-- n00000081 -->
+<g id="node33" class="node">
+<title>n00000081</title>
+<path fill="green" stroke="black" d="M924.28,-700.28C924.28,-700.28 1108.28,-700.28 1108.28,-700.28 1114.28,-700.28 1120.28,-706.28 1120.28,-712.28 1120.28,-712.28 1120.28,-772.28 1120.28,-772.28 1120.28,-778.28 1114.28,-784.28 1108.28,-784.28 1108.28,-784.28 924.28,-784.28 924.28,-784.28 918.28,-784.28 912.28,-778.28 912.28,-772.28 912.28,-772.28 912.28,-712.28 912.28,-712.28 912.28,-706.28 918.28,-700.28 924.28,-700.28"/>
+<text text-anchor="middle" x="1016.28" y="-769.08" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="912.28,-761.28 1120.28,-761.28 "/>
+<text text-anchor="middle" x="1016.28" y="-746.08" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 0</text>
+<text text-anchor="middle" x="1016.28" y="-731.08" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev0</text>
+<polyline fill="none" stroke="black" points="912.28,-723.28 1120.28,-723.28 "/>
+<text text-anchor="middle" x="925.28" y="-708.08" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="938.28,-700.28 938.28,-723.28 "/>
+<text text-anchor="middle" x="951.28" y="-708.08" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="964.28,-700.28 964.28,-723.28 "/>
+<text text-anchor="middle" x="977.28" y="-708.08" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="990.28,-700.28 990.28,-723.28 "/>
+<text text-anchor="middle" x="1003.28" y="-708.08" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="1016.28,-700.28 1016.28,-723.28 "/>
+<text text-anchor="middle" x="1029.28" y="-708.08" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="1042.28,-700.28 1042.28,-723.28 "/>
+<text text-anchor="middle" x="1055.28" y="-708.08" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="1068.28,-700.28 1068.28,-723.28 "/>
+<text text-anchor="middle" x="1081.28" y="-708.08" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="1094.28,-700.28 1094.28,-723.28 "/>
+<text text-anchor="middle" x="1107.28" y="-708.08" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n00000081&#45;&gt;n00000001 -->
+<g id="edge1" class="edge">
+<title>n00000081:port1&#45;&gt;n00000001</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M912.28,-711.28C912.28,-711.28 880.33,-714.78 843.28,-718.84"/>
+<polygon fill="black" stroke="black" points="842.81,-715.37 833.25,-719.94 843.57,-722.33 842.81,-715.37"/>
+</g>
+<!-- n00000081&#45;&gt;n00000005 -->
+<g id="edge2" class="edge">
+<title>n00000081:port2&#45;&gt;n00000005</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M951.38,-700.28C951.38,-700.28 1086.18,-688.61 1123.48,-697.08 1155.93,-704.45 1158.99,-719.67 1190.39,-730.68 1190.49,-730.71 1190.59,-730.75 1190.69,-730.78"/>
+<polygon fill="black" stroke="black" points="1189.45,-734.06 1200.05,-733.86 1191.64,-727.41 1189.45,-734.06"/>
+</g>
+<!-- n00000081&#45;&gt;n00000009 -->
+<g id="edge3" class="edge">
+<title>n00000081:port3&#45;&gt;n00000009</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M977.28,-700.28C977.28,-700.28 979.31,-698.81 982.45,-696.54"/>
+<polygon fill="black" stroke="black" points="984.7,-699.23 990.74,-690.53 980.59,-693.56 984.7,-699.23"/>
+</g>
+<!-- n00000081&#45;&gt;n0000000d -->
+<g id="edge4" class="edge">
+<title>n00000081:port4&#45;&gt;n0000000d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1003.38,-700.26C1003.38,-700.26 916.62,-689.8 909.08,-697.08 880.2,-725.01 885.68,-754.82 909.08,-787.48 910.88,-789.99 918.96,-793.59 929.7,-797.47"/>
+<polygon fill="black" stroke="black" points="928.69,-800.82 939.28,-800.79 930.98,-794.21 928.69,-800.82"/>
+</g>
+<!-- n00000081&#45;&gt;n00000011 -->
+<g id="edge5" class="edge">
+<title>n00000081:port5&#45;&gt;n00000011</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1029.19,-700.26C1029.19,-700.26 1115.28,-690.56 1123.48,-697.08 1198.37,-756.64 1190.55,-886.51 1182.64,-944.71"/>
+<polygon fill="black" stroke="black" points="1179.16,-944.31 1181.18,-954.71 1186.09,-945.32 1179.16,-944.31"/>
+</g>
+<!-- n00000081&#45;&gt;n00000015 -->
+<g id="edge6" class="edge">
+<title>n00000081:port6&#45;&gt;n00000015</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1055.18,-700.28C1055.18,-700.28 915.57,-692.2 909.08,-697.08 834.02,-753.51 831.79,-879.34 835.06,-936.56"/>
+<polygon fill="black" stroke="black" points="831.58,-936.99 835.74,-946.73 838.56,-936.52 831.58,-936.99"/>
+</g>
+<!-- n00000081&#45;&gt;n00000019 -->
+<g id="edge7" class="edge">
+<title>n00000081:port7&#45;&gt;n00000019</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1081.28,-700.28C1081.28,-700.28 916.04,-696.54 912.32,-693.67 864.52,-656.73 856.3,-580.22 855.62,-538.2"/>
+<polygon fill="black" stroke="black" points="859.11,-538.05 855.59,-528.06 852.11,-538.07 859.11,-538.05"/>
+</g>
+<!-- n00000081&#45;&gt;n0000001d -->
+<g id="edge8" class="edge">
+<title>n00000081:port8&#45;&gt;n0000001d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1107.28,-700.28C1107.28,-700.28 1119.29,-696.23 1121.72,-693.67 1159.76,-653.62 1177.38,-589.6 1184.78,-552.46"/>
+<polygon fill="black" stroke="black" points="1188.29,-552.76 1186.69,-542.29 1181.41,-551.47 1188.29,-552.76"/>
+</g>
+<!-- n0000008b -->
+<g id="node34" class="node">
+<title>n0000008b</title>
+<path fill="green" stroke="black" d="M293.1,-532.08C293.1,-532.08 477.1,-532.08 477.1,-532.08 483.1,-532.08 489.1,-538.08 489.1,-544.08 489.1,-544.08 489.1,-604.08 489.1,-604.08 489.1,-610.08 483.1,-616.08 477.1,-616.08 477.1,-616.08 293.1,-616.08 293.1,-616.08 287.1,-616.08 281.1,-610.08 281.1,-604.08 281.1,-604.08 281.1,-544.08 281.1,-544.08 281.1,-538.08 287.1,-532.08 293.1,-532.08"/>
+<text text-anchor="middle" x="385.1" y="-600.88" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="281.1,-593.08 489.1,-593.08 "/>
+<text text-anchor="middle" x="385.1" y="-577.88" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 1</text>
+<text text-anchor="middle" x="385.1" y="-562.88" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev1</text>
+<polyline fill="none" stroke="black" points="281.1,-555.08 489.1,-555.08 "/>
+<text text-anchor="middle" x="294.1" y="-539.88" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="307.1,-532.08 307.1,-555.08 "/>
+<text text-anchor="middle" x="320.1" y="-539.88" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="333.1,-532.08 333.1,-555.08 "/>
+<text text-anchor="middle" x="346.1" y="-539.88" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="359.1,-532.08 359.1,-555.08 "/>
+<text text-anchor="middle" x="372.1" y="-539.88" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="385.1,-532.08 385.1,-555.08 "/>
+<text text-anchor="middle" x="398.1" y="-539.88" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="411.1,-532.08 411.1,-555.08 "/>
+<text text-anchor="middle" x="424.1" y="-539.88" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="437.1,-532.08 437.1,-555.08 "/>
+<text text-anchor="middle" x="450.1" y="-539.88" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="463.1,-532.08 463.1,-555.08 "/>
+<text text-anchor="middle" x="476.1" y="-539.88" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n0000008b&#45;&gt;n00000021 -->
+<g id="edge9" class="edge">
+<title>n0000008b:port1&#45;&gt;n00000021</title>
+<path fill="none" stroke="black" d="M281.1,-543.08C281.1,-543.08 240.1,-560.51 205.94,-570.26 205.35,-570.43 204.77,-570.59 204.18,-570.76"/>
+<polygon fill="black" stroke="black" points="203.2,-567.39 194.47,-573.39 205.03,-574.15 203.2,-567.39"/>
+</g>
+<!-- n0000008b&#45;&gt;n00000025 -->
+<g id="edge10" class="edge">
+<title>n0000008b:port2&#45;&gt;n00000025</title>
+<path fill="none" stroke="black" d="M320.2,-532.07C320.2,-532.07 456.9,-514.37 492.3,-528.88 528.42,-543.68 522.86,-571.78 556.11,-594.53"/>
+<polygon fill="black" stroke="black" points="554.54,-597.67 564.9,-599.88 558.18,-591.69 554.54,-597.67"/>
+</g>
+<!-- n0000008b&#45;&gt;n00000029 -->
+<g id="edge11" class="edge">
+<title>n0000008b:port3&#45;&gt;n00000029</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M346.1,-532.08C346.1,-532.08 333.93,-527.96 318.37,-522.71"/>
+<polygon fill="black" stroke="black" points="319.48,-519.39 308.88,-519.5 317.24,-526.02 319.48,-519.39"/>
+</g>
+<!-- n0000008b&#45;&gt;n0000002d -->
+<g id="edge12" class="edge">
+<title>n0000008b:port4&#45;&gt;n0000002d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M372.19,-532.05C372.19,-532.05 292.97,-514.3 277.9,-528.88 249.01,-556.8 253.16,-587.62 277.9,-619.28 278.34,-619.85 280.33,-620.69 283.45,-621.71"/>
+<polygon fill="black" stroke="black" points="282.71,-625.14 293.29,-624.58 284.67,-618.42 282.71,-625.14"/>
+</g>
+<!-- n0000008b&#45;&gt;n00000031 -->
+<g id="edge13" class="edge">
+<title>n0000008b:port5&#45;&gt;n00000031</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M398,-532.05C398,-532.05 476.28,-515.34 492.3,-528.88 568.49,-593.29 550.55,-729.67 538.14,-789.41"/>
+<polygon fill="black" stroke="black" points="534.69,-788.79 535.99,-799.31 541.53,-790.28 534.69,-788.79"/>
+</g>
+<!-- n0000008b&#45;&gt;n00000035 -->
+<g id="edge14" class="edge">
+<title>n0000008b:port6&#45;&gt;n00000035</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M424,-532.07C424,-532.07 290.37,-518.48 277.9,-528.88 202.27,-591.86 215.34,-725.69 225.66,-785.15"/>
+<polygon fill="black" stroke="black" points="222.29,-786.14 227.54,-795.35 229.17,-784.88 222.29,-786.14"/>
+</g>
+<!-- n0000008b&#45;&gt;n00000039 -->
+<g id="edge15" class="edge">
+<title>n0000008b:port7&#45;&gt;n00000039</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M450.1,-532.08C450.1,-532.08 395.22,-528.13 383.45,-518.65 375.46,-512.21 322.64,-385.46 298.76,-327.47"/>
+<polygon fill="black" stroke="black" points="301.96,-326.05 294.92,-318.14 295.49,-328.72 301.96,-326.05"/>
+</g>
+<!-- n0000008b&#45;&gt;n0000003d -->
+<g id="edge16" class="edge">
+<title>n0000008b:port8&#45;&gt;n0000003d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M476.1,-532.08C476.1,-532.08 522.37,-522.39 526.85,-518.65 563.15,-488.33 581.38,-434.52 589.6,-401.2"/>
+<polygon fill="black" stroke="black" points="593.08,-401.69 591.93,-391.16 586.26,-400.11 593.08,-401.69"/>
+</g>
+<!-- n00000095 -->
+<g id="node35" class="node">
+<title>n00000095</title>
+<path fill="green" stroke="black" d="M301.38,-1180.11C301.38,-1180.11 485.38,-1180.11 485.38,-1180.11 491.38,-1180.11 497.38,-1186.11 497.38,-1192.11 497.38,-1192.11 497.38,-1252.11 497.38,-1252.11 497.38,-1258.11 491.38,-1264.11 485.38,-1264.11 485.38,-1264.11 301.38,-1264.11 301.38,-1264.11 295.38,-1264.11 289.38,-1258.11 289.38,-1252.11 289.38,-1252.11 289.38,-1192.11 289.38,-1192.11 289.38,-1186.11 295.38,-1180.11 301.38,-1180.11"/>
+<text text-anchor="middle" x="393.38" y="-1248.91" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="289.38,-1241.11 497.38,-1241.11 "/>
+<text text-anchor="middle" x="393.38" y="-1225.91" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 2</text>
+<text text-anchor="middle" x="393.38" y="-1210.91" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev2</text>
+<polyline fill="none" stroke="black" points="289.38,-1203.11 497.38,-1203.11 "/>
+<text text-anchor="middle" x="302.38" y="-1187.91" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="315.38,-1180.11 315.38,-1203.11 "/>
+<text text-anchor="middle" x="328.38" y="-1187.91" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="341.38,-1180.11 341.38,-1203.11 "/>
+<text text-anchor="middle" x="354.38" y="-1187.91" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="367.38,-1180.11 367.38,-1203.11 "/>
+<text text-anchor="middle" x="380.38" y="-1187.91" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="393.38,-1180.11 393.38,-1203.11 "/>
+<text text-anchor="middle" x="406.38" y="-1187.91" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="419.38,-1180.11 419.38,-1203.11 "/>
+<text text-anchor="middle" x="432.38" y="-1187.91" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="445.38,-1180.11 445.38,-1203.11 "/>
+<text text-anchor="middle" x="458.38" y="-1187.91" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="471.38,-1180.11 471.38,-1203.11 "/>
+<text text-anchor="middle" x="484.38" y="-1187.91" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n00000095&#45;&gt;n00000041 -->
+<g id="edge17" class="edge">
+<title>n00000095:port1&#45;&gt;n00000041</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M289.38,-1191.11C289.38,-1191.11 258.94,-1194.22 222.89,-1197.91"/>
+<polygon fill="black" stroke="black" points="222.19,-1194.46 212.6,-1198.96 222.9,-1201.42 222.19,-1194.46"/>
+</g>
+<!-- n00000095&#45;&gt;n00000045 -->
+<g id="edge18" class="edge">
+<title>n00000095:port2&#45;&gt;n00000045</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M328.48,-1180.11C328.48,-1180.11 463.26,-1168.53 500.58,-1176.91 534.02,-1184.43 537.24,-1200.06 569.66,-1211.18 569.76,-1211.22 569.86,-1211.25 569.96,-1211.29"/>
+<polygon fill="black" stroke="black" points="568.86,-1214.61 579.45,-1214.34 571,-1207.95 568.86,-1214.61"/>
+</g>
+<!-- n00000095&#45;&gt;n00000049 -->
+<g id="edge19" class="edge">
+<title>n00000095:port3&#45;&gt;n00000049</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M354.38,-1180.11C354.38,-1180.11 356.8,-1178.46 360.49,-1175.94"/>
+<polygon fill="black" stroke="black" points="362.56,-1178.77 368.86,-1170.24 358.62,-1172.98 362.56,-1178.77"/>
+</g>
+<!-- n00000095&#45;&gt;n0000004d -->
+<g id="edge20" class="edge">
+<title>n00000095:port4&#45;&gt;n0000004d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M380.47,-1180.09C380.47,-1180.09 293.71,-1169.63 286.18,-1176.91 257.29,-1204.84 262.63,-1234.76 286.18,-1267.31 288.16,-1270.05 297.33,-1273.96 309.38,-1278.13"/>
+<polygon fill="black" stroke="black" points="308.49,-1281.53 319.09,-1281.36 310.7,-1274.88 308.49,-1281.53"/>
+</g>
+<!-- n00000095&#45;&gt;n00000051 -->
+<g id="edge21" class="edge">
+<title>n00000095:port5&#45;&gt;n00000051</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M406.28,-1180.09C406.28,-1180.09 492.13,-1170.7 500.58,-1176.91 576.41,-1232.66 579.83,-1358.79 577.09,-1416.2"/>
+<polygon fill="black" stroke="black" points="573.59,-1416.23 576.51,-1426.41 580.58,-1416.63 573.59,-1416.23"/>
+</g>
+<!-- n00000095&#45;&gt;n00000055 -->
+<g id="edge22" class="edge">
+<title>n00000095:port6&#45;&gt;n00000055</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M432.28,-1180.11C432.28,-1180.11 292.85,-1172.29 286.18,-1176.91 211.26,-1228.86 198.3,-1348.49 196.45,-1404.12"/>
+<polygon fill="black" stroke="black" points="192.94,-1404.28 196.21,-1414.36 199.94,-1404.44 192.94,-1404.28"/>
+</g>
+<!-- n00000095&#45;&gt;n00000059 -->
+<g id="edge23" class="edge">
+<title>n00000095:port7&#45;&gt;n00000059</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M458.38,-1180.11C458.38,-1180.11 291.84,-1175.85 287.94,-1173.16 239.87,-1139.96 222.85,-1068.83 216.94,-1028.6"/>
+<polygon fill="black" stroke="black" points="220.39,-1028.06 215.6,-1018.61 213.46,-1028.98 220.39,-1028.06"/>
+</g>
+<!-- n00000095&#45;&gt;n0000005d -->
+<g id="edge24" class="edge">
+<title>n00000095:port8&#45;&gt;n0000005d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M484.38,-1180.11C484.38,-1180.11 502.45,-1176.49 506.34,-1173.16 547.25,-1138.2 569.47,-1077.38 579.62,-1041.41"/>
+<polygon fill="black" stroke="black" points="583.06,-1042.09 582.28,-1031.53 576.3,-1040.27 583.06,-1042.09"/>
+</g>
+<!-- n0000009f -->
+<g id="node36" class="node">
+<title>n0000009f</title>
+<path fill="green" stroke="black" d="M1211.38,-200.11C1211.38,-200.11 1395.38,-200.11 1395.38,-200.11 1401.38,-200.11 1407.38,-206.11 1407.38,-212.11 1407.38,-212.11 1407.38,-272.11 1407.38,-272.11 1407.38,-278.11 1401.38,-284.11 1395.38,-284.11 1395.38,-284.11 1211.38,-284.11 1211.38,-284.11 1205.38,-284.11 1199.38,-278.11 1199.38,-272.11 1199.38,-272.11 1199.38,-212.11 1199.38,-212.11 1199.38,-206.11 1205.38,-200.11 1211.38,-200.11"/>
+<text text-anchor="middle" x="1303.38" y="-268.91" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="1199.38,-261.11 1407.38,-261.11 "/>
+<text text-anchor="middle" x="1303.38" y="-245.91" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 3</text>
+<text text-anchor="middle" x="1303.38" y="-230.91" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev3</text>
+<polyline fill="none" stroke="black" points="1199.38,-223.11 1407.38,-223.11 "/>
+<text text-anchor="middle" x="1212.38" y="-207.91" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="1225.38,-200.11 1225.38,-223.11 "/>
+<text text-anchor="middle" x="1238.38" y="-207.91" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="1251.38,-200.11 1251.38,-223.11 "/>
+<text text-anchor="middle" x="1264.38" y="-207.91" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="1277.38,-200.11 1277.38,-223.11 "/>
+<text text-anchor="middle" x="1290.38" y="-207.91" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="1303.38,-200.11 1303.38,-223.11 "/>
+<text text-anchor="middle" x="1316.38" y="-207.91" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="1329.38,-200.11 1329.38,-223.11 "/>
+<text text-anchor="middle" x="1342.38" y="-207.91" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="1355.38,-200.11 1355.38,-223.11 "/>
+<text text-anchor="middle" x="1368.38" y="-207.91" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="1381.38,-200.11 1381.38,-223.11 "/>
+<text text-anchor="middle" x="1394.38" y="-207.91" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n0000009f&#45;&gt;n00000061 -->
+<g id="edge25" class="edge">
+<title>n0000009f:port1&#45;&gt;n00000061</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1199.38,-211.11C1199.38,-211.11 1168.94,-214.22 1132.89,-217.91"/>
+<polygon fill="black" stroke="black" points="1132.19,-214.46 1122.6,-218.96 1132.9,-221.42 1132.19,-214.46"/>
+</g>
+<!-- n0000009f&#45;&gt;n00000065 -->
+<g id="edge26" class="edge">
+<title>n0000009f:port2&#45;&gt;n00000065</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1238.48,-200.11C1238.48,-200.11 1373.26,-188.53 1410.58,-196.91 1444.02,-204.43 1447.24,-220.06 1479.66,-231.18 1479.76,-231.22 1479.86,-231.25 1479.96,-231.29"/>
+<polygon fill="black" stroke="black" points="1478.86,-234.61 1489.45,-234.34 1481,-227.95 1478.86,-234.61"/>
+</g>
+<!-- n0000009f&#45;&gt;n00000069 -->
+<g id="edge27" class="edge">
+<title>n0000009f:port3&#45;&gt;n00000069</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1264.38,-200.11C1264.38,-200.11 1266.8,-198.46 1270.49,-195.94"/>
+<polygon fill="black" stroke="black" points="1272.56,-198.77 1278.86,-190.24 1268.62,-192.98 1272.56,-198.77"/>
+</g>
+<!-- n0000009f&#45;&gt;n0000006d -->
+<g id="edge28" class="edge">
+<title>n0000009f:port4&#45;&gt;n0000006d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1290.47,-200.09C1290.47,-200.09 1203.71,-189.63 1196.18,-196.91 1167.29,-224.84 1172.63,-254.76 1196.18,-287.31 1198.16,-290.05 1207.33,-293.96 1219.38,-298.13"/>
+<polygon fill="black" stroke="black" points="1218.49,-301.53 1229.09,-301.36 1220.7,-294.88 1218.49,-301.53"/>
+</g>
+<!-- n0000009f&#45;&gt;n00000071 -->
+<g id="edge29" class="edge">
+<title>n0000009f:port5&#45;&gt;n00000071</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1316.28,-200.09C1316.28,-200.09 1402.13,-190.7 1410.58,-196.91 1486.41,-252.66 1489.83,-378.79 1487.09,-436.2"/>
+<polygon fill="black" stroke="black" points="1483.59,-436.23 1486.51,-446.41 1490.58,-436.63 1483.59,-436.23"/>
+</g>
+<!-- n0000009f&#45;&gt;n00000075 -->
+<g id="edge30" class="edge">
+<title>n0000009f:port6&#45;&gt;n00000075</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1342.28,-200.11C1342.28,-200.11 1202.85,-192.29 1196.18,-196.91 1121.26,-248.86 1108.3,-368.49 1106.45,-424.12"/>
+<polygon fill="black" stroke="black" points="1102.94,-424.28 1106.21,-434.36 1109.94,-424.44 1102.94,-424.28"/>
+</g>
+<!-- n0000009f&#45;&gt;n00000079 -->
+<g id="edge31" class="edge">
+<title>n0000009f:port7&#45;&gt;n00000079</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1368.38,-200.11C1368.38,-200.11 1201.84,-195.85 1197.94,-193.16 1149.87,-159.96 1132.85,-88.83 1126.94,-48.6"/>
+<polygon fill="black" stroke="black" points="1130.39,-48.06 1125.6,-38.61 1123.46,-48.98 1130.39,-48.06"/>
+</g>
+<!-- n0000009f&#45;&gt;n0000007d -->
+<g id="edge32" class="edge">
+<title>n0000009f:port8&#45;&gt;n0000007d</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1394.38,-200.11C1394.38,-200.11 1412.45,-196.49 1416.34,-193.16 1457.25,-158.2 1479.47,-97.38 1489.62,-61.41"/>
+<polygon fill="black" stroke="black" points="1493.06,-62.09 1492.28,-51.53 1486.3,-60.27 1493.06,-62.09"/>
+</g>
+<!-- n000000e9 -->
+<g id="node37" class="node">
+<title>n000000e9</title>
+<path fill="green" stroke="black" d="M398.65,-431.45C398.65,-431.45 511.65,-431.45 511.65,-431.45 517.65,-431.45 523.65,-437.45 523.65,-443.45 523.65,-443.45 523.65,-503.45 523.65,-503.45 523.65,-509.45 517.65,-515.45 511.65,-515.45 511.65,-515.45 398.65,-515.45 398.65,-515.45 392.65,-515.45 386.65,-509.45 386.65,-503.45 386.65,-503.45 386.65,-443.45 386.65,-443.45 386.65,-437.45 392.65,-431.45 398.65,-431.45"/>
+<text text-anchor="middle" x="420.65" y="-500.25" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="454.65,-492.45 454.65,-515.45 "/>
+<text text-anchor="middle" x="489.15" y="-500.25" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="386.65,-492.45 523.65,-492.45 "/>
+<text text-anchor="middle" x="455.15" y="-477.25" font-family="Times,serif" font-size="14.00">ov2740 4&#45;0036</text>
+<text text-anchor="middle" x="455.15" y="-462.25" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev4</text>
+<polyline fill="none" stroke="black" points="386.65,-454.45 523.65,-454.45 "/>
+<text text-anchor="middle" x="455.15" y="-439.25" font-family="Times,serif" font-size="14.00">0</text>
+</g>
+<!-- n000000e9&#45;&gt;n0000008b -->
+<g id="edge33" class="edge">
+<title>n000000e9:port0&#45;&gt;n0000008b:port0</title>
+<path fill="none" stroke="black" stroke-width="2" d="M386.14,-442.55C386.14,-442.55 361.11,-493.23 383.45,-518.65 391.47,-527.78 484.31,-519.72 492.3,-528.88 508.64,-547.6 499.26,-579.87 493.12,-595.68"/>
+<polygon fill="black" stroke="black" stroke-width="2" points="489.86,-594.41 489.11,-604.98 496.29,-597.19 489.86,-594.41"/>
+</g>
+</g>
+</svg>
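The graph above is the kind of output graphviz renders from a media controller topology dump. As a hedged sketch (the media device node is an assumption; pick the node that belongs to the IPU6 ISYS on your system), a comparable SVG can be regenerated with media-ctl from v4l-utils::

    # dump the topology as a DOT graph, then render it with graphviz
    media-ctl -d /dev/media0 --print-dot > ipu6_isys_graph.dot
    dot -Tsvg ipu6_isys_graph.dot > ipu6_isys_graph.svg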
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
index 2977f74d7e26..e434d4a9eeb3 100644
--- a/Documentation/admin-guide/media/mgb4.rst
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -1,8 +1,10 @@
.. SPDX-License-Identifier: GPL-2.0
-====================
-mgb4 sysfs interface
-====================
+The mgb4 driver
+===============
+
+sysfs interface
+---------------
The mgb4 driver provides a sysfs interface, that is used to configure video
stream related parameters (some of them must be set properly before the v4l2
@@ -12,9 +14,8 @@ There are two types of parameters - global / PCI card related, found under
``/sys/class/video4linux/videoX/device`` and module specific found under
``/sys/class/video4linux/videoX``.
-
Global (PCI card) parameters
-============================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**module_type** (R):
Module type.
@@ -42,9 +43,8 @@ Global (PCI card) parameters
where each component is a 8b number.
-
Common FPDL3/GMSL input parameters
-==================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**input_id** (R):
Input number ID, zero based.
@@ -190,9 +190,8 @@ Common FPDL3/GMSL input parameters
*Note: This parameter can not be changed while the input v4l2 device is
open.*
-
Common FPDL3/GMSL output parameters
-===================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**output_id** (R):
Output number ID, zero based.
@@ -282,9 +281,8 @@ Common FPDL3/GMSL output parameters
Number of video lines between the end of the last valid pixel line (marked
by DE=1) and assertion of the VSYNC signal. The default value is 2.
-
FPDL3 specific input parameters
-===============================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**fpdl3_input_width** (RW):
Number of deserializer input lines.
@@ -294,7 +292,7 @@ FPDL3 specific input parameters
| 2 - dual
FPDL3 specific output parameters
-================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**fpdl3_output_width** (RW):
Number of serializer output lines.
@@ -304,7 +302,7 @@ FPDL3 specific output parameters
| 2 - dual
GMSL specific input parameters
-==============================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**gmsl_mode** (RW):
GMSL speed mode.
@@ -328,10 +326,8 @@ GMSL specific input parameters
| 0 - disabled
| 1 - enabled (default)
-
-====================
-mgb4 mtd partitions
-====================
+MTD partitions
+--------------
The mgb4 driver creates a MTD device with two partitions:
- mgb4-fw.X - FPGA firmware.
@@ -344,9 +340,8 @@ also have a third partition named *mgb4-flash* available in the system. This
partition represents the whole, unpartitioned, card's FLASH memory and one should
not fiddle with it...
-====================
-mgb4 iio (triggers)
-====================
+IIO (triggers)
+--------------
The mgb4 driver creates an Industrial I/O (IIO) device that provides trigger and
signal level status capability. The following scan elements are available:
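As a small, hedged illustration of the sysfs layout described above (the video node number is an assumption; use the node that belongs to your mgb4 card)::

    # global (PCI card) parameter, found under the device directory
    cat /sys/class/video4linux/video0/device/module_type
    # module-specific parameter, found directly under the video node
    cat /sys/class/video4linux/video0/input_id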
diff --git a/Documentation/admin-guide/media/v4l-drivers.rst b/Documentation/admin-guide/media/v4l-drivers.rst
index f4bb2605f07e..4120eded9a13 100644
--- a/Documentation/admin-guide/media/v4l-drivers.rst
+++ b/Documentation/admin-guide/media/v4l-drivers.rst
@@ -16,6 +16,7 @@ Video4Linux (V4L) driver-specific documentation
imx
imx7
ipu3
+ ipu6-isys
ivtv
mgb4
omap3isp
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 6fce035fdbf5..e58ceb89ea2a 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -153,7 +153,7 @@ Users can write below commands for the kdamond to the ``state`` file.
- ``clear_schemes_tried_regions``: Clear the DAMON-based operating scheme
action tried regions directory for each DAMON-based operation scheme of the
kdamond.
-- ``update_schemes_effective_bytes``: Update the contents of
+- ``update_schemes_effective_quotas``: Update the contents of
``effective_bytes`` files for each DAMON-based operation scheme of the
kdamond. For more details, refer to :ref:`quotas directory <sysfs_quotas>`.
@@ -342,7 +342,7 @@ Based on the user-specified :ref:`goal <sysfs_schemes_quota_goals>`, the
effective size quota is further adjusted. Reading ``effective_bytes`` returns
the current effective size quota. The file is not updated in real time, so
users should ask DAMON sysfs interface to update the content of the file for
-the stats by writing a special keyword, ``update_schemes_effective_bytes`` to
+the stats by writing a special keyword, ``update_schemes_effective_quotas`` to
the relevant ``kdamonds/<N>/state`` file.
Under ``weights`` directory, three files (``sz_permil``,
@@ -410,19 +410,19 @@ in the numeric order.
Each filter directory contains six files, namely ``type``, ``matcing``,
``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. To ``type``
-file, you can write one of four special keywords: ``anon`` for anonymous pages,
-``memcg`` for specific memory cgroup, ``addr`` for specific address range (an
-open-ended interval), or ``target`` for specific DAMON monitoring target
-filtering. In case of the memory cgroup filtering, you can specify the memory
-cgroup of the interest by writing the path of the memory cgroup from the
-cgroups mount point to ``memcg_path`` file. In case of the address range
-filtering, you can specify the start and end address of the range to
-``addr_start`` and ``addr_end`` files, respectively. For the DAMON monitoring
-target filtering, you can specify the index of the target between the list of
-the DAMON context's monitoring targets list to ``target_idx`` file. You can
-write ``Y`` or ``N`` to ``matching`` file to filter out pages that does or does
-not match to the type, respectively. Then, the scheme's action will not be
-applied to the pages that specified to be filtered out.
+file, you can write one of five special keywords: ``anon`` for anonymous pages,
+``memcg`` for a specific memory cgroup, ``young`` for young pages, ``addr`` for
+a specific address range (an open-ended interval), or ``target`` for a specific
+DAMON monitoring target. In case of memory cgroup filtering, you can specify
+the memory cgroup of interest by writing the path of the memory cgroup from
+the cgroups mount point to the ``memcg_path`` file. In case of address range
+filtering, you can specify the start and end address of the range to the
+``addr_start`` and ``addr_end`` files, respectively. For DAMON monitoring
+target filtering, you can specify the index of the target in the DAMON
+context's monitoring targets list to the ``target_idx`` file. You can write
+``Y`` or ``N`` to the ``matching`` file to filter out pages that do or do not
+match the type, respectively. Then, the scheme's action will not be applied
+to the pages specified to be filtered out.
For example, below restricts a DAMOS action to be applied to only non-anonymous
pages of all memory cgroups except ``/having_care_already``.::
@@ -434,7 +434,7 @@ pages of all memory cgroups except ``/having_care_already``.::
# # further filter out all cgroups except one at '/having_care_already'
echo memcg > 1/type
echo /having_care_already > 1/memcg_path
- echo N > 1/matching
+ echo Y > 1/matching
Note that ``anon`` and ``memcg`` filters are currently supported only when
``paddr`` :ref:`implementation <sysfs_context>` is being used.
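As a hedged sketch of the filter files described above (the sysfs root and the index numbers are assumptions; adjust them to your kdamond/context/scheme layout), an address range filter that excludes a physical address range could be set up as follows::

    cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/filters
    echo 1 > nr_filters
    echo addr > 0/type
    echo 0x100000000 > 0/addr_start
    echo 0x140000000 > 0/addr_end
    # Y: filter out (skip) pages that match, i.e. fall inside the range
    echo Y > 0/matching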
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index e4d4b4a8dc97..f34a0d798d5b 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -376,6 +376,13 @@ Note that the number of overcommit and reserve pages remain global quantities,
as we don't know until fault time, when the faulting task's mempolicy is
applied, from which node the huge page allocation will be attempted.
+A hugetlb page may be migrated between the per-node hugepage pools in the following
+scenarios: memory offline, memory failure, longterm pinning, syscalls (mbind,
+migrate_pages and move_pages), alloc_contig_range() and alloc_contig_pages().
+Currently only memory offline, memory failure and the syscalls allow falling back
+to allocating a new hugetlb page on a different node if the current node cannot
+allocate one during migration, so these three cases can break the per-node pools.
+
.. _using_huge_pages:
Using Huge Pages
diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
index a639cac12477..ad8e7a41f3b5 100644
--- a/Documentation/admin-guide/mm/ksm.rst
+++ b/Documentation/admin-guide/mm/ksm.rst
@@ -308,7 +308,7 @@ limited by the ``advisor_max_cpu`` parameter. In addition there is also the
``advisor_target_scan_time`` parameter. This parameter sets the target time to
scan all the KSM candidate pages. The parameter ``advisor_target_scan_time``
decides how aggressive the scan time advisor scans candidate pages. Lower
-values make the scan time advisor to scan more aggresively. This is the most
+values make the scan time advisor to scan more aggressively. This is the most
important parameter for the configuration of the scan time advisor.
The initial value and the maximum value can be changed with
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 04eb45a2f940..076443cc10a6 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -278,7 +278,8 @@ collapsed, resulting fewer pages being collapsed into
THPs, and lower memory access performance.
``max_ptes_shared`` specifies how many pages can be shared across multiple
-processes. Exceeding the number would block the collapse::
+processes. khugepaged might treat pages of THPs as shared if any page of
+that THP is shared. Exceeding the number would block the collapse::
/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_shared
@@ -369,7 +370,7 @@ monitor how successfully the system is providing huge pages for use.
thp_fault_alloc
is incremented every time a huge page is successfully
- allocated to handle a page fault.
+ allocated and charged to handle a page fault.
thp_collapse_alloc
is incremented by khugepaged when it has found
@@ -377,7 +378,7 @@ thp_collapse_alloc
successfully allocated a new huge page to store the data.
thp_fault_fallback
- is incremented if a page fault fails to allocate
+ is incremented if a page fault fails to allocate or charge
a huge page and instead falls back to using small pages.
thp_fault_fallback_charge
@@ -447,6 +448,34 @@ thp_swpout_fallback
Usually because failed to allocate some continuous swap space
for the huge page.
+In /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats, there are
+also individual counters for each huge page size, which can be used to
+monitor the system's effectiveness in providing huge pages for use. Each
+counter has its own corresponding file.
+
+anon_fault_alloc
+ is incremented every time a huge page is successfully
+ allocated and charged to handle a page fault.
+
+anon_fault_fallback
+ is incremented if a page fault fails to allocate or charge
+ a huge page and instead falls back to using huge pages with
+ lower orders or small pages.
+
+anon_fault_fallback_charge
+ is incremented if a page fault fails to charge a huge page and
+ instead falls back to using huge pages with lower orders or
+ small pages even though the allocation was successful.
+
+anon_swpout
+ is incremented every time a huge page is swapped out in one
+ piece without splitting.
+
+anon_swpout_fallback
+ is incremented if a huge page has to be split before swapout.
+ Usually because failed to allocate some continuous swap space
+ for the huge page.
+
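As a hedged example of reading the per-size counters introduced above (the 2048kB directory is an assumption; use whichever hugepages-<size>kB directories exist on your system)::

    # PMD-sized anonymous THP allocations that served a page fault
    cat /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/anon_fault_alloc
    # and the corresponding fallback counter
    cat /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/anon_fault_fallback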
As the system ages, allocating huge pages may be expensive as the
system uses memory compaction to copy data around memory to free a
huge page for use. There are some counters in ``/proc/vmstat`` to help
diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst
index b42132969e31..3598dcd7dbe7 100644
--- a/Documentation/admin-guide/mm/zswap.rst
+++ b/Documentation/admin-guide/mm/zswap.rst
@@ -111,35 +111,6 @@ checked if it is a same-value filled page before compressing it. If true, the
compressed length of the page is set to zero and the pattern or same-filled
value is stored.
-Same-value filled pages identification feature is enabled by default and can be
-disabled at boot time by setting the ``same_filled_pages_enabled`` attribute
-to 0, e.g. ``zswap.same_filled_pages_enabled=0``. It can also be enabled and
-disabled at runtime using the sysfs ``same_filled_pages_enabled``
-attribute, e.g.::
-
- echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
-
-When zswap same-filled page identification is disabled at runtime, it will stop
-checking for the same-value filled pages during store operation.
-In other words, every page will be then considered non-same-value filled.
-However, the existing pages which are marked as same-value filled pages remain
-stored unchanged in zswap until they are either loaded or invalidated.
-
-In some circumstances it might be advantageous to make use of just the zswap
-ability to efficiently store same-filled pages without enabling the whole
-compressed page storage.
-In this case the handling of non-same-value pages by zswap (enabled by default)
-can be disabled by setting the ``non_same_filled_pages_enabled`` attribute
-to 0, e.g. ``zswap.non_same_filled_pages_enabled=0``.
-It can also be enabled and disabled at runtime using the sysfs
-``non_same_filled_pages_enabled`` attribute, e.g.::
-
- echo 1 > /sys/module/zswap/parameters/non_same_filled_pages_enabled
-
-Disabling both ``zswap.same_filled_pages_enabled`` and
-``zswap.non_same_filled_pages_enabled`` effectively disables accepting any new
-pages by zswap.
-
To prevent zswap from shrinking pool when zswap is full and there's a high
pressure on swap (this will result in flipping pages in and out zswap pool
without any real benefit but with a performance drop for the system), a
@@ -155,7 +126,7 @@ Setting this parameter to 100 will disable the hysteresis.
Some users cannot tolerate the swapping that comes with zswap store failures
and zswap writebacks. Swapping can be disabled entirely (without disabling
-zswap itself) on a cgroup-basis as follows:
+zswap itself) on a cgroup-basis as follows::
echo 0 > /sys/fs/cgroup/<cgroup-name>/memory.zswap.writeback
@@ -166,7 +137,7 @@ writeback (because the same pages might be rejected again and again).
When there is a sizable amount of cold memory residing in the zswap pool, it
can be advantageous to proactively write these cold pages to swap and reclaim
the memory for other use cases. By default, the zswap shrinker is disabled.
-User can enable it as follows:
+User can enable it as follows::
echo Y > /sys/module/zswap/parameters/shrinker_enabled
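Putting the knobs mentioned above together, a hedged example (the cgroup name is an assumption)::

    # keep zswap for this cgroup but never write its pages back to swap
    echo 0 > /sys/fs/cgroup/workload/memory.zswap.writeback
    # enable the zswap shrinker globally
    echo Y > /sys/module/zswap/parameters/shrinker_enabled
    # inspect the current zswap parameters
    grep -r . /sys/module/zswap/parameters/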
diff --git a/Documentation/admin-guide/perf/hisi-pmu.rst b/Documentation/admin-guide/perf/hisi-pmu.rst
index e0174d20809a..5cc248d18c63 100644
--- a/Documentation/admin-guide/perf/hisi-pmu.rst
+++ b/Documentation/admin-guide/perf/hisi-pmu.rst
@@ -20,7 +20,6 @@ interrupt, and the PMU driver shall register perf PMU drivers like L3C,
HHA and DDRC etc. The available events and configuration options shall
be described in the sysfs, see:
-/sys/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>/, or
/sys/bus/event_source/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>.
The "perf list" command shall list the available events from sysfs.
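As a hedged illustration of that discovery step (the exact SCCL and block instance numbers depend on the system)::

    # list the HiSilicon uncore PMUs and their events
    ls /sys/bus/event_source/devices/ | grep hisi_sccl
    perf list | grep hisi_sccl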
diff --git a/Documentation/admin-guide/perf/hns3-pmu.rst b/Documentation/admin-guide/perf/hns3-pmu.rst
index 75a40846d47f..1195e570f2d6 100644
--- a/Documentation/admin-guide/perf/hns3-pmu.rst
+++ b/Documentation/admin-guide/perf/hns3-pmu.rst
@@ -16,7 +16,7 @@ HNS3 PMU driver
The HNS3 PMU driver registers a perf PMU with the name of its sicl id.::
- /sys/devices/hns3_pmu_sicl_<sicl_id>
+ /sys/bus/event_source/devices/hns3_pmu_sicl_<sicl_id>
PMU driver provides description of available events, filter modes, format,
identifier and cpumask in sysfs.
@@ -40,9 +40,9 @@ device.
Example usage of checking event code and subevent code::
- $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_time
+ $# cat /sys/bus/event_source/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_time
config=0x00204
- $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_packet_num
+ $# cat /sys/bus/event_source/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_packet_num
config=0x10204
Each performance statistic has a pair of events to get two values to
@@ -60,7 +60,7 @@ computation to calculate real performance data is:::
Example usage of checking supported filter mode::
- $# cat /sys/devices/hns3_pmu_sicl_0/filtermode/bw_ssu_rpu_byte_num
+ $# cat /sys/bus/event_source/devices/hns3_pmu_sicl_0/filtermode/bw_ssu_rpu_byte_num
filter mode supported: global/port/port-tc/func/func-queue/
Example usage of perf::
diff --git a/Documentation/admin-guide/perf/qcom_l2_pmu.rst b/Documentation/admin-guide/perf/qcom_l2_pmu.rst
index c130178a4a55..c37c6be9b8d8 100644
--- a/Documentation/admin-guide/perf/qcom_l2_pmu.rst
+++ b/Documentation/admin-guide/perf/qcom_l2_pmu.rst
@@ -10,7 +10,7 @@ There is one logical L2 PMU exposed, which aggregates the results from
the physical PMUs.
The driver provides a description of its available events and configuration
-options in sysfs, see /sys/devices/l2cache_0.
+options in sysfs, see /sys/bus/event_source/devices/l2cache_0.
The "format" directory describes the format of the events.
diff --git a/Documentation/admin-guide/perf/qcom_l3_pmu.rst b/Documentation/admin-guide/perf/qcom_l3_pmu.rst
index a3d014a46bfd..a66556b7e985 100644
--- a/Documentation/admin-guide/perf/qcom_l3_pmu.rst
+++ b/Documentation/admin-guide/perf/qcom_l3_pmu.rst
@@ -9,7 +9,7 @@ PMU with device name l3cache_<socket>_<instance>. User space is responsible
for aggregating across slices.
The driver provides a description of its available events and configuration
-options in sysfs, see /sys/devices/l3cache*. Given that these are uncore PMUs
+options in sysfs, see /sys/bus/event_source/devices/l3cache*. Given that these are uncore PMUs
the driver also exposes a "cpumask" sysfs attribute which contains a mask
consisting of one CPU per socket which will be used to handle all the PMU
events on that socket.
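A hedged way to locate these PMUs and the cpumask mentioned above (the l3cache_0_0 instance name is an assumption following the l3cache_<socket>_<instance> scheme)::

    ls /sys/bus/event_source/devices/ | grep l3cache
    cat /sys/bus/event_source/devices/l3cache_0_0/cpumask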
diff --git a/Documentation/admin-guide/perf/thunderx2-pmu.rst b/Documentation/admin-guide/perf/thunderx2-pmu.rst
index 01f158238ae1..9255f7bf9452 100644
--- a/Documentation/admin-guide/perf/thunderx2-pmu.rst
+++ b/Documentation/admin-guide/perf/thunderx2-pmu.rst
@@ -22,7 +22,7 @@ The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
L3C devices. Each PMU can be used to count up to 4 (DMC/L3C) or up to 8
(CCPI2) events simultaneously. The PMUs provide a description of their
available events and configuration options under sysfs, see
-/sys/devices/uncore_<l3c_S/dmc_S/ccpi2_S/>; S is the socket id.
+/sys/bus/event_source/devices/uncore_<l3c_S/dmc_S/ccpi2_S/>; S is the socket id.
The driver does not support sampling, therefore "perf record" will not
work. Per-task perf sessions are also not supported.
diff --git a/Documentation/admin-guide/perf/xgene-pmu.rst b/Documentation/admin-guide/perf/xgene-pmu.rst
index 644f8ed89152..98ccb8e777c4 100644
--- a/Documentation/admin-guide/perf/xgene-pmu.rst
+++ b/Documentation/admin-guide/perf/xgene-pmu.rst
@@ -13,7 +13,7 @@ PMU (perf) driver
The xgene-pmu driver registers several perf PMU drivers. Each of the perf
driver provides description of its available events and configuration options
-in sysfs, see /sys/devices/<l3cX/iobX/mcbX/mcX>/.
+in sysfs, see /sys/bus/event_source/devices/<l3cX/iobX/mcbX/mcX>/.
The "format" directory describes format of the config (event ID),
config1 (agent ID) fields of the perf_event_attr structure. The "events"
diff --git a/Documentation/admin-guide/reporting-regressions.rst b/Documentation/admin-guide/reporting-regressions.rst
index 76b246ecf21b..946518355a2c 100644
--- a/Documentation/admin-guide/reporting-regressions.rst
+++ b/Documentation/admin-guide/reporting-regressions.rst
@@ -42,12 +42,12 @@ The important basics
--------------------
-What is a "regression" and what is the "no regressions rule"?
+What is a "regression" and what is the "no regressions" rule?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's a regression if some application or practical use case running fine with
one Linux kernel works worse or not at all with a newer version compiled using a
-similar configuration. The "no regressions rule" forbids this to take place; if
+similar configuration. The "no regressions" rule forbids this to take place; if
it happens by accident, developers that caused it are expected to quickly fix
the issue.
@@ -173,7 +173,7 @@ Additional details about regressions
------------------------------------
-What is the goal of the "no regressions rule"?
+What is the goal of the "no regressions" rule?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Users should feel safe when updating kernel versions and not have to worry
@@ -199,8 +199,8 @@ Exceptions to this rule are extremely rare; in the past developers almost always
turned out to be wrong when they assumed a particular situation was warranting
an exception.
-Who ensures the "no regressions" is actually followed?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Who ensures the "no regressions" rule is actually followed?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The subsystem maintainers should take care of that, which are watched and
supported by the tree maintainers -- e.g. Linus Torvalds for mainline and
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 7250c0542828..7b0c4291c686 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -72,6 +72,7 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
- riscv64
- riscv32
- loongarch64
+ - arc
And the older cBPF JIT supported on the following archs:
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index c59889de122b..e86c968a7a0e 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -43,6 +43,7 @@ Currently, these files are in /proc/sys/vm:
- legacy_va_layout
- lowmem_reserve_ratio
- max_map_count
+- mem_profiling (only if CONFIG_MEM_ALLOC_PROFILING=y)
- memory_failure_early_kill
- memory_failure_recovery
- min_free_kbytes
@@ -425,6 +426,21 @@ e.g., up to one or two maps per allocation.
The default value is 65530.
+mem_profiling
+==============
+
+Enable memory profiling (when CONFIG_MEM_ALLOC_PROFILING=y)
+
+1: Enable memory profiling.
+
+0: Disable memory profiling.
+
+Enabling memory profiling introduces a small performance overhead for all
+memory allocations.
+
+The default value depends on CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT.
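+
+For example, a minimal sketch of toggling it at runtime (assuming the kernel
+was built with CONFIG_MEM_ALLOC_PROFILING=y; run as root)::
+
+    echo 1 > /proc/sys/vm/mem_profiling    # enable profiling
+    cat /proc/sys/vm/mem_profiling         # confirm the current state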
+
+
memory_failure_early_kill:
==========================
diff --git a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
index d3504826f401..c389d4fd7599 100644
--- a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
+++ b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
@@ -29,7 +29,7 @@ The essence of the process (aka 'TL;DR')
========================================
*[If you are new to building or bisecting Linux, ignore this section and head
-over to the* ":ref:`step-by-step guide<introguide_bissbs>`" *below. It utilizes
+over to the* ':ref:`step-by-step guide <introguide_bissbs>`' *below. It utilizes
the same commands as this section while describing them in brief fashion. The
steps are nevertheless easy to follow and together with accompanying entries
in a reference section mention many alternatives, pitfalls, and additional
@@ -38,8 +38,8 @@ aspects, all of which might be essential in your present case.]*
**In case you want to check if a bug is present in code currently supported by
developers**, execute just the *preparations* and *segment 1*; while doing so,
consider the newest Linux kernel you regularly use to be the 'working' kernel.
-In the following example that's assumed to be 6.0.13, which is why the sources
-of 6.0 will be used to prepare the .config file.
+In the following example that's assumed to be 6.0, which is why its sources
+will be used to prepare the .config file.
**In case you face a regression**, follow the steps at least till the end of
*segment 2*. Then you can submit a preliminary report -- or continue with
@@ -61,7 +61,7 @@ will be considered the 'good' release and used to prepare the .config file.
cd ~/linux/
git remote add -t master stable \
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
- git checkout --detach v6.0
+ git switch --detach v6.0
# * Hint: if you used an existing clone, ensure no stale .config is around.
make olddefconfig
# * Ensure the former command picked the .config of the 'working' kernel.
@@ -87,7 +87,7 @@ will be considered the 'good' release and used to prepare the .config file.
a) Checking out latest mainline code::
cd ~/linux/
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
b) Build, install, and boot a kernel::
@@ -125,7 +125,7 @@ will be considered the 'good' release and used to prepare the .config file.
a) Start by checking out the sources of the 'good' version::
cd ~/linux/
- git checkout --force --detach v6.0
+ git switch --discard-changes --detach v6.0
b) Build, install, and boot a kernel as described earlier in *segment 1,
section b* -- just feel free to skip the 'du' commands, as you have a rough
@@ -136,8 +136,7 @@ will be considered the 'good' release and used to prepare the .config file.
* **Segment 3**: perform and validate the bisection.
- a) In case your 'broken' version is a stable/longterm release, add the Git
- branch holding it::
+ a) Retrieve the sources for your 'bad' version::
git remote set-branches --add stable linux-6.1.y
git fetch stable
@@ -157,11 +156,12 @@ will be considered the 'good' release and used to prepare the .config file.
works with the newly built kernel. If it does, tell Git by executing
``git bisect good``; if it does not, run ``git bisect bad`` instead.
- All three commands will make Git checkout another commit; then re-execute
+ All three commands will make Git check out another commit; then re-execute
this step (e.g. build, install, boot, and test a kernel to then tell Git
the outcome). Do so again and again until Git shows which commit broke
things. If you run short of disk space during this process, check the
- "Supplementary tasks" section below.
+ section 'Complementary tasks: cleanup during and after the process'
+ below.
d) Once your finished the bisection, put a few things away::
@@ -172,14 +172,17 @@ will be considered the 'good' release and used to prepare the .config file.
e) Try to verify the bisection result::
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
git revert --no-edit cafec0cacaca0
+ cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
This is optional, as some commits are impossible to revert. But if the
second command worked flawlessly, build, install, and boot one more kernel
- kernel, which should not show the regression.
+ kernel; just this time skip the first command copying the base .config file
+  over, as that already has been taken care of.
-* **Supplementary tasks**: cleanup during and after the process.
+* **Complementary tasks**: cleanup during and after the process.
a) To avoid running out of disk space during a bisection, you might need to
remove some kernels you built earlier. You most likely want to keep those
@@ -202,13 +205,25 @@ will be considered the 'good' release and used to prepare the .config file.
the kernels you built earlier and later you might want to keep around for
a week or two.
+* **Optional task**: test a debug patch or a proposed fix later::
+
+ git fetch mainline
+ git switch --discard-changes --detach mainline/master
+ git apply /tmp/foobars-proposed-fix-v1.patch
+ cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
+
+ Build, install, and boot a kernel as described in *segment 1, section b* --
+ but this time omit the first command copying the build configuration over,
+ as that has been taken care of already.
+
.. _introguide_bissbs:
Step-by-step guide on how to verify bugs and bisect regressions
===============================================================
This guide describes how to set up your own Linux kernels for investigating bugs
-or regressions you intent to report. How far you want to follow the instructions
+or regressions you intend to report. How far you want to follow the instructions
depends on your issue:
Execute all steps till the end of *segment 1* to **verify if your kernel problem
@@ -221,15 +236,17 @@ report; instead of the latter your could also head straight on and follow
*segment 3* to **perform a bisection** for a full-fledged regression report
developers are obliged to act upon.
- :ref:`Preparations: set up everything to build your own kernels.<introprep_bissbs>`
+ :ref:`Preparations: set up everything to build your own kernels <introprep_bissbs>`.
- :ref:`Segment 1: try to reproduce the problem with the latest codebase.<introlatestcheck_bissbs>`
+ :ref:`Segment 1: try to reproduce the problem with the latest codebase <introlatestcheck_bissbs>`.
- :ref:`Segment 2: check if the kernels you build work fine.<introworkingcheck_bissbs>`
+ :ref:`Segment 2: check if the kernels you build work fine <introworkingcheck_bissbs>`.
- :ref:`Segment 3: perform a bisection and validate the result.<introbisect_bissbs>`
+ :ref:`Segment 3: perform a bisection and validate the result <introbisect_bissbs>`.
- :ref:`Supplementary tasks: cleanup during and after following this guide.<introclosure_bissbs>`
+ :ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
+
+ :ref:`Optional tasks: test reverts, patches, or later versions <introoptional_bissbs>`.
The steps in each segment illustrate the important aspects of the process, while
a comprehensive reference section holds additional details for almost all of the
@@ -240,24 +257,35 @@ to get things rolling again.
For further details on how to report Linux kernel issues or regressions check
out Documentation/admin-guide/reporting-issues.rst, which works in conjunction
with this document. It among others explains why you need to verify bugs with
-the latest 'mainline' kernel, even if you face a problem with a kernel from a
-'stable/longterm' series; for users facing a regression it also explains that
-sending a preliminary report after finishing segment 2 might be wise, as the
-regression and its culprit might be known already. For further details on
-what actually qualifies as a regression check out
-Documentation/admin-guide/reporting-regressions.rst.
+the latest 'mainline' kernel (e.g. versions like 6.0, 6.1-rc1, or 6.1-rc6),
+even if you face a problem with a kernel from a 'stable/longterm' series
+(say 6.0.13).
+
+For users facing a regression that document also explains why sending a
+preliminary report after segment 2 might be wise, as the regression and its
+culprit might be known already. For further details on what actually qualifies
+as a regression check out Documentation/admin-guide/reporting-regressions.rst.
+
+If you run into any problems while following this guide or have ideas how to
+improve it, :ref:`please let the kernel developers know <submit_improvements>`.
.. _introprep_bissbs:
Preparations: set up everything to build your own kernels
---------------------------------------------------------
+The following steps lay the groundwork for all further tasks.
+
+Note: the instructions assume you are building and testing on the same
+machine; if you want to compile the kernel on another system, check
+:ref:`Build kernels on a different machine <buildhost_bis>` below.
+
.. _backup_bissbs:
* Create a fresh backup and put system repair and restore tools at hand, just
to be prepared for the unlikely case of something going sideways.
- [:ref:`details<backup_bisref>`]
+ [:ref:`details <backup_bisref>`]
.. _vanilla_bissbs:
@@ -265,7 +293,7 @@ Preparations: set up everything to build your own kernels
builds them automatically. That includes but is not limited to DKMS, openZFS,
VirtualBox, and Nvidia's graphics drivers (including the GPLed kernel module).
- [:ref:`details<vanilla_bisref>`]
+ [:ref:`details <vanilla_bisref>`]
.. _secureboot_bissbs:
@@ -276,48 +304,49 @@ Preparations: set up everything to build your own kernels
their restrictions through a process initiated by
``mokutil --disable-validation``.
- [:ref:`details<secureboot_bisref>`]
+ [:ref:`details <secureboot_bisref>`]
.. _rangecheck_bissbs:
* Determine the kernel versions considered 'good' and 'bad' throughout this
- guide.
+ guide:
- Do you follow this guide to verify if a bug is present in the code developers
- care for? Then consider the mainline release your 'working' kernel (the newest
- one you regularly use) is based on to be the 'good' version; if your 'working'
- kernel for example is 6.0.11, then your 'good' kernel is 6.0.
+ * Do you follow this guide to verify if a bug is present in the code the
+ primary developers care for? Then consider the version of the newest kernel
+ you regularly use currently as 'good' (e.g. 6.0, 6.0.13, or 6.1-rc2).
- In case you face a regression, it depends on the version range where the
- regression was introduced:
+ * Do you face a regression, e.g. something broke or works worse after
+ switching to a newer kernel version? In that case it depends on the version
+ range during which the problem appeared:
- * Something which used to work in Linux 6.0 broke when switching to Linux
- 6.1-rc1? Then henceforth regard 6.0 as the last known 'good' version
- and 6.1-rc1 as the first 'bad' one.
+ * Something regressed when updating from a stable/longterm release
+ (say 6.0.13) to a newer mainline series (like 6.1-rc7 or 6.1) or a
+ stable/longterm version based on one (say 6.1.5)? Then consider the
+ mainline release your working kernel is based on to be the 'good'
+ version (e.g. 6.0) and the first version to be broken as the 'bad' one
+ (e.g. 6.1-rc7, 6.1, or 6.1.5). Note, at this point it is merely assumed
+ that 6.0 is fine; this hypothesis will be checked in segment 2.
- * Some function stopped working when updating from 6.0.11 to 6.1.4? Then for
- the time being consider 6.0 as the last 'good' version and 6.1.4 as
- the 'bad' one. Note, at this point it is merely assumed that 6.0 is fine;
- this assumption will be checked in segment 2.
+ * Something regressed when switching from one mainline version (say 6.0) to
+ a later one (like 6.1-rc1) or a stable/longterm release based on it
+ (say 6.1.5)? Then regard the last working version (e.g. 6.0) as 'good' and
+ the first broken (e.g. 6.1-rc1 or 6.1.5) as 'bad'.
- * A feature you used in 6.0.11 does not work at all or worse in 6.1.13? In
- that case you want to bisect within a stable/longterm series: consider
- 6.0.11 as the last known 'good' version and 6.0.13 as the first 'bad'
- one. Note, in this case you still want to compile and test a mainline kernel
- as explained in segment 1: the outcome will determine if you need to report
- your issue to the regular developers or the stable team.
+ * Something regressed when updating within a stable/longterm series (say
+ from 6.0.13 to 6.0.15)? Then consider those versions as 'good' and 'bad'
+ (e.g. 6.0.13 and 6.0.15), as you need to bisect within that series.
*Note, do not confuse 'good' version with 'working' kernel; the latter term
throughout this guide will refer to the last kernel that has been working
fine.*
- [:ref:`details<rangecheck_bisref>`]
+ [:ref:`details <rangecheck_bisref>`]
.. _bootworking_bissbs:
* Boot into the 'working' kernel and briefly use the apparently broken feature.
- [:ref:`details<bootworking_bisref>`]
+ [:ref:`details <bootworking_bisref>`]
.. _diskspace_bissbs:
@@ -327,7 +356,7 @@ Preparations: set up everything to build your own kernels
debug symbols: both explain approaches reducing the amount of space, which
should allow you to master these tasks with about 4 Gigabytes free space.
- [:ref:`details<diskspace_bisref>`]
+ [:ref:`details <diskspace_bisref>`]
.. _buildrequires_bissbs:
@@ -337,7 +366,7 @@ Preparations: set up everything to build your own kernels
reference section shows how to quickly install those on various popular Linux
distributions.
- [:ref:`details<buildrequires_bisref>`]
+ [:ref:`details <buildrequires_bisref>`]
.. _sources_bissbs:
@@ -360,14 +389,23 @@ Preparations: set up everything to build your own kernels
git remote add -t master stable \
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
- [:ref:`details<sources_bisref>`]
+ [:ref:`details <sources_bisref>`]
+
+.. _stablesources_bissbs:
+
+* Is one of the versions you earlier established as 'good' or 'bad' a stable or
+ longterm release (say 6.1.5)? Then download the code for the series it belongs
+ to ('linux-6.1.y' in this example)::
+
+ git remote set-branches --add stable linux-6.1.y
+ git fetch stable
.. _oldconfig_bissbs:
* Start preparing a kernel build configuration (the '.config' file).
Before doing so, ensure you are still running the 'working' kernel an earlier
- step told you to boot; if you are unsure, check the current kernel release
+ step told you to boot; if you are unsure, check the current kernelrelease
identifier using ``uname -r``.
Afterwards check out the source code for the version earlier established as
@@ -375,7 +413,7 @@ Preparations: set up everything to build your own kernels
the version number in this and all later Git commands needs to be prefixed
with a 'v'::
- git checkout --detach v6.0
+ git switch --discard-changes --detach v6.0
Now create a build configuration file::
@@ -398,7 +436,7 @@ Preparations: set up everything to build your own kernels
'make olddefconfig' again and check if it now picked up the right config file
as base.
- [:ref:`details<oldconfig_bisref>`]
+ [:ref:`details <oldconfig_bisref>`]
.. _localmodconfig_bissbs:
@@ -432,7 +470,7 @@ Preparations: set up everything to build your own kernels
spending much effort on, as long as it boots and allows to properly test the
feature that causes trouble.
- [:ref:`details<localmodconfig_bisref>`]
+ [:ref:`details <localmodconfig_bisref>`]
.. _tagging_bissbs:
@@ -442,7 +480,7 @@ Preparations: set up everything to build your own kernels
./scripts/config --set-str CONFIG_LOCALVERSION '-local'
./scripts/config -e CONFIG_LOCALVERSION_AUTO
- [:ref:`details<tagging_bisref>`]
+ [:ref:`details <tagging_bisref>`]
.. _debugsymbols_bissbs:
@@ -461,7 +499,7 @@ Preparations: set up everything to build your own kernels
./scripts/config -d DEBUG_INFO -d DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT \
-d DEBUG_INFO_DWARF4 -d DEBUG_INFO_DWARF5 -e CONFIG_DEBUG_INFO_NONE
- [:ref:`details<debugsymbols_bisref>`]
+ [:ref:`details <debugsymbols_bisref>`]
.. _configmods_bissbs:
@@ -471,14 +509,14 @@ Preparations: set up everything to build your own kernels
* Are you running Debian? Then you want to avoid known problems by performing
additional adjustments explained in the reference section.
- [:ref:`details<configmods_distros_bisref>`].
+ [:ref:`details <configmods_distros_bisref>`].
* If you want to influence other aspects of the configuration, do so now using
your preferred tool. Note, to use make targets like 'menuconfig' or
'nconfig', you will need to install the development files of ncurses; for
'xconfig' you likewise need the Qt5 or Qt6 headers.
- [:ref:`details<configmods_individual_bisref>`].
+ [:ref:`details <configmods_individual_bisref>`].
.. _saveconfig_bissbs:
@@ -488,7 +526,7 @@ Preparations: set up everything to build your own kernels
make olddefconfig
cp .config ~/kernel-config-working
- [:ref:`details<saveconfig_bisref>`]
+ [:ref:`details <saveconfig_bisref>`]
.. _introlatestcheck_bissbs:
@@ -498,16 +536,30 @@ Segment 1: try to reproduce the problem with the latest codebase
The following steps verify if the problem occurs with the code currently
supported by developers. In case you face a regression, it also checks that the
problem is not caused by some .config change, as reporting the issue then would
-be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
+be a waste of time. [:ref:`details <introlatestcheck_bisref>`]
.. _checkoutmaster_bissbs:
-* Check out the latest Linux codebase::
+* Check out the latest Linux codebase.
- cd ~/linux/
- git checkout --force --detach mainline/master
+ * Are your 'good' and 'bad' versions from the same stable or longterm series?
+ Then check the `front page of kernel.org <https://kernel.org/>`_: if it
+    lists a release from that series without an '[EOL]' tag, check out the
+    latest version of that series ('linux-6.1.y' in the following example)::
+
+ cd ~/linux/
+ git switch --discard-changes --detach stable/linux-6.1.y
+
+    Your series is unsupported if it is not listed or if it carries an
+    'end of life' tag. In that case you might want to check if a successor
+    series (say linux-6.2.y) or mainline (see next point) fixes the bug.
- [:ref:`details<checkoutmaster_bisref>`]
+ * In all other cases, run::
+
+ cd ~/linux/
+ git switch --discard-changes --detach mainline/master
+
+ [:ref:`details <checkoutmaster_bisref>`]
.. _build_bissbs:
@@ -522,7 +574,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
reference section for alternatives, which obviously will require other
steps to install as well.
- [:ref:`details<build_bisref>`]
+ [:ref:`details <build_bisref>`]
.. _install_bissbs:
@@ -555,7 +607,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
down: if you will build more kernels as described in segment 2 and 3, you will
have to perform those again after executing ``command -v installkernel [...]``.
- [:ref:`details<install_bisref>`]
+ [:ref:`details <install_bisref>`]
.. _storagespace_bissbs:
@@ -568,7 +620,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
Write down or remember those two values for later: they enable you to prevent
running out of disk space accidentally during a bisection.
- [:ref:`details<storagespace_bisref>`]
+ [:ref:`details <storagespace_bisref>`]
.. _kernelrelease_bissbs:
@@ -595,7 +647,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
If that command does not return '0', check the reference section, as the cause
for this might interfere with your testing.
- [:ref:`details<tainted_bisref>`]
+ [:ref:`details <tainted_bisref>`]
.. _recheckbroken_bissbs:
@@ -603,21 +655,19 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
out the instructions in the reference section to ensure nothing went sideways
during your tests.
- [:ref:`details<recheckbroken_bisref>`]
+ [:ref:`details <recheckbroken_bisref>`]
.. _recheckstablebroken_bissbs:
-* Are you facing a problem within a stable/longterm series, but failed to
- reproduce it with the mainline kernel you just built? One that according to
- the `front page of kernel.org <https://kernel.org/>`_ is still supported? Then
- check if the latest codebase for the particular series might already fix the
- problem. To do so, add the stable series Git branch for your 'good' kernel
- (again, this here is assumed to be 6.0) and check out the latest version::
+* Did you just build a stable or longterm kernel? And were you able to reproduce
+ the regression with it? Then you should test the latest mainline codebase as
+ well, because the result determines which developers the bug must be submitted
+ to.
+
+ To prepare that test, check out current mainline::
cd ~/linux/
- git remote set-branches --add stable linux-6.0.y
- git fetch stable
- git checkout --force --detach linux-6.0.y
+ git switch --discard-changes --detach mainline/master
Now use the checked out code to build and install another kernel using the
commands the earlier steps already described in more detail::
@@ -639,14 +689,16 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
uname -r
cat /proc/sys/kernel/tainted
- Now verify if this kernel is showing the problem.
+ Now verify if this kernel is showing the problem. If it does, then you need
+ to report the bug to the primary developers; if it does not, report it to the
+ stable team. See Documentation/admin-guide/reporting-issues.rst for details.
- [:ref:`details<recheckstablebroken_bisref>`]
+ [:ref:`details <recheckstablebroken_bisref>`]
Do you follow this guide to verify if a problem is present in the code
currently supported by Linux kernel developers? Then you are done at this
point. If you later want to remove the kernel you just built, check out
-:ref:`Supplementary tasks: cleanup during and after following this guide<introclosure_bissbs>`.
+:ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
In case you face a regression, move on and execute at least the next segment
as well.
@@ -658,7 +710,7 @@ Segment 2: check if the kernels you build work fine
In case of a regression, you now want to ensure the trimmed configuration file
you created earlier works as expected; a bisection with the .config file
-otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
+otherwise would be a waste of time. [:ref:`details <introworkingcheck_bisref>`]
.. _recheckworking_bissbs:
@@ -669,7 +721,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
'good' (once again assumed to be 6.0 here)::
cd ~/linux/
- git checkout --detach v6.0
+ git switch --discard-changes --detach v6.0
Now use the checked out code to configure, build, and install another kernel
using the commands the previous subsection explained in more detail::
@@ -693,7 +745,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
Now check if this kernel works as expected; if not, consult the reference
section for further instructions.
- [:ref:`details<recheckworking_bisref>`]
+ [:ref:`details <recheckworking_bisref>`]
.. _introbisect_bissbs:
@@ -703,18 +755,11 @@ Segment 3: perform the bisection and validate the result
With all the preparations and precaution builds taken care of, you are now ready
to begin the bisection. This will make you build quite a few kernels -- usually
about 15 in case you encountered a regression when updating to a newer series
-(say from 6.0.11 to 6.1.3). But do not worry, due to the trimmed build
+(say from 6.0.13 to 6.1.5). But do not worry, due to the trimmed build
configuration created earlier this works a lot faster than many people assume:
overall on average it will often just take about 10 to 15 minutes to compile
each kernel on commodity x86 machines.
-* In case your 'bad' version is a stable/longterm release (say 6.1.5), add its
- stable branch, unless you already did so earlier::
-
- cd ~/linux/
- git remote set-branches --add stable linux-6.1.y
- git fetch stable
-
.. _bisectstart_bissbs:
* Start the bisection and tell Git about the versions earlier established as
@@ -725,7 +770,7 @@ each kernel on commodity x86 machines.
git bisect good v6.0
git bisect bad v6.1.5
- [:ref:`details<bisectstart_bisref>`]
+ [:ref:`details <bisectstart_bisref>`]
.. _bisectbuild_bissbs:
@@ -745,7 +790,7 @@ each kernel on commodity x86 machines.
If compilation fails for some reason, run ``git bisect skip`` and restart
executing the stack of commands from the beginning.
- In case you skipped the "test latest codebase" step in the guide, check its
+ In case you skipped the 'test latest codebase' step in the guide, check its
description as for why the 'df [...]' and 'make -s kernelrelease [...]'
commands are here.
@@ -754,7 +799,7 @@ each kernel on commodity x86 machines.
totally normal to see release identifiers like '6.0-rc1-local-gcafec0cacaca0'
if you bisect between versions 6.1 and 6.2 for example.
- [:ref:`details<bisectbuild_bisref>`]
+ [:ref:`details <bisectbuild_bisref>`]
.. _bisecttest_bissbs:
@@ -794,7 +839,7 @@ each kernel on commodity x86 machines.
might need to scroll up to see the message mentioning the culprit;
alternatively, run ``git bisect log > ~/bisection-log``.
- [:ref:`details<bisecttest_bisref>`]
+ [:ref:`details <bisecttest_bisref>`]
.. _bisectlog_bissbs:
@@ -806,7 +851,7 @@ each kernel on commodity x86 machines.
cp .config ~/bisection-config-culprit
git bisect reset
- [:ref:`details<bisectlog_bisref>`]
+ [:ref:`details <bisectlog_bisref>`]
.. _revert_bissbs:
@@ -823,16 +868,16 @@ each kernel on commodity x86 machines.
Begin by checking out the latest codebase depending on the range you bisected:
* Did you face a regression within a stable/longterm series (say between
- 6.0.11 and 6.0.13) that does not happen in mainline? Then check out the
+ 6.0.13 and 6.0.15) that does not happen in mainline? Then check out the
latest codebase for the affected series like this::
git fetch stable
- git checkout --force --detach linux-6.0.y
+ git switch --discard-changes --detach linux-6.0.y
* In all other cases check out latest mainline::
git fetch mainline
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
If you bisected a regression within a stable/longterm series that also
happens in mainline, there is one more thing to do: look up the mainline
@@ -846,27 +891,33 @@ each kernel on commodity x86 machines.
git revert --no-edit cafec0cacaca0
- If that fails, give up trying and move on to the next step. But if it works,
- build a kernel again using the familiar command sequence::
+ If that fails, give up trying and move on to the next step; if it works,
+ adjust the tag to facilitate the identification and prevent accidentally
+ overwriting another kernel::
cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+  Build a kernel using the familiar command sequence, just without copying
+  the base .config over::
+
make olddefconfig &&
- make -j $(nproc --all) &&
+ make -j $(nproc --all)
# * Check if the free space suffices holding another kernel:
df -h /boot/ /lib/modules/
sudo make modules_install
command -v installkernel && sudo make install
- Make -s kernelrelease | tee -a ~/kernels-built
+ make -s kernelrelease | tee -a ~/kernels-built
reboot
- Now check one last time if the feature that made you perform a bisection work
- with that kernel.
+ Now check one last time if the feature that made you perform a bisection works
+ with that kernel: if everything went well, it should not show the regression.
- [:ref:`details<revert_bisref>`]
+ [:ref:`details <revert_bisref>`]
.. _introclosure_bissbs:
-Supplementary tasks: cleanup during and after the bisection
+Complementary tasks: cleanup during and after the bisection
-----------------------------------------------------------
During and after following this guide you might want or need to remove some of
@@ -903,7 +954,7 @@ space might run out.
kernel image and related files behind; in that case remove them as described
in the reference section.
- [:ref:`details<makeroom_bisref>`]
+ [:ref:`details <makeroom_bisref>`]
.. _finishingtouch_bissbs:
@@ -926,18 +977,99 @@ space might run out.
the version considered 'good', and the last three or four you compiled
during the actual bisection process.
- [:ref:`details<finishingtouch_bisref>`]
+ [:ref:`details <finishingtouch_bisref>`]
+
+.. _introoptional_bissbs:
+
+Optional: test reverts, patches, or later versions
+--------------------------------------------------
+
+While or after reporting a bug, you might want or potentially will be asked to
+test reverts, debug patches, proposed fixes, or other versions. In that case
+follow these instructions.
+
+* Update your Git clone and check out the latest code.
+
+ * In case you want to test mainline, fetch its latest changes before checking
+ its code out::
+
+ git fetch mainline
+ git switch --discard-changes --detach mainline/master
+
+ * In case you want to test a stable or longterm kernel, first add the branch
+ holding the series you are interested in (6.2 in the example), unless you
+ already did so earlier::
+
+ git remote set-branches --add stable linux-6.2.y
+
+ Then fetch the latest changes and check out the latest version from the
+ series::
+
+ git fetch stable
+ git switch --discard-changes --detach stable/linux-6.2.y
+
+* Copy your kernel build configuration over::
+
+ cp ~/kernel-config-working .config
+
+* Your next step depends on what you want to do:
+
+  * In case you just want to test the latest codebase, head to the next step;
+    you are already all set.
+
+ * In case you want to test if a revert fixes an issue, revert one or multiple
+ changes by specifying their commit ids::
+
+ git revert --no-edit cafec0cacaca0
+
+    Now give that kernel a special tag to facilitate its identification and
+ prevent accidentally overwriting another kernel::
+
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+ * In case you want to test a patch, store the patch in a file like
+ '/tmp/foobars-proposed-fix-v1.patch' and apply it like this::
+
+ git apply /tmp/foobars-proposed-fix-v1.patch
+
+ In case of multiple patches, repeat this step with the others.
+
+    Now give that kernel a special tag to facilitate its identification and
+ prevent accidentally overwriting another kernel::
+
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
+
+* Build a kernel using the familiar commands, just without copying the kernel
+ build configuration over, as that has been taken care of already::
+
+ make olddefconfig &&
+ make -j $(nproc --all)
+ # * Check if the free space suffices holding another kernel:
+ df -h /boot/ /lib/modules/
+ sudo make modules_install
+ command -v installkernel && sudo make install
+ make -s kernelrelease | tee -a ~/kernels-built
+ reboot
+
+* Now verify you booted the newly built kernel and check it.
+
+[:ref:`details <introoptional_bisref>`]
.. _submit_improvements:
-This concludes the step-by-step guide.
+Conclusion
+----------
+
+You have reached the end of the step-by-step guide.
Did you run into trouble following any of the above steps not cleared up by the
reference section below? Did you spot errors? Or do you have ideas how to
-improve the guide? Then please take a moment and let the maintainer of this
+improve the guide?
+
+If any of that applies, please take a moment and let the maintainer of this
document know by email (Thorsten Leemhuis <linux@leemhuis.info>), ideally while
CCing the Linux docs mailing list (linux-doc@vger.kernel.org). Such feedback is
-vital to improve this document further, which is in everybody's interest, as it
+vital to improve this text further, which is in everybody's interest, as it
will enable more people to master the task described here -- and hopefully also
improve similar guides inspired by this one.
@@ -948,10 +1080,20 @@ Reference section for the step-by-step guide
This section holds additional information for almost all the items in the above
step-by-step guide.
+Preparations for building your own kernels
+------------------------------------------
+
+ *The steps in this section lay the groundwork for all further tests.*
+ [:ref:`... <introprep_bissbs>`]
+
+The steps in all later sections of this guide depend on those described here.
+
+[:ref:`back to step-by-step guide <introprep_bissbs>`].
+
.. _backup_bisref:
Prepare for emergencies
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
*Create a fresh backup and put system repair and restore tools at hand.*
[:ref:`... <backup_bissbs>`]
@@ -966,7 +1108,7 @@ for something going sideways, even if that should not happen.
.. _vanilla_bisref:
Remove anything related to externally maintained kernel modules
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Remove all software that depends on externally developed kernel drivers or
builds them automatically.* [:ref:`...<vanilla_bissbs>`]
@@ -984,7 +1126,7 @@ explains in more detail.
.. _secureboot_bisref:
Deal with techniques like Secure Boot
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*On platforms with 'Secure Boot' or similar techniques, prepare everything to
ensure the system will permit your self-compiled kernel to boot later.*
@@ -1021,7 +1163,7 @@ Afterwards, permit MokManager to reboot the machine.
.. _bootworking_bisref:
Boot the last kernel that was working
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Boot into the last working kernel and briefly recheck if the feature that
regressed really works.* [:ref:`...<bootworking_bissbs>`]
@@ -1034,7 +1176,7 @@ the right thing.
.. _diskspace_bisref:
Space requirements
-------------------
+~~~~~~~~~~~~~~~~~~
*Ensure to have enough free space for building Linux.*
[:ref:`... <diskspace_bissbs>`]
@@ -1052,32 +1194,32 @@ space by quite a few gigabytes.
.. _rangecheck_bisref:
Bisection range
----------------
+~~~~~~~~~~~~~~~
*Determine the kernel versions considered 'good' and 'bad' throughout this
guide.* [:ref:`...<rangecheck_bissbs>`]
Establishing the range of commits to be checked is mostly straightforward,
except when a regression occurred when switching from a release of one stable
-series to a release of a later series (e.g. from 6.0.11 to 6.1.4). In that case
+series to a release of a later series (e.g. from 6.0.13 to 6.1.5). In that case
Git will need some hand holding, as there is no straight line of descent.
That's because with the release of 6.0 mainline carried on to 6.1 while the
stable series 6.0.y branched to the side. It's therefore theoretically possible
-that the issue you face with 6.1.4 only worked in 6.0.11, as it was fixed by a
+that the issue you face with 6.1.5 only worked in 6.0.13, as it was fixed by a
commit that went into one of the 6.0.y releases, but never hit mainline or the
6.1.y series. Thankfully that normally should not happen due to the way the
stable/longterm maintainers maintain the code. It's thus pretty safe to assume
6.0 as a 'good' kernel. That assumption will be tested anyway, as that kernel
will be built and tested in the segment '2' of this guide; Git would force you
-to do this as well, if you tried bisecting between 6.0.11 and 6.1.13.
+to do this as well, if you tried bisecting between 6.0.13 and 6.1.15.
[:ref:`back to step-by-step guide <rangecheck_bissbs>`]
.. _buildrequires_bisref:
Install build requirements
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Install all software required to build a Linux kernel.*
[:ref:`...<buildrequires_bissbs>`]
@@ -1117,7 +1259,7 @@ These commands install a few packages that are often, but not always needed. You
for example might want to skip installing the development headers for ncurses,
which you will only need in case you later might want to adjust the kernel build
configuration using make the targets 'menuconfig' or 'nconfig'; likewise omit
-the headers of Qt6 is you do not plan to adjust the .config using 'xconfig'.
+the headers of Qt6 if you do not plan to adjust the .config using 'xconfig'.
You furthermore might need additional libraries and their development headers
for tasks not covered in this guide -- for example when building utilities from
@@ -1128,7 +1270,7 @@ the kernel's tools/ directory.
.. _sources_bisref:
Download the sources using Git
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Retrieve the Linux mainline sources.*
[:ref:`...<sources_bissbs>`]
@@ -1148,7 +1290,7 @@ work better for you:
.. _sources_bundle_bisref:
Downloading Linux mainline sources using a bundle
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""""""""""""""""""""""""
Use the following commands to retrieve the Linux mainline sources using a
bundle::
@@ -1184,7 +1326,7 @@ First, execute the following command to retrieve the latest mainline codebase::
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Now deepen your clone's history to the second predecessor of the mainline
-release of your 'good' version. In case the latter are 6.0 or 6.0.11, 5.19 would
+release of your 'good' version. In case the latter are 6.0 or 6.0.13, 5.19 would
be the first predecessor and 5.18 the second -- hence deepen the history up to
that version::
@@ -1219,7 +1361,7 @@ Note, shallow clones have a few peculiar characteristics:
.. _oldconfig_bisref:
Start defining the build configuration for your kernel
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Start preparing a kernel build configuration (the '.config' file).*
[:ref:`... <oldconfig_bissbs>`]
@@ -1279,7 +1421,7 @@ that file to the build machine and store it as ~/linux/.config; afterwards run
.. _localmodconfig_bisref:
Trim the build configuration for your kernel
---------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Disable any kernel modules apparently superfluous for your setup.*
[:ref:`... <localmodconfig_bissbs>`]
@@ -1328,7 +1470,7 @@ step-by-step guide mentions::
.. _tagging_bisref:
Tag the kernels about to be build
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Ensure all the kernels you will build are clearly identifiable using a
special tag and a unique version identifier.* [:ref:`... <tagging_bissbs>`]
@@ -1344,7 +1486,7 @@ confusing during the bisection.
.. _debugsymbols_bisref:
Decide to enable or disable debug symbols
------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Decide how to handle debug symbols.* [:ref:`... <debugsymbols_bissbs>`]
@@ -1373,7 +1515,7 @@ explains this process in more detail.
.. _configmods_bisref:
Adjust build configuration
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check if you may want or need to adjust some other kernel configuration
options:*
@@ -1384,7 +1526,7 @@ kernel configuration options.
.. _configmods_distros_bisref:
Distro specific adjustments
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""
*Are you running* [:ref:`... <configmods_bissbs>`]
@@ -1409,7 +1551,7 @@ when following this guide on a few commodity distributions.
.. _configmods_individual_bisref:
Individual adjustments
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
*If you want to influence the other aspects of the configuration, do so
now.* [:ref:`... <configmods_bissbs>`]
@@ -1426,13 +1568,13 @@ is missing.
.. _saveconfig_bisref:
Put the .config file aside
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Reprocess the .config after the latest changes and store it in a safe place.*
[:ref:`... <saveconfig_bissbs>`]
Put the .config you prepared aside, as you want to copy it back to the build
-directory every time during this guide before you start building another
+directory every time during this guide before you start building another
kernel. That's because going back and forth between different versions can alter
.config files in odd ways; those occasionally cause side effects that could
confuse testing or in some cases render the result of your bisection
@@ -1442,8 +1584,8 @@ meaningless.
.. _introlatestcheck_bisref:
-Try to reproduce the regression
------------------------------------------
+Try to reproduce the problem with the latest codebase
+-----------------------------------------------------
*Verify the regression is not caused by some .config change and check if it
still occurs with the latest codebase.* [:ref:`... <introlatestcheck_bissbs>`]
@@ -1490,28 +1632,28 @@ highly recommended for these reasons:
Your report might be ignored if you send it to the wrong party -- and even
when you get a reply there is a decent chance that developers tell you to
- evaluate which of the two cases it is before they take a closer look.
+ evaluate which of the two cases it is before they take a closer look.
[:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
.. _checkoutmaster_bisref:
Check out the latest Linux codebase
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check out the latest Linux codebase.*
- [:ref:`... <introlatestcheck_bissbs>`]
+ [:ref:`... <checkoutmaster_bissbs>`]
In case you later want to recheck if an ever newer codebase might fix the
problem, remember to run that ``git fetch --shallow-exclude [...]`` command
again mentioned earlier to update your local Git repository.
-[:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
+[:ref:`back to step-by-step guide <checkoutmaster_bissbs>`]
.. _build_bisref:
Build your kernel
------------------
+~~~~~~~~~~~~~~~~~
*Build the image and the modules of your first kernel using the config file
you prepared.* [:ref:`... <build_bissbs>`]
@@ -1521,7 +1663,7 @@ yourself. Another subsection explains how to directly package your kernel up as
deb, rpm or tar file.
Dealing with build errors
-~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""
When a build error occurs, it might be caused by some aspect of your machine's
setup that often can be fixed quickly; other times though the problem lies in
@@ -1552,11 +1694,11 @@ by modifying your search terms or using another line from the error messages.
In the end, most issues you run into have likely been encountered and
reported by others already. That includes issues where the cause is not your
-system, but lies in the code. If you run into one of those, you might thus find a
-solution (e.g. a patch) or workaround for your issue, too.
+system, but lies in the code. If you run into one of those, you might thus find
+a solution (e.g. a patch) or workaround for your issue, too.
Package your kernel up
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
The step-by-step guide uses the default make targets (e.g. 'bzImage' and
'modules' on x86) to build the image and the modules of your kernel, which later
@@ -1587,7 +1729,7 @@ distribution's kernel packages.
.. _install_bisref:
Put the kernel in place
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
*Install the kernel you just built.* [:ref:`... <install_bissbs>`]
@@ -1630,7 +1772,7 @@ process. Afterwards add your kernel to your bootloader configuration and reboot.
.. _storagespace_bisref:
Storage requirements per kernel
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check how much storage space the kernel, its modules, and other related files
like the initramfs consume.* [:ref:`... <storagespace_bissbs>`]
@@ -1651,7 +1793,7 @@ need to look in different places.
.. _tainted_bisref:
Check if your newly built kernel considers itself 'tainted'
------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check if the kernel marked itself as 'tainted'.*
[:ref:`... <tainted_bissbs>`]
@@ -1670,7 +1812,7 @@ interest, as your testing might be flawed otherwise.
.. _recheckbroken_bisref:
Check the kernel built from a recent mainline codebase
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Verify if your bug occurs with the newly built kernel.*
[:ref:`... <recheckbroken_bissbs>`]
@@ -1696,7 +1838,7 @@ the kernel you built from the latest codebase. These are the most frequent:
.. _recheckstablebroken_bisref:
Check the kernel built from the latest stable/longterm codebase
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Are you facing a regression within a stable/longterm release, but failed to
reproduce it with the kernel you just built using the latest mainline sources?
@@ -1741,7 +1883,7 @@ ensure the kernel version you assumed to be 'good' earlier in the process (e.g.
.. _recheckworking_bisref:
Build your own version of the 'good' kernel
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Build your own variant of the working kernel and check if the feature that
regressed works as expected with it.* [:ref:`... <recheckworking_bissbs>`]
@@ -1767,15 +1909,25 @@ multitude of reasons why this might happen. Some ideas where to look:
Note, if you found and fixed problems with the .config file, you want to use it
to build another kernel from the latest codebase, as your earlier tests with
-mainline and the latest version from an affected stable/longterm series were most
-likely flawed.
+mainline and the latest version from an affected stable/longterm series were
+most likely flawed.
[:ref:`back to step-by-step guide <recheckworking_bissbs>`]
+Perform a bisection and validate the result
+-------------------------------------------
+
+ *With all the preparations and precaution builds taken care of, you are now
+ ready to begin the bisection.* [:ref:`... <introbisect_bissbs>`]
+
+The steps in this segment perform and validate the bisection.
+
+[:ref:`back to step-by-step guide <introbisect_bissbs>`].
+
.. _bisectstart_bisref:
Start the bisection
--------------------
+~~~~~~~~~~~~~~~~~~~
*Start the bisection and tell Git about the versions earlier established as
'good' and 'bad'.* [:ref:`... <bisectstart_bissbs>`]
@@ -1789,7 +1941,7 @@ for you to test.
.. _bisectbuild_bisref:
Build a kernel from the bisection point
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Build, install, and boot a kernel from the code Git checked out using the
same commands you used earlier.* [:ref:`... <bisectbuild_bissbs>`]
@@ -1817,7 +1969,7 @@ There are two things worth of note here:
.. _bisecttest_bisref:
Bisection checkpoint
---------------------
+~~~~~~~~~~~~~~~~~~~~
*Check if the feature that regressed works in the kernel you just built.*
[:ref:`... <bisecttest_bissbs>`]
@@ -1831,7 +1983,7 @@ will be for nothing.
.. _bisectlog_bisref:
Put the bisection log away
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Store Git's bisection log and the current .config file in a safe place.*
[:ref:`... <bisectlog_bissbs>`]
@@ -1851,7 +2003,7 @@ ask for it after you report the regression.
.. _revert_bisref:
Try reverting the culprit
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
*Try reverting the culprit on top of the latest codebase to see if this fixes
your regression.* [:ref:`... <revert_bissbs>`]
@@ -1869,14 +2021,20 @@ succeeds, test that kernel version instead.
[:ref:`back to step-by-step guide <revert_bissbs>`]
+Cleanup steps during and after following this guide
+---------------------------------------------------
-Supplementary tasks: cleanup during and after the bisection
------------------------------------------------------------
+ *During and after following this guide you might want or need to remove some
+ of the kernels you installed.* [:ref:`... <introclosure_bissbs>`]
+
+The steps in this section describe clean-up procedures.
+
+[:ref:`back to step-by-step guide <introclosure_bissbs>`].
.. _makeroom_bisref:
Cleaning up during the bisection
---------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*To remove one of the kernels you installed, look up its 'kernelrelease'
identifier.* [:ref:`... <makeroom_bissbs>`]
@@ -1911,13 +2069,13 @@ Now remove the boot entry for the kernel from your bootloader's configuration;
the steps to do that vary quite a bit between Linux distributions.
Note, be careful with wildcards like '*' when deleting files or directories
-for kernels manually: you might accidentally remove files of a 6.0.11 kernel
+for kernels manually: you might accidentally remove files of a 6.0.13 kernel
when all you want is to remove 6.0 or 6.0.1.
[:ref:`back to step-by-step guide <makeroom_bissbs>`]
Cleaning up after the bisection
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _finishingtouch_bisref:
@@ -1932,26 +2090,105 @@ build artifacts and the Linux sources, but will leave the Git repository
(~/linux/.git/) behind -- a simple ``git reset --hard`` thus will bring the
sources back.
-Removing the repository as well would likely be unwise at this point: there is a
-decent chance developers will ask you to build another kernel to perform
-additional tests. This is often required to debug an issue or check proposed
-fixes. Before doing so you want to run the ``git fetch mainline`` command again
-followed by ``git checkout mainline/master`` to bring your clone up to date and
-checkout the latest codebase. Then apply the patch using ``git apply
-<filename>`` or ``git am <filename>`` and build yet another kernel using the
-familiar commands.
+Removing the repository as well would likely be unwise at this point: there
+is a decent chance developers will ask you to build another kernel to
+perform additional tests -- like testing a debug patch or a proposed fix.
+Details on how to perform those can be found in the section :ref:`Optional
+tasks: test reverts, patches, or later versions <introoptional_bissbs>`.
Additional tests are also the reason why you want to keep the
~/kernel-config-working file around for a few weeks.
[:ref:`back to step-by-step guide <finishingtouch_bissbs>`]
+.. _introoptional_bisref:
-Additional reading material
-===========================
+Test reverts, patches, or later versions
+----------------------------------------
+
+ *While or after reporting a bug, you might want or potentially will be asked
+ to test reverts, patches, proposed fixes, or other versions.*
+ [:ref:`... <introoptional_bissbs>`]
+
+All the commands used in this section should be pretty straightforward, so
+there is not much to add except one thing: when setting a kernel tag as
+instructed, ensure it is not much longer than the one used in the example, as
+problems will arise if the kernelrelease identifier exceeds 63 characters.
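+
+A quick way to double-check the length of the resulting identifier before
+installing the kernel (a sketch; run it from the source directory after
+setting the tag)::
+
+  # print the number of characters in the kernelrelease identifier
+  make -s kernelrelease | awk '{ print length }'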
+
+[:ref:`back to step-by-step guide <introoptional_bissbs>`].
+
+
+Additional information
+======================
+
+.. _buildhost_bis:
+
+Build kernels on a different machine
+------------------------------------
+
+To compile kernels on another system, slightly alter the step-by-step guide's
+instructions:
+
+* Start following the guide on the machine where you want to install and test
+ the kernels later.
+
+* After executing ':ref:`Boot into the working kernel and briefly use the
+ apparently broken feature <bootworking_bissbs>`', save the list of loaded
+ modules to a file using ``lsmod > ~/test-machine-lsmod``. Then locate the
+ build configuration for the running kernel (see ':ref:`Start defining the
+ build configuration for your kernel <oldconfig_bisref>`' for hints on where
+ to find it) and store it as '~/test-machine-config-working'. Transfer both
+ files to the home directory of your build host.
+
+* Continue the guide on the build host (e.g. with ':ref:`Ensure to have enough
+ free space for building [...] <diskspace_bissbs>`').
+
+* When you reach ':ref:`Start preparing a kernel build configuration[...]
+ <oldconfig_bissbs>`': before running ``make olddefconfig`` for the first time,
+ execute the following command to base your configuration on the one from the
+ test machine's 'working' kernel::
+
+ cp ~/test-machine-config-working ~/linux/.config
+
+* During the next step to ':ref:`disable any apparently superfluous kernel
+ modules <localmodconfig_bissbs>`' use the following command instead::
-Further sources
----------------
+    yes '' | make localmodconfig LSMOD=~/test-machine-lsmod
+
+* Continue the guide, but ignore the instructions outlining how to compile,
+ install, and reboot into a kernel every time they come up. Instead build
+ like this::
+
+ cp ~/kernel-config-working .config
+ make olddefconfig &&
+ make -j $(nproc --all) targz-pkg
+
+ This will generate a gzipped tar file whose name is printed in the last
+ line shown; for example, a kernel with the kernelrelease identifier
+ '6.0.0-rc1-local-g928a87efa423' built for x86 machines usually will
+ be stored as '~/linux/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz'.
+
+ Copy that file to your test machine's home directory.
+
+* Switch to the test machine to check if you have enough space to hold another
+ kernel. Then extract the file you transferred::
+
+ sudo tar -xvzf ~/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz -C /
+
+ Afterwards :ref:`generate the initramfs and add the kernel to your boot
+ loader's configuration <install_bisref>`; on some distributions the following
+ command will take care of both these tasks::
+
+ sudo /sbin/installkernel 6.0.0-rc1-local-g928a87efa423 /boot/vmlinuz-6.0.0-rc1-local-g928a87efa423
+
+ Now reboot and ensure you started the intended kernel.
+
+This approach even works when building for another architecture: just install
+cross-compilers and add the appropriate parameters to every invocation of make
+(e.g. ``make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- [...]``).
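+
+A minimal sketch of such a cross-build for an arm64 test machine (assuming the
+aarch64 cross-toolchain is installed and ~/kernel-config-working came from that
+machine)::
+
+  cp ~/kernel-config-working .config
+  make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- olddefconfig
+  make -j $(nproc --all) ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- targz-pkg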
+
+Additional reading material
+---------------------------
* The `man page for 'git bisect' <https://git-scm.com/docs/git-bisect>`_ and
`fighting regressions with 'git bisect' <https://git-scm.com/docs/git-bisect-lk2009.html>`_
diff --git a/Documentation/arch/m68k/buddha-driver.rst b/Documentation/arch/m68k/buddha-driver.rst
index 20e401413991..5d1bc824978b 100644
--- a/Documentation/arch/m68k/buddha-driver.rst
+++ b/Documentation/arch/m68k/buddha-driver.rst
@@ -173,7 +173,7 @@ When accessing IDE registers with A6=1 (for example $84x),
the timing will always be mode 0 8-bit compatible, no matter
what you have selected in the speed register:
-781ns select, IOR/IOW after 4 clock cycles (=314ns) aktive.
+781ns select, IOR/IOW after 4 clock cycles (=314ns) active.
All the timings with a very short select-signal (the 355ns
fast accesses) depend on the accelerator card used in the
diff --git a/Documentation/arch/powerpc/dexcr.rst b/Documentation/arch/powerpc/dexcr.rst
index 615a631f51fa..ab0724212fcd 100644
--- a/Documentation/arch/powerpc/dexcr.rst
+++ b/Documentation/arch/powerpc/dexcr.rst
@@ -36,8 +36,145 @@ state for a process.
Configuration
=============
-The DEXCR is currently unconfigurable. All threads are run with the
-NPHIE aspect enabled.
+prctl
+-----
+
+A process can control its own userspace DEXCR value using the
+``PR_PPC_GET_DEXCR`` and ``PR_PPC_SET_DEXCR`` pair of
+:manpage:`prctl(2)` commands. These calls have the form::
+
+ prctl(PR_PPC_GET_DEXCR, unsigned long which, 0, 0, 0);
+ prctl(PR_PPC_SET_DEXCR, unsigned long which, unsigned long ctrl, 0, 0);
+
+The possible 'which' and 'ctrl' values are as follows. Note there is no relation
+between the 'which' value and the DEXCR aspect's index.
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 7 1
+
+ * - ``prctl()`` which
+ - Aspect name
+ - Aspect index
+
+ * - ``PR_PPC_DEXCR_SBHE``
+ - Speculative Branch Hint Enable (SBHE)
+ - 0
+
+ * - ``PR_PPC_DEXCR_IBRTPD``
+ - Indirect Branch Recurrent Target Prediction Disable (IBRTPD)
+ - 3
+
+ * - ``PR_PPC_DEXCR_SRAPD``
+ - Subroutine Return Address Prediction Disable (SRAPD)
+ - 4
+
+ * - ``PR_PPC_DEXCR_NPHIE``
+ - Non-Privileged Hash Instruction Enable (NPHIE)
+ - 5
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 8
+
+ * - ``prctl()`` ctrl
+ - Meaning
+
+ * - ``PR_PPC_DEXCR_CTRL_EDITABLE``
+ - This aspect can be configured with PR_PPC_SET_DEXCR (get only)
+
+ * - ``PR_PPC_DEXCR_CTRL_SET``
+ - This aspect is set / set this aspect
+
+ * - ``PR_PPC_DEXCR_CTRL_CLEAR``
+ - This aspect is clear / clear this aspect
+
+ * - ``PR_PPC_DEXCR_CTRL_SET_ONEXEC``
+ - This aspect will be set after exec / set this aspect after exec
+
+ * - ``PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC``
+ - This aspect will be clear after exec / clear this aspect after exec
+
+Note that
+
+* which is a plain value, not a bitmask. Aspects must be worked with individually.
+
+* ctrl is a bitmask. ``PR_PPC_GET_DEXCR`` returns both the current and onexec
+ configuration. For example, ``PR_PPC_GET_DEXCR`` may return
+ ``PR_PPC_DEXCR_CTRL_EDITABLE | PR_PPC_DEXCR_CTRL_SET |
+ PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC``. This would indicate the aspect is currently
+ set, it will be cleared when you run exec, and you can change this with the
+ ``PR_PPC_SET_DEXCR`` prctl.
+
+* The set/clear terminology refers to setting/clearing the bit in the DEXCR.
+ For example::
+
+ prctl(PR_PPC_SET_DEXCR, PR_PPC_DEXCR_IBRTPD, PR_PPC_DEXCR_CTRL_SET, 0, 0);
+
+ will set the IBRTPD aspect bit in the DEXCR, causing indirect branch prediction
+ to be disabled.
+
+* The status returned by ``PR_PPC_GET_DEXCR`` represents what value the process
+ would like applied. It does not include any alternative overrides, such as if
+ the hypervisor is enforcing the aspect be set. To see the true DEXCR state
+ software should read the appropriate SPRs directly.
+
+* The aspect state when starting a process is copied from the parent's state on
+ :manpage:`fork(2)`. The state is reset to a fixed value on
+ :manpage:`execve(2)`. The PR_PPC_SET_DEXCR prctl() can control both of these
+ values.
+
+* The ``*_ONEXEC`` controls do not change the current process's DEXCR.
+
+Use ``PR_PPC_SET_DEXCR`` with one of ``PR_PPC_DEXCR_CTRL_SET`` or
+``PR_PPC_DEXCR_CTRL_CLEAR`` to edit a given aspect.
+
+Common error codes for both getting and setting the DEXCR are as follows:
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 8
+
+ * - Error
+ - Meaning
+
+ * - ``EINVAL``
+ - The DEXCR is not supported by the kernel.
+
+ * - ``ENODEV``
+ - The aspect is not recognised by the kernel or not supported by the
+ hardware.
+
+``PR_PPC_SET_DEXCR`` may also report the following error codes:
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 8
+
+ * - Error
+ - Meaning
+
+ * - ``EINVAL``
+ - The ctrl value contains unrecognised flags.
+
+ * - ``EINVAL``
+ - The ctrl value contains mutually conflicting flags (e.g.,
+ ``PR_PPC_DEXCR_CTRL_SET | PR_PPC_DEXCR_CTRL_CLEAR``)
+
+ * - ``EPERM``
+ - This aspect cannot be modified with prctl() (check for the
+ PR_PPC_DEXCR_CTRL_EDITABLE flag with PR_PPC_GET_DEXCR).
+
+ * - ``EPERM``
+ - The process does not have sufficient privilege to perform the operation.
+ For example, clearing NPHIE on exec is a privileged operation (a process
+ can still clear its own NPHIE aspect without privileges).
+
+This interface allows a process to control its own DEXCR aspects, and also set
+the initial DEXCR value for any children in its process tree (up to the next
+child to use an ``*_ONEXEC`` control). This allows fine-grained control over the
+default value of the DEXCR, for example allowing containers to run with different
+default values.
coredump and ptrace
diff --git a/Documentation/arch/powerpc/firmware-assisted-dump.rst b/Documentation/arch/powerpc/firmware-assisted-dump.rst
index e363fc48529a..7e37aadd1f77 100644
--- a/Documentation/arch/powerpc/firmware-assisted-dump.rst
+++ b/Documentation/arch/powerpc/firmware-assisted-dump.rst
@@ -134,12 +134,12 @@ that are run. If there is dump data, then the
memory is held.
If there is no waiting dump data, then only the memory required to
-hold CPU state, HPTE region, boot memory dump, FADump header and
-elfcore header, is usually reserved at an offset greater than boot
-memory size (see Fig. 1). This area is *not* released: this region
-will be kept permanently reserved, so that it can act as a receptacle
-for a copy of the boot memory content in addition to CPU state and
-HPTE region, in the case a crash does occur.
+hold CPU state, HPTE region, boot memory dump, and FADump header is
+usually reserved at an offset greater than boot memory size (see Fig. 1).
+This area is *not* released: this region will be kept permanently
+reserved, so that it can act as a receptacle for a copy of the boot
+memory content in addition to CPU state and HPTE region, in the case
+a crash does occur.
Since this reserved memory area is used only after the system crash,
there is no point in blocking this significant chunk of memory from
@@ -153,22 +153,22 @@ that were present in CMA region::
o Memory Reservation during first kernel
- Low memory Top of memory
- 0 boot memory size |<--- Reserved dump area --->| |
- | | | Permanent Reservation | |
- V V | | V
- +-----------+-----/ /---+---+----+-------+-----+-----+----+--+
- | | |///|////| DUMP | HDR | ELF |////| |
- +-----------+-----/ /---+---+----+-------+-----+-----+----+--+
- | ^ ^ ^ ^ ^
- | | | | | |
- \ CPU HPTE / | |
- ------------------------------ | |
- Boot memory content gets transferred | |
- to reserved area by firmware at the | |
- time of crash. | |
- FADump Header |
- (meta area) |
+ Low memory Top of memory
+ 0 boot memory size |<------ Reserved dump area ----->| |
+ | | | Permanent Reservation | |
+ V V | | V
+ +-----------+-----/ /---+---+----+-----------+-------+----+-----+
+ | | |///|////| DUMP | HDR |////| |
+ +-----------+-----/ /---+---+----+-----------+-------+----+-----+
+ | ^ ^ ^ ^ ^
+ | | | | | |
+ \ CPU HPTE / | |
+ -------------------------------- | |
+ Boot memory content gets transferred | |
+ to reserved area by firmware at the | |
+ time of crash. | |
+ FADump Header |
+ (meta area) |
|
|
Metadata: This area holds a metadata structure whose
@@ -186,13 +186,20 @@ that were present in CMA region::
0 boot memory size |
| |<------------ Crash preserved area ------------>|
V V |<--- Reserved dump area --->| |
- +-----------+-----/ /---+---+----+-------+-----+-----+----+--+
- | | |///|////| DUMP | HDR | ELF |////| |
- +-----------+-----/ /---+---+----+-------+-----+-----+----+--+
- | |
- V V
- Used by second /proc/vmcore
- kernel to boot
+ +----+---+--+-----/ /---+---+----+-------+-----+-----+-------+
+ | |ELF| | |///|////| DUMP | HDR |/////| |
+ +----+---+--+-----/ /---+---+----+-------+-----+-----+-------+
+ | | | | | |
+ ----- ------------------------------ ---------------
+ \ | |
+ \ | |
+ \ | |
+ \ | ----------------------------
+ \ | /
+ \ | /
+ \ | /
+ /proc/vmcore
+
+---+
|///| -> Regions (CPU, HPTE & Metadata) marked like this in the above
@@ -200,6 +207,12 @@ that were present in CMA region::
does not have CPU & HPTE regions while Metadata region is
not supported on pSeries currently.
+ +---+
+ |ELF| -> elfcorehdr, it is created in second kernel after crash.
+ +---+
+
+ Note: Memory from 0 to the boot memory size is used by the second kernel
+
Fig. 2
@@ -353,26 +366,6 @@ TODO:
- Need to come up with the better approach to find out more
accurate boot memory size that is required for a kernel to
boot successfully when booted with restricted memory.
- - The FADump implementation introduces a FADump crash info structure
- in the scratch area before the ELF core header. The idea of introducing
- this structure is to pass some important crash info data to the second
- kernel which will help second kernel to populate ELF core header with
- correct data before it gets exported through /proc/vmcore. The current
- design implementation does not address a possibility of introducing
- additional fields (in future) to this structure without affecting
- compatibility. Need to come up with the better approach to address this.
-
- The possible approaches are:
-
- 1. Introduce version field for version tracking, bump up the version
- whenever a new field is added to the structure in future. The version
- field can be used to find out what fields are valid for the current
- version of the structure.
- 2. Reserve the area of predefined size (say PAGE_SIZE) for this
- structure and have unused area as reserved (initialized to zero)
- for future field additions.
-
- The advantage of approach 1 over 2 is we don't need to reserve extra space.
Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
diff --git a/Documentation/arch/riscv/cmodx.rst b/Documentation/arch/riscv/cmodx.rst
new file mode 100644
index 000000000000..1c0ca06b6c97
--- /dev/null
+++ b/Documentation/arch/riscv/cmodx.rst
@@ -0,0 +1,98 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================================================
+Concurrent Modification and Execution of Instructions (CMODX) for RISC-V Linux
+==============================================================================
+
+CMODX is a programming technique where a program executes instructions that were
+modified by the program itself. Instruction storage and the instruction cache
+(icache) are not guaranteed to be synchronized on RISC-V hardware. Therefore, the
+program must enforce its own synchronization with the unprivileged fence.i
+instruction.
+
+However, the default Linux ABI prohibits the use of fence.i in userspace
+applications. At any point the scheduler may migrate a task onto a new hart. If
+migration occurs after the userspace synchronized the icache and instruction
+storage with fence.i, the icache on the new hart will no longer be clean. This
+is due to the behavior of fence.i only affecting the hart that it is called on.
+Thus, the hart that the task has been migrated to may not have synchronized
+instruction storage and icache.
+
+There are two ways to solve this problem: use the riscv_flush_icache() syscall,
+or use the ``PR_RISCV_SET_ICACHE_FLUSH_CTX`` prctl() and emit fence.i in
+userspace. The syscall performs a one-off icache flushing operation. The prctl
+changes the Linux ABI to allow userspace to emit icache flushing operations.
+
+As an aside, "deferred" icache flushes can sometimes be triggered in the kernel.
+At the time of writing, this only occurs during the riscv_flush_icache() syscall
+and when the kernel uses copy_to_user_page(). These deferred flushes happen only
+when the memory map being used by a hart changes. If the prctl() context caused
+an icache flush, this deferred icache flush will be skipped as it is redundant.
+Therefore, there will be no additional flush when using the riscv_flush_icache()
+syscall inside of the prctl() context.
+
+prctl() Interface
+-----------------
+
+Call prctl() with ``PR_RISCV_SET_ICACHE_FLUSH_CTX`` as the first argument. The
+remaining arguments will be delegated to the riscv_set_icache_flush_ctx
+function detailed below.
+
+.. kernel-doc:: arch/riscv/mm/cacheflush.c
+ :identifiers: riscv_set_icache_flush_ctx
+
+Example usage:
+
+The following files are meant to be compiled and linked with each other. The
+modify_instruction() function replaces an add of zero with an add of one,
+causing the instruction sequence in get_value() to change from returning a zero
+to returning a one.
+
+cmodx.c::
+
+ #include <stdio.h>
+ #include <sys/prctl.h>
+
+ extern int get_value();
+ extern void modify_instruction();
+
+ int main()
+ {
+ int value = get_value();
+ printf("Value before cmodx: %d\n", value);
+
+ // Call prctl before first fence.i is called inside modify_instruction
+ prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
+ modify_instruction();
+ // Call prctl after final fence.i is called in process
+ prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF, PR_RISCV_SCOPE_PER_PROCESS);
+
+ value = get_value();
+ printf("Value after cmodx: %d\n", value);
+ return 0;
+ }
+
+cmodx.S::
+
+ .option norvc
+
+ .text
+ .global modify_instruction
+ modify_instruction:
+ lw a0, new_insn
+ lui a5,%hi(old_insn)
+ sw a0,%lo(old_insn)(a5)
+ fence.i
+ ret
+
+ .section modifiable, "awx"
+ .global get_value
+ get_value:
+ li a0, 0
+ old_insn:
+ addi a0, a0, 0
+ ret
+
+ .data
+ new_insn:
+ addi a0, a0, 1
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index b2bcc9eed9aa..204cd4433af5 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -188,6 +188,10 @@ The following keys are defined:
manual starting from commit 95cf1f9 ("Add changes requested by Ved
during signoff")
+ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTPAUSE`: The Zihintpause extension is
+ supported as defined in the RISC-V ISA manual starting from commit
+ d8ab5c78c207 ("Zihintpause is ratified").
+
* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
information about the selected set of processors.
diff --git a/Documentation/arch/riscv/index.rst b/Documentation/arch/riscv/index.rst
index 4dab0cb4b900..eecf347ce849 100644
--- a/Documentation/arch/riscv/index.rst
+++ b/Documentation/arch/riscv/index.rst
@@ -13,6 +13,7 @@ RISC-V architecture
patch-acceptance
uabi
vector
+ cmodx
features
diff --git a/Documentation/arch/s390/index.rst b/Documentation/arch/s390/index.rst
index 73c79bf586fd..e75a6e5d2505 100644
--- a/Documentation/arch/s390/index.rst
+++ b/Documentation/arch/s390/index.rst
@@ -8,6 +8,7 @@ s390 Architecture
cds
3270
driver-model
+ mm
monreader
qeth
s390dbf
diff --git a/Documentation/arch/s390/mm.rst b/Documentation/arch/s390/mm.rst
new file mode 100644
index 000000000000..084adad5eef9
--- /dev/null
+++ b/Documentation/arch/s390/mm.rst
@@ -0,0 +1,111 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Memory Management
+=================
+
+Virtual memory layout
+=====================
+
+.. note::
+
+ - Some aspects of the virtual memory layout setup are not
+ clarified (number of page levels, alignment, DMA memory).
+
+ - Unused gaps in the virtual memory layout could be present
+ or not - depending on how a particular system is configured.
+ No page tables are created for the unused gaps.
+
+ - The virtual memory regions are tracked or untracked by KASAN
+ instrumentation, and the KASAN shadow memory itself is created,
+ only when the CONFIG_KASAN configuration option is enabled.
+
+::
+
+ =============================================================================
+ | Physical | Virtual | VM area description
+ =============================================================================
+ +- 0 --------------+- 0 --------------+
+ | | S390_lowcore | Low-address memory
+ | +- 8 KB -----------+
+ | | |
+ | | |
+ | | ... unused gap | KASAN untracked
+ | | |
+ +- AMODE31_START --+- AMODE31_START --+ .amode31 rand. phys/virt start
+ |.amode31 text/data|.amode31 text/data| KASAN untracked
+ +- AMODE31_END ----+- AMODE31_END ----+ .amode31 rand. phys/virt end (<2GB)
+ | | |
+ | | |
+ +- __kaslr_offset_phys | kernel rand. phys start
+ | | |
+ | kernel text/data | |
+ | | |
+ +------------------+ | kernel phys end
+ | | |
+ | | |
+ | | |
+ | | |
+ +- ident_map_size -+ |
+ | |
+ | ... unused gap | KASAN untracked
+ | |
+ +- __identity_base + identity mapping start (>= 2GB)
+ | |
+ | identity | phys == virt - __identity_base
+ | mapping | virt == phys + __identity_base
+ | |
+ | | KASAN tracked
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ +---- vmemmap -----+ 'struct page' array start
+ | |
+ | virtually mapped |
+ | memory map | KASAN untracked
+ | |
+ +- __abs_lowcore --+
+ | |
+ | Absolute Lowcore | KASAN untracked
+ | |
+ +- __memcpy_real_area
+ | |
+ | Real Memory Copy| KASAN untracked
+ | |
+ +- VMALLOC_START --+ vmalloc area start
+ | | KASAN untracked or
+ | vmalloc area | KASAN shallowly populated in case
+ | | CONFIG_KASAN_VMALLOC=y
+ +- MODULES_VADDR --+ modules area start
+ | | KASAN allocated per module or
+ | modules area | KASAN shallowly populated in case
+ | | CONFIG_KASAN_VMALLOC=y
+ +- __kaslr_offset -+ kernel rand. virt start
+ | | KASAN tracked
+ | kernel text/data | phys == (kvirt - __kaslr_offset) +
+ | | __kaslr_offset_phys
+ +- kernel .bss end + kernel rand. virt end
+ | |
+ | ... unused gap | KASAN untracked
+ | |
+ +------------------+ UltraVisor Secure Storage limit
+ | |
+ | ... unused gap | KASAN untracked
+ | |
+ +KASAN_SHADOW_START+ KASAN shadow memory start
+ | |
+ | KASAN shadow | KASAN untracked
+ | |
+ +------------------+ ASCE limit
diff --git a/Documentation/arch/s390/vfio-ap.rst b/Documentation/arch/s390/vfio-ap.rst
index 929ee1c1c940..ea744cbc8687 100644
--- a/Documentation/arch/s390/vfio-ap.rst
+++ b/Documentation/arch/s390/vfio-ap.rst
@@ -380,6 +380,36 @@ matrix device.
control_domains:
A read-only file for displaying the control domain numbers assigned to the
vfio_ap mediated device.
+ ap_config:
+ A read/write file that, when written to, allows all three of the
+ vfio_ap mediated device's ap matrix masks to be replaced in one shot.
+ Three masks are given, one for adapters, one for domains, and one for
+ control domains. If the given state cannot be set then no changes are
+ made to the vfio-ap mediated device.
+
+ The format of the data written to ap_config is as follows:
+ {amask},{dmask},{cmask}\n
+
+ \n is a newline character.
+
+ amask, dmask, and cmask are masks identifying which adapters, domains,
+ and control domains should be assigned to the mediated device.
+
+ The format of a mask is as follows:
+ 0xNN..NN
+
+ Where NN..NN is 64 hexadecimal characters representing a 256-bit value.
+ The leftmost (highest order) bit represents adapter/domain 0.
+
+ For an example set of masks that represent your mdev's current
+ configuration, simply cat ap_config.
+
+ Setting an adapter or domain number greater than the maximum allowed for
+ the system will result in an error.
+
+ This attribute is intended to be used by automation. End users would be
+ better served using the respective assign/unassign attributes for
+ adapters, domains, and control domains.
* functions:
@@ -550,7 +580,7 @@ These are the steps:
following Kconfig elements selected:
* IOMMU_SUPPORT
* S390
- * ZCRYPT
+ * AP
* VFIO
* KVM
diff --git a/Documentation/arch/sparc/oradax/dax-hv-api.txt b/Documentation/arch/sparc/oradax/dax-hv-api.txt
index 7ecd0bf4957b..ef1a4c2bf08b 100644
--- a/Documentation/arch/sparc/oradax/dax-hv-api.txt
+++ b/Documentation/arch/sparc/oradax/dax-hv-api.txt
@@ -41,7 +41,7 @@ Chapter 36. Coprocessor services
submissions until they succeed; waiting for an outstanding CCB to complete is not necessary, and would
not be a guarantee that a future submission would succeed.
- The availablility of DAX coprocessor command service is indicated by the presence of the DAX virtual
+ The availability of DAX coprocessor command service is indicated by the presence of the DAX virtual
device node in the guest MD (Section 8.24.17, “Database Analytics Accelerators (DAX) virtual-device
node”).
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index 3712d81cb50c..627e23869bca 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -446,6 +446,12 @@ during mkdir.
max_threshold_occupancy is a user configurable value to determine the
occupancy at which an RMID can be freed.
+The mon_llc_occupancy_limbo tracepoint gives the precise occupancy in bytes
+for a subset of RMIDs that are not immediately available for allocation.
+This can't be relied on to produce output every second; it may be necessary
+to attempt to create an empty monitor group to force an update. Output may
+only be produced if creation of a control or monitor group fails.
+
Schemata files - general concepts
---------------------------------
Each line in the file describes one resource. The line starts with
@@ -574,7 +580,7 @@ Memory b/w domain is L3 cache.
MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...
Memory bandwidth Allocation specified in MiBps
----------------------------------------------
+----------------------------------------------
Memory bandwidth domain is L3 cache.
::
diff --git a/Documentation/arch/x86/xstate.rst b/Documentation/arch/x86/xstate.rst
index ae5c69e48b11..cec05ac464c1 100644
--- a/Documentation/arch/x86/xstate.rst
+++ b/Documentation/arch/x86/xstate.rst
@@ -138,7 +138,7 @@ Note this example does not include the sigaltstack preparation.
Dynamic features in signal frames
---------------------------------
-Dynamcally enabled features are not written to the signal frame upon signal
+Dynamically enabled features are not written to the signal frame upon signal
entry if the feature is in its initial configuration. This differs from
non-dynamic features which are always written regardless of their
configuration. Signal handlers can examine the XSAVE buffer's XSTATE_BV
diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
index d7adc6d543db..bee3b1bca9a7 100644
--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -171,14 +171,14 @@ The rule of thumb:
- RMW operations that are conditional are unordered on FAILURE,
otherwise the above rules apply.
-Except of course when an operation has an explicit ordering like:
+Except of course when a successful operation has an explicit ordering like:
{}_relaxed: unordered
{}_acquire: the R of the RMW (or atomic_read) is an ACQUIRE
{}_release: the W of the RMW (or atomic_set) is a RELEASE
Where 'unordered' is against other memory locations. Address dependencies are
-not defeated.
+not defeated. Conditional operations are still unordered on FAILURE.
Fully ordered primitives are ordered against everything prior and everything
subsequent. Therefore a fully ordered primitive is like having an smp_mb()
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index a5ab00ac0b14..00c93eb42613 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -5,7 +5,11 @@
BPF Instruction Set Architecture (ISA)
======================================
-This document specifies the BPF instruction set architecture (ISA).
+eBPF (which is no longer an acronym for anything), also commonly
+referred to as BPF, is a technology with origins in the Linux kernel
+that can run untrusted programs in a privileged context such as an
+operating system kernel. This document specifies the BPF instruction
+set architecture (ISA).
Documentation conventions
=========================
@@ -43,7 +47,7 @@ a type's signedness (`S`) and bit width (`N`), respectively.
===== =========
For example, `u32` is a type whose valid values are all the 32-bit unsigned
-numbers and `s16` is a types whose valid values are all the 16-bit signed
+numbers and `s16` is a type whose valid values are all the 16-bit signed
numbers.
Functions
@@ -108,7 +112,7 @@ conformance group means it must support all instructions in that conformance
group.
The use of named conformance groups enables interoperability between a runtime
-that executes instructions, and tools as such compilers that generate
+that executes instructions, and tools such as compilers that generate
instructions for the runtime. Thus, capability discovery in terms of
conformance groups might be done manually by users or automatically by tools.
@@ -181,10 +185,13 @@ A basic instruction is encoded as follows::
(`64-bit immediate instructions`_ reuse this field for other purposes)
**dst_reg**
- destination register number (0-10)
+ destination register number (0-10), unless otherwise specified
+ (future instructions might reuse this field for other purposes)
**offset**
- signed integer offset used with pointer arithmetic
+ signed integer offset used with pointer arithmetic, except where
+ otherwise specified (some arithmetic instructions reuse this field
+ for other purposes)
**imm**
signed integer immediate value
@@ -228,10 +235,12 @@ This is depicted in the following figure::
operation to perform, encoded as explained above
**regs**
- The source and destination register numbers, encoded as explained above
+ The source and destination register numbers (unless otherwise
+ specified), encoded as explained above
**offset**
- signed integer offset used with pointer arithmetic
+ signed integer offset used with pointer arithmetic, unless
+ otherwise specified
**imm**
signed integer immediate value
@@ -292,8 +301,9 @@ Arithmetic instructions
``ALU`` uses 32-bit wide operands while ``ALU64`` uses 64-bit wide operands for
otherwise identical operations. ``ALU64`` instructions belong to the
base64 conformance group unless noted otherwise.
-The 'code' field encodes the operation as below, where 'src' and 'dst' refer
-to the values of the source and destination registers, respectively.
+The 'code' field encodes the operation as below, where 'src' refers to the
+source operand and 'dst' refers to the value of the destination
+register.
===== ===== ======= ==========================================================
name code offset description
@@ -342,8 +352,8 @@ where '(u32)' indicates that the upper 32 bits are zeroed.
dst = dst ^ imm
-Note that most instructions have instruction offset of 0. Only three instructions
-(``SDIV``, ``SMOD``, ``MOVSX``) have a non-zero offset.
+Note that most arithmetic instructions have 'offset' set to 0. Only three instructions
+(``SDIV``, ``SMOD``, ``MOVSX``) have a non-zero 'offset'.
Division, multiplication, and modulo operations for ``ALU`` are part
of the "divmul32" conformance group, and division, multiplication, and
@@ -365,15 +375,15 @@ Note that there are varying definitions of the signed modulo operation
when the dividend or divisor are negative, where implementations often
vary by language such that Python, Ruby, etc. differ from C, Go, Java,
etc. This specification requires that signed modulo use truncated division
-(where -13 % 3 == -1) as implemented in C, Go, etc.:
+(where -13 % 3 == -1) as implemented in C, Go, etc.::
a % n = a - n * trunc(a / n)
The ``MOVSX`` instruction does a move operation with sign extension.
-``{MOVSX, X, ALU}`` :term:`sign extends<Sign Extend>` 8-bit and 16-bit operands into 32
-bit operands, and zeroes the remaining upper 32 bits.
+``{MOVSX, X, ALU}`` :term:`sign extends<Sign Extend>` 8-bit and 16-bit operands into
+32-bit operands, and zeroes the remaining upper 32 bits.
``{MOVSX, X, ALU64}`` :term:`sign extends<Sign Extend>` 8-bit, 16-bit, and 32-bit
-operands into 64 bit operands. Unlike other arithmetic instructions,
+operands into 64-bit operands. Unlike other arithmetic instructions,
``MOVSX`` is only defined for register source operands (``X``).
The ``NEG`` instruction is only defined when the source bit is clear
@@ -411,19 +421,19 @@ conformance group.
Examples:
-``{END, TO_LE, ALU}`` with imm = 16/32/64 means::
+``{END, TO_LE, ALU}`` with 'imm' = 16/32/64 means::
dst = htole16(dst)
dst = htole32(dst)
dst = htole64(dst)
-``{END, TO_BE, ALU}`` with imm = 16/32/64 means::
+``{END, TO_BE, ALU}`` with 'imm' = 16/32/64 means::
dst = htobe16(dst)
dst = htobe32(dst)
dst = htobe64(dst)
-``{END, TO_LE, ALU64}`` with imm = 16/32/64 means::
+``{END, TO_LE, ALU64}`` with 'imm' = 16/32/64 means::
dst = bswap16(dst)
dst = bswap32(dst)
@@ -438,27 +448,33 @@ otherwise identical operations, and indicates the base64 conformance
group unless otherwise specified.
The 'code' field encodes the operation as below:
-======== ===== ======= =============================== ===================================================
-code value src_reg description notes
-======== ===== ======= =============================== ===================================================
-JA 0x0 0x0 PC += offset {JA, K, JMP} only
-JA 0x0 0x0 PC += imm {JA, K, JMP32} only
+======== ===== ======= ================================= ===================================================
+code value src_reg description notes
+======== ===== ======= ================================= ===================================================
+JA 0x0 0x0 PC += offset {JA, K, JMP} only
+JA 0x0 0x0 PC += imm {JA, K, JMP32} only
JEQ 0x1 any PC += offset if dst == src
-JGT 0x2 any PC += offset if dst > src unsigned
-JGE 0x3 any PC += offset if dst >= src unsigned
+JGT 0x2 any PC += offset if dst > src unsigned
+JGE 0x3 any PC += offset if dst >= src unsigned
JSET 0x4 any PC += offset if dst & src
JNE 0x5 any PC += offset if dst != src
-JSGT 0x6 any PC += offset if dst > src signed
-JSGE 0x7 any PC += offset if dst >= src signed
-CALL 0x8 0x0 call helper function by address {CALL, K, JMP} only, see `Helper functions`_
-CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_
-CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_
-EXIT 0x9 0x0 return {CALL, K, JMP} only
-JLT 0xa any PC += offset if dst < src unsigned
-JLE 0xb any PC += offset if dst <= src unsigned
-JSLT 0xc any PC += offset if dst < src signed
-JSLE 0xd any PC += offset if dst <= src signed
-======== ===== ======= =============================== ===================================================
+JSGT 0x6 any PC += offset if dst > src signed
+JSGE 0x7 any PC += offset if dst >= src signed
+CALL 0x8 0x0 call helper function by static ID {CALL, K, JMP} only, see `Helper functions`_
+CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_
+CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_
+EXIT 0x9 0x0 return {CALL, K, JMP} only
+JLT 0xa any PC += offset if dst < src unsigned
+JLE 0xb any PC += offset if dst <= src unsigned
+JSLT 0xc any PC += offset if dst < src signed
+JSLE 0xd any PC += offset if dst <= src signed
+======== ===== ======= ================================= ===================================================
+
+where 'PC' denotes the program counter, and the offset to increment by
+is in units of 64-bit instructions relative to the instruction following
+the jump instruction. Thus 'PC += 1' skips execution of the next
+instruction if it's a basic instruction or results in undefined behavior
+if the next instruction is a 128-bit wide instruction.
The BPF program needs to store the return value into register R0 before doing an
``EXIT``.
@@ -475,7 +491,7 @@ where 's>=' indicates a signed '>=' comparison.
gotol +imm
-where 'imm' means the branch offset comes from insn 'imm' field.
+where 'imm' means the branch offset comes from the 'imm' field.
Note that there are two flavors of ``JA`` instructions. The
``JMP`` class permits a 16-bit jump offset specified by the 'offset'
@@ -493,26 +509,26 @@ Helper functions
Helper functions are a concept whereby BPF programs can call into a
set of function calls exposed by the underlying platform.
-Historically, each helper function was identified by an address
-encoded in the imm field. The available helper functions may differ
-for each program type, but address values are unique across all program types.
+Historically, each helper function was identified by a static ID
+encoded in the 'imm' field. The available helper functions may differ
+for each program type, but static IDs are unique across all program types.
Platforms that support the BPF Type Format (BTF) support identifying
-a helper function by a BTF ID encoded in the imm field, where the BTF ID
+a helper function by a BTF ID encoded in the 'imm' field, where the BTF ID
identifies the helper name and type.
Program-local functions
~~~~~~~~~~~~~~~~~~~~~~~
Program-local functions are functions exposed by the same BPF program as the
caller, and are referenced by offset from the call instruction, similar to
-``JA``. The offset is encoded in the imm field of the call instruction.
-A ``EXIT`` within the program-local function will return to the caller.
+``JA``. The offset is encoded in the 'imm' field of the call instruction.
+An ``EXIT`` within the program-local function will return to the caller.
Load and store instructions
===========================
For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the
-8-bit 'opcode' field is divided as::
+8-bit 'opcode' field is divided as follows::
+-+-+-+-+-+-+-+-+
|mode |sz |class|
@@ -580,7 +596,7 @@ instructions that transfer data between a register and memory.
dst = *(signed size *) (src + offset)
-Where size is one of: ``B``, ``H``, or ``W``, and
+Where '<size>' is one of: ``B``, ``H``, or ``W``, and
'signed size' is one of: s8, s16, or s32.
Atomic operations
@@ -662,11 +678,11 @@ src_reg pseudocode imm type dst type
======= ========================================= =========== ==============
0x0 dst = (next_imm << 32) | imm integer integer
0x1 dst = map_by_fd(imm) map fd map
-0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
-0x3 dst = var_addr(imm) variable id data pointer
-0x4 dst = code_addr(imm) integer code pointer
+0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data address
+0x3 dst = var_addr(imm) variable id data address
+0x4 dst = code_addr(imm) integer code address
0x5 dst = map_by_idx(imm) map index map
-0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
+0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data address
======= ========================================= =========== ==============
where
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d148f3e8dd57..0c2205d536b3 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -75,6 +75,8 @@ if major >= 3:
"__rcu",
"__user",
"__force",
+ "__counted_by_le",
+ "__counted_by_be",
# include/linux/compiler_attributes.h:
"__alias",
diff --git a/Documentation/core-api/dma-api-howto.rst b/Documentation/core-api/dma-api-howto.rst
index e8a55f9d61db..0bf31b6c4383 100644
--- a/Documentation/core-api/dma-api-howto.rst
+++ b/Documentation/core-api/dma-api-howto.rst
@@ -203,13 +203,33 @@ setting the DMA mask fails. In this manner, if a user of your driver reports
that performance is bad or that the device is not even detected, you can ask
them for the kernel messages to find out exactly why.
-The standard 64-bit addressing device would do something like this::
+The 24-bit addressing device would do something like this::
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24))) {
dev_warn(dev, "mydev: No suitable DMA available\n");
goto ignore_this_device;
}
+The standard 64-bit addressing device would do something like this::
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))
+
+dma_set_mask_and_coherent() never returns failure when passed DMA_BIT_MASK(64),
+so error-handling code like the following is unnecessary::
+
+ /* Wrong code */
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))
+
+Because dma_set_mask_and_coherent() cannot fail for a mask wider than 32 bits,
+the typical code simply looks like::
+
+ /* Recommended code */
+ if (support_64bit)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
If the device only supports 32-bit addressing for descriptors in the
coherent allocations, but supports full 64-bits for streaming mappings
it would look like this::
diff --git a/Documentation/core-api/entry.rst b/Documentation/core-api/entry.rst
index e12f22ab33c7..a15f9b1767a2 100644
--- a/Documentation/core-api/entry.rst
+++ b/Documentation/core-api/entry.rst
@@ -18,7 +18,7 @@ exceptions`_, `NMI and NMI-like exceptions`_.
Non-instrumentable code - noinstr
---------------------------------
-Most instrumentation facilities depend on RCU, so intrumentation is prohibited
+Most instrumentation facilities depend on RCU, so instrumentation is prohibited
for entry code before RCU starts watching and exit code after RCU stops
watching. In addition, many architectures must save and restore register state,
which means that (for example) a breakpoint in the breakpoint entry code would
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 7a3a08d81f11..89c517665763 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -102,6 +102,7 @@ more memory-management documentation in Documentation/mm/index.rst.
dma-api-howto
dma-attributes
dma-isa-lpc
+ swiotlb
mm-api
genalloc
pin_user_pages
diff --git a/Documentation/core-api/printk-index.rst b/Documentation/core-api/printk-index.rst
index 3062f37d119b..1979c5dd32fe 100644
--- a/Documentation/core-api/printk-index.rst
+++ b/Documentation/core-api/printk-index.rst
@@ -4,7 +4,7 @@
Printk Index
============
-There are many ways how to monitor the state of the system. One important
+There are many ways to monitor the state of the system. One important
source of information is the system log. It provides a lot of information,
including more or less important warnings and error messages.
@@ -101,7 +101,7 @@ their own wrappers adding __printk_index_emit().
Only few subsystem specific wrappers have been updated so far,
for example, dev_printk(). As a result, the printk formats from
-some subsystes can be missing in the printk index.
+some subsystems can be missing in the printk index.
Subsystem specific prefix
diff --git a/Documentation/core-api/swiotlb.rst b/Documentation/core-api/swiotlb.rst
new file mode 100644
index 000000000000..5ad2c9ca85bc
--- /dev/null
+++ b/Documentation/core-api/swiotlb.rst
@@ -0,0 +1,321 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+DMA and swiotlb
+===============
+
+swiotlb is a memory buffer allocator used by the Linux kernel DMA layer. It is
+typically used when a device doing DMA can't directly access the target memory
+buffer because of hardware limitations or other requirements. In such a case,
+the DMA layer calls swiotlb to allocate a temporary memory buffer that conforms
+to the limitations. The DMA is done to/from this temporary memory buffer, and
+the CPU copies the data between the temporary buffer and the original target
+memory buffer. This approach is generically called "bounce buffering", and the
+temporary memory buffer is called a "bounce buffer".
+
+Device drivers don't interact directly with swiotlb. Instead, drivers inform
+the DMA layer of the DMA attributes of the devices they are managing, and use
+the normal DMA map, unmap, and sync APIs when programming a device to do DMA.
+These APIs use the device DMA attributes and kernel-wide settings to determine
+if bounce buffering is necessary. If so, the DMA layer manages the allocation,
+freeing, and sync'ing of bounce buffers. Since the DMA attributes are per
+device, some devices in a system may use bounce buffering while others do not.
+
+Because the CPU copies data between the bounce buffer and the original target
+memory buffer, doing bounce buffering is slower than doing DMA directly to the
+original memory buffer, and it consumes more CPU resources. So it is used only
+when necessary for providing DMA functionality.
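+
+For illustration of the driver-side view only (swiotlb itself is never called
+directly), here is a minimal sketch of the map/unmap pattern under which the
+DMA layer may transparently bounce a buffer. mydev_start_rx() is a placeholder
+for device-specific programming, not a real API, and completion handling is
+omitted::
+
+    #include <linux/dma-mapping.h>
+    #include <linux/errno.h>
+
+    /* Placeholder: program the device to DMA into 'dma' (assumption). */
+    extern void mydev_start_rx(dma_addr_t dma, size_t len);
+
+    static int mydev_receive(struct device *dev, void *buf, size_t len)
+    {
+        dma_addr_t dma;
+
+        /* May allocate a bounce buffer and copy 'buf' into it. */
+        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+        if (dma_mapping_error(dev, dma))
+            return -ENOMEM;
+
+        mydev_start_rx(dma, len);
+
+        /* ... wait for the device to signal completion (omitted) ... */
+
+        /* Copies the bounce buffer back into 'buf' if one was used. */
+        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
+        return 0;
+    }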
+
+Usage Scenarios
+---------------
+swiotlb was originally created to handle DMA for devices with addressing
+limitations. As physical memory sizes grew beyond 4 GiB, some devices could
+only provide 32-bit DMA addresses. By allocating bounce buffer memory below
+the 4 GiB line, these devices with addressing limitations could still work and
+do DMA.
+
+More recently, Confidential Computing (CoCo) VMs have the guest VM's memory
+encrypted by default, and the memory is not accessible by the host hypervisor
+and VMM. For the host to do I/O on behalf of the guest, the I/O must be
+directed to guest memory that is unencrypted. CoCo VMs set a kernel-wide option
+to force all DMA I/O to use bounce buffers, and the bounce buffer memory is set
+up as unencrypted. The host does DMA I/O to/from the bounce buffer memory, and
+the Linux kernel DMA layer does "sync" operations to cause the CPU to copy the
+data to/from the original target memory buffer. The CPU copying bridges between
+the unencrypted and the encrypted memory. This use of bounce buffers allows
+device drivers to "just work" in a CoCo VM, with no modifications
+needed to handle the memory encryption complexity.
+
+Other edge case scenarios arise for bounce buffers. For example, when IOMMU
+mappings are set up for a DMA operation to/from a device that is considered
+"untrusted", the device should be given access only to the memory containing
+the data being transferred. But if that memory occupies only part of an IOMMU
+granule, other parts of the granule may contain unrelated kernel data. Since
+IOMMU access control is per-granule, the untrusted device can gain access to
+the unrelated kernel data. This problem is solved by bounce buffering the DMA
+operation and ensuring that unused portions of the bounce buffers do not
+contain any unrelated kernel data.
+
+Core Functionality
+------------------
+The primary swiotlb APIs are swiotlb_tbl_map_single() and
+swiotlb_tbl_unmap_single(). The "map" API allocates a bounce buffer of a
+specified size in bytes and returns the physical address of the buffer. The
+buffer memory is physically contiguous. The expectation is that the DMA layer
+maps the physical memory address to a DMA address, and returns the DMA address
+to the driver for programming into the device. If a DMA operation specifies
+multiple memory buffer segments, a separate bounce buffer must be allocated for
+each segment. swiotlb_tbl_map_single() always does a "sync" operation (i.e., a
+CPU copy) to initialize the bounce buffer to match the contents of the original
+buffer.
+
+swiotlb_tbl_unmap_single() does the reverse. If the DMA operation might have
+updated the bounce buffer memory and DMA_ATTR_SKIP_CPU_SYNC is not set, the
+unmap does a "sync" operation to cause a CPU copy of the data from the bounce
+buffer back to the original buffer. Then the bounce buffer memory is freed.
+
+swiotlb also provides "sync" APIs that correspond to the dma_sync_*() APIs that
+a driver may use when control of a buffer transitions between the CPU and the
+device. The swiotlb "sync" APIs cause a CPU copy of the data between the
+original buffer and the bounce buffer. Like the dma_sync_*() APIs, the swiotlb
+"sync" APIs support doing a partial sync, where only a subset of the bounce
+buffer is copied to/from the original buffer.
+
+Core Functionality Constraints
+------------------------------
+The swiotlb map/unmap/sync APIs must operate without blocking, as they are
+called by the corresponding DMA APIs which may run in contexts that cannot
+block. Hence the default memory pool for swiotlb allocations must be
+pre-allocated at boot time (but see Dynamic swiotlb below). Because swiotlb
+allocations must be physically contiguous, the entire default memory pool is
+allocated as a single contiguous block.
+
+The need to pre-allocate the default swiotlb pool creates a boot-time tradeoff.
+The pool should be large enough to ensure that bounce buffer requests can
+always be satisfied, as the non-blocking requirement means requests can't wait
+for space to become available. But a large pool potentially wastes memory, as
+this pre-allocated memory is not available for other uses in the system. The
+tradeoff is particularly acute in CoCo VMs that use bounce buffers for all DMA
+I/O. These VMs use a heuristic to set the default pool size to ~6% of memory,
+with a max of 1 GiB, which has the potential to be very wasteful of memory.
+Conversely, the heuristic might produce a size that is insufficient, depending
+on the I/O patterns of the workload in the VM. The dynamic swiotlb feature
+described below can help, but has limitations. Better management of the swiotlb
+default memory pool size remains an open issue.
+
+A single allocation from swiotlb is limited to IO_TLB_SIZE * IO_TLB_SEGSIZE
+bytes, which is 256 KiB with current definitions. When a device's DMA settings
+are such that the device might use swiotlb, the maximum size of a DMA segment
+must be limited to that 256 KiB. This value is communicated to higher-level
+kernel code via dma_max_mapping_size() and swiotlb_max_mapping_size(). If the
+higher-level code fails to account for this limit, it may make requests that
+are too large for swiotlb, and get a "swiotlb full" error.
+
+A key device DMA setting is "min_align_mask", which is a power of 2 minus 1
+so that some number of low order bits are set, or it may be zero. swiotlb
+allocations ensure these min_align_mask bits of the physical address of the
+bounce buffer match the same bits in the address of the original buffer. When
+min_align_mask is non-zero, it may produce an "alignment offset" in the address
+of the bounce buffer that slightly reduces the maximum size of an allocation.
+This potential alignment offset is reflected in the value returned by
+swiotlb_max_mapping_size(), which can show up in places like
+/sys/block/<device>/queue/max_sectors_kb. For example, if a device does not use
+swiotlb, max_sectors_kb might be 512 KiB or larger. If a device might use
+swiotlb, max_sectors_kb will be 256 KiB. When min_align_mask is non-zero,
+max_sectors_kb might be even smaller, such as 252 KiB.
+
+swiotlb_tbl_map_single() also takes an "alloc_align_mask" parameter. This
+parameter specifies the allocation of bounce buffer space must start at a
+physical address with the alloc_align_mask bits set to zero. But the actual
+bounce buffer might start at a larger address if min_align_mask is non-zero.
+Hence there may be pre-padding space that is allocated prior to the start of
+the bounce buffer. Similarly, the end of the bounce buffer is rounded up to an
+alloc_align_mask boundary, potentially resulting in post-padding space. Any
+pre-padding or post-padding space is not initialized by swiotlb code. The
+"alloc_align_mask" parameter is used by IOMMU code when mapping for untrusted
+devices. It is set to the granule size - 1 so that the bounce buffer is
+allocated entirely from granules that are not used for any other purpose.
+
+Data structures concepts
+------------------------
+Memory used for swiotlb bounce buffers is allocated from overall system memory
+as one or more "pools". The default pool is allocated during system boot with a
+default size of 64 MiB. The default pool size may be modified with the
+"swiotlb=" kernel boot line parameter. The default size may also be adjusted
+due to other conditions, such as running in a CoCo VM, as described above. If
+CONFIG_SWIOTLB_DYNAMIC is enabled, additional pools may be allocated later in
+the life of the system. Each pool must be a contiguous range of physical
+memory. The default pool is allocated below the 4 GiB physical address line so
+it works for devices that can only address 32-bits of physical memory (unless
+architecture-specific code provides the SWIOTLB_ANY flag). In a CoCo VM, the
+pool memory must be decrypted before swiotlb is used.
+
+Each pool is divided into "slots" of size IO_TLB_SIZE, which is 2 KiB with
+current definitions. IO_TLB_SEGSIZE contiguous slots (128 slots) constitute
+what might be called a "slot set". When a bounce buffer is allocated, it
+occupies one or more contiguous slots. A slot is never shared by multiple
+bounce buffers. Furthermore, a bounce buffer must be allocated from a single
+slot set, which leads to the maximum bounce buffer size being IO_TLB_SIZE *
+IO_TLB_SEGSIZE. Multiple smaller bounce buffers may co-exist in a single slot
+set if the alignment and size constraints can be met.
+
+Slots are also grouped into "areas", with the constraint that a slot set exists
+entirely in a single area. Each area has its own spin lock that must be held to
+manipulate the slots in that area. The division into areas avoids contending
+for a single global spin lock when swiotlb is heavily used, such as in a CoCo
+VM. The number of areas defaults to the number of CPUs in the system for
+maximum parallelism, but since an area can't be smaller than IO_TLB_SEGSIZE
+slots, it might be necessary to assign multiple CPUs to the same area. The
+number of areas can also be set via the "swiotlb=" kernel boot parameter.
+
+When allocating a bounce buffer, if the area associated with the calling CPU
+does not have enough free space, areas associated with other CPUs are tried
+sequentially. For each area tried, the area's spin lock must be obtained before
+trying an allocation, so contention may occur if swiotlb is relatively busy
+overall. But an allocation request does not fail unless all areas do not have
+enough free space.
+
+IO_TLB_SIZE, IO_TLB_SEGSIZE, and the number of areas must all be powers of 2 as
+the code uses shifting and bit masking to do many of the calculations. The
+number of areas is rounded up to a power of 2 if necessary to meet this
+requirement.
+
+The default pool is allocated with PAGE_SIZE alignment. If an alloc_align_mask
+argument to swiotlb_tbl_map_single() specifies a larger alignment, one or more
+initial slots in each slot set might not meet the alloc_align_mask criterion.
+Because a bounce buffer allocation can't cross a slot set boundary, eliminating
+those initial slots effectively reduces the max size of a bounce buffer.
+Currently, there's no problem because alloc_align_mask is set based on IOMMU
+granule size, and granules cannot be larger than PAGE_SIZE. But if that were to
+change in the future, the initial pool allocation might need to be done with
+alignment larger than PAGE_SIZE.
+
+Dynamic swiotlb
+---------------
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
+the amount of memory available for allocation as bounce buffers. If a bounce
+buffer request fails due to lack of available space, an asynchronous background
+task is kicked off to allocate memory from general system memory and turn it
+into a swiotlb pool. Creating an additional pool must be done asynchronously
+because the memory allocation may block, and as noted above, swiotlb requests
+are not allowed to block. Once the background task is kicked off, the bounce
+buffer request creates a "transient pool" to avoid returning a "swiotlb full"
+error. A transient pool has the size of the bounce buffer request, and is
+deleted when the bounce buffer is freed. Memory for this transient pool comes
+from the general system memory atomic pool so that creation does not block.
+Creating a transient pool has relatively high cost, particularly in a CoCo VM
+where the memory must be decrypted, so it is done only as a stopgap until the
+background task can add another non-transient pool.
+
+Adding a dynamic pool has limitations. Like with the default pool, the memory
+must be physically contiguous, so the size is limited to MAX_PAGE_ORDER pages
+(e.g., 4 MiB on a typical x86 system). Due to memory fragmentation, a max size
+allocation may not be available. The dynamic pool allocator tries smaller sizes
+until it succeeds, but with a minimum size of 1 MiB. Given sufficient system
+memory fragmentation, dynamically adding a pool might not succeed at all.
+
+The number of areas in a dynamic pool may be different from the number of areas
+in the default pool. Because the new pool size is typically a few MiB at most,
+the number of areas will likely be smaller. For example, with a new pool size
+of 4 MiB and the 256 KiB minimum area size, only 16 areas can be created. If
+the system has more than 16 CPUs, multiple CPUs must share an area, creating
+more lock contention.
+
+New pools added via dynamic swiotlb are linked together in a linear list.
+swiotlb code frequently must search for the pool containing a particular
+swiotlb physical address, so that search is linear and not performant with a
+large number of dynamic pools. The data structures could be improved for
+faster searches.
+
+Overall, dynamic swiotlb works best for small configurations with relatively
+few CPUs. It allows the default swiotlb pool to be smaller so that memory is
+not wasted, with dynamic pools making more space available if needed (as long
+as fragmentation isn't an obstacle). It is less useful for large CoCo VMs.
+
+Data Structure Details
+----------------------
+swiotlb is managed with four primary data structures: io_tlb_mem, io_tlb_pool,
+io_tlb_area, and io_tlb_slot. io_tlb_mem describes a swiotlb memory allocator,
+which includes the default memory pool and any dynamic or transient pools
+linked to it. Limited statistics on swiotlb usage are kept per memory allocator
+and are stored in this data structure. These statistics are available under
+/sys/kernel/debug/swiotlb when CONFIG_DEBUG_FS is set.
+
+io_tlb_pool describes a memory pool, either the default pool, a dynamic pool,
+or a transient pool. The description includes the start and end addresses of
+the memory in the pool, a pointer to an array of io_tlb_area structures, and a
+pointer to an array of io_tlb_slot structures that are associated with the pool.
+
+io_tlb_area describes an area. The primary field is the spin lock used to
+serialize access to slots in the area. The io_tlb_area array for a pool has an
+entry for each area, and is accessed using a 0-based area index derived from the
+calling processor ID. Areas exist solely to allow parallel access to swiotlb
+from multiple CPUs.
+
+io_tlb_slot describes an individual memory slot in the pool, with size
+IO_TLB_SIZE (2 KiB currently). The io_tlb_slot array is indexed by the slot
+index computed from the bounce buffer address relative to the starting memory
+address of the pool. The size of struct io_tlb_slot is 24 bytes, so the
+overhead is about 1% of the slot size.
+
+The io_tlb_slot array is designed to meet several requirements. First, the DMA
+APIs and the corresponding swiotlb APIs use the bounce buffer address as the
+identifier for a bounce buffer. This address is returned by
+swiotlb_tbl_map_single(), and then passed as an argument to
+swiotlb_tbl_unmap_single() and the swiotlb_sync_*() functions. The original
+memory buffer address obviously must be passed as an argument to
+swiotlb_tbl_map_single(), but it is not passed to the other APIs. Consequently,
+swiotlb data structures must save the original memory buffer address so that it
+can be used when doing sync operations. This original address is saved in the
+io_tlb_slot array.
+
+Second, the io_tlb_slot array must handle partial sync requests. In such cases,
+the argument to swiotlb_sync_*() is not the address of the start of the bounce
+buffer but an address somewhere in the middle of the bounce buffer, and the
+address of the start of the bounce buffer isn't known to swiotlb code. But
+swiotlb code must be able to calculate the corresponding original memory buffer
+address to do the CPU copy dictated by the "sync". So an adjusted original
+memory buffer address is populated into the struct io_tlb_slot for each slot
+occupied by the bounce buffer. An adjusted "alloc_size" of the bounce buffer is
+also recorded in each struct io_tlb_slot so a sanity check can be performed on
+the size of the "sync" operation. The "alloc_size" field is not used except for
+the sanity check.
+
+Third, the io_tlb_slot array is used to track available slots. The "list" field
+in struct io_tlb_slot records how many contiguous available slots exist starting
+at that slot. A "0" indicates that the slot is occupied. A value of "1"
+indicates only the current slot is available. A value of "2" indicates the
+current slot and the next slot are available, etc. The maximum value is
+IO_TLB_SEGSIZE, which can appear in the first slot in a slot set, and indicates
+that the entire slot set is available. These values are used when searching for
+available slots to use for a new bounce buffer. They are updated when allocating
+a new bounce buffer and when freeing a bounce buffer. At pool creation time, the
+"list" field is initialized to IO_TLB_SEGSIZE down to 1 for the slots in every
+slot set.
+
+Fourth, the io_tlb_slot array keeps track of any "padding slots" allocated to
+meet alloc_align_mask requirements described above. When
+swiotlb_tbl_map_single() allocates bounce buffer space to meet alloc_align_mask
+requirements, it may allocate pre-padding space across zero or more slots. But
+when swiotlb_tbl_unmap_single() is called with the bounce buffer address, the
+alloc_align_mask value that governed the allocation, and therefore the
+allocation of any padding slots, is not known. The "pad_slots" field records
+the number of padding slots so that swiotlb_tbl_unmap_single() can free them.
+The "pad_slots" value is recorded only in the first non-padding slot allocated
+to the bounce buffer.
+
+Restricted pools
+----------------
+The swiotlb machinery is also used for "restricted pools", which are pools of
+memory separate from the default swiotlb pool, and that are dedicated for DMA
+use by a particular device. Restricted pools provide a level of DMA memory
+protection on systems with limited hardware protection capabilities, such as
+those lacking an IOMMU. Such usage is specified by DeviceTree entries and
+requires that CONFIG_DMA_RESTRICTED_POOL is set. Each restricted pool is based
+on its own io_tlb_mem data structure that is independent of the main swiotlb
+io_tlb_mem.
+
+Restricted pools add swiotlb_alloc() and swiotlb_free() APIs, which are called
+from the dma_alloc_*() and dma_free_*() APIs. The swiotlb_alloc/free() APIs
+allocate/free slots from/to the restricted pool directly and do not go through
+swiotlb_tbl_map/unmap_single().
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index ed73c612174d..bcc370c876be 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -671,7 +671,7 @@ configuration, worker pools and how workqueues map to the pools: ::
events_unbound unbound 9 9 10 10 8
events_freezable percpu 0 2 4 6
events_power_efficient percpu 0 2 4 6
- events_freezable_power_ percpu 0 2 4 6
+ events_freezable_pwr_ef percpu 0 2 4 6
rcu_gp percpu 0 2 4 6
rcu_par_gp percpu 0 2 4 6
slub_flushwq percpu 0 2 4 6
@@ -694,7 +694,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38306 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29598 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
total infl CPUtime CPUhog CMW/RPR mayday rescued
@@ -704,7 +704,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38322 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29603 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
...
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index 127968995847..a9fac978a525 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -906,6 +906,20 @@ Macros, Attributes and Symbols
See: https://lore.kernel.org/lkml/1399671106.2912.21.camel@joe-AO725/
+ **MACRO_ARG_UNUSED**
+ If a function-like macro does not use one of its parameters, it might
+ result in a build warning. We recommend using static inline functions
+ instead of such macros.
+ For example, for a macro such as the one below::
+
+ #define test(a) do { } while (0)
+
+ there would be a warning like below::
+
+ WARNING: Argument 'a' is not used in function-like macro.
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#macros-enums-and-rtl
+
**SINGLE_STATEMENT_DO_WHILE_MACRO**
For the multi-statement macros, it is necessary to use the do-while
loop to avoid unpredictable code paths. The do-while loop helps to
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index 94b6802ab0ab..02143f060b22 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -91,6 +91,16 @@ the below options are available:
behaviour when encountering a data race is deemed safe. Please see
`"Marking Shared-Memory Accesses" in the LKMM`_ for more information.
+* Similar to ``data_race(...)``, the type qualifier ``__data_racy`` can be used
+ to document that all data races due to accesses to a variable are intended
+ and should be ignored by KCSAN::
+
+ struct foo {
+ ...
+ int __data_racy stats_counter;
+ ...
+ };
+
* Disabling data race detection for entire functions can be accomplished by
using the function attribute ``__no_kcsan``::
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index ff10dc6eef5d..dcf634e411bd 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -183,7 +183,7 @@ expected time it takes to run a test. If you have control over the systems
which will run the tests you can configure a test runner on those systems to
use a greater or lower timeout on the command line as with the `-o` or
the `--override-timeout` argument. For example to use 165 seconds instead
-one would use:
+one would use::
$ ./run_kselftest.sh --override-timeout 165
diff --git a/Documentation/dev-tools/testing-overview.rst b/Documentation/dev-tools/testing-overview.rst
index 0aaf6ea53608..1619e5e5cc9c 100644
--- a/Documentation/dev-tools/testing-overview.rst
+++ b/Documentation/dev-tools/testing-overview.rst
@@ -104,6 +104,8 @@ Some of these tools are listed below:
KASAN and can be used in production. See Documentation/dev-tools/kfence.rst
* lockdep is a locking correctness validator. See
Documentation/locking/lockdep-design.rst
+* Runtime Verification (RV) supports checking specific behaviours for a given
+ subsystem. See Documentation/trace/rv/runtime-verification.rst
* There are several other pieces of debug instrumentation in the kernel, many
of which can be found in lib/Kconfig.debug
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 5e08e3a6a97b..bf7d64632e20 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -25,23 +25,25 @@ quiet_cmd_extract_ex = DTEX $@
$(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
$(call if_changed,extract_ex)
-find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \
+find_all_cmd = find $(src) \( -name '*.yaml' ! \
-name 'processed-schema*' \)
find_cmd = $(find_all_cmd) | \
sed 's|^$(srctree)/||' | \
grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | \
sed 's|^|$(srctree)/|'
-CHK_DT_DOCS := $(shell $(find_cmd))
+CHK_DT_EXAMPLES := $(patsubst $(srctree)/%.yaml,%.example.dtb, $(shell $(find_cmd)))
quiet_cmd_yamllint = LINT $(src)
cmd_yamllint = ($(find_cmd) | \
xargs -n200 -P$$(nproc) \
- $(DT_SCHEMA_LINT) -f parsable -c $(srctree)/$(src)/.yamllint >&2) || true
+ $(DT_SCHEMA_LINT) -f parsable -c $(src)/.yamllint >&2) \
+ && touch $@ || true
-quiet_cmd_chk_bindings = CHKDT $@
+quiet_cmd_chk_bindings = CHKDT $(src)
cmd_chk_bindings = ($(find_cmd) | \
- xargs -n200 -P$$(nproc) $(DT_DOC_CHECKER) -u $(srctree)/$(src)) || true
+ xargs -n200 -P$$(nproc) $(DT_DOC_CHECKER) -u $(src)) \
+ && touch $@ || true
quiet_cmd_mk_schema = SCHEMA $@
cmd_mk_schema = f=$$(mktemp) ; \
@@ -49,12 +51,6 @@ quiet_cmd_mk_schema = SCHEMA $@
$(DT_MK_SCHEMA) -j $(DT_MK_SCHEMA_FLAGS) @$$f > $@ ; \
rm -f $$f
-define rule_chkdt
- $(if $(DT_SCHEMA_LINT),$(call cmd,yamllint),)
- $(call cmd,chk_bindings)
- $(call cmd,mk_schema)
-endef
-
DT_DOCS = $(patsubst $(srctree)/%,%,$(shell $(find_all_cmd)))
override DTC_FLAGS := \
@@ -64,12 +60,19 @@ override DTC_FLAGS := \
-Wno-unique_unit_address \
-Wunique_unit_address_if_enabled
-$(obj)/processed-schema.json: $(DT_DOCS) $(src)/.yamllint check_dtschema_version FORCE
- $(call if_changed_rule,chkdt)
+$(obj)/processed-schema.json: $(DT_DOCS) check_dtschema_version FORCE
+ $(call if_changed,mk_schema)
+
+targets += .dt-binding.checked .yamllint.checked
+$(obj)/.yamllint.checked: $(DT_DOCS) $(src)/.yamllint FORCE
+ $(if $(DT_SCHEMA_LINT),$(call if_changed,yamllint),)
+
+$(obj)/.dt-binding.checked: $(DT_DOCS) FORCE
+ $(call if_changed,chk_bindings)
always-y += processed-schema.json
-always-$(CHECK_DT_BINDING) += $(patsubst $(srctree)/$(src)/%.yaml,%.example.dts, $(CHK_DT_DOCS))
-always-$(CHECK_DT_BINDING) += $(patsubst $(srctree)/$(src)/%.yaml,%.example.dtb, $(CHK_DT_DOCS))
+targets += $(patsubst $(obj)/%,%, $(CHK_DT_EXAMPLES))
+targets += $(patsubst $(obj)/%.dtb,%.dts, $(CHK_DT_EXAMPLES))
# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
# build artifacts here before they are processed by scripts/Makefile.clean
@@ -78,3 +81,6 @@ clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \
dt_compatible_check: $(obj)/processed-schema.json
$(Q)$(srctree)/scripts/dtc/dt-extract-compatibles $(srctree) | xargs dt-check-compatible -v -s $<
+
+PHONY += dt_binding_check
+dt_binding_check: $(obj)/.dt-binding.checked $(obj)/.yamllint.checked $(CHK_DT_EXAMPLES)
diff --git a/Documentation/devicetree/bindings/access-controllers/access-controllers.yaml b/Documentation/devicetree/bindings/access-controllers/access-controllers.yaml
new file mode 100644
index 000000000000..99e2865f0e46
--- /dev/null
+++ b/Documentation/devicetree/bindings/access-controllers/access-controllers.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/access-controllers/access-controllers.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Domain Access Controllers
+
+maintainers:
+ - Oleksii Moisieiev <oleksii_moisieiev@epam.com>
+
+description: |+
+ Common access controllers properties
+
+ Access controllers are in charge of stating which of the hardware blocks under
+ their responsibility (their domain) can be accessed by which compartment. A
+ compartment can be a cluster of CPUs (or coprocessors), a range of addresses
+ or a group of hardware blocks. An access controller's domain is the set of
+ resources covered by the access controller.
+
+ This device tree binding can be used to bind devices to their access
+ controller provided by access-controllers property. In this case, the device
+ is a consumer and the access controller is the provider.
+
+ An access controller can be represented by any node in the device tree and
+ can provide one or more configuration parameters, needed to control parameters
+ of the consumer device. A consumer node can refer to the provider by phandle
+ and a set of phandle arguments, specified by '#access-controller-cells'
+ property in the access controller node.
+
+ Access controllers are typically used to set/read the permissions of a
+ hardware block and grant access to it; exactly what can be done depends on
+ the access controller. The capabilities of each access controller are
+ defined by the binding of the access controller device.
+
+ Each node can be a consumer of several access controllers.
+
+# always select the core schema
+select: true
+
+properties:
+ "#access-controller-cells":
+ description:
+ Number of cells in an access-controllers specifier;
+ Can be any value as specified by device tree binding documentation
+ of a particular provider. The node is an access controller.
+
+ access-controller-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ A list of access-controllers names, sorted in the same order as
+ access-controllers entries. Consumer drivers will use
+ access-controller-names to match with existing access-controllers entries.
+
+ access-controllers:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ A list of access controller specifiers, as defined by the
+ bindings of the access-controllers provider.
+
+additionalProperties: true
+
+examples:
+ - |
+ clock_controller: access-controllers@50000 {
+ reg = <0x50000 0x400>;
+ #access-controller-cells = <2>;
+ };
+
+ bus_controller: bus@60000 {
+ reg = <0x60000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ #access-controller-cells = <3>;
+
+ uart4: serial@60100 {
+ reg = <0x60100 0x400>;
+ clocks = <&clk_serial>;
+ access-controllers = <&clock_controller 1 2>,
+ <&bus_controller 1 3 5>;
+ access-controller-names = "clock", "bus";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-sdram-controller.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-sdram-controller.txt
deleted file mode 100644
index 77ca635765e1..000000000000
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-sdram-controller.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Altera SOCFPGA SDRAM Controller
-
-Required properties:
-- compatible : Should contain "altr,sdr-ctl" and "syscon".
- syscon is required by the Altera SOCFPGA SDRAM EDAC.
-- reg : Should contain 1 register range (address and length)
-
-Example:
- sdr: sdr@ffc25000 {
- compatible = "altr,sdr-ctl", "syscon";
- reg = <0xffc25000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index 949537cea6be..a374b98080fe 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -157,6 +157,7 @@ properties:
items:
- enum:
- bananapi,bpi-cm4io
+ - mntre,reform2-cm4
- const: bananapi,bpi-cm4
- const: amlogic,a311d
- const: amlogic,g12b
@@ -201,6 +202,18 @@ properties:
- amlogic,ad402
- const: amlogic,a1
+ - description: Boards with the Amlogic A4 A113L2 SoC
+ items:
+ - enum:
+ - amlogic,ba400
+ - const: amlogic,a4
+
+ - description: Boards with the Amlogic A5 A113X2 SoC
+ items:
+ - enum:
+ - amlogic,av400
+ - const: amlogic,a5
+
- description: Boards with the Amlogic C3 C302X/C308L SoC
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/apm/scu.txt b/Documentation/devicetree/bindings/arm/apm/scu.txt
deleted file mode 100644
index b45be06625fd..000000000000
--- a/Documentation/devicetree/bindings/arm/apm/scu.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-APM X-GENE SoC series SCU Registers
-
-This system clock unit contain various register that control block resets,
-clock enable/disables, clock divisors and other deepsleep registers.
-
-Properties:
- - compatible : should contain two values. First value must be:
- - "apm,xgene-scu"
- second value must be always "syscon".
-
- - reg : offset and length of the register set.
-
-Example :
- scu: system-clk-controller@17000000 {
- compatible = "apm,xgene-scu","syscon";
- reg = <0x0 0x17000000 0x0 0x400>;
- };
diff --git a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
index 749ee54a3ff8..95113df178cc 100644
--- a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
+++ b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
@@ -35,7 +35,10 @@ properties:
- ampere,mtjade-bmc
- aspeed,ast2500-evb
- asrock,e3c246d4i-bmc
+ - asrock,e3c256d4i-bmc
- asrock,romed8hm3-bmc
+ - asrock,spc621d8hm3-bmc
+ - asrock,x570d4u-bmc
- bytedance,g220a-bmc
- facebook,cmm-bmc
- facebook,minipack-bmc
@@ -74,15 +77,18 @@ properties:
- ampere,mtmitchell-bmc
- aspeed,ast2600-evb
- aspeed,ast2600-evb-a1
+ - asus,x4tf-bmc
- facebook,bletchley-bmc
- facebook,cloudripper-bmc
- facebook,elbert-bmc
- facebook,fuji-bmc
- facebook,greatlakes-bmc
+ - facebook,harma-bmc
- facebook,minerva-cmc
- facebook,yosemite4-bmc
- ibm,everest-bmc
- ibm,rainier-bmc
+ - ibm,system1-bmc
- ibm,tacoma-bmc
- inventec,starscream-bmc
- inventec,transformer-bmc
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml
index 4cc4e6754681..d925e7a3b5ef 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml
@@ -53,6 +53,7 @@ properties:
- description: BCM4709 based boards
items:
- enum:
+ - asus,rt-ac3200
- asus,rt-ac87u
- buffalo,wxr-1900dhp
- linksys,ea9200
@@ -67,6 +68,7 @@ properties:
items:
- enum:
- asus,rt-ac3100
+ - asus,rt-ac5300
- asus,rt-ac88u
- dlink,dir-885l
- dlink,dir-890l
diff --git a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml
index 39e3c248f5b7..1f84407a73e4 100644
--- a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml
@@ -46,6 +46,30 @@ properties:
- compatible
- "#clock-cells"
+ gpio:
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ const: raspberrypi,firmware-gpio
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+ description:
+ The first cell is the pin number, and the second cell is used to
+ specify the gpio polarity (GPIO_ACTIVE_HIGH or GPIO_ACTIVE_LOW).
+
+ gpio-line-names:
+ minItems: 8
+
+ required:
+ - compatible
+ - gpio-controller
+ - "#gpio-cells"
+
reset:
type: object
additionalProperties: false
@@ -96,6 +120,12 @@ examples:
#clock-cells = <1>;
};
+ expgpio: gpio {
+ compatible = "raspberrypi,firmware-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
reset: reset {
compatible = "raspberrypi,firmware-reset";
#reset-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 0027201e19f8..6d185d09cb6a 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -813,6 +813,14 @@ properties:
- const: tq,imx6ull-tqma6ull2l # MCIMX6Y2, LGA SoM variant
- const: fsl,imx6ull
+ - description: Seeed Studio i.MX6ULL SoM on dev boards
+ items:
+ - enum:
+ - seeed,imx6ull-seeed-npi-emmc
+ - seeed,imx6ull-seeed-npi-nand
+ - const: seeed,imx6ull-seeed-npi
+ - const: fsl,imx6ull
+
- description: i.MX6ULZ based Boards
items:
- enum:
@@ -1050,6 +1058,7 @@ properties:
- enum:
- beacon,imx8mp-beacon-kit # i.MX8MP Beacon Development Kit
- dmo,imx8mp-data-modul-edm-sbc # i.MX8MP eDM SBC
+ - emcraft,imx8mp-navqp # i.MX8MP Emcraft Systems NavQ+ Kit
- fsl,imx8mp-evk # i.MX8MP EVK Board
- gateworks,imx8mp-gw71xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw72xx-2x # i.MX8MP Gateworks Board
@@ -1218,7 +1227,6 @@ properties:
- enum:
- einfochips,imx8qxp-ai_ml # i.MX8QXP AI_ML Board
- fsl,imx8qxp-mek # i.MX8QXP MEK Board
- - toradex,colibri-imx8x # Colibri iMX8X Modules
- const: fsl,imx8qxp
- description: i.MX8DXL based Boards
@@ -1227,7 +1235,7 @@ properties:
- fsl,imx8dxl-evk # i.MX8DXL EVK Board
- const: fsl,imx8dxl
- - description: i.MX8QXP Boards with Toradex Colibri iMX8X Modules
+ - description: i.MX8QXP/i.MX8DX Boards with Toradex Colibri iMX8X Modules
items:
- enum:
- toradex,colibri-imx8x-aster # Colibri iMX8X Module on Aster Board
@@ -1235,7 +1243,9 @@ properties:
- toradex,colibri-imx8x-iris # Colibri iMX8X Module on Iris Board
- toradex,colibri-imx8x-iris-v2 # Colibri iMX8X Module on Iris Board V2
- const: toradex,colibri-imx8x
- - const: fsl,imx8qxp
+ - enum:
+ - fsl,imx8qxp
+ - fsl,imx8dx
- description:
TQMa8Xx is a series of SOM featuring NXP i.MX8X system-on-chip
@@ -1536,6 +1546,12 @@ properties:
- nxp,s32g274a-rdb2
- const: nxp,s32g2
+ - description: S32G3 based Boards
+ items:
+ - enum:
+ - nxp,s32g399a-rdb3
+ - const: nxp,s32g3
+
- description: S32V234 based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
index c24ad0968f3e..7f06b1080244 100644
--- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
@@ -61,10 +61,6 @@ properties:
mboxes:
minItems: 2
- ti,system-reboot-controller:
- description: Determines If system reboot can be triggered by SoC reboot
- type: boolean
-
ti,host-id:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
@@ -94,7 +90,6 @@ examples:
- |
pmmc: system-controller@2921800 {
compatible = "ti,k2g-sci";
- ti,system-reboot-controller;
mbox-names = "rx", "tx";
mboxes = <&msgmgr 5 2>,
<&msgmgr 0 0>;
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
deleted file mode 100644
index 29fa93dad52b..000000000000
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Power management
-----------------
-
-For power management (particularly DVFS and AVS), the North Bridge
-Power Management component is needed:
-
-Required properties:
-- compatible : should contain "marvell,armada-3700-nb-pm", "syscon";
-- reg : the register start and length for the North Bridge
- Power Management
-
-Example:
-
-nb_pm: syscon@14000 {
- compatible = "marvell,armada-3700-nb-pm", "syscon";
- reg = <0x14000 0x60>;
-}
-
-AVS
----
-
-For AVS an other component is needed:
-
-Required properties:
-- compatible : should contain "marvell,armada-3700-avs", "syscon";
-- reg : the register start and length for the AVS
-
-Example:
-avs: avs@11500 {
- compatible = "marvell,armada-3700-avs", "syscon";
- reg = <0x11500 0x40>;
-}
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 66beaac60e1d..ae885414b181 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -137,6 +137,7 @@ properties:
- microsoft,dempsey
- microsoft,makepeace
- microsoft,moneypenny
+ - motorola,falcon
- samsung,s3ve3g
- const: qcom,msm8226
@@ -184,13 +185,16 @@ properties:
- oneplus,bacon
- samsung,klte
- sony,xperia-castor
+ - sony,xperia-leo
- const: qcom,msm8974pro
- const: qcom,msm8974
- items:
- - const: qcom,msm8916-mtp
- - const: qcom,msm8916-mtp/1
- - const: qcom,msm8916
+ - enum:
+ - samsung,kltechn
+ - const: samsung,klte
+ - const: qcom,msm8974pro
+ - const: qcom,msm8974
- items:
- enum:
@@ -200,6 +204,8 @@ properties:
- gplus,fl8005a
- huawei,g7
- longcheer,l8910
+ - longcheer,l8150
+ - qcom,msm8916-mtp
- samsung,a3u-eur
- samsung,a5u-eur
- samsung,e5
@@ -221,11 +227,6 @@ properties:
- const: qcom,msm8916
- items:
- - const: longcheer,l8150
- - const: qcom,msm8916-v1-qrd/9-v1
- - const: qcom,msm8916
-
- - items:
- enum:
- motorola,potter
- xiaomi,daisy
@@ -1003,6 +1004,7 @@ properties:
- qcom,sm8550-hdk
- qcom,sm8550-mtp
- qcom,sm8550-qrd
+ - sony,pdx234
- const: qcom,sm8550
- items:
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index fcf7316ecd74..e04c213a0dee 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -49,6 +49,11 @@ properties:
- anbernic,rg-arc-s
- const: rockchip,rk3566
+ - description: ArmSoM Sige7 board
+ items:
+ - const: armsom,sige7
+ - const: rockchip,rk3588
+
- description: Asus Tinker board
items:
- const: asus,rk3288-tinker
@@ -198,6 +203,13 @@ properties:
- const: firefly,rk3568-roc-pc
- const: rockchip,rk3568
+ - description: Forlinx FET3588-C SoM
+ items:
+ - enum:
+ - forlinx,ok3588-c
+ - const: forlinx,fet3588-c
+ - const: rockchip,rk3588
+
- description: FriendlyElec NanoPi R2 series boards
items:
- enum:
@@ -236,6 +248,11 @@ properties:
- const: friendlyarm,nanopc-t6
- const: rockchip,rk3588
+ - description: GameForce Chi
+ items:
+ - const: gameforce,chi
+ - const: rockchip,rk3326
+
- description: GeekBuying GeekBox
items:
- const: geekbuying,geekbox
@@ -631,7 +648,7 @@ properties:
- const: phytec,rk3288-phycore-som
- const: rockchip,rk3288
- - description: Pine64 PinebookPro
+ - description: Pine64 Pinebook Pro
items:
- const: pine64,pinebook-pro
- const: rockchip,rk3399
@@ -644,7 +661,7 @@ properties:
- const: pine64,pinenote
- const: rockchip,rk3566
- - description: Pine64 PinePhonePro
+ - description: Pine64 PinePhone Pro
items:
- const: pine64,pinephone-pro
- const: rockchip,rk3399
@@ -682,7 +699,7 @@ properties:
- const: pine64,quartzpro64
- const: rockchip,rk3588
- - description: Pine64 SoQuartz SoM
+ - description: Pine64 SOQuartz
items:
- enum:
- pine64,soquartz-blade
@@ -700,12 +717,17 @@ properties:
- powkiddy,x55
- const: rockchip,rk3566
+ - description: Protonic MECSBC board
+ items:
+ - const: prt,mecsbc
+ - const: rockchip,rk3568
+
- description: QNAP TS-433-4G 4-Bay NAS
items:
- const: qnap,ts433
- const: rockchip,rk3568
- - description: Radxa Compute Module 3(CM3)
+ - description: Radxa Compute Module 3 (CM3)
items:
- enum:
- radxa,cm3-io
@@ -767,22 +789,27 @@ properties:
- const: radxa,rockpis
- const: rockchip,rk3308
- - description: Radxa Rock2 Square
+ - description: Radxa Rock 2 Square
items:
- const: radxa,rock2-square
- const: rockchip,rk3288
- - description: Radxa ROCK3 Model A
+ - description: Radxa ROCK 3A
items:
- const: radxa,rock3a
- const: rockchip,rk3568
- - description: Radxa ROCK 5 Model A
+ - description: Radxa ROCK 3C
+ items:
+ - const: radxa,rock-3c
+ - const: rockchip,rk3566
+
+ - description: Radxa ROCK 5A
items:
- const: radxa,rock-5a
- const: rockchip,rk3588s
- - description: Radxa ROCK 5 Model B
+ - description: Radxa ROCK 5B
items:
- const: radxa,rock-5b
- const: rockchip,rk3588
@@ -927,6 +954,11 @@ properties:
- const: turing,rk1
- const: rockchip,rk3588
+ - description: WolfVision PF5 mainboard
+ items:
+ - const: wolfvision,rk3568-pf5
+ - const: rockchip,rk3568
+
- description: Xunlong Orange Pi 5 Plus
items:
- const: xunlong,orangepi-5-plus
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 09d835db6db5..c6d0d8d81ed4 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -56,6 +56,21 @@ properties:
- const: anbernic,rg-nano
- const: allwinner,sun8i-v3s
+ - description: Anbernic RG35XX (2024)
+ items:
+ - const: anbernic,rg35xx-2024
+ - const: allwinner,sun50i-h700
+
+ - description: Anbernic RG35XX Plus
+ items:
+ - const: anbernic,rg35xx-plus
+ - const: allwinner,sun50i-h700
+
+ - description: Anbernic RG35XX H
+ items:
+ - const: anbernic,rg35xx-h
+ - const: allwinner,sun50i-h700
+
- description: Amarula A64 Relic
items:
- const: amarula,a64-relic
@@ -774,6 +789,11 @@ properties:
- const: pocketbook,touch-lux-3
- const: allwinner,sun5i-a13
+ - description: PocketBook 614 Plus
+ items:
+ - const: pocketbook,614-plus
+ - const: allwinner,sun5i-a13
+
- description: Point of View Protab2-IPS9
items:
- const: pov,protab2-ips9
@@ -860,6 +880,11 @@ properties:
- const: allwinner,sl631
- const: allwinner,sun8i-v3
+ - description: Tanix TX1
+ items:
+ - const: oranth,tanix-tx1
+ - const: allwinner,sun50i-h616
+
- description: Tanix TX6
items:
- const: oranth,tanix-tx6
diff --git a/Documentation/devicetree/bindings/ata/ahci-da850.txt b/Documentation/devicetree/bindings/ata/ahci-da850.txt
deleted file mode 100644
index 5f8193417725..000000000000
--- a/Documentation/devicetree/bindings/ata/ahci-da850.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Device tree binding for the TI DA850 AHCI SATA Controller
----------------------------------------------------------
-
-Required properties:
- - compatible: must be "ti,da850-ahci"
- - reg: physical base addresses and sizes of the two register regions
- used by the controller: the register map as defined by the
- AHCI 1.1 standard and the Power Down Control Register (PWRDN)
- for enabling/disabling the SATA clock receiver
- - interrupts: interrupt specifier (refer to the interrupt binding)
-
-Example:
-
- sata: sata@218000 {
- compatible = "ti,da850-ahci";
- reg = <0x218000 0x2000>, <0x22c018 0x4>;
- interrupts = <67>;
- };
diff --git a/Documentation/devicetree/bindings/ata/fsl,imx-pata.yaml b/Documentation/devicetree/bindings/ata/fsl,imx-pata.yaml
new file mode 100644
index 000000000000..324e2413bba8
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/fsl,imx-pata.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/fsl,imx-pata.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX PATA Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx31-pata
+ - fsl,imx51-pata
+ - const: fsl,imx27-pata
+ - const: fsl,imx27-pata
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: PATA Controller interrupts
+
+ clocks:
+ items:
+ - description: PATA Controller clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ pata: pata@83fe0000 {
+ compatible = "fsl,imx51-pata", "fsl,imx27-pata";
+ reg = <0x83fe0000 0x4000>;
+ interrupts = <70>;
+ clocks = <&clks 161>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/imx-pata.txt b/Documentation/devicetree/bindings/ata/imx-pata.txt
deleted file mode 100644
index f1172f00188a..000000000000
--- a/Documentation/devicetree/bindings/ata/imx-pata.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* Freescale i.MX PATA Controller
-
-Required properties:
-- compatible: "fsl,imx27-pata"
-- reg: Address range of the PATA Controller
-- interrupts: The interrupt of the PATA Controller
-- clocks: the clocks for the PATA Controller
-
-Example:
-
- pata: pata@83fe0000 {
- compatible = "fsl,imx51-pata", "fsl,imx27-pata";
- reg = <0x83fe0000 0x4000>;
- interrupts = <70>;
- clocks = <&clks 161>;
- };
diff --git a/Documentation/devicetree/bindings/ata/ti,da850-ahci.yaml b/Documentation/devicetree/bindings/ata/ti,da850-ahci.yaml
new file mode 100644
index 000000000000..ce13c76bdffb
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ti,da850-ahci.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/ti,da850-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DA850 AHCI SATA Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: ti,da850-ahci
+
+ reg:
+ items:
+ - description: Address and size of the register map as defined by the AHCI 1.1 standard.
+ - description:
+ Address and size of Power Down Control Register (PWRDN) for enabling/disabling the SATA clock
+ receiver.
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ sata@218000 {
+ compatible = "ti,da850-ahci";
+ reg = <0x218000 0x2000>, <0x22c018 0x4>;
+ interrupts = <67>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/st,stm32-etzpc.yaml b/Documentation/devicetree/bindings/bus/st,stm32-etzpc.yaml
new file mode 100644
index 000000000000..d12b62a3a5a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/st,stm32-etzpc.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/st,stm32-etzpc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32 Extended TrustZone protection controller
+
+description: |
+ The ETZPC configures TrustZone security in a SoC having bus masters and
+ devices with programmable-security attributes (securable resources).
+
+maintainers:
+ - Gatien Chevallier <gatien.chevallier@foss.st.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32-etzpc
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: st,stm32-etzpc
+ - const: simple-bus
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ "#access-controller-cells":
+ const: 1
+ description:
+ Contains the firewall ID associated to the peripheral.
+
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ description: Peripherals
+ type: object
+
+ additionalProperties: true
+
+ required:
+ - access-controllers
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - "#access-controller-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ // In this example, the usart2 device refers to rifsc as its access
+ // controller.
+ // Access rights are verified before creating devices.
+
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp13-clks.h>
+ #include <dt-bindings/reset/stm32mp13-resets.h>
+
+ etzpc: bus@5c007000 {
+ compatible = "st,stm32-etzpc", "simple-bus";
+ reg = <0x5c007000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #access-controller-cells = <1>;
+ ranges;
+
+ usart2: serial@4c001000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x4c001000 0x400>;
+ interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART2_K>;
+ resets = <&rcc USART2_R>;
+ wakeup-source;
+ dmas = <&dmamux1 43 0x400 0x5>,
+ <&dmamux1 44 0x400 0x1>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 17>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/bus/st,stm32mp25-rifsc.yaml b/Documentation/devicetree/bindings/bus/st,stm32mp25-rifsc.yaml
new file mode 100644
index 000000000000..20acd1a6b173
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/st,stm32mp25-rifsc.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/st,stm32mp25-rifsc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32 Resource isolation framework security controller
+
+maintainers:
+ - Gatien Chevallier <gatien.chevallier@foss.st.com>
+
+description: |
+ Resource isolation framework (RIF) is a comprehensive set of hardware blocks
+ designed to enforce and manage isolation of STM32 hardware resources like
+ memory and peripherals.
+
+ The RIFSC (RIF security controller) is composed of three sets of registers,
+ each managing a specific set of hardware resources:
+ - RISC registers associated with RISUP logic (resource isolation device unit
+ for peripherals), assign all non-RIF aware peripherals to zero, one or
+ any security domains (secure, privilege, compartment).
+ - RIMC registers: associated with RIMU logic (resource isolation master
+ unit), assign all non RIF-aware bus master to one security domain by
+ setting secure, privileged and compartment information on the system bus.
+ Alternatively, the RISUP logic controlling the device port access to a
+ peripheral can assign target bus attributes to this peripheral master port
+ (supported attribute: CID).
+ - RISC registers associated with RISAL logic (resource isolation device unit
+ for address space - Lite version), assign address space subregions to one
+ security domains (secure, privilege, compartment).
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32mp25-rifsc
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: st,stm32mp25-rifsc
+ - const: simple-bus
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ "#access-controller-cells":
+ const: 1
+ description:
+ Contains the firewall ID associated to the peripheral.
+
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ description: Peripherals
+ type: object
+
+ additionalProperties: true
+
+ required:
+ - access-controllers
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - "#access-controller-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ // In this example, the usart2 device refers to rifsc as its domain
+ // controller.
+ // Access rights are verified before creating devices.
+
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ rifsc: bus@42080000 {
+ compatible = "st,stm32mp25-rifsc", "simple-bus";
+ reg = <0x42080000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #access-controller-cells = <1>;
+ ranges;
+
+ usart2: serial@400e0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x400e0000 0x400>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ck_flexgen_08>;
+ access-controllers = <&rifsc 32>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml b/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml
index 79b0752faa91..3f4266637733 100644
--- a/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml
+++ b/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml
@@ -29,10 +29,13 @@ description: |
properties:
compatible:
items:
- - const: airoha,en7523-scu
+ - enum:
+ - airoha,en7523-scu
+ - airoha,en7581-scu
reg:
- maxItems: 2
+ minItems: 2
+ maxItems: 3
"#clock-cells":
description:
@@ -45,6 +48,30 @@ required:
- reg
- '#clock-cells'
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: airoha,en7523-scu
+ then:
+ properties:
+ reg:
+ items:
+ - description: scu base address
+ - description: misc scu base address
+
+ - if:
+ properties:
+ compatible:
+ const: airoha,en7581-scu
+ then:
+ properties:
+ reg:
+ items:
+ - description: scu base address
+ - description: misc scu base address
+ - description: pb scu base address
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/clock/fixed-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-clock.yaml
index b0a4fb8256e2..90fb10660684 100644
--- a/Documentation/devicetree/bindings/clock/fixed-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/fixed-clock.yaml
@@ -11,6 +11,15 @@ maintainers:
- Stephen Boyd <sboyd@kernel.org>
properties:
+ $nodename:
+ anyOf:
+ - description:
+ Preferred name is 'clock-<freq>' with <freq> being the output
+ frequency as defined in the 'clock-frequency' property.
+ pattern: "^clock-([0-9]+|[a-z0-9-]+)$"
+ - description: Any name allowed
+ deprecated: true
+
compatible:
const: fixed-clock
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
index 8f71ab300470..4afdb1c98f5f 100644
--- a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
@@ -11,6 +11,15 @@ maintainers:
- Stephen Boyd <sboyd@kernel.org>
properties:
+ $nodename:
+ anyOf:
+ - description:
+ If the frequency is fixed, the preferred name is 'clock-<freq>' with
+ <freq> being the output frequency.
+ pattern: "^clock-([0-9]+|[0-9a-z-]+)$"
+ - description: Any name allowed
+ deprecated: true
+
compatible:
enum:
- fixed-factor-clock
diff --git a/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml b/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
index 1d2bcea41c85..caf442ead24b 100644
--- a/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
@@ -30,16 +30,18 @@ properties:
- google,gs101-cmu-top
- google,gs101-cmu-apm
- google,gs101-cmu-misc
+ - google,gs101-cmu-hsi0
+ - google,gs101-cmu-hsi2
- google,gs101-cmu-peric0
- google,gs101-cmu-peric1
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 5
clock-names:
minItems: 1
- maxItems: 3
+ maxItems: 5
"#clock-cells":
const: 1
@@ -76,6 +78,55 @@ allOf:
properties:
compatible:
contains:
+ const: google,gs101-cmu-hsi0
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (24.576 MHz)
+ - description: HSI0 bus clock (from CMU_TOP)
+ - description: DPGTC (from CMU_TOP)
+ - description: USB DRD controller clock (from CMU_TOP)
+ - description: USB Display Port debug clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: bus
+ - const: dpgtc
+ - const: usb31drd
+ - const: usbdpdbg
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - google,gs101-cmu-hsi2
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (24.576 MHz)
+ - description: High Speed Interface bus clock (from CMU_TOP)
+ - description: High Speed Interface pcie clock (from CMU_TOP)
+ - description: High Speed Interface ufs clock (from CMU_TOP)
+ - description: High Speed Interface mmc clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: bus
+ - const: pcie
+ - const: ufs
+ - const: mmc
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: google,gs101-cmu-misc
then:
diff --git a/Documentation/devicetree/bindings/clock/keystone-gate.txt b/Documentation/devicetree/bindings/clock/keystone-gate.txt
index c5aa187026e3..43f6fb6c9392 100644
--- a/Documentation/devicetree/bindings/clock/keystone-gate.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-gate.txt
@@ -1,5 +1,3 @@
-Status: Unstable - ABI compatibility may be broken in the future
-
Binding for Keystone gate control driver which uses PSC controller IP.
This binding uses the common clock binding[1].
diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
index 9a3fbc665606..69b0eb7c03c9 100644
--- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
@@ -1,5 +1,3 @@
-Status: Unstable - ABI compatibility may be broken in the future
-
Binding for keystone PLLs. The main PLL IP typically has a multiplier,
a divider and a post divider. The additional PLL IPs like ARMPLL, DDRPLL
and PAPLL are controlled by the memory mapped register where as the Main
diff --git a/Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml b/Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml
index 63a59015987e..4f79cdb417ab 100644
--- a/Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml
@@ -16,7 +16,9 @@ description: |
properties:
compatible:
enum:
- - loongson,ls2k-clk
+ - loongson,ls2k0500-clk
+ - loongson,ls2k-clk # This is for Loongson-2K1000
+ - loongson,ls2k2000-clk
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
new file mode 100644
index 000000000000..2dffc02dcd8b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,imx95-blk-ctl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX95 Block Control
+
+maintainers:
+ - Peng Fan <peng.fan@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - nxp,imx95-lvds-csr
+ - nxp,imx95-display-csr
+ - nxp,imx95-camera-csr
+ - nxp,imx95-vpu-csr
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+ description:
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See
+ include/dt-bindings/clock/nxp,imx95-clock.h
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@4c410000 {
+ compatible = "nxp,imx95-vpu-csr", "syscon";
+ reg = <0x4c410000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&scmi_clk 114>;
+ power-domains = <&scmi_devpd 21>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-display-master-csr.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-display-master-csr.yaml
new file mode 100644
index 000000000000..07f7412e7658
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,imx95-display-master-csr.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,imx95-display-master-csr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX95 Display Master Block Control
+
+maintainers:
+ - Peng Fan <peng.fan@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - const: nxp,imx95-display-master-csr
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+ description:
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See
+ include/dt-bindings/clock/nxp,imx95-clock.h
+
+ mux-controller:
+ type: object
+ $ref: /schemas/mux/reg-mux.yaml
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - mux-controller
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@4c410000 {
+ compatible = "nxp,imx95-display-master-csr", "syscon";
+ reg = <0x4c410000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&scmi_clk 62>;
+ power-domains = <&scmi_devpd 3>;
+
+ mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x4 0x00000001>; /* Pixel_link_sel */
+ idle-states = <0>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
deleted file mode 100644
index 5769cbbe76be..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-High-Frequency PLL (HFPLL)
-
-PROPERTIES
-
-- compatible:
- Usage: required
- Value type: <string>:
- shall contain only one of the following. The generic
- compatible "qcom,hfpll" should be also included.
-
- "qcom,hfpll-ipq8064", "qcom,hfpll"
- "qcom,hfpll-apq8064", "qcom,hfpll"
- "qcom,hfpll-msm8974", "qcom,hfpll"
- "qcom,hfpll-msm8960", "qcom,hfpll"
- "qcom,msm8976-hfpll-a53", "qcom,hfpll"
- "qcom,msm8976-hfpll-a72", "qcom,hfpll"
- "qcom,msm8976-hfpll-cci", "qcom,hfpll"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: address and size of HPLL registers. An optional second
- element specifies the address and size of the alias
- register region.
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to the xo clock.
-
-- clock-names:
- Usage: required
- Value type: <stringlist>
- Definition: must be "xo".
-
-- clock-output-names:
- Usage: required
- Value type: <string>
- Definition: Name of the PLL. Typically hfpllX where X is a CPU number
- starting at 0. Otherwise hfpll_Y where Y is more specific
- such as "l2".
-
-Example:
-
-1) An HFPLL for the L2 cache.
-
- clock-controller@f9016000 {
- compatible = "qcom,hfpll-ipq8064", "qcom,hfpll";
- reg = <0xf9016000 0x30>;
- clocks = <&xo_board>;
- clock-names = "xo";
- clock-output-names = "hfpll_l2";
- };
-
-2) An HFPLL for CPU0. This HFPLL has the alias register region.
-
- clock-controller@f908a000 {
- compatible = "qcom,hfpll-ipq8064", "qcom,hfpll";
- reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
- clocks = <&xo_board>;
- clock-names = "xo";
- clock-output-names = "hfpll0";
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.yaml b/Documentation/devicetree/bindings/clock/qcom,hfpll.yaml
new file mode 100644
index 000000000000..8cb1c164f760
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,hfpll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm High-Frequency PLL
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+description:
+ The HFPLL is used as CPU PLL on various Qualcomm SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - qcom,msm8974-hfpll
+ - qcom,msm8976-hfpll-a53
+ - qcom,msm8976-hfpll-a72
+ - qcom,msm8976-hfpll-cci
+ - qcom,qcs404-hfpll
+ - const: qcom,hfpll
+ deprecated: true
+
+ reg:
+ items:
+ - description: HFPLL registers
+ - description: Alias register region
+ minItems: 1
+
+ '#clock-cells':
+ const: 0
+
+ clocks:
+ items:
+ - description: board XO clock
+
+ clock-names:
+ items:
+ - const: xo
+
+ clock-output-names:
+ description:
+ Name of the PLL. Typically hfpllX where X is a CPU number starting at 0.
+ Otherwise hfpll_Y where Y is more specific such as "l2".
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+ - clock-names
+ - clock-output-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@f908a000 {
+ compatible = "qcom,msm8974-hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ #clock-cells = <0>;
+ clock-output-names = "hfpll0";
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
index 80a8c7114c31..4e3b0c45124a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
@@ -57,7 +57,8 @@ properties:
can be power-managed through Module Standby should refer to the CPG device
node in their "power-domains" property, as documented by the generic PM
Domain bindings in Documentation/devicetree/bindings/power/power-domain.yaml.
- const: 0
+ The power domain specifiers defined in <dt-bindings/clock/r9a0*-cpg.h> could
+ be used to reference individual CPG power domains.
'#reset-cells':
description:
@@ -76,6 +77,21 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a08g045-cpg
+ then:
+ properties:
+ '#power-domain-cells':
+ const: 1
+ else:
+ properties:
+ '#power-domain-cells':
+ const: 0
+
examples:
- |
cpg: clock-controller@11010000 {
diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c6400-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,s3c6400-clock.yaml
new file mode 100644
index 000000000000..0fcc0c963f8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,s3c6400-clock.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,s3c6400-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S3C6400 SoC clock controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names and/or provided as clock inputs to this clock controller:
+ - "fin_pll" - PLL input clock (xtal/extclk) - required,
+ - "xusbxti" - USB xtal - required,
+ - "iiscdclk0" - I2S0 codec clock - optional,
+ - "iiscdclk1" - I2S1 codec clock - optional,
+ - "iiscdclk2" - I2S2 codec clock - optional,
+ - "pcmcdclk0" - PCM0 codec clock - optional,
+ - "pcmcdclk1" - PCM1 codec clock - optional, only S3C6410.
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/samsung,s3c64xx-clock.h header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,s3c6400-clock
+ - samsung,s3c6410-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@7e00f000 {
+ compatible = "samsung,s3c6410-clock";
+ reg = <0x7e00f000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt
deleted file mode 100644
index 872ee8e0f041..000000000000
--- a/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-* Samsung S3C64xx Clock Controller
-
-The S3C64xx clock controller generates and supplies clock to various controllers
-within the SoC. The clock binding described here is applicable to all SoCs in
-the S3C64xx family.
-
-Required Properties:
-
-- compatible: should be one of the following.
- - "samsung,s3c6400-clock" - controller compatible with S3C6400 SoC.
- - "samsung,s3c6410-clock" - controller compatible with S3C6410 SoC.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-
-- #clock-cells: should be 1.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. Some of the clocks are available only
-on a particular S3C64xx SoC and this is specified where applicable.
-
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/samsung,s3c64xx-clock.h header and can be used in device
-tree sources.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "fin_pll" - PLL input clock (xtal/extclk) - required,
- - "xusbxti" - USB xtal - required,
- - "iiscdclk0" - I2S0 codec clock - optional,
- - "iiscdclk1" - I2S1 codec clock - optional,
- - "iiscdclk2" - I2S2 codec clock - optional,
- - "pcmcdclk0" - PCM0 codec clock - optional,
- - "pcmcdclk1" - PCM1 codec clock - optional, only S3C6410.
-
-Example: Clock controller node:
-
- clock: clock-controller@7e00f000 {
- compatible = "samsung,s3c6410-clock";
- reg = <0x7e00f000 0x1000>;
- #clock-cells = <1>;
- };
-
-Example: Required external clocks:
-
- fin_pll: clock-fin-pll {
- compatible = "fixed-clock";
- clock-output-names = "fin_pll";
- clock-frequency = <12000000>;
- #clock-cells = <0>;
- };
-
- xusbxti: clock-xusbxti {
- compatible = "fixed-clock";
- clock-output-names = "xusbxti";
- clock-frequency = <48000000>;
- #clock-cells = <0>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller (refer to the standard clock bindings for information about
- "clocks" and "clock-names" properties):
-
- uart0: serial@7f005000 {
- compatible = "samsung,s3c6400-uart";
- reg = <0x7f005000 0x100>;
- interrupt-parent = <&vic1>;
- interrupts = <5>;
- clock-names = "uart", "clk_uart_baud2",
- "clk_uart_baud3";
- clocks = <&clock PCLK_UART0>, <&clocks PCLK_UART0>,
- <&clock SCLK_UART>;
- };
diff --git a/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml b/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
index c1dc24673c0d..59ef41adb539 100644
--- a/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/sophgo,cv1800-clk.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/clock/sophgo,cv1800-clk.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Sophgo CV1800 Series Clock Controller
+title: Sophgo CV1800/SG2000 Series Clock Controller
maintainers:
- Inochi Amaoto <inochiama@outlook.com>
@@ -14,6 +14,7 @@ properties:
enum:
- sophgo,cv1800-clk
- sophgo,cv1810-clk
+ - sophgo,sg2000-clk
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp25-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp25-rcc.yaml
index 7732e79a42b9..88e52f10d1ec 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32mp25-rcc.yaml
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp25-rcc.yaml
@@ -38,14 +38,85 @@ properties:
- description: CK_SCMI_MSI Low Power Internal oscillator (~ 4 MHz or ~ 16 MHz)
- description: CK_SCMI_LSE Low Speed External oscillator (32 KHz)
- description: CK_SCMI_LSI Low Speed Internal oscillator (~ 32 KHz)
+ - description: CK_SCMI_HSE_DIV2 CK_SCMI_HSE divided by 2 (could be gated)
+ - description: CK_SCMI_ICN_HS_MCU High Speed interconnect bus clock
+ - description: CK_SCMI_ICN_LS_MCU Low Speed interconnect bus clock
+ - description: CK_SCMI_ICN_SDMMC SDMMC interconnect bus clock
+ - description: CK_SCMI_ICN_DDR DDR interconnect bus clock
+ - description: CK_SCMI_ICN_DISPLAY Display interconnect bus clock
+ - description: CK_SCMI_ICN_HSL HSL interconnect bus clock
+ - description: CK_SCMI_ICN_NIC NIC interconnect bus clock
+ - description: CK_SCMI_ICN_VID Video interconnect bus clock
+ - description: CK_SCMI_FLEXGEN_07 flexgen clock 7
+ - description: CK_SCMI_FLEXGEN_08 flexgen clock 8
+ - description: CK_SCMI_FLEXGEN_09 flexgen clock 9
+ - description: CK_SCMI_FLEXGEN_10 flexgen clock 10
+ - description: CK_SCMI_FLEXGEN_11 flexgen clock 11
+ - description: CK_SCMI_FLEXGEN_12 flexgen clock 12
+ - description: CK_SCMI_FLEXGEN_13 flexgen clock 13
+ - description: CK_SCMI_FLEXGEN_14 flexgen clock 14
+ - description: CK_SCMI_FLEXGEN_15 flexgen clock 15
+ - description: CK_SCMI_FLEXGEN_16 flexgen clock 16
+ - description: CK_SCMI_FLEXGEN_17 flexgen clock 17
+ - description: CK_SCMI_FLEXGEN_18 flexgen clock 18
+ - description: CK_SCMI_FLEXGEN_19 flexgen clock 19
+ - description: CK_SCMI_FLEXGEN_20 flexgen clock 20
+ - description: CK_SCMI_FLEXGEN_21 flexgen clock 21
+ - description: CK_SCMI_FLEXGEN_22 flexgen clock 22
+ - description: CK_SCMI_FLEXGEN_23 flexgen clock 23
+ - description: CK_SCMI_FLEXGEN_24 flexgen clock 24
+ - description: CK_SCMI_FLEXGEN_25 flexgen clock 25
+ - description: CK_SCMI_FLEXGEN_26 flexgen clock 26
+ - description: CK_SCMI_FLEXGEN_27 flexgen clock 27
+ - description: CK_SCMI_FLEXGEN_28 flexgen clock 28
+ - description: CK_SCMI_FLEXGEN_29 flexgen clock 29
+ - description: CK_SCMI_FLEXGEN_30 flexgen clock 30
+ - description: CK_SCMI_FLEXGEN_31 flexgen clock 31
+ - description: CK_SCMI_FLEXGEN_32 flexgen clock 32
+ - description: CK_SCMI_FLEXGEN_33 flexgen clock 33
+ - description: CK_SCMI_FLEXGEN_34 flexgen clock 34
+ - description: CK_SCMI_FLEXGEN_35 flexgen clock 35
+ - description: CK_SCMI_FLEXGEN_36 flexgen clock 36
+ - description: CK_SCMI_FLEXGEN_37 flexgen clock 37
+ - description: CK_SCMI_FLEXGEN_38 flexgen clock 38
+ - description: CK_SCMI_FLEXGEN_39 flexgen clock 39
+ - description: CK_SCMI_FLEXGEN_40 flexgen clock 40
+ - description: CK_SCMI_FLEXGEN_41 flexgen clock 41
+ - description: CK_SCMI_FLEXGEN_42 flexgen clock 42
+ - description: CK_SCMI_FLEXGEN_43 flexgen clock 43
+ - description: CK_SCMI_FLEXGEN_44 flexgen clock 44
+ - description: CK_SCMI_FLEXGEN_45 flexgen clock 45
+ - description: CK_SCMI_FLEXGEN_46 flexgen clock 46
+ - description: CK_SCMI_FLEXGEN_47 flexgen clock 47
+ - description: CK_SCMI_FLEXGEN_48 flexgen clock 48
+ - description: CK_SCMI_FLEXGEN_49 flexgen clock 49
+ - description: CK_SCMI_FLEXGEN_50 flexgen clock 50
+ - description: CK_SCMI_FLEXGEN_51 flexgen clock 51
+ - description: CK_SCMI_FLEXGEN_52 flexgen clock 52
+ - description: CK_SCMI_FLEXGEN_53 flexgen clock 53
+ - description: CK_SCMI_FLEXGEN_54 flexgen clock 54
+ - description: CK_SCMI_FLEXGEN_55 flexgen clock 55
+ - description: CK_SCMI_FLEXGEN_56 flexgen clock 56
+ - description: CK_SCMI_FLEXGEN_57 flexgen clock 57
+ - description: CK_SCMI_FLEXGEN_58 flexgen clock 58
+ - description: CK_SCMI_FLEXGEN_59 flexgen clock 59
+ - description: CK_SCMI_FLEXGEN_60 flexgen clock 60
+ - description: CK_SCMI_FLEXGEN_61 flexgen clock 61
+ - description: CK_SCMI_FLEXGEN_62 flexgen clock 62
+ - description: CK_SCMI_FLEXGEN_63 flexgen clock 63
+ - description: CK_SCMI_ICN_APB1 Peripheral bridge 1
+ - description: CK_SCMI_ICN_APB2 Peripheral bridge 2
+ - description: CK_SCMI_ICN_APB3 Peripheral bridge 3
+ - description: CK_SCMI_ICN_APB4 Peripheral bridge 4
+ - description: CK_SCMI_ICN_APBDBG Peripheral bridge for debug
+ - description: CK_SCMI_TIMG1 Peripheral bridge for timer1
+ - description: CK_SCMI_TIMG2 Peripheral bridge for timer2
+ - description: CK_SCMI_PLL3 PLL3 clock
+ - description: clk_dsi_txbyte DSI byte clock
- clock-names:
- items:
- - const: hse
- - const: hsi
- - const: msi
- - const: lse
- - const: lsi
+ access-controllers:
+ minItems: 1
+ maxItems: 2
required:
- compatible
@@ -53,7 +124,6 @@ required:
- '#clock-cells'
- '#reset-cells'
- clocks
- - clock-names
additionalProperties: false
@@ -66,11 +136,85 @@ examples:
reg = <0x44200000 0x10000>;
#clock-cells = <1>;
#reset-cells = <1>;
- clock-names = "hse", "hsi", "msi", "lse", "lsi";
- clocks = <&scmi_clk CK_SCMI_HSE>,
- <&scmi_clk CK_SCMI_HSI>,
- <&scmi_clk CK_SCMI_MSI>,
- <&scmi_clk CK_SCMI_LSE>,
- <&scmi_clk CK_SCMI_LSI>;
+ clocks = <&scmi_clk CK_SCMI_HSE>,
+ <&scmi_clk CK_SCMI_HSI>,
+ <&scmi_clk CK_SCMI_MSI>,
+ <&scmi_clk CK_SCMI_LSE>,
+ <&scmi_clk CK_SCMI_LSI>,
+ <&scmi_clk CK_SCMI_HSE_DIV2>,
+ <&scmi_clk CK_SCMI_ICN_HS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_LS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_SDMMC>,
+ <&scmi_clk CK_SCMI_ICN_DDR>,
+ <&scmi_clk CK_SCMI_ICN_DISPLAY>,
+ <&scmi_clk CK_SCMI_ICN_HSL>,
+ <&scmi_clk CK_SCMI_ICN_NIC>,
+ <&scmi_clk CK_SCMI_ICN_VID>,
+ <&scmi_clk CK_SCMI_FLEXGEN_07>,
+ <&scmi_clk CK_SCMI_FLEXGEN_08>,
+ <&scmi_clk CK_SCMI_FLEXGEN_09>,
+ <&scmi_clk CK_SCMI_FLEXGEN_10>,
+ <&scmi_clk CK_SCMI_FLEXGEN_11>,
+ <&scmi_clk CK_SCMI_FLEXGEN_12>,
+ <&scmi_clk CK_SCMI_FLEXGEN_13>,
+ <&scmi_clk CK_SCMI_FLEXGEN_14>,
+ <&scmi_clk CK_SCMI_FLEXGEN_15>,
+ <&scmi_clk CK_SCMI_FLEXGEN_16>,
+ <&scmi_clk CK_SCMI_FLEXGEN_17>,
+ <&scmi_clk CK_SCMI_FLEXGEN_18>,
+ <&scmi_clk CK_SCMI_FLEXGEN_19>,
+ <&scmi_clk CK_SCMI_FLEXGEN_20>,
+ <&scmi_clk CK_SCMI_FLEXGEN_21>,
+ <&scmi_clk CK_SCMI_FLEXGEN_22>,
+ <&scmi_clk CK_SCMI_FLEXGEN_23>,
+ <&scmi_clk CK_SCMI_FLEXGEN_24>,
+ <&scmi_clk CK_SCMI_FLEXGEN_25>,
+ <&scmi_clk CK_SCMI_FLEXGEN_26>,
+ <&scmi_clk CK_SCMI_FLEXGEN_27>,
+ <&scmi_clk CK_SCMI_FLEXGEN_28>,
+ <&scmi_clk CK_SCMI_FLEXGEN_29>,
+ <&scmi_clk CK_SCMI_FLEXGEN_30>,
+ <&scmi_clk CK_SCMI_FLEXGEN_31>,
+ <&scmi_clk CK_SCMI_FLEXGEN_32>,
+ <&scmi_clk CK_SCMI_FLEXGEN_33>,
+ <&scmi_clk CK_SCMI_FLEXGEN_34>,
+ <&scmi_clk CK_SCMI_FLEXGEN_35>,
+ <&scmi_clk CK_SCMI_FLEXGEN_36>,
+ <&scmi_clk CK_SCMI_FLEXGEN_37>,
+ <&scmi_clk CK_SCMI_FLEXGEN_38>,
+ <&scmi_clk CK_SCMI_FLEXGEN_39>,
+ <&scmi_clk CK_SCMI_FLEXGEN_40>,
+ <&scmi_clk CK_SCMI_FLEXGEN_41>,
+ <&scmi_clk CK_SCMI_FLEXGEN_42>,
+ <&scmi_clk CK_SCMI_FLEXGEN_43>,
+ <&scmi_clk CK_SCMI_FLEXGEN_44>,
+ <&scmi_clk CK_SCMI_FLEXGEN_45>,
+ <&scmi_clk CK_SCMI_FLEXGEN_46>,
+ <&scmi_clk CK_SCMI_FLEXGEN_47>,
+ <&scmi_clk CK_SCMI_FLEXGEN_48>,
+ <&scmi_clk CK_SCMI_FLEXGEN_49>,
+ <&scmi_clk CK_SCMI_FLEXGEN_50>,
+ <&scmi_clk CK_SCMI_FLEXGEN_51>,
+ <&scmi_clk CK_SCMI_FLEXGEN_52>,
+ <&scmi_clk CK_SCMI_FLEXGEN_53>,
+ <&scmi_clk CK_SCMI_FLEXGEN_54>,
+ <&scmi_clk CK_SCMI_FLEXGEN_55>,
+ <&scmi_clk CK_SCMI_FLEXGEN_56>,
+ <&scmi_clk CK_SCMI_FLEXGEN_57>,
+ <&scmi_clk CK_SCMI_FLEXGEN_58>,
+ <&scmi_clk CK_SCMI_FLEXGEN_59>,
+ <&scmi_clk CK_SCMI_FLEXGEN_60>,
+ <&scmi_clk CK_SCMI_FLEXGEN_61>,
+ <&scmi_clk CK_SCMI_FLEXGEN_62>,
+ <&scmi_clk CK_SCMI_FLEXGEN_63>,
+ <&scmi_clk CK_SCMI_ICN_APB1>,
+ <&scmi_clk CK_SCMI_ICN_APB2>,
+ <&scmi_clk CK_SCMI_ICN_APB3>,
+ <&scmi_clk CK_SCMI_ICN_APB4>,
+ <&scmi_clk CK_SCMI_ICN_APBDBG>,
+ <&scmi_clk CK_SCMI_TIMG1>,
+ <&scmi_clk CK_SCMI_TIMG2>,
+ <&scmi_clk CK_SCMI_PLL3>,
+ <&clk_dsi_txbyte>;
};
...
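The example above covers the expanded clocks list but not the new access-controllers property. A minimal consumer sketch, assuming an STM32MP25 RIFSC firewall node and an arbitrary firewall index (both placeholders, board/SoC specific):

    rcc: clock-controller@44200000 {
        compatible = "st,stm32mp25-rcc";        /* compatible assumed from the binding name */
        reg = <0x44200000 0x10000>;
        #clock-cells = <1>;
        #reset-cells = <1>;
        clocks = <&scmi_clk CK_SCMI_HSE>;       /* ... remaining inputs as listed in the example above */
        access-controllers = <&rifsc 156>;      /* placeholder phandle and index */
    };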
diff --git a/Documentation/devicetree/bindings/clock/ti/adpll.txt b/Documentation/devicetree/bindings/clock/ti/adpll.txt
index 4c8a2ce2cd70..3122360adcf3 100644
--- a/Documentation/devicetree/bindings/clock/ti/adpll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/adpll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments ADPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped ADPLL with two to three selectable input clocks
and three to four children.
diff --git a/Documentation/devicetree/bindings/clock/ti/apll.txt b/Documentation/devicetree/bindings/clock/ti/apll.txt
index ade4dd4c30f0..bbd505c1199d 100644
--- a/Documentation/devicetree/bindings/clock/ti/apll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/apll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments APLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped APLL with usually two selectable input clocks
(reference clock and bypass clock), with analog phase locked
diff --git a/Documentation/devicetree/bindings/clock/ti/autoidle.txt b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
index 7c735dde9fe9..05645a10a9e3 100644
--- a/Documentation/devicetree/bindings/clock/ti/autoidle.txt
+++ b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments autoidle clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a register mapped
clock which can be put to idle automatically by hardware based on the usage
and a configuration bit setting. Autoidle clock is never an individual
diff --git a/Documentation/devicetree/bindings/clock/ti/clockdomain.txt b/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
index 9c6199249ce5..edf0b5d42768 100644
--- a/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
+++ b/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments clockdomain.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1] in consumer role.
Every clock on TI SoC belongs to one clockdomain, but software
only needs this information for specific clocks which require
diff --git a/Documentation/devicetree/bindings/clock/ti/composite.txt b/Documentation/devicetree/bindings/clock/ti/composite.txt
index 33ac7c9ad053..6f7e1331b546 100644
--- a/Documentation/devicetree/bindings/clock/ti/composite.txt
+++ b/Documentation/devicetree/bindings/clock/ti/composite.txt
@@ -1,7 +1,5 @@
Binding for TI composite clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped composite clock with multiple different sub-types;
diff --git a/Documentation/devicetree/bindings/clock/ti/divider.txt b/Documentation/devicetree/bindings/clock/ti/divider.txt
index 9b13b32974f9..4d7c76f0b356 100644
--- a/Documentation/devicetree/bindings/clock/ti/divider.txt
+++ b/Documentation/devicetree/bindings/clock/ti/divider.txt
@@ -1,7 +1,5 @@
Binding for TI divider clock
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped adjustable clock rate divider that does not gate and has
only one input clock or parent. By default the value programmed into
diff --git a/Documentation/devicetree/bindings/clock/ti/dpll.txt b/Documentation/devicetree/bindings/clock/ti/dpll.txt
index 37a7cb6ad07d..14a1b72c2e71 100644
--- a/Documentation/devicetree/bindings/clock/ti/dpll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/dpll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments DPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped DPLL with usually two selectable input clocks
(reference clock and bypass clock), with digital phase locked
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
index c19b3f253b8c..88986ef39ddd 100644
--- a/Documentation/devicetree/bindings/clock/ti/fapll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/fapll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments FAPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped FAPLL with usually two selectable input clocks
(reference clock and bypass clock), and one or more child
diff --git a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
index 518e3c142276..dc69477b6e98 100644
--- a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
+++ b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
@@ -1,7 +1,5 @@
Binding for TI fixed factor rate clock sources.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1], and also uses the autoidle
support from TI autoidle clock [2].
diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt
index 4982615c01b9..a8e0335b006a 100644
--- a/Documentation/devicetree/bindings/clock/ti/gate.txt
+++ b/Documentation/devicetree/bindings/clock/ti/gate.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments gate clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. This clock is
quite similar to the basic gate-clock [2]; however,
it supports a number of additional features. If no register
diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt
index d3eb5ca92a7f..85fb1f2d2d28 100644
--- a/Documentation/devicetree/bindings/clock/ti/interface.txt
+++ b/Documentation/devicetree/bindings/clock/ti/interface.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments interface clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. This clock is
quite similar to the basic gate-clock [2]; however,
it supports a number of additional features, including
diff --git a/Documentation/devicetree/bindings/clock/ti/mux.txt b/Documentation/devicetree/bindings/clock/ti/mux.txt
index b33f641f1043..cd56d3c1c09f 100644
--- a/Documentation/devicetree/bindings/clock/ti/mux.txt
+++ b/Documentation/devicetree/bindings/clock/ti/mux.txt
@@ -1,7 +1,5 @@
Binding for TI mux clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped multiplexer with multiple input clock signals or
parents, one of which can be selected as output. This clock does not
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index 56fc71d6a081..1e9797f96410 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -38,6 +38,7 @@ properties:
- qcom,sc7280-cpufreq-epss
- qcom,sc8280xp-cpufreq-epss
- qcom,sdx75-cpufreq-epss
+ - qcom,sm4450-cpufreq-epss
- qcom,sm6375-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- qcom,sm8350-cpufreq-epss
@@ -133,6 +134,7 @@ allOf:
- qcom,sc8280xp-cpufreq-epss
- qcom,sdm670-cpufreq-hw
- qcom,sdm845-cpufreq-hw
+ - qcom,sm4450-cpufreq-epss
- qcom,sm6115-cpufreq-hw
- qcom,sm6350-cpufreq-hw
- qcom,sm6375-cpufreq-epss
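A minimal sketch of a cpufreq node using the new SM4450 entry; the register ranges, domain count and clock specifiers are assumptions, and the per-SoC dcvsh interrupts are omitted:

    cpufreq_hw: cpufreq@17d91000 {
        compatible = "qcom,sm4450-cpufreq-epss", "qcom,cpufreq-epss";
        reg = <0x17d91000 0x1000>, <0x17d92000 0x1000>;    /* placeholder EPSS frequency-domain regions */
        reg-names = "freq-domain0", "freq-domain1";
        clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;     /* placeholder clock providers */
        clock-names = "xo", "alternate";
        #freq-domain-cells = <1>;
    };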
diff --git a/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-aes.yaml b/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-aes.yaml
new file mode 100644
index 000000000000..cb47ae2889b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-aes.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/nvidia,tegra234-se-aes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra Security Engine for AES algorithms
+
+description:
+ The Tegra Security Engine accelerates the following AES encryption/decryption
+ algorithms - AES-ECB, AES-CBC, AES-OFB, AES-XTS, AES-CTR, AES-GCM, AES-CCM,
+ AES-CMAC
+
+maintainers:
+ - Akhil R <akhilrajeev@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra234-se-aes
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/memory/tegra234-mc.h>
+ #include <dt-bindings/clock/tegra234-clock.h>
+
+ crypto@15820000 {
+ compatible = "nvidia,tegra234-se-aes";
+ reg = <0x15820000 0x10000>;
+ clocks = <&bpmp TEGRA234_CLK_SE>;
+ iommus = <&smmu TEGRA234_SID_SES_SE1>;
+ dma-coherent;
+ };
+...
diff --git a/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-hash.yaml b/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-hash.yaml
new file mode 100644
index 000000000000..f57ef10645e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/nvidia,tegra234-se-hash.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/nvidia,tegra234-se-hash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra Security Engine for HASH algorithms
+
+description:
+ The Tegra Security HASH Engine accelerates the following HASH functions -
+ SHA1, SHA224, SHA256, SHA384, SHA512, SHA3-224, SHA3-256, SHA3-384, SHA3-512,
+ HMAC(SHA224), HMAC(SHA256), HMAC(SHA384), HMAC(SHA512)
+
+maintainers:
+ - Akhil R <akhilrajeev@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra234-se-hash
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/memory/tegra234-mc.h>
+ #include <dt-bindings/clock/tegra234-clock.h>
+
+ crypto@15840000 {
+ compatible = "nvidia,tegra234-se-hash";
+ reg = <0x15840000 0x10000>;
+ clocks = <&bpmp TEGRA234_CLK_SE>;
+ iommus = <&smmu TEGRA234_SID_SES_SE2>;
+ dma-coherent;
+ };
+...
diff --git a/Documentation/devicetree/bindings/crypto/omap-sham.txt b/Documentation/devicetree/bindings/crypto/omap-sham.txt
deleted file mode 100644
index ad9115569611..000000000000
--- a/Documentation/devicetree/bindings/crypto/omap-sham.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-OMAP SoC SHA crypto Module
-
-Required properties:
-
-- compatible : Should contain entries for this and backward compatible
- SHAM versions:
- - "ti,omap2-sham" for OMAP2 & OMAP3.
- - "ti,omap4-sham" for OMAP4 and AM33XX.
- - "ti,omap5-sham" for OMAP5, DRA7 and AM43XX.
-- ti,hwmods: Name of the hwmod associated with the SHAM module
-- reg : Offset and length of the register set for the module
-- interrupts : the interrupt-specifier for the SHAM module.
-
-Optional properties:
-- dmas: DMA specifiers for the rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request name. Should be "rx" if a dma is present.
-
-Example:
- /* AM335x */
- sham: sham@53100000 {
- compatible = "ti,omap4-sham";
- ti,hwmods = "sham";
- reg = <0x53100000 0x200>;
- interrupts = <109>;
- dmas = <&edma 36>;
- dma-names = "rx";
- };
diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
index e91bc7dc6ad3..0304f074cf08 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
@@ -15,6 +15,7 @@ properties:
- enum:
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine
+ - qcom,sc7280-inline-crypto-engine
- qcom,sm8450-inline-crypto-engine
- qcom,sm8550-inline-crypto-engine
- qcom,sm8650-inline-crypto-engine
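A sketch of an inline crypto engine node using the new SC7280 entry, with the generic qcom,inline-crypto-engine fallback assumed from this list; the unit address and clock specifier are placeholders:

    ice: crypto@1d88000 {
        compatible = "qcom,sc7280-inline-crypto-engine", "qcom,inline-crypto-engine";
        reg = <0x01d88000 0x8000>;                  /* placeholder ICE register window */
        clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;   /* placeholder clock specifier */
    };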
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml
index 0ddeb8a9a7a0..27354658d054 100644
--- a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml
@@ -46,6 +46,10 @@ properties:
power-domains:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
index ac480765cde0..822318414095 100644
--- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
@@ -51,6 +51,10 @@ properties:
power-domains:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml b/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
index 71a2876bd6e4..7ccb6e1641d0 100644
--- a/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
+++ b/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
@@ -12,7 +12,9 @@ maintainers:
properties:
compatible:
- const: starfive,jh7110-crypto
+ enum:
+ - starfive,jh7110-crypto
+ - starfive,jh8100-crypto
reg:
maxItems: 1
@@ -28,7 +30,10 @@ properties:
- const: ahb
interrupts:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: SHA2 module irq
+ - description: SM3 module irq
resets:
maxItems: 1
@@ -54,6 +59,27 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: starfive,jh7110-crypto
+
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ const: starfive,jh8100-crypto
+
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+
examples:
- |
crypto: crypto@16000000 {
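The JH7110 example keeps a single interrupt; the new JH8100 variant instead requires separate SHA2 and SM3 interrupts. A minimal sketch, with the unit address, clock/reset providers and interrupt numbers assumed and the remaining properties following the JH7110 example:

    crypto@16000000 {
        compatible = "starfive,jh8100-crypto";
        reg = <0x16000000 0x4000>;
        clocks = <&syscrg 15>, <&syscrg 16>;    /* placeholder clock specifiers */
        clock-names = "hclk", "ahb";
        resets = <&syscrg 3>;                   /* placeholder reset specifier */
        interrupts = <28>, <29>;                /* SHA2 module irq, SM3 module irq */
    };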
diff --git a/Documentation/devicetree/bindings/crypto/ti,omap-sham.yaml b/Documentation/devicetree/bindings/crypto/ti,omap-sham.yaml
new file mode 100644
index 000000000000..d69b50228009
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/ti,omap-sham.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/ti,omap-sham.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP SoC SHA crypto Module
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,omap2-sham
+ - ti,omap4-sham
+ - ti,omap5-sham
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rx
+
+ ti,hwmods:
+ description: Name of the hwmod associated with the SHAM module
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [sham]
+
+dependencies:
+ dmas: [dma-names]
+
+additionalProperties: false
+
+required:
+ - compatible
+ - ti,hwmods
+ - reg
+ - interrupts
+
+examples:
+ - |
+ sham@53100000 {
+ compatible = "ti,omap4-sham";
+ ti,hwmods = "sham";
+ reg = <0x53100000 0x200>;
+ interrupts = <109>;
+ dmas = <&edma 36>;
+ dma-names = "rx";
+ };
diff --git a/Documentation/devicetree/bindings/display/atmel,lcdc-display.yaml b/Documentation/devicetree/bindings/display/atmel,lcdc-display.yaml
new file mode 100644
index 000000000000..a5cf040ab4ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/atmel,lcdc-display.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/atmel,lcdc-display.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip's LCDC Display
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Dharma Balasubiramani <dharma.b@microchip.com>
+
+description:
+ The LCD Controller (LCDC) consists of logic for transferring LCD image data
+ from an external display buffer to a TFT LCD panel. The LCDC has one display
+ input buffer per layer that fetches pixels through the single bus host
+ interface and a look-up table to allow palettized display configurations. The
+ LCDC is programmable on a per layer basis, and supports different LCD
+ resolutions, window sizes, image formats and pixel depths.
+
+# We need a select here since this schema is applicable only for nodes with the
+# following properties
+
+select:
+ anyOf:
+ - required: [ 'atmel,dmacon' ]
+ - required: [ 'atmel,lcdcon2' ]
+ - required: [ 'atmel,guard-time' ]
+
+properties:
+ atmel,dmacon:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: dma controller configuration
+
+ atmel,lcdcon2:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: lcd controller configuration
+
+ atmel,guard-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: lcd guard time (Delay in frame periods)
+ maximum: 127
+
+ bits-per-pixel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: lcd panel bit-depth.
+ enum: [1, 2, 4, 8, 16, 24, 32]
+
+ atmel,lcdcon-backlight:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: enable backlight
+
+ atmel,lcdcon-backlight-inverted:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: invert backlight PWM polarity
+
+ atmel,lcd-wiring-mode:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: lcd wiring mode "RGB" or "BRG"
+ enum:
+ - RGB
+ - BRG
+
+ atmel,power-control-gpio:
+ description: gpio to power on or off the LCD (as many as needed)
+ maxItems: 1
+
+ display-timings:
+ $ref: panel/display-timings.yaml#
+
+required:
+ - atmel,dmacon
+ - atmel,lcdcon2
+ - atmel,guard-time
+ - bits-per-pixel
+
+additionalProperties: false
+
+examples:
+ - |
+ display: panel {
+ bits-per-pixel = <32>;
+ atmel,lcdcon-backlight;
+ atmel,dmacon = <0x1>;
+ atmel,lcdcon2 = <0x80008002>;
+ atmel,guard-time = <9>;
+ atmel,lcd-wiring-mode = "RGB";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <9000000>;
+ hactive = <480>;
+ vactive = <272>;
+ hback-porch = <1>;
+ hfront-porch = <1>;
+ vback-porch = <40>;
+ vfront-porch = <1>;
+ hsync-len = <45>;
+ vsync-len = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/atmel,lcdc.txt b/Documentation/devicetree/bindings/display/atmel,lcdc.txt
deleted file mode 100644
index b5e355ada2fa..000000000000
--- a/Documentation/devicetree/bindings/display/atmel,lcdc.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-Atmel LCDC Framebuffer
------------------------------------------------------
-
-Required properties:
-- compatible :
- "atmel,at91sam9261-lcdc" ,
- "atmel,at91sam9263-lcdc" ,
- "atmel,at91sam9g10-lcdc" ,
- "atmel,at91sam9g45-lcdc" ,
- "atmel,at91sam9g45es-lcdc" ,
- "atmel,at91sam9rl-lcdc" ,
-- reg : Should contain 1 register ranges(address and length).
- Can contain an additional register range(address and length)
- for fixed framebuffer memory. Useful for dedicated memories.
-- interrupts : framebuffer controller interrupt
-- display: a phandle pointing to the display node
-
-Required nodes:
-- display: a display node is required to initialize the lcd panel
- This should be in the board dts.
-- default-mode: a videomode within the display with timing parameters
- as specified below.
-
-Optional properties:
-- lcd-supply: Regulator for LCD supply voltage.
-
-Example:
-
- fb0: fb@00500000 {
- compatible = "atmel,at91sam9g45-lcdc";
- reg = <0x00500000 0x1000>;
- interrupts = <23 3 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fb>;
- display = <&display0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- };
-
-Example for fixed framebuffer memory:
-
- fb0: fb@00500000 {
- compatible = "atmel,at91sam9263-lcdc";
- reg = <0x00700000 0x1000 0x70000000 0x200000>;
- [...]
- };
-
-Atmel LCDC Display
------------------------------------------------------
-Required properties (as per of_videomode_helper):
-
- - atmel,dmacon: dma controller configuration
- - atmel,lcdcon2: lcd controller configuration
- - atmel,guard-time: lcd guard time (Delay in frame periods)
- - bits-per-pixel: lcd panel bit-depth.
-
-Optional properties (as per of_videomode_helper):
- - atmel,lcdcon-backlight: enable backlight
- - atmel,lcdcon-backlight-inverted: invert backlight PWM polarity
- - atmel,lcd-wiring-mode: lcd wiring mode "RGB" or "BRG"
- - atmel,power-control-gpio: gpio to power on or off the LCD (as many as needed)
-
-Example:
- display0: display {
- bits-per-pixel = <32>;
- atmel,lcdcon-backlight;
- atmel,dmacon = <0x1>;
- atmel,lcdcon2 = <0x80008002>;
- atmel,guard-time = <9>;
- atmel,lcd-wiring-mode = <1>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hback-porch = <1>;
- hfront-porch = <1>;
- vback-porch = <40>;
- vfront-porch = <1>;
- hsync-len = <45>;
- vsync-len = <1>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/atmel,lcdc.yaml b/Documentation/devicetree/bindings/display/atmel,lcdc.yaml
new file mode 100644
index 000000000000..1b6f7e395006
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/atmel,lcdc.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/atmel,lcdc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip's LCDC Framebuffer
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Dharma Balasubiramani <dharma.b@microchip.com>
+
+description:
+ The LCDC works with a framebuffer, which is a section of memory that contains
+ a complete frame of data representing pixel values for the display. The LCDC
+ reads the pixel data from the framebuffer and sends it to the LCD panel to
+ render the image.
+
+properties:
+ compatible:
+ enum:
+ - atmel,at91sam9261-lcdc
+ - atmel,at91sam9263-lcdc
+ - atmel,at91sam9g10-lcdc
+ - atmel,at91sam9g45-lcdc
+ - atmel,at91sam9g45es-lcdc
+ - atmel,at91sam9rl-lcdc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: hclk
+ - const: lcdc_clk
+
+ display:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle pointing to the display node.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - display
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ fb@500000 {
+ compatible = "atmel,at91sam9g45-lcdc";
+ reg = <0x00500000 0x1000>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fb>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_PERIPHERAL 23>;
+ clock-names = "hclk", "lcdc_clk";
+ display = <&display>;
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index c9a882ee6d98..c4469f463978 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -9,6 +9,9 @@ title: ITE it6505
maintainers:
- Allen Chen <allen.chen@ite.com.tw>
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
description: |
The IT6505 is a high-performance DisplayPort 1.1a transmitter,
fully compliant with DisplayPort 1.1a, HDCP 1.3 specifications.
@@ -52,6 +55,9 @@ properties:
maxItems: 1
description: extcon specifier for the Power Delivery
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -105,7 +111,7 @@ required:
- extcon
- ports
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
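With #sound-dai-cells added, the bridge can also be referenced as an audio DAI. A trimmed sketch, where the I2C address, node label and the consuming sound-card node are assumptions and the remaining required properties follow the binding's example:

    it6505dptx: dp-bridge@5c {
        compatible = "ite,it6505";
        reg = <0x5c>;                /* placeholder I2C address */
        #sound-dai-cells = <0>;
        /* interrupts, supplies, reset-gpios, extcon and ports omitted for brevity */
    };

    /* referenced from a sound card, e.g. */
    codec {
        sound-dai = <&it6505dptx>;
    };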
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 84aafcbf0919..6ceeed76e88e 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -41,6 +41,7 @@ properties:
- enum:
- ti,ds90cf364a # For the DS90CF364A FPD-Link LVDS Receiver
- ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
+ - ti,sn65lvds94 # For the SN65LVDS94 LVDS serdes
- const: lvds-decoder # Generic LVDS decoders compatible fallback
- enum:
- thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
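A sketch of a decoder node using the new entry with the generic lvds-decoder fallback from this list; the graph endpoints are placeholders:

    lvds-decoder {
        compatible = "ti,sn65lvds94", "lvds-decoder";

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                reg = <0>;
                endpoint {
                    remote-endpoint = <&lvds_out>;      /* LVDS input from the display controller */
                };
            };

            port@1 {
                reg = <1>;
                endpoint {
                    remote-endpoint = <&panel_input>;   /* parallel output towards the panel */
                };
            };
        };
    };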
diff --git a/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml b/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
new file mode 100644
index 000000000000..862ef441ac9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/microchip,sam9x75-lvds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip SAM9X75 LVDS Controller
+
+maintainers:
+ - Dharma Balasubiramani <dharma.b@microchip.com>
+
+description:
+ The Low Voltage Differential Signaling Controller (LVDSC) manages data
+ format conversion from the LCD Controller internal DPI bus to OpenLDI
+ LVDS output signals. LVDSC functions include bit mapping, balanced mode
+ management, and serializer.
+
+properties:
+ compatible:
+ const: microchip,sam9x75-lvds
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Peripheral Bus Clock
+
+ clock-names:
+ items:
+ - const: pclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/at91.h>
+ lvds-controller@f8060000 {
+ compatible = "microchip,sam9x75-lvds";
+ reg = <0xf8060000 0x100>;
+ interrupts = <56 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 56>;
+ clock-names = "pclk";
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
index d879c700594a..258dd9cfd770 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
@@ -10,7 +10,7 @@ maintainers:
- Vinay Simha BN <simhavcs@gmail.com>
description: |
- This binding supports DSI to LVDS bridge TC358775
+ This binding supports DSI to LVDS bridges TC358765 and TC358775
MIPI DSI-RX Data 4-lane, CLK 1-lane with data rates up to 800 Mbps/lane.
Video frame size:
@@ -21,7 +21,9 @@ description: |
properties:
compatible:
- const: toshiba,tc358775
+ enum:
+ - toshiba,tc358765
+ - toshiba,tc358775
reg:
maxItems: 1
@@ -46,11 +48,27 @@ properties:
properties:
port@0:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
DSI Input. The remote endpoint phandle should be a
reference to a valid mipi_dsi_host device node.
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: |
@@ -70,10 +88,19 @@ required:
- reg
- vdd-supply
- vddio-supply
- - stby-gpios
- reset-gpios
- ports
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: toshiba,tc358765
+ then:
+ properties:
+ stby-gpios: false
+
additionalProperties: false
examples:
@@ -108,6 +135,7 @@ examples:
reg = <0>;
d2l_in_test: endpoint {
remote-endpoint = <&dsi0_out>;
+ data-lanes = <1 2 3 4>;
};
};
@@ -132,7 +160,6 @@ examples:
reg = <1>;
dsi0_out: endpoint {
remote-endpoint = <&d2l_in_test>;
- data-lanes = <0 1 2 3>;
};
};
};
@@ -167,6 +194,7 @@ examples:
reg = <0>;
d2l_in_dual: endpoint {
remote-endpoint = <&dsi0_out_dual>;
+ data-lanes = <1 2 3 4>;
};
};
@@ -198,7 +226,6 @@ examples:
reg = <1>;
dsi0_out_dual: endpoint {
remote-endpoint = <&d2l_in_dual>;
- data-lanes = <0 1 2 3>;
};
};
};
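For the newly added TC358765 the stby-gpios line is dropped and the DSI input endpoint carries explicit data-lanes. A trimmed sketch, with the I2C address, supply regulators and GPIO specifier assumed:

    bridge@f {
        compatible = "toshiba,tc358765";
        reg = <0x0f>;
        vdd-supply = <&vdd_1v2_reg>;      /* placeholder regulators */
        vddio-supply = <&vdd_1v8_reg>;
        reset-gpios = <&gpio 2 GPIO_ACTIVE_LOW>;

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                reg = <0>;
                endpoint {
                    remote-endpoint = <&dsi0_out>;
                    data-lanes = <1 2 3 4>;
                };
            };

            port@1 {
                reg = <1>;
                endpoint {
                    remote-endpoint = <&panel_in>;
                };
            };
        };
    };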
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
deleted file mode 100644
index 3a401590320f..000000000000
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-The Exynos display port interface should be configured based on
-the type of panel connected to it.
-
-We use two nodes:
- -dp-controller node
- -dptx-phy node(defined inside dp-controller node)
-
-For the DP-PHY initialization, we use the dptx-phy node.
-Required properties for dptx-phy: deprecated, use phys and phy-names
- -reg: deprecated
- Base address of DP PHY register.
- -samsung,enable-mask: deprecated
- The bit-mask used to enable/disable DP PHY.
-
-For the Panel initialization, we read data from dp-controller node.
-Required properties for dp-controller:
- -compatible:
- should be "samsung,exynos5-dp".
- -reg:
- physical base address of the controller and length
- of memory mapped region.
- -interrupts:
- interrupt combiner values.
- -clocks:
- from common clock binding: handle to dp clock.
- -clock-names:
- from common clock binding: Shall be "dp".
- -phys:
- from general PHY binding: the phandle for the PHY device.
- -phy-names:
- from general PHY binding: Should be "dp".
-
-Optional properties for dp-controller:
- -interlaced:
- interlace scan mode.
- Progressive if defined, Interlaced if not defined
- -vsync-active-high:
- VSYNC polarity configuration.
- High if defined, Low if not defined
- -hsync-active-high:
- HSYNC polarity configuration.
- High if defined, Low if not defined
- -samsung,hpd-gpio:
- Hotplug detect GPIO.
- Indicates which GPIO should be used for hotplug
- detection
- -video interfaces: Device node can contain video interface port
- nodes according to [1].
- - display-timings: timings for the connected panel as described by
- Documentation/devicetree/bindings/display/panel/display-timing.txt
-
-For the below properties, please refer to Analogix DP binding document:
- * Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml
- -phys (required)
- -phy-names (required)
- -hpd-gpios (optional)
- force-hpd (optional)
-
-Deprecated properties for DisplayPort:
--interlaced: deprecated prop that can parsed from drm_display_mode.
--vsync-active-high: deprecated prop that can parsed from drm_display_mode.
--hsync-active-high: deprecated prop that can parsed from drm_display_mode.
--samsung,ycbcr-coeff: deprecated prop that can parsed from drm_display_mode.
--samsung,dynamic-range: deprecated prop that can parsed from drm_display_mode.
--samsung,color-space: deprecated prop that can parsed from drm_display_info.
--samsung,color-depth: deprecated prop that can parsed from drm_display_info.
--samsung,link-rate: deprecated prop that can reading from monitor by dpcd method.
--samsung,lane-count: deprecated prop that can reading from monitor by dpcd method.
--samsung,hpd-gpio: deprecated name for hpd-gpios.
-
--------------------------------------------------------------------------------
-
-Example:
-
-SOC specific portion:
- dp-controller {
- compatible = "samsung,exynos5-dp";
- reg = <0x145b0000 0x10000>;
- interrupts = <10 3>;
- interrupt-parent = <&combiner>;
- clocks = <&clock 342>;
- clock-names = "dp";
-
- phys = <&dp_phy>;
- phy-names = "dp";
- };
-
-Board Specific portion:
- dp-controller {
- display-timings {
- native-mode = <&lcd_timing>;
- lcd_timing: 1366x768 {
- clock-frequency = <70589280>;
- hactive = <1366>;
- vactive = <768>;
- hfront-porch = <40>;
- hback-porch = <40>;
- hsync-len = <32>;
- vback-porch = <10>;
- vfront-porch = <12>;
- vsync-len = <6>;
- };
- };
-
- ports {
- port@0 {
- dp_out: endpoint {
- remote-endpoint = <&bridge_in>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
index c6641acd75d6..b8b8e83ebc3f 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
@@ -24,6 +24,7 @@ properties:
- enum:
- mediatek,mt8173-disp-gamma
- mediatek,mt8183-disp-gamma
+ - mediatek,mt8195-disp-gamma
- items:
- enum:
- mediatek,mt6795-disp-gamma
@@ -35,6 +36,10 @@ properties:
- mediatek,mt8192-disp-gamma
- mediatek,mt8195-disp-gamma
- const: mediatek,mt8183-disp-gamma
+ - items:
+ - enum:
+ - mediatek,mt8188-disp-gamma
+ - const: mediatek,mt8195-disp-gamma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index ae53cbfb2193..97993feda193 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -29,6 +29,7 @@ properties:
- qcom,sm8650-dp
- items:
- enum:
+ - qcom,sm6350-dp
- qcom,sm8150-dp
- qcom,sm8250-dp
- qcom,sm8450-dp
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml
index c9ba1fae8042..bba666bdffe5 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml
@@ -53,6 +53,15 @@ patternProperties:
compatible:
const: qcom,sm6350-dpu
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ contains:
+ const: qcom,sm6350-dp
+
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
index c0d6a4fdff97..e6dc5494baee 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
@@ -53,6 +53,15 @@ patternProperties:
compatible:
const: qcom,sm8150-dpu
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ contains:
+ const: qcom,sm8150-dp
+
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
index acd2f3faa6b9..0aa2d3fbadaa 100644
--- a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
@@ -17,10 +17,12 @@ properties:
compatible:
const: abt,y030xx067a
+ reg:
+ maxItems: 1
+
backlight: true
port: true
power-supply: true
- reg: true
reset-gpios: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml b/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
index 75a09df68ba0..2399cabf044c 100644
--- a/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
+++ b/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
@@ -21,7 +21,10 @@ allOf:
properties:
compatible:
const: asus,z00t-tm5p5-n35596
- reg: true
+
+ reg:
+ maxItems: 1
+
reset-gpios: true
vdd-supply:
description: core voltage supply
diff --git a/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml b/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml
index a8f3afa922c8..8b7448ad9138 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml
@@ -26,6 +26,9 @@ properties:
compatible:
const: boe,bf060y8m-aj0
+ reg:
+ maxItems: 1
+
elvdd-supply:
description: EL Driving positive (VDD) supply (4.40-4.80V)
elvss-supply:
@@ -38,7 +41,6 @@ properties:
description: I/O voltage supply (1.62-1.98V)
port: true
- reg: true
reset-gpios: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
index 272a3a018a33..f2496cdd9260 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
@@ -18,9 +18,11 @@ properties:
- const: boe,himax8279d8p
- const: boe,himax8279d10p
+ reg:
+ maxItems: 1
+
backlight: true
enable-gpios: true
- reg: true
pp33-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
index 32df26cbfeed..5eaccce13c21 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
@@ -18,7 +18,9 @@ properties:
# BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel
- boe,th101mb31ig002-28a
- reg: true
+ reg:
+ maxItems: 1
+
backlight: true
enable-gpios: true
power-supply: true
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
index 906ef62709b8..9e603cad1348 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
@@ -38,7 +38,7 @@ properties:
- starry,ili9882t
reg:
- description: the virtual channel number of a DSI peripheral
+ maxItems: 1
enable-gpios:
description: a GPIO spec for the enable pin
diff --git a/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
index 265ab6d30572..f4cb825d1e96 100644
--- a/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
+++ b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
@@ -15,7 +15,10 @@ allOf:
properties:
compatible:
const: elida,kd35t133
- reg: true
+
+ reg:
+ maxItems: 1
+
backlight: true
port: true
reset-gpios: true
diff --git a/Documentation/devicetree/bindings/display/panel/fascontek,fs035vg158.yaml b/Documentation/devicetree/bindings/display/panel/fascontek,fs035vg158.yaml
index d13c4bd26de4..9847da784cc8 100644
--- a/Documentation/devicetree/bindings/display/panel/fascontek,fs035vg158.yaml
+++ b/Documentation/devicetree/bindings/display/panel/fascontek,fs035vg158.yaml
@@ -17,6 +17,9 @@ properties:
compatible:
const: fascontek,fs035vg158
+ reg:
+ maxItems: 1
+
spi-3wire: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
index 81adb82f061d..0d8707a5844d 100644
--- a/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
+++ b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
@@ -15,7 +15,10 @@ allOf:
properties:
compatible:
const: feixin,k101-im2ba02
- reg: true
+
+ reg:
+ maxItems: 1
+
backlight: true
reset-gpios: true
avdd-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
index 174661d13811..56bcd152f43c 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
@@ -21,6 +21,9 @@ properties:
contains:
const: djn,9a-3r063-1102b
+ reg:
+ maxItems: 1
+
vdd1-supply:
description: Digital voltage rail
@@ -30,7 +33,6 @@ properties:
vsp-supply:
description: Negative source voltage rail
- reg: true
port: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 916bb7f94206..644387e4fb6f 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -26,7 +26,8 @@ properties:
- powkiddy,x55-panel
- const: himax,hx8394
- reg: true
+ reg:
+ maxItems: 1
reset-gpios: true
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
index 3cabbba86581..ef5a2240b684 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
@@ -24,6 +24,9 @@ properties:
- newhaven,1.8-128160EF
- const: ilitek,ili9163
+ reg:
+ maxItems: 1
+
spi-max-frequency:
maximum: 32000000
@@ -32,7 +35,6 @@ properties:
description: Display data/command selection (D/CX)
backlight: true
- reg: true
reset-gpios: true
rotation: true
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
index 7d221ef35443..44423465f6e3 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
@@ -26,6 +26,9 @@ properties:
- dlink,dir-685-panel
- const: ilitek,ili9322
+ reg:
+ maxItems: 1
+
reset-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 94f169ea065a..5f41758c96d5 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -28,7 +28,8 @@ properties:
- canaan,kd233-tft
- const: ilitek,ili9341
- reg: true
+ reg:
+ maxItems: 1
dc-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml
index f4f91f93f490..ff67129c9466 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml
@@ -20,9 +20,11 @@ properties:
- tianma,tm041xdhg01
- const: ilitek,ili9805
+ reg:
+ maxItems: 1
+
avdd-supply: true
dvdd-supply: true
- reg: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index b1e624be3e33..baf5dfe5f5eb 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -19,13 +19,16 @@ properties:
- ampire,am8001280g
- bananapi,lhr050h41
- feixin,k101-im2byl02
+ - startek,kd050hdfia020
- tdo,tl050hdv35
- wanchanglong,w552946aba
- const: ilitek,ili9881c
+ reg:
+ maxItems: 1
+
backlight: true
power-supply: true
- reg: true
reset-gpios: true
rotation: true
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
index 72788e3e6c59..c7df9a7f6589 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
@@ -17,10 +17,12 @@ properties:
compatible:
const: innolux,ej030na
+ reg:
+ maxItems: 1
+
backlight: true
port: true
power-supply: true
- reg: true
reset-gpios: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
index 5a5f071627fb..4164e3f7061d 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
@@ -16,9 +16,11 @@ properties:
compatible:
const: innolux,p097pfg
+ reg:
+ maxItems: 1
+
backlight: true
enable-gpios: true
- reg: true
avdd-supply:
description: The regulator that provides positive voltage
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index 41eb7fbf7715..20afdb4568a2 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -21,7 +21,8 @@ properties:
- radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
- reg: true
+ reg:
+ maxItems: 1
vdd-supply:
description: supply regulator for VDD, usually 3.3V
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lpm102a188a.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lpm102a188a.yaml
index 2f4d27a309a7..a8621459005b 100644
--- a/Documentation/devicetree/bindings/display/panel/jdi,lpm102a188a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jdi,lpm102a188a.yaml
@@ -26,7 +26,9 @@ properties:
compatible:
const: jdi,lpm102a188a
- reg: true
+ reg:
+ maxItems: 1
+
enable-gpios: true
reset-gpios: true
power-supply: true
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
index 63c82a4378ff..0c8b5cb78bfe 100644
--- a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
@@ -16,8 +16,10 @@ properties:
compatible:
const: jdi,lt070me05000
+ reg:
+ maxItems: 1
+
enable-gpios: true
- reg: true
reset-gpios: true
vddp-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
index b4be9bd8ddde..d86c916f7b55 100644
--- a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
@@ -17,10 +17,12 @@ properties:
compatible:
const: kingdisplay,kd035g6-54nt
+ reg:
+ maxItems: 1
+
backlight: true
port: true
power-supply: true
- reg: true
reset-gpios: true
spi-3wire: true
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk035c5444t.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk035c5444t.yaml
index 7a55961e1a3d..b5dc02b27200 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk035c5444t.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk035c5444t.yaml
@@ -18,6 +18,9 @@ properties:
compatible:
const: leadtek,ltk035c5444t
+ reg:
+ maxItems: 1
+
spi-3wire: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
index a40ab887ada7..e2a2dd4ef5fa 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
@@ -18,7 +18,10 @@ properties:
- leadtek,ltk050h3146w
- leadtek,ltk050h3146w-a2
- leadtek,ltk050h3148w
- reg: true
+
+ reg:
+ maxItems: 1
+
backlight: true
reset-gpios: true
iovcc-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index d589f1677214..af9e0ea0e72f 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -17,7 +17,10 @@ properties:
enum:
- leadtek,ltk101b4029w
- leadtek,ltk500hd1829
- reg: true
+
+ reg:
+ maxItems: 1
+
backlight: true
reset-gpios: true
iovcc-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
index ee357e139ac0..590ccc27d104 100644
--- a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
@@ -21,7 +21,8 @@ properties:
compatible:
const: lg,lg4573
- reg: true
+ reg:
+ maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
new file mode 100644
index 000000000000..1e08648f5bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lg,sw43408.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG SW43408 1080x2160 DSI panel
+
+maintainers:
+ - Caleb Connolly <caleb.connolly@linaro.org>
+
+description:
+ This panel is used on the Pixel 3; it is a 60 Hz OLED panel that requires
+ DSC (Display Stream Compression) and has rounded corners.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: lg,sw43408
+
+ reg: true
+ port: true
+ vddi-supply: true
+ vpnl-supply: true
+ reset-gpios: true
+
+required:
+ - compatible
+ - vddi-supply
+ - vpnl-supply
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "lg,sw43408";
+ reg = <0>;
+
+ vddi-supply = <&vreg_l14a_1p88>;
+ vpnl-supply = <&vreg_l28a_3p0>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
index 628c4b898111..3de17fd8513b 100644
--- a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
@@ -17,6 +17,9 @@ properties:
compatible:
const: lgphilips,lb035q02
+ reg:
+ maxItems: 1
+
label: true
enable-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
index accf933d6e46..1cffe4d6d498 100644
--- a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
+++ b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
@@ -21,9 +21,11 @@ properties:
compatible:
const: nec,nl8048hl11
+ reg:
+ maxItems: 1
+
label: true
port: true
- reg: true
reset-gpios: true
spi-max-frequency:
diff --git a/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml b/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml
index 7a634fbc465e..d3a25a8fd7e3 100644
--- a/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml
+++ b/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml
@@ -24,7 +24,9 @@ properties:
- powkiddy,rk2023-panel
- const: newvision,nv3051d
- reg: true
+ reg:
+ maxItems: 1
+
backlight: true
port: true
reset-gpios:
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
index 91921f4b0e5f..bb50fd5506c3 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
@@ -24,7 +24,10 @@ properties:
string determines how the NT35510 panel driver shall be configured
to work with the indicated panel. The novatek,nt35510 compatible shall
always be provided as a fallback.
- reg: true
+
+ reg:
+ maxItems: 1
+
reset-gpios: true
vdd-supply:
description: regulator that supplies the vdd voltage
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
index 377a05d48a02..a9e40493986b 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
@@ -19,7 +19,7 @@ description: |
either bilinear interpolation or pixel duplication.
allOf:
- - $ref: panel-common.yaml#
+ - $ref: panel-common-dual.yaml#
properties:
compatible:
@@ -33,6 +33,9 @@ properties:
to work with the indicated panel. The novatek,nt35950 compatible shall
always be provided as a fallback.
+ reg:
+ maxItems: 1
+
reset-gpios:
maxItems: 1
description: phandle of gpio for reset line - This should be 8mA, gpio
@@ -49,7 +52,6 @@ properties:
backlight: true
ports: true
- reg: true
required:
- compatible
@@ -59,6 +61,7 @@ required:
- avee-supply
- dvdd-supply
- vddio-supply
+ - ports
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
index 5f7e4c486094..c4bae4f77085 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
@@ -14,9 +14,6 @@ description: |
panels. Support video mode panels from China Star Optoelectronics
Technology (CSOT) and BOE Technology.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
oneOf:
@@ -30,6 +27,9 @@ properties:
- lenovo,j606f-boe-nt36523w
- const: novatek,nt36523w
+ reg:
+ maxItems: 1
+
reset-gpios:
maxItems: 1
description: phandle of gpio for reset line - This should be 8mA
@@ -37,8 +37,6 @@ properties:
vddio-supply:
description: regulator that supplies the I/O voltage
- reg: true
- ports: true
rotation: true
backlight: true
@@ -47,7 +45,26 @@ required:
- reg
- vddio-supply
- reset-gpios
- - ports
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - novatek,nt36523w
+ then:
+ properties:
+ ports:
+ properties:
+ port@1: false
+ else:
+ properties:
+ port: false
+ ports:
+ required:
+ - port@1
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
index ae821f465e1c..800a2f0a4dad 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
@@ -29,6 +29,9 @@ properties:
determines how the NT36672A panel driver is configured for the indicated
panel. The novatek,nt36672a compatible shall always be provided as a fallback.
+ reg:
+ maxItems: 1
+
reset-gpios:
maxItems: 1
description: phandle of gpio for reset line - This should be 8mA, gpio
@@ -44,7 +47,6 @@ properties:
vddneg-supply:
description: phandle of the negative boost supply regulator
- reg: true
port: true
backlight: true
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
index 72463795e4c6..e5d8785fdf90 100644
--- a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
+++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
@@ -38,10 +38,12 @@ properties:
compatible:
const: olimex,lcd-olinuxino
+ reg:
+ maxItems: 1
+
backlight: true
enable-gpios: true
power-supply: true
- reg: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml b/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml
new file mode 100644
index 000000000000..cc7ea3c35c77
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-common-dual.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for Dual-Link Display Panels
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ Properties common to panel ICs that support dual-link panels. Such devices
+ might also support a single link.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ additionalProperties: false
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: First link
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Second link
+
+ "#address-cells": true
+ "#size-cells": true
+
+ required:
+ - port@0
+
+# Single-panel setups are still allowed.
+oneOf:
+ - required:
+ - ports
+ - required:
+ - port
+
+additionalProperties: true
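As a rough illustration of the graph binding above, a dual-link consumer of this schema could describe its ports as in the sketch below; the compatible string, labels and remote endpoints are placeholders, not taken from any real binding.

    panel@0 {
        compatible = "vendor,dual-link-panel";      /* placeholder */
        reg = <0>;

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            /* First link - the only port required by this schema */
            port@0 {
                reg = <0>;
                panel_in0: endpoint {
                    remote-endpoint = <&dsi0_out>;
                };
            };

            /* Second link - present only for dual-link operation */
            port@1 {
                reg = <1>;
                panel_in1: endpoint {
                    remote-endpoint = <&dsi1_out>;
                };
            };
        };
    };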
diff --git a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
index e808215cb39e..d0ac31ab60cf 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
@@ -71,6 +71,9 @@ properties:
- shineworld,lh133k
- const: panel-mipi-dbi-spi
+ reg:
+ maxItems: 1
+
write-only:
type: boolean
description:
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index f9160d7bac3c..db5acd2807ed 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -36,6 +36,8 @@ properties:
- jdi,fhd-r63452
# Khadas TS050 5" 1080x1920 LCD panel
- khadas,ts050
+ # Khadas TS050 V2 5" 1080x1920 LCD panel
+ - khadas,ts050v2
# Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
- kingdisplay,kd097d04
# LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
@@ -50,6 +52,8 @@ properties:
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
+ # Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel
+ - samsung,s6e3fa7-ams559nk06
# Samsung s6e3fc2x01 1080x2340 AMOLED panel
- samsung,s6e3fc2x01
# Samsung sofef00 1080x2280 AMOLED panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index a95445f40870..5067f5c0a272 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -91,6 +91,8 @@ properties:
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
+ # Crystal Clear Technology CMT430B19N00 4.3" 480x272 TFT-LCD panel
+ - cct,cmt430b19n00
# CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
- cdtech,s043wq26h-ct7
# CDTech(H.K.) Electronics Limited 7" WSVGA (1024x600) TFT LCD Panel
@@ -188,6 +190,8 @@ properties:
- innolux,g121i1-l01
# Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
- innolux,g121x1-l03
+ # Innolux Corporation 12.1" G121XCE-L01 XGA (1024x768) TFT LCD panel
+ - innolux,g121xce-l01
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
@@ -272,6 +276,8 @@ properties:
- osddisplays,osd070t1718-19ts
# One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2045-53ts
+ # POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
+ - powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
- powertip,ph800480t013-idf02
# QiaoDian XianShi Corporation 4"3 TFT LCD panel
@@ -348,15 +354,6 @@ properties:
# Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel
- yes-optoelectronics,ytc700tlag-05-201c
- backlight: true
- ddc-i2c-bus: true
- enable-gpios: true
- port: true
- power-supply: true
- no-hpd: true
- hpd-gpios: true
- data-mapping: true
-
if:
not:
properties:
@@ -367,7 +364,7 @@ then:
properties:
data-mapping: false
-additionalProperties: false
+unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
index d62fd692bf10..4825792bf712 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
@@ -16,7 +16,9 @@ properties:
compatible:
const: raydium,rm67191
- reg: true
+ reg:
+ maxItems: 1
+
port: true
reset-gpios: true
width-mm: true
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
index f436ba6738ca..7ad223f98253 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
@@ -22,6 +22,9 @@ properties:
- const: fairphone,fp5-rm692e5-boe
- const: raydium,rm692e5
+ reg:
+ maxItems: 1
+
dvdd-supply:
description: Digital voltage rail
@@ -31,7 +34,6 @@ properties:
vddio-supply:
description: I/O voltage rail
- reg: true
port: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml
new file mode 100644
index 000000000000..b17765b2b351
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm69380.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium RM69380-based DSI display panels
+
+maintainers:
+ - David Wronek <david@mainlining.org>
+
+description:
+ The Raydium RM69380 is a generic DSI panel IC used to control
+ OLED panels.
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lenovo,j716f-edo-rm69380
+ - const: raydium,rm69380
+ description: This indicates the panel manufacturer of the panel
+ that is in turn using the RM69380 panel driver. The compatible
+ string determines how the RM69380 panel driver shall be configured
+ to work with the indicated panel. The raydium,rm69380 compatible shall
+ always be provided as a fallback.
+
+ avdd-supply:
+ description: Analog voltage rail
+
+ vddio-supply:
+ description: I/O voltage rail
+
+ reset-gpios:
+ maxItems: 1
+ description: phandle of gpio for reset line - This should be active low
+
+ reg: true
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vddio-supply
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "lenovo,j716f-edo-rm69380", "raydium,rm69380";
+ reg = <0>;
+
+ avdd-supply = <&panel_avdd_regulator>;
+ vddio-supply = <&vreg_l14a>;
+ reset-gpios = <&tlmm 75 GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ panel_in_0: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ panel_in_1: endpoint {
+ remote-endpoint = <&mdss_dsi1_out>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
index 6ec471284f97..4ae152cc55e0 100644
--- a/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
+++ b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
@@ -22,6 +22,8 @@ properties:
enum:
# Anberic RG353V-V2 5.0" 640x480 TFT LCD panel
- anbernic,rg353v-panel-v2
+ # GameForce Chi 3.5" 640x480 TFT LCD panel
+ - gameforce,chi-panel
# Powkiddy RGB10MAX3 5.0" 720x1280 TFT LCD panel
- powkiddy,rgb10max3-panel
# Powkiddy RGB30 3.0" 720x720 TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
index 95ce22c6787a..04f86e0cbac9 100644
--- a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
@@ -14,7 +14,7 @@ properties:
const: ronbo,rb070d30
reg:
- description: MIPI-DSI virtual channel
+ maxItems: 1
power-gpios:
description: GPIO used for the power pin
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml
index ccc482570d6a..e8f9e9d06a29 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml
@@ -33,7 +33,9 @@ properties:
# Samsung S6E3HF2 5.65" 1600x2560 AMOLED panel
- samsung,s6e3hf2
- reg: true
+ reg:
+ maxItems: 1
+
reset-gpios: true
enable-gpios: true
te-gpios: true
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml
index 58fa073ce258..e081c84a932b 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml
@@ -11,12 +11,15 @@ maintainers:
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
const: samsung,ams495qa01
- reg: true
+ reg:
+ maxItems: 1
+
reset-gpios:
description: reset gpio, must be GPIO_ACTIVE_LOW
elvdd-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
index c0fabeb38628..bc92b16c95b9 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
@@ -17,9 +17,11 @@ properties:
compatible:
const: samsung,ld9040
+ reg:
+ maxItems: 1
+
display-timings: true
port: true
- reg: true
reset-gpios: true
vdd3-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
index 70ffc88d2a08..7ce8540551f9 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
@@ -21,7 +21,8 @@ properties:
compatible:
const: samsung,lms380kf01
- reg: true
+ reg:
+ maxItems: 1
interrupts:
description: provides an optional ESD (electrostatic discharge)
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
index 5e77cee93f83..9363032883de 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
@@ -20,7 +20,8 @@ properties:
compatible:
const: samsung,lms397kf04
- reg: true
+ reg:
+ maxItems: 1
reset-gpios: true
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml
index 66d147496bc3..2af5bc47323f 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml
@@ -16,8 +16,10 @@ properties:
compatible:
const: samsung,s6d16d0
+ reg:
+ maxItems: 1
+
port: true
- reg: true
reset-gpios: true
vdd1-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
index d273faf4442a..d74904164719 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
@@ -20,7 +20,8 @@ properties:
compatible:
const: samsung,s6d27a1
- reg: true
+ reg:
+ maxItems: 1
interrupts:
description: provides an optional ESD (electrostatic discharge)
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
index 45a236d2cc70..939da65114bf 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
@@ -24,7 +24,8 @@ properties:
- samsung,ltl101at01
- const: samsung,s6d7aa0
- reg: true
+ reg:
+ maxItems: 1
backlight:
description:
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index 6f1fc7469f07..c47e2a1a30e5 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -18,7 +18,9 @@ properties:
compatible:
const: samsung,s6e63m0
- reg: true
+ reg:
+ maxItems: 1
+
reset-gpios: true
port: true
default-brightness: true
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
index b749e9e906b7..42634fc3b5b2 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
@@ -15,7 +15,10 @@ allOf:
properties:
compatible:
const: samsung,s6e88a0-ams452ef01
- reg: true
+
+ reg:
+ maxItems: 1
+
port: true
reset-gpios: true
vdd3-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
index 200fbf1c74a0..4601fa460680 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
@@ -16,7 +16,9 @@ properties:
compatible:
const: samsung,s6e8aa0
- reg: true
+ reg:
+ maxItems: 1
+
reset-gpios: true
display-timings: true
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101r1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq101r1sx01.yaml
index 57b44a0e763d..ce820b96a7e2 100644
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq101r1sx01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq101r1sx01.yaml
@@ -37,7 +37,9 @@ properties:
- enum:
- sharp,lq101r1sx01
- reg: true
+ reg:
+ maxItems: 1
+
power-supply: true
backlight: true
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml
index a90d0d8bf7c9..b6ea246430ce 100644
--- a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml
@@ -16,7 +16,9 @@ properties:
compatible:
const: sharp,ls043t1le01-qhd
- reg: true
+ reg:
+ maxItems: 1
+
backlight: true
reset-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml
index 271c097cc9a4..77a4fce129e7 100644
--- a/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml
@@ -16,7 +16,9 @@ properties:
compatible:
const: sharp,ls060t1sx01
- reg: true
+ reg:
+ maxItems: 1
+
backlight: true
reset-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
index ef162b51d010..0ce2ea13583d 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
@@ -21,7 +21,9 @@ properties:
- jasonic,jt240mhqs-hwt-ek-e3
- sitronix,st7789v
- reg: true
+ reg:
+ maxItems: 1
+
reset-gpios: true
power-supply: true
backlight: true
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
index 059cc6dbcfca..fd778a20f760 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
@@ -22,7 +22,10 @@ properties:
enum:
- sony,acx424akp
- sony,acx424akm
- reg: true
+
+ reg:
+ maxItems: 1
+
reset-gpios: true
vddi-supply:
description: regulator that supplies the vddi voltage
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
index 98abdf4ddeac..5a8260224b74 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
@@ -17,6 +17,9 @@ properties:
compatible:
const: sony,acx565akm
+ reg:
+ maxItems: 1
+
label: true
reset-gpios: true
port: true
diff --git a/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml b/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
index b6b885b4c22d..191b692125e1 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
@@ -20,9 +20,12 @@ properties:
compatible:
const: sony,td4353-jdi-tama
- reg: true
+ reg:
+ maxItems: 1
backlight: true
+ width-mm: true
+ height-mm: true
vddio-supply:
description: VDDIO 1.8V supply
diff --git a/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml b/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml
index 967972939598..a58a31349757 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml
@@ -21,7 +21,8 @@ properties:
compatible:
const: sony,tulip-truly-nt35521
- reg: true
+ reg:
+ maxItems: 1
positive5-supply:
description: Positive 5V supply
diff --git a/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml b/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml
index e5617d125567..2fd6e0ec3682 100644
--- a/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml
+++ b/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml
@@ -19,15 +19,17 @@ properties:
- sharp,ls068b3sx02
- const: syna,r63353
+ reg:
+ maxItems: 1
+
avdd-supply: true
dvdd-supply: true
- reg: true
required:
- compatible
+ - reg
- avdd-supply
- dvdd-supply
- - reg
- reset-gpios
- port
- backlight
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
index e8c8ee8d7c88..7edd29df4bbb 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
@@ -22,7 +22,9 @@ properties:
# Toppoly TD043MTEA1 Panel
- tpo,td043mtea1
- reg: true
+ reg:
+ maxItems: 1
+
label: true
reset-gpios: true
backlight: true
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
index f0243d196191..59a373728e62 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
+++ b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
@@ -52,7 +52,8 @@ properties:
- const: tpo,tpg110
- const: tpo,tpg110
- reg: true
+ reg:
+ maxItems: 1
grestb-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
index 772399067515..30047a62fc11 100644
--- a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
+++ b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
@@ -20,7 +20,8 @@ properties:
compatible:
const: visionox,rm69299-1080p-display
- reg: true
+ reg:
+ maxItems: 1
vdda-supply:
description: |
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index c407deb6afb1..9c9743a23500 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -15,7 +15,10 @@ allOf:
properties:
compatible:
const: xinpeng,xpp055c272
- reg: true
+
+ reg:
+ maxItems: 1
+
backlight: true
port: true
reset-gpios: true
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
index af638b6c0d21..2aac62219ff6 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
@@ -15,6 +15,7 @@ description: |
allOf:
- $ref: ../bridge/synopsys,dw-hdmi.yaml#
+ - $ref: /schemas/sound/dai-common.yaml#
properties:
compatible:
@@ -124,6 +125,9 @@ properties:
description:
phandle to the GRF to mux vopl/vopb.
+ "#sound-dai-cells":
+ const: 0
+
required:
- compatible
- reg
@@ -153,6 +157,7 @@ examples:
ddc-i2c-bus = <&i2c5>;
power-domains = <&power RK3288_PD_VIO>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
index be78dcfa1c76..5b87b0f1963e 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
@@ -37,6 +37,9 @@ properties:
power-domains:
maxItems: 1
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -66,6 +69,7 @@ required:
- ports
allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
- if:
properties:
compatible:
@@ -106,6 +110,7 @@ examples:
clock-names = "pclk";
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 1a68a940d165..6d4b78a36576 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -10,6 +10,9 @@ maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
properties:
compatible:
const: rockchip,rk3066-hdmi
@@ -34,6 +37,9 @@ properties:
description:
This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -83,6 +89,7 @@ examples:
pinctrl-names = "default";
power-domains = <&power RK3066_PD_VIO>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos5-dp.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5-dp.yaml
new file mode 100644
index 000000000000..dda9097a7911
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5-dp.yaml
@@ -0,0 +1,163 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/samsung/samsung,exynos5-dp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos5250/Exynos5420 SoC Display Port
+
+maintainers:
+ - Inki Dae <inki.dae@samsung.com>
+ - Seung-Woo Kim <sw0312.kim@samsung.com>
+ - Kyungmin Park <kyungmin.park@samsung.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ const: samsung,exynos5-dp
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: dp
+
+ display-timings:
+ $ref: /schemas/display/panel/display-timings.yaml#
+
+ interrupts:
+ maxItems: 1
+
+ hpd-gpios:
+ description:
+ Hotplug detect GPIO.
+ Indicates which GPIO should be used for hotplug detection
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ items:
+ - const: dp
+
+ power-domains:
+ maxItems: 1
+
+ interlaced:
+ type: boolean
+ deprecated: true
+ description:
+ Interlace scan mode. Progressive if defined, interlaced if not defined.
+
+ vsync-active-high:
+ type: boolean
+ deprecated: true
+ description:
+ VSYNC polarity configuration. High if defined, low if not defined
+
+ hsync-active-high:
+ type: boolean
+ deprecated: true
+ description:
+ HSYNC polarity configuration. High if defined, low if not defined
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port node with one endpoint connected to a dp-connector node.
+
+ required:
+ - port
+
+ samsung,hpd-gpios:
+ maxItems: 1
+ deprecated: true
+
+ samsung,ycbcr-coeff:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can be parsed from drm_display_mode.
+
+ samsung,dynamic-range:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can be parsed from drm_display_mode.
+
+ samsung,color-space:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can be parsed from drm_display_info.
+
+ samsung,color-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can be parsed from drm_display_info.
+
+ samsung,link-rate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can instead be read from the monitor via the DPCD method.
+
+ samsung,lane-count:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Deprecated property that can instead be read from the monitor via the DPCD method.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - phys
+ - phy-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5250.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ dp-controller@145b0000 {
+ compatible = "samsung,exynos5-dp";
+ reg = <0x145b0000 0x1000>;
+ clocks = <&clock CLK_DP>;
+ clock-names = "dp";
+ interrupts = <10 3>;
+ interrupt-parent = <&combiner>;
+ phys = <&dp_phy>;
+ phy-names = "dp";
+ pinctrl-0 = <&dp_hpd>;
+ pinctrl-names = "default";
+ power-domains = <&pd_disp1>;
+
+ samsung,color-space = <0>;
+ samsung,color-depth = <1>;
+ samsung,link-rate = <0x0a>;
+ samsung,lane-count = <2>;
+ hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ port {
+ dp_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml
index 94c5242c03b2..3563378a01af 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml
@@ -182,6 +182,15 @@ allOf:
compatible:
contains:
enum:
+ - nvidia,tegra194-host1x
+ then:
+ properties:
+ dma-coherent: true
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- nvidia,tegra234-host1x
then:
properties:
@@ -226,6 +235,8 @@ allOf:
use. Should be a mapping of IDs 0..n to IOMMU entries corresponding to
usable stream IDs.
+ dma-coherent: true
+
required:
- reg-names
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index aa51d278cb67..acfb4b2ee7a9 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -21,8 +21,8 @@ properties:
- enum:
- fsl,vf610-edma
- fsl,imx7ulp-edma
- - fsl,imx8qm-adma
- fsl,imx8qm-edma
+ - fsl,imx8ulp-edma
- fsl,imx93-edma3
- fsl,imx93-edma4
- fsl,imx95-edma5
@@ -43,6 +43,17 @@ properties:
maxItems: 64
"#dma-cells":
+ description: |
+ Specifies the number of cells needed to encode a DMA channel.
+
+ When 2 cells are used:
+ cell 0: index of the DMA channel mux instance.
+ cell 1: peripheral DMA request id.
+
+ When 3 cells are used:
+ cell 0: peripheral DMA request id.
+ cell 1: DMA channel priority.
+ cell 2: bitmask, defined at include/dt-bindings/dma/fsl-edma.h
enum:
- 2
- 3
@@ -53,11 +64,18 @@ properties:
clocks:
minItems: 1
- maxItems: 2
+ maxItems: 33
clock-names:
minItems: 1
- maxItems: 2
+ maxItems: 33
+
+ power-domains:
+ description:
+ The number of power domains matches the number of channels, arranged
+ in ascending order according to their associated DMA channels.
+ minItems: 1
+ maxItems: 64
big-endian:
description: |
@@ -70,7 +88,6 @@ required:
- compatible
- reg
- interrupts
- - clocks
- dma-channels
allOf:
@@ -80,7 +97,6 @@ allOf:
compatible:
contains:
enum:
- - fsl,imx8qm-adma
- fsl,imx8qm-edma
- fsl,imx93-edma3
- fsl,imx93-edma4
@@ -108,6 +124,7 @@ allOf:
properties:
clocks:
minItems: 2
+ maxItems: 2
clock-names:
items:
- const: dmamux0
@@ -136,6 +153,7 @@ allOf:
properties:
clock:
minItems: 2
+ maxItems: 2
clock-names:
items:
- const: dma
@@ -151,6 +169,58 @@ allOf:
dma-channels:
const: 32
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx8ulp-edma
+ then:
+ properties:
+ clocks:
+ minItems: 33
+ clock-names:
+ minItems: 33
+ items:
+ oneOf:
+ - const: dma
+ - pattern: "^ch(0[0-9]|[1-2][0-9]|3[01])$"
+
+ interrupt-names: false
+ interrupts:
+ minItems: 32
+ "#dma-cells":
+ const: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,vf610-edma
+ - fsl,imx7ulp-edma
+ - fsl,imx93-edma3
+ - fsl,imx93-edma4
+ - fsl,imx95-edma5
+ - fsl,imx8ulp-edma
+ - fsl,ls1028a-edma
+ then:
+ required:
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-adma
+ - fsl,imx8qm-edma
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
+
unevaluatedProperties: false
examples:
@@ -206,44 +276,27 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/clock/imx93-clock.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
- dma-controller@44000000 {
- compatible = "fsl,imx93-edma3";
- reg = <0x44000000 0x200000>;
+ dma-controller@5a9f0000 {
+ compatible = "fsl,imx8qm-edma";
+ reg = <0x5a9f0000 0x90000>;
#dma-cells = <3>;
- dma-channels = <31>;
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX93_CLK_EDMA1_GATE>;
- clock-names = "dma";
+ dma-channels = <8>;
+ interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd IMX_SC_R_DMA_3_CH0>,
+ <&pd IMX_SC_R_DMA_3_CH1>,
+ <&pd IMX_SC_R_DMA_3_CH2>,
+ <&pd IMX_SC_R_DMA_3_CH3>,
+ <&pd IMX_SC_R_DMA_3_CH4>,
+ <&pd IMX_SC_R_DMA_3_CH5>,
+ <&pd IMX_SC_R_DMA_3_CH6>,
+ <&pd IMX_SC_R_DMA_3_CH7>;
};
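To make the three-cell #dma-cells encoding described earlier in this file concrete, a DMA client referencing a controller like the one above might look roughly like the sketch below; the label &edma_example, the compatible string, and the request id, priority and bitmask values are all illustrative assumptions, with the bitmask normally taken from include/dt-bindings/dma/fsl-edma.h.

    serial@5a060000 {
        compatible = "vendor,example-uart";         /* placeholder client */
        reg = <0x5a060000 0x1000>;
        /* each specifier: <request id, channel priority, bitmask> */
        dmas = <&edma_example 5 0 0>, <&edma_example 6 0 0>;
        dma-names = "rx", "tx";
    };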
diff --git a/Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml b/Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml
index 37135fa024f9..738b25b88b37 100644
--- a/Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml
@@ -94,6 +94,7 @@ properties:
- SAI: 24
- Multi SAI: 25
- HDMI Audio: 26
+ - I2C: 27
The third cell: transfer priority ID
enum:
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
deleted file mode 100644
index 1ae4748730a8..000000000000
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-Qualcomm Technologies HIDMA Management interface
-
-Qualcomm Technologies HIDMA is a high speed DMA device. It only supports
-memcpy and memset capabilities. It has been designed for virtualized
-environments.
-
-Each HIDMA HW instance consists of multiple DMA channels. These channels
-share the same bandwidth. The bandwidth utilization can be partitioned
-among channels based on the priority and weight assignments.
-
-There are only two priority levels and 15 weigh assignments possible.
-
-Other parameters here determine how much of the system bus this HIDMA
-instance can use like maximum read/write request and number of bytes to
-read/write in a single burst.
-
-Main node required properties:
-- compatible: "qcom,hidma-mgmt-1.0";
-- reg: Address range for DMA device
-- dma-channels: Number of channels supported by this DMA controller.
-- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can
- occupy the bus for in a single transaction. A memcpy requested is
- fragmented to multiples of this amount. This parameter is used while
- writing into destination memory. Setting this value incorrectly can
- starve other peripherals in the system.
-- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can
- occupy the bus for in a single transaction. A memcpy request is
- fragmented to multiples of this amount. This parameter is used while
- reading the source memory. Setting this value incorrectly can starve
- other peripherals in the system.
-- max-write-transactions: This value is how many times a write burst is
- applied back to back while writing to the destination before yielding
- the bus.
-- max-read-transactions: This value is how many times a read burst is
- applied back to back while reading the source before yielding the bus.
-- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC.
- Once a reset is applied to the HW, HW starts a timer for reset operation
- to confirm. If reset is not completed within this time, HW reports reset
- failure.
-
-Sub-nodes:
-
-HIDMA has one or more DMA channels that are used to move data from one
-memory location to another.
-
-When the OS is not in control of the management interface (i.e. it's a guest),
-the channel nodes appear on their own, not under a management node.
-
-Required properties:
-- compatible: must contain "qcom,hidma-1.0" for initial HW or
- "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW.
-- reg: Addresses for the transfer and event channel
-- interrupts: Should contain the event interrupt
-- desc-count: Number of asynchronous requests this channel can handle
-- iommus: required a iommu node
-
-Optional properties for MSI:
-- msi-parent : See the generic MSI binding described in
- devicetree/bindings/interrupt-controller/msi.txt for a description of the
- msi-parent property.
-
-Example:
-
-Hypervisor OS configuration:
-
- hidma-mgmt@f9984000 = {
- compatible = "qcom,hidma-mgmt-1.0";
- reg = <0xf9984000 0x15000>;
- dma-channels = <6>;
- max-write-burst-bytes = <1024>;
- max-read-burst-bytes = <1024>;
- max-write-transactions = <31>;
- max-read-transactions = <31>;
- channel-reset-timeout-cycles = <0x500>;
-
- hidma_24: dma-controller@5c050000 {
- compatible = "qcom,hidma-1.0";
- reg = <0 0x5c050000 0x0 0x1000>,
- <0 0x5c0b0000 0x0 0x1000>;
- interrupts = <0 389 0>;
- desc-count = <10>;
- iommus = <&system_mmu>;
- };
- };
-
-Guest OS configuration:
-
- hidma_24: dma-controller@5c050000 {
- compatible = "qcom,hidma-1.0";
- reg = <0 0x5c050000 0x0 0x1000>,
- <0 0x5c0b0000 0x0 0x1000>;
- interrupts = <0 389 0>;
- desc-count = <10>;
- iommus = <&system_mmu>;
- };
diff --git a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
index 5da8291a7de0..c21a4f073f6c 100644
--- a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
@@ -93,10 +93,10 @@ properties:
data-width:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Data bus width per each DMA master in bytes.
+ minItems: 1
+ maxItems: 4
items:
- maxItems: 4
- items:
- enum: [4, 8, 16, 32]
+ enum: [4, 8, 16, 32]
data_width:
$ref: /schemas/types.yaml#/definitions/uint32-array
@@ -106,28 +106,28 @@ properties:
deprecated. Its usage is discouraged in favor of the data-width property.
Moreover, the property incorrectly permits data-bus widths of 8 and 16
bits, which is impossible according to the DW DMAC IP-core data book.
+ minItems: 1
+ maxItems: 4
items:
- maxItems: 4
- items:
- enum:
- - 0 # 8 bits
- - 1 # 16 bits
- - 2 # 32 bits
- - 3 # 64 bits
- - 4 # 128 bits
- - 5 # 256 bits
- default: 0
+ enum:
+ - 0 # 8 bits
+ - 1 # 16 bits
+ - 2 # 32 bits
+ - 3 # 64 bits
+ - 4 # 128 bits
+ - 5 # 256 bits
+ default: 0
multi-block:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: |
LLP-based multi-block transfer supported by hardware per
each DMA channel.
+ minItems: 1
+ maxItems: 8
items:
- maxItems: 8
- items:
- enum: [0, 1]
- default: 1
+ enum: [0, 1]
+ default: 1
snps,max-burst-len:
$ref: /schemas/types.yaml#/definitions/uint32-array
@@ -138,11 +138,11 @@ properties:
will be from 1 to max-burst-len words. It's an array property with one
cell per channel in the units determined by the value set in the
CTLx.SRC_TR_WIDTH/CTLx.DST_TR_WIDTH fields (data width).
+ minItems: 1
+ maxItems: 8
items:
- maxItems: 8
- items:
- enum: [4, 8, 16, 32, 64, 128, 256]
- default: 256
+ enum: [4, 8, 16, 32, 64, 128, 256]
+ default: 256
snps,dma-protection-control:
$ref: /schemas/types.yaml#/definitions/uint32
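For what it is worth, the flattened per-master and per-channel array forms above could be filled in as in the fragment below; the values are arbitrary examples for a hypothetical controller with two masters and eight channels, not taken from any real board.

    /* two masters, 4 and 8 bytes wide */
    data-width = <4 8>;
    /* LLP multi-block transfers enabled on all eight channels */
    multi-block = <1 1 1 1 1 1 1 1>;
    /* maximum burst length per channel, in transfer-width units */
    snps,max-burst-len = <16 16 16 16 16 16 16 16>;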
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
index 363cf8bd150d..525f5f3932f5 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -21,6 +21,7 @@ properties:
- snps,axi-dma-1.01a
- intel,kmb-axi-dma
- starfive,jh7110-axi-dma
+ - starfive,jh8100-axi-dma
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
index 329847ef096a..ff935a0068ec 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
@@ -82,6 +82,10 @@ properties:
description: if defined, it indicates that the controller
supports memory-to-memory transfer
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
index e722fbcd8a5f..ddf82bf1e71a 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
@@ -28,6 +28,10 @@ properties:
resets:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/dts-coding-style.rst b/Documentation/devicetree/bindings/dts-coding-style.rst
index a9bdd2b59dca..8a68331075a0 100644
--- a/Documentation/devicetree/bindings/dts-coding-style.rst
+++ b/Documentation/devicetree/bindings/dts-coding-style.rst
@@ -144,6 +144,8 @@ Example::
#dma-cells = <1>;
clocks = <&clock_controller 0>, <&clock_controller 1>;
clock-names = "bus", "host";
+ #address-cells = <1>;
+ #size-cells = <1>;
vendor,custom-property = <2>;
status = "disabled";
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 1812ef31d5f1..3c36cd0510de 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -69,14 +69,10 @@ properties:
- items:
pattern: c32$
- items:
- pattern: c32d-wl$
- - items:
pattern: cs32$
- items:
pattern: c64$
- items:
- pattern: c64d-wl$
- - items:
pattern: cs64$
- items:
pattern: c128$
@@ -136,6 +132,7 @@ properties:
- renesas,r1ex24128
- samsung,s524ad0xd1
- const: atmel,24c128
+ - pattern: '^atmel,24c(32|64)d-wl$' # Actual vendor is st
label:
description: Descriptive name of the EEPROM.
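A minimal node matching the newly allowed pattern might look like the sketch below; the I2C address and page size are example values.

    eeprom@50 {
        compatible = "atmel,24c32d-wl";
        reg = <0x50>;
        pagesize = <32>;
    };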
diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
index 4591523b51a0..7de2c29606e5 100644
--- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
+++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
@@ -247,6 +247,37 @@ properties:
reg:
const: 0x18
+ protocol@19:
+ type: object
+ allOf:
+ - $ref: '#/$defs/protocol-node'
+ - $ref: /schemas/pinctrl/pinctrl.yaml
+
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x19
+
+ patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: /schemas/pinctrl/pincfg-node.yaml#
+ - $ref: /schemas/pinctrl/pinmux-node.yaml#
+ unevaluatedProperties: false
+
+ description:
+ A pin multiplexing sub-node describes how to configure a
+ set of pins for some desired function.
+ A single sub-node may define several pin configurations.
+ The sub-node uses the default pinctrl bindings to configure
+ pin multiplexing and the SCMI protocol to apply the specified
+ configuration.
+
+ required:
+ - reg
+
additionalProperties: false
$defs:
@@ -355,7 +386,7 @@ examples:
scmi_dvfs: protocol@13 {
reg = <0x13>;
- #clock-cells = <1>;
+ #power-domain-cells = <1>;
mboxes = <&mhuB 1 0>,
<&mhuB 1 1>;
@@ -401,6 +432,25 @@ examples:
scmi_powercap: protocol@18 {
reg = <0x18>;
};
+
+ scmi_pinctrl: protocol@19 {
+ reg = <0x19>;
+
+ i2c2-pins {
+ groups = "g_i2c2_a", "g_i2c2_b";
+ function = "f_i2c2";
+ };
+
+ mdio-pins {
+ groups = "g_avb_mdio";
+ drive-strength = <24>;
+ };
+
+ keys_pins: keys-pins {
+ pins = "gpio_5_17", "gpio_5_20", "gpio_5_22", "gpio_2_1";
+ bias-pull-up;
+ };
+ };
};
};
@@ -468,7 +518,7 @@ examples:
reg = <0x13>;
linaro,optee-channel-id = <1>;
shmem = <&cpu_optee_lpri0>;
- #clock-cells = <1>;
+ #power-domain-cells = <1>;
};
scmi_clk0: protocol@14 {
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
index a1e71c974e79..f096f286da19 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
@@ -62,6 +62,8 @@ properties:
interrupt-controller: true
+ gpio-ranges: true
+
wakeup-source:
type: boolean
description: >
@@ -88,6 +90,7 @@ examples:
interrupt-parent = <&irq0_intc>;
interrupts = <0x6>;
brcm,gpio-bank-widths = <32 32 32 24>;
+ gpio-ranges = <&pinctrl 0 0 120>;
};
upg_gio_aon: gpio@f04172c0 {
diff --git a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
index d481e78958a7..d61569b3f15b 100644
--- a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- microchip,mpfs-gpio
+ - microchip,coregpio-rtl-v3
reg:
maxItems: 1
@@ -43,6 +44,7 @@ properties:
default: 32
gpio-controller: true
+ gpio-line-names: true
patternProperties:
"^.+-hog(-[0-9]+)?$":
@@ -62,12 +64,21 @@ patternProperties:
- gpio-hog
- gpios
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,mpfs-gpio
+ then:
+ required:
+ - interrupts
+ - "#interrupt-cells"
+ - interrupt-controller
+
required:
- compatible
- reg
- - interrupts
- - "#interrupt-cells"
- - interrupt-controller
- "#gpio-cells"
- gpio-controller
- clocks
diff --git a/Documentation/devicetree/bindings/gpio/raspberrypi,firmware-gpio.txt b/Documentation/devicetree/bindings/gpio/raspberrypi,firmware-gpio.txt
deleted file mode 100644
index ce97265e23ba..000000000000
--- a/Documentation/devicetree/bindings/gpio/raspberrypi,firmware-gpio.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Raspberry Pi GPIO expander
-
-The Raspberry Pi 3 GPIO expander is controlled by the VC4 firmware. The
-firmware exposes a mailbox interface that allows the ARM core to control the
-GPIO lines on the expander.
-
-The Raspberry Pi GPIO expander node must be a child node of the Raspberry Pi
-firmware node.
-
-Required properties:
-
-- compatible : Should be "raspberrypi,firmware-gpio"
-- gpio-controller : Marks the device node as a gpio controller
-- #gpio-cells : Should be two. The first cell is the pin number, and
- the second cell is used to specify the gpio polarity:
- 0 = active high
- 1 = active low
-
-Example:
-
-firmware: firmware-rpi {
- compatible = "raspberrypi,bcm2835-firmware";
- mboxes = <&mailbox>;
-
- expgpio: gpio {
- compatible = "raspberrypi,firmware-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
new file mode 100644
index 000000000000..a5b4e0021758
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/arm,mali-valhall-csf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Mali Valhall GPU
+
+maintainers:
+ - Liviu Dudau <liviu.dudau@arm.com>
+ - Boris Brezillon <boris.brezillon@collabora.com>
+
+properties:
+ $nodename:
+ pattern: '^gpu@[a-f0-9]+$'
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - rockchip,rk3588-mali
+ - const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Job interrupt
+ - description: MMU interrupt
+ - description: GPU interrupt
+
+ interrupt-names:
+ items:
+ - const: job
+ - const: mmu
+ - const: gpu
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: coregroup
+ - const: stacks
+
+ mali-supply: true
+
+ operating-points-v2: true
+ opp-table:
+ type: object
+
+ power-domains:
+ minItems: 1
+ maxItems: 5
+
+ power-domain-names:
+ minItems: 1
+ maxItems: 5
+
+ sram-supply: true
+
+ "#cooling-cells":
+ const: 2
+
+ dynamic-power-coefficient:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ A u32 value that represents the running time dynamic
+ power coefficient in units of uW/MHz/V^2. The
+ coefficient can either be calculated from power
+ measurements or derived by analysis.
+
+ The dynamic power consumption of the GPU is
+ proportional to the square of the Voltage (V) and
+ the clock frequency (f). The coefficient is used to
+ calculate the dynamic power as below -
+
+ Pdyn = dynamic-power-coefficient * V^2 * f
+
+ where voltage is in V, frequency is in MHz.
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - mali-supply
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3588-mali
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ power-domains:
+ maxItems: 1
+ power-domain-names: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3588-power.h>
+
+ gpu: gpu@fb000000 {
+ compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
+ reg = <0xfb000000 0x200000>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ clock-names = "core", "coregroup", "stacks";
+ clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
+ <&cru CLK_GPU_STACKS>;
+ power-domains = <&power RK3588_PD_GPU>;
+ operating-points-v2 = <&gpu_opp_table>;
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ };
+ };
+
+...
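As a rough worked example of the Pdyn formula above, using an invented coefficient of 2900 uW/MHz/V^2 together with the 300 MHz / 0.675 V operating point from the example OPP table:

    Pdyn = 2900 * (0.675)^2 * 300 ≈ 396394 uW ≈ 0.4 W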
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
deleted file mode 100644
index d0ae46d7bac3..000000000000
--- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-TI ADC128D818 ADC System Monitor With Temperature Sensor
---------------------------------------------------------
-
-Operation modes:
-
- - Mode 0: 7 single-ended voltage readings (IN0-IN6),
- 1 temperature reading (internal)
- - Mode 1: 8 single-ended voltage readings (IN0-IN7),
- no temperature
- - Mode 2: 4 pseudo-differential voltage readings
- (IN0-IN1, IN3-IN2, IN4-IN5, IN7-IN6),
- 1 temperature reading (internal)
- - Mode 3: 4 single-ended voltage readings (IN0-IN3),
- 2 pseudo-differential voltage readings
- (IN4-IN5, IN7-IN6),
- 1 temperature reading (internal)
-
-If no operation mode is configured via device tree, the driver keeps the
-currently active chip operation mode (default is mode 0).
-
-
-Required node properties:
-
- - compatible: must be set to "ti,adc128d818"
- - reg: I2C address of the device
-
-Optional node properties:
-
- - ti,mode: Operation mode (u8) (see above).
-
-
-Example (operation mode 2):
-
- adc128d818@1d {
- compatible = "ti,adc128d818";
- reg = <0x1d>;
- ti,mode = /bits/ 8 <2>;
- };
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
index b68061294964..5b076d677395 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
@@ -5,7 +5,7 @@
$id: http://devicetree.org/schemas/hwmon/adi,adm1275.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Analog Devices ADM1075/ADM127x/ADM129x digital power monitors
+title: Analog Devices ADM1075/ADM127x/ADM1281/ADM129x digital power monitors
maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
@@ -27,6 +27,7 @@ properties:
- adi,adm1275
- adi,adm1276
- adi,adm1278
+ - adi,adm1281
- adi,adm1293
- adi,adm1294
@@ -91,6 +92,7 @@ allOf:
contains:
enum:
- adi,adm1278
+ - adi,adm1281
- adi,adm1293
- adi,adm1294
then:
diff --git a/Documentation/devicetree/bindings/hwmon/as370.txt b/Documentation/devicetree/bindings/hwmon/as370.txt
deleted file mode 100644
index d102fe765124..000000000000
--- a/Documentation/devicetree/bindings/hwmon/as370.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Bindings for Synaptics AS370 PVT sensors
-
-Required properties:
-- compatible : "syna,as370-hwmon"
-- reg : address and length of the register set.
-
-Example:
- hwmon@ea0810 {
- compatible = "syna,as370-hwmon";
- reg = <0xea0810 0xc>;
- };
diff --git a/Documentation/devicetree/bindings/hwmon/ibm,opal-sensor.yaml b/Documentation/devicetree/bindings/hwmon/ibm,opal-sensor.yaml
new file mode 100644
index 000000000000..376ee7f1cdb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ibm,opal-sensor.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/ibm,opal-sensor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IBM POWERNV platform sensors
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - ibm,opal-sensor-cooling-fan
+ - ibm,opal-sensor-amb-temp
+ - ibm,opal-sensor-power-supply
+ - ibm,opal-sensor-power
+
+ sensor-id:
+ description:
+ An opaque id provided by the firmware to the kernel that identifies a
+ given sensor and its attribute data.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - sensor-id
+
+additionalProperties: false
+
+examples:
+ - |
+ sensor {
+ compatible = "ibm,opal-sensor-cooling-fan";
+ sensor-id = <0x7052107>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt b/Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt
deleted file mode 100644
index 5dc5d2e2573d..000000000000
--- a/Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Device-tree bindings for I2C-based On-Chip Controller hwmon device
-------------------------------------------------------------------
-
-Required properties:
- - compatible = "ibm,p8-occ-hwmon";
- - reg = <I2C address>; : I2C bus address
-
-Examples:
-
- i2c-bus@100 {
- #address-cells = <1>;
- #size-cells = <0>;
- clock-frequency = <100000>;
- < more properties >
-
- occ-hwmon@1 {
- compatible = "ibm,p8-occ-hwmon";
- reg = <0x50>;
- };
-
- occ-hwmon@2 {
- compatible = "ibm,p8-occ-hwmon";
- reg = <0x51>;
- };
- };
diff --git a/Documentation/devicetree/bindings/hwmon/ibmpowernv.txt b/Documentation/devicetree/bindings/hwmon/ibmpowernv.txt
deleted file mode 100644
index f93242be60a1..000000000000
--- a/Documentation/devicetree/bindings/hwmon/ibmpowernv.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-IBM POWERNV platform sensors
-----------------------------
-
-Required node properties:
-- compatible: must be one of
- "ibm,opal-sensor-cooling-fan"
- "ibm,opal-sensor-amb-temp"
- "ibm,opal-sensor-power-supply"
- "ibm,opal-sensor-power"
-- sensor-id: an opaque id provided by the firmware to the kernel, identifies a
- given sensor and its attribute data
-
-Example sensors node:
-
-cooling-fan#8-data {
- sensor-id = <0x7052107>;
- compatible = "ibm,opal-sensor-cooling-fan";
-};
-
-amb-temp#1-thrs {
- sensor-id = <0x5096000>;
- compatible = "ibm,opal-sensor-amb-temp";
-};
diff --git a/Documentation/devicetree/bindings/hwmon/lm87.txt b/Documentation/devicetree/bindings/hwmon/lm87.txt
deleted file mode 100644
index 758ff398b67b..000000000000
--- a/Documentation/devicetree/bindings/hwmon/lm87.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-*LM87 hwmon sensor.
-
-Required properties:
-- compatible: Should be
- "ti,lm87"
-
-- reg: I2C address
-
-optional properties:
-- has-temp3: This configures pins 18 and 19 to be used as a second
- remote temperature sensing channel. By default the pins
- are configured as voltage input pins in0 and in5.
-
-- has-in6: When set, pin 5 is configured to be used as voltage input
- in6. Otherwise the pin is set as FAN1 input.
-
-- has-in7: When set, pin 6 is configured to be used as voltage input
- in7. Otherwise the pin is set as FAN2 input.
-
-- vcc-supply: a Phandle for the regulator supplying power, can be
- configured to measure 5.0V power supply. Default is 3.3V.
-
-Example:
-
-lm87@2e {
- compatible = "ti,lm87";
- reg = <0x2e>;
- has-temp3;
- vcc-supply = <&reg_5v0>;
-};
diff --git a/Documentation/devicetree/bindings/hwmon/max6650.txt b/Documentation/devicetree/bindings/hwmon/max6650.txt
deleted file mode 100644
index f6bd87d8e284..000000000000
--- a/Documentation/devicetree/bindings/hwmon/max6650.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Bindings for MAX6651 and MAX6650 I2C fan controllers
-
-Reference:
-[1] https://datasheets.maximintegrated.com/en/ds/MAX6650-MAX6651.pdf
-
-Required properties:
-- compatible : One of "maxim,max6650" or "maxim,max6651"
-- reg : I2C address, one of 0x1b, 0x1f, 0x4b, 0x48.
-
-Optional properties, default is to retain the chip's current setting:
-- maxim,fan-microvolt : The supply voltage of the fan, either 5000000 uV or
- 12000000 uV.
-- maxim,fan-prescale : Pre-scaling value, as per datasheet [1]. Lower values
- allow more fine-grained control of slower fans.
- Valid: 1, 2, 4, 8, 16.
-- maxim,fan-target-rpm: Initial requested fan rotation speed. If specified, the
- driver selects closed-loop mode and the requested speed.
- This ensures the fan is already running before userspace
- takes over.
-
-Example:
- fan-max6650: max6650@1b {
- reg = <0x1b>;
- compatible = "maxim,max6650";
- maxim,fan-microvolt = <12000000>;
- maxim,fan-prescale = <4>;
- maxim,fan-target-rpm = <1200>;
- };
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml
new file mode 100644
index 000000000000..2c26104a5e16
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/maxim,max6650.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX6650 and MAX6651 I2C Fan Controllers
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+description: |
+ The MAX6650 and MAX6651 regulate and monitor the speed
+ of 5VDC/12VDC brushless fans with built-in tachometers.
+
+ Datasheets:
+ https://datasheets.maximintegrated.com/en/ds/MAX6650-MAX6651.pdf
+
+properties:
+ compatible:
+ enum:
+ - maxim,max6650
+ - maxim,max6651
+
+ reg:
+ maxItems: 1
+
+ maxim,fan-microvolt:
+ description:
+ The supply voltage of the fan, either 5000000 uV or
+ 12000000 uV.
+ enum: [5000000, 12000000]
+
+ maxim,fan-prescale:
+ description:
+ Pre-scaling value, as per datasheet. Lower values
+ allow more fine-grained control of slower fans.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8, 16]
+
+ maxim,fan-target-rpm:
+ description:
+ Initial requested fan rotation speed. If specified, the
+ driver selects closed-loop mode and the requested speed.
+ This ensures the fan is already running before userspace
+ takes over.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 30000
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan-controller@1b {
+ compatible = "maxim,max6650";
+ reg = <0x1b>;
+ maxim,fan-microvolt = <12000000>;
+ maxim,fan-prescale = <4>;
+ maxim,fan-target-rpm = <1200>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
new file mode 100644
index 000000000000..10c2204bc3df
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/pmbus/adi,adp1050.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADP1050 digital controller with PMBus interface
+
+maintainers:
+ - Radu Sabau <radu.sabau@analog.com>
+
+description: |
+ The ADP1050 is used to monitor system voltages, currents and temperatures.
+ Through the PMBus interface, the ADP1050 targets isolated power supplies
+ and has four individual monitors for input/output voltage, input current
+ and temperature.
+ Datasheet:
+ https://www.analog.com/en/products/adp1050.html
+
+properties:
+ compatible:
+ const: adi,adp1050
+
+ reg:
+ maxItems: 1
+
+ vcc-supply: true
+
+required:
+ - compatible
+ - reg
+ - vcc-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+
+ hwmon@70 {
+ compatible = "adi,adp1050";
+ reg = <0x70>;
+ vcc-supply = <&vcc>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
deleted file mode 100644
index 48886f0ce415..000000000000
--- a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+++ /dev/null
@@ -1 +0,0 @@
-This file has moved to pwm-fan.yaml.
diff --git a/Documentation/devicetree/bindings/hwmon/st,stts751.yaml b/Documentation/devicetree/bindings/hwmon/st,stts751.yaml
new file mode 100644
index 000000000000..9c825adbed58
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/st,stts751.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/st,stts751.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STTS751 Thermometer
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+properties:
+ compatible:
+ const: st,stts751
+
+ reg:
+ maxItems: 1
+
+ smbus-timeout-disable:
+ description:
+ When set, the smbus timeout function will be disabled.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ thermometer@48 {
+ compatible = "st,stts751";
+ reg = <0x48>;
+ smbus-timeout-disable;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/stts751.txt b/Documentation/devicetree/bindings/hwmon/stts751.txt
deleted file mode 100644
index 3ee1dc30e72f..000000000000
--- a/Documentation/devicetree/bindings/hwmon/stts751.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-* STTS751 thermometer.
-
-Required node properties:
-- compatible: "stts751"
-- reg: I2C bus address of the device
-
-Optional properties:
-- smbus-timeout-disable: when set, the smbus timeout function will be disabled
-
-Example stts751 node:
-
-temp-sensor {
- compatible = "stts751";
- reg = <0x48>;
-}
diff --git a/Documentation/devicetree/bindings/hwmon/syna,as370.yaml b/Documentation/devicetree/bindings/hwmon/syna,as370.yaml
new file mode 100644
index 000000000000..1f7005f55247
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/syna,as370.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/syna,as370.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synaptics AS370 PVT sensors
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+properties:
+ compatible:
+ const: syna,as370-hwmon
+
+ reg:
+ description:
+ Address and length of the register set.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ sensor@ea0810 {
+ compatible = "syna,as370-hwmon";
+ reg = <0xea0810 0xc>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml b/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml
new file mode 100644
index 000000000000..a32035409cee
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/ti,adc128d818.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments ADC128D818 ADC System Monitor With Temperature Sensor
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+description: |
+ The ADC128D818 is a 12-Bit, 8-Channel Analog to Digital Converter (ADC)
+ with a temperature sensor and an I2C interface.
+
+ Datasheets:
+ https://www.ti.com/product/ADC128D818
+
+properties:
+ compatible:
+ const: ti,adc128d818
+
+ reg:
+ maxItems: 1
+
+ ti,mode:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: |
+ Operation mode.
+ Mode 0 - 7 single-ended voltage readings (IN0-IN6), 1 temperature
+ reading (internal).
+ Mode 1 - 8 single-ended voltage readings (IN0-IN7), no temperature.
+ Mode 2 - 4 pseudo-differential voltage readings
+ (IN0-IN1, IN3-IN2, IN4-IN5, IN7-IN6), 1 temperature reading (internal).
+ Mode 3 - 4 single-ended voltage readings (IN0-IN3), 2 pseudo-differential
+ voltage readings (IN4-IN5, IN7-IN6), 1 temperature reading (internal).
+ default: 0
+
+ vref-supply:
+ description:
+ The regulator to use as an external reference. If it does not exist, the
+ internal reference will be used.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@1d {
+ compatible = "ti,adc128d818";
+ reg = <0x1d>;
+ vref-supply = <&vref>;
+ ti,mode = /bits/ 8 <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
new file mode 100644
index 000000000000..f553235a7321
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/ti,lm87.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments LM87 Hardware Monitor
+
+maintainers:
+ - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+description: |
+ The LM87 is a serial interface system hardware monitor
+ with remote diode temperature sensing.
+
+ Datasheets:
+ https://www.ti.com/product/LM87
+
+properties:
+ compatible:
+ const: ti,lm87
+
+ reg:
+ maxItems: 1
+
+ has-temp3:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ This configures pins 18 and 19 to be used as a second
+ remote temperature sensing channel. By default the pins
+ are configured as voltage input pins in0 and in5.
+
+ has-in6:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ When set, pin 5 is configured to be used as voltage input
+ in6. Otherwise the pin is set as FAN1 input.
+
+ has-in7:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ When set, pin 6 is configured to be used as voltage input
+ in7. Otherwise the pin is set as FAN2 input.
+
+ vcc-supply:
+ description:
+ Regulator supplying power; it can be configured to measure a
+ 5.0V power supply. Default is 3.3V.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hwmon@2e {
+ compatible = "ti,lm87";
+ reg = <0x2e>;
+ has-temp3;
+ vcc-supply = <&reg_5v0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt b/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
deleted file mode 100644
index 2a59006cf79e..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* NXP PNX I2C Controller
-
-Required properties:
-
- - reg: Offset and length of the register set for the device
- - compatible: should be "nxp,pnx-i2c"
- - interrupts: configure one interrupt line
- - #address-cells: always 1 (for i2c addresses)
- - #size-cells: always 0
-
-Optional properties:
-
- - clock-frequency: desired I2C bus clock frequency in Hz, Default: 100000 Hz
-
-Examples:
-
- i2c1: i2c@400a0000 {
- compatible = "nxp,pnx-i2c";
- reg = <0x400a0000 0x100>;
- interrupt-parent = <&mic>;
- interrupts = <51 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c2: i2c@400a8000 {
- compatible = "nxp,pnx-i2c";
- reg = <0x400a8000 0x100>;
- interrupt-parent = <&mic>;
- interrupts = <50 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clock-frequency = <100000>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml b/Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
new file mode 100644
index 000000000000..798a6939b894
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/nxp,pnx-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PNX I2C Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: nxp,pnx-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ default: 100000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c@400a0000 {
+ compatible = "nxp,pnx-i2c";
+ reg = <0x400a0000 0x100>;
+ interrupt-parent = <&mic>;
+ interrupts = <51 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index f0eabff86310..daf4e71b8e7f 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
- qcom,sc7280-cci
+ - qcom,sc8280xp-cci
- qcom,sdm845-cci
- qcom,sm6350-cci
- qcom,sm8250-cci
@@ -176,6 +177,24 @@ allOf:
- const: cci
- const: cci_src
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc8280xp-cci
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: slow_ahb_src
+ - const: cpas_ahb
+ - const: cci
+
additionalProperties: false
examples:
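
For the new "qcom,sc8280xp-cci" entry, the clause above requires exactly four
clocks; a hedged sketch of those properties as they might appear in a node (the
clock provider phandle and specifier indices are placeholders, since the SoC
clock binding is not part of this file):

    clocks = <&camcc 0>, <&camcc 1>, <&camcc 2>, <&camcc 3>;
    clock-names = "camnoc_axi", "slow_ahb_src", "cpas_ahb", "cci";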
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
index 2291a7cd619b..91ecf17b7a81 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -15,14 +15,17 @@ allOf:
properties:
compatible:
- items:
- - enum:
- - renesas,riic-r7s72100 # RZ/A1H
- - renesas,riic-r7s9210 # RZ/A2M
- - renesas,riic-r9a07g043 # RZ/G2UL and RZ/Five
- - renesas,riic-r9a07g044 # RZ/G2{L,LC}
- - renesas,riic-r9a07g054 # RZ/V2L
- - const: renesas,riic-rz # RZ/A or RZ/G2L
+ oneOf:
+ - items:
+ - enum:
+ - renesas,riic-r7s72100 # RZ/A1H
+ - renesas,riic-r7s9210 # RZ/A2M
+ - renesas,riic-r9a07g043 # RZ/G2UL and RZ/Five
+ - renesas,riic-r9a07g044 # RZ/G2{L,LC}
+ - renesas,riic-r9a07g054 # RZ/V2L
+ - const: renesas,riic-rz # RZ/A or RZ/G2L
+
+ - const: renesas,riic-r9a09g057 # RZ/V2H(P)
reg:
maxItems: 1
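
Note that, unlike the RZ/A and RZ/G2L entries, the new RZ/V2H(P) compatible is
a standalone const with no "renesas,riic-rz" fallback, so a node would declare
simply:

    compatible = "renesas,riic-r9a09g057";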
diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
index 1b31b87c1800..8fd8be76875e 100644
--- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
@@ -127,6 +127,10 @@ properties:
wakeup-source: true
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
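
The newly allowed access-controllers property references the platform's
resource access controller; a hedged sketch of how it might appear inside an
I2C controller node (the phandle and controller ID below are placeholders).
The same property is added to the STM32 ADC, DFSDM, DAC and CEC schemas
further down.

    access-controllers = <&rifsc 33>;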
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index 995cbf8cefc6..ec34c48d4878 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -93,6 +93,10 @@ properties:
'#size-cells':
const: 0
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
allOf:
- if:
properties:
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 1970503389aa..c1b1324fa132 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -59,6 +59,10 @@ properties:
If not, SPI CLKOUT frequency will not be accurate.
maximum: 20000000
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
index 04045b932bd2..b15de4eb209c 100644
--- a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
@@ -45,6 +45,10 @@ properties:
'#size-cells':
const: 0
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
index c13c10c8d65d..eed0df9d3a23 100644
--- a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
+++ b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
@@ -42,7 +42,7 @@ allOf:
properties:
compatible:
contains:
- const: maxim,max30100
+ const: maxim,max30102
then:
properties:
maxim,green-led-current-microamp: false
diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
index 5b1769c19b17..418c168b223b 100644
--- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
@@ -784,7 +784,7 @@ patternProperties:
gpio-2: GPIO4
allOf:
- - $ref: ../pinctrl/pincfg-node.yaml#
+ - $ref: /schemas/pinctrl/pincfg-node.yaml#
properties:
drive-open-drain: true
diff --git a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
deleted file mode 100644
index 6c201a2ba8ac..000000000000
--- a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Texas Instruments TWL family (twl4030) pwrbutton module
-
-This module is part of the TWL4030. For more details about the whole
-chip see Documentation/devicetree/bindings/mfd/ti,twl.yaml.
-
-This module provides a simple power button event via an Interrupt.
-
-Required properties:
-- compatible: should be one of the following
- - "ti,twl4030-pwrbutton": For controllers compatible with twl4030
-- interrupts: should be one of the following
- - <8>: For controllers compatible with twl4030
-
-Example:
-
-&twl {
- twl_pwrbutton: pwrbutton {
- compatible = "ti,twl4030-pwrbutton";
- interrupts = <8>;
- };
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index 83603180d8d9..f49b43f45f3d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -25,12 +25,12 @@ properties:
- const: allwinner,sun6i-a31-sc-nmi
deprecated: true
- const: allwinner,sun7i-a20-sc-nmi
- - items:
- - const: allwinner,sun8i-v3s-nmi
- - const: allwinner,sun9i-a80-nmi
- const: allwinner,sun9i-a80-nmi
- items:
- - const: allwinner,sun50i-a100-nmi
+ - enum:
+ - allwinner,sun8i-v3s-nmi
+ - allwinner,sun50i-a100-nmi
+ - allwinner,sun50i-h616-nmi
- const: allwinner,sun9i-a80-nmi
reg:
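
With the reworked compatible list, the V3s, A100 and H616 controllers all fall
back to the A80 compatible, e.g.:

    compatible = "allwinner,sun50i-h616-nmi", "allwinner,sun9i-a80-nmi";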
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,mt6577-sysirq.yaml b/Documentation/devicetree/bindings/interrupt-controller/mediatek,mt6577-sysirq.yaml
index e1a379c052e4..123d24b05556 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,mt6577-sysirq.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,mt6577-sysirq.yaml
@@ -48,7 +48,7 @@ properties:
interrupt-controller: true
"#interrupt-cells":
- $ref: "arm,gic.yaml#/properties/#interrupt-cells"
+ $ref: arm,gic.yaml#/properties/#interrupt-cells
required:
- reg
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
index b417341fc8ae..fb3c29e81349 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
@@ -39,6 +39,7 @@ properties:
- renesas,intc-ex-r8a779a0 # R-Car V3U
- renesas,intc-ex-r8a779f0 # R-Car S4-8
- renesas,intc-ex-r8a779g0 # R-Car V4H
+ - renesas,intc-ex-r8a779h0 # R-Car V4M
- const: renesas,irqc
'#interrupt-cells':
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
new file mode 100644
index 000000000000..190a6499c932
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/riscv,aplic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RISC-V Advanced Platform Level Interrupt Controller (APLIC)
+
+maintainers:
+ - Anup Patel <anup@brainfault.org>
+
+description:
+ The RISC-V advanced interrupt architecture (AIA) defines an advanced
+ platform level interrupt controller (APLIC) for handling wired interrupts
+ in a RISC-V platform. The RISC-V AIA specification can be found at
+ https://github.com/riscv/riscv-aia.
+
+ The RISC-V APLIC is implemented as hierarchical APLIC domains where all
+ interrupt sources connect to the root APLIC domain and a parent APLIC
+ domain can delegate interrupt sources to its child APLIC domains. There
+ is one device tree node for each APLIC domain.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qemu,aplic
+ - const: riscv,aplic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupts-extended:
+ minItems: 1
+ maxItems: 16384
+ description:
+ The given APLIC domain directly injects external interrupts to a set of
+ RISC-V HARTs (or CPUs). Each node pointed to should be a riscv,cpu-intc
+ node, which has a CPU node (i.e. RISC-V HART) as parent.
+
+ msi-parent:
+ description:
+ The given APLIC domain forwards wired interrupts as MSIs to an AIA incoming
+ message signaled interrupt controller (IMSIC). If both "msi-parent" and
+ "interrupts-extended" properties are present then it means the APLIC
+ domain supports both MSI mode and Direct mode in HW. In this case, the
+ APLIC driver has to choose between MSI mode or Direct mode.
+
+ riscv,num-sources:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 1023
+ description:
+ Specifies the number of wired interrupt sources supported by this
+ APLIC domain.
+
+ riscv,children:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 1024
+ items:
+ maxItems: 1
+ description:
+ A list of child APLIC domains for the given APLIC domain. Each child
+ APLIC domain is assigned a child index in increasing order, with the
+ first child APLIC domain assigned child index 0. The APLIC domain child
+ index is used by firmware to delegate interrupts from the given APLIC
+ domain to a particular child APLIC domain.
+
+ riscv,delegation:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 1024
+ items:
+ items:
+ - description: child APLIC domain phandle
+ - description: first interrupt number of the parent APLIC domain (inclusive)
+ - description: last interrupt number of the parent APLIC domain (inclusive)
+ description:
+ An interrupt delegation list where each entry is a triple consisting
+ of a child APLIC domain phandle, the first interrupt number of the parent
+ APLIC domain, and the last interrupt number of the parent APLIC domain.
+ Firmware must configure the interrupt delegation registers based on
+ this interrupt delegation list.
+
+dependencies:
+ riscv,delegation: [ "riscv,children" ]
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+ - riscv,num-sources
+
+anyOf:
+ - required:
+ - interrupts-extended
+ - required:
+ - msi-parent
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ // Example 1 (APLIC domains directly injecting interrupt to HARTs):
+
+ interrupt-controller@c000000 {
+ compatible = "qemu,aplic", "riscv,aplic";
+ interrupts-extended = <&cpu1_intc 11>,
+ <&cpu2_intc 11>,
+ <&cpu3_intc 11>,
+ <&cpu4_intc 11>;
+ reg = <0xc000000 0x4080>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ riscv,num-sources = <63>;
+ riscv,children = <&aplic1>, <&aplic2>;
+ riscv,delegation = <&aplic1 1 63>;
+ };
+
+ aplic1: interrupt-controller@d000000 {
+ compatible = "qemu,aplic", "riscv,aplic";
+ interrupts-extended = <&cpu1_intc 9>,
+ <&cpu2_intc 9>;
+ reg = <0xd000000 0x4080>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ riscv,num-sources = <63>;
+ };
+
+ aplic2: interrupt-controller@e000000 {
+ compatible = "qemu,aplic", "riscv,aplic";
+ interrupts-extended = <&cpu3_intc 9>,
+ <&cpu4_intc 9>;
+ reg = <0xe000000 0x4080>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ riscv,num-sources = <63>;
+ };
+
+ - |
+ // Example 2 (APLIC domains forwarding interrupts as MSIs):
+
+ interrupt-controller@c000000 {
+ compatible = "qemu,aplic", "riscv,aplic";
+ msi-parent = <&imsic_mlevel>;
+ reg = <0xc000000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ riscv,num-sources = <63>;
+ riscv,children = <&aplic3>;
+ riscv,delegation = <&aplic3 1 63>;
+ };
+
+ aplic3: interrupt-controller@d000000 {
+ compatible = "qemu,aplic", "riscv,aplic";
+ msi-parent = <&imsic_slevel>;
+ reg = <0xd000000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ riscv,num-sources = <63>;
+ };
+...
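
A wired peripheral would then reference one of these APLIC domains as its
interrupt parent; a hedged sketch, assuming the usual <source flags> meaning of
the two interrupt cells (the schema above does not spell the encoding out):

    interrupt-parent = <&aplic1>;
    interrupts = <10 4>;    /* wired source 10, level-high; values illustrative */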
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
new file mode 100644
index 000000000000..84976f17a4a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/riscv,imsics.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RISC-V Incoming MSI Controller (IMSIC)
+
+maintainers:
+ - Anup Patel <anup@brainfault.org>
+
+description: |
+ The RISC-V advanced interrupt architecture (AIA) defines a per-CPU incoming
+ MSI controller (IMSIC) for handling MSIs in a RISC-V platform. The RISC-V
+ AIA specification can be found at https://github.com/riscv/riscv-aia.
+
+ The IMSIC is a per-CPU (or per-HART) device with a separate interrupt file
+ for each privilege level (machine or supervisor). The configuration of
+ an IMSIC interrupt file is done using AIA CSRs, and it also has a 4KB MMIO
+ space to receive MSIs from devices. Each IMSIC interrupt file supports a
+ fixed number of interrupt identities (to distinguish MSIs from devices),
+ which is the same for a given privilege level across CPUs (or HARTs).
+
+ The device tree of a RISC-V platform will have one IMSIC device tree node
+ for each privilege level (machine or supervisor) which collectively describe
+ IMSIC interrupt files at that privilege level across CPUs (or HARTs).
+
+ The arrangement of IMSIC interrupt files in MMIO space of a RISC-V platform
+ follows a particular scheme defined by the RISC-V AIA specification. An IMSIC
+ group is a set of IMSIC interrupt files co-located in MMIO space, and we can
+ have multiple IMSIC groups (i.e. clusters, sockets, chiplets, etc.) in a
+ RISC-V platform. The MSI target address of an IMSIC interrupt file at a given
+ privilege level (machine or supervisor) encodes the group index, HART index,
+ and guest index (shown below).
+
+ XLEN-1 > (HART Index MSB) 12 0
+ | | | |
+ -------------------------------------------------------------
+ |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 |
+ -------------------------------------------------------------
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+ - $ref: /schemas/interrupt-controller/msi-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qemu,imsics
+ - const: riscv,imsics
+
+ reg:
+ minItems: 1
+ maxItems: 16384
+ description:
+ Base address of each IMSIC group.
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 0
+
+ msi-controller: true
+
+ "#msi-cells":
+ const: 0
+
+ interrupts-extended:
+ minItems: 1
+ maxItems: 16384
+ description:
+ This property represents the set of CPUs (or HARTs) for which the given
+ device tree node describes the IMSIC interrupt files. Each node pointed
+ to should be a riscv,cpu-intc node, which has a CPU node (i.e. RISC-V
+ HART) as parent.
+
+ riscv,num-ids:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 63
+ maximum: 2047
+ description:
+ Number of interrupt identities supported by IMSIC interrupt file.
+
+ riscv,num-guest-ids:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 63
+ maximum: 2047
+ description:
+ Number of interrupt identities supported by an IMSIC guest interrupt
+ file. When not specified, it is assumed to be the same as specified by the
+ riscv,num-ids property.
+
+ riscv,guest-index-bits:
+ minimum: 0
+ maximum: 7
+ default: 0
+ description:
+ Number of guest index bits in the MSI target address.
+
+ riscv,hart-index-bits:
+ minimum: 0
+ maximum: 15
+ description:
+ Number of HART index bits in the MSI target address. When not
+ specified it is calculated based on the interrupts-extended property.
+
+ riscv,group-index-bits:
+ minimum: 0
+ maximum: 7
+ default: 0
+ description:
+ Number of group index bits in the MSI target address.
+
+ riscv,group-index-shift:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 55
+ default: 24
+ description:
+ The least significant bit position of the group index bits in the
+ MSI target address.
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - msi-controller
+ - "#msi-cells"
+ - interrupts-extended
+ - riscv,num-ids
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ // Example 1 (Machine-level IMSIC files with just one group):
+
+ interrupt-controller@24000000 {
+ compatible = "qemu,imsics", "riscv,imsics";
+ interrupts-extended = <&cpu1_intc 11>,
+ <&cpu2_intc 11>,
+ <&cpu3_intc 11>,
+ <&cpu4_intc 11>;
+ reg = <0x28000000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <0>;
+ msi-controller;
+ #msi-cells = <0>;
+ riscv,num-ids = <127>;
+ };
+
+ - |
+ // Example 2 (Supervisor-level IMSIC files with two groups):
+
+ interrupt-controller@28000000 {
+ compatible = "qemu,imsics", "riscv,imsics";
+ interrupts-extended = <&cpu1_intc 9>,
+ <&cpu2_intc 9>,
+ <&cpu3_intc 9>,
+ <&cpu4_intc 9>;
+ reg = <0x28000000 0x2000>, /* Group0 IMSICs */
+ <0x29000000 0x2000>; /* Group1 IMSICs */
+ interrupt-controller;
+ #interrupt-cells = <0>;
+ msi-controller;
+ #msi-cells = <0>;
+ riscv,num-ids = <127>;
+ riscv,group-index-bits = <1>;
+ riscv,group-index-shift = <24>;
+ };
+...
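
A quick worked check of the address layout against Example 2, where the group
index is one bit wide at shift 24:

    group1_base = group0_base + (1 << riscv,group-index-shift)
                = 0x28000000 + 0x01000000
                = 0x29000000    /* matches the second reg entry */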
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
index 00c10a8258f1..9967e57b449b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
@@ -89,8 +89,23 @@ examples:
reg = <0x5000d000 0x400>;
};
+ - |
//Example 2
- exti2: interrupt-controller@40013c00 {
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ exti2: interrupt-controller@5000d000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x5000d000 0x400>;
+ interrupts-extended =
+ <&intc GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ - |
+ //Example 3
+ exti3: interrupt-controller@40013c00 {
compatible = "st,stm32-exti";
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/iommu/qcom,tbu.yaml b/Documentation/devicetree/bindings/iommu/qcom,tbu.yaml
new file mode 100644
index 000000000000..82dfe935573e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/qcom,tbu.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/qcom,tbu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm TBU (Translation Buffer Unit)
+
+maintainers:
+ - Georgi Djakov <quic_c_gdjako@quicinc.com>
+
+description:
+ The Qualcomm SMMU500 implementation consists of TCU and TBU. The TBU contains
+ a Translation Lookaside Buffer (TLB) that caches page tables. TBUs provide
+ debug features to trace and trigger debug transactions. There are multiple TBU
+ instances with each client core.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7280-tbu
+ - qcom,sdm845-tbu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interconnects:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ qcom,stream-id-range:
+ description: |
+ Phandle of an SMMU device and Stream ID range (address and size) that
+ is assigned by the TBU
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle of a smmu node
+ - description: stream id base address
+ - description: stream id size
+
+required:
+ - compatible
+ - reg
+ - qcom,stream-id-range
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+ tbu@150e1000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x150e1000 0x1000>;
+ clocks = <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1c00 0x400>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
index be90f68c11d1..0acaa2bcec08 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
@@ -50,6 +50,7 @@ properties:
- renesas,ipmmu-r8a779a0 # R-Car V3U
- renesas,ipmmu-r8a779f0 # R-Car S4-8
- renesas,ipmmu-r8a779g0 # R-Car V4H
+ - renesas,ipmmu-r8a779h0 # R-Car V4M
- const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/mailbox/arm,mhuv3.yaml b/Documentation/devicetree/bindings/mailbox/arm,mhuv3.yaml
new file mode 100644
index 000000000000..449b55afeb7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/arm,mhuv3.yaml
@@ -0,0 +1,224 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/arm,mhuv3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM MHUv3 Mailbox Controller
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+ - Cristian Marussi <cristian.marussi@arm.com>
+
+description: |
+ The Arm Message Handling Unit (MHU) Version 3 is a mailbox controller that
+ enables unidirectional communications with remote processors through various
+ possible transport protocols.
+ The controller can optionally support a varying number of extensions that, in
+ turn, enable different kinds of transport to be used for communication.
+ Number, type and characteristics of each supported extension can be discovered
+ dynamically at runtime.
+
+ Given the unidirectional nature of the controller, an MHUv3 mailbox controller
+ is composed of a MHU Sender (MHUS) containing a PostBox (PBX) block and a MHU
+ Receiver (MHUR) containing a MailBox (MBX) block, where
+
+ PBX is used to
+ - Configure the MHU
+ - Send Transfers to the Receiver
+ - Optionally receive acknowledgment of a Transfer from the Receiver
+
+ MBX is used to
+ - Configure the MHU
+ - Receive Transfers from the Sender
+ - Optionally acknowledge Transfers sent by the Sender
+
+ Both PBX and MBX need to be present and defined in the DT description if you
+ need to establish a bidirectional communication, since you will have to
+ acquire two distinct unidirectional channels, one for each block.
+
+ As a consequence both blocks need to be represented separately and specified
+ as distinct DT nodes in order to properly describe their resources.
+
+ Note that, though, thanks to the runtime discoverability, there is no need to
+ identify the type of blocks with distinct compatibles.
+
+ Following are the MHUv3 possible extensions.
+
+ - Doorbell Extension (DBE): DBE defines a type of channel called a Doorbell
+ Channel (DBCH). DBCH enables a single bit Transfer to be sent from the
+ Sender to Receiver. The Transfer indicates that an event has occurred.
+ When DBE is implemented, the number of DBCHs that an implementation of the
+ MHU can support is between 1 and 128, numbered starting from 0 in ascending
+ order and discoverable at run-time.
+ Each DBCH contains 32 individual fields, referred to as flags, each of which
+ can be used independently. It is possible for the Sender to send multiple
+ Transfers at once using a single DBCH, so long as each Transfer uses
+ a different flag in the DBCH.
+ Optionally, data may be transmitted through an out-of-band shared memory
+ region, wherein the MHU Doorbell is used strictly as an interrupt generation
+ mechanism, but this is out of the scope of these bindings.
+
+ - FastChannel Extension (FCE): FCE defines a type of channel called a Fast
+ Channel (FCH). FCH is intended for lower overhead communication between
+ Sender and Receiver at the expense of determinism. An FCH allows the Sender
+ to update the channel value at any time, regardless of whether the previous
+ value has been seen by the Receiver. When the Receiver reads the channel's
+ content it gets the last value written to the channel.
+ FCH is considered lossy in nature, which means that the Sender has no way of
+ knowing if, or when, the Receiver will act on the Transfer.
+ FCHs are expected to behave as RAM which generates interrupts when writes
+ occur to the locations within the RAM.
+ When FCE is implemented, the number of FCHs that an implementation of the
+ MHU can support is between 1 and 1024 if the FastChannel word-size is 32 bits,
+ or between 1 and 512 when the FastChannel word-size is 64 bits.
+ FCHs are numbered from 0 in ascending order.
+ Note that the number of FCHs and the word-size are implementation defined,
+ not configurable but discoverable at run-time.
+ Optionally, data may be transmitted through an out-of-band shared memory
+ region, wherein the MHU FastChannel is used as an interrupt generation
+ mechanism which carries also a pointer to such out-of-band data, but this
+ is out of the scope of these bindings.
+
+ - FIFO Extension (FE): FE defines a Channel type called a FIFO Channel (FFCH).
+ FFCH allows a Sender to send
+ - Multiple Transfers to the Receiver without having to wait for the
+ previous Transfer to be acknowledged by the Receiver, as long as the
+ FIFO has room for the Transfer.
+ - Transfers which require the Receiver to provide acknowledgment.
+ - Transfers which have in-band payload.
+ In all cases, the data is guaranteed to be observed by the Receiver in the
+ same order which the Sender sent it.
+ When FE is implemented, the number of FFCHs that an implementation of the
+ MHU can support is between 1 and 64, numbered starting from 0 in ascending
+ order. The number of FFCHs, their depth (same for all implemented FFCHs) and
+ the access-granularity are implementation defined, not configurable but
+ discoverable at run-time.
+ Optionally, additional data may be transmitted through an out-of-band shared
+ memory region, wherein the MHU FIFO is used to transmit, in order, a small
+ part of the payload (like a header) and a reference to the shared memory
+ area holding the remaining, bigger, chunk of the payload, but this is out of
+ the scope of these bindings.
+
+properties:
+ compatible:
+ const: arm,mhuv3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 74
+
+ interrupt-names:
+ description: |
+ The MHUv3 controller generates a number of events some of which are used
+ to generate interrupts; as a consequence it can expose a varying number of
+ optional PBX/MBX interrupts, representing the events generated during the
+ operation of the various transport protocols associated with different
+ extensions. All interrupts of the MHU are level-sensitive.
+ Some of these optional interrupts are defined per-channel, where the
+ number of channels effectively available is implementation defined and
+ run-time discoverable.
+ In the following, names are enumerated using patterns, with per-channel
+ interrupts implicitly capped at the maximum channels allowed by the
+ specification for each extension type.
+ For the sake of simplicity, maxItems is capped at a plausible
+ number, assuming far fewer channels would be implemented than are actually
+ possible.
+
+ The only mandatory interrupts on the MHU are:
+ - combined
+ - mbx-fch-xfer-<N>, but only if mbx-fchgrp-xfer-<N> is not implemented.
+
+ minItems: 1
+ maxItems: 74
+ items:
+ oneOf:
+ - const: combined
+ description: PBX/MBX Combined interrupt
+ - const: combined-ffch
+ description: PBX/MBX FIFO Combined interrupt
+ - pattern: '^ffch-low-tide-[0-9]+$'
+ description: PBX/MBX FIFO Channel <N> Low Tide interrupt
+ - pattern: '^ffch-high-tide-[0-9]+$'
+ description: PBX/MBX FIFO Channel <N> High Tide interrupt
+ - pattern: '^ffch-flush-[0-9]+$'
+ description: PBX/MBX FIFO Channel <N> Flush interrupt
+ - pattern: '^mbx-dbch-xfer-[0-9]+$'
+ description: MBX Doorbell Channel <N> Transfer interrupt
+ - pattern: '^mbx-fch-xfer-[0-9]+$'
+ description: MBX FastChannel <N> Transfer interrupt
+ - pattern: '^mbx-fchgrp-xfer-[0-9]+$'
+ description: MBX FastChannel <N> Group Transfer interrupt
+ - pattern: '^mbx-ffch-xfer-[0-9]+$'
+ description: MBX FIFO Channel <N> Transfer interrupt
+ - pattern: '^pbx-dbch-xfer-ack-[0-9]+$'
+ description: PBX Doorbell Channel <N> Transfer Ack interrupt
+ - pattern: '^pbx-ffch-xfer-ack-[0-9]+$'
+ description: PBX FIFO Channel <N> Transfer Ack interrupt
+
+ '#mbox-cells':
+ description: |
+ The first argument in the consumers 'mboxes' property represents the
+ extension type, the second is for the channel number while the third
+ depends on extension type.
+
+ Extension types constants are defined in <dt-bindings/arm/mhuv3-dt.h>.
+
+ Extension type for DBE is DBE_EXT and the third parameter represents the
+ doorbell flag number to use.
+ Extension type for FCE is FCE_EXT, third parameter unused.
+ Extension type for FE is FE_EXT, third parameter unused.
+
+ mboxes = <&mhu DBE_EXT 0 5>; // DBE, Doorbell Channel Window 0, doorbell 5.
+ mboxes = <&mhu DBE_EXT 1 7>; // DBE, Doorbell Channel Window 1, doorbell 7.
+ mboxes = <&mhu FCE_EXT 0 0>; // FCE, FastChannel Window 0.
+ mboxes = <&mhu FCE_EXT 3 0>; // FCE, FastChannel Window 3.
+ mboxes = <&mhu FE_EXT 1 0>; // FE, FIFO Channel Window 1.
+ mboxes = <&mhu FE_EXT 7 0>; // FE, FIFO Channel Window 7.
+ const: 3
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ mailbox@2aaa0000 {
+ compatible = "arm,mhuv3";
+ #mbox-cells = <3>;
+ reg = <0 0x2aaa0000 0 0x10000>;
+ clocks = <&clock 0>;
+ interrupt-names = "combined", "pbx-dbch-xfer-ack-1",
+ "ffch-high-tide-0";
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ mailbox@2aab0000 {
+ compatible = "arm,mhuv3";
+ #mbox-cells = <3>;
+ reg = <0 0x2aab0000 0 0x10000>;
+ clocks = <&clock 0>;
+ interrupt-names = "combined", "mbx-dbch-xfer-1", "ffch-low-tide-0";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index 79eb523b8436..982c741e6225 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -30,6 +30,7 @@ properties:
- const: syscon
- items:
- enum:
+ - qcom,msm8974-apcs-kpss-global
- qcom,msm8976-apcs-kpss-global
- const: qcom,msm8994-apcs-kpss-global
- const: syscon
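
The new msm8974 entry sits in the enum that falls back to the msm8994
compatible plus syscon, i.e. a node would declare:

    compatible = "qcom,msm8974-apcs-kpss-global",
                 "qcom,msm8994-apcs-kpss-global", "syscon";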
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index 8f004868aad9..05e4e1d51713 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -28,6 +28,7 @@ properties:
- qcom,sa8775p-ipcc
- qcom,sc7280-ipcc
- qcom,sc8280xp-ipcc
+ - qcom,sdx75-ipcc
- qcom,sm6350-ipcc
- qcom,sm6375-ipcc
- qcom,sm8250-ipcc
diff --git a/Documentation/devicetree/bindings/media/amphion,vpu.yaml b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
index c0d83d755239..9801de3ed84e 100644
--- a/Documentation/devicetree/bindings/media/amphion,vpu.yaml
+++ b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
@@ -44,7 +44,7 @@ patternProperties:
description:
Each vpu encoder or decoder correspond a MU, which used for communication
between driver and firmware. Implement via mailbox on driver.
- $ref: ../mailbox/fsl,mu.yaml#
+ $ref: /schemas/mailbox/fsl,mu.yaml#
"^vpu-core@[0-9a-f]+$":
diff --git a/Documentation/devicetree/bindings/media/brcm,bcm2835-unicam.yaml b/Documentation/devicetree/bindings/media/brcm,bcm2835-unicam.yaml
new file mode 100644
index 000000000000..5fb5d60f069a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/brcm,bcm2835-unicam.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/brcm,bcm2835-unicam.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM283x Camera Interface (Unicam)
+
+maintainers:
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description: |-
+ The Unicam block on BCM283x SoCs is the receiver for either
+ CSI-2 or CCP2 data from image sensors or similar devices.
+
+ The main platform using this SoC is the Raspberry Pi family of boards. On
+ the Pi the VideoCore firmware can also control this hardware block, and
+ driving it from two different processors will cause issues. To avoid this,
+ the firmware checks the device tree configuration during boot. If it finds
+ device tree nodes whose names start with 'csi', the firmware will stop
+ accessing the block, and it can then safely be used via the device tree
+ binding.
+
+properties:
+ compatible:
+ const: brcm,bcm2835-unicam
+
+ reg:
+ items:
+ - description: Unicam block.
+ - description: Clock Manager Image (CMI) block.
+
+ reg-names:
+ items:
+ - const: unicam
+ - const: cmi
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Clock to drive the LP state machine of Unicam.
+ - description: Clock for the VPU (core clock).
+
+ clock-names:
+ items:
+ - const: lp
+ - const: vpu
+
+ power-domains:
+ items:
+ - description: Unicam power domain
+
+ brcm,num-data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 2, 4 ]
+ description: |
+ Number of CSI-2 data lanes supported by this Unicam instance. The number
+ of data lanes actively used is specified with the data-lanes endpoint
+ property.
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ additionalProperties: false
+
+ properties:
+ bus-type:
+ enum: [ 3, 4 ]
+
+ clock-noncontinuous: true
+ data-lanes: true
+ remote-endpoint: true
+
+ required:
+ - bus-type
+ - data-lanes
+ - remote-endpoint
+
+ required:
+ - endpoint
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - brcm,num-data-lanes
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/bcm2835.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/media/video-interfaces.h>
+ #include <dt-bindings/power/raspberrypi-power.h>
+
+ csi1: csi@7e801000 {
+ compatible = "brcm,bcm2835-unicam";
+ reg = <0x7e801000 0x800>,
+ <0x7e802004 0x4>;
+ reg-names = "unicam", "cmi";
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clocks BCM2835_CLOCK_CAM1>,
+ <&firmware_clocks 4>;
+ clock-names = "lp", "vpu";
+ power-domains = <&power RPI_POWER_DOMAIN_UNICAM1>;
+ brcm,num-data-lanes = <2>;
+ port {
+ csi1_ep: endpoint {
+ remote-endpoint = <&imx219_0>;
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/cec/st,stm32-cec.yaml b/Documentation/devicetree/bindings/media/cec/st,stm32-cec.yaml
index 2314a9a14650..1d930d9e10fd 100644
--- a/Documentation/devicetree/bindings/media/cec/st,stm32-cec.yaml
+++ b/Documentation/devicetree/bindings/media/cec/st,stm32-cec.yaml
@@ -29,6 +29,10 @@ properties:
- const: cec
- const: hdmi-cec
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/i2c/galaxycore,gc0308.yaml b/Documentation/devicetree/bindings/media/i2c/galaxycore,gc0308.yaml
index f81e7daed67b..2bf1a81feaf4 100644
--- a/Documentation/devicetree/bindings/media/i2c/galaxycore,gc0308.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/galaxycore,gc0308.yaml
@@ -15,7 +15,7 @@ description: |
They include an ISP capable of auto exposure and auto white balance.
allOf:
- - $ref: ../video-interface-devices.yaml#
+ - $ref: /schemas/media/video-interface-devices.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml b/Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml
index 1726ecca4c77..9eac588de0bc 100644
--- a/Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml
@@ -19,7 +19,7 @@ description:
either through a parallel interface or through MIPI CSI-2.
allOf:
- - $ref: ../video-interface-devices.yaml#
+ - $ref: /schemas/media/video-interface-devices.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
index cf456f8d9ddc..634d3b821b8c 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
@@ -37,31 +37,45 @@ properties:
active low.
maxItems: 1
- dovdd-supply:
+ DOVDD-supply:
description:
Definition of the regulator used as interface power supply.
- avdd-supply:
+ AVDD-supply:
description:
Definition of the regulator used as analog power supply.
- dvdd-supply:
+ DVDD-supply:
description:
Definition of the regulator used as digital power supply.
port:
- $ref: /schemas/graph.yaml#/properties/port
description:
A node containing an output port node.
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ additionalProperties: false
+
+ properties:
+ link-frequencies: true
+
+ remote-endpoint: true
+
+ required:
+ - link-frequencies
required:
- compatible
- reg
- clocks
- clock-names
- - dovdd-supply
- - avdd-supply
- - dvdd-supply
+ - DOVDD-supply
+ - AVDD-supply
+ - DVDD-supply
- reset-gpios
- port
@@ -82,13 +96,14 @@ examples:
clock-names = "xvclk";
reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
- dovdd-supply = <&sw2_reg>;
- dvdd-supply = <&sw2_reg>;
- avdd-supply = <&reg_peri_3p15v>;
+ DOVDD-supply = <&sw2_reg>;
+ DVDD-supply = <&sw2_reg>;
+ AVDD-supply = <&reg_peri_3p15v>;
port {
ov2680_to_mipi: endpoint {
remote-endpoint = <&mipi_from_sensor>;
+ link-frequencies = /bits/ 64 <330000000>;
};
};
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml
index 816dac9c6f60..3f6f72c35485 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml
@@ -2,7 +2,7 @@
# Copyright (c) 2019 MediaTek Inc.
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/ov8856.yaml#
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov8856.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Omnivision OV8856 CMOS Sensor
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
index 60903da84e1f..0162eec8ca99 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
@@ -16,7 +16,7 @@ description: |
maximum throughput of 1.2Gbps/lane.
allOf:
- - $ref: ../video-interface-devices.yaml#
+ - $ref: /schemas/media/video-interface-devices.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
index a531badc16c9..bf05ca48601a 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
@@ -23,6 +23,9 @@ description: |-
is treated the same as this as it was the original compatible string.
imx290llr is the mono version of the sensor.
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
oneOf:
@@ -101,7 +104,7 @@ required:
- vdddo-supply
- port
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
index 9a00dab2e8a3..34962c5c7006 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
@@ -18,7 +18,7 @@ description: |-
available via CSI-2 serial data output (two or four lanes).
allOf:
- - $ref: ../video-interface-devices.yaml#
+ - $ref: /schemas/media/video-interface-devices.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
index e4665469a86c..4d5348d456a1 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
@@ -84,6 +84,7 @@ allOf:
properties:
port@0:
description: MIPI CSI-2 RX
+ port@1: false
required:
- port@0
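
Side note on the idiom used above: in dt-schema, setting a node to "false" inside an if/then branch forbids it entirely for the matching variant, which is how a single-pipeline ISI instance rejects a second input port. A minimal, self-contained sketch of the pattern (the compatible string below is purely illustrative and not part of the binding above):

  allOf:
    - if:
        properties:
          compatible:
            const: vendor,single-input-isi   # hypothetical compatible
      then:
        properties:
          port@0:
            description: MIPI CSI-2 RX
          port@1: false                      # second input must not be present
        required:
          - port@0
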
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml b/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
index 3d9d1db37040..2be30c5fdc83 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
@@ -31,6 +31,11 @@ properties:
reg:
maxItems: 1
+ clocks:
+ items:
+ - description: AXI DMA engine clock for fetching JPEG bitstream from memory (per)
+ - description: IP bus clock for register access (ipg)
+
interrupts:
description: |
There are 4 slots available in the IP, which the driver may use
@@ -49,6 +54,7 @@ properties:
required:
- compatible
- reg
+ - clocks
- interrupts
- power-domains
@@ -56,12 +62,15 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/clock/imx8-lpcg.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/firmware/imx/rsrc.h>
jpegdec: jpegdec@58400000 {
compatible = "nxp,imx8qxp-jpgdec";
reg = <0x58400000 0x00050000 >;
+ clocks = <&img_jpeg_dec_lpcg IMX_LPCG_CLK_0>,
+ <&img_jpeg_dec_lpcg IMX_LPCG_CLK_4>;
interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
@@ -76,6 +85,8 @@ examples:
jpegenc: jpegenc@58450000 {
compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
reg = <0x58450000 0x00050000 >;
+ clocks = <&img_jpeg_enc_lpcg IMX_LPCG_CLK_0>,
+ <&img_jpeg_enc_lpcg IMX_LPCG_CLK_4>;
interrupts = <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
new file mode 100644
index 000000000000..c0bc31709873
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
@@ -0,0 +1,512 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,sc8280xp-camss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC8280XP Camera Subsystem (CAMSS)
+
+maintainers:
+ - Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+description: |
+ The CAMSS IP is a CSI decoder and ISP present on Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,sc8280xp-camss
+
+ clocks:
+ maxItems: 40
+
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: cpas_ahb
+ - const: csiphy0
+ - const: csiphy0_timer
+ - const: csiphy1
+ - const: csiphy1_timer
+ - const: csiphy2
+ - const: csiphy2_timer
+ - const: csiphy3
+ - const: csiphy3_timer
+ - const: vfe0_axi
+ - const: vfe0
+ - const: vfe0_cphy_rx
+ - const: vfe0_csid
+ - const: vfe1_axi
+ - const: vfe1
+ - const: vfe1_cphy_rx
+ - const: vfe1_csid
+ - const: vfe2_axi
+ - const: vfe2
+ - const: vfe2_cphy_rx
+ - const: vfe2_csid
+ - const: vfe3_axi
+ - const: vfe3
+ - const: vfe3_cphy_rx
+ - const: vfe3_csid
+ - const: vfe_lite0
+ - const: vfe_lite0_cphy_rx
+ - const: vfe_lite0_csid
+ - const: vfe_lite1
+ - const: vfe_lite1_cphy_rx
+ - const: vfe_lite1_csid
+ - const: vfe_lite2
+ - const: vfe_lite2_cphy_rx
+ - const: vfe_lite2_csid
+ - const: vfe_lite3
+ - const: vfe_lite3_cphy_rx
+ - const: vfe_lite3_csid
+ - const: gcc_axi_hf
+ - const: gcc_axi_sf
+
+ interrupts:
+ maxItems: 20
+
+ interrupt-names:
+ items:
+ - const: csid1_lite
+ - const: vfe_lite1
+ - const: csiphy3
+ - const: csid0
+ - const: vfe0
+ - const: csid1
+ - const: vfe1
+ - const: csid0_lite
+ - const: vfe_lite0
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csid2
+ - const: vfe2
+ - const: csid3_lite
+ - const: csid2_lite
+ - const: vfe_lite3
+ - const: vfe_lite2
+ - const: csid3
+ - const: vfe3
+
+ iommus:
+ maxItems: 16
+
+ interconnects:
+ maxItems: 4
+
+ interconnect-names:
+ items:
+ - const: cam_ahb
+ - const: cam_hf_mnoc
+ - const: cam_sf_mnoc
+ - const: cam_sf_icp_mnoc
+
+ power-domains:
+ items:
+ - description: IFE0 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE1 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE2 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE3 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: Titan Top GDSC - Titan ISP Block, Global Distributed Switch Controller.
+
+ power-domain-names:
+ items:
+ - const: ife0
+ - const: ife1
+ - const: ife2
+ - const: ife3
+ - const: top
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ description:
+ CSI input ports.
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY0.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY1.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY2.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@3:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY3.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ reg:
+ maxItems: 20
+
+ reg-names:
+ items:
+ - const: csiphy2
+ - const: csiphy3
+ - const: csiphy0
+ - const: csiphy1
+ - const: vfe0
+ - const: csid0
+ - const: vfe1
+ - const: csid1
+ - const: vfe2
+ - const: csid2
+ - const: vfe_lite0
+ - const: csid0_lite
+ - const: vfe_lite1
+ - const: csid1_lite
+ - const: vfe_lite2
+ - const: csid2_lite
+ - const: vfe_lite3
+ - const: csid3_lite
+ - const: vfe3
+ - const: csid3
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+required:
+ - clock-names
+ - clocks
+ - compatible
+ - interconnects
+ - interconnect-names
+ - interrupts
+ - interrupt-names
+ - iommus
+ - power-domains
+ - power-domain-names
+ - reg
+ - reg-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,gcc-sc8280xp.h>
+ #include <dt-bindings/clock/qcom,sc8280xp-camcc.h>
+ #include <dt-bindings/interconnect/qcom,sc8280xp.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ camss: camss@ac5a000 {
+ compatible = "qcom,sc8280xp-camss";
+
+ reg = <0 0x0ac5a000 0 0x2000>,
+ <0 0x0ac5c000 0 0x2000>,
+ <0 0x0ac65000 0 0x2000>,
+ <0 0x0ac67000 0 0x2000>,
+ <0 0x0acaf000 0 0x4000>,
+ <0 0x0acb3000 0 0x1000>,
+ <0 0x0acb6000 0 0x4000>,
+ <0 0x0acba000 0 0x1000>,
+ <0 0x0acbd000 0 0x4000>,
+ <0 0x0acc1000 0 0x1000>,
+ <0 0x0acc4000 0 0x4000>,
+ <0 0x0acc8000 0 0x1000>,
+ <0 0x0accb000 0 0x4000>,
+ <0 0x0accf000 0 0x1000>,
+ <0 0x0acd2000 0 0x4000>,
+ <0 0x0acd6000 0 0x1000>,
+ <0 0x0acd9000 0 0x4000>,
+ <0 0x0acdd000 0 0x1000>,
+ <0 0x0ace0000 0 0x4000>,
+ <0 0x0ace4000 0 0x1000>;
+
+ reg-names = "csiphy2",
+ "csiphy3",
+ "csiphy0",
+ "csiphy1",
+ "vfe0",
+ "csid0",
+ "vfe1",
+ "csid1",
+ "vfe2",
+ "csid2",
+ "vfe_lite0",
+ "csid0_lite",
+ "vfe_lite1",
+ "csid1_lite",
+ "vfe_lite2",
+ "csid2_lite",
+ "vfe_lite3",
+ "csid3_lite",
+ "vfe3",
+ "csid3";
+
+ vdda-phy-supply = <&vreg_l6d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-names = "csid1_lite",
+ "vfe_lite1",
+ "csiphy3",
+ "csid0",
+ "vfe0",
+ "csid1",
+ "vfe1",
+ "csid0_lite",
+ "vfe_lite0",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csid2",
+ "vfe2",
+ "csid3_lite",
+ "csid2_lite",
+ "vfe_lite3",
+ "vfe_lite2",
+ "csid3",
+ "vfe3";
+
+ power-domains = <&camcc IFE_0_GDSC>,
+ <&camcc IFE_1_GDSC>,
+ <&camcc IFE_2_GDSC>,
+ <&camcc IFE_3_GDSC>,
+ <&camcc TITAN_TOP_GDSC>;
+
+ power-domain-names = "ife0",
+ "ife1",
+ "ife2",
+ "ife3",
+ "top";
+
+ clocks = <&camcc CAMCC_CAMNOC_AXI_CLK>,
+ <&camcc CAMCC_CPAS_AHB_CLK>,
+ <&camcc CAMCC_CSIPHY0_CLK>,
+ <&camcc CAMCC_CSI0PHYTIMER_CLK>,
+ <&camcc CAMCC_CSIPHY1_CLK>,
+ <&camcc CAMCC_CSI1PHYTIMER_CLK>,
+ <&camcc CAMCC_CSIPHY2_CLK>,
+ <&camcc CAMCC_CSI2PHYTIMER_CLK>,
+ <&camcc CAMCC_CSIPHY3_CLK>,
+ <&camcc CAMCC_CSI3PHYTIMER_CLK>,
+ <&camcc CAMCC_IFE_0_AXI_CLK>,
+ <&camcc CAMCC_IFE_0_CLK>,
+ <&camcc CAMCC_IFE_0_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_0_CSID_CLK>,
+ <&camcc CAMCC_IFE_1_AXI_CLK>,
+ <&camcc CAMCC_IFE_1_CLK>,
+ <&camcc CAMCC_IFE_1_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_1_CSID_CLK>,
+ <&camcc CAMCC_IFE_2_AXI_CLK>,
+ <&camcc CAMCC_IFE_2_CLK>,
+ <&camcc CAMCC_IFE_2_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_2_CSID_CLK>,
+ <&camcc CAMCC_IFE_3_AXI_CLK>,
+ <&camcc CAMCC_IFE_3_CLK>,
+ <&camcc CAMCC_IFE_3_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_3_CSID_CLK>,
+ <&camcc CAMCC_IFE_LITE_0_CLK>,
+ <&camcc CAMCC_IFE_LITE_0_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_LITE_0_CSID_CLK>,
+ <&camcc CAMCC_IFE_LITE_1_CLK>,
+ <&camcc CAMCC_IFE_LITE_1_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_LITE_1_CSID_CLK>,
+ <&camcc CAMCC_IFE_LITE_2_CLK>,
+ <&camcc CAMCC_IFE_LITE_2_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_LITE_2_CSID_CLK>,
+ <&camcc CAMCC_IFE_LITE_3_CLK>,
+ <&camcc CAMCC_IFE_LITE_3_CPHY_RX_CLK>,
+ <&camcc CAMCC_IFE_LITE_3_CSID_CLK>,
+ <&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&gcc GCC_CAMERA_SF_AXI_CLK>;
+
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy3",
+ "csiphy3_timer",
+ "vfe0_axi",
+ "vfe0",
+ "vfe0_cphy_rx",
+ "vfe0_csid",
+ "vfe1_axi",
+ "vfe1",
+ "vfe1_cphy_rx",
+ "vfe1_csid",
+ "vfe2_axi",
+ "vfe2",
+ "vfe2_cphy_rx",
+ "vfe2_csid",
+ "vfe3_axi",
+ "vfe3",
+ "vfe3_cphy_rx",
+ "vfe3_csid",
+ "vfe_lite0",
+ "vfe_lite0_cphy_rx",
+ "vfe_lite0_csid",
+ "vfe_lite1",
+ "vfe_lite1_cphy_rx",
+ "vfe_lite1_csid",
+ "vfe_lite2",
+ "vfe_lite2_cphy_rx",
+ "vfe_lite2_csid",
+ "vfe_lite3",
+ "vfe_lite3_cphy_rx",
+ "vfe_lite3_csid",
+ "gcc_axi_hf",
+ "gcc_axi_sf";
+
+ iommus = <&apps_smmu 0x2000 0x4e0>,
+ <&apps_smmu 0x2020 0x4e0>,
+ <&apps_smmu 0x2040 0x4e0>,
+ <&apps_smmu 0x2060 0x4e0>,
+ <&apps_smmu 0x2080 0x4e0>,
+ <&apps_smmu 0x20e0 0x4e0>,
+ <&apps_smmu 0x20c0 0x4e0>,
+ <&apps_smmu 0x20a0 0x4e0>,
+ <&apps_smmu 0x2400 0x4e0>,
+ <&apps_smmu 0x2420 0x4e0>,
+ <&apps_smmu 0x2440 0x4e0>,
+ <&apps_smmu 0x2460 0x4e0>,
+ <&apps_smmu 0x2480 0x4e0>,
+ <&apps_smmu 0x24e0 0x4e0>,
+ <&apps_smmu 0x24c0 0x4e0>,
+ <&apps_smmu 0x24a0 0x4e0>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_CAMERA_CFG 0>,
+ <&mmss_noc MASTER_CAMNOC_HF 0 &mc_virt SLAVE_EBI1 0>,
+ <&mmss_noc MASTER_CAMNOC_SF 0 &mc_virt SLAVE_EBI1 0>,
+ <&mmss_noc MASTER_CAMNOC_ICP 0 &mc_virt SLAVE_EBI1 0>;
+ interconnect-names = "cam_ahb",
+ "cam_hf_mnoc",
+ "cam_sf_mnoc",
+ "cam_sf_icp_mnoc";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csiphy_ep0: endpoint@0 {
+ reg = <0>;
+ clock-lanes = <7>;
+ data-lanes = <0 1>;
+ remote-endpoint = <&sensor_ep>;
+ };
+ };
+ };
+ };
+ };
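
The csiphy_ep0 endpoint in the example above links to an external image sensor through its remote-endpoint phandle. For context, a reduced sketch of the matching sensor-side node is shown below; the sensor compatible, unit address, label and lane layout are illustrative and not defined by this binding, and sensor-specific properties (clocks, supplies) are omitted:

  camera@10 {
      compatible = "sony,imx577";            /* illustrative sensor */
      reg = <0x10>;

      port {
          sensor_ep: endpoint {
              data-lanes = <0 1>;
              remote-endpoint = <&csiphy_ep0>;
          };
      };
  };
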
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
index 6b3e413cedb2..34147127192f 100644
--- a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
+++ b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
@@ -36,6 +36,10 @@ properties:
resets:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/media/st,stm32mp25-video-codec.yaml b/Documentation/devicetree/bindings/media/st,stm32mp25-video-codec.yaml
index b8611bc8756c..73726c65cfb9 100644
--- a/Documentation/devicetree/bindings/media/st,stm32mp25-video-codec.yaml
+++ b/Documentation/devicetree/bindings/media/st,stm32mp25-video-codec.yaml
@@ -30,6 +30,10 @@ properties:
clocks:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
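
The access-controllers entries added here (and in the st,stm32-dcmi change above) follow the generic firewall-consumer scheme: each entry is a phandle to the platform's resource isolation controller plus a controller-specific index for the protected peripheral. A reduced consumer-node sketch, assuming a RIFSC-style controller; the register, interrupt, clock and firewall values are illustrative only:

  video-codec@580d0000 {
      compatible = "st,stm32mp25-vdec";
      reg = <0x580d0000 0x3c8>;                        /* illustrative */
      interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;  /* illustrative */
      clocks = <&rcc 1>;                               /* illustrative clock specifier */
      access-controllers = <&rifsc 89>;                /* RIFSC phandle + firewall ID */
  };
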
diff --git a/Documentation/devicetree/bindings/memory-controllers/samsung,s5pv210-dmc.yaml b/Documentation/devicetree/bindings/memory-controllers/samsung,s5pv210-dmc.yaml
new file mode 100644
index 000000000000..c0e47055f28c
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/samsung,s5pv210-dmc.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/samsung,s5pv210-dmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S5Pv210 SoC Dynamic Memory Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description:
+ Dynamic Memory Controller interfaces external JEDEC DDR-type SDRAM.
+
+properties:
+ compatible:
+ const: samsung,s5pv210-dmc
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ memory-controller@f0000000 {
+ compatible = "samsung,s5pv210-dmc";
+ reg = <0xf0000000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml b/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml
index 84ac6f50a6fc..706e45eb4d27 100644
--- a/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml
@@ -50,6 +50,10 @@ properties:
Reflects the memory layout with four integer values per bank. Format:
<bank-number> 0 <address of the bank> <size>
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
patternProperties:
"^.*@[0-4],[a-f0-9]+$":
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml b/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml
index 6811246c5771..9ae419748aa7 100644
--- a/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml
+++ b/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml
@@ -21,7 +21,7 @@ description: |
regulators.
allOf:
- - $ref: ../input/input.yaml
+ - $ref: /schemas/input/input.yaml
properties:
compatible:
@@ -57,7 +57,7 @@ properties:
switchldo1:
type: object
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
properties:
regulator-name: true
@@ -76,7 +76,7 @@ properties:
"^(dcdc[0-4]|ldo[0-9]|ldo1[1-2])$":
type: object
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
properties:
regulator-name: true
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
index 8789e3639ff7..ca0e9f1f2354 100644
--- a/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
@@ -20,7 +20,7 @@ properties:
maxItems: 1
patternProperties:
- "^.*_(clk|rst)$":
+ "^.*-(clk|rst)$":
type: object
unevaluatedProperties: false
@@ -171,7 +171,7 @@ examples:
compatible = "allwinner,sun6i-a31-prcm";
reg = <0x01f01400 0x200>;
- ar100: ar100_clk {
+ ar100: ar100-clk {
compatible = "allwinner,sun6i-a31-ar100-clk";
#clock-cells = <0>;
clocks = <&rtc 0>, <&osc24M>,
@@ -180,7 +180,7 @@ examples:
clock-output-names = "ar100";
};
- ahb0: ahb0_clk {
+ ahb0: ahb0-clk {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <1>;
@@ -189,14 +189,14 @@ examples:
clock-output-names = "ahb0";
};
- apb0: apb0_clk {
+ apb0: apb0-clk {
compatible = "allwinner,sun6i-a31-apb0-clk";
#clock-cells = <0>;
clocks = <&ahb0>;
clock-output-names = "apb0";
};
- apb0_gates: apb0_gates_clk {
+ apb0_gates: apb0-gates-clk {
compatible = "allwinner,sun6i-a31-apb0-gates-clk";
#clock-cells = <1>;
clocks = <&apb0>;
@@ -206,14 +206,14 @@ examples:
"apb0_i2c";
};
- ir_clk: ir_clk {
+ ir_clk: ir-clk {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-mod0-clk";
clocks = <&rtc 0>, <&osc24M>;
clock-output-names = "ir";
};
- apb0_rst: apb0_rst {
+ apb0_rst: apb0-rst {
compatible = "allwinner,sun6i-a31-clock-reset";
#reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml b/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
index 1689b986f441..86ee69c0f45b 100644
--- a/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
+++ b/Documentation/devicetree/bindings/mfd/aspeed,ast2x00-scu.yaml
@@ -47,10 +47,18 @@ patternProperties:
type: object
'^pinctrl(@[0-9a-f]+)?$':
- oneOf:
- - $ref: /schemas/pinctrl/aspeed,ast2400-pinctrl.yaml
- - $ref: /schemas/pinctrl/aspeed,ast2500-pinctrl.yaml
- - $ref: /schemas/pinctrl/aspeed,ast2600-pinctrl.yaml
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ contains:
+ enum:
+ - aspeed,ast2400-pinctrl
+ - aspeed,ast2500-pinctrl
+ - aspeed,ast2600-pinctrl
+
+ required:
+ - compatible
'^interrupt-controller@[0-9a-f]+$':
description: See Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
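
The rework above drops the oneOf of full-schema references in favour of matching the child node by its compatible, so the pinctrl node is still validated by its own binding, selected through that compatible. The accepted device tree shape is unchanged; a reduced sketch (SCU register ranges and the clock/reset cells required by the full binding are omitted):

  syscon@1e6e2000 {
      compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd";
      reg = <0x1e6e2000 0x1000>;

      pinctrl: pinctrl {
          compatible = "aspeed,ast2600-pinctrl";
      };
  };
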
diff --git a/Documentation/devicetree/bindings/mfd/brcm,cru.yaml b/Documentation/devicetree/bindings/mfd/brcm,cru.yaml
index b85819fbb07c..04910e4f88b2 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,cru.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,cru.yaml
@@ -34,19 +34,19 @@ properties:
patternProperties:
'^clock-controller@[a-f0-9]+$':
- $ref: ../clock/brcm,iproc-clocks.yaml
+ $ref: /schemas/clock/brcm,iproc-clocks.yaml
'^phy@[a-f0-9]+$':
- $ref: ../phy/bcm-ns-usb2-phy.yaml
+ $ref: /schemas/phy/bcm-ns-usb2-phy.yaml
'^pinctrl@[a-f0-9]+$':
- $ref: ../pinctrl/brcm,ns-pinmux.yaml
+ $ref: /schemas/pinctrl/brcm,ns-pinmux.yaml
'^syscon@[a-f0-9]+$':
$ref: syscon.yaml
'^thermal@[a-f0-9]+$':
- $ref: ../thermal/brcm,ns-thermal.yaml
+ $ref: /schemas/thermal/brcm,ns-thermal.yaml
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt b/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt
deleted file mode 100644
index 82f82e069563..000000000000
--- a/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Broadcom iProc Chip Device Resource Unit (CDRU)
-
-Various Broadcom iProc SoCs have a set of registers that provide various
-chip specific device and resource configurations. This node allows access to
-these CDRU registers via syscon.
-
-Required properties:
-- compatible: should contain:
- "brcm,sr-cdru", "syscon" for Stingray
-- reg: base address and range of the CDRU registers
-
-Example:
- cdru: syscon@6641d000 {
- compatible = "brcm,sr-cdru", "syscon";
- reg = <0 0x6641d000 0 0x400>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt b/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt
deleted file mode 100644
index 4421e9771b8a..000000000000
--- a/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Broadcom iProc Multi Host Bridge (MHB)
-
-Certain Broadcom iProc SoCs have a multi host bridge (MHB) block that controls
-the connection and configuration of 1) internal PCIe serdes; 2) PCIe endpoint
-interface; 3) access to the Nitro (network processing) engine
-
-This node allows access to these MHB registers via syscon.
-
-Required properties:
-- compatible: should contain:
- "brcm,sr-mhb", "syscon" for Stingray
-- reg: base address and range of the MHB registers
-
-Example:
- mhb: syscon@60401000 {
- compatible = "brcm,sr-mhb", "syscon";
- reg = <0 0x60401000 0 0x38c>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/brcm,misc.yaml b/Documentation/devicetree/bindings/mfd/brcm,misc.yaml
index cff7d772a7db..abe24526f3d7 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,misc.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,misc.yaml
@@ -33,7 +33,7 @@ properties:
patternProperties:
'^reset-controller@[a-f0-9]+$':
- $ref: ../reset/brcm,bcm4908-misc-pcie-reset.yaml
+ $ref: /schemas/reset/brcm,bcm4908-misc-pcie-reset.yaml
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
index 3b3beab9db3f..2451d0f0e4e3 100644
--- a/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
@@ -36,7 +36,7 @@ properties:
clock-controller:
# Child node
type: object
- $ref: ../clock/canaan,k210-clk.yaml
+ $ref: /schemas/clock/canaan,k210-clk.yaml
description:
Clock controller for the SoC clocks. This child node definition
should follow the bindings specified in
@@ -45,7 +45,7 @@ properties:
reset-controller:
# Child node
type: object
- $ref: ../reset/canaan,k210-rst.yaml
+ $ref: /schemas/reset/canaan,k210-rst.yaml
description:
Reset controller for the SoC. This child node definition
should follow the bindings specified in
@@ -54,7 +54,7 @@ properties:
syscon-reboot:
# Child node
type: object
- $ref: ../power/reset/syscon-reboot.yaml
+ $ref: /schemas/power/reset/syscon-reboot.yaml
description:
Reboot method for the SoC. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml b/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
index f6967c1f6235..d3b79140cce2 100644
--- a/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
+++ b/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
@@ -42,10 +42,10 @@ required:
patternProperties:
"^gpio(@[0-9a-f]+)?$":
- $ref: ../gpio/delta,tn48m-gpio.yaml
+ $ref: /schemas/gpio/delta,tn48m-gpio.yaml
"^reset-controller?$":
- $ref: ../reset/delta,tn48m-reset.yaml
+ $ref: /schemas/reset/delta,tn48m-reset.yaml
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/iqs62x.yaml b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
index f438c2374966..e79ce447a800 100644
--- a/Documentation/devicetree/bindings/mfd/iqs62x.yaml
+++ b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
@@ -38,10 +38,10 @@ properties:
device name with ".bin" as the extension (e.g. iqs620a.bin for IQS620A).
keys:
- $ref: ../input/iqs62x-keys.yaml
+ $ref: /schemas/input/iqs62x-keys.yaml
pwm:
- $ref: ../pwm/iqs620a-pwm.yaml
+ $ref: /schemas/pwm/iqs620a-pwm.yaml
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml b/Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
index eb3b43547cb6..37207a97e06c 100644
--- a/Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
+++ b/Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
@@ -39,19 +39,19 @@ properties:
patternProperties:
"^gpio(@[0-9a-f]+)?$":
- $ref: ../gpio/kontron,sl28cpld-gpio.yaml
+ $ref: /schemas/gpio/kontron,sl28cpld-gpio.yaml
"^hwmon(@[0-9a-f]+)?$":
- $ref: ../hwmon/kontron,sl28cpld-hwmon.yaml
+ $ref: /schemas/hwmon/kontron,sl28cpld-hwmon.yaml
"^interrupt-controller(@[0-9a-f]+)?$":
- $ref: ../interrupt-controller/kontron,sl28cpld-intc.yaml
+ $ref: /schemas/interrupt-controller/kontron,sl28cpld-intc.yaml
"^pwm(@[0-9a-f]+)?$":
- $ref: ../pwm/kontron,sl28cpld-pwm.yaml
+ $ref: /schemas/pwm/kontron,sl28cpld-pwm.yaml
"^watchdog(@[0-9a-f]+)?$":
- $ref: ../watchdog/kontron,sl28cpld-wdt.yaml
+ $ref: /schemas/watchdog/kontron,sl28cpld-wdt.yaml
required:
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/mfd/lp873x.txt b/Documentation/devicetree/bindings/mfd/lp873x.txt
deleted file mode 100644
index ae9cf39bd101..000000000000
--- a/Documentation/devicetree/bindings/mfd/lp873x.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-TI LP873X PMIC MFD driver
-
-Required properties:
- - compatible: "ti,lp8732", "ti,lp8733"
- - reg: I2C slave address.
- - gpio-controller: Marks the device node as a GPIO Controller.
- - #gpio-cells: Should be two. The first cell is the pin number and
- the second cell is used to specify flags.
- See ../gpio/gpio.txt for more information.
- - xxx-in-supply: Phandle to parent supply node of each regulator
- populated under regulators node. xxx can be
- buck0, buck1, ldo0 or ldo1.
- - regulators: List of child nodes that specify the regulator
- initialization data.
-Example:
-
-pmic: lp8733@60 {
- compatible = "ti,lp8733";
- reg = <0x60>;
- gpio-controller;
- #gpio-cells = <2>;
-
- buck0-in-supply = <&vsys_3v3>;
- buck1-in-supply = <&vsys_3v3>;
- ldo0-in-supply = <&vsys_3v3>;
- ldo1-in-supply = <&vsys_3v3>;
-
- regulators {
- lp8733_buck0: buck0 {
- regulator-name = "lp8733-buck0";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
- regulator-min-microamp = <1500000>;
- regulator-max-microamp = <4000000>;
- regulator-ramp-delay = <10000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- lp8733_buck1: buck1 {
- regulator-name = "lp8733-buck1";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
- regulator-min-microamp = <1500000>;
- regulator-max-microamp = <4000000>;
- regulator-ramp-delay = <10000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- lp8733_ldo0: ldo0 {
- regulator-name = "lp8733-ldo0";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <3000000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- lp8733_ldo1: ldo1 {
- regulator-name = "lp8733-ldo1";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
- regulator-boot-on;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml
index 4181174fcf58..d93d84171a31 100644
--- a/Documentation/devicetree/bindings/mfd/max77650.yaml
+++ b/Documentation/devicetree/bindings/mfd/max77650.yaml
@@ -53,16 +53,16 @@ properties:
Single string containing the name of the GPIO line.
regulators:
- $ref: ../regulator/max77650-regulator.yaml
+ $ref: /schemas/regulator/max77650-regulator.yaml
charger:
- $ref: ../power/supply/max77650-charger.yaml
+ $ref: /schemas/power/supply/max77650-charger.yaml
leds:
- $ref: ../leds/leds-max77650.yaml
+ $ref: /schemas/leds/leds-max77650.yaml
onkey:
- $ref: ../input/max77650-onkey.yaml
+ $ref: /schemas/input/max77650-onkey.yaml
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
index d027aabe453b..c13d51e462ba 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
@@ -35,7 +35,7 @@ properties:
maxItems: 1
voltage-regulators:
- $ref: ../regulator/maxim,max77686.yaml
+ $ref: /schemas/regulator/maxim,max77686.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
index 6a6f222b868f..cce273ba4034 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
@@ -81,7 +81,7 @@ properties:
- pwms
regulators:
- $ref: ../regulator/maxim,max77693.yaml
+ $ref: /schemas/regulator/maxim,max77693.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
index 8103fb61a16c..b7f01cbb8fff 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
@@ -160,6 +160,10 @@ patternProperties:
type: object
$ref: /schemas/nvmem/qcom,spmi-sdam.yaml#
+ "^pbs@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/soc/qcom/qcom,pbs.yaml#
+
"phy@[0-9a-f]+$":
type: object
$ref: /schemas/phy/qcom,snps-eusb2-repeater.yaml#
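
The new pbs@ pattern routes PBS (Programmable Boot Sequencer) children to their own schema. A reduced sketch of such a child inside an SPMI PMIC node; the PMIC model, USID and register offset are illustrative, SPMI_USID comes from dt-bindings/spmi/spmi.h, and any further properties required by qcom,pbs.yaml are omitted:

  pmic@0 {
      compatible = "qcom,pmi632", "qcom,spmi-pmic";
      reg = <0x0 SPMI_USID>;
      #address-cells = <1>;
      #size-cells = <0>;

      pbs@7400 {
          compatible = "qcom,pmi632-pbs";
          reg = <0x7400>;                 /* illustrative offset */
      };
  };
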
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
index b97d77015335..c6bd14ec5aa0 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -28,6 +28,7 @@ properties:
- qcom,sdm845-tcsr
- qcom,sdx55-tcsr
- qcom,sdx65-tcsr
+ - qcom,sdx75-tcsr
- qcom,sm4450-tcsr
- qcom,sm6115-tcsr
- qcom,sm8150-tcsr
diff --git a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
index 7fe3875a5996..63e18d6a9c21 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- qcom,pm8058
- qcom,pm8821
+ - qcom,pm8901
- qcom,pm8921
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml b/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml
index 4762eb1439ce..e3ccba177b21 100644
--- a/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml
+++ b/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml
@@ -37,10 +37,10 @@ properties:
maxItems: 1
regulators:
- $ref: ../regulator/richtek,rt4831-regulator.yaml
+ $ref: /schemas/regulator/richtek,rt4831-regulator.yaml
backlight:
- $ref: ../leds/backlight/richtek,rt4831-backlight.yaml
+ $ref: /schemas/leds/backlight/richtek,rt4831-backlight.yaml
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/ricoh,rn5t618.yaml b/Documentation/devicetree/bindings/mfd/ricoh,rn5t618.yaml
index 032a7fb0b4a7..e3d64307b531 100644
--- a/Documentation/devicetree/bindings/mfd/ricoh,rn5t618.yaml
+++ b/Documentation/devicetree/bindings/mfd/ricoh,rn5t618.yaml
@@ -28,7 +28,7 @@ allOf:
regulators:
patternProperties:
"^(DCDC[1-4]|LDO[1-5]|LDORTC[12])$":
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
additionalProperties: false
- if:
properties:
@@ -40,7 +40,7 @@ allOf:
regulators:
patternProperties:
"^(DCDC[1-3]|LDO[1-5]|LDORTC[12])$":
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
additionalProperties: false
- if:
properties:
@@ -52,7 +52,7 @@ allOf:
regulators:
patternProperties:
"^(DCDC[1-5]|LDO[1-9]|LDO10|LDORTC[12])$":
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
additionalProperties: false
properties:
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml
index 44f8188360dd..da2391530c16 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml
@@ -82,7 +82,7 @@ properties:
patternProperties:
"^(DCDC_REG[1-4]|LDO_REG[1-3])$":
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
index d2ac6fbd5ce6..50dfffac8fbf 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
@@ -109,7 +109,7 @@ properties:
patternProperties:
"^(DCDC_REG[1-4]|LDO_REG[1-8]|SWITCH_REG[1-2])$":
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk816.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk816.yaml
new file mode 100644
index 000000000000..0676890f101e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk816.yaml
@@ -0,0 +1,274 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk816.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK816 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description:
+ Rockchip RK816 series PMIC. This device consists of an i2c controlled MFD
+  that includes regulators, an RTC, a GPIO controller, a power button, and a
+ battery charger manager with fuel gauge.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk816
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description:
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ const: 1
+
+ clock-output-names:
+ maxItems: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ system-power-controller:
+ type: boolean
+ description:
+ Telling whether or not this PMIC is controlling the system power.
+
+ wakeup-source:
+ type: boolean
+
+ vcc1-supply:
+ description:
+ The input supply for dcdc1.
+
+ vcc2-supply:
+ description:
+ The input supply for dcdc2.
+
+ vcc3-supply:
+ description:
+ The input supply for dcdc3.
+
+ vcc4-supply:
+ description:
+ The input supply for dcdc4.
+
+ vcc5-supply:
+ description:
+ The input supply for ldo1, ldo2, and ldo3.
+
+ vcc6-supply:
+ description:
+ The input supply for ldo4, ldo5, and ldo6.
+
+ vcc7-supply:
+ description:
+ The input supply for boost.
+
+ vcc8-supply:
+ description:
+ The input supply for otg-switch.
+
+ regulators:
+ type: object
+ patternProperties:
+ '^(boost|dcdc[1-4]|ldo[1-6]|otg-switch)$':
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+ additionalProperties: false
+
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+
+ properties:
+ function:
+ enum: [gpio, thermistor]
+
+ pins:
+ $ref: /schemas/types.yaml#/definitions/string
+ const: gpio0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rk816: pmic@1a {
+ compatible = "rockchip,rk816";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "xin32k", "rk816-clkout2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ gpio-controller;
+ system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+ #gpio-cells = <2>;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vcc33_io>;
+ vcc6-supply = <&vcc_sys>;
+
+ regulators {
+ vdd_cpu: dcdc1 {
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic: dcdc2 {
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: dcdc3 {
+ regulator-name = "vcc_ddr";
+ regulator-initial-mode = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc33_io: dcdc4 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc33_io";
+ regulator-initial-mode = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_pmu: ldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_tp: ldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_tp";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_10: ldo3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd_10";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc18_lcd: ldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_lcd";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vccio_sd: ldo5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd10_lcd: ldo6 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd10_lcd";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+ };
+
+ rk816_gpio_pins: gpio-pins {
+ function = "gpio";
+ pins = "gpio0";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
index 92b1592e8942..8c2fd0fabb92 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
@@ -91,7 +91,7 @@ properties:
"^(LDO_REG[1-9]|DCDC_REG[1-4]|BOOST|OTG_SWITCH)$":
type: object
unevaluatedProperties: false
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
clocks:
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml
index fd4b9de364aa..90d944c27ba1 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml
@@ -101,7 +101,7 @@ properties:
patternProperties:
"^(DCDC_REG[1-4]|DCDC_BOOST|LDO_REG[1-9]|SWITCH_REG|HDMI_SWITCH|OTG_SWITCH)$":
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
index 05747e012516..bb81307dc11b 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
@@ -61,7 +61,7 @@ properties:
default: 30000000
regulators:
- $ref: ../regulator/rohm,bd71815-regulator.yaml
+ $ref: /schemas/regulator/rohm,bd71815-regulator.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
index 11089aa89ec6..fa17686a64f7 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
@@ -17,7 +17,12 @@ description: |
properties:
compatible:
- const: rohm,bd71828
+ oneOf:
+ - const: rohm,bd71828
+
+ - items:
+ - const: rohm,bd71879
+ - const: rohm,bd71828
reg:
description:
@@ -60,12 +65,12 @@ properties:
here in Ohms.
regulators:
- $ref: ../regulator/rohm,bd71828-regulator.yaml
+ $ref: /schemas/regulator/rohm,bd71828-regulator.yaml
description:
List of child nodes that specify the regulators.
leds:
- $ref: ../leds/rohm,bd71828-leds.yaml
+ $ref: /schemas/leds/rohm,bd71828-leds.yaml
gpio-reserved-ranges:
description: |
@@ -73,6 +78,8 @@ properties:
used to mark the pins which should not be configured for GPIO. Please see
the ../gpio/gpio.txt for more information.
+ system-power-controller: true
+
required:
- compatible
- reg
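
With the change above, boards carrying the BD71879 describe it with the BD71828 as a fallback compatible, and the PMIC may also be flagged as the system power controller. A reduced sketch showing just those two points (the clock, interrupt, GPIO and regulator properties required by the full binding are omitted):

  pmic@4b {
      compatible = "rohm,bd71879", "rohm,bd71828";
      reg = <0x4b>;
      system-power-controller;
  };
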
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml
index 7aa343f58cb6..08f958dc700d 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml
@@ -109,7 +109,7 @@ properties:
- 14000
regulators:
- $ref: ../regulator/rohm,bd71837-regulator.yaml
+ $ref: /schemas/regulator/rohm,bd71837-regulator.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
index 89f9efee465b..534cf03f36bb 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
@@ -67,7 +67,7 @@ properties:
patternProperties:
"^(vd09|vd18|vd25|vd33|dvfs)$":
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
properties:
regulator-name:
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml
index b7b323b1a4f2..70fd9b5e4c3f 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml
@@ -71,7 +71,7 @@ properties:
# (HW) minimum for max timeout is 4ms, maximum 4416 ms.
regulators:
- $ref: ../regulator/rohm,bd9576-regulator.yaml
+ $ref: /schemas/regulator/rohm,bd9576-regulator.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
index 055dfc337c2f..ad92eb6fcd3a 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
@@ -27,7 +27,7 @@ properties:
maxItems: 1
regulators:
- $ref: ../regulator/samsung,s2mpa01.yaml
+ $ref: /schemas/regulator/samsung,s2mpa01.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
index 5ff6546c72b7..bc8b5940b1c5 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
@@ -27,7 +27,7 @@ properties:
- samsung,s2mpu02-pmic
clocks:
- $ref: ../clock/samsung,s2mps11.yaml
+ $ref: /schemas/clock/samsung,s2mps11.yaml
description:
Child node describing clock provider.
@@ -75,7 +75,7 @@ allOf:
then:
properties:
regulators:
- $ref: ../regulator/samsung,s2mps11.yaml
+ $ref: /schemas/regulator/samsung,s2mps11.yaml
samsung,s2mps11-wrstbi-ground: false
- if:
@@ -86,7 +86,7 @@ allOf:
then:
properties:
regulators:
- $ref: ../regulator/samsung,s2mps13.yaml
+ $ref: /schemas/regulator/samsung,s2mps13.yaml
samsung,s2mps11-acokb-ground: false
- if:
@@ -97,7 +97,7 @@ allOf:
then:
properties:
regulators:
- $ref: ../regulator/samsung,s2mps14.yaml
+ $ref: /schemas/regulator/samsung,s2mps14.yaml
samsung,s2mps11-acokb-ground: false
samsung,s2mps11-wrstbi-ground: false
@@ -109,7 +109,7 @@ allOf:
then:
properties:
regulators:
- $ref: ../regulator/samsung,s2mps15.yaml
+ $ref: /schemas/regulator/samsung,s2mps15.yaml
samsung,s2mps11-acokb-ground: false
samsung,s2mps11-wrstbi-ground: false
@@ -121,7 +121,7 @@ allOf:
then:
properties:
regulators:
- $ref: ../regulator/samsung,s2mpu02.yaml
+ $ref: /schemas/regulator/samsung,s2mpu02.yaml
samsung,s2mps11-acokb-ground: false
samsung,s2mps11-wrstbi-ground: false
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
index aea0b7d57d04..249248078c59 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
@@ -21,7 +21,7 @@ properties:
const: samsung,s5m8767-pmic
clocks:
- $ref: ../clock/samsung,s2mps11.yaml
+ $ref: /schemas/clock/samsung,s2mps11.yaml
description:
Child node describing clock provider.
@@ -32,7 +32,7 @@ properties:
maxItems: 1
regulators:
- $ref: ../regulator/samsung,s5m8767.yaml
+ $ref: /schemas/regulator/samsung,s5m8767.yaml
description:
List of child nodes that specify the regulators.
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
index 27329c5dc38e..d41308856408 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
@@ -44,6 +44,10 @@ properties:
wakeup-source: true
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
pwm:
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
index f84e09a5743b..b0e438ff4950 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
@@ -67,6 +67,10 @@ properties:
"#size-cells":
const: 0
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
pwm:
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/st,stmfx.yaml b/Documentation/devicetree/bindings/mfd/st,stmfx.yaml
index 76551c90b128..61daf36b3c80 100644
--- a/Documentation/devicetree/bindings/mfd/st,stmfx.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stmfx.yaml
@@ -60,7 +60,7 @@ properties:
additionalProperties: false
allOf:
- - $ref: ../pinctrl/pinmux-node.yaml
+ - $ref: /schemas/pinctrl/pinmux-node.yaml
properties:
pins: true
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
index b17ebeb0a42f..e822817188fd 100644
--- a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
@@ -29,7 +29,7 @@ properties:
onkey:
type: object
- $ref: ../input/input.yaml
+ $ref: /schemas/input/input.yaml
properties:
compatible:
@@ -67,7 +67,7 @@ properties:
watchdog:
type: object
- $ref: ../watchdog/watchdog.yaml
+ $ref: /schemas/watchdog/watchdog.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/mfd/stericsson,ab8500.yaml b/Documentation/devicetree/bindings/mfd/stericsson,ab8500.yaml
index 94f9767a927d..b2cfa4120b8a 100644
--- a/Documentation/devicetree/bindings/mfd/stericsson,ab8500.yaml
+++ b/Documentation/devicetree/bindings/mfd/stericsson,ab8500.yaml
@@ -126,7 +126,7 @@ properties:
patternProperties:
"^channel@[0-9a-f]+$":
type: object
- $ref: ../iio/adc/adc.yaml#
+ $ref: /schemas/iio/adc/adc.yaml#
description: Represents each of the external channels which are
connected to the ADC.
@@ -180,22 +180,22 @@ properties:
ab8500_fg:
description: Node describing the AB8500 fuel gauge control block.
type: object
- $ref: ../power/supply/stericsson,ab8500-fg.yaml
+ $ref: /schemas/power/supply/stericsson,ab8500-fg.yaml
ab8500_btemp:
description: Node describing the AB8500 battery temperature control block.
type: object
- $ref: ../power/supply/stericsson,ab8500-btemp.yaml
+ $ref: /schemas/power/supply/stericsson,ab8500-btemp.yaml
ab8500_charger:
description: Node describing the AB8500 battery charger control block.
type: object
- $ref: ../power/supply/stericsson,ab8500-charger.yaml
+ $ref: /schemas/power/supply/stericsson,ab8500-charger.yaml
ab8500_chargalg:
description: Node describing the AB8500 battery charger algorithm.
type: object
- $ref: ../power/supply/stericsson,ab8500-chargalg.yaml
+ $ref: /schemas/power/supply/stericsson,ab8500-chargalg.yaml
phy:
description: Node describing the AB8500 USB PHY control block.
@@ -339,40 +339,40 @@ properties:
ab8500_ldo_aux1:
description: The voltage for the auxiliary LDO regulator 1
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_aux2:
description: The voltage for the auxiliary LDO regulator 2
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_aux3:
description: The voltage for the auxiliary LDO regulator 3
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_aux4:
description: The voltage for the auxiliary LDO regulator 4
only present on AB8505
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_aux5:
description: The voltage for the auxiliary LDO regulator 5
only present on AB8505
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_aux6:
description: The voltage for the auxiliary LDO regulator 6
only present on AB8505
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
# There is never any AUX7 regulator which is confusing
@@ -381,21 +381,21 @@ properties:
description: The voltage for the auxiliary LDO regulator 8
only present on AB8505
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_intcore:
description: The LDO regulator for the internal core voltage
of the AB8500
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_adc:
description: Analog power regulator for the analog to digital converter
ADC, only present on AB8505
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_tvout:
@@ -404,39 +404,39 @@ properties:
the temperature of the NTC thermistor on the battery.
Only present on AB8500.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_audio:
description: The LDO regulator for the audio codec output
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_anamic1:
description: The LDO regulator for the analog microphone 1
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_anamic2:
description: The LDO regulator for the analog microphone 2
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_dmic:
description: The LDO regulator for the digital microphone
only present on AB8500
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ldo_ana:
description: Analog power regulator for CSI and DSI interfaces,
Camera Serial Interface CSI and Display Serial Interface DSI.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
required:
@@ -459,19 +459,19 @@ properties:
ab8500_ext1:
description: The voltage for the VSMPS1 external regulator
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ext2:
description: The voltage for the VSMPS2 external regulator
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
ab8500_ext3:
description: The voltage for the VSMPS3 external regulator
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
required:
@@ -482,7 +482,7 @@ properties:
patternProperties:
"^pwm@[1-9]+?$":
type: object
- $ref: ../pwm/pwm.yaml#
+ $ref: /schemas/pwm/pwm.yaml#
unevaluatedProperties: false
description: Represents each of the PWM blocks in the AB8500
diff --git a/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
index cb2a42caabb5..d6c13779d44e 100644
--- a/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
+++ b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
@@ -71,52 +71,52 @@ properties:
description: The voltage for the application processor, the
main voltage domain for the chip.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_varm:
description: The voltage for the ARM Cortex-A9 CPU.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vmodem:
description: The voltage for the modem subsystem.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vpll:
description: The voltage for the phase locked loop clocks.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vsmps1:
description: Also known as VIO12, is a step-down voltage regulator
for 1.2V I/O. SMPS means System Management Power Source.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vsmps2:
description: Also known as VIO18, is a step-down voltage regulator
for 1.8V I/O. SMPS means System Management Power Source.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vsmps3:
description: This is a step-down voltage regulator
for 0.87 thru 1.875V I/O. SMPS means System Management Power Source.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_vrf1:
description: RF transceiver voltage regulator.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sva_mmdsp:
@@ -124,21 +124,21 @@ properties:
voltage regulator. This is the voltage for the accelerator DSP
for video encoding and decoding.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sva_mmdsp_ret:
description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
voltage regulator for retention mode.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sva_pipe:
description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
voltage regulator for the data pipe.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sia_mmdsp:
@@ -146,21 +146,21 @@ properties:
voltage regulator. This is the voltage for the accelerator DSP
for image encoding and decoding.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sia_mmdsp_ret:
description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
voltage regulator for retention mode.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sia_pipe:
description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
voltage regulator for the data pipe.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_sga:
@@ -168,7 +168,7 @@ properties:
This is in effect controlling the power to the MALI400 3D
accelerator block.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_b2r2_mcde:
@@ -176,33 +176,33 @@ properties:
Display Engine (MCDE) voltage regulator. These are two graphics
blocks.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_esram12:
description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_esram12_ret:
description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator for
retention mode.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_esram34:
description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
db8500_esram34_ret:
description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator for
retention mode.
type: object
- $ref: ../regulator/regulator.yaml#
+ $ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
required:
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 9d55bee155ce..7ed12a938baa 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -38,11 +38,20 @@ properties:
- allwinner,sun8i-h3-system-controller
- allwinner,sun8i-v3s-system-controller
- allwinner,sun50i-a64-system-controller
+ - altr,sdr-ctl
- amd,pensando-elba-syscon
+ - apm,xgene-csw
+ - apm,xgene-efuse
+ - apm,xgene-mcb
+ - apm,xgene-rb
+ - apm,xgene-scu
- brcm,cru-clkset
+ - brcm,sr-cdru
+ - brcm,sr-mhb
- freecom,fsg-cs2-system-controller
- fsl,imx93-aonmix-ns-syscfg
- fsl,imx93-wakeupmix-syscfg
+ - fsl,ls1088a-reset
- hisilicon,dsa-subctrl
- hisilicon,hi6220-sramctrl
- hisilicon,pcie-sas-subctrl
@@ -51,9 +60,15 @@ properties:
- intel,lgm-syscon
- loongson,ls1b-syscon
- loongson,ls1c-syscon
+ - marvell,armada-3700-cpu-misc
+ - marvell,armada-3700-nb-pm
+ - marvell,armada-3700-avs
- marvell,armada-3700-usb2-host-misc
+ - mediatek,mt2712-pctl-a-syscfg
+ - mediatek,mt6397-pctl-pmic-syscfg
- mediatek,mt8135-pctl-a-syscfg
- mediatek,mt8135-pctl-b-syscfg
+ - mediatek,mt8173-pctl-a-syscfg
- mediatek,mt8365-syscfg
- microchip,lan966x-cpu-syscon
- microchip,sparx5-cpu-syscon
@@ -73,6 +88,7 @@ properties:
- rockchip,rv1126-qos
- starfive,jh7100-sysmain
- ti,am62-usb-phy-ctrl
+ - ti,am62p-cpsw-mac-efuse
- ti,am654-dss-oldi-io-ctrl
- ti,am654-serdes-ctrl
- ti,j784s4-pcie-ctrl
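For reference only (not part of this patch): a node using one of the newly documented syscon compatibles pairs it with the generic "syscon" string. The node name, unit address and register size below are placeholders.

    cpu-misc@d000 {
        compatible = "marvell,armada-3700-cpu-misc", "syscon";
        reg = <0xd000 0x1000>;
    };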
diff --git a/Documentation/devicetree/bindings/mfd/ti,lp8732.yaml b/Documentation/devicetree/bindings/mfd/ti,lp8732.yaml
new file mode 100644
index 000000000000..9a90cee2b545
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ti,lp8732.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/ti,lp8732.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI LP873X Power Management Integrated Circuit
+
+maintainers:
+ - J Keerthy <j-keerthy@ti.com>
+
+description:
+ PMIC with two high-current buck converters and two linear regulators.
+
+properties:
+ compatible:
+ enum:
+ - ti,lp8732
+ - ti,lp8733
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ regulators:
+ description:
+ List of child nodes that specify the regulator initialization data.
+ type: object
+ patternProperties:
+ "^buck[01]|ldo[01]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+ additionalProperties: false
+
+patternProperties:
+ '^(buck[01]|ldo[01])-in-supply$':
+ description: Phandle to parent supply of each regulator populated under regulators node.
+
+required:
+ - compatible
+ - reg
+ - regulators
+ - buck0-in-supply
+ - buck1-in-supply
+ - ldo0-in-supply
+ - ldo1-in-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic: pmic@60 {
+ compatible = "ti,lp8733";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ buck0-in-supply = <&vsys_3v3>;
+ buck1-in-supply = <&vsys_3v3>;
+ ldo0-in-supply = <&vsys_3v3>;
+ ldo1-in-supply = <&vsys_3v3>;
+
+ regulators {
+ buck0: buck0 {
+ regulator-name = "buck0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <1500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-ramp-delay = <10000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck1: buck1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <1500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-ramp-delay = <10000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo0: ldo0 {
+ regulator-name = "ldo0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: ldo1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/ti,tps65086.yaml b/Documentation/devicetree/bindings/mfd/ti,tps65086.yaml
index bd36a07c1721..a8eed9065d96 100644
--- a/Documentation/devicetree/bindings/mfd/ti,tps65086.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,tps65086.yaml
@@ -49,7 +49,7 @@ properties:
patternProperties:
"^buck[1-6]$":
type: object
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
properties:
regulator-name: true
@@ -72,7 +72,7 @@ properties:
"^(ldoa[1-3]|swa1|swb[1-2]|vtt)$":
type: object
- $ref: ../regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
properties:
regulator-name: true
diff --git a/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml b/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml
index 9d43376bebed..6341b6070366 100644
--- a/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml
@@ -21,6 +21,7 @@ properties:
- ti,lp8764-q1
- ti,tps6593-q1
- ti,tps6594-q1
+ - ti,tps65224-q1
reg:
description: I2C slave address or SPI chip select number.
diff --git a/Documentation/devicetree/bindings/mfd/ti,twl.yaml b/Documentation/devicetree/bindings/mfd/ti,twl.yaml
index 52ed228fb1e7..c2357fecb56c 100644
--- a/Documentation/devicetree/bindings/mfd/ti,twl.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,twl.yaml
@@ -15,6 +15,67 @@ description: |
USB transceiver or Audio amplifier.
These chips are connected to an i2c bus.
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,twl4030
+ then:
+ properties:
+ madc:
+ type: object
+ $ref: /schemas/iio/adc/ti,twl4030-madc.yaml
+ unevaluatedProperties: false
+
+ bci:
+ type: object
+ $ref: /schemas/power/supply/twl4030-charger.yaml
+ unevaluatedProperties: false
+
+ pwrbutton:
+ type: object
+ additionalProperties: false
+ properties:
+ compatible:
+ const: ti,twl4030-pwrbutton
+ interrupts:
+ items:
+ - items:
+ const: 8
+
+ watchdog:
+ type: object
+ additionalProperties: false
+ properties:
+ compatible:
+ const: ti,twl4030-wdt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,twl6030
+ then:
+ properties:
+ gpadc:
+ type: object
+ properties:
+ compatible:
+ const: ti,twl6030-gpadc
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,twl6032
+ then:
+ properties:
+ gpadc:
+ type: object
+ properties:
+ compatible:
+ const: ti,twl6032-gpadc
+
properties:
compatible:
description:
@@ -42,7 +103,16 @@ properties:
"#clock-cells":
const: 1
-additionalProperties: false
+ rtc:
+ type: object
+ additionalProperties: false
+ properties:
+ compatible:
+ const: ti,twl4030-rtc
+ interrupts:
+ maxItems: 1
+
+unevaluatedProperties: false
required:
- compatible
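As a rough illustration of the child nodes now accepted for ti,twl4030 (a sketch only; the I2C address, interrupt numbers and omitted required properties are assumptions, not taken from this patch):

    pmic@48 {
        compatible = "ti,twl4030";
        reg = <0x48>;

        rtc {
            compatible = "ti,twl4030-rtc";
            interrupts = <11>;
        };

        pwrbutton {
            compatible = "ti,twl4030-pwrbutton";
            interrupts = <8>;
        };

        watchdog {
            compatible = "ti,twl4030-wdt";
        };
    };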
diff --git a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
index 06f1779835a1..b8e8db0d58e9 100644
--- a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
+++ b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
@@ -83,6 +83,7 @@ allOf:
enum:
- x-powers,axp313a
- x-powers,axp15060
+ - x-powers,axp717
then:
properties:
@@ -99,6 +100,7 @@ properties:
- x-powers,axp221
- x-powers,axp223
- x-powers,axp313a
+ - x-powers,axp717
- x-powers,axp803
- x-powers,axp806
- x-powers,axp809
diff --git a/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
index 940b12688167..8f62e2c7fa64 100644
--- a/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
+++ b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
@@ -79,6 +79,10 @@ properties:
- const: rx
- const: tx
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
power-domains: true
resets:
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
index 82f7ee8702cb..b9b999570529 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
@@ -91,6 +91,9 @@ properties:
- enum:
- fsl,imxrt1170-usdhc
- const: fsl,imxrt1050-usdhc
+ - items:
+ - const: nxp,s32g3-usdhc
+ - const: nxp,s32g2-usdhc
reg:
maxItems: 1
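A node for the newly added S32G3 uSDHC then carries the S32G2 string as fallback; a minimal fragment (register address, interrupt and bus width are placeholders, other required properties omitted):

    mmc@402f0000 {
        compatible = "nxp,s32g3-usdhc", "nxp,s32g2-usdhc";
        reg = <0x402f0000 0x1000>;
        interrupts = <36>;
        bus-width = <8>;
    };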
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 29f2400247eb..3d0e61e59856 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -12,16 +12,13 @@ maintainers:
properties:
compatible:
oneOf:
- - items:
- - const: renesas,sdhi-sh73a0 # R-Mobile APE6
- - items:
- - const: renesas,sdhi-r7s72100 # RZ/A1H
- - items:
- - const: renesas,sdhi-r7s9210 # SH-Mobile AG5
- - items:
- - const: renesas,sdhi-r8a73a4 # R-Mobile APE6
- - items:
- - const: renesas,sdhi-r8a7740 # R-Mobile A1
+ - enum:
+ - renesas,sdhi-mmc-r8a77470 # RZ/G1C
+ - renesas,sdhi-r7s72100 # RZ/A1H
+ - renesas,sdhi-r7s9210 # SH-Mobile AG5
+ - renesas,sdhi-r8a73a4 # R-Mobile APE6
+ - renesas,sdhi-r8a7740 # R-Mobile A1
+ - renesas,sdhi-sh73a0 # R-Mobile APE6
- items:
- enum:
- renesas,sdhi-r8a7778 # R-Car M1
@@ -41,8 +38,6 @@ properties:
- renesas,sdhi-r8a7794 # R-Car E2
- const: renesas,rcar-gen2-sdhi # R-Car Gen2 and RZ/G1
- items:
- - const: renesas,sdhi-mmc-r8a77470 # RZ/G1C (SDHI/MMC IP)
- - items:
- enum:
- renesas,sdhi-r8a774a1 # RZ/G2M
- renesas,sdhi-r8a774b1 # RZ/G2N
@@ -56,11 +51,6 @@ properties:
- renesas,sdhi-r8a77980 # R-Car V3H
- renesas,sdhi-r8a77990 # R-Car E3
- renesas,sdhi-r8a77995 # R-Car D3
- - renesas,sdhi-r9a07g043 # RZ/G2UL and RZ/Five
- - renesas,sdhi-r9a07g044 # RZ/G2{L,LC}
- - renesas,sdhi-r9a07g054 # RZ/V2L
- - renesas,sdhi-r9a08g045 # RZ/G3S
- - renesas,sdhi-r9a09g011 # RZ/V2M
- const: renesas,rcar-gen3-sdhi # R-Car Gen3 or RZ/G2
- items:
- enum:
@@ -69,6 +59,14 @@ properties:
- renesas,sdhi-r8a779g0 # R-Car V4H
- renesas,sdhi-r8a779h0 # R-Car V4M
- const: renesas,rcar-gen4-sdhi # R-Car Gen4
+ - items:
+ - enum:
+ - renesas,sdhi-r9a07g043 # RZ/G2UL and RZ/Five
+ - renesas,sdhi-r9a07g044 # RZ/G2{L,LC}
+ - renesas,sdhi-r9a07g054 # RZ/V2L
+ - renesas,sdhi-r9a08g045 # RZ/G3S
+ - renesas,sdhi-r9a09g011 # RZ/V2M
+ - const: renesas,rzg2l-sdhi
reg:
maxItems: 1
@@ -120,12 +118,7 @@ allOf:
properties:
compatible:
contains:
- enum:
- - renesas,sdhi-r9a07g043
- - renesas,sdhi-r9a07g044
- - renesas,sdhi-r9a07g054
- - renesas,sdhi-r9a08g045
- - renesas,sdhi-r9a09g011
+ const: renesas,rzg2l-sdhi
then:
properties:
clocks:
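With this change, RZ/G2L-class SoCs pair their SoC-specific string with the new renesas,rzg2l-sdhi family fallback instead of the R-Car Gen3 one; a fragment as a sketch (address is a placeholder, clocks and other required properties omitted):

    sdhi0: mmc@11c00000 {
        compatible = "renesas,sdhi-r9a07g044", "renesas,rzg2l-sdhi";
        reg = <0x11c00000 0x10000>;
    };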
diff --git a/Documentation/devicetree/bindings/mtd/mtd.yaml b/Documentation/devicetree/bindings/mtd/mtd.yaml
index ee442ecb11cd..bbb56216a4e2 100644
--- a/Documentation/devicetree/bindings/mtd/mtd.yaml
+++ b/Documentation/devicetree/bindings/mtd/mtd.yaml
@@ -48,8 +48,8 @@ patternProperties:
type: object
allOf:
- - $ref: ../nvmem/nvmem.yaml#
- - $ref: ../nvmem/nvmem-deprecated-cells.yaml#
+ - $ref: /schemas/nvmem/nvmem.yaml#
+ - $ref: /schemas/nvmem/nvmem-deprecated-cells.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/mtd/partitions/binman.yaml b/Documentation/devicetree/bindings/mtd/partitions/binman.yaml
new file mode 100644
index 000000000000..bb4b08546184
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/binman.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/binman.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binman entries
+
+description: |
+ This corresponds to a binman 'entry'. It is a single partition which holds
+ data of a defined type.
+
+ Binman uses the type to indicate what data file / type to place in the
+ partition. There are quite a number of binman-specific entry types, such as
+ section, fill and files, to be added later.
+
+maintainers:
+ - Simon Glass <sjg@chromium.org>
+
+allOf:
+ - $ref: /schemas/mtd/partitions/partition.yaml#
+
+properties:
+ compatible:
+ enum:
+ - u-boot # u-boot.bin from U-Boot project
+ - tfa-bl31 # bl31.bin or bl31.elf from TF-A project
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@100000 {
+ compatible = "u-boot";
+ reg = <0x100000 0xf00000>;
+ align-size = <0x1000>;
+ align-end = <0x10000>;
+ };
+
+ partition@200000 {
+ compatible = "tfa-bl31";
+ reg = <0x200000 0x100000>;
+ align = <0x4000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/partitions/partition.yaml b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
index 1ebe9e2347ea..80d0452a2a33 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
@@ -57,6 +57,57 @@ properties:
user space from
type: boolean
+ align:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 2
+ maximum: 0x80000000
+ multipleOf: 2
+ description:
+ This sets the alignment of the entry in bytes.
+
+ The entry offset is adjusted so that the entry starts on an aligned
+ boundary within the containing section or image. For example ‘align =
+ <16>’ means that the entry will start on a 16-byte boundary. This may
+ mean that padding is added before the entry. The padding is part of
+ the containing section but is not included in the entry, meaning that
+ an empty space may be created before the entry starts. Alignment
+ must be a power of 2. If ‘align’ is not provided, no alignment is
+ performed.
+
+ align-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 2
+ maximum: 0x80000000
+ multipleOf: 2
+ description:
+ This sets the alignment of the entry size in bytes. It must be a power
+ of 2.
+
+ For example, to ensure that the size of an entry is a multiple of 64
+ bytes, set this to 64. While this does not affect the content of the
+ entry itself (the padding is performed only when its parent section is
+ assembled), the end result is that the entry ends with the padding
+ bytes, so may grow. If ‘align-size’ is not provided, no alignment is
+ performed.
+
+ align-end:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 2
+ maximum: 0x80000000
+ multipleOf: 2
+ description:
+ This sets the alignment (in bytes) of the end of an entry with respect
+ to the containing section. It must be a power of 2.
+
+ Some entries require that they end on an alignment boundary,
+ regardless of where they start. This does not move the start of the
+ entry, so the content of the entry will still start at the beginning.
+ But there may be padding at the end. While this does not affect the
+ content of the entry itself (the padding is performed only when its
+ parent section is assembled), the end result is that the entry ends
+ with the padding bytes, so may grow. If ‘align-end’ is not provided,
+ no alignment is performed.
+
if:
not:
required: [ reg ]
@@ -67,3 +118,24 @@ then:
# This is a generic file other binding inherit from and extend
additionalProperties: true
+
+examples:
+ - |
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@100000 {
+ compatible = "u-boot";
+ reg = <0x100000 0xf00000>;
+ align-size = <0x1000>;
+ align-end = <0x10000>;
+ };
+
+ partition@200000 {
+ compatible = "tfa-bl31";
+ reg = <0x200000 0x100000>;
+ align = <0x4000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/samsung,s5pv210-onenand.yaml b/Documentation/devicetree/bindings/mtd/samsung,s5pv210-onenand.yaml
new file mode 100644
index 000000000000..e07941b69904
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/samsung,s5pv210-onenand.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/samsung,s5pv210-onenand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S5Pv210 SoC OneNAND Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - samsung,s5pv210-onenand
+
+ reg:
+ items:
+ - description: Control registers
+ - description: OneNAND interface nCE[0]
+ - description: OneNAND interface nCE[1]
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: bus
+ - const: onenand
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+allOf:
+ - $ref: nand-controller.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/s5pv210.h>
+
+ nand-controller@b0600000 {
+ compatible = "samsung,s5pv210-onenand";
+ reg = <0xb0600000 0x2000>,
+ <0xb0000000 0x20000>,
+ <0xb0040000 0x20000>;
+ clocks = <&clocks CLK_NANDXL>, <&clocks DOUT_FLASH>;
+ clock-names = "bus", "onenand";
+ interrupt-parent = <&vic1>;
+ interrupts = <31>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/airoha,en8811h.yaml b/Documentation/devicetree/bindings/net/airoha,en8811h.yaml
new file mode 100644
index 000000000000..ecb5149ec6b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/airoha,en8811h.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/airoha,en8811h.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha EN8811H PHY
+
+maintainers:
+ - Eric Woudstra <ericwouds@gmail.com>
+
+description:
+ The Airoha EN8811H PHY can reverse the polarity of the lines
+ to and/or from the MAC. The polarity is reversed by boolean
+ properties in the devicetree node of the PHY.
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id03a2.a411
+
+ reg:
+ maxItems: 1
+
+ airoha,pnswap-rx:
+ type: boolean
+ description:
+ Reverse rx polarity of the SERDES. This is the receiving
+ side of the lines from the MAC towards the EN8811H.
+
+ airoha,pnswap-tx:
+ type: boolean
+ description:
+ Reverse tx polarity of SERDES. This is the transmitting
+ side of the lines from EN8811H towards the MAC.
+
+required:
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1 {
+ compatible = "ethernet-phy-id03a2.a411";
+ reg = <1>;
+ airoha,pnswap-rx;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml
new file mode 100644
index 000000000000..67ff7caad599
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/bluetooth/mediatek,mt7921s-bluetooth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT7921S Bluetooth
+
+maintainers:
+ - Sean Wang <sean.wang@mediatek.com>
+
+description:
+ MT7921S is an SDIO-attached dual-radio WiFi+Bluetooth Combo chip; each
+ function is its own SDIO function on a shared SDIO interface. The chip
+ has two dedicated reset lines, one for each function core.
+ This binding only covers the Bluetooth SDIO function, with one device
+ node describing only this SDIO function.
+
+allOf:
+ - $ref: bluetooth-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt7921s-bluetooth
+
+ reg:
+ const: 2
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ An active-low reset line for the Bluetooth core; on typical M.2
+ key E modules this is the W_DISABLE2# pin.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ mmc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bluetooth@2 {
+ compatible = "mediatek,mt7921s-bluetooth";
+ reg = <2>;
+ reset-gpios = <&pio 8 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index 528ef3572b62..055a3351880b 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -94,6 +94,10 @@ properties:
local-bd-address: true
+ qcom,local-bd-address-broken:
+ type: boolean
+ description:
+ boot firmware is incorrectly passing the address in big-endian order
required:
- compatible
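The new quirk flag sits next to local-bd-address in the Bluetooth node. A minimal sketch, not part of this patch: the controller compatible is an arbitrary choice, and local-bd-address is normally filled in by the boot firmware, so the zeroed bytes are only placeholders.

    bluetooth {
        compatible = "qcom,wcn3990-bt";
        local-bd-address = [ 00 00 00 00 00 00 ];
        qcom,local-bd-address-broken;
    };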
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml b/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml
index cc70b00c6ce5..4a1bfc2b3584 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml
@@ -14,20 +14,25 @@ description:
properties:
compatible:
- enum:
- - brcm,bcm20702a1
- - brcm,bcm4329-bt
- - brcm,bcm4330-bt
- - brcm,bcm4334-bt
- - brcm,bcm43430a0-bt
- - brcm,bcm43430a1-bt
- - brcm,bcm43438-bt
- - brcm,bcm4345c5
- - brcm,bcm43540-bt
- - brcm,bcm4335a0
- - brcm,bcm4349-bt
- - cypress,cyw4373a0-bt
- - infineon,cyw55572-bt
+ oneOf:
+ - items:
+ - enum:
+ - infineon,cyw43439-bt
+ - const: brcm,bcm4329-bt
+ - enum:
+ - brcm,bcm20702a1
+ - brcm,bcm4329-bt
+ - brcm,bcm4330-bt
+ - brcm,bcm4334-bt
+ - brcm,bcm43430a0-bt
+ - brcm,bcm43430a1-bt
+ - brcm,bcm43438-bt
+ - brcm,bcm4345c5
+ - brcm,bcm43540-bt
+ - brcm,bcm4335a0
+ - brcm,bcm4349-bt
+ - cypress,cyw4373a0-bt
+ - infineon,cyw55572-bt
shutdown-gpios:
maxItems: 1
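With the compatible list split into a oneOf, a CYW43439 declares the BCM4329 fallback; a sketch only (the GPIO phandle, pin number and max-speed are placeholders, and GPIO_ACTIVE_HIGH assumes dt-bindings/gpio/gpio.h is included):

    bluetooth {
        compatible = "infineon,cyw43439-bt", "brcm,bcm4329-bt";
        shutdown-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
        max-speed = <3000000>;
    };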
diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
index f9ffb963d6b1..c4887522e8fe 100644
--- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -118,6 +118,10 @@ properties:
phys:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index e74502a0afe8..3202dc7967c5 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -337,8 +337,8 @@ allOf:
minItems: 4
clocks:
- minItems: 34
- maxItems: 34
+ minItems: 24
+ maxItems: 24
clock-names:
items:
@@ -351,18 +351,6 @@ allOf:
- const: ethwarp_wocpu1
- const: ethwarp_wocpu0
- const: esw
- - const: netsys0
- - const: netsys1
- - const: sgmii_tx250m
- - const: sgmii_rx250m
- - const: sgmii2_tx250m
- - const: sgmii2_rx250m
- - const: top_usxgmii0_sel
- - const: top_usxgmii1_sel
- - const: top_sgm0_sel
- - const: top_sgm1_sel
- - const: top_xfi_phy0_xtal_sel
- - const: top_xfi_phy1_xtal_sel
- const: top_eth_gmii_sel
- const: top_eth_refck_50m_sel
- const: top_eth_sys_200m_sel
@@ -375,16 +363,10 @@ allOf:
- const: top_netsys_sync_250m_sel
- const: top_netsys_ppefb_250m_sel
- const: top_netsys_warp_sel
- - const: wocpu1
- - const: wocpu0
- const: xgp1
- const: xgp2
- const: xgp3
- mediatek,sgmiisys:
- minItems: 2
- maxItems: 2
-
patternProperties:
"^mac@[0-1]$":
type: object
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
index 4c01cae7c93a..87bc4416eadf 100644
--- a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
@@ -66,6 +66,10 @@ properties:
Should be phandle/offset pair. The phandle to the syscon node which
encompases the GPR register, and the offset of the GPR register.
+ nvmem-cells: true
+
+ nvmem-cell-names: true
+
snps,rmii_refclk_ext:
$ref: /schemas/types.yaml#/definitions/flag
description:
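The nvmem-cells/nvmem-cell-names pair comes from the generic ethernet-controller schema and is typically used to hand a MAC address to the driver; a board-level fragment as a sketch (the eqos label and OCOTP cell phandle are assumptions):

    &eqos {
        nvmem-cells = <&eth_mac1>;
        nvmem-cell-names = "mac-address";
    };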
diff --git a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
new file mode 100644
index 000000000000..828439398fdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pse-pd/microchip,pd692x0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PD692x0 Power Sourcing Equipment controller
+
+maintainers:
+ - Kory Maincent <kory.maincent@bootlin.com>
+
+allOf:
+ - $ref: pse-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - microchip,pd69200
+ - microchip,pd69210
+ - microchip,pd69220
+
+ reg:
+ maxItems: 1
+
+ managers:
+ type: object
+ description:
+ List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
+ has 4 or 8 physical ports depending on the chip version. There is no
+ need to specify the SPI chip select as it is automatically detected
+ by the PD692x0 PSE controller. The PSE managers have to be described
+ from the lowest chip select to the highest one, which matches the
+ detection order of the PD692x0 PSE controller. The PD692x0 supports
+ up to 12 PSE managers, which can expose up to 96 physical ports. All
+ physical ports available on a manager have to be described in
+ incremental order even if they are not used.
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ patternProperties:
+ "^manager@0[0-9a-b]$":
+ type: object
+ description:
+ PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
+ ports.
+
+ properties:
+ reg:
+ description:
+ Incremental index of the PSE manager starting from 0, ranging
+ from lowest to highest chip select, up to 11.
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ '^port@[0-7]$':
+ type: object
+ required:
+ - reg
+ additionalProperties: false
+
+ required:
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+required:
+ - compatible
+ - reg
+ - pse-pis
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-pse@3c {
+ compatible = "microchip,pd69200";
+ reg = <0x3c>;
+
+ managers {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ manager@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phys0: port@0 {
+ reg = <0>;
+ };
+
+ phys1: port@1 {
+ reg = <1>;
+ };
+
+ phys2: port@2 {
+ reg = <2>;
+ };
+
+ phys3: port@3 {
+ reg = <3>;
+ };
+ };
+
+ manager@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phys4: port@0 {
+ reg = <0>;
+ };
+
+ phys5: port@1 {
+ reg = <1>;
+ };
+
+ phys6: port@2 {
+ reg = <2>;
+ };
+
+ phys7: port@3 {
+ reg = <3>;
+ };
+ };
+ };
+
+ pse-pis {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pse_pi0: pse-pi@0 {
+ reg = <0>;
+ #pse-cells = <0>;
+ pairset-names = "alternative-a", "alternative-b";
+ pairsets = <&phys0>, <&phys1>;
+ polarity-supported = "MDI", "S";
+ vpwr-supply = <&vpwr1>;
+ };
+ pse_pi1: pse-pi@1 {
+ reg = <1>;
+ #pse-cells = <0>;
+ pairset-names = "alternative-a";
+ pairsets = <&phys2>;
+ polarity-supported = "MDI";
+ vpwr-supply = <&vpwr2>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
index 2d382faca0e6..a12cda8aa764 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
@@ -13,6 +13,7 @@ description: Binding for the Power Sourcing Equipment (PSE) as defined in the
maintainers:
- Oleksij Rempel <o.rempel@pengutronix.de>
+ - Kory Maincent <kory.maincent@bootlin.com>
properties:
$nodename:
@@ -22,11 +23,105 @@ properties:
description:
Used to uniquely identify a PSE instance within an IC. Will be
0 on PSE nodes with only a single output and at least 1 on nodes
- controlling several outputs.
+ controlling several outputs which are not described in the pse-pis
+ subnode. This property is deprecated; please use pse-pis instead.
enum: [0, 1]
-required:
- - "#pse-cells"
+ pse-pis:
+ type: object
+ description:
+ Overview of the PSE PIs provided by the controller.
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ patternProperties:
+ "^pse-pi@[0-9a-f]+$":
+ type: object
+ description:
+ PSE PI for power delivery via pairsets, compliant with IEEE
+ 802.3-2022, Section 145.2.4. Each pairset comprises a positive and
+ a negative VPSE pair, adhering to the pinout configurations
+ detailed in the standard.
+ See Documentation/networking/pse-pd/pse-pi.rst for details.
+
+ properties:
+ reg:
+ description:
+ Address describing the PSE PI index.
+ maxItems: 1
+
+ "#pse-cells":
+ const: 0
+
+ pairset-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ Names of the pairsets as per IEEE 802.3-2022, Section 145.2.4.
+ Each name should correspond to a phandle in the 'pairsets'
+ property pointing to the power supply for that pairset.
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - alternative-a
+ - alternative-b
+
+ pairsets:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of phandles, each pointing to the power supply for the
+ corresponding pairset named in 'pairset-names'. This property
+ aligns with IEEE 802.3-2022, Section 33.2.3 and 145.2.4.
+ PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145-3)
+ |-----------|---------------|---------------|---------------|---------------|
+ | Conductor | Alternative A | Alternative A | Alternative B | Alternative B |
+ | | (MDI-X) | (MDI) | (X) | (S) |
+ |-----------|---------------|---------------|---------------|---------------|
+ | 1 | Negative VPSE | Positive VPSE | - | - |
+ | 2 | Negative VPSE | Positive VPSE | - | - |
+ | 3 | Positive VPSE | Negative VPSE | - | - |
+ | 4 | - | - | Negative VPSE | Positive VPSE |
+ | 5 | - | - | Negative VPSE | Positive VPSE |
+ | 6 | Positive VPSE | Negative VPSE | - | - |
+ | 7 | - | - | Positive VPSE | Negative VPSE |
+ | 8 | - | - | Positive VPSE | Negative VPSE |
+ minItems: 1
+ maxItems: 2
+
+ polarity-supported:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ Polarity configuration supported by the PSE PI pairsets.
+ minItems: 1
+ maxItems: 4
+ items:
+ enum:
+ - MDI-X
+ - MDI
+ - X
+ - S
+
+ vpwr-supply:
+ description: Regulator power supply for the PSE PI.
+
+ required:
+ - reg
+ - "#pse-cells"
+
+oneOf:
+ - required:
+ - "#pse-cells"
+ - required:
+ - pse-pis
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
new file mode 100644
index 000000000000..4147adb11e10
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pse-pd/ti,tps23881.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TPS23881 Power Sourcing Equipment controller
+
+maintainers:
+ - Kory Maincent <kory.maincent@bootlin.com>
+
+allOf:
+ - $ref: pse-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,tps23881
+
+ reg:
+ maxItems: 1
+
+ '#pse-cells':
+ const: 1
+
+ channels:
+ description: Each set of 8 ports can be assigned to one physical
+ channel, or two for PoE4. This parameter describes the configuration
+ of the port conversion matrix that establishes the relationship
+ between the logical ports and the physical channels.
+ type: object
+
+ patternProperties:
+ '^channel@[0-7]$':
+ type: object
+ required:
+ - reg
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-pse@20 {
+ compatible = "ti,tps23881";
+ reg = <0x20>;
+
+ channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phys0: channel@0 {
+ reg = <0>;
+ };
+
+ phys1: channel@1 {
+ reg = <1>;
+ };
+
+ phys2: channel@2 {
+ reg = <2>;
+ };
+ };
+
+ pse-pis {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pse_pi0: pse-pi@0 {
+ reg = <0>;
+ #pse-cells = <0>;
+ pairset-names = "alternative-a", "alternative-b";
+ pairsets = <&phys0>, <&phys1>;
+ polarity-supported = "MDI", "S";
+ vpwr-supply = <&vpwr1>;
+ };
+
+ pse_pi1: pse-pi@1 {
+ reg = <1>;
+ #pse-cells = <0>;
+ pairset-names = "alternative-a";
+ pairsets = <&phys2>;
+ polarity-supported = "MDI";
+ vpwr-supply = <&vpwr2>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
index 69a337c7e345..6672327358bc 100644
--- a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
@@ -61,6 +61,8 @@ properties:
iommus:
maxItems: 1
+ dma-coherent: true
+
phys: true
phy-names:
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
index 0029e197a825..a94480e819ac 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- qcom,ipq6018-mdio
- qcom,ipq8074-mdio
+ - qcom,ipq9574-mdio
- const: qcom,ipq4019-mdio
"#address-cells":
@@ -76,6 +77,7 @@ allOf:
- qcom,ipq5018-mdio
- qcom,ipq6018-mdio
- qcom,ipq8074-mdio
+ - qcom,ipq9574-mdio
then:
required:
- clocks
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index de7ba7f345a9..21a92f179093 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -88,10 +88,16 @@ properties:
'#address-cells':
description: Number of address cells for the MDIO bus.
const: 1
+ deprecated: true
'#size-cells':
description: Number of size cells on the MDIO bus.
const: 0
+ deprecated: true
+
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
renesas,no-ether-link:
type: boolean
@@ -110,9 +116,13 @@ properties:
tx-internal-delay-ps:
enum: [0, 2000]
+# In older bindings there was no mdio child node to describe the MDIO bus
+# and the PHY. To not fail older bindings, accept any node with an address. New
+# users should describe the PHY inside the mdio child-node.
patternProperties:
"@[0-9a-f]$":
type: object
+ deprecated: true
required:
- compatible
@@ -123,8 +133,6 @@ required:
- resets
- phy-mode
- phy-handle
- - '#address-cells'
- - '#size-cells'
allOf:
- $ref: ethernet-controller.yaml#
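The preferred layout after this change keeps the PHY under an mdio subnode rather than directly under the MAC; a sketch only (the avb label and PHY address are placeholders):

    &avb {
        phy-handle = <&phy0>;
        phy-mode = "rgmii";

        mdio {
            #address-cells = <1>;
            #size-cells = <0>;

            phy0: ethernet-phy@0 {
                compatible = "ethernet-phy-ieee802.3-c22";
                reg = <0>;
            };
        };
    };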
diff --git a/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml b/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
index ea35d19be829..b4680a1d0a06 100644
--- a/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
@@ -71,16 +71,8 @@ properties:
enum: [0, 2000]
default: 0
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
-patternProperties:
- "^ethernet-phy@[0-9a-f]$":
- type: object
- $ref: ethernet-phy.yaml#
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
unevaluatedProperties: false
required:
@@ -94,8 +86,7 @@ required:
- resets
- phy-mode
- phy-handle
- - '#address-cells'
- - '#size-cells'
+ - mdio
additionalProperties: false
@@ -122,14 +113,18 @@ examples:
tx-internal-delay-ps = <2000>;
phy-handle = <&phy3>;
- #address-cells = <1>;
- #size-cells = <0>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
- phy3: ethernet-phy@3 {
- compatible = "ethernet-phy-ieee802.3-c45";
- reg = <0>;
- interrupt-parent = <&gpio4>;
- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ phy3: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/net/renesas,rzn1-gmac.yaml b/Documentation/devicetree/bindings/net/renesas,rzn1-gmac.yaml
new file mode 100644
index 000000000000..d9a8d586e260
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,rzn1-gmac.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/renesas,rzn1-gmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas GMAC
+
+maintainers:
+ - Romain Gantois <romain.gantois@bootlin.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a06g032-gmac
+ - renesas,rzn1-gmac
+ required:
+ - compatible
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a06g032-gmac
+ - const: renesas,rzn1-gmac
+ - const: snps,dwmac
+
+ pcs-handle:
+ description:
+ phandle pointing to a PCS sub-node compatible with
+ renesas,rzn1-miic.yaml#
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r9a06g032-sysctrl.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ethernet@44000000 {
+ compatible = "renesas,r9a06g032-gmac", "renesas,rzn1-gmac", "snps,dwmac";
+ reg = <0x44000000 0x2000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+ clock-names = "stmmaceth";
+ clocks = <&sysctrl R9A06G032_HCLK_GMAC0>;
+ power-domains = <&sysctrl>;
+ snps,multicast-filter-bins = <256>;
+ snps,perfect-filter-entries = <128>;
+ tx-fifo-depth = <2048>;
+ rx-fifo-depth = <4096>;
+ pcs-handle = <&mii_conv1>;
+ phy-mode = "mii";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index 70bbc4220e2a..6bbe96e35250 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -137,8 +137,6 @@ examples:
assigned-clock-parents = <&ext_gmac>;
rockchip,grf = <&grf>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
clock_in_out = "input";
- tx_delay = <0x30>;
- rx_delay = <0x10>;
};
diff --git a/Documentation/devicetree/bindings/net/sff,sfp.yaml b/Documentation/devicetree/bindings/net/sff,sfp.yaml
index bf6cbc7c2ba3..90611b598d2b 100644
--- a/Documentation/devicetree/bindings/net/sff,sfp.yaml
+++ b/Documentation/devicetree/bindings/net/sff,sfp.yaml
@@ -29,39 +29,39 @@ properties:
allowable by a module in the slot, in milli-Watts. Presently, modules can
be up to 1W, 1.5W or 2W.
- "mod-def0-gpios":
+ mod-def0-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the MOD-DEF0 (AKA Mod_ABS) module
presence input gpio signal, active (module absent) high. Must not be
present for SFF modules
- "los-gpios":
+ los-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the Receiver Loss of Signal Indication
input gpio signal, active (signal lost) high
- "tx-fault-gpios":
+ tx-fault-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the Module Transmitter Fault input gpio
signal, active (fault condition) high
- "tx-disable-gpios":
+ tx-disable-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the Transmitter Disable output gpio
signal, active (Tx disable) high
- "rate-select0-gpios":
+ rate-select0-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the Rx Signaling Rate Select (AKA RS0)
output gpio signal, low - low Rx rate, high - high Rx rate Must not be
present for SFF modules
- "rate-select1-gpios":
+ rate-select1-gpios:
maxItems: 1
description:
GPIO phandle and a specifier of the Tx Signaling Rate Select (AKA RS1)
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 6b0341a8e0ea..21cc27e75f50 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -242,7 +242,8 @@ properties:
type: boolean
description: Multicast & Broadcast Packets
snps,priority:
- $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 1
description: Bitmask of the tagged frames priorities assigned to the queue
allOf:
- if:
@@ -327,9 +328,6 @@ properties:
snps,tx-sched-dwrr:
type: boolean
description: Deficit Weighted Round Robin
- snps,tx-sched-sp:
- type: boolean
- description: Strict priority
allOf:
- if:
required:
@@ -338,7 +336,6 @@ properties:
properties:
snps,tx-sched-wfq: false
snps,tx-sched-dwrr: false
- snps,tx-sched-sp: false
- if:
required:
- snps,tx-sched-wfq
@@ -346,7 +343,6 @@ properties:
properties:
snps,tx-sched-wrr: false
snps,tx-sched-dwrr: false
- snps,tx-sched-sp: false
- if:
required:
- snps,tx-sched-dwrr
@@ -354,15 +350,6 @@ properties:
properties:
snps,tx-sched-wrr: false
snps,tx-sched-wfq: false
- snps,tx-sched-sp: false
- - if:
- required:
- - snps,tx-sched-sp
- then:
- properties:
- snps,tx-sched-wrr: false
- snps,tx-sched-wfq: false
- snps,tx-sched-dwrr: false
patternProperties:
"^queue[0-9]$":
description: Each subnode represents a queue.
@@ -393,7 +380,8 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
description: max read outstanding req. limit
snps,priority:
- $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 1
description:
Bitmask of the tagged frames priorities assigned to the queue.
When a PFC frame is received with priorities matching the bitmask,
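The switch from uint32 to a one-element uint32-array does not change how the bitmask is written in DTS; a queue fragment still looks like the sketch below (queue count, algorithms and priority values are illustrative):

    mtl_rx_setup: rx-queues-config {
        snps,rx-queues-to-use = <2>;

        queue0 {
            snps,dcb-algorithm;
            snps,priority = <0x1>;
        };

        queue1 {
            snps,dcb-algorithm;
            snps,priority = <0x2>;
        };
    };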
diff --git a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
index 0d1962980f57..313a15331661 100644
--- a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
@@ -30,6 +30,10 @@ properties:
- items:
- const: starfive,jh7110-dwmac
- const: snps,dwmac-5.20
+ - items:
+ - const: starfive,jh8100-dwmac
+ - const: starfive,jh7110-dwmac
+ - const: snps,dwmac-5.20
reg:
maxItems: 1
@@ -116,11 +120,25 @@ allOf:
minItems: 3
maxItems: 3
- resets:
- minItems: 2
-
- reset-names:
- minItems: 2
+ if:
+ properties:
+ compatible:
+ contains:
+ const: starfive,jh8100-dwmac
+ then:
+ properties:
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: stmmaceth
+ else:
+ properties:
+ resets:
+ minItems: 2
+
+ reset-names:
+ minItems: 2
unevaluatedProperties: false
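A JH8100 MAC would then chain all three compatibles and need only the single stmmaceth reset; a fragment as a sketch (register address and reset specifier are placeholders, other required properties omitted):

    gmac0: ethernet@16030000 {
        compatible = "starfive,jh8100-dwmac", "starfive,jh7110-dwmac", "snps,dwmac-5.20";
        reg = <0x16030000 0x10000>;
        resets = <&syscrg 10>;
        reset-names = "stmmaceth";
    };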
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index fc8c96b08d7d..7ccf75676b6d 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -82,6 +82,13 @@ properties:
Should be phandle/offset pair. The phandle to the syscon node which
encompases the glue register, and the offset of the control register
+ st,ext-phyclk:
+ description:
+ Set this property in RMII mode when the PHY has no 50 MHz crystal and you want to
+ select the RCC clock instead of ETH_REF_CLK, or in RGMII mode when you want to
+ select the RCC clock instead of ETH_CLK125.
+ type: boolean
+
st,eth-clk-sel:
description:
set this property in RGMII PHY when you want to select RCC clock instead of ETH_CLK125.
@@ -93,6 +100,10 @@ properties:
select RCC clock instead of ETH_REF_CLK.
type: boolean
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- clocks
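st,ext-phyclk is a simple flag on the MAC node; for RMII with a crystal-less PHY it would be used as in the fragment below (the ethernet0 label is a placeholder, other required properties omitted):

    &ethernet0 {
        phy-mode = "rmii";
        st,ext-phyclk;
    };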
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index d5bd93ee4dbb..d14ca81f70e0 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -8,7 +8,6 @@ title: TI SoC Ethernet Switch Controller (CPSW)
maintainers:
- Siddharth Vadapalli <s-vadapalli@ti.com>
- - Ravi Gunasekaran <r-gunasekaran@ti.com>
- Roger Quadros <rogerq@kernel.org>
description:
diff --git a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
index 229c8f32019f..e253fa786092 100644
--- a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
@@ -13,14 +13,12 @@ description:
Ethernet based on the Programmable Real-Time Unit and Industrial
Communication Subsystem.
-allOf:
- - $ref: /schemas/remoteproc/ti,pru-consumer.yaml#
-
properties:
compatible:
enum:
- - ti,am642-icssg-prueth # for AM64x SoC family
- - ti,am654-icssg-prueth # for AM65x SoC family
+ - ti,am642-icssg-prueth # for AM64x SoC family
+ - ti,am654-icssg-prueth # for AM65x SoC family
+ - ti,am654-sr1-icssg-prueth # for AM65x SoC family, SR1.0
sram:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -28,9 +26,11 @@ properties:
phandle to MSMC SRAM node
dmas:
- maxItems: 10
+ minItems: 10
+ maxItems: 12
dma-names:
+ minItems: 10
items:
- const: tx0-0
- const: tx0-1
@@ -42,6 +42,8 @@ properties:
- const: tx1-3
- const: rx0
- const: rx1
+ - const: rxmgm0
+ - const: rxmgm1
ti,mii-g-rt:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -132,6 +134,27 @@ required:
- interrupts
- interrupt-names
+allOf:
+ - $ref: /schemas/remoteproc/ti,pru-consumer.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,am654-sr1-icssg-prueth
+ then:
+ properties:
+ dmas:
+ minItems: 12
+ dma-names:
+ minItems: 12
+ else:
+ properties:
+ dmas:
+ maxItems: 10
+ dma-names:
+ maxItems: 10
+
unevaluatedProperties: false
examples:
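For the new SR1.0 compatible the two management DMA channels are appended to the existing ten names; a fragment showing only the affected properties (the main_udmap phandle and thread IDs are placeholders):

    ethernet {
        compatible = "ti,am654-sr1-icssg-prueth";
        dmas = <&main_udmap 0xc100>, <&main_udmap 0xc101>,
               <&main_udmap 0xc102>, <&main_udmap 0xc103>,
               <&main_udmap 0xc104>, <&main_udmap 0xc105>,
               <&main_udmap 0xc106>, <&main_udmap 0xc107>,
               <&main_udmap 0x4100>, <&main_udmap 0x4101>,
               <&main_udmap 0x4102>, <&main_udmap 0x4103>;
        dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
                    "tx1-0", "tx1-1", "tx1-2", "tx1-3",
                    "rx0", "rx1",
                    "rxmgm0", "rxmgm1";
    };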
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index 73ed5951d296..02b6d32003cc 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -8,7 +8,6 @@ title: The TI AM654x/J721E/AM642x SoC Gigabit Ethernet MAC (Media Access Control
maintainers:
- Siddharth Vadapalli <s-vadapalli@ti.com>
- - Ravi Gunasekaran <r-gunasekaran@ti.com>
- Roger Quadros <rogerq@kernel.org>
description:
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index b1c875325776..3888692275ad 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -8,7 +8,6 @@ title: The TI AM654x/J721E Common Platform Time Sync (CPTS) module
maintainers:
- Siddharth Vadapalli <s-vadapalli@ti.com>
- - Ravi Gunasekaran <r-gunasekaran@ti.com>
- Roger Quadros <rogerq@kernel.org>
description: |+
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
index 4aa521f1be8c..e564f20d8f41 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
@@ -44,6 +44,7 @@ properties:
- brcm,bcm4366-fmac
- cypress,cyw4373-fmac
- cypress,cyw43012-fmac
+ - infineon,cyw43439-fmac
- const: brcm,bcm4329-fmac
- enum:
- brcm,bcm4329-fmac
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
index 9b3ef4bc3732..5c4498b762c8 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
@@ -73,6 +73,12 @@ properties:
- sky85703-11
- sky85803
+ firmware-name:
+ maxItems: 1
+ description:
+ If present, a board- or platform-specific string used to look up
+ firmware files for the device.
+
wifi-firmware:
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index 672282cdfc2f..a2d55bf4c7a5 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -59,6 +59,8 @@ properties:
minItems: 1
maxItems: 2
+ ieee80211-freq-limit: true
+
wifi-firmware:
type: object
description: |
@@ -88,6 +90,7 @@ required:
additionalProperties: false
allOf:
+ - $ref: ieee80211.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
index 51f62c3ae194..ec5e424bb3c8 100644
--- a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
+++ b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
@@ -13,25 +13,25 @@ maintainers:
description: |
For some SoCs, the CPU frequency subset and voltage value of each
OPP varies based on the silicon variant in use. Allwinner Process
- Voltage Scaling Tables defines the voltage and frequency value based
- on the speedbin blown in the efuse combination. The
- sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
- provide the OPP framework with required information.
+ Voltage Scaling Tables define the voltage and frequency values based
+ on the speedbin blown in the efuse combination.
allOf:
- $ref: opp-v2-base.yaml#
properties:
compatible:
- const: allwinner,sun50i-h6-operating-points
+ enum:
+ - allwinner,sun50i-h6-operating-points
+ - allwinner,sun50i-h616-operating-points
nvmem-cells:
description: |
A phandle pointing to a nvmem-cells node representing the efuse
- registers that has information about the speedbin that is used
+ register that has information about the speedbin that is used
to select the right frequency/voltage value pair. Please refer
- the for nvmem-cells bindings
- Documentation/devicetree/bindings/nvmem/nvmem.txt and also
+ to the nvmem-cells bindings in
+ Documentation/devicetree/bindings/nvmem/nvmem.yaml and also the
examples below.
opp-shared: true
@@ -47,15 +47,18 @@ patternProperties:
properties:
opp-hz: true
clock-latency-ns: true
+ opp-microvolt: true
+ opp-supported-hw:
+ maxItems: 1
+ description:
+ A single 32 bit bitmap value, representing compatible HW, one
+ bit per speed bin index.
patternProperties:
"^opp-microvolt-speed[0-9]$": true
required:
- opp-hz
- - opp-microvolt-speed0
- - opp-microvolt-speed1
- - opp-microvolt-speed2
unevaluatedProperties: false
@@ -77,58 +80,54 @@ examples:
opp-microvolt-speed2 = <800000>;
};
- opp-720000000 {
+ opp-1080000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <720000000>;
+ opp-hz = /bits/ 64 <1080000000>;
- opp-microvolt-speed0 = <880000>;
- opp-microvolt-speed1 = <820000>;
- opp-microvolt-speed2 = <800000>;
+ opp-microvolt-speed0 = <1060000>;
+ opp-microvolt-speed1 = <880000>;
+ opp-microvolt-speed2 = <840000>;
};
- opp-816000000 {
+ opp-1488000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <816000000>;
+ opp-hz = /bits/ 64 <1488000000>;
- opp-microvolt-speed0 = <880000>;
- opp-microvolt-speed1 = <820000>;
- opp-microvolt-speed2 = <800000>;
+ opp-microvolt-speed0 = <1160000>;
+ opp-microvolt-speed1 = <1000000>;
+ opp-microvolt-speed2 = <960000>;
};
+ };
- opp-888000000 {
- clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <888000000>;
-
- opp-microvolt-speed0 = <940000>;
- opp-microvolt-speed1 = <820000>;
- opp-microvolt-speed2 = <800000>;
- };
+ - |
+ opp-table {
+ compatible = "allwinner,sun50i-h616-operating-points";
+ nvmem-cells = <&speedbin_efuse>;
+ opp-shared;
- opp-1080000000 {
+ opp-480000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <1080000000>;
+ opp-hz = /bits/ 64 <480000000>;
- opp-microvolt-speed0 = <1060000>;
- opp-microvolt-speed1 = <880000>;
- opp-microvolt-speed2 = <840000>;
+ opp-microvolt = <900000>;
+ opp-supported-hw = <0x1f>;
};
- opp-1320000000 {
+ opp-792000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <1320000000>;
+ opp-hz = /bits/ 64 <792000000>;
- opp-microvolt-speed0 = <1160000>;
- opp-microvolt-speed1 = <940000>;
- opp-microvolt-speed2 = <900000>;
+ opp-microvolt-speed1 = <900000>;
+ opp-microvolt-speed4 = <940000>;
+ opp-supported-hw = <0x12>;
};
- opp-1488000000 {
+ opp-1512000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-hz = /bits/ 64 <1488000000>;
+ opp-hz = /bits/ 64 <1512000000>;
- opp-microvolt-speed0 = <1160000>;
- opp-microvolt-speed1 = <1000000>;
- opp-microvolt-speed2 = <960000>;
+ opp-microvolt = <1100000>;
+ opp-supported-hw = <0x0a>;
};
};
diff --git a/Documentation/devicetree/bindings/pci/amlogic,axg-pcie.yaml b/Documentation/devicetree/bindings/pci/amlogic,axg-pcie.yaml
index a5bd90bc0712..79a21ba0f9fd 100644
--- a/Documentation/devicetree/bindings/pci/amlogic,axg-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/amlogic,axg-pcie.yaml
@@ -13,7 +13,7 @@ description:
Amlogic Meson PCIe host controller is based on the Synopsys DesignWare PCI core.
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/pci/snps,dw-pcie-common.yaml#
# We need a select here so we don't match all nodes with 'snps,dw-pcie'
diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
index 215ff9a9c835..c8775f9cb071 100644
--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -85,7 +85,7 @@ required:
unevaluatedProperties: false
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/interrupt-controller/msi-controller.yaml#
- if:
properties:
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml
index 0e07ab61a48d..5434c144d2ec 100644
--- a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml
@@ -11,7 +11,7 @@ maintainers:
- Scott Branden <scott.branden@broadcom.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 22491f7f8852..11f8ea33240c 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -108,7 +108,7 @@ required:
- msi-controller
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/interrupt-controller/msi-controller.yaml#
- if:
properties:
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
index bc3c48f60fff..a8190d9b100f 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
@@ -10,7 +10,6 @@ maintainers:
- Tom Joseph <tjoseph@cadence.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
- $ref: cdns-pcie-host.yaml#
properties:
@@ -25,8 +24,6 @@ properties:
- const: reg
- const: cfg
- msi-parent: true
-
required:
- reg
- reg-names
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
index a6b494401ebb..f4eb82e684bd 100644
--- a/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
@@ -10,7 +10,7 @@ maintainers:
- Tom Joseph <tjoseph@cadence.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: cdns-pcie.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.yaml b/Documentation/devicetree/bindings/pci/faraday,ftpci100.yaml
index 92efbf0f1297..378dd1c8e2ee 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.yaml
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.yaml
@@ -51,7 +51,7 @@ description: |
<0x6000 0 0 4 &pci_intc 2>;
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie-ep.yaml
new file mode 100644
index 000000000000..399efa7364c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie-ep.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/fsl,layerscape-pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Layerscape PCIe Endpoint (EP) controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ This PCIe EP controller is based on the Synopsys DesignWare PCIe IP.
+
+ This controller derives its clocks from the Reset Configuration Word (RCW)
+ which is used to describe the PLL settings at the time of chip-reset.
+
+  Also, as per the available Reference Manuals, there is no specific 'version'
+  register in the Freescale PCIe controller register set that would allow
+  determining the underlying DesignWare PCIe controller version.
+
+properties:
+ compatible:
+ enum:
+ - fsl,ls2088a-pcie-ep
+ - fsl,ls1088a-pcie-ep
+ - fsl,ls1046a-pcie-ep
+ - fsl,ls1028a-pcie-ep
+ - fsl,lx2160ar2-pcie-ep
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: regs
+ - const: addr_space
+
+ fsl,pcie-scfg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle to the SCFG device node. The second entry is the
+ physical PCIe controller index starting from '0'. This is used to get
+ SCFG PEXN registers.
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: If the PEX_LUT and PF register block is in big-endian, specify
+ this property.
+
+ dma-coherent: true
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,ls1028a-pcie-ep
+ - fsl,ls1046a-pcie-ep
+ - fsl,ls1088a-pcie-ep
+ then:
+ properties:
+ interrupt-names:
+ items:
+ - const: pme
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie_ep1: pcie-ep@3400000 {
+ compatible = "fsl,ls1028a-pcie-ep";
+ reg = <0x00 0x03400000 0x0 0x00100000
+ 0x80 0x00000000 0x8 0x00000000>;
+ reg-names = "regs", "addr_space";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
+ interrupt-names = "pme";
+ num-ib-windows = <6>;
+ num-ob-windows = <8>;
+ status = "disabled";
+ };
+ };
+...
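The new EP schema above documents the optional fsl,pcie-scfg and big-endian properties, but the bundled example does not exercise them. As a rough sketch only (the SCFG label, controller index, addresses and interrupt number are illustrative assumptions, not taken from a real board), an endpoint node using them might look like:

    pcie_ep2: pcie-ep@3500000 {
        compatible = "fsl,ls1046a-pcie-ep";
        reg = <0x00 0x03500000 0x0 0x00100000
               0x88 0x00000000 0x8 0x00000000>;
        reg-names = "regs", "addr_space";
        /* assumed SCFG phandle plus controller index, per the description above */
        fsl,pcie-scfg = <&scfg 1>;
        big-endian;
        interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
        interrupt-names = "pme";
    };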
diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
new file mode 100644
index 000000000000..793986c5af7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
@@ -0,0 +1,167 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/fsl,layerscape-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Layerscape PCIe Root Complex (RC) controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+  This PCIe RC controller is based on the Synopsys DesignWare PCIe IP.
+
+ This controller derives its clocks from the Reset Configuration Word (RCW)
+ which is used to describe the PLL settings at the time of chip-reset.
+
+  Also, as per the available Reference Manuals, there is no specific 'version'
+  register in the Freescale PCIe controller register set that would allow
+  determining the underlying DesignWare PCIe controller version.
+
+properties:
+ compatible:
+ enum:
+ - fsl,ls1021a-pcie
+ - fsl,ls2080a-pcie
+ - fsl,ls2085a-pcie
+ - fsl,ls2088a-pcie
+ - fsl,ls1088a-pcie
+ - fsl,ls1046a-pcie
+ - fsl,ls1043a-pcie
+ - fsl,ls1012a-pcie
+ - fsl,ls1028a-pcie
+ - fsl,lx2160a-pcie
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: regs
+ - const: config
+
+ fsl,pcie-scfg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle to the SCFG device node. The second entry is the
+ physical PCIe controller index starting from '0'. This is used to get
+ SCFG PEXN registers.
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: If the PEX_LUT and PF register block is in big-endian, specify
+ this property.
+
+ dma-coherent: true
+
+ msi-parent: true
+
+ iommu-map: true
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#address-cells"
+ - "#size-cells"
+ - device_type
+ - bus-range
+ - ranges
+ - interrupts
+ - interrupt-names
+ - "#interrupt-cells"
+ - interrupt-map-mask
+ - interrupt-map
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,ls1028a-pcie
+ - fsl,ls1046a-pcie
+ - fsl,ls1043a-pcie
+ - fsl,ls1012a-pcie
+ then:
+ properties:
+ interrupts:
+ maxItems: 2
+ interrupt-names:
+ items:
+ - const: pme
+ - const: aer
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,ls2080a-pcie
+ - fsl,ls2085a-pcie
+ - fsl,ls2088a-pcie
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: intr
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,ls1088a-pcie
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: aer
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@3400000 {
+ compatible = "fsl,ls1088a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x20 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupt-names = "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ dma-coherent;
+ device_type = "pci";
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
index d25423aa7167..3484e0b4b412 100644
--- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
+++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
@@ -116,7 +116,7 @@ required:
- ranges
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml b/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
index debfb54a8042..3cae2e0f7f5e 100644
--- a/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
+++ b/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
@@ -12,7 +12,7 @@ maintainers:
description: PCI host controller found in the Intel IXP4xx SoC series.
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
index 505acc4f3efc..1fd557504b10 100644
--- a/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
@@ -11,7 +11,7 @@ maintainers:
- Srikanth Thokala <srikanth.thokala@intel.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
deleted file mode 100644
index ee8a4791a78b..000000000000
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-Freescale Layerscape PCIe controller
-
-This PCIe host controller is based on the Synopsys DesignWare PCIe IP
-and thus inherits all the common properties defined in snps,dw-pcie.yaml.
-
-This controller derives its clocks from the Reset Configuration Word (RCW)
-which is used to describe the PLL settings at the time of chip-reset.
-
-Also as per the available Reference Manuals, there is no specific 'version'
-register available in the Freescale PCIe controller register set,
-which can allow determining the underlying DesignWare PCIe controller version
-information.
-
-Required properties:
-- compatible: should contain the platform identifier such as:
- RC mode:
- "fsl,ls1021a-pcie"
- "fsl,ls2080a-pcie", "fsl,ls2085a-pcie"
- "fsl,ls2088a-pcie"
- "fsl,ls1088a-pcie"
- "fsl,ls1046a-pcie"
- "fsl,ls1043a-pcie"
- "fsl,ls1012a-pcie"
- "fsl,ls1028a-pcie"
- EP mode:
- "fsl,ls1028a-pcie-ep", "fsl,ls-pcie-ep"
- "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
- "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"
- "fsl,ls2088a-pcie-ep", "fsl,ls-pcie-ep"
- "fsl,lx2160ar2-pcie-ep", "fsl,ls-pcie-ep"
-- reg: base addresses and lengths of the PCIe controller register blocks.
-- interrupts: A list of interrupt outputs of the controller. Must contain an
- entry for each entry in the interrupt-names property.
-- interrupt-names: It could include the following entries:
- "aer": Used for interrupt line which reports AER events when
- non MSI/MSI-X/INTx mode is used
- "pme": Used for interrupt line which reports PME events when
- non MSI/MSI-X/INTx mode is used
- "intr": Used for SoCs(like ls2080a, lx2160a, ls2080a, ls2088a, ls1088a)
- which has a single interrupt line for miscellaneous controller
- events(could include AER and PME events).
-- fsl,pcie-scfg: Must include two entries.
- The first entry must be a link to the SCFG device node
- The second entry is the physical PCIe controller index starting from '0'.
- This is used to get SCFG PEXN registers
-- dma-coherent: Indicates that the hardware IP block can ensure the coherency
- of the data transferred from/to the IP block. This can avoid the software
- cache flush/invalid actions, and improve the performance significantly.
-
-Optional properties:
-- big-endian: If the PEX_LUT and PF register block is in big-endian, specify
- this property.
-
-Example:
-
- pcie@3400000 {
- compatible = "fsl,ls1088a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
- <0x20 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
- interrupt-names = "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-viewport = <256>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
- iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
- };
diff --git a/Documentation/devicetree/bindings/pci/loongson.yaml b/Documentation/devicetree/bindings/pci/loongson.yaml
index a8324a9bd002..1988465e73a1 100644
--- a/Documentation/devicetree/bindings/pci/loongson.yaml
+++ b/Documentation/devicetree/bindings/pci/loongson.yaml
@@ -13,7 +13,7 @@ description: |+
PCI host controller found on Loongson PCHs and SoCs.
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml b/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
index e63e6458cea8..6fba42156db6 100644
--- a/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
@@ -14,7 +14,7 @@ description: |+
with 3 Root Ports. Each Root Port supports a Gen1 1-lane Link
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
@@ -33,9 +33,12 @@ properties:
patternProperties:
'^pcie@[0-2],0$':
type: object
- $ref: /schemas/pci/pci-bus.yaml#
+ $ref: /schemas/pci/pci-pci-bridge.yaml#
properties:
+ reg:
+ maxItems: 1
+
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index 7e8c7a2a5f9b..76d742051f73 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -140,7 +140,7 @@ required:
- interrupt-controller
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
index f7a3c2636355..5d7aec5f54e7 100644
--- a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -10,7 +10,7 @@ maintainers:
- Daire McNamara <daire.mcnamara@microchip.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/interrupt-controller/msi-controller.yaml#
properties:
@@ -65,7 +65,8 @@ properties:
- const: msi
ranges:
- maxItems: 1
+ minItems: 1
+ maxItems: 3
dma-ranges:
minItems: 1
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
index 0d1b23523f62..0a39bbfcb28b 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
@@ -95,6 +95,6 @@ anyOf:
- msi-map
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
index 9eb6e457b07f..2a4cc41fc710 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml
@@ -71,28 +71,6 @@ properties:
items:
- const: pci
-oneOf:
- - properties:
- interrupts:
- maxItems: 1
- interrupt-names:
- items:
- - const: msi
-
- - properties:
- interrupts:
- minItems: 8
- interrupt-names:
- items:
- - const: msi0
- - const: msi1
- - const: msi2
- - const: msi3
- - const: msi4
- - const: msi5
- - const: msi6
- - const: msi7
-
allOf:
- $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index cf9a6910b542..f867746b1ae5 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -130,7 +130,7 @@ anyOf:
- msi-map
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
index fe38f62da066..91b81ac75592 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
@@ -16,7 +16,9 @@ allOf:
properties:
compatible:
items:
- - const: renesas,r8a779f0-pcie-ep # R-Car S4-8
+ - enum:
+ - renesas,r8a779f0-pcie-ep # R-Car S4-8
+ - renesas,r8a779g0-pcie-ep # R-Car V4H
- const: renesas,rcar-gen4-pcie-ep # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
index ffb34339b637..955c664f1fbb 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
@@ -16,7 +16,9 @@ allOf:
properties:
compatible:
items:
- - const: renesas,r8a779f0-pcie # R-Car S4-8
+ - enum:
+ - renesas,r8a779f0-pcie # R-Car S4-8
+ - renesas,r8a779g0-pcie # R-Car V4H
- const: renesas,rcar-gen4-pcie # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
index b6a7cb32f61e..666f013e3af8 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
@@ -12,7 +12,7 @@ maintainers:
- Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
allOf:
- - $ref: pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
@@ -77,6 +77,9 @@ properties:
vpcie12v-supply:
description: The 12v regulator to use for PCIe.
+ iommu-map: true
+ iommu-map-mask: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
index 5a0d64d3ae6b..b288cdb1ec70 100644
--- a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
+++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
@@ -110,7 +110,7 @@ required:
- "#interrupt-cells"
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- if:
properties:
diff --git a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
index 531008f0b6ac..720a5f945a4e 100644
--- a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
@@ -10,7 +10,7 @@ maintainers:
- Shawn Lin <shawn.lin@rock-chips.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: rockchip,rk3399-pcie-common.yaml#
properties:
@@ -37,6 +37,7 @@ properties:
description: This property is needed if using 24MHz OSC for RC's PHY.
ep-gpios:
+ maxItems: 1
description: pre-reset GPIO
vpcie12v-supply:
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
index 022055edbf9e..548f59d76ef2 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
@@ -23,7 +23,7 @@ select:
- compatible
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/pci/snps,dw-pcie-common.yaml#
- if:
not:
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
index a20dccbafd94..0a9d10532cc8 100644
--- a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
@@ -11,7 +11,7 @@ maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
@@ -55,6 +55,20 @@ properties:
dma-coherent: true
+ num-viewport:
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ phys:
+ description: per-lane PHYs
+ minItems: 1
+ maxItems: 2
+
+ phy-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ pattern: '^pcie-phy[0-1]$'
+
required:
- compatible
- reg
@@ -74,6 +88,7 @@ then:
- dma-coherent
- power-domains
- msi-map
+ - num-viewport
unevaluatedProperties: false
@@ -81,6 +96,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
pcie0_rc: pcie@5500000 {
@@ -98,9 +114,13 @@ examples:
ti,syscon-pcie-id = <&scm_conf 0x0210>;
ti,syscon-pcie-mode = <&scm_conf 0x4060>;
bus-range = <0x0 0xff>;
+ num-viewport = <16>;
max-link-speed = <2>;
dma-coherent;
interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
device_type = "pci";
+ num-lanes = <1>;
+ phys = <&serdes0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy0";
};
diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
index b7a534cef24d..15a2658ceeef 100644
--- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
@@ -23,6 +23,10 @@ properties:
items:
- const: ti,j7200-pcie-host
- const: ti,j721e-pcie-host
+ - description: PCIe controller in J722S
+ items:
+ - const: ti,j722s-pcie-host
+ - const: ti,j721e-pcie-host
reg:
maxItems: 4
@@ -68,6 +72,7 @@ properties:
- 0xb00d
- 0xb00f
- 0xb010
+ - 0xb012
- 0xb013
msi-map: true
diff --git a/Documentation/devicetree/bindings/pci/versatile.yaml b/Documentation/devicetree/bindings/pci/versatile.yaml
index 09748ef6b94f..294c7cd84b37 100644
--- a/Documentation/devicetree/bindings/pci/versatile.yaml
+++ b/Documentation/devicetree/bindings/pci/versatile.yaml
@@ -13,7 +13,7 @@ description: |+
PCI host controller found on the ARM Versatile PB board's FPGA.
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index 4734be456bde..4770ce02fcc3 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -10,7 +10,7 @@ maintainers:
- Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
@@ -48,13 +48,16 @@ properties:
interrupt-controller:
description: Interrupt controller node for handling legacy PCI interrupts.
type: object
+ additionalProperties: false
+
properties:
"#address-cells":
const: 0
+
"#interrupt-cells":
const: 1
- "interrupt-controller": true
- additionalProperties: false
+
+ interrupt-controller: true
required:
- reg
diff --git a/Documentation/devicetree/bindings/pci/xlnx,axi-pcie-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,axi-pcie-host.yaml
index 69b7decabd45..fb87b960a250 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,axi-pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,axi-pcie-host.yaml
@@ -10,7 +10,7 @@ maintainers:
- Thippeswamy Havalige <thippeswamy.havalige@amd.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
index 426f90a47f35..9cad860c51a3 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
@@ -10,7 +10,7 @@ maintainers:
- Thippeswamy Havalige <thippeswamy.havalige@amd.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
- $ref: /schemas/interrupt-controller/msi-controller.yaml#
properties:
@@ -84,7 +84,7 @@ properties:
"#interrupt-cells":
const: 1
- "interrupt-controller": true
+ interrupt-controller: true
required:
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
index 0aa00b8e49b3..2f59b3a73dd2 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
@@ -10,7 +10,7 @@ maintainers:
- Thippeswamy Havalige <thippeswamy.havalige@amd.com>
allOf:
- - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
index 8467c8e6368c..439bda142764 100644
--- a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
@@ -59,14 +59,14 @@ patternProperties:
"#phy-cells":
const: 0
- "brcm,enable-ssc":
+ brcm,enable-ssc:
$ref: /schemas/types.yaml#/definitions/flag
description: |
Use spread spectrum clocking (SSC) on this port
This property is not applicable for "brcm,iproc-ns2-sata-phy",
"brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
- "brcm,rxaeq-mode":
+ brcm,rxaeq-mode:
$ref: /schemas/types.yaml#/definitions/string
description:
String that indicates the desired RX equalizer mode.
@@ -75,7 +75,7 @@ patternProperties:
- auto
- manual
- "brcm,rxaeq-value":
+ brcm,rxaeq-value:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
When 'brcm,rxaeq-mode' is set to "manual", provides the RX
@@ -83,7 +83,7 @@ patternProperties:
minimum: 0
maximum: 63
- "brcm,tx-amplitude-millivolt":
+ brcm,tx-amplitude-millivolt:
description: |
Transmit amplitude voltage in millivolt.
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mp-hdmi-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mp-hdmi-phy.yaml
new file mode 100644
index 000000000000..c43e86a8c2e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mp-hdmi-phy.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/fsl,imx8mp-hdmi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8MP HDMI PHY
+
+maintainers:
+ - Lucas Stach <l.stach@pengutronix.de>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8mp-hdmi-phy
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 0
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: apb
+ - const: ref
+
+ "#phy-cells":
+ const: 0
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+ - clock-names
+ - "#phy-cells"
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mp-clock.h>
+ #include <dt-bindings/power/imx8mp-power.h>
+
+ phy@32fdff00 {
+ compatible = "fsl,imx8mp-hdmi-phy";
+ reg = <0x32fdff00 0x100>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>,
+ <&clk IMX8MP_CLK_HDMI_24M>;
+ clock-names = "apb", "ref";
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_HDMI_TX_PHY>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml
new file mode 100644
index 000000000000..cfb3ca97f87c
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,mt7988-xfi-tphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT7988 XFI T-PHY
+
+maintainers:
+ - Daniel Golle <daniel@makrotopia.org>
+
+description:
+ The MediaTek XFI SerDes T-PHY provides the physical SerDes lanes
+ used by the (10G/5G) USXGMII PCS and (1G/2.5G) LynxI PCS found in
+  MediaTek's 10G-capable MT7988 SoC.
+ In MediaTek's SDK sources, this unit is referred to as "pextp".
+
+properties:
+ compatible:
+ const: mediatek,mt7988-xfi-tphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XFI PHY clock
+ - description: XFI register clock
+
+ clock-names:
+ items:
+ - const: xfipll
+ - const: topxtal
+
+ resets:
+ items:
+ - description: Reset controller corresponding to the phy instance.
+
+ mediatek,usxgmii-performance-errata:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ One instance of the T-PHY on MT7988 suffers from a performance
+ problem in 10GBase-R mode which needs a work-around in the driver.
+      This flag enables a work-around adjusting an analog phy setting and
+ is required for XFI Port0 of the MT7988 SoC to be in compliance with
+ the SFP specification.
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mediatek,mt7988-clk.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ phy@11f20000 {
+ compatible = "mediatek,mt7988-xfi-tphy";
+ reg = <0 0x11f20000 0 0x10000>;
+ clocks = <&xfi_pll CLK_XFIPLL_PLL_EN>,
+ <&topckgen CLK_TOP_XFI_PHY_0_XTAL_SEL>;
+ clock-names = "xfipll", "topxtal";
+ resets = <&watchdog 14>;
+ mediatek,usxgmii-performance-errata;
+ #phy-cells = <0>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml
new file mode 100644
index 000000000000..1f1f8863b80d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/phy-rockchip-usbdp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip USBDP Combo PHY with Samsung IP block
+
+maintainers:
+ - Frank Wang <frank.wang@rock-chips.com>
+ - Zhang Yubing <yubing.zhang@rock-chips.com>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3588-usbdp-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ description: |
+ Cell allows setting the type of the PHY. Possible values are:
+ - PHY_TYPE_USB3
+ - PHY_TYPE_DP
+ const: 1
+
+ clocks:
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: refclk
+ - const: immortal
+ - const: pclk
+ - const: utmi
+
+ resets:
+ maxItems: 5
+
+ reset-names:
+ items:
+ - const: init
+ - const: cmn
+ - const: lane
+ - const: pcs_apb
+ - const: pma_apb
+
+ rockchip,dp-lane-mux:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 4
+ items:
+ maximum: 3
+ description:
+      An array of physical Type-C lane indexes. The position of an entry
+      determines the DisplayPort (DP) lane index, while the value of an entry
+      indicates the physical Type-C lane. The supported number of DP lanes is
+      2 or 4. E.g. for a 2-lane DP mapping we could have
+      "rockchip,dp-lane-mux = <2 3>;", assuming DP lane0 on Type-C phy lane2
+      and DP lane1 on Type-C phy lane3. For a 4-lane DP mapping we could have
+      "rockchip,dp-lane-mux = <0 1 2 3>;", assuming DP lane0 on Type-C phy
+      lane0, DP lane1 on Type-C phy lane1, DP lane2 on Type-C phy lane2 and
+      DP lane3 on Type-C phy lane3. If the DP lanes are mapped by DisplayPort
+      Alt mode, this property is not needed.
+
+ rockchip,u2phy-grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'usb2 phy general register files'.
+
+ rockchip,usb-grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'usb general register files'.
+
+ rockchip,usbdpphy-grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'usbdp phy general register files'.
+
+ rockchip,vo-grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'video output general register files'.
+      Required when the DP lane mapping is selected.
+
+ sbu1-dc-gpios:
+ description:
+ GPIO connected to the SBU1 line of the USB-C connector via a big resistor
+ (~100K) to apply a DC offset for signalling the connector orientation.
+ maxItems: 1
+
+ sbu2-dc-gpios:
+ description:
+ GPIO connected to the SBU2 line of the USB-C connector via a big resistor
+ (~100K) to apply a DC offset for signalling the connector orientation.
+ maxItems: 1
+
+ orientation-switch:
+ description: Flag the port as possible handler of orientation switching
+ type: boolean
+
+ mode-switch:
+ description: Flag the port as possible handler of altmode switching
+ type: boolean
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ A port node to link the PHY to a TypeC controller for the purpose of
+ handling orientation switching.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/reset/rockchip,rk3588-cru.h>
+
+ usbdp_phy0: phy@fed80000 {
+ compatible = "rockchip,rk3588-usbdp-phy";
+ reg = <0xfed80000 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
+ <&cru CLK_USBDP_PHY0_IMMORTAL>,
+ <&cru PCLK_USBDPPHY0>,
+ <&u2phy0>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY0_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY0_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY0_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY0_PCS>,
+ <&cru SRST_P_USBDPPHY0>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy0_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy0_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ };
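The example node above omits the optional DP lane mux and the Type-C port graph described earlier in this schema. Purely as an illustrative sketch (the connector endpoint label usbc0_ss and the lane assignment are assumptions), a board could extend it roughly like this:

    &usbdp_phy0 {
        orientation-switch;
        /* DP lane 0 on Type-C phy lane 2, DP lane 1 on Type-C phy lane 3 */
        rockchip,dp-lane-mux = <2 3>;

        port {
            usbdp_phy0_typec_ss: endpoint {
                remote-endpoint = <&usbc0_ss>; /* assumed Type-C connector endpoint */
            };
        };
    };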
diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
index 24a3dbde223b..ceea122ae1a6 100644
--- a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
@@ -55,6 +55,10 @@ properties:
description: number of clock cells for ck_usbo_48m consumer
const: 0
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
# Required child nodes:
patternProperties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
index 6566353f1a02..4e15d90d08b0 100644
--- a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,sc8180x-edp-phy
- qcom,sc8280xp-dp-phy
- qcom,sc8280xp-edp-phy
+ - qcom,x1e80100-dp-phy
reg:
items:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index ba966a78a128..16634f73bdcf 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -88,11 +88,11 @@ properties:
- description: offset of PCIe 4-lane configuration register
- description: offset of configuration bit for this PHY
- "#clock-cells":
- const: 0
+ "#clock-cells": true
clock-output-names:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
"#phy-cells":
const: 0
@@ -198,7 +198,6 @@ allOf:
enum:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
- - qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
then:
properties:
@@ -213,6 +212,27 @@ allOf:
reset-names:
maxItems: 1
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8450-qmp-gen4x2-pcie-phy
+ - qcom,sm8550-qmp-gen4x2-pcie-phy
+ - qcom,sm8650-qmp-gen4x2-pcie-phy
+ then:
+ properties:
+ clock-output-names:
+ minItems: 2
+ "#clock-cells":
+ const: 1
+ else:
+ properties:
+ clock-output-names:
+ maxItems: 1
+ "#clock-cells":
+ const: 0
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sc8280xp.h>
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index 91a6cc38ff7f..f9cfbd0b2de6 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -32,6 +32,7 @@ properties:
- qcom,sm8250-qmp-ufs-phy
- qcom,sm8350-qmp-ufs-phy
- qcom,sm8450-qmp-ufs-phy
+ - qcom,sm8475-qmp-ufs-phy
- qcom,sm8550-qmp-ufs-phy
- qcom,sm8650-qmp-ufs-phy
@@ -71,7 +72,6 @@ required:
- reg
- clocks
- clock-names
- - power-domains
- resets
- reset-names
- vdda-phy-supply
@@ -86,6 +86,7 @@ allOf:
enum:
- qcom,msm8998-qmp-ufs-phy
- qcom,sa8775p-qmp-ufs-phy
+ - qcom,sc7180-qmp-ufs-phy
- qcom,sc7280-qmp-ufs-phy
- qcom,sc8180x-qmp-ufs-phy
- qcom,sc8280xp-qmp-ufs-phy
@@ -98,6 +99,7 @@ allOf:
- qcom,sm8250-qmp-ufs-phy
- qcom,sm8350-qmp-ufs-phy
- qcom,sm8450-qmp-ufs-phy
+ - qcom,sm8475-qmp-ufs-phy
- qcom,sm8550-qmp-ufs-phy
- qcom,sm8650-qmp-ufs-phy
then:
@@ -127,6 +129,21 @@ allOf:
- const: ref
- const: qref
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-ufs-phy
+ - qcom,msm8998-qmp-ufs-phy
+ then:
+ properties:
+ power-domains:
+ false
+ else:
+ required:
+ - power-domains
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index 1e2d4ddc5391..325585bc881b 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
- qcom,msm8996-qmp-usb3-phy
+      - qcom,qdu1000-qmp-usb3-uni-phy
- qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
- qcom,sdm845-qmp-usb3-uni-phy
@@ -109,6 +110,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qdu1000-qmp-usb3-uni-phy
- qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
- qcom,sm8150-qmp-usb3-uni-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index 24c733c10e0e..90d79491e281 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -20,7 +20,9 @@ properties:
- enum:
- qcom,pm7550ba-eusb2-repeater
- const: qcom,pm8550b-eusb2-repeater
- - const: qcom,pm8550b-eusb2-repeater
+ - enum:
+ - qcom,pm8550b-eusb2-repeater
+ - qcom,smb2360-eusb2-repeater
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index 0f200e3f97a9..519c2b403f66 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -15,9 +15,6 @@ description: |
properties:
compatible:
oneOf:
- - enum:
- - qcom,sc8180x-usb-hs-phy
- - qcom,usb-snps-femto-v2-phy
- items:
- enum:
- qcom,sa8775p-usb-hs-phy
@@ -25,7 +22,9 @@ properties:
- const: qcom,usb-snps-hs-5nm-phy
- items:
- enum:
+ - qcom,qdu1000-usb-hs-phy
- qcom,sc7280-usb-hs-phy
+ - qcom,sc8180x-usb-hs-phy
- qcom,sdx55-usb-hs-phy
- qcom,sdx65-usb-hs-phy
- qcom,sm6375-usb-hs-phy
diff --git a/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
index c4fbffcde6e4..ba67dca5a446 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
@@ -54,6 +54,16 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the syscon managing the pipe "general register files"
+ rockchip,rx-common-refclk-mode:
+ description: which lanes (by position) should be configured to run in
+ RX common reference clock mode. 0 means disabled, 1 means enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 16
+ items:
+ minimum: 0
+ maximum: 1
+
required:
- compatible
- reg
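For illustration only (the node label and per-lane values are assumptions, not taken from a real board), the new per-lane property added above could be set from a board DTS roughly like this:

    &pcie30phy {
        /* run lanes 2 and 3 in RX common reference clock mode, leave lanes 0 and 1 as-is */
        rockchip,rx-common-refclk-mode = <0 0 1 1>;
    };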
diff --git a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
index 782f975b43ae..f402e31bf58d 100644
--- a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
@@ -15,6 +15,7 @@ properties:
compatible:
enum:
+ - google,gs101-ufs-phy
- samsung,exynos7-ufs-phy
- samsung,exynosautov9-ufs-phy
- tesla,fsd-ufs-phy
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
index bd72a326e6e0..d74cae9d4d65 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
@@ -34,6 +34,9 @@ properties:
the amount of cells must be specified as 2. See the below mentioned gpio
binding representation for description of particular cells.
+ gpio-ranges:
+ maxItems: 1
+
interrupt-controller: true
interrupts:
@@ -75,8 +78,8 @@ patternProperties:
function:
description:
A string containing the name of the function to mux to the group.
- enum: [emmc, eth, i2c, i2s, ir, led, flash, pcie, pmic, pwm, sd,
- spi, tdm, uart, watchdog, wifi]
+ enum: [antsel, emmc, eth, i2c, i2s, ir, led, flash, pcie, pmic, pwm,
+ sd, spi, tdm, uart, watchdog, wifi]
groups:
description:
@@ -93,11 +96,26 @@ patternProperties:
- if:
properties:
function:
+ const: antsel
+ then:
+ properties:
+ groups:
+ items:
+ enum: [antsel0, antsel1, antsel2, antsel3, antsel4, antsel5,
+ antsel6, antsel7, antsel8, antsel9, antsel10,
+ antsel11, antsel12, antsel13, antsel14, antsel15,
+ antsel16, antsel17, antsel18, antsel19, antsel20,
+ antsel21, antsel22, antsel23, antsel24, antsel25,
+ antsel26, antsel27, antsel28, antsel29]
+ - if:
+ properties:
+ function:
const: emmc
then:
properties:
groups:
- enum: [emmc, emmc_rst]
+ items:
+ enum: [emmc, emmc_rst]
- if:
properties:
function:
@@ -105,8 +123,9 @@ patternProperties:
then:
properties:
groups:
- enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
- rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
+ items:
+ enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
+ rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
- if:
properties:
function:
@@ -123,10 +142,11 @@ patternProperties:
then:
properties:
groups:
- enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
- i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
- i2s1_out_data, i2s2_out_data, i2s3_out_data,
- i2s4_out_data]
+ items:
+ enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
+ i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
+ i2s1_out_data, i2s2_out_data, i2s3_out_data,
+ i2s4_out_data]
- if:
properties:
function:
@@ -159,10 +179,11 @@ patternProperties:
then:
properties:
groups:
- enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
- pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
- pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
- pcie_wake, pcie_clkreq]
+ items:
+ enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
+ pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
+ pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
+ pcie_wake, pcie_clkreq]
- if:
properties:
function:
@@ -178,11 +199,12 @@ patternProperties:
then:
properties:
groups:
- enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
- pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
- pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
- pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
- pwm_ch7_0, pwm_0, pwm_1]
+ items:
+ enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
+ pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
+ pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
+ pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
+ pwm_ch7_0, pwm_0, pwm_1]
- if:
properties:
function:
@@ -260,33 +282,34 @@ patternProperties:
pins:
description:
An array of strings. Each string contains the name of a pin.
- enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
- RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
- I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
- I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
- G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
- G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
- NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
- MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
- MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
- MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
- MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
- PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
- GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
- PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
- AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
- PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
- WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
- WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
- EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
- EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
- WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
- UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
- UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
- PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
- GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
- TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
- WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
+ items:
+ enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
+ RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
+ I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
+ I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
+ G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
+ G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
+ NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
+ MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
+ MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
+ MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
+ MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
+ PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
+ GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
+ PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
+ AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
+ PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
+ WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
+ WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
+ EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
+ EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
+ WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
+ UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
+ UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
+ PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
+ GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
+ TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
+ WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
bias-disable: true
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 3f8ad07c7cfd..50846a2d09c8 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -24,6 +24,7 @@ properties:
- qcom,pm6150-gpio
- qcom,pm6150l-gpio
- qcom,pm6350-gpio
+ - qcom,pm6450-gpio
- qcom,pm7250b-gpio
- qcom,pm7325-gpio
- qcom,pm7550ba-gpio
@@ -56,10 +57,12 @@ properties:
- qcom,pma8084-gpio
- qcom,pmc8180-gpio
- qcom,pmc8180c-gpio
+ - qcom,pmd8028-gpio
- qcom,pmi632-gpio
- qcom,pmi8950-gpio
- qcom,pmi8994-gpio
- qcom,pmi8998-gpio
+ - qcom,pmih0108-gpio
- qcom,pmk8350-gpio
- qcom,pmk8550-gpio
- qcom,pmm8155au-gpio
@@ -72,6 +75,7 @@ properties:
- qcom,pmx55-gpio
- qcom,pmx65-gpio
- qcom,pmx75-gpio
+ - qcom,pmxr2230-gpio
- enum:
- qcom,spmi-gpio
@@ -141,6 +145,7 @@ allOf:
- qcom,pm8005-gpio
- qcom,pm8450-gpio
- qcom,pm8916-gpio
+ - qcom,pmd8028-gpio
- qcom,pmk8350-gpio
- qcom,pmr735a-gpio
- qcom,pmr735b-gpio
@@ -198,6 +203,7 @@ allOf:
contains:
enum:
- qcom,pm6350-gpio
+ - qcom,pm6450-gpio
- qcom,pm8350c-gpio
then:
properties:
@@ -261,6 +267,7 @@ allOf:
- qcom,pmc8180c-gpio
- qcom,pmp8074-gpio
- qcom,pms405-gpio
+ - qcom,pmxr2230-gpio
then:
properties:
gpio-line-names:
@@ -305,6 +312,21 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pmih0108-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 18
+ maxItems: 18
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 9
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,pmx65-gpio
- qcom,pmx75-gpio
then:
@@ -402,6 +424,10 @@ patternProperties:
$ref: "#/$defs/qcom-pmic-gpio-state"
additionalProperties: false
+ "-hog(-[0-9]+)?$":
+ required:
+ - gpio-hog
+
$defs:
qcom-pmic-gpio-state:
type: object
@@ -417,6 +443,7 @@ $defs:
- gpio1-gpio10 for pm6150
- gpio1-gpio12 for pm6150l
- gpio1-gpio9 for pm6350
+ - gpio1-gpio9 for pm6450
- gpio1-gpio12 for pm7250b
- gpio1-gpio10 for pm7325
- gpio1-gpio8 for pm7550ba
@@ -447,9 +474,11 @@ $defs:
- gpio1-gpio22 for pm8994
- gpio1-gpio26 for pm8998
- gpio1-gpio22 for pma8084
+ - gpio1-gpio4 for pmd8028
- gpio1-gpio8 for pmi632
- gpio1-gpio2 for pmi8950
- gpio1-gpio10 for pmi8994
+ - gpio1-gpio18 for pmih0108
- gpio1-gpio4 for pmk8350
- gpio1-gpio6 for pmk8550
- gpio1-gpio10 for pmm8155au
@@ -464,6 +493,7 @@ $defs:
and gpio11)
- gpio1-gpio16 for pmx65
- gpio1-gpio16 for pmx75
+ - gpio1-gpio12 for pmxr2230
items:
pattern: "^gpio([0-9]+)$"
@@ -545,6 +575,7 @@ $defs:
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
pm8921_gpio: gpio@150 {
@@ -568,5 +599,12 @@ examples:
power-source = <PM8921_GPIO_S4>;
};
};
+
+ otg-hog {
+ gpio-hog;
+ gpios = <35 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "otg-gpio";
+ };
};
...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml
index fe717d8d4798..43146709e204 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml
@@ -35,6 +35,7 @@ properties:
- qcom,pm8038-mpp
- qcom,pm8058-mpp
- qcom,pm8821-mpp
+ - qcom,pm8901-mpp
- qcom,pm8917-mpp
- qcom,pm8921-mpp
- const: qcom,ssbi-mpp
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
index bb675c8ec220..1b941b276b3f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
@@ -72,40 +72,24 @@ $defs:
description:
Specify the alternative function to be configured for the specified
pins.
- enum: [ gpio, atest_char, atest_char0, atest_char1, atest_char2,
- atest_char3, atest_usb0, atest_usb00, atest_usb01, atest_usb02,
- atest_usb03, audio_ref, cam_mclk, cci_async, cci_i2c,
- cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
- cmu_rng0, cmu_rng1, cmu_rng2, cmu_rng3, coex_uart1, cri_trng,
- cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
- dp0_hot, gcc_gp1, gcc_gp2, gcc_gp3, host2wlan_sol, ibi_i3c,
- jitter_bist, mdp_vsync, mdp_vsync0, mdp_vsync1, mdp_vsync2,
- mdp_vsync3, mi2s0_data0, mi2s0_data1, mi2s0_sck, mi2s0_ws,
- mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, mi2s_mclk0,
- mi2s_mclk1, nav_gpio0, nav_gpio1, nav_gpio2, pcie0_clk,
- phase_flag0, phase_flag1, phase_flag10, phase_flag11,
- phase_flag12, phase_flag13, phase_flag14, phase_flag15,
- phase_flag16, phase_flag17, phase_flag18, phase_flag19,
- phase_flag2, phase_flag20, phase_flag21, phase_flag22,
- phase_flag23, phase_flag24, phase_flag25, phase_flag26,
- phase_flag27, phase_flag28, phase_flag29, phase_flag3,
- phase_flag30, phase_flag31, phase_flag4, phase_flag5,
- phase_flag6, phase_flag7, phase_flag8, phase_flag9,
- pll_bist, pll_clk, prng_rosc0, prng_rosc1, prng_rosc2,
- prng_rosc3, qdss_cti, qdss_gpio, qdss_gpio0, qdss_gpio1,
- qdss_gpio10, qdss_gpio11, qdss_gpio12, qdss_gpio13, qdss_gpio14,
- qdss_gpio15, qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5,
- qdss_gpio6, qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink0_enable,
- qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
- qlink1_wmss, qlink2_enable, qlink2_request, qlink2_wmss,
- qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4, qup0_se5,
- qup0_se6, qup0_se7, qup1_se0, qup1_se1, qup1_se2, qup1_se3,
- qup1_se4, qup1_se5, qup1_se6, sd_write, tb_trig, tgu_ch0,
- tgu_ch1, tgu_ch2, tgu_ch3, tmess_prng0, tmess_prng1,
- tmess_prng2, tmess_prng3, tsense_pwm1, tsense_pwm2, uim0_clk,
- uim0_data, uim0_present, uim0_reset, uim1_clk, uim1_data,
- uim1_present, uim1_reset, usb0_hs, usb0_phy, vfr_0, vfr_1,
- vsense_trigger ]
+ enum: [ gpio, atest_char, atest_usb0, audio_ref_clk, cam_mclk,
+ cci_async_in0, cci_i2c, cci, cmu_rng, coex_uart1_rx,
+ coex_uart1_tx, cri_trng, dbg_out_clk, ddr_bist,
+ ddr_pxi0_test, ddr_pxi1_test, gcc_gp1_clk, gcc_gp2_clk,
+ gcc_gp3_clk, host2wlan_sol, ibi_i3c_qup0, ibi_i3c_qup1,
+ jitter_bist_ref, mdp_vsync0_out, mdp_vsync1_out,
+ mdp_vsync2_out, mdp_vsync3_out, mdp_vsync, nav,
+ pcie0_clk_req, phase_flag, pll_bist_sync, pll_clk_aux,
+ prng_rosc, qdss_cti_trig0, qdss_cti_trig1, qdss_gpio,
+ qlink0_enable, qlink0_request, qlink0_wmss_reset,
+ qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4,
+ qup1_se0, qup1_se1, qup1_se2, qup1_se2_l2, qup1_se3,
+ qup1_se4, sd_write_protect, tb_trig_sdc1, tb_trig_sdc2,
+ tgu_ch0_trigout, tgu_ch1_trigout, tgu_ch2_trigout,
+ tgu_ch3_trigout, tmess_prng, tsense_pwm1_out,
+ tsense_pwm2_out, uim0, uim1, usb0_hs_ac, usb0_phy_ps,
+ vfr_0_mira, vfr_0_mirb, vfr_1, vsense_trigger_mirnat,
+ wlan1_adc_dtest0, wlan1_adc_dtest1 ]
required:
- pins
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index d476de82e5c3..4d5a957fa232 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -120,7 +120,9 @@ additionalProperties:
slew-rate: true
gpio-hog: true
gpios: true
+ input: true
input-enable: true
+ output-enable: true
output-high: true
output-low: true
line-name: true
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
index 118549c25976..242dd13c276b 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
@@ -73,6 +73,13 @@ properties:
minItems: 1
maxItems: 2
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+
wakeup-interrupt-controller:
$ref: samsung,pinctrl-wakeup-interrupt.yaml
@@ -124,6 +131,20 @@ allOf:
properties:
compatible:
contains:
+ const: google,gs101-pinctrl
+ then:
+ required:
+ - clocks
+ - clock-names
+ else:
+ properties:
+ clocks: false
+ clock-names: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: samsung,exynos5433-pinctrl
then:
properties:
diff --git a/Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml b/Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml
new file mode 100644
index 000000000000..7cb0134134ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/platform/acer,aspire1-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Acer Aspire 1 Embedded Controller
+
+maintainers:
+ - Nikita Travkin <nikita@trvn.ru>
+
+description:
+ The Acer Aspire 1 laptop uses an embedded controller to control battery
+ and charging as well as to provide a set of misc features such as the
+ laptop lid status and HPD events for the USB Type-C DP alt mode.
+
+properties:
+ compatible:
+ const: acer,aspire1-ec
+
+ reg:
+ const: 0x76
+
+ interrupts:
+ maxItems: 1
+
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ embedded-controller@76 {
+ compatible = "acer,aspire1-ec";
+ reg = <0x76>;
+
+ interrupts-extended = <&tlmm 30 IRQ_TYPE_LEVEL_LOW>;
+
+ connector {
+ compatible = "usb-c-connector";
+
+ port {
+ ec_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp_out>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
index a8d625f285f1..86af38378999 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
@@ -34,7 +34,7 @@ properties:
flt-gpios:
maxItems: 1
- description: Fault pin (active low, output)
+ description: Fault pin (active low, input)
dcm-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
index d84268b59784..96cd6f3c3546 100644
--- a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
@@ -25,6 +25,9 @@ properties:
- items:
- const: microchip,sama7g5-pwm
- const: atmel,sama5d2-pwm
+ - items:
+ - const: microchip,sam9x7-pwm
+ - const: microchip,sam9x60-pwm
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
index 3afe1480df52..f7bc84b05a87 100644
--- a/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
@@ -35,7 +35,6 @@ properties:
required:
- compatible
- - '#pwm-cells'
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pwm/marvell,pxa-pwm.yaml b/Documentation/devicetree/bindings/pwm/marvell,pxa-pwm.yaml
index ba6325575ea0..9ee1946dc2e1 100644
--- a/Documentation/devicetree/bindings/pwm/marvell,pxa-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/marvell,pxa-pwm.yaml
@@ -34,7 +34,6 @@ properties:
required:
- compatible
- reg
- - "#pwm-cells"
- clocks
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml b/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
index a5c308801619..d515c09e1021 100644
--- a/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
@@ -66,7 +66,6 @@ properties:
required:
- compatible
- reg
- - "#pwm-cells"
- clocks
- clock-names
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
index afcdeed4e88a..195e4371196b 100644
--- a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
+++ b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
@@ -31,6 +31,7 @@ properties:
- mediatek,mt8188-disp-pwm
- mediatek,mt8192-disp-pwm
- mediatek,mt8195-disp-pwm
+ - mediatek,mt8365-disp-pwm
- const: mediatek,mt8183-disp-pwm
reg:
@@ -52,10 +53,12 @@ properties:
- const: main
- const: mm
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
- - "#pwm-cells"
- clocks
- clock-names
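The new optional power-domains entry would typically be used together with the mt8365 compatible added above; a hedged sketch where the unit address, clock specifiers and power-domain index are purely illustrative:

    pwm@1100e000 {
        compatible = "mediatek,mt8365-disp-pwm", "mediatek,mt8183-disp-pwm";
        reg = <0x1100e000 0x1000>;
        #pwm-cells = <2>;
        clocks = <&topckgen 10>, <&infracfg 11>;   /* specifiers are illustrative */
        clock-names = "main", "mm";
        power-domains = <&spm 3>;                  /* index is illustrative */
    };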
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml
index 15e7fd98defc..9dc25f38fb94 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml
@@ -29,7 +29,6 @@ required:
- compatible
- reg
- clocks
- - "#pwm-cells"
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pwm/snps,dw-apb-timers-pwm2.yaml b/Documentation/devicetree/bindings/pwm/snps,dw-apb-timers-pwm2.yaml
index 4d0b5964443d..7523a89a1773 100644
--- a/Documentation/devicetree/bindings/pwm/snps,dw-apb-timers-pwm2.yaml
+++ b/Documentation/devicetree/bindings/pwm/snps,dw-apb-timers-pwm2.yaml
@@ -51,7 +51,6 @@ properties:
required:
- compatible
- reg
- - "#pwm-cells"
- clocks
- clock-names
diff --git a/Documentation/devicetree/bindings/regulator/allwinner,sun20i-d1-system-ldos.yaml b/Documentation/devicetree/bindings/regulator/allwinner,sun20i-d1-system-ldos.yaml
new file mode 100644
index 000000000000..ec6695c8d2e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/allwinner,sun20i-d1-system-ldos.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/allwinner,sun20i-d1-system-ldos.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner D1 System LDOs
+
+maintainers:
+ - Samuel Holland <samuel@sholland.org>
+
+description:
+ Allwinner D1 contains a pair of general-purpose LDOs which are designed to
+ supply power inside and outside the SoC. They are controlled by a register
+ within the system control MMIO space.
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun20i-d1-system-ldos
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ "^ldo[ab]$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+...
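Since this new binding lands without an example, here is a minimal sketch of how such a node might be instantiated under the D1 system control block; the unit address, register offset, regulator names and voltages are assumptions for illustration only:

    regulators@3000150 {
        compatible = "allwinner,sun20i-d1-system-ldos";
        reg = <0x3000150 0x4>;   /* offset is illustrative */

        reg_ldoa: ldoa {
            regulator-min-microvolt = <1800000>;
            regulator-max-microvolt = <1800000>;
        };

        reg_ldob: ldob {
            regulator-name = "vcc-dram";
            regulator-min-microvolt = <1500000>;
            regulator-max-microvolt = <1500000>;
        };
    };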
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index 9ff9abf2691a..51e2f6fb7a5a 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -41,6 +41,13 @@ allOf:
- gpios
properties:
+ $nodename:
+ anyOf:
+ - description: Preferred name is 'regulator-[0-9]v[0-9]'
+ pattern: '^regulator(-[0-9]+v[0-9]+|-[0-9a-z-]+)?$'
+ - description: Any name allowed
+ deprecated: true
+
compatible:
enum:
- regulator-fixed
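To illustrate the preferred node naming added above, a fixed regulator would now typically be written as in this sketch; the voltage, GPIO and supply name are made-up values:

    regulator-3v3 {
        compatible = "regulator-fixed";
        regulator-name = "vcc-3v3";
        regulator-min-microvolt = <3300000>;
        regulator-max-microvolt = <3300000>;
        gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
        enable-active-high;
    };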
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
index 3d469b8e9774..849bfa50bdba 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
@@ -28,6 +28,7 @@ properties:
- nxp,pca9450a
- nxp,pca9450b
- nxp,pca9450c
+ - nxp,pca9451a
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
index 33ae1f786802..fcefc722ee2a 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
@@ -26,6 +26,7 @@ properties:
- enum:
- qcom,pm4125-vbus-reg
- qcom,pm6150-vbus-reg
+ - qcom,pm7250b-vbus-reg
- qcom,pmi632-vbus-reg
- const: qcom,pm8150b-vbus-reg
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml
index 05f4ad2c7d3a..6ceaffb45dc9 100644
--- a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml
+++ b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml
@@ -30,6 +30,10 @@ properties:
vdda-supply:
description: phandle to the vdda input analog voltage.
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/regulator/ti,tps62864.yaml b/Documentation/devicetree/bindings/regulator/ti,tps62864.yaml
index 0f29c75f42ea..dddea27596e9 100644
--- a/Documentation/devicetree/bindings/regulator/ti,tps62864.yaml
+++ b/Documentation/devicetree/bindings/regulator/ti,tps62864.yaml
@@ -24,7 +24,7 @@ properties:
type: object
properties:
- "SW":
+ SW:
type: object
$ref: regulator.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
index 507f98f73d23..c5dc3c2820d7 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
@@ -19,6 +19,7 @@ properties:
- mediatek,mt8183-scp
- mediatek,mt8186-scp
- mediatek,mt8188-scp
+ - mediatek,mt8188-scp-dual
- mediatek,mt8192-scp
- mediatek,mt8195-scp
- mediatek,mt8195-scp-dual
@@ -194,6 +195,7 @@ allOf:
properties:
compatible:
enum:
+ - mediatek,mt8188-scp-dual
- mediatek,mt8195-scp-dual
then:
properties:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
index 971734085d51..4d2055f283ac 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
@@ -231,7 +231,6 @@ allOf:
- const: snoc_axi
- const: mnoc_axi
- const: qdss
- glink-edge: false
required:
- pll-supply
- smd-edge
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
index 06f5f93f62a9..bca59394aef4 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
@@ -81,7 +81,11 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Phandle reference to a syscon representing TCSR followed by the
- three offsets within syscon for q6, modem and nc halt registers.
+ offset within syscon for q6 halt register.
+ items:
+ - items:
+ - description: phandle to TCSR syscon region
+ - description: offset to the Q6 halt register
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
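With the description narrowed to a single Q6 halt register, qcom,halt-regs now carries exactly one phandle/offset pair; a hedged sketch, where the &tcsr label and the offset are illustrative rather than taken from this patch:

    /* one phandle to the TCSR syscon plus the Q6 halt register offset */
    qcom,halt-regs = <&tcsr 0x19000>;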
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
index 9381c7022ff4..f4118b2da5f6 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
@@ -89,7 +89,11 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Phandle reference to a syscon representing TCSR followed by the
- three offsets within syscon for q6, modem and nc halt registers.
+ offset within syscon for q6 halt register.
+ items:
+ - items:
+ - description: phandle to TCSR syscon region
+ - description: offset to the Q6 halt register
qcom,qmp:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
index 20df83a96ef3..a3c74871457f 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
@@ -81,7 +81,11 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Phandle reference to a syscon representing TCSR followed by the
- three offsets within syscon for q6, modem and nc halt registers.
+ offset within syscon for q6 halt register.
+ items:
+ - items:
+ - description: phandle to TCSR syscon region
+ - description: offset to the Q6 halt register
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml
index 02c85b420c1a..63500b1a0f6f 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml
@@ -61,6 +61,7 @@ properties:
description:
Three entries specifying the outgoing ipc bit used for signaling the
remote processor.
+ deprecated: true
qcom,smd-edge:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -111,7 +112,7 @@ examples:
smd-edge {
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 8>;
+ mboxes = <&apcs 8>;
qcom,smd-edge = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
index 25f8658e216f..48a49c516b62 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
@@ -1,9 +1,6 @@
TI Davinci DSP devices
=======================
-Binding status: Unstable - Subject to changes for DT representation of clocks
- and resets
-
The TI Davinci family of SoCs usually contains a TI DSP Core sub-system that
is used to offload some of the processor-intensive tasks or algorithms, for
achieving various system level goals.
diff --git a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
index 78aac69f1060..6f13da11f593 100644
--- a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
@@ -18,11 +18,26 @@ description: |
properties:
compatible:
- const: xlnx,zynqmp-r5fss
+ enum:
+ - xlnx,zynqmp-r5fss
+ - xlnx,versal-r5fss
+ - xlnx,versal-net-r52fss
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ ranges:
+ description: |
+ Standard ranges definition providing address translations for
+ local R5F TCM address spaces to bus addresses.
xlnx,cluster-mode:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
+ default: 1
description: |
The RPU MPCore can operate in split mode (Dual-processor performance), Safety
lock-step mode(Both RPU cores execute the same code in lock-step,
@@ -36,8 +51,16 @@ properties:
1: lockstep mode (default)
2: single cpu mode
+ xlnx,tcm-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description: |
+ Configure RPU TCM
+ 0: split mode
+ 1: lockstep mode
+
patternProperties:
- "^r5f-[a-f0-9]+$":
+ "^r(.*)@[0-9a-f]+$":
type: object
description: |
The RPU is located in the Low Power Domain of the Processor Subsystem.
@@ -52,10 +75,22 @@ patternProperties:
properties:
compatible:
- const: xlnx,zynqmp-r5f
+ enum:
+ - xlnx,zynqmp-r5f
+ - xlnx,versal-r5f
+ - xlnx,versal-net-r52f
+
+ reg:
+ minItems: 1
+ maxItems: 4
+
+ reg-names:
+ minItems: 1
+ maxItems: 4
power-domains:
- maxItems: 1
+ minItems: 2
+ maxItems: 5
mboxes:
minItems: 1
@@ -101,35 +136,235 @@ patternProperties:
required:
- compatible
+ - reg
+ - reg-names
- power-domains
- unevaluatedProperties: false
-
required:
- compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - xlnx,versal-net-r52fss
+ then:
+ properties:
+ xlnx,tcm-mode: false
+
+ patternProperties:
+ "^r52f@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ minItems: 1
+ items:
+ - description: ATCM internal memory
+ - description: BTCM internal memory
+ - description: CTCM internal memory
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: atcm0
+ - const: btcm0
+ - const: ctcm0
+
+ power-domains:
+ minItems: 2
+ items:
+ - description: RPU core power domain
+ - description: ATCM power domain
+ - description: BTCM power domain
+ - description: CTCM power domain
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - xlnx,zynqmp-r5fss
+ - xlnx,versal-r5fss
+ then:
+ if:
+ properties:
+ xlnx,cluster-mode:
+ enum: [1, 2]
+ then:
+ properties:
+ xlnx,tcm-mode:
+ enum: [1]
+
+ patternProperties:
+ "^r5f@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ minItems: 1
+ items:
+ - description: ATCM internal memory
+ - description: BTCM internal memory
+ - description: extra ATCM memory in lockstep mode
+ - description: extra BTCM memory in lockstep mode
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: atcm0
+ - const: btcm0
+ - const: atcm1
+ - const: btcm1
+
+ power-domains:
+ minItems: 2
+ items:
+ - description: RPU core power domain
+ - description: ATCM power domain
+ - description: BTCM power domain
+ - description: second ATCM power domain
+ - description: second BTCM power domain
+
+ required:
+ - xlnx,tcm-mode
+
+ else:
+ properties:
+ xlnx,tcm-mode:
+ enum: [0]
+
+ patternProperties:
+ "^r5f@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ minItems: 1
+ items:
+ - description: ATCM internal memory
+ - description: BTCM internal memory
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: atcm0
+ - const: btcm0
+
+ power-domains:
+ minItems: 2
+ items:
+ - description: RPU core power domain
+ - description: ATCM power domain
+ - description: BTCM power domain
+
+ required:
+ - xlnx,tcm-mode
additionalProperties: false
examples:
- |
- remoteproc {
- compatible = "xlnx,zynqmp-r5fss";
- xlnx,cluster-mode = <1>;
-
- r5f-0 {
- compatible = "xlnx,zynqmp-r5f";
- power-domains = <&zynqmp_firmware 0x7>;
- memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>, <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
- mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
- mbox-names = "tx", "rx";
+ #include <dt-bindings/power/xlnx-zynqmp-power.h>
+
+ // Split mode configuration
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ remoteproc@ffe00000 {
+ compatible = "xlnx,zynqmp-r5fss";
+ xlnx,cluster-mode = <0>;
+ xlnx,tcm-mode = <0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x10000>,
+ <0x0 0x20000 0x0 0xffe20000 0x0 0x10000>,
+ <0x1 0x0 0x0 0xffe90000 0x0 0x10000>,
+ <0x1 0x20000 0x0 0xffeb0000 0x0 0x10000>;
+
+ r5f@0 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x0 0x0 0x0 0x10000>, <0x0 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_0>,
+ <&zynqmp_firmware PD_R5_0_ATCM>,
+ <&zynqmp_firmware PD_R5_0_BTCM>;
+ memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>,
+ <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
+ mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+ };
+
+ r5f@1 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x1 0x0 0x0 0x10000>, <0x1 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_1>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
+ memory-region = <&rproc_1_fw_image>, <&rpu1vdev0buffer>,
+ <&rpu1vdev0vring0>, <&rpu1vdev0vring1>;
+ mboxes = <&ipi_mailbox_rpu1 0>, <&ipi_mailbox_rpu1 1>;
+ mbox-names = "tx", "rx";
+ };
};
+ };
+
+ - |
+ //Lockstep configuration
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ remoteproc@ffe00000 {
+ compatible = "xlnx,zynqmp-r5fss";
+ xlnx,cluster-mode = <1>;
+ xlnx,tcm-mode = <1>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x10000>,
+ <0x0 0x20000 0x0 0xffe20000 0x0 0x10000>,
+ <0x0 0x10000 0x0 0xffe10000 0x0 0x10000>,
+ <0x0 0x30000 0x0 0xffe30000 0x0 0x10000>;
+
+ r5f@0 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x0 0x0 0x0 0x10000>,
+ <0x0 0x20000 0x0 0x10000>,
+ <0x0 0x10000 0x0 0x10000>,
+ <0x0 0x30000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0", "atcm1", "btcm1";
+ power-domains = <&zynqmp_firmware PD_RPU_0>,
+ <&zynqmp_firmware PD_R5_0_ATCM>,
+ <&zynqmp_firmware PD_R5_0_BTCM>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
+ memory-region = <&rproc_0_fw_image>, <&rpu0vdev0buffer>,
+ <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
+ mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+ };
- r5f-1 {
- compatible = "xlnx,zynqmp-r5f";
- power-domains = <&zynqmp_firmware 0x8>;
- memory-region = <&rproc_1_fw_image>, <&rpu1vdev0buffer>, <&rpu1vdev0vring0>, <&rpu1vdev0vring1>;
- mboxes = <&ipi_mailbox_rpu1 0>, <&ipi_mailbox_rpu1 1>;
- mbox-names = "tx", "rx";
+ r5f@1 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x1 0x0 0x0 0x10000>, <0x1 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_1>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
+ memory-region = <&rproc_1_fw_image>, <&rpu1vdev0buffer>,
+ <&rpu1vdev0vring0>, <&rpu1vdev0vring1>;
+ mboxes = <&ipi_mailbox_rpu1 0>, <&ipi_mailbox_rpu1 1>;
+ mbox-names = "tx", "rx";
+ };
};
};
...
diff --git a/Documentation/devicetree/bindings/riscv/starfive.yaml b/Documentation/devicetree/bindings/riscv/starfive.yaml
index cc4d92f0a1bf..b672f8521949 100644
--- a/Documentation/devicetree/bindings/riscv/starfive.yaml
+++ b/Documentation/devicetree/bindings/riscv/starfive.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
+ - milkv,mars
- starfive,visionfive-2-v1.2a
- starfive,visionfive-2-v1.3b
- const: starfive,jh7110
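A board device tree for the newly added machine would pair the board compatible with the SoC fallback at the root node, roughly as in this sketch (the model string is an assumption):

    / {
        model = "Milk-V Mars";
        compatible = "milkv,mars", "starfive,jh7110";
    };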
diff --git a/Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml b/Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml
new file mode 100644
index 000000000000..8f20dee93e7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/microsoft,vmgenid.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Virtual Machine Generation ID
+
+maintainers:
+ - Jason A. Donenfeld <Jason@zx2c4.com>
+
+description:
+ Firmware or hypervisors can use this devicetree node to describe an
+ interrupt and a shared resource used to inject a Virtual Machine Generation ID.
+ The Virtual Machine Generation ID is a globally unique identifier (GUID), and
+ the devicetree binding follows the VMGenID specification defined in
+ http://go.microsoft.com/fwlink/?LinkId=260709.
+
+properties:
+ compatible:
+ const: microsoft,vmgenid
+
+ reg:
+ description:
+ Specifies a 16-byte VMGenID in endianness-agnostic hexadecimal format.
+ maxItems: 1
+
+ interrupts:
+ description:
+ Interrupt used to notify that a new VMGenID is available.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ rng@80000000 {
+ compatible = "microsoft,vmgenid";
+ reg = <0x80000000 0x1000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml
index 717f6b321f88..340d01d481d1 100644
--- a/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml
+++ b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml
@@ -37,6 +37,10 @@ properties:
description: If set, the RNG configuration in RNG_CR, RNG_HTCR and
RNG_NSCR will be locked.
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/rtc/twl-rtc.txt b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
deleted file mode 100644
index 8f9a94f2f896..000000000000
--- a/Documentation/devicetree/bindings/rtc/twl-rtc.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-* Texas Instruments TWL4030/6030 RTC
-
-Required properties:
-- compatible : Should be "ti,twl4030-rtc"
-- interrupts : Should be the interrupt number.
-
-Example:
- rtc {
- compatible = "ti,twl4030-rtc";
- interrupts = <11>;
- };
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 2e189e548327..0565fb7649c5 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -54,7 +54,9 @@ properties:
- const: amlogic,meson-gx-uart
- description: UART controller on S4 compatible SoCs
items:
- - const: amlogic,t7-uart
+ - enum:
+ - amlogic,a4-uart
+ - amlogic,t7-uart
- const: amlogic,meson-s4-uart
reg:
diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
index 65cb2e5c5eee..eb2992a447d7 100644
--- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
+++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
maintainers:
- - Richard Genoud <richard.genoud@gmail.com>
+ - Richard Genoud <richard.genoud@bootlin.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml b/Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml
index 7a105551fa6a..4171f524a928 100644
--- a/Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml
@@ -23,7 +23,9 @@ properties:
oneOf:
- const: fsl,s32v234-linflexuart
- items:
- - const: nxp,s32g2-linflexuart
+ - enum:
+ - nxp,s32g2-linflexuart
+ - nxp,s32g3-linflexuart
- const: fsl,s32v234-linflexuart
reg:
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
index 62f97da1b2fd..2ed526139269 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
@@ -73,6 +73,10 @@ properties:
enum: [1, 2, 4, 8, 12, 14, 16]
default: 8
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
allOf:
- $ref: rs485.yaml#
- $ref: serial.yaml#
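The access-controllers entries added to the STM32 VREFBUF, RNG and UART bindings in this series all follow the same shape; a hedged sketch for a UART node, where the &rifsc phandle, the controller index and the clock specifier are assumptions:

    serial@40010000 {
        compatible = "st,stm32h7-uart";
        reg = <0x40010000 0x400>;
        interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&rcc 1>;                 /* clock specifier is illustrative */
        access-controllers = <&rifsc 21>;  /* index is illustrative */
    };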
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
index 397f75909b20..ce1a6505eb51 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
@@ -51,7 +51,7 @@ properties:
ranges: true
patternProperties:
- "^clock-controller@[0-9a-z]+$":
+ "^clock-controller@[0-9a-f]+$":
$ref: /schemas/clock/fsl,flexspi-clock.yaml#
required:
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
index 8d088b5fe823..a6a511b00a12 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
@@ -41,7 +41,7 @@ properties:
ranges: true
patternProperties:
- "^interrupt-controller@[a-z0-9]+$":
+ "^interrupt-controller@[a-f0-9]+$":
$ref: /schemas/interrupt-controller/fsl,ls-extirq.yaml#
required:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
index 4310bae6c58e..4512390f90f0 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
@@ -58,20 +58,6 @@ patternProperties:
required:
- compatible
-allOf:
- - if:
- not:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sm8450-pmic-glink
- - qcom,sm8550-pmic-glink
- - qcom,x1e80100-pmic-glink
- then:
- properties:
- orientation-gpios: false
-
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
index 74bb92e31554..fd6db0ca98eb 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.yaml
@@ -116,8 +116,8 @@ examples:
bluetooth {
compatible = "qcom,wcnss-bt";
- /* BD address 00:11:22:33:44:55 */
- local-bd-address = [ 55 44 33 22 11 00 ];
+ /* Updated by boot firmware (little-endian order) */
+ local-bd-address = [ 00 00 00 00 00 00 ];
};
wifi {
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
new file mode 100644
index 000000000000..ebbf0c9109ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/renesas/renesas,r9a09g057-sys.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/V2H(P) System Controller (SYS)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description: |
+ The RZ/V2H(P) SYS (System Controller) controls the overall
+ configuration of the LSI and supports the following functions,
+ - Trust zone control
+ - Extend access by specific masters to address beyond 4GB space
+ - GBETH configuration
+ - Control of settings and states of SRAM/PCIe/CM33/CA55/CR8/xSPI/ADC/TSU
+ - LSI version
+ - WDT stop control
+ - General registers
+
+properties:
+ compatible:
+ const: renesas,r9a09g057-sys
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ sys: system-controller@10430000 {
+ compatible = "renesas,r9a09g057-sys";
+ reg = <0x10430000 0x10000>;
+ clocks = <&cpg 1>;
+ resets = <&cpg 1>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index c1ce4da2dc32..09d3ce97efa2 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -513,6 +513,14 @@ properties:
- renesas,rzv2mevk2 # RZ/V2M Eval Board v2.0
- const: renesas,r9a09g011
+ - description: RZ/V2H(P) (R9A09G057)
+ items:
+ - enum:
+ - renesas,r9a09g057h41 # RZ/V2H
+ - renesas,r9a09g057h42 # RZ/V2H with Mali-G31 support
+ - renesas,r9a09g057h44 # RZ/V2HP with Mali-G31 + Mali-C55 support
+ - const: renesas,r9a09g057
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 0b87c266760c..79798c747476 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -171,6 +171,7 @@ allOf:
unevaluatedProperties: false
pcie-phy:
+ type: object
description:
Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
diff --git a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
index c0c6ce8fc786..3ca220582897 100644
--- a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
@@ -15,6 +15,7 @@ properties:
- items:
- enum:
- google,gs101-apm-sysreg
+ - google,gs101-hsi2-sysreg
- google,gs101-peric0-sysreg
- google,gs101-peric1-sysreg
- samsung,exynos3-sysreg
@@ -72,6 +73,7 @@ allOf:
compatible:
contains:
enum:
+ - google,gs101-hsi2-sysreg
- google,gs101-peric0-sysreg
- google,gs101-peric1-sysreg
- samsung,exynos850-cmgp-sysreg
diff --git a/Documentation/devicetree/bindings/soc/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/soc/tegra/nvidia,tegra20-pmc.yaml
index b86f6f53ca95..7140c312d898 100644
--- a/Documentation/devicetree/bindings/soc/tegra/nvidia,tegra20-pmc.yaml
+++ b/Documentation/devicetree/bindings/soc/tegra/nvidia,tegra20-pmc.yaml
@@ -365,9 +365,9 @@ allOf:
additionalProperties: false
dependencies:
- "nvidia,suspend-mode": ["nvidia,core-pwr-off-time", "nvidia,cpu-pwr-off-time"]
- "nvidia,core-pwr-off-time": ["nvidia,core-pwr-good-time"]
- "nvidia,cpu-pwr-off-time": ["nvidia,cpu-pwr-good-time"]
+ nvidia,suspend-mode: ["nvidia,core-pwr-off-time", "nvidia,cpu-pwr-off-time"]
+ nvidia,core-pwr-off-time: ["nvidia,core-pwr-good-time"]
+ nvidia,cpu-pwr-off-time: ["nvidia,cpu-pwr-good-time"]
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
deleted file mode 100644
index 3ffc2562fb31..000000000000
--- a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Texas Instruments DaVinci McBSP module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This binding describes the "Multi-channel Buffered Serial Port" (McBSP)
-audio interface found in some TI DaVinci processors like the OMAP-L138 or AM180x.
-
-
-Required properties:
-~~~~~~~~~~~~~~~~~~~~
-- compatible :
- "ti,da850-mcbsp" : for DA850, AM180x and OPAM-L138 platforms
-
-- reg : physical base address and length of the controller memory mapped
- region(s).
-- reg-names : Should contain:
- * "mpu" for the main registers (required).
- * "dat" for the data FIFO (optional).
-
-- dmas: three element list of DMA controller phandles, DMA request line and
- TC channel ordered triplets.
-- dma-names: identifier string for each DMA request line in the dmas property.
- These strings correspond 1:1 with the ordered pairs in dmas. The dma
- identifiers must be "rx" and "tx".
-
-Optional properties:
-~~~~~~~~~~~~~~~~~~~~
-- interrupts : Interrupt numbers for McBSP
-- interrupt-names : Known interrupt names are "rx" and "tx"
-
-- pinctrl-0: Should specify pin control group used for this controller.
-- pinctrl-names: Should contain only one value - "default", for more details
- please refer to pinctrl-bindings.txt
-
-Example (AM1808):
-~~~~~~~~~~~~~~~~~
-
-mcbsp0: mcbsp@1d10000 {
- compatible = "ti,da850-mcbsp";
- pinctrl-names = "default";
- pinctrl-0 = <&mcbsp0_pins>;
-
- reg = <0x00110000 0x1000>,
- <0x00310000 0x1000>;
- reg-names = "mpu", "dat";
- interrupts = <97 98>;
- interrupt-names = "rx", "tx";
- dmas = <&edma0 3 1
- &edma0 2 1>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.yaml b/Documentation/devicetree/bindings/sound/davinci-mcbsp.yaml
new file mode 100644
index 000000000000..4fa677023827
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/davinci-mcbsp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: McBSP Controller for TI SoCs
+
+maintainers:
+ - Bastien Curutchet <bastien.curutchet@bootlin.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,da850-mcbsp
+
+ reg:
+ minItems: 1
+ items:
+ - description: CFG registers
+ - description: data registers
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: mpu
+ - const: dat
+
+ dmas:
+ items:
+ - description: transmission DMA channel
+ - description: reception DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ interrupts:
+ items:
+ - description: RX interrupt
+ - description: TX interrupt
+
+ interrupt-names:
+ items:
+ - const: rx
+ - const: tx
+
+ clocks:
+ minItems: 1
+ items:
+ - description: functional clock
+ - description: external input clock for sample rate generator.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: fck
+ - const: clks
+
+ power-domains:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ ti,T1-framing-tx:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If the property is present, tx data delay is set to 2 bit clock periods.
+ McBSP will insert a blank period (high-impedance period) before the first
+ data bit. This can be used to interface to T1-framing devices.
+
+ ti,T1-framing-rx:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If the property is present, rx data delay is set to 2 bit clock periods.
+ McBSP will discard the bit preceding the data stream (called framing bit).
+ This can be used to interface to T1-framing devices.
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - reg-names
+ - dmas
+ - dma-names
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mcbsp0@1d10000 {
+ #sound-dai-cells = <0>;
+ compatible = "ti,da850-mcbsp";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp0_pins>;
+
+ reg = <0x111000 0x1000>,
+ <0x311000 0x1000>;
+ reg-names = "mpu", "dat";
+ interrupts = <97>, <98>;
+ interrupt-names = "rx", "tx";
+ dmas = <&edma0 3 1>,
+ <&edma0 2 1>;
+ dma-names = "tx", "rx";
+
+ clocks = <&psc1 14>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,audmix.txt b/Documentation/devicetree/bindings/sound/fsl,audmix.txt
deleted file mode 100644
index 840b7e0d6a63..000000000000
--- a/Documentation/devicetree/bindings/sound/fsl,audmix.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-NXP Audio Mixer (AUDMIX).
-
-The Audio Mixer is a on-chip functional module that allows mixing of two
-audio streams into a single audio stream. Audio Mixer has two input serial
-audio interfaces. These are driven by two Synchronous Audio interface
-modules (SAI). Each input serial interface carries 8 audio channels in its
-frame in TDM manner. Mixer mixes audio samples of corresponding channels
-from two interfaces into a single sample. Before mixing, audio samples of
-two inputs can be attenuated based on configuration. The output of the
-Audio Mixer is also a serial audio interface. Like input interfaces it has
-the same TDM frame format. This output is used to drive the serial DAC TDM
-interface of audio codec and also sent to the external pins along with the
-receive path of normal audio SAI module for readback by the CPU.
-
-The output of Audio Mixer can be selected from any of the three streams
- - serial audio input 1
- - serial audio input 2
- - mixed audio
-
-Mixing operation is independent of audio sample rate but the two audio
-input streams must have same audio sample rate with same number of channels
-in TDM frame to be eligible for mixing.
-
-Device driver required properties:
-=================================
- - compatible : Compatible list, contains "fsl,imx8qm-audmix"
-
- - reg : Offset and length of the register set for the device.
-
- - clocks : Must contain an entry for each entry in clock-names.
-
- - clock-names : Must include the "ipg" for register access.
-
- - power-domains : Must contain the phandle to AUDMIX power domain node
-
- - dais : Must contain a list of phandles to AUDMIX connected
- DAIs. The current implementation requires two phandles
- to SAI interfaces to be provided, the first SAI in the
- list being used to route the AUDMIX output.
-
-Device driver configuration example:
-======================================
- audmix: audmix@59840000 {
- compatible = "fsl,imx8qm-audmix";
- reg = <0x0 0x59840000 0x0 0x10000>;
- clocks = <&clk IMX8QXP_AUD_AUDMIX_IPG>;
- clock-names = "ipg";
- power-domains = <&pd_audmix>;
- dais = <&sai4>, <&sai5>;
- };
diff --git a/Documentation/devicetree/bindings/sound/fsl,audmix.yaml b/Documentation/devicetree/bindings/sound/fsl,audmix.yaml
new file mode 100644
index 000000000000..9413b901cf77
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,audmix.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,audmix.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP Audio Mixer (AUDMIX).
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+ - Frank Li <Frank.Li@nxp.com>
+
+description: |
+ The Audio Mixer is an on-chip functional module that allows mixing of two
+ audio streams into a single audio stream. Audio Mixer has two input serial
+ audio interfaces. These are driven by two Synchronous Audio interface
+ modules (SAI). Each input serial interface carries 8 audio channels in its
+ frame in TDM manner. Mixer mixes audio samples of corresponding channels
+ from two interfaces into a single sample. Before mixing, audio samples of
+ two inputs can be attenuated based on configuration. The output of the
+ Audio Mixer is also a serial audio interface. Like input interfaces it has
+ the same TDM frame format. This output is used to drive the serial DAC TDM
+ interface of audio codec and also sent to the external pins along with the
+ receive path of normal audio SAI module for readback by the CPU.
+
+ The output of Audio Mixer can be selected from any of the three streams
+ - serial audio input 1
+ - serial audio input 2
+ - mixed audio
+
+ Mixing operation is independent of audio sample rate but the two audio
+ input streams must have same audio sample rate with same number of channels
+ in TDM frame to be eligible for mixing.
+
+properties:
+ compatible:
+ const: fsl,imx8qm-audmix
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ipg
+
+ power-domains:
+ maxItems: 1
+
+ dais:
+ description: Contains a list of phandles to the DAIs connected to AUDMIX.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 2
+ items:
+ - description: the AUDMIX output
+ maxItems: 1
+ - description: serial audio input 1
+ maxItems: 1
+ - description: serial audio input 2
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - power-domains
+ - dais
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ audmix@59840000 {
+ compatible = "fsl,imx8qm-audmix";
+ reg = <0x59840000 0x10000>;
+ clocks = <&amix_lpcg 0>;
+ clock-names = "ipg";
+ power-domains = <&pd_audmix>;
+ dais = <&sai4>, <&sai5>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt
deleted file mode 100644
index 90112ca1ff42..000000000000
--- a/Documentation/devicetree/bindings/sound/fsl,esai.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-Freescale Enhanced Serial Audio Interface (ESAI) Controller
-
-The Enhanced Serial Audio Interface (ESAI) provides a full-duplex serial port
-for serial communication with a variety of serial devices, including industry
-standard codecs, Sony/Phillips Digital Interface (S/PDIF) transceivers, and
-other DSPs. It has up to six transmitters and four receivers.
-
-Required properties:
-
- - compatible : Compatible list, should contain one of the following
- compatibles:
- "fsl,imx35-esai",
- "fsl,vf610-esai",
- "fsl,imx6ull-esai",
- "fsl,imx8qm-esai",
-
- - reg : Offset and length of the register set for the device.
-
- - interrupts : Contains the spdif interrupt.
-
- - dmas : Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
-
- - dma-names : Two dmas have to be defined, "tx" and "rx".
-
- - clocks : Contains an entry for each entry in clock-names.
-
- - clock-names : Includes the following entries:
- "core" The core clock used to access registers
- "extal" The esai baud clock for esai controller used to
- derive HCK, SCK and FS.
- "fsys" The system clock derived from ahb clock used to
- derive HCK, SCK and FS.
- "spba" The spba clock is required when ESAI is placed as a
- bus slave of the Shared Peripheral Bus and when two
- or more bus masters (CPU, DMA or DSP) try to access
- it. This property is optional depending on the SoC
- design.
-
- - fsl,fifo-depth : The number of elements in the transmit and receive
- FIFOs. This number is the maximum allowed value for
- TFCR[TFWM] or RFCR[RFWM].
-
- - fsl,esai-synchronous: This is a boolean property. If present, indicating
- that ESAI would work in the synchronous mode, which
- means all the settings for Receiving would be
- duplicated from Transmission related registers.
-
-Optional properties:
-
- - big-endian : If this property is absent, the native endian mode
- will be in use as default, or the big endian mode
- will be in use for all the device registers.
-
-Example:
-
-esai: esai@2024000 {
- compatible = "fsl,imx35-esai";
- reg = <0x02024000 0x4000>;
- interrupts = <0 51 0x04>;
- clocks = <&clks 208>, <&clks 118>, <&clks 208>;
- clock-names = "core", "extal", "fsys";
- dmas = <&sdma 23 21 0>, <&sdma 24 21 0>;
- dma-names = "rx", "tx";
- fsl,fifo-depth = <128>;
- fsl,esai-synchronous;
- big-endian;
-};
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.yaml b/Documentation/devicetree/bindings/sound/fsl,esai.yaml
new file mode 100644
index 000000000000..f99ed20fa684
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,esai.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,esai.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Enhanced Serial Audio Interface (ESAI) Controller
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ The Enhanced Serial Audio Interface (ESAI) provides a full-duplex serial port
+ for serial communication with a variety of serial devices, including industry
+ standard codecs, Sony/Phillips Digital Interface (S/PDIF) transceivers, and
+ other DSPs. It has up to six transmitters and four receivers.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx35-esai
+ - fsl,imx6ull-esai
+ - fsl,imx8qm-esai
+ - fsl,vf610-esai
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 3
+ items:
+ - description:
+ The core clock used to access registers.
+ - description:
+ The esai baud clock for esai controller used to
+ derive HCK, SCK and FS.
+ - description:
+ The system clock derived from ahb clock used to
+ derive HCK, SCK and FS.
+ - description:
+ The spba clock is required when ESAI is placed as a
+ bus slave of the Shared Peripheral Bus and when two
+ or more bus masters (CPU, DMA or DSP) try to access
+ it. This property is optional depending on the SoC
+ design.
+
+ clock-names:
+ minItems: 3
+ items:
+ - const: core
+ - const: extal
+ - const: fsys
+ - const: spba
+
+ dmas:
+ minItems: 2
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ fsl,fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 64
+ description:
+ The number of elements in the transmit and receive
+ FIFOs. This number is the maximum allowed value for
+ TFCR[TFWM] or RFCR[RFWM].
+
+ fsl,esai-synchronous:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ This is a boolean property. If present, the ESAI works in
+ synchronous mode, which means all the settings for receiving
+ are duplicated from the transmission related registers.
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If this property is absent, native endian mode is used by
+ default; if it is present, big endian mode is used for all the
+ device registers.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+
+unevaluatedProperties: false
+
+allOf:
+ - $ref: dai-common.yaml#
+
+examples:
+ - |
+ esai@2024000 {
+ compatible = "fsl,imx35-esai";
+ reg = <0x02024000 0x4000>;
+ interrupts = <0 51 0x04>;
+ clocks = <&clks 208>, <&clks 118>, <&clks 208>;
+ clock-names = "core", "extal", "fsys";
+ dmas = <&sdma 23 21 0>, <&sdma 24 21 0>;
+ dma-names = "rx", "tx";
+ fsl,fifo-depth = <128>;
+ fsl,esai-synchronous;
+ big-endian;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
index bfef2fcb75b1..76aa1f248488 100644
--- a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
@@ -74,6 +74,9 @@ properties:
- const: asrck_f
- const: spba
+ power-domains:
+ maxItems: 1
+
fsl,asrc-rate:
$ref: /schemas/types.yaml#/definitions/uint32
description: The mutual sample rate used by DPCM Back Ends
@@ -131,6 +134,17 @@ allOf:
properties:
fsl,asrc-clk-map: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-asrc
+ - fsl,imx8qxp-asrc
+ then:
+ required:
+ - power-domains
+
additionalProperties: false
examples:
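For the i.MX8QM/QXP ASRC instances the power-domains property made mandatory above would normally reference the SCU power domain of the ASRC; a sketch under that assumption (the register address and resource macro are illustrative):

    asrc@59000000 {
        compatible = "fsl,imx8qm-asrc";
        reg = <0x59000000 0x10000>;
        /* interrupts, clocks, dmas and ASRC properties omitted for brevity */
        power-domains = <&pd IMX_SC_R_ASRC_0>;
    };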
diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-audio-spdif.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-audio-spdif.yaml
new file mode 100644
index 000000000000..5fc543d02ecb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,imx-audio-spdif.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,imx-audio-spdif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX audio complex with S/PDIF transceiver
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx-sabreauto-spdif
+ - fsl,imx6sx-sdb-spdif
+ - const: fsl,imx-audio-spdif
+ - enum:
+ - fsl,imx-audio-spdif
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: User specified audio sound card name
+
+ spdif-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the i.MX S/PDIF controller
+
+ spdif-out:
+ type: boolean
+ description:
+ If present, the transmitting function of S/PDIF will be enabled,
+ indicating there is a physical S/PDIF out connector or jack on the
+ board, or that it is connected to some other IP block, such as an
+ HDMI encoder or display controller.
+
+ spdif-in:
+ type: boolean
+ description:
+ If present, the receiving function of S/PDIF will be enabled,
+ indicating there is a physical S/PDIF in connector/jack on the board.
+
+required:
+ - compatible
+ - model
+ - spdif-controller
+
+anyOf:
+ - required:
+ - spdif-in
+ - required:
+ - spdif-out
+
+additionalProperties: false
+
+examples:
+ - |
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+ spdif-controller = <&spdif>;
+ spdif-out;
+ spdif-in;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,sai.yaml b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
index 2456d958adee..a5d9c246cc47 100644
--- a/Documentation/devicetree/bindings/sound/fsl,sai.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
@@ -81,14 +81,12 @@ properties:
dmas:
minItems: 1
- items:
- - description: DMA controller phandle and request line for RX
- - description: DMA controller phandle and request line for TX
+ maxItems: 2
dma-names:
minItems: 1
items:
- - const: rx
+ - enum: [ rx, tx ]
- const: tx
interrupts:
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
index 1d64e8337aa4..204f361cea27 100644
--- a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
@@ -31,7 +31,10 @@ properties:
maxItems: 1
interrupts:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: Combined or receive interrupt
+ - description: Transmit interrupt
dmas:
items:
@@ -86,6 +89,9 @@ properties:
registers. Set this flag for HCDs with big endian descriptors and big
endian registers.
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -97,6 +103,33 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx8qm-spdif
+ - fsl,imx8qxp-spdif
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-spdif
+ - fsl,imx8qxp-spdif
+ then:
+ required:
+ - power-domains
+
examples:
- |
spdif@2004000 {
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
deleted file mode 100644
index 7e15a85cecd2..000000000000
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-Freescale Synchronous Serial Interface
-
-The SSI is a serial device that communicates with audio codecs. It can
-be programmed in AC97, I2S, left-justified, or right-justified modes.
-
-Required properties:
-- compatible: Compatible list, should contain one of the following
- compatibles:
- fsl,mpc8610-ssi
- fsl,imx51-ssi
- fsl,imx35-ssi
- fsl,imx21-ssi
-- cell-index: The SSI, <0> = SSI1, <1> = SSI2, and so on.
-- reg: Offset and length of the register set for the device.
-- interrupts: <a b> where a is the interrupt number and b is a
- field that represents an encoding of the sense and
- level information for the interrupt. This should be
- encoded based on the information in section 2)
- depending on the type of interrupt controller you
- have.
-- fsl,fifo-depth: The number of elements in the transmit and receive FIFOs.
- This number is the maximum allowed value for SFCSR[TFWM0].
- - clocks: "ipg" - Required clock for the SSI unit
- "baud" - Required clock for SSI master mode. Otherwise this
- clock is not used
-
-Required are also ac97 link bindings if ac97 is used. See
-Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
-bindings.
-
-Optional properties:
-- codec-handle: Phandle to a 'codec' node that defines an audio
- codec connected to this SSI. This node is typically
- a child of an I2C or other control node.
-- fsl,fiq-stream-filter: Bool property. Disabled DMA and use FIQ instead to
- filter the codec stream. This is necessary for some boards
- where an incompatible codec is connected to this SSI, e.g.
- on pca100 and pcm043.
-- dmas: Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
-- dma-names: Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
- is not defined.
-- fsl,mode: The operating mode for the AC97 interface only.
- "ac97-slave" - AC97 mode, SSI is clock slave
- "ac97-master" - AC97 mode, SSI is clock master
-- fsl,ssi-asynchronous:
- If specified, the SSI is to be programmed in asynchronous
- mode. In this mode, pins SRCK, STCK, SRFS, and STFS must
- all be connected to valid signals. In synchronous mode,
- SRCK and SRFS are ignored. Asynchronous mode allows
- playback and capture to use different sample sizes and
- sample rates. Some drivers may require that SRCK and STCK
- be connected together, and SRFS and STFS be connected
- together. This would still allow different sample sizes,
- but not different sample rates.
-- fsl,playback-dma: Phandle to a node for the DMA channel to use for
- playback of audio. This is typically dictated by SOC
- design. See the notes below.
- Only used on Power Architecture.
-- fsl,capture-dma: Phandle to a node for the DMA channel to use for
- capture (recording) of audio. This is typically dictated
- by SOC design. See the notes below.
- Only used on Power Architecture.
-
-Child 'codec' node required properties:
-- compatible: Compatible list, contains the name of the codec
-
-Child 'codec' node optional properties:
-- clock-frequency: The frequency of the input clock, which typically comes
- from an on-board dedicated oscillator.
-
-Notes on fsl,playback-dma and fsl,capture-dma:
-
-On SOCs that have an SSI, specific DMA channels are hard-wired for playback
-and capture. On the MPC8610, for example, SSI1 must use DMA channel 0 for
-playback and DMA channel 1 for capture. SSI2 must use DMA channel 2 for
-playback and DMA channel 3 for capture. The developer can choose which
-DMA controller to use, but the channels themselves are hard-wired. The
-purpose of these two properties is to represent this hardware design.
-
-The device tree nodes for the DMA channels that are referenced by
-"fsl,playback-dma" and "fsl,capture-dma" must be marked as compatible with
-"fsl,ssi-dma-channel". The SOC-specific compatible string (e.g.
-"fsl,mpc8610-dma-channel") can remain. If these nodes are left as
-"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel", then the generic Elo DMA
-drivers (fsldma) will attempt to use them, and it will conflict with the
-sound drivers.
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.yaml b/Documentation/devicetree/bindings/sound/fsl,ssi.yaml
new file mode 100644
index 000000000000..4ab10cd3b520
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.yaml
@@ -0,0 +1,194 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,ssi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Synchronous Serial Interface
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+description:
+ Notes on fsl,playback-dma and fsl,capture-dma
+ On SOCs that have an SSI, specific DMA channels are hard-wired for playback
+ and capture. On the MPC8610, for example, SSI1 must use DMA channel 0 for
+ playback and DMA channel 1 for capture. SSI2 must use DMA channel 2 for
+ playback and DMA channel 3 for capture. The developer can choose which
+ DMA controller to use, but the channels themselves are hard-wired. The
+ purpose of these two properties is to represent this hardware design.
+
+ The device tree nodes for the DMA channels that are referenced by
+ "fsl,playback-dma" and "fsl,capture-dma" must be marked as compatible with
+ "fsl,ssi-dma-channel". The SOC-specific compatible string (e.g.
+ "fsl,mpc8610-dma-channel") can remain. If these nodes are left as
+ "fsl,elo-dma-channel" or "fsl,eloplus-dma-channel", then the generic Elo DMA
+ drivers (fsldma) will attempt to use them, which will conflict with the
+ sound drivers.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx50-ssi
+ - fsl,imx53-ssi
+ - const: fsl,imx51-ssi
+ - const: fsl,imx21-ssi
+ - items:
+ - enum:
+ - fsl,imx25-ssi
+ - fsl,imx27-ssi
+ - fsl,imx35-ssi
+ - fsl,imx51-ssi
+ - const: fsl,imx21-ssi
+ - items:
+ - enum:
+ - fsl,imx6q-ssi
+ - fsl,imx6sl-ssi
+ - fsl,imx6sx-ssi
+ - const: fsl,imx51-ssi
+ - items:
+ - const: fsl,imx21-ssi
+ - items:
+ - const: fsl,mpc8610-ssi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: The ipg clock for register access
+ - description: clock for SSI master mode
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: baud
+ minItems: 1
+
+ dmas:
+ oneOf:
+ - items:
+ - description: DMA controller phandle and request line for RX
+ - description: DMA controller phandle and request line for TX
+ - items:
+ - description: DMA controller phandle and request line for RX0
+ - description: DMA controller phandle and request line for TX0
+ - description: DMA controller phandle and request line for RX1
+ - description: DMA controller phandle and request line for TX1
+
+ dma-names:
+ oneOf:
+ - items:
+ - const: rx
+ - const: tx
+ - items:
+ - const: rx0
+ - const: tx0
+ - const: rx1
+ - const: tx1
+
+ "#sound-dai-cells":
+ const: 0
+ description: Optional; some existing DTS nodes do not set it.
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+ description: The SSI index
+
+ ac97-gpios:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: Please refer to soc-ac97link.txt
+
+ codec-handle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to a 'codec' node that defines an audio
+ codec connected to this SSI. This node is typically
+ a child of an I2C or other control node.
+
+ fsl,fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The number of elements in the transmit and receive FIFOs.
+ This number is the maximum allowed value for SFCSR[TFWM0].
+ enum: [8, 15]
+
+ fsl,fiq-stream-filter:
+ type: boolean
+ description:
+ Disable DMA and use FIQ instead to filter the codec stream.
+ This is necessary for some boards where an incompatible codec
+ is connected to this SSI, e.g. on pca100 and pcm043.
+
+ fsl,mode:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ ac97-slave, ac97-master, i2s-slave, i2s-master,
+ lj-slave, lj-master, rj-slave, rj-master ]
+ description: |
+ "ac97-slave" - AC97 mode, SSI is clock slave
+ "ac97-master" - AC97 mode, SSI is clock master
+ "i2s-slave" - I2S mode, SSI is clock slave
+ "i2s-master" - I2S mode, SSI is clock master
+ "lj-slave" - Left justified mode, SSI is clock slave
+ "lj-master" - Left justified mode, SSI is clock master
+ "rj-slave" - Right justified mode, SSI is clock slave
+ "rj-master" - Right justified mode, SSI is clock master
+
+ fsl,ssi-asynchronous:
+ type: boolean
+ description: If specified, the SSI is to be programmed in asynchronous
+ mode. In this mode, pins SRCK, STCK, SRFS, and STFS must
+ all be connected to valid signals. In synchronous mode,
+ SRCK and SRFS are ignored. Asynchronous mode allows
+ playback and capture to use different sample sizes and
+ sample rates. Some drivers may require that SRCK and STCK
+ be connected together, and SRFS and STFS be connected
+ together. This would still allow different sample sizes,
+ but not different sample rates.
+
+ fsl,playback-dma:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to a node for the DMA channel to use for
+ playback of audio. This is typically dictated by SOC
+ design. Only used on Power Architecture.
+
+ fsl,capture-dma:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to a node for the DMA channel to use for
+ capture (recording) of audio. This is typically dictated
+ by SOC design. Only used on Power Architecture.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - fsl,fifo-depth
+
+allOf:
+ - $ref: dai-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+ ssi@2028000 {
+ compatible = "fsl,imx6q-ssi", "fsl,imx51-ssi";
+ reg = <0x02028000 0x4000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_SSI1_IPG>,
+ <&clks IMX6QDL_CLK_SSI1>;
+ clock-names = "ipg", "baud";
+ dmas = <&sdma 37 1 0>, <&sdma 38 1 0>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ fsl,fifo-depth = <15>;
+ };
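For illustration, a hedged sketch of a Power Architecture SSI node that exercises the fsl,mode, codec-handle and fsl,playback-dma/fsl,capture-dma properties described in the schema above; the register offset, interrupt specifier and the &cs4270, &dma00 and &dma01 phandles are placeholders, not values defined by this binding:

    ssi@16000 {
        compatible = "fsl,mpc8610-ssi";
        reg = <0x16000 0x100>;           /* placeholder offset and size */
        interrupts = <62 2>;             /* placeholder interrupt specifier */
        fsl,fifo-depth = <8>;
        fsl,mode = "i2s-slave";
        codec-handle = <&cs4270>;        /* hypothetical codec node */
        fsl,playback-dma = <&dma00>;     /* hypothetical DMA channel nodes marked */
        fsl,capture-dma = <&dma01>;      /* compatible with "fsl,ssi-dma-channel" */
    };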
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
deleted file mode 100644
index 4e8dbc5abfd1..000000000000
--- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
+++ /dev/null
@@ -1,117 +0,0 @@
-Freescale Generic ASoC Sound Card with ASRC support
-
-The Freescale Generic ASoC Sound Card can be used, ideally, for all Freescale
-SoCs connecting with external CODECs.
-
-The idea of this generic sound card is a bit like ASoC Simple Card. However,
-for Freescale SoCs (especially those released in recent years), most of them
-have ASRC (Documentation/devicetree/bindings/sound/fsl,asrc.txt) inside. And
-this is a specific feature that might be painstakingly controlled and merged
-into the Simple Card.
-
-So having this generic sound card allows all Freescale SoC users to benefit
-from the simplification of a new card support and the capability of the wide
-sample rates support through ASRC.
-
-Note: The card is initially designed for those sound cards who use AC'97, I2S
- and PCM DAI formats. However, it'll be also possible to support those non
- AC'97/I2S/PCM type sound cards, such as S/PDIF audio and HDMI audio, as
- long as the driver has been properly upgraded.
-
-
-The compatible list for this generic sound card currently:
- "fsl,imx-audio-ac97"
-
- "fsl,imx-audio-cs42888"
-
- "fsl,imx-audio-cs427x"
- (compatible with CS4271 and CS4272)
-
- "fsl,imx-audio-wm8962"
-
- "fsl,imx-audio-sgtl5000"
- (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
-
- "fsl,imx-audio-wm8960"
-
- "fsl,imx-audio-mqs"
-
- "fsl,imx-audio-wm8524"
-
- "fsl,imx-audio-tlv320aic32x4"
-
- "fsl,imx-audio-tlv320aic31xx"
-
- "fsl,imx-audio-si476x"
-
- "fsl,imx-audio-wm8958"
-
- "fsl,imx-audio-nau8822"
-
-Required properties:
-
- - compatible : Contains one of entries in the compatible list.
-
- - model : The user-visible name of this sound complex
-
- - audio-cpu : The phandle of an CPU DAI controller
-
- - audio-codec : The phandle of an audio codec
-
-Optional properties:
-
- - audio-asrc : The phandle of ASRC. It can be absent if there's no
- need to add ASRC support via DPCM.
-
- - audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. There're a few pre-designed board connectors:
- * Line Out Jack
- * Line In Jack
- * Headphone Jack
- * Mic Jack
- * Ext Spk
- * AMIC (stands for Analog Microphone Jack)
- * DMIC (stands for Digital Microphone Jack)
-
- Note: The "Mic Jack" and "AMIC" are redundant while
- coexisting in order to support the old bindings
- of wm8962 and sgtl5000.
-
- - hp-det-gpio : The GPIO that detect headphones are plugged in
- - mic-det-gpio : The GPIO that detect microphones are plugged in
- - bitclock-master : Indicates dai-link bit clock master; for details see simple-card.yaml.
- - frame-master : Indicates dai-link frame master; for details see simple-card.yaml.
- - dai-format : audio format, for details see simple-card.yaml.
- - frame-inversion : dai-link uses frame clock inversion, for details see simple-card.yaml.
- - bitclock-inversion : dai-link uses bit clock inversion, for details see simple-card.yaml.
- - mclk-id : main clock id, specific for each card configuration.
-
-Optional unless SSI is selected as a CPU DAI:
-
- - mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
-
- - mux-ext-port : The external port of the i.MX audio muxer
-
-Example:
-sound-cs42888 {
- compatible = "fsl,imx-audio-cs42888";
- model = "cs42888-audio";
- audio-cpu = <&esai>;
- audio-asrc = <&asrc>;
- audio-codec = <&cs42888>;
- audio-routing =
- "Line Out Jack", "AOUT1L",
- "Line Out Jack", "AOUT1R",
- "Line Out Jack", "AOUT2L",
- "Line Out Jack", "AOUT2R",
- "Line Out Jack", "AOUT3L",
- "Line Out Jack", "AOUT3R",
- "Line Out Jack", "AOUT4L",
- "Line Out Jack", "AOUT4R",
- "AIN1L", "Line In Jack",
- "AIN1R", "Line In Jack",
- "AIN2L", "Line In Jack",
- "AIN2R", "Line In Jack";
-};
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml b/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml
new file mode 100644
index 000000000000..9922664d5ccc
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl-asoc-card.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Generic ASoC Sound Card with ASRC support
+
+description:
+ The Freescale Generic ASoC Sound Card can be used, ideally,
+ for all Freescale SoCs connected to external CODECs.
+
+ The idea of this generic sound card is a bit like ASoC Simple Card.
+ However, most Freescale SoCs (especially those released in recent years)
+ have an ASRC inside, and this specific feature would be painstaking to
+ control and merge into the Simple Card.
+
+ So having this generic sound card allows all Freescale SoC users to
+ benefit from simplified support for new cards and from the wide
+ sample-rate support provided by ASRC.
+
+ Note, the card is initially designed for sound cards that use the AC'97, I2S
+ and PCM DAI formats. However, it is also possible to support non-
+ AC'97/I2S/PCM sound cards, such as S/PDIF audio and HDMI audio, as
+ long as the driver has been properly upgraded.
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx-sgtl5000
+ - fsl,imx25-pdk-sgtl5000
+ - fsl,imx53-cpuvo-sgtl5000
+ - fsl,imx51-babbage-sgtl5000
+ - fsl,imx53-m53evk-sgtl5000
+ - fsl,imx53-qsb-sgtl5000
+ - fsl,imx53-voipac-sgtl5000
+ - fsl,imx6-armadeus-sgtl5000
+ - fsl,imx6-rex-sgtl5000
+ - fsl,imx6-sabreauto-cs42888
+ - fsl,imx6-wandboard-sgtl5000
+ - fsl,imx6dl-nit6xlite-sgtl5000
+ - fsl,imx6q-ba16-sgtl5000
+ - fsl,imx6q-nitrogen6_max-sgtl5000
+ - fsl,imx6q-nitrogen6_som2-sgtl5000
+ - fsl,imx6q-nitrogen6x-sgtl5000
+ - fsl,imx6q-sabrelite-sgtl5000
+ - fsl,imx6q-sabresd-wm8962
+ - fsl,imx6q-udoo-ac97
+ - fsl,imx6q-ventana-sgtl5000
+ - fsl,imx6sl-evk-wm8962
+ - fsl,imx6sx-sdb-mqs
+ - fsl,imx6sx-sdb-wm8962
+ - fsl,imx7d-evk-wm8960
+ - karo,tx53-audio-sgtl5000
+ - tq,imx53-mba53-sgtl5000
+ - enum:
+ - fsl,imx-audio-ac97
+ - fsl,imx-audio-cs42888
+ - fsl,imx-audio-mqs
+ - fsl,imx-audio-sgtl5000
+ - fsl,imx-audio-wm8960
+ - fsl,imx-audio-wm8962
+ - items:
+ - enum:
+ - fsl,imx-audio-ac97
+ - fsl,imx-audio-cs42888
+ - fsl,imx-audio-cs427x
+ - fsl,imx-audio-mqs
+ - fsl,imx-audio-nau8822
+ - fsl,imx-audio-sgtl5000
+ - fsl,imx-audio-si476x
+ - fsl,imx-audio-tlv320aic31xx
+ - fsl,imx-audio-tlv320aic32x4
+ - fsl,imx-audio-wm8524
+ - fsl,imx-audio-wm8904
+ - fsl,imx-audio-wm8960
+ - fsl,imx-audio-wm8962
+ - fsl,imx-audio-wm8958
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex
+
+ audio-asrc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of ASRC. It can be absent if there's no
+ need to add ASRC support via DPCM.
+
+ audio-codec:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of an audio codec
+
+ audio-cpu:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of a CPU DAI controller
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source. There are a few pre-designed board
+ connectors. "AMIC" stands for Analog Microphone Jack.
+ "DMIC" stands for Digital Microphone Jack. The "Mic Jack" and "AMIC"
+ are redundant while coexisting in order to support the old bindings
+ of wm8962 and sgtl5000.
+
+ hp-det-gpio:
+ deprecated: true
+ maxItems: 1
+ description: The GPIO that detects whether headphones are plugged in
+
+ hp-det-gpios:
+ maxItems: 1
+ description: The GPIO that detects whether headphones are plugged in
+
+ mic-det-gpio:
+ deprecated: true
+ maxItems: 1
+ description: The GPIO that detects whether a microphone is plugged in
+
+ mic-det-gpios:
+ maxItems: 1
+ description: The GPIO that detects whether a microphone is plugged in
+
+ bitclock-master:
+ $ref: simple-card.yaml#/definitions/bitclock-master
+ description: Indicates dai-link bit clock master.
+
+ frame-master:
+ $ref: simple-card.yaml#/definitions/frame-master
+ description: Indicates dai-link frame master.
+
+ format:
+ $ref: simple-card.yaml#/definitions/format
+ description: audio format.
+
+ frame-inversion:
+ $ref: simple-card.yaml#/definitions/frame-inversion
+ description: dai-link uses frame clock inversion.
+
+ bitclock-inversion:
+ $ref: simple-card.yaml#/definitions/bitclock-inversion
+ description: dai-link uses bit clock inversion.
+
+ mclk-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Main clock ID, specific to each card configuration.
+
+ mux-int-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 7]
+ description: The internal port of the i.MX audio muxer (AUDMUX)
+
+ mux-ext-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [3, 4, 5, 6]
+ description: The external port of the i.MX audio muxer
+
+ ssi-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of a CPU DAI controller
+
+required:
+ - compatible
+ - model
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound-cs42888 {
+ compatible = "fsl,imx-audio-cs42888";
+ model = "cs42888-audio";
+ audio-cpu = <&esai>;
+ audio-asrc = <&asrc>;
+ audio-codec = <&cs42888>;
+ audio-routing =
+ "Line Out Jack", "AOUT1L",
+ "Line Out Jack", "AOUT1R",
+ "Line Out Jack", "AOUT2L",
+ "Line Out Jack", "AOUT2R",
+ "Line Out Jack", "AOUT3L",
+ "Line Out Jack", "AOUT3R",
+ "Line Out Jack", "AOUT4L",
+ "Line Out Jack", "AOUT4R",
+ "AIN1L", "Line In Jack",
+ "AIN1R", "Line In Jack",
+ "AIN2L", "Line In Jack",
+ "AIN2R", "Line In Jack";
+ };
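The removed text binding notes that mux-int-port and mux-ext-port are needed when an SSI is selected as the CPU DAI; a hedged sketch of such a card using compatibles from the schema above (the &ssi2 and &wm8962 phandles, the mux port numbers and the codec widget names are placeholders shown for illustration only):

    sound-wm8962 {
        compatible = "fsl,imx6q-sabresd-wm8962", "fsl,imx-audio-wm8962";
        model = "wm8962-audio";
        audio-cpu = <&ssi2>;             /* hypothetical SSI node */
        audio-codec = <&wm8962>;         /* hypothetical codec node */
        mux-int-port = <2>;
        mux-ext-port = <3>;
        audio-routing =
            "Headphone Jack", "HPOUTL",  /* codec-specific widget names, */
            "Headphone Jack", "HPOUTR";  /* shown for illustration only */
    };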
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt b/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
deleted file mode 100644
index da84a442ccea..000000000000
--- a/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Freescale i.MX audio complex with S/PDIF transceiver
-
-Required properties:
-
- - compatible : "fsl,imx-audio-spdif"
-
- - model : The user-visible name of this sound complex
-
- - spdif-controller : The phandle of the i.MX S/PDIF controller
-
-
-Optional properties:
-
- - spdif-out : This is a boolean property. If present, the
- transmitting function of S/PDIF will be enabled,
- indicating there's a physical S/PDIF out connector
- or jack on the board or it's connecting to some
- other IP block, such as an HDMI encoder or
- display-controller.
-
- - spdif-in : This is a boolean property. If present, the receiving
- function of S/PDIF will be enabled, indicating there
- is a physical S/PDIF in connector/jack on the board.
-
-* Note: At least one of these two properties should be set in the DT binding.
-
-
-Example:
-
-sound-spdif {
- compatible = "fsl,imx-audio-spdif";
- model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-out;
- spdif-in;
-};
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt2701-wm8960.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt2701-wm8960.yaml
new file mode 100644
index 000000000000..cf985461a995
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt2701-wm8960.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt2701-wm8960.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT2701 with WM8960 CODEC
+
+maintainers:
+ - Kartik Agarwala <agarwala.kartik@gmail.com>
+
+properties:
+ compatible:
+ const: mediatek,mt2701-wm8960-machine
+
+ mediatek,platform:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the MT2701 ASoC platform.
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+
+ mediatek,audio-codec:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the WM8960 audio codec.
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - mediatek,platform
+ - audio-routing
+ - mediatek,audio-codec
+ - pinctrl-names
+ - pinctrl-0
+
+examples:
+ - |
+ sound {
+ compatible = "mediatek,mt2701-wm8960-machine";
+ mediatek,platform = <&afe>;
+ audio-routing =
+ "Headphone", "HP_L",
+ "Headphone", "HP_R",
+ "LINPUT1", "AMIC",
+ "RINPUT1", "AMIC";
+ mediatek,audio-codec = <&wm8960>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&aud_pins_default>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt b/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt
deleted file mode 100644
index 809b609ea9d0..000000000000
--- a/Documentation/devicetree/bindings/sound/mt2701-wm8960.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-MT2701 with WM8960 CODEC
-
-Required properties:
-- compatible: "mediatek,mt2701-wm8960-machine"
-- mediatek,platform: the phandle of MT2701 ASoC platform
-- audio-routing: a list of the connections between audio
-- mediatek,audio-codec: the phandles of wm8960 codec
-- pinctrl-names: Should contain only one value - "default"
-- pinctrl-0: Should specify pin control groups used for this controller.
-
-Example:
-
- sound:sound {
- compatible = "mediatek,mt2701-wm8960-machine";
- mediatek,platform = <&afe>;
- audio-routing =
- "Headphone", "HP_L",
- "Headphone", "HP_R",
- "LINPUT1", "AMIC",
- "RINPUT1", "AMIC";
- mediatek,audio-codec = <&wm8960>;
- pinctrl-names = "default";
- pinctrl-0 = <&aud_pins_default>;
- };
diff --git a/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
index 9853c11a1330..cbc641ecbe94 100644
--- a/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
@@ -12,17 +12,46 @@ maintainers:
description:
This binding describes the MT8186 sound card.
+allOf:
+ - $ref: sound-card-common.yaml#
+
properties:
compatible:
enum:
- mediatek,mt8186-mt6366-da7219-max98357-sound
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+ Valid names could be the input or output widgets of audio components,
+ power supplies, MicBias of codec and the software switch.
+ minItems: 2
+ items:
+ enum:
+ # Sinks
+ - HDMI1
+ - Headphones
+ - Line Out
+ - MIC
+ - Speakers
+
+ # Sources
+ - Headset Mic
+ - HPL
+ - HPR
+ - Speaker
+ - TX
+
mediatek,platform:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8186 ASoC platform.
headset-codec:
type: object
+ deprecated: true
additionalProperties: false
properties:
sound-dai:
@@ -32,6 +61,7 @@ properties:
playback-codecs:
type: object
+ deprecated: true
additionalProperties: false
properties:
sound-dai:
@@ -53,32 +83,115 @@ properties:
A list of the desired dai-links in the sound card. Each entry is a
name defined in the machine driver.
-additionalProperties: false
+patternProperties:
+ ".*-dai-link$":
+ type: object
+ additionalProperties: false
+ description:
+ Container for dai-link level properties and CODEC sub-nodes.
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name
+ items:
+ enum:
+ - I2S0
+ - I2S1
+ - I2S2
+ - I2S3
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ minItems: 1
+ maxItems: 2
+ required:
+ - sound-dai
+
+ dai-format:
+ description: audio format
+ items:
+ enum:
+ - i2s
+ - right_j
+ - left_j
+ - dsp_a
+ - dsp_b
+
+ mediatek,clk-provider:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Indicates dai-link clock master.
+ items:
+ enum:
+ - cpu
+ - codec
+
+ required:
+ - link-name
+
+unevaluatedProperties: false
required:
- compatible
- mediatek,platform
- - headset-codec
- - playback-codecs
+
+# Disallow legacy properties if xxx-dai-link nodes are specified
+if:
+ not:
+ patternProperties:
+ ".*-dai-link$": false
+then:
+ properties:
+ headset-codec: false
+ playback-codecs: false
examples:
- |
sound: mt8186-sound {
compatible = "mediatek,mt8186-mt6366-da7219-max98357-sound";
- mediatek,platform = <&afe>;
+ model = "mt8186_da7219_m98357";
pinctrl-names = "aud_clk_mosi_off",
"aud_clk_mosi_on";
pinctrl-0 = <&aud_clk_mosi_off>;
pinctrl-1 = <&aud_clk_mosi_on>;
+ mediatek,platform = <&afe>;
+
+ audio-routing =
+ "Headphones", "HPL",
+ "Headphones", "HPR",
+ "MIC", "Headset Mic",
+ "Speakers", "Speaker",
+ "HDMI1", "TX";
+
+ hs-playback-dai-link {
+ link-name = "I2S0";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&da7219>;
+ };
+ };
- headset-codec {
- sound-dai = <&da7219>;
+ hs-capture-dai-link {
+ link-name = "I2S1";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&da7219>;
+ };
};
- playback-codecs {
- sound-dai = <&anx_bridge_dp>,
- <&max98357a>;
+ spk-dp-playback-dai-link {
+ link-name = "I2S3";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&anx_bridge_dp>, <&max98357a>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml b/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
index bdf7b0960533..ed93f18ef985 100644
--- a/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
@@ -12,6 +12,9 @@ maintainers:
description:
This binding describes the MT8186 sound card.
+allOf:
+ - $ref: sound-card-common.yaml#
+
properties:
compatible:
enum:
@@ -19,6 +22,34 @@ properties:
- mediatek,mt8186-mt6366-rt5682s-max98360-sound
- mediatek,mt8186-mt6366-rt5650-sound
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+ Valid names could be the input or output widgets of audio components,
+ power supplies, MicBias of codec and the software switch.
+ minItems: 2
+ items:
+ enum:
+ # Sinks
+ - HDMI1
+ - Headphone
+ - IN1P
+ - IN1N
+ - Line Out
+ - Speakers
+
+ # Sources
+ - Headset Mic
+ - HPOL
+ - HPOR
+ - Speaker
+ - SPOL
+ - SPOR
+ - TX
+
mediatek,platform:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8186 ASoC platform.
@@ -32,6 +63,7 @@ properties:
headset-codec:
type: object
+ deprecated: true
additionalProperties: false
properties:
sound-dai:
@@ -41,6 +73,7 @@ properties:
playback-codecs:
type: object
+ deprecated: true
additionalProperties: false
properties:
sound-dai:
@@ -62,13 +95,56 @@ properties:
A list of the desired dai-links in the sound card. Each entry is a
name defined in the machine driver.
-additionalProperties: false
+patternProperties:
+ ".*-dai-link$":
+ type: object
+ additionalProperties: false
+ description:
+ Container for dai-link level properties and CODEC sub-nodes.
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name
+ enum: [ I2S0, I2S1, I2S2, I2S3 ]
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ minItems: 1
+ maxItems: 2
+ required:
+ - sound-dai
+
+ dai-format:
+ description: audio format
+ enum: [ i2s, right_j, left_j, dsp_a, dsp_b ]
+
+ mediatek,clk-provider:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Indicates dai-link clock master.
+ enum: [ cpu, codec ]
+
+ required:
+ - link-name
+
+unevaluatedProperties: false
required:
- compatible
- mediatek,platform
- - headset-codec
- - playback-codecs
+
+# Disallow legacy properties if xxx-dai-link nodes are specified
+if:
+ not:
+ patternProperties:
+ ".*-dai-link$": false
+then:
+ properties:
+ headset-codec: false
+ playback-codecs: false
examples:
- |
@@ -76,23 +152,49 @@ examples:
sound: mt8186-sound {
compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound";
- mediatek,platform = <&afe>;
+ model = "mt8186_rt1019_rt5682s";
pinctrl-names = "aud_clk_mosi_off",
"aud_clk_mosi_on",
"aud_gpio_dmic_sec";
pinctrl-0 = <&aud_clk_mosi_off>;
pinctrl-1 = <&aud_clk_mosi_on>;
pinctrl-2 = <&aud_gpio_dmic_sec>;
+ mediatek,platform = <&afe>;
dmic-gpios = <&pio 23 GPIO_ACTIVE_HIGH>;
- headset-codec {
- sound-dai = <&rt5682s>;
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "Speakers", "Speaker",
+ "HDMI1", "TX";
+
+ hs-playback-dai-link {
+ link-name = "I2S0";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
+ };
+
+ hs-capture-dai-link {
+ link-name = "I2S1";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
};
- playback-codecs {
- sound-dai = <&it6505dptx>,
- <&rt1019p>;
+ spk-hdmi-playback-dai-link {
+ link-name = "I2S3";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&it6505dptx>, <&rt1019p>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml b/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
index 7e50f5d65c8f..c4e68f31aaab 100644
--- a/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
@@ -13,6 +13,9 @@ maintainers:
description:
This binding describes the MT8192 sound card.
+allOf:
+ - $ref: sound-card-common.yaml#
+
properties:
compatible:
enum:
@@ -20,6 +23,31 @@ properties:
- mediatek,mt8192_mt6359_rt1015p_rt5682
- mediatek,mt8192_mt6359_rt1015p_rt5682s
+ audio-routing:
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+ Valid names could be the input or output widgets of audio components,
+ power supplies, MicBias of codec and the software switch.
+ minItems: 2
+ items:
+ enum:
+ # Sinks
+ - Speakers
+ - Headphone Jack
+ - IN1P
+ - Left Spk
+ - Right Spk
+
+ # Sources
+ - Headset Mic
+ - HPOL
+ - HPOR
+ - Left SPO
+ - Right SPO
+ - Speaker
+
mediatek,platform:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8192 ASoC platform.
@@ -27,10 +55,12 @@ properties:
mediatek,hdmi-codec:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of HDMI codec.
+ deprecated: true
headset-codec:
type: object
additionalProperties: false
+ deprecated: true
properties:
sound-dai:
@@ -41,6 +71,7 @@ properties:
speaker-codecs:
type: object
additionalProperties: false
+ deprecated: true
properties:
sound-dai:
@@ -51,33 +82,121 @@ properties:
required:
- sound-dai
-additionalProperties: false
+patternProperties:
+ ".*-dai-link$":
+ type: object
+ additionalProperties: false
+
+ description:
+ Container for dai-link level properties and CODEC sub-nodes.
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name
+ enum:
+ - I2S0
+ - I2S1
+ - I2S2
+ - I2S3
+ - I2S4
+ - I2S5
+ - I2S6
+ - I2S7
+ - I2S8
+ - I2S9
+ - TDM
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ minItems: 1
+ maxItems: 2
+ required:
+ - sound-dai
+
+ dai-format:
+ description: audio format
+ enum: [ i2s, right_j, left_j, dsp_a, dsp_b ]
+
+ mediatek,clk-provider:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Indicates dai-link clock master.
+ enum: [ cpu, codec ]
+
+ required:
+ - link-name
+
+unevaluatedProperties: false
required:
- compatible
- mediatek,platform
- - headset-codec
- - speaker-codecs
+
+# Disallow legacy properties if xxx-dai-link nodes are specified
+if:
+ not:
+ patternProperties:
+ ".*-dai-link$": false
+then:
+ properties:
+ headset-codec: false
+ speaker-codecs: false
+ mediatek,hdmi-codec: false
examples:
- |
sound: mt8192-sound {
compatible = "mediatek,mt8192_mt6359_rt1015_rt5682";
- mediatek,platform = <&afe>;
- mediatek,hdmi-codec = <&anx_bridge_dp>;
+ model = "mt8192_mt6359_rt1015_rt5682";
pinctrl-names = "aud_clk_mosi_off",
"aud_clk_mosi_on";
pinctrl-0 = <&aud_clk_mosi_off>;
pinctrl-1 = <&aud_clk_mosi_on>;
+ mediatek,platform = <&afe>;
+
+ audio-routing =
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "IN1P", "Headset Mic",
+ "Speakers", "Speaker";
+
+ spk-playback-dai-link {
+ link-name = "I2S3";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt1015p>;
+ };
+ };
+
+ hs-playback-dai-link {
+ link-name = "I2S8";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682 0>;
+ };
+ };
- headset-codec {
- sound-dai = <&rt5682>;
+ hs-capture-dai-link {
+ link-name = "I2S9";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682 0>;
+ };
};
- speaker-codecs {
- sound-dai = <&rt1015_l>,
- <&rt1015_r>;
+ displayport-dai-link {
+ link-name = "TDM";
+ dai-format = "dsp_a";
+ codec {
+ sound-dai = <&anx_bridge_dp>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml b/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
index c1ddbf672ca3..2af1d8ffbd8b 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-mt6359.yaml
@@ -12,6 +12,9 @@ maintainers:
description:
This binding describes the MT8195 sound card.
+allOf:
+ - $ref: sound-card-common.yaml#
+
properties:
compatible:
enum:
@@ -23,6 +26,33 @@ properties:
$ref: /schemas/types.yaml#/definitions/string
description: User specified audio sound card name
+ audio-routing:
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+ Valid names could be the input or output widgets of audio components,
+ power supplies, MicBias of codec and the software switch.
+ minItems: 2
+ items:
+ enum:
+ # Sinks
+ - Ext Spk
+ - Headphone
+ - IN1P
+ - Left Spk
+ - Right Spk
+
+ # Sources
+ - Headset Mic
+ - HPOL
+ - HPOR
+ - Left BE_OUT
+ - Left SPO
+ - Right BE_OUT
+ - Right SPO
+ - Speaker
+
mediatek,platform:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8195 ASoC platform.
@@ -30,10 +60,12 @@ properties:
mediatek,dptx-codec:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8195 Display Port Tx codec node.
+ deprecated: true
mediatek,hdmi-codec:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle of MT8195 HDMI codec node.
+ deprecated: true
mediatek,adsp:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -45,20 +77,122 @@ properties:
A list of the desired dai-links in the sound card. Each entry is a
name defined in the machine driver.
+patternProperties:
+ ".*-dai-link$":
+ type: object
+ additionalProperties: false
+ description:
+ Container for dai-link level properties and CODEC sub-nodes.
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name
+ enum:
+ - DPTX_BE
+ - ETDM1_IN_BE
+ - ETDM2_IN_BE
+ - ETDM1_OUT_BE
+ - ETDM2_OUT_BE
+ - ETDM3_OUT_BE
+ - PCM1_BE
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ minItems: 1
+ maxItems: 2
+ required:
+ - sound-dai
+
+ dai-format:
+ description: audio format
+ enum: [ i2s, right_j, left_j, dsp_a, dsp_b ]
+
+ mediatek,clk-provider:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Indicates dai-link clock master.
+ enum: [ cpu, codec ]
+
+ required:
+ - link-name
+
additionalProperties: false
required:
- compatible
- mediatek,platform
+# Disallow legacy properties if xxx-dai-link nodes are specified
+if:
+ not:
+ patternProperties:
+ ".*-dai-link$": false
+then:
+ properties:
+ mediatek,dptx-codec: false
+ mediatek,hdmi-codec: false
+
examples:
- |
sound: mt8195-sound {
compatible = "mediatek,mt8195_mt6359_rt1019_rt5682";
+ model = "mt8195_r1019_5682";
mediatek,platform = <&afe>;
pinctrl-names = "default";
pinctrl-0 = <&aud_pins_default>;
+
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "Ext Spk", "Speaker";
+
+ mm-dai-link {
+ link-name = "ETDM1_IN_BE";
+ mediatek,clk-provider = "cpu";
+ };
+
+ hs-playback-dai-link {
+ link-name = "ETDM1_OUT_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&headset_codec>;
+ };
+ };
+
+ hs-capture-dai-link {
+ link-name = "ETDM2_IN_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&headset_codec>;
+ };
+ };
+
+ spk-playback-dai-link {
+ link-name = "ETDM2_OUT_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&spk_amplifier>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "ETDM3_OUT_BE";
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ displayport-dai-link {
+ link-name = "DPTX_BE";
+ codec {
+ sound-dai = <&dp_tx>;
+ };
+ };
};
...
diff --git a/Documentation/devicetree/bindings/sound/nuvoton,nau8325.yaml b/Documentation/devicetree/bindings/sound/nuvoton,nau8325.yaml
new file mode 100644
index 000000000000..979be0d336da
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nuvoton,nau8325.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nuvoton,nau8325.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NAU8325 Audio Amplifier
+
+maintainers:
+ - Seven Lee <WTLI@nuvoton.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: nuvoton,nau8325
+
+ reg:
+ maxItems: 1
+
+ nuvoton,vref-impedance-ohms:
+ description:
+ The VREF impedance to be used, in ohms. It selects the tie-off
+ option for the mid-supply voltage reference. Due to the high
+ impedance of the VREF pin, it is important to use a low-leakage capacitor.
+
+ enum: [0, 25000, 125000, 2500]
+
+ nuvoton,dac-vref-microvolt:
+ description:
+ The DAC VREF to be used, in microvolts. This sets the DAC reference
+ voltage and can be used for minor tuning of the output level. Since
+ VDDA ranges from 1.62 V to 1.98 V, the typical design value is 1.8 V.
+ After this minor tuning, the resulting values in microvolts are listed below.
+
+ enum: [1800000, 2700000, 2880000, 3060000]
+
+ nuvoton,alc-enable:
+ description:
+ Enable digital automatic level control (ALC) function.
+ type: boolean
+
+ nuvoton,clock-detection-disable:
+ description:
+ Disable clock detection. When clock detection is enabled, it checks
+ whether MCLK and FS are within range. The MCLK range is from 2.048 MHz
+ to 24.576 MHz and the FS range is from 8 kHz to 96 kHz. It also checks
+ that the MCLK_SRC/LRCK ratio is 256, 400 or 500, and monitors BCLK to
+ make sure data is present. There must be at least 8 BCLK cycles per
+ frame sync.
+ type: boolean
+
+ nuvoton,clock-det-data:
+ description:
+ Request clock detection to require 2048 non-zero samples before enabling
+ the audio paths. If set, non-zero samples are required; otherwise the
+ sample values do not matter.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec@21 {
+ compatible = "nuvoton,nau8325";
+ reg = <0x21>;
+ nuvoton,vref-impedance-ohms = <125000>;
+ nuvoton,dac-vref-microvolt = <2880000>;
+ nuvoton,alc-enable;
+ nuvoton,clock-det-data;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/nuvoton,nau8821.yaml b/Documentation/devicetree/bindings/sound/nuvoton,nau8821.yaml
index 054b53954ac3..9f44168efb3e 100644
--- a/Documentation/devicetree/bindings/sound/nuvoton,nau8821.yaml
+++ b/Documentation/devicetree/bindings/sound/nuvoton,nau8821.yaml
@@ -103,6 +103,12 @@ properties:
just limited to the left adc for design demand.
type: boolean
+ nuvoton,adc-delay-ms:
+ description: Delay (in ms) to make the input path stable and avoid pop noise.
+ minimum: 125
+ maximum: 500
+ default: 125
+
'#sound-dai-cells':
const: 0
@@ -136,6 +142,7 @@ examples:
nuvoton,jack-eject-debounce = <0>;
nuvoton,dmic-clk-threshold = <3072000>;
nuvoton,dmic-slew-rate = <0>;
+ nuvoton,adc-delay-ms = <125>;
#sound-dai-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.txt
deleted file mode 100644
index eaf00102d92c..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-NVIDIA Tegra 20 AC97 controller
-
-Required properties:
-- compatible : "nvidia,tegra20-ac97"
-- reg : Should contain AC97 controller registers location and length
-- interrupts : Should contain AC97 interrupt
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - ac97
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-- nvidia,codec-reset-gpio : The Tegra GPIO controller's phandle and the number
- of the GPIO used to reset the external AC97 codec
-- nvidia,codec-sync-gpio : The Tegra GPIO controller's phandle and the number
- of the GPIO corresponding with the AC97 DAP _FS line
-
-Example:
-
-ac97@70002000 {
- compatible = "nvidia,tegra20-ac97";
- reg = <0x70002000 0x200>;
- interrupts = <0 81 0x04>;
- nvidia,codec-reset-gpio = <&gpio 170 0>;
- nvidia,codec-sync-gpio = <&gpio 120 0>;
- clocks = <&tegra_car 3>;
- resets = <&tegra_car 3>;
- reset-names = "ac97";
- dmas = <&apbdma 12>, <&apbdma 12>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.yaml
new file mode 100644
index 000000000000..4ea0a303d995
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-ac97.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra20-ac97.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 AC97 controller
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra20-ac97
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: ac97
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ nvidia,codec-reset-gpios:
+ description: Reset pin of external AC97 codec
+ maxItems: 1
+
+ nvidia,codec-sync-gpios:
+ description: AC97 DAP _FS line
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - resets
+ - reset-names
+ - interrupts
+ - clocks
+ - dmas
+ - dma-names
+ - nvidia,codec-reset-gpios
+ - nvidia,codec-sync-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra20-car.h>
+ #include <dt-bindings/gpio/tegra-gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ ac97@70002000 {
+ compatible = "nvidia,tegra20-ac97";
+ reg = <0x70002000 0x200>;
+ resets = <&tegra_car 3>;
+ reset-names = "ac97";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car 3>;
+ dmas = <&apbdma 12>, <&apbdma 12>;
+ dma-names = "rx", "tx";
+ nvidia,codec-reset-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
+ nvidia,codec-sync-gpios = <&gpio TEGRA_GPIO(P, 0) GPIO_ACTIVE_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt
deleted file mode 100644
index 6de3a7ee4efb..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-NVIDIA Tegra 20 DAS (Digital Audio Switch) controller
-
-Required properties:
-- compatible : "nvidia,tegra20-das"
-- reg : Should contain DAS registers location and length
-
-Example:
-
-das@70000c00 {
- compatible = "nvidia,tegra20-das";
- reg = <0x70000c00 0x80>;
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.yaml
new file mode 100644
index 000000000000..44c5ce8ee6be
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra20-das.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra 20 DAS (Digital Audio Switch) controller
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra20-das
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ das@70000c00 {
+ compatible = "nvidia,tegra20-das";
+ reg = <0x70000c00 0x80>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt
deleted file mode 100644
index 38caa936f6f8..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-NVIDIA Tegra30 I2S controller
-
-Required properties:
-- compatible : For Tegra30, must contain "nvidia,tegra30-i2s". For Tegra124,
- must contain "nvidia,tegra124-i2s". Otherwise, must contain
- "nvidia,<chip>-i2s" plus at least one of the above, where <chip> is
- tegra114 or tegra132.
-- reg : Should contain I2S registers location and length
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - i2s
-- nvidia,ahub-cif-ids : The list of AHUB CIF IDs for this port, rx (playback)
- first, tx (capture) second. See nvidia,tegra30-ahub.txt for values.
-
-Example:
-
-i2s@70080300 {
- compatible = "nvidia,tegra30-i2s";
- reg = <0x70080300 0x100>;
- nvidia,ahub-cif-ids = <4 4>;
- clocks = <&tegra_car 11>;
- resets = <&tegra_car 11>;
- reset-names = "i2s";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.yaml
new file mode 100644
index 000000000000..89c3c6414ab1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-i2s.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra30-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra30 I2S controller
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - nvidia,tegra124-i2s
+ - nvidia,tegra30-i2s
+ - items:
+ - const: nvidia,tegra114-i2s
+ - const: nvidia,tegra30-i2s
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: i2s
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: i2s
+
+ nvidia,ahub-cif-ids:
+ description: list of AHUB CIF IDs
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: rx (playback)
+ - description: tx (capture)
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - reset-names
+ - nvidia,ahub-cif-ids
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra30-car.h>
+
+ i2s@70080300 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080300 0x100>;
+ nvidia,ahub-cif-ids = <4 4>;
+ clocks = <&tegra_car TEGRA30_CLK_I2S0>;
+ resets = <&tegra_car 30>;
+ reset-names = "i2s";
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
index 2ab6871e89e5..b2e15ebbd1bc 100644
--- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
@@ -29,6 +29,8 @@ properties:
- enum:
- qcom,apq8016-sbc-sndcard
- qcom,msm8916-qdsp6-sndcard
+ - qcom,qcm6490-idp-sndcard
+ - qcom,qcs6490-rb3gen2-sndcard
- qcom,qrb5165-rb5-sndcard
- qcom,sc7180-qdsp6-sndcard
- qcom,sc8280xp-sndcard
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 0d7a6b576d88..07ec6247d9de 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -48,13 +48,16 @@ properties:
- const: renesas,rcar_sound-gen3
# for Gen4 SoC
- items:
- - const: renesas,rcar_sound-r8a779g0 # R-Car V4H
+ - enum:
+ - renesas,rcar_sound-r8a779g0 # R-Car V4H
+ - renesas,rcar_sound-r8a779h0 # R-Car V4M
- const: renesas,rcar_sound-gen4
# for Generic
- enum:
- renesas,rcar_sound-gen1
- renesas,rcar_sound-gen2
- renesas,rcar_sound-gen3
+ - renesas,rcar_sound-gen4
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml b/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml
new file mode 100644
index 000000000000..ecf3d7d968c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/rockchip,rk3308-codec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3308 Internal Codec
+
+description: |
+ This is the audio codec embedded in the Rockchip RK3308
+ SoC. It has 8 24-bit ADCs and 2 24-bit DACs. The maximum supported
+ sampling rate is 192 kHz.
+
+ It is connected internally to one out of a selection of the internal I2S
+ controllers.
+
+ The RK3308 audio codec has 8 independent capture channels, but some
+ features work on stereo pairs called groups:
+ * grp 0 -- MIC1 / MIC2
+ * grp 1 -- MIC3 / MIC4
+ * grp 2 -- MIC5 / MIC6
+ * grp 3 -- MIC7 / MIC8
+
+maintainers:
+ - Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+properties:
+ compatible:
+ const: rockchip,rk3308-codec
+
+ reg:
+ maxItems: 1
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the General Register Files (GRF)
+
+ clocks:
+ items:
+ - description: clock for TX
+ - description: clock for RX
+ - description: AHB clock driving the interface
+
+ clock-names:
+ items:
+ - const: mclk_tx
+ - const: mclk_rx
+ - const: hclk
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: codec
+
+ "#sound-dai-cells":
+ const: 0
+
+ rockchip,micbias-avdd-percent:
+ description: |
+ Voltage setting for the MICBIAS pins expressed as a percentage of
+ AVDD.
+
+ E.g. if rockchip,micbias-avdd-percent = 85 and AVDD = 3v3, then the
+ MIC BIAS voltage will be 3.3 V * 85% = 2.805 V.
+
+ enum: [ 50, 55, 60, 65, 70, 75, 80, 85 ]
+
+required:
+ - compatible
+ - reg
+ - rockchip,grf
+ - clocks
+ - resets
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3308-cru.h>
+
+ audio_codec: audio-codec@ff560000 {
+ compatible = "rockchip,rk3308-codec";
+ reg = <0xff560000 0x10000>;
+ rockchip,grf = <&grf>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ clocks = <&cru SCLK_I2S2_8CH_TX_OUT>,
+ <&cru SCLK_I2S2_8CH_RX_OUT>,
+ <&cru PCLK_ACODEC>;
+ reset-names = "codec";
+ resets = <&cru SRST_ACODEC_P>;
+ #sound-dai-cells = <0>;
+ };
+
+...
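The example above does not use rockchip,micbias-avdd-percent; a minimal sketch of setting it on that node from a board DTS, using one of the allowed values:

    &audio_codec {
        rockchip,micbias-avdd-percent = <85>;   /* MICBIAS = 85% of AVDD */
    };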
diff --git a/Documentation/devicetree/bindings/sound/rt5645.txt b/Documentation/devicetree/bindings/sound/rt5645.txt
index 41a62fd2ae1f..c1fa379f5f3e 100644
--- a/Documentation/devicetree/bindings/sound/rt5645.txt
+++ b/Documentation/devicetree/bindings/sound/rt5645.txt
@@ -20,6 +20,11 @@ Optional properties:
a GPIO spec for the external headphone detect pin. If jd-mode = 0,
we will get the JD status by getting the value of hp-detect-gpios.
+- cbj-sleeve-gpios:
+ a GPIO spec to control the external combo jack circuit, tying the sleeve/ring2
+ contacts to ground or leaving them floating. This can avoid electrical noise
+ from active speaker jacks.
+
- realtek,in2-differential
Boolean. Indicate MIC2 input are differential, rather than single-ended.
@@ -68,6 +73,7 @@ codec: rt5650@1a {
compatible = "realtek,rt5650";
reg = <0x1a>;
hp-detect-gpios = <&gpio 19 0>;
+ cbj-sleeve-gpios = <&gpio 20 0>;
interrupt-parent = <&gpio>;
interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
realtek,dmic-en = "true";
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
index b9111d375b93..8978f6bd63e5 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
@@ -65,6 +65,10 @@ properties:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- "#sound-dai-cells"
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
index 59df8a832310..b46a4778807d 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
@@ -48,6 +48,10 @@ properties:
clock-names:
maxItems: 3
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
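The st,stm32-i2s and st,stm32-sai hunks above, and the st,stm32-spdifrx hunk below, add the same optional access-controllers property. A hedged sketch of how a board DTS might set it on an existing SAI node; the &rifsc phandle and the ID 23 are placeholders, not values defined by these bindings:

    &sai1 {
        access-controllers = <&rifsc 23>;   /* placeholder firewall controller and ID */
    };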
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml
index bc48151b9adb..3dedc81ec12f 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml
@@ -50,6 +50,10 @@ properties:
resets:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- "#sound-dai-cells"
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm1681.txt b/Documentation/devicetree/bindings/sound/ti,pcm1681.txt
deleted file mode 100644
index 4df17185ab80..000000000000
--- a/Documentation/devicetree/bindings/sound/ti,pcm1681.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Texas Instruments PCM1681 8-channel PWM Processor
-
-Required properties:
-
- - compatible: Should contain "ti,pcm1681".
- - reg: The i2c address. Should contain <0x4c>.
-
-Examples:
-
- i2c_bus {
- pcm1681@4c {
- compatible = "ti,pcm1681";
- reg = <0x4c>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml b/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
new file mode 100644
index 000000000000..5aa00617291c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,pcm1681.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments PCM1681 8-channel PWM Processor
+
+maintainers:
+ - Shenghao Ding <shenghao-ding@ti.com>
+ - Kevin Lu <kevin-lu@ti.com>
+ - Baojun Xu <baojun.xu@ti.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: ti,pcm1681
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcm1681: audio-codec@4c {
+ compatible = "ti,pcm1681";
+ reg = <0x4c>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
new file mode 100644
index 000000000000..dd5b08e3d7a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
@@ -0,0 +1,177 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,pcm6240.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments PCM6240 Family Audio ADC/DAC
+
+maintainers:
+ - Shenghao Ding <shenghao-ding@ti.com>
+
+description: |
+ The PCM6240 family is a large family of audio ADCs/DACs built for
+ different specifications, ranging from personal electronics to
+ automotive, and even some professional applications.
+
+ Specifications about the audio chip can be found at:
+ https://www.ti.com/lit/gpn/tlv320adc3120
+ https://www.ti.com/lit/gpn/tlv320adc5120
+ https://www.ti.com/lit/gpn/tlv320adc6120
+ https://www.ti.com/lit/gpn/dix4192
+ https://www.ti.com/lit/gpn/pcm1690
+ https://www.ti.com/lit/gpn/pcm3120-q1
+ https://www.ti.com/lit/gpn/pcm3140-q1
+ https://www.ti.com/lit/gpn/pcm5120-q1
+ https://www.ti.com/lit/gpn/pcm6120-q1
+ https://www.ti.com/lit/gpn/pcm6260-q1
+ https://www.ti.com/lit/gpn/pcm9211
+ https://www.ti.com/lit/gpn/pcmd3140
+ https://www.ti.com/lit/gpn/pcmd3180
+ https://www.ti.com/lit/gpn/taa5212
+ https://www.ti.com/lit/gpn/tad5212
+
+properties:
+ compatible:
+ description: |
+ ti,adc3120: Stereo-channel, 768-kHz, Burr-Brown™ audio analog-to-
+ digital converter (ADC) with 106-dB SNR.
+
+ ti,adc5120: 2-Channel, 768-kHz, Burr-Brown™ Audio ADC with 120-dB SNR.
+
+ ti,adc6120: Stereo-channel, 768-kHz, Burr-Brown™ audio analog-to-
+ digital converter (ADC) with 123-dB SNR.
+
+ ti,dix4192: 216-kHz digital audio converter with Quad-Channel In
+ and One-Channel Out.
+
+ ti,pcm1690: Automotive Catalog 113dB SNR 8-Channel Audio DAC with
+ Differential Outputs.
+
+ ti,pcm3120: Automotive, stereo, 106-dB SNR, 768-kHz, low-power
+ software-controlled audio ADC.
+
+ ti,pcm3140: Automotive, Quad-Channel, 768-kHz, Burr-Brown™ Audio ADC
+ with 106-dB SNR.
+
+ ti,pcm5120: Automotive, stereo, 120-dB SNR, 768-kHz, low-power
+ software-controlled audio ADC.
+
+ ti,pcm5140: Automotive, Quad-Channel, 768-kHz, Burr-Brown™ Audio ADC
+ with 120-dB SNR.
+
+ ti,pcm6120: Automotive, stereo, 123-dB SNR, 768-kHz, low-power
+ software-controlled audio ADC.
+
+ ti,pcm6140: Automotive, Quad-Channel, 768-kHz, Burr-Brown™ Audio ADC
+ with 123-dB SNR.
+
+ ti,pcm6240: Automotive 4-ch audio ADC with integrated programmable mic
+ bias, boost and input diagnostics.
+
+ ti,pcm6260: Automotive 6-ch audio ADC with integrated programmable mic
+ bias, boost and input diagnostics.
+
+ ti,pcm9211: 216-kHz digital audio converter With Stereo ADC and
+ Routing.
+
+ ti,pcmd3140: Four-channel PDM-input to TDM or I2S output converter.
+
+ ti,pcmd3180: Eight-channel pulse-density-modulation input to TDM or
+ I2S output converter.
+
+ ti,taa5212: Low-power high-performance stereo audio ADC with 118-dB
+ dynamic range.
+
+ ti,tad5212: Low-power stereo audio DAC with 120-dB dynamic range.
+ oneOf:
+ - items:
+ - enum:
+ - ti,adc3120
+ - ti,adc5120
+ - ti,pcm3120
+ - ti,pcm5120
+ - ti,pcm6120
+ - const: ti,adc6120
+ - items:
+ - enum:
+ - ti,pcmd512x
+ - ti,pcm9211
+ - ti,taa5212
+ - ti,tad5212
+ - const: ti,adc6120
+ - items:
+ - enum:
+ - ti,pcm3140
+ - ti,pcm5140
+ - ti,dix4192
+ - ti,pcm6140
+ - ti,pcm6260
+ - const: ti,pcm6240
+ - items:
+ - enum:
+ - ti,pcmd3140
+ - ti,pcmd3180
+ - ti,pcm1690
+ - ti,taa5412
+ - ti,tad5412
+ - const: ti,pcm6240
+ - enum:
+ - ti,adc6120
+ - ti,pcm6240
+
+ reg:
+ description:
+ I2C addresses of the devices. When multiple PCM devices are used,
+ all their I2C addresses are aggregated as one audio device to
+ support multiple audio slots.
+ minItems: 1
+ maxItems: 4
+
+ reset-gpios:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+ description:
+ Not applicable to ti,pcm1690, which has no INT pin.
+
+ '#sound-dai-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: dai-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,pcm1690
+ then:
+ properties:
+ interrupts: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ /* example for two devices with interrupt support */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pcm6240: audio-codec@48 {
+ compatible = "ti,pcm6240";
+ reg = <0x48>, /* primary-device */
+ <0x4b>; /* secondary-device */
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8776.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8776.yaml
new file mode 100644
index 000000000000..7bbc96ee81be
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8776.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/wlf,wm8776.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: WM8776 audio CODEC
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: wlf,wm8776
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "wlf,wm8776";
+ reg = <0x1a>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8974.txt b/Documentation/devicetree/bindings/sound/wlf,wm8974.txt
deleted file mode 100644
index 01d3a7c83419..000000000000
--- a/Documentation/devicetree/bindings/sound/wlf,wm8974.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-WM8974 audio CODEC
-
-This device supports both I2C and SPI (configured with pin strapping
-on the board).
-
-Required properties:
- - compatible: "wlf,wm8974"
- - reg: the I2C address or SPI chip select number of the device
-
-Examples:
-
-codec: wm8974@1a {
- compatible = "wlf,wm8974";
- reg = <0x1a>;
-};
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8974.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8974.yaml
new file mode 100644
index 000000000000..d27300207c67
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8974.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/wlf,wm8974.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: WM8974 audio CODEC
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ const: wlf,wm8974
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "wlf,wm8974";
+ reg = <0x1a>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/wm8776.txt b/Documentation/devicetree/bindings/sound/wm8776.txt
deleted file mode 100644
index 01173369c3ed..000000000000
--- a/Documentation/devicetree/bindings/sound/wm8776.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-WM8776 audio CODEC
-
-This device supports both I2C and SPI (configured with pin strapping
-on the board).
-
-Required properties:
-
- - compatible : "wlf,wm8776"
-
- - reg : the I2C address of the device for I2C, the chip select
- number for SPI.
-
-Example:
-
-wm8776: codec@1a {
- compatible = "wlf,wm8776";
- reg = <0x1a>;
-};
diff --git a/Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml b/Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
new file mode 100644
index 000000000000..b820c5613dcc
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/airoha,en7581-snand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI-NAND flash controller for Airoha ARM SoCs
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ const: airoha,en7581-snand
+
+ reg:
+ items:
+ - description: spi base address
+ - description: nfi2spi base address
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: spi
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/en7523-clk.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ spi@1fa10000 {
+ compatible = "airoha,en7581-snand";
+ reg = <0x0 0x1fa10000 0x0 0x140>,
+ <0x0 0x1fa11000 0x0 0x160>;
+
+ clocks = <&scuclk EN7523_CLK_SPI>;
+ clock-names = "spi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <2>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
index cca81f89e252..d48ecd6cd5ad 100644
--- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
@@ -68,12 +68,13 @@ properties:
- items:
- enum:
- amd,pensando-elba-qspi
- - ti,k2g-qspi
- - ti,am654-ospi
- intel,lgm-qspi
- - xlnx,versal-ospi-1.0
- intel,socfpga-qspi
+ - mobileye,eyeq5-ospi
- starfive,jh7110-qspi
+ - ti,am654-ospi
+ - ti,k2g-qspi
+ - xlnx,versal-ospi-1.0
- const: cdns,qspi-nor
- const: cdns,qspi-nor
@@ -145,7 +146,6 @@ required:
- reg
- interrupts
- clocks
- - cdns,fifo-depth
- cdns,fifo-width
- cdns,trigger-address
- '#address-cells'
diff --git a/Documentation/devicetree/bindings/spi/marvell,armada-3700-spi.yaml b/Documentation/devicetree/bindings/spi/marvell,armada-3700-spi.yaml
new file mode 100644
index 000000000000..61caa1d86188
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/marvell,armada-3700-spi.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/marvell,armada-3700-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 3700 SPI Controller
+
+description:
+ The SPI controller on the Marvell Armada 3700 SoC.
+
+maintainers:
+ - Kousik Sanagavarapu <five231003@gmail.com>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ const: marvell,armada-3700-spi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ num-cs:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ spi0: spi@10600 {
+ compatible = "marvell,armada-3700-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10600 0x5d>;
+ clocks = <&nb_perih_clk 7>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ num-cs = <4>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
index 00acbbb0f65d..49649fc3f95a 100644
--- a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -54,6 +54,7 @@ properties:
- renesas,msiof-r8a779a0 # R-Car V3U
- renesas,msiof-r8a779f0 # R-Car S4-8
- renesas,msiof-r8a779g0 # R-Car V4H
+ - renesas,msiof-r8a779h0 # R-Car V4M
- const: renesas,rcar-gen4-msiof # generic R-Car Gen4
# compatible device
- items:
diff --git a/Documentation/devicetree/bindings/spi/spi-armada-3700.txt b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
deleted file mode 100644
index 1564aa8c02cd..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-* Marvell Armada 3700 SPI Controller
-
-Required Properties:
-
-- compatible: should be "marvell,armada-3700-spi"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: The interrupt number. The interrupt specifier format depends on
- the interrupt controller and of its driver.
-- clocks: Must contain the clock source, usually from the North Bridge clocks.
-- num-cs: The number of chip selects that is supported by this SPI Controller
-- #address-cells: should be 1.
-- #size-cells: should be 0.
-
-Example:
-
- spi0: spi@10600 {
- compatible = "marvell,armada-3700-spi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10600 0x5d>;
- clocks = <&nb_perih_clk 7>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
- num-cs = <4>;
- };
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
index 8bba965a9ae6..3f1a27efff80 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
@@ -46,6 +46,10 @@ properties:
- const: tx
- const: rx
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
index 4bd9aeb81208..a55c8633c32c 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
@@ -52,6 +52,10 @@ properties:
- const: rx
- const: tx
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/ti,qspi.yaml b/Documentation/devicetree/bindings/spi/ti,qspi.yaml
new file mode 100644
index 000000000000..626a915b3d77
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/ti,qspi.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/ti,qspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI QSPI controller
+
+maintainers:
+ - Kousik Sanagavarapu <five231003@gmail.com>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,am4372-qspi
+ - ti,dra7xxx-qspi
+
+ reg:
+ items:
+ - description: base registers
+ - description: mapped memory
+
+ reg-names:
+ items:
+ - const: qspi_base
+ - const: qspi_mmap
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: fck
+
+ interrupts:
+ maxItems: 1
+
+ num-cs:
+ minimum: 1
+ maximum: 4
+ default: 1
+
+ ti,hwmods:
+ description:
+ Name of the hwmod associated to the QSPI. This is for legacy
+ platforms only.
+ $ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
+
+ syscon-chipselects:
+ description:
+ Handle to system control region containing QSPI chipselect register
+ and offset of that register.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to system control register
+ - description: register offset
+
+ spi-max-frequency:
+ description: Maximum SPI clocking speed of the controller in Hz.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/dra7.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ spi@4b300000 {
+ compatible = "ti,dra7xxx-qspi";
+ reg = <0x4b300000 0x100>,
+ <0x5c000000 0x4000000>;
+ reg-names = "qspi_base", "qspi_mmap";
+ syscon-chipselects = <&scm_conf 0x558>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 25>;
+ clock-names = "fck";
+ num-cs = <4>;
+ spi-max-frequency = <48000000>;
+ interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/ti_qspi.txt b/Documentation/devicetree/bindings/spi/ti_qspi.txt
deleted file mode 100644
index 47b184bce414..000000000000
--- a/Documentation/devicetree/bindings/spi/ti_qspi.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-TI QSPI controller.
-
-Required properties:
-- compatible : should be "ti,dra7xxx-qspi" or "ti,am4372-qspi".
-- reg: Should contain QSPI registers location and length.
-- reg-names: Should contain the resource reg names.
- - qspi_base: Qspi configuration register Address space
- - qspi_mmap: Memory mapped Address space
- - (optional) qspi_ctrlmod: Control module Address space
-- interrupts: should contain the qspi interrupt number.
-- #address-cells, #size-cells : Must be present if the device has sub-nodes
-- ti,hwmods: Name of the hwmod associated to the QSPI
-
-Recommended properties:
-- spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Optional properties:
-- syscon-chipselects: Handle to system control region contains QSPI
- chipselect register and offset of that register.
-
-NOTE: TI QSPI controller requires different pinmux and IODelay
-parameters for Mode-0 and Mode-3 operations, which needs to be set up by
-the bootloader (U-Boot). Default configuration only supports Mode-0
-operation. Hence, "spi-cpol" and "spi-cpha" DT properties cannot be
-specified in the slave nodes of TI QSPI controller without appropriate
-modification to bootloader.
-
-Example:
-
-For am4372:
-qspi: qspi@47900000 {
- compatible = "ti,am4372-qspi";
- reg = <0x47900000 0x100>, <0x30000000 0x4000000>;
- reg-names = "qspi_base", "qspi_mmap";
- #address-cells = <1>;
- #size-cells = <0>;
- spi-max-frequency = <25000000>;
- ti,hwmods = "qspi";
-};
-
-For dra7xx:
-qspi: qspi@4b300000 {
- compatible = "ti,dra7xxx-qspi";
- reg = <0x4b300000 0x100>,
- <0x5c000000 0x4000000>,
- reg-names = "qspi_base", "qspi_mmap";
- syscon-chipselects = <&scm_conf 0x558>;
- #address-cells = <1>;
- #size-cells = <0>;
- spi-max-frequency = <48000000>;
- ti,hwmods = "qspi";
-};
diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
index 20f8f9b3b971..01fccdfc4178 100644
--- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
@@ -13,11 +13,13 @@ description: Binding for Amlogic Thermal
properties:
compatible:
- items:
- - enum:
- - amlogic,g12a-cpu-thermal
- - amlogic,g12a-ddr-thermal
- - const: amlogic,g12a-thermal
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,g12a-cpu-thermal
+ - amlogic,g12a-ddr-thermal
+ - const: amlogic,g12a-thermal
+ - const: amlogic,a1-cpu-thermal
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
index b634f57cd011..ca81c8afba79 100644
--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
@@ -18,13 +18,15 @@ properties:
oneOf:
- enum:
- loongson,ls2k1000-thermal
+ - loongson,ls2k2000-thermal
- items:
- enum:
- - loongson,ls2k2000-thermal
+ - loongson,ls2k0500-thermal
- const: loongson,ls2k1000-thermal
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
interrupts:
maxItems: 1
@@ -38,6 +40,24 @@ required:
- interrupts
- '#thermal-sensor-cells'
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - loongson,ls2k2000-thermal
+
+then:
+ properties:
+ reg:
+ minItems: 2
+ maxItems: 2
+
+else:
+ properties:
+ reg:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml b/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
index e6665af52ee6..331cf4e662e3 100644
--- a/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
@@ -19,6 +19,9 @@ properties:
compatible:
enum:
- mediatek,mt7988-lvts-ap
+ - mediatek,mt8186-lvts
+ - mediatek,mt8188-lvts-ap
+ - mediatek,mt8188-lvts-mcu
- mediatek,mt8192-lvts-ap
- mediatek,mt8192-lvts-mcu
- mediatek,mt8195-lvts-ap
@@ -60,6 +63,8 @@ allOf:
compatible:
contains:
enum:
+ - mediatek,mt8188-lvts-ap
+ - mediatek,mt8188-lvts-mcu
- mediatek,mt8192-lvts-ap
- mediatek,mt8192-lvts-mcu
then:
@@ -75,6 +80,7 @@ allOf:
compatible:
contains:
enum:
+ - mediatek,mt8186-lvts
- mediatek,mt8195-lvts-ap
- mediatek,mt8195-lvts-mcu
then:
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml b/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml
index 5ff72ce5c887..1175bb358382 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml
@@ -17,10 +17,14 @@ description:
properties:
compatible:
- enum:
- - qcom,sc8180x-lmh
- - qcom,sdm845-lmh
- - qcom,sm8150-lmh
+ oneOf:
+ - enum:
+ - qcom,sc8180x-lmh
+ - qcom,sdm845-lmh
+ - qcom,sm8150-lmh
+ - items:
+ - const: qcom,qcm2290-lmh
+ - const: qcom,sm8150-lmh
reg:
items:
diff --git a/Documentation/devicetree/bindings/thermal/st,stih407-thermal.yaml b/Documentation/devicetree/bindings/thermal/st,stih407-thermal.yaml
new file mode 100644
index 000000000000..9f6fc5c95c55
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/st,stih407-thermal.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/st,stih407-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STi digital thermal sensor (DTS)
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+ - Lee Jones <lee@kernel.org>
+
+allOf:
+ - $ref: thermal-sensor.yaml
+
+properties:
+ compatible:
+ const: st,stih407-thermal
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: thermal
+
+ interrupts:
+ description:
+ For thermal sensors for which no interrupt has been defined, a polling
+ delay of 1000ms will be used to read the temperature from the device.
+ maxItems: 1
+
+ '#thermal-sensor-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ temperature-sensor@91a0000 {
+ compatible = "st,stih407-thermal";
+ reg = <0x91a0000 0x28>;
+ clock-names = "thermal";
+ clocks = <&CLK_SYSIN>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/thermal/st-thermal.txt b/Documentation/devicetree/bindings/thermal/st-thermal.txt
deleted file mode 100644
index a2f939137e35..000000000000
--- a/Documentation/devicetree/bindings/thermal/st-thermal.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Binding for Thermal Sensor driver for STMicroelectronics STi series of SoCs.
-
-Required parameters:
--------------------
-
-compatible : Should be "st,stih407-thermal"
-
-clock-names : Should be "thermal".
- See: Documentation/devicetree/bindings/resource-names.txt
-clocks : Phandle of the clock used by the thermal sensor.
- See: Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Optional parameters:
--------------------
-
-reg : For non-sysconf based sensors, this should be the physical base
- address and length of the sensor's registers.
-interrupts : Standard way to define interrupt number.
- NB: For thermal sensor's for which no interrupt has been
- defined, a polling delay of 1000ms will be used to read the
- temperature from device.
-
-Example:
-
- temp0@91a0000 {
- compatible = "st,stih407-thermal";
- reg = <0x91a0000 0x28>;
- clock-names = "thermal";
- clocks = <&CLK_SYSIN>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
- st,passive_cooling_temp = <110>;
- };
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index 7a4a6ab85970..ab8f28993139 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -60,7 +60,7 @@ properties:
be implemented in an always-on power domain."
patternProperties:
- '^frame@[0-9a-z]*$':
+ '^frame@[0-9a-f]+$':
type: object
additionalProperties: false
description: A timer node has up to 8 frame sub-nodes, each with the following properties.
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.yaml b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
index a0be1755ea28..5e09c04da30e 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
@@ -103,6 +103,7 @@ properties:
- renesas,r8a779a0-cmt0 # 32-bit CMT0 on R-Car V3U
- renesas,r8a779f0-cmt0 # 32-bit CMT0 on R-Car S4-8
- renesas,r8a779g0-cmt0 # 32-bit CMT0 on R-Car V4H
+ - renesas,r8a779h0-cmt0 # 32-bit CMT0 on R-Car V4M
- const: renesas,rcar-gen4-cmt0 # 32-bit CMT0 on R-Car Gen4
- items:
@@ -110,6 +111,7 @@ properties:
- renesas,r8a779a0-cmt1 # 48-bit CMT on R-Car V3U
- renesas,r8a779f0-cmt1 # 48-bit CMT on R-Car S4-8
- renesas,r8a779g0-cmt1 # 48-bit CMT on R-Car V4H
+ - renesas,r8a779h0-cmt1 # 48-bit CMT on R-Car V4M
- const: renesas,rcar-gen4-cmt1 # 48-bit CMT on R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
index 8b06a681764e..e8c642166462 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -26,6 +26,7 @@ properties:
- renesas,r9a07g043-ostm # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ostm # RZ/G2{L,LC}
- renesas,r9a07g054-ostm # RZ/V2L
+ - renesas,r9a09g057-ostm # RZ/V2H(P)
- const: renesas,ostm # Generic
reg:
@@ -58,6 +59,7 @@ if:
- renesas,r9a07g043-ostm
- renesas,r9a07g044-ostm
- renesas,r9a07g054-ostm
+ - renesas,r9a09g057-ostm
then:
required:
- resets
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
index 84bbe15028a1..360a5cf1ae9c 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -39,6 +39,7 @@ properties:
- renesas,tmu-r8a779a0 # R-Car V3U
- renesas,tmu-r8a779f0 # R-Car S4-8
- renesas,tmu-r8a779g0 # R-Car V4H
+ - renesas,tmu-r8a779h0 # R-Car V4M
- const: renesas,tmu
reg:
diff --git a/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml b/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml
index 50a3fd31241c..8b0d3d4be5d8 100644
--- a/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml
+++ b/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml
@@ -33,13 +33,13 @@ properties:
reg:
maxItems: 1
- 'ibm,#dma-address-cells':
+ ibm,#dma-address-cells:
description:
number of cells that are used to encode the physical address field of
dma-window properties
$ref: /schemas/types.yaml#/definitions/uint32-array
- 'ibm,#dma-size-cells':
+ ibm,#dma-size-cells:
description:
number of cells that are used to encode the size field of
dma-window properties
diff --git a/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml b/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml
index 3ab4434b7352..af7720dc4a12 100644
--- a/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml
+++ b/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml
@@ -32,6 +32,7 @@ properties:
- enum:
- infineon,slb9673
- nuvoton,npct75x
+ - st,st33ktpm2xi2c
- const: tcg,tpm-tis-i2c
- description: TPM 1.2 and 2.0 chips with vendor-specific I²C interface
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index e07be7bf8395..0a419453d183 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -126,6 +126,8 @@ properties:
- ibm,cffps1
# IBM Common Form Factor Power Supply Versions 2
- ibm,cffps2
+ # IBM On-Chip Controller hwmon device
+ - ibm,p8-occ-hwmon
# Infineon barometric pressure and temperature sensor
- infineon,dps310
# Infineon IR36021 digital POL buck controller
@@ -134,6 +136,8 @@ properties:
- infineon,irps5401
# Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
- infineon,tlv493d-a1b6
+ # Infineon Hot-swap controller xdp710
+ - infineon,xdp710
# Infineon Multi-phase Digital VR Controller xdpe11280
- infineon,xdpe11280
# Infineon Multi-phase Digital VR Controller xdpe12254
@@ -160,6 +164,8 @@ properties:
- isil,isl29030
# Intersil ISL68137 Digital Output Configurable PWM Controller
- isil,isl68137
+ # Intersil ISL69269 PMBus Voltage Regulator
+ - isil,isl69269
# Intersil ISL76682 Ambient Light Sensor
- isil,isl76682
# Linear Technology LTC2488
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index 10c146424baa..cd3680dc002f 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -27,10 +27,13 @@ properties:
- qcom,msm8996-ufshc
- qcom,msm8998-ufshc
- qcom,sa8775p-ufshc
+ - qcom,sc7180-ufshc
- qcom,sc7280-ufshc
+ - qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sdm845-ufshc
- qcom,sm6115-ufshc
+ - qcom,sm6125-ufshc
- qcom,sm6350-ufshc
- qcom,sm8150-ufshc
- qcom,sm8250-ufshc
@@ -42,11 +45,11 @@ properties:
- const: jedec,ufs-2.0
clocks:
- minItems: 8
+ minItems: 7
maxItems: 11
clock-names:
- minItems: 8
+ minItems: 7
maxItems: 11
dma-coherent: true
@@ -117,9 +120,35 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc7180-ufshc
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: core_clk
+ - const: bus_aggr_clk
+ - const: iface_clk
+ - const: core_clk_unipro
+ - const: ref_clk
+ - const: tx_lane0_sync_clk
+ - const: rx_lane0_sync_clk
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,msm8998-ufshc
- qcom,sa8775p-ufshc
- qcom,sc7280-ufshc
+ - qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sm8250-ufshc
- qcom,sm8350-ufshc
@@ -215,6 +244,7 @@ allOf:
contains:
enum:
- qcom,sm6115-ufshc
+ - qcom,sm6125-ufshc
then:
properties:
clocks:
@@ -248,7 +278,7 @@ allOf:
reg:
maxItems: 1
clocks:
- minItems: 8
+ minItems: 7
maxItems: 8
else:
properties:
@@ -256,7 +286,7 @@ allOf:
minItems: 1
maxItems: 2
clocks:
- minItems: 8
+ minItems: 7
maxItems: 11
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index b2b509b3944d..720879820f66 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -12,12 +12,10 @@ maintainers:
description: |
Each Samsung UFS host controller instance should have its own node.
-allOf:
- - $ref: ufs-common.yaml
-
properties:
compatible:
enum:
+ - google,gs101-ufs
- samsung,exynos7-ufs
- samsung,exynosautov9-ufs
- samsung,exynosautov9-ufs-vh
@@ -38,14 +36,24 @@ properties:
- const: ufsp
clocks:
+ minItems: 2
items:
- description: ufs link core clock
- description: unipro main clock
+ - description: fmp clock
+ - description: ufs aclk clock
+ - description: ufs pclk clock
+ - description: sysreg clock
clock-names:
+ minItems: 2
items:
- const: core_clk
- const: sclk_unipro_main
+ - const: fmp
+ - const: aclk
+ - const: pclk
+ - const: sysreg
phys:
maxItems: 1
@@ -72,6 +80,30 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: ufs-common.yaml
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: google,gs101-ufs
+
+ then:
+ properties:
+ clocks:
+ minItems: 6
+
+ clock-names:
+ minItems: 6
+
+ else:
+ properties:
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index 0a5c98ea711d..88c077673c8b 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -172,6 +172,10 @@ properties:
tpl-support: true
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
dependencies:
port: [ usb-role-switch ]
role-switch-default-mode: [ usb-role-switch ]
diff --git a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
index 2d3589d284b2..0a6e7ac1b37e 100644
--- a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
@@ -33,6 +33,7 @@ properties:
- fsl,imx7ulp-usbmisc
- fsl,imx8mm-usbmisc
- fsl,imx8mn-usbmisc
+ - fsl,imx8ulp-usbmisc
- const: fsl,imx7d-usbmisc
- const: fsl,imx6q-usbmisc
- items:
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
index 924fd3d748a8..ef3143f4b794 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
@@ -29,6 +29,7 @@ properties:
- mediatek,mt7623-xhci
- mediatek,mt7629-xhci
- mediatek,mt7986-xhci
+ - mediatek,mt7988-xhci
- mediatek,mt8173-xhci
- mediatek,mt8183-xhci
- mediatek,mt8186-xhci
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index b97d298b3eb6..fbf47f0bacf1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -151,6 +151,8 @@ patternProperties:
description: ARM Ltd.
"^armadeus,.*":
description: ARMadeus Systems SARL
+ "^armsom,.*":
+ description: ArmSoM Technology Co., Ltd.
"^arrow,.*":
description: Arrow Electronics
"^artesyn,.*":
@@ -256,6 +258,8 @@ patternProperties:
description: Catalyst Semiconductor, Inc.
"^cavium,.*":
description: Cavium, Inc.
+ "^cct,.*":
+ description: Crystal Clear Technology Sdn. Bhd.
"^cdns,.*":
description: Cadence Design Systems Inc.
"^cdtech,.*":
@@ -438,6 +442,8 @@ patternProperties:
description: Dongguan EmbedFire Electronic Technology Co., Ltd.
"^embest,.*":
description: Shenzhen Embest Technology Co., Ltd.
+ "^emcraft,.*":
+ description: Emcraft Systems
"^emlid,.*":
description: Emlid, Ltd.
"^emmicro,.*":
@@ -529,6 +535,8 @@ patternProperties:
description: FX Technology Ltd.
"^galaxycore,.*":
description: GalaxyCore Inc.
+ "^gameforce,.*":
+ description: GameForce
"^gardena,.*":
description: GARDENA GmbH
"^gateway,.*":
@@ -1627,6 +1635,8 @@ patternProperties:
description: Wondermedia Technologies, Inc.
"^wobo,.*":
description: Wobo
+ "^wolfvision,.*":
+ description: WolfVision GmbH
"^x-powers,.*":
description: X-Powers
"^xen,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/aspeed,ast2400-wdt.yaml b/Documentation/devicetree/bindings/watchdog/aspeed,ast2400-wdt.yaml
new file mode 100644
index 000000000000..be78a9865584
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/aspeed,ast2400-wdt.yaml
@@ -0,0 +1,142 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/aspeed,ast2400-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed watchdog timer controllers
+
+maintainers:
+ - Andrew Jeffery <andrew@codeconstruct.com.au>
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-wdt
+ - aspeed,ast2500-wdt
+ - aspeed,ast2600-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: >
+ The clock used to drive the watchdog counter. From the AST2500 onwards, no source
+ other than the 1MHz clock can be selected, so the clocks property is
+ optional.
+
+ aspeed,reset-type:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - cpu
+ - soc
+ - system
+ - none
+ default: system
+ description: >
+ The watchdog can be programmed to generate one of three different types of
+ reset when a timeout occurs.
+
+ Specifying 'cpu' will only reset the processor on a timeout event.
+
+ Specifying 'soc' will reset a configurable subset of the SoC's controllers
+ on a timeout event. Controllers critical to the SoC's operation may remain
+ untouched. The set of SoC controllers to reset may be specified via the
+ aspeed,reset-mask property if the node has the aspeed,ast2500-wdt or
+ aspeed,ast2600-wdt compatible.
+
+ Specifying 'system' will reset all controllers on a timeout event, as if
+ EXTRST had been asserted.
+
+ Specifying 'none' will cause the timeout event to have no reset effect.
+ Another watchdog engine on the chip must be used for chip reset operations.
+
+ aspeed,alt-boot:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: >
+ Direct the watchdog to configure the SoC to boot from the alternative boot
+ region if a timeout occurs.
+
+ aspeed,external-signal:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: >
+ Assert the timeout event on an external signal pin associated with the
+ watchdog controller instance. The pin must be muxed appropriately.
+
+ aspeed,ext-pulse-duration:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The duration, in microseconds, of the pulse emitted on the external signal
+ pin.
+
+ aspeed,ext-push-pull:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: >
+ If aspeed,external-signal is specified in the node, set the external
+ signal pin's drive type to push-pull. If aspeed,ext-push-pull is not
+ specified then the pin is configured as open-drain.
+
+ aspeed,ext-active-high:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: >
+ If both aspeed,external-signal and aspeed,ext-push-pull are specified in
+ the node, set the pulse polarity to active-high. If aspeed,ext-active-high
+ is not specified then the pin is configured as active-low.
+
+ aspeed,reset-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
+ description: >
+ A bitmask indicating which peripherals will be reset if the watchdog
+ timer expires. On AST2500 SoCs this should be a single word defined using
+ the AST2500_WDT_RESET_* macros; on AST2600 SoCs this should be a two-word
+ array with the first word defined using the AST2600_WDT_RESET1_* macros,
+ and the second word defined using the AST2600_WDT_RESET2_* macros.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ anyOf:
+ - required:
+ - aspeed,ext-push-pull
+ - required:
+ - aspeed,ext-active-high
+ - required:
+ - aspeed,reset-mask
+ then:
+ properties:
+ compatible:
+ enum:
+ - aspeed,ast2500-wdt
+ - aspeed,ast2600-wdt
+ - if:
+ required:
+ - aspeed,ext-active-high
+ then:
+ required:
+ - aspeed,ext-push-pull
+
+additionalProperties: false
+
+examples:
+ - |
+ watchdog@1e785000 {
+ compatible = "aspeed,ast2400-wdt";
+ reg = <0x1e785000 0x1c>;
+ aspeed,reset-type = "system";
+ aspeed,external-signal;
+ };
+ - |
+ #include <dt-bindings/watchdog/aspeed-wdt.h>
+ watchdog@1e785040 {
+ compatible = "aspeed,ast2600-wdt";
+ reg = <0x1e785040 0x40>;
+ aspeed,reset-type = "soc";
+ aspeed,reset-mask = <AST2600_WDT_RESET1_DEFAULT
+ (AST2600_WDT_RESET2_DEFAULT & ~AST2600_WDT_RESET2_LPC)>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
deleted file mode 100644
index 3208adb3e52e..000000000000
--- a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Aspeed Watchdog Timer
-
-Required properties:
- - compatible: must be one of:
- - "aspeed,ast2400-wdt"
- - "aspeed,ast2500-wdt"
- - "aspeed,ast2600-wdt"
-
- - reg: physical base address of the controller and length of memory mapped
- region
-
-Optional properties:
-
- - aspeed,reset-type = "cpu|soc|system|none"
-
- Reset behavior - Whenever a timeout occurs the watchdog can be programmed
- to generate one of three different, mutually exclusive, types of resets.
-
- Type "none" can be specified to indicate that no resets are to be done.
- This is useful in situations where another watchdog engine on chip is
- to perform the reset.
-
- If 'aspeed,reset-type=' is not specified the default is to enable system
- reset.
-
- Reset types:
-
- - cpu: Reset CPU on watchdog timeout
-
- - soc: Reset 'System on Chip' on watchdog timeout
-
- - system: Reset system on watchdog timeout
-
- - none: No reset is performed on timeout. Assumes another watchdog
- engine is responsible for this.
-
- - aspeed,alt-boot: If property is present then boot from alternate block.
- - aspeed,external-signal: If property is present then signal is sent to
- external reset counter (only WDT1 and WDT2). If not
- specified no external signal is sent.
- - aspeed,ext-pulse-duration: External signal pulse duration in microseconds
-
-Optional properties for AST2500-compatible watchdogs:
- - aspeed,ext-push-pull: If aspeed,external-signal is present, set the pin's
- drive type to push-pull. The default is open-drain.
- - aspeed,ext-active-high: If aspeed,external-signal is present and and the pin
- is configured as push-pull, then set the pulse
- polarity to active-high. The default is active-low.
-
-Optional properties for AST2500- and AST2600-compatible watchdogs:
- - aspeed,reset-mask: A bitmask indicating which peripherals will be reset if
- the watchdog timer expires. On AST2500 this should be a
- single word defined using the AST2500_WDT_RESET_* macros;
- on AST2600 this should be a two-word array with the first
- word defined using the AST2600_WDT_RESET1_* macros and the
- second word defined using the AST2600_WDT_RESET2_* macros.
-
-Examples:
-
- wdt1: watchdog@1e785000 {
- compatible = "aspeed,ast2400-wdt";
- reg = <0x1e785000 0x1c>;
- aspeed,reset-type = "system";
- aspeed,external-signal;
- };
-
- #include <dt-bindings/watchdog/aspeed-wdt.h>
- wdt2: watchdog@1e785040 {
- compatible = "aspeed,ast2600-wdt";
- reg = <0x1e785040 0x40>;
- aspeed,reset-mask = <AST2600_WDT_RESET1_DEFAULT
- (AST2600_WDT_RESET2_DEFAULT & ~AST2600_WDT_RESET2_LPC)>;
- };
diff --git a/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt b/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt
deleted file mode 100644
index 80a37193c0b8..000000000000
--- a/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Device tree bindings for twl4030-wdt driver (TWL4030 watchdog)
-
-Required properties:
- compatible = "ti,twl4030-wdt";
-
-Example:
-
-watchdog {
- compatible = "ti,twl4030-wdt";
-};
diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst
index 5da0046f7059..204b025f1349 100644
--- a/Documentation/doc-guide/parse-headers.rst
+++ b/Documentation/doc-guide/parse-headers.rst
@@ -61,7 +61,7 @@ DESCRIPTION
***********
-Convert a C header or source file (C_FILE), into a ReStructured Text
+Convert a C header or source file (C_FILE), into a reStructuredText
included via ..parsed-literal block with cross-references for the
documentation files that describe the API. It accepts an optional
EXCEPTIONS_FILE which describes what elements will be either ignored or
diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
index de587cf9cbed..bba40158dd5c 100644
--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -179,7 +179,9 @@ has the old 'iax' device naming in place) ::
# configure wq1.0
- accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --name="iaa_crypto" --device_name="crypto" iax1/wq1.0
+ accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --priority=10 --name="iaa_crypto" --driver-name="crypto" iax1/wq1.0
+
+ accel-config config-engine iax1/engine1.0 --group-id=0
# enable IAA device iax1
@@ -321,33 +323,30 @@ driver will generate statistics which can be accessed in debugfs at::
# ls -al /sys/kernel/debug/iaa-crypto/
total 0
- drwxr-xr-x 2 root root 0 Mar 3 09:35 .
- drwx------ 47 root root 0 Mar 3 09:35 ..
- -rw-r--r-- 1 root root 0 Mar 3 09:35 max_acomp_delay_ns
- -rw-r--r-- 1 root root 0 Mar 3 09:35 max_adecomp_delay_ns
- -rw-r--r-- 1 root root 0 Mar 3 09:35 max_comp_delay_ns
- -rw-r--r-- 1 root root 0 Mar 3 09:35 max_decomp_delay_ns
- -rw-r--r-- 1 root root 0 Mar 3 09:35 stats_reset
- -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_bytes_out
- -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_calls
- -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_bytes_in
- -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_calls
- -rw-r--r-- 1 root root 0 Mar 3 09:35 wq_stats
-
-Most of the above statisticss are self-explanatory. The wq_stats file
-shows per-wq stats, a set for each iaa device and wq in addition to
-some global stats::
+ drwxr-xr-x 2 root root 0 Mar 3 07:55 .
+ drwx------ 53 root root 0 Mar 3 07:55 ..
+ -rw-r--r-- 1 root root 0 Mar 3 07:55 global_stats
+ -rw-r--r-- 1 root root 0 Mar 3 07:55 stats_reset
+ -rw-r--r-- 1 root root 0 Mar 3 07:55 wq_stats
- # cat wq_stats
+The global_stats file shows a set of global statistics collected since
+the driver has been loaded or reset::
+
+ # cat global_stats
global stats:
- total_comp_calls: 100
- total_decomp_calls: 100
- total_comp_bytes_out: 22800
- total_decomp_bytes_in: 22800
+ total_comp_calls: 4300
+ total_decomp_calls: 4164
+ total_sw_decomp_calls: 0
+ total_comp_bytes_out: 5993989
+ total_decomp_bytes_in: 5993989
total_completion_einval_errors: 0
total_completion_timeout_errors: 0
- total_completion_comp_buf_overflow_errors: 0
+ total_completion_comp_buf_overflow_errors: 136
+
+The wq_stats file shows per-wq stats, a set for each iaa device and wq
+in addition to some global stats::
+ # cat wq_stats
iaa device:
id: 1
n_wqs: 1
@@ -379,21 +378,36 @@ some global stats::
iaa device:
id: 5
n_wqs: 1
- comp_calls: 100
- comp_bytes: 22800
- decomp_calls: 100
- decomp_bytes: 22800
+ comp_calls: 1360
+ comp_bytes: 1999776
+ decomp_calls: 0
+ decomp_bytes: 0
wqs:
name: iaa_crypto
- comp_calls: 100
- comp_bytes: 22800
- decomp_calls: 100
- decomp_bytes: 22800
+ comp_calls: 1360
+ comp_bytes: 1999776
+ decomp_calls: 0
+ decomp_bytes: 0
-Writing 0 to 'stats_reset' resets all the stats, including the
+ iaa device:
+ id: 7
+ n_wqs: 1
+ comp_calls: 2940
+ comp_bytes: 3994213
+ decomp_calls: 4164
+ decomp_bytes: 5993989
+ wqs:
+ name: iaa_crypto
+ comp_calls: 2940
+ comp_bytes: 3994213
+ decomp_calls: 4164
+ decomp_bytes: 5993989
+ ...
+
+Writing to 'stats_reset' resets all the stats, including the
per-device and per-wq stats::
- # echo 0 > stats_reset
+ # echo 1 > stats_reset
# cat wq_stats
global stats:
total_comp_calls: 0
@@ -457,7 +471,6 @@ Use the following commands to enable zswap::
# echo deflate-iaa > /sys/module/zswap/parameters/compressor
# echo zsmalloc > /sys/module/zswap/parameters/zpool
# echo 1 > /sys/module/zswap/parameters/enabled
- # echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled
# echo 100 > /proc/sys/vm/swappiness
# echo never > /sys/kernel/mm/transparent_hugepage/enabled
# echo 1 > /proc/sys/vm/overcommit_memory
@@ -536,12 +549,20 @@ The below script automatically does that::
echo "End Disable IAA"
+ echo "Reload iaa_crypto module"
+
+ rmmod iaa_crypto
+ modprobe iaa_crypto
+
+ echo "End Reload iaa_crypto module"
+
#
# configure iaa wqs and devices
#
echo "Configure IAA"
for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
- accel-config config-wq --group-id=0 --mode=dedicated --size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver_name="crypto" iax${i}/wq${i}
+ accel-config config-wq --group-id=0 --mode=dedicated --wq-size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver-name="crypto" iax${i}/wq${i}.0
+ accel-config config-engine iax${i}/engine${i}.0 --group-id=0
done
echo "End Configure IAA"
@@ -552,10 +573,10 @@ The below script automatically does that::
echo "Enable IAA"
for ((i = 1; i < ${num_iaa} * 2; i += 2)); do
- echo enable iaa iaa${i}
- accel-config enable-device iaa${i}
- echo enable wq iaa${i}/wq${i}.0
- accel-config enable-wq iaa${i}/wq${i}.0
+ echo enable iaa iax${i}
+ accel-config enable-device iax${i}
+ echo enable wq iax${i}/wq${i}.0
+ accel-config enable-wq iax${i}/wq${i}.0
done
echo "End Enable IAA"
@@ -599,7 +620,6 @@ the 'fixed' compression mode::
echo deflate-iaa > /sys/module/zswap/parameters/compressor
echo zsmalloc > /sys/module/zswap/parameters/zpool
echo 1 > /sys/module/zswap/parameters/enabled
- echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled
echo 100 > /proc/sys/vm/swappiness
echo never > /sys/kernel/mm/transparent_hugepage/enabled
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 0c153d79ccc4..29abf1eebf9f 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -77,7 +77,7 @@ consider though:
the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other
llseek operation will report -EINVAL.
- If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all
+ If llseek on dma-buf FDs isn't supported the kernel will report -ESPIPE for all
cases. Userspace can use this to detect support for discovering the dma-buf
size using llseek.
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 7be8b8dd5f00..18caebad7376 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -433,6 +433,7 @@ REGULATOR
devm_regulator_bulk_put()
devm_regulator_get()
devm_regulator_get_enable()
+ devm_regulator_get_enable_read_voltage()
devm_regulator_get_enable_optional()
devm_regulator_get_exclusive()
devm_regulator_get_optional()
diff --git a/Documentation/driver-api/eisa.rst b/Documentation/driver-api/eisa.rst
index 3eac11b7eb01..b33ebe1ec9ed 100644
--- a/Documentation/driver-api/eisa.rst
+++ b/Documentation/driver-api/eisa.rst
@@ -196,8 +196,8 @@ eisa_bus.disable_dev
virtual_root.force_probe
Force the probing code to probe EISA slots even when it cannot find an
EISA compliant mainboard (nothing appears on slot 0). Defaults to 0
- (don't force), and set to 1 (force probing) when either
- CONFIG_ALPHA_JENSEN or CONFIG_EISA_VLB_PRIMING are set.
+ (don't force), and set to 1 (force probing) when
+ CONFIG_EISA_VLB_PRIMING is set.
Random notes
============
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index bf6319cc531b..e541bd2e898b 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -7,7 +7,7 @@ This document serves as a guide for writers of GPIO chip drivers.
Each GPIO controller driver needs to include the following header, which defines
the structures used to define a GPIO driver::
- #include <linux/gpio/driver.h>
+ #include <linux/gpio/driver.h>
Internal Representation of GPIOs
@@ -144,7 +144,7 @@ is not open, it will present a high-impedance (tristate) to the external rail::
in ----|| |/
||--+ in ----|
| |\
- GND GND
+ GND GND
This configuration is normally used as a way to achieve one of two things:
@@ -550,10 +550,10 @@ the interrupt separately and go with it:
struct my_gpio *g;
struct gpio_irq_chip *girq;
- ret = devm_request_threaded_irq(dev, irq, NULL,
- irq_thread_fn, IRQF_ONESHOT, "my-chip", g);
+ ret = devm_request_threaded_irq(dev, irq, NULL, irq_thread_fn,
+ IRQF_ONESHOT, "my-chip", g);
if (ret < 0)
- return ret;
+ return ret;
/* Get a pointer to the gpio_irq_chip */
girq = &g->gc.irq;
@@ -681,12 +681,12 @@ certain operations and keep track of usage inside of the gpiolib subsystem.
Input GPIOs can be used as IRQ signals. When this happens, a driver is requested
to mark the GPIO as being used as an IRQ::
- int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
This will prevent the use of non-irq related GPIO APIs until the GPIO IRQ lock
is released::
- void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
When implementing an irqchip inside a GPIO driver, these two functions should
typically be called in the .startup() and .shutdown() callbacks from the
@@ -708,12 +708,12 @@ When a GPIO is used as an IRQ signal, then gpiolib also needs to know if
the IRQ is enabled or disabled. In order to inform gpiolib about this,
the irqchip driver should call::
- void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
+ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
This allows drivers to drive the GPIO as an output while the IRQ is
disabled. When the IRQ is enabled again, a driver should call::
- void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
When implementing an irqchip inside a GPIO driver, these two functions should
typically be called in the .irq_disable() and .irq_enable() callbacks from the
@@ -763,12 +763,12 @@ Sometimes it is useful to allow a GPIO chip driver to request its own GPIO
descriptors through the gpiolib API. A GPIO driver can use the following
functions to request and free descriptors::
- struct gpio_desc *gpiochip_request_own_desc(struct gpio_desc *desc,
- u16 hwnum,
- const char *label,
- enum gpiod_flags flags)
+ struct gpio_desc *gpiochip_request_own_desc(struct gpio_desc *desc,
+ u16 hwnum,
+ const char *label,
+ enum gpiod_flags flags)
- void gpiochip_free_own_desc(struct gpio_desc *desc)
+ void gpiochip_free_own_desc(struct gpio_desc *desc)
Descriptors requested with gpiochip_request_own_desc() must be released with
gpiochip_free_own_desc().
diff --git a/Documentation/driver-api/gpio/legacy.rst b/Documentation/driver-api/gpio/legacy.rst
index b6505914791c..534dfe95d128 100644
--- a/Documentation/driver-api/gpio/legacy.rst
+++ b/Documentation/driver-api/gpio/legacy.rst
@@ -225,8 +225,6 @@ setup or driver probe/teardown code, so this is an easy constraint.)::
gpio_request()
## gpio_request_one()
- ## gpio_request_array()
- ## gpio_free_array()
gpio_free()
@@ -295,14 +293,6 @@ are claimed, three additional calls are defined::
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
- /* request multiple GPIOs in a single call
- */
- int gpio_request_array(struct gpio *array, size_t num);
-
- /* release multiple GPIOs in a single call
- */
- void gpio_free_array(struct gpio *array, size_t num);
-
where 'flags' is currently defined to specify the following properties:
* GPIOF_DIR_IN - to configure direction as input
@@ -341,12 +331,6 @@ A typical example of usage::
if (err)
...
- err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios));
- if (err)
- ...
-
- gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios));
-
GPIOs mapped to IRQs
--------------------
diff --git a/Documentation/driver-api/media/drivers/index.rst b/Documentation/driver-api/media/drivers/index.rst
index c4123a16b5f9..7f6f3dcd5c90 100644
--- a/Documentation/driver-api/media/drivers/index.rst
+++ b/Documentation/driver-api/media/drivers/index.rst
@@ -26,6 +26,7 @@ Video4Linux (V4L) drivers
vimc-devel
zoran
ccs/ccs
+ ipu6
Digital TV drivers
diff --git a/Documentation/driver-api/media/drivers/ipu6.rst b/Documentation/driver-api/media/drivers/ipu6.rst
new file mode 100644
index 000000000000..6e1dd19b36fb
--- /dev/null
+++ b/Documentation/driver-api/media/drivers/ipu6.rst
@@ -0,0 +1,205 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+Intel IPU6 Driver
+==================
+
+Author: Bingbu Cao <bingbu.cao@intel.com>
+
+Overview
+=========
+
+Intel IPU6 is the sixth generation of the Intel Image Processing Unit, used in
+Intel chipsets such as Tiger Lake, Jasper Lake, Alder Lake, Raptor Lake and
+Meteor Lake. IPU6 consists of two major systems: the Input System (ISYS) and
+the Processing System (PSYS). The IPU6 is visible on the PCI bus as a single
+device; it can be found with ``lspci``:
+
+``0000:00:05.0 Multimedia controller: Intel Corporation Device xxxx (rev xx)``
+
+IPU6 has a 16 MB BAR in PCI configuration space for MMIO registers, which is
+visible to the driver.
+
+Buttress
+=========
+
+The IPU6 connects to the system fabric through the Buttress, which enables the
+host driver to control the IPU6. It also allows the IPU6 to access system
+memory to store and load frame pixel streams and any other metadata.
+
+Buttress mainly manages several system functionalities: power management,
+interrupt handling, firmware authentication and global timer sync.
+
+ISYS and PSYS Power flow
+------------------------
+
+The IPU6 driver initiates an ISYS or PSYS power-up or power-down request by
+setting the Buttress frequency control register for ISYS or PSYS
+(``IPU6_BUTTRESS_REG_IS_FREQ_CTL`` and ``IPU6_BUTTRESS_REG_PS_FREQ_CTL``) in the
+function:
+
+.. c:function:: int ipu6_buttress_power(...)
+
+The Buttress forwards the request to the Punit. After the Punit executes the
+power-up flow, the Buttress indicates to the driver that ISYS or PSYS is
+powered up by updating the power status registers.
+
+.. Note:: Due to a hardware limitation, ISYS power up must take place prior to
+ PSYS power up, and ISYS power down must take place after PSYS power down.
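+
+A minimal sketch of this ordering constraint is shown below; the
+``isys_power_set()`` and ``psys_power_set()`` helpers are hypothetical stand-ins
+for the driver's real power control paths built on ``ipu6_buttress_power()``,
+not actual driver entry points:
+
+.. code-block:: c
+
+    /* Illustrative only: hypothetical helpers showing the required ordering. */
+    int isys_power_set(bool on);
+    int psys_power_set(bool on);
+
+    static int ipu6_power_up_both(void)
+    {
+        int ret;
+
+        ret = isys_power_set(true);     /* ISYS must come up first */
+        if (ret)
+            return ret;
+
+        ret = psys_power_set(true);     /* then PSYS */
+        if (ret)
+            isys_power_set(false);      /* unwind in reverse order on failure */
+
+        return ret;
+    }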
+
+Interrupt
+---------
+
+The IPU6 interrupt can be generated as MSI or INTA. An interrupt is triggered
+when an ISYS, PSYS or Buttress event or error happens. The driver gets the
+interrupt cause by reading the interrupt status register
+``BUTTRESS_REG_ISR_STATUS``, clears the irq status and then calls the specific
+ISYS or PSYS irq handler.
+
+.. c:function:: irqreturn_t ipu6_buttress_isr(int irq, ...)
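+
+A simplified sketch of that dispatch flow is shown below; apart from
+``BUTTRESS_REG_ISR_STATUS``, the register offsets, cause masks and helper
+functions are hypothetical placeholders, not the actual driver symbols:
+
+.. code-block:: c
+
+    /* All names below except BUTTRESS_REG_ISR_STATUS are illustrative. */
+    #define BUTTRESS_REG_ISR_CLEAR  0x0     /* placeholder clear-register offset */
+    #define ISYS_IRQ_MASK           BIT(0)  /* placeholder cause bits */
+    #define PSYS_IRQ_MASK           BIT(1)
+
+    struct ipu_ctx { void __iomem *base; };
+
+    void handle_isys_irq(struct ipu_ctx *isp);  /* hypothetical sub-handlers */
+    void handle_psys_irq(struct ipu_ctx *isp);
+
+    static irqreturn_t buttress_isr_sketch(int irq, void *priv)
+    {
+        struct ipu_ctx *isp = priv;
+        u32 status = readl(isp->base + BUTTRESS_REG_ISR_STATUS);
+
+        if (!status)
+            return IRQ_NONE;
+
+        /* ack the pending causes before dispatching the handlers */
+        writel(status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+        if (status & ISYS_IRQ_MASK)
+            handle_isys_irq(isp);
+        if (status & PSYS_IRQ_MASK)
+            handle_psys_irq(isp);
+
+        return IRQ_HANDLED;
+    }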
+
+Security and firmware authentication
+-------------------------------------
+
+To address IPU6 firmware security concerns, the IPU6 firmware needs to undergo
+an authentication process before it is allowed to execute on the IPU6 internal
+processors. The IPU6 driver works with the Converged Security Engine (CSE) to
+complete the authentication process. The CSE is responsible for authenticating
+the IPU6 firmware. The authenticated firmware binary is copied into an isolated
+memory region. The firmware authentication process is implemented by the CSE
+following an IPC handshake with the IPU6 driver. A number of Buttress registers
+are used by the CSE and the IPU6 driver to communicate with each other via IPC.
+
+.. c:function:: int ipu6_buttress_authenticate(...)
+
+Global timer sync
+-----------------
+
+The IPU6 driver initiates a Hammock Harbor synchronization flow each time it
+starts camera operation. The IPU6 synchronizes an internal counter in the
+Buttress with a copy of the SoC time; this counter maintains the up-to-date
+time until camera operation is stopped. The IPU6 driver can use this time
+counter to calibrate the timestamp based on the timestamp in the response
+event from firmware.
+
+.. c:function:: int ipu6_buttress_start_tsc_sync(...)
+
+DMA and MMU
+============
+
+The IPU6 has its own scalar processors, on which the firmware runs, and an
+internal 32-bit virtual address space. The IPU6 has MMU address translation
+hardware that allows the scalar processors to access the internal memory and
+external system memory through IPU6 virtual addresses. The address translation
+is based on two levels of page lookup tables stored in system memory, which are
+maintained by the IPU6 driver. The IPU6 driver sets the level-1 page table base
+address in an MMU register and lets the MMU perform the page table lookups.
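+
+As an illustration of the two-level lookup, a 32-bit IPU virtual address can be
+split into a level-1 index, a level-2 index and a page offset. The split below
+assumes a hypothetical 4 KiB page size and 1024-entry tables, not the exact
+IPU6 MMU layout:
+
+.. code-block:: c
+
+    #define IPU_PAGE_SHIFT  12                      /* assumed 4 KiB pages */
+    #define IPU_L2_SHIFT    IPU_PAGE_SHIFT
+    #define IPU_L1_SHIFT    (IPU_PAGE_SHIFT + 10)   /* assumed 1024 L2 entries */
+
+    static inline u32 ipu_l1_idx(u32 iova)
+    {
+        return iova >> IPU_L1_SHIFT;                /* index into the L1 table */
+    }
+
+    static inline u32 ipu_l2_idx(u32 iova)
+    {
+        return (iova >> IPU_L2_SHIFT) & 0x3ff;      /* index into the L2 table */
+    }
+
+    static inline u32 ipu_page_off(u32 iova)
+    {
+        return iova & ((1U << IPU_PAGE_SHIFT) - 1); /* offset within the page */
+    }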
+
+The IPU6 driver exports its own DMA operations. The IPU6 driver updates the
+page table entries for each DMA operation and invalidates the MMU TLB after
+each unmap and free.
+
+.. code-block:: none
+
+ const struct dma_map_ops ipu6_dma_ops = {
+ .alloc = ipu6_dma_alloc,
+ .free = ipu6_dma_free,
+ .mmap = ipu6_dma_mmap,
+ .map_sg = ipu6_dma_map_sg,
+ .unmap_sg = ipu6_dma_unmap_sg,
+ ...
+ };
+
+.. Note:: The IPU6 MMU works behind the IOMMU, so for each IPU6 DMA operation
+ the driver calls the generic PCI DMA ops to ask the IOMMU to do the
+ additional mapping if VT-d is enabled.
+
+Firmware file format
+====================
+
+The IPU6 firmware is in the Code Partition Directory (CPD) file format. The CPD
+firmware contains a CPD header, several CPD entries and components. A CPD
+component includes 3 entries - manifest, metadata and module data. The manifest
+and metadata are defined by the CSE and used by the CSE for authentication. The
+module data is specific to the IPU6 and holds the firmware binary data, called
+the package directory. The IPU6 driver (``ipu6-cpd.c`` in particular) parses
+and validates the CPD firmware file, gets the package directory binary data of
+the IPU6 firmware, copies it to a dedicated DMA buffer and sets its base
+address in the Buttress ``FW_SOURCE_BASE`` register. Finally, the CSE
+authenticates this firmware binary.
+
+
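+At a high level, the load-and-authenticate sequence described above can be
+summarized by the sketch below. Every ``example_*`` name is a hypothetical
+stand-in for the corresponding driver code (including the Buttress
+authentication call), and only ``struct firmware`` comes from the kernel
+firmware loader API:
+
+.. code-block:: c
+
+   #include <linux/firmware.h>
+
+   /* Illustrative flow only; error handling trimmed for brevity. */
+   static int example_load_and_authenticate_fw(struct example_ipu6_device *isp,
+                                               const struct firmware *fw)
+   {
+           void *pkg_dir;
+           size_t pkg_dir_size;
+           dma_addr_t pkg_dir_dma;
+           int ret;
+
+           /* Parse and validate the CPD file, locate the package directory. */
+           ret = example_cpd_parse(isp, fw->data, fw->size,
+                                   &pkg_dir, &pkg_dir_size);
+           if (ret)
+                   return ret;
+
+           /* Copy the package directory into a DMA buffer visible to the IPU6. */
+           pkg_dir_dma = example_copy_to_dma_buffer(isp, pkg_dir, pkg_dir_size);
+
+           /* Point the Buttress FW_SOURCE_BASE at it and ask the CSE to verify. */
+           example_buttress_set_fw_source_base(isp, pkg_dir_dma);
+           return example_buttress_authenticate(isp);
+   }
+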
+Syscom interface
+================
+
+The IPU6 driver communicates with the firmware via the Syscom ABI. Syscom is an
+inter-processor communication mechanism between the IPU scalar processors and
+the CPU. A number of resources are shared between firmware and software: a
+system memory region where the message queues reside (the firmware accesses
+this memory region via the IPU MMU), and common IPU6 MMIO registers where the
+queue read and write indices reside. The Syscom queues are fixed-depth FIFO
+queues with a configurable number of tokens (messages). Software and firmware
+act as producer and consumer of tokens in the queues and update the write and
+read indices separately when sending or receiving each message.
+
+Before initiating and starting the communication with the firmware, the IPU6
+driver must configure the number of input and output queues, the number of
+tokens per queue and the size of each token. Firmware and software must use the
+same configuration. The IPU6 Buttress has a number of firmware boot parameter
+registers which can be used to store the address of the configuration and to
+initialise the Syscom state; the driver can then request the firmware to start
+and run by setting the scalar processor control status register.
+
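+A minimal sketch of the producer side of such a queue is shown below; the
+structure layout, register handling and helper are illustrative assumptions,
+not the actual Syscom implementation:
+
+.. code-block:: c
+
+   #include <linux/errno.h>
+   #include <linux/io.h>
+   #include <linux/string.h>
+
+   struct example_syscom_queue {
+           void            *tokens;        /* shared-memory ring of num_tokens slots */
+           u32             token_size;
+           u32             num_tokens;
+           void __iomem    *wr_reg;        /* MMIO write-index register */
+           void __iomem    *rd_reg;        /* MMIO read-index register */
+   };
+
+   static int example_syscom_send(struct example_syscom_queue *q,
+                                  const void *token)
+   {
+           u32 wr = readl(q->wr_reg);
+           u32 rd = readl(q->rd_reg);
+           u32 next = (wr + 1) % q->num_tokens;
+
+           if (next == rd)
+                   return -EBUSY;  /* queue full, firmware has not caught up */
+
+           memcpy(q->tokens + wr * q->token_size, token, q->token_size);
+           writel(next, q->wr_reg);        /* publish the token to the firmware */
+           return 0;
+   }
+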
+Input System
+============
+
+The IPU6 input system consists of a MIPI D-PHY and several CSI-2 receivers. It
+can capture image pixel data from camera sensors or other MIPI CSI-2 output
+devices.
+
+D-PHYs and CSI-2 ports lane mapping
+-----------------------------------
+
+The IPU6 integrates different D-PHY IPs on different SoCs: on Tiger Lake and
+Alder Lake the IPU6 integrates an MCD10 D-PHY, the IPU6SE on Jasper Lake
+integrates a JSL D-PHY and the IPU6EP on Meteor Lake integrates a Synopsys DWC
+D-PHY. There is an adaptation layer between the D-PHY and the CSI-2 receiver
+controller which includes port configuration, a PHY wrapper and private test
+interfaces for the D-PHY. Three D-PHY drivers, ``ipu6-isys-mcd-phy.c``,
+``ipu6-isys-jsl-phy.c`` and ``ipu6-isys-dwc-phy.c``, program the above 3 D-PHYs
+in the IPU6.
+
+Different IPU6 versions have different D-PHY lane mappings. On Tiger Lake,
+there are 12 data lanes and 8 clock lanes and the IPU6 supports a maximum of 8
+CSI-2 ports; see the PPI mapping in ``ipu6-isys-mcd-phy.c`` for more
+information. On Jasper Lake and Alder Lake, the D-PHY has 8 data lanes and 4
+clock lanes and the IPU6 supports a maximum of 4 CSI-2 ports. On Meteor Lake,
+the D-PHY has 12 data lanes and 6 clock lanes so the IPU6 supports a maximum of
+6 CSI-2 ports.
+
+.. Note:: Each pair of CSI-2 ports is a single unit that can share the data
+          lanes. For example, for CSI-2 ports 0 and 1, CSI-2 port 0 supports a
+          maximum of 4 data lanes and CSI-2 port 1 supports a maximum of 2 data
+          lanes; CSI-2 port 0 with 2 data lanes can work together with CSI-2
+          port 1 with 2 data lanes. If CSI-2 port 0 is used with 4 lanes, CSI-2
+          port 1 is not available as the 4 data lanes are shared by CSI-2 ports
+          0 and 1. The same applies to CSI-2 ports 2/3, 4/5 and 6/7.
+
+ISYS firmware ABIs
+------------------
+
+The IPU6 firmware implements a series of ABIs for software access. In general,
+software first prepares the stream configuration ``struct
+ipu6_fw_isys_stream_cfg_data_abi`` and sends it to the firmware via the
+``STREAM_OPEN`` command. The stream configuration includes input pins and
+output pins: an input pin ``struct ipu6_fw_isys_input_pin_info_abi`` defines
+the resolution and data type of the input source, and an output pin ``struct
+ipu6_fw_isys_output_pin_info_abi`` defines the output resolution, stride, frame
+format, etc.
+
+Once the driver receives the interrupt from the firmware indicating that the
+stream was opened successfully, the driver sends the ``STREAM_START`` and
+``STREAM_CAPTURE`` commands to request the firmware to start capturing image
+frames. The ``STREAM_CAPTURE`` command queues buffers to the firmware with
+``struct ipu6_fw_isys_frame_buff_set``; software then waits for the interrupt
+and response from the firmware. A ``PIN_DATA_READY`` response means a buffer is
+ready on a specific output pin and software can return the buffer to the user,
+as sketched below.
+
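+The sketch below summarizes that command sequence; the ``example_*`` helpers,
+stream structure and command constants are simplified placeholders for the
+driver's real firmware-messaging code, not the actual ABI enums:
+
+.. code-block:: c
+
+   /* Illustrative only; locking, waiting and error recovery are omitted. */
+   static int example_start_capture(struct example_isys_stream *stream,
+                                    struct ipu6_fw_isys_stream_cfg_data_abi *cfg,
+                                    struct ipu6_fw_isys_frame_buff_set *bufs)
+   {
+           int ret;
+
+           ret = example_fw_send(stream, EXAMPLE_CMD_STREAM_OPEN,
+                                 cfg, sizeof(*cfg));
+           if (ret)
+                   return ret;
+
+           /* Wait for the interrupt/response acknowledging the stream open. */
+           ret = example_wait_fw_ack(stream);
+           if (ret)
+                   return ret;
+
+           ret = example_fw_send(stream, EXAMPLE_CMD_STREAM_START, NULL, 0);
+           if (ret)
+                   return ret;
+
+           /* Queue the first buffer set; PIN_DATA_READY responses return buffers. */
+           return example_fw_send(stream, EXAMPLE_CMD_STREAM_CAPTURE,
+                                  bufs, sizeof(*bufs));
+   }
+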
+.. Note:: See :ref:`Examples<ipu6_isys_capture_examples>` for how to do capture
+          with the IPU6 ISYS driver.
diff --git a/Documentation/driver-api/mtd/nand_ecc.rst b/Documentation/driver-api/mtd/nand_ecc.rst
index 74347c14a70b..a0d681f26a2e 100644
--- a/Documentation/driver-api/mtd/nand_ecc.rst
+++ b/Documentation/driver-api/mtd/nand_ecc.rst
@@ -462,7 +462,7 @@ statements is reduced. This is also reflected in the assembly code.
Analysis 3
==========
-Very weird. Guess it has to do with caching or instruction parallellism
+Very weird. Guess it has to do with caching or instruction parallelism
or so. I also tried on an eeePC (Celeron, clocked at 900 Mhz). Interesting
observation was that this one is only 30% slower (according to time)
executing the code as my 3Ghz D920 processor.
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 64b231d125e0..273281474c09 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -20,7 +20,7 @@ Although the old parallel (fast/wide/ultra) SCSI bus has largely fallen
out of use, the SCSI command set is more widely used than ever to
communicate with devices over a number of different busses.
-The `SCSI protocol <http://www.t10.org/scsi-3.htm>`__ is a big-endian
+The `SCSI protocol <https://www.t10.org/scsi-3.htm>`__ is a big-endian
peer-to-peer packet based protocol. SCSI commands are 6, 10, 12, or 16
bytes long, often followed by an associated data payload.
@@ -28,8 +28,7 @@ SCSI commands can be transported over just about any kind of bus, and
are the default protocol for storage devices attached to USB, SATA, SAS,
Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also
commonly exchanged over Infiniband,
-`I2O <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
-(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
+TCP/IP (`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
ports <http://cyberelk.net/tim/parport/parscsi.html>`__.
Design of the Linux SCSI subsystem
@@ -170,9 +169,9 @@ drivers/scsi/scsi_netlink.c
Infrastructure to provide async events from transports to userspace via
netlink, using a single NETLINK_SCSITRANSPORT protocol for all
-transports. See `the original patch
-submission <http://marc.info/?l=linux-scsi&m=115507374832500&w=2>`__ for
-more details.
+transports. See `the original patch submission
+<https://lore.kernel.org/linux-scsi/1155070439.6275.5.camel@localhost.localdomain/>`__
+for more details.
.. kernel-doc:: drivers/scsi/scsi_netlink.c
:internal:
@@ -259,7 +258,7 @@ attributes for Serial Attached SCSI, a variant of SATA aimed at large
high-end systems.
The SAS transport class contains common code to deal with SAS HBAs, an
-aproximated representation of SAS topologies in the driver model, and
+approximated representation of SAS topologies in the driver model, and
various sysfs attributes to expose these topologies and management
interfaces to userspace.
@@ -328,11 +327,11 @@ the ordinary is seen.
To be more realistic, the simulated devices have the transport
attributes of SAS disks.
-For documentation see http://sg.danny.cz/sg/sdebug26.html
+For documentation see http://sg.danny.cz/sg/scsi_debug.html
todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I2O, Parallel ports,
+FireWire, ATAPI devices, Infiniband, Parallel ports,
netlink...
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index fb41768696ec..89f9c37bb979 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -422,7 +422,7 @@ USBDEVFS_CONNECTINFO
USBDEVFS_GET_SPEED
Returns the speed of the device. The speed is returned as a
- nummerical value in accordance with enum usb_device_speed
+ numerical value in accordance with enum usb_device_speed
File modification time is not updated by this request.
diff --git a/Documentation/driver-api/vfio.rst b/Documentation/driver-api/vfio.rst
index 633d11c7fa71..2a21a42c9386 100644
--- a/Documentation/driver-api/vfio.rst
+++ b/Documentation/driver-api/vfio.rst
@@ -364,7 +364,7 @@ IOMMUFD IOAS/HWPT to enable userspace DMA::
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
map.iova = 0; /* 1MB starting at 0x0 from device view */
map.length = 1024 * 1024;
- map.ioas_id = alloc_data.out_ioas_id;;
+ map.ioas_id = alloc_data.out_ioas_id;
ioctl(iommufd, IOMMU_IOAS_MAP, &map);
diff --git a/Documentation/driver-api/virtio/writing_virtio_drivers.rst b/Documentation/driver-api/virtio/writing_virtio_drivers.rst
index e14c58796d25..e5de6f5d061a 100644
--- a/Documentation/driver-api/virtio/writing_virtio_drivers.rst
+++ b/Documentation/driver-api/virtio/writing_virtio_drivers.rst
@@ -97,7 +97,6 @@ like this::
static struct virtio_driver virtio_dummy_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_dummy_probe,
.remove = virtio_dummy_remove,
diff --git a/Documentation/driver-api/wbrf.rst b/Documentation/driver-api/wbrf.rst
index f48bfa029813..6b18833e2e69 100644
--- a/Documentation/driver-api/wbrf.rst
+++ b/Documentation/driver-api/wbrf.rst
@@ -68,7 +68,7 @@ The expected flow for the consumers:
can be enabled for the device.
2. Call `amd_wbrf_register_notifier` to register for notification
of frequency band change(add or remove) from other producers.
-3. Call the `amd_wbrf_retrieve_freq_band` initally to retrieve
+3. Call the `amd_wbrf_retrieve_freq_band` initially to retrieve
current active frequency bands considering some producers may broadcast
such information before the consumer is up.
4. On receiving a notification for frequency band change, run
diff --git a/Documentation/filesystems/api-summary.rst b/Documentation/filesystems/api-summary.rst
index 98db2ea5fa12..cc5cc7f3fbd8 100644
--- a/Documentation/filesystems/api-summary.rst
+++ b/Documentation/filesystems/api-summary.rst
@@ -56,9 +56,6 @@ Other Functions
.. kernel-doc:: fs/namei.c
:export:
-.. kernel-doc:: fs/buffer.c
- :export:
-
.. kernel-doc:: block/bio.c
:export:
diff --git a/Documentation/filesystems/bcachefs/CodingStyle.rst b/Documentation/filesystems/bcachefs/CodingStyle.rst
new file mode 100644
index 000000000000..0c45829a4899
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/CodingStyle.rst
@@ -0,0 +1,186 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+bcachefs coding style
+=====================
+
+Good development is like gardening, and codebases are our gardens. Tend to them
+every day; look for little things that are out of place or in need of tidying.
+A little weeding here and there goes a long way; don't wait until things have
+spiraled out of control.
+
+Things don't always have to be perfect - nitpicking often does more harm than
+good. But appreciate beauty when you see it - and let people know.
+
+The code that you are afraid to touch is the code most in need of refactoring.
+
+A little organizing here and there goes a long way.
+
+Put real thought into how you organize things.
+
+Good code is readable code, where the structure is simple and leaves nowhere
+for bugs to hide.
+
+Assertions are one of our most important tools for writing reliable code. If in
+the course of writing a patchset you encounter a condition that shouldn't
+happen (and will have unpredictable or undefined behaviour if it does), or
+you're not sure if it can happen and not sure how to handle it yet - make it a
+BUG_ON(). Don't leave undefined or unspecified behavior lurking in the codebase.
+
+By the time you finish the patchset, you should understand better which
+assertions need to be handled and turned into checks with error paths, and
+which should be logically impossible. Leave the BUG_ON()s in for the ones which
+are logically impossible. (Or, make them debug mode assertions if they're
+expensive - but don't turn everything into a debug mode assertion, so that
+we're not stuck debugging undefined behaviour should it turn out that you were
+wrong).
+
+Assertions are documentation that can't go out of date. Good assertions are
+wonderful.
+
+Good assertions drastically and dramatically reduce the amount of testing
+required to shake out bugs.
+
+Good assertions are based on state, not logic. To write good assertions, you
+have to think about what the invariants on your state are.
+
+Good invariants and assertions will hold everywhere in your codebase. This
+means that you can run them in only a few places in the checked in version, but
+should you need to debug something that caused the assertion to fail, you can
+quickly shotgun them everywhere to find the codepath that broke the invariant.
+
+A good assertion checks something that the compiler could check for us, and
+elide - if we were working in a language with embedded correctness proofs that
+the compiler could check. This is something that exists today, but it'll likely
+still be a few decades before it comes to systems programming languages. But we
+can still incorporate that kind of thinking into our code and document the
+invariants with runtime checks - much like the way people working in
+dynamically typed languages may add type annotations, gradually making their
+code statically typed.
+
+Looking for ways to make your assertions simpler - and higher level - will
+often nudge you towards making the entire system simpler and more robust.
+
+Good code is code where you can poke around and see what it's doing -
+introspection. We can't debug anything if we can't see what's going on.
+
+Whenever we're debugging, and the solution isn't immediately obvious, if the
+issue is that we don't know where the issue is because we can't see what's
+going on - fix that first.
+
+We have the tools to make anything visible at runtime, efficiently - RCU and
+percpu data structures among them. Don't let things stay hidden.
+
+The most important tool for introspection is the humble pretty printer - in
+bcachefs, this means `*_to_text()` functions, which output to printbufs.
+
+Pretty printers are wonderful, because they compose and you can use them
+everywhere. Having functions to print whatever object you're working with will
+make your error messages much easier to write (therefore they will actually
+exist) and much more informative. And they can be used from sysfs/debugfs, as
+well as tracepoints.
+
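+As a rough illustration (a made-up object, assuming the usual printbuf helpers
+such as `prt_printf()`), the pattern looks like this:
+
+.. code-block:: c
+
+   struct my_extent {
+           u64     offset;
+           u32     sectors;
+   };
+
+   static void my_extent_to_text(struct printbuf *out, const struct my_extent *e)
+   {
+           prt_printf(out, "offset %llu sectors %u", e->offset, e->sectors);
+   }
+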
+Runtime info and debugging tools should come with clear descriptions and
+labels, and good structure - we don't want files with a list of bare integers,
+like in procfs. Part of the job of the debugging tools is to educate users and
+new developers as to how the system works.
+
+Error messages should, whenever possible, tell you everything you need to debug
+the issue. It's worth putting effort into them.
+
+Tracepoints shouldn't be the first thing you reach for. They're an important
+tool, but always look for more immediate ways to make things visible. When we
+have to rely on tracing, we have to know which tracepoints we're looking for,
+and then we have to run the troublesome workload, and then we have to sift
+through logs. This is a lot of steps to go through when a user is hitting
+something, and if it's intermittent it may not even be possible.
+
+The humble counter is an incredibly useful tool. They're cheap and simple to
+use, and many complicated internal operations with lots of things that can
+behave weirdly (anything involving memory reclaim, for example) become
+shockingly easy to debug once you have counters on every distinct codepath.
+
+Persistent counters are even better.
+
+When debugging, try to get the most out of every bug you come across; don't
+rush to fix the initial issue. Look for things that will make related bugs
+easier the next time around - introspection, new assertions, better error
+messages, new debug tools, and do those first. Look for ways to make the system
+better behaved; often one bug will uncover several other bugs through
+downstream effects.
+
+Fix all that first, and then the original bug last - even if that means keeping
+a user waiting. They'll thank you in the long run, and when they understand
+what you're doing you'll be amazed at how patient they're happy to be. Users
+like to help - otherwise they wouldn't be reporting the bug in the first place.
+
+Talk to your users. Don't isolate yourself.
+
+Users notice all sorts of interesting things, and by just talking to them and
+interacting with them you can benefit from their experience.
+
+Spend time doing support and helpdesk stuff. Don't just write code - code isn't
+finished until it's being used trouble free.
+
+This will also motivate you to make your debugging tools as good as possible,
+and perhaps even your documentation, too. Like anything else in life, the more
+time you spend at it the better you'll get, and you the developer are the
+person most able to improve the tools to make debugging quick and easy.
+
+Be wary of how you take on and commit to big projects. Don't let development
+become product-manager focused. Often an idea is a good one but needs to
+wait for its proper time - but you won't know if it's the proper time for an
+idea until you start writing code.
+
+Expect to throw a lot of things away, or leave them half finished for later.
+Nobody writes all perfect code that all gets shipped, and you'll be much more
+productive in the long run if you notice this early and shift to something
+else. The experience gained and lessons learned will be valuable for all the
+other work you do.
+
+But don't be afraid to tackle projects that require significant rework of
+existing code. Sometimes these can be the best projects, because they can lead
+us to make existing code more general, more flexible, more multipurpose and
+perhaps more robust. Just don't hesitate to abandon the idea if it looks like
+it's going to make a mess of things.
+
+Complicated features can often be done as a series of refactorings, with the
+final change that actually implements the feature as a quite small patch at the
+end. It's wonderful when this happens, especially when those refactorings are
+things that improve the codebase in their own right. When that happens there's
+much less risk of wasted effort if the feature you were going for doesn't work
+out.
+
+Always strive to work incrementally. Always strive to turn the big projects
+into little bite sized projects that can prove their own merits.
+
+Instead of always tackling those big projects, look for little things that
+will be useful, and make the big projects easier.
+
+The question of what's likely to be useful is where junior developers most
+often go astray - doing something because it seems like it'll be useful often
+leads to overengineering. Knowing what's useful comes from many years of
+experience, or talking with people who have that experience - or from simply
+reading lots of code and looking for common patterns and issues. Don't be
+afraid to throw things away and do something simpler.
+
+Talk about your ideas with your fellow developers; often times the best things
+come from relaxed conversations where people aren't afraid to say "what if?".
+
+Don't neglect your tools.
+
+The most important tools (besides the compiler and our text editor) are the
+tools we use for testing. The shortest possible edit/test/debug cycle is
+essential for working productively. We learn, gain experience, and discover the
+errors in our thinking by running our code and seeing what happens. If your
+time is being wasted because your tools are bad or too slow - don't accept it,
+fix it.
+
+Put effort into your documentation, commit messages, and code comments - but
+don't go overboard. A good commit message is wonderful - but if the information
+was important enough to go in a commit message, ask yourself if it would be
+even better as a code comment.
+
+A good code comment is wonderful, but even better is the comment that didn't
+need to exist because the code was so straightforward as to be obvious;
+organized into small clean and tidy modules, with clear and descriptive names
+for functions and variables, where every line of code has a clear purpose.
diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst
new file mode 100644
index 000000000000..95fc4b90739e
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+bcachefs Documentation
+======================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ CodingStyle
+ errorcodes
diff --git a/Documentation/filesystems/buffer.rst b/Documentation/filesystems/buffer.rst
new file mode 100644
index 000000000000..ae24faf68efa
--- /dev/null
+++ b/Documentation/filesystems/buffer.rst
@@ -0,0 +1,12 @@
+Buffer Heads
+============
+
+Linux uses buffer heads to maintain state about individual filesystem blocks.
+Buffer heads are deprecated and new filesystems should use iomap instead.
+
+Functions
+---------
+
+.. kernel-doc:: include/linux/buffer_head.h
+.. kernel-doc:: fs/buffer.c
+ :export:
diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
index 05ea387bc9fb..6fdf0b02df43 100644
--- a/Documentation/filesystems/directory-locking.rst
+++ b/Documentation/filesystems/directory-locking.rst
@@ -44,7 +44,7 @@ For our purposes all operations fall in 6 classes:
* decide which of the source and target need to be locked.
The source needs to be locked if it's a non-directory, target - if it's
a non-directory or about to be removed.
- * take the locks that need to be taken (exlusive), in inode pointer order
+ * take the locks that need to be taken (exclusive), in inode pointer order
if need to take both (that can happen only when both source and target
are non-directories - the source because it wouldn't need to be locked
otherwise and the target because mixing directory and non-directory is
@@ -234,7 +234,7 @@ among the children, in some order. But that is also impossible, since
neither of the children is a descendent of another.
That concludes the proof, since the set of operations with the
-properties requiered for a minimal deadlock can not exist.
+properties required for a minimal deadlock can not exist.
Note that the check for having a common ancestor in cross-directory
rename is crucial - without it a deadlock would be possible. Indeed,
diff --git a/Documentation/filesystems/efivarfs.rst b/Documentation/filesystems/efivarfs.rst
index 0551985821b8..f646c3f0980f 100644
--- a/Documentation/filesystems/efivarfs.rst
+++ b/Documentation/filesystems/efivarfs.rst
@@ -40,4 +40,4 @@ accidentally.
*See also:*
- Documentation/admin-guide/acpi/ssdt-overlays.rst
-- Documentation/ABI/stable/sysfs-firmware-efi-vars
+- Documentation/ABI/removed/sysfs-firmware-efi-vars
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index efc3493fd6f8..68a0885fb5e6 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -774,6 +774,35 @@ In order to identify whether the data in the victim segment are valid or not,
F2FS manages a bitmap. Each bit represents the validity of a block, and the
bitmap is composed of a bit stream covering whole blocks in main area.
+Write-hint Policy
+-----------------
+
+F2FS sets the whint all the time with the below policy.
+
+===================== ======================== ===================
+User F2FS Block
+===================== ======================== ===================
+N/A META WRITE_LIFE_NONE|REQ_META
+N/A HOT_NODE WRITE_LIFE_NONE
+N/A WARM_NODE WRITE_LIFE_MEDIUM
+N/A COLD_NODE WRITE_LIFE_LONG
+ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
+extension list " "
+
+-- buffered io
+N/A COLD_DATA WRITE_LIFE_EXTREME
+N/A HOT_DATA WRITE_LIFE_SHORT
+N/A WARM_DATA WRITE_LIFE_NOT_SET
+
+-- direct io
+WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+WRITE_LIFE_NONE " WRITE_LIFE_NONE
+WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
+WRITE_LIFE_LONG " WRITE_LIFE_LONG
+===================== ======================== ===================
+
Fallocate(2) Policy
-------------------
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 0ea1e44fa028..8f5c1ee02e2f 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -50,6 +50,7 @@ filesystem implementations.
.. toctree::
:maxdepth: 2
+ buffer
journalling
fscrypt
fsverity
@@ -69,6 +70,7 @@ Documentation for filesystem implementations.
afs
autofs
autofs-mount-control
+ bcachefs/index
befs
bfs
btrfs
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 1be76ef117b3..92bffcc6747a 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -858,7 +858,7 @@ be misspelled d_alloc_anon().
**mandatory**
-[should've been added in 2016] stale comment in finish_open() nonwithstanding,
+[should've been added in 2016] stale comment in finish_open() notwithstanding,
failure exits in ->atomic_open() instances should *NOT* fput() the file,
no matter what. Everything is handled by the caller.
@@ -989,7 +989,7 @@ This mechanism would only work for a single device so the block layer couldn't
find the owning superblock of any additional devices.
In the old mechanism reusing or creating a superblock for a racing mount(2) and
-umount(2) relied on the file_system_type as the holder. This was severly
+umount(2) relied on the file_system_type as the holder. This was severely
underdocumented however:
(1) Any concurrent mounter that managed to grab an active reference on an
@@ -1134,3 +1134,10 @@ superblock of the main block device, i.e., the one stored in sb->s_bdev. Block
device freezing now works for any block device owned by a given superblock, not
just the main block device. The get_active_super() helper and bd_fsfreeze_sb
pointer are gone.
+
+---
+
+**mandatory**
+
+set_blocksize() takes opened struct file instead of struct block_device now
+and it *must* be opened exclusive.
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index c6a6b9df2104..4d97258a7954 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -688,6 +688,7 @@ files are there, and which are missing.
============ ===============================================================
File Content
============ ===============================================================
+ allocinfo Memory allocations profiling information
apm Advanced power management info
bootconfig Kernel command line obtained from boot config,
and, if there were kernel parameters from the
@@ -953,6 +954,34 @@ also be allocatable although a lot of filesystem metadata may have to be
reclaimed to achieve this.
+allocinfo
+~~~~~~~~~
+
+Provides information about memory allocations at all locations in the code
+base. Each allocation in the code is identified by its source file, line
+number, module (if it originates from a loadable module) and the function calling
+the allocation. The number of bytes allocated and number of calls at each
+location are reported.
+
+Example output.
+
+::
+
+ > sort -rn /proc/allocinfo
+ 127664128 31168 mm/page_ext.c:270 func:alloc_page_ext
+ 56373248 4737 mm/slub.c:2259 func:alloc_slab_page
+ 14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded
+ 14417920 3520 mm/mm_init.c:2530 func:alloc_large_system_hash
+ 13377536 234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs
+ 11718656 2861 mm/filemap.c:1919 func:__filemap_get_folio
+ 9192960 2800 kernel/fork.c:307 func:alloc_thread_stack_node
+ 4206592 4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable
+ 4136960 1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start
+ 3940352 962 mm/memory.c:4214 func:alloc_anon_folio
+ 2894464 22613 fs/kernfs/dir.c:615 func:__kernfs_new_node
+ ...
+
+
meminfo
~~~~~~~
@@ -1110,8 +1139,8 @@ KernelStack
PageTables
Memory consumed by userspace page tables
SecPageTables
- Memory consumed by secondary page tables, this currently
- currently includes KVM mmu allocations on x86 and arm64.
+ Memory consumed by secondary page tables, this currently includes
+ KVM mmu and IOMMU allocations on x86 and arm64.
NFS_Unstable
Always zero. Previous counted pages which had been written to
the server, but has not been committed to stable storage.
diff --git a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
index 6333697ba3e8..12aa63840830 100644
--- a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
+++ b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
@@ -2167,7 +2167,7 @@ The ``xfblob_free`` function frees a specific blob, and the ``xfblob_truncate``
function frees them all because compaction is not needed.
The details of repairing directories and extended attributes will be discussed
-in a subsequent section about atomic extent swapping.
+in a subsequent section about atomic file content exchanges.
However, it should be noted that these repair functions only use blob storage
to cache a small number of entries before adding them to a temporary ondisk
file, which is why compaction is not required.
@@ -2802,7 +2802,8 @@ follows this format:
Repairs for file-based metadata such as extended attributes, directories,
symbolic links, quota files and realtime bitmaps are performed by building a
-new structure attached to a temporary file and swapping the forks.
+new structure attached to a temporary file and exchanging all mappings in the
+file forks.
Afterward, the mappings in the old file fork are the candidate blocks for
disposal.
@@ -3851,8 +3852,8 @@ Because file forks can consume as much space as the entire filesystem, repairs
cannot be staged in memory, even when a paging scheme is available.
Therefore, online repair of file-based metadata createas a temporary file in
the XFS filesystem, writes a new structure at the correct offsets into the
-temporary file, and atomically swaps the fork mappings (and hence the fork
-contents) to commit the repair.
+temporary file, and atomically exchanges all file fork mappings (and hence the
+fork contents) to commit the repair.
Once the repair is complete, the old fork can be reaped as necessary; if the
system goes down during the reap, the iunlink code will delete the blocks
during log recovery.
@@ -3862,10 +3863,11 @@ consistent to use a temporary file safely!
This dependency is the reason why online repair can only use pageable kernel
memory to stage ondisk space usage information.
-Swapping metadata extents with a temporary file requires the owner field of the
-block headers to match the file being repaired and not the temporary file. The
-directory, extended attribute, and symbolic link functions were all modified to
-allow callers to specify owner numbers explicitly.
+Exchanging metadata file mappings with a temporary file requires the owner
+field of the block headers to match the file being repaired and not the
+temporary file.
+The directory, extended attribute, and symbolic link functions were all
+modified to allow callers to specify owner numbers explicitly.
There is a downside to the reaping process -- if the system crashes during the
reap phase and the fork extents are crosslinked, the iunlink processing will
@@ -3974,8 +3976,8 @@ The proposed patches are in the
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=repair-tempfiles>`_
series.
-Atomic Extent Swapping
-----------------------
+Logged File Content Exchanges
+-----------------------------
Once repair builds a temporary file with a new data structure written into
it, it must commit the new changes into the existing file.
@@ -4010,17 +4012,21 @@ e. Old blocks in the file may be cross-linked with another structure and must
These problems are overcome by creating a new deferred operation and a new type
of log intent item to track the progress of an operation to exchange two file
ranges.
-The new deferred operation type chains together the same transactions used by
-the reverse-mapping extent swap code.
+The new exchange operation type chains together the same transactions used by
+the reverse-mapping extent swap code, but records intermediate progress in the
+log so that operations can be restarted after a crash.
+This new functionality is called the file contents exchange (xfs_exchrange)
+code.
+The underlying implementation exchanges file fork mappings (xfs_exchmaps).
The new log item records the progress of the exchange to ensure that once an
exchange begins, it will always run to completion, even there are
interruptions.
-The new ``XFS_SB_FEAT_INCOMPAT_LOG_ATOMIC_SWAP`` log-incompatible feature flag
+The new ``XFS_SB_FEAT_INCOMPAT_EXCHRANGE`` incompatible feature flag
in the superblock protects these new log item records from being replayed on
old kernels.
The proposed patchset is the
-`atomic extent swap
+`file contents exchange
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=atomic-file-updates>`_
series.
@@ -4047,9 +4053,6 @@ series.
| one ``struct rw_semaphore`` for each feature. |
| The log cleaning code tries to take this rwsem in exclusive mode to |
| clear the bit; if the lock attempt fails, the feature bit remains set. |
-| Filesystem code signals its intention to use a log incompat feature in a |
-| transaction by calling ``xlog_use_incompat_feat``, which takes the rwsem |
-| in shared mode. |
| The code supporting a log incompat feature should create wrapper |
| functions to obtain the log feature and call |
| ``xfs_add_incompat_log_feature`` to set the feature bits in the primary |
@@ -4064,72 +4067,73 @@ series.
| The feature bit will not be cleared from the superblock until the log |
| becomes clean. |
| |
-| Log-assisted extended attribute updates and atomic extent swaps both use |
-| log incompat features and provide convenience wrappers around the |
+| Log-assisted extended attribute updates and file content exchanges both |
+| use log incompat features and provide convenience wrappers around the |
| functionality. |
+--------------------------------------------------------------------------+
-Mechanics of an Atomic Extent Swap
-``````````````````````````````````
+Mechanics of a Logged File Content Exchange
+```````````````````````````````````````````
-Swapping entire file forks is a complex task.
+Exchanging contents between file forks is a complex task.
The goal is to exchange all file fork mappings between two file fork offset
ranges.
There are likely to be many extent mappings in each fork, and the edges of
the mappings aren't necessarily aligned.
-Furthermore, there may be other updates that need to happen after the swap,
+Furthermore, there may be other updates that need to happen after the exchange,
such as exchanging file sizes, inode flags, or conversion of fork data to local
format.
-This is roughly the format of the new deferred extent swap work item:
+This is roughly the format of the new deferred exchange-mapping work item:
.. code-block:: c
- struct xfs_swapext_intent {
+ struct xfs_exchmaps_intent {
/* Inodes participating in the operation. */
- struct xfs_inode *sxi_ip1;
- struct xfs_inode *sxi_ip2;
+ struct xfs_inode *xmi_ip1;
+ struct xfs_inode *xmi_ip2;
/* File offset range information. */
- xfs_fileoff_t sxi_startoff1;
- xfs_fileoff_t sxi_startoff2;
- xfs_filblks_t sxi_blockcount;
+ xfs_fileoff_t xmi_startoff1;
+ xfs_fileoff_t xmi_startoff2;
+ xfs_filblks_t xmi_blockcount;
/* Set these file sizes after the operation, unless negative. */
- xfs_fsize_t sxi_isize1;
- xfs_fsize_t sxi_isize2;
+ xfs_fsize_t xmi_isize1;
+ xfs_fsize_t xmi_isize2;
- /* XFS_SWAP_EXT_* log operation flags */
- uint64_t sxi_flags;
+ /* XFS_EXCHMAPS_* log operation flags */
+ uint64_t xmi_flags;
};
The new log intent item contains enough information to track two logical fork
offset ranges: ``(inode1, startoff1, blockcount)`` and ``(inode2, startoff2,
blockcount)``.
-Each step of a swap operation exchanges the largest file range mapping possible
-from one file to the other.
-After each step in the swap operation, the two startoff fields are incremented
-and the blockcount field is decremented to reflect the progress made.
-The flags field captures behavioral parameters such as swapping the attr fork
-instead of the data fork and other work to be done after the extent swap.
-The two isize fields are used to swap the file size at the end of the operation
-if the file data fork is the target of the swap operation.
-
-When the extent swap is initiated, the sequence of operations is as follows:
-
-1. Create a deferred work item for the extent swap.
- At the start, it should contain the entirety of the file ranges to be
- swapped.
+Each step of an exchange operation exchanges the largest file range mapping
+possible from one file to the other.
+After each step in the exchange operation, the two startoff fields are
+incremented and the blockcount field is decremented to reflect the progress
+made.
+The flags field captures behavioral parameters such as exchanging attr fork
+mappings instead of the data fork and other work to be done after the exchange.
+The two isize fields are used to exchange the file sizes at the end of the
+operation if the file data fork is the target of the operation.
+
+When the exchange is initiated, the sequence of operations is as follows:
+
+1. Create a deferred work item for the file mapping exchange.
+ At the start, it should contain the entirety of the file block ranges to be
+ exchanged.
2. Call ``xfs_defer_finish`` to process the exchange.
- This is encapsulated in ``xrep_tempswap_contents`` for scrub operations.
+ This is encapsulated in ``xrep_tempexch_contents`` for scrub operations.
This will log an extent swap intent item to the transaction for the deferred
- extent swap work item.
+ mapping exchange work item.
-3. Until ``sxi_blockcount`` of the deferred extent swap work item is zero,
+3. Until ``xmi_blockcount`` of the deferred mapping exchange work item is zero,
- a. Read the block maps of both file ranges starting at ``sxi_startoff1`` and
- ``sxi_startoff2``, respectively, and compute the longest extent that can
- be swapped in a single step.
+ a. Read the block maps of both file ranges starting at ``xmi_startoff1`` and
+ ``xmi_startoff2``, respectively, and compute the longest extent that can
+ be exchanged in a single step.
This is the minimum of the two ``br_blockcount`` s in the mappings.
Keep advancing through the file forks until at least one of the mappings
contains written blocks.
@@ -4151,20 +4155,20 @@ When the extent swap is initiated, the sequence of operations is as follows:
g. Extend the ondisk size of either file if necessary.
- h. Log an extent swap done log item for the extent swap intent log item
- that was read at the start of step 3.
+ h. Log a mapping exchange done log item for the mapping exchange intent log
+ item that was read at the start of step 3.
i. Compute the amount of file range that has just been covered.
This quantity is ``(map1.br_startoff + map1.br_blockcount -
- sxi_startoff1)``, because step 3a could have skipped holes.
+ xmi_startoff1)``, because step 3a could have skipped holes.
- j. Increase the starting offsets of ``sxi_startoff1`` and ``sxi_startoff2``
+ j. Increase the starting offsets of ``xmi_startoff1`` and ``xmi_startoff2``
by the number of blocks computed in the previous step, and decrease
- ``sxi_blockcount`` by the same quantity.
+ ``xmi_blockcount`` by the same quantity.
This advances the cursor.
- k. Log a new extent swap intent log item reflecting the advanced state of
- the work item.
+ k. Log a new mapping exchange intent log item reflecting the advanced state
+ of the work item.
l. Return the proper error code (EAGAIN) to the deferred operation manager
to inform it that there is more work to be done.
@@ -4175,22 +4179,23 @@ When the extent swap is initiated, the sequence of operations is as follows:
This will be discussed in more detail in subsequent sections.
If the filesystem goes down in the middle of an operation, log recovery will
-find the most recent unfinished extent swap log intent item and restart from
-there.
-This is how extent swapping guarantees that an outside observer will either see
-the old broken structure or the new one, and never a mismash of both.
+find the most recent unfinished mapping exchange log intent item and restart
+from there.
+This is how atomic file mapping exchanges guarantee that an outside observer
+will either see the old broken structure or the new one, and never a mishmash
+of both.
-Preparation for Extent Swapping
-```````````````````````````````
+Preparation for File Content Exchanges
+``````````````````````````````````````
There are a few things that need to be taken care of before initiating an
-atomic extent swap operation.
+atomic file mapping exchange operation.
First, regular files require the page cache to be flushed to disk before the
operation begins, and directio writes to be quiesced.
-Like any filesystem operation, extent swapping must determine the maximum
-amount of disk space and quota that can be consumed on behalf of both files in
-the operation, and reserve that quantity of resources to avoid an unrecoverable
-out of space failure once it starts dirtying metadata.
+Like any filesystem operation, file mapping exchanges must determine the
+maximum amount of disk space and quota that can be consumed on behalf of both
+files in the operation, and reserve that quantity of resources to avoid an
+unrecoverable out of space failure once it starts dirtying metadata.
The preparation step scans the ranges of both files to estimate:
- Data device blocks needed to handle the repeated updates to the fork
@@ -4204,56 +4209,59 @@ The preparation step scans the ranges of both files to estimate:
to different extents on the realtime volume, which could happen if the
operation fails to run to completion.
-The need for precise estimation increases the run time of the swap operation,
-but it is very important to maintain correct accounting.
-The filesystem must not run completely out of free space, nor can the extent
-swap ever add more extent mappings to a fork than it can support.
+The need for precise estimation increases the run time of the exchange
+operation, but it is very important to maintain correct accounting.
+The filesystem must not run completely out of free space, nor can the mapping
+exchange ever add more extent mappings to a fork than it can support.
Regular users are required to abide the quota limits, though metadata repairs
may exceed quota to resolve inconsistent metadata elsewhere.
-Special Features for Swapping Metadata File Extents
-```````````````````````````````````````````````````
+Special Features for Exchanging Metadata File Contents
+``````````````````````````````````````````````````````
Extended attributes, symbolic links, and directories can set the fork format to
"local" and treat the fork as a literal area for data storage.
Metadata repairs must take extra steps to support these cases:
- If both forks are in local format and the fork areas are large enough, the
- swap is performed by copying the incore fork contents, logging both forks,
- and committing.
- The atomic extent swap mechanism is not necessary, since this can be done
- with a single transaction.
+ exchange is performed by copying the incore fork contents, logging both
+ forks, and committing.
+ The atomic file mapping exchange mechanism is not necessary, since this can
+ be done with a single transaction.
-- If both forks map blocks, then the regular atomic extent swap is used.
+- If both forks map blocks, then the regular atomic file mapping exchange is
+ used.
- Otherwise, only one fork is in local format.
The contents of the local format fork are converted to a block to perform the
- swap.
+ exchange.
The conversion to block format must be done in the same transaction that
- logs the initial extent swap intent log item.
- The regular atomic extent swap is used to exchange the mappings.
- Special flags are set on the swap operation so that the transaction can be
- rolled one more time to convert the second file's fork back to local format
- so that the second file will be ready to go as soon as the ILOCK is dropped.
+ logs the initial mapping exchange intent log item.
+ The regular atomic mapping exchange is used to exchange the metadata file
+ mappings.
+ Special flags are set on the exchange operation so that the transaction can
+ be rolled one more time to convert the second file's fork back to local
+ format so that the second file will be ready to go as soon as the ILOCK is
+ dropped.
Extended attributes and directories stamp the owning inode into every block,
but the buffer verifiers do not actually check the inode number!
Although there is no verification, it is still important to maintain
-referential integrity, so prior to performing the extent swap, online repair
-builds every block in the new data structure with the owner field of the file
-being repaired.
+referential integrity, so prior to performing the mapping exchange, online
+repair builds every block in the new data structure with the owner field of the
+file being repaired.
-After a successful swap operation, the repair operation must reap the old fork
-blocks by processing each fork mapping through the standard :ref:`file extent
-reaping <reaping>` mechanism that is done post-repair.
+After a successful exchange operation, the repair operation must reap the old
+fork blocks by processing each fork mapping through the standard :ref:`file
+extent reaping <reaping>` mechanism that is done post-repair.
If the filesystem should go down during the reap part of the repair, the
iunlink processing at the end of recovery will free both the temporary file and
whatever blocks were not reaped.
However, this iunlink processing omits the cross-link detection of online
repair, and is not completely foolproof.
-Swapping Temporary File Extents
-```````````````````````````````
+Exchanging Temporary File Contents
+``````````````````````````````````
To repair a metadata file, online repair proceeds as follows:
@@ -4263,14 +4271,14 @@ To repair a metadata file, online repair proceeds as follows:
file.
The same fork must be written to as is being repaired.
-3. Commit the scrub transaction, since the swap estimation step must be
- completed before transaction reservations are made.
+3. Commit the scrub transaction, since the exchange resource estimation step
+ must be completed before transaction reservations are made.
-4. Call ``xrep_tempswap_trans_alloc`` to allocate a new scrub transaction with
+4. Call ``xrep_tempexch_trans_alloc`` to allocate a new scrub transaction with
the appropriate resource reservations, locks, and fill out a ``struct
- xfs_swapext_req`` with the details of the swap operation.
+ xfs_exchmaps_req`` with the details of the exchange operation.
-5. Call ``xrep_tempswap_contents`` to swap the contents.
+5. Call ``xrep_tempexch_contents`` to exchange the contents.
6. Commit the transaction to complete the repair.
@@ -4312,7 +4320,7 @@ To check the summary file against the bitmap:
3. Compare the contents of the xfile against the ondisk file.
To repair the summary file, write the xfile contents into the temporary file
-and use atomic extent swap to commit the new contents.
+and use atomic mapping exchange to commit the new contents.
The temporary file is then reaped.
The proposed patchset is the
@@ -4355,8 +4363,8 @@ Salvaging extended attributes is done as follows:
memory or there are no more attr fork blocks to examine, unlock the file and
add the staged extended attributes to the temporary file.
-3. Use atomic extent swapping to exchange the new and old extended attribute
- structures.
+3. Use atomic file mapping exchange to exchange the new and old extended
+ attribute structures.
The old attribute blocks are now attached to the temporary file.
4. Reap the temporary file.
@@ -4413,7 +4421,8 @@ salvaging directories is straightforward:
directory and add the staged dirents into the temporary directory.
Truncate the staging files.
-4. Use atomic extent swapping to exchange the new and old directory structures.
+4. Use atomic file mapping exchange to exchange the new and old directory
+ structures.
The old directory blocks are now attached to the temporary file.
5. Reap the temporary file.
@@ -4456,10 +4465,10 @@ reconstruction of filesystem space metadata.
The parent pointer feature, however, makes total directory reconstruction
possible.
-XFS parent pointers include the dirent name and location of the entry within
-the parent directory.
+XFS parent pointers contain the information needed to identify the
+corresponding directory entry in the parent directory.
In other words, child files use extended attributes to store pointers to
-parents in the form ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``.
+parents in the form ``(dirent_name) → (parent_inum, parent_gen)``.
The directory checking process can be strengthened to ensure that the target of
each dirent also contains a parent pointer pointing back to the dirent.
Likewise, each parent pointer can be checked by ensuring that the target of
@@ -4467,8 +4476,6 @@ each parent pointer is a directory and that it contains a dirent matching
the parent pointer.
Both online and offline repair can use this strategy.
-**Note**: The ondisk format of parent pointers is not yet finalized.
-
+--------------------------------------------------------------------------+
| **Historical Sidebar**: |
+--------------------------------------------------------------------------+
@@ -4510,8 +4517,58 @@ Both online and offline repair can use this strategy.
| Chandan increased the maximum extent counts of both data and attribute |
| forks, thereby ensuring that the extended attribute structure can grow |
| to handle the maximum hardlink count of any file. |
+| |
+| For this second effort, the ondisk parent pointer format as originally |
+| proposed was ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``. |
+| The format was changed during development to eliminate the requirement |
+| of repair tools needing to ensure that the ``dirent_pos`` field |
+| always matched when reconstructing a directory. |
+| |
+| There were a few other ways to have solved that problem: |
+| |
+| 1. The field could be designated advisory, since the other three values |
+| are sufficient to find the entry in the parent. |
+| However, this makes indexed key lookup impossible while repairs are |
+| ongoing. |
+| |
+| 2. We could allow creating directory entries at specified offsets, which |
+| solves the referential integrity problem but runs the risk that |
+| dirent creation will fail due to conflicts with the free space in the |
+| directory. |
+| |
+| These conflicts could be resolved by appending the directory entry |
+| and amending the xattr code to support updating an xattr key and |
+| reindexing the dabtree, though this would have to be performed with |
+| the parent directory still locked. |
+| |
+| 3. Same as above, but remove the old parent pointer entry and add a new |
+| one atomically. |
+| |
+| 4. Change the ondisk xattr format to |
+| ``(parent_inum, name) → (parent_gen)``, which would provide the attr |
+| name uniqueness that we require, without forcing repair code to |
+| update the dirent position. |
+| Unfortunately, this requires changes to the xattr code to support |
+| attr names as long as 263 bytes. |
+| |
+| 5. Change the ondisk xattr format to ``(parent_inum, hash(name)) → |
+| (name, parent_gen)``. |
+| If the hash is sufficiently resistant to collisions (e.g. sha256) |
+| then this should provide the attr name uniqueness that we require. |
+| Names shorter than 247 bytes could be stored directly. |
+| |
+| 6. Change the ondisk xattr format to ``(dirent_name) → (parent_ino, |
+| parent_gen)``. This format doesn't require any of the complicated |
+| nested name hashing of the previous suggestions. However, it was |
+| discovered that multiple hardlinks to the same inode with the same |
+| filename caused performance problems with hashed xattr lookups, so |
+| the parent inumber is now xor'd into the hash index. |
+| |
+| In the end, it was decided that solution #6 was the most compact and the |
+| most performant. A new hash function was designed for parent pointers. |
+--------------------------------------------------------------------------+
+
Case Study: Repairing Directories with Parent Pointers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -4519,8 +4576,9 @@ Directory rebuilding uses a :ref:`coordinated inode scan <iscan>` and
a :ref:`directory entry live update hook <liveupdate>` as follows:
1. Set up a temporary directory for generating the new directory structure,
- an xfblob for storing entry names, and an xfarray for stashing directory
- updates.
+ an xfblob for storing entry names, and an xfarray for stashing the fixed
+ size fields involved in a directory update: ``(child inumber, add vs.
+ remove, name cookie, ftype)``.
2. Set up an inode scanner and hook into the directory entry code to receive
updates on directory operations.
@@ -4529,73 +4587,36 @@ a :ref:`directory entry live update hook <liveupdate>` as follows:
pointer references the directory of interest.
If so:
- a. Stash an addname entry for this dirent in the xfarray for later.
+ a. Stash the parent pointer name and an addname entry for this dirent in the
+ xfblob and xfarray, respectively.
- b. When finished scanning that file, flush the stashed updates to the
- temporary directory.
+ b. When finished scanning that file or the kernel memory consumption exceeds
+ a threshold, flush the stashed updates to the temporary directory.
4. For each live directory update received via the hook, decide if the child
has already been scanned.
If so:
- a. Stash an addname or removename entry for this dirent update in the
- xfarray for later.
+ a. Stash the parent pointer name an addname or removename entry for this
+ dirent update in the xfblob and xfarray for later.
We cannot write directly to the temporary directory because hook
functions are not allowed to modify filesystem metadata.
Instead, we stash updates in the xfarray and rely on the scanner thread
to apply the stashed updates to the temporary directory.
-5. When the scan is complete, atomically swap the contents of the temporary
+5. When the scan is complete, replay any stashed entries in the xfarray.
+
+6. When the scan is complete, atomically exchange the contents of the temporary
directory and the directory being repaired.
The temporary directory now contains the damaged directory structure.
-6. Reap the temporary directory.
-
-7. Update the dirent position field of parent pointers as necessary.
- This may require the queuing of a substantial number of xattr log intent
- items.
+7. Reap the temporary directory.
The proposed patchset is the
`parent pointers directory repair
-<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-online-dir-repair>`_
+<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-fsck>`_
series.
-**Unresolved Question**: How will repair ensure that the ``dirent_pos`` fields
-match in the reconstructed directory?
-
-*Answer*: There are a few ways to solve this problem:
-
-1. The field could be designated advisory, since the other three values are
- sufficient to find the entry in the parent.
- However, this makes indexed key lookup impossible while repairs are ongoing.
-
-2. We could allow creating directory entries at specified offsets, which solves
- the referential integrity problem but runs the risk that dirent creation
- will fail due to conflicts with the free space in the directory.
-
- These conflicts could be resolved by appending the directory entry and
- amending the xattr code to support updating an xattr key and reindexing the
- dabtree, though this would have to be performed with the parent directory
- still locked.
-
-3. Same as above, but remove the old parent pointer entry and add a new one
- atomically.
-
-4. Change the ondisk xattr format to ``(parent_inum, name) → (parent_gen)``,
- which would provide the attr name uniqueness that we require, without
- forcing repair code to update the dirent position.
- Unfortunately, this requires changes to the xattr code to support attr
- names as long as 263 bytes.
-
-5. Change the ondisk xattr format to ``(parent_inum, hash(name)) →
- (name, parent_gen)``.
- If the hash is sufficiently resistant to collisions (e.g. sha256) then
- this should provide the attr name uniqueness that we require.
- Names shorter than 247 bytes could be stored directly.
-
-Discussion is ongoing under the `parent pointers patch deluge
-<https://www.spinics.net/lists/linux-xfs/msg69397.html>`_.
-
Case Study: Repairing Parent Pointers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -4603,8 +4624,9 @@ Online reconstruction of a file's parent pointer information works similarly to
directory reconstruction:
1. Set up a temporary file for generating a new extended attribute structure,
- an `xfblob<xfblob>` for storing parent pointer names, and an xfarray for
- stashing parent pointer updates.
+ an xfblob for storing parent pointer names, and an xfarray for stashing the
+ fixed size fields involved in a parent pointer update: ``(parent inumber,
+ parent generation, add vs. remove, name cookie)``.
2. Set up an inode scanner and hook into the directory entry code to receive
updates on directory operations.
@@ -4613,34 +4635,36 @@ directory reconstruction:
dirent references the file of interest.
If so:
- a. Stash an addpptr entry for this parent pointer in the xfblob and xfarray
- for later.
+ a. Stash the dirent name and an addpptr entry for this parent pointer in the
+ xfblob and xfarray, respectively.
- b. When finished scanning the directory, flush the stashed updates to the
- temporary directory.
+ b. When finished scanning the directory or the kernel memory consumption
+ exceeds a threshold, flush the stashed updates to the temporary file.
4. For each live directory update received via the hook, decide if the parent
has already been scanned.
If so:
- a. Stash an addpptr or removepptr entry for this dirent update in the
- xfarray for later.
+ a. Stash the dirent name and an addpptr or removepptr entry for this dirent
+ update in the xfblob and xfarray for later.
We cannot write parent pointers directly to the temporary file because
hook functions are not allowed to modify filesystem metadata.
Instead, we stash updates in the xfarray and rely on the scanner thread
to apply the stashed parent pointer updates to the temporary file.
-5. Copy all non-parent pointer extended attributes to the temporary file.
+5. When the scan is complete, replay any stashed entries in the xfarray.
+
+6. Copy all non-parent pointer extended attributes to the temporary file.
-6. When the scan is complete, atomically swap the attribute fork of the
- temporary file and the file being repaired.
+7. When the scan is complete, atomically exchange the mappings of the attribute
+ forks of the temporary file and the file being repaired.
The temporary file now contains the damaged extended attribute structure.
-7. Reap the temporary file.
+8. Reap the temporary file.
The proposed patchset is the
`parent pointers repair
-<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-online-parent-repair>`_
+<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-fsck>`_
series.
Digression: Offline Checking of Parent Pointers
@@ -4651,26 +4675,56 @@ files are erased long before directory tree connectivity checks are performed.
Parent pointer checks are therefore a second pass to be added to the existing
connectivity checks:
-1. After the set of surviving files has been established (i.e. phase 6),
+1. After the set of surviving files has been established (phase 6),
walk the surviving directories of each AG in the filesystem.
This is already performed as part of the connectivity checks.
-2. For each directory entry found, record the name in an xfblob, and store
- ``(child_ag_inum, parent_inum, parent_gen, dirent_pos)`` tuples in a
- per-AG in-memory slab.
+2. For each directory entry found,
+
+ a. If the name has already been stored in the xfblob, then use that cookie
+ and skip the next step.
+
+ b. Otherwise, record the name in an xfblob, and remember the xfblob cookie.
+ Unique mappings are critical for
+
+ 1. Deduplicating names to reduce memory usage, and
+
+ 2. Creating a stable sort key for the parent pointer indexes so that the
+ parent pointer validation described below will work.
+
+ c. Store ``(child_ag_inum, parent_inum, parent_gen, name_hash, name_len,
+ name_cookie)`` tuples in a per-AG in-memory slab. The ``name_hash``
+ referenced in this section is the regular directory entry name hash, not
+ the specialized one used for parent pointer xattrs.
3. For each AG in the filesystem,
- a. Sort the per-AG tuples in order of child_ag_inum, parent_inum, and
- dirent_pos.
+ a. Sort the per-AG tuple set in order of ``child_ag_inum``, ``parent_inum``,
+ ``name_hash``, and ``name_cookie``.
+ Having a single ``name_cookie`` for each ``name`` is critical for
+ handling the uncommon case of a directory containing multiple hardlinks
+ to the same file where all the names hash to the same value.
b. For each inode in the AG,
1. Scan the inode for parent pointers.
- Record the names in a per-file xfblob, and store ``(parent_inum,
- parent_gen, dirent_pos)`` tuples in a per-file slab.
+ For each parent pointer found,
+
+ a. Validate the ondisk parent pointer.
+ If validation fails, move on to the next parent pointer in the
+ file.
+
+ b. If the name has already been stored in the xfblob, then use that
+ cookie and skip the next step.
+
+ c. Record the name in a per-file xfblob, and remember the xfblob
+ cookie.
- 2. Sort the per-file tuples in order of parent_inum, and dirent_pos.
+ d. Store ``(parent_inum, parent_gen, name_hash, name_len,
+ name_cookie)`` tuples in a per-file slab.
+
+ 2. Sort the per-file tuples in order of ``parent_inum``, ``name_hash``,
+ and ``name_cookie``.
3. Position one slab cursor at the start of the inode's records in the
per-AG tuple slab.
@@ -4679,28 +4733,37 @@ connectivity checks:
4. Position a second slab cursor at the start of the per-file tuple slab.
- 5. Iterate the two cursors in lockstep, comparing the parent_ino and
- dirent_pos fields of the records under each cursor.
+ 5. Iterate the two cursors in lockstep, comparing the ``parent_ino``,
+ ``name_hash``, and ``name_cookie`` fields of the records under each
+ cursor (a merge sketch follows this list):
- a. Tuples in the per-AG list but not the per-file list are missing and
- need to be written to the inode.
+ a. If the per-AG cursor is at a lower point in the keyspace than the
+ per-file cursor, then the per-AG cursor points to a missing parent
+ pointer.
+ Add the parent pointer to the inode and advance the per-AG
+ cursor.
- b. Tuples in the per-file list but not the per-AG list are dangling
- and need to be removed from the inode.
+ b. If the per-file cursor is at a lower point in the keyspace than
+ the per-AG cursor, then the per-file cursor points to a dangling
+ parent pointer.
+ Remove the parent pointer from the inode and advance the per-file
+ cursor.
- c. For tuples in both lists, update the parent_gen and name components
- of the parent pointer if necessary.
+ c. Otherwise, both cursors point at the same parent pointer.
+ Update the parent_gen component if necessary.
+ Advance both cursors.
4. Move on to examining link counts, as we do today.
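
The lockstep walk in step 5 is an ordinary merge of two sorted tuple streams.
The sketch below restates that control flow; the tuple layout, the cursor type,
and the helper functions are assumptions made for illustration and do not name
real xfs_repair internals::

    /* Illustrative tuple; the real records also carry name_len. */
    struct pptr_tuple {
            uint64_t        parent_ino;
            uint32_t        parent_gen;
            uint32_t        name_hash;
            uint64_t        name_cookie;
    };

    /* Compare in the same order used to sort both slabs. */
    static int pptr_cmp(const struct pptr_tuple *a, const struct pptr_tuple *b)
    {
            if (a->parent_ino != b->parent_ino)
                    return a->parent_ino < b->parent_ino ? -1 : 1;
            if (a->name_hash != b->name_hash)
                    return a->name_hash < b->name_hash ? -1 : 1;
            if (a->name_cookie != b->name_cookie)
                    return a->name_cookie < b->name_cookie ? -1 : 1;
            return 0;
    }

    static void merge_parent_pointers(struct xfs_inode *ip,
                                      struct pptr_cursor *ag_cur,
                                      struct pptr_cursor *file_cur)
    {
            while (cursor_has_tuple(ag_cur) && cursor_has_tuple(file_cur)) {
                    int cmp = pptr_cmp(cursor_tuple(ag_cur),
                                       cursor_tuple(file_cur));

                    if (cmp < 0) {
                            /* Per-AG only: a missing parent pointer. */
                            add_missing_pptr(ip, cursor_tuple(ag_cur));
                            cursor_advance(ag_cur);
                    } else if (cmp > 0) {
                            /* Per-file only: a dangling parent pointer. */
                            remove_dangling_pptr(ip, cursor_tuple(file_cur));
                            cursor_advance(file_cur);
                    } else {
                            update_parent_gen(ip, cursor_tuple(ag_cur),
                                              cursor_tuple(file_cur));
                            cursor_advance(ag_cur);
                            cursor_advance(file_cur);
                    }
            }
            /* Leftover per-AG tuples are missing; leftover per-file tuples dangle. */
    }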
The proposed patchset is the
`offline parent pointers repair
-<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfsprogs-dev.git/log/?h=pptrs-repair>`_
+<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfsprogs-dev.git/log/?h=pptrs-fsck>`_
series.
-Rebuilding directories from parent pointers in offline repair is very
-challenging because it currently uses a single-pass scan of the filesystem
-during phase 3 to decide which files are corrupt enough to be zapped.
+Rebuilding directories from parent pointers in offline repair would be very
+challenging because xfs_repair currently uses two single-pass scans of the
+filesystem during phases 3 and 4 to decide which files are corrupt enough to be
+zapped.
This scan would have to be converted into a multi-pass scan:
1. The first pass of the scan zaps corrupt inodes, forks, and attributes
@@ -4722,6 +4785,130 @@ This scan would have to be converted into a multi-pass scan:
This code has not yet been constructed.
+.. _dirtree:
+
+Case Study: Directory Tree Structure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As mentioned earlier, the filesystem directory tree is supposed to be a
+directed acylic graph structure.
+However, each node in this graph is a separate ``xfs_inode`` object with its
+own locks, which makes validating the tree qualities difficult.
+Fortunately, non-directories are allowed to have multiple parents and cannot
+have children, so only directories need to be scanned.
+Directories typically constitute 5-10% of the files in a filesystem, which
+reduces the amount of work dramatically.
+
+If the directory tree could be frozen, it would be easy to discover cycles and
+disconnected regions by running a depth (or breadth) first search downwards
+from the root directory and marking a bitmap for each directory found.
+At any point in the walk, trying to set an already set bit means there is a
+cycle.
+After the scan completes, XORing the marked inode bitmap with the inode
+allocation bitmap reveals disconnected inodes.
+However, one of online repair's design goals is to avoid locking the entire
+filesystem unless it's absolutely necessary.
+Directory tree updates can move subtrees across the scanner wavefront on a live
+filesystem, so the bitmap algorithm cannot be applied.
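
For reference, that frozen-tree walk would look like the sketch below; the
``for_each_child_directory()`` and ``inode_to_bitmap_index()`` helpers are
purely illustrative placeholders, and only ``test_and_set_bit()`` is a real
kernel primitive::

    /* Illustrative frozen-tree cycle check; not usable on a live filesystem. */
    static int walk_frozen_tree(unsigned long *seen, uint64_t dir_ino)
    {
            uint64_t child;
            int error;

            /* test_and_set_bit() returns the old value: nonzero means a cycle. */
            if (test_and_set_bit(inode_to_bitmap_index(dir_ino), seen))
                    return -ELOOP;

            for_each_child_directory(dir_ino, child) {
                    error = walk_frozen_tree(seen, child);
                    if (error)
                            return error;
            }
            return 0;
    }

After walking downwards from the root, XORing ``seen`` with the directory
inode allocation bitmap leaves exactly the disconnected directories set, as
noted above.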
+
+Directory parent pointers enable an incremental approach to validation of the
+tree structure.
+Instead of using one thread to scan the entire filesystem, multiple threads can
+walk from individual subdirectories upwards towards the root.
+For this to work, all directory entries and parent pointers must be internally
+consistent, each directory entry must have a parent pointer, and the link
+counts of all directories must be correct.
+Each scanner thread must be able to take the IOLOCK of an alleged parent
+directory while holding the IOLOCK of the child directory to prevent either
+directory from being moved within the tree.
+This is not possible since the VFS does not take the IOLOCK of a child
+subdirectory when moving that subdirectory, so instead the scanner stabilizes
+the parent -> child relationship by taking the ILOCKs and installing a dirent
+update hook to detect changes.
+
+The scanning process uses a dirent hook to detect changes to the directories
+mentioned in the scan data.
+The scan works as follows:
+
+1. For each subdirectory in the filesystem,
+
+ a. For each parent pointer of that subdirectory,
+
+ 1. Create a path object for that parent pointer, and mark the
+ subdirectory inode number in the path object's bitmap.
+
+ 2. Record the parent pointer name and inode number in a path structure.
+
+ 3. If the alleged parent is the subdirectory being scrubbed, the path is
+ a cycle.
+ Mark the path for deletion and repeat step 1a with the next
+ subdirectory parent pointer.
+
+ 4. Try to mark the alleged parent inode number in a bitmap in the path
+ object.
+ If the bit is already set, then there is a cycle in the directory
+ tree.
+ Mark the path as a cycle and repeat step 1a with the next subdirectory
+ parent pointer.
+
+ 5. Load the alleged parent.
+ If the alleged parent is not a linked directory, abort the scan
+ because the parent pointer information is inconsistent.
+
+ 6. For each parent pointer of this alleged ancestor directory,
+
+ a. Record the parent pointer name and inode number in the path object
+ if no parent has been set for that level.
+
+ b. If an ancestor has more than one parent, mark the path as corrupt.
+ Repeat step 1a with the next subdirectory parent pointer.
+
+ c. Repeat steps 1a3-1a6 for the ancestor identified in step 1a6a.
+ This repeats until the directory tree root is reached or no parents
+ are found.
+
+ 7. If the walk terminates at the root directory, mark the path as ok.
+
+ 8. If the walk terminates without reaching the root, mark the path as
+ disconnected.
+
+2. If the directory entry update hook triggers, check all paths already found
+ by the scan.
+ If the entry matches part of a path, mark that path and the scan stale.
+ When the scanner thread sees that the scan has been marked stale, it deletes
+ all scan data and starts over.
+
+Repairing the directory tree works as follows (a decision sketch follows the list):
+
+1. Walk each path of the target subdirectory.
+
+ a. Corrupt paths and cycle paths are counted as suspect.
+
+ b. Paths already marked for deletion are counted as bad.
+
+ c. Paths that reached the root are counted as good.
+
+2. If the subdirectory is either the root directory or has zero link count,
+ delete all incoming directory entries in the immediate parents.
+ Repairs are complete.
+
+3. If the subdirectory has exactly one path, set the dotdot entry to the
+ parent and exit.
+
+4. If the subdirectory has at least one good path, delete all the other
+ incoming directory entries in the immediate parents.
+
+5. If the subdirectory has no good paths and more than one suspect path, delete
+ all the other incoming directory entries in the immediate parents.
+
+6. If the subdirectory has zero paths, attach it to the lost and found.
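
Expressed as code, the repair decision amounts to counting path verdicts and
choosing one outcome. The sketch below only restates the rules above; the
types and helper functions are illustrative assumptions, not scrub internals::

    /* Illustrative restatement of the path-based repair decisions. */
    struct dirtree_path_counts {
            unsigned int    good;           /* paths that reached the root */
            unsigned int    suspect;        /* corrupt or cyclic paths */
            unsigned int    bad;            /* paths already marked for deletion */
            unsigned int    total;          /* every path found for this directory */
    };

    static void repair_directory_tree(struct xfs_inode *dp,
                                      const struct dirtree_path_counts *c)
    {
            if (is_root_directory(dp) || inode_link_count(dp) == 0) {
                    delete_all_incoming_dirents(dp);    /* step 2 */
                    return;
            }
            if (c->total == 1) {
                    set_dotdot_to_only_parent(dp);      /* step 3 */
                    return;
            }
            if (c->good > 0 || c->suspect > 1) {
                    delete_other_incoming_dirents(dp);  /* steps 4 and 5 */
                    return;
            }
            if (c->total == 0)
                    attach_to_lost_and_found(dp);       /* step 6 */
    }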
+
+The proposed patches are in the
+`directory tree repair
+<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=scrub-directory-tree>`_
+series.
+
+
.. _orphanage:
The Orphanage
@@ -4769,14 +4956,22 @@ Orphaned files are adopted by the orphanage as follows:
The ``xrep_orphanage_iolock_two`` function follows the inode locking
strategy discussed earlier.
-3. Call ``xrep_orphanage_compute_blkres`` and ``xrep_orphanage_compute_name``
- to compute the new name in the orphanage and the block reservation required.
-
-4. Use ``xrep_orphanage_adoption_prep`` to reserve resources to the repair
+3. Use ``xrep_adoption_trans_alloc`` to reserve resources to the repair
transaction.
-5. Call ``xrep_orphanage_adopt`` to reparent the orphaned file into the lost
- and found, and update the kernel dentry cache.
+4. Call ``xrep_orphanage_compute_name`` to compute the new name in the
+ orphanage.
+
+5. If the adoption is going to happen, call ``xrep_adoption_reparent`` to
+ reparent the orphaned file into the lost and found and invalidate the dentry
+ cache.
+
+6. Call ``xrep_adoption_finish`` to commit any filesystem updates, release the
+ orphanage ILOCK, and clean the scrub transaction. Call
+ ``xrep_adoption_commit`` to commit the updates and the scrub transaction.
+
+7. If a runtime error happens, call ``xrep_adoption_cancel`` to release all
+ resources, as illustrated in the sketch after this list.
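
A condensed sketch of this sequence with its error path is shown below. The
function names come from the steps above, but the ``struct xrep_adoption``
context argument and the signatures are assumptions made for illustration::

    /* Illustrative adoption flow; the real signatures live in the patchset. */
    static int adopt_orphan(struct xrep_adoption *adopt)
    {
            int error;

            error = xrep_adoption_trans_alloc(adopt);   /* step 3 */
            if (error)
                    goto out_cancel;

            error = xrep_orphanage_compute_name(adopt); /* step 4 */
            if (error)
                    goto out_cancel;

            error = xrep_adoption_reparent(adopt);      /* step 5 */
            if (error)
                    goto out_cancel;

            error = xrep_adoption_finish(adopt);        /* step 6 */
            if (error)
                    goto out_cancel;

            return xrep_adoption_commit(adopt);         /* step 6, commit */

    out_cancel:
            xrep_adoption_cancel(adopt);                /* step 7 */
            return error;
    }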
The proposed patches are in the
`orphanage adoption
@@ -5108,18 +5303,18 @@ make it easier for code readers to understand what has been built, for whom it
has been built, and why.
Please feel free to contact the XFS mailing list with questions.
-FIEXCHANGE_RANGE
-----------------
+XFS_IOC_EXCHANGE_RANGE
+----------------------
-As discussed earlier, a second frontend to the atomic extent swap mechanism is
-a new ioctl call that userspace programs can use to commit updates to files
-atomically.
+As discussed earlier, a second frontend to the atomic file mapping exchange
+mechanism is a new ioctl call that userspace programs can use to commit updates
+to files atomically.
This frontend has been out for review for several years now, though the
necessary refinements to online repair and lack of customer demand mean that
the proposal has not been pushed very hard.
-Extent Swapping with Regular User Files
-```````````````````````````````````````
+File Content Exchanges with Regular User Files
+``````````````````````````````````````````````
As mentioned earlier, XFS has long had the ability to swap extents between
files, which is used almost exclusively by ``xfs_fsr`` to defragment files.
@@ -5134,12 +5329,12 @@ the consistency of the fork mappings with the reverse mapping index was to
develop an iterative mechanism that used deferred bmap and rmap operations to
swap mappings one at a time.
This mechanism is identical to steps 2-3 from the procedure above except for
-the new tracking items, because the atomic extent swap mechanism is an
-iteration of an existing mechanism and not something totally novel.
+the new tracking items, because the atomic file mapping exchange mechanism is
+an iteration of an existing mechanism and not something totally novel.
For the narrow case of file defragmentation, the file contents must be
identical, so the recovery guarantees are not much of a gain.
-Atomic extent swapping is much more flexible than the existing swapext
+Atomic file content exchanges are much more flexible than the existing swapext
implementations because it can guarantee that the caller never sees a mix of
old and new contents even after a crash, and it can operate on two arbitrary
file fork ranges.
@@ -5150,11 +5345,11 @@ The extra flexibility enables several new use cases:
Next, it opens a temporary file and calls the file clone operation to reflink
the first file's contents into the temporary file.
Writes to the original file should instead be written to the temporary file.
- Finally, the process calls the atomic extent swap system call
- (``FIEXCHANGE_RANGE``) to exchange the file contents, thereby committing all
- of the updates to the original file, or none of them.
+ Finally, the process calls the atomic file mapping exchange system call
+ (``XFS_IOC_EXCHANGE_RANGE``) to exchange the file contents, thereby
+ committing all of the updates to the original file, or none of them.
+ A userspace sketch of this flow follows these use cases.
-.. _swapext_if_unchanged:
+.. _exchrange_if_unchanged:
- **Transactional file updates**: The same mechanism as above, but the caller
only wants the commit to occur if the original file's contents have not
@@ -5163,16 +5358,17 @@ The extra flexibility enables several new use cases:
change timestamps of the original file before reflinking its data to the
temporary file.
When the program is ready to commit the changes, it passes the timestamps
- into the kernel as arguments to the atomic extent swap system call.
+ into the kernel as arguments to the atomic file mapping exchange system call.
The kernel only commits the changes if the provided timestamps match the
original file.
+ A new ioctl (``XFS_IOC_COMMIT_RANGE``) is provided to perform this.
- **Emulation of atomic block device writes**: Export a block device with a
logical sector size matching the filesystem block size to force all writes
to be aligned to the filesystem block size.
Stage all writes to a temporary file, and when that is complete, call the
- atomic extent swap system call with a flag to indicate that holes in the
- temporary file should be ignored.
+ atomic file mapping exchange system call with a flag to indicate that holes
+ in the temporary file should be ignored.
This emulates an atomic device write in software, and can support arbitrary
scattered writes.
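
A userspace sketch of the commit flow described above is shown below. The
``struct xfs_exchange_range`` layout, the ``XFS_EXCHANGE_RANGE_TO_EOF`` flag,
the direction of the call, and the ``<xfs/xfs_fs.h>`` header location are
assumptions based on the proposed uAPI and should be checked against the
headers shipped with the target kernel; error cleanup is omitted for brevity::

    #define _GNU_SOURCE             /* O_TMPFILE */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>           /* FICLONE */
    #include <xfs/xfs_fs.h>         /* XFS_IOC_EXCHANGE_RANGE (assumed location) */

    /* Stage an update in a temporary file and commit it atomically. */
    static int commit_staged_update(const char *path, const char *dir_on_same_fs)
    {
            struct xfs_exchange_range xchg;
            int origfd, tmpfd;

            origfd = open(path, O_RDWR);
            tmpfd = open(dir_on_same_fs, O_TMPFILE | O_RDWR, 0600);
            if (origfd < 0 || tmpfd < 0)
                    return -1;

            /* Reflink the original contents into the staging file. */
            if (ioctl(tmpfd, FICLONE, origfd))
                    return -1;

            /* ... write the new contents to tmpfd instead of origfd ... */

            /* Exchange the mappings of both files in a single transaction. */
            memset(&xchg, 0, sizeof(xchg));
            xchg.file1_fd = tmpfd;
            xchg.flags = XFS_EXCHANGE_RANGE_TO_EOF;
            return ioctl(origfd, XFS_IOC_EXCHANGE_RANGE, &xchg);
    }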
@@ -5254,8 +5450,8 @@ of the file to try to share the physical space with a dummy file.
Cloning the extent means that the original owners cannot overwrite the
contents; any changes will be written somewhere else via copy-on-write.
Clearspace makes its own copy of the frozen extent in an area that is not being
-cleared, and uses ``FIEDEUPRANGE`` (or the :ref:`atomic extent swap
-<swapext_if_unchanged>` feature) to change the target file's data extent
+cleared, and uses ``FIEDEUPRANGE`` (or the :ref:`atomic file content exchanges
+<exchrange_if_unchanged>` feature) to change the target file's data extent
mapping away from the area being cleared.
When all other mappings have been moved, clearspace reflinks the space into the
space collector file so that it becomes unavailable.
diff --git a/Documentation/firmware-guide/acpi/namespace.rst b/Documentation/firmware-guide/acpi/namespace.rst
index 4ef963679a3d..5021843b526b 100644
--- a/Documentation/firmware-guide/acpi/namespace.rst
+++ b/Documentation/firmware-guide/acpi/namespace.rst
@@ -15,7 +15,7 @@ ACPI Device Tree - Representation of ACPI Namespace
Abstract
========
The Linux ACPI subsystem converts ACPI namespace objects into a Linux
-device tree under the /sys/devices/LNXSYSTEM:00 and updates it upon
+device tree under the /sys/devices/LNXSYSTM:00 and updates it upon
receiving ACPI hotplug notification events. For each device object
in this hierarchy there is a corresponding symbolic link in the
/sys/bus/acpi/devices.
@@ -326,7 +326,7 @@ example ACPI namespace illustrated in Figure 2 with the addition of
fixed PWR_BUTTON/SLP_BUTTON devices is shown below::
+--------------+---+-----------------+
- | LNXSYSTEM:00 | \ | acpi:LNXSYSTEM: |
+ | LNXSYSTM:00 | \ | acpi:LNXSYSTM: |
+--------------+---+-----------------+
|
| +-------------+-----+----------------+
diff --git a/Documentation/gpu/amdgpu/debugging.rst b/Documentation/gpu/amdgpu/debugging.rst
new file mode 100644
index 000000000000..e75f97d0e4ea
--- /dev/null
+++ b/Documentation/gpu/amdgpu/debugging.rst
@@ -0,0 +1,80 @@
+===============
+ GPU Debugging
+===============
+
+GPUVM Debugging
+===============
+
+To aid in debugging GPU virtual memory related problems, the driver supports a
+number of module parameters:
+
+`vm_fault_stop` - If non-0, halt the GPU memory controller on a GPU page fault.
+
+`vm_update_mode` - If non-0, use the CPU to update GPU page tables rather than
+the GPU.
+
+
+Decoding a GPUVM Page Fault
+===========================
+
+If you see a GPU page fault in the kernel log, you can decode it to figure
+out what is going wrong in your application. A page fault in your kernel
+log may look something like this:
+
+::
+
+ [gfxhub0] no-retry page fault (src_id:0 ring:24 vmid:3 pasid:32777, for process glxinfo pid 2424 thread glxinfo:cs0 pid 2425)
+ in page starting at address 0x0000800102800000 from IH client 0x1b (UTCL2)
+ VM_L2_PROTECTION_FAULT_STATUS:0x00301030
+ Faulty UTCL2 client ID: TCP (0x8)
+ MORE_FAULTS: 0x0
+ WALKER_ERROR: 0x0
+ PERMISSION_FAULTS: 0x3
+ MAPPING_ERROR: 0x0
+ RW: 0x0
+
+First you have the memory hub, gfxhub and mmhub. gfxhub is the memory
+hub used for graphics, compute, and sdma on some chips. mmhub is the
+memory hub used for multi-media and sdma on some chips.
+
+Next you have the vmid and pasid. If the vmid is 0, this fault was likely
+caused by the kernel driver or firmware. If the vmid is non-0, it is generally
+a fault in a user application. The pasid is used to link a vmid to a system
+process id. If the process is active when the fault happens, the process
+information will be printed.
+
+The GPU virtual address that caused the fault comes next.
+
+The client ID indicates the GPU block that caused the fault.
+Some common client IDs:
+
+- CB/DB: The color/depth backend of the graphics pipe
+- CPF: Command Processor Frontend
+- CPC: Command Processor Compute
+- CPG: Command Processor Graphics
+- TCP/SQC/SQG: Shaders
+- SDMA: SDMA engines
+- VCN: Video encode/decode engines
+- JPEG: JPEG engines
+
+PERMISSION_FAULTS describe what faults were encountered:
+
+- bit 0: the PTE was not valid
+- bit 1: the PTE read bit was not set
+- bit 2: the PTE write bit was not set
+- bit 3: the PTE execute bit was not set
+
+Finally, RW indicates whether the access was a read (0) or a write (1).
+
+In the example above, a shader (client id = TCP) generated a read (RW = 0x0) to
+an invalid page (PERMISSION_FAULTS = 0x3) at GPU virtual address
+0x0000800102800000. The user can then inspect their shader code and resource
+descriptor state to determine what caused the GPU page fault.
+
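When reading such reports by hand, the bit definitions above can be turned
into a small decoder. The illustrative userspace helper below interprets the
PERMISSION_FAULTS and RW values already printed in the log; it does not parse
the raw VM_L2_PROTECTION_FAULT_STATUS register::

    #include <stdio.h>

    /* Decode the PERMISSION_FAULTS nibble and the RW bit from a fault report. */
    static void decode_fault(unsigned int permission_faults, unsigned int rw)
    {
            if (permission_faults & 0x1)
                    printf("  PTE was not valid\n");
            if (permission_faults & 0x2)
                    printf("  PTE read bit was not set\n");
            if (permission_faults & 0x4)
                    printf("  PTE write bit was not set\n");
            if (permission_faults & 0x8)
                    printf("  PTE execute bit was not set\n");
            printf("  access was a %s\n", rw ? "write" : "read");
    }

    int main(void)
    {
            decode_fault(0x3, 0x0);         /* values from the example above */
            return 0;
    }
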
+UMR
+===
+
+`umr <https://gitlab.freedesktop.org/tomstdenis/umr>`_ is a general purpose
+GPU debugging and diagnostics tool. Please see the umr
+`documentation <https://umr.readthedocs.io/en/main/>`_ for more information
+about its capabilities.
diff --git a/Documentation/gpu/amdgpu/display/display-contributing.rst b/Documentation/gpu/amdgpu/display/display-contributing.rst
index fdb2bea01d53..36f3077eee00 100644
--- a/Documentation/gpu/amdgpu/display/display-contributing.rst
+++ b/Documentation/gpu/amdgpu/display/display-contributing.rst
@@ -135,7 +135,7 @@ Enable underlay
---------------
AMD display has this feature called underlay (which you can read more about at
-'Documentation/GPU/amdgpu/display/mpo-overview.rst') which is intended to
+'Documentation/gpu/amdgpu/display/mpo-overview.rst') which is intended to
save power when playing a video. The basic idea is to put a video in the
underlay plane at the bottom and the desktop in the plane above it with a hole
in the video area. This feature is enabled in ChromeOS, and from our data
diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst
index 912e699fd373..847e04924030 100644
--- a/Documentation/gpu/amdgpu/index.rst
+++ b/Documentation/gpu/amdgpu/index.rst
@@ -15,4 +15,5 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
ras
thermal
driver-misc
+ debugging
amdgpu-glossary
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
index e5070a0e95ab..971cdb4816fc 100644
--- a/Documentation/gpu/driver-uapi.rst
+++ b/Documentation/gpu/driver-uapi.rst
@@ -18,6 +18,11 @@ VM_BIND / EXEC uAPI
.. kernel-doc:: include/uapi/drm/nouveau_drm.h
+drm/panthor uAPI
+================
+
+.. kernel-doc:: include/uapi/drm/panthor_drm.h
+
drm/xe uAPI
===========
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 13d3627d8bc0..abfe220764e1 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -398,6 +398,21 @@ Plane Damage Tracking Functions Reference
.. kernel-doc:: include/drm/drm_damage_helper.h
:internal:
+Plane Panic Feature
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_panic.c
+ :doc: overview
+
+Plane Panic Functions Reference
+-------------------------------
+
+.. kernel-doc:: include/drm/drm_panic.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panic.c
+ :export:
+
Display Modes Function Reference
================================
@@ -496,6 +511,13 @@ addition to the one mentioned above:
* An IGT test must be submitted where reasonable.
+For historical reasons, non-standard, driver-specific properties exist. If a KMS
+driver wants to add support for one of those properties, the requirements for
+new properties apply where possible. Additionally, the documented behavior must
+match the de facto semantics of the existing property to ensure compatibility.
+Developers of the driver that first added the property should help with those
+tasks and must ACK the documented behavior if possible.
+
Property Types and Blob Property Support
----------------------------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 0ca1550fd9dc..17261ba18313 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -204,6 +204,15 @@ DMC Firmware Support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc.c
:internal:
+DMC wakelock support
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
+ :doc: DMC wakelock support
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
+ :internal:
+
Video BIOS Table (VBT)
----------------------
diff --git a/Documentation/gpu/panfrost.rst b/Documentation/gpu/panfrost.rst
index b80e41f4b2c5..51ba375fd80d 100644
--- a/Documentation/gpu/panfrost.rst
+++ b/Documentation/gpu/panfrost.rst
@@ -38,3 +38,12 @@ the currently possible format options:
Possible `drm-engine-` key names are: `fragment`, and `vertex-tiler`.
`drm-curfreq-` values convey the current operating frequency for that engine.
+
+Users must bear in mind that engine and cycle sampling are disabled by default
+because of power saving concerns. `fdinfo` users and benchmark applications that
+query the fdinfo file must make sure to toggle the job profiling status of the
+driver by writing into the appropriate sysfs node::
+
+ echo <N> > /sys/bus/platform/drivers/panfrost/[a-f0-9]*.gpu/profiling
+
+Where `N` is either `0` or `1`, depending on the desired enablement status.
diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
index 8a8fcd4fceac..bc26dc126104 100644
--- a/Documentation/gpu/rfc/i915_vm_bind.h
+++ b/Documentation/gpu/rfc/i915_vm_bind.h
@@ -93,12 +93,11 @@ struct drm_i915_gem_timeline_fence {
* Multiple VA mappings can be created to the same section of the object
* (aliasing).
*
- * The @start, @offset and @length must be 4K page aligned. However the DG2
- * and XEHPSDV has 64K page size for device local memory and has compact page
- * table. On those platforms, for binding device local-memory objects, the
- * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
- * the local memory 64K page and the system memory 4K page bindings in the same
- * 2M range.
+ * The @start, @offset and @length must be 4K page aligned. However the DG2 has
+ * 64K page size for device local memory and has compact page table. On that
+ * platform, for binding device local-memory objects, the @start, @offset and
+ * @length must be 64K aligned. Also, UMDs should not mix the local memory 64K
+ * page and the system memory 4K page bindings in the same 2M range.
*
* Error code -EINVAL will be returned if @start, @offset and @length are not
* properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
diff --git a/Documentation/hid/hid-bpf.rst b/Documentation/hid/hid-bpf.rst
index 4fad83a6ebc3..0765b3298ecf 100644
--- a/Documentation/hid/hid-bpf.rst
+++ b/Documentation/hid/hid-bpf.rst
@@ -179,7 +179,7 @@ Available API that can be used in syscall HID-BPF programs:
-----------------------------------------------------------
.. kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c
- :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_allocate_context hid_bpf_release_context
+ :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_input_report hid_bpf_allocate_context hid_bpf_release_context
General overview of a HID-BPF program
=====================================
diff --git a/Documentation/hid/intel-ish-hid.rst b/Documentation/hid/intel-ish-hid.rst
index 42dc77b7b10f..55cbaa719a79 100644
--- a/Documentation/hid/intel-ish-hid.rst
+++ b/Documentation/hid/intel-ish-hid.rst
@@ -18,8 +18,8 @@ These ISH also comply to HID sensor specification, but the difference is the
transport protocol used for communication. The current external sensor hubs
mainly use HID over I2C or USB. But ISH doesn't use either I2C or USB.
-1. Overview
-===========
+Overview
+========
Using an analogy with a usbhid implementation, the ISH follows a similar model
for a very high speed communication::
@@ -58,8 +58,8 @@ implemented as a bus. Each client application executing in the ISH processor
is registered as a device on this bus. The driver, which binds each device
(ISH HID driver) identifies the device type and registers with the HID core.
-2. ISH Implementation: Block Diagram
-====================================
+ISH Implementation: Block Diagram
+=================================
::
@@ -96,27 +96,27 @@ is registered as a device on this bus. The driver, which binds each device
| ISH Hardware/Firmware(FW) |
----------------------------
-3. High level processing in above blocks
-========================================
+High level processing in above blocks
+=====================================
-3.1 Hardware Interface
-----------------------
+Hardware Interface
+------------------
The ISH is exposed as "Non-VGA unclassified PCI device" to the host. The PCI
product and vendor IDs change between different generations of processors, so
the source code which enumerates drivers needs updating from generation to
generation.
-3.2 Inter Processor Communication (IPC) driver
-----------------------------------------------
+Inter Processor Communication (IPC) driver
+------------------------------------------
Location: drivers/hid/intel-ish-hid/ipc
The IPC message uses memory mapped I/O. The registers are defined in
hw-ish-regs.h.
-3.2.1 IPC/FW message types
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+IPC/FW message types
+^^^^^^^^^^^^^^^^^^^^
There are two types of messages, one for management of link and another for
messages to and from transport layers.
@@ -142,20 +142,20 @@ register has the following format::
Bit 31: doorbell trigger (signal H/W interrupt to the other side)
Other bits are reserved, should be 0.
-3.2.2 Transport layer interface
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Transport layer interface
+^^^^^^^^^^^^^^^^^^^^^^^^^
To abstract HW level IPC communication, a set of callbacks is registered.
The transport layer uses them to send and receive messages.
Refer to struct ishtp_hw_ops for callbacks.
-3.3 ISH Transport layer
------------------------
+ISH Transport layer
+-------------------
Location: drivers/hid/intel-ish-hid/ishtp/
-3.3.1 A Generic Transport Layer
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+A Generic Transport Layer
+^^^^^^^^^^^^^^^^^^^^^^^^^
The transport layer is a bi-directional protocol, which defines:
- Set of commands to start, stop, connect, disconnect and flow control
@@ -166,8 +166,8 @@ This protocol resembles bus messages described in the following document:
http://www.intel.com/content/dam/www/public/us/en/documents/technical-\
specifications/dcmi-hi-1-0-spec.pdf "Chapter 7: Bus Message Layer"
-3.3.2 Connection and Flow Control Mechanism
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Connection and Flow Control Mechanism
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Each FW client and a protocol is identified by a UUID. In order to communicate
to a FW client, a connection must be established using connect request and
@@ -181,8 +181,8 @@ before receiving the next flow control credit.
Either side can send disconnect request bus message to end communication. Also
the link will be dropped if major FW reset occurs.
-3.3.3 Peer to Peer data transfer
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Peer to Peer data transfer
+^^^^^^^^^^^^^^^^^^^^^^^^^^
Peer to Peer data transfer can happen with or without using DMA. Depending on
the sensor bandwidth requirement DMA can be enabled by using module parameter
@@ -217,8 +217,8 @@ In principle, multiple DMA_XFER and DMA_XFER_ACK messages may be sent at once
Currently, ISH FW decides to send over DMA if ISHTP message is more than 3 IPC
fragments and via IPC otherwise.
-3.3.4 Ring Buffers
-^^^^^^^^^^^^^^^^^^
+Ring Buffers
+^^^^^^^^^^^^
When a client initiates a connection, a ring of RX and TX buffers is allocated.
The size of ring can be specified by the client. HID client sets 16 and 32 for
@@ -228,8 +228,8 @@ bus message protocol. These buffers are required because the FW may have not
have processed the last message and may not have enough flow control credits
to send. Same thing holds true on receive side and flow control is required.
-3.3.5 Host Enumeration
-^^^^^^^^^^^^^^^^^^^^^^
+Host Enumeration
+^^^^^^^^^^^^^^^^
The host enumeration bus command allows discovery of clients present in the FW.
There can be multiple sensor clients and clients for calibration function.
@@ -252,8 +252,8 @@ Enumeration sequence of messages:
- Once host received properties for that last discovered client, it considers
ISHTP device fully functional (and allocates DMA buffers)
-3.4 HID over ISH Client
------------------------
+HID over ISH Client
+-------------------
Location: drivers/hid/intel-ish-hid
@@ -265,16 +265,16 @@ The ISHTP client driver is responsible for:
- Process Get/Set feature request
- Get input reports
-3.5 HID Sensor Hub MFD and IIO sensor drivers
----------------------------------------------
+HID Sensor Hub MFD and IIO sensor drivers
+-----------------------------------------
The functionality in these drivers is the same as an external sensor hub.
Refer to
Documentation/hid/hid-sensor.rst for HID sensor
Documentation/ABI/testing/sysfs-bus-iio for IIO ABIs to user space.
-3.6 End to End HID transport Sequence Diagram
----------------------------------------------
+End to End HID transport Sequence Diagram
+-----------------------------------------
::
@@ -339,16 +339,81 @@ Documentation/ABI/testing/sysfs-bus-iio for IIO ABIs to user space.
| | | |
-3.7 ISH Debugging
------------------
+ISH Firmware Loading from Host Flow
+-----------------------------------
+
+Starting from the Lunar Lake generation, the ISH firmware has been divided into two components for better space optimization and increased flexibility. These components include a bootloader that is integrated into the BIOS, and a main firmware that is stored within the operating system's file system.
+
+The process works as follows:
+
+- Initially, the ISHTP driver sends a command, HOST_START_REQ_CMD, to the ISH bootloader. In response, the bootloader sends back a HOST_START_RES_CMD. This response includes the ISHTP_SUPPORT_CAP_LOADER bit. Subsequently, the ISHTP driver checks if this bit is set. If it is, the firmware loading process from the host begins.
+
+- During this process, the ISHTP driver first invokes the request_firmware() function, followed by sending a LOADER_CMD_XFER_QUERY command. Upon receiving a response from the bootloader, the ISHTP driver sends a LOADER_CMD_XFER_FRAGMENT command. After receiving another response, the ISHTP driver sends a LOADER_CMD_START command. The bootloader responds and then proceeds to the Main Firmware.
+
+- After the process concludes, the ISHTP driver calls the release_firmware() function.
+
+For more detailed information, please refer to the flow description provided below; a condensed driver-side sketch follows the diagram:
+
+::
+
+ +---------------+ +-----------------+
+ | ISHTP Driver | | ISH Bootloader |
+ +---------------+ +-----------------+
+ | |
+ |~~~Send HOST_START_REQ_CMD~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>|
+ | |
+ |<--Send HOST_START_RES_CMD(Includes ISHTP_SUPPORT_CAP_LOADER bit)----|
+ | |
+ ****************************************************************************************
+ * if ISHTP_SUPPORT_CAP_LOADER bit is set *
+ ****************************************************************************************
+ | |
+ |~~~start loading firmware from host process~~~+ |
+ | | |
+ |<---------------------------------------------+ |
+ | |
+ --------------------------- |
+ | Call request_firmware() | |
+ --------------------------- |
+ | |
+ |~~~Send LOADER_CMD_XFER_QUERY~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>|
+ | |
+ |<--Send response-----------------------------------------------------|
+ | |
+ |~~~Send LOADER_CMD_XFER_FRAGMENT~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>|
+ | |
+ |<--Send response-----------------------------------------------------|
+ | |
+ |~~~Send LOADER_CMD_START~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>|
+ | |
+ |<--Send response-----------------------------------------------------|
+ | |
+ | |~~~Jump to Main Firmware~~~+
+ | | |
+ | |<--------------------------+
+ | |
+ --------------------------- |
+ | Call release_firmware() | |
+ --------------------------- |
+ | |
+ ****************************************************************************************
+ * end if *
+ ****************************************************************************************
+ | |
+ +---------------+ +-----------------+
+ | ISHTP Driver | | ISH Bootloader |
+ +---------------+ +-----------------+
+
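The sketch below condenses the driver-side steps above into code form.
``request_firmware()`` and ``release_firmware()`` are the real kernel firmware
loader calls, but ``ishtp_loader_send()``, the command constants spelled as an
enum, the fragment size, and the firmware file name are illustrative stand-ins
for the actual ISHTP loader implementation::

    #include <linux/firmware.h>
    #include <linux/minmax.h>

    #define ISH_FRAGMENT_SIZE 4096          /* illustrative chunk size */

    /* Hypothetical wrapper around the ISHTP loader bus messages. */
    enum { LOADER_CMD_XFER_QUERY, LOADER_CMD_XFER_FRAGMENT, LOADER_CMD_START };
    static int ishtp_loader_send(int cmd, const void *buf, size_t len);

    static int ish_load_main_firmware(struct device *dev)
    {
            const struct firmware *fw;
            size_t off = 0;
            int error;

            error = request_firmware(&fw, "intel/ish/ish_main.bin", dev);
            if (error)
                    return error;

            error = ishtp_loader_send(LOADER_CMD_XFER_QUERY, NULL, 0);
            while (!error && off < fw->size) {
                    size_t len = min_t(size_t, fw->size - off, ISH_FRAGMENT_SIZE);

                    error = ishtp_loader_send(LOADER_CMD_XFER_FRAGMENT,
                                              fw->data + off, len);
                    off += len;
            }
            if (!error)
                    error = ishtp_loader_send(LOADER_CMD_START, NULL, 0);

            release_firmware(fw);
            return error;
    }
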
+ISH Debugging
+-------------
To debug ISH, event tracing mechanism is used. To enable debug logs::
echo 1 > /sys/kernel/tracing/events/intel_ish/enable
cat /sys/kernel/tracing/trace
-3.8 ISH IIO sysfs Example on Lenovo thinkpad Yoga 260
------------------------------------------------------
+ISH IIO sysfs Example on Lenovo thinkpad Yoga 260
+-------------------------------------------------
::
diff --git a/Documentation/hwmon/adm1275.rst b/Documentation/hwmon/adm1275.rst
index 804590eeabdc..467daf8ce3c5 100644
--- a/Documentation/hwmon/adm1275.rst
+++ b/Documentation/hwmon/adm1275.rst
@@ -43,6 +43,14 @@ Supported chips:
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1278.pdf
+ * Analog Devices ADM1281
+
+ Prefix: 'adm1281'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/adm1281.pdf
+
* Analog Devices ADM1293/ADM1294
Prefix: 'adm1293', 'adm1294'
@@ -58,10 +66,10 @@ Description
-----------
This driver supports hardware monitoring for Analog Devices ADM1075, ADM1272,
-ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294 Hot-Swap Controller and
+ADM1275, ADM1276, ADM1278, ADM1281, ADM1293, and ADM1294 Hot-Swap Controller and
Digital Power Monitors.
-ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294 are hot-swap
+ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1281, ADM1293, and ADM1294 are hot-swap
controllers that allow a circuit board to be removed from or inserted into
a live backplane. They also feature current and voltage readback via an
integrated 12 bit analog-to-digital converter (ADC), accessed using a
@@ -144,5 +152,5 @@ temp1_highest Highest observed temperature.
temp1_reset_history Write any value to reset history.
Temperature attributes are supported on ADM1272 and
- ADM1278.
+ ADM1278, and ADM1281.
======================= =======================================================
diff --git a/Documentation/hwmon/adp1050.rst b/Documentation/hwmon/adp1050.rst
new file mode 100644
index 000000000000..8fa937064886
--- /dev/null
+++ b/Documentation/hwmon/adp1050.rst
@@ -0,0 +1,64 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver adp1050
+=====================
+
+Supported chips:
+
+ * Analog Devices ADP1050
+
+ Prefix: 'adp1050'
+
+ Addresses scanned: I2C 0x70 - 0x77
+
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADP1050.pdf
+
+Authors:
+
+ - Radu Sabau <radu.sabau@analog.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Analog Devices ADP1050 Digital
+Controller for Isolated Power Supply with PMBus interface.
+
+The ADP1050 is an advanced digital controller with a PMBus™
+interface, targeting high density, high efficiency dc-to-dc power
+conversion; it is used to monitor system temperatures, voltages and currents.
+Through the PMBus interface, the device can monitor input/output voltages,
+input current and temperature.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate
+the devices explicitly.
+Please see Documentation/i2c/instantiating-devices.rst for details.
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+Sysfs Attributes
+----------------
+
+================= ========================================
+in1_label "vin"
+in1_input Measured input voltage
+in1_alarm Input voltage alarm
+in2_label "vout1"
+in2_input Measured output voltage
+in2_crit Critical maximum output voltage
+in2_crit_alarm Output voltage high alarm
+in2_lcrit Critical minimum output voltage
+in2_lcrit_alarm Output voltage critical low alarm
+curr1_label "iin"
+curr1_input Measured input current.
+curr1_alarm Input current alarm
+temp1_input Measured temperature
+temp1_crit Critical high temperature
+temp1_crit_alarm Chip temperature critical high alarm
+================= ========================================
diff --git a/Documentation/hwmon/aquacomputer_d5next.rst b/Documentation/hwmon/aquacomputer_d5next.rst
index cb073c79479c..49163f387b90 100644
--- a/Documentation/hwmon/aquacomputer_d5next.rst
+++ b/Documentation/hwmon/aquacomputer_d5next.rst
@@ -45,9 +45,9 @@ seems to require sending it a complete configuration. That includes addressable
RGB LEDs, for which there is no standard sysfs interface. Thus, that task is
better suited for userspace tools.
-The Octo exposes four physical and sixteen virtual temperature sensors, as well as
-eight PWM controllable fans, along with their speed (in RPM), power, voltage and
-current.
+The Octo exposes four physical and sixteen virtual temperature sensors, a flow sensor
+as well as eight PWM controllable fans, along with their speed (in RPM), power, voltage
+and current. Flow sensor pulses are also available.
The Quadro exposes four physical and sixteen virtual temperature sensors, a flow
sensor and four PWM controllable fans, along with their speed (in RPM), power,
@@ -95,11 +95,12 @@ Sysfs entries
================ ==============================================================
temp[1-20]_input Physical/virtual temperature sensors (in millidegrees Celsius)
temp[1-8]_offset Temperature sensor correction offset (in millidegrees Celsius)
-fan[1-8]_input Pump/fan speed (in RPM) / Flow speed (in dL/h)
+fan[1-9]_input Pump/fan speed (in RPM) / Flow speed (in dL/h)
fan1_min Minimal fan speed (in RPM)
fan1_max Maximal fan speed (in RPM)
fan1_target Target fan speed (in RPM)
fan5_pulses Quadro flow sensor pulses
+fan9_pulses Octo flow sensor pulses
power[1-8]_input Pump/fan power (in micro Watts)
in[0-7]_input Pump/fan voltage (in milli Volts)
curr[1-8]_input Pump/fan current (in milli Amperes)
diff --git a/Documentation/hwmon/emc1403.rst b/Documentation/hwmon/emc1403.rst
index 0de9616b24ed..57f833b1a800 100644
--- a/Documentation/hwmon/emc1403.rst
+++ b/Documentation/hwmon/emc1403.rst
@@ -45,6 +45,17 @@ Supported chips:
- https://ww1.microchip.com/downloads/en/DeviceDoc/1423_1424.pdf
+ * SMSC / Microchip EMC1428, EMC1438
+
+ Addresses scanned: I2C 0x18, 0x4c, 0x4d
+
+ Prefix: 'emc1428', 'emc1438'
+
+ Datasheets:
+
+ - https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ProductDocuments/DataSheets/20005275A.pdf
+ - https://ww1.microchip.com/downloads/en/DeviceDoc/EMC1438%20DS%20Rev.%201.0%20(04-29-10).pdf
+
Author:
Kalhan Trisal <kalhan.trisal@intel.com
@@ -53,10 +64,10 @@ Description
-----------
The Standard Microsystems Corporation (SMSC) / Microchip EMC14xx chips
-contain up to four temperature sensors. EMC14x2 support two sensors
+contain up to eight temperature sensors. EMC14x2 support two sensors
(one internal, one external). EMC14x3 support three sensors (one internal,
-two external), and EMC14x4 support four sensors (one internal, three
-external).
+two external), EMC14x4 support four sensors (one internal, three external),
+and EMC14x8 support eight sensors (one internal, seven external).
The chips implement three limits for each sensor: low (tempX_min), high
(tempX_max) and critical (tempX_crit.) The chips also implement an
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 1ca7a4fe1f8f..03d313af469a 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -33,6 +33,7 @@ Hardware Monitoring Kernel Drivers
adm1266
adm1275
adm9240
+ adp1050
ads7828
adt7410
adt7411
@@ -250,6 +251,7 @@ Hardware Monitoring Kernel Drivers
wm831x
wm8350
xgene-hwmon
+ xdp710
xdpe12284
xdpe152c4
zl6100
diff --git a/Documentation/hwmon/lm70.rst b/Documentation/hwmon/lm70.rst
index 11303a7e16a8..02ed60dddffb 100644
--- a/Documentation/hwmon/lm70.rst
+++ b/Documentation/hwmon/lm70.rst
@@ -5,7 +5,7 @@ Supported chips:
* National Semiconductor LM70
- Datasheet: http://www.national.com/pf/LM/LM70.html
+ Datasheet: https://www.ti.com/product/LM70
* Texas Instruments TMP121/TMP123
diff --git a/Documentation/hwmon/nzxt-kraken3.rst b/Documentation/hwmon/nzxt-kraken3.rst
index 90fd9dec15ff..57fe99d23301 100644
--- a/Documentation/hwmon/nzxt-kraken3.rst
+++ b/Documentation/hwmon/nzxt-kraken3.rst
@@ -11,17 +11,20 @@ Supported devices:
* NZXT Kraken Z53
* NZXT Kraken Z63
* NZXT Kraken Z73
+* NZXT Kraken 2023
+* NZXT Kraken 2023 Elite
Author: Jonas Malaco, Aleksa Savic
Description
-----------
-This driver enables hardware monitoring support for NZXT Kraken X53/X63/X73 and
-Z53/Z63/Z73 all-in-one CPU liquid coolers. All models expose liquid temperature
-and pump speed (in RPM), as well as PWM control (either as a fixed value
-or through a temp-PWM curve). The Z-series models additionally expose the speed
-and duty of an optionally connected fan, with the same PWM control capabilities.
+This driver enables hardware monitoring support for NZXT Kraken X53/X63/X73,
+Z53/Z63/Z73 and Kraken 2023 (standard and Elite) all-in-one CPU liquid coolers.
+All models expose liquid temperature and pump speed (in RPM), as well as PWM
+control (either as a fixed value or through a temp-PWM curve). The Z-series and
+Kraken 2023 models additionally expose the speed and duty of an optionally connected
+fan, with the same PWM control capabilities.
Pump and fan duty control mode can be set through pwm[1-2]_enable, where 1 is
for the manual control mode and 2 is for the liquid temp to PWM curve mode.
@@ -39,9 +42,9 @@ The devices can report if they are faulty. The driver supports that situation
and will issue a warning. This can also happen when the USB cable is connected,
but SATA power is not.
-The addressable RGB LEDs and LCD screen (only on Z-series models) are not
-supported in this driver, but can be controlled through existing userspace tools,
-such as `liquidctl`_.
+The addressable RGB LEDs and LCD screen (only on Z-series and Kraken 2023 models)
+are not supported in this driver, but can be controlled through existing userspace
+tools, such as `liquidctl`_.
.. _liquidctl: https://github.com/liquidctl/liquidctl
diff --git a/Documentation/hwmon/pmbus.rst b/Documentation/hwmon/pmbus.rst
index eb1569bfa676..d477124cf67f 100644
--- a/Documentation/hwmon/pmbus.rst
+++ b/Documentation/hwmon/pmbus.rst
@@ -152,7 +152,7 @@ Emerson DS1200 power modules might look as follows::
}
static const struct i2c_device_id ds1200_id[] = {
- {"ds1200", 0},
+ {"ds1200"},
{}
};
diff --git a/Documentation/hwmon/xdp710.rst b/Documentation/hwmon/xdp710.rst
new file mode 100644
index 000000000000..083891f27818
--- /dev/null
+++ b/Documentation/hwmon/xdp710.rst
@@ -0,0 +1,83 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver xdp710
+====================
+
+Supported chips:
+
+ * Infineon XDP710
+
+ Prefix: 'xdp710'
+
+ * Datasheet
+
+ Publicly available at the Infineon website : https://www.infineon.com/dgdl/Infineon-XDP710-001-DataSheet-v01_00-EN.pdf?fileId=8ac78c8c8412f8d301848a5316290b97
+
+Author:
+
+ Peter Yin <peteryin.openbmc@gmail.com>
+
+Description
+-----------
+
+This driver implements support for Infineon XDP710 Hot-Swap Controller.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+Device supports direct and linear format for reading input voltage,
+output voltage, output current, input power and temperature.
+
+The driver exports the following attributes via the 'sysfs' files
+for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_max**
+
+**in1_max_alarm**
+
+**in1_min**
+
+**in1_min_alarm**
+
+The driver provides the following attributes for output voltage:
+
+**in2_input**
+
+**in2_label**
+
+**in2_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_alarm**
+
+**curr1_max**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
+
+**power1_alarm**
+
+The driver provides the following attributes for temperature:
+
+**temp1_input**
+
+**temp1_max**
+
+**temp1_max_alarm**
+
+**temp1_crit**
+
+**temp1_crit_alarm**
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 5298611e00ee..f9f525f4c0dd 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -107,7 +107,7 @@ Other documentation
There are several unsorted documents that don't seem to fit on other parts
of the documentation body, or may require some adjustments and/or conversion
-to ReStructured Text format, or are simply too old.
+to reStructuredText format, or are simply too old.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 79ac2e8184f6..555c2f839969 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -410,9 +410,6 @@ to be set to 'm'. This can be used if multiple drivers for a single
hardware exists and only a single driver can be compiled/loaded into
the kernel, but all drivers can be compiled as modules.
-A choice accepts another option "optional", which allows to set the
-choice to 'n' and no entry needs to be selected.
-
comment::
"comment" <prompt>
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index b1d97fafddcf..bb5c44f8bd1c 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -178,7 +178,7 @@ yet. Bug reports are always welcome at the issue tracker below!
- ``LLVM=1``
* - s390
- Maintained
- - ``CC=clang``
+ - ``LLVM=1`` (LLVM >= 18.1.0), ``CC=clang`` (LLVM < 18.1.0)
* - um (User Mode)
- Maintained
- ``LLVM=1``
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index ad118b7a1806..991ce6081e35 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -346,7 +346,7 @@ ccflags-y, asflags-y and ldflags-y
Example::
#arch/cris/boot/compressed/Makefile
- ldflags-y += -T $(srctree)/$(src)/decompress_$(arch-y).lds
+ ldflags-y += -T $(src)/decompress_$(arch-y).lds
subdir-ccflags-y, subdir-asflags-y
The two flags listed above are similar to ccflags-y and asflags-y.
@@ -426,14 +426,14 @@ path to prerequisite files and target files.
Two variables are used when defining custom rules:
$(src)
- $(src) is a relative path which points to the directory
- where the Makefile is located. Always use $(src) when
+ $(src) is the directory where the Makefile is located. Always use $(src) when
referring to files located in the src tree.
$(obj)
- $(obj) is a relative path which points to the directory
- where the target is saved. Always use $(obj) when
- referring to generated files.
+ $(obj) is the directory where the target is saved. Always use $(obj) when
+ referring to generated files. Use $(obj) for pattern rules that need to work
+ for both generated files and real sources (VPATH will help to find the
+ prerequisites not only in the object tree but also in the source tree).
Example::
diff --git a/Documentation/litmus-tests/README b/Documentation/litmus-tests/README
index 658d37860d39..6c666f3422ea 100644
--- a/Documentation/litmus-tests/README
+++ b/Documentation/litmus-tests/README
@@ -21,6 +21,51 @@ Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus
Test that atomic_set() cannot break the atomicity of atomic RMWs.
NOTE: Require herd7 7.56 or later which supports "(void)expr".
+cmpxchg-fail-ordered-1.litmus
+ Demonstrate that a failing cmpxchg() operation acts as a full barrier
+ when followed by smp_mb__after_atomic().
+
+cmpxchg-fail-ordered-2.litmus
+ Demonstrate that a failing cmpxchg() operation acts as an acquire
+ operation when followed by smp_mb__after_atomic().
+
+cmpxchg-fail-unordered-1.litmus
+ Demonstrate that a failing cmpxchg() operation does not act as a
+ full barrier.
+
+cmpxchg-fail-unordered-2.litmus
+ Demonstrate that a failing cmpxchg() operation does not act as an
+ acquire operation.
+
+
+locking (/locking directory)
+----------------------------
+
+DCL-broken.litmus
+ Demonstrates that double-checked locking needs more than just
+ the obvious lock acquisitions and releases.
+
+DCL-fixed.litmus
+ Demonstrates corrected double-checked locking that uses
+ smp_store_release() and smp_load_acquire() in addition to the
+ obvious lock acquisitions and releases.
+
+RM-broken.litmus
+ Demonstrates problems with "roach motel" locking, where code is
+ freely moved into lock-based critical sections. This example also
+ shows how to use the "filter" clause to discard executions that
+ would be excluded by other code not modeled in the litmus test.
+ Note also that this "roach motel" optimization is emulated by
+ physically moving P1()'s two reads from x under the lock.
+
+ What is a roach motel? This is from an old advertisement for
+ a cockroach trap, much later featured in one of the "Men in
+ Black" movies. "The roaches check in. They don't check out."
+
+RM-fixed.litmus
+ The counterpart to RM-broken.litmus, showing P0()'s two loads from
+ x safely outside of the critical section.
+
RCU (/rcu directory)
--------------------
diff --git a/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-1.litmus b/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-1.litmus
new file mode 100644
index 000000000000..c0f93dc07105
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-1.litmus
@@ -0,0 +1,35 @@
+C cmpxchg-fail-ordered-1
+
+(*
+ * Result: Never
+ *
+ * Demonstrate that a failing cmpxchg() operation will act as a full
+ * barrier when followed by smp_mb__after_atomic().
+ *)
+
+{}
+
+P0(int *x, int *y, int *z)
+{
+ int r0;
+ int r1;
+
+ WRITE_ONCE(*x, 1);
+ r1 = cmpxchg(z, 1, 0);
+ smp_mb__after_atomic();
+ r0 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y, int *z)
+{
+ int r0;
+ int r1;
+
+ WRITE_ONCE(*y, 1);
+ r1 = cmpxchg(z, 1, 0);
+ smp_mb__after_atomic();
+ r0 = READ_ONCE(*x);
+}
+
+locations[0:r1;1:r1]
+exists (0:r0=0 /\ 1:r0=0)
diff --git a/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-2.litmus b/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-2.litmus
new file mode 100644
index 000000000000..5c06054f4694
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/cmpxchg-fail-ordered-2.litmus
@@ -0,0 +1,30 @@
+C cmpxchg-fail-ordered-2
+
+(*
+ * Result: Never
+ *
+ * Demonstrate use of smp_mb__after_atomic() to make a failing cmpxchg
+ * operation have acquire ordering.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+ int r1;
+
+ WRITE_ONCE(*x, 1);
+ r1 = cmpxchg(y, 0, 1);
+}
+
+P1(int *x, int *y)
+{
+ int r1;
+ int r2;
+
+ r1 = cmpxchg(y, 0, 1);
+ smp_mb__after_atomic();
+ r2 = READ_ONCE(*x);
+}
+
+exists (0:r1=0 /\ 1:r1=1 /\ 1:r2=0)
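
(Editorial aside, not part of the patch: the same pattern written as plain
kernel C, a hedged sketch assuming the x/y/z variables of
cmpxchg-fail-ordered-1 above; a real caller would do something with r0/r1.)

.. code-block:: c

	#include <linux/atomic.h>

	static int x, y, z;

	static void p0(void)
	{
		int r0, r1;

		WRITE_ONCE(x, 1);
		r1 = cmpxchg(&z, 1, 0);	/* fails, because z is initially 0 */
		/*
		 * Even though the cmpxchg() failed, the barrier below orders
		 * the store to x before the load from y.  Dropping it gives
		 * the "Sometimes" outcome of cmpxchg-fail-unordered-1.
		 */
		smp_mb__after_atomic();
		r0 = READ_ONCE(y);
		(void)r0;
		(void)r1;
	}
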
diff --git a/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-1.litmus b/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-1.litmus
new file mode 100644
index 000000000000..39ea1f56a28d
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-1.litmus
@@ -0,0 +1,34 @@
+C cmpxchg-fail-unordered-1
+
+(*
+ * Result: Sometimes
+ *
+ * Demonstrate that a failing cmpxchg() operation does not act as a
+ * full barrier. (In contrast, a successful cmpxchg() does act as a
+ * full barrier.)
+ *)
+
+{}
+
+P0(int *x, int *y, int *z)
+{
+ int r0;
+ int r1;
+
+ WRITE_ONCE(*x, 1);
+ r1 = cmpxchg(z, 1, 0);
+ r0 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y, int *z)
+{
+ int r0;
+ int r1;
+
+ WRITE_ONCE(*y, 1);
+ r1 = cmpxchg(z, 1, 0);
+ r0 = READ_ONCE(*x);
+}
+
+locations[0:r1;1:r1]
+exists (0:r0=0 /\ 1:r0=0)
diff --git a/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-2.litmus b/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-2.litmus
new file mode 100644
index 000000000000..61aab24a4a64
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/cmpxchg-fail-unordered-2.litmus
@@ -0,0 +1,30 @@
+C cmpxchg-fail-unordered-2
+
+(*
+ * Result: Sometimes
+ *
+ * Demonstrate that a failing cmpxchg() operation does not act as either
+ * an acquire or a release operation. (In contrast, a successful cmpxchg()
+ * does act as both an acquire and a release operation.)
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+ int r1;
+
+ WRITE_ONCE(*x, 1);
+ r1 = cmpxchg(y, 0, 1);
+}
+
+P1(int *x, int *y)
+{
+ int r1;
+ int r2;
+
+ r1 = cmpxchg(y, 0, 1);
+ r2 = READ_ONCE(*x);
+}
+
+exists (0:r1=0 /\ 1:r1=1 /\ 1:r2=0)
diff --git a/Documentation/mm/allocation-profiling.rst b/Documentation/mm/allocation-profiling.rst
new file mode 100644
index 000000000000..d3b733b41ae6
--- /dev/null
+++ b/Documentation/mm/allocation-profiling.rst
@@ -0,0 +1,100 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+MEMORY ALLOCATION PROFILING
+===========================
+
+Low overhead (suitable for production) accounting of all memory allocations,
+tracked by file and line number.
+
+Usage:
+kconfig options:
+- CONFIG_MEM_ALLOC_PROFILING
+
+- CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+
+- CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ adds warnings for allocations that weren't accounted because of a
+ missing annotation
+
+Boot parameter:
+ sysctl.vm.mem_profiling=0|1|never
+
+ When set to "never", memory allocation profiling overhead is minimized and it
+ cannot be enabled at runtime (sysctl becomes read-only).
+ When CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT=y, default value is "1".
+ When CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT=n, default value is "never".
+
+sysctl:
+ /proc/sys/vm/mem_profiling
+
+Runtime info:
+ /proc/allocinfo
+
+Example output::
+
+ root@moria-kvm:~# sort -g /proc/allocinfo|tail|numfmt --to=iec
+ 2.8M 22648 fs/kernfs/dir.c:615 func:__kernfs_new_node
+ 3.8M 953 mm/memory.c:4214 func:alloc_anon_folio
+ 4.0M 1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start
+ 4.1M 4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable
+ 6.0M 1532 mm/filemap.c:1919 func:__filemap_get_folio
+ 8.8M 2785 kernel/fork.c:307 func:alloc_thread_stack_node
+ 13M 234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs
+ 14M 3520 mm/mm_init.c:2530 func:alloc_large_system_hash
+ 15M 3656 mm/readahead.c:247 func:page_cache_ra_unbounded
+ 55M 4887 mm/slub.c:2259 func:alloc_slab_page
+ 122M 31168 mm/page_ext.c:270 func:alloc_page_ext
+
+===================
+Theory of operation
+===================
+
+Memory allocation profiling builds off of code tagging, which is a library for
+declaring static structs (that typically describe a file and line number in
+some way, hence code tagging) and then finding and operating on them at
+runtime - i.e. iterating over them to print them in debugfs/procfs.
+
+To add accounting for an allocation call, we replace it with a macro
+invocation, alloc_hooks(), that
+- declares a code tag
+- stashes a pointer to it in task_struct
+- calls the real allocation function
+- and finally, restores the task_struct alloc tag pointer to its previous value.
+
+This allows for alloc_hooks() calls to be nested, with the most recent one
+taking effect. This is important for allocations internal to the mm/ code that
+do not properly belong to the outer allocation context and should be counted
+separately: for example, slab object extension vectors, or when the slab
+allocates pages from the page allocator.
+
+Thus, proper usage requires determining which function in an allocation call
+stack should be tagged. There are many helper functions that essentially wrap
+e.g. kmalloc() and do a little more work, then are called in multiple places;
+we'll generally want the accounting to happen in the callers of these helpers,
+not in the helpers themselves.
+
+To fix up a given helper, for example foo(), do the following:
+- switch its allocation call to the _noprof() version, e.g. kmalloc_noprof()
+
+- rename it to foo_noprof()
+
+- define a macro version of foo() like so:
+
+ #define foo(...) alloc_hooks(foo_noprof(__VA_ARGS__))
+
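
(Editorial sketch, not part of the patch: the three steps above applied to a
hypothetical helper "foo_buf"; kmalloc_noprof() and alloc_hooks() are the
names used in the text, everything else is made up for illustration.)

.. code-block:: c

	#include <linux/alloc_tag.h>
	#include <linux/slab.h>

	/* Steps 1 and 2: call the _noprof() variant and rename the helper. */
	static inline void *foo_buf_noprof(size_t len, gfp_t gfp)
	{
		return kmalloc_noprof(len, gfp);
	}

	/*
	 * Step 3: callers keep writing foo_buf(); the allocation is now
	 * accounted to each call site rather than to the helper itself.
	 */
	#define foo_buf(...)	alloc_hooks(foo_buf_noprof(__VA_ARGS__))
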
+It's also possible to stash a pointer to an alloc tag in your own data structures.
+
+Do this when you're implementing a generic data structure that does allocations
+"on behalf of" some other code - for example, the rhashtable code. This way,
+instead of seeing a large line in /proc/allocinfo for rhashtable.c, we can
+break it out by rhashtable type.
+
+To do so:
+- Hook your data structure's init function, like any other allocation function.
+
+- Within your init function, use the convenience macro alloc_tag_record() to
+ record the alloc tag in your data structure.
+
+- Then, use the following form for your allocations:
+ alloc_hooks_tag(ht->your_saved_tag, kmalloc_noprof(...))
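
(Editorial sketch, not part of the patch: the stashed-tag pattern above for a
hypothetical "my_cache" structure; alloc_tag_record() and alloc_hooks_tag()
are the helpers named in the text.)

.. code-block:: c

	#include <linux/alloc_tag.h>
	#include <linux/slab.h>

	struct my_cache {
		struct alloc_tag *tag;
		/* ... */
	};

	static inline int my_cache_init_noprof(struct my_cache *c)
	{
		alloc_tag_record(c->tag);	/* remember who created the cache */
		return 0;
	}
	/* Hook the init function like any other allocation function. */
	#define my_cache_init(c)	alloc_hooks(my_cache_init_noprof(c))

	static inline void *my_cache_alloc(struct my_cache *c, size_t len, gfp_t gfp)
	{
		/* Charge this allocation to the cache creator's tag. */
		return alloc_hooks_tag(c->tag, kmalloc_noprof(len, gfp));
	}
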
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index 2466d3363af7..ad50ca6f495e 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -140,7 +140,8 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_swp_clear_soft_dirty | Clears a soft dirty swapped PMD |
+---------------------------+--------------------------------------------------+
-| pmd_mkinvalid | Invalidates a mapped PMD [1] |
+| pmd_mkinvalid | Invalidates a present PMD; do not call for |
+| | non-present PMD [1] |
+---------------------------+--------------------------------------------------+
| pmd_set_huge | Creates a PMD huge mapping |
+---------------------------+--------------------------------------------------+
@@ -196,7 +197,8 @@ PUD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
+---------------------------+--------------------------------------------------+
-| pud_mkinvalid | Invalidates a mapped PUD [1] |
+| pud_mkinvalid | Invalidates a present PUD; do not call for |
+| | non-present PUD [1] |
+---------------------------+--------------------------------------------------+
| pud_set_huge | Creates a PUD huge mapping |
+---------------------------+--------------------------------------------------+
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 5620aab9b385..3df387249937 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -461,24 +461,32 @@ number of filters for each scheme. Each filter specifies the type of target
memory, and whether it should exclude the memory of the type (filter-out), or
all except the memory of the type (filter-in).
-Currently, anonymous page, memory cgroup, address range, and DAMON monitoring
-target type filters are supported by the feature. Some filter target types
-require additional arguments. The memory cgroup filter type asks users to
-specify the file path of the memory cgroup for the filter. The address range
-type asks the start and end addresses of the range. The DAMON monitoring
-target type asks the index of the target from the context's monitoring targets
-list. Hence, users can apply specific schemes to only anonymous pages,
-non-anonymous pages, pages of specific cgroups, all pages excluding those of
-specific cgroups, pages in specific address range, pages in specific DAMON
-monitoring targets, and any combination of those.
-
-To handle filters efficiently, the address range and DAMON monitoring target
-type filters are handled by the core layer, while others are handled by
-operations set. If a memory region is filtered by a core layer-handled filter,
-it is not counted as the scheme has tried to the region. In contrast, if a
-memory regions is filtered by an operations set layer-handled filter, it is
-counted as the scheme has tried. The difference in accounting leads to changes
-in the statistics.
+For efficient handling of filters, some types of filters are handled by the
+core layer, while others are handled by the operations set. Support for the
+latter filter types therefore depends on the DAMON operations set in use. If
+a memory region is excluded by a core layer-handled filter, it is not counted
+as a region the scheme has tried. In contrast, if a memory region is excluded
+by an operations set layer-handled filter, it is counted as tried. This
+difference affects the statistics.
+
+The following types of filters are currently supported.
+
+- anonymous page
+ - Applied to pages containing data that is not stored in files.
+ - Handled by the operations set layer. Supported only by the ``paddr`` set.
+- memory cgroup
+ - Applied to pages belonging to a given cgroup.
+ - Handled by the operations set layer. Supported only by the ``paddr`` set.
+- young page
+ - Applied to pages that have been accessed since the scheme's last access
+ check.
+ - Handled by the operations set layer. Supported only by the ``paddr`` set.
+- address range
+ - Applied to pages belonging to a given address range.
+ - Handled by the core logic.
+- DAMON monitoring target
+ - Applied to pages belonging to a given DAMON monitoring target.
+ - Handled by the core logic.
Application Programming Interface
diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst
index 5a306e4de22e..8213cf61d38a 100644
--- a/Documentation/mm/damon/maintainer-profile.rst
+++ b/Documentation/mm/damon/maintainer-profile.rst
@@ -20,9 +20,10 @@ management subsystem maintainer. After more sufficient tests, the patches will
be queued in mm-stable [3]_ , and finally pull-requested to the mainline by the
memory management subsystem maintainer.
-Note again the patches for review should be made against the mm-unstable
-tree [1]_ whenever possible. damon/next is only for preview of others' works
-in progress.
+Note again that the patches for the mm-unstable tree [1]_ are queued by the
+memory management subsystem maintainer. If the patches require some patches in
+the damon/next tree [2]_ that are not yet merged into mm-unstable, please make
+sure the requirement is clearly specified.
Submit checklist addendum
-------------------------
@@ -48,9 +49,9 @@ Review cadence
--------------
The DAMON maintainer does the work on the usual work hour (09:00 to 17:00,
-Mon-Fri) in PST. The response to patches will occasionally be slow. Do not
-hesitate to send a ping if you have not heard back within a week of sending a
-patch.
+Mon-Fri) in PT (Pacific Time). The response to patches will occasionally be
+slow. Do not hesitate to send a ping if you have not heard back within a week
+of sending a patch.
.. [1] https://git.kernel.org/akpm/mm/h/mm-unstable
diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst
index 31d2ac306438..48b9b559ca7b 100644
--- a/Documentation/mm/index.rst
+++ b/Documentation/mm/index.rst
@@ -26,6 +26,7 @@ see the :doc:`admin guide <../admin-guide/mm/index>`.
page_cache
shmfs
oom
+ allocation-profiling
Legacy Documentation
====================
diff --git a/Documentation/mm/page_frags.rst b/Documentation/mm/page_frags.rst
index a81617e688a8..503ca6cdb804 100644
--- a/Documentation/mm/page_frags.rst
+++ b/Documentation/mm/page_frags.rst
@@ -25,7 +25,7 @@ to be disabled when executing the fragment allocation.
The network stack uses two separate caches per CPU to handle fragment
allocation. The netdev_alloc_cache is used by callers making use of the
netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is
-used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The
+used by callers of the __napi_alloc_frag and napi_alloc_skb calls. The
main difference between these two calls is the context in which they may be
called. The "netdev" prefixed functions are usable in any context as these
functions will disable interrupts, while the "napi" prefixed functions are
diff --git a/Documentation/mm/page_owner.rst b/Documentation/mm/page_owner.rst
index 0d0334cd5179..3a45a20fc05a 100644
--- a/Documentation/mm/page_owner.rst
+++ b/Documentation/mm/page_owner.rst
@@ -24,10 +24,10 @@ fragmentation statistics can be obtained through gfp flag information of
each page. It is already implemented and activated if page owner is
enabled. Other usages are more than welcome.
-It can also be used to show all the stacks and their outstanding
-allocations, which gives us a quick overview of where the memory is going
-without the need to screen through all the pages and match the allocation
-and free operation.
+It can also be used to show all the stacks and their current number of
+allocated base pages, which gives us a quick overview of where the memory
+is going without the need to screen through all the pages and match the
+allocation and free operation.
page owner is disabled by default. So, if you'd like to use it, you need
to add "page_owner=on" to your boot cmdline. If the kernel is built
@@ -75,42 +75,45 @@ Usage
cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
cat stacks.txt
- prep_new_page+0xa9/0x120
- get_page_from_freelist+0x7e6/0x2140
- __alloc_pages+0x18a/0x370
- new_slab+0xc8/0x580
- ___slab_alloc+0x1f2/0xaf0
- __slab_alloc.isra.86+0x22/0x40
- kmem_cache_alloc+0x31b/0x350
- __khugepaged_enter+0x39/0x100
- dup_mmap+0x1c7/0x5ce
- copy_process+0x1afe/0x1c90
- kernel_clone+0x9a/0x3c0
- __do_sys_clone+0x66/0x90
- do_syscall_64+0x7f/0x160
- entry_SYSCALL_64_after_hwframe+0x6c/0x74
- stack_count: 234
+ post_alloc_hook+0x177/0x1a0
+ get_page_from_freelist+0xd01/0xd80
+ __alloc_pages+0x39e/0x7e0
+ allocate_slab+0xbc/0x3f0
+ ___slab_alloc+0x528/0x8a0
+ kmem_cache_alloc+0x224/0x3b0
+ sk_prot_alloc+0x58/0x1a0
+ sk_alloc+0x32/0x4f0
+ inet_create+0x427/0xb50
+ __sock_create+0x2e4/0x650
+ inet_ctl_sock_create+0x30/0x180
+ igmp_net_init+0xc1/0x130
+ ops_init+0x167/0x410
+ setup_net+0x304/0xa60
+ copy_net_ns+0x29b/0x4a0
+ create_new_namespaces+0x4a1/0x820
+ nr_base_pages: 16
...
...
echo 7000 > /sys/kernel/debug/page_owner_stacks/count_threshold
cat /sys/kernel/debug/page_owner_stacks/show_stacks> stacks_7000.txt
cat stacks_7000.txt
- prep_new_page+0xa9/0x120
- get_page_from_freelist+0x7e6/0x2140
- __alloc_pages+0x18a/0x370
- alloc_pages_mpol+0xdf/0x1e0
- folio_alloc+0x14/0x50
- filemap_alloc_folio+0xb0/0x100
- page_cache_ra_unbounded+0x97/0x180
- filemap_fault+0x4b4/0x1200
- __do_fault+0x2d/0x110
- do_pte_missing+0x4b0/0xa30
- __handle_mm_fault+0x7fa/0xb70
- handle_mm_fault+0x125/0x300
- do_user_addr_fault+0x3c9/0x840
- exc_page_fault+0x68/0x150
- asm_exc_page_fault+0x22/0x30
- stack_count: 8248
+ post_alloc_hook+0x177/0x1a0
+ get_page_from_freelist+0xd01/0xd80
+ __alloc_pages+0x39e/0x7e0
+ alloc_pages_mpol+0x22e/0x490
+ folio_alloc+0xd5/0x110
+ filemap_alloc_folio+0x78/0x230
+ page_cache_ra_order+0x287/0x6f0
+ filemap_get_pages+0x517/0x1160
+ filemap_read+0x304/0x9f0
+ xfs_file_buffered_read+0xe6/0x1d0 [xfs]
+ xfs_file_read_iter+0x1f0/0x380 [xfs]
+ __kernel_read+0x3b9/0x730
+ kernel_read_file+0x309/0x4d0
+ __do_sys_finit_module+0x381/0x730
+ do_syscall_64+0x8d/0x150
+ entry_SYSCALL_64_after_hwframe+0x62/0x6a
+ nr_base_pages: 20824
...
cat /sys/kernel/debug/page_owner > page_owner_full.txt
diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst
index c12838ce6b8d..c59f22eb6a0f 100644
--- a/Documentation/mm/page_table_check.rst
+++ b/Documentation/mm/page_table_check.rst
@@ -14,7 +14,7 @@ Page table check performs extra verifications at the time when new pages become
accessible from the userspace by getting their page table entries (PTEs PMDs
etc.) added into the table.
-In case of detected corruption, the kernel is crashed. There is a small
+In most cases of detected corruption, the kernel is crashed. There is a small
performance and memory overhead associated with the page table check. Therefore,
it is disabled by default, but can be optionally enabled on systems where the
extra hardening outweighs the performance costs. Also, because page table check
@@ -22,6 +22,13 @@ is synchronous, it can help with debugging double map memory corruption issues,
by crashing kernel at the time wrong mapping occurs instead of later which is
often the case with memory corruptions bugs.
+It can also be used to check page table entries against various flags and to
+dump warnings when illegal combinations of entry flags are detected.
+Currently, userfaultfd is the only user of this, sanity-checking the
+wr-protect bit against any writable flags. Illegal flag combinations do not
+directly cause data corruption in this case, but they can make read-only data
+writable, leading to corruption when the page content is later modified.
+
Double mapping detection logic
==============================
diff --git a/Documentation/mm/slub.rst b/Documentation/mm/slub.rst
index b517ee28a955..60d350d08362 100644
--- a/Documentation/mm/slub.rst
+++ b/Documentation/mm/slub.rst
@@ -80,7 +80,7 @@ to the dentry cache with::
Debugging options may require the minimum possible slab order to increase as
a result of storing the metadata (for example, caches with PAGE_SIZE object
-sizes). This has a higher liklihood of resulting in slab allocation errors
+sizes). This has a higher likelihood of resulting in slab allocation errors
in low memory situations or if there's high fragmentation of memory. To
switch off debugging for such caches by default, use::
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 93c9239b9ebe..1ba0ad63246c 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -116,14 +116,14 @@ pages:
succeeds on tail pages.
- map/unmap of a PMD entry for the whole THP increment/decrement
- folio->_entire_mapcount and also increment/decrement
- folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount
- goes from -1 to 0 or 0 to -1.
+ folio->_entire_mapcount, increment/decrement folio->_large_mapcount
+ and also increment/decrement folio->_nr_pages_mapped by ENTIRELY_MAPPED
+ when _entire_mapcount goes from -1 to 0 or 0 to -1.
- map/unmap of individual pages with PTE entry increment/decrement
- page->_mapcount and also increment/decrement folio->_nr_pages_mapped
- when page->_mapcount goes from -1 to 0 or 0 to -1 as this counts
- the number of pages mapped by PTE.
+ page->_mapcount, increment/decrement folio->_large_mapcount and also
+ increment/decrement folio->_nr_pages_mapped when page->_mapcount goes
+ from -1 to 0 or 0 to -1 as this counts the number of pages mapped by PTE.
split_huge_page internally has to distribute the refcounts in the head
page to the tail pages before clearing all PG_head/tail bits from the page
diff --git a/Documentation/mm/vmemmap_dedup.rst b/Documentation/mm/vmemmap_dedup.rst
index 593ede6d314b..b4a55b6569fa 100644
--- a/Documentation/mm/vmemmap_dedup.rst
+++ b/Documentation/mm/vmemmap_dedup.rst
@@ -180,27 +180,7 @@ this correctly. There is only **one** head ``struct page``, the tail
``struct page`` with ``PG_head`` are fake head ``struct page``. We need an
approach to distinguish between those two different types of ``struct page`` so
that ``compound_head()`` can return the real head ``struct page`` when the
-parameter is the tail ``struct page`` but with ``PG_head``. The following code
-snippet describes how to distinguish between real and fake head ``struct page``.
-
-.. code-block:: c
-
- if (test_bit(PG_head, &page->flags)) {
- unsigned long head = READ_ONCE(page[1].compound_head);
-
- if (head & 1) {
- if (head == (unsigned long)page + 1)
- /* head struct page */
- else
- /* tail struct page */
- } else {
- /* head struct page */
- }
- }
-
-We can safely access the field of the **page[1]** with ``PG_head`` because the
-page is a compound page composed with at least two contiguous pages.
-The implementation refers to ``page_fixed_fake_head()``.
+parameter is the tail ``struct page`` but with ``PG_head``.
Device DAX
==========
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index 4dfd899a1661..4f803eaac6d8 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -158,7 +158,7 @@ properties:
type: &attr-type
enum: [ unused, pad, flag, binary,
uint, sint, u8, u16, u32, u64, s32, s64,
- string, nest, array-nest, nest-type-value ]
+ string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
type: string
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index b48ad3b1cc32..8db0e22fa72c 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -201,7 +201,7 @@ properties:
description: The netlink attribute type
enum: [ unused, pad, flag, binary, bitfield32,
uint, sint, u8, u16, u32, u64, s32, s64,
- string, nest, array-nest, nest-type-value ]
+ string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
type: string
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
index ebd6ee743fcc..b036227b46f1 100644
--- a/Documentation/netlink/genetlink.yaml
+++ b/Documentation/netlink/genetlink.yaml
@@ -124,7 +124,7 @@ properties:
type: &attr-type
enum: [ unused, pad, flag, binary,
uint, sint, u8, u16, u32, u64, s32, s64,
- string, nest, array-nest, nest-type-value ]
+ string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
type: string
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index a76e54cbadbc..914aa1c0a273 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -222,7 +222,7 @@ properties:
description: The netlink attribute type
enum: [ unused, pad, flag, binary, bitfield32,
u8, u16, u32, u64, s8, s16, s32, s64,
- string, nest, array-nest, nest-type-value,
+ string, nest, indexed-array, nest-type-value,
sub-message ]
doc:
description: Documentation of the attribute.
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 197208f419dc..00dc61358be8 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -16,6 +16,10 @@ definitions:
name: stringset
type: enum
entries: []
+ -
+ name: header-flags
+ type: flags
+ entries: [ compact-bitsets, omit-reply, stats ]
attribute-sets:
-
@@ -30,6 +34,7 @@ attribute-sets:
-
name: flags
type: u32
+ enum: header-flags
-
name: bitset-bit
@@ -560,6 +565,18 @@ attribute-sets:
name: tx-lpi-timer
type: u32
-
+ name: ts-stat
+ attributes:
+ -
+ name: tx-pkts
+ type: uint
+ -
+ name: tx-lost
+ type: uint
+ -
+ name: tx-err
+ type: uint
+ -
name: tsinfo
attributes:
-
@@ -581,6 +598,10 @@ attribute-sets:
-
name: phc-index
type: u32
+ -
+ name: stats
+ type: nest
+ nested-attributes: ts-stat
-
name: cable-result
attributes:
@@ -878,17 +899,29 @@ attribute-sets:
type: nest
nested-attributes: header
-
- name: admin-state
+ name: podl-pse-admin-state
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: podl-pse-admin-control
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: podl-pse-pw-d-status
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: c33-pse-admin-state
type: u32
- name-prefix: ethtool-a-podl-pse-
+ name-prefix: ethtool-a-
-
- name: admin-control
+ name: c33-pse-admin-control
type: u32
- name-prefix: ethtool-a-podl-pse-
+ name-prefix: ethtool-a-
-
- name: pw-d-status
+ name: c33-pse-pw-d-status
type: u32
- name-prefix: ethtool-a-podl-pse-
+ name-prefix: ethtool-a-
-
name: rss
attributes:
@@ -1388,6 +1421,7 @@ operations:
- tx-types
- rx-filters
- phc-index
+ - stats
dump: *tsinfo-get-op
-
name: cable-test-act
@@ -1571,9 +1605,12 @@ operations:
reply:
attributes: &pse
- header
- - admin-state
- - admin-control
- - pw-d-status
+ - podl-pse-admin-state
+ - podl-pse-admin-control
+ - podl-pse-pw-d-status
+ - c33-pse-admin-state
+ - c33-pse-admin-control
+ - c33-pse-pw-d-status
dump: *pse-get-op
-
name: pse-set
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 76352dbd2be4..11a32373365a 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -335,6 +335,124 @@ attribute-sets:
Allocation failure may, or may not result in a packet drop, depending
on driver implementation and whether system recovers quickly.
type: uint
+ -
+ name: rx-hw-drops
+ doc: |
+ Number of all packets which entered the device, but never left it,
+ including but not limited to: packets dropped due to lack of buffer
+ space, processing errors, explicit or implicit policies and packet
+ filters.
+ type: uint
+ -
+ name: rx-hw-drop-overruns
+ doc: |
+ Number of packets dropped due to transient lack of resources, such as
+ buffer space, host descriptors etc.
+ type: uint
+ -
+ name: rx-csum-unnecessary
+ doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.
+ type: uint
+ -
+ name: rx-csum-none
+ doc: Number of packets that were not checksummed by device.
+ type: uint
+ -
+ name: rx-csum-bad
+ doc: |
+ Number of packets with bad checksum. The packets are not discarded,
+ but still delivered to the stack.
+ type: uint
+ -
+ name: rx-hw-gro-packets
+ doc: |
+ Number of packets that were coalesced from smaller packets by the device.
+ Counts only packets coalesced with the HW-GRO netdevice feature,
+ LRO-coalesced packets are not counted.
+ type: uint
+ -
+ name: rx-hw-gro-bytes
+ doc: See `rx-hw-gro-packets`.
+ type: uint
+ -
+ name: rx-hw-gro-wire-packets
+ doc: |
+ Number of packets that were coalesced to bigger packets with the HW-GRO
+ netdevice feature. LRO-coalesced packets are not counted.
+ type: uint
+ -
+ name: rx-hw-gro-wire-bytes
+ doc: See `rx-hw-gro-wire-packets`.
+ type: uint
+ -
+ name: rx-hw-drop-ratelimits
+ doc: |
+ Number of the packets dropped by the device due to the received
+ packets bitrate exceeding the device rate limit.
+ type: uint
+ -
+ name: tx-hw-drops
+ doc: |
+ Number of packets that arrived at the device but never left it,
+ encompassing packets dropped for reasons such as processing errors, as
+ well as those affected by explicitly defined policies and packet
+ filtering criteria.
+ type: uint
+ -
+ name: tx-hw-drop-errors
+ doc: Number of packets dropped because they were invalid or malformed.
+ type: uint
+ -
+ name: tx-csum-none
+ doc: |
+ Number of packets that did not require the device to calculate the
+ checksum.
+ type: uint
+ -
+ name: tx-needs-csum
+ doc: |
+ Number of packets that required the device to calculate the checksum.
+ type: uint
+ -
+ name: tx-hw-gso-packets
+ doc: |
+ Number of packets that necessitated segmentation into smaller packets
+ by the device.
+ type: uint
+ -
+ name: tx-hw-gso-bytes
+ doc: See `tx-hw-gso-packets`.
+ type: uint
+ -
+ name: tx-hw-gso-wire-packets
+ doc: |
+ Number of wire-sized packets generated by processing
+ `tx-hw-gso-packets`
+ type: uint
+ -
+ name: tx-hw-gso-wire-bytes
+ doc: See `tx-hw-gso-wire-packets`.
+ type: uint
+ -
+ name: tx-hw-drop-ratelimits
+ doc: |
+ Number of the packets dropped by the device due to the transmit
+ packets bitrate exceeding the device rate limit.
+ type: uint
+ -
+ name: tx-stop
+ doc: |
+ Number of times driver paused accepting new tx packets
+ from the stack to this queue, because the queue was full.
+ Note that if BQL is supported and enabled on the device
+ the networking stack will avoid queuing a lot of data at once.
+ type: uint
+ -
+ name: tx-wake
+ doc: |
+ Number of times driver re-started accepting send
+ requests to this queue from the stack.
+ type: uint
operations:
list:
@@ -486,6 +604,7 @@ operations:
dump:
request:
attributes:
+ - ifindex
- scope
reply:
attributes:
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
index 05acc73e2e33..d21234097167 100644
--- a/Documentation/netlink/specs/nfsd.yaml
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -62,6 +62,59 @@ attribute-sets:
name: compound-ops
type: u32
multi-attr: true
+ -
+ name: server
+ attributes:
+ -
+ name: threads
+ type: u32
+ multi-attr: true
+ -
+ name: gracetime
+ type: u32
+ -
+ name: leasetime
+ type: u32
+ -
+ name: scope
+ type: string
+ -
+ name: version
+ attributes:
+ -
+ name: major
+ type: u32
+ -
+ name: minor
+ type: u32
+ -
+ name: enabled
+ type: flag
+ -
+ name: server-proto
+ attributes:
+ -
+ name: version
+ type: nest
+ nested-attributes: version
+ multi-attr: true
+ -
+ name: sock
+ attributes:
+ -
+ name: addr
+ type: binary
+ -
+ name: transport-name
+ type: string
+ -
+ name: server-sock
+ attributes:
+ -
+ name: addr
+ type: nest
+ nested-attributes: sock
+ multi-attr: true
operations:
list:
@@ -87,3 +140,60 @@ operations:
- sport
- dport
- compound-ops
+ -
+ name: threads-set
+ doc: set the number of running threads
+ attribute-set: server
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - threads
+ - gracetime
+ - leasetime
+ - scope
+ -
+ name: threads-get
+ doc: get the number of running threads
+ attribute-set: server
+ do:
+ reply:
+ attributes:
+ - threads
+ - gracetime
+ - leasetime
+ - scope
+ -
+ name: version-set
+ doc: set nfs enabled versions
+ attribute-set: server-proto
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - version
+ -
+ name: version-get
+ doc: get nfs enabled versions
+ attribute-set: server-proto
+ do:
+ reply:
+ attributes:
+ - version
+ -
+ name: listener-set
+ doc: set nfs running sockets
+ attribute-set: server-sock
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - addr
+ -
+ name: listener-get
+ doc: get nfs running listeners
+ attribute-set: server-sock
+ do:
+ reply:
+ attributes:
+ - addr
diff --git a/Documentation/netlink/specs/nftables.yaml b/Documentation/netlink/specs/nftables.yaml
new file mode 100644
index 000000000000..dff2a18f3d90
--- /dev/null
+++ b/Documentation/netlink/specs/nftables.yaml
@@ -0,0 +1,1264 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nftables
+protocol: netlink-raw
+protonum: 12
+
+doc:
+ Netfilter nftables configuration over netlink.
+
+definitions:
+ -
+ name: nfgenmsg
+ type: struct
+ members:
+ -
+ name: nfgen-family
+ type: u8
+ -
+ name: version
+ type: u8
+ -
+ name: res-id
+ byte-order: big-endian
+ type: u16
+ -
+ name: meta-keys
+ type: enum
+ entries:
+ - len
+ - protocol
+ - priority
+ - mark
+ - iif
+ - oif
+ - iifname
+ - oifname
+ - iftype
+ - oiftype
+ - skuid
+ - skgid
+ - nftrace
+ - rtclassid
+ - secmark
+ - nfproto
+ - l4-proto
+ - bri-iifname
+ - bri-oifname
+ - pkttype
+ - cpu
+ - iifgroup
+ - oifgroup
+ - cgroup
+ - prandom
+ - secpath
+ - iifkind
+ - oifkind
+ - bri-iifpvid
+ - bri-iifvproto
+ - time-ns
+ - time-day
+ - time-hour
+ - sdif
+ - sdifname
+ - bri-broute
+ -
+ name: cmp-ops
+ type: enum
+ entries:
+ - eq
+ - neq
+ - lt
+ - lte
+ - gt
+ - gte
+ -
+ name: object-type
+ type: enum
+ entries:
+ - unspec
+ - counter
+ - quota
+ - ct-helper
+ - limit
+ - connlimit
+ - tunnel
+ - ct-timeout
+ - secmark
+ - ct-expect
+ - synproxy
+ -
+ name: nat-range-flags
+ type: flags
+ entries:
+ - map-ips
+ - proto-specified
+ - proto-random
+ - persistent
+ - proto-random-fully
+ - proto-offset
+ - netmap
+ -
+ name: table-flags
+ type: flags
+ entries:
+ - dormant
+ - owner
+ - persist
+ -
+ name: chain-flags
+ type: flags
+ entries:
+ - base
+ - hw-offload
+ - binding
+ -
+ name: set-flags
+ type: flags
+ entries:
+ - anonymous
+ - constant
+ - interval
+ - map
+ - timeout
+ - eval
+ - object
+ - concat
+ - expr
+
+attribute-sets:
+ -
+ name: empty-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ -
+ name: batch-attrs
+ attributes:
+ -
+ name: genid
+ type: u32
+ byte-order: big-endian
+ -
+ name: table-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ doc: name of the table
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ doc: bitmask of flags
+ enum: table-flags
+ enum-as-flags: true
+ -
+ name: use
+ type: u32
+ byte-order: big-endian
+ doc: number of chains in this table
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ doc: numeric handle of the table
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: chain-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ doc: name of the table containing the chain
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ doc: numeric handle of the chain
+ -
+ name: name
+ type: string
+ doc: name of the chain
+ -
+ name: hook
+ type: nest
+ nested-attributes: nft-hook-attrs
+ doc: hook specification for basechains
+ -
+ name: policy
+ type: u32
+ byte-order: big-endian
+ doc: numeric policy of the chain
+ -
+ name: use
+ type: u32
+ byte-order: big-endian
+ doc: number of references to this chain
+ -
+ name: type
+ type: string
+ doc: type name of the chain
+ -
+ name: counters
+ type: nest
+ nested-attributes: nft-counter-attrs
+ doc: counter specification of the chain
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ doc: chain flags
+ enum: chain-flags
+ enum-as-flags: true
+ -
+ name: id
+ type: u32
+ byte-order: big-endian
+ doc: uniquely identifies a chain in a transaction
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: counter-attrs
+ attributes:
+ -
+ name: bytes
+ type: u64
+ byte-order: big-endian
+ -
+ name: packets
+ type: u64
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: nft-hook-attrs
+ attributes:
+ -
+ name: num
+ type: u32
+ byte-order: big-endian
+ -
+ name: priority
+ type: s32
+ byte-order: big-endian
+ -
+ name: dev
+ type: string
+ doc: net device name
+ -
+ name: devs
+ type: nest
+ nested-attributes: hook-dev-attrs
+ doc: list of net devices
+ -
+ name: hook-dev-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ multi-attr: true
+ -
+ name: nft-counter-attrs
+ attributes:
+ -
+ name: bytes
+ type: u64
+ -
+ name: packets
+ type: u64
+ -
+ name: rule-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ doc: name of the table containing the rule
+ -
+ name: chain
+ type: string
+ doc: name of the chain containing the rule
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ doc: numeric handle of the rule
+ -
+ name: expressions
+ type: nest
+ nested-attributes: expr-list-attrs
+ doc: list of expressions
+ -
+ name: compat
+ type: nest
+ nested-attributes: rule-compat-attrs
+ doc: compatibility specifications of the rule
+ -
+ name: position
+ type: u64
+ byte-order: big-endian
+ doc: numeric handle of the previous rule
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: id
+ type: u32
+ doc: uniquely identifies a rule in a transaction
+ -
+ name: position-id
+ type: u32
+ doc: transaction unique identifier of the previous rule
+ -
+ name: chain-id
+ type: u32
+ doc: add the rule to chain by ID, alternative to chain name
+ -
+ name: expr-list-attrs
+ attributes:
+ -
+ name: elem
+ type: nest
+ nested-attributes: expr-attrs
+ multi-attr: true
+ -
+ name: expr-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ doc: name of the expression type
+ -
+ name: data
+ type: sub-message
+ sub-message: expr-ops
+ selector: name
+ doc: type specific data
+ -
+ name: rule-compat-attrs
+ attributes:
+ -
+ name: proto
+ type: binary
+ doc: numeric value of the handled protocol
+ -
+ name: flags
+ type: binary
+ doc: bitmask of flags
+ -
+ name: set-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ doc: table name
+ -
+ name: name
+ type: string
+ doc: set name
+ -
+ name: flags
+ type: u32
+ enum: set-flags
+ byte-order: big-endian
+ doc: bitmask of enum nft_set_flags
+ -
+ name: key-type
+ type: u32
+ byte-order: big-endian
+ doc: key data type, informational purpose only
+ -
+ name: key-len
+ type: u32
+ byte-order: big-endian
+ doc: key data length
+ -
+ name: data-type
+ type: u32
+ byte-order: big-endian
+ doc: mapping data type
+ -
+ name: data-len
+ type: u32
+ byte-order: big-endian
+ doc: mapping data length
+ -
+ name: policy
+ type: u32
+ byte-order: big-endian
+ doc: selection policy
+ -
+ name: desc
+ type: nest
+ nested-attributes: set-desc-attrs
+ doc: set description
+ -
+ name: id
+ type: u32
+ doc: uniquely identifies a set in a transaction
+ -
+ name: timeout
+ type: u64
+ doc: default timeout value
+ -
+ name: gc-interval
+ type: u32
+ doc: garbage collection interval
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: pad
+ type: pad
+ -
+ name: obj-type
+ type: u32
+ byte-order: big-endian
+ doc: stateful object type
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ doc: set handle
+ -
+ name: expr
+ type: nest
+ nested-attributes: expr-attrs
+ doc: set expression
+ multi-attr: true
+ -
+ name: expressions
+ type: nest
+ nested-attributes: set-list-attrs
+ doc: list of expressions
+ -
+ name: set-desc-attrs
+ attributes:
+ -
+ name: size
+ type: u32
+ byte-order: big-endian
+ doc: number of elements in set
+ -
+ name: concat
+ type: nest
+ nested-attributes: set-desc-concat-attrs
+ doc: description of field concatenation
+ multi-attr: true
+ -
+ name: set-desc-concat-attrs
+ attributes:
+ -
+ name: elem
+ type: nest
+ nested-attributes: set-field-attrs
+ -
+ name: set-field-attrs
+ attributes:
+ -
+ name: len
+ type: u32
+ byte-order: big-endian
+ -
+ name: set-list-attrs
+ attributes:
+ -
+ name: elem
+ type: nest
+ nested-attributes: expr-attrs
+ multi-attr: true
+ -
+ name: setelem-attrs
+ attributes:
+ -
+ name: key
+ type: nest
+ nested-attributes: data-attrs
+ doc: key value
+ -
+ name: data
+ type: nest
+ nested-attributes: data-attrs
+ doc: data value of mapping
+ -
+ name: flags
+ type: binary
+ doc: bitmask of nft_set_elem_flags
+ -
+ name: timeout
+ type: u64
+ doc: timeout value
+ -
+ name: expiration
+ type: u64
+ doc: expiration time
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: expr
+ type: nest
+ nested-attributes: expr-attrs
+ doc: expression
+ -
+ name: objref
+ type: string
+ doc: stateful object reference
+ -
+ name: key-end
+ type: nest
+ nested-attributes: data-attrs
+ doc: closing key value
+ -
+ name: expressions
+ type: nest
+ nested-attributes: expr-list-attrs
+ doc: list of expressions
+ -
+ name: setelem-list-elem-attrs
+ attributes:
+ -
+ name: elem
+ type: nest
+ nested-attributes: setelem-attrs
+ multi-attr: true
+ -
+ name: setelem-list-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ -
+ name: set
+ type: string
+ -
+ name: elements
+ type: nest
+ nested-attributes: setelem-list-elem-attrs
+ -
+ name: set-id
+ type: u32
+ -
+ name: gen-attrs
+ attributes:
+ -
+ name: id
+ type: u32
+ byte-order: big-endian
+ doc: ruleset generation id
+ -
+ name: proc-pid
+ type: u32
+ byte-order: big-endian
+ -
+ name: proc-name
+ type: string
+ -
+ name: obj-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ doc: name of the table containing the expression
+ -
+ name: name
+ type: string
+ doc: name of this expression type
+ -
+ name: type
+ type: u32
+ enum: object-type
+ byte-order: big-endian
+ doc: stateful object type
+ -
+ name: data
+ type: sub-message
+ sub-message: obj-data
+ selector: type
+ doc: stateful object data
+ -
+ name: use
+ type: u32
+ byte-order: big-endian
+ doc: number of references to this expression
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ doc: object handle
+ -
+ name: pad
+ type: pad
+ -
+ name: userdata
+ type: binary
+ doc: user data
+ -
+ name: quota-attrs
+ attributes:
+ -
+ name: bytes
+ type: u64
+ byte-order: big-endian
+ -
+ name: flags # TODO
+ type: u32
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: consumed
+ type: u64
+ byte-order: big-endian
+ -
+ name: flowtable-attrs
+ attributes:
+ -
+ name: table
+ type: string
+ -
+ name: name
+ type: string
+ -
+ name: hook
+ type: nest
+ nested-attributes: flowtable-hook-attrs
+ -
+ name: use
+ type: u32
+ byte-order: big-endian
+ -
+ name: handle
+ type: u64
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ -
+ name: flowtable-hook-attrs
+ attributes:
+ -
+ name: num
+ type: u32
+ byte-order: big-endian
+ -
+ name: priority
+ type: u32
+ byte-order: big-endian
+ -
+ name: devs
+ type: nest
+ nested-attributes: hook-dev-attrs
+ -
+ name: expr-cmp-attrs
+ attributes:
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: op
+ type: u32
+ byte-order: big-endian
+ enum: cmp-ops
+ -
+ name: data
+ type: nest
+ nested-attributes: data-attrs
+ -
+ name: data-attrs
+ attributes:
+ -
+ name: value
+ type: binary
+ # sub-type: u8
+ -
+ name: verdict
+ type: nest
+ nested-attributes: verdict-attrs
+ -
+ name: verdict-attrs
+ attributes:
+ -
+ name: code
+ type: u32
+ byte-order: big-endian
+ -
+ name: chain
+ type: string
+ -
+ name: chain-id
+ type: u32
+ -
+ name: expr-counter-attrs
+ attributes:
+ -
+ name: bytes
+ type: u64
+ doc: Number of bytes
+ -
+ name: packets
+ type: u64
+ doc: Number of packets
+ -
+ name: pad
+ type: pad
+ -
+ name: expr-flow-offload-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ doc: Flow offload table name
+ -
+ name: expr-immediate-attrs
+ attributes:
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: data
+ type: nest
+ nested-attributes: data-attrs
+ -
+ name: expr-meta-attrs
+ attributes:
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: key
+ type: u32
+ byte-order: big-endian
+ enum: meta-keys
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: expr-nat-attrs
+ attributes:
+ -
+ name: type
+ type: u32
+ byte-order: big-endian
+ -
+ name: family
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-addr-min
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-addr-max
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-proto-min
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-proto-max
+ type: u32
+ byte-order: big-endian
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ enum: nat-range-flags
+ enum-as-flags: true
+ -
+ name: expr-payload-attrs
+ attributes:
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: base
+ type: u32
+ byte-order: big-endian
+ -
+ name: offset
+ type: u32
+ byte-order: big-endian
+ -
+ name: len
+ type: u32
+ byte-order: big-endian
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: csum-type
+ type: u32
+ byte-order: big-endian
+ -
+ name: csum-offset
+ type: u32
+ byte-order: big-endian
+ -
+ name: csum-flags
+ type: u32
+ byte-order: big-endian
+ -
+ name: expr-tproxy-attrs
+ attributes:
+ -
+ name: family
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-addr
+ type: u32
+ byte-order: big-endian
+ -
+ name: reg-port
+ type: u32
+ byte-order: big-endian
+
+sub-messages:
+ -
+ name: expr-ops
+ formats:
+ -
+ value: bitwise # TODO
+ -
+ value: cmp
+ attribute-set: expr-cmp-attrs
+ -
+ value: counter
+ attribute-set: expr-counter-attrs
+ -
+ value: ct # TODO
+ -
+ value: flow_offload
+ attribute-set: expr-flow-offload-attrs
+ -
+ value: immediate
+ attribute-set: expr-immediate-attrs
+ -
+ value: lookup # TODO
+ -
+ value: meta
+ attribute-set: expr-meta-attrs
+ -
+ value: nat
+ attribute-set: expr-nat-attrs
+ -
+ value: payload
+ attribute-set: expr-payload-attrs
+ -
+ value: tproxy
+ attribute-set: expr-tproxy-attrs
+ -
+ name: obj-data
+ formats:
+ -
+ value: counter
+ attribute-set: counter-attrs
+ -
+ value: quota
+ attribute-set: quota-attrs
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: batch-begin
+ doc: Start a batch of operations
+ attribute-set: batch-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0x10
+ attributes:
+ - genid
+ reply:
+ value: 0x10
+ attributes:
+ - genid
+ -
+ name: batch-end
+ doc: Finish a batch of operations
+ attribute-set: batch-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0x11
+ attributes:
+ - genid
+ -
+ name: newtable
+ doc: Create a new table.
+ attribute-set: table-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa00
+ attributes:
+ - name
+ -
+ name: gettable
+ doc: Get / dump tables.
+ attribute-set: table-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa01
+ attributes:
+ - name
+ reply:
+ value: 0xa00
+ attributes:
+ - name
+ -
+ name: deltable
+ doc: Delete an existing table.
+ attribute-set: table-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa02
+ attributes:
+ - name
+ -
+ name: destroytable
+ doc: Delete an existing table with destroy semantics (ignoring ENOENT errors).
+ attribute-set: table-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1a
+ attributes:
+ - name
+ -
+ name: newchain
+ doc: Create a new chain.
+ attribute-set: chain-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa03
+ attributes:
+ - name
+ -
+ name: getchain
+ doc: Get / dump chains.
+ attribute-set: chain-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa04
+ attributes:
+ - name
+ reply:
+ value: 0xa03
+ attributes:
+ - name
+ -
+ name: delchain
+ doc: Delete an existing chain.
+ attribute-set: chain-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa05
+ attributes:
+ - name
+ -
+ name: destroychain
+ doc: Delete an existing chain with destroy semantics (ignoring ENOENT errors).
+ attribute-set: chain-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1b
+ attributes:
+ - name
+ -
+ name: newrule
+ doc: Create a new rule.
+ attribute-set: rule-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa06
+ attributes:
+ - name
+ -
+ name: getrule
+ doc: Get / dump rules.
+ attribute-set: rule-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa07
+ attributes:
+ - name
+ reply:
+ value: 0xa06
+ attributes:
+ - name
+ -
+ name: getrule-reset
+ doc: Get / dump rules and reset stateful expressions.
+ attribute-set: rule-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa19
+ attributes:
+ - name
+ reply:
+ value: 0xa06
+ attributes:
+ - name
+ -
+ name: delrule
+ doc: Delete an existing rule.
+ attribute-set: rule-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa08
+ attributes:
+ - name
+ -
+ name: destroyrule
+ doc: Delete an existing rule with destroy semantics (ignoring ENOENT errors).
+ attribute-set: rule-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1c
+ attributes:
+ - name
+ -
+ name: newset
+ doc: Create a new set.
+ attribute-set: set-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa09
+ attributes:
+ - name
+ -
+ name: getset
+ doc: Get / dump sets.
+ attribute-set: set-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa0a
+ attributes:
+ - name
+ reply:
+ value: 0xa09
+ attributes:
+ - name
+ -
+ name: delset
+ doc: Delete an existing set.
+ attribute-set: set-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa0b
+ attributes:
+ - name
+ -
+ name: destroyset
+ doc: Delete an existing set with destroy semantics (ignoring ENOENT errors).
+ attribute-set: set-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1d
+ attributes:
+ - name
+ -
+ name: newsetelem
+ doc: Create a new set element.
+ attribute-set: setelem-list-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa0c
+ attributes:
+ - name
+ -
+ name: getsetelem
+ doc: Get / dump set elements.
+ attribute-set: setelem-list-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa0d
+ attributes:
+ - name
+ reply:
+ value: 0xa0c
+ attributes:
+ - name
+ -
+ name: getsetelem-reset
+ doc: Get / dump set elements and reset stateful expressions.
+ attribute-set: setelem-list-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa21
+ attributes:
+ - name
+ reply:
+ value: 0xa0c
+ attributes:
+ - name
+ -
+ name: delsetelem
+ doc: Delete an existing set element.
+ attribute-set: setelem-list-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa0e
+ attributes:
+ - name
+ -
+ name: destroysetelem
+ doc: Delete an existing set element with destroy semantics.
+ attribute-set: setelem-list-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1e
+ attributes:
+ - name
+ -
+ name: getgen
+ doc: Get / dump rule-set generation.
+ attribute-set: gen-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa10
+ attributes:
+ - name
+ reply:
+ value: 0xa0f
+ attributes:
+ - name
+ -
+ name: newobj
+ doc: Create a new stateful object.
+ attribute-set: obj-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa12
+ attributes:
+ - name
+ -
+ name: getobj
+ doc: Get / dump stateful objects.
+ attribute-set: obj-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa13
+ attributes:
+ - name
+ reply:
+ value: 0xa12
+ attributes:
+ - name
+ -
+ name: delobj
+ doc: Delete an existing stateful object.
+ attribute-set: obj-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa14
+ attributes:
+ - name
+ -
+ name: destroyobj
+ doc: Delete an existing stateful object with destroy semantics.
+ attribute-set: obj-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa1f
+ attributes:
+ - name
+ -
+ name: newflowtable
+ doc: Create a new flow table.
+ attribute-set: flowtable-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa16
+ attributes:
+ - name
+ -
+ name: getflowtable
+ doc: Get / dump flow tables.
+ attribute-set: flowtable-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa17
+ attributes:
+ - name
+ reply:
+ value: 0xa16
+ attributes:
+ - name
+ -
+ name: delflowtable
+ doc: Delete an existing flow table.
+ attribute-set: flowtable-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa18
+ attributes:
+ - name
+ -
+ name: destroyflowtable
+ doc: Delete an existing flow table with destroy semantics.
+ attribute-set: flowtable-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0xa20
+ attributes:
+ - name
+
+mcast-groups:
+ list:
+ -
+ name: mgmt
diff --git a/Documentation/netlink/specs/nlctrl.yaml b/Documentation/netlink/specs/nlctrl.yaml
index b1632b95f725..a36535350bdb 100644
--- a/Documentation/netlink/specs/nlctrl.yaml
+++ b/Documentation/netlink/specs/nlctrl.yaml
@@ -65,11 +65,13 @@ attribute-sets:
type: u32
-
name: ops
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: op-attrs
-
name: mcast-groups
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: mcast-group-attrs
-
name: policy
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
index 8e4d19adee8c..de08c12fd56f 100644
--- a/Documentation/netlink/specs/rt_link.yaml
+++ b/Documentation/netlink/specs/rt_link.yaml
@@ -50,7 +50,16 @@ definitions:
name: dormant
-
name: echo
-
+ -
+ name: vlan-protocols
+ type: enum
+ entries:
+ -
+ name: 8021q
+ value: 33024
+ -
+ name: 8021ad
+ value: 34984
-
name: rtgenmsg
type: struct
@@ -729,7 +738,171 @@ definitions:
-
name: filter-mask
type: u32
-
+ -
+ name: ifla-vlan-flags
+ type: struct
+ members:
+ -
+ name: flags
+ type: u32
+ enum: vlan-flags
+ enum-as-flags: true
+ -
+ name: mask
+ type: u32
+ display-hint: hex
+ -
+ name: vlan-flags
+ type: flags
+ entries:
+ - reorder-hdr
+ - gvrp
+ - loose-binding
+ - mvrp
+ - bridge-binding
+ -
+ name: ifla-vlan-qos-mapping
+ type: struct
+ members:
+ -
+ name: from
+ type: u32
+ -
+ name: to
+ type: u32
+ -
+ name: ifla-vf-mac
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: mac
+ type: binary
+ len: 32
+ -
+ name: ifla-vf-vlan
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: vlan
+ type: u32
+ -
+ name: qos
+ type: u32
+ -
+ name: ifla-vf-tx-rate
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: rate
+ type: u32
+ -
+ name: ifla-vf-spoofchk
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: setting
+ type: u32
+ -
+ name: ifla-vf-link-state
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: link-state
+ type: u32
+ enum: ifla-vf-link-state-enum
+ -
+ name: ifla-vf-link-state-enum
+ type: enum
+ entries:
+ - auto
+ - enable
+ - disable
+ -
+ name: ifla-vf-rate
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: min-tx-rate
+ type: u32
+ -
+ name: max-tx-rate
+ type: u32
+ -
+ name: ifla-vf-rss-query-en
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: setting
+ type: u32
+ -
+ name: ifla-vf-trust
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: setting
+ type: u32
+ -
+ name: ifla-vf-guid
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: guid
+ type: u64
+ -
+ name: ifla-vf-vlan-info
+ type: struct
+ members:
+ -
+ name: vf
+ type: u32
+ -
+ name: vlan
+ type: u32
+ -
+ name: qos
+ type: u32
+ -
+ name: vlan-proto
+ type: u32
+ -
+ name: rtext-filter
+ type: flags
+ entries:
+ - vf
+ - brvlan
+ - brvlan-compressed
+ - skip-stats
+ - mrp
+ - cfm-config
+ - cfm-status
+ - mst
attribute-sets:
-
@@ -807,7 +980,7 @@ attribute-sets:
-
name: vfinfo-list
type: nest
- nested-attributes: vfinfo-attrs
+ nested-attributes: vfinfo-list-attrs
-
name: stats64
type: binary
@@ -833,6 +1006,8 @@ attribute-sets:
-
name: ext-mask
type: u32
+ enum: rtext-filter
+ enum-as-flags: true
-
name: promiscuity
type: u32
@@ -965,8 +1140,106 @@ attribute-sets:
value: 45
nested-attributes: mctp-attrs
-
+ name: vfinfo-list-attrs
+ attributes:
+ -
+ name: info
+ type: nest
+ nested-attributes: vfinfo-attrs
+ multi-attr: true
+ -
name: vfinfo-attrs
- attributes: []
+ attributes:
+ -
+ name: mac
+ type: binary
+ struct: ifla-vf-mac
+ -
+ name: vlan
+ type: binary
+ struct: ifla-vf-vlan
+ -
+ name: tx-rate
+ type: binary
+ struct: ifla-vf-tx-rate
+ -
+ name: spoofchk
+ type: binary
+ struct: ifla-vf-spoofchk
+ -
+ name: link-state
+ type: binary
+ struct: ifla-vf-link-state
+ -
+ name: rate
+ type: binary
+ struct: ifla-vf-rate
+ -
+ name: rss-query-en
+ type: binary
+ struct: ifla-vf-rss-query-en
+ -
+ name: stats
+ type: nest
+ nested-attributes: vf-stats-attrs
+ -
+ name: trust
+ type: binary
+ struct: ifla-vf-trust
+ -
+ name: ib-node-guid
+ type: binary
+ struct: ifla-vf-guid
+ -
+ name: ib-port-guid
+ type: binary
+ struct: ifla-vf-guid
+ -
+ name: vlan-list
+ type: nest
+ nested-attributes: vf-vlan-attrs
+ -
+ name: broadcast
+ type: binary
+ -
+ name: vf-stats-attrs
+ attributes:
+ -
+ name: rx-packets
+ type: u64
+ value: 0
+ -
+ name: tx-packets
+ type: u64
+ -
+ name: rx-bytes
+ type: u64
+ -
+ name: tx-bytes
+ type: u64
+ -
+ name: broadcast
+ type: u64
+ -
+ name: multicast
+ type: u64
+ -
+ name: pad
+ type: pad
+ -
+ name: rx-dropped
+ type: u64
+ -
+ name: tx-dropped
+ type: u64
+ -
+ name: vf-vlan-attrs
+ attributes:
+ -
+ name: info
+ type: binary
+ struct: ifla-vf-vlan-info
+ multi-attr: true
-
name: vf-ports-attrs
attributes: []
@@ -996,6 +1269,165 @@ attribute-sets:
sub-message: linkinfo-member-data-msg
selector: slave-kind
-
+ name: linkinfo-bond-attrs
+ name-prefix: ifla-bond-
+ attributes:
+ -
+ name: mode
+ type: u8
+ -
+ name: active-slave
+ type: u32
+ -
+ name: miimon
+ type: u32
+ -
+ name: updelay
+ type: u32
+ -
+ name: downdelay
+ type: u32
+ -
+ name: use-carrier
+ type: u8
+ -
+ name: arp-interval
+ type: u32
+ -
+ name: arp-ip-target
+ type: indexed-array
+ sub-type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: arp-validate
+ type: u32
+ -
+ name: arp-all-targets
+ type: u32
+ -
+ name: primary
+ type: u32
+ -
+ name: primary-reselect
+ type: u8
+ -
+ name: fail-over-mac
+ type: u8
+ -
+ name: xmit-hash-policy
+ type: u8
+ -
+ name: resend-igmp
+ type: u32
+ -
+ name: num-peer-notif
+ type: u8
+ -
+ name: all-slaves-active
+ type: u8
+ -
+ name: min-links
+ type: u32
+ -
+ name: lp-interval
+ type: u32
+ -
+ name: packets-per-slave
+ type: u32
+ -
+ name: ad-lacp-rate
+ type: u8
+ -
+ name: ad-select
+ type: u8
+ -
+ name: ad-info
+ type: nest
+ nested-attributes: bond-ad-info-attrs
+ -
+ name: ad-actor-sys-prio
+ type: u16
+ -
+ name: ad-user-port-key
+ type: u16
+ -
+ name: ad-actor-system
+ type: binary
+ display-hint: mac
+ -
+ name: tlb-dynamic-lb
+ type: u8
+ -
+ name: peer-notif-delay
+ type: u32
+ -
+ name: ad-lacp-active
+ type: u8
+ -
+ name: missed-max
+ type: u8
+ -
+ name: ns-ip6-target
+ type: indexed-array
+ sub-type: binary
+ display-hint: ipv6
+ -
+ name: coupled-control
+ type: u8
+ -
+ name: bond-ad-info-attrs
+ name-prefix: ifla-bond-ad-info-
+ attributes:
+ -
+ name: aggregator
+ type: u16
+ -
+ name: num-ports
+ type: u16
+ -
+ name: actor-key
+ type: u16
+ -
+ name: partner-key
+ type: u16
+ -
+ name: partner-mac
+ type: binary
+ display-hint: mac
+ -
+ name: bond-slave-attrs
+ name-prefix: ifla-bond-slave-
+ attributes:
+ -
+ name: state
+ type: u8
+ -
+ name: mii-status
+ type: u8
+ -
+ name: link-failure-count
+ type: u32
+ -
+ name: perm-hwaddr
+ type: binary
+ display-hint: mac
+ -
+ name: queue-id
+ type: u16
+ -
+ name: ad-aggregator-id
+ type: u16
+ -
+ name: ad-actor-oper-port-state
+ type: u8
+ -
+ name: ad-partner-oper-port-state
+ type: u16
+ -
+ name: prio
+ type: u32
+ -
name: linkinfo-bridge-attrs
name-prefix: ifla-br-
attributes:
@@ -1144,6 +1576,12 @@ attribute-sets:
-
name: mcast-querier-state
type: binary
+ -
+ name: fdb-n-learned
+ type: u32
+ -
+ name: fdb-max-learned
+ type: u32
-
name: linkinfo-brport-attrs
name-prefix: ifla-brport-
@@ -1508,6 +1946,39 @@ attribute-sets:
name: num-disabled-queues
type: u32
-
+ name: linkinfo-vlan-attrs
+ name-prefix: ifla-vlan-
+ attributes:
+ -
+ name: id
+ type: u16
+ -
+ name: flag
+ type: binary
+ struct: ifla-vlan-flags
+ -
+ name: egress-qos
+ type: nest
+ nested-attributes: ifla-vlan-qos
+ -
+ name: ingress-qos
+ type: nest
+ nested-attributes: ifla-vlan-qos
+ -
+ name: protocol
+ type: u16
+ enum: vlan-protocols
+ byte-order: big-endian
+ -
+ name: ifla-vlan-qos
+ name-prefix: ifla-vlan-qos
+ attributes:
+ -
+ name: mapping
+ type: binary
+ multi-attr: true
+ struct: ifla-vlan-qos-mapping
+ -
name: linkinfo-vrf-attrs
name-prefix: ifla-vrf-
attributes:
@@ -1617,7 +2088,8 @@ attribute-sets:
type: binary
-
name: hw-s-info
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: hw-s-info-one
-
name: l3-stats
@@ -1643,6 +2115,9 @@ sub-messages:
name: linkinfo-data-msg
formats:
-
+ value: bond
+ attribute-set: linkinfo-bond-attrs
+ -
value: bridge
attribute-set: linkinfo-bridge-attrs
-
@@ -1667,6 +2142,9 @@ sub-messages:
value: tun
attribute-set: linkinfo-tun-attrs
-
+ value: vlan
+ attribute-set: linkinfo-vlan-attrs
+ -
value: vrf
attribute-set: linkinfo-vrf-attrs
-
@@ -1677,6 +2155,7 @@ sub-messages:
attribute-set: linkinfo-brport-attrs
-
value: bond
+ attribute-set: bond-slave-attrs
operations:
enum-model: directional
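
For illustration only (standard iproute2 commands, not part of the patch above; ``eth0`` and the VF index are placeholders): the bond, VLAN and VF-info structures the spec now describes are the ones carried by ordinary ``ip link`` requests.

.. code:: shell

    # bond: mode/miimon travel as ifla-bond-* attributes (linkinfo-bond-attrs)
    $ ip link add bond0 type bond mode 802.3ad miimon 100

    # VLAN: id and protocol travel as ifla-vlan-* attributes (linkinfo-vlan-attrs)
    $ ip link add link eth0 name eth0.100 type vlan id 100 protocol 802.1Q

    # VF MAC: carried as an ifla-vf-mac struct inside the vfinfo list
    $ ip link set eth0 vf 0 mac 02:00:00:00:00:01
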
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 324fa182cd14..8c01e4e13195 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -1100,6 +1100,19 @@ definitions:
name: offmask
type: s32
-
+ name: tc-u32-mark
+ type: struct
+ members:
+ -
+ name: val
+ type: u32
+ -
+ name: mask
+ type: u32
+ -
+ name: success
+ type: u32
+ -
name: tc-u32-sel
type: struct
members:
@@ -1775,6 +1788,44 @@ attribute-sets:
name: key-ex
type: binary
-
+ name: tc-act-police-attrs
+ attributes:
+ -
+ name: tbf
+ type: binary
+ struct: tc-police
+ -
+ name: rate
+ type: binary # TODO
+ -
+ name: peakrate
+ type: binary # TODO
+ -
+ name: avrate
+ type: u32
+ -
+ name: result
+ type: u32
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: rate64
+ type: u64
+ -
+ name: peakrate64
+ type: u64
+ -
+ name: pktrate64
+ type: u64
+ -
+ name: pktburst64
+ type: u64
+ -
name: tc-act-simple-attrs
attributes:
-
@@ -1937,7 +1988,8 @@ attribute-sets:
nested-attributes: tc-ematch-attrs
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: police
@@ -2077,7 +2129,8 @@ attribute-sets:
type: u32
-
name: tin-stats
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-cake-tin-stats-attrs
-
name: deficit
@@ -2297,7 +2350,8 @@ attribute-sets:
type: string
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: key-eth-dst
@@ -2798,7 +2852,8 @@ attribute-sets:
type: string
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: mask
@@ -2951,7 +3006,8 @@ attribute-sets:
type: u32
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: flags
@@ -3324,7 +3380,8 @@ attribute-sets:
nested-attributes: tc-police-attrs
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: tc-taprio-attrs
@@ -3542,7 +3599,8 @@ attribute-sets:
nested-attributes: tc-police-attrs
-
name: act
- type: array-nest
+ type: indexed-array
+ sub-type: nest
nested-attributes: tc-act-attrs
-
name: indev
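
As a sketch of how the new u32 and police definitions above are exercised (standard tc commands; the device name and addresses are placeholders), a classic ingress policer attaches a u32 filter whose police parameters travel in ``tc-police``/``rate64``:

.. code:: shell

    $ tc qdisc add dev eth0 handle ffff: ingress
    $ tc filter add dev eth0 parent ffff: protocol ip u32 \
          match ip src 192.0.2.0/24 \
          police rate 1mbit burst 100k drop
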
diff --git a/Documentation/netlink/specs/team.yaml b/Documentation/netlink/specs/team.yaml
new file mode 100644
index 000000000000..c13529e011c9
--- /dev/null
+++ b/Documentation/netlink/specs/team.yaml
@@ -0,0 +1,204 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: team
+
+protocol: genetlink-legacy
+
+doc: |
+ Network team device driver.
+
+c-family-name: team-genl-name
+c-version-name: team-genl-version
+kernel-policy: global
+uapi-header: linux/if_team.h
+
+definitions:
+ -
+ name: string-max-len
+ type: const
+ value: 32
+ -
+ name: genl-change-event-mc-grp-name
+ type: const
+ value: change_event
+
+attribute-sets:
+ -
+ name: team
+ doc:
+ The team nested layout of get/set msg looks like
+ [TEAM_ATTR_LIST_OPTION]
+ [TEAM_ATTR_ITEM_OPTION]
+ [TEAM_ATTR_OPTION_*], ...
+ [TEAM_ATTR_ITEM_OPTION]
+ [TEAM_ATTR_OPTION_*], ...
+ ...
+ [TEAM_ATTR_LIST_PORT]
+ [TEAM_ATTR_ITEM_PORT]
+ [TEAM_ATTR_PORT_*], ...
+ [TEAM_ATTR_ITEM_PORT]
+ [TEAM_ATTR_PORT_*], ...
+ ...
+ name-prefix: team-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: team-ifindex
+ type: u32
+ -
+ name: list-option
+ type: nest
+ nested-attributes: item-option
+ -
+ name: list-port
+ type: nest
+ nested-attributes: item-port
+ -
+ name: item-option
+ name-prefix: team-attr-item-
+ attr-cnt-name: __team-attr-item-option-max
+ attr-max-name: team-attr-item-option-max
+ attributes:
+ -
+ name: option-unspec
+ type: unused
+ value: 0
+ -
+ name: option
+ type: nest
+ nested-attributes: attr-option
+ -
+ name: attr-option
+ name-prefix: team-attr-option-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: name
+ type: string
+ checks:
+ max-len: string-max-len
+ unterminated-ok: true
+ -
+ name: changed
+ type: flag
+ -
+ name: type
+ type: u8
+ -
+ name: data
+ type: binary
+ -
+ name: removed
+ type: flag
+ -
+ name: port-ifindex
+ type: u32
+ doc: for per-port options
+ -
+ name: array-index
+ type: u32
+ doc: for array options
+ -
+ name: item-port
+ name-prefix: team-attr-item-
+ attr-cnt-name: __team-attr-item-port-max
+ attr-max-name: team-attr-item-port-max
+ attributes:
+ -
+ name: port-unspec
+ type: unused
+ value: 0
+ -
+ name: port
+ type: nest
+ nested-attributes: attr-port
+ -
+ name: attr-port
+ name-prefix: team-attr-port-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: ifindex
+ type: u32
+ -
+ name: changed
+ type: flag
+ -
+ name: linkup
+ type: flag
+ -
+ name: speed
+ type: u32
+ -
+ name: duplex
+ type: u8
+ -
+ name: removed
+ type: flag
+
+operations:
+ list:
+ -
+ name: noop
+ doc: No operation
+ value: 0
+ attribute-set: team
+ dont-validate: [ strict ]
+
+ do:
+ # Actually it only replies with the team netlink family
+ reply:
+ attributes:
+ - team-ifindex
+
+ -
+ name: options-set
+ doc: Set team options
+ attribute-set: team
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+
+ do:
+ request: &option_attrs
+ attributes:
+ - team-ifindex
+ - list-option
+ reply: *option_attrs
+
+ -
+ name: options-get
+ doc: Get team options info
+ attribute-set: team
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+
+ do:
+ request:
+ attributes:
+ - team-ifindex
+ reply: *option_attrs
+
+ -
+ name: port-list-get
+ doc: Get team ports info
+ attribute-set: team
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+
+ do:
+ request:
+ attributes:
+ - team-ifindex
+ reply: &port_attrs
+ attributes:
+ - team-ifindex
+ - list-port
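
A minimal sketch of exercising this spec, assuming the in-tree YNL CLI (``tools/net/ynl/cli.py``) is available; the ifindex value is a placeholder, and teamd normally drives this interface:

.. code:: shell

    # create a team device and enslave a port
    $ ip link add name team0 type team
    $ ip link set eth0 down
    $ ip link set eth0 master team0

    # dump the team options through the genetlink family described above
    $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/team.yaml \
          --do options-get --json '{"team-ifindex": 10}'
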
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index f69ee1ebee01..fed821ef9b09 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -300,6 +300,11 @@ the software port.
in the beginning of the queue. This is a normal condition.
- Informative
+ * - `tx[i]_timestamps`
+ - Transmitted packets that were hardware timestamped at the device's DMA
+ layer.
+ - Informative
+
* - `tx[i]_added_vlan_packets`
- The number of packets sent where vlan tag insertion was offloaded to the
hardware.
@@ -702,6 +707,12 @@ the software port.
the device typically ensures not posting the CQE.
- Error
+ * - `ptp_cq[i]_lost_cqe`
+ - Number of times a CQE is expected to not be delivered on the PTP
+ timestamping CQE by the device due to a time delta elapsing. If such a
+ CQE is somehow delivered, `ptp_cq[i]_late_cqe` is incremented.
+ - Error
+
.. [#ring_global] The corresponding ring and global counters do not share the
same name (i.e. do not follow the common naming scheme).
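
The counters above are ordinary ethtool statistics, so on an mlx5 netdev (``eth0`` as a placeholder) they can be read with:

.. code:: shell

    $ ethtool -S eth0 | grep -E 'tx[0-9]+_timestamps|ptp_cq[0-9]+_lost_cqe'
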
diff --git a/Documentation/networking/devlink/devlink-eswitch-attr.rst b/Documentation/networking/devlink/devlink-eswitch-attr.rst
new file mode 100644
index 000000000000..08bb39ab1528
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-eswitch-attr.rst
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Devlink E-Switch Attribute
+==========================
+
+Devlink E-Switch supports two modes of operation: legacy and switchdev.
+Legacy mode operates based on traditional MAC/VLAN steering rules. Switching
+decisions are made based on MAC addresses, VLANs, etc. There is limited ability
+to offload switching rules to hardware.
+
+On the other hand, switchdev mode allows for more advanced offloading
+capabilities of the E-Switch to hardware. In switchdev mode, more switching
+rules and logic can be offloaded to the hardware switch ASIC. It enables
+representor netdevices that represent the slow path of virtual functions (VFs)
+or scalable-functions (SFs) of the device. See more information about
+:ref:`Documentation/networking/switchdev.rst <switchdev>` and
+:ref:`Documentation/networking/representors.rst <representors>`.
+
+In addition, the devlink E-Switch also comes with other attributes listed
+in the following section.
+
+Attributes Description
+======================
+
+The following is a list of E-Switch attributes.
+
+.. list-table:: E-Switch attributes
+ :widths: 8 5 45
+
+ * - Name
+ - Type
+ - Description
+ * - ``mode``
+ - enum
+ - The mode of the device. The mode can be one of the following:
+
+ * ``legacy`` operates based on traditional MAC/VLAN steering
+ rules.
+ * ``switchdev`` allows for more advanced offloading capabilities of
+ the E-Switch to hardware.
+ * - ``inline-mode``
+ - enum
+ - Some HWs need the VF driver to put part of the packet
+ headers on the TX descriptor so the e-switch can do proper
+ matching and steering. Supported in both switchdev mode and legacy mode.
+
+ * ``none`` none.
+ * ``link`` L2 mode.
+ * ``network`` L3 mode.
+ * ``transport`` L4 mode.
+ * - ``encap-mode``
+ - enum
+ - The encapsulation mode of the device. Supported in both switchdev mode
+ and legacy mode. The mode can be one of the following:
+
+ * ``none`` Disable encapsulation support.
+ * ``basic`` Enable encapsulation support.
+
+Example Usage
+=============
+
+.. code:: shell
+
+ # enable switchdev mode
+ $ devlink dev eswitch set pci/0000:08:00.0 mode switchdev
+
+ # set inline-mode and encap-mode
+ $ devlink dev eswitch set pci/0000:08:00.0 inline-mode none encap-mode basic
+
+ # display devlink device eswitch attributes
+ $ devlink dev eswitch show pci/0000:08:00.0
+ pci/0000:08:00.0: mode switchdev inline-mode none encap-mode basic
+
+ # enable encap-mode with legacy mode
+ $ devlink dev eswitch set pci/0000:08:00.0 mode legacy inline-mode none encap-mode basic
diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst
index 1242b0e6826b..23073bc219d8 100644
--- a/Documentation/networking/devlink/devlink-info.rst
+++ b/Documentation/networking/devlink/devlink-info.rst
@@ -146,6 +146,11 @@ board.manufacture
An identifier of the company or the facility which produced the part.
+board.part_number
+-----------------
+
+Part number of the board and its components.
+
fw
--
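
On devices that report it, ``board.part_number`` appears among the fixed versions printed by ``devlink dev info`` (the PCI address is a placeholder):

.. code:: shell

    $ devlink dev info pci/0000:08:00.0
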
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
index 562f46b41274..9d22d41a7cd1 100644
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -134,6 +134,9 @@ Users may also set the IPsec crypto capability of the function using
Users may also set the IPsec packet capability of the function using
`devlink port function set ipsec_packet` command.
+Users may also set the maximum IO event queues of the function
+using `devlink port function set max_io_eqs` command.
+
Function attributes
===================
@@ -295,6 +298,36 @@ policy is processed in software by the kernel.
function:
hw_addr 00:00:00:00:00:00 ipsec_packet enabled
+Maximum IO events queues setup
+------------------------------
+When a user sets the maximum number of IO event queues for an SF or
+a VF, the function driver is limited to consuming only the enforced
+number of IO event queues.
+
+IO event queues deliver events related to IO queues, including network
+device transmit and receive queues (txq and rxq) and RDMA Queue Pairs (QPs).
+For example, the number of netdevice channels and RDMA device completion
+vectors are derived from the function's IO event queues. Usually, the number
+of interrupt vectors consumed by the driver is limited by the number of IO
+event queues per device, as each of the IO event queues is connected to an
+interrupt vector.
+
+- Get maximum IO event queues of the VF device::
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_packet disabled max_io_eqs 10
+
+- Set maximum IO event queues of the VF device::
+
+ $ devlink port function set pci/0000:06:00.0/2 max_io_eqs 32
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_packet disabled max_io_eqs 32
+
Subfunction
============
diff --git a/Documentation/networking/devlink/hns3.rst b/Documentation/networking/devlink/hns3.rst
index 4562a6e4782f..72bc1b9f3785 100644
--- a/Documentation/networking/devlink/hns3.rst
+++ b/Documentation/networking/devlink/hns3.rst
@@ -23,3 +23,8 @@ The ``hns3`` driver reports the following versions
* - ``fw``
- running
- Used to represent the firmware version.
+ * - ``fw.scc``
+ - running
+ - Used to represent the Soft Congestion Control (SCC) firmware version.
+ SCC is a firmware component which provides multiple RDMA congestion
+ control algorithms, including DCQCN.
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index 7f30ebd5debb..830c04354222 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -21,6 +21,53 @@ Parameters
* - ``enable_iwarp``
- runtime
- mutually exclusive with ``enable_roce``
+ * - ``tx_scheduling_layers``
+ - permanent
+ - The ice hardware uses hierarchical scheduling for Tx with a fixed
+ number of layers in the scheduling tree. Each of them is a decision
+ point. The root node represents a port, while the leaves represent
+ the queues. This way of configuring the Tx scheduler allows features
+ like DCB or devlink-rate (documented below) to configure how much
+ bandwidth is given to any given queue or group of queues, enabling
+ fine-grained control because scheduling parameters can be configured
+ at any given layer of the tree.
+
+ The default 9-layer tree topology was deemed best for most workloads,
+ as it gives an optimal ratio of performance to configurability. However,
+ for some specific cases, this 9-layer topology might not be desired.
+ One example would be sending traffic to queues that are not a multiple
+ of 8. Because the maximum radix is limited to 8 in 9-layer topology,
+ the 9th queue has a different parent than the rest, and it's given
+ more bandwidth credits. This causes a problem when the system is
+ sending traffic to 9 queues:
+
+ | tx_queue_0_packets: 24163396
+ | tx_queue_1_packets: 24164623
+ | tx_queue_2_packets: 24163188
+ | tx_queue_3_packets: 24163701
+ | tx_queue_4_packets: 24163683
+ | tx_queue_5_packets: 24164668
+ | tx_queue_6_packets: 23327200
+ | tx_queue_7_packets: 24163853
+ | tx_queue_8_packets: 91101417 < Too much traffic is sent from the 9th queue
+
+ To address this need, you can switch to a 5-layer topology, which
+ changes the maximum topology radix to 512. With this enhancement,
+ the performance characteristic is equal as all queues can be assigned
+ to the same parent in the tree. The obvious drawback of this solution
+ is a lower configuration depth of the tree.
+
+ Use the ``tx_scheduling_layers`` parameter with the devlink command
+ to change the transmit scheduler topology. To use 5-layer topology,
+ use a value of 5. For example:
+ $ devlink dev param set pci/0000:16:00.0 name tx_scheduling_layers
+ value 5 cmode permanent
+ Use a value of 9 to set it back to the default value.
+
+ You must power-cycle the PCI slot for the selected topology to take effect.
+
+ To verify that the value has been set:
+ $ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers
Info versions
=============
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index e14d7a701b72..948c8c44e233 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -67,6 +67,7 @@ general.
devlink-selftests
devlink-trap
devlink-linecard
+ devlink-eswitch-attr
Driver-specific documentation
-----------------------------
diff --git a/Documentation/networking/devlink/nfp.rst b/Documentation/networking/devlink/nfp.rst
index a1717db0dfcc..3093642bdae4 100644
--- a/Documentation/networking/devlink/nfp.rst
+++ b/Documentation/networking/devlink/nfp.rst
@@ -32,7 +32,7 @@ The ``nfp`` driver reports the following versions
- Description
* - ``board.id``
- fixed
- - Part number identifying the board design
+ - Identifier of the board design
* - ``board.rev``
- fixed
- Revision of the board design
@@ -42,6 +42,9 @@ The ``nfp`` driver reports the following versions
* - ``board.model``
- fixed
- Model name of the board design
+ * - ``board.part_number``
+ - fixed
+ - Part number of the board and its components
* - ``fw.bundle_id``
- stored, running
- Firmware bundle id
diff --git a/Documentation/networking/dns_resolver.rst b/Documentation/networking/dns_resolver.rst
index add4d59a99a5..c0364f7070af 100644
--- a/Documentation/networking/dns_resolver.rst
+++ b/Documentation/networking/dns_resolver.rst
@@ -118,7 +118,7 @@ Keys of dns_resolver type can be read from userspace using keyctl_read() or
Mechanism
=========
-The dnsresolver module registers a key type called "dns_resolver". Keys of
+The dns_resolver module registers a key type called "dns_resolver". Keys of
this type are used to transport and cache DNS lookup results from userspace.
When dns_query() is invoked, it calls request_key() to search the local
@@ -152,4 +152,4 @@ Debugging
Debugging messages can be turned on dynamically by writing a 1 into the
following file::
- /sys/module/dnsresolver/parameters/debug
+ /sys/module/dns_resolver/parameters/debug
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index d583d9abf2f8..160bfb0ae8ba 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1237,12 +1237,21 @@ Kernel response contents:
``ETHTOOL_A_TSINFO_TX_TYPES`` bitset supported Tx types
``ETHTOOL_A_TSINFO_RX_FILTERS`` bitset supported Rx filters
``ETHTOOL_A_TSINFO_PHC_INDEX`` u32 PTP hw clock index
+ ``ETHTOOL_A_TSINFO_STATS`` nested HW timestamping statistics
===================================== ====== ==========================
``ETHTOOL_A_TSINFO_PHC_INDEX`` is absent if there is no associated PHC (there
is no special value for this case). The bitset attributes are omitted if they
would be empty (no bit set).
+Additional hardware timestamping statistics response contents:
+
+ ===================================== ====== ===================================
+ ``ETHTOOL_A_TS_STAT_TX_PKTS`` uint Packets with Tx HW timestamps
+ ``ETHTOOL_A_TS_STAT_TX_LOST`` uint Tx HW timestamps that never arrived
+ ``ETHTOOL_A_TS_STAT_TX_ERR`` uint Tx HW timestamp requests that failed
+ ===================================== ====== ===================================
+
CABLE_TEST
==========
@@ -1717,6 +1726,10 @@ Kernel response contents:
PSE functions
``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the
PoDL PSE.
+ ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` u32 Operational state of the PoE
+ PSE functions.
+ ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` u32 power detection status of the
+ PoE PSE.
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
@@ -1728,6 +1741,12 @@ aPoDLPSEAdminState. Possible values are:
.. kernel-doc:: include/uapi/linux/ethtool.h
:identifiers: ethtool_podl_pse_admin_state
+The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` implementing
+``IEEE 802.3-2022`` 30.9.1.1.2 aPSEAdminState.
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_c33_pse_admin_state
+
When set, the optional ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` attribute identifies
the power detection status of the PoDL PSE. The status depend on internal PSE
state machine and automatic PD classification support. This option is
@@ -1737,6 +1756,12 @@ Possible values are:
.. kernel-doc:: include/uapi/linux/ethtool.h
:identifiers: ethtool_podl_pse_pw_d_status
+The same goes for ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` implementing
+``IEEE 802.3-2022`` 30.9.1.1.5 aPSEPowerDetectionStatus.
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_c33_pse_pw_d_status
+
PSE_SET
=======
@@ -1747,6 +1772,7 @@ Request contents:
====================================== ====== =============================
``ETHTOOL_A_PSE_HEADER`` nested request header
``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` u32 Control PoDL PSE Admin state
+ ``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` u32 Control PSE Admin state
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
@@ -1754,6 +1780,9 @@ to control PoDL PSE Admin functions. This option is implementing
``IEEE 802.3-2018`` 30.15.1.2.1 acPoDLPSEAdminControl. See
``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` for supported values.
+The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` implementing
+``IEEE 802.3-2022`` 30.9.1.2.1 acPSEAdminControl.
+
RSS_GET
=======
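
A hedged way to fetch the new TSINFO statistics without a patched ethtool binary is the in-tree YNL CLI, assuming the ethtool spec exposes the request as ``tsinfo-get``; the classic ``ethtool -T eth0`` still shows the capability part:

.. code:: shell

    $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
          --do tsinfo-get --json '{"header": {"dev-name": "eth0"}}'
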
diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst
index 7d8c5380492f..8eb9a5d40f31 100644
--- a/Documentation/networking/filter.rst
+++ b/Documentation/networking/filter.rst
@@ -513,7 +513,7 @@ JIT compiler
------------
The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC,
-PowerPC, ARM, ARM64, MIPS, RISC-V and s390 and can be enabled through
+PowerPC, ARM, ARM64, MIPS, RISC-V, s390, and ARC and can be enabled through
CONFIG_BPF_JIT. The JIT compiler is transparently invoked for each
attached filter from user space or for internal kernel users if it has
been previously enabled by root::
@@ -650,7 +650,7 @@ before a conversion to the new layout is being done behind the scenes!
Currently, the classic BPF format is being used for JITing on most
32-bit architectures, whereas x86-64, aarch64, s390x, powerpc64,
-sparc64, arm32, riscv64, riscv32, loongarch64 perform JIT compilation
+sparc64, arm32, riscv64, riscv32, loongarch64, arc perform JIT compilation
from eBPF instruction set.
Testing
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 473d72c36d61..7664c0bfe461 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -93,6 +93,7 @@ Contents:
plip
ppp_generic
proc_net_tcp
+ pse-pd/index
radiotap-headers
rds
regulatory
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index c383a394c665..238b66d0e059 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -222,11 +222,11 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
Control offload timeout for tcp connections.
TCP connections may be offloaded from nf conntrack to nf flow table.
- Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
+ Once aged, the connection is returned to nf conntrack.
nf_flowtable_udp_timeout - INTEGER (seconds)
default 30
Control offload timeout for udp connections.
UDP connections may be offloaded from nf conntrack to nf flow table.
- Once aged, the connection is returned to nf conntrack with udp pickup timeout.
+ Once aged, the connection is returned to nf conntrack.
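
These flowtable timeouts live under the usual ``net.netfilter`` sysctl tree, so (assuming the flowtable infrastructure is loaded) they can be inspected and tuned with:

.. code:: shell

    $ sysctl net.netfilter.nf_flowtable_tcp_timeout
    $ sysctl -w net.netfilter.nf_flowtable_udp_timeout=60
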
diff --git a/Documentation/networking/pse-pd/index.rst b/Documentation/networking/pse-pd/index.rst
new file mode 100644
index 000000000000..de28a5aee316
--- /dev/null
+++ b/Documentation/networking/pse-pd/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Power Sourcing Equipment (PSE) Documentation
+============================================
+
+.. toctree::
+ :maxdepth: 2
+
+ introduction
+ pse-pi
diff --git a/Documentation/networking/pse-pd/introduction.rst b/Documentation/networking/pse-pd/introduction.rst
new file mode 100644
index 000000000000..e3d3faaef717
--- /dev/null
+++ b/Documentation/networking/pse-pd/introduction.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Power Sourcing Equipment (PSE) in IEEE 802.3 Standard
+=====================================================
+
+Overview
+--------
+
+Power Sourcing Equipment (PSE) is essential in networks for delivering power
+along with data over Ethernet cables. It usually refers to devices like
+switches and hubs that supply power to Powered Devices (PDs) such as IP
+cameras, VoIP phones, and wireless access points.
+
+PSE vs. PoDL PSE
+----------------
+
+PSE in the IEEE 802.3 standard generally refers to equipment that provides
+power alongside data over Ethernet cables, typically associated with Power over
+Ethernet (PoE).
+
+PoDL PSE, or Power over Data Lines PSE, specifically denotes PSEs operating
+with single balanced twisted-pair PHYs, as per Clause 104 of IEEE 802.3. PoDL
+is significant in contexts like automotive and industrial controls where power
+and data delivery over a single pair is advantageous.
+
+IEEE 802.3-2018 Addendums and Related Clauses
+---------------------------------------------
+
+Key addenda to the IEEE 802.3-2018 standard relevant to power delivery over
+Ethernet are as follows:
+
+- **802.3af (Approved in 2003-06-12)**: Known as PoE in the market, detailed in
+ Clause 33, delivering up to 15.4W of power.
+- **802.3at (Approved in 2009-09-11)**: Marketed as PoE+, enhancing PoE as
+ covered in Clause 33, increasing power delivery to up to 30W.
+- **802.3bt (Approved in 2018-09-27)**: Known as 4PPoE in the market, outlined
+ in Clause 33. Type 3 delivers up to 60W, and Type 4 up to 100W.
+- **802.3bu (Approved in 2016-12-07)**: Formerly referred to as PoDL, detailed
+ in Clause 104. Introduces Classes 0 - 9. Class 9 PoDL PSE delivers up to ~65W.
+
+Kernel Naming Convention Recommendations
+----------------------------------------
+
+For clarity and consistency within the Linux kernel's networking subsystem, the
+following naming conventions are recommended:
+
+- For general PSE (PoE) code, use "c33_pse" keywords. For example:
+ ``enum ethtool_c33_pse_admin_state c33_admin_control;``.
+ This aligns with Clause 33, encompassing various PoE forms.
+
+- For PoDL PSE - specific code, use "podl_pse". For example:
+ ``enum ethtool_podl_pse_admin_state podl_admin_control;`` to differentiate
+ PoDL PSE settings according to Clause 104.
+
+Summary of Clause 33: Data Terminal Equipment (DTE) Power via Media Dependent Interface (MDI)
+---------------------------------------------------------------------------------------------
+
+Clause 33 of the IEEE 802.3 standard defines the functional and electrical
+characteristics of Powered Device (PD) and Power Sourcing Equipment (PSE).
+These entities enable power delivery using the same generic cabling as for data
+transmission, integrating power with data communication for devices such as
+10BASE-T, 100BASE-TX, or 1000BASE-T.
+
+Summary of Clause 104: Power over Data Lines (PoDL) of Single Balanced Twisted-Pair Ethernet
+--------------------------------------------------------------------------------------------
+
+Clause 104 of the IEEE 802.3 standard delineates the functional and electrical
+characteristics of PoDL Powered Devices (PDs) and PoDL Power Sourcing Equipment
+(PSEs). These are designed for use with single balanced twisted-pair Ethernet
+Physical Layers. In this clause, 'PSE' refers specifically to PoDL PSE, and
+'PD' to PoDL PD. The key intent is to provide devices with a unified interface
+for both data and the power required to process this data over a single
+balanced twisted-pair Ethernet connection.
diff --git a/Documentation/networking/pse-pd/pse-pi.rst b/Documentation/networking/pse-pd/pse-pi.rst
new file mode 100644
index 000000000000..5cad14fedc13
--- /dev/null
+++ b/Documentation/networking/pse-pd/pse-pi.rst
@@ -0,0 +1,301 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+PSE Power Interface (PSE PI) Documentation
+==========================================
+
+The Power Sourcing Equipment Power Interface (PSE PI) plays a pivotal role in
+the architecture of Power over Ethernet (PoE) systems. It is essentially a
+blueprint that outlines how one or multiple power sources are connected to the
+eight-pin modular jack, commonly known as the Ethernet RJ45 port. This
+connection scheme is crucial for enabling the delivery of power alongside data
+over Ethernet cables.
+
+Documentation and Standards
+---------------------------
+
+The IEEE 802.3 standard provides detailed documentation on the PSE PI.
+Specifically:
+
+- Section "33.2.3 PI pin assignments" covers the pin assignments for PoE
+ systems that utilize two pairs for power delivery.
+- Section "145.2.4 PSE PI" addresses the configuration for PoE systems that
+ deliver power over all four pairs of an Ethernet cable.
+
+PSE PI and Single Pair Ethernet
+-------------------------------
+
+Single Pair Ethernet (SPE) represents a different approach to Ethernet
+connectivity, utilizing just one pair of conductors for both data and power
+transmission. Unlike the configurations detailed in the PSE PI for standard
+Ethernet, which can involve multiple power sourcing arrangements across four or
+two pairs of wires, SPE operates on a simpler model due to its single-pair
+design. As a result, the complexities of choosing between alternative pin
+assignments for power delivery, as described in the PSE PI for multi-pair
+Ethernet, are not applicable to SPE.
+
+Understanding PSE PI
+--------------------
+
+The Power Sourcing Equipment Power Interface (PSE PI) is a framework defining
+how Power Sourcing Equipment (PSE) delivers power to Powered Devices (PDs) over
+Ethernet cables. It details two main configurations for power delivery, known
+as Alternative A and Alternative B, which are distinguished not only by their
+method of power transmission but also by the implications for polarity and data
+transmission direction.
+
+Alternative A and B Overview
+----------------------------
+
+- **Alternative A:** Utilizes RJ45 conductors 1, 2, 3 and 6. In both 10/100BaseT
+ and 1G/2G/5G/10GBaseT networks, the pairs used carry data.
+ The power delivery's polarity in this alternative can vary based on the MDI
+ (Medium Dependent Interface) or MDI-X (Medium Dependent Interface Crossover)
+ configuration.
+
+- **Alternative B:** Utilizes RJ45 conductors 4, 5, 7 and 8. In the case of a
+ 10/100BaseT network, the pairs used are spare pairs that carry no data and are
+ less influenced by the data transmission direction. This is not the case for a
+ 1G/2G/5G/10GBaseT network. Alternative B includes two configurations with
+ different polarities, known as variant X and variant S, to accommodate
+ different network requirements and device specifications.
+
+Table 145-3 PSE Pinout Alternatives
+-----------------------------------
+
+The following table outlines the pin configurations for both Alternative A and
+Alternative B.
+
++------------+-------------------+-----------------+-----------------+-----------------+
+| Conductor | Alternative A | Alternative A | Alternative B | Alternative B |
+| | (MDI-X) | (MDI) | (X) | (S) |
++============+===================+=================+=================+=================+
+| 1 | Negative V | Positive V | - | - |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 2 | Negative V | Positive V | - | - |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 3 | Positive V | Negative V | - | - |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 4 | - | - | Negative V | Positive V |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 5 | - | - | Negative V | Positive V |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 6 | Positive V | Negative V | - | - |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 7 | - | - | Positive V | Negative V |
++------------+-------------------+-----------------+-----------------+-----------------+
+| 8 | - | - | Positive V | Negative V |
++------------+-------------------+-----------------+-----------------+-----------------+
+
+.. note::
+ - "Positive V" and "Negative V" indicate the voltage polarity for each pin.
+ - "-" indicates that the pin is not used for power delivery in that
+ specific configuration.
+
+PSE PI compatibilities
+----------------------
+
+The following table outlines the compatibility between the pinout alternative
+and the 1000/2.5G/5G/10GBaseT in the PSE 2 pairs connection.
+
++---------+---------------+---------------------+-----------------------+
+| Variant | Alternative | Power Feeding Type | Compatibility with |
+| | (A/B) | (Direct/Phantom) | 1000/2.5G/5G/10GBaseT |
++=========+===============+=====================+=======================+
+| 1 | A | Phantom | Yes |
++---------+---------------+---------------------+-----------------------+
+| 2 | B | Phantom | Yes |
++---------+---------------+---------------------+-----------------------+
+| 3 | B | Direct | No |
++---------+---------------+---------------------+-----------------------+
+
+.. note::
+ - "Direct" indicate a variant where the power is injected directly to pairs
+ without using magnetics in case of spare pairs.
+ - "Phantom" indicate power path over coils/magnetics as it is done for
+ Alternative A variant.
+
+In the case of a 4-pair PSE, a PSE supporting only 10/100BaseT (which means
+Direct Power on pinout Alternative B) is not compatible with a 4-pair
+1000/2.5G/5G/10GBaseT link.
+
+PSE Power Interface (PSE PI) Connection Diagram
+-----------------------------------------------
+
+The diagram below illustrates the connection architecture between the RJ45
+port, the Ethernet PHY (Physical Layer), and the PSE PI (Power Sourcing
+Equipment Power Interface), demonstrating how power and data are delivered
+simultaneously through an Ethernet cable. The RJ45 port serves as the physical
+interface for these connections, with each of its eight pins connected to both
+the Ethernet PHY for data transmission and the PSE PI for power delivery.
+
+.. code-block::
+
+ +--------------------------+
+ | |
+ | RJ45 Port |
+ | |
+ +--+--+--+--+--+--+--+--+--+ +-------------+
+ 1| 2| 3| 4| 5| 6| 7| 8| | |
+ | | | | | | | o-------------------+ |
+ | | | | | | o--|-------------------+ +<--- PSE 1
+ | | | | | o--|--|-------------------+ |
+ | | | | o--|--|--|-------------------+ |
+ | | | o--|--|--|--|-------------------+ PSE PI |
+ | | o--|--|--|--|--|-------------------+ |
+ | o--|--|--|--|--|--|-------------------+ +<--- PSE 2 (optional)
+ o--|--|--|--|--|--|--|-------------------+ |
+ | | | | | | | | | |
+ +--+--+--+--+--+--+--+--+--+ +-------------+
+ | |
+ | Ethernet PHY |
+ | |
+ +--------------------------+
+
+Simple PSE PI Configuration for Alternative A
+---------------------------------------------
+
+The diagram below illustrates a straightforward PSE PI (Power Sourcing
+Equipment Power Interface) configuration designed to support the Alternative A
+setup for Power over Ethernet (PoE). This implementation is tailored to provide
+power delivery through the data-carrying pairs of an Ethernet cable, suitable
+for either MDI or MDI-X configurations, albeit supporting one variation at a
+time.
+
+.. code-block::
+
+ +-------------+
+ | PSE PI |
+ 8 -----+ +-------------+
+ 7 -----+ Rail 1 |
+ 6 -----+------+----------------------+
+ 5 -----+ | |
+ 4 -----+ | Rail 2 | PSE 1
+ 3 -----+------/ +------------+
+ 2 -----+--+-------------/ |
+ 1 -----+--/ +-------------+
+ |
+ +-------------+
+
+In this configuration:
+
+- Pins 1 and 2, as well as pins 3 and 6, are utilized for power delivery in
+ addition to data transmission. This aligns with the standard wiring for
+ 10/100BaseT Ethernet networks where these pairs are used for data.
+- Rail 1 and Rail 2 represent the positive and negative voltage rails, with
+ Rail 1 connected to pins 1 and 2, and Rail 2 connected to pins 3 and 6.
+ More advanced PSE PI configurations may include integrated or external
+ switches to change the polarity of the voltage rails, allowing for
+ compatibility with both MDI and MDI-X configurations.
+
+More complex PSE PI configurations may include additional components to support
+Alternative B, or to provide further features such as power management or
+extended power delivery capabilities such as 2-pair or 4-pair power delivery.
+
+.. code-block::
+
+ +-------------+
+ | PSE PI |
+ | +---+
+ 8 -----+--------+ | +-------------+
+ 7 -----+--------+ | Rail 1 |
+ 6 -----+--------+ +-----------------+
+ 5 -----+--------+ | |
+ 4 -----+--------+ | Rail 2 | PSE 1
+ 3 -----+--------+ +----------------+
+ 2 -----+--------+ | |
+ 1 -----+--------+ | +-------------+
+ | +---+
+ +-------------+
+
+Device Tree Configuration: Describing PSE PI Configurations
+-----------------------------------------------------------
+
+The necessity for a separate PSE PI node in the device tree is influenced by
+the intricacy of the Power over Ethernet (PoE) system's setup. Here are
+descriptions of both simple and complex PSE PI configurations to illustrate
+this decision-making process:
+
+**Simple PSE PI Configuration:**
+In a straightforward scenario, the PSE PI setup involves a direct, one-to-one
+connection between a single PSE controller and an Ethernet port. This setup
+typically supports basic PoE functionality without the need for dynamic
+configuration or management of multiple power delivery modes. For such simple
+configurations, detailing the PSE PI within the existing PSE controller's node
+may suffice, as the system does not encompass additional complexity that
+warrants a separate node. The primary focus here is on the clear and direct
+association of power delivery to a specific Ethernet port.
+
+**Complex PSE PI Configuration:**
+Contrastingly, a complex PSE PI setup may encompass multiple PSE controllers or
+auxiliary circuits that collectively manage power delivery to one Ethernet
+port. Such configurations might support a range of PoE standards and require
+the capability to dynamically configure power delivery based on the operational
+mode (e.g., PoE2 versus PoE4) or specific requirements of connected devices. In
+these instances, a dedicated PSE PI node becomes essential for accurately
+documenting the system architecture. This node would serve to detail the
+interactions between different PSE controllers, the support for various PoE
+modes, and any additional logic required to coordinate power delivery across
+the network infrastructure.
+
+**Guidance:**
+
+For simple PSE setups, including PSE PI information in the PSE controller node
+might suffice due to the straightforward nature of these systems. However,
+complex configurations, involving multiple components or advanced PoE features,
+benefit from a dedicated PSE PI node. This method adheres to IEEE 802.3
+specifications, improving documentation clarity and ensuring accurate
+representation of the PoE system's complexity.
+
+PSE PI Node: Essential Information
+----------------------------------
+
+The PSE PI (Power Sourcing Equipment Power Interface) node in a device tree can
+include several key pieces of information critical for defining the power
+delivery capabilities and configurations of a PoE (Power over Ethernet) system.
+Below is a list of such information, along with explanations for their
+necessity and reasons why they might not be found within a PSE controller node:
+
+1. **Powered Pairs Configuration**
+
+ - *Description:* Identifies the pairs used for power delivery in the
+ Ethernet cable.
+ - *Necessity:* Essential to ensure the correct pairs are powered according
+ to the board's design.
+ - *PSE Controller Node:* Typically lacks details on physical pair usage,
+ focusing on power regulation.
+
+2. **Polarity of Powered Pairs**
+
+ - *Description:* Specifies the polarity (positive or negative) for each
+ powered pair.
+ - *Necessity:* Critical for safe and effective power transmission to PDs.
+ - *PSE Controller Node:* Polarity management may exceed the standard
+ functionalities of PSE controllers.
+
+3. **PSE Cells Association**
+
+ - *Description:* Details the association of PSE cells with Ethernet ports or
+ pairs in multi-cell configurations.
+ - *Necessity:* Allows for optimized power resource allocation in complex
+ systems.
+ - *PSE Controller Node:* Controllers may not manage cell associations
+ directly, focusing instead on power flow regulation.
+
+4. **Support for PoE Standards**
+
+ - *Description:* Lists the PoE standards and configurations supported by the
+ system.
+ - *Necessity:* Ensures system compatibility with various PDs and adherence
+ to industry standards.
+ - *PSE Controller Node:* Specific capabilities may depend on the overall PSE
+ PI design rather than the controller alone. Multiple PSE cells per PI
+ do not necessarily imply support for multiple PoE standards.
+
+5. **Protection Mechanisms**
+
+ - *Description:* Outlines additional protection mechanisms, such as
+ overcurrent protection and thermal management.
+ - *Necessity:* Provides extra safety and stability, complementing PSE
+ controller protections.
+ - *PSE Controller Node:* Some protections may be implemented via
+ board-specific hardware or algorithms external to the controller.
diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
index decb39c19b9e..5e23386f6968 100644
--- a/Documentation/networking/representors.rst
+++ b/Documentation/networking/representors.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: GPL-2.0
+.. _representors:
=============================
Network Function Representors
diff --git a/Documentation/networking/xfrm_proc.rst b/Documentation/networking/xfrm_proc.rst
index 0a771c5a7399..973d1571acac 100644
--- a/Documentation/networking/xfrm_proc.rst
+++ b/Documentation/networking/xfrm_proc.rst
@@ -73,6 +73,9 @@ XfrmAcquireError:
XfrmFwdHdrError:
Forward routing of a packet is not allowed
+XfrmInStateDirError:
+ State direction mismatch (lookup found an output state on the input path, expected input or no direction)
+
Outbound errors
~~~~~~~~~~~~~~~
XfrmOutError:
@@ -111,3 +114,6 @@ XfrmOutPolError:
XfrmOutStateInvalid:
State is invalid, perhaps expired
+
+XfrmOutStateDirError:
+ State direction mismatch (lookup found an input state on the output path, expected output or no direction)
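
The new direction-mismatch counters appear alongside the existing ones in ``/proc/net/xfrm_stat``:

.. code:: shell

    $ grep -E 'XfrmInStateDirError|XfrmOutStateDirError' /proc/net/xfrm_stat
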
diff --git a/Documentation/power/pci.rst b/Documentation/power/pci.rst
index 12070320307e..e2c1fb8a569a 100644
--- a/Documentation/power/pci.rst
+++ b/Documentation/power/pci.rst
@@ -333,7 +333,7 @@ struct pci_dev.
The PCI subsystem's first task related to device power management is to
prepare the device for power management and initialize the fields of struct
pci_dev used for this purpose. This happens in two functions defined in
-drivers/pci/pci.c, pci_pm_init() and platform_pci_wakeup_init().
+drivers/pci/, pci_pm_init() and pci_acpi_setup().
The first of these functions checks if the device supports native PCI PM
and if that's the case the offset of its power management capability structure
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 7ef8de58f7f8..5685d7bfe4d0 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -31,7 +31,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 13.0.1 clang --version
-Rust (optional) 1.76.0 rustc --version
+Rust (optional) 1.78.0 rustc --version
bindgen (optional) 0.65.1 bindgen --version
GNU make 3.82 make --version
bash 4.2 bash --version
@@ -62,6 +62,7 @@ Sphinx\ [#f1]_ 2.4.4 sphinx-build --version
cpio any cpio --version
GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
+mkimage (optional) 2017.01 mkimage --version
====================== =============== ========================================
.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -189,6 +190,14 @@ The kernel build requires GNU GLOBAL version 6.6.5 or later to generate
tag files through ``make gtags``. This is due to its use of the gtags
``-C (--directory)`` flag.
+mkimage
+-------
+
+This tool is used when building a Flat Image Tree (FIT), commonly used on ARM
+platforms. The tool is available via the ``u-boot-tools`` package or can be
+built from the U-Boot source code. See the instructions at
+https://docs.u-boot.org/en/latest/build/tools.html#building-tools-for-linux
+
System utilities
****************
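
As a minimal sketch of the mkimage usage referred to above (the ``.its`` source name is a hypothetical placeholder), a FIT image is built from an image tree source like so:

.. code:: shell

    $ mkimage -f kernel.its kernel.itb
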
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 9c7cf7347394..7e768c65aa92 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -827,6 +827,29 @@ Macros with multiple statements should be enclosed in a do - while block:
do_this(b, c); \
} while (0)
+Function-like macros with unused parameters should be replaced by static
+inline functions to avoid the issue of unused variables:
+
+.. code-block:: c
+
+ static inline void fun(struct foo *foo)
+ {
+ }
+
+Due to historical practices, many files still employ the "cast to (void)"
+approach to evaluate parameters. However, this method is not advisable.
+Inline functions address the issue of "expression with side effects
+evaluated more than once", circumvent unused-variable problems, and
+are generally better documented than macros.
+
+.. code-block:: c
+
+ /*
+ * Avoid doing this whenever possible and instead opt for static
+ * inline functions
+ */
+ #define macrofun(foo) do { (void) (foo); } while (0)
+
Things to avoid when using macros:
1) macros that affect control flow:
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index bb2100228cc7..6e9a4597bf2c 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -252,7 +252,7 @@ an involved disclosed party. The current ambassadors list:
AMD Tom Lendacky <thomas.lendacky@amd.com>
Ampere Darren Hart <darren@os.amperecomputing.com>
ARM Catalin Marinas <catalin.marinas@arm.com>
- IBM Power Anton Blanchard <anton@linux.ibm.com>
+ IBM Power Michael Ellerman <ellerman@au.ibm.com>
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <quic_tsoni@quicinc.com>
diff --git a/Documentation/process/handling-regressions.rst b/Documentation/process/handling-regressions.rst
index ce6753a674f3..49ba1410cfce 100644
--- a/Documentation/process/handling-regressions.rst
+++ b/Documentation/process/handling-regressions.rst
@@ -284,7 +284,7 @@ What else is there to known about regressions?
Check out Documentation/admin-guide/reporting-regressions.rst, it covers a lot
of other aspects you want might want to be aware of:
- * the purpose of the "no regressions rule"
+ * the purpose of the "no regressions" rule
* what issues actually qualify as regression
diff --git a/Documentation/process/maintainer-tip.rst b/Documentation/process/maintainer-tip.rst
index 497bb39727c8..64739968afa6 100644
--- a/Documentation/process/maintainer-tip.rst
+++ b/Documentation/process/maintainer-tip.rst
@@ -409,20 +409,20 @@ See :ref:`resend_reminders`.
Merge window
^^^^^^^^^^^^
-Please do not expect large patch series to be handled during the merge
-window or even during the week before. Such patches should be submitted in
-mergeable state *at* *least* a week before the merge window opens.
-Exceptions are made for bug fixes and *sometimes* for small standalone
-drivers for new hardware or minimally invasive patches for hardware
-enablement.
+Please do not expect patches to be reviewed or merged by tip
+maintainers around or during the merge window. The trees are closed
+to all but urgent fixes during this time. They reopen once the merge
+window closes and a new -rc1 kernel has been released.
+
+Large series should be submitted in mergeable state *at* *least* a week
+before the merge window opens. Exceptions are made for bug fixes and
+*sometimes* for small standalone drivers for new hardware or minimally
+invasive patches for hardware enablement.
During the merge window, the maintainers instead focus on following the
upstream changes, fixing merge window fallout, collecting bug fixes, and
allowing themselves a breath. Please respect that.
-The release candidate -rc1 is the starting point for new patches to be
-applied which are targeted for the next merge window.
-
So called _urgent_ branches will be merged into mainline during the
stabilization phase of each release.
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 1704f1c686d0..edf90bbe30f4 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -6,29 +6,29 @@ Everything you ever wanted to know about Linux -stable releases
Rules on what kind of patches are accepted, and which ones are not, into the
"-stable" tree:
- - It or an equivalent fix must already exist in Linus' tree (upstream).
- - It must be obviously correct and tested.
- - It cannot be bigger than 100 lines, with context.
- - It must follow the
- :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
- rules.
- - It must either fix a real bug that bothers people or just add a device ID.
- To elaborate on the former:
-
- - It fixes a problem like an oops, a hang, data corruption, a real security
- issue, a hardware quirk, a build error (but not for things marked
- CONFIG_BROKEN), or some "oh, that's not good" issue.
- - Serious issues as reported by a user of a distribution kernel may also
- be considered if they fix a notable performance or interactivity issue.
- As these fixes are not as obvious and have a higher risk of a subtle
- regression they should only be submitted by a distribution kernel
- maintainer and include an addendum linking to a bugzilla entry if it
- exists and additional information on the user-visible impact.
- - No "This could be a problem..." type of things like a "theoretical race
- condition", unless an explanation of how the bug can be exploited is also
- provided.
- - No "trivial" fixes without benefit for users (spelling changes, whitespace
- cleanups, etc).
+- It or an equivalent fix must already exist in Linux mainline (upstream).
+- It must be obviously correct and tested.
+- It cannot be bigger than 100 lines, with context.
+- It must follow the
+ :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+ rules.
+- It must either fix a real bug that bothers people or just add a device ID.
+ To elaborate on the former:
+
+ - It fixes a problem like an oops, a hang, data corruption, a real security
+ issue, a hardware quirk, a build error (but not for things marked
+ CONFIG_BROKEN), or some "oh, that's not good" issue.
+ - Serious issues as reported by a user of a distribution kernel may also
+ be considered if they fix a notable performance or interactivity issue.
+ As these fixes are not as obvious and have a higher risk of a subtle
+ regression they should only be submitted by a distribution kernel
+ maintainer and include an addendum linking to a bugzilla entry if it
+ exists and additional information on the user-visible impact.
+ - No "This could be a problem..." type of things like a "theoretical race
+ condition", unless an explanation of how the bug can be exploited is also
+ provided.
+ - No "trivial" fixes without benefit for users (spelling changes, whitespace
+ cleanups, etc).
Procedure for submitting patches to the -stable tree
@@ -42,11 +42,11 @@ Procedure for submitting patches to the -stable tree
There are three options to submit a change to -stable trees:
- 1. Add a 'stable tag' to the description of a patch you then submit for
- mainline inclusion.
- 2. Ask the stable team to pick up a patch already mainlined.
- 3. Submit a patch to the stable team that is equivalent to a change already
- mainlined.
+1. Add a 'stable tag' to the description of a patch you then submit for
+ mainline inclusion.
+2. Ask the stable team to pick up a patch already mainlined.
+3. Submit a patch to the stable team that is equivalent to a change already
+ mainlined.
The sections below describe each of the options in more detail.
@@ -68,82 +68,72 @@ Option 1
********
To have a patch you submit for mainline inclusion later automatically picked up
-for stable trees, add the tag
+for stable trees, add this tag in the sign-off area::
-.. code-block:: none
+ Cc: stable@vger.kernel.org
- Cc: stable@vger.kernel.org
+Use ``Cc: stable@kernel.org`` instead when fixing unpublished vulnerabilities:
+it reduces the chance of accidentally exposing the fix to the public by way of
+'git send-email', as mails sent to that address are not delivered anywhere.
-in the sign-off area. Once the patch is mainlined it will be applied to the
-stable tree without anything else needing to be done by the author or
-subsystem maintainer.
+Once the patch is mainlined it will be applied to the stable tree without
+anything else needing to be done by the author or subsystem maintainer.
-To sent additional instructions to the stable team, use a shell-style inline
-comment:
+To send additional instructions to the stable team, use a shell-style inline
+comment to pass arbitrary or predefined notes:
- * To specify any additional patch prerequisites for cherry picking use the
- following format in the sign-off area:
+* Specify any additional patch prerequisites for cherry picking::
- .. code-block:: none
+ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
+ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
- Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
- Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
- Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
- Cc: <stable@vger.kernel.org> # 3.3.x
- Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ The tag sequence has the meaning of::
- The tag sequence has the meaning of:
+ git cherry-pick a1f84a3
+ git cherry-pick 1b9508f
+ git cherry-pick fd21073
+ git cherry-pick <this commit>
- .. code-block:: none
+ Note that for a patch series, you do not have to list as prerequisites the
+ patches present in the series itself. For example, if you have the following
+ patch series::
- git cherry-pick a1f84a3
- git cherry-pick 1b9508f
- git cherry-pick fd21073
- git cherry-pick <this commit>
+ patch1
+ patch2
- Note that for a patch series, you do not have to list as prerequisites the
- patches present in the series itself. For example, if you have the following
- patch series:
+ where patch2 depends on patch1, you do not have to list patch1 as
+ prerequisite of patch2 if you have already marked patch1 for stable
+ inclusion.
- .. code-block:: none
+* Point out kernel version prerequisites::
- patch1
- patch2
+ Cc: <stable@vger.kernel.org> # 3.3.x
- where patch2 depends on patch1, you do not have to list patch1 as
- prerequisite of patch2 if you have already marked patch1 for stable
- inclusion.
+ The tag has the meaning of::
- * For patches that may have kernel version prerequisites specify them using
- the following format in the sign-off area:
+ git cherry-pick <this commit>
- .. code-block:: none
+ For each "-stable" tree starting with the specified version.
- Cc: <stable@vger.kernel.org> # 3.3.x
+ Note, such tagging is unnecessary if the stable team can derive the
+ appropriate versions from Fixes: tags.
- The tag has the meaning of:
+* Delay pick up of patches::
- .. code-block:: none
+ Cc: <stable@vger.kernel.org> # after -rc3
- git cherry-pick <this commit>
+* Point out known problems::
- For each "-stable" tree starting with the specified version.
+ Cc: <stable@vger.kernel.org> # see patch description, needs adjustments for <= 6.3
- Note, such tagging is unnecessary if the stable team can derive the
- appropriate versions from Fixes: tags.
+Furthermore, there is a variant of the stable tag you can use to make the stable
+team's backporting tools (e.g. AUTOSEL or scripts that look for commits
+containing a 'Fixes:' tag) ignore a change::
- * To delay pick up of patches, use the following format:
-
- .. code-block:: none
-
- Cc: <stable@vger.kernel.org> # after 4 weeks in mainline
-
- * For any other requests, just add a note to the stable tag. This for example
- can be used to point out known problems:
-
- .. code-block:: none
-
- Cc: <stable@vger.kernel.org> # see patch description, needs adjustments for <= 6.3
+ Cc: <stable+noautosel@kernel.org> # reason goes here, and must be present
.. _option_2:
@@ -163,17 +153,13 @@ Option 3
Send the patch, after verifying that it follows the above rules, to
stable@vger.kernel.org and mention the kernel versions you wish it to be applied
to. When doing so, you must note the upstream commit ID in the changelog of your
-submission with a separate line above the commit text, like this:
-
-.. code-block:: none
-
- commit <sha1> upstream.
+submission with a separate line above the commit text, like this::
-or alternatively:
+ commit <sha1> upstream.
-.. code-block:: none
+Or alternatively::
- [ Upstream commit <sha1> ]
+ [ Upstream commit <sha1> ]
If the submitted patch deviates from the original upstream patch (for example
because it had to be adjusted for the older API), this must be very clearly
@@ -194,55 +180,55 @@ developers and by the relevant subsystem maintainer.
Review cycle
------------
- - When the -stable maintainers decide for a review cycle, the patches will be
- sent to the review committee, and the maintainer of the affected area of
- the patch (unless the submitter is the maintainer of the area) and CC: to
- the linux-kernel mailing list.
- - The review committee has 48 hours in which to ACK or NAK the patch.
- - If the patch is rejected by a member of the committee, or linux-kernel
- members object to the patch, bringing up issues that the maintainers and
- members did not realize, the patch will be dropped from the queue.
- - The ACKed patches will be posted again as part of release candidate (-rc)
- to be tested by developers and testers.
- - Usually only one -rc release is made, however if there are any outstanding
- issues, some patches may be modified or dropped or additional patches may
- be queued. Additional -rc releases are then released and tested until no
- issues are found.
- - Responding to the -rc releases can be done on the mailing list by sending
- a "Tested-by:" email with any testing information desired. The "Tested-by:"
- tags will be collected and added to the release commit.
- - At the end of the review cycle, the new -stable release will be released
- containing all the queued and tested patches.
- - Security patches will be accepted into the -stable tree directly from the
- security kernel team, and not go through the normal review cycle.
- Contact the kernel security team for more details on this procedure.
+- When the -stable maintainers decide for a review cycle, the patches will be
+ sent to the review committee, and the maintainer of the affected area of
+ the patch (unless the submitter is the maintainer of the area) and CC: to
+ the linux-kernel mailing list.
+- The review committee has 48 hours in which to ACK or NAK the patch.
+- If the patch is rejected by a member of the committee, or linux-kernel
+ members object to the patch, bringing up issues that the maintainers and
+ members did not realize, the patch will be dropped from the queue.
+- The ACKed patches will be posted again as part of release candidate (-rc)
+ to be tested by developers and testers.
+- Usually only one -rc release is made, however if there are any outstanding
+ issues, some patches may be modified or dropped or additional patches may
+ be queued. Additional -rc releases are then released and tested until no
+ issues are found.
+- Responding to the -rc releases can be done on the mailing list by sending
+ a "Tested-by:" email with any testing information desired. The "Tested-by:"
+ tags will be collected and added to the release commit.
+- At the end of the review cycle, the new -stable release will be released
+ containing all the queued and tested patches.
+- Security patches will be accepted into the -stable tree directly from the
+ security kernel team, and not go through the normal review cycle.
+ Contact the kernel security team for more details on this procedure.
Trees
-----
- - The queues of patches, for both completed versions and in progress
- versions can be found at:
+- The queues of patches, for both completed versions and in progress
+ versions can be found at:
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
- - The finalized and tagged releases of all stable kernels can be found
- in separate branches per version at:
+- The finalized and tagged releases of all stable kernels can be found
+ in separate branches per version at:
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
- - The release candidate of all stable kernel versions can be found at:
+- The release candidate of all stable kernel versions can be found at:
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/
- .. warning::
- The -stable-rc tree is a snapshot in time of the stable-queue tree and
- will change frequently, hence will be rebased often. It should only be
- used for testing purposes (e.g. to be consumed by CI systems).
+ .. warning::
+ The -stable-rc tree is a snapshot in time of the stable-queue tree and
+ will change frequently, hence will be rebased often. It should only be
+ used for testing purposes (e.g. to be consumed by CI systems).
Review committee
----------------
- - This is made up of a number of kernel developers who have volunteered for
- this task, and a few that haven't.
+- This is made up of a number of kernel developers who have volunteered for
+ this task, and a few that haven't.
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 5c4fa9f5d1cd..b13e19d84744 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -16,7 +16,8 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
Architecture Level of support Constraints
============= ================ ==============================================
``arm64`` Maintained Little Endian only.
-``loongarch`` Maintained -
+``loongarch`` Maintained \-
+``riscv`` Maintained ``riscv64`` only.
``um`` Maintained ``x86_64`` only.
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index 081397827a7e..4bb6ac12d482 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -64,6 +64,63 @@ but it is intended that coverage is expanded as time goes on. "Leaf" modules
(e.g. drivers) should not use the C bindings directly. Instead, subsystems
should provide as-safe-as-possible abstractions as needed.
+.. code-block::
+
+ rust/bindings/
+ (rust/helpers.c)
+
+ include/ -----+ <-+
+ | |
+ drivers/ rust/kernel/ +----------+ <-+ |
+ fs/ | bindgen | |
+ .../ +-------------------+ +----------+ --+ |
+ | Abstractions | | |
+ +---------+ | +------+ +------+ | +----------+ | |
+ | my_foo | -----> | | foo | | bar | | -------> | Bindings | <-+ |
+ | driver | Safe | | sub- | | sub- | | Unsafe | | |
+ +---------+ | |system| |system| | | bindings | <-----+
+ | | +------+ +------+ | | crate | |
+ | | kernel crate | +----------+ |
+ | +-------------------+ |
+ | |
+ +------------------# FORBIDDEN #--------------------------------+
+
+The main idea is to encapsulate all direct interaction with the kernel's C APIs
+into carefully reviewed and documented abstractions. Then users of these
+abstractions cannot introduce undefined behavior (UB) as long as:
+
+#. The abstractions are correct ("sound").
+#. Any ``unsafe`` blocks respect the safety contract necessary to call the
+ operations inside the block. Similarly, any ``unsafe impl``\ s respect the
+ safety contract necessary to implement the trait.
+
+Bindings
+~~~~~~~~
+
+When a C header from ``include/`` is included in
+``rust/bindings/bindings_helper.h``, the ``bindgen`` tool auto-generates the
+bindings for the included subsystem. After building, see the ``*_generated.rs``
+output files in the ``rust/bindings/`` directory.
+
+For parts of the C header that ``bindgen`` does not auto-generate, e.g. C
+``inline`` functions or non-trivial macros, it is acceptable to add a small
+wrapper function to ``rust/helpers.c`` to make them available to the Rust side
+as well.
+
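+As an illustration only (the ``foo_enable()`` function, its header, and the
+``struct foo_device`` type below are hypothetical, not taken from the tree),
+such a wrapper usually just forwards to the C ``inline`` function so that an
+out-of-line symbol exists for the generated bindings to call::
+
+  /* Hypothetical helper in rust/helpers.c. */
+  #include <linux/foo.h>  /* assumed header declaring the inline foo_enable() */
+
+  int rust_helper_foo_enable(struct foo_device *dev)
+  {
+          /* Forward to the inline function so a real, callable symbol exists. */
+          return foo_enable(dev);
+  }
+  EXPORT_SYMBOL_GPL(rust_helper_foo_enable);
+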
+Abstractions
+~~~~~~~~~~~~
+
+Abstractions are the layer between the bindings and the in-kernel users. They
+are located in ``rust/kernel/`` and their role is to encapsulate the unsafe
+access to the bindings into an as-safe-as-possible API that they expose to their
+users. Users of the abstractions include things like drivers or file systems
+written in Rust.
+
+Besides the safety aspect, the abstractions are supposed to be "ergonomic", in
+the sense that they turn the C interfaces into "idiomatic" Rust code. Basic
+examples are to turn the C resource acquisition and release into Rust
+constructors and destructors or C integer error codes into Rust's ``Result``\ s.
+
Conditional compilation
-----------------------
diff --git a/Documentation/rust/testing.rst b/Documentation/rust/testing.rst
index 6658998d1b6c..acfd0c2be48d 100644
--- a/Documentation/rust/testing.rst
+++ b/Documentation/rust/testing.rst
@@ -6,10 +6,11 @@ Testing
This document contains useful information on how to test the Rust code in the
kernel.
-There are two sorts of tests:
+There are three sorts of tests:
- The KUnit tests.
- The ``#[test]`` tests.
+- The Kselftests.
The KUnit tests
---------------
@@ -133,3 +134,25 @@ Additionally, there are the ``#[test]`` tests. These can be run using the
This requires the kernel ``.config`` and downloads external repositories. It
runs the ``#[test]`` tests on the host (currently) and thus is fairly limited in
what these tests can test.
+
+The Kselftests
+--------------
+
+Kselftests are also available in the ``tools/testing/selftests/rust`` folder.
+
+The kernel config options required for the tests are listed in the
+``tools/testing/selftests/rust/config`` file and can be included with the aid
+of the ``merge_config.sh`` script::
+
+ ./scripts/kconfig/merge_config.sh .config tools/testing/selftests/rust/config
+
+The kselftests are built within the kernel source tree and are intended to
+be executed on a system that is running the same kernel.
+
+Once a kernel matching the source tree has been installed and booted, the
+tests can be compiled and executed using the following command::
+
+ make TARGETS="rust" kselftest
+
+Refer to Documentation/dev-tools/kselftest.rst for the general Kselftest
+documentation.
diff --git a/Documentation/scheduler/sched-domains.rst b/Documentation/scheduler/sched-domains.rst
index e57ad28301bd..5e996fe973b1 100644
--- a/Documentation/scheduler/sched-domains.rst
+++ b/Documentation/scheduler/sched-domains.rst
@@ -31,21 +31,21 @@ is treated as one entity. The load of a group is defined as the sum of the
load of each of its member CPUs, and only when the load of a group becomes
out of balance are tasks moved between groups.
-In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU
-through scheduler_tick(). It raises a softirq after the next regularly scheduled
+In kernel/sched/core.c, sched_balance_trigger() is run periodically on each CPU
+through sched_tick(). It raises a softirq after the next regularly scheduled
rebalancing event for the current runqueue has arrived. The actual load
-balancing workhorse, run_rebalance_domains()->rebalance_domains(), is then run
+balancing workhorse, sched_balance_softirq()->sched_balance_domains(), is then run
in softirq context (SCHED_SOFTIRQ).
The latter function takes two arguments: the runqueue of the current CPU and whether
-the CPU was idle at the time the scheduler_tick() happened and iterates over all
+the CPU was idle at the time the sched_tick() happened and iterates over all
sched domains our CPU is on, starting from its base domain and going up the ->parent
chain. While doing that, it checks to see if the current domain has exhausted its
-rebalance interval. If so, it runs load_balance() on that domain. It then checks
+rebalance interval. If so, it runs sched_balance_rq() on that domain. It then checks
the parent sched_domain (if it exists), and the parent of the parent and so
forth.
-Initially, load_balance() finds the busiest group in the current sched domain.
+Initially, sched_balance_rq() finds the busiest group in the current sched domain.
If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
that group. If it manages to find such a runqueue, it locks both our initial
CPU's runqueue and the newly found busiest one and starts moving tasks from it
diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst
index 03c062915998..7c2b16c4729d 100644
--- a/Documentation/scheduler/sched-stats.rst
+++ b/Documentation/scheduler/sched-stats.rst
@@ -2,6 +2,11 @@
Scheduler Statistics
====================
+Version 16 of schedstats changed the order of definitions within
+'enum cpu_idle_type', which changed the order of [CPU_MAX_IDLE_TYPES]
+columns in show_schedstat(). In particular, CPU_IDLE
+and __CPU_NOT_IDLE changed places. The size of the array is unchanged.
+
Version 15 of schedstats dropped counters for some sched_yield:
yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is
identical to version 14.
@@ -72,53 +77,53 @@ domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
The first field is a bit mask indicating what cpus this domain operates over.
-The next 24 are a variety of load_balance() statistics in grouped into types
+The next 24 are a variety of sched_balance_rq() statistics grouped into types
of idleness (idle, busy, and newly idle):
- 1) # of times in this domain load_balance() was called when the
+ 1) # of times in this domain sched_balance_rq() was called when the
cpu was idle
- 2) # of times in this domain load_balance() checked but found
+ 2) # of times in this domain sched_balance_rq() checked but found
the load did not require balancing when the cpu was idle
- 3) # of times in this domain load_balance() tried to move one or
+ 3) # of times in this domain sched_balance_rq() tried to move one or
more tasks and failed, when the cpu was idle
4) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was idle
+ sched_balance_rq() in this domain when the cpu was idle
5) # of times in this domain pull_task() was called when the cpu
was idle
6) # of times in this domain pull_task() was called even though
the target task was cache-hot when idle
- 7) # of times in this domain load_balance() was called but did
+ 7) # of times in this domain sched_balance_rq() was called but did
not find a busier queue while the cpu was idle
8) # of times in this domain a busier queue was found while the
cpu was idle but no busier group was found
- 9) # of times in this domain load_balance() was called when the
+ 9) # of times in this domain sched_balance_rq() was called when the
cpu was busy
- 10) # of times in this domain load_balance() checked but found the
+ 10) # of times in this domain sched_balance_rq() checked but found the
load did not require balancing when busy
- 11) # of times in this domain load_balance() tried to move one or
+ 11) # of times in this domain sched_balance_rq() tried to move one or
more tasks and failed, when the cpu was busy
12) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was busy
+ sched_balance_rq() in this domain when the cpu was busy
13) # of times in this domain pull_task() was called when busy
14) # of times in this domain pull_task() was called even though the
target task was cache-hot when busy
- 15) # of times in this domain load_balance() was called but did not
+ 15) # of times in this domain sched_balance_rq() was called but did not
find a busier queue while the cpu was busy
16) # of times in this domain a busier queue was found while the cpu
was busy but no busier group was found
- 17) # of times in this domain load_balance() was called when the
+ 17) # of times in this domain sched_balance_rq() was called when the
cpu was just becoming idle
- 18) # of times in this domain load_balance() checked but found the
+ 18) # of times in this domain sched_balance_rq() checked but found the
load did not require balancing when the cpu was just becoming idle
- 19) # of times in this domain load_balance() tried to move one or more
+ 19) # of times in this domain sched_balance_rq() tried to move one or more
tasks and failed, when the cpu was just becoming idle
20) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was just becoming idle
+ sched_balance_rq() in this domain when the cpu was just becoming idle
21) # of times in this domain pull_task() was called when newly idle
22) # of times in this domain pull_task() was called even though the
target task was cache-hot when just becoming idle
- 23) # of times in this domain load_balance() was called but did not
+ 23) # of times in this domain sched_balance_rq() was called but did not
find a busier queue while the cpu was just becoming idle
24) # of times in this domain a busier queue was found while the cpu
was just becoming idle but no busier group was found
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 022198c51350..2df29b92e196 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -42,18 +42,18 @@ This version of the document roughly matches linux kernel version 2.6.8 .
Documentation
=============
There is a SCSI documentation directory within the kernel source tree,
-typically Documentation/scsi . Most documents are in plain
-(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
+typically Documentation/scsi. Most documents are in reStructuredText
+format. This file is named scsi_mid_low_api.rst and can be
found in that directory. A more recent copy of this document may be found
-at http://web.archive.org/web/20070107183357rn_1/sg.torque.net/scsi/.
-Many LLDs are documented there (e.g. aic7xxx.txt). The SCSI mid-level is
-briefly described in scsi.txt which contains a url to a document
-describing the SCSI subsystem in the lk 2.4 series. Two upper level
-drivers have documents in that directory: st.txt (SCSI tape driver) and
-scsi-generic.txt (for the sg driver).
-
-Some documentation (or urls) for LLDs may be found in the C source code
-or in the same directory as the C source code. For example to find a url
+at https://docs.kernel.org/scsi/scsi_mid_low_api.html. Many LLDs are
+documented in Documentation/scsi (e.g. aic7xxx.rst). The SCSI mid-level is
+briefly described in scsi.rst which contains a URL to a document describing
+the SCSI subsystem in the Linux Kernel 2.4 series. Two upper level
+drivers have documents in that directory: st.rst (SCSI tape driver) and
+scsi-generic.rst (for the sg driver).
+
+Some documentation (or URLs) for LLDs may be found in the C source code
+or in the same directory as the C source code. For example to find a URL
about the USB mass storage driver see the
/usr/src/linux/drivers/usb/storage directory.
diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst
index b73eb764a001..6d80d464ab6e 100644
--- a/Documentation/security/SCTP.rst
+++ b/Documentation/security/SCTP.rst
@@ -81,7 +81,7 @@ A summary of the ``@optname`` entries is as follows::
destination addresses.
SCTP_SENDMSG_CONNECT - Initiate a connection that is generated by a
- sendmsg(2) or sctp_sendmsg(3) on a new asociation.
+ sendmsg(2) or sctp_sendmsg(3) on a new association.
SCTP_PRIMARY_ADDR - Set local primary address.
diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
index e989b9802f92..f4d7e162d5e4 100644
--- a/Documentation/security/keys/trusted-encrypted.rst
+++ b/Documentation/security/keys/trusted-encrypted.rst
@@ -42,6 +42,14 @@ safe.
randomly generated and fused into each SoC at manufacturing time.
Otherwise, a common fixed test key is used instead.
+ (4) DCP (Data Co-Processor: crypto accelerator of various i.MX SoCs)
+
+ Rooted to a one-time programmable key (OTP) that is generally burnt
+ in the on-chip fuses and is accessible to the DCP encryption engine only.
+ DCP provides two keys that can be used as root of trust: the OTP key
+ and the UNIQUE key. Default is to use the UNIQUE key, but selecting
+ the OTP key can be done via a module parameter (dcp_use_otp_key).
+
* Execution isolation
(1) TPM
@@ -57,6 +65,12 @@ safe.
Fixed set of operations running in isolated execution environment.
+ (4) DCP
+
+ Fixed set of cryptographic operations running in isolated execution
+ environment. Only basic blob key encryption is executed there.
+ The actual key sealing/unsealing is done on main processor/kernel space.
+
* Optional binding to platform integrity state
(1) TPM
@@ -79,6 +93,11 @@ safe.
Relies on the High Assurance Boot (HAB) mechanism of NXP SoCs
for platform integrity.
+ (4) DCP
+
+ Relies on Secure/Trusted boot process (called HAB by vendor) for
+ platform integrity.
+
* Interfaces and APIs
(1) TPM
@@ -94,6 +113,11 @@ safe.
Interface is specific to silicon vendor.
+ (4) DCP
+
+ Vendor-specific API that is implemented as part of the DCP crypto driver in
+ ``drivers/crypto/mxs-dcp.c``.
+
* Threat model
The strength and appropriateness of a particular trust source for a given
@@ -129,6 +153,13 @@ selected trust source:
CAAM HWRNG, enable CRYPTO_DEV_FSL_CAAM_RNG_API and ensure the device
is probed.
+ * DCP (Data Co-Processor: crypto accelerator of various i.MX SoCs)
+
+ The DCP hardware device itself does not provide a dedicated RNG interface,
+ so the kernel default RNG is used. SoCs with DCP like the i.MX6ULL do have
+ a dedicated hardware RNG that is independent from DCP which can be enabled
+ to back the kernel RNG.
+
Users may override this by specifying ``trusted.rng=kernel`` on the kernel
command-line to override the used RNG with the kernel's random number pool.
@@ -231,6 +262,19 @@ Usage::
CAAM-specific format. The key length for new keys is always in bytes.
Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
+Trusted Keys usage: DCP
+-----------------------
+
+Usage::
+
+ keyctl add trusted name "new keylen" ring
+ keyctl add trusted name "load hex_blob" ring
+ keyctl print keyid
+
+"keyctl print" returns an ASCII hex copy of the sealed key, which is in format
+specific to this DCP key-blob implementation. The key length for new keys is
+always in bytes. Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
+
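+The ``keyctl`` commands above are the documented interface; as a rough,
+hypothetical sketch, the same request can also be issued programmatically
+through the ``add_key(2)`` system call (here via libkeyutils), with the
+``"new 32"`` payload mirroring the first command above. Whether the resulting
+trusted key is DCP-sealed depends on the configured trust source::
+
+  /* Illustrative only: create a 32-byte trusted key called "kmk". */
+  #include <keyutils.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+          const char payload[] = "new 32";
+          key_serial_t key = add_key("trusted", "kmk", payload,
+                                     strlen(payload), KEY_SPEC_USER_KEYRING);
+
+          if (key < 0) {
+                  perror("add_key");
+                  return 1;
+          }
+          printf("trusted key id: %d\n", key);
+          return 0;
+  }
+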
Encrypted Keys usage
--------------------
@@ -426,3 +470,12 @@ string length.
privkey is the binary representation of TPM2B_PUBLIC excluding the
initial TPM2B header which can be reconstructed from the ASN.1 octet
string length.
+
+DCP Blob Format
+---------------
+
+.. kernel-doc:: security/keys/trusted-keys/trusted_dcp.c
+ :doc: dcp blob format
+
+.. kernel-doc:: security/keys/trusted-keys/trusted_dcp.c
+ :identifiers: struct dcp_blob_fmt
diff --git a/Documentation/security/snp-tdx-threat-model.rst b/Documentation/security/snp-tdx-threat-model.rst
index ec66f2ed80c9..3a2d41d2e645 100644
--- a/Documentation/security/snp-tdx-threat-model.rst
+++ b/Documentation/security/snp-tdx-threat-model.rst
@@ -4,7 +4,7 @@ Confidential Computing in Linux for x86 virtualization
.. contents:: :local:
-By: Elena Reshetova <elena.reshetova@intel.com> and Carlos Bilbao <carlos.bilbao@amd.com>
+By: Elena Reshetova <elena.reshetova@intel.com> and Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
Motivation
==========
diff --git a/Documentation/security/tpm/index.rst b/Documentation/security/tpm/index.rst
index fc40e9f23c85..fa593d960040 100644
--- a/Documentation/security/tpm/index.rst
+++ b/Documentation/security/tpm/index.rst
@@ -5,6 +5,8 @@ Trusted Platform Module documentation
.. toctree::
tpm_event_log
+ tpm-security
+ tpm_tis
tpm_vtpm_proxy
xen-tpmfront
tpm_ftpm_tee
diff --git a/Documentation/security/tpm/tpm-security.rst b/Documentation/security/tpm/tpm-security.rst
new file mode 100644
index 000000000000..4f633f251033
--- /dev/null
+++ b/Documentation/security/tpm/tpm-security.rst
@@ -0,0 +1,216 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+TPM Security
+============
+
+The object of this document is to describe how we make the kernel's
+use of the TPM reasonably robust in the face of external snooping and
+packet alteration attacks (called passive and active interposer attacks
+in the literature). This document covers TPM 2.0.
+
+Introduction
+------------
+
+The TPM is usually a discrete chip attached to a PC via some type of
+low bandwidth bus. There are exceptions to this, such as the Intel
+PTT, a software TPM running inside a software environment close to the
+CPU, which is subject to different attacks; but at the moment most
+hardened security environments require a discrete hardware TPM, which
+is the use case discussed here.
+
+Snooping and Alteration Attacks against the bus
+-----------------------------------------------
+
+The current state of the art for snooping the bus is the `TPM Genie`_
+hardware interposer, a simple external device that can be installed in
+a couple of seconds on any system or laptop. Recently, attacks were
+successfully demonstrated against the `Windows Bitlocker TPM`_ system,
+and most recently the same kind of `attack against TPM based Linux disk
+encryption`_ schemes was demonstrated. The next phase of research seems
+to be hacking existing devices on the bus to act as interposers, so the
+fact that the attacker requires physical access for a few seconds might
+evaporate. However, the goal of this document is to protect TPM
+secrets and integrity as far as we are able in this environment and to
+try to ensure that if we can't prevent the attack then at least we can
+detect it.
+
+Unfortunately, most of the TPM functionality, including the hardware
+reset capability, can be controlled by an attacker who has access to
+the bus, so we'll discuss some of the disruption possibilities below.
+
+Measurement (PCR) Integrity
+---------------------------
+
+Since the attacker can send their own commands to the TPM, they can
+send arbitrary PCR extends and thus disrupt the measurement system,
+which would be an annoying denial of service attack. However, there
+are two, more serious, classes of attack aimed at entities sealed to
+trust measurements.
+
+1. The attacker could intercept all PCR extends coming from the system
+ and completely substitute their own values, producing a replay of
+ an untampered state that would cause PCR measurements to attest to
+   a trusted state and release secrets.
+
+2. At some point in time the attacker could reset the TPM, clearing
+ the PCRs and then send down their own measurements which would
+ effectively overwrite the boot time measurements the TPM has
+ already done.
+
+The first can be thwarted by always doing HMAC protection of the PCR
+extend and read commands, meaning measurement values cannot be
+substituted without producing a detectable HMAC failure in the
+response. However, the second can only really be detected by relying
+on some sort of protection mechanism that changes across a TPM
+reset.
+
+Secrets Guarding
+----------------
+
+Certain information passing in and out of the TPM, such as key sealing
+and private key import and random number generation, is vulnerable to
+interception, which HMAC protection alone cannot guard against, so
+for these types of command we must also employ request and response
+encryption to prevent the loss of secret information.
+
+Establishing Initial Trust with the TPM
+---------------------------------------
+
+In order to provide security from the beginning, an initial shared or
+asymmetric secret must be established which must also be unknown to
+the attacker. The most obvious avenues for this are the endorsement
+and storage seeds, which can be used to derive asymmetric keys.
+However, using these keys is difficult because the only way to pass
+them into the kernel would be on the command line, which requires
+extensive support in the boot system, and there's no guarantee that
+either hierarchy would not have some type of authorization.
+
+The mechanism chosen for the Linux Kernel is to derive the primary
+elliptic curve key from the null seed using the standard storage seed
+parameters. The null seed has two advantages: firstly, the hierarchy
+physically cannot have an authorization, so we are always able to use
+it; and secondly, the null seed changes across TPM resets, meaning if
+we establish trust on the null seed at start of day, all sessions
+salted with the derived key will fail if the TPM is reset and the seed
+changes.
+
+Obviously, since we use the null seed without any other prior shared
+secrets, we have to create and read the initial public key, which could, of
+course, be intercepted and substituted by the bus interposer.
+However, the TPM has a key certification mechanism (using the EK
+endorsement certificate, creating an attestation identity key and
+certifying the null seed primary with that key) which is too complex
+to run within the kernel, so we keep a copy of the null primary key
+name, which is what is exported via sysfs so user-space can run the
+full certification when it boots. The definitive guarantee here is
+that if the null primary key certifies correctly, you know all your
+TPM transactions since start of day were secure and if it doesn't, you
+know there's an interposer on your system (and that any secret used
+during boot may have been leaked).
+
+Stacking Trust
+--------------
+
+In the current null primary scenario, the TPM must be completely
+cleared before handing it on to the next consumer. However, the kernel
+hands to user-space the name of the derived null seed key, which can
+then be verified by certification in user-space. Therefore, this chain
+of name handoff can be used between the various boot components as
+well (via an unspecified mechanism). For instance, grub could use the
+null seed scheme for security and hand the name off to the kernel in
+the boot area. The kernel could make its own derivation of the key
+and the name and know definitively that, if they differ from the
+handed-off version, tampering has occurred. Thus it becomes possible to
+chain arbitrary boot components together (UEFI to grub to kernel) via
+the name handoff, provided each successive component knows how to
+collect the name and verifies it against its derived key.
+
+Session Properties
+------------------
+
+All TPM commands the kernel uses allow sessions. HMAC sessions may be
+used to check the integrity of requests and responses, and decrypt and
+encrypt flags may be used to shield parameters and responses. The
+HMAC and encryption keys are usually derived from the shared
+authorization secret, but for a lot of kernel operations that is well
+known (and usually empty). Thus, every HMAC session used by the
+kernel must be created using the null primary key as the salt key,
+which provides a cryptographic input into the session key
+derivation. The kernel therefore creates the null primary key once (as a
+volatile TPM handle) and keeps it around in a saved context stored in
+tpm_chip for every in-kernel use of the TPM. Currently, because of a
+lack of de-gapping in the in-kernel resource manager, the session must
+be created and destroyed for each operation, but, in future, a single
+session may also be reused for the in-kernel HMAC, encryption and
+decryption sessions.
+
+Protection Types
+----------------
+
+For every in-kernel operation we use null primary salted HMAC to
+protect the integrity. Additionally, we use parameter encryption to
+protect key sealing and parameter decryption to protect key unsealing
+and random number generation.
+
+Null Primary Key Certification in Userspace
+===========================================
+
+Every TPM comes shipped with a couple of X.509 certificates for the
+primary endorsement key. This document assumes that the Elliptic
+Curve version of the certificate exists at 01C00002, but will work
+equally well with the RSA certificate (at 01C00001).
+
+The first step in the certification is primary creation using the
+template from the `TCG EK Credential Profile`_ which allows comparison
+of the generated primary key against the one in the certificate (the
+public key must match). Note that generation of the EK primary
+requires the EK hierarchy password, but a pre-generated version of the
+EC primary should exist at 81010002 and a TPM2_ReadPublic() may be
+performed on this without needing the key authority. Next, the
+certificate itself must be verified to chain back to the manufacturer
+root (which should be published on the manufacturer website). Once
+this is done, an attestation key (AK) is generated within the TPM and
+its name and the EK public key can be used to encrypt a secret using
+TPM2_MakeCredential. The TPM then runs TPM2_ActivateCredential, which
+will only recover the secret if the binding between the TPM, the EK
+and the AK is true. The generated AK may now be used to run a
+certification of the null primary key whose name the kernel has
+exported. Since TPM2_MakeCredential/ActivateCredential are somewhat
+complicated, a more simplified process involving an externally
+generated private key is described below.
+
+This process is a simplified abbreviation of the usual privacy CA
+based attestation process. The assumption here is that the
+attestation is done by the TPM owner who thus has access to only the
+owner hierarchy. The owner creates an external public/private key
+pair (assume elliptic curve in this case) and wraps the private key
+for import using an inner wrapping process and parented to the EC
+derived storage primary. The TPM2_Import() is done using a parameter
+decryption HMAC session salted to the EK primary (which also does not
+require the EK key authority), meaning that the inner wrapping key is
+the encrypted parameter and thus the TPM will not be able to perform
+the import unless it possesses the certified EK. So if the command
+succeeds and the HMAC verifies on return, we know we have a loadable
+copy of the private key only for the certified TPM. This key is now
+loaded into the TPM and the Storage primary flushed (to free up space
+for the null key generation).
+
+The null EC primary is now generated using the Storage profile
+outlined in the `TCG TPM v2.0 Provisioning Guidance`_; the name of
+this key (the hash of the public area) is computed and compared to the
+null seed name presented by the kernel in
+/sys/class/tpm/tpm0/null_name. If the names do not match, the TPM is
+compromised. If the names match, the user performs a TPM2_Certify()
+using the null primary as the object handle and the loaded private key
+as the sign handle and providing randomized qualifying data. The
+signature of the returned certifyInfo is verified against the public
+part of the loaded private key and the qualifying data checked to
+prevent replay. If all of these tests pass, the user is now assured
+that TPM integrity and privacy was preserved across the entire boot
+sequence of this kernel.
+
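+As a small, purely illustrative helper (not part of the procedure above, and
+assuming the first TPM instance is tpm0), the exported name can be read from
+sysfs as shown below; computing your own null primary name and running the
+TPM2_Certify() itself would be done with a TSS library and is not shown::
+
+  /* Read the null primary name the kernel exports for certification. */
+  #include <stdio.h>
+
+  int main(void)
+  {
+          char name[257];
+          FILE *f = fopen("/sys/class/tpm/tpm0/null_name", "r");
+
+          if (!f) {
+                  perror("null_name");
+                  return 1;
+          }
+          if (fgets(name, sizeof(name), f))
+                  /* Compare this against your independently derived name. */
+                  printf("kernel null primary name: %s", name);
+          fclose(f);
+          return 0;
+  }
+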
+.. _TPM Genie: https://www.nccgroup.trust/globalassets/about-us/us/documents/tpm-genie.pdf
+.. _Windows Bitlocker TPM: https://dolosgroup.io/blog/2021/7/9/from-stolen-laptop-to-inside-the-company-network
+.. _attack against TPM based Linux disk encryption: https://www.secura.com/blog/tpm-sniffing-attacks-against-non-bitlocker-targets
+.. _TCG EK Credential Profile: https://trustedcomputinggroup.org/resource/tcg-ek-credential-profile-for-tpm-family-2-0/
+.. _TCG TPM v2.0 Provisioning Guidance: https://trustedcomputinggroup.org/resource/tcg-tpm-v2-0-provisioning-guidance/
diff --git a/Documentation/security/tpm/tpm_tis.rst b/Documentation/security/tpm/tpm_tis.rst
new file mode 100644
index 000000000000..b9637f295638
--- /dev/null
+++ b/Documentation/security/tpm/tpm_tis.rst
@@ -0,0 +1,46 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+TPM FIFO interface driver
+=========================
+
+TCG PTP Specification defines two interface types: FIFO and CRB. The former is
+based on sequenced read and write operations, and the latter is based on a
+buffer containing the full command or response.
+
+The FIFO (First-In-First-Out) interface is used by the tpm_tis_core dependent
+drivers. Originally Linux had only a driver called tpm_tis, which covered the
+memory mapped (aka MMIO) interface, but it was later extended to cover other
+physical interfaces supported by the TCG standard.
+
+For the historical reasons above, the original MMIO driver is called tpm_tis
+and the framework for FIFO drivers is named tpm_tis_core. The suffix "tis" in
+tpm_tis comes from the TPM Interface Specification, which is the hardware
+interface specification for TPM 1.x chips.
+
+Communication is based on a 20 KiB buffer shared by the TPM chip through a
+hardware bus or memory map, depending on the physical wiring. The buffer is
+further split into five equal-size 4 KiB buffers, which provide equivalent
+sets of registers for communication between the CPU and TPM. These
+communication endpoints are called localities in the TCG terminology.
+
+When the kernel wants to send commands to the TPM chip, it first reserves
+locality 0 by setting the requestUse bit in the TPM_ACCESS register. The bit is
+cleared by the chip when the access is granted. Once it completes its
+communication, the kernel writes the TPM_ACCESS.activeLocality bit. This
+informs the chip that the locality has been relinquished.
+
+Pending localities are served by the chip in descending order, one at
+a time:
+
+- Locality 0 has the lowest priority.
+- Locality 5 has the highest priority.
+
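+As a simplified, illustrative sketch of the handshake described above (the
+register offset and bit values are as given in the TCG PTP specification, and
+the ``regs`` pointer stands in for the driver's mapped MMIO window; a real
+driver polls with a timeout and supports the other physical interfaces too)::
+
+  #include <stdint.h>
+
+  #define TPM_ACCESS(l)              (0x0000u | ((l) << 12))
+  #define TPM_ACCESS_REQUEST_USE     0x02u
+  #define TPM_ACCESS_ACTIVE_LOCALITY 0x20u
+
+  static void request_locality(volatile uint8_t *regs, int l)
+  {
+          regs[TPM_ACCESS(l)] = TPM_ACCESS_REQUEST_USE;
+          /* The chip grants access by setting activeLocality
+           * (and clearing requestUse). */
+          while (!(regs[TPM_ACCESS(l)] & TPM_ACCESS_ACTIVE_LOCALITY))
+                  ;
+  }
+
+  static void relinquish_locality(volatile uint8_t *regs, int l)
+  {
+          /* Writing activeLocality back tells the chip we are done. */
+          regs[TPM_ACCESS(l)] = TPM_ACCESS_ACTIVE_LOCALITY;
+  }
+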
+Further information on the purpose and meaning of the localities can be found
+in section 3.2 of the TCG PC Client Platform TPM Profile Specification.
+
+References
+==========
+
+TCG PC Client Platform TPM Profile (PTP) Specification
+https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 2d2998faff62..801b0bb57e97 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -3976,7 +3976,7 @@ Driver with A Single Source File
Suppose you have a file xyz.c. Add the following two lines::
- snd-xyz-objs := xyz.o
+ snd-xyz-y := xyz.o
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
2. Create the Kconfig entry
@@ -4019,7 +4019,7 @@ located in the new subdirectory, sound/pci/xyz.
2. Under the directory ``sound/pci/xyz``, create a Makefile::
- snd-xyz-objs := xyz.o abc.o def.o
+ snd-xyz-y := xyz.o abc.o def.o
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
3. Create the Kconfig entry
diff --git a/Documentation/sound/soc/dapm-graph.svg b/Documentation/sound/soc/dapm-graph.svg
new file mode 100644
index 000000000000..d783672db815
--- /dev/null
+++ b/Documentation/sound/soc/dapm-graph.svg
@@ -0,0 +1,375 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by graphviz version 2.43.0 (0)
+ -->
+<!-- Title: G Pages: 1 -->
+<svg width="900pt" height="630pt"
+ viewBox="0.00 0.00 900.00 630.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 626)">
+<title>G</title>
+<polygon fill="white" stroke="transparent" points="-4,4 -4,-626 896,-626 896,4 -4,4"/>
+<g id="clust1" class="cluster">
+<title>ROOT</title>
+<polygon fill="none" stroke="dodgerblue" points="8,-537 8,-614 102,-614 102,-537 8,-537"/>
+<text text-anchor="middle" x="55" y="-598.8" font-family="sans-serif" font-size="14.00">ROOT</text>
+</g>
+<g id="clust2" class="cluster">
+<title>4000b000.audio&#45;controller</title>
+<polygon fill="none" stroke="dodgerblue" points="120,-378 120,-455 312,-455 312,-378 120,-378"/>
+<text text-anchor="middle" x="216" y="-439.8" font-family="sans-serif" font-size="14.00">4000b000.audio&#45;controller</text>
+</g>
+<g id="clust5" class="cluster">
+<title>cs42l51.0&#45;004a</title>
+<polygon fill="none" stroke="dodgerblue" points="330,-8 330,-614 884,-614 884,-8 330,-8"/>
+<text text-anchor="middle" x="607" y="-598.8" font-family="sans-serif" font-size="14.00">cs42l51.0&#45;004a</text>
+</g>
+<g id="clust9" class="cluster">
+<title>hdmi&#45;audio&#45;codec.1.auto</title>
+<polygon fill="none" stroke="dodgerblue" points="110,-463 110,-614 314,-614 314,-463 110,-463"/>
+<text text-anchor="middle" x="212" y="-598.8" font-family="sans-serif" font-size="14.00">hdmi&#45;audio&#45;codec.1.auto</text>
+</g>
+<!-- ROOT_Amplifier -->
+<g id="node1" class="node">
+<title>ROOT_Amplifier</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="93.5,-583 16.5,-583 16.5,-545 93.5,-545 93.5,-583"/>
+<text text-anchor="middle" x="55" y="-567.8" font-family="sans-serif" font-size="14.00">Amplifier</text>
+<text text-anchor="middle" x="55" y="-552.8" font-family="sans-serif" font-size="14.00">[out_drv]</text>
+</g>
+<!-- 4000b000.audio&#45;controller_capture -->
+<g id="node2" class="node">
+<title>4000b000.audio&#45;controller_capture</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="202,-424 128,-424 128,-386 202,-386 202,-424"/>
+<text text-anchor="middle" x="165" y="-408.8" font-family="sans-serif" font-size="14.00">capture</text>
+<text text-anchor="middle" x="165" y="-393.8" font-family="sans-serif" font-size="14.00">[dai_out]</text>
+</g>
+<!-- 4000b000.audio&#45;controller_playback -->
+<g id="node3" class="node">
+<title>4000b000.audio&#45;controller_playback</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="304,-424 230,-424 230,-386 304,-386 304,-424"/>
+<text text-anchor="middle" x="267" y="-408.8" font-family="sans-serif" font-size="14.00">playback</text>
+<text text-anchor="middle" x="267" y="-393.8" font-family="sans-serif" font-size="14.00">[dai_in]</text>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_I2S Playback -->
+<g id="node28" class="node">
+<title>hdmi&#45;audio&#45;codec.1.auto_I2S Playback</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="306,-583 208,-583 208,-545 306,-545 306,-583"/>
+<text text-anchor="middle" x="257" y="-567.8" font-family="sans-serif" font-size="14.00">I2S Playback</text>
+<text text-anchor="middle" x="257" y="-552.8" font-family="sans-serif" font-size="14.00">[dai_in]</text>
+</g>
+<!-- 4000b000.audio&#45;controller_playback&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_I2S Playback -->
+<g id="edge21" class="edge">
+<title>4000b000.audio&#45;controller_playback&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_I2S Playback</title>
+<path fill="none" stroke="black" d="M276.84,-424.14C282.19,-435.06 288.26,-449.42 291,-463 295.05,-483.04 296.67,-489.36 291,-509 288.25,-518.54 283.26,-528.01 277.93,-536.3"/>
+<polygon fill="black" stroke="black" points="274.89,-534.55 272.11,-544.78 280.66,-538.51 274.89,-534.55"/>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_Capture -->
+<g id="node4" class="node">
+<title>hdmi&#45;audio&#45;codec.1.auto_Capture</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="192,-509 118,-509 118,-471 192,-471 192,-509"/>
+<text text-anchor="middle" x="155" y="-493.8" font-family="sans-serif" font-size="14.00">Capture</text>
+<text text-anchor="middle" x="155" y="-478.8" font-family="sans-serif" font-size="14.00">[dai_out]</text>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_Capture&#45;&gt;4000b000.audio&#45;controller_capture -->
+<g id="edge1" class="edge">
+<title>hdmi&#45;audio&#45;codec.1.auto_Capture&#45;&gt;4000b000.audio&#45;controller_capture</title>
+<path fill="none" stroke="black" d="M157.17,-470.99C158.46,-460.3 160.12,-446.5 161.58,-434.37"/>
+<polygon fill="black" stroke="black" points="165.08,-434.61 162.8,-424.26 158.13,-433.77 165.08,-434.61"/>
+</g>
+<!-- cs42l51.0&#45;004a_AIN1L -->
+<g id="node5" class="node">
+<title>cs42l51.0&#45;004a_AIN1L</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="836.5,-583 775.5,-583 775.5,-545 836.5,-545 836.5,-583"/>
+<text text-anchor="middle" x="806" y="-567.8" font-family="sans-serif" font-size="14.00">AIN1L</text>
+<text text-anchor="middle" x="806" y="-552.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_PGA&#45;ADC Mux Left -->
+<g id="node22" class="node">
+<title>cs42l51.0&#45;004a_PGA&#45;ADC Mux Left</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="876,-509 736,-509 736,-471 876,-471 876,-509"/>
+<text text-anchor="middle" x="806" y="-493.8" font-family="sans-serif" font-size="14.00">PGA&#45;ADC Mux Left</text>
+<text text-anchor="middle" x="806" y="-478.8" font-family="sans-serif" font-size="14.00">[mux]</text>
+</g>
+<!-- cs42l51.0&#45;004a_AIN1L&#45;&gt;cs42l51.0&#45;004a_PGA&#45;ADC Mux Left -->
+<g id="edge14" class="edge">
+<title>cs42l51.0&#45;004a_AIN1L&#45;&gt;cs42l51.0&#45;004a_PGA&#45;ADC Mux Left</title>
+<path fill="none" stroke="black" d="M806,-544.83C806,-537.13 806,-527.97 806,-519.42"/>
+<polygon fill="black" stroke="black" points="809.5,-519.41 806,-509.41 802.5,-519.41 809.5,-519.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_AIN1R -->
+<g id="node6" class="node">
+<title>cs42l51.0&#45;004a_AIN1R</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="738.5,-583 677.5,-583 677.5,-545 738.5,-545 738.5,-583"/>
+<text text-anchor="middle" x="708" y="-567.8" font-family="sans-serif" font-size="14.00">AIN1R</text>
+<text text-anchor="middle" x="708" y="-552.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_PGA&#45;ADC Mux Right -->
+<g id="node23" class="node">
+<title>cs42l51.0&#45;004a_PGA&#45;ADC Mux Right</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="717.5,-509 568.5,-509 568.5,-471 717.5,-471 717.5,-509"/>
+<text text-anchor="middle" x="643" y="-493.8" font-family="sans-serif" font-size="14.00">PGA&#45;ADC Mux Right</text>
+<text text-anchor="middle" x="643" y="-478.8" font-family="sans-serif" font-size="14.00">[mux]</text>
+</g>
+<!-- cs42l51.0&#45;004a_AIN1R&#45;&gt;cs42l51.0&#45;004a_PGA&#45;ADC Mux Right -->
+<g id="edge15" class="edge">
+<title>cs42l51.0&#45;004a_AIN1R&#45;&gt;cs42l51.0&#45;004a_PGA&#45;ADC Mux Right</title>
+<path fill="none" stroke="black" d="M691.6,-544.83C683.96,-536.37 674.73,-526.15 666.38,-516.9"/>
+<polygon fill="black" stroke="black" points="668.92,-514.49 659.62,-509.41 663.73,-519.18 668.92,-514.49"/>
+</g>
+<!-- cs42l51.0&#45;004a_AIN2L -->
+<g id="node7" class="node">
+<title>cs42l51.0&#45;004a_AIN2L</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="659.5,-583 598.5,-583 598.5,-545 659.5,-545 659.5,-583"/>
+<text text-anchor="middle" x="629" y="-567.8" font-family="sans-serif" font-size="14.00">AIN2L</text>
+<text text-anchor="middle" x="629" y="-552.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_AIN2R -->
+<g id="node8" class="node">
+<title>cs42l51.0&#45;004a_AIN2R</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="580.5,-583 519.5,-583 519.5,-545 580.5,-545 580.5,-583"/>
+<text text-anchor="middle" x="550" y="-567.8" font-family="sans-serif" font-size="14.00">AIN2R</text>
+<text text-anchor="middle" x="550" y="-552.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Capture -->
+<g id="node9" class="node">
+<title>cs42l51.0&#45;004a_Capture</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="692,-276 618,-276 618,-238 692,-238 692,-276"/>
+<text text-anchor="middle" x="655" y="-260.8" font-family="sans-serif" font-size="14.00">Capture</text>
+<text text-anchor="middle" x="655" y="-245.8" font-family="sans-serif" font-size="14.00">[dai_out]</text>
+</g>
+<!-- cs42l51.0&#45;004a_DAC Mux -->
+<g id="node10" class="node">
+<title>cs42l51.0&#45;004a_DAC Mux</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="598.5,-202 521.5,-202 521.5,-164 598.5,-164 598.5,-202"/>
+<text text-anchor="middle" x="560" y="-186.8" font-family="sans-serif" font-size="14.00">DAC Mux</text>
+<text text-anchor="middle" x="560" y="-171.8" font-family="sans-serif" font-size="14.00">[mux]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Left DAC -->
+<g id="node14" class="node">
+<title>cs42l51.0&#45;004a_Left DAC</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="548,-128 474,-128 474,-90 548,-90 548,-128"/>
+<text text-anchor="middle" x="511" y="-112.8" font-family="sans-serif" font-size="14.00">Left DAC</text>
+<text text-anchor="middle" x="511" y="-97.8" font-family="sans-serif" font-size="14.00">[dac]</text>
+</g>
+<!-- cs42l51.0&#45;004a_DAC Mux&#45;&gt;cs42l51.0&#45;004a_Left DAC -->
+<g id="edge9" class="edge">
+<title>cs42l51.0&#45;004a_DAC Mux&#45;&gt;cs42l51.0&#45;004a_Left DAC</title>
+<path fill="none" stroke="black" d="M547.64,-163.83C542.05,-155.62 535.34,-145.76 529.19,-136.73"/>
+<polygon fill="black" stroke="black" points="532.05,-134.71 523.53,-128.41 526.26,-138.65 532.05,-134.71"/>
+</g>
+<!-- cs42l51.0&#45;004a_Right DAC -->
+<g id="node26" class="node">
+<title>cs42l51.0&#45;004a_Right DAC</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="649.5,-128 566.5,-128 566.5,-90 649.5,-90 649.5,-128"/>
+<text text-anchor="middle" x="608" y="-112.8" font-family="sans-serif" font-size="14.00">Right DAC</text>
+<text text-anchor="middle" x="608" y="-97.8" font-family="sans-serif" font-size="14.00">[dac]</text>
+</g>
+<!-- cs42l51.0&#45;004a_DAC Mux&#45;&gt;cs42l51.0&#45;004a_Right DAC -->
+<g id="edge18" class="edge">
+<title>cs42l51.0&#45;004a_DAC Mux&#45;&gt;cs42l51.0&#45;004a_Right DAC</title>
+<path fill="none" stroke="black" d="M572.11,-163.83C577.53,-155.71 584.02,-145.96 589.99,-137.01"/>
+<polygon fill="black" stroke="black" points="593.09,-138.68 595.72,-128.41 587.27,-134.79 593.09,-138.68"/>
+</g>
+<!-- cs42l51.0&#45;004a_HPL -->
+<g id="node11" class="node">
+<title>cs42l51.0&#45;004a_HPL</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="546.5,-54 475.5,-54 475.5,-16 546.5,-16 546.5,-54"/>
+<text text-anchor="middle" x="511" y="-38.8" font-family="sans-serif" font-size="14.00">HPL</text>
+<text text-anchor="middle" x="511" y="-23.8" font-family="sans-serif" font-size="14.00">[output]</text>
+</g>
+<!-- cs42l51.0&#45;004a_HPR -->
+<g id="node12" class="node">
+<title>cs42l51.0&#45;004a_HPR</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="643.5,-54 572.5,-54 572.5,-16 643.5,-16 643.5,-54"/>
+<text text-anchor="middle" x="608" y="-38.8" font-family="sans-serif" font-size="14.00">HPR</text>
+<text text-anchor="middle" x="608" y="-23.8" font-family="sans-serif" font-size="14.00">[output]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Left ADC -->
+<g id="node13" class="node">
+<title>cs42l51.0&#45;004a_Left ADC</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="822,-350 748,-350 748,-312 822,-312 822,-350"/>
+<text text-anchor="middle" x="785" y="-334.8" font-family="sans-serif" font-size="14.00">Left ADC</text>
+<text text-anchor="middle" x="785" y="-319.8" font-family="sans-serif" font-size="14.00">[adc]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Left ADC&#45;&gt;cs42l51.0&#45;004a_Capture -->
+<g id="edge4" class="edge">
+<title>cs42l51.0&#45;004a_Left ADC&#45;&gt;cs42l51.0&#45;004a_Capture</title>
+<path fill="none" stroke="black" d="M752.2,-311.83C735.41,-302.54 714.8,-291.12 696.88,-281.2"/>
+<polygon fill="black" stroke="black" points="698.24,-277.95 687.79,-276.16 694.85,-284.07 698.24,-277.95"/>
+</g>
+<!-- cs42l51.0&#45;004a_Left DAC&#45;&gt;cs42l51.0&#45;004a_HPL -->
+<g id="edge6" class="edge">
+<title>cs42l51.0&#45;004a_Left DAC&#45;&gt;cs42l51.0&#45;004a_HPL</title>
+<path fill="none" stroke="black" d="M511,-89.83C511,-82.13 511,-72.97 511,-64.42"/>
+<polygon fill="black" stroke="black" points="514.5,-64.41 511,-54.41 507.5,-64.41 514.5,-64.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_Left PGA -->
+<g id="node15" class="node">
+<title>cs42l51.0&#45;004a_Left PGA</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="838,-424 764,-424 764,-386 838,-386 838,-424"/>
+<text text-anchor="middle" x="801" y="-408.8" font-family="sans-serif" font-size="14.00">Left PGA</text>
+<text text-anchor="middle" x="801" y="-393.8" font-family="sans-serif" font-size="14.00">[pga]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Left PGA&#45;&gt;cs42l51.0&#45;004a_Left ADC -->
+<g id="edge8" class="edge">
+<title>cs42l51.0&#45;004a_Left PGA&#45;&gt;cs42l51.0&#45;004a_Left ADC</title>
+<path fill="none" stroke="black" d="M796.96,-385.83C795.25,-378.13 793.22,-368.97 791.31,-360.42"/>
+<polygon fill="black" stroke="black" points="794.68,-359.42 789.09,-350.41 787.84,-360.93 794.68,-359.42"/>
+</g>
+<!-- cs42l51.0&#45;004a_MCLK -->
+<g id="node16" class="node">
+<title>cs42l51.0&#45;004a_MCLK</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="594.5,-350 525.5,-350 525.5,-312 594.5,-312 594.5,-350"/>
+<text text-anchor="middle" x="560" y="-334.8" font-family="sans-serif" font-size="14.00">MCLK</text>
+<text text-anchor="middle" x="560" y="-319.8" font-family="sans-serif" font-size="14.00">[supply]</text>
+</g>
+<!-- cs42l51.0&#45;004a_MCLK&#45;&gt;cs42l51.0&#45;004a_Capture -->
+<g id="edge2" class="edge">
+<title>cs42l51.0&#45;004a_MCLK&#45;&gt;cs42l51.0&#45;004a_Capture</title>
+<path fill="none" stroke="black" d="M583.97,-311.83C595.79,-302.88 610.2,-291.96 622.94,-282.3"/>
+<polygon fill="black" stroke="black" points="625.18,-284.99 631.04,-276.16 620.95,-279.41 625.18,-284.99"/>
+</g>
+<!-- cs42l51.0&#45;004a_Playback -->
+<g id="node24" class="node">
+<title>cs42l51.0&#45;004a_Playback</title>
+<polygon fill="none" stroke="#008b00" stroke-width="2" points="597,-276 523,-276 523,-238 597,-238 597,-276"/>
+<text text-anchor="middle" x="560" y="-260.8" font-family="sans-serif" font-size="14.00">Playback</text>
+<text text-anchor="middle" x="560" y="-245.8" font-family="sans-serif" font-size="14.00">[dai_in]</text>
+</g>
+<!-- cs42l51.0&#45;004a_MCLK&#45;&gt;cs42l51.0&#45;004a_Playback -->
+<g id="edge16" class="edge">
+<title>cs42l51.0&#45;004a_MCLK&#45;&gt;cs42l51.0&#45;004a_Playback</title>
+<path fill="none" stroke="black" d="M560,-311.83C560,-304.13 560,-294.97 560,-286.42"/>
+<polygon fill="black" stroke="black" points="563.5,-286.41 560,-276.41 556.5,-286.41 563.5,-286.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_MICL -->
+<g id="node17" class="node">
+<title>cs42l51.0&#45;004a_MICL</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="399.5,-509 338.5,-509 338.5,-471 399.5,-471 399.5,-509"/>
+<text text-anchor="middle" x="369" y="-493.8" font-family="sans-serif" font-size="14.00">MICL</text>
+<text text-anchor="middle" x="369" y="-478.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Mic Preamp Left -->
+<g id="node20" class="node">
+<title>cs42l51.0&#45;004a_Mic Preamp Left</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="461.5,-424 338.5,-424 338.5,-386 461.5,-386 461.5,-424"/>
+<text text-anchor="middle" x="400" y="-408.8" font-family="sans-serif" font-size="14.00">Mic Preamp Left</text>
+<text text-anchor="middle" x="400" y="-393.8" font-family="sans-serif" font-size="14.00">[mixer]</text>
+</g>
+<!-- cs42l51.0&#45;004a_MICL&#45;&gt;cs42l51.0&#45;004a_Mic Preamp Left -->
+<g id="edge12" class="edge">
+<title>cs42l51.0&#45;004a_MICL&#45;&gt;cs42l51.0&#45;004a_Mic Preamp Left</title>
+<path fill="none" stroke="black" d="M375.73,-470.99C379.8,-460.08 385.08,-445.94 389.68,-433.64"/>
+<polygon fill="black" stroke="black" points="392.96,-434.85 393.18,-424.26 386.4,-432.4 392.96,-434.85"/>
+</g>
+<!-- cs42l51.0&#45;004a_MICR -->
+<g id="node18" class="node">
+<title>cs42l51.0&#45;004a_MICR</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="501.5,-583 440.5,-583 440.5,-545 501.5,-545 501.5,-583"/>
+<text text-anchor="middle" x="471" y="-567.8" font-family="sans-serif" font-size="14.00">MICR</text>
+<text text-anchor="middle" x="471" y="-552.8" font-family="sans-serif" font-size="14.00">[input]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Mic Preamp Right -->
+<g id="node21" class="node">
+<title>cs42l51.0&#45;004a_Mic Preamp Right</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="550.5,-509 417.5,-509 417.5,-471 550.5,-471 550.5,-509"/>
+<text text-anchor="middle" x="484" y="-493.8" font-family="sans-serif" font-size="14.00">Mic Preamp Right</text>
+<text text-anchor="middle" x="484" y="-478.8" font-family="sans-serif" font-size="14.00">[mixer]</text>
+</g>
+<!-- cs42l51.0&#45;004a_MICR&#45;&gt;cs42l51.0&#45;004a_Mic Preamp Right -->
+<g id="edge13" class="edge">
+<title>cs42l51.0&#45;004a_MICR&#45;&gt;cs42l51.0&#45;004a_Mic Preamp Right</title>
+<path fill="none" stroke="black" d="M474.28,-544.83C475.67,-537.13 477.32,-527.97 478.87,-519.42"/>
+<polygon fill="black" stroke="black" points="482.34,-519.88 480.68,-509.41 475.45,-518.63 482.34,-519.88"/>
+</g>
+<!-- cs42l51.0&#45;004a_Mic Bias -->
+<g id="node19" class="node">
+<title>cs42l51.0&#45;004a_Mic Bias</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="409.5,-583 338.5,-583 338.5,-545 409.5,-545 409.5,-583"/>
+<text text-anchor="middle" x="374" y="-567.8" font-family="sans-serif" font-size="14.00">Mic Bias</text>
+<text text-anchor="middle" x="374" y="-552.8" font-family="sans-serif" font-size="14.00">[supply]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Mic Bias&#45;&gt;cs42l51.0&#45;004a_MICL -->
+<g id="edge11" class="edge">
+<title>cs42l51.0&#45;004a_Mic Bias&#45;&gt;cs42l51.0&#45;004a_MICL</title>
+<path fill="none" stroke="black" d="M372.74,-544.83C372.2,-537.13 371.57,-527.97 370.97,-519.42"/>
+<polygon fill="black" stroke="black" points="374.46,-519.15 370.28,-509.41 367.48,-519.63 374.46,-519.15"/>
+</g>
+<!-- cs42l51.0&#45;004a_PGA&#45;ADC Mux Left&#45;&gt;cs42l51.0&#45;004a_Left PGA -->
+<g id="edge10" class="edge">
+<title>cs42l51.0&#45;004a_PGA&#45;ADC Mux Left&#45;&gt;cs42l51.0&#45;004a_Left PGA</title>
+<path fill="none" stroke="black" d="M804.92,-470.99C804.27,-460.3 803.44,-446.5 802.71,-434.37"/>
+<polygon fill="black" stroke="black" points="806.2,-434.03 802.1,-424.26 799.21,-434.45 806.2,-434.03"/>
+</g>
+<!-- cs42l51.0&#45;004a_Right PGA -->
+<g id="node27" class="node">
+<title>cs42l51.0&#45;004a_Right PGA</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="688.5,-424 605.5,-424 605.5,-386 688.5,-386 688.5,-424"/>
+<text text-anchor="middle" x="647" y="-408.8" font-family="sans-serif" font-size="14.00">Right PGA</text>
+<text text-anchor="middle" x="647" y="-393.8" font-family="sans-serif" font-size="14.00">[pga]</text>
+</g>
+<!-- cs42l51.0&#45;004a_PGA&#45;ADC Mux Right&#45;&gt;cs42l51.0&#45;004a_Right PGA -->
+<g id="edge19" class="edge">
+<title>cs42l51.0&#45;004a_PGA&#45;ADC Mux Right&#45;&gt;cs42l51.0&#45;004a_Right PGA</title>
+<path fill="none" stroke="black" d="M643.87,-470.99C644.38,-460.3 645.05,-446.5 645.63,-434.37"/>
+<polygon fill="black" stroke="black" points="649.13,-434.42 646.12,-424.26 642.14,-434.08 649.13,-434.42"/>
+</g>
+<!-- cs42l51.0&#45;004a_Playback&#45;&gt;cs42l51.0&#45;004a_DAC Mux -->
+<g id="edge5" class="edge">
+<title>cs42l51.0&#45;004a_Playback&#45;&gt;cs42l51.0&#45;004a_DAC Mux</title>
+<path fill="none" stroke="black" d="M560,-237.83C560,-230.13 560,-220.97 560,-212.42"/>
+<polygon fill="black" stroke="black" points="563.5,-212.41 560,-202.41 556.5,-212.41 563.5,-212.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_Right ADC -->
+<g id="node25" class="node">
+<title>cs42l51.0&#45;004a_Right ADC</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="697,-350 613,-350 613,-312 697,-312 697,-350"/>
+<text text-anchor="middle" x="655" y="-334.8" font-family="sans-serif" font-size="14.00">Right ADC</text>
+<text text-anchor="middle" x="655" y="-319.8" font-family="sans-serif" font-size="14.00">[adc]</text>
+</g>
+<!-- cs42l51.0&#45;004a_Right ADC&#45;&gt;cs42l51.0&#45;004a_Capture -->
+<g id="edge3" class="edge">
+<title>cs42l51.0&#45;004a_Right ADC&#45;&gt;cs42l51.0&#45;004a_Capture</title>
+<path fill="none" stroke="black" d="M655,-311.83C655,-304.13 655,-294.97 655,-286.42"/>
+<polygon fill="black" stroke="black" points="658.5,-286.41 655,-276.41 651.5,-286.41 658.5,-286.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_Right DAC&#45;&gt;cs42l51.0&#45;004a_HPR -->
+<g id="edge7" class="edge">
+<title>cs42l51.0&#45;004a_Right DAC&#45;&gt;cs42l51.0&#45;004a_HPR</title>
+<path fill="none" stroke="black" d="M608,-89.83C608,-82.13 608,-72.97 608,-64.42"/>
+<polygon fill="black" stroke="black" points="611.5,-64.41 608,-54.41 604.5,-64.41 611.5,-64.41"/>
+</g>
+<!-- cs42l51.0&#45;004a_Right PGA&#45;&gt;cs42l51.0&#45;004a_Right ADC -->
+<g id="edge17" class="edge">
+<title>cs42l51.0&#45;004a_Right PGA&#45;&gt;cs42l51.0&#45;004a_Right ADC</title>
+<path fill="none" stroke="black" d="M649.02,-385.83C649.87,-378.13 650.89,-368.97 651.84,-360.42"/>
+<polygon fill="black" stroke="black" points="655.33,-360.74 652.95,-350.41 648.37,-359.97 655.33,-360.74"/>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_TX -->
+<g id="node30" class="node">
+<title>hdmi&#45;audio&#45;codec.1.auto_TX</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="281.5,-509 210.5,-509 210.5,-471 281.5,-471 281.5,-509"/>
+<text text-anchor="middle" x="246" y="-493.8" font-family="sans-serif" font-size="14.00">TX</text>
+<text text-anchor="middle" x="246" y="-478.8" font-family="sans-serif" font-size="14.00">[output]</text>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_I2S Playback&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_TX -->
+<g id="edge22" class="edge">
+<title>hdmi&#45;audio&#45;codec.1.auto_I2S Playback&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_TX</title>
+<path fill="none" stroke="black" d="M254.22,-544.83C253.05,-537.13 251.65,-527.97 250.34,-519.42"/>
+<polygon fill="black" stroke="black" points="253.78,-518.77 248.81,-509.41 246.86,-519.83 253.78,-518.77"/>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_RX -->
+<g id="node29" class="node">
+<title>hdmi&#45;audio&#45;codec.1.auto_RX</title>
+<polygon fill="#f2f2f2" stroke="#4d4d4d" points="189.5,-583 118.5,-583 118.5,-545 189.5,-545 189.5,-583"/>
+<text text-anchor="middle" x="154" y="-567.8" font-family="sans-serif" font-size="14.00">RX</text>
+<text text-anchor="middle" x="154" y="-552.8" font-family="sans-serif" font-size="14.00">[output]</text>
+</g>
+<!-- hdmi&#45;audio&#45;codec.1.auto_RX&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_Capture -->
+<g id="edge20" class="edge">
+<title>hdmi&#45;audio&#45;codec.1.auto_RX&#45;&gt;hdmi&#45;audio&#45;codec.1.auto_Capture</title>
+<path fill="none" stroke="black" d="M154.25,-544.83C154.36,-537.13 154.49,-527.97 154.61,-519.42"/>
+<polygon fill="black" stroke="black" points="158.1,-519.46 154.74,-509.41 151.11,-519.36 158.1,-519.46"/>
+</g>
+</g>
+</svg>
diff --git a/Documentation/sound/soc/dapm.rst b/Documentation/sound/soc/dapm.rst
index c3154ce6e1b2..14c4dc026e6b 100644
--- a/Documentation/sound/soc/dapm.rst
+++ b/Documentation/sound/soc/dapm.rst
@@ -7,8 +7,8 @@ Description
Dynamic Audio Power Management (DAPM) is designed to allow portable
Linux devices to use the minimum amount of power within the audio
-subsystem at all times. It is independent of other kernel PM and as
-such, can easily co-exist with the other PM systems.
+subsystem at all times. It is independent of other kernel power
+management frameworks and, as such, can easily co-exist with them.
DAPM is also completely transparent to all user space applications as
all power switching is done within the ASoC core. No code changes or
@@ -16,11 +16,29 @@ recompiling are required for user space applications. DAPM makes power
switching decisions based upon any audio stream (capture/playback)
activity and audio mixer settings within the device.
-DAPM spans the whole machine. It covers power control within the entire
-audio subsystem, this includes internal codec power blocks and machine
-level power systems.
+DAPM is based on two basic elements, called widgets and routes:
-There are 4 power domains within DAPM
+ * a **widget** is every part of the audio hardware that can be enabled by
+ software when in use and disabled to save power when not in use
+ * a **route** is an interconnection between widgets that exists when sound
+ can flow from one widget to the other
+
+All DAPM power switching decisions are made automatically by consulting an
+audio routing graph. This graph is specific to each sound card and spans
+the whole sound card, so some DAPM routes connect two widgets belonging to
+different components (e.g. the LINE OUT pin of a CODEC and the input pin of
+an amplifier).
+
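+For illustration, such a cross-component connection is declared as an
+ordinary route entry, listing the sink first and the source last (the
+widget names below are hypothetical)::
+
+  /* hypothetical machine-level route: CODEC LINE OUT feeds the amplifier */
+  static const struct snd_soc_dapm_route board_routes[] = {
+          { "Amp IN", NULL, "LINE OUT" },
+  };
+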
+The graph for the STM32MP1-DK1 sound card is shown in the picture below:
+
+.. kernel-figure:: dapm-graph.svg
+ :alt: Example DAPM graph
+ :align: center
+
+DAPM power domains
+==================
+
+There are 4 power domains within DAPM:
Codec bias domain
VREF, VMID (core codec and audio power)
@@ -47,17 +65,11 @@ Stream domain
Enabled and disabled when stream playback/capture is started and
stopped respectively. e.g. aplay, arecord.
-All DAPM power switching decisions are made automatically by consulting an audio
-routing map of the whole machine. This map is specific to each machine and
-consists of the interconnections between every audio component (including
-internal codec components). All audio components that effect power are called
-widgets hereafter.
-
DAPM Widgets
============
-Audio DAPM widgets fall into a number of types:-
+Audio DAPM widgets fall into a number of types:
Mixer
Mixes several analog signals into a single analog signal.
@@ -141,14 +153,14 @@ Stream Widgets relate to the stream power domain and only consist of ADCs
(analog to digital converters), DACs (digital to analog converters),
AIF IN and AIF OUT.
-Stream widgets have the following format:-
+Stream widgets have the following format:
::
SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert),
SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert)
NOTE: the stream name must match the corresponding stream name in your codec
-snd_soc_codec_dai.
+snd_soc_dai_driver.
e.g. stream widgets for HiFi playback and capture
::
@@ -167,7 +179,7 @@ Path Domain Widgets
-------------------
Path domain widgets have a ability to control or affect the audio signal or
-audio paths within the audio subsystem. They have the following form:-
+audio paths within the audio subsystem. They have the following form:
::
SND_SOC_DAPM_PGA(name, reg, shift, invert, controls, num_controls)
@@ -207,7 +219,7 @@ powered. e.g.
A machine widget can have an optional call back.
e.g. Jack connector widget for an external Mic that enables Mic Bias
-when the Mic is inserted:-::
+when the Mic is inserted::
static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
{
@@ -221,7 +233,7 @@ when the Mic is inserted:-::
Codec (BIAS) Domain
-------------------
-The codec bias power domain has no widgets and is handled by the codecs DAPM
+The codec bias power domain has no widgets and is handled by the codec DAPM
event handler. This handler is called when the codec power state is changed with
respect to any stream event or by kernel PM events.
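
In practice this handler is typically the component's set_bias_level()
callback, wired up through the ``set_bias_level`` field of ``struct
snd_soc_component_driver``. A minimal sketch (the register and bit names
are hypothetical)::

  static int mycodec_set_bias_level(struct snd_soc_component *component,
                                    enum snd_soc_bias_level level)
  {
          switch (level) {
          case SND_SOC_BIAS_ON:
          case SND_SOC_BIAS_PREPARE:
                  break;
          case SND_SOC_BIAS_STANDBY:
                  /* hypothetical register update: keep references at low power */
                  snd_soc_component_update_bits(component, MYCODEC_PWR_REG,
                                                MYCODEC_VMID_MASK,
                                                MYCODEC_VMID_STANDBY);
                  break;
          case SND_SOC_BIAS_OFF:
                  snd_soc_component_update_bits(component, MYCODEC_PWR_REG,
                                                MYCODEC_VMID_MASK, 0);
                  break;
          }

          return 0;
  }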
@@ -229,17 +241,58 @@ to any stream event or by kernel PM events.
Virtual Widgets
---------------
-Sometimes widgets exist in the codec or machine audio map that don't have any
+Sometimes widgets exist in the codec or machine audio graph that don't have any
corresponding soft power control. In this case it is necessary to create
a virtual widget - a widget with no control bits e.g.
::
SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
-This can be used to merge to signal paths together in software.
+This can be used to merge two signal paths together in software.
-After all the widgets have been defined, they can then be added to the DAPM
-subsystem individually with a call to snd_soc_dapm_new_control().
+Registering DAPM controls
+=========================
+
+In many cases the DAPM widgets are implemented statically in a ``static
+const struct snd_soc_dapm_widget`` array in a codec driver, and simply
+declared via the ``dapm_widgets`` and ``num_dapm_widgets`` fields of the
+``struct snd_soc_component_driver``.
+
+Similarly, routes connecting them are implemented statically in a ``static
+const struct snd_soc_dapm_route`` array and declared via the
+``dapm_routes`` and ``num_dapm_routes`` fields of the same struct.
+
+With the above declared, the driver registration will take care of
+populating them::
+
+ static const struct snd_soc_dapm_widget wm2000_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("SPKN"),
+ SND_SOC_DAPM_OUTPUT("SPKP"),
+ ...
+ };
+
+ /* Target, Path, Source */
+ static const struct snd_soc_dapm_route wm2000_audio_map[] = {
+ { "SPKN", NULL, "ANC Engine" },
+ { "SPKP", NULL, "ANC Engine" },
+ ...
+ };
+
+ static const struct snd_soc_component_driver soc_component_dev_wm2000 = {
+ ...
+ .dapm_widgets = wm2000_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm2000_dapm_widgets),
+ .dapm_routes = wm2000_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm2000_audio_map),
+ ...
+ };
+
+In more complex cases the list of DAPM widgets and/or routes can only be
+known at probe time. This happens, for example, when a driver supports
+different models with different sets of features. In those cases
+separate widgets and routes arrays implementing the case-specific features
+can be registered programmatically by calling snd_soc_dapm_new_controls()
+and snd_soc_dapm_add_routes().
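+
+A minimal sketch of such a probe-time registration (the ``extra_widgets``
+and ``extra_routes`` arrays and the model check are hypothetical)::
+
+  static int mydrv_component_probe(struct snd_soc_component *component)
+  {
+          struct snd_soc_dapm_context *dapm =
+                  snd_soc_component_get_dapm(component);
+          int ret;
+
+          if (mydrv_has_extra_outputs(component)) {
+                  ret = snd_soc_dapm_new_controls(dapm, extra_widgets,
+                                                  ARRAY_SIZE(extra_widgets));
+                  if (ret)
+                          return ret;
+
+                  ret = snd_soc_dapm_add_routes(dapm, extra_routes,
+                                                ARRAY_SIZE(extra_routes));
+                  if (ret)
+                          return ret;
+          }
+
+          return 0;
+  }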
Codec/DSP Widget Interconnections
@@ -247,31 +300,29 @@ Codec/DSP Widget Interconnections
Widgets are connected to each other within the codec, platform and machine by
audio paths (called interconnections). Each interconnection must be defined in
-order to create a map of all audio paths between widgets.
+order to create a graph of all audio paths between widgets.
This is easiest with a diagram of the codec or DSP (and schematic of the machine
audio system), as it requires joining widgets together via their audio signal
paths.
-e.g., from the WM8731 output mixer (wm8731.c)
-
-The WM8731 output mixer has 3 inputs (sources)
+For example the WM8731 output mixer (wm8731.c) has 3 inputs (sources):
1. Line Bypass Input
2. DAC (HiFi playback)
3. Mic Sidetone Input
-Each input in this example has a kcontrol associated with it (defined in example
-above) and is connected to the output mixer via its kcontrol name. We can now
-connect the destination widget (wrt audio signal) with its source widgets.
-::
+Each input in this example has a kcontrol associated with it (defined in
+the example above) and is connected to the output mixer via its kcontrol
+name. We can now connect the destination widget (wrt audio signal) with its
+source widgets. ::
/* output mixer */
{"Output Mixer", "Line Bypass Switch", "Line Input"},
{"Output Mixer", "HiFi Playback Switch", "DAC"},
{"Output Mixer", "Mic Sidetone Switch", "Mic Bias"},
-So we have :-
+So we have:
* Destination Widget <=== Path Name <=== Source Widget, or
* Sink, Path, Source, or
@@ -280,12 +331,11 @@ So we have :-
When there is no path name connecting widgets (e.g. a direct connection) we
pass NULL for the path name.
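
For example, a direct connection from the output mixer to a (hypothetical)
output pin needs no path name::

  {"LOUT", NULL, "Output Mixer"},
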
-Interconnections are created with a call to:-
-::
+Interconnections are created with a call to::
snd_soc_dapm_connect_input(codec, sink, path, source);
-Finally, snd_soc_dapm_new_widgets(codec) must be called after all widgets and
+Finally, snd_soc_dapm_new_widgets() must be called after all widgets and
interconnections have been registered with the core. This causes the core to
scan the codec and machine so that the internal DAPM state matches the
physical state of the machine.
@@ -326,35 +376,44 @@ jacks can also be switched OFF.
DAPM Widget Events
==================
-Some widgets can register their interest with the DAPM core in PM events.
-e.g. A Speaker with an amplifier registers a widget so the amplifier can be
-powered only when the spk is in use.
-::
+Widgets that need more complex behaviour than what DAPM alone provides can
+set a custom "event handler" by supplying a function pointer. An example
+is a power supply needing to enable a GPIO::
- /* turn speaker amplifier on/off depending on use */
- static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
+ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
{
- gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event));
- return 0;
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ gpiod_set_value_cansleep(gpio_pa, true);
+ else
+ gpiod_set_value_cansleep(gpio_pa, false);
+
+ return 0;
}
- /* corgi machine dapm widgets */
- static const struct snd_soc_dapm_widget wm8731_dapm_widgets =
- SND_SOC_DAPM_SPK("Ext Spk", corgi_amp_event);
+ static const struct snd_soc_dapm_widget st_widgets[] = {
+ ...
+ SND_SOC_DAPM_SUPPLY("Speaker Power", SND_SOC_NOPM, 0, 0,
+ sof_es8316_speaker_power_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ };
-Please see soc-dapm.h for all other widgets that support events.
+See soc-dapm.h for all other widgets that support events.
Event types
-----------
-The following event types are supported by event widgets.
-::
+The following event types are supported by event widgets::
/* dapm event types */
- #define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
- #define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
- #define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
- #define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
- #define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
- #define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
+ #define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
+ #define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
+ #define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
+ #define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
+ #define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
+ #define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
+ #define SND_SOC_DAPM_WILL_PMU 0x40 /* called at start of sequence */
+ #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
+ #define SND_SOC_DAPM_PRE_POST_PMD (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+ #define SND_SOC_DAPM_PRE_POST_PMU (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index abe768088377..638762442336 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -97,7 +97,6 @@ class KernelInclude(Include):
# HINT: this is the only line I had to change / commented out:
#path = utils.relative_path(None, path)
- path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
index 3092df051c95..d479cfa73658 100644
--- a/Documentation/sphinx/kerneldoc-preamble.sty
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -215,11 +215,12 @@
due to the lack of suitable font families and/or the texlive-xecjk
package.
- If you want them, please install ``Noto Sans CJK'' font families
- along with the texlive-xecjk package by following instructions from
+ If you want them, please install non-variable ``Noto Sans CJK''
+ font families along with the texlive-xecjk package by following
+ instructions from
\sphinxcode{./scripts/sphinx-pre-install}.
- Having optional ``Noto Serif CJK'' font families will improve
- the looks of those translations.
+ Having optional non-variable ``Noto Serif CJK'' font families will
+ improve the looks of those translations.
\end{sphinxadmonition}}
\newcommand{\kerneldocEndSC}{}
\newcommand{\kerneldocBeginTC}[1]{}
diff --git a/Documentation/spi/index.rst b/Documentation/spi/index.rst
index 06c34ea11bcf..824ce42ed4f0 100644
--- a/Documentation/spi/index.rst
+++ b/Documentation/spi/index.rst
@@ -10,7 +10,6 @@ Serial Peripheral Interface (SPI)
spi-summary
spidev
butterfly
- pxa2xx
spi-lm70llp
spi-sc18is602
diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst
deleted file mode 100644
index 19479b801826..000000000000
--- a/Documentation/spi/pxa2xx.rst
+++ /dev/null
@@ -1,211 +0,0 @@
-==============================
-PXA2xx SPI on SSP driver HOWTO
-==============================
-
-This a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
-synchronous serial port into an SPI host controller
-(see Documentation/spi/spi-summary.rst). The driver has the following features
-
-- Support for any PXA2xx and compatible SSP.
-- SSP PIO and SSP DMA data transfers.
-- External and Internal (SSPFRM) chip selects.
-- Per peripheral device (chip) configuration.
-- Full suspend, freeze, resume support.
-
-The driver is built around a &struct spi_message FIFO serviced by kernel
-thread. The kernel thread, spi_pump_messages(), drives message FIFO and
-is responsible for queuing SPI transactions and setting up and launching
-the DMA or interrupt driven transfers.
-
-Declaring PXA2xx host controllers
----------------------------------
-Typically, for a legacy platform, an SPI host controller is defined in the
-arch/.../mach-*/board-*.c as a "platform device". The host controller configuration
-is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
-
- struct pxa2xx_spi_controller {
- u16 num_chipselect;
- u8 enable_dma;
- ...
- };
-
-The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
-peripheral devices (chips) attached to this SPI host controller.
-
-The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
-be used. This caused the driver to acquire two DMA channels: Rx channel and
-Tx channel. The Rx channel has a higher DMA service priority than the Tx channel.
-See the "PXA2xx Developer Manual" section "DMA Controller".
-
-For the new platforms the description of the controller and peripheral devices
-comes from Device Tree or ACPI.
-
-NSSP HOST SAMPLE
-----------------
-Below is a sample configuration using the PXA255 NSSP for a legacy platform::
-
- static struct resource pxa_spi_nssp_resources[] = {
- [0] = {
- .start = __PREG(SSCR0_P(2)), /* Start address of NSSP */
- .end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_NSSP, /* NSSP IRQ */
- .end = IRQ_NSSP,
- .flags = IORESOURCE_IRQ,
- },
- };
-
- static struct pxa2xx_spi_controller pxa_nssp_controller_info = {
- .num_chipselect = 1, /* Matches the number of chips attached to NSSP */
- .enable_dma = 1, /* Enables NSSP DMA */
- };
-
- static struct platform_device pxa_spi_nssp = {
- .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
- .id = 2, /* Bus number, MUST MATCH SSP number 1..n */
- .resource = pxa_spi_nssp_resources,
- .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
- .dev = {
- .platform_data = &pxa_nssp_controller_info, /* Passed to driver */
- },
- };
-
- static struct platform_device *devices[] __initdata = {
- &pxa_spi_nssp,
- };
-
- static void __init board_init(void)
- {
- (void)platform_add_device(devices, ARRAY_SIZE(devices));
- }
-
-Declaring peripheral devices
-----------------------------
-Typically, for a legacy platform, each SPI peripheral device (chip) is defined in the
-arch/.../mach-*/board-*.c using the "spi_board_info" structure found in
-"linux/spi/spi.h". See "Documentation/spi/spi-summary.rst" for additional
-information.
-
-Each peripheral device (chip) attached to the PXA2xx must provide specific chip configuration
-information via the structure "pxa2xx_spi_chip" found in
-"include/linux/spi/pxa2xx_spi.h". The PXA2xx host controller driver will use
-the configuration whenever the driver communicates with the peripheral
-device. All fields are optional.
-
-::
-
- struct pxa2xx_spi_chip {
- u8 tx_threshold;
- u8 rx_threshold;
- u8 dma_burst_size;
- u32 timeout;
- };
-
-The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
-used to configure the SSP hardware FIFO. These fields are critical to the
-performance of pxa2xx_spi driver and misconfiguration will result in rx
-FIFO overruns (especially in PIO mode transfers). Good default values are::
-
- .tx_threshold = 8,
- .rx_threshold = 8,
-
-The range is 1 to 16 where zero indicates "use default".
-
-The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA
-engine and is related the "spi_device.bits_per_word" field. Read and understand
-the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
-to determine the correct value. An SSP configured for byte-wide transfers would
-use a value of 8. The driver will determine a reasonable default if
-dma_burst_size == 0.
-
-The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
-trailing bytes in the SSP receiver FIFO. The correct value for this field is
-dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
-peripheral device. Please note that the PXA2xx SSP 1 does not support trailing byte
-timeouts and must busy-wait any trailing bytes.
-
-NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
-chipselect is dropped after each spi_transfer. Most devices need chip select
-asserted around the complete message. Use SSPFRM as a GPIO (through a descriptor)
-to accommodate these chips.
-
-
-NSSP PERIPHERAL SAMPLE
-----------------------
-For a legacy platform or in some other cases, the pxa2xx_spi_chip structure
-is passed to the pxa2xx_spi driver in the "spi_board_info.controller_data"
-field. Below is a sample configuration using the PXA255 NSSP.
-
-::
-
- static struct pxa2xx_spi_chip cs8415a_chip_info = {
- .tx_threshold = 8, /* SSP hardware FIFO threshold */
- .rx_threshold = 8, /* SSP hardware FIFO threshold */
- .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
- .timeout = 235, /* See Intel documentation */
- };
-
- static struct pxa2xx_spi_chip cs8405a_chip_info = {
- .tx_threshold = 8, /* SSP hardware FIFO threshold */
- .rx_threshold = 8, /* SSP hardware FIFO threshold */
- .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
- .timeout = 235, /* See Intel documentation */
- };
-
- static struct spi_board_info streetracer_spi_board_info[] __initdata = {
- {
- .modalias = "cs8415a", /* Name of spi_driver for this device */
- .max_speed_hz = 3686400, /* Run SSP as fast a possible */
- .bus_num = 2, /* Framework bus number */
- .chip_select = 0, /* Framework chip select */
- .platform_data = NULL; /* No spi_driver specific config */
- .controller_data = &cs8415a_chip_info, /* Host controller config */
- .irq = STREETRACER_APCI_IRQ, /* Peripheral device interrupt */
- },
- {
- .modalias = "cs8405a", /* Name of spi_driver for this device */
- .max_speed_hz = 3686400, /* Run SSP as fast a possible */
- .bus_num = 2, /* Framework bus number */
- .chip_select = 1, /* Framework chip select */
- .controller_data = &cs8405a_chip_info, /* Host controller config */
- .irq = STREETRACER_APCI_IRQ, /* Peripheral device interrupt */
- },
- };
-
- static void __init streetracer_init(void)
- {
- spi_register_board_info(streetracer_spi_board_info,
- ARRAY_SIZE(streetracer_spi_board_info));
- }
-
-
-DMA and PIO I/O Support
------------------------
-The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
-transfers. The driver defaults to PIO mode and DMA transfers must be enabled
-by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure.
-For the newer platforms, that are known to support DMA, the driver will enable
-it automatically and try it first with a possible fallback to PIO. The DMA
-mode supports both coherent and stream based DMA mappings.
-
-The following logic is used to determine the type of I/O to be used on
-a per "spi_transfer" basis::
-
- if spi_message.len > 65536 then
- if spi_message.is_dma_mapped or rx_dma_buf != 0 or tx_dma_buf != 0 then
- reject premapped transfers
-
- print "rate limited" warning
- use PIO transfers
-
- if enable_dma and the size is in the range [DMA burst size..65536] then
- use streaming DMA mode
-
- otherwise
- use PIO transfer
-
-THANKS TO
----------
-David Brownell and others for mentoring the development of this driver.
diff --git a/Documentation/spi/spi-summary.rst b/Documentation/spi/spi-summary.rst
index 546de37d6caf..7f8accfae6f9 100644
--- a/Documentation/spi/spi-summary.rst
+++ b/Documentation/spi/spi-summary.rst
@@ -348,7 +348,6 @@ SPI protocol drivers somewhat resemble platform device drivers::
static struct spi_driver CHIP_driver = {
.driver = {
.name = "CHIP",
- .owner = THIS_MODULE,
.pm = &CHIP_pm_ops,
},
@@ -419,10 +418,6 @@ any more such messages.
to make extra copies unless the hardware requires it (e.g. working
around hardware errata that force the use of bounce buffering).
- If standard dma_map_single() handling of these buffers is inappropriate,
- you can use spi_message.is_dma_mapped to tell the controller driver
- that you've already provided the relevant DMA addresses.
-
- The basic I/O primitive is spi_async(). Async requests may be
issued in any context (irq handler, task, etc) and completion
is reported using a callback provided with the message.
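
For illustration, a minimal sketch of queueing one asynchronous read (the
context structure and helper are hypothetical; note that the message and
transfer must stay allocated until the completion callback runs)::

	struct my_async_ctx {
		struct spi_message msg;
		struct spi_transfer xfer;
		struct completion done;
	};

	static void my_complete(void *context)
	{
		struct my_async_ctx *ctx = context;

		/* runs from the controller driver's completion path */
		complete(&ctx->done);
	}

	static int my_async_read(struct spi_device *spi, struct my_async_ctx *ctx,
				 void *buf, size_t len)
	{
		memset(&ctx->xfer, 0, sizeof(ctx->xfer));
		ctx->xfer.rx_buf = buf;
		ctx->xfer.len = len;

		spi_message_init(&ctx->msg);
		spi_message_add_tail(&ctx->xfer, &ctx->msg);
		ctx->msg.complete = my_complete;
		ctx->msg.context = ctx;

		return spi_async(spi, &ctx->msg);
	}
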
diff --git a/Documentation/tee/index.rst b/Documentation/tee/index.rst
index a23bd08847e5..4be6e69d7837 100644
--- a/Documentation/tee/index.rst
+++ b/Documentation/tee/index.rst
@@ -10,6 +10,7 @@ TEE Subsystem
tee
op-tee
amd-tee
+ ts-tee
.. only:: subproject and html
diff --git a/Documentation/tee/ts-tee.rst b/Documentation/tee/ts-tee.rst
new file mode 100644
index 000000000000..843e34422648
--- /dev/null
+++ b/Documentation/tee/ts-tee.rst
@@ -0,0 +1,71 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================
+TS-TEE (Trusted Services project)
+=================================
+
+This driver provides access to secure services implemented by Trusted Services.
+
+Trusted Services [1] is a TrustedFirmware.org project that provides a framework
+for developing and deploying device Root of Trust services in FF-A [2] S-EL0
+Secure Partitions. The project hosts the reference implementation of the Arm
+Platform Security Architecture [3] for Arm A-profile devices.
+
+The FF-A Secure Partitions (SP) are accessible through the FF-A driver [4] which
+provides the low level communication for this driver. On top of that the Trusted
+Services RPC protocol is used [5]. To use the driver from user space a reference
+implementation is provided at [6], which is part of the Trusted Services client
+library called libts [7].
+
+All Trusted Services (TS) SPs have the same FF-A UUID; it identifies the TS RPC
+protocol. A TS SP can host one or more services (e.g. PSA Crypto, PSA ITS, etc).
+A service is identified by its service UUID; the same type of service cannot be
+present twice in the same SP. During SP boot each service in the SP is assigned
+an "interface ID". This is just a short ID to simplify message addressing.
+
+The generic TEE design is to share memory with the Trusted OS once and then
+reuse it to communicate with multiple applications running on the Trusted
+OS. However, in the case of FF-A, memory sharing works on an endpoint level, i.e.
+memory is shared with a specific SP. User space has to be able to separately
+share memory with each SP based on its endpoint ID; therefore a separate TEE
+device is registered for each discovered TS SP. Opening the SP corresponds to
+opening the TEE device and creating a TEE context. A TS SP hosts one or more
+services. Opening a service corresponds to opening a session in the given
+tee_context.
+
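+A minimal user space sketch for telling a TS-TEE device apart from other TEE
+implementations (the device node path is an assumption; real clients are
+expected to use libts [7] instead)::
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <linux/tee.h>
+
+  int main(void)
+  {
+          struct tee_ioctl_version_data vers = { 0 };
+          int fd = open("/dev/tee0", O_RDWR);    /* assumed device node */
+
+          if (fd < 0)
+                  return 1;
+
+          /* TEE_IOC_VERSION reports which implementation backs this device */
+          if (ioctl(fd, TEE_IOC_VERSION, &vers) == 0)
+                  printf("implementation id: %u\n", vers.impl_id);
+
+          close(fd);
+          return 0;
+  }
+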
+Overview of a system with Trusted Services components::
+
+ User space Kernel space Secure world
+ ~~~~~~~~~~ ~~~~~~~~~~~~ ~~~~~~~~~~~~
+ +--------+ +-------------+
+ | Client | | Trusted |
+ +--------+ | Services SP |
+ /\ +-------------+
+ || /\
+ || ||
+ || ||
+ \/ \/
+ +-------+ +----------+--------+ +-------------+
+ | libts | | TEE | TS-TEE | | FF-A SPMC |
+ | | | subsys | driver | | + SPMD |
+ +-------+----------------+----+-----+--------+-----------+-------------+
+ | Generic TEE API | | FF-A | TS RPC protocol |
+ | IOCTL (TEE_IOC_*) | | driver | over FF-A |
+ +-----------------------------+ +--------+-------------------------+
+
+References
+==========
+
+[1] https://www.trustedfirmware.org/projects/trusted-services/
+
+[2] https://developer.arm.com/documentation/den0077/
+
+[3] https://www.arm.com/architecture/security-features/platform-security
+
+[4] drivers/firmware/arm_ffa/
+
+[5] https://trusted-services.readthedocs.io/en/v1.0.0/developer/service-access-protocols.html#abi
+
+[6] https://git.trustedfirmware.org/TS/trusted-services.git/tree/components/rpc/ts_rpc/caller/linux/ts_rpc_caller_linux.c?h=v1.0.0
+
+[7] https://git.trustedfirmware.org/TS/trusted-services.git/tree/deployments/libts/arm-linux/CMakeLists.txt?h=v1.0.0
diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst
index f8786be15183..7fe8ef9718d8 100644
--- a/Documentation/timers/no_hz.rst
+++ b/Documentation/timers/no_hz.rst
@@ -129,11 +129,8 @@ adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain
online to handle timekeeping tasks in order to ensure that system
calls like gettimeofday() return accurate values on adaptive-tick CPUs.
(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running
-user processes to observe slight drifts in clock rate.) Therefore, the
-boot CPU is prohibited from entering adaptive-ticks mode. Specifying a
-"nohz_full=" mask that includes the boot CPU will result in a boot-time
-error message, and the boot CPU will be removed from the mask. Note that
-this means that your system must have at least two CPUs in order for
+user processes to observe slight drifts in clock rate.) Note that this
+means that your system must have at least two CPUs in order for
CONFIG_NO_HZ_FULL=y to do anything for you.
Finally, adaptive-ticks CPUs must have their RCU callbacks offloaded.
diff --git a/Documentation/tools/rtla/common_options.rst b/Documentation/tools/rtla/common_options.rst
index aeb91ff3bd68..2dc1575210aa 100644
--- a/Documentation/tools/rtla/common_options.rst
+++ b/Documentation/tools/rtla/common_options.rst
@@ -14,10 +14,6 @@
Print debug info.
-**-t**, **--trace**\[*=file*]
-
- Save the stopped trace to [*file|osnoise_trace.txt*].
-
**-e**, **--event** *sys:event*
Enable an event in the trace (**-t**) session. The argument can be a specific event, e.g., **-e** *sched:sched_switch*, or all events of a system group, e.g., **-e** *sched*. Multiple **-e** are allowed. It is only active when **-t** or **-a** are set.
@@ -50,6 +46,13 @@
Set a *cgroup* to the tracer's threads. If the **-C** option is passed without arguments, the tracer's thread will inherit **rtla**'s *cgroup*. Otherwise, the threads will be placed on the *cgroup* passed to the option.
+**--warm-up** *s*
+
+  After starting the workload, let it run for *s* seconds before starting to collect data, allowing the system to warm up. Statistical data generated during the warm-up period is discarded.
+
+**--trace-buffer-size** *kB*
+ Set the per-cpu trace buffer size in kB for the tracing output.
+
**-h**, **--help**
Print help menu.
diff --git a/Documentation/tools/rtla/common_osnoise_options.rst b/Documentation/tools/rtla/common_osnoise_options.rst
index f792ca58c211..d73de2d58f5f 100644
--- a/Documentation/tools/rtla/common_osnoise_options.rst
+++ b/Documentation/tools/rtla/common_osnoise_options.rst
@@ -25,3 +25,7 @@
Specify the minimum delta between two time reads to be considered noise.
The default threshold is *5 us*.
+
+**-t**, **--trace** \[*file*]
+
+ Save the stopped trace to [*file|osnoise_trace.txt*].
diff --git a/Documentation/tools/rtla/common_timerlat_options.rst b/Documentation/tools/rtla/common_timerlat_options.rst
index d3255ed70195..cef6651f1435 100644
--- a/Documentation/tools/rtla/common_timerlat_options.rst
+++ b/Documentation/tools/rtla/common_timerlat_options.rst
@@ -22,17 +22,25 @@
Save the stack trace at the *IRQ* if a *Thread* latency is higher than the
argument in us.
+**-t**, **--trace** \[*file*]
+
+ Save the stopped trace to [*file|timerlat_trace.txt*].
+
**--dma-latency** *us*
Set the /dev/cpu_dma_latency to *us*, aiming to bound exit from idle latencies.
*cyclictest* sets this value to *0* by default, use **--dma-latency** *0* to have
similar results.
+**-k**, **--kernel-threads**
+
+  Use timerlat kernel-space threads, as opposed to **-u**.
+
**-u**, **--user-threads**
Set timerlat to run without a workload, and then dispatch user-space workloads
to wait on the timerlat_fd. Once a workload is awakened, it goes to sleep again,
adding the kernel-to-user and user-to-kernel measurements to the tracer
- output.
+  output. **--user-threads** will be used unless the user specifies **-k**.
**-U**, **--user-load**
diff --git a/Documentation/trace/fprobetrace.rst b/Documentation/trace/fprobetrace.rst
index 0f187e3796e4..b4c2ca3d02c1 100644
--- a/Documentation/trace/fprobetrace.rst
+++ b/Documentation/trace/fprobetrace.rst
@@ -74,7 +74,7 @@ Function arguments at exit
--------------------------
Function arguments can be accessed at exit probe using $arg<N> fetcharg. This
is useful to record the function parameter and return value at once, and
-trace the difference of structure fields (for debuging a function whether it
+trace the difference of structure fields (for debugging whether a function
correctly updates the given data structure or not)
See the :ref:`sample<fprobetrace_exit_args_sample>` below for how it works.
@@ -248,4 +248,4 @@ mode. You can trace that changes with return probe.
cat-143 [007] ...1. 1945.720616: vfs_open__entry: (vfs_open+0x4/0x40) mode=0x1 inode=0x0
cat-143 [007] ...1. 1945.728263: vfs_open__exit: (do_open+0x274/0x3d0 <- vfs_open) mode=0xa800d inode=0xffff888004ada8d8
-You can see the `file::f_mode` and `file::f_inode` are upated in `vfs_open()`.
+You can see the `file::f_mode` and `file::f_inode` are updated in `vfs_open()`.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 7e7b8ec17934..5aba74872ba7 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1968,7 +1968,7 @@ wakeup
One common case that people are interested in tracing is the
time it takes for a task that is woken to actually wake up.
Now for non Real-Time tasks, this can be arbitrary. But tracing
-it none the less can be interesting.
+it nonetheless can be interesting.
Without function tracing::
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 5092d6c13af5..0b300901fd75 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -29,6 +29,7 @@ Linux Tracing Technologies
timerlat-tracer
intel_th
ring-buffer-design
+ ring-buffer-map
stm
sys-t
coresight/index
diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst
index e1636e579c9c..5e606730cec6 100644
--- a/Documentation/trace/kprobes.rst
+++ b/Documentation/trace/kprobes.rst
@@ -322,6 +322,7 @@ architectures:
- s390
- parisc
- loongarch
+- riscv
Configuring Kprobes
===================
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index a49662ccd53c..3b6791c17e9b 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -58,8 +58,9 @@ Synopsis of kprobe_events
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
(u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
- (x8/x16/x32/x64), "char", "string", "ustring", "symbol", "symstr"
- and bitfield are supported.
+ (x8/x16/x32/x64), VFS layer common type(%pd/%pD), "char",
+ "string", "ustring", "symbol", "symstr" and bitfield are
+ supported.
(\*1) only for the probe on function entry (offs == 0). Note, this argument access
is best effort, because depending on the argument type, it may be passed on
@@ -74,7 +75,7 @@ Function arguments at kretprobe
-------------------------------
Function arguments can be accessed at kretprobe using $arg<N> fetcharg. This
is useful to record the function parameter and return value at once, and
+trace the difference of structure fields (for debugging whether a function
+trace the difference of structure fields (for debugging a function whether it
correctly updates the given data structure or not).
See the :ref:`sample<fprobetrace_exit_args_sample>` in fprobe event for how
it works.
@@ -122,6 +123,9 @@ With 'symstr' type, you can filter the event with wildcard pattern of the
symbols, and you don't need to solve symbol name by yourself.
For $comm, the default type is "string"; any other type is invalid.
+The VFS layer common type (%pd/%pD) is a special type which fetches a dentry's
+or file's name from a struct dentry's or a struct file's address, respectively.
+
.. _user_mem_access:
User Memory Access
diff --git a/Documentation/trace/ring-buffer-map.rst b/Documentation/trace/ring-buffer-map.rst
new file mode 100644
index 000000000000..8e296bcc0d7f
--- /dev/null
+++ b/Documentation/trace/ring-buffer-map.rst
@@ -0,0 +1,106 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Tracefs ring-buffer memory mapping
+==================================
+
+:Author: Vincent Donnefort <vdonnefort@google.com>
+
+Overview
+========
+Tracefs ring-buffer memory map provides an efficient method to stream data
+as no memory copy is necessary. The application mapping the ring-buffer then
+becomes a consumer for that ring-buffer, in a similar fashion to trace_pipe.
+
+Memory mapping setup
+====================
+The mapping works with a mmap() of the trace_pipe_raw interface.
+
+The first system page of the mapping contains ring-buffer statistics and
+description. It is referred to as the meta-page. One of the most important
+fields of the meta-page is the reader. It contains the sub-buffer ID which can
+be safely read by the mapper (see ring-buffer-design.rst).
+
+The meta-page is followed by all the sub-buffers, ordered by ascending ID. It is
+therefore effortless to know where the reader starts in the mapping:
+
+.. code-block:: c
+
+ reader_id = meta->reader->id;
+ reader_offset = meta->meta_page_size + reader_id * meta->subbuf_size;
+
+When the application is done with the current reader, it can get a new one using
+the trace_pipe_raw ioctl() TRACE_MMAP_IOCTL_GET_READER. This ioctl also updates
+the meta-page fields.
+
+Limitations
+===========
+When a mapping is in place on a Tracefs ring-buffer, it is not possible to
+resize it (either by increasing the entire size of the ring-buffer or the size
+of each sub-buffer). It is also not possible to use snapshots, and splice will
+copy the ring buffer data instead of using the copyless swap from the ring
+buffer.
+
+Concurrent readers (either another application mapping that ring-buffer or the
+kernel with trace_pipe) are allowed but not recommended. They will compete for
+the ring-buffer and the output is unpredictable, just like concurrent readers on
+trace_pipe would be.
+
+Example
+=======
+
+.. code-block:: c
+
+ #include <fcntl.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ #include <linux/trace_mmap.h>
+
+ #include <sys/mman.h>
+ #include <sys/ioctl.h>
+
+ #define TRACE_PIPE_RAW "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw"
+
+ int main(void)
+ {
+ int page_size = getpagesize(), fd, reader_id;
+ unsigned long meta_len, data_len;
+ struct trace_buffer_meta *meta;
+ void *map, *reader, *data;
+
+ fd = open(TRACE_PIPE_RAW, O_RDONLY | O_NONBLOCK);
+ if (fd < 0)
+ exit(EXIT_FAILURE);
+
+ map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (map == MAP_FAILED)
+ exit(EXIT_FAILURE);
+
+ meta = (struct trace_buffer_meta *)map;
+ meta_len = meta->meta_page_size;
+
+ printf("entries: %llu\n", meta->entries);
+ printf("overrun: %llu\n", meta->overrun);
+ printf("read: %llu\n", meta->read);
+ printf("nr_subbufs: %u\n", meta->nr_subbufs);
+
+ data_len = meta->subbuf_size * meta->nr_subbufs;
+ data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd, meta_len);
+ if (data == MAP_FAILED)
+ exit(EXIT_FAILURE);
+
+ if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
+ exit(EXIT_FAILURE);
+
+ reader_id = meta->reader.id;
+ reader = data + meta->subbuf_size * reader_id;
+
+ printf("Current reader address: %p\n", reader);
+
+ munmap(data, data_len);
+ munmap(meta, meta_len);
+ close (fd);
+
+ return 0;
+ }
diff --git a/Documentation/trace/tracepoints.rst b/Documentation/trace/tracepoints.rst
index 0cb8d9ca3d60..decabcc77b56 100644
--- a/Documentation/trace/tracepoints.rst
+++ b/Documentation/trace/tracepoints.rst
@@ -27,7 +27,7 @@ the tracepoint site).
You can put tracepoints at important locations in the code. They are
lightweight hooks that can pass an arbitrary number of parameters,
-which prototypes are described in a tracepoint declaration placed in a
+whose prototypes are described in a tracepoint declaration placed in a
header file.
They can be used for tracing and performance accounting.
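
For illustration, such a declaration has the following shape (the subsystem,
event and argument names are placeholders); the matching
trace_subsys_eventname() call is then placed at the instrumented location::

	#include <linux/tracepoint.h>

	DECLARE_TRACE(subsys_eventname,
		TP_PROTO(int firstarg, struct task_struct *p),
		TP_ARGS(firstarg, p));
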
diff --git a/Documentation/translations/it_IT/doc-guide/kernel-doc.rst b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
index 5cece223b46b..74057d203539 100644
--- a/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
+++ b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
@@ -180,9 +180,9 @@ Il valore di ritorno, se c'è, viene descritto in una sezione dedicata di nome
se provate a formattare bene il vostro testo come nel seguente esempio::
* Return:
- * 0 - OK
- * -EINVAL - invalid argument
- * -ENOMEM - out of memory
+ * %0 - OK
+ * %-EINVAL - invalid argument
+ * %-ENOMEM - out of memory
le righe verranno unite e il risultato sarà::
@@ -192,8 +192,8 @@ Il valore di ritorno, se c'è, viene descritto in una sezione dedicata di nome
utilizzare una lista ReST, ad esempio::
* Return:
- * * 0 - OK to runtime suspend the device
- * * -EBUSY - Device should not be runtime suspended
+ * * %0 - OK to runtime suspend the device
+ * * %-EBUSY - Device should not be runtime suspended
#) Se il vostro testo ha delle righe che iniziano con una frase seguita dai
due punti, allora ognuna di queste frasi verrà considerata come il nome
diff --git a/Documentation/translations/it_IT/index.rst b/Documentation/translations/it_IT/index.rst
index 70ccd23b2cde..9220f65e30d1 100644
--- a/Documentation/translations/it_IT/index.rst
+++ b/Documentation/translations/it_IT/index.rst
@@ -132,4 +132,4 @@ Documentazione varia
Ci sono documenti che sono difficili da inserire nell'attuale organizzazione
della documentazione; altri hanno bisogno di essere migliorati e/o convertiti
-nel formato *ReStructured Text*; altri sono semplicamente troppo vecchi.
+nel formato *reStructuredText*; altri sono semplicamente troppo vecchi.
diff --git a/Documentation/translations/it_IT/process/2.Process.rst b/Documentation/translations/it_IT/process/2.Process.rst
index 25cd00351c03..0a62c0f33faf 100644
--- a/Documentation/translations/it_IT/process/2.Process.rst
+++ b/Documentation/translations/it_IT/process/2.Process.rst
@@ -462,9 +462,12 @@ linux-kernel:
di far domande. Molti sviluppatori possono divenire impazienti con le
persone che chiaramente non hanno svolto i propri compiti a casa.
-- Evitate il *top-posting* (cioè la pratica di mettere la vostra risposta sopra
- alla frase alla quale state rispondendo). Ciò renderebbe la vostra risposta
- difficile da leggere e genera scarsa impressione.
+- Rispondete sotto alla porzione di righe citate, così da dare un contesto alle
+ vostre risposte, e quindi renderle più leggibili (in altre parole, evitate di
+ rispondere in cima, ovvero prima del testo citato). Per maggiori dettagli
+ leggete :ref:`Documentation/translations/it_IT/process/submitting-patches.rst
+ <it_interleaved_replies>`.
+
- Chiedete nella lista di discussione corretta. Linux-kernel può essere un
punto di incontro generale, ma non è il miglior posto dove trovare
diff --git a/Documentation/translations/it_IT/process/4.Coding.rst b/Documentation/translations/it_IT/process/4.Coding.rst
index 54fd255b77d0..ec874a8dfb9d 100644
--- a/Documentation/translations/it_IT/process/4.Coding.rst
+++ b/Documentation/translations/it_IT/process/4.Coding.rst
@@ -72,6 +72,10 @@ compiti del genere. Consultate il file
:ref:`Documentation/translations/it_IT/process/clang-format.rst <clangformat>`
per maggiori dettagli
+Se utilizzate un programma compatibile con EditorConfig, allora alcune
+configurazioni basilari come l'indentazione e la fine delle righe verranno
+applicate automaticamente. Per maggiori informazioni consultate la pagina:
+https://editorconfig.org/
Livelli di astrazione
*********************
diff --git a/Documentation/translations/it_IT/process/7.AdvancedTopics.rst b/Documentation/translations/it_IT/process/7.AdvancedTopics.rst
index dffd813a0910..a83fcfe18024 100644
--- a/Documentation/translations/it_IT/process/7.AdvancedTopics.rst
+++ b/Documentation/translations/it_IT/process/7.AdvancedTopics.rst
@@ -160,6 +160,8 @@ preparerà una richiesta nel modo in cui gli altri sviluppatori se l'aspettano,
e verificherà che vi siate ricordati di pubblicare quelle patch su un
server pubblico.
+.. _development_advancedtopics_reviews_it:
+
Revisionare le patch
--------------------
@@ -180,6 +182,13 @@ i commenti come domande e non come critiche. Chiedere "Come viene rilasciato
il *lock* in questo percorso?" funziona sempre molto meglio che
"qui la sincronizzazione è sbagliata".
+In caso di disaccordi, può essere utile chiedere una terza opinione. Se dopo
+pochi scambi la discussione raggiunge un punto morto, allora chiedete ai
+manutentori o altri revisori di partecipare esprimendo la loro opinione. Spesso
+vige un silenzio assenso per cui gli altri revisori non intervengono se non gli
+viene richiesto esplicitamente. L'opinione di più persone avrà sicuramente un
+peso maggiore.
+
Diversi sviluppatori revisioneranno il codice con diversi punti di vista.
Alcuni potrebbero concentrarsi principalmente sullo stile del codice e se
alcune linee hanno degli spazio bianchi di troppo. Altri si chiederanno
@@ -189,3 +198,13 @@ l'uso eccessivo di *stack*, problemi di sicurezza, duplicazione del codice
in altri contesti, documentazione, effetti negativi sulle prestazioni, cambi
all'ABI dello spazio utente, eccetera. Qualunque tipo di revisione è ben
accetta e di valore, se porta ad avere un codice migliore nel kernel.
+
+Non esistono requisiti particolarmente stringenti per l'uso di etichette come
+``Reviewed-by``. Tuttavia, perché la revisione sia efficace ci si aspetta un
+qualche tipo di messaggio che dica "ho verificato A, B e C nel codice che è
+appena stato inviato e mi sembra tutto in ordine". Inoltre, questo permette ai
+manutentori di prendere conoscenza circa una revisione avvenuta per davvero.
+
+Per finire, la revisione delle patch può diventare un processo negativo, troppo
+focalizzato sulla ricerca dei problemi. Provate a fare qualche complimento di
+tanto in tanto, specialmente con i nuovi arrivati.
diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst
index f37c53f8b524..ade695a7de19 100644
--- a/Documentation/translations/it_IT/process/changes.rst
+++ b/Documentation/translations/it_IT/process/changes.rst
@@ -34,13 +34,15 @@ PC Card, per esempio, probabilmente non dovreste preoccuparvi di pcmciautils.
====================== ================= ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 11.0.0 clang --version
+Rust (opzionale) 1.74.1 rustc --version
+bindgen (opzionale) 0.65.1 bindgen --version
GNU make 3.81 make --version
bash 4.2 bash --version
binutils 2.25 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
pahole 1.16 pahole --version
-util-linux 2.10o fdformat --version
+util-linux 2.10o mount --version
kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
jfsutils 1.1.3 fsck.jfs -V
@@ -59,8 +61,10 @@ mcelog 0.6 mcelog --version
iptables 1.4.2 iptables -V
openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
-Sphinx\ [#f1]_ 1.7 sphinx-build --version
+Sphinx\ [#f1]_ 2.4.4 sphinx-build --version
cpio any cpio --version
+GNU tar 1.28 tar --version
+gtags (opzionale) 6.6.5 gtags --version
====================== ================= ========================================
.. [#f1] Sphinx è necessario solo per produrre la documentazione del Kernel
@@ -151,6 +155,18 @@ Se la firma dei moduli è abilitata, allora vi servirà openssl per compilare il
kernel 3.7 e successivi. Vi serviranno anche i pacchetti di sviluppo di
openssl per compilare il kernel 4.3 o successivi.
+Tar
+---
+
+GNU Tar è necessario per accedere ai file d'intestazione del kernel usando sysfs
+(CONFIG_IKHEADERS)
+
+gtags / GNU GLOBAL (opzionale)
+------------------------------
+
+Il programma GNU GLOBAL versione 6.6.5, o successiva, è necessario quando si
+vuole eseguire ``make gtags`` e generare i relativi indici. Internamente si fa
+uso del parametro gtags ``-C (--directory)`` che compare in questa versione.
Strumenti di sistema
********************
@@ -434,7 +450,7 @@ E2fsprogs
JFSutils
--------
-- <http://jfs.sourceforge.net/>
+- <https://jfs.sourceforge.net/>
Reiserfsprogs
-------------
@@ -455,7 +471,7 @@ Pcmciautils
Quota-tools
-----------
-- <http://sourceforge.net/projects/linuxquota/>
+- <https://sourceforge.net/projects/linuxquota/>
Microcodice Intel P6
@@ -476,7 +492,7 @@ FUSE
mcelog
------
-- <http://www.mcelog.org/>
+- <https://www.mcelog.org/>
cpio
----
@@ -497,7 +513,8 @@ PPP
NFS-utils
---------
-- <http://sourceforge.net/project/showfiles.php?group_id=14>
+- <https://sourceforge.net/project/showfiles.php?group_id=14>
+- <https://nfs.sourceforge.net/>
Iptables
--------
@@ -512,12 +529,7 @@ Ip-route2
OProfile
--------
-- <http://oprofile.sf.net/download/>
-
-NFS-Utils
----------
-
-- <http://nfs.sourceforge.net/>
+- <https://oprofile.sf.net/download/>
Documentazione del kernel
*************************
diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst
index 284a75ac19f8..a4b9f44081da 100644
--- a/Documentation/translations/it_IT/process/coding-style.rst
+++ b/Documentation/translations/it_IT/process/coding-style.rst
@@ -214,7 +214,7 @@ Non usate inutilmente le graffe dove una singola espressione è sufficiente.
e
-.. code-block:: none
+.. code-block:: c
	if (condition)
		do_this();
@@ -652,7 +652,7 @@ Quindi, potete sbarazzarvi di GNU emacs, o riconfigurarlo con valori più
sensati. Per fare quest'ultima cosa, potete appiccicare il codice che
segue nel vostro file .emacs:
-.. code-block:: none
+.. code-block:: elisp
(defun c-lineup-arglist-tabs-only (ignored)
"Line up argument lists by tabs, not spaces"
@@ -728,6 +728,10 @@ il testo e altre cose simili.
Per maggiori dettagli, consultate il file
:ref:`Documentation/translations/it_IT/process/clang-format.rst <it_clangformat>`.
+Se utilizzate un programma compatibile con EditorConfig, allora alcune
+configurazioni basilari come l'indentazione e la fine delle righe verranno
+applicate automaticamente. Per maggiori informazioni consultate la pagina:
+https://editorconfig.org/
10) File di configurazione Kconfig
----------------------------------
@@ -898,7 +902,9 @@ usare per assicurarvi che i messaggi vengano associati correttamente ai
dispositivi e ai driver, e che siano etichettati correttamente: dev_err(),
dev_warn(), dev_info(), e così via. Per messaggi che non sono associati ad
alcun dispositivo, <linux/printk.h> definisce pr_info(), pr_warn(), pr_err(),
-eccetera.
+eccetera. Quando tutto funziona correttamente, non dovrebbero esserci stampe,
+per cui preferite dev_dbg/pr_debug a meno che non ci sia qualcosa di sbagliato
+da segnalare.
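As a rough illustration (a hypothetical snippet, not taken from the kernel sources; the foo_* names are invented), a driver probe function would keep routine progress output at debug level and reserve dev_err() for real failures:

.. code-block:: c

	/* Hypothetical example: routine messages use dev_dbg(), which stays
	 * silent unless debugging is enabled, while only real failures are
	 * reported with dev_err(). Assumes <linux/device.h> and
	 * <linux/platform_device.h>. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		void __iomem *regs;

		regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(regs)) {
			dev_err(dev, "failed to map registers\n");
			return PTR_ERR(regs);
		}

		dev_dbg(dev, "registers mapped, device ready\n");
		return 0;
	}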
Tirar fuori un buon messaggio di debug può essere una vera sfida; e quando
l'avete può essere d'enorme aiuto per risolvere problemi da remoto.
diff --git a/Documentation/translations/it_IT/process/deprecated.rst b/Documentation/translations/it_IT/process/deprecated.rst
index ba0ed7dc154c..d4ab76e9be49 100644
--- a/Documentation/translations/it_IT/process/deprecated.rst
+++ b/Documentation/translations/it_IT/process/deprecated.rst
@@ -86,7 +86,7 @@ da kcalloc().
Se questo tipo di allocatore non è disponibile, allora dovrebbero essere usate
le funzioni del tipo *saturate-on-overflow*::
- bar = vmalloc(array_size(count, size));
+ bar = dma_alloc_coherent(dev, array_size(count, size), &dma, GFP_KERNEL);
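A minimal sketch of how the saturate-on-overflow helpers from <linux/overflow.h> combine with an ordinary allocator; the identifiers below (bar, instance, items, count, size) are placeholders, and struct_size() is the companion helper for the struct-plus-trailing-array case discussed next:

.. code-block:: c

	/* Sketch only: array_size() and struct_size() saturate on overflow,
	 * so a multiplication that would wrap becomes a failed allocation
	 * instead of an undersized buffer. */
	bar = kmalloc(array_size(count, size), GFP_KERNEL);

	instance = kmalloc(struct_size(instance, items, count), GFP_KERNEL);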
Un altro tipico caso da evitare è quello di calcolare la dimensione di una
struttura seguita da un vettore di altre strutture, come nel seguente caso::
diff --git a/Documentation/translations/it_IT/process/howto.rst b/Documentation/translations/it_IT/process/howto.rst
index 052f1b3610cb..090941a0a898 100644
--- a/Documentation/translations/it_IT/process/howto.rst
+++ b/Documentation/translations/it_IT/process/howto.rst
@@ -85,8 +85,8 @@ relativi file di documentatione che spiegano come usarele.
Quando un cambiamento del kernel genera anche un cambiamento nell'interfaccia
con lo spazio utente, è raccomandabile che inviate una notifica o una
correzione alle pagine *man* spiegando tale modifica agli amministratori di
-queste pagine all'indirizzo mtk.manpages@gmail.com, aggiungendo
-in CC la lista linux-api@vger.kernel.org.
+queste pagine all'indirizzo alx@kernel.org, aggiungendo in CC la
+lista linux-api@vger.kernel.org.
Di seguito una lista di file che sono presenti nei sorgenti del kernel e che
è richiesto che voi leggiate:
@@ -144,7 +144,7 @@ Di seguito una lista di file che sono presenti nei sorgente del kernel e che
dello sviluppo di Linux ed è molto importante per le persone che arrivano
da esperienze con altri Sistemi Operativi.
- :ref:`Documentation/translations/it_IT/admin-guide/security-bugs.rst <it_securitybugs>`
+ :ref:`Documentation/translations/it_IT/process/security-bugs.rst <it_securitybugs>`
Se ritenete di aver trovato un problema di sicurezza nel kernel Linux,
seguite i passaggi scritti in questo documento per notificarlo agli
sviluppatori del kernel, ed aiutare la risoluzione del problema.
diff --git a/Documentation/translations/it_IT/process/index.rst b/Documentation/translations/it_IT/process/index.rst
index cd7977905fb8..73c643dcc541 100644
--- a/Documentation/translations/it_IT/process/index.rst
+++ b/Documentation/translations/it_IT/process/index.rst
@@ -21,19 +21,75 @@ l'accettazione delle vostre modifiche con il minimo sforzo.
Di seguito le guide che ogni sviluppatore dovrebbe leggere.
+Introduzione al funzionamento dello sviluppo del kernel
+-------------------------------------------------------
+
+Innanzitutto, leggete questi documenti che vi aiuteranno ad entrare nella
+comunità del kernel.
+
.. toctree::
:maxdepth: 1
howto
- code-of-conduct
development-process
submitting-patches
+ submit-checklist
+
+Strumenti e guide tecniche per gli sviluppatori del kernel
+----------------------------------------------------------
+
+Quella che segue è una raccolta di documenti che uno sviluppatore del kernel
+Linux dovrebbe conoscere.
+
+.. toctree::
+ :maxdepth: 1
+
+ changes
programming-language
coding-style
maintainer-pgp-guide
email-clients
+ applying-patches
+ adding-syscalls
+ volatile-considered-harmful
+ botching-up-ioctls
+
+Politiche e dichiarazioni degli sviluppatori
+--------------------------------------------
+
+Quelle che seguono rappresentano le regole che cerchiamo di seguire all'interno
+della comunità del kernel (e oltre).
+
+.. toctree::
+ :maxdepth: 1
+
+ code-of-conduct
kernel-enforcement-statement
kernel-driver-statement
+ stable-api-nonsense
+ stable-kernel-rules
+ management-style
+
+Gestire i bachi
+---------------
+
+I bachi sono parte della nostra vita; dunque è importante che vengano trattati
+con riguardo. I documenti che seguono descrivono le nostre politiche riguardo al
+trattamento di alcune classi particolari di bachi: le regressioni e i problemi
+di sicurezza.
+
+Informazioni per i manutentori
+------------------------------
+
+Come trovare le persone che accetteranno le vostre modifiche.
+
+.. toctree::
+ :maxdepth: 1
+
+ maintainers
+
+Altri documenti
+---------------
Poi ci sono altre guide sulla comunità che sono di interesse per molti
degli sviluppatori:
@@ -41,13 +97,7 @@ degli sviluppatori:
.. toctree::
:maxdepth: 1
- changes
- stable-api-nonsense
- management-style
- stable-kernel-rules
- submit-checklist
kernel-docs
- maintainers
Ed infine, qui ci sono alcune guide più tecniche che son state messe qua solo
perché non si è trovato un posto migliore.
@@ -55,11 +105,7 @@ perché non si è trovato un posto migliore.
.. toctree::
:maxdepth: 1
- applying-patches
- adding-syscalls
magic-number
- volatile-considered-harmful
- botching-up-ioctls
clang-format
../riscv/patch-acceptance
diff --git a/Documentation/translations/it_IT/admin-guide/security-bugs.rst b/Documentation/translations/it_IT/process/security-bugs.rst
index 20994f4bfa31..20994f4bfa31 100644
--- a/Documentation/translations/it_IT/admin-guide/security-bugs.rst
+++ b/Documentation/translations/it_IT/process/security-bugs.rst
diff --git a/Documentation/translations/it_IT/process/stable-kernel-rules.rst b/Documentation/translations/it_IT/process/stable-kernel-rules.rst
index 248bf1e4b171..a2577a806a18 100644
--- a/Documentation/translations/it_IT/process/stable-kernel-rules.rst
+++ b/Documentation/translations/it_IT/process/stable-kernel-rules.rst
@@ -44,7 +44,7 @@ Procedura per sottomettere patch per i sorgenti -stable
.. note::
Una patch di sicurezza non dovrebbe essere gestita (solamente) dal processo
di revisione -stable, ma dovrebbe seguire le procedure descritte in
- :ref:`Documentation/translations/it_IT/admin-guide/security-bugs.rst <it_securitybugs>`.
+ :ref:`Documentation/translations/it_IT/process/security-bugs.rst <it_securitybugs>`.
Per tutte le altre sottomissioni, scegliere una delle seguenti procedure
------------------------------------------------------------------------
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst
index f91c8092844f..4c6a276bdc08 100644
--- a/Documentation/translations/it_IT/process/submitting-patches.rst
+++ b/Documentation/translations/it_IT/process/submitting-patches.rst
@@ -349,6 +349,33 @@ Leggete Documentation/translations/it_IT/process/email-clients.rst per
le raccomandazioni sui programmi di posta elettronica e l'etichetta da usare
sulle liste di discussione.
+.. _it_interleaved_replies:
+
+Rispondere alle email in riga riducendo le citazioni
+------------------------------------------------------
+
+Nelle discussioni riguardo allo sviluppo del kernel viene fortemente scoraggiato
+l'uso di risposte in cima ai messaggi di posta elettronica. Rispondere in riga
+rende le conversazioni molto più scorrevoli. Maggiori dettagli possono essere
+trovati qui: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
+
+Come spesso citato nelle liste di discussione::
+
+ R: http://en.wikipedia.org/wiki/Top_post
+ D: Dove posso trovare informazioni riguardo alle "risposte in cima"?
+ R: Perché incasina il normale ordine con cui si legge un testo.
+ D: Perché è così terribile rispondere in cima?
+ R: Risposte in cima.
+ D: Qual è la cosa più fastidiosa nei messaggi di posta elettronica?
+
+Allo stesso modo, per favore eliminate tutte le citazioni non necessarie per la
+vostra risposta. Questo permette di trovare più facilmente le risposte, e
+permette di risparmiare tempo e spazio. Per maggiori dettagli:
+http://daringfireball.net/2007/07/on_top ::
+
+ R: No.
+ D: Dovrei includere un blocco di citazione dopo la mia risposta?
+
.. _it_resend_reminders:
Non scoraggiatevi - o impazientitevi
diff --git a/Documentation/translations/ja_JP/process/howto.rst b/Documentation/translations/ja_JP/process/howto.rst
index 8d856ebe873c..872876c67896 100644
--- a/Documentation/translations/ja_JP/process/howto.rst
+++ b/Documentation/translations/ja_JP/process/howto.rst
@@ -110,7 +110,7 @@ Linux カーネルソースツリーは幅広い範囲のドキュメントを
新しいドキュメントファイルも追加することを勧めます。
カーネルの変更が、カーネルがユーザ空間に公開しているインターフェイスの
変更を引き起こす場合、その変更を説明するマニュアルページのパッチや情報
-をマニュアルページのメンテナ mtk.manpages@gmail.com に送り、CC を
+をマニュアルページのメンテナ alx@kernel.org に送り、CC を
linux-api@vger.kernel.org に送ることを勧めます。
以下はカーネルソースツリーに含まれている読んでおくべきファイルの一覧で
diff --git a/Documentation/translations/sp_SP/index.rst b/Documentation/translations/sp_SP/index.rst
index c543b495c042..274ef4ad96b9 100644
--- a/Documentation/translations/sp_SP/index.rst
+++ b/Documentation/translations/sp_SP/index.rst
@@ -7,7 +7,7 @@ Traducción al español
\kerneldocCJKoff
-:maintainer: Carlos Bilbao <carlos.bilbao@amd.com>
+:maintainer: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_disclaimer:
diff --git a/Documentation/translations/sp_SP/memory-barriers.txt b/Documentation/translations/sp_SP/memory-barriers.txt
index 27097a808c88..153e57130775 100644
--- a/Documentation/translations/sp_SP/memory-barriers.txt
+++ b/Documentation/translations/sp_SP/memory-barriers.txt
@@ -1,6 +1,6 @@
NOTE:
This is a version of Documentation/memory-barriers.txt translated into
-Spanish by Carlos Bilbao <carlos.bilbao@amd.com>. If you find any
+Spanish by Carlos Bilbao <carlos.bilbao.osdev@gmail.com>. If you find any
difference between this document and the original file or a problem with
the translation, please contact the maintainer of this file. Please also
note that the purpose of this file is to be easier to read for non English
@@ -18,7 +18,7 @@ Documento original: David Howells <dhowells@redhat.com>
Will Deacon <will.deacon@arm.com>
Peter Zijlstra <peterz@infradead.org>
-Traducido por: Carlos Bilbao <carlos.bilbao@amd.com>
+Traducido por: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
Nota: Si tiene alguna duda sobre la exactitud del contenido de esta
traducción, la única referencia válida es la documentación oficial en
inglés.
diff --git a/Documentation/translations/sp_SP/process/1.Intro.rst b/Documentation/translations/sp_SP/process/1.Intro.rst
new file mode 100644
index 000000000000..9b92b6c85221
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/1.Intro.rst
@@ -0,0 +1,302 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/1.Intro.rst
+:Translator: Avadhut Naik <avadhut.naik@amd.com>
+
+.. _sp_development_process_intro:
+
+Introducción
+============
+
+Resumen ejecutivo
+-----------------
+
+El resto de esta sección cubre el alcance del proceso de desarrollo del
+kernel y los tipos de frustraciones que los desarrolladores y sus
+empleadores pueden encontrar allí. Hay muchas razones por las que el
+código del kernel debe fusionarse con el kernel oficial (“mainline”),
+incluyendo la disponibilidad automática para los usuarios, el apoyo de la
+comunidad en muchas formas, y la capacidad de influir en la dirección del
+desarrollo del kernel. El código contribuido al kernel de Linux debe
+estar disponible bajo una licencia compatible con GPL.
+
+:ref:`sp_development_process` introduce el proceso de desarrollo, el ciclo
+de lanzamiento del kernel y la mecánica de la "ventana de combinación"
+(merge window). Se cubren las distintas fases en el desarrollo del parche,
+la revisión y el ciclo de fusión. Hay algunas discusiones sobre
+herramientas y listas de correo. Se anima a los desarrolladores que deseen
+comenzar con el desarrollo del kernel a encontrar y corregir errores como
+ejercicio inicial.
+
+:ref:`sp_development_early_stage` cubre la planificación de proyectos en
+etapas tempranas, con énfasis en involucrar a la comunidad de desarrollo
+lo antes posible.
+
+:ref:`sp_development_coding` trata sobre el proceso de codificación. Se
+discuten varios escollos encontrados por otros desarrolladores. Se cubren
+algunos requisitos para los parches, y hay una introducción a algunas de
+las herramientas que pueden ayudar a garantizar que los parches del kernel
+sean correctos.
+
+:ref:`sp_development_posting` trata sobre el proceso de enviar parches para
+su revisión. Para ser tomados en serio por la comunidad de desarrollo,
+los parches deben estar correctamente formateados y descritos, y deben
+enviarse al lugar correcto. Seguir los consejos de esta sección debería
+ayudar a garantizar la mejor recepción posible para su trabajo.
+
+:ref:`sp_development_followthrough` cubre lo que sucede después de publicar
+parches; el trabajo está lejos de terminar en ese momento. Trabajar con
+revisores es una parte crucial del proceso de desarrollo; esta sección
+ofrece varios consejos sobre cómo evitar problemas en esta importante
+etapa. Se advierte a los desarrolladores que no asuman que el trabajo está
+terminado cuando un parche se fusiona en mainline.
+
+:ref:`sp_development_advancedtopics` introduce un par de temas “avanzados”:
+la administración de parches con git y la revisión de parches publicados
+por otros.
+
+:ref:`sp_development_conclusion` concluye el documento con punteros a las
+fuentes para obtener más información sobre el desarrollo del kernel.
+
+De qué trata este documento
+---------------------------
+
+El kernel de Linux, con más de 8 millones de líneas de código y más de
+1000 colaboradores en cada versión, es uno de los proyectos de software
+libre más grandes y activos que existen. Desde sus humildes comienzos en
+1991, este kernel ha evolucionado hasta convertirse en el mejor componente
+del sistema operativo que se ejecuta en reproductores de música digital
+de bolsillo, PC de escritorio, las supercomputadoras más grandes que
+existen y todo tipo de sistemas intermedios. Es una solución robusta,
+eficiente, y escalable para casi cualquier situación.
+
+Con el crecimiento de Linux, ha llegado un aumento en el número de
+desarrolladores (y empresas) que desean participar en su desarrollo. Los
+vendedores de hardware quieren asegurarse de que Linux sea compatible con
+sus productos, lo que hace que esos productos sean atractivos para los
+usuarios de Linux. Los vendedores de sistemas embebidos, que utilizan
+Linux como componente de un producto integrado, quieren que Linux sea lo
+más capaz y adecuado posible para la tarea en cuestión. Los distribuidores
+y otros vendedores de software que basan sus productos en Linux tienen un
+claro interés en las capacidades, el rendimiento, y la fiabilidad del
+kernel de Linux. Y los usuarios finales, también, a menudo desearán
+cambiar Linux para que se adapte mejor a sus necesidades.
+
+Una de las características más convincentes de Linux es que es accesible
+a estos desarrolladores; cualquier persona con las habilidades necesarias
+puede mejorar Linux e influir en la dirección de su desarrollo. Los
+productos propietarios no pueden ofrecer este tipo de apertura, que es una
+característica del proceso de software libre. Pero, en todo caso, el
+kernel es aún más libre que la mayoría de los otros proyectos de software
+libre. Un ciclo típico de desarrollo de kernel de tres meses puede
+involucrar a más de 1000 desarrolladores que trabajan para más de 100
+empresas diferentes (o sin pertenecer a ninguna empresa).
+
+Trabajar con la comunidad de desarrollo del kernel no es especialmente
+difícil. Pero, a pesar de eso, muchos colaboradores potenciales han
+experimentado dificultades al tratar de hacer el trabajo del kernel. La
+comunidad del kernel ha desarrollado sus propias formas distintivas de
+operar, lo que le permite funcionar de manera fluida (y producir un
+producto de alta calidad) en un entorno donde miles de líneas de código
+se cambian todos los días. Por lo tanto, no es sorprendente que el
+proceso de desarrollo del kernel de Linux difiera mucho de los métodos de
+desarrollo propietarios.
+
+El proceso de desarrollo del kernel puede parecer extraño e intimidante
+para los nuevos desarrolladores, pero hay buenas razones y una sólida
+experiencia detrás de él. Un desarrollador que no entienda las formas de
+la comunidad del kernel (o, peor aún, que intente burlarse de ellas o eludirlas)
+tendrá una experiencia frustrante por delante. La comunidad de
+desarrollo, si bien es servicial para aquellos que están tratando de
+aprender, tiene poco tiempo para aquellos que no escuchan o que no se
+preocupan por el proceso de desarrollo.
+
+Se espera que quienes lean este documento puedan evitar esa experiencia
+frustrante. Hay mucho material aquí, pero el esfuerzo que implica leerlo
+será recompensado en poco tiempo. La comunidad de desarrollo siempre
+necesita desarrolladores que ayuden a mejorar el kernel; el siguiente
+texto debería ayudarle, o a quienes trabajan para usted, a unirse a
+nuestra comunidad.
+
+Créditos
+--------
+
+Este documento fue escrito por Jonathan Corbet, corbet@lwn.net. Ha sido
+mejorado por los comentarios de Johannes Berg, James Berry, Alex Chiang,
+Roland Dreier, Randy Dunlap, Jake Edge, Jiri Kosina, Matt Mackall, Arthur
+Marsh, Amanda McPherson, Andrew Morton, Andrew Price, Tsugikazu Shibata y
+Jochen Voß.
+Este trabajo fue respaldado por la Fundación Linux; gracias especialmente
+a Amanda McPherson, quien reconoció el valor de este esfuerzo e hizo que
+todo sucediera.
+
+Importancia de integrar el código en el mainline
+------------------------------------------------
+
+Algunas empresas y desarrolladores ocasionalmente se preguntan por qué
+deberían molestarse en aprender cómo trabajar con la comunidad del
+kernel y obtener su código en el kernel mainline (el “mainline” es el
+kernel mantenido por Linus Torvalds y utilizado como base por los
+distribuidores de Linux). A corto plazo, contribuir con código puede
+parecer un gasto evitable; parece más fácil mantener el código separado
+y dar soporte a los usuarios directamente. La verdad del asunto es que
+mantener el código separado (“fuera del árbol”) es pan para hoy y hambre
+para mañana.
+
+Para ilustrar los costos del código fuera-del-árbol, aquí hay algunos
+aspectos relevantes del proceso de desarrollo del kernel. La mayoría de
+estos se discutirán con mayor detalle más adelante en este documento.
+Considerar:
+
+- El código que se ha fusionado con el kernel mainline está disponible
+ para todos los usuarios de Linux. Estará presente automáticamente en
+ todas las distribuciones que lo habiliten. No hay necesidad de discos
+ de controladores, descargas, o las molestias de admitir múltiples
+ versiones de múltiples distribuciones; todo simplemente funciona, para
+ el desarrollador y para el usuario. La incorporación al mainline
+ resuelve un gran número de problemas de distribución y soporte.
+
+- Mientras los desarrolladores del kernel se esfuerzan por mantener una
+ interfaz estable para el espacio de usuario, la API interna de kernel
+ está en constante cambio. La falta de una interfaz interna estable es
+ una decisión deliberada de diseño; permite realizar mejoras
+ fundamentales en cualquier momento y da como resultado un código de
+ mayor calidad. Pero un resultado de esa política es que cualquier
+ código fuera-del-árbol requiere un mantenimiento constante si va a
+ funcionar con los nuevos kernels. Mantener el código fuera-del-árbol
+ requiere una cantidad significativa de trabajo sólo para que ese código
+ siga funcionando.
+
+ En su lugar, el código en el mainline no requiere este trabajo como
+ resultado de una regla simple que requiere que cualquier desarrollador
+ que realice un cambio en la API también corrija cualquier código que
+ se rompa como resultado de ese cambio. Así que, el código fusionado en
+ el mainline tiene costos de mantenimiento significativamente más bajos.
+
+- Más allá de eso, el código que está en el kernel a menudo será
+ mejorado por otros desarrolladores. Resultados sorprendentes pueden
+ provenir de capacitar a su comunidad de usuarios y clientes para mejorar
+ su producto.
+
+- El código del kernel se somete a revisión, tanto antes como después
+ de fusionarse con el mainline. No importa cuán fuertes sean las
+ habilidades del desarrollador original, este proceso de revisión
+ invariablemente encuentra formas en las que se puede mejorar el código.
+ A menudo la revisión encuentra errores graves y problemas de seguridad.
+ Esto es especialmente cierto para el código que se ha desarrollado en
+ un entorno cerrado; dicho código se beneficia fuertemente de la
+ revisión por desarrolladores externos. El código fuera-del-árbol es
+ de menor calidad.
+
+- La participación en el proceso de desarrollo es su manera de influir en
+ la dirección del desarrollo del kernel. Los usuarios que se quejan
+ desde el sofá son escuchados, pero los desarrolladores activos tienen
+ una voz más fuerte – y la capacidad de implementar cambios que hacen
+ que el kernel funcione mejor para sus necesidades.
+
+- Cuando el código se mantiene por separado, siempre existe la posibilidad
+ de que un tercero contribuya a una implementación diferente de una
+ característica similar. Si eso sucede, conseguir que su código sea
+ fusionado será mucho más difícil – hasta el punto de la imposibilidad.
+ Entonces se enfrentará a las desagradables alternativas de (1) mantener
+ una característica no estándar fuera del árbol indefinidamente, o
+ (2) abandonar su código y migrar sus usuarios a la versión en el árbol.
+
+- La contribución del código es la acción fundamental que hace que todo
+ el proceso funcione. Al contribuir con su código, puede agregar nuevas
+ funcionalidades al kernel y proporcionar capacidades y ejemplos que son
+ útiles para otros desarrolladores del kernel. Si ha desarrollado código
+ para Linux (o está pensando en hacerlo), claramente tiene un interés
+ en el éxito continuo de esta plataforma; contribuir con código es una
+ de las mejores maneras de ayudar a garantizar ese éxito.
+
+Todo el razonamiento anterior se aplica a cualquier código de kernel
+fuera-del-árbol, incluido el código que se distribuye en forma propietaria
+y únicamente en binario. Sin embargo, hay factores adicionales que deben
+tenerse en cuenta antes de considerar cualquier tipo de distribución de
+código de kernel únicamente en binario. Estos incluyen:
+
+- Las cuestiones legales en torno a la distribución de módulos
+ propietarios del kernel son, en el mejor de los casos, confusas;
+ bastantes titulares de derechos de autor del kernel creen que la
+ mayoría de los módulos binarios son productos derivados del kernel y
+ que, como resultado, su distribución es una violación de la licencia
+ Pública General de GNU (sobre la que se dirá más adelante). El autor
+ de este texto no es abogado, y nada en este documento puede considerarse
+ asesoramiento legal. Solo los tribunales pueden determinar el verdadero
+ estatus legal de los módulos de código cerrado. Pero la incertidumbre
+ que acecha a esos módulos está ahí a pesar de todo.
+
+- Los módulos binarios aumentan enormemente la dificultad de depurar
+ problemas del kernel, hasta el punto de que la mayoría de los
+ desarrolladores del kernel ni siquiera lo intentarán. Por lo tanto,
+ la distribución de módulos únicamente en binario hará que sea más
+ difícil para sus usuarios obtener soporte de la comunidad.
+
+- El soporte también es más difícil para los distribuidores de módulos
+ únicamente en binario, que deben proporcionar una versión del módulo
+ para cada distribución y cada versión del kernel que deseen apoyar.
+ Podría requerir docenas de compilaciones de un solo módulo para
+ proporcionar una cobertura razonablemente completa, y sus usuarios
+ tendrán que actualizar su módulo por separado cada vez que
+ actualicen su kernel.
+
+- Todo lo que se dijo anteriormente sobre la revisión de código se aplica
+ doblemente al código cerrado. Dado que este código no está disponible
+ en absoluto, no puede haber sido revisado por la comunidad y, sin duda,
+ tendrá serios problemas.
+
+Los fabricantes de sistemas embebidos, en particular, pueden verse
+tentados a ignorar gran parte de lo que se ha dicho en esta sección
+creyendo que están enviando un producto autónomo que utiliza una
+versión de kernel congelada y no requiere más desarrollo después de su
+lanzamiento. Este argumento desaprovecha el valor de la revisión
+generalizada del código y el valor de permitir que sus usuarios agreguen
+capacidades a su producto. Pero estos productos también tienen una vida
+comercial limitada, después de la cual se debe lanzar una nueva versión.
+En ese punto, los vendedores cuyo código esté en el mainline y bien
+mantenido estarán en una posición mucho mejor para preparar el nuevo
+producto rápidamente para el mercado.
+
+Licencias
+---------
+
+El código se contribuye al kernel de Linux bajo varias licencias, pero
+todo el código debe ser compatible con la versión 2 de la Licencia
+Pública General de GNU (GPLv2), que cubre la distribución del kernel. En
+la práctica, esto significa que todas las contribuciones de código están
+cubiertas ya sea por la GPLv2 (con, opcionalmente, un lenguaje que permite
+la distribución en versiones posteriores de la GPL) o por la licencia BSD
+de tres cláusulas. Cualquier contribución que no esté cubierta por una
+licencia compatible no será aceptada en el kernel.
+
+No se requieren (ni se solicitan) cesiones de derechos de autor para el
+código aportado al kernel. Todo el código fusionado en el kernel
+mainline conserva su propiedad original; como resultado, el kernel ahora
+tiene miles de propietarios.
+
+Una implicación de esta estructura de propiedad es que cualquier intento
+de cambiar la licencia del kernel está condenado a un fracaso casi seguro.
+Hay pocos escenarios prácticos en los que se pueda obtener el acuerdo de
+todos los titulares de derechos de autor (o eliminar su código del
+kernel). Así que, en particular, no hay perspectivas de una migración a
+la versión 3 de la GPL en un futuro previsible.
+
+Es imperativo que todo el código aportado al kernel sea legítimamente
+software libre. Por esa razón, no se aceptará código de colaboradores
+anónimos (o seudónimos). Todos los colaboradores están obligados a
+“firmar” su código, indicando que el código puede ser distribuido con
+el kernel bajo la GPL. El código que no ha sido licenciado como software
+libre por su propietario, o que corre el riesgo de crear problemas
+relacionados con los derechos de autor para el kernel (como el código que
+se deriva de esfuerzos de ingeniería inversa que carecen de las garantías
+adecuadas) no puede ser contribuido.
+
+Las preguntas sobre cuestiones relacionadas con los derechos de autor son
+comunes en las listas de correo de desarrollo de Linux. Normalmente, a estas
+preguntas no les faltarán respuestas, pero se debe tener en cuenta
+que las personas que responden a esas preguntas no son abogados y no
+pueden proporcionar consejo legal. Si tiene preguntas legales relacionadas
+con el código fuente de Linux, no hay sustituto para hablar con un abogado
+que entienda este campo. Confiar en las respuestas obtenidas en listas
+técnicas de correo es un asunto arriesgado.
diff --git a/Documentation/translations/sp_SP/process/2.Process.rst b/Documentation/translations/sp_SP/process/2.Process.rst
new file mode 100644
index 000000000000..5993eed71563
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/2.Process.rst
@@ -0,0 +1,542 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/2.Process.rst
+:Translator: Avadhut Naik <avadhut.naik@amd.com>
+
+.. _sp_development_process:
+
+Cómo funciona el proceso de desarrollo
+======================================
+
+El desarrollo del kernel de Linux a principios de la década de 1990 fue
+un asunto relajado, con un número relativamente pequeño de usuarios y
+desarrolladores involucrados. Con una base de usuarios en los millones y
+alrededor de 2,000 desarrolladores involucrados durante un año, el kernel
+ha tenido que adaptar varios procesos para mantener el desarrollo sin
+problemas. Se requiere una comprensión sólida de cómo funciona el proceso
+para ser una parte efectiva del mismo.
+
+El panorama general
+-------------------
+
+Los desarrolladores del kernel utilizan un proceso de lanzamiento basado
+en el tiempo de manera flexible, con un nuevo lanzamiento principal del
+kernel ocurriendo cada dos o tres meses. El historial reciente de
+lanzamientos se ve así:
+
+ ====== ==================
+ 5.0 Marzo 3, 2019
+ 5.1 Mayo 5, 2019
+ 5.2 Julio 7, 2019
+ 5.3 Septiembre 15, 2019
+ 5.4 Noviembre 24, 2019
+ 5.5 Enero 6, 2020
+ ====== ==================
+
+Cada lanzamiento 5.x es un lanzamiento principal del kernel con nuevas
+características, cambios internos en la API y más. Un lanzamiento típico
+puede contener alrededor de 13,000 conjuntos de cambios que modifican
+varias centenas de miles de líneas de código. 5.x es la vanguardia del
+desarrollo del kernel de Linux; el kernel utiliza un modelo de desarrollo
+continuo que está integrando continuamente cambios importantes.
+
+Se sigue una disciplina relativamente sencilla con respecto a la fusión
+de parches para cada lanzamiento. Al comienzo de cada ciclo de desarrollo,
+se dice que la "merge window" (ventana de fusión) está abierta. En ese
+momento, el código que se considera lo suficientemente estable (y que es
+aceptado por la comunidad de desarrollo) se fusiona en el kernel mainline.
+La mayor parte de los cambios para un nuevo ciclo de desarrollo (y todos
+los cambios principales) se fusionarán durante este tiempo, a un ritmo
+cercano a los 1,000 cambios (“parches” o “conjuntos de cambios”) por
+día.
+
+(Aparte, vale la pena señalar que los cambios integrados durante la
+ventana de fusión no surgen de la nada; han sido recolectados, probados
+y montados con anticipación. Cómo funciona ese proceso se describirá en
+detalle más adelante).
+
+La ventana de fusión dura aproximadamente dos semanas. Al final de este
+tiempo, Linus Torvalds declarará que la ventana está cerrada y publicará
+el primero de los kernels “rc”. Para el kernel destinado a ser 5.6, por
+ejemplo, el lanzamiento al final de la ventana de fusión se llamará
+5.6-rc1. El lanzamiento -rc1 señala que el tiempo para fusionar nuevas
+características ha pasado y que el tiempo para estabilizar el siguiente
+kernel ha comenzado.
+
+Durante las próximas seis a diez semanas, solo los parches que solucionen
+problemas deben enviarse al mainline. En ocasiones, se permitirá un cambio
+más significativo, pero tales ocasiones son raras; los desarrolladores que
+intentan fusionar nuevas características fuera de la ventana de fusión
+suelen recibir una recepción poco amistosa. Como regla general, si se
+pierde la ventana de fusión de una característica determinada, lo mejor
+que puede hacer es esperar al siguiente ciclo de desarrollo. (Se hace una
+excepción ocasional para los drivers de hardware que no se admitía
+anteriormente; si no afectan a ningún código en árbol, no pueden causar
+regresiones y debería ser seguro agregarlos en cualquier momento).
+
+A medida que las correcciones se abren paso en el mainline, la tasa de
+parches se ralentizará con el tiempo. Linus lanza nuevos kernels -rc
+aproximadamente una vez a la semana; una serie normal llegará a algún
+punto entre -rc6 y -rc9 antes de que se considere que el kernel es
+suficientemente estable y realice el lanzamiento final. En ese momento,
+todo el proceso vuelve a empezar.
+
+Como ejemplo, así fue el ciclo de desarrollo de 5.4 (todas las fechas son
+de 2019):
+
+ ============== =======================================
+ Septiembre 15 5.3 lanzamiento estable
+ Septiembre 30 5.4-rc1, la ventana de fusión se cierra
+ Octubre 6 5.4-rc2
+ Octubre 13 5.4-rc3
+ Octubre 20 5.4-rc4
+ Octubre 27 5.4-rc5
+ Noviembre 3 5.4-rc6
+ Noviembre 10 5.4-rc7
+ Noviembre 17 5.4-rc8
+ Noviembre 24 5.4 lanzamiento estable
+ ============== =======================================
+
+¿Cómo deciden los desarrolladores cuándo cerrar el ciclo de desarrollo
+y crear el lanzamiento estable? La métrica más significativa utilizada
+es la lista de regresiones de lanzamientos anteriores. Ningún error es
+bienvenido, pero aquellos que rompen sistemas que funcionaron en el
+pasado se consideran especialmente graves. Por esta razón, los parches
+que causan regresiones se ven con malos ojos y es bastante probable que
+se reviertan durante el periodo de estabilización.
+
+El objetivo de los desarrolladores es corregir todas las regresiones
+conocidas antes de que se realice el lanzamiento estable. En el mundo
+real, este tipo de perfección es difícil de lograr; hay demasiadas
+variables en un proyecto de este tamaño. Llega un punto en el que
+retrasar el lanzamiento final solo empeora el problema; la pila de cambios
+que esperan la siguiente ventana de fusión crecerá, creando aún más
+regresiones la próxima vez. Por lo tanto, la mayoría de los kernels 5.x
+se lanzan con un puñado de regresiones conocidas, aunque, con suerte,
+ninguna de ellas es seria.
+
+Una vez que se realiza un lanzamiento estable, su mantenimiento continuo
+se transfiere al “equipo estable”, actualmente encabezado por Greg
+Kroah-Hartman. El equipo estable lanzará actualizaciones ocasionales al
+lanzamiento estable utilizando el esquema de numeración 5.x.y. Para ser
+considerado para un lanzamiento de actualización, un parche debe
+(1) corregir un error significativo y (2) ya estar fusionado en el
+mainline para el siguiente kernel de desarrollo. Por lo general, los
+kernels recibirán actualizaciones estables durante un poco más de un
+ciclo de desarrollo después de su lanzamiento inicial. Así, por ejemplo,
+la historia del kernel 5.2 se veía así (todas las fechas en 2019):
+
+ ============== ===============================
+ Julio 7 5.2 lanzamiento estable
+ Julio 14 5.2.1
+ Julio 21 5.2.2
+ Julio 26 5.2.3
+ Julio 28 5.2.4
+ Julio 31 5.2.5
+ ... ...
+ Octubre 11 5.2.21
+ ============== ===============================
+
+5.2.21 fue la última actualización estable del lanzamiento 5.2.
+
+Algunos kernels se designan como kernels “a largo plazo”; recibirán
+soporte durante un periodo más largo. Consulte el siguiente enlace para
+obtener la lista de versiones activas del kernel a largo plazo y sus
+maintainers:
+
+ https://www.kernel.org/category/releases.html
+
+La selección de un kernel para soporte a largo plazo es puramente una
+cuestión de que un maintainer tenga la necesidad y el tiempo para
+mantener ese lanzamiento. No hay planes conocidos para ofrecer soporte a
+largo plazo para ningún lanzamiento específico próximo.
+
+Ciclo de vida de un parche
+--------------------------
+
+Los parches no van directamente desde el teclado del desarrollador al
+kernel mainline. Hay, en cambio, un proceso algo complicado (aunque algo
+informal) diseñado para garantizar que cada parche sea revisado en cuanto
+a calidad y que cada parche implemente un cambio que es deseable tener en
+el mainline. Este proceso puede ocurrir rápidamente para correcciones
+menores, o, en el caso de cambios grandes y controvertidos, continuar
+durante años. Gran parte de la frustración de los desarrolladores proviene
+de la falta de comprensión de este proceso o de sus intentos de eludirlo.
+
+Con la esperanza de reducir esa frustración, este documento describirá
+cómo un parche entra en el kernel. Lo que sigue a continuación es una
+introducción que describe el proceso de una manera un tanto idealizada. Un
+tratamiento mucho más detallado vendrá en secciones posteriores.
+
+Las etapas por las que pasa un parche son, generalmente:
+
+ - Diseño. Aquí es donde se establecen los requisitos reales para el
+ parche – y la forma en que se cumplirán esos requisitos. El trabajo
+ de diseño a menudo se realiza sin involucrar a la comunidad, pero es
+ mejor hacer este trabajo de manera abierta si es posible; puede ahorrar
+ mucho tiempo rediseñando las cosas más tarde.
+
+ - Revisión inicial. Los parches se publican en la lista de correo
+ relevante y los desarrolladores en esa lista responden con cualquier
+ comentario que puedan tener. Este proceso debería revelar cualquier
+ problema importante con un parche si todo va bien.
+
+ - Revisión más amplia. Cuando el parche se acerca a estar listo para su
+ inclusión en el mainline, debe ser aceptado por un maintainer del
+ subsistema relevante – aunque esta aceptación no es una garantía de
+ que el parche llegara hasta el mainline. El parche aparecerá en el
+ árbol de subsistemas del maintainer y en los árboles -next (descritos
+ a continuación). Cuando el proceso funciona, este paso conduce a una
+ revisión exhaustiva del parche y al descubrimiento de cualquier
+ problema resultante de la integración de este parche con el trabajo
+ realizado por otros.
+
+ - Tenga en cuenta que la mayoría de los maintainers también tienen
+ trabajos diurnos, por lo que fusionar su parche puede no ser su máxima
+ prioridad. Si su parche está recibiendo comentarios sobre los cambios
+ que se necesitan, debería realizar esos cambios o justificar por qué
+ no deberían realizarse. Si su parche no tiene quejas de revisión, pero
+ no está siendo fusionado por el maintainer apropiado del subsistema o
+ del driver, debe ser persistente en la actualización del parche
+ al kernel actual para que se aplique limpiamente y seguir enviándolo
+ para su revisión y fusión.
+
+ - Fusión en el mainline. Eventualmente, un parche exitoso se fusionará
+ en el repositorio mainline administrado por Linus Torvalds. Más
+ comentarios y/o problemas pueden surgir en este momento; es importante
+ que el desarrollador responda a estos y solucione cualquier problema
+ que surja.
+
+ - Lanzamiento estable. El número de usuarios potencialmente afectados por
+ el parche es ahora grande, por lo que, una vez más, pueden surgir
+ nuevos problemas.
+
+ - Mantenimiento a largo plazo. Si bien un desarrollador puede olvidarse
+ del código después de fusionarlo, ese comportamiento tiende a dejar
+ una impresión negativa en la comunidad de desarrollo. Fusionar el
+ código elimina parte de la carga de mantenimiento; otros solucionarán
+ los problemas causados por los cambios en la API. Sin embargo, el
+ desarrollador original debe seguir asumiendo la responsabilidad del
+ código si quiere seguir siendo útil a largo plazo.
+
+Uno de los peores errores cometidos por los desarrolladores del kernel
+(o sus empleadores) es tratar de reducir el proceso a un solo paso de
+“fusión en el mainline”. Este enfoque conduce invariablemente a la
+frustración de todos los involucrados.
+
+Cómo se integran los parches en el kernel
+-----------------------------------------
+
+Hay exactamente una persona que puede fusionar parches en el repositorio
+mainline del kernel: Linus Torvalds. Pero, por ejemplo, de los más de
+9,500 parches que se incluyeron en el kernel 2.6.38, solo 112 (alrededor
+del 1.3%) fueron elegidos directamente por Linus mismo. El proyecto del
+kernel ha crecido desde hace tiempo hasta un tamaño en el que ningún
+desarrollador individual podría inspeccionar y seleccionar todos los
+parches sin ayuda. La forma que los desarrolladores del kernel han
+abordado este crecimiento es a través del uso de un sistema jerárquico
+alrededor de una cadena de confianza.
+
+La base de código del kernel se descompone lógicamente en un conjunto de
+subsistemas: redes, soporte de arquitectura específica, gestión de
+memoria, dispositivos de video, etc. La mayoría de los subsistemas tienen
+un maintainer designado, un desarrollador que tiene la responsabilidad
+general del código dentro de ese subsistema. Estos maintainers de
+subsistemas son los guardianes (en cierto modo) de la parte del kernel que
+gestionan; son los que (usualmente) aceptarán un parche para incluirlo en
+el kernel mainline.
+
+Cada uno de los maintainers del subsistema administra su propia versión
+del árbol de fuentes del kernel, generalmente (pero, ciertamente no
+siempre) usando la herramienta de administración de código fuente de git.
+Herramientas como git (y herramientas relacionadas como quilt o mercurial)
+permiten a los maintainers realizar un seguimiento de una lista de
+parches, incluida la información de autoría y otros metadatos. En
+cualquier momento, el maintainer puede identificar qué parches de su
+repositorio no se encuentran en el mainline.
+
+Cuando se abre la ventana de fusión, los maintainers de nivel superior
+le pedirán a Linus que “extraiga” los parches que han seleccionado para
+fusionar de sus repositorios. Si Linus está de acuerdo, el flujo de
+parches fluirá hacia su repositorio, convirtiéndose en parte del kernel
+mainline. La cantidad de atención que Linus presta a los parches
+específicos recibidos en una operación de extracción varía. Está claro
+que, a veces, examina bastante de cerca. Pero, como regla general, Linus
+confía en que los maintainers del subsistema no envíen parches
+defectuosos al upstream.
+
+Los maintainers de subsistemas, a su vez, pueden extraer parches de otros
+maintainers. Por ejemplo, el árbol de red se construye a partir de
+parches que se acumulan primero en árboles dedicados a drivers de
+dispositivos de red, redes inalámbricas, etc. Esta cadena de repositorios
+puede ser arbitrariamente larga, aunque rara vez supera los dos o tres
+enlaces. Dado que cada maintainer de la cadena confía en los que
+administran árboles de nivel inferior, este proceso se conoce como la
+“cadena de confianza”.
+
+Claramente, en un sistema como este, lograr que los parches se integren
+en el kernel depende de encontrar el maintainer adecuado. Enviar parches
+directamente a Linus no es normalmente la forma correcta de hacerlo.
+
+Árboles siguientes (next)
+-------------------------
+
+La cadena de árboles de subsistemas guía el flujo de parches en el
+kernel, pero también plantea una pregunta interesante: ¿Qué pasa si
+alguien quiere ver todos los parches que se están preparando para la
+próxima ventana de fusión? Los desarrolladores estarán interesados en
+saber que otros cambios están pendientes para ver si hay algún conflicto
+del que preocuparse; un parche que cambia un prototipo de función del
+núcleo del kernel, por ejemplo, entrará en conflicto con cualquier otro
+parche que utilice la forma anterior de esa función. Los revisores y
+probadores quieren tener acceso a los cambios en su forma integrada antes
+de que todos esos cambios se integren en el kernel mainline. Uno podría
+extraer cambios de todos los árboles de subsistemas interesantes, pero
+eso sería un trabajo tedioso y propenso a errores.
+
+La respuesta viene en forma de árboles -next, donde los árboles de
+subsistemas se recopilan para pruebas y revisiones. El más antiguo de
+estos árboles, mantenido por Andrew Morton, se llama “-mm” (por gestión
+de la memoria, que es como comenzó). El árbol “-mm” integra parches
+de una larga lista de árboles de subsistemas; también tiene algunos
+parches destinados a ayudar con la depuración.
+
+Más allá de eso, -mm contiene una colección significativa de parches
+que han sido seleccionados directamente por Andrew. Estos parches pueden
+haber sido publicados en una lista de correo o aplicarse a una parte del
+kernel para la que no hay un árbol de subsistemas designado. Como
+resultado, -mm funciona como una especie de árbol de subsistemas de
+último recurso; si no hay otro camino obvio para un parche en el mainline,
+es probable que termine en -mm. Los parches misceláneos que se acumulan
+en -mm eventualmente se enviarán a un árbol de subsistema apropiado o se
+enviarán directamente a Linus. En un ciclo de desarrollo típico,
+aproximadamente el 5-10% de los parches que van al mainline llegan allí
+a través de -mm.
+
+El parche -mm actual está disponible en el directorio “mmotm” (-mm
+del momento) en:
+
+ https://www.ozlabs.org/~akpm/mmotm/
+
+Sin embargo, es probable que el uso del árbol MMOTM sea una experiencia
+frustrante; existe una posibilidad real de que ni siquiera se
+compile.
+
+El árbol principal para la fusión de parches del siguiente ciclo es
+linux-next, mantenido por Stephen Rothwell. El árbol linux-next es, por
+diseño, una instantánea de cómo se espera que se vea el mainline después
+de que se cierre la siguiente ventana de fusión. Los árboles linux-next
+se anuncian en las listas de correo linux-kernel y linux-next cuando se
+ensamblan; Se pueden descargar desde:
+
+ https://www.kernel.org/pub/linux/kernel/next/
+
+Linux-next se ha convertido en una parte integral del proceso de
+desarrollo del kernel; todos los parches fusionados durante una ventana
+de fusión determinada deberían haber encontrado su camino en linux-next
+en algún momento antes de que se abra la ventana de fusión.
+
+Árboles de staging
+------------------
+
+El árbol de fuentes del kernel contiene el directorio drivers/staging/,
+donde residen muchos subdirectorios para drivers o sistemas de archivos
+que están en proceso de ser agregados al árbol del kernel. Permanecen
+en drivers/staging mientras aún necesitan más trabajo; una vez
+completados, se pueden mover al kernel propiamente dicho. Esta es una
+forma de realizar un seguimiento de los drivers que no están a la
+altura de la codificación o los estándares de calidad del kernel de
+Linux, pero que las personas pueden querer usar y realizar un
+seguimiento del desarrollo.
+
+Greg Kroah-Hartman mantiene actualmente el árbol de staging. Los drivers
+que aun necesitan trabajo se le envían, y cada driver tiene su propio
+subdirectorio en drivers/staging/. Junto con los archivos de origen del
+driver, también debe haber un archivo TODO en el directorio. El archivo
+TODO enumera el trabajo pendiente que el driver necesita para ser aceptado
+en el kernel propiamente dicho, así como una lista de personas a las que
+Cc’d para cualquier parche para el driver. Las reglas actuales exigen
+que los drivers que contribuyen a staging deben, como mínimo, compilarse
+correctamente.
+
+El staging puede ser una forma relativamente fácil de conseguir nuevos
+drivers en el mainline donde, con suerte, llamarán la atención de otros
+desarrolladores y mejorarán rápidamente. Sin embargo, la entrada en el
+staging no es el final de la historia; el código que no está viendo
+progreso regular eventualmente será eliminado. Los distribuidores también
+tienden a ser relativamente reacios a habilitar los drivers de staging.
+Por lo tanto, el staging es, en el mejor de los casos, una parada en el
+camino hacia convertirse en un driver apropiado del mainline.
+
+Herramientas
+------------
+
+Como se puede ver en el texto anterior, el proceso de desarrollo del
+kernel depende en gran medida de la capacidad de dirigir colecciones de
+parches en varias direcciones. Todo ello no funcionaría tan bien como lo
+hace sin herramientas apropiadamente potentes. Los tutoriales sobre cómo
+usar estas herramientas están mucho más allá del alcance de este
+documento, pero hay espacio para algunos consejos.
+
+Con mucho, el sistema de gestión de código fuente dominante utilizado
+por la comunidad del kernel es git. Git es uno de los varios sistemas de
+control de versiones distribuidos que se están desarrollando en la
+comunidad de software libre. Está bien ajustado para el desarrollo de
+kernel, ya que funciona bastante bien cuando se trata de grandes
+repositorios y grandes cantidades de parches. También tiene la reputación
+de ser difícil de aprender y usar, aunque ha mejorado con el tiempo.
+Algún tipo de familiaridad con git es casi un requisito para los
+desarrolladores del kernel; incluso si no lo usan para su propio
+trabajo, necesitarán git para mantenerse al día con lo que otros
+desarrolladores (y el mainline) están haciendo.
+
+Git ahora está empaquetado por casi todas las distribuciones de Linux.
+Hay una página de inicio en:
+
+ https://git-scm.com/
+
+Esa página tiene punteros a documentación y tutoriales.
+
+Entre los desarrolladores de kernel que no usan git, la opción más
+popular es casi con certeza Mercurial:
+
+ https://www.selenic.com/mercurial/
+
+Mercurial comparte muchas características con git, pero proporciona una
+interfaz que muchos encuentran más fácil de usar.
+
+Otra herramienta que vale la pena conocer es Quilt:
+
+ https://savannah.nongnu.org/projects/quilt/
+
+Quilt es un sistema de gestión de parches, en lugar de un sistema de
+gestión de código fuente. No rastrea el historial a lo largo del tiempo;
+en cambio, está orientado al seguimiento de un conjunto especifico de
+cambios en relación con una base de código en evolución. Algunos de los
+principales maintainers de subsistemas utilizan Quilt para gestionar los
+parches destinados a ir upstream. Para la gestión de ciertos tipos de
+árboles (por ejemplo, -mm) Quilt es la mejor herramienta para el trabajo.
+
+Listas de correo
+----------------
+
+Una gran parte del trabajo de desarrollo del kernel de Linux se realiza a
+través de listas de correo. Es difícil ser un miembro plenamente funcional
+de la comunidad sin unirse al menos a una lista en alguna parte. Pero las
+listas de correo de Linux también representan un peligro potencial para
+los desarrolladores, que corren el riesgo de quedar enterrados bajo una
+carga de correo electrónico, incumplir las convenciones utilizadas en las
+listas de Linux, o ambas cosas.
+
+La mayoría de las listas de correo del kernel se ejecutan en
+vger.kernel.org; la lista principal se puede encontrar en:
+
+ http://vger.kernel.org/vger-lists.html
+
+Sin embargo, hay listas alojadas en otros lugares; varias de ellas se
+encuentran en redhat.com/mailman/listinfo.
+
+La lista de correo principal para el desarrollo del kernel es, por
+supuesto, linux-kernel. Esta lista es un lugar intimidante; el volumen
+puede alcanzar 500 mensajes por día, la cantidad de ruido es alta, la
+conversación puede ser muy técnica y los participantes no siempre se
+preocupan por mostrar un alto grado de cortesía. Pero no hay otro lugar
+donde la comunidad de desarrollo del kernel se reúna como un todo; los
+desarrolladores que eviten esta lista se perderán información importante.
+
+Hay algunos consejos que pueden ayudar a sobrevivir a la lista linux-kernel:
+
+- Haga que la lista se entregue en una carpeta separada, en lugar de su
+ buzón principal. Uno debe ser capaz de ignorar el flujo durante
+ periodos prolongados.
+
+- No trate de seguir cada conversación, nadie lo hace. Es importante
+ filtrar tanto por el tema de interés (aunque tenga en cuenta que las
+ conversaciones prolongadas pueden alejarse del asunto original sin
+ cambiar la línea de asunto del correo electrónico) como por las personas
+ que participan.
+
+- No alimente a los trolls. Si alguien está tratando de provocar una
+ respuesta de enojo, ignórelos.
+
+- Al responder al correo electrónico del kernel de Linux (o al de otras
+ listas) conserve el encabezado Cc: para todos los involucrados. En
+ ausencia de una razón sólida (como una solicitud explícita), nunca debe
+ eliminar destinatarios. Asegúrese siempre de que la persona a la que está
+ respondiendo esté en la lista Cc:. Esta convención también hace que no
+ sea necesario solicitar explícitamente que se le copie en las respuestas
+ a sus publicaciones.
+
+- Busque en los archivos de la lista (y en la red en su conjunto) antes
+ de hacer preguntas. Algunos desarrolladores pueden impacientarse con
+ las personas que claramente no han hecho sus deberes.
+
+- Utilice respuestas intercaladas (“en línea”), lo que hace que su
+ respuesta sea más fácil de leer. (Es decir, evite top-posting – la
+ práctica de poner su respuesta encima del texto citado al que está
+ respondiendo.) Para obtener más información, consulte
+ :ref:`Documentation/translations/sp_SP/process/submitting-patches.rst <sp_interleaved_replies>`.
+
+- Pregunte en la lista de correo correcta. linux-kernel puede ser el
+ punto de encuentro general, pero no es el mejor lugar para encontrar
+ desarrolladores de todos los subsistemas.
+
+El último punto, encontrar la lista de correo correcta, es una fuente
+común de errores para desarrolladores principiantes. Alguien que haga
+una pregunta relacionada con las redes en linux-kernel seguramente
+recibirá una sugerencia educada para preguntar en la lista de netdev en su
+lugar, ya que esa es la lista frecuentada por la mayoría de los
+desarrolladores de redes. Existen otras listas para los subsistemas SCSI,
+video4linux, IDE, sistema de archivos, etc. El mejor lugar para buscar
+listas de correo es en el archivo MAINTAINERS incluido con el código
+fuente del kernel.
+
+Comenzar con el desarrollo del kernel
+-------------------------------------
+
+Las preguntas sobre como comenzar con el proceso de desarrollo del kernel
+son comunes, tanto de individuos como de empresas. Igualmente comunes son
+los pasos en falso que hacen que el comienzo de la relación sea más
+difícil de lo que tiene que ser.
+
+Las empresas a menudo buscan contratar desarrolladores conocidos para
+iniciar un grupo de desarrollo. De hecho, esta puede ser una técnica
+efectiva. Pero también tiende a ser caro y no hace mucho por hacer crecer el
+grupo de desarrolladores de kernel experimentados. Es posible poner al
+día a los desarrolladores internos en el desarrollo de kernel de Linux,
+dada la inversión de algún tiempo. Tomarse este tiempo puede dotar a un
+empleador de un grupo de desarrolladores que comprendan tanto el kernel
+como la empresa, y que también puedan ayudar a educar a otros. A medio
+plazo, este es a menudo el enfoque más rentable.
+
+Los desarrolladores individuales, a menudo, comprensiblemente, no tienen
+un lugar para empezar. Comenzar con un proyecto grande puede ser
+intimidante; a menudo uno quiere probar las aguas con algo más pequeño
+primero. Este es el punto en el que algunos desarrolladores se lanzan a
+la creación de parches para corregir errores ortográficos o problemas
+menores de estilo de codificación. Desafortunadamente, dichos parches
+crean un nivel de ruido que distrae a la comunidad de desarrollo en su
+conjunto, por lo que, cada vez más, se los mira con desprecio. Los nuevos
+desarrolladores que deseen presentarse a la comunidad no recibirán la
+recepción que desean por estos medios.
+
+Andrew Morton da este consejo (traducido) para los aspirantes a
+desarrolladores de kernel.
+
+::
+
+ El proyecto #1 para los principiantes en el kernel seguramente debería
+ ser “asegúrese de que el kernel funcione perfectamente en todo momento
+ en todas las máquinas que pueda conseguir”. Por lo general, la forma
+ de hacer esto es trabajar con otros para arreglar las cosas (¡esto
+ puede requerir persistencia!), pero eso está bien, es parte del
+ desarrollo del kernel.
+
+(https://lwn.net/Articles/283982/)
+
+En ausencia de problemas obvios que solucionar, se aconseja a los
+desarrolladores que consulten las listas actuales de regresiones y errores
+abiertos en general. Nunca faltan problemas que necesitan solución; al
+abordar estos problemas, los desarrolladores ganarán experiencia con el
+proceso mientras, al mismo tiempo, se ganarán el respeto del resto de la
+comunidad de desarrollo.
diff --git a/Documentation/translations/sp_SP/process/3.Early-stage.rst b/Documentation/translations/sp_SP/process/3.Early-stage.rst
new file mode 100644
index 000000000000..71cfb3fb0fda
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/3.Early-stage.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/3.Early-stage.rst
+
+.. _sp_development_early_stage:
+
+Planificación en etapa inicial
+==============================
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/4.Coding.rst b/Documentation/translations/sp_SP/process/4.Coding.rst
new file mode 100644
index 000000000000..d9436e039b4b
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/4.Coding.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/4.Coding.rst
+
+.. _sp_development_coding:
+
+Conseguir el código correcto
+============================
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/5.Posting.rst b/Documentation/translations/sp_SP/process/5.Posting.rst
new file mode 100644
index 000000000000..50a3bc5998a8
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/5.Posting.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/5.Posting.rst
+
+.. _sp_development_posting:
+
+Publicar parches
+================
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/6.Followthrough.rst b/Documentation/translations/sp_SP/process/6.Followthrough.rst
new file mode 100644
index 000000000000..f0acf9082bb3
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/6.Followthrough.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/6.Followthrough.rst
+
+.. _sp_development_followthrough:
+
+Seguimiento
+===========
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/7.AdvancedTopics.rst b/Documentation/translations/sp_SP/process/7.AdvancedTopics.rst
new file mode 100644
index 000000000000..553759857339
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/7.AdvancedTopics.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/7.AdvancedTopics.rst
+
+.. _sp_development_advancedtopics:
+
+Temas avanzados
+===============
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/8.Conclusion.rst b/Documentation/translations/sp_SP/process/8.Conclusion.rst
new file mode 100644
index 000000000000..dd181cb8ec9a
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/8.Conclusion.rst
@@ -0,0 +1,11 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/8.Conclusion.rst
+
+.. _sp_development_conclusion:
+
+Para más información
+====================
+
+.. warning::
+ TODO aún no traducido
diff --git a/Documentation/translations/sp_SP/process/code-of-conduct.rst b/Documentation/translations/sp_SP/process/code-of-conduct.rst
index adc6c770cc37..a6c08613aefc 100644
--- a/Documentation/translations/sp_SP/process/code-of-conduct.rst
+++ b/Documentation/translations/sp_SP/process/code-of-conduct.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/code-of-conduct.rst <code_of_conduct>`
-:Translator: Contributor Covenant and Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Contributor Covenant and Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_code_of_conduct:
diff --git a/Documentation/translations/sp_SP/process/coding-style.rst b/Documentation/translations/sp_SP/process/coding-style.rst
index a37274764371..b5a84df44cea 100644
--- a/Documentation/translations/sp_SP/process/coding-style.rst
+++ b/Documentation/translations/sp_SP/process/coding-style.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/coding-style.rst <submittingpatches>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_codingstyle:
diff --git a/Documentation/translations/sp_SP/process/development-process.rst b/Documentation/translations/sp_SP/process/development-process.rst
new file mode 100644
index 000000000000..40d74086f22e
--- /dev/null
+++ b/Documentation/translations/sp_SP/process/development-process.rst
@@ -0,0 +1,27 @@
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/process/development-process.rst
+:Translator: Avadhut Naik <avadhut.naik@amd.com>
+
+.. _sp_development_process_main:
+
+Guía del proceso de desarrollo del kernel
+=========================================
+
+El propósito de este documento es ayudar a los desarrolladores (y sus
+gerentes) a trabajar con la comunidad de desarrollo con un mínimo de
+frustración. Es un intento de documentar cómo funciona esta comunidad
+de una manera accesible para aquellos que no están familiarizados
+íntimamente con el desarrollo del kernel de Linux (o, de hecho, el
+desarrollo de software libre en general). Si bien hay algo de material
+técnico aquí, se trata en gran medida de una discusión orientada al proceso
+que no requiere un conocimiento profundo de la programación del kernel
+para entenderla.
+
+.. toctree::
+ :caption: Contenido
+ :numbered:
+ :maxdepth: 2
+
+ 1.Intro
+ 2.Process
diff --git a/Documentation/translations/sp_SP/process/email-clients.rst b/Documentation/translations/sp_SP/process/email-clients.rst
index fdf1e51b84e4..55d5803daf41 100644
--- a/Documentation/translations/sp_SP/process/email-clients.rst
+++ b/Documentation/translations/sp_SP/process/email-clients.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/email-clients.rst <email_clients>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_email_clients:
diff --git a/Documentation/translations/sp_SP/process/howto.rst b/Documentation/translations/sp_SP/process/howto.rst
index dd793c0f8574..72ea855ac9dc 100644
--- a/Documentation/translations/sp_SP/process/howto.rst
+++ b/Documentation/translations/sp_SP/process/howto.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/howto.rst <process_howto>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_process_howto:
diff --git a/Documentation/translations/sp_SP/process/index.rst b/Documentation/translations/sp_SP/process/index.rst
index 2239373b3999..4892159310ff 100644
--- a/Documentation/translations/sp_SP/process/index.rst
+++ b/Documentation/translations/sp_SP/process/index.rst
@@ -28,3 +28,4 @@
management-style
submit-checklist
howto
+ development-process
diff --git a/Documentation/translations/sp_SP/process/kernel-docs.rst b/Documentation/translations/sp_SP/process/kernel-docs.rst
index 2f9b3df8f8fa..a62c6854f59b 100644
--- a/Documentation/translations/sp_SP/process/kernel-docs.rst
+++ b/Documentation/translations/sp_SP/process/kernel-docs.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_kernel_docs:
diff --git a/Documentation/translations/sp_SP/process/kernel-enforcement-statement.rst b/Documentation/translations/sp_SP/process/kernel-enforcement-statement.rst
index d66902694089..d47a1c154610 100644
--- a/Documentation/translations/sp_SP/process/kernel-enforcement-statement.rst
+++ b/Documentation/translations/sp_SP/process/kernel-enforcement-statement.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/kernel-enforcement-statement.rst <process_statement_kernel>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_process_statement_kernel:
diff --git a/Documentation/translations/sp_SP/process/magic-number.rst b/Documentation/translations/sp_SP/process/magic-number.rst
index 7c7dfb4ba80b..32a99aac2f6c 100644
--- a/Documentation/translations/sp_SP/process/magic-number.rst
+++ b/Documentation/translations/sp_SP/process/magic-number.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/magic-number.rst <magicnumbers>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_magicnumbers:
diff --git a/Documentation/translations/sp_SP/process/programming-language.rst b/Documentation/translations/sp_SP/process/programming-language.rst
index 301f525372d8..ba2164057f45 100644
--- a/Documentation/translations/sp_SP/process/programming-language.rst
+++ b/Documentation/translations/sp_SP/process/programming-language.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/programming-language.rst <programming_language>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_programming_language:
diff --git a/Documentation/translations/sp_SP/process/submitting-patches.rst b/Documentation/translations/sp_SP/process/submitting-patches.rst
index c2757d9ab216..328ec80bd61d 100644
--- a/Documentation/translations/sp_SP/process/submitting-patches.rst
+++ b/Documentation/translations/sp_SP/process/submitting-patches.rst
@@ -1,7 +1,7 @@
.. include:: ../disclaimer-sp.rst
:Original: :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
-:Translator: Carlos Bilbao <carlos.bilbao@amd.com>
+:Translator: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
.. _sp_submittingpatches:
@@ -356,6 +356,34 @@ Consulte Documentation/process/email-clients.rst para obtener
recomendaciones sobre clientes de correo electrónico y normas de etiqueta
en la lista de correo.
+.. _sp_interleaved_replies:
+
+Uso de respuestas intercaladas recortadas en las discusiones por correo electrónico
+-----------------------------------------------------------------------------------
+
+Se desaconseja encarecidamente la publicación en la parte superior de las
+discusiones sobre el desarrollo del kernel de Linux. Las respuestas
+intercaladas (o "en línea") hacen que las conversaciones sean mucho más
+fáciles de seguir. Para obtener más detalles, consulte:
+https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
+
+Como se cita frecuentemente en la lista de correo::
+
+ A: http://en.wikipedia.org/wiki/Top_post
+ Q: ¿Dónde puedo encontrar información sobre esto que se llama top-posting?
+ A: Porque desordena el orden en el que la gente normalmente lee el texto.
+ Q: ¿Por qué es tan malo el top-posting?
+ A: Top-posting.
+ Q: ¿Qué es lo más molesto del correo electrónico?
+
+Del mismo modo, por favor, recorte todas las citas innecesarias que no
+sean relevantes para su respuesta. Esto hace que las respuestas sean más
+fáciles de encontrar y ahorra tiempo y espacio. Para obtener más
+información, consulte: http://daringfireball.net/2007/07/on_top ::
+
+ A: No.
+ Q: ¿Debo incluir citas después de mi respuesta?
+
.. _sp_resend_reminders:
No se desanime o impaciente
diff --git a/Documentation/translations/zh_CN/PCI/msi-howto.rst b/Documentation/translations/zh_CN/PCI/msi-howto.rst
index 1b9b5ea790d8..95baadf767e4 100644
--- a/Documentation/translations/zh_CN/PCI/msi-howto.rst
+++ b/Documentation/translations/zh_CN/PCI/msi-howto.rst
@@ -88,7 +88,7 @@ MSI功能。
如果设备对最小数量的向量有要求,驱动程序可以传递一个min_vecs参数,设置为这个限制,
如果PCI核不能满足最小数量的向量,将返回-ENOSPC。
-flags参数用来指定设备和驱动程序可以使用哪种类型的中断(PCI_IRQ_LEGACY, PCI_IRQ_MSI,
+flags参数用来指定设备和驱动程序可以使用哪种类型的中断(PCI_IRQ_INTX, PCI_IRQ_MSI,
PCI_IRQ_MSIX)。一个方便的短语(PCI_IRQ_ALL_TYPES)也可以用来要求任何可能的中断类型。
如果PCI_IRQ_AFFINITY标志被设置,pci_alloc_irq_vectors()将把中断分散到可用的CPU上。
diff --git a/Documentation/translations/zh_CN/PCI/pci.rst b/Documentation/translations/zh_CN/PCI/pci.rst
index 83c2a41d38d3..347f5c3f5ce9 100644
--- a/Documentation/translations/zh_CN/PCI/pci.rst
+++ b/Documentation/translations/zh_CN/PCI/pci.rst
@@ -304,7 +304,7 @@ MSI-X可以分配几个单独的向量。
的PCI_IRQ_MSI和/或PCI_IRQ_MSIX标志来启用MSI功能。这将导致PCI支持将CPU向量数
据编程到PCI设备功能寄存器中。许多架构、芯片组或BIOS不支持MSI或MSI-X,调用
``pci_alloc_irq_vectors`` 时只使用PCI_IRQ_MSI和PCI_IRQ_MSIX标志会失败,
-所以尽量也要指定 ``PCI_IRQ_LEGACY`` 。
+所以尽量也要指定 ``PCI_IRQ_INTX`` 。
对MSI/MSI-X和传统INTx有不同中断处理程序的驱动程序应该在调用
``pci_alloc_irq_vectors`` 后根据 ``pci_dev``结构体中的 ``msi_enabled``
diff --git a/Documentation/translations/zh_CN/core-api/cachetlb.rst b/Documentation/translations/zh_CN/core-api/cachetlb.rst
index b4a76ec75daa..64295c61d1c1 100644
--- a/Documentation/translations/zh_CN/core-api/cachetlb.rst
+++ b/Documentation/translations/zh_CN/core-api/cachetlb.rst
@@ -260,7 +260,7 @@ HyperSparc cpu就是这样一个具有这种属性的cpu。
如果D-cache别名不是一个问题,这个程序可以简单地定义为该架构上
的nop。
- 在page->flags (PG_arch_1)中有一个位是“架构私有”。内核保证,
+ 在folio->flags (PG_arch_1)中有一个位是“架构私有”。内核保证,
对于分页缓存的页面,当这样的页面第一次进入分页缓存时,它将清除
这个位。
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
index 7fac6f75d078..fe0ff5a127f3 100644
--- a/Documentation/translations/zh_CN/core-api/workqueue.rst
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -7,12 +7,13 @@
司延腾 Yanteng Si <siyanteng@loongson.cn>
周彬彬 Binbin Zhou <zhoubinbin@loongson.cn>
+ 陈兴友 Xingyou Chen <rockrush@rockwork.org>
.. _cn_workqueue.rst:
-=========================
-并发管理的工作队列 (cmwq)
-=========================
+========
+工作队列
+========
:日期: September, 2010
:作者: Tejun Heo <tj@kernel.org>
@@ -22,7 +23,7 @@
简介
====
-在很多情况下,需要一个异步进程的执行环境,工作队列(wq)API是这种情况下
+在很多情况下,需要一个异步的程序执行环境,工作队列(wq)API是这种情况下
最常用的机制。
当需要这样一个异步执行上下文时,一个描述将要执行的函数的工作项(work,
@@ -34,8 +35,8 @@
队列时,工作者又开始执行。
-为什么要cmwq?
-=============
+为什么要有并发管理工作队列?
+===========================
在最初的wq实现中,多线程(MT)wq在每个CPU上有一个工作者线程,而单线程
(ST)wq在全系统有一个工作者线程。一个MT wq需要保持与CPU数量相同的工
@@ -73,9 +74,11 @@
向该函数的工作项,并在工作队列中排队等待该工作项。(就是挂到workqueue
队列里面去)
-特定目的线程,称为工作线程(工作者),一个接一个地执行队列中的功能。
-如果没有工作项排队,工作者线程就会闲置。这些工作者线程被管理在所谓
-的工作者池中。
+工作项可以在线程或BH(软中断)上下文中执行。
+
+对于由线程执行的工作队列,被称为(内核)工作者([k]worker)的特殊
+线程会依次执行其中的函数。如果没有工作项排队,工作者线程就会闲置。
+这些工作者线程被管理在所谓的工作者池中。
cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上面排队工作,
以及管理工作者池和处理排队工作项的后端机制。
@@ -84,6 +87,10 @@ cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上
优先级的工作项,还有一些额外的工作者池,用于服务未绑定工作队列的工
作项目——这些后备池的数量是动态的。
+BH工作队列使用相同的结构。然而,由于同一时间只可能有一个执行上下文,
+不需要担心并发问题。每个CPU上的BH工作者池只包含一个用于表示BH执行
+上下文的虚拟工作者。BH工作队列可以被看作软中断的便捷接口。
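+
+下面是一个最小的示意(并非权威实现),假设内核已提供 ``WQ_BH`` 标志,
+其中 ``my_bh_wq`` 、 ``my_bh_fn`` 等名字仅为示例::
+
+  #include <linux/workqueue.h>
+
+  static void my_bh_fn(struct work_struct *work)
+  {
+          /* 在 BH(软中断)上下文中执行,不可睡眠 */
+  }
+
+  static DECLARE_WORK(my_bh_work, my_bh_fn);
+
+  static int my_bh_example_init(void)
+  {
+          /* BH 工作队列:max_active 必须为 0 */
+          struct workqueue_struct *my_bh_wq =
+                  alloc_workqueue("my_bh_wq", WQ_BH, 0);
+
+          if (!my_bh_wq)
+                  return -ENOMEM;
+          queue_work(my_bh_wq, &my_bh_work);
+          return 0;
+  }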
+
当他们认为合适的时候,子系统和驱动程序可以通过特殊的
``workqueue API`` 函数创建和排队工作项。他们可以通过在工作队列上
设置标志来影响工作项执行方式的某些方面,他们把工作项放在那里。这些
@@ -95,9 +102,9 @@ cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上
否则一个绑定的工作队列的工作项将被排在与发起线程运行的CPU相关的普
通或高级工作工作者池的工作项列表中。
-对于任何工作者池的实施,管理并发水平(有多少执行上下文处于活动状
-态)是一个重要问题。最低水平是为了节省资源,而饱和水平是指系统被
-充分使用。
+对于任何线程池的实施,管理并发水平(有多少执行上下文处于活动状
+态)是一个重要问题。cmwq试图将并发保持在一个尽可能低且充足的
+水平。最低水平是为了节省资源,而充足是为了使系统能被充分使用。
每个与实际CPU绑定的worker-pool通过钩住调度器来实现并发管理。每当
一个活动的工作者被唤醒或睡眠时,工作者池就会得到通知,并跟踪当前可
@@ -140,6 +147,17 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
``flags``
---------
+``WQ_BH``
+ BH工作队列可以被看作软中断的便捷接口。它总是每个CPU一份,
+ 其中的各个工作项也会按在队列中的顺序,被所属CPU在软中断
+ 上下文中执行。
+
+ BH工作队列的 ``max_active`` 值必须为0,且只能单独或和
+ ``WQ_HIGHPRI`` 标志组合使用。
+
+ BH工作项不可以睡眠。像延迟排队、冲洗、取消等所有其他特性
+ 都是支持的。
+
``WQ_UNBOUND``
排队到非绑定wq的工作项由特殊的工作者池提供服务,这些工作者不
绑定在任何特定的CPU上。这使得wq表现得像一个简单的执行环境提
@@ -184,25 +202,21 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
--------------
``@max_active`` 决定了每个CPU可以分配给wq的工作项的最大执行上
-下文数量。例如,如果 ``@max_active为16`` ,每个CPU最多可以同
-时执行16个wq的工作项。
+下文数量。例如,如果 ``@max_active`` 为16,每个CPU最多可以同
+时执行16个wq的工作项。这始终是每CPU的属性,即便对于未绑定的 wq 也是如此。
-目前,对于一个绑定的wq, ``@max_active`` 的最大限制是512,当指
-定为0时使用的默认值是256。对于非绑定的wq,其限制是512和
-4 * ``num_possible_cpus()`` 中的较高值。这些值被选得足够高,所
-以它们不是限制性因素,同时会在失控情况下提供保护。
+``@max_active`` 的最大限制是512,当指定为0时使用的默认值是256。
+这些值被选得足够高,所以它们不是限制性因素,同时会在失控情况下提供
+保护。
一个wq的活动工作项的数量通常由wq的用户来调节,更具体地说,是由用
户在同一时间可以排列多少个工作项来调节。除非有特定的需求来控制活动
+工作项的数量,否则建议指定为 "0"。
-一些用户依赖于ST wq的严格执行顺序。 ``@max_active`` 为1和 ``WQ_UNBOUND``
-的组合用来实现这种行为。这种wq上的工作项目总是被排到未绑定的工作池
-中,并且在任何时候都只有一个工作项目处于活动状态,从而实现与ST wq相
-同的排序属性。
-
-在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
-``alloc_ordered_workqueue()`` 应该被用来实现全系统的ST行为。
+一些用户依赖于任意时刻最多只有一个工作项被执行,且各工作项被按队列中
+顺序处理带来的严格执行顺序。``@max_active`` 为1和 ``WQ_UNBOUND``
+的组合曾被用来实现这种行为,现在不用了。请使用
+``alloc_ordered_workqueue()`` 。
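+
+下面是一个最小的示意(假设性的片段,函数与工作项名字仅为示例),展示
+有序工作队列的创建与使用::
+
+  #include <linux/workqueue.h>
+
+  /* work_a、work_b 为已用 INIT_WORK() 初始化的工作项(示意) */
+  static int setup_ordered_wq(struct work_struct *work_a,
+                              struct work_struct *work_b)
+  {
+          struct workqueue_struct *wq;
+
+          wq = alloc_ordered_workqueue("my_ordered_wq", 0);
+          if (!wq)
+                  return -ENOMEM;
+
+          /* 任意时刻最多只有一个工作项在执行,且按入队顺序处理 */
+          queue_work(wq, work_a);
+          queue_work(wq, work_b);
+          return 0;
+  }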
执行场景示例
@@ -285,7 +299,7 @@ And with cmwq with ``@max_active`` >= 3, ::
* 除非有特殊需要,建议使用0作为@max_active。在大多数使用情
况下,并发水平通常保持在默认限制之下。
-* 一个wq作为前进进度保证(WQ_MEM_RECLAIM,冲洗(flush)和工
+* 一个wq作为前进进度保证( ``WQ_MEM_RECLAIM`` )、冲洗(flush)和工
作项属性的域。不涉及内存回收的工作项,不需要作为工作项组的一
部分被刷新,也不需要任何特殊属性,可以使用系统中的一个wq。使
用专用wq和系统wq在执行特性上没有区别。
@@ -294,6 +308,337 @@ And with cmwq with ``@max_active`` >= 3, ::
益的,因为wq操作和工作项执行中的定位水平提高了。
+亲和性作用域
+============
+
+一个非绑定工作队列根据其亲和性作用域来对CPU进行分组以提高缓存
+局部性。比如如果一个工作队列使用默认的“cache”亲和性作用域,
+它将根据最后一级缓存的边界来分组处理器。这个工作队列上的工作项
+将被分配给一个与发起CPU共用最后级缓存的处理器上的工作者。根据
+``affinity_strict`` 的设置,工作者在启动后可能被允许移出
+所在作用域,也可能不被允许。
+
+工作队列目前支持以下亲和性作用域。
+
+``default``
+ 使用模块参数 ``workqueue.default_affinity_scope`` 指定
+ 的作用域,该参数总是会被设为以下作用域中的一个。
+
+``cpu``
+ CPU不被分组。一个CPU上发起的工作项会被同一CPU上的工作者执行。
+ 这使非绑定工作队列表现得像是不含并发管理的每CPU工作队列。
+
+``smt``
+ CPU被按SMT边界分组。这通常意味着每个物理CPU核上的各逻辑CPU会
+ 被分进同一组。
+
+``cache``
+ CPU被按缓存边界分组。采用哪个缓存边界由架构代码决定。很多情况
+ 下会使用L3。这是默认的亲和性作用域。
+
+``numa``
+ CPU被按NUMA边界分组。
+
+``system``
+ 所有CPU被放在同一组。工作队列不尝试在临近发起CPU的CPU上运行
+ 工作项。
+
+默认的亲和性作用域可以被模块参数 ``workqueue.default_affinity_scope``
+修改,特定工作队列的亲和性作用域可以通过 ``apply_workqueue_attrs()``
+被更改。
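+
+下面是一个示意性的片段(并非权威用法,结构体字段名以实际内核版本的
+``workqueue.h`` 为准),展示如何通过 ``apply_workqueue_attrs()`` 为某个
+非绑定工作队列选择“numa”亲和性作用域::
+
+  #include <linux/workqueue.h>
+
+  /* my_unbound_wq 为先前用 WQ_UNBOUND 创建的工作队列(示意) */
+  static int set_numa_scope(struct workqueue_struct *my_unbound_wq)
+  {
+          struct workqueue_attrs *attrs;
+          int ret;
+
+          attrs = alloc_workqueue_attrs();
+          if (!attrs)
+                  return -ENOMEM;
+
+          attrs->affn_scope = WQ_AFFN_NUMA;  /* 亲和性作用域 */
+          attrs->affn_strict = true;         /* 严格模式(可选) */
+          ret = apply_workqueue_attrs(my_unbound_wq, attrs);
+          free_workqueue_attrs(attrs);
+          return ret;
+  }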
+
+如果设置了 ``WQ_SYSFS`` ,工作队列会在它的 ``/sys/devices/virtual/workqueue/WQ_NAME/``
+目录中有以下亲和性作用域相关的接口文件。
+
+``affinity_scope``
+ 读操作以查看当前的亲和性作用域。写操作用于更改设置。
+
+ 当前作用域是默认值时,当前生效的作用域也可以被从这个文件中
+ 读到(小括号内),例如 ``default (cache)`` 。
+
+``affinity_strict``
+ 默认值0表明亲和性作用域不是严格的。当一个工作项开始执行时,
+ 工作队列尽量尝试使工作者处于亲和性作用域内,称为遣返。启动后,
+ 调度器可以自由地将工作者调度到系统中任意它认为合适的地方去。
+ 这使得在保留使用其他CPU(如果必需且有可用)能力的同时,
+ 还能从作用域局部性上获益。
+
+ 如果设置为1,作用域内的所有工作者将被保证总是处于作用域内。
+ 这在跨亲和性作用域会导致如功耗、负载隔离等方面的潜在影响时
+ 会有用。严格的NUMA作用域也可用于和旧版内核中工作队列的行为
+ 保持一致。
+
+
+亲和性作用域与性能
+==================
+
+如果非绑定工作队列的行为对绝大多数使用场景来说都是最优的,
+不需要更多调节,就完美了。很不幸,在当前内核中,重度使用
+工作队列时,需要在局部性和利用率间显式地作一个明显的权衡。
+
+更高的局部性带来更高效率,也就是相同数量的CPU周期内可以做
+更多工作。然而,如果发起者没能将工作项充分地分散在亲和性
+作用域间,更高的局部性也可能带来更低的整体系统利用率。以下
+dm-crypt 的性能测试清楚地阐明了这一取舍。
+
+测试运行在一个12核24线程、4个L3缓存的处理器(AMD Ryzen
+9 3900x)上。为保持一致性,关闭CPU超频。 ``/dev/dm-0``
+是在NVME SSD(三星 990 PRO)上创建、用 ``cryptsetup``
+以默认配置打开的一个 dm-crypt 设备。
+
+
+场景 1: 机器上遍布着充足的发起者和工作量
+------------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k --ioengine=libaio \
+ --iodepth=64 --runtime=60 --numjobs=24 --time_based --group_reporting \
+ --name=iops-test-job --verify=sha512
+
+这里有24个发起者,每个同时发起64个IO。 ``--verify=sha512``
+使得 ``fio`` 每次生成和读回内容受发起者和 ``kcryptd``
+间的执行局部性影响。下面是基于不同 ``kcryptd`` 的亲和性
+作用域设置,各经过五次测试得到的读取带宽和CPU利用率数据。
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1159.40 ±1.34
+ - 99.31 ±0.02
+
+ * - cache
+ - 1166.40 ±0.89
+ - 99.34 ±0.01
+
+ * - cache (strict)
+ - 1166.00 ±0.71
+ - 99.35 ±0.01
+
+在系统中分布着足够多发起者的情况下,不论严格与否,“cache”
+没有表现得更差。三种配置均使整个机器达到饱和,但由于提高了
+局部性,缓存相关的两种有0.6%的(带宽)提升。
+
+
+场景 2: 更少发起者,足以达到饱和的工作量
+----------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=8 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+与上一个场景唯一的区别是 ``--numjobs=8``。 发起者数量
+减少为三分之一,但仍然有足以使系统达到饱和的工作总量。
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1155.40 ±0.89
+ - 97.41 ±0.05
+
+ * - cache
+ - 1154.40 ±1.14
+ - 96.15 ±0.09
+
+ * - cache (strict)
+ - 1112.00 ±4.64
+ - 93.26 ±0.35
+
+这里有超过使系统达到饱和所需的工作量。“system”和“cache”
+都接近但并未使机器完全饱和。“cache”消耗更少的CPU但更高的
+效率使其得到和“system”相同的带宽。
+
+八个发起者盘桓在四个L3缓存作用域间仍然允许“cache (strict)”
+几乎使机器饱和,但缺少对工作的保持(不移到空闲处理器上)
+开始带来3.7%的带宽损失。
+
+
+场景 3: 更少发起者,不充足的工作量
+----------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=4 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+再次,唯一的区别是 ``--numjobs=4``。由于发起者减少到四个,
+现在没有足以使系统饱和的工作量,带宽变得依赖于完成时延。
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 993.60 ±1.82
+ - 75.49 ±0.06
+
+ * - cache
+ - 973.40 ±1.52
+ - 74.90 ±0.07
+
+ * - cache (strict)
+ - 828.20 ±4.49
+ - 66.84 ±0.29
+
+现在,局部性和利用率间的权衡更清晰了。“cache”展示出相比
+“system”2%的带宽损失,而“cache (strict)”跌到20%。
+
+
+结论和建议
+----------
+
+在以上试验中,虽然一致并且也明显,但“cache”亲和性作用域
+相比“system”的性能优势并不大。然而,这影响是依赖于作用域
+间距离的,在更复杂的处理器拓扑下可能有更明显的影响。
+
+虽然这些情形下缺少工作保持是有坏处的,但比“cache (strict)”
+好多了,而且最大化工作队列利用率的需求也并不常见。因此,
+“cache”是非绑定池的默认亲和性作用域。
+
+* 由于不存在一个适用于大多数场景的选择,对于可能需要消耗
+ 大量CPU的工作队列,建议通过 ``apply_workqueue_attrs()``
+ 进行(专门)配置,并考虑是否启用 ``WQ_SYSFS``。
+
+* 设置了严格“cpu”亲和性作用域的非绑定工作队列,它的行为与
+ ``WQ_CPU_INTENSIVE`` 每CPU工作队列一样。后者没有真正
+ 优势,而前者提供了大幅度的灵活性。
+
+* 亲和性作用域是从Linux v6.5起引入的。为了模拟旧版行为,
+ 可以使用严格的“numa”亲和性作用域。
+
+* 不严格的亲和性作用域中,缺少工作保持大概缘于调度器。内核
+ 为什么没能维护好大多数场景下的工作保持,把事情作对,还没有
+ 理论上的解释。因此,未来调度器的改进可能会使我们不再需要
+ 这些调节项。
+
+
+检查配置
+========
+
+使用 tools/workqueue/wq_dump.py(drgn脚本) 来检查未
+绑定CPU的亲和性配置,工作者池,以及工作队列如何映射到池上: ::
+
+ $ tools/workqueue/wq_dump.py
+ Affinity Scopes
+ ===============
+ wq_unbound_cpumask=0000000f
+
+ CPU
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ SMT
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ CACHE (default)
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ NUMA
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ SYSTEM
+ nr_pods 1
+ pod_cpus [0]=0000000f
+ pod_node [0]=-1
+ cpu_pod [0]=0 [1]=0 [2]=0 [3]=0
+
+ Worker Pools
+ ============
+ pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
+ pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
+ pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
+ pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
+ pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
+ pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
+ pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
+ pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
+ pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
+ pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
+ pool[10] ref=28 nice= 0 idle/workers= 17/ 17 cpus=0000000c
+ pool[11] ref= 1 nice=-20 idle/workers= 1/ 1 cpus=0000000f
+ pool[12] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=00000003
+ pool[13] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=0000000c
+
+ Workqueue CPU -> pool
+ =====================
+ [ workqueue \ CPU 0 1 2 3 dfl]
+ events percpu 0 2 4 6
+ events_highpri percpu 1 3 5 7
+ events_long percpu 0 2 4 6
+ events_unbound unbound 9 9 10 10 8
+ events_freezable percpu 0 2 4 6
+ events_power_efficient percpu 0 2 4 6
+ events_freezable_power_ percpu 0 2 4 6
+ rcu_gp percpu 0 2 4 6
+ rcu_par_gp percpu 0 2 4 6
+ slub_flushwq percpu 0 2 4 6
+ netns ordered 8 8 8 8 8
+ ...
+
+参见命令的帮助消息以获取更多信息。
+
+
+监视
+====
+
+使用 tools/workqueue/wq_monitor.py 来监视工作队列的运行: ::
+
+ $ tools/workqueue/wq_monitor.py events
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18545 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38306 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29598 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18548 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38322 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29603 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ ...
+
+参见命令的帮助消息以获取更多信息。
+
+
调试
====
@@ -330,7 +675,6 @@ And with cmwq with ``@max_active`` >= 3, ::
工作队列保证,如果在工作项排队后满足以下条件,则工作项不能重入:
-
1. 工作函数没有被改变。
2. 没有人将该工作项排到另一个工作队列中。
3. 该工作项尚未被重新启动。
diff --git a/Documentation/translations/zh_CN/dev-tools/index.rst b/Documentation/translations/zh_CN/dev-tools/index.rst
index c2db3e566b1b..fa900f5beb68 100644
--- a/Documentation/translations/zh_CN/dev-tools/index.rst
+++ b/Documentation/translations/zh_CN/dev-tools/index.rst
@@ -22,14 +22,14 @@ Documentation/translations/zh_CN/dev-tools/testing-overview.rst
sparse
gcov
kasan
+ kcov
+ ubsan
+ kmemleak
gdb-kernel-debugging
Todolist:
- coccinelle
- - kcov
- - ubsan
- - kmemleak
- kcsan
- kfence
- kgdb
diff --git a/Documentation/translations/zh_CN/dev-tools/kcov.rst b/Documentation/translations/zh_CN/dev-tools/kcov.rst
new file mode 100644
index 000000000000..629154df7121
--- /dev/null
+++ b/Documentation/translations/zh_CN/dev-tools/kcov.rst
@@ -0,0 +1,359 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/dev-tools/kcov.rst
+:Translator: 刘浩阳 Haoyang Liu <tttturtleruss@hust.edu.cn>
+
+KCOV: 用于模糊测试的代码覆盖率
+==============================
+
+KCOV 以一种适用于覆盖率引导的模糊测试的形式收集和暴露内核代码覆盖率信息。
+一个正在运行的内核的覆盖率数据可以通过 ``kcov`` 调试文件导出。覆盖率的收集是基
+于任务启用的,因此 KCOV 可以精确捕获单个系统调用的覆盖率。
+
+要注意的是,KCOV 的目标不是收集尽可能多的覆盖率数据,而是收集相对稳定的、
+由系统调用输入决定的覆盖率。为了实现这个目标,它不收集软硬中断的覆盖率(除非
+启用了远程覆盖率收集,见下文),也不收集内核中固有的不确定部分(如调度器、锁)的覆盖率。
+
+除了收集代码覆盖率,KCOV 还收集操作数比较的覆盖率。见 "操作数比较收集" 一节
+查看详细信息。
+
+除了从系统调用处理器收集覆盖率数据,KCOV 还可以从在后台内核任务或软中断中执行
+的、被标注的内核代码部分收集覆盖率。见 "远程覆盖率收集" 一节查看详细信息。
+
+先决条件
+--------
+
+KCOV 依赖编译器插桩,要求 GCC 6.1.0 及更高版本或者内核支持的任意版本的 Clang。
+
+收集操作数比较的覆盖率需要 GCC 8+ 或者 Clang。
+
+为了启用 KCOV,需要使用如下参数配置内核::
+
+ CONFIG_KCOV=y
+
+为了启用操作数比较覆盖率的收集,使用如下参数::
+
+ CONFIG_KCOV_ENABLE_COMPARISONS=y
+
+覆盖率数据只会在调试文件系统被挂载后才可以获取::
+
+ mount -t debugfs none /sys/kernel/debug
+
+覆盖率收集
+----------
+
+下面的程序演示了如何使用 KCOV 在一个测试程序中收集单个系统调用的覆盖率:
+
+.. code-block:: c
+
+ #include <stdio.h>
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <sys/ioctl.h>
+ #include <sys/mman.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include <linux/types.h>
+
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_ENABLE _IO('c', 100)
+ #define KCOV_DISABLE _IO('c', 101)
+ #define COVER_SIZE (64<<10)
+
+ #define KCOV_TRACE_PC 0
+ #define KCOV_TRACE_CMP 1
+
+ int main(int argc, char **argv)
+ {
+ int fd;
+ unsigned long *cover, n, i;
+
+ /* 单个文件描述符允许
+ * 在单线程上收集覆盖率。
+ */
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ /* 设置跟踪模式和跟踪大小。 */
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ /* 映射内核空间和用户空间共享的缓冲区。 */
+ cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+ /* 在当前线程中启用覆盖率收集。 */
+ if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
+ perror("ioctl"), exit(1);
+ /* 在调用 ioctl() 之后重置覆盖率。 */
+ __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+ /* 调用目标系统调用。 */
+ read(-1, NULL, 0);
+ /* 读取收集到的 PC 的数目。 */
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++)
+ printf("0x%lx\n", cover[i + 1]);
+ /* 在当前线程上禁用覆盖率收集。在这之后
+ * 可以在其他线程上收集覆盖率
+ */
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ /* 释放资源 */
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
+ }
+
+在使用 ``addr2line`` 传输后,程序输出应该如下所示::
+
+ SyS_read
+ fs/read_write.c:562
+ __fdget_pos
+ fs/file.c:774
+ __fget_light
+ fs/file.c:746
+ __fget_light
+ fs/file.c:750
+ __fget_light
+ fs/file.c:760
+ __fdget_pos
+ fs/file.c:784
+ SyS_read
+ fs/read_write.c:562
+
+如果一个程序需要从多个线程独立地收集覆盖率,那么每个线程都需要单独打开
+``/sys/kernel/debug/kcov``。
+
+接口的细粒度允许高效地创建测试进程。即,一个父进程打开了
+``/sys/kernel/debug/kcov``,启用了追踪模式,映射了覆盖率缓冲区,然后在一个循
+环中创建了子进程。这个子进程只需要启用覆盖率收集即可(当一个线程退出时将自动禁
+用覆盖率收集)。
+
+操作数比较收集
+--------------
+
+操作数比较收集和覆盖率收集类似:
+
+.. code-block:: c
+
+ /* 包含和上文一样的头文件和宏定义。 */
+
+ /* 每次记录的 64 位字的数量。 */
+ #define KCOV_WORDS_PER_CMP 4
+
+ /*
+ * 收集的比较种类的格式。
+ *
+ * 0 比特表示是否是一个编译时常量。
+ * 1 & 2 比特包含参数大小的 log2 值,最大 8 字节。
+ */
+
+ #define KCOV_CMP_CONST (1 << 0)
+ #define KCOV_CMP_SIZE(n) ((n) << 1)
+ #define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
+
+ int main(int argc, char **argv)
+ {
+ int fd;
+ uint64_t *cover, type, arg1, arg2, is_const, size;
+ unsigned long n, i;
+
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ /*
+ * 注意缓冲区指针的类型是 uint64_t*,因为所有的
+ * 比较操作数都被提升为 uint64_t 类型。
+ */
+ cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+ /* 注意这里是 KCOV_TRACE_CMP 而不是 KCOV_TRACE_PC。 */
+ if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
+ perror("ioctl"), exit(1);
+ __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+ read(-1, NULL, 0);
+ /* 读取收集到的比较操作数的数量。 */
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++) {
+ uint64_t ip;
+
+ type = cover[i * KCOV_WORDS_PER_CMP + 1];
+ /* arg1 和 arg2 - 比较的两个操作数。 */
+ arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
+ arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
+ /* ip - 调用者的地址。 */
+ ip = cover[i * KCOV_WORDS_PER_CMP + 4];
+ /* 操作数的大小。 */
+ size = 1 << ((type & KCOV_CMP_MASK) >> 1);
+ /* is_const - 当操作数是一个编译时常量时为真。*/
+ is_const = type & KCOV_CMP_CONST;
+ printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
+ "size: %lu, %s\n",
+ ip, type, arg1, arg2, size,
+ is_const ? "const" : "non-const");
+ }
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ /* 释放资源。 */
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
+ }
+
+注意 KCOV 的模式(代码覆盖率收集或操作数比较收集)是互斥的。
+
+远程覆盖率收集
+--------------
+
+除了从用户空间进程发布的系统调用句柄收集覆盖率数据以外,KCOV 也可以从部分在其
+他上下文中执行的内核中收集覆盖率 - 称为“远程”覆盖率。
+
+使用 KCOV 收集远程覆盖率要求:
+
+1. 修改内核源码并使用 ``kcov_remote_start`` 和 ``kcov_remote_stop`` 来标注要收集
+ 覆盖率的代码片段。
+
+2. 在用户空间的收集覆盖率的进程应使用 ``KCOV_REMOTE_ENABLE`` 而不是 ``KCOV_ENABLE``。
+
+``kcov_remote_start`` 和 ``kcov_remote_stop`` 的标注以及 ``KCOV_REMOTE_ENABLE``
+ioctl 都接受可以识别特定覆盖率收集片段的句柄。句柄的使用方式取决于匹配代码片段执
+行的上下文。
+
+KCOV 支持在如下上下文中收集远程覆盖率:
+
+1. 全局内核后台任务。这些任务是内核启动时创建的数量有限的实例(如,每一个
+ USB HCD 产生一个 USB ``hub_event`` 工作器)。
+
+2. 局部内核后台任务。这些任务通常是由于用户空间进程与某些内核接口进行交互时产
+ 生的,并且通常在进程退出时会被停止(如,vhost 工作器)。
+
+3. 软中断。
+
+对于 #1 和 #3,必须选择一个独特的全局句柄并将其传递给对应的
+``kcov_remote_start`` 调用。一个用户空间进程必须将该句柄存储在
+``kcov_remote_arg`` 结构体的 ``handle`` 数组字段中并将其传递给
+``KCOV_REMOTE_ENABLE``。这会将使用的 KCOV 设备附加到由此句柄引用的代码片段。多个全局
+句柄标识的不同代码片段可以一次性传递。
+
+对于 #2,用户空间进程必须通过 ``kcov_remote_arg`` 结构体的 ``common_handle`` 字段
+传递一个非零句柄。这个通用句柄将会被保存在当前 ``task_struct`` 结构体的
+``kcov_handle`` 字段中并且需要通过自定义内核代码的修改来传递给新创建的本地任务
+。这些任务需要在 ``kcov_remote_start`` 和 ``kcov_remote_stop`` 标注中依次使用传递过来的
+句柄。
+
+KCOV 对全局句柄和通用句柄均遵循一个预定义的格式。每一个句柄都是一个 ``u64``
+整型。当前,只有最高字节和低 4 个字节被使用。第 4-7 字节是保留的,值必须为 0。
+
+对于全局句柄,最高位的字节表示该句柄属于的子系统的标识。比如,KCOV 使用 ``1``
+表示 USB 子系统类型。全局句柄的低 4 字节表示子系统中任务实例的标识。比如,每一
+个 ``hub_event`` 工作器使用 USB 总线号作为任务实例的标识。
+
+对于通用句柄,使用一个保留值 ``0`` 作为子系统标识,因为这些句柄不属于一个特定
+的子系统。通用句柄的低 4 字节用于识别由用户进程生成的所有本地句柄的集合实例,
+该进程将通用句柄传递给 ``KCOV_REMOTE_ENABLE``。
+
+实际上,如果只从系统中的单个用户空间进程收集覆盖率,那么可以使用任意值作为通用
+句柄的实例标识。然而,如果通用句柄被多个用户空间进程使用,每个进程必须使用唯一
+的实例标识。一个选择是使用进程标识作为通用句柄实例的标识。
+
+下面的程序演示了如何使用 KCOV 从一个由进程产生的本地任务和处理 USB 总线 #1 的
+全局任务收集覆盖率:
+
+.. code-block:: c
+
+ /* 包含和上文一样的头文件和宏定义。 */
+
+ struct kcov_remote_arg {
+ __u32 trace_mode;
+ __u32 area_size;
+ __u32 num_handles;
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
+ };
+
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_DISABLE _IO('c', 101)
+ #define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
+
+ #define COVER_SIZE (64 << 10)
+
+ #define KCOV_TRACE_PC 0
+
+ #define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
+ #define KCOV_SUBSYSTEM_USB (0x01ull << 56)
+
+ #define KCOV_SUBSYSTEM_MASK (0xffull << 56)
+ #define KCOV_INSTANCE_MASK (0xffffffffull)
+
+ static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
+ {
+ if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
+ return 0;
+ return subsys | inst;
+ }
+
+ #define KCOV_COMMON_ID 0x42
+ #define KCOV_USB_BUS_NUM 1
+
+ int main(int argc, char **argv)
+ {
+ int fd;
+ unsigned long *cover, n, i;
+ struct kcov_remote_arg *arg;
+
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+
+ /* 通过通用句柄和 USB 总线 #1 启用代码覆盖率收集。 */
+ arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
+ if (!arg)
+ perror("calloc"), exit(1);
+ arg->trace_mode = KCOV_TRACE_PC;
+ arg->area_size = COVER_SIZE;
+ arg->num_handles = 1;
+ arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON,
+ KCOV_COMMON_ID);
+ arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
+ KCOV_USB_BUS_NUM);
+ if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
+ perror("ioctl"), free(arg), exit(1);
+ free(arg);
+
+ /*
+ * 在这里用户需要触发执行一个内核代码段
+ * 该代码段要么使用通用句柄标识
+ * 要么触发了一些 USB 总线 #1 上的一些活动。
+ */
+ sleep(2);
+
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++)
+ printf("0x%lx\n", cover[i + 1]);
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
+ }
diff --git a/Documentation/translations/zh_CN/dev-tools/kmemleak.rst b/Documentation/translations/zh_CN/dev-tools/kmemleak.rst
new file mode 100644
index 000000000000..d248c8428095
--- /dev/null
+++ b/Documentation/translations/zh_CN/dev-tools/kmemleak.rst
@@ -0,0 +1,229 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/dev-tools/kmemleak.rst
+:Translator: 刘浩阳 Haoyang Liu <tttturtleruss@hust.edu.cn>
+
+内核内存泄露检测器
+==================
+
+Kmemleak 提供了一个类似 `可追踪的垃圾收集器 <https://en.wikipedia.org/wiki/Tra
+cing_garbage_collection>`_ 的方法来检测可能的内核内存泄漏,不同的是孤立对象不会
+被释放,而是仅通过 /sys/kernel/debug/kmemleak 报告。Valgrind 工具
+(``memcheck --leak-check``)使用了一种相似的方法来检测用户空间应用中的内存泄
+露。
+
+用法
+----
+
+"Kernel hacking" 中的 CONFIG_DEBUG_KMEMLEAK 必须被启用。一个内核线程每10分钟
+(默认情况下)扫描一次内存,并且打印出新发现的未被引用的对象个数。
+如果 ``debugfs`` 没有挂载,则执行::
+
+ # mount -t debugfs nodev /sys/kernel/debug/
+
+显示所有扫描出的可能的内存泄漏的细节信息::
+
+ # cat /sys/kernel/debug/kmemleak
+
+启动一次中等程度的内存扫描::
+
+ # echo scan > /sys/kernel/debug/kmemleak
+
+清空当前所有可能的内存泄露列表::
+
+ # echo clear > /sys/kernel/debug/kmemleak
+
+当再次读取 ``/sys/kernel/debug/kmemleak`` 文件时,将会输出自上次扫描以来检测到的
+新的内存泄露。
+
+注意,孤立对象是按照被分配的时间排序的,列表开头的对象可能会导致后续的对象都被
+识别为孤立对象。
+
+可以通过写入 ``/sys/kernel/debug/kmemleak`` 文件在运行时修改内存扫描参数。下面是
+支持的参数:
+
+
+* off
+ 禁用 kmemleak(不可逆)
+* stack=on
+ 开启任务栈扫描(默认)
+* stack=off
+ 禁用任务栈扫描
+* scan=on
+ 开启自动内存扫描线程(默认)
+* scan=off
+ 关闭自动内存扫描线程
+* scan=<secs>;
+ 设定自动内存扫描间隔,以秒为单位(默认值为 600,设置为 0 表示停
+ 止自动扫描)
+* scan
+ 触发一次内存扫描
+* clear
+ 通过标记所有当前已报告的未被引用对象为灰,从而清空当前可能的内存泄露列
+ 表;如果 kmemleak 被禁用,则释放所有 kmemleak 对象,。
+* dump=<addr>
+ 输出存储在 <addr> 中的对象信息
+
+可以通过在内核命令行中传递 ``kmemleak=off`` 参数从而在启动时禁用 Kmemleak。
+
+在 kmemleak 初始化之前就可能会有内存分配或释放,这些操作被存储在一个早期日志缓
+冲区中。缓冲区的大小通过 CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE 选项配置。
+
+如果 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 被启用,则 kmemleak 默认被禁用。在内核命
+令行中传递 ``kmemleak=on`` 参数来开启这个功能。
+
+如果出现 "Error while writing to stdout" 或 "write_loop: Invalid argument" 这样
+的错误,请确认 kmemleak 被正确启用。
+
+基础算法
+--------
+
+通过 :c:func:`kmalloc`, :c:func:`vmalloc`, :c:func:`kmem_cache_alloc` 以及同类
+函数进行的内存分配都会被跟踪,这些指针连同大小和栈追踪等额外信息,都被存储在红黑树中。
+对应的释放函数调用也被追踪,并从 kmemleak 数据结构中移除相应指针。
+
+对于一个已分配的内存块,如果通过扫描内存(包括保存寄存器)没有发现任何指针指向
+它的起始地址或者其中的任何位置,则认为这块内存是孤立的。这意味着内核无法将该内
+存块的地址传递给一个释放内存函数,这块内存便被认为泄露了。
+
+扫描算法步骤:
+
+ 1. 标记所有对象为白色(最后剩下的白色对象被认为是孤立的)
+ 2. 从数据节和栈开始扫描内存,检测每个值是否是红黑树中存储的地址。如果一个指向
+ 白色对象的指针被检测到,则将该对象标记为灰色。
+ 3. 扫描灰色对象引用的其他对象(有些白色对象可能会变为灰色并被添加到灰名单末尾
+ )直到灰名单为空。
+ 4. 剩余的白色对象就被认为是孤立的并通过 /sys/kernel/debug/kmemleak 报告。
+
+有些指向已分配的内存块的指针存储在内核内部的数据结构中,它们不能被检测为孤立。
+为了避免这种情况,kmemleak 也存储了指向需要被查找的内存块范围内的任意地址的地址
+数量,如此一来这些内存便不会被认为泄露。一个例子是 __vmalloc()。
+
+用 kmemleak 测试特定部分
+------------------------
+
+在初始化启动阶段 /sys/kernel/debug/kmemleak 的输出可能会很多,这也可能是你在开发
+时编写的漏洞百出的代码导致的。为了解决这种情况你可以使用 'clear' 命令来清除
+/sys/kernel/debug/kmemleak 输出的所有的未引用对象。在执行 'clear' 后执行 'scan'
+可以发现新的未引用对象,这将会有利你测试代码的特定部分。
+
+为了用一个空的 kmemleak 测试一个特定部分,执行::
+
+ # echo clear > /sys/kernel/debug/kmemleak
+ ... 测试你的内核或者模块 ...
+ # echo scan > /sys/kernel/debug/kmemleak
+
+然后像平常一样获得报告::
+
+ # cat /sys/kernel/debug/kmemleak
+
+释放 kmemleak 内核对象
+----------------------
+
+为了允许访问先前发现的内存泄露,当用户禁用或发生致命错误导致 kmemleak
+被禁用时,内核中的 kmemleak 对象不会被释放。这些对象可能会占用很大
+一部分物理内存。
+
+在这种情况下,你可以用如下命令回收这些内存::
+
+ # echo clear > /sys/kernel/debug/kmemleak
+
+Kmemleak API
+------------
+
+在 include/linux/kmemleak.h 头文件中查看函数原型:
+
+- ``kmemleak_init`` - 初始化 kmemleak
+- ``kmemleak_alloc`` - 通知一个内存块的分配
+- ``kmemleak_alloc_percpu`` - 通知一个 percpu 类型的内存分配
+- ``kmemleak_vmalloc`` - 通知一个使用 vmalloc() 的内存分配
+- ``kmemleak_free`` - 通知一个内存块的释放
+- ``kmemleak_free_part`` - 通知一个部分的内存释放
+- ``kmemleak_free_percpu`` - 通知一个 percpu 类型的内存释放
+- ``kmemleak_update_trace`` - 更新分配对象过程的栈追踪
+- ``kmemleak_not_leak`` - 标记一个对象内存为未泄露的
+- ``kmemleak_ignore`` - 不扫描某个对象,也不将其报告为泄露
+- ``kmemleak_scan_area`` - 在内存块中添加扫描区域
+- ``kmemleak_no_scan`` - 不扫描某个内存块
+- ``kmemleak_erase`` - 在指针变量中移除某个旧的值
+- ``kmemleak_alloc_recursive`` - 和 kmemleak_alloc 效果相同但会检查是否有递归的
+ 内存分配
+- ``kmemleak_free_recursive`` - 和 kmemleak_free 效果相同但会检查是否有递归的
+ 内存释放
+
+下列函数使用一个物理地址作为对象指针并且只在地址有一个 lowmem 映射时做出相应的
+行为:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_ignore_phys``
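+
+下面是一个最小的示意(仅用于说明标注接口的用法, ``example_init`` 、
+``private_buf`` 等名字为假设的示例):当一个指针之后只保存在 kmemleak
+扫描不到的位置时,可以用 ``kmemleak_not_leak()`` 进行标注以避免误报::
+
+  #include <linux/slab.h>
+  #include <linux/kmemleak.h>
+
+  static void *private_buf;
+
+  static int example_init(void)
+  {
+          private_buf = kmalloc(128, GFP_KERNEL);
+          if (!private_buf)
+                  return -ENOMEM;
+
+          /* 指针随后只存放在 kmemleak 扫描不到的地方(示意),
+           * 将其标记为非泄露以避免误报 */
+          kmemleak_not_leak(private_buf);
+          return 0;
+  }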
+
+解决假阳性/假阴性
+-----------------
+
+假阴性是指由于在内存扫描中有值指向该对象导致 kmemleak 没有报告的实际存在的内存
+泄露(孤立对象)。为了减少假阴性的出现次数,kmemleak 提供了 kmemleak_ignore,
+kmemleak_scan_area,kmemleak_no_scan 和 kmemleak_erase 函数(见上)。
+任务栈也会增加假阴性的数量并且默认不开启对它们的扫描。
+
+假阳性是对象被误报为内存泄露(孤立对象)。对于已知未泄露的对象,kmemleak
+提供了 kmemleak_not_leak 函数。同时 kmemleak_ignore 可以用于标记已知不包含任何
+其他指针的内存块,标记后该内存块不会再被扫描。
+
+一些被报告的泄露仅仅是暂时的,尤其是在 SMP(对称多处理)系统中,因为其指针
+暂存在 CPU 寄存器或栈中。Kmemleak 定义了 MSECS_MIN_AGE(默认值为 1000)
+来表示一个被报告为内存泄露的对象的最小存活时间。
+
+限制和缺点
+----------
+
+主要的缺点是内存分配和释放的性能下降。为了避免其他的损失,只有当
+/sys/kernel/debug/kmemleak 文件被读取时才会进行内存扫描。无论如何,这个工具是出于
+调试的目标,性能表现可能不是最重要的。
+
+为了保持算法简单,kmemleak 寻找指向某个内存块范围中的任何值。这可能会引发假阴性
+现象的出现。但是,最后一个真正的内存泄露也会变得明显。
+
+非指针值的数据是假阴性的另一个来源。在将来的版本中,kmemleak 仅仅会扫
+描已分配结构体中的指针成员。这个特性会解决上述很多的假阴性情况。
+
+Kmemleak 会报告假阳性。这可能发生在某些被分配的内存块不需要被释放的情况下
+(某些 init_call 函数中),指针的计算是通过其他方法而不是常规的 container_of 宏
+或是指针被存储在 kmemleak 没有扫描的地方。
+
+页分配和 ioremap 不会被追踪。
+
+使用 kmemleak-test 测试
+-----------------------
+
+为了检测是否成功启用了 kmemleak,你可以使用一个故意制造内存泄露的模块
+kmemleak-test。设置 CONFIG_SAMPLE_KMEMLEAK 为模块(不能作为内建模块使用)
+并且启动启用了 kmemleak 的内核。加载模块并执行一次扫描::
+
+ # modprobe kmemleak-test
+ # echo scan > /sys/kernel/debug/kmemleak
+
+注意你可能无法立刻或在第一次扫描后得到结果。当 kmemleak 得到结果,将会输出日
+志 ``kmemleak: <count of leaks> new suspected memory leaks`` 。然后通过读取文件
+获取信息::
+
+ # cat /sys/kernel/debug/kmemleak
+ unreferenced object 0xffff89862ca702e8 (size 32):
+ comm "modprobe", pid 2088, jiffies 4294680594 (age 375.486s)
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
+ backtrace:
+ [<00000000e0a73ec7>] 0xffffffffc01d2036
+ [<000000000c5d2a46>] do_one_initcall+0x41/0x1df
+ [<0000000046db7e0a>] do_init_module+0x55/0x200
+ [<00000000542b9814>] load_module+0x203c/0x2480
+ [<00000000c2850256>] __do_sys_finit_module+0xba/0xe0
+ [<000000006564e7ef>] do_syscall_64+0x43/0x110
+ [<000000007c873fa6>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ ...
+
+用 ``rmmod kmemleak_test`` 移除模块时也会触发
+kmemleak 的结果输出。
diff --git a/Documentation/translations/zh_CN/dev-tools/ubsan.rst b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
new file mode 100644
index 000000000000..2487696b3772
--- /dev/null
+++ b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
@@ -0,0 +1,91 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/dev-tools/ubsan.rst
+:Translator: Dongliang Mu <dzm91@hust.edu.cn>
+
+未定义行为消毒剂 - UBSAN
+====================================
+
+UBSAN是一种动态未定义行为检查工具。
+
+UBSAN使用编译时插桩捕捉未定义行为。编译器在可能导致未定义行为的操作前插入特定
+检测代码。如果检查失败,即检测到未定义行为,__ubsan_handle_* 函数将被调用打印
+错误信息。
+
+GCC自4.9.x [1_] (详见 ``-fsanitize=undefined`` 选项及其子选项)版本后引入这
+一特性。GCC 5.x 版本实现了更多检查器 [2_]。
+
+报告样例
+--------------
+
+::
+
+ ================================================================================
+ UBSAN: Undefined behaviour in ../include/linux/bitops.h:110:33
+ shift exponent 32 is to large for 32-bit type 'unsigned int'
+ CPU: 0 PID: 0 Comm: swapper Not tainted 4.4.0-rc1+ #26
+ 0000000000000000 ffffffff82403cc8 ffffffff815e6cd6 0000000000000001
+ ffffffff82403cf8 ffffffff82403ce0 ffffffff8163a5ed 0000000000000020
+ ffffffff82403d78 ffffffff8163ac2b ffffffff815f0001 0000000000000002
+ Call Trace:
+ [<ffffffff815e6cd6>] dump_stack+0x45/0x5f
+ [<ffffffff8163a5ed>] ubsan_epilogue+0xd/0x40
+ [<ffffffff8163ac2b>] __ubsan_handle_shift_out_of_bounds+0xeb/0x130
+ [<ffffffff815f0001>] ? radix_tree_gang_lookup_slot+0x51/0x150
+ [<ffffffff8173c586>] _mix_pool_bytes+0x1e6/0x480
+ [<ffffffff83105653>] ? dmi_walk_early+0x48/0x5c
+ [<ffffffff8173c881>] add_device_randomness+0x61/0x130
+ [<ffffffff83105b35>] ? dmi_save_one_device+0xaa/0xaa
+ [<ffffffff83105653>] dmi_walk_early+0x48/0x5c
+ [<ffffffff831066ae>] dmi_scan_machine+0x278/0x4b4
+ [<ffffffff8111d58a>] ? vprintk_default+0x1a/0x20
+ [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+ [<ffffffff830b2240>] setup_arch+0x405/0xc2c
+ [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+ [<ffffffff830ae053>] start_kernel+0x83/0x49a
+ [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+ [<ffffffff830ad386>] x86_64_start_reservations+0x2a/0x2c
+ [<ffffffff830ad4f3>] x86_64_start_kernel+0x16b/0x17a
+ ================================================================================
+
+用法
+-----
+
+使用如下内核配置启用UBSAN::
+
+ CONFIG_UBSAN=y
+
+使用如下内核配置检查整个内核::
+
+ CONFIG_UBSAN_SANITIZE_ALL=y
+
+为了在特定文件或目录启动代码插桩,需要在相应的内核Makefile中添加一行类似内容:
+
+- 单文件(如main.o)::
+
+ UBSAN_SANITIZE_main.o := y
+
+- 一个目录中的所有文件::
+
+ UBSAN_SANITIZE := y
+
+即使设置了 ``CONFIG_UBSAN_SANITIZE_ALL=y``,为了避免文件被插桩,可使用::
+
+ UBSAN_SANITIZE_main.o := n
+
+与::
+
+ UBSAN_SANITIZE := n
+
+未对齐的内存访问检测可通过独立选项 CONFIG_UBSAN_ALIGNMENT 开启。
+该选项在支持未对齐访问的架构上(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y)
+默认为关闭。该选项仍可通过内核配置启用,但它将产生大量的UBSAN报告。
+
+参考文献
+----------
+
+.. _1: https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html
+.. _2: https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html
+.. _3: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
diff --git a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
index aeccff777170..0faf042001d2 100644
--- a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
+++ b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
@@ -208,8 +208,6 @@ GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其
gpio_request()
## gpio_request_one()
- ## gpio_request_array()
- ## gpio_free_array()
gpio_free()
@@ -272,14 +270,6 @@ gpio_request()前将这类细节配置好,例如使用引脚控制子系统的
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
- /* 在单个函数中申请多个 GPIO
- */
- int gpio_request_array(struct gpio *array, size_t num);
-
- /* 在单个函数中释放多个 GPIO
- */
- void gpio_free_array(struct gpio *array, size_t num);
-
这里 'flags' 当前定义可指定以下属性:
* GPIOF_DIR_IN - 配置方向为输入
@@ -317,12 +307,6 @@ gpio_request()前将这类细节配置好,例如使用引脚控制子系统的
if (err)
...
- err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios));
- if (err)
- ...
-
- gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios));
-
GPIO 映射到 IRQ
----------------
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 6ccec9657cc6..20b9d4270d1f 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -24,8 +24,8 @@
上的linux-doc邮件列表。
顺便说下,中文文档也需要遵守内核编码风格,风格中中文和英文的主要不同就是中文
-的字符标点占用两个英文字符宽度, 所以,当英文要求不要超过每行100个字符时,
-中文就不要超过50个字符。另外,也要注意'-','=' 等符号与相关标题的对齐。在将
+的字符标点占用两个英文字符宽度,所以,当英文要求不要超过每行100个字符时,
+中文就不要超过50个字符。另外,也要注意'-','='等符号与相关标题的对齐。在将
补丁提交到社区之前,一定要进行必要的 ``checkpatch.pl`` 检查和编译测试。
与Linux 内核社区一起工作
diff --git a/Documentation/translations/zh_CN/mm/page_frags.rst b/Documentation/translations/zh_CN/mm/page_frags.rst
index 20bd3fafdc8c..a5b22486a913 100644
--- a/Documentation/translations/zh_CN/mm/page_frags.rst
+++ b/Documentation/translations/zh_CN/mm/page_frags.rst
@@ -25,7 +25,7 @@ sk_buff->head使用,或者用于skb_shared_info的 “frags” 部分。
网络堆栈在每个CPU使用两个独立的缓存来处理碎片分配。netdev_alloc_cache被使用
netdev_alloc_frag和__netdev_alloc_skb调用的调用者使用。napi_alloc_cache
-被调用__napi_alloc_frag和__napi_alloc_skb的调用者使用。这两个调用的主要区别是
+被调用__napi_alloc_frag和napi_alloc_skb的调用者使用。这两个调用的主要区别是
它们可能被调用的环境。“netdev” 前缀的函数可以在任何上下文中使用,因为这些函数
将禁用中断,而 ”napi“ 前缀的函数只可以在softirq上下文中使用。
diff --git a/Documentation/translations/zh_CN/process/cve.rst b/Documentation/translations/zh_CN/process/cve.rst
new file mode 100644
index 000000000000..e39b796efcec
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/cve.rst
@@ -0,0 +1,89 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/process/cve.rst
+:Translator: Dongliang Mu <dzm91@hust.edu.cn>
+
+====
+CVEs
+====
+
+Common Vulnerabilities and Exposure (CVE®) 编号是一种明确的方式来
+识别、定义和登记公开披露的安全漏洞。随着时间的推移,它们在内核项目中的实用性
+已经下降,CVE编号经常以不适当的方式和不适当的原因被分配。因此,内核开发社区
+倾向于避免使用它们。然而,分配CVE与其他形式的安全标识符的持续压力,以及内核
+社区之外的个人和公司的持续滥用,已经清楚地表明内核社区应该控制这些CVE分配。
+
+Linux内核开发团队确实有能力为潜在的Linux内核安全问题分配CVE。CVE的分配
+独立于 :doc:`安全漏洞报送流程</process/security-bugs>`。
+
+所有分配给Linux内核的CVE列表都可以在linux-cve邮件列表的存档中找到,如
+https://lore.kernel.org/linux-cve-announce/ 所示。如果想在CVE被分配时获得
+通知,请 `订阅 <https://subspace.kernel.org/subscribing.html>`_ 该邮件列表。
+
+过程
+=======
+
+作为正常稳定发布过程的一部分,可能存在安全问题的内核更改由负责CVE编号分配
+的开发人员识别,并自动为其分配CVE编号。这些CVE分配会作为例行通告
+发布在linux-cve-announce邮件列表上。
+
+注意,由于Linux内核在系统中的特殊地位,几乎任何漏洞都可能被利用来危害内核
+的安全性,但是当漏洞被修复后,利用的可能性通常不明显。因此,CVE分配团队过于
+谨慎,并将CVE编号分配给他们识别的任何漏洞修复。这就解释了为什么Linux内核
+团队会发布大量的CVE。
+
+如果CVE分配团队错过了任何用户认为应该分配CVE的特定修复,请发送电子邮件到
+<cve@kernel.org>,那里的团队将与您一起工作。请注意,任何潜在的安全问题
+不应被发送到此邮箱,它仅用于为已发布的内核树中的漏洞修复分配CVE。如果你觉得
+自己发现了一个未修复的安全问题,请按照 :doc:`安全漏洞报送流程
+</process/security-bugs>` 发送到Linux内核社区。
+
+Linux内核不会给未修复的安全问题自动分配CVE;只有在安全修复可用且应用于
+稳定内核树后,CVE分配才会自动发生,并且它将通过安全修复的Git提交编号进行
+跟踪。如果有人希望在提交安全修复之前分配CVE,请联系内核CVE分配团队,从
+他们的一批保留编号中获得相应的CVE编号。
+
+对于目前没有得到稳定与长期维护内核团队积极支持的内核版本中发现的任何问题,
+都不会分配CVEs。当前支持的内核分支列表可以在 https://kernel.org/releases.html
+上找到。
+
+被分配CVE的争论
+=========================
+
+对于为特定内核修改分配的CVE,其争论或修改的权限仅属于受影响子系统的维护者。
+这一原则确保了漏洞报告的高度准确性和可问责性。只有那些具有深厚专业知识和
+对子系统深入了解的维护人员,才能有效评估内核漏洞的有效性和范围,并确定其适当的
+CVE指定策略。在此指定权限之外,任何争论或修改CVE的尝试都可能导致混乱、
+不准确的报告,并最终危及系统。
+
+无效的CVE
+============
+
+如果发现的安全问题存在于仅由某Linux发行版支持的Linux内核中,即安全问题是
+由于Linux发行版所做的更改导致,或者Linux的发行版内核版本不再是Linux内核
+社区支持的内核版本,那么Linux内核CVE团队将不能分配CVE,必须从Linux
+发行版本身请求。
+
+内核CVE分配团队以外的任何团队对Linux内核支持版本分配的CVE都不应被
+视为有效CVE。请通知内核CVE分配团队,以便他们可以通过CNA修复措施使
+这些条目失效。
+
+特定CVE的适用性
+==============================
+
+由于Linux内核可以以许多不同方式使用,外部用户可以通过许多不同方式访问它,或者
+根本没有访问,因此任何特定CVE的适用性取决于Linux用户,而不是内核CVE分配团队。
+请不要与我们联系来尝试确定任何特定CVE的适用性。
+
+此外,由于源代码树非常大,而任何一个系统都只使用源代码树的一小部分,因此任何
+Linux用户都应该意识到,大量分配的CVEs与他们的系统无关。
+
+简而言之,我们不知道您的用例,也不知道您使用的是内核的哪个部分,因此我们无法
+确定特定的CVE是否与您的系统相关。
+
+与往常一样,最好采用所有发布的内核更改,因为它们是由许多社区成员在一个统一的
+整体中一起进行测试的,而不是作为个别的精选更改。还要注意,对于许多安全问题来
+说,整体问题的解决方案并不是在单个更改中找到的,而是在彼此之上的许多修复的总
+和。理想情况下,CVE将被分配给所有问题的所有修复,但有时我们将无法注意到一些
+修复,因此某些修复可能在没有CVE的情况下被采取。
diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst
index 3ca02d281be0..5c6c8ccdd50d 100644
--- a/Documentation/translations/zh_CN/process/index.rst
+++ b/Documentation/translations/zh_CN/process/index.rst
@@ -48,6 +48,7 @@ TODOLIST:
:maxdepth: 1
embargoed-hardware-issues
+ cve
TODOLIST:
diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst
index f8978f02057c..7864107e60a8 100644
--- a/Documentation/translations/zh_CN/process/submitting-patches.rst
+++ b/Documentation/translations/zh_CN/process/submitting-patches.rst
@@ -333,10 +333,10 @@ Linus 和其他的内核开发者需要阅读和评论你提交的改动。对
未参与其开发。签署链应当反映补丁传播到维护者并最终传播到Linus所经过的 **真实**
路径,首个签署指明单个作者的主要作者身份。
-何时使用Acked-by:,CC:,和Co-Developed by:
+何时使用Acked-by:,Cc:,和Co-developed-by:
------------------------------------------
-Singed-off-by: 标签表示签名者参与了补丁的开发,或者他/她在补丁的传递路径中。
+Signed-off-by: 标签表示签名者参与了补丁的开发,或者他/她在补丁的传递路径中。
如果一个人没有直接参与补丁的准备或处理,但希望表示并记录他们对补丁的批准/赞成,
那么他们可以要求在补丁的变更日志中添加一个Acked-by:。
@@ -358,8 +358,8 @@ Acked-by:不一定表示对整个补丁的确认。例如,如果一个补丁
Co-developed-by: 声明补丁是由多个开发人员共同创建的;当几个人在一个补丁上工
作时,它用于给出共同作者(除了From:所给出的作者之外)。因为Co-developed-by:
表示作者身份,所以每个Co-developed-by:必须紧跟在相关合作作者的签署之后。标准
-签署程序要求Singed-off-by:标签的顺序应尽可能反映补丁的时间历史,无论作者是通
-过From:还是Co-developed-by:表明。值得注意的是,最后一个Singed-off-by:必须是
+签署程序要求Signed-off-by:标签的顺序应尽可能反映补丁的时间历史,无论作者是通
+过From:还是Co-developed-by:表明。值得注意的是,最后一个Signed-off-by:必须是
提交补丁的开发人员。
注意,如果From:作者也是电子邮件标题的From:行中列出的人,则From:标签是可选的。
diff --git a/Documentation/translations/zh_CN/rust/arch-support.rst b/Documentation/translations/zh_CN/rust/arch-support.rst
index afbd02afec45..abd708d48f82 100644
--- a/Documentation/translations/zh_CN/rust/arch-support.rst
+++ b/Documentation/translations/zh_CN/rust/arch-support.rst
@@ -16,8 +16,12 @@
下面是目前可以工作的架构的一般总结。支持程度与 ``MAINTAINERS`` 文件中的``S`` 值相对应:
-============ ================ ==============================================
-架构 支持水平 限制因素
-============ ================ ==============================================
-``x86`` Maintained 只有 ``x86_64``
-============ ================ ==============================================
+============= ================ ==============================================
+架构 支持水平 限制因素
+============= ================ ==============================================
+``arm64`` Maintained 只有小端序
+``loongarch`` Maintained \-
+``riscv`` Maintained 只有 ``riscv64``
+``um`` Maintained 只有 ``x86_64``
+``x86`` Maintained 只有 ``x86_64``
+============= ================ ==============================================
diff --git a/Documentation/translations/zh_CN/rust/coding-guidelines.rst b/Documentation/translations/zh_CN/rust/coding-guidelines.rst
index 6c0bdbbc5a2a..419143b938ed 100644
--- a/Documentation/translations/zh_CN/rust/coding-guidelines.rst
+++ b/Documentation/translations/zh_CN/rust/coding-guidelines.rst
@@ -157,6 +157,18 @@ https://commonmark.org/help/
https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html
+此外,内核支持通过在链接目标前添加 ``srctree/`` 来创建相对于源代码树的链接。例如:
+
+.. code-block:: rust
+
+ //! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h)
+
+或者:
+
+.. code-block:: rust
+
+ /// [`struct mutex`]: srctree/include/linux/mutex.h
+
命名
----
diff --git a/Documentation/translations/zh_CN/rust/general-information.rst b/Documentation/translations/zh_CN/rust/general-information.rst
index 6b91dfe1834a..251f6ee2bb44 100644
--- a/Documentation/translations/zh_CN/rust/general-information.rst
+++ b/Documentation/translations/zh_CN/rust/general-information.rst
@@ -32,7 +32,7 @@ Rust内核代码使用其内置的文档生成器 ``rustdoc`` 进行记录。
要在你的网络浏览器中本地阅读该文档,请运行如::
- xdg-open rust/doc/kernel/index.html
+ xdg-open Documentation/output/rust/rustdoc/kernel/index.html
要了解如何编写文档,请看 coding-guidelines.rst 。
diff --git a/Documentation/translations/zh_CN/rust/quick-start.rst b/Documentation/translations/zh_CN/rust/quick-start.rst
index a4b8e8a50a89..8616556ae4d7 100644
--- a/Documentation/translations/zh_CN/rust/quick-start.rst
+++ b/Documentation/translations/zh_CN/rust/quick-start.rst
@@ -37,13 +37,18 @@ rustc
需要一个特定版本的Rust编译器。较新的版本可能会也可能不会工作,因为就目前而言,内核依赖
于一些不稳定的Rust特性。
-如果使用的是 ``rustup`` ,请进入检出的源代码目录并运行::
+如果使用的是 ``rustup`` ,请进入内核编译目录(或者把 ``--path=<build-dir>`` 参数
+传给 ``set`` 子命令)并运行::
rustup override set $(scripts/min-tool-version.sh rustc)
-或者从以下网址获取一个独立的安装程序或安装 ``rustup`` :
+这将配置你的工作目录使用正确版本的 ``rustc``,而不影响你的默认工具链。
- https://www.rust-lang.org
+请注意,该覆盖设置应用于当前的工作目录(及其子目录)。
+
+如果不使用 ``rustup`` ,可以从下面的链接获取一个独立的安装程序:
+
+ https://forge.rust-lang.org/infra/other-installation-methods.html#standalone
Rust标准库源代码
@@ -57,21 +62,23 @@ Rust标准库的源代码是必需的,因为构建系统会交叉编译 ``core
这些组件是按工具链安装的,因此以后升级Rust编译器版本需要重新添加组件。
-否则,如果使用独立的安装程序,可以将Rust仓库克隆到工具链的安装文件夹中::
+否则,如果使用独立的安装程序,可以将Rust源码树下载到安装工具链的文件夹中::
- git clone --recurse-submodules \
- --branch $(scripts/min-tool-version.sh rustc) \
- https://github.com/rust-lang/rust \
- $(rustc --print sysroot)/lib/rustlib/src/rust
+ curl -L "https://static.rust-lang.org/dist/rust-src-$(scripts/min-tool-version.sh rustc).tar.gz" |
+ tar -xzf - -C "$(rustc --print sysroot)/lib" \
+ "rust-src-$(scripts/min-tool-version.sh rustc)/rust-src/lib/" \
+ --strip-components=3
-在这种情况下,以后升级Rust编译器版本需要手动更新这个克隆的仓库。
+在这种情况下,以后升级Rust编译器版本需要手动更新这个源代码树(这可以通过移除
+``$(rustc --print sysroot)/lib/rustlib/src/rust`` ,然后重新执行上
+面的命令做到)。
libclang
********
``bindgen`` 使用 ``libclang`` (LLVM的一部分)来理解内核中的C代码,这意味着需要安
-装LLVM;同在开启 ``CC=clang`` 或 ``LLVM=1`` 时编译内核一样。
+装LLVM;同在开启 ``LLVM=1`` 时编译内核一样。
Linux发行版中可能会有合适的包,所以最好先检查一下。
@@ -94,7 +101,20 @@ bindgen
通过以下方式安装它(注意,这将从源码下载并构建该工具)::
- cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen
+ cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen-cli
+
+``bindgen`` 需要找到合适的 ``libclang`` 才能工作。如果没有找到(或者找到的
+``libclang`` 与应该使用的 ``libclang`` 不同),则可以使用 ``clang-sys``
+理解的环境变量( ``clang-sys`` 是 ``bindgen`` 用来访问 ``libclang`` 的 Rust 绑定 crate):
+
+
+* ``LLVM_CONFIG_PATH`` 可以指向一个 ``llvm-config`` 可执行文件。
+
+* 或者 ``LIBCLANG_PATH`` 可以指向 ``libclang`` 共享库或包含它的目录。
+
+* 或者 ``CLANG_PATH`` 可以指向 ``clang`` 可执行文件。
+
+详情请参阅 ``clang-sys`` 的文档:
开发依赖
@@ -163,7 +183,9 @@ rust-analyzer
一起使用,以实现语法高亮、补全、转到定义和其他功能。
``rust-analyzer`` 需要一个配置文件, ``rust-project.json``, 它可以由 ``rust-analyzer``
-Make 目标生成。
+Make 目标生成::
+
+ make LLVM=1 rust-analyzer
配置
@@ -189,10 +211,6 @@ Rust支持(CONFIG_RUST)需要在 ``General setup`` 菜单中启用。在其
make LLVM=1
-对于不支持完整LLVM工具链的架构,使用::
-
- make CC=clang
-
使用GCC对某些配置也是可行的,但目前它是非常试验性的。
diff --git a/Documentation/translations/zh_CN/scheduler/sched-domains.rst b/Documentation/translations/zh_CN/scheduler/sched-domains.rst
index e814d4c01141..06363169c56b 100644
--- a/Documentation/translations/zh_CN/scheduler/sched-domains.rst
+++ b/Documentation/translations/zh_CN/scheduler/sched-domains.rst
@@ -34,17 +34,17 @@ CPU共享。任意两个组的CPU掩码的交集不一定为空,如果是这
调度域中的负载均衡发生在调度组中。也就是说,每个组被视为一个实体。组的负载被定义为它
管辖的每个CPU的负载之和。仅当组的负载不均衡后,任务才在组之间发生迁移。
-在kernel/sched/core.c中,trigger_load_balance()在每个CPU上通过scheduler_tick()
+在kernel/sched/core.c中,sched_balance_trigger()在每个CPU上通过sched_tick()
周期执行。在当前运行队列下一个定期调度再平衡事件到达后,它引发一个软中断。负载均衡真正
-的工作由run_rebalance_domains()->rebalance_domains()完成,在软中断上下文中执行
+的工作由sched_balance_softirq()->sched_balance_domains()完成,在软中断上下文中执行
(SCHED_SOFTIRQ)。
-后一个函数有两个入参:当前CPU的运行队列、它在scheduler_tick()调用时是否空闲。函数会从
+后一个函数有两个入参:当前CPU的运行队列、它在sched_tick()调用时是否空闲。函数会从
当前CPU所在的基调度域开始迭代执行,并沿着parent指针链向上进入更高层级的调度域。在迭代
过程中,函数会检查当前调度域是否已经耗尽了再平衡的时间间隔,如果是,它在该调度域运行
-load_balance()。接下来它检查父调度域(如果存在),再后来父调度域的父调度域,以此类推。
+sched_balance_rq()。接下来它检查父调度域(如果存在),再后来父调度域的父调度域,以此类推。
-起初,load_balance()查找当前调度域中最繁忙的调度组。如果成功,在该调度组管辖的全部CPU
+起初,sched_balance_rq()查找当前调度域中最繁忙的调度组。如果成功,在该调度组管辖的全部CPU
的运行队列中找出最繁忙的运行队列。如能找到,对当前的CPU运行队列和新找到的最繁忙运行
队列均加锁,并把任务从最繁忙队列中迁移到当前CPU上。被迁移的任务数量等于在先前迭代执行
中计算出的该调度域的调度组的不均衡值。
diff --git a/Documentation/translations/zh_CN/scheduler/sched-stats.rst b/Documentation/translations/zh_CN/scheduler/sched-stats.rst
index c5e0be663837..09eee2517610 100644
--- a/Documentation/translations/zh_CN/scheduler/sched-stats.rst
+++ b/Documentation/translations/zh_CN/scheduler/sched-stats.rst
@@ -75,42 +75,42 @@ domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
繁忙,新空闲):
- 1) 当CPU空闲时,load_balance()在这个调度域中被调用了#次
- 2) 当CPU空闲时,load_balance()在这个调度域中被调用,但是发现负载无需
+ 1) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用了#次
+ 2) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需
均衡#次
- 3) 当CPU空闲时,load_balance()在这个调度域中被调用,试图迁移1个或更多
+ 3) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多
任务且失败了#次
- 4) 当CPU空闲时,load_balance()在这个调度域中被调用,发现不均衡(如果有)
+ 4) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有)
#次
5) 当CPU空闲时,pull_task()在这个调度域中被调用#次
6) 当CPU空闲时,尽管目标任务是热缓存状态,pull_task()依然被调用#次
- 7) 当CPU空闲时,load_balance()在这个调度域中被调用,未能找到更繁忙的
+ 7) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的
队列#次
8) 当CPU空闲时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组
#次
- 9) 当CPU繁忙时,load_balance()在这个调度域中被调用了#次
- 10) 当CPU繁忙时,load_balance()在这个调度域中被调用,但是发现负载无需
+ 9) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用了#次
+ 10) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需
均衡#次
- 11) 当CPU繁忙时,load_balance()在这个调度域中被调用,试图迁移1个或更多
+ 11) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多
任务且失败了#次
- 12) 当CPU繁忙时,load_balance()在这个调度域中被调用,发现不均衡(如果有)
+ 12) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有)
#次
13) 当CPU繁忙时,pull_task()在这个调度域中被调用#次
14) 当CPU繁忙时,尽管目标任务是热缓存状态,pull_task()依然被调用#次
- 15) 当CPU繁忙时,load_balance()在这个调度域中被调用,未能找到更繁忙的
+ 15) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的
队列#次
16) 当CPU繁忙时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组
#次
- 17) 当CPU新空闲时,load_balance()在这个调度域中被调用了#次
- 18) 当CPU新空闲时,load_balance()在这个调度域中被调用,但是发现负载无需
+ 17) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用了#次
+ 18) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需
均衡#次
- 19) 当CPU新空闲时,load_balance()在这个调度域中被调用,试图迁移1个或更多
+ 19) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多
任务且失败了#次
- 20) 当CPU新空闲时,load_balance()在这个调度域中被调用,发现不均衡(如果有)
+ 20) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有)
#次
21) 当CPU新空闲时,pull_task()在这个调度域中被调用#次
22) 当CPU新空闲时,尽管目标任务是热缓存状态,pull_task()依然被调用#次
- 23) 当CPU新空闲时,load_balance()在这个调度域中被调用,未能找到更繁忙的
+ 23) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的
队列#次
24) 当CPU新空闲时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组
#次
diff --git a/Documentation/translations/zh_TW/gpio.txt b/Documentation/translations/zh_TW/gpio.txt
index b9b48012c62e..77d69d381316 100644
--- a/Documentation/translations/zh_TW/gpio.txt
+++ b/Documentation/translations/zh_TW/gpio.txt
@@ -215,13 +215,10 @@ GPIO 值的命令需要等待其信息排到隊首才發送命令,再獲得其
gpio_request()
## gpio_request_one()
-## gpio_request_array()
-## gpio_free_array()
gpio_free()
-
聲明和釋放 GPIO
----------------------------
爲了有助於捕獲系統配置錯誤,定義了兩個函數。
@@ -278,14 +275,6 @@ gpio_request()前將這類細節配置好,例如使用 pinctrl 子系統的映
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
- /* 在單個函數中申請多個 GPIO
- */
- int gpio_request_array(struct gpio *array, size_t num);
-
- /* 在單個函數中釋放多個 GPIO
- */
- void gpio_free_array(struct gpio *array, size_t num);
-
這裡 'flags' 當前定義可指定以下屬性:
* GPIOF_DIR_IN - 配置方向爲輸入
@@ -323,12 +312,6 @@ gpio_request()前將這類細節配置好,例如使用 pinctrl 子系統的映
if (err)
...
- err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios));
- if (err)
- ...
-
- gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios));
-
GPIO 映射到 IRQ
--------------------
diff --git a/Documentation/translations/zh_TW/process/submit-checklist.rst b/Documentation/translations/zh_TW/process/submit-checklist.rst
index 43f2e3c5b514..0ecb187753e4 100644
--- a/Documentation/translations/zh_TW/process/submit-checklist.rst
+++ b/Documentation/translations/zh_TW/process/submit-checklist.rst
@@ -31,7 +31,7 @@ Linux內核補丁提交檢查單
c) 使用 ``O=builddir`` 時可以成功編譯
- d) 任何 Doucmentation/ 下的變更都能成功構建且不引入新警告/錯誤。
+ d) 任何 Documentation/ 下的變更都能成功構建且不引入新警告/錯誤。
用 ``make htmldocs`` 或 ``make pdfdocs`` 檢驗構建情況並修復問題。
3) 通過使用本地交叉編譯工具或其他一些構建設施在多個CPU體系結構上構建。
diff --git a/Documentation/translations/zh_TW/process/submitting-patches.rst b/Documentation/translations/zh_TW/process/submitting-patches.rst
index 99fa0f2fe6f4..f12f2f193f85 100644
--- a/Documentation/translations/zh_TW/process/submitting-patches.rst
+++ b/Documentation/translations/zh_TW/process/submitting-patches.rst
@@ -334,10 +334,10 @@ Linus 和其他的內核開發者需要閱讀和評論你提交的改動。對
未參與其開發。簽署鏈應當反映補丁傳播到維護者並最終傳播到Linus所經過的 **真實**
路徑,首個簽署指明單個作者的主要作者身份。
-何時使用Acked-by:,CC:,和Co-Developed by:
+何時使用Acked-by:,Cc:,和Co-developed-by:
------------------------------------------
-Singed-off-by: 標籤表示簽名者參與了補丁的開發,或者他/她在補丁的傳遞路徑中。
+Signed-off-by: 標籤表示簽名者參與了補丁的開發,或者他/她在補丁的傳遞路徑中。
如果一個人沒有直接參與補丁的準備或處理,但希望表示並記錄他們對補丁的批准/贊成,
那麼他們可以要求在補丁的變更日誌中添加一個Acked-by:。
@@ -359,8 +359,8 @@ Acked-by:不一定表示對整個補丁的確認。例如,如果一個補丁
Co-developed-by: 聲明補丁是由多個開發人員共同創建的;當幾個人在一個補丁上工
作時,它用於給出共同作者(除了From:所給出的作者之外)。因爲Co-developed-by:
表示作者身份,所以每個Co-developed-by:必須緊跟在相關合作作者的簽署之後。標準
-簽署程序要求Singed-off-by:標籤的順序應儘可能反映補丁的時間歷史,無論作者是通
-過From:還是Co-developed-by:表明。值得注意的是,最後一個Singed-off-by:必須是
+簽署程序要求Signed-off-by:標籤的順序應儘可能反映補丁的時間歷史,無論作者是通
+過From:還是Co-developed-by:表明。值得注意的是,最後一個Signed-off-by:必須是
提交補丁的開發人員。
注意,如果From:作者也是電子郵件標題的From:行中列出的人,則From:標籤是可選的。
diff --git a/Documentation/userspace-api/gpio/gpio-v2-get-line-ioctl.rst b/Documentation/userspace-api/gpio/gpio-v2-get-line-ioctl.rst
index 56b975801b6a..6615d6ced755 100644
--- a/Documentation/userspace-api/gpio/gpio-v2-get-line-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-v2-get-line-ioctl.rst
@@ -81,7 +81,7 @@ Only one event clock flag, ``GPIO_V2_LINE_FLAG_EVENT_CLOCK_xxx``, may be set.
If none are set then the event clock defaults to ``CLOCK_MONOTONIC``.
The ``GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE`` flag requires supporting hardware
and a kernel with ``CONFIG_HTE`` set. Requesting HTE from a device that
-doesn't support it is an error (**EOPNOTSUP**).
+doesn't support it is an error (**EOPNOTSUPP**).
The :c:type:`debounce_period_us<gpio_v2_line_attribute>` attribute may only
be applied to lines with ``GPIO_V2_LINE_FLAG_INPUT`` set. When set, debounce
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index 9dd636aaa829..07b63aec56fa 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: October 2023
+:Date: April 2024
The goal of Landlock is to enable to restrict ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -76,7 +76,8 @@ to be explicit about the denied-by-default access rights.
LANDLOCK_ACCESS_FS_MAKE_BLOCK |
LANDLOCK_ACCESS_FS_MAKE_SYM |
LANDLOCK_ACCESS_FS_REFER |
- LANDLOCK_ACCESS_FS_TRUNCATE,
+ LANDLOCK_ACCESS_FS_TRUNCATE |
+ LANDLOCK_ACCESS_FS_IOCTL_DEV,
.handled_access_net =
LANDLOCK_ACCESS_NET_BIND_TCP |
LANDLOCK_ACCESS_NET_CONNECT_TCP,
@@ -85,10 +86,10 @@ to be explicit about the denied-by-default access rights.
Because we may not know on which kernel version an application will be
executed, it is safer to follow a best-effort security approach. Indeed, we
should try to protect users as much as possible whatever the kernel they are
-using. To avoid binary enforcement (i.e. either all security features or
-none), we can leverage a dedicated Landlock command to get the current version
-of the Landlock ABI and adapt the handled accesses. Let's check if we should
-remove access rights which are only supported in higher versions of the ABI.
+using.
+
+To be compatible with older Linux versions, we detect the available Landlock ABI
+version, and only use the available subset of access rights:
.. code-block:: c
@@ -114,6 +115,10 @@ remove access rights which are only supported in higher versions of the ABI.
ruleset_attr.handled_access_net &=
~(LANDLOCK_ACCESS_NET_BIND_TCP |
LANDLOCK_ACCESS_NET_CONNECT_TCP);
+ __attribute__((fallthrough));
+ case 4:
+ /* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */
+ ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV;
}
This enables to create an inclusive ruleset that will contain our rules.
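
Pulled together, the best-effort check reads roughly as follows. This is a
sketch only: it assumes the ``landlock_create_ruleset()`` syscall wrapper and
the ``ruleset_attr`` variable defined earlier in landlock.rst, and degrades
gracefully on kernels without Landlock:

.. code-block:: c

    int abi;

    abi = landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);
    if (abi < 0) {
        /* Kernel without Landlock support: skip sandboxing. */
        return 0;
    }
    switch (abi) {
    case 1:
        /* Removes LANDLOCK_ACCESS_FS_REFER for ABI < 2 */
        ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_REFER;
        __attribute__((fallthrough));
    case 2:
        /* Removes LANDLOCK_ACCESS_FS_TRUNCATE for ABI < 3 */
        ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_TRUNCATE;
        __attribute__((fallthrough));
    case 3:
        /* Removes network support for ABI < 4 */
        ruleset_attr.handled_access_net &=
            ~(LANDLOCK_ACCESS_NET_BIND_TCP |
              LANDLOCK_ACCESS_NET_CONNECT_TCP);
        __attribute__((fallthrough));
    case 4:
        /* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */
        ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV;
    }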
@@ -225,6 +230,7 @@ access rights per directory enables to change the location of such directory
without relying on the destination directory access rights (except those that
are required for this operation, see ``LANDLOCK_ACCESS_FS_REFER``
documentation).
+
Having self-sufficient hierarchies also helps to tighten the required access
rights to the minimal set of data. This also helps avoid sinkhole directories,
i.e. directories where data can be linked to but not linked from. However,
@@ -318,18 +324,26 @@ It should also be noted that truncating files does not require the
system call, this can also be done through :manpage:`open(2)` with the flags
``O_RDONLY | O_TRUNC``.
-When opening a file, the availability of the ``LANDLOCK_ACCESS_FS_TRUNCATE``
-right is associated with the newly created file descriptor and will be used for
-subsequent truncation attempts using :manpage:`ftruncate(2)`. The behavior is
-similar to opening a file for reading or writing, where permissions are checked
-during :manpage:`open(2)`, but not during the subsequent :manpage:`read(2)` and
+The truncate right is associated with the opened file (see below).
+
+Rights associated with file descriptors
+---------------------------------------
+
+When opening a file, the availability of the ``LANDLOCK_ACCESS_FS_TRUNCATE`` and
+``LANDLOCK_ACCESS_FS_IOCTL_DEV`` rights is associated with the newly created
+file descriptor and will be used for subsequent truncation and ioctl attempts
+using :manpage:`ftruncate(2)` and :manpage:`ioctl(2)`. The behavior is similar
+to opening a file for reading or writing, where permissions are checked during
+:manpage:`open(2)`, but not during the subsequent :manpage:`read(2)` and
:manpage:`write(2)` calls.
-As a consequence, it is possible to have multiple open file descriptors for the
-same file, where one grants the right to truncate the file and the other does
-not. It is also possible to pass such file descriptors between processes,
-keeping their Landlock properties, even when these processes do not have an
-enforced Landlock ruleset.
+As a consequence, it is possible that a process has multiple open file
+descriptors referring to the same file, but Landlock enforces different things
+when operating with these file descriptors. This can happen when a Landlock
+ruleset gets enforced and the process keeps file descriptors which were opened
+both before and after the enforcement. It is also possible to pass such file
+descriptors between processes, keeping their Landlock properties, even when some
+of the involved processes do not have an enforced Landlock ruleset.
Compatibility
=============
@@ -458,6 +472,28 @@ Memory usage
Kernel memory allocated to create rulesets is accounted and can be restricted
by the Documentation/admin-guide/cgroup-v1/memory.rst.
+IOCTL support
+-------------
+
+The ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right restricts the use of
+:manpage:`ioctl(2)`, but it only applies to *newly opened* device files. This
+means specifically that pre-existing file descriptors like stdin, stdout and
+stderr are unaffected.
+
+Users should be aware that TTY devices have traditionally made it possible to
+control other processes on the same TTY through the ``TIOCSTI`` and
+``TIOCLINUX`` IOCTL commands. Both of these require ``CAP_SYS_ADMIN`` on modern
+Linux systems, but the behavior is configurable for ``TIOCSTI``.
+
+On older systems, it is therefore recommended to close inherited TTY file
+descriptors, or to reopen them from ``/proc/self/fd/*`` without the
+``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right, if possible.
+
+Landlock's IOCTL support is coarse-grained at the moment, but may become more
+fine-grained in the future. Until then, users are advised to establish the
+guarantees that they need through the file hierarchy, by only allowing the
+``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right on files where it is really required.
+
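For illustration only, one way to make an inherited TTY descriptor subject to
the enforced ruleset is to reopen it through ``/proc/self/fd`` after
enforcement; the descriptor number and open flags below are assumptions:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch: replace an inherited descriptor (e.g. a TTY on fd 2) with one
     * opened after Landlock enforcement, so that the new descriptor carries
     * only the rights granted by the ruleset in effect at open(2) time.
     */
    static int reopen_inherited_fd(int fd)
    {
        char path[32];
        int new_fd;

        snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
        new_fd = open(path, O_RDWR | O_CLOEXEC);
        if (new_fd < 0)
            return -1;
        if (dup2(new_fd, fd) < 0) {
            close(new_fd);
            return -1;
        }
        close(new_fd);
        return 0;
    }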
Previous limitations
====================
@@ -495,6 +531,16 @@ bind and connect actions to only a set of allowed ports thanks to the new
``LANDLOCK_ACCESS_NET_BIND_TCP`` and ``LANDLOCK_ACCESS_NET_CONNECT_TCP``
access rights.
+IOCTL (ABI < 5)
+---------------
+
+IOCTL operations could not be denied before the fifth Landlock ABI, so
+:manpage:`ioctl(2)` is always allowed when using a kernel that only supports an
+earlier ABI.
+
+Starting with the Landlock ABI version 5, it is possible to restrict the use of
+:manpage:`ioctl(2)` using the new ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right.
+
.. _kernel_support:
Kernel support
diff --git a/Documentation/userspace-api/media/cec/cec-func-open.rst b/Documentation/userspace-api/media/cec/cec-func-open.rst
index d86563a34b9e..125c8ac6680b 100644
--- a/Documentation/userspace-api/media/cec/cec-func-open.rst
+++ b/Documentation/userspace-api/media/cec/cec-func-open.rst
@@ -70,5 +70,5 @@ include:
``ENOMEM``
Insufficient kernel memory was available.
-``ENXIO``
- No device corresponding to this device special file exists.
+``ENODEV``
+ Device not found or was removed.
diff --git a/Documentation/userspace-api/media/dvb/frontend_f_open.rst b/Documentation/userspace-api/media/dvb/frontend_f_open.rst
index bb37eded0870..70e169b8f601 100644
--- a/Documentation/userspace-api/media/dvb/frontend_f_open.rst
+++ b/Documentation/userspace-api/media/dvb/frontend_f_open.rst
@@ -91,7 +91,7 @@ appropriately.
- The caller has no permission to access the device.
- - ``EBUSY``
- - The the device driver is already in use.
+ - The device driver is already in use.
- - ``EMFILE``
- The process already has the maximum number of files open.
diff --git a/Documentation/userspace-api/media/glossary.rst b/Documentation/userspace-api/media/glossary.rst
index 96a360edbf3b..55a9c37130ba 100644
--- a/Documentation/userspace-api/media/glossary.rst
+++ b/Documentation/userspace-api/media/glossary.rst
@@ -25,6 +25,13 @@ Glossary
See :ref:`cec`.
+ Data Unit
+
+ Unit of data transported by a bus. On parallel buses, the data unit
+ consists of one or more related samples while on serial buses the data
+ unit is logical. If the data unit is image data, it may also be called a
+ pixel.
+
Device Driver
Part of the Linux Kernel that implements support for a hardware
component.
@@ -173,6 +180,11 @@ Glossary
An integrated circuit that integrates all components of a computer
or other electronic systems.
+ Stream
+ A distinct flow of data (image data or metadata) from an initial source
+ to a final sink. The initial source may be e.g. an image sensor and the
+ final sink e.g. a memory buffer.
+
V4L2 API
**V4L2 userspace API**
diff --git a/Documentation/userspace-api/media/v4l/dev-meta.rst b/Documentation/userspace-api/media/v4l/dev-meta.rst
index 0e7e1ee1471a..5eee9ab60395 100644
--- a/Documentation/userspace-api/media/v4l/dev-meta.rst
+++ b/Documentation/userspace-api/media/v4l/dev-meta.rst
@@ -47,6 +47,12 @@ member of the ``fmt`` union as needed per the desired operation. Both drivers
and applications must set the remainder of the :c:type:`v4l2_format` structure
to 0.
+Devices that capture metadata by line have the struct v4l2_fmtdesc
+``V4L2_FMT_FLAG_META_LINE_BASED`` flag set for :c:func:`VIDIOC_ENUM_FMT`. Such
+devices can typically also :ref:`capture image data <capture>`. This primarily
+involves devices that receive the data from a different device, such as a
+camera sensor.
+
.. c:type:: v4l2_meta_format
.. tabularcolumns:: |p{1.4cm}|p{2.4cm}|p{13.5cm}|
@@ -65,3 +71,18 @@ to 0.
- ``buffersize``
- Maximum buffer size in bytes required for data. The value is set by the
driver.
+ * - __u32
+ - ``width``
+ - Width of a line of metadata in Data Units. Valid when
+      :c:type:`v4l2_fmtdesc` flag ``V4L2_FMT_FLAG_META_LINE_BASED`` is set,
+ otherwise zero. See :c:func:`VIDIOC_ENUM_FMT`.
+ * - __u32
+ - ``height``
+    - Number of rows of metadata. Valid when :c:type:`v4l2_fmtdesc` flag
+ ``V4L2_FMT_FLAG_META_LINE_BASED`` is set, otherwise zero. See
+ :c:func:`VIDIOC_ENUM_FMT`.
+ * - __u32
+ - ``bytesperline``
+ - Offset in bytes between the beginning of two consecutive lines. Valid
+      when :c:type:`v4l2_fmtdesc` flag ``V4L2_FMT_FLAG_META_LINE_BASED`` is
+ set, otherwise zero. See :c:func:`VIDIOC_ENUM_FMT`.
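
As a rough usage sketch (the device node path is an assumption, and a uAPI
header containing the new ``width``, ``height`` and ``bytesperline`` fields is
assumed), an application could query the negotiated line-based metadata format
like this:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    /* Print the current metadata format of a metadata capture node. */
    int dump_meta_format(const char *devnode)
    {
        struct v4l2_format fmt;
        int fd = open(devnode, O_RDWR);

        if (fd < 0)
            return -1;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
        if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0) {
            close(fd);
            return -1;
        }

        /* width/height/bytesperline are non-zero only for line-based formats. */
        printf("%ux%u data units, %u bytes per line, %u byte buffers\n",
               fmt.fmt.meta.width, fmt.fmt.meta.height,
               fmt.fmt.meta.bytesperline, fmt.fmt.meta.buffersize);
        close(fd);
        return 0;
    }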
diff --git a/Documentation/userspace-api/media/v4l/dev-subdev.rst b/Documentation/userspace-api/media/v4l/dev-subdev.rst
index 43988516acdd..0f9eda3187f3 100644
--- a/Documentation/userspace-api/media/v4l/dev-subdev.rst
+++ b/Documentation/userspace-api/media/v4l/dev-subdev.rst
@@ -506,6 +506,8 @@ source pads.
subdev-formats
+.. _subdev-routing:
+
Streams, multiplexed media pads and internal routing
----------------------------------------------------
@@ -527,9 +529,9 @@ the its sink pad and allows to route them individually to one of its source
pads.
Subdevice drivers that support multiplexed streams are compatible with
-non-multiplexed subdev drivers, but, of course, require a routing configuration
-where the link between those two types of drivers contains only a single
-stream.
+non-multiplexed subdev drivers. However, if the driver at the sink end of a link
+does not support streams, then only stream 0 of the source end may be captured.
+There may be additional limitations specific to the sink device.
Understanding streams
^^^^^^^^^^^^^^^^^^^^^
@@ -570,6 +572,29 @@ Any configurations of a stream within a pad, such as format or selections,
are independent of similar configurations on other streams. This is
subject to change in the future.
+Device types and routing setup
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Different kinds of sub-devices have differing behaviour for route activation,
+depending on the hardware. In all cases, however, only routes that have the
+``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set are active.
+
+Devices generating the streams may allow enabling and disabling some of the
+routes or have a fixed routing configuration. If the routes can be disabled, not
+declaring the routes (or declaring them without
+the ``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
+disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes
+back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE``
+flag unset.
+
+Devices transporting the streams almost always have more configurability with
+respect to routing. Typically any route between the sub-device's sink and source
+pads is possible, and multiple routes (usually up to a certain limited number) may
+be active simultaneously. For such devices, no routes are created by the driver
+and user-created routes are fully replaced when ``VIDIOC_SUBDEV_S_ROUTING`` is
+called on the sub-device. Such newly created routes have the device's default
+configuration for format and selection rectangles.
+
Configuring streams
^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/userspace-api/media/v4l/func-open.rst b/Documentation/userspace-api/media/v4l/func-open.rst
index ba23ff1e45dd..be3808cbf876 100644
--- a/Documentation/userspace-api/media/v4l/func-open.rst
+++ b/Documentation/userspace-api/media/v4l/func-open.rst
@@ -65,8 +65,8 @@ EBUSY
The driver does not support multiple opens and the device is already
in use.
-ENXIO
- No device corresponding to this device special file exists.
+ENODEV
+ Device not found or was removed.
ENOMEM
Not enough kernel memory was available to complete the request.
diff --git a/Documentation/userspace-api/media/v4l/meta-formats.rst b/Documentation/userspace-api/media/v4l/meta-formats.rst
index 0bb61fc5bc00..c23aac823d2c 100644
--- a/Documentation/userspace-api/media/v4l/meta-formats.rst
+++ b/Documentation/userspace-api/media/v4l/meta-formats.rst
@@ -13,9 +13,10 @@ These formats are used for the :ref:`metadata` interface only.
:maxdepth: 1
metafmt-d4xx
+ metafmt-generic
metafmt-intel-ipu3
metafmt-rkisp1
metafmt-uvc
+ metafmt-vivid
metafmt-vsp1-hgo
metafmt-vsp1-hgt
- metafmt-vivid
diff --git a/Documentation/userspace-api/media/v4l/metafmt-generic.rst b/Documentation/userspace-api/media/v4l/metafmt-generic.rst
new file mode 100644
index 000000000000..78ab56b21682
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/metafmt-generic.rst
@@ -0,0 +1,340 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-no-invariants-or-later
+
+********************************************************************************************************************************************************************************************************************************************************************************
+V4L2_META_FMT_GENERIC_8 ('MET8'), V4L2_META_FMT_GENERIC_CSI2_10 ('MC1A'), V4L2_META_FMT_GENERIC_CSI2_12 ('MC1C'), V4L2_META_FMT_GENERIC_CSI2_14 ('MC1E'), V4L2_META_FMT_GENERIC_CSI2_16 ('MC1G'), V4L2_META_FMT_GENERIC_CSI2_20 ('MC1K'), V4L2_META_FMT_GENERIC_CSI2_24 ('MC1O')
+********************************************************************************************************************************************************************************************************************************************************************************
+
+
+Generic line-based metadata formats
+
+
+Description
+===========
+
+These generic line-based metadata formats define the memory layout of the data
+without defining the format or meaning of the metadata itself.
+
+.. _v4l2-meta-fmt-generic-8:
+
+V4L2_META_FMT_GENERIC_8
+-----------------------
+
+The V4L2_META_FMT_GENERIC_8 format is a plain 8-bit metadata format. This format
+is used on CSI-2 for 8 bits per :term:`Data Unit`.
+
+Additionally it is used for 16 bits per Data Unit when two bytes of metadata are
+packed into one 16-bit Data Unit. Otherwise the 16 bits per pixel dataformat is
+:ref:`V4L2_META_FMT_GENERIC_CSI2_16 <v4l2-meta-fmt-generic-csi2-16>`.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_8.**
+Each cell is one byte. "M" denotes a byte of metadata.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - M\ :sub:`10`
+ - M\ :sub:`20`
+ - M\ :sub:`30`
+ * - start + 4:
+ - M\ :sub:`01`
+ - M\ :sub:`11`
+ - M\ :sub:`21`
+ - M\ :sub:`31`
+
+.. _v4l2-meta-fmt-generic-csi2-10:
+
+V4L2_META_FMT_GENERIC_CSI2_10
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_10 contains 8-bit generic metadata packed in 10-bit
+Data Units, with one padding byte after every four bytes of metadata. This
+format is typically used by CSI-2 receivers with a source that transmits
+MEDIA_BUS_FMT_META_10 and the CSI-2 receiver writes the received data to memory
+as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+This format is also used in conjunction with 20 bits per :term:`Data Unit`
+formats that pack two bytes of metadata into one Data Unit. Otherwise the
+20 bits per pixel dataformat is :ref:`V4L2_META_FMT_GENERIC_CSI2_20
+<v4l2-meta-fmt-generic-csi2-20>`.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_10.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - M\ :sub:`10`
+ - M\ :sub:`20`
+ - M\ :sub:`30`
+ - x
+ * - start + 5:
+ - M\ :sub:`01`
+ - M\ :sub:`11`
+ - M\ :sub:`21`
+ - M\ :sub:`31`
+ - x
+
+.. _v4l2-meta-fmt-generic-csi2-12:
+
+V4L2_META_FMT_GENERIC_CSI2_12
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_12 contains 8-bit generic metadata packed in 12-bit
+Data Units, with one padding byte after every two bytes of metadata. This format
+is typically used by CSI-2 receivers with a source that transmits
+MEDIA_BUS_FMT_META_12 and the CSI-2 receiver writes the received data to memory
+as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+This format is also used in conjunction with 24 bits per :term:`Data Unit`
+formats that pack two bytes of metadata into one Data Unit. Otherwise the
+24 bits per pixel dataformat is :ref:`V4L2_META_FMT_GENERIC_CSI2_24
+<v4l2-meta-fmt-generic-csi2-24>`.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_12.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{.8cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - M\ :sub:`10`
+ - x
+ - M\ :sub:`20`
+ - M\ :sub:`30`
+ - x
+ * - start + 6:
+ - M\ :sub:`01`
+ - M\ :sub:`11`
+ - x
+ - M\ :sub:`21`
+ - M\ :sub:`31`
+ - x
+
+.. _v4l2-meta-fmt-generic-csi2-14:
+
+V4L2_META_FMT_GENERIC_CSI2_14
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_14 contains 8-bit generic metadata packed in 14-bit
+Data Units, with three padding bytes after every four bytes of metadata. This
+format is typically used by CSI-2 receivers with a source that transmits
+MEDIA_BUS_FMT_META_14 and the CSI-2 receiver writes the received data to memory
+as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_14.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{.8cm}|p{.8cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - M\ :sub:`10`
+ - M\ :sub:`20`
+ - M\ :sub:`30`
+ - x
+ - x
+ - x
+ * - start + 7:
+ - M\ :sub:`01`
+ - M\ :sub:`11`
+ - M\ :sub:`21`
+ - M\ :sub:`31`
+ - x
+ - x
+ - x
+
+.. _v4l2-meta-fmt-generic-csi2-16:
+
+V4L2_META_FMT_GENERIC_CSI2_16
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_16 contains 8-bit generic metadata packed in 16-bit
+Data Units, with one padding byte after every byte of metadata. This format is
+typically used by CSI-2 receivers with a source that transmits
+MEDIA_BUS_FMT_META_16 and the CSI-2 receiver writes the received data to memory
+as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+Some devices support more efficient packing of metadata in conjunction with
+16-bit image data. In that case the dataformat is
+:ref:`V4L2_META_FMT_GENERIC_8 <v4l2-meta-fmt-generic-8>`.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_16.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{1.2cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - x
+ - M\ :sub:`10`
+ - x
+ - M\ :sub:`20`
+ - x
+ - M\ :sub:`30`
+ - x
+ * - start + 8:
+ - M\ :sub:`01`
+ - x
+ - M\ :sub:`11`
+ - x
+ - M\ :sub:`21`
+ - x
+ - M\ :sub:`31`
+ - x
+
+.. _v4l2-meta-fmt-generic-csi2-20:
+
+V4L2_META_FMT_GENERIC_CSI2_20
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_20 contains 8-bit generic metadata packed in 20-bit
+Data Units, with alternating one or two padding bytes after every byte of
+metadata. This format is typically used by CSI-2 receivers with a source that
+transmits MEDIA_BUS_FMT_META_20 and the CSI-2 receiver writes the received data
+to memory as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+Some devices support more efficient packing of metadata in conjunction with
+16-bit image data. In that case the dataformat is
+:ref:`V4L2_META_FMT_GENERIC_CSI2_10 <v4l2-meta-fmt-generic-csi2-10>`.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_20.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - x
+ - M\ :sub:`10`
+ - x
+ - x
+ - M\ :sub:`20`
+ - x
+ - M\ :sub:`30`
+ - x
+ - x
+ * - start + 10:
+ - M\ :sub:`01`
+ - x
+ - M\ :sub:`11`
+ - x
+ - x
+ - M\ :sub:`21`
+ - x
+ - M\ :sub:`31`
+ - x
+ - x
+
+.. _v4l2-meta-fmt-generic-csi2-24:
+
+V4L2_META_FMT_GENERIC_CSI2_24
+-----------------------------
+
+V4L2_META_FMT_GENERIC_CSI2_24 contains 8-bit generic metadata packed in 24-bit
+Data Units, with two padding bytes after every byte of metadata. This format is
+typically used by CSI-2 receivers with a source that transmits
+MEDIA_BUS_FMT_META_24 and the CSI-2 receiver writes the received data to memory
+as-is.
+
+The packing of the data follows the MIPI CSI-2 specification and the padding of
+the data is defined in the MIPI CCS specification.
+
+Some devices support more efficient packing of metadata in conjunction with
+16-bit image data. In that case the dataformat is
+:ref:`V4L2_META_FMT_GENERIC_CSI2_12 <v4l2-meta-fmt-generic-csi2-12>`.
+
+This format is little endian.
+
+**Byte Order Of V4L2_META_FMT_GENERIC_CSI2_24.**
+Each cell is one byte. "M" denotes a byte of metadata and "x" a byte of padding.
+
+.. tabularcolumns:: |p{2.4cm}|p{1.2cm}|p{.8cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{.8cm}|p{1.2cm}|p{.8cm}|p{.8cm}|
+
+.. flat-table:: Sample 4x2 Metadata Frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 12 8 8 8 8 8 8 8 8 8 8 8 8
+
+ * - start + 0:
+ - M\ :sub:`00`
+ - x
+ - x
+ - M\ :sub:`10`
+ - x
+ - x
+ - M\ :sub:`20`
+ - x
+ - x
+ - M\ :sub:`30`
+ - x
+ - x
+ * - start + 12:
+ - M\ :sub:`01`
+ - x
+ - x
+ - M\ :sub:`11`
+ - x
+ - x
+ - M\ :sub:`21`
+ - x
+ - x
+ - M\ :sub:`31`
+ - x
+ - x
diff --git a/Documentation/userspace-api/media/v4l/mmap.rst b/Documentation/userspace-api/media/v4l/mmap.rst
index a5672573dd6f..1a48c3cbda17 100644
--- a/Documentation/userspace-api/media/v4l/mmap.rst
+++ b/Documentation/userspace-api/media/v4l/mmap.rst
@@ -188,7 +188,7 @@ Example: Mapping buffers in the multi-planar API
buffers[i].start[j] = mmap(NULL, buffer.m.planes[j].length,
PROT_READ | PROT_WRITE, /* recommended */
MAP_SHARED, /* recommended */
- fd, buffer.m.planes[j].m.offset);
+ fd, buffer.m.planes[j].m.mem_offset);
if (MAP_FAILED == buffers[i].start[j]) {
/* If you do not exit here you should unmap() and free()
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
index cf8e4dfbfbd4..b3c5779521d8 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
@@ -36,6 +36,8 @@ are often referred to as greyscale formats.
- Byte 2
- Byte 3
- Byte 4
+ - Byte 5
+ - Byte 6
* .. _V4L2-PIX-FMT-GREY:
@@ -47,6 +49,8 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-IPU3-Y10:
@@ -58,6 +62,8 @@ are often referred to as greyscale formats.
- Y'\ :sub:`2`\ [3:0] Y'\ :sub:`1`\ [9:6]
- Y'\ :sub:`3`\ [1:0] Y'\ :sub:`2`\ [9:4]
- Y'\ :sub:`3`\ [9:2]
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y10:
@@ -69,6 +75,8 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y10BPACK:
@@ -80,6 +88,8 @@ are often referred to as greyscale formats.
- Y'\ :sub:`1`\ [3:0] Y'\ :sub:`2`\ [9:6]
- Y'\ :sub:`2`\ [5:0] Y'\ :sub:`3`\ [9:8]
- Y'\ :sub:`3`\ [7:0]
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y10P:
@@ -91,6 +101,8 @@ are often referred to as greyscale formats.
- Y'\ :sub:`2`\ [9:2]
- Y'\ :sub:`3`\ [9:2]
- Y'\ :sub:`3`\ [1:0] Y'\ :sub:`2`\ [1:0] Y'\ :sub:`1`\ [1:0] Y'\ :sub:`0`\ [1:0]
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y12:
@@ -102,6 +114,8 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y012:
@@ -113,6 +127,21 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
+
+ * .. _V4L2-PIX-FMT-Y12P:
+
+ - ``V4L2_PIX_FMT_Y12P``
+ - 'Y12P'
+
+ - Y'\ :sub:`0`\ [11:4]
+ - Y'\ :sub:`1`\ [11:4]
+ - Y'\ :sub:`1`\ [3:0] Y'\ :sub:`0`\ [3:0]
+ - ...
+ - ...
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y14:
@@ -124,6 +153,21 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
+
+ * .. _V4L2-PIX-FMT-Y14P:
+
+ - ``V4L2_PIX_FMT_Y14P``
+ - 'Y14P'
+
+ - Y'\ :sub:`0`\ [13:6]
+ - Y'\ :sub:`1`\ [13:6]
+ - Y'\ :sub:`2`\ [13:6]
+ - Y'\ :sub:`3`\ [13:6]
+ - Y'\ :sub:`1`\ [1:0] Y'\ :sub:`0`\ [5:0]
+ - Y'\ :sub:`2`\ [3:0] Y'\ :sub:`1`\ [5:2]
+ - Y'\ :sub:`3`\ [5:0] Y'\ :sub:`2`\ [5:4]
* .. _V4L2-PIX-FMT-Y16:
@@ -135,6 +179,8 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
* .. _V4L2-PIX-FMT-Y16-BE:
@@ -146,6 +192,8 @@ are often referred to as greyscale formats.
- ...
- ...
- ...
+ - ...
+ - ...
.. raw:: latex
diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
index eb3cd20b0cf2..d2a6cd2e1eb2 100644
--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
+++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
@@ -33,7 +33,7 @@ Media Bus Formats
* - __u32
- ``field``
- Field order, from enum :c:type:`v4l2_field`. See
- :ref:`field-order` for details.
+ :ref:`field-order` for details. Zero for metadata mbus codes.
* - __u32
- ``colorspace``
- Image colorspace, from enum :c:type:`v4l2_colorspace`.
@@ -45,7 +45,7 @@ Media Bus Formats
conversion is supported by setting the flag
V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE in the corresponding struct
:c:type:`v4l2_subdev_mbus_code_enum` during enumeration.
- See :ref:`v4l2-subdev-mbus-code-flags`.
+ See :ref:`v4l2-subdev-mbus-code-flags`. Zero for metadata mbus codes.
* - union {
- (anonymous)
* - __u16
@@ -61,7 +61,7 @@ Media Bus Formats
that ycbcr_enc conversion is supported by setting the flag
V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC in the corresponding struct
:c:type:`v4l2_subdev_mbus_code_enum` during enumeration.
- See :ref:`v4l2-subdev-mbus-code-flags`.
+ See :ref:`v4l2-subdev-mbus-code-flags`. Zero for metadata mbus codes.
* - __u16
- ``hsv_enc``
- HSV encoding, from enum :c:type:`v4l2_hsv_encoding`.
@@ -75,7 +75,7 @@ Media Bus Formats
that hsv_enc conversion is supported by setting the flag
V4L2_SUBDEV_MBUS_CODE_CSC_HSV_ENC in the corresponding struct
:c:type:`v4l2_subdev_mbus_code_enum` during enumeration.
- See :ref:`v4l2-subdev-mbus-code-flags`
+ See :ref:`v4l2-subdev-mbus-code-flags`. Zero for metadata mbus codes.
* - }
-
* - __u16
@@ -90,8 +90,8 @@ Media Bus Formats
The driver indicates that quantization conversion is supported by
setting the flag V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION in the
corresponding struct :c:type:`v4l2_subdev_mbus_code_enum`
- during enumeration. See :ref:`v4l2-subdev-mbus-code-flags`.
-
+ during enumeration. See :ref:`v4l2-subdev-mbus-code-flags`. Zero for
+ metadata mbus codes.
* - __u16
- ``xfer_func``
- Transfer function, from enum :c:type:`v4l2_xfer_func`.
@@ -104,7 +104,8 @@ Media Bus Formats
The driver indicates that the transfer function conversion is supported by
setting the flag V4L2_SUBDEV_MBUS_CODE_CSC_XFER_FUNC in the
corresponding struct :c:type:`v4l2_subdev_mbus_code_enum`
- during enumeration. See :ref:`v4l2-subdev-mbus-code-flags`.
+ during enumeration. See :ref:`v4l2-subdev-mbus-code-flags`. Zero for
+ metadata mbus codes.
* - __u16
- ``flags``
- flags See: :ref:v4l2-mbus-framefmt-flags
@@ -8306,3 +8307,257 @@ The following table lists the existing metadata formats.
both sides of the link and the bus format is a fixed
metadata format that is not configurable from userspace.
Width and height will be set to 0 for this format.
+
+Generic Serial Metadata Formats
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Generic serial metadata formats are used on serial buses where the actual data
+content is more or less device specific but the data is transmitted and received
+by multiple devices that do not process the data in any way, simply writing
+it to system memory for processing in software at the end of the pipeline.
+
+"b" in an array cell signifies a byte of data, followed by the number of the bit
+and finally the bit number in subscript. "x" indicates a padding bit.
+
+.. _media-bus-format-generic-meta:
+
+.. cssclass: longtable
+
+.. flat-table:: Generic Serial Metadata Formats
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Identifier
+ - Code
+ -
+ - :cspan:`23` Data organization within bus :term:`Data Unit`
+ * -
+ -
+ - Bit
+ - 23
+ - 22
+ - 21
+ - 20
+ - 19
+ - 18
+ - 17
+ - 16
+ - 15
+ - 14
+ - 13
+ - 12
+ - 11
+ - 10
+ - 9
+ - 8
+ - 7
+ - 6
+ - 5
+ - 4
+ - 3
+ - 2
+ - 1
+ - 0
+ * .. _MEDIA-BUS-FMT-META-8:
+
+ - MEDIA_BUS_FMT_META_8
+ - 0x8001
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-META-10:
+
+ - MEDIA_BUS_FMT_META_10
+ - 0x8002
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ * .. _MEDIA-BUS-FMT-META-12:
+
+ - MEDIA_BUS_FMT_META_12
+ - 0x8003
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ - x
+ - x
+ * .. _MEDIA-BUS-FMT-META-14:
+
+ - MEDIA_BUS_FMT_META_14
+ - 0x8004
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ * .. _MEDIA-BUS-FMT-META-16:
+
+ - MEDIA_BUS_FMT_META_16
+ - 0x8005
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ * .. _MEDIA-BUS-FMT-META-20:
+
+ - MEDIA_BUS_FMT_META_20
+ - 0x8006
+ -
+ -
+ -
+ -
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ * .. _MEDIA-BUS-FMT-META-24:
+
+ - MEDIA_BUS_FMT_META_24
+ - 0x8007
+ -
+ - b0\ :sub:`7`
+ - b0\ :sub:`6`
+ - b0\ :sub:`5`
+ - b0\ :sub:`4`
+ - b0\ :sub:`3`
+ - b0\ :sub:`2`
+ - b0\ :sub:`1`
+ - b0\ :sub:`0`
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
+ - x
diff --git a/Documentation/userspace-api/media/v4l/user-func.rst b/Documentation/userspace-api/media/v4l/user-func.rst
index 15ff0bf7bbe6..6f661138801c 100644
--- a/Documentation/userspace-api/media/v4l/user-func.rst
+++ b/Documentation/userspace-api/media/v4l/user-func.rst
@@ -62,6 +62,7 @@ Function Reference
vidioc-query-dv-timings
vidioc-querystd
vidioc-reqbufs
+ vidioc-remove-bufs
vidioc-s-hw-freq-seek
vidioc-streamon
vidioc-subdev-enum-frame-interval
diff --git a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst
index 000c154b0f98..3adb3d205531 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst
@@ -227,6 +227,13 @@ the ``mbus_code`` field is handled differently:
The application can ask to configure the quantization of the capture
device when calling the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl with
:ref:`V4L2_PIX_FMT_FLAG_SET_CSC <v4l2-pix-fmt-flag-set-csc>` set.
+ * - ``V4L2_FMT_FLAG_META_LINE_BASED``
+ - 0x0200
+ - The metadata format is line-based. In this case the ``width``,
+ ``height`` and ``bytesperline`` fields of :c:type:`v4l2_meta_format` are
+ valid. The buffer consists of ``height`` lines, each having ``width``
+ Data Units of data and the offset (in bytes) between the beginning of
+ each two consecutive lines is ``bytesperline``.
Return Value
============
diff --git a/Documentation/userspace-api/media/v4l/vidioc-remove-bufs.rst b/Documentation/userspace-api/media/v4l/vidioc-remove-bufs.rst
new file mode 100644
index 000000000000..1995b39af9ba
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/vidioc-remove-bufs.rst
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: GFDL-1.1-no-invariants-or-later
+.. c:namespace:: V4L
+
+.. _VIDIOC_REMOVE_BUFS:
+
+************************
+ioctl VIDIOC_REMOVE_BUFS
+************************
+
+Name
+====
+
+VIDIOC_REMOVE_BUFS - Removes buffers from a queue
+
+Synopsis
+========
+
+.. c:macro:: VIDIOC_REMOVE_BUFS
+
+``int ioctl(int fd, VIDIOC_REMOVE_BUFS, struct v4l2_remove_buffers *argp)``
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :c:func:`open()`.
+
+``argp``
+ Pointer to struct :c:type:`v4l2_remove_buffers`.
+
+Description
+===========
+
+Applications can optionally call the :ref:`VIDIOC_REMOVE_BUFS` ioctl to
+remove buffers from a queue.
+:ref:`VIDIOC_CREATE_BUFS` ioctl support is mandatory to enable :ref:`VIDIOC_REMOVE_BUFS`.
+This ioctl is available if the ``V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS`` capability
+is set on the queue when :c:func:`VIDIOC_REQBUFS` or :c:func:`VIDIOC_CREATE_BUFS`
+are invoked.
+
+.. c:type:: v4l2_remove_buffers
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.5cm}|
+
+.. flat-table:: struct v4l2_remove_buffers
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``index``
+ - The starting buffer index to remove. This field is ignored if count == 0.
+ * - __u32
+ - ``count``
+      - The number of buffers to be removed, with indices ``index`` to ``index + count - 1``.
+        All buffers in this range must be valid and in DEQUEUED state.
+        :ref:`VIDIOC_REMOVE_BUFS` will always check the validity of ``type``; if it is
+        invalid it returns the ``EINVAL`` error code.
+        If ``count`` is set to 0, :ref:`VIDIOC_REMOVE_BUFS` will do nothing and return 0.
+ * - __u32
+ - ``type``
+ - Type of the stream or buffers, this is the same as the struct
+ :c:type:`v4l2_format` ``type`` field. See
+ :c:type:`v4l2_buf_type` for valid values.
+ * - __u32
+ - ``reserved``\ [13]
+ - A place holder for future extensions. Drivers and applications
+ must set the array to zero.
+
+Return Value
+============
+
+On success 0 is returned, on error -1 and the ``errno`` variable is set
+appropriately. The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter. If an error occurs, no
+buffers will be freed and one of the error codes below will be returned:
+
+EBUSY
+ File I/O is in progress.
+ One or more of the buffers in the range ``index`` to ``index + count - 1`` are not
+ in DEQUEUED state.
+
+EINVAL
+ One or more of the buffers in the range ``index`` to ``index + count - 1`` do not
+ exist in the queue.
+ The buffer type (``type`` field) is not valid.
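
A minimal usage sketch follows; the buffer indices, count and queue type are
assumptions, and ``fd`` is assumed to be an open video node whose queue reports
``V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS``:

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Remove two dequeued buffers, starting at index 4, from a capture queue. */
    int remove_two_buffers(int fd)
    {
        struct v4l2_remove_buffers rb;

        memset(&rb, 0, sizeof(rb));
        rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        rb.index = 4;
        rb.count = 2;

        return ioctl(fd, VIDIOC_REMOVE_BUFS, &rb);
    }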
diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
index 0b3a41a45d05..bbc22dd76032 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
@@ -121,6 +121,7 @@ aborting or finishing any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-M2M-HOLD-CAPTURE-BUF:
.. _V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS:
.. _V4L2-BUF-CAP-SUPPORTS-MAX-NUM-BUFFERS:
+.. _V4L2-BUF-CAP-SUPPORTS-REMOVE-BUFS:
.. raw:: latex
diff --git a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-crop.rst b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-crop.rst
index 92d933631fda..88a748103a71 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-crop.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-crop.rst
@@ -37,9 +37,9 @@ Description
.. note::
- This is an :ref:`obsolete` interface and may be removed
- in the future. It is superseded by
- :ref:`the selection API <VIDIOC_SUBDEV_G_SELECTION>`.
+ This is an :ref:`obsolete` interface and may be removed in the future. It is
+ superseded by :ref:`the selection API <VIDIOC_SUBDEV_G_SELECTION>`. No new
+ extensions to the :c:type:`v4l2_subdev_crop` structure will be accepted.
To retrieve the current crop rectangle applications set the ``pad``
field of a struct :c:type:`v4l2_subdev_crop` to the
diff --git a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
index 26b5004bfe6d..1cf795480602 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
@@ -43,23 +43,39 @@ The routing configuration determines the flows of data inside an entity.
Drivers report their current routing tables using the
``VIDIOC_SUBDEV_G_ROUTING`` ioctl and application may enable or disable routes
with the ``VIDIOC_SUBDEV_S_ROUTING`` ioctl, by adding or removing routes and
-setting or clearing flags of the ``flags`` field of a
-struct :c:type:`v4l2_subdev_route`.
+setting or clearing flags of the ``flags`` field of a struct
+:c:type:`v4l2_subdev_route`. Like ``VIDIOC_SUBDEV_G_ROUTING``,
+``VIDIOC_SUBDEV_S_ROUTING`` also returns the routes back to the user.
-All stream configurations are reset when ``VIDIOC_SUBDEV_S_ROUTING`` is called. This
-means that the userspace must reconfigure all streams after calling the ioctl
-with e.g. ``VIDIOC_SUBDEV_S_FMT``.
+All stream configurations are reset when ``VIDIOC_SUBDEV_S_ROUTING`` is called.
+This means that the userspace must reconfigure all stream formats and selections
+after calling the ioctl with e.g. ``VIDIOC_SUBDEV_S_FMT``.
Only subdevices which have both sink and source pads can support routing.
-When inspecting routes through ``VIDIOC_SUBDEV_G_ROUTING`` and the application
-provided ``num_routes`` is not big enough to contain all the available routes
-the subdevice exposes, drivers return the ENOSPC error code and adjust the
-value of the ``num_routes`` field. Application should then reserve enough memory
-for all the route entries and call ``VIDIOC_SUBDEV_G_ROUTING`` again.
-
-On a successful ``VIDIOC_SUBDEV_G_ROUTING`` call the driver updates the
-``num_routes`` field to reflect the actual number of routes returned.
+The ``len_routes`` field indicates the number of routes that can fit in the
+``routes`` array allocated by userspace. It is set by applications for both
+ioctls to indicate how many routes the kernel can return, and is never modified
+by the kernel.
+
+The ``num_routes`` field indicates the number of routes in the routing
+table. For ``VIDIOC_SUBDEV_S_ROUTING``, it is set by userspace to the number of
+routes that the application stored in the ``routes`` array. For both ioctls, it
+is returned by the kernel and indicates how many routes are stored in the
+subdevice routing table. This may be smaller or larger than the value of
+``num_routes`` set by the application for ``VIDIOC_SUBDEV_S_ROUTING``, as
+drivers may adjust the requested routing table.
+
+The kernel can return a ``num_routes`` value larger than ``len_routes`` from
+both ioctls. This indicates there are more routes in the routing table than fit in
+the ``routes`` array. In this case, the ``routes`` array is filled by the kernel
+with the first ``len_routes`` entries of the subdevice routing table. This is
+not considered to be an error, and the ioctl call succeeds. If the application
+wants to retrieve the missing routes, it can issue a new
+``VIDIOC_SUBDEV_G_ROUTING`` call with a large enough ``routes`` array.
+
+``VIDIOC_SUBDEV_S_ROUTING`` may return more routes than the user provided in the
+``num_routes`` field, due to e.g. hardware properties.
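
As a rough fragment (the array size and ``subdev_fd`` are assumptions, and
``routes`` is assumed to be passed as a userspace pointer, as with other
variable-length V4L2 ioctl arrays), the retrieval described above could look
like:

.. code-block:: c

    struct v4l2_subdev_route routes[16];
    struct v4l2_subdev_routing routing = {
        .which = V4L2_SUBDEV_FORMAT_ACTIVE,
        .len_routes = 16,               /* capacity of the routes[] array */
        .routes = (__u64)(uintptr_t)routes,
    };

    if (ioctl(subdev_fd, VIDIOC_SUBDEV_G_ROUTING, &routing) < 0)
        return -1;

    if (routing.num_routes > routing.len_routes) {
        /* The table did not fit: grow the array to num_routes and retry. */
    }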
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -74,6 +90,9 @@ On a successful ``VIDIOC_SUBDEV_G_ROUTING`` call the driver updates the
- ``which``
- Routing table to be accessed, from enum
:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+ * - __u32
+ - ``len_routes``
+      - The number of entries the ``routes`` array can hold, i.e. the memory the application has reserved for it
* - struct :c:type:`v4l2_subdev_route`
- ``routes[]``
- Array of struct :c:type:`v4l2_subdev_route` entries
@@ -81,7 +100,7 @@ On a successful ``VIDIOC_SUBDEV_G_ROUTING`` call the driver updates the
- ``num_routes``
- Number of entries of the routes array
* - __u32
- - ``reserved``\ [5]
+ - ``reserved``\ [11]
- Reserved for future extensions. Applications and drivers must set
the array to zero.
@@ -135,10 +154,6 @@ On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
-ENOSPC
- The application provided ``num_routes`` is not big enough to contain
- all the available routes the subdevice exposes.
-
EINVAL
The sink or source pad identifiers reference a non-existing pad or reference
pads of different types (ie. the sink_pad identifiers refers to a source
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index 3e58aac4ef0b..bdc628e8c1d6 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -215,6 +215,7 @@ replace define V4L2_FMT_FLAG_CSC_XFER_FUNC fmtdesc-flags
replace define V4L2_FMT_FLAG_CSC_YCBCR_ENC fmtdesc-flags
replace define V4L2_FMT_FLAG_CSC_HSV_ENC fmtdesc-flags
replace define V4L2_FMT_FLAG_CSC_QUANTIZATION fmtdesc-flags
+replace define V4L2_FMT_FLAG_META_LINE_BASED fmtdesc-flags
# V4L2 timecode types
replace define V4L2_TC_TYPE_24FPS timecode-type
diff --git a/Documentation/userspace-api/netlink/genetlink-legacy.rst b/Documentation/userspace-api/netlink/genetlink-legacy.rst
index 70a77387f6c4..fa005989193a 100644
--- a/Documentation/userspace-api/netlink/genetlink-legacy.rst
+++ b/Documentation/userspace-api/netlink/genetlink-legacy.rst
@@ -46,10 +46,16 @@ For reference the ``multi-attr`` array may look like this::
where ``ARRAY-ATTR`` is the array entry type.
-array-nest
-~~~~~~~~~~
+indexed-array
+~~~~~~~~~~~~~
+
+``indexed-array`` wraps the entire array in an extra attribute (hence
+limiting its size to 64kB). The ``ENTRY`` nests are special and have the
+index of the entry as their type instead of normal attribute type.
-``array-nest`` creates the following structure::
+A ``sub-type`` is needed to describe the type used in the ``ENTRY``. A ``nest``
+``sub-type`` means each ``ENTRY`` is a nest of attributes, and the structure
+looks like::
[SOME-OTHER-ATTR]
[ARRAY-ATTR]
@@ -60,9 +66,13 @@ array-nest
[MEMBER1]
[MEMBER2]
-It wraps the entire array in an extra attribute (hence limiting its size
-to 64kB). The ``ENTRY`` nests are special and have the index of the entry
-as their type instead of normal attribute type.
+Other ``sub-type`` values, like ``u32``, mean that each ``ENTRY`` carries a single
+member of the type given by ``sub-type``. The structure looks like::
+
+ [SOME-OTHER-ATTR]
+ [ARRAY-ATTR]
+ [ENTRY u32]
+ [ENTRY u32]
type-value
~~~~~~~~~~
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 0b5a33ee71ee..a71d91978d9e 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4300,7 +4300,7 @@ operating system that uses the PIT for timing (e.g. Linux 2.4.x).
4.100 KVM_PPC_CONFIGURE_V3_MMU
------------------------------
-:Capability: KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3
+:Capability: KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3
:Architectures: ppc
:Type: vm ioctl
:Parameters: struct kvm_ppc_mmuv3_cfg (in)
@@ -4334,7 +4334,7 @@ the Power ISA V3.00, Book III section 5.7.6.1.
4.101 KVM_PPC_GET_RMMU_INFO
---------------------------
-:Capability: KVM_CAP_PPC_RADIX_MMU
+:Capability: KVM_CAP_PPC_MMU_RADIX
:Architectures: ppc
:Type: vm ioctl
:Parameters: struct kvm_ppc_rmmu_info (out)
@@ -6316,7 +6316,7 @@ The "flags" field is reserved for future extensions and must be '0'.
:Architectures: none
:Type: vm ioctl
:Parameters: struct kvm_create_guest_memfd(in)
-:Returns: 0 on success, <0 on error
+:Returns: A file descriptor on success, <0 on error
KVM_CREATE_GUEST_MEMFD creates an anonymous file and returns a file descriptor
that refers to it. guest_memfd files are roughly analogous to files created
@@ -6894,6 +6894,13 @@ Note that KVM does not skip the faulting instruction as it does for
KVM_EXIT_MMIO, but userspace has to emulate any change to the processing state
if it decides to decode and emulate the instruction.
+This feature isn't available to protected VMs, as userspace does not
+have access to the state that is required to perform the emulation.
+Instead, a data abort exception is directly injected in the guest.
+Note that although KVM_CAP_ARM_NISV_TO_USER will be reported if
+queried outside of a protected VM context, the feature will not be
+exposed if queried on a protected VM file descriptor.
+
::
/* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
@@ -8095,7 +8102,7 @@ capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
will disable the use of APIC hardware virtualization even if supported
by the CPU, as it's incompatible with SynIC auto-EOI behavior.
-8.3 KVM_CAP_PPC_RADIX_MMU
+8.3 KVM_CAP_PPC_MMU_RADIX
-------------------------
:Architectures: ppc
@@ -8105,7 +8112,7 @@ available, means that the kernel can support guests using the
radix MMU defined in Power ISA V3.00 (as implemented in the POWER9
processor).
-8.4 KVM_CAP_PPC_HASH_MMU_V3
+8.4 KVM_CAP_PPC_MMU_HASH_V3
---------------------------
:Architectures: ppc
@@ -8819,6 +8826,8 @@ means the VM type with value @n is supported. Possible values of @n are::
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
+ #define KVM_X86_SEV_VM 2
+ #define KVM_X86_SEV_ES_VM 3
Note, KVM_X86_SW_PROTECTED_VM is currently only for development and testing.
Do not use KVM_X86_SW_PROTECTED_VM for "real" VMs, and especially not in
diff --git a/Documentation/virt/kvm/arm/fw-pseudo-registers.rst b/Documentation/virt/kvm/arm/fw-pseudo-registers.rst
new file mode 100644
index 000000000000..b90fd0b0fa66
--- /dev/null
+++ b/Documentation/virt/kvm/arm/fw-pseudo-registers.rst
@@ -0,0 +1,138 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+ARM firmware pseudo-registers interface
+=======================================
+
+KVM handles the hypercall services as requested by the guests. New hypercall
+services are regularly made available by the ARM specification or by KVM (as
+vendor services) if they make sense from a virtualization point of view.
+
+This means that a guest booted on two different versions of KVM can observe
+two different "firmware" revisions. This could cause issues if a given guest
+is tied to a particular version of a hypercall service, or if a migration
+causes a different version to be exposed out of the blue to an unsuspecting
+guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value as required.
+
+The following registers are defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+ KVM implements the PSCI (Power State Coordination Interface)
+ specification in order to provide services such as CPU on/off, reset
+ and power-off to the guest.
+
+ - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+ (and thus has already been initialized)
+ - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+ highest PSCI version implemented by KVM and compatible with v0.2)
+ - Allows any PSCI version implemented by KVM and compatible with
+ v0.2 to be set with SET_ONE_REG
+ - Affects the whole VM (even if the register view is per-vcpu)
+
+* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+ Holds the state of the firmware support to mitigate CVE-2017-5715, as
+ offered by KVM to the guest via a HVC call. The workaround is described
+  under SMCCC_ARCH_WORKAROUND_1 in [1]_.
+
+ Accepted values are:
+
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL:
+ KVM does not offer
+ firmware support for the workaround. The mitigation status for the
+ guest is unknown.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL:
+ The workaround HVC call is
+ available to the guest and required for the mitigation.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED:
+ The workaround HVC call
+ is available to the guest, but it is not needed on this VCPU.
+
+* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
+ Holds the state of the firmware support to mitigate CVE-2018-3639, as
+ offered by KVM to the guest via a HVC call. The workaround is described
+ under SMCCC_ARCH_WORKAROUND_2 in [1]_.
+
+ Accepted values are:
+
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+ A workaround is not
+ available. KVM does not offer firmware support for the workaround.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+ The workaround state is
+ unknown. KVM does not offer firmware support for the workaround.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
+ The workaround is available,
+ and can be disabled by a vCPU. If
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED is set, it is active for
+ this vCPU.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
+ The workaround is always active on this vCPU or it is not needed.
+
+
+Bitmap Feature Firmware Registers
+---------------------------------
+
+Unlike the registers above, the following registers expose the
+hypercall services in the form of a feature bitmap to userspace. This
+bitmap is translated into the services that are available to the guest.
+One register is defined per service call owner and can be accessed via
+the GET/SET_ONE_REG interface.
+
+By default, these registers are set with the upper limit of the features
+that are supported. This way userspace can discover all the usable
+hypercall services via GET_ONE_REG and write the desired bitmap back via
+SET_ONE_REG (a minimal write-back sketch follows the register list below).
+The features of any register left untouched, for example because userspace
+isn't aware of it, are exposed as-is to the guest.
+
+Note that KVM will no longer allow userspace to configure these registers
+once any of the vCPUs has run at least once. Instead, it will return
+-EBUSY.
+
+The pseudo-firmware bitmap registers are as follows:
+
+* KVM_REG_ARM_STD_BMAP:
+ Controls the bitmap of the ARM Standard Secure Service Calls.
+
+ The following bits are accepted:
+
+ Bit-0: KVM_REG_ARM_STD_BIT_TRNG_V1_0:
+ The bit represents the services offered under v1.0 of ARM True Random
+ Number Generator (TRNG) specification, ARM DEN0098.
+
+* KVM_REG_ARM_STD_HYP_BMAP:
+ Controls the bitmap of the ARM Standard Hypervisor Service Calls.
+
+ The following bits are accepted:
+
+ Bit-0: KVM_REG_ARM_STD_HYP_BIT_PV_TIME:
+ The bit represents the Paravirtualized Time service as represented by
+ ARM DEN0057A.
+
+* KVM_REG_ARM_VENDOR_HYP_BMAP:
+ Controls the bitmap of the Vendor specific Hypervisor Service Calls.
+
+ The following bits are accepted:
+
+ Bit-0: KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT:
+ The bit represents the ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID
+ and ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID function-ids.
+
+ Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP:
+ The bit represents the Precision Time Protocol KVM service.
+
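+As an illustration (not itself part of the ABI), the following minimal
+userspace sketch hides the TRNG service from the guest by clearing its bit
+in KVM_REG_ARM_STD_BMAP before any vCPU has run; error handling and vCPU
+setup are omitted::
+
+  #include <linux/kvm.h>
+  #include <stdint.h>
+  #include <sys/ioctl.h>
+
+  /* Hypothetical helper; the register and bit macros come from the arm64 UAPI headers. */
+  static int hide_trng_service(int vcpu_fd)
+  {
+          uint64_t bmap = 0;
+          struct kvm_one_reg reg = {
+                  .id   = KVM_REG_ARM_STD_BMAP,
+                  .addr = (uint64_t)&bmap,
+          };
+
+          if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
+                  return -1;
+
+          /* Clear Bit-0 (TRNG v1.0) and write the reduced bitmap back */
+          bmap &= ~(1ULL << KVM_REG_ARM_STD_BIT_TRNG_V1_0);
+
+          /* Fails with -EBUSY once any vCPU has run */
+          return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
+  }
+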
+Errors:
+
+ ======= =============================================================
+ -ENOENT Unknown register accessed.
+ -EBUSY Attempt a 'write' to the register after the VM has started.
+ -EINVAL Invalid bitmap written to the register.
+ ======= =============================================================
+
+.. [1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst
index 3e23084644ba..17be111f493f 100644
--- a/Documentation/virt/kvm/arm/hypercalls.rst
+++ b/Documentation/virt/kvm/arm/hypercalls.rst
@@ -1,138 +1,46 @@
.. SPDX-License-Identifier: GPL-2.0
-=======================
-ARM Hypercall Interface
-=======================
-
-KVM handles the hypercall services as requested by the guests. New hypercall
-services are regularly made available by the ARM specification or by KVM (as
-vendor services) if they make sense from a virtualization point of view.
-
-This means that a guest booted on two different versions of KVM can observe
-two different "firmware" revisions. This could cause issues if a given guest
-is tied to a particular version of a hypercall service, or if a migration
-causes a different version to be exposed out of the blue to an unsuspecting
-guest.
-
-In order to remedy this situation, KVM exposes a set of "firmware
-pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
-interface. These registers can be saved/restored by userspace, and set
-to a convenient value as required.
-
-The following registers are defined:
-
-* KVM_REG_ARM_PSCI_VERSION:
-
- KVM implements the PSCI (Power State Coordination Interface)
- specification in order to provide services such as CPU on/off, reset
- and power-off to the guest.
-
- - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
- (and thus has already been initialized)
- - Returns the current PSCI version on GET_ONE_REG (defaulting to the
- highest PSCI version implemented by KVM and compatible with v0.2)
- - Allows any PSCI version implemented by KVM and compatible with
- v0.2 to be set with SET_ONE_REG
- - Affects the whole VM (even if the register view is per-vcpu)
-
-* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- Holds the state of the firmware support to mitigate CVE-2017-5715, as
- offered by KVM to the guest via a HVC call. The workaround is described
- under SMCCC_ARCH_WORKAROUND_1 in [1].
-
- Accepted values are:
-
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL:
- KVM does not offer
- firmware support for the workaround. The mitigation status for the
- guest is unknown.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL:
- The workaround HVC call is
- available to the guest and required for the mitigation.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED:
- The workaround HVC call
- is available to the guest, but it is not needed on this VCPU.
-
-* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- Holds the state of the firmware support to mitigate CVE-2018-3639, as
- offered by KVM to the guest via a HVC call. The workaround is described
- under SMCCC_ARCH_WORKAROUND_2 in [1]_.
-
- Accepted values are:
-
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
- A workaround is not
- available. KVM does not offer firmware support for the workaround.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
- The workaround state is
- unknown. KVM does not offer firmware support for the workaround.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
- The workaround is available,
- and can be disabled by a vCPU. If
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED is set, it is active for
- this vCPU.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
- The workaround is always active on this vCPU or it is not needed.
-
-
-Bitmap Feature Firmware Registers
----------------------------------
-
-Contrary to the above registers, the following registers exposes the
-hypercall services in the form of a feature-bitmap to the userspace. This
-bitmap is translated to the services that are available to the guest.
-There is a register defined per service call owner and can be accessed via
-GET/SET_ONE_REG interface.
-
-By default, these registers are set with the upper limit of the features
-that are supported. This way userspace can discover all the usable
-hypercall services via GET_ONE_REG. The user-space can write-back the
-desired bitmap back via SET_ONE_REG. The features for the registers that
-are untouched, probably because userspace isn't aware of them, will be
-exposed as is to the guest.
-
-Note that KVM will not allow the userspace to configure the registers
-anymore once any of the vCPUs has run at least once. Instead, it will
-return a -EBUSY.
-
-The pseudo-firmware bitmap register are as follows:
-
-* KVM_REG_ARM_STD_BMAP:
- Controls the bitmap of the ARM Standard Secure Service Calls.
-
- The following bits are accepted:
-
- Bit-0: KVM_REG_ARM_STD_BIT_TRNG_V1_0:
- The bit represents the services offered under v1.0 of ARM True Random
- Number Generator (TRNG) specification, ARM DEN0098.
-
-* KVM_REG_ARM_STD_HYP_BMAP:
- Controls the bitmap of the ARM Standard Hypervisor Service Calls.
-
- The following bits are accepted:
-
- Bit-0: KVM_REG_ARM_STD_HYP_BIT_PV_TIME:
- The bit represents the Paravirtualized Time service as represented by
- ARM DEN0057A.
-
-* KVM_REG_ARM_VENDOR_HYP_BMAP:
- Controls the bitmap of the Vendor specific Hypervisor Service Calls.
-
- The following bits are accepted:
-
- Bit-0: KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT
- The bit represents the ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID
- and ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID function-ids.
-
- Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP:
- The bit represents the Precision Time Protocol KVM service.
-
-Errors:
-
- ======= =============================================================
- -ENOENT Unknown register accessed.
- -EBUSY Attempt a 'write' to the register after the VM has started.
- -EINVAL Invalid bitmap written to the register.
- ======= =============================================================
-
-.. [1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
+===============================================
+KVM/arm64-specific hypercalls exposed to guests
+===============================================
+
+This file documents the KVM/arm64-specific hypercalls which may be
+exposed by KVM/arm64 to guest operating systems. These hypercalls are
+issued using the HVC instruction according to version 1.1 of the Arm SMC
+Calling Convention (DEN0028/C):
+
+https://developer.arm.com/docs/den0028/c
+
+All KVM/arm64-specific hypercalls are allocated within the "Vendor
+Specific Hypervisor Service Call" range with a UID of
+``28b46fb6-2ec5-11e9-a9ca-4b564d003a74``. This UID should be queried by the
+guest using the standard "Call UID" function for the service range in
+order to determine that the KVM/arm64-specific hypercalls are available.
+
+``ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID``
+---------------------------------------------
+
+Provides a discovery mechanism for other KVM/arm64 hypercalls.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Mandatory for the KVM/arm64 UID |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC32 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0x86000000 |
++---------------------+----------+--------------------------------------------------+
+| Arguments: | None |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (uint32) | R0 | Bitmap of available function numbers 0-31 |
+| +----------+----+---------------------------------------------+
+| | (uint32) | R1 | Bitmap of available function numbers 32-63 |
+| +----------+----+---------------------------------------------+
+| | (uint32) | R2 | Bitmap of available function numbers 64-95 |
+| +----------+----+---------------------------------------------+
+| | (uint32) | R3 | Bitmap of available function numbers 96-127 |
++---------------------+----------+----+---------------------------------------------+
+
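+A guest can probe for the service as sketched below; this is an illustrative
+fragment (not KVM's own implementation) built on the SMCCC helpers and
+constants from ``include/linux/arm-smccc.h``::
+
+  #include <linux/arm-smccc.h>
+  #include <linux/bits.h>
+  #include <linux/types.h>
+
+  /* Hypothetical in-guest check: verify the KVM vendor UID, then test one
+   * bit of the feature bitmap returned in R0 (function numbers 0-31). */
+  static bool kvm_ptp_hypercall_available(void)
+  {
+          struct arm_smccc_res res;
+
+          arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+          if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
+              res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
+              res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
+              res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+                  return false;
+
+          arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
+          return res.a0 & BIT(ARM_SMCCC_KVM_FUNC_PTP);
+  }
+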
+``ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID``
+----------------------------------------
+
+See ptp_kvm.rst
diff --git a/Documentation/virt/kvm/arm/index.rst b/Documentation/virt/kvm/arm/index.rst
index 7f231c724e16..ec09881de4cf 100644
--- a/Documentation/virt/kvm/arm/index.rst
+++ b/Documentation/virt/kvm/arm/index.rst
@@ -7,6 +7,7 @@ ARM
.. toctree::
:maxdepth: 2
+ fw-pseudo-registers
hyp-abi
hypercalls
pvtime
diff --git a/Documentation/virt/kvm/arm/ptp_kvm.rst b/Documentation/virt/kvm/arm/ptp_kvm.rst
index aecdc80ddcd8..7c0960970a0e 100644
--- a/Documentation/virt/kvm/arm/ptp_kvm.rst
+++ b/Documentation/virt/kvm/arm/ptp_kvm.rst
@@ -7,19 +7,29 @@ PTP_KVM is used for high precision time sync between host and guests.
It relies on transferring the wall clock and counter value from the
host to the guest using a KVM-specific hypercall.
-* ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: 0x86000001
+``ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID``
+----------------------------------------
-This hypercall uses the SMC32/HVC32 calling convention:
+Retrieve current time information for the specific counter. There are no
+endianness restrictions.
-ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID
- ============== ======== =====================================
- Function ID: (uint32) 0x86000001
- Arguments: (uint32) KVM_PTP_VIRT_COUNTER(0)
- KVM_PTP_PHYS_COUNTER(1)
- Return Values: (int32) NOT_SUPPORTED(-1) on error, or
- (uint32) Upper 32 bits of wall clock time (r0)
- (uint32) Lower 32 bits of wall clock time (r1)
- (uint32) Upper 32 bits of counter (r2)
- (uint32) Lower 32 bits of counter (r3)
- Endianness: No Restrictions.
- ============== ======== =====================================
++---------------------+-------------------------------------------------------+
+| Presence: | Optional |
++---------------------+-------------------------------------------------------+
+| Calling convention: | HVC32 |
++---------------------+----------+--------------------------------------------+
+| Function ID: | (uint32) | 0x86000001 |
++---------------------+----------+----+---------------------------------------+
+| Arguments: | (uint32) | R1 | ``KVM_PTP_VIRT_COUNTER (0)`` |
+| | | +---------------------------------------+
+| | | | ``KVM_PTP_PHYS_COUNTER (1)`` |
++---------------------+----------+----+---------------------------------------+
+| Return Values: | (int32) | R0 | ``NOT_SUPPORTED (-1)`` on error, else |
+| | | | upper 32 bits of wall clock time |
+| +----------+----+---------------------------------------+
+| | (uint32) | R1 | Lower 32 bits of wall clock time |
+| +----------+----+---------------------------------------+
+| | (uint32) | R2 | Upper 32 bits of counter |
+| +----------+----+---------------------------------------+
+| | (uint32) | R3 | Lower 32 bits of counter |
++---------------------+----------+----+---------------------------------------+
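+As an illustration, a guest can recombine the 32-bit halves as sketched
+below; this is a simplified fragment (not the in-tree ptp_kvm driver) using
+the helpers and constants from ``include/linux/arm-smccc.h``::
+
+  #include <linux/arm-smccc.h>
+  #include <linux/errno.h>
+  #include <linux/types.h>
+
+  /* Hypothetical in-guest helper: fetch wall clock time and the virtual
+   * counter value in a single hypercall. */
+  static int kvm_ptp_read(u64 *wall_ns, u64 *counter)
+  {
+          struct arm_smccc_res res;
+
+          arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
+                            KVM_PTP_VIRT_COUNTER, &res);
+          if ((int)res.a0 < 0)                    /* NOT_SUPPORTED (-1) in R0 */
+                  return -EOPNOTSUPP;
+
+          *wall_ns = (res.a0 << 32) | res.a1;     /* R0:R1 = wall clock time */
+          *counter = (res.a2 << 32) | res.a3;     /* R2:R3 = counter value   */
+          return 0;
+  }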
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 995780088eb2..9677a0714a39 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -46,21 +46,16 @@ SEV hardware uses ASIDs to associate a memory encryption key with a VM.
Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
defined in the CPUID 0x8000001f[ecx] field.
-SEV Key Management
-==================
+The KVM_MEMORY_ENCRYPT_OP ioctl
+===============================
-The SEV guest key management is handled by a separate processor called the AMD
-Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
-key management interface to perform common hypervisor activities such as
-encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
-information, see the SEV Key Management spec [api-spec]_
-
-The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP. If the argument
-to KVM_MEMORY_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
-and ``ENOTTY`` if it is disabled (on some older versions of Linux,
-the ioctl runs normally even with a NULL argument, and therefore will
-likely return ``EFAULT``). If non-NULL, the argument to KVM_MEMORY_ENCRYPT_OP
-must be a struct kvm_sev_cmd::
+The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP, which operates on
+the VM file descriptor. If the argument to KVM_MEMORY_ENCRYPT_OP is NULL,
+the ioctl returns 0 if SEV is enabled and ``ENOTTY`` if it is disabled
+(on some older versions of Linux, the ioctl tries to run normally even
+with a NULL argument, and therefore will likely return ``EFAULT`` instead
+of zero if SEV is enabled). If non-NULL, the argument to
+KVM_MEMORY_ENCRYPT_OP must be a struct kvm_sev_cmd::
struct kvm_sev_cmd {
__u32 id;
@@ -81,19 +76,56 @@ are defined in ``<linux/psp-dev.h>``.
KVM implements the following commands to support common lifecycle events of SEV
guests, such as launching, running, snapshotting, migrating and decommissioning.
-1. KVM_SEV_INIT
----------------
+1. KVM_SEV_INIT2
+----------------
-The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
+The KVM_SEV_INIT2 command is used by the hypervisor to initialize the SEV platform
context. In a typical workflow, this command should be the first command issued.
-The firmware can be initialized either by using its own non-volatile storage or
-the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
-is invalid, the OS will create or override the file with output from PSP.
+For this command to be accepted, either KVM_X86_SEV_VM or KVM_X86_SEV_ES_VM
+must have been passed to the KVM_CREATE_VM ioctl. A virtual machine created
+with those machine types in turn cannot be run until KVM_SEV_INIT2 is invoked.
+
+Parameters: struct kvm_sev_init (in)
Returns: 0 on success, -negative on error
+::
+
+ struct kvm_sev_init {
+ __u64 vmsa_features; /* initial value of features field in VMSA */
+ __u32 flags; /* must be 0 */
+ __u16 ghcb_version; /* maximum guest GHCB version allowed */
+ __u16 pad1;
+ __u32 pad2[8];
+ };
+
+It is an error to set any bit in ``flags`` or ``vmsa_features`` that the
+hypervisor does not support. ``vmsa_features`` must be 0 for SEV virtual
+machines, as they do not have a VMSA.
+
+``ghcb_version`` must be 0 for SEV virtual machines, as they do not issue GHCB
+requests. If ``ghcb_version`` is 0 for any other guest type, then the maximum
+allowed guest GHCB protocol will default to version 2.
+
+This command replaces the deprecated KVM_SEV_INIT and KVM_SEV_ES_INIT commands.
+The commands did not have any parameters (the ``data`` field was unused) and
+they only work for the KVM_X86_DEFAULT_VM machine type (0).
+
+They behave as if:
+
+* the VM type is KVM_X86_SEV_VM for KVM_SEV_INIT, or KVM_X86_SEV_ES_VM for
+ KVM_SEV_ES_INIT
+
+* the ``flags`` and ``vmsa_features`` fields of ``struct kvm_sev_init`` are
+ set to zero, and ``ghcb_version`` is set to 0 for KVM_SEV_INIT and 1 for
+ KVM_SEV_ES_INIT.
+
+If the ``KVM_X86_SEV_VMSA_FEATURES`` attribute does not exist, the hypervisor only
+supports KVM_SEV_INIT and KVM_SEV_ES_INIT. In that case, note that KVM_SEV_ES_INIT
+might set the debug swap VMSA feature (bit 5) depending on the value of the
+``debug_swap`` parameter of ``kvm-amd.ko``.
+
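+A minimal userspace sketch (illustrative only, error handling omitted) that
+creates an SEV-ES VM and initializes it with KVM_SEV_INIT2, assuming
+``kvm_fd`` is an open ``/dev/kvm`` file descriptor and ``sev_fd`` the usual
+``/dev/sev`` descriptor::
+
+  #include <fcntl.h>
+  #include <linux/kvm.h>
+  #include <sys/ioctl.h>
+
+  static int create_sev_es_vm(int kvm_fd)
+  {
+          struct kvm_sev_init init = {};  /* no extra VMSA features, GHCB v2 max */
+          struct kvm_sev_cmd cmd = {
+                  .id     = KVM_SEV_INIT2,
+                  .data   = (unsigned long)&init,
+                  .sev_fd = open("/dev/sev", O_RDWR),
+          };
+          int vm_fd;
+
+          vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SEV_ES_VM);
+          if (vm_fd < 0)
+                  return vm_fd;
+
+          /* The VM cannot run until this succeeds */
+          if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
+                  return -1;
+
+          return vm_fd;
+  }
+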
2. KVM_SEV_LAUNCH_START
-----------------------
@@ -434,6 +466,33 @@ issued by the hypervisor to make the guest ready for execution.
Returns: 0 on success, -negative on error
+Device attribute API
+====================
+
+Attributes of the SEV implementation can be retrieved through the
+``KVM_HAS_DEVICE_ATTR`` and ``KVM_GET_DEVICE_ATTR`` ioctls on the ``/dev/kvm``
+device node, using group ``KVM_X86_GRP_SEV``.
+
+Currently only one attribute is implemented:
+
+* ``KVM_X86_SEV_VMSA_FEATURES``: return the set of all bits that
+ are accepted in the ``vmsa_features`` of ``KVM_SEV_INIT2``.
+
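+For example, userspace could probe the supported VMSA feature bits with a
+sketch along these lines (illustrative only, assuming ``kvm_fd`` is an open
+``/dev/kvm`` file descriptor)::
+
+  #include <linux/kvm.h>
+  #include <stdint.h>
+  #include <sys/ioctl.h>
+
+  static uint64_t query_vmsa_features(int kvm_fd)
+  {
+          uint64_t features = 0;
+          struct kvm_device_attr attr = {
+                  .group = KVM_X86_GRP_SEV,
+                  .attr  = KVM_X86_SEV_VMSA_FEATURES,
+                  .addr  = (uint64_t)&features,
+          };
+
+          /* Without the attribute, only KVM_SEV_INIT/KVM_SEV_ES_INIT exist */
+          if (ioctl(kvm_fd, KVM_HAS_DEVICE_ATTR, &attr))
+                  return 0;
+
+          ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
+          return features;
+  }
+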
+Firmware Management
+===================
+
+The SEV guest key management is handled by a separate processor called the AMD
+Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
+key management interface to perform common hypervisor activities such as
+encrypting bootstrap code, snapshotting, migrating and debugging the guest.
+For more information, see the SEV Key Management spec [api-spec]_.
+
+The AMD-SP firmware can be initialized either by using its own non-volatile
+storage, or the OS can manage the NV storage for the firmware using the
+``init_ex_path`` parameter of the ``ccp`` module. If the file specified
+by ``init_ex_path`` does not exist or is invalid, the OS will create or
+overwrite the file with PSP non-volatile storage.
+
References
==========
diff --git a/Documentation/virt/kvm/x86/msr.rst b/Documentation/virt/kvm/x86/msr.rst
index 9315fc385fb0..3aecf2a70e7b 100644
--- a/Documentation/virt/kvm/x86/msr.rst
+++ b/Documentation/virt/kvm/x86/msr.rst
@@ -193,8 +193,8 @@ data:
Asynchronous page fault (APF) control MSR.
Bits 63-6 hold 64-byte aligned physical address of a 64 byte memory area
- which must be in guest RAM and must be zeroed. This memory is expected
- to hold a copy of the following structure::
+ which must be in guest RAM. This memory is expected to hold the
+ following structure::
struct kvm_vcpu_pv_apf_data {
/* Used for 'page not present' events delivered via #PF */
@@ -204,7 +204,6 @@ data:
__u32 token;
__u8 pad[56];
- __u32 enabled;
};
Bits 5-4 of the MSR are reserved and should be zero. Bit 0 is set to 1
@@ -232,14 +231,14 @@ data:
as regular page fault, guest must reset 'flags' to '0' before it does
something that can generate normal page fault.
- Bytes 5-7 of 64 byte memory location ('token') will be written to by the
+ Bytes 4-7 of 64 byte memory location ('token') will be written to by the
hypervisor at the time of APF 'page ready' event injection. The content
- of these bytes is a token which was previously delivered as 'page not
- present' event. The event indicates the page in now available. Guest is
- supposed to write '0' to 'token' when it is done handling 'page ready'
- event and to write 1' to MSR_KVM_ASYNC_PF_ACK after clearing the location;
- writing to the MSR forces KVM to re-scan its queue and deliver the next
- pending notification.
+ of these bytes is a token which was previously delivered in CR2 as
+ 'page not present' event. The event indicates the page is now available.
+ Guest is supposed to write '0' to 'token' when it is done handling
+ 'page ready' event and to write '1' to MSR_KVM_ASYNC_PF_ACK after
+ clearing the location; writing to the MSR forces KVM to re-scan its
+ queue and deliver the next pending notification.
Note, MSR_KVM_ASYNC_PF_INT MSR specifying the interrupt vector for 'page
ready' APF delivery needs to be written to before enabling APF mechanism
diff --git a/Documentation/wmi/devices/msi-wmi-platform.rst b/Documentation/wmi/devices/msi-wmi-platform.rst
new file mode 100644
index 000000000000..29b1b2e6d42c
--- /dev/null
+++ b/Documentation/wmi/devices/msi-wmi-platform.rst
@@ -0,0 +1,194 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+===================================================
+MSI WMI Platform Features driver (msi-wmi-platform)
+===================================================
+
+Introduction
+============
+
+Many MSI notebooks support various features like reading fan sensors. These features are controlled
+by the embedded controller, with the ACPI firmware exposing a standard ACPI WMI interface on top
+of the embedded controller interface.
+
+WMI interface description
+=========================
+
+The WMI interface description can be decoded from the embedded binary MOF (bmof)
+data using the `bmfdec <https://github.com/pali/bmfdec>`_ utility:
+
+::
+
+ [WMI, Locale("MS\0x409"),
+ Description("This class contains the definition of the package used in other classes"),
+ guid("{ABBC0F60-8EA1-11d1-00A0-C90629100000}")]
+ class Package {
+ [WmiDataId(1), read, write, Description("16 bytes of data")] uint8 Bytes[16];
+ };
+
+ [WMI, Locale("MS\0x409"),
+ Description("This class contains the definition of the package used in other classes"),
+ guid("{ABBC0F63-8EA1-11d1-00A0-C90629100000}")]
+ class Package_32 {
+ [WmiDataId(1), read, write, Description("32 bytes of data")] uint8 Bytes[32];
+ };
+
+ [WMI, Dynamic, Provider("WmiProv"), Locale("MS\0x409"),
+ Description("Class used to operate methods on a package"),
+ guid("{ABBC0F6E-8EA1-11d1-00A0-C90629100000}")]
+ class MSI_ACPI {
+ [key, read] string InstanceName;
+ [read] boolean Active;
+
+ [WmiMethodId(1), Implemented, read, write, Description("Return the contents of a package")]
+ void GetPackage([out, id(0)] Package Data);
+
+ [WmiMethodId(2), Implemented, read, write, Description("Set the contents of a package")]
+ void SetPackage([in, id(0)] Package Data);
+
+ [WmiMethodId(3), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_EC([out, id(0)] Package_32 Data);
+
+ [WmiMethodId(4), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_EC([in, id(0)] Package_32 Data);
+
+ [WmiMethodId(5), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_BIOS([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(6), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_BIOS([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(7), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_SMBUS([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(8), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_SMBUS([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(9), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_MasterBattery([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(10), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_MasterBattery([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(11), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_SlaveBattery([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(12), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_SlaveBattery([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(13), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Temperature([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(14), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Temperature([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(15), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Thermal([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(16), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Thermal([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(17), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Fan([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(18), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Fan([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(19), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Device([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(20), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Device([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(21), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Power([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(22), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Power([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(23), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Debug([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(24), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Debug([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(25), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_AP([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(26), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_AP([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(27), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_Data([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(28), Implemented, read, write, Description("Set the contents of a package")]
+ void Set_Data([in, out, id(0)] Package_32 Data);
+
+ [WmiMethodId(29), Implemented, read, write, Description("Return the contents of a package")]
+ void Get_WMI([out, id(0)] Package_32 Data);
+ };
+
+Due to a peculiarity in how Windows handles the ``CreateByteField()`` ACPI operator (errors only
+happen when an invalid byte field is ultimately accessed), all methods require a 32 byte input
+buffer, even if the Binary MOF says otherwise.
+
+The input buffer contains a single byte to select the subfeature to be accessed and 31 bytes of
+input data, the meaning of which depends on the subfeature being accessed.
+
+The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
+and 31 bytes of output data, the meaning of which depends on the subfeature being accessed.
+
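+The layout can be pictured as the following hypothetical C structure (the
+driver is not required to use it, it merely mirrors the description above)::
+
+  #include <linux/types.h>
+
+  struct msi_wmi_platform_buffer {
+          u8 head;        /* input: subfeature selector, output: status (0x00 = failure) */
+          u8 data[31];    /* subfeature-specific payload */
+  };
+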
+WMI method Get_EC()
+-------------------
+
+Returns embedded controller information; the selected subfeature does not matter. The output
+data contains a flag byte and a 28 byte controller firmware version string.
+
+The first 4 bits of the flag byte contain the minor version of the embedded controller interface,
+with the next 2 bits containing the major version of the embedded controller interface.
+
+The 7th bit signals if the embedded controller page changed (exact meaning is unknown), and the
+last bit signals if the platform is a Tigerlake platform.
+
+The MSI software seems to only use this interface when the last bit is set.
+
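+In C terms the flag byte can be decoded roughly as follows (hypothetical
+helper, macro names made up here)::
+
+  #include <linux/bitfield.h>
+  #include <linux/bits.h>
+  #include <linux/printk.h>
+  #include <linux/types.h>
+
+  #define MSI_EC_MINOR_MASK     GENMASK(3, 0)   /* interface minor version */
+  #define MSI_EC_MAJOR_MASK     GENMASK(5, 4)   /* interface major version */
+  #define MSI_EC_PAGE_CHANGED   BIT(6)          /* exact meaning unknown   */
+  #define MSI_EC_IS_TIGERLAKE   BIT(7)          /* Tigerlake platform      */
+
+  static void msi_ec_decode_flags(u8 flags)
+  {
+          pr_info("EC interface v%lu.%lu, page changed: %d, Tigerlake: %d\n",
+                  FIELD_GET(MSI_EC_MAJOR_MASK, flags),
+                  FIELD_GET(MSI_EC_MINOR_MASK, flags),
+                  !!(flags & MSI_EC_PAGE_CHANGED),
+                  !!(flags & MSI_EC_IS_TIGERLAKE));
+  }
+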
+WMI method Get_Fan()
+--------------------
+
+Fan speed sensors can be accessed by selecting subfeature ``0x00``. The output data contains
+up to four 16-bit fan speed readings in big-endian format. Most machines do not support all
+four fan speed sensors, so the remaining readings are hardcoded to ``0x0000``.
+
+The fan RPM readings can be calculated with the following formula:
+
+ RPM = 480000 / <fan speed reading>
+
+If the fan speed reading is zero, then the fan RPM is zero too.
+
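+In kernel code this boils down to something like the following hypothetical
+helper (big-endian conversion plus the division above)::
+
+  #include <asm/byteorder.h>
+  #include <linux/types.h>
+
+  static u32 msi_wmi_platform_fan_rpm(__be16 reading)
+  {
+          u16 value = be16_to_cpu(reading);
+
+          if (!value)
+                  return 0;       /* a zero reading means the fan is stopped */
+
+          return 480000 / value;
+  }
+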
+WMI method Get_WMI()
+--------------------
+
+Returns the version of the ACPI WMI interface; the selected subfeature does not matter.
+The output data contains two bytes, the first one contains the major version and the last one
+contains the minor revision of the ACPI WMI interface.
+
+The MSI software seems to only use this interface when the major version is greater than two.
+
+Reverse-Engineering the MSI WMI Platform interface
+==================================================
+
+.. warning:: Randomly poking the embedded controller interface can potentially cause damage
+ to the machine and other unwanted side effects, please be careful.
+
+The underlying embedded controller interface is used by the ``msi-ec`` driver, and it seems
+that many methods just copy a part of the embedded controller memory into the output buffer.
+
+This means that the remaining WMI methods can be reverse-engineered by looking at which part of
+the embedded controller memory is accessed by the ACPI AML code. The driver also supports a
+debugfs interface for directly executing WMI methods. Additionally, any safety checks regarding
+unsupported hardware can be disabled by loading the module with ``force=true``.
+
+More information about the MSI embedded controller interface can be found at the
+`msi-ec project <https://github.com/BeardOverflow/msi-ec>`_.
+
+Special thanks go to github user `glpnk` for showing how to decode the fan speed readings.
diff --git a/Documentation/wmi/driver-development-guide.rst b/Documentation/wmi/driver-development-guide.rst
new file mode 100644
index 000000000000..429137b2f632
--- /dev/null
+++ b/Documentation/wmi/driver-development-guide.rst
@@ -0,0 +1,178 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+============================
+WMI driver development guide
+============================
+
+The WMI subsystem provides a rich driver API for implementing WMI drivers,
+documented at Documentation/driver-api/wmi.rst. This document will serve
+as an introductory guide for WMI driver writers using this API. It is supposed
+to be a successor to the original LWN article [1]_ which deals with WMI drivers
+using the deprecated GUID-based WMI interface.
+
+Obtaining WMI device information
+--------------------------------
+
+Before developing a WMI driver, information about the WMI device in question
+must be obtained. The `lswmi <https://pypi.org/project/lswmi>`_ utility can be
+used to extract detailed WMI device information using the following command:
+
+::
+
+ lswmi -V
+
+The resulting output will contain information about all WMI devices available on
+a given machine, plus some extra information.
+
+In order to find out more about the interface used to communicate with a WMI device,
+the `bmfdec <https://github.com/pali/bmfdec>`_ utilities can be used to decode
+the Binary MOF (Managed Object Format) information used to describe WMI devices.
+The ``wmi-bmof`` driver exposes this information to userspace, see
+Documentation/wmi/devices/wmi-bmof.rst.
+
+In order to retrieve the decoded Binary MOF information, use the following command (requires root):
+
+::
+
+ ./bmf2mof /sys/bus/wmi/devices/05901221-D566-11D1-B2F0-00A0C9062910[-X]/bmof
+
+Sometimes, looking at the disassembled ACPI tables used to describe the WMI device
+helps in understanding how the WMI device is supposed to work. The path of the ACPI
+method associated with a given WMI device can be retrieved using the ``lswmi`` utility
+as mentioned above.
+
+Basic WMI driver structure
+--------------------------
+
+The basic WMI driver is built around the struct wmi_driver, which is then bound
+to matching WMI devices using a struct wmi_device_id table:
+
+::
+
+ static const struct wmi_device_id foo_id_table[] = {
+ { "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
+ { }
+ };
+ MODULE_DEVICE_TABLE(wmi, foo_id_table);
+
+ static struct wmi_driver foo_driver = {
+ .driver = {
+ .name = "foo",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS, /* recommended */
+ .pm = pm_sleep_ptr(&foo_dev_pm_ops), /* optional */
+ },
+ .id_table = foo_id_table,
+ .probe = foo_probe,
+ .remove = foo_remove, /* optional, devres is preferred */
+ .notify = foo_notify, /* optional, for event handling */
+ .no_notify_data = true, /* optional, enables events containing no additional data */
+ .no_singleton = true, /* required for new WMI drivers */
+ };
+ module_wmi_driver(foo_driver);
+
+The probe() callback is called when the WMI driver is bound to a matching WMI device. Allocating
+driver-specific data structures and initialising interfaces to other kernel subsystems should
+normally be done in this function.
+
+The remove() callback is then called when the WMI driver is unbound from a WMI device. In order
+to unregister interfaces to other kernel subsystems and release resources, devres should be used.
+This simplifies error handling during probe and often allows this callback to be omitted entirely; see
+Documentation/driver-api/driver-model/devres.rst for details.
+
+Please note that new WMI drivers are required to support being instantiated multiple times,
+and are forbidden from using any deprecated GUID-based WMI functions. This means that the
+WMI driver should be prepared for the scenario that multiple matching WMI devices are present
+on a given machine.
+
+Because of this, WMI drivers should use the state container design pattern as described in
+Documentation/driver-api/driver-model/design-patterns.rst.
+
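+A bare-bones sketch of this pattern (hypothetical driver data, error paths
+trimmed) could look like this::
+
+  #include <linux/device.h>
+  #include <linux/mutex.h>
+  #include <linux/slab.h>
+  #include <linux/wmi.h>
+
+  struct foo_data {
+          struct wmi_device *wdev;
+          struct mutex lock;      /* serializes access to the WMI device */
+  };
+
+  static int foo_probe(struct wmi_device *wdev, const void *context)
+  {
+          struct foo_data *data;
+
+          data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+          if (!data)
+                  return -ENOMEM;
+
+          data->wdev = wdev;
+          mutex_init(&data->lock);
+          dev_set_drvdata(&wdev->dev, data);  /* retrieve later with dev_get_drvdata() */
+
+          return 0;
+  }
+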
+WMI method drivers
+------------------
+
+WMI drivers can call WMI device methods using wmidev_evaluate_method(), the
+structure of the ACPI buffer passed to this function is device-specific and usually
+needs some tinkering to get right. Looking at the ACPI tables containing the WMI
+device usually helps here. The method id and instance number passed to this function
+are also device-specific, looking at the decoded Binary MOF is usually enough to
+find the right values.
+
+The maximum instance number can be retrieved during runtime using wmidev_instance_count().
+
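+A hypothetical call (the method id, instance and buffer layout are made up
+here and must be taken from the decoded Binary MOF of the actual device)
+could look like this::
+
+  #include <linux/acpi.h>
+  #include <linux/slab.h>
+  #include <linux/wmi.h>
+
+  static acpi_status foo_call_method(struct wmi_device *wdev, u32 arg)
+  {
+          struct acpi_buffer in = { sizeof(arg), &arg };
+          struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+          acpi_status status;
+
+          /* instance 0, method id 1, device-specific input buffer */
+          status = wmidev_evaluate_method(wdev, 0x0, 0x1, &in, &out);
+          if (ACPI_FAILURE(status))
+                  return status;
+
+          /* out.pointer holds a device-specific union acpi_object */
+          kfree(out.pointer);
+          return AE_OK;
+  }
+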
+Take a look at drivers/platform/x86/inspur_platform_profile.c for an example WMI method driver.
+
+WMI data block drivers
+----------------------
+
+WMI drivers can query WMI device data blocks using wmidev_block_query(), the
+structure of the returned ACPI object is again device-specific. Some WMI devices
+also allow for setting data blocks using wmidev_block_set().
+
+The maximum instance number can also be retrieved using wmidev_instance_count().
+
+Take a look at drivers/platform/x86/intel/wmi/sbl-fw-update.c for an example
+WMI data block driver.
+
+WMI event drivers
+-----------------
+
+WMI drivers can receive WMI events via the notify() callback inside the struct wmi_driver.
+The WMI subsystem will then take care of setting up the WMI event accordingly. Please note that
+the structure of the ACPI object passed to this callback is device-specific, and freeing the
+ACPI object is being done by the WMI subsystem, not the driver.
+
+The WMI driver core ensures that the notify() callback is only called after
+the probe() callback has been called, and that no events are delivered to the
+driver immediately before and after its remove() callback is called.
+
+However, WMI driver developers should be aware that multiple WMI events can be received concurrently,
+so any locking (if necessary) needs to be provided by the WMI driver itself.
+
+In order to be able to receive WMI events containing no additional event data,
+the ``no_notify_data`` flag inside struct wmi_driver should be set to ``true``.
+
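+A hypothetical notify() callback handling an integer event payload could be
+as simple as::
+
+  #include <linux/acpi.h>
+  #include <linux/device.h>
+  #include <linux/wmi.h>
+
+  static void foo_notify(struct wmi_device *wdev, union acpi_object *obj)
+  {
+          /* The object layout is device-specific; it is freed by the WMI subsystem */
+          if (!obj || obj->type != ACPI_TYPE_INTEGER)
+                  return;
+
+          dev_info(&wdev->dev, "event data: %llu\n", obj->integer.value);
+  }
+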
+Take a look at drivers/platform/x86/xiaomi-wmi.c for an example WMI event driver.
+
+Handling multiple WMI devices at once
+-------------------------------------
+
+There are many cases of firmware vendors using multiple WMI devices to control different aspects
+of a single physical device. This can make developing WMI drivers complicated, as those drivers
+might need to communicate with each other to present a unified interface to userspace.
+
+One such case involves a WMI event device which needs to talk to a WMI data block device or WMI
+method device upon receiving a WMI event. In such a case, two WMI drivers should be developed,
+one for the WMI event device and one for the other WMI device.
+
+The WMI event device driver has only one purpose: to receive WMI events, validate any additional
+event data and invoke a notifier chain. The other WMI driver adds itself to this notifier chain
+during probing and thus gets notified every time a WMI event is received. This WMI driver might
+then process the event further for example by using an input device.
+
+For other WMI device constellations, similar mechanisms can be used.
+
+Things to avoid
+---------------
+
+When developing WMI drivers, there are a couple of things which should be avoided:
+
+- usage of the deprecated GUID-based WMI interface which uses GUIDs instead of WMI device structs
+- bypassing of the WMI subsystem when talking to WMI devices
+- WMI drivers which cannot be instantiated multiple times.
+
+Many older WMI drivers violate one or more points from this list. The reason for
+this is that the WMI subsystem evolved significantly over the last two decades,
+so there is a lot of legacy cruft inside older WMI drivers.
+
+New WMI drivers are also required to conform to the Linux kernel coding style as specified in
+Documentation/process/coding-style.rst. The checkpatch utility can catch many common coding style
+violations; it can be invoked with the following command:
+
+::
+
+ ./scripts/checkpatch.pl --strict <path to driver file>
+
+References
+==========
+
+.. [1] https://lwn.net/Articles/391230/
diff --git a/Documentation/wmi/index.rst b/Documentation/wmi/index.rst
index 537cff188e14..fec4b6ae97b3 100644
--- a/Documentation/wmi/index.rst
+++ b/Documentation/wmi/index.rst
@@ -8,6 +8,7 @@ WMI Subsystem
:maxdepth: 1
acpi-interface
+ driver-development-guide
devices/index
.. only:: subproject and html
diff --git a/MAINTAINERS b/MAINTAINERS
index aa3b947fb080..ede5faaad494 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -258,6 +258,12 @@ L: linux-acenic@sunsite.dk
S: Maintained
F: drivers/net/ethernet/alteon/acenic*
+ACER ASPIRE 1 EMBEDDED CONTROLLER DRIVER
+M: Nikita Travkin <nikita@trvn.ru>
+S: Maintained
+F: Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml
+F: drivers/platform/arm64/acer-aspire1-ec.c
+
ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
M: Peter Kaestle <peter@piie.net>
L: platform-driver-x86@vger.kernel.org
@@ -354,6 +360,12 @@ B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
F: drivers/acpi/pmic/
+ACPI QUICKSTART DRIVER
+M: Armin Wolf <W_Armin@gmx.de>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/quickstart.c
+
ACPI SERIAL MULTI INSTANTIATE DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: platform-driver-x86@vger.kernel.org
@@ -479,6 +491,13 @@ L: linux-wireless@vger.kernel.org
S: Orphan
F: drivers/net/wireless/admtek/adm8211.*
+ADP1050 HARDWARE MONITOR DRIVER
+M: Radu Sabau <radu.sabau@analog.com>
+L: linux-hwmon@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml
+
ADP1653 FLASH CONTROLLER DRIVER
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
@@ -553,7 +572,7 @@ F: Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
F: drivers/input/misc/adxl34x.c
ADXL355 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml
@@ -653,6 +672,15 @@ S: Supported
F: fs/aio.c
F: include/linux/*aio*.h
+AIROHA SPI SNFI DRIVER
+M: Lorenzo Bianconi <lorenzo@kernel.org>
+M: Ray Liu <ray.liu@airoha.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
+F: drivers/spi/spi-airoha-snfi.c
+
AIRSPY MEDIA DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -993,7 +1021,7 @@ F: drivers/video/fbdev/geode/
AMD HSMP DRIVER
M: Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
-R: Carlos Bilbao <carlos.bilbao@amd.com>
+R: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/arch/x86/amd_hsmp.rst
@@ -1062,6 +1090,9 @@ F: drivers/gpu/drm/amd/pm/
AMD PSTATE DRIVER
M: Huang Rui <ray.huang@amd.com>
+M: Gautham R. Shenoy <gautham.shenoy@amd.com>
+M: Mario Limonciello <mario.limonciello@amd.com>
+R: Perry Yuan <perry.yuan@amd.com>
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/admin-guide/pm/amd-pstate.rst
@@ -1671,7 +1702,7 @@ F: drivers/soc/versatile/
ARM KOMEDA DRM-KMS DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,komeda.yaml
F: Documentation/gpu/komeda-kms.rst
F: drivers/gpu/drm/arm/display/include/
@@ -1683,15 +1714,26 @@ M: Rob Herring <robh@kernel.org>
R: Steven Price <steven.price@arm.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
+ARM MALI PANTHOR DRM DRIVER
+M: Boris Brezillon <boris.brezillon@collabora.com>
+M: Steven Price <steven.price@arm.com>
+M: Liviu Dudau <liviu.dudau@arm.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
+F: drivers/gpu/drm/panthor/
+F: include/uapi/drm/panthor_drm.h
+
ARM MALI-DP DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,malidp.yaml
F: Documentation/gpu/afbc.rst
F: drivers/gpu/drm/arm/
@@ -2191,7 +2233,6 @@ N: mxs
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
-M: Li Yang <leoyang.li@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2321,7 +2362,7 @@ M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/vzapolskiy/linux-lpc32xx.git
-F: Documentation/devicetree/bindings/i2c/i2c-pnx.txt
+F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
F: arch/arm/boot/dts/nxp/lpc/lpc32*
F: arch/arm/mach-lpc32xx/
F: drivers/i2c/busses/i2c-pnx.c
@@ -2586,12 +2627,8 @@ F: arch/arm64/boot/dts/qcom/sc7180*
F: arch/arm64/boot/dts/qcom/sc7280*
F: arch/arm64/boot/dts/qcom/sdm845-cheza*
-ARM/QUALCOMM SUPPORT
-M: Bjorn Andersson <andersson@kernel.org>
-M: Konrad Dybcio <konrad.dybcio@linaro.org>
+ARM/QUALCOMM MAILING LIST
L: linux-arm-msm@vger.kernel.org
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
F: Documentation/devicetree/bindings/*/qcom*
F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom/
@@ -2628,6 +2665,33 @@ F: include/dt-bindings/*/qcom*
F: include/linux/*/qcom*
F: include/linux/soc/qcom/
+ARM/QUALCOMM SUPPORT
+M: Bjorn Andersson <andersson@kernel.org>
+M: Konrad Dybcio <konrad.dybcio@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
+F: Documentation/devicetree/bindings/arm/qcom-soc.yaml
+F: Documentation/devicetree/bindings/arm/qcom.yaml
+F: Documentation/devicetree/bindings/bus/qcom*
+F: Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+F: Documentation/devicetree/bindings/firmware/qcom,scm.yaml
+F: Documentation/devicetree/bindings/reserved-memory/qcom
+F: Documentation/devicetree/bindings/soc/qcom/
+F: arch/arm/boot/dts/qcom/
+F: arch/arm/configs/qcom_defconfig
+F: arch/arm/mach-qcom/
+F: arch/arm64/boot/dts/qcom/
+F: drivers/bus/qcom*
+F: drivers/firmware/qcom/
+F: drivers/soc/qcom/
+F: include/dt-bindings/arm/qcom,ids.h
+F: include/dt-bindings/firmware/qcom,scm.h
+F: include/dt-bindings/soc/qcom*
+F: include/linux/firmware/qcom
+F: include/linux/soc/qcom/
+F: include/soc/qcom/
+
ARM/RDA MICRO ARCHITECTURE
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2708,7 +2772,7 @@ F: sound/soc/rockchip/
N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
@@ -3018,7 +3082,7 @@ S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
-F: drivers/i2c/busses/i2c-wmt.c
+F: drivers/i2c/busses/i2c-viai2c-wmt.c
F: drivers/mmc/host/wmt-sdmmc.c
F: drivers/pwm/pwm-vt8500.c
F: drivers/rtc/rtc-vt8500.c
@@ -3051,6 +3115,23 @@ F: drivers/mmc/host/sdhci-of-arasan.c
N: zynq
N: xilinx
+ARM64 FIT SUPPORT
+M: Simon Glass <sjg@chromium.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm64/boot/Makefile
+F: scripts/make_fit.py
+
+ARM64 PLATFORM DRIVERS
+M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+Q: https://patchwork.kernel.org/project/platform-driver-x86/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
+F: drivers/platform/arm64/
+
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>
M: Will Deacon <will@kernel.org>
@@ -3573,6 +3654,7 @@ S: Supported
C: irc://irc.oftc.net/bcache
T: git https://evilpiepirate.org/git/bcachefs.git
F: fs/bcachefs/
+F: Documentation/filesystems/bcachefs/
BDISP ST MEDIA DRIVER
M: Fabien Dessenne <fabien.dessenne@foss.st.com>
@@ -3611,6 +3693,11 @@ F: Documentation/filesystems/bfs.rst
F: fs/bfs/
F: include/uapi/linux/bfs_fs.h
+BINMAN
+M: Simon Glass <sjg@chromium.org>
+S: Supported
+F: Documentation/devicetree/bindings/mtd/partitions/binman*
+
BITMAP API
M: Yury Norov <yury.norov@gmail.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
@@ -3638,6 +3725,20 @@ F: tools/include/vdso/bits.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c
+BITOPS API
+M: Yury Norov <yury.norov@gmail.com>
+R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+S: Maintained
+F: arch/*/include/asm/bitops.h
+F: arch/*/include/asm/bitops_32.h
+F: arch/*/include/asm/bitops_64.h
+F: arch/*/lib/bitops.c
+F: include/asm-generic/bitops
+F: include/asm-generic/bitops.h
+F: include/linux/bitops.h
+F: lib/test_bitops.c
+F: tools/*/bitops*
+
BLINKM RGB LED DRIVER
M: Jan-Simon Moeller <jansimon.moeller@gmx.de>
S: Maintained
@@ -3712,9 +3813,15 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
F: drivers/iio/imu/bmi323/
+BPF JIT for ARC
+M: Shahab Vahedi <shahab@synopsys.com>
+L: bpf@vger.kernel.org
+S: Maintained
+F: arch/arc/net/
+
BPF JIT for ARM
M: Russell King <linux@armlinux.org.uk>
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/arm/net/
@@ -3722,7 +3829,7 @@ F: arch/arm/net/
BPF JIT for ARM64
M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
-M: Zi Shen Lim <zlim.lnx@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
@@ -3764,6 +3871,8 @@ X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
M: Björn Töpel <bjorn@kernel.org>
+R: Pu Lehui <pulehui@huawei.com>
+R: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/riscv/net/
@@ -3822,6 +3931,14 @@ F: kernel/bpf/tnum.c
F: kernel/bpf/trampoline.c
F: kernel/bpf/verifier.c
+BPF [CRYPTO]
+M: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+L: bpf@vger.kernel.org
+S: Maintained
+F: crypto/bpf_crypto_skcipher.c
+F: include/linux/bpf_crypto.h
+F: kernel/bpf/crypto.c
+
BPF [DOCUMENTATION] (Related to Standardization)
R: David Vernet <void@manifault.com>
L: bpf@vger.kernel.org
@@ -3942,8 +4059,7 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
-R: Florent Revest <revest@chromium.org>
-R: Brendan Jackman <jackmanb@chromium.org>
+R: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
@@ -3968,7 +4084,7 @@ F: kernel/bpf/bpf_lru*
F: kernel/bpf/cgroup.c
BPF [TOOLING] (bpftool)
-M: Quentin Monnet <quentin@isovalent.com>
+M: Quentin Monnet <qmo@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/disasm.*
@@ -4035,6 +4151,13 @@ N: bcm113*
N: bcm216*
N: kona
+BROADCOM BCM2835 CAMERA DRIVERS
+M: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/brcm,bcm2835-unicam.yaml
+F: drivers/media/platform/broadcom/bcm2835-unicam*
+
BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
@@ -4192,7 +4315,6 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Sudarsana Kalluru <skalluru@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
@@ -4870,7 +4992,6 @@ F: drivers/power/supply/cw2015_battery.c
CEPH COMMON CODE (LIBCEPH)
M: Ilya Dryomov <idryomov@gmail.com>
M: Xiubo Li <xiubli@redhat.com>
-R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -4882,7 +5003,6 @@ F: net/ceph/
CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
M: Xiubo Li <xiubli@redhat.com>
M: Ilya Dryomov <idryomov@gmail.com>
-R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -5207,7 +5327,6 @@ F: lib/closure.c
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
-M: Daniel Oliveira Nascimento <don@syst.com.br>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/classmate-laptop.c
@@ -5255,6 +5374,14 @@ S: Supported
F: Documentation/process/code-of-conduct-interpretation.rst
F: Documentation/process/code-of-conduct.rst
+CODE TAGGING
+M: Suren Baghdasaryan <surenb@google.com>
+M: Kent Overstreet <kent.overstreet@linux.dev>
+S: Maintained
+F: include/asm-generic/codetag.lds.h
+F: include/linux/codetag.h
+F: lib/codetag.c
+
COMEDI DRIVERS
M: Ian Abbott <abbotti@mev.co.uk>
M: H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -5335,7 +5462,7 @@ M: Dan Williams <dan.j.williams@intel.com>
L: linux-cxl@vger.kernel.org
S: Maintained
F: drivers/cxl/
-F: include/linux/cxl-einj.h
+F: include/linux/einj-cxl.h
F: include/linux/cxl-event.h
F: include/uapi/linux/cxl_mem.h
F: tools/testing/cxl/
@@ -5355,7 +5482,7 @@ F: drivers/usb/atm/cxacru.c
CONFIDENTIAL COMPUTING THREAT MODEL FOR X86 VIRTUALIZATION (SNP/TDX)
M: Elena Reshetova <elena.reshetova@intel.com>
-M: Carlos Bilbao <carlos.bilbao@amd.com>
+M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
S: Maintained
F: Documentation/security/snp-tdx-threat-model.rst
@@ -5558,7 +5685,7 @@ F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVER - ARM EXYNOS
M: Daniel Lezcano <daniel.lezcano@linaro.org>
M: Kukjin Kim <kgene@kernel.org>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -5579,6 +5706,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
F: drivers/cpuidle/cpuidle-psci-domain.c
F: drivers/cpuidle/cpuidle-psci.h
@@ -5586,6 +5714,7 @@ CPUIDLE DRIVER - DT IDLE PM DOMAIN
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
F: drivers/cpuidle/dt_idle_genpd.c
F: drivers/cpuidle/dt_idle_genpd.h
@@ -5711,7 +5840,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/cxd2820r*
CXGB3 ETHERNET DRIVER (CXGB3)
-M: Raju Rangoju <rajur@chelsio.com>
+M: Potnuri Bharat Teja <bharat@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5732,7 +5861,7 @@ W: http://www.chelsio.com
F: drivers/crypto/chelsio
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Raju Rangoju <rajur@chelsio.com>
+M: Potnuri Bharat Teja <bharat@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5761,7 +5890,7 @@ F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h
CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M: Raju Rangoju <rajur@chelsio.com>
+M: Potnuri Bharat Teja <bharat@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5781,10 +5910,9 @@ F: include/uapi/misc/cxl.h
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
M: Manoj N. Kumar <manoj@linux.ibm.com>
-M: Matthew R. Ochs <mrochs@linux.ibm.com>
M: Uma Krishnan <ukrishn@linux.ibm.com>
L: linux-scsi@vger.kernel.org
-S: Supported
+S: Obsolete
F: Documentation/arch/powerpc/cxlflash.rst
F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctl.h
@@ -6067,7 +6195,6 @@ F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-R: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
R: Serge Semin <fancer.lancer@gmail.com>
L: dmaengine@vger.kernel.org
S: Maintained
@@ -6157,7 +6284,6 @@ DEVICE-MAPPER (LVM)
M: Alasdair Kergon <agk@redhat.com>
M: Mike Snitzer <snitzer@kernel.org>
M: Mikulas Patocka <mpatocka@redhat.com>
-M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
Q: http://patchwork.kernel.org/project/dm-devel/list/
@@ -6173,7 +6299,6 @@ F: include/uapi/linux/dm-*.h
DEVICE-MAPPER VDO TARGET
M: Matthew Sakai <msakai@redhat.com>
-M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
F: Documentation/admin-guide/device-mapper/vdo*.rst
@@ -6315,7 +6440,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/dma-buf.rst
F: Documentation/userspace-api/dma-buf-alloc-exchange.rst
F: drivers/dma-buf/
@@ -6369,7 +6494,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
@@ -6406,6 +6531,7 @@ S: Maintained
P: Documentation/doc-guide/maintainer-profile.rst
T: git git://git.lwn.net/linux.git docs-next
F: Documentation/
+F: scripts/check-variable-fonts.sh
F: scripts/documentation-file-ref-check
F: scripts/kernel-doc
F: scripts/sphinx-pre-install
@@ -6578,7 +6704,7 @@ M: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
M: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/accel/ivpu/
F: include/uapi/drm/ivpu_accel.h
@@ -6598,18 +6724,18 @@ M: Chen-Yu Tsai <wens@csie.org>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/sun4i/sun8i*
DRM DRIVER FOR ARM PL111 CLCD
S: Orphan
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/pl111/
DRM DRIVER FOR ARM VERSATILE TFT PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
F: drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -6617,7 +6743,7 @@ DRM DRIVER FOR ASPEED BMC GFX
M: Joel Stanley <joel@jms.id.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
F: drivers/gpu/drm/aspeed/
@@ -6627,14 +6753,14 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ast/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/bochs.c
DRM DRIVER FOR BOE HIMAX8279D PANELS
@@ -6652,14 +6778,14 @@ F: drivers/gpu/drm/bridge/chipone-icn6211.c
DRM DRIVER FOR EBBG FT8719 PANEL
M: Joel Selvaraj <jo@jsfamily.in>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/ebbg,ft8719.yaml
F: drivers/gpu/drm/panel/panel-ebbg-ft8719.c
DRM DRIVER FOR FARADAY TVE200 TV ENCODER
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tve200/
DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
@@ -6679,7 +6805,7 @@ M: Thomas Zimmermann <tzimmermann@suse.de>
M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_aperture.c
F: drivers/gpu/drm/tiny/ofdrm.c
F: drivers/gpu/drm/tiny/simpledrm.c
@@ -6698,27 +6824,27 @@ DRM DRIVER FOR GENERIC USB DISPLAY
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/gud/wiki
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gud/
F: include/drm/gud.h
DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
M: Hans de Goede <hdegoede@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/gm12u320.c
DRM DRIVER FOR HIMAX HX8394 MIPI-DSI LCD panels
M: Ondrej Jirman <megi@xff.cz>
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
F: drivers/gpu/drm/panel/panel-himax-hx8394.c
DRM DRIVER FOR HX8357D PANELS
S: Orphan
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
F: drivers/gpu/drm/tiny/hx8357d.c
@@ -6727,20 +6853,20 @@ M: Deepak Rawat <drawat.floss@gmail.com>
L: linux-hyperv@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/hyperv
DRM DRIVER FOR ILITEK ILI9225 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
F: drivers/gpu/drm/tiny/ili9225.c
DRM DRIVER FOR ILITEK ILI9486 PANELS
M: Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
F: drivers/gpu/drm/tiny/ili9486.c
@@ -6756,17 +6882,25 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+DRM DRIVER FOR LG SW43408 PANELS
+M: Sumit Semwal <sumit.semwal@linaro.org>
+M: Caleb Connolly <caleb.connolly@linaro.org>
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
+F: drivers/gpu/drm/panel/panel-lg-sw43408.c
+
DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/logicvc/
DRM DRIVER FOR LVDS PANELS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/lvds.yaml
F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
F: drivers/gpu/drm/panel/panel-lvds.c
@@ -6784,13 +6918,13 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/mgag200/
DRM DRIVER FOR MI0283QT
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
F: drivers/gpu/drm/tiny/mi0283qt.c
@@ -6798,11 +6932,29 @@ DRM DRIVER FOR MIPI DBI compatible panels
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/panel-mipi-dbi/wiki
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
F: drivers/gpu/drm/tiny/panel-mipi-dbi.c
-DRM DRIVER FOR MSM ADRENO GPU
+DRM DRIVER for Qualcomm Adreno GPUs
+M: Rob Clark <robdclark@gmail.com>
+R: Sean Paul <sean@poorly.run>
+R: Konrad Dybcio <konrad.dybcio@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+L: freedreno@lists.freedesktop.org
+S: Maintained
+B: https://gitlab.freedesktop.org/drm/msm/-/issues
+T: git https://gitlab.freedesktop.org/drm/msm.git
+F: Documentation/devicetree/bindings/display/msm/gpu.yaml
+F: drivers/gpu/drm/msm/adreno/
+F: drivers/gpu/drm/msm/msm_gpu.*
+F: drivers/gpu/drm/msm/msm_gpu_devfreq.*
+F: drivers/gpu/drm/msm/msm_ringbuffer.*
+F: drivers/gpu/drm/msm/registers/adreno/
+F: include/uapi/drm/msm_drm.h
+
+DRM DRIVER for Qualcomm display hardware
M: Rob Clark <robdclark@gmail.com>
M: Abhinav Kumar <quic_abhinavk@quicinc.com>
M: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
@@ -6822,28 +6974,28 @@ F: include/uapi/drm/msm_drm.h
DRM DRIVER FOR NOVATEK NT35510 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
DRM DRIVER FOR NOVATEK NT35560 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35560.c
DRM DRIVER FOR NOVATEK NT36523 PANELS
M: Jianhua Lu <lujianhua000@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36523.c
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -6877,7 +7029,7 @@ F: drivers/gpu/drm/bridge/parade-ps8640.c
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/repaper.txt
F: drivers/gpu/drm/tiny/repaper.c
@@ -6887,7 +7039,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/cirrus.c
DRM DRIVER FOR QXL VIRTUAL GPU
@@ -6896,7 +7048,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
L: spice-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
@@ -6909,7 +7061,7 @@ F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR SAMSUNG DB7430 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -6918,7 +7070,7 @@ M: Inki Dae <inki.dae@samsung.com>
M: Jagan Teki <jagan@amarulasolutions.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
F: drivers/gpu/drm/bridge/samsung-dsim.c
F: include/drm/bridge/samsung-dsim.h
@@ -6938,7 +7090,7 @@ F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
DRM DRIVER FOR SITRONIX ST7586 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt
F: drivers/gpu/drm/tiny/st7586.c
@@ -6959,14 +7111,14 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
DRM DRIVER FOR SITRONIX ST7735R PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
F: drivers/gpu/drm/tiny/st7735r.c
DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/solomon,ssd-common.yaml
F: Documentation/devicetree/bindings/display/solomon,ssd13*.yaml
F: drivers/gpu/drm/solomon/ssd130x*
@@ -6974,7 +7126,7 @@ F: drivers/gpu/drm/solomon/ssd130x*
DRM DRIVER FOR ST-ERICSSON MCDE
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ste,mcde.yaml
F: drivers/gpu/drm/mcde/
@@ -6998,7 +7150,7 @@ F: drivers/gpu/drm/bridge/ti-sn65dsi86.c
DRM DRIVER FOR TPO TPG110 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
F: drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -7008,7 +7160,7 @@ R: Sean Paul <sean@poorly.run>
R: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
@@ -7019,7 +7171,7 @@ R: Haneen Mohammed <hamohammed.sa@gmail.com>
R: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vkms.rst
F: drivers/gpu/drm/vkms/
@@ -7027,7 +7179,7 @@ DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
@@ -7035,14 +7187,14 @@ M: Zack Rusin <zack.rusin@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVER FOR WIDECHIPS WS2401 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -7067,8 +7219,8 @@ M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
M: Maxime Ripard <mripard@kernel.org>
M: Thomas Zimmermann <tzimmermann@suse.de>
S: Maintained
-W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
-T: git git://anongit.freedesktop.org/drm/drm-misc
+W: https://drm.pages.freedesktop.org/maintainer-tools/drm-misc.html
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
@@ -7095,7 +7247,7 @@ M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/allwinner*
F: drivers/gpu/drm/sun4i/
@@ -7105,7 +7257,7 @@ L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
S: Supported
W: http://linux-meson.com/
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
F: Documentation/gpu/meson.rst
@@ -7117,7 +7269,7 @@ M: Sam Ravnborg <sam@ravnborg.org>
M: Boris Brezillon <bbrezillon@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/atmel/
F: drivers/gpu/drm/atmel-hlcdc/
@@ -7129,7 +7281,7 @@ R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
F: drivers/gpu/drm/drm_bridge.c
@@ -7154,7 +7306,7 @@ M: Stefan Agner <stefan@agner.ch>
M: Alison Wang <alison.wang@nxp.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
F: drivers/gpu/drm/fsl-dcu/
@@ -7163,7 +7315,7 @@ DRM DRIVERS FOR FREESCALE IMX 5/6
M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
T: git git://git.pengutronix.de/git/pza/linux
F: Documentation/devicetree/bindings/display/imx/
F: drivers/gpu/drm/imx/ipuv3/
@@ -7183,7 +7335,7 @@ DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
@@ -7195,7 +7347,7 @@ R: Yongqin Liu <yongqin.liu@linaro.org>
R: John Stultz <jstultz@google.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/hisilicon/
F: drivers/gpu/drm/hisilicon/
@@ -7204,7 +7356,7 @@ M: Qiang Yu <yuq825@gmail.com>
L: dri-devel@lists.freedesktop.org
L: lima@lists.freedesktop.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/lima/
F: include/uapi/drm/lima_drm.h
@@ -7212,7 +7364,7 @@ DRM DRIVERS FOR LOONGSON
M: Sui Jingfeng <suijingfeng@loongson.cn>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/loongson/
DRM DRIVERS FOR MEDIATEK
@@ -7260,7 +7412,7 @@ M: Biju Das <biju.das.jz@bp.renesas.com>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
F: drivers/gpu/drm/renesas/rz-du/
@@ -7270,7 +7422,7 @@ M: Geert Uytterhoeven <geert+renesas@glider.be>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
F: drivers/gpu/drm/renesas/shmobile/
F: include/linux/platform_data/shmob_drm.h
@@ -7281,7 +7433,7 @@ M: Heiko Stübner <heiko@sntech.de>
M: Andy Yan <andy.yan@rock-chips.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/ci/xfails/rockchip*
F: drivers/gpu/drm/rockchip/
@@ -7290,7 +7442,7 @@ DRM DRIVERS FOR STI
M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stih4xx.txt
F: drivers/gpu/drm/sti
@@ -7300,7 +7452,7 @@ M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
F: drivers/gpu/drm/stm
@@ -7309,7 +7461,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -7320,7 +7472,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
@@ -7328,7 +7480,7 @@ DRM DRIVERS FOR TI OMAP
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/
F: drivers/gpu/drm/omapdrm/
@@ -7336,7 +7488,7 @@ DRM DRIVERS FOR V3D
M: Melissa Wen <mwen@igalia.com>
M: Maíra Canal <mcanal@igalia.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
F: drivers/gpu/drm/v3d/
F: include/uapi/drm/v3d_drm.h
@@ -7345,7 +7497,7 @@ DRM DRIVERS FOR VC4
M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
@@ -7366,15 +7518,16 @@ M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
L: dri-devel@lists.freedesktop.org
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/xen-front.rst
F: drivers/gpu/drm/xen/
DRM DRIVERS FOR XILINX
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
@@ -7383,7 +7536,7 @@ M: Luben Tuikov <ltuikov89@gmail.com>
M: Matthew Brost <matthew.brost@intel.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/scheduler/
F: include/drm/gpu_scheduler.h
@@ -7393,7 +7546,7 @@ R: Jessica Zhang <quic_jesszhan@quicinc.com>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
@@ -7403,7 +7556,7 @@ DRM PRIVACY-SCREEN CLASS
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_privacy_screen*
F: include/drm/drm_privacy_screen*
@@ -7412,7 +7565,7 @@ M: Christian Koenig <christian.koenig@amd.com>
M: Huang Rui <ray.huang@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
@@ -7420,7 +7573,7 @@ DRM AUTOMATED TESTING
M: Helen Koike <helen.koike@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/automated_testing.rst
F: drivers/gpu/drm/ci/
@@ -7834,9 +7987,8 @@ W: http://aeschi.ch.eu.org/efs/
F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Douglas Miller <dougmill@linux.ibm.com>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/ibm/ehea/
ELM327 CAN NETWORK DRIVER
@@ -7941,6 +8093,7 @@ M: Gao Xiang <xiang@kernel.org>
M: Chao Yu <chao@kernel.org>
R: Yue Hu <huyue2@coolpad.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
+R: Sandeep Dhavale <dhavale@google.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -8443,8 +8596,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: include/linux/fortify-string.h
F: lib/fortify_kunit.c
F: lib/memcpy_kunit.c
-F: lib/strcat_kunit.c
-F: lib/strscpy_kunit.c
F: lib/test_fortify/*
F: scripts/test_fortify.sh
K: \b__NO_FORTIFY\b
@@ -8485,7 +8636,7 @@ F: arch/x86/math-emu/
FRAMEBUFFER CORE
M: Daniel Vetter <daniel@ffwll.ch>
S: Odd Fixes
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/video/fbdev/core/
FRAMEBUFFER LAYER
@@ -8512,7 +8663,7 @@ F: Documentation/devicetree/bindings/crypto/fsl,sec-v4.0*
F: drivers/crypto/caam/
FREESCALE COLDFIRE M5441X MMC DRIVER
-M: Angelo Dureghello <angelo.dureghello@timesys.com>
+M: Angelo Dureghello <adureghello@baylibre.com>
L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -8525,7 +8676,6 @@ S: Maintained
F: drivers/video/fbdev/fsl-diu-fb.*
FREESCALE DMA DRIVER
-M: Li Yang <leoyang.li@nxp.com>
M: Zhang Wei <zw@zh-kernel.org>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@@ -8690,10 +8840,9 @@ F: drivers/soc/fsl/qe/tsa.h
F: include/dt-bindings/soc/cpm1-fsl,tsa.h
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
-M: Li Yang <leoyang.li@nxp.com>
L: netdev@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC HDLC DRIVER
@@ -8710,10 +8859,9 @@ S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
@@ -8747,17 +8895,15 @@ F: Documentation/devicetree/bindings/sound/fsl,qmc-audio.yaml
F: sound/soc/fsl/fsl_qmc_audio.c
FREESCALE USB PERIPHERAL DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/usb/gadget/udc/fsl*
FREESCALE USB PHY DRIVER
-M: Ran Wang <ran.wang_1@nxp.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/usb/phy/phy-fsl-usb*
FREEVXFS FILESYSTEM
@@ -9002,7 +9148,7 @@ F: drivers/i2c/muxes/i2c-mux-gpio.c
F: include/linux/platform_data/i2c-mux-gpio.h
GENERIC GPIO RESET DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: drivers/reset/reset-gpio.c
@@ -9180,6 +9326,7 @@ S: Maintained
F: Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
F: arch/arm64/boot/dts/exynos/google/
F: drivers/clk/samsung/clk-gs101.c
+F: drivers/phy/samsung/phy-gs101-ufs.c
F: include/dt-bindings/clock/google,gs101.h
K: [gG]oogle.?[tT]ensor
@@ -9585,7 +9732,7 @@ F: kernel/power/
HID CORE LAYER
M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -9653,7 +9800,9 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-logitech-hidpp.c
-HIGH-RESOLUTION TIMERS, CLOCKEVENTS
+HIGH-RESOLUTION TIMERS, TIMER WHEEL, CLOCKEVENTS
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
+M: Frederic Weisbecker <frederic@kernel.org>
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -9661,9 +9810,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: Documentation/timers/
F: include/linux/clockchips.h
F: include/linux/hrtimer.h
+F: include/linux/timer.h
F: kernel/time/clockevents.c
F: kernel/time/hrtimer.c
-F: kernel/time/timer_*.c
+F: kernel/time/timer.c
+F: kernel/time/timer_list.c
+F: kernel/time/timer_migration.*
+F: tools/testing/selftests/timers/
HIGH-SPEED SCC DRIVER FOR AX.25
L: linux-hams@vger.kernel.org
@@ -10026,7 +10179,7 @@ F: drivers/media/platform/st/sti/hva
HWPOISON MEMORY FAILURE HANDLING
M: Miaohe Lin <linmiaohe@huawei.com>
-R: Naoya Horiguchi <naoya.horiguchi@nec.com>
+R: Naoya Horiguchi <nao.horiguchi@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: mm/hwpoison-inject.c
@@ -10261,6 +10414,14 @@ L: linux-i2c@vger.kernel.org
F: Documentation/i2c/busses/i2c-ismt.rst
F: drivers/i2c/busses/i2c-ismt.c
+I2C/SMBUS ZHAOXIN DRIVER
+M: Hans Hu <hanshu@zhaoxin.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+W: https://www.zhaoxin.com
+F: drivers/i2c/busses/i2c-viai2c-common.c
+F: drivers/i2c/busses/i2c-viai2c-zhaoxin.c
+
I2C/SMBUS STUB DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
@@ -10586,7 +10747,7 @@ IMGTEC POWERVR DRM DRIVER
M: Frank Binns <frank.binns@imgtec.com>
M: Matt Coster <matt.coster@imgtec.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
@@ -10606,7 +10767,7 @@ S: Orphan
F: drivers/video/fbdev/imsttfb.c
INDEX OF FURTHER KERNEL DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao@amd.com>
+M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
S: Maintained
F: Documentation/process/kernel-docs.rst
@@ -10891,6 +11052,7 @@ L: linux-gpio@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-elkhartlake.c
+F: drivers/gpio/gpio-graniterapids.c
F: drivers/gpio/gpio-ich.c
F: drivers/gpio/gpio-merrifield.c
F: drivers/gpio/gpio-ml-ioh.c
@@ -10942,7 +11104,7 @@ F: drivers/idle/intel_idle.c
INTEL IDXD DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
-M: Dave Jiang <dave.jiang@intel.com>
+R: Dave Jiang <dave.jiang@intel.com>
L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/idxd/*
@@ -10994,6 +11156,16 @@ F: Documentation/admin-guide/media/ipu3_rcb.svg
F: Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst
F: drivers/staging/media/ipu3/
+INTEL IPU6 INPUT SYSTEM DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+M: Bingbu Cao <bingbu.cao@intel.com>
+R: Tianshu Qiu <tian.shu.qiu@intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/admin-guide/media/ipu6-isys.rst
+F: drivers/media/pci/intel/ipu6/
+
INTEL ISHTP ECLITE DRIVER
M: Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -11367,7 +11539,7 @@ IOSYS-MAP HELPERS
M: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/iosys-map.h
IO_URING
@@ -11435,6 +11607,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/core-api/irq/irq-domain.rst
F: include/linux/irqdomain.h
+F: include/linux/irqdomain_defs.h
F: kernel/irq/irqdomain.c
F: kernel/irq/msi.c
@@ -11444,6 +11617,10 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: include/linux/group_cpus.h
+F: include/linux/irq.h
+F: include/linux/irqhandler.h
+F: include/linux/irqnr.h
+F: include/linux/irqreturn.h
F: kernel/irq/
F: lib/group_cpus.c
@@ -11454,6 +11631,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
+F: include/linux/irqchip.h
ISA
M: William Breathitt Gray <william.gray@linaro.org>
@@ -11560,7 +11738,7 @@ ITE IT66121 HDMI BRIDGE DRIVER
M: Phong LE <ple@baylibre.com>
M: Neil Armstrong <neil.armstrong@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
F: drivers/gpu/drm/bridge/ite-it66121.c
@@ -11997,7 +12175,7 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-M: James Bottomley <jejb@linux.ibm.com>
+M: James Bottomley <James.Bottomley@HansenPartnership.com>
M: Jarkko Sakkinen <jarkko@kernel.org>
M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
@@ -12017,6 +12195,15 @@ S: Maintained
F: include/keys/trusted_caam.h
F: security/keys/trusted-keys/trusted_caam.c
+KEYS-TRUSTED-DCP
+M: David Gstir <david@sigma-star.at>
+R: sigma star Kernel Team <upstream+dcp@sigma-star.at>
+L: linux-integrity@vger.kernel.org
+L: keyrings@vger.kernel.org
+S: Supported
+F: include/keys/trusted_dcp.h
+F: security/keys/trusted-keys/trusted_dcp.c
+
KEYS-TRUSTED-TEE
M: Sumit Garg <sumit.garg@linaro.org>
L: linux-integrity@vger.kernel.org
@@ -12044,6 +12231,7 @@ M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
+W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
F: security/integrity/platform_certs
KFENCE
@@ -12216,12 +12404,14 @@ F: net/l3mdev
LANDLOCK SECURITY MODULE
M: Mickaël Salaün <mic@digikod.net>
+R: Günther Noack <gnoack@google.com>
L: linux-security-module@vger.kernel.org
S: Supported
W: https://landlock.io
T: git https://git.kernel.org/pub/scm/linux/kernel/git/mic/linux.git
F: Documentation/security/landlock.rst
F: Documentation/userspace-api/landlock.rst
+F: fs/ioctl.c
F: include/uapi/linux/landlock.h
F: samples/landlock/
F: security/landlock/
@@ -12390,6 +12580,26 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
+LIBETH COMMON ETHERNET LIBRARY
+M: Alexander Lobakin <aleksander.lobakin@intel.com>
+L: netdev@vger.kernel.org
+L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
+S: Supported
+T: git https://github.com/alobakin/linux.git
+F: drivers/net/ethernet/intel/libeth/
+F: include/net/libeth/
+K: libeth
+
+LIBIE COMMON INTEL ETHERNET LIBRARY
+M: Alexander Lobakin <aleksander.lobakin@intel.com>
+L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
+L: netdev@vger.kernel.org
+S: Supported
+T: git https://github.com/alobakin/linux.git
+F: drivers/net/ethernet/intel/libie/
+F: include/linux/net/intel/libie/
+K: libie
+
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
@@ -12474,7 +12684,6 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
-R: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
R: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
@@ -12500,6 +12709,8 @@ F: drivers/rtc/rtc-opal.c
F: drivers/scsi/ibmvscsi/
F: drivers/tty/hvc/hvc_opal.c
F: drivers/watchdog/wdrtas.c
+F: include/linux/papr_scm.h
+F: include/uapi/linux/papr_pdsm.h
F: tools/testing/selftests/powerpc
N: /pmac
N: powermac
@@ -12998,6 +13209,15 @@ F: Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
F: drivers/mailbox/arm_mhuv2.c
F: include/linux/mailbox/arm_mhuv2_message.h
+MAILBOX ARM MHUv3
+M: Sudeep Holla <sudeep.holla@arm.com>
+M: Cristian Marussi <cristian.marussi@arm.com>
+L: linux-kernel@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/mailbox/arm,mhuv3.yaml
+F: drivers/mailbox/arm_mhuv3.c
+
MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
M: Alejandro Colomar <alx@kernel.org>
L: linux-man@vger.kernel.org
@@ -13134,6 +13354,7 @@ F: drivers/net/ethernet/marvell/mvpp2/
MARVELL MWIFIEX WIRELESS DRIVER
M: Brian Norris <briannorris@chromium.org>
+R: Francesco Dolcini <francesco@dolcini.it>
L: linux-wireless@vger.kernel.org
S: Odd Fixes
F: drivers/net/wireless/marvell/mwifiex/
@@ -13290,7 +13511,7 @@ F: drivers/iio/adc/max11205.c
MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
R: Iskren Chernev <iskren.chernev@gmail.com>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Matheus Castello <matheus@castello.eng.br>
L: linux-pm@vger.kernel.org
@@ -13300,7 +13521,7 @@ F: drivers/power/supply/max17040_battery.c
MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
R: Hans de Goede <hdegoede@redhat.com>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
R: Purism Kernel Team <kernel@puri.sm>
@@ -13358,7 +13579,7 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
F: drivers/power/supply/max77976_charger.c
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org
@@ -13369,7 +13590,7 @@ F: drivers/power/supply/max77693_charger.c
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org
@@ -13731,6 +13952,7 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-bluetooth@vger.kernel.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml
F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
F: drivers/bluetooth/btmtkuart.c
@@ -13780,6 +14002,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/mediatek-ge-soc.c
F: drivers/net/phy/mediatek-ge.c
+F: drivers/phy/mediatek/phy-mtk-xfi-tphy.c
MEDIATEK I2C CONTROLLER DRIVER
M: Qii Wang <qii.wang@mediatek.com>
@@ -14014,6 +14237,7 @@ F: drivers/net/ethernet/mellanox/mlx4/en_*
MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@@ -14081,6 +14305,7 @@ F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@nvidia.com>
M: Leon Romanovsky <leonro@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
@@ -14150,8 +14375,18 @@ F: mm/memblock.c
F: mm/mm_init.c
F: tools/testing/memblock/
+MEMORY ALLOCATION PROFILING
+M: Suren Baghdasaryan <surenb@google.com>
+M: Kent Overstreet <kent.overstreet@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+F: Documentation/mm/allocation-profiling.rst
+F: include/linux/alloc_tag.h
+F: include/linux/pgalloc_tag.h
+F: lib/alloc_tag.c
+
MEMORY CONTROLLER DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:krzysztof.kozlowski@linaro.org
@@ -14356,7 +14591,7 @@ F: drivers/dma/at_xdmac.c
F: include/dt-bindings/dma/at91.h
MICROCHIP AT91 SERIAL DRIVER
-M: Richard Genoud <richard.genoud@gmail.com>
+M: Richard Genoud <richard.genoud@bootlin.com>
S: Maintained
F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
F: drivers/tty/serial/atmel_serial.c
@@ -14562,6 +14797,14 @@ S: Supported
F: Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
F: drivers/pwm/pwm-atmel.c
+MICROCHIP SAM9x7-COMPATIBLE LVDS CONTROLLER
+M: Manikandan Muralidharan <manikandan.m@microchip.com>
+M: Dharma Balasubiramani <dharma.b@microchip.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+F: Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
+F: drivers/gpu/drm/bridge/microchip-lvds.c
+
MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
L: linux-iio@vger.kernel.org
@@ -14894,7 +15137,7 @@ F: drivers/phy/marvell/phy-pxa-usb.c
MMU GATHER AND TLB INVALIDATION
M: Will Deacon <will@kernel.org>
-M: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+M: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
M: Andrew Morton <akpm@linux-foundation.org>
M: Nick Piggin <npiggin@gmail.com>
M: Peter Zijlstra <peterz@infradead.org>
@@ -15020,6 +15263,14 @@ L: platform-driver-x86@vger.kernel.org
S: Orphan
F: drivers/platform/x86/msi-wmi.c
+MSI WMI PLATFORM FEATURES
+M: Armin Wolf <W_Armin@gmx.de>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/debugfs-msi-wmi-platform
+F: Documentation/wmi/devices/msi-wmi-platform.rst
+F: drivers/platform/x86/msi-wmi-platform.c
+
MSI001 MEDIA DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -15149,7 +15400,7 @@ M: Marek Vasut <marex@denx.de>
M: Stefan Agner <stefan@agner.ch>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,lcdif.yaml
F: drivers/gpu/drm/mxsfb/
@@ -15161,9 +15412,8 @@ F: drivers/scsi/myrb.*
F: drivers/scsi/myrs.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
W: https://www.cspi.com/ethernet-products/support/downloads/
F: drivers/net/ethernet/myricom/myri10ge/
@@ -15259,6 +15509,7 @@ F: net/*/netfilter.c
F: net/*/netfilter/
F: net/bridge/br_netfilter*.c
F: net/netfilter/
+F: tools/testing/selftests/net/netfilter/
NETROM NETWORK LAYER
M: Ralf Baechle <ralf@linux-mips.org>
@@ -15532,7 +15783,7 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
@@ -15627,9 +15878,10 @@ F: drivers/misc/nsm.c
F: include/uapi/linux/nsm.h
NOHZ, DYNTICKS SUPPORT
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
M: Frederic Weisbecker <frederic@kernel.org>
-M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@kernel.org>
+M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/nohz
@@ -15869,7 +16121,7 @@ M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
R: Lucas Stach <l.stach@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
F: drivers/gpu/drm/imx/dcss/
@@ -15908,7 +16160,7 @@ F: Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
F: drivers/regulator/pf8x00-regulator.c
NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -16391,7 +16643,7 @@ M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/i2c/ov8856.yaml
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml
F: drivers/media/i2c/ov8856.c
OMNIVISION OV8858 SENSOR DRIVER
@@ -16519,7 +16771,7 @@ K: of_overlay_remove
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh@kernel.org>
-M: Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
+M: Krzysztof Kozlowski <krzk+dt@kernel.org>
M: Conor Dooley <conor+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
@@ -16725,9 +16977,9 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
-R: Ajay Kaher <akaher@vmware.com>
-R: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+R: Ajay Kaher <ajay.kaher@broadcom.com>
+R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
@@ -16798,12 +17050,6 @@ S: Maintained
F: drivers/leds/leds-pca9532.c
F: include/linux/leds-pca9532.h
-PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: drivers/i2c/muxes/i2c-mux-pca9541.c
-
PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
M: Pali Rohár <pali@kernel.org>
@@ -16966,7 +17212,6 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained
@@ -17296,6 +17541,7 @@ R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
R: Jiri Olsa <jolsa@kernel.org>
R: Ian Rogers <irogers@google.com>
R: Adrian Hunter <adrian.hunter@intel.com>
+R: "Liang, Kan" <kan.liang@linux.intel.com>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Supported
@@ -17477,7 +17723,7 @@ F: Documentation/devicetree/bindings/pinctrl/renesas,*
F: drivers/pinctrl/renesas/
PIN CONTROLLER - SAMSUNG
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -17489,7 +17735,6 @@ C: irc://irc.libera.chat/linux-exynos
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung,pinctrl*yaml
F: drivers/pinctrl/samsung/
-F: include/dt-bindings/pinctrl/samsung.h
PIN CONTROLLER - SINGLE
M: Tony Lindgren <tony@atomide.com>
@@ -17590,15 +17835,20 @@ F: drivers/pnp/
F: include/linux/pnp.h
POSIX CLOCKS and TIMERS
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
+M: Frederic Weisbecker <frederic@kernel.org>
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: fs/timerfd.c
F: include/linux/time_namespace.h
-F: include/linux/timer*
+F: include/linux/timerfd.h
+F: include/uapi/linux/time.h
+F: include/uapi/linux/timerfd.h
F: include/trace/events/timer*
-F: kernel/time/*timer*
+F: kernel/time/itimer.c
+F: kernel/time/posix-*
F: kernel/time/namespace.c
POWER MANAGEMENT CORE
@@ -17768,6 +18018,14 @@ F: include/net/psample.h
F: include/uapi/linux/psample.h
F: net/psample
+PSE NETWORK DRIVER
+M: Oleksij Rempel <o.rempel@pengutronix.de>
+M: Kory Maincent <kory.maincent@bootlin.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/pse-pd/
+F: drivers/net/pse-pd/
+
PSTORE FILESYSTEM
M: Kees Cook <keescook@chromium.org>
R: Tony Luck <tony.luck@intel.com>
@@ -17868,7 +18126,7 @@ F: Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
F: drivers/media/rc/pwm-ir-tx.c
PWM SUBSYSTEM
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+M: Uwe Kleine-König <ukleinek@kernel.org>
L: linux-pwm@vger.kernel.org
S: Maintained
Q: https://patchwork.ozlabs.org/project/linux-pwm/list/
@@ -17992,7 +18250,6 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
S: Supported
@@ -18002,7 +18259,6 @@ F: include/linux/qed/
QLOGIC QL4xxx RDMA DRIVER
M: Michal Kalderon <mkalderon@marvell.com>
-M: Ariel Elior <aelior@marvell.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qedr/
@@ -18168,7 +18424,7 @@ R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/accel/qaic/
F: drivers/accel/qaic/
F: include/uapi/drm/qaic_accel.h
@@ -18463,6 +18719,7 @@ M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
+F: Documentation/devicetree/bindings/rng/microsoft,vmgenid.yaml
F: drivers/char/random.c
F: drivers/virt/vmgenid.c
@@ -18580,7 +18837,7 @@ F: tools/testing/selftests/resctrl/
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@kernel.org>
M: Frederic Weisbecker <frederic@kernel.org> (kernel/rcu/tree_nocb.h)
-M: Neeraj Upadhyay <quic_neeraju@quicinc.com> (kernel/rcu/tasks.h)
+M: Neeraj Upadhyay <neeraj.upadhyay@kernel.org> (kernel/rcu/tasks.h)
M: Joel Fernandes <joel@joelfernandes.org>
M: Josh Triplett <josh@joshtriplett.org>
M: Boqun Feng <boqun.feng@gmail.com>
@@ -18645,18 +18902,21 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw88/
REALTEK WIRELESS DRIVER (rtw89)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw89/
REDPINE WIRELESS DRIVER
@@ -18727,13 +18987,24 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
-RENESAS ETHERNET DRIVERS
+RENESAS ETHERNET AVB DRIVER
R: Sergey Shtylyov <s.shtylyov@omp.ru>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
-F: Documentation/devicetree/bindings/net/renesas,*.yaml
-F: drivers/net/ethernet/renesas/
-F: include/linux/sh_eth.h
+F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/ravb*
+
+RENESAS ETHERNET SWITCH DRIVER
+R: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/net/renesas,*ether-switch.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/rcar_gen4*
+F: drivers/net/ethernet/renesas/rswitch*
RENESAS IDT821034 ASoC CODEC
M: Herve Codina <herve.codina@bootlin.com>
@@ -18819,6 +19090,12 @@ F: include/dt-bindings/net/pcs-rzn1-miic.h
F: include/linux/pcs-rzn1-miic.h
F: net/dsa/tag_rzn1_a5psw.c
+RENESAS RZ/N1 DWMAC GLUE LAYER
+M: Romain Gantois <romain.gantois@bootlin.com>
+S: Maintained
+F: Documentation/devicetree/bindings/net/renesas,rzn1-gmac.yaml
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
+
RENESAS RZ/N1 RTC CONTROLLER DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
L: linux-rtc@vger.kernel.org
@@ -18843,6 +19120,16 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
F: drivers/i2c/busses/i2c-rzv2m.c
+RENESAS SUPERH ETHERNET DRIVER
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/net/renesas,ether.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/sh_eth*
+F: include/linux/sh_eth.h
+
RENESAS USB PHY DRIVER
M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-renesas-soc@vger.kernel.org
@@ -18922,6 +19209,20 @@ S: Maintained
F: drivers/mtd/nand/raw/r852.c
F: drivers/mtd/nand/raw/r852.h
+RISC-V AIA DRIVERS
+M: Anup Patel <anup@brainfault.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
+F: drivers/irqchip/irq-riscv-aplic-*.c
+F: drivers/irqchip/irq-riscv-aplic-*.h
+F: drivers/irqchip/irq-riscv-imsic-*.c
+F: drivers/irqchip/irq-riscv-imsic-*.h
+F: drivers/irqchip/irq-riscv-intc.c
+F: include/linux/irqchip/riscv-aplic.h
+F: include/linux/irqchip/riscv-imsic.h
+
RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
@@ -19046,6 +19347,13 @@ S: Maintained
F: Documentation/devicetree/bindings/media/rockchip-rga.yaml
F: drivers/media/platform/rockchip/rga/
+ROCKCHIP RK3308 INTERNAL AUDIO CODEC
+M: Luca Ceresoli <luca.ceresoli@bootlin.com>
+S: Maintained
+F: Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml
+F: sound/soc/codecs/rk3308_codec.c
+F: sound/soc/codecs/rk3308_codec.h
+
ROCKCHIP VIDEO DECODER DRIVER
M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
L: linux-media@vger.kernel.org
@@ -19179,12 +19487,14 @@ M: Hin-Tak Leung <hintak.leung@gmail.com>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
@@ -19414,7 +19724,7 @@ F: Documentation/devicetree/bindings/sound/samsung*
F: sound/soc/samsung/
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19449,7 +19759,7 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19475,7 +19785,7 @@ F: drivers/media/platform/samsung/s3c-camif/
F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5
@@ -19496,7 +19806,7 @@ S: Supported
F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
@@ -19518,7 +19828,7 @@ F: Documentation/devicetree/bindings/media/samsung,fimc.yaml
F: drivers/media/platform/samsung/exynos4-is/
SAMSUNG SOC CLOCK DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
@@ -19550,7 +19860,7 @@ F: drivers/net/ethernet/samsung/sxgbe/
SAMSUNG THERMAL DRIVER
M: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19637,7 +19947,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <jejb@linux.ibm.com>
+M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
M: "Martin K. Petersen" <martin.petersen@oracle.com>
L: linux-scsi@vger.kernel.org
S: Maintained
@@ -19818,6 +20128,10 @@ Q: https://patchwork.kernel.org/project/linux-security-module/list
B: mailto:linux-security-module@vger.kernel.org
P: https://github.com/LinuxSecurityModule/kernel/blob/main/README.md
T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+F: include/linux/lsm_audit.h
+F: include/linux/lsm_hook_defs.h
+F: include/linux/lsm_hooks.h
+F: include/linux/security.h
F: include/uapi/linux/lsm.h
F: security/
F: tools/testing/selftests/lsm/
@@ -20146,7 +20460,6 @@ F: include/linux/platform_data/simplefb.h
SIOX
M: Thorsten Scherer <t.scherer@eckelmann.de>
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
S: Supported
F: drivers/gpio/gpio-siox.c
@@ -20625,6 +20938,12 @@ F: include/trace/events/sof*.h
F: include/uapi/sound/asoc.h
F: sound/soc/
+SOUND - SOC LAYER / dapm-graph
+M: Luca Ceresoli <luca.ceresoli@bootlin.com>
+L: linux-sound@vger.kernel.org
+S: Maintained
+F: tools/sound/dapm-graph
+
SOUND - SOUND OPEN FIRMWARE (SOF) DRIVERS
M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
M: Liam Girdwood <lgirdwood@gmail.com>
@@ -20659,7 +20978,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/sp2*
SPANISH DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao@amd.com>
+M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
R: Avadhut Naik <avadhut.naik@amd.com>
S: Maintained
F: Documentation/translations/sp_SP/
@@ -20811,6 +21130,13 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
F: drivers/media/i2c/st-mipid02.c
+ST STM32 FIREWALL
+M: Gatien Chevallier <gatien.chevallier@foss.st.com>
+S: Maintained
+F: drivers/bus/stm32_etzpc.c
+F: drivers/bus/stm32_firewall.c
+F: drivers/bus/stm32_rifsc.c
+
ST STM32 I2C/SMBUS DRIVER
M: Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
M: Alain Volmat <alain.volmat@foss.st.com>
@@ -21294,7 +21620,7 @@ R: Gustavo Padovan <gustavo@padovan.org>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/sync_file.rst
F: drivers/dma-buf/dma-fence*
F: drivers/dma-buf/sw_sync.c
@@ -21453,6 +21779,7 @@ F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scmi/
F: drivers/firmware/arm_scpi.c
F: drivers/hwmon/scmi-hwmon.c
+F: drivers/pinctrl/pinctrl-scmi.c
F: drivers/pmdomain/arm/
F: drivers/powercap/arm_scmi_powercap.c
F: drivers/regulator/scmi-regulator.c
@@ -21639,6 +21966,7 @@ TEAM DRIVER
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/netlink/specs/team.yaml
F: drivers/net/team/
F: include/linux/if_team.h
F: include/uapi/linux/if_team.h
@@ -21684,6 +22012,7 @@ F: Documentation/driver-api/tee.rst
F: Documentation/tee/
F: Documentation/userspace-api/tee.rst
F: drivers/tee/
+F: include/linux/tee_core.h
F: include/linux/tee_drv.h
F: include/uapi/linux/tee.h
@@ -21702,6 +22031,11 @@ M: Prashant Gaikwad <pgaikwad@nvidia.com>
S: Supported
F: drivers/clk/tegra/
+TEGRA CRYPTO DRIVERS
+M: Akhil R <akhilrajeev@nvidia.com>
+S: Supported
+F: drivers/crypto/tegra/*
+
TEGRA DMA DRIVERS
M: Laxman Dewangan <ldewangan@nvidia.com>
M: Jon Hunter <jonathanh@nvidia.com>
@@ -21818,7 +22152,7 @@ F: Documentation/devicetree/bindings/sound/tas2552.txt
F: Documentation/devicetree/bindings/sound/tas2562.yaml
F: Documentation/devicetree/bindings/sound/tas2770.yaml
F: Documentation/devicetree/bindings/sound/tas27xx.yaml
-F: Documentation/devicetree/bindings/sound/ti,pcm1681.txt
+F: Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml
F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
F: Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
@@ -21895,7 +22229,7 @@ F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -22111,7 +22445,6 @@ F: drivers/counter/ti-eqep.c
TI ETHERNET SWITCH DRIVER (CPSW)
R: Siddharth Vadapalli <s-vadapalli@ti.com>
-R: Ravi Gunasekaran <r-gunasekaran@ti.com>
R: Roger Quadros <rogerq@kernel.org>
L: linux-omap@vger.kernel.org
L: netdev@vger.kernel.org
@@ -22254,13 +22587,20 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: include/linux/clocksource.h
F: include/linux/time.h
+F: include/linux/timekeeper_internal.h
+F: include/linux/timekeeping.h
F: include/linux/timex.h
F: include/uapi/linux/time.h
F: include/uapi/linux/timex.h
F: kernel/time/alarmtimer.c
-F: kernel/time/clocksource.c
-F: kernel/time/ntp.c
-F: kernel/time/time*.c
+F: kernel/time/clocksource*
+F: kernel/time/ntp*
+F: kernel/time/time.c
+F: kernel/time/timeconst.bc
+F: kernel/time/timeconv.c
+F: kernel/time/timecounter.c
+F: kernel/time/timekeeping*
+F: kernel/time/time_test.c
F: tools/testing/selftests/timers/
TIPC NETWORK LAYER
@@ -22381,9 +22721,10 @@ M: Jarkko Sakkinen <jarkko@kernel.org>
R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
S: Maintained
-W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
+W: https://gitlab.com/jarkkojs/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
+F: Documentation/devicetree/bindings/tpm/
F: drivers/char/tpm/
TPS546D24 DRIVER
@@ -22469,6 +22810,15 @@ F: Documentation/ABI/testing/configfs-tsm
F: drivers/virt/coco/tsm.c
F: include/linux/tsm.h
+TRUSTED SERVICES TEE DRIVER
+M: Balint Dobszay <balint.dobszay@arm.com>
+M: Sudeep Holla <sudeep.holla@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: trusted-services@lists.trustedfirmware.org
+S: Maintained
+F: Documentation/tee/ts-tee.rst
+F: drivers/tee/tstee/
+
TTY LAYER AND SERIAL DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Jiri Slaby <jirislaby@kernel.org>
@@ -22530,6 +22880,7 @@ Q: https://patchwork.kernel.org/project/linux-pm/list/
B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat
F: tools/power/x86/turbostat/
+F: tools/testing/selftests/turbostat/
TW5864 VIDEO4LINUX DRIVER
M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
@@ -22609,6 +22960,7 @@ F: include/linux/ubsan.h
F: lib/Kconfig.ubsan
F: lib/test_ubsan.c
F: lib/ubsan.c
+F: lib/ubsan.h
F: scripts/Makefile.ubsan
K: \bARCH_HAS_UBSAN\b
@@ -22799,7 +23151,7 @@ F: drivers/usb/host/ehci*
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -23069,7 +23421,7 @@ USERSPACE DMA BUFFER DRIVER
M: Gerd Hoffmann <kraxel@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/udmabuf.c
F: include/uapi/linux/udmabuf.h
@@ -23185,9 +23537,8 @@ F: include/linux/vfio_pci_core.h
F: include/uapi/linux/vfio.h
VFIO FSL-MC DRIVER
-M: Diana Craciun <diana.craciun@oss.nxp.com>
L: kvm@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/vfio/fsl-mc/
VFIO HISILICON PCI DRIVER
@@ -23241,6 +23592,14 @@ L: kvm@vger.kernel.org
S: Maintained
F: drivers/vfio/platform/
+VFIO QAT PCI DRIVER
+M: Xin Zeng <xin.zeng@intel.com>
+M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+L: kvm@vger.kernel.org
+L: qat-linux@intel.com
+S: Supported
+F: drivers/vfio/pci/qat/
+
VFIO VIRTIO PCI DRIVER
M: Yishai Hadas <yishaih@nvidia.com>
L: kvm@vger.kernel.org
@@ -23251,7 +23610,7 @@ F: drivers/vfio/pci/virtio
VGA_SWITCHEROO
R: Lukas Wunner <lukas@wunner.de>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vga-switcheroo.rst
F: drivers/gpu/vga/vga_switcheroo.c
F: include/linux/vga_switcheroo.h
@@ -23395,6 +23754,7 @@ F: include/linux/virtio*.h
F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
F: tools/virtio/
+F: tools/testing/selftests/drivers/net/virtio_net/
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
@@ -23444,7 +23804,7 @@ R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux.dev
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ci/xfails/virtio*
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h
@@ -23608,9 +23968,9 @@ S: Supported
F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE
-M: Ajay Kaher <akaher@vmware.com>
-M: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Ajay Kaher <ajay.kaher@broadcom.com>
+M: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
@@ -23619,34 +23979,34 @@ F: arch/x86/include/asm/vmware.h
F: arch/x86/kernel/cpu/vmware.c
VMWARE PVRDMA DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/vmw_pvrdma/
VMWARE PVSCSI DRIVER
-M: Vishal Bhakta <vbhakta@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Vishal Bhakta <vishal.bhakta@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/vmw_pvscsi.c
F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER
-M: Jeff Sipek <jsipek@vmware.com>
-R: Ajay Kaher <akaher@vmware.com>
-R: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Nick Shi <nick.shi@broadcom.com>
+R: Ajay Kaher <ajay.kaher@broadcom.com>
+R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
VMWARE VMCI DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: drivers/misc/vmw_vmci/
@@ -23661,16 +24021,16 @@ F: drivers/input/mouse/vmmouse.c
F: drivers/input/mouse/vmmouse.h
VMWARE VMXNET3 ETHERNET DRIVER
-M: Ronak Doshi <doshir@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Ronak Doshi <ronak.doshi@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/vmxnet3/
VMWARE VSOCK VMCI TRANSPORT DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: net/vmw_vsock/vmci_transport*
@@ -23738,7 +24098,7 @@ S: Orphan
F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/w1/
F: Documentation/w1/
@@ -24427,6 +24787,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: Documentation/admin-guide/LSM/Yama.rst
F: security/yama/
+YAML NETLINK (YNL)
+M: Donald Hunter <donald.hunter@gmail.com>
+M: Jakub Kicinski <kuba@kernel.org>
+F: Documentation/netlink/
+F: Documentation/userspace-api/netlink/intro-specs.rst
+F: Documentation/userspace-api/netlink/specs.rst
+F: tools/net/ynl/
+
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
L: usbb2k-api-dev@nongnu.org
diff --git a/Makefile b/Makefile
index 763b6792d3d5..77b283d16e73 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -263,7 +263,14 @@ srctree := $(abs_srctree)
endif
objtree := .
+
+VPATH :=
+
+ifeq ($(KBUILD_EXTMOD),)
+ifdef building_out_of_srctree
VPATH := $(srctree)
+endif
+endif
export building_out_of_srctree srctree objtree VPATH
@@ -942,7 +949,6 @@ endif
ifdef CONFIG_LTO_CLANG
ifdef CONFIG_LTO_CLANG_THIN
CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit
-KBUILD_LDFLAGS += --thinlto-cache-dir=$(extmod_prefix).thinlto-cache
else
CC_FLAGS_LTO := -flto
endif
@@ -1248,8 +1254,8 @@ define filechk_version.h
echo \#define LINUX_VERSION_SUBLEVEL $(SUBLEVEL)
endef
-$(version_h): PATCHLEVEL := $(or $(PATCHLEVEL), 0)
-$(version_h): SUBLEVEL := $(or $(SUBLEVEL), 0)
+$(version_h): private PATCHLEVEL := $(or $(PATCHLEVEL), 0)
+$(version_h): private SUBLEVEL := $(or $(SUBLEVEL), 0)
$(version_h): FORCE
$(call filechk,version.h)
@@ -1403,7 +1409,7 @@ export CHECK_DTBS=y
endif
ifneq ($(CHECK_DTBS),)
-dtbs_prepare: dt_binding_check
+dtbs_prepare: dt_binding_schemas
endif
dtbs_check: dtbs
@@ -1422,15 +1428,18 @@ scripts_dtc: scripts_basic
$(Q)$(MAKE) $(build)=scripts/dtc
ifneq ($(filter dt_binding_check, $(MAKECMDGOALS)),)
-export CHECK_DT_BINDING=y
+export CHECK_DTBS=y
endif
-PHONY += dt_binding_check
-dt_binding_check: scripts_dtc
+PHONY += dt_binding_check dt_binding_schemas
+dt_binding_check: dt_binding_schemas scripts_dtc
+ $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings $@
+
+dt_binding_schemas:
$(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
PHONY += dt_compatible_check
-dt_compatible_check: dt_binding_check
+dt_compatible_check: dt_binding_schemas
$(Q)$(MAKE) $(build)=Documentation/devicetree/bindings $@
# ---------------------------------------------------------------------------
@@ -1477,7 +1486,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache rust/test \
+ compile_commands.json rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
# Directories & files removed with 'make mrproper'
@@ -1494,7 +1503,7 @@ MRPROPER_FILES += include/config include/generated \
# clean - Delete most, but leave enough to build external modules
#
-clean: rm-files := $(CLEAN_FILES)
+clean: private rm-files := $(CLEAN_FILES)
PHONY += archclean vmlinuxclean
@@ -1506,7 +1515,7 @@ clean: archclean vmlinuxclean resolve_btfids_clean
# mrproper - Delete all generated files, including .config
#
-mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
+mrproper: private rm-files := $(MRPROPER_FILES)
mrproper-dirs := $(addprefix _mrproper_,scripts)
PHONY += $(mrproper-dirs) mrproper
@@ -1626,10 +1635,11 @@ help:
@echo ''
@$(if $(dtstree), \
echo 'Devicetree:'; \
- echo '* dtbs - Build device tree blobs for enabled boards'; \
- echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \
- echo ' dt_binding_check - Validate device tree binding documents'; \
- echo ' dtbs_check - Validate device tree source files';\
+ echo '* dtbs - Build device tree blobs for enabled boards'; \
+ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \
+ echo ' dt_binding_check - Validate device tree binding documents and examples'; \
+ echo ' dt_binding_schemas - Build processed device tree binding schemas'; \
+ echo ' dtbs_check - Validate device tree source files';\
echo '')
@echo 'Userspace tools targets:'
@@ -1782,8 +1792,8 @@ compile_commands.json: $(extmod_prefix)compile_commands.json
PHONY += compile_commands.json
clean-dirs := $(KBUILD_EXTMOD)
-clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
- $(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
+clean: private rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
+ $(KBUILD_EXTMOD)/compile_commands.json
PHONY += prepare
# now expand this into a simple variable to reduce the cost of shell evaluations
diff --git a/arch/Kconfig b/arch/Kconfig
index 9f066785bb71..b34946d90e4b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
+config ARCH_CONFIGURES_CPU_MITIGATIONS
+ bool
+
+if !ARCH_CONFIGURES_CPU_MITIGATIONS
+config CPU_MITIGATIONS
+ def_bool y
+endif
+
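[Editor's illustration, not part of the commit: a minimal C sketch of how generic code might consult the mitigation switch that the new CPU_MITIGATIONS symbol controls. cpu_mitigations_off() is assumed to be the existing helper declared in include/linux/cpu.h; the function name below is hypothetical.]

	#include <linux/cpu.h>
	#include <linux/init.h>

	/* Hypothetical arch setup hook, shown only to illustrate the intent of
	 * CONFIG_CPU_MITIGATIONS: when the user boots with mitigations=off, or
	 * the kernel is configured without mitigations, skip the hardening paths.
	 */
	static void __init example_setup_cpu_hardening(void)
	{
		if (cpu_mitigations_off())
			return;	/* mitigations disabled on the command line or in Kconfig */

		/* ...select and enable the relevant vulnerability mitigations... */
	}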
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS
@@ -52,10 +60,10 @@ config GENERIC_ENTRY
config KPROBES
bool "Kprobes"
- depends on MODULES
depends on HAVE_KPROBES
select KALLSYMS
- select TASKS_RCU if PREEMPTION
+ select EXECMEM
+ select NEED_TASKS_RCU
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@@ -104,7 +112,7 @@ config STATIC_CALL_SELFTEST
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
- select TASKS_RCU if PREEMPTION
+ select NEED_TASKS_RCU
config KPROBES_ON_FTRACE
def_bool y
@@ -502,6 +510,15 @@ config MMU_LAZY_TLB_SHOOTDOWN
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
+config ARCH_HAVE_EXTRA_ELF_NOTES
+ bool
+ help
+ An architecture should select this in order to enable adding an
+ arch-specific ELF note section to core files. It must provide two
+ functions: elf_coredump_extra_notes_size() and
+ elf_coredump_extra_notes_write() which are invoked by the ELF core
+ dumper.
+
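[Editor's illustration, not part of the commit: a hedged sketch of the two hooks named in the help text above. The signatures follow include/linux/elfcore.h as understood here; the trivial bodies are an assumption, not code from any architecture.]

	#include <linux/elfcore.h>
	#include <linux/coredump.h>

	/* Sketch of the pair an architecture selecting ARCH_HAVE_EXTRA_ELF_NOTES
	 * would provide.  A real implementation sizes and emits one or more
	 * arch-specific ELF notes into the core dump; this stub emits none.
	 */
	int elf_coredump_extra_notes_size(void)
	{
		return 0;	/* no extra bytes needed in the note segment */
	}

	int elf_coredump_extra_notes_write(struct coredump_params *cprm)
	{
		return 0;	/* nothing to write, report success */
	}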
config ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
bool
@@ -960,6 +977,14 @@ config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
For architectures like powerpc/32 which have constraints on module
allocation and need to allocate module data outside of module area.
+config ARCH_WANTS_EXECMEM_LATE
+ bool
+ help
+ For architectures that do not allocate executable memory early on
+ boot, but rather require its initialization late when there is
+ enough entropy for module space randomization, for instance
+ arm64.
+
config HAVE_IRQ_EXIT_ON_IRQ_STACK
bool
help
@@ -1172,12 +1197,12 @@ config PAGE_SIZE_LESS_THAN_256KB
config PAGE_SHIFT
int
- default 12 if PAGE_SIZE_4KB
- default 13 if PAGE_SIZE_8KB
- default 14 if PAGE_SIZE_16KB
- default 15 if PAGE_SIZE_32KB
- default 16 if PAGE_SIZE_64KB
- default 18 if PAGE_SIZE_256KB
+ default 12 if PAGE_SIZE_4KB
+ default 13 if PAGE_SIZE_8KB
+ default 14 if PAGE_SIZE_16KB
+ default 15 if PAGE_SIZE_32KB
+ default 16 if PAGE_SIZE_64KB
+ default 18 if PAGE_SIZE_256KB
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
@@ -1609,4 +1634,7 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
# strict alignment always, even with -falign-functions.
def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG
+config ARCH_NEED_CMPXCHG_1_EMU
+ bool
+
endmenu
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 3afd042150f8..50ff06d5b799 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -10,7 +10,7 @@ config ALPHA
select ARCH_NO_SG_CHAIN
select ARCH_USE_CMPXCHG_LOCKREF
select DMA_OPS if PCI
- select FORCE_PCI if !ALPHA_JENSEN
+ select FORCE_PCI
select PCI_DOMAINS if PCI
select PCI_SYSCALL if PCI
select HAVE_ASM_MODVERSIONS
@@ -90,22 +90,11 @@ choice
<http://www.alphalinux.org/>. In summary:
Alcor/Alpha-XLT AS 600, AS 500, XL-300, XL-366
- Alpha-XL XL-233, XL-266
- AlphaBook1 Alpha laptop
- Avanti AS 200, AS 205, AS 250, AS 255, AS 300, AS 400
- Cabriolet AlphaPC64, AlphaPCI64
DP264 DP264 / DS20 / ES40 / DS10 / DS10L
- EB164 EB164 21164 evaluation board
- EB64+ EB64+ 21064 evaluation board
- EB66 EB66 21066 evaluation board
- EB66+ EB66+ 21066 evaluation board
- Jensen DECpc 150, DEC 2000 models 300, 500
LX164 AlphaPC164-LX
- Lynx AS 2100A
Miata Personal Workstation 433/500/600 a/au
Marvel AlphaServer ES47 / ES80 / GS1280
Mikasa AS 1000
- Noname AXPpci33, UDB (Multia)
Noritake AS 1000A, AS 600A, AS 800
PC164 AlphaPC164
Rawhide AS 1200, AS 4000, AS 4100
@@ -137,27 +126,6 @@ config ALPHA_ALCOR
all the work required to support an external Bcache and to maintain
memory coherence when a PCI device DMAs into (or out of) memory.
-config ALPHA_XL
- bool "Alpha-XL"
- help
- XL-233 and XL-266-based Alpha systems.
-
-config ALPHA_BOOK1
- bool "AlphaBook1"
- help
- Dec AlphaBook1/Burns Alpha-based laptops.
-
-config ALPHA_AVANTI_CH
- bool "Avanti"
-
-config ALPHA_CABRIOLET
- bool "Cabriolet"
- help
- Cabriolet AlphaPC64, AlphaPCI64 systems. Derived from EB64+ but now
- baby-AT with Flash boot ROM, no on-board SCSI or Ethernet. 3 ISA
- slots, 4 PCI slots (one pair are on a shared slot), uses plug-in
- Bcache SIMMs. Requires power supply with 3.3V output.
-
config ALPHA_DP264
bool "DP264"
help
@@ -165,62 +133,18 @@ config ALPHA_DP264
API Networks: 264DP, UP2000(+), CS20;
Compaq: DS10(E,L), XP900, XP1000, DS20(E), ES40.
-config ALPHA_EB164
- bool "EB164"
- help
- EB164 21164 evaluation board from DEC. Uses 21164 and ALCOR. Has
- ISA and PCI expansion (3 ISA slots, 2 64-bit PCI slots (one is
- shared with an ISA slot) and 2 32-bit PCI slots. Uses plus-in
- Bcache SIMMs. I/O sub-system provides SuperI/O (2S, 1P, FD), KBD,
- MOUSE (PS2 style), RTC/NVRAM. Boot ROM is Flash. PC-AT-sized
- motherboard. Requires power supply with 3.3V output.
-
-config ALPHA_EB64P_CH
- bool "EB64+"
-
-config ALPHA_EB66
- bool "EB66"
- help
- A Digital DS group board. Uses 21066 or 21066A. I/O sub-system is
- identical to EB64+. Baby PC-AT size. Runs from standard PC power
- supply. The EB66 schematic was published as a marketing poster
- advertising the 21066 as "the first microprocessor in the world with
- embedded PCI".
-
-config ALPHA_EB66P
- bool "EB66+"
- help
- Later variant of the EB66 board.
-
config ALPHA_EIGER
bool "Eiger"
help
Apparently an obscure OEM single-board computer based on the
Typhoon/Tsunami chipset family. Information on it is scanty.
-config ALPHA_JENSEN
- bool "Jensen"
- select HAVE_EISA
- help
- DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
- of the first-generation Alpha systems. A number of these systems
- seem to be available on the second- hand market. The Jensen is a
- floor-standing tower system which originally used a 150MHz 21064 It
- used programmable logic to interface a 486 EISA I/O bridge to the
- CPU.
-
config ALPHA_LX164
bool "LX164"
help
A technical overview of this board is available at
<http://www.unix-ag.org/Linux-Alpha/Architectures/LX164.html>.
-config ALPHA_LYNX
- bool "Lynx"
- select HAVE_EISA
- help
- AlphaServer 2100A-based systems.
-
config ALPHA_MARVEL
bool "Marvel"
help
@@ -243,9 +167,6 @@ config ALPHA_NAUTILUS
help
Alpha systems based on the AMD 751 & ALI 1543C chipsets.
-config ALPHA_NONAME_CH
- bool "Noname"
-
config ALPHA_NORITAKE
bool "Noritake"
select HAVE_EISA
@@ -256,9 +177,6 @@ config ALPHA_NORITAKE
config ALPHA_PC164
bool "PC164"
-config ALPHA_P2K
- bool "Platform2000"
-
config ALPHA_RAWHIDE
bool "Rawhide"
select HAVE_EISA
@@ -322,84 +240,18 @@ config ISA_DMA_API
bool
default y
-config ALPHA_NONAME
- bool
- depends on ALPHA_BOOK1 || ALPHA_NONAME_CH
- default y
- help
- The AXPpci33 (aka NoName), is based on the EB66 (includes the Multia
- UDB). This design was produced by Digital's Technical OEM (TOEM)
- group. It uses the 21066 processor running at 166MHz or 233MHz. It
- is a baby-AT size, and runs from a standard PC power supply. It has
- 5 ISA slots and 3 PCI slots (one pair are a shared slot). There are
- 2 versions, with either PS/2 or large DIN connectors for the
- keyboard.
-
-config ALPHA_EV4
- bool
- depends on ALPHA_JENSEN || (ALPHA_SABLE && !ALPHA_GAMMA) || ALPHA_LYNX || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
- default y if !ALPHA_LYNX
- default y if !ALPHA_EV5
-
-config ALPHA_LCA
- bool
- depends on ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
- default y
-
-config ALPHA_APECS
- bool
- depends on !ALPHA_PRIMO && (ALPHA_NORITAKE || ALPHA_MIKASA) || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL
- default y
-
-config ALPHA_EB64P
- bool
- depends on ALPHA_CABRIOLET || ALPHA_EB64P_CH
- default y
- help
- Uses 21064 or 21064A and APECs. Has ISA and PCI expansion (3 ISA,
- 2 PCI, one pair are on a shared slot). Supports 36-bit DRAM SIMs.
- ISA bus generated by Intel SaturnI/O PCI-ISA bridge. On-board SCSI
- (NCR 810 on PCI) Ethernet (Digital 21040), KBD, MOUSE (PS2 style),
- SuperI/O (2S, 1P, FD), RTC/NVRAM. Boot ROM is EPROM. PC-AT size.
- Runs from standard PC power supply.
-
-config ALPHA_EV5
- bool "EV5 CPU(s) (model 5/xxx)?" if ALPHA_LYNX
- default y if ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
-
config ALPHA_CIA
bool
- depends on ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
+ depends on ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_NORITAKE || ALPHA_MIKASA || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_ALCOR
default y
config ALPHA_EV56
- bool "EV56 CPU (speed >= 366MHz)?" if ALPHA_ALCOR
- default y if ALPHA_RX164 || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_PC164 || ALPHA_TAKARA
-
-config ALPHA_EV56
- prompt "EV56 CPU (speed >= 333MHz)?"
- depends on ALPHA_NORITAKE || ALPHA_PRIMO
-
-config ALPHA_EV56
- prompt "EV56 CPU (speed >= 400MHz)?"
- depends on ALPHA_RAWHIDE
-
-config ALPHA_PRIMO
- bool "EV5 CPU daughtercard (model 5/xxx)?"
- depends on ALPHA_NORITAKE || ALPHA_MIKASA
- help
- Say Y if you have an AS 1000 5/xxx or an AS 1000A 5/xxx.
-
-config ALPHA_GAMMA
- bool "EV5 CPU(s) (model 5/xxx)?" if ALPHA_SABLE
- depends on ALPHA_SABLE || ALPHA_LYNX
- default ALPHA_LYNX
- help
- Say Y if you have an AS 2000 5/xxx or an AS 2100 5/xxx.
+ bool
+ default y if ALPHA_ALCOR || ALPHA_RX164 || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_NORITAKE || ALPHA_MIKASA || ALPHA_RAWHIDE || ALPHA_SABLE
config ALPHA_T2
bool
- depends on ALPHA_SABLE || ALPHA_LYNX
+ depends on ALPHA_SABLE
default y
config ALPHA_PYXIS
@@ -443,15 +295,6 @@ config GENERIC_HWEIGHT
bool
default y if !ALPHA_EV67
-config ALPHA_AVANTI
- bool
- depends on ALPHA_XL || ALPHA_AVANTI_CH
- default y
- help
- Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
- Alphas. Info at
- <http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
-
config ALPHA_BROKEN_IRQ_MASK
bool
depends on ALPHA_GENERIC || ALPHA_PC164
@@ -481,9 +324,9 @@ config ALPHA_QEMU
config ALPHA_SRM
- bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
+ bool "Use SRM as bootloader" if ALPHA_PC164 || ALPHA_TAKARA || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS
depends on TTY
- default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
+ default y if ALPHA_MIKASA || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
help
There are two different types of booting firmware on Alphas: SRM,
which is command line driven, and ARC, which uses menus and arrow
@@ -509,7 +352,7 @@ config ARCH_MAY_HAVE_PC_FDC
config SMP
bool "Symmetric multi-processing support"
- depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+ depends on ALPHA_SABLE || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
@@ -545,7 +388,7 @@ config ARCH_SPARSEMEM_ENABLE
config ALPHA_WTINT
bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
default y if ALPHA_QEMU
- default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA)
+ default n if ALPHA_EV56
default n if !ALPHA_SRM && !ALPHA_GENERIC
default y if SMP
help
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 45158024085e..35445ff2e489 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -15,18 +15,14 @@ CHECKFLAGS += -D__alpha__
cflags-y := -pipe -mno-fp-regs -ffixed-8
cflags-y += $(call cc-option, -fno-jump-tables)
-cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
-cpuflags-$(CONFIG_ALPHA_EV5) := -mcpu=ev5
cpuflags-$(CONFIG_ALPHA_EV56) := -mcpu=ev56
cpuflags-$(CONFIG_ALPHA_POLARIS) := -mcpu=pca56
cpuflags-$(CONFIG_ALPHA_SX164) := -mcpu=pca56
cpuflags-$(CONFIG_ALPHA_EV6) := -mcpu=ev6
cpuflags-$(CONFIG_ALPHA_EV67) := -mcpu=ev67
# If GENERIC, make sure to turn off any instruction set extensions that
-# the host compiler might have on by default. Given that EV4 and EV5
-# have the same instruction set, prefer EV5 because an EV5 schedule is
-# more likely to keep an EV4 processor busy than vice-versa.
-cpuflags-$(CONFIG_ALPHA_GENERIC) := -mcpu=ev5
+# the host compiler might have on by default.
+cpuflags-$(CONFIG_ALPHA_GENERIC) := -mcpu=ev56 -mtune=ev6
cflags-y += $(cpuflags-y)
diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
deleted file mode 100644
index 69a2fc62c9c3..000000000000
--- a/arch/alpha/include/asm/core_apecs.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_APECS__H__
-#define __ALPHA_APECS__H__
-
-#include <linux/types.h>
-#include <asm/compiler.h>
-
-/*
- * APECS is the internal name for the 2107x chipset which provides
- * memory controller and PCI access for the 21064 chip based systems.
- *
- * This file is based on:
- *
- * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
- * Data Sheet
- *
- * EC-N0648-72
- *
- *
- * david.rusling@reo.mts.dec.com Initial Version.
- *
- */
-
-/*
- An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
- that get passed through the PCI<->ISA bridge chip. So we've gotta use
- both windows to max out the physical memory we can DMA to. Sigh...
-
- If we try a window at 0 for 1GB as a work-around, we run into conflicts
- with ISA/PCI bus memory which can't be relocated, like VGA aperture and
- BIOS ROMs. So we must put the windows high enough to avoid these areas.
-
- We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
- and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
- Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
- be used for that range (via virt_to_bus()).
-
- Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
- to keep virt_to_bus() from returning an address in the first window, for
- a data area that goes beyond the 64Mb first DMA window. Sigh...
- The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
- we can't just use that here, because of header file looping... :-(
-
- Window 1 will be used for all DMA from the ISA bus; yes, that does
- limit what memory an ISA floppy or sound card or Ethernet can touch, but
- it's also a known limitation on other platforms as well. We use the
- same technique that is used on INTEL platforms with similar limitation:
- set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
- We trust that any ISA bus device drivers will *always* ask for DMAable
- memory explicitly via kmalloc()/get_free_pages() flags arguments.
-
- Note that most PCI bus devices' drivers do *not* explicitly ask for
- DMAable memory; they count on being able to DMA to any memory they
- get from kmalloc()/get_free_pages(). They will also use window 1 for
- any physical memory accesses below 64Mb; the rest will be handled by
- window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
-
- We hope that the area before the first window is large enough so that
- there will be no overlap at the top end (64Mb). We *must* locate the
- PCI cards' memory just below window 1, so that there's still the
- possibility of being able to access it via SPARSE space. This is
- important for cards such as the Matrox Millennium, whose Xserver
- wants to access memory-mapped registers in byte and short lengths.
-
- Note that the XL is treated differently from the AVANTI, even though
- for most other things they are identical. It didn't seem reasonable to
- make the AVANTI support pay for the limitations of the XL. It is true,
- however, that an XL kernel will run on an AVANTI without problems.
-
- %%% All of this should be obviated by the ability to route
- everything through the iommu.
-*/
-
-/*
- * 21071-DA Control and Status registers.
- * These are used for PCI memory access.
- */
-#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL)
-#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL)
-#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL)
-#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL)
-#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL)
-#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL)
-
-#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL)
-#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL)
-
-#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL)
-#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL)
-
-#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL)
-#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL)
-
-#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL)
-#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL)
-#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL)
-
-#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL)
-
-#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL)
-#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL)
-#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL)
-#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL)
-#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL)
-#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL)
-#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL)
-#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL)
-
-#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL)
-#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL)
-#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL)
-#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL)
-#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL)
-#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL)
-#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL)
-#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL)
-
-#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL)
-
-
-/*
- * 21071-CA Control and Status registers.
- * These are used to program memory timing,
- * configure memory and initialise the B-Cache.
- */
-#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL)
-#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL)
-#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL)
-#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL)
-#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL)
-#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL)
-#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL)
-#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL)
-#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL)
-#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL)
-#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL)
-#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL)
-
-/* Bank x Base Address Register */
-#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL)
-#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL)
-#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL)
-#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL)
-#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL)
-#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL)
-#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL)
-#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL)
-#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL)
-
-/* Bank x Configuration Register */
-#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL)
-#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL)
-#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL)
-#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL)
-#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL)
-#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL)
-#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL)
-#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL)
-#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL)
-
-/* Bank x Timing Register A */
-#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL)
-#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL)
-#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL)
-#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL)
-#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL)
-#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL)
-#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL)
-#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL)
-#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL)
-
-/* Bank x Timing Register B */
-#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL)
-#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL)
-#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL)
-#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL)
-#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL)
-#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL)
-#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL)
-#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL)
-#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL)
-
-
-/*
- * Memory spaces:
- */
-#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL)
-#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define APECS_IO (IDENT_ADDR + 0x1c0000000UL)
-#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define APECS_IOC_STAT0_CMD 0xf
-#define APECS_IOC_STAT0_ERR (1<<4)
-#define APECS_IOC_STAT0_LOST (1<<5)
-#define APECS_IOC_STAT0_THIT (1<<6)
-#define APECS_IOC_STAT0_TREF (1<<7)
-#define APECS_IOC_STAT0_CODE_SHIFT 8
-#define APECS_IOC_STAT0_CODE_MASK 0x7
-#define APECS_IOC_STAT0_P_NBR_SHIFT 13
-#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define APECS_HAE_ADDRESS APECS_IOC_HAXR1
-
-
-/*
- * Data structure for handling APECS machine checks:
- */
-
-struct el_apecs_mikasa_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_base3;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long coma_cnfg3;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-
- unsigned long pceb_vid;
- unsigned long pceb_did;
- unsigned long pceb_revision;
- unsigned long pceb_command;
- unsigned long pceb_status;
- unsigned long pceb_latency;
- unsigned long pceb_control;
- unsigned long pceb_arbcon;
- unsigned long pceb_arbpri;
-
- unsigned long esc_id;
- unsigned long esc_revision;
- unsigned long esc_int0;
- unsigned long esc_int1;
- unsigned long esc_elcr0;
- unsigned long esc_elcr1;
- unsigned long esc_last_eisa;
- unsigned long esc_nmi_stat;
-
- unsigned long pci_ir;
- unsigned long pci_imr;
- unsigned long svr_mgr;
-};
-
-/* This for the normal APECS machines. */
-struct el_apecs_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-};
-
-struct el_apecs_procdata
-{
- unsigned long paltemp[32]; /* PAL TEMP REGS. */
- /* EV4-specific fields */
- unsigned long exc_addr; /* Address of excepting instruction. */
- unsigned long exc_sum; /* Summary of arithmetic traps. */
- unsigned long exc_mask; /* Exception mask (from exc_sum). */
- unsigned long iccsr; /* IBox hardware enables. */
- unsigned long pal_base; /* Base address for PALcode. */
- unsigned long hier; /* Hardware Interrupt Enable. */
- unsigned long hirr; /* Hardware Interrupt Request. */
- unsigned long csr; /* D-stream fault info. */
- unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */
- unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
- unsigned long abox_ctl; /* ABox Control Register. */
- unsigned long biu_stat; /* BIU Status. */
- unsigned long biu_addr; /* BUI Address. */
- unsigned long biu_ctl; /* BIU Control. */
- unsigned long fill_syndrome;/* For correcting ECC errors. */
- unsigned long fill_addr; /* Cache block which was being read */
- unsigned long va; /* Effective VA of fault or miss. */
- unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/
-};
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the APECS machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define APECS_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-__EXTERN_INLINE u8 apecs_ioread8(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u16 apecs_ioread16(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u32 apecs_ioread32(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE u64 apecs_ioread64(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- return *(vulp)addr;
-}
-
-__EXTERN_INLINE void apecs_iowrite64(u64 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- *(vulp)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + APECS_IO);
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + APECS_DENSE_MEM);
-}
-
-__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x180000000UL;
-}
-
-__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= APECS_DENSE_MEM;
-}
-
-#undef APECS_SET_HAE
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX apecs
-#define apecs_trivial_io_bw 0
-#define apecs_trivial_io_lq 0
-#define apecs_trivial_rw_bw 2
-#define apecs_trivial_rw_lq 1
-#define apecs_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_APECS__H__ */
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h
deleted file mode 100644
index d8c3e72ef8f6..000000000000
--- a/arch/alpha/include/asm/core_lca.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_LCA__H__
-#define __ALPHA_LCA__H__
-
-#include <asm/compiler.h>
-#include <asm/mce.h>
-
-/*
- * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
- * for example).
- *
- * This file is based on:
- *
- * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
- * Hardware Reference Manual; Digital Equipment Corp.; May 1994;
- * Maynard, MA; Order Number: EC-N2681-71.
- */
-
-/*
- * NOTE: The LCA uses a Host Address Extension (HAE) register to access
- * PCI addresses that are beyond the first 27 bits of address
- * space. Updating the HAE requires an external cycle (and
- * a memory barrier), which tends to be slow. Instead of updating
- * it on each sparse memory access, we keep the current HAE value
- * cached in variable cache_hae. Only if the cached HAE differs
- * from the desired HAE value do we actually updated HAE register.
- * The HAE register is preserved by the interrupt handler entry/exit
- * code, so this scheme works even in the presence of interrupts.
- *
- * Dense memory space doesn't require the HAE, but is restricted to
- * aligned 32 and 64 bit accesses. Special Cycle and Interrupt
- * Acknowledge cycles may also require the use of the HAE. The LCA
- * limits I/O address space to the bottom 24 bits of address space,
- * but this easily covers the 16 bit ISA I/O address space.
- */
-
-/*
- * NOTE 2! The memory operations do not set any memory barriers, as
- * it's not needed for cases like a frame buffer that is essentially
- * memory-like. You need to do them by hand if the operations depend
- * on ordering.
- *
- * Similarly, the port I/O operations do a "mb" only after a write
- * operation: if an mb is needed before (as in the case of doing
- * memory mapped I/O first, and then a port I/O operation to the same
- * device), it needs to be done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do
- * the mb all the time, but right now I'm hoping this will work out.
- * Avoiding mb's may potentially be a noticeable speed improvement,
- * but I can't honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to
- * non-interrupts is another fun race area. Don't do it (because if
- * you do, I'll have to do *everything* with interrupts disabled,
- * ugh).
- */
-
-/*
- * Memory Controller registers:
- */
-#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL)
-#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL)
-#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL)
-#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL)
-#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL)
-#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL)
-#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL)
-#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL)
-#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL)
-#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL)
-#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL)
-#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL)
-#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL)
-#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL)
-#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL)
-#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL)
-#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL)
-#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL)
-#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL)
-
-/*
- * I/O Controller registers:
- */
-#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL)
-#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL)
-#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL)
-#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL)
-#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL)
-#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL)
-#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL)
-#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL)
-#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL)
-#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL)
-#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL)
-#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL)
-#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL)
-#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL)
-#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL)
-#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL)
-#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL)
-#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL)
-#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL)
-#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL)
-#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL)
-
-/*
- * Memory spaces:
- */
-#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL)
-#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define LCA_IO (IDENT_ADDR + 0x1c0000000UL)
-#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define LCA_IOC_STAT0_CMD 0xf
-#define LCA_IOC_STAT0_ERR (1<<4)
-#define LCA_IOC_STAT0_LOST (1<<5)
-#define LCA_IOC_STAT0_THIT (1<<6)
-#define LCA_IOC_STAT0_TREF (1<<7)
-#define LCA_IOC_STAT0_CODE_SHIFT 8
-#define LCA_IOC_STAT0_CODE_MASK 0x7
-#define LCA_IOC_STAT0_P_NBR_SHIFT 13
-#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define LCA_HAE_ADDRESS LCA_IOC_HAE
-
-/* LCA PMR Power Management register defines */
-#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL)
-#define LCA_PMR_PDIV 0x7 /* Primary clock divisor */
-#define LCA_PMR_ODIV 0x38 /* Override clock divisor */
-#define LCA_PMR_INTO 0x40 /* Interrupt override */
-#define LCA_PMR_DMAO 0x80 /* DMA override */
-#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */
-#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even bits */
-#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L
-
-/* LCA PMR Macros */
-
-#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
-#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
-
-#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
-#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
-#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
-
-/* LCA PMR Divisor values */
-#define LCA_PMR_DIV_1 0x0
-#define LCA_PMR_DIV_1_5 0x1
-#define LCA_PMR_DIV_2 0x2
-#define LCA_PMR_DIV_4 0x3
-#define LCA_PMR_DIV_8 0x4
-#define LCA_PMR_DIV_16 0x5
-#define LCA_PMR_DIV_MIN DIV_1
-#define LCA_PMR_DIV_MAX DIV_16
-
-
-/*
- * Data structure for handling LCA machine checks. Correctable errors
- * result in a short logout frame, uncorrectable ones in a long one.
- */
-struct el_lca_mcheck_short {
- struct el_common h; /* common logout header */
- unsigned long esr; /* error-status register */
- unsigned long ear; /* error-address register */
- unsigned long dc_stat; /* dcache status register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
-};
-
-struct el_lca_mcheck_long {
- struct el_common h; /* common logout header */
- unsigned long pt[31]; /* PAL temps */
- unsigned long exc_addr; /* exception address */
- unsigned long pad1[3];
- unsigned long pal_base; /* PALcode base address */
- unsigned long hier; /* hw interrupt enable */
- unsigned long hirr; /* hw interrupt request */
- unsigned long mm_csr; /* MMU control & status */
- unsigned long dc_stat; /* data cache status */
- unsigned long dc_addr; /* data cache addr register */
- unsigned long abox_ctl; /* address box control register */
- unsigned long esr; /* error status register */
- unsigned long ear; /* error address register */
- unsigned long car; /* cache control register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
- unsigned long va; /* virtual address register */
-};
-
-union el_lca {
- struct el_common * c;
- struct el_lca_mcheck_long * l;
- struct el_lca_mcheck_short * s;
-};
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the Noname machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define LCA_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-
-__EXTERN_INLINE u8 lca_ioread8(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u16 lca_ioread16(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u32 lca_ioread32(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE u64 lca_ioread64(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- return *(vulp)addr;
-}
-
-__EXTERN_INLINE void lca_iowrite64(u64 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- *(vulp)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + LCA_IO);
-}
-
-__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + LCA_DENSE_MEM);
-}
-
-__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x120000000UL;
-}
-
-__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= LCA_DENSE_MEM;
-}
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX lca
-#define lca_trivial_rw_bw 2
-#define lca_trivial_rw_lq 1
-#define lca_trivial_io_bw 0
-#define lca_trivial_io_lq 0
-#define lca_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_LCA__H__ */
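The sparse-space accessors removed above (lca_ioread8() and friends) follow one pattern: shift the byte offset left by 5 so each byte gets its own 32-byte slot in the sparse window, add a transfer-size code (0x00 byte, 0x08 word, 0x18 longword), perform a 32-bit access, and, for sub-longword sizes, pick the wanted byte lane out of the result. A minimal host-side sketch of just that address arithmetic is below; EXAMPLE_SPARSE_BASE is a made-up stand-in rather than the real LCA window, and nothing here touches hardware.

#include <stdio.h>
#include <stdint.h>

/* Illustrative base only; the real LCA sparse window lives off IDENT_ADDR. */
#define EXAMPLE_SPARSE_BASE	0x1c0000000ULL

/* Size codes used by the removed helpers: 0x00 byte, 0x08 word, 0x18 longword. */
static uint64_t sparse_addr(uint64_t byte_off, unsigned size_code)
{
	return (byte_off << 5) + EXAMPLE_SPARSE_BASE + size_code;
}

int main(void)
{
	uint64_t off = 0x123;

	/* A byte read would hit this 32-bit location and then use
	   extbl(result, off & 3) to pick the right byte lane. */
	printf("byte access -> %#llx, lane %llu\n",
	       (unsigned long long)sparse_addr(off, 0x00),
	       (unsigned long long)(off & 3));
	printf("word access -> %#llx\n",
	       (unsigned long long)sparse_addr(off, 0x08));
	printf("long access -> %#llx\n",
	       (unsigned long long)sparse_addr(off, 0x18));
	return 0;
}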
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index ab956b1625b5..ca9b091d9c5f 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -25,16 +25,8 @@
#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
-/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
#define _GAMMA_BIAS 0x8000000000UL
-
-#if defined(CONFIG_ALPHA_GENERIC)
-#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias
-#elif defined(CONFIG_ALPHA_GAMMA)
#define GAMMA_BIAS _GAMMA_BIAS
-#else
-#define GAMMA_BIAS 0
-#endif
/*
* Memory spaces:
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 6ce7e2041685..ad5a59b035cb 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -6,11 +6,7 @@ extern const struct dma_map_ops alpha_pci_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
-#ifdef CONFIG_ALPHA_JENSEN
- return NULL;
-#else
return &alpha_pci_ops;
-#endif
}
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h
index a04d76b96089..3a88812b7165 100644
--- a/arch/alpha/include/asm/dma.h
+++ b/arch/alpha/include/asm/dma.h
@@ -82,11 +82,6 @@
just a wiring limit.
*/
-/* The maximum address for ISA DMA transfer on Alpha XL, due to a
- hardware SIO limitation, is 64MB.
-*/
-#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL
-
/* The maximum address for ISA DMA transfer on RUFFIAN,
due to a hardware SIO limitation, is 16MB.
*/
@@ -107,9 +102,7 @@
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address)
#else
-# if defined(CONFIG_ALPHA_XL)
-# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS
-# elif defined(CONFIG_ALPHA_RUFFIAN)
+# if defined(CONFIG_ALPHA_RUFFIAN)
# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_SABLE)
# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index e6da23f1da83..4d7c46f50382 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -133,9 +133,7 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
#define ELF_PLATFORM \
({ \
enum implver_enum i_ = implver(); \
- ( i_ == IMPLVER_EV4 ? "ev4" \
- : i_ == IMPLVER_EV5 \
- ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
+ ( i_ == IMPLVER_EV5 ? "ev56" \
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4f47a5003fe8..2bb8cbeedf91 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -203,16 +203,10 @@ static inline int generic_is_mmio(const volatile void __iomem *a)
#else
-#if defined(CONFIG_ALPHA_APECS)
-# include <asm/core_apecs.h>
-#elif defined(CONFIG_ALPHA_CIA)
+#if defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
-#elif defined(CONFIG_ALPHA_JENSEN)
-# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_LCA)
-# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
@@ -631,23 +625,7 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define outsw outsw
#define outsl outsl
-/*
- * The Alpha Jensen hardware for some rather strange reason puts
- * the RTC clock at 0x170 instead of 0x70. Probably due to some
- * misguided idea about using 0x70 for NMI stuff.
- *
- * These defines will override the defaults when doing RTC queries
- */
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
-#else
-# ifdef CONFIG_ALPHA_JENSEN
-# define RTC_PORT(x) (0x170+(x))
-# else
-# define RTC_PORT(x) (0x70 + (x))
-# endif
-#endif
+#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD 0
/*
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 432402c8e47f..d83b26b6660f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -31,16 +31,11 @@
# define NR_IRQS (32768 + 16) /* marvel - 32 pids */
# endif
-#elif defined(CONFIG_ALPHA_CABRIOLET) || \
- defined(CONFIG_ALPHA_EB66P) || \
- defined(CONFIG_ALPHA_EB164) || \
- defined(CONFIG_ALPHA_PC164) || \
+#elif defined(CONFIG_ALPHA_PC164) || \
defined(CONFIG_ALPHA_LX164)
# define NR_IRQS 35
-#elif defined(CONFIG_ALPHA_EB66) || \
- defined(CONFIG_ALPHA_EB64P) || \
- defined(CONFIG_ALPHA_MIKASA)
+#elif defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS 32
#elif defined(CONFIG_ALPHA_ALCOR) || \
@@ -55,7 +50,6 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
- defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
deleted file mode 100644
index 66eb049eb421..000000000000
--- a/arch/alpha/include/asm/jensen.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_JENSEN_H
-#define __ALPHA_JENSEN_H
-
-#include <asm/compiler.h>
-
-/*
- * Defines for the AlphaPC EISA IO and memory address space.
- */
-
-/*
- * NOTE! The memory operations do not set any memory barriers, as it's
- * not needed for cases like a frame buffer that is essentially memory-like.
- * You need to do them by hand if the operations depend on ordering.
- *
- * Similarly, the port IO operations do a "mb" only after a write operation:
- * if an mb is needed before (as in the case of doing memory mapped IO
- * first, and then a port IO operation to the same device), it needs to be
- * done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do the
- * mb all the time, but right now I'm hoping this will work out. Avoiding
- * mb's may potentially be a noticeable speed improvement, but I can't
- * honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to non-interrupts
- * is another fun race area. Don't do it (because if you do, I'll have to
- * do *everything* with interrupts disabled, ugh).
- */
-
-/*
- * EISA Interrupt Acknowledge address
- */
-#define EISA_INTA (IDENT_ADDR + 0x100000000UL)
-
-/*
- * FEPROM addresses
- */
-#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL)
-#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL)
-
-/*
- * VL82C106 base address
- */
-#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL)
-
-/*
- * EISA "Host Address Extension" address (bits 25-31 of the EISA address)
- */
-#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL)
-
-/*
- * "SYSCTL" register address
- */
-#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL)
-
-/*
- * "spare" register address
- */
-#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL)
-
-/*
- * EISA memory address offset
- */
-#define EISA_MEM (IDENT_ADDR + 0x200000000UL)
-
-/*
- * EISA IO address offset
- */
-#define EISA_IO (IDENT_ADDR + 0x300000000UL)
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * Handle the "host address register". This needs to be set
- * to the high 7 bits of the EISA address. This is also needed
- * for EISA IO addresses, which are only 16 bits wide (the
- * hae needs to be set to 0).
- *
- * HAE isn't needed for the local IO operations, though.
- */
-
-#define JENSEN_HAE_ADDRESS EISA_HAE
-#define JENSEN_HAE_MASK 0x1ffffff
-
-__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
-{
- /* hae on the Jensen is bits 31:25 shifted right */
- addr >>= 25;
- if (addr != alpha_mv.hae_cache)
- set_hae(addr);
-}
-
-#define vuip volatile unsigned int *
-#define vulp volatile unsigned long *
-
-/*
- * IO functions
- *
- * The "local" functions are those that don't go out to the EISA bus,
- * but instead act on the VL82C106 chip directly.. This is mainly the
- * keyboard, RTC, printer and first two serial lines..
- *
- * The local stuff makes for some complications, but it seems to be
- * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
- * convinced that I need one of the newer machines.
- */
-
-__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
-{
- return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
-}
-
-__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
-{
- *(vuip)((addr << 9) + EISA_VL82C106) = b;
- mb();
-}
-
-__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
- mb();
-}
-
-/*
- * It seems gcc is not very good at optimizing away logical
- * operations that result in operations across inline functions.
- * Which is why this is a macro.
- */
-
-#define jensen_is_local(addr) ( \
-/* keyboard */ (addr == 0x60 || addr == 0x64) || \
-/* RTC */ (addr == 0x170 || addr == 0x171) || \
-/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \
-/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \
-/* mb COM1 */ (addr >= 0x3f8 && addr <= 0x3ff))
-
-__EXTERN_INLINE u8 jensen_inb(unsigned long addr)
-{
- if (jensen_is_local(addr))
- return jensen_local_inb(addr);
- else
- return jensen_bus_inb(addr);
-}
-
-__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr)
-{
- if (jensen_is_local(addr))
- jensen_local_outb(b, addr);
- else
- jensen_bus_outb(b, addr);
-}
-
-__EXTERN_INLINE u16 jensen_inw(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_inl(unsigned long addr)
-{
- jensen_set_hae(0);
- return *(vuip) ((addr << 7) + EISA_IO + 0x60);
-}
-
-__EXTERN_INLINE u64 jensen_inq(unsigned long addr)
-{
- jensen_set_hae(0);
- return *(vulp) ((addr << 7) + EISA_IO + 0x60);
-}
-
-__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
- mb();
-}
-
-__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
- mb();
-}
-
-__EXTERN_INLINE void jensen_outq(u64 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vulp) ((addr << 7) + EISA_IO + 0x60) = b;
- mb();
-}
-
-/*
- * Memory functions.
- */
-
-__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
-}
-
-__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
-}
-
-__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long r0, r1;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- r0 = *(vuip) (addr);
- r1 = *(vuip) (addr + (4 << 7));
- return r1 << 32 | r0;
-}
-
-__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
-}
-
-__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
-}
-
-__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
-}
-
-__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- *(vuip) (addr) = b;
- *(vuip) (addr + (4 << 7)) = b >> 32;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
-{
- return (void __iomem *)addr;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + 0x100000000ul);
-}
-
-__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
-{
- return (long)addr >= 0;
-}
-
-__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= 0x100000000ul;
-}
-
-/* New-style ioread interface. All the routines are so ugly for Jensen
- that it doesn't make sense to merge them. */
-
-#define IOPORT(OS, NS) \
-__EXTERN_INLINE u##NS jensen_ioread##NS(const void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- return jensen_read##OS(xaddr - 0x100000000ul); \
- else \
- return jensen_in##OS((unsigned long)xaddr); \
-} \
-__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- jensen_write##OS(b, xaddr - 0x100000000ul); \
- else \
- jensen_out##OS(b, (unsigned long)xaddr); \
-}
-
-IOPORT(b, 8)
-IOPORT(w, 16)
-IOPORT(l, 32)
-IOPORT(q, 64)
-
-#undef IOPORT
-
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX jensen
-#define jensen_trivial_rw_bw 0
-#define jensen_trivial_rw_lq 0
-#define jensen_trivial_io_bw 0
-#define jensen_trivial_io_lq 0
-#define jensen_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_JENSEN_H */
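The Jensen memory helpers deleted above split a 32-bit EISA address into an HAE page (bits 31:25, written to the host-address-extension register) and a 25-bit in-window offset (JENSEN_HAE_MASK) that is then shifted by 7 into the sparse window. The small model below replays that split on a plain integer; fake_set_hae() is a stand-in for the real, hardware-specific set_hae(), and the example address is arbitrary.

#include <stdio.h>
#include <stdint.h>

#define JENSEN_HAE_MASK	0x1ffffff	/* low 25 bits stay inside the window */

/* Stand-in for the real set_hae(); it just records what would be written. */
static void fake_set_hae(uint32_t hae)
{
	printf("HAE register <- %#x\n", hae);
}

int main(void)
{
	uint32_t eisa_addr = 0x4a12345;			/* arbitrary EISA memory address */
	uint32_t hae = eisa_addr >> 25;			/* bits 31:25 select the 32MB page */
	uint32_t off = eisa_addr & JENSEN_HAE_MASK;	/* offset within that page */

	fake_set_hae(hae);
	/* A byte read then targets (off << 7) + EISA_MEM + 0x00 and picks
	   byte lane (off & 3) out of the 32-bit result. */
	printf("sparse offset = %#x, byte lane = %u\n",
	       off << 7, (unsigned)(off & 3));
	return 0;
}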
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 8623f995d34c..490fc880bb3f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -72,15 +72,6 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
int (*mv_is_mmio)(const volatile void __iomem *);
- void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *);
- void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
-
- void (*mv_flush_tlb_current)(struct mm_struct *);
- void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr);
-
void (*update_irq_hw)(unsigned long, unsigned long, int);
void (*ack_irq)(unsigned long);
void (*device_interrupt)(unsigned long vector);
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 29a3e3a1f02b..eee8fe836a59 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -71,9 +71,7 @@ __reload_thread(struct pcb_struct *pcb)
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN (alpha_mv.max_asn)
#else
-# ifdef CONFIG_ALPHA_EV4
-# define MAX_ASN EV4_MAX_ASN
-# elif defined(CONFIG_ALPHA_EV5)
+# if defined(CONFIG_ALPHA_EV56)
# define MAX_ASN EV5_MAX_ASN
# else
# define MAX_ASN EV6_MAX_ASN
@@ -162,26 +160,6 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
-__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next)
-{
- /* As described, ASN's are broken for TLB usage. But we can
- optimize for switching between threads -- if the mm is
- unchanged from current we needn't flush. */
- /* ??? May not be needed because EV4 PALcode recognizes that
- ASN's are broken and does a tbiap itself on swpctx, under
- the "Must set ASN or flush" rule. At least this is true
- for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
- I'm going to leave this here anyway, just to Be Sure. -- r~ */
- if (prev_mm != next_mm)
- tbiap();
-
- /* Do continue to allocate ASNs, because we can still use them
- to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next);
-}
-
extern void __load_new_mm_context(struct mm_struct *);
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr,
long cause, struct pt_regs *regs);
@@ -209,25 +187,8 @@ ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
__load_new_mm_context(next_mm);
}
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
- __load_new_mm_context(next_mm);
- tbiap();
-}
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
-# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev4_activate_mm((x),(y))
-# else
-# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev5_activate_mm((x),(y))
-# endif
-#endif
+#define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
+#define activate_mm(x,y) ev5_activate_mm((x),(y))
#define init_new_context init_new_context
static inline int
diff --git a/arch/alpha/include/asm/special_insns.h b/arch/alpha/include/asm/special_insns.h
index ca2c5c30b22e..798d0bdb11f9 100644
--- a/arch/alpha/include/asm/special_insns.h
+++ b/arch/alpha/include/asm/special_insns.h
@@ -15,10 +15,7 @@ enum implver_enum {
(enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
-#ifdef CONFIG_ALPHA_EV4
-#define implver() IMPLVER_EV4
-#endif
-#ifdef CONFIG_ALPHA_EV5
+#ifdef CONFIG_ALPHA_EV56
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 94dc37cf873a..ba4b359d6c39 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -14,16 +14,6 @@
extern void __load_new_mm_context(struct mm_struct *);
-/* Use a few helper functions to hide the ugly broken ASN
- numbers on early Alphas (ev4 and ev45). */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
- tbiap();
-}
-
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
@@ -35,19 +25,6 @@ ev5_flush_tlb_current(struct mm_struct *mm)
specific icache page. */
__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- int tbi_flag = 2;
- if (vma->vm_flags & VM_EXEC) {
- __load_new_mm_context(mm);
- tbi_flag = 3;
- }
- tbi(tbi_flag, addr);
-}
-
-__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
@@ -59,18 +36,8 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
}
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
+#define flush_tlb_current ev5_flush_tlb_current
+#define flush_tlb_current_page ev5_flush_tlb_current_page
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c32c2584c0b7..ef295cbb797c 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -96,9 +96,6 @@ struct __large_struct { unsigned long buf[100]; };
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
"2:\n" \
@@ -112,33 +109,6 @@ struct __large_struct { unsigned long buf[100]; };
EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- load, so we have to do a general unaligned operation. */
-
-#define __get_user_16(addr) \
-{ \
- long __gu_tmp; \
- __asm__("1: ldq_u %0,0(%3)\n" \
- "2: ldq_u %1,1(%3)\n" \
- " extwl %0,%3,%0\n" \
- " extwh %1,%3,%1\n" \
- " or %0,%1,%0\n" \
- "3:\n" \
- EXC(1b,3b,%0,%2) \
- EXC(2b,3b,%0,%2) \
- : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
- : "r"(addr), "2"(__gu_err)); \
-}
-
-#define __get_user_8(addr) \
- __asm__("1: ldq_u %0,0(%2)\n" \
- " extbl %0,%2,%0\n" \
- "2:\n" \
- EXC(1b,2b,%0,%1) \
- : "=&r"(__gu_val), "=r"(__gu_err) \
- : "r"(addr), "1"(__gu_err))
-#endif
extern void __put_user_unknown(void);
@@ -192,9 +162,6 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \
@@ -208,53 +175,6 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- write, so we have to do a general unaligned operation. */
-
-#define __put_user_16(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
- __asm__ __volatile__( \
- "1: ldq_u %2,1(%5)\n" \
- "2: ldq_u %1,0(%5)\n" \
- " inswh %6,%5,%4\n" \
- " inswl %6,%5,%3\n" \
- " mskwh %2,%5,%2\n" \
- " mskwl %1,%5,%1\n" \
- " or %2,%4,%2\n" \
- " or %1,%3,%1\n" \
- "3: stq_u %2,1(%5)\n" \
- "4: stq_u %1,0(%5)\n" \
- "5:\n" \
- EXC(1b,5b,$31,%0) \
- EXC(2b,5b,$31,%0) \
- EXC(3b,5b,$31,%0) \
- EXC(4b,5b,$31,%0) \
- : "=r"(__pu_err), "=&r"(__pu_tmp1), \
- "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
- "=&r"(__pu_tmp4) \
- : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
-}
-
-#define __put_user_8(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2; \
- __asm__ __volatile__( \
- "1: ldq_u %1,0(%4)\n" \
- " insbl %3,%4,%2\n" \
- " mskbl %1,%4,%1\n" \
- " or %1,%2,%1\n" \
- "2: stq_u %1,0(%4)\n" \
- "3:\n" \
- EXC(1b,3b,$31,%0) \
- EXC(2b,3b,$31,%0) \
- : "=r"(__pu_err), \
- "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
- : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
-}
-#endif
-
/*
* Complex access routines
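The #else branches removed above existed because pre-BWX Alphas have no byte or word store instructions, so a sub-word __put_user had to read the enclosing aligned quadword, merge the new byte in, and write the whole quadword back (the ldq_u/insbl/mskbl/or/stq_u sequence). The plain-C sketch below models that read-modify-write on an ordinary integer to show the idea; it assumes a little-endian layout, as on Alpha, and is an illustration rather than a replacement for the removed assembly.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Model of a pre-BWX byte store: read-modify-write on the aligned 64-bit word. */
static void store_byte_rmw(uint8_t *p, uint8_t val)
{
	uint64_t *q = (uint64_t *)((uintptr_t)p & ~7UL);	/* ldq_u: aligned base */
	unsigned shift = ((uintptr_t)p & 7) * 8;		/* little-endian lane  */
	uint64_t w;

	memcpy(&w, q, sizeof(w));
	w &= ~(0xffULL << shift);		/* mskbl: clear the target lane */
	w |= (uint64_t)val << shift;		/* insbl + or: merge the new byte */
	memcpy(q, &w, sizeof(w));		/* stq_u: write the quadword back */
}

int main(void)
{
	uint64_t storage = 0;			/* aligned backing quadword */
	uint8_t *buf = (uint8_t *)&storage;

	store_byte_rmw(&buf[3], 0xab);		/* assumes little-endian, as on Alpha */
	printf("buf[3] = %#x\n", buf[3]);
	return 0;
}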
diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h
index 4c347a8454c7..919931cb5b63 100644
--- a/arch/alpha/include/asm/vga.h
+++ b/arch/alpha/include/asm/vga.h
@@ -13,6 +13,7 @@
#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
static inline void scr_writew(u16 val, volatile u16 *addr)
{
@@ -40,6 +41,7 @@ static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
/* Do not trust that the usage will be correct; analyze the arguments. */
extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+extern void scr_memmovew(u16 *d, const u16 *s, unsigned int count);
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended
diff --git a/arch/alpha/include/uapi/asm/compiler.h b/arch/alpha/include/uapi/asm/compiler.h
index 0e00c0e13374..8c03740966b4 100644
--- a/arch/alpha/include/uapi/asm/compiler.h
+++ b/arch/alpha/include/uapi/asm/compiler.h
@@ -95,24 +95,6 @@
#define __kernel_ldwu(mem) (mem)
#define __kernel_stb(val,mem) ((mem) = (val))
#define __kernel_stw(val,mem) ((mem) = (val))
-#else
-#define __kernel_ldbu(mem) \
- ({ unsigned char __kir; \
- __asm__(".arch ev56; \
- ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_ldwu(mem) \
- ({ unsigned short __kir; \
- __asm__(".arch ev56; \
- ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_stb(val,mem) \
- __asm__(".arch ev56; \
- stb %1,%0" : "=m"(mem) : "r"(val))
-#define __kernel_stw(val,mem) \
- __asm__(".arch ev56; \
- stw %1,%0" : "=m"(mem) : "r"(val))
#endif
-
#endif /* _UAPI__ALPHA_COMPILER_H */
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index fb4efec7cbc7..b6c862dff1f6 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -22,14 +22,14 @@ obj-$(CONFIG_AUDIT) += audit.o
ifdef CONFIG_ALPHA_GENERIC
-obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o \
+obj-y += core_cia.o core_irongate.o \
core_mcpcia.o core_polaris.o core_t2.o \
core_tsunami.o
-obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
- sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
+obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eiger.o \
+ sys_miata.o sys_mikasa.o sys_nautilus.o \
sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
- sys_sable.o sys_sio.o sys_sx164.o sys_takara.o
+ sys_sable.o sys_sx164.o sys_takara.o
ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS
obj-y += core_marvel.o core_titan.o core_wildfire.o
@@ -48,10 +48,8 @@ else
obj-$(CONFIG_ALPHA_SRM) += srmcons.o
# Core logic support
-obj-$(CONFIG_ALPHA_APECS) += core_apecs.o
obj-$(CONFIG_ALPHA_CIA) += core_cia.o
obj-$(CONFIG_ALPHA_IRONGATE) += core_irongate.o
-obj-$(CONFIG_ALPHA_LCA) += core_lca.o
obj-$(CONFIG_ALPHA_MARVEL) += core_marvel.o gct.o
obj-$(CONFIG_ALPHA_MCPCIA) += core_mcpcia.o
obj-$(CONFIG_ALPHA_POLARIS) += core_polaris.o
@@ -62,12 +60,6 @@ obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o
# Board support
obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o
-obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- pc873xx.o
-obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- pc873xx.o
-obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- pc873xx.o
obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
smc37c93x.o
obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
@@ -75,10 +67,7 @@ obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
obj-$(CONFIG_ALPHA_DP264) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_SHARK) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_TITAN) += sys_titan.o irq_i8259.o smc37c669.o
-obj-$(CONFIG_ALPHA_EB64P) += sys_eb64p.o irq_i8259.o
-obj-$(CONFIG_ALPHA_EB66) += sys_eb64p.o irq_i8259.o
obj-$(CONFIG_ALPHA_EIGER) += sys_eiger.o irq_i8259.o
-obj-$(CONFIG_ALPHA_JENSEN) += sys_jensen.o pci-noop.o irq_i8259.o
obj-$(CONFIG_ALPHA_MARVEL) += sys_marvel.o
obj-$(CONFIG_ALPHA_MIATA) += sys_miata.o irq_pyxis.o irq_i8259.o \
es1888.o smc37c669.o
@@ -89,12 +78,6 @@ obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
-obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
-obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
-obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
-obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
-obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
-obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \
irq_srm.o smc37c669.o
obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o pc873xx.o
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
index bf1eedd27cf7..4cfeae42c79a 100644
--- a/arch/alpha/kernel/asm-offsets.c
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -10,35 +10,16 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/kbuild.h>
-#include <asm/io.h>
+#include <asm/machvec.h>
static void __used foo(void)
{
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_FP, offsetof(struct thread_info, fp));
DEFINE(TI_STATUS, offsetof(struct thread_info, status));
BLANK();
- DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
- DEFINE(TASK_CRED, offsetof(struct task_struct, cred));
- DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
- DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader));
- DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
- BLANK();
-
- DEFINE(CRED_UID, offsetof(struct cred, uid));
- DEFINE(CRED_EUID, offsetof(struct cred, euid));
- DEFINE(CRED_GID, offsetof(struct cred, gid));
- DEFINE(CRED_EGID, offsetof(struct cred, egid));
- BLANK();
-
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
- DEFINE(PT_PTRACED, PT_PTRACED);
- DEFINE(CLONE_VM, CLONE_VM);
- DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
- DEFINE(SIGCHLD, SIGCHLD);
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
diff --git a/arch/alpha/kernel/bugs.c b/arch/alpha/kernel/bugs.c
index 08cc10d7fa17..e8c51089325f 100644
--- a/arch/alpha/kernel/bugs.c
+++ b/arch/alpha/kernel/bugs.c
@@ -1,6 +1,7 @@
#include <asm/hwrpb.h>
#include <linux/device.h>
+#include <linux/cpu.h>
#ifdef CONFIG_SYSFS
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
index 5476279329a6..4193f76e9633 100644
--- a/arch/alpha/kernel/console.c
+++ b/arch/alpha/kernel/console.c
@@ -15,6 +15,7 @@
#include <asm/machvec.h>
#include "pci_impl.h"
+#include "proto.h"
#ifdef CONFIG_VGA_HOSE
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
deleted file mode 100644
index 6df765ff2b10..000000000000
--- a/arch/alpha/kernel/core_apecs.c
+++ /dev/null
@@ -1,420 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/core_apecs.c
- *
- * Rewritten for Apecs from the lca.c from:
- *
- * Written by David Mosberger (davidm@cs.arizona.edu) with some code
- * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
- * bios code.
- *
- * Code common to all APECS core logic chips.
- */
-
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/core_apecs.h>
-#undef __EXTERN_INLINE
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-
-#include <asm/ptrace.h>
-#include <asm/smp.h>
-#include <asm/mce.h>
-
-#include "proto.h"
-#include "pci_impl.h"
-
-/*
- * NOTE: Herein lie back-to-back mb instructions. They are magic.
- * One plausible explanation is that the i/o controller does not properly
- * handle the system transaction. Another involves timing. Ho hum.
- */
-
-/*
- * BIOS32-style PCI interface:
- */
-
-#define DEBUG_CONFIG 0
-
-#if DEBUG_CONFIG
-# define DBGC(args) printk args
-#else
-# define DBGC(args)
-#endif
-
-#define vuip volatile unsigned int *
-
-/*
- * Given a bus, device, and function number, compute resulting
- * configuration space address and setup the APECS_HAXR2 register
- * accordingly. It is therefore not safe to have concurrent
- * invocations to configuration space access routines, but there
- * really shouldn't be any need for this.
- *
- * Type 0:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:11 Device select bit.
- * 10:8 Function number
- * 7:2 Register number
- *
- * Type 1:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:24 reserved
- * 23:16 bus number (8 bits = 256 possible buses)
- * 15:11 Device number (5 bits)
- * 10:8 function number
- * 7:2 register number
- *
- * Notes:
- * The function number selects which function of a multi-function device
- * (e.g., SCSI and Ethernet).
- *
- * The register selects a DWORD (32 bit) register offset. Hence it
- * doesn't get shifted by 2 bits as we want to "drop" the bottom two
- * bits.
- */
-
-static int
-mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
- unsigned long *pci_addr, unsigned char *type1)
-{
- unsigned long addr;
- u8 bus = pbus->number;
-
- DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
- " pci_addr=0x%p, type1=0x%p)\n",
- bus, device_fn, where, pci_addr, type1));
-
- if (bus == 0) {
- int device = device_fn >> 3;
-
- /* type 0 configuration cycle: */
-
- if (device > 20) {
- DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
- device));
- return -1;
- }
-
- *type1 = 0;
- addr = (device_fn << 8) | (where);
- } else {
- /* type 1 configuration cycle: */
- *type1 = 1;
- addr = (bus << 16) | (device_fn << 8) | (where);
- }
- *pci_addr = addr;
- DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
- return 0;
-}
-
-static unsigned int
-conf_read(unsigned long addr, unsigned char type1)
-{
- unsigned long flags;
- unsigned int stat0, value;
- unsigned int haxr2 = 0;
-
- local_irq_save(flags); /* avoid getting hit by machine check */
-
- DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
-
- /* Reset status register to avoid losing errors. */
- stat0 = *(vuip)APECS_IOC_DCSR;
- *(vuip)APECS_IOC_DCSR = stat0;
- mb();
- DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));
-
- /* If Type1 access, must set HAE #2. */
- if (type1) {
- haxr2 = *(vuip)APECS_IOC_HAXR2;
- mb();
- *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
- DBGC(("conf_read: TYPE1 access\n"));
- }
-
- draina();
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
-
- /* Access configuration space. */
-
- /* Some SRMs step on these registers during a machine check. */
- asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
- : "$9", "$10", "$11", "$12", "$13", "$14", "memory");
-
- if (mcheck_taken(0)) {
- mcheck_taken(0) = 0;
- value = 0xffffffffU;
- mb();
- }
- mcheck_expected(0) = 0;
- mb();
-
-#if 1
- /*
- * david.rusling@reo.mts.dec.com. This code is needed for the
- * EB64+ as it does not generate a machine check (why I don't
- * know). When we build kernels for one particular platform
- * then we can make this conditional on the type.
- */
- draina();
-
- /* Now look for any errors. */
- stat0 = *(vuip)APECS_IOC_DCSR;
- DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));
-
- /* Is any error bit set? */
- if (stat0 & 0xffe0U) {
- /* If not NDEV, print status. */
- if (!(stat0 & 0x0800)) {
- printk("apecs.c:conf_read: got stat0=%x\n", stat0);
- }
-
- /* Reset error status. */
- *(vuip)APECS_IOC_DCSR = stat0;
- mb();
- wrmces(0x7); /* reset machine check */
- value = 0xffffffff;
- }
-#endif
-
- /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
- if (type1) {
- *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
- mb();
- }
- local_irq_restore(flags);
-
- return value;
-}
-
-static void
-conf_write(unsigned long addr, unsigned int value, unsigned char type1)
-{
- unsigned long flags;
- unsigned int stat0;
- unsigned int haxr2 = 0;
-
- local_irq_save(flags); /* avoid getting hit by machine check */
-
- /* Reset status register to avoid losing errors. */
- stat0 = *(vuip)APECS_IOC_DCSR;
- *(vuip)APECS_IOC_DCSR = stat0;
- mb();
-
- /* If Type1 access, must set HAE #2. */
- if (type1) {
- haxr2 = *(vuip)APECS_IOC_HAXR2;
- mb();
- *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
- }
-
- draina();
- mcheck_expected(0) = 1;
- mb();
-
- /* Access configuration space. */
- *(vuip)addr = value;
- mb();
- mb(); /* magic */
- mcheck_expected(0) = 0;
- mb();
-
-#if 1
- /*
- * david.rusling@reo.mts.dec.com. This code is needed for the
- * EB64+ as it does not generate a machine check (why I don't
- * know). When we build kernels for one particular platform
- * then we can make this conditional on the type.
- */
- draina();
-
- /* Now look for any errors. */
- stat0 = *(vuip)APECS_IOC_DCSR;
-
- /* Is any error bit set? */
- if (stat0 & 0xffe0U) {
- /* If not NDEV, print status. */
- if (!(stat0 & 0x0800)) {
- printk("apecs.c:conf_write: got stat0=%x\n", stat0);
- }
-
- /* Reset error status. */
- *(vuip)APECS_IOC_DCSR = stat0;
- mb();
- wrmces(0x7); /* reset machine check */
- }
-#endif
-
- /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
- if (type1) {
- *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
- mb();
- }
- local_irq_restore(flags);
-}
-
-static int
-apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *value)
-{
- unsigned long addr, pci_addr;
- unsigned char type1;
- long mask;
- int shift;
-
- if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- mask = (size - 1) * 8;
- shift = (where & 3) * 8;
- addr = (pci_addr << 5) + mask + APECS_CONF;
- *value = conf_read(addr, type1) >> (shift);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 value)
-{
- unsigned long addr, pci_addr;
- unsigned char type1;
- long mask;
-
- if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- mask = (size - 1) * 8;
- addr = (pci_addr << 5) + mask + APECS_CONF;
- conf_write(addr, value << ((where & 3) * 8), type1);
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops apecs_pci_ops =
-{
- .read = apecs_read_config,
- .write = apecs_write_config,
-};
-
-void
-apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
-{
- wmb();
- *(vip)APECS_IOC_TBIA = 0;
- mb();
-}
-
-void __init
-apecs_init_arch(void)
-{
- struct pci_controller *hose;
-
- /*
- * Create our single hose.
- */
-
- pci_isa_hose = hose = alloc_pci_controller();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->index = 0;
-
- hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
- hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
- hose->sparse_io_base = APECS_IO - IDENT_ADDR;
- hose->dense_io_base = 0;
-
- /*
- * Set up the PCI to main memory translation windows.
- *
- * Window 1 is direct access 1GB at 1GB
- * Window 2 is scatter-gather 8MB at 8MB (for isa)
- */
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
- SMP_CACHE_BYTES);
- hose->sg_pci = NULL;
- __direct_map_base = 0x40000000;
- __direct_map_size = 0x40000000;
-
- *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
- *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
- *(vuip)APECS_IOC_TB1R = 0;
-
- *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
- *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
- *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;
-
- apecs_pci_tbi(hose, 0, -1);
-
- /*
- * Finally, clear the HAXR2 register, which gets used
- * for PCI Config Space accesses. That is the way
- * we want to use it, and we do not want to depend on
- * what ARC or SRM might have left behind...
- */
- *(vuip)APECS_IOC_HAXR2 = 0;
- mb();
-}
-
-void
-apecs_pci_clr_err(void)
-{
- unsigned int jd;
-
- jd = *(vuip)APECS_IOC_DCSR;
- if (jd & 0xffe0L) {
- *(vuip)APECS_IOC_SEAR;
- *(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
- mb();
- *(vuip)APECS_IOC_DCSR;
- }
- *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
- mb();
- *(vuip)APECS_IOC_TBIA;
-}
-
-void
-apecs_machine_check(unsigned long vector, unsigned long la_ptr)
-{
- struct el_common *mchk_header;
- struct el_apecs_procdata *mchk_procdata;
- struct el_apecs_sysdata_mcheck *mchk_sysdata;
-
- mchk_header = (struct el_common *)la_ptr;
-
- mchk_procdata = (struct el_apecs_procdata *)
- (la_ptr + mchk_header->proc_offset
- - sizeof(mchk_procdata->paltemp));
-
- mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
- (la_ptr + mchk_header->sys_offset);
-
-
- /* Clear the error before any reporting. */
- mb();
- mb(); /* magic */
- draina();
- apecs_pci_clr_err();
- wrmces(0x7); /* reset machine check pending flag */
- mb();
-
- process_mcheck_info(vector, la_ptr, "APECS",
- (mcheck_expected(0)
- && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
-}
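Both removed core files build PCI configuration addresses the way the large comment block above describes: a type 0 cycle on bus 0 encodes devfn and register directly, while a type 1 cycle prepends the bus number. The sketch below mirrors only the devfn/where packing of the removed mk_conf_addr() in core_apecs.c; the final shift into the sparse config window and the HAXR2 handling are deliberately left out, so it is an illustration, not a usable access path.

#include <stdio.h>

/*
 * Sketch of the config-cycle address encoding used by the removed
 * mk_conf_addr() in core_apecs.c.  Returns -1 for devices the type 0
 * decode cannot reach, mirroring the original check.
 */
static long apecs_style_conf_addr(unsigned bus, unsigned devfn, unsigned where,
				  int *type1)
{
	if (bus == 0) {
		if ((devfn >> 3) > 20)		/* only IDSEL lines 0..20 */
			return -1;
		*type1 = 0;
		return (devfn << 8) | where;	/* type 0: devfn + register */
	}
	*type1 = 1;
	return ((long)bus << 16) | (devfn << 8) | where;	/* type 1 */
}

int main(void)
{
	int type1;
	long a = apecs_style_conf_addr(0, (9 << 3) | 0, 0x04, &type1);
	long b = apecs_style_conf_addr(2, (3 << 3) | 1, 0x10, &type1);

	printf("bus 0 dev 9 fn 0 reg 0x04 -> %#lx\n", (unsigned long)a);
	printf("bus 2 dev 3 fn 1 reg 0x10 -> %#lx (type1=%d)\n",
	       (unsigned long)b, type1);
	return 0;
}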
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 12926e9538b8..ca3d9c732b61 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -280,7 +280,7 @@ cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
#define CIA_BROKEN_TBIA_SIZE 1024
/* Always called with interrupts disabled */
-void
+static void
cia_pci_tbi_try2(struct pci_controller *hose,
dma_addr_t start, dma_addr_t end)
{
@@ -576,7 +576,7 @@ struct
} window[4];
} saved_config __attribute((common));
-void
+static void
cia_save_srm_settings(int is_pyxis)
{
int i;
@@ -602,7 +602,7 @@ cia_save_srm_settings(int is_pyxis)
mb();
}
-void
+static void
cia_restore_srm_settings(void)
{
int i;
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index 6b8ed12936b6..05dc4c1b9074 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -226,7 +226,6 @@ albacore_init_arch(void)
if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start, initrd_end;
- extern void *move_initrd(unsigned long);
/* Move the initrd out of the way. */
if (initrd_end && __pa(initrd_end) > pci_mem) {
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
deleted file mode 100644
index 57e0750419f2..000000000000
--- a/arch/alpha/kernel/core_lca.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/core_lca.c
- *
- * Written by David Mosberger (davidm@cs.arizona.edu) with some code
- * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
- * bios code.
- *
- * Code common to all LCA core logic chips.
- */
-
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/core_lca.h>
-#undef __EXTERN_INLINE
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-
-#include <asm/ptrace.h>
-#include <asm/irq_regs.h>
-#include <asm/smp.h>
-
-#include "proto.h"
-#include "pci_impl.h"
-
-
-/*
- * BIOS32-style PCI interface:
- */
-
-/*
- * Machine check reasons. Defined according to PALcode sources
- * (osf.h and platform.h).
- */
-#define MCHK_K_TPERR 0x0080
-#define MCHK_K_TCPERR 0x0082
-#define MCHK_K_HERR 0x0084
-#define MCHK_K_ECC_C 0x0086
-#define MCHK_K_ECC_NC 0x0088
-#define MCHK_K_UNKNOWN 0x008A
-#define MCHK_K_CACKSOFT 0x008C
-#define MCHK_K_BUGCHECK 0x008E
-#define MCHK_K_OS_BUGCHECK 0x0090
-#define MCHK_K_DCPERR 0x0092
-#define MCHK_K_ICPERR 0x0094
-
-
-/*
- * Platform-specific machine-check reasons:
- */
-#define MCHK_K_SIO_SERR 0x204 /* all platforms so far */
-#define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */
-#define MCHK_K_DCSR 0x208 /* all but Noname */
-
-
-/*
- * Given a bus, device, and function number, compute resulting
- * configuration space address and setup the LCA_IOC_CONF register
- * accordingly. It is therefore not safe to have concurrent
- * invocations to configuration space access routines, but there
- * really shouldn't be any need for this.
- *
- * Type 0:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:11 Device select bit.
- * 10:8 Function number
- * 7:2 Register number
- *
- * Type 1:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:24 reserved
- * 23:16 bus number (8 bits = 256 possible buses)
- * 15:11 Device number (5 bits)
- * 10:8 function number
- * 7:2 register number
- *
- * Notes:
- * The function number selects which function of a multi-function device
- * (e.g., SCSI and Ethernet).
- *
- * The register selects a DWORD (32 bit) register offset. Hence it
- * doesn't get shifted by 2 bits as we want to "drop" the bottom two
- * bits.
- */
-
-static int
-mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
- unsigned long *pci_addr)
-{
- unsigned long addr;
- u8 bus = pbus->number;
-
- if (bus == 0) {
- int device = device_fn >> 3;
- int func = device_fn & 0x7;
-
- /* Type 0 configuration cycle. */
-
- if (device > 12) {
- return -1;
- }
-
- *(vulp)LCA_IOC_CONF = 0;
- addr = (1 << (11 + device)) | (func << 8) | where;
- } else {
- /* Type 1 configuration cycle. */
- *(vulp)LCA_IOC_CONF = 1;
- addr = (bus << 16) | (device_fn << 8) | where;
- }
- *pci_addr = addr;
- return 0;
-}
-
-static unsigned int
-conf_read(unsigned long addr)
-{
- unsigned long flags, code, stat0;
- unsigned int value;
-
- local_irq_save(flags);
-
- /* Reset status register to avoid losing errors. */
- stat0 = *(vulp)LCA_IOC_STAT0;
- *(vulp)LCA_IOC_STAT0 = stat0;
- mb();
-
- /* Access configuration space. */
- value = *(vuip)addr;
- draina();
-
- stat0 = *(vulp)LCA_IOC_STAT0;
- if (stat0 & LCA_IOC_STAT0_ERR) {
- code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
- & LCA_IOC_STAT0_CODE_MASK);
- if (code != 1) {
- printk("lca.c:conf_read: got stat0=%lx\n", stat0);
- }
-
- /* Reset error status. */
- *(vulp)LCA_IOC_STAT0 = stat0;
- mb();
-
- /* Reset machine check. */
- wrmces(0x7);
-
- value = 0xffffffff;
- }
- local_irq_restore(flags);
- return value;
-}
-
-static void
-conf_write(unsigned long addr, unsigned int value)
-{
- unsigned long flags, code, stat0;
-
- local_irq_save(flags); /* avoid getting hit by machine check */
-
- /* Reset status register to avoid losing errors. */
- stat0 = *(vulp)LCA_IOC_STAT0;
- *(vulp)LCA_IOC_STAT0 = stat0;
- mb();
-
- /* Access configuration space. */
- *(vuip)addr = value;
- draina();
-
- stat0 = *(vulp)LCA_IOC_STAT0;
- if (stat0 & LCA_IOC_STAT0_ERR) {
- code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
- & LCA_IOC_STAT0_CODE_MASK);
- if (code != 1) {
- printk("lca.c:conf_write: got stat0=%lx\n", stat0);
- }
-
- /* Reset error status. */
- *(vulp)LCA_IOC_STAT0 = stat0;
- mb();
-
- /* Reset machine check. */
- wrmces(0x7);
- }
- local_irq_restore(flags);
-}
-
-static int
-lca_read_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *value)
-{
- unsigned long addr, pci_addr;
- long mask;
- int shift;
-
- if (mk_conf_addr(bus, devfn, where, &pci_addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = (where & 3) * 8;
- mask = (size - 1) * 8;
- addr = (pci_addr << 5) + mask + LCA_CONF;
- *value = conf_read(addr) >> (shift);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 value)
-{
- unsigned long addr, pci_addr;
- long mask;
-
- if (mk_conf_addr(bus, devfn, where, &pci_addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- mask = (size - 1) * 8;
- addr = (pci_addr << 5) + mask + LCA_CONF;
- conf_write(addr, value << ((where & 3) * 8));
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops lca_pci_ops =
-{
- .read = lca_read_config,
- .write = lca_write_config,
-};
-
-void
-lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
-{
- wmb();
- *(vulp)LCA_IOC_TBIA = 0;
- mb();
-}
-
-void __init
-lca_init_arch(void)
-{
- struct pci_controller *hose;
-
- /*
- * Create our single hose.
- */
-
- pci_isa_hose = hose = alloc_pci_controller();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->index = 0;
-
- hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR;
- hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR;
- hose->sparse_io_base = LCA_IO - IDENT_ADDR;
- hose->dense_io_base = 0;
-
- /*
- * Set up the PCI to main memory translation windows.
- *
- * Mimic the SRM settings for the direct-map window.
- * Window 0 is scatter-gather 8MB at 8MB (for isa).
- * Window 1 is direct access 1GB at 1GB.
- *
- * Note that we do not try to save any of the DMA window CSRs
- * before setting them, since we cannot read those CSRs on LCA.
- */
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
- SMP_CACHE_BYTES);
- hose->sg_pci = NULL;
- __direct_map_base = 0x40000000;
- __direct_map_size = 0x40000000;
-
- *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32);
- *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000;
- *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);
-
- *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32);
- *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000;
- *(vulp)LCA_IOC_T_BASE1 = 0;
-
- *(vulp)LCA_IOC_TB_ENA = 0x80;
-
- lca_pci_tbi(hose, 0, -1);
-
- /*
- * Disable PCI parity for now. The NCR53c810 chip has
- * troubles meeting the PCI spec which results in
- * data parity errors.
- */
- *(vulp)LCA_IOC_PAR_DIS = 1UL<<5;
-
- /*
- * Finally, set up for restoring the correct HAE if using SRM.
- * Again, since we cannot read many of the CSRs on the LCA,
- * one of which happens to be the HAE, we save the value that
- * the SRM will expect...
- */
- if (alpha_using_srm)
- srm_hae = 0x80000000UL;
-}
-
-/*
- * Constants used during machine-check handling. I suppose these
- * could be moved into lca.h but I don't see much reason why anybody
- * else would want to use them.
- */
-
-#define ESR_EAV (1UL<< 0) /* error address valid */
-#define ESR_CEE (1UL<< 1) /* correctable error */
-#define ESR_UEE (1UL<< 2) /* uncorrectable error */
-#define ESR_WRE (1UL<< 3) /* write-error */
-#define ESR_SOR (1UL<< 4) /* error source */
-#define ESR_CTE (1UL<< 7) /* cache-tag error */
-#define ESR_MSE (1UL<< 9) /* multiple soft errors */
-#define ESR_MHE (1UL<<10) /* multiple hard errors */
-#define ESR_NXM (1UL<<12) /* non-existent memory */
-
-#define IOC_ERR ( 1<<4) /* ioc logs an error */
-#define IOC_CMD_SHIFT 0
-#define IOC_CMD (0xf<<IOC_CMD_SHIFT)
-#define IOC_CODE_SHIFT 8
-#define IOC_CODE (0xf<<IOC_CODE_SHIFT)
-#define IOC_LOST ( 1<<5)
-#define IOC_P_NBR ((__u32) ~((1<<13) - 1))
-
-static void
-mem_error(unsigned long esr, unsigned long ear)
-{
- printk(" %s %s error to %s occurred at address %x\n",
- ((esr & ESR_CEE) ? "Correctable" :
- (esr & ESR_UEE) ? "Uncorrectable" : "A"),
- (esr & ESR_WRE) ? "write" : "read",
- (esr & ESR_SOR) ? "memory" : "b-cache",
- (unsigned) (ear & 0x1ffffff8));
- if (esr & ESR_CTE) {
- printk(" A b-cache tag parity error was detected.\n");
- }
- if (esr & ESR_MSE) {
- printk(" Several other correctable errors occurred.\n");
- }
- if (esr & ESR_MHE) {
- printk(" Several other uncorrectable errors occurred.\n");
- }
- if (esr & ESR_NXM) {
- printk(" Attempted to access non-existent memory.\n");
- }
-}
-
-static void
-ioc_error(__u32 stat0, __u32 stat1)
-{
- static const char * const pci_cmd[] = {
- "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write",
- "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3",
- "Rsvd4", "Configuration Read", "Configuration Write",
- "Memory Read Multiple", "Dual Address", "Memory Read Line",
- "Memory Write and Invalidate"
- };
- static const char * const err_name[] = {
- "exceeded retry limit", "no device", "bad data parity",
- "target abort", "bad address parity", "page table read error",
- "invalid page", "data error"
- };
- unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT;
- unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT;
-
- printk(" %s initiated PCI %s cycle to address %x"
- " failed due to %s.\n",
- code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]);
-
- if (code == 5 || code == 6) {
- printk(" (Error occurred at PCI memory address %x.)\n",
- (stat0 & ~IOC_P_NBR));
- }
- if (stat0 & IOC_LOST) {
- printk(" Other PCI errors occurred simultaneously.\n");
- }
-}
-
-void
-lca_machine_check(unsigned long vector, unsigned long la_ptr)
-{
- const char * reason;
- union el_lca el;
-
- el.c = (struct el_common *) la_ptr;
-
- wrmces(rdmces()); /* reset machine check pending flag */
-
- printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n",
- vector, get_irq_regs()->pc, (unsigned int) el.c->code);
-
- /*
- * The first quadword after the common header always seems to
- * be the machine check reason---don't know why this isn't
- * part of the common header instead. In the case of a long
- * logout frame, the upper 32 bits is the machine check
- * revision level, which we ignore for now.
- */
- switch ((unsigned int) el.c->code) {
- case MCHK_K_TPERR: reason = "tag parity error"; break;
- case MCHK_K_TCPERR: reason = "tag control parity error"; break;
- case MCHK_K_HERR: reason = "access to non-existent memory"; break;
- case MCHK_K_ECC_C: reason = "correctable ECC error"; break;
- case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break;
- case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break;
- case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break;
- case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break;
- case MCHK_K_DCPERR: reason = "d-cache parity error"; break;
- case MCHK_K_ICPERR: reason = "i-cache parity error"; break;
- case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break;
- case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break;
- case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break;
- case MCHK_K_UNKNOWN:
- default: reason = "unknown"; break;
- }
-
- switch (el.c->size) {
- case sizeof(struct el_lca_mcheck_short):
- printk(KERN_CRIT
- " Reason: %s (short frame%s, dc_stat=%#lx):\n",
- reason, el.c->retry ? ", retryable" : "",
- el.s->dc_stat);
- if (el.s->esr & ESR_EAV) {
- mem_error(el.s->esr, el.s->ear);
- }
- if (el.s->ioc_stat0 & IOC_ERR) {
- ioc_error(el.s->ioc_stat0, el.s->ioc_stat1);
- }
- break;
-
- case sizeof(struct el_lca_mcheck_long):
- printk(KERN_CRIT " Reason: %s (long frame%s):\n",
- reason, el.c->retry ? ", retryable" : "");
- printk(KERN_CRIT
- " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n",
- el.l->pt[0], el.l->exc_addr, el.l->dc_stat);
- printk(KERN_CRIT " car: %#lx\n", el.l->car);
- if (el.l->esr & ESR_EAV) {
- mem_error(el.l->esr, el.l->ear);
- }
- if (el.l->ioc_stat0 & IOC_ERR) {
- ioc_error(el.l->ioc_stat0, el.l->ioc_stat1);
- }
- break;
-
- default:
- printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
- }
-
- /* Dump the logout area to give all info. */
-#ifdef CONFIG_VERBOSE_MCHECK
- if (alpha_verbose_mcheck > 1) {
- unsigned long * ptr = (unsigned long *) la_ptr;
- long i;
- for (i = 0; i < el.c->size / sizeof(long); i += 2) {
- printk(KERN_CRIT " +%8lx %016lx %016lx\n",
- i*sizeof(long), ptr[i], ptr[i+1]);
- }
- }
-#endif /* CONFIG_VERBOSE_MCHECK */
-}
-
-/*
- * The following routines are needed to support the SPEED changing
- * necessary to successfully manage the thermal problem on the AlphaBook1.
- */
-
-void
-lca_clock_print(void)
-{
- long pmr_reg;
-
- pmr_reg = LCA_READ_PMR;
-
- printk("Status of clock control:\n");
- printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg));
- printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg));
- printk("\tInterrupt override is %s\n",
- (pmr_reg & LCA_PMR_INTO) ? "on" : "off");
- printk("\tDMA override is %s\n",
- (pmr_reg & LCA_PMR_DMAO) ? "on" : "off");
-
-}
-
-int
-lca_get_clock(void)
-{
- long pmr_reg;
-
- pmr_reg = LCA_READ_PMR;
- return(LCA_GET_PRIMARY(pmr_reg));
-
-}
-
-void
-lca_clock_fiddle(int divisor)
-{
- long pmr_reg;
-
- pmr_reg = LCA_READ_PMR;
- LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor);
- /* lca_norm_clock = divisor; */
- LCA_WRITE_PMR(pmr_reg);
- mb();
-}
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index e9348aec4649..b22248044bf0 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -355,7 +355,7 @@ marvel_init_io7(struct io7 *io7)
}
}
-void __init
+static void __init
marvel_io7_present(gct6_node *node)
{
int pe;
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index 98d5b6ff8a76..3d72d90624f1 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -10,7 +10,7 @@
* Code common to all T2 core logic chips.
*/
-#define __EXTERN_INLINE
+#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_t2.h>
#undef __EXTERN_INLINE
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index 3a804b67f9da..8dd08c5e4270 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -59,7 +59,7 @@ unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;
-void __init
+static void __init
wildfire_init_hose(int qbbno, int hoseno)
{
struct pci_controller *hose;
@@ -137,7 +137,7 @@ wildfire_init_hose(int qbbno, int hoseno)
wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
-void __init
+static void __init
wildfire_init_pca(int qbbno, int pcano)
{
@@ -154,7 +154,7 @@ wildfire_init_pca(int qbbno, int pcano)
wildfire_init_hose(qbbno, (pcano << 1) + 1);
}
-void __init
+static void __init
wildfire_init_qbb(int qbbno)
{
int pcano;
@@ -176,7 +176,7 @@ wildfire_init_qbb(int qbbno)
}
}
-void __init
+static void __init
wildfire_hardware_probe(void)
{
unsigned long temp;
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index eb51f93a70c8..dd26062d75b3 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -811,6 +811,7 @@ alpha_\name:
fork_like fork
fork_like vfork
fork_like clone
+fork_like clone3
.macro sigreturn_like name
.align 4
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
index eda09778268f..c28035d6d1e6 100644
--- a/arch/alpha/kernel/io.c
+++ b/arch/alpha/kernel/io.c
@@ -647,6 +647,10 @@ void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
EXPORT_SYMBOL(_memset_c_io);
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
+
+#include <asm/vga.h>
+
/* A version of memcpy used by the vga console routines to move data around
arbitrarily between screen and main memory. */
@@ -681,6 +685,21 @@ scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
EXPORT_SYMBOL(scr_memcpyw);
+void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
+{
+ if (d < s)
+ scr_memcpyw(d, s, count);
+ else {
+ count /= 2;
+ d += count;
+ s += count;
+ while (count--)
+ scr_writew(scr_readw(--s), --d);
+ }
+}
+EXPORT_SYMBOL(scr_memmovew);
+#endif
+
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
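The scr_memmovew() helper added in the io.c hunk above is an overlap-safe, 16-bit-word move for console memory: when the destination starts below the source it can copy forward through scr_memcpyw(), otherwise it walks backwards so each source word is read before it can be overwritten (count is in bytes, hence the count /= 2). A minimal user-space sketch of the same forward/backward technique, with a hypothetical word_move() standing in for the scr_* accessors:

#include <stdint.h>
#include <stddef.h>

/* Overlap-safe move of 16-bit words; count is in bytes, as in scr_memmovew(). */
static void word_move(uint16_t *d, const uint16_t *s, size_t count)
{
	count /= 2;			/* convert byte count to word count */
	if (d < s) {
		/* Destination is below the source: forward copy is safe. */
		while (count--)
			*d++ = *s++;
	} else {
		/* Destination overlaps the tail of the source: copy backward. */
		d += count;
		s += count;
		while (count--)
			*--d = *--s;
	}
}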
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 15f2effd6baf..c67047c5d830 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <linux/uaccess.h>
+#include "irq_impl.h"
volatile unsigned long irq_err_count;
DEFINE_PER_CPU(unsigned long, irq_pmi_count);
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 1dcf0d9038fd..29c6c477ac35 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -98,10 +98,6 @@ init_i8259a_irqs(void)
#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC alpha_mv.iack_sc
-#elif defined(CONFIG_ALPHA_APECS)
-# define IACK_SC APECS_IACK_SC
-#elif defined(CONFIG_ALPHA_LCA)
-# define IACK_SC LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index c2ebcb39e589..129ae36b8e6d 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -44,33 +44,14 @@
#define DO_DEFAULT_RTC .rtc_port = 0x70
-#define DO_EV4_MMU \
- .max_asn = EV4_MAX_ASN, \
- .mv_switch_mm = ev4_switch_mm, \
- .mv_activate_mm = ev4_activate_mm, \
- .mv_flush_tlb_current = ev4_flush_tlb_current, \
- .mv_flush_tlb_current_page = ev4_flush_tlb_current_page
-
#define DO_EV5_MMU \
- .max_asn = EV5_MAX_ASN, \
- .mv_switch_mm = ev5_switch_mm, \
- .mv_activate_mm = ev5_activate_mm, \
- .mv_flush_tlb_current = ev5_flush_tlb_current, \
- .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+ .max_asn = EV5_MAX_ASN \
#define DO_EV6_MMU \
- .max_asn = EV6_MAX_ASN, \
- .mv_switch_mm = ev5_switch_mm, \
- .mv_activate_mm = ev5_activate_mm, \
- .mv_flush_tlb_current = ev5_flush_tlb_current, \
- .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+ .max_asn = EV6_MAX_ASN \
#define DO_EV7_MMU \
- .max_asn = EV6_MAX_ASN, \
- .mv_switch_mm = ev5_switch_mm, \
- .mv_activate_mm = ev5_activate_mm, \
- .mv_flush_tlb_current = ev5_flush_tlb_current, \
- .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+ .max_asn = EV6_MAX_ASN \
#define IO_LITE(UP,low) \
.hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 5db88b627439..e5f881bc8288 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1218,14 +1218,11 @@ static unsigned long
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
unsigned long limit)
{
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
- info.flags = 0;
info.length = len;
info.low_limit = addr;
info.high_limit = limit;
- info.align_mask = 0;
- info.align_offset = 0;
return vm_unmapped_area(&info);
}
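The osf_sys.c hunk above replaces the explicit zeroing of individual vm_unmapped_area_info fields with an empty-brace initializer, so every member that is not assigned afterwards (including any fields added to the structure later) starts out as zero. A minimal sketch of the idiom using a hypothetical structure; the empty {} form is the GNU C extension the kernel relies on (standardized in C23):

#include <stdio.h>

struct lookup_args {
	unsigned long flags;
	unsigned long length;
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	/* Empty initializer: all members are zeroed, no per-field assignments needed. */
	struct lookup_args args = {};

	args.length = 4096;	/* set only the fields that matter */
	printf("flags=%lu length=%lu align_mask=%lu\n",
	       args.flags, args.length, args.align_mask);
	return 0;
}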
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
deleted file mode 100644
index ae82061edae9..000000000000
--- a/arch/alpha/kernel/pci-noop.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/pci-noop.c
- *
- * Stub PCI interfaces for Jensen-specific kernels.
- */
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/gfp.h>
-#include <linux/capability.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/syscalls.h>
-
-#include "proto.h"
-
-
-/*
- * The PCI controller list.
- */
-
-struct pci_controller *hose_head, **hose_tail = &hose_head;
-struct pci_controller *pci_isa_hose;
-
-
-struct pci_controller * __init
-alloc_pci_controller(void)
-{
- struct pci_controller *hose;
-
- hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
- if (!hose)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*hose));
-
- *hose_tail = hose;
- hose_tail = &hose->next;
-
- return hose;
-}
-
-struct resource * __init
-alloc_resource(void)
-{
- void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-
- if (!ptr)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
-
- return ptr;
-}
-
-SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
- unsigned long, dfn)
-{
- struct pci_controller *hose;
-
- /* from hose or from bus.devfn */
- if (which & IOBASE_FROM_HOSE) {
- for (hose = hose_head; hose; hose = hose->next)
- if (hose->index == bus)
- break;
- if (!hose)
- return -ENODEV;
- } else {
- /* Special hook for ISA access. */
- if (bus == 0 && dfn == 0)
- hose = pci_isa_hose;
- else
- return -ENODEV;
- }
-
- switch (which & ~IOBASE_FROM_HOSE) {
- case IOBASE_HOSE:
- return hose->index;
- case IOBASE_SPARSE_MEM:
- return hose->sparse_mem_base;
- case IOBASE_DENSE_MEM:
- return hose->dense_mem_base;
- case IOBASE_SPARSE_IO:
- return hose->sparse_io_base;
- case IOBASE_DENSE_IO:
- return hose->dense_io_base;
- case IOBASE_ROOT_BUS:
- return hose->bus->number;
- }
-
- return -EOPNOTSUPP;
-}
-
-SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
- unsigned long, off, unsigned long, len, void __user *, buf)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- else
- return -ENODEV;
-}
-
-SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
- unsigned long, off, unsigned long, len, void __user *, buf)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- else
- return -ENODEV;
-}
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index 18043af45e2b..a16325ce21c4 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -143,9 +143,7 @@ struct pci_iommu_arena
unsigned int align_entry;
};
-#if defined(CONFIG_ALPHA_SRM) && \
- (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
- defined(CONFIG_ALPHA_AVANTI))
+#if defined(CONFIG_ALPHA_SRM) && defined(CONFIG_ALPHA_CIA)
# define NEED_SRM_SAVE_RESTORE
#else
# undef NEED_SRM_SAVE_RESTORE
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index c81183935e97..7fcf3e9b7103 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -929,7 +929,7 @@ const struct dma_map_ops alpha_pci_ops = {
.dma_supported = alpha_pci_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index ccdb508c1516..1f0eb4f25c0f 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -870,7 +870,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
/*
* Init call to initialise performance events at kernel startup.
*/
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 2c89c1c55712..a8bc3ead776b 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -15,13 +15,7 @@ struct pt_regs;
struct task_struct;
struct pci_dev;
struct pci_controller;
-
-/* core_apecs.c */
-extern struct pci_ops apecs_pci_ops;
-extern void apecs_init_arch(void);
-extern void apecs_pci_clr_err(void);
-extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr);
-extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+struct pci_bus;
/* core_cia.c */
extern struct pci_ops cia_pci_ops;
@@ -38,12 +32,6 @@ extern int irongate_pci_clr_err(void);
extern void irongate_init_arch(void);
#define irongate_pci_tbi ((void *)0)
-/* core_lca.c */
-extern struct pci_ops lca_pci_ops;
-extern void lca_init_arch(void);
-extern void lca_machine_check(unsigned long vector, unsigned long la_ptr);
-extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-
/* core_marvel.c */
extern struct pci_ops marvel_pci_ops;
extern void marvel_init_arch(void);
@@ -114,6 +102,9 @@ extern int boot_cpuid;
#ifdef CONFIG_VERBOSE_MCHECK
extern unsigned long alpha_verbose_mcheck;
#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void * __init move_initrd(unsigned long);
+#endif
extern struct screen_info vgacon_screen_info;
/* srmcons.c */
@@ -128,6 +119,7 @@ extern void unregister_srm_console(void);
/* smp.c */
extern void setup_smp(void);
extern void handle_ipi(struct pt_regs *);
+extern void __init smp_callin(void);
/* bios32.c */
/* extern void reset_for_srm(void); */
@@ -139,13 +131,13 @@ extern void common_init_rtc(void);
extern unsigned long est_cycle_freq;
/* smc37c93x.c */
-extern void SMC93x_Init(void);
+extern int __init SMC93x_Init(void);
/* smc37c669.c */
-extern void SMC669_Init(int);
+extern void __init SMC669_Init(int);
/* es1888.c */
-extern void es1888_init(void);
+extern void __init es1888_init(void);
/* ../lib/fpreg.c */
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
@@ -166,19 +158,37 @@ extern void entSys(void);
extern void entUna(void);
extern void entDbg(void);
+/* pci.c */
+extern void pcibios_claim_one_bus(struct pci_bus *);
+
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
extern int ptrace_cancel_bpt (struct task_struct *child);
+extern void syscall_trace_leave(void);
+extern unsigned long syscall_trace_enter(void);
+
+/* signal.c */
+struct sigcontext;
+extern void do_sigreturn(struct sigcontext __user *);
+struct rt_sigframe;
+extern void do_rt_sigreturn(struct rt_sigframe __user *);
+extern void do_work_pending(struct pt_regs *, unsigned long, unsigned long, unsigned long);
/* traps.c */
extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15);
extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);
+extern void do_entInt(unsigned long, unsigned long, unsigned long, struct pt_regs *);
+extern void do_entArith(unsigned long, unsigned long, struct pt_regs *);
+extern void do_entIF(unsigned long, struct pt_regs *);
+extern void do_entDbg(struct pt_regs *);
+struct allregs;
+extern void do_entUna(void *, unsigned long, unsigned long, struct allregs *);
+extern void do_entUnaUser(void __user *, unsigned long, unsigned long, struct pt_regs *);
/* sys_titan.c */
extern void titan_dispatch_irqs(u64);
/* ../mm/init.c */
-extern void switch_to_system_map(void);
extern void srm_paging_stop(void);
static inline int
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 0738f9396f95..bebdffafaee8 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -171,35 +171,22 @@ EXPORT_SYMBOL(__direct_map_size);
asm(".weak "#X)
WEAK(alcor_mv);
-WEAK(alphabook1_mv);
-WEAK(avanti_mv);
-WEAK(cabriolet_mv);
WEAK(clipper_mv);
WEAK(dp264_mv);
WEAK(eb164_mv);
-WEAK(eb64p_mv);
-WEAK(eb66_mv);
-WEAK(eb66p_mv);
WEAK(eiger_mv);
-WEAK(jensen_mv);
WEAK(lx164_mv);
-WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
-WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
WEAK(nautilus_mv);
-WEAK(noname_mv);
-WEAK(noritake_mv);
WEAK(noritake_primo_mv);
-WEAK(p2k_mv);
WEAK(pc164_mv);
WEAK(privateer_mv);
WEAK(rawhide_mv);
WEAK(ruffian_mv);
WEAK(rx164_mv);
-WEAK(sable_mv);
WEAK(sable_gamma_mv);
WEAK(shark_mv);
WEAK(sx164_mv);
@@ -207,7 +194,6 @@ WEAK(takara_mv);
WEAK(titan_mv);
WEAK(webbrick_mv);
WEAK(wildfire_mv);
-WEAK(xl_mv);
WEAK(xlt_mv);
#undef WEAK
@@ -224,7 +210,7 @@ static void __init
reserve_std_resources(void)
{
static struct resource standard_io_resources[] = {
- { .name = "rtc", .start = -1, .end = -1 },
+ { .name = "rtc", .start = 0x70, .end = 0x7f},
{ .name = "dma1", .start = 0x00, .end = 0x1f },
{ .name = "pic1", .start = 0x20, .end = 0x3f },
{ .name = "timer", .start = 0x40, .end = 0x5f },
@@ -246,10 +232,6 @@ reserve_std_resources(void)
}
}
- /* Fix up for the Jensen's queer RTC placement. */
- standard_io_resources[0].start = RTC_PORT(0);
- standard_io_resources[0].end = RTC_PORT(0) + 0x0f;
-
for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
request_resource(io, standard_io_resources+i);
}
@@ -486,14 +468,7 @@ setup_arch(char **cmdline_p)
/*
* Locate the command line.
*/
- /* Hack for Jensen... since we're restricted to 8 or 16 chars for
- boot flags depending on the boot mode, we need some shorthand.
- This should do for installation. */
- if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
- strscpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof(command_line));
- } else {
- strscpy(command_line, COMMAND_LINE, sizeof(command_line));
- }
+ strscpy(command_line, COMMAND_LINE, sizeof(command_line));
strcpy(boot_command_line, command_line);
*cmdline_p = command_line;
@@ -706,12 +681,6 @@ static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
-static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
-static int eb64p_indices[] = {0,0,1,2};
-
-static char eb66_names[][8] = {"EB66", "EB66+"};
-static int eb66_indices[] = {0,0,1};
-
static char marvel_names[][16] = {
"Marvel/EV7"
};
@@ -745,26 +714,26 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
NULL, /* Ruby */
NULL, /* Flamingo */
NULL, /* Mannequin */
- &jensen_mv,
+ NULL, /* Jensens */
NULL, /* Pelican */
NULL, /* Morgan */
NULL, /* Sable -- see below. */
NULL, /* Medulla */
- &noname_mv,
+ NULL, /* Noname */
NULL, /* Turbolaser */
- &avanti_mv,
+ NULL, /* Avanti */
NULL, /* Mustang */
NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
NULL, /* Tradewind */
NULL, /* Mikasa -- see below. */
NULL, /* EB64 */
- NULL, /* EB66 -- see variation. */
- NULL, /* EB64+ -- see variation. */
- &alphabook1_mv,
+ NULL, /* EB66 */
+ NULL, /* EB64+ */
+ NULL, /* Alphabook1 */
&rawhide_mv,
NULL, /* K2 */
- &lynx_mv, /* Lynx */
- &xl_mv,
+ NULL, /* Lynx */
+ NULL, /* XL */
NULL, /* EB164 -- see variation. */
NULL, /* Noritake -- see below. */
NULL, /* Cortex */
@@ -803,19 +772,6 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
};
- static struct alpha_machine_vector *eb64p_vecs[] __initdata =
- {
- &eb64p_mv,
- &cabriolet_mv,
- &cabriolet_mv /* AlphaPCI64 */
- };
-
- static struct alpha_machine_vector *eb66_vecs[] __initdata =
- {
- &eb66_mv,
- &eb66p_mv
- };
-
static struct alpha_machine_vector *marvel_vecs[] __initdata =
{
&marvel_ev7_mv,
@@ -883,14 +839,6 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
if (vec == &eb164_mv && cpu == EV56_CPU)
vec = &pc164_mv;
break;
- case ST_DEC_EB64P:
- if (member < ARRAY_SIZE(eb64p_indices))
- vec = eb64p_vecs[eb64p_indices[member]];
- break;
- case ST_DEC_EB66:
- if (member < ARRAY_SIZE(eb66_indices))
- vec = eb66_vecs[eb66_indices[member]];
- break;
case ST_DEC_MARVEL:
if (member < ARRAY_SIZE(marvel_indices))
vec = marvel_vecs[marvel_indices[member]];
@@ -905,22 +853,13 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
vec = tsunami_vecs[tsunami_indices[member]];
break;
case ST_DEC_1000:
- if (cpu == EV5_CPU || cpu == EV56_CPU)
- vec = &mikasa_primo_mv;
- else
- vec = &mikasa_mv;
+ vec = &mikasa_primo_mv;
break;
case ST_DEC_NORITAKE:
- if (cpu == EV5_CPU || cpu == EV56_CPU)
- vec = &noritake_primo_mv;
- else
- vec = &noritake_mv;
+ vec = &noritake_primo_mv;
break;
case ST_DEC_2100_A500:
- if (cpu == EV5_CPU || cpu == EV56_CPU)
- vec = &sable_gamma_mv;
- else
- vec = &sable_mv;
+ vec = &sable_gamma_mv;
break;
}
}
@@ -933,41 +872,27 @@ get_sysvec_byname(const char *name)
static struct alpha_machine_vector *all_vecs[] __initdata =
{
&alcor_mv,
- &alphabook1_mv,
- &avanti_mv,
- &cabriolet_mv,
&clipper_mv,
&dp264_mv,
&eb164_mv,
- &eb64p_mv,
- &eb66_mv,
- &eb66p_mv,
&eiger_mv,
- &jensen_mv,
&lx164_mv,
- &lynx_mv,
&miata_mv,
- &mikasa_mv,
&mikasa_primo_mv,
&monet_mv,
&nautilus_mv,
- &noname_mv,
- &noritake_mv,
&noritake_primo_mv,
- &p2k_mv,
&pc164_mv,
&privateer_mv,
&rawhide_mv,
&ruffian_mv,
&rx164_mv,
- &sable_mv,
&sable_gamma_mv,
&shark_mv,
&sx164_mv,
&takara_mv,
&webbrick_mv,
&wildfire_mv,
- &xl_mv,
&xlt_mv
};
@@ -1029,14 +954,6 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
if (member < ARRAY_SIZE(alcor_indices))
*variation_name = alcor_names[alcor_indices[member]];
break;
- case ST_DEC_EB64P:
- if (member < ARRAY_SIZE(eb64p_indices))
- *variation_name = eb64p_names[eb64p_indices[member]];
- break;
- case ST_DEC_EB66:
- if (member < ARRAY_SIZE(eb66_indices))
- *variation_name = eb66_names[eb66_indices[member]];
- break;
case ST_DEC_MARVEL:
if (member < ARRAY_SIZE(marvel_indices))
*variation_name = marvel_names[marvel_indices[member]];
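The setup.c hunks above delete the WEAK() declarations and table slots for the machine vectors whose platform files were removed; the remaining entries still link because each vector is declared as a weak reference, so the tables can mention vectors that may not be compiled into a given kernel. A rough user-space sketch of that weak-reference pattern with hypothetical names, using the GCC attribute rather than the kernel's inline-asm .weak directive (the null check here is part of the sketch, not a claim about the kernel's exact lookup code):

#include <stdio.h>
#include <stddef.h>

struct machine_vector { const char *name; };

/* Weak references: if no object file defines these, they resolve to address 0. */
extern struct machine_vector board_a_mv __attribute__((weak));
extern struct machine_vector board_b_mv __attribute__((weak));

static struct machine_vector *all_vecs[] = { &board_a_mv, &board_b_mv };

int main(void)
{
	for (size_t i = 0; i < sizeof(all_vecs) / sizeof(all_vecs[0]); i++) {
		if (!all_vecs[i])	/* not linked in: weak reference is NULL */
			continue;
		printf("supported: %s\n", all_vecs[i]->name);
	}
	return 0;
}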
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index bbbd34586de0..a5a6ed97a6ce 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -11,6 +11,8 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
+#include "proto.h"
+
#if 0
# define DBG_DEVS(args) printk args
#else
@@ -2430,13 +2432,15 @@ int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *
}
#endif
-void __init
+#if SMC_DEBUG
+static void __init
SMC37c669_dump_registers(void)
{
int i;
for (i = 0; i <= 0x29; i++)
printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i));
}
+#endif
/*+
* ============================================================================
* = SMC_init - SMC37c669 Super I/O controller initialization =
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 71cd7aca38ce..8028273f0d16 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -12,6 +12,8 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
+#include "proto.h"
+
#define SMC_DEBUG 0
#if SMC_DEBUG
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 8e9dd63b220c..ed06367ece57 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -38,6 +38,7 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
#include "proto.h"
#include "irq_impl.h"
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index feaf89f6936b..3e61073f4b30 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -21,6 +21,8 @@
#include <asm/console.h>
#include <linux/uaccess.h>
+#include "proto.h"
+
static DEFINE_SPINLOCK(srmcons_callback_lock);
static int srm_is_registered_console = 0;
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 47459b73cdb7..54e75d4fdbe3 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -6,8 +6,7 @@
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
- * Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164,
- * PC164 and LX164.
+ * Code supporting the PC164 and LX164.
*/
#include <linux/kernel.h>
@@ -23,9 +22,7 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/core_apecs.h>
#include <asm/core_cia.h>
-#include <asm/core_lca.h>
#include <asm/tlbflush.h>
#include "proto.h"
@@ -233,13 +230,6 @@ cabriolet_enable_ide(void)
}
static inline void __init
-cabriolet_init_pci(void)
-{
- common_init_pci();
- cabriolet_enable_ide();
-}
-
-static inline void __init
cia_cab_init_pci(void)
{
cia_init_pci();
@@ -317,81 +307,6 @@ alphapc164_init_pci(void)
* The System Vector
*/
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
-struct alpha_machine_vector cabriolet_mv __initmv = {
- .vector_name = "Cabriolet",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = apecs_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 35,
- .device_interrupt = cabriolet_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = cabriolet_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = cabriolet_init_pci,
- .pci_map_irq = cabriolet_map_irq,
- .pci_swizzle = common_swizzle,
-};
-#ifndef CONFIG_ALPHA_EB64P
-ALIAS_MV(cabriolet)
-#endif
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
-struct alpha_machine_vector eb164_mv __initmv = {
- .vector_name = "EB164",
- DO_EV5_MMU,
- DO_DEFAULT_RTC,
- DO_CIA_IO,
- .machine_check = cia_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = CIA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 35,
- .device_interrupt = cabriolet_device_interrupt,
-
- .init_arch = cia_init_arch,
- .init_irq = cabriolet_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = cia_cab_init_pci,
- .kill_arch = cia_kill_arch,
- .pci_map_irq = cabriolet_map_irq,
- .pci_swizzle = common_swizzle,
-};
-ALIAS_MV(eb164)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
-struct alpha_machine_vector eb66p_mv __initmv = {
- .vector_name = "EB66+",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_LCA_IO,
- .machine_check = lca_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 35,
- .device_interrupt = cabriolet_device_interrupt,
-
- .init_arch = lca_init_arch,
- .init_irq = cabriolet_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = cabriolet_init_pci,
- .pci_map_irq = eb66p_map_irq,
- .pci_swizzle = common_swizzle,
-};
-ALIAS_MV(eb66p)
-#endif
-
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
struct alpha_machine_vector lx164_mv __initmv = {
.vector_name = "LX164",
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
deleted file mode 100644
index 3c43fd347526..000000000000
--- a/arch/alpha/kernel/sys_eb64p.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/sys_eb64p.c
- *
- * Copyright (C) 1995 David A Rusling
- * Copyright (C) 1996 Jay A Estabrook
- * Copyright (C) 1998, 1999 Richard Henderson
- *
- * Code supporting the EB64+ and EB66.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <asm/ptrace.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/io.h>
-#include <asm/core_apecs.h>
-#include <asm/core_lca.h>
-#include <asm/hwrpb.h>
-#include <asm/tlbflush.h>
-
-#include "proto.h"
-#include "irq_impl.h"
-#include "pci_impl.h"
-#include "machvec_impl.h"
-
-
-/* Note mask bit is true for DISABLED irqs. */
-static unsigned int cached_irq_mask = -1;
-
-static inline void
-eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
-{
- outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26));
-}
-
-static inline void
-eb64p_enable_irq(struct irq_data *d)
-{
- eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
-}
-
-static void
-eb64p_disable_irq(struct irq_data *d)
-{
- eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
-}
-
-static struct irq_chip eb64p_irq_type = {
- .name = "EB64P",
- .irq_unmask = eb64p_enable_irq,
- .irq_mask = eb64p_disable_irq,
- .irq_mask_ack = eb64p_disable_irq,
-};
-
-static void
-eb64p_device_interrupt(unsigned long vector)
-{
- unsigned long pld;
- unsigned int i;
-
- /* Read the interrupt summary registers */
- pld = inb(0x26) | (inb(0x27) << 8);
-
- /*
- * Now, for every possible bit set, work through
- * them and call the appropriate interrupt handler.
- */
- while (pld) {
- i = ffz(~pld);
- pld &= pld - 1; /* clear least bit set */
-
- if (i == 5) {
- isa_device_interrupt(vector);
- } else {
- handle_irq(16 + i);
- }
- }
-}
-
-static void __init
-eb64p_init_irq(void)
-{
- long i;
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
- /*
- * CABRIO SRM may not set variation correctly, so here we test
- * the high word of the interrupt summary register for the RAZ
- * bits, and hope that a true EB64+ would read all ones...
- */
- if (inw(0x806) != 0xffff) {
- extern struct alpha_machine_vector cabriolet_mv;
-
- printk("Detected Cabriolet: correcting HWRPB.\n");
-
- hwrpb->sys_variation |= 2L << 10;
- hwrpb_update_checksum(hwrpb);
-
- alpha_mv = cabriolet_mv;
- alpha_mv.init_irq();
- return;
- }
-#endif /* GENERIC */
-
- outb(0xff, 0x26);
- outb(0xff, 0x27);
-
- init_i8259a_irqs();
-
- for (i = 16; i < 32; ++i) {
- irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
- }
-
- common_init_isa_dma();
- if (request_irq(16 + 5, no_action, 0, "isa-cascade", NULL))
- pr_err("Failed to register isa-cascade interrupt\n");
-}
-
-/*
- * PCI Fixup configuration.
- *
- * There are two 8 bit external summary registers as follows:
- *
- * Summary @ 0x26:
- * Bit Meaning
- * 0 Interrupt Line A from slot 0
- * 1 Interrupt Line A from slot 1
- * 2 Interrupt Line B from slot 0
- * 3 Interrupt Line B from slot 1
- * 4 Interrupt Line C from slot 0
- * 5 Interrupt line from the two ISA PICs
- * 6 Tulip
- * 7 NCR SCSI
- *
- * Summary @ 0x27
- * Bit Meaning
- * 0 Interrupt Line C from slot 1
- * 1 Interrupt Line D from slot 0
- * 2 Interrupt Line D from slot 1
- * 3 RAZ
- * 4 RAZ
- * 5 RAZ
- * 6 RAZ
- * 7 RAZ
- *
- * The device to slot mapping looks like:
- *
- * Slot Device
- * 5 NCR SCSI controller
- * 6 PCI on board slot 0
- * 7 PCI on board slot 1
- * 8 Intel SIO PCI-ISA bridge chip
- * 9 Tulip - DECchip 21040 Ethernet controller
- *
- *
- * This two layered interrupt approach means that we allocate IRQ 16 and
- * above for PCI interrupts. The IRQ relates to which bit the interrupt
- * comes in on. This makes interrupt processing much easier.
- */
-
-static int
-eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- static char irq_tab[5][5] = {
- /*INT INTA INTB INTC INTD */
- {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
- {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
- {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */
- { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
- {16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */
- };
- const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
- return COMMON_TABLE_LOOKUP;
-}
-
-
-/*
- * The System Vector
- */
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
-struct alpha_machine_vector eb64p_mv __initmv = {
- .vector_name = "EB64+",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = apecs_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 32,
- .device_interrupt = eb64p_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = eb64p_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = common_init_pci,
- .kill_arch = NULL,
- .pci_map_irq = eb64p_map_irq,
- .pci_swizzle = common_swizzle,
-};
-ALIAS_MV(eb64p)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
-struct alpha_machine_vector eb66_mv __initmv = {
- .vector_name = "EB66",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_LCA_IO,
- .machine_check = lca_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 32,
- .device_interrupt = eb64p_device_interrupt,
-
- .init_arch = lca_init_arch,
- .init_irq = eb64p_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = common_init_pci,
- .pci_map_irq = eb64p_map_irq,
- .pci_swizzle = common_swizzle,
-};
-ALIAS_MV(eb66)
-#endif
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
deleted file mode 100644
index 5c9c88428124..000000000000
--- a/arch/alpha/kernel/sys_jensen.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/sys_jensen.c
- *
- * Copyright (C) 1995 Linus Torvalds
- * Copyright (C) 1998, 1999 Richard Henderson
- *
- * Code supporting the Jensen.
- */
-#define __EXTERN_INLINE
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef __EXTERN_INLINE
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-
-#include <asm/ptrace.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-
-#include "proto.h"
-#include "irq_impl.h"
-#include "pci_impl.h"
-#include "machvec_impl.h"
-
-
-/*
- * Jensen is special: the vector is 0x8X0 for EISA interrupt X, and
- * 0x9X0 for the local motherboard interrupts.
- *
- * Note especially that those local interrupts CANNOT be masked,
- * which causes much of the pain below...
- *
- * 0x660 - NMI
- *
- * 0x800 - IRQ0 interval timer (not used, as we use the RTC timer)
- * 0x810 - IRQ1 line printer (duh..)
- * 0x860 - IRQ6 floppy disk
- *
- * 0x900 - COM1
- * 0x920 - COM2
- * 0x980 - keyboard
- * 0x990 - mouse
- *
- * PCI-based systems are more sane: they don't have the local
- * interrupts at all, and have only normal PCI interrupts from
- * devices. Happily it's easy enough to do a sane mapping from the
- * Jensen.
- *
- * Note that this means that we may have to do a hardware
- * "local_op" to a different interrupt than we report to the rest of the
- * world.
- */
-
-static void
-jensen_local_enable(struct irq_data *d)
-{
- /* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_enable_irq(d);
-}
-
-static void
-jensen_local_disable(struct irq_data *d)
-{
- /* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_disable_irq(d);
-}
-
-static void
-jensen_local_mask_ack(struct irq_data *d)
-{
- /* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_mask_and_ack_irq(d);
-}
-
-static struct irq_chip jensen_local_irq_type = {
- .name = "LOCAL",
- .irq_unmask = jensen_local_enable,
- .irq_mask = jensen_local_disable,
- .irq_mask_ack = jensen_local_mask_ack,
-};
-
-static void
-jensen_device_interrupt(unsigned long vector)
-{
- int irq;
-
- switch (vector) {
- case 0x660:
- printk("Whee.. NMI received. Probable hardware error\n");
- printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461));
- return;
-
- /* local device interrupts: */
- case 0x900: irq = 4; break; /* com1 -> irq 4 */
- case 0x920: irq = 3; break; /* com2 -> irq 3 */
- case 0x980: irq = 1; break; /* kbd -> irq 1 */
- case 0x990: irq = 9; break; /* mouse -> irq 9 */
-
- default:
- if (vector > 0x900) {
- printk("Unknown local interrupt %lx\n", vector);
- return;
- }
-
- irq = (vector - 0x800) >> 4;
- if (irq == 1)
- irq = 7;
- break;
- }
-
- /* If there is no handler yet... */
- if (!irq_has_action(irq)) {
- /* If it is a local interrupt that cannot be masked... */
- if (vector >= 0x900)
- {
- /* Clear keyboard/mouse state */
- inb(0x64);
- inb(0x60);
- /* Reset serial ports */
- inb(0x3fa);
- inb(0x2fa);
- outb(0x0c, 0x3fc);
- outb(0x0c, 0x2fc);
- /* Clear NMI */
- outb(0,0x61);
- outb(0,0x461);
- }
- }
-
-#if 0
- /* A useful bit of code to find out if an interrupt is going wild. */
- {
- static unsigned int last_msg = 0, last_cc = 0;
- static int last_irq = -1, count = 0;
- unsigned int cc;
-
- __asm __volatile("rpcc %0" : "=r"(cc));
- ++count;
-#define JENSEN_CYCLES_PER_SEC (150000000)
- if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) ||
- irq != last_irq) {
- printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n",
- irq, count, cc-last_cc, get_irq_regs()->pc);
- count = 0;
- last_msg = cc;
- last_irq = irq;
- }
- last_cc = cc;
- }
-#endif
-
- handle_irq(irq);
-}
-
-static void __init
-jensen_init_irq(void)
-{
- init_i8259a_irqs();
-
- irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
- irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
- irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
- irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
- irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
-
- common_init_isa_dma();
-}
-
-static void __init
-jensen_init_arch(void)
-{
- struct pci_controller *hose;
-#ifdef CONFIG_PCI
- static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, };
-
- isa_bridge = &fake_isa_bridge;
-#endif
-
- /* Create a hose so that we can report i/o base addresses to
- userland. */
-
- pci_isa_hose = hose = alloc_pci_controller();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->index = 0;
-
- hose->sparse_mem_base = EISA_MEM - IDENT_ADDR;
- hose->dense_mem_base = 0;
- hose->sparse_io_base = EISA_IO - IDENT_ADDR;
- hose->dense_io_base = 0;
-
- hose->sg_isa = hose->sg_pci = NULL;
- __direct_map_base = 0;
- __direct_map_size = 0xffffffff;
-}
-
-static void
-jensen_machine_check(unsigned long vector, unsigned long la)
-{
- printk(KERN_CRIT "Machine check\n");
-}
-
-/*
- * The System Vector
- */
-
-struct alpha_machine_vector jensen_mv __initmv = {
- .vector_name = "Jensen",
- DO_EV4_MMU,
- IO_LITE(JENSEN,jensen),
- .machine_check = jensen_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .rtc_port = 0x170,
-
- .nr_irqs = 16,
- .device_interrupt = jensen_device_interrupt,
-
- .init_arch = jensen_init_arch,
- .init_irq = jensen_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = NULL,
- .kill_arch = NULL,
-};
-ALIAS_MV(jensen)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 7690dfd57cb6..557802398231 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
@@ -164,64 +163,9 @@ mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
-#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
-static void
-mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr)
-{
-#define MCHK_NO_DEVSEL 0x205U
-#define MCHK_NO_TABT 0x204U
-
- struct el_common *mchk_header;
- unsigned int code;
-
- mchk_header = (struct el_common *)la_ptr;
-
- /* Clear the error before any reporting. */
- mb();
- mb(); /* magic */
- draina();
- apecs_pci_clr_err();
- wrmces(0x7);
- mb();
-
- code = mchk_header->code;
- process_mcheck_info(vector, la_ptr, "MIKASA APECS",
- (mcheck_expected(0)
- && (code == MCHK_NO_DEVSEL
- || code == MCHK_NO_TABT)));
-}
-#endif
-
-
/*
* The System Vector
*/
-
-#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
-struct alpha_machine_vector mikasa_mv __initmv = {
- .vector_name = "Mikasa",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = mikasa_apecs_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 32,
- .device_interrupt = mikasa_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = mikasa_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = common_init_pci,
- .pci_map_irq = mikasa_map_irq,
- .pci_swizzle = common_swizzle,
-};
-ALIAS_MV(mikasa)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector mikasa_primo_mv __initmv = {
.vector_name = "Mikasa-Primo",
DO_EV5_MMU,
@@ -244,4 +188,3 @@ struct alpha_machine_vector mikasa_primo_mv __initmv = {
.pci_swizzle = common_swizzle,
};
ALIAS_MV(mikasa_primo)
-#endif
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 96fd6ff3fe81..13b79960b4b9 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -78,7 +78,7 @@ nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
-void
+static void
nautilus_kill_arch(int mode)
{
struct pci_bus *bus = pci_isa_hose->bus;
@@ -127,7 +127,7 @@ naut_sys_machine_check(unsigned long vector, unsigned long la_ptr,
/* Machine checks can come from two sources - those on the CPU and those
in the system. They are analysed separately but all starts here. */
-void
+static void
nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
{
char *mchk_class;
@@ -184,8 +184,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
mb();
}
-extern void pcibios_claim_one_bus(struct pci_bus *);
-
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
@@ -197,7 +195,7 @@ static struct resource busn_resource = {
.flags = IORESOURCE_BUS,
};
-void __init
+static void __init
nautilus_init_pci(void)
{
struct pci_controller *hose = hose_head;
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 47f3ce4f719a..eed3f16561c0 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -24,7 +24,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
@@ -253,64 +252,6 @@ noritake_swizzle(struct pci_dev *dev, u8 *pinp)
return slot;
}
-#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
-static void
-noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr)
-{
-#define MCHK_NO_DEVSEL 0x205U
-#define MCHK_NO_TABT 0x204U
-
- struct el_common *mchk_header;
- unsigned int code;
-
- mchk_header = (struct el_common *)la_ptr;
-
- /* Clear the error before any reporting. */
- mb();
- mb(); /* magic */
- draina();
- apecs_pci_clr_err();
- wrmces(0x7);
- mb();
-
- code = mchk_header->code;
- process_mcheck_info(vector, la_ptr, "NORITAKE APECS",
- (mcheck_expected(0)
- && (code == MCHK_NO_DEVSEL
- || code == MCHK_NO_TABT)));
-}
-#endif
-
-
-/*
- * The System Vectors
- */
-
-#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
-struct alpha_machine_vector noritake_mv __initmv = {
- .vector_name = "Noritake",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = noritake_apecs_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = EISA_DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 48,
- .device_interrupt = noritake_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = noritake_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = common_init_pci,
- .pci_map_irq = noritake_map_irq,
- .pci_swizzle = noritake_swizzle,
-};
-ALIAS_MV(noritake)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_primo_mv __initmv = {
.vector_name = "Noritake-Primo",
DO_EV5_MMU,
@@ -333,4 +274,3 @@ struct alpha_machine_vector noritake_primo_mv __initmv = {
.pci_swizzle = noritake_swizzle,
};
ALIAS_MV(noritake_primo)
-#endif
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 930005b2f630..49f5c75134ec 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -212,232 +212,6 @@ sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
-
-/***********************************************************************/
-/* LYNX hardware specifics
- */
-/*
- * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC.
- *
- * Bit Meaning Kernel IRQ
- *------------------------------------------
- * 0
- * 1
- * 2
- * 3 mouse 12
- * 4
- * 5
- * 6 keyboard 1
- * 7 floppy 6
- * 8 COM2 3
- * 9 parallel port 7
- *10 EISA irq 3 -
- *11 EISA irq 4 -
- *12 EISA irq 5 5
- *13 EISA irq 6 -
- *14 EISA irq 7 -
- *15 COM1 4
- *16 EISA irq 9 9
- *17 EISA irq 10 10
- *18 EISA irq 11 11
- *19 EISA irq 12 -
- *20
- *21 EISA irq 14 14
- *22 EISA irq 15 15
- *23 IIC -
- *24 VGA (builtin) -
- *25
- *26
- *27
- *28 NCR810 (builtin) 28
- *29
- *30
- *31
- *32 PCI 0 slot 4 A primary bus 32
- *33 PCI 0 slot 4 B primary bus 33
- *34 PCI 0 slot 4 C primary bus 34
- *35 PCI 0 slot 4 D primary bus
- *36 PCI 0 slot 5 A primary bus
- *37 PCI 0 slot 5 B primary bus
- *38 PCI 0 slot 5 C primary bus
- *39 PCI 0 slot 5 D primary bus
- *40 PCI 0 slot 6 A primary bus
- *41 PCI 0 slot 6 B primary bus
- *42 PCI 0 slot 6 C primary bus
- *43 PCI 0 slot 6 D primary bus
- *44 PCI 0 slot 7 A primary bus
- *45 PCI 0 slot 7 B primary bus
- *46 PCI 0 slot 7 C primary bus
- *47 PCI 0 slot 7 D primary bus
- *48 PCI 0 slot 0 A secondary bus
- *49 PCI 0 slot 0 B secondary bus
- *50 PCI 0 slot 0 C secondary bus
- *51 PCI 0 slot 0 D secondary bus
- *52 PCI 0 slot 1 A secondary bus
- *53 PCI 0 slot 1 B secondary bus
- *54 PCI 0 slot 1 C secondary bus
- *55 PCI 0 slot 1 D secondary bus
- *56 PCI 0 slot 2 A secondary bus
- *57 PCI 0 slot 2 B secondary bus
- *58 PCI 0 slot 2 C secondary bus
- *59 PCI 0 slot 2 D secondary bus
- *60 PCI 0 slot 3 A secondary bus
- *61 PCI 0 slot 3 B secondary bus
- *62 PCI 0 slot 3 C secondary bus
- *63 PCI 0 slot 3 D secondary bus
- */
-
-static void
-lynx_update_irq_hw(unsigned long bit, unsigned long mask)
-{
- /*
- * Write the AIR register on the T3/T4 with the
- * address of the IC mask register (offset 0x40)
- */
- *(vulp)T2_AIR = 0x40;
- mb();
- *(vulp)T2_AIR; /* re-read to force write */
- mb();
- *(vulp)T2_DIR = mask;
- mb();
- mb();
-}
-
-static void
-lynx_ack_irq_hw(unsigned long bit)
-{
- *(vulp)T2_VAR = (u_long) bit;
- mb();
- mb();
-}
-
-static irq_swizzle_t lynx_irq_swizzle = {
- { /* irq_to_mask */
- -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
- -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */
- -1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */
- 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
- 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
- 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
- 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
- },
- { /* mask_to_irq */
- -1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */
- 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
- 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
- -1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */
- 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
- 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
- 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
- 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
- },
- -1,
- lynx_update_irq_hw,
- lynx_ack_irq_hw
-};
-
-static void __init
-lynx_init_irq(void)
-{
- sable_lynx_irq_swizzle = &lynx_irq_swizzle;
- sable_lynx_init_irq(64);
-}
-
-/*
- * PCI Fixup configuration for ALPHA LYNX (2100A)
- *
- * The device to slot mapping looks like:
- *
- * Slot Device
- * 0 none
- * 1 none
- * 2 PCI-EISA bridge
- * 3 PCI-PCI bridge
- * 4 NCR 810 (Demi-Lynx only)
- * 5 none
- * 6 PCI on board slot 4
- * 7 PCI on board slot 5
- * 8 PCI on board slot 6
- * 9 PCI on board slot 7
- *
- * And behind the PPB we have:
- *
- * 11 PCI on board slot 0
- * 12 PCI on board slot 1
- * 13 PCI on board slot 2
- * 14 PCI on board slot 3
- */
-/*
- * NOTE: the IRQ assignments below are arbitrary, but need to be consistent
- * with the values in the irq swizzling tables above.
- */
-
-static int
-lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- static char irq_tab[19][5] = {
- /*INT INTA INTB INTC INTD */
- { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
- { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
- { 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */
- { -1, -1, -1, -1, -1}, /* IdSel 16, none */
- { 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */
- { 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */
- { 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */
- { 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */
- { -1, -1, -1, -1, -1}, /* IdSel 22, none */
- /* The following are actually behind the PPB. */
- { -1, -1, -1, -1, -1}, /* IdSel 16 none */
- { 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */
- { -1, -1, -1, -1, -1}, /* IdSel 18 none */
- { -1, -1, -1, -1, -1}, /* IdSel 19 none */
- { -1, -1, -1, -1, -1}, /* IdSel 20 none */
- { -1, -1, -1, -1, -1}, /* IdSel 21 none */
- { 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */
- { 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */
- { 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */
- { 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */
- };
- const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
- return COMMON_TABLE_LOOKUP;
-}
-
-static u8
-lynx_swizzle(struct pci_dev *dev, u8 *pinp)
-{
- int slot, pin = *pinp;
-
- if (dev->bus->number == 0) {
- slot = PCI_SLOT(dev->devfn);
- }
- /* Check for the built-in bridge */
- else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
- slot = PCI_SLOT(dev->devfn) + 11;
- }
- else
- {
- /* Must be a card-based bridge. */
- do {
- if (PCI_SLOT(dev->bus->self->devfn) == 3) {
- slot = PCI_SLOT(dev->devfn) + 11;
- break;
- }
- pin = pci_swizzle_interrupt_pin(dev, pin);
-
- /* Move up the chain of bridges. */
- dev = dev->bus->self;
- /* Slot of the next bridge. */
- slot = PCI_SLOT(dev->devfn);
- } while (dev->bus->self);
- }
- *pinp = pin;
- return slot;
-}
-
-#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */
-
/***********************************************************************/
/* GENERIC irq routines */
@@ -539,40 +313,7 @@ sable_lynx_init_pci(void)
* these games with GAMMA_BIAS.
*/
-#if defined(CONFIG_ALPHA_GENERIC) || \
- (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
-#undef GAMMA_BIAS
-#define GAMMA_BIAS 0
-struct alpha_machine_vector sable_mv __initmv = {
- .vector_name = "Sable",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_T2_IO,
- .machine_check = t2_machine_check,
- .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
- .min_io_address = EISA_DEFAULT_IO_BASE,
- .min_mem_address = T2_DEFAULT_MEM_BASE,
-
- .nr_irqs = 40,
- .device_interrupt = sable_lynx_srm_device_interrupt,
-
- .init_arch = t2_init_arch,
- .init_irq = sable_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = sable_lynx_init_pci,
- .kill_arch = t2_kill_arch,
- .pci_map_irq = sable_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .t2 = {
- .gamma_bias = 0
- } }
-};
-ALIAS_MV(sable)
-#endif /* GENERIC || (SABLE && !GAMMA) */
-
-#if defined(CONFIG_ALPHA_GENERIC) || \
- (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector sable_gamma_mv __initmv = {
@@ -601,35 +342,4 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
} }
};
ALIAS_MV(sable_gamma)
-#endif /* GENERIC || (SABLE && GAMMA) */
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
-#undef GAMMA_BIAS
-#define GAMMA_BIAS _GAMMA_BIAS
-struct alpha_machine_vector lynx_mv __initmv = {
- .vector_name = "Lynx",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_T2_IO,
- .machine_check = t2_machine_check,
- .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
- .min_io_address = EISA_DEFAULT_IO_BASE,
- .min_mem_address = T2_DEFAULT_MEM_BASE,
-
- .nr_irqs = 64,
- .device_interrupt = sable_lynx_srm_device_interrupt,
-
- .init_arch = t2_init_arch,
- .init_irq = lynx_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = sable_lynx_init_pci,
- .kill_arch = t2_kill_arch,
- .pci_map_irq = lynx_map_irq,
- .pci_swizzle = lynx_swizzle,
-
- .sys = { .t2 = {
- .gamma_bias = _GAMMA_BIAS
- } }
-};
-ALIAS_MV(lynx)
-#endif /* GENERIC || LYNX */
+#endif /* GENERIC || SABLE */
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
deleted file mode 100644
index 086488ed83a7..000000000000
--- a/arch/alpha/kernel/sys_sio.c
+++ /dev/null
@@ -1,486 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/kernel/sys_sio.c
- *
- * Copyright (C) 1995 David A Rusling
- * Copyright (C) 1996 Jay A Estabrook
- * Copyright (C) 1998, 1999 Richard Henderson
- *
- * Code for all boards that route the PCI interrupts through the SIO
- * PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB),
- * Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/screen_info.h>
-
-#include <asm/compiler.h>
-#include <asm/ptrace.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/io.h>
-#include <asm/core_apecs.h>
-#include <asm/core_lca.h>
-#include <asm/tlbflush.h>
-
-#include "proto.h"
-#include "irq_impl.h"
-#include "pci_impl.h"
-#include "machvec_impl.h"
-#include "pc873xx.h"
-
-#if defined(ALPHA_RESTORE_SRM_SETUP)
-/* Save LCA configuration data as the console had it set up. */
-struct
-{
- unsigned int orig_route_tab; /* for SAVE/RESTORE */
-} saved_config __attribute((common));
-#endif
-
-
-static void __init
-sio_init_irq(void)
-{
- if (alpha_using_srm)
- alpha_mv.device_interrupt = srm_device_interrupt;
-
- init_i8259a_irqs();
- common_init_isa_dma();
-}
-
-static inline void __init
-alphabook1_init_arch(void)
-{
-#ifdef CONFIG_VGA_CONSOLE
- /* The AlphaBook1 has LCD video fixed at 800x600,
- 37 rows and 100 cols. */
- vgacon_screen_info.orig_y = 37;
- vgacon_screen_info.orig_video_cols = 100;
- vgacon_screen_info.orig_video_lines = 37;
-#endif
-
- lca_init_arch();
-}
-
-
-/*
- * sio_route_tab selects irq routing in PCI/ISA bridge so that:
- * PIRQ0 -> irq 15
- * PIRQ1 -> irq 9
- * PIRQ2 -> irq 10
- * PIRQ3 -> irq 11
- *
- * This probably ought to be configurable via MILO. For
- * example, sound boards seem to like using IRQ 9.
- *
- * This is NOT how we should do it. PIRQ0-X should have
- * their own IRQs, the way intel uses the IO-APIC IRQs.
- */
-
-static void __init
-sio_pci_route(void)
-{
- unsigned int orig_route_tab;
-
- /* First, ALWAYS read and print the original setting. */
- pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
- &orig_route_tab);
- printk("%s: PIRQ original 0x%x new 0x%x\n", __func__,
- orig_route_tab, alpha_mv.sys.sio.route_tab);
-
-#if defined(ALPHA_RESTORE_SRM_SETUP)
- saved_config.orig_route_tab = orig_route_tab;
-#endif
-
- /* Now override with desired setting. */
- pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
- alpha_mv.sys.sio.route_tab);
-}
-
-static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
-{
- if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
- (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
- return false;
-
- return true;
-}
-
-static unsigned int __init
-sio_collect_irq_levels(void)
-{
- unsigned int level_bits = 0;
- struct pci_dev *dev = NULL;
-
- /* Iterate through the devices, collecting IRQ levels. */
- for_each_pci_dev(dev) {
- if (!sio_pci_dev_irq_needs_level(dev))
- continue;
-
- if (dev->irq)
- level_bits |= (1 << dev->irq);
- }
- return level_bits;
-}
-
-static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
-{
- unsigned int old_level_bits;
-
- /*
- * Now, make all PCI interrupts level sensitive. Notice:
- * these registers must be accessed byte-wise. inw()/outw()
- * don't work.
- *
- * Make sure to turn off any level bits set for IRQs 9,10,11,15,
- * so that the only bits getting set are for devices actually found.
- * Note that we do preserve the remainder of the bits, which we hope
- * will be set correctly by ARC/SRM.
- *
- * Note: we at least preserve any level-set bits on AlphaBook1
- */
- old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
-
- if (reset)
- old_level_bits &= 0x71ff;
-
- level_bits |= old_level_bits;
-
- outb((level_bits >> 0) & 0xff, 0x4d0);
- outb((level_bits >> 8) & 0xff, 0x4d1);
-}
-
-static inline void
-sio_fixup_irq_levels(unsigned int level_bits)
-{
- __sio_fixup_irq_levels(level_bits, true);
-}
-
-static inline int
-noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- /*
- * The Noname board has 5 PCI slots with each of the 4
- * interrupt pins routed to different pins on the PCI/ISA
- * bridge (PIRQ0-PIRQ3). The table below is based on
- * information available at:
- *
- * http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt
- *
- * I have no information on the Avanti interrupt routing, but
- * the routing seems to be identical to the Noname except
- * that the Avanti has an additional slot whose routing I'm
- * unsure of.
- *
- * pirq_tab[0] is a fake entry to deal with old PCI boards
- * that have the interrupt pin number hardwired to 0 (meaning
- * that they use the default INTA line, if they are interrupt
- * driven at all).
- */
- static char irq_tab[][5] = {
- /*INT A B C D */
- { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
- {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
- { 2, 2, -1, -1, -1}, /* idsel 8 (Hack: slot closest ISA) */
- {-1, -1, -1, -1, -1}, /* idsel 9 (unused) */
- {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
- { 0, 0, 2, 1, 0}, /* idsel 11 KN25_PCI_SLOT0 */
- { 1, 1, 0, 2, 1}, /* idsel 12 KN25_PCI_SLOT1 */
- { 2, 2, 1, 0, 2}, /* idsel 13 KN25_PCI_SLOT2 */
- { 0, 0, 0, 0, 0}, /* idsel 14 AS255 TULIP */
- };
- const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
- int irq = COMMON_TABLE_LOOKUP, tmp;
- tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
-
- irq = irq >= 0 ? tmp : -1;
-
- /* Fixup IRQ level if an actual IRQ mapping is detected */
- if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
- __sio_fixup_irq_levels(1 << irq, false);
-
- return irq;
-}
-
-static inline int
-p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- static char irq_tab[][5] = {
- /*INT A B C D */
- { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
- {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
- { 1, 1, 2, 3, 0}, /* idsel 8 (slot A) */
- { 2, 2, 3, 0, 1}, /* idsel 9 (slot B) */
- {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
- {-1, -1, -1, -1, -1}, /* idsel 11 (unused) */
- { 3, 3, -1, -1, -1}, /* idsel 12 (CMD0646) */
- };
- const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
- int irq = COMMON_TABLE_LOOKUP, tmp;
- tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
- return irq >= 0 ? tmp : -1;
-}
-
-static inline void __init
-noname_init_pci(void)
-{
- common_init_pci();
- sio_pci_route();
- sio_fixup_irq_levels(sio_collect_irq_levels());
-
- if (pc873xx_probe() == -1) {
- printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
- } else {
- printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
- pc873xx_get_model(), pc873xx_get_base());
-
- /* Enabling things in the Super IO chip doesn't actually
- * configure and enable things, the legacy drivers still
- * need to do the actual configuration and enabling.
- * This only unblocks them.
- */
-
-#if !defined(CONFIG_ALPHA_AVANTI)
- /* Don't bother on the Avanti family.
- * None of them had on-board IDE.
- */
- pc873xx_enable_ide();
-#endif
- pc873xx_enable_epp19();
- }
-}
-
-static inline void __init
-alphabook1_init_pci(void)
-{
- struct pci_dev *dev;
- unsigned char orig, config;
-
- common_init_pci();
- sio_pci_route();
-
- /*
- * On the AlphaBook1, the PCMCIA chip (Cirrus 6729)
- * is sensitive to PCI bus bursts, so we must DISABLE
- * burst mode for the NCR 8xx SCSI... :-(
- *
- * Note that the NCR810 SCSI driver must preserve the
- * setting of the bit in order for this to work. At the
- * moment (2.0.29), ncr53c8xx.c does NOT do this, but
- * 53c7,8xx.c DOES.
- */
-
- dev = NULL;
- while ((dev = pci_get_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
- if (dev->device == PCI_DEVICE_ID_NCR_53C810
- || dev->device == PCI_DEVICE_ID_NCR_53C815
- || dev->device == PCI_DEVICE_ID_NCR_53C820
- || dev->device == PCI_DEVICE_ID_NCR_53C825) {
- unsigned long io_port;
- unsigned char ctest4;
-
- io_port = dev->resource[0].start;
- ctest4 = inb(io_port+0x21);
- if (!(ctest4 & 0x80)) {
- printk("AlphaBook1 NCR init: setting"
- " burst disable\n");
- outb(ctest4 | 0x80, io_port+0x21);
- }
- }
- }
-
- /* Do not set *ANY* level triggers for AlphaBook1. */
- sio_fixup_irq_levels(0);
-
- /* Make sure that register PR1 indicates 1Mb mem */
- outb(0x0f, 0x3ce); orig = inb(0x3cf); /* read PR5 */
- outb(0x0f, 0x3ce); outb(0x05, 0x3cf); /* unlock PR0-4 */
- outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */
- if ((config & 0xc0) != 0xc0) {
- printk("AlphaBook1 VGA init: setting 1Mb memory\n");
- config |= 0xc0;
- outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */
- }
- outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */
-}
-
-void
-sio_kill_arch(int mode)
-{
-#if defined(ALPHA_RESTORE_SRM_SETUP)
- /* Since we cannot read the PCI DMA Window CSRs, we
- * cannot restore them here.
- *
- * However, we CAN read the PIRQ route register, so restore it
- * now...
- */
- pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
- saved_config.orig_route_tab);
-#endif
-}
-
-
-/*
- * The System Vectors
- */
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
-struct alpha_machine_vector alphabook1_mv __initmv = {
- .vector_name = "AlphaBook1",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_LCA_IO,
- .machine_check = lca_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 16,
- .device_interrupt = isa_device_interrupt,
-
- .init_arch = alphabook1_init_arch,
- .init_irq = sio_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = alphabook1_init_pci,
- .kill_arch = sio_kill_arch,
- .pci_map_irq = noname_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .sio = {
- /* NCR810 SCSI is 14, PCMCIA controller is 15. */
- .route_tab = 0x0e0f0a0a,
- }}
-};
-ALIAS_MV(alphabook1)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
-struct alpha_machine_vector avanti_mv __initmv = {
- .vector_name = "Avanti",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = apecs_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 16,
- .device_interrupt = isa_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = sio_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = noname_init_pci,
- .kill_arch = sio_kill_arch,
- .pci_map_irq = noname_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .sio = {
- .route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */
- }}
-};
-ALIAS_MV(avanti)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
-struct alpha_machine_vector noname_mv __initmv = {
- .vector_name = "Noname",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_LCA_IO,
- .machine_check = lca_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 16,
- .device_interrupt = srm_device_interrupt,
-
- .init_arch = lca_init_arch,
- .init_irq = sio_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = noname_init_pci,
- .kill_arch = sio_kill_arch,
- .pci_map_irq = noname_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .sio = {
- /* For UDB, the only available PCI slot must not map to IRQ 9,
- since that's the builtin MSS sound chip. That PCI slot
- will map to PIRQ1 (for INTA at least), so we give it IRQ 15
- instead.
-
- Unfortunately we have to do this for NONAME as well, since
- they are co-indicated when the platform type "Noname" is
- selected... :-( */
-
- .route_tab = 0x0b0a0f0d,
- }}
-};
-ALIAS_MV(noname)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
-struct alpha_machine_vector p2k_mv __initmv = {
- .vector_name = "Platform2000",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_LCA_IO,
- .machine_check = lca_machine_check,
- .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- .nr_irqs = 16,
- .device_interrupt = srm_device_interrupt,
-
- .init_arch = lca_init_arch,
- .init_irq = sio_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = noname_init_pci,
- .kill_arch = sio_kill_arch,
- .pci_map_irq = p2k_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .sio = {
- .route_tab = 0x0b0a090f,
- }}
-};
-ALIAS_MV(p2k)
-#endif
-
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
-struct alpha_machine_vector xl_mv __initmv = {
- .vector_name = "XL",
- DO_EV4_MMU,
- DO_DEFAULT_RTC,
- DO_APECS_IO,
- .machine_check = apecs_machine_check,
- .max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS,
- .min_io_address = DEFAULT_IO_BASE,
- .min_mem_address = XL_DEFAULT_MEM_BASE,
-
- .nr_irqs = 16,
- .device_interrupt = isa_device_interrupt,
-
- .init_arch = apecs_init_arch,
- .init_irq = sio_init_irq,
- .init_rtc = common_init_rtc,
- .init_pci = noname_init_pci,
- .kill_arch = sio_kill_arch,
- .pci_map_irq = noname_map_irq,
- .pci_swizzle = common_swizzle,
-
- .sys = { .sio = {
- .route_tab = 0x0b0a090f,
- }}
-};
-ALIAS_MV(xl)
-#endif
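
(An aside, not part of the patch: the route_tab values in the machine vectors above pack one ISA IRQ per byte, byte 0 for PIRQ0 through byte 3 for PIRQ3, and noname_map_irq() pulls the byte out with __kernel_extbl(). Below is a minimal, stand-alone C sketch of that decoding; the helper name pirq_to_isa_irq() is hypothetical.)

#include <stdio.h>

/* Hypothetical helper doing what __kernel_extbl(route_tab, pirq) does above. */
static unsigned int pirq_to_isa_irq(unsigned long route_tab, unsigned int pirq)
{
	return (route_tab >> (8 * pirq)) & 0xff;	/* byte N selects PIRQN's IRQ */
}

int main(void)
{
	/* 0x0b0a090f: PIRQ0->15, PIRQ1->9, PIRQ2->10, PIRQ3->11, matching the
	 * "sio_route_tab selects irq routing" comment removed above. */
	for (unsigned int pirq = 0; pirq < 4; pirq++)
		printf("PIRQ%u -> irq %u\n", pirq, pirq_to_isa_irq(0x0b0a090f, pirq));
	return 0;
}
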
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 8ff110826ce2..26cce7e7f70b 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -474,7 +474,7 @@
542 common fsmount sys_fsmount
543 common fspick sys_fspick
544 common pidfd_open sys_pidfd_open
-# 545 reserved for clone3
+545 common clone3 alpha_clone3
546 common close_range sys_close_range
547 common openat2 sys_openat2
548 common pidfd_getfd sys_pidfd_getfd
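
(Illustration only, not part of the patch: with 545 wired to alpha_clone3 above, user space on alpha can reach clone3 by raw syscall number. The sketch below assumes a libc without a clone3 wrapper and hard-codes 545 from the table; other architectures use different numbers.)

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;	/* plain fork-like child */

	pid = syscall(545 /* clone3 on alpha, per the table above */,
		      &args, sizeof(args));
	if (pid == 0)
		_exit(0);		/* child */
	if (pid < 0)
		return 1;		/* ENOSYS on kernels without this entry */
	waitpid(pid, NULL, 0);
	return 0;
}
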
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 7fc72aeb7398..6afae65e9a8b 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -30,39 +30,6 @@
#include "proto.h"
-/* Work-around for some SRMs which mishandle opDEC faults. */
-
-static int opDEC_fix;
-
-static void
-opDEC_check(void)
-{
- __asm__ __volatile__ (
- /* Load the address of... */
- " br $16, 1f\n"
- /* A stub instruction fault handler. Just add 4 to the
- pc and continue. */
- " ldq $16, 8($sp)\n"
- " addq $16, 4, $16\n"
- " stq $16, 8($sp)\n"
- " call_pal %[rti]\n"
- /* Install the instruction fault handler. */
- "1: lda $17, 3\n"
- " call_pal %[wrent]\n"
- /* With that in place, the fault from the round-to-minf fp
- insn will arrive either at the "lda 4" insn (bad) or one
- past that (good). This places the correct fixup in %0. */
- " lda %[fix], 0\n"
- " cvttq/svm $f31,$f31\n"
- " lda %[fix], 4"
- : [fix] "=r" (opDEC_fix)
- : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
- : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
-
- if (opDEC_fix)
- printk("opDEC fixup enabled.\n");
-}
-
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
@@ -353,32 +320,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
return;
case 4: /* opDEC */
- if (implver() == IMPLVER_EV4) {
- long si_code;
-
- /* The some versions of SRM do not handle
- the opDEC properly - they return the PC of the
- opDEC fault, not the instruction after as the
- Alpha architecture requires. Here we fix it up.
- We do this by intentionally causing an opDEC
- fault during the boot sequence and testing if
- we get the correct PC. If not, we set a flag
- to correct it every time through. */
- regs->pc += opDEC_fix;
-
- /* EV4 does not implement anything except normal
- rounding. Everything else will come here as
- an illegal instruction. Emulate them. */
- si_code = alpha_fp_emul(regs->pc - 4);
- if (si_code == 0)
- return;
- if (si_code > 0) {
- send_sig_fault_trapno(SIGFPE, si_code,
- (void __user *) regs->pc,
- 0, current);
- return;
- }
- }
break;
case 5: /* illoc */
@@ -979,11 +920,6 @@ trap_init(void)
register unsigned long gptr __asm__("$29");
wrkgp(gptr);
- /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
- a bug in the handling of the opDEC fault. Fix it up if so. */
- if (implver() == IMPLVER_EV4)
- opDEC_check();
-
wrent(entArith, 1);
wrent(entMM, 2);
wrent(entIF, 3);
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 6a779b9018fd..84046e730e6d 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -44,17 +44,3 @@ AFLAGS___remlu.o = -DREM -DINTSIZE
$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
$(src)/$(ev6-y)divide.S FORCE
$(call if_changed_rule,as_o_S)
-
-# There are direct branches between {str*cpy,str*cat} and stx*cpy.
-# Ensure the branches are within range by merging these objects.
-
-LDFLAGS_stycpy.o := -r
-LDFLAGS_styncpy.o := -r
-
-$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
- $(obj)/$(ev6-y)stxcpy.o FORCE
- $(call if_changed,ld)
-
-$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
- $(obj)/$(ev6-y)stxncpy.o FORCE
- $(call if_changed,ld)
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 3f35c3ed6948..27b2a9edf3cc 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/string.h>
+#include <net/checksum.h>
#include <asm/byteorder.h>
+#include <asm/checksum.h>
static inline unsigned short from64to16(unsigned long x)
{
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 7c08b225261c..9a238e7536ae 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -8,7 +8,9 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/preempt.h>
+#include <asm/fpu.h>
#include <asm/thread_info.h>
+#include <asm/fpu.h>
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
index cbac3dc6d963..78b6850a9d53 100644
--- a/arch/alpha/lib/memcpy.c
+++ b/arch/alpha/lib/memcpy.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/string.h>
/*
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
@@ -150,6 +151,8 @@ static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
DO_REST_ALIGNED_DN(d,s,n);
}
+#undef memcpy
+
void * memcpy(void * dest, const void *src, size_t n)
{
if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
diff --git a/arch/alpha/lib/stycpy.S b/arch/alpha/lib/stycpy.S
new file mode 100644
index 000000000000..32ecd9c5f90d
--- /dev/null
+++ b/arch/alpha/lib/stycpy.S
@@ -0,0 +1,11 @@
+#include "strcpy.S"
+#ifdef CONFIG_ALPHA_EV67
+#include "ev67-strcat.S"
+#else
+#include "strcat.S"
+#endif
+#ifdef CONFIG_ALPHA_EV6
+#include "ev6-stxcpy.S"
+#else
+#include "stxcpy.S"
+#endif
diff --git a/arch/alpha/lib/styncpy.S b/arch/alpha/lib/styncpy.S
new file mode 100644
index 000000000000..72fc2754eb57
--- /dev/null
+++ b/arch/alpha/lib/styncpy.S
@@ -0,0 +1,11 @@
+#include "strncpy.S"
+#ifdef CONFIG_ALPHA_EV67
+#include "ev67-strncat.S"
+#else
+#include "strncat.S"
+#endif
+#ifdef CONFIG_ALPHA_EV6
+#include "ev6-stxncpy.S"
+#else
+#include "stxncpy.S"
+#endif
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index 4212258f3cfd..68d420bfd3c0 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
+#include <asm/fpu.h>
#include <linux/uaccess.h>
@@ -45,12 +46,6 @@
#define MISC_TRAPB 0x0000
#define MISC_EXCB 0x0400
-extern unsigned long alpha_read_fp_reg (unsigned long reg);
-extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
-extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
-extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
-
-
#ifdef MODULE
MODULE_DESCRIPTION("FP Software completion module");
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index a155180d7a83..4fe618446e4c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -33,7 +33,7 @@
#include <asm/setup.h>
#include <asm/sections.h>
-extern void die_if_kernel(char *,struct pt_regs *,long);
+#include "../kernel/proto.h"
static struct pcb_struct original_pcb;
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
index b94102fff68b..20ea7dd482d4 100644
--- a/arch/arc/Kbuild
+++ b/arch/arc/Kbuild
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += kernel/
obj-y += mm/
+obj-y += net/
# for cleaning
subdir- += boot
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 99d2845f3feb..fd0b0a0d4686 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,7 +6,6 @@
config ARC
def_bool y
select ARC_TIMERS
- select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
@@ -52,6 +51,7 @@ config ARC
select PCI_SYSCALL if PCI
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT
+ select HAVE_EBPF_JIT if ISA_ARCV2
config LOCKDEP_SUPPORT
def_bool y
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 5648748c285f..5a8550124b73 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-# uImage build relies on mkimage being availble on your host for ARC target
+# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
-# and make sure it's reacable from your PATH
+# and make sure it's reachable from your PATH
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 4237aa5de3a3..48704dfdf75c 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -10,8 +10,7 @@ obj-y += $(builtindtb-y).dtb.o
dtb-y := $(builtindtb-y).dtb
# for CONFIG_OF_ALL_DTBS test
-dtstree := $(srctree)/$(src)
-dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb- := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
# board-specific dtc flags
DTC_FLAGS_hsdk += --pad 20
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 3434c8131ecd..c0a812674ce9 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -119,9 +119,9 @@
/*
* The DW APB ICTL intc on MB is connected to CPU intc via a
* DT "invisible" DW APB GPIO block, configured to simply pass thru
- * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+ * interrupts - setup accordingly in platform init (plat-axs10x/ax10x.c)
*
- * So here we mimic a direct connection betwen them, ignoring the
+ * So here we mimic a direct connection between them, ignoring the
* ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
* instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
*
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 6691f4255077..41b980df862b 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
- #interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 90a412026e64..0e0e2d337bf8 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -113,7 +113,7 @@
/*
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
- * This node is intentionally put outside of MB above becase
+ * This node is intentionally put outside of MB above because
* it maps areas outside of MB's 0xez-0xfz.
*/
uio_ev: uio@d0000000 {
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
deleted file mode 100644
index 05fc7ed59712..000000000000
--- a/arch/arc/include/asm/cachetype.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARC_CACHETYPE_H
-#define __ASM_ARC_CACHETYPE_H
-
-#include <linux/types.h>
-
-#define cpu_dcache_is_aliasing() true
-
-#endif
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e56704..f496dbc4640b 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -12,7 +12,7 @@
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 92c3e9f13252..00946fe04c9b 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index cf1ba376e992..38c35722cebf 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
deleted file mode 100644
index 9c2383d29cbb..000000000000
--- a/arch/arc/include/asm/fb.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c574712ad865..9cd79263acba 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d64c..936a2f21f315 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -46,7 +46,7 @@
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h
index ed9036d4ede3..d85dc0721907 100644
--- a/arch/arc/include/asm/mmu-arcv2.h
+++ b/arch/arc/include/asm/mmu-arcv2.h
@@ -9,6 +9,8 @@
#ifndef _ASM_ARC_MMU_ARCV2_H
#define _ASM_ARC_MMU_ARCV2_H
+#include <soc/arc/aux.h>
+
/*
* TLB Management regs
*/
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f05b..9963bb1a5733 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index f3eea3f30b2e..8ebec1b21d24 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 00b9318e551e..cf79df0b2570 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -169,7 +169,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464ffd..719112af0f41 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index e0913f52c2cd..990f834909f0 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 4c530cf131f3..12daaf3a61ea 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 02109cd48ee1..8d1f1ef44ba7 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2e49c81c8086..e238b5fd3c8c 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -5,7 +5,7 @@
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
-#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
@@ -31,7 +31,7 @@ VECTOR res_service ; Reset Vector
VECTOR mem_service ; Mem exception
VECTOR instr_service ; Instrn Error
VECTOR EV_MachineCheck ; Fatal Machine check
-VECTOR EV_TLBMissI ; Intruction TLB miss
+VECTOR EV_TLBMissI ; Instruction TLB miss
VECTOR EV_TLBMissD ; Data TLB miss
VECTOR EV_TLBProtV ; Protection Violation
VECTOR EV_PrivilegeV ; Privilege Violation
@@ -76,11 +76,11 @@ ENTRY(handle_interrupt)
# query in hard ISR path would return false (since .IE is set) which would
# trips genirq interrupt handling asserts.
#
- # So do a "soft" disable of interrutps here.
+ # So do a "soft" disable of interrupts here.
#
# Note this disable is only for consistent book-keeping as further interrupts
# will be disabled anyways even w/o this. Hardware tracks active interrupts
- # seperately in AUX_IRQ_ACT.active and will not take new interrupts
+ # separately in AUX_IRQ_ACT.active and will not take new interrupts
# unless this one returns (or higher prio becomes pending in 2-prio scheme)
IRQ_DISABLE
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 089f6680518f..3c7e74aba679 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -95,7 +95,7 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
- ; MC excpetions disable MMU
+ ; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r10, 8
@@ -209,7 +209,7 @@ trap_with_param:
; ---------------------------------------------
; syscall TRAP
-; ABI: (r0-r7) upto 8 args, (r8) syscall number
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------
ENTRY(EV_Trap)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 9152782444b5..8d541f53fae3 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -165,7 +165,7 @@ ENTRY(first_lines_of_secondary)
; setup stack (fp, sp)
mov fp, 0
- ; set it's stack base to tsk->thread_info bottom
+ ; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 678898757e47..f324f0e3341a 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -56,7 +56,7 @@ void arc_init_IRQ(void)
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
/*
- * ARCv2 core intc provides multiple interrupt priorities (upto 16).
+ * ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index e71d64119d71..f8e2960832d9 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -190,7 +190,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
}
}
-int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+static int
+__kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
@@ -241,8 +242,8 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
return 0;
}
-static int __kprobes arc_post_kprobe_handler(unsigned long addr,
- struct pt_regs *regs)
+static int
+__kprobes arc_post_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index adff957962da..6e5a651cd75c 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -38,7 +38,7 @@
* (based on a specific RTL build)
* Below is the static map between perf generic/arc specific event_id and
* h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
+ * At the time of probe, we loop thru each index and find its name to
* complete the mapping of perf event_id to h/w index as latter is needed
* to program the counter really
*/
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index d08a5092c2b4..7b6a9beba9db 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -390,7 +390,7 @@ static void arc_chk_core_config(struct cpuinfo_arc *info)
#ifdef CONFIG_ARC_HAS_DCCM
/*
* DCCM can be arbit placed in hardware.
- * Make sure it's placement/sz matches what Linux is built with
+ * Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 8f6f4a542964..fefa705a8638 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -8,15 +8,16 @@
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
- * -do_signal() no loner needs oldset, required by OLD sys_sigsuspend
- * -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard arch
+ * implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
- * to avoid kernel synthesing it on user stack at runtime, costing TLB
+ * to avoid kernel synthesizing it on user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 9b9570b79362..a19751e824fb 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -89,7 +89,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs,
/*
* Entry point for miscll errors such as Nested Exceptions
- * -Duplicate TLB entry is handled seperately though
+ * -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 549c3f407918..61a1b2b96e1d 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -41,8 +41,8 @@ SECTIONS
#endif
/*
- * The reason for having a seperate subsection .init.ramfs is to
- * prevent objump from including it in kernel dumps
+ * The reason for having a separate subsection .init.ramfs is to
+ * prevent objdump from including it in kernel dumps
*
* Reason for having .init.ramfs above .init is to make sure that the
* binary blob is tucked away to one side, reducing the displacement
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 197707bc7658..6b85e94f3275 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
/*
* Plug in direct dma map ops.
*/
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3c1c7ae73292..69a915297155 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -27,7 +27,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/*
* We enforce the MAP_FIXED case.
@@ -51,11 +51,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ad702b49aeb3..cae4a7aae0ed 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
- * its better to move to a new ASID rather than searching for
+ * it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
- * Lets see the use cases when current->mm != vma->mm and we land here
+ * Let's see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index e054780a8fe0..dc65e87a531f 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -5,19 +5,19 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: April 2011 :
- * -MMU v1: moved out legacy code into a seperate file
+ * -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
- * -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- * entry, so that it doesn't knock out it's I-TLB entry
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out its I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
- * Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ * Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible
diff --git a/arch/arc/net/Makefile b/arch/arc/net/Makefile
new file mode 100644
index 000000000000..ea5790952e9a
--- /dev/null
+++ b/arch/arc/net/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ifeq ($(CONFIG_ISA_ARCV2),y)
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_arcv2.o
+endif
diff --git a/arch/arc/net/bpf_jit.h b/arch/arc/net/bpf_jit.h
new file mode 100644
index 000000000000..ec44873c42d1
--- /dev/null
+++ b/arch/arc/net/bpf_jit.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The interface that a back-end should provide to bpf_jit_core.c.
+ *
+ * Copyright (c) 2024 Synopsys Inc.
+ * Author: Shahab Vahedi <shahab@synopsys.com>
+ */
+
+#ifndef _ARC_BPF_JIT_H
+#define _ARC_BPF_JIT_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
+/* Print debug info and assert. */
+//#define ARC_BPF_JIT_DEBUG
+
+/* Determine the address type of the target. */
+#ifdef CONFIG_ISA_ARCV2
+#define ARC_ADDR u32
+#endif
+
+/*
+ * For the translation of some BPF instructions, a temporary register
+ * might be needed for some interim data.
+ */
+#define JIT_REG_TMP MAX_BPF_JIT_REG
+
+/*
+ * Buffer access: If buffer "b" is not NULL, advance by "n" bytes.
+ *
+ * This macro must be used in any place that potentially requires a
+ * "buf + len". This way, we make sure that the "buf" argument for
+ * the underlying "arc_*(buf, ...)" ends up as NULL instead of something
+ * like "0+4" or "0+8", etc. Those "arc_*()" functions check their "buf"
+ * value to decide if instructions should be emitted or not.
+ */
+#define BUF(b, n) (((b) != NULL) ? ((b) + (n)) : (b))
+
+/************** Functions that the back-end must provide **************/
+/* Extension for 32-bit operations. */
+inline u8 zext(u8 *buf, u8 rd);
+/***** Moves *****/
+u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
+u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
+u8 mov_r64(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
+u8 mov_r64_i32(u8 *buf, u8 reg, s32 imm);
+u8 mov_r64_i64(u8 *buf, u8 reg, u32 lo, u32 hi);
+/***** Loads and stores *****/
+u8 load_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size, bool sign_ext);
+u8 store_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size);
+u8 store_i(u8 *buf, s32 imm, u8 rd, s16 off, u8 size);
+/***** Addition *****/
+u8 add_r32(u8 *buf, u8 rd, u8 rs);
+u8 add_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 add_r64(u8 *buf, u8 rd, u8 rs);
+u8 add_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Subtraction *****/
+u8 sub_r32(u8 *buf, u8 rd, u8 rs);
+u8 sub_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 sub_r64(u8 *buf, u8 rd, u8 rs);
+u8 sub_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Multiplication *****/
+u8 mul_r32(u8 *buf, u8 rd, u8 rs);
+u8 mul_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 mul_r64(u8 *buf, u8 rd, u8 rs);
+u8 mul_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Division *****/
+u8 div_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
+u8 div_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
+/***** Remainder *****/
+u8 mod_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
+u8 mod_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
+/***** Bitwise AND *****/
+u8 and_r32(u8 *buf, u8 rd, u8 rs);
+u8 and_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 and_r64(u8 *buf, u8 rd, u8 rs);
+u8 and_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Bitwise OR *****/
+u8 or_r32(u8 *buf, u8 rd, u8 rs);
+u8 or_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 or_r64(u8 *buf, u8 rd, u8 rs);
+u8 or_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Bitwise XOR *****/
+u8 xor_r32(u8 *buf, u8 rd, u8 rs);
+u8 xor_r32_i32(u8 *buf, u8 rd, s32 imm);
+u8 xor_r64(u8 *buf, u8 rd, u8 rs);
+u8 xor_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Bitwise Negate *****/
+u8 neg_r32(u8 *buf, u8 r);
+u8 neg_r64(u8 *buf, u8 r);
+/***** Bitwise left shift *****/
+u8 lsh_r32(u8 *buf, u8 rd, u8 rs);
+u8 lsh_r32_i32(u8 *buf, u8 rd, u8 imm);
+u8 lsh_r64(u8 *buf, u8 rd, u8 rs);
+u8 lsh_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Bitwise right shift (logical) *****/
+u8 rsh_r32(u8 *buf, u8 rd, u8 rs);
+u8 rsh_r32_i32(u8 *buf, u8 rd, u8 imm);
+u8 rsh_r64(u8 *buf, u8 rd, u8 rs);
+u8 rsh_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Bitwise right shift (arithmetic) *****/
+u8 arsh_r32(u8 *buf, u8 rd, u8 rs);
+u8 arsh_r32_i32(u8 *buf, u8 rd, u8 imm);
+u8 arsh_r64(u8 *buf, u8 rd, u8 rs);
+u8 arsh_r64_i32(u8 *buf, u8 rd, s32 imm);
+/***** Frame related *****/
+u32 mask_for_used_regs(u8 bpf_reg, bool is_call);
+u8 arc_prologue(u8 *buf, u32 usage, u16 frame_size);
+u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size);
+/***** Jumps *****/
+/*
+ * Different sorts of conditions (ARC enum as opposed to BPF_*).
+ *
+ * Do not change the order of enums here. ARC_CC_SLE+1 is used
+ * to determine the number of JCCs.
+ */
+enum ARC_CC {
+ ARC_CC_UGT = 0, /* unsigned > */
+ ARC_CC_UGE, /* unsigned >= */
+ ARC_CC_ULT, /* unsigned < */
+ ARC_CC_ULE, /* unsigned <= */
+ ARC_CC_SGT, /* signed > */
+ ARC_CC_SGE, /* signed >= */
+ ARC_CC_SLT, /* signed < */
+ ARC_CC_SLE, /* signed <= */
+ ARC_CC_AL, /* always */
+ ARC_CC_EQ, /* == */
+ ARC_CC_NE, /* != */
+ ARC_CC_SET, /* test */
+ ARC_CC_LAST
+};
+
+/*
+ * A few notes:
+ *
+ * - check_jmp_*() are prerequisites before calling the gen_jmp_*().
+ * They return "true" if the jump is possible and "false" otherwise.
+ *
+ * - The notion of "*_off" is to emphasize that these parameters are
+ * merely offsets in the JIT stream and not absolute addresses. One
+ * can look at them as addresses if the JIT code would start from
+ * address 0x0000_0000. Nonetheless, since the buffer address for the
+ * JIT is on a word-aligned address, this works and actually makes
+ * things simpler (offsets are in the range of u32 which is more than
+ * enough).
+ */
+bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond);
+bool check_jmp_64(u32 curr_off, u32 targ_off, u8 cond);
+u8 gen_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
+u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
+/***** Miscellaneous *****/
+u8 gen_func_call(u8 *buf, ARC_ADDR func_addr, bool external_func);
+u8 arc_to_bpf_return(u8 *buf);
+/*
+ * - Perform byte swaps on "rd" based on the "size".
+ * - If "force" is set, do it unconditionally. Otherwise, consider the
+ * desired "endian"ness and the host endianness.
+ * - For data "size"s up to 32 bits, perform a zero-extension if asked
+ * by the "do_zext" boolean.
+ */
+u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext);
+
+#endif /* _ARC_BPF_JIT_H */
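
(A hedged sketch, not taken from the patch: the BUF() comment above implies a two-pass scheme in which the back-end helpers are first called with a NULL buffer to measure the image and then with a real buffer to emit it. The wrapper name emit_mov64() is hypothetical, and it assumes the u8 return value of mov_r64() is the number of bytes emitted.)

#include "bpf_jit.h"

static u32 emit_mov64(u8 *buf, u32 len, u8 rd, u8 rs)
{
	/* BUF() keeps the pointer NULL on the sizing pass, so the back-end
	 * only counts bytes instead of writing to memory. */
	return len + mov_r64(BUF(buf, len), rd, rs, false);
}

/* Pass 1: len = emit_mov64(NULL, len, rd, rs) accumulates the image size.
 * Pass 2: the same call against a real, word-aligned buffer generates code. */
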
diff --git a/arch/arc/net/bpf_jit_arcv2.c b/arch/arc/net/bpf_jit_arcv2.c
new file mode 100644
index 000000000000..31bfb6e9ce00
--- /dev/null
+++ b/arch/arc/net/bpf_jit_arcv2.c
@@ -0,0 +1,3005 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The ARCv2 backend of Just-In-Time compiler for eBPF bytecode.
+ *
+ * Copyright (c) 2024 Synopsys Inc.
+ * Author: Shahab Vahedi <shahab@synopsys.com>
+ */
+#include <linux/bug.h>
+#include "bpf_jit.h"
+
+/* ARC core registers. */
+enum {
+ ARC_R_0, ARC_R_1, ARC_R_2, ARC_R_3, ARC_R_4, ARC_R_5,
+ ARC_R_6, ARC_R_7, ARC_R_8, ARC_R_9, ARC_R_10, ARC_R_11,
+ ARC_R_12, ARC_R_13, ARC_R_14, ARC_R_15, ARC_R_16, ARC_R_17,
+ ARC_R_18, ARC_R_19, ARC_R_20, ARC_R_21, ARC_R_22, ARC_R_23,
+ ARC_R_24, ARC_R_25, ARC_R_26, ARC_R_FP, ARC_R_SP, ARC_R_ILINK,
+ ARC_R_30, ARC_R_BLINK,
+ /*
+ * Having ARC_R_IMM encoded as source register means there is an
+ * immediate that must be interpreted from the next 4 bytes. If
+ * encoded as the destination register though, it implies that the
+ * output of the operation is not assigned to any register. The
+ * latter is helpful if we only care about updating the CPU status
+ * flags.
+ */
+ ARC_R_IMM = 62
+};
+
+/*
+ * Remarks about the rationale behind the chosen mapping:
+ *
+ * - BPF_REG_{1,2,3,4} are the argument registers and must be mapped to
+ * argument registers in ARCv2 ABI: r0-r7. The r7 registers is the last
+ * argument register in the ABI. Therefore BPF_REG_5, as the fifth
+ * argument, must be pushed onto the stack. This is a must for calling
+ * in-kernel functions.
+ *
+ * - In ARCv2 ABI, the return value is in r0 for 32-bit results and (r1,r0)
+ * for 64-bit results. However, because they're already used for BPF_REG_1,
+ * the next available scratch registers, r8 and r9, are the best candidates
+ * for BPF_REG_0. After a "call" to a(n) (in-kernel) function, the result
+ * is "mov"ed to these registers. At a BPF_EXIT, their value is "mov"ed to
+ * (r1,r0).
+ * It is worth mentioning that scratch registers are the best choice for
+ * BPF_REG_0, because it is very popular in BPF instruction encoding.
+ *
+ * - JIT_REG_TMP is an artifact needed to translate some BPF instructions.
+ * Its life span is one single BPF instruction. Since during the
+ * analyze_reg_usage(), it is not known if temporary registers are used,
+ * it is mapped to ARC's scratch registers: r10 and r11. Therefore, they
+ * don't matter in analysing phase and don't need saving. This temporary
+ * register is added as yet another index in the bpf2arc array, so it will
+ * unfold like the rest of registers during the code generation process.
+ *
+ * - Mapping of callee-saved BPF registers, BPF_REG_{6,7,8,9}, starts from
+ * (r15,r14) register pair. The (r13,r12) is not a good choice, because
+ * in ARCv2 ABI, r12 is not a callee-saved register and this can cause
+ * problem when calling an in-kernel function. Theoretically, the mapping
+ * could start from (r14,r13), but it is not a conventional ARCv2 register
+ * pair. To have a future proof design, I opted for this arrangement.
+ * If/when we decide to add ARCv2 instructions that do use register pairs,
+ * the mapping, hopefully, doesn't need to be revisited.
+ */
+const u8 bpf2arc[][2] = {
+ /* Return value from in-kernel function, and exit value from eBPF */
+ [BPF_REG_0] = {ARC_R_8, ARC_R_9},
+ /* Arguments from eBPF program to in-kernel function */
+ [BPF_REG_1] = {ARC_R_0, ARC_R_1},
+ [BPF_REG_2] = {ARC_R_2, ARC_R_3},
+ [BPF_REG_3] = {ARC_R_4, ARC_R_5},
+ [BPF_REG_4] = {ARC_R_6, ARC_R_7},
+ /* Remaining arguments, to be passed on the stack per 32-bit ABI */
+ [BPF_REG_5] = {ARC_R_22, ARC_R_23},
+ /* Callee-saved registers that in-kernel function will preserve */
+ [BPF_REG_6] = {ARC_R_14, ARC_R_15},
+ [BPF_REG_7] = {ARC_R_16, ARC_R_17},
+ [BPF_REG_8] = {ARC_R_18, ARC_R_19},
+ [BPF_REG_9] = {ARC_R_20, ARC_R_21},
+ /* Read-only frame pointer to access the eBPF stack. 32-bit only. */
+ [BPF_REG_FP] = {ARC_R_FP, },
+ /* Register for blinding constants */
+ [BPF_REG_AX] = {ARC_R_24, ARC_R_25},
+ /* Temporary registers for internal use */
+ [JIT_REG_TMP] = {ARC_R_10, ARC_R_11}
+};
+
+#define ARC_CALLEE_SAVED_REG_FIRST ARC_R_13
+#define ARC_CALLEE_SAVED_REG_LAST ARC_R_25
+
+#define REG_LO(r) (bpf2arc[(r)][0])
+#define REG_HI(r) (bpf2arc[(r)][1])
+
+/*
+ * To comply with ARCv2 ABI, BPF's arg5 must be put on stack. After which,
+ * the stack needs to be restored by ARG5_SIZE.
+ */
+#define ARG5_SIZE 8
+
+/* Instruction lengths in bytes. */
+enum {
+ INSN_len_normal = 4, /* Normal instructions length. */
+ INSN_len_imm = 4 /* Length of an extra 32-bit immediate. */
+};
+
+/* ZZ defines the size of operation in encodings that it is used. */
+enum {
+ ZZ_1_byte = 1,
+ ZZ_2_byte = 2,
+ ZZ_4_byte = 0,
+ ZZ_8_byte = 3
+};
+
+/*
+ * AA is mostly about address write back mode. It determines if the
+ * address in question should be updated before usage or after:
+ * addr += offset; data = *addr;
+ * data = *addr; addr += offset;
+ *
+ * In "scaling" mode, the effective address will become the sum
+ * of "address" + "index"*"size". The "size" is specified by the
+ * "ZZ" field. There is no write back when AA is set for scaling:
+ * data = *(addr + offset<<zz)
+ */
+enum {
+ AA_none = 0,
+ AA_pre = 1, /* in assembly known as "a/aw". */
+ AA_post = 2, /* in assembly known as "ab". */
+ AA_scale = 3 /* in assembly known as "as". */
+};
+
+/* X flag determines the mode of extension. */
+enum {
+ X_zero = 0,
+ X_sign = 1
+};
+
+/* Condition codes. */
+enum {
+ CC_always = 0, /* condition is true all the time */
+ CC_equal = 1, /* if status32.z flag is set */
+ CC_unequal = 2, /* if status32.z flag is clear */
+ CC_positive = 3, /* if status32.n flag is clear */
+ CC_negative = 4, /* if status32.n flag is set */
+ CC_less_u = 5, /* less than (unsigned) */
+ CC_less_eq_u = 14, /* less than or equal (unsigned) */
+ CC_great_eq_u = 6, /* greater than or equal (unsigned) */
+ CC_great_u = 13, /* greater than (unsigned) */
+ CC_less_s = 11, /* less than (signed) */
+ CC_less_eq_s = 12, /* less than or equal (signed) */
+ CC_great_eq_s = 10, /* greater than or equal (signed) */
+ CC_great_s = 9 /* greater than (signed) */
+};
+
+#define IN_U6_RANGE(x) ((x) <= (0x40 - 1) && (x) >= 0)
+#define IN_S9_RANGE(x) ((x) <= (0x100 - 1) && (x) >= -0x100)
+#define IN_S12_RANGE(x) ((x) <= (0x800 - 1) && (x) >= -0x800)
+#define IN_S21_RANGE(x) ((x) <= (0x100000 - 1) && (x) >= -0x100000)
+#define IN_S25_RANGE(x) ((x) <= (0x1000000 - 1) && (x) >= -0x1000000)
+
+/* Operands in most of the encodings. */
+#define OP_A(x) ((x) & 0x03f)
+#define OP_B(x) ((((x) & 0x07) << 24) | (((x) & 0x38) << 9))
+#define OP_C(x) (((x) & 0x03f) << 6)
+#define OP_IMM (OP_C(ARC_R_IMM))
+#define COND(x) (OP_A((x) & 31))
+#define FLAG(x) (((x) & 1) << 15)
+
+/*
+ * The 4-byte encoding of "mov b,c":
+ *
+ * 0010_0bbb 0000_1010 0BBB_cccc cc00_0000
+ *
+ * b: BBBbbb destination register
+ * c: cccccc source register
+ */
+#define OPC_MOV 0x200a0000
+
+/*
+ * The 4-byte encoding of "mov b,s12" (used for moving small immediates):
+ *
+ * 0010_0bbb 1000_1010 0BBB_ssss ssSS_SSSS
+ *
+ * b: BBBbbb destination register
+ * s: SSSSSSssssss source immediate (signed)
+ */
+#define OPC_MOVI 0x208a0000
+#define MOVI_S12(x) ((((x) & 0xfc0) >> 6) | (((x) & 0x3f) << 6))
+
+/*
+ * The 4-byte encoding of "mov[.qq] b,u6", used for conditional
+ * moving of even smaller immediates:
+ *
+ * 0010_0bbb 1100_1010 0BBB_cccc cciq_qqqq
+ *
+ * qq: qqqqq condition code
+ * i: If set, c is considered a 6-bit immediate, else a reg.
+ *
+ * b: BBBbbb destination register
+ * c: cccccc source
+ */
+#define OPC_MOV_CC 0x20ca0000
+#define MOV_CC_I BIT(5)
+#define OPC_MOVU_CC (OPC_MOV_CC | MOV_CC_I)
+
+/*
+ * The 4-byte encoding of "sexb b,c" (8-bit sign extension):
+ *
+ * 0010_0bbb 0010_1111 0BBB_cccc cc00_0101
+ *
+ * b: BBBbbb destination register
+ * c: cccccc source register
+ */
+#define OPC_SEXB 0x202f0005
+
+/*
+ * The 4-byte encoding of "sexh b,c" (16-bit sign extension):
+ *
+ * 0010_0bbb 0010_1111 0BBB_cccc cc00_0110
+ *
+ * b: BBBbbb destination register
+ * c: cccccc source register
+ */
+#define OPC_SEXH 0x202f0006
+
+/*
+ * The 4-byte encoding of "ld[zz][.x][.aa] c,[b,s9]":
+ *
+ * 0001_0bbb ssss_ssss SBBB_0aaz zxcc_cccc
+ *
+ * zz: size mode
+ * aa: address write back mode
+ * x: extension mode
+ *
+ * s9: S_ssss_ssss 9-bit signed number
+ * b: BBBbbb source reg for address
+ * c: cccccc destination register
+ */
+#define OPC_LOAD 0x10000000
+#define LOAD_X(x) ((x) << 6)
+#define LOAD_ZZ(x) ((x) << 7)
+#define LOAD_AA(x) ((x) << 9)
+#define LOAD_S9(x) ((((x) & 0x0ff) << 16) | (((x) & 0x100) << 7))
+#define LOAD_C(x) ((x) & 0x03f)
+/* Unsigned and signed loads. */
+#define OPC_LDU (OPC_LOAD | LOAD_X(X_zero))
+#define OPC_LDS (OPC_LOAD | LOAD_X(X_sign))
+/* 32-bit load. */
+#define OPC_LD32 (OPC_LDU | LOAD_ZZ(ZZ_4_byte))
+/* "pop reg" is merely a "ld.ab reg,[sp,4]". */
+#define OPC_POP \
+ (OPC_LD32 | LOAD_AA(AA_post) | LOAD_S9(4) | OP_B(ARC_R_SP))
+
+/*
+ * The 4-byte encoding of "st[zz][.aa] c,[b,s9]":
+ *
+ * 0001_1bbb ssss_ssss SBBB_cccc cc0a_azz0
+ *
+ * zz: zz size mode
+ * aa: aa address write back mode
+ *
+ * s9: S_ssss_ssss 9-bit signed number
+ * b: BBBbbb source reg for address
+ * c: cccccc source reg to be stored
+ */
+#define OPC_STORE 0x18000000
+#define STORE_ZZ(x) ((x) << 1)
+#define STORE_AA(x) ((x) << 3)
+#define STORE_S9(x) ((((x) & 0x0ff) << 16) | (((x) & 0x100) << 7))
+/* 32-bit store. */
+#define OPC_ST32 (OPC_STORE | STORE_ZZ(ZZ_4_byte))
+/* "push reg" is merely a "st.aw reg,[sp,-4]". */
+#define OPC_PUSH \
+ (OPC_ST32 | STORE_AA(AA_pre) | STORE_S9(-4) | OP_B(ARC_R_SP))
+
+/*
+ * The 4-byte encoding of "add a,b,c":
+ *
+ * 0010_0bbb 0i00_0000 fBBB_cccc ccaa_aaaa
+ *
+ * f: indicates if flags (carry, etc.) should be updated
+ * i: If set, c is considered a 6-bit immediate, else a reg.
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_ADD 0x20000000
+/* Addition with updating the pertinent flags in "status32" register. */
+#define OPC_ADDF (OPC_ADD | FLAG(1))
+#define ADDI BIT(22)
+#define ADDI_U6(x) OP_C(x)
+#define OPC_ADDI (OPC_ADD | ADDI)
+#define OPC_ADDIF (OPC_ADDI | FLAG(1))
+#define OPC_ADD_I (OPC_ADD | OP_IMM)
+
+/*
+ * The 4-byte encoding of "adc a,b,c" (addition with carry):
+ *
+ * 0010_0bbb 0i00_0001 0BBB_cccc ccaa_aaaa
+ *
+ * i: if set, c is considered a 6-bit immediate, else a reg.
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_ADC 0x20010000
+#define ADCI BIT(22)
+#define ADCI_U6(x) OP_C(x)
+#define OPC_ADCI (OPC_ADC | ADCI)
+
+/*
+ * The 4-byte encoding of "sub a,b,c":
+ *
+ * 0010_0bbb 0i00_0010 fBBB_cccc ccaa_aaaa
+ *
+ * f: indicates if flags (carry, etc.) should be updated
+ * i: if set, c is considered a 6-bit immediate, else a reg.
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_SUB 0x20020000
+/* Subtraction with updating the pertinent flags in "status32" register. */
+#define OPC_SUBF (OPC_SUB | FLAG(1))
+#define SUBI BIT(22)
+#define SUBI_U6(x) OP_C(x)
+#define OPC_SUBI (OPC_SUB | SUBI)
+#define OPC_SUB_I (OPC_SUB | OP_IMM)
+
+/*
+ * The 4-byte encoding of "sbc a,b,c" (subtraction with carry):
+ *
+ * 0010_0bbb 0000_0011 fBBB_cccc ccaa_aaaa
+ *
+ * f: indicates if flags (carry, etc.) should be updated
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_SBC 0x20030000
+
+/*
+ * The 4-byte encoding of "cmp[.qq] b,c":
+ *
+ * 0010_0bbb 1100_1100 1BBB_cccc cc0q_qqqq
+ *
+ * qq: qqqqq condition code
+ *
+ * b: BBBbbb the 1st operand
+ * c: cccccc the 2nd operand
+ */
+#define OPC_CMP 0x20cc8000
+
+/*
+ * The 4-byte encoding of "neg a,b":
+ *
+ * 0010_0bbb 0100_1110 0BBB_0000 00aa_aaaa
+ *
+ * a: aaaaaa result
+ * b: BBBbbb input
+ */
+#define OPC_NEG 0x204e0000
+
+/*
+ * The 4-byte encoding of "mpy a,b,c".
+ * mpy is the signed 32-bit multiplication with the lower 32-bit
+ * of the product as the result.
+ *
+ * 0010_0bbb 0001_1010 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_MPY 0x201a0000
+#define OPC_MPYI (OPC_MPY | OP_IMM)
+
+/*
+ * The 4-byte encoding of "mpydu a,b,c".
+ * mpydu is the unsigned 32-bit multiplication with the lower 32-bit of
+ * the product in register "a" and the higher 32-bit in register "a+1".
+ *
+ * 0010_1bbb 0001_1001 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa 64-bit result in registers (R_a+1,R_a)
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_MPYDU 0x28190000
+#define OPC_MPYDUI (OPC_MPYDU | OP_IMM)
+
+/*
+ * The 4-byte encoding of "divu a,b,c" (unsigned division):
+ *
+ * 0010_1bbb 0000_0101 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result (quotient)
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand (divisor)
+ */
+#define OPC_DIVU 0x28050000
+#define OPC_DIVUI (OPC_DIVU | OP_IMM)
+
+/*
+ * The 4-byte encoding of "div a,b,c" (signed division):
+ *
+ * 0010_1bbb 0000_0100 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result (quotient)
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand (divisor)
+ */
+#define OPC_DIVS 0x28040000
+#define OPC_DIVSI (OPC_DIVS | OP_IMM)
+
+/*
+ * The 4-byte encoding of "remu a,b,c" (unsigned remainder):
+ *
+ * 0010_1bbb 0000_1001 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result (remainder)
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand (divisor)
+ */
+#define OPC_REMU 0x28090000
+#define OPC_REMUI (OPC_REMU | OP_IMM)
+
+/*
+ * The 4-byte encoding of "rem a,b,c" (signed remainder):
+ *
+ * 0010_1bbb 0000_1000 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result (remainder)
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand (divisor)
+ */
+#define OPC_REMS 0x28080000
+#define OPC_REMSI (OPC_REMS | OP_IMM)
+
+/*
+ * The 4-byte encoding of "and a,b,c":
+ *
+ * 0010_0bbb 0000_0100 fBBB_cccc ccaa_aaaa
+ *
+ * f: indicates if zero and negative flags should be updated
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_AND 0x20040000
+#define OPC_ANDI (OPC_AND | OP_IMM)
+
+/*
+ * The 4-byte encoding of "tst[.qq] b,c".
+ * Checks if the two input operands have any bit set at the same
+ * position.
+ *
+ * 0010_0bbb 1100_1011 1BBB_cccc cc0q_qqqq
+ *
+ * qq: qqqqq condition code
+ *
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_TST 0x20cb8000
+
+/*
+ * The 4-byte encoding of "or a,b,c":
+ *
+ * 0010_0bbb 0000_0101 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_OR 0x20050000
+#define OPC_ORI (OPC_OR | OP_IMM)
+
+/*
+ * The 4-byte encoding of "xor a,b,c":
+ *
+ * 0010_0bbb 0000_0111 0BBB_cccc ccaa_aaaa
+ *
+ * a: aaaaaa result
+ * b: BBBbbb the 1st input operand
+ * c: cccccc the 2nd input operand
+ */
+#define OPC_XOR 0x20070000
+#define OPC_XORI (OPC_XOR | OP_IMM)
+
+/*
+ * The 4-byte encoding of "not b,c":
+ *
+ * 0010_0bbb 0010_1111 0BBB_cccc cc00_1010
+ *
+ * b: BBBbbb result
+ * c: cccccc input
+ */
+#define OPC_NOT 0x202f000a
+
+/*
+ * The 4-byte encoding of "btst b,u6":
+ *
+ * 0010_0bbb 0101_0001 1BBB_uuuu uu00_0000
+ *
+ * b: BBBbbb input number to check
+ * u6: uuuuuu 6-bit unsigned number specifying bit position to check
+ */
+#define OPC_BTSTU6 0x20518000
+#define BTST_U6(x) (OP_C((x) & 63))
+
+/*
+ * The 4-byte encoding of "asl[.qq] b,b,c" (arithmetic shift left):
+ *
+ * 0010_1bbb 0i00_0000 0BBB_cccc ccaa_aaaa
+ *
+ * i: if set, c is considered a 5-bit immediate, else a reg.
+ *
+ * b: BBBbbb result and the first operand (number to be shifted)
+ * c: cccccc amount to be shifted
+ */
+#define OPC_ASL 0x28000000
+#define ASL_I BIT(22)
+#define ASLI_U6(x) OP_C((x) & 31)
+#define OPC_ASLI (OPC_ASL | ASL_I)
+
+/*
+ * The 4-byte encoding of "asr a,b,c" (arithmetic shift right):
+ *
+ * 0010_1bbb 0i00_0010 0BBB_cccc ccaa_aaaa
+ *
+ * i: if set, c is considered a 6-bit immediate, else a reg.
+ *
+ * a: aaaaaa result
+ * b: BBBbbb first input: number to be shifted
+ * c: cccccc second input: amount to be shifted
+ */
+#define OPC_ASR 0x28020000
+#define ASR_I ASL_I
+#define ASRI_U6(x) ASLI_U6(x)
+#define OPC_ASRI (OPC_ASR | ASR_I)
+
+/*
+ * The 4-byte encoding of "lsr a,b,c" (logical shift right):
+ *
+ * 0010_1bbb 0i00_0001 0BBB_cccc ccaa_aaaa
+ *
+ * i: if set, c is considered a 6-bit immediate, else a reg.
+ *
+ * a: aaaaaa result
+ * b: BBBbbb first input: number to be shifted
+ * c: cccccc second input: amount to be shifted
+ */
+#define OPC_LSR 0x28010000
+#define LSR_I ASL_I
+#define LSRI_U6(x) ASLI_U6(x)
+#define OPC_LSRI (OPC_LSR | LSR_I)
+
+/*
+ * The 4-byte encoding of "swape b,c":
+ *
+ * 0010_1bbb 0010_1111 0BBB_cccc cc00_1001
+ *
+ * b: BBBbbb destination register
+ * c: cccccc source register
+ */
+#define OPC_SWAPE 0x282f0009
+
+/*
+ * Encoding for jump to an address in register:
+ * j reg_c
+ *
+ * 0010_0000 1110_0000 0000_cccc cc00_0000
+ *
+ * c: cccccc register holding the destination address
+ */
+#define OPC_JMP 0x20e00000
+/* Jump to "branch-and-link" register, which effectively is a "return". */
+#define OPC_J_BLINK (OPC_JMP | OP_C(ARC_R_BLINK))
+
+/*
+ * Encoding for jump-and-link to an address in register:
+ * jl reg_c
+ *
+ * 0010_0000 0010_0010 0000_cccc cc00_0000
+ *
+ * c: cccccc register holding the destination address
+ */
+#define OPC_JL 0x20220000
+
+/*
+ * Encoding for (conditional) branch to an offset from the current location
+ * that is word aligned: (PC & 0xffff_fffc) + s21
+ * B[qq] s21
+ *
+ * 0000_0sss ssss_sss0 SSSS_SSSS SS0q_qqqq
+ *
+ * qq: qqqqq condition code
+ * s21: SSSS SSSS_SSss ssss_ssss The displacement (21-bit signed)
+ *
+ * The displacement is supposed to be 16-bit (2-byte) aligned. Therefore,
+ * it should be a multiple of 2. Hence, there is an implied '0' bit at its
+ * LSB: S_SSSS SSSS_Ssss ssss_sss0
+ */
+#define OPC_BCC 0x00000000
+#define BCC_S21(d) ((((d) & 0x7fe) << 16) | (((d) & 0x1ff800) >> 5))
+
+/*
+ * Encoding for unconditional branch to an offset from the current location
+ * that is word aligned: (PC & 0xffff_fffc) + s25
+ * B s25
+ *
+ * 0000_0sss ssss_sss1 SSSS_SSSS SS00_TTTT
+ *
+ * s25: TTTT SSSS SSSS_SSss ssss_ssss The displacement (25-bit signed)
+ *
+ * The displacement is supposed to be 16-bit (2-byte) aligned. Therefore,
+ * it should be a multiple of 2. Hence, there is an implied '0' bit at its
+ * LSB: T TTTS_SSSS SSSS_Ssss ssss_sss0
+ */
+#define OPC_B 0x00010000
+#define B_S25(d) ((((d) & 0x1e00000) >> 21) | BCC_S21(d))
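+/*
+ * As an example of the scrambling above, an (even) displacement of
+ * 0x1064 encodes as:
+ *
+ *   bits 10:1  of d = 0x032 -> "s" field (bits 26:17)
+ *   bits 20:11 of d = 0x002 -> "S" field (bits 15:6)
+ *
+ *   BCC_S21(0x1064) = 0x00640080
+ *
+ * Bit 0 of the displacement is the implied '0' and is simply dropped.
+ */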
+
+static inline void emit_2_bytes(u8 *buf, u16 bytes)
+{
+ *((u16 *)buf) = bytes;
+}
+
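+/*
+ * A 4-byte ARC instruction is emitted as two consecutive 16-bit
+ * halfwords, upper halfword first, each stored in the CPU's native
+ * byte order. e.g. 0x218a0100 is laid out as 0x218a followed by 0x0100.
+ */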
+static inline void emit_4_bytes(u8 *buf, u32 bytes)
+{
+ emit_2_bytes(buf, bytes >> 16);
+ emit_2_bytes(buf + 2, bytes & 0xffff);
+}
+
+static inline u8 bpf_to_arc_size(u8 size)
+{
+ switch (size) {
+ case BPF_B:
+ return ZZ_1_byte;
+ case BPF_H:
+ return ZZ_2_byte;
+ case BPF_W:
+ return ZZ_4_byte;
+ case BPF_DW:
+ return ZZ_8_byte;
+ default:
+ return ZZ_4_byte;
+ }
+}
+
+/************** Encoders (Deal with ARC regs) ************/
+
+/* Move an immediate to register with a 4-byte instruction. */
+static u8 arc_movi_r(u8 *buf, u8 reg, s16 imm)
+{
+ const u32 insn = OPC_MOVI | OP_B(reg) | MOVI_S12(imm);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* rd <- rs */
+static u8 arc_mov_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_MOV | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* The emitted code may have different sizes based on "imm". */
+static u8 arc_mov_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_MOV | OP_B(rd) | OP_IMM;
+
+ if (IN_S12_RANGE(imm))
+ return arc_movi_r(buf, rd, imm);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* The emitted code will always have the same size (8). */
+static u8 arc_mov_i_fixed(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_MOV | OP_B(rd) | OP_IMM;
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* Conditional move. */
+static u8 arc_mov_cc_r(u8 *buf, u8 cc, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_MOV_CC | OP_B(rd) | OP_C(rs) | COND(cc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* Conditional move of a small immediate to rd. */
+static u8 arc_movu_cc_r(u8 *buf, u8 cc, u8 rd, u8 imm)
+{
+ const u32 insn = OPC_MOVU_CC | OP_B(rd) | OP_C(imm) | COND(cc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* Sign extension from a byte. */
+static u8 arc_sexb_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_SEXB | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* Sign extension from two bytes. */
+static u8 arc_sexh_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_SEXH | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* st reg, [reg_mem, off] */
+static u8 arc_st_r(u8 *buf, u8 reg, u8 reg_mem, s16 off, u8 zz)
+{
+ const u32 insn = OPC_STORE | STORE_ZZ(zz) | OP_C(reg) |
+ OP_B(reg_mem) | STORE_S9(off);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* st.aw reg, [sp, -4] */
+static u8 arc_push_r(u8 *buf, u8 reg)
+{
+ const u32 insn = OPC_PUSH | OP_C(reg);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* ld reg, [reg_mem, off] (unsigned) */
+static u8 arc_ld_r(u8 *buf, u8 reg, u8 reg_mem, s16 off, u8 zz)
+{
+ const u32 insn = OPC_LDU | LOAD_ZZ(zz) | LOAD_C(reg) |
+ OP_B(reg_mem) | LOAD_S9(off);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* ld.x reg, [reg_mem, off] (sign extend) */
+static u8 arc_ldx_r(u8 *buf, u8 reg, u8 reg_mem, s16 off, u8 zz)
+{
+ const u32 insn = OPC_LDS | LOAD_ZZ(zz) | LOAD_C(reg) |
+ OP_B(reg_mem) | LOAD_S9(off);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* ld.ab reg,[sp,4] */
+static u8 arc_pop_r(u8 *buf, u8 reg)
+{
+ const u32 insn = OPC_POP | LOAD_C(reg);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* add Ra,Ra,Rc */
+static u8 arc_add_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_ADD | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* add.f Ra,Ra,Rc */
+static u8 arc_addf_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_ADDF | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* add.f Ra,Ra,u6 */
+static u8 arc_addif_r(u8 *buf, u8 ra, u8 u6)
+{
+ const u32 insn = OPC_ADDIF | OP_A(ra) | OP_B(ra) | ADDI_U6(u6);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* add Ra,Ra,u6 */
+static u8 arc_addi_r(u8 *buf, u8 ra, u8 u6)
+{
+ const u32 insn = OPC_ADDI | OP_A(ra) | OP_B(ra) | ADDI_U6(u6);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* add Ra,Rb,imm */
+static u8 arc_add_i(u8 *buf, u8 ra, u8 rb, s32 imm)
+{
+ const u32 insn = OPC_ADD_I | OP_A(ra) | OP_B(rb);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* adc Ra,Ra,Rc */
+static u8 arc_adc_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_ADC | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* adc Ra,Ra,u6 */
+static u8 arc_adci_r(u8 *buf, u8 ra, u8 u6)
+{
+ const u32 insn = OPC_ADCI | OP_A(ra) | OP_B(ra) | ADCI_U6(u6);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* sub Ra,Ra,Rc */
+static u8 arc_sub_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_SUB | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* sub.f Ra,Ra,Rc */
+static u8 arc_subf_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_SUBF | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* sub Ra,Ra,u6 */
+static u8 arc_subi_r(u8 *buf, u8 ra, u8 u6)
+{
+ const u32 insn = OPC_SUBI | OP_A(ra) | OP_B(ra) | SUBI_U6(u6);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* sub Ra,Ra,imm */
+static u8 arc_sub_i(u8 *buf, u8 ra, s32 imm)
+{
+ const u32 insn = OPC_SUB_I | OP_A(ra) | OP_B(ra);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* sbc Ra,Ra,Rc */
+static u8 arc_sbc_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_SBC | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* cmp Rb,Rc */
+static u8 arc_cmp_r(u8 *buf, u8 rb, u8 rc)
+{
+ const u32 insn = OPC_CMP | OP_B(rb) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/*
+ * cmp.z Rb,Rc
+ *
+ * This "cmp.z" variant of compare instruction is used on lower
+ * 32-bits of register pairs after "cmp"ing their upper parts. If the
+ * upper parts are equal (z), then this one will proceed to check the
+ * rest.
+ */
+static u8 arc_cmpz_r(u8 *buf, u8 rb, u8 rc)
+{
+ const u32 insn = OPC_CMP | OP_B(rb) | OP_C(rc) | CC_equal;
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* neg Ra,Rb */
+static u8 arc_neg_r(u8 *buf, u8 ra, u8 rb)
+{
+ const u32 insn = OPC_NEG | OP_A(ra) | OP_B(rb);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* mpy Ra,Rb,Rc */
+static u8 arc_mpy_r(u8 *buf, u8 ra, u8 rb, u8 rc)
+{
+ const u32 insn = OPC_MPY | OP_A(ra) | OP_B(rb) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* mpy Ra,Rb,imm */
+static u8 arc_mpy_i(u8 *buf, u8 ra, u8 rb, s32 imm)
+{
+ const u32 insn = OPC_MPYI | OP_A(ra) | OP_B(rb);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* mpydu Ra,Ra,Rc */
+static u8 arc_mpydu_r(u8 *buf, u8 ra, u8 rc)
+{
+ const u32 insn = OPC_MPYDU | OP_A(ra) | OP_B(ra) | OP_C(rc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* mpydu Ra,Ra,imm */
+static u8 arc_mpydu_i(u8 *buf, u8 ra, s32 imm)
+{
+ const u32 insn = OPC_MPYDUI | OP_A(ra) | OP_B(ra);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* divu Rd,Rd,Rs */
+static u8 arc_divu_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_DIVU | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* divu Rd,Rd,imm */
+static u8 arc_divu_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_DIVUI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* div Rd,Rd,Rs */
+static u8 arc_divs_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_DIVS | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* div Rd,Rd,imm */
+static u8 arc_divs_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_DIVSI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* remu Rd,Rd,Rs */
+static u8 arc_remu_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_REMU | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* remu Rd,Rd,imm */
+static u8 arc_remu_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_REMUI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* rem Rd,Rd,Rs */
+static u8 arc_rems_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_REMS | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* rem Rd,Rd,imm */
+static u8 arc_rems_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_REMSI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* and Rd,Rd,Rs */
+static u8 arc_and_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_AND | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/* and Rd,Rd,limm */
+static u8 arc_and_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_ANDI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+/* tst Rd,Rs */
+static u8 arc_tst_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_TST | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/*
+ * This particular version, "tst.z ...", is meant to be used on the
+ * lower 32 bits of register pairs, after "tst"ing their upper parts.
+ * If that first "tst" did not yield zero, then the lower 32 bits must
+ * not be tested, lest it set the zero flag.
+ */
+static u8 arc_tstz_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_TST | OP_B(rd) | OP_C(rs) | CC_equal;
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_or_r(u8 *buf, u8 rd, u8 rs1, u8 rs2)
+{
+ const u32 insn = OPC_OR | OP_A(rd) | OP_B(rs1) | OP_C(rs2);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_or_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_ORI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+static u8 arc_xor_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_XOR | OP_A(rd) | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_xor_i(u8 *buf, u8 rd, s32 imm)
+{
+ const u32 insn = OPC_XORI | OP_A(rd) | OP_B(rd);
+
+ if (buf) {
+ emit_4_bytes(buf, insn);
+ emit_4_bytes(buf + INSN_len_normal, imm);
+ }
+ return INSN_len_normal + INSN_len_imm;
+}
+
+static u8 arc_not_r(u8 *buf, u8 rd, u8 rs)
+{
+ const u32 insn = OPC_NOT | OP_B(rd) | OP_C(rs);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_btst_i(u8 *buf, u8 rs, u8 imm)
+{
+ const u32 insn = OPC_BTSTU6 | OP_B(rs) | BTST_U6(imm);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_asl_r(u8 *buf, u8 rd, u8 rs1, u8 rs2)
+{
+ const u32 insn = OPC_ASL | OP_A(rd) | OP_B(rs1) | OP_C(rs2);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_asli_r(u8 *buf, u8 rd, u8 rs, u8 imm)
+{
+ const u32 insn = OPC_ASLI | OP_A(rd) | OP_B(rs) | ASLI_U6(imm);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_asr_r(u8 *buf, u8 rd, u8 rs1, u8 rs2)
+{
+ const u32 insn = OPC_ASR | OP_A(rd) | OP_B(rs1) | OP_C(rs2);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_asri_r(u8 *buf, u8 rd, u8 rs, u8 imm)
+{
+ const u32 insn = OPC_ASRI | OP_A(rd) | OP_B(rs) | ASRI_U6(imm);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_lsr_r(u8 *buf, u8 rd, u8 rs1, u8 rs2)
+{
+ const u32 insn = OPC_LSR | OP_A(rd) | OP_B(rs1) | OP_C(rs2);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_lsri_r(u8 *buf, u8 rd, u8 rs, u8 imm)
+{
+ const u32 insn = OPC_LSRI | OP_A(rd) | OP_B(rs) | LSRI_U6(imm);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_swape_r(u8 *buf, u8 r)
+{
+ const u32 insn = OPC_SWAPE | OP_B(r) | OP_C(r);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+static u8 arc_jmp_return(u8 *buf)
+{
+ if (buf)
+ emit_4_bytes(buf, OPC_J_BLINK);
+ return INSN_len_normal;
+}
+
+static u8 arc_jl(u8 *buf, u8 reg)
+{
+ const u32 insn = OPC_JL | OP_C(reg);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/*
+ * Conditional jump to an address that is max 21 bits away (signed).
+ *
+ * b<cc> s21
+ */
+static u8 arc_bcc(u8 *buf, u8 cc, int offset)
+{
+ const u32 insn = OPC_BCC | BCC_S21(offset) | COND(cc);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/*
+ * Unconditional jump to an address that is max 25 bits away (signed).
+ *
+ * b s25
+ */
+static u8 arc_b(u8 *buf, s32 offset)
+{
+ const u32 insn = OPC_B | B_S25(offset);
+
+ if (buf)
+ emit_4_bytes(buf, insn);
+ return INSN_len_normal;
+}
+
+/************* Packers (Deal with BPF_REGs) **************/
+
+inline u8 zext(u8 *buf, u8 rd)
+{
+ if (rd != BPF_REG_FP)
+ return arc_movi_r(buf, REG_HI(rd), 0);
+ else
+ return 0;
+}
+
+u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext)
+{
+ u8 len = 0;
+
+ if (sign_ext) {
+ if (sign_ext == 8)
+ len = arc_sexb_r(buf, REG_LO(rd), REG_LO(rs));
+ else if (sign_ext == 16)
+ len = arc_sexh_r(buf, REG_LO(rd), REG_LO(rs));
+ else if (sign_ext == 32 && rd != rs)
+ len = arc_mov_r(buf, REG_LO(rd), REG_LO(rs));
+
+ return len;
+ }
+
+ /* Unsigned move. */
+
+ if (rd != rs)
+ len = arc_mov_r(buf, REG_LO(rd), REG_LO(rs));
+
+ return len;
+}
+
+u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm)
+{
+ return arc_mov_i(buf, REG_LO(reg), imm);
+}
+
+u8 mov_r64(u8 *buf, u8 rd, u8 rs, u8 sign_ext)
+{
+ u8 len = 0;
+
+ if (sign_ext) {
+ /* First handle the low 32-bit part. */
+ len = mov_r32(buf, rd, rs, sign_ext);
+
+ /* Now propagate the sign bit of LO to HI. */
+ if (sign_ext == 8 || sign_ext == 16 || sign_ext == 32) {
+ len += arc_asri_r(BUF(buf, len),
+ REG_HI(rd), REG_LO(rd), 31);
+ }
+
+ return len;
+ }
+
+ /* Unsigned move. */
+
+ if (rd == rs)
+ return 0;
+
+ len = arc_mov_r(buf, REG_LO(rd), REG_LO(rs));
+
+ if (rs != BPF_REG_FP)
+ len += arc_mov_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ /* BPF_REG_FP is mapped to 32-bit "fp" register. */
+ else
+ len += arc_movi_r(BUF(buf, len), REG_HI(rd), 0);
+
+ return len;
+}
+
+/* Sign extend the 32-bit immediate into 64-bit register pair. */
+u8 mov_r64_i32(u8 *buf, u8 reg, s32 imm)
+{
+ u8 len = 0;
+
+ len = arc_mov_i(buf, REG_LO(reg), imm);
+
+ /* BPF_REG_FP is mapped to 32-bit "fp" register. */
+ if (reg != BPF_REG_FP) {
+ if (imm >= 0)
+ len += arc_movi_r(BUF(buf, len), REG_HI(reg), 0);
+ else
+ len += arc_movi_r(BUF(buf, len), REG_HI(reg), -1);
+ }
+
+ return len;
+}
+
+/*
+ * This is merely used for translation of "LD R, IMM64" instructions
+ * of BPF. Such instructions are sometimes used for
+ * relocations. If during the normal pass, the relocation value is
+ * not known, the BPF instruction may look something like:
+ *
+ * LD R <- 0x0000_0001_0000_0001
+ *
+ * Which will nicely translate to two 4-byte ARC instructions:
+ *
+ * mov R_lo, 1 # imm is small enough to be s12
+ * mov R_hi, 1 # same
+ *
+ * However, during the extra pass, the IMM64 will have changed
+ * to the resolved address and looks something like:
+ *
+ * LD R <- 0x0000_0000_1234_5678
+ *
+ * Now, the translated code will require 12 bytes:
+ *
+ * mov R_lo, 0x12345678 # this is an 8-byte instruction
+ * mov R_hi, 0 # still 4 bytes
+ *
+ * Which in practice will result in overwriting the following
+ * instruction. To avoid such cases, we will always emit code
+ * with a fixed size.
+ */
+u8 mov_r64_i64(u8 *buf, u8 reg, u32 lo, u32 hi)
+{
+ u8 len;
+
+ len = arc_mov_i_fixed(buf, REG_LO(reg), lo);
+ len += arc_mov_i_fixed(BUF(buf, len), REG_HI(reg), hi);
+
+ return len;
+}
+
+/*
+ * If the "off"set is too big (doesn't encode as S9) for:
+ *
+ * {ld,st} r, [rm, off]
+ *
+ * Then emit:
+ *
+ * add r10, REG_LO(rm), off
+ *
+ * and make sure that r10 becomes the effective address:
+ *
+ * {ld,st} r, [r10, 0]
+ */
+static u8 adjust_mem_access(u8 *buf, s16 *off, u8 size,
+ u8 rm, u8 *arc_reg_mem)
+{
+ u8 len = 0;
+ *arc_reg_mem = REG_LO(rm);
+
+ if (!IN_S9_RANGE(*off) ||
+ (size == BPF_DW && !IN_S9_RANGE(*off + 4))) {
+ len += arc_add_i(BUF(buf, len),
+ REG_LO(JIT_REG_TMP), REG_LO(rm), (u32)(*off));
+ *arc_reg_mem = REG_LO(JIT_REG_TMP);
+ *off = 0;
+ }
+
+ return len;
+}
+
+/* store rs, [rd, off] */
+u8 store_r(u8 *buf, u8 rs, u8 rd, s16 off, u8 size)
+{
+ u8 len, arc_reg_mem;
+
+ len = adjust_mem_access(buf, &off, size, rd, &arc_reg_mem);
+
+ if (size == BPF_DW) {
+ len += arc_st_r(BUF(buf, len), REG_LO(rs), arc_reg_mem,
+ off, ZZ_4_byte);
+ len += arc_st_r(BUF(buf, len), REG_HI(rs), arc_reg_mem,
+ off + 4, ZZ_4_byte);
+ } else {
+ u8 zz = bpf_to_arc_size(size);
+
+ len += arc_st_r(BUF(buf, len), REG_LO(rs), arc_reg_mem,
+ off, zz);
+ }
+
+ return len;
+}
+
+/*
+ * For {8,16,32}-bit stores:
+ * mov r21, imm
+ * st r21, [...]
+ * For 64-bit stores:
+ * mov r21, imm
+ * st r21, [...]
+ * mov r21, {0,-1}
+ * st r21, [...+4]
+ */
+u8 store_i(u8 *buf, s32 imm, u8 rd, s16 off, u8 size)
+{
+ u8 len, arc_reg_mem;
+ /* REG_LO(JIT_REG_TMP) might be used by "adjust_mem_access()". */
+ const u8 arc_rs = REG_HI(JIT_REG_TMP);
+
+ len = adjust_mem_access(buf, &off, size, rd, &arc_reg_mem);
+
+ if (size == BPF_DW) {
+ len += arc_mov_i(BUF(buf, len), arc_rs, imm);
+ len += arc_st_r(BUF(buf, len), arc_rs, arc_reg_mem,
+ off, ZZ_4_byte);
+ imm = (imm >= 0 ? 0 : -1);
+ len += arc_mov_i(BUF(buf, len), arc_rs, imm);
+ len += arc_st_r(BUF(buf, len), arc_rs, arc_reg_mem,
+ off + 4, ZZ_4_byte);
+ } else {
+ u8 zz = bpf_to_arc_size(size);
+
+ len += arc_mov_i(BUF(buf, len), arc_rs, imm);
+ len += arc_st_r(BUF(buf, len), arc_rs, arc_reg_mem, off, zz);
+ }
+
+ return len;
+}
+
+/*
+ * For the calling convention of a little endian machine, the LO part
+ * must be on top of the stack.
+ */
+static u8 push_r64(u8 *buf, u8 reg)
+{
+ u8 len = 0;
+
+#ifdef __LITTLE_ENDIAN
+ /* BPF_REG_FP is mapped to 32-bit "fp" register. */
+ if (reg != BPF_REG_FP)
+ len += arc_push_r(BUF(buf, len), REG_HI(reg));
+ len += arc_push_r(BUF(buf, len), REG_LO(reg));
+#else
+ len += arc_push_r(BUF(buf, len), REG_LO(reg));
+ if (reg != BPF_REG_FP)
+ len += arc_push_r(BUF(buf, len), REG_HI(reg));
+#endif
+
+ return len;
+}
+
+/* load rd, [rs, off] */
+u8 load_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size, bool sign_ext)
+{
+ u8 len, arc_reg_mem;
+
+ len = adjust_mem_access(buf, &off, size, rs, &arc_reg_mem);
+
+ if (size == BPF_B || size == BPF_H || size == BPF_W) {
+ const u8 zz = bpf_to_arc_size(size);
+
+ /* Use LD.X only if the data size is less than 32-bit. */
+ if (sign_ext && (zz == ZZ_1_byte || zz == ZZ_2_byte)) {
+ len += arc_ldx_r(BUF(buf, len), REG_LO(rd),
+ arc_reg_mem, off, zz);
+ } else {
+ len += arc_ld_r(BUF(buf, len), REG_LO(rd),
+ arc_reg_mem, off, zz);
+ }
+
+ if (sign_ext) {
+ /* Propagate the sign bit to the higher reg. */
+ len += arc_asri_r(BUF(buf, len),
+ REG_HI(rd), REG_LO(rd), 31);
+ } else {
+ len += arc_movi_r(BUF(buf, len), REG_HI(rd), 0);
+ }
+ } else if (size == BPF_DW) {
+ /*
+ * We are about to issue 2 consecutive loads:
+ *
+ * ld rx, [rb, off+0]
+ * ld ry, [rb, off+4]
+ *
+ * If "rx" and "rb" are the same registers, then the order
+ * should change to guarantee that "rb" remains intact
+ * during these 2 operations:
+ *
+ * ld ry, [rb, off+4]
+ * ld rx, [rb, off+0]
+ */
+ if (REG_LO(rd) != arc_reg_mem) {
+ len += arc_ld_r(BUF(buf, len), REG_LO(rd), arc_reg_mem,
+ off, ZZ_4_byte);
+ len += arc_ld_r(BUF(buf, len), REG_HI(rd), arc_reg_mem,
+ off + 4, ZZ_4_byte);
+ } else {
+ len += arc_ld_r(BUF(buf, len), REG_HI(rd), arc_reg_mem,
+ off + 4, ZZ_4_byte);
+ len += arc_ld_r(BUF(buf, len), REG_LO(rd), arc_reg_mem,
+ off, ZZ_4_byte);
+ }
+ }
+
+ return len;
+}
+
+u8 add_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_add_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 add_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ if (IN_U6_RANGE(imm))
+ return arc_addi_r(buf, REG_LO(rd), imm);
+ else
+ return arc_add_i(buf, REG_LO(rd), REG_LO(rd), imm);
+}
+
+u8 add_r64(u8 *buf, u8 rd, u8 rs)
+{
+ u8 len;
+
+ len = arc_addf_r(buf, REG_LO(rd), REG_LO(rs));
+ len += arc_adc_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ return len;
+}
+
+u8 add_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ u8 len;
+
+ if (IN_U6_RANGE(imm)) {
+ len = arc_addif_r(buf, REG_LO(rd), imm);
+ len += arc_adci_r(BUF(buf, len), REG_HI(rd), 0);
+ } else {
+ len = mov_r64_i32(buf, JIT_REG_TMP, imm);
+ len += add_r64(BUF(buf, len), rd, JIT_REG_TMP);
+ }
+ return len;
+}
+
+u8 sub_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_sub_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 sub_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ if (IN_U6_RANGE(imm))
+ return arc_subi_r(buf, REG_LO(rd), imm);
+ else
+ return arc_sub_i(buf, REG_LO(rd), imm);
+}
+
+u8 sub_r64(u8 *buf, u8 rd, u8 rs)
+{
+ u8 len;
+
+ len = arc_subf_r(buf, REG_LO(rd), REG_LO(rs));
+ len += arc_sbc_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ return len;
+}
+
+u8 sub_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ u8 len;
+
+ len = mov_r64_i32(buf, JIT_REG_TMP, imm);
+ len += sub_r64(BUF(buf, len), rd, JIT_REG_TMP);
+ return len;
+}
+
+static u8 cmp_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_cmp_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 neg_r32(u8 *buf, u8 r)
+{
+ return arc_neg_r(buf, REG_LO(r), REG_LO(r));
+}
+
+/* In a two's complement system, -r is (~r + 1). */
+u8 neg_r64(u8 *buf, u8 r)
+{
+ u8 len;
+
+ len = arc_not_r(buf, REG_LO(r), REG_LO(r));
+ len += arc_not_r(BUF(buf, len), REG_HI(r), REG_HI(r));
+ len += add_r64_i32(BUF(buf, len), r, 1);
+ return len;
+}
+
+u8 mul_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_mpy_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+}
+
+u8 mul_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ return arc_mpy_i(buf, REG_LO(rd), REG_LO(rd), imm);
+}
+
+/*
+ * MUL B, C
+ * --------
+ * mpy t0, B_hi, C_lo
+ * mpy t1, B_lo, C_hi
+ * mpydu B_lo, B_lo, C_lo
+ * add B_hi, B_hi, t0
+ * add B_hi, B_hi, t1
+ */
+u8 mul_r64(u8 *buf, u8 rd, u8 rs)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 t1 = REG_HI(JIT_REG_TMP);
+ const u8 C_lo = REG_LO(rs);
+ const u8 C_hi = REG_HI(rs);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ u8 len;
+
+ len = arc_mpy_r(buf, t0, B_hi, C_lo);
+ len += arc_mpy_r(BUF(buf, len), t1, B_lo, C_hi);
+ len += arc_mpydu_r(BUF(buf, len), B_lo, C_lo);
+ len += arc_add_r(BUF(buf, len), B_hi, t0);
+ len += arc_add_r(BUF(buf, len), B_hi, t1);
+
+ return len;
+}
+
+/*
+ * MUL B, imm
+ * ----------
+ *
+ * To get a 64-bit result from a signed 64x32 multiplication:
+ *
+ * B_hi B_lo *
+ * sign imm
+ * -----------------------------
+ * HI(B_lo*imm) LO(B_lo*imm) +
+ * B_hi*imm +
+ * B_lo*sign
+ * -----------------------------
+ * res_hi res_lo
+ *
+ * mpy t1, B_lo, sign(imm)
+ * mpy t0, B_hi, imm
+ * mpydu B_lo, B_lo, imm
+ * add B_hi, B_hi, t0
+ * add B_hi, B_hi, t1
+ *
+ * Note: We can't use signed double multiplication, "mpyd", instead of an
+ * unsigned version, "mpydu", and then get rid of the sign adjustments
+ * calculated in "t1". The signed multiplication, "mpyd", will consider
+ * both operands, "B_lo" and "imm", as signed inputs. However, for this
+ * 64x32 multiplication, "B_lo" must be treated as an unsigned number.
+ */
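+/*
+ * A short trace of the scheme above with B = 1 and imm = -1:
+ *
+ *   t1    = -B_lo       = 0xffff_ffff	# the "B_lo*sign" term
+ *   t0    = B_hi * imm  = 0
+ *   B     = B_lo u* imm = (0x0000_0000, 0xffff_ffff)	# mpydu: (hi, lo)
+ *   B_hi += t0          --> B_hi = 0
+ *   B_hi += t1          --> B_hi = 0xffff_ffff
+ *
+ * leaving B = (0xffff_ffff, 0xffff_ffff), i.e. -1 as expected.
+ */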
+u8 mul_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 t1 = REG_HI(JIT_REG_TMP);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ u8 len = 0;
+
+ if (imm == 1)
+ return 0;
+
+ /* Is the sign-extension of the immediate "-1"? */
+ if (imm < 0)
+ len += arc_neg_r(BUF(buf, len), t1, B_lo);
+
+ len += arc_mpy_i(BUF(buf, len), t0, B_hi, imm);
+ len += arc_mpydu_i(BUF(buf, len), B_lo, imm);
+ len += arc_add_r(BUF(buf, len), B_hi, t0);
+
+ /* Add the "sign*B_lo" part, if necessary. */
+ if (imm < 0)
+ len += arc_add_r(BUF(buf, len), B_hi, t1);
+
+ return len;
+}
+
+u8 div_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext)
+{
+ if (sign_ext)
+ return arc_divs_r(buf, REG_LO(rd), REG_LO(rs));
+ else
+ return arc_divu_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 div_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext)
+{
+ if (imm == 0)
+ return 0;
+
+ if (sign_ext)
+ return arc_divs_i(buf, REG_LO(rd), imm);
+ else
+ return arc_divu_i(buf, REG_LO(rd), imm);
+}
+
+u8 mod_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext)
+{
+ if (sign_ext)
+ return arc_rems_r(buf, REG_LO(rd), REG_LO(rs));
+ else
+ return arc_remu_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 mod_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext)
+{
+ if (imm == 0)
+ return 0;
+
+ if (sign_ext)
+ return arc_rems_i(buf, REG_LO(rd), imm);
+ else
+ return arc_remu_i(buf, REG_LO(rd), imm);
+}
+
+u8 and_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_and_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 and_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ return arc_and_i(buf, REG_LO(rd), imm);
+}
+
+u8 and_r64(u8 *buf, u8 rd, u8 rs)
+{
+ u8 len;
+
+ len = arc_and_r(buf, REG_LO(rd), REG_LO(rs));
+ len += arc_and_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ return len;
+}
+
+u8 and_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ u8 len;
+
+ len = mov_r64_i32(buf, JIT_REG_TMP, imm);
+ len += and_r64(BUF(buf, len), rd, JIT_REG_TMP);
+ return len;
+}
+
+static u8 tst_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_tst_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 or_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_or_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+}
+
+u8 or_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ return arc_or_i(buf, REG_LO(rd), imm);
+}
+
+u8 or_r64(u8 *buf, u8 rd, u8 rs)
+{
+ u8 len;
+
+ len = arc_or_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+ len += arc_or_r(BUF(buf, len), REG_HI(rd), REG_HI(rd), REG_HI(rs));
+ return len;
+}
+
+u8 or_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ u8 len;
+
+ len = mov_r64_i32(buf, JIT_REG_TMP, imm);
+ len += or_r64(BUF(buf, len), rd, JIT_REG_TMP);
+ return len;
+}
+
+u8 xor_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_xor_r(buf, REG_LO(rd), REG_LO(rs));
+}
+
+u8 xor_r32_i32(u8 *buf, u8 rd, s32 imm)
+{
+ return arc_xor_i(buf, REG_LO(rd), imm);
+}
+
+u8 xor_r64(u8 *buf, u8 rd, u8 rs)
+{
+ u8 len;
+
+ len = arc_xor_r(buf, REG_LO(rd), REG_LO(rs));
+ len += arc_xor_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ return len;
+}
+
+u8 xor_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ u8 len;
+
+ len = mov_r64_i32(buf, JIT_REG_TMP, imm);
+ len += xor_r64(BUF(buf, len), rd, JIT_REG_TMP);
+ return len;
+}
+
+/* "asl a,b,c" --> "a = (b << (c & 31))". */
+u8 lsh_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_asl_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+}
+
+u8 lsh_r32_i32(u8 *buf, u8 rd, u8 imm)
+{
+ return arc_asli_r(buf, REG_LO(rd), REG_LO(rd), imm);
+}
+
+/*
+ * algorithm
+ * ---------
+ * if (n <= 32)
+ *	to_hi = lo >> (32-n)	# (32-n) is the negation of "n" in 5-bit arithmetic.
+ * lo <<= n
+ * hi <<= n
+ * hi |= to_hi
+ * else
+ * hi = lo << (n-32)
+ * lo = 0
+ *
+ * assembly translation for "LSH B, C"
+ * (heavily influenced by ARC gcc)
+ * -----------------------------------
+ * not t0, C_lo # The first 3 lines are almost the same as:
+ * lsr t1, B_lo, 1 # neg t0, C_lo
+ * lsr t1, t1, t0 # lsr t1, B_lo, t0 --> t1 is "to_hi"
+ * mov t0, C_lo* # with one important difference. In "neg"
+ * asl B_lo, B_lo, t0 # version, when C_lo=0, t1 becomes B_lo while
+ * asl B_hi, B_hi, t0 # it should be 0. The "not" approach instead,
+ * or B_hi, B_hi, t1 # "shift"s t1 once and 31 times, practically
+ * btst t0, 5 # setting it to 0 when C_lo=0.
+ * mov.ne B_hi, B_lo**
+ * mov.ne B_lo, 0
+ *
+ * *The "mov t0, C_lo" is necessary to cover the cases that C is the same
+ * register as B.
+ *
+ * **ARC performs a shift in this manner: B <<= (C & 31)
+ * For 32<=n<64, "n-32" and "n&31" are the same. Therefore, "B << n" and
+ * "B << (n-32)" yield the same results. e.g. the results of "B << 35" and
+ * "B << 3" are the same.
+ *
+ * The behaviour is undefined for n >= 64.
+ */
+u8 lsh_r64(u8 *buf, u8 rd, u8 rs)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 t1 = REG_HI(JIT_REG_TMP);
+ const u8 C_lo = REG_LO(rs);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ u8 len;
+
+ len = arc_not_r(buf, t0, C_lo);
+ len += arc_lsri_r(BUF(buf, len), t1, B_lo, 1);
+ len += arc_lsr_r(BUF(buf, len), t1, t1, t0);
+ len += arc_mov_r(BUF(buf, len), t0, C_lo);
+ len += arc_asl_r(BUF(buf, len), B_lo, B_lo, t0);
+ len += arc_asl_r(BUF(buf, len), B_hi, B_hi, t0);
+ len += arc_or_r(BUF(buf, len), B_hi, B_hi, t1);
+ len += arc_btst_i(BUF(buf, len), t0, 5);
+ len += arc_mov_cc_r(BUF(buf, len), CC_unequal, B_hi, B_lo);
+ len += arc_movu_cc_r(BUF(buf, len), CC_unequal, B_lo, 0);
+
+ return len;
+}
+
+/*
+ * if (n < 32)
+ *	to_hi = lo >> (32-n)	# extract upper n bits
+ *	lo <<= n
+ *	hi <<= n
+ *	hi |= to_hi
+ * else if (n < 64)
+ *	hi = lo << (n-32)
+ * lo = 0
+ */
+u8 lsh_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ const u8 n = (u8)imm;
+ u8 len = 0;
+
+ if (n == 0) {
+ return 0;
+ } else if (n <= 31) {
+ len = arc_lsri_r(buf, t0, B_lo, 32 - n);
+ len += arc_asli_r(BUF(buf, len), B_lo, B_lo, n);
+ len += arc_asli_r(BUF(buf, len), B_hi, B_hi, n);
+ len += arc_or_r(BUF(buf, len), B_hi, B_hi, t0);
+ } else if (n <= 63) {
+ len = arc_asli_r(buf, B_hi, B_lo, n - 32);
+ len += arc_movi_r(BUF(buf, len), B_lo, 0);
+ }
+ /* n >= 64 is undefined behaviour. */
+
+ return len;
+}
+
+/* "lsr a,b,c" --> "a = (b >> (c & 31))". */
+u8 rsh_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_lsr_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+}
+
+u8 rsh_r32_i32(u8 *buf, u8 rd, u8 imm)
+{
+ return arc_lsri_r(buf, REG_LO(rd), REG_LO(rd), imm);
+}
+
+/*
+ * For better commentary, see lsh_r64().
+ *
+ * algorithm
+ * ---------
+ * if (n <= 32)
+ * to_lo = hi << (32-n)
+ * hi >>= n
+ * lo >>= n
+ * lo |= to_lo
+ * else
+ * lo = hi >> (n-32)
+ * hi = 0
+ *
+ * RSH B,C
+ * ----------
+ * not t0, C_lo
+ * asl t1, B_hi, 1
+ * asl t1, t1, t0
+ * mov t0, C_lo
+ * lsr B_hi, B_hi, t0
+ * lsr B_lo, B_lo, t0
+ * or B_lo, B_lo, t1
+ * btst t0, 5
+ * mov.ne B_lo, B_hi
+ * mov.ne B_hi, 0
+ */
+u8 rsh_r64(u8 *buf, u8 rd, u8 rs)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 t1 = REG_HI(JIT_REG_TMP);
+ const u8 C_lo = REG_LO(rs);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ u8 len;
+
+ len = arc_not_r(buf, t0, C_lo);
+ len += arc_asli_r(BUF(buf, len), t1, B_hi, 1);
+ len += arc_asl_r(BUF(buf, len), t1, t1, t0);
+ len += arc_mov_r(BUF(buf, len), t0, C_lo);
+ len += arc_lsr_r(BUF(buf, len), B_hi, B_hi, t0);
+ len += arc_lsr_r(BUF(buf, len), B_lo, B_lo, t0);
+ len += arc_or_r(BUF(buf, len), B_lo, B_lo, t1);
+ len += arc_btst_i(BUF(buf, len), t0, 5);
+ len += arc_mov_cc_r(BUF(buf, len), CC_unequal, B_lo, B_hi);
+ len += arc_movu_cc_r(BUF(buf, len), CC_unequal, B_hi, 0);
+
+ return len;
+}
+
+/*
+ * if (n < 32)
+ *	to_lo = hi << (32-n)	# lower n bits of hi, right-padded with 32-n 0s
+ *	lo >>= n
+ *	hi >>= n
+ *	lo |= to_lo
+ * else if (n < 64)
+ *	lo = hi >> (n-32)
+ * hi = 0
+ */
+u8 rsh_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ const u8 n = (u8)imm;
+ u8 len = 0;
+
+ if (n == 0) {
+ return 0;
+ } else if (n <= 31) {
+ len = arc_asli_r(buf, t0, B_hi, 32 - n);
+ len += arc_lsri_r(BUF(buf, len), B_lo, B_lo, n);
+ len += arc_lsri_r(BUF(buf, len), B_hi, B_hi, n);
+ len += arc_or_r(BUF(buf, len), B_lo, B_lo, t0);
+ } else if (n <= 63) {
+ len = arc_lsri_r(buf, B_lo, B_hi, n - 32);
+ len += arc_movi_r(BUF(buf, len), B_hi, 0);
+ }
+ /* n >= 64 is undefined behaviour. */
+
+ return len;
+}
+
+/* "asr a,b,c" --> "a = (b s>> (c & 31))". */
+u8 arsh_r32(u8 *buf, u8 rd, u8 rs)
+{
+ return arc_asr_r(buf, REG_LO(rd), REG_LO(rd), REG_LO(rs));
+}
+
+u8 arsh_r32_i32(u8 *buf, u8 rd, u8 imm)
+{
+ return arc_asri_r(buf, REG_LO(rd), REG_LO(rd), imm);
+}
+
+/*
+ * For comparison, see rsh_r64().
+ *
+ * algorithm
+ * ---------
+ * if (n <= 32)
+ * to_lo = hi << (32-n)
+ * hi s>>= n
+ * lo >>= n
+ * lo |= to_lo
+ * else
+ * hi_sign = hi s>>31
+ * lo = hi s>> (n-32)
+ * hi = hi_sign
+ *
+ * ARSH B,C
+ * ----------
+ * not t0, C_lo
+ * asl t1, B_hi, 1
+ * asl t1, t1, t0
+ * mov t0, C_lo
+ * asr B_hi, B_hi, t0
+ * lsr B_lo, B_lo, t0
+ * or B_lo, B_lo, t1
+ * btst t0, 5
+ * asr t0, B_hi, 31 # now, t0 = 0 or -1 based on B_hi's sign
+ * mov.ne B_lo, B_hi
+ * mov.ne B_hi, t0
+ */
+u8 arsh_r64(u8 *buf, u8 rd, u8 rs)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 t1 = REG_HI(JIT_REG_TMP);
+ const u8 C_lo = REG_LO(rs);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ u8 len;
+
+ len = arc_not_r(buf, t0, C_lo);
+ len += arc_asli_r(BUF(buf, len), t1, B_hi, 1);
+ len += arc_asl_r(BUF(buf, len), t1, t1, t0);
+ len += arc_mov_r(BUF(buf, len), t0, C_lo);
+ len += arc_asr_r(BUF(buf, len), B_hi, B_hi, t0);
+ len += arc_lsr_r(BUF(buf, len), B_lo, B_lo, t0);
+ len += arc_or_r(BUF(buf, len), B_lo, B_lo, t1);
+ len += arc_btst_i(BUF(buf, len), t0, 5);
+ len += arc_asri_r(BUF(buf, len), t0, B_hi, 31);
+ len += arc_mov_cc_r(BUF(buf, len), CC_unequal, B_lo, B_hi);
+ len += arc_mov_cc_r(BUF(buf, len), CC_unequal, B_hi, t0);
+
+ return len;
+}
+
+/*
+ * if (n < 32)
+ *	to_lo = hi << (32-n)	# lower n bits of hi, right-padded with 32-n 0s
+ *	lo >>= n
+ *	hi s>>= n
+ *	lo |= to_lo
+ * else if (n < 64)
+ *	lo = hi s>> (n-32)
+ * hi = (lo[msb] ? -1 : 0)
+ */
+u8 arsh_r64_i32(u8 *buf, u8 rd, s32 imm)
+{
+ const u8 t0 = REG_LO(JIT_REG_TMP);
+ const u8 B_lo = REG_LO(rd);
+ const u8 B_hi = REG_HI(rd);
+ const u8 n = (u8)imm;
+ u8 len = 0;
+
+ if (n == 0) {
+ return 0;
+ } else if (n <= 31) {
+ len = arc_asli_r(buf, t0, B_hi, 32 - n);
+ len += arc_lsri_r(BUF(buf, len), B_lo, B_lo, n);
+ len += arc_asri_r(BUF(buf, len), B_hi, B_hi, n);
+ len += arc_or_r(BUF(buf, len), B_lo, B_lo, t0);
+ } else if (n <= 63) {
+ len = arc_asri_r(buf, B_lo, B_hi, n - 32);
+ len += arc_movi_r(BUF(buf, len), B_hi, -1);
+ len += arc_btst_i(BUF(buf, len), B_lo, 31);
+ len += arc_movu_cc_r(BUF(buf, len), CC_equal, B_hi, 0);
+ }
+ /* n >= 64 is undefined behaviour. */
+
+ return len;
+}
+
+u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
+{
+ u8 len = 0;
+#ifdef __BIG_ENDIAN
+ const u8 host_endian = BPF_FROM_BE;
+#else
+ const u8 host_endian = BPF_FROM_LE;
+#endif
+ if (host_endian != endian || force) {
+ switch (size) {
+ case 16:
+ /*
+ * r = B4B3_B2B1 << 16 --> r = B2B1_0000
+ * then, swape(r) would become the desired 0000_B1B2
+ */
+ len = arc_asli_r(buf, REG_LO(rd), REG_LO(rd), 16);
+ fallthrough;
+ case 32:
+ len += arc_swape_r(BUF(buf, len), REG_LO(rd));
+ if (do_zext)
+ len += zext(BUF(buf, len), rd);
+ break;
+ case 64:
+ /*
+ * swap "hi" and "lo":
+ * hi ^= lo;
+ * lo ^= hi;
+ * hi ^= lo;
+ * and then swap the bytes in "hi" and "lo".
+ */
+ len = arc_xor_r(buf, REG_HI(rd), REG_LO(rd));
+ len += arc_xor_r(BUF(buf, len), REG_LO(rd), REG_HI(rd));
+ len += arc_xor_r(BUF(buf, len), REG_HI(rd), REG_LO(rd));
+ len += arc_swape_r(BUF(buf, len), REG_LO(rd));
+ len += arc_swape_r(BUF(buf, len), REG_HI(rd));
+ break;
+		default:
+			/* The caller must have handled this. */
+			break;
+		}
+ } else {
+ /*
+ * If the same endianness, there's not much to do other
+ * than zeroing out the upper bytes based on the "size".
+ */
+ switch (size) {
+ case 16:
+ len = arc_and_i(buf, REG_LO(rd), 0xffff);
+ fallthrough;
+ case 32:
+ if (do_zext)
+ len += zext(BUF(buf, len), rd);
+ break;
+ case 64:
+ break;
+		default:
+			/* The caller must have handled this. */
+			break;
+		}
+ }
+
+ return len;
+}
+
+/*
+ * To create a frame, all that is needed is:
+ *
+ * push fp
+ * mov fp, sp
+ * sub sp, <frame_size>
+ *
+ * "push fp" is taken care of separately while saving the clobbered registers.
+ * All that remains is copying the SP value to FP and lowering SP to
+ * reserve the frame for any possible function call to come.
+ */
+static inline u8 frame_create(u8 *buf, u16 size)
+{
+ u8 len;
+
+ len = arc_mov_r(buf, ARC_R_FP, ARC_R_SP);
+ if (IN_U6_RANGE(size))
+ len += arc_subi_r(BUF(buf, len), ARC_R_SP, size);
+ else
+ len += arc_sub_i(BUF(buf, len), ARC_R_SP, size);
+ return len;
+}
+
+/*
+ * mov sp, fp
+ *
+ * The value of SP upon entering was copied to FP.
+ */
+static inline u8 frame_restore(u8 *buf)
+{
+ return arc_mov_r(buf, ARC_R_SP, ARC_R_FP);
+}
+
+/*
+ * Going from a JITed code to the native caller:
+ *
+ * mov ARC_ABI_RET_lo, BPF_REG_0_lo # r0 <- r8
+ * mov ARC_ABI_RET_hi, BPF_REG_0_hi # r1 <- r9
+ */
+static u8 bpf_to_arc_return(u8 *buf)
+{
+ u8 len;
+
+ len = arc_mov_r(buf, ARC_R_0, REG_LO(BPF_REG_0));
+ len += arc_mov_r(BUF(buf, len), ARC_R_1, REG_HI(BPF_REG_0));
+ return len;
+}
+
+/*
+ * Coming back from an external (in-kernel) function to the JITed code:
+ *
+ * mov BPF_REG_0_lo, ARC_ABI_RET_lo	# r8 <- r0
+ * mov BPF_REG_0_hi, ARC_ABI_RET_hi	# r9 <- r1
+ */
+u8 arc_to_bpf_return(u8 *buf)
+{
+ u8 len;
+
+ len = arc_mov_r(buf, REG_LO(BPF_REG_0), ARC_R_0);
+ len += arc_mov_r(BUF(buf, len), REG_HI(BPF_REG_0), ARC_R_1);
+ return len;
+}
+
+/*
+ * This translation leads to:
+ *
+ * mov r10, addr # always an 8-byte instruction
+ * jl [r10]
+ *
+ * The length of the "mov" must be fixed (8), otherwise it may diverge
+ * during the normal and extra passes:
+ *
+ * normal pass extra pass
+ *
+ * 180: mov r10,0 | 180: mov r10,0x700578d8
+ * 184: jl [r10] | 188: jl [r10]
+ * 188: add.f r16,r16,0x1 | 18c: adc r17,r17,0
+ * 18c: adc r17,r17,0 |
+ *
+ * In the above example, the change from "r10 <- 0" to "r10 <- 0x700578d8"
+ * has led to an increase in the length of the "mov" instruction.
+ * Inadvertently, that caused the loss of the "add.f" instruction.
+ */
+static u8 jump_and_link(u8 *buf, u32 addr)
+{
+ u8 len;
+
+ len = arc_mov_i_fixed(buf, REG_LO(JIT_REG_TMP), addr);
+ len += arc_jl(BUF(buf, len), REG_LO(JIT_REG_TMP));
+ return len;
+}
+
+/*
+ * This function determines which ARC registers must be saved and restored.
+ * It does so by looking into:
+ *
+ * "bpf_reg": The clobbered (destination) BPF register
+ * "is_call": Indicator if the current instruction is a call
+ *
+ * When a register of interest is clobbered, its corresponding bit position
+ * in return value, "usage", is set to true.
+ */
+u32 mask_for_used_regs(u8 bpf_reg, bool is_call)
+{
+ u32 usage = 0;
+
+ /* BPF registers that must be saved. */
+ if (bpf_reg >= BPF_REG_6 && bpf_reg <= BPF_REG_9) {
+ usage |= BIT(REG_LO(bpf_reg));
+ usage |= BIT(REG_HI(bpf_reg));
+ /*
+ * Using the frame pointer register implies that it should
+ * be saved and reinitialised with the current frame data.
+ */
+ } else if (bpf_reg == BPF_REG_FP) {
+ usage |= BIT(REG_LO(BPF_REG_FP));
+	/* Could there be some ARC registers that must be saved? */
+ } else {
+ if (REG_LO(bpf_reg) >= ARC_CALLEE_SAVED_REG_FIRST &&
+ REG_LO(bpf_reg) <= ARC_CALLEE_SAVED_REG_LAST)
+ usage |= BIT(REG_LO(bpf_reg));
+
+ if (REG_HI(bpf_reg) >= ARC_CALLEE_SAVED_REG_FIRST &&
+ REG_HI(bpf_reg) <= ARC_CALLEE_SAVED_REG_LAST)
+ usage |= BIT(REG_HI(bpf_reg));
+ }
+
+ /* A "call" indicates that ARC's "blink" reg must be saved. */
+ usage |= is_call ? BIT(ARC_R_BLINK) : 0;
+
+ return usage;
+}
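+/*
+ * e.g. for a call instruction whose destination register is BPF_REG_7:
+ *
+ *   usage = BIT(REG_LO(BPF_REG_7)) | BIT(REG_HI(BPF_REG_7)) | BIT(ARC_R_BLINK)
+ */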
+
+/*
+ * push blink # if blink is marked as clobbered
+ * push r[0-n] # if r[i] is marked as clobbered
+ * push fp # if fp is marked as clobbered
+ * mov fp, sp # if frame_size > 0 (clobbers fp)
+ * sub sp, <frame_size> # same as above
+ */
+u8 arc_prologue(u8 *buf, u32 usage, u16 frame_size)
+{
+ u8 len = 0;
+ u32 gp_regs = 0;
+
+ /* Deal with blink first. */
+ if (usage & BIT(ARC_R_BLINK))
+ len += arc_push_r(BUF(buf, len), ARC_R_BLINK);
+
+ gp_regs = usage & ~(BIT(ARC_R_BLINK) | BIT(ARC_R_FP));
+ while (gp_regs) {
+ u8 reg = __builtin_ffs(gp_regs) - 1;
+
+ len += arc_push_r(BUF(buf, len), reg);
+ gp_regs &= ~BIT(reg);
+ }
+
+ /* Deal with fp last. */
+ if ((usage & BIT(ARC_R_FP)) || frame_size > 0)
+ len += arc_push_r(BUF(buf, len), ARC_R_FP);
+
+ if (frame_size > 0)
+ len += frame_create(BUF(buf, len), frame_size);
+
+#ifdef ARC_BPF_JIT_DEBUG
+ if ((usage & BIT(ARC_R_FP)) && frame_size == 0) {
+		pr_err("FP is being saved while there is no frame.\n");
+ BUG();
+ }
+#endif
+
+ return len;
+}
+
+/*
+ * mov sp, fp # if frame_size > 0
+ * pop fp # if fp is marked as clobbered
+ * pop r[n-0] # if r[i] is marked as clobbered
+ * pop blink # if blink is marked as clobbered
+ * mov r0, r8 # always: ABI_return <- BPF_return
+ * mov r1, r9 # continuation of above
+ * j [blink] # always
+ *
+ * "fp being marked as clobbered" and "frame_size > 0" are the two sides of
+ * the same coin.
+ */
+u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
+{
+ u32 len = 0;
+ u32 gp_regs = 0;
+
+#ifdef ARC_BPF_JIT_DEBUG
+ if ((usage & BIT(ARC_R_FP)) && frame_size == 0) {
+		pr_err("FP is being saved while there is no frame.\n");
+ BUG();
+ }
+#endif
+
+ if (frame_size > 0)
+ len += frame_restore(BUF(buf, len));
+
+ /* Deal with fp first. */
+ if ((usage & BIT(ARC_R_FP)) || frame_size > 0)
+ len += arc_pop_r(BUF(buf, len), ARC_R_FP);
+
+ gp_regs = usage & ~(BIT(ARC_R_BLINK) | BIT(ARC_R_FP));
+ while (gp_regs) {
+ /* "usage" is 32-bit, each bit indicating an ARC register. */
+ u8 reg = 31 - __builtin_clz(gp_regs);
+
+ len += arc_pop_r(BUF(buf, len), reg);
+ gp_regs &= ~BIT(reg);
+ }
+
+ /* Deal with blink last. */
+ if (usage & BIT(ARC_R_BLINK))
+ len += arc_pop_r(BUF(buf, len), ARC_R_BLINK);
+
+ /* Wrap up the return value and jump back to the caller. */
+ len += bpf_to_arc_return(BUF(buf, len));
+ len += arc_jmp_return(BUF(buf, len));
+
+ return len;
+}
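+/*
+ * To illustrate how the two routines above mirror each other, take a
+ * hypothetical usage mask of {blink, r14, fp} and a 16-byte frame:
+ *
+ *   prologue: push blink; push r14; push fp; mov fp, sp; sub sp, 16
+ *   epilogue: mov sp, fp; pop fp; pop r14; pop blink;
+ *             mov r0, r8; mov r1, r9; j [blink]
+ */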
+
+/*
+ * For details on the algorithm, see the comments of "gen_jcc_64()".
+ *
+ * This data structure is holding information for jump translations.
+ *
+ * jit_off: Byte offset, from the current JIT address, at which the "b"ranch insn. occurs
+ * cond: The condition that the ARC branch instruction must use
+ *
+ * e.g.:
+ *
+ * BPF_JGE R1, R0, @target
+ * ------------------------
+ * |
+ * v
+ * 0x1000: cmp r3, r1 # 0x1000 is the JIT address for "BPF_JGE ..." insn
+ * 0x1004: bhi @target # first jump (branch higher)
+ * 0x1008: blo @end # second jump acting as a skip (end is 0x1014)
+ * 0x100C: cmp r2, r0 # the lower 32 bits are evaluated
+ * 0x1010: bhs @target # third jump (branch higher or same)
+ * 0x1014: ...
+ *
+ * The jit_off(set) of the "bhi" is 4 bytes.
+ * The cond(ition) for the "bhi" is "CC_great_u".
+ *
+ * The jit_off(set) is necessary for calculating the exact displacement
+ * to the "target" address:
+ *
+ * @target - (jit_address + jit_off(set))
+ * @target - (0x1000      + 4)
+ */
+#define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
+#define JCC64_INSNS_TO_END 3	/* Insns. from the 2nd jmp to the end, inclusive. */
+#define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
+const struct {
+ /*
+ * "jit_off" is common between all "jmp[]" and is coupled with
+ * "cond" of each "jmp[]" instance. e.g.:
+ *
+ * arcv2_64_jccs.jit_off[1]
+ * arcv2_64_jccs.jmp[ARC_CC_UGT].cond[1]
+ *
+ * These indicate that the second jump in the JITed code of "UGT"
+ * is at offset "jit_off[1]" while its condition is "cond[1]".
+ */
+ u8 jit_off[JCC64_NR_OF_JMPS];
+
+ struct {
+ u8 cond[JCC64_NR_OF_JMPS];
+ } jmp[ARC_CC_SLE + 1];
+} arcv2_64_jccs = {
+ .jit_off = {
+ INSN_len_normal * 1,
+ INSN_len_normal * 2,
+ INSN_len_normal * 4
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * bhi @target # 1: u>
+ * blo @end # 2: u<
+ * cmp rd_lo, rs_lo
+ * bhi @target # 3: u>
+ * end:
+ */
+ .jmp[ARC_CC_UGT] = {
+ .cond = {CC_great_u, CC_less_u, CC_great_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * bhi @target # 1: u>
+ * blo @end # 2: u<
+ * cmp rd_lo, rs_lo
+ * bhs @target # 3: u>=
+ * end:
+ */
+ .jmp[ARC_CC_UGE] = {
+ .cond = {CC_great_u, CC_less_u, CC_great_eq_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * blo @target # 1: u<
+ * bhi @end # 2: u>
+ * cmp rd_lo, rs_lo
+ * blo @target # 3: u<
+ * end:
+ */
+ .jmp[ARC_CC_ULT] = {
+ .cond = {CC_less_u, CC_great_u, CC_less_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * blo @target # 1: u<
+ * bhi @end # 2: u>
+ * cmp rd_lo, rs_lo
+ * bls @target # 3: u<=
+ * end:
+ */
+ .jmp[ARC_CC_ULE] = {
+ .cond = {CC_less_u, CC_great_u, CC_less_eq_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * bgt @target # 1: s>
+ * blt @end # 2: s<
+ * cmp rd_lo, rs_lo
+ * bhi @target # 3: u>
+ * end:
+ */
+ .jmp[ARC_CC_SGT] = {
+ .cond = {CC_great_s, CC_less_s, CC_great_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * bgt @target # 1: s>
+ * blt @end # 2: s<
+ * cmp rd_lo, rs_lo
+ * bhs @target # 3: u>=
+ * end:
+ */
+ .jmp[ARC_CC_SGE] = {
+ .cond = {CC_great_s, CC_less_s, CC_great_eq_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * blt @target # 1: s<
+ * bgt @end # 2: s>
+ * cmp rd_lo, rs_lo
+ * blo @target # 3: u<
+ * end:
+ */
+ .jmp[ARC_CC_SLT] = {
+ .cond = {CC_less_s, CC_great_s, CC_less_u}
+ },
+ /*
+ * cmp rd_hi, rs_hi
+ * blt @target # 1: s<
+ * bgt @end # 2: s>
+ * cmp rd_lo, rs_lo
+ * bls @target # 3: u<=
+ * end:
+ */
+ .jmp[ARC_CC_SLE] = {
+ .cond = {CC_less_s, CC_great_s, CC_less_eq_u}
+ }
+};
+
+/*
+ * The displacement (offset) for ARC's "b"ranch instruction is the distance
+ * from the aligned version of _current_ instruction (PCL) to the target
+ * instruction:
+ *
+ * DISP = TARGET - PCL # PCL is the word aligned PC
+ */
+static inline s32 get_displacement(u32 curr_off, u32 targ_off)
+{
+ return (s32)(targ_off - (curr_off & ~3L));
+}
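+/*
+ * e.g. for a branch at offset 0x1006 targeting offset 0x1000:
+ * PCL = 0x1006 & ~3 = 0x1004, hence DISP = 0x1000 - 0x1004 = -4.
+ */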
+
+/*
+ * "disp"lacement should be:
+ *
+ * 1. 16-bit aligned.
+ * 2. fit in S25, because no "condition code" is supposed to be encoded.
+ */
+static inline bool is_valid_far_disp(s32 disp)
+{
+ return (!(disp & 1) && IN_S25_RANGE(disp));
+}
+
+/*
+ * "disp"lacement should be:
+ *
+ * 1. 16-bit aligned.
+ * 2. fit in S21, because "condition code" is supposed to be encoded too.
+ */
+static inline bool is_valid_near_disp(s32 disp)
+{
+ return (!(disp & 1) && IN_S21_RANGE(disp));
+}
+
+/*
+ * cmp rd_hi, rs_hi
+ * cmp.z rd_lo, rs_lo
+ * b{eq,ne} @target
+ * | |
+ * | `--> "eq" param is false (JNE)
+ * `-----> "eq" param is true (JEQ)
+ */
+static int gen_j_eq_64(u8 *buf, u8 rd, u8 rs, bool eq,
+ u32 curr_off, u32 targ_off)
+{
+ s32 disp;
+ u8 len = 0;
+
+ len += arc_cmp_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ len += arc_cmpz_r(BUF(buf, len), REG_LO(rd), REG_LO(rs));
+ disp = get_displacement(curr_off + len, targ_off);
+ len += arc_bcc(BUF(buf, len), eq ? CC_equal : CC_unequal, disp);
+
+ return len;
+}
+
+/*
+ * tst rd_hi, rs_hi
+ * tst.z rd_lo, rs_lo
+ * bne @target
+ */
+static u8 gen_jset_64(u8 *buf, u8 rd, u8 rs, u32 curr_off, u32 targ_off)
+{
+ u8 len = 0;
+ s32 disp;
+
+ len += arc_tst_r(BUF(buf, len), REG_HI(rd), REG_HI(rs));
+ len += arc_tstz_r(BUF(buf, len), REG_LO(rd), REG_LO(rs));
+ disp = get_displacement(curr_off + len, targ_off);
+ len += arc_bcc(BUF(buf, len), CC_unequal, disp);
+
+ return len;
+}
+
+/*
+ * Verify if all the jumps for a JITed jcc64 operation are valid,
+ * by consulting the data stored at "arcv2_64_jccs".
+ */
+static bool check_jcc_64(u32 curr_off, u32 targ_off, u8 cond)
+{
+ size_t i;
+
+ if (cond >= ARC_CC_LAST)
+ return false;
+
+ for (i = 0; i < JCC64_NR_OF_JMPS; i++) {
+ u32 from, to;
+
+ from = curr_off + arcv2_64_jccs.jit_off[i];
+ /* For the 2nd jump, we jump to the end of the block. */
+ if (i != JCC64_SKIP_JMP)
+ to = targ_off;
+ else
+ to = from + (JCC64_INSNS_TO_END * INSN_len_normal);
+ /* There is a "cc" in the instruction, so a "near" jump. */
+ if (!is_valid_near_disp(get_displacement(from, to)))
+ return false;
+ }
+
+ return true;
+}
+
+/* Can the jump from "curr_off" to "targ_off" actually happen? */
+bool check_jmp_64(u32 curr_off, u32 targ_off, u8 cond)
+{
+ s32 disp;
+
+ switch (cond) {
+ case ARC_CC_UGT:
+ case ARC_CC_UGE:
+ case ARC_CC_ULT:
+ case ARC_CC_ULE:
+ case ARC_CC_SGT:
+ case ARC_CC_SGE:
+ case ARC_CC_SLT:
+ case ARC_CC_SLE:
+ return check_jcc_64(curr_off, targ_off, cond);
+ case ARC_CC_EQ:
+ case ARC_CC_NE:
+ case ARC_CC_SET:
+ /*
+ * The "jump" for the JITed BPF_J{SET,EQ,NE} is actually the
+ * 3rd instruction. See comments of "gen_j{set,_eq}_64()".
+ */
+ curr_off += 2 * INSN_len_normal;
+ disp = get_displacement(curr_off, targ_off);
+ /* There is a "cc" field in the issued instruction. */
+ return is_valid_near_disp(disp);
+ case ARC_CC_AL:
+ disp = get_displacement(curr_off, targ_off);
+ return is_valid_far_disp(disp);
+ default:
+ return false;
+ }
+}
+
+/*
+ * The template for the 64-bit jumps with the following BPF conditions
+ *
+ * u< u<= u> u>= s< s<= s> s>=
+ *
+ * Looks like below:
+ *
+ * cmp rd_hi, rs_hi
+ * b<c1> @target
+ * b<c2> @end
+ * cmp rd_lo, rs_lo # if execution reaches here, r{d,s}_hi are equal
+ * b<c3> @target
+ * end:
+ *
+ * "c1" is the condition that JIT is handling minus the equality part.
+ * For instance if we have to translate an "unsigned greater or equal",
+ * then "c1" will be "unsigned greater". We won't know about equality
+ * until all 64-bits of data (higeher and lower registers) are processed.
+ *
+ * "c2" is the counter logic of "c1". For instance, if "c1" is originated
+ * from "s>", then "c2" would be "s<". Notice that equality doesn't play
+ * a role here either, because the lower 32 bits are not processed yet.
+ *
+ * "c3" is the unsigned version of "c1", no matter if the BPF condition
+ * was signed or unsigned. An unsigned version is necessary, because the
+ * MSB of the lower 32 bits does not reflect a sign in the whole 64-bit
+ * scheme. Otherwise, 64-bit comparisons like
+ * (0x0000_0000,0x8000_0000) s>= (0x0000_0000,0x0000_0000)
+ * would yield an incorrect result. Finally, if there is an equality
+ * check in the BPF condition, it will be reflected in "c3".
+ *
+ * You can find all the instances of this template where
+ * "arcv2_64_jccs" is initialised.
+ */
+static u8 gen_jcc_64(u8 *buf, u8 rd, u8 rs, u8 cond,
+ u32 curr_off, u32 targ_off)
+{
+ s32 disp;
+ u32 end_off;
+ const u8 *cc = arcv2_64_jccs.jmp[cond].cond;
+ u8 len = 0;
+
+ /* cmp rd_hi, rs_hi */
+ len += arc_cmp_r(buf, REG_HI(rd), REG_HI(rs));
+
+ /* b<c1> @target */
+ disp = get_displacement(curr_off + len, targ_off);
+ len += arc_bcc(BUF(buf, len), cc[0], disp);
+
+ /* b<c2> @end */
+ end_off = curr_off + len + (JCC64_INSNS_TO_END * INSN_len_normal);
+ disp = get_displacement(curr_off + len, end_off);
+ len += arc_bcc(BUF(buf, len), cc[1], disp);
+
+ /* cmp rd_lo, rs_lo */
+ len += arc_cmp_r(BUF(buf, len), REG_LO(rd), REG_LO(rs));
+
+ /* b<c3> @target */
+ disp = get_displacement(curr_off + len, targ_off);
+ len += arc_bcc(BUF(buf, len), cc[2], disp);
+
+ return len;
+}
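+
+/*
+ * For illustration, take ARC_CC_SGE (BPF "s>=") with the hypothetical
+ * operands rd = {hi: 0x0000_0000, lo: 0x8000_0000} and rs = {hi: 0, lo: 0}:
+ *
+ *   cmp rd_hi, rs_hi   # equal, so neither "bgt @target" nor "blt @end"
+ *   cmp rd_lo, rs_lo   # 0x8000_0000 u>= 0 -> "bhs @target" is taken
+ *
+ * which is the correct answer, since rd (2^31) s>= rs (0) as a 64-bit
+ * comparison. Had "c3" been the signed "bge", the lower word would have
+ * been misread as negative and the branch would not have been taken.
+ */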
+
+/*
+ * This function only applies the necessary logic to make the proper
+ * translations. All the sanity checks must have already been done
+ * by calling the check_jmp_64().
+ */
+u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
+{
+ u8 len = 0;
+ bool eq = false;
+ s32 disp;
+
+ switch (cond) {
+ case ARC_CC_AL:
+ disp = get_displacement(curr_off, targ_off);
+ len = arc_b(buf, disp);
+ break;
+ case ARC_CC_UGT:
+ case ARC_CC_UGE:
+ case ARC_CC_ULT:
+ case ARC_CC_ULE:
+ case ARC_CC_SGT:
+ case ARC_CC_SGE:
+ case ARC_CC_SLT:
+ case ARC_CC_SLE:
+ len = gen_jcc_64(buf, rd, rs, cond, curr_off, targ_off);
+ break;
+ case ARC_CC_EQ:
+ eq = true;
+ fallthrough;
+ case ARC_CC_NE:
+ len = gen_j_eq_64(buf, rd, rs, eq, curr_off, targ_off);
+ break;
+ case ARC_CC_SET:
+ len = gen_jset_64(buf, rd, rs, curr_off, targ_off);
+ break;
+ default:
+#ifdef ARC_BPF_JIT_DEBUG
+ pr_err("64-bit jump condition is not known.");
+ BUG();
+#endif
+ }
+ return len;
+}
+
+/*
+ * The condition codes to use when generating JIT instructions
+ * for 32-bit jumps.
+ *
+ * The "ARC_CC_AL" index is not really used by the code, but it
+ * is here for the sake of completeness.
+ *
+ * The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
+ * instruction that precedes the conditional branch.
+ */
+const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+ [ARC_CC_UGT] = CC_great_u,
+ [ARC_CC_UGE] = CC_great_eq_u,
+ [ARC_CC_ULT] = CC_less_u,
+ [ARC_CC_ULE] = CC_less_eq_u,
+ [ARC_CC_SGT] = CC_great_s,
+ [ARC_CC_SGE] = CC_great_eq_s,
+ [ARC_CC_SLT] = CC_less_s,
+ [ARC_CC_SLE] = CC_less_eq_s,
+ [ARC_CC_AL] = CC_always,
+ [ARC_CC_EQ] = CC_equal,
+ [ARC_CC_NE] = CC_unequal,
+ [ARC_CC_SET] = CC_unequal
+};
+
+/* Can the jump from "curr_off" to "targ_off" actually happen? */
+bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond)
+{
+ u8 addendum;
+ s32 disp;
+
+ if (cond >= ARC_CC_LAST)
+ return false;
+
+ /*
+ * The unconditional jump happens immediately, while the rest
+ * are either preceded by a "cmp" or "tst" instruction.
+ */
+ addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal;
+ disp = get_displacement(curr_off + addendum, targ_off);
+
+ if (cond == ARC_CC_AL)
+ return is_valid_far_disp(disp);
+ else
+ return is_valid_near_disp(disp);
+}
+
+/*
+ * The JITed code for 32-bit (conditional) branches:
+ *
+ * ARC_CC_AL @target
+ * b @jit_targ_addr
+ *
+ * ARC_CC_SET rd, rs, @target
+ * tst rd, rs
+ * bnz @jit_targ_addr
+ *
+ * ARC_CC_xx rd, rs, @target
+ * cmp rd, rs
+ * b<cc> @jit_targ_addr # cc = arcv2_32_jmps[xx]
+ */
+u8 gen_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
+{
+ s32 disp;
+ u8 len = 0;
+
+ /*
+ * Although this must have already been checked by "check_jmp_32()",
+ * we're not going to risk accessing the "arcv2_32_jmps" array without
+ * the boundary check.
+ */
+ if (cond >= ARC_CC_LAST) {
+#ifdef ARC_BPF_JIT_DEBUG
+ pr_err("32-bit jump condition is not known.");
+ BUG();
+#endif
+ return 0;
+ }
+
+ /* If there is a "condition", issue the "cmp" or "tst" first. */
+ if (cond != ARC_CC_AL) {
+ if (cond == ARC_CC_SET)
+ len = tst_r32(buf, rd, rs);
+ else
+ len = cmp_r32(buf, rd, rs);
+ /*
+ * The issued instruction affects the "disp"lacement as
+ * it alters the "curr_off" by its "len"gth. The "curr_off"
+ * should always point to the jump instruction.
+ */
+ disp = get_displacement(curr_off + len, targ_off);
+ len += arc_bcc(BUF(buf, len), arcv2_32_jmps[cond], disp);
+ } else {
+ /* The straightforward unconditional jump. */
+ disp = get_displacement(curr_off, targ_off);
+ len = arc_b(buf, disp);
+ }
+
+ return len;
+}
+
+/*
+ * Generate code for function calls. There can be two types of calls:
+ *
+ * - Calling another BPF function
+ * - Calling an in-kernel function which is compiled by ARC gcc
+ *
+ * In the latter case, we must comply with the ARCv2 ABI and handle arguments
+ * and return values accordingly.
+ */
+u8 gen_func_call(u8 *buf, ARC_ADDR func_addr, bool external_func)
+{
+ u8 len = 0;
+
+ /*
+ * In case of an in-kernel function call, always push the 5th
+ * argument onto the stack, because that's where the ABI dictates
+ * it should be found. If the callee doesn't really use it, no harm
+ * is done. The stack is readjusted either way after the call.
+ */
+ if (external_func)
+ len += push_r64(BUF(buf, len), BPF_REG_5);
+
+ len += jump_and_link(BUF(buf, len), func_addr);
+
+ if (external_func)
+ len += arc_add_i(BUF(buf, len), ARC_R_SP, ARC_R_SP, ARG5_SIZE);
+
+ return len;
+}
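+
+/*
+ * Roughly speaking, for an in-kernel call ("external_func" is true),
+ * the emitted sequence looks like:
+ *
+ *   push_r64(BPF_REG_5)        # 5th argument goes on the stack (ABI)
+ *   <jump_and_link to func_addr>
+ *   add  sp, sp, ARG5_SIZE     # readjust the stack after the call
+ *
+ * while a BPF-to-BPF call is just the "jump and link" part without
+ * the stack traffic.
+ */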
diff --git a/arch/arc/net/bpf_jit_core.c b/arch/arc/net/bpf_jit_core.c
new file mode 100644
index 000000000000..6f6b4ffccf2c
--- /dev/null
+++ b/arch/arc/net/bpf_jit_core.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The back-end-agnostic part of Just-In-Time compiler for eBPF bytecode.
+ *
+ * Copyright (c) 2024 Synopsys Inc.
+ * Author: Shahab Vahedi <shahab@synopsys.com>
+ */
+#include <linux/bug.h>
+#include "bpf_jit.h"
+
+/*
+ * Check for the return value. A pattern used often in this file.
+ * There must be a "ret" variable of type "int" in the scope.
+ */
+#define CHECK_RET(cmd) \
+ do { \
+ ret = (cmd); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+#ifdef ARC_BPF_JIT_DEBUG
+/* Dumps bytes in /var/log/messages at KERN_INFO level (4). */
+static void dump_bytes(const u8 *buf, u32 len, const char *header)
+{
+ u8 line[64];
+ size_t i, j;
+
+ pr_info("-----------------[ %s ]-----------------\n", header);
+
+ for (i = 0, j = 0; i < len; i++) {
+ /* Last input byte? */
+ if (i == len - 1) {
+ j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
+ pr_info("%s\n", line);
+ break;
+ }
+ /* End of line? */
+ else if (i % 8 == 7) {
+ j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
+ pr_info("%s\n", line);
+ j = 0;
+ } else {
+ j += scnprintf(line + j, 64 - j, "0x%02x, ", buf[i]);
+ }
+ }
+}
+#endif /* ARC_BPF_JIT_DEBUG */
+
+/********************* JIT context ***********************/
+
+/*
+ * buf: Translated instructions end up here.
+ * len: The length of whole block in bytes.
+ * index: The offset at which the _next_ instruction may be put.
+ */
+struct jit_buffer {
+ u8 *buf;
+ u32 len;
+ u32 index;
+};
+
+/*
+ * This is a subset of "struct jit_context" whose information is deemed
+ * necessary for the extra pass to come.
+ *
+ * bpf_header: Needed to finally lock the region.
+ * bpf2insn: Used to find the translation for instructions of interest.
+ *
+ * Things like "jit.buf" and "jit.len" can be retrieved respectively from
+ * "prog->bpf_func" and "prog->jited_len".
+ */
+struct arc_jit_data {
+ struct bpf_binary_header *bpf_header;
+ u32 *bpf2insn;
+};
+
+/*
+ * The JIT pertinent context that is used by different functions.
+ *
+ * prog: The current eBPF program being handled.
+ * orig_prog: The original eBPF program before any possible change.
+ * jit: The JIT buffer and its length.
+ * bpf_header: The JITed program header. "jit.buf" points inside it.
+ * emit: If set, opcodes are written to memory; else, a dry-run.
+ * do_zext: If true, 32-bit sub-regs must be zero extended.
+ * bpf2insn: Maps BPF insn indices to their counterparts in jit.buf.
+ * bpf2insn_valid: Indicates if "bpf2insn" is populated with the mappings.
+ * jit_data: A piece of memory to transfer data to the next pass.
+ * arc_regs_clobbered: Each set bit indicates the corresponding ARC reg is clobbered.
+ * save_blink: Whether ARC's "blink" register needs to be saved.
+ * frame_size: Derived from "prog->aux->stack_depth".
+ * epilogue_offset: Used by early "return"s in the code to jump here.
+ * need_extra_pass: A forecast if an "extra_pass" will occur.
+ * is_extra_pass: Indicates if the current pass is an extra pass.
+ * user_bpf_prog: True, if VM opcodes come from a real program.
+ * blinded: True if "constant blinding" step returned a new "prog".
+ * success: Indicates if the whole JIT went OK.
+ */
+struct jit_context {
+ struct bpf_prog *prog;
+ struct bpf_prog *orig_prog;
+ struct jit_buffer jit;
+ struct bpf_binary_header *bpf_header;
+ bool emit;
+ bool do_zext;
+ u32 *bpf2insn;
+ bool bpf2insn_valid;
+ struct arc_jit_data *jit_data;
+ u32 arc_regs_clobbered;
+ bool save_blink;
+ u16 frame_size;
+ u32 epilogue_offset;
+ bool need_extra_pass;
+ bool is_extra_pass;
+ bool user_bpf_prog;
+ bool blinded;
+ bool success;
+};
+
+/*
+ * If we're in ARC_BPF_JIT_DEBUG mode and the debug level is right, dump the
+ * input BPF stream. "bpf_jit_dump()" is not fully suited for this purpose.
+ */
+static void vm_dump(const struct bpf_prog *prog)
+{
+#ifdef ARC_BPF_JIT_DEBUG
+ if (bpf_jit_enable > 1)
+ dump_bytes((u8 *)prog->insns, 8 * prog->len, " VM ");
+#endif
+}
+
+/*
+ * If the right level of debug is set, dump the bytes. There are 2 variants
+ * of this function:
+ *
+ * 1. Use the standard bpf_jit_dump() which is meant only for JITed code.
+ * 2. Use the dump_bytes() to match its "vm_dump()" instance.
+ */
+static void jit_dump(const struct jit_context *ctx)
+{
+#ifdef ARC_BPF_JIT_DEBUG
+ u8 header[8];
+#endif
+ const int pass = ctx->is_extra_pass ? 2 : 1;
+
+ if (bpf_jit_enable <= 1 || !ctx->prog->jited)
+ return;
+
+#ifdef ARC_BPF_JIT_DEBUG
+ scnprintf(header, sizeof(header), "JIT:%d", pass);
+ dump_bytes(ctx->jit.buf, ctx->jit.len, header);
+ pr_info("\n");
+#else
+ bpf_jit_dump(ctx->prog->len, ctx->jit.len, pass, ctx->jit.buf);
+#endif
+}
+
+/* Initialise the context so there's no garbage. */
+static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->orig_prog = prog;
+
+ /* If constant blinding was requested but failed, scram. */
+ ctx->prog = bpf_jit_blind_constants(prog);
+ if (IS_ERR(ctx->prog))
+ return PTR_ERR(ctx->prog);
+ ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+
+ /* If the verifier doesn't zero-extend, then we have to do it. */
+ ctx->do_zext = !ctx->prog->aux->verifier_zext;
+
+ ctx->is_extra_pass = ctx->prog->jited;
+ ctx->user_bpf_prog = ctx->prog->is_func;
+
+ return 0;
+}
+
+/*
+ * Only after the first iteration of the normal pass (the dry-run)
+ * are there valid offsets in the ctx->bpf2insn array.
+ */
+static inline bool offsets_available(const struct jit_context *ctx)
+{
+ return ctx->bpf2insn_valid;
+}
+
+/*
+ * "*mem" should be freed when there is no "extra pass" to come,
+ * or the compilation has terminated abruptly. Examples of such
+ * memory allocations are ctx->jit_data and ctx->bpf2insn.
+ */
+static inline void maybe_free(struct jit_context *ctx, void **mem)
+{
+ if (*mem) {
+ if (!ctx->success || !ctx->need_extra_pass) {
+ kfree(*mem);
+ *mem = NULL;
+ }
+ }
+}
+
+/*
+ * Free memories based on the status of the context.
+ *
+ * A note about "bpf_header": On successful runs, "bpf_header" is
+ * not freed, because "jit.buf", a sub-array of it, is returned as
+ * the "bpf_func". However, "bpf_header" is lost and nothing points
+ * to it. This should not cause a leak, because apparently
+ * "bpf_header" can be revived by "bpf_jit_binary_hdr()". This is
+ * how "bpf_jit_free()" in "kernel/bpf/core.c" releases the memory.
+ */
+static void jit_ctx_cleanup(struct jit_context *ctx)
+{
+ if (ctx->blinded) {
+ /* if all went well, release the orig_prog. */
+ if (ctx->success)
+ bpf_jit_prog_release_other(ctx->prog, ctx->orig_prog);
+ else
+ bpf_jit_prog_release_other(ctx->orig_prog, ctx->prog);
+ }
+
+ maybe_free(ctx, (void **)&ctx->bpf2insn);
+ maybe_free(ctx, (void **)&ctx->jit_data);
+
+ if (!ctx->bpf2insn)
+ ctx->bpf2insn_valid = false;
+
+ /* Freeing "bpf_header" is enough. "jit.buf" is a sub-array of it. */
+ if (!ctx->success && ctx->bpf_header) {
+ bpf_jit_binary_free(ctx->bpf_header);
+ ctx->bpf_header = NULL;
+ ctx->jit.buf = NULL;
+ ctx->jit.index = 0;
+ ctx->jit.len = 0;
+ }
+
+ ctx->emit = false;
+ ctx->do_zext = false;
+}
+
+/*
+ * Analyse the register usage and record the frame size.
+ * The register usage is determined by consulting the back-end.
+ */
+static void analyze_reg_usage(struct jit_context *ctx)
+{
+ size_t i;
+ u32 usage = 0;
+ const struct bpf_insn *insn = ctx->prog->insnsi;
+
+ for (i = 0; i < ctx->prog->len; i++) {
+ u8 bpf_reg;
+ bool call;
+
+ bpf_reg = insn[i].dst_reg;
+ call = (insn[i].code == (BPF_JMP | BPF_CALL)) ? true : false;
+ usage |= mask_for_used_regs(bpf_reg, call);
+ }
+
+ ctx->arc_regs_clobbered = usage;
+ ctx->frame_size = ctx->prog->aux->stack_depth;
+}
+
+/* Verify that no instruction will be emitted when there is no buffer. */
+static inline int jit_buffer_check(const struct jit_context *ctx)
+{
+ if (ctx->emit) {
+ if (!ctx->jit.buf) {
+ pr_err("bpf-jit: inconsistence state; no "
+ "buffer to emit instructions.\n");
+ return -EINVAL;
+ } else if (ctx->jit.index > ctx->jit.len) {
+ pr_err("bpf-jit: estimated JIT length is less "
+ "than the emitted instructions.\n");
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+/* On a dry-run (emit=false), "jit.len" is growing gradually. */
+static inline void jit_buffer_update(struct jit_context *ctx, u32 n)
+{
+ if (!ctx->emit)
+ ctx->jit.len += n;
+ else
+ ctx->jit.index += n;
+}
+
+/* Based on "emit", determine the address where instructions are emitted. */
+static inline u8 *effective_jit_buf(const struct jit_context *ctx)
+{
+ return ctx->emit ? (ctx->jit.buf + ctx->jit.index) : NULL;
+}
+
+/* Prologue based on context variables set by "analyze_reg_usage()". */
+static int handle_prologue(struct jit_context *ctx)
+{
+ int ret;
+ u8 *buf = effective_jit_buf(ctx);
+ u32 len = 0;
+
+ CHECK_RET(jit_buffer_check(ctx));
+
+ len = arc_prologue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
+ jit_buffer_update(ctx, len);
+
+ return 0;
+}
+
+/* The counter part for "handle_prologue()". */
+static int handle_epilogue(struct jit_context *ctx)
+{
+ int ret;
+ u8 *buf = effective_jit_buf(ctx);
+ u32 len = 0;
+
+ CHECK_RET(jit_buffer_check(ctx));
+
+ len = arc_epilogue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
+ jit_buffer_update(ctx, len);
+
+ return 0;
+}
+
+/* Tell the index of the BPF instruction we are dealing with. */
+static inline s32 get_index_for_insn(const struct jit_context *ctx,
+ const struct bpf_insn *insn)
+{
+ return (insn - ctx->prog->insnsi);
+}
+
+/*
+ * In most of the cases, the "offset" is read from "insn->off". However,
+ * if it is an unconditional BPF_JMP32, then it comes from "insn->imm".
+ *
+ * (Courtesy of "cpu=v4" support)
+ */
+static inline s32 get_offset(const struct bpf_insn *insn)
+{
+ if ((BPF_CLASS(insn->code) == BPF_JMP32) &&
+ (BPF_OP(insn->code) == BPF_JA))
+ return insn->imm;
+ else
+ return insn->off;
+}
+
+/*
+ * Determine the index of the BPF instruction we're jumping to.
+ *
+ * The "offset" is interpreted as the "number" of BPF instructions
+ * from the _next_ BPF instruction. e.g.:
+ *
+ * 4 means 4 instructions after the next insn
+ * 0 means 0 instructions after the next insn -> fallthrough.
+ * -1 means 1 instruction before the next insn -> jmp to current insn.
+ *
+ * Another way to look at this, "offset" is the number of instructions
+ * that exist between the current instruction and the target instruction.
+ *
+ * It is worth noting that a "mov r,i64", which is 16 bytes long, is
+ * treated as two instructions; therefore, "offset" needn't be
+ * treated specially for those. Everything is uniform.
+ */
+static inline s32 get_target_index_for_insn(const struct jit_context *ctx,
+ const struct bpf_insn *insn)
+{
+ return (get_index_for_insn(ctx, insn) + 1) + get_offset(insn);
+}
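+
+/*
+ * For illustration, if the jump instruction sits at index 10:
+ *
+ *   off =  3  ->  target index = 10 + 1 + 3 = 14
+ *   off =  0  ->  target index = 10 + 1 + 0 = 11 (fallthrough)
+ *   off = -1  ->  target index = 10 + 1 - 1 = 10 (jump to itself)
+ */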
+
+/* Is there an immediate operand encoded in the "insn"? */
+static inline bool has_imm(const struct bpf_insn *insn)
+{
+ return BPF_SRC(insn->code) == BPF_K;
+}
+
+/* Is this the last BPF instruction? */
+static inline bool is_last_insn(const struct bpf_prog *prog, u32 idx)
+{
+ return idx == (prog->len - 1);
+}
+
+/*
+ * Invocation of this function conditionally signals the need for
+ * an extra pass. The conditions that must be met are:
+ *
+ * 1. The current pass itself shouldn't be an extra pass.
+ * 2. The stream of bytes being JITed must come from a user program.
+ */
+static inline void set_need_for_extra_pass(struct jit_context *ctx)
+{
+ if (!ctx->is_extra_pass)
+ ctx->need_extra_pass = ctx->user_bpf_prog;
+}
+
+/*
+ * Check if the "size" is valid and then transfer the control to
+ * the back-end for the swap.
+ */
+static int handle_swap(u8 *buf, u8 rd, u8 size, u8 endian,
+ bool force, bool do_zext, u8 *len)
+{
+ /* Sanity check on the size. */
+ switch (size) {
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ pr_err("bpf-jit: invalid size for swap.\n");
+ return -EINVAL;
+ }
+
+ *len = gen_swap(buf, rd, size, endian, force, do_zext);
+
+ return 0;
+}
+
+/* Checks if the (instruction) index is in valid range. */
+static inline bool check_insn_idx_valid(const struct jit_context *ctx,
+ const s32 idx)
+{
+ return (idx >= 0 && idx < ctx->prog->len);
+}
+
+/*
+ * Decouple the back-end from BPF by converting BPF conditions
+ * to internal enum. ARC_CC_* start from 0 and are used as index
+ * to an array. BPF_J* usage must end after this conversion.
+ */
+static int bpf_cond_to_arc(const u8 op, u8 *arc_cc)
+{
+ switch (op) {
+ case BPF_JA:
+ *arc_cc = ARC_CC_AL;
+ break;
+ case BPF_JEQ:
+ *arc_cc = ARC_CC_EQ;
+ break;
+ case BPF_JGT:
+ *arc_cc = ARC_CC_UGT;
+ break;
+ case BPF_JGE:
+ *arc_cc = ARC_CC_UGE;
+ break;
+ case BPF_JSET:
+ *arc_cc = ARC_CC_SET;
+ break;
+ case BPF_JNE:
+ *arc_cc = ARC_CC_NE;
+ break;
+ case BPF_JSGT:
+ *arc_cc = ARC_CC_SGT;
+ break;
+ case BPF_JSGE:
+ *arc_cc = ARC_CC_SGE;
+ break;
+ case BPF_JLT:
+ *arc_cc = ARC_CC_ULT;
+ break;
+ case BPF_JLE:
+ *arc_cc = ARC_CC_ULE;
+ break;
+ case BPF_JSLT:
+ *arc_cc = ARC_CC_SLT;
+ break;
+ case BPF_JSLE:
+ *arc_cc = ARC_CC_SLE;
+ break;
+ default:
+ pr_err("bpf-jit: can't handle condition 0x%02X\n", op);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Check a few things for a supposedly "jump" instruction:
+ *
+ * 0. "insn" is a "jump" instruction, but not the "call/exit" variant.
+ * 1. The current "insn" index is in valid range.
+ * 2. The index of target instruction is in valid range.
+ */
+static int check_bpf_jump(const struct jit_context *ctx,
+ const struct bpf_insn *insn)
+{
+ const u8 class = BPF_CLASS(insn->code);
+ const u8 op = BPF_OP(insn->code);
+
+ /* Must be a jmp(32) instruction that is not a "call/exit". */
+ if ((class != BPF_JMP && class != BPF_JMP32) ||
+ (op == BPF_CALL || op == BPF_EXIT)) {
+ pr_err("bpf-jit: not a jump instruction.\n");
+ return -EINVAL;
+ }
+
+ if (!check_insn_idx_valid(ctx, get_index_for_insn(ctx, insn))) {
+ pr_err("bpf-jit: the bpf jump insn is not in prog.\n");
+ return -EINVAL;
+ }
+
+ if (!check_insn_idx_valid(ctx, get_target_index_for_insn(ctx, insn))) {
+ pr_err("bpf-jit: bpf jump label is out of range.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Based on input "insn", consult "ctx->bpf2insn" to get the
+ * related index (offset) of the translation in JIT stream.
+ */
+static u32 get_curr_jit_off(const struct jit_context *ctx,
+ const struct bpf_insn *insn)
+{
+ const s32 idx = get_index_for_insn(ctx, insn);
+#ifdef ARC_BPF_JIT_DEBUG
+ BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, idx));
+#endif
+ return ctx->bpf2insn[idx];
+}
+
+/*
+ * The input "insn" must be a jump instruction.
+ *
+ * Based on input "insn", consult "ctx->bpf2insn" to get the
+ * related JIT index (offset) of "target instruction" that
+ * "insn" would jump to.
+ */
+static u32 get_targ_jit_off(const struct jit_context *ctx,
+ const struct bpf_insn *insn)
+{
+ const s32 tidx = get_target_index_for_insn(ctx, insn);
+#ifdef ARC_BPF_JIT_DEBUG
+ BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, tidx));
+#endif
+ return ctx->bpf2insn[tidx];
+}
+
+/*
+ * This function will return 0 for a feasible jump.
+ *
+ * Consult the back-end to check if it finds it feasible to emit
+ * the necessary instructions based on "cond" and the displacement
+ * between the "from_off" and the "to_off".
+ */
+static int feasible_jit_jump(u32 from_off, u32 to_off, u8 cond, bool j32)
+{
+ int ret = 0;
+
+ if (j32) {
+ if (!check_jmp_32(from_off, to_off, cond))
+ ret = -EFAULT;
+ } else {
+ if (!check_jmp_64(from_off, to_off, cond))
+ ret = -EFAULT;
+ }
+
+ if (ret != 0)
+ pr_err("bpf-jit: the JIT displacement is not OK.\n");
+
+ return ret;
+}
+
+/*
+ * This jump handler performs the following steps:
+ *
+ * 1. Compute ARC's internal condition code from BPF's
+ * 2. Determine the bitness of the operation (32 vs. 64)
+ * 3. Sanity check on BPF stream
+ * 4. Sanity check on what is supposed to be JIT's displacement
+ * 5. And finally, emit the necessary instructions
+ *
+ * The last two steps are performed through the back-end.
+ * The value of steps 1 and 2 are necessary inputs for the back-end.
+ */
+static int handle_jumps(const struct jit_context *ctx,
+ const struct bpf_insn *insn,
+ u8 *len)
+{
+ u8 cond;
+ int ret = 0;
+ u8 *buf = effective_jit_buf(ctx);
+ const bool j32 = (BPF_CLASS(insn->code) == BPF_JMP32) ? true : false;
+ const u8 rd = insn->dst_reg;
+ u8 rs = insn->src_reg;
+ u32 curr_off = 0, targ_off = 0;
+
+ *len = 0;
+
+ /* Map the BPF condition to internal enum. */
+ CHECK_RET(bpf_cond_to_arc(BPF_OP(insn->code), &cond));
+
+ /* Sanity check on the BPF byte stream. */
+ CHECK_RET(check_bpf_jump(ctx, insn));
+
+ /*
+ * Move the immediate into a temporary register _now_ for 2 reasons:
+ *
+ * 1. "gen_jmp_{32,64}()" deal with operands in registers.
+ *
+ * 2. The "len" parameter will grow so that the current jit offset
+ * (curr_off) will have increased to a point where the necessary
+ * instructions can be inserted by "gen_jmp_{32,64}()".
+ */
+ if (has_imm(insn) && cond != ARC_CC_AL) {
+ if (j32) {
+ *len += mov_r32_i32(BUF(buf, *len), JIT_REG_TMP,
+ insn->imm);
+ } else {
+ *len += mov_r64_i32(BUF(buf, *len), JIT_REG_TMP,
+ insn->imm);
+ }
+ rs = JIT_REG_TMP;
+ }
+
+ /* If the offsets are known, check if the branch can occur. */
+ if (offsets_available(ctx)) {
+ curr_off = get_curr_jit_off(ctx, insn) + *len;
+ targ_off = get_targ_jit_off(ctx, insn);
+
+ /* Sanity check on the back-end side. */
+ CHECK_RET(feasible_jit_jump(curr_off, targ_off, cond, j32));
+ }
+
+ if (j32) {
+ *len += gen_jmp_32(BUF(buf, *len), rd, rs, cond,
+ curr_off, targ_off);
+ } else {
+ *len += gen_jmp_64(BUF(buf, *len), rd, rs, cond,
+ curr_off, targ_off);
+ }
+
+ return ret;
+}
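+
+/*
+ * As an illustration of the immediate handling above, a hypothetical
+ * "BPF_JMP | BPF_JSGT | BPF_K" with imm=42 is treated like its register
+ * variant:
+ *
+ *   mov_r64_i32(buf, JIT_REG_TMP, 42)     # "rs" becomes JIT_REG_TMP
+ *   gen_jmp_64(..., rd, JIT_REG_TMP, ARC_CC_SGT, curr_off, targ_off)
+ *
+ * with "curr_off" already advanced past the emitted "mov", so that the
+ * branch displacement is measured from the right place.
+ */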
+
+/* Jump to translated epilogue address. */
+static int handle_jmp_epilogue(struct jit_context *ctx,
+ const struct bpf_insn *insn, u8 *len)
+{
+ u8 *buf = effective_jit_buf(ctx);
+ u32 curr_off = 0, epi_off = 0;
+
+ /* Check the offset only if the data is available. */
+ if (offsets_available(ctx)) {
+ curr_off = get_curr_jit_off(ctx, insn);
+ epi_off = ctx->epilogue_offset;
+
+ if (!check_jmp_64(curr_off, epi_off, ARC_CC_AL)) {
+ pr_err("bpf-jit: epilogue offset is not valid.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Jump to "epilogue offset" (rd and rs don't matter). */
+ *len = gen_jmp_64(buf, 0, 0, ARC_CC_AL, curr_off, epi_off);
+
+ return 0;
+}
+
+/* Try to get the resolved address and generate the instructions. */
+static int handle_call(struct jit_context *ctx,
+ const struct bpf_insn *insn,
+ u8 *len)
+{
+ int ret;
+ bool in_kernel_func, fixed = false;
+ u64 addr = 0;
+ u8 *buf = effective_jit_buf(ctx);
+
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, ctx->is_extra_pass,
+ &addr, &fixed);
+ if (ret < 0) {
+ pr_err("bpf-jit: can't get the address for call.\n");
+ return ret;
+ }
+ in_kernel_func = (fixed ? true : false);
+
+ /* No usable address retrieved (yet). */
+ if (!fixed && !addr)
+ set_need_for_extra_pass(ctx);
+
+ *len = gen_func_call(buf, (ARC_ADDR)addr, in_kernel_func);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL) {
+ /* Assigning ABI's return reg to JIT's return reg. */
+ *len += arc_to_bpf_return(BUF(buf, *len));
+ }
+
+ return 0;
+}
+
+/*
+ * Try to generate instructions for loading a 64-bit immediate.
+ * These sort of instructions are usually associated with the 64-bit
+ * relocations: R_BPF_64_64. Therefore, signal the need for an extra
+ * pass if the circumstances are right.
+ */
+static int handle_ld_imm64(struct jit_context *ctx,
+ const struct bpf_insn *insn,
+ u8 *len)
+{
+ const s32 idx = get_index_for_insn(ctx, insn);
+ u8 *buf = effective_jit_buf(ctx);
+
+ /* We're about to consume 2 VM instructions. */
+ if (is_last_insn(ctx->prog, idx)) {
+ pr_err("bpf-jit: need more data for 64-bit immediate.\n");
+ return -EINVAL;
+ }
+
+ *len = mov_r64_i64(buf, insn->dst_reg, insn->imm, (insn + 1)->imm);
+
+ if (bpf_pseudo_func(insn))
+ set_need_for_extra_pass(ctx);
+
+ return 0;
+}
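+
+/*
+ * As a reminder of the encoding: a (hypothetical) "r1 = 0x1122334455667788"
+ * occupies two BPF instructions; the first one carries the lower 32 bits
+ * (0x55667788) in "insn->imm" and the pseudo instruction that follows
+ * carries the upper 32 bits (0x11223344), which is why "mov_r64_i64()"
+ * is fed with "insn->imm" and "(insn + 1)->imm".
+ */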
+
+/*
+ * Handles one eBPF instruction at a time. To make this function faster,
+ * it does not call "jit_buffer_check()". Else, it would call it for every
+ * instruction. As a result, it should not be invoked directly. Only
+ * "handle_body()", that has already executed the "check", may call this
+ * function.
+ *
+ * If the "ret" value is negative, something has went wrong. Else,
+ * it mostly holds the value 0 and rarely 1. Number 1 signals
+ * the loop in "handle_body()" to skip the next instruction, because
+ * it has been consumed as part of a 64-bit immediate value.
+ */
+static int handle_insn(struct jit_context *ctx, u32 idx)
+{
+ const struct bpf_insn *insn = &ctx->prog->insnsi[idx];
+ const u8 code = insn->code;
+ const u8 dst = insn->dst_reg;
+ const u8 src = insn->src_reg;
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+ u8 *buf = effective_jit_buf(ctx);
+ u8 len = 0;
+ int ret = 0;
+
+ switch (code) {
+ /* dst += src (32-bit) */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ len = add_r32(buf, dst, src);
+ break;
+ /* dst += imm (32-bit) */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ len = add_r32_i32(buf, dst, imm);
+ break;
+ /* dst -= src (32-bit) */
+ case BPF_ALU | BPF_SUB | BPF_X:
+ len = sub_r32(buf, dst, src);
+ break;
+ /* dst -= imm (32-bit) */
+ case BPF_ALU | BPF_SUB | BPF_K:
+ len = sub_r32_i32(buf, dst, imm);
+ break;
+ /* dst = -dst (32-bit) */
+ case BPF_ALU | BPF_NEG:
+ len = neg_r32(buf, dst);
+ break;
+ /* dst *= src (32-bit) */
+ case BPF_ALU | BPF_MUL | BPF_X:
+ len = mul_r32(buf, dst, src);
+ break;
+ /* dst *= imm (32-bit) */
+ case BPF_ALU | BPF_MUL | BPF_K:
+ len = mul_r32_i32(buf, dst, imm);
+ break;
+ /* dst /= src (32-bit) */
+ case BPF_ALU | BPF_DIV | BPF_X:
+ len = div_r32(buf, dst, src, off == 1);
+ break;
+ /* dst /= imm (32-bit) */
+ case BPF_ALU | BPF_DIV | BPF_K:
+ len = div_r32_i32(buf, dst, imm, off == 1);
+ break;
+ /* dst %= src (32-bit) */
+ case BPF_ALU | BPF_MOD | BPF_X:
+ len = mod_r32(buf, dst, src, off == 1);
+ break;
+ /* dst %= imm (32-bit) */
+ case BPF_ALU | BPF_MOD | BPF_K:
+ len = mod_r32_i32(buf, dst, imm, off == 1);
+ break;
+ /* dst &= src (32-bit) */
+ case BPF_ALU | BPF_AND | BPF_X:
+ len = and_r32(buf, dst, src);
+ break;
+ /* dst &= imm (32-bit) */
+ case BPF_ALU | BPF_AND | BPF_K:
+ len = and_r32_i32(buf, dst, imm);
+ break;
+ /* dst |= src (32-bit) */
+ case BPF_ALU | BPF_OR | BPF_X:
+ len = or_r32(buf, dst, src);
+ break;
+ /* dst |= imm (32-bit) */
+ case BPF_ALU | BPF_OR | BPF_K:
+ len = or_r32_i32(buf, dst, imm);
+ break;
+ /* dst ^= src (32-bit) */
+ case BPF_ALU | BPF_XOR | BPF_X:
+ len = xor_r32(buf, dst, src);
+ break;
+ /* dst ^= imm (32-bit) */
+ case BPF_ALU | BPF_XOR | BPF_K:
+ len = xor_r32_i32(buf, dst, imm);
+ break;
+ /* dst <<= src (32-bit) */
+ case BPF_ALU | BPF_LSH | BPF_X:
+ len = lsh_r32(buf, dst, src);
+ break;
+ /* dst <<= imm (32-bit) */
+ case BPF_ALU | BPF_LSH | BPF_K:
+ len = lsh_r32_i32(buf, dst, imm);
+ break;
+ /* dst >>= src (32-bit) [unsigned] */
+ case BPF_ALU | BPF_RSH | BPF_X:
+ len = rsh_r32(buf, dst, src);
+ break;
+ /* dst >>= imm (32-bit) [unsigned] */
+ case BPF_ALU | BPF_RSH | BPF_K:
+ len = rsh_r32_i32(buf, dst, imm);
+ break;
+ /* dst >>= src (32-bit) [signed] */
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ len = arsh_r32(buf, dst, src);
+ break;
+ /* dst >>= imm (32-bit) [signed] */
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ len = arsh_r32_i32(buf, dst, imm);
+ break;
+ /* dst = src (32-bit) */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ len = mov_r32(buf, dst, src, (u8)off);
+ break;
+ /* dst = imm32 (32-bit) */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ len = mov_r32_i32(buf, dst, imm);
+ break;
+ /* dst = swap(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE: {
+ CHECK_RET(handle_swap(buf, dst, imm, BPF_SRC(code),
+ BPF_CLASS(code) == BPF_ALU64,
+ ctx->do_zext, &len));
+ break;
+ }
+ /* dst += src (64-bit) */
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ len = add_r64(buf, dst, src);
+ break;
+ /* dst += imm32 (64-bit) */
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ len = add_r64_i32(buf, dst, imm);
+ break;
+ /* dst -= src (64-bit) */
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ len = sub_r64(buf, dst, src);
+ break;
+ /* dst -= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ len = sub_r64_i32(buf, dst, imm);
+ break;
+ /* dst = -dst (64-bit) */
+ case BPF_ALU64 | BPF_NEG:
+ len = neg_r64(buf, dst);
+ break;
+ /* dst *= src (64-bit) */
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ len = mul_r64(buf, dst, src);
+ break;
+ /* dst *= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ len = mul_r64_i32(buf, dst, imm);
+ break;
+ /* dst &= src (64-bit) */
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ len = and_r64(buf, dst, src);
+ break;
+ /* dst &= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ len = and_r64_i32(buf, dst, imm);
+ break;
+ /* dst |= src (64-bit) */
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ len = or_r64(buf, dst, src);
+ break;
+ /* dst |= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ len = or_r64_i32(buf, dst, imm);
+ break;
+ /* dst ^= src (64-bit) */
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ len = xor_r64(buf, dst, src);
+ break;
+ /* dst ^= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ len = xor_r64_i32(buf, dst, imm);
+ break;
+ /* dst <<= src (64-bit) */
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ len = lsh_r64(buf, dst, src);
+ break;
+ /* dst <<= imm32 (64-bit) */
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ len = lsh_r64_i32(buf, dst, imm);
+ break;
+ /* dst >>= src (64-bit) [unsigned] */
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ len = rsh_r64(buf, dst, src);
+ break;
+ /* dst >>= imm32 (64-bit) [unsigned] */
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ len = rsh_r64_i32(buf, dst, imm);
+ break;
+ /* dst >>= src (64-bit) [signed] */
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ len = arsh_r64(buf, dst, src);
+ break;
+ /* dst >>= imm32 (64-bit) [signed] */
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ len = arsh_r64_i32(buf, dst, imm);
+ break;
+ /* dst = src (64-bit) */
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ len = mov_r64(buf, dst, src, (u8)off);
+ break;
+ /* dst = imm32 (sign extend to 64-bit) */
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ len = mov_r64_i32(buf, dst, imm);
+ break;
+ /* dst = imm64 */
+ case BPF_LD | BPF_DW | BPF_IMM:
+ CHECK_RET(handle_ld_imm64(ctx, insn, &len));
+ /* Tell the loop to skip the next instruction. */
+ ret = 1;
+ break;
+ /* dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ len = load_r(buf, dst, src, off, BPF_SIZE(code), false);
+ break;
+ case BPF_LDX | BPF_MEMSX | BPF_W:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ len = load_r(buf, dst, src, off, BPF_SIZE(code), true);
+ break;
+ /* *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ len = store_r(buf, src, dst, off, BPF_SIZE(code));
+ break;
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_DW:
+ len = store_i(buf, imm, dst, off, BPF_SIZE(code));
+ break;
+ case BPF_JMP | BPF_JA:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JA:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ CHECK_RET(handle_jumps(ctx, insn, &len));
+ break;
+ case BPF_JMP | BPF_CALL:
+ CHECK_RET(handle_call(ctx, insn, &len));
+ break;
+
+ case BPF_JMP | BPF_EXIT:
+ /* If this is the last instruction, epilogue will follow. */
+ if (is_last_insn(ctx->prog, idx))
+ break;
+ CHECK_RET(handle_jmp_epilogue(ctx, insn, &len));
+ break;
+ default:
+ pr_err("bpf-jit: can't handle instruction code 0x%02X\n", code);
+ return -EOPNOTSUPP;
+ }
+
+ if (BPF_CLASS(code) == BPF_ALU) {
+ /*
+ * Skip the "swap" instructions. Even 64-bit swaps are of type
+ * BPF_ALU (and not BPF_ALU64). Therefore, for the swaps, one
+ * has to look at the "size" of the operations rather than the
+ * ALU type. "gen_swap()" specifically takes care of that.
+ */
+ if (BPF_OP(code) != BPF_END && ctx->do_zext)
+ len += zext(BUF(buf, len), dst);
+ }
+
+ jit_buffer_update(ctx, len);
+
+ return ret;
+}
+
+static int handle_body(struct jit_context *ctx)
+{
+ int ret;
+ bool populate_bpf2insn = false;
+ const struct bpf_prog *prog = ctx->prog;
+
+ CHECK_RET(jit_buffer_check(ctx));
+
+ /*
+ * Record the mapping for the instructions during the dry-run.
+ * Doing it this way allows us to have the mapping ready for
+ * the jump instructions during the real compilation phase.
+ */
+ if (!ctx->emit)
+ populate_bpf2insn = true;
+
+ for (u32 i = 0; i < prog->len; i++) {
+ /* During the dry-run, jit.len grows gradually per BPF insn. */
+ if (populate_bpf2insn)
+ ctx->bpf2insn[i] = ctx->jit.len;
+
+ CHECK_RET(handle_insn(ctx, i));
+ if (ret > 0) {
+ /* "ret" is 1 if two (64-bit) chunks were consumed. */
+ ctx->bpf2insn[i + 1] = ctx->bpf2insn[i];
+ i++;
+ }
+ }
+
+ /* If bpf2insn had to be populated, then it is done at this point. */
+ if (populate_bpf2insn)
+ ctx->bpf2insn_valid = true;
+
+ return 0;
+}
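+
+/*
+ * To illustrate the mapping with made-up numbers: if the prologue is 16
+ * bytes long and the first two BPF instructions translate to 4 and 8
+ * bytes of ARC code respectively, then after the dry-run:
+ *
+ *   bpf2insn[0] = 16, bpf2insn[1] = 20, bpf2insn[2] = 28, ...
+ *
+ * and for a 64-bit immediate load at index "i", bpf2insn[i + 1] is a
+ * copy of bpf2insn[i], since the second chunk has no translation of
+ * its own.
+ */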
+
+/*
+ * Initialize the memory with "unimp_s" which is the mnemonic for
+ * "unimplemented" instruction and always raises an exception.
+ *
+ * The instruction is 2 bytes. If "size" is odd, there is not much
+ * that can be done about the last byte in "area". Because, the
+ * CPU always fetches instructions in two bytes. Therefore, the
+ * byte beyond the last one is going to accompany it during a
+ * possible fetch. In the most likely case of a little endian
+ * system, that beyond-byte will become the major opcode and
+ * we have no control over its initialisation.
+ */
+static void fill_ill_insn(void *area, unsigned int size)
+{
+ const u16 unimp_s = 0x79e0;
+
+ if (size & 1) {
+ *((u8 *)area + (size - 1)) = 0xff;
+ size -= 1;
+ }
+
+ memset16(area, unimp_s, size >> 1);
+}
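+
+/*
+ * e.g. for a hypothetical "size" of 7 bytes, the last byte (area[6]) is
+ * set to 0xff, and memset16() then fills the remaining 6 bytes with
+ * three copies of the 16-bit "unimp_s" pattern (0x79e0).
+ */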
+
+/* Piece of memory that can be allocated at the beginning of jit_prepare(). */
+static int jit_prepare_early_mem_alloc(struct jit_context *ctx)
+{
+ ctx->bpf2insn = kcalloc(ctx->prog->len, sizeof(ctx->jit.len),
+ GFP_KERNEL);
+
+ if (!ctx->bpf2insn) {
+ pr_err("bpf-jit: could not allocate memory for "
+ "mapping of the instructions.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Memory allocations that rely on parameters known at the end of
+ * jit_prepare().
+ */
+static int jit_prepare_final_mem_alloc(struct jit_context *ctx)
+{
+ const size_t alignment = sizeof(u32);
+
+ ctx->bpf_header = bpf_jit_binary_alloc(ctx->jit.len, &ctx->jit.buf,
+ alignment, fill_ill_insn);
+ if (!ctx->bpf_header) {
+ pr_err("bpf-jit: could not allocate memory for translation.\n");
+ return -ENOMEM;
+ }
+
+ if (ctx->need_extra_pass) {
+ ctx->jit_data = kzalloc(sizeof(*ctx->jit_data), GFP_KERNEL);
+ if (!ctx->jit_data)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * The first phase of the translation without actually emitting any
+ * instruction. It helps in getting a forecast on some aspects, such
+ * as the length of the whole program or where the epilogue starts.
+ *
+ * Whenever the necessary parameters are known, memories are allocated.
+ */
+static int jit_prepare(struct jit_context *ctx)
+{
+ int ret;
+
+ /* Dry run. */
+ ctx->emit = false;
+
+ CHECK_RET(jit_prepare_early_mem_alloc(ctx));
+
+ /* Get the length of prologue section after some register analysis. */
+ analyze_reg_usage(ctx);
+ CHECK_RET(handle_prologue(ctx));
+
+ CHECK_RET(handle_body(ctx));
+
+ /* Record at which offset epilogue begins. */
+ ctx->epilogue_offset = ctx->jit.len;
+
+ /* Process the epilogue section now. */
+ CHECK_RET(handle_epilogue(ctx));
+
+ CHECK_RET(jit_prepare_final_mem_alloc(ctx));
+
+ return 0;
+}
+
+/*
+ * All the "handle_*()" functions have been called before by the
+ * "jit_prepare()". If there was an error, we would know by now.
+ * Therefore, no extra error checking at this point, other than
+ * a sanity check at the end that expects the calculated length
+ * (jit.len) to be equal to the length of generated instructions
+ * (jit.index).
+ */
+static int jit_compile(struct jit_context *ctx)
+{
+ int ret;
+
+ /* Let there be code. */
+ ctx->emit = true;
+
+ CHECK_RET(handle_prologue(ctx));
+
+ CHECK_RET(handle_body(ctx));
+
+ CHECK_RET(handle_epilogue(ctx));
+
+ if (ctx->jit.index != ctx->jit.len) {
+ pr_err("bpf-jit: divergence between the phases; "
+ "%u vs. %u (bytes).\n",
+ ctx->jit.len, ctx->jit.index);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Calling this function implies a successful JIT. A successful
+ * translation is signaled by setting the right parameters:
+ *
+ * prog->jited=1, prog->jited_len=..., prog->bpf_func=...
+ */
+static int jit_finalize(struct jit_context *ctx)
+{
+ struct bpf_prog *prog = ctx->prog;
+
+ /* We're going to need this information for the "do_extra_pass()". */
+ if (ctx->need_extra_pass) {
+ ctx->jit_data->bpf_header = ctx->bpf_header;
+ ctx->jit_data->bpf2insn = ctx->bpf2insn;
+ prog->aux->jit_data = (void *)ctx->jit_data;
+ } else {
+ /*
+ * If things seem finalised, then mark the JITed memory
+ * as R-X and flush it.
+ */
+ if (bpf_jit_binary_lock_ro(ctx->bpf_header)) {
+ pr_err("bpf-jit: Could not lock the JIT memory.\n");
+ return -EFAULT;
+ }
+ flush_icache_range((unsigned long)ctx->bpf_header,
+ (unsigned long)
+ BUF(ctx->jit.buf, ctx->jit.len));
+ prog->aux->jit_data = NULL;
+ bpf_prog_fill_jited_linfo(prog, ctx->bpf2insn);
+ }
+
+ ctx->success = true;
+ prog->bpf_func = (void *)ctx->jit.buf;
+ prog->jited_len = ctx->jit.len;
+ prog->jited = 1;
+
+ jit_ctx_cleanup(ctx);
+ jit_dump(ctx);
+
+ return 0;
+}
+
+/*
+ * A lenient verification for the existence of JIT context in "prog".
+ * Apparently the JIT internals, namely jit_subprogs() in bpf/verifier.c,
+ * may request a second compilation although nothing needs to be done.
+ */
+static inline int check_jit_context(const struct bpf_prog *prog)
+{
+ if (!prog->aux->jit_data) {
+ pr_notice("bpf-jit: no jit data for the extra pass.\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Reuse the previous pass's data. */
+static int jit_resume_context(struct jit_context *ctx)
+{
+ struct arc_jit_data *jdata =
+ (struct arc_jit_data *)ctx->prog->aux->jit_data;
+
+ if (!jdata) {
+ pr_err("bpf-jit: no jit data for the extra pass.\n");
+ return -EINVAL;
+ }
+
+ ctx->jit.buf = (u8 *)ctx->prog->bpf_func;
+ ctx->jit.len = ctx->prog->jited_len;
+ ctx->bpf_header = jdata->bpf_header;
+ ctx->bpf2insn = (u32 *)jdata->bpf2insn;
+ ctx->bpf2insn_valid = ctx->bpf2insn ? true : false;
+ ctx->jit_data = jdata;
+
+ return 0;
+}
+
+/*
+ * Patch in the new addresses. The instructions of interest are:
+ *
+ * - call
+ * - ld r64, imm64
+ *
+ * For "call"s, it resolves the addresses one more time through the
+ * handle_call().
+ *
+ * For 64-bit immediate loads, it just retranslates them, because the BPF
+ * core in kernel might have changed the value since the normal pass.
+ */
+static int jit_patch_relocations(struct jit_context *ctx)
+{
+ const u8 bpf_opc_call = BPF_JMP | BPF_CALL;
+ const u8 bpf_opc_ldi64 = BPF_LD | BPF_DW | BPF_IMM;
+ const struct bpf_prog *prog = ctx->prog;
+ int ret;
+
+ ctx->emit = true;
+ for (u32 i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ u8 dummy;
+ /*
+ * Adjust "ctx.jit.index", so "gen_*()" functions below
+ * can use it for their output addresses.
+ */
+ ctx->jit.index = ctx->bpf2insn[i];
+
+ if (insn->code == bpf_opc_call) {
+ CHECK_RET(handle_call(ctx, insn, &dummy));
+ } else if (insn->code == bpf_opc_ldi64) {
+ CHECK_RET(handle_ld_imm64(ctx, insn, &dummy));
+ /* Skip the next instruction. */
+ ++i;
+ }
+ }
+ return 0;
+}
+
+/*
+ * A normal pass that involves a "dry-run" phase, jit_prepare(),
+ * to get the necessary data for the real compilation phase,
+ * jit_compile().
+ */
+static struct bpf_prog *do_normal_pass(struct bpf_prog *prog)
+{
+ struct jit_context ctx;
+
+ /* Bail out if JIT is disabled. */
+ if (!prog->jit_requested)
+ return prog;
+
+ if (jit_ctx_init(&ctx, prog)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ /* Get the lengths and allocate buffer. */
+ if (jit_prepare(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ if (jit_compile(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ if (jit_finalize(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ return ctx.prog;
+}
+
+/*
+ * If there are multi-function BPF programs that call each other,
+ * their translated addresses are not known all at once. Therefore,
+ * an extra pass is needed to consult the bpf_jit_get_func_addr()
+ * again to get the newly translated addresses in order to resolve
+ * the "call"s.
+ */
+static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
+{
+ struct jit_context ctx;
+
+ /* Skip if there's no context to resume from. */
+ if (check_jit_context(prog))
+ return prog;
+
+ if (jit_ctx_init(&ctx, prog)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ if (jit_resume_context(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ if (jit_patch_relocations(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ if (jit_finalize(&ctx)) {
+ jit_ctx_cleanup(&ctx);
+ return prog;
+ }
+
+ return ctx.prog;
+}
+
+/*
+ * This function may be invoked twice for the same stream of BPF
+ * instructions. The "extra pass" happens, when there are "call"s
+ * involved that their addresses are not known during the first
+ * invocation.
+ */
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ vm_dump(prog);
+
+ /* Was this program already translated? */
+ if (!prog->jited)
+ return do_normal_pass(prog);
+ else
+ return do_extra_pass(prog);
+
+ return prog;
+}
diff --git a/arch/arm/Kbuild b/arch/arm/Kbuild
index b506622e7e23..69de6b6243c7 100644
--- a/arch/arm/Kbuild
+++ b/arch/arm/Kbuild
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_FPE_NWFPE) += nwfpe/
# Put arch/arm/fastfpe/ to use this.
-obj-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/$(src)/%,%,$(wildcard $(srctree)/$(src)/fastfpe/))
+obj-$(CONFIG_FPE_FASTFPE) += $(patsubst $(src)/%,%,$(wildcard $(src)/fastfpe/))
obj-$(CONFIG_VFP) += vfp/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_VDSO) += vdso/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b14aed3a17ab..ee5115252aac 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
@@ -99,7 +100,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP if ARM_LPAE
+ select HAVE_GUP_FAST if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
@@ -1233,9 +1234,9 @@ config HIGHPTE
consumed by page tables. Setting this option will allow
user-space 2nd level page tables to reside in high memory.
-config CPU_SW_DOMAIN_PAN
- bool "Enable use of CPU domains to implement privileged no-access"
- depends on MMU && !ARM_LPAE
+config ARM_PAN
+ bool "Enable privileged no-access"
+ depends on MMU
default y
help
Increase kernel security by ensuring that normal kernel accesses
@@ -1244,10 +1245,26 @@ config CPU_SW_DOMAIN_PAN
by ensuring that magic values (such as LIST_POISON) will always
fault when dereferenced.
+ The implementation uses CPU domains when !CONFIG_ARM_LPAE and
+ disables TTBR0 page table walks when CONFIG_ARM_LPAE is enabled.
+
+config CPU_SW_DOMAIN_PAN
+ def_bool y
+ depends on ARM_PAN && !ARM_LPAE
+ help
+ Enable use of CPU domains to implement privileged no-access.
+
CPUs with low-vector mappings use a best-efforts implementation.
Their lower 1MB needs to remain accessible for the vectors, but
the remainder of userspace will become appropriately inaccessible.
+config CPU_TTBR0_PAN
+ def_bool y
+ depends on ARM_PAN && ARM_LPAE
+ help
+ Enable privileged no-access by disabling TTBR0 page table walks when
+ running in kernel mode.
+
config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index abd6a2889fd0..ba9b9a802469 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -25,8 +25,7 @@ targets := Image zImage xipImage bootpImage uImage
ifeq ($(CONFIG_XIP_KERNEL),y)
-cmd_deflate_xip_data = $(CONFIG_SHELL) -c \
- '$(srctree)/$(src)/deflate_xip_data.sh $< $@'
+cmd_deflate_xip_data = $(CONFIG_SHELL) -c '$(src)/deflate_xip_data.sh $< $@'
ifeq ($(CONFIG_XIP_DEFLATED_DATA),y)
quiet_cmd_mkxip = XIPZ $@
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index a2934e6fd89a..f3443f7d7b02 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -5,7 +5,6 @@
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
-GCOV_PROFILE := n
ifdef PHYS_OFFSET
add_hex = $(shell printf 0x%x $$(( $(1) + $(2) )) )
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 726ecabcef09..6bca03c0c7f0 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -22,13 +22,6 @@ ifeq ($(CONFIG_ARM_VIRT_EXT),y)
OBJS += hyp-stub.o
endif
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-UBSAN_SANITIZE := n
-
#
# Architecture dependencies
#
diff --git a/arch/arm/boot/dts/allwinner/Makefile b/arch/arm/boot/dts/allwinner/Makefile
index 2d26c3397f14..4247f19b1adc 100644
--- a/arch/arm/boot/dts/allwinner/Makefile
+++ b/arch/arm/boot/dts/allwinner/Makefile
@@ -61,6 +61,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a13-olinuxino.dtb \
sun5i-a13-olinuxino-micro.dtb \
sun5i-a13-pocketbook-touch-lux-3.dtb \
+ sun5i-a13-pocketbook-614-plus.dtb \
sun5i-a13-q8-tablet.dtb \
sun5i-a13-utoo-p66.dtb \
sun5i-gr8-chip-pro.dtb \
diff --git a/arch/arm/boot/dts/allwinner/sun5i-a13-pocketbook-614-plus.dts b/arch/arm/boot/dts/allwinner/sun5i-a13-pocketbook-614-plus.dts
new file mode 100644
index 000000000000..ab8d138dc11d
--- /dev/null
+++ b/arch/arm/boot/dts/allwinner/sun5i-a13-pocketbook-614-plus.dts
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 Denis Burkov <hitechshell@mail.ru>
+ */
+
+/dts-v1/;
+#include "sun5i-a13.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "PocketBook 614 Plus";
+ compatible = "pocketbook,614-plus", "allwinner,sun5i-a13";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ linux,default-trigger = "default-on";
+ gpios = <&pio 4 8 GPIO_ACTIVE_LOW>; /* PE8 */
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key-0 {
+ label = "Right";
+ linux,code = <KEY_NEXT>;
+ gpios = <&pio 6 9 GPIO_ACTIVE_LOW>; /* PG9 */
+ };
+
+ key-1 {
+ label = "Left";
+ linux,code = <KEY_PREVIOUS>;
+ gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 */
+ };
+ };
+
+ reg_3v3_mmc0: regulator-mmc0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-mmc0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 4 4 GPIO_ACTIVE_LOW>; /* PE4 */
+ vin-supply = <&reg_vcc3v3>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ axp209: pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupts = <0>;
+ };
+};
+
+#include "axp209.dtsi"
+
+&i2c1 {
+ status = "okay";
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ };
+};
+
+&lradc {
+ vref-supply = <&reg_ldo2>;
+ status = "okay";
+
+ button-300 {
+ label = "Down";
+ linux,code = <KEY_DOWN>;
+ channel = <0>;
+ voltage = <300000>;
+ };
+
+ button-700 {
+ label = "Up";
+ linux,code = <KEY_UP>;
+ channel = <0>;
+ voltage = <700000>;
+ };
+
+ button-1000 {
+ label = "Left";
+ linux,code = <KEY_LEFT>;
+ channel = <0>;
+ voltage = <1000000>;
+ };
+
+ button-1200 {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ channel = <0>;
+ voltage = <1200000>;
+ };
+
+ button-1500 {
+ label = "Right";
+ linux,code = <KEY_RIGHT>;
+ channel = <0>;
+ voltage = <1500000>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_3v3_mmc0>;
+ bus-width = <4>;
+ cd-gpios = <&pio 6 0 GPIO_ACTIVE_LOW>; /* PG0 */
+ status = "okay";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pc_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&otg_sram {
+ status = "okay";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+ regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
+&reg_usb0_vbus {
+ status = "okay";
+ gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+};
+
+&reg_usb1_vbus {
+ gpio = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pg_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_vbus_det-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>;
+ usb0_vbus-supply = <&reg_usb0_vbus>;
+ usb1_vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/allwinner/sun5i-a13.dtsi b/arch/arm/boot/dts/allwinner/sun5i-a13.dtsi
index 3325ab07094a..2c9152b151be 100644
--- a/arch/arm/boot/dts/allwinner/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun5i-a13.dtsi
@@ -62,14 +62,14 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit: cpu_crit {
+ cpu_crit: cpu-crit {
/* milliCelsius */
temperature = <100000>;
hysteresis = <2000>;
diff --git a/arch/arm/boot/dts/allwinner/sun5i-gr8-chip-pro.dts b/arch/arm/boot/dts/allwinner/sun5i-gr8-chip-pro.dts
index 5c3562b85a5b..ffbd99c176db 100644
--- a/arch/arm/boot/dts/allwinner/sun5i-gr8-chip-pro.dts
+++ b/arch/arm/boot/dts/allwinner/sun5i-gr8-chip-pro.dts
@@ -77,7 +77,7 @@
};
};
- mmc0_pwrseq: mmc0_pwrseq {
+ mmc0_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 1 10 GPIO_ACTIVE_LOW>; /* PB10 */
};
diff --git a/arch/arm/boot/dts/allwinner/sun5i-r8-chip.dts b/arch/arm/boot/dts/allwinner/sun5i-r8-chip.dts
index 4192c23848c3..8c784a2c086e 100644
--- a/arch/arm/boot/dts/allwinner/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/allwinner/sun5i-r8-chip.dts
@@ -77,7 +77,7 @@
};
};
- mmc0_pwrseq: mmc0_pwrseq {
+ mmc0_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 2 19 GPIO_ACTIVE_LOW>; /* PC19 */
};
diff --git a/arch/arm/boot/dts/allwinner/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/allwinner/sun6i-a31-hummingbird.dts
index 236ebfc06192..5bce7a32651e 100644
--- a/arch/arm/boot/dts/allwinner/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/allwinner/sun6i-a31-hummingbird.dts
@@ -109,7 +109,7 @@
};
};
- reg_vga_3v3: vga_3v3_regulator {
+ reg_vga_3v3: vga-3v3-regulator {
compatible = "regulator-fixed";
regulator-name = "vga-3v3";
regulator-min-microvolt = <3300000>;
@@ -119,7 +119,7 @@
gpio = <&pio 7 25 GPIO_ACTIVE_HIGH>; /* PH25 */
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 */
};
diff --git a/arch/arm/boot/dts/allwinner/sun6i-a31.dtsi b/arch/arm/boot/dts/allwinner/sun6i-a31.dtsi
index 5cce4918f84c..f0145d6b9c53 100644
--- a/arch/arm/boot/dts/allwinner/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun6i-a31.dtsi
@@ -179,14 +179,14 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <70000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit: cpu_crit {
+ cpu_crit: cpu-crit {
/* milliCelsius */
temperature = <100000>;
hysteresis = <2000>;
@@ -1318,7 +1318,7 @@
compatible = "allwinner,sun6i-a31-prcm";
reg = <0x01f01400 0x200>;
- ar100: ar100_clk {
+ ar100: ar100-clk {
compatible = "allwinner,sun6i-a31-ar100-clk";
#clock-cells = <0>;
clocks = <&rtc CLK_OSC32K>, <&osc24M>,
@@ -1327,7 +1327,7 @@
clock-output-names = "ar100";
};
- ahb0: ahb0_clk {
+ ahb0: ahb0-clk {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <1>;
@@ -1336,14 +1336,14 @@
clock-output-names = "ahb0";
};
- apb0: apb0_clk {
+ apb0: apb0-clk {
compatible = "allwinner,sun6i-a31-apb0-clk";
#clock-cells = <0>;
clocks = <&ahb0>;
clock-output-names = "apb0";
};
- apb0_gates: apb0_gates_clk {
+ apb0_gates: apb0-gates-clk {
compatible = "allwinner,sun6i-a31-apb0-gates-clk";
#clock-cells = <1>;
clocks = <&apb0>;
@@ -1353,14 +1353,14 @@
"apb0_i2c";
};
- ir_clk: ir_clk {
+ ir_clk: ir-clk {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-mod0-clk";
clocks = <&rtc CLK_OSC32K>, <&osc24M>;
clock-output-names = "ir";
};
- apb0_rst: apb0_rst {
+ apb0_rst: apb0-rst {
compatible = "allwinner,sun6i-a31-clock-reset";
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/allwinner/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/allwinner/sun6i-a31s-sinovoip-bpi-m2.dts
index 96554ab4f6d3..f63d67ec9887 100644
--- a/arch/arm/boot/dts/allwinner/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/allwinner/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -75,7 +75,7 @@
};
};
- mmc2_pwrseq: mmc2_pwrseq {
+ mmc2_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 8 GPIO_ACTIVE_LOW>; /* PL8 WIFI_EN */
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi-m1-plus.dts
index caa935ca4f19..f2d7fab9978d 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi-m1-plus.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-bananapi-m1-plus.dts
@@ -86,7 +86,7 @@
};
};
- mmc3_pwrseq: mmc3_pwrseq {
+ mmc3_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 7 22 GPIO_ACTIVE_LOW>; /* PH22 WL-PMU-EN */
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-cubietruck.dts
index 52160e368304..be9b31d0f4b5 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-cubietruck.dts
@@ -96,7 +96,7 @@
};
};
- mmc3_pwrseq: mmc3_pwrseq {
+ mmc3_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 7 9 GPIO_ACTIVE_LOW>; /* PH9 WIFI_EN */
clocks = <&ccu CLK_OUT_A>;
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-hummingbird.dts
index 3def2a330598..f1e26b75cd90 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-hummingbird.dts
@@ -65,7 +65,7 @@
stdout-path = "serial0:115200n8";
};
- reg_mmc3_vdd: mmc3_vdd {
+ reg_mmc3_vdd: regulator-mmc3-vdd {
compatible = "regulator-fixed";
regulator-name = "mmc3_vdd";
regulator-min-microvolt = <3000000>;
@@ -74,7 +74,7 @@
gpio = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
};
- reg_gmac_vdd: gmac_vdd {
+ reg_gmac_vdd: regulator-gmac-vdd {
compatible = "regulator-fixed";
regulator-name = "gmac_vdd";
regulator-min-microvolt = <3000000>;
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som-evb-emmc.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som-evb-emmc.dts
index 20bf09b2226c..fb835730bbc4 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som-evb-emmc.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som-evb-emmc.dts
@@ -14,7 +14,7 @@
model = "Olimex A20-Olimex-SOM-EVB-eMMC";
compatible = "olimex,a20-olimex-som-evb-emmc", "allwinner,sun7i-a20";
- mmc2_pwrseq: mmc2_pwrseq {
+ mmc2_pwrseq: pwrseq {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&pio 2 18 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb-emmc.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb-emmc.dts
index a59755a2e7a9..e8977c2fe798 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb-emmc.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb-emmc.dts
@@ -13,7 +13,7 @@
model = "Olimex A20-SOM204-EVB-eMMC";
compatible = "olimex,a20-olimex-som204-evb-emmc", "allwinner,sun7i-a20";
- mmc2_pwrseq: mmc2_pwrseq {
+ mmc2_pwrseq: pwrseq-1 {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&pio 2 16 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb.dts
index 54af6c18075b..a55406657449 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-olimex-som204-evb.dts
@@ -65,7 +65,7 @@
};
};
- rtl_pwrseq: rtl_pwrseq {
+ rtl_pwrseq: pwrseq-0 {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 6 9 GPIO_ACTIVE_LOW>;
};
@@ -177,7 +177,7 @@
non-removable;
status = "okay";
- rtl8723bs: sdio_wifi@1 {
+ rtl8723bs: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-olinuxino-lime2.dts
index ecb91fb899ff..435a189332e8 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-olinuxino-lime2.dts
@@ -82,7 +82,7 @@
};
};
- reg_axp_ipsout: axp_ipsout {
+ reg_axp_ipsout: regulator-axp-ipsout {
compatible = "regulator-fixed";
regulator-name = "axp-ipsout";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20-wits-pro-a20-dkt.dts b/arch/arm/boot/dts/allwinner/sun7i-a20-wits-pro-a20-dkt.dts
index 3bfae98f3cc3..29199b6a3b4a 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20-wits-pro-a20-dkt.dts
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20-wits-pro-a20-dkt.dts
@@ -60,7 +60,7 @@
stdout-path = "serial0:115200n8";
};
- mmc3_pwrseq: mmc3_pwrseq {
+ mmc3_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 7 9 GPIO_ACTIVE_LOW>; /* PH9 WIFI_EN */
};
diff --git a/arch/arm/boot/dts/allwinner/sun7i-a20.dtsi b/arch/arm/boot/dts/allwinner/sun7i-a20.dtsi
index 5574299685ab..5f44f09c5545 100644
--- a/arch/arm/boot/dts/allwinner/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun7i-a20.dtsi
@@ -153,14 +153,14 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit: cpu_crit {
+ cpu_crit: cpu-crit {
/* milliCelsius */
temperature = <100000>;
hysteresis = <2000>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/allwinner/sun8i-a23-a33.dtsi
index cd4bf60dbb3c..2af8382ccdf5 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-a23-a33.dtsi
@@ -108,7 +108,7 @@
#size-cells = <1>;
ranges;
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -116,7 +116,7 @@
clock-output-names = "osc24M";
};
- ext_osc32k: ext_osc32k_clk {
+ ext_osc32k: ext-osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
@@ -733,7 +733,7 @@
compatible = "allwinner,sun8i-a23-prcm";
reg = <0x01f01400 0x200>;
- ar100: ar100_clk {
+ ar100: ar100-clk {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <1>;
@@ -742,7 +742,7 @@
clock-output-names = "ar100";
};
- ahb0: ahb0_clk {
+ ahb0: ahb0-clk {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <1>;
@@ -751,14 +751,14 @@
clock-output-names = "ahb0";
};
- apb0: apb0_clk {
+ apb0: apb0-clk {
compatible = "allwinner,sun8i-a23-apb0-clk";
#clock-cells = <0>;
clocks = <&ahb0>;
clock-output-names = "apb0";
};
- apb0_gates: apb0_gates_clk {
+ apb0_gates: apb0-gates-clk {
compatible = "allwinner,sun8i-a23-apb0-gates-clk";
#clock-cells = <1>;
clocks = <&apb0>;
@@ -767,7 +767,7 @@
"apb0_i2c";
};
- apb0_rst: apb0_rst {
+ apb0_rst: apb0-rst {
compatible = "allwinner,sun6i-a31-clock-reset";
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2407pxe03.dts b/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2407pxe03.dts
index d5f6aebd7216..0c585a6d990d 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2407pxe03.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2407pxe03.dts
@@ -52,7 +52,7 @@
ethernet0 = &esp8089;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL6 */
/* The esp8089 needs 200 ms after driving wifi-en high */
@@ -76,7 +76,7 @@
non-removable;
status = "okay";
- esp8089: sdio_wifi@1 {
+ esp8089: wifi@1 {
compatible = "esp,esp8089";
reg = <1>;
esp,crystal-26M-en = <2>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2809pxe04.dts b/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2809pxe04.dts
index 9f9232a2fefb..63cb4e194a03 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2809pxe04.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a23-polaroid-mid2809pxe04.dts
@@ -52,7 +52,7 @@
ethernet0 = &esp8089;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL6 */
/* The esp8089 needs 200 ms after driving wifi-en high */
@@ -69,7 +69,7 @@
non-removable;
status = "okay";
- esp8089: sdio_wifi@1 {
+ esp8089: wifi@1 {
compatible = "esp,esp8089";
reg = <1>;
esp,crystal-26M-en = <2>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a33-ga10h-v1.1.dts b/arch/arm/boot/dts/allwinner/sun8i-a33-ga10h-v1.1.dts
index 2dfdd0a3151e..f00ce03ffc84 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a33-ga10h-v1.1.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a33-ga10h-v1.1.dts
@@ -85,7 +85,7 @@
non-removable;
status = "okay";
- rtl8703as: sdio_wifi@1 {
+ rtl8703as: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a33-inet-d978-rev2.dts b/arch/arm/boot/dts/allwinner/sun8i-a33-inet-d978-rev2.dts
index 065cb620aa99..162ba93f7484 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a33-inet-d978-rev2.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a33-inet-d978-rev2.dts
@@ -78,7 +78,7 @@
non-removable;
status = "okay";
- rtl8723bs: sdio_wifi@1 {
+ rtl8723bs: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a33.dtsi b/arch/arm/boot/dts/allwinner/sun8i-a33.dtsi
index 30fdd2703b1f..36b2d78cdab9 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-a33.dtsi
@@ -323,35 +323,35 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- gpu_alert0: gpu_alert0 {
+ gpu_alert0: gpu-alert0 {
/* milliCelsius */
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_alert1: cpu_alert1 {
+ cpu_alert1: cpu-alert1 {
/* milliCelsius */
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
};
- gpu_alert1: gpu_alert1 {
+ gpu_alert1: gpu-alert1 {
/* milliCelsius */
temperature = <95000>;
hysteresis = <2000>;
type = "hot";
};
- cpu_crit: cpu_crit {
+ cpu_crit: cpu-crit {
/* milliCelsius */
temperature = <110000>;
hysteresis = <2000>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/allwinner/sun8i-a83t-bananapi-m3.dts
index 8d56b103f063..32e811fa23e2 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a83t-bananapi-m3.dts
@@ -95,7 +95,7 @@
gpio = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&ac100_rtc 1>;
clock-names = "ext_clock";
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/allwinner/sun8i-a83t-cubietruck-plus.dts
index 870993393fc2..d5e6ddaffbce 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a83t-cubietruck-plus.dts
@@ -144,7 +144,7 @@
compatible = "linux,spdif-dit";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&ac100_rtc 1>;
clock-names = "ext_clock";
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/allwinner/sun8i-a83t-tbs-a711.dts
index a7d4ca308990..43982b106a4d 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-a83t-tbs-a711.dts
@@ -123,7 +123,7 @@
vin-supply = <&reg_vbat>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 WL-PMU-EN */
diff --git a/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi b/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
index 94eb3bfc989e..addf0cb0f465 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-a83t.dtsi
@@ -164,7 +164,7 @@
ranges;
/* TODO: PRCM block has a mux for this. */
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -177,14 +177,14 @@
* It is an internal RC-based oscillator.
* TODO: Its controls are in the PRCM block.
*/
- osc16M: osc16M_clk {
+ osc16M: osc16M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <16000000>;
clock-output-names = "osc16M";
};
- osc16Md512: osc16Md512_clk {
+ osc16Md512: osc16Md512-clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <512>;
@@ -1127,7 +1127,7 @@
#reset-cells = <1>;
};
- r_cpucfg@1f01c00 {
+ cpucfg@1f01c00 {
compatible = "allwinner,sun8i-a83t-r-cpucfg";
reg = <0x1f01c00 0x400>;
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-bananapi-m2-zero.dts
index d729b7c705db..d3a7c9fa23e4 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -103,7 +103,7 @@
cpu-supply = <&reg_vcc1v2>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-r1.dts b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-r1.dts
index 3356f4210d45..79b03b31c5eb 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-r1.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-r1.dts
@@ -43,11 +43,12 @@
/* Orange Pi R1 is based on Orange Pi Zero design */
#include "sun8i-h2-plus-orangepi-zero.dts"
+/delete-node/ &reg_vcc_wifi;
+
/ {
model = "Xunlong Orange Pi R1";
compatible = "xunlong,orangepi-r1", "allwinner,sun8i-h2-plus";
- /delete-node/ reg_vcc_wifi;
/*
 * This pin of this regulator is the same as the Wi-Fi extra
@@ -89,7 +90,7 @@
vmmc-supply = <&reg_vcc3v3>;
vqmmc-supply = <&reg_vcc3v3>;
- rtl8189etv: sdio_wifi@1 {
+ rtl8189etv: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-zero.dts
index 3706216ffb40..1b001f2ad0ef 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h2-plus-orangepi-zero.dts
@@ -80,7 +80,7 @@
};
};
- reg_vcc_wifi: reg_vcc_wifi {
+ reg_vcc_wifi: reg-vcc-wifi {
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -105,7 +105,7 @@
states = <1100000 0>, <1300000 1>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>;
post-power-on-delay-ms = <200>;
@@ -149,7 +149,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- xr819: sdio_wifi@1 {
+ xr819: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-beelink-x2.dts
index a6d38ecee141..5b77300307de 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-beelink-x2.dts
@@ -122,7 +122,7 @@
compatible = "linux,spdif-dit";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
@@ -185,7 +185,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- sdiowifi: sdio_wifi@1 {
+ sdiowifi: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-duo2.dts
index 343b02b97155..2b0566d4b386 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-duo2.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-duo2.dts
@@ -87,7 +87,7 @@
vin-supply = <&reg_vcc5v0>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
@@ -119,7 +119,7 @@
non-removable;
status = "okay";
- sdio_wifi: sdio_wifi@1 {
+ sdio_wifi: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-m1-plus.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-m1-plus.dts
index 4ba533b0340f..59bd0746acf8 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-m1-plus.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-m1-plus.dts
@@ -62,7 +62,7 @@
gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
};
@@ -132,7 +132,7 @@
non-removable;
status = "okay";
- sdio_wifi: sdio_wifi@1 {
+ sdio_wifi: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
index 9e1a33f94cad..6d85370e04f1 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-neo-air.dts
@@ -73,7 +73,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-r1.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-r1.dts
index 42cd1131adf3..870649760f70 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-r1.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-nanopi-r1.dts
@@ -43,7 +43,7 @@
<1300000 0x1>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-2.dts
index f1f9dbead32a..d2ae47b074bf 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-2.dts
@@ -105,7 +105,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 WIFI_EN */
};
@@ -169,7 +169,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- rtl8189: sdio_wifi@1 {
+ rtl8189: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-lite.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-lite.dts
index 305b34a321f5..6a4316a52469 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-lite.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-lite.dts
@@ -143,7 +143,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- rtl8189ftv: sdio_wifi@1 {
+ rtl8189ftv: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-pc-plus.dts
index babf4cf1b2f6..8a49b3376dfc 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-pc-plus.dts
@@ -63,7 +63,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- rtl8189ftv: sdio_wifi@1 {
+ rtl8189ftv: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-zero-plus2.dts b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-zero-plus2.dts
index 561ea1d2f861..7a6444a10e25 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-zero-plus2.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-h3-orangepi-zero-plus2.dts
@@ -92,7 +92,7 @@
regulator-max-microvolt = <3300000>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-q8-common.dtsi b/arch/arm/boot/dts/allwinner/sun8i-q8-common.dtsi
index 3d9a1524e17e..272584881bb2 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-q8-common.dtsi
@@ -62,7 +62,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
/*
* Q8 boards use various PL# pins as wifi-en. On other boards
@@ -94,7 +94,7 @@
non-removable;
status = "okay";
- sdio_wifi: sdio_wifi@1 {
+ sdio_wifi: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/allwinner/sun8i-r16-bananapi-m2m.dts
index bc394686fedb..f4bf46b35bec 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-r16-bananapi-m2m.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-r16-bananapi-m2m.dts
@@ -88,7 +88,7 @@
regulator-max-microvolt = <5000000>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL06 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-r16-parrot.dts b/arch/arm/boot/dts/allwinner/sun8i-r16-parrot.dts
index 95543a9c2118..75067522ff59 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-r16-parrot.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-r16-parrot.dts
@@ -75,7 +75,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL06 */
};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/allwinner/sun8i-r40-bananapi-m2-ultra.dts
index 28197bbcb1d5..cd2351acc32f 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-r40-bananapi-m2-ultra.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-r40-bananapi-m2-ultra.dts
@@ -100,7 +100,7 @@
enable-active-high;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 WIFI_EN */
clocks = <&ccu CLK_OUTA>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-r40-oka40i-c.dts b/arch/arm/boot/dts/allwinner/sun8i-r40-oka40i-c.dts
index 0bd1336206b8..15b0b4de626a 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-r40-oka40i-c.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-r40-oka40i-c.dts
@@ -62,7 +62,7 @@
regulator-max-microvolt = <5000000>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 1 10 GPIO_ACTIVE_LOW>; // PB10 WIFI_EN
clocks = <&ccu CLK_OUTA>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-s3-pinecube.dts b/arch/arm/boot/dts/allwinner/sun8i-s3-pinecube.dts
index 20966e954eda..e0d4404b5957 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-s3-pinecube.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-s3-pinecube.dts
@@ -51,7 +51,7 @@
startup-delay-us = <200000>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 1 3 GPIO_ACTIVE_LOW>; /* PB3 WIFI-RST */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi b/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
index e8a04476b776..9e13c2aa8911 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
@@ -98,7 +98,7 @@
#size-cells = <1>;
ranges;
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -106,7 +106,7 @@
clock-output-names = "osc24M";
};
- osc32k: osc32k_clk {
+ osc32k: osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm/boot/dts/allwinner/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/allwinner/sun8i-v40-bananapi-m2-berry.dts
index 434871040aca..6575ef274453 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-v40-bananapi-m2-berry.dts
+++ b/arch/arm/boot/dts/allwinner/sun8i-v40-bananapi-m2-berry.dts
@@ -94,7 +94,7 @@
enable-active-high;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 WIFI_EN */
clocks = <&ccu CLK_OUTA>;
diff --git a/arch/arm/boot/dts/allwinner/sun9i-a80.dtsi b/arch/arm/boot/dts/allwinner/sun9i-a80.dtsi
index 7d3f3300f431..a1ae0929cec9 100644
--- a/arch/arm/boot/dts/allwinner/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun9i-a80.dtsi
@@ -196,14 +196,14 @@
* The actual TX clock rate is not controlled by the
* gmac_tx clock.
*/
- mii_phy_tx_clk: mii_phy_tx_clk {
+ mii_phy_tx_clk: mii-phy-tx-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <25000000>;
clock-output-names = "mii_phy_tx";
};
- gmac_int_tx_clk: gmac_int_tx_clk {
+ gmac_int_tx_clk: gmac-int-tx-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <125000000>;
diff --git a/arch/arm/boot/dts/allwinner/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/allwinner/sunxi-bananapi-m2-plus.dtsi
index 1d1d127cf38f..873817ddb4ea 100644
--- a/arch/arm/boot/dts/allwinner/sunxi-bananapi-m2-plus.dtsi
+++ b/arch/arm/boot/dts/allwinner/sunxi-bananapi-m2-plus.dtsi
@@ -98,7 +98,7 @@
gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm/boot/dts/allwinner/sunxi-h3-h5-emlid-neutis.dtsi b/arch/arm/boot/dts/allwinner/sunxi-h3-h5-emlid-neutis.dtsi
index 60804b0e6c56..be5f5528a118 100644
--- a/arch/arm/boot/dts/allwinner/sunxi-h3-h5-emlid-neutis.dtsi
+++ b/arch/arm/boot/dts/allwinner/sunxi-h3-h5-emlid-neutis.dtsi
@@ -18,7 +18,7 @@
stdout-path = "serial0:115200n8";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 2 7 GPIO_ACTIVE_LOW>; /* PC7 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/allwinner/sunxi-h3-h5.dtsi
index ade1cd50e445..7df60515a903 100644
--- a/arch/arm/boot/dts/allwinner/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/allwinner/sunxi-h3-h5.dtsi
@@ -83,7 +83,7 @@
#size-cells = <1>;
ranges;
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -91,7 +91,7 @@
clock-output-names = "osc24M";
};
- osc32k: osc32k_clk {
+ osc32k: osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm/boot/dts/aspeed/Makefile b/arch/arm/boot/dts/aspeed/Makefile
index d3ac20e316d0..e51c6d203725 100644
--- a/arch/arm/boot/dts/aspeed/Makefile
+++ b/arch/arm/boot/dts/aspeed/Makefile
@@ -9,17 +9,21 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-ampere-mtmitchell.dtb \
aspeed-bmc-arm-stardragon4800-rep2.dtb \
aspeed-bmc-asrock-e3c246d4i.dtb \
+ aspeed-bmc-asrock-e3c256d4i.dtb \
aspeed-bmc-asrock-romed8hm3.dtb \
+ aspeed-bmc-asrock-spc621d8hm3.dtb \
+ aspeed-bmc-asrock-x570d4u.dtb \
+ aspeed-bmc-asus-x4tf.dtb \
aspeed-bmc-bytedance-g220a.dtb \
aspeed-bmc-delta-ahe50dc.dtb \
aspeed-bmc-facebook-bletchley.dtb \
- aspeed-bmc-facebook-cloudripper.dtb \
aspeed-bmc-facebook-cmm.dtb \
aspeed-bmc-facebook-elbert.dtb \
aspeed-bmc-facebook-fuji.dtb \
aspeed-bmc-facebook-galaxy100.dtb \
aspeed-bmc-facebook-greatlakes.dtb \
- aspeed-bmc-facebook-minerva-cmc.dtb \
+ aspeed-bmc-facebook-harma.dtb \
+ aspeed-bmc-facebook-minerva.dtb \
aspeed-bmc-facebook-minipack.dtb \
aspeed-bmc-facebook-tiogapass.dtb \
aspeed-bmc-facebook-wedge40.dtb \
@@ -33,6 +37,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-ibm-rainier.dtb \
aspeed-bmc-ibm-rainier-1s4u.dtb \
aspeed-bmc-ibm-rainier-4u.dtb \
+ aspeed-bmc-ibm-system1.dtb \
aspeed-bmc-intel-s2600wf.dtb \
aspeed-bmc-inspur-fp5280g2.dtb \
aspeed-bmc-inspur-nf5280m6.dtb \
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
index 7b540880cef9..3c8925034a8c 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
@@ -813,7 +813,6 @@
};
&adc0 {
- ref_voltage = <2500>;
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c246d4i.dts
index c4b2efbfdf56..bb2e6ef609af 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c246d4i.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c246d4i.dts
@@ -83,6 +83,9 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
};
&i2c1 {
@@ -103,6 +106,12 @@
compatible = "st,24c128", "atmel,24c128";
reg = <0x57>;
pagesize = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c256d4i.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c256d4i.dts
new file mode 100644
index 000000000000..9d00ce9475f2
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-e3c256d4i.dts
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/watchdog/aspeed-wdt.h>
+
+/{
+ model = "ASRock E3C256D4I BMC";
+ compatible = "asrock,e3c256d4i-bmc", "aspeed,ast2500";
+
+ aliases {
+ serial4 = &uart5;
+
+ i2c20 = &i2c2mux0ch0;
+ i2c21 = &i2c2mux0ch1;
+ i2c22 = &i2c2mux0ch2;
+ i2c23 = &i2c2mux0ch3;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* BMC heartbeat */
+ led-0 {
+ gpios = <&gpio ASPEED_GPIO(H, 6) GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "timer";
+ };
+
+ /* system fault */
+ led-1 {
+ gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_FAULT;
+ color = <LED_COLOR_ID_RED>;
+ panic-indicator;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>,
+ <&adc 8>, <&adc 9>, <&adc 10>, <&adc 11>,
+ <&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <100000000>; /* 100 MHz */
+#include "openbmc-flash-layout-64.dtsi"
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c2mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c2mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c2mux0ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c2mux0ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c11 {
+ status = "okay";
+
+ vrm@60 {
+ compatible = "isil,isl69269";
+ reg = <0x60>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+
+ /* FRU eeprom */
+ eeprom@57 {
+ compatible = "st,24c128", "atmel,24c128";
+ reg = <0x57>;
+ pagesize = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
+ };
+};
+
+&video {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+};
+
+&peci0 {
+ status = "okay";
+};
+
+&wdt1 {
+ aspeed,reset-mask = <(AST2500_WDT_RESET_DEFAULT & ~AST2500_WDT_RESET_LPC)>;
+};
+
+&wdt2 {
+ aspeed,reset-mask = <(AST2500_WDT_RESET_DEFAULT & ~AST2500_WDT_RESET_LPC)>;
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default /* CPU */
+ &pinctrl_pwm2_default /* rear */
+ &pinctrl_pwm4_default>; /* front */
+
+ /* CPU */
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00>;
+ };
+
+ /* rear */
+ fan@2 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02>;
+ };
+
+ /* front */
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04>;
+ };
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /* A */ "", "", "NMI_BTN_N", "BMC_NMI", "", "", "", "",
+ /* B */ "", "", "", "", "", "", "", "",
+ /* C */ "", "", "", "", "", "", "", "",
+ /* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON",
+ "", "", "", "",
+ /* E */ "", "", "", "", "", "", "", "",
+ /* F */ "LOCATORLED_STATUS_N", "LOCATORBTN", "", "",
+ "", "", "BMC_PCH_SCI_LPC", "BMC_NCSI_MUX_CTL",
+ /* G */ "HWM_BAT_EN", "CHASSIS_ID0", "CHASSIS_ID1", "CHASSIS_ID2",
+ "", "", "", "",
+ /* H */ "FM_ME_RCVR_N", "O_PWROK", "", "D4_DIMM_EVENT_3V_N",
+ "MFG_MODE_N", "BMC_RTCRST", "BMC_HB_LED_N", "BMC_CASEOPEN",
+ /* I */ "", "", "", "", "", "", "", "",
+ /* J */ "BMC_READY", "BMC_PCH_BIOS_CS_N", "BMC_SMI", "", "", "", "", "",
+ /* K */ "", "", "", "", "", "", "", "",
+ /* L */ "", "", "", "", "", "", "", "",
+ /* M */ "", "", "", "", "", "", "", "",
+ /* N */ "", "", "", "", "", "", "", "",
+ /* O */ "", "", "", "", "", "", "", "",
+ /* P */ "", "", "", "", "", "", "", "",
+ /* Q */ "", "", "", "", "", "", "", "",
+ /* R */ "", "", "", "", "", "", "", "",
+ /* S */ "PCHHOT_BMC_N", "", "RSMRST", "", "", "", "", "",
+ /* T */ "", "", "", "", "", "", "", "",
+ /* U */ "", "", "", "", "", "", "", "",
+ /* V */ "", "", "", "", "", "", "", "",
+ /* W */ "", "", "", "", "", "", "", "",
+ /* X */ "", "", "", "", "", "", "", "",
+ /* Y */ "SLP_S3", "SLP_S5", "", "", "", "", "", "",
+ /* Z */ "CPU_CATERR_BMC_N", "", "SYSTEM_FAULT_LED_N", "BMC_THROTTLE_N",
+ "", "", "", "",
+ /* AA */ "CPU1_THERMTRIP_LATCH_N", "", "CPU1_PROCHOT_N", "",
+ "", "", "IRQ_SMI_ACTIVE_N", "FM_BIOS_POST_CMPLT_N",
+ /* AB */ "", "", "ME_OVERRIDE", "BMC_DMI_MODIFY", "", "", "", "",
+ /* AC */ "", "", "", "", "", "", "", "";
+};
+
+&adc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default /* 3VSB */
+ &pinctrl_adc1_default /* 5VSB */
+ &pinctrl_adc2_default /* CPU1 */
+ &pinctrl_adc3_default /* VCCSA */
+ &pinctrl_adc4_default /* VCCM */
+ &pinctrl_adc5_default /* V10M */
+ &pinctrl_adc6_default /* VCCIO */
+ &pinctrl_adc7_default /* VCCGT */
+ &pinctrl_adc8_default /* VPPM */
+ &pinctrl_adc9_default /* BAT */
+ &pinctrl_adc10_default /* 3V */
+ &pinctrl_adc11_default /* 5V */
+ &pinctrl_adc12_default /* 12V */
+ &pinctrl_adc13_default /* GND */
+ &pinctrl_adc14_default /* GND */
+ &pinctrl_adc15_default>; /* GND */
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-romed8hm3.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-romed8hm3.dts
index 4554abf0c7cd..6dd221644dc6 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-romed8hm3.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-romed8hm3.dts
@@ -71,6 +71,9 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
};
&i2c0 {
@@ -98,14 +101,14 @@
/* IPB PMIC */
lm25066@40 {
- compatible = "lm25066";
+ compatible = "ti,lm25066";
reg = <0x40>;
shunt-resistor-micro-ohms = <1000>;
};
/* 12VSB PMIC */
lm25066@41 {
- compatible = "lm25066";
+ compatible = "ti,lm25066";
reg = <0x41>;
shunt-resistor-micro-ohms = <10000>;
};
@@ -131,6 +134,12 @@
compatible = "st,24c128", "atmel,24c128";
reg = <0x50>;
pagesize = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts
new file mode 100644
index 000000000000..555485871e7a
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+
+/{
+ model = "ASRock SPC621D8HM3 BMC";
+ compatible = "asrock,spc621d8hm3-bmc", "aspeed,ast2500";
+
+ aliases {
+ serial4 = &uart5;
+
+ i2c20 = &i2c1mux0ch0;
+ i2c21 = &i2c1mux0ch1;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* BMC heartbeat */
+ led-0 {
+ gpios = <&gpio ASPEED_GPIO(H, 6) GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "timer";
+ };
+
+ /* system fault */
+ led-1 {
+ gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_FAULT;
+ color = <LED_COLOR_ID_RED>;
+ panic-indicator;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>,
+ <&adc 8>, <&adc 9>, <&adc 10>, <&adc 11>,
+ <&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>; /* 50 MHz */
+#include "openbmc-flash-layout-64.dtsi"
+ };
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&vuart {
+ status = "okay";
+ aspeed,lpc-io-reg = <0x2f8>;
+ aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&mac0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* hardware monitor/thermal sensor */
+ temperature-sensor@29 {
+ compatible = "nuvoton,nct7802";
+ reg = <0x29>;
+ };
+
+ /* motherboard temp sensor (TMP1, near BMC) */
+ temperature-sensor@4c {
+ compatible = "nuvoton,w83773g";
+ reg = <0x4c>;
+ };
+
+ /* motherboard FRU eeprom */
+ eeprom@50 {
+ compatible = "st,24c128", "atmel,24c128";
+ reg = <0x50>;
+ pagesize = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
+ };
+
+ /* M.2 slot smbus mux */
+ i2c-mux@71 {
+ compatible = "nxp,pca9545";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c1mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c1mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c11 {
+ status = "okay";
+};
+
+&i2c12 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+};
+
+&video {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+};
+
+&peci0 {
+ status = "okay";
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default
+ &pinctrl_pwm2_default
+ &pinctrl_pwm3_default
+ &pinctrl_pwm4_default>;
+
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00>;
+ };
+
+ fan@2 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02>;
+ };
+
+ fan@3 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x03>;
+ };
+
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04>;
+ };
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /* A */ "LOCATORLED_STATUS_N", "LOCATORBTN_N",
+ "BMC_READY_N", "FM_SPD_DDRCPU_LVLSHFT_EN",
+ "", "", "", "",
+ /* B */ "NODE_ID_1", "NODE_ID_2", "PSU_FAN_FAIL_N", "",
+ "", "", "", "GPIO_RST",
+ /* C */ "", "", "", "", "", "", "", "",
+ /* D */ "FP_PWR_BTN_MUX_N", "FM_BMC_PWRBTN_OUT_N",
+ "FP_RST_BTN_N", "RST_BMC_RSTBTN_OUT_N",
+ "NMI_BTN_N", "BMC_NMI",
+ "", "",
+ /* E */ "", "", "", "FM_ME_RCVR_N", "", "", "", "",
+ /* F */ "BMC_SMB_SEL_N", "FM_CPU2_DISABLE_COD_N",
+ "FM_REMOTE_DEBUG_BMC_EN", "FM_CPU_ERR0_LVT3_EN",
+ "FM_CPU_ERR1_LVT3_EN", "FM_CPU_ERR2_LVT3_EN",
+ "FM_MEM_THERM_EVENT_CPU1_LVT3_N", "FM_MEM_THERM_EVENT_CPU2_LVT3_N",
+ /* G */ "HWM_BAT_EN", "", "BMC_PHYRST_N", "FM_BIOS_SPI_BMC_CTRL",
+ "BMC_ALERT1_N", "BMC_ALERT2_N", "BMC_ALERT3_N", "IRQ_SML0_ALERT_N",
+ /* H */ "BMC_SMB_PRESENT_1_N", "FM_PCH_CORE_VID_0", "FM_PCH_CORE_VID_1", "",
+ "FM_MFG_MODE", "BMC_RTCRST", "BMC_HB_LED_N", "BMC_CASEOPEN",
+ /* I */ "IRQ_PVDDQ_ABCD_CPU1_VRHOT_LVC3_N", "IRQ_PVDDQ_ABCD_CPU2_VRHOT_LVC3_N",
+ "IRQ_PVDDQ_EFGH_CPU1_VRHOT_LVC3_N", "IRQ_PVDDQ_EFGH_CPU2_VRHOT_LVC3_N",
+ "", "", "", "",
+ /* J */ "", "", "", "", "", "", "", "",
+ /* K */ "", "", "", "", "", "", "", "",
+ /* L */ "", "", "", "", "", "", "", "",
+ /* M */ "FM_PVCCIN_CPU1_PWR_IN_ALERT_N", "FM_PVCCIN_CPU2_PWR_IN_ALERT_N",
+ "IRQ_PVCCIN_CPU1_VRHOT_LVC3_N", "IRQ_PVCCIN_CPU2_VRHOT_LVC3_N",
+ "FM_CPU1_PROCHOT_BMC_LVC3_N", "",
+ "FM_CPU1_MEMHOT_OUT_N", "FM_CPU2_MEMHOT_OUT_N",
+ /* N */ "", "", "", "", "", "", "", "",
+ /* O */ "", "", "", "", "", "", "", "",
+ /* P */ "", "", "", "", "", "", "", "",
+ /* Q */ "", "", "", "", "", "", "RST_GLB_RST_WARN_N", "PCIE_WAKE_N",
+ /* R */ "", "", "FM_BMC_SUSACK_N", "FM_BMC_EUP_LOT6_N",
+ "", "FM_BMC_PCH_SCI_LPC_N", "", "",
+ /* S */ "FM_DBP_PRESENT_N", "FM_CPU2_SKTOCC_LCT3_N",
+ "FM_CPU1_FIVR_FAULT_LVT3", "FM_CPU2_FIVR_FAULT_LVT3",
+ "", "", "", "",
+ /* T */ "", "", "", "", "", "", "", "",
+ /* U */ "", "", "", "", "", "", "", "",
+ /* V */ "", "", "", "", "", "", "", "",
+ /* W */ "", "", "", "", "", "", "", "",
+ /* X */ "", "", "", "", "", "", "", "",
+ /* Y */ "FM_SLPS3_N", "FM_SLPS4_N", "", "FM_BMC_ONCTL_N_PLD",
+ "", "", "", "",
+ /* Z */ "FM_CPU_MSMI_CATERR_LVT3_N", "", "SYSTEM_FAULT_LED_N", "BMC_THROTTLE_N",
+ "", "", "", "",
+ /* AA */ "FM_CPU1_THERMTRIP_LATCH_LVT3_N", "FM_CPU2_THERMTRIP_LATCH_LVT3_N",
+ "FM_BIOS_POST_COMPLT_N", "DBP_BMC_SYSPWROK",
+ "", "IRQ_SML0_ALERT_MUX_N",
+ "IRQ_SMI_ACTIVE_N", "IRQ_NMI_EVENT_N",
+ /* AB */ "FM_PCH_BMC_THERMTRIP_N", "PWRGD_SYS_PWROK",
+ "ME_OVERRIDE", "IRQ_BMC_PCH_SMI_LPC_N",
+ "", "", "", "",
+ /* AC */ "", "", "", "", "", "", "", "";
+};
+
+&adc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default /* 3VSB */
+ &pinctrl_adc1_default /* 5VSB */
+ &pinctrl_adc2_default /* CPU1 */
+ &pinctrl_adc3_default /* NC */
+ &pinctrl_adc4_default /* VCCMABCD */
+ &pinctrl_adc5_default /* VCCMEFGH */
+ &pinctrl_adc6_default /* NC */
+ &pinctrl_adc7_default /* NC */
+ &pinctrl_adc8_default /* PVNN_PCH */
+ &pinctrl_adc9_default /* 1P05PCH */
+ &pinctrl_adc10_default /* 1P8PCH */
+ &pinctrl_adc11_default /* BAT */
+ &pinctrl_adc12_default /* 3V */
+ &pinctrl_adc13_default /* 5V */
+ &pinctrl_adc14_default /* 12V */
+ &pinctrl_adc15_default>; /* GND */
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts
new file mode 100644
index 000000000000..8dee4faa9e07
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Asrock Rack X570D4U BMC";
+ compatible = "asrock,x570d4u-bmc", "aspeed,ast2500";
+
+ aliases {
+ i2c40 = &i2c4mux0ch0;
+ i2c41 = &i2c4mux0ch1;
+ i2c42 = &i2c4mux0ch2;
+ i2c43 = &i2c4mux0ch3;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pci_memory: region@9a000000 {
+ no-map;
+ reg = <0x9a000000 0x00010000>; /* 64K */
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02800000>; /* 40M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ /* led-heartbeat-n */
+ gpios = <&gpio ASPEED_GPIO(H, 6) GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_HEARTBEAT;
+ linux,default-trigger = "timer";
+ };
+
+ led-1 {
+ /* led-fault-n */
+ gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_FAULT;
+ panic-indicator;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>,
+ <&adc 5>, <&adc 6>, <&adc 7>, <&adc 8>, <&adc 9>,
+ <&adc 10>, <&adc 11>, <&adc 12>;
+ };
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /* A */ "input-locatorled-n", "", "", "", "", "", "", "",
+ /* B */ "input-bios-post-cmplt-n", "", "", "", "", "", "", "",
+ /* C */ "", "", "", "", "", "", "control-locatorbutton-n", "",
+ /* D */ "button-power-n", "control-power-n", "button-reset-n",
+ "control-reset-n", "", "", "", "",
+ /* E */ "", "", "", "", "", "", "", "",
+ /* F */ "", "", "", "", "", "", "", "",
+ /* G */ "output-hwm-vbat-enable", "input-id0-n", "input-id1-n",
+ "input-id2-n", "input-aux-smb-alert-n", "",
+ "input-psu-smb-alert-n", "",
+ /* H */ "", "", "", "", "input-mfg-mode-n", "",
+ "led-heartbeat-n", "input-case-open-n",
+ /* I */ "", "", "", "", "", "", "", "",
+ /* J */ "output-bmc-ready-n", "", "", "", "", "", "", "",
+ /* K */ "", "", "", "", "", "", "", "",
+ /* L */ "", "", "", "", "", "", "", "",
+ /* M */ "", "", "", "", "", "", "", "",
+ /* N */ "", "", "", "", "", "", "", "",
+ /* O */ "", "", "", "", "", "", "", "",
+ /* P */ "", "", "", "", "", "", "", "",
+ /* Q */ "", "", "", "", "input-bmc-smb-present-n", "", "",
+ "input-pcie-wake-n",
+ /* R */ "", "", "", "", "", "", "", "",
+ /* S */ "input-bmc-pchhot-n", "", "", "", "", "", "", "",
+ /* T */ "", "", "", "", "", "", "", "",
+ /* U */ "", "", "", "", "", "", "", "",
+ /* V */ "", "", "", "", "", "", "", "",
+ /* W */ "", "", "", "", "", "", "", "",
+ /* X */ "", "", "", "", "", "", "", "",
+ /* Y */ "input-sleep-s3-n", "input-sleep-s5-n", "", "", "", "",
+ "", "",
+ /* Z */ "", "", "led-fault-n", "output-bmc-throttle-n", "", "",
+ "", "",
+ /* AA */ "input-cpu1-thermtrip-latch-n", "",
+ "input-cpu1-prochot-n", "", "", "", "", "",
+ /* AB */ "", "input-power-good", "", "", "", "", "", "",
+ /* AC */ "", "", "", "", "", "", "", "";
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ label = "bmc";
+ m25p,fast-read;
+ spi-max-frequency = <10000000>;
+#include "openbmc-flash-layout-64.dtsi"
+ };
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&vuart {
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
+};
+
+&mac1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii2_default &pinctrl_mdio2_default>;
+ use-ncsi;
+
+ nvmem-cells = <&eth1_macaddress>;
+ nvmem-cell-names = "mac-address";
+};
+
+&i2c0 {
+ /* SMBus on auxiliary panel header (AUX_PANEL1) */
+ status = "okay";
+};
+
+&i2c1 {
+ /* Hardware monitoring SMBus */
+ status = "okay";
+
+ w83773g@4c {
+ compatible = "nuvoton,w83773g";
+ reg = <0x4c>;
+ };
+};
+
+&i2c2 {
+ /* PSU SMBus (PSU_SMB1) */
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c4mux0ch0: i2c@0 {
+ /* SMBus on PCI express 16x slot */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c4mux0ch1: i2c@1 {
+ /* SMBus on PCI express 8x slot */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c4mux0ch2: i2c@2 {
+ /* Unknown */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c4mux0ch3: i2c@3 {
+ /* SMBus on PCI express 1x slot */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c5 {
+ /* SMBus on BMC connector (BMC_SMB_1) */
+ status = "okay";
+};
+
+&i2c7 {
+ /* FRU and SPD EEPROM SMBus */
+ status = "okay";
+
+ eeprom@57 {
+ compatible = "st,24c128", "atmel,24c128";
+ reg = <0x57>;
+ pagesize = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
+
+ eth1_macaddress: macaddress@3f88 {
+ reg = <0x3f88 6>;
+ };
+ };
+};
+
+&i2c8 {
+ /* SMBus on intelligent platform management bus header (IPMB_1) */
+ status = "okay";
+};
+
+&gfx {
+ status = "okay";
+};
+
+&pinctrl {
+ aspeed,external-nodes = <&gfx &lhc>;
+};
+
+&vhub {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
+
+&kcs3 {
+ aspeed,lpc-io-reg = <0xca2>;
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>;
+};
+
+&p2a {
+ status = "okay";
+ memory-region = <&pci_memory>;
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default
+ &pinctrl_pwm1_default
+ &pinctrl_pwm2_default
+ &pinctrl_pwm3_default
+ &pinctrl_pwm4_default
+ &pinctrl_pwm5_default>;
+
+ fan@0 {
+ /* FAN1 (4-pin) */
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00>;
+ };
+
+ fan@1 {
+ /* FAN2 (4-pin) */
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x01>;
+ };
+
+ fan@2 {
+ /* FAN3 (4-pin) */
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02>;
+ };
+
+ fan@3 {
+ /* FAN4 (6-pin) */
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04 0x0b>;
+ };
+
+ fan@4 {
+ /* FAN6 (6-pin) */
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x06 0x0d>;
+ };
+
+ fan@5 {
+ /* FAN5 (6-pin) */
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x05 0x0c>;
+ };
+};
+
+&adc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default /* 3VSB */
+ &pinctrl_adc1_default /* 5VSB */
+ &pinctrl_adc2_default /* VCPU */
+ &pinctrl_adc3_default /* VSOC */
+ &pinctrl_adc4_default /* VCCM */
+ &pinctrl_adc5_default /* APU-VDDP */
+ &pinctrl_adc6_default /* PM-VDD-CLDO */
+ &pinctrl_adc7_default /* PM-VDDCR-S5 */
+ &pinctrl_adc8_default /* PM-VDDCR */
+ &pinctrl_adc9_default /* VBAT */
+ &pinctrl_adc10_default /* 3V */
+ &pinctrl_adc11_default /* 5V */
+ &pinctrl_adc12_default>; /* 12V */
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asus-x4tf.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asus-x4tf.dts
new file mode 100644
index 000000000000..64f4ed07c7d2
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asus-x4tf.dts
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 ASUS Corp.
+
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include "aspeed-g6-pinctrl.dtsi"
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/gpio/aspeed-gpio.h>
+
+/ {
+ model = "ASUS-X4TF";
+ compatible = "asus,x4tf-bmc", "aspeed,ast2600";
+
+ aliases {
+ serial4 = &uart5;
+ };
+
+ chosen {
+ stdout-path = "serial4:115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ video_engine_memory: video {
+ size = <0x04000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
+ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
+ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>,
+ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-heartbeat {
+ gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-uid {
+ gpios = <&gpio0 ASPEED_GPIO(P, 1) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ default-state = "off";
+ };
+
+ led-status_Y {
+ gpios = <&gpio1 ASPEED_GPIO(B, 1) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led-sys_boot_status {
+ gpios = <&gpio1 ASPEED_GPIO(B, 0) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+};
+
+&adc0 {
+ vref = <2500>;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
+ &pinctrl_adc2_default &pinctrl_adc3_default
+ &pinctrl_adc4_default &pinctrl_adc5_default
+ &pinctrl_adc6_default &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ vref = <2500>;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+ &pinctrl_adc10_default &pinctrl_adc11_default
+ &pinctrl_adc12_default &pinctrl_adc13_default
+ &pinctrl_adc14_default &pinctrl_adc15_default>;
+};
+
+&peci0 {
+ status = "okay";
+};
+
+&lpc_snoop {
+ snoop-ports = <0x80>;
+ status = "okay";
+};
+
+&mac2 {
+ status = "okay";
+ phy-mode = "rmii";
+ use-ncsi;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii3_default>;
+};
+
+&mac3 {
+ status = "okay";
+ phy-mode = "rmii";
+ use-ncsi;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+};
+
+&fmc {
+ status = "okay";
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-64.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ label = "bios";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp75";
+ reg = <0x49>;
+ };
+
+ pca9555_4_20: gpio@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9555_4_22: gpio@22 {
+ compatible = "nxp,pca9555";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9555_4_24: gpio@24 {
+ compatible = "nxp,pca9555";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ /*A0 - A3 0*/ "", "STRAP_BMC_BATTERY_GPIO1", "", "",
+ /*A4 - A7 4*/ "", "", "", "",
+ /*B0 - B7 8*/ "", "", "", "", "", "", "", "";
+ };
+
+ pca9555_4_26: gpio@26 {
+ compatible = "nxp,pca9555";
+ reg = <0x26>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
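+ /* PCA9546: 4-channel I2C switch; i2c@0..i2c@3 are its downstream buses. */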
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ status = "okay";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel_1: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ channel_2: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ channel_3: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ channel_4: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ pca9555_5_24: gpio@24 {
+ compatible = "nxp,pca9555";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
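+ /*
+ * 4-channel PCA9546; channel 0 carries the fan GPIO expanders
+ * (SYS_FAN* lines) and an INA219 power monitor.
+ */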
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ status = "okay";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel_5: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ pca9555_5_5_20: gpio@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "SYS_FAN6", "SYS_FAN5",
+ "SYS_FAN4", "SYS_FAN3",
+ "SYS_FAN2", "SYS_FAN1";
+ };
+
+ pca9555_5_5_21: gpio@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ power-monitor@44 {
+ compatible = "ti,ina219";
+ reg = <0x44>;
+ shunt-resistor = <2>;
+ };
+ };
+
+ channel_6: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ channel_7: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ channel_8: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ pca9555_6_27: gpio@27 {
+ compatible = "nxp,pca9555";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9555_6_20: gpio@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ /*A0 0*/ "", "", "", "", "", "", "", "",
+ /*B0 8*/ "Drive_NVMe1", "Drive_NVMe2", "", "",
+ /*B4 12*/ "", "", "", "";
+ };
+
+ pca9555_6_21: gpio@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
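+ /*
+ * Two PCA9546 switches: the mux at 0x70 parks on channel 1 when idle
+ * (idle-state), the mux at 0x71 disconnects all channels when idle.
+ */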
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ status = "okay";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ idle-state = <1>;
+
+ channel_9: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp75";
+ reg = <0x49>;
+ };
+
+ power-monitor@40 {
+ compatible = "ti,ina219";
+ reg = <0x40>;
+ shunt-resistor = <2>;
+ };
+
+ power-monitor@41 {
+ compatible = "ti,ina219";
+ reg = <0x41>;
+ shunt-resistor = <5>;
+ };
+ };
+
+ channel_10: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ channel_11: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ channel_12: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9546";
+ status = "okay";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ channel_13: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ channel_14: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ channel_15: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ channel_16: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ status = "okay";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ channel_17: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ channel_18: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+
+ power-monitor@41 {
+ compatible = "ti,ina219";
+ reg = <0x41>;
+ shunt-resistor = <5>;
+ };
+ };
+
+ channel_19: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ channel_20: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c11 {
+ status = "okay";
+};
+
+&i2c14 {
+ status = "okay";
+ multi-master;
+
+ eeprom@50 {
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c08";
+ reg = <0x51>;
+ };
+};
+
+&sgpiom0 {
+ status = "okay";
+ ngpios = <128>;
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&sdc {
+ status = "okay";
+};
+
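+ /* IPMI KCS channels exposed to the host over LPC; aspeed,lpc-io-reg sets each channel's I/O port. */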
+&kcs1 {
+ aspeed,lpc-io-reg = <0xca0>;
+ status = "okay";
+};
+
+&kcs2 {
+ aspeed,lpc-io-reg = <0xca8>;
+ status = "okay";
+};
+
+&kcs3 {
+ aspeed,lpc-io-reg = <0xca2>;
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0 0*/ "", "", "", "", "", "", "", "",
+ /*B0 8*/ "", "", "", "", "", "", "PS_PWROK", "",
+ /*C0 16*/ "", "", "", "", "", "", "", "",
+ /*D0 24*/ "", "", "", "", "", "", "", "",
+ /*E0 32*/ "", "", "", "", "", "", "", "",
+ /*F0 40*/ "", "", "", "", "", "", "", "",
+ /*G0 48*/ "", "", "", "", "", "", "", "",
+ /*H0 56*/ "", "", "", "", "", "", "", "",
+ /*I0 64*/ "", "", "", "", "", "", "", "",
+ /*J0 72*/ "", "", "", "", "", "", "", "",
+ /*K0 80*/ "", "", "", "", "", "", "", "",
+ /*L0 88*/ "", "", "", "", "", "", "", "",
+ /*M0 96*/ "", "", "", "", "", "", "", "",
+ /*N0 104*/ "", "", "", "",
+ /*N4 108*/ "POST_COMPLETE", "ESR1_GPIO_AST_SPISEL", "", "",
+ /*O0 112*/ "", "", "", "", "", "", "", "",
+ /*P0 120*/ "ID_BUTTON", "ID_OUT", "POWER_BUTTON", "POWER_OUT",
+ /*P4 124*/ "RESET_BUTTON", "RESET_OUT", "", "HEARTBEAT",
+ /*Q0 128*/ "", "", "", "", "", "", "", "",
+ /*R0 136*/ "", "", "", "", "", "", "", "",
+ /*S0 144*/ "", "", "", "", "", "", "", "",
+ /*T0 152*/ "", "", "", "", "", "", "", "",
+ /*U0 160*/ "", "", "", "", "", "", "", "",
+ /*V0 168*/ "", "", "", "", "", "", "", "",
+ /*W0 176*/ "", "", "", "", "", "", "", "",
+ /*X0 184*/ "", "", "", "", "", "", "", "",
+ /*Y0 192*/ "", "", "", "", "", "", "", "",
+ /*Z0 200*/ "", "", "", "", "", "", "", "";
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-delta-ahe50dc.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-delta-ahe50dc.dts
index 6600f7e9bf5e..b6bfdaea08e6 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-delta-ahe50dc.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-delta-ahe50dc.dts
@@ -14,11 +14,11 @@
#define EFUSE(hexaddr, num) \
efuse@##hexaddr { \
- compatible = "lm25066"; \
+ compatible = "ti,lm25066"; \
reg = <0x##hexaddr>; \
shunt-resistor-micro-ohms = <675>; \
regulators { \
- efuse##num: vout0 { \
+ efuse##num: vout { \
regulator-name = __stringify(efuse##num##-reg); \
}; \
}; \
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-cloudripper.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-cloudripper.dts
deleted file mode 100644
index d49328fa487a..000000000000
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-cloudripper.dts
+++ /dev/null
@@ -1,544 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2020 Facebook Inc.
-
-/dts-v1/;
-
-#include <dt-bindings/leds/common.h>
-#include "ast2600-facebook-netbmc-common.dtsi"
-
-/ {
- model = "Facebook Cloudripper BMC";
- compatible = "facebook,cloudripper-bmc", "aspeed,ast2600";
-
- aliases {
- /*
- * PCA9548 (1-0070) provides 8 channels connecting to
- * SMB (Switch Main Board).
- */
- i2c16 = &imux16;
- i2c17 = &imux17;
- i2c18 = &imux18;
- i2c19 = &imux19;
- i2c20 = &imux20;
- i2c21 = &imux21;
- i2c22 = &imux22;
- i2c23 = &imux23;
-
- /*
- * PCA9548 (2-0070) provides 8 channels connecting to
- * SCM (System Controller Module).
- */
- i2c24 = &imux24;
- i2c25 = &imux25;
- i2c26 = &imux26;
- i2c27 = &imux27;
- i2c28 = &imux28;
- i2c29 = &imux29;
- i2c30 = &imux30;
- i2c31 = &imux31;
-
- /*
- * PCA9548 (3-0070) provides 8 channels connecting to
- * SMB (Switch Main Board).
- */
- i2c32 = &imux32;
- i2c33 = &imux33;
- i2c34 = &imux34;
- i2c35 = &imux35;
- i2c36 = &imux36;
- i2c37 = &imux37;
- i2c38 = &imux38;
- i2c39 = &imux39;
-
- /*
- * PCA9548 (8-0070) provides 8 channels connecting to
- * PDB (Power Delivery Board).
- */
- i2c40 = &imux40;
- i2c41 = &imux41;
- i2c42 = &imux42;
- i2c43 = &imux43;
- i2c44 = &imux44;
- i2c45 = &imux45;
- i2c46 = &imux46;
- i2c47 = &imux47;
-
- /*
- * PCA9548 (15-0076) provides 8 channels connecting to
- * FCM (Fan Controller Module).
- */
- i2c48 = &imux48;
- i2c49 = &imux49;
- i2c50 = &imux50;
- i2c51 = &imux51;
- i2c52 = &imux52;
- i2c53 = &imux53;
- i2c54 = &imux54;
- i2c55 = &imux55;
- };
-
- spi_gpio: spi {
- num-chipselects = <2>;
- cs-gpios = <&gpio0 ASPEED_GPIO(X, 0) GPIO_ACTIVE_LOW>,
- <&gpio0 ASPEED_GPIO(X, 1) GPIO_ACTIVE_HIGH>;
-
- eeprom@1 {
- compatible = "atmel,at93c46d";
- spi-max-frequency = <250000>;
- data-size = <16>;
- spi-cs-high;
- reg = <1>;
- };
- };
-};
-
-&ehci1 {
- status = "okay";
-};
-
-/*
- * "mdio1" is connected to the MDC/MDIO interface of the on-board
- * management switch (whose ports are connected to BMC, Host and front
- * panel ethernet port).
- */
-&mdio1 {
- status = "okay";
-};
-
-&mdio3 {
- status = "okay";
-
- ethphy1: ethernet-phy@13 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0x0d>;
- };
-};
-
-&mac3 {
- status = "okay";
- phy-mode = "rgmii";
- phy-handle = <&ethphy1>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_rgmii4_default>;
-};
-
-&i2c0 {
- multi-master;
- bus-frequency = <1000000>;
-};
-
-&i2c1 {
- /*
- * PCA9548 (1-0070) provides 8 channels connecting to SMB (Switch
- * Main Board).
- */
- i2c-mux@70 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x70>;
- i2c-mux-idle-disconnect;
-
- imux16: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- imux17: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- imux18: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- imux19: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
-
- imux20: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
-
- imux21: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
-
- imux22: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- };
-
- imux23: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
- };
- };
-};
-
-&i2c2 {
- /*
- * PCA9548 (2-0070) provides 8 channels connecting to SCM (System
- * Controller Module).
- */
- i2c-mux@70 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x70>;
- i2c-mux-idle-disconnect;
-
- imux24: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- imux25: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- imux26: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- imux27: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
-
- imux28: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
-
- imux29: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
-
- imux30: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- };
-
- imux31: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
- };
- };
-};
-
-&i2c3 {
- /*
- * PCA9548 (3-0070) provides 8 channels connecting to SMB (Switch
- * Main Board).
- */
- i2c-mux@70 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x70>;
- i2c-mux-idle-disconnect;
-
- imux32: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- imux33: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- imux34: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- imux35: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
-
- imux36: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
-
- imux37: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
-
- imux38: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- };
-
- imux39: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
- };
- };
-};
-
-&i2c6 {
- lp5012@14 {
- compatible = "ti,lp5012";
- reg = <0x14>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- multi-led@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- color = <LED_COLOR_ID_MULTI>;
- function = LED_FUNCTION_ACTIVITY;
- label = "sys";
-
- led@0 {
- reg = <0>;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@1 {
- reg = <1>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@2 {
- reg = <2>;
- color = <LED_COLOR_ID_GREEN>;
- };
- };
-
- multi-led@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- color = <LED_COLOR_ID_MULTI>;
- function = LED_FUNCTION_ACTIVITY;
- label = "fan";
-
- led@0 {
- reg = <0>;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@1 {
- reg = <1>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@2 {
- reg = <2>;
- color = <LED_COLOR_ID_GREEN>;
- };
- };
-
- multi-led@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- color = <LED_COLOR_ID_MULTI>;
- function = LED_FUNCTION_ACTIVITY;
- label = "psu";
-
- led@0 {
- reg = <0>;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@1 {
- reg = <1>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@2 {
- reg = <2>;
- color = <LED_COLOR_ID_GREEN>;
- };
- };
-
- multi-led@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- color = <LED_COLOR_ID_MULTI>;
- function = LED_FUNCTION_ACTIVITY;
- label = "scm";
-
- led@0 {
- reg = <0>;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@1 {
- reg = <1>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@2 {
- reg = <2>;
- color = <LED_COLOR_ID_GREEN>;
- };
- };
- };
-};
-
-&i2c8 {
- /*
- * PCA9548 (8-0070) provides 8 channels connecting to PDB (Power
- * Delivery Board).
- */
- i2c-mux@70 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x70>;
- i2c-mux-idle-disconnect;
-
- imux40: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- imux41: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- imux42: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- imux43: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
-
- imux44: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
-
- imux45: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
-
- imux46: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- };
-
- imux47: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
- };
-
- };
-};
-
-&i2c15 {
- /*
- * PCA9548 (15-0076) provides 8 channels connecting to FCM (Fan
- * Controller Module).
- */
- i2c-mux@76 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x76>;
- i2c-mux-idle-disconnect;
-
- imux48: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- imux49: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- imux50: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- imux51: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
-
- imux52: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
-
- imux53: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
-
- imux54: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- };
-
- imux55: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
index 7a53f54833a0..998598c15fd0 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
@@ -66,7 +66,7 @@
pinctrl-0 = <&pinctrl_rmii4_default>;
no-hw-checksum;
use-ncsi;
- mlx,multi-host;
+ mellanox,multi-host;
ncsi-ctrl,start-redo-probe;
ncsi-ctrl,no-channel-monitor;
ncsi-package = <1>;
@@ -211,7 +211,6 @@
};
&adc0 {
- ref_voltage = <2500>;
status = "okay";
pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
&pinctrl_adc2_default &pinctrl_adc3_default
@@ -220,7 +219,6 @@
};
&adc1 {
- ref_voltage = <2500>;
status = "okay";
pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc10_default
&pinctrl_adc11_default &pinctrl_adc12_default
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
new file mode 100644
index 000000000000..c118d473a76f
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2023 Facebook Inc.
+
+/dts-v1/;
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "Facebook Harma";
+ compatible = "facebook,harma-bmc", "aspeed,ast2600";
+
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart4;
+ serial4 = &uart5;
+
+ i2c20 = &imux20;
+ i2c21 = &imux21;
+ i2c22 = &imux22;
+ i2c23 = &imux23;
+ i2c24 = &imux24;
+ i2c25 = &imux25;
+ i2c26 = &imux26;
+ i2c27 = &imux27;
+ i2c28 = &imux28;
+ i2c29 = &imux29;
+ i2c30 = &imux30;
+ i2c31 = &imux31;
+
+ spi1 = &spi_gpio;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
+ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
+ <&adc1 2>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "bmc_heartbeat_amber";
+ gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "fp_id_amber";
+ default-state = "off";
+ gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ label = "power_blue";
+ default-state = "off";
+ gpios = <&gpio0 124 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
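+ // Bit-banged SPI over GPIOs Z0/Z3/Z4/Z5 for the discrete TPM (SLB9670)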
+ spi_gpio: spi-gpio {
+ status = "okay";
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-sck = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
+ gpio-mosi = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ gpio-miso = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
+
+ tpmdev@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+ };
+};
+
+// HOST BIOS Debug
+&uart1 {
+ status = "okay";
+};
+
+// SOL Host Console
+&uart2 {
+ status = "okay";
+ pinctrl-0 = <>;
+};
+
+// SOL BMC Console
+&uart4 {
+ status = "okay";
+ pinctrl-0 = <>;
+};
+
+// BMC Debug Console
+&uart5 {
+ status = "okay";
+};
+
+// MTIA
+&uart6 {
+ status = "okay";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+&wdt1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+ aspeed,reset-type = "soc";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+ aspeed,ext-pulse-duration = <256>;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ use-ncsi;
+ mellanox,multi-host;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&fmc {
+ status = "okay";
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-128.dtsi"
+ };
+
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+// BIOS Flash
+&spi2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi2_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ spi-max-frequency = <12000000>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
+ };
+};
+
+&kcs2 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca8>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
+
+ // MB NIC FRU
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9543";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux20: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ // Retimer Flash
+ eeprom@50 {
+ compatible = "atmel,24c2048";
+ reg = <0x50>;
+ pagesize = <128>;
+ };
+ };
+ imux21: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ retimer@24 {
+ compatible = "asteralabs,pt5161l";
+ reg = <0x24>;
+ };
+ };
+ };
+};
+
+&i2c4 {
+ status = "okay";
+ // PDB FRU
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+
+ power-monitor@69 {
+ compatible = "pmbus";
+ reg = <0x69>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp75";
+ reg = <0x49>;
+ };
+
+ power-monitor@22 {
+ compatible = "lltc,ltc4286";
+ reg = <0x22>;
+ adi,vrange-low-enable;
+ shunt-resistor-micro-ohms = <500>;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9543";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux22: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ imux23: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+
+ gpio@30 {
+ compatible = "nxp,pca9555";
+ reg = <0x30>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ gpio@31 {
+ compatible = "nxp,pca9555";
+ reg = <0x31>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "","","","",
+ "","","presence-cmm","",
+ "","","","",
+ "","","","";
+ };
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9546";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux24: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ imux25: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ imux26: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ imux27: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+ // PTTV FRU
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+};
+
+&i2c12 {
+ status = "okay";
+ retimer@24 {
+ compatible = "asteralabs,pt5161l";
+ reg = <0x24>;
+ };
+};
+
+&i2c13 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux28: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ imux29: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ // MB FRU
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
+ };
+ imux30: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ imux31: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+// To Debug card
+&i2c14 {
+ status = "okay";
+ multi-master;
+
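+ // I2C_OWN_SLAVE_ADDRESS marks 0x10 as the BMC's own slave address for the ipmb-dev interface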
+ ipmb@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ // SCM FRU
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ // BSM FRU
+ eeprom@56 {
+ compatible = "atmel,24c64";
+ reg = <0x56>;
+ };
+};
+
+&adc0 {
+ aspeed,int-vref-microvolt = <2500000>;
+ status = "okay";
+ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
+ &pinctrl_adc2_default &pinctrl_adc3_default
+ &pinctrl_adc4_default &pinctrl_adc5_default
+ &pinctrl_adc6_default &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ aspeed,int-vref-microvolt = <2500000>;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc10_default>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-names = "default";
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","","",
+ "bmc-spi-mux-select-0","led-identify","","",
+ /*C0-C7*/ "reset-cause-platrst","","","","",
+ "cpu0-err-alert","","",
+ /*D0-D7*/ "","","sol-uart-select","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","",
+ "leakage-detect-alert","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "led-postcode-0","led-postcode-1",
+ "led-postcode-2","led-postcode-3",
+ "led-postcode-4","led-postcode-5",
+ "led-postcode-6","led-postcode-7",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "power-button","power-host-control",
+ "reset-button","","led-power","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","led-identify-gate","",
+ /*V0-V7*/ "","","","",
+ "rtc-battery-voltage-read-enable","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","presence-post-card","";
+};
+
+&gpio1 {
+ gpio-line-names =
+ /*18A0-18A7*/ "ac-power-button","","","","","","","",
+ /*18B0-18B7*/ "","","","","","","","",
+ /*18C0-18C7*/ "","","","","","","","",
+ /*18D0-18D7*/ "","","","","","","","",
+ /*18E0-18E3*/ "","","","","","","","";
+};
+
+&sgpiom0 {
+ status = "okay";
+ max-ngpios = <128>;
+ ngpios = <128>;
+ bus-frequency = <2000000>;
+ gpio-line-names =
+ /*in - out - in - out */
+ /*A0-A3 line 0-7*/
+ "presence-scm-cable","power-config-disable-e1s-0",
+ "","",
+ "","power-config-disable-e1s-1",
+ "","",
+ /*A4-A7 line 8-15*/
+ "","power-config-asic-module-enable",
+ "","power-config-asic-power-good",
+ "","power-config-pdb-power-good",
+ "presence-cpu","smi-control-n",
+ /*B0-B3 line 16-23*/
+ "","nmi-control-n",
+ "","nmi-control-sync-flood-n",
+ "","",
+ "","",
+ /*B4-B7 line 24-31*/
+ "","FM_CPU_SP5R1",
+ "reset-cause-rsmrst","FM_CPU_SP5R2",
+ "","FM_CPU_SP5R3",
+ "","FM_CPU_SP5R4",
+ /*C0-C3 line 32-39*/
+ "","FM_CPU0_SA0",
+ "","FM_CPU0_SA1",
+ "","rt-cpu0-p0-enable",
+ "","rt-cpu0-p1-enable",
+ /*C4-C7 line 40-47*/
+ "","smb-rt-rom-p0-select",
+ "","smb-rt-rom-p1-select",
+ "","i3c-cpu-mux0-oe-n",
+ "","i3c-cpu-mux0-select",
+ /*D0-D3 line 48-55*/
+ "","i3c-cpu-mux1-oe-n",
+ "","i3c-cpu-mux1-select",
+ "","reset-control-bmc",
+ "","reset-control-cpu0-p0-mux",
+ /*D4-D7 line 56-63*/
+ "","reset-control-cpu0-p1-mux",
+ "","reset-control-e1s-mux",
+ "power-host-good","reset-control-mb-mux",
+ "host0-ready","reset-control-smb-e1s-0",
+ /*E0-E3 line 64-71*/
+ "","reset-control-smb-e1s-1",
+ "post-end-n","reset-control-srst",
+ "presence-e1s-0","reset-control-usb-hub",
+ "","reset-control",
+ /*E4-E7 line 72-79*/
+ "presence-e1s-1","reset-control-cpu-kbrst",
+ "","reset-control-platrst",
+ "","bmc-jtag-mux-select-0",
+ "","bmc-jtag-mux-select-1",
+ /*F0-F3 line 80-87*/
+ "","bmc-jtag-select",
+ "","bmc-ready-n",
+ "","bmc-ready-sgpio",
+ "","rt-cpu0-p0-force-enable",
+ /*F4-F7 line 88-95*/
+ "presence-asic-modules-0","rt-cpu0-p1-force-enable",
+ "presence-asic-modules-1","bios-debug-msg-disable",
+ "","uart-control-buffer-select",
+ "","ac-control-n",
+ /*G0-G3 line 96-103*/
+ "FM_CPU_CORETYPE2","",
+ "FM_CPU_CORETYPE1","",
+ "FM_CPU_CORETYPE0","",
+ "FM_BOARD_REV_ID5","",
+ /*G4-G7 line 104-111*/
+ "FM_BOARD_REV_ID4","",
+ "FM_BOARD_REV_ID3","",
+ "FM_BOARD_REV_ID2","",
+ "FM_BOARD_REV_ID1","",
+ /*H0-H3 line 112-119*/
+ "FM_BOARD_REV_ID0","",
+ "","","","","","",
+ /*H4-H7 line 120-127*/
+ "","",
+ "reset-control-pcie-expansion-3","",
+ "reset-control-pcie-expansion-2","",
+ "reset-control-pcie-expansion-1","",
+ /*I0-I3 line 128-135*/
+ "reset-control-pcie-expansion-0","",
+ "FM_EXP_SLOT_ID1","",
+ "FM_EXP_SLOT_ID0","",
+ "","",
+ /*I4-I7 line 136-143*/
+ "","","","","","","","",
+ /*J0-J3 line 144-151*/
+ "","","","","","","","",
+ /*J4-J7 line 152-159*/
+ "SLOT_ID_BCB_0","",
+ "SLOT_ID_BCB_1","",
+ "SLOT_ID_BCB_2","",
+ "SLOT_ID_BCB_3","",
+ /*K0-K3 line 160-167*/
+ "","","","","","","P0_I3C_APML_ALERT_L","",
+ /*K4-K7 line 168-175*/
+ "","","","","","","irq-uv-detect-alert","",
+ /*L0-L3 line 176-183*/
+ "irq-hsc-alert","",
+ "cpu0-prochot-alert","",
+ "cpu0-thermtrip-alert","",
+ "reset-cause-pcie","",
+ /*L4-L7 line 184-191*/
+ "pvdd11-ocp-alert","","","","","","","",
+ /*M0-M3 line 192-199*/
+ "","","","","","","","",
+ /*M4-M7 line 200-207*/
+ "","","","","","","","",
+ /*N0-N3 line 208-215*/
+ "","","","","","","","",
+ /*N4-N7 line 216-223*/
+ "","","","","","","","",
+ /*O0-O3 line 224-231*/
+ "","","","","","","","",
+ /*O4-O7 line 232-239*/
+ "","","","","","","","",
+ /*P0-P3 line 240-247*/
+ "","","","","","","","",
+ /*P4-P7 line 248-255*/
+ "","","","","","","","";
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva-cmc.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva-cmc.dts
deleted file mode 100644
index f04ef9063520..000000000000
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva-cmc.dts
+++ /dev/null
@@ -1,265 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2023 Facebook Inc.
-/dts-v1/;
-
-#include "aspeed-g6.dtsi"
-#include <dt-bindings/gpio/aspeed-gpio.h>
-#include <dt-bindings/i2c/i2c.h>
-
-/ {
- model = "Facebook Minerva CMC";
- compatible = "facebook,minerva-cmc", "aspeed,ast2600";
-
- aliases {
- serial5 = &uart5;
- };
-
- chosen {
- stdout-path = "serial5:57600n8";
- };
-
- memory@80000000 {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
- iio-hwmon {
- compatible = "iio-hwmon";
- io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
- <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
- <&adc1 2>;
- };
-};
-
-&uart6 {
- status = "okay";
-};
-
-&wdt1 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_wdtrst1_default>;
- aspeed,reset-type = "soc";
- aspeed,external-signal;
- aspeed,ext-push-pull;
- aspeed,ext-active-high;
- aspeed,ext-pulse-duration = <256>;
-};
-
-&mac3 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_rmii4_default>;
- use-ncsi;
- mlx,multi-host;
-};
-
-&fmc {
- status = "okay";
- flash@0 {
- status = "okay";
- m25p,fast-read;
- label = "bmc";
- spi-max-frequency = <50000000>;
-#include "openbmc-flash-layout-128.dtsi"
- };
- flash@1 {
- status = "okay";
- m25p,fast-read;
- label = "alt-bmc";
- spi-max-frequency = <50000000>;
- };
-};
-
-&rtc {
- status = "okay";
-};
-
-&sgpiom1 {
- status = "okay";
- ngpios = <128>;
- bus-frequency = <2000000>;
-};
-
-&i2c0 {
- status = "okay";
-};
-
-&i2c1 {
- status = "okay";
-
- temperature-sensor@4b {
- compatible = "ti,tmp75";
- reg = <0x4B>;
- };
-
- eeprom@51 {
- compatible = "atmel,24c128";
- reg = <0x51>;
- };
-};
-
-&i2c2 {
- status = "okay";
-
- i2c-mux@77 {
- compatible = "nxp,pca9548";
- reg = <0x77>;
- #address-cells = <1>;
- #size-cells = <0>;
- i2c-mux-idle-disconnect;
-
- i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
-
- i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
-
- i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
-
- i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
-
- i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
-
- i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
- };
- };
-};
-
-&i2c3 {
- status = "okay";
-};
-
-&i2c4 {
- status = "okay";
-};
-
-&i2c5 {
- status = "okay";
-};
-
-&i2c6 {
- status = "okay";
-};
-
-&i2c7 {
- status = "okay";
-};
-
-&i2c8 {
- status = "okay";
-};
-
-&i2c9 {
- status = "okay";
-};
-
-&i2c10 {
- status = "okay";
-};
-
-&i2c11 {
- status = "okay";
-};
-
-&i2c12 {
- status = "okay";
-};
-
-&i2c13 {
- status = "okay";
-};
-
-&i2c14 {
- status = "okay";
- multi-master;
-
- ipmb@10 {
- compatible = "ipmb-dev";
- reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
- i2c-protocol;
- };
-};
-
-&i2c15 {
- status = "okay";
-
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
-};
-
-&adc0 {
- aspeed,int-vref-microvolt = <2500000>;
- status = "okay";
- pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
- &pinctrl_adc2_default &pinctrl_adc3_default
- &pinctrl_adc4_default &pinctrl_adc5_default
- &pinctrl_adc6_default &pinctrl_adc7_default>;
-};
-
-&adc1 {
- aspeed,int-vref-microvolt = <2500000>;
- status = "okay";
- pinctrl-0 = <&pinctrl_adc10_default>;
-};
-
-&ehci1 {
- status = "okay";
-};
-
-&uhci {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
new file mode 100644
index 000000000000..942e53d5c714
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Facebook Inc.
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "Facebook Minerva CMM";
+ compatible = "facebook,minerva-cmc", "aspeed,ast2600";
+
+ aliases {
+ serial5 = &uart5;
+ /*
+ * PCA9548 (2-0077) provides 8 channels connecting to
+ * 6 pcs of FCB (Fan Controller Board).
+ */
+ i2c16 = &imux16;
+ i2c17 = &imux17;
+ i2c18 = &imux18;
+ i2c19 = &imux19;
+ i2c20 = &imux20;
+ i2c21 = &imux21;
+ };
+
+ chosen {
+ stdout-path = "serial5:57600n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
+ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
+ <&adc1 2>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-fan-fault {
+ label = "led-fan-fault";
+ gpios = <&leds_gpio 9 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+};
+
+&uart6 {
+ status = "okay";
+};
+
+&wdt1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+ aspeed,reset-type = "soc";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+ aspeed,ext-pulse-duration = <256>;
+};
+
+&mac3 {
+ status = "okay";
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
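+ /* No external PHY: the RMII link is fixed at 100 Mbit/s full duplex. */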
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-128.dtsi"
+ };
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sgpiom0 {
+ status = "okay";
+ ngpios = <128>;
+ bus-frequency = <2000000>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ power-monitor@40 {
+ compatible = "ti,ina230";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-monitor@41 {
+ compatible = "ti,ina230";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-monitor@67 {
+ compatible = "adi,ltc2945";
+ reg = <0x67>;
+ };
+
+ power-monitor@68 {
+ compatible = "adi,ltc2945";
+ reg = <0x68>;
+ };
+
+ leds_gpio: gpio@19 {
+ compatible = "nxp,pca9555";
+ reg = <0x19>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
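+ /*
+ * PCA9548 to the FCBs: channels 0-5 each expose a FRU EEPROM (0x50)
+ * and a MAX31790 fan controller (0x5e).
+ */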
+ i2c-mux@77 {
+ compatible = "nxp,pca9548";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ imux16: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ imux17: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ imux18: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ imux19: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ imux20: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ imux21: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ pwm@5e {
+ compatible = "max31790";
+ reg = <0x5e>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c11 {
+ status = "okay";
+};
+
+&i2c12 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+};
+
+&i2c14 {
+ status = "okay";
+ multi-master;
+
+ ipmb@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+};
+
+&adc0 {
+ aspeed,int-vref-microvolt = <2500000>;
+ status = "okay";
+ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
+ &pinctrl_adc2_default &pinctrl_adc3_default
+ &pinctrl_adc4_default &pinctrl_adc5_default
+ &pinctrl_adc6_default &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ aspeed,int-vref-microvolt = <2500000>;
+ status = "okay";
+ pinctrl-0 = <&pinctrl_adc10_default>;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","BLADE_UART_SEL2","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","BLADE_UART_SEL0","","","",
+ /*M0-M7*/ "","","","","","BLADE_UART_SEL1","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","BAT_DETECT","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","BLADE_UART_SEL3","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+};
+
+&sgpiom0 {
+ gpio-line-names =
+ /*"input pin","output pin"*/
+ /*A0 - A7*/
+ "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN",
+ "PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN",
+ "PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN",
+ "PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN",
+ "PRSNT_MTIA_BLADE4_N","PWREN_MTIA_BLADE4_EN",
+ "PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN",
+ "PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN",
+ "PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN",
+ /*B0 - B7*/
+ "PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN",
+ "PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN",
+ "PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN",
+ "PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN",
+ "PRSNT_MTIA_BLADE12_N","PWREN_MTIA_BLADE12_EN",
+ "PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN",
+ "PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN",
+ "PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN",
+ /*C0 - C7*/
+ "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN",
+ "PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN",
+ "PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN",
+ "PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN",
+ "PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN",
+ "PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN",
+ "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN",
+ "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN",
+ /*D0 - D7*/
+ "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN",
+ "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN",
+ "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE4_HSC_EN",
+ "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE5_HSC_EN",
+ "PWRGD_MTIA_BLADE0_PWROK_L_BUF","PWREN_MTIA_BLADE6_HSC_EN",
+ "PWRGD_MTIA_BLADE1_PWROK_L_BUF","PWREN_MTIA_BLADE7_HSC_EN",
+ "PWRGD_MTIA_BLADE2_PWROK_L_BUF","PWREN_MTIA_BLADE8_HSC_EN",
+ "PWRGD_MTIA_BLADE3_PWROK_L_BUF","PWREN_MTIA_BLADE9_HSC_EN",
+ /*E0 - E7*/
+ "PWRGD_MTIA_BLADE4_PWROK_L_BUF","PWREN_MTIA_BLADE10_HSC_EN",
+ "PWRGD_MTIA_BLADE5_PWROK_L_BUF","PWREN_MTIA_BLADE11_HSC_EN",
+ "PWRGD_MTIA_BLADE6_PWROK_L_BUF","PWREN_MTIA_BLADE12_HSC_EN",
+ "PWRGD_MTIA_BLADE7_PWROK_L_BUF","PWREN_MTIA_BLADE13_HSC_EN",
+ "PWRGD_MTIA_BLADE8_PWROK_L_BUF","PWREN_MTIA_BLADE14_HSC_EN",
+ "PWRGD_MTIA_BLADE9_PWROK_L_BUF","PWREN_MTIA_BLADE15_HSC_EN",
+ "PWRGD_MTIA_BLADE10_PWROK_L_BUF","PWREN_NW_BLADE0_HSC_EN",
+ "PWRGD_MTIA_BLADE11_PWROK_L_BUF","PWREN_NW_BLADE1_HSC_EN",
+ /*F0 - F7*/
+ "PWRGD_MTIA_BLADE12_PWROK_L_BUF","PWREN_NW_BLADE2_HSC_EN",
+ "PWRGD_MTIA_BLADE13_PWROK_L_BUF","PWREN_NW_BLADE3_HSC_EN",
+ "PWRGD_MTIA_BLADE14_PWROK_L_BUF","PWREN_NW_BLADE4_HSC_EN",
+ "PWRGD_MTIA_BLADE15_PWROK_L_BUF","PWREN_NW_BLADE5_HSC_EN",
+ "PWRGD_NW_BLADE0_PWROK_L_BUF","PWREN_FCB_TOP_L_EN",
+ "PWRGD_NW_BLADE1_PWROK_L_BUF","PWREN_FCB_TOP_R_EN",
+ "PWRGD_NW_BLADE2_PWROK_L_BUF","PWREN_FCB_MIDDLE_L_EN",
+ "PWRGD_NW_BLADE3_PWROK_L_BUF","PWREN_FCB_MIDDLE_R_EN",
+ /*G0 - G7*/
+ "PWRGD_NW_BLADE4_PWROK_L_BUF","PWREN_FCB_BOTTOM_L_EN",
+ "PWRGD_NW_BLADE5_PWROK_L_BUF","PWREN_FCB_BOTTOM_R_EN",
+ "PWRGD_FCB_TOP_0_PWROK_L_BUF","FM_CMM_AC_CYCLE_N",
+ "PWRGD_FCB_TOP_1_PWROK_L_BUF","MGMT_SFP_TX_DIS",
+ "PWRGD_FCB_MIDDLE_0_PWROK_L_BUF","",
+ "PWRGD_FCB_MIDDLE_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE0_1_N",
+ "PWRGD_FCB_BOTTOM_0_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE2_3_N",
+ "PWRGD_FCB_BOTTOM_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE4_5_N",
+ /*H0 - H7*/
+ "LEAK_DETECT_MTIA_BLADE0_N_BUF","RST_I2CRST_MTIA_BLADE6_7_N",
+ "LEAK_DETECT_MTIA_BLADE1_N_BUF","RST_I2CRST_MTIA_BLADE8_9_N",
+ "LEAK_DETECT_MTIA_BLADE2_N_BUF","RST_I2CRST_MTIA_BLADE10_11_N",
+ "LEAK_DETECT_MTIA_BLADE3_N_BUF","RST_I2CRST_MTIA_BLADE12_13_N",
+ "LEAK_DETECT_MTIA_BLADE4_N_BUF","RST_I2CRST_MTIA_BLADE14_15_N",
+ "LEAK_DETECT_MTIA_BLADE5_N_BUF","RST_I2CRST_NW_BLADE0_1_2_N",
+ "LEAK_DETECT_MTIA_BLADE6_N_BUF","RST_I2CRST_NW_BLADE3_4_5_N",
+ "LEAK_DETECT_MTIA_BLADE7_N_BUF","RST_I2CRST_FCB_N",
+ /*I0 - I7*/
+ "LEAK_DETECT_MTIA_BLADE8_N_BUF","RST_I2CRST_FCB_B_L_N",
+ "LEAK_DETECT_MTIA_BLADE9_N_BUF","RST_I2CRST_FCB_B_R_N",
+ "LEAK_DETECT_MTIA_BLADE10_N_BUF","RST_I2CRST_FCB_M_L_N",
+ "LEAK_DETECT_MTIA_BLADE11_N_BUF","RST_I2CRST_FCB_M_R_N",
+ "LEAK_DETECT_MTIA_BLADE12_N_BUF","RST_I2CRST_FCB_T_L_N",
+ "LEAK_DETECT_MTIA_BLADE13_N_BUF","RST_I2CRST_FCB_T_R_N",
+ "LEAK_DETECT_MTIA_BLADE14_N_BUF","BMC_READY",
+ "LEAK_DETECT_MTIA_BLADE15_N_BUF","wFM_88E6393X_BIN_UPDATE_EN_N",
+ /*J0 - J7*/
+ "LEAK_DETECT_NW_BLADE0_N_BUF","WATER_VALVE_CLOSED_N",
+ "LEAK_DETECT_NW_BLADE1_N_BUF","",
+ "LEAK_DETECT_NW_BLADE2_N_BUF","",
+ "LEAK_DETECT_NW_BLADE3_N_BUF","",
+ "LEAK_DETECT_NW_BLADE4_N_BUF","",
+ "LEAK_DETECT_NW_BLADE5_N_BUF","",
+ "MTIA_BLADE0_STATUS_LED","",
+ "MTIA_BLADE1_STATUS_LED","",
+ /*K0 - K7*/
+ "MTIA_BLADE2_STATUS_LED","",
+ "MTIA_BLADE3_STATUS_LED","",
+ "MTIA_BLADE4_STATUS_LED","",
+ "MTIA_BLADE5_STATUS_LED","",
+ "MTIA_BLADE6_STATUS_LED","",
+ "MTIA_BLADE7_STATUS_LED","",
+ "MTIA_BLADE8_STATUS_LED","",
+ "MTIA_BLADE9_STATUS_LED","",
+ /*L0 - L7*/
+ "MTIA_BLADE10_STATUS_LED","",
+ "MTIA_BLADE11_STATUS_LED","",
+ "MTIA_BLADE12_STATUS_LED","",
+ "MTIA_BLADE13_STATUS_LED","",
+ "MTIA_BLADE14_STATUS_LED","",
+ "MTIA_BLADE15_STATUS_LED","",
+ "NW_BLADE0_STATUS_LED","",
+ "NW_BLADE1_STATUS_LED","",
+ /*M0 - M7*/
+ "NW_BLADE2_STATUS_LED","",
+ "NW_BLADE3_STATUS_LED","",
+ "NW_BLADE4_STATUS_LED","",
+ "NW_BLADE5_STATUS_LED","",
+ "RPU_READY","",
+ "IT_GEAR_RPU_LINK_N","",
+ "IT_GEAR_LEAK","",
+ "WATER_VALVE_CLOSED_N","",
+ /*N0 - N7*/
+ "VALVE_STS0","",
+ "VALVE_STS1","",
+ "VALVE_STS2","",
+ "VALVE_STS3","",
+ "CR_TOGGLE_BOOT_BUF_N","",
+ "CMM_LC_RDY_LED_N","",
+ "CMM_LC_UNRDY_LED_N","",
+ "CMM_CABLE_CARTRIDGE_PRSNT_BOT_N","",
+ /*O0 - O7*/
+ "CMM_CABLE_CARTRIDGE_PRSNT_TOP_N","",
+ "BOT_BCB_CABLE_PRSNT_N","",
+ "TOP_BCB_CABLE_PRSNT_N","",
+ "CHASSIS0_LEAK_Q_N","",
+ "CHASSIS1_LEAK_Q_N","",
+ "LEAK0_DETECT","",
+ "LEAK1_DETECT","",
+ "MGMT_SFP_PRSNT_N","",
+ /*P0 - P7*/
+ "MGMT_SFP_TX_FAULT","",
+ "MGMT_SFP_RX_LOS","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","";
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
index 64075cc41d92..98477792aa00 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
@@ -88,7 +88,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rmii3_default>;
use-ncsi;
- mlx,multi-host;
+ mellanox,multi-host;
};
&mac3 {
@@ -96,7 +96,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rmii4_default>;
use-ncsi;
- mlx,multi-host;
+ mellanox,multi-host;
};
&fmc {
@@ -369,7 +369,14 @@
&i2c13 {
status = "okay";
- bus-frequency = <400000>;
+ bus-frequency = <100000>;
+ multi-master;
+
+ ipmb@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
};
&i2c14 {
@@ -596,7 +603,6 @@
};
&adc0 {
- ref_voltage = <2500>;
status = "okay";
pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
&pinctrl_adc2_default &pinctrl_adc3_default
@@ -605,7 +611,6 @@
};
&adc1 {
- ref_voltage = <2500>;
status = "okay";
pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default>;
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemitev2.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemitev2.dts
index 6bf2ff85a40e..5143f85fbd70 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemitev2.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemitev2.dts
@@ -95,7 +95,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rmii1_default>;
use-ncsi;
- mlx,multi-host;
+ mellanox,multi-host;
};
&adc {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
index cad1b9aac97b..6fdda42575df 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
@@ -488,7 +488,7 @@
#gpio-cells = <2>;
led@0 {
- label = "nvme0";
+ label = "nvme3";
reg = <0>;
retain-state-shutdown;
default-state = "keep";
@@ -496,7 +496,7 @@
};
led@1 {
- label = "nvme1";
+ label = "nvme2";
reg = <1>;
retain-state-shutdown;
default-state = "keep";
@@ -504,7 +504,7 @@
};
led@2 {
- label = "nvme2";
+ label = "nvme1";
reg = <2>;
retain-state-shutdown;
default-state = "keep";
@@ -512,7 +512,7 @@
};
led@3 {
- label = "nvme3";
+ label = "nvme0";
reg = <3>;
retain-state-shutdown;
default-state = "keep";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
new file mode 100644
index 000000000000..dcbc16308ab5
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
@@ -0,0 +1,1623 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2023 IBM Corp.
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/leds/leds-pca955x.h>
+
+/ {
+ model = "System1";
+ compatible = "ibm,system1-bmc", "aspeed,ast2600";
+
+ aliases {
+ i2c16 = &i2c8mux1chn0;
+ i2c17 = &i2c8mux1chn1;
+ i2c18 = &i2c8mux1chn2;
+ i2c19 = &i2c8mux1chn3;
+ i2c20 = &i2c8mux1chn4;
+ i2c21 = &i2c8mux1chn5;
+ i2c22 = &i2c8mux1chn6;
+ i2c23 = &i2c8mux1chn7;
+ i2c24 = &i2c3mux0chn0;
+ i2c25 = &i2c3mux0chn1;
+ i2c26 = &i2c3mux0chn2;
+ i2c27 = &i2c3mux0chn3;
+ i2c28 = &i2c3mux0chn4;
+ i2c29 = &i2c3mux0chn5;
+ i2c30 = &i2c3mux0chn6;
+ i2c31 = &i2c3mux0chn7;
+ i2c32 = &i2c6mux0chn0;
+ i2c33 = &i2c6mux0chn1;
+ i2c34 = &i2c6mux0chn2;
+ i2c35 = &i2c6mux0chn3;
+ i2c36 = &i2c6mux0chn4;
+ i2c37 = &i2c6mux0chn5;
+ i2c38 = &i2c6mux0chn6;
+ i2c39 = &i2c6mux0chn7;
+ i2c40 = &i2c7mux0chn0;
+ i2c41 = &i2c7mux0chn1;
+ i2c42 = &i2c7mux0chn2;
+ i2c43 = &i2c7mux0chn3;
+ i2c44 = &i2c7mux0chn4;
+ i2c45 = &i2c7mux0chn5;
+ i2c46 = &i2c7mux0chn6;
+ i2c47 = &i2c7mux0chn7;
+ i2c48 = &i2c8mux0chn0;
+ i2c49 = &i2c8mux0chn1;
+ i2c50 = &i2c8mux0chn2;
+ i2c51 = &i2c8mux0chn3;
+ i2c52 = &i2c8mux0chn4;
+ i2c53 = &i2c8mux0chn5;
+ i2c54 = &i2c8mux0chn6;
+ i2c55 = &i2c8mux0chn7;
+ i2c56 = &i2c14mux0chn0;
+ i2c57 = &i2c14mux0chn1;
+ i2c58 = &i2c14mux0chn2;
+ i2c59 = &i2c14mux0chn3;
+ i2c60 = &i2c14mux0chn4;
+ i2c61 = &i2c14mux0chn5;
+ i2c62 = &i2c14mux0chn6;
+ i2c63 = &i2c14mux0chn7;
+ i2c64 = &i2c15mux0chn0;
+ i2c65 = &i2c15mux0chn1;
+ i2c66 = &i2c15mux0chn2;
+ i2c67 = &i2c15mux0chn3;
+ i2c68 = &i2c15mux0chn4;
+ i2c69 = &i2c15mux0chn5;
+ i2c70 = &i2c15mux0chn6;
+ i2c71 = &i2c15mux0chn7;
+ };
+
+ chosen {
+ stdout-path = "uart5:115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ eventlog: tcg-event-log@b3d00000 {
+ no-map;
+ reg = <0xb3d00000 0x100000>;
+ };
+
+ ramoops@b3e00000 {
+ compatible = "ramoops";
+ reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x8000>;
+ pmsg-size = <0x8000>;
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
+ /* LPC FW cycle bridge region requires natural alignment */
+ flash_memory: region@b4000000 {
+ no-map;
+ reg = <0xb4000000 0x04000000>; /* 64M */
+ };
+
+ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ no-map;
+ compatible = "shared-dma-pool";
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ gpios = <&gpio0 ASPEED_GPIO(L, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ gpios = <&gpio0 ASPEED_GPIO(S, 6) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-3 {
+ gpios = <&gpio0 ASPEED_GPIO(S, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-4 {
+ gpios = <&pca3 5 GPIO_ACTIVE_LOW>;
+ };
+
+ led-5 {
+ gpios = <&pca3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ led-6 {
+ gpios = <&pca3 7 GPIO_ACTIVE_LOW>;
+ };
+
+ led-7 {
+ gpios = <&pca3 8 GPIO_ACTIVE_LOW>;
+ };
+
+ led-8 {
+ gpios = <&pca3 9 GPIO_ACTIVE_LOW>;
+ };
+
+ led-9 {
+ gpios = <&pca3 10 GPIO_ACTIVE_LOW>;
+ };
+
+ led-a {
+ gpios = <&pca3 11 GPIO_ACTIVE_LOW>;
+ };
+
+ led-b {
+ gpios = <&pca4 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-c {
+ gpios = <&pca4 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-d {
+ gpios = <&pca4 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-e {
+ gpios = <&pca4 7 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <1000>;
+
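+ /* Polled every second (poll-interval is in ms); each event reports NVMe presence from a pca4 expander line. */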
+ event-nvme0-presence {
+ label = "nvme0-presence";
+ gpios = <&pca4 0 GPIO_ACTIVE_LOW>;
+ linux,code = <0>;
+ };
+
+ event-nvme1-presence {
+ label = "nvme1-presence";
+ gpios = <&pca4 1 GPIO_ACTIVE_LOW>;
+ linux,code = <1>;
+ };
+
+ event-nvme2-presence {
+ label = "nvme2-presence";
+ gpios = <&pca4 2 GPIO_ACTIVE_LOW>;
+ linux,code = <2>;
+ };
+
+ event-nvme3-presence {
+ label = "nvme3-presence";
+ gpios = <&pca4 3 GPIO_ACTIVE_LOW>;
+ linux,code = <3>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&p12v_vd 0>, <&p5v_aux_vd 0>,
+ <&p5v_bmc_aux_vd 0>, <&p3v3_aux_vd 0>,
+ <&p3v3_bmc_aux_vd 0>, <&p1v8_bmc_aux_vd 0>,
+ <&adc1 4>, <&adc0 2>, <&adc1 0>,
+ <&p2v5_aux_vd 0>, <&adc1 7>;
+ };
+
+ p12v_vd: voltage-divider1 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 3>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1127/127 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <15>;
+ full-ohms = <133>;
+ };
+
+ p5v_aux_vd: voltage-divider2 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 5>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1365/365 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <50>;
+ full-ohms = <187>;
+ };
+
+ p5v_bmc_aux_vd: voltage-divider3 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 3>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1365/365 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <50>;
+ full-ohms = <187>;
+ };
+
+ p3v3_aux_vd: voltage-divider4 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 2>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1698/698 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <14>;
+ full-ohms = <34>;
+ };
+
+ p3v3_bmc_aux_vd: voltage-divider5 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 7>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1698/698 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <14>;
+ full-ohms = <34>;
+ };
+
+ p1v8_bmc_aux_vd: voltage-divider6 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 6>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 4000/3000 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <3>;
+ full-ohms = <4>;
+ };
+
+ p2v5_aux_vd: voltage-divider7 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 1>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 2100/1100 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <11>;
+ full-ohms = <21>;
+ };
+
+ p1v8_bmc_aux: fixedregulator-p1v8-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p1v8_bmc_aux";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+};
+
+&adc0 {
+ status = "okay";
+ vref-supply = <&p1v8_bmc_aux>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default
+ &pinctrl_adc1_default
+ &pinctrl_adc2_default
+ &pinctrl_adc3_default
+ &pinctrl_adc4_default
+ &pinctrl_adc5_default
+ &pinctrl_adc6_default
+ &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ status = "okay";
+ vref-supply = <&p1v8_bmc_aux>;
+ aspeed,battery-sensing;
+
+ aspeed,int-vref-microvolt = <2500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc8_default
+ &pinctrl_adc9_default
+ &pinctrl_adc10_default
+ &pinctrl_adc11_default
+ &pinctrl_adc12_default
+ &pinctrl_adc13_default
+ &pinctrl_adc14_default
+ &pinctrl_adc15_default>;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","","","bmc-tpm-reset","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","bmc-ready",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","bmc-hb",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","rear-enc-fault0","rear-enc-id0",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","rtc-battery-voltage-read-enable","","power-chassis-control","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","power-chassis-good","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+};
+
+&emmc_controller {
+ status = "okay";
+};
+
+&pinctrl_emmc_default {
+ bias-disable;
+};
+
+&emmc {
+ status = "okay";
+ clk-phase-mmc-hs200 = <180>, <180>;
+};
+
+&ibt {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&vuart1 {
+ status = "okay";
+};
+
+&vuart2 {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+ memory-region = <&flash_memory>;
+};
+
+&mac2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii3_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>,
+ <&syscon ASPEED_CLK_MAC3RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>,
+ <&syscon ASPEED_CLK_MAC4RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&wdt1 {
+ aspeed,reset-type = "none";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+ status = "okay";
+};
+
+&kcs2 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca8 0xcac>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+ aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ regulator@60 {
+ compatible = "maxim,max8952";
+ reg = <0x60>;
+
+ max8952,default-mode = <0>;
+ max8952,dvs-mode-microvolt = <1250000>, <1200000>,
+ <1050000>, <950000>;
+ max8952,sync-freq = <0>;
+ max8952,ramp-speed = <0>;
+
+ regulator-name = "VR_v77_1v4";
+ regulator-min-microvolt = <770000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ regulator@42 {
+ compatible = "infineon,ir38263";
+ reg = <0x42>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "nic1-perst";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "bmc-perst";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "reset-M2-SSD1-2-perst";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "pcie-perst1";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "pcie-perst2";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "pcie-perst3";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "pcie-perst4";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "pcie-perst5";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "pcie-perst6";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "pcie-perst7";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "pcie-perst8";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "PV-cp0-sw1stk4-perst";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "PV-cp0-sw1stk5-perst";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "pe-cp-drv0-perst";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "pe-cp-drv1-perst";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "lom-perst";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ gpio@74 {
+ compatible = "nxp,pca9539";
+ reg = <0x74>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "PLUG_DETECT_PCIE_J101_N",
+ "PLUG_DETECT_PCIE_J102_N",
+ "PLUG_DETECT_PCIE_J103_N",
+ "PLUG_DETECT_PCIE_J104_N",
+ "PLUG_DETECT_PCIE_J105_N",
+ "PLUG_DETECT_PCIE_J106_N",
+ "PLUG_DETECT_PCIE_J107_N",
+ "PLUG_DETECT_PCIE_J108_N",
+ "PLUG_DETECT_M2_SSD1_N",
+ "PLUG_DETECT_NIC1_N",
+ "SEL_SMB_DIMM_CPU0",
+ "presence-ps2",
+ "presence-ps3",
+ "", "",
+ "PWRBRD_PLUG_DETECT2_N";
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ power-supply@58 {
+ compatible = "ibm,cffps";
+ reg = <0x58>;
+ };
+
+ power-supply@59 {
+ compatible = "ibm,cffps";
+ reg = <0x59>;
+ };
+
+ power-supply@5a {
+ compatible = "ibm,cffps";
+ reg = <0x5a>;
+ };
+
+ power-supply@5b {
+ compatible = "ibm,cffps";
+ reg = <0x5b>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c3mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c3mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c3mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c3mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c3mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ i2c3mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c3mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c3mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+
+ regulator@42 {
+ compatible = "infineon,ir38263";
+ reg = <0x42>;
+ };
+
+ regulator@43 {
+ compatible = "infineon,ir38060";
+ reg = <0x43>;
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ fan-controller@52 {
+ compatible = "maxim,max31785a";
+ reg = <0x52>;
+ };
+
+ fan-controller@54 {
+ compatible = "maxim,max31785a";
+ reg = <0x54>;
+ };
+
+ eeprom@55 {
+ compatible = "atmel,24c64";
+ reg = <0x55>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c6mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c6mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c6mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c6mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c6mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ humidity-sensor@40 {
+ compatible = "ti,hdc1080";
+ reg = <0x40>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "enclosure-id-led";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "attention-led";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "enclosure-fault-rollup-led";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "power-on-led";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ temperature-sensor@76 {
+ compatible = "infineon,dps310";
+ reg = <0x76>;
+ };
+ };
+
+ i2c6mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c6mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c6mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+
+ pca3: gpio@74 {
+ compatible = "nxp,pca9539";
+ reg = <0x74>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca4: gpio@77 {
+ compatible = "nxp,pca9539";
+ reg = <0x77>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "PE_NVMED0_EXP_PRSNT_N",
+ "PE_NVMED1_EXP_PRSNT_N",
+ "PE_NVMED2_EXP_PRSNT_N",
+ "PE_NVMED3_EXP_PRSNT_N",
+ "LED_FAULT_NVMED0",
+ "LED_FAULT_NVMED1",
+ "LED_FAULT_NVMED2",
+ "LED_FAULT_NVMED3",
+ "FAN0_PRESENCE_R_N",
+ "FAN1_PRESENCE_R_N",
+ "FAN2_PRESENCE_R_N",
+ "FAN3_PRESENCE_R_N",
+ "FAN4_PRESENCE_R_N",
+ "FAN5_PRESENCE_N",
+ "FAN6_PRESENCE_N",
+ "";
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c7mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c7mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c7mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c7mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ regulator@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ };
+ };
+
+ i2c7mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ i2c7mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+
+ regulator@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ };
+ };
+
+ i2c7mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c7mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9548";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c8mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ regulator@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ };
+ };
+
+ i2c8mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ regulator@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ };
+
+ regulator@41 {
+ compatible = "infineon,tda38640";
+ reg = <0x41>;
+ };
+
+ regulator@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ };
+
+ regulator@5b {
+ compatible = "mps,mp2971";
+ reg = <0x5b>;
+ };
+ };
+
+ i2c8mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c8mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c8mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c8mux1chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c8mux1chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c8mux1chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c8mux1chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c8mux1chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ i2c8mux1chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c8mux1chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c8mux1chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+ };
+
+ i2c8mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c8mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp423";
+ reg = <0x4c>;
+ };
+ };
+
+ i2c8mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+
+ regulator@40 {
+ compatible = "infineon,ir38060";
+ reg = <0x40>;
+ };
+ };
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ regulator@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+ };
+
+ regulator@41 {
+ compatible = "infineon,ir38263";
+ reg = <0x41>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ regulator@60 {
+ compatible = "maxim,max8952";
+ reg = <0x60>;
+
+ max8952,default-mode = <0>;
+ max8952,dvs-mode-microvolt = <1250000>, <1200000>,
+ <1050000>, <950000>;
+ max8952,sync-freq = <0>;
+ max8952,ramp-speed = <0>;
+
+ regulator-name = "VR_v77_1v4";
+ regulator-min-microvolt = <770000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+
+ tpm@2e {
+ compatible = "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ memory-region = <&eventlog>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+
+ regulator@41 {
+ compatible = "infineon,ir38263";
+ reg = <0x41>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "efuse-12v-slots";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "efuse-3p3v-slot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "nic2-pert";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "pcie-perst9";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "pcie-perst10";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "pcie-perst11";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "pcie-perst12";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "pcie-perst13";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "pcie-perst14";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "pcie-perst15";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "pcie-perst16";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "PV-cp1-sw1stk4-perst";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "PV-cp1-sw1stk5-perst";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "pe-cp-drv2-perst";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "pe-cp-drv3-perst";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ gpio@75 {
+ compatible = "nxp,pca9539";
+ reg = <0x75>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "PLUG_DETECT_PCIE_J109_N",
+ "PLUG_DETECT_PCIE_J110_N",
+ "PLUG_DETECT_PCIE_J111_N",
+ "PLUG_DETECT_PCIE_J112_N",
+ "PLUG_DETECT_PCIE_J113_N",
+ "PLUG_DETECT_PCIE_J114_N",
+ "PLUG_DETECT_PCIE_J115_N",
+ "PLUG_DETECT_PCIE_J116_N",
+ "PLUG_DETECT_M2_SSD2_N",
+ "PLUG_DETECT_NIC2_N",
+ "SEL_SMB_DIMM_CPU1",
+ "presence-ps0",
+ "presence-ps1",
+ "", "",
+ "PWRBRD_PLUG_DETECT1_N";
+ };
+
+ gpio@76 {
+ compatible = "nxp,pca9539";
+ reg = <0x76>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SW1_BOOTRCVRYB1_N",
+ "SW1_BOOTRCVRYB0_N",
+ "SW2_BOOTRCVRYB1_N",
+ "SW2_BOOTRCVRYB0_N",
+ "SW3_4_BOOTRCVRYB1_N",
+ "SW3_4_BOOTRCVRYB0_N",
+ "SW5_BOOTRCVRYB1_N",
+ "SW5_BOOTRCVRYB0_N",
+ "SW6_BOOTRCVRYB1_N",
+ "SW6_BOOTRCVRYB0_N",
+ "SW1_RESET_N",
+ "SW3_RESET_N",
+ "SW4_RESET_N",
+ "SW2_RESET_N",
+ "SW5_RESET_N",
+ "SW6_RESET_N";
+ };
+};
+
+&i2c14 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c14mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c14mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c14mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c14mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ regulator@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ };
+ };
+
+ i2c14mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ i2c14mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+
+ regulator@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ };
+ };
+
+ i2c14mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c14mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9548";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c15mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ regulator@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ };
+ };
+
+ i2c15mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ regulator@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ };
+
+ regulator@41 {
+ compatible = "infineon,tda38640";
+ reg = <0x41>;
+ };
+
+ regulator@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ };
+
+ regulator@5b {
+ compatible = "mps,mp2971";
+ reg = <0x5b>;
+ };
+ };
+
+ i2c15mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c15mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c15mux0chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c15mux1chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c15mux1chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c15mux1chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c15mux1chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ i2c15mux1chn4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ i2c15mux1chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c15mux1chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ i2c15mux1chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+ };
+ };
+
+ i2c15mux0chn5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c15mux0chn6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp423";
+ reg = <0x4c>;
+ };
+ };
+
+ i2c15mux0chn7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+
+ regulator@40 {
+ compatible = "infineon,ir38060";
+ reg = <0x40>;
+ };
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp423";
+ reg = <0x4c>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
index 29f94696d8b1..7fb421153596 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
@@ -867,22 +867,26 @@
};
fsim0: fsi@1e79b000 {
+ #interrupt-cells = <1>;
compatible = "aspeed,ast2600-fsi-master", "fsi-master";
reg = <0x1e79b000 0x94>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fsi1_default>;
clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ interrupt-controller;
status = "disabled";
};
fsim1: fsi@1e79b100 {
+ #interrupt-cells = <1>;
compatible = "aspeed,ast2600-fsi-master", "fsi-master";
reg = <0x1e79b100 0x94>;
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fsi2_default>;
clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ interrupt-controller;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/aspeed/ibm-power10-dual.dtsi b/arch/arm/boot/dts/aspeed/ibm-power10-dual.dtsi
index cc466910bb52..07ce3b2bc62a 100644
--- a/arch/arm/boot/dts/aspeed/ibm-power10-dual.dtsi
+++ b/arch/arm/boot/dts/aspeed/ibm-power10-dual.dtsi
@@ -165,10 +165,12 @@
};
fsi_hub0: hub@3400 {
+ #interrupt-cells = <1>;
compatible = "fsi-master-hub";
reg = <0x3400 0x400>;
#address-cells = <2>;
#size-cells = <0>;
+ interrupt-controller;
};
};
};
diff --git a/arch/arm/boot/dts/broadcom/Makefile b/arch/arm/boot/dts/broadcom/Makefile
index 7099d9560033..5881bcc95eba 100644
--- a/arch/arm/boot/dts/broadcom/Makefile
+++ b/arch/arm/boot/dts/broadcom/Makefile
@@ -64,6 +64,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm47081-luxul-xap-1410.dtb \
bcm47081-luxul-xwr-1200.dtb \
bcm47081-tplink-archer-c5-v2.dtb \
+ bcm4709-asus-rt-ac3200.dtb \
bcm4709-asus-rt-ac87u.dtb \
bcm4709-buffalo-wxr-1900dhp.dtb \
bcm4709-linksys-ea9200.dtb \
@@ -71,6 +72,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm4709-netgear-r8000.dtb \
bcm4709-tplink-archer-c9-v1.dtb \
bcm47094-asus-rt-ac3100.dtb \
+ bcm47094-asus-rt-ac5300.dtb \
bcm47094-asus-rt-ac88u.dtb \
bcm47094-dlink-dir-885l.dtb \
bcm47094-dlink-dir-890l.dtb \
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/broadcom/bcm2711-rpi-4-b.dts
index d5f8823230db..353bb50ce542 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi-4-b.dts
@@ -5,6 +5,7 @@
#include "bcm283x-rpi-led-deprecated.dtsi"
#include "bcm283x-rpi-usb-peripheral.dtsi"
#include "bcm283x-rpi-wifi-bt.dtsi"
+#include <dt-bindings/leds/common.h>
/ {
compatible = "raspberrypi,4-model-b", "brcm,bcm2711";
@@ -15,6 +16,13 @@
stdout-path = "serial1:115200n8";
};
+ cam1_reg: regulator-cam1 {
+ compatible = "regulator-fixed";
+ regulator-name = "cam1-reg";
+ enable-active-high;
+ gpio = <&expgpio 5 GPIO_ACTIVE_HIGH>;
+ };
+
sd_io_1v8_reg: regulator-sd-io-1v8 {
compatible = "regulator-gpio";
regulator-name = "vdd-sd-io";
@@ -197,6 +205,27 @@
phy1: ethernet-phy@1 {
/* No PHY interrupt */
reg = <0x1>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* LED1 */
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ /* LED2 */
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
index 5a2869a18bd5..ca9be91b4f36 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
@@ -30,6 +30,7 @@
&genet_mdio {
clock-frequency = <1950000>;
+ /delete-node/ leds;
};
&led_pwr {
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi-cm4-io.dts b/arch/arm/boot/dts/broadcom/bcm2711-rpi-cm4-io.dts
index d7ba02f586d3..6bc77dd48c0d 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi-cm4-io.dts
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi-cm4-io.dts
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
+#include <dt-bindings/leds/common.h>
#include "bcm2711-rpi-cm4.dtsi"
#include "bcm283x-rpi-led-deprecated.dtsi"
#include "bcm283x-rpi-usb-host.dtsi"
@@ -101,6 +102,38 @@
status = "okay";
};
+&i2c0_1 {
+ rtc@51 {
+ /* Attention: An alarm resets the machine */
+ compatible = "nxp,pcf85063a";
+ reg = <0x51>;
+ quartz-load-femtofarads = <7000>;
+ };
+};
+
+&phy1 {
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* LED2 */
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ /* LED3 */
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+};
+
&led_act {
gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
index d233a191c139..6bf4241fe3b7 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
@@ -17,14 +17,33 @@
pcie0 = &pcie0;
blconfig = &blconfig;
};
-};
-&firmware {
- firmware_clocks: clocks {
- compatible = "raspberrypi,firmware-clocks";
- #clock-cells = <1>;
+ i2c0mux: i2c-mux0 {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-parent = <&i2c0>;
+
+ pinctrl-names = "i2c0", "i2c0-vc";
+ pinctrl-0 = <&i2c0_gpio0>;
+ pinctrl-1 = <&i2c0_gpio44>;
+
+ i2c0_0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c0_1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
+};
+&firmware {
expgpio: gpio {
compatible = "raspberrypi,firmware-gpio";
gpio-controller;
@@ -54,6 +73,11 @@
clocks = <&firmware_clocks 4>;
};
+&i2c0 {
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+};
+
&rmem {
/*
* RPi4's co-processor will copy the board's bootloader configuration
diff --git a/arch/arm/boot/dts/broadcom/bcm2711.dtsi b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
index 22c7f1561344..e4e42af21ef3 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
@@ -432,8 +432,8 @@
};
};
- arm-pmu {
- compatible = "arm,cortex-a72-pmu", "arm,armv8-pmuv3";
+ pmu {
+ compatible = "arm,cortex-a72-pmu";
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
@@ -1114,6 +1114,14 @@
#address-cells = <2>;
};
+&csi0 {
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&csi1 {
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+};
+
&cma {
/*
* arm64 reserves the CMA by default somewhere in ZONE_DMA32,
diff --git a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
index 4e7b4a592da7..8b3c21d9f333 100644
--- a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
@@ -7,13 +7,6 @@
#include <dt-bindings/power/raspberrypi-power.h>
-&firmware {
- firmware_clocks: clocks {
- compatible = "raspberrypi,firmware-clocks";
- #clock-cells = <1>;
- };
-};
-
&hdmi {
clocks = <&firmware_clocks 9>,
<&firmware_clocks 13>;
diff --git a/arch/arm/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2835-rpi.dtsi
index f0acc9390f31..e9bf41b9f5c1 100644
--- a/arch/arm/boot/dts/broadcom/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2835-rpi.dtsi
@@ -4,11 +4,12 @@
soc {
firmware: firmware {
compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
- #address-cells = <1>;
- #size-cells = <1>;
-
mboxes = <&mailbox>;
- dma-ranges;
+
+ firmware_clocks: clocks {
+ compatible = "raspberrypi,firmware-clocks";
+ #clock-cells = <1>;
+ };
};
power: power {
@@ -25,6 +26,20 @@
};
};
+&csi0 {
+ clocks = <&clocks BCM2835_CLOCK_CAM0>,
+ <&firmware_clocks 4>;
+ clock-names = "lp", "vpu";
+ power-domains = <&power RPI_POWER_DOMAIN_UNICAM0>;
+};
+
+&csi1 {
+ clocks = <&clocks BCM2835_CLOCK_CAM1>,
+ <&firmware_clocks 4>;
+ clock-names = "lp", "vpu";
+ power-domains = <&power RPI_POWER_DOMAIN_UNICAM1>;
+};
+
&gpio {
gpioout: gpioout {
brcm,pins = <6>;
diff --git a/arch/arm/boot/dts/broadcom/bcm283x.dtsi b/arch/arm/boot/dts/broadcom/bcm283x.dtsi
index 2ca8a2505a4d..69b0919f1324 100644
--- a/arch/arm/boot/dts/broadcom/bcm283x.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm283x.dtsi
@@ -454,6 +454,30 @@
status = "disabled";
};
+ csi0: csi@7e800000 {
+ compatible = "brcm,bcm2835-unicam";
+ reg = <0x7e800000 0x800>,
+ <0x7e802000 0x4>;
+ reg-names = "unicam", "cmi";
+ interrupts = <2 6>;
+ brcm,num-data-lanes = <2>;
+ status = "disabled";
+ port {
+ };
+ };
+
+ csi1: csi@7e801000 {
+ compatible = "brcm,bcm2835-unicam";
+ reg = <0x7e801000 0x800>,
+ <0x7e802004 0x4>;
+ reg-names = "unicam", "cmi";
+ interrupts = <2 7>;
+ brcm,num-data-lanes = <4>;
+ status = "disabled";
+ port {
+ };
+ };
+
i2c1: i2c@7e804000 {
compatible = "brcm,bcm2835-i2c";
reg = <0x7e804000 0x1000>;
diff --git a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
new file mode 100644
index 000000000000..53cb0c58f6d0
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Author: Tom Brautaset <tbrautaset@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "bcm4709.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "asus,rt-ac3200", "brcm,bcm4709", "brcm,bcm4708";
+ model = "ASUS RT-AC3200";
+
+ memory@0 {
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
+ device_type = "memory";
+ };
+
+ nvram@1c080000 {
+ compatible = "brcm,nvram";
+ reg = <0x1c080000 0x00180000>;
+
+ et0macaddr: et0macaddr {
+ #nvmem-cell-cells = <1>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-reset {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+ };
+
+ button-wifi {
+ label = "Wi-Fi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
+ };
+
+ button-wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-power {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ led-wan-red {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_WAN;
+ gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-wps {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_WPS;
+ gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&gmac0 {
+ nvmem-cells = <&et0macaddr 0>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac1 {
+ nvmem-cells = <&et0macaddr 1>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac2 {
+ nvmem-cells = <&et0macaddr 2>;
+ nvmem-cell-names = "mac-address";
+};
+
+&nandcs {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ reg = <0x00000000 0x00080000>;
+ label = "boot";
+ read-only;
+ };
+
+ partition@80000 {
+ reg = <0x00080000 0x00180000>;
+ label = "nvram";
+ };
+
+ partition@200000 {
+ compatible = "brcm,trx";
+ reg = <0x00200000 0x07e00000>;
+ label = "firmware";
+ };
+ };
+};
+
+&srab {
+ status = "okay";
+
+ ports {
+ port@0 {
+ label = "wan";
+ };
+
+ port@1 {
+ label = "lan1";
+ };
+
+ port@2 {
+ label = "lan2";
+ };
+
+ port@3 {
+ label = "lan3";
+ };
+
+ port@4 {
+ label = "lan4";
+ };
+ };
+};
+
+&usb2 {
+ vcc-gpio = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3_phy {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dts
index 5f089307cd8c..1655ac95769c 100644
--- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dts
+++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dts
@@ -13,11 +13,22 @@
nvram@1c080000 {
et0macaddr: et0macaddr {
+ #nvmem-cell-cells = <1>;
};
};
};
&gmac0 {
- nvmem-cells = <&et0macaddr>;
+ nvmem-cells = <&et0macaddr 0>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac1 {
+ nvmem-cells = <&et0macaddr 1>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac2 {
+ nvmem-cells = <&et0macaddr 2>;
nvmem-cell-names = "mac-address";
};
diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dtsi b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dtsi
index 09cefce27fb1..2cfaaabc7a6a 100644
--- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac3100.dtsi
@@ -6,15 +6,13 @@
#include "bcm47094.dtsi"
#include "bcm5301x-nand-cs0-bch8.dtsi"
-/ {
- chosen {
- bootargs = "earlycon";
- };
+#include <dt-bindings/leds/common.h>
+/ {
memory@0 {
- device_type = "memory";
reg = <0x00000000 0x08000000>,
<0x88000000 0x18000000>;
+ device_type = "memory";
};
nvram@1c080000 {
@@ -22,76 +20,108 @@
reg = <0x1c080000 0x00180000>;
};
- leds {
- compatible = "gpio-leds";
+ gpio-keys {
+ compatible = "gpio-keys";
- led-power {
- label = "white:power";
- gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-on";
+ button-led {
+ label = "Backlight";
+ linux,code = <KEY_BRIGHTNESS_ZERO>;
+ gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
};
- led-wan-red {
- label = "red:wan";
- gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+ button-reset {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+ };
+
+ button-wifi {
+ label = "Wi-Fi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
};
+ button-wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 20 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
led-lan {
- label = "white:lan";
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_LAN;
gpios = <&chipcommon 21 GPIO_ACTIVE_LOW>;
};
+ led-power {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
led-usb2 {
- label = "white:usb2";
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_USB;
+ function-enumerator = <1>;
gpios = <&chipcommon 16 GPIO_ACTIVE_LOW>;
trigger-sources = <&ehci_port2>;
linux,default-trigger = "usbport";
};
led-usb3 {
- label = "white:usb3";
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_USB;
+ function-enumerator = <2>;
gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
trigger-sources = <&ehci_port1>, <&xhci_port1>;
linux,default-trigger = "usbport";
};
+ led-wan-red {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_WAN;
+ gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+ };
+
led-wps {
- label = "white:wps";
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_WPS;
gpios = <&chipcommon 19 GPIO_ACTIVE_LOW>;
};
};
+};
- gpio-keys {
- compatible = "gpio-keys";
-
- button-wps {
- label = "WPS";
- linux,code = <KEY_WPS_BUTTON>;
- gpios = <&chipcommon 20 GPIO_ACTIVE_LOW>;
- };
+&nandcs {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
- button-reset {
- label = "Reset";
- linux,code = <KEY_RESTART>;
- gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+ partition@0 {
+ reg = <0x00000000 0x00080000>;
+ label = "boot";
+ read-only;
};
- button-wifi {
- label = "Wi-Fi";
- linux,code = <KEY_RFKILL>;
- gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
+ partition@80000 {
+ reg = <0x00080000 0x00180000>;
+ label = "nvram";
};
- button-led {
- label = "Backlight";
- linux,code = <KEY_BRIGHTNESS_ZERO>;
- gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
+ partition@200000 {
+ compatible = "brcm,trx";
+ reg = <0x00200000 0x07e00000>;
+ label = "firmware";
};
};
};
&srab {
- compatible = "brcm,bcm53012-srab", "brcm,bcm5301x-srab";
status = "okay";
ports {
@@ -136,28 +166,3 @@
&usb3_phy {
status = "okay";
};
-
-&nandcs {
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "boot";
- reg = <0x00000000 0x00080000>;
- read-only;
- };
-
- partition@80000 {
- label = "nvram";
- reg = <0x00080000 0x00180000>;
- };
-
- partition@200000 {
- label = "firmware";
- reg = <0x00200000 0x07e00000>;
- compatible = "brcm,trx";
- };
- };
-};
diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
new file mode 100644
index 000000000000..6c666dc7ad23
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Author: Tom Brautaset <tbrautaset@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "bcm47094.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "asus,rt-ac5300", "brcm,bcm47094", "brcm,bcm4708";
+ model = "ASUS RT-AC5300";
+
+ memory@0 {
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
+ device_type = "memory";
+ };
+
+ nvram@1c080000 {
+ compatible = "brcm,nvram";
+ reg = <0x1c080000 0x00180000>;
+
+ et1macaddr: et1macaddr {
+ #nvmem-cell-cells = <1>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-reset {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+ };
+
+ button-wifi {
+ label = "Wi-Fi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&chipcommon 20 GPIO_ACTIVE_LOW>;
+ };
+
+ button-wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-lan {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_LAN;
+ gpios = <&chipcommon 21 GPIO_ACTIVE_LOW>;
+ };
+
+ led-power {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ led-wan-red {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_WAN;
+ gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-wps {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_WPS;
+ gpios = <&chipcommon 19 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&gmac0 {
+ nvmem-cells = <&et1macaddr 0>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac1 {
+ nvmem-cells = <&et1macaddr 1>;
+ nvmem-cell-names = "mac-address";
+};
+
+&gmac2 {
+ nvmem-cells = <&et1macaddr 2>;
+ nvmem-cell-names = "mac-address";
+};
+
+&nandcs {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ reg = <0x00000000 0x00080000>;
+ label = "boot";
+ read-only;
+ };
+
+ partition@80000 {
+ reg = <0x00080000 0x00180000>;
+ label = "nvram";
+ };
+
+ partition@200000 {
+ compatible = "brcm,trx";
+ reg = <0x00200000 0x07e00000>;
+ label = "firmware";
+ };
+ };
+};
+
+&srab {
+ status = "okay";
+
+ ports {
+ port@0 {
+ label = "lan4";
+ };
+
+ port@1 {
+ label = "lan3";
+ };
+
+ port@2 {
+ label = "lan2";
+ };
+
+ port@3 {
+ label = "lan1";
+ };
+
+ port@4 {
+ label = "wan";
+ };
+ };
+};
+
+&usb2 {
+ vcc-gpio = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3_phy {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac88u.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac88u.dts
index fd344b55087e..a197f447fd97 100644
--- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac88u.dts
+++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac88u.dts
@@ -13,18 +13,40 @@
nvram@1c080000 {
et1macaddr: et1macaddr {
+ #nvmem-cell-cells = <1>;
};
};
switch {
compatible = "realtek,rtl8365mb";
- /* 7 = MDIO (has input reads), 6 = MDC (clock, output only) */
mdc-gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>;
mdio-gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>;
reset-gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>;
realtek,disable-leds;
dsa,member = <1 0>;
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ ethphy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ };
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -68,29 +90,21 @@
};
};
};
+ };
+};
- mdio {
- compatible = "realtek,smi-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethphy0: ethernet-phy@0 {
- reg = <0>;
- };
-
- ethphy1: ethernet-phy@1 {
- reg = <1>;
- };
+&gmac0 {
+ status = "disabled";
+};
- ethphy2: ethernet-phy@2 {
- reg = <2>;
- };
+&gmac1 {
+ nvmem-cells = <&et1macaddr 0>;
+ nvmem-cell-names = "mac-address";
+};
- ethphy3: ethernet-phy@3 {
- reg = <3>;
- };
- };
- };
+&gmac2 {
+ nvmem-cells = <&et1macaddr 1>;
+ nvmem-cell-names = "mac-address";
};
&srab {
@@ -111,12 +125,3 @@
};
};
};
-
-&gmac0 {
- status = "disabled";
-};
-
-&gmac1 {
- nvmem-cells = <&et1macaddr>;
- nvmem-cell-names = "mac-address";
-};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
index 4f609e9e510e..009d2c832421 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
@@ -242,7 +242,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -263,7 +263,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -280,7 +280,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -296,7 +296,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <3300000>;
+ regulator-suspend-microvolt = <3300000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
index 217e9b96c61e..20b2497657ae 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
@@ -293,7 +293,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -314,7 +314,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -331,7 +331,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -346,7 +346,7 @@
regulator-max-microvolt = <3700000>;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/nvidia/tegra20-colibri.dtsi b/arch/arm/boot/dts/nvidia/tegra20-colibri.dtsi
index 8c1d5c9fa483..2ff7be8f1382 100644
--- a/arch/arm/boot/dts/nvidia/tegra20-colibri.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra20-colibri.dtsi
@@ -445,9 +445,9 @@
tegra_ac97: ac97@70002000 {
status = "okay";
- nvidia,codec-reset-gpio =
+ nvidia,codec-reset-gpios =
<&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- nvidia,codec-sync-gpio =
+ nvidia,codec-sync-gpios =
<&gpio TEGRA_GPIO(P, 0) GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/nvidia/tegra20-paz00.dts b/arch/arm/boot/dts/nvidia/tegra20-paz00.dts
index afb922bd79a7..1408e1e00759 100644
--- a/arch/arm/boot/dts/nvidia/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/nvidia/tegra20-paz00.dts
@@ -533,6 +533,49 @@
0x00000000 0x00000000 0x00000000 0x00000000>;
};
};
+
+ emc-tables@1 {
+ nvidia,ram-code = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ emc-table@166500 {
+ reg = <166500>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <166500>;
+ nvidia,emc-registers = <0x0000000a 0x00000016
+ 0x00000008 0x00000003 0x00000004 0x00000004
+ 0x00000002 0x0000000c 0x00000003 0x00000003
+ 0x00000002 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x000004df
+ 0x00000000 0x00000003 0x00000003 0x00000003
+ 0x00000003 0x00000001 0x0000000a 0x000000c8
+ 0x00000003 0x00000006 0x00000004 0x00000008
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xe03b0323
+ 0x007fe010 0x00001414 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
+
+ emc-table@333000 {
+ reg = <333000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <333000>;
+ nvidia,emc-registers = <0x00000018 0x00000033
+ 0x00000012 0x00000004 0x00000004 0x00000005
+ 0x00000003 0x0000000c 0x00000006 0x00000006
+ 0x00000003 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x00000bff
+ 0x00000000 0x00000003 0x00000003 0x00000006
+ 0x00000006 0x00000001 0x00000011 0x000000c8
+ 0x00000003 0x0000000e 0x00000007 0x00000008
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xf0440303
+ 0x007fe010 0x00001414 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
+ };
};
usb@c5000000 {
diff --git a/arch/arm/boot/dts/nxp/imx/Makefile b/arch/arm/boot/dts/nxp/imx/Makefile
index 4052cad859fa..231c0d73a53e 100644
--- a/arch/arm/boot/dts/nxp/imx/Makefile
+++ b/arch/arm/boot/dts/nxp/imx/Makefile
@@ -349,12 +349,15 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ull-phytec-segin-lc-rdk-nand.dtb \
imx6ull-phytec-tauri-emmc.dtb \
imx6ull-phytec-tauri-nand.dtb \
+ imx6ull-seeed-npi-dev-board-emmc.dtb \
+ imx6ull-seeed-npi-dev-board-nand.dtb \
imx6ull-tarragon-master.dtb \
imx6ull-tarragon-micro.dtb \
imx6ull-tarragon-slave.dtb \
imx6ull-tarragon-slavext.dtb \
imx6ull-tqma6ull2-mba6ulx.dtb \
imx6ull-tqma6ull2l-mba6ulx.dtb \
+ imx6ull-uti260b.dtb \
imx6ulz-14x14-evk.dtb \
imx6ulz-bsh-smm-m2.dtb
dtb-$(CONFIG_SOC_IMX7D) += \
diff --git a/arch/arm/boot/dts/nxp/imx/e60k02.dtsi b/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
index dd03e3860f97..13756d39fb7b 100644
--- a/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
@@ -127,7 +127,7 @@
compatible = "ricoh,rc5t619";
reg = <0x32>;
interrupt-parent = <&gpio5>;
- interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
system-power-controller;
regulators {
diff --git a/arch/arm/boot/dts/nxp/imx/e70k02.dtsi b/arch/arm/boot/dts/nxp/imx/e70k02.dtsi
index 4e1bf080eaca..dcc3c9d488a8 100644
--- a/arch/arm/boot/dts/nxp/imx/e70k02.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/e70k02.dtsi
@@ -145,7 +145,7 @@
compatible = "ricoh,rc5t619";
reg = <0x32>;
interrupt-parent = <&gpio4>;
- interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
system-power-controller;
regulators {
diff --git a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-som.dtsi
index abc9233c5a1b..31b3fc972abb 100644
--- a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-som.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-som.dtsi
@@ -15,6 +15,22 @@
device_type = "memory";
reg = <0xa0000000 0x08000000>; /* 128MB */
};
+
+ usbotgphy: usbotgphy {
+ compatible = "usb-nop-xceiv";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotgphy>;
+ reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>;
+ #phy-cells = <0>;
+ };
+
+ usbh2phy: usbh2phy {
+ compatible = "usb-nop-xceiv";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh2phy>;
+ reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
+ #phy-cells = <0>;
+ };
};
&cspi1 {
@@ -84,6 +100,52 @@
MX27_PAD_NFWE_B__NFWE_B 0x0
>;
};
+
+ pinctrl_usbotgphy: usbotgphygrp {
+ fsl,pins = <
+ MX27_PAD_USBH1_RCV__GPIO2_25 0x1 /* reset gpio */
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX27_PAD_USBOTG_CLK__USBOTG_CLK 0x0
+ MX27_PAD_USBOTG_DIR__USBOTG_DIR 0x0
+ MX27_PAD_USBOTG_NXT__USBOTG_NXT 0x0
+ MX27_PAD_USBOTG_STP__USBOTG_STP 0x0
+ MX27_PAD_USBOTG_DATA0__USBOTG_DATA0 0x0
+ MX27_PAD_USBOTG_DATA1__USBOTG_DATA1 0x0
+ MX27_PAD_USBOTG_DATA2__USBOTG_DATA2 0x0
+ MX27_PAD_USBOTG_DATA3__USBOTG_DATA3 0x0
+ MX27_PAD_USBOTG_DATA4__USBOTG_DATA4 0x0
+ MX27_PAD_USBOTG_DATA5__USBOTG_DATA5 0x0
+ MX27_PAD_USBOTG_DATA6__USBOTG_DATA6 0x0
+ MX27_PAD_USBOTG_DATA7__USBOTG_DATA7 0x0
+ >;
+ };
+
+ pinctrl_usbh2phy: usbh2phygrp {
+ fsl,pins = <
+ MX27_PAD_USBH1_SUSP__GPIO2_22 0x0 /* reset gpio */
+ >;
+ };
+
+ pinctrl_usbh2: usbh2grp {
+ fsl,pins = <
+ MX27_PAD_USBH2_CLK__USBH2_CLK 0x0
+ MX27_PAD_USBH2_DIR__USBH2_DIR 0x0
+ MX27_PAD_USBH2_NXT__USBH2_NXT 0x0
+ MX27_PAD_USBH2_STP__USBH2_STP 0x0
+ MX27_PAD_CSPI2_SCLK__USBH2_DATA0 0x0
+ MX27_PAD_CSPI2_MOSI__USBH2_DATA1 0x0
+ MX27_PAD_CSPI2_MISO__USBH2_DATA2 0x0
+ MX27_PAD_CSPI2_SS1__USBH2_DATA3 0x0
+ MX27_PAD_CSPI2_SS2__USBH2_DATA4 0x0
+ MX27_PAD_CSPI1_SS2__USBH2_DATA5 0x0
+ MX27_PAD_CSPI2_SS0__USBH2_DATA6 0x0
+ MX27_PAD_USBH2_DATA7__USBH2_DATA7 0x0
+ >;
+ };
};
};
@@ -95,3 +157,19 @@
nand-on-flash-bbt;
status = "okay";
};
+
+&usbotg {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "ulpi";
+ phys = <&usbotgphy>;
+ status = "okay";
+};
+
+&usbh2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh2>;
+ phy_type = "ulpi";
+ phys = <&usbh2phy>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts b/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
index f7408722d68a..2bd0761c7e90 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
@@ -45,7 +45,7 @@
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 78770>;
+ pwms = <&pwm1 0 78770 0>;
brightness-levels = <0 150 200 255>;
default-brightness-level = <1>;
power-supply = <&backlight_reg>;
@@ -113,7 +113,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm_backlight>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-kp-ddc.dts b/arch/arm/boot/dts/nxp/imx/imx53-kp-ddc.dts
index 0e7f071fd10e..f6f116366643 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-kp-ddc.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-kp-ddc.dts
@@ -13,7 +13,7 @@
backlight_lcd: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000>;
+ pwms = <&pwm2 0 50000 0>;
power-supply = <&reg_backlight>;
brightness-levels = <0 24 28 32 36
40 44 48 52 56
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-kp.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-kp.dtsi
index 4508f34139a0..ae5f87b8612d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-kp.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-kp.dtsi
@@ -13,7 +13,7 @@
compatible = "pwm-beeper";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_buzzer>;
- pwms = <&pwm1 0 500000>;
+ pwms = <&pwm1 0 500000 0>;
};
gpio-buttons {
@@ -162,14 +162,6 @@
>;
};
-&pwm1 {
- #pwm-cells = <2>;
-};
-
-&pwm2 {
- #pwm-cells = <2>;
-};
-
&uart1 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts b/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
index c323b4dbe9f0..1353d985969c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
@@ -41,7 +41,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 3000>;
+ pwms = <&pwm1 0 3000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_backlight>;
@@ -313,7 +313,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
index 6a37616cef1c..2117de872703 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
@@ -17,7 +17,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000>;
+ pwms = <&pwm2 0 50000 0>;
brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
default-brightness-level = <10>;
enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts b/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
index 70c4a4852256..e939acc1c88b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
@@ -167,7 +167,7 @@
pwm_bl: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000>;
+ pwms = <&pwm2 0 50000 0>;
brightness-levels = <0 2 5 7 10 12 15 17 20 22 25 28 30 33 35
38 40 43 45 48 51 53 56 58 61 63 66 68 71
73 76 79 81 84 86 89 91 94 96 99 102 104
@@ -187,7 +187,7 @@
led-1 {
label = "alarm-brightness";
- pwms = <&pwm1 0 100000>;
+ pwms = <&pwm1 0 100000 0>;
max-brightness = <255>;
};
};
@@ -628,14 +628,12 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm2 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
index 294811bfc8d2..b2d7271d1d24 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
@@ -202,14 +202,6 @@
};
};
-&pwm1 {
- #pwm-cells = <2>;
-};
-
-&pwm2 {
- #pwm-cells = <2>;
-};
-
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
index cc861a43eb58..a5ac79346854 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
@@ -14,7 +14,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
enable-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
@@ -79,6 +79,5 @@
};
&pwm1 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
index b6cb78870cd5..5a25bdbbeb68 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
@@ -49,7 +49,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 3000>;
+ pwms = <&pwm3 0 3000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
pinctrl-names = "default";
@@ -69,6 +69,5 @@
};
&pwm3 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-mamoj.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-mamoj.dts
index 028951955bde..72ee236d2f5e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-mamoj.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-mamoj.dts
@@ -21,7 +21,7 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 25000>; /* 25000ns -> 40kHz */
+ pwms = <&pwm3 0 25000 0>; /* 25000ns -> 40kHz */
brightness-levels = <0 4 8 16 32 64 128 160 192 224 255>;
default-brightness-level = <7>;
};
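A trivial cross-check of the comment in the hunk above (an editor's aside, not part of the patch): a 25000 ns PWM period does correspond to 40 kHz.

period_ns = 25_000        # period quoted in the pwms specifier comment
print(1e9 / period_ns)    # 40000.0 Hz, i.e. 40 kHz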
@@ -303,7 +303,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-ba16.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-ba16.dtsi
index f266f1b7e0cf..09d9ca0cb332 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-ba16.dtsi
@@ -55,7 +55,7 @@
compatible = "pwm-backlight";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display>;
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = < 0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
@@ -349,7 +349,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-bosch-acc.dts b/arch/arm/boot/dts/nxp/imx/imx6q-bosch-acc.dts
index 02648806c275..d3f14b4d3b51 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-bosch-acc.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-bosch-acc.dts
@@ -36,7 +36,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 200000>;
+ pwms = <&pwm1 0 200000 0>;
brightness-levels = <0 61 499 1706 4079 8022 13938 22237 33328 47623 65535>;
num-interpolated-steps = <10>;
default-brightness-level = <60>;
@@ -117,14 +117,14 @@
color = <LED_COLOR_ID_RED>;
max-brightness = <248>;
default-state = "off";
- pwms = <&pwm2 0 500000>;
+ pwms = <&pwm2 0 500000 0>;
};
led_white: led-1 {
color = <LED_COLOR_ID_WHITE>;
max-brightness = <248>;
default-state = "off";
- pwms = <&pwm3 0 500000>;
+ pwms = <&pwm3 0 500000 0>;
linux,default-trigger = "heartbeat";
};
};
@@ -484,28 +484,24 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm2 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "okay";
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-kp.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-kp.dtsi
index 091903f53a56..c425d427663d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-kp.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-kp.dtsi
@@ -15,7 +15,7 @@
/ {
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 255>;
num-interpolated-steps = <255>;
default-brightness-level = <250>;
@@ -23,7 +23,7 @@
beeper {
compatible = "pwm-beeper";
- pwms = <&pwm2 0 500000>;
+ pwms = <&pwm2 0 500000 0>;
};
lcd_display: display {
@@ -378,14 +378,12 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm2 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-novena.dts b/arch/arm/boot/dts/nxp/imx/imx6q-novena.dts
index a7d5a68110fc..d392b5bd2eea 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-novena.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-novena.dts
@@ -67,7 +67,7 @@
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 10000000>;
+ pwms = <&pwm1 0 10000000 0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_backlight_novena>;
power-supply = <&reg_lvds_lcd>;
@@ -465,7 +465,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-pistachio.dts b/arch/arm/boot/dts/nxp/imx/imx6q-pistachio.dts
index 46c6b96d8073..56b77cc0af2b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-pistachio.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-pistachio.dts
@@ -124,7 +124,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 50000>;
+ pwms = <&pwm1 0 50000 0>;
brightness-levels = <
0 /*1 2 3 4 5 6*/ 7 8 9
10 11 12 13 14 15 16 17 18 19
@@ -571,7 +571,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
index 3508a2cd928a..a7d5693c5ab7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
@@ -22,7 +22,7 @@
compatible = "pwm-backlight";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_backlight>;
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 16 64 255>;
num-interpolated-steps = <16>;
default-brightness-level = <1>;
@@ -292,7 +292,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-var-dt6customboard.dts b/arch/arm/boot/dts/nxp/imx/imx6q-var-dt6customboard.dts
index 2290c1237634..0225a621ec7a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-var-dt6customboard.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-var-dt6customboard.dts
@@ -18,7 +18,7 @@
backlight_lvds: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000>;
+ pwms = <&pwm2 0 50000 0>;
brightness-levels = <0 4 8 16 32 64 128 248>;
default-brightness-level = <7>;
status = "okay";
@@ -203,7 +203,6 @@
};
&pwm2 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
index 338d292553ad..3a46ade3b6bd 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
@@ -13,7 +13,7 @@
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 191000>;
+ pwms = <&pwm3 0 191000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <0>;
power-supply = <&reg_5v>;
@@ -212,7 +212,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
index db1bc511e71f..758eaf9d93d2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
@@ -46,7 +46,7 @@
/ {
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
enable-gpios = <&gpio6 31 GPIO_ACTIVE_HIGH>;
@@ -346,7 +346,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
index 1e530d892b76..761566ae3cf5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
@@ -64,7 +64,7 @@
active-low;
label = "imx6:red:front";
max-brightness = <248>;
- pwms = <&pwm1 0 50000>;
+ pwms = <&pwm1 0 50000 0>;
};
};
@@ -233,7 +233,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
index 42b2ba23aefc..a308a3584b62 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
@@ -66,7 +66,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lvds_bl>;
enable-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>;
- pwms = <&pwm1 0 50000>;
+ pwms = <&pwm1 0 50000 0>;
brightness-levels = <
0 4 8 16 32 64 80 96 112
128 144 160 176 250
@@ -78,7 +78,7 @@
pwm_fan: pwm-fan {
compatible = "pwm-fan";
#cooling-cells = <2>;
- pwms = <&pwm4 0 50000>;
+ pwms = <&pwm4 0 50000 0>;
cooling-levels = <0 64 127 191 255>;
status = "disabled";
};
@@ -145,7 +145,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rgb_bl>;
enable-gpios = <&gpio6 8 GPIO_ACTIVE_HIGH>;
- pwms = <&pwm3 0 5000000>;
+ pwms = <&pwm3 0 5000000 0>;
brightness-levels = <
250 176 160 144 128 112
96 80 64 48 32 16 8 1
@@ -736,17 +736,14 @@
};
&pwm1 {
- #pwm-cells = <2>;
status = "okay";
};
&pwm3 {
- #pwm-cells = <2>;
status = "okay";
};
&pwm4 {
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
index 535679c27d6f..48ffb3ee01bd 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
@@ -25,7 +25,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
};
@@ -520,7 +520,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
index 3e1c572af582..1eae438fbdae 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
@@ -25,7 +25,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
};
@@ -517,7 +517,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
index 0ffa0357a6fa..c2ec8572c8a5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
@@ -26,7 +26,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
};
@@ -570,7 +570,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default", "state_dio";
pinctrl-0 = <&pinctrl_pwm4_backlight>;
pinctrl-1 = <&pinctrl_pwm4_dio>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
index 46cf4080fec3..7cee983da669 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
@@ -66,7 +66,7 @@
backlight-display {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
@@ -619,7 +619,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
index a74cde050158..fbc704c064b6 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
@@ -56,7 +56,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
@@ -502,7 +502,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
index 1e723807ab4c..070506279186 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
@@ -70,7 +70,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
};
@@ -586,7 +586,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi
index efe11524b885..9975b6ee433d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi
@@ -20,7 +20,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 100000>;
+ pwms = <&pwm3 0 100000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
};
@@ -245,7 +245,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
index 4d2abcd44eff..60aa1e947f62 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
@@ -298,6 +298,7 @@
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ vdd-supply = <&reg_mba6_3p3v>;
ethernet@1 {
compatible = "usb424,9e00";
@@ -441,8 +442,6 @@
pinctrl_hog: hoggrp {
fsl,pins = <
- MX6QDL_PAD_DI0_PIN4__GPIO4_IO20 0x0001b099
-
MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x0001b099
MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x0001b099
MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x0001b099
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-nit6xlite.dtsi
index f2542d725ce7..a30cf0d06206 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-nit6xlite.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-nit6xlite.dtsi
@@ -108,7 +108,7 @@
backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -117,7 +117,7 @@
backlight_lvds0: backlight-lvds0 {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -499,7 +499,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
@@ -512,7 +511,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_max.dtsi
index 32a110a35b02..33174febf410 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_max.dtsi
@@ -183,7 +183,7 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -192,7 +192,7 @@
backlight_lvds0: backlight-lvds0 {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -201,7 +201,7 @@
backlight_lvds1: backlight-lvds1 {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 5000000>;
+ pwms = <&pwm2 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -735,14 +735,12 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm2 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "okay";
@@ -755,7 +753,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_som2.dtsi
index 414196b75991..8e64314fa8b2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6_som2.dtsi
@@ -17,7 +17,7 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -26,7 +26,7 @@
backlight_lvds0: backlight-lvds0 {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -641,7 +641,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
@@ -654,7 +653,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6x.dtsi
index f278b14911ce..121177273dd0 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-nitrogen6x.dtsi
@@ -134,7 +134,7 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -143,7 +143,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -596,7 +596,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
@@ -609,7 +608,6 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi
index 1ca4d219609f..0b4c09b09c03 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi
@@ -15,7 +15,7 @@
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_backlight>;
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
status = "okay";
};
@@ -224,7 +224,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
index 68e97180d33e..6656e2e762a1 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
@@ -144,8 +144,8 @@
};
sound-spdif {
- compatible = "fsl,imx-audio-spdif",
- "fsl,imx-sabreauto-spdif";
+ compatible = "fsl,imx-sabreauto-spdif",
+ "fsl,imx-audio-spdif";
model = "imx-spdif";
spdif-controller = <&spdif>;
spdif-in;
@@ -153,7 +153,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 5000000>;
+ pwms = <&pwm3 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
status = "okay";
@@ -802,7 +802,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabrelite.dtsi
index 84c8a9531e18..9c502bf77d0b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabrelite.dtsi
@@ -99,7 +99,7 @@
#clock-cells = <0>;
clock-frequency = <22000000>;
clock-output-names = "mipi_pwm3";
- pwms = <&pwm3 0 45>; /* 1 / 45 ns = 22 MHz */
+ pwms = <&pwm3 0 45 0>; /* 1 / 45 ns = 22 MHz */
status = "okay";
};
@@ -162,7 +162,7 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -171,7 +171,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_3p3v>;
@@ -654,21 +654,18 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
index 4fe58764b929..8f4f5fba68cc 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
@@ -119,7 +119,7 @@
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
status = "okay";
@@ -755,7 +755,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-savageboard.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-savageboard.dtsi
index 02e6d36e85fa..6823a639ed2f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-savageboard.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-savageboard.dtsi
@@ -83,7 +83,7 @@
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <4>;
power-supply = <&reg_3p3v>;
- pwms = <&pwm1 0 10000>;
+ pwms = <&pwm1 0 10000 0>;
};
reg_3p3v: regulator-3p3v {
@@ -140,7 +140,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi
index d59d5d0e1d19..6ab71a729fd8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi
@@ -282,7 +282,6 @@
&pwm2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
- #pwm-cells = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-udoo.dtsi
index 647ba5e623dd..14272b42f9a1 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-udoo.dtsi
@@ -59,16 +59,6 @@
};
};
- reg_usb_h1_vbus: regulator-usb-h1-vbus {
- compatible = "regulator-fixed";
- regulator-name = "usb_h1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- startup-delay-us = <2>; /* USB2415 requires a POR of 1 us minimum */
- gpio = <&gpio7 12 0>;
- };
-
reg_panel: regulator-panel {
compatible = "regulator-fixed";
regulator-name = "lcd_panel";
@@ -285,9 +275,18 @@
&usbh1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh>;
- vbus-supply = <&reg_usb_h1_vbus>;
- clocks = <&clks IMX6QDL_CLK_CKO>;
- status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ usb-port@1 {
+ compatible = "usb424,2514";
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
+ };
};
&usbotg {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
index 8431b8a994f4..d2200c9db25a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
@@ -397,11 +397,10 @@
reg = <0x02024000 0x4000>;
interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_ESAI_IPG>,
- <&clks IMX6QDL_CLK_ESAI_MEM>,
<&clks IMX6QDL_CLK_ESAI_EXTAL>,
<&clks IMX6QDL_CLK_ESAI_IPG>,
<&clks IMX6QDL_CLK_SPBA>;
- clock-names = "core", "mem", "extal", "fsys", "spba";
+ clock-names = "core", "extal", "fsys", "spba";
dmas = <&sdma 23 21 0>, <&sdma 24 21 0>;
dma-names = "rx", "tx";
status = "disabled";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
index 239bc6dfc584..31eee0419af7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
@@ -23,7 +23,7 @@
backlight_display: backlight_display {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
};
@@ -584,10 +584,8 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
- status = "okay";
};
&reg_vdd1p1 {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-tolino-shine2hd.dts
index 5636fb3661e8..03d6965f0149 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl-tolino-shine2hd.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl-tolino-shine2hd.dts
@@ -138,7 +138,7 @@
pinctrl-0 = <&pinctrl_zforce>;
reg = <0x50>;
interrupt-parent = <&gpio5>;
- interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&ldo1_reg>;
reset-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
touchscreen-size-x = <1072>;
@@ -163,7 +163,7 @@
pinctrl-0 = <&pinctrl_ricoh_gpio>;
reg = <0x32>;
interrupt-parent = <&gpio5>;
- interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
system-power-controller;
regulators {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
index e3e9b0ec4f73..febc2dd9967d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
@@ -26,7 +26,7 @@
backlight_display: backlight-display {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
status = "okay";
@@ -314,10 +314,8 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
- status = "okay";
};
&snvs_poweroff {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi
index 3659fd5ecfa6..ddeb5b37fb78 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi
@@ -683,7 +683,6 @@
clocks = <&clks IMX6SLL_CLK_USBOH3>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc 0>;
- fsl,anatop = <&anatop>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/nxp/imx/imx6sx-nitrogen6sx.dts
index cd9cbc9ccc9e..1c1515a854c8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-nitrogen6sx.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-nitrogen6sx.dts
@@ -18,7 +18,7 @@
backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_3p3v>;
@@ -83,7 +83,7 @@
sound {
compatible = "fsl,imx-audio-sgtl5000";
model = "imx6sx-nitrogen6sx-sgtl5000";
- cpu-dai = <&ssi1>;
+ ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
"MIC_IN", "Mic Jack",
@@ -229,10 +229,8 @@
};
&pwm4 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm4>;
- status = "okay";
};
&ssi1 {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
index c6e85e4a0883..7d4170c27732 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
@@ -23,7 +23,7 @@
backlight_display: backlight-display {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 5000000>;
+ pwms = <&pwm3 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
};
@@ -184,8 +184,8 @@
};
sound-spdif {
- compatible = "fsl,imx-audio-spdif",
- "fsl,imx6sx-sdb-spdif";
+ compatible = "fsl,imx6sx-sdb-spdif",
+ "fsl,imx-audio-spdif";
model = "imx-spdif";
spdif-controller = <&spdif>;
spdif-out;
@@ -295,10 +295,8 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
- status = "okay";
};
&snvs_poweroff {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-softing-vining-2000.dts b/arch/arm/boot/dts/nxp/imx/imx6sx-softing-vining-2000.dts
index bfcd8f7d86dd..f999eb244373 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-softing-vining-2000.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-softing-vining-2000.dts
@@ -46,19 +46,19 @@
led-1 {
label = "red";
max-brightness = <255>;
- pwms = <&pwm6 0 50000>;
+ pwms = <&pwm6 0 50000 0>;
};
led-2 {
label = "green";
max-brightness = <255>;
- pwms = <&pwm2 0 50000>;
+ pwms = <&pwm2 0 50000 0>;
};
led-3 {
label = "blue";
max-brightness = <255>;
- pwms = <&pwm1 0 50000>;
+ pwms = <&pwm1 0 50000 0>;
};
};
};
@@ -505,24 +505,18 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
- status = "okay";
};
&pwm2 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
- status = "okay";
};
&pwm6 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm6>;
- status = "okay";
};
&reg_arm {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
index 0de359d62a47..b386448486df 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
@@ -339,15 +339,14 @@
};
esai: esai@2024000 {
- compatible = "fsl,imx6sx-esai", "fsl,imx35-esai";
+ compatible = "fsl,imx35-esai";
reg = <0x02024000 0x4000>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_ESAI_IPG>,
- <&clks IMX6SX_CLK_ESAI_MEM>,
<&clks IMX6SX_CLK_ESAI_EXTAL>,
<&clks IMX6SX_CLK_ESAI_IPG>,
<&clks IMX6SX_CLK_SPBA>;
- clock-names = "core", "mem", "extal",
+ clock-names = "core", "extal",
"fsys", "spba";
dmas = <&sdma 23 21 0>,
<&sdma 24 21 0>;
@@ -929,7 +928,6 @@
clocks = <&clks IMX6SX_CLK_USBOH3>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc 0>;
- fsl,anatop = <&anatop>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;
@@ -957,7 +955,6 @@
fsl,usbphy = <&usbphynop1>;
fsl,usbmisc = <&usbmisc 2>;
phy_type = "hsic";
- fsl,anatop = <&anatop>;
dr_mode = "host";
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
index f10f0525490b..9cfb99ac9e9d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
@@ -16,7 +16,7 @@
backlight_display: backlight-display {
compatible = "pwm-backlight";
- pwms = <&pwm1 0 5000000>;
+ pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
status = "okay";
@@ -277,7 +277,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
index 1762bc47e18d..ed61ae8524fa 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
@@ -18,7 +18,7 @@
lcd_backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm5 0 50000>;
+ pwms = <&pwm5 0 50000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
status = "okay";
@@ -168,7 +168,6 @@
};
&pwm5 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm5>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
index 2ca18f3dad0a..cdbb8c435cd6 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
@@ -21,7 +21,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm8 0 100000>;
+ pwms = <&pwm8 0 100000 0>;
brightness-levels = < 0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
@@ -194,7 +194,6 @@
};
&pwm8 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm8>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6uldev.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6uldev.dtsi
index af337f18a266..be3cacb4fa7a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6uldev.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6uldev.dtsi
@@ -9,7 +9,7 @@
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 191000>;
+ pwms = <&pwm3 0 191000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <7>;
power-supply = <&reg_5v>;
@@ -143,7 +143,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
index 14fc4828ba4e..ee86c36205f9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
@@ -20,7 +20,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm8 0 100000>;
+ pwms = <&pwm8 0 100000 0>;
brightness-levels = < 0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
@@ -187,7 +187,6 @@
};
&pwm8 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm8>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-43.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-43.dts
index 0c643706a158..4e8191a65211 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-43.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-43.dts
@@ -14,7 +14,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm7 0 5000000>;
+ pwms = <&pwm7 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
status = "okay";
@@ -41,7 +41,6 @@
};
&pwm7 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm7>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
index 33d5f27285a4..d8f7877349c9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
@@ -35,7 +35,7 @@
pwm-beeper {
compatible = "pwm-beeper";
- pwms = <&pwm8 0 5000>;
+ pwms = <&pwm8 0 5000 0>;
};
reg_3v3: regulator-3v3 {
@@ -152,7 +152,6 @@
};
&pwm8 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm8>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
index 07dcecbe485d..fe307f49b9e5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
@@ -22,7 +22,7 @@
backlight: backlight {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 5000000>;
+ pwms = <&pwm3 0 5000000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
status = "okay";
@@ -177,7 +177,6 @@
};
&pwm3 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm3>;
status = "okay";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-emmc.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-emmc.dts
new file mode 100644
index 000000000000..cfcd8783c31d
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-emmc.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Linumiz
+ * Author: Parthiban <parthiban@linumiz.com>
+ */
+
+/dts-v1/;
+#include "imx6ull.dtsi"
+#include "imx6ull-seeed-npi.dtsi"
+#include "imx6ull-seeed-npi-dev-board.dtsi"
+
+/ {
+ model = "Seeed NPi iMX6ULL Dev Board with NAND";
+ compatible = "seeed,imx6ull-seeed-npi-emmc", "seeed,imx6ull-seeed-npi", "fsl,imx6ull";
+};
+
+&usdhc2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-nand.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-nand.dts
new file mode 100644
index 000000000000..87c9434b09c5
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board-nand.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Linumiz
+ * Author: Parthiban <parthiban@linumiz.com>
+ */
+
+/dts-v1/;
+#include "imx6ull.dtsi"
+#include "imx6ull-seeed-npi.dtsi"
+#include "imx6ull-seeed-npi-dev-board.dtsi"
+
+/ {
+ model = "Seeed NPi iMX6ULL Dev Board with NAND";
+ compatible = "seeed,imx6ull-seeed-npi-nand", "seeed,imx6ull-seeed-npi", "fsl,imx6ull";
+};
+
+&gpmi {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi
new file mode 100644
index 000000000000..6bb12e0bbc7e
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Linumiz
+ * Author: Parthiban <parthiban@linumiz.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ gpio_buttons: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_button>;
+
+ button-0 {
+ gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+ label = "SW2";
+ linux,code = <KEY_A>;
+ wakeup-source;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-blue {
+ gpios = <&gpio4 19 GPIO_ACTIVE_LOW>;
+ label = "LED_B";
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+
+ led-green {
+ gpios = <&gpio4 20 GPIO_ACTIVE_LOW>;
+ label = "LED_G";
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+
+ led-red {
+ gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ label = "LED_R";
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+
+ led-user {
+ gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+ label = "User";
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+ };
+
+ reg_5v_sys: regulator-5v-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&reg_5v_sys>;
+ };
+
+ reg_3v3_in: regulator-3v3-in {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3_IN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&reg_5v_sys>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&reg_3v3_in>;
+ };
+
+ reg_sd1_vmmc: regulator-sd1-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_vmmc>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_3v3>;
+ };
+};
+
+&csi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_csi1>;
+ status = "disabled"; /* LED Blue & Green shared */
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@2 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <2>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
+ };
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+ clock-names = "rmii-ref";
+ };
+ };
+};
+
+&lcdif {
+ pinctrl-0 = <&pinctrl_lcdif>;
+ pinctrl-names = "default";
+ status = "disabled";
+};
+
+&reg_dcdc_3v3 {
+ vin-supply = <&reg_3v3_in>;
+};
+
+&sai2 {
+ assigned-clock-rates = <320000000>;
+ assigned-clocks = <&clks IMX6UL_CLK_PLL3_PFD2>;
+ pinctrl-0 = <&pinctrl_sai2>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg1_id>;
+ dr_mode = "otg";
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_usdhc1_cd>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz &pinctrl_usdhc1_cd>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz &pinctrl_usdhc1_cd>;
+ cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&reg_sd1_vmmc>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_button: buttongrp {
+ fsl,pins = <
+ MX6ULL_PAD_SNVS_TAMPER1__GPIO5_IO01 0x0b0b0
+ >;
+ };
+
+ pinctrl_csi1: csi1grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_PIXCLK__CSI_PIXCLK 0x1b088
+ MX6UL_PAD_CSI_VSYNC__CSI_VSYNC 0x1b088
+ MX6UL_PAD_CSI_HSYNC__CSI_HSYNC 0x1b088
+ MX6UL_PAD_CSI_DATA00__CSI_DATA02 0x1b088
+ MX6UL_PAD_CSI_DATA01__CSI_DATA03 0x1b088
+ MX6UL_PAD_CSI_DATA02__CSI_DATA04 0x1b088
+ MX6UL_PAD_CSI_DATA03__CSI_DATA05 0x1b088
+ MX6UL_PAD_CSI_DATA04__CSI_DATA06 0x1b088
+ MX6UL_PAD_CSI_DATA05__CSI_DATA07 0x1b088
+ MX6UL_PAD_CSI_DATA06__CSI_DATA08 0x1b088
+ MX6UL_PAD_CSI_DATA07__CSI_DATA09 0x1b088
+ >;
+ };
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO07__ENET2_MDC 0x1b0b0
+ MX6UL_PAD_GPIO1_IO06__ENET2_MDIO 0x1b0b0
+ MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b031
+ >;
+ };
+
+ pinctrl_gpio_leds: ledgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0x0b0b0
+ MX6UL_PAD_CSI_VSYNC__GPIO4_IO19 0x0b0b0
+ MX6UL_PAD_CSI_HSYNC__GPIO4_IO20 0x0b0b0
+ MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03 0x0b0b0
+ >;
+ };
+
+ pinctrl_lcdif: lcdif-grp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79
+ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79
+ MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79
+ MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79
+ MX6UL_PAD_LCD_RESET__LCDIF_RESET 0x79
+ MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79
+ MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79
+ MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79
+ MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79
+ MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79
+ MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79
+ MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79
+ MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79
+ MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79
+ MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79
+ MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79
+ MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79
+ MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79
+ MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79
+ MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79
+ MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79
+ MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79
+ MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79
+ MX6UL_PAD_LCD_DATA18__LCDIF_DATA18 0x79
+ MX6UL_PAD_LCD_DATA19__LCDIF_DATA19 0x79
+ MX6UL_PAD_LCD_DATA20__LCDIF_DATA20 0x79
+ MX6UL_PAD_LCD_DATA21__LCDIF_DATA21 0x79
+ MX6UL_PAD_LCD_DATA22__LCDIF_DATA22 0x79
+ MX6UL_PAD_LCD_DATA23__LCDIF_DATA23 0x79
+ MX6UL_PAD_GPIO1_IO08__GPIO1_IO08 0x79
+ >;
+ };
+
+ pinctrl_reg_vmmc: usdhc1regvmmc {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO09__GPIO1_IO09 0x17059
+ >;
+ };
+
+ pinctrl_sai2: sai2-grp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x130b0
+ MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088
+ MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088
+ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1
+ MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS 0x1b0b1
+ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1
+ MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS 0x1b0b1
+ MX6UL_PAD_UART3_RTS_B__UART3_DCE_RTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_usb_otg1_id: usbotg1idgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x10059
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc1_cd: usdhc1cd {
+ fsl,pins = <
+ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi
new file mode 100644
index 000000000000..f5ad6b5c1ad0
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Linumiz
+ * Author: Parthiban <parthiban@linumiz.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Seeed NPi-iMX6ULL Dev Board";
+ compatible = "seeed,imx6ull-seeed-npi", "fsl,imx6ull";
+
+ reg_dcdc_3v3: regulator-dcdc-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "DCDC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_dram_1v35: regulator-dram-1v35 {
+ compatible = "regulator-fixed";
+ regulator-name = "DRAM_1V35";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ vin-supply = <&reg_dcdc_3v3>;
+ };
+
+ reg_vdd_arm_soc_in: regulator-vdd-arm-soc-in {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_ARM_SOC_IN";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ vin-supply = <&reg_dcdc_3v3>;
+ };
+
+ reg_dcdc_1v8: regulator-dcdc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "DCDC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ vin-supply = <&reg_dcdc_3v3>;
+ };
+
+ reg_sd1_vqmmc: regulator-sd1-vqmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "NVCC_SD";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_vqmmc>;
+ regulator-always-on;
+ vin-supply = <&reg_dcdc_1v8>;
+ };
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ status = "disabled";
+};
+
+&usdhc1 {
+ vqmmc-supply = <&reg_sd1_vqmmc>;
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ keep-power-in-suspend;
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_gpmi_nand: gpminandgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_DQS__RAWNAND_DQS 0x0b0b1
+ MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0x0b0b1
+ MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0x0b0b1
+ MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B 0x0b0b1
+ MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B 0x0b000
+ MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B 0x0b0b1
+ MX6UL_PAD_NAND_CE1_B__RAWNAND_CE1_B 0x0b0b1
+ MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B 0x0b0b1
+ MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B 0x0b0b1
+ MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00 0x0b0b1
+ MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01 0x0b0b1
+ MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02 0x0b0b1
+ MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03 0x0b0b1
+ MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04 0x0b0b1
+ MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05 0x0b0b1
+ MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06 0x0b0b1
+ MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07 0x0b0b1
+ >;
+ };
+
+ pinctrl_reg_vqmmc: usdhc1regvqmmc {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO05__GPIO1_IO05 0x17059
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x10069
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x17059
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x17059
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x17059
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x17059
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x17059
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x17059
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x17059
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x170b9
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x170b9
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170b9
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170b9
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x170b9
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x170b9
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x170b9
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x170f9
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x170f9
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170f9
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x170f9
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x170f9
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x170f9
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170f9
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
index 3fdece5bd31f..5248a058230c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
@@ -805,6 +805,7 @@
&pinctrl_usb_pwr>;
dr_mode = "host";
power-active-high;
+ over-current-active-low;
disable-over-current;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-master.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-master.dts
index 67007ce383e3..f9bbd589b66d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-master.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-master.dts
@@ -45,7 +45,7 @@
interrupts = <19 IRQ_TYPE_EDGE_RISING>;
spi-cpha;
spi-cpol;
- spi-max-frequency = <16000000>;
+ spi-max-frequency = <12000000>;
};
};
@@ -63,7 +63,7 @@
interrupts = <9 IRQ_TYPE_EDGE_RISING>;
spi-cpha;
spi-cpol;
- spi-max-frequency = <16000000>;
+ spi-max-frequency = <12000000>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slave.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slave.dts
index cee223b5f8e1..ef06619d7c86 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slave.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slave.dts
@@ -23,7 +23,7 @@
interrupts = <19 IRQ_TYPE_EDGE_RISING>;
spi-cpha;
spi-cpol;
- spi-max-frequency = <16000000>;
+ spi-max-frequency = <12000000>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slavext.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slavext.dts
index 7fd53b7a4372..83db65bf630f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slavext.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-slavext.dts
@@ -45,7 +45,7 @@
interrupts = <19 IRQ_TYPE_EDGE_RISING>;
spi-cpha;
spi-cpol;
- spi-max-frequency = <16000000>;
+ spi-max-frequency = <12000000>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-uti260b.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-uti260b.dts
new file mode 100644
index 000000000000..e4576d509a5b
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-uti260b.dts
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (C) 2022-2024 Sebastian Reichel <sre@kernel.org>
+
+/dts-v1/;
+#include "imx6ull.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/imx6ul-clock.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "UNI-T UTi260B Thermal Camera";
+ compatible = "uni-t,uti260b", "fsl,imx6ull";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
+
+ panel_backlight: backlight {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ enable-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_backlight_enable>;
+ power-supply = <&reg_vsd>;
+ pwms = <&pwm1 0 50000 0>;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ /* generic 26650 battery */
+ device-chemistry = "lithium-ion";
+ charge-full-design-microamp-hours = <5000000>;
+ voltage-max-design-microvolt = <4200000>;
+ voltage-min-design-microvolt = <3300000>;
+ };
+
+ tp5000: charger {
+ compatible = "gpio-charger";
+ charger-type = "usb-sdp";
+ gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_charger_stat1>;
+ };
+
+ fuel-gauge {
+ compatible = "adc-battery";
+ charged-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ io-channel-names = "voltage";
+ io-channels = <&adc1 7>;
+ monitored-battery = <&battery>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_charger_stat2>;
+ power-supplies = <&tp5000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_gpio_keys>;
+ autorepeat;
+
+ up-key {
+ label = "Up";
+ gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_UP>;
+ };
+
+ down-key {
+ label = "Down";
+ gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_DOWN>;
+ };
+
+ left-key {
+ label = "Left";
+ gpios = <&gpio2 13 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_LEFT>;
+ };
+
+ right-key {
+ label = "Right";
+ gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RIGHT>;
+ };
+
+ ok-key {
+ label = "Ok";
+ gpios = <&gpio2 9 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_ENTER>;
+ };
+
+ return-key {
+ label = "Return";
+ gpios = <&gpio2 15 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_ESC>;
+ };
+
+ play-key {
+ label = "Media";
+ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_MEDIA>;
+ };
+
+ trigger-key {
+ label = "Trigger";
+ gpios = <&gpio2 14 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_TRIGGER>;
+ };
+
+ power-key {
+ label = "Power";
+ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+
+ light-key {
+ label = "Light";
+ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_LIGHTS_TOGGLE>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_led_ctrl>;
+
+ led {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_FLASH;
+ gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_poweroff>;
+ };
+
+ reg_vref: regulator-vref-4v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "VREF_4V2";
+ regulator-min-microvolt = <4200000>;
+ regulator-max-microvolt = <4200000>;
+ };
+
+ reg_vsd: regulator-vsd {
+ compatible = "regulator-fixed";
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&adc1 {
+ #io-channel-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_adc>;
+ vref-supply = <&reg_vref>;
+ status = "okay";
+};
+
+&csi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_csi>;
+ status = "okay";
+
+ port {
+ parallel_from_gc0308: endpoint {
+ remote-endpoint = <&gc0308_to_parallel>;
+ };
+ };
+};
+
+&ecspi3 {
+ cs-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_spi3>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "inanbo,t28cp45tn89-v17";
+ reg = <0>;
+ backlight = <&panel_backlight>;
+ power-supply = <&reg_vsd>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <1000000>;
+ spi-rx-bus-width = <0>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+};
+
+&gpio1 {
+ ir-reset-hog {
+ gpio-hog;
+ gpios = <3 GPIO_ACTIVE_LOW>;
+ line-name = "ir-reset-gpio";
+ output-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_ir_reset>;
+ };
+};
+
+&gpio2 {
+ /* configuring this to output-high results in poweroff */
+ power-en-hog {
+ gpio-hog;
+ gpios = <6 GPIO_ACTIVE_HIGH>;
+ line-name = "power-en-gpio";
+ output-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_poweroff2>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_i2c1>;
+ status = "okay";
+
+ camera@21 {
+ compatible = "galaxycore,gc0308";
+ reg = <0x21>;
+ clocks = <&clks IMX6UL_CLK_CSI>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_gc0308>;
+ powerdown-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ vdd28-supply = <&reg_vsd>;
+
+ port {
+ gc0308_to_parallel: endpoint {
+ remote-endpoint = <&parallel_from_gc0308>;
+ bus-width = <8>;
+ data-shift = <2>; /* lines 9:2 are used */
+ hsync-active = <1>; /* active high */
+ vsync-active = <1>; /* active high */
+ data-active = <1>; /* active high */
+ pclk-sample = <1>; /* sample on rising edge */
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_i2c2>;
+ status = "okay";
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&lcdif {
+ assigned-clocks = <&clks IMX6UL_CLK_LCDIF_PRE_SEL>;
+ assigned-clock-parents = <&clks IMX6UL_CLK_PLL5_VIDEO_DIV>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_lcd_data>, <&mux_lcd_ctrl>;
+ status = "okay";
+
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_pwm>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_uart>;
+ status = "okay";
+};
+
+&usbotg1 {
+ /* USB-C connector */
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbotg2 {
+ /* thermal sensor */
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy1 {
+ fsl,tx-d-cal = <106>;
+};
+
+&usbphy2 {
+ fsl,tx-d-cal = <106>;
+};
+
+&usdhc1 {
+ /* MicroSD */
+ cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ keep-power-in-suspend;
+ no-1-8-v;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&mux_sdhc1>, <&mux_sdhc1_cd>;
+ pinctrl-1 = <&mux_sdhc1_100mhz>, <&mux_sdhc1_cd>;
+ pinctrl-2 = <&mux_sdhc1_200mhz>, <&mux_sdhc1_cd>;
+ wakeup-source;
+ vmmc-supply = <&reg_vsd>;
+ status = "okay";
+};
+
+&usdhc2 {
+ /* eMMC */
+ keep-power-in-suspend;
+ no-1-8-v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_sdhc2>;
+ wakeup-source;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mux_wdog>;
+};
+
+&iomuxc {
+ mux_adc: adcgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO07__GPIO1_IO07 0xb0
+ >;
+ };
+
+ mux_backlight_enable: blenablegrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO09__GPIO1_IO09 0x3008
+ >;
+ };
+
+ mux_charger_stat1: charger1grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0x3008
+ >;
+ };
+
+ mux_charger_stat2: charger2grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0x3008
+ >;
+ };
+
+ mux_csi: csi1grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_PIXCLK__CSI_PIXCLK 0x1b088
+ MX6UL_PAD_CSI_VSYNC__CSI_VSYNC 0x1b088
+ MX6UL_PAD_CSI_HSYNC__CSI_HSYNC 0x1b088
+ MX6UL_PAD_CSI_DATA00__CSI_DATA02 0x1b088
+ MX6UL_PAD_CSI_DATA01__CSI_DATA03 0x1b088
+ MX6UL_PAD_CSI_DATA02__CSI_DATA04 0x1b088
+ MX6UL_PAD_CSI_DATA03__CSI_DATA05 0x1b088
+ MX6UL_PAD_CSI_DATA04__CSI_DATA06 0x1b088
+ MX6UL_PAD_CSI_DATA05__CSI_DATA07 0x1b088
+ MX6UL_PAD_CSI_DATA06__CSI_DATA08 0x1b088
+ MX6UL_PAD_CSI_DATA07__CSI_DATA09 0x1b088
+ >;
+ };
+
+ mux_gc0308: gc0308grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_MCLK__CSI_MCLK 0x1e038
+ MX6UL_PAD_GPIO1_IO05__GPIO1_IO05 0x1b088
+ MX6UL_PAD_GPIO1_IO06__GPIO1_IO06 0x1b088
+ >;
+ };
+
+ mux_gpio_keys: gpiokeygrp {
+ fsl,pins = <
+ MX6UL_PAD_ENET2_TX_DATA0__GPIO2_IO11 0x3008
+ MX6UL_PAD_ENET2_TX_DATA1__GPIO2_IO12 0x3008
+ MX6UL_PAD_ENET2_TX_EN__GPIO2_IO13 0x3008
+ MX6UL_PAD_ENET2_RX_EN__GPIO2_IO10 0x3008
+ MX6UL_PAD_ENET2_RX_DATA1__GPIO2_IO09 0x3008
+ MX6UL_PAD_ENET2_RX_ER__GPIO2_IO15 0x3008
+ MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08 0x3008
+ MX6UL_PAD_ENET2_TX_CLK__GPIO2_IO14 0x3008
+ MX6UL_PAD_ENET1_TX_DATA0__GPIO2_IO03 0x3008
+ MX6UL_PAD_ENET1_RX_DATA1__GPIO2_IO01 0x3008
+ >;
+ };
+
+ mux_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
+ MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+ >;
+ };
+
+ mux_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART5_TX_DATA__I2C2_SCL 0x4001f8a8
+ MX6UL_PAD_UART5_RX_DATA__I2C2_SDA 0x4001f8a8
+ >;
+ };
+
+ mux_ir_reset: irresetgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0x3008
+ >;
+ };
+
+ mux_lcd_ctrl: lcdifctrlgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79
+ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79
+ MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79
+ MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79
+ >;
+ };
+
+ mux_lcd_data: lcdifdatgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79
+ MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79
+ MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79
+ MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79
+ MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79
+ MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79
+ MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79
+ MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79
+ MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79
+ MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79
+ MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79
+ MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79
+ MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79
+ MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79
+ MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79
+ MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79
+ MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79
+ MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79
+ >;
+ };
+
+ mux_led_ctrl: ledctrlgrp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_EN__GPIO2_IO02 0x3008
+ >;
+ };
+
+ mux_poweroff: poweroffgrp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_TX_DATA1__GPIO2_IO04 0x3008
+ >;
+ };
+
+ mux_poweroff2: poweroff2grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_TX_CLK__GPIO2_IO06 0x3008
+ >;
+ };
+
+ mux_pwm: pwm1grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO08__PWM1_OUT 0x110b0
+ >;
+ };
+
+ mux_sdhc1: sdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x10071
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059
+ >;
+ };
+
+ mux_sdhc1_100mhz: sdhc1-100mhz-grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x170b9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+ >;
+ };
+
+ mux_sdhc1_200mhz: sdhc1-200mhz-grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x170f9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+ >;
+ };
+
+ mux_sdhc1_cd: sdhc1-cd-grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059
+ >;
+ };
+
+ mux_sdhc2: sdhc2grp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x10069
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x17059
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x17059
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x17059
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x17059
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x17059
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x17059
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x17059
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059
+ >;
+ };
+
+ mux_spi3: ecspi3grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x100b1
+ MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x100b1
+ MX6UL_PAD_UART2_TX_DATA__GPIO1_IO20 0x3008
+ >;
+ };
+
+ mux_uart: uartgrp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+
+ mux_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_RESET__WDOG1_WDOG_ANY 0x30b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
index 1235a71c6abe..52869e68f833 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
@@ -666,7 +666,7 @@
bus-width = <4>;
no-1-8-v;
no-sdio;
- no-emmc;
+ no-mmc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
index ba7231b364bb..7bab113ca6da 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
@@ -210,6 +210,7 @@
remote-endpoint = <&mipi_from_sensor>;
clock-lanes = <0>;
data-lanes = <1>;
+ link-frequencies = /bits/ 64 <330000000>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
index 9c81c6baa2d3..22dd72499ef2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
@@ -636,6 +636,15 @@
clock-names = "snvs-rtc";
};
+ snvs_poweroff: snvs-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&snvs>;
+ offset = <0x38>;
+ value = <0x60>;
+ mask = <0x60>;
+ status = "disabled";
+ };
+
snvs_pwrkey: snvs-powerkey {
compatible = "fsl,sec-v4.0-pwrkey";
regmap = <&snvs>;
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 6478a39b3be5..e2e922bdc9e9 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_QCOM) += \
+ msm8226-motorola-falcon.dtb \
qcom-apq8016-sbc.dtb \
qcom-apq8026-asus-sparrow.dtb \
qcom-apq8026-huawei-sturgeon.dtb \
@@ -45,7 +46,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-msm8974pro-fairphone-fp2.dtb \
qcom-msm8974pro-oneplus-bacon.dtb \
qcom-msm8974pro-samsung-klte.dtb \
+ qcom-msm8974pro-samsung-kltechn.dtb \
qcom-msm8974pro-sony-xperia-shinano-castor.dtb \
+ qcom-msm8974pro-sony-xperia-shinano-leo.dtb \
qcom-mdm9615-wp8548-mangoh-green.dtb \
qcom-sdx55-mtp.dtb \
qcom-sdx55-t55.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
new file mode 100644
index 000000000000..029e1b1659c9
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/dts-v1/;
+
+#include "qcom-msm8226.dtsi"
+#include "pm8226.dtsi"
+
+/delete-node/ &smem_region;
+
+/ {
+ model = "Motorola Moto G (2013)";
+ compatible = "motorola,falcon", "qcom,msm8226";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1;
+ };
+
+ chosen {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@3200000 {
+ compatible = "simple-framebuffer";
+ reg = <0x03200000 0x800000>;
+ width = <720>;
+ height = <1280>;
+ stride = <(720 * 3)>;
+ format = "r8g8b8";
+ vsp-supply = <&reg_lcd_pos>;
+ vsn-supply = <&reg_lcd_neg>;
+ vddio-supply = <&vddio_disp_vreg>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ event-hall-sensor {
+ label = "Hall Effect Sensor";
+ gpios = <&tlmm 51 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ };
+
+ key-volume-up {
+ label = "Volume Up";
+ gpios = <&tlmm 106 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ vddio_disp_vreg: regulator-vddio-disp {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio_disp";
+ gpio = <&tlmm 34 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pm8226_l8>;
+ startup-delay-us = <300>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@3200000 {
+ reg = <0x03200000 0x800000>;
+ no-map;
+ };
+
+ dhob@f500000 {
+ reg = <0x0f500000 0x40000>;
+ no-map;
+ };
+
+ shob@f540000 {
+ reg = <0x0f540000 0x2000>;
+ no-map;
+ };
+
+ smem_region: smem@fa00000 {
+ reg = <0x0fa00000 0x100000>;
+ no-map;
+ };
+
+		/* Actually <0x0fa00000 0x500000>, but the first 0x100000 is smem */
+ reserved@fb00000 {
+ reg = <0x0fb00000 0x400000>;
+ no-map;
+ };
+ };
+};
+
+&blsp1_i2c3 {
+ status = "okay";
+
+ regulator@3e {
+ compatible = "ti,tps65132";
+ reg = <0x3e>;
+ pinctrl-0 = <&reg_lcd_default>;
+ pinctrl-names = "default";
+
+ reg_lcd_pos: outp {
+ regulator-name = "outp";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ enable-gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_lcd_neg: outn {
+ regulator-name = "outn";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ enable-gpios = <&tlmm 33 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp108";
+ reg = <0x48>;
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&temp_alert_default>;
+ pinctrl-names = "default";
+ #thermal-sensor-cells = <0>;
+ };
+};
+
+&pm8226_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&pm8226_vib {
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators {
+ compatible = "qcom,rpm-pm8226-regulators";
+
+ pm8226_s3: s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8226_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2200000>;
+ };
+
+ pm8226_s5: s5 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+
+ pm8226_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8226_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l3: l3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1337500>;
+ };
+
+ pm8226_l4: l4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l5: l5 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ };
+
+ pm8226_l7: l7 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <1850000>;
+ };
+
+ pm8226_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l9: l9 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8226_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l14: l14 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
+ };
+
+ pm8226_l15: l15 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ pm8226_l16: l16 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3350000>;
+ };
+
+ pm8226_l17: l17 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l18: l18 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l19: l19 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8226_l20: l20 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pm8226_l21: l21 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+
+ pm8226_l22: l22 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l23: l23 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l24: l24 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8226_l25: l25 {
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <2125000>;
+ };
+
+ pm8226_l26: l26 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8226_l27: l27 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8226_l28: l28 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ };
+
+ pm8226_lvs1: lvs1 {
+ regulator-always-on;
+ };
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8226_l17>;
+ vqmmc-supply = <&pm8226_l6>;
+
+ bus-width = <8>;
+ non-removable;
+
+ status = "okay";
+};
+
+&smbb {
+ qcom,fast-charge-safe-current = <2000000>;
+ qcom,fast-charge-current-limit = <1900000>;
+ qcom,fast-charge-safe-voltage = <4400000>;
+ qcom,minimum-input-voltage = <4300000>;
+
+ status = "okay";
+};
+
+&tlmm {
+ reg_lcd_default: reg-lcd-default-state {
+ pins = "gpio31", "gpio33";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ reg_vddio_disp_default: reg-vddio-disp-default-state {
+ pins = "gpio34";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ temp_alert_default: temp-alert-default-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+};
+
+&usb {
+ extcon = <&smbb>;
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&smbb>;
+ v1p8-supply = <&pm8226_l10>;
+ v3p3-supply = <&pm8226_l20>;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
index 9a5ba978775a..11e60b74c3c9 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
@@ -87,7 +87,7 @@
};
idle-states {
- CPU_SPC: spc {
+ CPU_SPC: cpu-spc {
compatible = "qcom,idle-state-spc",
"arm,idle-state";
entry-latency-us = <400>;
@@ -1334,6 +1334,16 @@
<&gcc PCIE_PHY_RESET>;
reset-names = "axi", "ahb", "por", "pci", "phy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
hdmi: hdmi-tx@4a00000 {
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
index 8204e64d9a97..ca53dff820ef 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
@@ -79,7 +79,7 @@
};
idle-states {
- CPU_SPC: spc {
+ CPU_SPC: cpu-spc {
compatible = "qcom,idle-state-spc",
"arm,idle-state";
entry-latency-us = <150>;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
index 681cb3fc8085..0fb65f2bbcdf 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
@@ -470,6 +470,16 @@
"phy_ahb";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
qpic_bam: dma-controller@7984000 {
@@ -598,24 +608,33 @@
reg = <0x90000 0x64>;
status = "disabled";
- ethphy0: ethernet-phy@0 {
+ ethernet-phy-package@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,qca8075-package";
reg = <0>;
- };
- ethphy1: ethernet-phy@1 {
- reg = <1>;
- };
+ qcom,tx-drive-strength-milliwatt = <300>;
- ethphy2: ethernet-phy@2 {
- reg = <2>;
- };
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
- ethphy3: ethernet-phy@3 {
- reg = <3>;
- };
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ ethphy3: ethernet-phy@3 {
+ reg = <3>;
+ };
- ethphy4: ethernet-phy@4 {
- reg = <4>;
+ ethphy4: ethernet-phy@4 {
+ reg = <4>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
index 2eb6758b6a3a..f128510d8445 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
@@ -1121,6 +1121,16 @@
status = "disabled";
perst-gpios = <&qcom_pinmux 3 GPIO_ACTIVE_LOW>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1: pcie@1b700000 {
@@ -1172,6 +1182,16 @@
status = "disabled";
perst-gpios = <&qcom_pinmux 48 GPIO_ACTIVE_LOW>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2: pcie@1b900000 {
@@ -1223,6 +1243,16 @@
status = "disabled";
perst-gpios = <&qcom_pinmux 63 GPIO_ACTIVE_LOW>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
qsgmii_csr: syscon@1bb00000 {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8916-smp.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8916-smp.dtsi
index 36328dbe4212..1ba403b83cb1 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8916-smp.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8916-smp.dtsi
@@ -26,7 +26,7 @@
};
&CPU_SLEEP_0 {
- compatible = "qcom,idle-state-spc";
+ compatible = "qcom,idle-state-spc", "arm,idle-state";
};
&cpu0_acc {
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
index 5efc38d712cc..5651bb31bd54 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
@@ -14,6 +14,8 @@
#size-cells = <1>;
interrupt-parent = <&intc>;
+ chosen { };
+
clocks {
xo_board: xo_board {
compatible = "fixed-clock";
@@ -85,7 +87,7 @@
};
idle-states {
- CPU_SPC: spc {
+ CPU_SPC: cpu-spc {
compatible = "qcom,idle-state-spc",
"arm,idle-state";
entry-latency-us = <150>;
@@ -103,7 +105,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0>;
};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi
new file mode 100644
index 000000000000..b5443fd5b425
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974pro.dtsi"
+#include "pma8084.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &blsp1_uart1;
+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */
+ mmc1 = &sdhc_3; /* SDC2 SD card slot */
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pin_a>;
+
+ key-volume-down {
+ label = "volume_down";
+ gpios = <&pma8084_gpios 2 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+
+ key-home {
+ label = "home_key";
+ gpios = <&pma8084_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_HOMEPAGE>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ key-volume-up {
+ label = "volume_up";
+ gpios = <&pma8084_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ i2c-gpio-touchkey {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sda-gpios = <&tlmm 95 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&tlmm 96 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_touchkey_pins>;
+
+ touchkey@20 {
+ compatible = "cypress,tm2-touchkey";
+ reg = <0x20>;
+
+ interrupt-parent = <&pma8084_gpios>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchkey_pin>;
+
+ vcc-supply = <&max77826_ldo15>;
+ vdd-supply = <&pma8084_l19>;
+
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
+ };
+ };
+
+ i2c_led_gpio: i2c-gpio-led {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_led_gpioex_pins>;
+
+ i2c-gpio,delay-us = <2>;
+
+ gpio_expander: gpio@20 {
+ compatible = "nxp,pcal6416";
+ reg = <0x20>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ vcc-supply = <&pma8084_s4>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpioex_pin>;
+
+ reset-gpios = <&tlmm 145 GPIO_ACTIVE_LOW>;
+ };
+
+ led-controller@30 {
+ compatible = "panasonic,an30259a";
+ reg = <0x30>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+
+ vreg_wlan: wlan-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio_expander 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vreg_panel: panel-regulator {
+ compatible = "regulator-fixed";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_en_pin>;
+
+ regulator-name = "panel-vddr-reg";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+
+ gpio = <&pma8084_gpios 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vreg_vph_pwr: vreg-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph-pwr";
+
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+
+ regulator-always-on;
+ };
+};
+
+&blsp1_i2c2 {
+ status = "okay";
+
+ touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x20>;
+
+ interrupt-parent = <&pma8084_gpios>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&max77826_ldo13>;
+ vio-supply = <&pma8084_lvs2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_pin>;
+
+ syna,startup-delay-ms = <100>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ syna,sensor-type = <1>;
+ };
+ };
+};
+
+&blsp1_i2c6 {
+ status = "okay";
+
+ pmic@60 {
+ reg = <0x60>;
+ compatible = "maxim,max77826";
+
+ regulators {
+ max77826_ldo1: LDO1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ max77826_ldo2: LDO2 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ max77826_ldo3: LDO3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ max77826_ldo4: LDO4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo5: LDO5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo6: LDO6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo7: LDO7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo8: LDO8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo9: LDO9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo10: LDO10 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ max77826_ldo11: LDO11 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ max77826_ldo12: LDO12 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo13: LDO13 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo14: LDO14 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo15: LDO15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_buck: BUCK {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ max77826_buckboost: BUCKBOOST {
+ regulator-min-microvolt = <3400000>;
+ regulator-max-microvolt = <3400000>;
+ };
+ };
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&blsp2_i2c6 {
+ status = "okay";
+
+ fuelgauge@36 {
+ compatible = "maxim,max17048";
+ reg = <0x36>;
+
+ maxim,double-soc;
+ maxim,rcomp = /bits/ 8 <0x56>;
+
+ interrupt-parent = <&pma8084_gpios>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&fuelgauge_pin>;
+ };
+};
+
+&blsp2_uart2 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_pins_active>;
+ pinctrl-1 = <&blsp2_uart2_pins_sleep>;
+
+ bluetooth {
+ compatible = "brcm,bcm43540-bt";
+ max-speed = <3000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_pins>;
+ device-wakeup-gpios = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio_expander 9 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
+ };
+};
+
+&gpu {
+ status = "okay";
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ status = "okay";
+
+ vdda-supply = <&pma8084_l2>;
+ vdd-supply = <&pma8084_l22>;
+ vddio-supply = <&pma8084_l12>;
+
+ panel: panel@0 {
+ reg = <0>;
+ compatible = "samsung,s6e3fa2";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_te_pin &panel_rst_pin>;
+
+ iovdd-supply = <&pma8084_lvs4>;
+ vddr-supply = <&vreg_panel>;
+
+ reset-gpios = <&pma8084_gpios 17 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi0_phy {
+ status = "okay";
+
+ vddio-supply = <&pma8084_l12>;
+};
+
+&pma8084_gpios {
+ gpio_keys_pin_a: gpio-keys-active-state {
+ pins = "gpio2", "gpio3", "gpio5";
+ function = "normal";
+
+ bias-pull-up;
+ power-source = <PMA8084_GPIO_S4>;
+ };
+
+ touchkey_pin: touchkey-int-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-disable;
+ input-enable;
+ power-source = <PMA8084_GPIO_S4>;
+ };
+
+ touch_pin: touchscreen-int-state {
+ pins = "gpio8";
+ function = "normal";
+ bias-disable;
+ input-enable;
+ power-source = <PMA8084_GPIO_S4>;
+ };
+
+ panel_en_pin: panel-en-state {
+ pins = "gpio14";
+ function = "normal";
+ bias-pull-up;
+ power-source = <PMA8084_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+
+ wlan_sleep_clk_pin: wlan-sleep-clk-state {
+ pins = "gpio16";
+ function = "func2";
+
+ output-high;
+ power-source = <PMA8084_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ };
+
+ panel_rst_pin: panel-rst-state {
+ pins = "gpio17";
+ function = "normal";
+ bias-disable;
+ power-source = <PMA8084_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+
+ fuelgauge_pin: fuelgauge-int-state {
+ pins = "gpio21";
+ function = "normal";
+ bias-disable;
+ input-enable;
+ power-source = <PMA8084_GPIO_S4>;
+ };
+};
+
+&remoteproc_adsp {
+ status = "okay";
+ cx-supply = <&pma8084_s2>;
+};
+
+&remoteproc_mss {
+ status = "okay";
+ cx-supply = <&pma8084_s2>;
+ mss-supply = <&pma8084_s6>;
+ mx-supply = <&pma8084_s1>;
+ pll-supply = <&pma8084_l12>;
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pma8084-regulators";
+
+ pma8084_s1: s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ };
+
+ pma8084_s2: s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pma8084_s3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pma8084_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_s5: s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+
+ pma8084_s6: s6 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pma8084_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pma8084_l3: l3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pma8084_l4: l4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l11: l11 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pma8084_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ pma8084_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l15: l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pma8084_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pma8084_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pma8084_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pma8084_l19: l19 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pma8084_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ };
+
+ pma8084_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ };
+
+ pma8084_l22: l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pma8084_l23: l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pma8084_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pma8084_l25: l25 {
+ regulator-min-microvolt = <2100000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ pma8084_l26: l26 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pma8084_l27: l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_lvs1: lvs1 {};
+ pma8084_lvs2: lvs2 {};
+ pma8084_lvs3: lvs3 {};
+ pma8084_lvs4: lvs4 {};
+
+ pma8084_5vs1: 5vs1 {};
+ };
+};
+
+&sdhc_1 {
+ status = "okay";
+
+ vmmc-supply = <&pma8084_l20>;
+ vqmmc-supply = <&pma8084_s4>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_on>;
+ pinctrl-1 = <&sdc1_off>;
+};
+
+&sdhc_2 {
+ status = "okay";
+ max-frequency = <100000000>;
+ vmmc-supply = <&vreg_wlan>;
+ vqmmc-supply = <&pma8084_s4>;
+ non-removable;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_on>;
+ pinctrl-1 = <&sdc2_off>;
+
+ wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+
+ /*
+ * Allow all klte* variants to load the same NVRAM file,
+ * as they have little difference in the WiFi part.
+ */
+ brcm,board-type = "samsung,klte";
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <92 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_sleep_clk_pin &wifi_pin>;
+ };
+};
+
+&sdhc_3 {
+ status = "okay";
+ max-frequency = <100000000>;
+ vmmc-supply = <&pma8084_l21>;
+ vqmmc-supply = <&pma8084_l13>;
+
+ /*
+ * cd-gpio is intentionally disabled. If enabled, an SD card
+ * present during boot is not initialized correctly. Without
+ * cd-gpios the driver resorts to polling, so hotplug works.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdc3_on /* &sdhc3_cd_pin */>;
+ /* cd-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>; */
+};
+
+&tlmm {
+ /* This seems suspicious, but somebody with this device should look into it. */
+ blsp2_uart2_pins_active: blsp2-uart2-pins-active-state {
+ pins = "gpio45", "gpio46", "gpio47", "gpio48";
+ function = "blsp_uart8";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ blsp2_uart2_pins_sleep: blsp2-uart2-pins-sleep-state {
+ pins = "gpio45", "gpio46", "gpio47", "gpio48";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ bt_pins: bt-pins-state {
+ hostwake-pins {
+ pins = "gpio75";
+ function = "gpio";
+ drive-strength = <16>;
+ };
+
+ devwake-pins {
+ pins = "gpio91";
+ function = "gpio";
+ drive-strength = <2>;
+ };
+ };
+
+ sdc1_on: sdhc1-on-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <4>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <4>;
+ bias-pull-up;
+ };
+ };
+
+ sdc3_on: sdc3-on-state {
+ pins = "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40";
+ function = "sdc3";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ sdhc3_cd_pin: sdc3-cd-on-state {
+ pins = "gpio62";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc2_on: sdhc2-on-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc2_cmd", "sdc2_data";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ i2c_touchkey_pins: i2c-touchkey-state {
+ pins = "gpio95", "gpio96";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ i2c_led_gpioex_pins: i2c-led-gpioex-state {
+ function = "gpio";
+ bias-pull-down;
+ };
+
+ gpioex_pin: gpioex-state {
+ pins = "gpio145";
+ function = "gpio";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ wifi_pin: wifi-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-pull-down;
+ };
+
+ panel_te_pin: panel-state {
+ pins = "gpio12";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&usb {
+ status = "okay";
+
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+};
+
+&usb_hs1_phy {
+ status = "okay";
+
+ v1p8-supply = <&pma8084_l6>;
+ v3p3-supply = <&pma8084_l24>;
+
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
index b93539e2b87e..954665f3a9dd 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
@@ -1,817 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-#include "qcom-msm8974pro.dtsi"
-#include "pma8084.dtsi"
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
-#include <dt-bindings/leds/common.h>
+#include "qcom-msm8974pro-samsung-klte-common.dtsi"
/ {
model = "Samsung Galaxy S5";
compatible = "samsung,klte", "qcom,msm8974pro", "qcom,msm8974";
- chassis-type = "handset";
-
- aliases {
- serial0 = &blsp1_uart1;
- mmc0 = &sdhc_1; /* SDC1 eMMC slot */
- mmc1 = &sdhc_3; /* SDC2 SD card slot */
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_keys_pin_a>;
-
- key-volume-down {
- label = "volume_down";
- gpios = <&pma8084_gpios 2 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_VOLUMEDOWN>;
- debounce-interval = <15>;
- };
-
- key-home {
- label = "home_key";
- gpios = <&pma8084_gpios 3 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_HOMEPAGE>;
- wakeup-source;
- debounce-interval = <15>;
- };
-
- key-volume-up {
- label = "volume_up";
- gpios = <&pma8084_gpios 5 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_VOLUMEUP>;
- debounce-interval = <15>;
- };
- };
-
- i2c-gpio-touchkey {
- compatible = "i2c-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- sda-gpios = <&tlmm 95 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- scl-gpios = <&tlmm 96 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c_touchkey_pins>;
-
- touchkey@20 {
- compatible = "cypress,tm2-touchkey";
- reg = <0x20>;
-
- interrupt-parent = <&pma8084_gpios>;
- interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
- pinctrl-names = "default";
- pinctrl-0 = <&touchkey_pin>;
-
- vcc-supply = <&max77826_ldo15>;
- vdd-supply = <&pma8084_l19>;
-
- linux,keycodes = <KEY_APPSELECT KEY_BACK>;
- };
- };
-
- i2c-gpio-led {
- compatible = "i2c-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- scl-gpios = <&tlmm 121 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- sda-gpios = <&tlmm 120 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c_led_gpioex_pins>;
-
- i2c-gpio,delay-us = <2>;
-
- gpio_expander: gpio@20 {
- compatible = "nxp,pcal6416";
- reg = <0x20>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- vcc-supply = <&pma8084_s4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&gpioex_pin>;
-
- reset-gpios = <&tlmm 145 GPIO_ACTIVE_LOW>;
- };
-
- led-controller@30 {
- compatible = "panasonic,an30259a";
- reg = <0x30>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- led@1 {
- reg = <1>;
- function = LED_FUNCTION_STATUS;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@2 {
- reg = <2>;
- function = LED_FUNCTION_STATUS;
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led@3 {
- reg = <3>;
- function = LED_FUNCTION_STATUS;
- color = <LED_COLOR_ID_BLUE>;
- };
- };
- };
-
- vreg_wlan: wlan-regulator {
- compatible = "regulator-fixed";
-
- regulator-name = "wl-reg";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&gpio_expander 8 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- vreg_panel: panel-regulator {
- compatible = "regulator-fixed";
-
- pinctrl-names = "default";
- pinctrl-0 = <&panel_en_pin>;
-
- regulator-name = "panel-vddr-reg";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
-
- gpio = <&pma8084_gpios 14 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- vreg_vph_pwr: vreg-vph-pwr {
- compatible = "regulator-fixed";
- regulator-name = "vph-pwr";
-
- regulator-min-microvolt = <3600000>;
- regulator-max-microvolt = <3600000>;
-
- regulator-always-on;
- };
-};
-
-&blsp1_i2c2 {
- status = "okay";
-
- touchscreen@20 {
- compatible = "syna,rmi4-i2c";
- reg = <0x20>;
-
- interrupt-parent = <&pma8084_gpios>;
- interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
-
- vdd-supply = <&max77826_ldo13>;
- vio-supply = <&pma8084_lvs2>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&touch_pin>;
-
- syna,startup-delay-ms = <100>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- rmi4-f01@1 {
- reg = <0x1>;
- syna,nosleep-mode = <1>;
- };
-
- rmi4-f12@12 {
- reg = <0x12>;
- syna,sensor-type = <1>;
- };
- };
-};
-
-&blsp1_i2c6 {
- status = "okay";
-
- pmic@60 {
- reg = <0x60>;
- compatible = "maxim,max77826";
-
- regulators {
- max77826_ldo1: LDO1 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- max77826_ldo2: LDO2 {
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- };
-
- max77826_ldo3: LDO3 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- max77826_ldo4: LDO4 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- max77826_ldo5: LDO5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- max77826_ldo6: LDO6 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- };
-
- max77826_ldo7: LDO7 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- max77826_ldo8: LDO8 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- };
-
- max77826_ldo9: LDO9 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- max77826_ldo10: LDO10 {
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- max77826_ldo11: LDO11 {
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2950000>;
- };
-
- max77826_ldo12: LDO12 {
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <3300000>;
- };
-
- max77826_ldo13: LDO13 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- max77826_ldo14: LDO14 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- max77826_ldo15: LDO15 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- max77826_buck: BUCK {
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
- };
-
- max77826_buckboost: BUCKBOOST {
- regulator-min-microvolt = <3400000>;
- regulator-max-microvolt = <3400000>;
- };
- };
- };
-};
-
-&blsp1_uart2 {
- status = "okay";
-};
-
-&blsp2_i2c6 {
- status = "okay";
-
- fuelgauge@36 {
- compatible = "maxim,max17048";
- reg = <0x36>;
-
- maxim,double-soc;
- maxim,rcomp = /bits/ 8 <0x56>;
-
- interrupt-parent = <&pma8084_gpios>;
- interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&fuelgauge_pin>;
- };
-};
-
-&blsp2_uart2 {
- status = "okay";
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart2_pins_active>;
- pinctrl-1 = <&blsp2_uart2_pins_sleep>;
-
- bluetooth {
- compatible = "brcm,bcm43540-bt";
- max-speed = <3000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&bt_pins>;
- device-wakeup-gpios = <&tlmm 91 GPIO_ACTIVE_HIGH>;
- shutdown-gpios = <&gpio_expander 9 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&tlmm>;
- interrupts = <75 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "host-wakeup";
- };
-};
-
-&gpu {
- status = "okay";
-};
-
-&mdss {
- status = "okay";
-};
-
-&mdss_dsi0 {
- status = "okay";
-
- vdda-supply = <&pma8084_l2>;
- vdd-supply = <&pma8084_l22>;
- vddio-supply = <&pma8084_l12>;
-
- panel: panel@0 {
- reg = <0>;
- compatible = "samsung,s6e3fa2";
-
- pinctrl-names = "default";
- pinctrl-0 = <&panel_te_pin &panel_rst_pin>;
-
- iovdd-supply = <&pma8084_lvs4>;
- vddr-supply = <&vreg_panel>;
-
- reset-gpios = <&pma8084_gpios 17 GPIO_ACTIVE_LOW>;
-
- port {
- panel_in: endpoint {
- remote-endpoint = <&mdss_dsi0_out>;
- };
- };
- };
-};
-
-&mdss_dsi0_out {
- remote-endpoint = <&panel_in>;
- data-lanes = <0 1 2 3>;
-};
-
-&mdss_dsi0_phy {
- status = "okay";
-
- vddio-supply = <&pma8084_l12>;
};
-&pma8084_gpios {
- gpio_keys_pin_a: gpio-keys-active-state {
- pins = "gpio2", "gpio3", "gpio5";
- function = "normal";
-
- bias-pull-up;
- power-source = <PMA8084_GPIO_S4>;
- };
-
- touchkey_pin: touchkey-int-state {
- pins = "gpio6";
- function = "normal";
- bias-disable;
- input-enable;
- power-source = <PMA8084_GPIO_S4>;
- };
-
- touch_pin: touchscreen-int-state {
- pins = "gpio8";
- function = "normal";
- bias-disable;
- input-enable;
- power-source = <PMA8084_GPIO_S4>;
- };
-
- panel_en_pin: panel-en-state {
- pins = "gpio14";
- function = "normal";
- bias-pull-up;
- power-source = <PMA8084_GPIO_S4>;
- qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
- };
-
- wlan_sleep_clk_pin: wlan-sleep-clk-state {
- pins = "gpio16";
- function = "func2";
-
- output-high;
- power-source = <PMA8084_GPIO_S4>;
- qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
- };
-
- panel_rst_pin: panel-rst-state {
- pins = "gpio17";
- function = "normal";
- bias-disable;
- power-source = <PMA8084_GPIO_S4>;
- qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
- };
-
- fuelgauge_pin: fuelgauge-int-state {
- pins = "gpio21";
- function = "normal";
- bias-disable;
- input-enable;
- power-source = <PMA8084_GPIO_S4>;
- };
+&i2c_led_gpio {
+ scl-gpios = <&tlmm 121 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&tlmm 120 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
};
-&remoteproc_adsp {
- status = "okay";
- cx-supply = <&pma8084_s2>;
-};
-
-&remoteproc_mss {
- status = "okay";
- cx-supply = <&pma8084_s2>;
- mss-supply = <&pma8084_s6>;
- mx-supply = <&pma8084_s1>;
- pll-supply = <&pma8084_l12>;
-};
-
-&rpm_requests {
- regulators-0 {
- compatible = "qcom,rpm-pma8084-regulators";
-
- pma8084_s1: s1 {
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <1050000>;
- regulator-always-on;
- };
-
- pma8084_s2: s2 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1050000>;
- };
-
- pma8084_s3: s3 {
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
- };
-
- pma8084_s4: s4 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_s5: s5 {
- regulator-min-microvolt = <2150000>;
- regulator-max-microvolt = <2150000>;
- };
-
- pma8084_s6: s6 {
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- };
-
- pma8084_l1: l1 {
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
- };
-
- pma8084_l2: l2 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- pma8084_l3: l3 {
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1200000>;
- };
-
- pma8084_l4: l4 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1225000>;
- };
-
- pma8084_l5: l5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_l6: l6 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_l7: l7 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_l8: l8 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_l9: l9 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- pma8084_l10: l10 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- pma8084_l11: l11 {
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
- };
-
- pma8084_l12: l12 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- pma8084_l13: l13 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- pma8084_l14: l14 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pma8084_l15: l15 {
- regulator-min-microvolt = <2050000>;
- regulator-max-microvolt = <2050000>;
- };
-
- pma8084_l16: l16 {
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- };
-
- pma8084_l17: l17 {
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <2850000>;
- };
-
- pma8084_l18: l18 {
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <2850000>;
- };
-
- pma8084_l19: l19 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- pma8084_l20: l20 {
- regulator-min-microvolt = <2950000>;
- regulator-max-microvolt = <2950000>;
- regulator-system-load = <200000>;
- regulator-allow-set-load;
- };
-
- pma8084_l21: l21 {
- regulator-min-microvolt = <2950000>;
- regulator-max-microvolt = <2950000>;
- regulator-system-load = <200000>;
- regulator-allow-set-load;
- };
-
- pma8084_l22: l22 {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3300000>;
- };
-
- pma8084_l23: l23 {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- };
-
- pma8084_l24: l24 {
- regulator-min-microvolt = <3075000>;
- regulator-max-microvolt = <3075000>;
- };
-
- pma8084_l25: l25 {
- regulator-min-microvolt = <2100000>;
- regulator-max-microvolt = <2100000>;
- };
-
- pma8084_l26: l26 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2050000>;
- };
-
- pma8084_l27: l27 {
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1225000>;
- };
-
- pma8084_lvs1: lvs1 {};
- pma8084_lvs2: lvs2 {};
- pma8084_lvs3: lvs3 {};
- pma8084_lvs4: lvs4 {};
-
- pma8084_5vs1: 5vs1 {};
- };
-};
-
-&sdhc_1 {
- status = "okay";
-
- vmmc-supply = <&pma8084_l20>;
- vqmmc-supply = <&pma8084_s4>;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc1_on>;
- pinctrl-1 = <&sdc1_off>;
-};
-
-&sdhc_2 {
- status = "okay";
- max-frequency = <100000000>;
- vmmc-supply = <&vreg_wlan>;
- vqmmc-supply = <&pma8084_s4>;
- non-removable;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_on>;
- pinctrl-1 = <&sdc2_off>;
-
- wifi@1 {
- reg = <1>;
- compatible = "brcm,bcm4329-fmac";
-
- interrupt-parent = <&tlmm>;
- interrupts = <92 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "host-wake";
-
- pinctrl-names = "default";
- pinctrl-0 = <&wlan_sleep_clk_pin &wifi_pin>;
- };
-};
-
-&sdhc_3 {
- status = "okay";
- max-frequency = <100000000>;
- vmmc-supply = <&pma8084_l21>;
- vqmmc-supply = <&pma8084_l13>;
-
- /*
- * cd-gpio is intentionally disabled. If enabled, an SD card
- * present during boot is not initialized correctly. Without
- * cd-gpios the driver resorts to polling, so hotplug works.
- */
- pinctrl-names = "default";
- pinctrl-0 = <&sdc3_on /* &sdhc3_cd_pin */>;
- /* cd-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>; */
-};
-
-&tlmm {
- /* This seems suspicious, but somebody with this device should look into it. */
- blsp2_uart2_pins_active: blsp2-uart2-pins-active-state {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "blsp_uart8";
- drive-strength = <8>;
- bias-disable;
- };
-
- blsp2_uart2_pins_sleep: blsp2-uart2-pins-sleep-state {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-down;
- };
-
- bt_pins: bt-pins-state {
- hostwake-pins {
- pins = "gpio75";
- function = "gpio";
- drive-strength = <16>;
- };
-
- devwake-pins {
- pins = "gpio91";
- function = "gpio";
- drive-strength = <2>;
- };
- };
-
- sdc1_on: sdhc1-on-state {
- clk-pins {
- pins = "sdc1_clk";
- drive-strength = <4>;
- bias-disable;
- };
-
- cmd-data-pins {
- pins = "sdc1_cmd", "sdc1_data";
- drive-strength = <4>;
- bias-pull-up;
- };
- };
-
- sdc3_on: sdc3-on-state {
- pins = "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40";
- function = "sdc3";
- drive-strength = <8>;
- bias-disable;
- };
-
- sdhc3_cd_pin: sdc3-cd-on-state {
- pins = "gpio62";
- function = "gpio";
-
- drive-strength = <2>;
- bias-disable;
- };
-
- sdc2_on: sdhc2-on-state {
- clk-pins {
- pins = "sdc2_clk";
- drive-strength = <6>;
- bias-disable;
- };
-
- cmd-data-pins {
- pins = "sdc2_cmd", "sdc2_data";
- drive-strength = <6>;
- bias-pull-up;
- };
- };
-
- i2c_touchkey_pins: i2c-touchkey-state {
- pins = "gpio95", "gpio96";
- function = "gpio";
- bias-pull-up;
- };
-
- i2c_led_gpioex_pins: i2c-led-gpioex-state {
- pins = "gpio120", "gpio121";
- function = "gpio";
- bias-pull-down;
- };
-
- gpioex_pin: gpioex-state {
- pins = "gpio145";
- function = "gpio";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- wifi_pin: wifi-state {
- pins = "gpio92";
- function = "gpio";
- bias-pull-down;
- };
-
- panel_te_pin: panel-state {
- pins = "gpio12";
- function = "mdp_vsync";
- drive-strength = <2>;
- bias-disable;
- };
-};
-
-&usb {
- status = "okay";
-
- phys = <&usb_hs1_phy>;
- phy-select = <&tcsr 0xb000 0>;
-
- hnp-disable;
- srp-disable;
- adp-disable;
-};
-
-&usb_hs1_phy {
- status = "okay";
-
- v1p8-supply = <&pma8084_l6>;
- v3p3-supply = <&pma8084_l24>;
-
- qcom,init-seq = /bits/ 8 <0x1 0x64>;
+&i2c_led_gpioex_pins {
+ pins = "gpio120", "gpio121";
};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-kltechn.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-kltechn.dts
new file mode 100644
index 000000000000..b902e31b16c2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-kltechn.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974pro-samsung-klte-common.dtsi"
+
+/ {
+ model = "Samsung Galaxy S5 China";
+ compatible = "samsung,kltechn", "samsung,klte", "qcom,msm8974pro", "qcom,msm8974";
+};
+
+&i2c_led_gpio {
+ scl-gpios = <&tlmm 61 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&tlmm 60 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
+&i2c_led_gpioex_pins {
+ pins = "gpio60", "gpio61";
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
index ee94741a26ed..409d1798de34 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
@@ -1,60 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-#include "qcom-msm8974pro.dtsi"
-#include "pm8841.dtsi"
-#include "pm8941.dtsi"
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/leds/common.h>
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include "qcom-msm8974pro-sony-xperia-shinano-common.dtsi"
/ {
model = "Sony Xperia Z2 Tablet";
compatible = "sony,xperia-castor", "qcom,msm8974pro", "qcom,msm8974";
chassis-type = "tablet";
- aliases {
- serial0 = &blsp1_uart2;
- serial1 = &blsp2_uart1;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_keys_pin_a>;
-
- key-volume-down {
- label = "volume_down";
- gpios = <&pm8941_gpios 2 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_VOLUMEDOWN>;
- };
-
- key-camera-snapshot {
- label = "camera_snapshot";
- gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_CAMERA>;
- };
-
- key-camera-focus {
- label = "camera_focus";
- gpios = <&pm8941_gpios 4 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_CAMERA_FOCUS>;
- };
-
- key-volume-up {
- label = "volume_up";
- gpios = <&pm8941_gpios 5 GPIO_ACTIVE_LOW>;
- linux,input-type = <1>;
- linux,code = <KEY_VOLUMEUP>;
- };
- };
-
vreg_bl_vddio: lcd-backlight-vddio {
compatible = "regulator-fixed";
regulator-name = "vreg_bl_vddio";
@@ -67,107 +18,15 @@
vin-supply = <&pm8941_s3>;
startup-delay-us = <70000>;
- pinctrl-names = "default";
pinctrl-0 = <&lcd_backlight_en_pin_a>;
- };
-
- vreg_vsp: lcd-dcdc-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vreg_vsp";
- regulator-min-microvolt = <5600000>;
- regulator-max-microvolt = <5600000>;
-
- gpio = <&pm8941_gpios 20 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-names = "default";
- pinctrl-0 = <&lcd_dcdc_en_pin_a>;
- };
-
- vreg_boost: vreg-boost {
- compatible = "regulator-fixed";
-
- regulator-name = "vreg-boost";
- regulator-min-microvolt = <3150000>;
- regulator-max-microvolt = <3150000>;
-
- regulator-always-on;
- regulator-boot-on;
-
- gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
pinctrl-names = "default";
- pinctrl-0 = <&boost_bypass_n_pin>;
};
-
- vreg_vph_pwr: vreg-vph-pwr {
- compatible = "regulator-fixed";
- regulator-name = "vph-pwr";
-
- regulator-min-microvolt = <3600000>;
- regulator-max-microvolt = <3600000>;
-
- regulator-always-on;
- };
-
- vreg_wlan: wlan-regulator {
- compatible = "regulator-fixed";
-
- regulator-name = "wl-reg";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&pm8941_gpios 18 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-names = "default";
- pinctrl-0 = <&wlan_regulator_pin>;
- };
-};
-
-&blsp1_uart2 {
- status = "okay";
};
-&blsp2_i2c2 {
- status = "okay";
+&blsp2_i2c5 {
clock-frequency = <355000>;
- synaptics@2c {
- compatible = "syna,rmi4-i2c";
- reg = <0x2c>;
-
- interrupt-parent = <&tlmm>;
- interrupts = <86 IRQ_TYPE_EDGE_FALLING>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- vdd-supply = <&pm8941_l22>;
- vio-supply = <&pm8941_lvs3>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&ts_int_pin>;
-
- syna,startup-delay-ms = <100>;
-
- rmi4-f01@1 {
- reg = <0x1>;
- syna,nosleep-mode = <1>;
- };
-
- rmi4-f11@11 {
- reg = <0x11>;
- syna,sensor-type = <1>;
- touchscreen-inverted-x;
- };
- };
-};
-
-&blsp2_i2c5 {
status = "okay";
- clock-frequency = <355000>;
lp8566_wled: backlight@2c {
compatible = "ti,lp8556";
@@ -182,42 +41,52 @@
rom-addr = /bits/ 8 <0xa0>;
rom-val = /bits/ 8 <0xff>;
};
+
rom-a1h {
rom-addr = /bits/ 8 <0xa1>;
rom-val = /bits/ 8 <0x3f>;
};
+
rom-a2h {
rom-addr = /bits/ 8 <0xa2>;
rom-val = /bits/ 8 <0x20>;
};
+
rom-a3h {
rom-addr = /bits/ 8 <0xa3>;
rom-val = /bits/ 8 <0x5e>;
};
+
rom-a4h {
rom-addr = /bits/ 8 <0xa4>;
rom-val = /bits/ 8 <0x02>;
};
+
rom-a5h {
rom-addr = /bits/ 8 <0xa5>;
rom-val = /bits/ 8 <0x04>;
};
+
rom-a6h {
rom-addr = /bits/ 8 <0xa6>;
rom-val = /bits/ 8 <0x80>;
};
+
rom-a7h {
rom-addr = /bits/ 8 <0xa7>;
rom-val = /bits/ 8 <0xf7>;
};
+
rom-a9h {
rom-addr = /bits/ 8 <0xa9>;
rom-val = /bits/ 8 <0x80>;
};
+
rom-aah {
rom-addr = /bits/ 8 <0xaa>;
rom-val = /bits/ 8 <0x0f>;
};
+
rom-aeh {
rom-addr = /bits/ 8 <0xae>;
rom-val = /bits/ 8 <0x0f>;
@@ -232,8 +101,8 @@
compatible = "brcm,bcm43438-bt";
max-speed = <3000000>;
- pinctrl-names = "default";
pinctrl-0 = <&bt_host_wake_pin>, <&bt_dev_wake_pin>, <&bt_reg_on_pin>;
+ pinctrl-names = "default";
host-wakeup-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>;
device-wakeup-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
@@ -241,339 +110,26 @@
};
};
-&pm8941_coincell {
- status = "okay";
-
- qcom,rset-ohms = <2100>;
- qcom,vset-millivolts = <3000>;
-};
-
&pm8941_gpios {
- gpio_keys_pin_a: gpio-keys-active-state {
- pins = "gpio2", "gpio5";
- function = "normal";
-
- bias-pull-up;
- power-source = <PM8941_GPIO_S3>;
- };
-
bt_reg_on_pin: bt-reg-on-state {
pins = "gpio16";
function = "normal";
-
output-low;
power-source = <PM8941_GPIO_S3>;
};
-
- wlan_sleep_clk_pin: wl-sleep-clk-state {
- pins = "gpio17";
- function = "func2";
-
- output-high;
- power-source = <PM8941_GPIO_S3>;
- };
-
- wlan_regulator_pin: wl-reg-active-state {
- pins = "gpio18";
- function = "normal";
-
- bias-disable;
- power-source = <PM8941_GPIO_S3>;
- };
-
- lcd_dcdc_en_pin_a: lcd-dcdc-en-active-state {
- pins = "gpio20";
- function = "normal";
-
- bias-disable;
- power-source = <PM8941_GPIO_S3>;
- input-disable;
- output-low;
- };
-
-};
-
-&pm8941_lpg {
- status = "okay";
-
- qcom,power-source = <1>;
-
- multi-led {
- color = <LED_COLOR_ID_RGB>;
- function = LED_FUNCTION_STATUS;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- led@5 {
- reg = <5>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@6 {
- reg = <6>;
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led@7 {
- reg = <7>;
- color = <LED_COLOR_ID_RED>;
- };
- };
-};
-
-&remoteproc_adsp {
- cx-supply = <&pm8841_s2>;
- status = "okay";
-};
-
-&remoteproc_mss {
- cx-supply = <&pm8841_s2>;
- mss-supply = <&pm8841_s3>;
- mx-supply = <&pm8841_s1>;
- pll-supply = <&pm8941_l12>;
- status = "okay";
};
&rpm_requests {
- regulators-0 {
- compatible = "qcom,rpm-pm8841-regulators";
-
- pm8841_s1: s1 {
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <1050000>;
- };
-
- pm8841_s2: s2 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1050000>;
- };
-
- pm8841_s3: s3 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1050000>;
- };
-
- pm8841_s4: s4 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1050000>;
- };
- };
-
regulators-1 {
- compatible = "qcom,rpm-pm8941-regulators";
-
- vdd_l1_l3-supply = <&pm8941_s1>;
- vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
- vdd_l4_l11-supply = <&pm8941_s1>;
- vdd_l5_l7-supply = <&pm8941_s2>;
- vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
- vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
- vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
- vdd_l21-supply = <&vreg_boost>;
-
- pm8941_s1: s1 {
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm8941_s2: s2 {
- regulator-min-microvolt = <2150000>;
- regulator-max-microvolt = <2150000>;
- regulator-boot-on;
- };
-
- pm8941_s3: s3 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-system-load = <154000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm8941_s4: s4 {
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- pm8941_l1: l1 {
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm8941_l2: l2 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- pm8941_l3: l3 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- pm8941_l4: l4 {
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
- };
-
- pm8941_l5: l5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm8941_l6: l6 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- };
-
- pm8941_l7: l7 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- };
-
- pm8941_l8: l8 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm8941_l9: l9 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
pm8941_l11: l11 {
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1350000>;
};
- pm8941_l12: l12 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm8941_l13: l13 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- regulator-boot-on;
- };
-
- pm8941_l14: l14 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm8941_l15: l15 {
- regulator-min-microvolt = <2050000>;
- regulator-max-microvolt = <2050000>;
- };
-
- pm8941_l16: l16 {
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- };
-
- pm8941_l17: l17 {
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- };
-
- pm8941_l18: l18 {
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <2850000>;
- };
-
pm8941_l19: l19 {
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
};
-
- pm8941_l20: l20 {
- regulator-min-microvolt = <2950000>;
- regulator-max-microvolt = <2950000>;
- regulator-system-load = <500000>;
- regulator-allow-set-load;
- regulator-boot-on;
- };
-
- pm8941_l21: l21 {
- regulator-min-microvolt = <2950000>;
- regulator-max-microvolt = <2950000>;
- regulator-boot-on;
- };
-
- pm8941_l22: l22 {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- };
-
- pm8941_l23: l23 {
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- };
-
- pm8941_l24: l24 {
- regulator-min-microvolt = <3075000>;
- regulator-max-microvolt = <3075000>;
- regulator-boot-on;
- };
-
- pm8941_lvs3: lvs3 {};
- };
-};
-
-&sdhc_1 {
- status = "okay";
-
- vmmc-supply = <&pm8941_l20>;
- vqmmc-supply = <&pm8941_s3>;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc1_on>;
- pinctrl-1 = <&sdc1_off>;
-};
-
-&sdhc_2 {
- status = "okay";
-
- vmmc-supply = <&pm8941_l21>;
- vqmmc-supply = <&pm8941_l13>;
-
- cd-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_on>;
- pinctrl-1 = <&sdc2_off>;
-};
-
-&sdhc_3 {
- status = "okay";
-
- max-frequency = <100000000>;
- vmmc-supply = <&vreg_wlan>;
- non-removable;
-
- pinctrl-names = "default";
- pinctrl-0 = <&sdc3_on>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- bcrmf@1 {
- compatible = "brcm,bcm4339-fmac", "brcm,bcm4329-fmac";
- reg = <1>;
-
- brcm,drive-strength = <10>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&wlan_sleep_clk_pin>;
};
};
@@ -591,75 +147,13 @@
status = "okay";
};
-&tlmm {
- lcd_backlight_en_pin_a: lcd-backlight-vddio-state {
- pins = "gpio69";
- function = "gpio";
- drive-strength = <10>;
- output-low;
- bias-disable;
- };
-
- sdc1_on: sdc1-on-state {
- clk-pins {
- pins = "sdc1_clk";
- drive-strength = <16>;
- bias-disable;
- };
-
- cmd-data-pins {
- pins = "sdc1_cmd", "sdc1_data";
- drive-strength = <10>;
- bias-pull-up;
- };
- };
-
- sdc2_on: sdc2-on-state {
- clk-pins {
- pins = "sdc2_clk";
- drive-strength = <6>;
- bias-disable;
- };
-
- cmd-data-pins {
- pins = "sdc2_cmd", "sdc2_data";
- drive-strength = <6>;
- bias-pull-up;
- };
-
- cd-pins {
- pins = "gpio62";
- function = "gpio";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- sdc3_on: sdc3-on-state {
- clk-pins {
- pins = "gpio40";
- function = "sdc3";
- drive-strength = <10>;
- bias-disable;
- };
-
- cmd-pins {
- pins = "gpio39";
- function = "sdc3";
- drive-strength = <10>;
- bias-pull-up;
- };
-
- data-pins {
- pins = "gpio35", "gpio36", "gpio37", "gpio38";
- function = "sdc3";
- drive-strength = <10>;
- bias-pull-up;
- };
- };
+&synaptics_touchscreen {
+ vio-supply = <&pm8941_lvs3>;
+};
- ts_int_pin: ts-int-pin-state {
- pins = "gpio86";
+&tlmm {
+ bt_dev_wake_pin: bt-dev-wake-state {
+ pins = "gpio96";
function = "gpio";
drive-strength = <2>;
bias-disable;
@@ -673,33 +167,11 @@
output-low;
};
- bt_dev_wake_pin: bt-dev-wake-state {
- pins = "gpio96";
+ lcd_backlight_en_pin_a: lcd-backlight-vddio-state {
+ pins = "gpio69";
function = "gpio";
- drive-strength = <2>;
+ drive-strength = <10>;
+ output-low;
bias-disable;
};
};
-
-&usb {
- status = "okay";
-
- phys = <&usb_hs1_phy>;
- phy-select = <&tcsr 0xb000 0>;
- extcon = <&smbb>, <&usb_id>;
- vbus-supply = <&chg_otg>;
-
- hnp-disable;
- srp-disable;
- adp-disable;
-};
-
-&usb_hs1_phy {
- status = "okay";
-
- v1p8-supply = <&pm8941_l6>;
- v3p3-supply = <&pm8941_l24>;
-
- extcon = <&smbb>;
- qcom,init-seq = /bits/ 8 <0x1 0x64>;
-};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi
new file mode 100644
index 000000000000..e129bb1bd6ec
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974pro.dtsi"
+#include "pm8841.dtsi"
+#include "pm8941.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+ aliases {
+ mmc0 = &sdhc_1;
+ mmc1 = &sdhc_2;
+ serial0 = &blsp1_uart2;
+ serial1 = &blsp2_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_pin_a>;
+ pinctrl-names = "default";
+
+ key-volume-down {
+ label = "volume_down";
+ gpios = <&pm8941_gpios 2 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+
+ key-volume-up {
+ label = "volume_up";
+ gpios = <&pm8941_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ vreg_vsp: lcd-dcdc-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_vsp";
+ regulator-min-microvolt = <5600000>;
+ regulator-max-microvolt = <5600000>;
+
+ gpio = <&pm8941_gpios 20 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&lcd_dcdc_en_pin_a>;
+ pinctrl-names = "default";
+ };
+
+ vreg_boost: vreg-boost {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vreg-boost";
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3150000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&boost_bypass_n_pin>;
+ };
+
+ vreg_vph_pwr: vreg-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph-pwr";
+
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+
+ regulator-always-on;
+ };
+
+ vreg_wlan: wlan-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8941_gpios 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wlan_regulator_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&blsp2_i2c2 {
+ clock-frequency = <355000>;
+
+ status = "okay";
+
+ synaptics_touchscreen: synaptics@2c {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x2c>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <86 IRQ_TYPE_EDGE_FALLING>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd-supply = <&pm8941_l22>;
+ /* vio-supply is set in dts */
+
+ pinctrl-0 = <&ts_int_pin>;
+ pinctrl-names = "default";
+
+ syna,startup-delay-ms = <100>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ syna,sensor-type = <1>;
+ touchscreen-inverted-x;
+ };
+ };
+};
+
+&pm8941_coincell {
+ qcom,rset-ohms = <2100>;
+ qcom,vset-millivolts = <3000>;
+
+ status = "okay";
+};
+
+&pm8941_gpios {
+ gpio_keys_pin_a: gpio-keys-active-state {
+ pins = "gpio2", "gpio5";
+ function = "normal";
+ bias-pull-up;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ wlan_sleep_clk_pin: wl-sleep-clk-state {
+ pins = "gpio17";
+ function = "func2";
+ output-high;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ wlan_regulator_pin: wl-reg-active-state {
+ pins = "gpio18";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ lcd_dcdc_en_pin_a: lcd-dcdc-en-active-state {
+ pins = "gpio20";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8941_GPIO_S3>;
+ input-disable;
+ output-low;
+ };
+};
+
+&pm8941_lpg {
+ qcom,power-source = <1>;
+
+ status = "okay";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@5 {
+ reg = <5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ reg = <6>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@7 {
+ reg = <7>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+};
+
+&pm8941_vib {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ cx-supply = <&pm8841_s2>;
+ status = "okay";
+};
+
+&remoteproc_mss {
+ cx-supply = <&pm8841_s2>;
+ mss-supply = <&pm8841_s3>;
+ mx-supply = <&pm8841_s1>;
+ pll-supply = <&pm8941_l12>;
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm8841-regulators";
+
+ pm8841_s1: s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s2: s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s3: s3 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s4: s4 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,rpm-pm8941-regulators";
+
+ vdd_l1_l3-supply = <&pm8941_s1>;
+ vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
+ vdd_l4_l11-supply = <&pm8941_s1>;
+ vdd_l5_l7-supply = <&pm8941_s2>;
+ vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
+ vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
+ vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
+ vdd_l21-supply = <&vreg_boost>;
+
+ pm8941_s1: s1 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_s2: s2 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-boot-on;
+ };
+
+ pm8941_s3: s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-system-load = <154000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_s4: s4 {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ pm8941_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l3: l3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l4: l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8941_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8941_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l15: l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8941_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8941_l17: l17 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8941_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8941_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <500000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm8941_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l22: l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8941_l23: l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ pm8941_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ regulator-boot-on;
+ };
+
+ pm8941_lvs3: lvs3 {};
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8941_l20>;
+ vqmmc-supply = <&pm8941_s3>;
+
+ pinctrl-0 = <&sdc1_on>;
+ pinctrl-1 = <&sdc1_off>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8941_l21>;
+ vqmmc-supply = <&pm8941_l13>;
+
+ cd-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&sdc2_on>;
+ pinctrl-1 = <&sdc2_off>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
+&sdhc_3 {
+ max-frequency = <100000000>;
+ vmmc-supply = <&vreg_wlan>;
+ non-removable;
+
+ pinctrl-0 = <&sdc3_on>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ wifi@1 {
+ compatible = "brcm,bcm4339-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+
+ brcm,drive-strength = <10>;
+
+ pinctrl-0 = <&wlan_sleep_clk_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&tlmm {
+ sdc1_on: sdc1-on-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ sdc2_on: sdc2-on-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc2_cmd", "sdc2_data";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ cd-pins {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ sdc3_on: sdc3-on-state {
+ clk-pins {
+ pins = "gpio40";
+ function = "sdc3";
+ drive-strength = <10>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "gpio39";
+ function = "sdc3";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "gpio35", "gpio36", "gpio37", "gpio38";
+ function = "sdc3";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ ts_int_pin: ts-int-pin-state {
+ pins = "gpio86";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&usb {
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+ extcon = <&smbb>, <&usb_id>;
+ vbus-supply = <&chg_otg>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ status = "okay";
+};
+
+&usb_hs1_phy {
+ v1p8-supply = <&pm8941_l6>;
+ v3p3-supply = <&pm8941_l24>;
+
+ extcon = <&smbb>;
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-leo.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-leo.dts
new file mode 100644
index 000000000000..1ed6e1cc21d5
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-leo.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974pro-sony-xperia-shinano-common.dtsi"
+
+/ {
+ model = "Sony Xperia Z3";
+ compatible = "sony,xperia-leo", "qcom,msm8974pro", "qcom,msm8974";
+ chassis-type = "handset";
+
+ gpio-keys {
+ key-camera-snapshot {
+ label = "camera_snapshot";
+ gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA>;
+ debounce-interval = <15>;
+ };
+
+ key-camera-focus {
+ label = "camera_focus";
+ gpios = <&pm8941_gpios 4 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA_FOCUS>;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&gpio_keys_pin_a {
+ pins = "gpio2", "gpio3", "gpio4", "gpio5";
+};
+
+&smbb {
+ usb-charge-current-limit = <1500000>;
+ qcom,fast-charge-safe-current = <3000000>;
+ qcom,fast-charge-current-limit = <2150000>;
+ qcom,fast-charge-safe-voltage = <4400000>;
+ qcom,fast-charge-high-threshold-voltage = <4350000>;
+ qcom,auto-recharge-threshold-voltage = <4280000>;
+ qcom,minimum-input-voltage = <4200000>;
+
+ status = "okay";
+};
+
+&synaptics_touchscreen {
+ vio-supply = <&pm8941_s3>;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
index edc9aaf828c8..68fa5859d263 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
@@ -378,6 +378,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie_ep: pcie-ep@1c00000 {
diff --git a/arch/arm/boot/dts/renesas/r7s72100.dtsi b/arch/arm/boot/dts/renesas/r7s72100.dtsi
index e6d8da6faffb..08ea4c551ed0 100644
--- a/arch/arm/boot/dts/renesas/r7s72100.dtsi
+++ b/arch/arm/boot/dts/renesas/r7s72100.dtsi
@@ -125,6 +125,7 @@
<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF0>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -138,6 +139,7 @@
<GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF1>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -151,6 +153,7 @@
<GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF2>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -164,6 +167,7 @@
<GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF3>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -177,6 +181,7 @@
<GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF4>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -190,6 +195,7 @@
<GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF5>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -203,6 +209,7 @@
<GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF6>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
@@ -216,6 +223,7 @@
<GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "bri";
clocks = <&mstp4_clks R7S72100_CLK_SCIF7>;
clock-names = "fck";
power-domains = <&cpg_clocks>;
diff --git a/arch/arm/boot/dts/renesas/r8a73a4.dtsi b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
index ac654ff45d0e..9a2ae282a46b 100644
--- a/arch/arm/boot/dts/renesas/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
@@ -60,6 +60,32 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a73a4", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&mstp1_clks R8A73A4_CLK_TMU0>;
+ clock-names = "fck";
+ power-domains = <&pd_c5>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a73a4", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&mstp1_clks R8A73A4_CLK_TMU3>;
+ clock-names = "fck";
+ power-domains = <&pd_a3r>;
+ status = "disabled";
+ };
+
dbsc1: memory-controller@e6790000 {
compatible = "renesas,dbsc-r8a73a4";
reg = <0 0xe6790000 0 0x10000>;
@@ -654,6 +680,17 @@
};
/* Gate clocks */
+ mstp1_clks: mstp1_clks@e6150134 {
+ compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150134 0 4>, <0 0xe6150038 0 4>;
+ clocks = <&cp_clk>, <&mp_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_TMU0 R8A73A4_CLK_TMU3
+ >;
+ clock-output-names =
+ "tmu0", "tmu3";
+ };
mstp2_clks: mstp2_clks@e6150138 {
compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150138 0 4>, <0 0xe6150040 0 4>;
diff --git a/arch/arm/boot/dts/renesas/r8a7742.dtsi b/arch/arm/boot/dts/renesas/r8a7742.dtsi
index 16d146db824a..d55c344c1cd2 100644
--- a/arch/arm/boot/dts/renesas/r8a7742.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7742.dtsi
@@ -404,6 +404,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7742", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7742", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7742", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7742", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7742",
"renesas,rcar-gen2-thermal";
diff --git a/arch/arm/boot/dts/renesas/r8a7743.dtsi b/arch/arm/boot/dts/renesas/r8a7743.dtsi
index 2245d19a23bb..d917c0a971f5 100644
--- a/arch/arm/boot/dts/renesas/r8a7743.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7743.dtsi
@@ -329,6 +329,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7743", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7743", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7743", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7743", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7743",
"renesas,rcar-gen2-thermal";
diff --git a/arch/arm/boot/dts/renesas/r8a7744.dtsi b/arch/arm/boot/dts/renesas/r8a7744.dtsi
index aa13841f9781..754859c38a93 100644
--- a/arch/arm/boot/dts/renesas/r8a7744.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7744.dtsi
@@ -329,6 +329,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7744", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7744", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7744", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7744", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7744",
"renesas,rcar-gen2-thermal";
diff --git a/arch/arm/boot/dts/renesas/r8a7745.dtsi b/arch/arm/boot/dts/renesas/r8a7745.dtsi
index 44688b8431c3..168298300490 100644
--- a/arch/arm/boot/dts/renesas/r8a7745.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7745.dtsi
@@ -304,6 +304,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7745", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7745", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7745", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7745", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
diff --git a/arch/arm/boot/dts/renesas/r8a77470.dtsi b/arch/arm/boot/dts/renesas/r8a77470.dtsi
index a5cf663a0118..2375438d83c9 100644
--- a/arch/arm/boot/dts/renesas/r8a77470.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a77470.dtsi
@@ -241,6 +241,50 @@
resets = <&cpg 407>;
};
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a77470", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a77470", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a77470", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
icram0: sram@e63a0000 {
compatible = "mmio-sram";
reg = <0 0xe63a0000 0 0x12000>;
diff --git a/arch/arm/boot/dts/renesas/r8a7790.dtsi b/arch/arm/boot/dts/renesas/r8a7790.dtsi
index 46fb81f5062f..583b74a9f071 100644
--- a/arch/arm/boot/dts/renesas/r8a7790.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7790.dtsi
@@ -434,6 +434,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7790", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7790", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7790", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7790", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7790",
"renesas,rcar-gen2-thermal",
diff --git a/arch/arm/boot/dts/renesas/r8a7791.dtsi b/arch/arm/boot/dts/renesas/r8a7791.dtsi
index b9d34147628e..de08ceb62230 100644
--- a/arch/arm/boot/dts/renesas/r8a7791.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7791.dtsi
@@ -351,6 +351,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7791", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7791", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7791", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7791", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7791",
"renesas,rcar-gen2-thermal",
diff --git a/arch/arm/boot/dts/renesas/r8a7792.dtsi b/arch/arm/boot/dts/renesas/r8a7792.dtsi
index ecfab3ff59e8..7defeb8e4cd1 100644
--- a/arch/arm/boot/dts/renesas/r8a7792.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7792.dtsi
@@ -351,6 +351,65 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7792", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7792", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7792", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7792", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
icram0: sram@e63a0000 {
compatible = "mmio-sram";
reg = <0 0xe63a0000 0 0x12000>;
diff --git a/arch/arm/boot/dts/renesas/r8a7793.dtsi b/arch/arm/boot/dts/renesas/r8a7793.dtsi
index f51bf687f4bd..d32a9d5d3faa 100644
--- a/arch/arm/boot/dts/renesas/r8a7793.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7793.dtsi
@@ -326,6 +326,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7793", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7793", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7793", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7793", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
thermal: thermal@e61f0000 {
compatible = "renesas,thermal-r8a7793",
"renesas,rcar-gen2-thermal",
diff --git a/arch/arm/boot/dts/renesas/r8a7794.dtsi b/arch/arm/boot/dts/renesas/r8a7794.dtsi
index 371dd4715dde..f37f094cecc8 100644
--- a/arch/arm/boot/dts/renesas/r8a7794.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7794.dtsi
@@ -292,6 +292,64 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7794", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@fff60000 {
+ compatible = "renesas,tmu-r8a7794", "renesas,tmu";
+ reg = <0 0xfff60000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 111>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ resets = <&cpg 111>;
+ status = "disabled";
+ };
+
+ tmu2: timer@fff70000 {
+ compatible = "renesas,tmu-r8a7794", "renesas,tmu";
+ reg = <0 0xfff70000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu3: timer@fff80000 {
+ compatible = "renesas,tmu-r8a7794", "renesas,tmu";
+ reg = <0 0xfff80000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
diff --git a/arch/arm/boot/dts/renesas/r9a06g032.dtsi b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
index fa63e1afc4ef..45f60eeeaaa1 100644
--- a/arch/arm/boot/dts/renesas/r9a06g032.dtsi
+++ b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
@@ -319,7 +319,6 @@
gmac2: ethernet@44002000 {
compatible = "renesas,r9a06g032-gmac", "renesas,rzn1-gmac", "snps,dwmac";
reg = <0x44002000 0x2000>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/samsung/exynos3250.dtsi b/arch/arm/boot/dts/samsung/exynos3250.dtsi
index 3f1015edab43..b6c3826a9424 100644
--- a/arch/arm/boot/dts/samsung/exynos3250.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos3250.dtsi
@@ -826,6 +826,7 @@
samsung,spi-src-clk = <0>;
pinctrl-names = "default";
pinctrl-0 = <&spi0_bus>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -842,6 +843,7 @@
samsung,spi-src-clk = <0>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_bus>;
+ fifo-depth = <64>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/samsung/exynos4.dtsi b/arch/arm/boot/dts/samsung/exynos4.dtsi
index 7f981b5c0d64..ed47d0ce04e1 100644
--- a/arch/arm/boot/dts/samsung/exynos4.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos4.dtsi
@@ -621,6 +621,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi0_bus>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -636,6 +637,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi1_bus>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -651,6 +653,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi2_bus>;
+ fifo-depth = <64>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
index b566f878ed84..18f4f494093b 100644
--- a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
@@ -88,7 +88,7 @@
&keypad {
samsung,keypad-num-rows = <2>;
samsung,keypad-num-columns = <8>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-names = "default";
pinctrl-0 = <&keypad_rows &keypad_cols>;
diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
index e5254e32aa8f..9bc05961577d 100644
--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
@@ -45,6 +45,12 @@
/* Default S-BOOT bootloader loads initramfs here */
linux,initrd-start = <0x42000000>;
linux,initrd-end = <0x42800000>;
+
+ /*
+ * Stock bootloader provides incorrect memory size in ATAG_MEM;
+ * override it here
+ */
+ linux,usable-memory-range = <0x40000000 0x3fc00000>;
};
firmware@204f000 {
diff --git a/arch/arm/boot/dts/samsung/exynos4412-origen.dts b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
index 23b151645d66..10ab7bc90f50 100644
--- a/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
@@ -453,7 +453,7 @@
&keypad {
samsung,keypad-num-rows = <3>;
samsung,keypad-num-columns = <2>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-0 = <&keypad_rows &keypad_cols>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
index 715dfcba1417..c83fb250e664 100644
--- a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
@@ -69,7 +69,7 @@
&keypad {
samsung,keypad-num-rows = <3>;
samsung,keypad-num-columns = <8>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-0 = <&keypad_rows &keypad_cols>;
pinctrl-names = "default";
@@ -105,31 +105,31 @@
linux,code = <6>;
};
- key-A {
+ key-a {
keypad,row = <2>;
keypad,column = <6>;
linux,code = <30>;
};
- key-B {
+ key-b {
keypad,row = <2>;
keypad,column = <7>;
linux,code = <48>;
};
- key-C {
+ key-c {
keypad,row = <0>;
keypad,column = <5>;
linux,code = <46>;
};
- key-D {
+ key-d {
keypad,row = <2>;
keypad,column = <5>;
linux,code = <32>;
};
- key-E {
+ key-e {
keypad,row = <0>;
keypad,column = <7>;
linux,code = <18>;
diff --git a/arch/arm/boot/dts/samsung/exynos5250.dtsi b/arch/arm/boot/dts/samsung/exynos5250.dtsi
index 99c84bebf25a..b9e7c4938818 100644
--- a/arch/arm/boot/dts/samsung/exynos5250.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos5250.dtsi
@@ -511,6 +511,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi0_bus>;
+ fifo-depth = <256>;
};
spi_1: spi@12d30000 {
@@ -526,6 +527,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi1_bus>;
+ fifo-depth = <64>;
};
spi_2: spi@12d40000 {
@@ -541,6 +543,7 @@
clock-names = "spi", "spi_busclk0";
pinctrl-names = "default";
pinctrl-0 = <&spi2_bus>;
+ fifo-depth = <64>;
};
mmc_0: mmc@12200000 {
diff --git a/arch/arm/boot/dts/samsung/exynos5420.dtsi b/arch/arm/boot/dts/samsung/exynos5420.dtsi
index 25ed90374679..196c6d04675a 100644
--- a/arch/arm/boot/dts/samsung/exynos5420.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos5420.dtsi
@@ -658,6 +658,7 @@
pinctrl-0 = <&spi0_bus>;
clocks = <&clock CLK_SPI0>, <&clock CLK_SCLK_SPI0>;
clock-names = "spi", "spi_busclk0";
+ fifo-depth = <256>;
status = "disabled";
};
@@ -674,6 +675,7 @@
pinctrl-0 = <&spi1_bus>;
clocks = <&clock CLK_SPI1>, <&clock CLK_SCLK_SPI1>;
clock-names = "spi", "spi_busclk0";
+ fifo-depth = <64>;
status = "disabled";
};
@@ -690,6 +692,7 @@
pinctrl-0 = <&spi2_bus>;
clocks = <&clock CLK_SPI2>, <&clock CLK_SCLK_SPI2>;
clock-names = "spi", "spi_busclk0";
+ fifo-depth = <64>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/samsung/exynos5800-peach-pi.dts b/arch/arm/boot/dts/samsung/exynos5800-peach-pi.dts
index 9bbbdce9103a..bb019868b996 100644
--- a/arch/arm/boot/dts/samsung/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/samsung/exynos5800-peach-pi.dts
@@ -185,7 +185,7 @@
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <2>;
- samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
ports {
port {
diff --git a/arch/arm/boot/dts/samsung/s5pv210.dtsi b/arch/arm/boot/dts/samsung/s5pv210.dtsi
index ed560c9a3aa1..34e8a3d5efa5 100644
--- a/arch/arm/boot/dts/samsung/s5pv210.dtsi
+++ b/arch/arm/boot/dts/samsung/s5pv210.dtsi
@@ -72,7 +72,7 @@
#size-cells = <1>;
ranges;
- onenand: onenand@b0600000 {
+ onenand: nand-controller@b0600000 {
compatible = "samsung,s5pv210-onenand";
reg = <0xb0600000 0x2000>,
<0xb0000000 0x20000>,
@@ -82,7 +82,7 @@
clocks = <&clocks CLK_NANDXL>, <&clocks DOUT_FLASH>;
clock-names = "bus", "onenand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -161,6 +161,7 @@
pinctrl-0 = <&spi0_bus>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -177,6 +178,7 @@
pinctrl-0 = <&spi1_bus>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
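With #address-cells = <1> and #size-cells = <0>, a child of the renamed nand-controller node is addressed by a single cell (its chip select) rather than a memory range. A rough sketch of the addressing this implies; the child node is hypothetical, and the exact child schema remains whatever the samsung,s5pv210-onenand binding defines:

    nand-controller@b0600000 {
            /* ... properties as in the hunk above ... */
            #address-cells = <1>;
            #size-cells = <0>;

            flash@0 {
                    reg = <0>;      /* one cell: chip select, no size */
            };
    };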
diff --git a/arch/arm/boot/dts/st/stm32f746.dtsi b/arch/arm/boot/dts/st/stm32f746.dtsi
index 65c72b6fcc83..2537b3d47e6f 100644
--- a/arch/arm/boot/dts/st/stm32f746.dtsi
+++ b/arch/arm/boot/dts/st/stm32f746.dtsi
@@ -257,23 +257,6 @@
status = "disabled";
};
- can3: can@40003400 {
- compatible = "st,stm32f4-bxcan";
- reg = <0x40003400 0x200>;
- interrupts = <104>, <105>, <106>, <107>;
- interrupt-names = "tx", "rx0", "rx1", "sce";
- resets = <&rcc STM32F7_APB1_RESET(CAN3)>;
- clocks = <&rcc 0 STM32F7_APB1_CLOCK(CAN3)>;
- st,gcan = <&gcan3>;
- status = "disabled";
- };
-
- gcan3: gcan@40003600 {
- compatible = "st,stm32f4-gcan", "syscon";
- reg = <0x40003600 0x200>;
- clocks = <&rcc 0 STM32F7_APB1_CLOCK(CAN3)>;
- };
-
spi2: spi@40003800 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/st/stm32f769.dtsi b/arch/arm/boot/dts/st/stm32f769.dtsi
index 4e7d9032149c..e8cbb99e81a6 100644
--- a/arch/arm/boot/dts/st/stm32f769.dtsi
+++ b/arch/arm/boot/dts/st/stm32f769.dtsi
@@ -7,6 +7,23 @@
/ {
soc {
+ can3: can@40003400 {
+ compatible = "st,stm32f4-bxcan";
+ reg = <0x40003400 0x200>;
+ interrupts = <104>, <105>, <106>, <107>;
+ interrupt-names = "tx", "rx0", "rx1", "sce";
+ resets = <&rcc STM32F7_APB1_RESET(CAN3)>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(CAN3)>;
+ st,gcan = <&gcan3>;
+ status = "disabled";
+ };
+
+ gcan3: gcan@40003600 {
+ compatible = "st,stm32f4-gcan", "syscon";
+ reg = <0x40003600 0x200>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(CAN3)>;
+ };
+
dsi: dsi@40016c00 {
compatible = "st,stm32-dsi";
reg = <0x40016c00 0x800>;
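These are the can3/gcan3 nodes dropped from stm32f746.dtsi in the previous hunk; carrying them in stm32f769.dtsi keeps the third bxCAN instance out of DTs for parts that lack it (it is a feature of the larger STM32F76x/F77x devices rather than the F745/F746). Boards enable it as before; a minimal sketch with a hypothetical pin group:

    &can3 {
            pinctrl-names = "default";
            pinctrl-0 = <&can3_pins_a>;     /* hypothetical pin group */
            status = "okay";
    };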
diff --git a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
index 27e0c3826789..32c5d8a1e06a 100644
--- a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
@@ -47,6 +47,63 @@
};
};
+ ltdc_pins_a: ltdc-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 9, AF13)>, /* LCD_CLK */
+ <STM32_PINMUX('C', 6, AF14)>, /* LCD_HSYNC */
+ <STM32_PINMUX('G', 4, AF11)>, /* LCD_VSYNC */
+ <STM32_PINMUX('H', 9, AF11)>, /* LCD_DE */
+ <STM32_PINMUX('G', 7, AF14)>, /* LCD_R2 */
+ <STM32_PINMUX('B', 12, AF13)>, /* LCD_R3 */
+ <STM32_PINMUX('D', 14, AF14)>, /* LCD_R4 */
+ <STM32_PINMUX('E', 7, AF14)>, /* LCD_R5 */
+ <STM32_PINMUX('E', 13, AF14)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 9, AF14)>, /* LCD_R7 */
+ <STM32_PINMUX('H', 13, AF14)>, /* LCD_G2 */
+ <STM32_PINMUX('F', 3, AF14)>, /* LCD_G3 */
+ <STM32_PINMUX('D', 5, AF14)>, /* LCD_G4 */
+ <STM32_PINMUX('G', 0, AF14)>, /* LCD_G5 */
+ <STM32_PINMUX('C', 7, AF14)>, /* LCD_G6 */
+ <STM32_PINMUX('A', 15, AF11)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 10, AF14)>, /* LCD_B2 */
+ <STM32_PINMUX('F', 2, AF14)>, /* LCD_B3 */
+ <STM32_PINMUX('H', 14, AF11)>, /* LCD_B4 */
+ <STM32_PINMUX('E', 0, AF14)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 6, AF7)>, /* LCD_B6 */
+ <STM32_PINMUX('F', 1, AF13)>; /* LCD_B7 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ ltdc_sleep_pins_a: ltdc-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 9, ANALOG)>, /* LCD_CLK */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* LCD_HSYNC */
+ <STM32_PINMUX('G', 4, ANALOG)>, /* LCD_VSYNC */
+ <STM32_PINMUX('H', 9, ANALOG)>, /* LCD_DE */
+ <STM32_PINMUX('G', 7, ANALOG)>, /* LCD_R2 */
+ <STM32_PINMUX('B', 12, ANALOG)>, /* LCD_R3 */
+ <STM32_PINMUX('D', 14, ANALOG)>, /* LCD_R4 */
+ <STM32_PINMUX('E', 7, ANALOG)>, /* LCD_R5 */
+ <STM32_PINMUX('E', 13, ANALOG)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 9, ANALOG)>, /* LCD_R7 */
+ <STM32_PINMUX('H', 13, ANALOG)>, /* LCD_G2 */
+ <STM32_PINMUX('F', 3, ANALOG)>, /* LCD_G3 */
+ <STM32_PINMUX('D', 5, ANALOG)>, /* LCD_G4 */
+ <STM32_PINMUX('G', 0, ANALOG)>, /* LCD_G5 */
+ <STM32_PINMUX('C', 7, ANALOG)>, /* LCD_G6 */
+ <STM32_PINMUX('A', 15, ANALOG)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 10, ANALOG)>, /* LCD_B2 */
+ <STM32_PINMUX('F', 2, ANALOG)>, /* LCD_B3 */
+ <STM32_PINMUX('H', 14, ANALOG)>, /* LCD_B4 */
+ <STM32_PINMUX('E', 0, ANALOG)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 6, ANALOG)>, /* LCD_B6 */
+ <STM32_PINMUX('F', 1, ANALOG)>; /* LCD_B7 */
+ };
+ };
+
mcp23017_pins_a: mcp23017-0 {
pins {
pinmux = <STM32_PINMUX('G', 12, GPIO)>;
diff --git a/arch/arm/boot/dts/st/stm32mp131.dtsi b/arch/arm/boot/dts/st/stm32mp131.dtsi
index 3900f32da797..6704ceef284d 100644
--- a/arch/arm/boot/dts/st/stm32mp131.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp131.dtsi
@@ -745,340 +745,6 @@
dma-channels = <16>;
};
- adc_2: adc@48004000 {
- compatible = "st,stm32mp13-adc-core";
- reg = <0x48004000 0x400>;
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc ADC2>, <&rcc ADC2_K>;
- clock-names = "bus", "adc";
- interrupt-controller;
- #interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- adc2: adc@0 {
- compatible = "st,stm32mp13-adc";
- #io-channel-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0>;
- interrupt-parent = <&adc_2>;
- interrupts = <0>;
- dmas = <&dmamux1 10 0x400 0x80000001>;
- dma-names = "rx";
- status = "disabled";
-
- channel@13 {
- reg = <13>;
- label = "vrefint";
- };
- channel@14 {
- reg = <14>;
- label = "vddcore";
- };
- channel@16 {
- reg = <16>;
- label = "vddcpu";
- };
- channel@17 {
- reg = <17>;
- label = "vddq_ddr";
- };
- };
- };
-
- usbotg_hs: usb@49000000 {
- compatible = "st,stm32mp15-hsotg", "snps,dwc2";
- reg = <0x49000000 0x40000>;
- clocks = <&rcc USBO_K>;
- clock-names = "otg";
- resets = <&rcc USBO_R>;
- reset-names = "dwc2";
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- g-rx-fifo-size = <512>;
- g-np-tx-fifo-size = <32>;
- g-tx-fifo-size = <256 16 16 16 16 16 16 16>;
- dr_mode = "otg";
- otg-rev = <0x200>;
- usb33d-supply = <&scmi_usb33>;
- status = "disabled";
- };
-
- usart1: serial@4c000000 {
- compatible = "st,stm32h7-uart";
- reg = <0x4c000000 0x400>;
- interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART1_K>;
- resets = <&rcc USART1_R>;
- wakeup-source;
- dmas = <&dmamux1 41 0x400 0x5>,
- <&dmamux1 42 0x400 0x1>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- usart2: serial@4c001000 {
- compatible = "st,stm32h7-uart";
- reg = <0x4c001000 0x400>;
- interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART2_K>;
- resets = <&rcc USART2_R>;
- wakeup-source;
- dmas = <&dmamux1 43 0x400 0x5>,
- <&dmamux1 44 0x400 0x1>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2s4: audio-controller@4c002000 {
- compatible = "st,stm32h7-i2s";
- reg = <0x4c002000 0x400>;
- #sound-dai-cells = <0>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 83 0x400 0x01>,
- <&dmamux1 84 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spi4: spi@4c002000 {
- compatible = "st,stm32h7-spi";
- reg = <0x4c002000 0x400>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI4_K>;
- resets = <&rcc SPI4_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmamux1 83 0x400 0x01>,
- <&dmamux1 84 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spi5: spi@4c003000 {
- compatible = "st,stm32h7-spi";
- reg = <0x4c003000 0x400>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI5_K>;
- resets = <&rcc SPI5_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmamux1 85 0x400 0x01>,
- <&dmamux1 86 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2c3: i2c@4c004000 {
- compatible = "st,stm32mp13-i2c";
- reg = <0x4c004000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C3_K>;
- resets = <&rcc I2C3_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmamux1 73 0x400 0x1>,
- <&dmamux1 74 0x400 0x1>;
- dma-names = "rx", "tx";
- st,syscfg-fmp = <&syscfg 0x4 0x4>;
- i2c-analog-filter;
- status = "disabled";
- };
-
- i2c4: i2c@4c005000 {
- compatible = "st,stm32mp13-i2c";
- reg = <0x4c005000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C4_K>;
- resets = <&rcc I2C4_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmamux1 75 0x400 0x1>,
- <&dmamux1 76 0x400 0x1>;
- dma-names = "rx", "tx";
- st,syscfg-fmp = <&syscfg 0x4 0x8>;
- i2c-analog-filter;
- status = "disabled";
- };
-
- i2c5: i2c@4c006000 {
- compatible = "st,stm32mp13-i2c";
- reg = <0x4c006000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C5_K>;
- resets = <&rcc I2C5_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmamux1 115 0x400 0x1>,
- <&dmamux1 116 0x400 0x1>;
- dma-names = "rx", "tx";
- st,syscfg-fmp = <&syscfg 0x4 0x10>;
- i2c-analog-filter;
- status = "disabled";
- };
-
- timers12: timer@4c007000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c007000 0x400>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM12_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@11 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <11>;
- status = "disabled";
- };
- };
-
- timers13: timer@4c008000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c008000 0x400>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM13_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@12 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <12>;
- status = "disabled";
- };
- };
-
- timers14: timer@4c009000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c009000 0x400>;
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM14_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@13 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <13>;
- status = "disabled";
- };
- };
-
- timers15: timer@4c00a000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c00a000 0x400>;
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM15_K>;
- clock-names = "int";
- dmas = <&dmamux1 105 0x400 0x1>,
- <&dmamux1 106 0x400 0x1>,
- <&dmamux1 107 0x400 0x1>,
- <&dmamux1 108 0x400 0x1>;
- dma-names = "ch1", "up", "trig", "com";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@14 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <14>;
- status = "disabled";
- };
- };
-
- timers16: timer@4c00b000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c00b000 0x400>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM16_K>;
- clock-names = "int";
- dmas = <&dmamux1 109 0x400 0x1>,
- <&dmamux1 110 0x400 0x1>;
- dma-names = "ch1", "up";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@15 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <15>;
- status = "disabled";
- };
- };
-
- timers17: timer@4c00c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x4c00c000 0x400>;
- interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM17_K>;
- clock-names = "int";
- dmas = <&dmamux1 111 0x400 0x1>,
- <&dmamux1 112 0x400 0x1>;
- dma-names = "ch1", "up";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@16 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <16>;
- status = "disabled";
- };
- };
-
rcc: rcc@50000000 {
compatible = "st,stm32mp13-rcc", "syscon";
reg = <0x50000000 0x1000>;
@@ -1092,80 +758,113 @@
<&scmi_clk CK_SCMI_LSI>;
};
- exti: interrupt-controller@5000d000 {
- compatible = "st,stm32mp13-exti", "syscon";
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x5000d000 0x400>;
- };
-
- syscfg: syscon@50020000 {
- compatible = "st,stm32mp157-syscfg", "syscon";
- reg = <0x50020000 0x400>;
- clocks = <&rcc SYSCFG>;
- };
-
- lptimer2: timer@50021000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-lptimer";
- reg = <0x50021000 0x400>;
- interrupts-extended = <&exti 48 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM2_K>;
- clock-names = "mux";
- wakeup-source;
+ pwr_regulators: pwr@50001000 {
+ compatible = "st,stm32mp1,pwr-reg";
+ reg = <0x50001000 0x10>;
status = "disabled";
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- trigger@1 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <1>;
- status = "disabled";
+ reg11: reg11 {
+ regulator-name = "reg11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
};
- counter {
- compatible = "st,stm32-lptimer-counter";
- status = "disabled";
+ reg18: reg18 {
+ regulator-name = "reg18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
};
- timer {
- compatible = "st,stm32-lptimer-timer";
- status = "disabled";
+ usb33: usb33 {
+ regulator-name = "usb33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
};
};
- lptimer3: timer@50022000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-lptimer";
- reg = <0x50022000 0x400>;
- interrupts-extended = <&exti 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM3_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- trigger@2 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <2>;
- status = "disabled";
- };
+ exti: interrupt-controller@5000d000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x5000d000 0x400>;
+ interrupts-extended =
+ <&intc GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_40 */
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <0>,
+ <&intc GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_60 */
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>; /* EXTI_70 */
+ };
- timer {
- compatible = "st,stm32-lptimer-timer";
- status = "disabled";
- };
+ syscfg: syscon@50020000 {
+ compatible = "st,stm32mp157-syscfg", "syscon";
+ reg = <0x50020000 0x400>;
+ clocks = <&rcc SYSCFG>;
};
lptimer4: timer@50023000 {
@@ -1210,25 +909,6 @@
};
};
- hash: hash@54003000 {
- compatible = "st,stm32mp13-hash";
- reg = <0x54003000 0x400>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc HASH1>;
- resets = <&rcc HASH1_R>;
- dmas = <&mdma 30 0x2 0x1000a02 0x0 0x0>;
- dma-names = "in";
- status = "disabled";
- };
-
- rng: rng@54004000 {
- compatible = "st,stm32mp13-rng";
- reg = <0x54004000 0x400>;
- clocks = <&rcc RNG1_K>;
- resets = <&rcc RNG1_R>;
- status = "disabled";
- };
-
mdma: dma-controller@58000000 {
compatible = "st,stm32h7-mdma";
reg = <0x58000000 0x1000>;
@@ -1239,82 +919,6 @@
dma-requests = <48>;
};
- fmc: memory-controller@58002000 {
- compatible = "st,stm32mp1-fmc2-ebi";
- reg = <0x58002000 0x1000>;
- ranges = <0 0 0x60000000 0x04000000>, /* EBI CS 1 */
- <1 0 0x64000000 0x04000000>, /* EBI CS 2 */
- <2 0 0x68000000 0x04000000>, /* EBI CS 3 */
- <3 0 0x6c000000 0x04000000>, /* EBI CS 4 */
- <4 0 0x80000000 0x10000000>; /* NAND */
- #address-cells = <2>;
- #size-cells = <1>;
- clocks = <&rcc FMC_K>;
- resets = <&rcc FMC_R>;
- status = "disabled";
-
- nand-controller@4,0 {
- compatible = "st,stm32mp1-fmc2-nfc";
- reg = <4 0x00000000 0x1000>,
- <4 0x08010000 0x1000>,
- <4 0x08020000 0x1000>,
- <4 0x01000000 0x1000>,
- <4 0x09010000 0x1000>,
- <4 0x09020000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&mdma 24 0x2 0x12000a02 0x0 0x0>,
- <&mdma 24 0x2 0x12000a08 0x0 0x0>,
- <&mdma 25 0x2 0x12000a0a 0x0 0x0>;
- dma-names = "tx", "rx", "ecc";
- status = "disabled";
- };
- };
-
- qspi: spi@58003000 {
- compatible = "st,stm32f469-qspi";
- reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
- reg-names = "qspi", "qspi_mm";
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&mdma 26 0x2 0x10100002 0x0 0x0>,
- <&mdma 26 0x2 0x10100008 0x0 0x0>;
- dma-names = "tx", "rx";
- clocks = <&rcc QSPI_K>;
- resets = <&rcc QSPI_R>;
- status = "disabled";
- };
-
- sdmmc1: mmc@58005000 {
- compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
- arm,primecell-periphid = <0x20253180>;
- reg = <0x58005000 0x1000>, <0x58006000 0x1000>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SDMMC1_K>;
- clock-names = "apb_pclk";
- resets = <&rcc SDMMC1_R>;
- cap-sd-highspeed;
- cap-mmc-highspeed;
- max-frequency = <130000000>;
- status = "disabled";
- };
-
- sdmmc2: mmc@58007000 {
- compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
- arm,primecell-periphid = <0x20253180>;
- reg = <0x58007000 0x1000>, <0x58008000 0x1000>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SDMMC2_K>;
- clock-names = "apb_pclk";
- resets = <&rcc SDMMC2_R>;
- cap-sd-highspeed;
- cap-mmc-highspeed;
- max-frequency = <130000000>;
- status = "disabled";
- };
-
crc1: crc@58009000 {
compatible = "st,stm32f7-crc";
reg = <0x58009000 0x400>;
@@ -1349,29 +953,6 @@
status = "disabled";
};
- usbphyc: usbphyc@5a006000 {
- #address-cells = <1>;
- #size-cells = <0>;
- #clock-cells = <0>;
- compatible = "st,stm32mp1-usbphyc";
- reg = <0x5a006000 0x1000>;
- clocks = <&rcc USBPHY_K>;
- resets = <&rcc USBPHY_R>;
- vdda1v1-supply = <&scmi_reg11>;
- vdda1v8-supply = <&scmi_reg18>;
- status = "disabled";
-
- usbphyc_port0: usb-phy@0 {
- #phy-cells = <0>;
- reg = <0>;
- };
-
- usbphyc_port1: usb-phy@1 {
- #phy-cells = <1>;
- reg = <1>;
- };
- };
-
rtc: rtc@5c004000 {
compatible = "st,stm32mp1-rtc";
reg = <0x5c004000 0x400>;
@@ -1400,6 +981,555 @@
};
};
+ etzpc: bus@5c007000 {
+ compatible = "st,stm32-etzpc", "simple-bus";
+ reg = <0x5c007000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #access-controller-cells = <1>;
+ ranges;
+
+ adc_2: adc@48004000 {
+ compatible = "st,stm32mp13-adc-core";
+ reg = <0x48004000 0x400>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc ADC2>, <&rcc ADC2_K>;
+ clock-names = "bus", "adc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 33>;
+ status = "disabled";
+
+ adc2: adc@0 {
+ compatible = "st,stm32mp13-adc";
+ #io-channel-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ interrupt-parent = <&adc_2>;
+ interrupts = <0>;
+ dmas = <&dmamux1 10 0x400 0x80000001>;
+ dma-names = "rx";
+ status = "disabled";
+
+ channel@13 {
+ reg = <13>;
+ label = "vrefint";
+ };
+ channel@14 {
+ reg = <14>;
+ label = "vddcore";
+ };
+ channel@16 {
+ reg = <16>;
+ label = "vddcpu";
+ };
+ channel@17 {
+ reg = <17>;
+ label = "vddq_ddr";
+ };
+ };
+ };
+
+ usbotg_hs: usb@49000000 {
+ compatible = "st,stm32mp15-hsotg", "snps,dwc2";
+ reg = <0x49000000 0x40000>;
+ clocks = <&rcc USBO_K>;
+ clock-names = "otg";
+ resets = <&rcc USBO_R>;
+ reset-names = "dwc2";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ g-rx-fifo-size = <512>;
+ g-np-tx-fifo-size = <32>;
+ g-tx-fifo-size = <256 16 16 16 16 16 16 16>;
+ dr_mode = "otg";
+ otg-rev = <0x200>;
+ usb33d-supply = <&scmi_usb33>;
+ access-controllers = <&etzpc 34>;
+ status = "disabled";
+ };
+
+ usart1: serial@4c000000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x4c000000 0x400>;
+ interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART1_K>;
+ resets = <&rcc USART1_R>;
+ wakeup-source;
+ dmas = <&dmamux1 41 0x400 0x5>,
+ <&dmamux1 42 0x400 0x1>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 16>;
+ status = "disabled";
+ };
+
+ usart2: serial@4c001000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x4c001000 0x400>;
+ interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART2_K>;
+ resets = <&rcc USART2_R>;
+ wakeup-source;
+ dmas = <&dmamux1 43 0x400 0x5>,
+ <&dmamux1 44 0x400 0x1>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 17>;
+ status = "disabled";
+ };
+
+ i2s4: audio-controller@4c002000 {
+ compatible = "st,stm32h7-i2s";
+ reg = <0x4c002000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 83 0x400 0x01>,
+ <&dmamux1 84 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 13>;
+ status = "disabled";
+ };
+
+ spi4: spi@4c002000 {
+ compatible = "st,stm32h7-spi";
+ reg = <0x4c002000 0x400>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI4_K>;
+ resets = <&rcc SPI4_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dmamux1 83 0x400 0x01>,
+ <&dmamux1 84 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 18>;
+ status = "disabled";
+ };
+
+ spi5: spi@4c003000 {
+ compatible = "st,stm32h7-spi";
+ reg = <0x4c003000 0x400>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI5_K>;
+ resets = <&rcc SPI5_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dmamux1 85 0x400 0x01>,
+ <&dmamux1 86 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 19>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@4c004000 {
+ compatible = "st,stm32mp13-i2c";
+ reg = <0x4c004000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C3_K>;
+ resets = <&rcc I2C3_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dmamux1 73 0x400 0x1>,
+ <&dmamux1 74 0x400 0x1>;
+ dma-names = "rx", "tx";
+ st,syscfg-fmp = <&syscfg 0x4 0x4>;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 20>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@4c005000 {
+ compatible = "st,stm32mp13-i2c";
+ reg = <0x4c005000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C4_K>;
+ resets = <&rcc I2C4_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dmamux1 75 0x400 0x1>,
+ <&dmamux1 76 0x400 0x1>;
+ dma-names = "rx", "tx";
+ st,syscfg-fmp = <&syscfg 0x4 0x8>;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 21>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@4c006000 {
+ compatible = "st,stm32mp13-i2c";
+ reg = <0x4c006000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C5_K>;
+ resets = <&rcc I2C5_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&dmamux1 115 0x400 0x1>,
+ <&dmamux1 116 0x400 0x1>;
+ dma-names = "rx", "tx";
+ st,syscfg-fmp = <&syscfg 0x4 0x10>;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 22>;
+ status = "disabled";
+ };
+
+ timers12: timer@4c007000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c007000 0x400>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM12_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 23>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@11 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <11>;
+ status = "disabled";
+ };
+ };
+
+ timers13: timer@4c008000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c008000 0x400>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM13_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 24>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@12 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <12>;
+ status = "disabled";
+ };
+ };
+
+ timers14: timer@4c009000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c009000 0x400>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM14_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 25>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@13 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <13>;
+ status = "disabled";
+ };
+ };
+
+ timers15: timer@4c00a000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c00a000 0x400>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM15_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 105 0x400 0x1>,
+ <&dmamux1 106 0x400 0x1>,
+ <&dmamux1 107 0x400 0x1>,
+ <&dmamux1 108 0x400 0x1>;
+ dma-names = "ch1", "up", "trig", "com";
+ access-controllers = <&etzpc 26>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@14 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <14>;
+ status = "disabled";
+ };
+ };
+
+ timers16: timer@4c00b000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c00b000 0x400>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM16_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 109 0x400 0x1>,
+ <&dmamux1 110 0x400 0x1>;
+ dma-names = "ch1", "up";
+ access-controllers = <&etzpc 27>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@15 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <15>;
+ status = "disabled";
+ };
+ };
+
+ timers17: timer@4c00c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x4c00c000 0x400>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM17_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 111 0x400 0x1>,
+ <&dmamux1 112 0x400 0x1>;
+ dma-names = "ch1", "up";
+ access-controllers = <&etzpc 28>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@16 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <16>;
+ status = "disabled";
+ };
+ };
+
+ lptimer2: timer@50021000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x50021000 0x400>;
+ interrupts-extended = <&exti 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM2_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 1>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@1 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <1>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+ };
+
+ lptimer3: timer@50022000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x50022000 0x400>;
+ interrupts-extended = <&exti 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM3_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 2>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@2 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <2>;
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "st,stm32-lptimer-timer";
+ status = "disabled";
+ };
+ };
+
+ hash: hash@54003000 {
+ compatible = "st,stm32mp13-hash";
+ reg = <0x54003000 0x400>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc HASH1>;
+ resets = <&rcc HASH1_R>;
+ dmas = <&mdma 30 0x2 0x1000a02 0x0 0x0>;
+ dma-names = "in";
+ access-controllers = <&etzpc 41>;
+ status = "disabled";
+ };
+
+ rng: rng@54004000 {
+ compatible = "st,stm32mp13-rng";
+ reg = <0x54004000 0x400>;
+ clocks = <&rcc RNG1_K>;
+ resets = <&rcc RNG1_R>;
+ access-controllers = <&etzpc 40>;
+ status = "disabled";
+ };
+
+ fmc: memory-controller@58002000 {
+ compatible = "st,stm32mp1-fmc2-ebi";
+ reg = <0x58002000 0x1000>;
+ ranges = <0 0 0x60000000 0x04000000>, /* EBI CS 1 */
+ <1 0 0x64000000 0x04000000>, /* EBI CS 2 */
+ <2 0 0x68000000 0x04000000>, /* EBI CS 3 */
+ <3 0 0x6c000000 0x04000000>, /* EBI CS 4 */
+ <4 0 0x80000000 0x10000000>; /* NAND */
+ #address-cells = <2>;
+ #size-cells = <1>;
+ clocks = <&rcc FMC_K>;
+ resets = <&rcc FMC_R>;
+ access-controllers = <&etzpc 54>;
+ status = "disabled";
+
+ nand-controller@4,0 {
+ compatible = "st,stm32mp1-fmc2-nfc";
+ reg = <4 0x00000000 0x1000>,
+ <4 0x08010000 0x1000>,
+ <4 0x08020000 0x1000>,
+ <4 0x01000000 0x1000>,
+ <4 0x09010000 0x1000>,
+ <4 0x09020000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma 24 0x2 0x12000a02 0x0 0x0>,
+ <&mdma 24 0x2 0x12000a08 0x0 0x0>,
+ <&mdma 25 0x2 0x12000a0a 0x0 0x0>;
+ dma-names = "tx", "rx", "ecc";
+ status = "disabled";
+ };
+ };
+
+ qspi: spi@58003000 {
+ compatible = "st,stm32f469-qspi";
+ reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
+ reg-names = "qspi", "qspi_mm";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma 26 0x2 0x10100002 0x0 0x0>,
+ <&mdma 26 0x2 0x10100008 0x0 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&rcc QSPI_K>;
+ resets = <&rcc QSPI_R>;
+ access-controllers = <&etzpc 55>;
+ status = "disabled";
+ };
+
+ sdmmc1: mmc@58005000 {
+ compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x20253180>;
+ reg = <0x58005000 0x1000>, <0x58006000 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SDMMC1_K>;
+ clock-names = "apb_pclk";
+ resets = <&rcc SDMMC1_R>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <130000000>;
+ access-controllers = <&etzpc 50>;
+ status = "disabled";
+ };
+
+ sdmmc2: mmc@58007000 {
+ compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x20253180>;
+ reg = <0x58007000 0x1000>, <0x58008000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SDMMC2_K>;
+ clock-names = "apb_pclk";
+ resets = <&rcc SDMMC2_R>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <130000000>;
+ access-controllers = <&etzpc 51>;
+ status = "disabled";
+ };
+
+ usbphyc: usbphyc@5a006000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <0>;
+ compatible = "st,stm32mp1-usbphyc";
+ reg = <0x5a006000 0x1000>;
+ clocks = <&rcc USBPHY_K>;
+ resets = <&rcc USBPHY_R>;
+ vdda1v1-supply = <&scmi_reg11>;
+ vdda1v8-supply = <&scmi_reg18>;
+ access-controllers = <&etzpc 5>;
+ status = "disabled";
+
+ usbphyc_port0: usb-phy@0 {
+ #phy-cells = <0>;
+ reg = <0>;
+ };
+
+ usbphyc_port1: usb-phy@1 {
+ #phy-cells = <1>;
+ reg = <1>;
+ };
+ };
+ };
+
/*
* Break node order to solve dependency probe issue between
* pinctrl and exti.
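The peripherals deleted in the earlier hunks of this file are re-created inside the new etzpc firewall bus added just above, each gaining an access-controllers = <&etzpc N> handle, and every node keeps its original label. Board files and overlays therefore keep referencing them exactly as before; a minimal sketch with a hypothetical pin group:

    &usart1 {
            pinctrl-names = "default";
            pinctrl-0 = <&usart1_pins_a>;   /* hypothetical pin group */
            status = "okay";
    };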
diff --git a/arch/arm/boot/dts/st/stm32mp133.dtsi b/arch/arm/boot/dts/st/stm32mp133.dtsi
index df451c3c2a26..3e394c8e58b9 100644
--- a/arch/arm/boot/dts/st/stm32mp133.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp133.dtsi
@@ -33,35 +33,38 @@
bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
status = "disabled";
};
+ };
+};
- adc_1: adc@48003000 {
- compatible = "st,stm32mp13-adc-core";
- reg = <0x48003000 0x400>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc ADC1>, <&rcc ADC1_K>;
- clock-names = "bus", "adc";
- interrupt-controller;
- #interrupt-cells = <1>;
+&etzpc {
+ adc_1: adc@48003000 {
+ compatible = "st,stm32mp13-adc-core";
+ reg = <0x48003000 0x400>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc ADC1>, <&rcc ADC1_K>;
+ clock-names = "bus", "adc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 32>;
+ status = "disabled";
+
+ adc1: adc@0 {
+ compatible = "st,stm32mp13-adc";
+ #io-channel-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0x0>;
+ interrupt-parent = <&adc_1>;
+ interrupts = <0>;
+ dmas = <&dmamux1 9 0x400 0x80000001>;
+ dma-names = "rx";
status = "disabled";
- adc1: adc@0 {
- compatible = "st,stm32mp13-adc";
- #io-channel-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0>;
- interrupt-parent = <&adc_1>;
- interrupts = <0>;
- dmas = <&dmamux1 9 0x400 0x80000001>;
- dma-names = "rx";
- status = "disabled";
-
- channel@18 {
- reg = <18>;
- label = "vrefint";
- };
+ channel@18 {
+ reg = <18>;
+ label = "vrefint";
};
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp135.dtsi b/arch/arm/boot/dts/st/stm32mp135.dtsi
index 68d32f9f5314..834a4d545fe4 100644
--- a/arch/arm/boot/dts/st/stm32mp135.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp135.dtsi
@@ -19,5 +19,16 @@
port {
};
};
+
+ ltdc: display-controller@5a001000 {
+ compatible = "st,stm32-ltdc";
+ reg = <0x5a001000 0x400>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LTDC_PX>;
+ clock-names = "lcd";
+ resets = <&scmi_reset RST_SCMI_LTDC>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dk.dts b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
index 52171214a308..567e53ad285f 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dk.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
@@ -66,6 +66,46 @@
default-state = "off";
};
};
+
+ panel_backlight: panel-backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpioe 12 GPIO_ACTIVE_HIGH>;
+ default-on;
+ status = "okay";
+ };
+
+ panel_rgb: panel-rgb {
+ compatible = "rocktech,rk043fn48h";
+ enable-gpios = <&gpioi 7 GPIO_ACTIVE_HIGH>;
+ backlight = <&panel_backlight>;
+ power-supply = <&scmi_v3v3_sw>;
+ status = "okay";
+
+ width-mm = <105>;
+ height-mm = <67>;
+
+ panel-timing {
+ clock-frequency = <10000000>;
+ hactive = <480>;
+ hback-porch = <43>;
+ hfront-porch = <10>;
+ hsync-len = <1>;
+ hsync-active = <0>;
+ vactive = <272>;
+ vback-porch = <26>;
+ vfront-porch = <4>;
+ vsync-len = <10>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ port {
+ panel_in_rgb: endpoint {
+ remote-endpoint = <&ltdc_out_rgb>;
+ };
+ };
+ };
};
&adc_1 {
@@ -168,6 +208,19 @@
status = "okay";
};
+&ltdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ltdc_pins_a>;
+ pinctrl-1 = <&ltdc_sleep_pins_a>;
+ status = "okay";
+
+ port {
+ ltdc_out_rgb: endpoint {
+ remote-endpoint = <&panel_in_rgb>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32mp13xc.dtsi b/arch/arm/boot/dts/st/stm32mp13xc.dtsi
index 4d00e7592882..a8bd5fe6536c 100644
--- a/arch/arm/boot/dts/st/stm32mp13xc.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13xc.dtsi
@@ -4,15 +4,14 @@
* Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
*/
-/ {
- soc {
- cryp: crypto@54002000 {
- compatible = "st,stm32mp1-cryp";
- reg = <0x54002000 0x400>;
- interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc CRYP1>;
- resets = <&rcc CRYP1_R>;
- status = "disabled";
- };
+&etzpc {
+ cryp: crypto@54002000 {
+ compatible = "st,stm32mp1-cryp";
+ reg = <0x54002000 0x400>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CRYP1>;
+ resets = <&rcc CRYP1_R>;
+ access-controllers = <&etzpc 42>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp13xf.dtsi b/arch/arm/boot/dts/st/stm32mp13xf.dtsi
index 4d00e7592882..a8bd5fe6536c 100644
--- a/arch/arm/boot/dts/st/stm32mp13xf.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13xf.dtsi
@@ -4,15 +4,14 @@
* Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
*/
-/ {
- soc {
- cryp: crypto@54002000 {
- compatible = "st,stm32mp1-cryp";
- reg = <0x54002000 0x400>;
- interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc CRYP1>;
- resets = <&rcc CRYP1_R>;
- status = "disabled";
- };
+&etzpc {
+ cryp: crypto@54002000 {
+ compatible = "st,stm32mp1-cryp";
+ reg = <0x54002000 0x400>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CRYP1>;
+ resets = <&rcc CRYP1_R>;
+ access-controllers = <&etzpc 42>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
index fa4cbd312e5a..90c5c72c87ab 100644
--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
@@ -122,1042 +122,6 @@
interrupt-parent = <&intc>;
ranges;
- timers2: timer@40000000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40000000 0x400>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM2_K>;
- clock-names = "int";
- dmas = <&dmamux1 18 0x400 0x1>,
- <&dmamux1 19 0x400 0x1>,
- <&dmamux1 20 0x400 0x1>,
- <&dmamux1 21 0x400 0x1>,
- <&dmamux1 22 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4", "up";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@1 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <1>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- timers3: timer@40001000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40001000 0x400>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM3_K>;
- clock-names = "int";
- dmas = <&dmamux1 23 0x400 0x1>,
- <&dmamux1 24 0x400 0x1>,
- <&dmamux1 25 0x400 0x1>,
- <&dmamux1 26 0x400 0x1>,
- <&dmamux1 27 0x400 0x1>,
- <&dmamux1 28 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@2 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <2>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- timers4: timer@40002000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40002000 0x400>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM4_K>;
- clock-names = "int";
- dmas = <&dmamux1 29 0x400 0x1>,
- <&dmamux1 30 0x400 0x1>,
- <&dmamux1 31 0x400 0x1>,
- <&dmamux1 32 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@3 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <3>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- timers5: timer@40003000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40003000 0x400>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM5_K>;
- clock-names = "int";
- dmas = <&dmamux1 55 0x400 0x1>,
- <&dmamux1 56 0x400 0x1>,
- <&dmamux1 57 0x400 0x1>,
- <&dmamux1 58 0x400 0x1>,
- <&dmamux1 59 0x400 0x1>,
- <&dmamux1 60 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@4 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <4>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- timers6: timer@40004000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40004000 0x400>;
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM6_K>;
- clock-names = "int";
- dmas = <&dmamux1 69 0x400 0x1>;
- dma-names = "up";
- status = "disabled";
-
- timer@5 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <5>;
- status = "disabled";
- };
- };
-
- timers7: timer@40005000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40005000 0x400>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM7_K>;
- clock-names = "int";
- dmas = <&dmamux1 70 0x400 0x1>;
- dma-names = "up";
- status = "disabled";
-
- timer@6 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <6>;
- status = "disabled";
- };
- };
-
- timers12: timer@40006000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40006000 0x400>;
- interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM12_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@11 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <11>;
- status = "disabled";
- };
- };
-
- timers13: timer@40007000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40007000 0x400>;
- interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM13_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@12 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <12>;
- status = "disabled";
- };
- };
-
- timers14: timer@40008000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40008000 0x400>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM14_K>;
- clock-names = "int";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@13 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <13>;
- status = "disabled";
- };
- };
-
- lptimer1: timer@40009000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-lptimer";
- reg = <0x40009000 0x400>;
- interrupts-extended = <&exti 47 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM1_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- trigger@0 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <0>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-lptimer-counter";
- status = "disabled";
- };
- };
-
- spi2: spi@4000b000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x4000b000 0x400>;
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI2_K>;
- resets = <&rcc SPI2_R>;
- dmas = <&dmamux1 39 0x400 0x05>,
- <&dmamux1 40 0x400 0x05>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2s2: audio-controller@4000b000 {
- compatible = "st,stm32h7-i2s";
- #sound-dai-cells = <0>;
- reg = <0x4000b000 0x400>;
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 39 0x400 0x01>,
- <&dmamux1 40 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spi3: spi@4000c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x4000c000 0x400>;
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI3_K>;
- resets = <&rcc SPI3_R>;
- dmas = <&dmamux1 61 0x400 0x05>,
- <&dmamux1 62 0x400 0x05>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2s3: audio-controller@4000c000 {
- compatible = "st,stm32h7-i2s";
- #sound-dai-cells = <0>;
- reg = <0x4000c000 0x400>;
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 61 0x400 0x01>,
- <&dmamux1 62 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spdifrx: audio-controller@4000d000 {
- compatible = "st,stm32h7-spdifrx";
- #sound-dai-cells = <0>;
- reg = <0x4000d000 0x400>;
- clocks = <&rcc SPDIF_K>;
- clock-names = "kclk";
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 93 0x400 0x01>,
- <&dmamux1 94 0x400 0x01>;
- dma-names = "rx", "rx-ctrl";
- status = "disabled";
- };
-
- usart2: serial@4000e000 {
- compatible = "st,stm32h7-uart";
- reg = <0x4000e000 0x400>;
- interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART2_K>;
- wakeup-source;
- dmas = <&dmamux1 43 0x400 0x15>,
- <&dmamux1 44 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- usart3: serial@4000f000 {
- compatible = "st,stm32h7-uart";
- reg = <0x4000f000 0x400>;
- interrupts-extended = <&exti 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART3_K>;
- wakeup-source;
- dmas = <&dmamux1 45 0x400 0x15>,
- <&dmamux1 46 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart4: serial@40010000 {
- compatible = "st,stm32h7-uart";
- reg = <0x40010000 0x400>;
- interrupts-extended = <&exti 30 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc UART4_K>;
- wakeup-source;
- dmas = <&dmamux1 63 0x400 0x15>,
- <&dmamux1 64 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart5: serial@40011000 {
- compatible = "st,stm32h7-uart";
- reg = <0x40011000 0x400>;
- interrupts-extended = <&exti 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc UART5_K>;
- wakeup-source;
- dmas = <&dmamux1 65 0x400 0x15>,
- <&dmamux1 66 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2c1: i2c@40012000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x40012000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C1_K>;
- resets = <&rcc I2C1_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x1>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
- };
-
- i2c2: i2c@40013000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x40013000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C2_K>;
- resets = <&rcc I2C2_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x2>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
- };
-
- i2c3: i2c@40014000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x40014000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C3_K>;
- resets = <&rcc I2C3_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x4>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
- };
-
- i2c5: i2c@40015000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x40015000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C5_K>;
- resets = <&rcc I2C5_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x10>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
- };
-
- cec: cec@40016000 {
- compatible = "st,stm32-cec";
- reg = <0x40016000 0x400>;
- interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc CEC_K>, <&rcc CEC>;
- clock-names = "cec", "hdmi-cec";
- status = "disabled";
- };
-
- dac: dac@40017000 {
- compatible = "st,stm32h7-dac-core";
- reg = <0x40017000 0x400>;
- clocks = <&rcc DAC12>;
- clock-names = "pclk";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- dac1: dac@1 {
- compatible = "st,stm32-dac";
- #io-channel-cells = <1>;
- reg = <1>;
- status = "disabled";
- };
-
- dac2: dac@2 {
- compatible = "st,stm32-dac";
- #io-channel-cells = <1>;
- reg = <2>;
- status = "disabled";
- };
- };
-
- uart7: serial@40018000 {
- compatible = "st,stm32h7-uart";
- reg = <0x40018000 0x400>;
- interrupts-extended = <&exti 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc UART7_K>;
- wakeup-source;
- dmas = <&dmamux1 79 0x400 0x15>,
- <&dmamux1 80 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart8: serial@40019000 {
- compatible = "st,stm32h7-uart";
- reg = <0x40019000 0x400>;
- interrupts-extended = <&exti 33 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc UART8_K>;
- wakeup-source;
- dmas = <&dmamux1 81 0x400 0x15>,
- <&dmamux1 82 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- timers1: timer@44000000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x44000000 0x400>;
- interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "brk", "up", "trg-com", "cc";
- clocks = <&rcc TIM1_K>;
- clock-names = "int";
- dmas = <&dmamux1 11 0x400 0x1>,
- <&dmamux1 12 0x400 0x1>,
- <&dmamux1 13 0x400 0x1>,
- <&dmamux1 14 0x400 0x1>,
- <&dmamux1 15 0x400 0x1>,
- <&dmamux1 16 0x400 0x1>,
- <&dmamux1 17 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4",
- "up", "trig", "com";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@0 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <0>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- timers8: timer@44001000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x44001000 0x400>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "brk", "up", "trg-com", "cc";
- clocks = <&rcc TIM8_K>;
- clock-names = "int";
- dmas = <&dmamux1 47 0x400 0x1>,
- <&dmamux1 48 0x400 0x1>,
- <&dmamux1 49 0x400 0x1>,
- <&dmamux1 50 0x400 0x1>,
- <&dmamux1 51 0x400 0x1>,
- <&dmamux1 52 0x400 0x1>,
- <&dmamux1 53 0x400 0x1>;
- dma-names = "ch1", "ch2", "ch3", "ch4",
- "up", "trig", "com";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@7 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <7>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- status = "disabled";
- };
- };
-
- usart6: serial@44003000 {
- compatible = "st,stm32h7-uart";
- reg = <0x44003000 0x400>;
- interrupts-extended = <&exti 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART6_K>;
- wakeup-source;
- dmas = <&dmamux1 71 0x400 0x15>,
- <&dmamux1 72 0x400 0x11>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spi1: spi@44004000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x44004000 0x400>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI1_K>;
- resets = <&rcc SPI1_R>;
- dmas = <&dmamux1 37 0x400 0x05>,
- <&dmamux1 38 0x400 0x05>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2s1: audio-controller@44004000 {
- compatible = "st,stm32h7-i2s";
- #sound-dai-cells = <0>;
- reg = <0x44004000 0x400>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 37 0x400 0x01>,
- <&dmamux1 38 0x400 0x01>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- spi4: spi@44005000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x44005000 0x400>;
- interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI4_K>;
- resets = <&rcc SPI4_R>;
- dmas = <&dmamux1 83 0x400 0x05>,
- <&dmamux1 84 0x400 0x05>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- timers15: timer@44006000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x44006000 0x400>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM15_K>;
- clock-names = "int";
- dmas = <&dmamux1 105 0x400 0x1>,
- <&dmamux1 106 0x400 0x1>,
- <&dmamux1 107 0x400 0x1>,
- <&dmamux1 108 0x400 0x1>;
- dma-names = "ch1", "up", "trig", "com";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@14 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <14>;
- status = "disabled";
- };
- };
-
- timers16: timer@44007000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x44007000 0x400>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM16_K>;
- clock-names = "int";
- dmas = <&dmamux1 109 0x400 0x1>,
- <&dmamux1 110 0x400 0x1>;
- dma-names = "ch1", "up";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
- timer@15 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <15>;
- status = "disabled";
- };
- };
-
- timers17: timer@44008000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x44008000 0x400>;
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global";
- clocks = <&rcc TIM17_K>;
- clock-names = "int";
- dmas = <&dmamux1 111 0x400 0x1>,
- <&dmamux1 112 0x400 0x1>;
- dma-names = "ch1", "up";
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- timer@16 {
- compatible = "st,stm32h7-timer-trigger";
- reg = <16>;
- status = "disabled";
- };
- };
-
- spi5: spi@44009000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x44009000 0x400>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI5_K>;
- resets = <&rcc SPI5_R>;
- dmas = <&dmamux1 85 0x400 0x05>,
- <&dmamux1 86 0x400 0x05>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- sai1: sai@4400a000 {
- compatible = "st,stm32h7-sai";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x4400a000 0x400>;
- reg = <0x4400a000 0x4>, <0x4400a3f0 0x10>;
- interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&rcc SAI1_R>;
- status = "disabled";
-
- sai1a: audio-controller@4400a004 {
- #sound-dai-cells = <0>;
-
- compatible = "st,stm32-sai-sub-a";
- reg = <0x4 0x20>;
- clocks = <&rcc SAI1_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 87 0x400 0x01>;
- status = "disabled";
- };
-
- sai1b: audio-controller@4400a024 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-b";
- reg = <0x24 0x20>;
- clocks = <&rcc SAI1_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 88 0x400 0x01>;
- status = "disabled";
- };
- };
-
- sai2: sai@4400b000 {
- compatible = "st,stm32h7-sai";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x4400b000 0x400>;
- reg = <0x4400b000 0x4>, <0x4400b3f0 0x10>;
- interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&rcc SAI2_R>;
- status = "disabled";
-
- sai2a: audio-controller@4400b004 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-a";
- reg = <0x4 0x20>;
- clocks = <&rcc SAI2_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 89 0x400 0x01>;
- status = "disabled";
- };
-
- sai2b: audio-controller@4400b024 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-b";
- reg = <0x24 0x20>;
- clocks = <&rcc SAI2_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 90 0x400 0x01>;
- status = "disabled";
- };
- };
-
- sai3: sai@4400c000 {
- compatible = "st,stm32h7-sai";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x4400c000 0x400>;
- reg = <0x4400c000 0x4>, <0x4400c3f0 0x10>;
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&rcc SAI3_R>;
- status = "disabled";
-
- sai3a: audio-controller@4400c004 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-a";
- reg = <0x04 0x20>;
- clocks = <&rcc SAI3_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 113 0x400 0x01>;
- status = "disabled";
- };
-
- sai3b: audio-controller@4400c024 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-b";
- reg = <0x24 0x20>;
- clocks = <&rcc SAI3_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 114 0x400 0x01>;
- status = "disabled";
- };
- };
-
- dfsdm: dfsdm@4400d000 {
- compatible = "st,stm32mp1-dfsdm";
- reg = <0x4400d000 0x800>;
- clocks = <&rcc DFSDM_K>;
- clock-names = "dfsdm";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- dfsdm0: filter@0 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <0>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 101 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- dfsdm1: filter@1 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <1>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 102 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- dfsdm2: filter@2 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <2>;
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 103 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- dfsdm3: filter@3 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <3>;
- interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 104 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- dfsdm4: filter@4 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <4>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 91 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- dfsdm5: filter@5 {
- compatible = "st,stm32-dfsdm-adc";
- #io-channel-cells = <1>;
- reg = <5>;
- interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dmamux1 92 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
- };
-
- dma1: dma-controller@48000000 {
- compatible = "st,stm32-dma";
- reg = <0x48000000 0x400>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc DMA1>;
- resets = <&rcc DMA1_R>;
- #dma-cells = <4>;
- st,mem2mem;
- dma-requests = <8>;
- };
-
- dma2: dma-controller@48001000 {
- compatible = "st,stm32-dma";
- reg = <0x48001000 0x400>;
- interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc DMA2>;
- resets = <&rcc DMA2_R>;
- #dma-cells = <4>;
- st,mem2mem;
- dma-requests = <8>;
- };
-
- dmamux1: dma-router@48002000 {
- compatible = "st,stm32h7-dmamux";
- reg = <0x48002000 0x40>;
- #dma-cells = <3>;
- dma-requests = <128>;
- dma-masters = <&dma1 &dma2>;
- dma-channels = <16>;
- clocks = <&rcc DMAMUX>;
- resets = <&rcc DMAMUX_R>;
- };
-
- adc: adc@48003000 {
- compatible = "st,stm32mp1-adc-core";
- reg = <0x48003000 0x400>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc ADC12>, <&rcc ADC12_K>;
- clock-names = "bus", "adc";
- interrupt-controller;
- st,syscfg = <&syscfg>;
- #interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- adc1: adc@0 {
- compatible = "st,stm32mp1-adc";
- #io-channel-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0>;
- interrupt-parent = <&adc>;
- interrupts = <0>;
- dmas = <&dmamux1 9 0x400 0x01>;
- dma-names = "rx";
- status = "disabled";
- };
-
- adc2: adc@100 {
- compatible = "st,stm32mp1-adc";
- #io-channel-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x100>;
- interrupt-parent = <&adc>;
- interrupts = <1>;
- dmas = <&dmamux1 10 0x400 0x01>;
- dma-names = "rx";
- nvmem-cells = <&vrefint>;
- nvmem-cell-names = "vrefint";
- status = "disabled";
- channel@13 {
- reg = <13>;
- label = "vrefint";
- };
- channel@14 {
- reg = <14>;
- label = "vddcore";
- };
- };
- };
-
- sdmmc3: mmc@48004000 {
- compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
- arm,primecell-periphid = <0x00253180>;
- reg = <0x48004000 0x400>;
- interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SDMMC3_K>;
- clock-names = "apb_pclk";
- resets = <&rcc SDMMC3_R>;
- cap-sd-highspeed;
- cap-mmc-highspeed;
- max-frequency = <120000000>;
- status = "disabled";
- };
-
- usbotg_hs: usb-otg@49000000 {
- compatible = "st,stm32mp15-hsotg", "snps,dwc2";
- reg = <0x49000000 0x10000>;
- clocks = <&rcc USBO_K>, <&usbphyc>;
- clock-names = "otg", "utmi";
- resets = <&rcc USBO_R>;
- reset-names = "dwc2";
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
- g-rx-fifo-size = <512>;
- g-np-tx-fifo-size = <32>;
- g-tx-fifo-size = <256 16 16 16 16 16 16 16>;
- dr_mode = "otg";
- otg-rev = <0x200>;
- usb33d-supply = <&usb33>;
- status = "disabled";
- };
-
ipcc: mailbox@4c001000 {
compatible = "st,stm32mp1-ipcc";
#mbox-cells = <1>;
@@ -1172,18 +136,6 @@
status = "disabled";
};
- dcmi: dcmi@4c006000 {
- compatible = "st,stm32-dcmi";
- reg = <0x4c006000 0x400>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&rcc CAMITF_R>;
- clocks = <&rcc DCMI>;
- clock-names = "mclk";
- dmas = <&dmamux1 75 0x400 0x01>;
- dma-names = "tx";
- status = "disabled";
- };
-
rcc: rcc@50000000 {
compatible = "st,stm32mp1-rcc", "syscon";
reg = <0x50000000 0x1000>;
@@ -1224,6 +176,81 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000d000 0x400>;
+ interrupts-extended =
+ <&intc GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_40 */
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <0>,
+ <&intc GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_60 */
+ <&intc GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_70 */
+ <0>,
+ <0>,
+ <&intc GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
};
syscfg: syscon@50020000 {
@@ -1232,131 +259,6 @@
clocks = <&rcc SYSCFG>;
};
- lptimer2: timer@50021000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-lptimer";
- reg = <0x50021000 0x400>;
- interrupts-extended = <&exti 48 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM2_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- trigger@1 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <1>;
- status = "disabled";
- };
-
- counter {
- compatible = "st,stm32-lptimer-counter";
- status = "disabled";
- };
- };
-
- lptimer3: timer@50022000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-lptimer";
- reg = <0x50022000 0x400>;
- interrupts-extended = <&exti 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM3_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- trigger@2 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <2>;
- status = "disabled";
- };
- };
-
- lptimer4: timer@50023000 {
- compatible = "st,stm32-lptimer";
- reg = <0x50023000 0x400>;
- interrupts-extended = <&exti 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM4_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
- };
-
- lptimer5: timer@50024000 {
- compatible = "st,stm32-lptimer";
- reg = <0x50024000 0x400>;
- interrupts-extended = <&exti 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc LPTIM5_K>;
- clock-names = "mux";
- wakeup-source;
- status = "disabled";
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- status = "disabled";
- };
- };
-
- vrefbuf: vrefbuf@50025000 {
- compatible = "st,stm32-vrefbuf";
- reg = <0x50025000 0x8>;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <2500000>;
- clocks = <&rcc VREF>;
- status = "disabled";
- };
-
- sai4: sai@50027000 {
- compatible = "st,stm32h7-sai";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x50027000 0x400>;
- reg = <0x50027000 0x4>, <0x500273f0 0x10>;
- interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&rcc SAI4_R>;
- status = "disabled";
-
- sai4a: audio-controller@50027004 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-a";
- reg = <0x04 0x20>;
- clocks = <&rcc SAI4_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 99 0x400 0x01>;
- status = "disabled";
- };
-
- sai4b: audio-controller@50027024 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-b";
- reg = <0x24 0x20>;
- clocks = <&rcc SAI4_K>;
- clock-names = "sai_ck";
- dmas = <&dmamux1 100 0x400 0x01>;
- status = "disabled";
- };
- };
-
dts: thermal@50028000 {
compatible = "st,stm32-thermal";
reg = <0x50028000 0x100>;
@@ -1367,26 +269,6 @@
status = "disabled";
};
- hash1: hash@54002000 {
- compatible = "st,stm32f756-hash";
- reg = <0x54002000 0x400>;
- interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc HASH1>;
- resets = <&rcc HASH1_R>;
- dmas = <&mdma1 31 0x2 0x1000A02 0x0 0x0>;
- dma-names = "in";
- dma-maxburst = <2>;
- status = "disabled";
- };
-
- rng1: rng@54003000 {
- compatible = "st,stm32-rng";
- reg = <0x54003000 0x400>;
- clocks = <&rcc RNG1_K>;
- resets = <&rcc RNG1_R>;
- status = "disabled";
- };
-
mdma1: dma-controller@58000000 {
compatible = "st,stm32h7-mdma";
reg = <0x58000000 0x1000>;
@@ -1398,55 +280,6 @@
dma-requests = <48>;
};
- fmc: memory-controller@58002000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "st,stm32mp1-fmc2-ebi";
- reg = <0x58002000 0x1000>;
- clocks = <&rcc FMC_K>;
- resets = <&rcc FMC_R>;
- status = "disabled";
-
- ranges = <0 0 0x60000000 0x04000000>, /* EBI CS 1 */
- <1 0 0x64000000 0x04000000>, /* EBI CS 2 */
- <2 0 0x68000000 0x04000000>, /* EBI CS 3 */
- <3 0 0x6c000000 0x04000000>, /* EBI CS 4 */
- <4 0 0x80000000 0x10000000>; /* NAND */
-
- nand-controller@4,0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32mp1-fmc2-nfc";
- reg = <4 0x00000000 0x1000>,
- <4 0x08010000 0x1000>,
- <4 0x08020000 0x1000>,
- <4 0x01000000 0x1000>,
- <4 0x09010000 0x1000>,
- <4 0x09020000 0x1000>;
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&mdma1 20 0x2 0x12000a02 0x0 0x0>,
- <&mdma1 20 0x2 0x12000a08 0x0 0x0>,
- <&mdma1 21 0x2 0x12000a0a 0x0 0x0>;
- dma-names = "tx", "rx", "ecc";
- status = "disabled";
- };
- };
-
- qspi: spi@58003000 {
- compatible = "st,stm32f469-qspi";
- reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
- reg-names = "qspi", "qspi_mm";
- interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&mdma1 22 0x2 0x10100002 0x0 0x0>,
- <&mdma1 22 0x2 0x10100008 0x0 0x0>;
- dma-names = "tx", "rx";
- clocks = <&rcc QSPI_K>;
- resets = <&rcc QSPI_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
sdmmc1: mmc@58005000 {
compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00253180>;
@@ -1482,39 +315,6 @@
status = "disabled";
};
- ethernet0: ethernet@5800a000 {
- compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
- reg = <0x5800a000 0x2000>;
- reg-names = "stmmaceth";
- interrupts-extended = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq";
- clock-names = "stmmaceth",
- "mac-clk-tx",
- "mac-clk-rx",
- "eth-ck",
- "ptp_ref",
- "ethstp";
- clocks = <&rcc ETHMAC>,
- <&rcc ETHTX>,
- <&rcc ETHRX>,
- <&rcc ETHCK_K>,
- <&rcc ETHPTP_K>,
- <&rcc ETHSTP>;
- st,syscon = <&syscfg 0x4>;
- snps,mixed-burst;
- snps,pbl = <2>;
- snps,en-tx-lpi-clockgating;
- snps,axi-config = <&stmmac_axi_config_0>;
- snps,tso;
- status = "disabled";
-
- stmmac_axi_config_0: stmmac-axi-config {
- snps,wr_osr_lmt = <0x7>;
- snps,rd_osr_lmt = <0x7>;
- snps,blen = <0 0 0 0 16 8 4>;
- };
- };
-
usbh_ohci: usb@5800c000 {
compatible = "generic-ohci";
reg = <0x5800c000 0x1000>;
@@ -1580,45 +380,6 @@
};
};
- usart1: serial@5c000000 {
- compatible = "st,stm32h7-uart";
- reg = <0x5c000000 0x400>;
- interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc USART1_K>;
- wakeup-source;
- status = "disabled";
- };
-
- spi6: spi@5c001000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x5c001000 0x400>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc SPI6_K>;
- resets = <&rcc SPI6_R>;
- dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
- <&mdma1 35 0x0 0x40002 0x0 0x0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2c4: i2c@5c002000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x5c002000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C4_K>;
- resets = <&rcc I2C4_R>;
- #address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x8>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
- };
-
rtc: rtc@5c004000 {
compatible = "st,stm32mp1-rtc";
reg = <0x5c004000 0x400>;
@@ -1647,20 +408,1406 @@
};
};
- i2c6: i2c@5c009000 {
- compatible = "st,stm32mp15-i2c";
- reg = <0x5c009000 0x400>;
- interrupt-names = "event", "error";
- interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc I2C6_K>;
- resets = <&rcc I2C6_R>;
+ etzpc: bus@5c007000 {
+ compatible = "st,stm32-etzpc", "simple-bus";
+ reg = <0x5c007000 0x400>;
#address-cells = <1>;
- #size-cells = <0>;
- st,syscfg-fmp = <&syscfg 0x4 0x20>;
- wakeup-source;
- i2c-analog-filter;
- status = "disabled";
+ #size-cells = <1>;
+ #access-controller-cells = <1>;
+ ranges;
+
+ timers2: timer@40000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40000000 0x400>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM2_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 18 0x400 0x1>,
+ <&dmamux1 19 0x400 0x1>,
+ <&dmamux1 20 0x400 0x1>,
+ <&dmamux1 21 0x400 0x1>,
+ <&dmamux1 22 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up";
+ access-controllers = <&etzpc 16>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@1 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <1>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ timers3: timer@40001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40001000 0x400>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM3_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 23 0x400 0x1>,
+ <&dmamux1 24 0x400 0x1>,
+ <&dmamux1 25 0x400 0x1>,
+ <&dmamux1 26 0x400 0x1>,
+ <&dmamux1 27 0x400 0x1>,
+ <&dmamux1 28 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
+ access-controllers = <&etzpc 17>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@2 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <2>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ timers4: timer@40002000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40002000 0x400>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM4_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 29 0x400 0x1>,
+ <&dmamux1 30 0x400 0x1>,
+ <&dmamux1 31 0x400 0x1>,
+ <&dmamux1 32 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4";
+ access-controllers = <&etzpc 18>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@3 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <3>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ timers5: timer@40003000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40003000 0x400>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM5_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 55 0x400 0x1>,
+ <&dmamux1 56 0x400 0x1>,
+ <&dmamux1 57 0x400 0x1>,
+ <&dmamux1 58 0x400 0x1>,
+ <&dmamux1 59 0x400 0x1>,
+ <&dmamux1 60 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
+ access-controllers = <&etzpc 19>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@4 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <4>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ timers6: timer@40004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40004000 0x400>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM6_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 69 0x400 0x1>;
+ dma-names = "up";
+ access-controllers = <&etzpc 20>;
+ status = "disabled";
+
+ timer@5 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <5>;
+ status = "disabled";
+ };
+ };
+
+ timers7: timer@40005000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40005000 0x400>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM7_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 70 0x400 0x1>;
+ dma-names = "up";
+ access-controllers = <&etzpc 21>;
+ status = "disabled";
+
+ timer@6 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <6>;
+ status = "disabled";
+ };
+ };
+
+ timers12: timer@40006000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40006000 0x400>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM12_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 22>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@11 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <11>;
+ status = "disabled";
+ };
+ };
+
+ timers13: timer@40007000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40007000 0x400>;
+ interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM13_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 23>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@12 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <12>;
+ status = "disabled";
+ };
+ };
+
+ timers14: timer@40008000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40008000 0x400>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM14_K>;
+ clock-names = "int";
+ access-controllers = <&etzpc 24>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@13 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <13>;
+ status = "disabled";
+ };
+ };
+
+ lptimer1: timer@40009000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x40009000 0x400>;
+ interrupts-extended = <&exti 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM1_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 25>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@0 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <0>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+ };
+
+ i2s2: audio-controller@4000b000 {
+ compatible = "st,stm32h7-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x4000b000 0x400>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 39 0x400 0x01>,
+ <&dmamux1 40 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 27>;
+ status = "disabled";
+ };
+
+ spi2: spi@4000b000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x4000b000 0x400>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI2_K>;
+ resets = <&rcc SPI2_R>;
+ dmas = <&dmamux1 39 0x400 0x05>,
+ <&dmamux1 40 0x400 0x05>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 27>;
+ status = "disabled";
+ };
+
+ i2s3: audio-controller@4000c000 {
+ compatible = "st,stm32h7-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x4000c000 0x400>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 61 0x400 0x01>,
+ <&dmamux1 62 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 28>;
+ status = "disabled";
+ };
+
+ spi3: spi@4000c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x4000c000 0x400>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI3_K>;
+ resets = <&rcc SPI3_R>;
+ dmas = <&dmamux1 61 0x400 0x05>,
+ <&dmamux1 62 0x400 0x05>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 28>;
+ status = "disabled";
+ };
+
+ spdifrx: audio-controller@4000d000 {
+ compatible = "st,stm32h7-spdifrx";
+ #sound-dai-cells = <0>;
+ reg = <0x4000d000 0x400>;
+ clocks = <&rcc SPDIF_K>;
+ clock-names = "kclk";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 93 0x400 0x01>,
+ <&dmamux1 94 0x400 0x01>;
+ dma-names = "rx", "rx-ctrl";
+ access-controllers = <&etzpc 29>;
+ status = "disabled";
+ };
+
+ usart2: serial@4000e000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x4000e000 0x400>;
+ interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART2_K>;
+ wakeup-source;
+ dmas = <&dmamux1 43 0x400 0x15>,
+ <&dmamux1 44 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 30>;
+ status = "disabled";
+ };
+
+ usart3: serial@4000f000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x4000f000 0x400>;
+ interrupts-extended = <&exti 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART3_K>;
+ wakeup-source;
+ dmas = <&dmamux1 45 0x400 0x15>,
+ <&dmamux1 46 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 31>;
+ status = "disabled";
+ };
+
+ uart4: serial@40010000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40010000 0x400>;
+ interrupts-extended = <&exti 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc UART4_K>;
+ wakeup-source;
+ dmas = <&dmamux1 63 0x400 0x15>,
+ <&dmamux1 64 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 32>;
+ status = "disabled";
+ };
+
+ uart5: serial@40011000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40011000 0x400>;
+ interrupts-extended = <&exti 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc UART5_K>;
+ wakeup-source;
+ dmas = <&dmamux1 65 0x400 0x15>,
+ <&dmamux1 66 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 33>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@40012000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x40012000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C1_K>;
+ resets = <&rcc I2C1_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x1>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 34>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@40013000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x40013000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C2_K>;
+ resets = <&rcc I2C2_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x2>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 35>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@40014000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x40014000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C3_K>;
+ resets = <&rcc I2C3_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x4>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 36>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@40015000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x40015000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C5_K>;
+ resets = <&rcc I2C5_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x10>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 37>;
+ status = "disabled";
+ };
+
+ cec: cec@40016000 {
+ compatible = "st,stm32-cec";
+ reg = <0x40016000 0x400>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CEC_K>, <&rcc CEC>;
+ clock-names = "cec", "hdmi-cec";
+ access-controllers = <&etzpc 38>;
+ status = "disabled";
+ };
+
+ dac: dac@40017000 {
+ compatible = "st,stm32h7-dac-core";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DAC12>;
+ clock-names = "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 39>;
+ status = "disabled";
+
+ dac1: dac@1 {
+ compatible = "st,stm32-dac";
+ #io-channel-cells = <1>;
+ reg = <1>;
+ status = "disabled";
+ };
+
+ dac2: dac@2 {
+ compatible = "st,stm32-dac";
+ #io-channel-cells = <1>;
+ reg = <2>;
+ status = "disabled";
+ };
+ };
+
+ uart7: serial@40018000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40018000 0x400>;
+ interrupts-extended = <&exti 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc UART7_K>;
+ wakeup-source;
+ dmas = <&dmamux1 79 0x400 0x15>,
+ <&dmamux1 80 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 40>;
+ status = "disabled";
+ };
+
+ uart8: serial@40019000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40019000 0x400>;
+ interrupts-extended = <&exti 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc UART8_K>;
+ wakeup-source;
+ dmas = <&dmamux1 81 0x400 0x15>,
+ <&dmamux1 82 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 41>;
+ status = "disabled";
+ };
+
+ timers1: timer@44000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x44000000 0x400>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "brk", "up", "trg-com", "cc";
+ clocks = <&rcc TIM1_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 11 0x400 0x1>,
+ <&dmamux1 12 0x400 0x1>,
+ <&dmamux1 13 0x400 0x1>,
+ <&dmamux1 14 0x400 0x1>,
+ <&dmamux1 15 0x400 0x1>,
+ <&dmamux1 16 0x400 0x1>,
+ <&dmamux1 17 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4",
+ "up", "trig", "com";
+ access-controllers = <&etzpc 48>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@0 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <0>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ timers8: timer@44001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x44001000 0x400>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "brk", "up", "trg-com", "cc";
+ clocks = <&rcc TIM8_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 47 0x400 0x1>,
+ <&dmamux1 48 0x400 0x1>,
+ <&dmamux1 49 0x400 0x1>,
+ <&dmamux1 50 0x400 0x1>,
+ <&dmamux1 51 0x400 0x1>,
+ <&dmamux1 52 0x400 0x1>,
+ <&dmamux1 53 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4",
+ "up", "trig", "com";
+ access-controllers = <&etzpc 49>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@7 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <7>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+ };
+
+ usart6: serial@44003000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x44003000 0x400>;
+ interrupts-extended = <&exti 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART6_K>;
+ wakeup-source;
+ dmas = <&dmamux1 71 0x400 0x15>,
+ <&dmamux1 72 0x400 0x11>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 51>;
+ status = "disabled";
+ };
+
+ i2s1: audio-controller@44004000 {
+ compatible = "st,stm32h7-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x44004000 0x400>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 37 0x400 0x01>,
+ <&dmamux1 38 0x400 0x01>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 52>;
+ status = "disabled";
+ };
+
+ spi1: spi@44004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x44004000 0x400>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI1_K>;
+ resets = <&rcc SPI1_R>;
+ dmas = <&dmamux1 37 0x400 0x05>,
+ <&dmamux1 38 0x400 0x05>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 52>;
+ status = "disabled";
+ };
+
+ spi4: spi@44005000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x44005000 0x400>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI4_K>;
+ resets = <&rcc SPI4_R>;
+ dmas = <&dmamux1 83 0x400 0x05>,
+ <&dmamux1 84 0x400 0x05>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 53>;
+ status = "disabled";
+ };
+
+ timers15: timer@44006000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x44006000 0x400>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM15_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 105 0x400 0x1>,
+ <&dmamux1 106 0x400 0x1>,
+ <&dmamux1 107 0x400 0x1>,
+ <&dmamux1 108 0x400 0x1>;
+ dma-names = "ch1", "up", "trig", "com";
+ access-controllers = <&etzpc 54>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@14 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <14>;
+ status = "disabled";
+ };
+ };
+
+ timers16: timer@44007000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x44007000 0x400>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM16_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 109 0x400 0x1>,
+ <&dmamux1 110 0x400 0x1>;
+ dma-names = "ch1", "up";
+ access-controllers = <&etzpc 55>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ timer@15 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <15>;
+ status = "disabled";
+ };
+ };
+
+ timers17: timer@44008000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x44008000 0x400>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global";
+ clocks = <&rcc TIM17_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 111 0x400 0x1>,
+ <&dmamux1 112 0x400 0x1>;
+ dma-names = "ch1", "up";
+ access-controllers = <&etzpc 56>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@16 {
+ compatible = "st,stm32h7-timer-trigger";
+ reg = <16>;
+ status = "disabled";
+ };
+ };
+
+ spi5: spi@44009000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x44009000 0x400>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI5_K>;
+ resets = <&rcc SPI5_R>;
+ dmas = <&dmamux1 85 0x400 0x05>,
+ <&dmamux1 86 0x400 0x05>;
+ dma-names = "rx", "tx";
+ access-controllers = <&etzpc 57>;
+ status = "disabled";
+ };
+
+ sai1: sai@4400a000 {
+ compatible = "st,stm32h7-sai";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4400a000 0x400>;
+ reg = <0x4400a000 0x4>, <0x4400a3f0 0x10>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI1_R>;
+ access-controllers = <&etzpc 58>;
+ status = "disabled";
+
+ sai1a: audio-controller@4400a004 {
+ #sound-dai-cells = <0>;
+
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ clocks = <&rcc SAI1_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 87 0x400 0x01>;
+ status = "disabled";
+ };
+
+ sai1b: audio-controller@4400a024 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ clocks = <&rcc SAI1_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 88 0x400 0x01>;
+ status = "disabled";
+ };
+ };
+
+ sai2: sai@4400b000 {
+ compatible = "st,stm32h7-sai";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4400b000 0x400>;
+ reg = <0x4400b000 0x4>, <0x4400b3f0 0x10>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI2_R>;
+ access-controllers = <&etzpc 59>;
+ status = "disabled";
+
+ sai2a: audio-controller@4400b004 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ clocks = <&rcc SAI2_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 89 0x400 0x01>;
+ status = "disabled";
+ };
+
+ sai2b: audio-controller@4400b024 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ clocks = <&rcc SAI2_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 90 0x400 0x01>;
+ status = "disabled";
+ };
+ };
+
+ sai3: sai@4400c000 {
+ compatible = "st,stm32h7-sai";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4400c000 0x400>;
+ reg = <0x4400c000 0x4>, <0x4400c3f0 0x10>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI3_R>;
+ access-controllers = <&etzpc 60>;
+ status = "disabled";
+
+ sai3a: audio-controller@4400c004 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x04 0x20>;
+ clocks = <&rcc SAI3_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 113 0x400 0x01>;
+ status = "disabled";
+ };
+
+ sai3b: audio-controller@4400c024 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ clocks = <&rcc SAI3_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 114 0x400 0x01>;
+ status = "disabled";
+ };
+ };
+
+ dfsdm: dfsdm@4400d000 {
+ compatible = "st,stm32mp1-dfsdm";
+ reg = <0x4400d000 0x800>;
+ clocks = <&rcc DFSDM_K>;
+ clock-names = "dfsdm";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 61>;
+ status = "disabled";
+
+ dfsdm0: filter@0 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <0>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 101 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ dfsdm1: filter@1 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <1>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 102 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ dfsdm2: filter@2 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <2>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 103 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ dfsdm3: filter@3 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <3>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 104 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ dfsdm4: filter@4 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <4>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 91 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ dfsdm5: filter@5 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <5>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 92 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+ };
+
+ dma1: dma-controller@48000000 {
+ compatible = "st,stm32-dma";
+ reg = <0x48000000 0x400>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc DMA1>;
+ resets = <&rcc DMA1_R>;
+ #dma-cells = <4>;
+ st,mem2mem;
+ dma-requests = <8>;
+ access-controllers = <&etzpc 88>;
+ };
+
+ dma2: dma-controller@48001000 {
+ compatible = "st,stm32-dma";
+ reg = <0x48001000 0x400>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc DMA2>;
+ resets = <&rcc DMA2_R>;
+ #dma-cells = <4>;
+ st,mem2mem;
+ dma-requests = <8>;
+ access-controllers = <&etzpc 89>;
+ };
+
+ dmamux1: dma-router@48002000 {
+ compatible = "st,stm32h7-dmamux";
+ reg = <0x48002000 0x40>;
+ #dma-cells = <3>;
+ dma-requests = <128>;
+ dma-masters = <&dma1 &dma2>;
+ dma-channels = <16>;
+ clocks = <&rcc DMAMUX>;
+ resets = <&rcc DMAMUX_R>;
+ access-controllers = <&etzpc 90>;
+ };
+
+ adc: adc@48003000 {
+ compatible = "st,stm32mp1-adc-core";
+ reg = <0x48003000 0x400>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc ADC12>, <&rcc ADC12_K>;
+ clock-names = "bus", "adc";
+ interrupt-controller;
+ st,syscfg = <&syscfg>;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 72>;
+ status = "disabled";
+
+ adc1: adc@0 {
+ compatible = "st,stm32mp1-adc";
+ #io-channel-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ interrupt-parent = <&adc>;
+ interrupts = <0>;
+ dmas = <&dmamux1 9 0x400 0x01>;
+ dma-names = "rx";
+ status = "disabled";
+ };
+
+ adc2: adc@100 {
+ compatible = "st,stm32mp1-adc";
+ #io-channel-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x100>;
+ interrupt-parent = <&adc>;
+ interrupts = <1>;
+ dmas = <&dmamux1 10 0x400 0x01>;
+ dma-names = "rx";
+ nvmem-cells = <&vrefint>;
+ nvmem-cell-names = "vrefint";
+ status = "disabled";
+ channel@13 {
+ reg = <13>;
+ label = "vrefint";
+ };
+ channel@14 {
+ reg = <14>;
+ label = "vddcore";
+ };
+ };
+ };
+
+ sdmmc3: mmc@48004000 {
+ compatible = "st,stm32-sdmmc2", "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00253180>;
+ reg = <0x48004000 0x400>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SDMMC3_K>;
+ clock-names = "apb_pclk";
+ resets = <&rcc SDMMC3_R>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ access-controllers = <&etzpc 86>;
+ status = "disabled";
+ };
+
+ usbotg_hs: usb-otg@49000000 {
+ compatible = "st,stm32mp15-hsotg", "snps,dwc2";
+ reg = <0x49000000 0x10000>;
+ clocks = <&rcc USBO_K>, <&usbphyc>;
+ clock-names = "otg", "utmi";
+ resets = <&rcc USBO_R>;
+ reset-names = "dwc2";
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ g-rx-fifo-size = <512>;
+ g-np-tx-fifo-size = <32>;
+ g-tx-fifo-size = <256 16 16 16 16 16 16 16>;
+ dr_mode = "otg";
+ otg-rev = <0x200>;
+ usb33d-supply = <&usb33>;
+ access-controllers = <&etzpc 85>;
+ status = "disabled";
+ };
+
+ dcmi: dcmi@4c006000 {
+ compatible = "st,stm32-dcmi";
+ reg = <0x4c006000 0x400>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc CAMITF_R>;
+ clocks = <&rcc DCMI>;
+ clock-names = "mclk";
+ dmas = <&dmamux1 75 0x400 0x01>;
+ dma-names = "tx";
+ access-controllers = <&etzpc 70>;
+ status = "disabled";
+ };
+
+ lptimer2: timer@50021000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x50021000 0x400>;
+ interrupts-extended = <&exti 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM2_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 64>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@1 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <1>;
+ status = "disabled";
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ status = "disabled";
+ };
+ };
+
+ lptimer3: timer@50022000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-lptimer";
+ reg = <0x50022000 0x400>;
+ interrupts-extended = <&exti 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM3_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 65>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ trigger@2 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <2>;
+ status = "disabled";
+ };
+ };
+
+ lptimer4: timer@50023000 {
+ compatible = "st,stm32-lptimer";
+ reg = <0x50023000 0x400>;
+ interrupts-extended = <&exti 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM4_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 66>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ };
+
+ lptimer5: timer@50024000 {
+ compatible = "st,stm32-lptimer";
+ reg = <0x50024000 0x400>;
+ interrupts-extended = <&exti 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LPTIM5_K>;
+ clock-names = "mux";
+ wakeup-source;
+ access-controllers = <&etzpc 67>;
+ status = "disabled";
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ };
+
+ vrefbuf: vrefbuf@50025000 {
+ compatible = "st,stm32-vrefbuf";
+ reg = <0x50025000 0x8>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2500000>;
+ clocks = <&rcc VREF>;
+ access-controllers = <&etzpc 69>;
+ status = "disabled";
+ };
+
+ sai4: sai@50027000 {
+ compatible = "st,stm32h7-sai";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x50027000 0x400>;
+ reg = <0x50027000 0x4>, <0x500273f0 0x10>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI4_R>;
+ access-controllers = <&etzpc 68>;
+ status = "disabled";
+
+ sai4a: audio-controller@50027004 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x04 0x20>;
+ clocks = <&rcc SAI4_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 99 0x400 0x01>;
+ status = "disabled";
+ };
+
+ sai4b: audio-controller@50027024 {
+ #sound-dai-cells = <0>;
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ clocks = <&rcc SAI4_K>;
+ clock-names = "sai_ck";
+ dmas = <&dmamux1 100 0x400 0x01>;
+ status = "disabled";
+ };
+ };
+
+ hash1: hash@54002000 {
+ compatible = "st,stm32f756-hash";
+ reg = <0x54002000 0x400>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc HASH1>;
+ resets = <&rcc HASH1_R>;
+ dmas = <&mdma1 31 0x2 0x1000A02 0x0 0x0>;
+ dma-names = "in";
+ dma-maxburst = <2>;
+ access-controllers = <&etzpc 8>;
+ status = "disabled";
+ };
+
+ rng1: rng@54003000 {
+ compatible = "st,stm32-rng";
+ reg = <0x54003000 0x400>;
+ clocks = <&rcc RNG1_K>;
+ resets = <&rcc RNG1_R>;
+ access-controllers = <&etzpc 7>;
+ status = "disabled";
+ };
+
+ fmc: memory-controller@58002000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "st,stm32mp1-fmc2-ebi";
+ reg = <0x58002000 0x1000>;
+ clocks = <&rcc FMC_K>;
+ resets = <&rcc FMC_R>;
+ access-controllers = <&etzpc 91>;
+ status = "disabled";
+
+ ranges = <0 0 0x60000000 0x04000000>, /* EBI CS 1 */
+ <1 0 0x64000000 0x04000000>, /* EBI CS 2 */
+ <2 0 0x68000000 0x04000000>, /* EBI CS 3 */
+ <3 0 0x6c000000 0x04000000>, /* EBI CS 4 */
+ <4 0 0x80000000 0x10000000>; /* NAND */
+
+ nand-controller@4,0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp1-fmc2-nfc";
+ reg = <4 0x00000000 0x1000>,
+ <4 0x08010000 0x1000>,
+ <4 0x08020000 0x1000>,
+ <4 0x01000000 0x1000>,
+ <4 0x09010000 0x1000>,
+ <4 0x09020000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma1 20 0x2 0x12000a02 0x0 0x0>,
+ <&mdma1 20 0x2 0x12000a08 0x0 0x0>,
+ <&mdma1 21 0x2 0x12000a0a 0x0 0x0>;
+ dma-names = "tx", "rx", "ecc";
+ status = "disabled";
+ };
+ };
+
+ qspi: spi@58003000 {
+ compatible = "st,stm32f469-qspi";
+ reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
+ reg-names = "qspi", "qspi_mm";
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma1 22 0x2 0x10100002 0x0 0x0>,
+ <&mdma1 22 0x2 0x10100008 0x0 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&rcc QSPI_K>;
+ resets = <&rcc QSPI_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&etzpc 92>;
+ status = "disabled";
+ };
+
+ ethernet0: ethernet@5800a000 {
+ compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
+ reg = <0x5800a000 0x2000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "eth-ck",
+ "ptp_ref",
+ "ethstp";
+ clocks = <&rcc ETHMAC>,
+ <&rcc ETHTX>,
+ <&rcc ETHRX>,
+ <&rcc ETHCK_K>,
+ <&rcc ETHPTP_K>,
+ <&rcc ETHSTP>;
+ st,syscon = <&syscfg 0x4>;
+ snps,mixed-burst;
+ snps,pbl = <2>;
+ snps,en-tx-lpi-clockgating;
+ snps,axi-config = <&stmmac_axi_config_0>;
+ snps,tso;
+ access-controllers = <&etzpc 94>;
+ status = "disabled";
+
+ stmmac_axi_config_0: stmmac-axi-config {
+ snps,wr_osr_lmt = <0x7>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+ };
+
+ usart1: serial@5c000000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x5c000000 0x400>;
+ interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc USART1_K>;
+ wakeup-source;
+ access-controllers = <&etzpc 3>;
+ status = "disabled";
+ };
+
+ spi6: spi@5c001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x5c001000 0x400>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI6_K>;
+ resets = <&rcc SPI6_R>;
+ dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+ <&mdma1 35 0x0 0x40002 0x0 0x0>;
+ access-controllers = <&etzpc 4>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ i2c4: i2c@5c002000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x5c002000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C4_K>;
+ resets = <&rcc I2C4_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x8>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 5>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@5c009000 {
+ compatible = "st,stm32mp15-i2c";
+ reg = <0x5c009000 0x400>;
+ interrupt-names = "event", "error";
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C6_K>;
+ resets = <&rcc I2C6_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x20>;
+ wakeup-source;
+ i2c-analog-filter;
+ access-controllers = <&etzpc 12>;
+ status = "disabled";
+ };
};
tamp: tamp@5c00a000 {
diff --git a/arch/arm/boot/dts/st/stm32mp153.dtsi b/arch/arm/boot/dts/st/stm32mp153.dtsi
index 486084e0b80b..4640dafb1598 100644
--- a/arch/arm/boot/dts/st/stm32mp153.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp153.dtsi
@@ -28,32 +28,34 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
};
+};
- soc {
- m_can1: can@4400e000 {
- compatible = "bosch,m_can";
- reg = <0x4400e000 0x400>, <0x44011000 0x1400>;
- reg-names = "m_can", "message_ram";
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "int0", "int1";
- clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
- clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
- status = "disabled";
- };
+&etzpc {
+ m_can1: can@4400e000 {
+ compatible = "bosch,m_can";
+ reg = <0x4400e000 0x400>, <0x44011000 0x1400>;
+ reg-names = "m_can", "message_ram";
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int0", "int1";
+ clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
+ clock-names = "hclk", "cclk";
+ bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+ access-controllers = <&etzpc 62>;
+ status = "disabled";
+ };
- m_can2: can@4400f000 {
- compatible = "bosch,m_can";
- reg = <0x4400f000 0x400>, <0x44011000 0x2800>;
- reg-names = "m_can", "message_ram";
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "int0", "int1";
- clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
- clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
- status = "disabled";
- };
+ m_can2: can@4400f000 {
+ compatible = "bosch,m_can";
+ reg = <0x4400f000 0x400>, <0x44011000 0x2800>;
+ reg-names = "m_can", "message_ram";
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int0", "int1";
+ clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
+ clock-names = "hclk", "cclk";
+ bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
+ access-controllers = <&etzpc 62>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ed1.dts b/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
index 66ed5f9921ba..9cf5ed111b52 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
@@ -10,6 +10,7 @@
#include "stm32mp15-pinctrl.dtsi"
#include "stm32mp15xxaa-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/mfd/st,stpmic1.h>
/ {
@@ -71,6 +72,17 @@
};
};
+ led {
+ compatible = "gpio-leds";
+ led-blue {
+ gpios = <&gpiod 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+
sd_switch: regulator-sd_switch {
compatible = "regulator-gpio";
regulator-name = "sd_switch";
diff --git a/arch/arm/boot/dts/st/stm32mp15xc.dtsi b/arch/arm/boot/dts/st/stm32mp15xc.dtsi
index b06a55a2fa18..97465717f932 100644
--- a/arch/arm/boot/dts/st/stm32mp15xc.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xc.dtsi
@@ -4,15 +4,14 @@
* Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
*/
-/ {
- soc {
- cryp1: cryp@54001000 {
- compatible = "st,stm32mp1-cryp";
- reg = <0x54001000 0x400>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc CRYP1>;
- resets = <&rcc CRYP1_R>;
- status = "disabled";
- };
+&etzpc {
+ cryp1: cryp@54001000 {
+ compatible = "st,stm32mp1-cryp";
+ reg = <0x54001000 0x400>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CRYP1>;
+ resets = <&rcc CRYP1_R>;
+ access-controllers = <&etzpc 9>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/ti/keystone/keystone-k2g.dtsi b/arch/arm/boot/dts/ti/keystone/keystone-k2g.dtsi
index 790b29ab0fa2..dafe485dfe19 100644
--- a/arch/arm/boot/dts/ti/keystone/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/ti/keystone/keystone-k2g.dtsi
@@ -256,11 +256,6 @@
pmmc: system-controller@2921c00 {
compatible = "ti,k2g-sci";
- /*
- * In case of rare platforms that does not use k2g as
- * system master, use /delete-property/
- */
- ti,system-reboot-controller;
mbox-names = "rx", "tx";
mboxes = <&msgmgr 5 2>,
<&msgmgr 0 0>;
diff --git a/arch/arm/boot/dts/ti/omap/am33xx.dtsi b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
index 989d5a6edeed..0614ffdc1578 100644
--- a/arch/arm/boot/dts/ti/omap/am33xx.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
@@ -80,7 +80,7 @@
* because the can not be enabled simultaneously on a
* single SoC.
*/
- opp-50-300000000{
+ opp-50-300000000 {
/* OPP50 */
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <950000 931000 969000>;
@@ -88,7 +88,7 @@
opp-suspend;
};
- opp-100-275000000{
+ opp-100-275000000 {
/* OPP100-1 */
opp-hz = /bits/ 64 <275000000>;
opp-microvolt = <1100000 1078000 1122000>;
@@ -96,7 +96,7 @@
opp-suspend;
};
- opp-100-300000000{
+ opp-100-300000000 {
/* OPP100-2 */
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <1100000 1078000 1122000>;
@@ -104,7 +104,7 @@
opp-suspend;
};
- opp-100-500000000{
+ opp-100-500000000 {
/* OPP100-3 */
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <1100000 1078000 1122000>;
diff --git a/arch/arm/boot/dts/ti/omap/am4372.dtsi b/arch/arm/boot/dts/ti/omap/am4372.dtsi
index 5fd1b380ece6..0a1df30f2818 100644
--- a/arch/arm/boot/dts/ti/omap/am4372.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am4372.dtsi
@@ -92,7 +92,7 @@
opp-supported-hw = <0xFF 0x08>;
};
- opp-800000000{
+ opp-800000000 {
/* OPP Turbo */
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1260000 1234800 1285200>;
diff --git a/arch/arm/boot/dts/ti/omap/dra76x.dtsi b/arch/arm/boot/dts/ti/omap/dra76x.dtsi
index 1045eb24aa0d..50a02c393ea2 100644
--- a/arch/arm/boot/dts/ti/omap/dra76x.dtsi
+++ b/arch/arm/boot/dts/ti/omap/dra76x.dtsi
@@ -84,35 +84,44 @@
};
&scm_conf_clocks {
- dpll_gmac_h14x2_ctrl_ck: dpll_gmac_h14x2_ctrl_ck@3fc {
- #clock-cells = <0>;
- compatible = "ti,divider-clock";
- clocks = <&dpll_gmac_x2_ck>;
- ti,max-div = <63>;
- reg = <0x03fc>;
- ti,bit-shift = <20>;
- ti,latch-bit = <26>;
- assigned-clocks = <&dpll_gmac_h14x2_ctrl_ck>;
- assigned-clock-rates = <80000000>;
- };
-
- dpll_gmac_h14x2_ctrl_mux_ck: dpll_gmac_h14x2_ctrl_mux_ck@3fc {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clocks = <&dpll_gmac_ck>, <&dpll_gmac_h14x2_ctrl_ck>;
+ /* CTRL_CORE_SMA_SW_0 */
+ clock@3fc {
+ compatible = "ti,clksel";
reg = <0x3fc>;
- ti,bit-shift = <29>;
- ti,latch-bit = <26>;
- assigned-clocks = <&dpll_gmac_h14x2_ctrl_mux_ck>;
- assigned-clock-parents = <&dpll_gmac_h14x2_ctrl_ck>;
- };
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_gmac_h14x2_ctrl_ck: clock@20 {
+ reg = <20>;
+ clock-output-names = "dpll_gmac_h14x2_ctrl_ck";
+ compatible = "ti,divider-clock";
+ clocks = <&dpll_gmac_x2_ck>;
+ ti,max-div = <63>;
+ ti,latch-bit = <26>;
+ assigned-clocks = <&dpll_gmac_h14x2_ctrl_ck>;
+ assigned-clock-rates = <80000000>;
+ #clock-cells = <0>;
+ };
- mcan_clk: mcan_clk@3fc {
- #clock-cells = <0>;
- compatible = "ti,gate-clock";
- clocks = <&dpll_gmac_h14x2_ctrl_mux_ck>;
- ti,bit-shift = <27>;
- reg = <0x3fc>;
+ mcan_clk: clock@27 {
+ reg = <27>;
+ clock-output-names = "mcan_clk";
+ compatible = "ti,gate-clock";
+ clocks = <&dpll_gmac_h14x2_ctrl_mux_ck>;
+ #clock-cells = <0>;
+ };
+
+ dpll_gmac_h14x2_ctrl_mux_ck: clock@29 {
+ reg = <29>;
+ clock-output-names = "dpll_gmac_h14x2_ctrl_mux_ck";
+ compatible = "ti,mux-clock";
+ clocks = <&dpll_gmac_ck>, <&dpll_gmac_h14x2_ctrl_ck>;
+ ti,latch-bit = <26>;
+ assigned-clocks = <&dpll_gmac_h14x2_ctrl_mux_ck>;
+ assigned-clock-parents = <&dpll_gmac_h14x2_ctrl_ck>;
+ #clock-cells = <0>;
+ };
};
};
diff --git a/arch/arm/boot/dts/ti/omap/dra7xx-clocks.dtsi b/arch/arm/boot/dts/ti/omap/dra7xx-clocks.dtsi
index 06466d36caa9..04f08b8c64d2 100644
--- a/arch/arm/boot/dts/ti/omap/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/ti/omap/dra7xx-clocks.dtsi
@@ -285,13 +285,21 @@
ti,invert-autoidle-bit;
};
- dpll_core_byp_mux: clock-dpll-core-byp-mux-23@12c {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_core_byp_mux";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
- ti,bit-shift = <23>;
- reg = <0x012c>;
+ /* CM_CLKSEL_DPLL_CORE */
+ clock@12c {
+ compatible = "ti,clksel";
+ reg = <0x12c>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_core_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_core_byp_mux";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ #clock-cells = <0>;
+ };
};
dpll_core_ck: clock@120 {
@@ -368,13 +376,21 @@
clock-div = <1>;
};
- dpll_dsp_byp_mux: clock-dpll-dsp-byp-mux-23@240 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_dsp_byp_mux";
- clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
- ti,bit-shift = <23>;
- reg = <0x0240>;
+ /* CM_CLKSEL_DPLL_DSP */
+ clock@240 {
+ compatible = "ti,clksel";
+ reg = <0x240>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_dsp_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_dsp_byp_mux";
+ clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+ #clock-cells = <0>;
+ };
};
dpll_dsp_ck: clock@234 {
@@ -410,13 +426,21 @@
clock-div = <1>;
};
- dpll_iva_byp_mux: clock-dpll-iva-byp-mux-23@1ac {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_iva_byp_mux";
- clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
- ti,bit-shift = <23>;
- reg = <0x01ac>;
+ /* CM_CLKSEL_DPLL_IVA */
+ clock@1ac {
+ compatible = "ti,clksel";
+ reg = <0x1ac>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_iva_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_iva_byp_mux";
+ clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+ #clock-cells = <0>;
+ };
};
dpll_iva_ck: clock@1a0 {
@@ -452,13 +476,21 @@
clock-div = <1>;
};
- dpll_gpu_byp_mux: clock-dpll-gpu-byp-mux-23@2e4 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_gpu_byp_mux";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
- ti,bit-shift = <23>;
- reg = <0x02e4>;
+ /* CM_CLKSEL_DPLL_GPU */
+ clock@2e4 {
+ compatible = "ti,clksel";
+ reg = <0x2e4>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_gpu_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_gpu_byp_mux";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ #clock-cells = <0>;
+ };
};
dpll_gpu_ck: clock@2d8 {
@@ -506,13 +538,21 @@
clock-div = <1>;
};
- dpll_ddr_byp_mux: clock-dpll-ddr-byp-mux-23@21c {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_ddr_byp_mux";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
- ti,bit-shift = <23>;
- reg = <0x021c>;
+ /* CM_CLKSEL_DPLL_DDR */
+ clock@21c {
+ compatible = "ti,clksel";
+ reg = <0x21c>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_ddr_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_ddr_byp_mux";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ #clock-cells = <0>;
+ };
};
dpll_ddr_ck: clock@210 {
@@ -535,13 +575,21 @@
ti,invert-autoidle-bit;
};
- dpll_gmac_byp_mux: clock-dpll-gmac-byp-mux-23@2b4 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_gmac_byp_mux";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
- ti,bit-shift = <23>;
- reg = <0x02b4>;
+ /* CM_CLKSEL_DPLL_GMAC */
+ clock@2b4 {
+ compatible = "ti,clksel";
+ reg = <0x2b4>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_gmac_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_gmac_byp_mux";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ #clock-cells = <0>;
+ };
};
dpll_gmac_ck: clock@2a8 {
@@ -618,13 +666,21 @@
clock-div = <1>;
};
- dpll_eve_byp_mux: clock-dpll-eve-byp-mux-23@290 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_eve_byp_mux";
- clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
- ti,bit-shift = <23>;
- reg = <0x0290>;
+ /* CM_CLKSEL_DPLL_EVE */
+ clock@290 {
+ compatible = "ti,clksel";
+ reg = <0x290>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_eve_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_eve_byp_mux";
+ clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+ #clock-cells = <0>;
+ };
};
dpll_eve_ck: clock@284 {
@@ -838,15 +894,23 @@
clock-div = <1>;
};
- l3_iclk_div: clock-l3-iclk-div-4@100 {
- #clock-cells = <0>;
- compatible = "ti,divider-clock";
- clock-output-names = "l3_iclk_div";
- ti,max-div = <2>;
- ti,bit-shift = <4>;
- reg = <0x0100>;
- clocks = <&dpll_core_h12x2_ck>;
- ti,index-power-of-two;
+ /* CM_CLKSEL_CORE */
+ clock@100 {
+ compatible = "ti,clksel";
+ reg = <0x100>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ l3_iclk_div: clock@4 {
+ reg = <4>;
+ compatible = "ti,divider-clock";
+ clock-output-names = "l3_iclk_div";
+ ti,max-div = <2>;
+ clocks = <&dpll_core_h12x2_ck>;
+ ti,index-power-of-two;
+ #clock-cells = <0>;
+ };
};
l4_root_clk_div: clock-l4-root-clk-div {
@@ -911,12 +975,21 @@
ti,index-starts-at-one;
};
- abe_dpll_sys_clk_mux: clock-abe-dpll-sys-clk-mux@118 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "abe_dpll_sys_clk_mux";
- clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x0118>;
+ /* CM_CLKSEL_ABE_PLL_SYS */
+ clock@118 {
+ compatible = "ti,clksel";
+ reg = <0x118>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ abe_dpll_sys_clk_mux: clock@0 {
+ reg = <0>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "abe_dpll_sys_clk_mux";
+ clocks = <&sys_clkin1>, <&sys_clkin2>;
+ #clock-cells = <0>;
+ };
};
abe_dpll_bypass_clk_mux: clock-abe-dpll-bypass-clk-mux@114 {
@@ -1018,14 +1091,23 @@
ti,index-power-of-two;
};
- dsp_gclk_div: clock-dsp-gclk-div@18c {
- #clock-cells = <0>;
- compatible = "ti,divider-clock";
- clock-output-names = "dsp_gclk_div";
- clocks = <&dpll_dsp_m2_ck>;
- ti,max-div = <64>;
- reg = <0x018c>;
- ti,index-power-of-two;
+ /* CM_CLKSEL_DPLL_USB */
+ clock@18c {
+ compatible = "ti,clksel";
+ reg = <0x18c>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsp_gclk_div: clock@0 {
+ reg = <0>;
+ compatible = "ti,divider-clock";
+ clock-output-names = "dsp_gclk_div";
+ clocks = <&dpll_dsp_m2_ck>;
+ ti,max-div = <64>;
+ ti,index-power-of-two;
+ #clock-cells = <0>;
+ };
};
gpu_dclk: clock-gpu-dclk@1a0 {
@@ -1326,13 +1408,21 @@
clock-div = <1>;
};
- dpll_per_byp_mux: clock-dpll-per-byp-mux-23@14c {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_per_byp_mux";
- clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
- ti,bit-shift = <23>;
- reg = <0x014c>;
+ /* CM_CLKSEL_DPLL_PER */
+ clock@14c {
+ compatible = "ti,clksel";
+ reg = <0x14c>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_per_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_per_byp_mux";
+ clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+ #clock-cells = <0>;
+ };
};
dpll_per_ck: clock@140 {
@@ -1364,13 +1454,21 @@
clock-div = <1>;
};
- dpll_usb_byp_mux: clock-dpll-usb-byp-mux-23@18c {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clock-output-names = "dpll_usb_byp_mux";
- clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
- ti,bit-shift = <23>;
- reg = <0x018c>;
+ /* CM_CLKSEL_DPLL_USB */
+ clock@18c {
+ compatible = "ti,clksel";
+ reg = <0x18c>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpll_usb_byp_mux: clock@23 {
+ reg = <23>;
+ compatible = "ti,mux-clock";
+ clock-output-names = "dpll_usb_byp_mux";
+ clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+ #clock-cells = <0>;
+ };
};
dpll_usb_ck: clock@180 {
diff --git a/arch/arm/boot/dts/ti/omap/omap3-n900.dts b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
index d33485341251..07c5b963af78 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+++ b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
@@ -754,7 +754,7 @@
ti,current-limit = <100>;
ti,weak-battery-voltage = <3400>;
ti,battery-regulation-voltage = <4200>;
- ti,charge-current = <650>;
+ ti,charge-current = <950>;
ti,termination-current = <100>;
ti,resistor-sense = <68>;
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 01b5a5a73f03..42cb1c854118 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -3,7 +3,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_EPOLL is not set
CONFIG_ARCH_MULTI_V4=y
# CONFIG_ARCH_MULTI_V7 is not set
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 7327fce87808..cf2480dce285 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -336,6 +336,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_TEST=m
CONFIG_USB_EHSET_TEST_FIXTURE=m
+CONFIG_USB_ONBOARD_DEV=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MXS_PHY=y
CONFIG_USB_GADGET=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 59c4835ffc97..c1291ca290b2 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -12,7 +12,7 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EXPERT=y
CONFIG_PROFILING=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index d169da9b2824..f55c231e0870 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_UID16 is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 1d41e73f4903..34d079e03b3c 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
index 3ed73f184d83..e995e50537ef 100644
--- a/arch/arm/configs/mps2_defconfig
+++ b/arch/arm/configs/mps2_defconfig
@@ -5,7 +5,7 @@ CONFIG_LOG_BUF_SHIFT=16
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
# CONFIG_UID16 is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 729ea8157e2a..025b595dd837 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -9,7 +9,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_SHMEM is not set
# CONFIG_KALLSYMS is not set
CONFIG_PROFILING=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 091e1840933c..56925adfe842 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -139,8 +139,8 @@ CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_RCAR_DU=y
# CONFIG_DRM_RCAR_USE_MIPI_DSI is not set
CONFIG_DRM_SHMOBILE=y
-CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_LVDS_CODEC=y
CONFIG_DRM_SII902X=y
@@ -235,3 +235,4 @@ CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_FS=y
+CONFIG_ARM_DEBUG_WX=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index b9fe3fbed5ae..3baec075d1ef 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -6,7 +6,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
# CONFIG_UID16 is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index bddc82f78942..a83d29fed175 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -110,6 +110,7 @@ CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_SIMPLE_BRIDGE=y
+CONFIG_DRM_DW_HDMI=y
CONFIG_DRM_LIMA=y
CONFIG_FB_SIMPLE=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index aebe2c8f6a68..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -21,6 +21,7 @@
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
deleted file mode 100644
index ce20a43c3033..000000000000
--- a/arch/arm/include/asm/fb.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 724f8dac1e5b..4186fbf7341f 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -118,6 +118,10 @@
# define MULTI_CACHE 1
#endif
+#ifdef CONFIG_CPU_CACHE_NOP
+# define MULTI_CACHE 1
+#endif
+
#if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1
#endif
@@ -126,29 +130,15 @@
#error Unknown cache maintenance model
#endif
-#ifndef __ASSEMBLER__
-static inline void nop_flush_icache_all(void) { }
-static inline void nop_flush_kern_cache_all(void) { }
-static inline void nop_flush_kern_cache_louis(void) { }
-static inline void nop_flush_user_cache_all(void) { }
-static inline void nop_flush_user_cache_range(unsigned long a,
- unsigned long b, unsigned int c) { }
-
-static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-static inline int nop_coherent_user_range(unsigned long a,
- unsigned long b) { return 0; }
-static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
-static inline void nop_dma_flush_range(const void *a, const void *b) { }
-
-static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
+#endif
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
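The __cpuc_* names above are resolved purely by token pasting: __glue() joins the per-CPU prefix selected by the configuration (for example v7) onto the operation name, so a non-MULTI_CACHE build calls the routine directly. The new fallback simply aliases the LoUIS flush to the full kernel-cache flush on cores that lack a dedicated *_louis implementation. A minimal standalone sketch of the token-pasting idiom, written as ordinary user-space C with a stub in place of the assembly routine (the macro names mirror asm/glue.h but are redefined here purely for illustration):

    #include <stdio.h>

    /* Two levels of pasting so that _CACHE is macro-expanded before ## joins it. */
    #define ____glue(name, fn)  name##fn
    #define __glue(name, fn)    ____glue(name, fn)

    #define _CACHE v7   /* stand-in for the prefix chosen by CONFIG_CPU_CACHE_V7 */

    /* Expands to v7_flush_kern_cache_louis at preprocessing time. */
    #define __cpuc_flush_kern_louis __glue(_CACHE, _flush_kern_cache_louis)

    /* Stub standing in for the assembly implementation in cache-v7.S. */
    static void v7_flush_kern_cache_louis(void)
    {
            puts("flush caches to the Level of Unification Inner Shareable");
    }

    int main(void)
    {
            __cpuc_flush_kern_louis();   /* direct call, no MULTI_CACHE indirection */
            return 0;
    }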
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index a3a82b7158d4..b766c4b373f6 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -15,10 +15,10 @@
#include <asm/hugetlb-3level.h>
#include <asm-generic/hugetlb.h>
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 62358d3ca0a8..e7f9961c53b2 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -84,6 +84,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
#define ARM_ENTRY_BREAKPOINT 0x1
#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
+#define ARM_ENTRY_CFI_BREAKPOINT 0x3
#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
/* DSCR monitor/halting bits. */
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 000000000000..2189e507c8e0
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
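The new hook lets the generic "memory deny write execute" prctl refuse to engage on pre-ARMv6 cores, where the page tables cannot express a non-executable mapping and the policy would be meaningless. A hedged user-space sketch of the prctl that this hook gates; the PR_SET_MDWE and PR_MDWE_REFUSE_EXEC_GAIN values are taken from linux/prctl.h as the author understands them and are guarded in case the libc headers already provide them:

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE
    #define PR_SET_MDWE              65          /* assumed from linux/prctl.h */
    #define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
    #endif

    int main(void)
    {
            /* Ask the kernel to refuse any mapping that later gains PROT_EXEC. */
            if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L) != 0)
                    /* On pre-ARMv6 cores the new arch hook makes this fail. */
                    fprintf(stderr, "PR_SET_MDWE: %s\n", strerror(errno));
            else
                    puts("write-implies-noexec policy enabled");
            return 0;
    }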
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index b0a262566eb9..6b5392e20f41 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -213,8 +213,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & PMD_TYPE_SECT)
+#define pmd_bad(pmd) pmd_leaf(pmd)
#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
@@ -241,7 +241,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
* define empty stubs for use by pin_page_for_write.
*/
#define pmd_hugewillfault(pmd) (0)
-#define pmd_thp_or_huge(pmd) (0)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 2f35b4eddaa8..dfab3e982cbf 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -14,6 +14,7 @@
* + Level 1/2 descriptor
* - common
*/
+#define PUD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
@@ -74,6 +75,7 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#ifndef CONFIG_CPU_TTBR0_PAN
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -93,5 +95,30 @@
#endif
#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used while uaccess is
+ * disabled, i.e. when TTBR0 page table walks are disabled.
+ */
+#define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */
+#define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1_MASK (3 << 28)
+#define TTBCR_ORGN1_MASK (3 << 26)
+#define TTBCR_IRGN1_MASK (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ_MASK (7 << 16)
+#define TTBCR_SH0_MASK (3 << 12)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ_MASK (7 << 0)
#endif
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 4b1d9eb3908a..fa5939eb9864 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -112,7 +112,7 @@
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) (pud_val(pud))
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
@@ -137,7 +137,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -190,7 +190,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
-#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 280396483f5d..b4986a23d852 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -178,6 +178,18 @@ extern void cpu_resume(void);
})
#endif
+static inline unsigned int cpu_get_ttbcr(void)
+{
+ unsigned int ttbcr;
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
+}
+
#else /*!CONFIG_MMU */
#define cpu_switch_mm(pgd,mm) { }
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 14a38cc67e0b..6eb311fb2da0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -20,6 +20,7 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
+ u32 ttbcr;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 853c4f81ba4a..ad36b6570067 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -22,9 +22,9 @@
/* Enable topology flag updates */
#define arch_update_cpu_topology topology_update_cpu_topology
-/* Replace task scheduler's default thermal pressure API */
-#define arch_scale_thermal_pressure topology_get_thermal_pressure
-#define arch_update_thermal_pressure topology_update_thermal_pressure
+/* Replace task scheduler's default HW pressure API */
+#define arch_scale_hw_pressure topology_get_hw_pressure
+#define arch_update_hw_pressure topology_update_hw_pressure
#else
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index 65da32e1f1c1..4bccd895d954 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -39,8 +39,9 @@
#endif
.endm
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -50,11 +51,9 @@
.if \isb
instr_sync
.endif
-#endif
.endm
.macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -64,15 +63,61 @@
.if \isb
instr_sync
.endif
-#endif
.endm
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Disable TTBR0 page table walks (EPD0 = 1), use the reserved ASID
+ * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+ * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ orr \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ bic \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#else
+
+ .macro uaccess_disable, tmp, isb=1
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ .endm
+
+#endif
+
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...) x
#else
#define DACR(x...)
#endif
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...) x
+#else
+#define PAN(x...)
+#endif
+
/*
* Save the address limit on entry to a privileged exception.
*
@@ -86,6 +131,8 @@
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
+ PAN( mrc p15, 0, \tmp0, c2, c0, 2)
+ PAN( str \tmp0, [sp, #SVC_TTBCR])
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
@@ -104,8 +151,11 @@
.macro uaccess_exit, tsk, tmp0, tmp1
DACR( ldr \tmp0, [sp, #SVC_DACR])
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ PAN( ldr \tmp0, [sp, #SVC_TTBCR])
+ PAN( mcr p15, 0, \tmp0, c2, c0, 2)
.endm
#undef DACR
+#undef PAN
#endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 9556d04387f7..6c9c16d767cf 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -14,6 +14,8 @@
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
#include <asm/compiler.h>
#include <asm/extable.h>
@@ -24,9 +26,10 @@
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
static __always_inline unsigned int uaccess_save_and_enable(void)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
@@ -34,19 +37,49 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
-#else
- return 0;
-#endif
}
static __always_inline void uaccess_restore(unsigned int flags)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
-#endif
}
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_ttbcr = cpu_get_ttbcr();
+
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+ isb();
+
+ return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+ cpu_set_ttbcr(flags);
+ isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+ return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
+
/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
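All three variants above keep the same uaccess_save_and_enable()/uaccess_restore() pair, so callers bracket user accesses identically whether PAN is implemented through DACR domains, through TTBCR, or not at all. A minimal sketch of that calling pattern follows; raw_read_user() is a hypothetical placeholder for the exception-table-protected load that the real __get_user() machinery performs, not an existing kernel helper:

    #include <linux/uaccess.h>

    /* Sketch only: raw_read_user() is a placeholder, not a real kernel API. */
    static inline int read_user_word(unsigned int *dst,
                                     const unsigned int __user *src)
    {
            unsigned int state;
            int err;

            state = uaccess_save_and_enable();   /* open the user window  */
            err = raw_read_user(dst, src);       /* bounded user access   */
            uaccess_restore(state);              /* close it again        */

            return err;
    }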
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4915662842ff..4853875740d0 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -85,6 +85,7 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
+ DEFINE(SVC_TTBCR, offsetof(struct svc_pt_regs, ttbcr));
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index 3e7bcaca5e07..bc598e3d8dd2 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -271,6 +271,10 @@ ENTRY(ftrace_stub)
ret lr
ENDPROC(ftrace_stub)
+ENTRY(ftrace_stub_graph)
+ ret lr
+ENDPROC(ftrace_stub_graph)
+
#ifdef CONFIG_DYNAMIC_FTRACE
__INIT
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index dc0fb7a81371..a12efd0f43e8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -17,6 +17,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cfi.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
@@ -626,7 +627,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
- if (uses_default_overflow_handler(bp)) {
+ if (is_default_overflow_handler(bp)) {
/*
* Mismatch breakpoints are required for single-stepping
* breakpoints.
@@ -798,7 +799,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
- if (!uses_default_overflow_handler(wp))
+ if (!is_default_overflow_handler(wp))
continue;
step:
enable_single_step(wp, instruction_pointer(regs));
@@ -811,7 +812,7 @@ step:
info->trigger = addr;
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs);
- if (uses_default_overflow_handler(wp))
+ if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
}
@@ -886,7 +887,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (uses_default_overflow_handler(bp))
+ if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
@@ -903,6 +904,37 @@ unlock:
watchpoint_single_step_handler(addr);
}
+#ifdef CONFIG_CFI_CLANG
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+ /*
+ * TODO: implementing target and type to pass to CFI using the more
+ * elaborate report_cfi_failure() requires compiler work. To be able
+ * to properly extract target information the compiler needs to
+ * emit a stable instruction sequence for the CFI checks so we can
+ * decode the instructions preceding the trap and figure out which
+ * registers were used.
+ */
+
+ switch (report_cfi_failure_noaddr(regs, instruction_pointer(regs))) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - CFI", regs, 0);
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ /* Skip the breaking instruction */
+ instruction_pointer(regs) += 4;
+ break;
+ default:
+ die("Unknown CFI error", regs, 0);
+ break;
+ }
+}
+#else
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+}
+#endif
+
/*
* Called from either the Data Abort Handler [watchpoint] or the
* Prefetch Abort Handler [breakpoint] with interrupts disabled.
@@ -932,6 +964,9 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
+ case ARM_ENTRY_CFI_BREAKPOINT:
+ hw_breakpoint_cfi_handler(regs);
+ break;
default:
ret = 1; /* Unhandled fault. */
}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index fe28fc1f759d..dab42d066d06 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -32,6 +32,7 @@
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index e74d84f58b77..677f218f7e84 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -12,48 +12,14 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/gfp.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#include <asm/opcodes.h>
-#ifdef CONFIG_XIP_KERNEL
-/*
- * The XIP kernel text is mapped in the module area for modules and
- * some other stuff to work without any indirect relocations.
- * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
- * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
- */
-#undef MODULES_VADDR
-#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
-#endif
-
-#ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
-{
- gfp_t gfp_mask = GFP_KERNEL;
- void *p;
-
- /* Silence the initial allocation */
- if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
- gfp_mask |= __GFP_NOWARN;
-
- p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
- if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
- return p;
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-#endif
-
bool module_init_section(const char *name)
{
return strstarts(name, ".init") ||
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index a86a1d4f3461..93afd1005b43 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -127,6 +127,10 @@ cpu_resume_after_mmu:
instr_sync
#endif
bl cpu_init @ restore the und/abt/irq banked regs
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ mov r0, sp
+ bl kasan_unpoison_task_stack_below
+#endif
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c3ec3861dd07..58a6441b58c4 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -12,6 +12,7 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
@@ -27,6 +28,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Needed for the MMU disabling/enabling code to be able to run from
+ * TTBR0 addresses.
+ */
+ if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+ uaccess_save_and_enable();
+
+ /*
* Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index ef0058de432b..2336ee2aa44a 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,7 +42,7 @@
* can take this difference into account during load balance. A per cpu
* structure is preferred because each CPU updates its own cpu_capacity field
* during the load balance except for idle cores. One idle core is selected
- * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+ * to run the sched_balance_domains for all idle cores and the cpu_capacity can be
* updated during this sequence.
*/
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 72c82a4d63ac..480e307501bb 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>
+#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6928781e6bee..c289bde04743 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -13,7 +13,8 @@
.text
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
@@ -25,7 +26,23 @@
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro save_regs
+ mrc p15, 0, ip, c2, c0, 2 @ read TTBCR
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c2, c0, 2 @ restore TTBCR
+ ret lr
+ .endm
+
#else
+
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -33,6 +50,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+
#endif
.macro load1b, reg1
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 3ac05177d097..33b08ca1c242 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -5,6 +5,7 @@
* Copyright (C) 1995, 1996 Russell King
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/delay.h>
@@ -24,21 +25,26 @@
* HZ <= 1000
*/
-ENTRY(__loop_udelay)
+SYM_TYPED_FUNC_START(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
-ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
+ b __loop_const_udelay
+SYM_FUNC_END(__loop_udelay)
+
+SYM_TYPED_FUNC_START(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
+ b __loop_delay
+SYM_FUNC_END(__loop_const_udelay)
.align 3
@ Delay routine
-ENTRY(__loop_delay)
+SYM_TYPED_FUNC_START(__loop_delay)
subs r0, r0, #1
#if 0
retls lr
@@ -58,6 +64,4 @@ ENTRY(__loop_delay)
#endif
bhi __loop_delay
ret lr
-ENDPROC(__loop_udelay)
-ENDPROC(__loop_const_udelay)
-ENDPROC(__loop_delay)
+SYM_FUNC_END(__loop_delay)
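The switch from ENTRY()/ENDPROC() to SYM_TYPED_FUNC_START()/SYM_FUNC_END() matters because these delay routines are reached through a function-pointer table (arm_delay_ops) rather than by direct call, and with CONFIG_CFI_CLANG every indirect call site checks a type hash placed just before the callee. A standalone sketch of that mechanism with a stand-in ops table, not the kernel's actual arm_delay_ops declaration:

    #include <stdio.h>

    /* Stand-in for an ops table of delay routines selected at runtime. */
    struct delay_ops {
            void (*udelay)(unsigned long usecs);
    };

    /* In a kCFI kernel build the compiler would emit a hash of the prototype
     * "void (unsigned long)" in front of loop_udelay(); SYM_TYPED_FUNC_START
     * provides the same marker for the assembly version so the check at the
     * indirect call site passes. */
    static void loop_udelay(unsigned long usecs)
    {
            for (volatile unsigned long i = 0; i < usecs * 100; i++)
                    ;
    }

    static struct delay_ops ops = { .udelay = loop_udelay };

    int main(void)
    {
            ops.udelay(10);          /* indirect, type-checked call */
            puts("delayed");
            return 0;
    }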
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 2f6163f05e93..c0ac7796d775 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -56,10 +56,10 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
* to see that it's still huge and whether or not we will
* need to fault on write.
*/
- if (unlikely(pmd_thp_or_huge(*pmd))) {
+ if (unlikely(pmd_leaf(*pmd))) {
ptl = &current->mm->page_table_lock;
spin_lock(ptl);
- if (unlikely(!pmd_thp_or_huge(*pmd)
+ if (unlikely(!pmd_leaf(*pmd)
|| pmd_hugewillfault(*pmd))) {
spin_unlock(ptl);
return 0;
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 25893d109190..b68cb86dbe4c 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -437,6 +437,7 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
{
*pmu_mmdc = (struct mmdc_pmu) {
.pmu = (struct pmu) {
+ .parent = dev,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
.event_init = mmdc_pmu_event_init,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 31755a378c73..ff2a4a4d8220 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
static struct gpiod_lookup_table tusb_gpio_table = {
.dev_id = "musb-tusb",
.table = {
- GPIO_LOOKUP("gpio-0-15", 0, "enable",
- GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-48-63", 10, "int",
- GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -140,12 +138,11 @@ static int slot1_cover_open;
static int slot2_cover_open;
static struct device *mmc_device;
-static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
+static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
- GPIO_LOOKUP("gpio-80-111", 16,
- "switch", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
+ /* Slot switch, GPIO 96 */
+ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
/* Slot index 1, VSD power, GPIO 23 */
- GPIO_LOOKUP_IDX("gpio-16-31", 7,
- "vsd", 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
/* Slot index 1, VIO power, GPIO 9 */
- GPIO_LOOKUP_IDX("gpio-0-15", 9,
- "vio", 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
static void __init n8x0_mmc_init(void)
{
- gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
-
if (board_is_n810()) {
mmc1_data.slots[0].name = "external";
@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
mmc1_data.slots[1].name = "internal";
mmc1_data.slots[1].ban_openended = 1;
gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
+ } else {
+ gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
}
mmc1_data.nr_slots = 2;
diff --git a/arch/arm/mach-orion5x/board-d2net.c b/arch/arm/mach-orion5x/board-d2net.c
index 0297e302d7bc..09bf366d05ff 100644
--- a/arch/arm/mach-orion5x/board-d2net.c
+++ b/arch/arm/mach-orion5x/board-d2net.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/leds.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
@@ -55,12 +56,9 @@ static struct gpio_led d2net_leds[] = {
{
.name = "d2net:blue:sata",
.default_trigger = "default-on",
- .gpio = D2NET_GPIO_BLUE_LED_OFF,
- .active_low = 1,
},
{
.name = "d2net:red:fail",
- .gpio = D2NET_GPIO_RED_LED,
},
};
@@ -77,6 +75,17 @@ static struct platform_device d2net_gpio_leds = {
},
};
+static struct gpiod_lookup_table d2net_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", D2NET_GPIO_BLUE_LED_OFF, NULL,
+ 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", D2NET_GPIO_RED_LED, NULL,
+ 1, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init d2net_gpio_leds_init(void)
{
int err;
@@ -91,6 +100,7 @@ static void __init d2net_gpio_leds_init(void)
if (err)
pr_err("d2net: failed to configure blue LED blink GPIO\n");
+ gpiod_add_lookup_table(&d2net_leds_gpio_table);
platform_device_register(&d2net_gpio_leds);
}
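The board files in this series stop hard-coding GPIO numbers and polarity in struct gpio_led and instead register a gpiod_lookup_table keyed on the consumer device name, letting gpiolib hand out descriptors with the polarity already applied. A driver-side sketch of how such a mapping is consumed, assuming a device bound with dev_id "leds-gpio"; the probe function and its error handling are illustrative and not taken from the leds-gpio driver:

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    /* With the lookup table registered, index 0 for this device resolves to
     * orion_gpio0/D2NET_GPIO_BLUE_LED_OFF, and GPIO_ACTIVE_LOW is handled by
     * gpiolib, so "1" below means logically on. */
    static int demo_probe(struct platform_device *pdev)
    {
            struct gpio_desc *blue;

            blue = devm_gpiod_get_index(&pdev->dev, NULL, 0, GPIOD_OUT_LOW);
            if (IS_ERR(blue))
                    return PTR_ERR(blue);

            gpiod_set_value(blue, 1);
            return 0;
    }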
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index d69259b6b60d..062109efa0ec 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -14,6 +14,7 @@
*
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -254,37 +255,64 @@ error_fail:
static struct gpio_led dns323ab_leds[] = {
{
.name = "power:blue",
- .gpio = DNS323_GPIO_LED_POWER2,
.default_trigger = "default-on",
}, {
.name = "right:amber",
- .gpio = DNS323_GPIO_LED_RIGHT_AMBER,
- .active_low = 1,
}, {
.name = "left:amber",
- .gpio = DNS323_GPIO_LED_LEFT_AMBER,
- .active_low = 1,
},
};
+static struct gpiod_lookup_table dns323a1_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_POWER2, NULL,
+ 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_RIGHT_AMBER, NULL,
+ 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_LEFT_AMBER, NULL,
+ 2, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
+/* B1 is the same but power LED is active high */
+static struct gpiod_lookup_table dns323b1_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_POWER2, NULL,
+ 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_RIGHT_AMBER, NULL,
+ 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323_GPIO_LED_LEFT_AMBER, NULL,
+ 2, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
static struct gpio_led dns323c_leds[] = {
{
.name = "power:blue",
- .gpio = DNS323C_GPIO_LED_POWER,
.default_trigger = "timer",
- .active_low = 1,
}, {
.name = "right:amber",
- .gpio = DNS323C_GPIO_LED_RIGHT_AMBER,
- .active_low = 1,
}, {
.name = "left:amber",
- .gpio = DNS323C_GPIO_LED_LEFT_AMBER,
- .active_low = 1,
},
};
+static struct gpiod_lookup_table dns323c_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323C_GPIO_LED_POWER, NULL,
+ 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323C_GPIO_LED_RIGHT_AMBER, NULL,
+ 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", DNS323C_GPIO_LED_LEFT_AMBER, NULL,
+ 2, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
static struct gpio_led_platform_data dns323ab_led_data = {
.num_leds = ARRAY_SIZE(dns323ab_leds),
@@ -621,16 +649,21 @@ static void __init dns323_init(void)
/* The 5181 power LED is active low and requires
* DNS323_GPIO_LED_POWER1 to also be low.
*/
- dns323ab_leds[0].active_low = 1;
- gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
- gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
- fallthrough;
+ gpiod_add_lookup_table(&dns323a1_leds_gpio_table);
+ gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
+ gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
+ i2c_register_board_info(0, dns323ab_i2c_devices,
+ ARRAY_SIZE(dns323ab_i2c_devices));
+
+ break;
case DNS323_REV_B1:
+ gpiod_add_lookup_table(&dns323b1_leds_gpio_table);
i2c_register_board_info(0, dns323ab_i2c_devices,
ARRAY_SIZE(dns323ab_i2c_devices));
break;
case DNS323_REV_C1:
/* Hookup LEDs & Buttons */
+ gpiod_add_lookup_table(&dns323c_leds_gpio_table);
dns323_gpio_leds.dev.platform_data = &dns323c_led_data;
dns323_button_device.dev.platform_data = &dns323c_button_data;
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 2bf8ec75e908..b7327a612835 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -8,6 +8,7 @@
* License, or (at your option) any later version.
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -139,34 +140,45 @@ static struct i2c_board_info __initdata mv2120_i2c_rtc = {
static struct gpio_led mv2120_led_pins[] = {
{
.name = "mv2120:blue:health",
- .gpio = 0,
},
{
.name = "mv2120:red:health",
- .gpio = 1,
},
{
.name = "mv2120:led:bright",
- .gpio = 4,
.default_trigger = "default-on",
},
{
.name = "mv2120:led:dimmed",
- .gpio = 5,
},
{
.name = "mv2120:red:sata0",
- .gpio = 8,
- .active_low = 1,
},
{
.name = "mv2120:red:sata1",
- .gpio = 9,
- .active_low = 1,
},
};
+static struct gpiod_lookup_table mv2120_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", 0, NULL,
+ 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", 1, NULL,
+ 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", 4, NULL,
+ 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", 5, NULL,
+ 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", 8, NULL,
+ 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", 9, NULL,
+ 5, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
static struct gpio_led_platform_data mv2120_led_data = {
.leds = mv2120_led_pins,
.num_leds = ARRAY_SIZE(mv2120_led_pins),
@@ -219,6 +231,7 @@ static void __init mv2120_init(void)
gpio_free(MV2120_GPIO_RTC_IRQ);
}
i2c_register_board_info(0, &mv2120_i2c_rtc, 1);
+ gpiod_add_lookup_table(&mv2120_leds_gpio_table);
platform_device_register(&mv2120_leds);
/* register mv2120 specific power-off method */
diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c
index 695cc683cd83..6ad9740b426b 100644
--- a/arch/arm/mach-orion5x/net2big-setup.c
+++ b/arch/arm/mach-orion5x/net2big-setup.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/delay.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -214,19 +215,30 @@ err_free_1:
static struct gpio_led net2big_leds[] = {
{
.name = "net2big:red:power",
- .gpio = NET2BIG_GPIO_PWR_RED_LED,
},
{
.name = "net2big:blue:power",
- .gpio = NET2BIG_GPIO_PWR_BLUE_LED,
},
{
.name = "net2big:red:sata0",
- .gpio = NET2BIG_GPIO_SATA0_RED_LED,
},
{
.name = "net2big:red:sata1",
- .gpio = NET2BIG_GPIO_SATA1_RED_LED,
+ },
+};
+
+static struct gpiod_lookup_table net2big_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", NET2BIG_GPIO_PWR_RED_LED, NULL,
+ 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", NET2BIG_GPIO_PWR_BLUE_LED, NULL,
+ 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", NET2BIG_GPIO_SATA0_RED_LED, NULL,
+ 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("orion_gpio0", NET2BIG_GPIO_SATA1_RED_LED, NULL,
+ 3, GPIO_ACTIVE_HIGH),
+ { },
},
};
@@ -282,6 +294,7 @@ static void __init net2big_gpio_leds_init(void)
if (err)
pr_err("net2big: failed to setup SATA1 blue LED GPIO\n");
+ gpiod_add_lookup_table(&net2big_leds_gpio_table);
platform_device_register(&net2big_gpio_leds);
}
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 6f60dc1dfa22..8131982c10d9 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Martin Michlmayr <tbm@cyrius.com>
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -168,20 +169,27 @@ static struct i2c_board_info __initdata qnap_ts409_i2c_rtc = {
static struct gpio_led ts409_led_pins[] = {
{
.name = "ts409:red:sata1",
- .gpio = 4,
- .active_low = 1,
}, {
.name = "ts409:red:sata2",
- .gpio = 5,
- .active_low = 1,
}, {
.name = "ts409:red:sata3",
- .gpio = 6,
- .active_low = 1,
}, {
.name = "ts409:red:sata4",
- .gpio = 7,
- .active_low = 1,
+ },
+};
+
+static struct gpiod_lookup_table ts409_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("orion_gpio0", 4, NULL,
+ 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", 5, NULL,
+ 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", 6, NULL,
+ 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("orion_gpio0", 7, NULL,
+ 3, GPIO_ACTIVE_LOW),
+ { },
},
};
@@ -300,6 +308,7 @@ static void __init qnap_ts409_init(void)
if (qnap_ts409_i2c_rtc.irq == 0)
pr_warn("qnap_ts409_init: failed to get RTC IRQ\n");
i2c_register_board_info(0, &qnap_ts409_i2c_rtc, 1);
+ gpiod_add_lookup_table(&ts409_leds_gpio_table);
platform_device_register(&ts409_leds);
/* register tsx09 specific power-off method */
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 661b3fc43275..1e4cd502340e 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -7,7 +7,6 @@
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/spi/pxa2xx_spi.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/soc/pxa/cpu.h>
@@ -665,23 +664,6 @@ struct platform_device pxa27x_device_gpio = {
.resource = pxa_resource_gpio,
};
-/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
- * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */
-void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info)
-{
- struct platform_device *pd;
-
- pd = platform_device_alloc("pxa2xx-spi", id);
- if (pd == NULL) {
- printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n",
- id);
- return;
- }
-
- pd->dev.platform_data = info;
- platform_device_add(pd);
-}
-
static struct resource pxa_dma_resource[] = {
[0] = {
.start = 0x40000000,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index cc691b199429..3c5f5a3cb480 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -18,10 +18,10 @@
#include <linux/i2c.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/pca953x.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
-#include <linux/spi/pxa2xx_spi.h>
#include <linux/mtd/sharpsl.h>
#include <linux/mtd/physmap.h>
#include <linux/input-event-codes.h>
@@ -569,10 +569,6 @@ static struct spi_board_info spitz_spi_devices[] = {
},
};
-static struct pxa2xx_spi_controller spitz_spi_info = {
- .num_chipselect = 3,
-};
-
static struct gpiod_lookup_table spitz_spi_gpio_table = {
.dev_id = "spi2",
.table = {
@@ -583,8 +579,21 @@ static struct gpiod_lookup_table spitz_spi_gpio_table = {
},
};
+static const struct property_entry spitz_spi_properties[] = {
+ PROPERTY_ENTRY_U32("num-cs", 3),
+ { }
+};
+
+static const struct software_node spitz_spi_node = {
+ .properties = spitz_spi_properties,
+};
+
static void __init spitz_spi_init(void)
{
+ struct platform_device *pd;
+ int id = 2;
+ int err;
+
if (machine_is_akita())
gpiod_add_lookup_table(&akita_lcdcon_gpio_table);
else
@@ -592,7 +601,21 @@ static void __init spitz_spi_init(void)
gpiod_add_lookup_table(&spitz_ads7846_gpio_table);
gpiod_add_lookup_table(&spitz_spi_gpio_table);
- pxa2xx_set_spi_info(2, &spitz_spi_info);
+
+ /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1 */
+ pd = platform_device_alloc("pxa2xx-spi", id);
+ if (pd == NULL) {
+ pr_err("pxa2xx-spi: failed to allocate device id %d\n", id);
+ } else {
+ err = device_add_software_node(&pd->dev, &spitz_spi_node);
+ if (err) {
+ platform_device_put(pd);
+ pr_err("pxa2xx-spi: failed to add software node\n");
+ } else {
+ platform_device_add(pd);
+ }
+ }
+
spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices));
}
#else
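Rather than passing struct pxa2xx_spi_controller through platform data, the board now attaches a software node carrying a "num-cs" property, which the controller driver can pick up through the unified device-property API exactly as it would a firmware-described property. A short driver-side sketch under that assumption; this is not the actual spi-pxa2xx probe code:

    #include <linux/property.h>
    #include <linux/platform_device.h>

    /* Software-node and firmware-node properties are read through the same
     * device_property_*() calls. */
    static int demo_probe(struct platform_device *pdev)
    {
            u32 num_cs = 1;   /* fall back to a single chip select */

            if (device_property_read_u32(&pdev->dev, "num-cs", &num_cs))
                    dev_info(&pdev->dev, "num-cs not given, assuming %u\n", num_cs);

            dev_info(&pdev->dev, "using %u chip select(s)\n", num_cs);
            return 0;
    }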
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 8bc4ea51a0c1..03b4b347f11a 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -35,18 +35,20 @@
static int spitz_last_ac_status;
-static struct gpio spitz_charger_gpios[] = {
- { SPITZ_GPIO_KEY_INT, GPIOF_IN, "Keyboard Interrupt" },
- { SPITZ_GPIO_SYNC, GPIOF_IN, "Sync" },
- { SPITZ_GPIO_AC_IN, GPIOF_IN, "Charger Detection" },
- { SPITZ_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" },
- { SPITZ_GPIO_JK_B, GPIOF_OUT_INIT_LOW, "JK B" },
- { SPITZ_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" },
-};
-
static void spitz_charger_init(void)
{
- gpio_request_array(ARRAY_AND_SIZE(spitz_charger_gpios));
+ gpio_request(SPITZ_GPIO_KEY_INT, "Keyboard Interrupt");
+ gpio_direction_input(SPITZ_GPIO_KEY_INT);
+ gpio_request(SPITZ_GPIO_SYNC, "Sync");
+ gpio_direction_input(SPITZ_GPIO_SYNC);
+ gpio_request(SPITZ_GPIO_AC_IN, "Charger Detection");
+ gpio_direction_input(SPITZ_GPIO_AC_IN);
+ gpio_request(SPITZ_GPIO_ADC_TEMP_ON, "ADC Temp On");
+ gpio_direction_output(SPITZ_GPIO_ADC_TEMP_ON, 0);
+ gpio_request(SPITZ_GPIO_JK_B, "JK B");
+ gpio_direction_output(SPITZ_GPIO_JK_B, 0);
+ gpio_request(SPITZ_GPIO_CHRG_ON, "Charger On");
+ gpio_direction_output(SPITZ_GPIO_CHRG_ON, 0);
}
static void spitz_measure_temp(int on)
diff --git a/arch/arm/mach-s3c/Makefile b/arch/arm/mach-s3c/Makefile
index 713827bef831..988c49672715 100644
--- a/arch/arm/mach-s3c/Makefile
+++ b/arch/arm/mach-s3c/Makefile
@@ -2,7 +2,7 @@
#
# Copyright 2009 Simtec Electronics
-include $(srctree)/$(src)/Makefile.s3c64xx
+include $(src)/Makefile.s3c64xx
# Objects we always build independent of SoC choice
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index 5e25dfa752e9..1cfc0b1fa41c 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -20,16 +20,6 @@
#include "generic.h"
-/*
- * helper for sa1100fb
- */
-static struct gpio h3600_lcd_gpio[] = {
- { H3XXX_EGPIO_LCD_ON, GPIOF_OUT_INIT_LOW, "LCD power" },
- { H3600_EGPIO_LCD_PCI, GPIOF_OUT_INIT_LOW, "LCD control" },
- { H3600_EGPIO_LCD_5V_ON, GPIOF_OUT_INIT_LOW, "LCD 5v" },
- { H3600_EGPIO_LVDD_ON, GPIOF_OUT_INIT_LOW, "LCD 9v/-6.5v" },
-};
-
static bool h3600_lcd_request(void)
{
static bool h3600_lcd_ok;
@@ -38,7 +28,42 @@ static bool h3600_lcd_request(void)
if (h3600_lcd_ok)
return true;
- rc = gpio_request_array(h3600_lcd_gpio, ARRAY_SIZE(h3600_lcd_gpio));
+ rc = gpio_request(H3XXX_EGPIO_LCD_ON, "LCD power");
+ if (rc)
+ goto out;
+ rc = gpio_direction_output(H3XXX_EGPIO_LCD_ON, 0);
+ if (rc)
+ goto out_free_on;
+ rc = gpio_request(H3600_EGPIO_LCD_PCI, "LCD control");
+ if (rc)
+ goto out_free_on;
+ rc = gpio_direction_output(H3600_EGPIO_LCD_PCI, 0);
+ if (rc)
+ goto out_free_pci;
+ rc = gpio_request(H3600_EGPIO_LCD_5V_ON, "LCD 5v");
+ if (rc)
+ goto out_free_pci;
+ rc = gpio_direction_output(H3600_EGPIO_LCD_5V_ON, 0);
+ if (rc)
+ goto out_free_5v_on;
+ rc = gpio_request(H3600_EGPIO_LVDD_ON, "LCD 9v/-6.5v");
+ if (rc)
+ goto out_free_5v_on;
+ rc = gpio_direction_output(H3600_EGPIO_LVDD_ON, 0);
+ if (rc)
+ goto out_free_lvdd_on;
+
+ goto out;
+
+out_free_lvdd_on:
+ gpio_free(H3600_EGPIO_LVDD_ON);
+out_free_5v_on:
+ gpio_free(H3600_EGPIO_LCD_5V_ON);
+out_free_pci:
+ gpio_free(H3600_EGPIO_LCD_PCI);
+out_free_on:
+ gpio_free(H3XXX_EGPIO_LCD_ON);
+out:
if (rc)
pr_err("%s: can't request GPIOs\n", __func__);
else
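
h3600_lcd_request() is rewritten above as an explicit request/direction chain with goto-based unwinding. For comparison, a hedged sketch of the same request-then-roll-back logic driven by a small table, again using gpio_request_one() and assuming the H3XXX_/H3600_ GPIO constants from the platform header (illustrative only, not the patch's code):

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical table: the four LCD supply/control lines requested above */
static const struct {
        unsigned int gpio;
        const char *label;
} example_lcd_gpios[] = {
        { H3XXX_EGPIO_LCD_ON,    "LCD power" },
        { H3600_EGPIO_LCD_PCI,   "LCD control" },
        { H3600_EGPIO_LCD_5V_ON, "LCD 5v" },
        { H3600_EGPIO_LVDD_ON,   "LCD 9v/-6.5v" },
};

static int example_lcd_request(void)
{
        int i, rc = 0;

        for (i = 0; i < ARRAY_SIZE(example_lcd_gpios); i++) {
                rc = gpio_request_one(example_lcd_gpios[i].gpio,
                                      GPIOF_OUT_INIT_LOW,
                                      example_lcd_gpios[i].label);
                if (rc)
                        break;
        }
        if (rc) {
                /* Roll back only the GPIOs that were successfully claimed */
                while (--i >= 0)
                        gpio_free(example_lcd_gpios[i].gpio);
                pr_err("%s: can't request GPIOs\n", __func__);
        }
        return rc;
}
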
diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig
index 98145031586f..ae21a9f78f9c 100644
--- a/arch/arm/mach-stm32/Kconfig
+++ b/arch/arm/mach-stm32/Kconfig
@@ -12,6 +12,7 @@ menuconfig ARCH_STM32
select PINCTRL
select RESET_CONTROLLER
select STM32_EXTI
+ select STM32_FIREWALL
help
Support for STMicroelectronics STM32 processors.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 71b858c9b10c..a195cd1d3e6d 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -21,7 +21,6 @@ KASAN_SANITIZE_physaddr.o := n
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o
obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
@@ -45,6 +44,7 @@ obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
+obj-y += cache.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
@@ -62,6 +62,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON) += tlb-v4wbi.o # reuse v4wbi TLB functions
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
+obj-y += tlb.o
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
+obj-$(CONFIG_CFI_CLANG) += proc.o
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
diff --git a/arch/arm/mm/cache-b15-rac.c b/arch/arm/mm/cache-b15-rac.c
index 9c1172f26885..6f63b90f9e1a 100644
--- a/arch/arm/mm/cache-b15-rac.c
+++ b/arch/arm/mm/cache-b15-rac.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2016 Broadcom
*/
+#include <linux/cfi_types.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 71c64e92dead..4a3668b52a2d 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
@@ -39,11 +40,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(fa_flush_icache_all)
+SYM_TYPED_FUNC_START(fa_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(fa_flush_icache_all)
+SYM_FUNC_END(fa_flush_icache_all)
/*
* flush_user_cache_all()
@@ -51,14 +52,14 @@ ENDPROC(fa_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(fa_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(fa_flush_user_cache_all, fa_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(fa_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(fa_flush_kern_cache_all)
mov ip, #0
mov r2, #VM_EXEC
__flush_whole_cache:
@@ -69,6 +70,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -80,7 +82,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(fa_flush_user_cache_range)
+SYM_TYPED_FUNC_START(fa_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT @ total size >= limit?
@@ -97,6 +99,7 @@ ENTRY(fa_flush_user_cache_range)
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -108,8 +111,11 @@ ENTRY(fa_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(fa_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b fa_coherent_user_range
+#endif
+SYM_FUNC_END(fa_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -121,7 +127,7 @@ ENTRY(fa_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_user_range)
+SYM_TYPED_FUNC_START(fa_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -133,6 +139,7 @@ ENTRY(fa_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -143,7 +150,7 @@ ENTRY(fa_coherent_user_range)
* - addr - kernel address
* - size - size of region
*/
-ENTRY(fa_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
@@ -153,6 +160,7 @@ ENTRY(fa_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -203,7 +211,7 @@ fa_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(fa_dma_flush_range)
+SYM_TYPED_FUNC_START(fa_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -212,6 +220,7 @@ ENTRY(fa_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -219,13 +228,13 @@ ENTRY(fa_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_map_area)
+SYM_TYPED_FUNC_START(fa_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq fa_dma_clean_range
bcs fa_dma_inv_range
b fa_dma_flush_range
-ENDPROC(fa_dma_map_area)
+SYM_FUNC_END(fa_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -233,14 +242,6 @@ ENDPROC(fa_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_unmap_area)
+SYM_TYPED_FUNC_START(fa_dma_unmap_area)
ret lr
-ENDPROC(fa_dma_unmap_area)
-
- .globl fa_flush_kern_cache_louis
- .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions fa
+SYM_FUNC_END(fa_dma_unmap_area)
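
The conversions in cache-fa.S (and in the other cache-*.S files below) replace ENTRY/ENDPROC with SYM_TYPED_FUNC_START/SYM_FUNC_END so that each routine carries a kCFI type hash, and they turn the old silent fallthroughs into explicit branches under CONFIG_CFI_CLANG, since the hash word emitted before the next symbol would otherwise land in the fallthrough path. A minimal sketch of the indirect call these hashes protect, assuming a MULTI_CACHE configuration where dispatch goes through the cpu_cache vtable from <asm/cacheflush.h>:

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Sketch only: with CONFIG_CFI_CLANG the compiler verifies the callee's
 * type hash on every call through a cpu_cache_fns member, which is why
 * the assembly implementations above are declared with
 * SYM_TYPED_FUNC_START rather than plain ENTRY.
 */
static void example_sync_icache(unsigned long start, unsigned long end)
{
        cpu_cache.coherent_kern_range(start, end); /* kCFI-checked indirect call */
}
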
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 72d939ef8798..f68dde2014ee 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -1,47 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "proc-macros.S"
-ENTRY(nop_flush_icache_all)
+/*
+ * These are all open-coded instead of aliased, to make clear
+ * what is going on here: all functions are stubbed out.
+ */
+SYM_TYPED_FUNC_START(nop_flush_icache_all)
ret lr
-ENDPROC(nop_flush_icache_all)
+SYM_FUNC_END(nop_flush_icache_all)
- .globl nop_flush_kern_cache_all
- .equ nop_flush_kern_cache_all, nop_flush_icache_all
-
- .globl nop_flush_kern_cache_louis
- .equ nop_flush_kern_cache_louis, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_cache_all)
- .globl nop_flush_user_cache_all
- .equ nop_flush_user_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_all)
- .globl nop_flush_user_cache_range
- .equ nop_flush_user_cache_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_range)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_range)
- .globl nop_coherent_kern_range
- .equ nop_coherent_kern_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(nop_coherent_kern_range)
-ENTRY(nop_coherent_user_range)
+SYM_TYPED_FUNC_START(nop_coherent_user_range)
mov r0, 0
ret lr
-ENDPROC(nop_coherent_user_range)
-
- .globl nop_flush_kern_dcache_area
- .equ nop_flush_kern_dcache_area, nop_flush_icache_all
+SYM_FUNC_END(nop_coherent_user_range)
- .globl nop_dma_flush_range
- .equ nop_dma_flush_range, nop_flush_icache_all
-
- .globl nop_dma_map_area
- .equ nop_dma_map_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_dcache_area)
- .globl nop_dma_unmap_area
- .equ nop_dma_unmap_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_flush_range)
+ ret lr
+SYM_FUNC_END(nop_dma_flush_range)
- __INITDATA
+SYM_TYPED_FUNC_START(nop_dma_map_area)
+ ret lr
+SYM_FUNC_END(nop_dma_map_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions nop
+SYM_TYPED_FUNC_START(nop_dma_unmap_area)
+ ret lr
+SYM_FUNC_END(nop_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 7787057e4990..0e94e5193dbd 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -15,9 +16,9 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4_flush_icache_all)
+SYM_TYPED_FUNC_START(v4_flush_icache_all)
ret lr
-ENDPROC(v4_flush_icache_all)
+SYM_FUNC_END(v4_flush_icache_all)
/*
* flush_user_cache_all()
@@ -27,21 +28,22 @@ ENDPROC(v4_flush_icache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v4_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4_flush_user_cache_all, v4_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -53,14 +55,15 @@ ENTRY(v4_flush_kern_cache_all)
* - end - end address (exclusive, may not be aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -72,8 +75,9 @@ ENTRY(v4_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(v4_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -85,9 +89,10 @@ ENTRY(v4_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_user_range)
+SYM_TYPED_FUNC_START(v4_coherent_user_range)
mov r0, #0
ret lr
+SYM_FUNC_END(v4_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -98,8 +103,11 @@ ENTRY(v4_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4_flush_kern_dcache_area)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4_dma_flush_range
+#endif
+SYM_FUNC_END(v4_flush_kern_dcache_area)
/*
* dma_flush_range(start, end)
@@ -109,12 +117,13 @@ ENTRY(v4_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_dma_flush_range)
+SYM_TYPED_FUNC_START(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
ret lr
+SYM_FUNC_END(v4_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -122,10 +131,11 @@ ENTRY(v4_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
bne v4_dma_flush_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -133,15 +143,6 @@ ENTRY(v4_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_map_area)
+SYM_TYPED_FUNC_START(v4_dma_map_area)
ret lr
-ENDPROC(v4_dma_unmap_area)
-ENDPROC(v4_dma_map_area)
-
- .globl v4_flush_kern_cache_louis
- .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4
+SYM_FUNC_END(v4_dma_map_area)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index ad382cee0fdb..ce55a2eef5da 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -53,11 +54,11 @@ flush_base:
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wb_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wb_flush_icache_all)
+SYM_FUNC_END(v4wb_flush_icache_all)
/*
* flush_user_cache_all()
@@ -65,14 +66,14 @@ ENDPROC(v4wb_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wb_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wb_flush_user_cache_all, v4wb_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wb_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
@@ -93,6 +94,7 @@ __flush_whole_cache:
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -104,7 +106,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wb_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
@@ -121,6 +123,7 @@ ENTRY(v4wb_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_user_cache_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -131,9 +134,12 @@ ENTRY(v4wb_flush_user_cache_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
add r1, r0, r1
- /* fall through */
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_flush_kern_dcache_area)
/*
* coherent_kern_range(start, end)
@@ -145,8 +151,11 @@ ENTRY(v4wb_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -158,7 +167,7 @@ ENTRY(v4wb_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -169,7 +178,7 @@ ENTRY(v4wb_coherent_user_range)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
-
+SYM_FUNC_END(v4wb_coherent_user_range)
/*
* dma_inv_range(start, end)
@@ -231,13 +240,13 @@ v4wb_dma_clean_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_map_area)
+SYM_TYPED_FUNC_START(v4wb_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq v4wb_dma_clean_range
bcs v4wb_dma_inv_range
b v4wb_dma_flush_range
-ENDPROC(v4wb_dma_map_area)
+SYM_FUNC_END(v4wb_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -245,14 +254,6 @@ ENDPROC(v4wb_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
ret lr
-ENDPROC(v4wb_dma_unmap_area)
-
- .globl v4wb_flush_kern_cache_louis
- .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4wb
+SYM_FUNC_END(v4wb_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 0b290c25a99d..a97dc267b3b0 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -43,11 +44,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wt_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wt_flush_icache_all)
+SYM_FUNC_END(v4wt_flush_icache_all)
/*
* flush_user_cache_all()
@@ -55,14 +56,14 @@ ENDPROC(v4wt_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wt_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wt_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -70,6 +71,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
ret lr
+SYM_FUNC_END(v4wt_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -81,7 +83,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wt_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
@@ -93,6 +95,7 @@ ENTRY(v4wt_flush_user_cache_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wt_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -104,8 +107,11 @@ ENTRY(v4wt_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_kern_range)
- /* FALLTRHOUGH */
+SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wt_coherent_user_range
+#endif
+SYM_FUNC_END(v4wt_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -117,7 +123,7 @@ ENTRY(v4wt_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
@@ -125,6 +131,7 @@ ENTRY(v4wt_coherent_user_range)
blo 1b
mov r0, #0
ret lr
+SYM_FUNC_END(v4wt_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -135,11 +142,12 @@ ENTRY(v4wt_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wt_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, r1
- /* fallthrough */
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -167,9 +175,10 @@ v4wt_dma_inv_range:
*
* - start - virtual start address
* - end - virtual end address
- */
- .globl v4wt_dma_flush_range
- .equ v4wt_dma_flush_range, v4wt_dma_inv_range
+ */
+SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -177,11 +186,12 @@ v4wt_dma_inv_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v4wt_dma_inv_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4wt_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -189,15 +199,6 @@ ENTRY(v4wt_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_map_area)
+SYM_TYPED_FUNC_START(v4wt_dma_map_area)
ret lr
-ENDPROC(v4wt_dma_unmap_area)
-ENDPROC(v4wt_dma_map_area)
-
- .globl v4wt_flush_kern_cache_louis
- .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4wt
+SYM_FUNC_END(v4wt_dma_map_area)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 44211d8a296f..9f415476e218 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -34,7 +35,7 @@
* r0 - set to 0
* r1 - corrupted
*/
-ENTRY(v6_flush_icache_all)
+SYM_TYPED_FUNC_START(v6_flush_icache_all)
mov r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
mrs r1, cpsr
@@ -51,7 +52,7 @@ ENTRY(v6_flush_icache_all)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
#endif
ret lr
-ENDPROC(v6_flush_icache_all)
+SYM_FUNC_END(v6_flush_icache_all)
/*
* v6_flush_cache_all()
@@ -60,7 +61,7 @@ ENDPROC(v6_flush_icache_all)
*
* It is assumed that:
*/
-ENTRY(v6_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
@@ -73,6 +74,7 @@ ENTRY(v6_flush_kern_cache_all)
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
ret lr
+SYM_FUNC_END(v6_flush_kern_cache_all)
/*
* v6_flush_cache_all()
@@ -81,8 +83,9 @@ ENTRY(v6_flush_kern_cache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v6_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v6_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v6_flush_user_cache_all)
/*
* v6_flush_cache_range(start, end, flags)
@@ -96,8 +99,9 @@ ENTRY(v6_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v6_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v6_flush_user_cache_range)
ret lr
+SYM_FUNC_END(v6_flush_user_cache_range)
/*
* v6_coherent_kern_range(start,end)
@@ -112,8 +116,11 @@ ENTRY(v6_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v6_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v6_coherent_user_range
+#endif
+SYM_FUNC_END(v6_coherent_kern_range)
/*
* v6_coherent_user_range(start,end)
@@ -128,7 +135,7 @@ ENTRY(v6_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_user_range)
+SYM_TYPED_FUNC_START(v6_coherent_user_range)
UNWIND(.fnstart )
#ifdef HARVARD_CACHE
bic r0, r0, #CACHE_LINE_SIZE - 1
@@ -159,8 +166,7 @@ ENTRY(v6_coherent_user_range)
mov r0, #-EFAULT
ret lr
UNWIND(.fnend )
-ENDPROC(v6_coherent_user_range)
-ENDPROC(v6_coherent_kern_range)
+SYM_FUNC_END(v6_coherent_user_range)
/*
* v6_flush_kern_dcache_area(void *addr, size_t size)
@@ -171,7 +177,7 @@ ENDPROC(v6_coherent_kern_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v6_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area)
add r1, r0, r1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
@@ -188,7 +194,7 @@ ENTRY(v6_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c10, 4
#endif
ret lr
-
+SYM_FUNC_END(v6_flush_kern_dcache_area)
/*
* v6_dma_inv_range(start,end)
@@ -253,7 +259,7 @@ v6_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_flush_range)
+SYM_TYPED_FUNC_START(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -267,6 +273,7 @@ ENTRY(v6_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v6_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -274,12 +281,12 @@ ENTRY(v6_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_map_area)
+SYM_TYPED_FUNC_START(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
b v6_dma_clean_range
-ENDPROC(v6_dma_map_area)
+SYM_FUNC_END(v6_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -287,17 +294,9 @@ ENDPROC(v6_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_unmap_area)
+SYM_TYPED_FUNC_START(v6_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v6_dma_inv_range
ret lr
-ENDPROC(v6_dma_unmap_area)
-
- .globl v6_flush_kern_cache_louis
- .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v6
+SYM_FUNC_END(v6_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 127afe2096ba..201ca05436fa 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -80,12 +81,12 @@ ENDPROC(v7_invalidate_l1)
* Registers:
* r0 - set to 0
*/
-ENTRY(v7_flush_icache_all)
+SYM_TYPED_FUNC_START(v7_flush_icache_all)
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ret lr
-ENDPROC(v7_flush_icache_all)
+SYM_FUNC_END(v7_flush_icache_all)
/*
* v7_flush_dcache_louis()
@@ -193,7 +194,7 @@ ENDPROC(v7_flush_dcache_all)
* unification in a single instruction.
*
*/
-ENTRY(v7_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_all
mov r0, #0
@@ -201,7 +202,7 @@ ENTRY(v7_flush_kern_cache_all)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_all)
+SYM_FUNC_END(v7_flush_kern_cache_all)
/*
* v7_flush_kern_cache_louis(void)
@@ -209,7 +210,7 @@ ENDPROC(v7_flush_kern_cache_all)
* Flush the data cache up to Level of Unification Inner Shareable.
* Invalidate the I-cache to the point of unification.
*/
-ENTRY(v7_flush_kern_cache_louis)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_louis
mov r0, #0
@@ -217,7 +218,7 @@ ENTRY(v7_flush_kern_cache_louis)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_louis)
+SYM_FUNC_END(v7_flush_kern_cache_louis)
/*
* v7_flush_cache_all()
@@ -226,8 +227,9 @@ ENDPROC(v7_flush_kern_cache_louis)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v7_flush_user_cache_all)
/*
* v7_flush_cache_range(start, end, flags)
@@ -241,10 +243,9 @@ ENTRY(v7_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v7_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
ret lr
-ENDPROC(v7_flush_user_cache_all)
-ENDPROC(v7_flush_user_cache_range)
+SYM_FUNC_END(v7_flush_user_cache_range)
/*
* v7_coherent_kern_range(start,end)
@@ -259,8 +260,11 @@ ENDPROC(v7_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v7_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v7_coherent_user_range
+#endif
+SYM_FUNC_END(v7_coherent_kern_range)
/*
* v7_coherent_user_range(start,end)
@@ -275,7 +279,7 @@ ENTRY(v7_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7_coherent_user_range)
+SYM_TYPED_FUNC_START(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
@@ -321,8 +325,7 @@ ENTRY(v7_coherent_user_range)
mov r0, #-EFAULT
ret lr
UNWIND(.fnend )
-ENDPROC(v7_coherent_kern_range)
-ENDPROC(v7_coherent_user_range)
+SYM_FUNC_END(v7_coherent_user_range)
/*
* v7_flush_kern_dcache_area(void *addr, size_t size)
@@ -333,7 +336,7 @@ ENDPROC(v7_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v7_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
@@ -349,7 +352,7 @@ ENTRY(v7_flush_kern_dcache_area)
blo 1b
dsb st
ret lr
-ENDPROC(v7_flush_kern_dcache_area)
+SYM_FUNC_END(v7_flush_kern_dcache_area)
/*
* v7_dma_inv_range(start,end)
@@ -413,7 +416,7 @@ ENDPROC(v7_dma_clean_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_flush_range)
+SYM_TYPED_FUNC_START(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -428,7 +431,7 @@ ENTRY(v7_dma_flush_range)
blo 1b
dsb st
ret lr
-ENDPROC(v7_dma_flush_range)
+SYM_FUNC_END(v7_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -436,12 +439,12 @@ ENDPROC(v7_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7_dma_map_area)
+SYM_TYPED_FUNC_START(v7_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v7_dma_inv_range
b v7_dma_clean_range
-ENDPROC(v7_dma_map_area)
+SYM_FUNC_END(v7_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -449,34 +452,9 @@ ENDPROC(v7_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7_dma_unmap_area)
+SYM_TYPED_FUNC_START(v7_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v7_dma_inv_range
ret lr
-ENDPROC(v7_dma_unmap_area)
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7
-
- /* The Broadcom Brahma-B15 read-ahead cache requires some modifications
- * to the v7_cache_fns, we only override the ones we need
- */
-#ifndef CONFIG_CACHE_B15_RAC
- globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
-#endif
- globl_equ b15_flush_icache_all, v7_flush_icache_all
- globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
- globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
- globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
- globl_equ b15_coherent_kern_range, v7_coherent_kern_range
- globl_equ b15_coherent_user_range, v7_coherent_user_range
- globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
-
- globl_equ b15_dma_map_area, v7_dma_map_area
- globl_equ b15_dma_unmap_area, v7_dma_unmap_area
- globl_equ b15_dma_flush_range, v7_dma_flush_range
-
- define_cache_functions b15
+SYM_FUNC_END(v7_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index eb60b5e5e2ad..14d719eba729 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -159,10 +160,10 @@ ENDPROC(v7m_invalidate_l1)
* Registers:
* r0 - set to 0
*/
-ENTRY(v7m_flush_icache_all)
+SYM_TYPED_FUNC_START(v7m_flush_icache_all)
invalidate_icache r0
ret lr
-ENDPROC(v7m_flush_icache_all)
+SYM_FUNC_END(v7m_flush_icache_all)
/*
* v7m_flush_dcache_all()
@@ -236,13 +237,13 @@ ENDPROC(v7m_flush_dcache_all)
* unification in a single instruction.
*
*/
-ENTRY(v7m_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all)
stmfd sp!, {r4-r7, r9-r11, lr}
bl v7m_flush_dcache_all
invalidate_icache r0
ldmfd sp!, {r4-r7, r9-r11, lr}
ret lr
-ENDPROC(v7m_flush_kern_cache_all)
+SYM_FUNC_END(v7m_flush_kern_cache_all)
/*
* v7m_flush_cache_all()
@@ -251,8 +252,9 @@ ENDPROC(v7m_flush_kern_cache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7m_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7m_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v7m_flush_user_cache_all)
/*
* v7m_flush_cache_range(start, end, flags)
@@ -266,10 +268,9 @@ ENTRY(v7m_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v7m_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7m_flush_user_cache_range)
ret lr
-ENDPROC(v7m_flush_user_cache_all)
-ENDPROC(v7m_flush_user_cache_range)
+SYM_FUNC_END(v7m_flush_user_cache_range)
/*
* v7m_coherent_kern_range(start,end)
@@ -284,8 +285,11 @@ ENDPROC(v7m_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7m_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v7m_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v7m_coherent_user_range
+#endif
+SYM_FUNC_END(v7m_coherent_kern_range)
/*
* v7m_coherent_user_range(start,end)
@@ -300,7 +304,7 @@ ENTRY(v7m_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7m_coherent_user_range)
+SYM_TYPED_FUNC_START(v7m_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
@@ -328,8 +332,7 @@ ENTRY(v7m_coherent_user_range)
isb
ret lr
UNWIND(.fnend )
-ENDPROC(v7m_coherent_kern_range)
-ENDPROC(v7m_coherent_user_range)
+SYM_FUNC_END(v7m_coherent_user_range)
/*
* v7m_flush_kern_dcache_area(void *addr, size_t size)
@@ -340,7 +343,7 @@ ENDPROC(v7m_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v7m_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
@@ -352,7 +355,7 @@ ENTRY(v7m_flush_kern_dcache_area)
blo 1b
dsb st
ret lr
-ENDPROC(v7m_flush_kern_dcache_area)
+SYM_FUNC_END(v7m_flush_kern_dcache_area)
/*
* v7m_dma_inv_range(start,end)
@@ -408,7 +411,7 @@ ENDPROC(v7m_dma_clean_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7m_dma_flush_range)
+SYM_TYPED_FUNC_START(v7m_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -419,7 +422,7 @@ ENTRY(v7m_dma_flush_range)
blo 1b
dsb st
ret lr
-ENDPROC(v7m_dma_flush_range)
+SYM_FUNC_END(v7m_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -427,12 +430,12 @@ ENDPROC(v7m_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7m_dma_map_area)
+SYM_TYPED_FUNC_START(v7m_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v7m_dma_inv_range
b v7m_dma_clean_range
-ENDPROC(v7m_dma_map_area)
+SYM_FUNC_END(v7m_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -440,17 +443,9 @@ ENDPROC(v7m_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7m_dma_unmap_area)
+SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v7m_dma_inv_range
ret lr
-ENDPROC(v7m_dma_unmap_area)
-
- .globl v7m_flush_kern_cache_louis
- .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7m
+SYM_FUNC_END(v7m_dma_unmap_area)
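
The remainder of the patch adds arch/arm/mm/cache.c, which replaces the per-CPU define_cache_functions assembler macro with C-initialized cpu_cache_fns tables. A rough sketch of how one of those tables is put to use at boot, assuming the MULTI_CACHE path where setup code copies the table matched for the running CPU into the global cpu_cache (names follow <asm/procinfo.h> and <asm/cacheflush.h>; the helper itself is illustrative):

#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/procinfo.h>

/* Sketch: install the cache vtable matched for the detected CPU */
static void __init example_install_cache_fns(const struct proc_info_list *list)
{
        cpu_cache = *list->cache; /* what setup_processor() effectively does */

        /* the __cpuc_* helpers then dispatch through the vtable, e.g.: */
        cpu_cache.flush_icache_all();
}
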
diff --git a/arch/arm/mm/cache.c b/arch/arm/mm/cache.c
new file mode 100644
index 000000000000..e6fbc599c9ed
--- /dev/null
+++ b/arch/arm/mm/cache.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file defines C prototypes for the low-level cache assembly functions
+ * and populates a vtable for each selected ARM CPU cache type.
+ */
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_CPU_CACHE_V4
+void v4_flush_icache_all(void);
+void v4_flush_kern_cache_all(void);
+void v4_flush_user_cache_all(void);
+void v4_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4_coherent_kern_range(unsigned long, unsigned long);
+int v4_coherent_user_range(unsigned long, unsigned long);
+void v4_flush_kern_dcache_area(void *, size_t);
+void v4_dma_map_area(const void *, size_t, int);
+void v4_dma_unmap_area(const void *, size_t, int);
+void v4_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4_cache_fns __initconst = {
+ .flush_icache_all = v4_flush_icache_all,
+ .flush_kern_all = v4_flush_kern_cache_all,
+ .flush_kern_louis = v4_flush_kern_cache_all,
+ .flush_user_all = v4_flush_user_cache_all,
+ .flush_user_range = v4_flush_user_cache_range,
+ .coherent_kern_range = v4_coherent_kern_range,
+ .coherent_user_range = v4_coherent_user_range,
+ .flush_kern_dcache_area = v4_flush_kern_dcache_area,
+ .dma_map_area = v4_dma_map_area,
+ .dma_unmap_area = v4_dma_unmap_area,
+ .dma_flush_range = v4_dma_flush_range,
+};
+#endif
+
+/* V4 write-back cache "V4WB" */
+#ifdef CONFIG_CPU_CACHE_V4WB
+void v4wb_flush_icache_all(void);
+void v4wb_flush_kern_cache_all(void);
+void v4wb_flush_user_cache_all(void);
+void v4wb_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wb_coherent_kern_range(unsigned long, unsigned long);
+int v4wb_coherent_user_range(unsigned long, unsigned long);
+void v4wb_flush_kern_dcache_area(void *, size_t);
+void v4wb_dma_map_area(const void *, size_t, int);
+void v4wb_dma_unmap_area(const void *, size_t, int);
+void v4wb_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wb_cache_fns __initconst = {
+ .flush_icache_all = v4wb_flush_icache_all,
+ .flush_kern_all = v4wb_flush_kern_cache_all,
+ .flush_kern_louis = v4wb_flush_kern_cache_all,
+ .flush_user_all = v4wb_flush_user_cache_all,
+ .flush_user_range = v4wb_flush_user_cache_range,
+ .coherent_kern_range = v4wb_coherent_kern_range,
+ .coherent_user_range = v4wb_coherent_user_range,
+ .flush_kern_dcache_area = v4wb_flush_kern_dcache_area,
+ .dma_map_area = v4wb_dma_map_area,
+ .dma_unmap_area = v4wb_dma_unmap_area,
+ .dma_flush_range = v4wb_dma_flush_range,
+};
+#endif
+
+/* V4 write-through cache "V4WT" */
+#ifdef CONFIG_CPU_CACHE_V4WT
+void v4wt_flush_icache_all(void);
+void v4wt_flush_kern_cache_all(void);
+void v4wt_flush_user_cache_all(void);
+void v4wt_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wt_coherent_kern_range(unsigned long, unsigned long);
+int v4wt_coherent_user_range(unsigned long, unsigned long);
+void v4wt_flush_kern_dcache_area(void *, size_t);
+void v4wt_dma_map_area(const void *, size_t, int);
+void v4wt_dma_unmap_area(const void *, size_t, int);
+void v4wt_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wt_cache_fns __initconst = {
+ .flush_icache_all = v4wt_flush_icache_all,
+ .flush_kern_all = v4wt_flush_kern_cache_all,
+ .flush_kern_louis = v4wt_flush_kern_cache_all,
+ .flush_user_all = v4wt_flush_user_cache_all,
+ .flush_user_range = v4wt_flush_user_cache_range,
+ .coherent_kern_range = v4wt_coherent_kern_range,
+ .coherent_user_range = v4wt_coherent_user_range,
+ .flush_kern_dcache_area = v4wt_flush_kern_dcache_area,
+ .dma_map_area = v4wt_dma_map_area,
+ .dma_unmap_area = v4wt_dma_unmap_area,
+ .dma_flush_range = v4wt_dma_flush_range,
+};
+#endif
+
+/* Faraday FA526 cache */
+#ifdef CONFIG_CPU_CACHE_FA
+void fa_flush_icache_all(void);
+void fa_flush_kern_cache_all(void);
+void fa_flush_user_cache_all(void);
+void fa_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void fa_coherent_kern_range(unsigned long, unsigned long);
+int fa_coherent_user_range(unsigned long, unsigned long);
+void fa_flush_kern_dcache_area(void *, size_t);
+void fa_dma_map_area(const void *, size_t, int);
+void fa_dma_unmap_area(const void *, size_t, int);
+void fa_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns fa_cache_fns __initconst = {
+ .flush_icache_all = fa_flush_icache_all,
+ .flush_kern_all = fa_flush_kern_cache_all,
+ .flush_kern_louis = fa_flush_kern_cache_all,
+ .flush_user_all = fa_flush_user_cache_all,
+ .flush_user_range = fa_flush_user_cache_range,
+ .coherent_kern_range = fa_coherent_kern_range,
+ .coherent_user_range = fa_coherent_user_range,
+ .flush_kern_dcache_area = fa_flush_kern_dcache_area,
+ .dma_map_area = fa_dma_map_area,
+ .dma_unmap_area = fa_dma_unmap_area,
+ .dma_flush_range = fa_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V6
+void v6_flush_icache_all(void);
+void v6_flush_kern_cache_all(void);
+void v6_flush_user_cache_all(void);
+void v6_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v6_coherent_kern_range(unsigned long, unsigned long);
+int v6_coherent_user_range(unsigned long, unsigned long);
+void v6_flush_kern_dcache_area(void *, size_t);
+void v6_dma_map_area(const void *, size_t, int);
+void v6_dma_unmap_area(const void *, size_t, int);
+void v6_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v6_cache_fns __initconst = {
+ .flush_icache_all = v6_flush_icache_all,
+ .flush_kern_all = v6_flush_kern_cache_all,
+ .flush_kern_louis = v6_flush_kern_cache_all,
+ .flush_user_all = v6_flush_user_cache_all,
+ .flush_user_range = v6_flush_user_cache_range,
+ .coherent_kern_range = v6_coherent_kern_range,
+ .coherent_user_range = v6_coherent_user_range,
+ .flush_kern_dcache_area = v6_flush_kern_dcache_area,
+ .dma_map_area = v6_dma_map_area,
+ .dma_unmap_area = v6_dma_unmap_area,
+ .dma_flush_range = v6_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7
+void v7_flush_icache_all(void);
+void v7_flush_kern_cache_all(void);
+void v7_flush_kern_cache_louis(void);
+void v7_flush_user_cache_all(void);
+void v7_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7_coherent_kern_range(unsigned long, unsigned long);
+int v7_coherent_user_range(unsigned long, unsigned long);
+void v7_flush_kern_dcache_area(void *, size_t);
+void v7_dma_map_area(const void *, size_t, int);
+void v7_dma_unmap_area(const void *, size_t, int);
+void v7_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7_cache_fns __initconst = {
+ .flush_icache_all = v7_flush_icache_all,
+ .flush_kern_all = v7_flush_kern_cache_all,
+ .flush_kern_louis = v7_flush_kern_cache_louis,
+ .flush_user_all = v7_flush_user_cache_all,
+ .flush_user_range = v7_flush_user_cache_range,
+ .coherent_kern_range = v7_coherent_kern_range,
+ .coherent_user_range = v7_coherent_user_range,
+ .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+ .dma_map_area = v7_dma_map_area,
+ .dma_unmap_area = v7_dma_unmap_area,
+ .dma_flush_range = v7_dma_flush_range,
+};
+
+/* Special quirky cache flush function for Broadcom B15 v7 caches */
+void b15_flush_kern_cache_all(void);
+
+struct cpu_cache_fns b15_cache_fns __initconst = {
+ .flush_icache_all = v7_flush_icache_all,
+#ifdef CONFIG_CACHE_B15_RAC
+ .flush_kern_all = b15_flush_kern_cache_all,
+#else
+ .flush_kern_all = v7_flush_kern_cache_all,
+#endif
+ .flush_kern_louis = v7_flush_kern_cache_louis,
+ .flush_user_all = v7_flush_user_cache_all,
+ .flush_user_range = v7_flush_user_cache_range,
+ .coherent_kern_range = v7_coherent_kern_range,
+ .coherent_user_range = v7_coherent_user_range,
+ .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+ .dma_map_area = v7_dma_map_area,
+ .dma_unmap_area = v7_dma_unmap_area,
+ .dma_flush_range = v7_dma_flush_range,
+};
+#endif
+
+/* The NOP cache is just a set of dummy stubs that by definition does nothing */
+#ifdef CONFIG_CPU_CACHE_NOP
+void nop_flush_icache_all(void);
+void nop_flush_kern_cache_all(void);
+void nop_flush_user_cache_all(void);
+void nop_flush_user_cache_range(unsigned long start, unsigned long end, unsigned int flags);
+void nop_coherent_kern_range(unsigned long start, unsigned long end);
+int nop_coherent_user_range(unsigned long, unsigned long);
+void nop_flush_kern_dcache_area(void *kaddr, size_t size);
+void nop_dma_map_area(const void *start, size_t size, int flags);
+void nop_dma_unmap_area(const void *start, size_t size, int flags);
+void nop_dma_flush_range(const void *start, const void *end);
+
+struct cpu_cache_fns nop_cache_fns __initconst = {
+ .flush_icache_all = nop_flush_icache_all,
+ .flush_kern_all = nop_flush_kern_cache_all,
+ .flush_kern_louis = nop_flush_kern_cache_all,
+ .flush_user_all = nop_flush_user_cache_all,
+ .flush_user_range = nop_flush_user_cache_range,
+ .coherent_kern_range = nop_coherent_kern_range,
+ .coherent_user_range = nop_coherent_user_range,
+ .flush_kern_dcache_area = nop_flush_kern_dcache_area,
+ .dma_map_area = nop_dma_map_area,
+ .dma_unmap_area = nop_dma_unmap_area,
+ .dma_flush_range = nop_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7M
+void v7m_flush_icache_all(void);
+void v7m_flush_kern_cache_all(void);
+void v7m_flush_user_cache_all(void);
+void v7m_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7m_coherent_kern_range(unsigned long, unsigned long);
+int v7m_coherent_user_range(unsigned long, unsigned long);
+void v7m_flush_kern_dcache_area(void *, size_t);
+void v7m_dma_map_area(const void *, size_t, int);
+void v7m_dma_unmap_area(const void *, size_t, int);
+void v7m_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7m_cache_fns __initconst = {
+ .flush_icache_all = v7m_flush_icache_all,
+ .flush_kern_all = v7m_flush_kern_cache_all,
+ .flush_kern_louis = v7m_flush_kern_cache_all,
+ .flush_user_all = v7m_flush_user_cache_all,
+ .flush_user_range = v7m_flush_user_cache_range,
+ .coherent_kern_range = v7m_coherent_kern_range,
+ .coherent_user_range = v7m_coherent_user_range,
+ .flush_kern_dcache_area = v7m_flush_kern_dcache_area,
+ .dma_map_area = v7m_dma_map_area,
+ .dma_unmap_area = v7m_dma_unmap_area,
+ .dma_flush_range = v7m_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+void arm1020_flush_icache_all(void);
+void arm1020_flush_kern_cache_all(void);
+void arm1020_flush_user_cache_all(void);
+void arm1020_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020_coherent_kern_range(unsigned long, unsigned long);
+int arm1020_coherent_user_range(unsigned long, unsigned long);
+void arm1020_flush_kern_dcache_area(void *, size_t);
+void arm1020_dma_map_area(const void *, size_t, int);
+void arm1020_dma_unmap_area(const void *, size_t, int);
+void arm1020_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020_cache_fns __initconst = {
+ .flush_icache_all = arm1020_flush_icache_all,
+ .flush_kern_all = arm1020_flush_kern_cache_all,
+ .flush_kern_louis = arm1020_flush_kern_cache_all,
+ .flush_user_all = arm1020_flush_user_cache_all,
+ .flush_user_range = arm1020_flush_user_cache_range,
+ .coherent_kern_range = arm1020_coherent_kern_range,
+ .coherent_user_range = arm1020_coherent_user_range,
+ .flush_kern_dcache_area = arm1020_flush_kern_dcache_area,
+ .dma_map_area = arm1020_dma_map_area,
+ .dma_unmap_area = arm1020_dma_unmap_area,
+ .dma_flush_range = arm1020_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+void arm1020e_flush_icache_all(void);
+void arm1020e_flush_kern_cache_all(void);
+void arm1020e_flush_user_cache_all(void);
+void arm1020e_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020e_coherent_kern_range(unsigned long, unsigned long);
+int arm1020e_coherent_user_range(unsigned long, unsigned long);
+void arm1020e_flush_kern_dcache_area(void *, size_t);
+void arm1020e_dma_map_area(const void *, size_t, int);
+void arm1020e_dma_unmap_area(const void *, size_t, int);
+void arm1020e_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020e_cache_fns __initconst = {
+ .flush_icache_all = arm1020e_flush_icache_all,
+ .flush_kern_all = arm1020e_flush_kern_cache_all,
+ .flush_kern_louis = arm1020e_flush_kern_cache_all,
+ .flush_user_all = arm1020e_flush_user_cache_all,
+ .flush_user_range = arm1020e_flush_user_cache_range,
+ .coherent_kern_range = arm1020e_coherent_kern_range,
+ .coherent_user_range = arm1020e_coherent_user_range,
+ .flush_kern_dcache_area = arm1020e_flush_kern_dcache_area,
+ .dma_map_area = arm1020e_dma_map_area,
+ .dma_unmap_area = arm1020e_dma_unmap_area,
+ .dma_flush_range = arm1020e_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+void arm1022_flush_icache_all(void);
+void arm1022_flush_kern_cache_all(void);
+void arm1022_flush_user_cache_all(void);
+void arm1022_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1022_coherent_kern_range(unsigned long, unsigned long);
+int arm1022_coherent_user_range(unsigned long, unsigned long);
+void arm1022_flush_kern_dcache_area(void *, size_t);
+void arm1022_dma_map_area(const void *, size_t, int);
+void arm1022_dma_unmap_area(const void *, size_t, int);
+void arm1022_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1022_cache_fns __initconst = {
+ .flush_icache_all = arm1022_flush_icache_all,
+ .flush_kern_all = arm1022_flush_kern_cache_all,
+ .flush_kern_louis = arm1022_flush_kern_cache_all,
+ .flush_user_all = arm1022_flush_user_cache_all,
+ .flush_user_range = arm1022_flush_user_cache_range,
+ .coherent_kern_range = arm1022_coherent_kern_range,
+ .coherent_user_range = arm1022_coherent_user_range,
+ .flush_kern_dcache_area = arm1022_flush_kern_dcache_area,
+ .dma_map_area = arm1022_dma_map_area,
+ .dma_unmap_area = arm1022_dma_unmap_area,
+ .dma_flush_range = arm1022_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+void arm1026_flush_icache_all(void);
+void arm1026_flush_kern_cache_all(void);
+void arm1026_flush_user_cache_all(void);
+void arm1026_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1026_coherent_kern_range(unsigned long, unsigned long);
+int arm1026_coherent_user_range(unsigned long, unsigned long);
+void arm1026_flush_kern_dcache_area(void *, size_t);
+void arm1026_dma_map_area(const void *, size_t, int);
+void arm1026_dma_unmap_area(const void *, size_t, int);
+void arm1026_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1026_cache_fns __initconst = {
+ .flush_icache_all = arm1026_flush_icache_all,
+ .flush_kern_all = arm1026_flush_kern_cache_all,
+ .flush_kern_louis = arm1026_flush_kern_cache_all,
+ .flush_user_all = arm1026_flush_user_cache_all,
+ .flush_user_range = arm1026_flush_user_cache_range,
+ .coherent_kern_range = arm1026_coherent_kern_range,
+ .coherent_user_range = arm1026_coherent_user_range,
+ .flush_kern_dcache_area = arm1026_flush_kern_dcache_area,
+ .dma_map_area = arm1026_dma_map_area,
+ .dma_unmap_area = arm1026_dma_unmap_area,
+ .dma_flush_range = arm1026_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm920_flush_icache_all(void);
+void arm920_flush_kern_cache_all(void);
+void arm920_flush_user_cache_all(void);
+void arm920_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm920_coherent_kern_range(unsigned long, unsigned long);
+int arm920_coherent_user_range(unsigned long, unsigned long);
+void arm920_flush_kern_dcache_area(void *, size_t);
+void arm920_dma_map_area(const void *, size_t, int);
+void arm920_dma_unmap_area(const void *, size_t, int);
+void arm920_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm920_cache_fns __initconst = {
+ .flush_icache_all = arm920_flush_icache_all,
+ .flush_kern_all = arm920_flush_kern_cache_all,
+ .flush_kern_louis = arm920_flush_kern_cache_all,
+ .flush_user_all = arm920_flush_user_cache_all,
+ .flush_user_range = arm920_flush_user_cache_range,
+ .coherent_kern_range = arm920_coherent_kern_range,
+ .coherent_user_range = arm920_coherent_user_range,
+ .flush_kern_dcache_area = arm920_flush_kern_dcache_area,
+ .dma_map_area = arm920_dma_map_area,
+ .dma_unmap_area = arm920_dma_unmap_area,
+ .dma_flush_range = arm920_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM922T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm922_flush_icache_all(void);
+void arm922_flush_kern_cache_all(void);
+void arm922_flush_user_cache_all(void);
+void arm922_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm922_coherent_kern_range(unsigned long, unsigned long);
+int arm922_coherent_user_range(unsigned long, unsigned long);
+void arm922_flush_kern_dcache_area(void *, size_t);
+void arm922_dma_map_area(const void *, size_t, int);
+void arm922_dma_unmap_area(const void *, size_t, int);
+void arm922_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm922_cache_fns __initconst = {
+ .flush_icache_all = arm922_flush_icache_all,
+ .flush_kern_all = arm922_flush_kern_cache_all,
+ .flush_kern_louis = arm922_flush_kern_cache_all,
+ .flush_user_all = arm922_flush_user_cache_all,
+ .flush_user_range = arm922_flush_user_cache_range,
+ .coherent_kern_range = arm922_coherent_kern_range,
+ .coherent_user_range = arm922_coherent_user_range,
+ .flush_kern_dcache_area = arm922_flush_kern_dcache_area,
+ .dma_map_area = arm922_dma_map_area,
+ .dma_unmap_area = arm922_dma_unmap_area,
+ .dma_flush_range = arm922_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+void arm925_flush_icache_all(void);
+void arm925_flush_kern_cache_all(void);
+void arm925_flush_user_cache_all(void);
+void arm925_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm925_coherent_kern_range(unsigned long, unsigned long);
+int arm925_coherent_user_range(unsigned long, unsigned long);
+void arm925_flush_kern_dcache_area(void *, size_t);
+void arm925_dma_map_area(const void *, size_t, int);
+void arm925_dma_unmap_area(const void *, size_t, int);
+void arm925_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm925_cache_fns __initconst = {
+ .flush_icache_all = arm925_flush_icache_all,
+ .flush_kern_all = arm925_flush_kern_cache_all,
+ .flush_kern_louis = arm925_flush_kern_cache_all,
+ .flush_user_all = arm925_flush_user_cache_all,
+ .flush_user_range = arm925_flush_user_cache_range,
+ .coherent_kern_range = arm925_coherent_kern_range,
+ .coherent_user_range = arm925_coherent_user_range,
+ .flush_kern_dcache_area = arm925_flush_kern_dcache_area,
+ .dma_map_area = arm925_dma_map_area,
+ .dma_unmap_area = arm925_dma_unmap_area,
+ .dma_flush_range = arm925_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+void arm926_flush_icache_all(void);
+void arm926_flush_kern_cache_all(void);
+void arm926_flush_user_cache_all(void);
+void arm926_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm926_coherent_kern_range(unsigned long, unsigned long);
+int arm926_coherent_user_range(unsigned long, unsigned long);
+void arm926_flush_kern_dcache_area(void *, size_t);
+void arm926_dma_map_area(const void *, size_t, int);
+void arm926_dma_unmap_area(const void *, size_t, int);
+void arm926_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm926_cache_fns __initconst = {
+ .flush_icache_all = arm926_flush_icache_all,
+ .flush_kern_all = arm926_flush_kern_cache_all,
+ .flush_kern_louis = arm926_flush_kern_cache_all,
+ .flush_user_all = arm926_flush_user_cache_all,
+ .flush_user_range = arm926_flush_user_cache_range,
+ .coherent_kern_range = arm926_coherent_kern_range,
+ .coherent_user_range = arm926_coherent_user_range,
+ .flush_kern_dcache_area = arm926_flush_kern_dcache_area,
+ .dma_map_area = arm926_dma_map_area,
+ .dma_unmap_area = arm926_dma_unmap_area,
+ .dma_flush_range = arm926_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+void arm940_flush_icache_all(void);
+void arm940_flush_kern_cache_all(void);
+void arm940_flush_user_cache_all(void);
+void arm940_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm940_coherent_kern_range(unsigned long, unsigned long);
+int arm940_coherent_user_range(unsigned long, unsigned long);
+void arm940_flush_kern_dcache_area(void *, size_t);
+void arm940_dma_map_area(const void *, size_t, int);
+void arm940_dma_unmap_area(const void *, size_t, int);
+void arm940_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm940_cache_fns __initconst = {
+ .flush_icache_all = arm940_flush_icache_all,
+ .flush_kern_all = arm940_flush_kern_cache_all,
+ .flush_kern_louis = arm940_flush_kern_cache_all,
+ .flush_user_all = arm940_flush_user_cache_all,
+ .flush_user_range = arm940_flush_user_cache_range,
+ .coherent_kern_range = arm940_coherent_kern_range,
+ .coherent_user_range = arm940_coherent_user_range,
+ .flush_kern_dcache_area = arm940_flush_kern_dcache_area,
+ .dma_map_area = arm940_dma_map_area,
+ .dma_unmap_area = arm940_dma_unmap_area,
+ .dma_flush_range = arm940_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+void arm946_flush_icache_all(void);
+void arm946_flush_kern_cache_all(void);
+void arm946_flush_user_cache_all(void);
+void arm946_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm946_coherent_kern_range(unsigned long, unsigned long);
+int arm946_coherent_user_range(unsigned long, unsigned long);
+void arm946_flush_kern_dcache_area(void *, size_t);
+void arm946_dma_map_area(const void *, size_t, int);
+void arm946_dma_unmap_area(const void *, size_t, int);
+void arm946_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm946_cache_fns __initconst = {
+ .flush_icache_all = arm946_flush_icache_all,
+ .flush_kern_all = arm946_flush_kern_cache_all,
+ .flush_kern_louis = arm946_flush_kern_cache_all,
+ .flush_user_all = arm946_flush_user_cache_all,
+ .flush_user_range = arm946_flush_user_cache_range,
+ .coherent_kern_range = arm946_coherent_kern_range,
+ .coherent_user_range = arm946_coherent_user_range,
+ .flush_kern_dcache_area = arm946_flush_kern_dcache_area,
+ .dma_map_area = arm946_dma_map_area,
+ .dma_unmap_area = arm946_dma_unmap_area,
+ .dma_flush_range = arm946_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+void xscale_flush_icache_all(void);
+void xscale_flush_kern_cache_all(void);
+void xscale_flush_user_cache_all(void);
+void xscale_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xscale_coherent_kern_range(unsigned long, unsigned long);
+int xscale_coherent_user_range(unsigned long, unsigned long);
+void xscale_flush_kern_dcache_area(void *, size_t);
+void xscale_dma_map_area(const void *, size_t, int);
+void xscale_dma_unmap_area(const void *, size_t, int);
+void xscale_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xscale_cache_fns __initconst = {
+ .flush_icache_all = xscale_flush_icache_all,
+ .flush_kern_all = xscale_flush_kern_cache_all,
+ .flush_kern_louis = xscale_flush_kern_cache_all,
+ .flush_user_all = xscale_flush_user_cache_all,
+ .flush_user_range = xscale_flush_user_cache_range,
+ .coherent_kern_range = xscale_coherent_kern_range,
+ .coherent_user_range = xscale_coherent_user_range,
+ .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+ .dma_map_area = xscale_dma_map_area,
+ .dma_unmap_area = xscale_dma_unmap_area,
+ .dma_flush_range = xscale_dma_flush_range,
+};
+
+/* The 80200 A0 and A1 need a special quirk for dma_map_area() */
+void xscale_80200_A0_A1_dma_map_area(const void *, size_t, int);
+
+struct cpu_cache_fns xscale_80200_A0_A1_cache_fns __initconst = {
+ .flush_icache_all = xscale_flush_icache_all,
+ .flush_kern_all = xscale_flush_kern_cache_all,
+ .flush_kern_louis = xscale_flush_kern_cache_all,
+ .flush_user_all = xscale_flush_user_cache_all,
+ .flush_user_range = xscale_flush_user_cache_range,
+ .coherent_kern_range = xscale_coherent_kern_range,
+ .coherent_user_range = xscale_coherent_user_range,
+ .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+ .dma_map_area = xscale_80200_A0_A1_dma_map_area,
+ .dma_unmap_area = xscale_dma_unmap_area,
+ .dma_flush_range = xscale_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+void xsc3_flush_icache_all(void);
+void xsc3_flush_kern_cache_all(void);
+void xsc3_flush_user_cache_all(void);
+void xsc3_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xsc3_coherent_kern_range(unsigned long, unsigned long);
+int xsc3_coherent_user_range(unsigned long, unsigned long);
+void xsc3_flush_kern_dcache_area(void *, size_t);
+void xsc3_dma_map_area(const void *, size_t, int);
+void xsc3_dma_unmap_area(const void *, size_t, int);
+void xsc3_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xsc3_cache_fns __initconst = {
+ .flush_icache_all = xsc3_flush_icache_all,
+ .flush_kern_all = xsc3_flush_kern_cache_all,
+ .flush_kern_louis = xsc3_flush_kern_cache_all,
+ .flush_user_all = xsc3_flush_user_cache_all,
+ .flush_user_range = xsc3_flush_user_cache_range,
+ .coherent_kern_range = xsc3_coherent_kern_range,
+ .coherent_user_range = xsc3_coherent_user_range,
+ .flush_kern_dcache_area = xsc3_flush_kern_dcache_area,
+ .dma_map_area = xsc3_dma_map_area,
+ .dma_unmap_area = xsc3_dma_unmap_area,
+ .dma_flush_range = xsc3_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+void mohawk_flush_icache_all(void);
+void mohawk_flush_kern_cache_all(void);
+void mohawk_flush_user_cache_all(void);
+void mohawk_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void mohawk_coherent_kern_range(unsigned long, unsigned long);
+int mohawk_coherent_user_range(unsigned long, unsigned long);
+void mohawk_flush_kern_dcache_area(void *, size_t);
+void mohawk_dma_map_area(const void *, size_t, int);
+void mohawk_dma_unmap_area(const void *, size_t, int);
+void mohawk_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns mohawk_cache_fns __initconst = {
+ .flush_icache_all = mohawk_flush_icache_all,
+ .flush_kern_all = mohawk_flush_kern_cache_all,
+ .flush_kern_louis = mohawk_flush_kern_cache_all,
+ .flush_user_all = mohawk_flush_user_cache_all,
+ .flush_user_range = mohawk_flush_user_cache_range,
+ .coherent_kern_range = mohawk_coherent_kern_range,
+ .coherent_user_range = mohawk_coherent_user_range,
+ .flush_kern_dcache_area = mohawk_flush_kern_dcache_area,
+ .dma_map_area = mohawk_dma_map_area,
+ .dma_unmap_area = mohawk_dma_unmap_area,
+ .dma_flush_range = mohawk_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+void feroceon_flush_icache_all(void);
+void feroceon_flush_kern_cache_all(void);
+void feroceon_flush_user_cache_all(void);
+void feroceon_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void feroceon_coherent_kern_range(unsigned long, unsigned long);
+int feroceon_coherent_user_range(unsigned long, unsigned long);
+void feroceon_flush_kern_dcache_area(void *, size_t);
+void feroceon_dma_map_area(const void *, size_t, int);
+void feroceon_dma_unmap_area(const void *, size_t, int);
+void feroceon_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_cache_fns __initconst = {
+ .flush_icache_all = feroceon_flush_icache_all,
+ .flush_kern_all = feroceon_flush_kern_cache_all,
+ .flush_kern_louis = feroceon_flush_kern_cache_all,
+ .flush_user_all = feroceon_flush_user_cache_all,
+ .flush_user_range = feroceon_flush_user_cache_range,
+ .coherent_kern_range = feroceon_coherent_kern_range,
+ .coherent_user_range = feroceon_coherent_user_range,
+ .flush_kern_dcache_area = feroceon_flush_kern_dcache_area,
+ .dma_map_area = feroceon_dma_map_area,
+ .dma_unmap_area = feroceon_dma_unmap_area,
+ .dma_flush_range = feroceon_dma_flush_range,
+};
+
+void feroceon_range_flush_kern_dcache_area(void *, size_t);
+void feroceon_range_dma_map_area(const void *, size_t, int);
+void feroceon_range_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_range_cache_fns __initconst = {
+ .flush_icache_all = feroceon_flush_icache_all,
+ .flush_kern_all = feroceon_flush_kern_cache_all,
+ .flush_kern_louis = feroceon_flush_kern_cache_all,
+ .flush_user_all = feroceon_flush_user_cache_all,
+ .flush_user_range = feroceon_flush_user_cache_range,
+ .coherent_kern_range = feroceon_coherent_kern_range,
+ .coherent_user_range = feroceon_coherent_user_range,
+ .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area,
+ .dma_map_area = feroceon_range_dma_map_area,
+ .dma_unmap_area = feroceon_dma_unmap_area,
+ .dma_flush_range = feroceon_range_dma_flush_range,
+};
+#endif
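
The tables above replace the old define_cache_functions assembler macro with plain C definitions, and each one maps flush_kern_louis onto the same flush_kern_cache_all implementation that the removed ".equ *_flush_kern_cache_louis" aliases used to provide. A minimal sketch of a call through one of these tables, assuming only the prototypes declared above and the struct cpu_cache_fns layout from <asm/cacheflush.h>; the helper name is invented for illustration:

#include <linux/types.h>
#include <linux/dma-direction.h>
#include <asm/cacheflush.h>

/* Illustration only: clean the CPU caches for a buffer a device is about
 * to read, going through a per-CPU table such as &arm926_cache_fns. */
static void example_dma_to_device(const struct cpu_cache_fns *fns,
				  const void *buf, size_t len)
{
	fns->dma_map_area(buf, len, DMA_TO_DEVICE);
}
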
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index b94850b57995..97db5397c320 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
}
}
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
if (IS_ENABLED(CONFIG_CPU_V7M)) {
/*
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f68db05eba29..5adf1769eee4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping;
+ u64 dma_base = 0, size = 1ULL << 32;
+ if (dev->dma_range_map) {
+ dma_base = dma_range_map_min(dev->dma_range_map);
+ size = dma_range_map_max(dev->dma_range_map) - dma_base;
+ }
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
{
}
@@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
return;
if (device_iommu_mapped(dev))
- arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
+ arm_setup_iommu_dma_ops(dev);
xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true;
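
Both the nommu and MMU variants of arch_setup_dma_ops() lose the dma_base/size arguments; the IOMMU path now derives the window from dev->dma_range_map, defaulting to a 4 GiB mapping at offset 0 when no range map is present. Callers therefore shrink to the two-argument form. A hedged sketch of the caller side (the real call sites live outside this diff, and the header choice is an assumption):

#include <linux/device.h>
#include <linux/dma-map-ops.h>	/* declares arch_setup_dma_ops() */

/* Illustration only: what a bus/firmware configuration call now looks like.
 * Any DMA addressing limits travel via dev->dma_range_map instead of
 * explicit dma_base/size arguments. */
static void example_setup(struct device *dev, bool coherent)
{
	arch_setup_dma_ops(dev, coherent);
}
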
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 439dc6a26bb9..67c425341a95 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -226,9 +226,6 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
#ifdef CONFIG_MMU
-#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
-
static inline bool is_permission_fault(unsigned int fsr)
{
int fs = fsr_fs(fsr);
@@ -242,6 +239,27 @@ static inline bool is_permission_fault(unsigned int fsr)
return false;
}
+#ifdef CONFIG_CPU_TTBR0_PAN
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ struct svc_pt_regs *svcregs;
+
+ /* If we are in user mode: permission granted */
+ if (user_mode(regs))
+ return true;
+
+ /* uaccess state saved above pt_regs on SVC exception entry */
+ svcregs = to_svc_pt_regs(regs);
+
+ return !(svcregs->ttbcr & TTBCR_EPD0);
+}
+#else
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ return true;
+}
+#endif
+
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -285,6 +303,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ /*
+ * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
+ * routed via the translation fault mechanism. Check whether uaccess
+ * is disabled while in kernel mode.
+ */
+ if (!ttbr0_usermode_access_allowed(regs))
+ goto no_context;
+
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
@@ -294,7 +320,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!(vma->vm_flags & vm_flags)) {
vma_end_read(vma);
- goto lock_mmap;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ fault = 0;
+ code = SEGV_ACCERR;
+ goto bad_area;
}
fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
@@ -319,7 +348,8 @@ lock_mmap:
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
- fault = VM_FAULT_BADMAP;
+ fault = 0;
+ code = SEGV_MAPERR;
goto bad_area;
}
@@ -327,10 +357,14 @@ retry:
* ok, we have a good vm_area for this memory access, check the
* permissions on the VMA allow for the fault which occurred.
*/
- if (!(vma->vm_flags & vm_flags))
- fault = VM_FAULT_BADACCESS;
- else
- fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
+ if (!(vma->vm_flags & vm_flags)) {
+ mmap_read_unlock(mm);
+ fault = 0;
+ code = SEGV_ACCERR;
+ goto bad_area;
+ }
+
+ fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
/* If we need to retry but a fatal signal is pending, handle the
* signal first. We do not need to release the mmap_lock because
@@ -356,12 +390,11 @@ retry:
mmap_read_unlock(mm);
done:
- /*
- * Handle the "normal" case first - VM_FAULT_MAJOR
- */
- if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ /* Handle the "normal" case first */
+ if (likely(!(fault & VM_FAULT_ERROR)))
return 0;
+ code = SEGV_MAPERR;
bad_area:
/*
* If we are in kernel mode at this point, we
@@ -393,8 +426,6 @@ bad_area:
* isn't in our memory map..
*/
sig = SIGSEGV;
- code = fault == VM_FAULT_BADACCESS ?
- SEGV_ACCERR : SEGV_MAPERR;
}
__do_user_fault(addr, fsr, sig, code, regs);
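
The fault.c hunks retire the private VM_FAULT_BADMAP/VM_FAULT_BADACCESS encodings: each failure site now selects the SIGSEGV si_code itself and branches to bad_area, so the tail of do_page_fault() no longer decodes synthetic fault bits. A condensed sketch of the resulting classification, for illustration only (not literal kernel code):

#include <linux/types.h>
#include <linux/signal.h>	/* SEGV_MAPERR, SEGV_ACCERR */

static int example_segv_code(bool vma_found, bool access_allowed)
{
	if (!vma_found)
		return SEGV_MAPERR;	/* no mapping at the faulting address */
	if (!access_allowed)
		return SEGV_ACCERR;	/* mapped, but the VMA forbids this access */
	return 0;			/* otherwise handled by handle_mm_fault() */
}
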
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
deleted file mode 100644
index dd7a0277c5c0..000000000000
--- a/arch/arm/mm/hugetlbpage.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mm/hugetlbpage.c
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <asm/mman.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-
-/*
- * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
- * of type casting from pmd_t * to pte_t *.
- */
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
-}
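
The deleted pud_huge()/pmd_huge() helpers only restated what the page-table layout already encodes in the section/table bit; presumably the generic code now relies on the leaf helpers instead (an assumption, the replacement is outside this diff). For reference, the condition the deleted pmd_huge() tested, written as a hypothetical helper:

#include <linux/mm.h>	/* pulls in the ARM page-table definitions */

/* Same test as the removed pmd_huge(); shown only for reference. */
static inline bool example_pmd_is_section(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
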
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e8c6f4be0ce1..5345d218899a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
+#include <linux/execmem.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@@ -486,3 +487,47 @@ void free_initrd_mem(unsigned long start, unsigned long end)
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
+
+#ifdef CONFIG_EXECMEM
+
+#ifdef CONFIG_XIP_KERNEL
+/*
+ * The XIP kernel text is mapped in the module area for modules and
+ * some other stuff to work without any indirect relocations.
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
+ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ */
+#undef MODULES_VADDR
+#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
+#endif
+
+#ifdef CONFIG_MMU
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ unsigned long fallback_start = 0, fallback_end = 0;
+
+ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
+ fallback_start = VMALLOC_START;
+ fallback_end = VMALLOC_END;
+ }
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL_EXEC,
+ .alignment = 1,
+ .fallback_start = fallback_start,
+ .fallback_end = fallback_end,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_MMU */
+
+#endif /* CONFIG_EXECMEM */
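
The new execmem_arch_setup() describes where executable allocations such as modules may live: the EXECMEM_DEFAULT range covers MODULES_VADDR..MODULES_END with PAGE_KERNEL_EXEC, and with ARM_MODULE_PLTS the vmalloc area is published as a fallback. Roughly what the generic consumer is expected to do with that information, as a sketch under assumptions (the real allocator lives in mm/execmem.c; the helper below is invented):

#include <linux/execmem.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/numa.h>

/* Invented helper, shown only to spell out the fallback semantics. */
static void *example_alloc_exec(const struct execmem_range *r, unsigned long size)
{
	void *p;

	/* Try the dedicated module area first ... */
	p = __vmalloc_node_range(size, r->alignment, r->start, r->end,
				 GFP_KERNEL, r->pgprot, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	/* ... then, with ARM_MODULE_PLTS, retry in the vmalloc space. */
	if (!p && r->fallback_start)
		p = __vmalloc_node_range(size, r->alignment,
					 r->fallback_start, r->fallback_end,
					 GFP_KERNEL, r->pgprot, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));
	return p;
}
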
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index a0f8a0ca0788..d65d0e6ed10a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -34,7 +34,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/*
* We only need to do colour alignment if either the I or D
@@ -68,7 +68,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
@@ -87,7 +86,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long addr = addr0;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/*
* We only need to do colour alignment if either the I or D
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c24e29c0b9a4..3f774856ca67 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1687,9 +1687,8 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
- asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
- asm volatile("mcr p15, 0, %0, c2, c0, 2"
- : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
+ ttbcr = cpu_get_ttbcr();
+ cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
flush_cache_all();
/*
@@ -1701,7 +1700,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
- asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+ cpu_set_ttbcr(ttbcr);
set_cr(cr);
}
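
early_paging_init() now goes through cpu_get_ttbcr()/cpu_set_ttbcr() accessors instead of open-coding the CP15 accesses. The accessors themselves are added outside this excerpt; their presumed shape, mirroring the inline asm removed above (exact types and header location are assumptions):

/* Assumed definitions; the real ones live in a shared ARM header. */
static inline unsigned long cpu_get_ttbcr(void)
{
	unsigned long ttbcr;

	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
	return ttbcr;
}

static inline void cpu_set_ttbcr(unsigned long ttbcr)
{
	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
}
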
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 6837cf7a4812..d0ce3414a13e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1020_proc_init()
*/
-ENTRY(cpu_arm1020_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1020_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1020_proc_init)
/*
* cpu_arm1020_proc_fin()
*/
-ENTRY(cpu_arm1020_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1020_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1020_proc_fin)
/*
* cpu_arm1020_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1020_reset)
+SYM_TYPED_FUNC_START(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1020_reset)
+SYM_FUNC_END(cpu_arm1020_reset)
.popsection
/*
* cpu_arm1020_do_idle()
*/
.align 5
-ENTRY(cpu_arm1020_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1020_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1020_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1020_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1020_flush_icache_all)
+SYM_FUNC_END(arm1020_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1020_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1020_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1020_flush_user_cache_all, arm1020_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1020_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1020_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -154,6 +158,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -165,7 +170,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1020_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1020_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -185,6 +190,7 @@ ENTRY(arm1020_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -196,8 +202,11 @@ ENTRY(arm1020_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_coherent_kern_range)
- /* FALLTRHOUGH */
+SYM_TYPED_FUNC_START(arm1020_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1020_coherent_user_range
+#endif
+SYM_FUNC_END(arm1020_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -209,7 +218,7 @@ ENTRY(arm1020_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1020_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, ip, c7, c10, 4
@@ -227,6 +236,7 @@ ENTRY(arm1020_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1020_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -237,7 +247,7 @@ ENTRY(arm1020_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1020_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1020_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -249,6 +259,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -314,7 +325,7 @@ arm1020_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1020_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -327,6 +338,7 @@ ENTRY(arm1020_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -334,13 +346,13 @@ ENTRY(arm1020_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020_dma_map_area)
+SYM_TYPED_FUNC_START(arm1020_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1020_dma_clean_range
bcs arm1020_dma_inv_range
b arm1020_dma_flush_range
-ENDPROC(arm1020_dma_map_area)
+SYM_FUNC_END(arm1020_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -348,18 +360,12 @@ ENDPROC(arm1020_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1020_dma_unmap_area)
ret lr
-ENDPROC(arm1020_dma_unmap_area)
-
- .globl arm1020_flush_kern_cache_louis
- .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1020
+SYM_FUNC_END(arm1020_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1020_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -369,6 +375,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1020_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -380,7 +387,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1020_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1020_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
@@ -408,14 +415,15 @@ ENTRY(cpu_arm1020_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif /* CONFIG_MMU */
ret lr
-
+SYM_FUNC_END(cpu_arm1020_switch_mm)
+
/*
* cpu_arm1020_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1020_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -426,6 +434,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1020_set_pte_ext)
.type __arm1020_setup, #function
__arm1020_setup:
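
The proc-arm*.S conversions that follow all apply the same pattern: ENTRY/ENDPROC becomes SYM_TYPED_FUNC_START/SYM_FUNC_END so the assembler routines carry KCFI type information, SYM_FUNC_ALIAS replaces the old flush_user -> flush_kern fallthrough, and the remaining fallthroughs become explicit branches under CONFIG_CFI_CLANG (presumably because falling through would otherwise run into the type-hash data emitted ahead of the next symbol). Why the typing matters, sketched from the caller's side; this is an illustration, not code from the patch, since the check itself is generated by the compiler:

#include <asm/cacheflush.h>

/* These routines are referenced from C function-pointer tables such as
 * cpu_cache_fns, and with CONFIG_CFI_CLANG indirect calls through those
 * pointers are checked against the target's recorded type - hence the
 * typed asm symbols. */
static void example_checked_call(const struct cpu_cache_fns *fns)
{
	fns->flush_icache_all();	/* target must match "void (void)" */
}
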
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index df49b10250b8..64f031bf6eff 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1020e_proc_init()
*/
-ENTRY(cpu_arm1020e_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1020e_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1020e_proc_init)
/*
* cpu_arm1020e_proc_fin()
*/
-ENTRY(cpu_arm1020e_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1020e_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1020e_proc_fin)
/*
* cpu_arm1020e_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1020e_reset)
+SYM_TYPED_FUNC_START(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1020e_reset)
+SYM_FUNC_END(cpu_arm1020e_reset)
.popsection
/*
* cpu_arm1020e_do_idle()
*/
.align 5
-ENTRY(cpu_arm1020e_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020e_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1020e_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1020e_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1020e_flush_icache_all)
+SYM_FUNC_END(arm1020e_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1020e_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1020e_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1020e_flush_user_cache_all, arm1020e_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1020e_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -153,6 +157,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -164,7 +169,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1020e_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -182,6 +187,7 @@ ENTRY(arm1020e_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -193,8 +199,12 @@ ENTRY(arm1020e_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1020e_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1020e_coherent_user_range
+#endif
+SYM_FUNC_END(arm1020e_coherent_kern_range)
+
/*
* coherent_user_range(start, end)
*
@@ -205,7 +215,7 @@ ENTRY(arm1020e_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1020e_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -221,6 +231,7 @@ ENTRY(arm1020e_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1020e_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +242,7 @@ ENTRY(arm1020e_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1020e_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1020e_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -242,6 +253,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -302,7 +314,7 @@ arm1020e_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1020e_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -313,6 +325,7 @@ ENTRY(arm1020e_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -320,13 +333,13 @@ ENTRY(arm1020e_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020e_dma_map_area)
+SYM_TYPED_FUNC_START(arm1020e_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1020e_dma_clean_range
bcs arm1020e_dma_inv_range
b arm1020e_dma_flush_range
-ENDPROC(arm1020e_dma_map_area)
+SYM_FUNC_END(arm1020e_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -334,18 +347,12 @@ ENDPROC(arm1020e_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020e_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area)
ret lr
-ENDPROC(arm1020e_dma_unmap_area)
-
- .globl arm1020e_flush_kern_cache_louis
- .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1020e
+SYM_FUNC_END(arm1020e_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1020e_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -354,6 +361,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1020e_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -365,7 +373,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1020e_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1020e_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
@@ -392,14 +400,15 @@ ENTRY(cpu_arm1020e_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1020e_switch_mm)
+
/*
* cpu_arm1020e_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1020e_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -408,6 +417,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1020e_set_pte_ext)
.type __arm1020e_setup, #function
__arm1020e_setup:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index e89ce467f672..42ed5ed07252 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1022_proc_init()
*/
-ENTRY(cpu_arm1022_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1022_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1022_proc_init)
/*
* cpu_arm1022_proc_fin()
*/
-ENTRY(cpu_arm1022_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1022_proc_fin)
/*
* cpu_arm1022_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1022_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1022_reset)
+SYM_TYPED_FUNC_START(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1022_reset)
+SYM_FUNC_END(cpu_arm1022_reset)
.popsection
/*
* cpu_arm1022_do_idle()
*/
.align 5
-ENTRY(cpu_arm1022_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1022_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1022_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1022_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1022_flush_icache_all)
+SYM_FUNC_END(arm1022_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1022_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1022_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1022_flush_user_cache_all, arm1022_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1022_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -152,6 +156,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -163,7 +168,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1022_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -181,6 +186,7 @@ ENTRY(arm1022_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -192,8 +198,11 @@ ENTRY(arm1022_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1022_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1022_coherent_user_range
+#endif
+SYM_FUNC_END(arm1022_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -205,7 +214,7 @@ ENTRY(arm1022_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1022_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -221,6 +230,7 @@ ENTRY(arm1022_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1022_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +241,7 @@ ENTRY(arm1022_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1022_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -242,6 +252,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -302,7 +313,7 @@ arm1022_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1022_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -313,6 +324,7 @@ ENTRY(arm1022_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -320,13 +332,13 @@ ENTRY(arm1022_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1022_dma_map_area)
+SYM_TYPED_FUNC_START(arm1022_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1022_dma_clean_range
bcs arm1022_dma_inv_range
b arm1022_dma_flush_range
-ENDPROC(arm1022_dma_map_area)
+SYM_FUNC_END(arm1022_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -334,18 +346,12 @@ ENDPROC(arm1022_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1022_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
ret lr
-ENDPROC(arm1022_dma_unmap_area)
-
- .globl arm1022_flush_kern_cache_louis
- .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1022
+SYM_FUNC_END(arm1022_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1022_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -354,6 +360,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1022_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -365,7 +372,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1022_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
@@ -385,14 +392,15 @@ ENTRY(cpu_arm1022_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1022_switch_mm)
+
/*
* cpu_arm1022_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1022_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -401,6 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1022_set_pte_ext)
.type __arm1022_setup, #function
__arm1022_setup:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 7fdd1a205e8e..b3ae62cd553a 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1026_proc_init()
*/
-ENTRY(cpu_arm1026_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1026_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1026_proc_init)
/*
* cpu_arm1026_proc_fin()
*/
-ENTRY(cpu_arm1026_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1026_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1026_proc_fin)
/*
* cpu_arm1026_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1026_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1026_reset)
+SYM_TYPED_FUNC_START(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1026_reset)
+SYM_FUNC_END(cpu_arm1026_reset)
.popsection
/*
* cpu_arm1026_do_idle()
*/
.align 5
-ENTRY(cpu_arm1026_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1026_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1026_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1026_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1026_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1026_flush_icache_all)
+SYM_FUNC_END(arm1026_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1026_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1026_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1026_flush_user_cache_all, arm1026_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1026_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1026_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -147,6 +151,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -158,7 +163,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1026_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1026_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -176,6 +181,7 @@ ENTRY(arm1026_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -187,8 +193,12 @@ ENTRY(arm1026_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1026_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1026_coherent_user_range
+#endif
+SYM_FUNC_END(arm1026_coherent_kern_range)
+
/*
* coherent_user_range(start, end)
*
@@ -199,7 +209,7 @@ ENTRY(arm1026_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1026_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -215,6 +225,7 @@ ENTRY(arm1026_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1026_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -225,7 +236,7 @@ ENTRY(arm1026_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1026_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1026_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -236,6 +247,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -296,7 +308,7 @@ arm1026_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1026_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -307,6 +319,7 @@ ENTRY(arm1026_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -314,13 +327,13 @@ ENTRY(arm1026_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1026_dma_map_area)
+SYM_TYPED_FUNC_START(arm1026_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1026_dma_clean_range
bcs arm1026_dma_inv_range
b arm1026_dma_flush_range
-ENDPROC(arm1026_dma_map_area)
+SYM_FUNC_END(arm1026_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -328,18 +341,12 @@ ENDPROC(arm1026_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1026_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1026_dma_unmap_area)
ret lr
-ENDPROC(arm1026_dma_unmap_area)
-
- .globl arm1026_flush_kern_cache_louis
- .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1026
+SYM_FUNC_END(arm1026_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1026_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -348,6 +355,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1026_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -359,7 +367,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1026_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
@@ -374,14 +382,15 @@ ENTRY(cpu_arm1026_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1026_switch_mm)
+
/*
* cpu_arm1026_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1026_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -390,6 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1026_set_pte_ext)
.type __arm1026_setup, #function
__arm1026_setup:
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 3b687e6dd9fd..59732c334e1d 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -20,6 +20,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -35,24 +36,30 @@
*
* Notes : This processor does not require these
*/
-ENTRY(cpu_arm720_dcache_clean_area)
-ENTRY(cpu_arm720_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm720_dcache_clean_area)
ret lr
+SYM_FUNC_END(cpu_arm720_dcache_clean_area)
-ENTRY(cpu_arm720_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm720_proc_init)
+ ret lr
+SYM_FUNC_END(cpu_arm720_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm720_proc_fin)
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm720_proc_fin)
/*
* Function: arm720_proc_do_idle(void)
* Params : r0 = unused
* Purpose : put the processor in proper idle mode
*/
-ENTRY(cpu_arm720_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm720_do_idle)
ret lr
+SYM_FUNC_END(cpu_arm720_do_idle)
/*
* Function: arm720_switch_mm(unsigned long pgd_phys)
@@ -60,7 +67,7 @@ ENTRY(cpu_arm720_do_idle)
* Purpose : Perform a task switch, saving the old process' state and restoring
* the new.
*/
-ENTRY(cpu_arm720_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm720_switch_mm)
#ifdef CONFIG_MMU
mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
@@ -68,6 +75,7 @@ ENTRY(cpu_arm720_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
#endif
ret lr
+SYM_FUNC_END(cpu_arm720_switch_mm)
/*
* Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
@@ -76,11 +84,12 @@ ENTRY(cpu_arm720_switch_mm)
* Purpose : Set a PTE and flush it out of any WB cache
*/
.align 5
-ENTRY(cpu_arm720_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
#endif
ret lr
+SYM_FUNC_END(cpu_arm720_set_pte_ext)
/*
* Function: arm720_reset
@@ -88,7 +97,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm720_reset)
+SYM_TYPED_FUNC_START(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
#ifdef CONFIG_MMU
@@ -99,7 +108,7 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm720_reset)
+SYM_FUNC_END(cpu_arm720_reset)
.popsection
.type __arm710_setup, #function
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index f2ec3bc60874..78854df63964 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -24,21 +25,32 @@
*
* These are not required.
*/
-ENTRY(cpu_arm740_proc_init)
-ENTRY(cpu_arm740_do_idle)
-ENTRY(cpu_arm740_dcache_clean_area)
-ENTRY(cpu_arm740_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm740_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm740_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm740_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm740_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm740_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm740_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm740_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm740_switch_mm)
/*
* cpu_arm740_proc_fin()
*/
-ENTRY(cpu_arm740_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm740_proc_fin)
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x3f000000 @ bank/f/lock/s
bic r0, r0, #0x0000000c @ w-buffer/cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm740_proc_fin)
/*
* cpu_arm740_reset(loc)
@@ -46,14 +58,14 @@ ENTRY(cpu_arm740_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm740_reset)
+SYM_TYPED_FUNC_START(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm740_reset)
+SYM_FUNC_END(cpu_arm740_reset)
.popsection
.type __arm740_setup, #function
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 01bbe7576c1c..baa3d4472147 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -23,18 +24,29 @@
* cpu_arm7tdmi_switch_mm()
*
* These are not required.
- */
-ENTRY(cpu_arm7tdmi_proc_init)
-ENTRY(cpu_arm7tdmi_do_idle)
-ENTRY(cpu_arm7tdmi_dcache_clean_area)
-ENTRY(cpu_arm7tdmi_switch_mm)
- ret lr
+*/
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_init)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_switch_mm)
/*
* cpu_arm7tdmi_proc_fin()
- */
-ENTRY(cpu_arm7tdmi_proc_fin)
- ret lr
+*/
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_fin)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_proc_fin)
/*
* Function: cpu_arm7tdmi_reset(loc)
@@ -42,9 +54,9 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm7tdmi_reset)
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_reset)
ret r0
-ENDPROC(cpu_arm7tdmi_reset)
+SYM_FUNC_END(cpu_arm7tdmi_reset)
.popsection
.type __arm7tdmi_setup, #function
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index a234cd8ba5e6..a30df54ad5fa 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -48,18 +49,20 @@
/*
* cpu_arm920_proc_init()
*/
-ENTRY(cpu_arm920_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm920_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm920_proc_init)
/*
* cpu_arm920_proc_fin()
*/
-ENTRY(cpu_arm920_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm920_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm920_proc_fin)
/*
* cpu_arm920_reset(loc)
@@ -72,7 +75,7 @@ ENTRY(cpu_arm920_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm920_reset)
+SYM_TYPED_FUNC_START(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -84,17 +87,17 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm920_reset)
+SYM_FUNC_END(cpu_arm920_reset)
.popsection
/*
* cpu_arm920_do_idle()
*/
.align 5
-ENTRY(cpu_arm920_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
-
+SYM_FUNC_END(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -103,11 +106,11 @@ ENTRY(cpu_arm920_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm920_flush_icache_all)
+SYM_TYPED_FUNC_START(arm920_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm920_flush_icache_all)
+SYM_FUNC_END(arm920_flush_icache_all)
/*
* flush_user_cache_all()
@@ -115,15 +118,14 @@ ENDPROC(arm920_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm920_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm920_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -138,6 +140,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -149,7 +152,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for address space
*/
-ENTRY(arm920_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm920_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -164,6 +167,7 @@ ENTRY(arm920_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -175,8 +179,11 @@ ENTRY(arm920_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm920_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm920_coherent_user_range
+#endif
+SYM_FUNC_END(arm920_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -188,7 +195,7 @@ ENTRY(arm920_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_coherent_user_range)
+SYM_TYPED_FUNC_START(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -198,6 +205,7 @@ ENTRY(arm920_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm920_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -208,7 +216,7 @@ ENTRY(arm920_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm920_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -218,6 +226,7 @@ ENTRY(arm920_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -272,7 +281,7 @@ arm920_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_dma_flush_range)
+SYM_TYPED_FUNC_START(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -280,6 +289,7 @@ ENTRY(arm920_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -287,13 +297,13 @@ ENTRY(arm920_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm920_dma_map_area)
+SYM_TYPED_FUNC_START(arm920_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm920_dma_clean_range
bcs arm920_dma_inv_range
b arm920_dma_flush_range
-ENDPROC(arm920_dma_map_area)
+SYM_FUNC_END(arm920_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -301,24 +311,20 @@ ENDPROC(arm920_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm920_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
ret lr
-ENDPROC(arm920_dma_unmap_area)
+SYM_FUNC_END(arm920_dma_unmap_area)
- .globl arm920_flush_kern_cache_louis
- .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm920
-#endif
+#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
-ENTRY(cpu_arm920_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_arm920_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -330,7 +336,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm920_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -354,6 +360,7 @@ ENTRY(cpu_arm920_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm920_switch_mm)
/*
* cpu_arm920_set_pte_ext(ptep, pte, ext)
@@ -361,7 +368,7 @@ ENTRY(cpu_arm920_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm920_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -369,21 +376,22 @@ ENTRY(cpu_arm920_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_arm920_do_suspend)
+SYM_TYPED_FUNC_START(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_arm920_do_suspend)
+SYM_FUNC_END(cpu_arm920_do_suspend)
-ENTRY(cpu_arm920_do_resume)
+SYM_TYPED_FUNC_START(cpu_arm920_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -393,7 +401,7 @@ ENTRY(cpu_arm920_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_arm920_do_resume)
+SYM_FUNC_END(cpu_arm920_do_resume)
#endif
.type __arm920_setup, #function
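
The conversion pattern above repeats through every proc-*.S file in this diff: routines reached indirectly through the processor and cache function tables switch from ENTRY()/ENDPROC() (or a bare ENTRY() with no end marker) to SYM_TYPED_FUNC_START()/SYM_FUNC_END(), which give the symbol a kCFI type annotation, and the old "empty ENTRY() plus /* FALLTHROUGH */" trick for flush_user_cache_all becomes an explicit SYM_FUNC_ALIAS(). A minimal sketch of the before/after shape, using a made-up foo_ prefix rather than code taken from the patch:

	@ Old style: open-coded label, no end marker, no type information
	ENTRY(foo_flush_icache_all)
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
		ret	lr
	ENDPROC(foo_flush_icache_all)

	@ New style: typed symbol, valid target of a CFI-checked indirect call
	SYM_TYPED_FUNC_START(foo_flush_icache_all)
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
		ret	lr
	SYM_FUNC_END(foo_flush_icache_all)

	@ A pure alias replaces the old "empty ENTRY + /* FALLTHROUGH */" idiom
	SYM_FUNC_ALIAS(foo_flush_user_cache_all, foo_flush_kern_cache_all)

SYM_FUNC_END() still emits the .type/.size bookkeeping that ENDPROC()/END() used to provide, so with CFI disabled the generated code should be unchanged.
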
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 53c029dcfd83..aac4e048100d 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -14,6 +14,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -50,18 +51,20 @@
/*
* cpu_arm922_proc_init()
*/
-ENTRY(cpu_arm922_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm922_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm922_proc_init)
/*
* cpu_arm922_proc_fin()
*/
-ENTRY(cpu_arm922_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm922_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm922_proc_fin)
/*
* cpu_arm922_reset(loc)
@@ -74,7 +77,7 @@ ENTRY(cpu_arm922_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm922_reset)
+SYM_TYPED_FUNC_START(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -86,17 +89,17 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm922_reset)
+SYM_FUNC_END(cpu_arm922_reset)
.popsection
/*
* cpu_arm922_do_idle()
*/
.align 5
-ENTRY(cpu_arm922_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
-
+SYM_FUNC_END(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -105,11 +108,11 @@ ENTRY(cpu_arm922_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm922_flush_icache_all)
+SYM_TYPED_FUNC_START(arm922_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm922_flush_icache_all)
+SYM_FUNC_END(arm922_flush_icache_all)
/*
* flush_user_cache_all()
@@ -117,15 +120,14 @@ ENDPROC(arm922_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm922_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm922_flush_user_cache_all, arm922_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm922_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -140,6 +142,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -151,7 +154,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm922_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm922_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -166,6 +169,7 @@ ENTRY(arm922_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -177,8 +181,11 @@ ENTRY(arm922_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm922_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm922_coherent_user_range
+#endif
+SYM_FUNC_END(arm922_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -190,7 +197,7 @@ ENTRY(arm922_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_coherent_user_range)
+SYM_TYPED_FUNC_START(arm922_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -200,6 +207,7 @@ ENTRY(arm922_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm922_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -210,7 +218,7 @@ ENTRY(arm922_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm922_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -220,6 +228,7 @@ ENTRY(arm922_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -274,7 +283,7 @@ arm922_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_dma_flush_range)
+SYM_TYPED_FUNC_START(arm922_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -282,6 +291,7 @@ ENTRY(arm922_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -289,13 +299,13 @@ ENTRY(arm922_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm922_dma_map_area)
+SYM_TYPED_FUNC_START(arm922_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm922_dma_clean_range
bcs arm922_dma_inv_range
b arm922_dma_flush_range
-ENDPROC(arm922_dma_map_area)
+SYM_FUNC_END(arm922_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -303,19 +313,13 @@ ENDPROC(arm922_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm922_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm922_dma_unmap_area)
ret lr
-ENDPROC(arm922_dma_unmap_area)
-
- .globl arm922_flush_kern_cache_louis
- .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm922
-#endif
+SYM_FUNC_END(arm922_dma_unmap_area)
+#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
-ENTRY(cpu_arm922_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -323,6 +327,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm922_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -334,7 +339,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm922_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -358,6 +363,7 @@ ENTRY(cpu_arm922_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm922_switch_mm)
/*
* cpu_arm922_set_pte_ext(ptep, pte, ext)
@@ -365,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm922_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -373,6 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm922_set_pte_ext)
.type __arm922_setup, #function
__arm922_setup:
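
The #ifdef CONFIG_CFI_CLANG branch that now appears in each coherent_kern_range is how the series keeps the old fallthrough into coherent_user_range working: with CFI enabled, SYM_TYPED_FUNC_START() places kCFI type data immediately before the next routine's entry point, so falling off the end of coherent_kern_range would land in that data rather than on the first instruction of coherent_user_range, and an explicit branch is needed. With CFI disabled the #ifdef body is empty and the plain fallthrough is preserved, exactly as the "Fallthrough if !CFI" comments say. Schematically, with hypothetical foo_ names:

	SYM_TYPED_FUNC_START(foo_coherent_kern_range)
	#ifdef CONFIG_CFI_CLANG		/* type data precedes the next symbol: branch */
		b	foo_coherent_user_range
	#endif				/* !CFI: nothing emitted in between, fall through */
	SYM_FUNC_END(foo_coherent_kern_range)

	SYM_TYPED_FUNC_START(foo_coherent_user_range)
		@ shared clean + invalidate loop
		ret	lr
	SYM_FUNC_END(foo_coherent_user_range)
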
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 0bfad62ea858..035941faeb2e 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -37,6 +37,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -71,18 +72,20 @@
/*
* cpu_arm925_proc_init()
*/
-ENTRY(cpu_arm925_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm925_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm925_proc_init)
/*
* cpu_arm925_proc_fin()
*/
-ENTRY(cpu_arm925_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm925_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm925_proc_fin)
/*
* cpu_arm925_reset(loc)
@@ -95,14 +98,14 @@ ENTRY(cpu_arm925_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm925_reset)
+SYM_TYPED_FUNC_START(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
orr ip, ip, #0x00fe0000
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
-ENDPROC(cpu_arm925_reset)
+SYM_FUNC_END(cpu_arm925_reset)
.popsection
mov ip, #0
@@ -123,7 +126,7 @@ ENDPROC(cpu_arm925_reset)
* Called with IRQs disabled
*/
.align 10
-ENTRY(cpu_arm925_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm925_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
@@ -132,17 +135,18 @@ ENTRY(cpu_arm925_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
ret lr
+SYM_FUNC_END(cpu_arm925_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm925_flush_icache_all)
+SYM_TYPED_FUNC_START(arm925_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm925_flush_icache_all)
+SYM_FUNC_END(arm925_flush_icache_all)
/*
* flush_user_cache_all()
@@ -150,15 +154,14 @@ ENDPROC(arm925_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm925_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm925_flush_user_cache_all, arm925_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm925_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -175,6 +178,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -186,7 +190,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm925_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm925_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -212,6 +216,7 @@ ENTRY(arm925_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -223,8 +228,11 @@ ENTRY(arm925_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm925_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm925_coherent_user_range
+#endif
+SYM_FUNC_END(arm925_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -236,7 +244,7 @@ ENTRY(arm925_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_coherent_user_range)
+SYM_TYPED_FUNC_START(arm925_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -246,6 +254,7 @@ ENTRY(arm925_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm925_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -256,7 +265,7 @@ ENTRY(arm925_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm925_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -266,6 +275,7 @@ ENTRY(arm925_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -324,7 +334,7 @@ arm925_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_dma_flush_range)
+SYM_TYPED_FUNC_START(arm925_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -337,6 +347,7 @@ ENTRY(arm925_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -344,13 +355,13 @@ ENTRY(arm925_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm925_dma_map_area)
+SYM_TYPED_FUNC_START(arm925_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm925_dma_clean_range
bcs arm925_dma_inv_range
b arm925_dma_flush_range
-ENDPROC(arm925_dma_map_area)
+SYM_FUNC_END(arm925_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -358,17 +369,11 @@ ENDPROC(arm925_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm925_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
ret lr
-ENDPROC(arm925_dma_unmap_area)
-
- .globl arm925_flush_kern_cache_louis
- .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm925
+SYM_FUNC_END(arm925_dma_unmap_area)
-ENTRY(cpu_arm925_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -377,6 +382,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm925_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -388,7 +394,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm925_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -406,6 +412,7 @@ ENTRY(cpu_arm925_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm925_switch_mm)
/*
* cpu_arm925_set_pte_ext(ptep, pte, ext)
@@ -413,7 +420,7 @@ ENTRY(cpu_arm925_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm925_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -423,6 +430,7 @@ ENTRY(cpu_arm925_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function
__arm925_setup:
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0487a2c3439b..6f43d6af2d9a 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -40,18 +41,20 @@
/*
* cpu_arm926_proc_init()
*/
-ENTRY(cpu_arm926_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm926_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm926_proc_init)
/*
* cpu_arm926_proc_fin()
*/
-ENTRY(cpu_arm926_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm926_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm926_proc_fin)
/*
* cpu_arm926_reset(loc)
@@ -64,7 +67,7 @@ ENTRY(cpu_arm926_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm926_reset)
+SYM_TYPED_FUNC_START(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -76,7 +79,7 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm926_reset)
+SYM_FUNC_END(cpu_arm926_reset)
.popsection
/*
@@ -85,7 +88,7 @@ ENDPROC(cpu_arm926_reset)
* Called with IRQs disabled
*/
.align 10
-ENTRY(cpu_arm926_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm926_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
@@ -98,17 +101,18 @@ ENTRY(cpu_arm926_do_idle)
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
msr cpsr_c, r3 @ Restore FIQ state
ret lr
+SYM_FUNC_END(cpu_arm926_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm926_flush_icache_all)
+SYM_TYPED_FUNC_START(arm926_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm926_flush_icache_all)
+SYM_FUNC_END(arm926_flush_icache_all)
/*
* flush_user_cache_all()
@@ -116,15 +120,14 @@ ENDPROC(arm926_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm926_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm926_flush_user_cache_all, arm926_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm926_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm926_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -138,6 +141,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -149,7 +153,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm926_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm926_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -175,6 +179,7 @@ ENTRY(arm926_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -186,8 +191,11 @@ ENTRY(arm926_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm926_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm926_coherent_user_range
+#endif
+SYM_FUNC_END(arm926_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -199,7 +207,7 @@ ENTRY(arm926_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_coherent_user_range)
+SYM_TYPED_FUNC_START(arm926_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -209,6 +217,7 @@ ENTRY(arm926_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm926_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -219,7 +228,7 @@ ENTRY(arm926_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm926_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm926_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -229,6 +238,7 @@ ENTRY(arm926_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -287,7 +297,7 @@ arm926_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_dma_flush_range)
+SYM_TYPED_FUNC_START(arm926_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -300,6 +310,7 @@ ENTRY(arm926_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -307,13 +318,13 @@ ENTRY(arm926_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm926_dma_map_area)
+SYM_TYPED_FUNC_START(arm926_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm926_dma_clean_range
bcs arm926_dma_inv_range
b arm926_dma_flush_range
-ENDPROC(arm926_dma_map_area)
+SYM_FUNC_END(arm926_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -321,17 +332,11 @@ ENDPROC(arm926_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm926_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm926_dma_unmap_area)
ret lr
-ENDPROC(arm926_dma_unmap_area)
-
- .globl arm926_flush_kern_cache_louis
- .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
+SYM_FUNC_END(arm926_dma_unmap_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm926
-
-ENTRY(cpu_arm926_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -340,6 +345,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm926_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -351,7 +357,8 @@ ENTRY(cpu_arm926_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm926_switch_mm)
+
+SYM_TYPED_FUNC_START(cpu_arm926_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -367,6 +374,7 @@ ENTRY(cpu_arm926_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm926_switch_mm)
/*
* cpu_arm926_set_pte_ext(ptep, pte, ext)
@@ -374,7 +382,7 @@ ENTRY(cpu_arm926_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm926_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -384,21 +392,22 @@ ENTRY(cpu_arm926_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_arm926_do_suspend)
+SYM_TYPED_FUNC_START(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_arm926_do_suspend)
+SYM_FUNC_END(cpu_arm926_do_suspend)
-ENTRY(cpu_arm926_do_resume)
+SYM_TYPED_FUNC_START(cpu_arm926_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -408,7 +417,7 @@ ENTRY(cpu_arm926_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_arm926_do_resume)
+SYM_FUNC_END(cpu_arm926_do_resume)
#endif
.type __arm926_setup, #function
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index cf9bfcc825ca..0d30bb25c42b 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -25,19 +26,24 @@
*
* These are not required.
*/
-ENTRY(cpu_arm940_proc_init)
-ENTRY(cpu_arm940_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm940_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm940_switch_mm)
/*
* cpu_arm940_proc_fin()
*/
-ENTRY(cpu_arm940_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x00001000 @ i-cache
bic r0, r0, #0x00000004 @ d-cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm940_proc_fin)
/*
* cpu_arm940_reset(loc)
@@ -45,7 +51,7 @@ ENTRY(cpu_arm940_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm940_reset)
+SYM_TYPED_FUNC_START(cpu_arm940_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -55,42 +61,43 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm940_reset)
+SYM_FUNC_END(cpu_arm940_reset)
.popsection
/*
* cpu_arm940_do_idle()
*/
.align 5
-ENTRY(cpu_arm940_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm940_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm940_flush_icache_all)
+SYM_TYPED_FUNC_START(arm940_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm940_flush_icache_all)
+SYM_FUNC_END(arm940_flush_icache_all)
/*
* flush_user_cache_all()
*/
-ENTRY(arm940_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm940_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
mov r2, #VM_EXEC
- /* FALLTHROUGH */
+ b arm940_flush_user_cache_range
+SYM_FUNC_END(arm940_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -102,7 +109,7 @@ ENTRY(arm940_flush_kern_cache_all)
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm940_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -119,6 +126,7 @@ ENTRY(arm940_flush_user_cache_range)
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -130,8 +138,9 @@ ENTRY(arm940_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
+ b arm940_flush_kern_dcache_area
+SYM_FUNC_END(arm940_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -143,8 +152,11 @@ ENTRY(arm940_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_coherent_user_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_user_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm940_flush_kern_dcache_area
+#endif
+SYM_FUNC_END(arm940_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -155,7 +167,7 @@ ENTRY(arm940_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm940_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
mov r0, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -167,6 +179,7 @@ ENTRY(arm940_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -199,7 +212,7 @@ arm940_dma_inv_range:
* - end - virtual end address
*/
arm940_dma_clean_range:
-ENTRY(cpu_arm940_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
@@ -212,6 +225,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm940_dcache_clean_area)
/*
* dma_flush_range(start, end)
@@ -222,7 +236,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_flush_range)
+SYM_TYPED_FUNC_START(arm940_dma_flush_range)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,6 +252,7 @@ ENTRY(arm940_dma_flush_range)
bcs 1b @ segments 7 to 0
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -245,13 +260,13 @@ ENTRY(arm940_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm940_dma_map_area)
+SYM_TYPED_FUNC_START(arm940_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm940_dma_clean_range
bcs arm940_dma_inv_range
b arm940_dma_flush_range
-ENDPROC(arm940_dma_map_area)
+SYM_FUNC_END(arm940_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -259,15 +274,9 @@ ENDPROC(arm940_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm940_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
ret lr
-ENDPROC(arm940_dma_unmap_area)
-
- .globl arm940_flush_kern_cache_louis
- .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm940
+SYM_FUNC_END(arm940_dma_unmap_area)
.type __arm940_setup, #function
__arm940_setup:
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 6fb3898ad1cd..27750ace2ced 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -32,19 +33,24 @@
*
* These are not required.
*/
-ENTRY(cpu_arm946_proc_init)
-ENTRY(cpu_arm946_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm946_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm946_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm946_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm946_switch_mm)
/*
* cpu_arm946_proc_fin()
*/
-ENTRY(cpu_arm946_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm946_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x00001000 @ i-cache
bic r0, r0, #0x00000004 @ d-cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm946_proc_fin)
/*
* cpu_arm946_reset(loc)
@@ -52,7 +58,7 @@ ENTRY(cpu_arm946_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm946_reset)
+SYM_TYPED_FUNC_START(cpu_arm946_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -62,40 +68,40 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm946_reset)
+SYM_FUNC_END(cpu_arm946_reset)
.popsection
/*
* cpu_arm946_do_idle()
*/
.align 5
-ENTRY(cpu_arm946_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm946_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm946_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm946_flush_icache_all)
+SYM_TYPED_FUNC_START(arm946_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm946_flush_icache_all)
+SYM_FUNC_END(arm946_flush_icache_all)
/*
* flush_user_cache_all()
*/
-ENTRY(arm946_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm946_flush_user_cache_all, arm946_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm946_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm946_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -114,6 +120,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -126,7 +133,7 @@ __flush_whole_cache:
* - flags - vm_flags describing address space
* (same as arm926)
*/
-ENTRY(arm946_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm946_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -153,6 +160,7 @@ ENTRY(arm946_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -164,8 +172,11 @@ ENTRY(arm946_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm946_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm946_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm946_coherent_user_range
+#endif
+SYM_FUNC_END(arm946_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -178,7 +189,7 @@ ENTRY(arm946_coherent_kern_range)
* - end - virtual end address
* (same as arm926)
*/
-ENTRY(arm946_coherent_user_range)
+SYM_TYPED_FUNC_START(arm946_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -188,6 +199,7 @@ ENTRY(arm946_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm946_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -199,7 +211,7 @@ ENTRY(arm946_coherent_user_range)
* - size - region size
* (same as arm926)
*/
-ENTRY(arm946_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm946_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -209,6 +221,7 @@ ENTRY(arm946_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -268,7 +281,7 @@ arm946_dma_clean_range:
*
* (same as arm926)
*/
-ENTRY(arm946_dma_flush_range)
+SYM_TYPED_FUNC_START(arm946_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -281,6 +294,7 @@ ENTRY(arm946_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -288,13 +302,13 @@ ENTRY(arm946_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm946_dma_map_area)
+SYM_TYPED_FUNC_START(arm946_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm946_dma_clean_range
bcs arm946_dma_inv_range
b arm946_dma_flush_range
-ENDPROC(arm946_dma_map_area)
+SYM_FUNC_END(arm946_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -302,17 +316,11 @@ ENDPROC(arm946_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm946_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm946_dma_unmap_area)
ret lr
-ENDPROC(arm946_dma_unmap_area)
-
- .globl arm946_flush_kern_cache_louis
- .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm946
+SYM_FUNC_END(arm946_dma_unmap_area)
-ENTRY(cpu_arm946_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -321,6 +329,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm946_dcache_clean_area)
.type __arm946_setup, #function
__arm946_setup:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index a054c0e9c034..c480a8400eff 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -24,17 +25,28 @@
*
* These are not required.
*/
-ENTRY(cpu_arm9tdmi_proc_init)
-ENTRY(cpu_arm9tdmi_do_idle)
-ENTRY(cpu_arm9tdmi_dcache_clean_area)
-ENTRY(cpu_arm9tdmi_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_switch_mm)
/*
* cpu_arm9tdmi_proc_fin()
*/
-ENTRY(cpu_arm9tdmi_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_fin)
ret lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_fin)
/*
* Function: cpu_arm9tdmi_reset(loc)
@@ -42,9 +54,9 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm9tdmi_reset)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_reset)
ret r0
-ENDPROC(cpu_arm9tdmi_reset)
+SYM_FUNC_END(cpu_arm9tdmi_reset)
.popsection
.type __arm9tdmi_setup, #function
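
proc-arm940.S, proc-arm946.S and proc-arm9tdmi.S also stacked several no-op entry points on a single ret lr (cpu_arm9tdmi_proc_init, _do_idle, _dcache_clean_area and _switch_mm, for example). Those slots have different prototypes, and the kCFI hash sits just before a function's entry point, so routines with different types can no longer share one address; the diff therefore splits the stack into one small typed function per entry point. Roughly, with foo as a placeholder rather than a real processor name:

	@ Before: several entry points sharing one return instruction
	ENTRY(cpu_foo_proc_init)
	ENTRY(cpu_foo_do_idle)
	ENTRY(cpu_foo_switch_mm)
		ret	lr

	@ After: one typed function per table slot
	SYM_TYPED_FUNC_START(cpu_foo_proc_init)
		ret	lr
	SYM_FUNC_END(cpu_foo_proc_init)

	SYM_TYPED_FUNC_START(cpu_foo_do_idle)
		ret	lr
	SYM_FUNC_END(cpu_foo_do_idle)

	@ ...and likewise for cpu_foo_switch_mm
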
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 2c73e0d47d08..7c16ccac8a05 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -26,13 +27,14 @@
/*
* cpu_fa526_proc_init()
*/
-ENTRY(cpu_fa526_proc_init)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_init)
ret lr
+SYM_FUNC_END(cpu_fa526_proc_init)
/*
* cpu_fa526_proc_fin()
*/
-ENTRY(cpu_fa526_proc_fin)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -40,6 +42,7 @@ ENTRY(cpu_fa526_proc_fin)
nop
nop
ret lr
+SYM_FUNC_END(cpu_fa526_proc_fin)
/*
* cpu_fa526_reset(loc)
@@ -52,7 +55,7 @@ ENTRY(cpu_fa526_proc_fin)
*/
.align 4
.pushsection .idmap.text, "ax"
-ENTRY(cpu_fa526_reset)
+SYM_TYPED_FUNC_START(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -68,24 +71,25 @@ ENTRY(cpu_fa526_reset)
nop
nop
ret r0
-ENDPROC(cpu_fa526_reset)
+SYM_FUNC_END(cpu_fa526_reset)
.popsection
/*
* cpu_fa526_do_idle()
*/
.align 4
-ENTRY(cpu_fa526_do_idle)
+SYM_TYPED_FUNC_START(cpu_fa526_do_idle)
ret lr
+SYM_FUNC_END(cpu_fa526_do_idle)
-
-ENTRY(cpu_fa526_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_fa526_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_fa526_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -97,7 +101,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
* pgd: new page tables
*/
.align 4
-ENTRY(cpu_fa526_switch_mm)
+SYM_TYPED_FUNC_START(cpu_fa526_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -113,6 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
#endif
ret lr
+SYM_FUNC_END(cpu_fa526_switch_mm)
/*
* cpu_fa526_set_pte_ext(ptep, pte, ext)
@@ -120,7 +125,7 @@ ENTRY(cpu_fa526_switch_mm)
* Set a PTE and flush it out
*/
.align 4
-ENTRY(cpu_fa526_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_fa526_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -129,6 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_fa526_set_pte_ext)
.type __fa526_setup, #function
__fa526_setup:
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 072ff9b451f8..f67b2ffac854 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -43,7 +44,7 @@ __cache_params:
/*
* cpu_feroceon_proc_init()
*/
-ENTRY(cpu_feroceon_proc_init)
+SYM_TYPED_FUNC_START(cpu_feroceon_proc_init)
mrc p15, 0, r0, c0, c0, 1 @ read cache type register
ldr r1, __cache_params
mov r2, #(16 << 5)
@@ -61,11 +62,12 @@ ENTRY(cpu_feroceon_proc_init)
str_l r1, VFP_arch_feroceon, r2
#endif
ret lr
+SYM_FUNC_END(cpu_feroceon_proc_init)
/*
* cpu_feroceon_proc_fin()
*/
-ENTRY(cpu_feroceon_proc_fin)
+SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r0, #0
@@ -78,6 +80,7 @@ ENTRY(cpu_feroceon_proc_fin)
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_feroceon_proc_fin)
/*
* cpu_feroceon_reset(loc)
@@ -90,7 +93,7 @@ ENTRY(cpu_feroceon_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_feroceon_reset)
+SYM_TYPED_FUNC_START(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -102,7 +105,7 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_feroceon_reset)
+SYM_FUNC_END(cpu_feroceon_reset)
.popsection
/*
@@ -111,22 +114,23 @@ ENDPROC(cpu_feroceon_reset)
* Called with IRQs disabled
*/
.align 5
-ENTRY(cpu_feroceon_do_idle)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_feroceon_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(feroceon_flush_icache_all)
+SYM_TYPED_FUNC_START(feroceon_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(feroceon_flush_icache_all)
+SYM_FUNC_END(feroceon_flush_icache_all)
/*
* flush_user_cache_all()
@@ -135,15 +139,14 @@ ENDPROC(feroceon_flush_icache_all)
* address space.
*/
.align 5
-ENTRY(feroceon_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(feroceon_flush_user_cache_all, feroceon_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(feroceon_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all)
mov r2, #VM_EXEC
__flush_whole_cache:
@@ -161,6 +164,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -173,7 +177,7 @@ __flush_whole_cache:
* - flags - vm_flags describing address space
*/
.align 5
-ENTRY(feroceon_flush_user_cache_range)
+SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
@@ -190,6 +194,7 @@ ENTRY(feroceon_flush_user_cache_range)
mov ip, #0
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -202,8 +207,11 @@ ENTRY(feroceon_flush_user_cache_range)
* - end - virtual end address
*/
.align 5
-ENTRY(feroceon_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(feroceon_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b feroceon_coherent_user_range
+#endif
+SYM_FUNC_END(feroceon_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -215,7 +223,7 @@ ENTRY(feroceon_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(feroceon_coherent_user_range)
+SYM_TYPED_FUNC_START(feroceon_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -225,6 +233,7 @@ ENTRY(feroceon_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(feroceon_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -236,7 +245,7 @@ ENTRY(feroceon_coherent_user_range)
* - size - region size
*/
.align 5
-ENTRY(feroceon_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -246,9 +255,10 @@ ENTRY(feroceon_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_kern_dcache_area)
.align 5
-ENTRY(feroceon_range_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area)
mrs r2, cpsr
add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
orr r3, r2, #PSR_I_BIT
@@ -260,6 +270,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_range_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -346,7 +357,7 @@ feroceon_range_dma_clean_range:
* - end - virtual end address
*/
.align 5
-ENTRY(feroceon_dma_flush_range)
+SYM_TYPED_FUNC_START(feroceon_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -354,9 +365,10 @@ ENTRY(feroceon_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_dma_flush_range)
.align 5
-ENTRY(feroceon_range_dma_flush_range)
+SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range)
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
@@ -367,6 +379,7 @@ ENTRY(feroceon_range_dma_flush_range)
msr cpsr_c, r2 @ restore interrupts
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_range_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -374,13 +387,13 @@ ENTRY(feroceon_range_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_dma_map_area)
+SYM_TYPED_FUNC_START(feroceon_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_dma_clean_range
bcs feroceon_dma_inv_range
b feroceon_dma_flush_range
-ENDPROC(feroceon_dma_map_area)
+SYM_FUNC_END(feroceon_dma_map_area)
/*
* dma_map_area(start, size, dir)
@@ -388,13 +401,13 @@ ENDPROC(feroceon_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_range_dma_map_area)
+SYM_TYPED_FUNC_START(feroceon_range_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_range_dma_clean_range
bcs feroceon_range_dma_inv_range
b feroceon_range_dma_flush_range
-ENDPROC(feroceon_range_dma_map_area)
+SYM_FUNC_END(feroceon_range_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -402,39 +415,12 @@ ENDPROC(feroceon_range_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_dma_unmap_area)
+SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
ret lr
-ENDPROC(feroceon_dma_unmap_area)
-
- .globl feroceon_flush_kern_cache_louis
- .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions feroceon
-
-.macro range_alias basename
- .globl feroceon_range_\basename
- .type feroceon_range_\basename , %function
- .equ feroceon_range_\basename , feroceon_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for this case.
- * Export suitable alias symbols for the unchanged functions:
- */
- range_alias flush_icache_all
- range_alias flush_user_cache_all
- range_alias flush_kern_cache_all
- range_alias flush_kern_cache_louis
- range_alias flush_user_cache_range
- range_alias coherent_kern_range
- range_alias coherent_user_range
- range_alias dma_unmap_area
-
- define_cache_functions feroceon_range
+SYM_FUNC_END(feroceon_dma_unmap_area)
.align 5
-ENTRY(cpu_feroceon_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r2, r0
@@ -453,6 +439,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_feroceon_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -464,7 +451,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_feroceon_switch_mm)
+SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
/*
* Note: we wish to call __flush_whole_cache but we need to preserve
@@ -485,6 +472,7 @@ ENTRY(cpu_feroceon_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_feroceon_switch_mm)
/*
* cpu_feroceon_set_pte_ext(ptep, pte, ext)
@@ -492,7 +480,7 @@ ENTRY(cpu_feroceon_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_feroceon_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -504,21 +492,22 @@ ENTRY(cpu_feroceon_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_feroceon_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl cpu_feroceon_suspend_size
.equ cpu_feroceon_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_feroceon_do_suspend)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_feroceon_do_suspend)
+SYM_FUNC_END(cpu_feroceon_do_suspend)
-ENTRY(cpu_feroceon_do_resume)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -528,7 +517,7 @@ ENTRY(cpu_feroceon_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_feroceon_do_resume)
+SYM_FUNC_END(cpu_feroceon_do_resume)
#endif
.type __feroceon_setup, #function
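
proc-feroceon.S loses more than the other files: besides its define_cache_functions block, the hand-rolled range_alias macro that published feroceon_range_* names as .equ aliases of the shared feroceon_* routines is dropped. Presumably the cache-function tables are now built in C elsewhere in this series and can reference the shared routines directly, so the extra alias symbols have no remaining assembly users. Expanded for one entry, the removed macro amounted to the first block below; SYM_FUNC_ALIAS() would be the typed way to express the same thing if such an assembly-level alias were still wanted (sketch only, not part of the patch):

	@ What "range_alias flush_icache_all" used to expand to:
		.globl	feroceon_range_flush_icache_all
		.type	feroceon_range_flush_icache_all, %function
		.equ	feroceon_range_flush_icache_all, feroceon_flush_icache_all

	@ Equivalent using the linkage macros adopted elsewhere in this diff:
	SYM_FUNC_ALIAS(feroceon_range_flush_icache_all, feroceon_flush_icache_all)
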
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e43f6d716b4b..e388c4cc0c44 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -320,39 +320,6 @@ ENTRY(\name\()_processor_functions)
#endif
.endm
-.macro define_cache_functions name:req
- .align 2
- .type \name\()_cache_fns, #object
-ENTRY(\name\()_cache_fns)
- .long \name\()_flush_icache_all
- .long \name\()_flush_kern_cache_all
- .long \name\()_flush_kern_cache_louis
- .long \name\()_flush_user_cache_all
- .long \name\()_flush_user_cache_range
- .long \name\()_coherent_kern_range
- .long \name\()_coherent_user_range
- .long \name\()_flush_kern_dcache_area
- .long \name\()_dma_map_area
- .long \name\()_dma_unmap_area
- .long \name\()_dma_flush_range
- .size \name\()_cache_fns, . - \name\()_cache_fns
-.endm
-
-.macro define_tlb_functions name:req, flags_up:req, flags_smp
- .type \name\()_tlb_fns, #object
- .align 2
-ENTRY(\name\()_tlb_fns)
- .long \name\()_flush_user_tlb_range
- .long \name\()_flush_kern_tlb_range
- .ifnb \flags_smp
- ALT_SMP(.long \flags_smp )
- ALT_UP(.long \flags_up )
- .else
- .long \flags_up
- .endif
- .size \name\()_tlb_fns, . - \name\()_tlb_fns
-.endm
-
.macro globl_equ x, y
.globl \x
.equ \x, \y
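
The define_cache_functions and define_tlb_functions helpers removed from proc-macros.S were what each proc-*.S file used to emit its struct cpu_cache_fns and TLB function table as raw .long entries; the per-file "define_cache_functions <cpu>" invocations deleted above were their only users. With those gone, the tables are presumably instantiated from C elsewhere in this series, and only the globl_equ helper remains here. For reference, one expansion produced roughly the following (foo is a placeholder for the CPU name):

		.align	2
		.type	foo_cache_fns, #object
	ENTRY(foo_cache_fns)
		.long	foo_flush_icache_all
		.long	foo_flush_kern_cache_all
		.long	foo_flush_kern_cache_louis
		@ ...one bare address per struct cpu_cache_fns member...
		.size	foo_cache_fns, . - foo_cache_fns
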
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 1645ccaffe96..8e9f38da863a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -9,6 +9,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -31,18 +32,20 @@
/*
* cpu_mohawk_proc_init()
*/
-ENTRY(cpu_mohawk_proc_init)
+SYM_TYPED_FUNC_START(cpu_mohawk_proc_init)
ret lr
+SYM_FUNC_END(cpu_mohawk_proc_init)
/*
* cpu_mohawk_proc_fin()
*/
-ENTRY(cpu_mohawk_proc_fin)
+SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...iz...........
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_mohawk_proc_fin)
/*
* cpu_mohawk_reset(loc)
@@ -57,7 +60,7 @@ ENTRY(cpu_mohawk_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_mohawk_reset)
+SYM_TYPED_FUNC_START(cpu_mohawk_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -67,7 +70,7 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_mohawk_reset)
+SYM_FUNC_END(cpu_mohawk_reset)
.popsection
/*
@@ -76,22 +79,23 @@ ENDPROC(cpu_mohawk_reset)
* Called with IRQs disabled
*/
.align 5
-ENTRY(cpu_mohawk_do_idle)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
ret lr
+SYM_FUNC_END(cpu_mohawk_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(mohawk_flush_icache_all)
+SYM_TYPED_FUNC_START(mohawk_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(mohawk_flush_icache_all)
+SYM_FUNC_END(mohawk_flush_icache_all)
/*
* flush_user_cache_all()
@@ -99,15 +103,14 @@ ENDPROC(mohawk_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(mohawk_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(mohawk_flush_user_cache_all, mohawk_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(mohawk_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -116,6 +119,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
ret lr
+SYM_FUNC_END(mohawk_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -129,7 +133,7 @@ __flush_whole_cache:
*
* (same as arm926)
*/
-ENTRY(mohawk_flush_user_cache_range)
+SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -146,6 +150,7 @@ ENTRY(mohawk_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -157,8 +162,11 @@ ENTRY(mohawk_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(mohawk_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(mohawk_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b mohawk_coherent_user_range
+#endif
+SYM_FUNC_END(mohawk_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -172,7 +180,7 @@ ENTRY(mohawk_coherent_kern_range)
*
* (same as arm926)
*/
-ENTRY(mohawk_coherent_user_range)
+SYM_TYPED_FUNC_START(mohawk_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -182,6 +190,7 @@ ENTRY(mohawk_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(mohawk_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -192,7 +201,7 @@ ENTRY(mohawk_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(mohawk_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -202,6 +211,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -256,7 +266,7 @@ mohawk_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(mohawk_dma_flush_range)
+SYM_TYPED_FUNC_START(mohawk_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
@@ -265,6 +275,7 @@ ENTRY(mohawk_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -272,13 +283,13 @@ ENTRY(mohawk_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(mohawk_dma_map_area)
+SYM_TYPED_FUNC_START(mohawk_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq mohawk_dma_clean_range
bcs mohawk_dma_inv_range
b mohawk_dma_flush_range
-ENDPROC(mohawk_dma_map_area)
+SYM_FUNC_END(mohawk_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -286,23 +297,18 @@ ENDPROC(mohawk_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(mohawk_dma_unmap_area)
+SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
ret lr
-ENDPROC(mohawk_dma_unmap_area)
-
- .globl mohawk_flush_kern_cache_louis
- .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions mohawk
+SYM_FUNC_END(mohawk_dma_unmap_area)
-ENTRY(cpu_mohawk_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_mohawk_dcache_clean_area)
/*
* cpu_mohawk_switch_mm(pgd)
@@ -312,7 +318,7 @@ ENTRY(cpu_mohawk_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_mohawk_switch_mm)
+SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm)
mov ip, #0
mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
@@ -321,6 +327,7 @@ ENTRY(cpu_mohawk_switch_mm)
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret lr
+SYM_FUNC_END(cpu_mohawk_switch_mm)
/*
* cpu_mohawk_set_pte_ext(ptep, pte, ext)
@@ -328,7 +335,7 @@ ENTRY(cpu_mohawk_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_mohawk_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -336,11 +343,12 @@ ENTRY(cpu_mohawk_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
#endif
+SYM_FUNC_END(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_mohawk_do_suspend)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -351,9 +359,9 @@ ENTRY(cpu_mohawk_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmia sp!, {r4 - r9, pc}
-ENDPROC(cpu_mohawk_do_suspend)
+SYM_FUNC_END(cpu_mohawk_do_suspend)
-ENTRY(cpu_mohawk_do_resume)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
@@ -369,7 +377,7 @@ ENTRY(cpu_mohawk_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_mohawk_do_resume)
+SYM_FUNC_END(cpu_mohawk_do_resume)
#endif
.type __mohawk_setup, #function
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 4071f7a61cb6..3da76fab8ac3 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -32,15 +33,16 @@
/*
* cpu_sa110_proc_init()
*/
-ENTRY(cpu_sa110_proc_init)
+SYM_TYPED_FUNC_START(cpu_sa110_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa110_proc_init)
/*
* cpu_sa110_proc_fin()
*/
-ENTRY(cpu_sa110_proc_fin)
+SYM_TYPED_FUNC_START(cpu_sa110_proc_fin)
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
@@ -48,6 +50,7 @@ ENTRY(cpu_sa110_proc_fin)
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_sa110_proc_fin)
/*
* cpu_sa110_reset(loc)
@@ -60,7 +63,7 @@ ENTRY(cpu_sa110_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_sa110_reset)
+SYM_TYPED_FUNC_START(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -72,7 +75,7 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_sa110_reset)
+SYM_FUNC_END(cpu_sa110_reset)
.popsection
/*
@@ -88,7 +91,7 @@ ENDPROC(cpu_sa110_reset)
*/
.align 5
-ENTRY(cpu_sa110_do_idle)
+SYM_TYPED_FUNC_START(cpu_sa110_do_idle)
mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc
ldr r1, [r1, #0] @ force switch to MCLK
@@ -101,6 +104,7 @@ ENTRY(cpu_sa110_do_idle)
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa110_do_idle)
/* ================================= CACHE ================================ */
@@ -113,12 +117,13 @@ ENTRY(cpu_sa110_do_idle)
* addr: cache-unaligned virtual address
*/
.align 5
-ENTRY(cpu_sa110_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_sa110_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_sa110_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -130,7 +135,7 @@ ENTRY(cpu_sa110_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_sa110_switch_mm)
+SYM_TYPED_FUNC_START(cpu_sa110_switch_mm)
#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
@@ -140,6 +145,7 @@ ENTRY(cpu_sa110_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_sa110_switch_mm)
/*
* cpu_sa110_set_pte_ext(ptep, pte, ext)
@@ -147,7 +153,7 @@ ENTRY(cpu_sa110_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_sa110_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_sa110_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -155,6 +161,7 @@ ENTRY(cpu_sa110_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_sa110_set_pte_ext)
.type __sa110_setup, #function
__sa110_setup:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index e723bd4119d3..7c496195e440 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -17,6 +17,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -36,11 +37,12 @@
/*
* cpu_sa1100_proc_init()
*/
-ENTRY(cpu_sa1100_proc_init)
+SYM_TYPED_FUNC_START(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
ret lr
+SYM_FUNC_END(cpu_sa1100_proc_init)
/*
* cpu_sa1100_proc_fin()
@@ -49,13 +51,14 @@ ENTRY(cpu_sa1100_proc_init)
* - Disable interrupts
* - Clean and turn off caches.
*/
-ENTRY(cpu_sa1100_proc_fin)
+SYM_TYPED_FUNC_START(cpu_sa1100_proc_fin)
mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_sa1100_proc_fin)
/*
* cpu_sa1100_reset(loc)
@@ -68,7 +71,7 @@ ENTRY(cpu_sa1100_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_sa1100_reset)
+SYM_TYPED_FUNC_START(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -80,7 +83,7 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_sa1100_reset)
+SYM_FUNC_END(cpu_sa1100_reset)
.popsection
/*
@@ -95,7 +98,7 @@ ENDPROC(cpu_sa1100_reset)
* 3 = switch to fast processor clock
*/
.align 5
-ENTRY(cpu_sa1100_do_idle)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
@@ -111,6 +114,7 @@ ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa1100_do_idle)
/* ================================= CACHE ================================ */
@@ -123,12 +127,13 @@ ENTRY(cpu_sa1100_do_idle)
* addr: cache-unaligned virtual address
*/
.align 5
-ENTRY(cpu_sa1100_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_sa1100_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -140,7 +145,7 @@ ENTRY(cpu_sa1100_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_sa1100_switch_mm)
+SYM_TYPED_FUNC_START(cpu_sa1100_switch_mm)
#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
@@ -151,6 +156,7 @@ ENTRY(cpu_sa1100_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_sa1100_switch_mm)
/*
* cpu_sa1100_set_pte_ext(ptep, pte, ext)
@@ -158,7 +164,7 @@ ENTRY(cpu_sa1100_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_sa1100_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_sa1100_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -166,20 +172,21 @@ ENTRY(cpu_sa1100_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_sa1100_do_suspend)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
mrc p15, 0, r5, c13, c0, 0 @ PID
mrc p15, 0, r6, c1, c0, 0 @ control reg
stmia r0, {r4 - r6} @ store cp regs
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_sa1100_do_suspend)
+SYM_FUNC_END(cpu_sa1100_do_suspend)
-ENTRY(cpu_sa1100_do_resume)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_resume)
ldmia r0, {r4 - r6} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
@@ -192,7 +199,7 @@ ENTRY(cpu_sa1100_do_resume)
mcr p15, 0, r5, c13, c0, 0 @ PID
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_sa1100_do_resume)
+SYM_FUNC_END(cpu_sa1100_do_resume)
#endif
.type __sa1100_setup, #function
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 203dff89ab1a..90a01f5950b9 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -8,6 +8,7 @@
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
@@ -34,15 +35,17 @@
.arch armv6
-ENTRY(cpu_v6_proc_init)
+SYM_TYPED_FUNC_START(cpu_v6_proc_init)
ret lr
+SYM_FUNC_END(cpu_v6_proc_init)
-ENTRY(cpu_v6_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v6_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_v6_proc_fin)
/*
* cpu_v6_reset(loc)
@@ -55,14 +58,14 @@ ENTRY(cpu_v6_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_v6_reset)
+SYM_TYPED_FUNC_START(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
mcr p15, 0, r1, c1, c0, 0 @ disable MMU
mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB
ret r0
-ENDPROC(cpu_v6_reset)
+SYM_FUNC_END(cpu_v6_reset)
.popsection
/*
@@ -72,18 +75,20 @@ ENDPROC(cpu_v6_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v6_do_idle)
+SYM_TYPED_FUNC_START(cpu_v6_do_idle)
mov r1, #0
mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode
mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
ret lr
+SYM_FUNC_END(cpu_v6_do_idle)
-ENTRY(cpu_v6_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v6_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_v6_dcache_clean_area)
/*
* cpu_v6_switch_mm(pgd_phys, tsk)
@@ -95,7 +100,7 @@ ENTRY(cpu_v6_dcache_clean_area)
* It is assumed that:
* - we are not using split page tables
*/
-ENTRY(cpu_v6_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
mmid r1, r1 @ get mm->context.id
@@ -113,6 +118,7 @@ ENTRY(cpu_v6_switch_mm)
mcr p15, 0, r1, c13, c0, 1 @ set context ID
#endif
ret lr
+SYM_FUNC_END(cpu_v6_switch_mm)
/*
* cpu_v6_set_pte_ext(ptep, pte, ext)
@@ -126,17 +132,18 @@ ENTRY(cpu_v6_switch_mm)
*/
armv6_mt_table cpu_v6
-ENTRY(cpu_v6_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v6_set_pte_ext)
#ifdef CONFIG_MMU
armv6_set_pte_ext cpu_v6
#endif
ret lr
+SYM_FUNC_END(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v6_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
#ifdef CONFIG_MMU
@@ -148,9 +155,9 @@ ENTRY(cpu_v6_do_suspend)
mrc p15, 0, r9, c1, c0, 0 @ control register
stmia r0, {r4 - r9}
ldmfd sp!, {r4- r9, pc}
-ENDPROC(cpu_v6_do_suspend)
+SYM_FUNC_END(cpu_v6_do_suspend)
-ENTRY(cpu_v6_do_resume)
+SYM_TYPED_FUNC_START(cpu_v6_do_resume)
mov ip, #0
mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
@@ -172,7 +179,7 @@ ENTRY(cpu_v6_do_resume)
mcr p15, 0, ip, c7, c5, 4 @ ISB
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_v6_do_resume)
+SYM_FUNC_END(cpu_v6_do_resume)
#endif
string cpu_v6_name, "ARMv6-compatible processor"
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 0a3083ad19c2..1007702fcaf3 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the flush BTAC/BTB won't do anything.
*/
-ENTRY(cpu_v7_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
@@ -59,7 +59,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
bx lr
-ENDPROC(cpu_v7_switch_mm)
+SYM_FUNC_END(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
@@ -71,7 +71,7 @@ ENDPROC(cpu_v7_switch_mm)
* - pte - PTE value to store
* - ext - value for extended PTE bits
*/
-ENTRY(cpu_v7_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
str r1, [r0] @ linux version
@@ -106,7 +106,7 @@ ENTRY(cpu_v7_set_pte_ext)
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
bx lr
-ENDPROC(cpu_v7_set_pte_ext)
+SYM_FUNC_END(cpu_v7_set_pte_ext)
/*
* Memory region attributes with SCTLR.TRE=1
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 131984462d0d..bdabc15cde56 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -42,7 +42,7 @@
* Set the translation table base pointer to be pgd_phys (physical address of
* the new TTB).
*/
-ENTRY(cpu_v7_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r2, r2
asid r2, r2
@@ -51,7 +51,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
ret lr
-ENDPROC(cpu_v7_switch_mm)
+SYM_FUNC_END(cpu_v7_switch_mm)
#ifdef __ARMEB__
#define rl r3
@@ -68,7 +68,7 @@ ENDPROC(cpu_v7_switch_mm)
* - ptep - pointer to level 3 translation table entry
* - pte - PTE value to store (64-bit in r2 and r3)
*/
-ENTRY(cpu_v7_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
tst rl, #L_PTE_VALID
beq 1f
@@ -87,7 +87,7 @@ ENTRY(cpu_v7_set_pte_ext)
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
ret lr
-ENDPROC(cpu_v7_set_pte_ext)
+SYM_FUNC_END(cpu_v7_set_pte_ext)
/*
* Memory region attributes for LPAE (defined in pgtable-3level.h):
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 193c7aeb6703..5fb9a6aecb00 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -7,6 +7,7 @@
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/arm-smccc.h>
+#include <linux/cfi_types.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
@@ -26,17 +27,17 @@
.arch armv7-a
-ENTRY(cpu_v7_proc_init)
+SYM_TYPED_FUNC_START(cpu_v7_proc_init)
ret lr
-ENDPROC(cpu_v7_proc_init)
+SYM_FUNC_END(cpu_v7_proc_init)
-ENTRY(cpu_v7_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v7_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
-ENDPROC(cpu_v7_proc_fin)
+SYM_FUNC_END(cpu_v7_proc_fin)
/*
* cpu_v7_reset(loc, hyp)
@@ -53,7 +54,7 @@ ENDPROC(cpu_v7_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_v7_reset)
+SYM_TYPED_FUNC_START(cpu_v7_reset)
mrc p15, 0, r2, c1, c0, 0 @ ctrl register
bic r2, r2, #0x1 @ ...............m
THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
@@ -64,7 +65,7 @@ ENTRY(cpu_v7_reset)
bne __hyp_soft_restart
#endif
bx r0
-ENDPROC(cpu_v7_reset)
+SYM_FUNC_END(cpu_v7_reset)
.popsection
/*
@@ -74,13 +75,13 @@ ENDPROC(cpu_v7_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v7_do_idle)
+SYM_TYPED_FUNC_START(cpu_v7_do_idle)
dsb @ WFI may enter a low-power mode
wfi
ret lr
-ENDPROC(cpu_v7_do_idle)
+SYM_FUNC_END(cpu_v7_do_idle)
-ENTRY(cpu_v7_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v7_dcache_clean_area)
ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
ALT_UP_B(1f)
ret lr
@@ -91,38 +92,39 @@ ENTRY(cpu_v7_dcache_clean_area)
bhi 2b
dsb ishst
ret lr
-ENDPROC(cpu_v7_dcache_clean_area)
+SYM_FUNC_END(cpu_v7_dcache_clean_area)
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
-ENTRY(cpu_v7_smc_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+SYM_FUNC_END(cpu_v7_smc_switch_mm)
.arch_extension virt
-ENTRY(cpu_v7_hvc_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_hvc_switch_mm)
+SYM_FUNC_END(cpu_v7_hvc_switch_mm)
#endif
-ENTRY(cpu_v7_iciallu_switch_mm)
+
+SYM_TYPED_FUNC_START(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_iciallu_switch_mm)
-ENTRY(cpu_v7_bpiall_switch_mm)
+SYM_FUNC_END(cpu_v7_iciallu_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_bpiall_switch_mm)
+SYM_FUNC_END(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
@@ -131,7 +133,7 @@ ENDPROC(cpu_v7_bpiall_switch_mm)
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v7_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
@@ -150,9 +152,9 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r11, pc}
-ENDPROC(cpu_v7_do_suspend)
+SYM_FUNC_END(cpu_v7_do_suspend)
-ENTRY(cpu_v7_do_resume)
+SYM_TYPED_FUNC_START(cpu_v7_do_resume)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID
@@ -186,22 +188,22 @@ ENTRY(cpu_v7_do_resume)
dsb
mov r0, r8 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_v7_do_resume)
+SYM_FUNC_END(cpu_v7_do_resume)
#endif
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_ca9mp_do_suspend)
+SYM_TYPED_FUNC_START(cpu_ca9mp_do_suspend)
stmfd sp!, {r4 - r5}
mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register
mrc p15, 0, r5, c15, c0, 0 @ Power register
stmia r0!, {r4 - r5}
ldmfd sp!, {r4 - r5}
b cpu_v7_do_suspend
-ENDPROC(cpu_ca9mp_do_suspend)
+SYM_FUNC_END(cpu_ca9mp_do_suspend)
-ENTRY(cpu_ca9mp_do_resume)
+SYM_TYPED_FUNC_START(cpu_ca9mp_do_resume)
ldmia r0!, {r4 - r5}
mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register
teq r4, r10 @ Already restored?
@@ -210,7 +212,7 @@ ENTRY(cpu_ca9mp_do_resume)
teq r5, r10 @ Already restored?
mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it
b cpu_v7_do_resume
-ENDPROC(cpu_ca9mp_do_resume)
+SYM_FUNC_END(cpu_ca9mp_do_resume)
#endif
#ifdef CONFIG_CPU_PJ4B
@@ -220,18 +222,18 @@ ENDPROC(cpu_ca9mp_do_resume)
globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
globl_equ cpu_pj4b_reset, cpu_v7_reset
#ifdef CONFIG_PJ4B_ERRATA_4742
-ENTRY(cpu_pj4b_do_idle)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_idle)
dsb @ WFI may enter a low-power mode
wfi
dsb @barrier
ret lr
-ENDPROC(cpu_pj4b_do_idle)
+SYM_FUNC_END(cpu_pj4b_do_idle)
#else
globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
#endif
globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_pj4b_do_suspend)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_suspend)
stmfd sp!, {r6 - r10}
mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features
mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0
@@ -241,9 +243,9 @@ ENTRY(cpu_pj4b_do_suspend)
stmia r0!, {r6 - r10}
ldmfd sp!, {r6 - r10}
b cpu_v7_do_suspend
-ENDPROC(cpu_pj4b_do_suspend)
+SYM_FUNC_END(cpu_pj4b_do_suspend)
-ENTRY(cpu_pj4b_do_resume)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_resume)
ldmia r0!, {r6 - r10}
mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features
mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0
@@ -251,7 +253,7 @@ ENTRY(cpu_pj4b_do_resume)
mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1
mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC
b cpu_v7_do_resume
-ENDPROC(cpu_pj4b_do_resume)
+SYM_FUNC_END(cpu_pj4b_do_resume)
#endif
.globl cpu_pj4b_suspend_size
.equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index d65a12f851a9..d4675603593b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -8,18 +8,19 @@
* This is the "shell" of the ARMv7-M processor support.
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/v7m.h>
#include "proc-macros.S"
-ENTRY(cpu_v7m_proc_init)
+SYM_TYPED_FUNC_START(cpu_v7m_proc_init)
ret lr
-ENDPROC(cpu_v7m_proc_init)
+SYM_FUNC_END(cpu_v7m_proc_init)
-ENTRY(cpu_v7m_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v7m_proc_fin)
ret lr
-ENDPROC(cpu_v7m_proc_fin)
+SYM_FUNC_END(cpu_v7m_proc_fin)
/*
* cpu_v7m_reset(loc)
@@ -31,9 +32,9 @@ ENDPROC(cpu_v7m_proc_fin)
* - loc - location to jump to for soft reset
*/
.align 5
-ENTRY(cpu_v7m_reset)
+SYM_TYPED_FUNC_START(cpu_v7m_reset)
ret r0
-ENDPROC(cpu_v7m_reset)
+SYM_FUNC_END(cpu_v7m_reset)
/*
* cpu_v7m_do_idle()
@@ -42,36 +43,36 @@ ENDPROC(cpu_v7m_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v7m_do_idle)
+SYM_TYPED_FUNC_START(cpu_v7m_do_idle)
wfi
ret lr
-ENDPROC(cpu_v7m_do_idle)
+SYM_FUNC_END(cpu_v7m_do_idle)
-ENTRY(cpu_v7m_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v7m_dcache_clean_area)
ret lr
-ENDPROC(cpu_v7m_dcache_clean_area)
+SYM_FUNC_END(cpu_v7m_dcache_clean_area)
/*
* There is no MMU, so here is nothing to do.
*/
-ENTRY(cpu_v7m_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7m_switch_mm)
ret lr
-ENDPROC(cpu_v7m_switch_mm)
+SYM_FUNC_END(cpu_v7m_switch_mm)
.globl cpu_v7m_suspend_size
.equ cpu_v7m_suspend_size, 0
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v7m_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v7m_do_suspend)
ret lr
-ENDPROC(cpu_v7m_do_suspend)
+SYM_FUNC_END(cpu_v7m_do_suspend)
-ENTRY(cpu_v7m_do_resume)
+SYM_TYPED_FUNC_START(cpu_v7m_do_resume)
ret lr
-ENDPROC(cpu_v7m_do_resume)
+SYM_FUNC_END(cpu_v7m_do_resume)
#endif
-ENTRY(cpu_cm7_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_cm7_dcache_clean_area)
dcache_line_size r2, r3
movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
@@ -82,16 +83,16 @@ ENTRY(cpu_cm7_dcache_clean_area)
bhi 1b
dsb
ret lr
-ENDPROC(cpu_cm7_dcache_clean_area)
+SYM_FUNC_END(cpu_cm7_dcache_clean_area)
-ENTRY(cpu_cm7_proc_fin)
+SYM_TYPED_FUNC_START(cpu_cm7_proc_fin)
movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
ldr r0, [r2]
bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
str r0, [r2]
ret lr
-ENDPROC(cpu_cm7_proc_fin)
+SYM_FUNC_END(cpu_cm7_proc_fin)
.section ".init.text", "ax"
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index a17afe7e195a..14927b380452 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -23,6 +23,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -79,18 +80,20 @@
*
* Nothing too exciting at the moment
*/
-ENTRY(cpu_xsc3_proc_init)
+SYM_TYPED_FUNC_START(cpu_xsc3_proc_init)
ret lr
+SYM_FUNC_END(cpu_xsc3_proc_init)
/*
* cpu_xsc3_proc_fin()
*/
-ENTRY(cpu_xsc3_proc_fin)
+SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_xsc3_proc_fin)
/*
* cpu_xsc3_reset(loc)
@@ -103,7 +106,7 @@ ENTRY(cpu_xsc3_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_xsc3_reset)
+SYM_TYPED_FUNC_START(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
@@ -117,7 +120,7 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
ret r0
-ENDPROC(cpu_xsc3_reset)
+SYM_FUNC_END(cpu_xsc3_reset)
.popsection
/*
@@ -132,10 +135,11 @@ ENDPROC(cpu_xsc3_reset)
*/
.align 5
-ENTRY(cpu_xsc3_do_idle)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ go to idle
ret lr
+SYM_FUNC_END(cpu_xsc3_do_idle)
/* ================================= CACHE ================================ */
@@ -144,11 +148,11 @@ ENTRY(cpu_xsc3_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(xsc3_flush_icache_all)
+SYM_TYPED_FUNC_START(xsc3_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(xsc3_flush_icache_all)
+SYM_FUNC_END(xsc3_flush_icache_all)
/*
* flush_user_cache_all()
@@ -156,15 +160,14 @@ ENDPROC(xsc3_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(xsc3_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(xsc3_flush_user_cache_all, xsc3_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(xsc3_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -174,6 +177,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, vm_flags)
@@ -186,7 +190,7 @@ __flush_whole_cache:
* - vma - vma_area_struct describing address space
*/
.align 5
-ENTRY(xsc3_flush_user_cache_range)
+SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
@@ -203,6 +207,7 @@ ENTRY(xsc3_flush_user_cache_range)
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -217,9 +222,13 @@ ENTRY(xsc3_flush_user_cache_range)
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
-ENTRY(xsc3_coherent_kern_range)
-/* FALLTHROUGH */
-ENTRY(xsc3_coherent_user_range)
+SYM_TYPED_FUNC_START(xsc3_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b xsc3_coherent_user_range
+#endif
+SYM_FUNC_END(xsc3_coherent_kern_range)
+
+SYM_TYPED_FUNC_START(xsc3_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
@@ -230,6 +239,7 @@ ENTRY(xsc3_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -240,7 +250,7 @@ ENTRY(xsc3_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(xsc3_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
@@ -251,6 +261,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -301,7 +312,7 @@ xsc3_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_flush_range)
+SYM_TYPED_FUNC_START(xsc3_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
@@ -309,6 +320,7 @@ ENTRY(xsc3_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
ret lr
+SYM_FUNC_END(xsc3_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -316,13 +328,13 @@ ENTRY(xsc3_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xsc3_dma_map_area)
+SYM_TYPED_FUNC_START(xsc3_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq xsc3_dma_clean_range
bcs xsc3_dma_inv_range
b xsc3_dma_flush_range
-ENDPROC(xsc3_dma_map_area)
+SYM_FUNC_END(xsc3_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -330,22 +342,17 @@ ENDPROC(xsc3_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xsc3_dma_unmap_area)
+SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
ret lr
-ENDPROC(xsc3_dma_unmap_area)
-
- .globl xsc3_flush_kern_cache_louis
- .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xsc3
+SYM_FUNC_END(xsc3_dma_unmap_area)
-ENTRY(cpu_xsc3_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_xsc3_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -357,7 +364,7 @@ ENTRY(cpu_xsc3_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_xsc3_switch_mm)
+SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
@@ -366,6 +373,7 @@ ENTRY(cpu_xsc3_switch_mm)
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
cpwait_ret lr, ip
+SYM_FUNC_END(cpu_xsc3_switch_mm)
/*
* cpu_xsc3_set_pte_ext(ptep, pte, ext)
@@ -391,7 +399,7 @@ cpu_xsc3_mt_table:
.long 0x00 @ unused
.align 5
-ENTRY(cpu_xsc3_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext)
xscale_set_pte_ext_prologue
tst r1, #L_PTE_SHARED @ shared?
@@ -404,6 +412,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
xscale_set_pte_ext_epilogue
ret lr
+SYM_FUNC_END(cpu_xsc3_set_pte_ext)
.ltorg
.align
@@ -411,7 +420,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_xsc3_do_suspend)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -422,9 +431,9 @@ ENTRY(cpu_xsc3_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmia sp!, {r4 - r9, pc}
-ENDPROC(cpu_xsc3_do_suspend)
+SYM_FUNC_END(cpu_xsc3_do_suspend)
-ENTRY(cpu_xsc3_do_resume)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
@@ -440,7 +449,7 @@ ENTRY(cpu_xsc3_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_xsc3_do_resume)
+SYM_FUNC_END(cpu_xsc3_do_resume)
#endif
.type __xsc3_setup, #function
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d82590aa71c0..d8462df8020b 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -19,6 +19,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -111,22 +112,24 @@ clean_addr: .word CLEAN_ADDR
*
* Nothing too exciting at the moment
*/
-ENTRY(cpu_xscale_proc_init)
+SYM_TYPED_FUNC_START(cpu_xscale_proc_init)
@ enable write buffer coalescing. Some bootloaders disable it
mrc p15, 0, r1, c1, c0, 1
bic r1, r1, #1
mcr p15, 0, r1, c1, c0, 1
ret lr
+SYM_FUNC_END(cpu_xscale_proc_init)
/*
* cpu_xscale_proc_fin()
*/
-ENTRY(cpu_xscale_proc_fin)
+SYM_TYPED_FUNC_START(cpu_xscale_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_xscale_proc_fin)
/*
* cpu_xscale_reset(loc)
@@ -141,7 +144,7 @@ ENTRY(cpu_xscale_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_xscale_reset)
+SYM_TYPED_FUNC_START(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB
@@ -159,7 +162,7 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret r0
-ENDPROC(cpu_xscale_reset)
+SYM_FUNC_END(cpu_xscale_reset)
.popsection
/*
@@ -174,10 +177,11 @@ ENDPROC(cpu_xscale_reset)
*/
.align 5
-ENTRY(cpu_xscale_do_idle)
+SYM_TYPED_FUNC_START(cpu_xscale_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
ret lr
+SYM_FUNC_END(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
@@ -186,11 +190,11 @@ ENTRY(cpu_xscale_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(xscale_flush_icache_all)
+SYM_TYPED_FUNC_START(xscale_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(xscale_flush_icache_all)
+SYM_FUNC_END(xscale_flush_icache_all)
/*
* flush_user_cache_all()
@@ -198,15 +202,14 @@ ENDPROC(xscale_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(xscale_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(xscale_flush_user_cache_all, xscale_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(xscale_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(xscale_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -215,6 +218,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, vm_flags)
@@ -227,7 +231,7 @@ __flush_whole_cache:
* - vma - vma_area_struct describing address space
*/
.align 5
-ENTRY(xscale_flush_user_cache_range)
+SYM_TYPED_FUNC_START(xscale_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
@@ -244,6 +248,7 @@ ENTRY(xscale_flush_user_cache_range)
mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -258,7 +263,7 @@ ENTRY(xscale_flush_user_cache_range)
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
-ENTRY(xscale_coherent_kern_range)
+SYM_TYPED_FUNC_START(xscale_coherent_kern_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -268,6 +273,7 @@ ENTRY(xscale_coherent_kern_range)
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -279,7 +285,7 @@ ENTRY(xscale_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_coherent_user_range)
+SYM_TYPED_FUNC_START(xscale_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry
@@ -290,6 +296,7 @@ ENTRY(xscale_coherent_user_range)
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -300,7 +307,7 @@ ENTRY(xscale_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(xscale_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(xscale_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -311,6 +318,7 @@ ENTRY(xscale_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -361,7 +369,7 @@ xscale_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_flush_range)
+SYM_TYPED_FUNC_START(xscale_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -370,6 +378,7 @@ ENTRY(xscale_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -377,13 +386,27 @@ ENTRY(xscale_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_dma_map_area)
+SYM_TYPED_FUNC_START(xscale_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
bcs xscale_dma_inv_range
b xscale_dma_flush_range
-ENDPROC(xscale_dma_map_area)
+SYM_FUNC_END(xscale_dma_map_area)
+
+/*
+ * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+ * clear the dirty bits, which means that if we invalidate a dirty line,
+ * the dirty data can still be written back to external memory later on.
+ *
+ * The recommended workaround is to always do a clean D-cache line before
+ * doing an invalidate D-cache line, so on the affected processors,
+ * dma_inv_range() is implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ * http://www.intel.com/design/iio/specupdt/273415.htm
+ */
/*
* dma_map_area(start, size, dir)
@@ -391,12 +414,12 @@ ENDPROC(xscale_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_80200_A0_A1_dma_map_area)
+SYM_TYPED_FUNC_START(xscale_80200_A0_A1_dma_map_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
b xscale_dma_flush_range
-ENDPROC(xscale_80200_A0_A1_dma_map_area)
+SYM_FUNC_END(xscale_80200_A0_A1_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -404,59 +427,17 @@ ENDPROC(xscale_80200_A0_A1_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_dma_unmap_area)
+SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
ret lr
-ENDPROC(xscale_dma_unmap_area)
-
- .globl xscale_flush_kern_cache_louis
- .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
+SYM_FUNC_END(xscale_dma_unmap_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xscale
-
-/*
- * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
- * clear the dirty bits, which means that if we invalidate a dirty line,
- * the dirty data can still be written back to external memory later on.
- *
- * The recommended workaround is to always do a clean D-cache line before
- * doing an invalidate D-cache line, so on the affected processors,
- * dma_inv_range() is implemented as dma_flush_range().
- *
- * See erratum #25 of "Intel 80200 Processor Specification Update",
- * revision January 22, 2003, available at:
- * http://www.intel.com/design/iio/specupdt/273415.htm
- */
-.macro a0_alias basename
- .globl xscale_80200_A0_A1_\basename
- .type xscale_80200_A0_A1_\basename , %function
- .equ xscale_80200_A0_A1_\basename , xscale_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for these processor revisions.
- * Export suitable alias symbols for the unchanged functions:
- */
- a0_alias flush_icache_all
- a0_alias flush_user_cache_all
- a0_alias flush_kern_cache_all
- a0_alias flush_kern_cache_louis
- a0_alias flush_user_cache_range
- a0_alias coherent_kern_range
- a0_alias coherent_user_range
- a0_alias flush_kern_dcache_area
- a0_alias dma_flush_range
- a0_alias dma_unmap_area
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xscale_80200_A0_A1
-
-ENTRY(cpu_xscale_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_xscale_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -468,13 +449,14 @@ ENTRY(cpu_xscale_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_xscale_switch_mm)
+SYM_TYPED_FUNC_START(cpu_xscale_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip
+SYM_FUNC_END(cpu_xscale_switch_mm)
/*
* cpu_xscale_set_pte_ext(ptep, pte, ext)
@@ -502,7 +484,7 @@ cpu_xscale_mt_table:
.long 0x00 @ unused
.align 5
-ENTRY(cpu_xscale_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_prologue
@
@@ -520,6 +502,7 @@ ENTRY(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_epilogue
ret lr
+SYM_FUNC_END(cpu_xscale_set_pte_ext)
.ltorg
.align
@@ -527,7 +510,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_xscale_do_suspend)
+SYM_TYPED_FUNC_START(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -538,9 +521,9 @@ ENTRY(cpu_xscale_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmfd sp!, {r4 - r9, pc}
-ENDPROC(cpu_xscale_do_suspend)
+SYM_FUNC_END(cpu_xscale_do_suspend)
-ENTRY(cpu_xscale_do_resume)
+SYM_TYPED_FUNC_START(cpu_xscale_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
@@ -553,7 +536,7 @@ ENTRY(cpu_xscale_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_xscale_do_resume)
+SYM_FUNC_END(cpu_xscale_do_resume)
#endif
.type __xscale_setup, #function
diff --git a/arch/arm/mm/proc.c b/arch/arm/mm/proc.c
new file mode 100644
index 000000000000..bdbbf65d1b36
--- /dev/null
+++ b/arch/arm/mm/proc.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file defines C prototypes for the low-level processor assembly functions
+ * and creates a reference for CFI. This needs to be done for every assembly
+ * processor ("proc") function that is called from C but does not have a
+ * corresponding C implementation.
+ *
+ * Processors are listed in the order they appear in the Makefile.
+ *
+ * Functions are listed if and only if they see use on the target CPU, and in
+ * the order they are defined in struct processor.
+ */
+#include <asm/proc-fns.h>
+
+#ifdef CONFIG_CPU_ARM7TDMI
+void cpu_arm7tdmi_proc_init(void);
+__ADDRESSABLE(cpu_arm7tdmi_proc_init);
+void cpu_arm7tdmi_proc_fin(void);
+__ADDRESSABLE(cpu_arm7tdmi_proc_fin);
+void cpu_arm7tdmi_reset(void);
+__ADDRESSABLE(cpu_arm7tdmi_reset);
+int cpu_arm7tdmi_do_idle(void);
+__ADDRESSABLE(cpu_arm7tdmi_do_idle);
+void cpu_arm7tdmi_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm7tdmi_dcache_clean_area);
+void cpu_arm7tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm7tdmi_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+void cpu_arm720_proc_init(void);
+__ADDRESSABLE(cpu_arm720_proc_init);
+void cpu_arm720_proc_fin(void);
+__ADDRESSABLE(cpu_arm720_proc_fin);
+void cpu_arm720_reset(void);
+__ADDRESSABLE(cpu_arm720_reset);
+int cpu_arm720_do_idle(void);
+__ADDRESSABLE(cpu_arm720_do_idle);
+void cpu_arm720_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm720_dcache_clean_area);
+void cpu_arm720_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm720_switch_mm);
+void cpu_arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm720_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+void cpu_arm740_proc_init(void);
+__ADDRESSABLE(cpu_arm740_proc_init);
+void cpu_arm740_proc_fin(void);
+__ADDRESSABLE(cpu_arm740_proc_fin);
+void cpu_arm740_reset(void);
+__ADDRESSABLE(cpu_arm740_reset);
+int cpu_arm740_do_idle(void);
+__ADDRESSABLE(cpu_arm740_do_idle);
+void cpu_arm740_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm740_dcache_clean_area);
+void cpu_arm740_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm740_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+void cpu_arm9tdmi_proc_init(void);
+__ADDRESSABLE(cpu_arm9tdmi_proc_init);
+void cpu_arm9tdmi_proc_fin(void);
+__ADDRESSABLE(cpu_arm9tdmi_proc_fin);
+void cpu_arm9tdmi_reset(void);
+__ADDRESSABLE(cpu_arm9tdmi_reset);
+int cpu_arm9tdmi_do_idle(void);
+__ADDRESSABLE(cpu_arm9tdmi_do_idle);
+void cpu_arm9tdmi_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm9tdmi_dcache_clean_area);
+void cpu_arm9tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm9tdmi_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+void cpu_arm920_proc_init(void);
+__ADDRESSABLE(cpu_arm920_proc_init);
+void cpu_arm920_proc_fin(void);
+__ADDRESSABLE(cpu_arm920_proc_fin);
+void cpu_arm920_reset(void);
+__ADDRESSABLE(cpu_arm920_reset);
+int cpu_arm920_do_idle(void);
+__ADDRESSABLE(cpu_arm920_do_idle);
+void cpu_arm920_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm920_dcache_clean_area);
+void cpu_arm920_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm920_switch_mm);
+void cpu_arm920_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm920_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_arm920_do_suspend(void *);
+__ADDRESSABLE(cpu_arm920_do_suspend);
+void cpu_arm920_do_resume(void *);
+__ADDRESSABLE(cpu_arm920_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_ARM920T */
+
+#ifdef CONFIG_CPU_ARM922T
+void cpu_arm922_proc_init(void);
+__ADDRESSABLE(cpu_arm922_proc_init);
+void cpu_arm922_proc_fin(void);
+__ADDRESSABLE(cpu_arm922_proc_fin);
+void cpu_arm922_reset(void);
+__ADDRESSABLE(cpu_arm922_reset);
+int cpu_arm922_do_idle(void);
+__ADDRESSABLE(cpu_arm922_do_idle);
+void cpu_arm922_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm922_dcache_clean_area);
+void cpu_arm922_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm922_switch_mm);
+void cpu_arm922_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm922_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+void cpu_arm925_proc_init(void);
+__ADDRESSABLE(cpu_arm925_proc_init);
+void cpu_arm925_proc_fin(void);
+__ADDRESSABLE(cpu_arm925_proc_fin);
+void cpu_arm925_reset(void);
+__ADDRESSABLE(cpu_arm925_reset);
+int cpu_arm925_do_idle(void);
+__ADDRESSABLE(cpu_arm925_do_idle);
+void cpu_arm925_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm925_dcache_clean_area);
+void cpu_arm925_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm925_switch_mm);
+void cpu_arm925_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm925_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+void cpu_arm926_proc_init(void);
+__ADDRESSABLE(cpu_arm926_proc_init);
+void cpu_arm926_proc_fin(void);
+__ADDRESSABLE(cpu_arm926_proc_fin);
+void cpu_arm926_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm926_reset);
+int cpu_arm926_do_idle(void);
+__ADDRESSABLE(cpu_arm926_do_idle);
+void cpu_arm926_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm926_dcache_clean_area);
+void cpu_arm926_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm926_switch_mm);
+void cpu_arm926_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm926_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_arm926_do_suspend(void *);
+__ADDRESSABLE(cpu_arm926_do_suspend);
+void cpu_arm926_do_resume(void *);
+__ADDRESSABLE(cpu_arm926_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_ARM926T */
+
+#ifdef CONFIG_CPU_ARM940T
+void cpu_arm940_proc_init(void);
+__ADDRESSABLE(cpu_arm940_proc_init);
+void cpu_arm940_proc_fin(void);
+__ADDRESSABLE(cpu_arm940_proc_fin);
+void cpu_arm940_reset(void);
+__ADDRESSABLE(cpu_arm940_reset);
+int cpu_arm940_do_idle(void);
+__ADDRESSABLE(cpu_arm940_do_idle);
+void cpu_arm940_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm940_dcache_clean_area);
+void cpu_arm940_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm940_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+void cpu_arm946_proc_init(void);
+__ADDRESSABLE(cpu_arm946_proc_init);
+void cpu_arm946_proc_fin(void);
+__ADDRESSABLE(cpu_arm946_proc_fin);
+void cpu_arm946_reset(void);
+__ADDRESSABLE(cpu_arm946_reset);
+int cpu_arm946_do_idle(void);
+__ADDRESSABLE(cpu_arm946_do_idle);
+void cpu_arm946_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm946_dcache_clean_area);
+void cpu_arm946_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm946_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_FA526
+void cpu_fa526_proc_init(void);
+__ADDRESSABLE(cpu_fa526_proc_init);
+void cpu_fa526_proc_fin(void);
+__ADDRESSABLE(cpu_fa526_proc_fin);
+void cpu_fa526_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_fa526_reset);
+int cpu_fa526_do_idle(void);
+__ADDRESSABLE(cpu_fa526_do_idle);
+void cpu_fa526_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_fa526_dcache_clean_area);
+void cpu_fa526_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_fa526_switch_mm);
+void cpu_fa526_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_fa526_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+void cpu_arm1020_proc_init(void);
+__ADDRESSABLE(cpu_arm1020_proc_init);
+void cpu_arm1020_proc_fin(void);
+__ADDRESSABLE(cpu_arm1020_proc_fin);
+void cpu_arm1020_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1020_reset);
+int cpu_arm1020_do_idle(void);
+__ADDRESSABLE(cpu_arm1020_do_idle);
+void cpu_arm1020_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1020_dcache_clean_area);
+void cpu_arm1020_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1020_switch_mm);
+void cpu_arm1020_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1020_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+void cpu_arm1020e_proc_init(void);
+__ADDRESSABLE(cpu_arm1020e_proc_init);
+void cpu_arm1020e_proc_fin(void);
+__ADDRESSABLE(cpu_arm1020e_proc_fin);
+void cpu_arm1020e_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1020e_reset);
+int cpu_arm1020e_do_idle(void);
+__ADDRESSABLE(cpu_arm1020e_do_idle);
+void cpu_arm1020e_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1020e_dcache_clean_area);
+void cpu_arm1020e_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1020e_switch_mm);
+void cpu_arm1020e_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1020e_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+void cpu_arm1022_proc_init(void);
+__ADDRESSABLE(cpu_arm1022_proc_init);
+void cpu_arm1022_proc_fin(void);
+__ADDRESSABLE(cpu_arm1022_proc_fin);
+void cpu_arm1022_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1022_reset);
+int cpu_arm1022_do_idle(void);
+__ADDRESSABLE(cpu_arm1022_do_idle);
+void cpu_arm1022_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1022_dcache_clean_area);
+void cpu_arm1022_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1022_switch_mm);
+void cpu_arm1022_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1022_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+void cpu_arm1026_proc_init(void);
+__ADDRESSABLE(cpu_arm1026_proc_init);
+void cpu_arm1026_proc_fin(void);
+__ADDRESSABLE(cpu_arm1026_proc_fin);
+void cpu_arm1026_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1026_reset);
+int cpu_arm1026_do_idle(void);
+__ADDRESSABLE(cpu_arm1026_do_idle);
+void cpu_arm1026_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1026_dcache_clean_area);
+void cpu_arm1026_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1026_switch_mm);
+void cpu_arm1026_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1026_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_SA110
+void cpu_sa110_proc_init(void);
+__ADDRESSABLE(cpu_sa110_proc_init);
+void cpu_sa110_proc_fin(void);
+__ADDRESSABLE(cpu_sa110_proc_fin);
+void cpu_sa110_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_sa110_reset);
+int cpu_sa110_do_idle(void);
+__ADDRESSABLE(cpu_sa110_do_idle);
+void cpu_sa110_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_sa110_dcache_clean_area);
+void cpu_sa110_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_sa110_switch_mm);
+void cpu_sa110_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_sa110_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+void cpu_sa1100_proc_init(void);
+__ADDRESSABLE(cpu_sa1100_proc_init);
+void cpu_sa1100_proc_fin(void);
+__ADDRESSABLE(cpu_sa1100_proc_fin);
+void cpu_sa1100_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_sa1100_reset);
+int cpu_sa1100_do_idle(void);
+__ADDRESSABLE(cpu_sa1100_do_idle);
+void cpu_sa1100_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_sa1100_dcache_clean_area);
+void cpu_sa1100_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_sa1100_switch_mm);
+void cpu_sa1100_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_sa1100_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_sa1100_do_suspend(void *);
+__ADDRESSABLE(cpu_sa1100_do_suspend);
+void cpu_sa1100_do_resume(void *);
+__ADDRESSABLE(cpu_sa1100_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_SA1100 */
+
+#ifdef CONFIG_CPU_XSCALE
+void cpu_xscale_proc_init(void);
+__ADDRESSABLE(cpu_xscale_proc_init);
+void cpu_xscale_proc_fin(void);
+__ADDRESSABLE(cpu_xscale_proc_fin);
+void cpu_xscale_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_xscale_reset);
+int cpu_xscale_do_idle(void);
+__ADDRESSABLE(cpu_xscale_do_idle);
+void cpu_xscale_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_xscale_dcache_clean_area);
+void cpu_xscale_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_xscale_switch_mm);
+void cpu_xscale_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_xscale_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_xscale_do_suspend(void *);
+__ADDRESSABLE(cpu_xscale_do_suspend);
+void cpu_xscale_do_resume(void *);
+__ADDRESSABLE(cpu_xscale_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_XSCALE */
+
+#ifdef CONFIG_CPU_XSC3
+void cpu_xsc3_proc_init(void);
+__ADDRESSABLE(cpu_xsc3_proc_init);
+void cpu_xsc3_proc_fin(void);
+__ADDRESSABLE(cpu_xsc3_proc_fin);
+void cpu_xsc3_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_xsc3_reset);
+int cpu_xsc3_do_idle(void);
+__ADDRESSABLE(cpu_xsc3_do_idle);
+void cpu_xsc3_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_xsc3_dcache_clean_area);
+void cpu_xsc3_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_xsc3_switch_mm);
+void cpu_xsc3_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_xsc3_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_xsc3_do_suspend(void *);
+__ADDRESSABLE(cpu_xsc3_do_suspend);
+void cpu_xsc3_do_resume(void *);
+__ADDRESSABLE(cpu_xsc3_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_XSC3 */
+
+#ifdef CONFIG_CPU_MOHAWK
+void cpu_mohawk_proc_init(void);
+__ADDRESSABLE(cpu_mohawk_proc_init);
+void cpu_mohawk_proc_fin(void);
+__ADDRESSABLE(cpu_mohawk_proc_fin);
+void cpu_mohawk_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_mohawk_reset);
+int cpu_mohawk_do_idle(void);
+__ADDRESSABLE(cpu_mohawk_do_idle);
+void cpu_mohawk_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_mohawk_dcache_clean_area);
+void cpu_mohawk_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_mohawk_switch_mm);
+void cpu_mohawk_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_mohawk_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_mohawk_do_suspend(void *);
+__ADDRESSABLE(cpu_mohawk_do_suspend);
+void cpu_mohawk_do_resume(void *);
+__ADDRESSABLE(cpu_mohawk_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_MOHAWK */
+
+#ifdef CONFIG_CPU_FEROCEON
+void cpu_feroceon_proc_init(void);
+__ADDRESSABLE(cpu_feroceon_proc_init);
+void cpu_feroceon_proc_fin(void);
+__ADDRESSABLE(cpu_feroceon_proc_fin);
+void cpu_feroceon_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_feroceon_reset);
+int cpu_feroceon_do_idle(void);
+__ADDRESSABLE(cpu_feroceon_do_idle);
+void cpu_feroceon_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_feroceon_dcache_clean_area);
+void cpu_feroceon_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_feroceon_switch_mm);
+void cpu_feroceon_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_feroceon_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_feroceon_do_suspend(void *);
+__ADDRESSABLE(cpu_feroceon_do_suspend);
+void cpu_feroceon_do_resume(void *);
+__ADDRESSABLE(cpu_feroceon_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_FEROCEON */
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+void cpu_v6_proc_init(void);
+__ADDRESSABLE(cpu_v6_proc_init);
+void cpu_v6_proc_fin(void);
+__ADDRESSABLE(cpu_v6_proc_fin);
+void cpu_v6_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_v6_reset);
+int cpu_v6_do_idle(void);
+__ADDRESSABLE(cpu_v6_do_idle);
+void cpu_v6_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v6_dcache_clean_area);
+void cpu_v6_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v6_switch_mm);
+void cpu_v6_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_v6_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v6_do_suspend(void *);
+__ADDRESSABLE(cpu_v6_do_suspend);
+void cpu_v6_do_resume(void *);
+__ADDRESSABLE(cpu_v6_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CPU_V6 */
+
+#ifdef CONFIG_CPU_V7
+void cpu_v7_proc_init(void);
+__ADDRESSABLE(cpu_v7_proc_init);
+void cpu_v7_proc_fin(void);
+__ADDRESSABLE(cpu_v7_proc_fin);
+void cpu_v7_reset(void);
+__ADDRESSABLE(cpu_v7_reset);
+int cpu_v7_do_idle(void);
+__ADDRESSABLE(cpu_v7_do_idle);
+#ifdef CONFIG_PJ4B_ERRATA_4742
+int cpu_pj4b_do_idle(void);
+__ADDRESSABLE(cpu_pj4b_do_idle);
+#endif
+void cpu_v7_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v7_dcache_clean_area);
+void cpu_v7_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+/* Special switch_mm() callbacks to work around bugs in v7 */
+__ADDRESSABLE(cpu_v7_switch_mm);
+void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7_iciallu_switch_mm);
+void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7_bpiall_switch_mm);
+#ifdef CONFIG_ARM_LPAE
+void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
+void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
+__ADDRESSABLE(cpu_v7_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v7_do_suspend(void *);
+__ADDRESSABLE(cpu_v7_do_suspend);
+void cpu_v7_do_resume(void *);
+__ADDRESSABLE(cpu_v7_do_resume);
+/* Special versions of suspend and resume for the CA9MP cores */
+void cpu_ca9mp_do_suspend(void *);
+__ADDRESSABLE(cpu_ca9mp_do_suspend);
+void cpu_ca9mp_do_resume(void *);
+__ADDRESSABLE(cpu_ca9mp_do_resume);
+/* Special versions of suspend and resume for the Marvell PJ4B cores */
+#ifdef CONFIG_CPU_PJ4B
+void cpu_pj4b_do_suspend(void *);
+__ADDRESSABLE(cpu_pj4b_do_suspend);
+void cpu_pj4b_do_resume(void *);
+__ADDRESSABLE(cpu_pj4b_do_resume);
+#endif /* CONFIG_CPU_PJ4B */
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_V7 */
+
+#ifdef CONFIG_CPU_V7M
+void cpu_v7m_proc_init(void);
+__ADDRESSABLE(cpu_v7m_proc_init);
+void cpu_v7m_proc_fin(void);
+__ADDRESSABLE(cpu_v7m_proc_fin);
+void cpu_v7m_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_v7m_reset);
+int cpu_v7m_do_idle(void);
+__ADDRESSABLE(cpu_v7m_do_idle);
+void cpu_v7m_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v7m_dcache_clean_area);
+void cpu_v7m_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7m_switch_mm);
+void cpu_v7m_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_v7m_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v7m_do_suspend(void *);
+__ADDRESSABLE(cpu_v7m_do_suspend);
+void cpu_v7m_do_resume(void *);
+__ADDRESSABLE(cpu_v7m_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+void cpu_cm7_proc_fin(void);
+__ADDRESSABLE(cpu_cm7_proc_fin);
+void cpu_cm7_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_cm7_dcache_clean_area);
+#endif /* CONFIG_CPU_V7M */
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index def6161ec452..85a6fe766b21 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -31,7 +32,7 @@
* - mm - mm_struct describing address space
*/
.align 4
-ENTRY(fa_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -46,9 +47,10 @@ ENTRY(fa_flush_user_tlb_range)
blo 1b
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
ret lr
+SYM_FUNC_END(fa_flush_user_tlb_range)
-ENTRY(fa_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -60,8 +62,4 @@ ENTRY(fa_flush_kern_tlb_range)
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions fa, fa_tlb_flags
+SYM_FUNC_END(fa_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index b962b4e75158..09ff69008d94 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -40,6 +41,7 @@ ENTRY(v4_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4_flush_user_tlb_range)
/*
* v4_flush_kern_tlb_range(start, end)
@@ -50,10 +52,11 @@ ENTRY(v4_flush_user_tlb_range)
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
+#ifdef CONFIG_CFI_CLANG
+SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range)
+ b .v4_flush_kern_tlb_range
+SYM_FUNC_END(v4_flush_kern_tlb_range)
+#else
.globl v4_flush_kern_tlb_range
.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4, v4_tlb_flags
+#endif
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index 9348bba7586a..04e46c359e75 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4wb_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -43,6 +44,7 @@ ENTRY(v4wb_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wb_flush_user_tlb_range)
/*
* v4_flush_kern_tlb_range(start, end)
@@ -53,7 +55,7 @@ ENTRY(v4wb_flush_user_tlb_range)
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
-ENTRY(v4wb_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -64,8 +66,4 @@ ENTRY(v4wb_flush_kern_tlb_range)
cmp r0, r1
blo 1b
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4wb, v4wb_tlb_flags
+SYM_FUNC_END(v4wb_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index d4f9040a4111..502dfe5628a3 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -26,7 +27,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -43,8 +44,9 @@ ENTRY(v4wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wbi_flush_user_tlb_range)
-ENTRY(v4wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -55,8 +57,4 @@ ENTRY(v4wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4wbi, v4wbi_tlb_flags
+SYM_FUNC_END(v4wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 1d91e49b2c2d..8256a67ac654 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
@@ -32,7 +33,7 @@
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
-ENTRY(v6wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
@@ -56,6 +57,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
ret lr
+SYM_FUNC_END(v6wbi_flush_user_tlb_range)
/*
* v6wbi_flush_kern_tlb_range(start,end)
@@ -65,7 +67,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
-ENTRY(v6wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
@@ -85,8 +87,4 @@ ENTRY(v6wbi_flush_kern_tlb_range)
mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
ret lr
-
- __INIT
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v6wbi, v6wbi_tlb_flags
+SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 35fd6d4f0d03..f1aa0764a2cc 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
@@ -31,7 +32,7 @@
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
-ENTRY(v7wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mmid r3, r3 @ get vm_mm->context.id
dsb ish
@@ -57,7 +58,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
blo 1b
dsb ish
ret lr
-ENDPROC(v7wbi_flush_user_tlb_range)
+SYM_FUNC_END(v7wbi_flush_user_tlb_range)
/*
* v7wbi_flush_kern_tlb_range(start,end)
@@ -67,7 +68,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
-ENTRY(v7wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
@@ -86,9 +87,4 @@ ENTRY(v7wbi_flush_kern_tlb_range)
dsb ish
isb
ret lr
-ENDPROC(v7wbi_flush_kern_tlb_range)
-
- __INIT
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
+SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb.c b/arch/arm/mm/tlb.c
new file mode 100644
index 000000000000..42359793120b
--- /dev/null
+++ b/arch/arm/mm/tlb.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/types.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_TLB_V4WT
+void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4_tlb_fns __initconst = {
+ .flush_user_range = v4_flush_user_tlb_range,
+ .flush_kern_range = v4_flush_kern_tlb_range,
+ .tlb_flags = v4_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V4WB
+void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wb_tlb_fns __initconst = {
+ .flush_user_range = v4wb_flush_user_tlb_range,
+ .flush_kern_range = v4wb_flush_kern_tlb_range,
+ .tlb_flags = v4wb_tlb_flags,
+};
+#endif
+
+#if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)
+void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {
+ .flush_user_range = v4wbi_flush_user_tlb_range,
+ .flush_kern_range = v4wbi_flush_kern_tlb_range,
+ .tlb_flags = v4wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V6
+void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {
+ .flush_user_range = v6wbi_flush_user_tlb_range,
+ .flush_kern_range = v6wbi_flush_kern_tlb_range,
+ .tlb_flags = v6wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V7
+void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
+ .flush_user_range = v7wbi_flush_user_tlb_range,
+ .flush_kern_range = v7wbi_flush_kern_tlb_range,
+ .tlb_flags = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
+ : v7wbi_tlb_flags_up,
+};
+
+#ifdef CONFIG_SMP_ON_UP
+/* This will be run-time patched so the offset better be right */
+static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);
+
+asm(" .pushsection \".alt.smp.init\", \"a\" \n" \
+ " .align 2 \n" \
+ " .long v7wbi_tlb_fns + 8 - . \n" \
+ " .long " __stringify(v7wbi_tlb_flags_up) " \n" \
+ " .popsection \n");
+#endif
+#endif
+
+#ifdef CONFIG_CPU_TLB_FA
+void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void fa_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns fa_tlb_fns __initconst = {
+ .flush_user_range = fa_flush_user_tlb_range,
+ .flush_kern_range = fa_flush_kern_tlb_range,
+ .tlb_flags = fa_tlb_flags,
+};
+#endif
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 1d672457d02f..deeb8f292454 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -871,16 +871,11 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
}
/* dst = src (4 bytes)*/
-static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
- struct jit_ctx *ctx) {
+static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
s8 rt;
rt = arm_bpf_get_reg32(src, tmp[0], ctx);
- if (off && off != 32) {
- emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
- emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
- }
arm_bpf_put_reg32(dst, rt, ctx);
}
@@ -889,15 +884,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
const s8 src[],
struct jit_ctx *ctx) {
if (!is64) {
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else if (__LINUX_ARM_ARCH__ < 6 &&
ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
- emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, ctx);
} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
const u8 *tmp = bpf2a32[TMP_REG_1];
@@ -917,17 +912,52 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
- const s8 *rt;
+ s8 rs;
+ s8 rd;
+
+ if (is_stacked(dst_lo))
+ rd = tmp[1];
+ else
+ rd = dst_lo;
+ rs = arm_bpf_get_reg32(src_lo, rd, ctx);
+ /* rs may be one of src[1], dst[1], or tmp[1] */
- rt = arm_bpf_get_reg64(dst, tmp, ctx);
+ /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign
+ * extension only happens in the upper 64 bits.
+ */
+ if (off != 32) {
+ /* Sign extend rs into rd */
+ emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
+ emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
+ } else {
+ rd = rs;
+ }
+
+ /* Write rd to dst_lo
+ *
+ * Optimization:
+ * Assume:
+ * 1. dst == src and stacked.
+ * 2. off == 32
+ *
+ * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
+ * So, we don't need to write rd back to dst_lo as they have the same value.
+ * This saves us one str instruction.
+ */
+ if (dst_lo != src_lo || off != 32)
+ arm_bpf_put_reg32(dst_lo, rd, ctx);
- emit_a32_mov_r(dst_lo, src_lo, off, ctx);
if (!is64) {
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else {
- emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
+ if (is_stacked(dst_hi)) {
+ emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
+ arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
+ } else {
+ emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
+ }
}
}
@@ -2222,28 +2252,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If building the body of the JITed code fails somehow,
* we fall back to the interpretation.
*/
- if (build_body(&ctx) < 0) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (build_body(&ctx) < 0)
+ goto out_free;
build_epilogue(&ctx);
/* 3.) Extra pass to validate JITed Code */
- if (validate_code(&ctx)) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (validate_code(&ctx))
+ goto out_free;
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
if (bpf_jit_enable > 1)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_free;
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
@@ -2260,5 +2283,11 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free:
+ image_ptr = NULL;
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_imms;
}
diff --git a/arch/arm/plat-orion/Makefile b/arch/arm/plat-orion/Makefile
index 830b0be038c6..e8c7580df8ca 100644
--- a/arch/arm/plat-orion/Makefile
+++ b/arch/arm/plat-orion/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the linux kernel.
#
-ccflags-y := -I$(srctree)/$(src)/include
+ccflags-y := -I$(src)/include
orion-gpio-$(CONFIG_GPIOLIB) += gpio.o
obj-$(CONFIG_PLAT_ORION_LEGACY) += irq.o pcie.o time.o common.o mpp.o
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 81f13bdf32f2..28b6da8ac5f6 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -9,7 +9,7 @@ gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
uapi := $(gen)/uapi/asm
syshdr := $(srctree)/scripts/syscallhdr.sh
-sysnr := $(srctree)/$(src)/syscallnr.sh
+sysnr := $(src)/syscallnr.sh
systbl := $(srctree)/scripts/syscalltbl.sh
syscall := $(src)/syscall.tbl
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index d761bd2e2f40..01067a2bc43b 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -33,15 +33,6 @@ else
CFLAGS_vgettimeofday.o = -O2 -include $(c-gettimeofday-y)
endif
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
-KASAN_SANITIZE := n
-
# Force dependency
$(obj)/vdso.o : $(obj)/vdso.so
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7b11c98b3e84..00cbb794aeda 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -46,7 +46,6 @@ config ARM64
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYSCALL_WRAPPER
- select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
@@ -105,6 +104,7 @@ config ARM64
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_EXECMEM_LATE if EXECMEM
select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
select ARCH_HAS_UBSAN
@@ -205,7 +205,7 @@ config ARM64
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
@@ -255,9 +255,11 @@ config ARM64
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
+ select HAVE_ARCH_USERFAULTFD_WP if USERFAULTFD
select TRACE_IRQFLAGS_SUPPORT
select TRACE_IRQFLAGS_NMI_SUPPORT
select HAVE_SOFTIRQ_ON_OWN_STACK
+ select USER_STACKTRACE_SUPPORT
help
ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 24335565bad5..a52618073de2 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -8,6 +8,13 @@ config ARCH_ACTIONS
help
This enables support for the Actions Semiconductor S900 SoC family.
+config ARCH_AIROHA
+ bool "Airoha SoC Support"
+ select ARM_PSCI
+ select HAVE_ARM_ARCH_TIMER
+ help
+ This enables support for the ARM64 based Airoha SoCs.
+
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select ARCH_HAS_RESET_CONTROLLER
@@ -302,9 +309,11 @@ config ARCH_STM32
select GPIOLIB
select PINCTRL
select PINCTRL_STM32MP257
+ select STM32_EXTI
select ARM_SMC_MBOX
select ARM_SCMI_PROTOCOL
select COMMON_CLK_SCMI
+ select STM32_FIREWALL
help
This enables support for ARMv8 based STMicroelectronics
STM32 family, including:
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 0e075d3c546b..b8b1d4f4a572 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -154,6 +154,10 @@ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
boot := arch/arm64/boot
+BOOT_TARGETS := Image vmlinuz.efi image.fit
+
+PHONY += $(BOOT_TARGETS)
+
ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE := $(boot)/Image.gz
else
@@ -162,8 +166,10 @@ endif
all: $(notdir $(KBUILD_IMAGE))
-vmlinuz.efi: Image
-Image vmlinuz.efi: vmlinux
+image.fit: dtbs
+
+vmlinuz.efi image.fit: Image
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
Image.%: Image
@@ -215,6 +221,7 @@ virtconfig:
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
+ echo ' image.fit - Flat Image Tree (arch/$(ARCH)/boot/image.fit)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' Install using (your) ~/bin/installkernel or'
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index af5dc61f8b43..abaae9de1bdd 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -2,3 +2,4 @@
Image
Image.gz
vmlinuz*
+image.fit
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index a5a787371117..607a67a649c4 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -16,7 +16,8 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo \
+ Image.zst image.fit
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
@@ -39,6 +40,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.zst: $(obj)/Image FORCE
$(call if_changed,zstd)
+$(obj)/image.fit: $(obj)/Image $(obj)/dts/dtbs-list FORCE
+ $(call if_changed,fit)
+
EFI_ZBOOT_PAYLOAD := Image
EFI_ZBOOT_BFD_TARGET := elf64-littleaarch64
EFI_ZBOOT_MACH_TYPE := ARM64
diff --git a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
index 63e375cd9eb4..bd54b5165129 100644
--- a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
+++ b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
@@ -24,7 +24,7 @@
reg = <0x0 0x0 0x0 0x80000000>;
};
- memory@1,e0000000 {
+ memory@1e0000000 {
device_type = "memory";
reg = <0x1 0xe0000000 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 21149b346a60..0db7b60b49a1 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -39,6 +39,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-pine-h64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-pine-h64-model-b.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-tanix-tx6.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-tanix-tx6-mini.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h313-tanix-tx1.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-cb1-manta.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-pi.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-orangepi-zero2.dtb
@@ -47,3 +48,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-longanpi-3h.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-2024.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-h.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index e6d5bc0f7a61..d1f415acd7b5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -53,7 +53,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
index 0af6dcdf7515..dec9960a7440 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
@@ -41,7 +41,7 @@
};
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rtc CLK_OSC32K_FANOUT>;
clock-names = "ext_clock";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index bfb806cf6d7a..fd3794678c33 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -52,7 +52,7 @@
status = "okay";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index 4f8529d5ac00..c8303a66438d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -68,7 +68,7 @@
status = "okay";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 8 GPIO_ACTIVE_LOW>; /* PL8 */
clocks = <&rtc CLK_OSC32K_FANOUT>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 50ed2e9f10ed..6c65d5bc16ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -79,7 +79,7 @@
enable-active-high;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
index 87847116ab6d..6eab61a12cd8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
@@ -39,25 +39,35 @@
leds {
compatible = "gpio-leds";
- led-0 {
+ led0: led-0 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_BLUE>;
gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
+ retain-state-suspended;
};
- led-1 {
+ led1: led-1 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_GREEN>;
gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
+ retain-state-suspended;
};
- led-2 {
+ led2: led-2 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_RED>;
gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
+ retain-state-suspended;
};
};
+ multi-led {
+ compatible = "leds-group-multicolor";
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_INDICATOR;
+ leds = <&led0>, <&led1>, <&led2>;
+ };
+
reg_ps: ps-regulator {
compatible = "regulator-fixed";
regulator-name = "ps";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
index 0a5607f73049..c6007df99938 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -98,7 +98,7 @@
enable-active-high;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index 1128030e4c25..b407e1dd08a7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -74,7 +74,7 @@
status = "okay";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 57ac18738c99..ce4aa44c3353 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -107,27 +107,19 @@
gpu_opp_table: opp-table-gpu {
compatible = "operating-points-v2";
- opp-120000000 {
- opp-hz = /bits/ 64 <120000000>;
- };
-
- opp-312000000 {
- opp-hz = /bits/ 64 <312000000>;
- };
-
opp-432000000 {
opp-hz = /bits/ 64 <432000000>;
};
};
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
clock-output-names = "osc24M";
};
- osc32k: osc32k_clk {
+ osc32k: osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
@@ -216,21 +208,21 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_alert1: cpu_alert1 {
+ cpu_alert1: cpu-alert1 {
/* milliCelsius */
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
};
- cpu_crit: cpu_crit {
+ cpu_crit: cpu-crit {
/* milliCelsius */
temperature = <110000>;
hysteresis = <2000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
new file mode 100644
index 000000000000..bb2cde59bd03
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+/dts-v1/;
+
+#include "sun50i-h616.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Tanix TX1";
+ compatible = "oranth,tanix-tx1", "allwinner,sun50i-h616";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &sdio_wifi;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key {
+ label = "hidden";
+ linux,code = <BTN_0>;
+ gpios = <&pio 7 9 GPIO_ACTIVE_LOW>; /* PH9 */
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+ default-state = "on";
+ };
+ };
+
+ wifi_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rtc CLK_OSC32K_FANOUT>;
+ clock-names = "ext_clock";
+ pinctrl-0 = <&x32clk_fanout_pin>;
+ pinctrl-names = "default";
+ reset-gpios = <&pio 6 18 GPIO_ACTIVE_LOW>; /* PG18 */
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply directly from the DC input */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ir {
+ status = "okay";
+};
+
+&mmc1 {
+ vmmc-supply = <&reg_dldo1>;
+ vqmmc-supply = <&reg_aldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ sdio_wifi: wifi@1 {
+ reg = <1>;
+ };
+};
+
+&mmc2 {
+ vmmc-supply = <&reg_dldo1>;
+ vqmmc-supply = <&reg_aldo1>;
+ bus-width = <8>;
+ non-removable;
+ max-frequency = <100000000>;
+ cap-mmc-hw-reset;
+ mmc-ddr-1_8v;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pc-supply = <&reg_aldo1>;
+ vcc-pf-supply = <&reg_dldo1>;
+ vcc-pg-supply = <&reg_aldo1>;
+ vcc-ph-supply = <&reg_dldo1>;
+ vcc-pi-supply = <&reg_dldo1>;
+};
+
+&r_i2c {
+ status = "okay";
+
+ axp313: pmic@36 {
+ compatible = "x-powers,axp313a";
+ reg = <0x36>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+
+ regulators {
+ /* Supplies VCC-PLL, so needs to be always on. */
+ reg_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8";
+ };
+
+ /* Supplies VCC-IO, so needs to be always on. */
+ reg_dldo1: dldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3";
+ };
+
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <990000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1120000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd-dram";
+ };
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ph_pins>;
+ status = "okay";
+};
+
+&usbotg {
+ dr_mode = "host"; /* USB A type receptacle */
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 4c3921ac236c..b69032c44557 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -68,7 +68,7 @@
states = <1100000 0>, <1300000 1>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts
index a3e040da38a0..3a7ee44708a2 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts
@@ -103,7 +103,7 @@
states = <1100000 0x0>, <1300000 0x1>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
post-power-on-delay-ms = <200>;
@@ -170,7 +170,7 @@
non-removable;
status = "okay";
- rtl8189etv: sdio_wifi@1 {
+ rtl8189etv: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index d7f8bad6bb98..b699bb900e13 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -85,7 +85,7 @@
status = "okay";
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 2 14 GPIO_ACTIVE_LOW>; /* PC14 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index 7ec5ac850a0d..ae85131aac9c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -97,7 +97,7 @@
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
- rtl8189ftv: sdio_wifi@1 {
+ rtl8189ftv: wifi@1 {
reg = <1>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index 22530ace12d5..734481e998b8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -52,7 +52,7 @@
regulator-max-microvolt = <3300000>;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 381d58cea092..3be1e8c2fdb9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -34,7 +34,7 @@
};
};
- ext_osc32k: ext_osc32k_clk {
+ ext_osc32k: ext-osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index 6fc65e8db220..6c3bfe3d09d9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -33,7 +33,7 @@
};
};
- ext_osc32k: ext_osc32k_clk {
+ ext_osc32k: ext-osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
index fb31dcb1cb6d..a3f65a45bd26 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
@@ -11,7 +11,7 @@
serial1 = &uart1; /* BT-UART */
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rtc CLK_OSC32K_FANOUT>;
clock-names = "ext_clock";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index 92745128fcfe..13b07141c334 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -32,7 +32,7 @@
};
};
- ext_osc32k: ext_osc32k_clk {
+ ext_osc32k: ext-osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
index b710f1a0f53a..66fe03910d5e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
@@ -5,13 +5,13 @@
#include "sun50i-h6-pine-h64.dts"
+/delete-node/ &reg_gmac_3v3;
+
/ {
model = "Pine H64 model B";
compatible = "pine64,pine-h64-model-b", "allwinner,sun50i-h6";
- /delete-node/ reg_gmac_3v3;
-
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 1 3 GPIO_ACTIVE_LOW>; /* PM3 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index 1ffd68f43f87..3910393be1f9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -22,7 +22,7 @@
stdout-path = "serial0:115200n8";
};
- ext_osc32k: ext_osc32k_clk {
+ ext_osc32k: ext-osc32k-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index d11e5041bae9..8a8591c4e7dd 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -68,7 +68,7 @@
status = "disabled";
};
- osc24M: osc24M_clk {
+ osc24M: osc24M-clk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
index af421ba24ce0..d12b01c5f41b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-bigtreetech-cb1.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -62,6 +63,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&mmc0 {
vmmc-supply = <&reg_dldo1>;
/* Card detection pin is not connected */
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi
new file mode 100644
index 000000000000..aca22a7f0191
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2023 Martin Botka <martin@somainline.org>
+
+/ {
+ cpu_opp_table: opp-table-cpu {
+ compatible = "allwinner,sun50i-h616-operating-points";
+ nvmem-cells = <&cpu_speed_grade>;
+ opp-shared;
+
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x1f>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x12>;
+ };
+
+ opp-720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x0d>;
+ };
+
+ opp-792000000 {
+ opp-hz = /bits/ 64 <792000000>;
+ opp-microvolt-speed1 = <900000>;
+ opp-microvolt-speed4 = <940000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x12>;
+ };
+
+ opp-936000000 {
+ opp-hz = /bits/ 64 <936000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x0d>;
+ };
+
+ opp-1008000000 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt-speed0 = <950000>;
+ opp-microvolt-speed1 = <940000>;
+ opp-microvolt-speed2 = <950000>;
+ opp-microvolt-speed3 = <950000>;
+ opp-microvolt-speed4 = <1020000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x1f>;
+ };
+
+ opp-1104000000 {
+ opp-hz = /bits/ 64 <1104000000>;
+ opp-microvolt-speed0 = <1000000>;
+ opp-microvolt-speed2 = <1000000>;
+ opp-microvolt-speed3 = <1000000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x0d>;
+ };
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt-speed0 = <1050000>;
+ opp-microvolt-speed1 = <1020000>;
+ opp-microvolt-speed2 = <1050000>;
+ opp-microvolt-speed3 = <1050000>;
+ opp-microvolt-speed4 = <1100000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x1f>;
+ };
+
+ opp-1320000000 {
+ opp-hz = /bits/ 64 <1320000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x1d>;
+ };
+
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x0d>;
+ };
+
+ opp-1512000000 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-microvolt-speed1 = <1100000>;
+ opp-microvolt-speed3 = <1100000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x0a>;
+ };
+ };
+};
+
+&cpu0 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu1 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu2 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu3 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
index b5d713926a34..a360d8567f95 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
@@ -6,12 +6,17 @@
/dts-v1/;
#include "sun50i-h616-orangepi-zero.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
/ {
model = "OrangePi Zero2";
compatible = "xunlong,orangepi-zero2", "allwinner,sun50i-h616";
};
+&cpu0 {
+ cpu-supply = <&reg_dcdca>;
+};
+
&emac0 {
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
index 959b6fd18483..26d25b5b59e0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-x96-mate.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -32,6 +33,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdca>;
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
index b2e85e52d1a1..921d5f61d8d6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
@@ -26,6 +26,7 @@
reg = <0>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -34,6 +35,7 @@
reg = <1>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -42,6 +44,7 @@
reg = <2>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -50,6 +53,7 @@
reg = <3>;
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
+ #cooling-cells = <2>;
};
};
@@ -156,6 +160,10 @@
ths_calibration: thermal-sensor-calibration@14 {
reg = <0x14 0x8>;
};
+
+ cpu_speed_grade: cpu-speed-grade@0 {
+ reg = <0x0 2>;
+ };
};
watchdog: watchdog@30090a0 {
@@ -194,7 +202,7 @@
};
i2c0_pins: i2c0-pins {
- pins = "PI6", "PI7";
+ pins = "PI5", "PI6";
function = "i2c0";
};
@@ -775,6 +783,15 @@
#reset-cells = <1>;
};
+ nmi_intc: interrupt-controller@7010320 {
+ compatible = "allwinner,sun50i-h616-nmi",
+ "allwinner,sun9i-a80-nmi";
+ reg = <0x07010320 0xc>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
r_pio: pinctrl@7022000 {
compatible = "allwinner,sun50i-h616-r-pinctrl";
reg = <0x07022000 0x400>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
index 8c1263a3939e..e92d150aaf1c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-longan-module-3h.dtsi
@@ -4,6 +4,11 @@
*/
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
&mmc2 {
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
index 21ca1977055d..6a4f0da97233 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero2w.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -53,6 +54,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&ehci1 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
index b3b1b8692125..e1cd7572a14c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
@@ -6,12 +6,17 @@
/dts-v1/;
#include "sun50i-h616-orangepi-zero.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
/ {
model = "OrangePi Zero3";
compatible = "xunlong,orangepi-zero3", "allwinner,sun50i-h618";
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&emac0 {
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii-rxid";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
index ac0a2b7ea6f3..d6631bfe629f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-transpeed-8k618-t.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -41,7 +42,7 @@
regulator-always-on;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rtc CLK_OSC32K_FANOUT>;
clock-names = "ext_clock";
@@ -51,6 +52,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
index b6e3c169797f..c204dd43c726 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
@@ -42,7 +42,7 @@
regulator-always-on;
};
- wifi_pwrseq: wifi_pwrseq {
+ wifi_pwrseq: pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
post-power-on-delay-ms = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
new file mode 100644
index 000000000000..ee30584b6ad7
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2024 Ryan Walklin <ryan@testtoast.com>.
+ */
+
+/dts-v1/;
+
+#include "sun50i-h616.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Anbernic RG35XX 2024";
+ chassis-type = "handset";
+ compatible = "anbernic,rg35xx-2024", "allwinner,sun50i-h700";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio_keys_gamepad: gpio-keys-gamepad {
+ compatible = "gpio-keys";
+
+ button-a {
+ label = "Action-Pad A";
+ gpios = <&pio 0 0 GPIO_ACTIVE_LOW>; /* PA0 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_EAST>;
+ };
+
+ button-b {
+ label = "Action-Pad B";
+ gpios = <&pio 0 1 GPIO_ACTIVE_LOW>; /* PA1 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_SOUTH>;
+ };
+
+ button-down {
+ label = "D-Pad Down";
+ gpios = <&pio 4 0 GPIO_ACTIVE_LOW>; /* PE0 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_DPAD_DOWN>;
+ };
+
+ button-l1 {
+ label = "Key L1";
+ gpios = <&pio 0 10 GPIO_ACTIVE_LOW>; /* PA10 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_TL>;
+ };
+
+ button-l2 {
+ label = "Key L2";
+ gpios = <&pio 0 11 GPIO_ACTIVE_LOW>; /* PA11 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_TL2>;
+ };
+
+ button-left {
+ label = "D-Pad left";
+ gpios = <&pio 0 8 GPIO_ACTIVE_LOW>; /* PA8 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_DPAD_LEFT>;
+ };
+
+ button-menu {
+ label = "Key Menu";
+ gpios = <&pio 4 3 GPIO_ACTIVE_LOW>; /* PE3 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_MODE>;
+ };
+
+ button-r1 {
+ label = "Key R1";
+ gpios = <&pio 0 12 GPIO_ACTIVE_LOW>; /* PA12 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_TR>;
+ };
+
+ button-r2 {
+ label = "Key R2";
+ gpios = <&pio 0 7 GPIO_ACTIVE_LOW>; /* PA7 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_TR2>;
+ };
+
+ button-right {
+ label = "D-Pad Right";
+ gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_DPAD_RIGHT>;
+ };
+
+ button-select {
+ label = "Key Select";
+ gpios = <&pio 0 5 GPIO_ACTIVE_LOW>; /* PA5 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_SELECT>;
+ };
+ button-start {
+ label = "Key Start";
+ gpios = <&pio 0 4 GPIO_ACTIVE_LOW>; /* PA4 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_START>;
+ };
+
+ button-up {
+ label = "D-Pad Up";
+ gpios = <&pio 0 6 GPIO_ACTIVE_LOW>; /* PA6 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_DPAD_UP>;
+ };
+
+ button-x {
+ label = "Action-Pad X";
+ gpios = <&pio 0 3 GPIO_ACTIVE_LOW>; /* PA3 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_NORTH>;
+ };
+
+ button-y {
+ label = "Action Pad Y";
+ gpios = <&pio 0 2 GPIO_ACTIVE_LOW>; /* PA2 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_WEST>;
+ };
+ };
+
+ gpio-keys-volume {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ button-vol-up {
+ label = "Key Volume Up";
+ gpios = <&pio 4 1 GPIO_ACTIVE_LOW>; /* PE1 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ button-vol-down {
+ label = "Key Volume Down";
+ gpios = <&pio 4 2 GPIO_ACTIVE_LOW>; /* PE2 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 8 12 GPIO_ACTIVE_HIGH>; /* PI12 */
+ default-state = "on";
+ };
+ };
+
+ reg_vcc5v: regulator-vcc5v { /* USB-C power input */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc1>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_cldo3>;
+ disable-wp;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pa-supply = <&reg_cldo3>;
+ vcc-pc-supply = <&reg_cldo3>;
+ vcc-pe-supply = <&reg_cldo3>;
+ vcc-pf-supply = <&reg_cldo3>;
+ vcc-pg-supply = <&reg_aldo4>;
+ vcc-ph-supply = <&reg_cldo3>;
+ vcc-pi-supply = <&reg_cldo3>;
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp717: pmic@3a3 {
+ compatible = "x-powers,axp717";
+ reg = <0x3a3>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ vin1-supply = <&reg_vcc5v>;
+ vin2-supply = <&reg_vcc5v>;
+ vin3-supply = <&reg_vcc5v>;
+ vin4-supply = <&reg_vcc5v>;
+
+ regulators {
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <940000>;
+ regulator-max-microvolt = <940000>;
+ regulator-name = "vdd-gpu-sys";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-dram";
+ };
+
+ reg_aldo1: aldo1 {
+ /* 1.8v - unused */
+ };
+
+ reg_aldo2: aldo2 {
+ /* 1.8v - unused */
+ };
+
+ reg_aldo3: aldo3 {
+ /* 1.8v - unused */
+ };
+
+ reg_aldo4: aldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pg";
+ };
+
+ reg_bldo1: bldo1 {
+ /* 1.8v - unused */
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pll";
+ };
+
+ reg_bldo3: bldo3 {
+ /* 2.8v - unused */
+ };
+
+ reg_bldo4: bldo4 {
+ /* 1.2v - unused */
+ };
+
+ reg_cldo1: cldo1 {
+ /* 3.3v - audio codec - not yet implemented */
+ };
+
+ reg_cldo2: cldo2 {
+ /* 3.3v - unused */
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-io";
+ };
+
+ reg_cldo4: cldo4 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+ };
+
+ reg_boost: boost {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5200000>;
+ regulator-name = "boost";
+ };
+
+ reg_cpusldo: cpusldo {
+ /* unused */
+ };
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ph_pins>;
+ status = "okay";
+};
+
+/* the AXP717 has USB type-C role switch functionality, not yet described by the binding */
+&usbotg {
+ dr_mode = "peripheral"; /* USB type-C receptable */
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
new file mode 100644
index 000000000000..63036256917f
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2024 Ryan Walklin <ryan@testtoast.com>.
+ * Copyright (C) 2024 Chris Morgan <macroalpha82@gmail.com>.
+ */
+
+#include "sun50i-h700-anbernic-rg35xx-plus.dts"
+
+/ {
+ model = "Anbernic RG35XX H";
+ compatible = "anbernic,rg35xx-h", "allwinner,sun50i-h700";
+};
+
+&gpio_keys_gamepad {
+ button-thumbl {
+ label = "GPIO Thumb Left";
+ gpios = <&pio 4 8 GPIO_ACTIVE_LOW>; /* PE8 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_THUMBL>;
+ };
+
+ button-thumbr {
+ label = "GPIO Thumb Right";
+ gpios = <&pio 4 9 GPIO_ACTIVE_LOW>; /* PE9 */
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_THUMBR>;
+ };
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-plus.dts
new file mode 100644
index 000000000000..60a8e4922103
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-plus.dts
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2024 Ryan Walklin <ryan@testtoast.com>.
+ */
+
+#include "sun50i-h700-anbernic-rg35xx-2024.dts"
+
+/ {
+ model = "Anbernic RG35XX Plus";
+ compatible = "anbernic,rg35xx-plus", "allwinner,sun50i-h700";
+
+ wifi_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rtc CLK_OSC32K_FANOUT>;
+ clock-names = "ext_clock";
+ pinctrl-0 = <&x32clk_fanout_pin>;
+ pinctrl-names = "default";
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&pio 6 18 GPIO_ACTIVE_LOW>; /* PG18 */
+ };
+};
+
+/* SDIO WiFi RTL8821CS */
+&mmc1 {
+ vmmc-supply = <&reg_cldo4>;
+ vqmmc-supply = <&reg_aldo4>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ sdio_wifi: wifi@1 {
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <6 15 IRQ_TYPE_LEVEL_LOW>; /* PG15 */
+ interrupt-names = "host-wake";
+ };
+};
+
+/* Bluetooth RTL8821CS */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "realtek,rtl8821cs-bt", "realtek,rtl8723bs-bt";
+ device-wake-gpios = <&pio 6 17 GPIO_ACTIVE_HIGH>; /* PG17 */
+ enable-gpios = <&pio 6 19 GPIO_ACTIVE_HIGH>; /* PG19 */
+ host-wake-gpios = <&pio 6 16 GPIO_ACTIVE_HIGH>; /* PG16 */
+ };
+};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 072fe20cfca0..cbbc53c47921 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -79,7 +79,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <0 170 4>,
<0 171 4>,
<0 172 4>,
diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
index dbf2dce8d1d6..da9de4986660 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
@@ -39,6 +39,7 @@
/ {
model = "Annapurna Labs Alpine v2";
compatible = "al,alpine-v2";
+ interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
@@ -89,6 +90,22 @@
clock-frequency = <1000000>;
};
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a57-pmu";
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -97,22 +114,6 @@
interrupt-parent = <&gic>;
ranges;
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
- };
-
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
- };
-
gic: interrupt-controller@f0200000 {
compatible = "arm,gic-v3";
reg = <0x0 0xf0200000 0x0 0x10000>, /* GIC Dist */
@@ -150,7 +151,7 @@
al,msi-num-spis = <160>;
};
- io-fabric {
+ io-fabric@fc000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
index 3ea178acdddf..8b6156b5af65 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
@@ -244,7 +244,7 @@
next-level-cache = <&cluster3_l2>;
};
- cluster0_l2: cache@0 {
+ cluster0_l2: cache-0 {
compatible = "cache";
cache-size = <0x200000>;
cache-line-size = <64>;
@@ -253,7 +253,7 @@
cache-unified;
};
- cluster1_l2: cache@100 {
+ cluster1_l2: cache-100 {
compatible = "cache";
cache-size = <0x200000>;
cache-line-size = <64>;
@@ -262,7 +262,7 @@
cache-unified;
};
- cluster2_l2: cache@200 {
+ cluster2_l2: cache-200 {
compatible = "cache";
cache-size = <0x200000>;
cache-line-size = <64>;
@@ -271,7 +271,7 @@
cache-unified;
};
- cluster3_l2: cache@300 {
+ cluster3_l2: cache-300 {
compatible = "cache";
cache-size = <0x200000>;
cache-line-size = <64>;
@@ -318,7 +318,7 @@
#size-cells = <2>;
ranges;
- gic: interrupt-controller@f0000000 {
+ gic: interrupt-controller@f0800000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
@@ -361,7 +361,7 @@
interrupt-parent = <&gic>;
};
- io-fabric {
+ io-fabric@fc000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/amd/elba-16core.dtsi b/arch/arm64/boot/dts/amd/elba-16core.dtsi
index 568bcc39ce9f..6c1b7b8fe354 100644
--- a/arch/arm64/boot/dts/amd/elba-16core.dtsi
+++ b/arch/arm64/boot/dts/amd/elba-16core.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2020-2023 Advanced Micro Devices, Inc.
*/
diff --git a/arch/arm64/boot/dts/amd/elba-asic-common.dtsi b/arch/arm64/boot/dts/amd/elba-asic-common.dtsi
index 46b6c6783f58..d12e9a7b5587 100644
--- a/arch/arm64/boot/dts/amd/elba-asic-common.dtsi
+++ b/arch/arm64/boot/dts/amd/elba-asic-common.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2020-2022 Advanced Micro Devices, Inc.
*/
diff --git a/arch/arm64/boot/dts/amd/elba-asic.dts b/arch/arm64/boot/dts/amd/elba-asic.dts
index c3f4da2f7449..20b0fa0807a1 100644
--- a/arch/arm64/boot/dts/amd/elba-asic.dts
+++ b/arch/arm64/boot/dts/amd/elba-asic.dts
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Device Tree file for AMD Pensando Elba Board.
*
diff --git a/arch/arm64/boot/dts/amd/elba-flash-parts.dtsi b/arch/arm64/boot/dts/amd/elba-flash-parts.dtsi
index cf761a05a81f..6ea2d777c8c9 100644
--- a/arch/arm64/boot/dts/amd/elba-flash-parts.dtsi
+++ b/arch/arm64/boot/dts/amd/elba-flash-parts.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2020-2023 Advanced Micro Devices, Inc.
*/
diff --git a/arch/arm64/boot/dts/amd/elba.dtsi b/arch/arm64/boot/dts/amd/elba.dtsi
index 674890cf2a34..758bce0a0b2a 100644
--- a/arch/arm64/boot/dts/amd/elba.dtsi
+++ b/arch/arm64/boot/dts/amd/elba.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2020-2022 Advanced Micro Devices, Inc.
*/
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 1ab160bf928a..0f29517da5ec 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_MESON) += amlogic-a4-a113l2-ba400.dtb
+dtb-$(CONFIG_ARCH_MESON) += amlogic-a5-a113x2-av400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c302x-aw409.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-an400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-khadas-vim4.dtb
@@ -16,7 +18,9 @@ dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-bananapi-m2s.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3-ts050.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-bananapi-cm4-cm4io.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-bananapi-cm4-mnt-reform2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gsking-x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking.dtb
@@ -76,6 +80,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-sm1-bananapi-m2-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-bananapi-m5.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-h96-max.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l-ts050.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-s905d3-libretech-cc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-hc4.dtb
@@ -86,3 +91,5 @@ dtb-$(CONFIG_ARCH_MESON) += meson-sm1-x96-air.dtb
# Overlays
meson-g12a-fbx8am-brcm-dtbs := meson-g12a-fbx8am.dtb meson-g12a-fbx8am-brcm.dtbo
meson-g12a-fbx8am-realtek-dtbs := meson-g12a-fbx8am.dtb meson-g12a-fbx8am-realtek.dtbo
+meson-g12b-a311d-khadas-vim3-ts050-dtbs := meson-g12b-a311d-khadas-vim3.dtb meson-khadas-vim3-ts050.dtbo
+meson-sm1-khadas-vim3l-ts050-dtbs := meson-sm1-khadas-vim3l.dtb meson-khadas-vim3-ts050.dtbo
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4-a113l2-ba400.dts b/arch/arm64/boot/dts/amlogic/amlogic-a4-a113l2-ba400.dts
new file mode 100644
index 000000000000..ad3127e695d9
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4-a113l2-ba400.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-a4.dtsi"
+
+/ {
+ model = "Amlogic A113L2 ba400 Development Board";
+ compatible = "amlogic,ba400", "amlogic,a4";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 10 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x05000000 0x0 0xa00000>;
+ no-map;
+ };
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi
new file mode 100644
index 000000000000..b6106ad4a072
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+/ {
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ xtal: xtal-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@fff01000 {
+ compatible = "arm,gic-400";
+ reg = <0x0 0xfff01000 0 0x1000>,
+ <0x0 0xfff02000 0 0x2000>,
+ <0x0 0xfff04000 0 0x2000>,
+ <0x0 0xfff06000 0 0x2000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ apb: bus@fe000000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xfe000000 0x0 0x480000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+
+ uart_b: serial@7a000 {
+ compatible = "amlogic,a4-uart",
+ "amlogic,meson-s4-uart";
+ reg = <0x0 0x7a000 0x0 0x18>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&xtal>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
new file mode 100644
index 000000000000..73ca1d7eed81
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#include "amlogic-a4-common.dtsi"
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5-a113x2-av400.dts b/arch/arm64/boot/dts/amlogic/amlogic-a5-a113x2-av400.dts
new file mode 100644
index 000000000000..11d8b88c1ce5
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5-a113x2-av400.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-a5.dtsi"
+
+/ {
+ model = "Amlogic A113X2 av400 Development Board";
+ compatible = "amlogic,av400", "amlogic,a5";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 10 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@5000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x05000000 0x0 0xa00000>;
+ no-map;
+ };
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
new file mode 100644
index 000000000000..43f68a7da2f7
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#include "amlogic-a4-common.dtsi"
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-t7-reset.h b/arch/arm64/boot/dts/amlogic/amlogic-t7-reset.h
new file mode 100644
index 000000000000..ec90a11df508
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-t7-reset.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __DTS_AMLOGIC_T7_RESET_H
+#define __DTS_AMLOGIC_T7_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USB 4
+#define RESET_U2DRD 5
+#define RESET_U3DRD 6
+#define RESET_U3DRD_PIPE0 7
+#define RESET_U2PHY20 8
+#define RESET_U2PHY21 9
+#define RESET_GDC 10
+#define RESET_HDMI20_AES 11
+#define RESET_HDMIRX 12
+#define RESET_HDMIRX_APB 13
+#define RESET_DEWARP 14
+/* 15 */
+#define RESET_HDMITX_CAPB3 16
+#define RESET_BRG_VCBUG_DEC 17
+#define RESET_VCBUS 18
+#define RESET_VID_PLL_DIV 19
+#define RESET_VDI6 20
+#define RESET_GE2D 21
+#define RESET_HDMITXPHY 22
+#define RESET_VID_LOCK 23
+#define RESET_VENC0 24
+#define RESET_VDAC 25
+#define RESET_VENC2 26
+#define RESET_VENC1 27
+#define RESET_RDMA 28
+#define RESET_HDMITX 29
+#define RESET_VIU 30
+#define RESET_VENC 31
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_MALI_CAPB3 33
+#define RESET_MALI 34
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+#define RESET_DOS_CAPB3 37
+#define RESET_DOS 38
+#define RESET_COMBO_DPHY_CHAN2 39
+#define RESET_DEBUG_B 40
+#define RESET_DEBUG_A 41
+#define RESET_DSP_B 42
+#define RESET_DSP_A 43
+#define RESET_PCIE_A 44
+#define RESET_PCIE_PHY 45
+#define RESET_PCIE_APB 46
+#define RESET_ANAKIN 47
+#define RESET_ETH 48
+#define RESET_EDP0_CTRL 49
+#define RESET_EDP1_CTRL 50
+#define RESET_COMBO_DPHY_CHAN0 51
+#define RESET_COMBO_DPHY_CHAN1 52
+#define RESET_DSI_LVDS_EDP_TOP 53
+#define RESET_PCIE1_PHY 54
+#define RESET_PCIE1_APB 55
+#define RESET_DDR_1 56
+/* 57 */
+#define RESET_EDP1_PIPELINE 58
+#define RESET_EDP0_PIPELINE 59
+#define RESET_MIPI_DSI1_PHY 60
+#define RESET_MIPI_DSI0_PHY 61
+#define RESET_MIPI_DSI_A_HOST 62
+#define RESET_MIPI_DSI_B_HOST 63
+
+/* RESET2 */
+#define RESET_DEVICE_MMC_ARB 64
+#define RESET_IR_CTRL 65
+#define RESET_TS_A73 66
+#define RESET_TS_A53 67
+#define RESET_SPICC_2 68
+#define RESET_SPICC_3 69
+#define RESET_SPICC_4 70
+#define RESET_SPICC_5 71
+#define RESET_SMART_CARD 72
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+#define RESET_BT 83
+/* 84-87 */
+#define RESET_ACODEC 88
+#define RESET_CEC 89
+#define RESET_AFIFO 90
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+#define RESET_BRG_NIC1_GPV 96
+#define RESET_BRG_NIC2_GPV 97
+#define RESET_BRG_NIC3_GPV 98
+#define RESET_BRG_NIC4_GPV 99
+#define RESET_BRG_NIC5_GPV 100
+/* 101-121 */
+#define RESET_MIPI_ISP 122
+#define RESET_BRG_ADB_MALI_1 123
+#define RESET_BRG_ADB_MALI_0 124
+#define RESET_BRG_ADB_A73 125
+#define RESET_BRG_ADB_A53 126
+#define RESET_BRG_CCI 127
+
+/* RESET4 */
+#define RESET_PWM_AO_AB 128
+#define RESET_PWM_AO_CD 129
+#define RESET_PWM_AO_EF 130
+#define RESET_PWM_AO_GH 131
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+/* 135-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+#define RESET_UART_F 143
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+#define RESET_I2C_M_E 149
+#define RESET_I2C_M_F 150
+#define RESET_I2C_M_AO_A 151
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_SD_EMMC_C 154
+#define RESET_I2C_M_AO_B 155
+#define RESET_TS_GPU 156
+#define RESET_TS_NNA 157
+#define RESET_TS_VPN 158
+#define RESET_TS_HEVC 159
+
+/* RESET5 */
+#define RESET_BRG_NOC_DDR_1 160
+#define RESET_BRG_NOC_DDR_0 161
+#define RESET_BRG_NOC_MAIN 162
+#define RESET_BRG_NOC_ALL 163
+/* 164-167 */
+#define RESET_BRG_NIC2_SYS 168
+#define RESET_BRG_NIC2_MAIN 169
+#define RESET_BRG_NIC2_HDMI 170
+#define RESET_BRG_NIC2_ALL 171
+#define RESET_BRG_NIC3_WAVE 172
+#define RESET_BRG_NIC3_VDEC 173
+#define RESET_BRG_NIC3_HEVCF 174
+#define RESET_BRG_NIC3_HEVCB 175
+#define RESET_BRG_NIC3_HCODEC 176
+#define RESET_BRG_NIC3_GE2D 177
+#define RESET_BRG_NIC3_GDC 178
+#define RESET_BRG_NIC3_AMLOGIC 179
+#define RESET_BRG_NIC3_MAIN 180
+#define RESET_BRG_NIC3_ALL 181
+#define RESET_BRG_NIC5_VPU 182
+/* 183-185 */
+#define RESET_BRG_NIC4_DSPB 186
+#define RESET_BRG_NIC4_DSPA 187
+#define RESET_BRG_NIC4_VAPB 188
+#define RESET_BRG_NIC4_CLK81 189
+#define RESET_BRG_NIC4_MAIN 190
+#define RESET_BRG_NIC4_ALL 191
+
+/* RESET6 */
+#define RESET_BRG_VDEC_PIPEL 192
+#define RESET_BRG_HEVCF_DMC_PIPEL 193
+#define RESET_BRG_NIC2TONIC4_PIPEL 194
+#define RESET_BRG_HDMIRXTONIC2_PIPEL 195
+#define RESET_BRG_SECTONIC4_PIPEL 196
+#define RESET_BRG_VPUTONOC_PIPEL 197
+#define RESET_BRG_NIC4TONOC_PIPEL 198
+#define RESET_BRG_NIC3TONOC_PIPEL 199
+#define RESET_BRG_NIC2TONOC_PIPEL 200
+#define RESET_BRG_NNATONOC_PIPEL 201
+#define RESET_BRG_FRISP3_PIPEL 202
+#define RESET_BRG_FRISP2_PIPEL 203
+#define RESET_BRG_FRISP1_PIPEL 204
+#define RESET_BRG_FRISP0_PIPEL 205
+/* 206-217 */
+#define RESET_BRG_AMPIPE_NAND 218
+#define RESET_BRG_AMPIPE_ETH 219
+/* 220 */
+#define RESET_BRG_AM2AXI0 221
+#define RESET_BRG_AM2AXI1 222
+#define RESET_BRG_AM2AXI2 223
+
+#endif /* __DTS_AMLOGIC_T7_RESET_H */
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
index 5248bdf824ea..c23efc6c7ac0 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/amlogic,t7-pwrc.h>
+#include "amlogic-t7-reset.h"
/ {
interrupt-parent = <&gic>;
@@ -149,6 +150,12 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+ reset: reset-controller@2000 {
+ compatible = "amlogic,t7-reset";
+ reg = <0x0 0x2000 0x0 0x98>;
+ #reset-cells = <1>;
+ };
+
watchdog@2100 {
compatible = "amlogic,t7-wdt";
reg = <0x0 0x2100 0x0 0x10>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 9d5eab6595d0..b058ed78faf0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -1663,9 +1663,28 @@
<250000000>,
<0>; /* Do Nothing */
};
+
+ mipi_analog_dphy: phy {
+ compatible = "amlogic,g12a-mipi-dphy-analog";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
};
};
+ mipi_dphy: phy@44000 {
+ compatible = "amlogic,axg-mipi-dphy";
+ reg = <0x0 0x44000 0x0 0x2000>;
+ clocks = <&clkc CLKID_MIPI_DSI_PHY>;
+ clock-names = "pclk";
+ resets = <&reset RESET_MIPI_DSI_PHY>;
+ reset-names = "phy";
+ phys = <&mipi_analog_dphy>;
+ phy-names = "analog";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
usb3_pcie_phy: phy@46000 {
compatible = "amlogic,g12a-usb3-pcie-phy";
reg = <0x0 0x46000 0x0 0x2000>;
@@ -2152,6 +2171,15 @@
remote-endpoint = <&hdmi_tx_in>;
};
};
+
+ /* DPI output port */
+ dpi_port: port@2 {
+ reg = <2>;
+
+ dpi_out: endpoint {
+ remote-endpoint = <&mipi_dsi_in>;
+ };
+ };
};
gic: interrupt-controller@ffc01000 {
@@ -2189,6 +2217,48 @@
amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
};
+ mipi_dsi: dsi@7000 {
+ compatible = "amlogic,meson-g12a-dw-mipi-dsi";
+ reg = <0x0 0x7000 0x0 0x1000>;
+ resets = <&reset RESET_MIPI_DSI_HOST>;
+ reset-names = "top";
+ clocks = <&clkc CLKID_MIPI_DSI_HOST>,
+ <&clkc CLKID_MIPI_DSI_PXCLK>,
+ <&clkc CLKID_CTS_ENCL>;
+ clock-names = "pclk", "bit", "px";
+ phys = <&mipi_dphy>;
+ phy-names = "dphy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ assigned-clocks = <&clkc CLKID_MIPI_DSI_PXCLK_SEL>,
+ <&clkc CLKID_CTS_ENCL_SEL>,
+ <&clkc CLKID_VCLK2_SEL>;
+ assigned-clock-parents = <&clkc CLKID_GP0_PLL>,
+ <&clkc CLKID_VCLK2_DIV1>,
+ <&clkc CLKID_GP0_PLL>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* VPU VENC Input */
+ mipi_dsi_venc_port: port@0 {
+ reg = <0>;
+
+ mipi_dsi_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ /* DSI Output */
+ mipi_dsi_panel_port: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
watchdog: watchdog@f0d0 {
compatible = "amlogic,meson-gxbb-wdt";
reg = <0x0 0xf0d0 0x0 0x10>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts
new file mode 100644
index 000000000000..003efed529ba
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
+ * Copyright 2023 MNT Research GmbH
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-bananapi-cm4.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ model = "MNT Reform 2 with BPI-CM4 Module";
+ compatible = "mntre,reform2-cm4", "bananapi,bpi-cm4", "amlogic,a311d", "amlogic,g12b";
+ chassis-type = "laptop";
+
+ aliases {
+ ethernet0 = &ethmac;
+ i2c0 = &i2c1;
+ i2c1 = &i2c3;
+ };
+
+ hdmi_connector: hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-green {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "MNT-REFORM2-BPI-CM4";
+ audio-widgets = "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker",
+ "Microphone", "Mic Jack";
+ audio-aux-devs = <&tdmout_a>, <&tdmout_b>, <&tdmin_b>;
+ audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
+ "TDMOUT_A IN 1", "FRDDR_B OUT 0",
+ "TDMOUT_A IN 2", "FRDDR_C OUT 0",
+ "TDM_A Playback", "TDMOUT_A OUT",
+ "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT",
+ "TDMIN_B IN 1", "TDM_B Capture",
+ "TDMIN_B IN 4", "TDM_B Loopback",
+ "TODDR_A IN 1", "TDMIN_B OUT",
+ "TODDR_B IN 1", "TDMIN_B OUT",
+ "TODDR_C IN 1", "TDMIN_B OUT",
+ "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "External Speaker", "SPK_LP",
+ "External Speaker", "SPK_LN",
+ "External Speaker", "SPK_RP",
+ "External Speaker", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ dai-link-3 {
+ sound-dai = <&toddr_a>;
+ };
+
+ dai-link-4 {
+ sound-dai = <&toddr_b>;
+ };
+
+ dai-link-5 {
+ sound-dai = <&toddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-6 {
+ sound-dai = <&tdmif_a>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_A>;
+ };
+ };
+
+ /* Analog Audio */
+ dai-link-7 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&wm8960>;
+ };
+ };
+
+ /* hdmi glue */
+ dai-link-8 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+
+ reg_main_1v8: regulator-main-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&reg_main_3v3>;
+ };
+
+ reg_main_1v2: regulator-main-1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&reg_main_5v>;
+ };
+
+ reg_main_3v3: regulator-main-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_main_5v: regulator-main-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_main_usb: regulator-main-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_PWR";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_main_5v>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm_AO_ab 0 10000 0>;
+ power-supply = <&reg_main_usb>;
+ enable-gpios = <&gpio 58 GPIO_ACTIVE_HIGH>;
+ brightness-levels = <0 32 64 128 160 200 255>;
+ default-brightness-level = <6>;
+ };
+
+ panel {
+ compatible = "innolux,n125hce-gn1";
+ power-supply = <&reg_main_3v3>;
+ backlight = <&backlight>;
+ no-hpd;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&edp_bridge_out>;
+ };
+ };
+ };
+
+ clock_12288: clock_12288 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12288000>;
+ };
+};
+
+&mipi_analog_dphy {
+ status = "okay";
+};
+
+&mipi_dphy {
+ status = "okay";
+};
+
+&mipi_dsi {
+ status = "okay";
+
+ assigned-clocks = <&clkc CLKID_GP0_PLL>,
+ <&clkc CLKID_MIPI_DSI_PXCLK_SEL>,
+ <&clkc CLKID_MIPI_DSI_PXCLK>,
+ <&clkc CLKID_CTS_ENCL_SEL>,
+ <&clkc CLKID_VCLK2_SEL>;
+ assigned-clock-parents = <0>,
+ <&clkc CLKID_GP0_PLL>,
+ <0>,
+ <&clkc CLKID_VCLK2_DIV1>,
+ <&clkc CLKID_GP0_PLL>;
+ assigned-clock-rates = <936000000>,
+ <0>,
+ <936000000>,
+ <0>,
+ <0>;
+};
+
+&mipi_dsi_panel_port {
+ mipi_dsi_out: endpoint {
+ remote-endpoint = <&edp_bridge_in>;
+ };
+};
+
+&cecb_AO {
+ status = "okay";
+};
+
+&ethmac {
+ status = "okay";
+};
+
+&hdmi_tx {
+ status = "okay";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&pwm_AO_ab {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm_ao_a_pins>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ edp_bridge: bridge@2c {
+ compatible = "ti,sn65dsi86";
+ reg = <0x2c>;
+ enable-gpios = <&gpio GPIOX_10 GPIO_ACTIVE_HIGH>; // PIN_24 / GPIO8
+ vccio-supply = <&reg_main_1v8>;
+ vpll-supply = <&reg_main_1v8>;
+ vcca-supply = <&reg_main_1v2>;
+ vcc-supply = <&reg_main_1v2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ edp_bridge_in: endpoint {
+ remote-endpoint = <&mipi_dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ edp_bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ wm8960: codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&clock_12288>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+ wlf,shared-lrclk;
+ };
+
+ rtc@68 {
+ compatible = "nxp,pcf8523";
+ reg = <0x68>;
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&sd_emmc_b {
+ status = "okay";
+};
+
+&tdmif_a {
+ status = "okay";
+};
+
+&tdmout_a {
+ status = "okay";
+};
+
+&tdmif_b {
+ pinctrl-0 = <&tdm_b_dout0_pins>, <&tdm_b_fs_pins>, <&tdm_b_sclk_pins>, <&tdm_b_din1_pins>;
+ pinctrl-names = "default";
+
+ assigned-clocks = <&clkc_audio AUD_CLKID_TDM_SCLK_PAD1>,
+ <&clkc_audio AUD_CLKID_TDM_LRCLK_PAD1>;
+ assigned-clock-parents = <&clkc_audio AUD_CLKID_MST_B_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_B_LRCLK>;
+ assigned-clock-rates = <0>, <0>;
+};
+
+&tdmin_b {
+ status = "okay";
+};
+
+&toddr_a {
+ status = "okay";
+};
+
+&toddr_b {
+ status = "okay";
+};
+
+&toddr_c {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3-ts050.dtso b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3-ts050.dtso
new file mode 100644
index 000000000000..a41b4e619580
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3-ts050.dtso
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/g12a-clkc.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h>
+
+/dts-v1/;
+/plugin/;
+
+/*
+ * Enable Khadas TS050 DSI Panel + Touch Controller
+ * on Khadas VIM3 (A311D) and VIM3L (S905D3)
+ */
+
+&{/} {
+ panel_backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm_AO_cd 0 25000 0>;
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <200>;
+ };
+};
+
+&i2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ touch-controller@38 {
+ compatible = "edt,edt-ft5206";
+ reg = <0x38>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <IRQID_GPIOA_5 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio_expander 6 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <1920>;
+ status = "okay";
+ };
+};
+
+&mipi_dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ assigned-clocks = <&clkc CLKID_GP0_PLL>,
+ <&clkc CLKID_MIPI_DSI_PXCLK_SEL>,
+ <&clkc CLKID_MIPI_DSI_PXCLK>,
+ <&clkc CLKID_CTS_ENCL_SEL>,
+ <&clkc CLKID_VCLK2_SEL>;
+ assigned-clock-parents = <0>,
+ <&clkc CLKID_GP0_PLL>,
+ <0>,
+ <&clkc CLKID_VCLK2_DIV1>,
+ <&clkc CLKID_GP0_PLL>;
+ assigned-clock-rates = <960000000>,
+ <0>,
+ <960000000>,
+ <0>,
+ <0>;
+
+ panel@0 {
+ compatible = "khadas,ts050";
+ reset-gpios = <&gpio_expander 0 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&gpio_expander 1 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vcc_3v3>;
+ backlight = <&panel_backlight>;
+ reg = <0>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+ };
+ };
+};
+
+&mipi_analog_dphy {
+ status = "okay";
+};
+
+&mipi_dphy {
+ status = "okay";
+};
+
+&pwm_AO_cd {
+ pinctrl-0 = <&pwm_ao_c_6_pins>, <&pwm_ao_d_e_pins>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index ce90b35686a2..10896f9df682 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -65,10 +65,15 @@
#clock-cells = <0>;
};
- pwrc: power-controller {
- compatible = "amlogic,meson-s4-pwrc";
- #power-domain-cells = <1>;
- status = "okay";
+ firmware {
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,meson-s4-pwrc";
+ #power-domain-cells = <1>;
+ };
+ };
};
soc {
diff --git a/arch/arm64/boot/dts/apm/apm-merlin.dts b/arch/arm64/boot/dts/apm/apm-merlin.dts
index 2e8069002ec1..6e05cf1a3df6 100644
--- a/arch/arm64/boot/dts/apm/apm-merlin.dts
+++ b/arch/arm64/boot/dts/apm/apm-merlin.dts
@@ -15,7 +15,7 @@
chosen { };
- memory {
+ memory@100000000 {
device_type = "memory";
reg = < 0x1 0x00000000 0x0 0x80000000 >;
};
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 033e10e12b18..e7644cddf06f 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -15,7 +15,7 @@
chosen { };
- memory {
+ memory@100000000 {
device_type = "memory";
reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
};
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index 65ebac3082e2..ea5721ea02f0 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -211,6 +211,13 @@
};
};
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <1 12 0xff04>;
@@ -236,13 +243,6 @@
#size-cells = <2>;
ranges;
- refclk: refclk {
- compatible = "fixed-clock";
- #clock-cells = <1>;
- clock-frequency = <100000000>;
- clock-output-names = "refclk";
- };
-
pmdpll: pmdpll@170000f0 {
compatible = "apm,xgene-pcppll-v2-clock";
#clock-cells = <1>;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 988928c60f15..532401bc9c66 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -112,6 +112,13 @@
interrupts = <1 9 0xf04>; /* GIC Maintenence IRQ */
};
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <1 0 0xff08>, /* Secure Phys IRQ */
@@ -122,7 +129,7 @@
};
pmu {
- compatible = "apm,potenza-pmu", "arm,armv8-pmuv3";
+ compatible = "apm,potenza-pmu";
interrupts = <1 12 0xff04>;
};
@@ -137,12 +144,6 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
- refclk: refclk {
- compatible = "fixed-clock";
- #clock-cells = <1>;
- clock-frequency = <100000000>;
- clock-output-names = "refclk";
- };
pcppll: pcppll@17000100 {
compatible = "apm,xgene-pcppll-clock";
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index b897f5542c0a..98ed2b329ed6 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -773,14 +773,14 @@
};
};
- big_cluster_thermal_zone: big-cluster-thermal {
+ big_cluster_thermal_zone: big-cl-thermal {
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 21>;
status = "disabled";
};
- little_cluster_thermal_zone: little-cluster-thermal {
+ little_cluster_thermal_zone: little-cl-thermal {
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 22>;
diff --git a/arch/arm64/boot/dts/arm/juno-scmi.dtsi b/arch/arm64/boot/dts/arm/juno-scmi.dtsi
index 31929e2377d8..f38c5b6ef657 100644
--- a/arch/arm64/boot/dts/arm/juno-scmi.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-scmi.dtsi
@@ -84,11 +84,11 @@
thermal-sensors = <&scmi_sensors0 3>;
};
- big-cluster-thermal {
+ big-cl-thermal {
thermal-sensors = <&scmi_sensors0 21>;
};
- little-cluster-thermal {
+ little-cl-thermal {
thermal-sensors = <&scmi_sensors0 22>;
};
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index 8db4243a4947..9115c99d0dc0 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -102,7 +102,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index e01cf4f54077..8b924812322c 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -594,6 +594,7 @@
reg-names = "nand", "nand-int-base";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nand_ctlrdy";
+ brcm,wp-not-connected;
status = "disabled";
nandcs: nand@0 {
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
index 030ffa5364fb..e5b37643296b 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94908.dts
@@ -34,7 +34,6 @@
};
&nand_controller {
- brcm,wp-not-connected;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
index dec5a110f1e8..f43cfe66b6af 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
@@ -50,7 +50,7 @@
bootargs = "earlycon=uart8250,mmio32,0x66130000";
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0x00000000 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
index 1d314f17bbdd..c50df1d02797 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
@@ -47,7 +47,7 @@
bootargs = "earlycon=uart8250,mmio32,0x66130000";
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0x00000001 0x00000000>;
};
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 896d1f33b5b6..cfd9fd23a1c2 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -102,7 +102,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a57-pmu";
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index d8516ec0dae7..857fa427e195 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -142,7 +142,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a72-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index 8ad31dee11a3..cc860a80af51 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -361,24 +361,24 @@
};
pmu {
- compatible = "cavium,thunder-pmu", "arm,armv8-pmuv3";
+ compatible = "cavium,thunder-pmu";
interrupts = <1 7 4>;
};
+ refclk50mhz: refclk50mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "refclk50mhz";
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges;
- refclk50mhz: refclk50mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <50000000>;
- clock-output-names = "refclk50mhz";
- };
-
- gic0: interrupt-controller@8010,00000000 {
+ gic0: interrupt-controller@801000000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
#address-cells = <2>;
@@ -397,7 +397,7 @@
};
};
- uaa0: serial@87e0,24000000 {
+ uaa0: serial@87e024000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x87e0 0x24000000 0x0 0x1000>;
interrupts = <1 21 4>;
@@ -405,7 +405,7 @@
clock-names = "apb_pclk";
};
- uaa1: serial@87e0,25000000 {
+ uaa1: serial@87e025000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x87e0 0x25000000 0x0 0x1000>;
interrupts = <1 22 4>;
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dts b/arch/arm64/boot/dts/cavium/thunder2-99xx.dts
index d005e1e79c3d..89fc4107a0c4 100644
--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dts
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dts
@@ -14,7 +14,7 @@
model = "Cavium ThunderX2 CN99XX";
compatible = "cavium,thunderx2-cn9900", "brcm,vulcan-soc";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0x0 0x80000000>, /* 2G @ 2G */
<0x00000008 0x80000000 0x0 0x80000000>; /* 2G @ 34G */
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
index 3419bd252696..6dfe78a7d4ab 100644
--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
@@ -83,7 +83,7 @@
};
pmu {
- compatible = "brcm,vulcan-pmu", "arm,armv8-pmuv3";
+ compatible = "brcm,vulcan-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; /* PMU overflow */
};
@@ -103,7 +103,6 @@
/* ECAM at 0x3000_0000 - 0x4000_0000 */
reg = <0x0 0x30000000 0x0 0x10000000>;
- reg-names = "PCI ECAM";
/*
* PCI ranges:
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 7fbbec04bff0..0b9053b9b2b5 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1468,6 +1468,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi0_bus>;
num-cs = <1>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -1487,6 +1488,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi1_bus>;
num-cs = <1>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1506,6 +1508,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi2_bus>;
num-cs = <1>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1525,6 +1528,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi3_bus>;
num-cs = <1>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1544,6 +1548,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi4_bus>;
num-cs = <1>;
+ fifo-depth = <64>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/exynos/exynos850.dtsi b/arch/arm64/boot/dts/exynos/exynos850.dtsi
index 2ba67c3d0681..0706c8534ceb 100644
--- a/arch/arm64/boot/dts/exynos/exynos850.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos850.dtsi
@@ -93,6 +93,8 @@
compatible = "arm,cortex-a55";
reg = <0x0>;
enable-method = "psci";
+ clocks = <&cmu_cpucl0 CLK_CLUSTER0_SCLK>;
+ clock-names = "cluster0_clk";
};
cpu1: cpu@1 {
device_type = "cpu";
@@ -117,6 +119,8 @@
compatible = "arm,cortex-a55";
reg = <0x100>;
enable-method = "psci";
+ clocks = <&cmu_cpucl1 CLK_CLUSTER1_SCLK>;
+ clock-names = "cluster1_clk";
};
cpu5: cpu@101 {
device_type = "cpu";
@@ -254,6 +258,28 @@
"dout_peri_uart", "dout_peri_ip";
};
+ cmu_cpucl1: clock-controller@10800000 {
+ compatible = "samsung,exynos850-cmu-cpucl1";
+ reg = <0x10800000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>, <&cmu_top CLK_DOUT_CPUCL1_SWITCH>,
+ <&cmu_top CLK_DOUT_CPUCL1_DBG>;
+ clock-names = "oscclk", "dout_cpucl1_switch",
+ "dout_cpucl1_dbg";
+ };
+
+ cmu_cpucl0: clock-controller@10900000 {
+ compatible = "samsung,exynos850-cmu-cpucl0";
+ reg = <0x10900000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>, <&cmu_top CLK_DOUT_CPUCL0_SWITCH>,
+ <&cmu_top CLK_DOUT_CPUCL0_DBG>;
+ clock-names = "oscclk", "dout_cpucl0_switch",
+ "dout_cpucl0_dbg";
+ };
+
cmu_g3d: clock-controller@11400000 {
compatible = "samsung,exynos850-cmu-g3d";
reg = <0x11400000 0x8000>;
diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
index c871a2f49fda..0248329da49a 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
@@ -435,6 +435,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -526,6 +527,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -617,6 +619,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -708,6 +711,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -799,6 +803,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -890,6 +895,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -981,6 +987,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <256>;
status = "disabled";
};
@@ -1072,6 +1079,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1163,6 +1171,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1254,6 +1263,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1345,6 +1355,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
@@ -1434,6 +1445,7 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ fifo-depth = <64>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
index 6ccade2c8cb4..5e8ffe065081 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
+++ b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
@@ -29,8 +29,8 @@
gpio-keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
pinctrl-0 = <&key_voldown>, <&key_volup>, <&key_power>;
+ pinctrl-names = "default";
button-vol-down {
label = "KEY_VOLUMEDOWN";
@@ -53,6 +53,21 @@
wakeup-source;
};
};
+
+ /* TODO: Remove this once PMIC is implemented */
+ reg_placeholder: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "placeholder_reg";
+ };
+
+ /* TODO: Remove this once S2MPG11 slave PMIC is implemented */
+ ufs_0_fixed_vcc_reg: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "ufs-vcc";
+ gpio = <&gpp0 1 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ enable-active-high;
+ };
};
&ext_24_5m {
@@ -103,8 +118,33 @@
};
&serial_0 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_bus>;
+ status = "okay";
+};
+
+&ufs_0 {
+ status = "okay";
+ vcc-supply = <&ufs_0_fixed_vcc_reg>;
+};
+
+&ufs_0_phy {
+ status = "okay";
+};
+
+&usbdrd31 {
+ status = "okay";
+ vdd10-supply = <&reg_placeholder>;
+ vdd33-supply = <&reg_placeholder>;
+};
+
+&usbdrd31_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+ role-switch-default-mode = "peripheral";
+ maximum-speed = "super-speed-plus";
+ status = "okay";
+};
+
+&usbdrd31_phy {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index 55e6bcb3689e..a66e996666b8 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -370,12 +370,398 @@
pinctrl_peric0: pinctrl@10840000 {
compatible = "google,gs101-pinctrl";
reg = <0x10840000 0x00001000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK>;
+ clock-names = "pclk";
interrupts = <GIC_SPI 625 IRQ_TYPE_LEVEL_HIGH 0>;
};
+ usi1: usi@109000c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109000c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1000>;
+ status = "disabled";
+
+ hsi2c_1: i2c@10900000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10900000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 635 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c1_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_1: serial@10900000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10900000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 635 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart1_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_1: spi@10900000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10900000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 635 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi1_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi2: usi@109100c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109100c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1004>;
+ status = "disabled";
+
+ hsi2c_2: i2c@10910000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10910000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c2_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_2: serial@10910000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10910000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart2_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_2: spi@10910000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10910000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi2_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi3: usi@109200c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109200c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1008>;
+ status = "disabled";
+
+ hsi2c_3: i2c@10920000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10920000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 637 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c3_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_3: serial@10920000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10920000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 637 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart3_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_3: spi@10920000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10920000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 637 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi3_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi4: usi@109300c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109300c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x100c>;
+ status = "disabled";
+
+ hsi2c_4: i2c@10930000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10930000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 638 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c4_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_4: serial@10930000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10930000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 638 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart4_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_4: spi@10930000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10930000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 638 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi4_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi5: usi@109400c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109400c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1010>;
+ status = "disabled";
+
+ hsi2c_5: i2c@10940000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10940000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 639 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c5_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_5: serial@10940000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10940000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 639 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart5_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_5: spi@10940000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10940000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 639 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi5_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi6: usi@109500c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109500c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1014>;
+ status = "disabled";
+
+ hsi2c_6: i2c@10950000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10950000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c6_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_6: serial@10950000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10950000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart6_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_6: spi@10950000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10950000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi6_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi7: usi@109600c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x109600c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1018>;
+ status = "disabled";
+
+ hsi2c_7: i2c@10960000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10960000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c7_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_7: serial@10960000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10960000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart7_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_7: spi@10960000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10960000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi7_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
usi8: usi@109700c0 {
- compatible = "google,gs101-usi",
- "samsung,exynos850-usi";
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
reg = <0x109700c0 0x20>;
ranges;
#address-cells = <1>;
@@ -393,18 +779,44 @@
interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
#address-cells = <1>;
#size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&hsi2c8_bus>;
clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7>,
<&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7>;
clock-names = "hsi2c", "hsi2c_pclk";
+ pinctrl-0 = <&hsi2c8_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_8: serial@10970000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10970000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart8_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_8: spi@10970000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10970000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi8_bus>;
+ pinctrl-names = "default";
status = "disabled";
};
};
usi_uart: usi@10a000c0 {
- compatible = "google,gs101-usi",
- "samsung,exynos850-usi";
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
reg = <0x10a000c0 0x20>;
ranges;
#address-cells = <1>;
@@ -419,16 +831,72 @@
serial_0: serial@10a00000 {
compatible = "google,gs101-uart";
reg = <0x10a00000 0xc0>;
- interrupts = <GIC_SPI 634
- IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 634 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0>,
<&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>;
clock-names = "uart", "clk_uart_baud0";
+ pinctrl-0 = <&uart0_bus>;
+ pinctrl-names = "default";
samsung,uart-fifosize = <256>;
status = "disabled";
};
};
+ usi14: usi@10a200c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10a200c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric0 0x1028>;
+ status = "disabled";
+
+ hsi2c_14: i2c@10a20000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10a20000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 643 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c14_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_14: serial@10a20000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10a20000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 643 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart14_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_14: spi@10a20000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10a20000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 643 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi14_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
cmu_peric1: clock-controller@10c00000 {
compatible = "google,gs101-cmu-peric1";
reg = <0x10c00000 0x4000>;
@@ -448,12 +916,233 @@
pinctrl_peric1: pinctrl@10c40000 {
compatible = "google,gs101-pinctrl";
reg = <0x10c40000 0x00001000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK>;
+ clock-names = "pclk";
interrupts = <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH 0>;
};
+ usi0: usi@10d100c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10d100c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric1 0x1000>;
+ status = "disabled";
+
+ hsi2c_0: i2c@10d10000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10d10000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 651 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c0_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_usi0: serial@10d10000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d10000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 651 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart0_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_0: spi@10d10000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d10000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 651 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi0_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi9: usi@10d200c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10d200c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric1 0x1004>;
+ status = "disabled";
+
+ hsi2c_9: i2c@10d20000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10d20000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 652 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c9_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_9: serial@10d20000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d20000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 652 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart9_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_9: spi@10d20000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d20000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 652 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi9_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi10: usi@10d300c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10d300c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric1 0x1008>;
+ status = "disabled";
+
+ hsi2c_10: i2c@10d30000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10d30000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 653 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c10_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_10: serial@10d30000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d30000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 653 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart10_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_10: spi@10d30000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d30000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 653 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi10_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi11: usi@10d400c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10d400c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric1 0x100c>;
+ status = "disabled";
+
+ hsi2c_11: i2c@10d40000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10d40000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 654 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c11_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_11: serial@10d40000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d40000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 654 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart11_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_11: spi@10d40000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d40000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 654 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi11_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
usi12: usi@10d500c0 {
- compatible = "google,gs101-usi",
- "samsung,exynos850-usi";
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
reg = <0x10d500c0 0x20>;
ranges;
#address-cells = <1>;
@@ -471,11 +1160,148 @@
interrupts = <GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH 0>;
#address-cells = <1>;
#size-cells = <0>;
- pinctrl-0 = <&hsi2c12_bus>;
- pinctrl-names = "default";
clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5>,
<&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5>;
clock-names = "hsi2c", "hsi2c_pclk";
+ pinctrl-0 = <&hsi2c12_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_12: serial@10d50000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d50000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart12_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_12: spi@10d50000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d50000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi12_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi13: usi@10d600c0 {
+ compatible = "google,gs101-usi", "samsung,exynos850-usi";
+ reg = <0x10d600c0 0x20>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6>;
+ clock-names = "pclk", "ipclk";
+ samsung,sysreg = <&sysreg_peric1 0x1014>;
+ status = "disabled";
+
+ hsi2c_13: i2c@10d60000 {
+ compatible = "google,gs101-hsi2c",
+ "samsung,exynosautov9-hsi2c";
+ reg = <0x10d60000 0xc0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&hsi2c13_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_13: serial@10d60000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10d60000 0xc0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart13_bus_single>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_13: spi@10d60000 {
+ compatible = "google,gs101-spi";
+ reg = <0x10d60000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spi13_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ cmu_hsi0: clock-controller@11000000 {
+ compatible = "google,gs101-cmu-hsi0";
+ reg = <0x11000000 0x4000>;
+ #clock-cells = <1>;
+
+ clocks = <&ext_24_5m>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_BUS>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_DPGTC>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_USB31DRD>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_USBDPDBG>;
+ clock-names = "oscclk", "bus", "dpgtc", "usb31drd",
+ "usbdpdbg";
+ };
+
+ usbdrd31_phy: phy@11100000 {
+ compatible = "google,gs101-usb31drd-phy";
+ reg = <0x11100000 0x0100>,
+ <0x110f0000 0x0800>,
+ <0x110e0000 0x2800>;
+ reg-names = "phy", "pcs", "pma";
+ clocks = <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK>;
+ clock-names = "phy", "ref", "ctrl_aclk", "ctrl_pclk", "scl_pclk";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <1>;
+ status = "disabled";
+ };
+
+ usbdrd31: usb@11110000 {
+ compatible = "google,gs101-dwusb3";
+ clocks = <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK>,
+ <&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK>;
+ clock-names = "bus_early", "susp_clk", "link_aclk", "link_pclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x11110000 0x10000>;
+ status = "disabled";
+
+ usbdrd31_dwc3: usb@0 {
+ compatible = "snps,dwc3";
+ clocks = <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40>;
+ clock-names = "ref";
+ reg = <0x0 0x10000>;
+ interrupts = <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&usbdrd31_phy 0>, <&usbdrd31_phy 1>;
+ phy-names = "usb2-phy", "usb3-phy";
status = "disabled";
};
};
@@ -483,15 +1309,74 @@
pinctrl_hsi1: pinctrl@11840000 {
compatible = "google,gs101-pinctrl";
reg = <0x11840000 0x00001000>;
+ /* TODO: update once support for this CMU exists */
+ clocks = <0>;
+ clock-names = "pclk";
interrupts = <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH 0>;
};
+ cmu_hsi2: clock-controller@14400000 {
+ compatible = "google,gs101-cmu-hsi2";
+ reg = <0x14400000 0x4000>;
+ #clock-cells = <1>;
+ clocks = <&ext_24_5m>,
+ <&cmu_top CLK_DOUT_CMU_HSI2_BUS>,
+ <&cmu_top CLK_DOUT_CMU_HSI2_PCIE>,
+ <&cmu_top CLK_DOUT_CMU_HSI2_UFS_EMBD>,
+ <&cmu_top CLK_DOUT_CMU_HSI2_MMC_CARD>;
+ clock-names = "oscclk", "bus", "pcie", "ufs", "mmc";
+ };
+
+ sysreg_hsi2: syscon@14420000 {
+ compatible = "google,gs101-hsi2-sysreg", "syscon";
+ reg = <0x14420000 0x10000>;
+ clocks = <&cmu_hsi2 CLK_GOUT_HSI2_SYSREG_HSI2_PCLK>;
+ };
+
pinctrl_hsi2: pinctrl@14440000 {
compatible = "google,gs101-pinctrl";
reg = <0x14440000 0x00001000>;
+ clocks = <&cmu_hsi2 CLK_GOUT_HSI2_GPIO_HSI2_PCLK>;
+ clock-names = "pclk";
interrupts = <GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH 0>;
};
+ ufs_0: ufs@14700000 {
+ compatible = "google,gs101-ufs";
+ reg = <0x14700000 0x200>,
+ <0x14701100 0x200>,
+ <0x14780000 0xa000>,
+ <0x14600000 0x100>;
+ reg-names = "hci", "vs_hci", "unipro", "ufsp";
+ interrupts = <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cmu_hsi2 CLK_GOUT_HSI2_UFS_EMBD_I_ACLK>,
+ <&cmu_hsi2 CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO>,
+ <&cmu_hsi2 CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK>,
+ <&cmu_hsi2 CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK>,
+ <&cmu_hsi2 CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK>,
+ <&cmu_hsi2 CLK_GOUT_HSI2_SYSREG_HSI2_PCLK>;
+ clock-names = "core_clk", "sclk_unipro_main", "fmp",
+ "aclk", "pclk", "sysreg";
+ freq-table-hz = <0 0>, <0 0>, <0 0>, <0 0>, <0 0>, <0 0>;
+ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ pinctrl-names = "default";
+ phys = <&ufs_0_phy>;
+ phy-names = "ufs-phy";
+ samsung,sysreg = <&sysreg_hsi2 0x710>;
+ status = "disabled";
+ };
+
+ ufs_0_phy: phy@14704000 {
+ compatible = "google,gs101-ufs-phy";
+ reg = <0x14704000 0x3000>;
+ reg-names = "phy-pma";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <0>;
+ clocks = <&ext_24_5m>;
+ clock-names = "ref_clk";
+ status = "disabled";
+ };
+
cmu_apm: clock-controller@17400000 {
compatible = "google,gs101-cmu-apm";
reg = <0x17400000 0x8000>;
@@ -514,6 +1399,8 @@
pinctrl_gpio_alive: pinctrl@174d0000 {
compatible = "google,gs101-pinctrl";
reg = <0x174d0000 0x00001000>;
+ clocks = <&cmu_apm CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK>;
+ clock-names = "pclk";
wakeup-interrupt-controller {
compatible = "google,gs101-wakeup-eint",
@@ -525,6 +1412,8 @@
pinctrl_far_alive: pinctrl@174e0000 {
compatible = "google,gs101-pinctrl";
reg = <0x174e0000 0x00001000>;
+ clocks = <&cmu_apm CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK>;
+ clock-names = "pclk";
wakeup-interrupt-controller {
compatible = "google,gs101-wakeup-eint",
@@ -536,11 +1425,17 @@
pinctrl_gsactrl: pinctrl@17940000 {
compatible = "google,gs101-pinctrl";
reg = <0x17940000 0x00001000>;
+ /* TODO: update once support for this CMU exists */
+ clocks = <0>;
+ clock-names = "pclk";
};
pinctrl_gsacore: pinctrl@17a80000 {
compatible = "google,gs101-pinctrl";
reg = <0x17a80000 0x00001000>;
+ /* TODO: update once support for this CMU exists */
+ clocks = <0>;
+ clock-names = "pclk";
};
cmu_top: clock-controller@1e080000 {
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 045250d0a040..bd443c2bc5a4 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -9,6 +9,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-kbox-a-230-ls.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var1.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var2.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var3.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var3-ads2.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var4.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds.dtb
@@ -98,6 +99,10 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-tqmlx2160a-mblx2160a-14-11-x.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-tqmlx2160a-mblx2160a-14-8-x.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-tqmlx2160a-mblx2160a-14-7-x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-aster.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-eval-v3.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-iris-v2.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8dx-colibri-iris.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8dxl-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8dxp-tqma8xdp-mba8xx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
@@ -166,6 +171,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-dhcom-pdk3.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-icore-mx8mp-edimm2.2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-msc-sm2s-ep1.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-navqp.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb
@@ -259,4 +265,5 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw74xx-rpidsi.dtb
dtb-$(CONFIG_ARCH_S32) += s32g274a-evb.dtb
dtb-$(CONFIG_ARCH_S32) += s32g274a-rdb2.dtb
+dtb-$(CONFIG_ARCH_S32) += s32g399a-rdb3.dtb
dtb-$(CONFIG_ARCH_S32) += s32v234-evb.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index fe9093b3c02e..a0f7bbd691a0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -81,7 +81,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
index ed4e69e87e30..195bdbafdf7c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
@@ -10,7 +10,7 @@
/dts-v1/;
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
-#include "fsl-ls1028a-kontron-sl28.dts"
+#include "fsl-ls1028a-kontron-sl28-var3.dts"
/ {
model = "Kontron SMARC-sAL28 (Single PHY) on SMARC Eval 2.0 carrier";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3.dts
new file mode 100644
index 000000000000..08851ca407a8
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board.
+ *
+ * This is for the network variant 3, which has one ethernet port.
+ *
+ * Copyright (C) 2024 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1028a-kontron-sl28.dts"
+
+/ {
+ model = "Kontron SMARC-sAL28 (Single PHY)";
+ compatible = "kontron,sl28-var3", "kontron,sl28", "fsl,ls1028a";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index ae534c23b970..70b8731029c4 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -1099,21 +1099,25 @@
0xc2000000 0x1 0xf8230000 0x1 0xf8230000 0x0 0x020000
/* BAR4 (PF5) - non-prefetchable memory */
0x82000000 0x1 0xfc000000 0x1 0xfc000000 0x0 0x400000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
enetc_port0: ethernet@0,0 {
- compatible = "fsl,enetc";
+ compatible = "pci1957,e100", "fsl,enetc";
reg = <0x000000 0 0 0 0>;
status = "disabled";
};
enetc_port1: ethernet@0,1 {
- compatible = "fsl,enetc";
+ compatible = "pci1957,e100", "fsl,enetc";
reg = <0x000100 0 0 0 0>;
status = "disabled";
};
enetc_port2: ethernet@0,2 {
- compatible = "fsl,enetc";
+ compatible = "pci1957,e100", "fsl,enetc";
reg = <0x000200 0 0 0 0>;
phy-mode = "internal";
status = "disabled";
@@ -1126,14 +1130,14 @@
};
enetc_mdio_pf3: mdio@0,3 {
- compatible = "fsl,enetc-mdio";
+ compatible = "pci1957,ee01", "fsl,enetc-mdio";
reg = <0x000300 0 0 0 0>;
#address-cells = <1>;
#size-cells = <0>;
};
ethernet@0,4 {
- compatible = "fsl,enetc-ptp";
+ compatible = "pci1957,ee02", "fsl,enetc-ptp";
reg = <0x000400 0 0 0 0>;
clocks = <&clockgen QORIQ_CLK_HWACCEL 3>;
little-endian;
@@ -1143,7 +1147,7 @@
mscc_felix: ethernet-switch@0,5 {
reg = <0x000500 0 0 0 0>;
/* IEP INT_B */
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <2>;
status = "disabled";
mscc_felix_ports: ports {
@@ -1201,7 +1205,7 @@
};
enetc_port3: ethernet@0,6 {
- compatible = "fsl,enetc";
+ compatible = "pci1957,e100", "fsl,enetc";
reg = <0x000600 0 0 0 0>;
phy-mode = "internal";
status = "disabled";
@@ -1216,7 +1220,7 @@
rcec@1f,0 {
reg = <0x00f800 0 0 0 0>;
/* IEP INT_A */
- interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <1>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index d333b773bc45..8ee6d8c0ef61 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -276,7 +276,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <0 106 0x4>,
<0 107 0x4>,
<0 95 0x4>,
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 1aa38ed09aa4..8352197cea6f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -12,6 +12,13 @@
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include "fsl-ls208xa.dtsi"
+/ {
+ pmu {
+ compatible = "arm,cortex-a57-pmu";
+ interrupts = <1 7 0x8>; /* PMU PPI, level-low type */
+ };
+};
+
&cpu {
cpu0: cpu@0 {
device_type = "cpu";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
index 8581ea55d254..245bbd615c81 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -12,6 +12,13 @@
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include "fsl-ls208xa.dtsi"
+/ {
+ pmu {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <1 7 0x8>; /* PMU PPI, level-low type */
+ };
+};
+
&cpu {
cpu0: cpu@0 {
device_type = "cpu";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 0b7292835906..ccba0a135b24 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -247,11 +247,6 @@
<1 10 4>; /* Hypervisor PPI, active-low */
};
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
- };
-
psci {
compatible = "arm,psci-0.2";
method = "smc";
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index e665c629e1a1..96055593204a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -748,7 +748,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c0_scl>;
+ pinctrl-1 = <&i2c0_scl_gpio>;
+ scl-gpios = <&gpio0 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -761,6 +764,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c1_scl>;
+ pinctrl-1 = <&i2c1_scl_gpio>;
+ scl-gpios = <&gpio0 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -773,6 +780,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c2_scl>;
+ pinctrl-1 = <&i2c2_scl_gpio>;
+ scl-gpios = <&gpio0 29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -785,6 +796,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c3_scl>;
+ pinctrl-1 = <&i2c3_scl_gpio>;
+ scl-gpios = <&gpio0 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -797,7 +812,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c4_scl>;
+ pinctrl-1 = <&i2c4_scl_gpio>;
+ scl-gpios = <&gpio0 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -810,6 +828,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c5_scl>;
+ pinctrl-1 = <&i2c5_scl_gpio>;
+ scl-gpios = <&gpio0 23 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -822,6 +844,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c6_scl>;
+ pinctrl-1 = <&i2c6_scl_gpio>;
+ scl-gpios = <&gpio1 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -834,6 +860,10 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c7_scl>;
+ pinctrl-1 = <&i2c7_scl_gpio>;
+ scl-gpios = <&gpio1 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
@@ -1669,6 +1699,80 @@
};
};
+ pinmux_i2crv: pinmux@70010012c {
+ compatible = "pinctrl-single";
+ reg = <0x00000007 0x0010012c 0x0 0xc>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pinctrl-single,bit-per-mux;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x7>;
+
+ i2c1_scl: i2c1-scl-pins {
+ pinctrl-single,bits = <0x0 0 0x7>;
+ };
+
+ i2c1_scl_gpio: i2c1-scl-gpio-pins {
+ pinctrl-single,bits = <0x0 0x1 0x7>;
+ };
+
+ i2c2_scl: i2c2-scl-pins {
+ pinctrl-single,bits = <0x0 0 (0x7 << 3)>;
+ };
+
+ i2c2_scl_gpio: i2c2-scl-gpio-pins {
+ pinctrl-single,bits = <0x0 (0x1 << 3) (0x7 << 3)>;
+ };
+
+ i2c3_scl: i2c3-scl-pins {
+ pinctrl-single,bits = <0x0 0 (0x7 << 6)>;
+ };
+
+ i2c3_scl_gpio: i2c3-scl-gpio-pins {
+ pinctrl-single,bits = <0x0 (0x1 << 6) (0x7 << 6)>;
+ };
+
+ i2c4_scl: i2c4-scl-pins {
+ pinctrl-single,bits = <0x0 0 (0x7 << 9)>;
+ };
+
+ i2c4_scl_gpio: i2c4-scl-gpio-pins {
+ pinctrl-single,bits = <0x0 (0x1 << 9) (0x7 << 9)>;
+ };
+
+ i2c5_scl: i2c5-scl-pins {
+ pinctrl-single,bits = <0x0 0 (0x7 << 12)>;
+ };
+
+ i2c5_scl_gpio: i2c5-scl-gpio-pins {
+ pinctrl-single,bits = <0x0 (0x1 << 12) (0x7 << 12)>;
+ };
+
+ i2c6_scl: i2c6-scl-pins {
+ pinctrl-single,bits = <0x4 0x2 0x7>;
+ };
+
+ i2c6_scl_gpio: i2c6-scl-gpio-pins {
+ pinctrl-single,bits = <0x4 0x1 0x7>;
+ };
+
+ i2c7_scl: i2c7-scl-pins {
+ pinctrl-single,bits = <0x4 0x2 0x7>;
+ };
+
+ i2c7_scl_gpio: i2c7-scl-gpio-pins {
+ pinctrl-single,bits = <0x4 0x1 0x7>;
+ };
+
+ i2c0_scl: i2c0-scl-pins {
+ pinctrl-single,bits = <0x8 0 (0x7 << 10)>;
+ };
+
+ i2c0_scl_gpio: i2c0-scl-gpio-pins {
+ pinctrl-single,bits = <0x8 (0x1 << 10) (0x7 << 10)>;
+ };
+ };
+
fsl_mc: fsl-mc@80c000000 {
compatible = "fsl,qoriq-mc";
reg = <0x00000008 0x0c000000 0 0x40>,
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2162a-clearfog.dts b/arch/arm64/boot/dts/freescale/fsl-lx2162a-clearfog.dts
index 9f88583aa25e..eafef8718a0f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2162a-clearfog.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2162a-clearfog.dts
@@ -25,6 +25,7 @@
i2c7 = &mpcie1_i2c;
i2c8 = &mpcie0_i2c;
i2c9 = &pcieclk_i2c;
+ i2c10 = &i2c5;
mmc0 = &esdhc0;
mmc1 = &esdhc1;
serial0 = &uart0;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2162a-sr-som.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2162a-sr-som.dtsi
index 0580ea30cfbc..e914291e63a1 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2162a-sr-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2162a-sr-som.dtsi
@@ -71,3 +71,12 @@
reg = <0x54>;
};
};
+
+&i2c5 {
+ status = "okay";
+
+ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
index 07afeb78ed56..897cbb7b6742 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/imx8-clock.h>
#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/dma/fsl-edma.h>
#include <dt-bindings/firmware/imx/rsrc.h>
audio_ipg_clk: clock-audio-ipg {
@@ -119,13 +120,96 @@ audio_subsys: bus@59000000 {
#size-cells = <1>;
ranges = <0x59000000 0x0 0x59000000 0x1000000>;
+ asrc0: asrc@59000000 {
+ compatible = "fsl,imx8qm-asrc";
+ reg = <0x59000000 0x10000>;
+ interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&asrc0_lpcg IMX_LPCG_CLK_0>,
+ <&asrc0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_4>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_4>,
+ <&acm IMX_ADMA_ACM_AUD_CLK0_SEL>,
+ <&acm IMX_ADMA_ACM_AUD_CLK1_SEL>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "mem", "ipg",
+ "asrck_0", "asrck_1", "asrck_2", "asrck_3",
+ "asrck_4", "asrck_5", "asrck_6", "asrck_7",
+ "asrck_8", "asrck_9", "asrck_a", "asrck_b",
+ "asrck_c", "asrck_d", "asrck_e", "asrck_f",
+ "spba";
+ dmas = <&edma0 0 0 0>,
+ <&edma0 1 0 0>,
+ <&edma0 2 0 0>,
+ <&edma0 3 0 FSL_EDMA_RX>,
+ <&edma0 4 0 FSL_EDMA_RX>,
+ <&edma0 5 0 FSL_EDMA_RX>;
+ /* tx* is the output channel of the ASRC; it is an rx channel for the eDMA */
+ dma-names = "rxa", "rxb", "rxc", "txa", "txb", "txc";
+ fsl,asrc-rate = <8000>;
+ fsl,asrc-width = <16>;
+ fsl,asrc-clk-map = <0>;
+ power-domains = <&pd IMX_SC_R_ASRC_0>;
+ status = "disabled";
+ };
+
+ esai0: esai@59010000 {
+ compatible = "fsl,imx8qm-esai";
+ reg = <0x59010000 0x10000>;
+ interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&esai0_lpcg IMX_LPCG_CLK_4>,
+ <&esai0_lpcg IMX_LPCG_CLK_0>,
+ <&esai0_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>;
+ clock-names = "core", "extal", "fsys", "spba";
+ dmas = <&edma0 6 0 FSL_EDMA_RX>, <&edma0 7 0 0>;
+ dma-names = "rx", "tx";
+ power-domains = <&pd IMX_SC_R_ESAI_0>;
+ status = "disabled";
+ };
+
+ spdif0: spdif@59020000 {
+ compatible = "fsl,imx8qm-spdif";
+ reg = <0x59020000 0x10000>;
+ interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>, /* rx */
+ <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>; /* tx */
+ clocks = <&spdif0_lpcg IMX_LPCG_CLK_4>, /* core */
+ <&clk_dummy>, /* rxtx0 */
+ <&spdif0_lpcg IMX_LPCG_CLK_0>, /* rxtx1 */
+ <&clk_dummy>, /* rxtx2 */
+ <&clk_dummy>, /* rxtx3 */
+ <&clk_dummy>, /* rxtx4 */
+ <&audio_ipg_clk>, /* rxtx5 */
+ <&clk_dummy>, /* rxtx6 */
+ <&clk_dummy>, /* rxtx7 */
+ <&clk_dummy>; /* spba */
+ clock-names = "core", "rxtx0", "rxtx1", "rxtx2", "rxtx3", "rxtx4",
+ "rxtx5", "rxtx6", "rxtx7", "spba";
+ dmas = <&edma0 8 0 (FSL_EDMA_MULTI_FIFO | FSL_EDMA_RX)>,
+ <&edma0 9 0 FSL_EDMA_MULTI_FIFO>;
+ dma-names = "rx", "tx";
+ power-domains = <&pd IMX_SC_R_SPDIF_0>;
+ status = "disabled";
+ };
+
sai0: sai@59040000 {
compatible = "fsl,imx8qm-sai";
reg = <0x59040000 0x10000>;
interrupts = <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sai0_lpcg 1>,
+ clocks = <&sai0_lpcg IMX_LPCG_CLK_4>,
<&clk_dummy>,
- <&sai0_lpcg 0>,
+ <&sai0_lpcg IMX_LPCG_CLK_0>,
<&clk_dummy>,
<&clk_dummy>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
@@ -139,9 +223,9 @@ audio_subsys: bus@59000000 {
compatible = "fsl,imx8qm-sai";
reg = <0x59050000 0x10000>;
interrupts = <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sai1_lpcg 1>,
+ clocks = <&sai1_lpcg IMX_LPCG_CLK_4>,
<&clk_dummy>,
- <&sai1_lpcg 0>,
+ <&sai1_lpcg IMX_LPCG_CLK_0>,
<&clk_dummy>,
<&clk_dummy>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
@@ -155,9 +239,9 @@ audio_subsys: bus@59000000 {
compatible = "fsl,imx8qm-sai";
reg = <0x59060000 0x10000>;
interrupts = <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sai2_lpcg 1>,
+ clocks = <&sai2_lpcg IMX_LPCG_CLK_4>,
<&clk_dummy>,
- <&sai2_lpcg 0>,
+ <&sai2_lpcg IMX_LPCG_CLK_0>,
<&clk_dummy>,
<&clk_dummy>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
@@ -171,9 +255,9 @@ audio_subsys: bus@59000000 {
compatible = "fsl,imx8qm-sai";
reg = <0x59070000 0x10000>;
interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sai3_lpcg 1>,
+ clocks = <&sai3_lpcg IMX_LPCG_CLK_4>,
<&clk_dummy>,
- <&sai3_lpcg 0>,
+ <&sai3_lpcg IMX_LPCG_CLK_0>,
<&clk_dummy>,
<&clk_dummy>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
@@ -239,6 +323,40 @@ audio_subsys: bus@59000000 {
<&pd IMX_SC_R_DMA_0_CH23>;
};
+ asrc0_lpcg: clock-controller@59400000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59400000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+ clock-output-names = "asrc0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_ASRC_0>;
+ };
+
+ esai0_lpcg: clock-controller@59410000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59410000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_ESAI0_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "esai0_lpcg_extal_clk",
+ "esai0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_ESAI_0>;
+ };
+
+ spdif0_lpcg: clock-controller@59420000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59420000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SPDIF0_TX_CLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "spdif0_lpcg_tx_clk",
+ "spdif0_lpcg_gclkw";
+ power-domains = <&pd IMX_SC_R_SPDIF_0>;
+ };
+
sai0_lpcg: clock-controller@59440000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x59440000 0x10000>;
@@ -333,6 +451,101 @@ audio_subsys: bus@59000000 {
status = "disabled";
};
+ asrc1: asrc@59800000 {
+ compatible = "fsl,imx8qm-asrc";
+ reg = <0x59800000 0x10000>;
+ interrupts = <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&asrc1_lpcg IMX_LPCG_CLK_4>,
+ <&asrc1_lpcg IMX_LPCG_CLK_4>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>,
+ <&acm IMX_ADMA_ACM_AUD_CLK0_SEL>,
+ <&acm IMX_ADMA_ACM_AUD_CLK1_SEL>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "mem", "ipg",
+ "asrck_0", "asrck_1", "asrck_2", "asrck_3",
+ "asrck_4", "asrck_5", "asrck_6", "asrck_7",
+ "asrck_8", "asrck_9", "asrck_a", "asrck_b",
+ "asrck_c", "asrck_d", "asrck_e", "asrck_f",
+ "spba";
+ dmas = <&edma1 0 0 0>,
+ <&edma1 1 0 0>,
+ <&edma1 2 0 0>,
+ <&edma1 3 0 FSL_EDMA_RX>,
+ <&edma1 4 0 FSL_EDMA_RX>,
+ <&edma1 5 0 FSL_EDMA_RX>;
+ /* tx* is the output channel of the ASRC; it is an rx channel for the eDMA */
+ dma-names = "rxa", "rxb", "rxc", "txa", "txb", "txc";
+ fsl,asrc-rate = <8000>;
+ fsl,asrc-width = <16>;
+ fsl,asrc-clk-map = <1>;
+ power-domains = <&pd IMX_SC_R_ASRC_1>;
+ status = "disabled";
+ };
+
+ sai4: sai@59820000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59820000 0x10000>;
+ interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai4_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&sai4_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma1 8 0 FSL_EDMA_RX>, <&edma1 9 0 0>;
+ dma-names = "rx", "tx";
+ power-domains = <&pd IMX_SC_R_SAI_4>;
+ status = "disabled";
+ };
+
+ sai5: sai@59830000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59830000 0x10000>;
+ interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai5_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&sai5_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma1 10 0 0>;
+ dma-names = "tx";
+ power-domains = <&pd IMX_SC_R_SAI_5>;
+ status = "disabled";
+ };
+
+ amix: amix@59840000 {
+ compatible = "fsl,imx8qm-audmix";
+ reg = <0x59840000 0x10000>;
+ clocks = <&amix_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_AMIX>;
+ dais = <&sai4>, <&sai5>;
+ status = "disabled";
+ };
+
+ mqs: mqs@59850000 {
+ compatible = "fsl,imx8qm-mqs";
+ reg = <0x59850000 0x10000>;
+ clocks = <&mqs0_lpcg IMX_LPCG_CLK_4>, <&mqs0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk", "core";
+ power-domains = <&pd IMX_SC_R_MQS_0>;
+ status = "disabled";
+ };
+
edma1: dma-controller@599f0000 {
compatible = "fsl,imx8qm-edma";
reg = <0x599f0000 0xc0000>;
@@ -481,4 +694,60 @@ audio_subsys: bus@59000000 {
"sai3_rx_bclk",
"sai4_rx_bclk";
};
+
+ asrc1_lpcg: clock-controller@59c00000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c00000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+ clock-output-names = "asrc1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_ASRC_1>;
+ };
+
+ sai4_lpcg: clock-controller@59c20000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c20000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI4_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "sai4_lpcg_mclk",
+ "sai4_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_4>;
+ };
+
+ sai5_lpcg: clock-controller@59c30000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c30000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI5_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "sai5_lpcg_mclk",
+ "sai5_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_5>;
+ };
+
+ amix_lpcg: clock-controller@59c40000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c40000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "amix_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_AMIX>;
+ };
+
+ mqs0_lpcg: clock-controller@59c50000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c50000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_MQS_TX_CLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "mqs0_lpcg_mclk",
+ "mqs0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MQS_0>;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-cm40.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-cm40.dtsi
new file mode 100644
index 000000000000..92752c0c5eb5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-cm40.dtsi
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+cm40_ipg_clk: clock-cm40-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <132000000>;
+ clock-output-names = "cm40_ipg_clk";
+};
+
+cm40_subsys: bus@34000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x34000000 0x0 0x34000000 0x4000000>;
+ interrupt-parent = <&cm40_intmux>;
+
+ cm40_lpuart: serial@37220000 {
+ compatible = "fsl,imx8qxp-lpuart";
+ reg = <0x37220000 0x1000>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cm40_uart_lpcg IMX_LPCG_CLK_1>, <&cm40_uart_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "baud";
+ assigned-clocks = <&clk IMX_SC_R_M4_0_UART IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_M4_0_UART>;
+ status = "disabled";
+ };
+
+ cm40_i2c: i2c@37230000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x37230000 0x1000>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cm40_i2c_lpcg IMX_LPCG_CLK_0>,
+ <&cm40_i2c_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_M4_0_I2C IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_M4_0_I2C>;
+ status = "disabled";
+ };
+
+ cm40_intmux: intmux@37400000 {
+ compatible = "fsl,imx-intmux";
+ reg = <0x37400000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&cm40_ipg_clk>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_M4_0_INTMUX>;
+ status = "disabled";
+ };
+
+ cm40_uart_lpcg: clock-controller@37620000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x37620000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_M4_0_UART IMX_SC_PM_CLK_PER>,
+ <&cm40_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>;
+ clock-output-names = "cm40_lpcg_uart_clk",
+ "cm40_lpcg_uart_ipg_clk";
+ power-domains = <&pd IMX_SC_R_M4_0_UART>;
+ };
+
+ cm40_i2c_lpcg: clock-controller@37630000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x37630000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_M4_0_I2C IMX_SC_PM_CLK_PER>,
+ <&cm40_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "cm40_lpcg_i2c_clk",
+ "cm40_lpcg_i2c_ipg_clk";
+ power-domains = <&pd IMX_SC_R_M4_0_I2C>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
index 3c42240e78e2..4aaf5a0c1ed8 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -41,7 +41,7 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc1 0>;
- clocks = <&usb2_lpcg 0>;
+ clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;
@@ -58,7 +58,7 @@ conn_subsys: bus@5b000000 {
usbphy1: usbphy@5b100000 {
compatible = "fsl,imx7ulp-usbphy";
reg = <0x5b100000 0x1000>;
- clocks = <&usb2_lpcg 1>;
+ clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
power-domains = <&pd IMX_SC_R_USB_0_PHY>;
status = "disabled";
};
@@ -67,8 +67,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b010000 0x10000>;
clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
- <&sdhc0_lpcg IMX_LPCG_CLK_0>,
- <&sdhc0_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_0>;
status = "disabled";
@@ -78,8 +78,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b020000 0x10000>;
clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
- <&sdhc1_lpcg IMX_LPCG_CLK_0>,
- <&sdhc1_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_1>;
fsl,tuning-start-tap = <20>;
@@ -91,8 +91,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b030000 0x10000>;
clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
- <&sdhc2_lpcg IMX_LPCG_CLK_0>,
- <&sdhc2_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_2>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
index cab3468b1875..f7a91d43a0ff 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
@@ -28,8 +28,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi0_lpcg 0>,
- <&spi0_lpcg 1>;
+ clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
+ <&spi0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -44,8 +44,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi1_lpcg 0>,
- <&spi1_lpcg 1>;
+ clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
+ <&spi1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -60,8 +60,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi2_lpcg 0>,
- <&spi2_lpcg 1>;
+ clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
+ <&spi2_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -76,8 +76,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi3_lpcg 0>,
- <&spi3_lpcg 1>;
+ clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
+ <&spi3_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -145,8 +145,8 @@ dma_subsys: bus@5a000000 {
compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
reg = <0x5a190000 0x1000>;
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_pwm_lpcg 1>,
- <&adma_pwm_lpcg 0>;
+ clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&adma_pwm_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -355,8 +355,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a880000 0x10000>;
interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&adc0_lpcg 0>,
- <&adc0_lpcg 1>;
+ clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
+ <&adc0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -370,8 +370,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a890000 0x10000>;
interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&adc1_lpcg 0>,
- <&adc1_lpcg 1>;
+ clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
+ <&adc1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -384,8 +384,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a8d0000 0x10000>;
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@@ -405,8 +405,8 @@ dma_subsys: bus@5a000000 {
* CAN1 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@@ -426,8 +426,8 @@ dma_subsys: bus@5a000000 {
* CAN2 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
index e7783cc2d830..77d2928997b4 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
@@ -21,7 +21,6 @@ img_subsys: bus@58000000 {
interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&img_jpeg_dec_lpcg IMX_LPCG_CLK_0>,
<&img_jpeg_dec_lpcg IMX_LPCG_CLK_4>;
- clock-names = "per", "ipg";
assigned-clocks = <&img_jpeg_dec_lpcg IMX_LPCG_CLK_0>,
<&img_jpeg_dec_lpcg IMX_LPCG_CLK_4>;
assigned-clock-rates = <200000000>, <200000000>;
@@ -35,7 +34,6 @@ img_subsys: bus@58000000 {
interrupts = <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&img_jpeg_enc_lpcg IMX_LPCG_CLK_0>,
<&img_jpeg_enc_lpcg IMX_LPCG_CLK_4>;
- clock-names = "per", "ipg";
assigned-clocks = <&img_jpeg_enc_lpcg IMX_LPCG_CLK_0>,
<&img_jpeg_enc_lpcg IMX_LPCG_CLK_4>;
assigned-clock-rates = <200000000>, <200000000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
index 7e510b21bbac..764c1a08e3b1 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
@@ -25,8 +25,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d000000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm0_lpcg 4>,
- <&pwm0_lpcg 1>;
+ clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
+ <&pwm0_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -38,8 +38,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d010000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm1_lpcg 4>,
- <&pwm1_lpcg 1>;
+ clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
+ <&pwm1_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -51,8 +51,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d020000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm2_lpcg 4>,
- <&pwm2_lpcg 1>;
+ clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
+ <&pwm2_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -64,8 +64,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d030000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm3_lpcg 4>,
- <&pwm3_lpcg 1>;
+ clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
+ <&pwm3_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri-aster.dts b/arch/arm64/boot/dts/freescale/imx8dx-colibri-aster.dts
new file mode 100644
index 000000000000..c974f5dc0283
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri-aster.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2018-2021 Toradex
+ */
+
+/dts-v1/;
+
+#include "imx8dx-colibri.dtsi"
+#include "imx8x-colibri-aster.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX8DX on Aster Board";
+ compatible = "toradex,colibri-imx8x-aster",
+ "toradex,colibri-imx8x",
+ "fsl,imx8dx";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri-eval-v3.dts b/arch/arm64/boot/dts/freescale/imx8dx-colibri-eval-v3.dts
new file mode 100644
index 000000000000..f2bf15463ae8
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri-eval-v3.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2018-2021 Toradex
+ */
+
+/dts-v1/;
+
+#include "imx8dx-colibri.dtsi"
+#include "imx8x-colibri-eval-v3.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX8DX on Colibri Evaluation Board V3";
+ compatible = "toradex,colibri-imx8x-eval-v3",
+ "toradex,colibri-imx8x",
+ "fsl,imx8dx";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris-v2.dts b/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris-v2.dts
new file mode 100644
index 000000000000..fd425c70cf2b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris-v2.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2018-2021 Toradex
+ */
+
+/dts-v1/;
+
+#include "imx8dx-colibri.dtsi"
+#include "imx8x-colibri-iris-v2.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX8DX on Colibri Iris V2 Board";
+ compatible = "toradex,colibri-imx8x-iris-v2",
+ "toradex,colibri-imx8x",
+ "fsl,imx8dx";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris.dts b/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris.dts
new file mode 100644
index 000000000000..e5e2346ce4f1
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri-iris.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2018-2021 Toradex
+ */
+
+/dts-v1/;
+
+#include "imx8dx-colibri.dtsi"
+#include "imx8x-colibri-iris.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX8DX on Colibri Iris Board";
+ compatible = "toradex,colibri-imx8x-iris",
+ "toradex,colibri-imx8x",
+ "fsl,imx8dx";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi
new file mode 100644
index 000000000000..66b0fcc6687d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2018-2021 Toradex
+ */
+
+#include "imx8dx.dtsi"
+#include "imx8x-colibri.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX8DX Module";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx.dtsi b/arch/arm64/boot/dts/freescale/imx8dx.dtsi
new file mode 100644
index 000000000000..ce76efc1a041
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8dx.dtsi
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ */
+
+/dts-v1/;
+
+#include "imx8dxp.dtsi"
+
+&gpu_3d0 {
+ assigned-clock-rates = <372000000>, <372000000>;
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 2123d431e061..2412ab145c06 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -16,6 +16,8 @@
mmc0 = &usdhc1;
mmc1 = &usdhc2;
serial0 = &lpuart0;
+ serial1 = &lpuart1;
+ serial6 = &cm40_lpuart;
};
chosen {
@@ -51,6 +53,16 @@
};
};
+ m2_uart1_sel: regulator-m2uart1sel {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "m2_uart1_sel";
+ gpio = <&pca6416_1 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
mux3_en: regulator-0 {
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
@@ -340,6 +352,12 @@
status = "okay";
};
+&lpuart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart1>;
+ status = "okay";
+};
+
&flexcan2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan2>;
@@ -354,6 +372,16 @@
status = "okay";
};
+&cm40_intmux {
+ status = "disabled";
+};
+
+&cm40_lpuart {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cm40_lpuart>;
+ status = "disabled";
+};
+
&lsio_gpio4 {
status = "okay";
};
@@ -595,6 +623,15 @@
>;
};
+ pinctrl_lpuart1: lpuart1grp {
+ fsl,pins = <
+ IMX8DXL_UART1_TX_ADMA_UART1_TX 0x06000020
+ IMX8DXL_UART1_RX_ADMA_UART1_RX 0x06000020
+ IMX8DXL_UART1_RTS_B_ADMA_UART1_RTS_B 0x06000020
+ IMX8DXL_UART1_CTS_B_ADMA_UART1_CTS_B 0x06000020
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
IMX8DXL_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
index a0674c5c5576..7e54cf202858 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/clock/imx8-clock.h>
#include <dt-bindings/dma/fsl-edma.h>
+#include <dt-bindings/clock/imx8-lpcg.h>
#include <dt-bindings/firmware/imx/rsrc.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -104,7 +105,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a35-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -231,6 +232,7 @@
};
/* sorted in register address */
+ #include "imx8-ss-cm40.dtsi"
#include "imx8-ss-adma.dtsi"
#include "imx8-ss-conn.dtsi"
#include "imx8-ss-ddr.dtsi"
@@ -241,3 +243,14 @@
#include "imx8dxl-ss-conn.dtsi"
#include "imx8dxl-ss-lsio.dtsi"
#include "imx8dxl-ss-ddr.dtsi"
+
+&cm40_intmux {
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index bd5b365867fd..90d1901df2b1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -72,6 +72,20 @@
enable-active-high;
};
+ reg_1v5: regulator-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
reg_vddext_3v3: regulator-vddext-3v3 {
compatible = "regulator-fixed";
regulator-name = "VDDEXT_3V3";
@@ -381,7 +395,7 @@
};
ptn5110: tcpc@50 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec1>;
reg = <0x50>;
@@ -441,6 +455,9 @@
assigned-clock-rates = <24000000>;
powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ DOVDD-supply = <&buck5_reg>;
+ AVDD-supply = <&reg_1v8>;
+ DVDD-supply = <&reg_1v5>;
port {
ov5640_to_mipi_csi2: endpoint {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som-symphony.dts b/arch/arm64/boot/dts/freescale/imx8mm-var-som-symphony.dts
index d643381417f1..affbc67c2ef6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som-symphony.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som-symphony.dts
@@ -117,7 +117,6 @@
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ptn5150>;
- status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
index 41c966147b94..429be2bab8a2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
@@ -57,7 +57,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 5e2cbaf27e0f..35ae0faa815b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -297,7 +297,7 @@
};
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
index 1cff0b829357..ce20de259805 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
@@ -10,7 +10,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
simple-audio-card,mclk-fs = <256>;
- simple-audio-card,name = "imx8mm-wm8904";
+ simple-audio-card,name = "verdin-wm8904";
simple-audio-card,routing =
"Headphone Jack", "HPOUTL",
"Headphone Jack", "HPOUTR",
@@ -32,6 +32,25 @@
sound-dai = <&sai2>;
};
};
+
+ reg_usb_hub: regulator-usb-hub {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ regulator-name = "HUB_PWR_EN";
+ };
+
+ reg_pcie: regulator-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ regulator-name = "PCIE_1_PWR_EN";
+ startup-delay-us = <100000>;
+ };
};
/* Verdin SPI_1 */
@@ -58,6 +77,11 @@
status = "okay";
};
+&gpio5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
/* Current measurement into module VCC */
&hwmon {
status = "okay";
@@ -93,6 +117,7 @@
/* Verdin PCIE_1 */
&pcie0 {
+ vpcie-supply = <&reg_pcie>;
status = "okay";
};
@@ -115,6 +140,11 @@
status = "okay";
};
+/* We support turning off sleep moci on Dahlia */
+&reg_force_sleep_moci {
+ status = "disabled";
+};
+
/* Verdin I2S_1 */
&sai2 {
status = "okay";
@@ -143,8 +173,16 @@
/* Verdin USB_2 */
&usbotg2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
disable-over-current;
status = "okay";
+
+ usb-hub@1 {
+ compatible = "usb424,2744";
+ reg = <1>;
+ vdd-supply = <&reg_usb_hub>;
+ };
};
/* Verdin SD_1 */
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
index 3c4b8ca125e3..1d8d146d9eeb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
@@ -10,7 +10,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
simple-audio-card,mclk-fs = <256>;
- simple-audio-card,name = "imx8mm-nau8822";
+ simple-audio-card,name = "verdin-nau8822";
simple-audio-card,routing =
"Headphones", "LHP",
"Headphones", "RHP",
@@ -78,6 +78,11 @@
status = "okay";
};
+&gpio5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
&gpio_expander_21 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-yavia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-yavia.dtsi
index 1e28c78e381f..763f069e8405 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-yavia.dtsi
@@ -81,6 +81,11 @@
pinctrl-0 = <&pinctrl_gpios_ext_yavia>;
};
+&gpio5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
&hwmon_temp {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 6f0811587142..4768b05fd765 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -110,6 +110,22 @@
startup-delay-us = <200000>;
};
+ /*
+ * By default we enable CTRL_SLEEP_MOCI#, this is required to have
+ * peripherals on the carrier board powered.
+ * If more granularity or power saving is required this can be disabled
+ * in the carrier board device tree files.
+ */
+ reg_force_sleep_moci: regulator-force-sleep-moci {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "CTRL_SLEEP_MOCI#";
+ };
+
reg_usb_otg1_vbus: regulator-usb-otg1 {
compatible = "regulator-fixed";
enable-active-high;
@@ -333,16 +349,6 @@
"SODIMM_212",
"SODIMM_151",
"SODIMM_153";
-
- ctrl-sleep-moci-hog {
- gpio-hog;
- /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
- gpios = <1 GPIO_ACTIVE_HIGH>;
- line-name = "CTRL_SLEEP_MOCI#";
- output-high;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
- };
};
/* On-module I2C */
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 8a1b42b94dce..9535dedcef59 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -1168,6 +1168,13 @@
remote-endpoint = <&lcdif_to_dsim>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ };
+ };
};
};
@@ -1253,7 +1260,6 @@
reg = <0x32e40000 0x200>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_USB1_CTRL_ROOT>;
- clock-names = "usb1_ctrl_root_clk";
assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>;
phys = <&usbphynop1>;
@@ -1274,7 +1280,6 @@
reg = <0x32e50000 0x200>;
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_USB1_CTRL_ROOT>;
- clock-names = "usb1_ctrl_root_clk";
assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>;
phys = <&usbphynop2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr3l-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr3l-evk.dts
index 000e2c0596df..d25032e3ceab 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr3l-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr3l-evk.dts
@@ -112,3 +112,19 @@
};
};
};
+
+&i2c2 {
+ hdmi@3d {
+ avdd-supply = <&buck5>;
+ dvdd-supply = <&buck5>;
+ pvdd-supply = <&buck5>;
+ a2vdd-supply = <&buck5>;
+ v1p2-supply = <&buck5>;
+ };
+};
+
+&i2c3 {
+ camera@3c {
+ DOVDD-supply = <&buck5>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index cc2ff59ac53b..6d85a0b052c9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -158,3 +158,19 @@
};
};
};
+
+&i2c2 {
+ hdmi@3d {
+ avdd-supply = <&buck5_reg>;
+ dvdd-supply = <&buck5_reg>;
+ pvdd-supply = <&buck5_reg>;
+ a2vdd-supply = <&buck5_reg>;
+ v1p2-supply = <&buck5_reg>;
+ };
+};
+
+&i2c3 {
+ camera@3c {
+ DOVDD-supply = <&buck5_reg>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
index 0b71f50d936e..41330210a05f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
@@ -125,3 +125,19 @@
};
};
};
+
+&i2c2 {
+ hdmi@3d {
+ avdd-supply = <&buck5>;
+ dvdd-supply = <&buck5>;
+ pvdd-supply = <&buck5>;
+ a2vdd-supply = <&buck5>;
+ v1p2-supply = <&buck5>;
+ };
+};
+
+&i2c3 {
+ camera@3c {
+ DOVDD-supply = <&buck5>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index 269e70f66a13..9e0259ddf4bc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -30,7 +30,7 @@
port {
hdmi_connector_in: endpoint {
- remote-endpoint = <&adv7533_out>;
+ remote-endpoint = <&adv7535_out>;
};
};
};
@@ -52,6 +52,27 @@
enable-active-high;
};
+ reg_1v5: regulator-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_vddext_3v3: regulator-vddext-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDEXT_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
@@ -193,15 +214,11 @@
hdmi@3d {
compatible = "adi,adv7535";
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
-
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
+ v3p3-supply = <&reg_vddext_3v3>;
ports {
#address-cells = <1>;
@@ -210,7 +227,7 @@
port@0 {
reg = <0>;
- adv7533_in: endpoint {
+ adv7535_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
@@ -218,7 +235,7 @@
port@1 {
reg = <1>;
- adv7533_out: endpoint {
+ adv7535_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
@@ -227,7 +244,7 @@
};
ptn5110: tcpc@50 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec1>;
reg = <0x50>;
@@ -284,6 +301,8 @@
assigned-clock-rates = <24000000>;
powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ AVDD-supply = <&reg_1v8>;
+ DVDD-supply = <&reg_1v5>;
port {
ov5640_to_mipi_csi2: endpoint {
@@ -335,7 +354,7 @@
reg = <1>;
dsi_out: endpoint {
- remote-endpoint = <&adv7533_in>;
+ remote-endpoint = <&adv7535_in>;
data-lanes = <1 2 3 4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts b/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
index a6b94d1957c9..3434b189fa58 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
@@ -126,7 +126,6 @@
interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ptn5150>;
- status = "okay";
port {
typec1_dr_sw: endpoint {
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 932c8b05c75f..a5f9cfb46e5d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -1104,6 +1104,13 @@
remote-endpoint = <&lcdif_to_dsim>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ };
+ };
};
};
@@ -1213,7 +1220,6 @@
reg = <0x32e40000 0x200>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>;
- clock-names = "usb1_ctrl_root_clk";
assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>;
assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>;
phys = <&usbphynop1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index a08057410bde..e5d3901f2913 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -340,7 +340,7 @@
&i2c3 {
/* Connected to USB Hub */
usb-typec@52 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
reg = <0x52>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
index 2c19766ebf09..9b8f97a84e61 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
@@ -197,10 +197,8 @@
};
&i2c2 {
- clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
- status = "okay";
};
&i2c3 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-som-a-bmb-08.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-som-a-bmb-08.dts
index b11d694b98e1..d241db3743a9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-som-a-bmb-08.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-som-a-bmb-08.dts
@@ -144,7 +144,6 @@
pinctrl-0 = <&pinctrl_eqos>;
nvmem-cells = <&ethmac1>;
nvmem-cell-names = "mac-address";
- phy-supply = <&reg_baseboard_vdd3v3>;
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
index b749e28e5ede..ac7ec7533a3c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
@@ -167,6 +167,16 @@
VDDIO-supply = <&reg_vdd_3p3v_awo>;
};
+ csi2exp: gpio@24 {
+ compatible = "nxp,pca9570";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "CSI2_#RESET", "CSI2_#PWDN",
+ "CSI_#PWDN", "CSI_#RESET";
+ };
+
typec@3d {
compatible = "nxp,ptn5150";
reg = <0x3d>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 9beba8d6a0df..8be5b2a57f27 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -145,6 +145,27 @@
};
+ sound-hdmi {
+ compatible = "fsl,imx-audio-hdmi";
+ model = "audio-hdmi";
+ audio-cpu = <&aud2htx>;
+ hdmi-out;
+ };
+
+ sound-micfil {
+ compatible = "fsl,imx-audio-card";
+ model = "micfil-audio";
+
+ pri-dai-link {
+ link-name = "micfil hifi";
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&micfil>;
+ };
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -198,6 +219,10 @@
cpu-supply = <&reg_arm>;
};
+&aud2htx {
+ status = "okay";
+};
+
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>;
@@ -524,6 +549,16 @@
status = "okay";
};
+&micfil {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pdm>;
+ assigned-clocks = <&clk IMX8MP_CLK_PDM>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <196608000>;
+ status = "okay";
+};
+
&mipi_dsi {
samsung,esc-clock-frequency = <10000000>;
status = "okay";
@@ -790,6 +825,16 @@
>;
};
+ pinctrl_pdm: pdmgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXC__AUDIOMIX_PDM_CLK 0xd6
+ MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_PDM_BIT_STREAM00 0xd6
+ MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_PDM_BIT_STREAM01 0xd6
+ MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_PDM_BIT_STREAM02 0xd6
+ MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_PDM_BIT_STREAM03 0xd6
+ >;
+ };
+
pinctrl_pmic: pmicgrp {
fsl,pins = <
MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x000001c0
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s.dtsi
index 61c2a63efc6d..0fd5c3abcdb7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s.dtsi
@@ -200,8 +200,11 @@
};
&i2c1 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
clock-frequency = <400000>;
status = "okay";
@@ -241,8 +244,11 @@
};
&i2c6 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c6>;
+ pinctrl-1 = <&pinctrl_i2c6_gpio>;
+ scl-gpios = <&gpio3 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
clock-frequency = <400000>;
status = "okay";
@@ -602,38 +608,50 @@
pinctrl_i2c1: i2c1grp {
fsl,pins =
- <MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3>,
- <MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001e0>;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins =
+ <MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x1e0>,
+ <MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x1e0>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins =
- <MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3>,
- <MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001e0>;
};
pinctrl_i2c3: i2c3grp {
fsl,pins =
- <MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3>,
- <MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001e0>;
};
pinctrl_i2c4: i2c4grp {
fsl,pins =
- <MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c3>,
- <MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001e0>;
};
pinctrl_i2c5: i2c5grp {
fsl,pins =
- <MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL 0x400001c3>,
- <MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x400001e0>;
};
pinctrl_i2c6: i2c6grp {
fsl,pins =
- <MX8MP_IOMUXC_SAI5_RXFS__I2C6_SCL 0x400001c3>,
- <MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x400001c3>;
+ <MX8MP_IOMUXC_SAI5_RXFS__I2C6_SCL 0x400001e0>,
+ <MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x400001e0>;
+ };
+
+ pinctrl_i2c6_gpio: i2c6gpiogrp {
+ fsl,pins =
+ <MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x1e0>,
+ <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x1e0>;
};
pinctrl_lcd0_backlight: lcd0-backlightgrp {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-navqp.dts b/arch/arm64/boot/dts/freescale/imx8mp-navqp.dts
new file mode 100644
index 000000000000..5fd1614982cd
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-navqp.dts
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2021 Emcraft Systems
+ * Copyright 2024 Gilles Talis <gilles.talis@gmail.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "imx8mp.dtsi"
+
+/ {
+ model = "Emcraft Systems i.MX8MPlus NavQ+ Kit";
+ compatible = "emcraft,imx8mp-navqp", "fsl,imx8mp";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_led>;
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ off-on-delay-us = <12000>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <10000>;
+ qca,disable-smarteee;
+ qca,disable-hibernation-mode;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ };
+
+ BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO3 {
+ regulator-name = "LDO3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ rtc@53 {
+ compatible = "nxp,pcf2131";
+ reg = <0x53>;
+ };
+};
+
+&uart2 {
+ /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* SD Card */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC3>;
+ assigned-clock-rates = <400000000>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f
+ MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x110
+ >;
+ };
+
+ pinctrl_gpio_led: gpioledgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x19
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x41
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
index 86d3da36e4f3..c51ed7d991d1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
@@ -135,6 +135,18 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X44";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
display: display {
/*
* Display is not fixed, so compatible has to be added from
@@ -470,6 +482,28 @@
"", "", "", "";
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
&i2c2 {
clock-frequency = <384000>;
pinctrl-names = "default", "gpio";
@@ -531,6 +565,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
&pcf85063 {
/* RTC_EVENT# is connected on MBa8MPxL */
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
index e7bf032265e0..2f740d74707b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
@@ -68,7 +68,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
index 41c79d2ebdd6..5ab3ffe9931d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
@@ -8,12 +8,17 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
/ {
+ aliases {
+ ethernet1 = &eth1;
+ };
+
connector {
compatible = "gpio-usb-b-connector", "usb-b-connector";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
+ vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@@ -151,6 +156,38 @@
pinctrl-0 = <&pinctrl_pcie0>;
reset-gpio = <&gpio4 29 GPIO_ACTIVE_LOW>;
status = "okay";
+
+ pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ pcie@3,0 {
+ reg = <0x1800 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ eth1: ethernet@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ local-mac-address = [00 00 00 00 00 00];
+ };
+ };
+ };
+ };
};
/* GPS */
@@ -183,7 +220,6 @@
};
&usb3_phy0 {
- vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index d5c400b355af..dec57fad6828 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -8,12 +8,17 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
/ {
+ aliases {
+ ethernet1 = &eth1;
+ };
+
connector {
compatible = "gpio-usb-b-connector", "usb-b-connector";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
+ vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@@ -163,6 +168,38 @@
pinctrl-0 = <&pinctrl_pcie0>;
reset-gpio = <&gpio4 29 GPIO_ACTIVE_LOW>;
status = "okay";
+
+ pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ pcie@4,0 {
+ reg = <0x2000 0 0 0 0>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ eth1: ethernet@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ local-mac-address = [00 00 00 00 00 00];
+ };
+ };
+ };
+ };
};
/* GPS */
@@ -202,7 +239,6 @@
};
&usb3_phy0 {
- vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
index 270a9114da97..edf22ff549a4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
@@ -62,12 +62,25 @@
status = "okay";
ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
port@0 {
+ reg = <0>;
+
mipi_csi_0_in: endpoint {
remote-endpoint = <&imx219_to_mipi_csi2>;
data-lanes = <1 2>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mipi_csi_0_out: endpoint {
+ remote-endpoint = <&isi_in_0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index cae586cd45bd..a77e9a44d9fa 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -404,6 +404,12 @@
label = "vdd_dram";
};
+ channel@9e {
+ gw,mode = <2>;
+ reg = <0x9e>;
+ label = "vdd_1p0";
+ };
+
channel@a2 {
gw,mode = <2>;
reg = <0xa2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
index 7e9e4b13b5c5..6e6b9c2c4640 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
@@ -10,7 +10,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&codec_dai>;
simple-audio-card,mclk-fs = <256>;
- simple-audio-card,name = "imx8mp-wm8904";
+ simple-audio-card,name = "verdin-wm8904";
simple-audio-card,routing =
"Headphone Jack", "HPOUTL",
"Headphone Jack", "HPOUTR",
@@ -32,6 +32,25 @@
sound-dai = <&sai1>;
};
};
+
+ reg_usb_hub: regulator-usb-hub {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ regulator-name = "HUB_PWR_EN";
+ };
+
+ reg_pcie: regulator-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ regulator-name = "PCIE_1_PWR_EN";
+ startup-delay-us = <100000>;
+ };
};
&backlight {
@@ -70,6 +89,11 @@
status = "okay";
};
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
/* Current measurement into module VCC */
&hwmon {
status = "okay";
@@ -110,8 +134,14 @@
};
};
+/* Verdin I2C_3_HDMI */
+&i2c5 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
+ vpcie-supply = <&reg_pcie>;
status = "okay";
};
@@ -138,6 +168,11 @@
vin-supply = <&reg_3p3v>;
};
+/* We support turning off sleep moci on Dahlia */
+&reg_force_sleep_moci {
+ status = "disabled";
+};
+
/* Verdin I2S_1 */
&sai1 {
assigned-clocks = <&clk IMX8MP_CLK_SAI1>;
@@ -181,6 +216,25 @@
status = "okay";
};
+&usb_dwc3_1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_hub_3_0: usb-hub@1 {
+ compatible = "usb424,5744";
+ reg = <1>;
+ peer-hub = <&usb_hub_2_0>;
+ vdd-supply = <&reg_usb_hub>;
+ };
+
+ usb_hub_2_0: usb-hub@2 {
+ compatible = "usb424,2744";
+ reg = <2>;
+ peer-hub = <&usb_hub_3_0>;
+ vdd-supply = <&reg_usb_hub>;
+ };
+};
+
/* Verdin SD_1 */
&usdhc2 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
index a509b2b7fa85..42ed44a11711 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
@@ -22,7 +22,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&codec_dai>;
simple-audio-card,mclk-fs = <256>;
- simple-audio-card,name = "imx8mp-nau8822";
+ simple-audio-card,name = "verdin-nau8822";
simple-audio-card,routing =
"Headphones", "LHP",
"Headphones", "RHP",
@@ -93,6 +93,11 @@
status = "okay";
};
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
&gpio_expander_21 {
status = "okay";
vcc-supply = <&reg_1p8v>;
@@ -131,6 +136,11 @@
};
};
+/* Verdin I2C_3_HDMI */
+&i2c5 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
index 8482393f3cac..1d15f7449c58 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
@@ -112,6 +112,11 @@
status = "okay";
};
+/* Verdin I2C_3_HDMI */
+&i2c5 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
index db1722f0d80e..a7b261ff3e4c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
@@ -100,6 +100,11 @@
status = "okay";
};
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
+};
+
&hwmon_temp {
status = "okay";
};
@@ -117,6 +122,11 @@
status = "okay";
};
+/* Verdin I2C_3_HDMI */
+&i2c5 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index faa17cbbe2fd..aef4bef4bccd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -116,6 +116,22 @@
vin-supply = <&reg_vdd_3v3>;
};
+ /*
+ * By default we enable CTRL_SLEEP_MOCI#, this is required to have
+ * peripherals on the carrier board powered.
+ * If more granularity or power saving is required this can be disabled
+ * in the carrier board device tree files.
+ */
+ reg_force_sleep_moci: regulator-force-sleep-moci {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "CTRL_SLEEP_MOCI#";
+ };
+
reg_usb1_vbus: regulator-usb1-vbus {
compatible = "regulator-fixed";
enable-active-high;
@@ -439,16 +455,6 @@
"SODIMM_256",
"SODIMM_48",
"SODIMM_44";
-
- ctrl-sleep-moci-hog {
- gpio-hog;
- /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
- gpios = <29 GPIO_ACTIVE_HIGH>;
- line-name = "CTRL_SLEEP_MOCI#";
- output-high;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
- };
};
/* On-module I2C */
@@ -664,8 +670,6 @@
};
};
-/* TODO: Verdin I2C_3_HDMI */
-
/* Verdin I2C_4_CSI */
&i2c3 {
clock-frequency = <400000>;
@@ -764,6 +768,16 @@
};
};
+/* Verdin I2C_3_HDMI */
+&i2c5 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c5>;
+ pinctrl-1 = <&pinctrl_i2c5_gpio>;
+ scl-gpios = <&gpio3 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
/* Verdin PCIE_1 */
&pcie {
pinctrl-names = "default";
@@ -1106,8 +1120,6 @@
pinctrl_hdmi_hog: hdmihoggrp {
fsl,pins =
<MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000019>, /* SODIMM 63 */
- <MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c3>, /* SODIMM 59 */
- <MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c3>, /* SODIMM 57 */
<MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000019>; /* SODIMM 61 */
};
@@ -1163,6 +1175,19 @@
<MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x400001c6>; /* SODIMM 12 */
};
+ /* Verdin I2C_3_HDMI */
+ pinctrl_i2c5: i2c5grp {
+ fsl,pins =
+ <MX8MP_IOMUXC_HDMI_DDC_SCL__I2C5_SCL 0x400001c6>, /* SODIMM 59 */
+ <MX8MP_IOMUXC_HDMI_DDC_SDA__I2C5_SDA 0x400001c6>; /* SODIMM 57 */
+ };
+
+ pinctrl_i2c5_gpio: i2c5gpiogrp {
+ fsl,pins =
+ <MX8MP_IOMUXC_HDMI_DDC_SCL__GPIO3_IO26 0x400001c6>, /* SODIMM 59 */
+ <MX8MP_IOMUXC_HDMI_DDC_SDA__GPIO3_IO27 0x400001c6>; /* SODIMM 57 */
+ };
+
/* Verdin I2S_2_BCLK (TOUCH_RESET#) */
pinctrl_i2s_2_bclk_touch_reset: i2s2bclktouchresetgrp {
fsl,pins =
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index bfc5c81a5bd4..b92abb5a5c53 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -836,6 +836,23 @@
<&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
};
+ pgc_hdmimix: power-domain@14 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_HDMIMIX>;
+ clocks = <&clk IMX8MP_CLK_HDMI_ROOT>,
+ <&clk IMX8MP_CLK_HDMI_APB>;
+ assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
+ <&clk IMX8MP_CLK_HDMI_APB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>,
+ <&clk IMX8MP_SYS_PLL1_133M>;
+ assigned-clock-rates = <500000000>, <133000000>;
+ };
+
+ pgc_hdmi_phy: power-domain@15 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_HDMI_PHY>;
+ };
+
pgc_mipi_phy2: power-domain@16 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
@@ -1513,6 +1530,16 @@
status = "disabled";
};
+ aud2htx: aud2htx@30cb0000 {
+ compatible = "fsl,imx8mp-aud2htx";
+ reg = <0x30cb0000 0x10000>;
+ interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG>;
+ clock-names = "bus";
+ dmas = <&sdma2 26 2 0>;
+ dma-names = "tx";
+ status = "disabled";
+ };
};
sdma3: dma-controller@30e00000 {
@@ -1630,7 +1657,7 @@
compatible = "fsl,imx8mp-mipi-csi2", "fsl,imx8mm-mipi-csi2";
reg = <0x32e40000 0x10000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <500000000>;
+ clock-frequency = <266000000>;
clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
<&clk IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
@@ -1640,7 +1667,7 @@
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_CLK_24M>;
- assigned-clock-rates = <500000000>;
+ assigned-clock-rates = <266000000>;
power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_MIPI_CSI2_1>;
status = "disabled";
@@ -1672,7 +1699,7 @@
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
clock-names = "pclk", "wrap", "phy", "axi";
- assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM1_PIX>,
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM2_PIX>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_CLK_24M>;
@@ -1725,6 +1752,13 @@
remote-endpoint = <&lcdif1_to_dsim>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ };
+ };
};
};
@@ -1889,6 +1923,136 @@
#power-domain-cells = <1>;
#clock-cells = <0>;
};
+
+ hdmi_blk_ctrl: blk-ctrl@32fc0000 {
+ compatible = "fsl,imx8mp-hdmi-blk-ctrl", "syscon";
+ reg = <0x32fc0000 0x1000>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>,
+ <&clk IMX8MP_CLK_HDMI_ROOT>,
+ <&clk IMX8MP_CLK_HDMI_REF_266M>,
+ <&clk IMX8MP_CLK_HDMI_24M>,
+ <&clk IMX8MP_CLK_HDMI_FDCC_TST>;
+ clock-names = "apb", "axi", "ref_266m", "ref_24m", "fdcc";
+ power-domains = <&pgc_hdmimix>, <&pgc_hdmimix>,
+ <&pgc_hdmimix>, <&pgc_hdmimix>,
+ <&pgc_hdmimix>, <&pgc_hdmimix>,
+ <&pgc_hdmimix>, <&pgc_hdmi_phy>,
+ <&pgc_hdmimix>, <&pgc_hdmimix>;
+ power-domain-names = "bus", "irqsteer", "lcdif",
+ "pai", "pvi", "trng",
+ "hdmi-tx", "hdmi-tx-phy",
+ "hdcp", "hrv";
+ #power-domain-cells = <1>;
+ };
+
+ irqsteer_hdmi: interrupt-controller@32fc2000 {
+ compatible = "fsl,imx-irqsteer";
+ reg = <0x32fc2000 0x1000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ fsl,channel = <1>;
+ fsl,num-irqs = <64>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>;
+ clock-names = "ipg";
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_IRQSTEER>;
+ };
+
+ hdmi_pvi: display-bridge@32fc4000 {
+ compatible = "fsl,imx8mp-hdmi-pvi";
+ reg = <0x32fc4000 0x1000>;
+ interrupt-parent = <&irqsteer_hdmi>;
+ interrupts = <12>;
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PVI>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ pvi_from_lcdif3: endpoint {
+ remote-endpoint = <&lcdif3_to_pvi>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ pvi_to_hdmi_tx: endpoint {
+ remote-endpoint = <&hdmi_tx_from_pvi>;
+ };
+ };
+ };
+ };
+
+ lcdif3: display-controller@32fc6000 {
+ compatible = "fsl,imx8mp-lcdif";
+ reg = <0x32fc6000 0x1000>;
+ interrupt-parent = <&irqsteer_hdmi>;
+ interrupts = <8>;
+ clocks = <&hdmi_tx_phy>,
+ <&clk IMX8MP_CLK_HDMI_APB>,
+ <&clk IMX8MP_CLK_HDMI_ROOT>;
+ clock-names = "pix", "axi", "disp_axi";
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_LCDIF>;
+ status = "disabled";
+
+ port {
+ lcdif3_to_pvi: endpoint {
+ remote-endpoint = <&pvi_from_lcdif3>;
+ };
+ };
+ };
+
+ hdmi_tx: hdmi@32fd8000 {
+ compatible = "fsl,imx8mp-hdmi-tx";
+ reg = <0x32fd8000 0x7eff>;
+ interrupt-parent = <&irqsteer_hdmi>;
+ interrupts = <0>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>,
+ <&clk IMX8MP_CLK_HDMI_REF_266M>,
+ <&clk IMX8MP_CLK_32K>,
+ <&hdmi_tx_phy>;
+ clock-names = "iahb", "isfr", "cec", "pix";
+ assigned-clocks = <&clk IMX8MP_CLK_HDMI_REF_266M>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>;
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_HDMI_TX>;
+ reg-io-width = <1>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi_tx_from_pvi: endpoint {
+ remote-endpoint = <&pvi_to_hdmi_tx>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ /* Point endpoint to the HDMI connector */
+ };
+ };
+ };
+
+ hdmi_tx_phy: phy@32fdff00 {
+ compatible = "fsl,imx8mp-hdmi-phy";
+ reg = <0x32fdff00 0x100>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>,
+ <&clk IMX8MP_CLK_HDMI_24M>;
+ clock-names = "apb", "ref";
+ assigned-clocks = <&clk IMX8MP_CLK_HDMI_24M>;
+ assigned-clock-parents = <&clk IMX8MP_CLK_24M>;
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_HDMI_TX_PHY>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
};
pcie: pcie@33800000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts b/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
index 366693f31992..e92b5d5a66b5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
@@ -42,7 +42,7 @@
status = "okay";
typec_ptn5100: usb-typec@50 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
reg = <0x50>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 8055a2c23035..b268ba7a0e12 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -429,7 +429,7 @@
};
typec_ptn5100: usb-typec@52 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
reg = <0x52>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index c6dc3ba0d43b..e03186bbc415 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -1290,6 +1290,13 @@
remote-endpoint = <&lcdif_mipi_dsi>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 77ac0efdfaad..5c6b39c6933f 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -39,6 +39,20 @@
gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ reg_vref_1v8: regulator-adc-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "vref_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
+
+&adc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0>;
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
};
&i2c1 {
@@ -71,6 +85,37 @@
status = "okay";
};
+&lpspi2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi2 &pinctrl_lpspi2_cs>;
+ cs-gpios = <&lsio_gpio3 10 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ spidev0: spi@0 {
+ reg = <0>;
+ compatible = "rohm,dh2228fv";
+ spi-max-frequency = <30000000>;
+ };
+};
+
+&flexspi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi0>;
+ status = "okay";
+
+ flash0: flash@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <133000000>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ };
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
@@ -130,6 +175,12 @@
>;
};
+ pinctrl_adc0: adc0grp {
+ fsl,pins = <
+ IMX8QM_ADC_IN0_DMA_ADC0_IN0 0xc0000060
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
IMX8QM_ENET0_MDC_CONN_ENET0_MDC 0x06000020
@@ -149,6 +200,41 @@
>;
};
+ pinctrl_lpspi2: lpspi2grp {
+ fsl,pins = <
+ IMX8QM_SPI2_SCK_DMA_SPI2_SCK 0x06000040
+ IMX8QM_SPI2_SDO_DMA_SPI2_SDO 0x06000040
+ IMX8QM_SPI2_SDI_DMA_SPI2_SDI 0x06000040
+ >;
+ };
+
+ pinctrl_lpspi2_cs: lpspi2csgrp {
+ fsl,pins = <
+ IMX8QM_SPI2_CS0_LSIO_GPIO3_IO10 0x21
+ >;
+ };
+
+ pinctrl_flexspi0: flexspi0grp {
+ fsl,pins = <
+ IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 0x06000021
+ IMX8QM_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 0x06000021
+ IMX8QM_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 0x06000021
+ IMX8QM_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 0x06000021
+ IMX8QM_QSPI0A_DQS_LSIO_QSPI0A_DQS 0x06000021
+ IMX8QM_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B 0x06000021
+ IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B 0x06000021
+ IMX8QM_QSPI0A_SCLK_LSIO_QSPI0A_SCLK 0x06000021
+ IMX8QM_QSPI0B_SCLK_LSIO_QSPI0B_SCLK 0x06000021
+ IMX8QM_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 0x06000021
+ IMX8QM_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 0x06000021
+ IMX8QM_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 0x06000021
+ IMX8QM_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 0x06000021
+ IMX8QM_QSPI0B_DQS_LSIO_QSPI0B_DQS 0x06000021
+ IMX8QM_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B 0x06000021
+ IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B 0x06000021
+ >;
+ };
+
pinctrl_lpuart0: lpuart0grp {
fsl,pins = <
IMX8QM_UART0_RX_DMA_UART0_RX 0x06000020
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
index 11626fae5f97..aa9f28c4431d 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
@@ -153,15 +153,15 @@
};
&flexcan2 {
- clocks = <&can1_lpcg 1>,
- <&can1_lpcg 0>;
+ clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
+ <&can1_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
&flexcan3 {
- clocks = <&can2_lpcg 1>,
- <&can2_lpcg 0>;
+ clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
+ <&can2_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index 8360bb851ac0..cee13e58762c 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -44,6 +44,22 @@
};
};
};
+
+ sound-wm8960 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio";
+ audio-cpu = <&sai1>;
+ audio-codec = <&wm8960>;
+ hp-det-gpio = <&lsio_gpio1 0 GPIO_ACTIVE_HIGH>;
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ };
};
&dsp {
@@ -149,7 +165,7 @@
};
ptn5110: tcpc@50 {
- compatible = "nxp,ptn5110";
+ compatible = "nxp,ptn5110", "tcpci";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec>;
reg = <0x50>;
@@ -188,6 +204,47 @@
};
+&cm40_i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_cm40_i2c>;
+ pinctrl-1 = <&pinctrl_cm40_i2c_gpio>;
+ scl-gpios = <&lsio_gpio1 10 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&lsio_gpio1 9 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ wm8960: audio-codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>,
+ <49152000>,
+ <12288000>,
+ <12288000>;
+ wlf,shared-lrclk;
+ wlf,hp-cfg = <2 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ };
+
+ pca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&cm40_intmux {
+ status = "okay";
+};
+
&lpuart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart0>;
@@ -218,6 +275,53 @@
status = "okay";
};
+&sai0 {
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai0>;
+ status = "okay";
+};
+
+&sai1 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
+&sai4 {
+ assigned-clocks = <&acm IMX_ADMA_ACM_SAI4_MCLK_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai4_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-parents = <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <0>, <786432000>, <98304000>, <12288000>, <98304000>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
+&sai5 {
+ assigned-clocks = <&acm IMX_ADMA_ACM_SAI5_MCLK_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai5_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-parents = <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <0>, <786432000>, <98304000>, <12288000>, <98304000>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
&thermal_zones {
pmic-thermal {
polling-delay-passive = <250>;
@@ -314,6 +418,21 @@
};
&iomuxc {
+
+ pinctrl_cm40_i2c: cm40i2cgrp {
+ fsl,pins = <
+ IMX8QXP_ADC_IN1_M40_I2C0_SDA 0x0600004c
+ IMX8QXP_ADC_IN0_M40_I2C0_SCL 0x0600004c
+ >;
+ };
+
+ pinctrl_cm40_i2c_gpio: cm40i2cgpio-grp {
+ fsl,pins = <
+ IMX8QXP_ADC_IN1_LSIO_GPIO1_IO09 0xc600004c
+ IMX8QXP_ADC_IN0_LSIO_GPIO1_IO10 0xc600004c
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
IMX8QXP_ENET0_MDC_CONN_ENET0_MDC 0x06000020
@@ -385,6 +504,25 @@
>;
};
+ pinctrl_sai0: sai0grp {
+ fsl,pins = <
+ IMX8QXP_SAI0_TXD_ADMA_SAI0_TXD 0x06000060
+ IMX8QXP_SAI0_RXD_ADMA_SAI0_RXD 0x06000040
+ IMX8QXP_SAI0_TXC_ADMA_SAI0_TXC 0x06000040
+ IMX8QXP_SAI0_TXFS_ADMA_SAI0_TXFS 0x06000040
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ IMX8QXP_SAI1_RXD_ADMA_SAI1_RXD 0x06000040
+ IMX8QXP_SAI1_RXC_ADMA_SAI1_TXC 0x06000040
+ IMX8QXP_SAI1_RXFS_ADMA_SAI1_TXFS 0x06000040
+ IMX8QXP_SPI0_CS1_ADMA_SAI1_TXD 0x06000060
+ IMX8QXP_SPI2_CS0_LSIO_GPIO1_IO00 0x06000040
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index 10e16d84c0c3..0313f295de2e 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -317,6 +317,7 @@
/* sorted by register address */
#include "imx8-ss-img.dtsi"
#include "imx8-ss-vpu.dtsi"
+ #include "imx8-ss-cm40.dtsi"
#include "imx8-ss-gpu0.dtsi"
#include "imx8-ss-adma.dtsi"
#include "imx8-ss-conn.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8ulp-evk.dts b/arch/arm64/boot/dts/freescale/imx8ulp-evk.dts
index 24bb253b938d..e937e5f8fa8b 100644
--- a/arch/arm64/boot/dts/freescale/imx8ulp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8ulp-evk.dts
@@ -127,12 +127,70 @@
pinctrl-1 = <&pinctrl_lpi2c7>;
status = "okay";
+ ptn5150_1: typec@1d {
+ compatible = "nxp,ptn5150";
+ reg = <0x1d>;
+ int-gpios = <&gpiof 3 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec1>;
+ status = "disabled";
+ };
+
pcal6408: gpio@21 {
compatible = "nxp,pcal9554b";
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
};
+
+ ptn5150_2: typec@3d {
+ compatible = "nxp,ptn5150";
+ reg = <0x3d>;
+ int-gpios = <&gpiof 5 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec2>;
+ status = "disabled";
+ };
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>;
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ over-current-active-low;
+ status = "okay";
+};
+
+&usbphy1 {
+ fsl,tx-d-cal = <110>;
+ status = "okay";
+};
+
+&usbmisc1 {
+ status = "okay";
+};
+
+&usbotg2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb2>;
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ over-current-active-low;
+ status = "okay";
+};
+
+&usbphy2 {
+ fsl,tx-d-cal = <110>;
+ status = "okay";
+};
+
+&usbmisc2 {
+ status = "okay";
};
&usdhc0 {
@@ -224,6 +282,32 @@
>;
};
+ pinctrl_typec1: typec1grp {
+ fsl,pins = <
+ MX8ULP_PAD_PTF3__PTF3 0x3
+ >;
+ };
+
+ pinctrl_typec2: typec2grp {
+ fsl,pins = <
+ MX8ULP_PAD_PTF5__PTF5 0x3
+ >;
+ };
+
+ pinctrl_usb1: usb1grp {
+ fsl,pins = <
+ MX8ULP_PAD_PTF2__USB0_ID 0x10003
+ MX8ULP_PAD_PTF4__USB0_OC 0x10003
+ >;
+ };
+
+ pinctrl_usb2: usb2grp {
+ fsl,pins = <
+ MX8ULP_PAD_PTD23__USB1_ID 0x10003
+ MX8ULP_PAD_PTF6__USB1_OC 0x10003
+ >;
+ };
+
pinctrl_usdhc0: usdhc0grp {
fsl,pins = <
MX8ULP_PAD_PTD1__SDHC0_CMD 0x3
diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
index c4a0082f30d3..e32d5afcf4a9 100644
--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
@@ -252,6 +252,38 @@
#reset-cells = <1>;
};
+ crypto: crypto@292e0000 {
+ compatible = "fsl,sec-v4.0";
+ reg = <0x292e0000 0x10000>;
+ ranges = <0 0x292e0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
tpm5: tpm@29340000 {
compatible = "fsl,imx8ulp-tpm", "fsl,imx7ulp-tpm";
reg = <0x29340000 0x1000>;
@@ -472,6 +504,68 @@
status = "disabled";
};
+ usbotg1: usb@29900000 {
+ compatible = "fsl,imx8ulp-usb", "fsl,imx7ulp-usb", "fsl,imx6ul-usb";
+ reg = <0x29900000 0x200>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc4 IMX8ULP_CLK_USB0>;
+ power-domains = <&scmi_devpd IMX8ULP_PD_USB0>;
+ phys = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+ ahb-burst-config = <0x0>;
+ tx-burst-size-dword = <0x8>;
+ rx-burst-size-dword = <0x8>;
+ status = "disabled";
+ };
+
+ usbmisc1: usbmisc@29900200 {
+ compatible = "fsl,imx8ulp-usbmisc", "fsl,imx7d-usbmisc",
+ "fsl,imx6q-usbmisc";
+ reg = <0x29900200 0x200>;
+ #index-cells = <1>;
+ status = "disabled";
+ };
+
+ usbphy1: usb-phy@29910000 {
+ compatible = "fsl,imx8ulp-usbphy", "fsl,imx7ulp-usbphy";
+ reg = <0x29910000 0x10000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc4 IMX8ULP_CLK_USB0_PHY>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ usbotg2: usb@29920000 {
+ compatible = "fsl,imx8ulp-usb", "fsl,imx7ulp-usb", "fsl,imx6ul-usb";
+ reg = <0x29920000 0x200>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc4 IMX8ULP_CLK_USB1>;
+ power-domains = <&scmi_devpd IMX8ULP_PD_USDHC2_USB1>;
+ phys = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc2 0>;
+ ahb-burst-config = <0x0>;
+ tx-burst-size-dword = <0x8>;
+ rx-burst-size-dword = <0x8>;
+ status = "disabled";
+ };
+
+ usbmisc2: usbmisc@29920200 {
+ compatible = "fsl,imx8ulp-usbmisc", "fsl,imx7d-usbmisc",
+ "fsl,imx6q-usbmisc";
+ reg = <0x29920200 0x200>;
+ #index-cells = <1>;
+ status = "disabled";
+ };
+
+ usbphy2: usb-phy@29930000 {
+ compatible = "fsl,imx8ulp-usbphy", "fsl,imx7ulp-usbphy";
+ reg = <0x29930000 0x10000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc4 IMX8ULP_CLK_USB1_PHY>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
fec: ethernet@29950000 {
compatible = "fsl,imx8ulp-fec", "fsl,imx6ul-fec", "fsl,imx6q-fec";
reg = <0x29950000 0x10000>;
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index 9921ea13ab48..d400d85f42a9 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include <dt-bindings/usb/pd.h>
#include "imx93.dtsi"
/ {
@@ -38,7 +39,7 @@
no-map;
};
- vdev1vring0: vdev1vring0@a4000000 {
+ vdev1vring0: vdev1vring0@a4010000 {
reg = <0 0xa4010000 0 0x8000>;
no-map;
};
@@ -48,8 +49,8 @@
no-map;
};
- rsc_table: rsc-table@2021f000 {
- reg = <0 0x2021f000 0 0x1000>;
+ rsc_table: rsc-table@2021e000 {
+ reg = <0 0x2021e000 0 0x1000>;
no-map;
};
@@ -104,9 +105,85 @@
status = "okay";
};
-&eqos {
+&lpi2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ status = "okay";
+
+ ptn5110: tcpc@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+ typec1_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ op-sink-microwatt = <15000000>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec1_dr_sw: endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
+ };
+ };
+ };
+
+ ptn5110_2: tcpc@51 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x51>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+ typec2_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ op-sink-microwatt = <15000000>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec2_dr_sw: endpoint {
+ remote-endpoint = <&usb2_drd_sw>;
+ };
+ };
+ };
+ };
+ };
+};
+
+&eqos {
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&pinctrl_eqos>;
+ pinctrl-1 = <&pinctrl_eqos_sleep>;
phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
status = "okay";
@@ -120,13 +197,17 @@
ethphy1: ethernet-phy@1 {
reg = <1>;
eee-broken-1000t;
+ reset-gpios = <&pcal6524 15 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
};
};
};
&fec {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&pinctrl_fec>;
+ pinctrl-1 = <&pinctrl_fec_sleep>;
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
fsl,magic-packet;
@@ -140,6 +221,9 @@
ethphy2: ethernet-phy@2 {
reg = <2>;
eee-broken-1000t;
+ reset-gpios = <&pcal6524 16 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
};
};
};
@@ -156,21 +240,58 @@
status = "okay";
};
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ usb1_drd_sw: endpoint {
+ remote-endpoint = <&typec1_dr_sw>;
+ };
+ };
+};
+
+&usbotg2 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ usb2_drd_sw: endpoint {
+ remote-endpoint = <&typec2_dr_sw>;
+ };
+ };
+};
+
&usdhc1 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
- pinctrl-1 = <&pinctrl_usdhc1>;
- pinctrl-2 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
bus-width = <8>;
non-removable;
status = "okay";
};
&usdhc2 {
- pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
- pinctrl-1 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
- pinctrl-2 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_gpio_sleep>;
cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
@@ -183,6 +304,118 @@
status = "okay";
};
+&lpi2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ pinctrl-1 = <&pinctrl_lpi2c2>;
+ status = "okay";
+
+ pcal6524: gpio@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6524>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmic@25 {
+ compatible = "nxp,pca9451a";
+ reg = <0x25>;
+ interrupt-parent = <&pcal6524>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <610000>;
+ regulator-max-microvolt = <950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <670000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <1060000>;
+ regulator-max-microvolt = <1140000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <840000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&lpi2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ status = "okay";
+
+ pcf2131: rtc@53 {
+ compatible = "nxp,pcf2131";
+ reg = <0x53>;
+ interrupt-parent = <&pcal6524>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
&iomuxc {
pinctrl_eqos: eqosgrp {
fsl,pins = <
@@ -203,6 +436,25 @@
>;
};
+ pinctrl_eqos_sleep: eqossleepgrp {
+ fsl,pins = <
+ MX93_PAD_ENET1_MDC__GPIO4_IO00 0x31e
+ MX93_PAD_ENET1_MDIO__GPIO4_IO01 0x31e
+ MX93_PAD_ENET1_RD0__GPIO4_IO10 0x31e
+ MX93_PAD_ENET1_RD1__GPIO4_IO11 0x31e
+ MX93_PAD_ENET1_RD2__GPIO4_IO12 0x31e
+ MX93_PAD_ENET1_RD3__GPIO4_IO13 0x31e
+ MX93_PAD_ENET1_RXC__GPIO4_IO09 0x31e
+ MX93_PAD_ENET1_RX_CTL__GPIO4_IO08 0x31e
+ MX93_PAD_ENET1_TD0__GPIO4_IO05 0x31e
+ MX93_PAD_ENET1_TD1__GPIO4_IO04 0x31e
+ MX93_PAD_ENET1_TD2__GPIO4_IO03 0x31e
+ MX93_PAD_ENET1_TD3__GPIO4_IO02 0x31e
+ MX93_PAD_ENET1_TXC__GPIO4_IO07 0x31e
+ MX93_PAD_ENET1_TX_CTL__GPIO4_IO06 0x31e
+ >;
+ };
+
pinctrl_fec: fecgrp {
fsl,pins = <
MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e
@@ -222,6 +474,32 @@
>;
};
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_fec_sleep: fecsleepgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_MDC__GPIO4_IO14 0x51e
+ MX93_PAD_ENET2_MDIO__GPIO4_IO15 0x51e
+ MX93_PAD_ENET2_RD0__GPIO4_IO24 0x51e
+ MX93_PAD_ENET2_RD1__GPIO4_IO25 0x51e
+ MX93_PAD_ENET2_RD2__GPIO4_IO26 0x51e
+ MX93_PAD_ENET2_RD3__GPIO4_IO27 0x51e
+ MX93_PAD_ENET2_RXC__GPIO4_IO23 0x51e
+ MX93_PAD_ENET2_RX_CTL__GPIO4_IO22 0x51e
+ MX93_PAD_ENET2_TD0__GPIO4_IO19 0x51e
+ MX93_PAD_ENET2_TD1__GPIO4_IO18 0x51e
+ MX93_PAD_ENET2_TD2__GPIO4_IO17 0x51e
+ MX93_PAD_ENET2_TD3__GPIO4_IO16 0x51e
+ MX93_PAD_ENET2_TXC__GPIO4_IO21 0x51e
+ MX93_PAD_ENET2_TX_CTL__GPIO4_IO20 0x51e
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
@@ -238,9 +516,63 @@
>;
};
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_pcal6524: pcal6524grp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e
+ >;
+ };
+
/* need to configure SION for the data and cmd pads, refer to ERR052021 */
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe
MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe
@@ -267,9 +599,41 @@
>;
};
+ pinctrl_usdhc2_gpio_sleep: usdhc2gpiosleepgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x51e
+ >;
+ };
+
/* need to configure SION for the data and cmd pads, refer to ERR052021 */
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe
MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe
@@ -279,4 +643,17 @@
MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
+
+ pinctrl_usdhc2_sleep: usdhc2sleepgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__GPIO3_IO01 0x51e
+ MX93_PAD_SD2_CMD__GPIO3_IO02 0x51e
+ MX93_PAD_SD2_DATA0__GPIO3_IO03 0x51e
+ MX93_PAD_SD2_DATA1__GPIO3_IO04 0x51e
+ MX93_PAD_SD2_DATA2__GPIO3_IO05 0x51e
+ MX93_PAD_SD2_DATA3__GPIO3_IO06 0x51e
+ MX93_PAD_SD2_VSELECT__GPIO3_IO19 0x51e
+ >;
+ };
+
};
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index 601c94e1fac8..4a3f42355cb8 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/clock/imx93-clock.h>
+#include <dt-bindings/dma/fsl-edma.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -183,6 +184,20 @@
status = "disabled";
};
+ usbphynop1: usbphynop1 {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+ clocks = <&clk IMX93_CLK_USB_PHY_BURUNIN>;
+ clock-names = "main_clk";
+ };
+
+ usbphynop2: usbphynop2 {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+ clocks = <&clk IMX93_CLK_USB_PHY_BURUNIN>;
+ clock-names = "main_clk";
+ };
+
soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
@@ -316,6 +331,8 @@
clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
<&clk IMX93_CLK_BUS_AON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 7 0 0>, <&edma1 8 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -328,6 +345,8 @@
clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
<&clk IMX93_CLK_BUS_AON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 9 0 0>, <&edma1 10 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -340,6 +359,8 @@
clocks = <&clk IMX93_CLK_LPSPI1_GATE>,
<&clk IMX93_CLK_BUS_AON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 11 0 0>, <&edma1 12 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -352,6 +373,8 @@
clocks = <&clk IMX93_CLK_LPSPI2_GATE>,
<&clk IMX93_CLK_BUS_AON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 13 0 0>, <&edma1 14 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -361,7 +384,7 @@
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART1_GATE>;
clock-names = "ipg";
- dmas = <&edma1 17 0 1>, <&edma1 16 0 0>;
+ dmas = <&edma1 17 0 FSL_EDMA_RX>, <&edma1 16 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -372,7 +395,7 @@
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART2_GATE>;
clock-names = "ipg";
- dmas = <&edma1 19 0 1>, <&edma1 18 0 0>;
+ dmas = <&edma1 19 0 FSL_EDMA_RX>, <&edma1 18 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -400,7 +423,7 @@
<&clk IMX93_CLK_SAI1_GATE>, <&clk IMX93_CLK_DUMMY>,
<&clk IMX93_CLK_DUMMY>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
- dmas = <&edma1 22 0 1>, <&edma1 21 0 0>;
+ dmas = <&edma1 22 0 FSL_EDMA_RX>, <&edma1 21 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -509,8 +532,7 @@
reg = <0x44530000 0x10000>;
interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_ADC1_GATE>;
clock-names = "ipg";
#io-channel-cells = <1>;
@@ -693,6 +715,8 @@
clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 8 0 0>, <&edma2 9 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -705,6 +729,8 @@
clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 10 0 0>, <&edma2 11 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -717,6 +743,8 @@
clocks = <&clk IMX93_CLK_LPSPI3_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 12 0 0>, <&edma2 13 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -729,6 +757,8 @@
clocks = <&clk IMX93_CLK_LPSPI4_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 14 0 0>, <&edma2 15 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -738,7 +768,7 @@
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART3_GATE>;
clock-names = "ipg";
- dmas = <&edma2 18 0 1>, <&edma2 17 0 0>;
+ dmas = <&edma2 18 0 FSL_EDMA_RX>, <&edma2 17 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -749,7 +779,7 @@
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART4_GATE>;
clock-names = "ipg";
- dmas = <&edma2 20 0 1>, <&edma2 19 0 0>;
+ dmas = <&edma2 20 0 FSL_EDMA_RX>, <&edma2 19 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -760,7 +790,7 @@
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART5_GATE>;
clock-names = "ipg";
- dmas = <&edma2 22 0 1>, <&edma2 21 0 0>;
+ dmas = <&edma2 22 0 FSL_EDMA_RX>, <&edma2 21 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -771,7 +801,7 @@
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART6_GATE>;
clock-names = "ipg";
- dmas = <&edma2 24 0 1>, <&edma2 23 0 0>;
+ dmas = <&edma2 24 0 FSL_EDMA_RX>, <&edma2 23 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -814,7 +844,7 @@
<&clk IMX93_CLK_SAI2_GATE>, <&clk IMX93_CLK_DUMMY>,
<&clk IMX93_CLK_DUMMY>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
- dmas = <&edma2 59 0 1>, <&edma2 58 0 0>;
+ dmas = <&edma2 59 0 FSL_EDMA_RX>, <&edma2 58 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -827,7 +857,7 @@
<&clk IMX93_CLK_SAI3_GATE>, <&clk IMX93_CLK_DUMMY>,
<&clk IMX93_CLK_DUMMY>;
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
- dmas = <&edma2 61 0 1>, <&edma2 60 0 0>;
+ dmas = <&edma2 61 0 FSL_EDMA_RX>, <&edma2 60 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -846,7 +876,7 @@
<&clk IMX93_CLK_DUMMY>,
<&clk IMX93_CLK_AUD_XCVR_GATE>;
clock-names = "ipg", "phy", "spba", "pll_ipg";
- dmas = <&edma2 65 0 1>, <&edma2 66 0 0>;
+ dmas = <&edma2 65 0 FSL_EDMA_RX>, <&edma2 66 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -857,7 +887,7 @@
interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART7_GATE>;
clock-names = "ipg";
- dmas = <&edma2 88 0 1>, <&edma2 87 0 0>;
+ dmas = <&edma2 88 0 FSL_EDMA_RX>, <&edma2 87 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -868,7 +898,7 @@
interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_LPUART8_GATE>;
clock-names = "ipg";
- dmas = <&edma2 90 0 1>, <&edma2 89 0 0>;
+ dmas = <&edma2 90 0 FSL_EDMA_RX>, <&edma2 89 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -882,6 +912,8 @@
clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 71 0 0>, <&edma2 72 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -894,6 +926,8 @@
clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 73 0 0>, <&edma2 74 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -906,6 +940,8 @@
clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 75 0 0>, <&edma2 76 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -918,6 +954,8 @@
clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 77 0 0>, <&edma2 78 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -930,6 +968,8 @@
clocks = <&clk IMX93_CLK_LPSPI5_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 79 0 0>, <&edma2 80 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -942,6 +982,8 @@
clocks = <&clk IMX93_CLK_LPSPI6_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 81 0 0>, <&edma2 82 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -954,6 +996,8 @@
clocks = <&clk IMX93_CLK_LPSPI7_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 83 0 0>, <&edma2 84 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -966,6 +1010,8 @@
clocks = <&clk IMX93_CLK_LPSPI8_GATE>,
<&clk IMX93_CLK_BUS_WAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 85 0 0>, <&edma2 86 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -986,6 +1032,9 @@
<&clk IMX93_CLK_WAKEUP_AXI>,
<&clk IMX93_CLK_USDHC1_GATE>;
clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX93_CLK_USDHC1>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1>;
+ assigned-clock-rates = <400000000>;
bus-width = <8>;
fsl,tuning-start-tap = <1>;
fsl,tuning-step = <2>;
@@ -1000,6 +1049,9 @@
<&clk IMX93_CLK_WAKEUP_AXI>,
<&clk IMX93_CLK_USDHC2_GATE>;
clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX93_CLK_USDHC2>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1>;
+ assigned-clock-rates = <400000000>;
bus-width = <4>;
fsl,tuning-start-tap = <1>;
fsl,tuning-step = <2>;
@@ -1030,6 +1082,8 @@
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
fsl,stop-mode = <&wakeupmix_gpr 0x0c 1>;
+ nvmem-cells = <&eth_mac1>;
+ nvmem-cell-names = "mac-address";
status = "disabled";
};
@@ -1052,6 +1106,8 @@
assigned-clock-rates = <100000000>, <250000000>;
intf_mode = <&wakeupmix_gpr 0x28>;
snps,clk-csr = <0>;
+ nvmem-cells = <&eth_mac2>;
+ nvmem-cell-names = "mac-address";
status = "disabled";
};
@@ -1063,6 +1119,9 @@
<&clk IMX93_CLK_WAKEUP_AXI>,
<&clk IMX93_CLK_USDHC3_GATE>;
clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX93_CLK_USDHC3>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1>;
+ assigned-clock-rates = <400000000>;
bus-width = <4>;
fsl,tuning-start-tap = <1>;
fsl,tuning-step = <2>;
@@ -1136,6 +1195,15 @@
reg = <0x47510000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
+
+ eth_mac1: mac-address@4ec {
+ reg = <0x4ec 0x6>;
+ };
+
+ eth_mac2: mac-address@4f2 {
+ reg = <0x4f2 0x6>;
+ };
+
};
s4muap: mailbox@47520000 {
@@ -1167,6 +1235,50 @@
status = "disabled";
};
+ usbotg1: usb@4c100000 {
+ compatible = "fsl,imx93-usb", "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x4c100000 0x200>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX93_CLK_USB_CONTROLLER_GATE>,
+ <&clk IMX93_CLK_HSIO_32K_GATE>;
+ clock-names = "usb_ctrl_root", "usb_wakeup";
+ assigned-clocks = <&clk IMX93_CLK_HSIO>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ assigned-clock-rates = <133000000>;
+ phys = <&usbphynop1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+ status = "disabled";
+ };
+
+ usbmisc1: usbmisc@4c100200 {
+ compatible = "fsl,imx8mm-usbmisc", "fsl,imx7d-usbmisc",
+ "fsl,imx6q-usbmisc";
+ reg = <0x4c100200 0x200>;
+ #index-cells = <1>;
+ };
+
+ usbotg2: usb@4c200000 {
+ compatible = "fsl,imx93-usb", "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x4c200000 0x200>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX93_CLK_USB_CONTROLLER_GATE>,
+ <&clk IMX93_CLK_HSIO_32K_GATE>;
+ clock-names = "usb_ctrl_root", "usb_wakeup";
+ assigned-clocks = <&clk IMX93_CLK_HSIO>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ assigned-clock-rates = <133000000>;
+ phys = <&usbphynop2>;
+ fsl,usbmisc = <&usbmisc2 0>;
+ status = "disabled";
+ };
+
+ usbmisc2: usbmisc@4c200200 {
+ compatible = "fsl,imx8mm-usbmisc", "fsl,imx7d-usbmisc",
+ "fsl,imx6q-usbmisc";
+ reg = <0x4c200200 0x200>;
+ #index-cells = <1>;
+ };
+
ddr-pmu@4e300dc0 {
compatible = "fsl,imx93-ddr-pmu";
reg = <0x4e300dc0 0x200>;
diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
index 427467df42bf..815241526a0d 100644
--- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
@@ -316,17 +316,11 @@
&mipi_dsi {
samsung,burst-clock-frequency = <891000000>;
samsung,esc-clock-frequency = <20000000>;
+};
- ports {
- port@1 {
- reg = <1>;
-
- mipi_dsi_out: endpoint {
- data-lanes = <1 2 3 4>;
- remote-endpoint = <&lvds_bridge_in>;
- };
- };
- };
+&mipi_dsi_out {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&lvds_bridge_in>;
};
&pwm3 {
diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi
index 5ac1cc9ff50e..fc19ae2e8d3b 100644
--- a/arch/arm64/boot/dts/freescale/s32g2.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi
@@ -3,7 +3,7 @@
* NXP S32G2 SoC family
*
* Copyright (c) 2021 SUSE LLC
- * Copyright (c) 2017-2021 NXP
+ * Copyright 2017-2021, 2024 NXP
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -14,6 +14,18 @@
#address-cells = <2>;
#size-cells = <2>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scmi_buf: shm@d0000000 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0xd0000000 0x0 0x80>;
+ no-map;
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -77,6 +89,19 @@
};
firmware {
+ scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0xc20000fe>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ shmem = <&scmi_buf>;
+
+ clks: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -113,6 +138,16 @@
status = "disabled";
};
+ usdhc0: mmc@402f0000 {
+ compatible = "nxp,s32g2-usdhc";
+ reg = <0x402f0000 0x1000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 32>, <&clks 31>, <&clks 33>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <8>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@50800000 {
compatible = "arm,gic-v3";
reg = <0x50800000 0x10000>,
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
index 9118d8d2ee01..00070c949e2a 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright (c) 2021 SUSE LLC
- * Copyright (c) 2019-2021 NXP
+ * Copyright 2019-2021, 2024 NXP
*/
/dts-v1/;
@@ -32,3 +32,7 @@
&uart0 {
status = "okay";
};
+
+&usdhc0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
index e05ee854cdf5..b3fc12899cae 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright (c) 2021 SUSE LLC
- * Copyright (c) 2019-2021 NXP
+ * Copyright 2019-2021, 2024 NXP
*/
/dts-v1/;
@@ -38,3 +38,7 @@
&uart1 {
status = "okay";
};
+
+&usdhc0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/s32g3.dtsi b/arch/arm64/boot/dts/freescale/s32g3.dtsi
new file mode 100644
index 000000000000..c1b08992754b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/s32g3.dtsi
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021-2023 NXP
+ *
+ * Authors: Ghennadi Procopciuc <ghennadi.procopciuc@nxp.com>
+ * Ciprian Costea <ciprianmarian.costea@nxp.com>
+ * Andra-Teodora Ilie <andra.ilie@nxp.com>
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "nxp,s32g3";
+ interrupt-parent = <&gic>;
+ #address-cells = <0x02>;
+ #size-cells = <0x02>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+
+ core1 {
+ cpu = <&cpu5>;
+ };
+
+ core2 {
+ cpu = <&cpu6>;
+ };
+
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x100>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x101>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x102>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x103>;
+ enable-method = "psci";
+ clocks = <&dfs 0>;
+ };
+ };
+
+ firmware {
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ shmem = <&scmi_shmem>;
+ arm,smc-id = <0xc20000fe>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfs: protocol@13 {
+ reg = <0x13>;
+ #clock-cells = <1>;
+ };
+
+ clks: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+
+ psci: psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scmi_shmem: shm@d0000000 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0xd0000000 0x0 0x80>;
+ no-map;
+ };
+ };
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x80000000>;
+
+ uart0: serial@401c8000 {
+ compatible = "nxp,s32g3-linflexuart",
+ "fsl,s32v234-linflexuart";
+ reg = <0x401c8000 0x3000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
+ status = "disabled";
+ };
+
+ uart1: serial@401cc000 {
+ compatible = "nxp,s32g3-linflexuart",
+ "fsl,s32v234-linflexuart";
+ reg = <0x401cc000 0x3000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_EDGE_RISING>;
+ status = "disabled";
+ };
+
+ uart2: serial@402bc000 {
+ compatible = "nxp,s32g3-linflexuart",
+ "fsl,s32v234-linflexuart";
+ reg = <0x402bc000 0x3000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_EDGE_RISING>;
+ status = "disabled";
+ };
+
+ usdhc0: mmc@402f0000 {
+ compatible = "nxp,s32g3-usdhc",
+ "nxp,s32g2-usdhc";
+ reg = <0x402f0000 0x1000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 32>,
+ <&clks 31>,
+ <&clks 33>;
+ clock-names = "ipg", "ahb", "per";
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@50800000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x50800000 0x10000>,
+ <0x50900000 0x200000>,
+ <0x50400000 0x2000>,
+ <0x50410000 0x2000>,
+ <0x50420000 0x2000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* sec-phys */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* phys */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* virt */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, /* hyp-phys */
+ <GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; /* hyp-virt */
+ arm,no-tick-in-suspend;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
new file mode 100644
index 000000000000..9d674819876e
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021-2023 NXP
+ *
+ * NXP S32G3 Reference Design Board 3 (S32G-VNP-RDB3)
+ */
+
+/dts-v1/;
+
+#include "s32g3.dtsi"
+
+/ {
+ model = "NXP S32G3 Reference Design Board 3 (S32G-VNP-RDB3)";
+ compatible = "nxp,s32g399a-rdb3", "nxp,s32g3";
+
+ aliases {
+ mmc0 = &usdhc0;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ /* 4GiB RAM */
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0 0x80000000>,
+ <0x8 0x80000000 0 0x80000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usdhc0 {
+ bus-width = <8>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
index ed1b5a7a6067..f6bc001c3832 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -31,6 +31,13 @@
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
+ d-cache-size = <0x8000>; /* 32 KiB */
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ i-cache-size = <0x8000>; /* 32 KiB */
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ next-level-cache = <&L2>;
};
cpu@1 {
@@ -38,6 +45,13 @@
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
+ d-cache-size = <0x8000>; /* 32 KiB */
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ i-cache-size = <0x8000>; /* 32 KiB */
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ next-level-cache = <&L2>;
};
cpu@2 {
@@ -45,6 +59,13 @@
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
+ d-cache-size = <0x8000>; /* 32 KiB */
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ i-cache-size = <0x8000>; /* 32 KiB */
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ next-level-cache = <&L2>;
};
cpu@3 {
@@ -52,13 +73,33 @@
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
+ d-cache-size = <0x8000>; /* 32 KiB */
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ i-cache-size = <0x8000>; /* 32 KiB */
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ next-level-cache = <&L2>;
};
};
+ L2: l2-cache {
+ compatible = "cache";
+ cache-unified;
+ cache-size = <0x80000>; /* 512 KiB */
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ cache-level = <2>;
+ };
+
gic: interrupt-controller@f1001000 {
compatible = "arm,gic-400";
reg = <0x0 0xf1001000 0x0 0x1000>, /* GICD */
- <0x0 0xf1002000 0x0 0x100>; /* GICC */
+ <0x0 0xf1002000 0x0 0x2000>, /* GICC */
+ <0x0 0xf1004000 0x0 0x2000>, /* GICH */
+ <0x0 0xf1006000 0x0 0x2000>; /* GICV */
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
#address-cells = <0>;
#interrupt-cells = <3>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index f0672ec65b26..2d304efe081d 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -82,7 +82,7 @@
};
};
- reg_sys_5v: regulator@0 {
+ reg_sys_5v: regulator-0 {
compatible = "regulator-fixed";
regulator-name = "SYS_5V";
regulator-min-microvolt = <5000000>;
@@ -91,7 +91,7 @@
regulator-always-on;
};
- reg_vdd_3v3: regulator@1 {
+ reg_vdd_3v3: regulator-1 {
compatible = "regulator-fixed";
regulator-name = "VDD_3V3";
regulator-min-microvolt = <3300000>;
@@ -101,7 +101,7 @@
vin-supply = <&reg_sys_5v>;
};
- reg_5v_hub: regulator@2 {
+ reg_5v_hub: regulator-2 {
compatible = "regulator-fixed";
regulator-name = "5V_HUB";
regulator-min-microvolt = <5000000>;
@@ -514,6 +514,7 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
+ reg = <0>;
adv7533_in: endpoint {
remote-endpoint = <&dsi_out0>;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index be808bb2544e..a589954c29e2 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -852,7 +852,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- tsensor: tsensor@0,f7030700 {
+ tsensor: tsensor@f7030700 {
compatible = "hisilicon,tsensor";
reg = <0x0 0xf7030700 0x0 0x1000>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
index c4eaebbb448f..b7792d443189 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
@@ -54,7 +54,7 @@
ranges = <0 0 0x0 0x90000000 0x08000000>,
<1 0 0x0 0x98000000 0x08000000>;
- nor-flash@0,0 {
+ nor-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "numonyx,js28f00a", "cfi-flash";
@@ -75,7 +75,7 @@
};
};
- cpld@1,0 {
+ cpld@100000000 {
compatible = "hisilicon,hip05-cpld";
reg = <1 0x0 0x100>;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index 65ddc0698f82..d0912ca5f237 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -279,6 +279,12 @@
};
};
+ refclk200mhz: refclk200mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
@@ -298,12 +304,6 @@
#size-cells = <2>;
ranges;
- refclk200mhz: refclk200mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <200000000>;
- };
-
uart0: serial@80300000 {
compatible = "snps,dw-apb-uart";
reg = <0x0 0x80300000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index f46c33d10750..3d7285e6700e 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -258,6 +258,48 @@
};
};
+ eth2: ethernet-0 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <0>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth3: ethernet-1 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <1>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth0: ethernet-4 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <4>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth1: ethernet-5 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <5>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ refclk: refclk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
@@ -374,12 +416,6 @@
};
};
- refclk: refclk {
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
- #clock-cells = <0>;
- };
-
usb_ohci: usb@a7030000 {
compatible = "generic-ohci";
reg = <0x0 0xa7030000 0x0 0x10000>;
@@ -436,7 +472,7 @@
};
};
- dsaf0: dsa@c7000000 {
+ dsaf0: dsa@c5000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "hisilicon,hns-dsaf-v2";
@@ -570,42 +606,6 @@
};
};
- eth0: ethernet-4 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <4>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth1: ethernet-5 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <5>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth2: ethernet-0 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <0>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth3: ethernet-1 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <1>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
sas0: sas@c3000000 {
compatible = "hisilicon,hip06-sas-v2";
reg = <0 0xc3000000 0 0x10000>;
@@ -733,7 +733,7 @@
status = "disabled";
};
- pcie0: pcie@a0090000 {
+ pcie0: pcie@b0000000 {
compatible = "hisilicon,hip06-pcie-ecam";
reg = <0 0xb0000000 0 0x2000000>,
<0 0xa0090000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 81d907ef43ed..00a6bfa7478c 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1013,6 +1013,42 @@
};
};
+ eth0: ethernet-0 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <4>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth1: ethernet-1 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <5>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth2: ethernet-2 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <0>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth3: ethernet-3 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <1>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
@@ -1343,7 +1379,7 @@
};
};
- dsaf0: dsa@c7000000 {
+ dsaf0: dsa@c5000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "hisilicon,hns-dsaf-v2";
@@ -1483,42 +1519,6 @@
};
};
- eth0: ethernet@4 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <4>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth1: ethernet@5 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <5>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth2: ethernet@0 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <0>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
- eth3: ethernet@1 {
- compatible = "hisilicon,hns-nic-v2";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <1>;
- local-mac-address = [00 00 00 00 00 00];
- status = "disabled";
- dma-coherent;
- };
-
infiniband@c4000000 {
compatible = "hisilicon,hns-roce-v1";
reg = <0x0 0xc4000000 0x0 0x100000>;
@@ -1724,7 +1724,7 @@
status = "disabled";
};
- p0_pcie2_a: pcie@a00a0000 {
+ p0_pcie2_a: pcie@af800000 {
compatible = "hisilicon,hip07-pcie-ecam";
reg = <0 0xaf800000 0 0x800000>,
<0 0xa00a0000 0 0x10000>;
@@ -1745,7 +1745,7 @@
0x0 0 0 4 &mbigen_pcie2_a 671 4>;
status = "disabled";
};
- p0_sec_a: crypto@d2000000 {
+ p0_sec_a: crypto@d0000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x0 0xd0000000 0x0 0x10000>,
<0x0 0xd2000000 0x0 0x10000>,
@@ -1786,7 +1786,7 @@
<605 1>, <606 4>,
<607 1>, <608 4>;
};
- p0_sec_b: crypto@8,d2000000 {
+ p0_sec_b: crypto@8d0000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x8 0xd0000000 0x0 0x10000>,
<0x8 0xd2000000 0x0 0x10000>,
@@ -1827,7 +1827,7 @@
<605 1>, <606 4>,
<607 1>, <608 4>;
};
- p1_sec_a: crypto@400,d2000000 {
+ p1_sec_a: crypto@400d0000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x400 0xd0000000 0x0 0x10000>,
<0x400 0xd2000000 0x0 0x10000>,
@@ -1868,7 +1868,7 @@
<605 1>, <606 4>,
<607 1>, <608 4>;
};
- p1_sec_b: crypto@408,d2000000 {
+ p1_sec_b: crypto@408d0000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x408 0xd0000000 0x0 0x10000>,
<0x408 0xd2000000 0x0 0x10000>,
diff --git a/arch/arm64/boot/dts/intel/keembay-soc.dtsi b/arch/arm64/boot/dts/intel/keembay-soc.dtsi
index 781761d2942b..ae00e9e54e82 100644
--- a/arch/arm64/boot/dts/intel/keembay-soc.dtsi
+++ b/arch/arm64/boot/dts/intel/keembay-soc.dtsi
@@ -70,7 +70,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 0x7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index 76aafa172eb0..2a5eeb21da47 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -80,7 +80,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/lg/lg1312-ref.dts b/arch/arm64/boot/dts/lg/lg1312-ref.dts
index 260a2c5b19e5..cdd10f138098 100644
--- a/arch/arm64/boot/dts/lg/lg1312-ref.dts
+++ b/arch/arm64/boot/dts/lg/lg1312-ref.dts
@@ -22,7 +22,7 @@
serial2 = &uart2;
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x00000000 0x20000000>;
};
diff --git a/arch/arm64/boot/dts/lg/lg1313-ref.dts b/arch/arm64/boot/dts/lg/lg1313-ref.dts
index e89ae853788a..6ace977ff4cf 100644
--- a/arch/arm64/boot/dts/lg/lg1313-ref.dts
+++ b/arch/arm64/boot/dts/lg/lg1313-ref.dts
@@ -22,7 +22,7 @@
serial2 = &uart2;
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x00000000 0x20000000>;
};
diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
index 5591939e057b..75377c292bcb 100644
--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
@@ -68,7 +68,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 12 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
index d6d37a1f6f38..91c2f8b4edfa 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
@@ -25,8 +25,6 @@
/* Actual device is MV88E6361 */
switch: switch@0 {
compatible = "marvell,mv88e6190";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
index 870bb380a40a..b3cc2b7b5d19 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
@@ -114,54 +114,84 @@
};
&mdio {
+ /* Switch is @3, not @1 */
+ /delete-node/ ethernet-switch@1;
extphy: ethernet-phy@1 {
reg = <1>;
reset-gpios = <&gpionb 2 GPIO_ACTIVE_LOW>;
};
-};
-&switch0 {
- reg = <3>;
+ switch0: ethernet-switch@3 {
+ compatible = "marvell,mv88e6085";
+ reg = <3>;
- reset-gpios = <&gpiosb 23 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpiosb 23 GPIO_ACTIVE_LOW>;
+ dsa,member = <0 0>;
- ethernet-ports {
- switch0port1: ethernet-port@1 {
- reg = <1>;
- label = "lan0";
- phy-handle = <&switch0phy0>;
- };
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0port0: ethernet-port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
- switch0port2: ethernet-port@2 {
- reg = <2>;
- label = "lan1";
- phy-handle = <&switch0phy1>;
- };
+ switch0port1: ethernet-port@1 {
+ reg = <1>;
+ label = "lan0";
+ phy-handle = <&switch0phy0>;
+ };
- switch0port3: ethernet-port@3 {
- reg = <3>;
- label = "lan2";
- phy-handle = <&switch0phy2>;
- };
+ switch0port2: ethernet-port@2 {
+ reg = <2>;
+ label = "lan1";
+ phy-handle = <&switch0phy1>;
+ };
- switch0port4: ethernet-port@4 {
- reg = <4>;
- label = "lan3";
- phy-handle = <&switch0phy3>;
- };
+ switch0port3: ethernet-port@3 {
+ reg = <3>;
+ label = "lan2";
+ phy-handle = <&switch0phy2>;
+ };
- switch0port5: ethernet-port@5 {
- reg = <5>;
- label = "wan";
- phy-handle = <&extphy>;
- phy-mode = "sgmii";
+ switch0port4: ethernet-port@4 {
+ reg = <4>;
+ label = "lan3";
+ phy-handle = <&switch0phy3>;
+ };
+
+ switch0port5: ethernet-port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-handle = <&extphy>;
+ phy-mode = "sgmii";
+ };
};
- };
- mdio {
- switch0phy3: ethernet-phy@14 {
- reg = <0x14>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0phy0: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+ switch0phy1: ethernet-phy@12 {
+ reg = <0x12>;
+ };
+ switch0phy2: ethernet-phy@13 {
+ reg = <0x13>;
+ };
+ switch0phy3: ethernet-phy@14 {
+ reg = <0x14>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index f1a9f2234359..54453b0a91f9 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -216,8 +216,6 @@
assigned-clock-rates = <20000000>;
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 1cc3fa1c354d..9603223dd761 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -68,7 +68,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index 7ec7c789d87e..fdf88cd0eb02 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -61,7 +61,7 @@
compatible = "simple-bus";
ranges = <0x0 0x0 0xf0000000 0x1000000>;
- smmu: iommu@5000000 {
+ smmu: iommu@100000 {
compatible = "marvell,ap806-smmu-500", "arm,mmu-500";
reg = <0x100000 0x100000>;
dma-coherent;
diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
index 6fcc34f7b464..5e7d6de3cdde 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
@@ -26,7 +26,7 @@
reg = <0x0 0x0 0x0 0x80000000>;
};
- ap0_reg_mmc_vccq: ap0_mmc_vccq@0 {
+ ap0_reg_mmc_vccq: regulator-1 {
compatible = "regulator-gpio";
regulator-name = "ap0_mmc_vccq";
regulator-min-microvolt = <1800000>;
@@ -36,7 +36,7 @@
3300000 0x0>;
};
- cp0_reg_usb3_vbus1: cp0_usb3_vbus@1 {
+ cp0_reg_usb3_vbus1: regulator-2 {
compatible = "regulator-fixed";
regulator-name = "cp0-xhci1-vbus";
regulator-min-microvolt = <5000000>;
@@ -45,16 +45,16 @@
gpio = <&expander0 8 GPIO_ACTIVE_HIGH>;
};
- cp0_usb3_0_phy0: cp0_usb3_phy0 {
+ cp0_usb3_0_phy0: usb-phy-1 {
compatible = "usb-nop-xceiv";
};
- cp0_usb3_0_phy1: cp0_usb3_phy1 {
+ cp0_usb3_0_phy1: usb-phy-2 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_vbus1>;
};
- cp0_reg_sd_vccq: cp0_sd_vccq@0 {
+ cp0_reg_sd_vccq: regulator-3 {
compatible = "regulator-gpio";
regulator-name = "cp0_sd_vccq";
regulator-min-microvolt = <1800000>;
@@ -64,7 +64,7 @@
3300000 0x0>;
};
- cp0_reg_sd_vcc: cp0_sd_vcc@0 {
+ cp0_reg_sd_vcc: regulator-4 {
compatible = "regulator-fixed";
regulator-name = "cp0_sd_vcc";
regulator-min-microvolt = <3300000>;
@@ -82,7 +82,6 @@
tx-disable-gpios = <&expander0 2 GPIO_ACTIVE_HIGH>;
tx-fault-gpios = <&cp0_gpio1 24 GPIO_ACTIVE_HIGH>;
maximum-power-milliwatt = <3000>;
- status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
index 6eb6a175de38..be56a2336265 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
@@ -30,7 +30,7 @@
reg = <0x0 0x0 0x0 0x80000000>;
};
- ap0_reg_sd_vccq: ap0_sd_vccq@0 {
+ ap0_reg_sd_vccq: regulator-1 {
compatible = "regulator-gpio";
regulator-name = "ap0_sd_vccq";
regulator-min-microvolt = <1800000>;
@@ -39,7 +39,7 @@
states = <1800000 0x1 3300000 0x0>;
};
- cp0_reg_usb3_vbus0: cp0_usb3_vbus@0 {
+ cp0_reg_usb3_vbus0: regulator-2 {
compatible = "regulator-fixed";
regulator-name = "cp0-xhci0-vbus";
regulator-min-microvolt = <5000000>;
@@ -48,12 +48,12 @@
gpio = <&expander0 0 GPIO_ACTIVE_HIGH>;
};
- cp0_usb3_0_phy0: cp0_usb3_phy@0 {
+ cp0_usb3_0_phy0: usb-phy-1 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_vbus0>;
};
- cp0_reg_usb3_vbus1: cp0_usb3_vbus@1 {
+ cp0_reg_usb3_vbus1: regulator-3 {
compatible = "regulator-fixed";
regulator-name = "cp0-xhci1-vbus";
regulator-min-microvolt = <5000000>;
@@ -62,12 +62,12 @@
gpio = <&expander0 1 GPIO_ACTIVE_HIGH>;
};
- cp0_usb3_0_phy1: cp0_usb3_phy@1 {
+ cp0_usb3_0_phy1: usb-phy-2 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_vbus1>;
};
- cp0_reg_sd_vccq: cp0_sd_vccq@0 {
+ cp0_reg_sd_vccq: regulator-4 {
compatible = "regulator-gpio";
regulator-name = "cp0_sd_vccq";
regulator-min-microvolt = <1800000>;
@@ -77,7 +77,7 @@
3300000 0x0>;
};
- cp0_reg_sd_vcc: cp0_sd_vcc@0 {
+ cp0_reg_sd_vcc: regulator-5 {
compatible = "regulator-fixed";
regulator-name = "cp0_sd_vcc";
regulator-min-microvolt = <3300000>;
@@ -87,7 +87,7 @@
regulator-always-on;
};
- cp0_sfp_eth0: sfp-eth@0 {
+ cp0_sfp_eth0: sfp-eth-1 {
compatible = "sff,sfp";
i2c-bus = <&cp0_sfpp0_i2c>;
los-gpios = <&cp0_module_expander1 11 GPIO_ACTIVE_HIGH>;
@@ -311,8 +311,6 @@
reg = <0x700680 0x50>;
flash@0 {
- #address-cells = <0x1>;
- #size-cells = <0x1>;
compatible = "jedec,spi-nor";
reg = <0x0>;
/* On-board MUX does not allow higher frequencies */
diff --git a/arch/arm64/boot/dts/marvell/cn9131-db.dtsi b/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
index ff8422fae31b..ad7360c83048 100644
--- a/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
@@ -18,7 +18,7 @@
ethernet4 = &cp1_eth1;
};
- cp1_reg_usb3_vbus0: cp1_usb3_vbus@0 {
+ cp1_reg_usb3_vbus0: regulator-6 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&cp1_xhci0_vbus_pins>;
@@ -29,12 +29,12 @@
gpio = <&cp1_gpio1 3 GPIO_ACTIVE_HIGH>;
};
- cp1_usb3_0_phy0: cp1_usb3_phy0 {
+ cp1_usb3_0_phy0: usb-phy-3 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp1_reg_usb3_vbus0>;
};
- cp1_sfp_eth1: sfp-eth1 {
+ cp1_sfp_eth1: sfp-eth-2 {
compatible = "sff,sfp";
i2c-bus = <&cp1_i2c0>;
los-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_HIGH>;
@@ -138,8 +138,6 @@
reg = <0x700680 0x50>;
flash@0 {
- #address-cells = <0x1>;
- #size-cells = <0x1>;
compatible = "jedec,spi-nor";
reg = <0x0>;
/* On-board MUX does not allow higher frequencies */
diff --git a/arch/arm64/boot/dts/marvell/cn9132-db.dtsi b/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
index 512a4fa2861e..e753cfdac697 100644
--- a/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
@@ -17,7 +17,7 @@
ethernet5 = &cp2_eth0;
};
- cp2_reg_usb3_vbus0: cp2_usb3_vbus@0 {
+ cp2_reg_usb3_vbus0: regulator-7 {
compatible = "regulator-fixed";
regulator-name = "cp2-xhci0-vbus";
regulator-min-microvolt = <5000000>;
@@ -26,12 +26,12 @@
gpio = <&cp2_gpio1 2 GPIO_ACTIVE_HIGH>;
};
- cp2_usb3_0_phy0: cp2_usb3_phy0 {
+ cp2_usb3_0_phy0: usb-phy-4 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp2_reg_usb3_vbus0>;
};
- cp2_reg_usb3_vbus1: cp2_usb3_vbus@1 {
+ cp2_reg_usb3_vbus1: regulator-8 {
compatible = "regulator-fixed";
regulator-name = "cp2-xhci1-vbus";
regulator-min-microvolt = <5000000>;
@@ -40,12 +40,12 @@
gpio = <&cp2_gpio1 3 GPIO_ACTIVE_HIGH>;
};
- cp2_usb3_0_phy1: cp2_usb3_phy1 {
+ cp2_usb3_0_phy1: usb-phy-5 {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp2_reg_usb3_vbus1>;
};
- cp2_reg_sd_vccq: cp2_sd_vccq@0 {
+ cp2_reg_sd_vccq: regulator-9 {
compatible = "regulator-gpio";
regulator-name = "cp2_sd_vcc";
regulator-min-microvolt = <1800000>;
@@ -54,7 +54,7 @@
states = <1800000 0x1 3300000 0x0>;
};
- cp2_sfp_eth0: sfp-eth0 {
+ cp2_sfp_eth0: sfp-eth-3 {
compatible = "sff,sfp";
i2c-bus = <&cp2_sfpp0_i2c>;
los-gpios = <&cp2_module_expander1 11 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 0c38f7b51763..234e3b23d7a8 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -129,7 +129,7 @@
};
&pio {
- eth_default: eth_default {
+ eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@@ -156,7 +156,7 @@
};
};
- eth_sleep: eth_sleep {
+ eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@@ -182,14 +182,14 @@
};
};
- usb0_id_pins_float: usb0_iddig {
+ usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
- usb1_id_pins_float: usb1_iddig {
+ usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 6d218caa198c..082672efba0a 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -249,10 +249,11 @@
#clock-cells = <1>;
};
- infracfg: syscon@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pericfg: syscon@10003000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 3ee9266fa8e9..917fa39a74f8 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -252,7 +252,7 @@
clock-names = "hif_sel";
};
- cir: cir@10009000 {
+ cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -283,16 +283,14 @@
};
};
- apmixedsys: apmixedsys@10209000 {
- compatible = "mediatek,mt7622-apmixedsys",
- "syscon";
+ apmixedsys: clock-controller@10209000 {
+ compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
- topckgen: topckgen@10210000 {
- compatible = "mediatek,mt7622-topckgen",
- "syscon";
+ topckgen: clock-controller@10210000 {
+ compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@@ -515,7 +513,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
- reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@@ -734,9 +731,8 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
- ssusbsys: ssusbsys@1a000000 {
- compatible = "mediatek,mt7622-ssusbsys",
- "syscon";
+ ssusbsys: clock-controller@1a000000 {
+ compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -793,9 +789,8 @@
};
};
- pciesys: pciesys@1a100800 {
- compatible = "mediatek,mt7622-pciesys",
- "syscon";
+ pciesys: clock-controller@1a100800 {
+ compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -921,12 +916,13 @@
};
};
- hifsys: syscon@1af00000 {
- compatible = "mediatek,mt7622-hifsys", "syscon";
+ hifsys: clock-controller@1af00000 {
+ compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
+ #clock-cells = <1>;
};
- ethsys: syscon@1b000000 {
+ ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@@ -966,9 +962,7 @@
};
eth: ethernet@1b100000 {
- compatible = "mediatek,mt7622-eth",
- "mediatek,mt2701-eth",
- "syscon";
+ compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
index e04b1c0c0ebb..ed79ad1ae871 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
@@ -146,19 +146,19 @@
&cpu_thermal {
cooling-maps {
- cpu-active-high {
+ map-cpu-active-high {
/* active: set fan to cooling level 2 */
cooling-device = <&fan 2 2>;
trip = <&cpu_trip_active_high>;
};
- cpu-active-med {
+ map-cpu-active-med {
/* active: set fan to cooling level 1 */
cooling-device = <&fan 1 1>;
trip = <&cpu_trip_active_med>;
};
- cpu-active-low {
+ map-cpu-active-low {
/* active: set fan to cooling level 0 */
cooling-device = <&fan 0 0>;
trip = <&cpu_trip_active_low>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index b3f416b9a7a4..559990dcd1d1 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -332,9 +332,8 @@
reg = <0 0x1100c800 0 0x800>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&infracfg CLK_INFRA_THERM_CK>,
- <&infracfg CLK_INFRA_ADC_26M_CK>,
- <&infracfg CLK_INFRA_ADC_FRC_CK>;
- clock-names = "therm", "auxadc", "adc_32k";
+ <&infracfg CLK_INFRA_ADC_26M_CK>;
+ clock-names = "therm", "auxadc";
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
#thermal-sensor-cells = <1>;
@@ -492,8 +491,6 @@
compatible = "mediatek,mt7986-ethsys",
"syscon";
reg = <0 0x15000000 0 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -556,7 +553,6 @@
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
- #reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
mediatek,ethsys = <&ethsys>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
index a2e74b829320..6a7ae616512d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
@@ -82,7 +82,8 @@
};
&mmc1 {
- bt_reset: bt-reset {
+ bluetooth@2 {
+ reg = <2>;
compatible = "mediatek,mt7921s-bluetooth";
pinctrl-names = "default";
pinctrl-0 = <&bt_pins_reset>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 6bd7424ef66c..100191c6453b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -433,7 +433,6 @@
};
&mt6358_vgpu_reg {
- regulator-min-microvolt = <625000>;
regulator-max-microvolt = <900000>;
regulator-coupled-with = <&mt6358_vsram_gpu_reg>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 93dfbf130231..774ae5d9143f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1637,6 +1637,7 @@
compatible = "mediatek,mt8183-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
};
gpu: gpu@13040000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index 3dea28f1d806..1807e9d6cb0e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -1296,7 +1296,7 @@
* regulator coupling requirements.
*/
regulator-name = "ppvar_dvdd_vgpu";
- regulator-min-microvolt = <600000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <6250>;
regulator-enable-ramp-delay = <200>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 9b738f6a5d21..7a704246678f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1421,7 +1421,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1431,7 +1431,7 @@
mt6315_6_vbuck3: vbuck3 {
regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1448,7 +1448,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <606250>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 05e401670bce..84cbdf6e9eb0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -1464,6 +1464,7 @@
reg = <0 0x14001000 0 0x1000>;
interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
<CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index f94c07f8b933..4a11918da370 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -264,6 +264,38 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu1 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu2 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu3 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu4 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu5 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu6 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu7 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
&dp_intf0 {
status = "okay";
@@ -1214,7 +1246,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@@ -1232,7 +1264,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <625000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index ea6dc220e1cc..5d8b68f86ce4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2028,6 +2028,7 @@
compatible = "mediatek,mt8195-vppsys0", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
};
dma-controller@14001000 {
@@ -2251,6 +2252,7 @@
compatible = "mediatek,mt8195-vppsys1", "syscon";
reg = <0 0x14f00000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
};
mutex@14f01000 {
@@ -3080,6 +3082,7 @@
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
};
@@ -3261,6 +3264,7 @@
interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};
@@ -3331,6 +3335,7 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
clock-names = "vdo1_mutex";
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
index 9cbd6dd8f671..d0b03dc4d3f4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -165,7 +165,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a35-pmu";
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index 24075cd91420..c3029e0abacc 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -447,7 +447,7 @@
pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x6 0x110102d4 0x24>;
+ reg = <0x6 0x110102f8 0x24>;
};
mdio3: mdio@61101031c {
@@ -460,7 +460,7 @@
reg = <0x6 0x1101031c 0x24>;
};
- serdes: serdes@10808000 {
+ serdes: serdes@610808000 {
compatible = "microchip,sparx5-serdes";
#phy-cells = <1>;
clocks = <&sys_clk>;
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
index f3e226de5e5e..2c5574734c9e 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
@@ -15,234 +15,234 @@
leds {
compatible = "gpio-leds";
- led@0 {
+ led-0 {
label = "twr0:green";
gpios = <&sgpio_out0 8 0 GPIO_ACTIVE_LOW>;
};
- led@1 {
+ led-1 {
label = "twr0:yellow";
gpios = <&sgpio_out0 8 1 GPIO_ACTIVE_LOW>;
};
- led@2 {
+ led-2 {
label = "twr1:green";
gpios = <&sgpio_out0 9 0 GPIO_ACTIVE_LOW>;
};
- led@3 {
+ led-3 {
label = "twr1:yellow";
gpios = <&sgpio_out0 9 1 GPIO_ACTIVE_LOW>;
};
- led@4 {
+ led-4 {
label = "twr2:green";
gpios = <&sgpio_out0 10 0 GPIO_ACTIVE_LOW>;
};
- led@5 {
+ led-5 {
label = "twr2:yellow";
gpios = <&sgpio_out0 10 1 GPIO_ACTIVE_LOW>;
};
- led@6 {
+ led-6 {
label = "twr3:green";
gpios = <&sgpio_out0 11 0 GPIO_ACTIVE_LOW>;
};
- led@7 {
+ led-7 {
label = "twr3:yellow";
gpios = <&sgpio_out0 11 1 GPIO_ACTIVE_LOW>;
};
- led@8 {
+ led-8 {
label = "eth12:green";
gpios = <&sgpio_out0 12 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@9 {
+ led-9 {
label = "eth12:yellow";
gpios = <&sgpio_out0 12 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@10 {
+ led-10 {
label = "eth13:green";
gpios = <&sgpio_out0 13 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@11 {
+ led-11 {
label = "eth13:yellow";
gpios = <&sgpio_out0 13 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@12 {
+ led-12 {
label = "eth14:green";
gpios = <&sgpio_out0 14 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@13 {
+ led-13 {
label = "eth14:yellow";
gpios = <&sgpio_out0 14 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@14 {
+ led-14 {
label = "eth15:green";
gpios = <&sgpio_out0 15 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@15 {
+ led-15 {
label = "eth15:yellow";
gpios = <&sgpio_out0 15 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@16 {
+ led-16 {
label = "eth48:green";
gpios = <&sgpio_out1 16 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@17 {
+ led-17 {
label = "eth48:yellow";
gpios = <&sgpio_out1 16 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@18 {
+ led-18 {
label = "eth49:green";
gpios = <&sgpio_out1 17 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@19 {
+ led-19 {
label = "eth49:yellow";
gpios = <&sgpio_out1 17 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@20 {
+ led-20 {
label = "eth50:green";
gpios = <&sgpio_out1 18 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@21 {
+ led-21 {
label = "eth50:yellow";
gpios = <&sgpio_out1 18 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@22 {
+ led-22 {
label = "eth51:green";
gpios = <&sgpio_out1 19 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@23 {
+ led-23 {
label = "eth51:yellow";
gpios = <&sgpio_out1 19 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@24 {
+ led-24 {
label = "eth52:green";
gpios = <&sgpio_out1 20 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@25 {
+ led-25 {
label = "eth52:yellow";
gpios = <&sgpio_out1 20 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@26 {
+ led-26 {
label = "eth53:green";
gpios = <&sgpio_out1 21 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@27 {
+ led-27 {
label = "eth53:yellow";
gpios = <&sgpio_out1 21 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@28 {
+ led-28 {
label = "eth54:green";
gpios = <&sgpio_out1 22 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@29 {
+ led-29 {
label = "eth54:yellow";
gpios = <&sgpio_out1 22 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@30 {
+ led-30 {
label = "eth55:green";
gpios = <&sgpio_out1 23 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@31 {
+ led-31 {
label = "eth55:yellow";
gpios = <&sgpio_out1 23 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@32 {
+ led-32 {
label = "eth56:green";
gpios = <&sgpio_out1 24 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@33 {
+ led-33 {
label = "eth56:yellow";
gpios = <&sgpio_out1 24 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@34 {
+ led-34 {
label = "eth57:green";
gpios = <&sgpio_out1 25 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@35 {
+ led-35 {
label = "eth57:yellow";
gpios = <&sgpio_out1 25 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@36 {
+ led-36 {
label = "eth58:green";
gpios = <&sgpio_out1 26 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@37 {
+ led-37 {
label = "eth58:yellow";
gpios = <&sgpio_out1 26 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@38 {
+ led-38 {
label = "eth59:green";
gpios = <&sgpio_out1 27 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@39 {
+ led-39 {
label = "eth59:yellow";
gpios = <&sgpio_out1 27 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@40 {
+ led-40 {
label = "eth60:green";
gpios = <&sgpio_out1 28 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@41 {
+ led-41 {
label = "eth60:yellow";
gpios = <&sgpio_out1 28 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@42 {
+ led-42 {
label = "eth61:green";
gpios = <&sgpio_out1 29 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@43 {
+ led-43 {
label = "eth61:yellow";
gpios = <&sgpio_out1 29 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@44 {
+ led-44 {
label = "eth62:green";
gpios = <&sgpio_out1 30 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@45 {
+ led-45 {
label = "eth62:yellow";
gpios = <&sgpio_out1 30 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@46 {
+ led-46 {
label = "eth63:green";
gpios = <&sgpio_out1 31 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@47 {
+ led-47 {
label = "eth63:yellow";
gpios = <&sgpio_out1 31 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
@@ -274,15 +274,6 @@
&spi0 {
status = "okay";
- flash@0 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0>;
- };
-};
-
-&spi0 {
- status = "okay";
spi@0 {
compatible = "spi-mux";
mux-controls = <&mux>;
@@ -395,13 +386,13 @@
};
&axi {
- i2c0_imux: i2c0-imux@0 {
+ i2c0_imux: i2c-mux-0 {
compatible = "i2c-mux-pinctrl";
#address-cells = <1>;
#size-cells = <0>;
i2c-parent = <&i2c0>;
};
- i2c0_emux: i2c0-emux@0 {
+ i2c0_emux: i2c-mux-1 {
compatible = "i2c-mux-gpio";
#address-cells = <1>;
#size-cells = <0>;
@@ -427,62 +418,62 @@
pinctrl-10 = <&i2cmux_10>;
pinctrl-11 = <&i2cmux_11>;
pinctrl-12 = <&i2cmux_pins_i>;
- i2c_sfp1: i2c_sfp1 {
+ i2c_sfp1: i2c@0 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp2: i2c_sfp2 {
+ i2c_sfp2: i2c@1 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp3: i2c_sfp3 {
+ i2c_sfp3: i2c@2 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp4: i2c_sfp4 {
+ i2c_sfp4: i2c@3 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp5: i2c_sfp5 {
+ i2c_sfp5: i2c@4 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp6: i2c_sfp6 {
+ i2c_sfp6: i2c@5 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp7: i2c_sfp7 {
+ i2c_sfp7: i2c@6 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp8: i2c_sfp8 {
+ i2c_sfp8: i2c@7 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp9: i2c_sfp9 {
+ i2c_sfp9: i2c@8 {
reg = <0x8>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp10: i2c_sfp10 {
+ i2c_sfp10: i2c@9 {
reg = <0x9>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp11: i2c_sfp11 {
+ i2c_sfp11: i2c@a {
reg = <0xa>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp12: i2c_sfp12 {
+ i2c_sfp12: i2c@b {
reg = <0xb>;
#address-cells = <1>;
#size-cells = <0>;
@@ -495,42 +486,42 @@
&gpio 61 GPIO_ACTIVE_HIGH
&gpio 54 GPIO_ACTIVE_HIGH>;
idle-state = <0x8>;
- i2c_sfp13: i2c_sfp13 {
+ i2c_sfp13: i2c@0 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp14: i2c_sfp14 {
+ i2c_sfp14: i2c@1 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp15: i2c_sfp15 {
+ i2c_sfp15: i2c@2 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp16: i2c_sfp16 {
+ i2c_sfp16: i2c@3 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp17: i2c_sfp17 {
+ i2c_sfp17: i2c@4 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp18: i2c_sfp18 {
+ i2c_sfp18: i2c@5 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp19: i2c_sfp19 {
+ i2c_sfp19: i2c@6 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp20: i2c_sfp20 {
+ i2c_sfp20: i2c@7 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
index 82ce007d9959..af2f1831f07f 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
@@ -15,42 +15,42 @@
leds {
compatible = "gpio-leds";
- led@0 {
+ led-0 {
label = "eth60:yellow";
gpios = <&sgpio_out1 28 0 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@1 {
+ led-1 {
label = "eth60:green";
gpios = <&sgpio_out1 28 1 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@2 {
+ led-2 {
label = "eth61:yellow";
gpios = <&sgpio_out1 29 0 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@3 {
+ led-3 {
label = "eth61:green";
gpios = <&sgpio_out1 29 1 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@4 {
+ led-4 {
label = "eth62:yellow";
gpios = <&sgpio_out1 30 0 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@5 {
+ led-5 {
label = "eth62:green";
gpios = <&sgpio_out1 30 1 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@6 {
+ led-6 {
label = "eth63:yellow";
gpios = <&sgpio_out1 31 0 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@7 {
+ led-7 {
label = "eth63:green";
gpios = <&sgpio_out1 31 1 GPIO_ACTIVE_LOW>;
default-state = "off";
@@ -89,15 +89,6 @@
&spi0 {
status = "okay";
- flash@0 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0>;
- };
-};
-
-&spi0 {
- status = "okay";
spi@0 {
compatible = "spi-mux";
mux-controls = <&mux>;
@@ -129,7 +120,7 @@
};
&axi {
- i2c0_imux: i2c0-imux@0 {
+ i2c0_imux: i2c-mux {
compatible = "i2c-mux-pinctrl";
#address-cells = <1>;
#size-cells = <0>;
@@ -146,22 +137,22 @@
pinctrl-2 = <&i2cmux_s31>;
pinctrl-3 = <&i2cmux_s32>;
pinctrl-4 = <&i2cmux_pins_i>;
- i2c_sfp1: i2c_sfp1 {
+ i2c_sfp1: i2c@0 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp2: i2c_sfp2 {
+ i2c_sfp2: i2c@1 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp3: i2c_sfp3 {
+ i2c_sfp3: i2c@2 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c_sfp4: i2c_sfp4 {
+ i2c_sfp4: i2c@3 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/nuvoton/nuvoton-npcm845-evb.dts b/arch/arm64/boot/dts/nuvoton/nuvoton-npcm845-evb.dts
index a5ab2bc0f835..eeceb5b292a8 100644
--- a/arch/arm64/boot/dts/nuvoton/nuvoton-npcm845-evb.dts
+++ b/arch/arm64/boot/dts/nuvoton/nuvoton-npcm845-evb.dts
@@ -16,7 +16,7 @@
stdout-path = &serial0;
};
- memory {
+ memory@0 {
reg = <0x0 0x0 0x0 0x40000000>;
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index 14d58859bb55..683ac124523b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -9,8 +9,8 @@
compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
aliases {
- rtc0 = "/i2c@7000d000/as3722@40";
- rtc1 = "/rtc@7000e000";
+ rtc0 = &as3722;
+ rtc1 = &tegra_rtc;
serial0 = &uarta;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 7e24a212c7e4..5bcccfef3f7f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -572,7 +572,7 @@
status = "disabled";
};
- rtc@7000e000 {
+ tegra_rtc: rtc@7000e000 {
compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
reg = <0x0 0x7000e000 0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 9ebb7369256e..2e5b6b2c1f56 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -25,7 +25,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0xc0000000>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 47f8268e46bf..882b1d1f4ada 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -2004,7 +2004,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a57-pmu";
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index 78cbfdd98dd1..f2e2d8d6845b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -4406,6 +4406,22 @@
*/
status = "disabled";
};
+
+ crypto@15820000 {
+ compatible = "nvidia,tegra234-se-aes";
+ reg = <0x00 0x15820000 0x00 0x10000>;
+ clocks = <&bpmp TEGRA234_CLK_SE>;
+ iommus = <&smmu_niso1 TEGRA234_SID_SES_SE1>;
+ dma-coherent;
+ };
+
+ crypto@15840000 {
+ compatible = "nvidia,tegra234-se-hash";
+ reg = <0x00 0x15840000 0x00 0x10000>;
+ clocks = <&bpmp TEGRA234_CLK_SE>;
+ iommus = <&smmu_niso1 TEGRA234_SID_SES_SE2>;
+ dma-coherent;
+ };
};
pcie@140a0000 {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 7d40ec5e7d21..f63abb43e9fe 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -241,6 +241,7 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8450-sony-xperia-nagara-pdx224.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8550-sony-xperia-yodo-pdx234.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index 9ffad7d1f2b6..aba08424aa38 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -91,7 +91,7 @@
compatible = "gpio-leds";
- led@1 {
+ led-1 {
label = "apq8016-sbc:green:user1";
function = LED_FUNCTION_HEARTBEAT;
color = <LED_COLOR_ID_GREEN>;
@@ -100,7 +100,7 @@
default-state = "off";
};
- led@2 {
+ led-2 {
label = "apq8016-sbc:green:user2";
function = LED_FUNCTION_DISK_ACTIVITY;
color = <LED_COLOR_ID_GREEN>;
@@ -109,7 +109,7 @@
default-state = "off";
};
- led@3 {
+ led-3 {
label = "apq8016-sbc:green:user3";
function = LED_FUNCTION_DISK_ACTIVITY;
color = <LED_COLOR_ID_GREEN>;
@@ -118,7 +118,7 @@
default-state = "off";
};
- led@4 {
+ led-4 {
label = "apq8016-sbc:green:user4";
color = <LED_COLOR_ID_GREEN>;
gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>;
@@ -127,7 +127,7 @@
default-state = "off";
};
- led@5 {
+ led-5 {
label = "apq8016-sbc:yellow:wlan";
function = LED_FUNCTION_WLAN;
color = <LED_COLOR_ID_YELLOW>;
@@ -136,7 +136,7 @@
default-state = "off";
};
- led@6 {
+ led-6 {
label = "apq8016-sbc:blue:bt";
function = LED_FUNCTION_BLUETOOTH;
color = <LED_COLOR_ID_BLUE>;
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index 4e29adea570a..17ab6c475958 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -907,6 +907,16 @@
"axi_s_sticky";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
index 1b8379ba87f9..34e2f80514a3 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
@@ -16,7 +16,7 @@
stdout-path = "serial0";
};
- memory {
+ memory@40000000 {
device_type = "memory";
reg = <0x0 0x40000000 0x0 0x20000000>;
};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index e5b89753aa5c..5d42de829e75 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -323,6 +323,13 @@
bias-disable;
};
+ serial_5_pins: serial5-state {
+ pins = "gpio9", "gpio16";
+ function = "blsp5_uart";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
i2c_0_pins: i2c-0-state {
pins = "gpio42", "gpio43";
function = "blsp1_i2c";
@@ -349,7 +356,7 @@
"gpio5", "gpio6", "gpio7",
"gpio8", "gpio10", "gpio11",
"gpio12", "gpio13", "gpio14",
- "gpio15", "gpio16", "gpio17";
+ "gpio15", "gpio17";
function = "qpic";
drive-strength = <8>;
bias-disable;
@@ -471,6 +478,18 @@
status = "disabled";
};
+ blsp1_uart6: serial@78b4000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b4000 0x200>;
+ interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART6_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-0 = <&serial_5_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
blsp1_spi1: spi@78b5000 {
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
@@ -864,6 +883,16 @@
"ahb",
"axi_m_sticky";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0: pcie@20000000 {
@@ -929,6 +958,16 @@
"axi_m_sticky",
"axi_s_sticky";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
index 3a3e794c022f..7f0c2c1b8a94 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
@@ -12,7 +12,7 @@
/ {
model = "Longcheer L8150";
- compatible = "longcheer,l8150", "qcom,msm8916-v1-qrd/9-v1", "qcom,msm8916";
+ compatible = "longcheer,l8150", "qcom,msm8916";
chassis-type = "handset";
aliases {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
index ac527a3a0826..c11a845e91bb 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
@@ -9,7 +9,7 @@
/ {
model = "Qualcomm Technologies, Inc. MSM 8916 MTP";
- compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp/1", "qcom,msm8916";
+ compatible = "qcom,msm8916-mtp", "qcom,msm8916";
chassis-type = "handset";
aliases {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index 2937495940ea..4bbbee80b5e4 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -128,6 +128,12 @@
pinctrl-names = "default";
pinctrl-0 = <&muic_int_default>;
+
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
index 3c49dac92d2d..c50f81a68897 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
@@ -23,6 +23,12 @@
pinctrl-names = "default";
pinctrl-0 = <&muic_int_default>;
+
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
index c2800ad2dd5b..5e933fb8b363 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
@@ -26,6 +26,30 @@
};
};
+ clk_pwm_backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&clk_pwm 0 100000>;
+
+ enable-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>;
+
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <128>;
+
+ pinctrl-0 = <&backlight_en_default>;
+ pinctrl-names = "default";
+ };
+
+ clk_pwm: pwm {
+ compatible = "clk-pwm";
+ #pwm-cells = <2>;
+
+ clocks = <&gcc GCC_GP2_CLK>;
+
+ pinctrl-0 = <&backlight_pwm_default>;
+ pinctrl-names = "default";
+ };
+
gpio-keys {
compatible = "gpio-keys";
@@ -66,6 +90,19 @@
pinctrl-0 = <&motor_en_default>;
pinctrl-names = "default";
};
+
+ reg_vdd_tsp_a: regulator-vdd-tsp-a {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_tsp_a";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ gpio = <&tlmm 73 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&tsp_en_default>;
+ pinctrl-names = "default";
+ };
};
&blsp_i2c1 {
@@ -94,6 +131,26 @@
};
};
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen: touchscreen@20 {
+ compatible = "zinitix,bt541";
+ reg = <0x20>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+
+ touchscreen-size-x = <540>;
+ touchscreen-size-y = <960>;
+
+ vcca-supply = <&reg_vdd_tsp_a>;
+ vdd-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&tsp_int_default>;
+ pinctrl-names = "default";
+ };
+};
+
&blsp_uart2 {
status = "okay";
};
@@ -166,6 +223,18 @@
};
&tlmm {
+ backlight_en_default: backlight-en-default-state {
+ pins = "gpio98";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ backlight_pwm_default: backlight-pwm-default-state {
+ pins = "gpio50";
+ function = "gcc_gp2_clk_a";
+ };
+
fg_alert_default: fg-alert-default-state {
pins = "gpio121";
function = "gpio";
@@ -200,4 +269,18 @@
drive-strength = <2>;
bias-disable;
};
+
+ tsp_en_default: tsp-en-default-state {
+ pins = "gpio73";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_int_default: tsp-int-default-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
index 42843771ae2a..b438fa81886c 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
@@ -5,6 +5,9 @@
/* SM5504 MUIC instead of SM5502 */
/delete-node/ &muic;
+/* Touchscreen varies depending on model variant */
+/delete-node/ &touchscreen;
+
&blsp_i2c1 {
muic: extcon@14 {
compatible = "siliconmitus,sm5504-muic";
@@ -14,3 +17,12 @@
pinctrl-names = "default";
};
};
+
+/* On rossa backlight is controlled with MIPI DCS commands */
+&clk_pwm {
+ status = "disabled";
+};
+
+&clk_pwm_backlight {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
index aa6c39482a2f..0c599e71a464 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
@@ -286,6 +286,12 @@
pinctrl-0 = <&muic_int_default>;
pinctrl-names = "default";
+
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index f1011bb641c6..5d818fe057dd 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -1323,6 +1323,20 @@
snps,hird-threshold = /bits/ 8 <0x00>;
maximum-speed = "high-speed";
+
+ usb-role-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_dwc3_hs: endpoint {
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 1601e46549e7..8d2cb6f41095 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1929,6 +1929,16 @@
"cfg",
"bus_master",
"bus_slave";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1: pcie@608000 {
@@ -1982,6 +1992,16 @@
"cfg",
"bus_master",
"bus_slave";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2: pcie@610000 {
@@ -2032,6 +2052,16 @@
"cfg",
"bus_master",
"bus_slave";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
index 876c6921ddf0..d8cc0d729e99 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
@@ -98,30 +98,35 @@
gpio-keys {
compatible = "gpio-keys";
label = "Side buttons";
+ pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &vol_up_n>;
pinctrl-names = "default";
- pinctrl-0 = <&vol_down_n &focus_n &snapshot_n>;
- button-vol-down {
- label = "Volume Down";
- gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_VOLUMEDOWN>;
- wakeup-source;
+ button-camera-focus {
+ label = "Camera Focus";
+ gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA_FOCUS>;
debounce-interval = <15>;
};
button-camera-snapshot {
label = "Camera Snapshot";
gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
linux,code = <KEY_CAMERA>;
debounce-interval = <15>;
};
- button-camera-focus {
- label = "Camera Focus";
- gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_CAMERA_FOCUS>;
+ button-vol-down {
+ label = "Volume Down";
+ gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ button-vol-up {
+ label = "Volume Up";
+ gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
debounce-interval = <15>;
};
};
@@ -345,6 +350,14 @@
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
};
+ vol_up_n: vol-up-n-state {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ bias-pull-up;
+ input-enable;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ };
+
focus_n: focus-n-state {
pins = "gpio7";
function = PMIC_GPIO_FUNC_NORMAL;
@@ -405,9 +418,33 @@
};
};
-&pm8998_resin {
- linux,code = <KEY_VOLUMEUP>;
+&pmi8998_lpg {
+ qcom,power-source = <1>;
+
status = "okay";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@4 {
+ reg = <4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ reg = <5>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
};
&qusb2phy {
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 4dfe2d09ac28..d795b2bbe133 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -972,6 +972,16 @@
power-domains = <&gcc PCIE_0_GDSC>;
iommu-map = <0x100 &anoc1_smmu 0x1480 1>;
perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie_phy: phy@1c06000 {
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
index 11158c2bd524..6de6ed562d97 100644
--- a/arch/arm64/boot/dts/qcom/pm6150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -64,15 +64,15 @@
};
pm6150_vbus: usb-vbus-regulator@1100 {
- compatible = "qcom,pm6150-vbus-reg,
- qcom,pm8150b-vbus-reg";
+ compatible = "qcom,pm6150-vbus-reg",
+ "qcom,pm8150b-vbus-reg";
reg = <0x1100>;
status = "disabled";
};
pm6150_typec: typec@1500 {
- compatible = "qcom,pm6150-typec,
- qcom,pm8150b-typec";
+ compatible = "qcom,pm6150-typec",
+ "qcom,pm8150b-typec";
reg = <0x1500>, <0x1700>;
interrupts = <0x0 0x15 0x00 IRQ_TYPE_EDGE_RISING>,
<0x0 0x15 0x01 IRQ_TYPE_EDGE_BOTH>,
diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
index d13a1ab7c20b..0fce45276e5c 100644
--- a/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
@@ -118,6 +118,16 @@
status = "disabled";
};
+ pm6150l_lpg: pwm {
+ compatible = "qcom,pm6150l-lpg", "qcom,pm8150l-lpg";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #pwm-cells = <2>;
+
+ status = "disabled";
+ };
+
pm6150l_wled: leds@d800 {
compatible = "qcom,pm6150l-wled";
reg = <0xd800>, <0xd900>;
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index 89beac833d43..106110a9f551 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -165,7 +165,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 6 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -694,10 +694,31 @@
clock-output-names = "usb3_phy_pipe_clk_src";
#phy-cells = <0>;
+ orientation-switch;
qcom,tcsr-reg = <&tcsr_regs 0xb244>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_dwc3_ss>;
+ };
+ };
+ };
};
system_noc: interconnect@1880000 {
@@ -1380,6 +1401,27 @@
snps,usb3_lpm_capable;
maximum-speed = "super-speed";
dr_mode = "otg";
+ usb-role-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_qmpphy_usb_ss_in>;
+ };
+ };
+ };
};
};
@@ -1858,7 +1900,7 @@
compatible = "qcom,qcm2290-cpufreq-hw", "qcom,cpufreq-hw";
reg = <0x0 0x0f521000 0x0 0x1000>;
reg-names = "freq-domain0";
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&lmh_cluster 0>;
interrupt-names = "dcvsh-irq-0";
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&gcc GPLL0>;
clock-names = "xo", "alternate";
@@ -1866,6 +1908,18 @@
#freq-domain-cells = <1>;
#clock-cells = <1>;
};
+
+ lmh_cluster: lmh@f550800 {
+ compatible = "qcom,qcm2290-lmh", "qcom,sm8150-lmh";
+ reg = <0x0 0x0f550800 0x0 0x400>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ cpus = <&CPU0>;
+ qcom,lmh-temp-arm-millicelsius = <65000>;
+ qcom,lmh-temp-low-millicelsius = <94500>;
+ qcom,lmh-temp-high-millicelsius = <95000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
};
thermal-zones {
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
index 4ff9fc24e50e..f3432701945f 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
@@ -77,6 +77,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 140 GPIO_ACTIVE_HIGH>;
+
connector@0 {
compatible = "usb-c-connector";
reg = <0>;
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
index e4bfad50a669..47ca2d000341 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
@@ -9,7 +9,9 @@
#define PM7250B_SID 8
#define PM7250B_SID1 9
+#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sc7280.dtsi"
#include "pm7250b.dtsi"
@@ -35,10 +37,45 @@
serial0 = &uart5;
};
+ pm8350c_pwm_backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pm8350c_pwm 3 65535>;
+ enable-gpios = <&pm8350c_gpios 7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&pmic_lcd_bl_en>;
+ pinctrl-names = "default";
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
+ lcd_disp_bias: regulator-lcd-disp-bias {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd_disp_bias";
+ regulator-min-microvolt = <5500000>;
+ regulator-max-microvolt = <5500000>;
+ gpio = <&pm7250b_gpios 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-0 = <&lcd_disp_bias_en>;
+ pinctrl-names = "default";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&key_vol_up_default>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume_up";
+ gpios = <&pm7325_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+ };
+
reserved-memory {
xbl_mem: xbl@80700000 {
reg = <0x0 0x80700000 0x0 0x100000>;
@@ -158,129 +195,151 @@
vdd-l14-l16-supply = <&vreg_s8b_1p272>;
vreg_s1b_1p872: smps1 {
+ regulator-name = "vreg_s1b_1p872";
regulator-min-microvolt = <1840000>;
regulator-max-microvolt = <2040000>;
};
vreg_s2b_0p876: smps2 {
+ regulator-name = "vreg_s2b_0p876";
regulator-min-microvolt = <570070>;
regulator-max-microvolt = <1050000>;
};
vreg_s7b_0p972: smps7 {
+ regulator-name = "vreg_s7b_0p972";
regulator-min-microvolt = <535000>;
regulator-max-microvolt = <1120000>;
};
vreg_s8b_1p272: smps8 {
+ regulator-name = "vreg_s8b_1p272";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1500000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_RET>;
};
vreg_l1b_0p912: ldo1 {
+ regulator-name = "vreg_l1b_0p912";
regulator-min-microvolt = <825000>;
regulator-max-microvolt = <925000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2b_3p072: ldo2 {
+ regulator-name = "vreg_l2b_3p072";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3b_0p504: ldo3 {
+ regulator-name = "vreg_l3b_0p504";
regulator-min-microvolt = <312000>;
regulator-max-microvolt = <910000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4b_0p752: ldo4 {
+ regulator-name = "vreg_l4b_0p752";
regulator-min-microvolt = <752000>;
regulator-max-microvolt = <820000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
reg_l5b_0p752: ldo5 {
+ regulator-name = "reg_l5b_0p752";
regulator-min-microvolt = <552000>;
regulator-max-microvolt = <832000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6b_1p2: ldo6 {
+ regulator-name = "vreg_l6b_1p2";
regulator-min-microvolt = <1140000>;
regulator-max-microvolt = <1260000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7b_2p952: ldo7 {
+ regulator-name = "vreg_l7b_2p952";
regulator-min-microvolt = <2400000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8b_0p904: ldo8 {
+ regulator-name = "vreg_l8b_0p904";
regulator-min-microvolt = <870000>;
regulator-max-microvolt = <970000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9b_1p2: ldo9 {
+ regulator-name = "vreg_l9b_1p2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1304000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11b_1p504: ldo11 {
+ regulator-name = "vreg_l11b_1p504";
regulator-min-microvolt = <1504000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12b_0p751: ldo12 {
+ regulator-name = "vreg_l12b_0p751";
regulator-min-microvolt = <751000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13b_0p53: ldo13 {
+ regulator-name = "vreg_l13b_0p53";
regulator-min-microvolt = <530000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l14b_1p08: ldo14 {
+ regulator-name = "vreg_l14b_1p08";
regulator-min-microvolt = <1080000>;
regulator-max-microvolt = <1304000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l15b_0p765: ldo15 {
+ regulator-name = "vreg_l15b_0p765";
regulator-min-microvolt = <765000>;
regulator-max-microvolt = <1020000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l16b_1p1: ldo16 {
+ regulator-name = "vreg_l16b_1p1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l17b_1p7: ldo17 {
+ regulator-name = "vreg_l17b_1p7";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <1900000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l18b_1p8: ldo18 {
+ regulator-name = "vreg_l18b_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l19b_1p8: ldo19 {
+ regulator-name = "vreg_l19b_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -312,116 +371,217 @@
vdd-bob-supply = <&vph_pwr>;
vreg_s1c_2p19: smps1 {
+ regulator-name = "vreg_s1c_2p19";
regulator-min-microvolt = <2190000>;
regulator-max-microvolt = <2210000>;
};
vreg_s2c_0p752: smps2 {
+ regulator-name = "vreg_s2c_0p752";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <800000>;
};
vreg_s5c_0p752: smps5 {
+ regulator-name = "vreg_s5c_0p752";
regulator-min-microvolt = <465000>;
regulator-max-microvolt = <1050000>;
};
vreg_s7c_0p752: smps7 {
+ regulator-name = "vreg_s7c_0p752";
regulator-min-microvolt = <465000>;
regulator-max-microvolt = <800000>;
};
vreg_s9c_1p084: smps9 {
+ regulator-name = "vreg_s9c_1p084";
regulator-min-microvolt = <1010000>;
regulator-max-microvolt = <1170000>;
};
vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2c_1p62: ldo2 {
+ regulator-name = "vreg_l2c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3c_2p8: ldo3 {
+ regulator-name = "vreg_l3c_2p8";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3540000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4c_1p62: ldo4 {
+ regulator-name = "vreg_l4c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5c_1p62: ldo5 {
+ regulator-name = "vreg_l5c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6c_2p96: ldo6 {
+ regulator-name = "vreg_l6c_2p96";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_3p0: ldo7 {
+ regulator-name = "vreg_l7c_3p0";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8c_1p62: ldo8 {
+ regulator-name = "vreg_l8c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9c_2p96: ldo9 {
+ regulator-name = "vreg_l9c_2p96";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <35440000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_0p88: ldo10 {
+ regulator-name = "vreg_l10c_0p88";
regulator-min-microvolt = <720000>;
regulator-max-microvolt = <1050000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11c_2p8: ldo11 {
+ regulator-name = "vreg_l11c_2p8";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12c_1p65: ldo12 {
+ regulator-name = "vreg_l12c_1p65";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13c_2p7: ldo13 {
+ regulator-name = "vreg_l13c_2p7";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_bob_3p296: bob {
+ regulator-name = "vreg_bob_3p296";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3960000>;
};
};
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi {
+ vdda-supply = <&vreg_l6b_1p2>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "novatek,nt36672e";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 44 GPIO_ACTIVE_HIGH>;
+
+ vddi-supply = <&vreg_l8c_1p62>;
+ avdd-supply = <&lcd_disp_bias>;
+ avee-supply = <&lcd_disp_bias>;
+
+ backlight = <&pm8350c_pwm_backlight>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&panel0_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi_phy {
+ vdds-supply = <&vreg_l10c_0p88>;
+ status = "okay";
+};
+
+&pm7250b_gpios {
+ lcd_disp_bias_en: lcd-disp-bias-en-state {
+ pins = "gpio2";
+ function = "func1";
+ bias-disable;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ input-disable;
+ output-enable;
+ power-source = <0>;
+ };
+};
+
+&pm8350c_gpios {
+ pmic_lcd_bl_en: pmic-lcd-bl-en-state {
+ pins = "gpio7";
+ function = "normal";
+ bias-disable;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ output-low;
+ power-source = <0>;
+ };
+
+ pmic_lcd_bl_pwm: pmic-lcd-bl-pwm-state {
+ pins = "gpio8";
+ function = "func1";
+ bias-disable;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ output-low;
+ power-source = <0>;
+ };
+};
+
+&pm7325_gpios {
+ key_vol_up_default: key-vol-up-state {
+ pins = "gpio6";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+};
+
&pm8350c_pwm {
+ pinctrl-0 = <&pmic_lcd_bl_pwm>;
+ pinctrl-names = "default";
status = "okay";
multi-led {
@@ -448,10 +608,39 @@
};
};
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
&qupv3_id_0 {
status = "okay";
};
+&remoteproc_adsp {
+ firmware-name = "qcom/qcm6490/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/qcm6490/cdsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/qcm6490/modem.mbn";
+ status = "okay";
+};
+
+&remoteproc_wpss {
+ firmware-name = "qcom/qcm6490/wpss.mbn";
+ status = "okay";
+};
+
&sdhc_1 {
non-removable;
no-sd;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index 10655401528e..a22b4501ce1e 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -62,7 +62,7 @@
vddrf-supply = <&vreg_l1_1p3>;
vddch0-supply = <&vdd_ch0_3p3>;
- local-bd-address = [ 02 00 00 00 5a ad ];
+ local-bd-address = [ 00 00 00 00 00 00 ];
max-speed = <3200000>;
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index a05d0234f7fc..ac451f378056 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -1516,6 +1516,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
index 97824c769ba3..a085ff5b5fb2 100644
--- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
@@ -17,7 +17,6 @@
#include "pmk8350.dtsi"
/delete-node/ &ipa_fw_mem;
-/delete-node/ &remoteproc_mpss;
/delete-node/ &rmtfs_mem;
/delete-node/ &adsp_mem;
/delete-node/ &cdsp_mem;
@@ -39,6 +38,20 @@
stdout-path = "serial0:115200n8";
};
+ dp-connector {
+ compatible = "dp-connector";
+ label = "DP";
+ type = "mini";
+
+ hpd-gpios = <&tlmm 60 GPIO_ACTIVE_HIGH>;
+
+ port {
+ dp_connector_in: endpoint {
+ remote-endpoint = <&mdss_edp_out>;
+ };
+ };
+ };
+
reserved-memory {
xbl_mem: xbl@80700000 {
reg = <0x0 0x80700000 0x0 0x100000>;
@@ -121,6 +134,49 @@
};
};
+ pmic-glink {
+ compatible = "qcom,qcm6490-pmic-glink", "qcom,pmic-glink";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss_in: endpoint {
+ remote-endpoint = <&redriver_usb_con_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_sbu_in: endpoint {
+ remote-endpoint = <&redriver_usb_con_sbu>;
+ };
+ };
+ };
+ };
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
@@ -153,129 +209,154 @@
vdd-l14-l16-supply = <&vreg_s8b_1p272>;
vreg_s1b_1p872: smps1 {
+ regulator-name = "vreg_s1b_1p872";
regulator-min-microvolt = <1840000>;
regulator-max-microvolt = <2040000>;
};
vreg_s2b_0p876: smps2 {
+ regulator-name = "vreg_s2b_0p876";
regulator-min-microvolt = <570070>;
regulator-max-microvolt = <1050000>;
};
vreg_s7b_0p972: smps7 {
+ regulator-name = "vreg_s7b_0p972";
regulator-min-microvolt = <535000>;
regulator-max-microvolt = <1120000>;
};
vreg_s8b_1p272: smps8 {
+ regulator-name = "vreg_s8b_1p272";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1500000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_RET>;
};
vreg_l1b_0p912: ldo1 {
+ regulator-name = "vreg_l1b_0p912";
regulator-min-microvolt = <825000>;
regulator-max-microvolt = <925000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2b_3p072: ldo2 {
+ regulator-name = "vreg_l2b_3p072";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3b_0p504: ldo3 {
+ regulator-name = "vreg_l3b_0p504";
regulator-min-microvolt = <312000>;
regulator-max-microvolt = <910000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4b_0p752: ldo4 {
+ regulator-name = "vreg_l4b_0p752";
regulator-min-microvolt = <752000>;
regulator-max-microvolt = <820000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
reg_l5b_0p752: ldo5 {
+ regulator-name = "reg_l5b_0p752";
regulator-min-microvolt = <552000>;
regulator-max-microvolt = <832000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6b_1p2: ldo6 {
+ regulator-name = "vreg_l6b_1p2";
regulator-min-microvolt = <1140000>;
regulator-max-microvolt = <1260000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7b_2p952: ldo7 {
- regulator-min-microvolt = <2400000>;
- regulator-max-microvolt = <3544000>;
+ regulator-name = "vreg_l7b_2p952";
+ regulator-min-microvolt = <2952000>;
+ regulator-max-microvolt = <2952000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8b_0p904: ldo8 {
+ regulator-name = "vreg_l8b_0p904";
regulator-min-microvolt = <870000>;
regulator-max-microvolt = <970000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9b_1p2: ldo9 {
+ regulator-name = "vreg_l9b_1p2";
regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1304000>;
+ regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11b_1p504: ldo11 {
+ regulator-name = "vreg_l11b_1p504";
regulator-min-microvolt = <1504000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12b_0p751: ldo12 {
+ regulator-name = "vreg_l12b_0p751";
regulator-min-microvolt = <751000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13b_0p53: ldo13 {
+ regulator-name = "vreg_l13b_0p53";
regulator-min-microvolt = <530000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l14b_1p08: ldo14 {
+ regulator-name = "vreg_l14b_1p08";
regulator-min-microvolt = <1080000>;
regulator-max-microvolt = <1304000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l15b_0p765: ldo15 {
+ regulator-name = "vreg_l15b_0p765";
regulator-min-microvolt = <765000>;
regulator-max-microvolt = <1020000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l16b_1p1: ldo16 {
+ regulator-name = "vreg_l16b_1p1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l17b_1p7: ldo17 {
+ regulator-name = "vreg_l17b_1p7";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <1900000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l18b_1p8: ldo18 {
+ regulator-name = "vreg_l18b_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l19b_1p8: ldo19 {
+ regulator-name = "vreg_l19b_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -304,109 +385,128 @@
vdd-bob-supply = <&vph_pwr>;
vreg_s1c_2p19: smps1 {
+ regulator-name = "vreg_s1c_2p19";
regulator-min-microvolt = <2190000>;
regulator-max-microvolt = <2210000>;
};
vreg_s2c_0p752: smps2 {
+ regulator-name = "vreg_s2c_0p752";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <800000>;
};
vreg_s5c_0p752: smps5 {
+ regulator-name = "vreg_s5c_0p752";
regulator-min-microvolt = <465000>;
regulator-max-microvolt = <1050000>;
};
vreg_s7c_0p752: smps7 {
+ regulator-name = "vreg_s7c_0p752";
regulator-min-microvolt = <465000>;
regulator-max-microvolt = <800000>;
};
vreg_s9c_1p084: smps9 {
+ regulator-name = "vreg_s9c_1p084";
regulator-min-microvolt = <1010000>;
regulator-max-microvolt = <1170000>;
};
vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2c_1p62: ldo2 {
+ regulator-name = "vreg_l2c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3c_2p8: ldo3 {
+ regulator-name = "vreg_l3c_2p8";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3540000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4c_1p62: ldo4 {
+ regulator-name = "vreg_l4c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5c_1p62: ldo5 {
+ regulator-name = "vreg_l5c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6c_2p96: ldo6 {
+ regulator-name = "vreg_l6c_2p96";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_3p0: ldo7 {
+ regulator-name = "vreg_l7c_3p0";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8c_1p62: ldo8 {
+ regulator-name = "vreg_l8c_1p62";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9c_2p96: ldo9 {
+ regulator-name = "vreg_l9c_2p96";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <35440000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_0p88: ldo10 {
+ regulator-name = "vreg_l10c_0p88";
regulator-min-microvolt = <720000>;
regulator-max-microvolt = <1050000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11c_2p8: ldo11 {
+ regulator-name = "vreg_l11c_2p8";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12c_1p65: ldo12 {
+ regulator-name = "vreg_l12c_1p65";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13c_2p7: ldo13 {
+ regulator-name = "vreg_l13c_2p7";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_bob_3p296: bob {
+ regulator-name = "vreg_bob_3p296";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3960000>;
};
@@ -430,10 +530,102 @@
<GCC_WPSS_RSCP_CLK>;
};
+&i2c1 {
+ status = "okay";
+
+ typec-mux@1c {
+ compatible = "onnn,nb7vpq904m";
+ reg = <0x1c>;
+
+ vcc-supply = <&vreg_l18b_1p8>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ redriver_usb_con_ss: endpoint {
+ remote-endpoint = <&pmic_glink_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ redriver_phy_con_ss: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ redriver_usb_con_sbu: endpoint {
+ remote-endpoint = <&pmic_glink_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp {
+ status = "okay";
+};
+
+&mdss_dp_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
+};
+
+&mdss_edp {
+ status = "okay";
+};
+
+&mdss_edp_out {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&dp_connector_in>;
+};
+
+&mdss_edp_phy {
+ status = "okay";
+};
+
&qupv3_id_0 {
status = "okay";
};
+&remoteproc_adsp {
+ firmware-name = "qcom/qcs6490/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/qcs6490/cdsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/qcs6490/modem.mdt";
+ status = "okay";
+};
+
+&remoteproc_wpss {
+ firmware-name = "qcom/qcs6490/wpss.mbn";
+ status = "okay";
+};
+
&tlmm {
gpio-reserved-ranges = <32 2>, /* ADSP */
<48 4>; /* NFC */
@@ -449,7 +641,16 @@
};
&usb_1_dwc3 {
- dr_mode = "peripheral";
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_hs_in>;
+};
+
+&usb_1_dwc3_ss {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
};
&usb_1_hsphy {
@@ -464,9 +665,49 @@
vdda-phy-supply = <&vreg_l6b_1p2>;
vdda-pll-supply = <&vreg_l1b_0p912>;
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&redriver_phy_con_ss>;
+};
+
+&usb_dp_qmpphy_usb_ss_in {
+ remote-endpoint = <&usb_1_dwc3_ss>;
+};
+
+&usb_dp_qmpphy_dp_in {
+ remote-endpoint = <&mdss_dp_out>;
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 175 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l7b_2p952>;
+ vcc-max-microamp = <800000>;
+ vccq-supply = <&vreg_l9b_1p2>;
+ vccq-max-microamp = <900000>;
+ vccq2-supply = <&vreg_l9b_1p2>;
+ vccq2-max-microamp = <900000>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l10c_0p88>;
+ vdda-pll-supply = <&vreg_l6b_1p2>;
+
status = "okay";
};
&wifi {
memory-region = <&wlan_fw_mem>;
};
+
+/* PINCTRL - ADDITIONS TO NODES IN PARENT DEVICE TREE FILES */
+
+&edp_hot_plug_det {
+ function = "gpio";
+ bias-disable;
+};
diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
index 832f472c4b7a..f2a5e2e40461 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
@@ -177,7 +177,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
index 6e9dd0312adc..bb5191422660 100644
--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
@@ -262,6 +262,46 @@
status = "okay";
};
+&pm4125_typec {
+ status = "okay";
+
+ connector {
+ compatible = "usb-c-connector";
+
+ power-role = "dual";
+ data-role = "dual";
+ self-powered;
+
+ typec-power-opmode = "default";
+ pd-disable;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ pm4125_hs_in: endpoint {
+ remote-endpoint = <&usb_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ pm4125_ss_in: endpoint {
+ remote-endpoint = <&usb_qmpphy_out>;
+ };
+ };
+ };
+ };
+};
+
+&pm4125_vbus {
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <500000>;
+ status = "okay";
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -535,14 +575,8 @@
status = "okay";
};
-&usb_qmpphy {
- vdda-phy-supply = <&pm4125_l12>;
- vdda-pll-supply = <&pm4125_l13>;
- status = "okay";
-};
-
-&usb_dwc3 {
- dr_mode = "host";
+&usb_dwc3_hs {
+ remote-endpoint = <&pm4125_hs_in>;
};
&usb_hsphy {
@@ -552,12 +586,23 @@
status = "okay";
};
+&usb_qmpphy {
+ vdda-phy-supply = <&pm4125_l12>;
+ vdda-pll-supply = <&pm4125_l13>;
+ status = "okay";
+};
+
+&usb_qmpphy_out {
+ remote-endpoint = <&pm4125_ss_in>;
+};
+
&wifi {
vdd-0.8-cx-mx-supply = <&pm4125_l7>;
vdd-1.8-xo-supply = <&pm4125_l13>;
vdd-1.3-rfa-supply = <&pm4125_l10>;
vdd-3.3-ch0-supply = <&pm4125_l22>;
qcom,ath10k-calibration-variant = "Thundercomm_RB1";
+ firmware-name = "qcm2290";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
index 696d6d43c56b..2c39bb1b97db 100644
--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
@@ -678,6 +678,7 @@
vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
vdd-3.3-ch0-supply = <&vreg_l23a_3p3>;
qcom,ath10k-calibration-variant = "Thundercomm_RB2";
+ firmware-name = "qrb4210";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
index 5e4287f8c8cd..9e9c7f81096b 100644
--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
@@ -283,7 +283,7 @@
vreg_l13c_2p96: ldo13 {
regulator-name = "vreg_l13c_2p96";
- regulator-min-microvolt = <2504000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -367,6 +367,16 @@
};
};
+&pmm8155au_1_gpios {
+ pmm8155au_1_sdc2_cd: sdc2-cd-default-state {
+ pins = "gpio4";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+};
+
&qupv3_id_1 {
status = "okay";
};
@@ -384,10 +394,10 @@
&sdhc_2 {
status = "okay";
- cd-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>;
+ cd-gpios = <&pmm8155au_1_gpios 4 GPIO_ACTIVE_LOW>;
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_on>;
- pinctrl-1 = <&sdc2_off>;
+ pinctrl-0 = <&sdc2_on &pmm8155au_1_sdc2_cd>;
+ pinctrl-1 = <&sdc2_off &pmm8155au_1_sdc2_cd>;
vqmmc-supply = <&vreg_l13c_2p96>; /* IO line power */
vmmc-supply = <&vreg_l17a_2p96>; /* Card power line */
bus-width = <4>;
@@ -505,13 +515,6 @@
bias-pull-up; /* pull up */
drive-strength = <16>; /* 16 MA */
};
-
- sd-cd-pins {
- pins = "gpio96";
- function = "gpio";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 MA */
- };
};
sdc2_off: sdc2-off-state {
@@ -532,13 +535,6 @@
bias-pull-up; /* pull up */
drive-strength = <2>; /* 2 MA */
};
-
- sd-cd-pins {
- pins = "gpio96";
- function = "gpio";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 MA */
- };
};
usb2phy_ac_en1_default: usb2phy-ac-en1-default-state {
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 231cea1f0fa8..31de73594839 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -3677,6 +3677,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c04000 {
@@ -3777,6 +3787,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c14000 {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
index 5afcb8212f49..3f0d3e33894a 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
@@ -255,7 +255,25 @@
clock-frequency = <400000>;
status = "okay";
- /* embedded-controller@76 */
+ embedded-controller@76 {
+ compatible = "acer,aspire1-ec";
+ reg = <0x76>;
+
+ interrupts-extended = <&tlmm 30 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&ec_int_default>;
+ pinctrl-names = "default";
+
+ connector {
+ compatible = "usb-c-connector";
+
+ port {
+ ec_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp_out>;
+ };
+ };
+ };
+ };
};
&i2c4 {
@@ -419,6 +437,19 @@
status = "okay";
};
+&mdss_dp {
+ data-lanes = <0 1>;
+
+ vdda-1p2-supply = <&vreg_l3c_1p2>;
+ vdda-0p9-supply = <&vreg_l4a_0p8>;
+
+ status = "okay";
+};
+
+&mdss_dp_out {
+ remote-endpoint = <&ec_dp_in>;
+};
+
&mdss_dsi0 {
vdda-supply = <&vreg_l3c_1p2>;
status = "okay";
@@ -857,6 +888,13 @@
bias-disable;
};
+ ec_int_default: ec-int-default-state {
+ pins = "gpio30";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
edp_bridge_irq_default: edp-bridge-irq-default-state {
pins = "gpio11";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index f3a6da8b2890..8513be297120 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -944,6 +944,8 @@ ap_spi_fp: &spi10 {
vddrf-supply = <&pp1300_l2c>;
vddch0-supply = <&pp3300_l10c>;
max-speed = <3200000>;
+
+ qcom,local-bd-address-broken;
};
};
@@ -1165,6 +1167,7 @@ ap_spi_fp: &spi10 {
};
&pm6150l_gpios {
+ status = "disabled"; /* No GPIOs are consumed or configured */
gpio-line-names = "AP_SUSPEND",
"",
"",
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 2b481e20ae38..4774a859bd7e 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -1585,9 +1585,12 @@
compatible = "qcom,sc7180-qmp-ufs-phy",
"qcom,sm7150-qmp-ufs-phy";
reg = <0 0x01d87000 0 0x1000>;
- clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
- <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
- clock-names = "ref", "ref_aux";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>;
+ clock-names = "ref",
+ "ref_aux",
+ "qref";
power-domains = <&gcc UFS_PHY_GDSC>;
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -2309,6 +2312,7 @@
compatible = "qcom,sc7180-dcc", "qcom,dcc";
reg = <0x0 0x010a2000 0x0 0x1000>,
<0x0 0x010ae000 0x0 0x2000>;
+ status = "disabled";
};
stm@6002000 {
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 7e7f0f0fb41b..fc9ec367e3a5 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2273,6 +2273,16 @@
<0x100 &apps_smmu 0x1c81 0x1>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -2352,6 +2362,8 @@
<0 0>,
<0 0>,
<0 0>;
+ qcom,ice = <&ice>;
+
status = "disabled";
};
@@ -2374,6 +2386,13 @@
status = "disabled";
};
+ ice: crypto@1d88000 {
+ compatible = "qcom,sc7280-inline-crypto-engine",
+ "qcom,inline-crypto-engine";
+ reg = <0 0x01d88000 0 0x8000>;
+ clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ };
+
cryptobam: dma-controller@1dc4000 {
compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
reg = <0x0 0x01dc4000 0x0 0x28000>;
@@ -3707,7 +3726,7 @@
compatible = "qcom,sc7280-adsp-pas";
reg = <0 0x03700000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3944,7 +3963,7 @@
compatible = "qcom,sc7280-cdsp-pas";
reg = <0 0x0a300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -4458,6 +4477,11 @@
opp-hz = /bits/ 64 <506666667>;
required-opps = <&rpmhpd_opp_nom>;
};
+
+ opp-608000000 {
+ opp-hz = /bits/ 64 <608000000>;
+ required-opps = <&rpmhpd_opp_turbo>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
index 0c22f3efec20..6af99116c715 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
@@ -51,6 +51,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 38 GPIO_ACTIVE_HIGH>,
+ <&tlmm 58 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
@@ -329,12 +331,18 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
};
vreg_l10e_2p9: ldo10 {
regulator-min-microvolt = <2904000>;
regulator-max-microvolt = <2904000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
};
vreg_l16e_3p0: ldo16 {
@@ -350,49 +358,58 @@
zap-shader {
memory-region = <&gpu_mem>;
- firmware-name = "qcom/sc8180x/qcdxkmsuc8180.mbn";
+ firmware-name = "qcom/sc8180x/LENOVO/82AK/qcdxkmsuc8180.mbn";
};
};
&i2c1 {
clock-frequency = <100000>;
- pinctrl-0 = <&i2c1_active>, <&i2c1_hid_active>;
+ pinctrl-0 = <&i2c1_active>;
pinctrl-names = "default";
status = "okay";
- hid@10 {
+ touchscreen@10 {
compatible = "hid-over-i2c";
reg = <0x10>;
hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 122 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&ts_int_default>;
+ pinctrl-names = "default";
};
};
&i2c7 {
- clock-frequency = <100000>;
+ clock-frequency = <1000000>;
- pinctrl-0 = <&i2c7_active>, <&i2c7_hid_active>;
+ pinctrl-0 = <&i2c7_active>;
pinctrl-names = "default";
status = "okay";
- hid@5 {
+ keyboard@5 {
compatible = "hid-over-i2c";
reg = <0x5>;
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&kb_int_default>;
+ pinctrl-names = "default";
};
- hid@2c {
+ touchpad@2c {
compatible = "hid-over-i2c";
reg = <0x2c>;
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 24 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&tp_int_default>;
+ pinctrl-names = "default";
};
};
@@ -669,14 +686,6 @@
drive-strength = <2>;
};
- i2c1_hid_active: i2c1-hid-active-state {
- pins = "gpio122";
- function = "gpio";
-
- bias-pull-up;
- drive-strength = <2>;
- };
-
i2c7_active: i2c7-active-state {
pins = "gpio98", "gpio99";
function = "qup7";
@@ -685,8 +694,8 @@
drive-strength = <2>;
};
- i2c7_hid_active: i2c7-hid-active-state {
- pins = "gpio37", "gpio24";
+ kb_int_default: kb-int-default-state {
+ pins = "gpio37";
function = "gpio";
bias-pull-up;
@@ -718,6 +727,22 @@
};
};
+ tp_int_default: tp-int-default-state {
+ pins = "gpio24";
+ function = "gpio";
+
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ ts_int_default: ts-int-default-state {
+ pins = "gpio122";
+ function = "gpio";
+
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
usbprim_sbu_default: usbprim-sbu-state {
oe-n-pins {
pins = "gpio152";
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 32afc78d5b76..067712310560 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -1777,6 +1777,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -1888,6 +1898,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie3_phy: phy@1c0c000 {
@@ -2000,6 +2020,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c16000 {
@@ -2112,6 +2142,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2_phy: phy@1c1c000 {
@@ -2225,7 +2265,6 @@
gpu: gpu@2c00000 {
compatible = "qcom,adreno-680.1", "qcom,adreno";
- #stream-id-cells = <16>;
reg = <0 0x02c00000 0 0x40000>;
reg-names = "kgsl_3d0_reg_memory";
@@ -2701,7 +2740,7 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
power-domains = <&gcc USB30_SEC_GDSC>;
interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 10 IRQ_TYPE_EDGE_BOTH>,
<&pdc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
@@ -2805,7 +2844,7 @@
power-domains = <&rpmhpd SC8180X_MMCX>;
interrupt-parent = <&mdss>;
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0>;
ports {
#address-cells = <1>;
@@ -2878,7 +2917,7 @@
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <4>;
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
@@ -2964,7 +3003,7 @@
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
- interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <5>;
clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
<&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
@@ -3030,7 +3069,8 @@
reg = <0 0xae90000 0 0x200>,
<0 0xae90200 0 0x200>,
<0 0xae90400 0 0x600>,
- <0 0xae90a00 0 0x400>;
+ <0 0xae90a00 0 0x400>,
+ <0 0xae91000 0 0x400>;
interrupt-parent = <&mdss>;
interrupts = <12>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
@@ -3106,7 +3146,8 @@
reg = <0 0xae98000 0 0x200>,
<0 0xae98200 0 0x200>,
<0 0xae98400 0 0x600>,
- <0 0xae98a00 0 0x400>;
+ <0 0xae98a00 0 0x400>,
+ <0 0xae99000 0 0x400>;
interrupt-parent = <&mdss>;
interrupts = <13>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index 15ae94c1602d..e937732abede 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -100,6 +100,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 166 GPIO_ACTIVE_HIGH>,
+ <&tlmm 49 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
@@ -414,6 +416,13 @@
regulator-always-on;
};
+ vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l3b: ldo3 {
regulator-name = "vreg_l3b";
regulator-min-microvolt = <1200000>;
@@ -464,6 +473,13 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l12c: ldo12 {
regulator-name = "vreg_l12c";
regulator-min-microvolt = <1800000>;
@@ -497,6 +513,13 @@
vdd-l6-l9-l10-supply = <&vreg_s12b>;
vdd-l8-supply = <&vreg_s12b>;
+ vreg_l2d: ldo2 {
+ regulator-name = "vreg_l2d";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l3d: ldo3 {
regulator-name = "vreg_l3d";
regulator-min-microvolt = <1200000>;
@@ -525,12 +548,26 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vreg_l8d: ldo8 {
+ regulator-name = "vreg_l8d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l9d: ldo9 {
regulator-name = "vreg_l9d";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <912000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+
+ vreg_l10d: ldo10 {
+ regulator-name = "vreg_l10d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
};
};
@@ -731,22 +768,14 @@
pinctrl-0 = <&pcie4_default>;
status = "okay";
+};
- pcie@0 {
- device_type = "pci";
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #address-cells = <3>;
- #size-cells = <2>;
- ranges;
-
- bus-range = <0x01 0xff>;
-
- wifi@0 {
- compatible = "pci17cb,1103";
- reg = <0x10000 0x0 0x0 0x0 0x0>;
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1103";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
- qcom,ath11k-calibration-variant = "LE_X13S";
- };
+ qcom,ath11k-calibration-variant = "LE_X13S";
};
};
@@ -1168,6 +1197,56 @@
remote-endpoint = <&pmic_glink_con1_hs>;
};
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_hsphy0 {
+ vdda-pll-supply = <&vreg_l1b>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy1 {
+ vdda-pll-supply = <&vreg_l8d>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy2 {
+ vdda-pll-supply = <&vreg_l10d>;
+ vdda18-supply = <&vreg_l8c>;
+ vdda33-supply = <&vreg_l2d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy3 {
+ vdda-pll-supply = <&vreg_l10d>;
+ vdda18-supply = <&vreg_l8c>;
+ vdda33-supply = <&vreg_l2d>;
+
+ status = "okay";
+};
+
+&usb_2_qmpphy0 {
+ vdda-phy-supply = <&vreg_l1b>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&usb_2_qmpphy1 {
+ vdda-phy-supply = <&vreg_l8d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
&vamacro {
pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index a5b194813079..0549ba1fbeea 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -50,7 +50,8 @@
reg = <0x0 0x0>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- capacity-dmips-mhz = <602>;
+ capacity-dmips-mhz = <981>;
+ dynamic-power-coefficient = <549>;
next-level-cache = <&L2_0>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
@@ -77,7 +78,8 @@
reg = <0x0 0x100>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- capacity-dmips-mhz = <602>;
+ capacity-dmips-mhz = <981>;
+ dynamic-power-coefficient = <549>;
next-level-cache = <&L2_100>;
power-domains = <&CPU_PD1>;
power-domain-names = "psci";
@@ -99,7 +101,8 @@
reg = <0x0 0x200>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- capacity-dmips-mhz = <602>;
+ capacity-dmips-mhz = <981>;
+ dynamic-power-coefficient = <549>;
next-level-cache = <&L2_200>;
power-domains = <&CPU_PD2>;
power-domain-names = "psci";
@@ -121,7 +124,8 @@
reg = <0x0 0x300>;
clocks = <&cpufreq_hw 0>;
enable-method = "psci";
- capacity-dmips-mhz = <602>;
+ capacity-dmips-mhz = <981>;
+ dynamic-power-coefficient = <549>;
next-level-cache = <&L2_300>;
power-domains = <&CPU_PD3>;
power-domain-names = "psci";
@@ -144,6 +148,7 @@
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <590>;
next-level-cache = <&L2_400>;
power-domains = <&CPU_PD4>;
power-domain-names = "psci";
@@ -166,6 +171,7 @@
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <590>;
next-level-cache = <&L2_500>;
power-domains = <&CPU_PD5>;
power-domain-names = "psci";
@@ -188,6 +194,7 @@
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <590>;
next-level-cache = <&L2_600>;
power-domains = <&CPU_PD6>;
power-domain-names = "psci";
@@ -210,6 +217,7 @@
clocks = <&cpufreq_hw 1>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <590>;
next-level-cache = <&L2_700>;
power-domains = <&CPU_PD7>;
power-domain-names = "psci";
@@ -300,6 +308,7 @@
scm: scm {
compatible = "qcom,scm-sc8280xp", "qcom,scm";
interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ qcom,dload-mode = <&tcsr 0x13000>;
};
};
@@ -862,6 +871,18 @@
#mbox-cells = <2>;
};
+ qfprom: efuse@784000 {
+ compatible = "qcom,sc8280xp-qfprom", "qcom,qfprom";
+ reg = <0 0x00784000 0 0x3000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ gpu_speed_bin: gpu-speed-bin@18b {
+ reg = <0x18b 0x1>;
+ bits = <5 3>;
+ };
+ };
+
qup2: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
reg = <0 0x008c0000 0 0x2000>;
@@ -1731,6 +1752,8 @@
linux,pci-domain = <6>;
num-lanes = <1>;
+ msi-map = <0x0 &its 0xe0000 0x10000>;
+
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
@@ -1774,11 +1797,22 @@
reset-names = "pci";
power-domains = <&gcc PCIE_4_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie4_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie4_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie4_phy: phy@1c06000 {
@@ -1831,6 +1865,8 @@
linux,pci-domain = <5>;
num-lanes = <2>;
+ msi-map = <0x0 &its 0xd0000 0x10000>;
+
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
@@ -1872,11 +1908,22 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3b_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie3b_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie3b_phy: phy@1c0e000 {
@@ -1929,6 +1976,8 @@
linux,pci-domain = <4>;
num-lanes = <4>;
+ msi-map = <0x0 &its 0xc0000 0x10000>;
+
interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
@@ -1970,11 +2019,22 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3a_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie3a_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie3a_phy: phy@1c14000 {
@@ -2030,6 +2090,8 @@
linux,pci-domain = <3>;
num-lanes = <2>;
+ msi-map = <0x0 &its 0xb0000 0x10000>;
+
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
@@ -2071,11 +2133,22 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2b_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie2b_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2b_phy: phy@1c1e000 {
@@ -2128,6 +2201,8 @@
linux,pci-domain = <2>;
num-lanes = <4>;
+ msi-map = <0x0 &its 0xa0000 0x10000>;
+
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 523 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 524 IRQ_TYPE_LEVEL_HIGH>,
@@ -2169,11 +2244,22 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2a_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie2a_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2a_phy: phy@1c24000 {
@@ -2641,7 +2727,7 @@
compatible = "qcom,sc8280xp-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3337,6 +3423,88 @@
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
+ usb_2: usb@a4f8800 {
+ compatible = "qcom,sc8280xp-dwc3-mp", "qcom,dwc3";
+ reg = <0 0x0a4f8800 0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_SLEEP_CLK>,
+ <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_AGGRE_USB_NOC_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB_NOC_NORTH_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_USB_AXI_CLK>;
+ clock-names = "cfg_noc", "core", "iface", "sleep", "mock_utmi",
+ "noc_aggr", "noc_aggr_north", "noc_aggr_south", "noc_sys";
+
+ assigned-clocks = <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 857 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 856 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 860 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 859 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 127 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 126 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 129 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 128 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 131 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 130 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 133 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 132 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 16 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-names = "pwr_event_1", "pwr_event_2",
+ "pwr_event_3", "pwr_event_4",
+ "hs_phy_1", "hs_phy_2",
+ "hs_phy_3", "hs_phy_4",
+ "dp_hs_phy_1", "dm_hs_phy_1",
+ "dp_hs_phy_2", "dm_hs_phy_2",
+ "dp_hs_phy_3", "dm_hs_phy_3",
+ "dp_hs_phy_4", "dm_hs_phy_4",
+ "ss_phy_1", "ss_phy_2";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_MP_BCR>;
+
+ interconnects = <&aggre1_noc MASTER_USB3_MP 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_MP 0>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
+ wakeup-source;
+
+ status = "disabled";
+
+ usb_2_dwc3: usb@a400000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a400000 0 0xcd00>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x800 0x0>;
+ phys = <&usb_2_hsphy0>, <&usb_2_qmpphy0>,
+ <&usb_2_hsphy1>, <&usb_2_qmpphy1>,
+ <&usb_2_hsphy2>,
+ <&usb_2_hsphy3>;
+ phy-names = "usb2-0", "usb3-0",
+ "usb2-1", "usb3-1",
+ "usb2-2",
+ "usb2-3";
+ dr_mode = "host";
+ };
+ };
+
usb_0: usb@a6f8800 {
compatible = "qcom,sc8280xp-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
@@ -3361,10 +3529,12 @@
assigned-clock-rates = <19200000>, <200000000>;
interrupts-extended = <&intc GIC_SPI 804 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 805 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 14 IRQ_TYPE_EDGE_BOTH>,
<&pdc 15 IRQ_TYPE_EDGE_BOTH>,
<&pdc 138 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pwr_event",
+ "hs_phy_irq",
"dp_hs_phy_irq",
"dm_hs_phy_irq",
"ss_phy_irq";
@@ -3421,10 +3591,12 @@
assigned-clock-rates = <19200000>, <200000000>;
interrupts-extended = <&intc GIC_SPI 811 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 790 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 12 IRQ_TYPE_EDGE_BOTH>,
<&pdc 13 IRQ_TYPE_EDGE_BOTH>,
<&pdc 136 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pwr_event",
+ "hs_phy_irq",
"dp_hs_phy_irq",
"dm_hs_phy_irq",
"ss_phy_irq";
@@ -4448,6 +4620,11 @@
#thermal-sensor-cells = <1>;
};
+ restart@c264000 {
+ compatible = "qcom,pshold";
+ reg = <0 0x0c264000 0 0x4>;
+ };
+
tsens1: thermal-sensor@c265000 {
compatible = "qcom,sc8280xp-tsens", "qcom,tsens-v2";
reg = <0 0x0c265000 0 0x1ff>, /* TM */
@@ -4799,7 +4976,7 @@
#size-cells = <2>;
ranges;
- msi-controller@17a40000 {
+ its: msi-controller@17a40000 {
compatible = "arm,gic-v3-its";
reg = <0 0x17a40000 0 0x20000>;
msi-controller;
@@ -4966,6 +5143,11 @@
<0 0x18592000 0 0x1000>;
reg-names = "freq-domain0", "freq-domain1";
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dcvsh-irq-0",
+ "dcvsh-irq-1";
+
clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
clock-names = "xo", "alternate";
@@ -4977,7 +5159,7 @@
compatible = "qcom,sc8280xp-nsp0-pas";
reg = <0 0x1b300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5108,7 +5290,7 @@
compatible = "qcom,sc8280xp-nsp1-pas";
reg = <0 0x21300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi b/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
index 819a5f8825e7..a4b722e0fc1e 100644
--- a/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630-sony-xperia-nile.dtsi
@@ -90,6 +90,8 @@
gpio-keys {
compatible = "gpio-keys";
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
key-camera-focus {
label = "Camera Focus";
@@ -645,6 +647,13 @@
bias-disable;
};
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio64", "gpio113";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
imx300_vana_default: imx300-vana-default-state {
pins = "gpio50";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
index 057579ae3013..e2708c74e95a 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
@@ -116,6 +116,33 @@
};
};
+&pmi632_typec {
+ status = "okay";
+
+ connector {
+ compatible = "usb-c-connector";
+
+ power-role = "dual";
+ data-role = "dual";
+ self-powered;
+
+ typec-power-opmode = "default";
+ pd-disable;
+
+ port {
+ pmi632_hs_in: endpoint {
+ remote-endpoint = <&usb_dwc3_hs>;
+ };
+ };
+ };
+};
+
+&pmi632_vbus {
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <1000000>;
+ status = "okay";
+};
+
&sdhc_1 {
status = "okay";
vmmc-supply = <&pm8953_l8>;
@@ -240,8 +267,8 @@
status = "okay";
};
-&usb3_dwc3 {
- dr_mode = "peripheral";
+&usb_dwc3_hs {
+ remote-endpoint = <&pmi632_hs_in>;
};
&wcnss {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
index 32a7bd59e1ec..176b0119fe6d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
@@ -441,6 +441,47 @@
};
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&vreg_l1a_1p225>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "samsung,s6e3fa7-ams559nk06";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 75 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default>;
+
+ power-supply = <&vreg_l6b_3p3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi0_phy {
+ vdds-supply = <&vreg_l1b_0p925>;
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
&pm660l_gpios {
vol_up_pin: vol-up-state {
pins = "gpio7";
@@ -481,6 +522,29 @@
&tlmm {
gpio-reserved-ranges = <0 4>, <81 4>;
+ panel_default: panel-default-state {
+ te-pins {
+ pins = "gpio10";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ reset-pins {
+ pins = "gpio75";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ mode-pins {
+ pins = "gpio76";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
touchscreen_default: ts-default-state {
ts-reset-pins {
pins = "gpio99";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 1f517328199b..9a6d3d0c0ee4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -195,6 +195,12 @@
gpio = <&tlmm 90 GPIO_ACTIVE_HIGH>;
enable-active-high;
+ /*
+ * FIXME: this regulator is responsible for VBUS on the left USB
+ * port. Keep it always on until we can correctly model this
+ * relationship.
+ */
+ regulator-always-on;
pinctrl-names = "default";
pinctrl-0 = <&pcie0_pwren_state>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 2f20be99ee7e..10de2bd46ffc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2375,6 +2375,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -2479,6 +2489,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0a000 {
diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
index 7dbdf8ca6de6..da1704061d58 100644
--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
@@ -224,7 +224,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -411,7 +411,7 @@
hwlocks = <&tcsr_mutex 3>;
};
- soc: soc {
+ soc: soc@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 24bcec3366ef..84ff20a96c83 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -1205,6 +1205,37 @@
status = "disabled";
};
+ cryptobam: dma-controller@1dc4000 {
+ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ reg = <0 0x01dc4000 0 0x24000>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ num-channels = <16>;
+ qcom,num-ees = <4>;
+ iommus = <&apps_smmu 0x426 0x11>,
+ <&apps_smmu 0x432 0x0>,
+ <&apps_smmu 0x436 0x11>,
+ <&apps_smmu 0x438 0x1>,
+ <&apps_smmu 0x43f 0x0>;
+ };
+
+ crypto: crypto@1dfa000 {
+ compatible = "qcom,sm6350-qce", "qcom,sm8150-qce", "qcom,qce";
+ reg = <0 0x01dfa000 0 0x6000>;
+ dmas = <&cryptobam 4>, <&cryptobam 5>;
+ dma-names = "rx", "tx";
+ iommus = <&apps_smmu 0x426 0x11>,
+ <&apps_smmu 0x432 0x0>,
+ <&apps_smmu 0x436 0x11>,
+ <&apps_smmu 0x438 0x1>,
+ <&apps_smmu 0x43f 0x0>;
+ interconnects = <&aggre2_noc MASTER_CRYPTO_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_EBI_CH0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "memory";
+ };
+
ipa: ipa@1e40000 {
compatible = "qcom,sm6350-ipa";
@@ -1252,7 +1283,7 @@
compatible = "qcom,sm6350-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -1511,7 +1542,7 @@
compatible = "qcom,sm6350-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -2033,6 +2064,14 @@
remote-endpoint = <&mdss_dsi0_in>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ dpu_intf0_out: endpoint {
+ remote-endpoint = <&mdss_dp_in>;
+ };
+ };
};
mdp_opp_table: opp-table {
@@ -2070,6 +2109,86 @@
};
};
+ mdss_dp: displayport-controller@ae90000 {
+ compatible = "qcom,sm6350-dp", "qcom,sm8350-dp";
+ reg = <0 0xae90000 0 0x200>,
+ <0 0xae90200 0 0x200>,
+ <0 0xae90400 0 0x600>,
+ <0 0xae91000 0 0x400>,
+ <0 0xae91400 0 0x400>;
+ interrupt-parent = <&mdss>;
+ interrupts = <12>;
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+ <&dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+ <&dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
+ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+
+ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ #sound-dai-cells = <0>;
+
+ operating-points-v2 = <&dp_opp_table>;
+ power-domains = <&rpmhpd SM6350_CX>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dp_in: endpoint {
+ remote-endpoint = <&dpu_intf0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dp_out: endpoint {
+ };
+ };
+ };
+
+ dp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-160000000 {
+ opp-hz = /bits/ 64 <160000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
mdss_dsi0: dsi@ae94000 {
compatible = "qcom,sm6350-dsi-ctrl", "qcom,mdss-dsi-ctrl";
reg = <0 0x0ae94000 0 0x400>;
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index 4386f8a9c636..f40509d91bbd 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -1561,7 +1561,7 @@
compatible = "qcom,sm6375-adsp-pas";
reg = <0 0x0a400000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index de670b407ef1..6cb6f503fdac 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -609,6 +609,11 @@
firmware-name = "qcom/sm8150/cdsp.mbn";
};
+&remoteproc_mpss {
+ firmware-name = "qcom/sm8150/modem.mbn";
+ status = "okay";
+};
+
&remoteproc_slpi {
status = "okay";
@@ -713,3 +718,14 @@
&usb_2_dwc3 {
dr_mode = "host";
};
+
+&wifi {
+ status = "okay";
+
+ vdd-0.8-cx-mx-supply = <&vreg_l1a_0p75>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l2c_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l11c_3p3>;
+
+ qcom,ath10k-calibration-variant = "Qualcomm_sm8150hdk";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index a35c0852b5a1..ff22e4346660 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -1901,6 +1901,16 @@
pinctrl-0 = <&pcie0_default_state>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -2011,6 +2021,16 @@
pinctrl-0 = <&pcie1_default_state>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
index 6f54f50a70b0..41f117474872 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
@@ -636,7 +636,8 @@
connector {
compatible = "usb-c-connector";
- power-role = "source";
+ op-sink-microwatt = <10000000>;
+ power-role = "dual";
data-role = "dual";
self-powered;
@@ -645,6 +646,12 @@
PDO_FIXED_USB_COMM |
PDO_FIXED_DATA_SWAP)>;
+ sink-pdos = <PDO_FIXED(5000, 3000,
+ PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_USB_COMM |
+ PDO_FIXED_DATA_SWAP)
+ PDO_VAR(5000, 12000, 5000)>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -661,6 +668,8 @@
};
&pm8150b_vbus {
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <3000000>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 39bd8f0eba1e..8ccade628f1f 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -2203,6 +2203,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -2318,6 +2328,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -2433,6 +2453,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie2_phy: phy@1c16000 {
@@ -3062,7 +3092,7 @@
compatible = "qcom,sm8250-slpi-pas";
reg = <0 0x05c00000 0 0x4000>;
- interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 9 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3766,7 +3796,7 @@
compatible = "qcom,sm8250-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5928,7 +5958,7 @@
compatible = "qcom,sm8250-adsp-pas";
reg = <0 0x17300000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
index b43d264ed42b..4c25ab2f5670 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
@@ -42,6 +42,7 @@
compatible = "qcom,sm8350-pmic-glink", "qcom,pmic-glink";
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index a5e7dbbd8c6c..f7c4700f00c3 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/firmware/qcom,scm.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm8350.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -1572,6 +1573,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -1669,6 +1680,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -1730,6 +1751,11 @@
<&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
<&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "ufs-ddr", "cpu-ufs";
freq-table-hz =
<75000000 300000000>,
<0 0>,
diff --git a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
index 0786cff07b89..3be46b56c723 100644
--- a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
@@ -95,6 +95,7 @@
compatible = "qcom,sm8450-pmic-glink", "qcom,pmic-glink";
#address-cells = <1>;
#size-cells = <0>;
+ orientation-gpios = <&tlmm 91 GPIO_ACTIVE_HIGH>;
connector@0 {
compatible = "usb-c-connector";
diff --git a/arch/arm64/boot/dts/qcom/sm8450-qrd.dts b/arch/arm64/boot/dts/qcom/sm8450-qrd.dts
index c7d05945aa51..7b62ead68e77 100644
--- a/arch/arm64/boot/dts/qcom/sm8450-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8450-qrd.dts
@@ -467,6 +467,14 @@
vdda-pll-supply = <&vreg_l5b_0p88>;
vdda18-supply = <&vreg_l1c_1p8>;
vdda33-supply = <&vreg_l2b_3p07>;
+ qcom,squelch-detector-bp = <(-2090)>;
+ qcom,hs-disconnect-bp = <1743>;
+ qcom,pre-emphasis-amplitude-bp = <40000>;
+ qcom,pre-emphasis-duration-bp = <20000>;
+ qcom,hs-amplitude-bp = <2000>;
+ qcom,hs-output-impedance-micro-ohms = <2600000>;
+ qcom,hs-crossover-voltage-microvolt = <(-31000)>;
+ qcom,hs-rise-fall-time-bp = <(-4100)>;
};
&usb_1_qmpphy {
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index b86be34a912b..616461fcbab9 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -1777,12 +1777,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
<0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5980.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5981 0x1>,
- <0x100 &gic_its 0x5980 0x1>;
+ msi-map = <0x0 &gic_its 0x5980 0x1>,
+ <0x100 &gic_its 0x5981 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
@@ -1850,6 +1846,16 @@
pinctrl-0 = <&pcie0_default_state>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -1900,12 +1906,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5a00.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5a01 0x1>,
- <0x100 &gic_its 0x5a00 0x1>;
+ msi-map = <0x0 &gic_its 0x5a00 0x1>,
+ <0x100 &gic_its 0x5a01 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
@@ -1971,6 +1973,16 @@
pinctrl-0 = <&pcie1_default_state>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -2363,6 +2375,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "sdsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
@@ -2665,6 +2678,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "adsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
@@ -2731,6 +2745,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "cdsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
new file mode 100644
index 000000000000..85e0d3d66e16
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/firmware/qcom,scm.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/cs35l45.h>
+#include "sm8550.dtsi"
+#include "pm8010.dtsi"
+#include "pm8550.dtsi"
+#include "pm8550b.dtsi"
+#define PMK8550VE_SID 5
+#include "pm8550ve.dtsi"
+#include "pm8550vs.dtsi"
+#include "pmk8550.dtsi"
+/* TODO: Only one SID of PMR735D seems accessible? */
+
+/delete-node/ &hwfence_shbuf;
+/delete-node/ &mpss_mem;
+/delete-node/ &rmtfs_mem;
+/ {
+ model = "Sony Xperia 1 V";
+ compatible = "sony,pdx234", "qcom,sm8550";
+ chassis-type = "handset";
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c4 = &i2c4;
+ i2c10 = &i2c10;
+ i2c11 = &i2c11;
+ i2c16 = &i2c_hub_2;
+ serial0 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "gpio-keys";
+
+ pinctrl-0 = <&focus_n &snapshot_n &vol_down_n>;
+ pinctrl-names = "default";
+
+ key-camera-focus {
+ label = "Camera Focus";
+ linux,code = <KEY_CAMERA_FOCUS>;
+ gpios = <&pm8550b_gpios 8 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+
+ key-camera-snapshot {
+ label = "Camera Snapshot";
+ gpios = <&pm8550b_gpios 7 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+
+ key-volume-down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,sm8550-pmic-glink", "qcom,pmic-glink";
+ orientation-gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss_in: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ mpss_mem: mpss-region@89800000 {
+ reg = <0x0 0x89800000 0x0 0x10800000>;
+ no-map;
+ };
+
+ splash@b8000000 {
+ reg = <0x0 0xb8000000 0x0 0x2b00000>;
+ no-map;
+ };
+
+ hwfence_shbuf: hwfence-shbuf-region@e6440000 {
+ reg = <0x0 0xe6440000 0x0 0x2dd000>;
+ no-map;
+ };
+
+ rmtfs_mem: memory@f8b00000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0 0xf8b00000 0x0 0x280000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>;
+ };
+
+ ramoops@ffd00000 {
+ compatible = "ramoops";
+ reg = <0x0 0xffd00000 0x0 0xc0000>;
+ console-size = <0x40000>;
+ record-size = <0x1000>;
+ pmsg-size = <0x40000>;
+ ecc-size = <16>;
+ };
+
+ rdtag-store-region@ffdc0000 {
+ reg = <0x0 0xffdc0000 0x0 0x40000>;
+ no-map;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ pm8550_bob1: bob1 {
+ regulator-name = "pm8550_bob1";
+ regulator-min-microvolt = <3416000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* TODO: bob2 @ 2.704-3.008V doesn't fall into the vreg driver constraints */
+
+ pm8550_l1: ldo1 {
+ regulator-name = "pm8550_l1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l2: ldo2 {
+ regulator-name = "pm8550_l2";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L4 exists in cmd-db, but the board seems to crash on access */
+
+ pm8550_l5: ldo5 {
+ regulator-name = "pm8550_l5";
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l6: ldo6 {
+ regulator-name = "pm8550_l6";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l7: ldo7 {
+ regulator-name = "pm8550_l7";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l8: ldo8 {
+ regulator-name = "pm8550_l8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l9: ldo9 {
+ regulator-name = "pm8550_l9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l10: ldo10 {
+ regulator-name = "pm8550_l10";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l11: ldo11 {
+ regulator-name = "pm8550_l11";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l12: ldo12 {
+ regulator-name = "pm8550_l12";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l13: ldo13 {
+ regulator-name = "pm8550_l13";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l14: ldo14 {
+ regulator-name = "pm8550_l14";
+ regulator-min-microvolt = <3304000>;
+ regulator-max-microvolt = <3304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l15: ldo15 {
+ regulator-name = "pm8550_l15";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l16: ldo16 {
+ regulator-name = "pm8550_l16";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550_l17: ldo17 {
+ regulator-name = "pm8550_l17";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ pm8550vs_0_l1: ldo1 {
+ regulator-name = "pm8550vs_0_l1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_0_l3: ldo3 {
+ regulator-name = "pm8550vs_0_l3";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ pm8550vs_1_l1: ldo1 {
+ regulator-name = "pm8550vs_1_l1";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L3 exists in cmd-db, but the board seems to crash on access */
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ pm8550vs_2_s4: smps4 {
+ regulator-name = "pm8550vs_2_s4";
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <984000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_2_s5: smps5 {
+ regulator-name = "pm8550vs_2_s5";
+ regulator-min-microvolt = <1010000>;
+ regulator-max-microvolt = <1120000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_2_l1: ldo1 {
+ regulator-name = "pm8550vs_2_l1";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_2_l2: ldo2 {
+ regulator-name = "pm8550vs_2_l2";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <968000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_2_l3: ldo3 {
+ regulator-name = "pm8550vs_2_l3";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ pm8550ve_s4: smps4 {
+ regulator-name = "pm8550ve_s4";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550ve_l1: ldo1 {
+ regulator-name = "pm8550ve_l1";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550ve_l2: ldo2 {
+ regulator-name = "pm8550ve_l2";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550ve_l3: ldo3 {
+ regulator-name = "pm8550ve_l3";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "g";
+
+ pm8550vs_3_s1: smps1 {
+ regulator-name = "pm8550vs_3_s1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_s2: smps2 {
+ regulator-name = "pm8550vs_3_s2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1036000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_s3: smps3 {
+ regulator-name = "pm8550vs_3_s3";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_s4: smps4 {
+ regulator-name = "pm8550vs_3_s4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_s5: smps5 {
+ regulator-name = "pm8550vs_3_s5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_s6: smps6 {
+ regulator-name = "pm8550vs_3_s6";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_l1: ldo1 {
+ regulator-name = "pm8550vs_3_l1";
+ regulator-min-microvolt = <1144000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_l2: ldo2 {
+ regulator-name = "pm8550vs_3_l2";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ pm8550vs_3_l3: ldo3 {
+ regulator-name = "pm8550vs_3_l3";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ /* TODO: Unknown PMIC @ k, l, PM8010 @ m, n */
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&gpi_dma2 {
+ status = "okay";
+};
+
+&i2c_hub_2 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ pmic@75 {
+ compatible = "dlg,slg51000";
+ reg = <0x75>;
+ dlg,cs-gpios = <&pm8550vs_g_gpios 4 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&cam_pwr_a_cs>;
+ pinctrl-names = "default";
+
+ regulators {
+ slg51000_a_ldo1: ldo1 {
+ regulator-name = "slg51000_a_ldo1";
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ slg51000_a_ldo2: ldo2 {
+ regulator-name = "slg51000_a_ldo2";
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ slg51000_a_ldo3: ldo3 {
+ regulator-name = "slg51000_a_ldo3";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+
+ slg51000_a_ldo4: ldo4 {
+ regulator-name = "slg51000_a_ldo4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+
+ slg51000_a_ldo5: ldo5 {
+ regulator-name = "slg51000_a_ldo5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ slg51000_a_ldo6: ldo6 {
+ regulator-name = "slg51000_a_ldo6";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ slg51000_a_ldo7: ldo7 {
+ regulator-name = "slg51000_a_ldo7";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+ };
+ };
+};
+
+&i2c_master_hub_0 {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <1000000>;
+ status = "okay";
+
+ /* NXP NFC @ 28 */
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ /* LX Semi SW82907 touchscreen @ 28 */
+};
+
+&i2c10 {
+ clock-frequency = <1000000>;
+ status = "okay";
+
+ /* Cirrus Logic CS40L25A boosted haptics driver @ 40 */
+};
+
+&i2c11 {
+ clock-frequency = <1000000>;
+ status = "okay";
+
+ cs35l41_l: speaker-amp@30 {
+ compatible = "cirrus,cs35l45";
+ reg = <0x30>;
+ interrupts-extended = <&tlmm 182 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tlmm 183 GPIO_ACTIVE_HIGH>;
+ cirrus,asp-sdout-hiz-ctrl = <(CS35L45_ASP_TX_HIZ_UNUSED | CS35L45_ASP_TX_HIZ_DISABLED)>;
+ #sound-dai-cells = <1>;
+
+ cirrus,gpio-ctrl2 {
+ gpio-ctrl = <0x2>;
+ };
+ };
+
+ cs35l41_r: speaker-amp@31 {
+ compatible = "cirrus,cs35l45";
+ reg = <0x31>;
+ interrupts-extended = <&tlmm 182 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tlmm 183 GPIO_ACTIVE_HIGH>;
+ cirrus,asp-sdout-hiz-ctrl = <(CS35L45_ASP_TX_HIZ_UNUSED | CS35L45_ASP_TX_HIZ_DISABLED)>;
+ #sound-dai-cells = <1>;
+
+ cirrus,gpio-ctrl2 {
+ gpio-ctrl = <0x2>;
+ };
+ };
+};
+
+&pcie0 {
+ wake-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie0_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&pm8550vs_2_l1>;
+ vdda-pll-supply = <&pm8550vs_2_l3>;
+
+ status = "okay";
+};
+
+&pm8550_flash {
+ status = "okay";
+
+ led-0 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <1>, <4>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <1000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <0>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_YELLOW>;
+ led-sources = <2>, <3>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <1000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <1>;
+ };
+};
+
+&pm8550_gpios {
+ vol_down_n: volume-down-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+
+ sdc2_card_det_n: sd-card-det-n-state {
+ pins = "gpio12";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-down;
+ output-disable;
+ input-enable;
+ };
+};
+
+&pm8550b_gpios {
+ snapshot_n: snapshot-n-state {
+ pins = "gpio7";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+
+ focus_n: focus-n-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&pm8550vs_g_gpios {
+ cam_pwr_a_cs: cam-pwr-a-cs-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <0x01>;
+ drive-push-pull;
+ output-low;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ };
+};
+
+&pm8550b_eusb2_repeater {
+ qcom,tune-usb2-disc-thres = /bits/ 8 <0x6>;
+ qcom,tune-usb2-amplitude = /bits/ 8 <0xf>;
+ qcom,tune-usb2-preem = /bits/ 8 <0x7>;
+ vdd18-supply = <&pm8550_l15>;
+ vdd3-supply = <&pm8550_l5>;
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEUP>;
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sm8550/Sony/yodo/adsp.mbn",
+ "qcom/sm8550/Sony/yodo/adsp_dtb.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm8550/Sony/yodo/cdsp.mbn",
+ "qcom/sm8550/Sony/yodo/cdsp_dtb.mbn";
+ status = "okay";
+};
+
+&sdhc_2 {
+ cd-gpios = <&pm8550_gpios 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&sdc2_default &sdc2_card_det_n>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_card_det_n>;
+ pinctrl-names = "default", "sleep";
+ vmmc-supply = <&pm8550_l9>;
+ vqmmc-supply = <&pm8550_l8>;
+ no-sdio;
+ no-mmc;
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <32 8>;
+};
+
+&uart7 {
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_hs_in>;
+};
+
+&usb_1_dwc3_ss {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
+};
+
+&usb_1_hsphy {
+ vdd-supply = <&pm8550vs_2_l1>;
+ vdda12-supply = <&pm8550vs_2_l3>;
+ phys = <&pm8550b_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy {
+ vdda-phy-supply = <&pm8550vs_2_l3>;
+ vdda-pll-supply = <&pm8550ve_l3>;
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss_in>;
+};
+
+&usb_dp_qmpphy_usb_ss_in {
+ remote-endpoint = <&usb_1_dwc3_ss>;
+};
+
+&xo_board {
+ clock-frequency = <76800000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index 3904348075f6..bc5aeb05ffc3 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -812,6 +812,7 @@
dma-channels = <12>;
dma-channel-mask = <0x3e>;
iommus = <&apps_smmu 0x436 0>;
+ dma-coherent;
status = "disabled";
};
@@ -823,6 +824,7 @@
clocks = <&gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
iommus = <&apps_smmu 0x423 0>;
+ dma-coherent;
#address-cells = <2>;
#size-cells = <2>;
status = "disabled";
@@ -1322,6 +1324,7 @@
dma-channels = <12>;
dma-channel-mask = <0x1e>;
iommus = <&apps_smmu 0xb6 0>;
+ dma-coherent;
status = "disabled";
};
@@ -1335,6 +1338,7 @@
iommus = <&apps_smmu 0xa3 0>;
interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>;
interconnect-names = "qup-core";
+ dma-coherent;
#address-cells = <2>;
#size-cells = <2>;
status = "disabled";
@@ -1755,9 +1759,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
iommu-map = <0x0 &apps_smmu 0x1400 0x1>,
<0x100 &apps_smmu 0x1401 0x1>;
@@ -1770,6 +1773,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -1867,9 +1880,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_1 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
iommu-map = <0x0 &apps_smmu 0x1480 0x1>,
<0x100 &apps_smmu 0x1481 0x1>;
@@ -1883,6 +1895,16 @@
phy-names = "pciephy";
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -3227,12 +3249,21 @@
reg = <0x0 0x0a600000 0x0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x40 0x0>;
- snps,dis_u2_susphy_quirk;
- snps,dis_enblslpm_quirk;
- snps,usb3_lpm_capable;
phys = <&usb_1_hsphy>,
<&usb_dp_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,usb2-gadget-lpm-disable;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,is-utmi-l1-suspend;
+ snps,usb3_lpm_capable;
+ snps,usb2-lpm-disable;
+ snps,has-lpm-erratum;
+ tx-fifo-resize;
+ dma-coherent;
ports {
#address-cells = <1>;
@@ -3968,6 +3999,7 @@
<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
};
intc: interrupt-controller@17100000 {
@@ -4316,6 +4348,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "adsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
@@ -4454,6 +4487,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "cdsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
index 4450273f9667..d04ceaa73c2b 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
@@ -641,10 +641,6 @@
status = "okay";
};
-&mdss_mdp {
- status = "okay";
-};
-
&pcie_1_phy_aux_clk {
clock-frequency = <1000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
index b07cac2e5bc8..4e94f7fe4d2d 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
@@ -766,6 +766,14 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/sm8650/gen70900_zap.mbn";
+ };
+};
+
&lpass_tlmm {
spkr_1_sd_n_active: spkr-1-sd-n-active-state {
pins = "gpio21";
@@ -827,10 +835,6 @@
remote-endpoint = <&usb_dp_qmpphy_dp_in>;
};
-&mdss_mdp {
- status = "okay";
-};
-
&pcie_1_phy_aux_clk {
clock-frequency = <1000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index ba72d8f38420..62a6e77730bc 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -485,9 +485,9 @@
no-map;
};
- /* Merged aop_config, tme_crash_dump, tme_log and uefi_log regions */
+ /* Merged aop_config, tme_crash_dump, tme_log, uefi_log, and chipinfo regions */
aop_tme_uefi_merged_mem: aop-tme-uefi-merged@81c80000 {
- reg = <0 0x81c80000 0 0x74000>;
+ reg = <0 0x81c80000 0 0x75000>;
no-map;
};
@@ -2274,9 +2274,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <0>;
@@ -2294,6 +2293,16 @@
dma-coherent;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie0_phy: phy@1c06000 {
@@ -2402,9 +2411,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <1>;
@@ -2422,6 +2430,16 @@
<0x02000000 0 0x40300000 0 0x40300000 0 0x1fd00000>;
status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie1_phy: phy@1c0e000 {
@@ -2591,6 +2609,143 @@
#reset-cells = <1>;
};
+ gpu: gpu@3d00000 {
+ compatible = "qcom,adreno-43051401", "qcom,adreno";
+ reg = <0x0 0x03d00000 0x0 0x40000>,
+ <0x0 0x03d9e000 0x0 0x2000>,
+ <0x0 0x03d61000 0x0 0x800>;
+ reg-names = "kgsl_3d0_reg_memory",
+ "cx_mem",
+ "cx_dbgc";
+
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&adreno_smmu 0 0x0>,
+ <&adreno_smmu 1 0x0>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+
+ status = "disabled";
+
+ zap-shader {
+ memory-region = <&gpu_micro_code_mem>;
+ };
+
+ /* Speedbin needs more work on A740+, keep only lower freqs */
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-231000000 {
+ opp-hz = /bits/ 64 <231000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D2>;
+ };
+
+ opp-310000000 {
+ opp-hz = /bits/ 64 <310000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ };
+
+ opp-366000000 {
+ opp-hz = /bits/ 64 <366000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D0>;
+ };
+
+ opp-422000000 {
+ opp-hz = /bits/ 64 <422000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L1>;
+ };
+
+ opp-578000000 {
+ opp-hz = /bits/ 64 <578000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ opp-629000000 {
+ opp-hz = /bits/ 64 <629000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L0>;
+ };
+
+ opp-680000000 {
+ opp-hz = /bits/ 64 <680000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ opp-720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ };
+
+ opp-770000000 {
+ opp-hz = /bits/ 64 <770000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ opp-834000000 {
+ opp-hz = /bits/ 64 <834000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+ };
+ };
+
+ gmu: gmu@3d6a000 {
+ compatible = "qcom,adreno-gmu-750.1", "qcom,adreno-gmu";
+ reg = <0x0 0x03d6a000 0x0 0x35000>,
+ <0x0 0x03d50000 0x0 0x10000>,
+ <0x0 0x0b280000 0x0 0x10000>;
+ reg-names = "gmu", "rscc", "gmu_pdc";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ clocks = <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>,
+ <&gpucc GPU_CC_DEMET_CLK>;
+ clock-names = "ahb",
+ "gmu",
+ "cxo",
+ "axi",
+ "memnoc",
+ "hub",
+ "demet";
+
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx",
+ "gx";
+
+ iommus = <&adreno_smmu 5 0x0>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-260000000 {
+ opp-hz = /bits/ 64 <260000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ opp-625000000 {
+ opp-hz = /bits/ 64 <625000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+ };
+ };
+
gpucc: clock-controller@3d90000 {
compatible = "qcom,sm8650-gpucc";
reg = <0 0x03d90000 0 0xa000>;
@@ -2604,6 +2759,50 @@
#power-domain-cells = <1>;
};
+ adreno_smmu: iommu@3da0000 {
+ compatible = "qcom,sm8650-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x03da0000 0x0 0x40000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 677 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 574 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 575 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 659 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 661 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 664 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 665 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 666 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 668 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 669 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 699 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>;
+ clock-names = "hlos",
+ "bus",
+ "iface",
+ "ahb";
+ power-domains = <&gpucc GPU_CX_GDSC>;
+ dma-coherent;
+ };
+
ipa: ipa@3f40000 {
compatible = "qcom,sm8650-ipa", "qcom,sm8550-ipa";
@@ -3584,14 +3783,16 @@
compatible = "qcom,sm8650-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
- interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 14 IRQ_TYPE_EDGE_RISING>,
<&pdc 15 IRQ_TYPE_EDGE_RISING>,
- <&pdc 14 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "hs_phy_irq",
- "ss_phy_irq",
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
"dm_hs_phy_irq",
- "dp_hs_phy_irq";
+ "ss_phy_irq";
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
@@ -4845,6 +5046,8 @@
label = "adsp";
+ qcom,non-secure-domain;
+
#address-cells = <1>;
#size-cells = <0>;
@@ -5002,6 +5205,8 @@
label = "cdsp";
+ qcom,non-secure-domain;
+
#address-cells = <1>;
#size-cells = <0>;
@@ -5084,6 +5289,38 @@
<&apps_smmu 0x19c8 0x0>;
dma-coherent;
};
+
+ /* note: secure cb9 in downstream */
+
+ compute-cb@10 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <12>;
+
+ iommus = <&apps_smmu 0x196c 0x0>,
+ <&apps_smmu 0x0c0c 0x20>,
+ <&apps_smmu 0x19cc 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@11 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <13>;
+
+ iommus = <&apps_smmu 0x196d 0x0>,
+ <&apps_smmu 0x0c0d 0x20>,
+ <&apps_smmu 0x19cd 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@12 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <14>;
+
+ iommus = <&apps_smmu 0x196e 0x0>,
+ <&apps_smmu 0x0c0e 0x20>,
+ <&apps_smmu 0x19ce 0x0>;
+ dma-coherent;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index 6a0a54532e5f..c5c2895b37c7 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
/ {
model = "Qualcomm Technologies, Inc. X1E80100 CRD";
@@ -598,8 +599,6 @@
compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
- data-lanes = <0 1 2 3>;
-
status = "okay";
aux-bus {
@@ -619,6 +618,9 @@
port@1 {
reg = <1>;
mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
remote-endpoint = <&edp_panel_in>;
};
};
@@ -680,16 +682,32 @@
status = "okay";
};
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l8b_3p0>;
+};
+
&swr0 {
status = "okay";
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
/* WSA8845, Left Woofer */
left_woofer: speaker@0,0 {
compatible = "sdw20217020400";
reg = <0 0>;
- pinctrl-0 = <&spkr_01_sd_n_active>;
- pinctrl-names = "default";
- powerdown-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <0>;
sound-name-prefix = "WooferLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
@@ -700,8 +718,7 @@
left_tweeter: speaker@0,1 {
compatible = "sdw20217020400";
reg = <0 1>;
- /* pinctrl in left_woofer node because of sharing the GPIO*/
- powerdown-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <0>;
sound-name-prefix = "TwitterLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
@@ -734,13 +751,14 @@
&swr3 {
status = "okay";
+ pinctrl-0 = <&wsa2_swr_active>, <&spkr_23_sd_n_active>;
+ pinctrl-names = "default";
+
/* WSA8845, Right Woofer */
right_woofer: speaker@0,0 {
compatible = "sdw20217020400";
reg = <0 0>;
- pinctrl-0 = <&spkr_23_sd_n_active>;
- pinctrl-names = "default";
- powerdown-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <0>;
sound-name-prefix = "WooferRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
@@ -751,8 +769,7 @@
right_tweeter: speaker@0,1 {
compatible = "sdw20217020400";
reg = <0 1>;
- /* pinctrl in right_woofer node because of sharing the GPIO*/
- powerdown-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <0>;
sound-name-prefix = "TwitterRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
@@ -817,6 +834,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_0_eusb2_repeater>;
+
status = "okay";
};
@@ -837,6 +856,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_1_eusb2_repeater>;
+
status = "okay";
};
@@ -857,6 +878,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_2_eusb2_repeater>;
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
new file mode 100644
index 000000000000..04301f772fbd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+};
+
+&spmi_bus1 {
+ smb2360_0: pmic@7 {
+ compatible = "qcom,smb2360", "qcom,spmi-pmic";
+ reg = <0x7 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb2360_0_eusb2_repeater: phy@fd00 {
+ compatible = "qcom,smb2360-eusb2-repeater";
+ reg = <0xfd00>;
+ #phy-cells = <0>;
+ };
+ };
+
+ smb2360_1: pmic@a {
+ compatible = "qcom,smb2360", "qcom,spmi-pmic";
+ reg = <0xa SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb2360_1_eusb2_repeater: phy@fd00 {
+ compatible = "qcom,smb2360-eusb2-repeater";
+ reg = <0xfd00>;
+ #phy-cells = <0>;
+ };
+ };
+
+ smb2360_2: pmic@b {
+ compatible = "qcom,smb2360", "qcom,spmi-pmic";
+ reg = <0xb SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb2360_2_eusb2_repeater: phy@fd00 {
+ compatible = "qcom,smb2360-eusb2-repeater";
+ reg = <0xfd00>;
+ #phy-cells = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index e76d29053d79..2061fbe7b75a 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
/ {
model = "Qualcomm Technologies, Inc. X1E80100 QCP";
@@ -409,8 +410,6 @@
compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
- data-lanes = <0 1 2 3>;
-
status = "okay";
aux-bus {
@@ -430,6 +429,9 @@
port@1 {
reg = <1>;
mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
remote-endpoint = <&edp_panel_in>;
};
};
@@ -491,6 +493,21 @@
status = "okay";
};
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l8b_3p0>;
+};
+
&tlmm {
gpio-reserved-ranges = <33 3>, /* Unused */
<44 4>, /* SPI (TPM) */
@@ -513,6 +530,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_0_eusb2_repeater>;
+
status = "okay";
};
@@ -533,6 +552,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_1_eusb2_repeater>;
+
status = "okay";
};
@@ -553,6 +574,8 @@
vdd-supply = <&vreg_l2e_0p8>;
vdda12-supply = <&vreg_l3e_1p2>;
+ phys = <&smb2360_2_eusb2_repeater>;
+
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 8e517f76189e..5f90a0b3c016 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -284,7 +284,7 @@
domain-idle-states {
CLUSTER_CL4: cluster-sleep-0 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "l2-ret";
arm,psci-suspend-param = <0x01000044>;
entry-latency-us = <350>;
@@ -293,7 +293,7 @@
};
CLUSTER_CL5: cluster-sleep-1 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "ret-pll-off";
arm,psci-suspend-param = <0x01000054>;
entry-latency-us = <2200>;
@@ -3088,7 +3088,7 @@
qcom,ports-hstart = /bits/ 8 <0xff 0x03 0x00 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
qcom,ports-hstop = /bits/ 8 <0xff 0x06 0x0f 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
qcom,ports-word-length = /bits/ 8 <0x01 0x07 0x04 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
- qcom,ports-block-pack-mode = /bits/ 8 <0xff 0x00 0x01 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
+ qcom,ports-block-pack-mode = /bits/ 8 <0xff 0xff 0x01 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
qcom,ports-lane-control = /bits/ 8 <0x01 0x00 0x00 0x00 0x00 0xff 0xff 0xff 0xff 0xff 0xff 0xff>;
@@ -4095,8 +4095,6 @@
mdss_dp3_in: endpoint {
remote-endpoint = <&mdss_intf5_out>;
-
- link-frequencies = /bits/ 64 <8100000000>;
};
};
@@ -4221,6 +4219,48 @@
#clock-cells = <0>;
};
+ spmi: arbiter@c400000 {
+ compatible = "qcom,x1e80100-spmi-pmic-arb";
+ reg = <0 0x0c400000 0 0x3000>,
+ <0 0x0c500000 0 0x400000>,
+ <0 0x0c440000 0 0x80000>;
+ reg-names = "core", "chnls", "obsrvr";
+
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ spmi_bus0: spmi@c42d000 {
+ reg = <0 0x0c42d000 0 0x4000>,
+ <0 0x0c4c0000 0 0x10000>;
+ reg-names = "cnfg", "intr";
+
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ spmi_bus1: spmi@c432000 {
+ reg = <0 0x0c432000 0 0x4000>,
+ <0 0x0c4d0000 0 0x10000>;
+ reg-names = "cnfg", "intr";
+
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
tlmm: pinctrl@f100000 {
compatible = "qcom,x1e80100-tlmm";
diff --git a/arch/arm64/boot/dts/realtek/rtd129x.dtsi b/arch/arm64/boot/dts/realtek/rtd129x.dtsi
index 39aefe66a794..ba50e292bdbb 100644
--- a/arch/arm64/boot/dts/realtek/rtd129x.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd129x.dtsi
@@ -48,7 +48,7 @@
clock-output-names = "osc27M";
};
- soc {
+ soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/realtek/rtd139x.dtsi b/arch/arm64/boot/dts/realtek/rtd139x.dtsi
index a3c10ceeb586..e8af39193e75 100644
--- a/arch/arm64/boot/dts/realtek/rtd139x.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd139x.dtsi
@@ -47,7 +47,7 @@
clock-output-names = "osc27M";
};
- soc {
+ soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/realtek/rtd16xx.dtsi b/arch/arm64/boot/dts/realtek/rtd16xx.dtsi
index 34802cc62983..3a7f6e35b7f7 100644
--- a/arch/arm64/boot/dts/realtek/rtd16xx.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd16xx.dtsi
@@ -109,7 +109,7 @@
};
arm_pmu: pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>,
<&cpu3>, <&cpu4>, <&cpu5>;
@@ -127,7 +127,7 @@
#clock-cells = <0>;
};
- soc {
+ soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 5f3e0e61d78d..fbd214a1a638 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -62,6 +62,9 @@ dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-ulcb.dtb
dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-eagle.dtb
+dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-eagle-function-expansion.dtbo
+r8a77970-eagle-function-expansion-dtbs := r8a77970-eagle.dtb r8a77970-eagle-function-expansion.dtbo
+dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-eagle-function-expansion.dtb
dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-v3msk.dtb
dtb-$(CONFIG_ARCH_R8A77980) += r8a77980-condor.dtb
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso b/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso
new file mode 100644
index 000000000000..3aa243c5f04c
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Eagle V3M Function expansion board.
+ *
+ * Copyright (C) 2024 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ /* CN4 */
+ /* Eagle: SW18 set to OFF */
+ cvbs-in-cn4 {
+ compatible = "composite-video-connector";
+ label = "CVBS IN CN4";
+
+ port {
+ cvbs_con: endpoint {
+ remote-endpoint = <&adv7482_ain7>;
+ };
+ };
+ };
+
+ /* CN2 */
+ /* Eagle: SW35 set 5, 6 and 8 to OFF */
+ hdmi-in-cn2 {
+ compatible = "hdmi-connector";
+ label = "HDMI IN CN2";
+ type = "a";
+
+ port {
+ hdmi_in_con2: endpoint {
+ remote-endpoint = <&adv7612_in>;
+ };
+ };
+ };
+
+ /* CN3 */
+ /* Eagle: SW18 set to OFF */
+ hdmi-in-cn3 {
+ compatible = "hdmi-connector";
+ label = "HDMI IN CN3";
+ type = "a";
+
+ port {
+ hdmi_in_con: endpoint {
+ remote-endpoint = <&adv7482_hdmi>;
+ };
+ };
+ };
+};
+
+/* Disconnect MAX9286 GMSL I2C. */
+&i2c3 {
+ status = "disabled";
+};
+
+/* Connect expansion board I2C. */
+&i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@27 {
+ compatible = "onnn,pca9654";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ vin0_adv7612_en {
+ gpio-hog;
+ gpios = <3 GPIO_ACTIVE_LOW>;
+ output-high;
+ line-name = "VIN0_ADV7612_ENn";
+ };
+ };
+
+ hdmi-decoder@4c {
+ compatible = "adi,adv7612";
+ reg = <0x4c>, <0x50>, <0x52>, <0x54>, <0x56>, <0x58>;
+ reg-names = "main", "afe", "rep", "edid", "hdmi", "cp";
+ interrupt-parent = <&gpio3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ default-input = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ adv7612_in: endpoint {
+ remote-endpoint = <&hdmi_in_con2>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ adv7612_out: endpoint {
+ remote-endpoint = <&vin0_in>;
+ };
+ };
+ };
+ };
+
+ video-receiver@70 {
+ compatible = "adi,adv7482";
+ reg = <0x70 0x71 0x72 0x73 0x74 0x75
+ 0x60 0x61 0x62 0x63 0x64 0x65>;
+ reg-names = "main", "dpll", "cp", "hdmi", "edid", "repeater",
+ "infoframe", "cbus", "cec", "sdp", "txa", "txb" ;
+ interrupt-parent = <&gpio3>;
+ interrupts = <03 IRQ_TYPE_LEVEL_LOW>, <04 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "intrq1", "intrq2";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+
+ adv7482_ain7: endpoint {
+ remote-endpoint = <&cvbs_con>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+
+ adv7482_hdmi: endpoint {
+ remote-endpoint = <&hdmi_in_con>;
+ };
+ };
+
+ port@a {
+ reg = <10>;
+
+ adv7482_txa: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csi40_in>;
+ };
+ };
+ };
+ };
+
+};
+
+&csi40 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ csi40_in: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&adv7482_txa>;
+ };
+ };
+ };
+};
+
+&pfc {
+ vin0_pins_parallel: vin0 {
+ groups = "vin0_data12", "vin0_sync", "vin0_clk", "vin0_clkenb";
+ function = "vin0";
+ };
+};
+
+&vin0 {
+ status = "okay";
+
+ pinctrl-0 = <&vin0_pins_parallel>;
+ pinctrl-names = "default";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vin0_in: endpoint {
+ pclk-sample = <0>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ remote-endpoint = <&adv7612_out>;
+ };
+ };
+ };
+};
+
+&vin1 {
+ status = "okay";
+};
+
+&vin2 {
+ status = "okay";
+};
+
+&vin3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
index abfda5c6ca16..bc65a7b4d999 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
@@ -14,9 +14,9 @@
compatible = "renesas,s4sk", "renesas,r8a779f4", "renesas,r8a779f0";
aliases {
- serial0 = &hscif0;
- serial1 = &hscif1;
- eth0 = &rswitch;
+ serial0 = &hscif0;
+ serial1 = &hscif1;
+ ethernet0 = &rswitch;
};
chosen {
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
index bc8616a56c03..cfbe8c8680cd 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
@@ -18,11 +18,12 @@
aliases {
serial0 = &hscif0;
+ serial1 = &hscif2;
ethernet0 = &avb0;
};
chosen {
- bootargs = "ignore_loglevel";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=on";
stdout-path = "serial0:921600n8";
};
@@ -90,6 +91,14 @@
status = "okay";
};
+&hscif2 {
+ pinctrl-0 = <&hscif2_pins>;
+ pinctrl-names = "default";
+
+ uart-has-rtscts;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
@@ -144,7 +153,7 @@
};
&pfc {
- pinctrl-0 = <&scif_clk_pins>;
+ pinctrl-0 = <&scif_clk_pins>, <&scif_clk2_pins>;
pinctrl-names = "default";
avb0_pins: avb0 {
@@ -170,6 +179,11 @@
function = "hscif0";
};
+ hscif2_pins: hscif2 {
+ groups = "hscif2_data", "hscif2_ctrl";
+ function = "hscif2";
+ };
+
i2c0_pins: i2c0 {
groups = "i2c0";
function = "i2c0";
@@ -190,6 +204,11 @@
groups = "scif_clk";
function = "scif_clk";
};
+
+ scif_clk2_pins: scif-clk2 {
+ groups = "scif_clk2";
+ function = "scif_clk2";
+ };
};
&rpc {
@@ -228,3 +247,7 @@
&scif_clk {
clock-frequency = <24000000>;
};
+
+&scif_clk2 {
+ clock-frequency = <24000000>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index 11885729181b..6d791024cabe 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -144,13 +144,19 @@
method = "smc";
};
- /* External SCIF clock - to be overridden by boards that provide it */
+ /* External SCIF clocks - to be overridden by boards that provide them */
scif_clk: scif-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <0>;
};
+ scif_clk2: scif-clk2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
@@ -297,6 +303,76 @@
resets = <&cpg 917>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a779h0-cmt0",
+ "renesas,rcar-gen4-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 910>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a779h0-cmt1",
+ "renesas,rcar-gen4-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 911>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a779h0-cmt1",
+ "renesas,rcar-gen4-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 912>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a779h0-cmt1",
+ "renesas,rcar-gen4-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 913>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 913>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a779h0-cpg-mssr";
reg = <0 0xe6150000 0 0x4000>;
@@ -318,6 +394,106 @@
#power-domain-cells = <1>;
};
+ tsc: thermal@e6198000 {
+ compatible = "renesas,r8a779h0-thermal";
+ reg = <0 0xe6198000 0 0x200>,
+ <0 0xe61a0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 919>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ intc_ex: interrupt-controller@e61c0000 {
+ compatible = "renesas,intc-ex-r8a779h0", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 611>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 611>;
+ };
+
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a779h0", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 291 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2";
+ clocks = <&cpg CPG_MOD 713>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 713>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a779h0", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 714>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 714>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a779h0", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 715>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 715>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a779h0", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 301 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 716>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a779h0", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&cpg CPG_MOD 717>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
compatible = "renesas,i2c-r8a779h0",
"renesas,rcar-gen4-i2c";
@@ -403,6 +579,57 @@
status = "disabled";
};
+ hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a779h0",
+ "renesas,rcar-gen4-hscif", "renesas,hscif";
+ reg = <0 0xe6550000 0 0x60>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 515>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 515>;
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+ <&dmac2 0x33>, <&dmac2 0x32>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ hscif2: serial@e6560000 {
+ compatible = "renesas,hscif-r8a779h0",
+ "renesas,rcar-gen4-hscif", "renesas,hscif";
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk2>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
+ dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+ <&dmac2 0x35>, <&dmac2 0x34>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ hscif3: serial@e66a0000 {
+ compatible = "renesas,hscif-r8a779h0",
+ "renesas,rcar-gen4-hscif", "renesas,hscif";
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
+ dmas = <&dmac1 0x37>, <&dmac1 0x36>,
+ <&dmac2 0x37>, <&dmac2 0x36>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
avb0: ethernet@e6800000 {
compatible = "renesas,etheravb-r8a779h0",
"renesas,etheravb-rcar-gen4";
@@ -446,6 +673,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 0>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -547,6 +775,170 @@
status = "disabled";
};
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a779h0",
+ "renesas,rcar-gen4-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 702>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
+ dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+ <&dmac2 0x51>, <&dmac2 0x50>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a779h0",
+ "renesas,rcar-gen4-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+ <&dmac2 0x53>, <&dmac2 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ scif3: serial@e6c50000 {
+ compatible = "renesas,scif-r8a779h0",
+ "renesas,rcar-gen4-scif", "renesas,scif";
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
+ dmas = <&dmac1 0x57>, <&dmac1 0x56>,
+ <&dmac2 0x57>, <&dmac2 0x56>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ scif4: serial@e6c40000 {
+ compatible = "renesas,scif-r8a779h0",
+ "renesas,rcar-gen4-scif", "renesas,scif";
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>,
+ <&cpg CPG_CORE R8A779H0_CLK_SASYNCPERD1>,
+ <&scif_clk2>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 705>;
+ dmas = <&dmac1 0x59>, <&dmac1 0x58>,
+ <&dmac2 0x59>, <&dmac2 0x58>;
+ dma-names = "tx", "rx", "tx", "rx";
+ status = "disabled";
+ };
+
+ msiof0: spi@e6e90000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6e90000 0 0x0064>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 618>;
+ dmas = <&dmac1 0x41>, <&dmac1 0x40>,
+ <&dmac2 0x41>, <&dmac2 0x40>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 618>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof1: spi@e6ea0000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6ea0000 0 0x0064>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 619>;
+ dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+ <&dmac2 0x43>, <&dmac2 0x42>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 619>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof2: spi@e6c00000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6c00000 0 0x0064>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 620>;
+ dmas = <&dmac1 0x45>, <&dmac1 0x44>,
+ <&dmac2 0x45>, <&dmac2 0x44>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 620>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof3: spi@e6c10000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6c10000 0 0x0064>;
+ interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 621>;
+ dmas = <&dmac1 0x47>, <&dmac1 0x46>,
+ <&dmac2 0x47>, <&dmac2 0x46>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 621>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof4: spi@e6c20000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6c20000 0 0x0064>;
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 622>;
+ dmas = <&dmac1 0x49>, <&dmac1 0x48>,
+ <&dmac2 0x49>, <&dmac2 0x48>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 622>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof5: spi@e6c28000 {
+ compatible = "renesas,msiof-r8a779h0",
+ "renesas,rcar-gen4-msiof";
+ reg = <0 0xe6c28000 0 0x0064>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 623>;
+ dmas = <&dmac1 0x4b>, <&dmac1 0x4a>,
+ <&dmac2 0x4b>, <&dmac2 0x4a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 623>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
dmac1: dma-controller@e7350000 {
compatible = "renesas,dmac-r8a779h0",
"renesas,rcar-gen4-dmac";
@@ -580,6 +972,14 @@
resets = <&cpg 709>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
+ <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
+ <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
+ <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
+ <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
+ <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
+ <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
+ <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
};
dmac2: dma-controller@e7351000 {
@@ -605,6 +1005,10 @@
resets = <&cpg 710>;
#dma-cells = <1>;
dma-channels = <8>;
+ iommus = <&ipmmu_ds0 16>, <&ipmmu_ds0 17>,
+ <&ipmmu_ds0 18>, <&ipmmu_ds0 19>,
+ <&ipmmu_ds0 20>, <&ipmmu_ds0 21>,
+ <&ipmmu_ds0 22>, <&ipmmu_ds0 23>;
};
mmc0: mmc@ee140000 {
@@ -618,6 +1022,7 @@
power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
resets = <&cpg 706>;
max-frequency = <200000000>;
+ iommus = <&ipmmu_ds0 32>;
status = "disabled";
};
@@ -637,6 +1042,106 @@
status = "disabled";
};
+ ipmmu_rt0: iommu@ee480000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xee480000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_rt1: iommu@ee4c0000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xee4c0000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_ds0: iommu@eed00000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeed00000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_hc: iommu@eed40000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeed40000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_ir: iommu@eed80000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeed80000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vc: iommu@eedc0000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeedc0000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_3dg: iommu@eee00000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeee00000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vi0: iommu@eee80000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeee80000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vi1: iommu@eeec0000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeeec0000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vip0: iommu@eef00000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeef00000 0 0x20000>;
+ renesas,ipmmu-main = <&ipmmu_mm>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_mm: iommu@eefc0000 {
+ compatible = "renesas,ipmmu-r8a779h0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+ reg = <0 0xeefc0000 0 0x20000>;
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
gic: interrupt-controller@f1000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
@@ -653,6 +1158,36 @@
};
};
+ thermal-zones {
+ sensor_thermal_cr52: sensor1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 0>;
+
+ trips {
+ sensor1_crit: sensor1-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal_ca76: sensor2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 1>;
+
+ trips {
+ sensor2_crit: sensor2-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
index 8721f4c9fa0f..d2365def1059 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
@@ -598,6 +598,7 @@
gpio-ranges = <&pinctrl 0 0 152>;
#interrupt-cells = <2>;
interrupt-controller;
+ interrupt-parent = <&irqc>;
clocks = <&cpg CPG_MOD R9A07G043_GPIO_HCLK>;
power-domains = <&cpg>;
resets = <&cpg R9A07G043_GPIO_RSTN>,
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
index 964b0a475eee..165bfcfef3bc 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
@@ -54,10 +54,6 @@
};
};
-&pinctrl {
- interrupt-parent = <&irqc>;
-};
-
&soc {
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc.dtsi
index de590996e10a..433860987f73 100644
--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc.dtsi
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Renesas Electronics Corp.
*/
+#include <dt-bindings/gpio/gpio.h>
#include "rzg2ul-smarc-pinfunction.dtsi"
#include "rz-smarc-common.dtsi"
@@ -23,6 +24,63 @@
&i2c0 {
clock-frequency = <400000>;
+ da9062: pmic@58 {
+ compatible = "dlg,da9062";
+ reg = <0x58>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio {
+ compatible = "dlg,da9062-gpio";
+ };
+
+ onkey {
+ compatible = "dlg,da9062-onkey";
+ };
+
+ pmic-good-hog {
+ gpio-hog;
+ gpios = <4 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "PMIC_PGOOD";
+ };
+
+ rtc {
+ compatible = "dlg,da9062-rtc";
+ };
+
+ sd0-pwr-sel-hog {
+ gpio-hog;
+ gpios = <1 GPIO_ACTIVE_HIGH>;
+ input;
+ line-name = "SD0_PWR_SEL";
+ };
+
+ sd1-pwr-sel-hog {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_HIGH>;
+ input;
+ line-name = "SD1_PWR_SEL";
+ };
+
+ sw-et0-en-hog {
+ gpio-hog;
+ gpios = <3 GPIO_ACTIVE_HIGH>;
+ input;
+ line-name = "SW_ET0_EN#";
+ };
+
+ thermal {
+ compatible = "dlg,da9062-thermal";
+ status = "disabled";
+ };
+
+ watchdog {
+ compatible = "dlg,da9062-watchdog";
+ status = "disabled";
+ };
+ };
+
versa3: clock-generator@68 {
compatible = "renesas,5p35023";
reg = <0x68>;
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
index acac4666ae59..8a3d302f1535 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
@@ -25,7 +25,7 @@
* SW_OFF - SD2 is connected to SoC
* SW_ON - SCIF1, SSI0, IRQ0, IRQ1 connected to SoC
*/
-#define SW_CONFIG2 SW_ON
+#define SW_CONFIG2 SW_OFF
#define SW_CONFIG3 SW_ON
/ {
@@ -36,8 +36,8 @@
#if SW_CONFIG3 == SW_OFF
mmc2 = &sdhi2;
#else
- eth0 = &eth0;
- eth1 = &eth1;
+ ethernet0 = &eth0;
+ ethernet1 = &eth1;
#endif
};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index f906a868b71a..f42fa62b4064 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -10,6 +10,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-rock-pi-s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3318-a95x-z2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-anbernic-rg351m.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-anbernic-rg351v.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-gameforce-chi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-odroid-go2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-odroid-go2-v11.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-odroid-go3.dtb
@@ -90,6 +91,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-quartz64-a.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-quartz64-b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-radxa-cm3-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-roc-pc.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-rock-3c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-blade.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-cm4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-model-a.dtb
@@ -100,6 +102,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-fastrhino-r66s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-fastrhino-r68s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-lubancat-2.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-mecsbc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-odroid-m1.dtb
@@ -107,6 +110,9 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-qnap-ts433.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-radxa-e25.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3a.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
@@ -114,6 +120,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index cfc0a87b5195..962ea893999b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -578,6 +578,48 @@
#dma-cells = <1>;
};
+ /*
+ * - can be clock producer or consumer
+ * - up to 8 capture channels and 2 playback channels
+ * - connected internally to audio codec
+ */
+ i2s_8ch_2: i2s@ff320000 {
+ compatible = "rockchip,rk3308-i2s-tdm";
+ reg = <0x0 0xff320000 0x0 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ clocks = <&cru SCLK_I2S2_8CH_TX>,
+ <&cru SCLK_I2S2_8CH_RX>,
+ <&cru HCLK_I2S2_8CH>;
+ dmas = <&dmac1 5>, <&dmac1 4>;
+ dma-names = "rx", "tx";
+ resets = <&cru SRST_I2S2_8CH_TX_M>, <&cru SRST_I2S2_8CH_RX_M>;
+ reset-names = "tx-m", "rx-m";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
+ /*
+ * - can be clock consumer only
+ * - up to 4 capture channels, no playback
+ * - connected internally to audio codec
+ */
+ i2s_8ch_3: i2s@ff330000 {
+ compatible = "rockchip,rk3308-i2s-tdm";
+ reg = <0x0 0xff330000 0x0 0x1000>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ clocks = <&cru SCLK_I2S3_8CH_TX>,
+ <&cru SCLK_I2S3_8CH_RX>,
+ <&cru HCLK_I2S3_8CH>;
+ dmas = <&dmac1 7>;
+ dma-names = "rx";
+ resets = <&cru SRST_I2S3_8CH_TX_M>, <&cru SRST_I2S3_8CH_RX_M>;
+ reset-names = "tx-m", "rx-m";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
i2s_2ch_0: i2s@ff350000 {
compatible = "rockchip,rk3308-i2s", "rockchip,rk3066-i2s";
reg = <0x0 0xff350000 0x0 0x1000>;
@@ -761,6 +803,20 @@
assigned-clock-rates = <32768>;
};
+ codec: codec@ff560000 {
+ compatible = "rockchip,rk3308-codec";
+ reg = <0x0 0xff560000 0x0 0x10000>;
+ rockchip,grf = <&grf>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ clocks = <&cru SCLK_I2S2_8CH_TX_OUT>,
+ <&cru SCLK_I2S2_8CH_RX_OUT>,
+ <&cru PCLK_ACODEC>;
+ reset-names = "codec-reset";
+ resets = <&cru SRST_ACODEC_P>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@ff580000 {
compatible = "arm,gic-400";
reg = <0x0 0xff581000 0x0 0x1000>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-gameforce-chi.dts b/arch/arm64/boot/dts/rockchip/rk3326-gameforce-chi.dts
new file mode 100644
index 000000000000..579261b3a474
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3326-gameforce-chi.dts
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Chris Morgan <macromorgan@hotmail.com>
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3326.dtsi"
+
+/ {
+ model = "GameForce Chi";
+ compatible = "gameforce,chi", "rockchip,rk3326";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ adc_joystick: adc-joystick {
+ compatible = "adc-joystick";
+ io-channels = <&saradc 0>,
+ <&saradc 1>;
+ poll-interval = <100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ axis@0 {
+ reg = <0>;
+ abs-flat = <10>;
+ abs-fuzz = <10>;
+ abs-range = <850 175>;
+ linux,code = <ABS_Y>;
+ };
+
+ axis@1 {
+ reg = <1>;
+ abs-flat = <10>;
+ abs-fuzz = <10>;
+ abs-range = <800 190>;
+ linux,code = <ABS_X>;
+ };
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 2>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <60>;
+
+ button-1 {
+ label = "HAPPY1";
+ linux,code = <BTN_TRIGGER_HAPPY1>;
+ press-threshold-microvolt = <15000>;
+ };
+
+ button-2 {
+ label = "HAPPY2";
+ linux,code = <BTN_TRIGGER_HAPPY2>;
+ press-threshold-microvolt = <300000>;
+ };
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc_bl>;
+ pwms = <&pwm1 0 25000 0>;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <3000000>;
+ charge-term-current-microamp = <300000>;
+ constant-charge-current-max-microamp = <1500000>;
+ constant-charge-voltage-max-microvolt = <4200000>;
+ factory-internal-resistance-micro-ohms = <180000>;
+ ocv-capacity-celsius = <20>;
+ ocv-capacity-table-0 = <4106000 100>, <4071000 95>, <4018000 90>, <3975000 85>,
+ <3946000 80>, <3908000 75>, <3877000 70>, <3853000 65>,
+ <3834000 60>, <3816000 55>, <3802000 50>, <3788000 45>,
+ <3774000 40>, <3760000 35>, <3748000 30>, <3735000 25>,
+ <3718000 20>, <3697000 15>, <3685000 10>, <3625000 5>,
+ <3400000 0>;
+ voltage-max-design-microvolt = <4250000>;
+ voltage-min-design-microvolt = <3400000>;
+ };
+
+ gpio_leds: gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ red_led: led-0 {
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio3 RK_PC4 GPIO_ACTIVE_HIGH>;
+ };
+
+ green_led: led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio3 RK_PC5 GPIO_ACTIVE_HIGH>;
+ };
+
+ blue_led: led-2 {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio3 RK_PC6 GPIO_ACTIVE_HIGH>;
+ };
+
+ white_led: led-3 {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+ };
+
+ chg_led: led-4 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_CHARGING;
+ gpios = <&gpio3 RK_PB2 GPIO_ACTIVE_HIGH>;
+ };
+
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&btn_pins_ctrl>;
+ pinctrl-names = "default";
+
+ button-a {
+ gpios = <&gpio2 RK_PB0 GPIO_ACTIVE_LOW>;
+ label = "EAST";
+ linux,code = <BTN_EAST>;
+ };
+
+ button-b {
+ gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_LOW>;
+ label = "SOUTH";
+ linux,code = <BTN_SOUTH>;
+ };
+
+ button-down {
+ gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_LOW>;
+ label = "DPAD-DOWN";
+ linux,code = <BTN_DPAD_DOWN>;
+ };
+
+ button-home {
+ gpios = <&gpio2 RK_PA0 GPIO_ACTIVE_LOW>;
+ label = "HOME";
+ linux,code = <BTN_MODE>;
+ };
+
+ button-l1 {
+ gpios = <&gpio2 RK_PA6 GPIO_ACTIVE_LOW>;
+ label = "TL";
+ linux,code = <BTN_TL>;
+ };
+
+ button-l2 {
+ gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_LOW>;
+ label = "TL2";
+ linux,code = <BTN_TL2>;
+ };
+
+ button-left {
+ gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_LOW>;
+ label = "DPAD-LEFT";
+ linux,code = <BTN_DPAD_LEFT>;
+ };
+
+ button-r1 {
+ gpios = <&gpio2 RK_PA7 GPIO_ACTIVE_LOW>;
+ label = "TR";
+ linux,code = <BTN_TR>;
+ };
+
+ button-r2 {
+ gpios = <&gpio2 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "TR2";
+ linux,code = <BTN_TR2>;
+ };
+
+ button-right {
+ gpios = <&gpio1 RK_PB7 GPIO_ACTIVE_LOW>;
+ label = "DPAD-RIGHT";
+ linux,code = <BTN_DPAD_RIGHT>;
+ };
+
+ button-select {
+ gpios = <&gpio2 RK_PA3 GPIO_ACTIVE_LOW>;
+ label = "SELECT";
+ linux,code = <BTN_SELECT>;
+ };
+
+ button-start {
+ gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+ label = "START";
+ linux,code = <BTN_START>;
+ };
+
+ button-up {
+ gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_LOW>;
+ label = "DPAD-UP";
+ linux,code = <BTN_DPAD_UP>;
+ };
+
+ button-x {
+ gpios = <&gpio2 RK_PB3 GPIO_ACTIVE_LOW>;
+ label = "NORTH";
+ linux,code = <BTN_NORTH>;
+ };
+
+ button-y {
+ gpios = <&gpio2 RK_PB2 GPIO_ACTIVE_LOW>;
+ label = "WEST";
+ linux,code = <BTN_WEST>;
+ };
+ };
+
+ multi-led {
+ compatible = "leds-group-multicolor";
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
+ leds = <&red_led>, <&green_led>, <&blue_led>;
+ };
+
+ spk_amp: audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio2 RK_PB5 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&spk_amp_enable_h>;
+ pinctrl-names = "default";
+ sound-name-prefix = "Speaker Amp";
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ pinctrl-0 = <&hp_det>;
+ pinctrl-names = "default";
+ simple-audio-card,name = "rk817_ext";
+ simple-audio-card,aux-devs = <&spk_amp>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,hp-det-gpio = <&gpio2 RK_PC6 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,widgets =
+ "Microphone", "Mic Jack",
+ "Headphone", "Headphones",
+ "Speaker", "Internal Speakers";
+ simple-audio-card,routing =
+ "MICL", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR",
+ "Internal Speakers", "Speaker Amp OUTL",
+ "Internal Speakers", "Speaker Amp OUTR",
+ "Speaker Amp INL", "HPOL",
+ "Speaker Amp INR", "HPOR";
+ simple-audio-card,pin-switches = "Internal Speakers";
+
+ simple-audio-card,codec {
+ sound-dai = <&rk817>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_2ch>;
+ };
+ };
+
+ vibrator_left: pwm-vibrator-l {
+ compatible = "pwm-vibrator";
+ pwm-names = "enable";
+ pwms = <&pwm4 0 25000 0>;
+ };
+
+ vibrator_right: pwm-vibrator-r {
+ compatible = "pwm-vibrator";
+ pwm-names = "enable";
+ pwms = <&pwm5 0 25000 0>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk817 1>;
+ clock-names = "ext_clock";
+ pinctrl-0 = <&wifi_enable_h>;
+ pinctrl-names = "default";
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ };
+
+ vccsys: vccsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v8_sys";
+ regulator-always-on;
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <3800000>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi {
+ status = "okay";
+
+ internal_display: panel@0 {
+ reg = <0>;
+ compatible = "gameforce,chi-panel";
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_lcd>;
+ vcc-supply = <&vcc_lcd>;
+ reset-gpios = <&gpio3 RK_PA0 GPIO_ACTIVE_LOW>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+
+ ports {
+ mipi_out: port@1 {
+ reg = <1>;
+
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+ };
+ };
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_logic>;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-falling-time-ns = <16>;
+ i2c-scl-rising-time-ns = <280>;
+ status = "okay";
+
+ rk817: pmic@20 {
+ compatible = "rockchip,rk817";
+ reg = <0x20>;
+ #clock-cells = <1>;
+ clock-names = "mclk";
+ clock-output-names = "rk808-clkout1", "xin32k";
+ clocks = <&cru SCLK_I2S1_OUT>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PC1 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pmic_int>, <&i2s1_2ch_mclk>;
+ pinctrl-names = "default";
+ #sound-dai-cells = <0>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vccsys>;
+ vcc2-supply = <&vccsys>;
+ vcc3-supply = <&vccsys>;
+ vcc4-supply = <&vccsys>;
+ vcc5-supply = <&vccsys>;
+ vcc6-supply = <&vccsys>;
+ vcc7-supply = <&vcc_3v0>;
+ vcc8-supply = <&vccsys>;
+ vcc9-supply = <&dcdc_boost>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1150000>;
+ regulator-min-microvolt = <950000>;
+ regulator-name = "vdd_logic";
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <950000>;
+ regulator-name = "vdd_arm";
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v0: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-name = "vcc_3v0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc_1v8";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <1000000>;
+ regulator-name = "vdd_1v0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_3v0_pmu: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-name = "vcc_3v0_pmu";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vccio_sd";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_sd: LDO_REG6 {
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_sd";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_bl: LDO_REG7 {
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_bl";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG8 {
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "vcc_lcd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vcc_wifi: LDO_REG9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_wifi";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ dcdc_boost: BOOST {
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "dcdc_boost";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ otg_switch: OTG_SWITCH {
+ regulator-name = "otg_switch";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+
+ rk817_charger: charger {
+ monitored-battery = <&battery>;
+ rockchip,resistor-sense-micro-ohms = <10000>;
+ rockchip,sleep-enter-current-microamp = <300000>;
+ rockchip,sleep-filter-current-microamp = <100000>;
+ };
+ };
+};
+
+&i2s1_2ch {
+ status = "okay";
+};
+
+&io_domains {
+ vccio1-supply = <&vcc_3v0_pmu>;
+ vccio2-supply = <&vccio_sd>;
+ vccio3-supply = <&vcc_3v0>;
+ vccio4-supply = <&vcc_3v0>;
+ vccio5-supply = <&vcc_3v0>;
+ vccio6-supply = <&vcc_3v0>;
+ status = "okay";
+};
+
+&pinctrl {
+ bluetooth-pins {
+ bt_reset: bt-reset {
+ rockchip,pins =
+ <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bt_wake_dev: bt-wake-dev {
+ rockchip,pins =
+ <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host: bt-wake-host {
+ rockchip,pins =
+ <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ headphone {
+ hp_det: hp-det {
+ rockchip,pins =
+ <2 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-btns {
+ btn_pins_ctrl: btn-pins-ctrl {
+ rockchip,pins =
+ <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-leds {
+ led_pins: led-pins {
+ rockchip,pins =
+ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins =
+ <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ soc_slppin_gpio: soc_slppin_gpio {
+ rockchip,pins =
+ <0 RK_PA4 RK_FUNC_GPIO &pcfg_output_low>;
+ };
+
+ soc_slppin_rst: soc_slppin_rst {
+ rockchip,pins =
+ <0 RK_PA4 2 &pcfg_pull_none>;
+ };
+
+ soc_slppin_slp: soc_slppin_slp {
+ rockchip,pins =
+ <0 RK_PA4 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins =
+ <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ speaker {
+ spk_amp_enable_h: spk-amp-enable-h {
+ rockchip,pins =
+ <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc_1v8>;
+ pmuio2-supply = <&vcc_3v0_pmu>;
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm4 {
+ status = "okay";
+};
+
+&pwm5 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdio {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
+ non-removable;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&sdmmc {
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ no-sdio;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sfc {
+ #address-cells = <1>;
+ pinctrl-0 = <&sfc_clk &sfc_cs0 &sfc_bus2>;
+ pinctrl-names = "default";
+ #size-cells = <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <108000000>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+
+ u2phy_otg: otg-port {
+ status = "okay";
+ };
+};
+
+&usb20_otg {
+ status = "okay";
+};
+
+/*
+ * The right ADC joystick is connected to an unknown ADC controller
+ * which can be communicated with via uart0. This ADC device is an
+ * 8-pin SOIC with no markings, located right next to the left ADC
+ * joystick ribbon cable. The pinout of this ADC controller appears to
+ * be pin 1 - VCC (2.8v), pin 2 - 1.8v (clk maybe?), pin 3 - GPIO 10,
+ * pin 4 - unknown, pin 5 - unknown, pin 6 - analog in, pin 7 - analog in,
+ * pin 8 - ground. There is currently a userspace UART driver for this
+ * device, but it only works with the BSP joystick driver.
+ */
+&uart0 {
+ status = "okay";
+};
+
+/*
+ * Bluetooth was not working on the BSP and is not currently working on
+ * mainline due to missing firmware. The UART DMA properties must be
+ * removed or else Bluetooth will not probe.
+ */
+&uart1 {
+ /delete-property/ dma-names;
+ /delete-property/ dmas;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth: bluetooth {
+ compatible = "realtek,rtl8723ds-bt";
+ device-wake-gpios = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ host-wake-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&bt_reset>, <&bt_wake_dev>, <&bt_wake_host>;
+ pinctrl-names = "default";
+ };
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m1_xfer>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index b6f045069ee2..07dcc949b899 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -46,8 +46,14 @@
cpu-idle-states = <&CPU_SLEEP>;
dynamic-power-coefficient = <120>;
enable-method = "psci";
- next-level-cache = <&l2>;
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu1: cpu@1 {
@@ -59,8 +65,14 @@
cpu-idle-states = <&CPU_SLEEP>;
dynamic-power-coefficient = <120>;
enable-method = "psci";
- next-level-cache = <&l2>;
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu2: cpu@2 {
@@ -72,8 +84,14 @@
cpu-idle-states = <&CPU_SLEEP>;
dynamic-power-coefficient = <120>;
enable-method = "psci";
- next-level-cache = <&l2>;
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu3: cpu@3 {
@@ -85,8 +103,14 @@
cpu-idle-states = <&CPU_SLEEP>;
dynamic-power-coefficient = <120>;
enable-method = "psci";
- next-level-cache = <&l2>;
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
idle-states {
@@ -102,10 +126,13 @@
};
};
- l2: l2-cache0 {
+ l2_cache: l2-cache {
compatible = "cache";
cache-level = <2>;
cache-unified;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index b48b98c13705..e5c0dbf794ae 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -17,7 +17,7 @@
stdout-path = "serial2:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index dcee2e28916f..23ae2d9de382 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -21,7 +21,7 @@
stdout-path = "serial2:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index b16b7ca02379..7f14206d53c3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -21,7 +21,7 @@
stdout-path = "serial2:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 62af0cb94839..734f87db4d11 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -141,7 +141,7 @@
};
arm-pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index 5846a11f0e84..d5e035823eb5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -663,7 +663,7 @@ camera: &i2c7 {
port@1 {
reg = <1>;
- mipi1_in_panel: endpoint@1 {
+ mipi1_in_panel: endpoint {
remote-endpoint = <&mipi1_out_panel>;
};
};
@@ -689,7 +689,6 @@ camera: &i2c7 {
ep-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
/* PERST# asserted in S3 */
- pcie-reset-suspend = <1>;
vpcie3v3-supply = <&wlan_3v3>;
vpcie1v8-supply = <&pp1800_pcie>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index dfb2a0bdea5b..9586bb12a5d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -611,7 +611,7 @@
#size-cells = <0>;
interface@0 { /* interface 0 of configuration 1 */
- compatible = "usbbda,8156.config1.0";
+ compatible = "usbifbda,8156.config1.0";
reg = <0 1>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 054c6a4d1a45..294eb2de263d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -779,7 +779,6 @@
};
&pcie0 {
- bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index 61f3fec5a8b1..e5709c7ee06a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
@@ -16,7 +16,7 @@
#include "rk3399-opp.dtsi"
/ {
- model = "Pine64 PinePhonePro";
+ model = "Pine64 PinePhone Pro";
compatible = "pine64,pinephone-pro", "rockchip,rk3399";
chassis-type = "handset";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2c3984a880af..f6f15946579e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -194,6 +194,8 @@
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
+ vpcie3v3-supply = <&vcc3v3_baseboard>;
+ vpcie12v-supply = <&dc_12v>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index c08e69391c01..ccbe3a7a1d2c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -79,6 +79,26 @@
regulator-max-microvolt = <5000000>;
};
+ vcca_0v9: vcca-0v9-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcca_1v8: vcca-1v8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
vdd_log: vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm2 0 25000 1>;
@@ -416,16 +436,28 @@
gpio1830-supply = <&vcc_1v8>;
};
-&pmu_io_domains {
- status = "okay";
- pmu1830-supply = <&vcc_1v8>;
+&pcie0 {
+ /* PCIe PHY supplies */
+ vpcie0v9-supply = <&vcca_0v9>;
+ vpcie1v8-supply = <&vcca_1v8>;
};
-&pwm2 {
- status = "okay";
+&pcie_clkreqn_cpm {
+ rockchip,pins =
+ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&q7_thermal_pin>;
+
+ gpios {
+ q7_thermal_pin: q7-thermal-pin {
+ rockchip,pins =
+ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@@ -458,11 +490,20 @@
usb3 {
usb3_id: usb3-id {
rockchip,pins =
- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
+&pmu_io_domains {
+ status = "okay";
+ pmu1830-supply = <&vcc_1v8>;
+};
+
+&pwm2 {
+ status = "okay";
+};
+
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
index 7baf9d1b22fd..972aea843afd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
@@ -151,6 +151,7 @@
};
&emmc_phy {
+ rockchip,enable-strobe-pulldown;
status = "okay";
};
@@ -549,7 +550,8 @@
&sdhci {
max-frequency = <150000000>;
bus-width = <8>;
- mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 281a12180703..b9d6284bb804 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -194,6 +194,7 @@
};
&emmc_phy {
+ rockchip,enable-strobe-pulldown;
status = "okay";
};
@@ -648,7 +649,8 @@
&sdhci {
max-frequency = <150000000>;
bus-width = <8>;
- mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts
index 8aa93c646bec..a73cf30801ec 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts
@@ -8,7 +8,7 @@
#include "rk3566-anbernic-rg353x.dtsi"
/ {
- model = "RG353P";
+ model = "Anbernic RG353P";
compatible = "anbernic,rg353p", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353ps.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353ps.dts
index b211973e36c2..ca5284e4807d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353ps.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353ps.dts
@@ -8,7 +8,7 @@
#include "rk3566-anbernic-rg353x.dtsi"
/ {
- model = "RG353PS";
+ model = "Anbernic RG353PS";
compatible = "anbernic,rg353ps", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts
index f49ce29ba597..e9954a33e8cd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts
@@ -8,7 +8,7 @@
#include "rk3566-anbernic-rg353x.dtsi"
/ {
- model = "RG353V";
+ model = "Anbernic RG353V";
compatible = "anbernic,rg353v", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353vs.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353vs.dts
index a7dc462fe21f..90da43855d1c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353vs.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353vs.dts
@@ -8,7 +8,7 @@
#include "rk3566-anbernic-rg353x.dtsi"
/ {
- model = "RG353VS";
+ model = "Anbernic RG353VS";
compatible = "anbernic,rg353vs", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
index 94e6dd61a2db..74cf313e0635 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
@@ -8,7 +8,7 @@
#include "rk3566-anbernic-rgxx3.dtsi"
/ {
- model = "RG503";
+ model = "Anbernic RG503";
compatible = "anbernic,rg503", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi
index 18b8c2e7befa..233eade30f21 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi
@@ -10,6 +10,8 @@
#include "rk3566.dtsi"
/ {
+ chassis-type = "handset";
+
chosen: chosen {
stdout-path = "serial2:1500000n8";
};
@@ -623,9 +625,12 @@
cap-sdio-irq;
keep-power-in-suspend;
mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
non-removable;
pinctrl-0 = <&sdmmc2m0_bus4 &sdmmc2m0_cmd &sdmmc2m0_clk>;
pinctrl-names = "default";
+ sd-uhs-sdr50;
vmmc-supply = <&vcc_wifi>;
vqmmc-supply = <&vcca1v8_pmu>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
index 6ecdf5d28339..c1194d1e438d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
@@ -447,7 +447,6 @@
&pcie2x1 {
reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rgb30.dts b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rgb30.dts
index 1f567a14ac84..952b1b285f3b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rgb30.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rgb30.dts
@@ -8,7 +8,7 @@
#include "rk3566-powkiddy-rk2023.dtsi"
/ {
- model = "RGB30";
+ model = "Powkiddy RGB30";
compatible = "powkiddy,rgb30", "rockchip,rk3566";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dts b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dts
index bc9933d9e262..72890f747ee3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dts
@@ -8,7 +8,7 @@
#include "rk3566-powkiddy-rk2023.dtsi"
/ {
- model = "RK2023";
+ model = "Powkiddy RK2023";
compatible = "powkiddy,rk2023", "rockchip,rk3566";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dtsi
index 3ab751a01cb2..bd332714a023 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-rk2023.dtsi
@@ -10,6 +10,8 @@
#include "rk3566.dtsi"
/ {
+ chassis-type = "handset";
+
aliases {
mmc1 = &sdmmc0;
mmc2 = &sdmmc1;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-x55.dts b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-x55.dts
index 4786b19fd017..5a648db41f35 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-x55.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-powkiddy-x55.dts
@@ -11,6 +11,7 @@
/ {
model = "Powkiddy x55";
+ chassis-type = "handset";
compatible = "powkiddy,x55", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index 59843a7a199c..0b191d8462ad 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -8,7 +8,7 @@
#include "rk3566.dtsi"
/ {
- model = "Pine64 RK3566 Quartz64-A Board";
+ model = "Pine64 Quartz64 Model A";
compatible = "pine64,quartz64-a", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 2d92713be2a0..26322a358d91 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -8,7 +8,7 @@
#include "rk3566.dtsi"
/ {
- model = "Pine64 RK3566 Quartz64-B Board";
+ model = "Pine64 Quartz64 Model B";
compatible = "pine64,quartz64-b", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
new file mode 100644
index 000000000000..b242409d378c
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3566.dtsi"
+
+/ {
+ model = "Radxa ROCK 3C";
+ compatible = "radxa,rock-3c", "rockchip,rk3566";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc1;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ gmac1_clkin: external-gmac1-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "gmac1_clkin";
+ #clock-cells = <0>;
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_BLUE>;
+ linux,default-trigger = "heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_led2>;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ post-power-on-delay-ms = <100>;
+ power-off-delay-us = <5000000>;
+ reset-gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_LOW>;
+ };
+
+ vcc5v_dcin: vcc5v-dcin-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc3v3_pcie: vcc3v3-pcie-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwr_en>;
+ regulator-name = "vcc3v3_pcie";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v_dcin>;
+ };
+
+ vcc5v0_usb30_host: vcc5v0-usb30-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb30_host_en>;
+ regulator-name = "vcc5v0_usb30_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb_otg: vcc5v0-usb-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb_otg_en>;
+ regulator-name = "vcc5v0_usb_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_cam: vcc-cam-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_cam_en>;
+ regulator-name = "vcc_cam";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_mipi: vcc-mipi-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_mipi_en>;
+ regulator-name = "vcc_mipi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&gmac1_clkin>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus
+ &gmac1m1_clkinout>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "rk808-clkout1", "rk808-clkout2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>, <&i2s1m0_mclk>;
+ system-power-controller;
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ wakeup-source;
+ #clock-cells = <1>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ eeprom: eeprom@50 {
+ compatible = "belling,bl24c16a", "atmel,24c16";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_reset_h>;
+ reset-gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ bluetooth {
+ bt_reg_on_h: bt-reg-on-h {
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host_h: bt-wake-host-h {
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_h: bt-host-wake-h {
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ cam {
+ vcc_cam_en: vcc_cam_en {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ display {
+ vcc_mipi_en: vcc_mipi_en {
+ rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ user_led2: user-led2 {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_pwr_en: pcie-pwr-en {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_reset_h: pcie-reset-h {
+ rockchip,pins = <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ vcc5v0_usb30_host_en: vcc5v0-usb30-host-en {
+ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_usb_otg_en: vcc5v0-usb-otg-en {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_host_wake_h: wifi-host-wake-h {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcca1v8_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcca1v8_pmu>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ sd-uhs-sdr50;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_clk &sdmmc1_cmd>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vcca1v8_pmu>;
+ status = "okay";
+};
+
+&sfc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <120000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_ctsn &uart1m0_rtsn &uart1m0_xfer>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_usb30_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_usb30_host>;
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb30_host>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-blade.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-blade.dts
index fdbf1c783242..fdbb4a6a19d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-blade.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-blade.dts
@@ -10,7 +10,7 @@
#include "rk3566-soquartz.dtsi"
/ {
- model = "PINE64 RK3566 SOQuartz on Blade carrier board";
+ model = "Pine64 SOQuartz on Blade carrier board";
compatible = "pine64,soquartz-blade", "pine64,soquartz", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
index 6ed3fa4aee34..2b6f0df477b6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
@@ -5,7 +5,7 @@
#include "rk3566-soquartz.dtsi"
/ {
- model = "Pine64 RK3566 SoQuartz with CM4-IO Carrier Board";
+ model = "Pine64 SOQuartz on CM4-IO carrier board";
compatible = "pine64,soquartz-cm4io", "pine64,soquartz", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-model-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-model-a.dts
index f2095dfa4eaf..9a6a63277c3d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-model-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-model-a.dts
@@ -5,7 +5,7 @@
#include "rk3566-soquartz.dtsi"
/ {
- model = "PINE64 RK3566 SOQuartz on Model A carrier board";
+ model = "Pine64 SOQuartz on Model A carrier board";
compatible = "pine64,soquartz-model-a", "pine64,soquartz", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
index bfb7b952f4c5..dd4e9c1893c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
@@ -8,7 +8,7 @@
#include "rk3566.dtsi"
/ {
- model = "Pine64 RK3566 SoQuartz SOM";
+ model = "Pine64 SOQuartz system on module";
compatible = "pine64,soquartz", "rockchip,rk3566";
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index 7b5f3904ef61..c87fad2c34cb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -416,6 +416,8 @@
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -525,9 +527,9 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7531";
- reg = <0>;
+ reg = <0x1f>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
index a8a4cc190eb3..a3112d5df200 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
@@ -523,7 +523,6 @@
&pcie2x1 {
reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_mini_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
new file mode 100644
index 000000000000..c2dfffc638d1
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3568.dtsi"
+
+/ {
+ model = "Protonic MECSBC";
+ compatible = "prt,mecsbc", "rockchip,rk3568";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ tas2562-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Speaker";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&tas2562>;
+ };
+ };
+
+ vdd_gpu: regulator-vdd-gpu {
+ compatible = "pwm-regulator";
+ pwms = <&pwm1 0 5000 PWM_POLARITY_INVERTED>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <915000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-settling-time-up-us = <250>;
+ pwm-dutycycle-range = <0 100>; /* dutycycle inverted 0% => 0.915V */
+ };
+
+ p3v3: regulator-p3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "p3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ p1v8: regulator-p1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "p1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vcc_sd: regulator-sd {
+ compatible = "regulator-gpio";
+ enable-gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ regulator-name = "sdcard-gpio-supply";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ states = <1800000 0x1>, <3300000 0x0>;
+ };
+
+ vdd_npu: regulator-vdd-npu {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 5000 PWM_POLARITY_INVERTED>;
+ regulator-name = "vdd_npu";
+ regulator-min-microvolt = <915000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-settling-time-up-us = <250>;
+ pwm-dutycycle-range = <0 100>; /* dutycycle inverted 0% => 0.915V */
+ };
+};
+
+&combphy0 {
+ status = "okay";
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ clock_in_out = "output";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_clkinout
+ &gmac1m1_rgmii_bus>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&gpu_opp_table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-microvolt = <915000>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <915000>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <915000>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <920000>;
+ };
+
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <950000>;
+ };
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1000000>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@60 {
+ compatible = "fcs,fan53555";
+ reg = <0x60>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2m0_xfer>;
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3m0_xfer>;
+ status = "okay";
+
+ tas2562: amplifier@4c {
+ compatible = "ti,tas2562";
+ reg = <0x4c>;
+ #sound-dai-cells = <0>;
+ shutdown-gpios = <&gpio1 RK_PD4 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tas2562>;
+ interrupts = <RK_PD1 IRQ_TYPE_LEVEL_LOW>;
+ ti,imon-slot-no = <0>;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp1075";
+ reg = <0x48>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf85363";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "rtcic_32kout";
+ };
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@2 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth_phy1_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20m1_pins>;
+ reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x2m1_pins>;
+ reset-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&p3v3>;
+ status = "okay";
+};
+
+&pinctrl {
+ ethernet {
+ eth_phy1_rst: eth-phy1-rst {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ tas2562 {
+ pinctrl_tas2562: tas2562 {
+ rockchip,pins = <1 RK_PD4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&p3v3>;
+ pmuio2-supply = <&p3v3>;
+ vccio1-supply = <&p1v8>;
+ vccio2-supply = <&p1v8>;
+ vccio3-supply = <&vcc_sd>;
+ vccio4-supply = <&p1v8>;
+ vccio5-supply = <&p3v3>;
+ vccio6-supply = <&p1v8>;
+ vccio7-supply = <&p3v3>;
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1m0_pins>;
+};
+
+&pwm2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2m0_pins>;
+};
+
+&saradc {
+ vref-supply = <&p1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&p3v3>;
+ vqmmc-supply = <&p1v8>;
+ mmc-hs200-1_8v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&p3v3>;
+ vqmmc-supply = <&vcc_sd>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ extcon = <&usb2phy0>;
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index a5e974ea659e..ebdedea15ad1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -8,7 +8,7 @@
#include "rk3568.dtsi"
/ {
- model = "Radxa ROCK3 Model A";
+ model = "Radxa ROCK 3A";
compatible = "radxa,rock3a", "rockchip,rk3568";
aliases {
@@ -757,6 +757,20 @@
status = "okay";
};
+&sfc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
&tsadc {
rockchip,hw-tshut-mode = <1>;
rockchip,hw-tshut-polarity = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso
new file mode 100644
index 000000000000..ebcaeafc3800
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Device tree overlay for the WolfVision PF5 IO Expander board.
+ *
+ * Copyright (C) 2024 WolfVision GmbH.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/rk3568-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+&{/} {
+ gmac0_clkin: external-gmac0-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ clock-output-names = "gmac0_clkin";
+ #clock-cells = <0>;
+ };
+
+ usb_host_vbus: usb-host-vbus-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_vbus_en>;
+ regulator-name = "usb_host_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v_in>;
+ };
+
+ vcc1v8_eth: vcc1v8-eth-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc1v8_eth_en>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "1v8_eth";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_eth: vcc3v3-eth-regulator {
+ compatible = "regulator-fixed";
+ enable-active-low;
+ gpio = <&gpio0 RK_PC0 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc3v3_eth_enn>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "3v3_eth";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+};
+
+&gmac0 {
+ assigned-clocks = <&cru SCLK_GMAC0_RX_TX>,
+ <&cru SCLK_GMAC0>;
+ assigned-clock-parents = <&cru SCLK_GMAC0_RMII_SPEED>,
+ <&gmac0_clkin>;
+ clock_in_out = "input";
+ phy-handle = <&dp83826>;
+ phy-mode = "rmii";
+ phy-supply = <&vcc3v3_eth>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_clkinout
+ &gmac0_rx_er
+ &gmac0_rx_bus2
+ &gmac0_tx_bus2>;
+ status = "okay";
+};
+
+&mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dp83826: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth_wake_intn &eth_phy_rstn>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <2000>;
+ reset-gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+};
+
+&pinctrl {
+ ethernet {
+ eth_wake_intn: eth-wake-intn-pinctrl {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ eth_phy_rstn: eth-phy-rstn-pinctrl {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc1v8_eth_en: vcc1v8-eth-en-pinctrl {
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc3v3_eth_enn: vcc3v3-eth-enn-pinctrl {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_host_vbus_en: usb-host-vbus-en-pinctrl {
+ rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&usb_host1_xhci {
+ maximum-speed = "high-speed";
+ phys = <&usb2phy0_host>;
+ phy-names = "usb2-phy";
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&usb_host_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
new file mode 100644
index 000000000000..170b14f92f51
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Device tree for the WolfVision PF5 mainboard.
+ *
+ * Copyright (C) 2024 WolfVision GmbH.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/regulator/ti,tps62864.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3568.dtsi"
+
+/ {
+ model = "WolfVision PF5";
+ compatible = "wolfvision,rk3568-pf5", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+ mmc0 = &sdhci;
+ rtc0 = &pcf85623;
+ rtc1 = &rk809;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ hdmi_tx: hdmi-tx-connector {
+ compatible = "hdmi-connector";
+ hdmi-pwr-supply = <&hdmi_tx_5v>;
+ type = "a";
+
+ port {
+ hdmi_tx_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
+ hdmi_tx_5v: hdmi-tx-5v-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PC5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_tx_5v_en>;
+ regulator-name = "hdmi_tx_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v_in>;
+ };
+
+ pdm_codec: pdm-codec {
+ compatible = "dmic-codec";
+ num-channels = <1>;
+ #sound-dai-cells = <0>;
+ };
+
+ pdm_sound: pdm-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "microphone";
+
+ simple-audio-card,cpu {
+ sound-dai = <&pdm>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&pdm_codec>;
+ };
+ };
+
+ vcc12v_cam: vcc12v-cam-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 RK_PD1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc12v_cam_en>;
+ regulator-name = "12v_cam";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&vcc12v_in>;
+ };
+
+ vcc12v_in: vcc12v-in-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "12v_in";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v8_cam: vcc3v8-cam-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc3v8_cam_en>;
+ regulator-name = "3v8_cam";
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <3800000>;
+ vin-supply = <&vcc5v_in>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v_in>;
+ };
+
+ vcc5v_in: vcc5v-in-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "5v_in";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_in>;
+ };
+};
+
+&combphy0 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vcc0v9_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vcc0v9_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vcc0v9_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vcc0v9_cpu>;
+};
+
+&gpu {
+ mali-supply = <&vcc0v9_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vcc0v9a_image>;
+ avdd-1v8-supply = <&vcc1v8a_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_tx_in>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ vcc1-supply = <&vcc5v_in>;
+ vcc2-supply = <&vcc5v_in>;
+ vcc3-supply = <&vcc5v_in>;
+ vcc4-supply = <&vcc5v_in>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc5v_in>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ wakeup-source;
+
+ regulators {
+ vcc0v9_logic: DCDC_REG1 {
+ regulator-name = "0v9_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc0v9_gpu: DCDC_REG2 {
+ regulator-name = "0v9_gpu";
+ regulator-always-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v1_ddr4: DCDC_REG3 {
+ regulator-name = "1v1_ddr4";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc0v9_npu: DCDC_REG4 {
+ regulator-name = "0v9_npu";
+ regulator-always-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8: DCDC_REG5 {
+ regulator-name = "1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc0v9a_image: LDO_REG1 {
+ regulator-name = "0v9a_image";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc0v9a: LDO_REG2 {
+ regulator-name = "0v9a";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc0v9a_pmu: LDO_REG3 {
+ regulator-name = "0v9a_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vcc3v3_acodec: LDO_REG4 {
+ regulator-name = "3v3_acodec";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: LDO_REG5 {
+ regulator-name = "3v3_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc1v8a: LDO_REG7 {
+ regulator-name = "1v8a";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8a_pmu: LDO_REG8 {
+ regulator-name = "1v8a_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8a_image: LDO_REG9 {
+ regulator-name = "1v8a_image";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sw: SWITCH_REG1 {
+ regulator-name = "3v3_sw";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ regulator@42 {
+ compatible = "ti,tps62869";
+ reg = <0x42>;
+
+ regulators {
+ vcc0v9_cpu: SW {
+ regulator-name = "0v9_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <TPS62864_MODE_FPWM>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1150000>;
+ vin-supply = <&vcc5v_in>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ pcf85623: rtc@51 {
+ compatible = "nxp,pcf85263";
+ reg = <0x51>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&clk32k_in>;
+ quartz-load-femtofarads = <12500>;
+ };
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3m0_xfer>;
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4m1_xfer>;
+};
+
+&pdm {
+ pinctrl-0 = <&pdmm0_clk
+ &pdmm0_sdi0>;
+ status = "okay";
+};
+
+&pinctrl {
+ cam {
+ vcc12v_cam_en: vcc12v-cam-en-pinctrl {
+ rockchip,pins = <2 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc3v8_cam_en: vcc3v8-cam-en-pinctrl {
+ rockchip,pins = <0 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hdmitx {
+ hdmi_tx_5v_en: hdmi-tx-5v-en-pinctrl {
+ rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l-pinctrl {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vcc3v3_acodec>;
+ vccio2-supply = <&vcc1v8>;
+ vccio3-supply = <&vcc3v3_sd>;
+ vccio4-supply = <&vcc1v8>;
+ vccio5-supply = <&vcc1v8>;
+ vccio6-supply = <&vcc3v3_sw>;
+ vccio7-supply = <&vcc3v3_sw>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc1v8a>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc3v3_sw>;
+ vqmmc-supply = <&vcc1v8>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "peripheral";
+ /* The following quirks are required since the bInterval is 1 and we
+ * handle steady ISOC streaming. See Usecase 3 in commit 729dcffd1ed3
+ * ("usb: dwc3: gadget: Add support for disabling U1 and U2 entries").
+ */
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ /*
+ * Without this quirk the available fifosize seems to be miscalculated
+ * in cases where many endpoints are used. In one particular situation
+	 * 8 IN EPs and 3 OUT EPs were selected and led to stalled transfers
+ * without the resize quirk.
+ */
+ tx-fifo-resize;
+
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP2>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 92f96ec01385..d8543b5557ee 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -57,6 +57,13 @@
#cooling-cells = <2>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l3_cache>;
};
cpu1: cpu@100 {
@@ -66,6 +73,13 @@
#cooling-cells = <2>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l3_cache>;
};
cpu2: cpu@200 {
@@ -75,6 +89,13 @@
#cooling-cells = <2>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l3_cache>;
};
cpu3: cpu@300 {
@@ -84,9 +105,29 @@
#cooling-cells = <2>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l3_cache>;
};
};
+ /*
+ * There are no private per-core L2 caches, but only the
+ * L3 cache that appears to the CPU cores as L2 caches
+ */
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ };
+
cpu0_opp_table: opp-table-0 {
compatible = "operating-points-v2";
opp-shared;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
new file mode 100644
index 000000000000..98c622b27647
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "rk3588.dtsi"
+
+/ {
+ model = "ArmSoM Sige7";
+ compatible = "armsom,sige7", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ analog-sound {
+ compatible = "audio-graph-card";
+ dais = <&i2s0_8ch_p0>;
+ label = "rk3588-es8316";
+ hp-det-gpio = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ routing = "MIC2", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR";
+ widgets = "Microphone", "Mic Jack",
+ "Headphone", "Headphones";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_rgb_g>;
+
+ led_green: led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led_red: led-1 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio4 RK_PC5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <0 95 145 195 255>;
+ fan-supply = <&vcc5v0_sys>;
+ pwms = <&pwm1 0 50000 0>;
+ #cooling-cells = <2>;
+ };
+
+ vcc3v3_pcie2x1l2: vcc3v3-pcie2x1l2-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie2x1l2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc3v3_pcie30: vcc3v3-pcie30-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc3v3_pcie30";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ wakeup-source;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ es8316: audio-codec@11 {
+ compatible = "everest,es8316";
+ reg = <0x11>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+
+ port {
+ es8316_p0_0: endpoint {
+ remote-endpoint = <&i2s0_8ch_p0_0>;
+ };
+ };
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+
+ i2s0_8ch_p0: port {
+ i2s0_8ch_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&es8316_p0_0>;
+ };
+ };
+};
+
+/* phy1 - right ethernet port */
+&pcie2x1l0 {
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+/* phy2 - WiFi */
+&pcie2x1l1 {
+ reset-gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+/* phy0 - left ethernet port */
+&pcie2x1l2 {
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led_rgb_g: led-rgb-g {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ led_rgb_r: led-rgb-r {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ mmc-hs200-1_8v;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ spi-max-frequency = <1000000>;
+ reg = <0x0>;
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
index cce1c8e83587..fde8b228f2c7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
@@ -136,6 +136,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-0 = <&i2c0m2_xfer>;
status = "okay";
@@ -216,9 +221,9 @@
pinctrl-0 = <&i2c7m0_xfer>;
status = "okay";
- es8316: audio-codec@11 {
+ es8316: audio-codec@10 {
compatible = "everest,es8316";
- reg = <0x11>;
+ reg = <0x10>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
@@ -357,7 +362,7 @@
vcca-supply = <&vcc5v0_sys>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
index c0d4a15323e2..709d348cf06b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
@@ -162,6 +162,8 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ system-power-controller;
+
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
vcc3-supply = <&vcc5v0_sys>;
@@ -182,7 +184,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
index 963e880ccc12..7b1317898358 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
@@ -68,6 +68,10 @@
status = "okay";
};
+&combphy2_psu {
+ status = "okay";
+};
+
&i2c6 {
status = "okay";
@@ -230,3 +234,7 @@
&usb_host1_ohci {
status = "okay";
};
+
+&usb_host2_xhci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index de30c2632b8e..7be2190244ba 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
#include "rk3588.dtsi"
/ {
@@ -159,6 +160,18 @@
vin-supply = <&avcc_1v8_s0>;
};
+ vbus5v0_typec: vbus5v0-typec-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PD0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec5v_pwren>;
+ regulator-name = "vbus5v0_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+ };
+
vcc12v_dcin: vcc12v-dcin-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc12v_dcin";
@@ -281,9 +294,68 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
&i2c2 {
status = "okay";
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vbus5v0_typec>;
+ status = "okay";
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos =
+ <PDO_FIXED(5000, 1000, PDO_FIXED_USB_COMM)>;
+ source-pdos =
+ <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&dwc3_0_role_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
+ };
+ };
+
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
@@ -410,6 +482,16 @@
rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ usb-typec {
+ typec5v_pwren: typec5v-pwren {
+ rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbc0_int: usbc0-int {
+ rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
};
&pwm2 {
@@ -484,12 +566,16 @@
regulators {
vdd_gpu_s0: dcdc-reg1 {
+ /* regulator coupling requires always-on */
+ regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
regulator-name = "vdd_gpu_s0";
regulator-enable-ramp-delay = <400>;
+ regulator-coupled-with = <&vdd_gpu_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-state-mem {
regulator-off-in-suspend;
};
@@ -534,12 +620,16 @@
};
vdd_gpu_mem_s0: dcdc-reg5 {
+ /* regulator coupling requires always-on */
+ regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
regulator-enable-ramp-delay = <400>;
regulator-name = "vdd_gpu_mem_s0";
+ regulator-coupled-with = <&vdd_gpu_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-state-mem {
regulator-off-in-suspend;
};
@@ -1041,6 +1131,22 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -1079,3 +1185,58 @@
&usb_host1_ohci {
status = "okay";
};
+
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_altmode_mux>;
+ };
+ };
+};
+
+&usbdp_phy1 {
+ /*
+ * USBDP PHY1 is wired to a female USB3 Type-A connector. Additionally,
+ * the differential pairs 2+3 and the aux channel are wired to an RTD2166,
+ * which converts the DP signal into VGA. This is exposed on the
+ * board via a female VGA connector.
+ */
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "otg";
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dwc3_0_role_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi
new file mode 100644
index 000000000000..47e64d547ea9
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include "rk3588.dtsi"
+
+/ {
+ compatible = "forlinx,fet3588-c", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_rgb_b>;
+
+ io-led {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ pcie20_avdd0v85: pcie20-avdd0v85-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "pcie20_avdd0v85";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ vin-supply = <&vdd_0v85_s0>;
+ };
+
+ pcie20_avdd1v8: pcie20-avdd1v8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "pcie20_avdd1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&avcc_1v8_s0>;
+ };
+
+ pcie30_avdd0v75: pcie30-avdd0v75-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "pcie30_avdd0v75";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ vin-supply = <&avdd_0v75_s0>;
+ };
+
+ pcie30_avdd1v8: pcie30-avdd1v8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "pcie30_avdd1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&avcc_1v8_s0>;
+ };
+
+ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc4v0_sys: vcc4v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc4v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <4000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+ mem-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+ mem-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+ mem-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+ mem-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+ mem-supply = <&vdd_cpu_lit_mem_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+ mem-supply = <&vdd_cpu_lit_mem_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+ mem-supply = <&vdd_cpu_lit_mem_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+ mem-supply = <&vdd_cpu_lit_mem_s0>;
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m2_xfer>;
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&pinctrl {
+ leds {
+ led_rgb_b: led-rgb-b {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sdio;
+ no-sd;
+ non-removable;
+ status = "okay";
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ num-cs = <1>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ spi-max-frequency = <1000000>;
+ reg = <0x0>;
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
index 39d65002add1..31d2f8994f85 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
@@ -72,6 +72,27 @@
};
};
+ /*
+ * 100MHz reference clock for PCIe peripherals from a PI6C557-05BLE
+ * clock generator.
+ * The clock output is gated via the OE pin on the clock generator.
+ * This is modeled as a fixed-clock plus a gpio-gate-clock.
+ */
+ pcie_refclk_gen: pcie-refclk-gen-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ pcie_refclk: pcie-refclk-clock {
+ compatible = "gpio-gate-clock";
+ clocks = <&pcie_refclk_gen>;
+ #clock-cells = <0>;
+ enable-gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_LOW>; /* PCIE30X4_CLKREQN_M0 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x4_clkreqn_m0>;
+ };
+
pps {
compatible = "pps-gpio";
gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_HIGH>;
@@ -245,6 +266,11 @@
};
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-0 = <&i2c0m2_xfer>;
status = "okay";
@@ -353,6 +379,30 @@
status = "okay";
};
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ /*
+ * The board has a gpio-controlled "pcie_refclk" generator,
+ * so add it to the list of clocks.
+ */
+ clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
+ <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
+ <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>,
+ <&pcie_refclk>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe",
+ "ref";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x4_waken_m0 &pcie30x4_perstn_m0>;
+ reset-gpios = <&gpio0 RK_PD0 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTN_M0 */
+ vpcie3v3-supply = <&vcc3v3_mdot2>;
+ status = "okay";
+};
+
&pinctrl {
emmc {
emmc_reset: emmc-reset {
@@ -371,6 +421,20 @@
rockchip,pins = <1 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ pcie30x4 {
+ pcie30x4_clkreqn_m0: pcie30x4-clkreqn-m0 {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x4_perstn_m0: pcie30x4-perstn-m0 {
+ rockchip,pins = <0 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x4_waken_m0: pcie30x4-waken-m0 {
+ rockchip,pins = <0 RK_PC7 12 &pcfg_pull_none>;
+ };
+ };
};
&saradc {
@@ -452,7 +516,7 @@
vcca-supply = <&vcc5v0_sys>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
new file mode 100644
index 000000000000..009566d881f3
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+#include "rk3588-fet3588-c.dtsi"
+
+/ {
+ model = "Forlinx OK3588-C Board";
+ compatible = "forlinx,ok3588-c", "forlinx,fet3588-c", "rockchip,rk3588";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ mmc1 = &sdmmc;
+ };
+
+ adc-keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "Maskrom";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <400>;
+ };
+ };
+
+ adc-keys-1 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-volume-up {
+ label = "V+/Recovery";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <17000>;
+ };
+
+ button-volume-down {
+ label = "V-";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <417000>;
+ };
+
+ button-menu {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ press-threshold-microvolt = <890000>;
+ };
+
+ button-escape {
+ label = "ESC";
+ linux,code = <KEY_ESC>;
+ press-threshold-microvolt = <1235000>;
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <0 95 145 195 255>;
+ fan-supply = <&vcc12v_dcin>;
+ pwms = <&pwm2 0 50000 0>;
+ #cooling-cells = <2>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ simple-audio-card,name = "RK3588 OK3588-C Audio";
+ simple-audio-card,bitclock-master = <&masterdai>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&masterdai>;
+ simple-audio-card,hp-det-gpio = <&gpio1 RK_PB2 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,pin-switches = "Headphones", "Speaker";
+ simple-audio-card,widgets =
+ "Headphones", "Headphones",
+ "Speaker", "Speaker",
+ "Microphone", "Internal Microphone",
+ "Microphone", "Headset Microphone";
+ simple-audio-card,routing =
+ "Headphones", "LHP",
+ "Headphones", "RHP",
+ "Speaker", "LSPK",
+ "Speaker", "RSPK",
+ "LMICP", "Headset Microphone",
+ "RMICP", "Internal Microphone";
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ };
+
+ masterdai: simple-audio-card,codec {
+ sound-dai = <&nau8822>;
+ };
+ };
+
+ vcc12v_dcin: vcc12v-dcin-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc1v8_sys: vcc1v8-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie2x1l0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie2x1l2: vcc3v3-pcie2x1l2-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie2x1l2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie30: vcc3v3_pcie30-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie30";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-rxid";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus>;
+ tx_delay = <0x44>;
+ rx_delay = <0x00>;
+ status = "okay";
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-rxid";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1_miim
+ &gmac1_tx_bus2
+ &gmac1_rx_bus2
+ &gmac1_rgmii_clk
+ &gmac1_rgmii_bus>;
+ tx_delay = <0x44>;
+ rx_delay = <0x00>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+
+ tca6424a: gpio@23 {
+ compatible = "ti,tca6424";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PA4 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tca6424a_int>;
+ vcc-supply = <&vcc3v3_sys>;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5m2_xfer>;
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ nau8822: audio-codec@1a {
+ compatible = "nuvoton,nau8822";
+ reg = <0x1a>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+};
+
+&mdio0 {
+ rgmii_phy0: ethernet-phy@1 {
+ /* RTL8211F */
+ compatible = "ethernet-phy-id001c.c916",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtl8211f_0_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@2 {
+ /* RTL8211F */
+ compatible = "ethernet-phy-id001c.c916",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0x2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtl8211f_1_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_rst>;
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie2x1l2>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_rst>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie3 {
+ pcie3_rst: pcie3-rst {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rtl8211f {
+ rtl8211f_0_rst: rtl8211f-0-rst {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ rtl8211f_1_rst: rtl8211f-1-rst {
+ rockchip,pins = <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ tca6424a {
+ tca6424a_int: tca6424a-int {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 1b606ea5b6cf..1a604429fb26 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -485,6 +485,7 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
@@ -506,7 +507,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
index 67414d72e2b6..b4f22d95ac0e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
@@ -13,7 +13,7 @@
#include "rk3588.dtsi"
/ {
- model = "PINE64 QuartzPro64";
+ model = "Pine64 QuartzPro64";
compatible = "pine64,quartzpro64", "rockchip,rk3588";
aliases {
@@ -285,6 +285,12 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
&i2c2 {
status = "okay";
@@ -456,6 +462,7 @@
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
pinctrl-names = "default";
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc4v0_sys>;
vcc2-supply = <&vcc4v0_sys>;
@@ -491,11 +498,15 @@
regulators {
vdd_gpu_s0: dcdc-reg1 {
regulator-name = "vdd_gpu_s0";
+ /* regulator coupling requires always-on */
+ regulator-always-on;
regulator-boot-on;
regulator-enable-ramp-delay = <400>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
+ regulator-coupled-with = <&vdd_gpu_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-state-mem {
regulator-off-in-suspend;
@@ -545,11 +556,15 @@
vdd_gpu_mem_s0: dcdc-reg5 {
regulator-name = "vdd_gpu_mem_s0";
+ /* regulator coupling requires always-on */
+ regulator-always-on;
regulator-boot-on;
regulator-enable-ramp-delay = <400>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
+ regulator-coupled-with = <&vdd_gpu_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-state-mem {
regulator-off-in-suspend;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
index 1fe8b2a0ed75..b8e15b76a8a6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
@@ -7,7 +7,7 @@
#include "rk3588.dtsi"
/ {
- model = "Radxa ROCK 5 Model B";
+ model = "Radxa ROCK 5B";
compatible = "radxa,rock-5b", "rockchip,rk3588";
aliases {
@@ -180,6 +180,11 @@
cpu-supply = <&vdd_cpu_lit_s0>;
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -742,6 +747,14 @@
status = "okay";
};
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -761,6 +774,10 @@
status = "okay";
};
+&usbdp_phy1 {
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
@@ -777,6 +794,11 @@
status = "okay";
};
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host2_xhci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
index d672198c6b64..e4b7a0a4444b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
@@ -113,6 +113,16 @@
vin-supply = <&dc_12v>;
};
+ vcc5v0_otg: vcc5v0-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&otg_vbus_drv>;
+ regulator-name = "vcc5v0_otg";
+ regulator-always-on;
+ };
+
vcc5v0_usb: vcc5v0-usb-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_usb";
@@ -137,6 +147,10 @@
status = "okay";
};
+&extcon_usb3 {
+ status = "okay";
+};
+
&gmac0 {
status = "okay";
};
@@ -199,6 +213,13 @@
<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
+
+ usb2 {
+ otg_vbus_drv: otg-vbus-drv {
+ rockchip,pins =
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&sdmmc {
@@ -214,6 +235,23 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -231,25 +269,38 @@
};
&uart2 {
- pinctrl-0 = <&uart2m2_xfer>;
status = "okay";
};
&uart5 {
rts-gpios = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+};
+
+&usbdp_phy0 {
status = "okay";
};
-/* host0 on Q7_USB_P2, lower usb3 port */
+&usbdp_phy1 {
+ status = "okay";
+};
+
+/* host0 on Q7_USB_P2, upper usb3 port */
&usb_host0_ehci {
status = "okay";
};
-/* host0 on Q7_USB_P2, lower usb3 port */
+/* host0 on Q7_USB_P2, upper usb3 port */
&usb_host0_ohci {
status = "okay";
};
+/* host0_xhci on Q7_USB_P1, usb3-otg port */
+&usb_host0_xhci {
+ dr_mode = "otg";
+ extcon = <&extcon_usb3>;
+ status = "okay";
+};
+
/* host1 on Q7_USB_P3, usb2 port */
&usb_host1_ehci {
status = "okay";
@@ -260,7 +311,13 @@
status = "okay";
};
-/* host2 on Q7_USB_P2, lower usb3 port */
+/* host1_xhci on Q7_USB_P0, lower usb3 port */
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* host2 on Q7_USB_P2, upper usb3 port */
&usb_host2_xhci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
index 1eb2543a5fde..aebe1fedd2d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
@@ -23,6 +23,14 @@
reset-gpios = <&gpio2 RK_PA3 GPIO_ACTIVE_HIGH>;
};
+ extcon_usb3: extcon-usb3 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_id>;
+ status = "disabled";
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -46,7 +54,7 @@
pcie_refclk_gen: pcie-refclk-gen-clock {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <1000000000>;
+ clock-frequency = <100000000>;
};
pcie_refclk: pcie-refclk-clock {
@@ -139,6 +147,11 @@
snps,reset-delays-us = <0 10000 100000>;
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c1 {
pinctrl-0 = <&i2c1m0_xfer>;
};
@@ -322,6 +335,13 @@
rockchip,pins = <1 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ usb3 {
+ usb3_id: usb3-id {
+ rockchip,pins =
+ <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&saradc {
@@ -396,7 +416,7 @@
vcca-supply = <&vcc5v0_sys>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
@@ -683,6 +703,11 @@
status = "okay";
};
+/* Routed to UART0 on the Q7 connector */
+&uart2 {
+ pinctrl-0 = <&uart2m2_xfer>;
+};
+
/* Mule-ATtiny UPDI */
&uart4 {
pinctrl-0 = <&uart4m2_xfer>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
index dc08da518a76..6b9206ce4a03 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
@@ -318,7 +318,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588.dtsi
index 5519c1430cb7..5984016b5f96 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588.dtsi
@@ -7,6 +7,26 @@
#include "rk3588-pinctrl.dtsi"
/ {
+ usb_host1_xhci: usb@fc400000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfc400000 0x0 0x400000>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG1>, <&cru SUSPEND_CLK_USB3OTG1>,
+ <&cru ACLK_USB3OTG1>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ dr_mode = "otg";
+ phys = <&u2phy1_otg>, <&usbdp_phy1 PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ power-domains = <&power RK3588_PD_USB>;
+ resets = <&cru SRST_A_USB3OTG1>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ status = "disabled";
+ };
+
pcie30_phy_grf: syscon@fd5b8000 {
compatible = "rockchip,rk3588-pcie3-phy-grf", "syscon";
reg = <0x0 0xfd5b8000 0x0 0x10000>;
@@ -17,6 +37,36 @@
reg = <0x0 0xfd5c0000 0x0 0x100>;
};
+ usbdpphy1_grf: syscon@fd5cc000 {
+ compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
+ reg = <0x0 0xfd5cc000 0x0 0x4000>;
+ };
+
+ usb2phy1_grf: syscon@fd5d4000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5d4000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy1: usb2phy@4000 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0x4000 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy1";
+ interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U3_1>, <&cru SRST_P_USB2PHY_U3_1_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy1_otg: otg-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
i2s8_8ch: i2s@fddc8000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddc8000 0x0 0x1000>;
@@ -310,6 +360,28 @@
};
};
+ usbdp_phy1: phy@fed90000 {
+ compatible = "rockchip,rk3588-usbdp-phy";
+ reg = <0x0 0xfed90000 0x0 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
+ <&cru CLK_USBDP_PHY1_IMMORTAL>,
+ <&cru PCLK_USBDPPHY1>,
+ <&u2phy1>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY1_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY1_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY1_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY1_PCS>,
+ <&cru SRST_P_USBDPPHY1>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy1_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy1_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ status = "disabled";
+ };
+
combphy1_ps: phy@fee10000 {
compatible = "rockchip,rk3588-naneng-combphy";
reg = <0x0 0xfee10000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
index e037bf9db75a..3b2ec1d0c542 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
@@ -203,6 +203,11 @@
cpu-supply = <&vdd_cpu_big1_s0>;
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-0 = <&i2c0m2_xfer>;
status = "okay";
@@ -479,7 +484,7 @@
vcca-supply = <&vcc5v0_sys>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
index ce8119cbb824..d8c50fdcca3b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
@@ -316,7 +316,7 @@
pinctrl-names = "default";
vbus-supply = <&vbus5v0_typec>;
- connector {
+ usb_con: connector {
compatible = "usb-c-connector";
data-role = "dual";
label = "USB-C";
@@ -325,6 +325,32 @@
source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
sink-pdos = <PDO_FIXED(5000, 1000, PDO_FIXED_USB_COMM)>;
op-sink-microwatt = <1000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&dwc3_0_role_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
};
};
@@ -528,7 +554,7 @@
vcca-supply = <&vcc5v0_sys>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
@@ -788,6 +814,14 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -839,6 +873,17 @@
status = "okay";
};
+&usb_host0_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ dwc3_0_role_switch: endpoint {
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
&usb_host1_ehci {
status = "okay";
};
@@ -850,3 +895,27 @@
&usb_host2_xhci {
status = "okay";
};
+
+&usbdp_phy0 {
+ orientation-switch;
+ mode-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA0 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>;
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_altmode_mux>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
index f53e993c785e..dbddfc3bb464 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
@@ -3,7 +3,9 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/leds/common.h>
#include "rk3588s.dtsi"
/ {
@@ -12,11 +14,298 @@
aliases {
mmc0 = &sdhci;
+ mmc1 = &sdmmc;
};
chosen {
stdout-path = "serial2:1500000n8";
};
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-function {
+ label = "Function";
+ linux,code = <KEY_FN>;
+ press-threshold-microvolt = <17000>;
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_receiver_pin>;
+ };
+
+ leds {
+ compatible = "pwm-leds";
+
+ red_led: led-0 {
+ label = "red_led";
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_INDICATOR;
+ linux,default-trigger = "none";
+ max-brightness = <255>;
+ pwms = <&pwm11 0 25000 0>;
+ };
+
+ green_led: led-1 {
+ label = "green_led";
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_POWER;
+ linux,default-trigger = "default-on";
+ max-brightness = <255>;
+ pwms = <&pwm14 0 25000 0>;
+ };
+
+ blue_led: led-2 {
+ label = "blue_led";
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ function = LED_FUNCTION_INDICATOR;
+ linux,default-trigger = "none";
+ max-brightness = <255>;
+ pwms = <&pwm15 0 25000 0>;
+ };
+ };
+
+ vcc3v3_pcie_wl: vcc3v3-pcie-wl-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_vcc3v3_en>;
+ regulator-name = "vcc3v3_pcie_wl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vdd_3v3_sd: vdd-3v3-sd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3_sd";
+ gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ enable-active-high;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_en>;
+ };
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ wakeup-source;
+ };
+};
+
+&pinctrl {
+ vdd_sd {
+ vdd_sd_en: vdd-sd-en {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pcie2 {
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_2_vcc3v3_en: pcie2-2-vcc-en {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ ir-receiver {
+ ir_receiver_pin: ir-receiver-pin {
+ rockchip,pins = <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wireless-bluetooth {
+ bt_reset_pin: bt-reset-pin {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_pin: bt-wake-pin {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ bt_wake_host_irq: bt-wake-host-irq {
+ rockchip,pins = <0 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_rst>;
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie_wl>;
+ status = "okay";
+};
+
+&pwm11 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm11m1_pins>;
+ status = "okay";
+};
+
+&pwm14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m1_pins>;
+ status = "okay";
+};
+
+&pwm15 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm15m1_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
};
&sdhci {
@@ -29,7 +318,403 @@
status = "okay";
};
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ no-mmc;
+ no-sdio;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vdd_3v3_sd>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <100000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_log_s0";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_vdenc_s0";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd_ddr_s0";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-name = "vdd_2v0_pldo_s3";
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
&uart2 {
pinctrl-0 = <&uart2m0_xfer>;
status = "okay";
};
+
+&uart9 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart9m2_xfer &uart9m2_ctsn>;
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host2_xhci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
index 25de4362af38..feea6b20a6bf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
@@ -6,6 +6,7 @@
#include <dt-bindings/leds/common.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
#include "rk3588s.dtsi"
/ {
@@ -146,6 +147,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -212,6 +218,56 @@
pinctrl-0 = <&i2c6m3_xfer>;
status = "okay";
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vbus_typec>;
+ status = "okay";
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos =
+ <PDO_FIXED(5000, 1000, PDO_FIXED_USB_COMM)>;
+ source-pdos =
+ <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usbc0_hs: endpoint {
+ remote-endpoint = <&usb_host0_xhci_drd_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ usbc0_ss: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ usbc0_sbu: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_sbu>;
+ };
+ };
+ };
+ };
+ };
+
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
@@ -336,7 +392,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
@@ -625,6 +681,14 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -646,6 +710,29 @@
status = "okay";
};
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_typec_ss: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_ss>;
+ };
+
+ usbdp_phy0_typec_sbu: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc0_sbu>;
+ };
+ };
+};
+
&usb_host0_ehci {
status = "okay";
};
@@ -654,6 +741,18 @@
status = "okay";
};
+&usb_host0_xhci {
+ dr_mode = "otg";
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ usb_host0_xhci_drd_sw: endpoint {
+ remote-endpoint = <&usbc0_hs>;
+ };
+ };
+};
+
&usb_host1_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
index 00afb90d4eb1..8e2a07612d17 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
@@ -8,7 +8,7 @@
#include "rk3588s.dtsi"
/ {
- model = "Radxa ROCK 5 Model A";
+ model = "Radxa ROCK 5A";
compatible = "radxa,rock-5a", "rockchip,rk3588s";
aliases {
@@ -414,7 +414,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
@@ -697,6 +697,14 @@
};
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -720,6 +728,11 @@
status = "okay";
};
+&usbdp_phy0 {
+ status = "okay";
+ rockchip,dp-lane-mux = <2 3>;
+};
+
&usb_host0_ehci {
status = "okay";
pinctrl-names = "default";
@@ -730,6 +743,11 @@
status = "okay";
};
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host1_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 87b83c87bd55..6ac5ac8b48ab 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -347,6 +347,11 @@
};
};
+ display_subsystem: display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vop_out>;
+ };
+
firmware {
optee: optee {
compatible = "linaro,optee-tz";
@@ -394,11 +399,6 @@
#clock-cells = <0>;
};
- display_subsystem: display-subsystem {
- compatible = "rockchip,display-subsystem";
- ports = <&vop_out>;
- };
-
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH 0>,
@@ -436,6 +436,84 @@
};
};
+ gpu: gpu@fb000000 {
+ compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
+ reg = <0x0 0xfb000000 0x0 0x200000>;
+ #cooling-cells = <2>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_GPU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
+ <&cru CLK_GPU_STACKS>;
+ clock-names = "core", "coregroup", "stacks";
+ dynamic-power-coefficient = <2982>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ operating-points-v2 = <&gpu_opp_table>;
+ power-domains = <&power RK3588_PD_GPU>;
+ status = "disabled";
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <700000 700000 850000>;
+ };
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <800000 800000 850000>;
+ };
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <850000 850000 850000>;
+ };
+ };
+ };
+
+ usb_host0_xhci: usb@fc000000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfc000000 0x0 0x400000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG0>, <&cru SUSPEND_CLK_USB3OTG0>,
+ <&cru ACLK_USB3OTG0>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ dr_mode = "otg";
+ phys = <&u2phy0_otg>, <&usbdp_phy0 PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ power-domains = <&power RK3588_PD_USB>;
+ resets = <&cru SRST_A_USB3OTG0>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ status = "disabled";
+ };
+
usb_host0_ehci: usb@fc800000 {
compatible = "rockchip,rk3588-ehci", "generic-ehci";
reg = <0x0 0xfc800000 0x0 0x40000>;
@@ -501,6 +579,30 @@
status = "disabled";
};
+ mmu600_pcie: iommu@fc900000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfc900000 0x0 0x200000>;
+ interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ mmu600_php: iommu@fcb00000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfcb00000 0x0 0x200000>;
+ interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
pmu1grf: syscon@fd58a000 {
compatible = "rockchip,rk3588-pmugrf", "syscon", "simple-mfd";
reg = <0x0 0xfd58a000 0x0 0x10000>;
@@ -516,12 +618,23 @@
reg = <0x0 0xfd5a4000 0x0 0x2000>;
};
+ vo0_grf: syscon@fd5a6000 {
+ compatible = "rockchip,rk3588-vo-grf", "syscon";
+ reg = <0x0 0xfd5a6000 0x0 0x2000>;
+ clocks = <&cru PCLK_VO0GRF>;
+ };
+
vo1_grf: syscon@fd5a8000 {
compatible = "rockchip,rk3588-vo-grf", "syscon";
reg = <0x0 0xfd5a8000 0x0 0x100>;
clocks = <&cru PCLK_VO1GRF>;
};
+ usb_grf: syscon@fd5ac000 {
+ compatible = "rockchip,rk3588-usb-grf", "syscon";
+ reg = <0x0 0xfd5ac000 0x0 0x4000>;
+ };
+
php_grf: syscon@fd5b0000 {
compatible = "rockchip,rk3588-php-grf", "syscon";
reg = <0x0 0xfd5b0000 0x0 0x1000>;
@@ -537,22 +650,52 @@
reg = <0x0 0xfd5c4000 0x0 0x100>;
};
+ usbdpphy0_grf: syscon@fd5c8000 {
+ compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
+ reg = <0x0 0xfd5c8000 0x0 0x4000>;
+ };
+
+ usb2phy0_grf: syscon@fd5d0000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5d0000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy0: usb2phy@0 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0x0 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy0";
+ interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U3_0>, <&cru SRST_P_USB2PHY_U3_0_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
usb2phy2_grf: syscon@fd5d8000 {
compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
reg = <0x0 0xfd5d8000 0x0 0x4000>;
#address-cells = <1>;
#size-cells = <1>;
- u2phy2: usb2-phy@8000 {
+ u2phy2: usb2phy@8000 {
compatible = "rockchip,rk3588-usb2phy";
reg = <0x8000 0x10>;
- interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U2_0>, <&cru SRST_P_USB2PHY_U2_0_GRF0>;
- reset-names = "phy", "apb";
+ #clock-cells = <0>;
clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
clock-names = "phyclk";
clock-output-names = "usb480m_phy2";
- #clock-cells = <0>;
+ interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U2_0>, <&cru SRST_P_USB2PHY_U2_0_GRF0>;
+ reset-names = "phy", "apb";
status = "disabled";
u2phy2_host: host-port {
@@ -568,16 +711,16 @@
#address-cells = <1>;
#size-cells = <1>;
- u2phy3: usb2-phy@c000 {
+ u2phy3: usb2phy@c000 {
compatible = "rockchip,rk3588-usb2phy";
reg = <0xc000 0x10>;
- interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U2_1>, <&cru SRST_P_USB2PHY_U2_1_GRF0>;
- reset-names = "phy", "apb";
+ #clock-cells = <0>;
clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
clock-names = "phyclk";
clock-output-names = "usb480m_phy3";
- #clock-cells = <0>;
+ interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U2_1>, <&cru SRST_P_USB2PHY_U2_1_GRF0>;
+ reset-names = "phy", "apb";
status = "disabled";
u2phy3_host: host-port {
@@ -646,74 +789,6 @@
status = "disabled";
};
- vop: vop@fdd90000 {
- compatible = "rockchip,rk3588-vop";
- reg = <0x0 0xfdd90000 0x0 0x4200>, <0x0 0xfdd95000 0x0 0x1000>;
- reg-names = "vop", "gamma-lut";
- interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP>,
- <&cru HCLK_VOP>,
- <&cru DCLK_VOP0>,
- <&cru DCLK_VOP1>,
- <&cru DCLK_VOP2>,
- <&cru DCLK_VOP3>,
- <&cru PCLK_VOP_ROOT>;
- clock-names = "aclk",
- "hclk",
- "dclk_vp0",
- "dclk_vp1",
- "dclk_vp2",
- "dclk_vp3",
- "pclk_vop";
- iommus = <&vop_mmu>;
- power-domains = <&power RK3588_PD_VOP>;
- rockchip,grf = <&sys_grf>;
- rockchip,vop-grf = <&vop_grf>;
- rockchip,vo1-grf = <&vo1_grf>;
- rockchip,pmu = <&pmu>;
- status = "disabled";
-
- vop_out: ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- vp0: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- vp1: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- vp2: port@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- vp3: port@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
- };
- };
-
- vop_mmu: iommu@fdd97e00 {
- compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
- reg = <0x0 0xfdd97e00 0x0 0x100>, <0x0 0xfdd97f00 0x0 0x100>;
- interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- power-domains = <&power RK3588_PD_VOP>;
- status = "disabled";
- };
-
uart0: serial@fd890000 {
compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
reg = <0x0 0xfd890000 0x0 0x100>;
@@ -1084,6 +1159,87 @@
};
};
+ av1d: video-codec@fdc70000 {
+ compatible = "rockchip,rk3588-av1-vpu";
+ reg = <0x0 0xfdc70000 0x0 0x800>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "vdpu";
+ assigned-clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
+ assigned-clock-rates = <400000000>, <400000000>;
+ clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
+ clock-names = "aclk", "hclk";
+ power-domains = <&power RK3588_PD_AV1>;
+ resets = <&cru SRST_A_AV1>, <&cru SRST_P_AV1>, <&cru SRST_A_AV1_BIU>, <&cru SRST_P_AV1_BIU>;
+ };
+
+ vop: vop@fdd90000 {
+ compatible = "rockchip,rk3588-vop";
+ reg = <0x0 0xfdd90000 0x0 0x4200>, <0x0 0xfdd95000 0x0 0x1000>;
+ reg-names = "vop", "gamma-lut";
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP>,
+ <&cru HCLK_VOP>,
+ <&cru DCLK_VOP0>,
+ <&cru DCLK_VOP1>,
+ <&cru DCLK_VOP2>,
+ <&cru DCLK_VOP3>,
+ <&cru PCLK_VOP_ROOT>;
+ clock-names = "aclk",
+ "hclk",
+ "dclk_vp0",
+ "dclk_vp1",
+ "dclk_vp2",
+ "dclk_vp3",
+ "pclk_vop";
+ iommus = <&vop_mmu>;
+ power-domains = <&power RK3588_PD_VOP>;
+ rockchip,grf = <&sys_grf>;
+ rockchip,vop-grf = <&vop_grf>;
+ rockchip,vo1-grf = <&vo1_grf>;
+ rockchip,pmu = <&pmu>;
+ status = "disabled";
+
+ vop_out: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vp0: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ vp1: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ vp2: port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ vp3: port@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+ };
+
+ vop_mmu: iommu@fdd97e00 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdd97e00 0x0 0x100>, <0x0 0xfdd97f00 0x0 0x100>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3588_PD_VOP>;
+ status = "disabled";
+ };
+
i2s4_8ch: i2s@fddc0000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddc0000 0x0 0x1000>;
@@ -1375,6 +1531,16 @@
reg = <0x0 0xfdf82200 0x0 0x20>;
};
+ dfi: dfi@fe060000 {
+ reg = <0x00 0xfe060000 0x00 0x10000>;
+ compatible = "rockchip,rk3588-dfi";
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+ rockchip,pmu = <&pmu1grf>;
+ };
+
pcie2x1l1: pcie@fe180000 {
compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
bus-range = <0x30 0x3f>;
@@ -1477,16 +1643,6 @@
};
};
- dfi: dfi@fe060000 {
- reg = <0x00 0xfe060000 0x00 0x10000>;
- compatible = "rockchip,rk3588-dfi";
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
- rockchip,pmu = <&pmu1grf>;
- };
-
gmac1: ethernet@fe1c0000 {
compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
reg = <0x0 0xfe1c0000 0x0 0x10000>;
@@ -2380,6 +2536,28 @@
status = "disabled";
};
+ usbdp_phy0: phy@fed80000 {
+ compatible = "rockchip,rk3588-usbdp-phy";
+ reg = <0x0 0xfed80000 0x0 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
+ <&cru CLK_USBDP_PHY0_IMMORTAL>,
+ <&cru PCLK_USBDPPHY0>,
+ <&u2phy0>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY0_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY0_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY0_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY0_PCS>,
+ <&cru SRST_P_USBDPPHY0>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy0_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy0_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ status = "disabled";
+ };
+
combphy0_ps: phy@fee00000 {
compatible = "rockchip,rk3588-naneng-combphy";
reg = <0x0 0xfee00000 0x0 0x100>;
@@ -2487,19 +2665,6 @@
#interrupt-cells = <2>;
};
};
-
- av1d: video-codec@fdc70000 {
- compatible = "rockchip,rk3588-av1-vpu";
- reg = <0x0 0xfdc70000 0x0 0x800>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
- assigned-clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
- assigned-clock-rates = <400000000>, <400000000>;
- clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
- clock-names = "aclk", "hclk";
- power-domains = <&power RK3588_PD_AV1>;
- resets = <&cru SRST_A_AV1>, <&cru SRST_P_AV1>, <&cru SRST_A_AV1_BIU>, <&cru SRST_P_AV1_BIU>;
- };
};
#include "rk3588s-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
index da44a15a8adf..a251c4343548 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
@@ -111,7 +111,7 @@
&i2c0 {
status = "okay";
- tas5707a@1d {
+ audio-codec@1d {
compatible = "ti,tas5711";
reg = <0x1d>;
reset-gpios = <&gpio UNIPHIER_GPIO_PORT(23, 4) GPIO_ACTIVE_LOW>;
@@ -124,7 +124,7 @@
PVDD_C-supply = <&amp_vcc_reg>;
PVDD_D-supply = <&amp_vcc_reg>;
- port@0 {
+ port {
tas_speaker: endpoint {
dai-format = "i2s";
remote-endpoint = <&i2s_hpcmout1>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
index a01579cb3b79..79f6db2455c1 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -111,7 +111,7 @@
&i2c0 {
status = "okay";
- tas5707@1b {
+ audio-codec@1b {
compatible = "ti,tas5711";
reg = <0x1b>;
reset-gpios = <&gpio UNIPHIER_GPIO_PORT(0, 0) GPIO_ACTIVE_LOW>;
@@ -124,7 +124,7 @@
PVDD_C-supply = <&amp_vcc_reg>;
PVDD_D-supply = <&amp_vcc_reg>;
- port@0 {
+ port {
tas_speaker: endpoint {
dai-format = "i2s";
remote-endpoint = <&i2s_hpcmout1>;
diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi
index e27eb3ed1d47..31952d361a8a 100644
--- a/arch/arm64/boot/dts/sprd/sc9860.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi
@@ -113,7 +113,7 @@
};
};
- idle-states{
+ idle-states {
entry-method = "psci";
CORE_PD: core_pd {
@@ -135,18 +135,6 @@
};
};
- gic: interrupt-controller@12001000 {
- compatible = "arm,gic-400";
- reg = <0 0x12001000 0 0x1000>,
- <0 0x12002000 0 0x2000>,
- <0 0x12004000 0 0x2000>,
- <0 0x12006000 0 0x2000>;
- #interrupt-cells = <3>;
- interrupt-controller;
- interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8)
- | IRQ_TYPE_LEVEL_HIGH)>;
- };
-
psci {
compatible = "arm,psci-0.2";
method = "smc";
@@ -165,7 +153,7 @@
};
pmu {
- compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
@@ -185,6 +173,18 @@
};
soc {
+ gic: interrupt-controller@12001000 {
+ compatible = "arm,gic-400";
+ reg = <0 0x12001000 0 0x1000>,
+ <0 0x12002000 0 0x2000>,
+ <0 0x12004000 0 0x2000>,
+ <0 0x12006000 0 0x2000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
pmu_gate: pmu-gate {
compatible = "sprd,sc9860-pmu-gate";
sprd,syscon = <&pmu_regs>; /* 0x402b0000 */
@@ -207,7 +207,7 @@
#clock-cells = <1>;
};
- aon_prediv: aon-prediv {
+ aon_prediv: aon-prediv@402d0000 {
compatible = "sprd,sc9860-aon-prediv";
reg = <0 0x402d0000 0 0x400>;
clocks = <&ext_26m>, <&pll 0>,
@@ -684,33 +684,5 @@
};
};
};
-
- gpio-keys {
- compatible = "gpio-keys";
-
- key-volumedown {
- label = "Volume Down Key";
- linux,code = <KEY_VOLUMEDOWN>;
- gpios = <&eic_debounce 2 GPIO_ACTIVE_LOW>;
- debounce-interval = <2>;
- wakeup-source;
- };
-
- key-volumeup {
- label = "Volume Up Key";
- linux,code = <KEY_VOLUMEUP>;
- gpios = <&pmic_eic 10 GPIO_ACTIVE_HIGH>;
- debounce-interval = <2>;
- wakeup-source;
- };
-
- key-power {
- label = "Power Key";
- linux,code = <KEY_POWER>;
- gpios = <&pmic_eic 1 GPIO_ACTIVE_HIGH>;
- debounce-interval = <2>;
- wakeup-source;
- };
- };
};
};
diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
index 22d81ace740a..53e5b77d70b5 100644
--- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
@@ -134,7 +134,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/sprd/sharkl3.dtsi b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
index 206a4afdab1c..9b4ee0bdd69f 100644
--- a/arch/arm64/boot/dts/sprd/sharkl3.dtsi
+++ b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
@@ -24,7 +24,7 @@
#size-cells = <1>;
ranges = <0 0 0x20e00000 0x4000>;
- apahb_gate: apahb-gate {
+ apahb_gate: apahb-gate@0 {
compatible = "sprd,sc9863a-apahb-gate";
reg = <0x0 0x1020>;
#clock-cells = <1>;
@@ -39,7 +39,7 @@
#size-cells = <1>;
ranges = <0 0 0x402b0000 0x4000>;
- pmu_gate: pmu-gate {
+ pmu_gate: pmu-gate@0 {
compatible = "sprd,sc9863a-pmu-gate";
reg = <0 0x1200>;
clocks = <&ext_26m>;
@@ -56,7 +56,7 @@
#size-cells = <1>;
ranges = <0 0 0x402e0000 0x4000>;
- aonapb_gate: aonapb-gate {
+ aonapb_gate: aonapb-gate@0 {
compatible = "sprd,sc9863a-aonapb-gate";
reg = <0 0x1100>;
#clock-cells = <1>;
@@ -71,7 +71,7 @@
#size-cells = <1>;
ranges = <0 0 0x40353000 0x3000>;
- pll: pll {
+ pll: pll@0 {
compatible = "sprd,sc9863a-pll";
reg = <0 0x100>;
clocks = <&ext_26m>;
@@ -88,7 +88,7 @@
#size-cells = <1>;
ranges = <0 0 0x40359000 0x3000>;
- mpll: mpll {
+ mpll: mpll@0 {
compatible = "sprd,sc9863a-mpll";
reg = <0 0x100>;
#clock-cells = <1>;
@@ -103,7 +103,7 @@
#size-cells = <1>;
ranges = <0 0 0x4035c000 0x3000>;
- rpll: rpll {
+ rpll: rpll@0 {
compatible = "sprd,sc9863a-rpll";
reg = <0 0x100>;
clocks = <&ext_26m>;
@@ -120,7 +120,7 @@
#size-cells = <1>;
ranges = <0 0 0x40363000 0x3000>;
- dpll: dpll {
+ dpll: dpll@0 {
compatible = "sprd,sc9863a-dpll";
reg = <0 0x100>;
#clock-cells = <1>;
@@ -135,7 +135,7 @@
#size-cells = <1>;
ranges = <0 0 0x60800000 0x3000>;
- mm_gate: mm-gate {
+ mm_gate: mm-gate@0 {
compatible = "sprd,sc9863a-mm-gate";
reg = <0 0x1100>;
#clock-cells = <1>;
@@ -150,7 +150,7 @@
#size-cells = <1>;
ranges = <0 0 0x71300000 0x4000>;
- apapb_gate: apapb-gate {
+ apapb_gate: apapb-gate@0 {
compatible = "sprd,sc9863a-apapb-gate";
reg = <0 0x1000>;
clocks = <&ext_26m>;
diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
index 6b95fd94cee3..1ce3cbbd9668 100644
--- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
+++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
@@ -24,7 +24,7 @@
spi0 = &adi_bus;
};
- memory{
+ memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0 0x60000000>,
<0x1 0x80000000 0 0x60000000>;
@@ -34,6 +34,34 @@
stdout-path = "serial1:115200n8";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key-volumedown {
+ label = "Volume Down Key";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&eic_debounce 2 GPIO_ACTIVE_LOW>;
+ debounce-interval = <2>;
+ wakeup-source;
+ };
+
+ key-volumeup {
+ label = "Volume Up Key";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pmic_eic 10 GPIO_ACTIVE_HIGH>;
+ debounce-interval = <2>;
+ wakeup-source;
+ };
+
+ key-power {
+ label = "Power Key";
+ linux,code = <KEY_POWER>;
+ gpios = <&pmic_eic 1 GPIO_ACTIVE_HIGH>;
+ debounce-interval = <2>;
+ wakeup-source;
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi
index fece49704b5c..7068bfd2f4c3 100644
--- a/arch/arm64/boot/dts/sprd/whale2.dtsi
+++ b/arch/arm64/boot/dts/sprd/whale2.dtsi
@@ -64,7 +64,7 @@
reg = <0 0x70b00000 0 0x40000>;
};
- ap-apb {
+ ap-apb@70000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
index 66791a974f8f..7a82896dcbf6 100644
--- a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
@@ -6,6 +6,23 @@
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
&pinctrl {
+ i2c2_pins_a: i2c2-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 5, AF9)>, /* I2C2_SCL */
+ <STM32_PINMUX('B', 4, AF9)>; /* I2C2_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c2_sleep_pins_a: i2c2-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 5, ANALOG)>, /* I2C2_SCL */
+ <STM32_PINMUX('B', 4, ANALOG)>; /* I2C2_SDA */
+ };
+ };
+
sdmmc1_b4_pins_a: sdmmc1-b4-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 4, AF10)>, /* SDMMC1_D0 */
@@ -60,6 +77,28 @@
};
};
+ spi3_pins_a: spi3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 7, AF1)>, /* SPI3_SCK */
+ <STM32_PINMUX('B', 8, AF1)>; /* SPI3_MOSI */
+ drive-push-pull;
+ bias-disable;
+ slew-rate = <1>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 10, AF1)>; /* SPI3_MISO */
+ bias-disable;
+ };
+ };
+
+ spi3_sleep_pins_a: spi3-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 7, ANALOG)>, /* SPI3_SCK */
+ <STM32_PINMUX('B', 8, ANALOG)>, /* SPI3_MOSI */
+ <STM32_PINMUX('B', 10, ANALOG)>; /* SPI3_MISO */
+ };
+ };
+
usart2_pins_a: usart2-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 4, AF6)>; /* USART2_TX */
@@ -90,3 +129,46 @@
};
};
};
+
+&pinctrl_z {
+ i2c8_pins_a: i2c8-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 4, AF8)>, /* I2C8_SCL */
+ <STM32_PINMUX('Z', 3, AF8)>; /* I2C8_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c8_sleep_pins_a: i2c8-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C8_SCL */
+ <STM32_PINMUX('Z', 3, ANALOG)>; /* I2C8_SDA */
+ };
+ };
+};
+
+&pinctrl_z {
+ spi8_pins_a: spi8-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('Z', 2, AF3)>, /* SPI8_SCK */
+ <STM32_PINMUX('Z', 0, AF3)>; /* SPI8_MOSI */
+ drive-push-pull;
+ bias-disable;
+ slew-rate = <1>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('Z', 1, AF3)>; /* SPI8_MISO */
+ bias-disable;
+ };
+ };
+
+ spi8_sleep_pins_a: spi8-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('Z', 2, ANALOG)>, /* SPI8_SCK */
+ <STM32_PINMUX('Z', 0, ANALOG)>, /* SPI8_MOSI */
+ <STM32_PINMUX('Z', 1, ANALOG)>; /* SPI8_MISO */
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
index 5dd4f3580a60..dcd0656d67a8 100644
--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
@@ -3,7 +3,9 @@
* Copyright (C) STMicroelectronics 2023 - All Rights Reserved
* Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
*/
+#include <dt-bindings/clock/st,stm32mp25-rcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/st,stm32mp25-rcc.h>
/ {
#address-cells = <2>;
@@ -35,34 +37,16 @@
};
clocks {
- ck_flexgen_08: ck-flexgen-08 {
+ clk_dsi_txbyte: txbyteclk {
#clock-cells = <0>;
compatible = "fixed-clock";
- clock-frequency = <100000000>;
+ clock-frequency = <0>;
};
- ck_flexgen_51: ck-flexgen-51 {
+ clk_rcbsec: clk-rcbsec {
#clock-cells = <0>;
compatible = "fixed-clock";
- clock-frequency = <200000000>;
- };
-
- ck_icn_ls_mcu: ck-icn-ls-mcu {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <200000000>;
- };
-
- ck_icn_p_vdec: ck-icn-p-vdec {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <200000000>;
- };
-
- ck_icn_p_venc: ck-icn-p-venc {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <200000000>;
+ clock-frequency = <64000000>;
};
};
@@ -109,10 +93,10 @@
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&intc>;
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
always-on;
};
@@ -123,18 +107,220 @@
interrupt-parent = <&intc>;
ranges = <0x0 0x0 0x0 0x80000000>;
- rifsc: rifsc-bus@42080000 {
- compatible = "simple-bus";
+ rifsc: bus@42080000 {
+ compatible = "st,stm32mp25-rifsc", "simple-bus";
reg = <0x42080000 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
+ #access-controller-cells = <1>;
ranges;
+ spi2: spi@400b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x400b0000 0x400>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI2>;
+ resets = <&rcc SPI2_R>;
+ access-controllers = <&rifsc 23>;
+ status = "disabled";
+ };
+
+ spi3: spi@400c0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x400c0000 0x400>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI3>;
+ resets = <&rcc SPI3_R>;
+ access-controllers = <&rifsc 24>;
+ status = "disabled";
+ };
+
usart2: serial@400e0000 {
compatible = "st,stm32h7-uart";
reg = <0x400e0000 0x400>;
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ck_flexgen_08>;
+ clocks = <&rcc CK_KER_USART2>;
+ access-controllers = <&rifsc 32>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@40120000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40120000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C1>;
+ resets = <&rcc I2C1_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 41>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@40130000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40130000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C2>;
+ resets = <&rcc I2C2_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 42>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@40140000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40140000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C3>;
+ resets = <&rcc I2C3_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 43>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@40150000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40150000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C4>;
+ resets = <&rcc I2C4_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 44>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@40160000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40160000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C5>;
+ resets = <&rcc I2C5_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 45>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@40170000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40170000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C6>;
+ resets = <&rcc I2C6_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 46>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@40180000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40180000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C7>;
+ resets = <&rcc I2C7_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 47>;
+ status = "disabled";
+ };
+
+ spi1: spi@40230000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40230000 0x400>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI1>;
+ resets = <&rcc SPI1_R>;
+ access-controllers = <&rifsc 22>;
+ status = "disabled";
+ };
+
+ spi4: spi@40240000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40240000 0x400>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI4>;
+ resets = <&rcc SPI4_R>;
+ access-controllers = <&rifsc 25>;
+ status = "disabled";
+ };
+
+ spi5: spi@40280000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40280000 0x400>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI5>;
+ resets = <&rcc SPI5_R>;
+ access-controllers = <&rifsc 26>;
+ status = "disabled";
+ };
+
+ spi6: spi@40350000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40350000 0x400>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI6>;
+ resets = <&rcc SPI6_R>;
+ access-controllers = <&rifsc 27>;
+ status = "disabled";
+ };
+
+ spi7: spi@40360000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40360000 0x400>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI7>;
+ resets = <&rcc SPI7_R>;
+ access-controllers = <&rifsc 28>;
+ status = "disabled";
+ };
+
+ spi8: spi@46020000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32mp25-spi";
+ reg = <0x46020000 0x400>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI8>;
+ resets = <&rcc SPI8_R>;
+ access-controllers = <&rifsc 29>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@46040000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x46040000 0x400>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C8>;
+ resets = <&rcc I2C8_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ access-controllers = <&rifsc 48>;
status = "disabled";
};
@@ -143,11 +329,13 @@
arm,primecell-periphid = <0x00353180>;
reg = <0x48220000 0x400>, <0x44230400 0x8>;
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ck_flexgen_51>;
+ clocks = <&rcc CK_KER_SDMMC1>;
clock-names = "apb_pclk";
+ resets = <&rcc SDMMC1_R>;
cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <120000000>;
+ access-controllers = <&rifsc 76>;
status = "disabled";
};
};
@@ -168,6 +356,186 @@
};
};
+ rcc: clock-controller@44200000 {
+ compatible = "st,stm32mp25-rcc";
+ reg = <0x44200000 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ clocks = <&scmi_clk CK_SCMI_HSE>,
+ <&scmi_clk CK_SCMI_HSI>,
+ <&scmi_clk CK_SCMI_MSI>,
+ <&scmi_clk CK_SCMI_LSE>,
+ <&scmi_clk CK_SCMI_LSI>,
+ <&scmi_clk CK_SCMI_HSE_DIV2>,
+ <&scmi_clk CK_SCMI_ICN_HS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_LS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_SDMMC>,
+ <&scmi_clk CK_SCMI_ICN_DDR>,
+ <&scmi_clk CK_SCMI_ICN_DISPLAY>,
+ <&scmi_clk CK_SCMI_ICN_HSL>,
+ <&scmi_clk CK_SCMI_ICN_NIC>,
+ <&scmi_clk CK_SCMI_ICN_VID>,
+ <&scmi_clk CK_SCMI_FLEXGEN_07>,
+ <&scmi_clk CK_SCMI_FLEXGEN_08>,
+ <&scmi_clk CK_SCMI_FLEXGEN_09>,
+ <&scmi_clk CK_SCMI_FLEXGEN_10>,
+ <&scmi_clk CK_SCMI_FLEXGEN_11>,
+ <&scmi_clk CK_SCMI_FLEXGEN_12>,
+ <&scmi_clk CK_SCMI_FLEXGEN_13>,
+ <&scmi_clk CK_SCMI_FLEXGEN_14>,
+ <&scmi_clk CK_SCMI_FLEXGEN_15>,
+ <&scmi_clk CK_SCMI_FLEXGEN_16>,
+ <&scmi_clk CK_SCMI_FLEXGEN_17>,
+ <&scmi_clk CK_SCMI_FLEXGEN_18>,
+ <&scmi_clk CK_SCMI_FLEXGEN_19>,
+ <&scmi_clk CK_SCMI_FLEXGEN_20>,
+ <&scmi_clk CK_SCMI_FLEXGEN_21>,
+ <&scmi_clk CK_SCMI_FLEXGEN_22>,
+ <&scmi_clk CK_SCMI_FLEXGEN_23>,
+ <&scmi_clk CK_SCMI_FLEXGEN_24>,
+ <&scmi_clk CK_SCMI_FLEXGEN_25>,
+ <&scmi_clk CK_SCMI_FLEXGEN_26>,
+ <&scmi_clk CK_SCMI_FLEXGEN_27>,
+ <&scmi_clk CK_SCMI_FLEXGEN_28>,
+ <&scmi_clk CK_SCMI_FLEXGEN_29>,
+ <&scmi_clk CK_SCMI_FLEXGEN_30>,
+ <&scmi_clk CK_SCMI_FLEXGEN_31>,
+ <&scmi_clk CK_SCMI_FLEXGEN_32>,
+ <&scmi_clk CK_SCMI_FLEXGEN_33>,
+ <&scmi_clk CK_SCMI_FLEXGEN_34>,
+ <&scmi_clk CK_SCMI_FLEXGEN_35>,
+ <&scmi_clk CK_SCMI_FLEXGEN_36>,
+ <&scmi_clk CK_SCMI_FLEXGEN_37>,
+ <&scmi_clk CK_SCMI_FLEXGEN_38>,
+ <&scmi_clk CK_SCMI_FLEXGEN_39>,
+ <&scmi_clk CK_SCMI_FLEXGEN_40>,
+ <&scmi_clk CK_SCMI_FLEXGEN_41>,
+ <&scmi_clk CK_SCMI_FLEXGEN_42>,
+ <&scmi_clk CK_SCMI_FLEXGEN_43>,
+ <&scmi_clk CK_SCMI_FLEXGEN_44>,
+ <&scmi_clk CK_SCMI_FLEXGEN_45>,
+ <&scmi_clk CK_SCMI_FLEXGEN_46>,
+ <&scmi_clk CK_SCMI_FLEXGEN_47>,
+ <&scmi_clk CK_SCMI_FLEXGEN_48>,
+ <&scmi_clk CK_SCMI_FLEXGEN_49>,
+ <&scmi_clk CK_SCMI_FLEXGEN_50>,
+ <&scmi_clk CK_SCMI_FLEXGEN_51>,
+ <&scmi_clk CK_SCMI_FLEXGEN_52>,
+ <&scmi_clk CK_SCMI_FLEXGEN_53>,
+ <&scmi_clk CK_SCMI_FLEXGEN_54>,
+ <&scmi_clk CK_SCMI_FLEXGEN_55>,
+ <&scmi_clk CK_SCMI_FLEXGEN_56>,
+ <&scmi_clk CK_SCMI_FLEXGEN_57>,
+ <&scmi_clk CK_SCMI_FLEXGEN_58>,
+ <&scmi_clk CK_SCMI_FLEXGEN_59>,
+ <&scmi_clk CK_SCMI_FLEXGEN_60>,
+ <&scmi_clk CK_SCMI_FLEXGEN_61>,
+ <&scmi_clk CK_SCMI_FLEXGEN_62>,
+ <&scmi_clk CK_SCMI_FLEXGEN_63>,
+ <&scmi_clk CK_SCMI_ICN_APB1>,
+ <&scmi_clk CK_SCMI_ICN_APB2>,
+ <&scmi_clk CK_SCMI_ICN_APB3>,
+ <&scmi_clk CK_SCMI_ICN_APB4>,
+ <&scmi_clk CK_SCMI_ICN_APBDBG>,
+ <&scmi_clk CK_SCMI_TIMG1>,
+ <&scmi_clk CK_SCMI_TIMG2>,
+ <&scmi_clk CK_SCMI_PLL3>,
+ <&clk_dsi_txbyte>;
+ };
+
+ exti1: interrupt-controller@44220000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x44220000 0x400>;
+ interrupts-extended =
+ <&intc GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_40 */
+ <&intc GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_60 */
+ <&intc GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_70 */
+ <0>,
+ <&intc GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_80 */
+ <0>,
+ <0>,
+ <&intc GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
syscfg: syscon@44230000 {
compatible = "st,stm32mp25-syscfg", "syscon";
reg = <0x44230000 0x10000>;
@@ -178,6 +546,8 @@
#size-cells = <1>;
compatible = "st,stm32mp257-pinctrl";
ranges = <0 0x44240000 0xa0400>;
+ interrupt-parent = <&exti1>;
+ st,syscfg = <&exti1 0x60 0xff>;
pins-are-numbered;
gpioa: gpio@44240000 {
@@ -186,7 +556,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOA>;
st,bank-name = "GPIOA";
status = "disabled";
};
@@ -197,7 +567,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x10000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOB>;
st,bank-name = "GPIOB";
status = "disabled";
};
@@ -208,7 +578,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x20000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOC>;
st,bank-name = "GPIOC";
status = "disabled";
};
@@ -219,7 +589,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x30000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOD>;
st,bank-name = "GPIOD";
status = "disabled";
};
@@ -230,7 +600,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x40000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOE>;
st,bank-name = "GPIOE";
status = "disabled";
};
@@ -241,7 +611,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x50000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOF>;
st,bank-name = "GPIOF";
status = "disabled";
};
@@ -252,7 +622,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x60000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOG>;
st,bank-name = "GPIOG";
status = "disabled";
};
@@ -263,7 +633,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x70000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOH>;
st,bank-name = "GPIOH";
status = "disabled";
};
@@ -274,7 +644,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x80000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOI>;
st,bank-name = "GPIOI";
status = "disabled";
};
@@ -285,7 +655,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x90000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOJ>;
st,bank-name = "GPIOJ";
status = "disabled";
};
@@ -296,7 +666,7 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0xa0000 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOK>;
st,bank-name = "GPIOK";
status = "disabled";
};
@@ -307,6 +677,8 @@
#size-cells = <1>;
compatible = "st,stm32mp257-z-pinctrl";
ranges = <0 0x46200000 0x400>;
+ interrupt-parent = <&exti1>;
+ st,syscfg = <&exti1 0x60 0xff>;
pins-are-numbered;
gpioz: gpio@46200000 {
@@ -315,12 +687,91 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x400>;
- clocks = <&ck_icn_ls_mcu>;
+ clocks = <&scmi_clk CK_SCMI_GPIOZ>;
st,bank-name = "GPIOZ";
st,bank-ioport = <11>;
status = "disabled";
};
};
+
+ exti2: interrupt-controller@46230000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x46230000 0x400>;
+ interrupts-extended =
+ <&intc GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_40 */
+ <0>,
+ <0>,
+ <&intc GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <&intc GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_60 */
+ <&intc GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>; /* EXTI_70 */
+ };
};
};
diff --git a/arch/arm64/boot/dts/st/stm32mp253.dtsi b/arch/arm64/boot/dts/st/stm32mp253.dtsi
index af48e82efe8a..029f88981961 100644
--- a/arch/arm64/boot/dts/st/stm32mp253.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp253.dtsi
@@ -20,4 +20,11 @@
<GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>;
interrupt-affinity = <&cpu0>, <&cpu1>;
};
+
+ timer {
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
};
diff --git a/arch/arm64/boot/dts/st/stm32mp255.dtsi b/arch/arm64/boot/dts/st/stm32mp255.dtsi
index 17f197c5b22b..f689b47c5010 100644
--- a/arch/arm64/boot/dts/st/stm32mp255.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp255.dtsi
@@ -5,22 +5,21 @@
*/
#include "stm32mp253.dtsi"
-/ {
- soc@0 {
- rifsc: rifsc-bus@42080000 {
- vdec: vdec@480d0000 {
- compatible = "st,stm32mp25-vdec";
- reg = <0x480d0000 0x3c8>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ck_icn_p_vdec>;
- };
+&rifsc {
+ vdec: vdec@480d0000 {
+ compatible = "st,stm32mp25-vdec";
+ reg = <0x480d0000 0x3c8>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_VDEC>;
+ access-controllers = <&rifsc 89>;
- venc: venc@480e0000 {
- compatible = "st,stm32mp25-venc";
- reg = <0x480e0000 0x800>;
- interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ck_icn_ls_mcu>;
- };
- };
};
-};
+
+ venc: venc@480e0000 {
+ compatible = "st,stm32mp25-venc";
+ reg = <0x480e0000 0x800>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_VENC>;
+ access-controllers = <&rifsc 90>;
+ };
+};
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
index b2d3afb15758..27b7360e5dba 100644
--- a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
+++ b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
@@ -55,6 +55,26 @@
status = "okay";
};
+&i2c2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_pins_a>;
+ pinctrl-1 = <&i2c2_sleep_pins_a>;
+ i2c-scl-rising-time-ns = <100>;
+ i2c-scl-falling-time-ns = <13>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c8 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c8_pins_a>;
+ pinctrl-1 = <&i2c8_sleep_pins_a>;
+ i2c-scl-rising-time-ns = <57>;
+ i2c-scl-falling-time-ns = <7>;
+ clock-frequency = <400000>;
+ status = "disabled";
+};
+
&sdmmc1 {
pinctrl-names = "default", "opendrain", "sleep";
pinctrl-0 = <&sdmmc1_b4_pins_a>;
@@ -68,6 +88,20 @@
status = "okay";
};
+&spi3 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi3_pins_a>;
+ pinctrl-1 = <&spi3_sleep_pins_a>;
+ status = "disabled";
+};
+
+&spi8 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi8_pins_a>;
+ pinctrl-1 = <&spi8_sleep_pins_a>;
+ status = "disabled";
+};
+
&usart2 {
pinctrl-names = "default", "idle", "sleep";
pinctrl-0 = <&usart2_pins_a>;
diff --git a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
index 53d616c3cfed..71e4bfcc9e81 100644
--- a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
+++ b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
@@ -88,7 +88,7 @@
};
pmu {
- compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/tesla/fsd.dtsi b/arch/arm64/boot/dts/tesla/fsd.dtsi
index 047a83cee603..690b4ed9c29b 100644
--- a/arch/arm64/boot/dts/tesla/fsd.dtsi
+++ b/arch/arm64/boot/dts/tesla/fsd.dtsi
@@ -304,7 +304,7 @@
};
arm-pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a72-pmu";
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 9a722c2473fb..2c327cc320cf 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -48,6 +48,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t-pcie.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t-usb3.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-rdk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-gpio-fan.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-tqma64xxl-mbax4xxl.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
@@ -131,6 +132,8 @@ k3-am62p5-sk-csi2-tevi-ov5640-dtbs := k3-am62p5-sk.dtb \
k3-am62x-sk-csi2-tevi-ov5640.dtbo
k3-am642-evm-icssg1-dualemac-dtbs := \
k3-am642-evm.dtb k3-am642-evm-icssg1-dualemac.dtbo
+k3-am642-phyboard-electra-gpio-fan-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-gpio-fan.dtbo
k3-am642-tqma64xxl-mbax4xxl-sdcard-dtbs := \
k3-am642-tqma64xxl-mbax4xxl.dtb k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
k3-am642-tqma64xxl-mbax4xxl-wlan-dtbs := \
@@ -161,19 +164,21 @@ dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
k3-am642-evm-icssg1-dualemac.dtb \
k3-am642-tqma64xxl-mbax4xxl-sdcard.dtb \
k3-am642-tqma64xxl-mbax4xxl-wlan.dtb \
- k3-am68-sk-base-board-csi2-dual-imx219-dtbs \
- k3-am69-sk-csi2-dual-imx219-dtbs \
+ k3-am68-sk-base-board-csi2-dual-imx219.dtb \
+ k3-am69-sk-csi2-dual-imx219.dtb \
k3-j721e-evm-pcie0-ep.dtb \
- k3-j721e-sk-csi2-dual-imx219-dtbs \
+ k3-j721e-sk-csi2-dual-imx219.dtb \
k3-j721s2-evm-pcie1-ep.dtb
# Enable support for device-tree overlays
DTC_FLAGS_k3-am625-beagleplay += -@
+DTC_FLAGS_k3-am625-phyboard-lyra-rdk += -@
DTC_FLAGS_k3-am625-sk += -@
DTC_FLAGS_k3-am62-lp-sk += -@
DTC_FLAGS_k3-am62a7-sk += -@
DTC_FLAGS_k3-am62p5-sk += -@
DTC_FLAGS_k3-am642-evm += -@
+DTC_FLAGS_k3-am642-phyboard-electra-rdk += -@
DTC_FLAGS_k3-am642-tqma64xxl-mbax4xxl += -@
DTC_FLAGS_k3-am6548-iot2050-advanced-m2 += -@
DTC_FLAGS_k3-am68-sk-base-board += -@
diff --git a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
index c4149059a4c5..9a17bd3e59c9 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
@@ -166,7 +166,6 @@
interrupt-parent = <&gic500>;
interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
- ti,power-button;
regulators {
buck1_reg: buck1 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index e9cffca073ef..448a59dc53a7 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -619,10 +619,11 @@
usbss0: dwc3-usb@f900000 {
compatible = "ti,am62-usb";
- reg = <0x00 0x0f900000 0x00 0x800>;
+ reg = <0x00 0x0f900000 0x00 0x800>,
+ <0x00 0x0f908000 0x00 0x400>;
clocks = <&k3_clks 161 3>;
clock-names = "ref";
- ti,syscon-phy-pll-refclk = <&wkup_conf 0x4008>;
+ ti,syscon-phy-pll-refclk = <&usb0_phy_ctrl 0x0>;
#address-cells = <2>;
#size-cells = <2>;
power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
@@ -644,10 +645,11 @@
usbss1: dwc3-usb@f910000 {
compatible = "ti,am62-usb";
- reg = <0x00 0x0f910000 0x00 0x800>;
+ reg = <0x00 0x0f910000 0x00 0x800>,
+ <0x00 0x0f918000 0x00 0x400>;
clocks = <&k3_clks 162 3>;
clock-names = "ref";
- ti,syscon-phy-pll-refclk = <&wkup_conf 0x4018>;
+ ti,syscon-phy-pll-refclk = <&usb1_phy_ctrl 0x0>;
#address-cells = <2>;
#size-cells = <2>;
power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
index 6c4cec8728e4..e8f4d136e5df 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
@@ -22,6 +22,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&codec_dai>;
simple-audio-card,name = "verdin-wm8904";
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,routing =
"Headphone Jack", "HPOUTL",
"Headphone Jack", "HPOUTR",
@@ -35,7 +36,6 @@
"Line", "Line In Jack";
codec_dai: simple-audio-card,codec {
- clocks = <&audio_refclk1>;
sound-dai = <&wm8904_1a>;
};
@@ -43,6 +43,15 @@
sound-dai = <&mcasp0>;
};
};
+
+ reg_usb_hub: regulator-usb-hub {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ regulator-name = "HUB_PWR_EN";
+ };
};
/* Verdin ETHs */
@@ -160,7 +169,8 @@
pinctrl-0 = <&pinctrl_gpio_1>,
<&pinctrl_gpio_2>,
<&pinctrl_gpio_3>,
- <&pinctrl_gpio_4>;
+ <&pinctrl_gpio_4>,
+ <&pinctrl_pcie_1_reset>;
};
/* Verdin I2C_3_HDMI */
@@ -183,6 +193,11 @@
status = "okay";
};
+/* Do not force CTRL_SLEEP_MOCI# always enabled */
+&reg_force_sleep_moci {
+ status = "disabled";
+};
+
/* Verdin SD_1 */
&sdhci1 {
status = "okay";
@@ -203,7 +218,15 @@
};
&usb1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
+
+ usb-hub@1 {
+ compatible = "usb424,2744";
+ reg = <1>;
+ vdd-supply = <&reg_usb_hub>;
+ };
};
/* Verdin CTRL_WAKE1_MICO# */
@@ -211,6 +234,11 @@
status = "okay";
};
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
/* Verdin UART_2 */
&wkup_uart0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
index be62648e7818..74eec1a1abca 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
@@ -181,7 +181,8 @@
pinctrl-0 = <&pinctrl_gpio_1>,
<&pinctrl_gpio_2>,
<&pinctrl_gpio_3>,
- <&pinctrl_gpio_4>;
+ <&pinctrl_gpio_4>,
+ <&pinctrl_pcie_1_reset>;
};
/* Verdin I2C_3_HDMI */
@@ -232,6 +233,11 @@
status = "okay";
};
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
/* Verdin UART_2 */
&wkup_uart0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-mallow.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-mallow.dtsi
index 77b1beb638ad..754216d8ac14 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-mallow.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-mallow.dtsi
@@ -81,10 +81,10 @@
&main_gpio0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ctrl_sleep_moci>,
- <&pinctrl_gpio_1>,
- <&pinctrl_gpio_2>,
- <&pinctrl_gpio_3>,
- <&pinctrl_gpio_4>;
+ <&pinctrl_gpio_5>,
+ <&pinctrl_gpio_6>,
+ <&pinctrl_gpio_7>,
+ <&pinctrl_gpio_8>;
};
/* Verdin I2C_1 */
@@ -149,6 +149,15 @@
status = "okay";
};
+&mcu_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_1>,
+ <&pinctrl_gpio_2>,
+ <&pinctrl_gpio_3>,
+ <&pinctrl_gpio_4>,
+ <&pinctrl_pcie_1_reset>;
+};
+
/* Verdin I2C_3_HDMI */
&mcu_i2c0 {
status = "okay";
@@ -192,6 +201,11 @@
status = "okay";
};
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
/* Verdin UART_2 */
&wkup_uart0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
index 997dfafd27eb..7372d392ec8a 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-yavia.dtsi
@@ -159,7 +159,8 @@
pinctrl-0 = <&pinctrl_gpio_1>,
<&pinctrl_gpio_2>,
<&pinctrl_gpio_3>,
- <&pinctrl_gpio_4>;
+ <&pinctrl_gpio_4>,
+ <&pinctrl_pcie_1_reset>;
};
/* Verdin I2C_3_HDMI */
@@ -205,6 +206,11 @@
status = "okay";
};
+/* Verdin PCIE_1_RESET# */
+&verdin_pcie_1_reset_hog {
+ status = "okay";
+};
+
/* Verdin UART_2 */
&wkup_uart0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index e8d8857ad51f..2038c5e04639 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -76,7 +76,7 @@
memory@80000000 {
device_type = "memory";
- reg = <0x00000000 0x80000000 0x00000000 0x40000000>; /* 1G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>; /* 2G RAM */
};
opp-table {
@@ -138,6 +138,22 @@
vin-supply = <&reg_1v8>;
};
+ /*
+ * By default we enable CTRL_SLEEP_MOCI#, this is required to have
+ * peripherals on the carrier board powered.
+ * If more granularity or power saving is required this can be disabled
+ * in the carrier board device tree files.
+ */
+ reg_force_sleep_moci: regulator-force-sleep-moci {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
+ gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "CTRL_SLEEP_MOCI#";
+ };
+
/* Verdin SD_1 Power Supply */
reg_sdhc1_vmmc: regulator-sdhci1 {
compatible = "regulator-fixed";
@@ -457,6 +473,13 @@
>;
};
+ /* Verdin SD_1_CD# as GPIO */
+ pinctrl_sd1_cd_gpio: main-gpio1-48-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x240, PIN_INPUT_PULLUP, 7) /* (D17) MMC1_SDCD.GPIO1_48 */ /* SODIMM 84 */
+ >;
+ };
+
/* Verdin DSI_1_INT# (pulled-up as active-low) */
pinctrl_dsi1_int: main-gpio1-49-default-pins {
pinctrl-single,pins = <
@@ -571,7 +594,6 @@
AM62X_IOPAD(0x22c, PIN_INPUT, 0) /* (B21) MMC1_DAT1 */ /* SODIMM 82 */
AM62X_IOPAD(0x228, PIN_INPUT, 0) /* (C21) MMC1_DAT2 */ /* SODIMM 70 */
AM62X_IOPAD(0x224, PIN_INPUT, 0) /* (D22) MMC1_DAT3 */ /* SODIMM 72 */
- AM62X_IOPAD(0x240, PIN_INPUT_PULLUP, 0) /* (D17) MMC1_SDCD */ /* SODIMM 84 */
>;
};
@@ -979,14 +1001,6 @@
"",
"",
"";
-
- verdin_ctrl_sleep_moci: ctrl-sleep-moci-hog {
- gpio-hog;
- /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
- gpios = <31 GPIO_ACTIVE_HIGH>;
- line-name = "CTRL_SLEEP_MOCI#";
- output-high;
- };
};
&main_gpio1 {
@@ -1407,6 +1421,15 @@
"",
"",
"";
+
+ verdin_pcie_1_reset_hog: pcie-1-reset-hog {
+ gpio-hog;
+ /* Verdin PCIE_1_RESET# (SODIMM 244) */
+ gpios = <0 GPIO_ACTIVE_LOW>;
+ line-name = "PCIE_1_RESET#";
+ output-low;
+ status = "disabled";
+ };
};
/* Verdin CAN_2 */
@@ -1441,10 +1464,12 @@
/* Verdin SD_1 */
&sdhci1 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sdhci1>;
+ pinctrl-0 = <&pinctrl_sdhci1>, <&pinctrl_sd1_cd_gpio>;
+ cd-gpios = <&main_gpio1 48 GPIO_ACTIVE_LOW>;
disable-wp;
vmmc-supply = <&reg_sdhc1_vmmc>;
vqmmc-supply = <&reg_sdhc1_vqmmc>;
+ ti,fails-without-test-cd;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
index 23ce1bfda8d6..66ddf2dc51af 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
@@ -21,6 +21,16 @@
compatible = "ti,am654-chipid";
reg = <0x14 0x4>;
};
+
+ usb0_phy_ctrl: syscon@4008 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4008 0x4>;
+ };
+
+ usb1_phy_ctrl: syscon@4018 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4018 0x4>;
+ };
};
target-module@2b300050 {
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index a34e0df2ab86..18e3070a8683 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -82,6 +82,17 @@
};
};
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_en_pins_default>;
+ /* Internal power on time(Figure 8-3) * 2 */
+ post-power-on-delay-ms = <10>;
+ /* Re-enable time(Figure 8-2) + 20uS */
+ power-off-delay-us = <80>;
+ reset-gpios = <&main_gpio0 38 GPIO_ACTIVE_LOW>;
+ };
+
vsys_5v0: regulator-1 {
bootph-all;
compatible = "regulator-fixed";
@@ -104,20 +115,6 @@
regulator-boot-on;
};
- wlan_en: regulator-3 {
- /* OUTPUT of SN74AVC2T244DQMR */
- compatible = "regulator-fixed";
- regulator-name = "wlan_en";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- enable-active-high;
- regulator-always-on;
- vin-supply = <&vdd_3v3>;
- gpio = <&main_gpio0 38 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&wifi_en_pins_default>;
- };
-
vdd_3v3_sd: regulator-4 {
/* output of TPS22918DBVR-U21 */
bootph-all;
@@ -292,6 +289,8 @@
pinctrl-single,pins = <
AM62X_IOPAD(0x0160, PIN_OUTPUT, 0) /* (AD24) MDIO0_MDC */
AM62X_IOPAD(0x015c, PIN_INPUT, 0) /* (AB22) MDIO0_MDIO */
+ AM62X_IOPAD(0x003c, PIN_INPUT, 7) /* (M25) GPMC0_AD0.GPIO0_15 */
+ AM62X_IOPAD(0x018c, PIN_INPUT, 7) /* (AC21) RGMII2_RD2.GPIO1_5 */
>;
};
@@ -383,7 +382,6 @@
AM62X_IOPAD(0x016c, PIN_INPUT, 1) /* (Y18) RGMII2_TD0.RMII2_TXD0 */
AM62X_IOPAD(0x0170, PIN_INPUT, 1) /* (AA18) RGMII2_TD1.RMII2_TXD1 */
AM62X_IOPAD(0x0164, PIN_INPUT, 1) /* (AA19) RGMII2_TX_CTL.RMII2_TX_EN */
- AM62X_IOPAD(0x018c, PIN_OUTPUT, 7) /* (AC21) RGMII2_RD2.GPIO1_5 */
AM62X_IOPAD(0x0190, PIN_INPUT, 7) /* (AE22) RGMII2_RD3.GPIO1_6 */
AM62X_IOPAD(0x01f0, PIN_OUTPUT, 5) /* (A18) EXT_REFCLK1.CLKOUT0 */
>;
@@ -597,6 +595,9 @@
cpsw3g_phy0: ethernet-phy@0 {
reg = <0>;
+ reset-gpios = <&main_gpio0 15 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <50000>;
};
cpsw3g_phy1: ethernet-phy@1 {
@@ -615,7 +616,7 @@
"USR0", "USR1", "USR2", "USR3", "", "", "USR4", /* 3-9 */
"EEPROM_WP", /* 10 */
"CSI2_CAMERA_GPIO1", "CSI2_CAMERA_GPIO2", /* 11-12 */
- "CC1352P7_BOOT", "CC1352P7_RSTN", "", "", "", /* 13-17 */
+ "CC1352P7_BOOT", "CC1352P7_RSTN", "GBE_RSTN", "", "", /* 13-17 */
"USR_BUTTON", "", "", "", "", "", "", "", "", /* 18-26 */
"", "", "", "", "", "", "", "", "", "HDMI_INT", /* 27-36 */
"", "VDD_WLAN_EN", "", "", "WL_IRQ", "GBE_INTN",/* 37-42 */
@@ -839,13 +840,13 @@
};
&sdhci2 {
- vmmc-supply = <&wlan_en>;
pinctrl-names = "default";
pinctrl-0 = <&wifi_pins_default>, <&wifi_32k_clk>;
non-removable;
ti,fails-without-test-cd;
cap-power-off-card;
keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
assigned-clocks = <&k3_clks 157 158>;
assigned-clock-parents = <&k3_clks 157 160>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
index a83a90497857..50d2573c840e 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
@@ -31,7 +31,7 @@
can_tc1: can-phy0 {
compatible = "ti,tcan1042";
#phy-cells = <0>;
- max-bitrate = <5000000>;
+ max-bitrate = <8000000>;
standby-gpios = <&gpio_exp 1 GPIO_ACTIVE_HIGH>;
};
@@ -66,6 +66,35 @@
};
};
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "phyBOARD-Lyra";
+ simple-audio-card,widgets =
+ "Microphone", "Mic Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC3R", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "External Speaker", "SPOP",
+ "External Speaker", "SPOM";
+ simple-audio-card,format = "dsp_b";
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+ simple-audio-card,bitclock-inversion;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp2>;
+ };
+
+ sound_master: simple-audio-card,codec {
+ sound-dai = <&audio_codec>;
+ clocks = <&audio_refclk1>;
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -82,6 +111,15 @@
};
};
+ vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vcc_3v3_mmc: regulator-vcc-3v3-mmc {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3_MMC";
@@ -90,9 +128,24 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vcc_3v3_sw: regulator-vcc-3v3-sw {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3_SW";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
&main_pmx0 {
+ audio_ext_refclk1_pins_default: audio-ext-refclk1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0a0, PIN_OUTPUT, 1) /* (K25) GPMC0_WPn.AUDIO_EXT_REFCLK1 */
+ >;
+ };
+
gpio_keys_pins_default: gpio-keys-default-pins {
pinctrl-single,pins = <
AM62X_IOPAD(0x1d4, PIN_INPUT, 7) /* (B15) UART0_RTSn.GPIO1_23 */
@@ -150,6 +203,15 @@
>;
};
+ main_mcasp2_pins_default: main-mcasp2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x070, PIN_INPUT, 3) /* (T24) GPMC0_AD13.MCASP2_ACLKX */
+ AM62X_IOPAD(0x06c, PIN_INPUT, 3) /* (T22) GPMC0_AD12.MCASP2_AFSX */
+ AM62X_IOPAD(0x064, PIN_OUTPUT, 3) /* (T25) GPMC0_AD10.MCASP2_AXR2 */
+ AM62X_IOPAD(0x068, PIN_INPUT, 3) /* (R21) GPMC0_AD11.MCASP2_AXR3 */
+ >;
+ };
+
main_mmc1_pins_default: main-mmc1-default-pins {
pinctrl-single,pins = <
AM62X_IOPAD(0x23c, PIN_INPUT_PULLUP, 0) /* (A21) MMC1_CMD */
@@ -254,6 +316,21 @@
clock-frequency = <100000>;
status = "okay";
+ audio_codec: audio-codec@18 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_ext_refclk1_pins_default>;
+
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic3007";
+ reg = <0x18>;
+ ai3x-micbias-vg = <2>;
+
+ AVDD-supply = <&vcc_3v3_sw>;
+ IOVDD-supply = <&vcc_3v3_sw>;
+ DRVDD-supply = <&vcc_3v3_sw>;
+ DVDD-supply = <&vcc_1v8>;
+ };
+
gpio_exp: gpio-expander@21 {
pinctrl-names = "default";
pinctrl-0 = <&gpio_exp_int_pins_default>;
@@ -271,6 +348,24 @@
"GPIO6_ETH1_USER_RESET", "GPIO7_AUDIO_USER_RESET";
};
+ usb-pd@22 {
+ compatible = "ti,tps6598x";
+ reg = <0x22>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ self-powered;
+ data-role = "dual";
+ power-role = "sink";
+ port {
+ usb_con_hs: endpoint {
+ remote-endpoint = <&typec_hs>;
+ };
+ };
+ };
+ };
+
sii9022: bridge-hdmi@39 {
compatible = "sil,sii9022";
reg = <0x39>;
@@ -329,6 +424,28 @@
status = "okay";
};
+&mcasp2 {
+ #sound-dai-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcasp2_pins_default>;
+
+ /* MCASP_IIS_MODE */
+ op-mode = <0>;
+ tdm-slots = <2>;
+
+ /* 0: INACTIVE, 1: TX, 2: RX */
+ serial-dir = <
+ 0 0 1 2
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ status = "okay";
+};
+
&sdhci1 {
vmmc-supply = <&vcc_3v3_mmc>;
vqmmc-supply = <&vddshv5_sdio>;
@@ -350,7 +467,13 @@
};
&usb0 {
- dr_mode = "peripheral";
+ usb-role-switch;
+
+ port {
+ typec_hs: endpoint {
+ remote-endpoint = <&usb_con_hs>;
+ };
+ };
};
&usb1 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index aa1e057082f0..bf9c2d9c6439 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -573,7 +573,6 @@
ti,itap-del-sel-sd-hs = <0x0>;
ti,itap-del-sel-sdr12 = <0x0>;
ti,itap-del-sel-sdr25 = <0x0>;
- no-1-8-v;
status = "disabled";
};
@@ -597,16 +596,16 @@
ti,itap-del-sel-sd-hs = <0x0>;
ti,itap-del-sel-sdr12 = <0x0>;
ti,itap-del-sel-sdr25 = <0x0>;
- no-1-8-v;
status = "disabled";
};
usbss0: dwc3-usb@f900000 {
compatible = "ti,am62-usb";
- reg = <0x00 0x0f900000 0x00 0x800>;
+ reg = <0x00 0x0f900000 0x00 0x800>,
+ <0x00 0x0f908000 0x00 0x400>;
clocks = <&k3_clks 161 3>;
clock-names = "ref";
- ti,syscon-phy-pll-refclk = <&wkup_conf 0x4008>;
+ ti,syscon-phy-pll-refclk = <&usb0_phy_ctrl 0x0>;
#address-cells = <2>;
#size-cells = <2>;
power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
@@ -621,15 +620,18 @@
interrupt-names = "host", "peripheral";
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
};
};
usbss1: dwc3-usb@f910000 {
compatible = "ti,am62-usb";
- reg = <0x00 0x0f910000 0x00 0x800>;
+ reg = <0x00 0x0f910000 0x00 0x800>,
+ <0x00 0x0f918000 0x00 0x400>;
clocks = <&k3_clks 162 3>;
clock-names = "ref";
- ti,syscon-phy-pll-refclk = <&wkup_conf 0x4018>;
+ ti,syscon-phy-pll-refclk = <&usb1_phy_ctrl 0x0>;
#address-cells = <2>;
#size-cells = <2>;
power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
@@ -644,6 +646,8 @@
interrupt-names = "host", "peripheral";
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
};
};
@@ -1051,4 +1055,11 @@
#size-cells = <0>;
};
};
+
+ vpu: video-codec@30210000 {
+ compatible = "ti,j721s2-wave521c", "cnm,wave521c";
+ reg = <0x00 0x30210000 0x00 0x10000>;
+ clocks = <&k3_clks 204 2>;
+ power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
index f7bec484705a..98043e9aa316 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
@@ -17,6 +17,16 @@
compatible = "ti,am654-chipid";
reg = <0x14 0x4>;
};
+
+ usb0_phy_ctrl: syscon@4008 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4008 0x4>;
+ };
+
+ usb1_phy_ctrl: syscon@4018 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4018 0x4>;
+ };
};
wkup_uart0: serial@2b300000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index f241637a5642..fa43cd0b631e 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -113,6 +113,20 @@
regulator-boot-on;
};
+ vddshv_sdio: regulator-5 {
+ compatible = "regulator-gpio";
+ regulator-name = "vddshv_sdio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vddshv_sdio_pins_default>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&ldo1>;
+ gpios = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -342,6 +356,12 @@
AM62AX_IOPAD(0x01d4, PIN_INPUT, 7) /* (C15) UART0_RTSn.GPIO1_23 */
>;
};
+
+ vddshv_sdio_pins_default: vddshv-sdio-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x07c, PIN_OUTPUT, 7) /* (M19) GPMC0_CLK.GPIO0_31 */
+ >;
+ };
};
&mcu_pmx0 {
@@ -580,6 +600,7 @@
/* SD/MMC */
status = "okay";
vmmc-supply = <&vdd_mmc1>;
+ vqmmc-supply = <&vddshv_sdio>;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc1_pins_default>;
disable-wp;
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
index 7337a9e13535..900d1f9530a2 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
@@ -635,6 +635,58 @@
status = "disabled";
};
+ usbss0: usb@f900000 {
+ compatible = "ti,am62-usb";
+ reg = <0x00 0x0f900000 0x00 0x800>,
+ <0x00 0x0f908000 0x00 0x400>;
+ clocks = <&k3_clks 161 3>;
+ clock-names = "ref";
+ ti,syscon-phy-pll-refclk = <&usb0_phy_ctrl 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
+ ranges;
+ status = "disabled";
+
+ usb0: usb@31000000 {
+ compatible = "snps,dwc3";
+ reg = <0x00 0x31000000 0x00 0x50000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
+ };
+ };
+
+ usbss1: usb@f910000 {
+ compatible = "ti,am62-usb";
+ reg = <0x00 0x0f910000 0x00 0x800>,
+ <0x00 0x0f918000 0x00 0x400>;
+ clocks = <&k3_clks 162 3>;
+ clock-names = "ref";
+ ti,syscon-phy-pll-refclk = <&usb1_phy_ctrl 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
+ ranges;
+ status = "disabled";
+
+ usb1: usb@31100000 {
+ compatible = "snps,dwc3";
+ reg = <0x00 0x31100000 0x00 0x50000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
+ };
+ };
+
fss: bus@fc00000 {
compatible = "simple-bus";
reg = <0x00 0x0fc00000 0x00 0x70000>;
@@ -673,6 +725,7 @@
assigned-clock-parents = <&k3_clks 13 11>;
clock-names = "fck";
power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
dmas = <&main_pktdma 0xc600 15>,
<&main_pktdma 0xc601 15>,
@@ -696,6 +749,7 @@
label = "port1";
phys = <&phy_gmii_sel 1>;
mac-address = [00 00 00 00 00 00];
+ status = "disabled";
};
cpsw_port2: port@2 {
@@ -704,6 +758,7 @@
label = "port2";
phys = <&phy_gmii_sel 2>;
mac-address = [00 00 00 00 00 00];
+ status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
index a84756c336d0..c71d9624ea27 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
@@ -18,6 +18,16 @@
reg = <0x14 0x4>;
bootph-all;
};
+
+ usb0_phy_ctrl: syscon@4008 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4008 0x4>;
+ };
+
+ usb1_phy_ctrl: syscon@4018 {
+ compatible = "ti,am62-usb-phy-ctrl", "syscon";
+ reg = <0x4018 0x4>;
+ };
};
wkup_uart0: serial@2b300000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index e86f34e835c1..6e7234659111 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -27,6 +27,8 @@
spi0 = &ospi0;
ethernet0 = &cpsw_port1;
ethernet1 = &cpsw_port2;
+ usb0 = &usb0;
+ usb1 = &usb1;
};
chosen {
@@ -297,6 +299,12 @@
bootph-all;
};
+ main_usb1_pins_default: main-usb1-default-pins {
+ pinctrl-single,pins = <
+ AM62PX_IOPAD(0x0258, PIN_INPUT, 0) /* (G21) USB1_DRVVBUS */
+ >;
+ };
+
main_wlirq_pins_default: main-wlirq-default-pins {
pinctrl-single,pins = <
AM62PX_IOPAD(0x0128, PIN_INPUT, 7) /* (K25) MMC2_SDWP.GPIO0_72 */
@@ -340,6 +348,36 @@
};
};
+&main_i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+ clock-frequency = <400000>;
+
+ typec_pd0: usb-power-controller@3f {
+ compatible = "ti,tps6598x";
+ reg = <0x3f>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ self-powered;
+ data-role = "dual";
+ power-role = "sink";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ usb_con_hs: endpoint {
+ remote-endpoint = <&usb0_hs_ep>;
+ };
+ };
+ };
+ };
+ };
+};
+
&main_i2c1 {
status = "okay";
pinctrl-names = "default";
@@ -431,16 +469,19 @@
pinctrl-names = "default";
pinctrl-0 = <&main_rgmii1_pins_default>,
<&main_rgmii2_pins_default>;
+ status = "okay";
};
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy0>;
+ status = "okay";
};
&cpsw_port2 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
+ status = "okay";
};
&cpsw3g_mdio {
@@ -463,6 +504,35 @@
};
};
+&usbss0 {
+ status = "okay";
+ ti,vbus-divider;
+};
+
+&usbss1 {
+ status = "okay";
+ ti,vbus-divider;
+};
+
+&usb0 {
+ usb-role-switch;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usb0_hs_ep: endpoint {
+ remote-endpoint = <&usb_con_hs>;
+ };
+ };
+};
+
+&usb1 {
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_usb1_pins_default>;
+};
+
&mcasp1 {
status = "okay";
#sound-dai-cells = <0>;
@@ -493,7 +563,7 @@
pinctrl-0 = <&ospi0_pins_default>;
bootph-all;
- flash@0{
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0x0>;
spi-tx-bus-width = <8>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index 53fe1d065ddb..e20e4ffd0f1f 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -473,7 +473,6 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
- current-speed = <115200>;
};
/* main_uart1 is reserved for firmware usage */
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-gpio-fan.dtso b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-gpio-fan.dtso
new file mode 100644
index 000000000000..5057658061b4
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-gpio-fan.dtso
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2024 PHYTEC America LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/thermal/thermal.h>
+#include "k3-pinctrl.h"
+
+&{/} {
+ fan: gpio-fan {
+ compatible = "gpio-fan";
+ gpio-fan,speed-map = <0 0 8600 1>;
+ gpios = <&main_gpio0 28 GPIO_ACTIVE_LOW>;
+ #cooling-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_fan_pins_default>;
+ };
+};
+
+&main_pmx0 {
+ gpio_fan_pins_default: gpio-fan-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x070, PIN_OUTPUT, 7) /* (V18) GPMC0_AD13.GPIO0_28 */
+ >;
+ };
+};
+
+&thermal_zones {
+ main0_thermal: main0-thermal {
+ trips {
+ main0_thermal_trip0: main0-thermal-trip {
+ temperature = <65000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&main0_thermal_trip0>;
+ cooling-device = <&fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
index 8237b8c815b8..6df331ccb970 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
@@ -42,7 +42,7 @@
pinctrl-names = "default";
pinctrl-0 = <&can_tc1_pins_default>;
#phy-cells = <0>;
- max-bitrate = <5000000>;
+ max-bitrate = <8000000>;
standby-gpios = <&main_gpio0 32 GPIO_ACTIVE_HIGH>;
};
@@ -51,7 +51,7 @@
pinctrl-names = "default";
pinctrl-0 = <&can_tc2_pins_default>;
#phy-cells = <0>;
- max-bitrate = <5000000>;
+ max-bitrate = <8000000>;
standby-gpios = <&main_gpio0 35 GPIO_ACTIVE_HIGH>;
};
@@ -275,7 +275,6 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
- current-speed = <115200>;
};
&main_uart1 {
@@ -283,7 +282,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_uart1_pins_default>;
uart-has-rtscts;
- current-speed = <115200>;
};
&sdhci1 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 67cd41bf806e..5b028b3a3192 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -381,7 +381,6 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
- current-speed = <115200>;
};
&main_uart1 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
index c50a585dd638..ef7897763ef8 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
@@ -43,9 +43,33 @@
};
&icssg0_eth {
- status = "disabled";
-};
+ compatible = "ti,am654-sr1-icssg-prueth";
-&icssg0_mdio {
- status = "disabled";
+ ti,prus = <&pru0_0>, <&rtu0_0>, <&pru0_1>, <&rtu0_1>;
+ firmware-name = "ti-pruss/am65x-pru0-prueth-fw.elf",
+ "ti-pruss/am65x-rtu0-prueth-fw.elf",
+ "ti-pruss/am65x-pru1-prueth-fw.elf",
+ "ti-pruss/am65x-rtu1-prueth-fw.elf";
+
+ ti,pruss-gp-mux-sel = <2>, /* MII mode */
+ <2>,
+ <2>, /* MII mode */
+ <2>;
+
+ dmas = <&main_udmap 0xc100>, /* egress slice 0 */
+ <&main_udmap 0xc101>, /* egress slice 0 */
+ <&main_udmap 0xc102>, /* egress slice 0 */
+ <&main_udmap 0xc103>, /* egress slice 0 */
+ <&main_udmap 0xc104>, /* egress slice 1 */
+ <&main_udmap 0xc105>, /* egress slice 1 */
+ <&main_udmap 0xc106>, /* egress slice 1 */
+ <&main_udmap 0xc107>, /* egress slice 1 */
+ <&main_udmap 0x4100>, /* ingress slice 0 */
+ <&main_udmap 0x4101>, /* ingress slice 1 */
+ <&main_udmap 0x4102>, /* mgmnt rsp slice 0 */
+ <&main_udmap 0x4103>; /* mgmnt rsp slice 1 */
+ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
+ "tx1-0", "tx1-1", "tx1-2", "tx1-3",
+ "rx0", "rx1",
+ "rxmgm0", "rxmgm1";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index ff857117d719..ed71561c5bd9 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -66,7 +66,7 @@
assigned-clock-parents = <&k3_clks 153 8>, <&k3_clks 153 4>;
ti,serdes-clk = <&serdes0_clk>;
#clock-cells = <1>;
- mux-controls = <&serdes_mux 0>;
+ mux-controls = <&serdes0_mux 0>;
};
serdes1: serdes@910000 {
@@ -81,7 +81,7 @@
assigned-clock-parents = <&k3_clks 154 9>, <&k3_clks 154 5>;
ti,serdes-clk = <&serdes1_clk>;
#clock-cells = <1>;
- mux-controls = <&serdes_mux 1>;
+ mux-controls = <&serdes1_mux 0>;
};
main_uart0: serial@2800000 {
@@ -89,7 +89,6 @@
reg = <0x00 0x02800000 0x00 0x100>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
status = "disabled";
};
@@ -436,18 +435,13 @@
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
+ ti,clkbuf-sel = <0x7>;
+ ti,trm-icp = <0x8>;
ti,otap-del-sel-legacy = <0x0>;
ti,otap-del-sel-mmc-hs = <0x0>;
- ti,otap-del-sel-sd-hs = <0x0>;
- ti,otap-del-sel-sdr12 = <0x0>;
- ti,otap-del-sel-sdr25 = <0x0>;
- ti,otap-del-sel-sdr50 = <0x8>;
- ti,otap-del-sel-sdr104 = <0x7>;
- ti,otap-del-sel-ddr50 = <0x5>;
ti,otap-del-sel-ddr52 = <0x5>;
ti,otap-del-sel-hs200 = <0x5>;
- ti,otap-del-sel-hs400 = <0x0>;
- ti,trm-icp = <0x8>;
+ ti,itap-del-sel-ddr52 = <0x0>;
dma-coherent;
status = "disabled";
};
@@ -459,18 +453,19 @@
clocks = <&k3_clks 48 0>, <&k3_clks 48 1>;
clock-names = "clk_ahb", "clk_xin";
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ ti,clkbuf-sel = <0x7>;
+ ti,trm-icp = <0x8>;
ti,otap-del-sel-legacy = <0x0>;
- ti,otap-del-sel-mmc-hs = <0x0>;
ti,otap-del-sel-sd-hs = <0x0>;
- ti,otap-del-sel-sdr12 = <0x0>;
- ti,otap-del-sel-sdr25 = <0x0>;
+ ti,otap-del-sel-sdr12 = <0xf>;
+ ti,otap-del-sel-sdr25 = <0xf>;
ti,otap-del-sel-sdr50 = <0x8>;
ti,otap-del-sel-sdr104 = <0x7>;
ti,otap-del-sel-ddr50 = <0x4>;
- ti,otap-del-sel-ddr52 = <0x4>;
- ti,otap-del-sel-hs200 = <0x7>;
- ti,clkbuf-sel = <0x7>;
- ti,trm-icp = <0x8>;
+ ti,itap-del-sel-legacy = <0xa>;
+ ti,itap-del-sel-sd-hs = <0x1>;
+ ti,itap-del-sel-sdr12 = <0xa>;
+ ti,itap-del-sel-sdr25 = <0x1>;
dma-coherent;
status = "disabled";
};
@@ -483,20 +478,25 @@
ranges = <0x0 0x0 0x00100000 0x1c000>;
serdes0_clk: clock@4080 {
- compatible = "syscon";
- reg = <0x00004080 0x4>;
+ compatible = "ti,am654-serdes-ctrl", "syscon";
+ reg = <0x4080 0x4>;
+
+ serdes0_mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x0 0x3>; /* lane select */
+ };
};
serdes1_clk: clock@4090 {
- compatible = "syscon";
- reg = <0x00004090 0x4>;
- };
+ compatible = "ti,am654-serdes-ctrl", "syscon";
+ reg = <0x4090 0x4>;
- serdes_mux: mux-controller {
- compatible = "mmio-mux";
- #mux-control-cells = <1>;
- mux-reg-masks = <0x4080 0x3>, /* SERDES0 lane select */
- <0x4090 0x3>; /* SERDES1 lane select */
+ serdes1_mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x0 0x3>; /* lane select */
+ };
};
dss_oldi_io_ctrl: dss-oldi-io-ctrl@41e0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 6ff3ccc39fb4..8feab9317644 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -43,7 +43,6 @@
reg = <0x00 0x40a00000 0x00 0x100>;
interrupts = <GIC_SPI 565 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
status = "disabled";
};
@@ -286,7 +285,11 @@
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
+ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
+ <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
+ <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */
+ <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index 37527890ddea..eee072e44a42 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -59,7 +59,6 @@
reg = <0x42300000 0x100>;
interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 150 TI_SCI_PD_EXCLUSIVE>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index 50de2a448a3a..d88651c297a2 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -517,18 +517,18 @@
wkup_uart0_pins_default: wkup-uart0-default-pins {
bootph-all;
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (L37) WKUP_GPIO0_6.WKUP_UART0_CTSn */
- J721S2_WKUP_IOPAD(0x074, PIN_INPUT, 0) /* (L36) WKUP_GPIO0_7.WKUP_UART0_RTSn */
- J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
- J721S2_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (K34) WKUP_UART0_TXD */
+ J784S4_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (L37) WKUP_UART0_CTSn */
+ J784S4_WKUP_IOPAD(0x074, PIN_OUTPUT, 0) /* (L36) WKUP_UART0_RTSn */
+ J784S4_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
+ J784S4_WKUP_IOPAD(0x04c, PIN_OUTPUT, 0) /* (K34) WKUP_UART0_TXD */
>;
};
wkup_i2c0_pins_default: wkup-i2c0-default-pins {
bootph-all;
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x98, PIN_INPUT, 0) /* (N33) WKUP_I2C0_SCL */
- J721S2_WKUP_IOPAD(0x9c, PIN_INPUT, 0) /* (N35) WKUP_I2C0_SDA */
+ J784S4_WKUP_IOPAD(0x98, PIN_INPUT, 0) /* (N33) WKUP_I2C0_SCL */
+ J784S4_WKUP_IOPAD(0x9c, PIN_INPUT, 0) /* (N35) WKUP_I2C0_SDA */
>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index 657f9cc9f4ea..9386bf3ef9f6 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -440,7 +440,6 @@
reg = <0x00 0x02800000 0x00 0x100>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 146 2>;
clock-names = "fclk";
@@ -452,7 +451,6 @@
reg = <0x00 0x02810000 0x00 0x100>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 278 2>;
clock-names = "fclk";
@@ -464,7 +462,6 @@
reg = <0x00 0x02820000 0x00 0x100>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 279 2>;
clock-names = "fclk";
@@ -476,7 +473,6 @@
reg = <0x00 0x02830000 0x00 0x100>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 280 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 280 2>;
clock-names = "fclk";
@@ -488,7 +484,6 @@
reg = <0x00 0x02840000 0x00 0x100>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 281 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 281 2>;
clock-names = "fclk";
@@ -500,7 +495,6 @@
reg = <0x00 0x02850000 0x00 0x100>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 282 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 282 2>;
clock-names = "fclk";
@@ -512,7 +506,6 @@
reg = <0x00 0x02860000 0x00 0x100>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 283 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 283 2>;
clock-names = "fclk";
@@ -524,7 +517,6 @@
reg = <0x00 0x02870000 0x00 0x100>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 284 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 284 2>;
clock-names = "fclk";
@@ -536,7 +528,6 @@
reg = <0x00 0x02880000 0x00 0x100>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 285 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 285 2>;
clock-names = "fclk";
@@ -548,7 +539,6 @@
reg = <0x00 0x02890000 0x00 0x100>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 286 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 286 2>;
clock-names = "fclk";
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 7cf21c99956e..fccaabfb1348 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -259,7 +259,6 @@
reg = <0x00 0x42300000 0x00 0x100>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 287 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 287 2>;
clock-names = "fclk";
@@ -271,7 +270,6 @@
reg = <0x00 0x40a00000 0x00 0x100>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 149 2>;
clock-names = "fclk";
@@ -520,10 +518,12 @@
fss: bus@47000000 {
compatible = "simple-bus";
- reg = <0x00 0x47000000 0x00 0x100>;
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
+ <0x0 0x47034000 0x0 0x47040000 0x0 0x100>, /* HBMC Control */
+ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
+ <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>; /* HBMC/OSPI0 Memory */
hbmc_mux: mux-controller@47000004 {
compatible = "reg-mux";
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index c7eafbc862f9..0da785be80ff 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -1337,7 +1337,6 @@
reg = <0x00 0x02800000 0x00 0x100>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 146 0>;
clock-names = "fclk";
@@ -1349,7 +1348,6 @@
reg = <0x00 0x02810000 0x00 0x100>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 278 0>;
clock-names = "fclk";
@@ -1361,7 +1359,6 @@
reg = <0x00 0x02820000 0x00 0x100>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 279 0>;
clock-names = "fclk";
@@ -1373,7 +1370,6 @@
reg = <0x00 0x02830000 0x00 0x100>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 280 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 280 0>;
clock-names = "fclk";
@@ -1385,7 +1381,6 @@
reg = <0x00 0x02840000 0x00 0x100>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 281 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 281 0>;
clock-names = "fclk";
@@ -1397,7 +1392,6 @@
reg = <0x00 0x02850000 0x00 0x100>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 282 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 282 0>;
clock-names = "fclk";
@@ -1409,7 +1403,6 @@
reg = <0x00 0x02860000 0x00 0x100>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 283 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 283 0>;
clock-names = "fclk";
@@ -1421,7 +1414,6 @@
reg = <0x00 0x02870000 0x00 0x100>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 284 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 284 0>;
clock-names = "fclk";
@@ -1433,7 +1425,6 @@
reg = <0x00 0x02880000 0x00 0x100>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 285 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 285 0>;
clock-names = "fclk";
@@ -1445,7 +1436,6 @@
reg = <0x00 0x02890000 0x00 0x100>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 286 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 286 0>;
clock-names = "fclk";
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 4618b697fbc4..9349ae07c046 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -243,7 +243,6 @@
reg = <0x00 0x42300000 0x00 0x100>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 287 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 287 0>;
clock-names = "fclk";
@@ -255,7 +254,6 @@
reg = <0x00 0x40a00000 0x00 0x100>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
- current-speed = <115200>;
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 149 0>;
clock-names = "fclk";
@@ -346,10 +344,14 @@
fss: bus@47000000 {
compatible = "simple-bus";
- reg = <0x0 0x47000000 0x0 0x100>;
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
+ <0x0 0x47034000 0x0 0x47034000 0x0 0x100>, /* HBMC Control */
+ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
+ <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
+ <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* HBMC/OSPI0 Memory */
+ <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
hbmc_mux: mux-controller@47000004 {
compatible = "reg-mux";
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
index b70c8615e3c1..9ed6949b40e9 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
@@ -459,7 +459,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x200>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 146 3>;
clock-names = "fclk";
power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
@@ -470,7 +469,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x200>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 350 3>;
clock-names = "fclk";
power-domains = <&k3_pds 350 TI_SCI_PD_EXCLUSIVE>;
@@ -481,7 +479,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x200>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 351 3>;
clock-names = "fclk";
power-domains = <&k3_pds 351 TI_SCI_PD_EXCLUSIVE>;
@@ -492,7 +489,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02830000 0x00 0x200>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 352 3>;
clock-names = "fclk";
power-domains = <&k3_pds 352 TI_SCI_PD_EXCLUSIVE>;
@@ -503,7 +499,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02840000 0x00 0x200>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 353 3>;
clock-names = "fclk";
power-domains = <&k3_pds 353 TI_SCI_PD_EXCLUSIVE>;
@@ -514,7 +509,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02850000 0x00 0x200>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 354 3>;
clock-names = "fclk";
power-domains = <&k3_pds 354 TI_SCI_PD_EXCLUSIVE>;
@@ -525,7 +519,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02860000 0x00 0x200>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 355 3>;
clock-names = "fclk";
power-domains = <&k3_pds 355 TI_SCI_PD_EXCLUSIVE>;
@@ -536,7 +529,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02870000 0x00 0x200>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 356 3>;
clock-names = "fclk";
power-domains = <&k3_pds 356 TI_SCI_PD_EXCLUSIVE>;
@@ -547,7 +539,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02880000 0x00 0x200>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 357 3>;
clock-names = "fclk";
power-domains = <&k3_pds 357 TI_SCI_PD_EXCLUSIVE>;
@@ -558,7 +549,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02890000 0x00 0x200>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 358 3>;
clock-names = "fclk";
power-domains = <&k3_pds 358 TI_SCI_PD_EXCLUSIVE>;
@@ -778,8 +768,6 @@
ti,clkbuf-sel = <0x7>;
ti,trm-icp = <0x8>;
dma-coherent;
- /* Masking support for SDR104 capability */
- sdhci-caps-mask = <0x00000003 0x00000000>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
index eaf7f709440e..5ccb04c7c462 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
@@ -298,7 +298,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x42300000 0x00 0x200>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 359 3>;
clock-names = "fclk";
power-domains = <&k3_pds 359 TI_SCI_PD_EXCLUSIVE>;
@@ -309,7 +308,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x40a00000 0x00 0x200>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 149 3>;
clock-names = "fclk";
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
index be4502fe1c9d..568e6a04619d 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
@@ -117,6 +117,7 @@
#size-cells = <2>;
ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
<0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
+ <0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
<0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
<0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe Core*/
<0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index cee3a8661d5e..bf3c246d13d1 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -226,10 +226,7 @@
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy0>;
-};
-
-&cpsw_port2 {
- status = "disabled";
+ status = "okay";
};
&main_gpio1 {
@@ -369,6 +366,13 @@
};
+&sdhci0 {
+ disable-wp;
+ bootph-all;
+ ti,driver-strength-ohm = <50>;
+ status = "okay";
+};
+
&sdhci1 {
/* SD/MMC */
vmmc-supply = <&vdd_mmc1>;
@@ -377,7 +381,6 @@
pinctrl-0 = <&main_mmc1_pins_default>;
ti,driver-strength-ohm = <50>;
disable-wp;
- no-1-8-v;
status = "okay";
bootph-all;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
index 81fd7afac8c5..d511b25d62e3 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
@@ -343,16 +343,16 @@
wkup_uart0_pins_default: wkup-uart0-default-pins {
bootph-all;
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
- J721S2_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (K34) WKUP_UART0_TXD */
+ J784S4_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
+ J784S4_WKUP_IOPAD(0x04c, PIN_OUTPUT, 0) /* (K34) WKUP_UART0_TXD */
>;
};
wkup_i2c0_pins_default: wkup-i2c0-default-pins {
bootph-all;
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x98, PIN_INPUT, 0) /* (N33) WKUP_I2C0_SCL */
- J721S2_WKUP_IOPAD(0x9c, PIN_INPUT, 0) /* (N35) WKUP_I2C0_SDA */
+ J784S4_WKUP_IOPAD(0x98, PIN_INPUT, 0) /* (N33) WKUP_I2C0_SCL */
+ J784S4_WKUP_IOPAD(0x9c, PIN_INPUT, 0) /* (N35) WKUP_I2C0_SDA */
>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
index b67c37460a73..6a4554c6c9c1 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
@@ -404,7 +404,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x200>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 146 0>;
clock-names = "fclk";
power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
@@ -415,7 +414,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x200>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 388 0>;
clock-names = "fclk";
power-domains = <&k3_pds 388 TI_SCI_PD_EXCLUSIVE>;
@@ -426,7 +424,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x200>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 389 0>;
clock-names = "fclk";
power-domains = <&k3_pds 389 TI_SCI_PD_EXCLUSIVE>;
@@ -437,7 +434,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02830000 0x00 0x200>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 390 0>;
clock-names = "fclk";
power-domains = <&k3_pds 390 TI_SCI_PD_EXCLUSIVE>;
@@ -448,7 +444,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02840000 0x00 0x200>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 391 0>;
clock-names = "fclk";
power-domains = <&k3_pds 391 TI_SCI_PD_EXCLUSIVE>;
@@ -459,7 +454,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02850000 0x00 0x200>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 392 0>;
clock-names = "fclk";
power-domains = <&k3_pds 392 TI_SCI_PD_EXCLUSIVE>;
@@ -470,7 +464,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02860000 0x00 0x200>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 393 0>;
clock-names = "fclk";
power-domains = <&k3_pds 393 TI_SCI_PD_EXCLUSIVE>;
@@ -481,7 +474,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02870000 0x00 0x200>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 394 0>;
clock-names = "fclk";
power-domains = <&k3_pds 394 TI_SCI_PD_EXCLUSIVE>;
@@ -492,7 +484,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02880000 0x00 0x200>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 395 0>;
clock-names = "fclk";
power-domains = <&k3_pds 395 TI_SCI_PD_EXCLUSIVE>;
@@ -503,7 +494,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02890000 0x00 0x200>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 396 0>;
clock-names = "fclk";
power-domains = <&k3_pds 396 TI_SCI_PD_EXCLUSIVE>;
@@ -914,8 +904,6 @@
ti,clkbuf-sel = <0x7>;
ti,trm-icp = <0x8>;
dma-coherent;
- sdhci-caps-mask = <0x00000003 0x00000000>;
- no-1-8-v;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
index 77a8d99139ec..2e18d91ae92f 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
@@ -304,7 +304,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x42300000 0x00 0x200>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 397 0>;
clock-names = "fclk";
power-domains = <&k3_pds 397 TI_SCI_PD_EXCLUSIVE>;
@@ -315,7 +314,6 @@
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x40a00000 0x00 0x200>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
- current-speed = <115200>;
clocks = <&k3_clks 149 0>;
clock-names = "fclk";
power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
@@ -674,10 +672,13 @@
fss: bus@47000000 {
compatible = "simple-bus";
- reg = <0x00 0x47000000 0x00 0x100>;
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
+ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
+ <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
+ <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */
+ <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
index 6e2e92ffe745..da7368ed6b52 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
@@ -234,6 +234,7 @@
#size-cells = <2>;
ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
<0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
+ <0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
<0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
<0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
<0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 25d20d803230..34d0e0be3fe6 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -169,7 +169,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2c30d617e180..d35150a979d4 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -34,6 +34,7 @@ CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_ARCH_ACTIONS=y
+CONFIG_ARCH_AIROHA=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_APPLE=y
@@ -411,6 +412,7 @@ CONFIG_WCN36XX=m
CONFIG_ATH11K=m
CONFIG_ATH11K_AHB=m
CONFIG_ATH11K_PCI=m
+CONFIG_ATH12K=m
CONFIG_BRCMFMAC=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
@@ -445,6 +447,7 @@ CONFIG_INPUT_TPS65219_PWRBUTTON=m
CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_RK805_PWRKEY=m
+CONFIG_INPUT_DA9063_ONKEY=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
@@ -562,6 +565,7 @@ CONFIG_SPI_TEGRA114=m
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
CONFIG_SPMI_MTK_PMIF=m
+CONFIG_PINCTRL_DA9062=m
CONFIG_PINCTRL_MAX77620=y
CONFIG_PINCTRL_RK805=m
CONFIG_PINCTRL_SINGLE=y
@@ -727,6 +731,7 @@ CONFIG_MFD_ALTERA_SYSMGR=y
CONFIG_MFD_BD9571MWV=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_DA9062=m
CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_HI6421_PMIC=y
CONFIG_MFD_HI655X_PMIC=y
@@ -772,6 +777,7 @@ CONFIG_REGULATOR_PWM=y
CONFIG_REGULATOR_QCOM_RPMH=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_QCOM_USB_VBUS=m
CONFIG_REGULATOR_RAA215300=y
CONFIG_REGULATOR_RK808=y
CONFIG_REGULATOR_S2MPS11=y
@@ -854,6 +860,7 @@ CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_RCAR_DW_HDMI=m
CONFIG_DRM_RCAR_MIPI_DSI=m
CONFIG_DRM_RZG2L_MIPI_DSI=m
+CONFIG_DRM_RZG2L_DU=m
CONFIG_DRM_SUN4I=m
CONFIG_DRM_SUN6I_DSI=m
CONFIG_DRM_SUN8I_DW_HDMI=m
@@ -865,7 +872,9 @@ CONFIG_DRM_PANEL_LVDS=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_EDP=m
CONFIG_DRM_PANEL_ILITEK_ILI9882T=m
+CONFIG_DRM_PANEL_KHADAS_TS050=m
CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
+CONFIG_DRM_PANEL_NOVATEK_NT36672E=m
CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
@@ -890,6 +899,7 @@ CONFIG_DRM_ANALOGIX_ANX7625=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_DRM_CDNS_MHDP8546=m
+CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE=m
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
CONFIG_DRM_DW_HDMI_CEC=m
CONFIG_DRM_IMX_DCSS=m
@@ -907,6 +917,7 @@ CONFIG_DRM_MESON=m
CONFIG_DRM_PL111=m
CONFIG_DRM_LIMA=m
CONFIG_DRM_PANFROST=m
+CONFIG_DRM_PANTHOR=m
CONFIG_DRM_TIDSS=m
CONFIG_DRM_POWERVR=m
CONFIG_FB=y
@@ -949,6 +960,7 @@ CONFIG_SND_SOC_SM8250=m
CONFIG_SND_SOC_SC8280XP=m
CONFIG_SND_SOC_SC7180=m
CONFIG_SND_SOC_SC7280=m
+CONFIG_SND_SOC_X1E80100=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_I2S_TDM=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
@@ -991,6 +1003,7 @@ CONFIG_SND_SOC_GTM601=m
CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
CONFIG_SND_SOC_PCM3168A_I2C=m
+CONFIG_SND_SOC_RK3308=m
CONFIG_SND_SOC_RK817=m
CONFIG_SND_SOC_RT5640=m
CONFIG_SND_SOC_RT5659=m
@@ -1169,6 +1182,7 @@ CONFIG_RTC_DRV_RV8803=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_PCF2127=m
+CONFIG_RTC_DRV_DA9063=m
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_CROS_EC=y
CONFIG_RTC_DRV_FSL_FTM_ALARM=m
@@ -1217,6 +1231,7 @@ CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
CONFIG_VIDEO_MAX96712=m
CONFIG_VIDEO_MESON_VDEC=m
+CONFIG_SND_BCM2835=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=y
@@ -1286,6 +1301,7 @@ CONFIG_QCM_DISPCC_2290=m
CONFIG_QCS_GCC_404=y
CONFIG_QDU_GCC_1000=y
CONFIG_SC_CAMCC_8280XP=m
+CONFIG_SC_DISPCC_7280=m
CONFIG_SC_DISPCC_8280XP=m
CONFIG_SA_GCC_8775P=y
CONFIG_SA_GPUCC_8775P=m
@@ -1293,6 +1309,7 @@ CONFIG_SC_GCC_7180=y
CONFIG_SC_GCC_7280=y
CONFIG_SC_GCC_8180X=y
CONFIG_SC_GCC_8280XP=y
+CONFIG_SC_GPUCC_7280=m
CONFIG_SC_GPUCC_8280XP=m
CONFIG_SC_LPASSCC_8280XP=m
CONFIG_SDM_CAMCC_845=m
@@ -1408,6 +1425,7 @@ CONFIG_ARCH_R9A07G044=y
CONFIG_ARCH_R9A07G054=y
CONFIG_ARCH_R9A08G045=y
CONFIG_ARCH_R9A09G011=y
+CONFIG_ARCH_R9A09G057=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
@@ -1473,6 +1491,7 @@ CONFIG_PWM_VISCONTI=m
CONFIG_SL28CPLD_INTC=y
CONFIG_QCOM_PDC=y
CONFIG_QCOM_MPM=y
+CONFIG_RESET_GPIO=m
CONFIG_RESET_IMX7=y
CONFIG_RESET_QCOM_AOSS=y
CONFIG_RESET_QCOM_PDC=m
@@ -1517,6 +1536,7 @@ CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX=m
CONFIG_PHY_ROCKCHIP_SNPS_PCIE3=y
CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_ROCKCHIP_USBDP=m
CONFIG_PHY_SAMSUNG_UFS=y
CONFIG_PHY_UNIPHIER_USB2=y
CONFIG_PHY_UNIPHIER_USB3=y
@@ -1585,6 +1605,7 @@ CONFIG_INTERCONNECT_QCOM_SC8180X=y
CONFIG_INTERCONNECT_QCOM_SC8280XP=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SDX75=y
+CONFIG_INTERCONNECT_QCOM_SM6115=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=y
CONFIG_INTERCONNECT_QCOM_SM8350=m
@@ -1599,6 +1620,7 @@ CONFIG_HTE_TEGRA194=y
CONFIG_HTE_TEGRA194_TEST=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
@@ -1647,6 +1669,7 @@ CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
CONFIG_CRYPTO_DEV_QCE=m
CONFIG_CRYPTO_DEV_QCOM_RNG=m
+CONFIG_CRYPTO_DEV_TEGRA=m
CONFIG_CRYPTO_DEV_CCREE=m
CONFIG_CRYPTO_DEV_HISI_SEC2=m
CONFIG_CRYPTO_DEV_HISI_ZIP=m
diff --git a/arch/arm64/configs/hardening.config b/arch/arm64/configs/hardening.config
index b0e795208998..24179722927e 100644
--- a/arch/arm64/configs/hardening.config
+++ b/arch/arm64/configs/hardening.config
@@ -5,6 +5,7 @@ CONFIG_ARM64_SW_TTBR0_PAN=y
# Software Shadow Stack or PAC
CONFIG_SHADOW_CALL_STACK=y
+CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
# Pointer authentication (ARMv8.3 and later). If hardware actually supports
# it, one can turn off CONFIG_STACKPROTECTOR_STRONG with this enabled.
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 1dc5bbbfeed2..b262eaa9170c 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -25,33 +25,28 @@
.endm
/* preload all round keys */
- .macro load_round_keys, rounds, rk
- cmp \rounds, #12
- blo 2222f /* 128 bits */
- beq 1111f /* 192 bits */
- ld1 {v17.4s-v18.4s}, [\rk], #32
-1111: ld1 {v19.4s-v20.4s}, [\rk], #32
-2222: ld1 {v21.4s-v24.4s}, [\rk], #64
- ld1 {v25.4s-v28.4s}, [\rk], #64
- ld1 {v29.4s-v31.4s}, [\rk]
+ .macro load_round_keys, rk, nr, tmp
+ add \tmp, \rk, \nr, sxtw #4
+ sub \tmp, \tmp, #160
+ ld1 {v17.4s-v20.4s}, [\rk]
+ ld1 {v21.4s-v24.4s}, [\tmp], #64
+ ld1 {v25.4s-v28.4s}, [\tmp], #64
+ ld1 {v29.4s-v31.4s}, [\tmp]
.endm
/* prepare for encryption with key in rk[] */
.macro enc_prepare, rounds, rk, temp
- mov \temp, \rk
- load_round_keys \rounds, \temp
+ load_round_keys \rk, \rounds, \temp
.endm
/* prepare for encryption (again) but with new key in rk[] */
.macro enc_switch_key, rounds, rk, temp
- mov \temp, \rk
- load_round_keys \rounds, \temp
+ load_round_keys \rk, \rounds, \temp
.endm
/* prepare for decryption with key in rk[] */
.macro dec_prepare, rounds, rk, temp
- mov \temp, \rk
- load_round_keys \rounds, \temp
+ load_round_keys \rk, \rounds, \temp
.endm
.macro do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4
@@ -110,14 +105,13 @@
/* up to 5 interleaved blocks */
.macro do_block_Nx, enc, rounds, i0, i1, i2, i3, i4
- cmp \rounds, #12
- blo 2222f /* 128 bits */
- beq 1111f /* 192 bits */
+ tbz \rounds, #2, .L\@ /* 128 bits */
round_Nx \enc, v17, \i0, \i1, \i2, \i3, \i4
round_Nx \enc, v18, \i0, \i1, \i2, \i3, \i4
-1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3, \i4
+ tbz \rounds, #1, .L\@ /* 192 bits */
+ round_Nx \enc, v19, \i0, \i1, \i2, \i3, \i4
round_Nx \enc, v20, \i0, \i1, \i2, \i3, \i4
-2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+.L\@: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
round_Nx \enc, \key, \i0, \i1, \i2, \i3, \i4
.endr
fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3, \i4
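
The rewritten load_round_keys above drops the per-key-size branches: v17-v20 always hold the first four round keys from rk, while v21-v31 hold the last eleven, loaded from rk + rounds * 16 - 160, and do_block_Nx then uses tbz on the rounds value to skip the registers a shorter key schedule does not need. A minimal standalone sketch of that address arithmetic (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	/* AES-128/192/256 use 10/12/14 rounds, i.e. 11/13/15 round keys */
	int rounds[] = { 10, 12, 14 };

	for (int i = 0; i < 3; i++) {
		int nr = rounds[i];
		/* add tmp, rk, nr, sxtw #4 ; sub tmp, tmp, #160 */
		int off = nr * 16 - 160;

		printf("rounds=%2d: v17-v20 <- keys 0-3, v21-v31 <- keys %d-%d\n",
		       nr, off / 16, off / 16 + 10);
	}
	return 0;
}
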
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 9de7fbc797af..3a8961b6ea51 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -99,16 +99,16 @@
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
-1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+.La\@: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
movi v15.16b, #0x40
tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
sub_bytes \in
- subs \i, \i, #1
+ sub \i, \i, #1
ld1 {v15.4s}, [\rkp], #16
- beq 2222f
+ cbz \i, .Lb\@
mix_columns \in, \enc
- b 1111b
-2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+ b .La\@
+.Lb\@: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
.endm
.macro encrypt_block, in, rounds, rk, rkp, i
@@ -206,7 +206,7 @@
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
-1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+.La\@: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
@@ -216,13 +216,13 @@
tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
sub_bytes_4x \in0, \in1, \in2, \in3
- subs \i, \i, #1
+ sub \i, \i, #1
ld1 {v15.4s}, [\rkp], #16
- beq 2222f
+ cbz \i, .Lb\@
mix_columns_2x \in0, \in1, \enc
mix_columns_2x \in2, \in3, \enc
- b 1111b
-2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ b .La\@
+.Lb\@: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ab8b396428da..bc0b0d75acef 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -50,16 +50,12 @@
msr daif, \flags
.endm
- .macro enable_dbg
- msr daifclr, #8
- .endm
-
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
bic \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
- isb // Synchronise with enable_dbg
+ isb // Take effect before a subsequent clear of DAIF.D
9990:
.endm
@@ -480,9 +476,10 @@ alternative_endif
*/
.macro reset_pmuserenr_el0, tmpreg
mrs \tmpreg, id_aa64dfr0_el1
- sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
- cmp \tmpreg, #1 // Skip if no PMU present
- b.lt 9000f
+ ubfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp \tmpreg, #ID_AA64DFR0_EL1_PMUVer_NI
+ ccmp \tmpreg, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
+ b.eq 9000f // Skip if no PMU present or IMP_DEF
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
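
The same sbfx-to-ubfx change appears in __init_el2_debug below; in both places the cmp/ccmp pair makes b.eq skip the PMU setup only when PMUVer holds the "not implemented" or "IMPLEMENTATION DEFINED" encoding. A C model of the predicate, assuming the generated constants ID_AA64DFR0_EL1_PMUVer_NI and ..._IMP_DEF are 0x0 and 0xf (illustrative only):

#include <stdbool.h>

#define PMUVER_NI	0x0	/* assumed ID_AA64DFR0_EL1_PMUVer_NI */
#define PMUVER_IMP_DEF	0xf	/* assumed ID_AA64DFR0_EL1_PMUVer_IMP_DEF */

/* cmp reg, #NI ; ccmp reg, #IMP_DEF, #4, ne ; b.eq 9000f */
static bool skip_pmu_setup(unsigned int pmuver)
{
	return pmuver == PMUVER_NI || pmuver == PMUVER_IMP_DEF;
}

/*
 * With the old sbfx + "cmp #1; b.lt" sequence the 4-bit field was
 * sign-extended, so encodings of 0b1000 and above also compared as
 * negative; ubfx keeps the field unsigned.
 */
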
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 52f076afeb96..936389e9aecb 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,7 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -159,6 +160,7 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index b7afaa026842..e4546b29dd0c 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -59,13 +59,14 @@
.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
- sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
- cmp x0, #1
- b.lt .Lskip_pmu_\@ // Skip if no PMU present
+ ubfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp x0, #ID_AA64DFR0_EL1_PMUVer_NI
+ ccmp x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
+ b.eq .Lskip_pmu_\@ // Skip if no PMU present or IMP_DEF
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
.Lskip_pmu_\@:
- csel x2, xzr, x0, lt // all PMU counters from EL1
+ csel x2, xzr, x0, eq // all PMU counters from EL1
/* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 81606bf7d5ac..7abf09df7033 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -404,6 +404,18 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
}
+/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
+static inline bool esr_iss_is_eretax(unsigned long esr)
+{
+ return esr & ESR_ELx_ERET_ISS_ERET;
+}
+
+/* Indicate which key is used for ERETAx (false: A-Key, true: B-Key) */
+static inline bool esr_iss_is_eretab(unsigned long esr)
+{
+ return esr & ESR_ELx_ERET_ISS_ERETA;
+}
+
const char *esr_get_class_string(unsigned long esr);
#endif /* __ASSEMBLY */
diff --git a/arch/arm64/include/asm/fb.h b/arch/arm64/include/asm/fb.h
deleted file mode 100644
index 1a495d8fb2ce..000000000000
--- a/arch/arm64/include/asm/fb.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Ltd.
- */
-#ifndef __ASM_FB_H_
-#define __ASM_FB_H_
-
-#include <asm-generic/fb.h>
-
-#endif /* __ASM_FB_H_ */
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2ddc33d93b13..3954cbd2ff56 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -18,11 +18,11 @@
extern bool arch_hugetlb_migration_supported(struct hstate *h);
#endif
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index db1aeacd4cd9..8c0a36f72d6f 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -135,6 +135,12 @@ enum aarch64_insn_special_register {
AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210
};
+enum aarch64_insn_system_register {
+ AARCH64_INSN_SYSREG_TPIDR_EL1 = 0x4684,
+ AARCH64_INSN_SYSREG_TPIDR_EL2 = 0x6682,
+ AARCH64_INSN_SYSREG_SP_EL0 = 0x4208,
+};
+
enum aarch64_insn_variant {
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_VARIANT_64BIT
@@ -686,6 +692,8 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ enum aarch64_insn_system_register sysreg);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 8d825522c55c..4ff0ae3f6d66 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -140,6 +140,138 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l))
/*
+ * The ARM64 iowrite implementation is intended to support drivers that want to
+ * use write combining. For instance PCI drivers using write combining with a 64
+ * byte __iowrite64_copy() expect to get a 64 byte MemWr TLP on the PCIe bus.
+ *
+ * Newer ARM core have sensitive write combining buffers, it is important that
+ * the stores be contiguous blocks of store instructions. Normal memcpy
+ * approaches have a very low chance to generate write combining.
+ *
+ * Since this is the only API on ARM64 that should be used with write combining
+ * it also integrates the DGH hint which is supposed to lower the latency to
+ * emit the large TLP from the CPU.
+ */
+
+static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
+ const u32 *from, size_t count)
+{
+ switch (count) {
+ case 8:
+ asm volatile("str %w0, [%8, #4 * 0]\n"
+ "str %w1, [%8, #4 * 1]\n"
+ "str %w2, [%8, #4 * 2]\n"
+ "str %w3, [%8, #4 * 3]\n"
+ "str %w4, [%8, #4 * 4]\n"
+ "str %w5, [%8, #4 * 5]\n"
+ "str %w6, [%8, #4 * 6]\n"
+ "str %w7, [%8, #4 * 7]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
+ "rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
+ "rZ"(from[6]), "rZ"(from[7]), "r"(to));
+ break;
+ case 4:
+ asm volatile("str %w0, [%4, #4 * 0]\n"
+ "str %w1, [%4, #4 * 1]\n"
+ "str %w2, [%4, #4 * 2]\n"
+ "str %w3, [%4, #4 * 3]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
+ "rZ"(from[3]), "r"(to));
+ break;
+ case 2:
+ asm volatile("str %w0, [%2, #4 * 0]\n"
+ "str %w1, [%2, #4 * 1]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "r"(to));
+ break;
+ case 1:
+ __raw_writel(*from, to);
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
+
+static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ if (count == 8 || count == 4 || count == 2 || count == 1) {
+ __const_memcpy_toio_aligned32(to, from, count);
+ dgh();
+ } else {
+ __iowrite32_copy_full(to, from, count);
+ }
+}
+
+#define __iowrite32_copy(to, from, count) \
+ (__builtin_constant_p(count) ? \
+ __const_iowrite32_copy(to, from, count) : \
+ __iowrite32_copy_full(to, from, count))
+
+static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
+ const u64 *from, size_t count)
+{
+ switch (count) {
+ case 8:
+ asm volatile("str %x0, [%8, #8 * 0]\n"
+ "str %x1, [%8, #8 * 1]\n"
+ "str %x2, [%8, #8 * 2]\n"
+ "str %x3, [%8, #8 * 3]\n"
+ "str %x4, [%8, #8 * 4]\n"
+ "str %x5, [%8, #8 * 5]\n"
+ "str %x6, [%8, #8 * 6]\n"
+ "str %x7, [%8, #8 * 7]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
+ "rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
+ "rZ"(from[6]), "rZ"(from[7]), "r"(to));
+ break;
+ case 4:
+ asm volatile("str %x0, [%4, #8 * 0]\n"
+ "str %x1, [%4, #8 * 1]\n"
+ "str %x2, [%4, #8 * 2]\n"
+ "str %x3, [%4, #8 * 3]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
+ "rZ"(from[3]), "r"(to));
+ break;
+ case 2:
+ asm volatile("str %x0, [%2, #8 * 0]\n"
+ "str %x1, [%2, #8 * 1]\n"
+ :
+ : "rZ"(from[0]), "rZ"(from[1]), "r"(to));
+ break;
+ case 1:
+ __raw_writeq(*from, to);
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
+
+static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ if (count == 8 || count == 4 || count == 2 || count == 1) {
+ __const_memcpy_toio_aligned64(to, from, count);
+ dgh();
+ } else {
+ __iowrite64_copy_full(to, from, count);
+ }
+}
+
+#define __iowrite64_copy(to, from, count) \
+ (__builtin_constant_p(count) ? \
+ __const_iowrite64_copy(to, from, count) : \
+ __iowrite64_copy_full(to, from, count))
+
+/*
* I/O memory mapping functions.
*/
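
The comment above spells out the contract: only __iowrite32_copy()/__iowrite64_copy() should be used for write-combined MMIO on arm64, and only a compile-time-constant count of 1, 2, 4 or 8 takes the inlined STR-burst + DGH path. A hedged sketch of a caller (the doorbell structure and function names are made up for illustration):

#include <linux/io.h>
#include <linux/kernel.h>

/* hypothetical 64-byte write-combined doorbell */
struct wc_doorbell {
	u64 words[8];
};

static void ring_doorbell(void __iomem *db, const struct wc_doorbell *wqe)
{
	/*
	 * count is the constant 8, so __builtin_constant_p() selects
	 * __const_iowrite64_copy(): eight back-to-back STRs followed by
	 * DGH, which is what gives write combining a chance to emit the
	 * single 64-byte MemWr TLP described in the comment above.
	 */
	__iowrite64_copy(db, wqe->words, ARRAY_SIZE(wqe->words));
}
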
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 0a7186a93882..d4d7451c2c12 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -5,7 +5,6 @@
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H
-#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 6aafbb789991..4e753908b801 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -15,17 +15,23 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+#define JUMP_TABLE_ENTRY(key, label) \
+ ".pushsection __jump_table, \"aw\"\n\t" \
+ ".align 3\n\t" \
+ ".long 1b - ., %l["#label"] - .\n\t" \
+ ".quad %c0 - .\n\t" \
+ ".popsection\n\t" \
+ : : "i"(key) : : label
+
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
+ char *k = &((char *)key)[branch];
+
asm goto(
"1: nop \n\t"
- " .pushsection __jump_table, \"aw\" \n\t"
- " .align 3 \n\t"
- " .long 1b - ., %l[l_yes] - . \n\t"
- " .quad %c0 - . \n\t"
- " .popsection \n\t"
- : : "i"(&((char *)key)[branch]) : : l_yes);
+ JUMP_TABLE_ENTRY(k, l_yes)
+ );
return false;
l_yes:
@@ -35,15 +41,11 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
+ char *k = &((char *)key)[branch];
asm goto(
"1: b %l[l_yes] \n\t"
- " .pushsection __jump_table, \"aw\" \n\t"
- " .align 3 \n\t"
- " .long 1b - ., %l[l_yes] - . \n\t"
- " .quad %c0 - . \n\t"
- " .popsection \n\t"
- : : "i"(&((char *)key)[branch]) : : l_yes);
-
+ JUMP_TABLE_ENTRY(k, l_yes)
+ );
return false;
l_yes:
return true;
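
For context on what these asm goto helpers generate entries for: each use of the standard static-key API (shown below only as a consumer of arch_static_branch*) compiles to the "1: nop" or "1: b" site plus one __jump_table record that the patching code later rewrites.

#include <linux/jump_label.h>
#include <linux/printk.h>

/* hypothetical key, default-off */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void fast_path(void)
{
	/* emits arch_static_branch(): a NOP until the key is enabled */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("feature path taken\n");
}

static void enable_feature(void)
{
	/* patches every recorded site from NOP to a branch */
	static_branch_enable(&my_feature_key);
}
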
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24b5e6b23417..a6330460d9e5 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -73,10 +73,8 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
@@ -241,8 +239,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_gic_config(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
extern u64 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 975af30af31f..501e3e019c93 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -125,16 +125,6 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TWI;
}
-static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
-}
-
-static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
-}
-
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
@@ -587,16 +577,14 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
} else if (has_hvhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
- if (!vcpu_has_sve(vcpu) ||
- (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+ if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
} else {
val = CPTR_NVHE_EL2_RES1;
- if (vcpu_has_sve(vcpu) &&
- (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+ if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (cpus_have_final_cap(ARM64_SME))
val &= ~CPTR_EL2_TSM;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9e8a496fb284..8170c04fde91 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -211,6 +211,7 @@ typedef unsigned int pkvm_handle_t;
struct kvm_protected_vm {
pkvm_handle_t handle;
struct kvm_hyp_memcache teardown_mc;
+ bool enabled;
};
struct kvm_mpidr_data {
@@ -220,20 +221,10 @@ struct kvm_mpidr_data {
static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
- unsigned long mask = data->mpidr_mask;
- u64 aff = mpidr & MPIDR_HWID_BITMASK;
- int nbits, bit, bit_idx = 0;
- u16 index = 0;
+ unsigned long index = 0, mask = data->mpidr_mask;
+ unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
- /*
- * If this looks like RISC-V's BEXT or x86's PEXT
- * instructions, it isn't by accident.
- */
- nbits = fls(mask);
- for_each_set_bit(bit, &mask, nbits) {
- index |= (aff & BIT(bit)) >> (bit - bit_idx);
- bit_idx++;
- }
+ bitmap_gather(&index, &aff, &mask, fls(mask));
return index;
}
@@ -530,8 +521,42 @@ struct kvm_cpu_context {
u64 *vncr_array;
};
+/*
+ * This structure is instantiated on a per-CPU basis, and contains
+ * data that is:
+ *
+ * - tied to a single physical CPU, and
+ * - either have a lifetime that does not extend past vcpu_put()
+ * - or is an invariant for the lifetime of the system
+ *
+ * Use host_data_ptr(field) as a way to access a pointer to such a
+ * field.
+ */
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
+ struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+
+ /* Ownership of the FP regs */
+ enum {
+ FP_STATE_FREE,
+ FP_STATE_HOST_OWNED,
+ FP_STATE_GUEST_OWNED,
+ } fp_owner;
+
+ /*
+ * host_debug_state contains the host registers which are
+ * saved and restored during world switches.
+ */
+ struct {
+ /* {Break,watch}point registers */
+ struct kvm_guest_debug_arch regs;
+ /* Statistical profiling extension */
+ u64 pmscr_el1;
+ /* Self-hosted trace */
+ u64 trfcr_el1;
+ /* Values of trap registers for the host before guest entry. */
+ u64 mdcr_el2;
+ } host_debug_state;
};
struct kvm_host_psci_config {
@@ -592,19 +617,9 @@ struct kvm_vcpu_arch {
u64 mdcr_el2;
u64 cptr_el2;
- /* Values of trap registers for the host before guest entry. */
- u64 mdcr_el2_host;
-
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Ownership of the FP regs */
- enum {
- FP_STATE_FREE,
- FP_STATE_HOST_OWNED,
- FP_STATE_GUEST_OWNED,
- } fp_state;
-
/* Configuration flags, set once and for all before the vcpu can run */
u8 cflags;
@@ -627,11 +642,10 @@ struct kvm_vcpu_arch {
* We maintain more than a single set of debug registers to support
* debugging the guest from the host and to maintain separate host and
* guest state during world switches. vcpu_debug_state are the debug
- * registers of the vcpu as the guest sees them. host_debug_state are
- * the host registers which are saved and restored during
- * world switches. external_debug_state contains the debug
- * values we want to debug the guest. This is set via the
- * KVM_SET_GUEST_DEBUG ioctl.
+ * registers of the vcpu as the guest sees them.
+ *
+ * external_debug_state contains the debug values we want to debug the
+ * guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
*
* debug_ptr points to the set of debug registers that should be loaded
* onto the hardware when running the guest.
@@ -640,18 +654,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
- struct task_struct *parent_task;
-
- struct {
- /* {Break,watch}point registers */
- struct kvm_guest_debug_arch regs;
- /* Statistical profiling extension */
- u64 pmscr_el1;
- /* Self-hosted trace */
- u64 trfcr_el1;
- } host_debug_state;
-
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
@@ -817,8 +819,6 @@ struct kvm_vcpu_arch {
#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
-/* vcpu running in HYP context */
-#define VCPU_HYP_CONTEXT __vcpu_single_flag(iflags, BIT(7))
/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
@@ -896,7 +896,7 @@ struct kvm_vcpu_arch {
* Don't bother with VNCR-based accesses in the nVHE code, it has no
* business dealing with NV.
*/
-static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
+static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined (__KVM_NVHE_HYPERVISOR__)
if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
@@ -906,6 +906,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
return (u64 *)&ctxt->sys_regs[r];
}
+#define __ctxt_sys_reg(c,r) \
+ ({ \
+ BUILD_BUG_ON(__builtin_constant_p(r) && \
+ (r) >= NR_SYS_REGS); \
+ ___ctxt_sys_reg(c, r); \
+ })
+
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
@@ -1168,6 +1175,44 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
+/*
+ * How we access per-CPU host data depends on the where we access it from,
+ * and the mode we're in:
+ *
+ * - VHE and nVHE hypervisor bits use their locally defined instance
+ *
+ * - the rest of the kernel use either the VHE or nVHE one, depending on
+ * the mode we're running in.
+ *
+ * Unless we're in protected mode, fully deprivileged, and the nVHE
+ * per-CPU stuff is exclusively accessible to the protected EL2 code.
+ * In this case, the EL1 code uses the *VHE* data as its private state
+ * (which makes sense in a way as there shouldn't be any shared state
+ * between the host and the hypervisor).
+ *
+ * Yes, this is all totally trivial. Shoot me now.
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+#define host_data_ptr(f) (&this_cpu_ptr(&kvm_host_data)->f)
+#else
+#define host_data_ptr(f) \
+ (static_branch_unlikely(&kvm_protected_mode_initialized) ? \
+ &this_cpu_ptr(&kvm_host_data)->f : \
+ &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
+#endif
+
+/* Check whether the FP regs are owned by the guest */
+static inline bool guest_owns_fp_regs(void)
+{
+ return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
+}
+
+/* Check whether the FP regs are owned by the host */
+static inline bool host_owns_fp_regs(void)
+{
+ return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
+}
+
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -1211,7 +1256,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
-void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
@@ -1247,10 +1291,9 @@ struct kvm *kvm_arch_alloc_vm(void);
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
-static inline bool kvm_vm_is_protected(struct kvm *kvm)
-{
- return false;
-}
+#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
+
+#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
@@ -1275,6 +1318,8 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
+#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
@@ -1331,4 +1376,19 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+/* Check for a given level of PAuth support */
+#define kvm_has_pauth(k, l) \
+ ({ \
+ bool pa, pi, pa3; \
+ \
+ pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l); \
+ pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP); \
+ pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l); \
+ pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP); \
+ pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l); \
+ pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP); \
+ \
+ (pa + pi + pa3) == 1; \
+ })
+
#endif /* __ARM64_KVM_HOST_H__ */
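
Among the kvm_host.h changes above, kvm_mpidr_index() now delegates the bit-packing to bitmap_gather(); the deleted loop documented itself as the BEXT/PEXT operation, and a standalone model of that operation (not the kernel helper itself, assuming a 64-bit unsigned long) looks like this:

#include <stdio.h>

/* pack the bits of val selected by mask into the low bits of the result */
static unsigned long gather_bits(unsigned long val, unsigned long mask)
{
	unsigned long out = 0;
	int out_bit = 0;

	for (int bit = 0; bit < 64; bit++)
		if (mask & (1UL << bit))
			out |= ((val >> bit) & 1UL) << out_bit++;
	return out;
}

int main(void)
{
	/* e.g. a mask covering the two affinity bits per level actually in use */
	unsigned long mask = 0x0303;

	printf("index = %lu\n", gather_bits(0x0102, mask));
	return 0;
}
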
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 3e2a1ac0c9bb..3e80464f8953 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -80,8 +80,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
#ifdef __KVM_NVHE_HYPERVISOR__
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index c77d795556e1..5e0ab0596246 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -60,7 +60,20 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48);
}
+extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm);
+#ifdef CONFIG_ARM64_PTR_AUTH
+bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
+#else
+static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
+{
+ /* We really should never execute this... */
+ WARN_ON_ONCE(1);
+ *elr = 0xbad9acc0debadbad;
+ return false;
+}
+#endif
+
#endif /* __ARM64_KVM_NESTED_H */
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h
index 0cd0965255d2..d81bac256abc 100644
--- a/arch/arm64/include/asm/kvm_ptrauth.h
+++ b/arch/arm64/include/asm/kvm_ptrauth.h
@@ -99,5 +99,26 @@ alternative_else_nop_endif
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#else /* !__ASSEMBLY */
+
+#define __ptrauth_save_key(ctxt, key) \
+ do { \
+ u64 __val; \
+ __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
+ __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+ ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
+ } while(0)
+
+#define ptrauth_save_keys(ctxt) \
+ do { \
+ __ptrauth_save_key(ctxt, APIA); \
+ __ptrauth_save_key(ctxt, APIB); \
+ __ptrauth_save_key(ctxt, APDA); \
+ __ptrauth_save_key(ctxt, APDB); \
+ __ptrauth_save_key(ctxt, APGA); \
+ } while(0)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_KVM_PTRAUTH_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index ef207a0d4f0d..9943ff0af4c9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -297,6 +297,7 @@
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_TBID0 (UL(1) << 51)
#define TCR_TBID1 (UL(1) << 52)
#define TCR_NFD0 (UL(1) << 53)
#define TCR_NFD1 (UL(1) << 54)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index dd9ee67d1d87..b11cfb9fdd37 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -18,14 +18,21 @@
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
- * This bit indicates that the entry is present i.e. pmd_page()
- * still points to a valid huge page in memory even if the pmd
- * has been invalidated.
+ * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
+ * interpreted according to the HW layout by SW but any attempted HW access to
+ * the address will result in a fault. pte_present() returns true.
*/
-#define PMD_PRESENT_INVALID (_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */
+#define PTE_PRESENT_INVALID (PTE_NG) /* only when !PTE_VALID */
+
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+#define PTE_UFFD_WP (_AT(pteval_t, 1) << 58) /* uffd-wp tracking */
+#define PTE_SWP_UFFD_WP (_AT(pteval_t, 1) << 3) /* only for swp ptes */
+#else
+#define PTE_UFFD_WP (_AT(pteval_t, 0))
+#define PTE_SWP_UFFD_WP (_AT(pteval_t, 0))
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
@@ -103,7 +110,7 @@ static inline bool __pure lpa2_is_enabled(void)
__val; \
})
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PRESENT_INVALID | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
#define PAGE_SHARED __pgprot(_PAGE_SHARED)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_SHARED_EXEC)
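
PAGE_NONE is now encoded with PTE_PRESENT_INVALID (an alias for the nG bit, meaningful only when PTE_VALID is clear) instead of a dedicated PTE_PROT_NONE bit; the pgtable.h hunks below make pte_present() and pte_protnone() follow suit. A toy boolean model of those predicates (flags only, not the real bit layout):

#include <assert.h>
#include <stdbool.h>

struct pte_flags {
	bool valid;		/* PTE_VALID */
	bool present_invalid;	/* PTE_PRESENT_INVALID, only when !valid */
	bool user;		/* PTE_USER */
	bool user_exec;		/* user-executable mapping */
};

static bool pte_present(struct pte_flags p)
{
	return p.valid || p.present_invalid;
}

static bool pte_protnone(struct pte_flags p)
{
	/* present to SW, faulting in HW, with no user or user-exec rights */
	return p.present_invalid && !p.user && !p.user_exec;
}

int main(void)
{
	struct pte_flags prot_none = { .present_invalid = true };
	/* e.g. a pmd_mkinvalid()'d THP entry: present-invalid, not PROT_NONE */
	struct pte_flags invalid_user = { .present_invalid = true, .user = true };

	assert(pte_present(prot_none) && pte_protnone(prot_none));
	assert(pte_present(invalid_user) && !pte_protnone(invalid_user));
	return 0;
}
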
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index afdd56d26ad7..f8efbc128446 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -49,12 +49,6 @@
__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline bool arch_thp_swp_supported(void)
-{
- return !system_supports_mte();
-}
-#define arch_thp_swp_supported arch_thp_swp_supported
-
/*
* Outside of a few very special situations (e.g. hibernation), we always
* use broadcast TLB invalidation instructions, therefore a spurious page
@@ -105,7 +99,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_present(pte) (pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
@@ -132,6 +126,8 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+#define pte_present_invalid(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
* Execute-only user mappings do not have the PTE_USER bit set. All valid
* kernel mappings have the PTE_UXN bit set.
@@ -261,6 +257,13 @@ static inline pte_t pte_mkpresent(pte_t pte)
return set_pte_bit(pte, __pgprot(PTE_VALID));
}
+static inline pte_t pte_mkinvalid(pte_t pte)
+{
+ pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
+ pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
+ return pte;
+}
+
static inline pmd_t pmd_mkcont(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
@@ -271,9 +274,31 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}
-static inline void __set_pte(pte_t *ptep, pte_t pte)
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static inline int pte_uffd_wp(pte_t pte)
+{
+ return !!(pte_val(pte) & PTE_UFFD_WP);
+}
+
+static inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
+}
+
+static inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
+static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
WRITE_ONCE(*ptep, pte);
+}
+
+static inline void __set_pte(pte_t *ptep, pte_t pte)
+{
+ __set_pte_nosync(ptep, pte);
/*
* Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -463,13 +488,39 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
+}
+
+static inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
+}
+
+static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/linux/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
- return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
+ /*
+ * pte_present_invalid() tells us that the pte is invalid from HW
+ * perspective but present from SW perspective, so the fields are to be
+ * interpretted as per the HW layout. The second 2 checks are the unique
+ * encoding that we use for PROT_NONE. It is insufficient to only use
+ * the first check because we share the same encoding scheme with pmds
+ * which support pmd_mkinvalid(), so can be present-invalid without
+ * being PROT_NONE.
+ */
+ return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}
static inline int pmd_protnone(pmd_t pmd)
@@ -478,12 +529,7 @@ static inline int pmd_protnone(pmd_t pmd)
}
#endif
-#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
-
-static inline int pmd_present(pmd_t pmd)
-{
- return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
-}
+#define pmd_present(pmd) pte_present(pmd_pte(pmd))
/*
* THP definitions.
@@ -508,16 +554,16 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-
-static inline pmd_t pmd_mkinvalid(pmd_t pmd)
-{
- pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
- pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
-
- return pmd;
-}
-
-#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+#define pmd_mkinvalid(pmd) pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+#define pmd_uffd_wp(pmd) pte_uffd_wp(pmd_pte(pmd))
+#define pmd_mkuffd_wp(pmd) pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
+#define pmd_clear_uffd_wp(pmd) pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
+#define pmd_swp_uffd_wp(pmd) pte_swp_uffd_wp(pmd_pte(pmd))
+#define pmd_swp_mkuffd_wp(pmd) pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
+#define pmd_swp_clear_uffd_wp(pmd) \
+ pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
@@ -709,7 +755,11 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
+#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
+#else
+#define pud_leaf(pud) false
+#endif
#define pud_valid(pud) pte_valid(pud_pte(pud))
#define pud_user(pud) pte_user(pud_pte(pud))
#define pud_user_exec(pud) pte_user_exec(pud_pte(pud))
@@ -760,6 +810,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#else
+#define pud_valid(pud) false
#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
#define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */
@@ -1005,6 +1056,8 @@ static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
static inline bool pgtable_l5_enabled(void) { return false; }
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr) NULL
#define p4d_set_fixmap_offset(p4dp, addr) ((p4d_t *)p4dp)
@@ -1027,8 +1080,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
*/
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
- PTE_ATTRINDX_MASK;
+ PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
+ PTE_GP | PTE_ATTRINDX_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
@@ -1076,17 +1129,17 @@ static inline int pgd_devmap(pgd_t pgd)
#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
- return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
+ return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}
static inline bool pmd_user_accessible_page(pmd_t pmd)
{
- return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+ return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}
static inline bool pud_user_accessible_page(pud_t pud)
{
- return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
+ return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif
@@ -1227,6 +1280,46 @@ static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
__ptep_set_wrprotect(mm, address, ptep);
}
+static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, cydp_t flags)
+{
+ pte_t old_pte;
+
+ do {
+ old_pte = pte;
+
+ if (flags & CYDP_CLEAR_YOUNG)
+ pte = pte_mkold(pte);
+ if (flags & CYDP_CLEAR_DIRTY)
+ pte = pte_mkclean(pte);
+
+ pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
+ } while (pte_val(pte) != pte_val(old_pte));
+}
+
+static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags)
+{
+ pte_t pte;
+
+ for (;;) {
+ pte = __ptep_get(ptep);
+
+ if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
+ __set_pte(ptep, pte_mkclean(pte_mkold(pte)));
+ else
+ __clear_young_dirty_pte(vma, addr, ptep, pte, flags);
+
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
@@ -1248,15 +1341,16 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
* bits 2: remember PG_anon_exclusive
- * bits 3-7: swap type
- * bits 8-57: swap offset
- * bit 58: PTE_PROT_NONE (must be zero)
+ * bit 3: remember uffd-wp state
+ * bits 6-10: swap type
+ * bit 11: PTE_PRESENT_INVALID (must be zero)
+ * bits 12-61: swap offset
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 6
#define __SWP_TYPE_BITS 5
-#define __SWP_OFFSET_BITS 50
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
-#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_SHIFT 12
+#define __SWP_OFFSET_BITS 50
#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
@@ -1280,12 +1374,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#ifdef CONFIG_ARM64_MTE
#define __HAVE_ARCH_PREPARE_TO_SWAP
-static inline int arch_prepare_to_swap(struct page *page)
-{
- if (system_supports_mte())
- return mte_save_tags(page);
- return 0;
-}
+extern int arch_prepare_to_swap(struct folio *folio);
#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
@@ -1301,11 +1390,7 @@ static inline void arch_swap_invalidate_area(int type)
}
#define __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
-{
- if (system_supports_mte())
- mte_restore_tags(entry, &folio->page);
-}
+extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);
#endif /* CONFIG_ARM64_MTE */
@@ -1392,6 +1477,9 @@ extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t entry, int dirty);
+extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags);
static __always_inline void contpte_try_fold(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, pte_t pte)
@@ -1616,6 +1704,17 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}
+#define clear_young_dirty_ptes clear_young_dirty_ptes
+static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags)
+{
+ if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
+ __clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
+ else
+ contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
+}
+
#else /* CONFIG_ARM64_CONTPTE */
#define ptep_get __ptep_get
@@ -1635,6 +1734,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
#define wrprotect_ptes __wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags __ptep_set_access_flags
+#define clear_young_dirty_ptes __clear_young_dirty_ptes
#endif /* CONFIG_ARM64_CONTPTE */
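
The reworked swap-pte layout in the hunk above moves the type field to bits 6-10 and the offset to bits 12-61 so that bit 3 can carry the uffd-wp marker while bit 11 (PTE_PRESENT_INVALID) stays zero. A small self-check of that packing, mirroring the __SWP_* shifts purely for illustration (the __swp_entry() composition and a 64-bit pte are assumed):

#include <assert.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT		6
#define SWP_TYPE_BITS		5
#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	12
#define SWP_OFFSET_BITS		50
#define SWP_OFFSET_MASK		((1UL << SWP_OFFSET_BITS) - 1)

int main(void)
{
	unsigned long type = 3, offset = 0x12345;
	unsigned long val = (type << SWP_TYPE_SHIFT) |
			    (offset << SWP_OFFSET_SHIFT);

	/* decode with the same shifts/masks as __swp_type()/__swp_offset() */
	assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
	assert(((val >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
	/* bits 0-1 (valid), 3 (uffd-wp) and 11 (present-invalid) stay clear */
	assert((val & ((1UL << 0) | (1UL << 1) | (1UL << 3) | (1UL << 11))) == 0);

	printf("swp pte value = %#lx\n", val);
	return 0;
}
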
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9e8999592f3a..af3b206fa423 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1036,18 +1036,18 @@
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
*/
-#define PIE_NONE_O 0x0
-#define PIE_R_O 0x1
-#define PIE_X_O 0x2
-#define PIE_RX_O 0x3
-#define PIE_RW_O 0x5
-#define PIE_RWnX_O 0x6
-#define PIE_RWX_O 0x7
-#define PIE_R 0x8
-#define PIE_GCS 0x9
-#define PIE_RX 0xa
-#define PIE_RW 0xc
-#define PIE_RWX 0xe
+#define PIE_NONE_O UL(0x0)
+#define PIE_R_O UL(0x1)
+#define PIE_X_O UL(0x2)
+#define PIE_RX_O UL(0x3)
+#define PIE_RW_O UL(0x5)
+#define PIE_RWnX_O UL(0x6)
+#define PIE_RWX_O UL(0x7)
+#define PIE_R UL(0x8)
+#define PIE_GCS UL(0x9)
+#define PIE_RX UL(0xa)
+#define PIE_RW UL(0xc)
+#define PIE_RWX UL(0xe)
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
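A brief note on why the UL() wrapping matters (this is the assumed motivation, not quoted from the commit message): PIRx_ELx_PERM() shifts the permission value by idx * 4, and for the upper index slots that shift reaches the top half of a 64-bit register, which a plain int constant cannot express safely.

/* Worked example:
 *   old: 0xe << 60        - shifts a 32-bit int past its width (undefined)
 *   new: UL(0xe) << 60    - 0xe000000000000000, as intended
 */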
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3b0e8248e1a4..95fbc8c05607 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -142,17 +142,24 @@ static inline unsigned long get_trans_granule(void)
* EL1, Inner Shareable".
*
*/
-#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
- ({ \
- unsigned long __ta = (baddr); \
- unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
- __ta &= GENMASK_ULL(36, 0); \
- __ta |= __ttl << 37; \
- __ta |= (unsigned long)(num) << 39; \
- __ta |= (unsigned long)(scale) << 44; \
- __ta |= get_trans_granule() << 46; \
- __ta |= (unsigned long)(asid) << 48; \
- __ta; \
+#define TLBIR_ASID_MASK GENMASK_ULL(63, 48)
+#define TLBIR_TG_MASK GENMASK_ULL(47, 46)
+#define TLBIR_SCALE_MASK GENMASK_ULL(45, 44)
+#define TLBIR_NUM_MASK GENMASK_ULL(43, 39)
+#define TLBIR_TTL_MASK GENMASK_ULL(38, 37)
+#define TLBIR_BADDR_MASK GENMASK_ULL(36, 0)
+
+#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
+ ({ \
+ unsigned long __ta = 0; \
+ unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
+ __ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr); \
+ __ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl); \
+ __ta |= FIELD_PREP(TLBIR_NUM_MASK, num); \
+ __ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale); \
+ __ta |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule()); \
+ __ta |= FIELD_PREP(TLBIR_ASID_MASK, asid); \
+ __ta; \
})
/* These macros are used by the TLBI RANGE feature. */
@@ -161,12 +168,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
/*
- * Generate 'num' values from -1 to 30 with -1 rejected by the
- * __flush_tlb_range() loop below.
+ * Generate 'num' values from -1 to 31 with -1 rejected by the
+ * __flush_tlb_range() loop below. Its return value is only
+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+ * 'pages' is more than that, you must iterate over the overall
+ * range.
*/
-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
-#define __TLBI_RANGE_NUM(pages, scale) \
- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+#define __TLBI_RANGE_NUM(pages, scale) \
+ ({ \
+ int __pages = min((pages), \
+ __TLBI_RANGE_PAGES(31, (scale))); \
+ (__pages >> (5 * (scale) + 1)) - 1; \
+ })
/*
* TLB Invalidation
@@ -379,10 +392,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* 3. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
@@ -437,11 +446,11 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
* When not using TLB range ops, we can handle up to
* (MAX_DVM_OPS - 1) pages;
* When using TLB range ops, we can handle up to
- * (MAX_TLBI_RANGE_PAGES - 1) pages.
+ * MAX_TLBI_RANGE_PAGES pages.
*/
if ((!system_supports_tlb_range() &&
(end - start) >= (MAX_DVM_OPS * stride)) ||
- pages >= MAX_TLBI_RANGE_PAGES) {
+ pages > MAX_TLBI_RANGE_PAGES) {
flush_tlb_mm(vma->vm_mm);
return;
}
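For readers unfamiliar with the bitfield helpers used in the __TLBI_VADDR_RANGE rework above: FIELD_PREP(mask, val) (from linux/bitfield.h) shifts val into the bit positions covered by mask. A hand-rolled equivalent, for illustration only:

/* Sketch of what FIELD_PREP does (the real macro also adds compile-time checks): */
#define FIELD_PREP_EXAMPLE(mask, val) \
	(((unsigned long long)(val) << __builtin_ctzll(mask)) & (mask))

/* e.g. FIELD_PREP(TLBIR_SCALE_MASK, 2) == 2UL << 44 */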
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index a323b109b9c4..0f6ef432fb84 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -35,9 +35,9 @@ void update_freq_counters_refs(void);
/* Enable topology flag updates */
#define arch_update_cpu_topology topology_update_cpu_topology
-/* Replace task scheduler's default thermal pressure API */
-#define arch_scale_thermal_pressure topology_get_thermal_pressure
-#define arch_update_thermal_pressure topology_update_thermal_pressure
+/* Replace task scheduler's default HW pressure API */
+#define arch_scale_hw_pressure topology_get_hw_pressure
+#define arch_update_hw_pressure topology_update_hw_pressure
#include <asm-generic/topology.h>
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 261d6e9df2e1..ebf4a9f943ed 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -82,6 +82,12 @@ bool is_kvm_arm_initialised(void);
DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
+static inline bool is_pkvm_initialized(void)
+{
+ return IS_ENABLED(CONFIG_KVM) &&
+ static_branch_likely(&kvm_protected_mode_initialized);
+}
+
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
@@ -89,8 +95,7 @@ static inline bool is_hyp_mode_available(void)
* If KVM protected mode is initialized, all CPUs must have been booted
* in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
*/
- if (IS_ENABLED(CONFIG_KVM) &&
- static_branch_likely(&kvm_protected_mode_initialized))
+ if (is_pkvm_initialized())
return true;
return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
@@ -104,8 +109,7 @@ static inline bool is_hyp_mode_mismatched(void)
* If KVM protected mode is initialized, all CPUs must have been booted
* in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
*/
- if (IS_ENABLED(CONFIG_KVM) &&
- static_branch_likely(&kvm_protected_mode_initialized))
+ if (is_pkvm_initialized())
return false;
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index dba8fcec7f33..e0e7b93c16cc 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -26,6 +26,7 @@
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
+#include <linux/suspend.h>
#include <linux/pgtable.h>
#include <acpi/ghes.h>
@@ -227,6 +228,15 @@ done:
if (earlycon_acpi_spcr_enable)
early_init_dt_scan_chosen_stdout();
} else {
+#ifdef CONFIG_HIBERNATION
+ struct acpi_table_header *facs = NULL;
+ acpi_get_table(ACPI_SIG_FACS, 1, &facs);
+ if (facs) {
+ swsusp_hardware_signature =
+ ((struct acpi_table_facs *)facs)->hardware_signature;
+ acpi_put_table(facs);
+ }
+#endif
acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9afcc690fe73..4a92096db34e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -10,6 +10,7 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/screen_info.h>
+#include <linux/vmalloc.h>
#include <asm/efi.h>
#include <asm/stacktrace.h>
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ce08b744aaab..cb68adcabe07 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -289,8 +289,28 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2
+
+ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+ pre_disable_mmu_workaround
+ msr sctlr_el2, x0
+ isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
+
+ /*
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+ * RES1 in that case. Publish the E2H bit early so that
+ * it can be picked up by the init_el2_state macro.
+ *
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+ * don't advertise it (they predate this relaxation).
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
+
+ orr x0, x0, #HCR_E2H
+1:
msr hcr_el2, x0
isb
@@ -303,30 +323,16 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
mov_q x1, INIT_SCTLR_EL1_MMU_OFF
- /*
- * Compliant CPUs advertise their VHE-onlyness with
- * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
- * RES1 in that case.
- *
- * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
- * don't advertise it (they predate this relaxation).
- */
- mrs_s x0, SYS_ID_AA64MMFR4_EL1
- ubfx x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
- tbnz x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
mrs x0, hcr_el2
and x0, x0, #HCR_E2H
cbz x0, 2f
-1:
+
/* Set a sane SCTLR_EL1, the VHE way */
- pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
b 3f
2:
- pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
3:
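A short aside on the tbz in the hunk above: ID_AA64MMFR4_EL1.E2H0 is a signed 4-bit ID field, so "E2H0 < 0" is detected by testing the field's top (sign) bit. A C rendition of the same test, purely illustrative:

/* Mirrors 'tbz x1, #(E2H0_SHIFT + E2H0_WIDTH - 1), 1f' above:
 * if the sign bit is clear, E2H0 >= 0 and HCR_EL2.E2H is left alone. */
static bool e2h0_is_negative_example(u64 mmfr4)
{
	return mmfr4 & BIT_ULL(ID_AA64MMFR4_EL1_E2H0_SHIFT +
			       ID_AA64MMFR4_EL1_E2H0_WIDTH - 1);
}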
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 2f5755192c2b..722ac45f9f7b 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -655,7 +655,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
perf_bp_event(bp, regs);
/* Do we need to handle the stepping? */
- if (uses_default_overflow_handler(bp))
+ if (is_default_overflow_handler(bp))
step = 1;
unlock:
rcu_read_unlock();
@@ -734,7 +734,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
static int watchpoint_report(struct perf_event *wp, unsigned long addr,
struct pt_regs *regs)
{
- int step = uses_default_overflow_handler(wp);
+ int step = is_default_overflow_handler(wp);
struct arch_hw_breakpoint *info = counter_arch_bp(wp);
info->trigger = addr;
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index aa7a4ec6a3ae..ef48089fbfe1 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -38,6 +38,48 @@ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
EXPORT_SYMBOL(__memcpy_fromio);
/*
+ * This generates a memcpy that works on a from/to address which is aligned to
+ * bits. Count is in terms of the number of bits-sized quantities to copy. It
+ * optimizes to use the STR groupings when possible so that it is WC friendly.
+ */
+#define memcpy_toio_aligned(to, from, count, bits) \
+ ({ \
+ volatile u##bits __iomem *_to = to; \
+ const u##bits *_from = from; \
+ size_t _count = count; \
+ const u##bits *_end_from = _from + ALIGN_DOWN(_count, 8); \
+ \
+ for (; _from < _end_from; _from += 8, _to += 8) \
+ __const_memcpy_toio_aligned##bits(_to, _from, 8); \
+ if ((_count % 8) >= 4) { \
+ __const_memcpy_toio_aligned##bits(_to, _from, 4); \
+ _from += 4; \
+ _to += 4; \
+ } \
+ if ((_count % 4) >= 2) { \
+ __const_memcpy_toio_aligned##bits(_to, _from, 2); \
+ _from += 2; \
+ _to += 2; \
+ } \
+ if (_count % 2) \
+ __const_memcpy_toio_aligned##bits(_to, _from, 1); \
+ })
+
+void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count)
+{
+ memcpy_toio_aligned(to, from, count, 64);
+ dgh();
+}
+EXPORT_SYMBOL(__iowrite64_copy_full);
+
+void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count)
+{
+ memcpy_toio_aligned(to, from, count, 32);
+ dgh();
+}
+EXPORT_SYMBOL(__iowrite32_copy_full);
+
+/*
* Copy data from "real" memory space to IO memory space.
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
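A hedged usage sketch of the new export (hypothetical driver code; the doorbell/WQE names are invented for illustration): count is in units of the element size, so a 64-byte write through the 64-bit variant uses count = 8.

/* Hypothetical example: post a 64-byte work-queue entry to a
 * write-combining doorbell page in eight 8-byte stores. */
static void post_wqe_example(void __iomem *db, const u64 *wqe)
{
	__iowrite64_copy_full(db, wqe, 8);	/* 8 x u64 = 64 bytes */
}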
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 47e0be610bb6..36b25af56324 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -12,144 +12,18 @@
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
-#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>
-#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
-static u64 module_direct_base __ro_after_init = 0;
-static u64 module_plt_base __ro_after_init = 0;
-
-/*
- * Choose a random page-aligned base address for a window of 'size' bytes which
- * entirely contains the interval [start, end - 1].
- */
-static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
-{
- u64 max_pgoff, pgoff;
-
- if ((end - start) >= size)
- return 0;
-
- max_pgoff = (size - (end - start)) / PAGE_SIZE;
- pgoff = get_random_u32_inclusive(0, max_pgoff);
-
- return start - pgoff * PAGE_SIZE;
-}
-
-/*
- * Modules may directly reference data and text anywhere within the kernel
- * image and other modules. References using PREL32 relocations have a +/-2G
- * range, and so we need to ensure that the entire kernel image and all modules
- * fall within a 2G window such that these are always within range.
- *
- * Modules may directly branch to functions and code within the kernel text,
- * and to functions and code within other modules. These branches will use
- * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
- * that the entire kernel text and all module text falls within a 128M window
- * such that these are always within range. With PLTs, we can expand this to a
- * 2G window.
- *
- * We chose the 128M region to surround the entire kernel image (rather than
- * just the text) as using the same bounds for the 128M and 2G regions ensures
- * by construction that we never select a 128M region that is not a subset of
- * the 2G region. For very large and unusual kernel configurations this means
- * we may fall back to PLTs where they could have been avoided, but this keeps
- * the logic significantly simpler.
- */
-static int __init module_init_limits(void)
-{
- u64 kernel_end = (u64)_end;
- u64 kernel_start = (u64)_text;
- u64 kernel_size = kernel_end - kernel_start;
-
- /*
- * The default modules region is placed immediately below the kernel
- * image, and is large enough to use the full 2G relocation range.
- */
- BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
- BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
-
- if (!kaslr_enabled()) {
- if (kernel_size < SZ_128M)
- module_direct_base = kernel_end - SZ_128M;
- if (kernel_size < SZ_2G)
- module_plt_base = kernel_end - SZ_2G;
- } else {
- u64 min = kernel_start;
- u64 max = kernel_end;
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
- pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
- } else {
- module_direct_base = random_bounding_box(SZ_128M, min, max);
- if (module_direct_base) {
- min = module_direct_base;
- max = module_direct_base + SZ_128M;
- }
- }
-
- module_plt_base = random_bounding_box(SZ_2G, min, max);
- }
-
- pr_info("%llu pages in range for non-PLT usage",
- module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
- pr_info("%llu pages in range for PLT usage",
- module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
-
- return 0;
-}
-subsys_initcall(module_init_limits);
-
-void *module_alloc(unsigned long size)
-{
- void *p = NULL;
-
- /*
- * Where possible, prefer to allocate within direct branch range of the
- * kernel such that no PLTs are necessary.
- */
- if (module_direct_base) {
- p = __vmalloc_node_range(size, MODULE_ALIGN,
- module_direct_base,
- module_direct_base + SZ_128M,
- GFP_KERNEL | __GFP_NOWARN,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
-
- if (!p && module_plt_base) {
- p = __vmalloc_node_range(size, MODULE_ALIGN,
- module_plt_base,
- module_plt_base + SZ_2G,
- GFP_KERNEL | __GFP_NOWARN,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
-
- if (!p) {
- pr_warn_ratelimited("%s: unable to allocate memory\n",
- __func__);
- }
-
- if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
- vfree(p);
- return NULL;
- }
-
- /* Memory is intended to be executable, reset the pointer tag. */
- return kasan_reset_tag(p);
-}
-
enum aarch64_reloc_op {
RELOC_OP_NONE,
RELOC_OP_ABS,
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 6d157f32187b..e8ed5673f481 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -10,94 +10,12 @@
#include <asm/pointer_auth.h>
-struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry_ctx *entry)
-{
- struct frame_tail buftail;
- unsigned long err;
- unsigned long lr;
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(tail, sizeof(buftail)))
- return NULL;
-
- pagefault_disable();
- err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
- pagefault_enable();
-
- if (err)
- return NULL;
-
- lr = ptrauth_strip_user_insn_pac(buftail.lr);
-
- perf_callchain_store(entry, lr);
-
- /*
- * Frame pointers should strictly progress back up the stack
- * (towards higher addresses).
- */
- if (tail >= buftail.fp)
- return NULL;
-
- return buftail.fp;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
- compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
- u32 sp;
- u32 lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
- struct perf_callchain_entry_ctx *entry)
+static bool callchain_trace(void *data, unsigned long pc)
{
- struct compat_frame_tail buftail;
- unsigned long err;
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(tail, sizeof(buftail)))
- return NULL;
-
- pagefault_disable();
- err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
- pagefault_enable();
-
- if (err)
- return NULL;
-
- perf_callchain_store(entry, buftail.lr);
-
- /*
- * Frame pointers should strictly progress back up the stack
- * (towards higher addresses).
- */
- if (tail + 1 >= (struct compat_frame_tail __user *)
- compat_ptr(buftail.fp))
- return NULL;
+ struct perf_callchain_entry_ctx *entry = data;
- return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+ return perf_callchain_store(entry, pc) == 0;
}
-#endif /* CONFIG_COMPAT */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
@@ -107,35 +25,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
return;
}
- perf_callchain_store(entry, regs->pc);
-
- if (!compat_user_mode(regs)) {
- /* AARCH64 mode */
- struct frame_tail __user *tail;
-
- tail = (struct frame_tail __user *)regs->regs[29];
-
- while (entry->nr < entry->max_stack &&
- tail && !((unsigned long)tail & 0x7))
- tail = user_backtrace(tail, entry);
- } else {
-#ifdef CONFIG_COMPAT
- /* AARCH32 compat mode */
- struct compat_frame_tail __user *tail;
-
- tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
- while ((entry->nr < entry->max_stack) &&
- tail && !((unsigned long)tail & 0x3))
- tail = compat_user_backtrace(tail, entry);
-#endif
- }
-}
-
-static bool callchain_trace(void *data, unsigned long pc)
-{
- struct perf_callchain_entry_ctx *entry = data;
- return perf_callchain_store(entry, pc) == 0;
+ arch_stack_walk_user(callchain_trace, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
index 4393b41f0b71..4d11a8c29181 100644
--- a/arch/arm64/kernel/pi/Makefile
+++ b/arch/arm64/kernel/pi/Makefile
@@ -19,12 +19,6 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
hostprogs := relacheck
quiet_cmd_piobjcopy = $(quiet_cmd_objcopy)
diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index aad399796e81..29d4b6244a6f 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -108,6 +108,7 @@ static const struct ftr_set_desc pfr0 __prel64_initconst = {
.override = &id_aa64pfr0_override,
.fields = {
FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
+ FIELD("el0", ID_AA64PFR0_EL1_EL0_SHIFT, NULL),
{}
},
};
@@ -209,8 +210,8 @@ static const struct {
char alias[FTR_ALIAS_NAME_LEN];
char feature[FTR_ALIAS_OPTION_LEN];
} aliases[] __initconst = {
- { "kvm_arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
- { "kvm_arm.mode=protected", "id_aa64mmfr1.vh=0" },
+ { "kvm_arm.mode=nvhe", "arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" },
+ { "kvm_arm.mode=protected", "arm64_sw.hvhe=1" },
{ "arm64.nosve", "id_aa64pfr0.sve=0" },
{ "arm64.nosme", "id_aa64pfr1.sme=0" },
{ "arm64.nobti", "id_aa64pfr1.bt=0" },
@@ -223,6 +224,7 @@ static const struct {
{ "nokaslr", "arm64_sw.nokaslr=1" },
{ "rodata=off", "arm64_sw.rodataoff=1" },
{ "arm64.nolva", "id_aa64mmfr2.varange=0" },
+ { "arm64.no32bit_el0", "id_aa64pfr0.el0=1" },
};
static int __init parse_hexdigit(const char *p, u64 *v)
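Usage example for the new alias (the expansion is exactly what the table above maps it to):

  # kernel command line
  arm64.no32bit_el0
  # equivalent underlying ID-register override
  id_aa64pfr0.el0=1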
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 327855a11df2..4268678d0e86 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -129,13 +129,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-void *alloc_insn_page(void)
-{
- return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __builtin_return_address(0));
-}
-
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 162b030ab9da..0d022599eb61 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -761,7 +761,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
{
unsigned int vq;
bool active;
- bool fpsimd_only;
enum vec_type task_type;
memset(header, 0, sizeof(*header));
@@ -777,12 +776,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
case ARM64_VEC_SVE:
if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
header->flags |= SVE_PT_VL_INHERIT;
- fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
break;
case ARM64_VEC_SME:
if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
header->flags |= SVE_PT_VL_INHERIT;
- fpsimd_only = false;
break;
default:
WARN_ON_ONCE(1);
@@ -790,7 +787,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
}
if (active) {
- if (fpsimd_only) {
+ if (target->thread.fp_type == FP_STATE_FPSIMD) {
header->flags |= SVE_PT_REGS_FPSIMD;
} else {
header->flags |= SVE_PT_REGS_SVE;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 65a052bf741f..a096e2451044 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -298,8 +298,15 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
dynamic_scs_init();
/*
- * Unmask SError as soon as possible after initializing earlycon so
- * that we can report any SErrors immediately.
+ * The primary CPU enters the kernel with all DAIF exceptions masked.
+ *
+ * We must unmask Debug and SError before preemption or scheduling is
+ * possible to ensure that these are consistently unmasked across
+ * threads, and we want to unmask SError as soon as possible after
+ * initializing earlycon so that we can report any SErrors immediately.
+ *
+ * IRQ and FIQ will be unmasked after the root irqchip has been
+ * detected and initialized.
*/
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4ced34f62dab..31c8b3094dd7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -264,6 +264,13 @@ asmlinkage notrace void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
+ /*
+ * Secondary CPUs enter the kernel with all DAIF exceptions masked.
+ *
+ * As with setup_arch() we must unmask Debug and SError exceptions, and
+ * as the root irqchip has already been detected and initialized we can
+ * unmask IRQ and FIQ at the same time.
+ */
local_daif_restore(DAIF_PROCCTX);
/*
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 684c26511696..6b3258860377 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -324,3 +324,123 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
dump_backtrace(NULL, tsk, loglvl);
barrier();
}
+
+/*
+ * The struct describing a userspace stack frame in AARCH64 mode.
+ */
+struct frame_tail {
+ struct frame_tail __user *fp;
+ unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+unwind_user_frame(struct frame_tail __user *tail, void *cookie,
+ stack_trace_consume_fn consume_entry)
+{
+ struct frame_tail buftail;
+ unsigned long err;
+ unsigned long lr;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ lr = ptrauth_strip_user_insn_pac(buftail.lr);
+
+ if (!consume_entry(cookie, lr))
+ return NULL;
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail >= buftail.fp)
+ return NULL;
+
+ return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
+ stack_trace_consume_fn consume_entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ if (!consume_entry(cookie, buftail.lr))
+ return NULL;
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ if (!consume_entry(cookie, regs->pc))
+ return;
+
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+ while (tail && !((unsigned long)tail & 0x7))
+ tail = unwind_user_frame(tail, cookie, consume_entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+ while (tail && !((unsigned long)tail & 0x3))
+ tail = unwind_compat_user_frame(tail, cookie, consume_entry);
+#endif
+ }
+}
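The walker above drives a stack_trace_consume_fn callback: returning true continues the unwind, returning false stops it. A minimal hypothetical consumer (not part of the patch) that just counts user frames:

struct frame_count_example { unsigned int nr; };

static bool count_frame_example(void *cookie, unsigned long pc)
{
	struct frame_count_example *fc = cookie;

	fc->nr++;
	return fc->nr < 64;	/* stop after 64 entries */
}

/* usage: arch_stack_walk_user(count_frame_example, &fc, regs); */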
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 8818287f1095..d63930c82839 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -40,11 +40,6 @@ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
$(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
$(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \
-Wmissing-prototypes -Wmissing-declarations
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-UBSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-KCOV_INSTRUMENT := n
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
@@ -52,9 +47,6 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
@@ -68,7 +60,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index f5f80fdce0fe..cc4508c604b2 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -136,7 +136,7 @@ $(obj)/vdso32.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
$(call if_changed,vdsomunge)
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold_and_vdso_check)
# Compilation rules for the vDSO sources
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index c0c050e53157..a6497228c5a8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -3,7 +3,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(src)
include $(srctree)/virt/kvm/Makefile.kvm
@@ -23,6 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
+kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
always-y := hyp_constants.h hyp-constants.s
@@ -30,7 +31,7 @@ define rule_gen_hyp_constants
$(call filechk,offsets,__HYP_CONSTANTS_H__)
endef
-CFLAGS_hyp-constants.o = -I $(srctree)/$(src)/hyp/include
+CFLAGS_hyp-constants.o = -I $(src)/hyp/include
$(obj)/hyp-constants.s: $(src)/hyp/hyp-constants.c FORCE
$(call if_changed_dep,cc_s_c)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3dee5490eea9..9996a989b52e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -35,10 +35,11 @@
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
-#include <asm/kvm_emulate.h>
+#include <asm/kvm_ptrauth.h>
#include <asm/sections.h>
#include <kvm/arm_hypercalls.h>
@@ -69,15 +70,42 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
+/*
+ * This functions as an allow-list of protected VM capabilities.
+ * Features not explicitly allowed by this function are denied.
+ */
+static bool pkvm_ext_allowed(struct kvm *kvm, long ext)
+{
+ switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ case KVM_CAP_ARM_PSCI:
+ case KVM_CAP_ARM_PSCI_0_2:
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ case KVM_CAP_MAX_VCPU_ID:
+ case KVM_CAP_MSI_DEVID:
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ case KVM_CAP_ARM_PMU_V3:
+ case KVM_CAP_ARM_SVE:
+ case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+ case KVM_CAP_ARM_PTRAUTH_GENERIC:
+ return true;
+ default:
+ return false;
+ }
+}
+
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
- int r;
- u64 new_cap;
+ int r = -EINVAL;
if (cap->flags)
return -EINVAL;
+ if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap))
+ return -EINVAL;
+
switch (cap->cap) {
case KVM_CAP_ARM_NISV_TO_USER:
r = 0;
@@ -86,9 +114,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
break;
case KVM_CAP_ARM_MTE:
mutex_lock(&kvm->lock);
- if (!system_supports_mte() || kvm->created_vcpus) {
- r = -EINVAL;
- } else {
+ if (system_supports_mte() && !kvm->created_vcpus) {
r = 0;
set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
}
@@ -99,25 +125,22 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
break;
case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
- new_cap = cap->args[0];
-
mutex_lock(&kvm->slots_lock);
/*
* To keep things simple, allow changing the chunk
* size only when no memory slots have been created.
*/
- if (!kvm_are_all_memslots_empty(kvm)) {
- r = -EINVAL;
- } else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
- r = -EINVAL;
- } else {
- r = 0;
- kvm->arch.mmu.split_page_chunk_size = new_cap;
+ if (kvm_are_all_memslots_empty(kvm)) {
+ u64 new_cap = cap->args[0];
+
+ if (!new_cap || kvm_is_block_size_supported(new_cap)) {
+ r = 0;
+ kvm->arch.mmu.split_page_chunk_size = new_cap;
+ }
}
mutex_unlock(&kvm->slots_lock);
break;
default:
- r = -EINVAL;
break;
}
@@ -195,6 +218,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
kvm_sys_regs_create_debugfs(kvm);
}
+static void kvm_destroy_mpidr_data(struct kvm *kvm)
+{
+ struct kvm_mpidr_data *data;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ data = rcu_dereference_protected(kvm->arch.mpidr_data,
+ lockdep_is_held(&kvm->arch.config_lock));
+ if (data) {
+ rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
+ synchronize_rcu();
+ kfree(data);
+ }
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
/**
* kvm_arch_destroy_vm - destroy the VM data structure
* @kvm: pointer to the KVM struct
@@ -209,7 +249,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
- kfree(kvm->arch.mpidr_data);
+ kvm_destroy_mpidr_data(kvm);
+
kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);
@@ -218,9 +259,47 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_arm_teardown_hypercalls(kvm);
}
+static bool kvm_has_full_ptr_auth(void)
+{
+ bool apa, gpa, api, gpi, apa3, gpa3;
+ u64 isar1, isar2, val;
+
+ /*
+ * Check that:
+ *
+ * - both Address and Generic auth are implemented for a given
+ * algorithm (Q5, IMPDEF or Q3)
+ * - only a single algorithm is implemented.
+ */
+ if (!system_has_full_ptr_auth())
+ return false;
+
+ isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+ isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+
+ apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
+ val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
+ gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
+
+ api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
+ val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
+ gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
+
+ apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
+ val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
+ gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
+
+ return (apa == gpa && api == gpi && apa3 == gpa3 &&
+ (apa + api + apa3) == 1);
+}
+
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
+
+ if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext))
+ return 0;
+
switch (ext) {
case KVM_CAP_IRQCHIP:
r = vgic_present;
@@ -311,7 +390,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_ARM_PTRAUTH_ADDRESS:
case KVM_CAP_ARM_PTRAUTH_GENERIC:
- r = system_has_full_ptr_auth();
+ r = kvm_has_full_ptr_auth();
break;
case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
if (kvm)
@@ -378,12 +457,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
- /*
- * Default value for the FP state, will be overloaded at load
- * time if we support FP (pretty likely)
- */
- vcpu->arch.fp_state = FP_STATE_FREE;
-
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
@@ -395,6 +468,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ /*
+ * This vCPU may have been created after mpidr_data was initialized.
+ * Throw out the pre-computed mappings if that is the case which forces
+ * KVM to fall back to iteratively searching the vCPUs.
+ */
+ kvm_destroy_mpidr_data(vcpu->kvm);
+
err = kvm_vgic_vcpu_init(vcpu);
if (err)
return err;
@@ -428,6 +508,44 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
}
+static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_ptrauth(vcpu)) {
+ /*
+ * Either we're running an L2 guest, and the API/APK
+ * bits come from L1's HCR_EL2, or API/APK are both set.
+ */
+ if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+ u64 val;
+
+ val = __vcpu_sys_reg(vcpu, HCR_EL2);
+ val &= (HCR_API | HCR_APK);
+ vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+ vcpu->arch.hcr_el2 |= val;
+ } else {
+ vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+ }
+
+ /*
+ * Save the host keys if there is any chance for the guest
+ * to use pauth, as the entry code will reload the guest
+ * keys in that case.
+ * Protected mode is the exception to that rule, as the
+ * entry into the EL2 code eagerly switches back and forth
+ * between host and hyp keys (and kvm_hyp_ctxt is out of
+ * reach anyway).
+ */
+ if (is_protected_kvm_enabled())
+ return;
+
+ if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
+ struct kvm_cpu_context *ctxt;
+ ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
+ ptrauth_save_keys(ctxt);
+ }
+ }
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_s2_mmu *mmu;
@@ -466,8 +584,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
else
vcpu_set_wfx_traps(vcpu);
- if (vcpu_has_ptrauth(vcpu))
- vcpu_ptrauth_disable(vcpu);
+ vcpu_set_pauth_traps(vcpu);
+
kvm_arch_vcpu_load_debug_state_flags(vcpu);
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
@@ -580,11 +698,6 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
}
#endif
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
- return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
-}
-
static void kvm_init_mpidr_data(struct kvm *kvm)
{
struct kvm_mpidr_data *data = NULL;
@@ -594,7 +707,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
mutex_lock(&kvm->arch.config_lock);
- if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+ if (rcu_access_pointer(kvm->arch.mpidr_data) ||
+ atomic_read(&kvm->online_vcpus) == 1)
goto out;
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -631,7 +745,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
data->cmpidr_to_idx[index] = c;
}
- kvm->arch.mpidr_data = data;
+ rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
mutex_unlock(&kvm->arch.config_lock);
}
@@ -790,9 +904,8 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
* doorbells to be signalled, should an interrupt become pending.
*/
preempt_disable();
- kvm_vgic_vmcr_sync(vcpu);
vcpu_set_flag(vcpu, IN_WFI);
- vgic_v4_put(vcpu);
+ kvm_vgic_put(vcpu);
preempt_enable();
kvm_vcpu_halt(vcpu);
@@ -800,7 +913,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
preempt_disable();
vcpu_clear_flag(vcpu, IN_WFI);
- vgic_v4_load(vcpu);
+ kvm_vgic_load(vcpu);
preempt_enable();
}
@@ -980,7 +1093,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
- if (ret)
+ if (ret <= 0)
return ret;
}
@@ -1270,7 +1383,7 @@ static unsigned long system_supported_vcpu_features(void)
if (!system_supports_sve())
clear_bit(KVM_ARM_VCPU_SVE, &features);
- if (!system_has_full_ptr_auth()) {
+ if (!kvm_has_full_ptr_auth()) {
clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
}
@@ -1971,7 +2084,7 @@ static void cpu_set_hyp_vector(void)
static void cpu_hyp_init_context(void)
{
- kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
+ kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
if (!is_kernel_in_hyp_mode())
cpu_init_hyp_mode();
@@ -2470,21 +2583,27 @@ out_err:
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL;
+ struct kvm_mpidr_data *data;
unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
- if (kvm->arch.mpidr_data) {
- u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+ rcu_read_lock();
+ data = rcu_dereference(kvm->arch.mpidr_data);
+
+ if (data) {
+ u16 idx = kvm_mpidr_index(data, mpidr);
- vcpu = kvm_get_vcpu(kvm,
- kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+ vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL;
+ }
+
+ rcu_read_unlock();
+ if (vcpu)
return vcpu;
- }
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
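The mpidr_data conversion above follows the usual RCU publish/retire pattern; a generic sketch of the shape, with gp and lock as placeholder names (illustration only):

	/* reader */
	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		use(p);
	rcu_read_unlock();

	/* writer, under the lock that serializes updates */
	old = rcu_dereference_protected(gp, lockdep_is_held(&lock));
	rcu_assign_pointer(gp, NULL);
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(old);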
@@ -2597,14 +2716,11 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
- if (is_protected_kvm_enabled()) {
- kvm_info("Protected nVHE mode initialized successfully\n");
- } else if (in_hyp_mode) {
- kvm_info("VHE mode initialized successfully\n");
- } else {
- char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
- kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
- }
+ kvm_info("%s%sVHE mode initialized successfully\n",
+ in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
+ "Protected " : "Hyp "),
+ in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
+ "h" : "n"));
/*
* FIXME: Do something reasonable if kvm_init() fails after pKVM
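For reference, the consolidated kvm_info() format above resolves to one of the following messages, derived by reading the ternaries rather than quoted from a boot log:

  VHE mode initialized successfully
  Protected nVHE mode initialized successfully   (or "Protected hVHE ..." with ARM64_KVM_HVHE)
  Hyp nVHE mode initialized successfully         (or "Hyp hVHE ..." with ARM64_KVM_HVHE)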
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 4697ba41b3a9..72d733c74a38 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2117,6 +2117,26 @@ inject:
return true;
}
+static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ bool control_bit_set;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+ if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+ }
+ return false;
+}
+
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+ return forward_traps(vcpu, HCR_TSC);
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
@@ -2152,37 +2172,39 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
{
- u64 spsr, elr, mode;
- bool direct_eret;
+ u64 spsr, elr, esr;
/*
- * Going through the whole put/load motions is a waste of time
- * if this is a VHE guest hypervisor returning to its own
- * userspace, or the hypervisor performing a local exception
- * return. No need to save/restore registers, no need to
- * switch S2 MMU. Just do the canonical ERET.
+ * Forward this trap to the virtual EL2 if the virtual
+ * HCR_EL2.NV bit is set and this is coming from !EL2.
*/
- spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
- spsr = kvm_check_illegal_exception_return(vcpu, spsr);
-
- mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
-
- direct_eret = (mode == PSR_MODE_EL0t &&
- vcpu_el2_e2h_is_set(vcpu) &&
- vcpu_el2_tge_is_set(vcpu));
- direct_eret |= (mode == PSR_MODE_EL2h || mode == PSR_MODE_EL2t);
-
- if (direct_eret) {
- *vcpu_pc(vcpu) = vcpu_read_sys_reg(vcpu, ELR_EL2);
- *vcpu_cpsr(vcpu) = spsr;
- trace_kvm_nested_eret(vcpu, *vcpu_pc(vcpu), spsr);
+ if (forward_traps(vcpu, HCR_NV))
return;
+
+ /* Check for an ERETAx */
+ esr = kvm_vcpu_get_esr(vcpu);
+ if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
+ /*
+ * Oh no, ERETAx failed to authenticate. If we have
+ * FPACCOMBINE, deliver an exception right away. If we
+ * don't, then let the mangled ELR value trickle down the
+ * ERET handling, and the guest will have a little surprise.
+ */
+ if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+ esr &= ESR_ELx_ERET_ISS_ERETA;
+ esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+ kvm_inject_nested_sync(vcpu, esr);
+ return;
+ }
}
preempt_disable();
kvm_arch_vcpu_put(vcpu);
- elr = __vcpu_sys_reg(vcpu, ELR_EL2);
+ spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
+ spsr = kvm_check_illegal_exception_return(vcpu, spsr);
+ if (!esr_iss_is_eretax(esr))
+ elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 826307e19e3a..1807d3a79a8a 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -14,19 +14,6 @@
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
-void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
-{
- struct task_struct *p = vcpu->arch.parent_task;
- struct user_fpsimd_state *fpsimd;
-
- if (!is_protected_kvm_enabled() || !p)
- return;
-
- fpsimd = &p->thread.uw.fpsimd_state;
- kvm_unshare_hyp(fpsimd, fpsimd + 1);
- put_task_struct(p);
-}
-
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from
@@ -38,30 +25,18 @@ void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
*/
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
- int ret;
-
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
+ int ret;
- kvm_vcpu_unshare_task_fp(vcpu);
+ /* pKVM has its own tracking of the host fpsimd state. */
+ if (is_protected_kvm_enabled())
+ return 0;
/* Make sure the host task fpsimd state is visible to hyp: */
ret = kvm_share_hyp(fpsimd, fpsimd + 1);
if (ret)
return ret;
- vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
-
- /*
- * We need to keep current's task_struct pinned until its data has been
- * unshared with the hypervisor to make sure it is not re-used by the
- * kernel and donated to someone else while already shared -- see
- * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
- */
- if (is_protected_kvm_enabled()) {
- get_task_struct(current);
- vcpu->arch.parent_task = current;
- }
-
return 0;
}
@@ -86,7 +61,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
* FP_STATE_FREE if the flag set.
*/
- vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+ *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
@@ -110,7 +86,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
* been saved, this is very unlikely to happen.
*/
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
- vcpu->arch.fp_state = FP_STATE_FREE;
+ *host_data_ptr(fp_owner) = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
@@ -126,7 +102,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
- vcpu->arch.fp_state = FP_STATE_FREE;
+ *host_data_ptr(fp_owner) = FP_STATE_FREE;
}
/*
@@ -142,8 +118,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
-
+ if (guest_owns_fp_regs()) {
/*
* Currently we do not support SME guests so SVCR is
* always 0 and we just need a variable to point to.
@@ -196,16 +171,38 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
isb();
}
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+ if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
- /* Restore the VL that was saved when bound to the CPU */
+ /*
+ * Restore the VL that was saved when bound to the CPU,
+ * which is the maximum VL for the guest. Because the
+ * layout of the data when saving the sve state depends
+ * on the VL, we need to use a consistent (i.e., the
+ * maximum) VL.
+ * Note that this means that at guest exit ZCR_EL1 is
+ * not necessarily the same as on guest entry.
+ *
+ * Restoring the VL isn't needed in VHE mode since
+ * ZCR_EL2 (accessed via ZCR_EL1) would fulfill the same
+ * role when doing the save from EL2.
+ */
if (!has_vhe())
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
SYS_ZCR_EL1);
}
+ /*
+ * Flush (save and invalidate) the fpsimd/sve state so that if
+ * the host tries to use fpsimd/sve, it's not using stale data
+ * from the guest.
+ *
+ * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
+ * context unconditionally, in both nVHE and VHE. This allows
+ * the kernel to restore the fpsimd/sve state, including ZCR_EL1
+ * when needed.
+ */
fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && system_supports_sve()) {
/*
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 617ae6dea5d5..b037f0a0e27e 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -56,6 +56,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu)
{
/*
+ * Forward this trapped smc instruction to the virtual EL2 if
+ * the guest has asked for it.
+ */
+ if (forward_smc_trap(vcpu))
+ return 1;
+
+ /*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
@@ -207,19 +214,40 @@ static int handle_sve(struct kvm_vcpu *vcpu)
}
/*
- * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
- * that we can do is give the guest an UNDEF.
+ * Two possibilities to handle a trapping ptrauth instruction:
+ *
+ * - Guest usage of a ptrauth instruction (which the guest EL1 did not
+ * turn into a NOP). If we get here, it is because we didn't enable
+ * ptrauth for the guest. This results in an UNDEF, as the guest isn't
+ * supposed to use ptrauth without being told it could.
+ *
+ * - Running an L2 NV guest while L1 has left HCR_EL2.API==0, and for
+ * which we reinject the exception into L1.
+ *
+ * Anything else is an emulation bug (hence the WARN_ON + UNDEF).
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
+ if (!vcpu_has_ptrauth(vcpu)) {
+ kvm_inject_undefined(vcpu);
+ return 1;
+ }
+
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return 1;
+ }
+
+ /* Really shouldn't be here! */
+ WARN_ON_ONCE(1);
kvm_inject_undefined(vcpu);
return 1;
}
static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
- if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
+ if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
+ !vcpu_has_ptrauth(vcpu))
return kvm_handle_ptrauth(vcpu);
/*
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index a38dea6186c9..d61e44642f98 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -3,7 +3,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-incdir := $(srctree)/$(src)/include
+incdir := $(src)/include
subdir-asflags-y := -I$(incdir)
subdir-ccflags-y := -I$(incdir)
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 961bbef104a6..d00093699aaf 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -135,9 +135,9 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
return;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
guest_ctxt = &vcpu->arch.ctxt;
- host_dbg = &vcpu->arch.host_debug_state.regs;
+ host_dbg = host_data_ptr(host_debug_state.regs);
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
__debug_save_state(host_dbg, host_ctxt);
@@ -154,9 +154,9 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
return;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
guest_ctxt = &vcpu->arch.ctxt;
- host_dbg = &vcpu->arch.host_debug_state.regs;
+ host_dbg = host_data_ptr(host_debug_state.regs);
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
__debug_save_state(guest_dbg, guest_ctxt);
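The host_data_ptr() accessor appears throughout the hyp changes in this series; judging by the one-for-one replacements above (e.g. &this_cpu_ptr(&kvm_host_data)->host_ctxt becoming host_data_ptr(host_ctxt)), it is a per-CPU field accessor along these lines (sketch inferred from the diff, not the verbatim definition):

/* Inferred shape: resolve a field of the per-CPU kvm_host_data. */
#define host_data_ptr_example(f)	(&this_cpu_ptr(&kvm_host_data)->f)

so that, for instance, *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED is the per-CPU
equivalent of the old vcpu->arch.fp_state assignment.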
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index e3fcf8c4d5b4..a92566f36022 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -27,6 +27,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
+#include <asm/kvm_ptrauth.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
@@ -39,12 +40,6 @@ struct kvm_exception_table_entry {
extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
-/* Check whether the FP regs are owned by the guest */
-static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
-}
-
/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
@@ -155,7 +150,7 @@ static inline bool cpu_has_amu(void)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
CHECK_FGT_MASKS(HFGRTR_EL2);
@@ -191,7 +186,7 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
@@ -226,13 +221,13 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(0, pmselr_el0);
- hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
}
- vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
+ *host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
@@ -254,13 +249,13 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
- write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+ write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3()) {
struct kvm_cpu_context *hctxt;
- hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
@@ -271,10 +266,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
__deactivate_traps_hfgxtr(vcpu);
}
-static inline void ___activate_traps(struct kvm_vcpu *vcpu)
+static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
- u64 hcr = vcpu->arch.hcr_el2;
-
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
@@ -376,8 +369,8 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
- if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
- __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+ if (host_owns_fp_regs())
+ __fpsimd_save_state(*host_data_ptr(fpsimd_state));
/* Restore the guest state */
if (sve_guest)
@@ -389,7 +382,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
- vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
+ *host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
return true;
}
@@ -449,60 +442,6 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
-static inline bool esr_is_ptrauth_trap(u64 esr)
-{
- switch (esr_sys64_to_sysreg(esr)) {
- case SYS_APIAKEYLO_EL1:
- case SYS_APIAKEYHI_EL1:
- case SYS_APIBKEYLO_EL1:
- case SYS_APIBKEYHI_EL1:
- case SYS_APDAKEYLO_EL1:
- case SYS_APDAKEYHI_EL1:
- case SYS_APDBKEYLO_EL1:
- case SYS_APDBKEYHI_EL1:
- case SYS_APGAKEYLO_EL1:
- case SYS_APGAKEYHI_EL1:
- return true;
- }
-
- return false;
-}
-
-#define __ptrauth_save_key(ctxt, key) \
- do { \
- u64 __val; \
- __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
- ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
- __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
- ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
-} while(0)
-
-DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-
-static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
-{
- struct kvm_cpu_context *ctxt;
- u64 val;
-
- if (!vcpu_has_ptrauth(vcpu))
- return false;
-
- ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
- __ptrauth_save_key(ctxt, APIA);
- __ptrauth_save_key(ctxt, APIB);
- __ptrauth_save_key(ctxt, APDA);
- __ptrauth_save_key(ctxt, APDB);
- __ptrauth_save_key(ctxt, APGA);
-
- vcpu_ptrauth_enable(vcpu);
-
- val = read_sysreg(hcr_el2);
- val |= (HCR_API | HCR_APK);
- write_sysreg(val, hcr_el2);
-
- return true;
-}
-
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctxt;
@@ -590,9 +529,6 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
- if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
- return kvm_hyp_handle_ptrauth(vcpu, exit_code);
-
if (kvm_hyp_handle_cntpct(vcpu))
return true;
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 82b3d62538a6..22f374e9f532 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -53,7 +53,13 @@ pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}
+static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ return vcpu_is_protected(&hyp_vcpu->vcpu);
+}
+
void pkvm_hyp_vm_table_init(void *tbl);
+void pkvm_host_fpsimd_state_init(void);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva);
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 2250253a6429..50fa0ffb6b7e 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -97,16 +97,3 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI)
# causes a build failure. Remove profile optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
-
-# KVM nVHE code is run at a different exception code with a different map, so
-# compiler instrumentation that inserts callbacks or checks into the code may
-# cause crashes. Just disable it.
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
-# Skip objtool checking for this directory because nVHE code is compiled with
-# non-standard build rules.
-OBJECT_FILES_NON_STANDARD := y
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 7746ea507b6f..53efda0235cf 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -83,10 +83,10 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
- __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+ __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
/* Disable and flush Self-Hosted Trace generation */
if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
- __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
+ __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1));
}
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -97,9 +97,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
- __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+ __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
- __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
+ __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1));
}
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 320f2eaa14a9..02746f9d0980 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -600,7 +600,6 @@ static bool ffa_call_supported(u64 func_id)
case FFA_MSG_POLL:
case FFA_MSG_WAIT:
/* 32-bit variants of 64-bit calls */
- case FFA_MSG_SEND_DIRECT_REQ:
case FFA_MSG_SEND_DIRECT_RESP:
case FFA_RXTX_MAP:
case FFA_MEM_DONATE:
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2385fd03ed87..d5c48dc98f67 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -39,10 +39,8 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
- hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
- hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
@@ -64,7 +62,6 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
- host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
@@ -178,16 +175,6 @@ static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}
-static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
-{
- cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
-}
-
-static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
-{
- __vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
-}
-
static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
__vgic_v3_init_lrs();
@@ -198,18 +185,18 @@ static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}
-static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
- __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+ __vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
}
-static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
- __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+ __vgic_v3_restore_vmcr_aprs(kern_hyp_va(cpu_if));
}
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
@@ -340,10 +327,8 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
- HANDLE_FUNC(__vgic_v3_read_vmcr),
- HANDLE_FUNC(__vgic_v3_write_vmcr),
- HANDLE_FUNC(__vgic_v3_save_aprs),
- HANDLE_FUNC(__vgic_v3_restore_aprs),
+ HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+ HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
HANDLE_FUNC(__pkvm_init_vm),
HANDLE_FUNC(__pkvm_init_vcpu),
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 861c76021a25..caba3e4bd09e 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -533,7 +533,13 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
int ret = 0;
esr = read_sysreg_el2(SYS_ESR);
- BUG_ON(!__get_fault_info(esr, &fault));
+ if (!__get_fault_info(esr, &fault)) {
+ /*
+ * We've presumably raced with a page-table change which caused
+ * AT to fail; try again.
+ */
+ return;
+ }
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
ret = host_stage2_idmap(addr);
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 26dd9a20ad6e..16aa4875ddb8 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -200,7 +200,7 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
}
/*
- * Initialize trap register values for protected VMs.
+ * Initialize trap register values in protected mode.
*/
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
@@ -247,6 +247,17 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
+void pkvm_host_fpsimd_state_init(void)
+{
+ unsigned long i;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
+
+ host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
+ }
+}
+
/*
* Return the hyp vm structure corresponding to the handle.
*/
@@ -430,6 +441,7 @@ static void *map_donated_memory(unsigned long host_va, size_t size)
static void __unmap_donated_memory(void *va, size_t size)
{
+ kvm_flush_dcache_to_poc(va, size);
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
PAGE_ALIGN(size) >> PAGE_SHIFT));
}
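The pkvm_host_fpsimd_state_init() hunk above simply points each CPU's fpsimd_state at the FP register file embedded in that CPU's own host context. As a hedged, self-contained userspace model of that wiring — using a plain array in place of the kernel's per-CPU data, and simplified stand-in struct names rather than the real kvm_host_data layout — consider:

#include <stdio.h>

#define NR_CPUS 4

struct fp_regs { unsigned long long v[32]; };   /* stand-in for user_fpsimd_state */
struct cpu_context { struct fp_regs fp_regs; }; /* stand-in for kvm_cpu_context   */

struct host_data {
	struct cpu_context host_ctxt;
	struct fp_regs *fpsimd_state;           /* where host FP state gets saved */
};

static struct host_data host_data[NR_CPUS];     /* stand-in for per-CPU kvm_host_data */

/* Mirror of the init loop: each CPU saves host FP state into its own context. */
static void host_fpsimd_state_init(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		host_data[i].fpsimd_state = &host_data[i].host_ctxt.fp_regs;
}

int main(void)
{
	host_fpsimd_state_init();
	for (int i = 0; i < NR_CPUS; i++)
		printf("cpu%d: fpsimd_state -> own host_ctxt? %s\n", i,
		       host_data[i].fpsimd_state == &host_data[i].host_ctxt.fp_regs ?
		       "yes" : "no");
	return 0;
}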
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index d57bcb6ab94d..dfe8fe0f7eaf 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -205,7 +205,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
if (is_cpu_on)
boot_args = this_cpu_ptr(&cpu_on_args);
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index bc58d1b515af..859f22f754d3 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -257,8 +257,7 @@ static int fix_hyp_pgtable_refcnt(void)
void __noreturn __pkvm_init_finalise(void)
{
- struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
- struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
+ struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
unsigned long nr_pages, reserved_pages, pfn;
int ret;
@@ -301,6 +300,7 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
+ pkvm_host_fpsimd_state_init();
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c50f8459e4fc..6758cd905570 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -40,7 +40,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
- ___activate_traps(vcpu);
+ ___activate_traps(vcpu, vcpu->arch.hcr_el2);
__activate_traps_common(vcpu);
val = vcpu->arch.cptr_el2;
@@ -53,7 +53,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TSM;
}
- if (!guest_owns_fp_regs(vcpu)) {
+ if (!guest_owns_fp_regs()) {
if (has_hvhe())
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
@@ -191,7 +191,6 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
- [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
@@ -203,13 +202,12 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
- [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
- if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
+ if (unlikely(vcpu_is_protected(vcpu)))
return pvm_exit_handlers;
return hyp_exit_handlers;
@@ -228,9 +226,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
*/
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-
- if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+ if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
/*
* As we have caught the guest red-handed, decide that it isn't
* fit for purpose anymore by making the vcpu invalid. The VMM
@@ -264,7 +260,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
pmr_sync();
}
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -337,7 +333,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(host_ctxt);
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+ if (guest_owns_fp_regs())
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
@@ -367,7 +363,7 @@ asmlinkage void __noreturn hyp_panic(void)
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
vcpu = host_ctxt->__hyp_running_vcpu;
if (vcpu) {
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index a60fb13e2192..ca3c09df8d7c 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -11,13 +11,23 @@
#include <nvhe/mem_protect.h>
struct tlb_inv_context {
- u64 tcr;
+ struct kvm_s2_mmu *mmu;
+ u64 tcr;
+ u64 sctlr;
};
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
- struct tlb_inv_context *cxt,
- bool nsh)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+ struct tlb_inv_context *cxt,
+ bool nsh)
{
+ struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_vcpu *vcpu;
+
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ vcpu = host_ctxt->__hyp_running_vcpu;
+ cxt->mmu = NULL;
+
/*
* We have two requirements:
*
@@ -40,20 +50,55 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
else
dsb(ish);
+ /*
+ * If we're already in the desired context, then there's nothing to do.
+ */
+ if (vcpu) {
+ /*
+ * We're in guest context. However, for this to work, this needs
+ * to be called from within __kvm_vcpu_run(), which ensures that
+ * __hyp_running_vcpu is set to the current guest vcpu.
+ */
+ if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+ return;
+
+ cxt->mmu = vcpu->arch.hw_mmu;
+ } else {
+ /* We're in host context. */
+ if (mmu == host_s2_mmu)
+ return;
+
+ cxt->mmu = host_s2_mmu;
+ }
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
/*
* For CPUs that are affected by ARM 1319367, we need to
- * avoid a host Stage-1 walk while we have the guest's
- * VMID set in the VTTBR in order to invalidate TLBs.
- * We're guaranteed that the S1 MMU is enabled, so we can
- * simply set the EPD bits to avoid any further TLB fill.
+ * avoid a Stage-1 walk with the old VMID while we have
+ * the new VMID set in the VTTBR in order to invalidate TLBs.
+ * We're guaranteed that the host S1 MMU is enabled, so
+ * we can simply set the EPD bits to avoid any further
+ * TLB fill. For guests, we ensure that the S1 MMU is
+ * temporarily enabled in the next context.
*/
val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
write_sysreg_el1(val, SYS_TCR);
isb();
+
+ if (vcpu) {
+ val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+ if (!(val & SCTLR_ELx_M)) {
+ val |= SCTLR_ELx_M;
+ write_sysreg_el1(val, SYS_SCTLR);
+ isb();
+ }
+ } else {
+ /* The host S1 MMU is always enabled. */
+ cxt->sctlr = SCTLR_ELx_M;
+ }
}
/*
@@ -62,18 +107,40 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
* ensuring that we always have an ISB, but not two ISBs back
* to back.
*/
- __load_stage2(mmu, kern_hyp_va(mmu->arch));
+ if (vcpu)
+ __load_host_stage2();
+ else
+ __load_stage2(mmu, kern_hyp_va(mmu->arch));
+
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
{
- __load_host_stage2();
+ struct kvm_s2_mmu *mmu = cxt->mmu;
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_vcpu *vcpu;
+
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ vcpu = host_ctxt->__hyp_running_vcpu;
+
+ if (!mmu)
+ return;
+
+ if (vcpu)
+ __load_stage2(mmu, kern_hyp_va(mmu->arch));
+ else
+ __load_host_stage2();
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
- /* Ensure write of the host VMID */
+ /* Ensure write of the old VMID */
isb();
- /* Restore the host's TCR_EL1 */
+
+ if (!(cxt->sctlr & SCTLR_ELx_M)) {
+ write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+ isb();
+ }
+
write_sysreg_el1(cxt->tcr, SYS_TCR);
}
}
@@ -84,7 +151,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt, false);
+ enter_vmid_context(mmu, &cxt, false);
/*
* We could do so much better if we had the VA as well.
@@ -105,7 +172,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -114,7 +181,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt, true);
+ enter_vmid_context(mmu, &cxt, true);
/*
* We could do so much better if we had the VA as well.
@@ -135,7 +202,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
dsb(nsh);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -152,16 +219,17 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
start = round_down(start, stride);
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt, false);
+ enter_vmid_context(mmu, &cxt, false);
- __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+ TLBI_TTL_UNKNOWN);
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -169,13 +237,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt, false);
+ enter_vmid_context(mmu, &cxt, false);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -183,19 +251,19 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt, false);
+ enter_vmid_context(mmu, &cxt, false);
__tlbi(vmalle1);
asm volatile("ic iallu");
dsb(nsh);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_flush_vm_context(void)
{
- /* Same remark as in __tlb_switch_to_guest() */
+ /* Same remark as in enter_vmid_context() */
dsb(ish);
__tlbi(alle1is);
dsb(ish);
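The enter_vmid_context()/exit_vmid_context() pairing introduced above follows a save-switch-restore pattern: return early with cxt->mmu = NULL when the required context is already live, otherwise remember what must be restored on exit. A minimal sketch of just that control flow, with invented types and no real stage-2 switching, might look like this:

#include <stdio.h>
#include <stddef.h>

struct s2_mmu { const char *name; };            /* invented stand-in */
struct tlb_inv_context { struct s2_mmu *mmu; }; /* what to restore on exit */

static struct s2_mmu host_mmu  = { "host"  };
static struct s2_mmu guest_mmu = { "guest" };
static struct s2_mmu *current_mmu = &host_mmu;  /* models the live VTTBR owner */

static void load_ctx(struct s2_mmu *mmu)
{
	current_mmu = mmu;
	printf("switched to %s context\n", mmu->name);
}

static void enter_vmid_context(struct s2_mmu *mmu, struct tlb_inv_context *cxt)
{
	cxt->mmu = NULL;
	if (mmu == current_mmu)      /* already in the desired context */
		return;
	cxt->mmu = current_mmu;      /* remember what to come back to */
	load_ctx(mmu);
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	if (!cxt->mmu)               /* nothing was switched, nothing to undo */
		return;
	load_ctx(cxt->mmu);
}

int main(void)
{
	struct tlb_inv_context cxt;

	enter_vmid_context(&guest_mmu, &cxt);   /* switches host -> guest */
	/* ... TLB invalidation would run here ... */
	exit_vmid_context(&cxt);                /* switches back to host  */

	enter_vmid_context(&host_mmu, &cxt);    /* no-op: already in host */
	exit_vmid_context(&cxt);
	return 0;
}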
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3fae5830f8d2..9e2bbee77491 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;
@@ -843,12 +843,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
* Perform the appropriate TLB invalidation based on the
* evicted pte value (if any).
*/
- if (kvm_pte_table(ctx->old, ctx->level))
- kvm_tlb_flush_vmid_range(mmu, ctx->addr,
- kvm_granule_size(ctx->level));
- else if (kvm_pte_valid(ctx->old))
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ u64 size = kvm_granule_size(ctx->level);
+ u64 addr = ALIGN_DOWN(ctx->addr, size);
+
+ kvm_tlb_flush_vmid_range(mmu, addr, size);
+ } else if (kvm_pte_valid(ctx->old)) {
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
ctx->addr, ctx->level);
+ }
}
if (stage2_pte_is_counted(ctx->old))
@@ -896,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- if (!stage2_unmap_defer_tlb_flush(pgt))
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
- ctx->addr, ctx->level);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ TLBI_TTL_UNKNOWN);
+ } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ ctx->level);
+ }
}
mm_ops->put_page(ctx->ptep);
@@ -907,12 +914,12 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
- return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
+ return kvm_pte_valid(pte) && memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}
static bool stage2_pte_executable(kvm_pte_t pte)
{
- return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
+ return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
@@ -972,6 +979,21 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
if (!stage2_pte_needs_update(ctx->old, new))
return -EAGAIN;
+ /* If we're only changing software bits, then store them and go! */
+ if (!kvm_pgtable_walk_shared(ctx) &&
+ !((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) {
+ bool old_is_counted = stage2_pte_is_counted(ctx->old);
+
+ if (old_is_counted != stage2_pte_is_counted(new)) {
+ if (old_is_counted)
+ mm_ops->put_page(ctx->ptep);
+ else
+ mm_ops->get_page(ctx->ptep);
+ }
+ WARN_ON_ONCE(!stage2_try_set_pte(ctx, new));
+ return 0;
+ }
+
if (!stage2_try_break_pte(ctx, data->mmu))
return -EAGAIN;
@@ -1363,7 +1385,7 @@ static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
struct kvm_pgtable *pgt = ctx->arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
- if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
+ if (!stage2_pte_cacheable(pgt, ctx->old))
return 0;
if (mm_ops->dcache_clean_inval_poc)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 6cb638b184b1..7b397fad26f2 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -330,7 +330,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
write_gicreg(0, ICH_HCR_EL2);
}
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
u64 val;
u32 nr_pre_bits;
@@ -363,7 +363,7 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
}
}
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
u64 val;
u32 nr_pre_bits;
@@ -455,16 +455,35 @@ u64 __vgic_v3_get_gic_config(void)
return val;
}
-u64 __vgic_v3_read_vmcr(void)
+static u64 __vgic_v3_read_vmcr(void)
{
return read_gicreg(ICH_VMCR_EL2);
}
-void __vgic_v3_write_vmcr(u32 vmcr)
+static void __vgic_v3_write_vmcr(u32 vmcr)
{
write_gicreg(vmcr, ICH_VMCR_EL2);
}
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+ __vgic_v3_save_aprs(cpu_if);
+ if (cpu_if->vgic_sre)
+ cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
+}
+
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+ /*
+ * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
+ * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
+ * VMCR_EL2 save/restore in the world switch.
+ */
+ if (cpu_if->vgic_sre)
+ __vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
+ __vgic_v3_restore_aprs(cpu_if);
+}
+
static int __vgic_v3_bpr_min(void)
{
/* See Pseudocode for VPriorityGroup */
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 1581df6aec87..d7af5f46f22a 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -33,11 +33,43 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+/*
+ * HCR_EL2 bits that the NV guest can freely change (no RES0/RES1
+ * semantics, irrespective of the configuration), but that cannot be
+ * applied to the actual HW as things would otherwise break badly.
+ *
+ * - TGE: we want the guest to use EL1, which is incompatible with
+ * this bit being set
+ *
+ * - API/APK: they are already accounted for by vcpu_load(), and can
+ * only take effect across a load/put cycle (such as ERET)
+ */
+#define NV_HCR_GUEST_EXCLUDE (HCR_TGE | HCR_API | HCR_APK)
+
+static u64 __compute_hcr(struct kvm_vcpu *vcpu)
+{
+ u64 hcr = vcpu->arch.hcr_el2;
+
+ if (!vcpu_has_nv(vcpu))
+ return hcr;
+
+ if (is_hyp_ctxt(vcpu)) {
+ hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB;
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ hcr |= HCR_NV1;
+
+ write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
+ }
+
+ return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
+}
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
- ___activate_traps(vcpu);
+ ___activate_traps(vcpu, __compute_hcr(vcpu));
if (has_cntpoff()) {
struct timer_map map;
@@ -75,7 +107,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TAM;
- if (guest_owns_fp_regs(vcpu)) {
+ if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
} else {
@@ -162,6 +194,8 @@ static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
{
+ host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;
+
__vcpu_load_switch_sysregs(vcpu);
__vcpu_load_activate_traps(vcpu);
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
@@ -171,6 +205,61 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
{
__vcpu_put_deactivate_traps(vcpu);
__vcpu_put_switch_sysregs(vcpu);
+
+ host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;
+}
+
+static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u64 spsr, elr, mode;
+
+ /*
+ * Going through the whole put/load motions is a waste of time
+ * if this is a VHE guest hypervisor returning to its own
+ * userspace, or the hypervisor performing a local exception
+ * return. No need to save/restore registers, no need to
+ * switch S2 MMU. Just do the canonical ERET.
+ *
+ * Unless the trap has to be forwarded further down the line,
+ * of course...
+ */
+ if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV) ||
+ (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_ERET))
+ return false;
+
+ spsr = read_sysreg_el1(SYS_SPSR);
+ mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+ switch (mode) {
+ case PSR_MODE_EL0t:
+ if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
+ return false;
+ break;
+ case PSR_MODE_EL2t:
+ mode = PSR_MODE_EL1t;
+ break;
+ case PSR_MODE_EL2h:
+ mode = PSR_MODE_EL1h;
+ break;
+ default:
+ return false;
+ }
+
+ /* If ERETAx fails, take the slow path */
+ if (esr_iss_is_eretax(esr)) {
+ if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
+ return false;
+ } else {
+ elr = read_sysreg_el1(SYS_ELR);
+ }
+
+ spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
+
+ write_sysreg_el2(spsr, SYS_SPSR);
+ write_sysreg_el2(elr, SYS_ELR);
+
+ return true;
}
static const exit_handler_fn hyp_exit_handlers[] = {
@@ -182,7 +271,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
- [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+ [ESR_ELx_EC_ERET] = kvm_hyp_handle_eret,
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
@@ -197,7 +286,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
* If we were in HYP context on entry, adjust the PSTATE view
* so that the usual helpers work correctly.
*/
- if (unlikely(vcpu_get_flag(vcpu, VCPU_HYP_CONTEXT))) {
+ if (vcpu_has_nv(vcpu) && (read_sysreg(hcr_el2) & HCR_NV)) {
u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
switch (mode) {
@@ -221,8 +310,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- host_ctxt->__hyp_running_vcpu = vcpu;
+ host_ctxt = host_data_ptr(host_ctxt);
guest_ctxt = &vcpu->arch.ctxt;
sysreg_save_host_state_vhe(host_ctxt);
@@ -240,11 +328,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
- if (is_hyp_ctxt(vcpu))
- vcpu_set_flag(vcpu, VCPU_HYP_CONTEXT);
- else
- vcpu_clear_flag(vcpu, VCPU_HYP_CONTEXT);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
@@ -258,7 +341,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_host_state_vhe(host_ctxt);
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+ if (guest_owns_fp_regs())
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
@@ -306,7 +389,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
vcpu = host_ctxt->__hyp_running_vcpu;
__deactivate_traps(vcpu);
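One way to read __compute_hcr() above is as a guarded merge: the guest hypervisor's view of HCR_EL2 is folded into the host-maintained value, except for bits (TGE/API/APK) that must never reach the hardware from that path. Here is a minimal, self-contained sketch of that masking step; the bit positions are made up for illustration and do not match the real HCR_EL2 layout:

#include <stdio.h>
#include <stdint.h>

/* Invented bit positions, only for illustration -- not the real HCR_EL2 layout. */
#define HCR_TGE  (1ull << 0)
#define HCR_API  (1ull << 1)
#define HCR_APK  (1ull << 2)
#define HCR_TVM  (1ull << 3)
#define HCR_NV   (1ull << 4)

#define NV_HCR_GUEST_EXCLUDE (HCR_TGE | HCR_API | HCR_APK)

/* Merge the guest hypervisor's HCR_EL2 into the host value, filtering
 * out bits that may not be applied to the hardware from this path. */
static uint64_t compute_hcr(uint64_t host_hcr, uint64_t guest_hcr)
{
	return host_hcr | (guest_hcr & ~NV_HCR_GUEST_EXCLUDE);
}

int main(void)
{
	uint64_t host  = HCR_NV;
	uint64_t guest = HCR_TGE | HCR_API | HCR_TVM;   /* TGE/API must be dropped */
	uint64_t hcr   = compute_hcr(host, guest);

	printf("effective HCR bits: %#llx (TVM kept: %d, TGE dropped: %d)\n",
	       (unsigned long long)hcr, !!(hcr & HCR_TVM), !(hcr & HCR_TGE));
	return 0;
}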
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index a8b9ea496706..e12bd7d6d2dc 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -67,7 +67,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
__sysreg_save_user_state(host_ctxt);
/*
@@ -110,7 +110,7 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ host_ctxt = host_data_ptr(host_ctxt);
__sysreg_save_el1_state(guest_ctxt);
__sysreg_save_user_state(guest_ctxt);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index b32e2940df7d..5fa0359f3a87 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -17,8 +17,8 @@ struct tlb_inv_context {
u64 sctlr;
};
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
- struct tlb_inv_context *cxt)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+ struct tlb_inv_context *cxt)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
u64 val;
@@ -67,7 +67,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
isb();
}
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -97,7 +97,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
dsb(ishst);
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ enter_vmid_context(mmu, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -118,7 +118,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -129,7 +129,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
dsb(nshst);
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ enter_vmid_context(mmu, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -150,7 +150,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
dsb(nsh);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -169,16 +169,17 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
dsb(ishst);
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ enter_vmid_context(mmu, &cxt);
- __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+ TLBI_TTL_UNKNOWN);
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -188,13 +189,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
dsb(ishst);
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ enter_vmid_context(mmu, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -202,14 +203,14 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ enter_vmid_context(mmu, &cxt);
__tlbi(vmalle1);
asm volatile("ic iallu");
dsb(nsh);
isb();
- __tlb_switch_to_host(&cxt);
+ exit_vmid_context(&cxt);
}
void __kvm_flush_vm_context(void)
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 200c8019a82a..cd6b7b83e2c3 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -86,7 +86,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
/* Detect an already handled MMIO return */
if (unlikely(!vcpu->mmio_needed))
- return 0;
+ return 1;
vcpu->mmio_needed = 0;
@@ -117,7 +117,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
*/
kvm_incr_pc(vcpu);
- return 0;
+ return 1;
}
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
@@ -133,11 +133,19 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
/*
* No valid syndrome? Ask userspace for help if it has
* volunteered to do so, and bail out otherwise.
+ *
+ * In the protected VM case, there isn't much userspace can do
+ * though, so directly deliver an exception to the guest.
*/
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa);
+ if (vcpu_is_protected(vcpu)) {
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+ }
+
if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
&vcpu->kvm->arch.flags)) {
run->exit_reason = KVM_EXIT_ARM_NISV;
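The mmio.c hunks above lean on KVM's usual handler convention: a positive return resumes the guest, zero exits to userspace, and a negative value is an error. A hedged, self-contained sketch of a run loop built around that convention follows; the handler name and the mmio_needed flag are placeholders, not the real arm64 entry points:

#include <stdio.h>

enum { EXIT_TO_USER = 0, RESUME_GUEST = 1 };

/* Placeholder handler: an already-completed MMIO return just resumes the guest. */
static int handle_mmio_return(int mmio_needed)
{
	if (!mmio_needed)
		return RESUME_GUEST;    /* nothing to do, keep running the guest */
	/* ... consume the MMIO data, advance the PC ... */
	return RESUME_GUEST;
}

static void run_loop(void)
{
	int iterations = 3;

	while (iterations--) {
		int ret = handle_mmio_return(iterations == 1);

		if (ret < 0) {
			printf("error, stop\n");
			break;
		}
		if (ret == EXIT_TO_USER) {
			printf("exit to userspace\n");
			break;
		}
		printf("resume guest\n");
	}
}

int main(void)
{
	run_loop();
	return 0;
}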
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 18680771cdb0..8bcab0cc3fe9 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1522,8 +1522,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
read_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
- if (mmu_invalidate_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
+ ret = -EAGAIN;
goto out_unlock;
+ }
/*
* If we are not forced to use page mapping, check if we are
@@ -1581,6 +1583,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
memcache,
KVM_PGTABLE_WALK_HANDLE_FAULT |
KVM_PGTABLE_WALK_SHARED);
+out_unlock:
+ read_unlock(&kvm->mmu_lock);
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) {
@@ -1588,8 +1592,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
-out_unlock:
- read_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
}
@@ -1637,7 +1639,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (esr_fsc_is_permission_fault(esr)) {
+ if (esr_fsc_is_translation_fault(esr)) {
/* Beyond sanitised PARange (which is the IPA limit) */
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
kvm_inject_size_fault(vcpu);
@@ -1768,40 +1770,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- kvm_pfn_t pfn = pte_pfn(range->arg.pte);
-
- if (!kvm->arch.mmu.pgt)
- return false;
-
- WARN_ON(range->end - range->start != 1);
-
- /*
- * If the page isn't tagged, defer to user_mem_abort() for sanitising
- * the MTE tags. The S2 pte should have been unmapped by
- * mmu_notifier_invalidate_range_end().
- */
- if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
- return false;
-
- /*
- * We've moved a page around, probably through CoW, so let's treat
- * it just like a translation fault and the map handler will clean
- * the cache to the PoC.
- *
- * The MMU notifiers will have unmapped a huge PMD before calling
- * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
- * therefore we never need to clear out a huge PMD through this
- * calling path and a memcache is not required.
- */
- kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
- PAGE_SIZE, __pfn_to_phys(pfn),
- KVM_PGTABLE_PROT_R, NULL, 0);
-
- return false;
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
u64 size = (range->end - range->start) << PAGE_SHIFT;
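The mmu.c change above narrows the read-side lock so that dirty marking happens after the unlock, and turns the invalidate-retry case into an explicit -EAGAIN through the same exit path. A self-contained userspace sketch of that shape, using a pthread rwlock in place of the KVM mmu_lock and fake helpers for everything else, might look like this:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool invalidate_retry;           /* pretend a notifier raced with us */
static bool writable = true;

static int map_fault(void)
{
	int ret = 0;

	pthread_rwlock_rdlock(&mmu_lock);

	if (invalidate_retry) {             /* raced: bail out and let the  */
		ret = -EAGAIN;              /* caller retry the whole fault */
		goto out_unlock;
	}

	/* ... install the stage-2 mapping under the lock ... */

out_unlock:
	pthread_rwlock_unlock(&mmu_lock);

	/* Dirty tracking only on success, and only after dropping the lock. */
	if (writable && !ret)
		printf("mark page dirty\n");

	return ret != -EAGAIN ? ret : 0;    /* -EAGAIN is not an error to callers */
}

int main(void)
{
	printf("first attempt:  ret=%d\n", map_fault());
	invalidate_retry = true;
	printf("racing attempt: ret=%d\n", map_fault());
	return 0;
}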
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index ced30c90521a..6813c7c7f00a 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -35,13 +35,9 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
break;
case SYS_ID_AA64ISAR1_EL1:
- /* Support everything but PtrAuth and Spec Invalidation */
+ /* Support everything but Spec Invalidation */
val &= ~(GENMASK_ULL(63, 56) |
- NV_FTR(ISAR1, SPECRES) |
- NV_FTR(ISAR1, GPI) |
- NV_FTR(ISAR1, GPA) |
- NV_FTR(ISAR1, API) |
- NV_FTR(ISAR1, APA));
+ NV_FTR(ISAR1, SPECRES));
break;
case SYS_ID_AA64PFR0_EL1:
diff --git a/arch/arm64/kvm/pauth.c b/arch/arm64/kvm/pauth.c
new file mode 100644
index 000000000000..d5eb3ae876be
--- /dev/null
+++ b/arch/arm64/kvm/pauth.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 - Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ *
+ * Primitive PAuth emulation for ERETAA/ERETAB.
+ *
+ * This code assumes that it is run from EL2, and that it is part of
+ * the emulation of ERETAx for a guest hypervisor. That's a lot of
+ * baked-in assumptions and shortcuts.
+ *
+ * Do not reuse for anything else!
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/gpr-num.h>
+#include <asm/kvm_emulate.h>
+#include <asm/pointer_auth.h>
+
+/* PACGA Xd, Xn, Xm */
+#define PACGA(d,n,m) \
+ asm volatile(__DEFINE_ASM_GPR_NUMS \
+ ".inst 0x9AC03000 |" \
+ "(.L__gpr_num_%[Rd] << 0) |" \
+ "(.L__gpr_num_%[Rn] << 5) |" \
+ "(.L__gpr_num_%[Rm] << 16)\n" \
+ : [Rd] "=r" ((d)) \
+ : [Rn] "r" ((n)), [Rm] "r" ((m)))
+
+static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
+ struct ptrauth_key ikey)
+{
+ struct ptrauth_key gkey;
+ u64 mod, pac = 0;
+
+ preempt_disable();
+
+ if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
+ mod = __vcpu_sys_reg(vcpu, SP_EL2);
+ else
+ mod = read_sysreg(sp_el1);
+
+ gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
+ gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);
+
+ __ptrauth_key_install_nosync(APGA, ikey);
+ isb();
+
+ PACGA(pac, ptr, mod);
+ isb();
+
+ __ptrauth_key_install_nosync(APGA, gkey);
+
+ preempt_enable();
+
+ /* PAC in the top 32bits */
+ return pac;
+}
+
+static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
+{
+ u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ bool tbi, tbid;
+
+ /*
+ * Since we are authenticating an instruction address, we have
+ * to take TBID into account. If E2H==0, ignore VA[55], as
+ * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
+ * this case, this is likely a guest bug...
+ */
+ if (!vcpu_el2_e2h_is_set(vcpu)) {
+ tbi = tcr & BIT(20);
+ tbid = tcr & BIT(29);
+ } else if (bit55) {
+ tbi = tcr & TCR_TBI1;
+ tbid = tcr & TCR_TBID1;
+ } else {
+ tbi = tcr & TCR_TBI0;
+ tbid = tcr & TCR_TBID0;
+ }
+
+ return tbi && !tbid;
+}
+
+static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
+{
+ static const int maxtxsz = 39; // Revisit these two values once
+ static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
+ u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ int txsz;
+
+ if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
+ txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
+ else
+ txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
+
+ return 64 - clamp(txsz, mintxsz, maxtxsz);
+}
+
+static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
+{
+ int bottom_pac;
+ u64 mask;
+
+ bottom_pac = compute_bottom_pac(vcpu, bit55);
+
+ mask = GENMASK(54, bottom_pac);
+ if (!effective_tbi(vcpu, bit55))
+ mask |= GENMASK(63, 56);
+
+ return mask;
+}
+
+static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
+{
+ bool bit55 = !!(ptr & BIT(55));
+
+ if (bit55)
+ return ptr | mask;
+
+ return ptr & ~mask;
+}
+
+static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
+{
+ bool bit55 = !!(ptr & BIT(55));
+ u64 mask, error_code;
+ int shift;
+
+ if (effective_tbi(vcpu, bit55)) {
+ mask = GENMASK(54, 53);
+ shift = 53;
+ } else {
+ mask = GENMASK(62, 61);
+ shift = 61;
+ }
+
+ if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
+ error_code = 2 << shift;
+ else
+ error_code = 1 << shift;
+
+ ptr &= ~mask;
+ ptr |= error_code;
+
+ return ptr;
+}
+
+/*
+ * Authenticate an ERETAA/ERETAB instruction, returning true if the
+ * authentication succeeded and false otherwise. In all cases, *elr
+ * contains the VA to ERET to. Potential exception injection is left
+ * to the caller.
+ */
+bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
+{
+ u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u64 ptr, cptr, pac, mask;
+ struct ptrauth_key ikey;
+
+ *elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);
+
+ /* We assume we're already in the context of an ERETAx */
+ if (esr_iss_is_eretab(esr)) {
+ if (!(sctlr & SCTLR_EL1_EnIB))
+ return true;
+
+ ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
+ ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
+ } else {
+ if (!(sctlr & SCTLR_EL1_EnIA))
+ return true;
+
+ ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
+ ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
+ }
+
+ mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
+ cptr = to_canonical_addr(vcpu, ptr, mask);
+
+ pac = compute_pac(vcpu, cptr, ikey);
+
+ /*
+ * Slightly deviate from the pseudocode: if we have a PAC
+ * match with the signed pointer, then it must be good.
+ * Anything after this point is pure error handling.
+ */
+ if ((pac & mask) == (ptr & mask)) {
+ *elr = cptr;
+ return true;
+ }
+
+ /*
+ * Authentication failed: corrupt the canonical address if
+ * PAuth2 isn't implemented, or apply some XORing if it is.
+ */
+ if (!kvm_has_pauth(vcpu->kvm, PAuth2))
+ cptr = corrupt_addr(vcpu, cptr);
+ else
+ cptr = ptr ^ (pac & mask);
+
+ *elr = cptr;
+ return false;
+}
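To make the address arithmetic in compute_bottom_pac(), compute_pac_mask() and to_canonical_addr() above easier to follow, here is a self-contained userspace rendering of just that part: GENMASK_ULL() and a clamp helper are redefined locally, the TBI decision is reduced to a boolean parameter, and no actual PAC is computed. It is a sketch of the masking logic, not a reimplementation of the kernel code:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ull) << (l)) & (~0ull >> (63 - (h))))

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Lowest PAC bit: 64 - TxSZ, with TxSZ clamped to the supported range. */
static int compute_bottom_pac(int txsz)
{
	const int mintxsz = 16, maxtxsz = 39;

	return 64 - clamp_int(txsz, mintxsz, maxtxsz);
}

/* Bits that hold the PAC: [54:bottom], plus [63:56] when TBI is off. */
static uint64_t compute_pac_mask(int txsz, int tbi)
{
	uint64_t mask = GENMASK_ULL(54, compute_bottom_pac(txsz));

	if (!tbi)
		mask |= GENMASK_ULL(63, 56);
	return mask;
}

/* Replace the PAC bits with copies of bit 55 to recover the canonical VA. */
static uint64_t to_canonical_addr(uint64_t ptr, uint64_t mask)
{
	return (ptr & (1ull << 55)) ? (ptr | mask) : (ptr & ~mask);
}

int main(void)
{
	uint64_t mask = compute_pac_mask(25, 0);    /* 39-bit VA, TBI disabled */
	uint64_t signed_ptr = 0x1234000000414243ull;

	printf("pac mask:      %#018llx\n", (unsigned long long)mask);
	printf("canonical ptr: %#018llx\n",
	       (unsigned long long)to_canonical_addr(signed_ptr, mask));
	return 0;
}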
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index b7be96a53597..85117ea8f351 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -222,7 +222,6 @@ void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
int pkvm_init_host_vm(struct kvm *host_kvm)
{
- mutex_init(&host_kvm->lock);
return 0;
}
@@ -259,6 +258,7 @@ static int __init finalize_pkvm(void)
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+ kmemleak_free_part(__hyp_rodata_start, __hyp_rodata_end - __hyp_rodata_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
ret = pkvm_drop_host_privileges();
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index a243934c5568..329819806096 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -232,7 +232,7 @@ bool kvm_set_pmuserenr(u64 val)
if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
return false;
- hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
return true;
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 68d1d05672bd..1b7b58cb121f 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -151,7 +151,6 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
void *sve_state = vcpu->arch.sve_state;
- kvm_vcpu_unshare_task_fp(vcpu);
kvm_unshare_hyp(vcpu, vcpu + 1);
if (sve_state)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c9f4f387155f..22b45a15d068 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1568,17 +1568,31 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
return IDREG(vcpu->kvm, reg_to_encoding(r));
}
+static bool is_feature_id_reg(u32 encoding)
+{
+ return (sys_reg_Op0(encoding) == 3 &&
+ (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
+ sys_reg_CRn(encoding) == 0 &&
+ sys_reg_CRm(encoding) <= 7);
+}
+
/*
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
- * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
+ * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
+ * registers KVM maintains on a per-VM basis.
*/
-static inline bool is_id_reg(u32 id)
+static inline bool is_vm_ftr_id_reg(u32 id)
{
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
sys_reg_CRm(id) < 8);
}
+static inline bool is_vcpu_ftr_id_reg(u32 id)
+{
+ return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
+}
+
static inline bool is_aa32_id_reg(u32 id)
{
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
@@ -2338,7 +2352,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR0_EL1_TGRAN16_2)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
ID_AA64MMFR1_EL1_HCX |
- ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_TWED |
ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_VH |
@@ -3069,12 +3082,14 @@ static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
for (i = 0; i < n; i++) {
if (!is_32 && table[i].reg && !table[i].reset) {
- kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
+ kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
+ &table[i], i, table[i].name);
return false;
}
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
- kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
+ kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
+ &table[i], i, table[i - 1].name, table[i].name);
return false;
}
}
@@ -3509,26 +3524,25 @@ void kvm_sys_regs_create_debugfs(struct kvm *kvm)
&idregs_debug_fops);
}
-static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
+static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
{
- const struct sys_reg_desc *idreg = first_idreg;
- u32 id = reg_to_encoding(idreg);
+ u32 id = reg_to_encoding(reg);
struct kvm *kvm = vcpu->kvm;
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
return;
lockdep_assert_held(&kvm->arch.config_lock);
+ IDREG(kvm, id) = reg->reset(vcpu, reg);
+}
- /* Initialize all idregs */
- while (is_id_reg(id)) {
- IDREG(kvm, id) = idreg->reset(vcpu, idreg);
-
- idreg++;
- id = reg_to_encoding(idreg);
- }
+static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *reg)
+{
+ if (kvm_vcpu_initialized(vcpu))
+ return;
- set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+ reg->reset(vcpu, reg);
}
/**
@@ -3540,19 +3554,24 @@ static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
*/
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long i;
- kvm_reset_id_regs(vcpu);
-
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
const struct sys_reg_desc *r = &sys_reg_descs[i];
- if (is_id_reg(reg_to_encoding(r)))
+ if (!r->reset)
continue;
- if (r->reset)
+ if (is_vm_ftr_id_reg(reg_to_encoding(r)))
+ reset_vm_ftr_id_reg(vcpu, r);
+ else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
+ reset_vcpu_ftr_id_reg(vcpu, r);
+ else
r->reset(vcpu, r);
}
+
+ set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}
/**
@@ -3978,14 +3997,6 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
sys_reg_CRm(r), \
sys_reg_Op2(r))
-static bool is_feature_id_reg(u32 encoding)
-{
- return (sys_reg_Op0(encoding) == 3 &&
- (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
- sys_reg_CRn(encoding) == 0 &&
- sys_reg_CRm(encoding) <= 7);
-}
-
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
const void *zero_page = page_to_virt(ZERO_PAGE(0));
@@ -4014,7 +4025,7 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
* compliant with a given revision of the architecture, but the
* RES0/RES1 definitions allow us to do that.
*/
- if (is_id_reg(encoding)) {
+ if (is_vm_ftr_id_reg(encoding)) {
if (!reg->val ||
(is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
continue;
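The sys_regs.c rework above splits reset handling into three cases: per-VM feature ID registers (reset once per VM), per-vCPU feature ID registers (reset only for uninitialized vCPUs), and everything else. A minimal sketch of that dispatch, with invented predicates stored as flags and a small table standing in for sys_reg_descs, could read:

#include <stdbool.h>
#include <stdio.h>

struct reg_desc {
	const char *name;
	bool vm_ftr_id;     /* invented flag: per-VM feature ID register   */
	bool vcpu_ftr_id;   /* invented flag: per-vCPU feature ID register */
	void (*reset)(const struct reg_desc *r);
};

static bool vm_id_regs_initialized;
static bool vcpu_initialized;

static void generic_reset(const struct reg_desc *r)
{
	printf("reset %s\n", r->name);
}

static void reset_all(const struct reg_desc *regs, int n)
{
	for (int i = 0; i < n; i++) {
		const struct reg_desc *r = &regs[i];

		if (!r->reset)
			continue;

		if (r->vm_ftr_id) {
			if (vm_id_regs_initialized)     /* once per VM   */
				continue;
		} else if (r->vcpu_ftr_id) {
			if (vcpu_initialized)           /* once per vCPU */
				continue;
		}
		r->reset(r);
	}
	vm_id_regs_initialized = true;
}

int main(void)
{
	const struct reg_desc regs[] = {
		{ "ID_VM_REG",   true,  false, generic_reset },
		{ "ID_VCPU_REG", false, true,  generic_reset },
		{ "PLAIN_REG",   false, false, generic_reset },
	};

	reset_all(regs, 3);     /* first vCPU: everything resets           */
	reset_all(regs, 3);     /* second pass: per-VM ID regs are skipped */
	return 0;
}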
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index 389025ce7749..bcbc8c986b1d 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -28,27 +28,65 @@ struct vgic_state_iter {
int nr_lpis;
int dist_id;
int vcpu_id;
- int intid;
+ unsigned long intid;
int lpi_idx;
- u32 *lpi_array;
};
-static void iter_next(struct vgic_state_iter *iter)
+static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
if (iter->dist_id == 0) {
iter->dist_id++;
return;
}
+ /*
+ * Let the xarray drive the iterator after the last SPI, as the iterator
+ * has exhausted the sequentially-allocated INTID space.
+ */
+ if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
+ if (iter->lpi_idx < iter->nr_lpis)
+ xa_find_after(&dist->lpi_xa, &iter->intid,
+ VGIC_LPI_MAX_INTID,
+ LPI_XA_MARK_DEBUG_ITER);
+ iter->lpi_idx++;
+ return;
+ }
+
iter->intid++;
if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
++iter->vcpu_id < iter->nr_cpus)
iter->intid = 0;
+}
- if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS)) {
- if (iter->lpi_idx < iter->nr_lpis)
- iter->intid = iter->lpi_array[iter->lpi_idx];
- iter->lpi_idx++;
+static int iter_mark_lpis(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq;
+ unsigned long intid;
+ int nr_lpis = 0;
+
+ xa_for_each(&dist->lpi_xa, intid, irq) {
+ if (!vgic_try_get_irq_kref(irq))
+ continue;
+
+ xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
+ nr_lpis++;
+ }
+
+ return nr_lpis;
+}
+
+static void iter_unmark_lpis(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq;
+ unsigned long intid;
+
+ xa_for_each(&dist->lpi_xa, intid, irq) {
+ xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
+ vgic_put_irq(kvm, irq);
}
}
@@ -61,15 +99,12 @@ static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
iter->nr_cpus = nr_cpus;
iter->nr_spis = kvm->arch.vgic.nr_spis;
- if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- iter->nr_lpis = vgic_copy_lpi_list(kvm, NULL, &iter->lpi_array);
- if (iter->nr_lpis < 0)
- iter->nr_lpis = 0;
- }
+ if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ iter->nr_lpis = iter_mark_lpis(kvm);
/* Fast forward to the right position if needed */
while (pos--)
- iter_next(iter);
+ iter_next(kvm, iter);
}
static bool end_of_vgic(struct vgic_state_iter *iter)
@@ -114,7 +149,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
++*pos;
- iter_next(iter);
+ iter_next(kvm, iter);
if (end_of_vgic(iter))
iter = NULL;
return iter;
@@ -134,13 +169,14 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
- kfree(iter->lpi_array);
+ iter_unmark_lpis(kvm);
kfree(iter);
kvm->arch.vgic.iter = NULL;
mutex_unlock(&kvm->arch.config_lock);
}
-static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
+static void print_dist_state(struct seq_file *s, struct vgic_dist *dist,
+ struct vgic_state_iter *iter)
{
bool v3 = dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3;
@@ -149,7 +185,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
if (v3)
- seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
+ seq_printf(s, "nr_lpis:\t%d\n", iter->nr_lpis);
seq_printf(s, "enabled:\t%d\n", dist->enabled);
seq_printf(s, "\n");
@@ -236,7 +272,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
unsigned long flags;
if (iter->dist_id == 0) {
- print_dist_state(s, &kvm->arch.vgic);
+ print_dist_state(s, &kvm->arch.vgic, iter);
return 0;
}
@@ -246,11 +282,13 @@ static int vgic_debug_show(struct seq_file *s, void *v)
if (iter->vcpu_id < iter->nr_cpus)
vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
+ /*
+ * Expect this to succeed, as iter_mark_lpis() takes a reference on
+ * every LPI to be visited.
+ */
irq = vgic_get_irq(kvm, vcpu, iter->intid);
- if (!irq) {
- seq_printf(s, " LPI %4d freed\n", iter->intid);
- return 0;
- }
+ if (WARN_ON_ONCE(!irq))
+ return -EINVAL;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
print_irq_state(s, irq, vcpu);
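The vgic-debug.c rework above replaces the snapshot array with a mark-and-reference scheme: every LPI the iterator will visit gets a reference and a per-entry mark up front, and both are dropped when the iterator is torn down, so entries cannot vanish mid-dump. A self-contained userspace model of that idea, with a flat array and a simple refcount standing in for the xarray and vgic_irq kref, could look like this:

#include <stdbool.h>
#include <stdio.h>

struct irq {
	int intid;
	int refcount;       /* > 0 while someone holds a reference */
	bool debug_marked;  /* stand-in for the xarray mark        */
};

static struct irq lpis[] = {
	{ 8192, 1, false },
	{ 8193, 1, false },
	{ 8200, 1, false },
};
#define NR_LPIS (int)(sizeof(lpis) / sizeof(lpis[0]))

/* Pin every LPI the debug iterator will visit and remember how many there are. */
static int iter_mark_lpis(void)
{
	int nr = 0;

	for (int i = 0; i < NR_LPIS; i++) {
		lpis[i].refcount++;
		lpis[i].debug_marked = true;
		nr++;
	}
	return nr;
}

/* Drop the references and marks once the iterator is torn down. */
static void iter_unmark_lpis(void)
{
	for (int i = 0; i < NR_LPIS; i++) {
		lpis[i].debug_marked = false;
		lpis[i].refcount--;
	}
}

int main(void)
{
	int nr = iter_mark_lpis();

	printf("snapshot holds %d LPIs\n", nr);
	for (int i = 0; i < NR_LPIS; i++)
		if (lpis[i].debug_marked)
			printf("visit LPI %d (refcount %d)\n",
			       lpis[i].intid, lpis[i].refcount);

	iter_unmark_lpis();
	return 0;
}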
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index f20941f83a07..8f5b7a3e7009 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -53,8 +53,6 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- INIT_LIST_HEAD(&dist->lpi_translation_cache);
- raw_spin_lock_init(&dist->lpi_list_lock);
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}
@@ -182,27 +180,22 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
return 0;
}
-/**
- * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
- * structures and register VCPU-specific KVM iodevs
- *
- * @vcpu: pointer to the VCPU being created and initialized
- *
- * Only do initialization, but do not actually enable the
- * VGIC CPU interface
- */
-int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int ret = 0;
int i;
- vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ lockdep_assert_held(&vcpu->kvm->arch.config_lock);
- INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
- raw_spin_lock_init(&vgic_cpu->ap_list_lock);
- atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
+ if (vgic_cpu->private_irqs)
+ return 0;
+
+ vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
+ sizeof(struct vgic_irq),
+ GFP_KERNEL_ACCOUNT);
+
+ if (!vgic_cpu->private_irqs)
+ return -ENOMEM;
/*
* Enable and configure all SGIs to be edge-triggered and
@@ -227,9 +220,48 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
}
}
+ return 0;
+}
+
+static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+ ret = vgic_allocate_private_irqs_locked(vcpu);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+
+ return ret;
+}
+
+/**
+ * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
+ * structures and register VCPU-specific KVM iodevs
+ *
+ * @vcpu: pointer to the VCPU being created and initialized
+ *
+ * Only do initialization, but do not actually enable the
+ * VGIC CPU interface
+ */
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int ret = 0;
+
+ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ raw_spin_lock_init(&vgic_cpu->ap_list_lock);
+ atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
+
if (!irqchip_in_kernel(vcpu->kvm))
return 0;
+ ret = vgic_allocate_private_irqs(vcpu);
+ if (ret)
+ return ret;
+
/*
* If we are creating a VCPU with a GICv3 we must also register the
* KVM io device for the redistributor that belongs to this VCPU.
@@ -285,10 +317,13 @@ int vgic_init(struct kvm *kvm)
/* Initialize groups on CPUs created before the VGIC type was known */
kvm_for_each_vcpu(idx, vcpu, kvm) {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ ret = vgic_allocate_private_irqs_locked(vcpu);
+ if (ret)
+ goto out;
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+ struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
+
switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
@@ -300,14 +335,15 @@ int vgic_init(struct kvm *kvm)
break;
default:
ret = -EINVAL;
- goto out;
}
+
+ vgic_put_irq(kvm, irq);
+
+ if (ret)
+ goto out;
}
}
- if (vgic_has_its(kvm))
- vgic_lpi_translation_cache_init(kvm);
-
/*
* If we have GICv4.1 enabled, unconditionally request enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
@@ -361,9 +397,6 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
}
- if (vgic_has_its(kvm))
- vgic_lpi_translation_cache_destroy(kvm);
-
if (vgic_supports_direct_msis(kvm))
vgic_v4_teardown(kvm);
@@ -381,6 +414,9 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ kfree(vgic_cpu->private_irqs);
+ vgic_cpu->private_irqs = NULL;
+
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
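vgic_allocate_private_irqs_locked() above is a lazy, idempotent allocation guarded by the config lock: return 0 if the array already exists, otherwise allocate it or fail cleanly with -ENOMEM. A self-contained userspace sketch of the same shape, with a pthread mutex standing in for the config lock and a trimmed-down struct, follows:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PRIVATE_IRQS 32

struct irq { int intid; };

struct vgic_cpu {
	struct irq *private_irqs;   /* allocated on first use */
};

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold config_lock; allocation happens at most once per vCPU. */
static int allocate_private_irqs_locked(struct vgic_cpu *vgic_cpu)
{
	if (vgic_cpu->private_irqs)
		return 0;

	vgic_cpu->private_irqs = calloc(NR_PRIVATE_IRQS, sizeof(struct irq));
	if (!vgic_cpu->private_irqs)
		return -ENOMEM;

	for (int i = 0; i < NR_PRIVATE_IRQS; i++)
		vgic_cpu->private_irqs[i].intid = i;
	return 0;
}

static int allocate_private_irqs(struct vgic_cpu *vgic_cpu)
{
	int ret;

	pthread_mutex_lock(&config_lock);
	ret = allocate_private_irqs_locked(vgic_cpu);
	pthread_mutex_unlock(&config_lock);
	return ret;
}

int main(void)
{
	struct vgic_cpu vcpu0 = { 0 };

	printf("first call:  %d\n", allocate_private_irqs(&vcpu0));
	printf("second call: %d\n", allocate_private_irqs(&vcpu0));   /* already allocated */
	free(vcpu0.private_irqs);
	return 0;
}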
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index e85a495ada9c..40bb43f20bf3 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -23,6 +23,8 @@
#include "vgic.h"
#include "vgic-mmio.h"
+static struct kvm_device_ops kvm_arm_vgic_its_ops;
+
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
@@ -67,7 +69,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->target_vcpu = vcpu;
irq->group = 1;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ xa_lock_irqsave(&dist->lpi_xa, flags);
/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -82,17 +84,14 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
goto out_unlock;
}
- ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+ ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
if (ret) {
xa_release(&dist->lpi_xa, intid);
kfree(irq);
- goto out_unlock;
}
- atomic_inc(&dist->lpi_count);
-
out_unlock:
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ xa_unlock_irqrestore(&dist->lpi_xa, flags);
if (ret)
return ERR_PTR(ret);
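For orientation, here is a minimal sketch of the xarray locking idiom the hunk above switches to: the plain xa_store()/xa_erase() helpers take xa_lock internally, while the double-underscore variants are for callers that already hold it across several operations (the names below are generic, not taken from the patch; the patch itself can pass a zero gfp mask to __xa_store() because the slot appears to have been reserved up front, outside this hunk):

	unsigned long flags;
	void *old;

	/* Unlocked caller: xa_store() acquires and drops xa_lock itself. */
	old = xa_store(&xa, index, entry, GFP_KERNEL);

	/* Caller already holding the lock, e.g. to pair a lookup with the store. */
	xa_lock_irqsave(&xa, flags);
	old = __xa_store(&xa, index, entry, GFP_ATOMIC);
	xa_unlock_irqrestore(&xa, flags);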
@@ -150,14 +149,6 @@ struct its_ite {
u32 event_id;
};
-struct vgic_translation_cache_entry {
- struct list_head entry;
- phys_addr_t db;
- u32 devid;
- u32 eventid;
- struct vgic_irq *irq;
-};
-
/**
* struct vgic_its_abi - ITS abi ops and settings
* @cte_esz: collection table entry size
@@ -252,8 +243,10 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
#define GIC_LPI_OFFSET 8192
-#define VITS_TYPER_IDBITS 16
-#define VITS_TYPER_DEVBITS 16
+#define VITS_TYPER_IDBITS 16
+#define VITS_MAX_EVENTID (BIT(VITS_TYPER_IDBITS) - 1)
+#define VITS_TYPER_DEVBITS 16
+#define VITS_MAX_DEVID (BIT(VITS_TYPER_DEVBITS) - 1)
#define VITS_DTE_MAX_DEVID_OFFSET (BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET (BIT(16) - 1)
@@ -316,53 +309,6 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
return 0;
}
-#define GIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
-
-/*
- * Create a snapshot of the current LPIs targeting @vcpu, so that we can
- * enumerate those LPIs without holding any lock.
- * Returns their number and puts the kmalloc'ed array into intid_ptr.
- */
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
- struct vgic_irq *irq;
- unsigned long flags;
- u32 *intids;
- int irq_count, i = 0;
-
- /*
- * There is an obvious race between allocating the array and LPIs
- * being mapped/unmapped. If we ended up here as a result of a
- * command, we're safe (locks are held, preventing another
- * command). If coming from another path (such as enabling LPIs),
- * we must be careful not to overrun the array.
- */
- irq_count = atomic_read(&dist->lpi_count);
- intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
- if (!intids)
- return -ENOMEM;
-
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- rcu_read_lock();
-
- xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
- if (i == irq_count)
- break;
- /* We don't need to "get" the IRQ, as we hold the list lock. */
- if (vcpu && irq->target_vcpu != vcpu)
- continue;
- intids[i++] = irq->intid;
- }
-
- rcu_read_unlock();
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-
- *intid_ptr = intids;
- return i;
-}
-
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
int ret = 0;
@@ -446,23 +392,18 @@ static u32 max_lpis_propbaser(u64 propbaser)
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long intid, flags;
struct vgic_irq *irq;
int last_byte_offset = -1;
int ret = 0;
- u32 *intids;
- int nr_irqs, i;
- unsigned long flags;
u8 pendmask;
- nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
- if (nr_irqs < 0)
- return nr_irqs;
-
- for (i = 0; i < nr_irqs; i++) {
+ xa_for_each(&dist->lpi_xa, intid, irq) {
int byte_offset, bit_nr;
- byte_offset = intids[i] / BITS_PER_BYTE;
- bit_nr = intids[i] % BITS_PER_BYTE;
+ byte_offset = intid / BITS_PER_BYTE;
+ bit_nr = intid % BITS_PER_BYTE;
/*
* For contiguously allocated LPIs chances are we just read
@@ -472,25 +413,23 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
ret = kvm_read_guest_lock(vcpu->kvm,
pendbase + byte_offset,
&pendmask, 1);
- if (ret) {
- kfree(intids);
+ if (ret)
return ret;
- }
+
last_byte_offset = byte_offset;
}
- irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+ irq = vgic_get_irq(vcpu->kvm, NULL, intid);
if (!irq)
continue;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- irq->pending_latch = pendmask & (1U << bit_nr);
+ if (irq->target_vcpu == vcpu)
+ irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
- kfree(intids);
-
return ret;
}
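As a worked example of the indexing above (plain arithmetic, not code from the patch): LPIs start at GIC_LPI_OFFSET = 8192, so for intid = 8195 we get byte_offset = 8195 / 8 = 1024 and bit_nr = 8195 % 8 = 3, i.e. the pending state is bit 3 of the byte at pendbase + 1024 in the guest's pending table. Because LPI INTIDs are typically allocated contiguously, consecutive iterations usually land in the same byte, which is why the loop caches last_byte_offset and only calls kvm_read_guest_lock() again when the byte actually changes.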
@@ -566,51 +505,52 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
return 0;
}
-static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
- phys_addr_t db,
- u32 devid, u32 eventid)
+static struct vgic_its *__vgic_doorbell_to_its(struct kvm *kvm, gpa_t db)
{
- struct vgic_translation_cache_entry *cte;
+ struct kvm_io_device *kvm_io_dev;
+ struct vgic_io_device *iodev;
- list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
- /*
- * If we hit a NULL entry, there is nothing after this
- * point.
- */
- if (!cte->irq)
- break;
+ kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, db);
+ if (!kvm_io_dev)
+ return ERR_PTR(-EINVAL);
- if (cte->db != db || cte->devid != devid ||
- cte->eventid != eventid)
- continue;
+ if (kvm_io_dev->ops != &kvm_io_gic_ops)
+ return ERR_PTR(-EINVAL);
- /*
- * Move this entry to the head, as it is the most
- * recently used.
- */
- if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
- list_move(&cte->entry, &dist->lpi_translation_cache);
+ iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+ if (iodev->iodev_type != IODEV_ITS)
+ return ERR_PTR(-EINVAL);
- return cte->irq;
- }
+ return iodev->its;
+}
+
+static unsigned long vgic_its_cache_key(u32 devid, u32 eventid)
+{
+ return (((unsigned long)devid) << VITS_TYPER_IDBITS) | eventid;
- return NULL;
}
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
u32 devid, u32 eventid)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
+ unsigned long cache_key = vgic_its_cache_key(devid, eventid);
+ struct vgic_its *its;
struct vgic_irq *irq;
- unsigned long flags;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ if (devid > VITS_MAX_DEVID || eventid > VITS_MAX_EVENTID)
+ return NULL;
- irq = __vgic_its_check_cache(dist, db, devid, eventid);
+ its = __vgic_doorbell_to_its(kvm, db);
+ if (IS_ERR(its))
+ return NULL;
+
+ rcu_read_lock();
+
+ irq = xa_load(&its->translation_cache, cache_key);
if (!vgic_try_get_irq_kref(irq))
irq = NULL;
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ rcu_read_unlock();
return irq;
}
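For illustration, the cache key simply concatenates the two lookup fields, so a single xa_load() replaces the old list walk. With VITS_TYPER_IDBITS = 16 and made-up IDs devid = 0x12, eventid = 0x0034:

	cache_key = ((unsigned long)0x12 << 16) | 0x0034 = 0x120034

The preceding range checks against VITS_MAX_DEVID and VITS_MAX_EVENTID guarantee the eventid can never spill into the devid bits, so distinct (devid, eventid) pairs always map to distinct keys.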
@@ -619,41 +559,13 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
u32 devid, u32 eventid,
struct vgic_irq *irq)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct vgic_translation_cache_entry *cte;
- unsigned long flags;
- phys_addr_t db;
+ unsigned long cache_key = vgic_its_cache_key(devid, eventid);
+ struct vgic_irq *old;
/* Do not cache a directly injected interrupt */
if (irq->hw)
return;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-
- if (unlikely(list_empty(&dist->lpi_translation_cache)))
- goto out;
-
- /*
- * We could have raced with another CPU caching the same
- * translation behind our back, so let's check it is not in
- * already
- */
- db = its->vgic_its_base + GITS_TRANSLATER;
- if (__vgic_its_check_cache(dist, db, devid, eventid))
- goto out;
-
- /* Always reuse the last entry (LRU policy) */
- cte = list_last_entry(&dist->lpi_translation_cache,
- typeof(*cte), entry);
-
- /*
- * Caching the translation implies having an extra reference
- * to the interrupt, so drop the potential reference on what
- * was in the cache, and increment it on the new interrupt.
- */
- if (cte->irq)
- vgic_put_irq(kvm, cte->irq);
-
/*
* The irq refcount is guaranteed to be nonzero while holding the
* its_lock, as the ITE (and the reference it holds) cannot be freed.
@@ -661,39 +573,44 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);
- cte->db = db;
- cte->devid = devid;
- cte->eventid = eventid;
- cte->irq = irq;
+ /*
+ * We could have raced with another CPU caching the same
+ * translation behind our back; ensure we don't leak a

+ * reference if that is the case.
+ */
+ old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+ if (old)
+ vgic_put_irq(kvm, old);
+}
- /* Move the new translation to the head of the list */
- list_move(&cte->entry, &dist->lpi_translation_cache);
+static void vgic_its_invalidate_cache(struct vgic_its *its)
+{
+ struct kvm *kvm = its->dev->kvm;
+ struct vgic_irq *irq;
+ unsigned long idx;
-out:
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ xa_for_each(&its->translation_cache, idx, irq) {
+ xa_erase(&its->translation_cache, idx);
+ vgic_put_irq(kvm, irq);
+ }
}
-void vgic_its_invalidate_cache(struct kvm *kvm)
+void vgic_its_invalidate_all_caches(struct kvm *kvm)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct vgic_translation_cache_entry *cte;
- unsigned long flags;
+ struct kvm_device *dev;
+ struct vgic_its *its;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ rcu_read_lock();
- list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
- /*
- * If we hit a NULL entry, there is nothing after this
- * point.
- */
- if (!cte->irq)
- break;
+ list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
+ if (dev->ops != &kvm_arm_vgic_its_ops)
+ continue;
- vgic_put_irq(kvm, cte->irq);
- cte->irq = NULL;
+ its = dev->private;
+ vgic_its_invalidate_cache(its);
}
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ rcu_read_unlock();
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
@@ -725,8 +642,6 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
u64 address;
- struct kvm_io_device *kvm_io_dev;
- struct vgic_io_device *iodev;
if (!vgic_has_its(kvm))
return ERR_PTR(-ENODEV);
@@ -736,18 +651,7 @@ struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
address = (u64)msi->address_hi << 32 | msi->address_lo;
- kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
- if (!kvm_io_dev)
- return ERR_PTR(-EINVAL);
-
- if (kvm_io_dev->ops != &kvm_io_gic_ops)
- return ERR_PTR(-EINVAL);
-
- iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
- if (iodev->iodev_type != IODEV_ITS)
- return ERR_PTR(-EINVAL);
-
- return iodev->its;
+ return __vgic_doorbell_to_its(kvm, address);
}
/*
@@ -883,7 +787,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
* don't bother here since we clear the ITTE anyway and the
* pending state is a property of the ITTE struct.
*/
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
its_free_ite(kvm, ite);
return 0;
@@ -920,7 +824,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
ite->collection = collection;
vcpu = collection_to_vcpu(kvm, collection);
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
return update_affinity(ite->irq, vcpu);
}
@@ -955,7 +859,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
switch (type) {
case GITS_BASER_TYPE_DEVICE:
- if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
+ if (id > VITS_MAX_DEVID)
return false;
break;
case GITS_BASER_TYPE_COLLECTION:
@@ -1167,7 +1071,8 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
}
/* Requires the its_lock to be held. */
-static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
+static void vgic_its_free_device(struct kvm *kvm, struct vgic_its *its,
+ struct its_device *device)
{
struct its_ite *ite, *temp;
@@ -1179,7 +1084,7 @@ static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
its_free_ite(kvm, ite);
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
list_del(&device->dev_list);
kfree(device);
@@ -1191,7 +1096,7 @@ static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
struct its_device *cur, *temp;
list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
- vgic_its_free_device(kvm, cur);
+ vgic_its_free_device(kvm, its, cur);
}
/* its lock must be held */
@@ -1250,7 +1155,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
* by removing the mapping and re-establishing it.
*/
if (device)
- vgic_its_free_device(kvm, device);
+ vgic_its_free_device(kvm, its, device);
/*
* The spec does not say whether unmapping a not-mapped device
@@ -1281,7 +1186,7 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
if (!valid) {
vgic_its_free_collection(its, coll_id);
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
} else {
struct kvm_vcpu *vcpu;
@@ -1372,23 +1277,19 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
int vgic_its_invall(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- int irq_count, i = 0;
- u32 *intids;
-
- irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
- if (irq_count < 0)
- return irq_count;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq;
+ unsigned long intid;
- for (i = 0; i < irq_count; i++) {
- struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
+ xa_for_each(&dist->lpi_xa, intid, irq) {
+ irq = vgic_get_irq(kvm, NULL, intid);
if (!irq)
continue;
+
update_lpi_config(kvm, irq, vcpu, false);
vgic_put_irq(kvm, irq);
}
- kfree(intids);
-
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
@@ -1431,10 +1332,10 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
+ struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu1, *vcpu2;
struct vgic_irq *irq;
- u32 *intids;
- int irq_count, i;
+ unsigned long intid;
/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
@@ -1446,12 +1347,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
if (vcpu1 == vcpu2)
return 0;
- irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
- if (irq_count < 0)
- return irq_count;
-
- for (i = 0; i < irq_count; i++) {
- irq = vgic_get_irq(kvm, NULL, intids[i]);
+ xa_for_each(&dist->lpi_xa, intid, irq) {
+ irq = vgic_get_irq(kvm, NULL, intid);
if (!irq)
continue;
@@ -1460,9 +1357,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
vgic_put_irq(kvm, irq);
}
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
- kfree(intids);
return 0;
}
@@ -1813,7 +1709,7 @@ static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
its->enabled = !!(val & GITS_CTLR_ENABLE);
if (!its->enabled)
- vgic_its_invalidate_cache(kvm);
+ vgic_its_invalidate_cache(its);
/*
* Try to process any pending commands. This function bails out early
@@ -1914,47 +1810,6 @@ out:
return ret;
}
-/* Default is 16 cached LPIs per vcpu */
-#define LPI_DEFAULT_PCPU_CACHE_SIZE 16
-
-void vgic_lpi_translation_cache_init(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- unsigned int sz;
- int i;
-
- if (!list_empty(&dist->lpi_translation_cache))
- return;
-
- sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
-
- for (i = 0; i < sz; i++) {
- struct vgic_translation_cache_entry *cte;
-
- /* An allocation failure is not fatal */
- cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
- if (WARN_ON(!cte))
- break;
-
- INIT_LIST_HEAD(&cte->entry);
- list_add(&cte->entry, &dist->lpi_translation_cache);
- }
-}
-
-void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct vgic_translation_cache_entry *cte, *tmp;
-
- vgic_its_invalidate_cache(kvm);
-
- list_for_each_entry_safe(cte, tmp,
- &dist->lpi_translation_cache, entry) {
- list_del(&cte->entry);
- kfree(cte);
- }
-}
-
#define INITIAL_BASER_VALUE \
(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
@@ -1987,8 +1842,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
kfree(its);
return ret;
}
-
- vgic_lpi_translation_cache_init(dev->kvm);
}
mutex_init(&its->its_lock);
@@ -2006,6 +1859,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
INIT_LIST_HEAD(&its->device_list);
INIT_LIST_HEAD(&its->collection_list);
+ xa_init(&its->translation_cache);
dev->kvm->arch.vgic.msis_require_devid = true;
dev->kvm->arch.vgic.has_its = true;
@@ -2036,6 +1890,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
vgic_its_free_device_list(kvm, its);
vgic_its_free_collection_list(kvm, its);
+ vgic_its_invalidate_cache(its);
+ xa_destroy(&its->translation_cache);
mutex_unlock(&its->its_lock);
kfree(its);
@@ -2438,7 +2294,7 @@ static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
ret = vgic_its_restore_itt(its, dev);
if (ret) {
- vgic_its_free_device(its->dev->kvm, dev);
+ vgic_its_free_device(its->dev->kvm, its, dev);
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index f48b8dab8b3d..1d26bb5b02f4 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -338,12 +338,12 @@ int kvm_register_vgic_device(unsigned long type)
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr)
{
- int cpuid;
+ int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
- cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
-
- reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
+ if (!reg_attr->vcpu)
+ return -EINVAL;
return 0;
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index c15ee1df036a..a3983a631b5a 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -277,7 +277,7 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
return;
vgic_flush_pending_lpis(vcpu);
- vgic_its_invalidate_cache(vcpu->kvm);
+ vgic_its_invalidate_all_caches(vcpu->kvm);
atomic_set_release(&vgic_cpu->ctlr, 0);
} else {
ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 7e9cdb78f7ce..ae5a44d5702d 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -464,17 +464,10 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
kvm_vgic_global_state.vctrl_base + GICH_APR);
}
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
-{
- struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-
- cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
-}
-
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- vgic_v2_vmcr_sync(vcpu);
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 4ea3340786b9..ed6e412cd74b 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -722,15 +722,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- /*
- * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
- * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
- * VMCR_EL2 save/restore in the world switch.
- */
- if (likely(cpu_if->vgic_sre))
- kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
-
- kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);
+ kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
if (has_vhe())
__vgic_v3_activate_traps(cpu_if);
@@ -738,24 +730,13 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
WARN_ON(vgic_v4_load(vcpu));
}
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
-{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
- if (likely(cpu_if->vgic_sre))
- cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
-}
-
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
WARN_ON(vgic_v4_put(vcpu));
- vgic_v3_vmcr_sync(vcpu);
-
- kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
-
if (has_vhe())
__vgic_v3_deactivate_traps(cpu_if);
}
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 4ec93587c8cd..f07b3ddff7d4 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -29,9 +29,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
- * kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
@@ -126,7 +125,6 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
__xa_erase(&dist->lpi_xa, irq->intid);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
- atomic_dec(&dist->lpi_count);
kfree_rcu(irq, rcu);
}
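A minimal sketch of the nesting that the lock-ordering comment above describes, for code that needs both an AP list and a per-IRQ lock (function body and variable names are illustrative only; the xarray lock, when needed, nests between the two):

	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);	/* outermost, IRQs off */
	raw_spin_lock(&irq->irq_lock);				/* innermost */
	/* ... manipulate the IRQ while both locks are held ... */
	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);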
@@ -939,17 +937,6 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
vgic_v3_put(vcpu);
}
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
-{
- if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
- return;
-
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_vmcr_sync(vcpu);
- else
- vgic_v3_vmcr_sync(vcpu);
-}
-
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0c2b82de8fa3..6106ebd5ba42 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -16,6 +16,7 @@
#define INTERRUPT_ID_BITS_SPIS 10
#define INTERRUPT_ID_BITS_ITS 16
+#define VGIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
#define VGIC_PRI_BITS 5
#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
@@ -214,7 +215,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -253,7 +253,6 @@ bool vgic_v3_check_base(struct kvm *kvm);
void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
@@ -330,14 +329,11 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
}
bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
-void vgic_lpi_translation_cache_init(struct kvm *kvm);
-void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
-void vgic_its_invalidate_cache(struct kvm *kvm);
+void vgic_its_invalidate_all_caches(struct kvm *kvm);
/* GICv4.1 MMIO interface */
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index a635ab83fee3..b008a9b46a7f 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -1515,3 +1515,14 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
return insn;
}
+
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ enum aarch64_insn_system_register sysreg)
+{
+ u32 insn = aarch64_insn_get_mrs_value();
+
+ insn &= ~GENMASK(19, 0);
+ insn |= sysreg << 5;
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
+ insn, result);
+}
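A brief usage sketch for the new helper, mirroring how the A64_MRS_* wrappers later in this diff use it (the enum constants come from the companion insn.h changes, which are not shown in this excerpt):

	/* Encode 'mrs x0, tpidr_el1' */
	u32 insn = aarch64_insn_gen_mrs(AARCH64_INSN_REG_0,
					AARCH64_INSN_SYSREG_TPIDR_EL1);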
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 1b64b4c3f8bf..9f9486de0004 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -361,6 +361,35 @@ void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
+void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags)
+{
+ /*
+ * We can safely clear access/dirty without needing to unfold from
+ * the architecture's perspective, even when contpte is set. If the
+ * range starts or ends midway through a contpte block, we can just
+ * expand to include the full contpte block. While this is not
+ * exactly what the core-mm asked for, it tracks access/dirty per
+ * folio, not per page. And since we only create a contpte block
+ * when it is covered by a single folio, we can get away with
+ * clearing access/dirty for the whole block.
+ */
+ unsigned long start = addr;
+ unsigned long end = start + nr * PAGE_SIZE;
+
+ if (pte_cont(__ptep_get(ptep + nr - 1)))
+ end = ALIGN(end, CONT_PTE_SIZE);
+
+ if (pte_cont(__ptep_get(ptep))) {
+ start = ALIGN_DOWN(start, CONT_PTE_SIZE);
+ ptep = contpte_align_down(ptep);
+ }
+
+ __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
+}
+EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
+
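As a worked example of the expansion above (illustrative numbers, assuming 4K pages, where a contpte block is 16 entries / 64K): a caller asking to clear addr = 0x12000, nr = 4 inside the contpte block [0x10000, 0x20000) gets end = 0x16000. If the last PTE is contpte-mapped, end is rounded up to 0x20000; if the first one is too, start is rounded down to 0x10000 and ptep rewound to the block's first entry, so __clear_young_dirty_ptes() clears all 16 entries rather than the 4 requested. That is fine, because the whole block is backed by a single folio whose access/dirty state is tracked per folio anyway.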
int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t entry, int dirty)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 61886e43e3a1..b2b5792b2caa 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -7,7 +7,6 @@
#include <linux/gfp.h>
#include <linux/cache.h>
#include <linux/dma-map-ops.h>
-#include <linux/iommu.h>
#include <xen/xen.h>
#include <asm/cacheflush.h>
@@ -39,15 +38,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
dcache_clean_poc(start, start + size);
}
-#ifdef CONFIG_IOMMU_DMA
-void arch_teardown_dma_ops(struct device *dev)
-{
- dev->dma_ops = NULL;
-}
-#endif
-
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
int cls = cache_line_size_of_cpu();
@@ -58,8 +49,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
ARCH_DMA_MINALIGN, cls);
dev->dma_coherent = coherent;
- if (device_iommu_mapped(dev))
- iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
xen_setup_dma_ops(dev);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 8251e2fea9c7..451ba7cbd5ad 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -486,25 +486,6 @@ static void do_bad_area(unsigned long far, unsigned long esr,
}
}
-#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
-
-static vm_fault_t __do_page_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long addr,
- unsigned int mm_flags, unsigned long vm_flags,
- struct pt_regs *regs)
-{
- /*
- * Ok, we have a good vm_area for this memory access, so we can handle
- * it.
- * Check that the permissions on the VMA allow for the fault which
- * occurred.
- */
- if (!(vma->vm_flags & vm_flags))
- return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr, mm_flags, regs);
-}
-
static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -529,6 +510,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
+ int si_code;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -588,7 +570,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
if (!(vma->vm_flags & vm_flags)) {
vma_end_read(vma);
- goto lock_mmap;
+ fault = 0;
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area;
}
fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
@@ -613,12 +598,19 @@ lock_mmap:
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
- fault = VM_FAULT_BADMAP;
- goto done;
+ fault = 0;
+ si_code = SEGV_MAPERR;
+ goto bad_area;
}
- fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
+ if (!(vma->vm_flags & vm_flags)) {
+ mmap_read_unlock(mm);
+ fault = 0;
+ si_code = SEGV_ACCERR;
+ goto bad_area;
+ }
+ fault = handle_mm_fault(vma, addr, mm_flags, regs);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
@@ -637,13 +629,12 @@ retry:
mmap_read_unlock(mm);
done:
- /*
- * Handle the "normal" (no error) case first.
- */
- if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
- VM_FAULT_BADACCESS))))
+ /* Handle the "normal" (no error) case first. */
+ if (likely(!(fault & VM_FAULT_ERROR)))
return 0;
+ si_code = SEGV_MAPERR;
+bad_area:
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
@@ -678,13 +669,8 @@ done:
arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
} else {
- /*
- * Something tried to access memory that isn't in our memory
- * map.
- */
- arm64_force_sig_fault(SIGSEGV,
- fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
- far, inf->name);
+ /* Something tried to access memory that is not in our memory map */
+ arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
}
return 0;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0f0e10bb0a95..3f09ac73cce3 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -79,20 +79,6 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
}
#endif
-int pmd_huge(pmd_t pmd)
-{
- return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
-}
-
-int pud_huge(pud_t pud)
-{
-#ifndef __PAGETABLE_PMD_FOLDED
- return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
-#else
- return 0;
-#endif
-}
-
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
@@ -276,7 +262,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *ptep = NULL;
pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
+ p4dp = p4d_alloc(mm, pgdp, addr);
+ if (!p4dp)
+ return NULL;
+
pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
@@ -325,7 +314,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (sz != PUD_SIZE && pud_none(pud))
return NULL;
/* hugepage or swap? */
- if (pud_huge(pud) || !pud_present(pud))
+ if (pud_leaf(pud) || !pud_present(pud))
return (pte_t *)pudp;
/* table; check the next level */
@@ -337,7 +326,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
pmd_none(pmd))
return NULL;
- if (pmd_huge(pmd) || !pmd_present(pmd))
+ if (pmd_leaf(pmd) || !pmd_present(pmd))
return (pte_t *)pmdp;
if (sz == CONT_PTE_SIZE)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 03efd86dce0a..9b5ab6818f7f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -32,6 +32,7 @@
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>
+#include <linux/execmem.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -432,3 +433,142 @@ void dump_mem_limit(void)
pr_emerg("Memory Limit: none\n");
}
}
+
+#ifdef CONFIG_EXECMEM
+static u64 module_direct_base __ro_after_init = 0;
+static u64 module_plt_base __ro_after_init = 0;
+
+/*
+ * Choose a random page-aligned base address for a window of 'size' bytes which
+ * entirely contains the interval [start, end - 1].
+ */
+static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
+{
+ u64 max_pgoff, pgoff;
+
+ if ((end - start) >= size)
+ return 0;
+
+ max_pgoff = (size - (end - start)) / PAGE_SIZE;
+ pgoff = get_random_u32_inclusive(0, max_pgoff);
+
+ return start - pgoff * PAGE_SIZE;
+}
+
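A worked example of the arithmetic above (made-up sizes): with size = SZ_128M and a kernel image spanning end - start = 48M, max_pgoff = (128M - 48M) / 4K = 20480. Picking, say, pgoff = 10240 gives a base of start - 40M, and the resulting window [start - 40M, start + 88M) indeed contains [start, end - 1] while still being randomly placed within the 128M budget.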
+/*
+ * Modules may directly reference data and text anywhere within the kernel
+ * image and other modules. References using PREL32 relocations have a +/-2G
+ * range, and so we need to ensure that the entire kernel image and all modules
+ * fall within a 2G window such that these are always within range.
+ *
+ * Modules may directly branch to functions and code within the kernel text,
+ * and to functions and code within other modules. These branches will use
+ * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
+ * that the entire kernel text and all module text falls within a 128M window
+ * such that these are always within range. With PLTs, we can expand this to a
+ * 2G window.
+ *
+ * We chose the 128M region to surround the entire kernel image (rather than
+ * just the text) as using the same bounds for the 128M and 2G regions ensures
+ * by construction that we never select a 128M region that is not a subset of
+ * the 2G region. For very large and unusual kernel configurations this means
+ * we may fall back to PLTs where they could have been avoided, but this keeps
+ * the logic significantly simpler.
+ */
+static int __init module_init_limits(void)
+{
+ u64 kernel_end = (u64)_end;
+ u64 kernel_start = (u64)_text;
+ u64 kernel_size = kernel_end - kernel_start;
+
+ /*
+ * The default modules region is placed immediately below the kernel
+ * image, and is large enough to use the full 2G relocation range.
+ */
+ BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
+ BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
+
+ if (!kaslr_enabled()) {
+ if (kernel_size < SZ_128M)
+ module_direct_base = kernel_end - SZ_128M;
+ if (kernel_size < SZ_2G)
+ module_plt_base = kernel_end - SZ_2G;
+ } else {
+ u64 min = kernel_start;
+ u64 max = kernel_end;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
+ } else {
+ module_direct_base = random_bounding_box(SZ_128M, min, max);
+ if (module_direct_base) {
+ min = module_direct_base;
+ max = module_direct_base + SZ_128M;
+ }
+ }
+
+ module_plt_base = random_bounding_box(SZ_2G, min, max);
+ }
+
+ pr_info("%llu pages in range for non-PLT usage",
+ module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
+ pr_info("%llu pages in range for PLT usage",
+ module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
+
+ return 0;
+}
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ unsigned long fallback_start = 0, fallback_end = 0;
+ unsigned long start = 0, end = 0;
+
+ module_init_limits();
+
+ /*
+ * Where possible, prefer to allocate within direct branch range of the
+ * kernel such that no PLTs are necessary.
+ */
+ if (module_direct_base) {
+ start = module_direct_base;
+ end = module_direct_base + SZ_128M;
+
+ if (module_plt_base) {
+ fallback_start = module_plt_base;
+ fallback_end = module_plt_base + SZ_2G;
+ }
+ } else if (module_plt_base) {
+ start = module_plt_base;
+ end = module_plt_base + SZ_2G;
+ }
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = start,
+ .end = end,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ .fallback_start = fallback_start,
+ .fallback_end = fallback_end,
+ },
+ [EXECMEM_KPROBES] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL_ROX,
+ .alignment = 1,
+ },
+ [EXECMEM_BPF] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
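A short usage note on the ranges above: callers allocate from the EXECMEM_DEFAULT range first and, if that fails and a fallback range is populated, retry in the wider PLT-reachable window, which mirrors what the old arm64 module_alloc() did. Assuming the allocator entry point introduced by the same series keeps its execmem_alloc(type, size) shape, module loading effectively boils down to:

	/* tries [start, end), then [fallback_start, fallback_end) if set */
	void *p = execmem_alloc(EXECMEM_DEFAULT, size);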
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 495b732d5af3..c927e9312f10 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -109,28 +109,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static phys_addr_t __init early_pgtable_alloc(int shift)
{
phys_addr_t phys;
- void *ptr;
phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
MEMBLOCK_ALLOC_NOLEAKTRACE);
if (!phys)
panic("Failed to allocate page table page\n");
- /*
- * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
- * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
- * any level of table.
- */
- ptr = pte_set_fixmap(phys);
-
- memset(ptr, 0, PAGE_SIZE);
-
- /*
- * Implicit barriers also ensure the zeroed page is visible to the page
- * table walker
- */
- pte_clear_fixmap();
-
return phys;
}
@@ -172,16 +156,25 @@ bool pgattr_change_is_safe(u64 old, u64 new)
return ((old ^ new) & ~mask) == 0;
}
-static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
- phys_addr_t phys, pgprot_t prot)
+static void init_clear_pgtable(void *table)
{
- pte_t *ptep;
+ clear_page(table);
- ptep = pte_set_fixmap_offset(pmdp, addr);
+ /* Ensure the zeroing is observed by page table walks. */
+ dsb(ishst);
+}
+
+static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot)
+{
do {
pte_t old_pte = __ptep_get(ptep);
- __set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
+ /*
+ * Required barriers to make this visible to the table walker
+ * are deferred to the end of alloc_init_cont_pte().
+ */
+ __set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
/*
* After the PTE entry has been populated once, we
@@ -192,8 +185,6 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
phys += PAGE_SIZE;
} while (ptep++, addr += PAGE_SIZE, addr != end);
-
- pte_clear_fixmap();
}
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
@@ -204,6 +195,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
{
unsigned long next;
pmd_t pmd = READ_ONCE(*pmdp);
+ pte_t *ptep;
BUG_ON(pmd_sect(pmd));
if (pmd_none(pmd)) {
@@ -214,10 +206,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
pmdval |= PMD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc(PAGE_SHIFT);
+ ptep = pte_set_fixmap(pte_phys);
+ init_clear_pgtable(ptep);
+ ptep += pte_index(addr);
__pmd_populate(pmdp, pte_phys, pmdval);
- pmd = READ_ONCE(*pmdp);
+ } else {
+ BUG_ON(pmd_bad(pmd));
+ ptep = pte_set_fixmap_offset(pmdp, addr);
}
- BUG_ON(pmd_bad(pmd));
do {
pgprot_t __prot = prot;
@@ -229,20 +225,26 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
(flags & NO_CONT_MAPPINGS) == 0)
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- init_pte(pmdp, addr, next, phys, __prot);
+ init_pte(ptep, addr, next, phys, __prot);
+ ptep += pte_index(next) - pte_index(addr);
phys += next - addr;
} while (addr = next, addr != end);
+
+ /*
+ * Note: barriers and maintenance necessary to clear the fixmap slot
+ * ensure that all previous pgtable writes are visible to the table
+ * walker.
+ */
+ pte_clear_fixmap();
}
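In condensed form, the batching pattern this hunk introduces looks like the sketch below (details elided): each PTE is written with the no-sync variant, and the barriers taken when the fixmap slot is torn down publish the whole batch to the table walker in one go, instead of paying a dsb(ishst) per entry.

	do {
		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();	/* implies the barriers that make the writes visible */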
-static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
+static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int), int flags)
{
unsigned long next;
- pmd_t *pmdp;
- pmdp = pmd_set_fixmap_offset(pudp, addr);
do {
pmd_t old_pmd = READ_ONCE(*pmdp);
@@ -268,8 +270,6 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
}
phys += next - addr;
} while (pmdp++, addr = next, addr != end);
-
- pmd_clear_fixmap();
}
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
@@ -279,6 +279,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
+ pmd_t *pmdp;
/*
* Check for initial section mappings in the pgd/pud.
@@ -292,10 +293,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
pudval |= PUD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc(PMD_SHIFT);
+ pmdp = pmd_set_fixmap(pmd_phys);
+ init_clear_pgtable(pmdp);
+ pmdp += pmd_index(addr);
__pud_populate(pudp, pmd_phys, pudval);
- pud = READ_ONCE(*pudp);
+ } else {
+ BUG_ON(pud_bad(pud));
+ pmdp = pmd_set_fixmap_offset(pudp, addr);
}
- BUG_ON(pud_bad(pud));
do {
pgprot_t __prot = prot;
@@ -307,10 +312,13 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
(flags & NO_CONT_MAPPINGS) == 0)
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
+ init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags);
+ pmdp += pmd_index(next) - pmd_index(addr);
phys += next - addr;
} while (addr = next, addr != end);
+
+ pmd_clear_fixmap();
}
static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
@@ -330,12 +338,15 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
p4dval |= P4D_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc(PUD_SHIFT);
+ pudp = pud_set_fixmap(pud_phys);
+ init_clear_pgtable(pudp);
+ pudp += pud_index(addr);
__p4d_populate(p4dp, pud_phys, p4dval);
- p4d = READ_ONCE(*p4dp);
+ } else {
+ BUG_ON(p4d_bad(p4d));
+ pudp = pud_set_fixmap_offset(p4dp, addr);
}
- BUG_ON(p4d_bad(p4d));
- pudp = pud_set_fixmap_offset(p4dp, addr);
do {
pud_t old_pud = READ_ONCE(*pudp);
@@ -385,12 +396,15 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
pgdval |= PGD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
p4d_phys = pgtable_alloc(P4D_SHIFT);
+ p4dp = p4d_set_fixmap(p4d_phys);
+ init_clear_pgtable(p4dp);
+ p4dp += p4d_index(addr);
__pgd_populate(pgdp, p4d_phys, pgdval);
- pgd = READ_ONCE(*pgdp);
+ } else {
+ BUG_ON(pgd_bad(pgd));
+ p4dp = p4d_set_fixmap_offset(pgdp, addr);
}
- BUG_ON(pgd_bad(pgd));
- p4dp = p4d_set_fixmap_offset(pgdp, addr);
do {
p4d_t old_p4d = READ_ONCE(*p4dp);
@@ -457,11 +471,10 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
- void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
- BUG_ON(!ptr);
+ /* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
+ void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL & ~__GFP_ZERO);
- /* Ensure the zeroed page is visible to the page table walker */
- dsb(ishst);
+ BUG_ON(!ptr);
return __pa(ptr);
}
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index a31833e3ddc5..63e8d72f202a 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -68,6 +68,13 @@ void mte_invalidate_tags(int type, pgoff_t offset)
mte_free_tag_storage(tags);
}
+static inline void __mte_invalidate_tags(struct page *page)
+{
+ swp_entry_t entry = page_swap_entry(page);
+
+ mte_invalidate_tags(swp_type(entry), swp_offset(entry));
+}
+
void mte_invalidate_tags_area(int type)
{
swp_entry_t entry = swp_entry(type, 0);
@@ -83,3 +90,41 @@ void mte_invalidate_tags_area(int type)
}
xa_unlock(&mte_pages);
}
+
+int arch_prepare_to_swap(struct folio *folio)
+{
+ long i, nr;
+ int err;
+
+ if (!system_supports_mte())
+ return 0;
+
+ nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ err = mte_save_tags(folio_page(folio, i));
+ if (err)
+ goto out;
+ }
+ return 0;
+
+out:
+ while (i--)
+ __mte_invalidate_tags(folio_page(folio, i));
+ return err;
+}
+
+void arch_swap_restore(swp_entry_t entry, struct folio *folio)
+{
+ long i, nr;
+
+ if (!system_supports_mte())
+ return;
+
+ nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ mte_restore_tags(entry, folio_page(folio, i));
+ entry.val++;
+ }
+}
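A brief note on the restore loop above: a large folio is written out to nr consecutive swap slots, so entry.val++ simply advances the swap entry by one slot per page and page i gets its tags restored from swap offset swp_offset(entry) + i. For example (illustrative), a 16-page folio whose first slot sits at offset 0x100 has its tags restored from offsets 0x100 through 0x10f.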
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0c4e3ecf989d..0e270a1c51e6 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!can_set_direct_map())
- return true;
-
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9d40f3ffd8d2..f4bc6c5bac06 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -135,14 +135,6 @@ SYM_FUNC_START(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
-
- /*
- * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
- * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
- * exception. Mask them until local_daif_restore() in cpu_suspend()
- * resets them.
- */
- disable_daif
msr mdscr_el1, x10
msr sctlr_el1, x12
@@ -466,8 +458,6 @@ SYM_FUNC_START(__cpu_setup)
msr cpacr_el1, xzr // Reset cpacr_el1
mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0
- isb // Unmask debug exceptions now,
- enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x1 // Disable PMU access from EL0
reset_amuserenr_el0 x1 // Disable AMU access from EL0
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 23b1b34db088..b22ab2f97a30 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -297,4 +297,12 @@
#define A64_ADR(Rd, offset) \
aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
+/* MRS */
+#define A64_MRS_TPIDR_EL1(Rt) \
+ aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL1)
+#define A64_MRS_TPIDR_EL2(Rt) \
+ aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL2)
+#define A64_MRS_SP_EL0(Rt) \
+ aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)
+
#endif /* _BPF_JIT_H */
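A minimal sketch of how the JIT consumes these wrappers, mirroring the bpf_jit_comp.c hunks later in this diff (tmp and ctx are the JIT's usual scratch register and codegen context):

	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
		emit(A64_MRS_TPIDR_EL2(tmp), ctx);	/* VHE: per-CPU offset lives in TPIDR_EL2 */
	else
		emit(A64_MRS_TPIDR_EL1(tmp), ctx);
	emit(A64_ADD(1, dst, dst, tmp), ctx);		/* turn a per-CPU offset into an address */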
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c5b461dda438..720336d28856 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -29,6 +29,7 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
+#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
[FP_BOTTOM] = A64_R(27),
+ /* callee saved register for kern_vm_start address */
+ [ARENA_VM_START] = A64_R(28),
};
struct jit_ctx {
@@ -79,6 +82,7 @@ struct jit_ctx {
__le32 *ro_image;
u32 stack_size;
int fpb_offset;
+ u64 user_vm_start;
};
struct bpf_plt {
@@ -295,7 +299,7 @@ static bool is_lsi_offset(int offset, int scale)
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb)
+ bool is_exception_cb, u64 arena_vm_start)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +310,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const int idx0 = ctx->idx;
int cur_offset;
@@ -411,6 +416,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+ if (arena_vm_start)
+ emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+
return 0;
}
@@ -485,20 +494,26 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
+ const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const u8 dst = bpf2a64[insn->dst_reg];
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const bool isdw = BPF_SIZE(code) == BPF_DW;
+ const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
const s16 off = insn->off;
- u8 reg;
+ u8 reg = dst;
- if (!off) {
- reg = dst;
- } else {
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- reg = tmp;
+ if (off || arena) {
+ if (off) {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+ if (arena) {
+ emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+ reg = tmp;
+ }
}
switch (insn->imm) {
@@ -567,6 +582,12 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
u8 reg;
s32 jmp_offset;
+ if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
+ /* ll_sc based atomics don't support unsafe pointers yet. */
+ pr_err_once("unknown atomic opcode %02x\n", code);
+ return -EINVAL;
+ }
+
if (!off) {
reg = dst;
} else {
@@ -738,6 +759,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -745,7 +767,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
- regs->regs[dst_reg] = 0;
+ if (dst_reg != DONT_CLEAR)
+ regs->regs[dst_reg] = 0;
regs->pc = (unsigned long)&ex->fixup - offset;
return true;
}
@@ -765,7 +788,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
- BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
+ BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
return 0;
if (!ctx->prog->aux->extable ||
@@ -810,6 +835,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
ex->insn = ins_offset;
+ if (BPF_CLASS(insn->code) != BPF_LDX)
+ dst_reg = DONT_CLEAR;
+
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
@@ -829,12 +857,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
bool extra_pass)
{
const u8 code = insn->code;
- const u8 dst = bpf2a64[insn->dst_reg];
- const u8 src = bpf2a64[insn->src_reg];
+ u8 dst = bpf2a64[insn->dst_reg];
+ u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -853,6 +882,24 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
+ emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
+ emit(A64_LSL(1, dst, dst, 32), ctx);
+ emit(A64_CBZ(1, tmp, 2), ctx);
+ emit(A64_ORR(1, tmp, dst, tmp), ctx);
+ emit(A64_MOV(1, dst, tmp), ctx);
+ break;
+ } else if (insn_is_mov_percpu_addr(insn)) {
+ if (dst != src)
+ emit(A64_MOV(1, dst, src), ctx);
+ if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+ else
+ emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+ emit(A64_ADD(1, dst, dst, tmp), ctx);
+ break;
+ }
switch (insn->off) {
case 0:
emit(A64_MOV(is64, dst, src), ctx);
@@ -943,7 +990,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit(A64_UXTH(is64, dst, dst), ctx);
break;
case 32:
- emit(A64_REV32(is64, dst, dst), ctx);
+ emit(A64_REV32(0, dst, dst), ctx);
/* upper 32 bits already cleared */
break;
case 64:
@@ -1181,6 +1228,21 @@ emit_cond_jmp:
const u8 r0 = bpf2a64[BPF_REG_0];
bool func_addr_fixed;
u64 func_addr;
+ u32 cpu_offset;
+
+ /* Implement helper call to bpf_get_smp_processor_id() inline */
+ if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+ cpu_offset = offsetof(struct thread_info, cpu);
+
+ emit(A64_MRS_SP_EL0(tmp), ctx);
+ if (is_lsi_offset(cpu_offset, 2)) {
+ emit(A64_LDR32I(r0, tmp, cpu_offset), ctx);
+ } else {
+ emit_a64_mov_i(1, tmp2, cpu_offset, ctx);
+ emit(A64_LDR32(r0, tmp, tmp2), ctx);
+ }
+ break;
+ }
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&func_addr, &func_addr_fixed);
@@ -1237,7 +1299,15 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
- if (ctx->fpb_offset > 0 && src == fp) {
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
+ src = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
src_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1256,7 +1326,7 @@ emit_cond_jmp:
} else {
emit_a64_mov_i(1, tmp, off, ctx);
if (sign_extend)
- emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
+ emit(A64_LDRSW(dst, src, tmp), ctx);
else
emit(A64_LDR32(dst, src, tmp), ctx);
}
@@ -1322,7 +1392,15 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1365,6 +1443,10 @@ emit_cond_jmp:
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;
/* STX: *(size *)(dst + off) = src */
@@ -1372,7 +1454,15 @@ emit_cond_jmp:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1413,16 +1503,26 @@ emit_cond_jmp:
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
ret = emit_lse_atomic(insn, ctx);
else
ret = emit_ll_sc_atomic(insn, ctx);
if (ret)
return ret;
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;
default:
@@ -1594,6 +1694,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
+ u64 arena_vm_start;
u8 *image_ptr;
u8 *ro_image_ptr;
@@ -1611,6 +1712,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
+ arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1641,6 +1743,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.fpb_offset = find_fpb_offset(prog);
+ ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
@@ -1648,7 +1751,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+ arena_vm_start)) {
prog = orig_prog;
goto out_off;
}
@@ -1696,7 +1800,7 @@ skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
@@ -1793,17 +1897,6 @@ u64 bpf_jit_alloc_exec_limit(void)
return VMALLOC_END - VMALLOC_START;
}
-void *bpf_jit_alloc_exec(unsigned long size)
-{
- /* Memory is intended to be executable, reset the pointer tag. */
- return kasan_reset_tag(vmalloc(size));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
- return vfree(addr);
-}
-
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
@@ -1844,15 +1937,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
emit_call(enter_prog, ctx);
+ /* save return value to callee saved register x20 */
+ emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
branch = ctx->image + ctx->idx;
emit(A64_NOP, ctx);
- /* save return value to callee saved register x20 */
- emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
-
emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
if (!p->jited)
emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
@@ -2176,12 +2269,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
@@ -2464,6 +2554,39 @@ bool bpf_jit_supports_exceptions(void)
return true;
}
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+ if (!in_arena)
+ return true;
+ switch (insn->code) {
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+ return false;
+ }
+ return true;
+}
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+ return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+ switch (imm) {
+ case BPF_FUNC_get_smp_processor_id:
+ return true;
+ default:
+ return false;
+ }
+}
+
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
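
The arm64 hunks above implement BPF arena support: for BPF_PROBE_MEM32 loads and stores the JIT adds the kernel arena base (arena_vm_base, loaded in the prologue from bpf_arena_get_kern_vm_start()) to the 32-bit arena offset and registers an exception-table fixup for the access via add_exception_handler(). A minimal conceptual sketch of that address translation, with a made-up helper name standing in for the emitted instructions:

/* Sketch only: what the emitted A64_ADD(..., arena_vm_base) computes. */
#include <stdint.h>

static inline uint64_t arena_kernel_addr(uint64_t kern_vm_start, uint32_t arena_off)
{
	/* A fault on the resulting address is fixed up by the per-insn
	 * exception handler instead of crashing the kernel. */
	return kern_vm_start + arena_off;
}
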
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index d3ac36751ad1..5479707eb5d1 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -37,6 +37,7 @@ config CSKY
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_NEED_CMPXCHG_1_EMU
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
index 6792aca49999..7f826331d409 100644
--- a/arch/csky/abiv1/mmap.c
+++ b/arch/csky/abiv1/mmap.c
@@ -28,7 +28,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {
+ .length = len,
+ .low_limit = mm->mmap_base,
+ .high_limit = TASK_SIZE,
+ .align_offset = pgoff << PAGE_SHIFT
+ };
/*
* We only need to do colour alignment if either the I or D
@@ -61,11 +66,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
- info.length = len;
- info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
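
The rewrite above leans on a basic C guarantee: members omitted from a designated initializer are zero-initialized, which is what previously required the explicit info.flags = 0 assignment. A trivial standalone check (hypothetical struct, not the kernel's vm_unmapped_area_info):

#include <assert.h>

struct demo {
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	struct demo d = { .length = 4096, .high_limit = ~0UL };

	/* Every member not named in the initializer starts out as zero. */
	assert(d.flags == 0 && d.low_limit == 0 && d.align_mask == 0);
	return 0;
}
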
diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile
index 5f1f55e911ad..aabffc1912e8 100644
--- a/arch/csky/boot/dts/Makefile
+++ b/arch/csky/boot/dts/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtstree := $(srctree)/$(src)
-
-dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-y := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 916043b845f1..db6dda47184e 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_SMP
#include <linux/bug.h>
#include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
#define __xchg_relaxed(new, ptr, size) \
({ \
@@ -61,6 +62,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -91,6 +95,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -122,6 +129,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
RELEASE_FENCE \
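
The new size-1 cases above hand off to cmpxchg_emu_u8() from lib/cmpxchg-emu.c (pulled in by the ARCH_NEED_CMPXCHG_1_EMU select), which emulates a byte-wide cmpxchg on top of the native word-sized primitive. A rough sketch of that byte-in-word technique, assuming a little-endian byte lane and using a compiler builtin in place of the kernel's primitives:

#include <stdint.h>

/* Illustrative only; the real helper is lib/cmpxchg-emu.c:cmpxchg_emu_u8(). */
static uint8_t sketch_cmpxchg_emu_u8(volatile uint8_t *p, uint8_t old, uint8_t new)
{
	volatile uint32_t *word = (volatile uint32_t *)((uintptr_t)p & ~3UL);
	unsigned int shift = ((uintptr_t)p & 3UL) * 8;	/* little-endian lane */
	uint32_t mask = 0xffu << shift;
	uint32_t cur, want;

	for (;;) {
		cur = *word;
		if ((uint8_t)((cur & mask) >> shift) != old)
			return (cur & mask) >> shift;	/* compare failed, no store */
		want = (cur & ~mask) | ((uint32_t)new << shift);
		if (__atomic_compare_exchange_n(word, &cur, want, 0,
						__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return old;			/* byte exchanged atomically */
	}
}
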
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index 834cffcfbce3..7ba4b98076de 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -12,6 +12,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
index ddf784a62c11..bc2261f5a8d4 100644
--- a/arch/csky/kernel/vdso/Makefile
+++ b/arch/csky/kernel/vdso/Makefile
@@ -23,15 +23,11 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-
# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so
SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id=sha1 -Wl,--hash-style=both
@@ -57,4 +53,4 @@ quiet_cmd_vdsold = VDSOLD $@
# Extracts symbol offsets from the VDSO, converting them into an assembly file
# that contains the same symbols at the same offsets.
quiet_cmd_so2s = SO2S $@
- cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+ cmd_so2s = $(NM) -D $< | $(src)/so2s.sh > $@
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 1140051a0c45..1150b77fa281 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
+ .hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index a5f300ec6f28..a2774ad5e916 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -16,6 +16,7 @@ config LOONGARCH
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
@@ -56,6 +57,7 @@ config LOONGARCH
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -63,10 +65,12 @@ config LOONGARCH
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_NO_INSTR
+ select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BUILDTIME_TABLE_SORT
select COMMON_CLK
select CPU_PM
@@ -119,7 +123,7 @@ config LOONGARCH
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
@@ -174,7 +178,6 @@ config LOONGARCH
select PCI_QUIRKS
select PERF_USE_VMALLOC
select RTC_LIB
- select SMP
select SPARSE_IRQ
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_ARCH_UNALIGN_NO_WARN
@@ -420,6 +423,7 @@ config EFI_STUB
config SCHED_SMT
bool "SMT scheduler support"
+ depends on SMP
default y
help
Improves scheduler's performance when there are multiple
@@ -595,7 +599,7 @@ config ARCH_SELECTS_CRASH_DUMP
select RELOCATABLE
config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
- def_bool CRASH_CORE
+ def_bool CRASH_RESERVE
config RELOCATABLE
bool "Relocatable kernel"
@@ -632,6 +636,15 @@ config RANDOMIZE_BASE_MAX_OFFSET
source "kernel/livepatch/Kconfig"
+config PARAVIRT
+ bool "Enable paravirtualization code"
+ depends on AS_HAS_LVZ_EXTENSION
+ help
+ This changes the kernel so it can modify itself when it is run
+ under a hypervisor, potentially improving performance significantly
+ over full virtualization. However, when run without a hypervisor
+ the kernel is theoretically slower and slightly larger.
+
endmenu
config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index df6caf79537a..4347915721bd 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -101,7 +101,7 @@ ifdef CONFIG_OBJTOOL
KBUILD_CFLAGS += -fno-jump-tables
endif
-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat
KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
ifeq ($(CONFIG_RELOCATABLE),y)
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 444779c21034..3b38ff8853a7 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
/ {
#address-cells = <2>;
@@ -19,14 +20,15 @@
compatible = "loongson,la264";
device_type = "cpu";
reg = <0x0>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
};
- cpu_clk: cpu-clk {
+ ref_100m: clock-ref-100m {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <500000000>;
+ clock-frequency = <100000000>;
+ clock-output-names = "ref_100m";
};
cpuintc: interrupt-controller {
@@ -35,6 +37,28 @@
interrupt-controller;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+
+ trips {
+ cpu-alert {
+ temperature = <33000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu-crit {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
bus@10000000 {
compatible = "simple-bus";
ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
@@ -52,6 +76,54 @@
ranges = <1 0x0 0x0 0x16400000 0x4000>;
};
+ clk: clock-controller@1fe10400 {
+ compatible = "loongson,ls2k0500-clk";
+ reg = <0x0 0x1fe10400 0x0 0x2c>;
+ #clock-cells = <1>;
+ clocks = <&ref_100m>;
+ clock-names = "ref_100m";
+ };
+
+ dma-controller@1fe10c00 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c00 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <67>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c10 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c10 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <68>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c20 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c20 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <69>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe10c30 {
+ compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma";
+ reg = <0 0x1fe10c30 0 0x8>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <70>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
liointc0: interrupt-controller@1fe11400 {
compatible = "loongson,liointc-2.0";
reg = <0x0 0x1fe11400 0x0 0x40>,
@@ -139,6 +211,14 @@
status = "disabled";
};
+ tsensor: thermal-sensor@1fe11500 {
+ compatible = "loongson,ls2k0500-thermal", "loongson,ls2k1000-thermal";
+ reg = <0x0 0x1fe11500 0x0 0x30>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
+ };
+
uart0: serial@1ff40800 {
compatible = "ns16550a";
reg = <0x0 0x1ff40800 0x0 0x10>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index ed4d32434041..8463fe035386 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -113,10 +113,6 @@
status = "okay";
};
-&clk {
- status = "okay";
-};
-
&rtc0 {
status = "okay";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 49a70f8c3cab..92180140eb56 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -100,6 +100,13 @@
#size-cells = <2>;
dma-coherent;
+ isa@18000000 {
+ compatible = "isa";
+ #size-cells = <1>;
+ #address-cells = <2>;
+ ranges = <1 0x0 0x0 0x18000000 0x4000>;
+ };
+
liointc0: interrupt-controller@1fe01400 {
compatible = "loongson,liointc-2.0";
reg = <0x0 0x1fe01400 0x0 0x40>,
@@ -152,7 +159,6 @@
#clock-cells = <1>;
clocks = <&ref_100m>;
clock-names = "ref_100m";
- status = "disabled";
};
gpio0: gpio@1fe00500 {
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index dca91caf895e..74b99bd234cc 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -61,12 +61,45 @@
&gmac0 {
status = "okay";
+
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <2>;
+ };
+ };
};
&gmac1 {
status = "okay";
+
+ phy-mode = "gmii";
+ phy-handle = <&phy1>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <2>;
+ };
+ };
};
&gmac2 {
status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&phy2>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy2: ethernet-phy@2 {
+ reg = <0>;
+ };
+ };
};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index a231949b5f55..0953c5707825 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
/ {
#address-cells = <2>;
@@ -19,21 +20,22 @@
compatible = "loongson,la364";
device_type = "cpu";
reg = <0x0>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
cpu1: cpu@2 {
compatible = "loongson,la364";
device_type = "cpu";
reg = <0x1>;
- clocks = <&cpu_clk>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
};
};
- cpu_clk: cpu-clk {
+ ref_100m: clock-ref-100m {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <1400000000>;
+ clock-frequency = <100000000>;
+ clock-output-names = "ref_100m";
};
cpuintc: interrupt-controller {
@@ -42,6 +44,28 @@
interrupt-controller;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+
+ trips {
+ cpu-alert {
+ temperature = <40000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu-crit {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
bus@10000000 {
compatible = "simple-bus";
ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
@@ -51,6 +75,21 @@
#address-cells = <2>;
#size-cells = <2>;
+ isa@18400000 {
+ compatible = "isa";
+ #size-cells = <1>;
+ #address-cells = <2>;
+ ranges = <1 0x0 0x0 0x18400000 0x4000>;
+ };
+
+ clk: clock-controller@10010480 {
+ compatible = "loongson,ls2k2000-clk";
+ reg = <0x0 0x10010480 0x0 0x100>;
+ #clock-cells = <1>;
+ clocks = <&ref_100m>;
+ clock-names = "ref_100m";
+ };
+
pmc: power-management@100d0000 {
compatible = "loongson,ls2k2000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x100d0000 0x0 0x58>;
@@ -73,6 +112,15 @@
};
};
+ tsensor: thermal-sensor@1fe01460 {
+ compatible = "loongson,ls2k2000-thermal";
+ reg = <0x0 0x1fe01460 0x0 0x30>,
+ <0x0 0x1fe0019c 0x0 0x4>;
+ interrupt-parent = <&liointc>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
+ };
+
liointc: interrupt-controller@1fe01400 {
compatible = "loongson,liointc-1.0";
reg = <0x0 0x1fe01400 0x0 0x64>;
@@ -109,6 +157,8 @@
msi: msi-controller@1fe01140 {
compatible = "loongson,pch-msi-1.0";
reg = <0x0 0x1fe01140 0x0 0x8>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
msi-controller;
loongson,msi-base-vec = <64>;
loongson,msi-num-vecs = <192>;
@@ -140,27 +190,34 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ msi-parent = <&msi>;
bus-range = <0x0 0xff>;
- ranges = <0x01000000 0x0 0x00008000 0x0 0x18400000 0x0 0x00008000>,
+ ranges = <0x01000000 0x0 0x00008000 0x0 0x18408000 0x0 0x00008000>,
<0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
- interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
- interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac2: ethernet@3,2 {
reg = <0x1a00 0x0 0x0 0x0 0x0>;
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index f18c2ba871ef..b4252c357c8e 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -14,6 +14,10 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
@@ -76,7 +80,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
-CONFIG_BLK_DEV_THROTTLING_LOW=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_FC_APPID=y
@@ -130,13 +133,22 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BBR=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_ESPINTCP=y
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_MROUTE=y
CONFIG_MPTCP=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
@@ -152,6 +164,8 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_CT=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -164,6 +178,7 @@ CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
@@ -260,6 +275,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -280,6 +296,7 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -550,6 +567,7 @@ CONFIG_NGBE=y
CONFIG_TXGBE=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_MOTORCOMM_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -811,6 +829,7 @@ CONFIG_NTB_SWITCHTEC=m
CONFIG_NTB_PERF=m
CONFIG_NTB_TRANSPORT=m
CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
CONFIG_USB4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -876,10 +895,13 @@ CONFIG_UBIFS_FS=m
CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_PSTORE=m
@@ -961,3 +983,4 @@ CONFIG_DEBUG_FS=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_UNWINDER_ORC=y
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2dbec7853ae8..c862672ed953 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -26,4 +26,3 @@ generic-y += poll.h
generic-y += param.h
generic-y += posix_types.h
generic-y += resource.h
-generic-y += kvm_para.h
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 49e29b29996f..313f66f7913a 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -8,6 +8,7 @@
#ifndef _ASM_LOONGARCH_ACPI_H
#define _ASM_LOONGARCH_ACPI_H
+#include <asm/smp.h>
#include <asm/suspend.h>
#ifdef CONFIG_ACPI
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index b24437e28c6e..7bd47d65bf7a 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -11,6 +11,7 @@
#define _ASM_ADDRSPACE_H
#include <linux/const.h>
+#include <linux/sizes.h>
#include <asm/loongarch.h>
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index cf8e1a4e7c19..51f224bcfc65 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -6,3 +6,9 @@
#include <asm/page.h>
#include <asm/ftrace.h>
#include <asm-generic/asm-prototypes.h>
+
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+__int128_t __ashlti3(__int128_t a, int b);
+__int128_t __ashrti3(__int128_t a, int b);
+__int128_t __lshrti3(__int128_t a, int b);
+#endif
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_reserve.h
index 218bdbfa527b..a1d9b84b1c7d 100644
--- a/arch/loongarch/include/asm/crash_core.h
+++ b/arch/loongarch/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LOONGARCH_CRASH_CORE_H
-#define _LOONGARCH_CRASH_CORE_H
+#ifndef _LOONGARCH_CRASH_RESERVE_H
+#define _LOONGARCH_CRASH_RESERVE_H
#define CRASH_ALIGN SZ_2M
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index 0ef3b18f8980..d41138abcf26 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -14,9 +14,15 @@ extern void ack_bad_irq(unsigned int irq);
#define NR_IPI 2
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNCTION,
+};
+
typedef struct {
unsigned int ipi_irqs[NR_IPI];
unsigned int __softirq_pending;
+ atomic_t message ____cacheline_aligned_in_smp;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index d8f637f9e400..c3993fd88aba 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -12,6 +12,7 @@
#define INSN_NOP 0x03400000
#define INSN_BREAK 0x002a0000
+#define INSN_HVCL 0x002b8000
#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
@@ -67,6 +68,7 @@ enum reg2_op {
revhd_op = 0x11,
extwh_op = 0x16,
extwb_op = 0x17,
+ cpucfg_op = 0x1b,
iocsrrdb_op = 0x19200,
iocsrrdh_op = 0x19201,
iocsrrdw_op = 0x19202,
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 4a8adcca329b..c2f9979b2979 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -14,11 +14,6 @@
#include <asm/pgtable-bits.h>
#include <asm/string.h>
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
@@ -73,6 +68,21 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define __io_aw() mmiowb()
+#ifdef CONFIG_KFENCE
+#define virt_to_phys(kaddr) \
+({ \
+ (likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) : \
+ page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
+})
+
+#define phys_to_virt(paddr) \
+({ \
+ extern char *__kfence_pool; \
+ (unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) : \
+ page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
+})
+#endif
+
#include <asm-generic/io.h>
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 218b4da0ea90..480418bc5071 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -117,7 +117,16 @@ extern struct fwnode_handle *liointc_handle;
extern struct fwnode_handle *pch_lpc_handle;
extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev);
+static inline int get_percpu_irq(int vector)
+{
+ struct irq_domain *d;
+
+ d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+ if (d)
+ return irq_create_mapping(d, vector);
+
+ return -EINVAL;
+}
#include <asm-generic/irq.h>
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
index 6c82aea1c993..92636e82957c 100644
--- a/arch/loongarch/include/asm/kfence.h
+++ b/arch/loongarch/include/asm/kfence.h
@@ -10,12 +10,14 @@
#define _ASM_LOONGARCH_KFENCE_H
#include <linux/kfence.h>
+#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
static inline bool arch_kfence_init_pool(void)
{
int err;
+ char *kaddr, *vaddr;
char *kfence_pool = __kfence_pool;
struct vm_struct *area;
@@ -35,6 +37,14 @@ static inline bool arch_kfence_init_pool(void)
return false;
}
+ kaddr = kfence_pool;
+ vaddr = __kfence_pool;
+ while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
+ set_page_address(virt_to_page(kaddr), vaddr);
+ kaddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+
return true;
}
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 2d62f7b0d377..c87b6ea0ec47 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -31,6 +31,11 @@
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_GUESTDBG_SW_BP_MASK \
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
+#define KVM_GUESTDBG_VALID_MASK \
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
+
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
@@ -43,6 +48,7 @@ struct kvm_vcpu_stat {
u64 idle_exits;
u64 cpucfg_exits;
u64 signal_exits;
+ u64 hypercall_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
@@ -64,6 +70,31 @@ struct kvm_world_switch {
#define MAX_PGTABLE_LEVELS 4
+/*
+ * Physical CPUID is used for interrupt routing, and there are different
+ * definitions of physical cpuid on different hardware.
+ *
+ * For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512
+ * For IPI hardware, the max destination CPUID size is 1024
+ * For the extioi interrupt controller, the max destination CPUID size is 256
+ * For the msgint interrupt controller, the max supported CPUID size is 65536
+ *
+ * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
+ * future it will be expanded to 4096, covering at most 16 packages with
+ * up to 256 vcpus each.
+ */
+#define KVM_MAX_PHYID 256
+
+struct kvm_phyid_info {
+ struct kvm_vcpu *vcpu;
+ bool enabled;
+};
+
+struct kvm_phyid_map {
+ int max_phyid;
+ struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
+};
+
struct kvm_arch {
/* Guest physical mm */
kvm_pte_t *pgd;
@@ -71,6 +102,8 @@ struct kvm_arch {
unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
unsigned int root_level;
+ spinlock_t phyid_map_lock;
+ struct kvm_phyid_map *phyid_map;
s64 time_offset;
struct kvm_context __percpu *vmcs;
@@ -203,7 +236,6 @@ void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
new file mode 100644
index 000000000000..4ba2312e5f8c
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_KVM_PARA_H
+#define _ASM_LOONGARCH_KVM_PARA_H
+
+/*
+ * Hypercall code field
+ */
+#define HYPERVISOR_KVM 1
+#define HYPERVISOR_VENDOR_SHIFT 8
+#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
+
+#define KVM_HCALL_CODE_SERVICE 0
+#define KVM_HCALL_CODE_SWDBG 1
+
+#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
+#define KVM_HCALL_FUNC_IPI 1
+
+#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
+
+/*
+ * LoongArch hypercall return code
+ */
+#define KVM_HCALL_SUCCESS 0
+#define KVM_HCALL_INVALID_CODE -1UL
+#define KVM_HCALL_INVALID_PARAMETER -2UL
+
+/*
+ * Hypercall interface for KVM hypervisor
+ *
+ * a0: function identifier
+ * a1-a6: args
+ * Return value will be placed in a0.
+ * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
+ */
+static __always_inline long kvm_hypercall0(u64 fid)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall2(u64 fid,
+ unsigned long arg0, unsigned long arg1)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1), "r" (a2)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall3(u64 fid,
+ unsigned long arg0, unsigned long arg1, unsigned long arg2)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r" (fun), "r" (a1), "r" (a2), "r" (a3)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall4(u64 fid,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+ register unsigned long a4 asm("a4") = arg3;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static __always_inline long kvm_hypercall5(u64 fid,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ register long ret asm("a0");
+ register unsigned long fun asm("a0") = fid;
+ register unsigned long a1 asm("a1") = arg0;
+ register unsigned long a2 asm("a2") = arg1;
+ register unsigned long a3 asm("a3") = arg2;
+ register unsigned long a4 asm("a4") = arg3;
+ register unsigned long a5 asm("a5") = arg4;
+
+ __asm__ __volatile__(
+ "hvcl "__stringify(KVM_HCALL_SERVICE)
+ : "=r" (ret)
+ : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
+ : "memory"
+ );
+
+ return ret;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+#endif /* _ASM_LOONGARCH_KVM_PARA_H */
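
For reference on the encoding used above: KVM_HCALL_SERVICE expands to (HYPERVISOR_KVM << HYPERVISOR_VENDOR_SHIFT) + KVM_HCALL_CODE_SERVICE = 0x100, which becomes the immediate of the hvcl instruction, while the function identifier and arguments travel in a0-a6. A hedged sketch of a guest-side caller, mirroring the pv_send_ipi_single() user added later in this series (the wrapper name here is made up):

/* Sketch, not kernel code: kick a single physical CPU via the IPI service. */
static inline void sketch_kvm_pv_kick(int phys_cpuid)
{
	/* a1/a2 carry a 128-bit destination bitmap, a3 the base physical
	 * cpuid; bit 0 set with base == phys_cpuid targets exactly that CPU. */
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1UL, 0UL, phys_cpuid);
}
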
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 0cb4fdb8a9b5..590a92cb5416 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -81,6 +81,7 @@ void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid);
/*
* Loongarch KVM guest interrupt handling
@@ -109,4 +110,14 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
return -1;
}
+static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gprs[num];
+}
+
+static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
+{
+ vcpu->arch.gprs[num] = val;
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 46366e783c84..eb09adda54b7 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -158,6 +158,18 @@
#define CPUCFG48_VFPU_CG BIT(2)
#define CPUCFG48_RAM_CG BIT(3)
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE 0x40000000
+#define CPUCFG_KVM_SIZE 0x100
+
+#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
+#define KVM_SIGNATURE "KVM\0"
+#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_IPI BIT(1)
+
#ifndef __ASSEMBLY__
/* CSR */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index 44027060c54a..e85df33f11c7 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -78,7 +78,26 @@ typedef struct { unsigned long pgprot; } pgprot_t;
struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);
-#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
+#define pfn_to_phys(pfn) __pfn_to_phys(pfn)
+#define phys_to_pfn(paddr) __phys_to_pfn(paddr)
+
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define phys_to_page(paddr) pfn_to_page(phys_to_pfn(paddr))
+
+#ifndef CONFIG_KFENCE
+
+#define page_to_virt(page) __va(page_to_phys(page))
+#define virt_to_page(kaddr) phys_to_page(__pa(kaddr))
+
+#else
+
+#define WANT_PAGE_VIRTUAL
+
+#define page_to_virt(page) \
+({ \
+ extern char *__kfence_pool; \
+ (__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page); \
+})
#define virt_to_page(kaddr) \
({ \
@@ -86,6 +105,11 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
})
+#endif
+
+#define pfn_to_virt(pfn) page_to_virt(pfn_to_page(pfn))
+#define virt_to_pfn(kaddr) page_to_pfn(virt_to_page(kaddr))
+
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
new file mode 100644
index 000000000000..0965710f47f2
--- /dev/null
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_PARAVIRT_H
+#define _ASM_LOONGARCH_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+
+#include <linux/static_call_types.h>
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 dummy_steal_clock(int cpu);
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+
+int __init pv_ipi_init(void);
+
+#else
+
+static inline int pv_ipi_init(void)
+{
+ return 0;
+}
+
+#endif // CONFIG_PARAVIRT
+#endif
diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..65ac7cee0dad
--- /dev/null
+++ b/arch/loongarch/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 2a35a0bc2aaa..f948a0676daf 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -7,6 +7,13 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__
+#include <asm/ptrace.h>
+
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->csr_era = (__ip); \
+ (regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
+}
+
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index f81e5f01d619..278700cfee88 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -6,12 +6,21 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#ifdef CONFIG_SMP
+
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+struct smp_ops {
+ void (*init_ipi)(void);
+ void (*send_ipi_single)(int cpu, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
+};
+extern struct smp_ops mp_ops;
+
extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
@@ -24,8 +33,6 @@ void loongson_prepare_cpus(unsigned int max_cpus);
void loongson_boot_secondary(int cpu, struct task_struct *idle);
void loongson_init_secondary(void);
void loongson_smp_finish(void);
-void loongson_send_ipi_single(int cpu, unsigned int action);
-void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action);
#ifdef CONFIG_HOTPLUG_CPU
int loongson_cpu_disable(void);
void loongson_cpu_die(unsigned int cpu);
@@ -59,9 +66,12 @@ extern int __cpu_logical_map[NR_CPUS];
#define cpu_physical_id(cpu) cpu_logical_map(cpu)
-#define SMP_BOOT_CPU 0x1
-#define SMP_RESCHEDULE 0x2
-#define SMP_CALL_FUNCTION 0x4
+#define ACTION_BOOT_CPU 0
+#define ACTION_RESCHEDULE 1
+#define ACTION_CALL_FUNCTION 2
+#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
+#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
+#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
struct secondary_data {
unsigned long stack;
@@ -81,12 +91,12 @@ extern void show_ipi_list(struct seq_file *p, int prec);
static inline void arch_send_call_function_single_ipi(int cpu)
{
- loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+ mp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+ mp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,4 +111,8 @@ static inline void __cpu_die(unsigned int cpu)
}
#endif
+#else /* !CONFIG_SMP */
+#define cpu_logical_map(cpu) 0
+#endif /* CONFIG_SMP */
+
#endif /* __ASM_SMP_H */
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
index da7a3b5b9374..e071f5e9e858 100644
--- a/arch/loongarch/include/asm/tlb.h
+++ b/arch/loongarch/include/asm/tlb.h
@@ -132,8 +132,6 @@ static __always_inline void invtlb_all(u32 op, u32 info, u64 addr)
);
}
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
static void tlb_flush(struct mmu_gather *tlb);
#define tlb_flush tlb_flush
diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/video.h
index 0b218b10a9ec..9f76845f2d4f 100644
--- a/arch/loongarch/include/asm/fb.h
+++ b/arch/loongarch/include/asm/video.h
@@ -2,8 +2,8 @@
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
#include <linux/compiler.h>
#include <linux/string.h>
@@ -26,6 +26,6 @@ static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
}
#define fb_memset fb_memset_io
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _ASM_FB_H_ */
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 109785922cf9..f9abef382317 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -17,6 +17,8 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+
/*
* for KVM_GET_REGS and KVM_SET_REGS
*/
@@ -72,6 +74,8 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
+/* Debugging: Special instruction for software breakpoint */
+#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 3a7620b66bc6..c9bfeda89e40 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
index 7a9c6a9dd2d0..429555fb4e13 100644
--- a/arch/loongarch/kernel/dma.c
+++ b/arch/loongarch/kernel/dma.c
@@ -8,17 +8,12 @@
void acpi_arch_dma_setup(struct device *dev)
{
int ret;
- u64 mask, end = 0;
+ u64 mask, end;
const struct bus_dma_region *map = NULL;
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
- const struct bus_dma_region *r = map;
-
- for (end = 0; r->size; r++) {
- if (r->dma_start + r->size - 1 > end)
- end = r->dma_start + r->size - 1;
- }
+ end = dma_range_map_max(map);
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 73858c9029cc..bff058317062 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -287,6 +287,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index 883e5066ae44..f4991c03514f 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -87,23 +87,9 @@ static void __init init_vec_parent_group(void)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
-static int __init get_ipi_irq(void)
-{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
-
- if (d)
- return irq_create_mapping(d, INT_IPI);
-
- return -EINVAL;
-}
-
void __init init_IRQ(void)
{
int i;
-#ifdef CONFIG_SMP
- int r, ipi_irq;
- static int ipi_dummy_dev;
-#endif
unsigned int order = get_order(IRQ_STACK_SIZE);
struct page *page;
@@ -113,13 +99,7 @@ void __init init_IRQ(void)
init_vec_parent_group();
irqchip_init();
#ifdef CONFIG_SMP
- ipi_irq = get_ipi_irq();
- if (ipi_irq < 0)
- panic("IPI IRQ mapping failed\n");
- irq_set_percpu_devid(ipi_irq);
- r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
- if (r < 0)
- panic("IPI IRQ request failed\n");
+ mp_ops.init_ipi();
#endif
for (i = 0; i < NR_IRQS; i++)
@@ -133,5 +113,5 @@ void __init init_IRQ(void)
per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
}
- set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+ set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
}
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 2dcb9e003657..8ae641dc53bb 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -225,6 +225,7 @@ void crash_smp_send_stop(void)
void machine_shutdown(void)
{
+#ifdef CONFIG_SMP
int cpu;
/* All CPUs go to reboot_code_buffer */
@@ -232,7 +233,6 @@ void machine_shutdown(void)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
-#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index c7d0338d12c1..36d6d9eeb7c7 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -490,12 +490,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
-void *module_alloc(unsigned long size)
-{
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
-}
-
static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
new file mode 100644
index 000000000000..1633ed4f692f
--- /dev/null
+++ b/arch/loongarch/kernel/paravirt.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/jump_label.h>
+#include <linux/kvm_para.h>
+#include <linux/static_call.h>
+#include <asm/paravirt.h>
+
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+
+#ifdef CONFIG_SMP
+static void pv_send_ipi_single(int cpu, unsigned int action)
+{
+ int min, old;
+ irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+
+ old = atomic_fetch_or(BIT(action), &info->message);
+ if (old)
+ return;
+
+ min = cpu_logical_map(cpu);
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
+}
+
+#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
+
+static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ int i, cpu, min = 0, max = 0, old;
+ __uint128_t bitmap = 0;
+ irq_cpustat_t *info;
+
+ if (cpumask_empty(mask))
+ return;
+
+ action = BIT(action);
+ for_each_cpu(i, mask) {
+ info = &per_cpu(irq_stat, i);
+ old = atomic_fetch_or(action, &info->message);
+ if (old)
+ continue;
+
+ cpu = cpu_logical_map(i);
+ if (!bitmap) {
+ min = max = cpu;
+ } else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu < min, and the bitmap window still has room */
+ bitmap <<= min - cpu;
+ min = cpu;
+ } else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu > min, and the bitmap window still has room */
+ max = cpu > max ? cpu : max;
+ } else {
+ /*
+			 * Adding this cpu would overflow the KVM_IPI_CLUSTER_SIZE
+			 * window, so send the IPI for the current cluster now and
+			 * start a new cluster at this cpu.
+ */
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
+ (unsigned long)(bitmap >> BITS_PER_LONG), min);
+ min = max = cpu;
+ bitmap = 0;
+ }
+ __set_bit(cpu - min, (unsigned long *)&bitmap);
+ }
+
+ if (bitmap)
+ kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
+ (unsigned long)(bitmap >> BITS_PER_LONG), min);
+}
+
+static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
+{
+ u32 action;
+ irq_cpustat_t *info;
+
+ /* Clear SWI interrupt */
+ clear_csr_estat(1 << INT_SWI0);
+ info = this_cpu_ptr(&irq_stat);
+ action = atomic_xchg(&info->message, 0);
+
+ if (action & SMP_RESCHEDULE) {
+ scheduler_ipi();
+ info->ipi_irqs[IPI_RESCHEDULE]++;
+ }
+
+ if (action & SMP_CALL_FUNCTION) {
+ generic_smp_call_function_interrupt();
+ info->ipi_irqs[IPI_CALL_FUNCTION]++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pv_init_ipi(void)
+{
+ int r, swi;
+
+ swi = get_percpu_irq(INT_SWI0);
+ if (swi < 0)
+ panic("SWI0 IRQ mapping failed\n");
+ irq_set_percpu_devid(swi);
+ r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
+ if (r < 0)
+ panic("SWI0 IRQ request failed\n");
+}
+#endif
+
+static bool kvm_para_available(void)
+{
+ int config;
+ static int hypervisor_type;
+
+ if (!hypervisor_type) {
+ config = read_cpucfg(CPUCFG_KVM_SIG);
+ if (!memcmp(&config, KVM_SIGNATURE, 4))
+ hypervisor_type = HYPERVISOR_KVM;
+ }
+
+ return hypervisor_type == HYPERVISOR_KVM;
+}
+
+int __init pv_ipi_init(void)
+{
+ int feature;
+
+ if (!cpu_has_hypervisor)
+ return 0;
+ if (!kvm_para_available())
+ return 0;
+
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+ if (!(feature & KVM_FEATURE_IPI))
+ return 0;
+
+#ifdef CONFIG_SMP
+ mp_ops.init_ipi = pv_init_ipi;
+ mp_ops.send_ipi_single = pv_send_ipi_single;
+ mp_ops.send_ipi_mask = pv_send_ipi_mask;
+#endif
+
+ return 0;
+}
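
To make the clustering in pv_send_ipi_mask() above concrete: the 128-bit bitmap covers a window of KVM_IPI_CLUSTER_SIZE consecutive physical cpuids anchored at min, and a hypercall is flushed whenever the next cpuid no longer fits that window. A small standalone illustration of the flush rule (hypothetical cpuid list, plain host C, not kernel code):

#include <stdio.h>

#define CLUSTER	128	/* KVM_IPI_CLUSTER_SIZE with BITS_PER_LONG == 64 */

int main(void)
{
	int cpus[] = { 3, 40, 100, 260 };	/* hypothetical physical cpuids */
	int min = cpus[0], max = cpus[0], hypercalls = 0;

	for (unsigned int i = 1; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		int cpu = cpus[i];

		if (cpu > min && cpu < min + CLUSTER) {
			if (cpu > max)
				max = cpu;	/* extends the current window */
		} else if (cpu < min && cpu > max - CLUSTER) {
			min = cpu;		/* window slides down to cpu */
		} else {
			hypercalls++;		/* flush: cpu does not fit */
			min = max = cpu;
		}
	}
	hypercalls++;				/* final flush for the last window */
	printf("hypercalls: %d\n", hypercalls);	/* prints 2 */
	return 0;
}
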
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index 0491bf453cd4..f86a4b838dd7 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu)
static DEFINE_MUTEX(pmu_reserve_mutex);
static atomic_t active_events = ATOMIC_INIT(0);
-static int get_pmc_irq(void)
-{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
-
- if (d)
- return irq_create_mapping(d, INT_PCOV);
-
- return -EINVAL;
-}
-
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
@@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
on_each_cpu(reset_counters, NULL, 1);
- free_irq(get_pmc_irq(), &loongarch_pmu);
+ free_irq(get_percpu_irq(INT_PCOV), &loongarch_pmu);
mutex_unlock(&pmu_reserve_mutex);
}
}
@@ -562,7 +552,7 @@ static int loongarch_pmu_event_init(struct perf_event *event)
if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
- irq = get_pmc_irq();
+ irq = get_percpu_irq(INT_PCOV);
flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmu_reserve_mutex);
@@ -884,4 +874,4 @@ static int __init init_hw_perf_events(void)
return 0;
}
-early_initcall(init_hw_perf_events);
+pure_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index aabee0b280fe..0dfe2388ef41 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -29,6 +29,7 @@
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
+#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -66,11 +67,6 @@ static cpumask_t cpu_core_setup_map;
struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);
-enum ipi_msg_type {
- IPI_RESCHEDULE,
- IPI_CALL_FUNCTION,
-};
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
@@ -190,24 +186,19 @@ static u32 ipi_read_clear(int cpu)
static void ipi_write_action(int cpu, u32 action)
{
- unsigned int irq = 0;
-
- while ((irq = ffs(action))) {
- uint32_t val = IOCSR_IPI_SEND_BLOCKING;
+ uint32_t val;
- val |= (irq - 1);
- val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
- iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
- action &= ~BIT(irq - 1);
- }
+ val = IOCSR_IPI_SEND_BLOCKING | action;
+ val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+ iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
}
-void loongson_send_ipi_single(int cpu, unsigned int action)
+static void loongson_send_ipi_single(int cpu, unsigned int action)
{
ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
-void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
@@ -222,11 +213,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
void arch_smp_send_reschedule(int cpu)
{
- loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+ mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
-irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
+static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
unsigned int cpu = smp_processor_id();
@@ -246,6 +237,26 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
+static void loongson_init_ipi(void)
+{
+ int r, ipi_irq;
+
+ ipi_irq = get_percpu_irq(INT_IPI);
+ if (ipi_irq < 0)
+ panic("IPI IRQ mapping failed\n");
+
+ irq_set_percpu_devid(ipi_irq);
+ r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat);
+ if (r < 0)
+ panic("IPI IRQ request failed\n");
+}
+
+struct smp_ops mp_ops = {
+ .init_ipi = loongson_init_ipi,
+ .send_ipi_single = loongson_send_ipi_single,
+ .send_ipi_mask = loongson_send_ipi_mask,
+};
+
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
@@ -289,6 +300,7 @@ void __init loongson_smp_setup(void)
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+ pv_ipi_init();
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}
@@ -323,7 +335,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle)
csr_mail_send(entry, cpu_logical_map(cpu), 0);
- loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
+ loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
}
/*
@@ -333,7 +345,7 @@ void loongson_init_secondary(void)
{
unsigned int cpu = smp_processor_id();
unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
- ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+ ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER | ECFGF_SIP0;
change_csr_ecfg(ECFG0_IM, imask);
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index e7015f7b70e3..fd5354f9be7c 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -123,16 +123,6 @@ void sync_counter(void)
csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}
-static int get_timer_irq(void)
-{
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
-
- if (d)
- return irq_create_mapping(d, INT_TI);
-
- return -EINVAL;
-}
-
int constant_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -142,7 +132,7 @@ int constant_clockevent_init(void)
static int irq = 0, timer_irq_installed = 0;
if (!timer_irq_installed) {
- irq = get_timer_irq();
+ irq = get_percpu_irq(INT_TI);
if (irq < 0)
pr_err("Failed to map irq %d (timer)\n", irq);
}
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index 244467d7792a..b2f4cbe01ae8 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -3,7 +3,7 @@
# Makefile for LoongArch KVM support
#
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(src)
include $(srctree)/virt/kvm/Makefile.kvm
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ed1d89d53e2e..c86e099af5ca 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
+#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
@@ -20,6 +21,46 @@
#include <asm/kvm_vcpu.h>
#include "trace.h"
+static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ int rd, rj;
+ unsigned int index;
+
+ if (inst.reg2_format.opcode != cpucfg_op)
+ return EMULATE_FAIL;
+
+ rd = inst.reg2_format.rd;
+ rj = inst.reg2_format.rj;
+ ++vcpu->stat.cpucfg_exits;
+ index = vcpu->arch.gprs[rj];
+
+ /*
+	 * Per the LoongArch Reference Manual, section 2.2.10.5, the return
+	 * value is 0 for an undefined CPUCFG index.
+	 *
+	 * Disable preemption since the hardware GCSR is accessed.
+ */
+ preempt_disable();
+ switch (index) {
+ case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
+ vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
+ break;
+ case CPUCFG_KVM_SIG:
+ /* CPUCFG emulation between 0x40000000 -- 0x400000ff */
+ vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
+ break;
+ case CPUCFG_KVM_FEATURE:
+ vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
+ break;
+ default:
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ preempt_enable();
+
+ return EMULATE_DONE;
+}
+
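The CPUCFG emulation above dispatches on the requested index with a GNU case-range plus a reserved window for KVM-specific values. A hedged standalone sketch of that dispatch shape (the indexes and values here are made up, not the kernel's constants):

#include <stdio.h>

#define MAX_REGS 21
#define SIG_INDEX 0x40000000u	/* hypothetical reserved index */

/* Dispatch on an index: architected registers, a reserved window, else 0. */
static unsigned long read_cfg(unsigned int index)
{
	static const unsigned long regs[MAX_REGS] = { 0x1, 0x2 };

	switch (index) {
	case 0 ... MAX_REGS - 1:	/* GNU case-range extension, as in the patch */
		return regs[index];
	case SIG_INDEX:
		return 0xcafe;		/* made-up signature value */
	default:
		return 0;		/* undefined indexes read as zero */
	}
}

int main(void)
{
	printf("%#lx %#lx %#lx\n", read_cfg(0), read_cfg(SIG_INDEX), read_cfg(99));
	return 0;
}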
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
unsigned long val = 0;
@@ -208,8 +249,6 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
- int rd, rj;
- unsigned int index;
unsigned long curr_pc;
larch_inst inst;
enum emulation_result er = EMULATE_DONE;
@@ -224,21 +263,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
- if (inst.reg2_format.opcode == 0x1B) {
- rd = inst.reg2_format.rd;
- rj = inst.reg2_format.rj;
- ++vcpu->stat.cpucfg_exits;
- index = vcpu->arch.gprs[rj];
- er = EMULATE_DONE;
- /*
- * By LoongArch Reference Manual 2.2.10.5
- * return value is 0 for undefined cpucfg index
- */
- if (index < KVM_MAX_CPUCFG_REGS)
- vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
- else
- vcpu->arch.gprs[rd] = 0;
- }
+ er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
er = kvm_handle_csr(vcpu, inst);
@@ -417,6 +442,8 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
vcpu->arch.io_gpr = rd;
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
+ run->mmio.phys_addr, NULL);
} else {
kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
inst.word, vcpu->arch.pc, vcpu->arch.badv);
@@ -463,6 +490,9 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
}
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
+ run->mmio.phys_addr, run->mmio.data);
+
return er;
}
@@ -564,6 +594,8 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
+ run->mmio.phys_addr, data);
} else {
vcpu->arch.pc = curr_pc;
kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
@@ -685,6 +717,90 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
+{
+ unsigned int min, cpu, i;
+ unsigned long ipi_bitmap;
+ struct kvm_vcpu *dest;
+
+ min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
+ ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
+ if (!ipi_bitmap)
+ continue;
+
+ cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
+ while (cpu < BITS_PER_LONG) {
+ dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
+ cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
+ if (!dest)
+ continue;
+
+ /* Send SWI0 to dest vcpu to emulate IPI interrupt */
+ kvm_queue_irq(dest, INT_SWI0);
+ kvm_vcpu_kick(dest);
+ }
+ }
+
+ return 0;
+}
+
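kvm_send_pv_ipi() treats two guest registers as a 128-bit vCPU bitmap offset by a minimum CPU id. A standalone sketch of the same fan-out using plain bit scanning instead of the kernel's find_first_bit()/find_next_bit() helpers:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64

/* Stand-in for kicking a destination vCPU; in KVM this queues an IRQ. */
static void kick(unsigned int cpu)
{
	printf("IPI -> vcpu %u\n", cpu);
}

/* Fan out IPIs described by two 64-bit masks starting at CPU id 'min'. */
static void send_pv_ipi(uint64_t bitmap[2], unsigned int min)
{
	for (int i = 0; i < 2; i++, min += BITS_PER_LONG) {
		uint64_t mask = bitmap[i];

		while (mask) {
			unsigned int bit = __builtin_ctzll(mask);

			kick(min + bit);
			mask &= mask - 1;	/* clear the bit just handled */
		}
	}
}

int main(void)
{
	uint64_t bitmap[2] = { 0x5, 0x1 };	/* cpus 8, 10 and 72 with min = 8 */

	send_pv_ipi(bitmap, 8);
	return 0;
}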
+/*
+ * Hypercall emulation always returns to the guest; the caller should check the return value.
+ */
+static void kvm_handle_service(struct kvm_vcpu *vcpu)
+{
+ unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+ long ret;
+
+ switch (func) {
+ case KVM_HCALL_FUNC_IPI:
+ kvm_send_pv_ipi(vcpu);
+ ret = KVM_HCALL_SUCCESS;
+ break;
+ default:
+ ret = KVM_HCALL_INVALID_CODE;
+ break;
+	}
+
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
+}
+
+static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ larch_inst inst;
+ unsigned int code;
+
+ inst.word = vcpu->arch.badi;
+ code = inst.reg0i15_format.immediate;
+ ret = RESUME_GUEST;
+
+ switch (code) {
+ case KVM_HCALL_SERVICE:
+ vcpu->stat.hypercall_exits++;
+ kvm_handle_service(vcpu);
+ break;
+ case KVM_HCALL_SWDBG:
+		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ ret = RESUME_HOST;
+ break;
+ }
+ fallthrough;
+ default:
+		/* Treat it as a noop instruction, only set the return value */
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ if (ret == RESUME_GUEST)
+ update_pc(&vcpu->arch);
+
+ return ret;
+}
+
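The hypercall path above reads the service code from A0 and writes the status back to A0. A small sketch of that in/out register convention (the codes here are illustrative, not the kernel ABI constants):

#include <stdio.h>

/* Hypothetical service codes and return values for illustration only. */
enum { HCALL_FUNC_IPI = 1 };
enum { HCALL_SUCCESS = 0, HCALL_INVALID_CODE = -1 };

/* a0 carries the function id on entry and the return status on exit. */
static long handle_service(long a0)
{
	switch (a0) {
	case HCALL_FUNC_IPI:
		/* ... perform the IPI fan-out ... */
		return HCALL_SUCCESS;
	default:
		return HCALL_INVALID_CODE;
	}
}

int main(void)
{
	printf("ipi: %ld, unknown: %ld\n",
	       handle_service(HCALL_FUNC_IPI), handle_service(42));
	return 0;
}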
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
@@ -716,6 +832,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
+ [EXCCODE_HVC] = kvm_handle_hypercall,
};
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index a556cff35740..98883aa23ab8 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -494,38 +494,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
range->end << PAGE_SHIFT, &ctx);
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- unsigned long prot_bits;
- kvm_pte_t *ptep;
- kvm_pfn_t pfn = pte_pfn(range->arg.pte);
- gpa_t gpa = range->start << PAGE_SHIFT;
-
- ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
- if (!ptep)
- return false;
-
- /* Replacing an absent or old page doesn't need flushes */
- if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) {
- kvm_set_pte(ptep, 0);
- return false;
- }
-
- /* Fill new pte if write protected or page migrated */
- prot_bits = _PAGE_PRESENT | __READABLE;
- prot_bits |= _CACHE_MASK & pte_val(range->arg.pte);
-
- /*
- * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support
- * _PAGE_WRITE for map_page_fast if next page write fault
- * _PAGE_DIRTY since gpa has already recorded as dirty page
- */
- prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte);
- kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits)));
-
- return true;
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_ptw_ctx ctx;
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index c2484ad4cffa..1783397b1bc8 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -19,14 +19,16 @@ DECLARE_EVENT_CLASS(kvm_transition,
TP_PROTO(struct kvm_vcpu *vcpu),
TP_ARGS(vcpu),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned long, pc)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
),
- TP_printk("PC: 0x%08lx", __entry->pc)
+ TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc)
);
DEFINE_EVENT(kvm_transition, kvm_enter,
@@ -54,19 +56,22 @@ DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned long, pc)
__field(unsigned int, reason)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
__entry->reason = reason;
),
- TP_printk("[%s]PC: 0x%08lx",
- __print_symbolic(__entry->reason,
- kvm_trace_symbol_exit_types),
- __entry->pc)
+ TP_printk("vcpu %u [%s] PC: 0x%08lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
+ __entry->pc)
);
DEFINE_EVENT(kvm_exit, kvm_exit_idle,
@@ -85,14 +90,17 @@ TRACE_EVENT(kvm_exit_gspr,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
TP_ARGS(vcpu, inst_word),
TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
__field(unsigned int, inst_word)
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
__entry->inst_word = inst_word;
),
- TP_printk("Inst word: 0x%08x", __entry->inst_word)
+ TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id,
+ __entry->inst_word)
);
#define KVM_TRACE_AUX_SAVE 0
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 3a8779065f73..9e8030d45129 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -19,6 +19,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, hypercall_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -247,7 +248,101 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- return -EINVAL;
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
+
+ if (dbg->control & KVM_GUESTDBG_ENABLE)
+ vcpu->guest_debug = dbg->control;
+ else
+ vcpu->guest_debug = 0;
+
+ return 0;
+}
+
+static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
+{
+ int cpuid;
+ struct kvm_phyid_map *map;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (val >= KVM_MAX_PHYID)
+ return -EINVAL;
+
+ map = vcpu->kvm->arch.phyid_map;
+ cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
+
+ spin_lock(&vcpu->kvm->arch.phyid_map_lock);
+ if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
+ /* Discard duplicated CPUID set operation */
+ if (cpuid == val) {
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return 0;
+ }
+
+ /*
+		 * CPUID has already been set
+ * Forbid changing to a different CPUID at runtime
+ */
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return -EINVAL;
+ }
+
+ if (map->phys_map[val].enabled) {
+ /* Discard duplicated CPUID set operation */
+ if (vcpu == map->phys_map[val].vcpu) {
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return 0;
+ }
+
+ /*
+		 * The new CPUID is already bound to another vcpu
+ * Forbid sharing the same CPUID between different vcpus
+ */
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+ return -EINVAL;
+ }
+
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
+ map->phys_map[val].enabled = true;
+ map->phys_map[val].vcpu = vcpu;
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+
+ return 0;
+}
+
+static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
+{
+ int cpuid;
+ struct kvm_phyid_map *map;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ map = vcpu->kvm->arch.phyid_map;
+ cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
+
+ if (cpuid >= KVM_MAX_PHYID)
+ return;
+
+ spin_lock(&vcpu->kvm->arch.phyid_map_lock);
+ if (map->phys_map[cpuid].enabled) {
+ map->phys_map[cpuid].vcpu = NULL;
+ map->phys_map[cpuid].enabled = false;
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
+ }
+ spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
+}
+
+struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
+{
+ struct kvm_phyid_map *map;
+
+ if (cpuid >= KVM_MAX_PHYID)
+ return NULL;
+
+ map = kvm->arch.phyid_map;
+ if (!map->phys_map[cpuid].enabled)
+ return NULL;
+
+ return map->phys_map[cpuid].vcpu;
}
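kvm_set_cpuid() and kvm_drop_cpuid() above maintain a physical-id map that forbids rebinding a vcpu's CPUID and sharing one CPUID between vcpus. A minimal sketch of that invariant, assuming a plain array instead of the kernel's locked map (names hypothetical):

#include <stdio.h>

#define MAX_PHYID 16

struct owner {
	int enabled;
	int vcpu;	/* owning vcpu id, valid when enabled */
};

static struct owner phys_map[MAX_PHYID];

/* Returns 0 on success, -1 if the binding would violate the invariants. */
static int set_cpuid(int vcpu, unsigned int val, unsigned int *cur)
{
	if (val >= MAX_PHYID)
		return -1;
	if (*cur < MAX_PHYID && phys_map[*cur].enabled)
		return *cur == val ? 0 : -1;	/* no re-binding at runtime */
	if (phys_map[val].enabled)
		return phys_map[val].vcpu == vcpu ? 0 : -1;	/* no sharing */

	phys_map[val].enabled = 1;
	phys_map[val].vcpu = vcpu;
	*cur = val;
	return 0;
}

int main(void)
{
	unsigned int cur0 = MAX_PHYID, cur1 = MAX_PHYID;
	int a = set_cpuid(0, 3, &cur0);	/* ok: first binding */
	int b = set_cpuid(1, 3, &cur1);	/* rejected: id 3 already owned */
	int c = set_cpuid(0, 5, &cur0);	/* rejected: vcpu 0 may not rebind */

	printf("%d %d %d\n", a, b, c);
	return 0;
}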
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
@@ -282,6 +377,9 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
if (get_gcsr_flag(id) & INVALID_GCSR)
return -EINVAL;
+ if (id == LOONGARCH_CSR_CPUID)
+ return kvm_set_cpuid(vcpu, val);
+
if (id == LOONGARCH_CSR_ESTAT) {
/* ESTAT IP0~IP7 inject through GINTC */
gintc = (val >> 2) & 0xff;
@@ -409,6 +507,9 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_LOONGARCH_COUNTER:
*v = drdtime() + vcpu->kvm->arch.time_offset;
break;
+ case KVM_REG_LOONGARCH_DEBUG_INST:
+ *v = INSN_HVCL | KVM_HCALL_SWDBG;
+ break;
default:
ret = -EINVAL;
break;
@@ -924,6 +1025,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Set cpuid */
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
/* Start with no pending virtual guest interrupts */
csr->csrs[LOONGARCH_CSR_GINTC] = 0;
@@ -942,6 +1044,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
hrtimer_cancel(&vcpu->arch.swtimer);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_drop_cpuid(vcpu);
kfree(vcpu->arch.csr);
/*
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index 0a37f6fa8f2d..6b2e4f66ad26 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -30,6 +30,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.pgd)
return -ENOMEM;
+ kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
+ if (!kvm->arch.phyid_map) {
+ free_page((unsigned long)kvm->arch.pgd);
+ kvm->arch.pgd = NULL;
+ return -ENOMEM;
+ }
+ spin_lock_init(&kvm->arch.phyid_map_lock);
+
kvm_init_vmcs(kvm);
kvm->arch.gpa_size = BIT(cpu_vabits - 1);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
@@ -52,6 +60,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_destroy_vcpus(kvm);
free_page((unsigned long)kvm->arch.pgd);
kvm->arch.pgd = NULL;
+ kvfree(kvm->arch.phyid_map);
+ kvm->arch.phyid_map = NULL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -66,6 +76,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_SET_GUEST_DEBUG:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index a77bf160bfc4..ccea3bbd4353 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -6,6 +6,8 @@
lib-y += delay.o memset.o memcpy.o memmove.o \
clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o
+obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o
+
obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/loongarch/lib/tishift.S b/arch/loongarch/lib/tishift.S
new file mode 100644
index 000000000000..fa1d310012bc
--- /dev/null
+++ b/arch/loongarch/lib/tishift.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/asmmacro.h>
+#include <linux/export.h>
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__ashlti3)
+ srli.d t2, a0, 1
+ nor t3, zero, a2
+ sll.d t1, a1, a2
+ srl.d t2, t2, t3
+ andi t0, a2, 64
+ sll.d a0, a0, a2
+ or t1, t2, t1
+ maskeqz a1, a0, t0
+ masknez a0, a0, t0
+ masknez t0, t1, t0
+ or a1, t0, a1
+ jr ra
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
+
+SYM_FUNC_START(__ashrti3)
+ nor t3, zero, a2
+ slli.d t2, a1, 1
+ srl.d t1, a0, a2
+ sll.d t2, t2, t3
+ andi t0, a2, 64
+ or t1, t2, t1
+ sra.d a2, a1, a2
+ srai.d a1, a1, 63
+ maskeqz a0, a2, t0
+ maskeqz a1, a1, t0
+ masknez a2, a2, t0
+ masknez t0, t1, t0
+ or a1, a1, a2
+ or a0, t0, a0
+ jr ra
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__lshrti3)
+ slli.d t2, a1, 1
+ nor t3, zero, a2
+ srl.d t1, a0, a2
+ sll.d t2, t2, t3
+ andi t0, a2, 64
+ srl.d a1, a1, a2
+ or t1, t2, t1
+ maskeqz a0, a1, t0
+ masknez a1, a1, t0
+ masknez t0, t1, t0
+ or a0, t0, a0
+ jr ra
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
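These helpers provide the 128-bit shifts the compiler may emit on a 64-bit ISA. A hedged C sketch of the decomposition __ashlti3 performs on the lo/hi halves, including the extra shift-by-one that avoids an undefined 64-bit shift when the count is zero:

#include <stdint.h>
#include <stdio.h>

/* Shift the 128-bit value hi:lo left by n (0 <= n < 128), like __ashlti3. */
static void shl128(uint64_t lo, uint64_t hi, unsigned int n,
		   uint64_t *rlo, uint64_t *rhi)
{
	if (n & 64) {		/* the whole low word moves into the high word */
		*rhi = lo << (n & 63);
		*rlo = 0;
	} else {		/* n in 0..63; the extra >> 1 avoids lo >> 64 at n == 0 */
		*rhi = (hi << n) | ((lo >> 1) >> (63 - n));
		*rlo = lo << n;
	}
}

int main(void)
{
	uint64_t lo, hi;

	shl128(0x8000000000000001ULL, 0, 1, &lo, &hi);
	printf("hi=%#llx lo=%#llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);	/* hi=0x1 lo=0x2 */
	return 0;
}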
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 1fc2f6813ea0..97b40defde06 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -202,10 +202,10 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
- goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
+ goto bad_area;
}
/*
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index 1e76fcb83093..12222c56cb59 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -50,21 +50,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return (pte_t *) pmd;
}
-int pmd_huge(pmd_t pmd)
-{
- return (pmd_val(pmd) & _PAGE_HUGE) != 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return (pud_val(pud) & _PAGE_HUGE) != 0;
-}
-
uint64_t pmd_to_entrylo(unsigned long pmd_val)
{
uint64_t val;
/* PMD as PTE. Must be huge page */
- if (!pmd_huge(__pmd(pmd_val)))
+ if (!pmd_leaf(__pmd(pmd_val)))
panic("%s", __func__);
val = pmd_val ^ _PAGE_HUGE;
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 4dd53427f657..bf789d114c2d 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
+#include <linux/execmem.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
@@ -248,3 +249,23 @@ EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index a9630a81b38a..889030985135 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -4,6 +4,7 @@
*/
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -24,7 +25,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -82,7 +83,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
*/
}
- info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
@@ -111,6 +111,9 @@ int __virt_addr_valid(volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
+ if (is_kfence_address((void *)kaddr))
+ return 1;
+
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
return 0;
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 2aae72e63871..bda018150000 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -11,13 +11,13 @@
struct page *dmw_virt_to_page(unsigned long kaddr)
{
- return pfn_to_page(virt_to_pfn(kaddr));
+ return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);
struct page *tlb_virt_to_page(unsigned long kaddr)
{
- return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+ return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index a44387b838af..c08682a89c58 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -125,6 +125,8 @@ vmalloc_load:
tlb_huge_update_load:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
@@ -135,7 +137,6 @@ tlb_huge_update_load:
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, _PAGE_VALID
st.d t0, t1, 0
#endif
@@ -281,6 +282,8 @@ vmalloc_store:
tlb_huge_update_store:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
@@ -292,7 +295,6 @@ tlb_huge_update_store:
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#endif
@@ -438,6 +440,8 @@ vmalloc_modify:
tlb_huge_update_modify:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_WRITE
beqz t0, nopage_tlb_modify
@@ -448,7 +452,6 @@ tlb_huge_update_modify:
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#endif
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e73323d759d0..7dbefd4ba210 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1294,16 +1294,19 @@ skip_init_ctx:
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
+ int err;
+
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
- prog->bpf_func = NULL;
- prog->jited = 0;
- prog->jited_len = 0;
- goto out_offset;
+ goto out_free;
+ }
+ err = bpf_jit_binary_lock_ro(header);
+ if (err) {
+ pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
+ err);
+ goto out_free;
}
- bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
@@ -1334,6 +1337,13 @@ out:
out_offset = -1;
return prog;
+
+out_free:
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
}
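The JIT change above funnels both failure cases through a single out_free label that releases the binary and clears the prog state. A generic sketch of that single-exit cleanup idiom (resource names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/*
 * On any failure after the allocation, unwind through one label that
 * releases the buffer and reports the error.
 */
static char *build_image(int fail)
{
	char *image = malloc(64);

	if (!image)
		return NULL;

	if (fail)
		goto out_free;

	return image;		/* success: caller owns the buffer */

out_free:
	free(image);
	return NULL;
}

int main(void)
{
	char *img = build_image(0);

	printf("%s\n", img ? "built" : "failed");
	free(img);
	return 0;
}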
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
index 166d9e06a64b..c9e594925c47 100644
--- a/arch/loongarch/power/suspend.c
+++ b/arch/loongarch/power/suspend.c
@@ -24,6 +24,7 @@ struct saved_registers {
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
+ u64 pcpu_base;
};
static struct saved_registers saved_regs;
@@ -36,6 +37,7 @@ void loongarch_common_suspend(void)
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
+ saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@@ -44,7 +46,6 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
- csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
@@ -55,6 +56,7 @@ void loongarch_common_resume(void)
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
+ csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
}
int loongarch_acpi_suspend(void)
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 75c6726382c3..d724d46b07c8 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -1,11 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Objects to go into the VDSO.
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-OBJECT_FILES_NON_STANDARD := y
-
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
@@ -39,8 +34,6 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
--hash-style=sysv --build-id -T
-GCOV_PROFILE := n
-
#
# Shared build commands.
#
@@ -52,7 +45,7 @@ quiet_cmd_vdsoas_o_S = AS $@
cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6ffa29585194..cc26df907bfe 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,8 +3,8 @@ config M68K
bool
default y
select ARCH_32BIT_OFF_T
- select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CPU_FINALIZE_INIT if MMU
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DMA_PREP_COHERENT if M68K_NONCOHERENT_DMA && !COLDFIRE
@@ -18,7 +18,7 @@ config M68K
select DMA_DIRECT_REMAP if M68K_NONCOHERENT_DMA && !COLDFIRE
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
- select GENERIC_IOMAP
+ select GENERIC_IOMAP if HAS_IOPORT
select GENERIC_IRQ_SHOW
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 99718f3dc686..d4b170c861bf 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -836,7 +836,7 @@ static void amiga_get_hardware_list(struct seq_file *m)
seq_printf(m, "\tZorro II%s AutoConfig: %d Expansion "
"Device%s\n",
AMIGAHW_PRESENT(ZORRO3) ? "I" : "",
- zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+ zorro_num_autocon, str_plural(zorro_num_autocon));
#endif /* CONFIG_ZORRO */
#undef AMIGAHW_ANNOUNCE
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a6f6607efe79..6bb202d03d34 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -26,6 +26,7 @@ CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ATARI_PARTITION=y
CONFIG_MAC_PARTITION=y
@@ -217,7 +218,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -624,8 +624,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 0ca1f9f930bc..3598e0cc66b0 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -21,6 +21,7 @@ CONFIG_HEARTBEAT=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -213,7 +214,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -581,8 +581,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index be030659d8d7..3fbb8a9a307d 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -29,6 +29,7 @@ CONFIG_ATARI_ETHERNEC=y
CONFIG_ATARI_DSP56K=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_MAC_PARTITION=y
@@ -220,7 +221,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -601,8 +601,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index ad8f81fbb630..9a2b0c7871ce 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -19,6 +19,7 @@ CONFIG_BVME6000=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -210,7 +211,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -573,8 +573,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ff253b6accec..2408785e0e48 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -20,6 +20,7 @@ CONFIG_HP300=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -212,7 +213,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -583,8 +583,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index f92b866620a7..6789d03b7b52 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -20,6 +20,7 @@ CONFIG_MAC=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -211,7 +212,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -600,8 +600,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index bd813beaa8a7..b57af39db3b4 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -44,6 +44,7 @@ CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
@@ -231,7 +232,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -686,8 +686,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 2237ee0fe433..29b70f27aedd 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -18,6 +18,7 @@ CONFIG_MVME147=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -209,7 +210,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -572,8 +572,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index afb5aa9c5012..757c582ffe84 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -19,6 +19,7 @@ CONFIG_MVME16x=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -210,7 +211,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -573,8 +573,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index e40f7a308966..f15d1009108a 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -19,6 +19,7 @@ CONFIG_HEARTBEAT=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -211,7 +212,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -590,8 +590,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 4df397c0395f..5218c1e614b5 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -16,6 +16,7 @@ CONFIG_SUN3=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -206,7 +207,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -570,8 +570,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index aa7719b3947f..acdf4eb7af28 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -16,6 +16,7 @@ CONFIG_SUN3X=y
CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
@@ -207,7 +208,6 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
@@ -571,8 +571,6 @@ CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
-CONFIG_STRING_SELFTEST=m
-CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
diff --git a/arch/m68k/include/asm/pgtable.h b/arch/m68k/include/asm/pgtable.h
index 27525c6a12fd..49fcfd734860 100644
--- a/arch/m68k/include/asm/pgtable.h
+++ b/arch/m68k/include/asm/pgtable.h
@@ -2,6 +2,8 @@
#ifndef __M68K_PGTABLE_H
#define __M68K_PGTABLE_H
+#include <asm/page.h>
+
#ifdef __uClinux__
#include <asm/pgtable_no.h>
#else
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 31be2ad999ca..3e31adbddc75 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -12,14 +12,15 @@
*/
#if PAGE_SHIFT < 13
#ifdef CONFIG_4KSTACKS
-#define THREAD_SIZE 4096
+#define THREAD_SIZE_ORDER 0
#else
-#define THREAD_SIZE 8192
+#define THREAD_SIZE_ORDER 1
#endif
#else
-#define THREAD_SIZE PAGE_SIZE
+#define THREAD_SIZE_ORDER 0
#endif
-#define THREAD_SIZE_ORDER ((THREAD_SIZE / PAGE_SIZE) - 1)
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
diff --git a/arch/m68k/include/asm/fb.h b/arch/m68k/include/asm/video.h
index 9941b7434b69..6cf2194c413d 100644
--- a/arch/m68k/include/asm/fb.h
+++ b/arch/m68k/include/asm/video.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
#include <asm/page.h>
#include <asm/setup.h>
@@ -27,6 +27,6 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
}
#define pgprot_framebuffer pgprot_framebuffer
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _ASM_FB_H_ */
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/m68k/include/uapi/asm/ptrace.h b/arch/m68k/include/uapi/asm/ptrace.h
index 5b50ea592e00..ebd9fccb3d11 100644
--- a/arch/m68k/include/uapi/asm/ptrace.h
+++ b/arch/m68k/include/uapi/asm/ptrace.h
@@ -39,7 +39,7 @@ struct pt_regs {
long d0;
long orig_d0;
long stkadj;
-#ifdef CONFIG_COLDFIRE
+#ifdef __mcoldfire__
unsigned format : 4; /* frame format specifier */
unsigned vector : 12; /* vector offset */
unsigned short sr;
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 3bcdd32a6b36..338b474910f7 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -430,7 +430,9 @@ resume:
movec %a0,%dfc
/* restore status register */
- movew %a1@(TASK_THREAD+THREAD_SR),%sr
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
+ oriw #0x0700,%d0
+ movew %d0,%sr
rts
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 4c8f8cbfa05f..e7f0f72c1b36 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -453,30 +453,18 @@ void mac_poweroff(void)
void mac_reset(void)
{
- if (macintosh_config->adb_type == MAC_ADB_II &&
- macintosh_config->ident != MAC_MODEL_SE30) {
- /* need ROMBASE in booter */
- /* indeed, plus need to MAP THE ROM !! */
-
- if (mac_bi_data.rombase == 0)
- mac_bi_data.rombase = 0x40800000;
-
- /* works on some */
- rom_reset = (void *) (mac_bi_data.rombase + 0xa);
-
- local_irq_disable();
- rom_reset();
#ifdef CONFIG_ADB_CUDA
- } else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
- macintosh_config->adb_type == MAC_ADB_CUDA) {
+ if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+ macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_restart();
+ } else
#endif
#ifdef CONFIG_ADB_PMU
- } else if (macintosh_config->adb_type == MAC_ADB_PB2) {
+ if (macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_restart();
+ } else
#endif
- } else if (CPU_IS_030) {
-
+ if (CPU_IS_030) {
/* 030-specific reset routine. The idea is general, but the
* specific registers to reset are '030-specific. Until I
* have a non-030 machine, I can't test anything else.
@@ -524,6 +512,18 @@ void mac_reset(void)
"jmp %/a0@\n\t" /* jump to the reset vector */
".chip 68k"
: : "r" (offset), "a" (rombase) : "a0");
+ } else {
+ /* need ROMBASE in booter */
+ /* indeed, plus need to MAP THE ROM !! */
+
+ if (mac_bi_data.rombase == 0)
+ mac_bi_data.rombase = 0x40800000;
+
+ /* works on some */
+ rom_reset = (void *)(mac_bi_data.rombase + 0xa);
+
+ local_irq_disable();
+ rom_reset();
}
/* should never get here */
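mac_reset() is reordered above so each conditionally compiled branch ends with "} else" inside its own #ifdef, which keeps the if/else chain valid no matter which ADB options are built in. A small sketch of that pattern, assuming hypothetical HAVE_FOO/HAVE_BAR options:

#include <stdio.h>

#define HAVE_FOO 1
/* #define HAVE_BAR 1 */

/*
 * Each optional branch ends with "} else" inside its #ifdef, so the chain
 * still parses when any combination of options is compiled out.
 */
static void reset(int type)
{
#ifdef HAVE_FOO
	if (type == 1) {
		printf("foo reset\n");
	} else
#endif
#ifdef HAVE_BAR
	if (type == 2) {
		printf("bar reset\n");
	} else
#endif
	{
		printf("fallback reset\n");
	}
}

int main(void)
{
	reset(1);
	reset(3);
	return 0;
}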
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 4da7bc4ac4a3..176314f3c9aa 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -4,7 +4,7 @@ CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EXPERT=y
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 4393bee64eaf..85c4d29ef43e 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code and low level code
CFLAGS_REMOVE_timer.o = -pg
CFLAGS_REMOVE_intc.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_process.o = -pg
endif
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 85dbda4a08a8..03da36dc6d9c 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
#define err_printk(x) \
- early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
+ pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
{
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 06ef440d16ce..f1aa1bf11166 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -68,7 +68,7 @@ config MIPS
select HAVE_DYNAMIC_FTRACE
select HAVE_EBPF_JIT if !CPU_MICROMIPS
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
@@ -619,15 +619,6 @@ config MACH_EYEQ5
bool
-config FIT_IMAGE_FDT_EPM5
- bool "Include FDT for Mobileye EyeQ5 development platforms"
- depends on MACH_EYEQ5
- default n
- help
- Enable this to include the FDT for the EyeQ5 development platforms
- from Mobileye in the FIT kernel image.
- This requires u-boot on the platform.
-
config MACH_NINTENDO64
bool "Nintendo 64 console"
select CEVT_R4K
@@ -1011,6 +1002,15 @@ config CAVIUM_OCTEON_SOC
endchoice
+config FIT_IMAGE_FDT_EPM5
+ bool "Include FDT for Mobileye EyeQ5 development platforms"
+ depends on MACH_EYEQ5
+ default n
+ help
+ Enable this to include the FDT for the EyeQ5 development platforms
+ from Mobileye in the FIT kernel image.
+ This requires u-boot on the platform.
+
source "arch/mips/alchemy/Kconfig"
source "arch/mips/ath25/Kconfig"
source "arch/mips/ath79/Kconfig"
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 99a1ba5394e0..58fb7c2dc3b8 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -35,6 +35,7 @@
#include <asm/bootinfo.h>
#include <bcm47xx.h>
#include <bcm47xx_board.h>
+#include "bcm47xx_private.h"
static char bcm47xx_system_type[20] = "Broadcom BCM47XX";
@@ -123,7 +124,7 @@ void __init prom_init(void)
/* Stripped version of tlb_init, with the call to build_tlb_refill_handler
* dropped. Calling it at this stage causes a hang.
*/
-void early_tlb_init(void)
+static void early_tlb_init(void)
{
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 6cc28173bee8..e0b8ec9a9516 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -34,12 +34,6 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
# decompressor objects (linked with vmlinuz)
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o
diff --git a/arch/mips/boot/dts/ralink/mt7621.dtsi b/arch/mips/boot/dts/ralink/mt7621.dtsi
index 6e95e6f19a6a..0704eab4a80b 100644
--- a/arch/mips/boot/dts/ralink/mt7621.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7621.dtsi
@@ -5,50 +5,143 @@
#include <dt-bindings/reset/mt7621-reset.h>
/ {
+ compatible = "mediatek,mt7621-soc";
+
#address-cells = <1>;
#size-cells = <1>;
- compatible = "mediatek,mt7621-soc";
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
- device_type = "cpu";
compatible = "mips,mips1004Kc";
reg = <0>;
+ device_type = "cpu";
};
cpu@1 {
- device_type = "cpu";
compatible = "mips,mips1004Kc";
reg = <1>;
+ device_type = "cpu";
};
};
cpuintc: cpuintc {
+ compatible = "mti,cpu-interrupt-controller";
+
#address-cells = <0>;
#interrupt-cells = <1>;
+
interrupt-controller;
- compatible = "mti,cpu-interrupt-controller";
};
mmc_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
- regulator-name = "mmc_power";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+
enable-active-high;
+
regulator-always-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "mmc_power";
};
mmc_fixed_1v8_io: regulator-1v8 {
compatible = "regulator-fixed";
- regulator-name = "mmc_io";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+
enable-active-high;
+
regulator-always-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "mmc_io";
+ };
+
+ pinctrl: pinctrl {
+ compatible = "ralink,mt7621-pinctrl";
+
+ i2c_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c";
+ function = "i2c";
+ };
+ };
+
+ mdio_pins: mdio0-pins {
+ pinmux {
+ groups = "mdio";
+ function = "mdio";
+ };
+ };
+
+ nand_pins: nand0-pins {
+ sdhci-pinmux {
+ groups = "sdhci";
+ function = "nand2";
+ };
+
+ spi-pinmux {
+ groups = "spi";
+ function = "nand1";
+ };
+ };
+
+ pcie_pins: pcie0-pins {
+ pinmux {
+ groups = "pcie";
+ function = "gpio";
+ };
+ };
+
+ rgmii1_pins: rgmii1-pins {
+ pinmux {
+ groups = "rgmii1";
+ function = "rgmii1";
+ };
+ };
+
+ rgmii2_pins: rgmii2-pins {
+ pinmux {
+ groups = "rgmii2";
+ function = "rgmii2";
+ };
+ };
+
+ sdhci_pins: sdhci0-pins {
+ pinmux {
+ groups = "sdhci";
+ function = "sdhci";
+ };
+ };
+
+ spi_pins: spi0-pins {
+ pinmux {
+ groups = "spi";
+ function = "spi";
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ pinmux {
+ groups = "uart1";
+ function = "uart1";
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ pinmux {
+ groups = "uart2";
+ function = "uart2";
+ };
+ };
+
+ uart3_pins: uart3-pins {
+ pinmux {
+ groups = "uart3";
+ function = "uart3";
+ };
+ };
};
palmbus: palmbus@1e000000 {
@@ -62,12 +155,15 @@
sysc: syscon@0 {
compatible = "mediatek,mt7621-sysc", "syscon";
reg = <0x0 0x100>;
+
#clock-cells = <1>;
#reset-cells = <1>;
- ralink,memctl = <&memc>;
+
clock-output-names = "xtal", "cpu", "bus",
"50m", "125m", "150m",
"250m", "270m";
+
+ ralink,memctl = <&memc>;
};
wdt: watchdog@100 {
@@ -77,13 +173,16 @@
};
gpio: gpio@600 {
+ compatible = "mediatek,mt7621-gpio";
+ reg = <0x600 0x100>;
+
#gpio-cells = <2>;
#interrupt-cells = <2>;
- compatible = "mediatek,mt7621-gpio";
+
gpio-controller;
gpio-ranges = <&pinctrl 0 0 95>;
+
interrupt-controller;
- reg = <0x600 0x100>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -92,18 +191,19 @@
compatible = "mediatek,mt7621-i2c";
reg = <0x900 0x100>;
- clocks = <&sysc MT7621_CLK_I2C>;
- clock-names = "i2c";
- resets = <&sysc MT7621_RST_I2C>;
- reset-names = "i2c";
-
#address-cells = <1>;
#size-cells = <0>;
- status = "disabled";
+ clocks = <&sysc MT7621_CLK_I2C>;
+ clock-names = "i2c";
pinctrl-names = "default";
pinctrl-0 = <&i2c_pins>;
+
+ resets = <&sysc MT7621_RST_I2C>;
+ reset-names = "i2c";
+
+ status = "disabled";
};
memc: memory-controller@5000 {
@@ -170,135 +270,53 @@
};
spi0: spi@b00 {
- status = "disabled";
-
compatible = "ralink,mt7621-spi";
reg = <0xb00 0x100>;
- clocks = <&sysc MT7621_CLK_SPI>;
- clock-names = "spi";
-
- resets = <&sysc MT7621_RST_SPI>;
- reset-names = "spi";
-
#address-cells = <1>;
#size-cells = <0>;
+ clock-names = "spi";
+ clocks = <&sysc MT7621_CLK_SPI>;
+
pinctrl-names = "default";
pinctrl-0 = <&spi_pins>;
- };
- };
-
- pinctrl: pinctrl {
- compatible = "ralink,mt7621-pinctrl";
-
- i2c_pins: i2c0-pins {
- pinmux {
- groups = "i2c";
- function = "i2c";
- };
- };
-
- spi_pins: spi0-pins {
- pinmux {
- groups = "spi";
- function = "spi";
- };
- };
-
- uart1_pins: uart1-pins {
- pinmux {
- groups = "uart1";
- function = "uart1";
- };
- };
-
- uart2_pins: uart2-pins {
- pinmux {
- groups = "uart2";
- function = "uart2";
- };
- };
-
- uart3_pins: uart3-pins {
- pinmux {
- groups = "uart3";
- function = "uart3";
- };
- };
-
- rgmii1_pins: rgmii1-pins {
- pinmux {
- groups = "rgmii1";
- function = "rgmii1";
- };
- };
-
- rgmii2_pins: rgmii2-pins {
- pinmux {
- groups = "rgmii2";
- function = "rgmii2";
- };
- };
-
- mdio_pins: mdio0-pins {
- pinmux {
- groups = "mdio";
- function = "mdio";
- };
- };
-
- pcie_pins: pcie0-pins {
- pinmux {
- groups = "pcie";
- function = "gpio";
- };
- };
-
- nand_pins: nand0-pins {
- spi-pinmux {
- groups = "spi";
- function = "nand1";
- };
- sdhci-pinmux {
- groups = "sdhci";
- function = "nand2";
- };
- };
+ reset-names = "spi";
+ resets = <&sysc MT7621_RST_SPI>;
- sdhci_pins: sdhci0-pins {
- pinmux {
- groups = "sdhci";
- function = "sdhci";
- };
+ status = "disabled";
};
};
mmc: mmc@1e130000 {
- status = "disabled";
-
compatible = "mediatek,mt7620-mmc";
reg = <0x1e130000 0x4000>;
bus-width = <4>;
- max-frequency = <48000000>;
- cap-sd-highspeed;
- cap-mmc-highspeed;
- vmmc-supply = <&mmc_fixed_3v3>;
- vqmmc-supply = <&mmc_fixed_1v8_io>;
- disable-wp;
- pinctrl-names = "default", "state_uhs";
- pinctrl-0 = <&sdhci_pins>;
- pinctrl-1 = <&sdhci_pins>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
clocks = <&sysc MT7621_CLK_SHXC>,
<&sysc MT7621_CLK_50M>;
clock-names = "source", "hclk";
+ disable-wp;
+
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 20 IRQ_TYPE_LEVEL_HIGH>;
+
+ max-frequency = <48000000>;
+
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&sdhci_pins>;
+ pinctrl-1 = <&sdhci_pins>;
+
+ vmmc-supply = <&mmc_fixed_3v3>;
+ vqmmc-supply = <&mmc_fixed_1v8_io>;
+
+ status = "disabled";
};
usb: usb@1e1c0000 {
@@ -321,15 +339,15 @@
compatible = "mti,gic";
reg = <0x1fbc0000 0x2000>;
- interrupt-controller;
#interrupt-cells = <3>;
+ interrupt-controller;
mti,reserved-cpu-vectors = <7>;
timer {
compatible = "mti,gic-timer";
- interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
clocks = <&sysc MT7621_CLK_CPU>;
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
};
};
@@ -347,46 +365,22 @@
compatible = "mediatek,mt7621-eth";
reg = <0x1e100000 0x10000>;
- clocks = <&sysc MT7621_CLK_FE>, <&sysc MT7621_CLK_ETH>;
- clock-names = "fe", "ethif";
-
#address-cells = <1>;
#size-cells = <0>;
- resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
- reset-names = "fe", "eth";
+ clock-names = "fe", "ethif";
+ clocks = <&sysc MT7621_CLK_FE>, <&sysc MT7621_CLK_ETH>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
- mediatek,ethsys = <&sysc>;
-
pinctrl-names = "default";
pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>;
- gmac0: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-mode = "trgmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
-
- gmac1: mac@1 {
- compatible = "mediatek,eth-mac";
- reg = <1>;
- phy-mode = "rgmii";
+ reset-names = "fe", "eth";
+ resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
+ mediatek,ethsys = <&sysc>;
mdio: mdio-bus {
#address-cells = <1>;
@@ -395,73 +389,105 @@
switch0: switch@1f {
compatible = "mediatek,mt7621";
reg = <0x1f>;
- mediatek,mcm;
- resets = <&sysc MT7621_RST_MCM>;
- reset-names = "mcm";
- interrupt-controller;
+
#interrupt-cells = <1>;
+ interrupt-controller;
interrupts = <GIC_SHARED 23 IRQ_TYPE_LEVEL_HIGH>;
+ reset-names = "mcm";
+ resets = <&sysc MT7621_RST_MCM>;
+
+ mediatek,mcm;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
- status = "disabled";
reg = <0>;
label = "swp0";
+ status = "disabled";
};
port@1 {
- status = "disabled";
reg = <1>;
label = "swp1";
+ status = "disabled";
};
port@2 {
- status = "disabled";
reg = <2>;
label = "swp2";
+ status = "disabled";
};
port@3 {
- status = "disabled";
reg = <3>;
label = "swp3";
+ status = "disabled";
};
port@4 {
- status = "disabled";
reg = <4>;
label = "swp4";
+ status = "disabled";
};
port@5 {
reg = <5>;
+
ethernet = <&gmac1>;
phy-mode = "rgmii";
fixed-link {
- speed = <1000>;
full-duplex;
pause;
+ speed = <1000>;
};
};
port@6 {
reg = <6>;
+
ethernet = <&gmac0>;
phy-mode = "trgmii";
fixed-link {
- speed = <1000>;
full-duplex;
pause;
+ speed = <1000>;
};
};
};
};
};
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+
+ phy-mode = "trgmii";
+
+ fixed-link {
+ full-duplex;
+ pause;
+ speed = <1000>;
+ };
+ };
+
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+
+ phy-mode = "rgmii";
+
+ fixed-link {
+ full-duplex;
+ pause;
+ speed = <1000>;
+ };
+ };
+
};
pcie: pcie@1e140000 {
@@ -470,84 +496,106 @@
<0x1e142000 0x100>, /* pcie port 0 RC control registers */
<0x1e143000 0x100>, /* pcie port 1 RC control registers */
<0x1e144000 0x100>; /* pcie port 2 RC control registers */
+ ranges = <0x02000000 0 0x60000000 0x60000000 0 0x10000000>, /* pci memory */
+ <0x01000000 0 0x00000000 0x1e160000 0 0x00010000>; /* io space */
+
#address-cells = <3>;
+ #interrupt-cells = <1>;
#size-cells = <2>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_pins>;
-
device_type = "pci";
- ranges = <0x02000000 0 0x60000000 0x60000000 0 0x10000000>, /* pci memory */
- <0x01000000 0 0x00000000 0x1e160000 0 0x00010000>; /* io space */
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0xF800 0 0 0>;
- interrupt-map = <0x0000 0 0 0 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
+ interrupt-map-mask = <0xf800 0 0 0>;
+ interrupt-map = <0x0000 0 0 0 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
<0x0800 0 0 0 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>,
<0x1000 0 0 0 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins>;
reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>;
+ status = "disabled";
+
pcie@0,0 {
reg = <0x0000 0 0 0 0>;
+ ranges;
+
#address-cells = <3>;
+ #interrupt-cells = <1>;
#size-cells = <2>;
+
+ clocks = <&sysc MT7621_CLK_PCIE0>;
+
device_type = "pci";
- #interrupt-cells = <1>;
+
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&sysc MT7621_RST_PCIE0>;
- clocks = <&sysc MT7621_CLK_PCIE0>;
- phys = <&pcie0_phy 1>;
+
phy-names = "pcie-phy0";
- ranges;
+ phys = <&pcie0_phy 1>;
+
+ resets = <&sysc MT7621_RST_PCIE0>;
};
pcie@1,0 {
reg = <0x0800 0 0 0 0>;
+ ranges;
+
#address-cells = <3>;
+ #interrupt-cells = <1>;
#size-cells = <2>;
+
+ clocks = <&sysc MT7621_CLK_PCIE1>;
+
device_type = "pci";
- #interrupt-cells = <1>;
+
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&sysc MT7621_RST_PCIE1>;
- clocks = <&sysc MT7621_CLK_PCIE1>;
- phys = <&pcie0_phy 1>;
+
phy-names = "pcie-phy1";
- ranges;
+ phys = <&pcie0_phy 1>;
+
+ resets = <&sysc MT7621_RST_PCIE1>;
};
pcie@2,0 {
reg = <0x1000 0 0 0 0>;
+ ranges;
+
#address-cells = <3>;
+ #interrupt-cells = <1>;
#size-cells = <2>;
+
+ clocks = <&sysc MT7621_CLK_PCIE2>;
+
device_type = "pci";
- #interrupt-cells = <1>;
+
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&sysc MT7621_RST_PCIE2>;
- clocks = <&sysc MT7621_CLK_PCIE2>;
- phys = <&pcie2_phy 0>;
+
phy-names = "pcie-phy2";
- ranges;
+ phys = <&pcie2_phy 0>;
+
+ resets = <&sysc MT7621_RST_PCIE2>;
};
};
pcie0_phy: pcie-phy@1e149000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e149000 0x0700>;
- clocks = <&sysc MT7621_CLK_XTAL>;
+
#phy-cells = <1>;
+
+ clocks = <&sysc MT7621_CLK_XTAL>;
};
pcie2_phy: pcie-phy@1e14a000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e14a000 0x0700>;
- clocks = <&sysc MT7621_CLK_XTAL>;
+
#phy-cells = <1>;
+
+ clocks = <&sysc MT7621_CLK_XTAL>;
};
};
diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig
index 4b9e36d6400e..a53dd66e9b86 100644
--- a/arch/mips/configs/rs90_defconfig
+++ b/arch/mips/configs/rs90_defconfig
@@ -9,7 +9,7 @@ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y
# CONFIG_SGETMASK_SYSCALL is not set
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_TIMERFD is not set
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 6c3704f51d0d..87f0a1436bf9 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -756,7 +756,7 @@ void __init arch_init_irq(void)
NULL))
pr_err("Failed to register fpu interrupt\n");
desc_fpu = irq_to_desc(irq_fpu);
- fpu_kstat_irq = this_cpu_ptr(desc_fpu->kstat_irqs);
+ fpu_kstat_irq = this_cpu_ptr(&desc_fpu->kstat_irqs->cnt);
}
if (dec_interrupt[DEC_IRQ_CASCADE] >= 0) {
if (request_irq(dec_interrupt[DEC_IRQ_CASCADE], no_action,
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 2e99450f4228..87ff609b53fe 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -37,6 +37,7 @@
#define CFI_SECTIONS
#endif
+#ifdef __ASSEMBLY__
/*
* LEAF - declare leaf routine
*/
@@ -122,6 +123,8 @@ symbol = value
#define ASM_PRINT(string)
#endif
+#endif /* __ASSEMBLY__ */
+
/*
* Stack alignment
*/
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 0e196650f4f4..92b7591aac2a 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -129,7 +129,7 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- /* pmd_huge(pmd) but inline */
+ /* pmd_leaf(pmd) but inline */
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return 0;
#endif
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 20ca48c1b606..401c1d9e4409 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -147,8 +147,8 @@
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
-#define MODULE_START CKSSEG
-#define MODULE_END (FIXADDR_START-2*PAGE_SIZE)
+#define MODULES_VADDR CKSSEG
+#define MODULES_END (FIXADDR_START-2*PAGE_SIZE)
#endif
#define pte_ERROR(e) \
@@ -245,7 +245,7 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- /* pmd_huge(pmd) but inline */
+ /* pmd_leaf(pmd) but inline */
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return 0;
#endif
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index d14d0e37ad02..4a2b40ce39e0 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -159,7 +159,7 @@ extern unsigned long exception_ip(struct pt_regs *regs);
#define exception_ip(regs) exception_ip(regs)
#define profile_pc(regs) instruction_pointer(regs)
-extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
extern void die(const char *, struct pt_regs *) __noreturn;
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 4dce41138bad..d8077136372c 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -2,6 +2,7 @@
#ifndef _MIPS_SETUP_H
#define _MIPS_SETUP_H
+#include <linux/init.h>
#include <linux/types.h>
#include <uapi/asm/setup.h>
@@ -29,4 +30,9 @@ extern void per_cpu_trap_init(bool);
extern void cpu_cache_init(void);
extern void tlb_init(void);
+#ifdef CONFIG_RELOCATABLE
+extern void * __init relocate_kernel(void);
+extern int plat_post_relocation(long);
+#endif
+
#endif /* __SETUP_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a8705aef47e1..a13431379073 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -308,17 +308,12 @@
jal octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
- LONG_L $24, PT_ACX(sp)
- mtlhx $24
- LONG_L $24, PT_HI(sp)
- mtlhx $24
+ LONG_L $14, PT_ACX(sp)
LONG_L $24, PT_LO(sp)
- mtlhx $24
+ LONG_L $15, PT_HI(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
LONG_L $24, PT_LO(sp)
- mtlo $24
- LONG_L $24, PT_HI(sp)
- mthi $24
+ LONG_L $15, PT_HI(sp)
#endif
#ifdef CONFIG_32BIT
cfi_ld $8, PT_R8, \docfi
@@ -327,6 +322,14 @@
cfi_ld $10, PT_R10, \docfi
cfi_ld $11, PT_R11, \docfi
cfi_ld $12, PT_R12, \docfi
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ mtlhx $14
+ mtlhx $15
+ mtlhx $24
+#elif !defined(CONFIG_CPU_MIPSR6)
+ mtlo $24
+ mthi $15
+#endif
cfi_ld $13, PT_R13, \docfi
cfi_ld $14, PT_R14, \docfi
cfi_ld $15, PT_R15, \docfi
diff --git a/arch/mips/include/asm/fb.h b/arch/mips/include/asm/video.h
index d98d6681d64e..007c106d980f 100644
--- a/arch/mips/include/asm/fb.h
+++ b/arch/mips/include/asm/video.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
#include <asm/page.h>
@@ -13,8 +13,8 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
/*
* MIPS doesn't define __raw_ I/O macros, so the helpers
- * in <asm-generic/fb.h> don't generate fb_readq() and
- * fb_write(). We have to provide them here.
+ * in <asm-generic/video.h> don't generate fb_readq() and
+ * fb_writeq(). We have to provide them here.
*
* TODO: Convert MIPS to generic I/O. The helpers below can
* then be removed.
@@ -33,6 +33,6 @@ static inline void fb_writeq(u64 b, volatile void __iomem *addr)
#define fb_writeq fb_writeq
#endif
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _ASM_FB_H_ */
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index eabddb89d221..c97b089b9902 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -617,7 +617,7 @@ const struct dma_map_ops jazz_dma_ops = {
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
EXPORT_SYMBOL(jazz_dma_ops);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index d1b11f66f748..cb1045ebab06 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_REGS, thread_info, regs);
+ OFFSET(TI_SYSCALL, thread_info, syscall);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 7b2fbaa9cac5..ba0f62d8eff5 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -13,7 +13,6 @@
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/numa.h>
-#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
@@ -31,15 +30,6 @@ struct mips_hi16 {
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-#ifdef MODULE_START
-void *module_alloc(unsigned long size)
-{
- return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-#endif
-
static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
{
*location = base + v;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 59288c13b581..61503a36067e 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1317,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request,
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
-asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
user_exit();
- current_thread_info()->syscall = syscall;
-
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (ptrace_report_syscall_entry(regs))
return -1;
- syscall = current_thread_info()->syscall;
}
#ifdef CONFIG_SECCOMP
@@ -1335,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
struct seccomp_data sd;
unsigned long args[6];
- sd.nr = syscall;
+ sd.nr = current_thread_info()->syscall;
sd.arch = syscall_get_arch(current);
syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)
@@ -1345,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
ret = __secure_computing(&sd);
if (ret == -1)
return ret;
- syscall = current_thread_info()->syscall;
}
#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
- audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
+ audit_syscall_entry(current_thread_info()->syscall,
+ regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
/*
* Negative syscall numbers are mistaken for rejected syscalls, but
* won't have had the return value set appropriately, so we do so now.
*/
- if (syscall < 0)
+ if (current_thread_info()->syscall < 0)
syscall_set_return_value(current, regs, -ENOSYS, 0);
- return syscall;
+ return current_thread_info()->syscall;
}
/*
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 18dc9b345056..2c604717e630 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -77,6 +77,18 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ */
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
+ b 2f
+1:
+ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
+2:
+
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
@@ -114,16 +126,7 @@ syscall_trace_entry:
SAVE_STATIC
move a0, sp
- /*
- * syscall number is in v0 unless we called syscall(__NR_###)
- * where the real syscall number is in a0
- */
- move a1, v0
- subu t2, v0, __NR_O32_Linux
- bnez t2, 1f /* __NR_syscall at offset 0 */
- lw a1, PT_R4(sp)
-
-1: jal syscall_trace_enter
+ jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
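
The O32 entry path above now resolves the effective syscall number once, at entry: if v0 holds __NR_syscall (offset 0) the real number travels in a0, and whichever value wins is stored in thread_info->syscall, so syscall_trace_enter() no longer needs it as an argument. A minimal userspace model of that pattern follows; thread_state, NR_SYSCALL_INDIRECT and trace_enter() are illustrative names, not kernel symbols.

#include <stdio.h>

/* Mirrors __NR_O32_Linux: the indirect syscall(2) sits at offset 0. */
#define NR_SYSCALL_INDIRECT 4000

struct thread_state {
	long syscall;		/* stands in for thread_info->syscall */
};

static _Thread_local struct thread_state current_thread;

static void syscall_entry(long nr, long arg0)
{
	/* Indirect syscall: the real number arrives in the first argument. */
	current_thread.syscall = (nr == NR_SYSCALL_INDIRECT) ? arg0 : nr;
}

static long trace_enter(void)
{
	/* Reads the stashed number instead of taking it as a parameter. */
	return current_thread.syscall;
}

int main(void)
{
	syscall_entry(4003, 0);				/* direct: number in "v0" */
	printf("traced nr = %ld\n", trace_enter());

	syscall_entry(NR_SYSCALL_INDIRECT, 4003);	/* syscall(4003, ...) */
	printf("traced nr = %ld\n", trace_enter());
	return 0;
}
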
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 97456b2ca7dc..97788859238c 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
+ LONG_S v0, TI_SYSCALL($28) # Store syscall number
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -72,7 +74,6 @@ syscall_common:
n32_syscall_trace_entry:
SAVE_STATIC
move a0, sp
- move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
index e6264aa62e45..be11ea5cc67e 100644
--- a/arch/mips/kernel/scall64-n64.S
+++ b/arch/mips/kernel/scall64-n64.S
@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
+ LONG_S v0, TI_SYSCALL($28) # Store syscall number
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -82,7 +84,6 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
move a0, sp
- move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d3c2616cba22..7a5abb73e531 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -79,6 +79,22 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous
+ /*
+ * absolute syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ * note: NR_syscall is the first O32 syscall but the macro is
+ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+ * therefore __NR_O32_Linux is used (4000)
+ */
+
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
+ b 2f
+1:
+ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
+2:
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -113,22 +129,7 @@ trace_a_syscall:
sd a7, PT_R11(sp) # For indirect syscalls
move a0, sp
- /*
- * absolute syscall number is in v0 unless we called syscall(__NR_###)
- * where the real syscall number is in a0
- * note: NR_syscall is the first O32 syscall but the macro is
- * only defined when compiling with -mabi=32 (CONFIG_32BIT)
- * therefore __NR_O32_Linux is used (4000)
- */
- .set push
- .set reorder
- subu t1, v0, __NR_O32_Linux
- move a1, v0
- bnez t1, 1f /* __NR_syscall at offset 0 */
- ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
- .set pop
-
-1: jal syscall_trace_enter
+ jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index e6b21de65cca..56f6f093bb88 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
$(shell mkdir -p $(uapi) $(kapi))
syshdr := $(srctree)/scripts/syscallhdr.sh
-sysnr := $(srctree)/$(src)/syscallnr.sh
+sysnr := $(src)/syscallnr.sh
systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 467ee6b95ae1..c17157e700c0 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -444,36 +444,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return true;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- gpa_t gpa = range->start << PAGE_SHIFT;
- pte_t hva_pte = range->arg.pte;
- pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
- pte_t old_pte;
-
- if (!gpa_pte)
- return false;
-
- /* Mapping may need adjusting depending on memslot flags */
- old_pte = *gpa_pte;
- if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
- hva_pte = pte_mkclean(hva_pte);
- else if (range->slot->flags & KVM_MEM_READONLY)
- hva_pte = pte_wrprotect(hva_pte);
-
- set_pte(gpa_pte, hva_pte);
-
- /* Replacing an absent or old page doesn't need flushes */
- if (!pte_present(old_pte) || !pte_young(old_pte))
- return false;
-
- /* Pages swapped, aged, moved, or cleaned require flushes */
- return !pte_present(hva_pte) ||
- !pte_young(hva_pte) ||
- pte_pfn(old_pte) != pte_pfn(hva_pte) ||
- (pte_dirty(old_pte) && !pte_dirty(hva_pte));
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 0f3cec663a12..ab4f2a75a7d0 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
dev->dma_coherent = coherent;
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index aaa9a242ebba..37fedeaca2e9 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -83,8 +83,8 @@ static void __do_page_fault(struct pt_regs *regs, unsigned long write,
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
-#ifdef MODULE_START
- if (unlikely(address >= MODULE_START && address < MODULE_END))
+#ifdef MODULES_VADDR
+ if (unlikely(address >= MODULES_VADDR && address < MODULES_END))
goto VMALLOC_FAULT_TARGET;
#endif
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 7eaff5b07873..0b9e15555b59 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -57,13 +57,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
}
return (pte_t *) pmd;
}
-
-int pmd_huge(pmd_t pmd)
-{
- return (pmd_val(pmd) & _PAGE_HUGE) != 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return (pud_val(pud) & _PAGE_HUGE) != 0;
-}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 39f129205b0c..4583d1a2a73e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -31,6 +31,7 @@
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>
+#include <linux/execmem.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
@@ -576,3 +577,25 @@ EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+#ifdef MODULES_VADDR
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif
+#endif /* CONFIG_EXECMEM */
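
The execmem_arch_setup() added above fills a static descriptor by assigning a compound literal with designated initializers, which implicitly zeroes every field that is not named. The standalone sketch below shows the same construct with simplified types; struct region_info and RANGE_DEFAULT are illustrative stand-ins, not the kernel's execmem structures.

#include <stdio.h>

enum { RANGE_DEFAULT, RANGE_MAX };

struct range_info {
	unsigned long start;
	unsigned long end;
	unsigned int alignment;
};

struct region_info {
	struct range_info ranges[RANGE_MAX];
};

static struct region_info region_info;

static struct region_info *region_setup(unsigned long start, unsigned long end)
{
	/* One assignment initializes the named range and zeroes the rest. */
	region_info = (struct region_info){
		.ranges = {
			[RANGE_DEFAULT] = {
				.start = start,
				.end = end,
				.alignment = 1,
			},
		},
	};

	return &region_info;
}

int main(void)
{
	struct region_info *info = region_setup(0xc0000000UL, 0xc2000000UL);

	printf("default range: %#lx-%#lx\n",
	       info->ranges[RANGE_DEFAULT].start,
	       info->ranges[RANGE_DEFAULT].end);
	return 0;
}
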
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 00fe90c6db3e..7e11d7b58761 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -34,7 +34,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -92,7 +92,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
*/
}
- info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 4106084e57d7..76f3b9c0a9f0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -326,7 +326,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
- if (pmd_huge(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
ptep = (pte_t *)pmdp;
diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
index a40d926b6513..e355dfca4400 100644
--- a/arch/mips/net/bpf_jit_comp.c
+++ b/arch/mips/net/bpf_jit_comp.c
@@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
/* Set as read-only exec and flush instruction cache */
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_err;
flush_icache_range((unsigned long)header,
(unsigned long)&ctx.target[ctx.jit_index]);
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 2583e318e8c6..b080c7c6cc46 100644..100755
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
{
union cvmx_pcie_address pcie_addr;
union cvmx_pciercx_cfg006 pciercx_cfg006;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
pciercx_cfg006.u32 =
cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
return 0;
+ pciercx_cfg032.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
+ return 0;
+
pcie_addr.u64 = 0;
pcie_addr.config.upper = 2;
pcie_addr.config.io = 1;
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 29c21b9d42da..ea6ebfea4a67 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -197,7 +197,7 @@ void rb532_gpio_set_func(unsigned gpio)
}
EXPORT_SYMBOL(rb532_gpio_set_func);
-int __init rb532_gpio_init(void)
+static int __init rb532_gpio_init(void)
{
struct resource *r;
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index b11693715547..b88e89ec5894 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -46,7 +46,7 @@ static inline unsigned long tag2ul(char *arg, const char *tag)
return simple_strtoul(num, 0, 10);
}
-void __init prom_setup_cmdline(void)
+static void __init prom_setup_cmdline(void)
{
static char cmd_line[COMMAND_LINE_SIZE] __initdata;
char *cp, *board;
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 8f5299b269e7..00e63e9ef61d 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -277,7 +277,6 @@ void __init arch_init_irq(void)
{
struct irq_domain *domain;
struct fwnode_handle *fn;
- int i;
mips_cpu_irq_init();
@@ -286,20 +285,16 @@ void __init arch_init_irq(void)
* Mark these as reserved right away so they won't be used accidentally
* later.
*/
- for (i = 0; i <= CPU_CALL_B_IRQ; i++)
- set_bit(i, hub_irq_map);
-
- for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
- set_bit(i, hub_irq_map);
+ bitmap_set(hub_irq_map, 0, CPU_CALL_B_IRQ + 1);
+ bitmap_set(hub_irq_map, NI_BRDCAST_ERR_A, MSC_PANIC_INTR - NI_BRDCAST_ERR_A + 1);
fn = irq_domain_alloc_named_fwnode("HUB");
- WARN_ON(fn == NULL);
- if (!fn)
+ if (WARN_ON(fn == NULL))
return;
+
domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
&hub_domain_ops, NULL);
- WARN_ON(domain == NULL);
- if (!domain)
+ if (WARN_ON(domain == NULL))
return;
irq_set_default_host(domain);
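
The hunk above replaces two explicit set_bit() loops with bitmap_set() calls that describe each reserved block as a (start, length) pair. The toy below sketches that equivalence with simplified userspace helpers; set_bit_toy() and bitmap_set_toy() are stand-ins, not the kernel implementations.

#include <stdio.h>
#include <string.h>

#define MAP_BITS 64
static unsigned long hub_map[MAP_BITS / (8 * sizeof(unsigned long))];

static void set_bit_toy(unsigned int bit, unsigned long *map)
{
	map[bit / (8 * sizeof(*map))] |= 1UL << (bit % (8 * sizeof(*map)));
}

static void bitmap_set_toy(unsigned long *map, unsigned int start,
			   unsigned int count)
{
	while (count--)
		set_bit_toy(start++, map);
}

int main(void)
{
	/* Old style: one loop per reserved block. */
	for (unsigned int i = 0; i <= 7; i++)
		set_bit_toy(i, hub_map);
	printf("loop-built word 0  = %#lx\n", hub_map[0]);

	/* New style: each block expressed as start and length. */
	memset(hub_map, 0, sizeof(hub_map));
	bitmap_set_toy(hub_map, 0, 7 + 1);
	bitmap_set_toy(hub_map, 20, 30 - 20 + 1);
	printf("range-built word 0 = %#lx\n", hub_map[0]);
	return 0;
}
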
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index eb56581f6d73..b289b2c1b294 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Objects to go into the VDSO.
-# Sanitizer runtimes are unavailable and cannot be linked here.
- KCSAN_SANITIZE := n
-
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
@@ -43,8 +40,8 @@ CFLAGS_vgettimeofday.o = -include $(c-gettimeofday-y)
# config-n32-o32-env.c prepares the environment to build a 32bit vDSO
# library on a 64bit kernel.
# Note: Needs to be included before than the generic library.
-CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
-CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
+CFLAGS_vgettimeofday-o32.o = -include $(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
+CFLAGS_vgettimeofday-n32.o = -include $(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
endif
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
@@ -60,10 +57,6 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE)
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
# Check that we don't have PIC 'jalr t9' calls left
quiet_cmd_vdso_mips_check = VDSOCHK $@
cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | grep -E -h "jalr.*t9" > /dev/null; \
diff --git a/arch/nios2/boot/dts/Makefile b/arch/nios2/boot/dts/Makefile
index e9e31bb40df8..1a2e8996bec7 100644
--- a/arch/nios2/boot/dts/Makefile
+++ b/arch/nios2/boot/dts/Makefile
@@ -2,5 +2,4 @@
obj-y := $(patsubst %.dts,%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE))
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index d052dfcbe8d3..eab87c6beacb 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -25,7 +25,10 @@
#include <asm-generic/pgtable-nopmd.h>
#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
-#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
+#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M - 1)
+
+#define MODULES_VADDR (CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M)
+#define MODULES_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
struct mm_struct;
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index 76e0a42d6e36..f4483243578d 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -13,7 +13,6 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
@@ -21,25 +20,6 @@
#include <asm/cacheflush.h>
-/*
- * Modules should NOT be allocated with kmalloc for (obvious) reasons.
- * But we do it for now to avoid relocation issues. CALL26/PCREL26 cannot reach
- * from 0x80000000 (vmalloc area) to 0xc00000000 (kernel) (kmalloc returns
- * addresses in 0xc0000000)
- */
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return kmalloc(size, GFP_KERNEL);
-}
-
-/* Free memory returned from module_alloc */
-void module_memfree(void *module_region)
-{
- kfree(module_region);
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *mod)
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 8d98af5c7201..9a8393e6b4a8 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -21,7 +21,8 @@
void __init early_init_devtree(void *params)
{
- __be32 *dtb = (u32 *)__dtb_start;
+ __be32 __maybe_unused *dtb = (u32 *)__dtb_start;
+
#if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
OF_DT_HEADER) {
@@ -30,8 +31,11 @@ void __init early_init_devtree(void *params)
return;
}
#endif
+
+#ifdef CONFIG_NIOS2_DTB_SOURCE_BOOL
if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
params = (void *)__dtb_start;
+#endif
early_init_dt_scan(params);
}
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 7bc82ee889c9..3459df28afee 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -26,6 +26,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
+#include <linux/execmem.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -143,3 +144,23 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
};
DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL_EXEC,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 3586cda55bde..69c0258700b2 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -188,6 +188,15 @@ config SMP
If you don't know what to do here, say N.
+config FPU
+ bool "FPU support"
+ default y
+ help
+ Say N here if you want to disable all floating-point related procedures
+ in the kernel and reduce binary size.
+
+ If you don't know what to do here, say Y.
+
source "kernel/Kconfig.hz"
config OPENRISC_NO_SPR_SR_DSX
diff --git a/arch/openrisc/include/asm/fpu.h b/arch/openrisc/include/asm/fpu.h
new file mode 100644
index 000000000000..57bc44d80d53
--- /dev/null
+++ b/arch/openrisc/include/asm/fpu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_OPENRISC_FPU_H
+#define __ASM_OPENRISC_FPU_H
+
+struct task_struct;
+
+#ifdef CONFIG_FPU
+static inline void save_fpu(struct task_struct *task)
+{
+ task->thread.fpcsr = mfspr(SPR_FPCSR);
+}
+
+static inline void restore_fpu(struct task_struct *task)
+{
+ mtspr(SPR_FPCSR, task->thread.fpcsr);
+}
+#else
+#define save_fpu(tsk) do { } while (0)
+#define restore_fpu(tsk) do { } while (0)
+#endif
+
+#endif /* __ASM_OPENRISC_FPU_H */
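
The new <asm/fpu.h> above uses the usual stub pattern: real inline save/restore helpers when CONFIG_FPU is set, empty do-while macros otherwise, so the call sites added later in process.c and signal.c stay free of #ifdefs. A compilable illustration of that pattern follows; HAVE_WIDGET, save_widget() and struct task are made-up names, not kernel symbols.

#include <stdio.h>

struct task {
	long widget_state;
};

#define HAVE_WIDGET 1	/* set to 0 to compile the stubbed variant */

#if HAVE_WIDGET
static long hw_widget_reg;	/* stands in for a hardware status register */

static inline void save_widget(struct task *t)
{
	t->widget_state = hw_widget_reg;
}

static inline void restore_widget(struct task *t)
{
	hw_widget_reg = t->widget_state;
}
#else
/* Feature disabled: same names, no code generated at the call sites. */
#define save_widget(t)    do { } while (0)
#define restore_widget(t) do { } while (0)
#endif

int main(void)
{
	struct task t = { 0 };

	save_widget(&t);	/* compiles either way; a no-op if disabled */
	restore_widget(&t);
	printf("state = %ld\n", t.widget_state);
	return 0;
}
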
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 3b736e74e6ed..e05d1b59e24e 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -44,6 +44,7 @@
struct task_struct;
struct thread_struct {
+ long fpcsr; /* Floating point control status register. */
};
/*
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
index 375147ff71fc..1da3e66292e2 100644
--- a/arch/openrisc/include/asm/ptrace.h
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -59,7 +59,7 @@ struct pt_regs {
* -1 for all other exceptions.
*/
long orig_gpr11; /* For restarting system calls */
- long fpcsr; /* Floating point control status register. */
+ long dummy; /* Cheap alignment fix */
long dummy2; /* Cheap alignment fix */
};
@@ -115,6 +115,5 @@ static inline long regs_return_value(struct pt_regs *regs)
#define PT_GPR31 124
#define PT_PC 128
#define PT_ORIG_GPR11 132
-#define PT_FPCSR 136
#endif /* __ASM_OPENRISC_PTRACE_H */
diff --git a/arch/openrisc/include/uapi/asm/elf.h b/arch/openrisc/include/uapi/asm/elf.h
index 6868f81c281e..441e343f8268 100644
--- a/arch/openrisc/include/uapi/asm/elf.h
+++ b/arch/openrisc/include/uapi/asm/elf.h
@@ -34,15 +34,72 @@
#include <asm/ptrace.h>
/* The OR1K relocation types... not all relevant for module loader */
-#define R_OR32_NONE 0
-#define R_OR32_32 1
-#define R_OR32_16 2
-#define R_OR32_8 3
-#define R_OR32_CONST 4
-#define R_OR32_CONSTH 5
-#define R_OR32_JUMPTARG 6
-#define R_OR32_VTINHERIT 7
-#define R_OR32_VTENTRY 8
+#define R_OR1K_NONE 0
+#define R_OR1K_32 1
+#define R_OR1K_16 2
+#define R_OR1K_8 3
+#define R_OR1K_LO_16_IN_INSN 4
+#define R_OR1K_HI_16_IN_INSN 5
+#define R_OR1K_INSN_REL_26 6
+#define R_OR1K_GNU_VTENTRY 7
+#define R_OR1K_GNU_VTINHERIT 8
+#define R_OR1K_32_PCREL 9
+#define R_OR1K_16_PCREL 10
+#define R_OR1K_8_PCREL 11
+#define R_OR1K_GOTPC_HI16 12
+#define R_OR1K_GOTPC_LO16 13
+#define R_OR1K_GOT16 14
+#define R_OR1K_PLT26 15
+#define R_OR1K_GOTOFF_HI16 16
+#define R_OR1K_GOTOFF_LO16 17
+#define R_OR1K_COPY 18
+#define R_OR1K_GLOB_DAT 19
+#define R_OR1K_JMP_SLOT 20
+#define R_OR1K_RELATIVE 21
+#define R_OR1K_TLS_GD_HI16 22
+#define R_OR1K_TLS_GD_LO16 23
+#define R_OR1K_TLS_LDM_HI16 24
+#define R_OR1K_TLS_LDM_LO16 25
+#define R_OR1K_TLS_LDO_HI16 26
+#define R_OR1K_TLS_LDO_LO16 27
+#define R_OR1K_TLS_IE_HI16 28
+#define R_OR1K_TLS_IE_LO16 29
+#define R_OR1K_TLS_LE_HI16 30
+#define R_OR1K_TLS_LE_LO16 31
+#define R_OR1K_TLS_TPOFF 32
+#define R_OR1K_TLS_DTPOFF 33
+#define R_OR1K_TLS_DTPMOD 34
+#define R_OR1K_AHI16 35
+#define R_OR1K_GOTOFF_AHI16 36
+#define R_OR1K_TLS_IE_AHI16 37
+#define R_OR1K_TLS_LE_AHI16 38
+#define R_OR1K_SLO16 39
+#define R_OR1K_GOTOFF_SLO16 40
+#define R_OR1K_TLS_LE_SLO16 41
+#define R_OR1K_PCREL_PG21 42
+#define R_OR1K_GOT_PG21 43
+#define R_OR1K_TLS_GD_PG21 44
+#define R_OR1K_TLS_LDM_PG21 45
+#define R_OR1K_TLS_IE_PG21 46
+#define R_OR1K_LO13 47
+#define R_OR1K_GOT_LO13 48
+#define R_OR1K_TLS_GD_LO13 49
+#define R_OR1K_TLS_LDM_LO13 50
+#define R_OR1K_TLS_IE_LO13 51
+#define R_OR1K_SLO13 52
+#define R_OR1K_PLTA26 53
+#define R_OR1K_GOT_AHI16 54
+
+/* Old relocation names */
+#define R_OR32_NONE R_OR1K_NONE
+#define R_OR32_32 R_OR1K_32
+#define R_OR32_16 R_OR1K_16
+#define R_OR32_8 R_OR1K_8
+#define R_OR32_CONST R_OR1K_LO_16_IN_INSN
+#define R_OR32_CONSTH R_OR1K_HI_16_IN_INSN
+#define R_OR32_JUMPTARG R_OR1K_INSN_REL_26
+#define R_OR32_VTENTRY R_OR1K_GNU_VTENTRY
+#define R_OR32_VTINHERIT R_OR1K_GNU_VTINHERIT
typedef unsigned long elf_greg_t;
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index c9f48e750b72..440711d7bf40 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -106,8 +106,6 @@
l.mtspr r0,r3,SPR_EPCR_BASE ;\
l.lwz r3,PT_SR(r1) ;\
l.mtspr r0,r3,SPR_ESR_BASE ;\
- l.lwz r3,PT_FPCSR(r1) ;\
- l.mtspr r0,r3,SPR_FPCSR ;\
l.lwz r2,PT_GPR2(r1) ;\
l.lwz r3,PT_GPR3(r1) ;\
l.lwz r4,PT_GPR4(r1) ;\
@@ -177,8 +175,6 @@ handler: ;\
/* r30 already save */ ;\
l.sw PT_GPR31(r1),r31 ;\
TRACE_IRQS_OFF_ENTRY ;\
- l.mfspr r30,r0,SPR_FPCSR ;\
- l.sw PT_FPCSR(r1),r30 ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30
@@ -219,8 +215,6 @@ handler: ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30 ;\
- l.mfspr r30,r0,SPR_FPCSR ;\
- l.sw PT_FPCSR(r1),r30 ;\
l.addi r3,r1,0 ;\
/* r4 is exception EA */ ;\
l.addi r5,r0,vector ;\
@@ -852,6 +846,7 @@ _syscall_badsys:
EXCEPTION_ENTRY(_fpe_trap_handler)
CLEAR_LWA_FLAG(r3)
+
/* r4: EA of fault (set by EXCEPTION_HANDLE) */
l.jal do_fpe_trap
l.addi r3,r1,0 /* pt_regs */
@@ -1100,10 +1095,6 @@ ENTRY(_switch)
l.sw PT_GPR28(r1),r28
l.sw PT_GPR30(r1),r30
- /* Store the old FPU state to new pt_regs */
- l.mfspr r29,r0,SPR_FPCSR
- l.sw PT_FPCSR(r1),r29
-
l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/
/* We use thread_info->ksp for storing the address of the above
@@ -1126,10 +1117,6 @@ ENTRY(_switch)
l.lwz r29,PT_SP(r1)
l.sw TI_KSP(r10),r29
- /* Restore the old value of FPCSR */
- l.lwz r29,PT_FPCSR(r1)
- l.mtspr r0,r29,SPR_FPCSR
-
/* ...and restore the registers, except r11 because the return value
* has already been set above.
*/
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c
index 532013f523ac..c9ff4c4a0b29 100644
--- a/arch/openrisc/kernel/module.c
+++ b/arch/openrisc/kernel/module.c
@@ -39,22 +39,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value = sym->st_value + rel[i].r_addend;
switch (ELF32_R_TYPE(rel[i].r_info)) {
- case R_OR32_32:
+ case R_OR1K_32:
*location = value;
break;
- case R_OR32_CONST:
+ case R_OR1K_LO_16_IN_INSN:
*((uint16_t *)location + 1) = value;
break;
- case R_OR32_CONSTH:
+ case R_OR1K_HI_16_IN_INSN:
*((uint16_t *)location + 1) = value >> 16;
break;
- case R_OR32_JUMPTARG:
+ case R_OR1K_INSN_REL_26:
value -= (uint32_t)location;
value >>= 2;
value &= 0x03ffffff;
value |= *location & 0xfc000000;
*location = value;
break;
+ case R_OR1K_AHI16:
+ /* Adjust the operand to match with a signed LO16. */
+ value += 0x8000;
+ *((uint16_t *)location + 1) = value >> 16;
+ break;
+ case R_OR1K_SLO16:
+ /* Split value lower 16-bits. */
+ value = ((value & 0xf800) << 10) | (value & 0x7ff);
+ *location = (*location & ~0x3e007ff) | value;
+ break;
default:
pr_err("module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
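
The new R_OR1K_AHI16 and R_OR1K_SLO16 cases above are pure bit arithmetic: AHI16 pre-biases the value by 0x8000 so a later sign-extended LO16 addend reconstructs the full 32 bits, and SLO16 scatters the low 16 bits into a split immediate field. The standalone program below exercises the same two calculations; the instruction word passed to slo16() is a made-up placeholder, not a real OpenRISC encoding.

#include <stdio.h>
#include <stdint.h>

static unsigned int ahi16(uint32_t value)
{
	/* Bias by 0x8000 so the signed LO16 half adds back correctly. */
	return (value + 0x8000) >> 16;
}

static uint32_t slo16(uint32_t insn, uint32_t value)
{
	/* Bits 15..11 move up by 10 positions, bits 10..0 stay in place. */
	uint32_t split = ((value & 0xf800) << 10) | (value & 0x7ff);

	return (insn & ~0x03e007ffu) | split;
}

int main(void)
{
	uint32_t value = 0x1234ffff;	/* low half is negative as a LO16 */
	unsigned int hi = ahi16(value);
	int lo = (int16_t)(value & 0xffff);

	printf("(%#x << 16) + (%d) = %#x\n", hi, lo,
	       (unsigned int)((hi << 16) + lo));
	printf("slo16-patched word: %#010x\n",
	       (unsigned int)slo16(0xd7e00000u, value));
	return 0;
}
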
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 86e02929f3ac..eef99fee2110 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -36,6 +36,7 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
+#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/spr_defs.h>
@@ -65,7 +66,7 @@ void machine_restart(char *cmd)
}
/*
- * This is used if pm_power_off has not been set by a power management
+ * This is used if a sys-off handler was not set by a power management
* driver, in this case we can assume we are on a simulator. On
* OpenRISC simulators l.nop 1 will trigger the simulator exit.
*/
@@ -89,10 +90,8 @@ void machine_halt(void)
void machine_power_off(void)
{
printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
- if (pm_power_off != NULL)
- pm_power_off();
- else
- default_power_off();
+ do_kernel_power_off();
+ default_power_off();
}
/*
@@ -246,6 +245,8 @@ struct task_struct *__switch_to(struct task_struct *old,
local_irq_save(flags);
+ save_fpu(current);
+
/* current_set is an array of saved current pointers
* (one for each cpu). we need them at user->kernel transition,
* while we save them at kernel->user transition
@@ -258,6 +259,8 @@ struct task_struct *__switch_to(struct task_struct *old,
current_thread_info_set[smp_processor_id()] = new_ti;
last = (_switch(old_ti, new_ti))->task;
+ restore_fpu(current);
+
local_irq_restore(flags);
return last;
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index 1eeac3b62e9d..5091b18eab4c 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -88,6 +88,7 @@ static int genregs_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_FPU
/*
* As OpenRISC shares GPRs and floating point registers we don't need to export
* the floating point registers again. So here we only export the fpcsr special
@@ -97,9 +98,7 @@ static int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
- const struct pt_regs *regs = task_pt_regs(target);
-
- return membuf_store(&to, regs->fpcsr);
+ return membuf_store(&to, target->thread.fpcsr);
}
static int fpregs_set(struct task_struct *target,
@@ -107,21 +106,20 @@ static int fpregs_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct pt_regs *regs = task_pt_regs(target);
- int ret;
-
/* FPCSR */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs->fpcsr, 0, 4);
- return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpcsr, 0, 4);
}
+#endif
/*
* Define the register sets available on OpenRISC under Linux
*/
enum or1k_regset {
REGSET_GENERAL,
+#ifdef CONFIG_FPU
REGSET_FPU,
+#endif
};
static const struct user_regset or1k_regsets[] = {
@@ -133,6 +131,7 @@ static const struct user_regset or1k_regsets[] = {
.regset_get = genregs_get,
.set = genregs_set,
},
+#ifdef CONFIG_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct __or1k_fpu_state) / sizeof(long),
@@ -141,6 +140,7 @@ static const struct user_regset or1k_regsets[] = {
.regset_get = fpregs_get,
.set = fpregs_set,
},
+#endif
};
static const struct user_regset_view user_or1k_native_view = {
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index e2f21a5d8ad9..c7ab42e2cb7a 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -23,6 +23,7 @@
#include <linux/stddef.h>
#include <linux/resume_user_mode.h>
+#include <asm/fpu.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <asm/ucontext.h>
@@ -39,6 +40,37 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs);
asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
int syscall);
+#ifdef CONFIG_FPU
+static long restore_fp_state(struct sigcontext __user *sc)
+{
+ long err;
+
+ err = __copy_from_user(&current->thread.fpcsr, &sc->fpcsr, sizeof(unsigned long));
+ if (unlikely(err))
+ return err;
+
+ /* Restore the FPU state */
+ restore_fpu(current);
+
+ return 0;
+}
+
+static long save_fp_state(struct sigcontext __user *sc)
+{
+ long err;
+
+ /* Sync the user FPU state so we can copy to sigcontext */
+ save_fpu(current);
+
+ err = __copy_to_user(&sc->fpcsr, &current->thread.fpcsr, sizeof(unsigned long));
+
+ return err;
+}
+#else
+#define save_fp_state(sc) (0)
+#define restore_fp_state(sc) (0)
+#endif
+
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
@@ -55,7 +87,7 @@ static int restore_sigcontext(struct pt_regs *regs,
err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
- err |= __copy_from_user(&regs->fpcsr, &sc->fpcsr, sizeof(unsigned long));
+ err |= restore_fp_state(sc);
/* make sure the SM-bit is cleared so user-mode cannot fool us */
regs->sr &= ~SPR_SR_SM;
@@ -118,7 +150,7 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
- err |= __copy_to_user(&sc->fpcsr, &regs->fpcsr, sizeof(unsigned long));
+ err |= save_fp_state(sc);
return err;
}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 9370888c9a7e..c195be9cc9fc 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -31,6 +31,7 @@
#include <linux/uaccess.h>
#include <asm/bug.h>
+#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/unwinder.h>
@@ -51,16 +52,16 @@ static void print_trace(void *data, unsigned long addr, int reliable)
{
const char *loglvl = data;
- printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
- (void *) addr);
+ pr_info("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
+ (void *) addr);
}
static void print_data(unsigned long base_addr, unsigned long word, int i)
{
if (i == 0)
- printk("(%08lx:)\t%08lx", base_addr + (i * 4), word);
+ pr_info("(%08lx:)\t%08lx", base_addr + (i * 4), word);
else
- printk(" %08lx:\t%08lx", base_addr + (i * 4), word);
+ pr_info(" %08lx:\t%08lx", base_addr + (i * 4), word);
}
/* displays a short stack trace */
@@ -69,7 +70,7 @@ void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl
if (esp == NULL)
esp = (unsigned long *)&esp;
- printk("%sCall trace:\n", loglvl);
+ pr_info("%sCall trace:\n", loglvl);
unwind_stack((void *)loglvl, esp, print_trace);
}
@@ -83,57 +84,56 @@ void show_registers(struct pt_regs *regs)
if (user_mode(regs))
in_kernel = 0;
- printk("CPU #: %d\n"
- " PC: %08lx SR: %08lx SP: %08lx FPCSR: %08lx\n",
- smp_processor_id(), regs->pc, regs->sr, regs->sp,
- regs->fpcsr);
- printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
- 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
- printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
- regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
- printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
- regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
- printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
- regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
- printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
- regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
- printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
- regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
- printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
- regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
- printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
- regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
- printk(" RES: %08lx oGPR11: %08lx\n",
- regs->gpr[11], regs->orig_gpr11);
-
- printk("Process %s (pid: %d, stackpage=%08lx)\n",
- current->comm, current->pid, (unsigned long)current);
+ pr_info("CPU #: %d\n"
+ " PC: %08lx SR: %08lx SP: %08lx\n",
+ smp_processor_id(), regs->pc, regs->sr, regs->sp);
+ pr_info("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+ 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+ pr_info("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+ regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+ pr_info("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+ regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+ pr_info("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+ regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+ pr_info("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+ regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+ pr_info("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+ regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+ pr_info("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+ regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+ pr_info("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+ regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+ pr_info(" RES: %08lx oGPR11: %08lx\n",
+ regs->gpr[11], regs->orig_gpr11);
+
+ pr_info("Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, (unsigned long)current);
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
if (in_kernel) {
- printk("\nStack: ");
+ pr_info("\nStack: ");
show_stack(NULL, (unsigned long *)esp, KERN_EMERG);
if (esp < PAGE_OFFSET)
goto bad_stack;
- printk("\n");
+ pr_info("\n");
for (i = -8; i < 24; i += 1) {
unsigned long word;
if (__get_user(word, &((unsigned long *)esp)[i])) {
bad_stack:
- printk(" Bad Stack value.");
+ pr_info(" Bad Stack value.");
break;
}
print_data(esp, word, i);
}
- printk("\nCode: ");
+ pr_info("\nCode: ");
if (regs->pc < PAGE_OFFSET)
goto bad;
@@ -142,14 +142,14 @@ bad_stack:
if (__get_user(word, &((unsigned long *)regs->pc)[i])) {
bad:
- printk(" Bad PC value.");
+ pr_info(" Bad PC value.");
break;
}
print_data(regs->pc, word, i);
}
}
- printk("\n");
+ pr_info("\n");
}
/* This is normally the 'Oops' routine */
@@ -157,10 +157,10 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
console_verbose();
- printk("\n%s#: %04lx\n", str, err & 0xffff);
+ pr_emerg("\n%s#: %04lx\n", str, err & 0xffff);
show_registers(regs);
#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
- printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
+ pr_emerg("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
/* shut down interrupts */
local_irq_disable();
@@ -173,36 +173,51 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
{
- printk("Unable to handle exception at EA =0x%x, vector 0x%x",
- ea, vector);
+ pr_emerg("Unable to handle exception at EA =0x%x, vector 0x%x",
+ ea, vector);
die("Oops", regs, 9);
}
asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
{
- int code = FPE_FLTUNK;
- unsigned long fpcsr = regs->fpcsr;
-
- if (fpcsr & SPR_FPCSR_IVF)
- code = FPE_FLTINV;
- else if (fpcsr & SPR_FPCSR_OVF)
- code = FPE_FLTOVF;
- else if (fpcsr & SPR_FPCSR_UNF)
- code = FPE_FLTUND;
- else if (fpcsr & SPR_FPCSR_DZF)
- code = FPE_FLTDIV;
- else if (fpcsr & SPR_FPCSR_IXF)
- code = FPE_FLTRES;
-
- /* Clear all flags */
- regs->fpcsr &= ~SPR_FPCSR_ALLF;
-
- force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
+ if (user_mode(regs)) {
+ int code = FPE_FLTUNK;
+#ifdef CONFIG_FPU
+ unsigned long fpcsr;
+
+ save_fpu(current);
+ fpcsr = current->thread.fpcsr;
+
+ if (fpcsr & SPR_FPCSR_IVF)
+ code = FPE_FLTINV;
+ else if (fpcsr & SPR_FPCSR_OVF)
+ code = FPE_FLTOVF;
+ else if (fpcsr & SPR_FPCSR_UNF)
+ code = FPE_FLTUND;
+ else if (fpcsr & SPR_FPCSR_DZF)
+ code = FPE_FLTDIV;
+ else if (fpcsr & SPR_FPCSR_IXF)
+ code = FPE_FLTRES;
+
+ /* Clear all flags */
+ current->thread.fpcsr &= ~SPR_FPCSR_ALLF;
+ restore_fpu(current);
+#endif
+ force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
+ } else {
+ pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc);
+ die("Die:", regs, SIGFPE);
+ }
}
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
{
- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
+ if (user_mode(regs)) {
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
+ } else {
+ pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc);
+ die("Die:", regs, SIGILL);
+ }
}
asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
@@ -211,8 +226,7 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
/* Send a SIGBUS */
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address);
} else {
- printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
- show_registers(regs);
+ pr_emerg("KERNEL: Unaligned Access 0x%.8lx\n", address);
die("Die:", regs, address);
}
@@ -224,8 +238,7 @@ asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
/* Send a SIGBUS */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
} else { /* Kernel mode */
- printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
- show_registers(regs);
+ pr_emerg("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
die("Die:", regs, address);
}
}
@@ -419,9 +432,8 @@ asmlinkage void do_illegal_instruction(struct pt_regs *regs,
/* Send a SIGILL */
force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address);
} else { /* Kernel mode */
- printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
- address);
- show_registers(regs);
+ pr_emerg("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
+ address);
die("Die:", regs, address);
}
}
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 316f84f1d15c..21b8166a6883 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -119,7 +119,7 @@ export LIBGCC
libs-y += arch/parisc/lib/ $(LIBGCC)
-drivers-y += arch/parisc/video/
+drivers-$(CONFIG_VIDEO) += arch/parisc/video/
boot := arch/parisc/boot
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index a294a1b58ee7..92227fa813dc 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -5,10 +5,6 @@
# create a compressed self-extracting vmlinux image from the original vmlinux
#
-KCOV_INSTRUMENT := n
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-
OBJECTS := head.o real2.o firmware.o misc.o piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index ee4febb30386..5ce258f3fffa 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -131,7 +131,7 @@ CONFIG_PPDEV=m
CONFIG_I2C=y
CONFIG_HWMON=m
CONFIG_DRM=m
-CONFIG_DRM_DP_CEC=y
+CONFIG_DRM_DISPLAY_DP_AUX_CEC=y
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_SIL164 is not set
CONFIG_DRM_RADEON=m
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index c1d776bb16b4..bf0a0f1189eb 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -56,26 +56,24 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
- unsigned int new_);
-extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
- switch (size) {
+ return
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+ size == 8 ? __cmpxchg_u64(ptr, old, new_) :
#endif
- case 4: return __cmpxchg_u32((unsigned int *)ptr,
- (unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);
}
#define arch_cmpxchg(ptr, o, n) \
diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
deleted file mode 100644
index 658a8a7dc531..000000000000
--- a/arch/parisc/include/asm/fb.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-struct fb_info;
-
-#if defined(CONFIG_STI_CORE)
-int fb_is_primary_device(struct fb_info *info);
-#define fb_is_primary_device fb_is_primary_device
-#endif
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 000000000000..47c5a1991d10
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+/* PARISC cannot allow mdwe as it needs writable stacks */
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return false;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index ad4e15d12ed1..4bea2e95798f 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -8,6 +8,7 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#ifndef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index 715c96ba2ec8..e84883c6b4c7 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -4,23 +4,11 @@
#include <uapi/asm/signal.h>
-#define _NSIG 64
-/* bits-per-word, where word apparently means 'long' not 'int' */
-#define _NSIG_BPW BITS_PER_LONG
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
# ifndef __ASSEMBLY__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- /* next_signal() assumes this is a long - no choice */
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
#include <asm/sigcontext.h>
#endif /* !__ASSEMBLY */
diff --git a/arch/parisc/include/asm/video.h b/arch/parisc/include/asm/video.h
new file mode 100644
index 000000000000..c5dff3223194
--- /dev/null
+++ b/arch/parisc/include/asm/video.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
+
+#include <linux/types.h>
+
+struct device;
+
+#if defined(CONFIG_STI_CORE)
+bool video_is_primary_device(struct device *dev);
+#define video_is_primary_device video_is_primary_device
+#endif
+
+#include <asm-generic/video.h>
+
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 8e4895c5ea5d..40d7a574c5dd 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -57,10 +57,20 @@
#include <asm-generic/signal-defs.h>
+#define _NSIG 64
+#define _NSIG_BPW (sizeof(unsigned long) * 8)
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
# ifndef __ASSEMBLY__
# include <linux/types.h>
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
/* Avoid too many header ordering problems. */
struct siginfo;
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 621a4b386ae4..c91f9c2e61ed 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index d214bbe3c2af..4e5d991b2b65 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -41,7 +41,6 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
@@ -173,17 +172,6 @@ static inline int reassemble_22(int as22)
((as22 & 0x0003ff) << 3));
}
-void *module_alloc(unsigned long size)
-{
- /* using RWX means less protection for modules, but it's
- * easier than trying to map the text, data, init_text and
- * init_data correctly */
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL,
- PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6f0c92e8149d..c1587aa35beb 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 444154271f23..800eb64e91ad 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -344,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
struct irq_desc *desc = irq_to_desc(i);
if (desc && desc->kstat_irqs)
- *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+ *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { };
}
#endif
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 98af719d5f85..f7722451276e 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -104,7 +104,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
struct vm_area_struct *vma, *prev;
unsigned long filp_pgoff;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {
+ .length = len
+ };
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -139,7 +141,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
return addr;
}
- info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(filp_pgoff, pgoff);
@@ -160,7 +161,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
*/
}
- info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_upper_limit(NULL);
return vm_unmapped_area(&info);
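
Aside: the two removals above work because of the designated initializer added at the top of the function — naming only .length leaves every other member zero-initialized, so the separate info.length and info.flags assignments become redundant. A minimal standalone illustration of that C rule (the struct below is a made-up stand-in, not the kernel's vm_unmapped_area_info):

    #include <assert.h>

    struct area_request {              /* hypothetical stand-in */
            unsigned long flags;
            unsigned long length;
            unsigned long low_limit;
            unsigned long high_limit;
            unsigned long align_mask;
            unsigned long align_offset;
    };

    int main(void)
    {
            unsigned long len = 4096;

            /* C11 6.7.9: members not named in the initializer list are
             * initialized as if the object had static storage, i.e. to 0. */
            struct area_request req = {
                    .length = len,
            };

            assert(req.flags == 0 && req.low_limit == 0 && req.align_offset == 0);
            return 0;
    }
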
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 4459a48d2303..1350d50c6306 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -26,26 +26,21 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso32ld)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
-$(obj-cvdso32): %.o: %.c FORCE
- $(call if_changed_dep,vdso32cc)
-
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
-quiet_cmd_vdso32cc = VDSO32C $@
- cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index f3d6045793f4..0b1c1cc4c2c7 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -26,7 +26,7 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso64ld)
# assembly rules for the .S files
@@ -40,7 +40,7 @@ quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 36a314199074..9df810050642 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
}
-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
-
-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
-{
- unsigned long flags;
- unsigned int prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return (unsigned long)prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
- unsigned long flags;
- u8 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
+#define CMPXCHG(T) \
+ T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ _atomic_spin_lock_irqsave(ptr, flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ _atomic_spin_unlock_irqrestore(ptr, flags); \
+ return prev; \
+ }
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u16)
+CMPXCHG(u8)
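
Aside: the hunk above folds three near-identical functions into one macro that stamps out a locked compare-and-exchange per type. A rough userspace sketch of the same pattern follows, with a single pthread mutex standing in for the kernel's hashed _atomic_spin_lock_irqsave(); this is illustrative only, not the parisc code:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t cmpxchg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* One macro generates a cmpxchg helper per type, like CMPXCHG() above. */
    #define DEFINE_CMPXCHG(T)                                        \
            static T cmpxchg_##T(volatile T *ptr, T old, T new_val)  \
            {                                                        \
                    T prev;                                          \
                    pthread_mutex_lock(&cmpxchg_lock);               \
                    prev = *ptr;                                     \
                    if (prev == old)                                 \
                            *ptr = new_val;                          \
                    pthread_mutex_unlock(&cmpxchg_lock);             \
                    return prev;                                     \
            }

    DEFINE_CMPXCHG(uint8_t)
    DEFINE_CMPXCHG(uint32_t)

    int main(void)
    {
            uint32_t v = 5;
            uint32_t prev = cmpxchg_uint32_t(&v, 5, 9);

            printf("prev=%u now=%u\n", (unsigned)prev, (unsigned)v); /* 5, 9 */
            return 0;
    }
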
diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
index 6ce427b58836..34495446e051 100644
--- a/arch/parisc/math-emu/driver.c
+++ b/arch/parisc/math-emu/driver.c
@@ -26,12 +26,6 @@
#define FPUDEBUG 0
-/* Format of the floating-point exception registers. */
-struct exc_reg {
- unsigned int exception : 6;
- unsigned int ei : 26;
-};
-
/* Macros for grabbing bits of the instruction format from the 'ei'
field above. */
/* Major opcode 0c and 0e */
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index a9f7e21f6656..0356199bd9e7 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -180,14 +180,3 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
}
return changed;
}
-
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index f876af56e13f..34d91cb8b259 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
+#include <linux/execmem.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -481,7 +482,7 @@ void free_initmem(void)
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
-
+
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
@@ -992,3 +993,23 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL_RWX,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
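
Aside: together with the module_alloc() removal earlier in this series, the block above turns policy into data — the architecture publishes where, and with which protections, executable memory may live, and generic code does the allocating. A loose sketch of what a consumer of such a table could look like; the names and the aligned_alloc() backend are invented for illustration and do not mirror the kernel's execmem implementation:

    #include <stdio.h>
    #include <stdlib.h>

    enum range_kind { RANGE_DEFAULT, RANGE_MAX };

    struct exec_range {
            unsigned long start, end;  /* candidate virtual address window */
            unsigned long prot;        /* stand-in for pgprot_t */
            size_t alignment;
    };

    struct exec_info {
            struct exec_range ranges[RANGE_MAX];
    };

    /* Stub backend: a real kernel would carve a mapping out of [start, end)
     * with the requested protections; the sketch grabs aligned heap memory
     * so it runs in userspace. */
    static void *alloc_in_range(const struct exec_range *r, size_t size)
    {
            size_t align = r->alignment ? r->alignment : sizeof(void *);

            return aligned_alloc(align, size);
    }

    /* Generic side: look the policy up in the table the architecture filled
     * in (parisc above chose a single RWX range for everything). */
    static void *exec_alloc(struct exec_info *info, enum range_kind kind,
                            size_t size)
    {
            return alloc_in_range(&info->ranges[kind], size);
    }

    int main(void)
    {
            struct exec_info info = {
                    .ranges = {
                            [RANGE_DEFAULT] = {
                                    .start = 0x10000000UL,
                                    .end   = 0x20000000UL,
                                    .prot  = 0x7,   /* "RWX" for the sketch */
                                    .alignment = 64,
                            },
                    },
            };
            void *p = exec_alloc(&info, RANGE_DEFAULT, 256);

            printf("allocated %p\n", p);
            free(p);
            return 0;
    }
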
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index d6ee2fd45550..979f45d4d1fb 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -167,7 +167,13 @@ skip_init_ctx:
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(jit_data->header);
+ if (bpf_jit_binary_lock_ro(jit_data->header)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
+ }
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] += prologue_len;
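
Aside: the error path added above treats a failure to seal the JIT image read-only as fatal for that program instead of silently keeping a writable mapping around. A userspace analogue of the same unwind pattern using mmap()/mprotect() — an analogy only, not bpf_jit_binary_lock_ro():

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static void *jit_image;
    static size_t jit_size = 4096;

    static int jit_finalize(const unsigned char *insns, size_t len)
    {
            jit_image = mmap(NULL, jit_size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (jit_image == MAP_FAILED)
                    return -1;

            memcpy(jit_image, insns, len);

            /* Equivalent of the lock_ro step: drop write permission. */
            if (mprotect(jit_image, jit_size, PROT_READ | PROT_EXEC) != 0) {
                    /* Unwind: discard the image rather than leaving a
                     * writable buffer that was about to become executable. */
                    munmap(jit_image, jit_size);
                    jit_image = NULL;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char nop = 0x90;        /* placeholder byte */

            printf("finalize: %d\n", jit_finalize(&nop, 1));
            return 0;
    }
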
diff --git a/arch/parisc/video/Makefile b/arch/parisc/video/Makefile
index 16a73cce4661..b5db5b42880f 100644
--- a/arch/parisc/video/Makefile
+++ b/arch/parisc/video/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_STI_CORE) += fbdev.o
+obj-$(CONFIG_STI_CORE) += video-sti.o
diff --git a/arch/parisc/video/fbdev.c b/arch/parisc/video/video-sti.c
index e4f8ac99fc9e..564661e87093 100644
--- a/arch/parisc/video/fbdev.c
+++ b/arch/parisc/video/video-sti.c
@@ -5,12 +5,13 @@
* Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*/
-#include <linux/fb.h>
#include <linux/module.h>
#include <video/sticore.h>
-int fb_is_primary_device(struct fb_info *info)
+#include <asm/video.h>
+
+bool video_is_primary_device(struct device *dev)
{
struct sti_struct *sti;
@@ -21,6 +22,6 @@ int fb_is_primary_device(struct fb_info *info)
return true;
/* return true if it's the default built-in framebuffer driver */
- return (sti->dev == info->device);
+ return (sti->dev == dev);
}
-EXPORT_SYMBOL(fb_is_primary_device);
+EXPORT_SYMBOL(video_is_primary_device);
diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild
index 22cd0d55a892..571f260b0842 100644
--- a/arch/powerpc/Kbuild
+++ b/arch/powerpc/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -Wa,--fatal-warnings
+subdir-asflags-$(CONFIG_PPC_WERROR) := -Wa,--fatal-warnings
obj-y += kernel/
obj-y += mm/
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1c4be3373686..56dcafbb83b3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -156,6 +156,7 @@ config PPC
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAVE_EXTRA_ELF_NOTES if SPU_BASE
select ARCH_KEEP_MEMBLOCK
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if PPC_RADIX_MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -236,7 +237,7 @@ config PPC
select HAVE_DYNAMIC_FTRACE_WITH_REGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
@@ -285,7 +286,7 @@ config PPC
select IOMMU_HELPER if PPC64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
- select KASAN_VMALLOC if KASAN && MODULES
+ select KASAN_VMALLOC if KASAN && EXECMEM
select LOCK_MM_AND_FIND_VMA
select MMU_GATHER_PAGE_SIZE
select MMU_GATHER_RCU_TABLE_FREE
@@ -686,6 +687,10 @@ config ARCH_SELECTS_CRASH_DUMP
depends on CRASH_DUMP
select RELOCATABLE if PPC64 || 44x || PPC_85xx
+config ARCH_SUPPORTS_CRASH_HOTPLUG
+ def_bool y
+ depends on PPC64
+
config FA_DUMP
bool "Firmware-assisted dump"
depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 65261cbe5bfd..0a0c57aee1ae 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -114,7 +114,6 @@ LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
ifdef CONFIG_PPC64
ifndef CONFIG_PPC_KERNEL_PCREL
-ifeq ($(call cc-option-yn,-mcmodel=medium),y)
# -mcmodel=medium breaks modules because it uses 32bit offsets from
# the TOC pointer to create pointers where possible. Pointers into the
# percpu data area are created by this method.
@@ -124,9 +123,6 @@ ifeq ($(call cc-option-yn,-mcmodel=medium),y)
# kernel percpu data space (starting with 0xc...). We need a full
# 64bit relocation for this to work, hence -mcmodel=large.
KBUILD_CFLAGS_MODULE += -mcmodel=large
-else
- export NO_MINIMAL_TOC := -mno-minimal-toc
-endif
endif
endif
@@ -139,7 +135,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
endif
endif
-CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
+CFLAGS-$(CONFIG_PPC64) += -mcmodel=medium
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mlong-double-128)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 968aee2025b8..35f6b15e4c47 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -108,8 +108,8 @@ DTC_FLAGS ?= -p 1024
# these files into the build dir, fix up any includes and ensure that dependent
# files are copied in the right order.
-# these need to be seperate variables because they are copied out of different
-# directories in the kernel tree. Sure you COULd merge them, but it's a
+# these need to be separate variables because they are copied out of different
+# directories in the kernel tree. Sure you COULD merge them, but it's a
# cure-is-worse-than-disease situation.
zlib-decomp-$(CONFIG_KERNEL_GZIP) := decompress_inflate.c
zlib-$(CONFIG_KERNEL_GZIP) := inffast.c inflate.c inftrees.c
@@ -218,7 +218,7 @@ $(addprefix $(obj)/,$(libfdt) $(libfdtheader)): $(obj)/%: $(srctree)/scripts/dtc
$(obj)/empty.c:
$(Q)touch $@
-$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
+$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(src)/%.S
$(Q)cp $< $@
clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \
@@ -252,9 +252,9 @@ targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
-dtstree := $(srctree)/$(src)/dts
+dtstree := $(src)/dts
-wrapper :=$(srctree)/$(src)/wrapper
+wrapper := $(src)/wrapper
wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
$(wrapper) FORCE
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 977eb15a6d17..6835cb53f034 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -101,7 +101,7 @@ static void print_err(char *s)
* @input_size: length of the input buffer
* @outbuf: output buffer
* @output_size: length of the output buffer
- * @skip number of output bytes to ignore
+ * @_skip: number of output bytes to ignore
*
* This function takes compressed data from inbuf, decompresses and write it to
* outbuf. Once output_size bytes are written to the output buffer, or the
diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile
index fb335d05aae8..0cd0d8558b47 100644
--- a/arch/powerpc/boot/dts/Makefile
+++ b/arch/powerpc/boot/dts/Makefile
@@ -2,5 +2,4 @@
subdir-y += fsl
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts
index deb52e41ab84..5fedda811378 100644
--- a/arch/powerpc/boot/dts/acadia.dts
+++ b/arch/powerpc/boot/dts/acadia.dts
@@ -172,7 +172,7 @@
reg = <0xef602800 0x60>;
interrupt-parent = <&UIC0>;
interrupts = <0x4 0x4>;
- /* This thing is a bit weird. It has it's own UIC
+ /* This thing is a bit weird. It has its own UIC
* that it uses to generate snapshot triggers. We
* don't really support this device yet, and it needs
* work to figure this out.
diff --git a/arch/powerpc/boot/dts/fsl/Makefile b/arch/powerpc/boot/dts/fsl/Makefile
index 3bae982641e9..d3ecdf14bc42 100644
--- a/arch/powerpc/boot/dts/fsl/Makefile
+++ b/arch/powerpc/boot/dts/fsl/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 4f044b41a776..fb3200b006ad 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -50,7 +50,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <25 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts b/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts
index 8da984251abc..0ba86a6dce1b 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts
@@ -15,7 +15,7 @@
device_type = "memory";
};
- board_ifc: ifc: ifc@ff71e000 {
+ board_ifc: ifc: memory-controller@ff71e000 {
/* NAND Flash on board */
ranges = <0x0 0x0 0x0 0xff800000 0x00004000>;
reg = <0x0 0xff71e000 0x0 0x2000>;
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
index 2a677fd323eb..5c53cee8755f 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
@@ -35,7 +35,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <16 2 0 0 20 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
index 7cb2158dfe58..ce642e879a1b 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
+++ b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
@@ -15,7 +15,7 @@
device_type = "memory";
};
- ifc: ifc@ff71e000 {
+ ifc: memory-controller@ff71e000 {
/* NOR, NAND Flash on board */
ranges = <0x0 0x0 0x0 0x88000000 0x08000000
0x1 0x0 0x0 0xff800000 0x00010000>;
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
index b8e0edd1ac69..4da451e000d9 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
@@ -35,7 +35,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
/* FIXME: Test whether interrupts are split */
interrupts = <16 2 0 0 20 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/c293pcie.dts b/arch/powerpc/boot/dts/fsl/c293pcie.dts
index 5e905e0857cf..e2fdac2ed420 100644
--- a/arch/powerpc/boot/dts/fsl/c293pcie.dts
+++ b/arch/powerpc/boot/dts/fsl/c293pcie.dts
@@ -42,7 +42,7 @@
device_type = "memory";
};
- ifc: ifc@fffe1e000 {
+ ifc: memory-controller@fffe1e000 {
reg = <0xf 0xffe1e000 0 0x2000>;
ranges = <0x0 0x0 0xf 0xec000000 0x04000000
0x1 0x0 0xf 0xff800000 0x00010000
diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
index f208fb8f64b3..2d443d519274 100644
--- a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
@@ -35,7 +35,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <19 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
index 41935709ebe8..fba40a1bccc0 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
@@ -199,6 +199,10 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-etsec1-0.dtsi"
+ enet0: ethernet@24000 {
+ fsl,wake-on-filer;
+ fsl,pmc-handle = <&etsec1_clk>;
+ };
/include/ "pq3-etsec1-timer-0.dtsi"
usb@22000 {
@@ -222,9 +226,10 @@
};
/include/ "pq3-etsec1-2.dtsi"
-
- ethernet@26000 {
+ enet2: ethernet@26000 {
cell-index = <1>;
+ fsl,wake-on-filer;
+ fsl,pmc-handle = <&etsec3_clk>;
};
usb@2b000 {
@@ -249,4 +254,9 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
+ power@e0070 {
+ compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
index b68eb119faef..ea7416af7ee3 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
@@ -188,4 +188,6 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
index 579d76cb8e32..dddb7374508d 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
@@ -156,4 +156,6 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
index 49294cf36b4e..40a6cff77032 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
@@ -193,4 +193,6 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts b/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts
index 3a94acbb3c03..ce3346d77858 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts
@@ -29,3 +29,19 @@
};
/include/ "p1010si-post.dtsi"
+
+&pci0 {
+ pcie@0 {
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ /*
+ *irq[4:5] are active-high
+ *irq[6:7] are active-low
+ */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts b/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts
index 4cf255fedc96..83590354f9a0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts
@@ -56,3 +56,19 @@
};
/include/ "p1010si-post.dtsi"
+
+&pci0 {
+ pcie@0 {
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ /*
+ *irq[4:5] are active-high
+ *irq[6:7] are active-low
+ */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
index 2ca9cee2ddeb..ef49a7d6c69d 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
@@ -215,19 +215,3 @@
phy-connection-type = "sgmii";
};
};
-
-&pci0 {
- pcie@0 {
- interrupt-map = <
- /* IDSEL 0x0 */
- /*
- *irq[4:5] are active-high
- *irq[6:7] are active-low
- */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
- 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
- >;
- };
-};
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi
index fdc19aab2f70..583a6cd05079 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi
@@ -36,7 +36,7 @@ memory {
device_type = "memory";
};
-board_ifc: ifc: ifc@ffe1e000 {
+board_ifc: ifc: memory-controller@ffe1e000 {
/* NOR, NAND Flashes and CPLD on board */
ranges = <0x0 0x0 0x0 0xee000000 0x02000000
0x1 0x0 0x0 0xff800000 0x00010000
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi
index de2fceed4f79..4d41efe0038f 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi
@@ -36,7 +36,7 @@ memory {
device_type = "memory";
};
-board_ifc: ifc: ifc@fffe1e000 {
+board_ifc: ifc: memory-controller@fffe1e000 {
/* NOR, NAND Flashes and CPLD on board */
ranges = <0x0 0x0 0xf 0xee000000 0x02000000
0x1 0x0 0xf 0xff800000 0x00010000
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index ccda0a91abf0..2d2550729dcc 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -35,7 +35,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <16 2 0 0 19 2 0 0>;
};
@@ -183,9 +183,23 @@
/include/ "pq3-etsec2-1.dtsi"
/include/ "pq3-etsec2-2.dtsi"
+ enet0: ethernet@b0000 {
+ fsl,pmc-handle = <&etsec1_clk>;
+ };
+
+ enet1: ethernet@b1000 {
+ fsl,pmc-handle = <&etsec2_clk>;
+ };
+
+ enet2: ethernet@b2000 {
+ fsl,pmc-handle = <&etsec3_clk>;
+ };
+
global-utilities@e0000 {
compatible = "fsl,p1010-guts";
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index 642dc3a83d0e..cc4c7461003b 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -163,14 +163,17 @@
/include/ "pq3-etsec2-0.dtsi"
enet0: enet0_grp2: ethernet@b0000 {
+ fsl,pmc-handle = <&etsec1_clk>;
};
/include/ "pq3-etsec2-1.dtsi"
enet1: enet1_grp2: ethernet@b1000 {
+ fsl,pmc-handle = <&etsec2_clk>;
};
/include/ "pq3-etsec2-2.dtsi"
enet2: enet2_grp2: ethernet@b2000 {
+ fsl,pmc-handle = <&etsec3_clk>;
};
global-utilities@e0000 {
@@ -178,6 +181,8 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
/include/ "pq3-etsec2-grp2-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index 407cb5fd0f5b..378195db9fca 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -159,14 +159,17 @@
/include/ "pq3-etsec2-0.dtsi"
enet0: enet0_grp2: ethernet@b0000 {
+ fsl,pmc-handle = <&etsec1_clk>;
};
/include/ "pq3-etsec2-1.dtsi"
enet1: enet1_grp2: ethernet@b1000 {
+ fsl,pmc-handle = <&etsec2_clk>;
};
/include/ "pq3-etsec2-2.dtsi"
enet2: enet2_grp2: ethernet@b2000 {
+ fsl,pmc-handle = <&etsec3_clk>;
};
global-utilities@e0000 {
@@ -174,6 +177,8 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+/include/ "pq3-power.dtsi"
};
&qe {
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index 093e4e3ed368..6ac21e81344a 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -225,11 +225,13 @@
/include/ "pq3-etsec2-0.dtsi"
enet0: enet0_grp2: ethernet@b0000 {
fsl,wake-on-filer;
+ fsl,pmc-handle = <&etsec1_clk>;
};
/include/ "pq3-etsec2-1.dtsi"
enet1: enet1_grp2: ethernet@b1000 {
fsl,wake-on-filer;
+ fsl,pmc-handle = <&etsec2_clk>;
};
global-utilities@e0000 {
@@ -238,9 +240,10 @@
fsl,has-rstcr;
};
+/include/ "pq3-power.dtsi"
power@e0070 {
- compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
- reg = <0xe0070 0x20>;
+ compatible = "fsl,p1022-pmc", "fsl,mpc8536-pmc",
+ "fsl,mpc8548-pmc";
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index 81b9ab2119be..d410082d21c0 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -178,6 +178,10 @@
compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
};
/include/ "pq3-etsec1-0.dtsi"
+ enet0: ethernet@24000 {
+ fsl,pmc-handle = <&etsec1_clk>;
+
+ };
/include/ "pq3-etsec1-timer-0.dtsi"
ptp_clock@24e00 {
@@ -186,7 +190,15 @@
/include/ "pq3-etsec1-1.dtsi"
+ enet1: ethernet@25000 {
+ fsl,pmc-handle = <&etsec2_clk>;
+ };
+
/include/ "pq3-etsec1-2.dtsi"
+ enet2: ethernet@26000 {
+ fsl,pmc-handle = <&etsec3_clk>;
+ };
+
/include/ "pq3-esdhc-0.dtsi"
sdhc@2e000 {
compatible = "fsl,p2020-esdhc", "fsl,esdhc";
@@ -202,8 +214,5 @@
fsl,has-rstcr;
};
- pmc: power@e0070 {
- compatible = "fsl,mpc8548-pmc";
- reg = <0xe0070 0x20>;
- };
+/include/ "pq3-power.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-power.dtsi b/arch/powerpc/boot/dts/fsl/pq3-power.dtsi
new file mode 100644
index 000000000000..6af12401004d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-power.dtsi
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0+)
+/*
+ * Copyright 2024 NXP
+ */
+
+power@e0070 {
+ compatible = "fsl,mpc8548-pmc";
+ reg = <0xe0070 0x20>;
+
+ etsec1_clk: soc-clk@24 {
+ fsl,pmcdr-mask = <0x00000080>;
+ };
+ etsec2_clk: soc-clk@25 {
+ fsl,pmcdr-mask = <0x00000040>;
+ };
+ etsec3_clk: soc-clk@26 {
+ fsl,pmcdr-mask = <0x00000020>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index aa5152ca8120..8ef0c020206b 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -52,7 +52,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <25 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 270aaf631f2a..7d003e07a9fb 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -91,7 +91,7 @@
board-control@2,0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "fsl,t1024-cpld";
+ compatible = "fsl,t1024-cpld", "fsl,deepsleep-cpld";
reg = <3 0 0x300>;
ranges = <0 3 0 0x300>;
bank-width = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
index dd3aab81e9de..4347924e9aa7 100644
--- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
@@ -104,7 +104,7 @@
ifc: localbus@ffe124000 {
cpld@3,0 {
- compatible = "fsl,t1040rdb-cpld";
+ compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld";
};
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 776788623204..c9542b73bd7f 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -52,7 +52,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <25 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb.dts b/arch/powerpc/boot/dts/fsl/t1042rdb.dts
index 3ebb712224cb..099764322b33 100644
--- a/arch/powerpc/boot/dts/fsl/t1042rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042rdb.dts
@@ -68,7 +68,7 @@
ifc: localbus@ffe124000 {
cpld@3,0 {
- compatible = "fsl,t1042rdb-cpld";
+ compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld";
};
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
index 8ec3ff45e6fc..b10cab1a347b 100644
--- a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
@@ -41,7 +41,7 @@
ifc: localbus@ffe124000 {
cpld@3,0 {
- compatible = "fsl,t1042rdb_pi-cpld";
+ compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld";
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index 27714dc2f04a..6bb95878d39d 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -50,7 +50,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <25 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index fcac73486d48..65f3e17c0d41 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -50,7 +50,7 @@
&ifc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
+ compatible = "fsl,ifc";
interrupts = <25 2 0 0>;
};
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index cae31a6e8f02..2c0e2a1cab01 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -188,7 +188,7 @@ static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { }
/* A buffer that may be edited by tools operating on a zImage binary so as to
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
- * The buffer is put in it's own section so that tools may locate it easier.
+ * The buffer is put in its own section so that tools may locate it easier.
*/
static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index f157717ae814..89ff46b8b225 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -25,7 +25,7 @@ BSS_STACK(4096);
/* A buffer that may be edited by tools operating on a zImage binary so as to
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
- * The buffer is put in it's own section so that tools may locate it easier.
+ * The buffer is put in its own section so that tools may locate it easier.
*/
static char cmdline[BOOT_COMMAND_LINE_SIZE]
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 7f35d5bc1229..97f4d4851735 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index a98ef6a4abef..50cc59eb36cf 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5c56d36cdfc5..6f449411abf7 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_BUG is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_EPOLL is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 56b876e418e9..77306be62e9e 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -7,7 +7,7 @@ CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 083c2e57520a..383c0966e92f 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_MODULES=y
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
index 74fb86b0d209..7c728755852e 100644
--- a/arch/powerpc/crypto/chacha-p10-glue.c
+++ b/arch/powerpc/crypto/chacha-p10-glue.c
@@ -197,6 +197,9 @@ static struct skcipher_alg algs[] = {
static int __init chacha_p10_init(void)
{
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ return 0;
+
static_branch_enable(&have_p10);
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
@@ -204,10 +207,13 @@ static int __init chacha_p10_init(void)
static void __exit chacha_p10_exit(void)
{
+ if (!static_branch_likely(&have_p10))
+ return;
+
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
+module_init(chacha_p10_init);
module_exit(chacha_p10_exit);
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
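
Aside: the pattern above is "probe the capability in init, register nothing when it is absent (returning 0 keeps the module loadable), and mirror the same check on exit so nothing gets unregistered that was never registered". A trimmed-down sketch of that shape with invented names — it does not use the kernel crypto API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool have_feature;
    static bool registered;

    static bool cpu_has_fast_path(void) { return false; }  /* pretend: absent */

    static int my_init(void)
    {
            if (!cpu_has_fast_path())
                    return 0;          /* loadable, but stays inert */
            have_feature = true;
            registered = true;         /* stands in for registration */
            return 0;
    }

    static void my_exit(void)
    {
            if (!have_feature)
                    return;            /* never registered, nothing to undo */
            registered = false;
    }

    int main(void)
    {
            my_init();
            printf("registered=%d\n", registered);
            my_exit();
            return 0;
    }
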
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 61a8d5555cd7..e5fdc336c9b2 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -6,5 +6,4 @@ generic-y += agp.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
-generic-y += vtime.h
generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index 48f21820afe2..baf934578c3a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -6,26 +6,6 @@
*/
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
-static inline int pmd_huge(pmd_t pmd)
-{
- /*
- * leaf pte for huge page
- */
- if (radix_enabled())
- return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
- return 0;
-}
-
-static inline int pud_huge(pud_t pud)
-{
- /*
- * leaf pte for huge page
- */
- if (radix_enabled())
- return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
- return 0;
-}
-
/*
* With radix , we have hugepage ptes in the pud and pmd entries. We don't
* need to setup hugepage directory for them. Our pte and page directory format
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index ced7ee8b42fc..6ac73da7b80e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -4,31 +4,6 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
-/*
- * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
- * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
- *
- * Defined in such a way that we can optimize away code block at build time
- * if CONFIG_HUGETLB_PAGE=n.
- *
- * returns true for pmd migration entries, THP, devmap, hugetlb
- * But compile time dependent on CONFIG_HUGETLB_PAGE
- */
-static inline int pmd_huge(pmd_t pmd)
-{
- /*
- * leaf pte for huge page
- */
- return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
-}
-
-static inline int pud_huge(pud_t pud)
-{
- /*
- * leaf pte for huge page
- */
- return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
-}
/*
* With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index fac5615e6bc5..8f9432e3855a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -262,6 +262,18 @@ extern unsigned long __kernel_io_end;
extern struct page *vmemmap;
extern unsigned long pci_io_base;
+
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
+{
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
+{
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
#endif /* __ASSEMBLY__ */
#include <asm/book3s/64/hash.h>
@@ -1426,20 +1438,5 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
return false;
}
-/*
- * Like pmd_huge(), but works regardless of config options
- */
-#define pmd_leaf pmd_leaf
-static inline bool pmd_leaf(pmd_t pmd)
-{
- return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
-}
-
-#define pud_leaf pud_leaf
-static inline bool pud_leaf(pud_t pud)
-{
- return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
-}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 727d4b321937..0efabccd820c 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -29,7 +29,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
- if (!static_key_initialized) {
+ if (!static_key_feature_checks_initialized) {
printk("Warning! cpu_has_feature() used prior to jump label init!\n");
dump_stack();
return early_cpu_has_feature(feature);
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 4961fb38e438..aff858ca99c0 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -32,23 +32,10 @@
#ifdef CONFIG_PPC64
#define get_accounting(tsk) (&get_paca()->accounting)
#define raw_get_accounting(tsk) (&local_paca->accounting)
-static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
#else
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
#define raw_get_accounting(tsk) get_accounting(tsk)
-/*
- * Called from the context switch with interrupts disabled, to charge all
- * accumulated times to the current process, and to prepare accounting on
- * the next process.
- */
-static inline void arch_vtime_task_switch(struct task_struct *prev)
-{
- struct cpu_accounting_data *acct = get_accounting(current);
- struct cpu_accounting_data *acct0 = get_accounting(prev);
-
- acct->starttime = acct0->starttime;
-}
#endif
/*
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 514dd056c2c8..91a9fd53254f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -82,7 +82,7 @@ struct eeh_pe {
int false_positives; /* Times of reported #ff's */
atomic_t pass_dev_cnt; /* Count of passed through devs */
struct eeh_pe *parent; /* Parent PE */
- void *data; /* PE auxillary data */
+ void *data; /* PE auxiliary data */
struct list_head child_list; /* List of PEs below this PE */
struct list_head child; /* Memb. child_list/eeh_phb_pe */
struct list_head edevs; /* List of eeh_dev in this PE */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 79f1c480b5eb..bb4b94444d3e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -127,8 +127,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
#define NT_SPU 1
-#define ARCH_HAVE_EXTRA_ELF_NOTES
-
#endif /* CONFIG_SPU_BASE */
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index 27f9e11eda28..e83869a4eb6a 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -42,13 +42,38 @@ static inline u64 fadump_str_to_u64(const char *str)
#define FADUMP_CPU_UNKNOWN (~((u32)0))
-#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPINF")
+/*
+ * The introduction of new fields in the fadump crash info header has
+ * led to a change in the magic key from `FADMPINF` to `FADMPSIG` for
+ * identifying a kernel crash from an old kernel.
+ *
+ * To prevent the need for further changes to the magic number in the
+ * event of future modifications to the fadump crash info header, a
+ * version field has been introduced to track the fadump crash info
+ * header version.
+ *
+ * Consider a few points before adding new members to the fadump crash info
+ * header structure:
+ *
+ * - Append new members; avoid adding them in between.
+ * - Non-primitive members should have a size member as well.
+ * - For every change in the fadump header, increment the
+ * fadump header version. This helps the updated kernel decide how to
+ * handle kernel dumps from older kernels.
+ */
+#define FADUMP_CRASH_INFO_MAGIC_OLD fadump_str_to_u64("FADMPINF")
+#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPSIG")
+#define FADUMP_HEADER_VERSION 1
/* fadump crash info structure */
struct fadump_crash_info_header {
u64 magic_number;
- u64 elfcorehdr_addr;
+ u32 version;
u32 crashing_cpu;
+ u64 vmcoreinfo_raddr;
+ u64 vmcoreinfo_size;
+ u32 pt_regs_sz;
+ u32 cpu_mask_sz;
struct pt_regs regs;
struct cpumask cpu_mask;
};
@@ -94,9 +119,13 @@ struct fw_dump {
u64 boot_mem_regs_cnt;
unsigned long fadumphdr_addr;
+ u64 elfcorehdr_addr;
+ u64 elfcorehdr_size;
unsigned long cpu_notes_buf_vaddr;
unsigned long cpu_notes_buf_size;
+ unsigned long param_area;
+
/*
* Maximum size supported by firmware to copy from source to
* destination address per entry.
@@ -111,6 +140,7 @@ struct fw_dump {
unsigned long dump_active:1;
unsigned long dump_registered:1;
unsigned long nocma:1;
+ unsigned long param_area_supported:1;
struct fadump_ops *ops;
};
@@ -129,6 +159,7 @@ struct fadump_ops {
struct seq_file *m);
void (*fadump_trigger)(struct fadump_crash_info_header *fdh,
const char *msg);
+ int (*fadump_max_boot_mem_rgns)(void);
};
/* Helper functions */
@@ -136,7 +167,6 @@ s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus);
void fadump_free_cpu_notes_buf(void);
u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
void __init fadump_update_elfcore_header(char *bufp);
-bool is_fadump_boot_mem_contiguous(void);
bool is_fadump_reserved_mem_contiguous(void);
#else /* !CONFIG_PRESERVE_FA_DUMP */
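
Aside: the comment block above spells out an append-only, versioned header convention. A compact sketch of how a reader might honor it, using made-up field names rather than the actual fadump layout:

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_MAGIC   0x46414B45u  /* arbitrary magic for the sketch */
    #define HDR_VERSION 2            /* bumped whenever fields are appended */

    struct crash_hdr {
            uint32_t magic;
            uint32_t version;
            /* v1 fields */
            uint64_t regs_addr;
            uint32_t regs_size;      /* non-primitive payloads carry a size */
            /* v2 fields: appended at the end, never inserted in the middle */
            uint64_t vmcoreinfo_addr;
            uint64_t vmcoreinfo_size;
    };

    static int parse_hdr(const struct crash_hdr *h)
    {
            if (h->magic != HDR_MAGIC)
                    return -1;                    /* old magic / not ours */
            printf("v%u: regs@%#llx (%u bytes)\n", (unsigned)h->version,
                   (unsigned long long)h->regs_addr, (unsigned)h->regs_size);
            if (h->version >= 2)                  /* newer writer, more data */
                    printf("vmcoreinfo@%#llx (%llu bytes)\n",
                           (unsigned long long)h->vmcoreinfo_addr,
                           (unsigned long long)h->vmcoreinfo_size);
            return 0;
    }

    int main(void)
    {
            struct crash_hdr h = {
                    .magic = HDR_MAGIC, .version = HDR_VERSION,
                    .regs_addr = 0x1000, .regs_size = 512,
                    .vmcoreinfo_addr = 0x2000, .vmcoreinfo_size = 256,
            };
            return parse_hdr(&h);
    }
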
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 526a6a647312..ef40c9b6972a 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -19,12 +19,14 @@ extern int is_fadump_active(void);
extern int should_fadump_crash(void);
extern void crash_fadump(struct pt_regs *, const char *);
extern void fadump_cleanup(void);
+extern void fadump_append_bootargs(void);
#else /* CONFIG_FA_DUMP */
static inline int is_fadump_active(void) { return 0; }
static inline int should_fadump_crash(void) { return 0; }
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
static inline void fadump_cleanup(void) { }
+static inline void fadump_append_bootargs(void) { }
#endif /* !CONFIG_FA_DUMP */
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 77824bd289a3..17d168dd8b49 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -291,6 +291,8 @@ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+extern bool static_key_feature_checks_initialized;
+
void apply_feature_fixups(void);
void update_mmu_feature_fixups(unsigned long mask);
void setup_feature_keys(void);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a41e542ba94d..7a8495660c2f 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -524,7 +524,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
* Used for all but the craziest of phyp interfaces (see plpar_hcall9)
*/
#define PLPAR_HCALL_BUFSIZE 4
-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
@@ -538,7 +538,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
* plpar_hcall, but plpar_hcall_raw works in real mode and does not
* calculate hypervisor call statistics.
*/
-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
@@ -549,8 +549,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
* PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
*/
#define PLPAR_HCALL9_BUFSIZE 9
-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
@@ -570,7 +570,7 @@ struct hvcall_mpp_data {
unsigned long backing_mem;
};
-int h_get_mpp(struct hvcall_mpp_data *);
+long h_get_mpp(struct hvcall_mpp_data *mpp_data);
struct hvcall_mpp_x_data {
unsigned long coalesced_bytes;
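
Aside: the prototypes above now use the C99 [static N] array-parameter form, which documents — and lets compilers check at call sites, diagnostics permitting — that the callee expects at least PLPAR_HCALL_BUFSIZE valid elements. A small standalone example of the idiom, with a hypothetical fake_hcall() in place of the real hypervisor entry points:

    #include <stdio.h>

    #define RETBUF_SIZE 4

    /* The caller must supply at least RETBUF_SIZE accessible elements. */
    static long fake_hcall(unsigned long opcode,
                           unsigned long retbuf[static RETBUF_SIZE])
    {
            for (int i = 0; i < RETBUF_SIZE; i++)
                    retbuf[i] = opcode + i;
            return 0;
    }

    int main(void)
    {
            unsigned long ok[RETBUF_SIZE];

            /* unsigned long too_small[2];
             * fake_hcall(1, too_small);   <- typically draws a warning */
            fake_hcall(1, ok);
            printf("%lu %lu\n", ok[0], ok[3]);
            return 0;
    }
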
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 7b610864b364..2d6c886b40f4 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
if (IS_ENABLED(CONFIG_KASAN))
return;
+ /*
+ * Likewise, do not use it in real mode if percpu first chunk is not
+ * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
+ * are chances where percpu allocation can come from vmalloc area.
+ */
+ if (percpu_first_chunk_is_paged)
+ return;
+
/* Otherwise, it should be safe to call it */
nmi_enter();
}
@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
// no nmi_exit for a pseries hash guest taking a real mode exception
} else if (IS_ENABLED(CONFIG_KASAN)) {
// no nmi_exit for KASAN in real mode
+ } else if (percpu_first_chunk_is_paged) {
+ // no nmi_exit if percpu first chunk is not embedded
} else {
nmi_exit();
}
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08c550ed49be..52e1b1d15ff6 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -37,7 +37,7 @@ extern struct pci_dev *isa_bridge_pcidev;
* define properly based on the platform
*/
#ifndef CONFIG_PCI
-#define _IO_BASE 0
+#define _IO_BASE POISON_POINTER_DELTA
#define _ISA_MEM_BASE 0
#define PCI_DRAM_OFFSET 0
#elif defined(CONFIG_PPC32)
@@ -585,12 +585,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_inw(port) _rec_inw(port)
#define __do_inl(port) _rec_inl(port)
#else /* CONFIG_PPC32 */
-#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_inb(port) readb((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inw(port) readw((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inl(port) readl((PCI_IO_ADDR)(_IO_BASE + port));
#endif /* !CONFIG_PPC32 */
#ifdef CONFIG_EEH
@@ -606,12 +606,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
-#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
#define __do_memset_io(addr, c, n) \
_memset_io(PCI_FIX_ADDR(addr), c, n)
@@ -982,7 +982,7 @@ static inline phys_addr_t page_to_phys(struct page *page)
}
/*
- * 32 bits still uses virt_to_bus() for it's implementation of DMA
+ * 32 bits still uses virt_to_bus() for its implementation of DMA
* mappings se we have to keep it defined here. We also have some old
* drivers (shame shame shame) that use bus_to_virt() and haven't been
* fixed yet so I need to define it here.
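
Aside on the __do_outb()/__do_insb() hunks above: a cast binds more tightly than '+', so whether the addition happens in integer arithmetic or in pointer arithmetic depends on those parentheses, and for a typed pointer the two forms differ by a sizeof() scale factor. A neutral demonstration of the precedence rule (pointers are only formed, never dereferenced; this shows the C rule, not the exact powerpc failure mode):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t base = 0x1000;
            uintptr_t off  = 4;

            /* cast first, then pointer arithmetic: 0x1000 + 4*sizeof(uint32_t) */
            uint32_t *scaled   = (uint32_t *)base + off;
            /* add the integers first, then form the pointer: 0x1000 + 4 */
            uint32_t *bytewise = (uint32_t *)(base + off);

            printf("scaled=%p bytewise=%p\n", (void *)scaled, (void *)bytewise);
            return 0;
    }
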
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 365d2720097c..b5bbb94c51f6 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -19,7 +19,7 @@
#define KASAN_SHADOW_SCALE_SHIFT 3
-#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
+#if defined(CONFIG_EXECMEM) && defined(CONFIG_PPC32)
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index fdb90e24dc74..95a98b390d62 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -135,6 +135,17 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
ppc_save_regs(newregs);
}
+#ifdef CONFIG_CRASH_HOTPLUG
+void arch_crash_handle_hotplug_event(struct kimage *image, void *arg);
+#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event
+
+int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags);
+#define arch_crash_hotplug_support arch_crash_hotplug_support
+
+unsigned int arch_crash_get_elfcorehdr_size(void);
+#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
+#endif /* CONFIG_CRASH_HOTPLUG */
+
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *regs);
@@ -185,6 +196,10 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
#endif /* CONFIG_CRASH_DUMP */
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
+int update_cpus_node(void *fdt);
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>
#endif
diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h
index f83866a19e87..14055896cbcb 100644
--- a/arch/powerpc/include/asm/kexec_ranges.h
+++ b/arch/powerpc/include/asm/kexec_ranges.h
@@ -7,19 +7,9 @@
void sort_memory_ranges(struct crash_mem *mrngs, bool merge);
struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
-int add_tce_mem_ranges(struct crash_mem **mem_ranges);
-int add_initrd_mem_range(struct crash_mem **mem_ranges);
-#ifdef CONFIG_PPC_64S_HASH_MMU
-int add_htab_mem_range(struct crash_mem **mem_ranges);
-#else
-static inline int add_htab_mem_range(struct crash_mem **mem_ranges)
-{
- return 0;
-}
-#endif
-int add_kernel_mem_range(struct crash_mem **mem_ranges);
-int add_rtas_mem_range(struct crash_mem **mem_ranges);
-int add_opal_mem_range(struct crash_mem **mem_ranges);
-int add_reserved_mem_ranges(struct crash_mem **mem_ranges);
-
+int remove_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
+int get_exclude_memory_ranges(struct crash_mem **mem_ranges);
+int get_reserved_memory_ranges(struct crash_mem **mem_ranges);
+int get_crash_memory_ranges(struct crash_mem **mem_ranges);
+int get_usable_memory_ranges(struct crash_mem **mem_ranges);
#endif /* _ASM_POWERPC_KEXEC_RANGES_H */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 3281215097cc..ca3829d47ab7 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -287,7 +287,6 @@ struct kvmppc_ops {
bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
- bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
void (*free_memslot)(struct kvm_memory_slot *slot);
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 3b72c7ed24cf..8a27b046c6a2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -251,7 +251,7 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
- if (!static_key_initialized) {
+ if (!static_key_feature_checks_initialized) {
printk("Warning! mmu_has_feature() used prior to jump label init!\n");
dump_stack();
return early_mmu_has_feature(feature);
@@ -406,9 +406,5 @@ extern void *abatron_pteptrs[2];
#include <asm/nohash/mmu.h>
#endif
-#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index a8e2e8339fb7..300c777cc307 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -48,11 +48,6 @@ struct mod_arch_specific {
unsigned long tramp;
unsigned long tramp_regs;
#endif
-
- /* List of BUG addresses, source line numbers and filenames */
- struct list_head bug_list;
- struct bug_entry *bug_table;
- unsigned int num_bugs;
};
/*
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 427db14292c9..f5f39d4f03c8 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -351,16 +351,6 @@ static inline int hugepd_ok(hugepd_t hpd)
#endif
}
-static inline int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-static inline int pud_huge(pud_t pud)
-{
- return 0;
-}
-
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index a2bc4b95e703..8c9d4b26bf57 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1027,10 +1027,10 @@ struct opal_i2c_request {
* The host will pass on OPAL, a buffer of length OPAL_SYSEPOW_MAX
* with individual elements being 16 bits wide to fetch the system
* wide EPOW status. Each element in the buffer will contain the
- * EPOW status in it's bit representation for a particular EPOW sub
+ * EPOW status in its bit representation for a particular EPOW sub
* class as defined here. So multiple detailed EPOW status bits
* specific for any sub class can be represented in a single buffer
- * element as it's bit representation.
+ * element as its bit representation.
*/
/* System EPOW type */
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 8e5b7d0b851c..634970ce13c6 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -15,6 +15,16 @@
#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */
+#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+#define percpu_first_chunk_is_paged \
+ (static_key_enabled(&__percpu_first_chunk_is_paged.key))
+#else
+#define percpu_first_chunk_is_paged false
+#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK && CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#include <asm/paca.h>
diff --git a/arch/powerpc/include/asm/plpks.h b/arch/powerpc/include/asm/plpks.h
index 23b77027c916..7a84069759b0 100644
--- a/arch/powerpc/include/asm/plpks.h
+++ b/arch/powerpc/include/asm/plpks.h
@@ -44,9 +44,8 @@
#define PLPKS_MAX_DATA_SIZE 4000
// Timeouts for PLPKS operations
-#define PLPKS_MAX_TIMEOUT 5000 // msec
-#define PLPKS_FLUSH_SLEEP 10 // msec
-#define PLPKS_FLUSH_SLEEP_RANGE 400
+#define PLPKS_MAX_TIMEOUT (5 * USEC_PER_SEC)
+#define PLPKS_FLUSH_SLEEP 10000 // usec
struct plpks_var {
char *component;
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 2495866f2e97..420e2878ae67 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -192,7 +192,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
/* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
* enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
- * it's reset line
+ * its reset line
*/
#define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 005601243dda..076ae60b4a55 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -510,6 +510,7 @@
#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LHA(r, base, i) (0xa8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
@@ -532,6 +533,7 @@
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_DIVW(d, a, b) (0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -550,6 +552,8 @@
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_EXTSB(d, a) (0x7c000774 | ___PPC_RA(d) | ___PPC_RS(a))
+#define PPC_RAW_EXTSH(d, a) (0x7c000734 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
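The PPC_RAW_LHA/PPC_RAW_DIVW/PPC_RAW_EXTSB/PPC_RAW_EXTSH additions above follow the existing encoding pattern: a fixed opcode word OR'd with 5-bit register fields. A standalone sketch of that composition (the RT/RA/RB macros below are local stand-ins for ___PPC_RT/___PPC_RA/___PPC_RB, shown only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Local stand-ins for ___PPC_RT/___PPC_RA/___PPC_RB (bits 21, 16, 11). */
	#define RT(r)	(((uint32_t)(r) & 0x1f) << 21)
	#define RA(r)	(((uint32_t)(r) & 0x1f) << 16)
	#define RB(r)	(((uint32_t)(r) & 0x1f) << 11)

	/* Same shape as PPC_RAW_DIVW() above: base opcode | register fields. */
	#define RAW_DIVW(d, a, b)	(0x7c0003d6u | RT(d) | RA(a) | RB(b))

	int main(void)
	{
		/* Encodes "divw r3, r4, r5" as a 32-bit instruction word. */
		printf("divw r3,r4,r5 = 0x%08x\n", RAW_DIVW(3, 4, 5));
		return 0;
	}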
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index b2c51d337e60..e44cac0da346 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -260,7 +260,8 @@ struct thread_struct {
unsigned long sier2;
unsigned long sier3;
unsigned long hashkeyr;
-
+ unsigned long dexcr;
+ unsigned long dexcr_onexec; /* Reset value to load on exec */
#endif
};
@@ -333,6 +334,16 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define PPC_GET_DEXCR_ASPECT(tsk, asp) get_dexcr_prctl((tsk), (asp))
+#define PPC_SET_DEXCR_ASPECT(tsk, asp, val) set_dexcr_prctl((tsk), (asp), (val))
+
+int get_dexcr_prctl(struct task_struct *tsk, unsigned long asp);
+int set_dexcr_prctl(struct task_struct *tsk, unsigned long asp, unsigned long val);
+
+#endif
+
extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d3d1aea009b4..eed33cb916d0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -615,7 +615,7 @@
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
#endif
-#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_HID2_750FX 0x3F8 /* IBM 750FX HID2 Register */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_HID2_G2_LE 0x3F3 /* G2_LE HID2 Register */
#define HID2_G2_LE_HBE (1<<18) /* High BAT Enable (G2_LE) */
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
index e278299b9b37..6949b5daa37d 100644
--- a/arch/powerpc/include/asm/uninorth.h
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -144,7 +144,7 @@
#define UNI_N_HWINIT_STATE_SLEEPING 0x01
#define UNI_N_HWINIT_STATE_RUNNING 0x02
/* This last bit appear to be used by the bootROM to know the second
- * CPU has started and will enter it's sleep loop with IP=0
+ * CPU has started and will enter its sleep loop with IP=0
*/
#define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index f0a4cf01e85c..c6390890a60c 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -4,7 +4,6 @@
#ifndef __ASSEMBLY__
-#include <asm/page.h>
#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
@@ -14,6 +13,17 @@
#define VDSO_HAS_TIME 1
+/*
+ * powerpc specific delta calculation.
+ *
+ * This variant removes the masking of the subtraction because the
+ * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
+ * which would result in a pointless operation. The compiler cannot
+ * optimize it away as the mask comes from the vdso data and is not compile
+ * time constant.
+ */
+#define VDSO_DELTA_NOMASK 1
+
static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
const unsigned long _r4)
{
@@ -95,7 +105,7 @@ const struct vdso_data *__arch_get_vdso_data(void);
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
- return (void *)vd + PAGE_SIZE;
+ return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
}
#endif
@@ -105,21 +115,6 @@ static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
}
#define vdso_clocksource_ok vdso_clocksource_ok
-/*
- * powerpc specific delta calculation.
- *
- * This variant removes the masking of the subtraction because the
- * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
- * which would result in a pointless operation. The compiler cannot
- * optimize it away as the mask comes from the vdso data and is not compile
- * time constant.
- */
-static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
-{
- return (cycles - last) * mult;
-}
-#define vdso_calc_delta vdso_calc_delta
-
#ifndef __powerpc64__
static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
{
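For context on the VDSO_DELTA_NOMASK switch above: the open-coded vdso_calc_delta() being removed and the generic masked form agree whenever the clocksource mask is U64_MAX, which is what lets the powerpc override collapse to a flag the generic vDSO code is expected to honour. A small user-space sketch of that equivalence (illustrative only, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	/* Generic form: mask the cycle delta before scaling. */
	static uint64_t delta_masked(uint64_t cycles, uint64_t last,
				     uint64_t mask, uint32_t mult)
	{
		return ((cycles - last) & mask) * (uint64_t)mult;
	}

	/* powerpc form: no mask, as selected by VDSO_DELTA_NOMASK. */
	static uint64_t delta_nomask(uint64_t cycles, uint64_t last, uint32_t mult)
	{
		return (cycles - last) * (uint64_t)mult;
	}

	int main(void)
	{
		/* With mask == U64_MAX the two computations are identical. */
		assert(delta_masked(123456789ULL, 123450000ULL, UINT64_MAX, 7) ==
		       delta_nomask(123456789ULL, 123450000ULL, 7));
		return 0;
	}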
diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/video.h
index c0c5d1df7ad1..e1770114ffc3 100644
--- a/arch/powerpc/include/asm/fb.h
+++ b/arch/powerpc/include/asm/video.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
#include <asm/page.h>
@@ -12,6 +12,6 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
}
#define pgprot_framebuffer pgprot_framebuffer
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _ASM_FB_H_ */
+#endif /* _ASM_VIDEO_H_ */
diff --git a/arch/powerpc/include/uapi/asm/bootx.h b/arch/powerpc/include/uapi/asm/bootx.h
index 6728c7e24e58..1b8c121071d9 100644
--- a/arch/powerpc/include/uapi/asm/bootx.h
+++ b/arch/powerpc/include/uapi/asm/bootx.h
@@ -108,7 +108,7 @@ typedef struct boot_infos
/* ALL BELOW NEW (vers. 4) */
/* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
- (non-PCI) only. On PCI, memory is contiguous and it's size is in the
+ (non-PCI) only. On PCI, memory is contiguous and its size is in the
device-tree. */
boot_info_map_entry_t
physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index d3282fbea4f2..8585d03c02d3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,9 +3,6 @@
# Makefile for the linux kernel.
#
-ifdef CONFIG_PPC64
-CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
-endif
ifdef CONFIG_PPC32
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
@@ -87,6 +84,7 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
+obj-$(CONFIG_PPC_BOOK3S_64) += dexcr.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
@@ -190,9 +188,6 @@ GCOV_PROFILE_kprobes-ftrace.o := n
KCOV_INSTRUMENT_kprobes-ftrace.o := n
KCSAN_SANITIZE_kprobes-ftrace.o := n
UBSAN_SANITIZE_kprobes-ftrace.o := n
-GCOV_PROFILE_syscall_64.o := n
-KCOV_INSTRUMENT_syscall_64.o := n
-UBSAN_SANITIZE_syscall_64.o := n
UBSAN_SANITIZE_vdso.o := n
# Necessary for booting with kcov enabled on book3e machines
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index bfd3f442e5eb..ab3ca74e6730 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -401,7 +401,7 @@ _GLOBAL(__save_cpu_setup)
andi. r3,r3,0xff00
cmpwi cr0,r3,0x0200
bne 1f
- mfspr r4,SPRN_HID2
+ mfspr r4,SPRN_HID2_750FX
stw r4,CS_HID2(r5)
1:
mtcr r7
@@ -496,7 +496,7 @@ _GLOBAL(__restore_cpu_setup)
bne 4f
lwz r4,CS_HID2(r5)
rlwinm r4,r4,0,19,17
- mtspr SPRN_HID2,r4
+ mtspr SPRN_HID2_750FX,r4
sync
4:
lwz r4,CS_HID1(r5)
diff --git a/arch/powerpc/kernel/dexcr.c b/arch/powerpc/kernel/dexcr.c
new file mode 100644
index 000000000000..3a0358e91c60
--- /dev/null
+++ b/arch/powerpc/kernel/dexcr.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/capability.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
+
+#include <asm/cpu_has_feature.h>
+#include <asm/cputable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+
+static int __init init_task_dexcr(void)
+{
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_31))
+ return 0;
+
+ current->thread.dexcr_onexec = mfspr(SPRN_DEXCR);
+
+ return 0;
+}
+early_initcall(init_task_dexcr)
+
+/* Allow thread local configuration of these by default */
+#define DEXCR_PRCTL_EDITABLE ( \
+ DEXCR_PR_IBRTPD | \
+ DEXCR_PR_SRAPD | \
+ DEXCR_PR_NPHIE)
+
+static int prctl_to_aspect(unsigned long which, unsigned int *aspect)
+{
+ switch (which) {
+ case PR_PPC_DEXCR_SBHE:
+ *aspect = DEXCR_PR_SBHE;
+ break;
+ case PR_PPC_DEXCR_IBRTPD:
+ *aspect = DEXCR_PR_IBRTPD;
+ break;
+ case PR_PPC_DEXCR_SRAPD:
+ *aspect = DEXCR_PR_SRAPD;
+ break;
+ case PR_PPC_DEXCR_NPHIE:
+ *aspect = DEXCR_PR_NPHIE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int get_dexcr_prctl(struct task_struct *task, unsigned long which)
+{
+ unsigned int aspect;
+ int ret;
+
+ ret = prctl_to_aspect(which, &aspect);
+ if (ret)
+ return ret;
+
+ if (aspect & DEXCR_PRCTL_EDITABLE)
+ ret |= PR_PPC_DEXCR_CTRL_EDITABLE;
+
+ if (aspect & mfspr(SPRN_DEXCR))
+ ret |= PR_PPC_DEXCR_CTRL_SET;
+ else
+ ret |= PR_PPC_DEXCR_CTRL_CLEAR;
+
+ if (aspect & task->thread.dexcr_onexec)
+ ret |= PR_PPC_DEXCR_CTRL_SET_ONEXEC;
+ else
+ ret |= PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC;
+
+ return ret;
+}
+
+int set_dexcr_prctl(struct task_struct *task, unsigned long which, unsigned long ctrl)
+{
+ unsigned long dexcr;
+ unsigned int aspect;
+ int err = 0;
+
+ err = prctl_to_aspect(which, &aspect);
+ if (err)
+ return err;
+
+ if (!(aspect & DEXCR_PRCTL_EDITABLE))
+ return -EPERM;
+
+ if (ctrl & ~PR_PPC_DEXCR_CTRL_MASK)
+ return -EINVAL;
+
+ if (ctrl & PR_PPC_DEXCR_CTRL_SET && ctrl & PR_PPC_DEXCR_CTRL_CLEAR)
+ return -EINVAL;
+
+ if (ctrl & PR_PPC_DEXCR_CTRL_SET_ONEXEC && ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC)
+ return -EINVAL;
+
+ /*
+ * We do not want an unprivileged process being able to disable
+ * a setuid process's hash check instructions
+ */
+ if (aspect == DEXCR_PR_NPHIE &&
+ ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC &&
+ !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ dexcr = mfspr(SPRN_DEXCR);
+
+ if (ctrl & PR_PPC_DEXCR_CTRL_SET)
+ dexcr |= aspect;
+ else if (ctrl & PR_PPC_DEXCR_CTRL_CLEAR)
+ dexcr &= ~aspect;
+
+ if (ctrl & PR_PPC_DEXCR_CTRL_SET_ONEXEC)
+ task->thread.dexcr_onexec |= aspect;
+ else if (ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC)
+ task->thread.dexcr_onexec &= ~aspect;
+
+ mtspr(SPRN_DEXCR, dexcr);
+
+ return 0;
+}
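A minimal user-space sketch of the prctl interface that get_dexcr_prctl()/set_dexcr_prctl() back. It assumes the PR_PPC_GET_DEXCR/PR_PPC_SET_DEXCR and PR_PPC_DEXCR_* constants from this series' uapi <linux/prctl.h>; on older headers the constants would need to be defined by hand:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>	/* PR_PPC_*DEXCR* from a kernel carrying this series */

	int main(void)
	{
		/* Returns the PR_PPC_DEXCR_CTRL_* flags for the NPHIE aspect. */
		int ctrl = prctl(PR_PPC_GET_DEXCR, PR_PPC_DEXCR_NPHIE, 0, 0, 0);

		if (ctrl < 0) {
			perror("PR_PPC_GET_DEXCR");
			return 1;
		}

		/* Enable hashst/hashchk checking for this thread, if permitted. */
		if ((ctrl & PR_PPC_DEXCR_CTRL_EDITABLE) &&
		    prctl(PR_PPC_SET_DEXCR, PR_PPC_DEXCR_NPHIE,
			  PR_PPC_DEXCR_CTRL_SET, 0, 0))
			perror("PR_PPC_SET_DEXCR");

		printf("NPHIE control flags: 0x%x\n", ctrl);
		return 0;
	}

Note that the set path above also refuses PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC for NPHIE without CAP_SYS_ADMIN, so clearing hash checking across exec remains privileged.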
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 8920862ffd79..f0ae39e77e37 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = {
.get_required_mask = dma_iommu_get_required_mask,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ab316e155ea9..6670063a7a6c 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -506,9 +506,18 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
* We will punt with the following conditions: Failure to get
* PE's state, EEH not support and Permanently unavailable
* state, PE is in good state.
+ *
+ * On pSeries, after reaching the threshold, get_state might
+ * return EEH_STATE_NOT_SUPPORT. However, it's possible that the
+ * device state remains uncleared if the device is not marked
+ * pci_channel_io_perm_failure. Therefore, consider logging the
+ * event to let device removal happen.
+ *
*/
if ((ret < 0) ||
- (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
+ (ret == EEH_STATE_NOT_SUPPORT &&
+ dev->error_state == pci_channel_io_perm_failure) ||
+ eeh_state_active(ret)) {
eeh_stats.false_positives++;
pe->false_positives++;
rc = 0;
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 48773d2d9be3..7efe04c68f0f 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -865,9 +865,18 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
devices++;
if (!devices) {
- pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
+ pr_warn("EEH: Frozen PHB#%x-PE#%x is empty!\n",
pe->phb->global_number, pe->addr);
- goto out; /* nothing to recover */
+ /*
+ * The device has been removed, so tear down its state. On powernv
+ * the hotplug driver would take care of it, but not on pseries:
+ * permanently disable the card as it has been hot removed.
+ *
+ * In the case of powernv, note that device removal is covered by
+ * the PCI rescan lock, so there is no problem even if the hotplug
+ * driver attempts to remove the device.
+ */
+ goto recover_failed;
}
/* Log the event */
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index e0ce81279624..d1030bc52564 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -24,10 +24,10 @@ static int eeh_pe_aux_size = 0;
static LIST_HEAD(eeh_phb_pe);
/**
- * eeh_set_pe_aux_size - Set PE auxillary data size
- * @size: PE auxillary data size
+ * eeh_set_pe_aux_size - Set PE auxiliary data size
+ * @size: PE auxiliary data size in bytes
*
- * Set PE auxillary data size
+ * Set PE auxiliary data size.
*/
void eeh_set_pe_aux_size(int size)
{
@@ -527,7 +527,7 @@ EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
* eeh_pe_mark_isolated
* @pe: EEH PE
*
- * Record that a PE has been isolated by marking the PE and it's children as
+ * Record that a PE has been isolated by marking the PE and its children as
* EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
* as pci_channel_io_frozen.
*/
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index d14eda1e8589..a612e7513a4f 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -53,8 +53,6 @@ static struct kobject *fadump_kobj;
static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);
-static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
-
#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
sizeof(struct fadump_memory_range))
@@ -133,6 +131,41 @@ static int __init fadump_cma_init(void)
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */
+/*
+ * Additional parameters meant for the capture kernel are placed in a dedicated area.
+ * If this is a capture kernel boot, append these parameters to bootargs.
+ */
+void __init fadump_append_bootargs(void)
+{
+ char *append_args;
+ size_t len;
+
+ if (!fw_dump.dump_active || !fw_dump.param_area_supported || !fw_dump.param_area)
+ return;
+
+ if (fw_dump.param_area >= fw_dump.boot_mem_top) {
+ if (memblock_reserve(fw_dump.param_area, COMMAND_LINE_SIZE)) {
+ pr_warn("WARNING: Can't use additional parameters area!\n");
+ fw_dump.param_area = 0;
+ return;
+ }
+ }
+
+ append_args = (char *)fw_dump.param_area;
+ len = strlen(boot_command_line);
+
+ /*
+ * Too late to fail even if the cmdline size limit is exceeded. Truncate the
+ * additional parameters to the cmdline size and proceed anyway.
+ */
+ if (len + strlen(append_args) >= COMMAND_LINE_SIZE - 1)
+ pr_warn("WARNING: Appending parameters exceeds cmdline size. Truncating!\n");
+
+ pr_debug("Cmdline: %s\n", boot_command_line);
+ snprintf(boot_command_line + len, COMMAND_LINE_SIZE - len, " %s", append_args);
+ pr_info("Updated cmdline: %s\n", boot_command_line);
+}
+
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
@@ -223,28 +256,6 @@ static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
}
/*
- * Returns true, if there are no holes in boot memory area,
- * false otherwise.
- */
-bool is_fadump_boot_mem_contiguous(void)
-{
- unsigned long d_start, d_end;
- bool ret = false;
- int i;
-
- for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
- d_start = fw_dump.boot_mem_addr[i];
- d_end = d_start + fw_dump.boot_mem_sz[i];
-
- ret = is_fadump_mem_area_contiguous(d_start, d_end);
- if (!ret)
- break;
- }
-
- return ret;
-}
-
-/*
* Returns true, if there are no holes in reserved memory area,
* false otherwise.
*/
@@ -373,12 +384,6 @@ static unsigned long __init get_fadump_area_size(void)
size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
- size += sizeof(struct elfhdr); /* ELF core header.*/
- size += sizeof(struct elf_phdr); /* place holder for cpu notes */
- /* Program headers for crash memory regions. */
- size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
-
- size = PAGE_ALIGN(size);
/* This is to hold kernel metadata on platforms that support it */
size += (fw_dump.ops->fadump_get_metadata_size ?
@@ -389,10 +394,11 @@ static unsigned long __init get_fadump_area_size(void)
static int __init add_boot_mem_region(unsigned long rstart,
unsigned long rsize)
{
+ int max_boot_mem_rgns = fw_dump.ops->fadump_max_boot_mem_rgns();
int i = fw_dump.boot_mem_regs_cnt++;
- if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
- fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
+ if (fw_dump.boot_mem_regs_cnt > max_boot_mem_rgns) {
+ fw_dump.boot_mem_regs_cnt = max_boot_mem_rgns;
return 0;
}
@@ -573,22 +579,6 @@ int __init fadump_reserve_mem(void)
}
}
- /*
- * Calculate the memory boundary.
- * If memory_limit is less than actual memory boundary then reserve
- * the memory for fadump beyond the memory_limit and adjust the
- * memory_limit accordingly, so that the running kernel can run with
- * specified memory_limit.
- */
- if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
- size = get_fadump_area_size();
- if ((memory_limit + size) < memblock_end_of_DRAM())
- memory_limit += size;
- else
- memory_limit = memblock_end_of_DRAM();
- printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
- " dump, now %#016llx\n", memory_limit);
- }
if (memory_limit)
mem_boundary = memory_limit;
else
@@ -705,7 +695,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
* old_cpu == -1 means this is the first CPU which has come here,
* go ahead and trigger fadump.
*
- * old_cpu != -1 means some other CPU has already on it's way
+ * old_cpu != -1 means some other CPU has already on its way
* to trigger fadump, just keep looping here.
*/
this_cpu = smp_processor_id();
@@ -931,36 +921,6 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
return 0;
}
-static int fadump_exclude_reserved_area(u64 start, u64 end)
-{
- u64 ra_start, ra_end;
- int ret = 0;
-
- ra_start = fw_dump.reserve_dump_area_start;
- ra_end = ra_start + fw_dump.reserve_dump_area_size;
-
- if ((ra_start < end) && (ra_end > start)) {
- if ((start < ra_start) && (end > ra_end)) {
- ret = fadump_add_mem_range(&crash_mrange_info,
- start, ra_start);
- if (ret)
- return ret;
-
- ret = fadump_add_mem_range(&crash_mrange_info,
- ra_end, end);
- } else if (start < ra_start) {
- ret = fadump_add_mem_range(&crash_mrange_info,
- start, ra_start);
- } else if (ra_end < end) {
- ret = fadump_add_mem_range(&crash_mrange_info,
- ra_end, end);
- }
- } else
- ret = fadump_add_mem_range(&crash_mrange_info, start, end);
-
- return ret;
-}
-
static int fadump_init_elfcore_header(char *bufp)
{
struct elfhdr *elf;
@@ -998,52 +958,6 @@ static int fadump_init_elfcore_header(char *bufp)
}
/*
- * Traverse through memblock structure and setup crash memory ranges. These
- * ranges will be used create PT_LOAD program headers in elfcore header.
- */
-static int fadump_setup_crash_memory_ranges(void)
-{
- u64 i, start, end;
- int ret;
-
- pr_debug("Setup crash memory ranges.\n");
- crash_mrange_info.mem_range_cnt = 0;
-
- /*
- * Boot memory region(s) registered with firmware are moved to
- * different location at the time of crash. Create separate program
- * header(s) for this memory chunk(s) with the correct offset.
- */
- for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
- start = fw_dump.boot_mem_addr[i];
- end = start + fw_dump.boot_mem_sz[i];
- ret = fadump_add_mem_range(&crash_mrange_info, start, end);
- if (ret)
- return ret;
- }
-
- for_each_mem_range(i, &start, &end) {
- /*
- * skip the memory chunk that is already added
- * (0 through boot_memory_top).
- */
- if (start < fw_dump.boot_mem_top) {
- if (end > fw_dump.boot_mem_top)
- start = fw_dump.boot_mem_top;
- else
- continue;
- }
-
- /* add this range excluding the reserved dump area. */
- ret = fadump_exclude_reserved_area(start, end);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
* If the given physical address falls within the boot memory region then
* return the relocated address that points to the dump region reserved
* for saving initial boot memory contents.
@@ -1073,36 +987,50 @@ static inline unsigned long fadump_relocate(unsigned long paddr)
return raddr;
}
-static int fadump_create_elfcore_headers(char *bufp)
+static void __init populate_elf_pt_load(struct elf_phdr *phdr, u64 start,
+ u64 size, unsigned long long offset)
{
- unsigned long long raddr, offset;
- struct elf_phdr *phdr;
+ phdr->p_align = 0;
+ phdr->p_memsz = size;
+ phdr->p_filesz = size;
+ phdr->p_paddr = start;
+ phdr->p_offset = offset;
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R|PF_W|PF_X;
+ phdr->p_vaddr = (unsigned long)__va(start);
+}
+
+static void __init fadump_populate_elfcorehdr(struct fadump_crash_info_header *fdh)
+{
+ char *bufp;
struct elfhdr *elf;
- int i, j;
+ struct elf_phdr *phdr;
+ u64 boot_mem_dest_offset;
+ unsigned long long i, ra_start, ra_end, ra_size, mstart, mend;
+ bufp = (char *) fw_dump.elfcorehdr_addr;
fadump_init_elfcore_header(bufp);
elf = (struct elfhdr *)bufp;
bufp += sizeof(struct elfhdr);
/*
- * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
- * will be populated during second kernel boot after crash. Hence
- * this PT_NOTE will always be the first elf note.
+ * Set up ELF PT_NOTE, a placeholder for CPU notes information.
+ * The notes info will be populated later by platform-specific code.
+ * Hence, this PT_NOTE will always be the first ELF note.
*
* NOTE: Any new ELF note addition should be placed after this note.
*/
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
phdr->p_type = PT_NOTE;
- phdr->p_flags = 0;
- phdr->p_vaddr = 0;
- phdr->p_align = 0;
-
- phdr->p_offset = 0;
- phdr->p_paddr = 0;
- phdr->p_filesz = 0;
- phdr->p_memsz = 0;
-
+ phdr->p_flags = 0;
+ phdr->p_vaddr = 0;
+ phdr->p_align = 0;
+ phdr->p_offset = 0;
+ phdr->p_paddr = 0;
+ phdr->p_filesz = 0;
+ phdr->p_memsz = 0;
+ /* Increment number of program headers. */
(elf->e_phnum)++;
/* setup ELF PT_NOTE for vmcoreinfo */
@@ -1112,55 +1040,66 @@ static int fadump_create_elfcore_headers(char *bufp)
phdr->p_flags = 0;
phdr->p_vaddr = 0;
phdr->p_align = 0;
-
- phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
- phdr->p_offset = phdr->p_paddr;
- phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
-
+ phdr->p_paddr = phdr->p_offset = fdh->vmcoreinfo_raddr;
+ phdr->p_memsz = phdr->p_filesz = fdh->vmcoreinfo_size;
/* Increment number of program headers. */
(elf->e_phnum)++;
- /* setup PT_LOAD sections. */
- j = 0;
- offset = 0;
- raddr = fw_dump.boot_mem_addr[0];
- for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
- u64 mbase, msize;
-
- mbase = crash_mrange_info.mem_ranges[i].base;
- msize = crash_mrange_info.mem_ranges[i].size;
- if (!msize)
- continue;
-
+ /*
+ * Set up PT_LOAD sections: first include the boot memory regions
+ * and then add the rest of the memory regions.
+ */
+ boot_mem_dest_offset = fw_dump.boot_mem_dest_addr;
+ for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
- phdr->p_type = PT_LOAD;
- phdr->p_flags = PF_R|PF_W|PF_X;
- phdr->p_offset = mbase;
-
- if (mbase == raddr) {
- /*
- * The entire real memory region will be moved by
- * firmware to the specified destination_address.
- * Hence set the correct offset.
- */
- phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
- if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
- offset += fw_dump.boot_mem_sz[j];
- raddr = fw_dump.boot_mem_addr[++j];
- }
+ populate_elf_pt_load(phdr, fw_dump.boot_mem_addr[i],
+ fw_dump.boot_mem_sz[i],
+ boot_mem_dest_offset);
+ /* Increment number of program headers. */
+ (elf->e_phnum)++;
+ boot_mem_dest_offset += fw_dump.boot_mem_sz[i];
+ }
+
+ /* Memory reserved for fadump in first kernel */
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_size = get_fadump_area_size();
+ ra_end = ra_start + ra_size;
+
+ phdr = (struct elf_phdr *)bufp;
+ for_each_mem_range(i, &mstart, &mend) {
+ /* Boot memory regions already added, skip them now */
+ if (mstart < fw_dump.boot_mem_top) {
+ if (mend > fw_dump.boot_mem_top)
+ mstart = fw_dump.boot_mem_top;
+ else
+ continue;
}
- phdr->p_paddr = mbase;
- phdr->p_vaddr = (unsigned long)__va(mbase);
- phdr->p_filesz = msize;
- phdr->p_memsz = msize;
- phdr->p_align = 0;
+ /* Handle memblock regions overlaps with fadump reserved area */
+ if ((ra_start < mend) && (ra_end > mstart)) {
+ if ((mstart < ra_start) && (mend > ra_end)) {
+ populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart);
+ /* Increment number of program headers. */
+ (elf->e_phnum)++;
+ bufp += sizeof(struct elf_phdr);
+ phdr = (struct elf_phdr *)bufp;
+ populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end);
+ } else if (mstart < ra_start) {
+ populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart);
+ } else if (ra_end < mend) {
+ populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end);
+ }
+ } else {
+ /* No overlap with fadump reserved memory region */
+ populate_elf_pt_load(phdr, mstart, mend - mstart, mstart);
+ }
/* Increment number of program headers. */
(elf->e_phnum)++;
+ bufp += sizeof(struct elf_phdr);
+ phdr = (struct elf_phdr *) bufp;
}
- return 0;
}
static unsigned long init_fadump_header(unsigned long addr)
@@ -1175,14 +1114,25 @@ static unsigned long init_fadump_header(unsigned long addr)
memset(fdh, 0, sizeof(struct fadump_crash_info_header));
fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
- fdh->elfcorehdr_addr = addr;
+ fdh->version = FADUMP_HEADER_VERSION;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+
+ /*
+ * The physical address and size of vmcoreinfo are required in the
+ * second kernel to prepare elfcorehdr.
+ */
+ fdh->vmcoreinfo_raddr = fadump_relocate(paddr_vmcoreinfo_note());
+ fdh->vmcoreinfo_size = VMCOREINFO_NOTE_SIZE;
+
+
+ fdh->pt_regs_sz = sizeof(struct pt_regs);
/*
* When LPAR is terminated by PYHP, ensure all possible CPUs'
* register data is processed while exporting the vmcore.
*/
fdh->cpu_mask = *cpu_possible_mask;
+ fdh->cpu_mask_sz = sizeof(struct cpumask);
return addr;
}
@@ -1190,8 +1140,6 @@ static unsigned long init_fadump_header(unsigned long addr)
static int register_fadump(void)
{
unsigned long addr;
- void *vaddr;
- int ret;
/*
* If no memory is reserved then we can not register for firmware-
@@ -1200,18 +1148,10 @@ static int register_fadump(void)
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
- ret = fadump_setup_crash_memory_ranges();
- if (ret)
- return ret;
-
addr = fw_dump.fadumphdr_addr;
/* Initialize fadump crash info header. */
addr = init_fadump_header(addr);
- vaddr = __va(addr);
-
- pr_debug("Creating ELF core headers at %#016lx\n", addr);
- fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -1230,7 +1170,6 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fw_dump.ops->fadump_unregister(&fw_dump);
- fadump_free_mem_ranges(&crash_mrange_info);
}
if (fw_dump.ops->fadump_cleanup)
@@ -1416,6 +1355,22 @@ static void fadump_release_memory(u64 begin, u64 end)
fadump_release_reserved_area(tstart, end);
}
+static void fadump_free_elfcorehdr_buf(void)
+{
+ if (fw_dump.elfcorehdr_addr == 0 || fw_dump.elfcorehdr_size == 0)
+ return;
+
+ /*
+ * Before freeing the memory of `elfcorehdr`, reset the global
+ * `elfcorehdr_addr` to prevent modules like `vmcore` from accessing
+ * invalid memory.
+ */
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ fadump_free_buffer(fw_dump.elfcorehdr_addr, fw_dump.elfcorehdr_size);
+ fw_dump.elfcorehdr_addr = 0;
+ fw_dump.elfcorehdr_size = 0;
+}
+
static void fadump_invalidate_release_mem(void)
{
mutex_lock(&fadump_mutex);
@@ -1427,6 +1382,7 @@ static void fadump_invalidate_release_mem(void)
fadump_cleanup();
mutex_unlock(&fadump_mutex);
+ fadump_free_elfcorehdr_buf();
fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
fadump_free_cpu_notes_buf();
@@ -1484,6 +1440,18 @@ static ssize_t enabled_show(struct kobject *kobj,
return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}
+/*
+ * /sys/kernel/fadump/hotplug_ready sysfs node returns 1, which indicates
+ * to userspace that fadump re-registration is not required on memory
+ * hotplug events.
+ */
+static ssize_t hotplug_ready_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", 1);
+}
+
static ssize_t mem_reserved_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -1498,6 +1466,43 @@ static ssize_t registered_show(struct kobject *kobj,
return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
+static ssize_t bootargs_append_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", (char *)__va(fw_dump.param_area));
+}
+
+static ssize_t bootargs_append_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *params;
+
+ if (!fw_dump.fadump_enabled || fw_dump.dump_active)
+ return -EPERM;
+
+ if (count >= COMMAND_LINE_SIZE)
+ return -EINVAL;
+
+ /*
+ * Fail here instead of handling this scenario with
+ * some silly workaround in capture kernel.
+ */
+ if (saved_command_line_len + count >= COMMAND_LINE_SIZE) {
+ pr_err("Appending parameters exceeds cmdline size!\n");
+ return -ENOSPC;
+ }
+
+ params = __va(fw_dump.param_area);
+ strscpy_pad(params, buf, COMMAND_LINE_SIZE);
+ /* Remove newline character at the end. */
+ if (params[count-1] == '\n')
+ params[count-1] = '\0';
+
+ return count;
+}
+
static ssize_t registered_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -1556,11 +1561,14 @@ static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
static struct kobj_attribute register_attr = __ATTR_RW(registered);
static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
+static struct kobj_attribute hotplug_ready_attr = __ATTR_RO(hotplug_ready);
+static struct kobj_attribute bootargs_append_attr = __ATTR_RW(bootargs_append);
static struct attribute *fadump_attrs[] = {
&enable_attr.attr,
&register_attr.attr,
&mem_reserved_attr.attr,
+ &hotplug_ready_attr.attr,
NULL,
};
@@ -1632,6 +1640,150 @@ static void __init fadump_init_files(void)
return;
}
+static int __init fadump_setup_elfcorehdr_buf(void)
+{
+ int elf_phdr_cnt;
+ unsigned long elfcorehdr_size;
+
+ /*
+ * Program header for CPU notes comes first, followed by one for
+ * vmcoreinfo, and the remaining program headers correspond to
+ * memory regions.
+ */
+ elf_phdr_cnt = 2 + fw_dump.boot_mem_regs_cnt + memblock_num_regions(memory);
+ elfcorehdr_size = sizeof(struct elfhdr) + (elf_phdr_cnt * sizeof(struct elf_phdr));
+ elfcorehdr_size = PAGE_ALIGN(elfcorehdr_size);
+
+ fw_dump.elfcorehdr_addr = (u64)fadump_alloc_buffer(elfcorehdr_size);
+ if (!fw_dump.elfcorehdr_addr) {
+ pr_err("Failed to allocate %lu bytes for elfcorehdr\n",
+ elfcorehdr_size);
+ return -ENOMEM;
+ }
+ fw_dump.elfcorehdr_size = elfcorehdr_size;
+ return 0;
+}
+
+/*
+ * Check if the fadump header of crashed kernel is compatible with fadump kernel.
+ *
+ * It checks the magic number, endianness, and size of non-primitive type
+ * members of fadump header to ensure safe dump collection.
+ */
+static bool __init is_fadump_header_compatible(struct fadump_crash_info_header *fdh)
+{
+ if (fdh->magic_number == FADUMP_CRASH_INFO_MAGIC_OLD) {
+ pr_err("Old magic number, can't process the dump.\n");
+ return false;
+ }
+
+ if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
+ if (fdh->magic_number == swab64(FADUMP_CRASH_INFO_MAGIC))
+ pr_err("Endianness mismatch between the crashed and fadump kernels.\n");
+ else
+ pr_err("Fadump header is corrupted.\n");
+
+ return false;
+ }
+
+ /*
+ * Dump collection is not safe if the size of non-primitive type members
+ * of the fadump header do not match between crashed and fadump kernel.
+ */
+ if (fdh->pt_regs_sz != sizeof(struct pt_regs) ||
+ fdh->cpu_mask_sz != sizeof(struct cpumask)) {
+ pr_err("Fadump header size mismatch.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void __init fadump_process(void)
+{
+ struct fadump_crash_info_header *fdh;
+
+ fdh = (struct fadump_crash_info_header *) __va(fw_dump.fadumphdr_addr);
+ if (!fdh) {
+ pr_err("Crash info header is empty.\n");
+ goto err_out;
+ }
+
+ /* Avoid processing the dump if fadump header isn't compatible */
+ if (!is_fadump_header_compatible(fdh))
+ goto err_out;
+
+ /* Allocate buffer for elfcorehdr */
+ if (fadump_setup_elfcorehdr_buf())
+ goto err_out;
+
+ fadump_populate_elfcorehdr(fdh);
+
+ /* Let platform update the CPU notes in elfcorehdr */
+ if (fw_dump.ops->fadump_process(&fw_dump) < 0)
+ goto err_out;
+
+ /*
+ * elfcorehdr is now ready to be exported.
+ *
+ * set elfcorehdr_addr so that vmcore module will export the
+ * elfcorehdr through '/proc/vmcore'.
+ */
+ elfcorehdr_addr = virt_to_phys((void *)fw_dump.elfcorehdr_addr);
+ return;
+
+err_out:
+ fadump_invalidate_release_mem();
+}
+
+/*
+ * Reserve memory to store additional parameters to be passed
+ * for fadump/capture kernel.
+ */
+static void __init fadump_setup_param_area(void)
+{
+ phys_addr_t range_start, range_end;
+
+ if (!fw_dump.param_area_supported || fw_dump.dump_active)
+ return;
+
+ /* This memory can't be used by PFW or bootloader as it is shared across kernels */
+ if (radix_enabled()) {
+ /*
+ * Anywhere in the upper half should be good enough as all memory
+ * is accessible in real mode.
+ */
+ range_start = memblock_end_of_DRAM() / 2;
+ range_end = memblock_end_of_DRAM();
+ } else {
+ /*
+ * Passing additional parameters is supported for hash MMU only
+ * if the first memory block size is 768MB or higher.
+ */
+ if (ppc64_rma_size < 0x30000000)
+ return;
+
+ /*
+ * 640 MB to 768 MB is not used by PFW/bootloader. So, try reserving
+ * memory for passing additional parameters in this range to avoid
+ * being stomped on by PFW/bootloader.
+ */
+ range_start = 0x2A000000;
+ range_end = range_start + 0x4000000;
+ }
+
+ fw_dump.param_area = memblock_phys_alloc_range(COMMAND_LINE_SIZE,
+ COMMAND_LINE_SIZE,
+ range_start,
+ range_end);
+ if (!fw_dump.param_area || sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr)) {
+ pr_warn("WARNING: Could not setup area to pass additional parameters!\n");
+ return;
+ }
+
+ memset(phys_to_virt(fw_dump.param_area), 0, COMMAND_LINE_SIZE);
+}
+
/*
* Prepare for firmware-assisted dump.
*/
@@ -1651,15 +1803,11 @@ int __init setup_fadump(void)
* saving it to the disk.
*/
if (fw_dump.dump_active) {
- /*
- * if dump process fails then invalidate the registration
- * and release memory before proceeding for re-registration.
- */
- if (fw_dump.ops->fadump_process(&fw_dump) < 0)
- fadump_invalidate_release_mem();
+ fadump_process();
}
/* Initialize the kernel dump memory structure and register with f/w */
else if (fw_dump.reserve_dump_area_size) {
+ fadump_setup_param_area();
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
register_fadump();
}
@@ -1735,8 +1883,3 @@ static void __init fadump_reserve_crash_area(u64 base)
memblock_reserve(mstart, msize);
}
}
-
-unsigned long __init arch_reserved_kernel_pages(void)
-{
- return memblock_reserved_size() / PAGE_SIZE;
-}
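A minimal sketch of feeding extra capture-kernel parameters through the new /sys/kernel/fadump/bootargs_append node wired up above (plain C file I/O; the parameter string is only an example):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/fadump/bootargs_append", "w");

		if (!f) {
			perror("bootargs_append");
			return 1;
		}
		/* Stored in the reserved param area and appended to the capture
		 * kernel's bootargs by fadump_append_bootargs() at early boot. */
		fputs("nr_cpus=1 numa=off\n", f);
		fclose(f);
		return 0;
	}

The store handler above rejects writes while a dump is active and returns -ENOSPC when saved_command_line_len plus the new string would overflow COMMAND_LINE_SIZE.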
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 647b0b445e89..edc479a7c2bc 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -199,12 +199,12 @@ instruction_counter:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
mtspr SPRN_MD_EPN, r10
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
mfcr r11
compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
blt+ 3f
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index c1d89764dd22..57196883a00e 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -419,14 +419,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
rlwinm r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
@@ -442,7 +442,7 @@ InstructionTLBMiss:
andc. r1,r1,r2 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
#endif
ori r1, r1, 0xe06 /* clear out reserved bits */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1185efebf032..b70b4f93561f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -26,6 +26,7 @@
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
@@ -1285,15 +1286,14 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
+ struct iommu_group *grp;
/* At first attach the ownership is already set */
- if (!domain) {
- iommu_group_put(grp);
+ if (!domain)
return 0;
- }
+ grp = iommu_group_get(dev);
table_group = iommu_group_get_iommudata(grp);
/*
* The domain being set to PLATFORM from earlier
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 072ebe7f290b..f8208c027148 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct pt_regs *regs;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(nip, parent_nip);
if (bit < 0)
return;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index bbca90a5e2ec..14c5ddec3056 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,8 +19,8 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
-#include <linux/moduleloader.h>
#include <linux/set_memory.h>
+#include <linux/execmem.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
@@ -126,26 +126,6 @@ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offse
return (kprobe_opcode_t *)(addr + offset);
}
-void *alloc_insn_page(void)
-{
- void *page;
-
- page = module_alloc(PAGE_SIZE);
- if (!page)
- return NULL;
-
- if (strict_module_rwx_enabled()) {
- int err = set_memory_rox((unsigned long)page, 1);
-
- if (err)
- goto error;
- }
- return page;
-error:
- module_memfree(page);
- return NULL;
-}
-
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 1a8cdafd68e8..91123e102db4 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -192,7 +192,7 @@ _GLOBAL(scom970_read)
xori r0,r0,MSR_EE
mtmsrd r0,1
- /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+ /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd,
* and finally or in RW bit
*/
@@ -226,7 +226,7 @@ _GLOBAL(scom970_write)
xori r0,r0,MSR_EE
mtmsrd r0,1
- /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+ /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd.
*/
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index f6d6ae0a1692..baeb24c102c8 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -7,7 +7,6 @@
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
-#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/bug.h>
#include <asm/module.h>
@@ -17,8 +16,6 @@
#include <asm/setup.h>
#include <asm/sections.h>
-static LIST_HEAD(module_bug_list);
-
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
@@ -88,40 +85,3 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-
-static __always_inline void *
-__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
-{
- pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
- gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
-
- /*
- * Don't do huge page allocations for modules yet until more testing
- * is done. STRICT_MODULE_RWX may require extra work to support this
- * too.
- */
- return __vmalloc_node_range(size, 1, start, end, gfp, prot,
- VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __builtin_return_address(0));
-}
-
-void *module_alloc(unsigned long size)
-{
-#ifdef MODULES_VADDR
- unsigned long limit = (unsigned long)_etext - SZ_32M;
- void *ptr = NULL;
-
- BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
-
- /* First try within 32M limit from _etext to avoid branch trampolines */
- if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
- ptr = __module_alloc(size, limit, MODULES_END, true);
-
- if (!ptr)
- ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
-
- return ptr;
-#else
- return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
-#endif
-}
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index d95a48eff412..eac84d687b53 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -517,7 +517,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
}
/*
- * This one is used by /dev/mem and fbdev who have no clue about the
+ * This one is used by /dev/mem and video who have no clue about the
* PCI device, it tries to find the PCI device first and calls the
* above routine
*/
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9452a54d356c..a7671786764b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1185,6 +1185,9 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
t->hashkeyr = mfspr(SPRN_HASHKEYR);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ t->dexcr = mfspr(SPRN_DEXCR);
#endif
}
@@ -1267,6 +1270,10 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) &&
old_thread->hashkeyr != new_thread->hashkeyr)
mtspr(SPRN_HASHKEYR, new_thread->hashkeyr);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ old_thread->dexcr != new_thread->dexcr)
+ mtspr(SPRN_DEXCR, new_thread->dexcr);
#endif
}
@@ -1634,6 +1641,13 @@ void arch_setup_new_exec(void)
current->thread.regs->amr = default_amr;
current->thread.regs->iamr = default_iamr;
#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ current->thread.dexcr = current->thread.dexcr_onexec;
+ mtspr(SPRN_DEXCR, current->thread.dexcr);
+ }
+#endif /* CONFIG_PPC_BOOK3S_64 */
}
#ifdef CONFIG_PPC64
@@ -1647,7 +1661,7 @@ void arch_setup_new_exec(void)
* cases will happen:
*
* 1. The correct thread is running, the wrong thread is not
- * In this situation, the correct thread is woken and proceeds to pass it's
+ * In this situation, the correct thread is woken and proceeds to pass its
* condition check.
*
* 2. Neither threads are running
@@ -1657,15 +1671,15 @@ void arch_setup_new_exec(void)
* for the wrong thread, or they will execute the condition check immediately.
*
* 3. The wrong thread is running, the correct thread is not
- * The wrong thread will be woken, but will fail it's condition check and
+ * The wrong thread will be woken, but will fail its condition check and
* re-execute wait. The correct thread, when scheduled, will execute either
- * it's condition check (which will pass), or wait, which returns immediately
- * when called the first time after the thread is scheduled, followed by it's
+ * its condition check (which will pass), or wait, which returns immediately
+ * when called the first time after the thread is scheduled, followed by its
* condition check (which will pass).
*
* 4. Both threads are running
- * Both threads will be woken. The wrong thread will fail it's condition check
- * and execute another wait, while the correct thread will pass it's condition
+ * Both threads will be woken. The wrong thread will fail its condition check
+ * and execute another wait, while the correct thread will pass its condition
* check.
*
* @t: the task to set the thread ID for
@@ -1878,6 +1892,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
p->thread.hashkeyr = current->thread.hashkeyr;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ p->thread.dexcr = mfspr(SPRN_DEXCR);
#endif
return 0;
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index cd8d8883de90..60819751e55e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -779,7 +779,7 @@ static inline void save_fscr_to_task(void) {}
void __init early_init_devtree(void *params)
{
- phys_addr_t limit;
+ phys_addr_t int_vector_size;
DBG(" -> early_init_devtree(%px)\n", params);
@@ -813,6 +813,9 @@ void __init early_init_devtree(void *params)
*/
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
+ /* Append additional parameters passed for fadump capture kernel */
+ fadump_append_bootargs();
+
/* Scan memory nodes and rebuild MEMBLOCKs */
early_init_dt_scan_root();
early_init_dt_scan_memory_ppc();
@@ -832,9 +835,16 @@ void __init early_init_devtree(void *params)
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
+#ifdef CONFIG_PPC64
+ /* If relocatable, reserve at least 32k for interrupt vectors etc. */
+ int_vector_size = __end_interrupts - _stext;
+ int_vector_size = max_t(phys_addr_t, SZ_32K, int_vector_size);
+#else
/* If relocatable, reserve first 32k for interrupt vectors etc. */
+ int_vector_size = SZ_32K;
+#endif
if (PHYSICAL_START > MEMORY_START)
- memblock_reserve(MEMORY_START, 0x8000);
+ memblock_reserve(MEMORY_START, int_vector_size);
reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
/*
@@ -846,9 +856,12 @@ void __init early_init_devtree(void *params)
reserve_crashkernel();
early_reserve_mem();
- /* Ensure that total memory size is page-aligned. */
- limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
- memblock_enforce_memory_limit(limit);
+ if (memory_limit > memblock_phys_mem_size())
+ memory_limit = 0;
+
+ /* Align down to 16 MB which is large page size with hash page translation */
+ memory_limit = ALIGN_DOWN(memory_limit ?: memblock_phys_mem_size(), SZ_16M);
+ memblock_enforce_memory_limit(memory_limit);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
if (!early_radix_enabled())
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 0ef358285337..fbb68fc28ed3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -817,8 +817,8 @@ static void __init early_cmdline_parse(void)
opt += 4;
prom_memory_limit = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
- /* Align to 16 MB == size of ppc64 large page */
- prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
+ /* Align down to 16 MB which is large page size with hash page translation */
+ prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M);
#endif
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c
index 210ea834e603..447bff87fd21 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-tm.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c
@@ -12,7 +12,7 @@ void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* If task is not current, it will have been flushed already to
- * it's thread_struct during __switch_to().
+ * its thread_struct during __switch_to().
*
* A reclaim flushes ALL the state or if not in TM save TM SPRs
* in the appropriate thread structures from live.
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 584cf5c3df50..c1819e0a6684 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -469,12 +469,7 @@ static int dexcr_get(struct task_struct *target, const struct user_regset *regse
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
- /*
- * The DEXCR is currently static across all CPUs, so we don't
- * store the target's value anywhere, but the static value
- * will also be correct.
- */
- membuf_store(&to, (u64)lower_32_bits(DEXCR_INIT));
+ membuf_store(&to, (u64)lower_32_bits(target->thread.dexcr));
/*
* Technically the HDEXCR is per-cpu, but a hypervisor can't reasonably
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 01ed1263e1a9..4bd2f87616ba 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -405,7 +405,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
- tpc, tpc > 1 ? "s" : "");
+ tpc, str_plural(tpc));
printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2f19d5e94485..ae36a129789f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
void __init setup_per_cpu_areas(void)
{
@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
+ static_key_enable(&__percpu_first_chunk_is_paged.key);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 12e53b3d7923..46e6d2cd7a2d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1567,7 +1567,7 @@ static void add_cpu_to_masks(int cpu)
/*
* This CPU will not be in the online mask yet so we need to manually
- * add it to it's own thread sibling mask.
+ * add it to its own thread sibling mask.
*/
map_cpu_to_node(cpu, cpu_to_node(cpu));
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0f39a6b84132..b842c83ab497 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -139,7 +139,7 @@ static unsigned long dscr_default;
* @val: Returned cpu specific DSCR default value
*
* This function returns the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
+ * for any cpu which is contained in its PACA structure.
*/
static void read_dscr(void *val)
{
@@ -152,7 +152,7 @@ static void read_dscr(void *val)
* @val: New cpu specific DSCR default value to update
*
* This function updates the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
+ * for any cpu which is contained in its PACA structure.
*/
static void write_dscr(void *val)
{
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index df20cf201f74..c0fdc6d94fee 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -354,6 +354,28 @@ void vtime_flush(struct task_struct *tsk)
acct->hardirq_time = 0;
acct->softirq_time = 0;
}
+
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void vtime_task_switch(struct task_struct *prev)
+{
+ if (is_idle_task(prev))
+ vtime_account_idle(prev);
+ else
+ vtime_account_kernel(prev);
+
+ vtime_flush(prev);
+
+ if (!IS_ENABLED(CONFIG_PPC64)) {
+ struct cpu_accounting_data *acct = get_accounting(current);
+ struct cpu_accounting_data *acct0 = get_accounting(prev);
+
+ acct->starttime = acct0->starttime;
+ }
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
void __no_kcsan __delay(unsigned long loops)
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 1b93655c2857..1425b6edc66b 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -47,12 +47,6 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
ccflags-y := -fno-common -fno-builtin
ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS)
ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
@@ -74,9 +68,9 @@ targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
+$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
$(call if_changed,vdso32ld_and_check)
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE
$(call if_changed,vdso64ld_and_check)
# assembly rules for the .S files
@@ -90,10 +84,10 @@ $(obj)/vgettimeofday-64.o: %-64.o: %.c FORCE
$(call if_changed_dep,cc_o_c)
# Generate VDSO offsets using helper script
-gen-vdso32sym := $(srctree)/$(src)/gen_vdso32_offsets.sh
+gen-vdso32sym := $(src)/gen_vdso32_offsets.sh
quiet_cmd_vdso32sym = VDSO32SYM $@
cmd_vdso32sym = $(NM) $< | $(gen-vdso32sym) | LC_ALL=C sort > $@
-gen-vdso64sym := $(srctree)/$(src)/gen_vdso64_offsets.sh
+gen-vdso64sym := $(src)/gen_vdso64_offsets.sh
quiet_cmd_vdso64sym = VDSO64SYM $@
cmd_vdso64sym = $(NM) $< | $(gen-vdso64sym) | LC_ALL=C sort > $@
@@ -114,5 +108,3 @@ quiet_cmd_vdso64ld_and_check = VDSO64L $@
cmd_vdso64ld_and_check = $(VDSOCC) $(ldflags-y) $(LD64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check)
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(VDSOCC) $(a_flags) $(AS64FLAGS) -c -o $@ $<
-
-OBJECT_FILES_NON_STANDARD := y
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 8e469c4da3f8..470eb0453e17 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,11 +3,11 @@
# Makefile for the linux kernel.
#
-obj-y += core.o core_$(BITS).o
+obj-y += core.o core_$(BITS).o ranges.o
obj-$(CONFIG_PPC32) += relocate_32.o
-obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
+obj-$(CONFIG_KEXEC_FILE) += file_load.o file_load_$(BITS).o elf_$(BITS).o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_CRASH_DUMP) += crash.o
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 762e4d09aacf..85050be08a23 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/hardirq.h>
#include <linux/of.h>
+#include <linux/libfdt.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -30,6 +31,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
+#include <asm/crashdump-ppc64.h>
int machine_kexec_prepare(struct kimage *image)
{
@@ -419,3 +421,92 @@ static int __init export_htab_values(void)
}
late_initcall(export_htab_values);
#endif /* CONFIG_PPC_64S_HASH_MMU */
+
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
+/**
+ * add_node_props - Reads node properties from device node structure and add
+ * them to fdt.
+ * @fdt: Flattened device tree of the kernel
+ * @node_offset: offset of the node to add a property at
+ * @dn: device node pointer
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+{
+ int ret = 0;
+ struct property *pp;
+
+ if (!dn)
+ return -EINVAL;
+
+ for_each_property_of_node(dn, pp) {
+ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+ if (ret < 0) {
+ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
+ * device node.
+ * @fdt: Flattened device tree of the kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int update_cpus_node(void *fdt)
+{
+ struct device_node *cpus_node, *dn;
+ int cpus_offset, cpus_subnode_offset, ret = 0;
+
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
+ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+ pr_err("Malformed device tree: error reading /cpus node: %s\n",
+ fdt_strerror(cpus_offset));
+ return cpus_offset;
+ }
+
+ if (cpus_offset > 0) {
+ ret = fdt_del_node(fdt, cpus_offset);
+ if (ret < 0) {
+ pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
+ return -EINVAL;
+ }
+ }
+
+ /* Add cpus node to fdt */
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
+ if (cpus_offset < 0) {
+ pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
+ return -EINVAL;
+ }
+
+ /* Add cpus node properties */
+ cpus_node = of_find_node_by_path("/cpus");
+ ret = add_node_props(fdt, cpus_offset, cpus_node);
+ of_node_put(cpus_node);
+ if (ret < 0)
+ return ret;
+
+ /* Loop through all subnodes of cpus and add them to fdt */
+ for_each_node_by_type(dn, "cpu") {
+ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+ if (cpus_subnode_offset < 0) {
+ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+ fdt_strerror(cpus_subnode_offset));
+ ret = cpus_subnode_offset;
+ goto out;
+ }
+
+ ret = add_node_props(fdt, cpus_subnode_offset, dn);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ of_node_put(dn);
+ return ret;
+}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
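As a rough usage sketch (hypothetical caller, not part of the patch), the now-shared helper can be pointed at any writable crash FDT:

    static void refresh_crash_fdt_cpus(void *fdt)
    {
            /* Rebuild /cpus from the live device tree (of_root) */
            if (update_cpus_node(fdt))
                    pr_err("failed to refresh /cpus in the crash FDT\n");
    }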
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index ef5c2d25ec39..9ac3266e4965 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -16,6 +16,8 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
#include <asm/processor.h>
#include <asm/machdep.h>
@@ -24,6 +26,7 @@
#include <asm/setjmp.h>
#include <asm/debug.h>
#include <asm/interrupt.h>
+#include <asm/kexec_ranges.h>
/*
* The primary CPU waits a while for all secondary CPUs to enter. This is to
@@ -392,3 +395,195 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
+
+#ifdef CONFIG_CRASH_HOTPLUG
+#undef pr_fmt
+#define pr_fmt(fmt) "crash hp: " fmt
+
+/*
+ * Advertise preferred elfcorehdr size to userspace via
+ * /sys/kernel/crash_elfcorehdr_size sysfs interface.
+ */
+unsigned int arch_crash_get_elfcorehdr_size(void)
+{
+ unsigned long phdr_cnt;
+
+ /* One program header per possible CPU, plus one for vmcoreinfo */
+ phdr_cnt = num_possible_cpus() + 1;
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+ phdr_cnt += CONFIG_CRASH_MAX_MEMORY_RANGES;
+
+ return sizeof(struct elfhdr) + (phdr_cnt * sizeof(Elf64_Phdr));
+}
+
+/**
+ * update_crash_elfcorehdr() - Recreate the elfcorehdr and replace the old
+ *                             elfcorehdr in the kexec segment array with it.
+ * @image: the active struct kimage
+ * @mn: struct memory_notify data for the memory hotplug event
+ */
+static void update_crash_elfcorehdr(struct kimage *image, struct memory_notify *mn)
+{
+ int ret;
+ struct crash_mem *cmem = NULL;
+ struct kexec_segment *ksegment;
+ void *ptr, *mem, *elfbuf = NULL;
+ unsigned long elfsz, memsz, base_addr, size;
+
+ ksegment = &image->segment[image->elfcorehdr_index];
+ mem = (void *) ksegment->mem;
+ memsz = ksegment->memsz;
+
+ ret = get_crash_memory_ranges(&cmem);
+ if (ret) {
+ pr_err("Failed to get crash mem range\n");
+ return;
+ }
+
+ /*
+ * The hot-unplugged memory is still part of the crash memory ranges;
+ * remove it here.
+ */
+ if (image->hp_action == KEXEC_CRASH_HP_REMOVE_MEMORY) {
+ base_addr = PFN_PHYS(mn->start_pfn);
+ size = mn->nr_pages * PAGE_SIZE;
+ ret = remove_mem_range(&cmem, base_addr, size);
+ if (ret) {
+ pr_err("Failed to remove hot-unplugged memory from crash memory ranges\n");
+ goto out;
+ }
+ }
+
+ ret = crash_prepare_elf64_headers(cmem, false, &elfbuf, &elfsz);
+ if (ret) {
+ pr_err("Failed to prepare elf header\n");
+ goto out;
+ }
+
+ /*
+ * It is unlikely that the kernel hits this, because the elfcorehdr kexec
+ * segment (memsz) is built with additional space to accommodate a growing
+ * number of crash memory ranges while loading the kdump kernel. This is
+ * just a safeguard against any unforeseen case.
+ */
+ if (elfsz > memsz) {
+ pr_err("Updated crash elfcorehdr elfsz %lu > memsz %lu", elfsz, memsz);
+ goto out;
+ }
+
+ ptr = __va(mem);
+ if (ptr) {
+ /* Temporarily invalidate the crash image while it is replaced */
+ xchg(&kexec_crash_image, NULL);
+
+ /* Replace the old elfcorehdr with newly prepared elfcorehdr */
+ memcpy((void *)ptr, elfbuf, elfsz);
+
+ /* The crash image is now valid once again */
+ xchg(&kexec_crash_image, image);
+ }
+out:
+ kvfree(cmem);
+ kvfree(elfbuf);
+}
+
+/**
+ * get_fdt_index - Loop through the kexec segment array and find
+ * the index of the FDT segment.
+ * @image: a pointer to kexec_crash_image
+ *
+ * Returns the index of FDT segment in the kexec segment array
+ * if found; otherwise -1.
+ */
+static int get_fdt_index(struct kimage *image)
+{
+ void *ptr;
+ unsigned long mem;
+ int i, fdt_index = -1;
+
+ /* Find the FDT segment index in kexec segment array. */
+ for (i = 0; i < image->nr_segments; i++) {
+ mem = image->segment[i].mem;
+ ptr = __va(mem);
+
+ if (ptr && fdt_magic(ptr) == FDT_MAGIC) {
+ fdt_index = i;
+ break;
+ }
+ }
+
+ return fdt_index;
+}
+
+/**
+ * update_crash_fdt - updates the cpus node of the crash FDT.
+ *
+ * @image: a pointer to kexec_crash_image
+ */
+static void update_crash_fdt(struct kimage *image)
+{
+ void *fdt;
+ int fdt_index;
+
+ fdt_index = get_fdt_index(image);
+ if (fdt_index < 0) {
+ pr_err("Unable to locate FDT segment.\n");
+ return;
+ }
+
+ fdt = __va((void *)image->segment[fdt_index].mem);
+
+ /* Temporarily invalidate the crash image while it is replaced */
+ xchg(&kexec_crash_image, NULL);
+
+ /* update FDT to reflect changes in CPU resources */
+ if (update_cpus_node(fdt))
+ pr_err("Failed to update crash FDT");
+
+ /* The crash image is now valid once again */
+ xchg(&kexec_crash_image, image);
+}
+
+int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+#ifdef CONFIG_KEXEC_FILE
+ if (image->file_mode)
+ return 1;
+#endif
+ return kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT;
+}
+
+/**
+ * arch_crash_handle_hotplug_event - Handle crash CPU/Memory hotplug events to update the
+ * necessary kexec segments based on the hotplug event.
+ * @image: a pointer to kexec_crash_image
+ * @arg: struct memory_notify data for memory hotplug events; NULL for CPU hotplug events.
+ *
+ * Update the kdump image based on the type of hotplug event, represented by image->hp_action.
+ * CPU add: Update the FDT segment to include the newly added CPU.
+ * CPU remove: No action is needed, on the assumption that it is okay to have offline CPUs
+ * as part of the FDT.
+ * Memory add/remove: Recreate the elfcorehdr so that it reflects the updated crash memory ranges.
+ */
+void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
+{
+ struct memory_notify *mn;
+
+ switch (image->hp_action) {
+ case KEXEC_CRASH_HP_REMOVE_CPU:
+ return;
+
+ case KEXEC_CRASH_HP_ADD_CPU:
+ update_crash_fdt(image);
+ break;
+
+ case KEXEC_CRASH_HP_REMOVE_MEMORY:
+ case KEXEC_CRASH_HP_ADD_MEMORY:
+ mn = (struct memory_notify *)arg;
+ update_crash_elfcorehdr(image, mn);
+ return;
+ default:
+ pr_warn_once("Unknown hotplug action\n");
+ }
+}
+#endif /* CONFIG_CRASH_HOTPLUG */
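A worked example of the advertised elfcorehdr size, assuming hypothetical values of 64 possible CPUs, CONFIG_MEMORY_HOTPLUG=y and CONFIG_CRASH_MAX_MEMORY_RANGES=8192:

    /* Hypothetical numbers; Elf64_Ehdr is 64 bytes, Elf64_Phdr is 56 bytes on ppc64 */
    unsigned long phdr_cnt = 64 + 1 + 8192;     /* possible CPUs + vmcoreinfo + memory ranges */
    unsigned int size = sizeof(struct elfhdr) + phdr_cnt * sizeof(Elf64_Phdr);
    /* 64 + 8257 * 56 = 462456 bytes */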
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 6d8951e8e966..214c071c58ed 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -116,7 +116,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out_free_fdt;
- fdt_pack(fdt);
+ if (!IS_ENABLED(CONFIG_CRASH_HOTPLUG) || image->type != KEXEC_TYPE_CRASH)
+ fdt_pack(fdt);
kbuf.buffer = fdt;
kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 1bc65de6174f..925a69ad2468 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -30,6 +30,7 @@
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
+#include <asm/cputhreads.h>
struct umem_info {
__be64 *buf; /* data buffer for usable-memory property */
@@ -48,83 +49,6 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
};
/**
- * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
- * regions like opal/rtas, tce-table, initrd,
- * kernel, htab which should be avoided while
- * setting up kexec load segments.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- ret = add_tce_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_initrd_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_htab_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_kernel_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_reserved_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- /* exclude memory ranges should be sorted for easy lookup */
- sort_memory_ranges(*mem_ranges, true);
-out:
- if (ret)
- pr_err("Failed to setup exclude memory ranges\n");
- return ret;
-}
-
-/**
- * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
- * memory regions that should be added to the
- * memory reserve map to ensure the region is
- * protected from any mischief.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_reserved_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup reserved memory ranges\n");
- return ret;
-}
-
-/**
* __locate_mem_hole_top_down - Looks top down for a large enough memory hole
* in the memory regions between buf_min & buf_max
* for the buffer. If found, sets kbuf->mem.
@@ -323,119 +247,6 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
#ifdef CONFIG_CRASH_DUMP
/**
- * get_usable_memory_ranges - Get usable memory ranges. This list includes
- * regions like crashkernel, opal/rtas & tce-table,
- * that kdump kernel could use.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- /*
- * Early boot failure observed on guests when low memory (first memory
- * block?) is not added to usable memory. So, add [0, crashk_res.end]
- * instead of [crashk_res.start, crashk_res.end] to workaround it.
- * Also, crashed kernel's memory must be added to reserve map to
- * avoid kdump kernel from using it.
- */
- ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup usable memory ranges\n");
- return ret;
-}
-
-/**
- * get_crash_memory_ranges - Get crash memory ranges. This list includes
- * first/crashing kernel's memory regions that
- * would be exported via an elfcore.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
-{
- phys_addr_t base, end;
- struct crash_mem *tmem;
- u64 i;
- int ret;
-
- for_each_mem_range(i, &base, &end) {
- u64 size = end - base;
-
- /* Skip backup memory region, which needs a separate entry */
- if (base == BACKUP_SRC_START) {
- if (size > BACKUP_SRC_SIZE) {
- base = BACKUP_SRC_END + 1;
- size -= BACKUP_SRC_SIZE;
- } else
- continue;
- }
-
- ret = add_mem_range(mem_ranges, base, size);
- if (ret)
- goto out;
-
- /* Try merging adjacent ranges before reallocation attempt */
- if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
- sort_memory_ranges(*mem_ranges, true);
- }
-
- /* Reallocate memory ranges if there is no space to split ranges */
- tmem = *mem_ranges;
- if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
- tmem = realloc_mem_ranges(mem_ranges);
- if (!tmem)
- goto out;
- }
-
- /* Exclude crashkernel region */
- ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
- if (ret)
- goto out;
-
- /*
- * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
- * regions are exported to save their context at the time of
- * crash, they should actually be backed up just like the
- * first 64K bytes of memory.
- */
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- /* create a separate program header for the backup region */
- ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
- if (ret)
- goto out;
-
- sort_memory_ranges(*mem_ranges, false);
-out:
- if (ret)
- pr_err("Failed to setup crash memory ranges\n");
- return ret;
-}
-
-/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
* @cnt: No. of entries to accommodate.
@@ -784,6 +595,23 @@ static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
}
}
+static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
+{
+#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned int extra_sz = 0;
+
+ if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
+ pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
+ else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
+ pr_warn("Configured crash mem ranges may not be enough\n");
+ else
+ extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);
+
+ return extra_sz;
+#endif
+ return 0;
+}
+
/**
* load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
* segment needed to load kdump kernel.
@@ -815,7 +643,8 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
kbuf->buffer = headers;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
- kbuf->bufsz = kbuf->memsz = headers_sz;
+ kbuf->bufsz = headers_sz;
+ kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
kbuf->top_down = false;
ret = kexec_add_buffer(kbuf);
@@ -979,6 +808,9 @@ static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
unsigned int cpu_nodes, extra_size = 0;
struct device_node *dn;
u64 usm_entries;
+#ifdef CONFIG_CRASH_HOTPLUG
+ unsigned int possible_cpu_nodes;
+#endif
if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
return 0;
@@ -1006,6 +838,19 @@ static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
if (cpu_nodes > boot_cpu_node_count)
extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
+#ifdef CONFIG_CRASH_HOTPLUG
+ /*
+ * Make sure enough space is reserved to accommodate all possible CPU nodes
+ * in the crash FDT. This allows CPU nodes that are not yet present in the
+ * system to be added later without regenerating the entire FDT.
+ */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ possible_cpu_nodes = num_possible_cpus() / threads_per_core;
+ if (possible_cpu_nodes > cpu_nodes)
+ extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
+ }
+#endif
+
return extra_size;
}
@@ -1028,93 +873,6 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
return extra_size + kdump_extra_fdt_size_ppc64(image);
}
-/**
- * add_node_props - Reads node properties from device node structure and add
- * them to fdt.
- * @fdt: Flattened device tree of the kernel
- * @node_offset: offset of the node to add a property at
- * @dn: device node pointer
- *
- * Returns 0 on success, negative errno on error.
- */
-static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
-{
- int ret = 0;
- struct property *pp;
-
- if (!dn)
- return -EINVAL;
-
- for_each_property_of_node(dn, pp) {
- ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
- if (ret < 0) {
- pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
- return ret;
- }
- }
- return ret;
-}
-
-/**
- * update_cpus_node - Update cpus node of flattened device tree using of_root
- * device node.
- * @fdt: Flattened device tree of the kernel.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int update_cpus_node(void *fdt)
-{
- struct device_node *cpus_node, *dn;
- int cpus_offset, cpus_subnode_offset, ret = 0;
-
- cpus_offset = fdt_path_offset(fdt, "/cpus");
- if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
- pr_err("Malformed device tree: error reading /cpus node: %s\n",
- fdt_strerror(cpus_offset));
- return cpus_offset;
- }
-
- if (cpus_offset > 0) {
- ret = fdt_del_node(fdt, cpus_offset);
- if (ret < 0) {
- pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
- return -EINVAL;
- }
- }
-
- /* Add cpus node to fdt */
- cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
- if (cpus_offset < 0) {
- pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
- return -EINVAL;
- }
-
- /* Add cpus node properties */
- cpus_node = of_find_node_by_path("/cpus");
- ret = add_node_props(fdt, cpus_offset, cpus_node);
- of_node_put(cpus_node);
- if (ret < 0)
- return ret;
-
- /* Loop through all subnodes of cpus and add them to fdt */
- for_each_node_by_type(dn, "cpu") {
- cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
- if (cpus_subnode_offset < 0) {
- pr_err("Unable to add %s subnode: %s\n", dn->full_name,
- fdt_strerror(cpus_subnode_offset));
- ret = cpus_subnode_offset;
- goto out;
- }
-
- ret = add_node_props(fdt, cpus_subnode_offset, dn);
- if (ret < 0)
- goto out;
- }
-out:
- of_node_put(dn);
- return ret;
-}
-
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
const char *propname)
{
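A hedged example of the extra FDT space calculation above, assuming a hypothetical system with 16 CPU nodes in the boot FDT, SMT8 and 256 possible CPUs:

    /* Hypothetical values: num_possible_cpus() = 256, threads_per_core = 8 */
    unsigned int possible_cpu_nodes = 256 / 8;          /* 32 */
    unsigned int cpu_nodes = 16;                        /* nodes currently in the FDT */
    unsigned int extra = (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
    /* room for 16 more CPU nodes is reserved up front */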
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index 33b780049aaf..3702b0bdab14 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -20,9 +20,13 @@
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/crash_core.h>
#include <asm/sections.h>
#include <asm/kexec_ranges.h>
+#include <asm/crashdump-ppc64.h>
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
/**
* get_max_nr_ranges - Get the max no. of ranges crash_mem structure
* could hold, given the size allocated for it.
@@ -234,13 +238,16 @@ int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
return __add_mem_range(mem_ranges, base, size);
}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
+
+#ifdef CONFIG_KEXEC_FILE
/**
* add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
* @mem_ranges: Range list to add the memory range(s) to.
*
* Returns 0 on success, negative errno on error.
*/
-int add_tce_mem_ranges(struct crash_mem **mem_ranges)
+static int add_tce_mem_ranges(struct crash_mem **mem_ranges)
{
struct device_node *dn = NULL;
int ret = 0;
@@ -279,7 +286,7 @@ int add_tce_mem_ranges(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_initrd_mem_range(struct crash_mem **mem_ranges)
+static int add_initrd_mem_range(struct crash_mem **mem_ranges)
{
u64 base, end;
int ret;
@@ -296,7 +303,6 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges)
return ret;
}
-#ifdef CONFIG_PPC_64S_HASH_MMU
/**
* add_htab_mem_range - Adds htab range to the given memory ranges list,
* if it exists
@@ -304,14 +310,18 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_htab_mem_range(struct crash_mem **mem_ranges)
+static int add_htab_mem_range(struct crash_mem **mem_ranges)
{
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!htab_address)
return 0;
return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
-}
+#else
+ return 0;
#endif
+}
/**
* add_kernel_mem_range - Adds kernel text region to the given
@@ -320,18 +330,20 @@ int add_htab_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_kernel_mem_range(struct crash_mem **mem_ranges)
+static int add_kernel_mem_range(struct crash_mem **mem_ranges)
{
return add_mem_range(mem_ranges, 0, __pa(_end));
}
+#endif /* CONFIG_KEXEC_FILE */
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
/**
* add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
-int add_rtas_mem_range(struct crash_mem **mem_ranges)
+static int add_rtas_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u32 base, size;
@@ -356,7 +368,7 @@ int add_rtas_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_opal_mem_range(struct crash_mem **mem_ranges)
+static int add_opal_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u64 base, size;
@@ -374,7 +386,9 @@ int add_opal_mem_range(struct crash_mem **mem_ranges)
of_node_put(dn);
return ret;
}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
+#ifdef CONFIG_KEXEC_FILE
/**
* add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
* to the given memory ranges list.
@@ -382,7 +396,7 @@ int add_opal_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
+static int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
{
int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
struct device_node *root = of_find_node_by_path("/");
@@ -412,3 +426,283 @@ int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
return ret;
}
+
+/**
+ * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
+ * memory regions that should be added to the
+ * memory reserve map to ensure the region is
+ * protected from any mischief.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup reserved memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
+ * regions like opal/rtas, tce-table, initrd,
+ * kernel, htab which should be avoided while
+ * setting up kexec load segments.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_initrd_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_htab_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_kernel_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* exclude memory ranges should be sorted for easy lookup */
+ sort_memory_ranges(*mem_ranges, true);
+out:
+ if (ret)
+ pr_err("Failed to setup exclude memory ranges\n");
+ return ret;
+}
+
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_usable_memory_ranges - Get usable memory ranges. This list includes
+ * regions like crashkernel, opal/rtas & tce-table,
+ * that kdump kernel could use.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_usable_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ /*
+ * Early boot failure observed on guests when low memory (first memory
+ * block?) is not added to usable memory. So, add [0, crashk_res.end]
+ * instead of [crashk_res.start, crashk_res.end] to workaround it.
+ * Also, crashed kernel's memory must be added to reserve map to
+ * avoid kdump kernel from using it.
+ */
+ ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup usable memory ranges\n");
+ return ret;
+}
+#endif /* CONFIG_CRASH_DUMP */
+#endif /* CONFIG_KEXEC_FILE */
+
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_crash_memory_ranges - Get crash memory ranges. This list includes
+ * first/crashing kernel's memory regions that
+ * would be exported via an elfcore.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_crash_memory_ranges(struct crash_mem **mem_ranges)
+{
+ phys_addr_t base, end;
+ struct crash_mem *tmem;
+ u64 i;
+ int ret;
+
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
+
+ /* Skip backup memory region, which needs a separate entry */
+ if (base == BACKUP_SRC_START) {
+ if (size > BACKUP_SRC_SIZE) {
+ base = BACKUP_SRC_END + 1;
+ size -= BACKUP_SRC_SIZE;
+ } else
+ continue;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ goto out;
+
+ /* Try merging adjacent ranges before reallocation attempt */
+ if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
+ sort_memory_ranges(*mem_ranges, true);
+ }
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ goto out;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
+ * regions are exported to save their context at the time of
+ * crash, they should actually be backed up just like the
+ * first 64K bytes of memory.
+ */
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* create a separate program header for the backup region */
+ ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
+ if (ret)
+ goto out;
+
+ sort_memory_ranges(*mem_ranges, false);
+out:
+ if (ret)
+ pr_err("Failed to setup crash memory ranges\n");
+ return ret;
+}
+
+/**
+ * remove_mem_range - Removes the given memory range from the range list.
+ * @mem_ranges: Range list to remove the memory range from.
+ * @base: Base address of the range to remove.
+ * @size: Size of the memory range to remove.
+ *
+ * (Re)allocates memory, if needed.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int remove_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
+{
+ u64 end;
+ int ret = 0;
+ unsigned int i;
+ u64 mstart, mend;
+ struct crash_mem *mem_rngs = *mem_ranges;
+
+ if (!size)
+ return 0;
+
+ /*
+ * Memory ranges are stored as start and end addresses; use
+ * the same format for the remove operation.
+ */
+ end = base + size - 1;
+
+ for (i = 0; i < mem_rngs->nr_ranges; i++) {
+ mstart = mem_rngs->ranges[i].start;
+ mend = mem_rngs->ranges[i].end;
+
+ /*
+ * The memory range to remove is not part of this range entry
+ * in the memory range list.
+ */
+ if (!(base >= mstart && end <= mend))
+ continue;
+
+ /*
+ * Memory range to remove is equivalent to this entry in the
+ * memory range list. Remove the range entry from the list.
+ */
+ if (base == mstart && end == mend) {
+ for (; i < mem_rngs->nr_ranges - 1; i++) {
+ mem_rngs->ranges[i].start = mem_rngs->ranges[i+1].start;
+ mem_rngs->ranges[i].end = mem_rngs->ranges[i+1].end;
+ }
+ mem_rngs->nr_ranges--;
+ goto out;
+ }
+ /*
+ * The start address of the memory range to remove and of the
+ * current memory range entry in the list are the same. Just
+ * move the start address of the current memory range
+ * entry in the list to end + 1.
+ */
+ else if (base == mstart) {
+ mem_rngs->ranges[i].start = end + 1;
+ goto out;
+ }
+ /*
+ * The end address of the memory range to remove and of the
+ * current memory range entry in the list are the same.
+ * Just move the end address of the current memory
+ * range entry in the list to base - 1.
+ */
+ else if (end == mend) {
+ mem_rngs->ranges[i].end = base - 1;
+ goto out;
+ }
+ /*
+ * The memory range to remove is not at the edge of the current
+ * memory range entry. Split the current memory range entry into
+ * two halves.
+ */
+ else {
+ mem_rngs->ranges[i].end = base - 1;
+ size = mem_rngs->ranges[i].end - end;
+ ret = add_mem_range(mem_ranges, end + 1, size);
+ }
+ }
+out:
+ return ret;
+}
+#endif /* CONFIG_CRASH_DUMP */
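To make the four cases in remove_mem_range() concrete, here is a hypothetical walk-through against a single list entry [0x10000000, 0x1fffffff]:

    /* Hypothetical ranges, one case per call:
     *   remove [0x10000000, 0x1fffffff] -> entry is deleted from the list
     *   remove [0x10000000, 0x10ffffff] -> entry becomes [0x11000000, 0x1fffffff]
     *   remove [0x1f000000, 0x1fffffff] -> entry becomes [0x10000000, 0x1effffff]
     *   remove [0x14000000, 0x14ffffff] -> entry is split around the removed hole
     */
    remove_mem_range(&mem_ranges, 0x14000000, 0x01000000);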
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8acec144120e..ff6c38373957 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -360,10 +360,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
break;
}
-#if 0
- printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
-#endif
-
if (deliver)
kvmppc_inject_interrupt(vcpu, vec, 0);
@@ -899,11 +895,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
-}
-
int kvmppc_core_init_vm(struct kvm *kvm)
{
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 58391b4b32ed..4aa2ab89afbc 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -12,7 +12,6 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
-extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2b1f0cdd8c18..1b51b1c4713b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1010,18 +1010,6 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
return kvm_test_age_rmapp(kvm, range->slot, range->start);
}
-bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- WARN_ON(range->start + 1 != range->end);
-
- if (kvm_is_radix(kvm))
- kvm_unmap_radix(kvm, range->slot, range->start);
- else
- kvm_unmap_rmapp(kvm, range->slot, range->start);
-
- return false;
-}
-
static int vcpus_running(struct kvm *kvm)
{
return atomic_read(&kvm->arch.vcpus_running) != 0;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 5bbfb2eed127..de126d153328 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -714,7 +714,7 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_HID1:
to_book3s(vcpu)->hid[1] = spr_val;
break;
- case SPRN_HID2:
+ case SPRN_HID2_750FX:
to_book3s(vcpu)->hid[2] = spr_val;
break;
case SPRN_HID2_GEKKO:
@@ -900,7 +900,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
case SPRN_HID1:
*spr_val = to_book3s(vcpu)->hid[1];
break;
- case SPRN_HID2:
+ case SPRN_HID2_750FX:
case SPRN_HID2_GEKKO:
*spr_val = to_book3s(vcpu)->hid[2];
break;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8e86eb577eb8..daaf7faf21a5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4857,7 +4857,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* entering a nested guest in which case the decrementer is now owned
* by L2 and the L1 decrementer is provided in hdec_expires
*/
- if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
+ if (kvmppc_core_pending_dec(vcpu) &&
((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
@@ -6364,7 +6364,6 @@ static struct kvmppc_ops kvm_ops_hv = {
.unmap_gfn_range = kvm_unmap_gfn_range_hv,
.age_gfn = kvm_age_gfn_hv,
.test_age_gfn = kvm_test_age_gfn_hv,
- .set_spte_gfn = kvm_set_spte_gfn_hv,
.free_memslot = kvmppc_core_free_memslot_hv,
.init_vm = kvmppc_core_init_vm_hv,
.destroy_vm = kvmppc_core_destroy_vm_hv,
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index 8e6f5355f08b..1091f7a83b25 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -71,8 +71,8 @@ gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
}
if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
- kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
- cfg->vcpu_run_output_cfg);
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
if (rc < 0)
return rc;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index e42984878503..f2636414d82a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -837,7 +837,7 @@ static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
*/
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
- this_cpu_inc_rm(desc->kstat_irqs);
+ this_cpu_inc_rm(&desc->kstat_irqs->cnt);
__this_cpu_inc(kstat.irqs_sum);
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5b92619a05fd..a7d7137ea0c8 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -461,12 +461,6 @@ static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /* The page will get remapped properly on its next fault */
- return do_kvm_unmap_gfn(kvm, range);
-}
-
/*****************************************/
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
@@ -2071,7 +2065,6 @@ static struct kvmppc_ops kvm_ops_pr = {
.unmap_gfn_range = kvm_unmap_gfn_range_pr,
.age_gfn = kvm_age_gfn_pr,
.test_age_gfn = kvm_test_age_gfn_pr,
- .set_spte_gfn = kvm_set_spte_gfn_pr,
.free_memslot = kvmppc_core_free_memslot_pr,
.init_vm = kvmppc_core_init_vm_pr,
.destroy_vm = kvmppc_core_destroy_vm_pr,
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 29a382249770..1362c672387e 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -531,7 +531,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
xc->cppr = xive_prio_from_guest(new_cppr);
/*
- * IPIs are synthetized from MFRR and thus don't need
+ * IPIs are synthesized from MFRR and thus don't need
* any special EOI handling. The underlying interrupt
* used to signal MFRR changes is EOId when fetched from
* the queue.
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index ccb8f16ffe41..c664fdec75b1 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -747,12 +747,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /* The page will get remapped properly on its next fault */
- return kvm_e500_mmu_unmap_gfn(kvm, range);
-}
-
/*****************************************/
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0ab65eeb93ee..f14ecab674a3 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,8 +3,6 @@
# Makefile for ppc-specific library files..
#
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
CFLAGS_code-patching.o += -fno-stack-protector
CFLAGS_feature-fixups.o += -fno-stack-protector
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c6ab46156cda..0d1f3ee91115 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -225,7 +225,7 @@ void __init poking_init(void)
static unsigned long get_patch_pfn(void *addr)
{
- if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+ if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
return vmalloc_to_pfn(addr);
else
return __pa_symbol(addr) >> PAGE_SHIFT;
@@ -372,9 +372,32 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
}
NOKPROBE_SYMBOL(patch_instruction);
+static int patch_memset64(u64 *addr, u64 val, size_t count)
+{
+ for (u64 *end = addr + count; addr < end; addr++)
+ __put_kernel_nofault(addr, &val, u64, failed);
+
+ return 0;
+
+failed:
+ return -EPERM;
+}
+
+static int patch_memset32(u32 *addr, u32 val, size_t count)
+{
+ for (u32 *end = addr + count; addr < end; addr++)
+ __put_kernel_nofault(addr, &val, u32, failed);
+
+ return 0;
+
+failed:
+ return -EPERM;
+}
+
static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
unsigned long start = (unsigned long)patch_addr;
+ int err;
/* Repeat instruction */
if (repeat_instr) {
@@ -383,19 +406,19 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
if (ppc_inst_prefixed(instr)) {
u64 val = ppc_inst_as_ulong(instr);
- memset64((u64 *)patch_addr, val, len / 8);
+ err = patch_memset64((u64 *)patch_addr, val, len / 8);
} else {
u32 val = ppc_inst_val(instr);
- memset32(patch_addr, val, len / 4);
+ err = patch_memset32(patch_addr, val, len / 4);
}
} else {
- memcpy(patch_addr, code, len);
+ err = copy_to_kernel_nofault(patch_addr, code, len);
}
smp_wmb(); /* smp write barrier */
flush_icache_range(start, start + len);
- return 0;
+ return err;
}
/*
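A sketch of why the *_nofault variants are used here: unlike memset32()/memcpy(), they return an error instead of taking an unhandled fault if the temporary patch mapping is not writable (illustrative call site, not part of the patch):

    u32 nop = PPC_RAW_NOP();
    int err = patch_memset32(patch_addr, nop, 4);   /* write four 32-bit nops */
    if (err)                                        /* -EPERM if the poke area faults */
            pr_warn("instruction patching failed\n");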
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 4f82581ca203..b7201ba50b2e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -25,6 +25,13 @@
#include <asm/firmware.h>
#include <asm/inst.h>
+/*
+ * Used to generate warnings if mmu or cpu feature check functions that
+ * use static keys before they are initialized.
+ */
+bool static_key_feature_checks_initialized __read_mostly;
+EXPORT_SYMBOL_GPL(static_key_feature_checks_initialized);
+
struct fixup_entry {
unsigned long mask;
unsigned long value;
@@ -679,6 +686,7 @@ void __init setup_feature_keys(void)
jump_label_init();
cpu_feature_keys_init();
mmu_feature_keys_init();
+ static_key_feature_checks_initialized = true;
}
static int __init check_features(void)
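A hypothetical guard built on the new flag; a feature-check helper could warn when it is used before setup_feature_keys() has initialised the static keys:

    static inline void assert_feature_keys_ready(void)
    {
            /* fires only when a static-key based check runs too early */
            VM_WARN_ON(!static_key_feature_checks_initialized);
    }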
diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
index c44823292f73..f76030087f98 100644
--- a/arch/powerpc/lib/test-code-patching.c
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -347,6 +347,97 @@ static void __init test_prefixed_patching(void)
check(!memcmp(iptr, expected, sizeof(expected)));
}
+static void __init test_multi_instruction_patching(void)
+{
+ u32 code[32];
+ void *buf;
+ u32 *addr32;
+ u64 *addr64;
+ ppc_inst_t inst64 = ppc_inst_prefix(OP_PREFIX << 26 | 3UL << 24, PPC_RAW_TRAP());
+ u32 inst32 = PPC_RAW_NOP();
+
+ buf = vzalloc(PAGE_SIZE * 8);
+ check(buf);
+ if (!buf)
+ return;
+
+ /* Test single page 32-bit repeated instruction */
+ addr32 = buf + PAGE_SIZE;
+ check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == inst32);
+ check(addr32[2] == inst32);
+ check(addr32[3] == inst32);
+ check(addr32[4] == 0);
+
+ /* Test single page 64-bit repeated instruction */
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ check(ppc_inst_prefixed(inst64));
+
+ addr64 = buf + PAGE_SIZE * 2;
+ ppc_inst_write(code, inst64);
+ check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+ check(addr64[0] == 0);
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+ check(addr64[4] == 0);
+ }
+
+ /* Test single page memcpy */
+ addr32 = buf + PAGE_SIZE * 3;
+
+ for (int i = 0; i < ARRAY_SIZE(code); i++)
+ code[i] = i + 1;
+
+ check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+ check(addr32[0] == 0);
+ check(!memcmp(&addr32[1], code, sizeof(code)));
+ check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+ /* Test multipage 32-bit repeated instruction */
+ addr32 = buf + PAGE_SIZE * 4 - 8;
+ check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == inst32);
+ check(addr32[2] == inst32);
+ check(addr32[3] == inst32);
+ check(addr32[4] == 0);
+
+ /* Test multipage 64-bit repeated instruction */
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ check(ppc_inst_prefixed(inst64));
+
+ addr64 = buf + PAGE_SIZE * 5 - 8;
+ ppc_inst_write(code, inst64);
+ check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+ check(addr64[0] == 0);
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+ check(addr64[4] == 0);
+ }
+
+ /* Test multipage memcpy */
+ addr32 = buf + PAGE_SIZE * 6 - 12;
+
+ for (int i = 0; i < ARRAY_SIZE(code); i++)
+ code[i] = i + 1;
+
+ check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+ check(addr32[0] == 0);
+ check(!memcmp(&addr32[1], code, sizeof(code)));
+ check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+ vfree(buf);
+}
+
static int __init test_code_patching(void)
{
pr_info("Running code patching self-tests ...\n");
@@ -356,6 +447,7 @@ static int __init test_code_patching(void)
test_create_function_call();
test_translate_branch();
test_prefixed_patching();
+ test_multi_instruction_patching();
return 0;
}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 503a6e249940..0fe2f085c05a 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -3,8 +3,6 @@
# Makefile for the linux ppc-specific parts of the memory manager.
#
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
obj-y := fault.o mem.o pgtable.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 100f999871bc..625fe7d08e06 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -184,7 +184,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
static bool is_module_segment(unsigned long addr)
{
- if (!IS_ENABLED(CONFIG_MODULES))
+ if (!IS_ENABLED(CONFIG_EXECMEM))
return false;
if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
return false;
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index cad2abc1730f..33af5795856a 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := $(NO_MINIMAL_TOC)
-
obj-y += mmu_context.o pgtable.o trace.o
ifdef CONFIG_PPC_64S_HASH_MMU
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 83823db3488b..2975ea0841ba 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -170,6 +170,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
unsigned long old_pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return __pmd(old_pmd);
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index c0b58afb9a47..ef3ce37f1bb3 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -282,12 +282,10 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long found, next_end;
- struct vm_unmapped_area_info info;
-
- info.flags = 0;
- info.length = len;
- info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
- info.align_offset = 0;
+ struct vm_unmapped_area_info info = {
+ .length = len,
+ .align_mask = PAGE_MASK & ((1ul << pshift) - 1),
+ };
/*
* Check till the allow max value for this mmap request
*/
@@ -326,13 +324,13 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long found, prev;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {
+ .flags = VM_UNMAPPED_AREA_TOPDOWN,
+ .length = len,
+ .align_mask = PAGE_MASK & ((1ul << pshift) - 1),
+ };
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
- info.align_offset = 0;
/*
* If we are trying to allocate above DEFAULT_MAP_WINDOW
* Add the different to the mmap_base.
diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c
index 15189592da09..7186516eca52 100644
--- a/arch/powerpc/mm/cacheflush.c
+++ b/arch/powerpc/mm/cacheflush.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_HIGHMEM
/**
- * flush_dcache_icache_phys() - Flush a page by it's physical address
+ * flush_dcache_icache_phys() - Flush a page by its physical address
* @physaddr: the physical address of the page
*/
static void flush_dcache_icache_phys(unsigned long physaddr)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 53335ae21a40..215690452495 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -71,23 +71,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add
return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}
-static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
+static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
+ struct mm_struct *mm, struct vm_area_struct *vma)
{
- struct mm_struct *mm = current->mm;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- mmap_read_unlock(mm);
+ if (mm)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
return __bad_area_nosemaphore(regs, address, si_code);
}
static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
+ struct mm_struct *mm,
struct vm_area_struct *vma)
{
- struct mm_struct *mm = current->mm;
int pkey;
/*
@@ -109,7 +112,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
*/
pkey = vma_pkey(vma);
- mmap_read_unlock(mm);
+ if (mm)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
/*
* If we are in kernel mode, bail out with a SEGV, this will
@@ -124,9 +130,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
return 0;
}
-static noinline int bad_access(struct pt_regs *regs, unsigned long address)
+static noinline int bad_access(struct pt_regs *regs, unsigned long address,
+ struct mm_struct *mm, struct vm_area_struct *vma)
{
- return __bad_area(regs, address, SEGV_ACCERR);
+ return __bad_area(regs, address, SEGV_ACCERR, mm, vma);
}
static int do_sigbus(struct pt_regs *regs, unsigned long address,
@@ -479,13 +486,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
if (unlikely(access_pkey_error(is_write, is_exec,
(error_code & DSISR_KEYFAULT), vma))) {
- vma_end_read(vma);
- goto lock_mmap;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ return bad_access_pkey(regs, address, NULL, vma);
}
if (unlikely(access_error(is_write, is_exec, vma))) {
- vma_end_read(vma);
- goto lock_mmap;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ return bad_access(regs, address, NULL, vma);
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
@@ -521,10 +528,10 @@ retry:
if (unlikely(access_pkey_error(is_write, is_exec,
(error_code & DSISR_KEYFAULT), vma)))
- return bad_access_pkey(regs, address, vma);
+ return bad_access_pkey(regs, address, mm, vma);
if (unlikely(access_error(is_write, is_exec, vma)))
- return bad_access(regs, address);
+ return bad_access(regs, address, mm, vma);
/*
* If for any reason at all we couldn't handle the fault,
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
index 11519e88dc6b..43c03b84ff32 100644
--- a/arch/powerpc/mm/kasan/init_book3e_64.c
+++ b/arch/powerpc/mm/kasan/init_book3e_64.c
@@ -112,7 +112,7 @@ void __init kasan_init(void)
pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
for_each_mem_range(i, &start, &end)
- kasan_init_phys_region((void *)start, (void *)end);
+ kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end));
if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE);
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
index 9300d641cf9a..3fb5ce4f48f4 100644
--- a/arch/powerpc/mm/kasan/init_book3s_64.c
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -62,7 +62,7 @@ void __init kasan_init(void)
}
for_each_mem_range(i, &start, &end)
- kasan_init_phys_region((void *)start, (void *)end);
+ kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end));
for (i = 0; i < PTRS_PER_PTE; i++)
__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3a440004b97d..d325217ab201 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -16,6 +16,8 @@
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
+#include <linux/execmem.h>
+#include <linux/vmalloc.h>
#include <asm/swiotlb.h>
#include <asm/machdep.h>
@@ -30,7 +32,7 @@
#include <mm/mmu_decl.h>
-unsigned long long memory_limit;
+unsigned long long memory_limit __initdata;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -406,3 +408,66 @@ int devmem_is_allowed(unsigned long pfn)
* the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
*/
EXPORT_SYMBOL_GPL(walk_system_ram_range);
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+ unsigned long fallback_start = 0, fallback_end = 0;
+ unsigned long start, end;
+
+ /*
+ * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
+ * allow allocating data in the entire vmalloc space
+ */
+#ifdef MODULES_VADDR
+ unsigned long limit = (unsigned long)_etext - SZ_32M;
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+ /* First try within 32M limit from _etext to avoid branch trampolines */
+ if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
+ start = limit;
+ fallback_start = MODULES_VADDR;
+ fallback_end = MODULES_END;
+ } else {
+ start = MODULES_VADDR;
+ }
+
+ end = MODULES_END;
+#else
+ start = VMALLOC_START;
+ end = VMALLOC_END;
+#endif
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = start,
+ .end = end,
+ .pgprot = prot,
+ .alignment = 1,
+ .fallback_start = fallback_start,
+ .fallback_end = fallback_end,
+ },
+ [EXECMEM_KPROBES] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = kprobes_prot,
+ .alignment = 1,
+ },
+ [EXECMEM_MODULE_DATA] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
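The ranges registered above feed the generic execmem allocator: EXECMEM_DEFAULT text allocations are first tried within 32M below _etext so direct branches reach kernel text without trampolines, and fall back to the full MODULES_VADDR..MODULES_END window. A minimal caller sketch, assuming only the generic execmem_alloc()/execmem_free() interface from linux/execmem.h (helper names are illustrative):

#include <linux/execmem.h>

/* Allocate executable memory; the near-text range is preferred and the
 * fallback range is used when it is exhausted. */
static void *alloc_exec_text(size_t size)
{
	return execmem_alloc(EXECMEM_DEFAULT, size);
}

static void free_exec_text(void *ptr)
{
	execmem_free(ptr);
}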
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index f3894e79d5f7..b3f0498dd42f 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
obj-y += mmu_context.o tlb.o tlb_low.o kup.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index cdff129abb14..5c8d1bb98b3e 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -376,7 +376,7 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
}
- /* Copy the kernel to it's new location and run */
+ /* Copy the kernel to its new location and run */
memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9b99113cb51a..6621cfc3baf8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -102,7 +102,7 @@ struct page *p4d_page(p4d_t p4d)
{
if (p4d_leaf(p4d)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
- VM_WARN_ON(!p4d_huge(p4d));
+ VM_WARN_ON(!p4d_leaf(p4d));
return pte_page(p4d_pte(p4d));
}
return virt_to_page(p4d_pgtable(p4d));
@@ -113,7 +113,7 @@ struct page *pud_page(pud_t pud)
{
if (pud_leaf(pud)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
- VM_WARN_ON(!pud_huge(pud));
+ VM_WARN_ON(!pud_leaf(pud));
return pte_page(pud_pte(pud));
}
return virt_to_page(pud_pgtable(pud));
@@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd)
* enabled so these checks can't be used.
*/
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
- VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
+ VM_WARN_ON(!pmd_leaf(pmd));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index 9a601587836b..a6baa6166d94 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -491,7 +491,7 @@ static void walk_vmemmap(struct pg_state *st)
* Traverse the vmemmaped memory and dump pages that are in the hash
* pagetable.
*/
- while (ptr->list) {
+ while (ptr) {
hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize);
ptr = ptr->list;
}
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 0f9a21783329..984655419da5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -359,3 +359,13 @@ void bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_far_kfunc_call(void)
+{
+ return IS_ENABLED(CONFIG_PPC64);
+}
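With bpf_jit_supports_kfunc_call() now true, BPF programs that call kernel functions (kfuncs) are JIT-able on powerpc, and on PPC64 the far-call support reaches kfuncs outside core kernel text as well. A minimal program sketch, assuming the usual libbpf conventions (vmlinux.h, __ksym) and using the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs purely as an example:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* kfuncs are declared as __ksym externs and resolved at load time. */
extern void bpf_rcu_read_lock(void) __ksym;
extern void bpf_rcu_read_unlock(void) __ksym;

SEC("tc")
int kfunc_demo(struct __sk_buff *skb)
{
	bpf_rcu_read_lock();	/* emitted as a kfunc call by the JIT */
	bpf_rcu_read_unlock();
	return 0;
}

char LICENSE[] SEC("license") = "GPL";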
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 2f39c50ca729..43b97032a91c 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -450,10 +450,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
}
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
break;
@@ -467,10 +473,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (is_power_of_2((u32)imm)) {
- EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
+ if (off)
+ EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
+ else
+ EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
} else {
PPC_LI32(_R0, imm);
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
@@ -480,11 +492,19 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
+ EMIT(PPC_RAW_ADDZE(_R0, _R0));
+ EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
+ EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else {
imm = ilog2((u32)imm);
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
@@ -497,11 +517,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
imm = -imm;
if (!is_power_of_2(imm))
return -EOPNOTSUPP;
- if (imm == 1)
+ if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
- else
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
+ EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
+ } else {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
break;
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (!imm)
@@ -727,15 +757,30 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* MOV
*/
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
- if (dst_reg == src_reg)
- break;
- EMIT(PPC_RAW_MR(dst_reg, src_reg));
- EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ if (off == 8) {
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 16) {
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 32 && dst_reg == src_reg) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (off == 32) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (dst_reg != src_reg) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ }
break;
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
/* special mov32 for zext */
if (imm == 1)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else if (off == 8)
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ else if (off == 16)
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
else if (dst_reg != src_reg)
EMIT(PPC_RAW_MR(dst_reg, src_reg));
break;
@@ -751,6 +796,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
/* Copy 16 bits to upper part */
@@ -785,6 +831,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
+ if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
@@ -918,11 +966,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/*
@@ -931,7 +985,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* load only if addr is kernel address (see is_kernel_addr()), otherwise
* set dst_reg=0 and move on.
*/
- if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
PPC_LI32(_R0, TASK_SIZE - off);
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
@@ -953,30 +1007,48 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* as there are two load instructions for dst_reg_h & dst_reg
* respectively.
*/
- if (size == BPF_DW)
+ if (size == BPF_DW ||
+ (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
PPC_JMP((ctx->idx + 3) * 4);
else
PPC_JMP((ctx->idx + 2) * 4);
}
- switch (size) {
- case BPF_B:
- EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
- break;
- case BPF_H:
- EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
- break;
- case BPF_W:
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
- break;
- case BPF_DW:
- EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
- break;
- }
+ if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ }
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
- if (size != BPF_DW && !fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+ }
+ if (size != BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
if (BPF_MODE(code) == BPF_PROBE_MEM) {
int insn_idx = ctx->idx - 1;
@@ -1068,6 +1140,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]);
break;
+ case BPF_JMP32 | BPF_JA:
+ PPC_JMP(addrs[i + 1 + imm]);
+ break;
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
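In the hunks above, a non-zero off selects the cpuv4 signed forms: BPF_DIV/BPF_MOD emit divw/srawi instead of divwu/srwi, and the srawi/addze/slwi/sub sequence computes a signed remainder by a power of two with the quotient rounded toward zero. A C illustration of the identity those instructions implement (not kernel code; assumes arithmetic right shift of negative values):

#include <stdint.h>

static int32_t signed_mod_pow2(int32_t x, unsigned int k)
{
	int32_t q = x >> k;			/* srawi: rounds toward -inf */

	if (x < 0 && (x & ((1 << k) - 1)))
		q += 1;				/* addze: round toward zero  */
	return x - q * (int32_t)(1 << k);	/* slwi + sub: the remainder */
}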
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 79f23974a320..8afc14a4a125 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -202,29 +202,47 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
+static int
+bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
long reladdr;
- if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
+ if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
return -EINVAL;
- if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
- reladdr = func_addr - CTX_NIA(ctx);
-
- if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
- pr_err("eBPF: address of %ps out of range of pcrel address.\n",
- (void *)func);
- return -ERANGE;
- }
- /* pla r12,addr */
- EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
- EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTR());
+#ifdef CONFIG_PPC_KERNEL_PCREL
+ reladdr = func_addr - local_paca->kernelbase;
+ if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
+ EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
+ /* Align for subsequent prefix instruction */
+ if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
+ EMIT(PPC_RAW_NOP());
+ /* paddi r12,r12,addr */
+ EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
+ EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
} else {
+ unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
+ bool alignment_needed = !IS_ALIGNED(pc, 8);
+
+ reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
+
+ if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
+ if (alignment_needed)
+ EMIT(PPC_RAW_NOP());
+ /* pla r12,addr */
+ EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
+ EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
+ } else {
+ /* We can clobber r12 */
+ PPC_LI64(_R12, func);
+ }
+ }
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+#else
+ if (core_kernel_text(func_addr)) {
reladdr = func_addr - kernel_toc_addr();
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
@@ -235,7 +253,32 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
+ } else {
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
+ /* func points to the function descriptor */
+ PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
+ /* Load actual entry point from function descriptor */
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
+ /* ... and move it to CTR */
+ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
+ /*
+ * Load TOC from function descriptor at offset 8.
+ * We can clobber r2 since we get called through a
+ * function pointer (so caller will save/restore r2).
+ */
+ EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
+ } else {
+ PPC_LI64(_R12, func);
+ EMIT(PPC_RAW_MTCTR(_R12));
+ }
+ EMIT(PPC_RAW_BCTRL());
+ /*
+ * Load r2 with kernel TOC as kernel TOC is used if function address falls
+ * within core kernel text.
+ */
+ EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
}
+#endif
return 0;
}
@@ -285,7 +328,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
int b2p_index = bpf_to_ppc(BPF_REG_3);
int bpf_tailcall_prologue_size = 8;
- if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
@@ -993,7 +1036,7 @@ emit_clear:
return ret;
if (func_addr_fixed)
- ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
else
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
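On the non-PCREL path, a callee outside core kernel text is reached through its ELF ABI v1 function descriptor when the kernel is built for ABI v1: offset 0 holds the entry point moved into CTR, offset 8 holds the callee's TOC loaded into r2, and the kernel TOC is restored from the paca after the call. A sketch of the descriptor layout being dereferenced above (field names illustrative; the kernel's own struct func_desc is the authoritative definition):

/* ELFv1 "OPD" entry as used by the call sequence above. */
struct elfv1_func_desc {
	unsigned long entry;	/* code address, moved to CTR       */
	unsigned long toc;	/* callee's TOC, loaded into r2     */
	unsigned long env;	/* environment pointer, unused here */
};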
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 8f75e9574c27..8c1f3b629fc7 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -279,7 +279,7 @@ static void __init mpc512x_setup_diu(void)
* and so negatively affect boot time. Instead we reserve the
* already configured frame buffer area so that it won't be
* destroyed. The starting address of the area to reserve and
- * also it's length is passed to memblock_reserve(). It will be
+ * also its length is passed to memblock_reserve(). It will be
* freed later on first open of fbdev, when splash image is not
* needed any more.
*/
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 0b12647e7b42..0ec2522ee4ad 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -203,7 +203,8 @@ lite5200_wakeup:
/* HIDs, MSR */
LOAD_SPRN(HID1, 0x19)
- LOAD_SPRN(HID2, 0x1a)
+ /* FIXME: Should this use HID2_G2_LE? */
+ LOAD_SPRN(HID2_750FX, 0x1a)
/* address translation is tricky (see turn_on_mmu) */
@@ -283,7 +284,8 @@ SYM_FUNC_START_LOCAL(save_regs)
SAVE_SPRN(HID0, 0x18)
SAVE_SPRN(HID1, 0x19)
- SAVE_SPRN(HID2, 0x1a)
+ /* FIXME: Should this use HID2_G2_LE? */
+ SAVE_SPRN(HID2_750FX, 0x1a)
mfmsr r10
stw r10, (4*0x1b)(r4)
/*SAVE_SPRN(LR, 0x1c) have to save it before the call */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index b4938e344f71..253421ffb4e5 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -12,12 +12,10 @@
#undef DEBUG
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 581059527c36..2bd6abcdc113 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -48,6 +48,7 @@
* the output mode. This driver does not change the output mode setting.
*/
+#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -56,7 +57,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/property.h>
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index bc6bd4d0ae96..6a62ed6082c9 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -68,7 +68,8 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
mfspr r5, SPRN_HID0
mfspr r6, SPRN_HID1
- mfspr r7, SPRN_HID2
+ /* FIXME: Should this use SPRN_HID2_G2_LE? */
+ mfspr r7, SPRN_HID2_750FX
stw r5, SS_HID+0(r3)
stw r6, SS_HID+4(r3)
@@ -396,7 +397,8 @@ mpc83xx_deep_resume:
mtspr SPRN_HID0, r5
mtspr SPRN_HID1, r6
- mtspr SPRN_HID2, r7
+ /* FIXME: Should this use SPRN_HID2_G2_LE? */
+ mtspr SPRN_HID2_750FX, r7
lwz r4, SS_IABR+0(r3)
lwz r5, SS_IABR+4(r3)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 40aa58206888..e52b848b64b7 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
hard_irq_disable();
mpic_teardown_this_cpu(secondary);
+#ifdef CONFIG_CRASH_DUMP
if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
/*
* We enter the crash kernel on whatever cpu crashed,
@@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
*/
disable_threadbit = 1;
disable_cpu = cpu_first_thread_sibling(cpu);
- } else if (sibling != crashing_cpu &&
- cpu_thread_in_core(cpu) == 0 &&
- cpu_thread_in_core(sibling) != 0) {
+ } else if (sibling == crashing_cpu) {
+ return;
+ }
+#endif
+ if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
disable_threadbit = 2;
disable_cpu = sibling;
}
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 1202a69b0a20..4cd9c0de22c2 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -424,23 +424,6 @@ static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
cell_iommu_enable_hardware(iommu);
}
-#if 0/* Unused for now */
-static struct iommu_window *find_window(struct cbe_iommu *iommu,
- unsigned long offset, unsigned long size)
-{
- struct iommu_window *window;
-
- /* todo: check for overlapping (but not equal) windows) */
-
- list_for_each_entry(window, &(iommu->windows), list) {
- if (window->offset == offset && window->size == size)
- return window;
- }
-
- return NULL;
-}
-#endif
-
static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
const u32 *ioid;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 30394c6f8894..fee638fd8970 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -54,6 +54,7 @@ static cpumask_t of_spin_map;
/**
* smp_startup_cpu() - start the given cpu
+ * @lcpu: Logical CPU ID of the CPU to be started.
*
* At boot time, there is nothing to do for primary threads which were
* started from Open Firmware. For anything else, call RTAS with the
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 02a8158c469d..7f4e0db8eb08 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1704,23 +1704,11 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
ret = spu_acquire(ctx);
if (ret)
- goto out;
-#if 0
-/* this currently hangs */
- ret = spufs_wait(ctx->mfc_wq,
- ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
- if (ret)
- goto out;
- ret = spufs_wait(ctx->mfc_wq,
- ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
- if (ret)
- goto out;
-#else
- ret = 0;
-#endif
+ return ret;
+
spu_release(ctx);
-out:
- return ret;
+
+ return 0;
}
static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 99bd027a7f7c..610ca8570682 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
}
/**
- * spu_deactivate - unbind a context from it's physical spu
+ * spu_deactivate - unbind a context from its physical spu
* @ctx: spu context to unbind
*
* Unbind @ctx from the physical spu it is running on and schedule
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index b911b31717cc..b9ff37c7f6f0 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -595,7 +595,7 @@ void __init maple_pci_init(void)
/* Probe root PCI hosts, that is on U3 the AGP host and the
* HyperTransport host. That one is actually "kept" around
- * and actually added last as it's resource management relies
+ * and actually added last as its resource management relies
* on the AGP resources to have been setup first
*/
root = of_find_node_by_path("/");
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 7135ea1d7db6..2202bf77c7a3 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -2,7 +2,7 @@
/*
* Support for the interrupt controllers found on Power Macintosh,
* currently Apple's "Grand Central" interrupt controller in all
- * it's incarnations. OpenPIC support used on newer machines is
+ * its incarnations. OpenPIC support used on newer machines is
* in a separate file
*
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index d497a60003d2..822ed70cdcbf 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
* memory location containing the PC to resume from
* at address 0.
* - On Core99, we must store the wakeup vector at
- * address 0x80 and eventually it's parameters
+ * address 0x80 and eventually its parameters
* at address 0x84. I've have some trouble with those
* parameters however and I no longer use them.
*/
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
index 964f464b1b0e..c9c1dfb35464 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.c
+++ b/arch/powerpc/platforms/powernv/opal-fadump.c
@@ -513,8 +513,8 @@ out:
final_note(note_buf);
pr_debug("Updating elfcore header (%llx) with cpu notes\n",
- fdh->elfcorehdr_addr);
- fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
+ fadump_conf->elfcorehdr_addr);
+ fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr);
return 0;
}
@@ -526,12 +526,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf)
if (!opal_fdm_active || !fadump_conf->fadumphdr_addr)
return rc;
- /* Validate the fadump crash info header */
fdh = __va(fadump_conf->fadumphdr_addr);
- if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
- pr_err("Crash info header is not valid.\n");
- return rc;
- }
#ifdef CONFIG_OPAL_CORE
/*
@@ -545,18 +540,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf)
kernel_initiated = true;
#endif
- rc = opal_fadump_build_cpu_notes(fadump_conf, fdh);
- if (rc)
- return rc;
-
- /*
- * We are done validating dump info and elfcore header is now ready
- * to be exported. set elfcorehdr_addr so that vmcore module will
- * export the elfcore header through '/proc/vmcore'.
- */
- elfcorehdr_addr = fdh->elfcorehdr_addr;
-
- return rc;
+ return opal_fadump_build_cpu_notes(fadump_conf, fdh);
}
static void opal_fadump_region_show(struct fw_dump *fadump_conf,
@@ -615,6 +599,12 @@ static void opal_fadump_trigger(struct fadump_crash_info_header *fdh,
pr_emerg("No backend support for MPIPL!\n");
}
+/* FADUMP_MAX_MEM_REGS or lower */
+static int opal_fadump_max_boot_mem_rgns(void)
+{
+ return FADUMP_MAX_MEM_REGS;
+}
+
static struct fadump_ops opal_fadump_ops = {
.fadump_init_mem_struct = opal_fadump_init_mem_struct,
.fadump_get_metadata_size = opal_fadump_get_metadata_size,
@@ -627,6 +617,7 @@ static struct fadump_ops opal_fadump_ops = {
.fadump_process = opal_fadump_process,
.fadump_region_show = opal_fadump_region_show,
.fadump_trigger = opal_fadump_trigger,
+ .fadump_max_boot_mem_rgns = opal_fadump_max_boot_mem_rgns,
};
void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
@@ -674,8 +665,10 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
}
}
- fadump_conf->ops = &opal_fadump_ops;
- fadump_conf->fadump_supported = 1;
+ fadump_conf->ops = &opal_fadump_ops;
+ fadump_conf->fadump_supported = 1;
+ /* TODO: Add support to pass additional parameters */
+ fadump_conf->param_area_supported = 0;
/*
* Firmware supports 32-bit field for size. Align it to PAGE_SIZE
diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
index 59882da3e742..cc7b1dd54ac6 100644
--- a/arch/powerpc/platforms/powernv/pci-sriov.c
+++ b/arch/powerpc/platforms/powernv/pci-sriov.c
@@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
} else if (pdev->is_physfn) {
/*
* For PFs adjust their allocated IOV resources to match what
- * the PHB can support using it's M64 BAR table.
+ * the PHB can support using its M64 BAR table.
*/
pnv_pci_ioda_fixup_iov_resources(pdev);
}
@@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
list_add_tail(&pe->list, &phb->ioda.pe_list);
mutex_unlock(&phb->ioda.pe_list_mutex);
- /* associate this pe to it's pdn */
+ /* associate this pe to its pdn */
list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
if (vf_pdn->busno == vf_bus &&
vf_pdn->devfn == vf_devfn) {
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index b664838008c1..5147df3a18ac 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
}
} else {
/*
- * Interrupt hanlder or fault window setup failed. Means
+ * Interrupt handler or fault window setup failed. Means
* NX can not generate fault for page fault. So not
* opening for user space tx window.
*/
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 878bc160246e..b18e1c92e554 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -770,49 +770,51 @@ static struct task_struct *probe_task;
static int ps3_probe_thread(void *data)
{
- struct ps3_notification_device dev;
+ struct {
+ struct ps3_notification_device dev;
+ u8 buf[512];
+ } *local;
+ struct ps3_notify_cmd *notify_cmd;
+ struct ps3_notify_event *notify_event;
int res;
unsigned int irq;
u64 lpar;
- void *buf;
- struct ps3_notify_cmd *notify_cmd;
- struct ps3_notify_event *notify_event;
pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__);
- buf = kzalloc(512, GFP_KERNEL);
- if (!buf)
+ local = kzalloc(sizeof(*local), GFP_KERNEL);
+ if (!local)
return -ENOMEM;
- lpar = ps3_mm_phys_to_lpar(__pa(buf));
- notify_cmd = buf;
- notify_event = buf;
+ lpar = ps3_mm_phys_to_lpar(__pa(&local->buf));
+ notify_cmd = (struct ps3_notify_cmd *)&local->buf;
+ notify_event = (struct ps3_notify_event *)&local->buf;
/* dummy system bus device */
- dev.sbd.bus_id = (u64)data;
- dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID;
- dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID;
+ local->dev.sbd.bus_id = (u64)data;
+ local->dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID;
+ local->dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID;
- res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0);
+ res = lv1_open_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id, 0);
if (res) {
pr_err("%s:%u: lv1_open_device failed %s\n", __func__,
__LINE__, ps3_result(res));
goto fail_free;
}
- res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY,
- &irq);
+ res = ps3_sb_event_receive_port_setup(&local->dev.sbd,
+ PS3_BINDING_CPU_ANY, &irq);
if (res) {
pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
__func__, __LINE__, res);
goto fail_close_device;
}
- spin_lock_init(&dev.lock);
- rcuwait_init(&dev.wait);
+ spin_lock_init(&local->dev.lock);
+ rcuwait_init(&local->dev.wait);
res = request_irq(irq, ps3_notification_interrupt, 0,
- "ps3_notification", &dev);
+ "ps3_notification", &local->dev);
if (res) {
pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
res);
@@ -823,7 +825,7 @@ static int ps3_probe_thread(void *data)
notify_cmd->operation_code = 0; /* must be zero */
notify_cmd->event_mask = 1UL << notify_region_probe;
- res = ps3_notification_read_write(&dev, lpar, 1);
+ res = ps3_notification_read_write(&local->dev, lpar, 1);
if (res)
goto fail_free_irq;
@@ -834,36 +836,37 @@ static int ps3_probe_thread(void *data)
memset(notify_event, 0, sizeof(*notify_event));
- res = ps3_notification_read_write(&dev, lpar, 0);
+ res = ps3_notification_read_write(&local->dev, lpar, 0);
if (res)
break;
pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu"
" type %llu port %llu\n", __func__, __LINE__,
- notify_event->event_type, notify_event->bus_id,
- notify_event->dev_id, notify_event->dev_type,
- notify_event->dev_port);
+ notify_event->event_type, notify_event->bus_id,
+ notify_event->dev_id, notify_event->dev_type,
+ notify_event->dev_port);
if (notify_event->event_type != notify_region_probe ||
- notify_event->bus_id != dev.sbd.bus_id) {
+ notify_event->bus_id != local->dev.sbd.bus_id) {
pr_warn("%s:%u: bad notify_event: event %llu, dev_id %llu, dev_type %llu\n",
__func__, __LINE__, notify_event->event_type,
notify_event->dev_id, notify_event->dev_type);
continue;
}
- ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id);
+ ps3_find_and_add_device(local->dev.sbd.bus_id,
+ notify_event->dev_id);
} while (!kthread_should_stop());
fail_free_irq:
- free_irq(irq, &dev);
+ free_irq(irq, &local->dev);
fail_sb_event_receive_port_destroy:
- ps3_sb_event_receive_port_destroy(&dev.sbd, irq);
+ ps3_sb_event_receive_port_destroy(&local->dev.sbd, irq);
fail_close_device:
- lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id);
+ lv1_close_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id);
fail_free:
- kfree(buf);
+ kfree(local);
probe_task = NULL;
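The probe thread now keeps its dummy notification device and the 512-byte buffer, whose LPAR address is handed to the lv1 calls, in one kzalloc()ed object instead of splitting them between the kthread stack and a separate allocation. A minimal sketch of that combined-allocation pattern, with illustrative names:

#include <linux/slab.h>

/* Single heap object: device state plus the buffer passed to lv1 by
 * LPAR address (so it lives in the linear map, not on the stack). */
struct probe_ctx {
	struct ps3_notification_device dev;
	u8 buf[512];
};

static struct probe_ctx *probe_ctx_alloc(void)
{
	return kzalloc(sizeof(struct probe_ctx), GFP_KERNEL);
}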
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index d6b5f5ecd515..56dc6b29a3e7 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -695,7 +695,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
@@ -709,7 +709,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index f936962a2946..7bf506f6b8c8 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e8c4129697b1..b1e6d275cda9 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -786,8 +786,16 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
* parent bus. During reboot, there will be ibm,dma-window property to
* define DMA window. For kdump, there will at least be default window or DDW
* or both.
+ * There is an exception to the above: if the PE goes into a frozen
+ * state, firmware may not provide the ibm,dma-window property at the
+ * time of LPAR boot up.
*/
+ if (!pdn) {
+ pr_debug(" no ibm,dma-window property !\n");
+ return;
+ }
+
ppci = PCI_DN(pdn);
pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4e9916bb03d7..c1d8bee8f701 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1886,10 +1886,10 @@ out:
* h_get_mpp
* H_GET_MPP hcall returns info in 7 parms
*/
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+long h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
- int rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_MPP, retbuf);
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index f73c4d1c26af..6e7029640c0c 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -113,8 +113,8 @@ struct hvcall_ppp_data {
*/
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
- unsigned long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_PPP, retbuf);
@@ -170,20 +170,24 @@ out:
kfree(buf);
}
-static unsigned h_pic(unsigned long *pool_idle_time,
- unsigned long *num_procs)
+static long h_pic(unsigned long *pool_idle_time,
+ unsigned long *num_procs)
{
- unsigned long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = {0};
rc = plpar_hcall(H_PIC, retbuf);
- *pool_idle_time = retbuf[0];
- *num_procs = retbuf[1];
+ if (pool_idle_time)
+ *pool_idle_time = retbuf[0];
+ if (num_procs)
+ *num_procs = retbuf[1];
return rc;
}
+unsigned long boot_pool_idle_time;
+
/*
* parse_ppp_data
* Parse out the data returned from h_get_ppp and h_pic
@@ -193,7 +197,7 @@ static void parse_ppp_data(struct seq_file *m)
struct hvcall_ppp_data ppp_data;
struct device_node *root;
const __be32 *perf_level;
- int rc;
+ long rc;
rc = h_get_ppp(&ppp_data);
if (rc)
@@ -215,9 +219,15 @@ static void parse_ppp_data(struct seq_file *m)
seq_printf(m, "pool_capacity=%d\n",
ppp_data.active_procs_in_pool * 100);
- h_pic(&pool_idle_time, &pool_procs);
- seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
- seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+ /* If the h_pic call is not successful, the APP values reported by
+ * tools like lparstat would be wrong.
+ */
+
+ if (h_pic(&pool_idle_time, &pool_procs) == H_SUCCESS) {
+ seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+ seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+ seq_printf(m, "boot_pool_idle_time=%ld\n", boot_pool_idle_time);
+ }
}
seq_printf(m, "unallocated_capacity_weight=%d\n",
@@ -792,6 +802,7 @@ static const struct proc_ops lparcfg_proc_ops = {
static int __init lparcfg_init(void)
{
umode_t mode = 0444;
+ long retval;
/* Allow writing if we have FW_FEATURE_SPLPAR */
if (firmware_has_feature(FW_FEATURE_SPLPAR))
@@ -801,6 +812,16 @@ static int __init lparcfg_init(void)
printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
return -EIO;
}
+
+ /* If this call fails, the APP values in lparstat's since-boot
+ * reports would be wrong.
+ */
+ retval = h_pic(&boot_pool_idle_time, NULL);
+
+ if (retval != H_SUCCESS)
+ pr_debug("H_PIC failed during lparcfg init retval: %ld\n",
+ retval);
+
return 0;
}
machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index c233f9db039b..9b6420eb3567 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -16,7 +16,8 @@
#include <linux/nd.h>
#include <asm/plpar_wrappers.h>
-#include <asm/papr_pdsm.h>
+#include <uapi/linux/papr_pdsm.h>
+#include <linux/papr_scm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>
#include <linux/perf_event.h>
@@ -29,46 +30,6 @@
(1ul << ND_CMD_SET_CONFIG_DATA) | \
(1ul << ND_CMD_CALL))
-/* DIMM health bitmap indicators */
-/* SCM device is unable to persist memory contents */
-#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
-/* SCM device failed to persist memory contents */
-#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
-/* SCM device contents are persisted from previous IPL */
-#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
-/* SCM device contents are not persisted from previous IPL */
-#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
-/* SCM device memory life remaining is critically low */
-#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
-/* SCM device will be garded off next IPL due to failure */
-#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
-/* SCM contents cannot persist due to current platform health status */
-#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
-/* SCM device is unable to persist memory contents in certain conditions */
-#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
-/* SCM device is encrypted */
-#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
-/* SCM device has been scrubbed and locked */
-#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
-
-/* Bits status indicators for health bitmap indicating unarmed dimm */
-#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
- PAPR_PMEM_HEALTH_UNHEALTHY)
-
-/* Bits status indicators for health bitmap indicating unflushed dimm */
-#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
-
-/* Bits status indicators for health bitmap indicating unrestored dimm */
-#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
-
-/* Bit status indicators for smart event notification */
-#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
- PAPR_PMEM_HEALTH_FATAL | \
- PAPR_PMEM_HEALTH_UNHEALTHY)
-
-#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
-#define PAPR_SCM_PERF_STATS_VERSION 0x1
-
/* Struct holding a single performance metric */
struct papr_scm_perf_stat {
u8 stat_id[8];
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 1772ae3d193d..6dbc73eb2ca2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -18,33 +18,6 @@
#include <asm/pci.h>
#include "pseries.h"
-#if 0
-void pcibios_name_device(struct pci_dev *dev)
-{
- struct device_node *dn;
-
- /*
- * Add IBM loc code (slot) as a prefix to the device names for service
- */
- dn = pci_device_to_OF_node(dev);
- if (dn) {
- const char *loc_code = of_get_property(dn, "ibm,loc-code",
- NULL);
- if (loc_code) {
- int loc_len = strlen(loc_code);
- if (loc_len < sizeof(dev->dev.name)) {
- memmove(dev->dev.name+loc_len+1, dev->dev.name,
- sizeof(dev->dev.name)-loc_len-1);
- memcpy(dev->dev.name, loc_code, loc_len);
- dev->dev.name[loc_len] = ' ';
- dev->dev.name[sizeof(dev->dev.name)-1] = '\0';
- }
- }
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
-#endif
-
#ifdef CONFIG_PCI_IOV
#define MAX_VFS_FOR_MAP_PE 256
struct pe_map_bar_entry {
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
index febe18f251d0..4a595493d28a 100644
--- a/arch/powerpc/platforms/pseries/plpks.c
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -415,8 +415,7 @@ static int plpks_confirm_object_flushed(struct label *label,
break;
}
- usleep_range(PLPKS_FLUSH_SLEEP,
- PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE);
+ fsleep(PLPKS_FLUSH_SLEEP);
timeout = timeout + PLPKS_FLUSH_SLEEP;
} while (timeout < PLPKS_MAX_TIMEOUT);
@@ -464,9 +463,10 @@ int plpks_signed_update_var(struct plpks_var *var, u64 flags)
continuetoken = retbuf[0];
if (pseries_status_to_err(rc) == -EBUSY) {
- int delay_ms = get_longbusy_msecs(rc);
- mdelay(delay_ms);
- timeout += delay_ms;
+ int delay_us = get_longbusy_msecs(rc) * 1000;
+
+ fsleep(delay_us);
+ timeout += delay_us;
}
rc = pseries_status_to_err(rc);
} while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT);
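Both wait loops now use fsleep(), which takes a delay in microseconds and chooses an appropriate sleeping primitive for its length, so the long-busy handling no longer mixes usleep_range() and mdelay(). A minimal sketch of the retry shape, assuming a hypothetical check_status() that returns -EBUSY while the operation is pending:

#include <linux/delay.h>
#include <linux/errno.h>

static int wait_until_done(int (*check_status)(void),
			   unsigned long step_us, unsigned long max_us)
{
	unsigned long waited_us = 0;
	int rc;

	do {
		rc = check_status();
		if (rc != -EBUSY)
			break;
		fsleep(step_us);	/* sleeps; never a long busy-wait */
		waited_us += step_us;
	} while (waited_us < max_us);

	return rc;
}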
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index b5853e9fcc3c..eceb3289383e 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/rtas.h>
+#include <asm/setup.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
@@ -29,9 +30,6 @@ static const struct rtas_fadump_mem_struct *fdm_active;
static void rtas_fadump_update_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
- fadump_conf->boot_mem_dest_addr =
- be64_to_cpu(fdm->rmr_region.destination_address);
-
fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr +
fadump_conf->boot_memory_size);
}
@@ -43,20 +41,56 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf,
static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
- fadump_conf->boot_mem_addr[0] =
- be64_to_cpu(fdm->rmr_region.source_address);
- fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len);
- fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0];
+ unsigned long base, size, last_end, hole_size;
- fadump_conf->boot_mem_top = fadump_conf->boot_memory_size;
- fadump_conf->boot_mem_regs_cnt = 1;
+ last_end = 0;
+ hole_size = 0;
+ fadump_conf->boot_memory_size = 0;
+ fadump_conf->boot_mem_regs_cnt = 0;
+ pr_debug("Boot memory regions:\n");
+ for (int i = 0; i < be16_to_cpu(fdm->header.dump_num_sections); i++) {
+ int type = be16_to_cpu(fdm->rgn[i].source_data_type);
+ u64 addr;
- /*
- * Start address of reserve dump area (permanent reservation) for
- * re-registering FADump after dump capture.
- */
- fadump_conf->reserve_dump_area_start =
- be64_to_cpu(fdm->cpu_state_data.destination_address);
+ switch (type) {
+ case RTAS_FADUMP_CPU_STATE_DATA:
+ addr = be64_to_cpu(fdm->rgn[i].destination_address);
+
+ fadump_conf->cpu_state_dest_vaddr = (u64)__va(addr);
+ /*
+ * Start address of reserve dump area (permanent reservation) for
+ * re-registering FADump after dump capture.
+ */
+ fadump_conf->reserve_dump_area_start = addr;
+ break;
+ case RTAS_FADUMP_HPTE_REGION:
+ /* Not processed currently. */
+ break;
+ case RTAS_FADUMP_REAL_MODE_REGION:
+ base = be64_to_cpu(fdm->rgn[i].source_address);
+ size = be64_to_cpu(fdm->rgn[i].source_len);
+ pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
+ if (!base) {
+ fadump_conf->boot_mem_dest_addr =
+ be64_to_cpu(fdm->rgn[i].destination_address);
+ }
+
+ fadump_conf->boot_mem_addr[fadump_conf->boot_mem_regs_cnt] = base;
+ fadump_conf->boot_mem_sz[fadump_conf->boot_mem_regs_cnt] = size;
+ fadump_conf->boot_memory_size += size;
+ hole_size += (base - last_end);
+ last_end = base + size;
+ fadump_conf->boot_mem_regs_cnt++;
+ break;
+ case RTAS_FADUMP_PARAM_AREA:
+ fadump_conf->param_area = be64_to_cpu(fdm->rgn[i].destination_address);
+ break;
+ default:
+ pr_warn("Section type %d unsupported on this kernel. Ignoring!\n", type);
+ break;
+ }
+ }
+ fadump_conf->boot_mem_top = fadump_conf->boot_memory_size + hole_size;
rtas_fadump_update_config(fadump_conf, fdm);
}
@@ -64,16 +98,15 @@ static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf,
static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
{
u64 addr = fadump_conf->reserve_dump_area_start;
+ u16 sec_cnt = 0;
memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct));
addr = addr & PAGE_MASK;
fdm.header.dump_format_version = cpu_to_be32(0x00000001);
- fdm.header.dump_num_sections = cpu_to_be16(3);
fdm.header.dump_status_flag = 0;
fdm.header.offset_first_dump_section =
- cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct,
- cpu_state_data));
+ cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, rgn));
/*
* Fields for disk dump option.
@@ -89,25 +122,22 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
/* Kernel dump sections */
/* cpu state data section. */
- fdm.cpu_state_data.request_flag =
- cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
- fdm.cpu_state_data.source_data_type =
- cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA);
- fdm.cpu_state_data.source_address = 0;
- fdm.cpu_state_data.source_len =
- cpu_to_be64(fadump_conf->cpu_state_data_size);
- fdm.cpu_state_data.destination_address = cpu_to_be64(addr);
+ fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
+ fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA);
+ fdm.rgn[sec_cnt].source_address = 0;
+ fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->cpu_state_data_size);
+ fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr);
addr += fadump_conf->cpu_state_data_size;
+ sec_cnt++;
/* hpte region section */
- fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
- fdm.hpte_region.source_data_type =
- cpu_to_be16(RTAS_FADUMP_HPTE_REGION);
- fdm.hpte_region.source_address = 0;
- fdm.hpte_region.source_len =
- cpu_to_be64(fadump_conf->hpte_region_size);
- fdm.hpte_region.destination_address = cpu_to_be64(addr);
+ fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
+ fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_HPTE_REGION);
+ fdm.rgn[sec_cnt].source_address = 0;
+ fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->hpte_region_size);
+ fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr);
addr += fadump_conf->hpte_region_size;
+ sec_cnt++;
/*
* Align boot memory area destination address to page boundary to
@@ -115,14 +145,29 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
*/
addr = PAGE_ALIGN(addr);
- /* RMA region section */
- fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
- fdm.rmr_region.source_data_type =
- cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION);
- fdm.rmr_region.source_address = cpu_to_be64(0);
- fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size);
- fdm.rmr_region.destination_address = cpu_to_be64(addr);
- addr += fadump_conf->boot_memory_size;
+ /* First boot memory region destination address */
+ fadump_conf->boot_mem_dest_addr = addr;
+ for (int i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
+ /* Boot memory regions */
+ fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
+ fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION);
+ fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
+ fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
+ fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr);
+ addr += fadump_conf->boot_mem_sz[i];
+ sec_cnt++;
+ }
+
+ /* Parameters area */
+ if (fadump_conf->param_area) {
+ fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
+ fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_PARAM_AREA);
+ fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->param_area);
+ fdm.rgn[sec_cnt].source_len = cpu_to_be64(COMMAND_LINE_SIZE);
+ fdm.rgn[sec_cnt].destination_address = cpu_to_be64(fadump_conf->param_area);
+ sec_cnt++;
+ }
+ fdm.header.dump_num_sections = cpu_to_be16(sec_cnt);
rtas_fadump_update_config(fadump_conf, &fdm);
@@ -136,14 +181,21 @@ static u64 rtas_fadump_get_bootmem_min(void)
static int rtas_fadump_register(struct fw_dump *fadump_conf)
{
- unsigned int wait_time;
+ unsigned int wait_time, fdm_size;
int rc, err = -EIO;
+ /*
+ * Platform requires the exact size of the Dump Memory Structure.
+ * Avoid including any unused rgns in the calculation, as this
+ * could result in a parameter error (-3) from the platform.
+ */
+ fdm_size = sizeof(struct rtas_fadump_section_header);
+ fdm_size += be16_to_cpu(fdm.header.dump_num_sections) * sizeof(struct rtas_fadump_section);
+
/* TODO: Add upper time limit for the delay */
do {
rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1,
- NULL, FADUMP_REGISTER, &fdm,
- sizeof(struct rtas_fadump_mem_struct));
+ NULL, FADUMP_REGISTER, &fdm, fdm_size);
wait_time = rtas_busy_delay_time(rc);
if (wait_time)
@@ -161,9 +213,7 @@ static int rtas_fadump_register(struct fw_dump *fadump_conf)
pr_err("Failed to register. Hardware Error(%d).\n", rc);
break;
case -3:
- if (!is_fadump_boot_mem_contiguous())
- pr_err("Can't have holes in boot memory area.\n");
- else if (!is_fadump_reserved_mem_contiguous())
+ if (!is_fadump_reserved_mem_contiguous())
pr_err("Can't have holes in reserved memory area.\n");
pr_err("Failed to register. Parameter Error(%d).\n", rc);
@@ -316,11 +366,9 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
u32 num_cpus, *note_buf;
int i, rc = 0, cpu = 0;
struct pt_regs regs;
- unsigned long addr;
void *vaddr;
- addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
- vaddr = __va(addr);
+ vaddr = (void *)fadump_conf->cpu_state_dest_vaddr;
reg_header = vaddr;
if (be64_to_cpu(reg_header->magic_number) !=
@@ -375,11 +423,8 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
}
final_note(note_buf);
- if (fdh) {
- pr_debug("Updating elfcore header (%llx) with cpu notes\n",
- fdh->elfcorehdr_addr);
- fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
- }
+ pr_debug("Updating elfcore header (%llx) with cpu notes\n", fadump_conf->elfcorehdr_addr);
+ fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr);
return 0;
error_out:
@@ -389,57 +434,66 @@ error_out:
}
/*
- * Validate and process the dump data stored by firmware before exporting
- * it through '/proc/vmcore'.
+ * Validate and process the dump data stored by the firmware, and update
+ * the CPU notes of elfcorehdr.
*/
static int __init rtas_fadump_process(struct fw_dump *fadump_conf)
{
- struct fadump_crash_info_header *fdh;
- int rc = 0;
-
if (!fdm_active || !fadump_conf->fadumphdr_addr)
return -EINVAL;
/* Check if the dump data is valid. */
- if ((be16_to_cpu(fdm_active->header.dump_status_flag) ==
- RTAS_FADUMP_ERROR_FLAG) ||
- (fdm_active->cpu_state_data.error_flags != 0) ||
- (fdm_active->rmr_region.error_flags != 0)) {
- pr_err("Dump taken by platform is not valid\n");
- return -EINVAL;
- }
- if ((fdm_active->rmr_region.bytes_dumped !=
- fdm_active->rmr_region.source_len) ||
- !fdm_active->cpu_state_data.bytes_dumped) {
- pr_err("Dump taken by platform is incomplete\n");
- return -EINVAL;
- }
+ for (int i = 0; i < be16_to_cpu(fdm_active->header.dump_num_sections); i++) {
+ int type = be16_to_cpu(fdm_active->rgn[i].source_data_type);
+ int rc = 0;
- /* Validate the fadump crash info header */
- fdh = __va(fadump_conf->fadumphdr_addr);
- if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
- pr_err("Crash info header is not valid.\n");
- return -EINVAL;
+ switch (type) {
+ case RTAS_FADUMP_CPU_STATE_DATA:
+ case RTAS_FADUMP_HPTE_REGION:
+ case RTAS_FADUMP_REAL_MODE_REGION:
+ if (fdm_active->rgn[i].error_flags != 0) {
+ pr_err("Dump taken by platform is not valid (%d)\n", i);
+ rc = -EINVAL;
+ }
+ if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len) {
+ pr_err("Dump taken by platform is incomplete (%d)\n", i);
+ rc = -EINVAL;
+ }
+ if (rc) {
+ pr_warn("Region type: %u src addr: 0x%llx dest addr: 0x%llx\n",
+ be16_to_cpu(fdm_active->rgn[i].source_data_type),
+ be64_to_cpu(fdm_active->rgn[i].source_address),
+ be64_to_cpu(fdm_active->rgn[i].destination_address));
+ return rc;
+ }
+ break;
+ case RTAS_FADUMP_PARAM_AREA:
+ if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len ||
+ fdm_active->rgn[i].error_flags != 0) {
+ pr_warn("Failed to process additional parameters! Proceeding anyway..\n");
+ fadump_conf->param_area = 0;
+ }
+ break;
+ default:
+ /*
+ * If the first/crashed kernel added a new region type that the
+ * second/fadump kernel doesn't recognize, skip it and process
+ * assuming backward compatibility.
+ */
+ pr_warn("Unknown region found: type: %u src addr: 0x%llx dest addr: 0x%llx\n",
+ be16_to_cpu(fdm_active->rgn[i].source_data_type),
+ be64_to_cpu(fdm_active->rgn[i].source_address),
+ be64_to_cpu(fdm_active->rgn[i].destination_address));
+ break;
+ }
}
- rc = rtas_fadump_build_cpu_notes(fadump_conf);
- if (rc)
- return rc;
-
- /*
- * We are done validating dump info and elfcore header is now ready
- * to be exported. set elfcorehdr_addr so that vmcore module will
- * export the elfcore header through '/proc/vmcore'.
- */
- elfcorehdr_addr = fdh->elfcorehdr_addr;
-
- return 0;
+ return rtas_fadump_build_cpu_notes(fadump_conf);
}
static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
struct seq_file *m)
{
- const struct rtas_fadump_section *cpu_data_section;
const struct rtas_fadump_mem_struct *fdm_ptr;
if (fdm_active)
@@ -447,27 +501,49 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
else
fdm_ptr = &fdm;
- cpu_data_section = &(fdm_ptr->cpu_state_data);
- seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
- be64_to_cpu(cpu_data_section->destination_address),
- be64_to_cpu(cpu_data_section->destination_address) +
- be64_to_cpu(cpu_data_section->source_len) - 1,
- be64_to_cpu(cpu_data_section->source_len),
- be64_to_cpu(cpu_data_section->bytes_dumped));
-
- seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
- be64_to_cpu(fdm_ptr->hpte_region.destination_address),
- be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
- be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
- be64_to_cpu(fdm_ptr->hpte_region.source_len),
- be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
-
- seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
- be64_to_cpu(fdm_ptr->rmr_region.source_address),
- be64_to_cpu(fdm_ptr->rmr_region.destination_address));
- seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
- be64_to_cpu(fdm_ptr->rmr_region.source_len),
- be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
+
+ for (int i = 0; i < be16_to_cpu(fdm_ptr->header.dump_num_sections); i++) {
+ int type = be16_to_cpu(fdm_ptr->rgn[i].source_data_type);
+
+ switch (type) {
+ case RTAS_FADUMP_CPU_STATE_DATA:
+ seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address),
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address) +
+ be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1,
+ be64_to_cpu(fdm_ptr->rgn[i].source_len),
+ be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped));
+ break;
+ case RTAS_FADUMP_HPTE_REGION:
+ seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address),
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address) +
+ be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1,
+ be64_to_cpu(fdm_ptr->rgn[i].source_len),
+ be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped));
+ break;
+ case RTAS_FADUMP_REAL_MODE_REGION:
+ seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
+ be64_to_cpu(fdm_ptr->rgn[i].source_address),
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address));
+ seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
+ be64_to_cpu(fdm_ptr->rgn[i].source_len),
+ be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped));
+ break;
+ case RTAS_FADUMP_PARAM_AREA:
+ seq_printf(m, "\n[%#016llx-%#016llx]: cmdline append: '%s'\n",
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address),
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address) +
+ be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1,
+ (char *)__va(be64_to_cpu(fdm_ptr->rgn[i].destination_address)));
+ break;
+ default:
+ seq_printf(m, "Unknown region type %d : Src: %#016llx, Dest: %#016llx, ",
+ type, be64_to_cpu(fdm_ptr->rgn[i].source_address),
+ be64_to_cpu(fdm_ptr->rgn[i].destination_address));
+ break;
+ }
+ }
/* Dump is active. Show preserved area start address. */
if (fdm_active) {
@@ -483,6 +559,20 @@ static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh,
rtas_os_term((char *)msg);
}
+/* FADUMP_MAX_MEM_REGS or lower */
+static int rtas_fadump_max_boot_mem_rgns(void)
+{
+ /*
+ * Version 1 of Kernel Assisted Dump Memory Structure (PAPR) supports 10 sections.
+ * With one section each taken for CPU state data and HPTE, 8 sections
+ * can be used for boot memory regions.
+ *
+ * If new regions are defined, the maximum number of boot memory regions decreases
+ * proportionally.
+ */
+ return RTAS_FADUMP_MAX_BOOT_MEM_REGS;
+}
+
static struct fadump_ops rtas_fadump_ops = {
.fadump_init_mem_struct = rtas_fadump_init_mem_struct,
.fadump_get_bootmem_min = rtas_fadump_get_bootmem_min,
@@ -492,6 +582,7 @@ static struct fadump_ops rtas_fadump_ops = {
.fadump_process = rtas_fadump_process,
.fadump_region_show = rtas_fadump_region_show,
.fadump_trigger = rtas_fadump_trigger,
+ .fadump_max_boot_mem_rgns = rtas_fadump_max_boot_mem_rgns,
};
void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
@@ -508,9 +599,10 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
if (!token)
return;
- fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token);
- fadump_conf->ops = &rtas_fadump_ops;
- fadump_conf->fadump_supported = 1;
+ fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token);
+ fadump_conf->ops = &rtas_fadump_ops;
+ fadump_conf->fadump_supported = 1;
+ fadump_conf->param_area_supported = 1;
/* Firmware supports 64-bit value for size, align it to pagesize. */
fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.h b/arch/powerpc/platforms/pseries/rtas-fadump.h
index fd59bd7ca9c3..c109abf6befd 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.h
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.h
@@ -23,12 +23,24 @@
#define RTAS_FADUMP_HPTE_REGION 0x0002
#define RTAS_FADUMP_REAL_MODE_REGION 0x0011
+/* OS defined sections */
+#define RTAS_FADUMP_PARAM_AREA 0x0100
+
/* Dump request flag */
#define RTAS_FADUMP_REQUEST_FLAG 0x00000001
/* Dump status flag */
#define RTAS_FADUMP_ERROR_FLAG 0x2000
+/*
+ * The Firmware Assisted Dump memory structure supports a maximum of 10
+ * sections. Presently, three sections are used for CPU state data, the
+ * HPTE region and the parameter area, while the remaining seven sections
+ * can be used for boot memory regions.
+ */
+#define MAX_SECTIONS 10
+#define RTAS_FADUMP_MAX_BOOT_MEM_REGS 7
+
/* Kernel Dump section info */
struct rtas_fadump_section {
__be32 request_flag;
@@ -61,20 +73,15 @@ struct rtas_fadump_section_header {
* Firmware Assisted dump memory structure. This structure is required for
* registering future kernel dump with power firmware through rtas call.
*
- * No disk dump option. Hence disk dump path string section is not included.
+ * In version 1, the platform permits one section header, dump-disk path
+ * and ten sections.
+ *
+ * Note: As there is no disk dump option, the disk dump path string
+ * section is not included.
*/
struct rtas_fadump_mem_struct {
struct rtas_fadump_section_header header;
-
- /* Kernel dump sections */
- struct rtas_fadump_section cpu_state_data;
- struct rtas_fadump_section hpte_region;
-
- /*
- * TODO: Extend multiple boot memory regions support in the kernel
- * for this platform.
- */
- struct rtas_fadump_section rmr_region;
+ struct rtas_fadump_section rgn[MAX_SECTIONS];
};
/*
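[Editorial aside, not part of the patch above: a minimal stand-alone C sketch of the section accounting that MAX_SECTIONS and RTAS_FADUMP_MAX_BOOT_MEM_REGS encode. The EX_* names are hypothetical; the only assumption is the arithmetic stated in the comments — 10 section slots, minus one each for CPU state data, the HPTE region and the parameter area, leaving 7 for boot memory regions, which is what rtas_fadump_max_boot_mem_rgns() reports.]

#include <assert.h>

/* Hypothetical mirrors of MAX_SECTIONS and the three reserved sections. */
enum {
	EX_MAX_SECTIONS      = 10,
	EX_RESERVED_SECTIONS = 3,	/* CPU state data, HPTE, parameter area */
};

/* Capacity left over for boot memory regions. */
static_assert(EX_MAX_SECTIONS - EX_RESERVED_SECTIONS == 7,
	      "seven sections remain for boot memory regions");

int main(void)
{
	return 0;
}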
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index 71d52a670d95..ba3fb7a7f2ea 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -228,7 +228,7 @@ static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
struct pseries_vas_window *txwin = data;
/*
- * The thread hanlder will process this interrupt if it is
+ * The thread handler will process this interrupt if it is
* already running.
*/
atomic_inc(&txwin->pending_faults);
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 90ff85c879bf..36d1c7d4156b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.get_required_mask = dma_iommu_get_required_mask,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
@@ -1592,13 +1592,9 @@ static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
const char *cp;
dn = dev->of_node;
- if (!dn)
- return -ENODEV;
- cp = of_get_property(dn, "compatible", NULL);
- if (!cp)
- return -ENODEV;
+ if (dn && (cp = of_get_property(dn, "compatible", NULL)))
+ add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
- add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
return 0;
}
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 78473d69cd2b..e9890085953e 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -1,8 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
targets += trampoline_$(BITS).o purgatory.ro
# When profile-guided optimization is enabled, llvm emits two different
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 9cb1d029511a..24a177d164f1 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o
obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
obj-$(CONFIG_MPIC_TIMER) += mpic_timer.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 98096bbfd62e..c0d10c149661 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -24,7 +24,6 @@
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
-#include <linux/kmemleak.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/iommu.h>
@@ -243,9 +242,6 @@ static void __init allocate_dart(void)
if (!dart_tablebase)
panic("Failed to allocate 16MB below 2GB for DART table\n");
- /* There is no point scanning the DART space for leaks*/
- kmemleak_no_scan((void *)dart_tablebase);
-
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
* prefetching into invalid pages and corrupting data
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 39186ad6b3c3..3dabc9621810 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -77,7 +77,7 @@ struct gtm {
static LIST_HEAD(gtms);
/**
- * gtm_get_timer - request GTM timer to use it with the rest of GTM API
+ * gtm_get_timer16 - request GTM timer to use it with the rest of GTM API
* Context: non-IRQ
*
* This function reserves GTM timer for later use. It returns gtm_timer
@@ -110,7 +110,7 @@ struct gtm_timer *gtm_get_timer16(void)
EXPORT_SYMBOL(gtm_get_timer16);
/**
- * gtm_get_specific_timer - request specific GTM timer
+ * gtm_get_specific_timer16 - request specific GTM timer
* @gtm: specific GTM, pass here GTM's device_node->data
* @timer: specific timer number, Timer1 is 0.
* Context: non-IRQ
@@ -260,7 +260,7 @@ int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, bool reload)
EXPORT_SYMBOL(gtm_set_timer16);
/**
- * gtm_set_exact_utimer16 - (re)set 16 bits timer
+ * gtm_set_exact_timer16 - (re)set 16 bits timer
* @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer
* @usec: timer interval in microseconds
* @reload: if set, the timer will reset upon expiry rather than
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 8e6c84df4ca1..e205135ae1fe 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -564,10 +564,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
.msiir_offset = 0x38,
};
+#ifdef CONFIG_EPAPR_PARAVIRT
static const struct fsl_msi_feature vmpic_msi_feature = {
.fsl_pic_ip = FSL_PIC_IP_VMPIC,
.msiir_offset = 0,
};
+#endif
static const struct of_device_id fsl_of_msi_ids[] = {
{
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index a289cb97c1d7..fa01818c1972 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -383,7 +383,7 @@ static unsigned int xive_get_irq(void)
* CPU.
*
* If we find that there is indeed more in there, we call
- * force_external_irq_replay() to make Linux synthetize an
+ * force_external_irq_replay() to make Linux synthesize an
* external interrupt on the next call to local_irq_restore().
*/
static void xive_do_queue_eoi(struct xive_cpu *xc)
@@ -874,7 +874,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
*
* This also tells us that it's in flight to a host queue
* or has already been fetched but hasn't been EOIed yet
- * by the host. This it's potentially using up a host
+ * by the host. Thus it's potentially using up a host
* queue slot. This is important to know because as long
* as this is the case, we must not hard-unmask it when
* "returning" that interrupt to the host.
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index f1c0fa6ece21..517b963e3e6a 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -415,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
}
- /* Grab it's CAM value */
+ /* Grab its CAM value */
rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
if (rc) {
pr_err("Failed to get pool VP info CPU %d\n", cpu);
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 682c7c0a6f77..d778011060a8 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -10,8 +10,6 @@ KCSAN_SANITIZE := n
# Disable ftrace for the entire directory
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
# Clang stores addresses on the stack causing the frame size to blow
# out. See https://github.com/ClangBuiltLinux/linux/issues/252
ccflags-$(CONFIG_CC_IS_CLANG) += -Wframe-larger-than=4096
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d79d6633f333..bd4813bad317 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1350,7 +1350,7 @@ static int cpu_cmd(void)
}
termch = cpu;
- if (!scanhex(&cpu)) {
+ if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
/* print cpus waiting or in xmon */
printf("cpus stopped:");
last_cpu = first_cpu = NR_CPUS;
@@ -2772,7 +2772,7 @@ static void dump_pacas(void)
termch = c; /* Put c back, it wasn't 'a' */
- if (scanhex(&num))
+ if (scanhex(&num) && num < num_possible_cpus())
dump_one_paca(num);
else
dump_one_paca(xmon_owner);
@@ -2845,7 +2845,7 @@ static void dump_xives(void)
termch = c; /* Put c back, it wasn't 'a' */
- if (scanhex(&num))
+ if (scanhex(&num) && num < num_possible_cpus())
dump_one_xive(num);
else
dump_one_xive(xmon_owner);
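[Editorial aside, not part of the patch above: the xmon hunks add a bounds check on the user-supplied CPU number before it is used to select per-CPU state. A minimal user-space sketch of the same guard pattern; EX_NR_CPUS and the ex_* names are hypothetical stand-ins for num_possible_cpus() and the xmon helpers.]

#include <stdbool.h>
#include <stdio.h>

#define EX_NR_CPUS 8UL	/* stand-in for num_possible_cpus() */

/* Accept a parsed CPU index only if it is within the possible range. */
static bool ex_cpu_valid(unsigned long cpu)
{
	return cpu < EX_NR_CPUS;
}

int main(void)
{
	unsigned long cpu = 42;	/* pretend this came from scanhex() */

	if (ex_cpu_valid(cpu))
		printf("dumping state for cpu %lu\n", cpu);
	else
		printf("cpu %lu out of range, using current owner instead\n", cpu);

	return 0;
}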
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index be09c8836d56..0bdcc383644d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -23,6 +23,7 @@ config RISCV
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
@@ -57,10 +58,11 @@ config RISCV
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
- select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
@@ -71,7 +73,7 @@ config RISCV
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
- select CLINT_TIMER if !MMU
+ select CLINT_TIMER if RISCV_M_MODE
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
@@ -132,7 +134,7 @@ config RISCV
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select HAVE_EBPF_JIT if MMU
- select HAVE_FAST_GUP if MMU
+ select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_GCC_PLUGINS
@@ -155,6 +157,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
+ select HAVE_RUST if 64BIT
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
@@ -173,6 +176,8 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_ALTERNATIVE if !XIP_KERNEL
+ select RISCV_APLIC
+ select RISCV_IMSIC
select RISCV_INTC
select RISCV_TIMER if RISCV_SBI
select SIFIVE_PLIC
@@ -229,8 +234,12 @@ config ARCH_MMAP_RND_COMPAT_BITS_MAX
# set if we run in machine mode, cleared if we run in supervisor mode
config RISCV_M_MODE
- bool
- default !MMU
+ bool "Build a kernel that runs in machine mode"
+ depends on !MMU
+ default y
+ help
+ Select this option if you want to run the kernel in M-mode,
+ without the assistance of any other firmware.
# set if we are running in S-mode and can use SBI calls
config RISCV_SBI
@@ -247,8 +256,9 @@ config MMU
config PAGE_OFFSET
hex
- default 0xC0000000 if 32BIT && MMU
- default 0x80000000 if !MMU
+ default 0x80000000 if !MMU && RISCV_M_MODE
+ default 0x80200000 if !MMU
+ default 0xc0000000 if 32BIT
default 0xff60000000000000 if 64BIT
config KASAN_SHADOW_OFFSET
@@ -596,7 +606,6 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
- depends on MMU
depends on RISCV_ALTERNATIVE
default y
help
@@ -628,7 +637,6 @@ config RISCV_ISA_ZICBOM
config RISCV_ISA_ZICBOZ
bool "Zicboz extension support for faster zeroing of memory"
- depends on MMU
depends on RISCV_ALTERNATIVE
default y
help
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 910ba8837add..2acc7d876e1f 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -82,14 +82,14 @@ config ERRATA_THEAD
Otherwise, please say "N" here to avoid unnecessary overhead.
-config ERRATA_THEAD_PBMT
- bool "Apply T-Head memory type errata"
+config ERRATA_THEAD_MAE
+ bool "Apply T-Head's memory attribute extension (XTheadMae) errata"
depends on ERRATA_THEAD && 64BIT && MMU
select RISCV_ALTERNATIVE_EARLY
default y
help
- This will apply the memory type errata to handle the non-standard
- memory type bits in page-table-entries on T-Head SoCs.
+ This will apply the memory attribute extension errata to handle the
+ non-standard PTE utilization on T-Head SoCs (XTheadMae).
If you don't know what to do here, say "Y".
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 623de5f8a208..f51bb24bc84c 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -1,12 +1,12 @@
menu "SoC selection"
config ARCH_MICROCHIP_POLARFIRE
- def_bool SOC_MICROCHIP_POLARFIRE
+ def_bool ARCH_MICROCHIP
-config SOC_MICROCHIP_POLARFIRE
- bool "Microchip PolarFire SoCs"
+config ARCH_MICROCHIP
+ bool "Microchip SoCs"
help
- This enables support for Microchip PolarFire SoC platforms.
+ This enables support for Microchip SoC platforms.
config ARCH_RENESAS
bool "Renesas RISC-V SoCs"
@@ -14,9 +14,6 @@ config ARCH_RENESAS
This enables support for the RISC-V based Renesas SoCs.
config ARCH_SIFIVE
- def_bool SOC_SIFIVE
-
-config SOC_SIFIVE
bool "SiFive SoCs"
select ERRATA_SIFIVE if !XIP_KERNEL
help
@@ -55,9 +52,6 @@ config ARCH_THEAD
This enables support for the RISC-V based T-HEAD SoCs.
config ARCH_VIRT
- def_bool SOC_VIRT
-
-config SOC_VIRT
bool "QEMU Virt Machine"
select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
@@ -72,11 +66,13 @@ config SOC_VIRT
This enables support for QEMU Virt Machine.
config ARCH_CANAAN
- def_bool SOC_CANAAN
+ bool "Canaan Kendryte SoC"
+ help
+ This enables support for Canaan Kendryte series SoC platform hardware.
-config SOC_CANAAN
+config SOC_CANAAN_K210
bool "Canaan Kendryte K210 SoC"
- depends on !MMU
+ depends on !MMU && ARCH_CANAAN
select CLINT_TIMER if RISCV_M_MODE
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 252d63942f34..c537764ee3c9 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -34,6 +34,9 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_AFLAGS += -mabi=lp64
KBUILD_LDFLAGS += -melf64lriscv
+
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv64 --target=riscv64imac-unknown-none-elf \
+ -Cno-redzone
else
BITS := 32
UTS_MACHINE := riscv32
@@ -68,6 +71,10 @@ riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
riscv-march-$(CONFIG_RISCV_ISA_V) := $(riscv-march-y)v
+ifneq ($(CONFIG_RISCV_ISA_C),y)
+ KBUILD_RUSTFLAGS += -Ctarget-feature=-c
+endif
+
ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
KBUILD_CFLAGS += -Wa,-misa-spec=2.2
KBUILD_AFLAGS += -Wa,-misa-spec=2.2
@@ -133,7 +140,15 @@ boot := arch/riscv/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := $(boot)/xipImage
else
+ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN_K210),yy)
+KBUILD_IMAGE := $(boot)/loader.bin
+else
+ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE := $(boot)/Image.gz
+else
+KBUILD_IMAGE := $(boot)/vmlinuz.efi
+endif
+endif
endif
libs-y += arch/riscv/lib/
@@ -151,19 +166,8 @@ endif
endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
-vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so
+vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
-ifneq ($(CONFIG_XIP_KERNEL),y)
-ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
-KBUILD_IMAGE := $(boot)/loader.bin
-else
-ifeq ($(CONFIG_EFI_ZBOOT),)
-KBUILD_IMAGE := $(boot)/Image.gz
-else
-KBUILD_IMAGE := $(boot)/vmlinuz.efi
-endif
-endif
-endif
BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE))
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 8e7fc0edf21d..869c0345b908 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -14,8 +14,6 @@
# Based on the ia64 and arm64 boot/Makefile.
#
-KCOV_INSTRUMENT := n
-
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
OBJCOPYFLAGS_loader.bin :=-O binary
OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
index 222a39d90f85..f80df225f72b 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
@@ -100,6 +100,38 @@
&i2c1 {
status = "okay";
+
+ power-monitor@10 {
+ compatible = "microchip,pac1934";
+ reg = <0x10>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@1 {
+ reg = <0x1>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDDREG";
+ };
+
+ channel@2 {
+ reg = <0x2>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDDA25";
+ };
+
+ channel@3 {
+ reg = <0x3>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDD25";
+ };
+
+ channel@4 {
+ reg = <0x4>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDDA_REG";
+ };
+ };
};
&i2c2 {
diff --git a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
index f35324b9173c..e0ddf8f602c7 100644
--- a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
+++ b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
@@ -54,6 +54,81 @@
dma-noncoherent;
interrupt-parent = <&plic>;
+ irqc: interrupt-controller@110a0000 {
+ compatible = "renesas,r9a07g043f-irqc";
+ reg = <0 0x110a0000 0 0x20000>;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>,
+ <33 IRQ_TYPE_LEVEL_HIGH>,
+ <34 IRQ_TYPE_LEVEL_HIGH>,
+ <35 IRQ_TYPE_LEVEL_HIGH>,
+ <36 IRQ_TYPE_LEVEL_HIGH>,
+ <37 IRQ_TYPE_LEVEL_HIGH>,
+ <38 IRQ_TYPE_LEVEL_HIGH>,
+ <39 IRQ_TYPE_LEVEL_HIGH>,
+ <40 IRQ_TYPE_LEVEL_HIGH>,
+ <476 IRQ_TYPE_LEVEL_HIGH>,
+ <477 IRQ_TYPE_LEVEL_HIGH>,
+ <478 IRQ_TYPE_LEVEL_HIGH>,
+ <479 IRQ_TYPE_LEVEL_HIGH>,
+ <480 IRQ_TYPE_LEVEL_HIGH>,
+ <481 IRQ_TYPE_LEVEL_HIGH>,
+ <482 IRQ_TYPE_LEVEL_HIGH>,
+ <483 IRQ_TYPE_LEVEL_HIGH>,
+ <484 IRQ_TYPE_LEVEL_HIGH>,
+ <485 IRQ_TYPE_LEVEL_HIGH>,
+ <486 IRQ_TYPE_LEVEL_HIGH>,
+ <487 IRQ_TYPE_LEVEL_HIGH>,
+ <488 IRQ_TYPE_LEVEL_HIGH>,
+ <489 IRQ_TYPE_LEVEL_HIGH>,
+ <490 IRQ_TYPE_LEVEL_HIGH>,
+ <491 IRQ_TYPE_LEVEL_HIGH>,
+ <492 IRQ_TYPE_LEVEL_HIGH>,
+ <493 IRQ_TYPE_LEVEL_HIGH>,
+ <494 IRQ_TYPE_LEVEL_HIGH>,
+ <495 IRQ_TYPE_LEVEL_HIGH>,
+ <496 IRQ_TYPE_LEVEL_HIGH>,
+ <497 IRQ_TYPE_LEVEL_HIGH>,
+ <498 IRQ_TYPE_LEVEL_HIGH>,
+ <499 IRQ_TYPE_LEVEL_HIGH>,
+ <500 IRQ_TYPE_LEVEL_HIGH>,
+ <501 IRQ_TYPE_LEVEL_HIGH>,
+ <502 IRQ_TYPE_LEVEL_HIGH>,
+ <503 IRQ_TYPE_LEVEL_HIGH>,
+ <504 IRQ_TYPE_LEVEL_HIGH>,
+ <505 IRQ_TYPE_LEVEL_HIGH>,
+ <506 IRQ_TYPE_LEVEL_HIGH>,
+ <507 IRQ_TYPE_LEVEL_HIGH>,
+ <57 IRQ_TYPE_LEVEL_HIGH>,
+ <66 IRQ_TYPE_EDGE_RISING>,
+ <67 IRQ_TYPE_EDGE_RISING>,
+ <68 IRQ_TYPE_EDGE_RISING>,
+ <69 IRQ_TYPE_EDGE_RISING>,
+ <70 IRQ_TYPE_EDGE_RISING>,
+ <71 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "nmi",
+ "irq0", "irq1", "irq2", "irq3",
+ "irq4", "irq5", "irq6", "irq7",
+ "tint0", "tint1", "tint2", "tint3",
+ "tint4", "tint5", "tint6", "tint7",
+ "tint8", "tint9", "tint10", "tint11",
+ "tint12", "tint13", "tint14", "tint15",
+ "tint16", "tint17", "tint18", "tint19",
+ "tint20", "tint21", "tint22", "tint23",
+ "tint24", "tint25", "tint26", "tint27",
+ "tint28", "tint29", "tint30", "tint31",
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
+ "ec7tiovf-1";
+ clocks = <&cpg CPG_MOD R9A07G043_IAX45_CLK>,
+ <&cpg CPG_MOD R9A07G043_IAX45_PCLK>;
+ clock-names = "clk", "pclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G043_IAX45_RESETN>;
+ };
+
plic: interrupt-controller@12c00000 {
compatible = "renesas,r9a07g043-plic", "andestech,nceplic100";
#interrupt-cells = <2>;
diff --git a/arch/riscv/boot/dts/renesas/rzfive-smarc-som.dtsi b/arch/riscv/boot/dts/renesas/rzfive-smarc-som.dtsi
index 433ab5c6a626..5e808242649e 100644
--- a/arch/riscv/boot/dts/renesas/rzfive-smarc-som.dtsi
+++ b/arch/riscv/boot/dts/renesas/rzfive-smarc-som.dtsi
@@ -6,19 +6,3 @@
*/
#include <arm64/renesas/rzg2ul-smarc-som.dtsi>
-
-#if (!SW_ET0_EN_N)
-&eth0 {
- phy0: ethernet-phy@7 {
- /delete-property/ interrupt-parent;
- /delete-property/ interrupts;
- };
-};
-#endif
-
-&eth1 {
- phy1: ethernet-phy@7 {
- /delete-property/ interrupt-parent;
- /delete-property/ interrupts;
- };
-};
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
index 3af9e34b3bc7..cd013588adc0 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
+++ b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
@@ -23,9 +23,15 @@
stdout-path = "serial0:115200n8";
};
- memory@80000000 {
- device_type = "memory";
- reg = <0x80000000 0x3f40000>;
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ coprocessor_rtos: region@83f40000 {
+ reg = <0x83f40000 0xc0000>;
+ no-map;
+ };
};
};
@@ -33,6 +39,14 @@
clock-frequency = <25000000>;
};
+&sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ no-1-8-v;
+ no-mmc;
+ no-sdio;
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
index 165e9e320a8c..ec9530972ae2 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
@@ -7,6 +7,11 @@
/ {
compatible = "sophgo,cv1800b";
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x4000000>;
+ };
};
&plic {
@@ -16,3 +21,7 @@
&clint {
compatible = "sophgo,cv1800b-clint", "thead,c900-clint";
};
+
+&clk {
+ compatible = "sophgo,cv1800-clk";
+};
diff --git a/arch/riscv/boot/dts/sophgo/cv1812h.dtsi b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
index 3e7a942f5c1a..7fa4c1e2d1da 100644
--- a/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi
@@ -22,3 +22,7 @@
&clint {
compatible = "sophgo,cv1812h-clint", "thead,c900-clint";
};
+
+&clk {
+ compatible = "sophgo,cv1810-clk";
+};
diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
index 2d6f4a4b1e58..891932ae470f 100644
--- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
@@ -4,6 +4,8 @@
* Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
*/
+#include <dt-bindings/clock/sophgo,cv1800.h>
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
@@ -53,6 +55,12 @@
dma-noncoherent;
ranges;
+ clk: clock-controller@3002000 {
+ reg = <0x03002000 0x1000>;
+ clocks = <&osc>;
+ #clock-cells = <1>;
+ };
+
gpio0: gpio@3020000 {
compatible = "snps,dw-apb-gpio";
reg = <0x3020000 0x1000>;
@@ -125,11 +133,67 @@
};
};
+ i2c0: i2c@4000000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x04000000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C0>;
+ clock-names = "ref", "pclk";
+ interrupts = <49 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@4010000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x04010000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C1>;
+ clock-names = "ref", "pclk";
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@4020000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x04020000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C2>;
+ clock-names = "ref", "pclk";
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@4030000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x04030000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C3>;
+ clock-names = "ref", "pclk";
+ interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@4040000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x04040000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_I2C>, <&clk CLK_APB_I2C4>;
+ clock-names = "ref", "pclk";
+ interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
uart0: serial@4140000 {
compatible = "snps,dw-apb-uart";
reg = <0x04140000 0x100>;
interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc>;
+ clocks = <&clk CLK_UART0>, <&clk CLK_APB_UART0>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -139,7 +203,8 @@
compatible = "snps,dw-apb-uart";
reg = <0x04150000 0x100>;
interrupts = <45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc>;
+ clocks = <&clk CLK_UART1>, <&clk CLK_APB_UART1>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -149,7 +214,8 @@
compatible = "snps,dw-apb-uart";
reg = <0x04160000 0x100>;
interrupts = <46 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc>;
+ clocks = <&clk CLK_UART2>, <&clk CLK_APB_UART2>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -159,22 +225,78 @@
compatible = "snps,dw-apb-uart";
reg = <0x04170000 0x100>;
interrupts = <47 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc>;
+ clocks = <&clk CLK_UART3>, <&clk CLK_APB_UART3>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
};
+ spi0: spi@4180000 {
+ compatible = "snps,dw-apb-ssi";
+ reg = <0x04180000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI0>;
+ clock-names = "ssi_clk", "pclk";
+ interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ spi1: spi@4190000 {
+ compatible = "snps,dw-apb-ssi";
+ reg = <0x04190000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI1>;
+ clock-names = "ssi_clk", "pclk";
+ interrupts = <55 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ spi2: spi@41a0000 {
+ compatible = "snps,dw-apb-ssi";
+ reg = <0x041a0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI2>;
+ clock-names = "ssi_clk", "pclk";
+ interrupts = <56 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ spi3: spi@41b0000 {
+ compatible = "snps,dw-apb-ssi";
+ reg = <0x041b0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk CLK_SPI>, <&clk CLK_APB_SPI3>;
+ clock-names = "ssi_clk", "pclk";
+ interrupts = <57 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
uart4: serial@41c0000 {
compatible = "snps,dw-apb-uart";
reg = <0x041c0000 0x100>;
interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc>;
+ clocks = <&clk CLK_UART4>, <&clk CLK_APB_UART4>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
};
+ sdhci0: mmc@4310000 {
+ compatible = "sophgo,cv1800b-dwcmshc";
+ reg = <0x4310000 0x1000>;
+ interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk CLK_AXI4_SD0>,
+ <&clk CLK_SD0>;
+ clock-names = "core", "bus";
+ status = "disabled";
+ };
+
plic: interrupt-controller@70000000 {
reg = <0x70000000 0x4000000>;
interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
diff --git a/arch/riscv/boot/dts/starfive/Makefile b/arch/riscv/boot/dts/starfive/Makefile
index 0141504c0f5c..2fa0cd7f31c3 100644
--- a/arch/riscv/boot/dts/starfive/Makefile
+++ b/arch/riscv/boot/dts/starfive/Makefile
@@ -8,5 +8,6 @@ DTC_FLAGS_jh7110-starfive-visionfive-2-v1.3b := -@
dtb-$(CONFIG_ARCH_STARFIVE) += jh7100-beaglev-starlight.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7100-starfive-visionfive-v1.dtb
+dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-milkv-mars.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-starfive-visionfive-2-v1.2a.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-starfive-visionfive-2-v1.3b.dtb
diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi
index 9a2e9583af88..7de0732b8eab 100644
--- a/arch/riscv/boot/dts/starfive/jh7100.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi
@@ -13,7 +13,7 @@
#address-cells = <2>;
#size-cells = <2>;
- cpus {
+ cpus: cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
new file mode 100644
index 000000000000..8ff6ea64f048
--- /dev/null
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+/dts-v1/;
+#include "jh7110.dtsi"
+#include "jh7110-pinfunc.h"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ aliases {
+ ethernet0 = &gmac0;
+ i2c0 = &i2c0;
+ i2c2 = &i2c2;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x1 0x0>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&sysgpio 35 GPIO_ACTIVE_HIGH>;
+ priority = <224>;
+ };
+
+ pwmdac_codec: audio-codec {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "StarFive-PWMDAC-Sound-Card";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ simple-audio-card,dai-link@0 {
+ reg = <0>;
+ format = "left_j";
+ bitclock-master = <&sndcpu0>;
+ frame-master = <&sndcpu0>;
+
+ sndcpu0: cpu {
+ sound-dai = <&pwmdac>;
+ };
+
+ codec {
+ sound-dai = <&pwmdac_codec>;
+ };
+ };
+ };
+};
+
+&cpus {
+ timebase-frequency = <4000000>;
+};
+
+&dvp_clk {
+ clock-frequency = <74250000>;
+};
+
+&gmac0_rgmii_rxin {
+ clock-frequency = <125000000>;
+};
+
+&gmac0_rmii_refin {
+ clock-frequency = <50000000>;
+};
+
+&gmac1_rgmii_rxin {
+ clock-frequency = <125000000>;
+};
+
+&gmac1_rmii_refin {
+ clock-frequency = <50000000>;
+};
+
+&hdmitx0_pixelclk {
+ clock-frequency = <297000000>;
+};
+
+&i2srx_bclk_ext {
+ clock-frequency = <12288000>;
+};
+
+&i2srx_lrck_ext {
+ clock-frequency = <192000>;
+};
+
+&i2stx_bclk_ext {
+ clock-frequency = <12288000>;
+};
+
+&i2stx_lrck_ext {
+ clock-frequency = <192000>;
+};
+
+&mclk_ext {
+ clock-frequency = <12288000>;
+};
+
+&osc {
+ clock-frequency = <24000000>;
+};
+
+&rtc_osc {
+ clock-frequency = <32768>;
+};
+
+&tdm_ext {
+ clock-frequency = <49152000>;
+};
+
+&camss {
+ assigned-clocks = <&ispcrg JH7110_ISPCLK_DOM4_APB_FUNC>,
+ <&ispcrg JH7110_ISPCLK_MIPI_RX0_PXL>;
+ assigned-clock-rates = <49500000>, <198000000>;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+
+ camss_from_csi2rx: endpoint {
+ remote-endpoint = <&csi2rx_to_camss>;
+ };
+ };
+ };
+};
+
+&csi2rx {
+ assigned-clocks = <&ispcrg JH7110_ISPCLK_VIN_SYS>;
+ assigned-clock-rates = <297000000>;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ /* remote MIPI sensor endpoint */
+ };
+
+ port@1 {
+ reg = <1>;
+
+ csi2rx_to_camss: endpoint {
+ remote-endpoint = <&camss_from_csi2rx>;
+ };
+ };
+ };
+};
+
+&gmac0 {
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ i2c-sda-falling-time-ns = <510>;
+ i2c-scl-falling-time-ns = <510>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ i2c-sda-falling-time-ns = <510>;
+ i2c-scl-falling-time-ns = <510>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "okay";
+};
+
+&i2c5 {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ i2c-sda-falling-time-ns = <510>;
+ i2c-scl-falling-time-ns = <510>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins>;
+ status = "okay";
+
+ axp15060: pmic@36 {
+ compatible = "x-powers,axp15060";
+ reg = <0x36>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ regulators {
+ vcc_3v3: dcdc1 {
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3";
+ };
+
+ vdd_cpu: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1540000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ emmc_vdd: aldo4 {
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "emmc_vdd";
+ };
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ i2c-sda-falling-time-ns = <510>;
+ i2c-scl-falling-time-ns = <510>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+ status = "okay";
+};
+
+&mmc0 {
+ max-frequency = <100000000>;
+ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
+ assigned-clock-rates = <50000000>;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ cap-mmc-hw-reset;
+ post-power-on-delay-ms = <200>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&emmc_vdd>;
+ status = "okay";
+};
+
+&mmc1 {
+ max-frequency = <100000000>;
+ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
+ assigned-clock-rates = <50000000>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ cd-gpios = <&sysgpio 41 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ cap-sd-highspeed;
+ post-power-on-delay-ms = <200>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ status = "okay";
+};
+
+&pwmdac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwmdac_pins>;
+ status = "okay";
+};
+
+&qspi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ nor_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ cdns,read-delay = <5>;
+ spi-max-frequency = <12000000>;
+ cdns,tshsl-ns = <1>;
+ cdns,tsd2d-ns = <1>;
+ cdns,tchsh-ns = <1>;
+ cdns,tslch-ns = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ spl@0 {
+ reg = <0x0 0x80000>;
+ };
+ uboot-env@f0000 {
+ reg = <0xf0000 0x10000>;
+ };
+ uboot@100000 {
+ reg = <0x100000 0x400000>;
+ };
+ reserved-data@600000 {
+ reg = <0x600000 0xa00000>;
+ };
+ };
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm_pins>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ status = "okay";
+
+ spi_dev0: spi@0 {
+ compatible = "rohm,dh2228fv";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&sysgpio {
+ i2c0_pins: i2c0-0 {
+ i2c-pins {
+ pinmux = <GPIOMUX(57, GPOUT_LOW,
+ GPOEN_SYS_I2C0_CLK,
+ GPI_SYS_I2C0_CLK)>,
+ <GPIOMUX(58, GPOUT_LOW,
+ GPOEN_SYS_I2C0_DATA,
+ GPI_SYS_I2C0_DATA)>;
+ bias-disable; /* external pull-up */
+ input-enable;
+ input-schmitt-enable;
+ };
+ };
+
+ i2c2_pins: i2c2-0 {
+ i2c-pins {
+ pinmux = <GPIOMUX(3, GPOUT_LOW,
+ GPOEN_SYS_I2C2_CLK,
+ GPI_SYS_I2C2_CLK)>,
+ <GPIOMUX(2, GPOUT_LOW,
+ GPOEN_SYS_I2C2_DATA,
+ GPI_SYS_I2C2_DATA)>;
+ bias-disable; /* external pull-up */
+ input-enable;
+ input-schmitt-enable;
+ };
+ };
+
+ i2c5_pins: i2c5-0 {
+ i2c-pins {
+ pinmux = <GPIOMUX(19, GPOUT_LOW,
+ GPOEN_SYS_I2C5_CLK,
+ GPI_SYS_I2C5_CLK)>,
+ <GPIOMUX(20, GPOUT_LOW,
+ GPOEN_SYS_I2C5_DATA,
+ GPI_SYS_I2C5_DATA)>;
+ bias-disable; /* external pull-up */
+ input-enable;
+ input-schmitt-enable;
+ };
+ };
+
+ i2c6_pins: i2c6-0 {
+ i2c-pins {
+ pinmux = <GPIOMUX(16, GPOUT_LOW,
+ GPOEN_SYS_I2C6_CLK,
+ GPI_SYS_I2C6_CLK)>,
+ <GPIOMUX(17, GPOUT_LOW,
+ GPOEN_SYS_I2C6_DATA,
+ GPI_SYS_I2C6_DATA)>;
+ bias-disable; /* external pull-up */
+ input-enable;
+ input-schmitt-enable;
+ };
+ };
+
+ mmc0_pins: mmc0-0 {
+ rst-pins {
+ pinmux = <GPIOMUX(62, GPOUT_SYS_SDIO0_RST,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-pull-up;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ mmc-pins {
+ pinmux = <PINMUX(64, 0)>,
+ <PINMUX(65, 0)>,
+ <PINMUX(66, 0)>,
+ <PINMUX(67, 0)>,
+ <PINMUX(68, 0)>,
+ <PINMUX(69, 0)>,
+ <PINMUX(70, 0)>,
+ <PINMUX(71, 0)>,
+ <PINMUX(72, 0)>,
+ <PINMUX(73, 0)>;
+ bias-pull-up;
+ drive-strength = <12>;
+ input-enable;
+ };
+ };
+
+ mmc1_pins: mmc1-0 {
+ clk-pins {
+ pinmux = <GPIOMUX(10, GPOUT_SYS_SDIO1_CLK,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-pull-up;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ mmc-pins {
+ pinmux = <GPIOMUX(9, GPOUT_SYS_SDIO1_CMD,
+ GPOEN_SYS_SDIO1_CMD,
+ GPI_SYS_SDIO1_CMD)>,
+ <GPIOMUX(11, GPOUT_SYS_SDIO1_DATA0,
+ GPOEN_SYS_SDIO1_DATA0,
+ GPI_SYS_SDIO1_DATA0)>,
+ <GPIOMUX(12, GPOUT_SYS_SDIO1_DATA1,
+ GPOEN_SYS_SDIO1_DATA1,
+ GPI_SYS_SDIO1_DATA1)>,
+ <GPIOMUX(7, GPOUT_SYS_SDIO1_DATA2,
+ GPOEN_SYS_SDIO1_DATA2,
+ GPI_SYS_SDIO1_DATA2)>,
+ <GPIOMUX(8, GPOUT_SYS_SDIO1_DATA3,
+ GPOEN_SYS_SDIO1_DATA3,
+ GPI_SYS_SDIO1_DATA3)>;
+ bias-pull-up;
+ drive-strength = <12>;
+ input-enable;
+ input-schmitt-enable;
+ slew-rate = <0>;
+ };
+ };
+
+ pwmdac_pins: pwmdac-0 {
+ pwmdac-pins {
+ pinmux = <GPIOMUX(33, GPOUT_SYS_PWMDAC_LEFT,
+ GPOEN_ENABLE,
+ GPI_NONE)>,
+ <GPIOMUX(34, GPOUT_SYS_PWMDAC_RIGHT,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ drive-strength = <2>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm_pins: pwm-0 {
+ pwm-pins {
+ pinmux = <GPIOMUX(46, GPOUT_SYS_PWM_CHANNEL0,
+ GPOEN_SYS_PWM0_CHANNEL0,
+ GPI_NONE)>,
+ <GPIOMUX(59, GPOUT_SYS_PWM_CHANNEL1,
+ GPOEN_SYS_PWM0_CHANNEL1,
+ GPI_NONE)>;
+ bias-disable;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+
+ spi0_pins: spi0-0 {
+ mosi-pins {
+ pinmux = <GPIOMUX(52, GPOUT_SYS_SPI0_TXD,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ };
+
+ miso-pins {
+ pinmux = <GPIOMUX(53, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_SYS_SPI0_RXD)>;
+ bias-pull-up;
+ input-enable;
+ input-schmitt-enable;
+ };
+
+ sck-pins {
+ pinmux = <GPIOMUX(48, GPOUT_SYS_SPI0_CLK,
+ GPOEN_ENABLE,
+ GPI_SYS_SPI0_CLK)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ };
+
+ ss-pins {
+ pinmux = <GPIOMUX(49, GPOUT_SYS_SPI0_FSS,
+ GPOEN_ENABLE,
+ GPI_SYS_SPI0_FSS)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ };
+ };
+
+ uart0_pins: uart0-0 {
+ tx-pins {
+ pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ rx-pins {
+ pinmux = <GPIOMUX(6, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_SYS_UART0_RX)>;
+ bias-disable; /* external pull-up */
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-enable;
+ slew-rate = <0>;
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&usb0 {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&U74_1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&U74_2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&U74_3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&U74_4 {
+ cpu-supply = <&vdd_cpu>;
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
new file mode 100644
index 000000000000..fa0eac78e0ba
--- /dev/null
+++ b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ */
+
+/dts-v1/;
+#include "jh7110-common.dtsi"
+
+/ {
+ model = "Milk-V Mars";
+ compatible = "milkv,mars", "starfive,jh7110";
+};
+
+&gmac0 {
+ starfive,tx-use-rgmii-clk;
+ assigned-clocks = <&aoncrg JH7110_AONCLK_GMAC0_TX>;
+ assigned-clock-parents = <&aoncrg JH7110_AONCLK_GMAC0_RMII_RTX>;
+};
+
+
+&phy0 {
+ motorcomm,tx-clk-adj-enabled;
+ motorcomm,tx-clk-10-inverted;
+ motorcomm,tx-clk-100-inverted;
+ motorcomm,tx-clk-1000-inverted;
+ motorcomm,rx-clk-drv-microamp = <3970>;
+ motorcomm,rx-data-drv-microamp = <2910>;
+ rx-internal-delay-ps = <1500>;
+ tx-internal-delay-ps = <1500>;
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 45b58b6f3df8..9d70f21c86fc 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -5,188 +5,11 @@
*/
/dts-v1/;
-#include "jh7110.dtsi"
-#include "jh7110-pinfunc.h"
-#include <dt-bindings/gpio/gpio.h>
+#include "jh7110-common.dtsi"
/ {
aliases {
- ethernet0 = &gmac0;
ethernet1 = &gmac1;
- i2c0 = &i2c0;
- i2c2 = &i2c2;
- i2c5 = &i2c5;
- i2c6 = &i2c6;
- mmc0 = &mmc0;
- mmc1 = &mmc1;
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- cpus {
- timebase-frequency = <4000000>;
- };
-
- memory@40000000 {
- device_type = "memory";
- reg = <0x0 0x40000000 0x1 0x0>;
- };
-
- gpio-restart {
- compatible = "gpio-restart";
- gpios = <&sysgpio 35 GPIO_ACTIVE_HIGH>;
- priority = <224>;
- };
-
- pwmdac_codec: pwmdac-codec {
- compatible = "linux,spdif-dit";
- #sound-dai-cells = <0>;
- };
-
- sound-pwmdac {
- compatible = "simple-audio-card";
- simple-audio-card,name = "StarFive-PWMDAC-Sound-Card";
- #address-cells = <1>;
- #size-cells = <0>;
-
- simple-audio-card,dai-link@0 {
- reg = <0>;
- format = "left_j";
- bitclock-master = <&sndcpu0>;
- frame-master = <&sndcpu0>;
-
- sndcpu0: cpu {
- sound-dai = <&pwmdac>;
- };
-
- codec {
- sound-dai = <&pwmdac_codec>;
- };
- };
- };
-};
-
-&dvp_clk {
- clock-frequency = <74250000>;
-};
-
-&gmac0_rgmii_rxin {
- clock-frequency = <125000000>;
-};
-
-&gmac0_rmii_refin {
- clock-frequency = <50000000>;
-};
-
-&gmac1_rgmii_rxin {
- clock-frequency = <125000000>;
-};
-
-&gmac1_rmii_refin {
- clock-frequency = <50000000>;
-};
-
-&hdmitx0_pixelclk {
- clock-frequency = <297000000>;
-};
-
-&i2srx_bclk_ext {
- clock-frequency = <12288000>;
-};
-
-&i2srx_lrck_ext {
- clock-frequency = <192000>;
-};
-
-&i2stx_bclk_ext {
- clock-frequency = <12288000>;
-};
-
-&i2stx_lrck_ext {
- clock-frequency = <192000>;
-};
-
-&mclk_ext {
- clock-frequency = <12288000>;
-};
-
-&osc {
- clock-frequency = <24000000>;
-};
-
-&rtc_osc {
- clock-frequency = <32768>;
-};
-
-&tdm_ext {
- clock-frequency = <49152000>;
-};
-
-&camss {
- assigned-clocks = <&ispcrg JH7110_ISPCLK_DOM4_APB_FUNC>,
- <&ispcrg JH7110_ISPCLK_MIPI_RX0_PXL>;
- assigned-clock-rates = <49500000>, <198000000>;
- status = "okay";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- };
-
- port@1 {
- reg = <1>;
-
- camss_from_csi2rx: endpoint {
- remote-endpoint = <&csi2rx_to_camss>;
- };
- };
- };
-};
-
-&csi2rx {
- assigned-clocks = <&ispcrg JH7110_ISPCLK_VIN_SYS>;
- assigned-clock-rates = <297000000>;
- status = "okay";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- /* remote MIPI sensor endpoint */
- };
-
- port@1 {
- reg = <1>;
-
- csi2rx_to_camss: endpoint {
- remote-endpoint = <&camss_from_csi2rx>;
- };
- };
- };
-};
-
-&gmac0 {
- phy-handle = <&phy0>;
- phy-mode = "rgmii-id";
- status = "okay";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "snps,dwmac-mdio";
-
- phy0: ethernet-phy@0 {
- reg = <0>;
- };
};
};
@@ -206,510 +29,6 @@
};
};
-&i2c0 {
- clock-frequency = <100000>;
- i2c-sda-hold-time-ns = <300>;
- i2c-sda-falling-time-ns = <510>;
- i2c-scl-falling-time-ns = <510>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
- status = "okay";
-};
-
-&i2c2 {
- clock-frequency = <100000>;
- i2c-sda-hold-time-ns = <300>;
- i2c-sda-falling-time-ns = <510>;
- i2c-scl-falling-time-ns = <510>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_pins>;
- status = "okay";
-};
-
-&i2c5 {
- clock-frequency = <100000>;
- i2c-sda-hold-time-ns = <300>;
- i2c-sda-falling-time-ns = <510>;
- i2c-scl-falling-time-ns = <510>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_pins>;
- status = "okay";
-
- axp15060: pmic@36 {
- compatible = "x-powers,axp15060";
- reg = <0x36>;
- interrupts = <0>;
- interrupt-controller;
- #interrupt-cells = <1>;
-
- regulators {
- vcc_3v3: dcdc1 {
- regulator-boot-on;
- regulator-always-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc_3v3";
- };
-
- vdd_cpu: dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1540000>;
- regulator-name = "vdd-cpu";
- };
-
- emmc_vdd: aldo4 {
- regulator-boot-on;
- regulator-always-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "emmc_vdd";
- };
- };
- };
-};
-
-&i2c6 {
- clock-frequency = <100000>;
- i2c-sda-hold-time-ns = <300>;
- i2c-sda-falling-time-ns = <510>;
- i2c-scl-falling-time-ns = <510>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c6_pins>;
- status = "okay";
-};
-
-&i2srx {
- pinctrl-names = "default";
- pinctrl-0 = <&i2srx_pins>;
- status = "okay";
-};
-
-&i2stx0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mclk_ext_pins>;
- status = "okay";
-};
-
-&i2stx1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2stx1_pins>;
- status = "okay";
-};
-
&mmc0 {
- max-frequency = <100000000>;
- assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
- assigned-clock-rates = <50000000>;
- bus-width = <8>;
- cap-mmc-highspeed;
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
non-removable;
- cap-mmc-hw-reset;
- post-power-on-delay-ms = <200>;
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins>;
- vmmc-supply = <&vcc_3v3>;
- vqmmc-supply = <&emmc_vdd>;
- status = "okay";
-};
-
-&mmc1 {
- max-frequency = <100000000>;
- assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
- assigned-clock-rates = <50000000>;
- bus-width = <4>;
- no-sdio;
- no-mmc;
- broken-cd;
- cap-sd-highspeed;
- post-power-on-delay-ms = <200>;
- pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pins>;
- status = "okay";
-};
-
-&pwmdac {
- pinctrl-names = "default";
- pinctrl-0 = <&pwmdac_pins>;
- status = "okay";
-};
-
-&qspi {
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- nor_flash: flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- cdns,read-delay = <5>;
- spi-max-frequency = <12000000>;
- cdns,tshsl-ns = <1>;
- cdns,tsd2d-ns = <1>;
- cdns,tchsh-ns = <1>;
- cdns,tslch-ns = <1>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- spl@0 {
- reg = <0x0 0x80000>;
- };
- uboot-env@f0000 {
- reg = <0xf0000 0x10000>;
- };
- uboot@100000 {
- reg = <0x100000 0x400000>;
- };
- reserved-data@600000 {
- reg = <0x600000 0xa00000>;
- };
- };
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm_pins>;
- status = "okay";
-};
-
-&spi0 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_pins>;
- status = "okay";
-
- spi_dev0: spi@0 {
- compatible = "rohm,dh2228fv";
- reg = <0>;
- spi-max-frequency = <10000000>;
- };
-};
-
-&sysgpio {
- i2c0_pins: i2c0-0 {
- i2c-pins {
- pinmux = <GPIOMUX(57, GPOUT_LOW,
- GPOEN_SYS_I2C0_CLK,
- GPI_SYS_I2C0_CLK)>,
- <GPIOMUX(58, GPOUT_LOW,
- GPOEN_SYS_I2C0_DATA,
- GPI_SYS_I2C0_DATA)>;
- bias-disable; /* external pull-up */
- input-enable;
- input-schmitt-enable;
- };
- };
-
- i2c2_pins: i2c2-0 {
- i2c-pins {
- pinmux = <GPIOMUX(3, GPOUT_LOW,
- GPOEN_SYS_I2C2_CLK,
- GPI_SYS_I2C2_CLK)>,
- <GPIOMUX(2, GPOUT_LOW,
- GPOEN_SYS_I2C2_DATA,
- GPI_SYS_I2C2_DATA)>;
- bias-disable; /* external pull-up */
- input-enable;
- input-schmitt-enable;
- };
- };
-
- i2c5_pins: i2c5-0 {
- i2c-pins {
- pinmux = <GPIOMUX(19, GPOUT_LOW,
- GPOEN_SYS_I2C5_CLK,
- GPI_SYS_I2C5_CLK)>,
- <GPIOMUX(20, GPOUT_LOW,
- GPOEN_SYS_I2C5_DATA,
- GPI_SYS_I2C5_DATA)>;
- bias-disable; /* external pull-up */
- input-enable;
- input-schmitt-enable;
- };
- };
-
- i2c6_pins: i2c6-0 {
- i2c-pins {
- pinmux = <GPIOMUX(16, GPOUT_LOW,
- GPOEN_SYS_I2C6_CLK,
- GPI_SYS_I2C6_CLK)>,
- <GPIOMUX(17, GPOUT_LOW,
- GPOEN_SYS_I2C6_DATA,
- GPI_SYS_I2C6_DATA)>;
- bias-disable; /* external pull-up */
- input-enable;
- input-schmitt-enable;
- };
- };
-
- i2srx_pins: i2srx-0 {
- clk-sd-pins {
- pinmux = <GPIOMUX(38, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_BCLK)>,
- <GPIOMUX(63, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_LRCK)>,
- <GPIOMUX(38, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2STX1_BCLK)>,
- <GPIOMUX(63, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2STX1_LRCK)>,
- <GPIOMUX(61, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_SDIN0)>;
- input-enable;
- };
- };
-
- i2stx1_pins: i2stx1-0 {
- sd-pins {
- pinmux = <GPIOMUX(44, GPOUT_SYS_I2STX1_SDO0,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- input-disable;
- };
- };
-
- mclk_ext_pins: mclk-ext-0 {
- mclk-ext-pins {
- pinmux = <GPIOMUX(4, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_MCLK_EXT)>;
- input-enable;
- };
- };
-
- mmc0_pins: mmc0-0 {
- rst-pins {
- pinmux = <GPIOMUX(62, GPOUT_SYS_SDIO0_RST,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-pull-up;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- mmc-pins {
- pinmux = <PINMUX(64, 0)>,
- <PINMUX(65, 0)>,
- <PINMUX(66, 0)>,
- <PINMUX(67, 0)>,
- <PINMUX(68, 0)>,
- <PINMUX(69, 0)>,
- <PINMUX(70, 0)>,
- <PINMUX(71, 0)>,
- <PINMUX(72, 0)>,
- <PINMUX(73, 0)>;
- bias-pull-up;
- drive-strength = <12>;
- input-enable;
- };
- };
-
- mmc1_pins: mmc1-0 {
- clk-pins {
- pinmux = <GPIOMUX(10, GPOUT_SYS_SDIO1_CLK,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-pull-up;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- mmc-pins {
- pinmux = <GPIOMUX(9, GPOUT_SYS_SDIO1_CMD,
- GPOEN_SYS_SDIO1_CMD,
- GPI_SYS_SDIO1_CMD)>,
- <GPIOMUX(11, GPOUT_SYS_SDIO1_DATA0,
- GPOEN_SYS_SDIO1_DATA0,
- GPI_SYS_SDIO1_DATA0)>,
- <GPIOMUX(12, GPOUT_SYS_SDIO1_DATA1,
- GPOEN_SYS_SDIO1_DATA1,
- GPI_SYS_SDIO1_DATA1)>,
- <GPIOMUX(7, GPOUT_SYS_SDIO1_DATA2,
- GPOEN_SYS_SDIO1_DATA2,
- GPI_SYS_SDIO1_DATA2)>,
- <GPIOMUX(8, GPOUT_SYS_SDIO1_DATA3,
- GPOEN_SYS_SDIO1_DATA3,
- GPI_SYS_SDIO1_DATA3)>;
- bias-pull-up;
- drive-strength = <12>;
- input-enable;
- input-schmitt-enable;
- slew-rate = <0>;
- };
- };
-
- pwmdac_pins: pwmdac-0 {
- pwmdac-pins {
- pinmux = <GPIOMUX(33, GPOUT_SYS_PWMDAC_LEFT,
- GPOEN_ENABLE,
- GPI_NONE)>,
- <GPIOMUX(34, GPOUT_SYS_PWMDAC_RIGHT,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- drive-strength = <2>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
- };
-
- pwm_pins: pwm-0 {
- pwm-pins {
- pinmux = <GPIOMUX(46, GPOUT_SYS_PWM_CHANNEL0,
- GPOEN_SYS_PWM0_CHANNEL0,
- GPI_NONE)>,
- <GPIOMUX(59, GPOUT_SYS_PWM_CHANNEL1,
- GPOEN_SYS_PWM0_CHANNEL1,
- GPI_NONE)>;
- bias-disable;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
- };
-
- spi0_pins: spi0-0 {
- mosi-pins {
- pinmux = <GPIOMUX(52, GPOUT_SYS_SPI0_TXD,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- input-disable;
- input-schmitt-disable;
- };
-
- miso-pins {
- pinmux = <GPIOMUX(53, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_SPI0_RXD)>;
- bias-pull-up;
- input-enable;
- input-schmitt-enable;
- };
-
- sck-pins {
- pinmux = <GPIOMUX(48, GPOUT_SYS_SPI0_CLK,
- GPOEN_ENABLE,
- GPI_SYS_SPI0_CLK)>;
- bias-disable;
- input-disable;
- input-schmitt-disable;
- };
-
- ss-pins {
- pinmux = <GPIOMUX(49, GPOUT_SYS_SPI0_FSS,
- GPOEN_ENABLE,
- GPI_SYS_SPI0_FSS)>;
- bias-disable;
- input-disable;
- input-schmitt-disable;
- };
- };
-
- tdm_pins: tdm-0 {
- tx-pins {
- pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-pull-up;
- drive-strength = <2>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- rx-pins {
- pinmux = <GPIOMUX(61, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_RXD)>;
- input-enable;
- };
-
- sync-pins {
- pinmux = <GPIOMUX(63, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_SYNC)>;
- input-enable;
- };
-
- pcmclk-pins {
- pinmux = <GPIOMUX(38, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_CLK)>;
- input-enable;
- };
- };
-
- uart0_pins: uart0-0 {
- tx-pins {
- pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- rx-pins {
- pinmux = <GPIOMUX(6, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_UART0_RX)>;
- bias-disable; /* external pull-up */
- drive-strength = <2>;
- input-enable;
- input-schmitt-enable;
- slew-rate = <0>;
- };
- };
-};
-
-&tdm {
- pinctrl-names = "default";
- pinctrl-0 = <&tdm_pins>;
- status = "okay";
-};
-
-&uart0 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins>;
- status = "okay";
-};
-
-&usb0 {
- dr_mode = "peripheral";
- status = "okay";
-};
-
-&U74_1 {
- cpu-supply = <&vdd_cpu>;
-};
-
-&U74_2 {
- cpu-supply = <&vdd_cpu>;
-};
-
-&U74_3 {
- cpu-supply = <&vdd_cpu>;
-};
-
-&U74_4 {
- cpu-supply = <&vdd_cpu>;
};
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 4a5708f7fcf7..18047195c600 100644
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
@@ -15,7 +15,7 @@
#address-cells = <2>;
#size-cells = <2>;
- cpus {
+ cpus: cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index 8b915e206f3a..d2fa25839012 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -193,6 +193,33 @@
status = "disabled";
};
+ emmc: mmc@ffe7080000 {
+ compatible = "thead,th1520-dwcmshc";
+ reg = <0xff 0xe7080000 0x0 0x10000>;
+ interrupts = <62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sdhci_clk>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
+ sdio0: mmc@ffe7090000 {
+ compatible = "thead,th1520-dwcmshc";
+ reg = <0xff 0xe7090000 0x0 0x10000>;
+ interrupts = <64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sdhci_clk>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
+ sdio1: mmc@ffe70a0000 {
+ compatible = "thead,th1520-dwcmshc";
+ reg = <0xff 0xe70a0000 0x0 0x10000>;
+ interrupts = <71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sdhci_clk>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
uart1: serial@ffe7f00000 {
compatible = "snps,dw-apb-uart";
reg = <0xff 0xe7f00000 0x0 0x100>;
@@ -311,33 +338,6 @@
status = "disabled";
};
- emmc: mmc@ffe7080000 {
- compatible = "thead,th1520-dwcmshc";
- reg = <0xff 0xe7080000 0x0 0x10000>;
- interrupts = <62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
- clock-names = "core";
- status = "disabled";
- };
-
- sdio0: mmc@ffe7090000 {
- compatible = "thead,th1520-dwcmshc";
- reg = <0xff 0xe7090000 0x0 0x10000>;
- interrupts = <64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
- clock-names = "core";
- status = "disabled";
- };
-
- sdio1: mmc@ffe70a0000 {
- compatible = "thead,th1520-dwcmshc";
- reg = <0xff 0xe70a0000 0x0 0x10000>;
- interrupts = <71 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
- clock-names = "core";
- status = "disabled";
- };
-
timer0: timer@ffefc32000 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xefc32000 0x0 0x14>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index fc0ec2ee13bc..12dc8c73a8ac 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -25,14 +25,15 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
-CONFIG_SOC_MICROCHIP_POLARFIRE=y
+CONFIG_ARCH_MICROCHIP=y
CONFIG_ARCH_RENESAS=y
-CONFIG_SOC_SIFIVE=y
+CONFIG_ARCH_SIFIVE=y
CONFIG_ARCH_SOPHGO=y
CONFIG_SOC_STARFIVE=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_THEAD=y
-CONFIG_SOC_VIRT=y
+CONFIG_ARCH_VIRT=y
+CONFIG_ARCH_CANAAN=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PM=y
@@ -233,6 +234,7 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_RENESAS_OSTM=y
+CONFIG_CLK_SOPHGO_CV1800=y
CONFIG_SUN8I_DE2_CCU=m
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 7e75200543f4..af9601da4643 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -11,7 +11,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -27,7 +27,8 @@ CONFIG_EXPERT=y
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
# CONFIG_MMU is not set
-CONFIG_SOC_CANAAN=y
+CONFIG_ARCH_CANAAN=y
+CONFIG_SOC_CANAAN_K210=y
CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 0ba353e9ca71..dd460c649152 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -3,7 +3,7 @@ CONFIG_LOG_BUF_SHIFT=13
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -19,7 +19,8 @@ CONFIG_EXPERT=y
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
# CONFIG_MMU is not set
-CONFIG_SOC_CANAAN=y
+CONFIG_ARCH_CANAAN=y
+CONFIG_SOC_CANAAN_K210=y
CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index b794e2f8144e..d4b03dc3c2c0 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -10,7 +10,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
@@ -24,7 +24,7 @@ CONFIG_EXPERT=y
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
# CONFIG_MMU is not set
-CONFIG_SOC_VIRT=y
+CONFIG_ARCH_VIRT=y
CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
index 3d9a32d791f7..716cfedad3a2 100644
--- a/arch/riscv/errata/sifive/errata.c
+++ b/arch/riscv/errata/sifive/errata.c
@@ -42,6 +42,11 @@ static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long imp
return false;
if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626)
return false;
+
+#ifdef CONFIG_MMU
+ tlb_flush_all_threshold = 0;
+#endif
+
return true;
}
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index b1c410bbc1ae..bf6a0a6318ee 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -19,20 +19,26 @@
#include <asm/patch.h>
#include <asm/vendorid_list.h>
-static bool errata_probe_pbmt(unsigned int stage,
- unsigned long arch_id, unsigned long impid)
+#define CSR_TH_SXSTATUS 0x5c0
+#define SXSTATUS_MAEE _AC(0x200000, UL)
+
+static bool errata_probe_mae(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
{
- if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
return false;
if (arch_id != 0 || impid != 0)
return false;
- if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
- stage == RISCV_ALTERNATIVES_MODULE)
- return true;
+ if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
+ stage != RISCV_ALTERNATIVES_MODULE)
+ return false;
+
+ if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
+ return false;
- return false;
+ return true;
}
/*
@@ -140,8 +146,8 @@ static u32 thead_errata_probe(unsigned int stage,
{
u32 cpu_req_errata = 0;
- if (errata_probe_pbmt(stage, archid, impid))
- cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
+ if (errata_probe_mae(stage, archid, impid))
+ cpu_req_errata |= BIT(ERRATA_THEAD_MAE);
errata_probe_cmo(stage, archid, impid);
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0e0522e588ca..5b96c2f61adb 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -195,22 +195,28 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " beq %[p], %[u], 1f\n" \
+ " add %[rc], %[p], %[a]\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : [a]"r" (_a), [u]"r" (_u) \
+ : "memory"); \
+})
+
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -221,77 +227,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bltz %[p], 1f\n" \
+ " addi %[rc], %[p], 1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bgtz %[p], 1f\n" \
+ " addi %[rc], %[p], -1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " addi %[rc], %[p], -1\n" \
+ " bltz %[rc], 1f\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
return prev - 1;
}
@@ -303,17 +318,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
return !(prev < 0);
}
@@ -324,17 +330,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
return !(prev > 0);
}
@@ -345,17 +342,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
return prev - 1;
}
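
The macros above only template the LR/SC loops that were previously open-coded per operand width; the observable semantics do not change. A minimal user-space reference model of those semantics, shown purely for illustration (no atomicity, no barriers, not the kernel implementation):

/* What arch_atomic_fetch_add_unless() computes: add @a to *v unless the
 * current value equals @u; always return the old value. */
static int ref_fetch_add_unless(int *v, int a, int u)
{
	int prev = *v;

	if (prev != u)
		*v = prev + a;
	return prev;
}

/* What arch_atomic_dec_if_positive() computes: decrement *v only when the
 * result stays non-negative; return the decremented value either way. */
static int ref_dec_if_positive(int *v)
{
	int rc = *v - 1;

	if (rc >= 0)
		*v = rc;
	return rc;
}

The "d" variants above do the same thing on 64-bit counters; only the lr/sc suffix passed to the macro differs.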
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index 2174fe7bac9a..570e9d8acad1 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -26,8 +26,8 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_RISCV_DMA_NONCOHERENT
extern int dma_cache_alignment;
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
#define dma_get_cache_alignment dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index a129dac4521d..dd8d07146116 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -33,8 +33,11 @@ static inline void flush_dcache_page(struct page *page)
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_page(vma, pg, addr, len) \
- flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+do { \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_mm(vma->vm_mm, 0); \
+} while (0)
#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2fee65cc8443..ddb002ed89de 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,140 +10,79 @@
#include <asm/fence.h>
-#define __xchg_relaxed(ptr, new, size) \
+#define __arch_xchg_masked(prepend, append, r, p, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z4\n" \
+ " or %1, %1, %z3\n" \
+ " sc.w %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" (__newx), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_xchg(sfx, prepend, append, r, p, n) \
+({ \
+ __asm__ __volatile__ ( \
+ prepend \
+ " amoswap" sfx " %0, %2, %1\n" \
+ append \
+ : "=r" (r), "+A" (*(p)) \
+ : "r" (n) \
+ : "memory"); \
+})
+
+#define _arch_xchg(ptr, new, sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_xchg_masked(prepend, append, \
+ __ret, __ptr, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".w" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".d" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
#define arch_xchg_relaxed(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_acquire(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", "")
#define arch_xchg_acquire(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_acquire((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_release(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_release((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __arch_xchg(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
#define arch_xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(ptr, x, ".aqrl", "", "")
#define xchg32(ptr, x) \
({ \
@@ -162,190 +101,95 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
-#define __cmpxchg_relaxed(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
-#define arch_cmpxchg_relaxed(ptr, o, n) \
+#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __oldx = (ulong)(o) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z5\n" \
+ " bne %1, %z3, 1f\n" \
+ " and %1, %0, %z6\n" \
+ " or %1, %1, %z4\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" ((long)__oldx), "rJ" (__newx), \
+ "rJ" (__mask), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \
({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg_acquire(ptr, old, new, size) \
+ register unsigned int __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr" lr_sfx " %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
+ : "rJ" (co o), "rJ" (n) \
+ : "memory"); \
+})
+
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
+ __typeof__(*(__ptr)) __old = (old); \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_cmpxchg_masked(sc_sfx, prepend, append, \
+ __ret, __ptr, __old, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
+ __ret, __ptr, (long), __old, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
+ __ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
-#define arch_cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", "")
-#define __cmpxchg_release(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+#define arch_cmpxchg_acquire(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_release(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_FULL_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_FULL_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
#define arch_cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
#define arch_cmpxchg_local(ptr, o, n) \
- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+ arch_cmpxchg_relaxed((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) \
({ \
@@ -359,4 +203,22 @@
arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
+})
+
+#define arch_cmpxchg64_acquire(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
+})
+
+#define arch_cmpxchg64_release(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_release((ptr), (o), (n)); \
+})
+
#endif /* _ASM_RISCV_CMPXCHG_H */
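
The new __arch_xchg_masked() and __arch_cmpxchg_masked() helpers emulate 1- and 2-byte exchanges with a 32-bit LR/SC on the aligned word that contains the value. A stand-alone sketch of the address and mask arithmetic they rely on (hypothetical helper, little-endian layout as on RISC-V Linux, shown only to make __ptr32b, __s and __mask concrete):

#include <stddef.h>
#include <stdint.h>

struct subword {
	uint32_t *word;		/* aligned 32-bit word holding the value */
	unsigned int shift;	/* bit offset of the value in that word  */
	uint32_t mask;		/* mask selecting the value's bits       */
};

/* Same arithmetic as the macros above: round the pointer down to a 4-byte
 * boundary, then derive the shift and mask for a @size-byte object. */
static struct subword locate_subword(void *p, size_t size)
{
	struct subword s;

	s.word = (uint32_t *)((uintptr_t)p & ~(uintptr_t)0x3);
	s.shift = ((uintptr_t)p & (0x4 - size)) * 8;
	s.mask = (((uint32_t)1 << (size * 8)) - 1) << s.shift;
	return s;
}

With @size equal to 1 or 2 this yields exactly the word, shift and mask that the lr.w/sc.w loop uses to splice the new value into place while leaving the neighbouring bytes untouched.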
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 2468c55933cd..25966995da04 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -168,7 +168,8 @@
#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
(_AC(1, UL) << IRQ_S_TIMER) | \
- (_AC(1, UL) << IRQ_S_EXT))
+ (_AC(1, UL) << IRQ_S_EXT) | \
+ (_AC(1, UL) << IRQ_PMU_OVF))
/* AIA CSR bits */
#define TOPI_IID_SHIFT 16
@@ -281,7 +282,7 @@
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_SSCOUNTOVF 0xda0
+#define CSR_SCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 1f2dbfb8a8bf..7c8a71a526a3 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -23,7 +23,7 @@
#endif
#ifdef CONFIG_ERRATA_THEAD
-#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
#define ERRATA_THEAD_NUMBER 2
#endif
@@ -43,30 +43,40 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
CONFIG_ERRATA_SIFIVE_CIP_453)
#else /* !__ASSEMBLY__ */
-#define ALT_FLUSH_TLB_PAGE(x) \
+#define ALT_SFENCE_VMA_ASID(asid) \
+asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (asid) : "memory")
+
+#define ALT_SFENCE_VMA_ADDR(addr) \
asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
: : "r" (addr) : "memory")
+#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid) \
+asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (addr), "r" (asid) : "memory")
+
/*
* _val is marked as "will be overwritten", so need to set it to 0
* in the default case.
*/
#define ALT_SVPBMT_SHIFT 61
-#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_THEAD_MAE_SHIFT 59
#define ALT_SVPBMT(_val, prot) \
asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
"li %0, %1\t\nslli %0,%0,%3", 0, \
RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
"li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "=r"(_val) \
: "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
- "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_MAE_SHIFT), \
"I"(ALT_SVPBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT))
+ "I"(ALT_THEAD_MAE_SHIFT))
-#ifdef CONFIG_ERRATA_THEAD_PBMT
+#ifdef CONFIG_ERRATA_THEAD_MAE
/*
* IO/NOCACHE memory types are handled together with svpbmt,
* so on T-Head chips, check if no other memory type is set,
@@ -83,11 +93,11 @@ asm volatile(ALTERNATIVE( \
"slli t3, t3, %3\n\t" \
"or %0, %0, t3\n\t" \
"2:", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "+r"(_val) \
- : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(ALT_THEAD_MAE_SHIFT) \
: "t3")
#else
#define ALT_THEAD_PMA(_val)
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 22deb7a2a6ec..b1ce97a9dbfc 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -5,11 +5,11 @@
#include <asm/cacheflush.h>
#include <asm/page.h>
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h);
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 08d4d6a5b7e9..6fd8cbfcfcc7 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -7,7 +7,6 @@
#ifndef _ASM_RISCV_IRQFLAGS_H
#define _ASM_RISCV_IRQFLAGS_H
-#include <asm/processor.h>
#include <asm/csr.h>
/* read interrupt enabled status */
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 484d04a92fa6..d96281278586 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -43,6 +43,17 @@
KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6)
+#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \
+ BIT(EXC_BREAKPOINT) | \
+ BIT(EXC_SYSCALL) | \
+ BIT(EXC_INST_PAGE_FAULT) | \
+ BIT(EXC_LOAD_PAGE_FAULT) | \
+ BIT(EXC_STORE_PAGE_FAULT))
+
+#define KVM_HIDELEG_DEFAULT (BIT(IRQ_VS_SOFT) | \
+ BIT(IRQ_VS_TIMER) | \
+ BIT(IRQ_VS_EXT))
+
enum kvm_riscv_hfence_type {
KVM_RISCV_HFENCE_UNKNOWN = 0,
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
@@ -169,6 +180,7 @@ struct kvm_vcpu_csr {
struct kvm_vcpu_config {
u64 henvcfg;
u64 hstateen0;
+ unsigned long hedeleg;
};
struct kvm_vcpu_smstateen_csr {
@@ -211,6 +223,7 @@ struct kvm_vcpu_arch {
/* CPU context upon Guest VCPU reset */
struct kvm_cpu_context guest_reset_context;
+ spinlock_t reset_cntx_lock;
/* CPU CSR context upon Guest VCPU reset */
struct kvm_vcpu_csr guest_reset_csr;
@@ -252,8 +265,9 @@ struct kvm_vcpu_arch {
/* Cache pages needed to program page tables with spinlock held */
struct kvm_mmu_memory_cache mmu_page_cache;
- /* VCPU power-off state */
- bool power_off;
+ /* VCPU power state */
+ struct kvm_mp_state mp_state;
+ spinlock_t mp_state_lock;
/* Don't run the VCPU (blocked) */
bool pause;
@@ -374,8 +388,11 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
index 395518a1664e..fa0f535bbbf0 100644
--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -20,7 +20,7 @@ static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
struct kvm_fw_event {
/* Current value of the event */
- unsigned long value;
+ u64 value;
/* Event monitoring status */
bool started;
@@ -36,6 +36,7 @@ struct kvm_pmc {
bool started;
/* Monitoring event ID */
unsigned long event_idx;
+ struct kvm_vcpu *vcpu;
};
/* PMU data structure per vcpu */
@@ -50,6 +51,12 @@ struct kvm_pmu {
bool init_done;
/* Bit map of all the virtual counter used */
DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+ /* Bit map of all the virtual counter overflown */
+ DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
+ /* The address of the counter snapshot area (guest physical address) */
+ gpa_t snapshot_addr;
+ /* The actual data of the snapshot */
+ struct riscv_pmu_snapshot_data *sdata;
};
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
@@ -82,9 +89,14 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
unsigned long ctr_mask, unsigned long flags,
unsigned long eidx, u64 evtdata,
struct kvm_vcpu_sbi_return *retdata);
-int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
+ unsigned long saddr_high, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 355504b37f8e..947fd60f9051 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,6 +19,8 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
+ /* Force local icache flush on all migrations. */
+ bool force_icache_flush;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
unsigned long exec_fdpic_loadmap;
@@ -26,6 +28,9 @@ typedef struct {
#endif
} mm_context_t;
+#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK)
+#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK)
+
void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot);
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 2947423b5082..115ac98b8d72 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
* the boot process (before kernel_map.va_pa_offset is set).
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index e88b52d39eac..9f5d6e14c405 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -6,6 +6,7 @@
#ifndef _ASM_RISCV_PATCH_H
#define _ASM_RISCV_PATCH_H
+int patch_insn_write(void *addr, const void *insn, size_t len);
int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text_set_nosync(void *addr, u8 c, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns);
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index deaf971253a2..f52264304f77 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -8,6 +8,7 @@
#define _ASM_RISCV_PGALLOC_H
#include <linux/mm.h>
+#include <asm/sbi.h>
#include <asm/tlb.h>
#ifdef CONFIG_MMU
@@ -15,6 +16,14 @@
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
+static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+ if (riscv_use_sbi_for_rfence())
+ tlb_remove_ptdesc(tlb, pt);
+ else
+ tlb_remove_page_ptdesc(tlb, pt);
+}
+
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -102,10 +111,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
pagetable_pud_dtor(ptdesc);
- if (riscv_use_ipi_for_rfence())
- tlb_remove_page_ptdesc(tlb, ptdesc);
- else
- tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
}
@@ -139,12 +145,8 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr)
{
- if (pgtable_l5_enabled) {
- if (riscv_use_ipi_for_rfence())
- tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
- else
- tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
- }
+ if (pgtable_l5_enabled)
+ riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -176,10 +178,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
pagetable_pmd_dtor(ptdesc);
- if (riscv_use_ipi_for_rfence())
- tlb_remove_page_ptdesc(tlb, ptdesc);
- else
- tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -190,10 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
struct ptdesc *ptdesc = page_ptdesc(pte);
pagetable_pte_dtor(ptdesc);
- if (riscv_use_ipi_for_rfence())
- tlb_remove_page_ptdesc(tlb, ptdesc);
- else
- tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 97fcde30e247..55a7c3ec246b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -55,6 +55,9 @@
#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#else
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
#endif
/*
@@ -593,6 +596,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
+#define pgprot_nx pgprot_nx
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
@@ -642,6 +651,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
@@ -890,7 +900,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
-#define TASK_SIZE 0xffffffffUL
+#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0faf5f161f1e..68c3432dc6ea 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -68,6 +68,7 @@
#endif
#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
struct task_struct;
struct pt_regs;
@@ -122,6 +123,12 @@ struct thread_struct {
struct __riscv_v_ext_state vstate;
unsigned long align_ctl;
struct __riscv_v_ext_state kernel_vstate;
+#ifdef CONFIG_SMP
+ /* Flush the icache on migration */
+ bool force_icache_flush;
+ /* A forced icache flush is not needed if migrating to the previous cpu. */
+ unsigned int prev_cpu;
+#endif
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -183,6 +190,9 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2)
+extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
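
riscv_set_icache_flush_ctx() backs a new prctl() so user-space JITs can ask for icache maintenance on migration instead of issuing fence.i on every hart themselves. A hedged user-space sketch follows; the constant names are an assumption based on the cmodx documentation added elsewhere in this series, so check Documentation/arch/riscv/cmodx.rst rather than treating this as canonical:

#include <sys/prctl.h>

/* Sketch: request a forced icache flush on migration for the whole
 * process while self-modifying code is in use (constants assumed). */
static int enable_migration_icache_flush(void)
{
	return prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX,
		     PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
}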
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6e68f8dff76b..9186e7984672 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -131,6 +131,8 @@ enum sbi_ext_pmu_fid {
SBI_EXT_PMU_COUNTER_START,
SBI_EXT_PMU_COUNTER_STOP,
SBI_EXT_PMU_COUNTER_FW_READ,
+ SBI_EXT_PMU_COUNTER_FW_READ_HI,
+ SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
};
union sbi_pmu_ctr_info {
@@ -147,6 +149,13 @@ union sbi_pmu_ctr_info {
};
};
+/* Data structure to contain the pmu snapshot data */
+struct riscv_pmu_snapshot_data {
+ u64 ctr_overflow_mask;
+ u64 ctr_values[64];
+ u64 reserved[447];
+};
+
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
@@ -232,20 +241,22 @@ enum sbi_pmu_ctr_type {
#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
/* Flags defined for config matching function */
-#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
-#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
-#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2)
-#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3)
-#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4)
-#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
-#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
-#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
+#define SBI_PMU_CFG_FLAG_SKIP_MATCH BIT(0)
+#define SBI_PMU_CFG_FLAG_CLEAR_VALUE BIT(1)
+#define SBI_PMU_CFG_FLAG_AUTO_START BIT(2)
+#define SBI_PMU_CFG_FLAG_SET_VUINH BIT(3)
+#define SBI_PMU_CFG_FLAG_SET_VSINH BIT(4)
+#define SBI_PMU_CFG_FLAG_SET_UINH BIT(5)
+#define SBI_PMU_CFG_FLAG_SET_SINH BIT(6)
+#define SBI_PMU_CFG_FLAG_SET_MINH BIT(7)
/* Flags defined for counter start function */
-#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
+#define SBI_PMU_START_FLAG_SET_INIT_VALUE BIT(0)
+#define SBI_PMU_START_FLAG_INIT_SNAPSHOT BIT(1)
/* Flags defined for counter stop function */
-#define SBI_PMU_STOP_FLAG_RESET (1 << 0)
+#define SBI_PMU_STOP_FLAG_RESET BIT(0)
+#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT BIT(1)
enum sbi_ext_dbcn_fid {
SBI_EXT_DBCN_CONSOLE_WRITE = 0,
@@ -266,7 +277,7 @@ struct sbi_sta_struct {
u8 pad[47];
} __packed;
-#define SBI_STA_SHMEM_DISABLE -1
+#define SBI_SHMEM_DISABLE -1
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
@@ -284,6 +295,7 @@ struct sbi_sta_struct {
#define SBI_ERR_ALREADY_AVAILABLE -6
#define SBI_ERR_ALREADY_STARTED -7
#define SBI_ERR_ALREADY_STOPPED -8
+#define SBI_ERR_NO_SHMEM -9
extern unsigned long sbi_spec_version;
struct sbiret {
@@ -355,8 +367,8 @@ static inline unsigned long sbi_minor_version(void)
static inline unsigned long sbi_mk_version(unsigned long major,
unsigned long minor)
{
- return ((major & SBI_SPEC_VERSION_MAJOR_MASK) <<
- SBI_SPEC_VERSION_MAJOR_SHIFT) | minor;
+ return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT)
+ | (minor & SBI_SPEC_VERSION_MINOR_MASK);
}
int sbi_err_map_linux_errno(int err);
@@ -375,8 +387,12 @@ unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);
#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+#define riscv_use_sbi_for_rfence() \
+ static_branch_unlikely(&riscv_sbi_for_rfence)
void sbi_ipi_init(void);
#else
+static inline bool riscv_use_sbi_for_rfence(void) { return false; }
static inline void sbi_ipi_init(void) { }
#endif
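
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM follows the same shared-memory convention as the STA extension, which is why SBI_STA_SHMEM_DISABLE is generalised to SBI_SHMEM_DISABLE here. A hedged sketch of how a caller might program the snapshot area through sbi_ecall(); the argument layout is an assumption inferred from the flag and error codes above, not a copy of the driver code:

/* Sketch only: point the PMU snapshot area at @shmem_pa. Passing
 * SBI_SHMEM_DISABLE in both halves would tear the mapping down again. */
static int pmu_snapshot_set_shmem(phys_addr_t shmem_pa)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
			lower_32_bits(shmem_pa), upper_32_bits(shmem_pa),
			0, 0, 0, 0);

	return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
}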
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
deleted file mode 100644
index 956ae0a01bad..000000000000
--- a/arch/riscv/include/asm/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ASM_SIGNAL_H
-#define __ASM_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-#include <uapi/asm/ptrace.h>
-
-asmlinkage __visible
-void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
-
-#endif
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 0d555847cde6..7ac80e9f2288 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -49,12 +49,7 @@ void riscv_ipi_disable(void);
bool riscv_ipi_have_virq_range(void);
/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
-
-/* Check if we can use IPIs for remote FENCEs */
-DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-#define riscv_use_ipi_for_rfence() \
- static_branch_unlikely(&riscv_ipi_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr);
/* Check other CPUs stop or not */
bool smp_crash_stop_failed(void);
@@ -104,16 +99,10 @@ static inline bool riscv_ipi_have_virq_range(void)
return false;
}
-static inline void riscv_ipi_set_virq_range(int virq, int nr,
- bool use_for_rfence)
+static inline void riscv_ipi_set_virq_range(int virq, int nr)
{
}
-static inline bool riscv_use_ipi_for_rfence(void)
-{
- return false;
-}
-
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 4718096fa5e3..4ffb022b097f 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -13,7 +13,6 @@ struct suspend_context {
/* Saved and restored by low-level functions */
struct pt_regs regs;
/* Saved and restored by high-level functions */
- unsigned long scratch;
unsigned long envcfg;
unsigned long tvec;
unsigned long ie;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 7efdb0584d47..7594df37cc9f 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -8,6 +8,7 @@
#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
+#include <linux/mm_types.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
@@ -72,14 +73,36 @@ static __always_inline bool has_fpu(void) { return false; }
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
+static inline bool switch_to_should_flush_icache(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+ bool stale_mm = task->mm && task->mm->context.force_icache_flush;
+ bool stale_thread = task->thread.force_icache_flush;
+ bool thread_migrated = smp_processor_id() != task->thread.prev_cpu;
+
+ return thread_migrated && (stale_mm || stale_thread);
+#else
+ return false;
+#endif
+}
+
+#ifdef CONFIG_SMP
+#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id())
+#else
+#define __set_prev_cpu(thread)
+#endif
+
#define switch_to(prev, next, last) \
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
+ __set_prev_cpu(__prev->thread); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
if (has_vector()) \
__switch_to_vector(__prev, __next); \
+ if (switch_to_should_flush_icache(__next)) \
+ local_flush_icache_all(); \
((last) = __switch_to(__prev, __next)); \
} while (0)
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index 980094c2e976..ac80216549ff 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -36,7 +36,8 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
ulong) \
__attribute__((alias(__stringify(___se_##prefix##name)))); \
__diag_pop(); \
- static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __used; \
static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
#define SC_RISCV_REGS_TO_ARGS(x, ...) \
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 4112cc8d1d69..72e559934952 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,24 +15,34 @@
#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
#ifdef CONFIG_MMU
-extern unsigned long asid_mask;
-
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
}
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+ if (asid != FLUSH_TLB_NO_ASID)
+ ALT_SFENCE_VMA_ASID(asid);
+ else
+ local_flush_tlb_all();
+}
+
/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
- ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+ ALT_SFENCE_VMA_ADDR(addr);
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+ unsigned long asid)
+{
+ if (asid != FLUSH_TLB_NO_ASID)
+ ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
+ else
+ local_flush_tlb_page(addr);
}
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all() do { } while (0)
-#define local_flush_tlb_page(addr) do { } while (0)
-#endif /* CONFIG_MMU */
-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -55,27 +65,9 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
-#else /* CONFIG_SMP && CONFIG_MMU */
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- local_flush_tlb_all();
-}
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+extern unsigned long tlb_flush_all_threshold;
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */
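
tlb_flush_all_threshold is declared here so the SiFive CIP-1200 workaround earlier in this series can zero it and force every range flush to a full sfence.vma. A minimal sketch of the decision the range-flush helpers make with it (the function name is hypothetical; the real logic lives in the mm code):

/* Hypothetical helper: flush a VA range for @asid page by page while the
 * range is small, falling back to a full flush once it exceeds the
 * threshold, or always when the threshold is zeroed by the erratum. */
static void sketch_flush_range(unsigned long start, unsigned long end,
			       unsigned long asid)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;

	if (tlb_flush_all_threshold && nr_pages <= tlb_flush_all_threshold) {
		for (; start < end; start += PAGE_SIZE)
			local_flush_tlb_page_asid(start, asid);
	} else {
		local_flush_tlb_all_asid(asid);
	}
}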
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index ec0cab9fbddd..72ec1d9bd3f3 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- long __kr_err; \
+ long __kr_err = 0; \
\
__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
if (unlikely(__kr_err)) \
@@ -328,7 +328,7 @@ do { \
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- long __kr_err; \
+ long __kr_err = 0; \
\
__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
if (unlikely(__kr_err)) \
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 10aaa83db89e..95050ebe9ad0 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -34,7 +34,7 @@
#define AT_L3_CACHEGEOMETRY 47
/* entries in ARCH_DLINFO */
-#define AT_VECTOR_SIZE_ARCH 9
+#define AT_VECTOR_SIZE_ARCH 10
#define AT_MINSIGSTKSZ 51
#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 9f2a8e3ff204..dda76a05420b 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -54,11 +54,12 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
+#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index b1c503c2959c..e878e7cc3978 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -167,6 +167,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_ZTSO,
KVM_RISCV_ISA_EXT_ZACAS,
+ KVM_RISCV_ISA_EXT_SSCOFPMF,
KVM_RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index 62fa393b2eb2..24e37d1ef7ec 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -34,12 +34,6 @@ obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
obj-y += compat_vdso.o
CPPFLAGS_compat_vdso.lds += -P -C -DCOMPAT_VDSO -U$(ARCH)
-# Disable profiling and instrumentation for VDSO code
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-
# Force dependency
$(obj)/compat_vdso.o: $(obj)/compat_vdso.so
@@ -58,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# Generate VDSO offsets using helper script
-gen-compat_vdsosym := $(srctree)/$(src)/gen_compat_vdso_offsets.sh
+gen-compat_vdsosym := $(src)/gen_compat_vdso_offsets.sh
quiet_cmd_compat_vdsosym = VDSOSYM $@
cmd_compat_vdsosym = $(NM) $< | $(gen-compat_vdsosym) | LC_ALL=C sort > $@
@@ -74,5 +68,5 @@ quiet_cmd_compat_vdsold = VDSOLD $@
rm $@.tmp
# actual build commands
-quiet_cmd_compat_vdsoas = VDSOAS $@
+quiet_cmd_compat_vdsoas = VDSOAS $@
cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 54260c16f991..11c0d2e0becf 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -19,6 +19,7 @@
#include <linux/libfdt.h>
#include <linux/types.h>
#include <linux/memblock.h>
+#include <linux/vmalloc.h>
#include <asm/setup.h>
int arch_kimage_file_post_load_cleanup(struct kimage *image)
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index f5aa24d9e1c1..4f4987a6d83d 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -8,6 +8,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
@@ -75,8 +76,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
make_call_t0(hook_pos, target, call);
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
- if (patch_text_nosync
- ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -88,7 +88,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
make_call_t0(rec->ip, addr, call);
- if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -99,7 +99,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
{
unsigned int nops[2] = {NOP4, NOP4};
- if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -134,6 +134,42 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
+
+struct ftrace_modify_param {
+ int command;
+ atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+ struct ftrace_modify_param *param = data;
+
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ ftrace_modify_all_code(param->command);
+ /*
+ * Make sure the patching store is effective *before* we
+ * increment the counter which releases all waiting CPUs
+ * by using the release variant of atomic increment. The
+ * release pairs with the call to local_flush_icache_all()
+ * on the waiting CPU.
+ */
+ atomic_inc_return_release(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ }
+
+ local_flush_icache_all();
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+ stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 5e5a82644451..906f9a3a5d65 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/moduleloader.h>
-#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
@@ -905,17 +904,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
-#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
-void *module_alloc(unsigned long size)
-{
- return __vmalloc_node_range(size, 1, MODULES_VADDR,
- MODULES_END, GFP_KERNEL,
- PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-#endif
-
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c
index 0d6225fd3194..fa6b0339a65d 100644
--- a/arch/riscv/kernel/paravirt.c
+++ b/arch/riscv/kernel/paravirt.c
@@ -62,7 +62,7 @@ static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi,
ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
lo, hi, flags, 0, 0, 0);
if (ret.error) {
- if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE)
+ if (lo == SBI_SHMEM_DISABLE && hi == SBI_SHMEM_DISABLE)
pr_warn("Failed to disable steal-time shmem");
else
pr_warn("Failed to set steal-time shmem");
@@ -84,8 +84,8 @@ static int pv_time_cpu_online(unsigned int cpu)
static int pv_time_cpu_down_prepare(unsigned int cpu)
{
- return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE,
- SBI_STA_SHMEM_DISABLE, 0);
+ return sbi_sta_steal_time_set_shmem(SBI_SHMEM_DISABLE,
+ SBI_SHMEM_DISABLE, 0);
}
static u64 pv_time_steal_clock(int cpu)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 37e87fdcf6a0..4007563fb607 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
*/
lockdep_assert_held(&text_mutex);
+ preempt_disable();
+
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
@@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
+ preempt_enable();
+
return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
@@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
+ preempt_disable();
+
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
@@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
+ preempt_enable();
+
return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
@@ -188,7 +196,7 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+int patch_insn_write(void *addr, const void *insn, size_t len)
{
size_t patched = 0;
size_t size;
@@ -232,16 +240,23 @@ static int patch_text_cb(void *data)
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < patch->ninsns; i++) {
len = GET_INSN_LENGTH(patch->insns[i]);
- ret = patch_text_nosync(patch->addr + i * len,
- &patch->insns[i], len);
+ ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
}
- atomic_inc(&patch->cpu_count);
+ /*
+ * Make sure the patching store is effective *before* we
+ * increment the counter which releases all waiting CPUs
+ * by using the release variant of atomic increment. The
+ * release pairs with the call to local_flush_icache_all()
+ * on the waiting CPU.
+ */
+ atomic_inc_return_release(&patch->cpu_count);
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
- smp_mb();
}
+ local_flush_icache_all();
+
return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index b75f150b923d..50bc5ef7dd2f 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -17,12 +17,6 @@ KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
--prefix-alloc-sections=.init.pi
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
index 7142ec42e889..a69dfa610aa8 100644
--- a/arch/riscv/kernel/probes/ftrace.c
+++ b/arch/riscv/kernel/probes/ftrace.c
@@ -11,6 +11,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 2f08c14a933d..dfb28e57d900 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -6,6 +6,7 @@
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
+#include <linux/vmalloc.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
@@ -104,16 +105,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-#ifdef CONFIG_MMU
-void *alloc_insn_page(void)
-{
- return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
- VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-#endif
-
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 92922dbd5b5c..e4bc61c4e58a 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -27,8 +27,6 @@
#include <asm/vector.h>
#include <asm/cpufeature.h>
-register unsigned long gp_in_global __asm__("gp");
-
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
@@ -37,7 +35,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
extern asmlinkage void ret_from_fork(void);
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
{
cpu_do_idle();
}
@@ -207,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (unlikely(args->fn)) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
- childregs->gp = gp_in_global;
/* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
index a4559695ce62..1026e22955cc 100644
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -13,6 +13,9 @@
#include <linux/irqdomain.h>
#include <asm/sbi.h>
+DEFINE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_sbi_for_rfence);
+
static int sbi_ipi_virq;
static void sbi_ipi_handle(struct irq_desc *desc)
@@ -72,6 +75,12 @@ void __init sbi_ipi_init(void)
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);
- riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
+ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
pr_info("providing IPIs using SBI IPI extension\n");
+
+ /*
+ * Use the SBI remote fence extension to avoid
+ * the extra context switch needed to handle IPIs.
+ */
+ static_branch_enable(&riscv_sbi_for_rfence);
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 501e66debf69..5a2edd7f027e 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -119,6 +119,13 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
struct __sc_riscv_v_state __user *state = sc_vec;
void __user *datap;
+ /*
+ * Mark the vstate as clean prior to performing the actual copy,
+ * to avoid getting the vstate incorrectly clobbered by the
+ * discarded vector state.
+ */
+ riscv_v_vstate_set_restore(current, regs);
+
/* Copy everything of __sc_riscv_v_state except datap. */
err = __copy_from_user(&current->thread.vstate, &state->v_state,
offsetof(struct __riscv_v_ext_state, datap));
@@ -133,13 +140,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
* Copy the whole vector content from user space datap. Use
* copy_from_user to prevent information leak.
*/
- err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
- if (unlikely(err))
- return err;
-
- riscv_v_vstate_set_restore(current, regs);
-
- return err;
+ return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
}
#else
#define save_v_state(task, regs) (0)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 45dd4035416e..8e6eb64459af 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -171,10 +171,7 @@ bool riscv_ipi_have_virq_range(void)
return (ipi_virq_base) ? true : false;
}
-DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
-
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr)
{
int i, err;
@@ -197,12 +194,6 @@ void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
/* Enabled IPIs for boot CPU immediately */
riscv_ipi_enable();
-
- /* Update RFENCE static key */
- if (use_for_rfence)
- static_branch_enable(&riscv_ipi_for_rfence);
- else
- static_branch_disable(&riscv_ipi_for_rfence);
}
static const char * const ipi_names[] = {
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index d41090fc3203..4b3c50da48ba 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,7 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -234,9 +234,10 @@ asmlinkage __visible void smp_callin(void)
riscv_user_isa_enable();
/*
- * Remote TLB flushes are ignored while the CPU is offline, so emit
- * a local TLB flush right now just in case.
+ * Remote cache and TLB flushes are ignored while the CPU is offline,
+ * so flush them both right now just in case.
*/
+ local_flush_icache_all();
local_flush_tlb_all();
complete(&cpu_running);
/*
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 8a327b485b90..c8cec0cc5833 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -14,7 +14,6 @@
void suspend_save_csrs(struct suspend_context *context)
{
- context->scratch = csr_read(CSR_SCRATCH);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
context->envcfg = csr_read(CSR_ENVCFG);
context->tvec = csr_read(CSR_TVEC);
@@ -37,7 +36,7 @@ void suspend_save_csrs(struct suspend_context *context)
void suspend_restore_csrs(struct suspend_context *context)
{
- csr_write(CSR_SCRATCH, context->scratch);
+ csr_write(CSR_SCRATCH, 0);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
csr_write(CSR_ENVCFG, context->envcfg);
csr_write(CSR_TVEC, context->tvec);
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 8cae41a502dd..969ef3d59dbe 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -111,6 +111,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZTSO);
EXT_KEY(ZACAS);
EXT_KEY(ZICOND);
+ EXT_KEY(ZIHINTPAUSE);
if (has_vector()) {
EXT_KEY(ZVBB);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f1c1416a9f1e..64155323cc92 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -7,7 +7,6 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
-#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 868d6280cf66..05a16b1f0aee 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -122,7 +122,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
__show_regs(regs);
- dump_instr(KERN_EMERG, regs);
+ dump_instr(KERN_INFO, regs);
}
force_sig_fault(signo, code, (void __user *)addr);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 2adb7c3e4dd5..b62d5a2f4541 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -264,86 +264,14 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
-#ifdef CONFIG_RISCV_M_MODE
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
- u8 val;
-
- asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
- *r_val = val;
-
- return 0;
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
- asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
-
- return 0;
-}
-
-static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
-{
- register ulong __mepc asm ("a2") = mepc;
- ulong val, rvc_mask = 3, tmp;
-
- asm ("and %[tmp], %[addr], 2\n"
- "bnez %[tmp], 1f\n"
-#if defined(CONFIG_64BIT)
- __stringify(LWU) " %[insn], (%[addr])\n"
-#else
- __stringify(LW) " %[insn], (%[addr])\n"
-#endif
- "and %[tmp], %[insn], %[rvc_mask]\n"
- "beq %[tmp], %[rvc_mask], 2f\n"
- "sll %[insn], %[insn], %[xlen_minus_16]\n"
- "srl %[insn], %[insn], %[xlen_minus_16]\n"
- "j 2f\n"
- "1:\n"
- "lhu %[insn], (%[addr])\n"
- "and %[tmp], %[insn], %[rvc_mask]\n"
- "bne %[tmp], %[rvc_mask], 2f\n"
- "lhu %[tmp], 2(%[addr])\n"
- "sll %[tmp], %[tmp], 16\n"
- "add %[insn], %[insn], %[tmp]\n"
- "2:"
- : [insn] "=&r" (val), [tmp] "=&r" (tmp)
- : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
- [xlen_minus_16] "i" (XLEN_MINUS_16));
-
- *r_insn = val;
-
- return 0;
-}
-#else
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
- if (user_mode(regs)) {
- return __get_user(*r_val, (u8 __user *)addr);
- } else {
- *r_val = *addr;
- return 0;
- }
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
- if (user_mode(regs)) {
- return __put_user(val, (u8 __user *)addr);
- } else {
- *addr = val;
- return 0;
- }
-}
-
-#define __read_insn(regs, insn, insn_addr) \
+#define __read_insn(regs, insn, insn_addr, type) \
({ \
int __ret; \
\
if (user_mode(regs)) { \
- __ret = __get_user(insn, insn_addr); \
+ __ret = __get_user(insn, (type __user *) insn_addr); \
} else { \
- insn = *(__force u16 *)insn_addr; \
+ insn = *(type *)insn_addr; \
__ret = 0; \
} \
\
@@ -356,9 +284,8 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
if (epc & 0x2) {
ulong tmp = 0;
- u16 __user *insn_addr = (u16 __user *)epc;
- if (__read_insn(regs, insn, insn_addr))
+ if (__read_insn(regs, insn, epc, u16))
return -EFAULT;
/* __get_user() uses regular "lw" which sign extend the loaded
* value make sure to clear higher order bits in case we "or" it
@@ -369,16 +296,14 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
*r_insn = insn;
return 0;
}
- insn_addr++;
- if (__read_insn(regs, tmp, insn_addr))
+ epc += sizeof(u16);
+ if (__read_insn(regs, tmp, epc, u16))
return -EFAULT;
*r_insn = (tmp << 16) | insn;
return 0;
} else {
- u32 __user *insn_addr = (u32 __user *)epc;
-
- if (__read_insn(regs, insn, insn_addr))
+ if (__read_insn(regs, insn, epc, u32))
return -EFAULT;
if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
*r_insn = insn;
@@ -390,7 +315,6 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
return 0;
}
}
-#endif
union reg_data {
u8 data_bytes[8];
@@ -409,7 +333,7 @@ int handle_misaligned_load(struct pt_regs *regs)
unsigned long epc = regs->epc;
unsigned long insn;
unsigned long addr = regs->badaddr;
- int i, fp = 0, shift = 0, len = 0;
+ int fp = 0, shift = 0, len = 0;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
@@ -492,9 +416,11 @@ int handle_misaligned_load(struct pt_regs *regs)
return -EOPNOTSUPP;
val.data_u64 = 0;
- for (i = 0; i < len; i++) {
- if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+ if (user_mode(regs)) {
+ if (raw_copy_from_user(&val, (u8 __user *)addr, len))
return -1;
+ } else {
+ memcpy(&val, (u8 *)addr, len);
}
if (!fp)
@@ -515,7 +441,7 @@ int handle_misaligned_store(struct pt_regs *regs)
unsigned long epc = regs->epc;
unsigned long insn;
unsigned long addr = regs->badaddr;
- int i, len = 0, fp = 0;
+ int len = 0, fp = 0;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
@@ -588,9 +514,11 @@ int handle_misaligned_store(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_FPU) && fp)
return -EOPNOTSUPP;
- for (i = 0; i < len; i++) {
- if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+ if (user_mode(regs)) {
+ if (raw_copy_to_user((u8 __user *)addr, &val, len))
return -1;
+ } else {
+ memcpy((u8 *)addr, &val, len);
}
regs->epc = epc + INSN_LEN(insn);
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 9b517fe1b8a8..f7ef8ad9b550 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -37,12 +37,7 @@ endif
# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
-
-# Disable profiling and instrumentation for VDSO code
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
+CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so
@@ -59,7 +54,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index c9646521f113..c2cacfbc06a0 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -3,7 +3,7 @@
# Makefile for RISC-V KVM support
#
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(src)
include $(srctree)/virt/kvm/Makefile.kvm
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index a944294f6f23..0f0a9d11bb5f 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -545,6 +545,9 @@ void kvm_riscv_aia_enable(void)
enable_percpu_irq(hgei_parent_irq,
irq_get_trigger_type(hgei_parent_irq));
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
+ /* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
+ csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}
void kvm_riscv_aia_disable(void)
@@ -558,6 +561,8 @@ void kvm_riscv_aia_disable(void)
return;
hgctrl = get_cpu_ptr(&aia_hgei);
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
+ csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
/* Disable per-CPU SGEI interrupt */
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
disable_percpu_irq(hgei_parent_irq);
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
index 39e72aa016a4..b467ba5ed910 100644
--- a/arch/riscv/kvm/aia_aplic.c
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
raw_spin_lock_irqsave(&irqd->lock, flags);
sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
- if (!pending &&
- ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
- (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
goto skip_write_pending;
+ if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
+ sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
+ if (!pending)
+ goto skip_write_pending;
+ if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
+ goto skip_write_pending;
+ if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
+ goto skip_write_pending;
+ }
+
if (pending)
irqd->state |= APLIC_IRQ_STATE_PENDING;
else
@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
- bool ret;
- unsigned long flags;
+ u32 sourcecfg, sm, raw_input, irq_inverted;
struct aplic_irq *irqd;
+ unsigned long flags;
+ bool ret = false;
if (!irq || aplic->nr_irqs <= irq)
return false;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
- ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
+
+ sourcecfg = irqd->sourcecfg;
+ if (sourcecfg & APLIC_SOURCECFG_D)
+ goto skip;
+
+ sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
+ goto skip;
+
+ raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
+ irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
+ sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
+ ret = !!(raw_input ^ irq_inverted);
+
+skip:
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 225a435d9c9a..bab2ec34cd87 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -22,22 +22,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
int kvm_arch_hardware_enable(void)
{
- unsigned long hideleg, hedeleg;
-
- hedeleg = 0;
- hedeleg |= (1UL << EXC_INST_MISALIGNED);
- hedeleg |= (1UL << EXC_BREAKPOINT);
- hedeleg |= (1UL << EXC_SYSCALL);
- hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
- hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
- hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
- csr_write(CSR_HEDELEG, hedeleg);
-
- hideleg = 0;
- hideleg |= (1UL << IRQ_VS_SOFT);
- hideleg |= (1UL << IRQ_VS_TIMER);
- hideleg |= (1UL << IRQ_VS_EXT);
- csr_write(CSR_HIDELEG, hideleg);
+ csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
+ csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);
/* VS should access only the time counter directly. Everything else should trap */
csr_write(CSR_HCOUNTEREN, 0x02);
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index a9e2fd7245e1..b63650f9b966 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -550,26 +550,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- int ret;
- kvm_pfn_t pfn = pte_pfn(range->arg.pte);
-
- if (!kvm->arch.pgd)
- return false;
-
- WARN_ON(range->end - range->start != 1);
-
- ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT,
- __pfn_to_phys(pfn), PAGE_SIZE, true, true);
- if (ret) {
- kvm_debug("Failed to map G-stage page (error %d)\n", ret);
- return true;
- }
-
- return false;
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
pte_t *ptep;
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index b5ca9f2e98ac..17e21df36cc1 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -64,7 +64,9 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
memcpy(csr, reset_csr, sizeof(*csr));
+ spin_lock(&vcpu->arch.reset_cntx_lock);
memcpy(cntx, reset_cntx, sizeof(*cntx));
+ spin_unlock(&vcpu->arch.reset_cntx_lock);
kvm_riscv_vcpu_fp_reset(vcpu);
@@ -102,6 +104,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -119,12 +123,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
spin_lock_init(&vcpu->arch.hfence_lock);
/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+ spin_lock_init(&vcpu->arch.reset_cntx_lock);
+
+ spin_lock(&vcpu->arch.reset_cntx_lock);
cntx = &vcpu->arch.guest_reset_context;
cntx->sstatus = SR_SPP | SR_SPIE;
cntx->hstatus = 0;
cntx->hstatus |= HSTATUS_VTW;
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
+ spin_unlock(&vcpu->arch.reset_cntx_lock);
if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
return -ENOMEM;
@@ -201,7 +209,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
- !vcpu->arch.power_off && !vcpu->arch.pause);
+ !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -365,6 +373,13 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
}
}
+ /* Sync up the HVIP.LCOFIP bit changes (only clear) by the guest */
+ if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) {
+ if (!(hvip & (1UL << IRQ_PMU_OVF)) &&
+ !test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask))
+ clear_bit(IRQ_PMU_OVF, v->irqs_pending);
+ }
+
/* Sync-up AIA high interrupts */
kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
@@ -382,7 +397,8 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
- irq != IRQ_VS_EXT)
+ irq != IRQ_VS_EXT &&
+ irq != IRQ_PMU_OVF)
return -EINVAL;
set_bit(irq, vcpu->arch.irqs_pending);
@@ -397,14 +413,15 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
/*
- * We only allow VS-mode software, timer, and external
+ * We only allow VS-mode software, timer, counter overflow and external
* interrupts when irq is one of the local interrupts
* defined by RISC-V privilege specification.
*/
if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
- irq != IRQ_VS_EXT)
+ irq != IRQ_VS_EXT &&
+ irq != IRQ_PMU_OVF)
return -EINVAL;
clear_bit(irq, vcpu->arch.irqs_pending);
@@ -429,26 +446,42 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
-void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.power_off = true;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
-void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_riscv_vcpu_power_off(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
- vcpu->arch.power_off = false;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
kvm_vcpu_wake_up(vcpu);
}
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_riscv_vcpu_power_on(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- if (vcpu->arch.power_off)
- mp_state->mp_state = KVM_MP_STATE_STOPPED;
- else
- mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+ *mp_state = READ_ONCE(vcpu->arch.mp_state);
return 0;
}
@@ -458,25 +491,36 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int ret = 0;
+ spin_lock(&vcpu->arch.mp_state_lock);
+
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.power_off = false;
+ WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
- kvm_riscv_vcpu_power_off(vcpu);
+ __kvm_riscv_vcpu_power_off(vcpu);
break;
default:
ret = -EINVAL;
}
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- /* TODO; To be implemented later. */
- return -EINVAL;
+ if (dbg->control & KVM_GUESTDBG_ENABLE) {
+ vcpu->guest_debug = dbg->control;
+ vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT);
+ } else {
+ vcpu->guest_debug = 0;
+ vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
+ }
+
+ return 0;
}
static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
@@ -505,6 +549,10 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
if (riscv_isa_extension_available(isa, SMSTATEEN))
cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
}
+
+ cfg->hedeleg = KVM_HEDELEG_DEFAULT;
+ if (vcpu->guest_debug)
+ cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -519,6 +567,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_VSEPC, csr->vsepc);
csr_write(CSR_VSCAUSE, csr->vscause);
csr_write(CSR_VSTVAL, csr->vstval);
+ csr_write(CSR_HEDELEG, cfg->hedeleg);
csr_write(CSR_HVIP, csr->hvip);
csr_write(CSR_VSATP, csr->vsatp);
csr_write(CSR_HENVCFG, cfg->henvcfg);
@@ -584,11 +633,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
rcuwait_wait_event(wait,
- (!vcpu->arch.power_off) && (!vcpu->arch.pause),
+ (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
TASK_INTERRUPTIBLE);
kvm_vcpu_srcu_read_lock(vcpu);
- if (vcpu->arch.power_off || vcpu->arch.pause) {
+ if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
/*
* Awaken to handle a signal, request to
* sleep again later.
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 2415722c01b8..5761f95abb60 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -204,6 +204,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
break;
+ case EXC_BREAKPOINT:
+ run->exit_reason = KVM_EXIT_DEBUG;
+ ret = 0;
+ break;
default:
break;
}
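Taken together with the KVM_CAP_SET_GUEST_DEBUG and kvm_arch_vcpu_ioctl_set_guest_debug() changes elsewhere in this diff, breakpoint exits are driven from userspace through the generic KVM debug ioctl. A minimal VMM-side sketch follows; vcpu_fd and run are assumed to be an existing vCPU file descriptor and its mmap'ed kvm_run area, and only generic KVM uapi symbols are used.

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Assumption: vcpu_fd is an already-created KVM vCPU fd. */
static int enable_guest_debug(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE;	/* guest ebreak now exits to the VMM */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

/* Assumption: run points at the vCPU's mmap'ed struct kvm_run. */
static void handle_exit(const struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_DEBUG)
		printf("guest hit a breakpoint\n");
}

With debugging enabled, EXC_BREAKPOINT is no longer delegated to the guest, so the ebreak traps to the host and surfaces as KVM_EXIT_DEBUG as in the exit handler above.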
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index f4a6124d25c9..c676275ea0a0 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -36,6 +36,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
/* Multi letter extensions (alphabetically sorted) */
KVM_ISA_EXT_ARR(SMSTATEEN),
KVM_ISA_EXT_ARR(SSAIA),
+ KVM_ISA_EXT_ARR(SSCOFPMF),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
@@ -99,6 +100,9 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
switch (ext) {
case KVM_RISCV_ISA_EXT_H:
return false;
+ case KVM_RISCV_ISA_EXT_SSCOFPMF:
+ /* Sscofpmf depends on interrupt filtering defined in ssaia */
+ return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
case KVM_RISCV_ISA_EXT_V:
return riscv_v_vstate_ctrl_user_allowed();
default:
@@ -116,6 +120,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
+ /* There is no architectural config bit to disable sscofpmf completely */
+ case KVM_RISCV_ISA_EXT_SSCOFPMF:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
@@ -986,7 +992,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
- return copy_isa_ext_reg_indices(vcpu, NULL);;
+ return copy_isa_ext_reg_indices(vcpu, NULL);
}
static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 86391a5061dd..04db1f993c47 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -14,6 +14,7 @@
#include <asm/csr.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_pmu.h>
+#include <asm/sbi.h>
#include <linux/bitops.h>
#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
@@ -39,7 +40,7 @@ static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
u64 sample_period;
if (!pmc->counter_val)
- sample_period = counter_val_mask + 1;
+ sample_period = counter_val_mask;
else
sample_period = (-pmc->counter_val) & counter_val_mask;
@@ -196,6 +197,36 @@ static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx,
return kvm_pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask);
}
+static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
+ unsigned long *out_val)
+{
+ struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ int fevent_code;
+
+ if (!IS_ENABLED(CONFIG_32BIT)) {
+ pr_warn("%s: should be invoked for only RV32\n", __func__);
+ return -EINVAL;
+ }
+
+ if (cidx >= kvm_pmu_num_counters(kvpmu) || cidx == 1) {
+ pr_warn("Invalid counter id [%ld]during read\n", cidx);
+ return -EINVAL;
+ }
+
+ pmc = &kvpmu->pmc[cidx];
+
+ if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
+ return -EINVAL;
+
+ fevent_code = get_event_code(pmc->event_idx);
+ pmc->counter_val = kvpmu->fw_event[fevent_code].value;
+
+ *out_val = pmc->counter_val >> 32;
+
+ return 0;
+}
+
static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
unsigned long *out_val)
{
@@ -204,6 +235,11 @@ static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
u64 enabled, running;
int fevent_code;
+ if (cidx >= kvm_pmu_num_counters(kvpmu) || cidx == 1) {
+ pr_warn("Invalid counter id [%ld] during read\n", cidx);
+ return -EINVAL;
+ }
+
pmc = &kvpmu->pmc[cidx];
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
@@ -229,8 +265,50 @@ static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ct
return 0;
}
-static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
- unsigned long flags, unsigned long eidx, unsigned long evtdata)
+static void kvm_riscv_pmu_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_vcpu *vcpu = pmc->vcpu;
+ struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+ struct riscv_pmu *rpmu = to_riscv_pmu(perf_event->pmu);
+ u64 period;
+
+ /*
+ * Stop the event counting by directly accessing the perf_event.
+ * Otherwise, this needs to be deferred via a workqueue.
+ * That will introduce skew in the counter value because the actual
+ * physical counter would start after returning from this function.
+ * It will be stopped again once the workqueue is scheduled.
+ */
+ rpmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+ /*
+ * The hw counter would start automatically when this function returns.
+ * Thus, the host may continue to interrupt and inject it to the guest
+ * even without the guest configuring the next event. Depending on the hardware
+ * the host may have some sluggishness only if privilege mode filtering is not
+ * available. In an ideal world, where qemu is not the only capable hardware,
+ * this can be removed.
+ * FYI: ARM64 does this way while x86 doesn't do anything as such.
+ * TODO: Should we keep it for RISC-V?
+ */
+ period = -(local64_read(&perf_event->count));
+
+ local64_set(&perf_event->hw.period_left, 0);
+ perf_event->attr.sample_period = period;
+ perf_event->hw.sample_period = period;
+
+ set_bit(pmc->idx, kvpmu->pmc_overflown);
+ kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_PMU_OVF);
+
+ rpmu->pmu.start(perf_event, PERF_EF_RELOAD);
+}
+
+static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
+ unsigned long flags, unsigned long eidx,
+ unsigned long evtdata)
{
struct perf_event *event;
@@ -247,7 +325,7 @@ static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr
*/
attr->sample_period = kvm_pmu_get_sample_period(pmc);
- event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
+ event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc);
if (IS_ERR(event)) {
pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
return PTR_ERR(event);
@@ -310,6 +388,80 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
return ret;
}
+static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+ int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
+
+ if (kvpmu->sdata) {
+ if (kvpmu->snapshot_addr != INVALID_GPA) {
+ memset(kvpmu->sdata, 0, snapshot_area_size);
+ kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr,
+ kvpmu->sdata, snapshot_area_size);
+ } else {
+ pr_warn("snapshot address invalid\n");
+ }
+ kfree(kvpmu->sdata);
+ kvpmu->sdata = NULL;
+ }
+ kvpmu->snapshot_addr = INVALID_GPA;
+}
+
+int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
+ unsigned long saddr_high, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+ int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
+ int sbiret = 0;
+ gpa_t saddr;
+ unsigned long hva;
+ bool writable;
+
+ if (!kvpmu || flags) {
+ sbiret = SBI_ERR_INVALID_PARAM;
+ goto out;
+ }
+
+ if (saddr_low == SBI_SHMEM_DISABLE && saddr_high == SBI_SHMEM_DISABLE) {
+ kvm_pmu_clear_snapshot_area(vcpu);
+ return 0;
+ }
+
+ saddr = saddr_low;
+
+ if (saddr_high != 0) {
+ if (IS_ENABLED(CONFIG_32BIT))
+ saddr |= ((gpa_t)saddr_high << 32);
+ else
+ sbiret = SBI_ERR_INVALID_ADDRESS;
+ goto out;
+ }
+
+ hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
+ if (kvm_is_error_hva(hva) || !writable) {
+ sbiret = SBI_ERR_INVALID_ADDRESS;
+ goto out;
+ }
+
+ kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
+ if (!kvpmu->sdata)
+ return -ENOMEM;
+
+ if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) {
+ kfree(kvpmu->sdata);
+ sbiret = SBI_ERR_FAILURE;
+ goto out;
+ }
+
+ kvpmu->snapshot_addr = saddr;
+
+out:
+ retdata->err_val = sbiret;
+
+ return 0;
+}
+
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
struct kvm_vcpu_sbi_return *retdata)
{
@@ -343,20 +495,40 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
int i, pmc_index, sbiret = 0;
struct kvm_pmc *pmc;
int fevent_code;
+ bool snap_flag_set = flags & SBI_PMU_START_FLAG_INIT_SNAPSHOT;
if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
+ if (snap_flag_set) {
+ if (kvpmu->snapshot_addr == INVALID_GPA) {
+ sbiret = SBI_ERR_NO_SHMEM;
+ goto out;
+ }
+ if (kvm_vcpu_read_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata,
+ sizeof(struct riscv_pmu_snapshot_data))) {
+ pr_warn("Unable to read snapshot shared memory while starting counters\n");
+ sbiret = SBI_ERR_FAILURE;
+ goto out;
+ }
+ }
/* Start the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
pmc_index = i + ctr_base;
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
+ /* The guest started the counter again. Reset the overflow status */
+ clear_bit(pmc_index, kvpmu->pmc_overflown);
pmc = &kvpmu->pmc[pmc_index];
- if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
+ if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE) {
pmc->counter_val = ival;
+ } else if (snap_flag_set) {
+ /* The counter indices in the snapshot are relative to the counter base */
+ pmc->counter_val = kvpmu->sdata->ctr_values[i];
+ }
+
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
fevent_code = get_event_code(pmc->event_idx);
if (fevent_code >= SBI_PMU_FW_MAX) {
@@ -400,12 +572,19 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
u64 enabled, running;
struct kvm_pmc *pmc;
int fevent_code;
+ bool snap_flag_set = flags & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
+ bool shmem_needs_update = false;
if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
+ if (snap_flag_set && kvpmu->snapshot_addr == INVALID_GPA) {
+ sbiret = SBI_ERR_NO_SHMEM;
+ goto out;
+ }
+
/* Stop the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
pmc_index = i + ctr_base;
@@ -432,21 +611,49 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
sbiret = SBI_ERR_ALREADY_STOPPED;
}
- if (flags & SBI_PMU_STOP_FLAG_RESET) {
- /* Relase the counter if this is a reset request */
- pmc->counter_val += perf_event_read_value(pmc->perf_event,
- &enabled, &running);
+ if (flags & SBI_PMU_STOP_FLAG_RESET)
+ /* Release the counter if this is a reset request */
kvm_pmu_release_perf_event(pmc);
- }
} else {
sbiret = SBI_ERR_INVALID_PARAM;
}
+
+ if (snap_flag_set && !sbiret) {
+ if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW)
+ pmc->counter_val = kvpmu->fw_event[fevent_code].value;
+ else if (pmc->perf_event)
+ pmc->counter_val += perf_event_read_value(pmc->perf_event,
+ &enabled, &running);
+ /*
+ * The counter and overflow indices in the snapshot region are relative to
+ * cbase. Modify the set bit in the counter mask instead of the pmc_index
+ * which indicates the absolute counter index.
+ */
+ if (test_bit(pmc_index, kvpmu->pmc_overflown))
+ kvpmu->sdata->ctr_overflow_mask |= BIT(i);
+ kvpmu->sdata->ctr_values[i] = pmc->counter_val;
+ shmem_needs_update = true;
+ }
+
if (flags & SBI_PMU_STOP_FLAG_RESET) {
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
clear_bit(pmc_index, kvpmu->pmc_in_use);
+ clear_bit(pmc_index, kvpmu->pmc_overflown);
+ if (snap_flag_set) {
+ /*
+ * Only clear the given counter, as the caller is responsible for
+ * validating both the overflow mask and configured counters.
+ */
+ kvpmu->sdata->ctr_overflow_mask &= ~BIT(i);
+ shmem_needs_update = true;
+ }
}
}
+ if (shmem_needs_update)
+ kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata,
+ sizeof(struct riscv_pmu_snapshot_data));
+
out:
retdata->err_val = sbiret;
@@ -458,7 +665,8 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
unsigned long eidx, u64 evtdata,
struct kvm_vcpu_sbi_return *retdata)
{
- int ctr_idx, ret, sbiret = 0;
+ int ctr_idx, sbiret = 0;
+ long ret;
bool is_fevent;
unsigned long event_code;
u32 etype = kvm_pmu_get_perf_event_type(eidx);
@@ -517,8 +725,10 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
kvpmu->fw_event[event_code].started = true;
} else {
ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
- if (ret)
- return ret;
+ if (ret) {
+ sbiret = SBI_ERR_NOT_SUPPORTED;
+ goto out;
+ }
}
set_bit(ctr_idx, kvpmu->pmc_in_use);
@@ -530,7 +740,19 @@ out:
return 0;
}
-int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ int ret;
+
+ ret = pmu_fw_ctr_read_hi(vcpu, cidx, &retdata->out_val);
+ if (ret == -EINVAL)
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+
+ return 0;
+}
+
+int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
struct kvm_vcpu_sbi_return *retdata)
{
int ret;
@@ -566,6 +788,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
+ kvpmu->snapshot_addr = INVALID_GPA;
if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA");
@@ -585,6 +808,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
pmc = &kvpmu->pmc[i];
pmc->idx = i;
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
+ pmc->vcpu = vcpu;
if (i < kvpmu->num_hw_ctrs) {
pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
if (i < 3)
@@ -601,7 +825,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
pmc->cinfo.csr = CSR_CYCLE + i;
} else {
pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
- pmc->cinfo.width = BITS_PER_LONG - 1;
+ pmc->cinfo.width = 63;
}
}
@@ -617,14 +841,16 @@ void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
if (!kvpmu)
return;
- for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) {
+ for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS) {
pmc = &kvpmu->pmc[i];
pmc->counter_val = 0;
kvm_pmu_release_perf_event(pmc);
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
}
- bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
+ bitmap_zero(kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+ bitmap_zero(kvpmu->pmc_overflown, RISCV_KVM_MAX_COUNTERS);
memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
+ kvm_pmu_clear_snapshot_area(vcpu);
}
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 72a2ffb8dcd1..62f409d4176e 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -138,8 +138,11 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
unsigned long i;
struct kvm_vcpu *tmp;
- kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.power_off = true;
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ }
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&run->system_event, 0, sizeof(run->system_event));
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index 7dca0e9381d9..dce667f4b6ab 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -18,13 +18,20 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_vcpu *target_vcpu;
unsigned long target_vcpuid = cp->a0;
+ int ret = 0;
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!target_vcpu->arch.power_off)
- return SBI_ERR_ALREADY_AVAILABLE;
+ spin_lock(&target_vcpu->arch.mp_state_lock);
+
+ if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
+ ret = SBI_ERR_ALREADY_AVAILABLE;
+ goto out;
+ }
+
+ spin_lock(&target_vcpu->arch.reset_cntx_lock);
reset_cntx = &target_vcpu->arch.guest_reset_context;
/* start address */
reset_cntx->sepc = cp->a1;
@@ -32,21 +39,35 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
reset_cntx->a0 = target_vcpuid;
/* private data passed from kernel */
reset_cntx->a1 = cp->a2;
+ spin_unlock(&target_vcpu->arch.reset_cntx_lock);
+
kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
- kvm_riscv_vcpu_power_on(target_vcpu);
+ __kvm_riscv_vcpu_power_on(target_vcpu);
- return 0;
+out:
+ spin_unlock(&target_vcpu->arch.mp_state_lock);
+
+ return ret;
}
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.power_off)
- return SBI_ERR_FAILURE;
+ int ret = 0;
- kvm_riscv_vcpu_power_off(vcpu);
+ spin_lock(&vcpu->arch.mp_state_lock);
- return 0;
+ if (kvm_riscv_vcpu_stopped(vcpu)) {
+ ret = SBI_ERR_FAILURE;
+ goto out;
+ }
+
+ __kvm_riscv_vcpu_power_off(vcpu);
+
+out:
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ return ret;
}
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
@@ -58,7 +79,7 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!target_vcpu->arch.power_off)
+ if (!kvm_riscv_vcpu_stopped(target_vcpu))
return SBI_HSM_STATE_STARTED;
else if (vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
@@ -71,14 +92,11 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
int ret = 0;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
- struct kvm *kvm = vcpu->kvm;
unsigned long funcid = cp->a6;
switch (funcid) {
case SBI_EXT_HSM_HART_START:
- mutex_lock(&kvm->lock);
ret = kvm_sbi_hsm_vcpu_start(vcpu);
- mutex_unlock(&kvm->lock);
break;
case SBI_EXT_HSM_HART_STOP:
ret = kvm_sbi_hsm_vcpu_stop(vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi_pmu.c b/arch/riscv/kvm/vcpu_sbi_pmu.c
index 7eca72df2cbd..e4be34e03e83 100644
--- a/arch/riscv/kvm/vcpu_sbi_pmu.c
+++ b/arch/riscv/kvm/vcpu_sbi_pmu.c
@@ -42,9 +42,9 @@ static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
#endif
/*
* This can fail if perf core framework fails to create an event.
- * Forward the error to userspace because it's an error which
- * happened within the host kernel. The other option would be
- * to convert to an SBI error and forward to the guest.
+ * There is no need to forward the error to userspace or to exit the guest.
+ * The operation can continue without profiling. Forward the
+ * appropriate SBI error to the guest.
*/
ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1,
cp->a2, cp->a3, temp, retdata);
@@ -62,7 +62,16 @@ static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata);
break;
case SBI_EXT_PMU_COUNTER_FW_READ:
- ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cp->a0, retdata);
+ ret = kvm_riscv_vcpu_pmu_fw_ctr_read(vcpu, cp->a0, retdata);
+ break;
+ case SBI_EXT_PMU_COUNTER_FW_READ_HI:
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = kvm_riscv_vcpu_pmu_fw_ctr_read_hi(vcpu, cp->a0, retdata);
+ else
+ retdata->out_val = 0;
+ break;
+ case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM:
+ ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata);
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index d8cf9ca28c61..5f35427114c1 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -93,8 +93,8 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
if (flags != 0)
return SBI_ERR_INVALID_PARAM;
- if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
- shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
+ if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
+ shmem_phys_hi == SBI_SHMEM_DISABLE) {
vcpu->arch.sta.shmem = INVALID_GPA;
return 0;
}
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index ce58bc48e5b8..7396b8654f45 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -186,6 +186,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
case KVM_CAP_IMMEDIATE_EXIT:
+ case KVM_CAP_SET_GUEST_DEBUG:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 2c869f8026a8..cbe4d775ef56 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,14 +13,11 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o tlbflush.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += pmem.o
-ifeq ($(CONFIG_MMU),y)
-obj-$(CONFIG_SMP) += tlbflush.o
-endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index bc61ee5975e4..a03c994eed3b 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -5,6 +5,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/prctl.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>
@@ -21,7 +22,9 @@ void flush_icache_all(void)
{
local_flush_icache_all();
- if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
+ if (num_online_cpus() < 2)
+ return;
+ else if (riscv_use_sbi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
@@ -69,8 +72,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred().
*/
smp_mb();
- } else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
- !riscv_use_ipi_for_rfence()) {
+ } else if (riscv_use_sbi_for_rfence()) {
sbi_remote_fence_i(&others);
} else {
on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
@@ -152,3 +154,115 @@ void __init riscv_init_cbo_blocksizes(void)
if (cboz_block_size)
riscv_cboz_block_size = cboz_block_size;
}
+
+#ifdef CONFIG_SMP
+static void set_icache_stale_mask(void)
+{
+ cpumask_t *mask;
+ bool stale_cpu;
+
+ /*
+ * Mark every other hart's icache as needing a flush for
+ * this MM. Maintain the previous value of the current
+ * cpu to handle the case when this function is called
+ * concurrently on different harts.
+ */
+ mask = &current->mm->context.icache_stale_mask;
+ stale_cpu = cpumask_test_cpu(smp_processor_id(), mask);
+
+ cpumask_setall(mask);
+ cpumask_assign_cpu(smp_processor_id(), mask, stale_cpu);
+}
+#endif
+
+/**
+ * riscv_set_icache_flush_ctx() - Enable/disable icache flushing instructions in
+ * userspace.
+ * @ctx: Set the type of icache flushing instructions permitted/prohibited in
+ * userspace. Supported values described below.
+ *
+ * Supported values for ctx:
+ *
+ * * %PR_RISCV_CTX_SW_FENCEI_ON: Allow fence.i in user space.
+ *
+ * * %PR_RISCV_CTX_SW_FENCEI_OFF: Disallow fence.i in user space. All threads in
+ * a process will be affected when ``scope == PR_RISCV_SCOPE_PER_PROCESS``.
+ * Therefore, caution must be taken; use this flag only when you can guarantee
+ * that no thread in the process will emit fence.i from this point onward.
+ *
+ * @scope: Set scope of where icache flushing instructions are allowed to be
+ * emitted. Supported values described below.
+ *
+ * Supported values for scope:
+ *
+ * * %PR_RISCV_SCOPE_PER_PROCESS: Ensure the icache of any thread in this process
+ * is coherent with instruction storage upon
+ * migration.
+ *
+ * * %PR_RISCV_SCOPE_PER_THREAD: Ensure the icache of the current thread is
+ * coherent with instruction storage upon
+ * migration.
+ *
+ * When ``scope == PR_RISCV_SCOPE_PER_PROCESS``, all threads in the process are
+ * permitted to emit icache flushing instructions. Whenever any thread in the
+ * process is migrated, the corresponding hart's icache will be guaranteed to be
+ * consistent with instruction storage. This does not enforce any guarantees
+ * outside of migration. If a thread modifies an instruction that another thread
+ * may attempt to execute, the other thread must still emit an icache flushing
+ * instruction before attempting to execute the potentially modified
+ * instruction. This must be performed by the user-space program.
+ *
+ * In per-thread context (e.g. ``scope == PR_RISCV_SCOPE_PER_THREAD``), only the
+ * thread calling this function is permitted to emit icache flushing
+ * instructions. When the thread is migrated, the corresponding hart's icache
+ * will be guaranteed to be consistent with instruction storage.
+ *
+ * On kernels configured without SMP, this function is a nop as migrations
+ * across harts will not occur.
+ */
+int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
+{
+#ifdef CONFIG_SMP
+ switch (ctx) {
+ case PR_RISCV_CTX_SW_FENCEI_ON:
+ switch (scope) {
+ case PR_RISCV_SCOPE_PER_PROCESS:
+ current->mm->context.force_icache_flush = true;
+ break;
+ case PR_RISCV_SCOPE_PER_THREAD:
+ current->thread.force_icache_flush = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PR_RISCV_CTX_SW_FENCEI_OFF:
+ switch (scope) {
+ case PR_RISCV_SCOPE_PER_PROCESS:
+ current->mm->context.force_icache_flush = false;
+
+ set_icache_stale_mask();
+ break;
+ case PR_RISCV_SCOPE_PER_THREAD:
+ current->thread.force_icache_flush = false;
+
+ set_icache_stale_mask();
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+#else
+ switch (ctx) {
+ case PR_RISCV_CTX_SW_FENCEI_ON:
+ case PR_RISCV_CTX_SW_FENCEI_OFF:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+#endif
+}
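The kernel-doc above spells out the ctx/scope semantics of this control; a short sketch of the userspace side follows, assuming the companion uapi change (not part of this hunk) exposes the prctl option as PR_RISCV_SET_ICACHE_FLUSH_CTX alongside the PR_RISCV_CTX_SW_FENCEI_* and PR_RISCV_SCOPE_* values referenced in the comment.

#include <linux/prctl.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/*
	 * Assumption: PR_RISCV_SET_ICACHE_FLUSH_CTX comes from the uapi patch
	 * paired with this series; the ctx/scope values are the ones documented
	 * in riscv_set_icache_flush_ctx() above.
	 */
	if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
		  PR_RISCV_SCOPE_PER_PROCESS) == -1) {
		perror("prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX)");
		return 1;
	}

	/* ... JIT or patch code, issue fence.i, run the new code ... */

	/* No more self-modifying code from here on: stop flushing on migration. */
	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF,
	      PR_RISCV_SCOPE_PER_PROCESS);
	return 0;
}

While the control is ON, the kernel keeps the icache coherent across migrations so the process's own fence.i remains sufficient; turning it OFF marks other harts' icaches stale via set_icache_stale_mask() as shown above.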
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index ba8eb3944687..4abe3de23225 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -15,14 +15,13 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#include <asm/switch_to.h>
#ifdef CONFIG_MMU
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-static unsigned long asid_bits;
static unsigned long num_asids;
-unsigned long asid_mask;
static atomic_long_t current_version;
@@ -81,7 +80,7 @@ static void __flush_context(void)
if (cntx == 0)
cntx = per_cpu(reserved_context, i);
- __set_bit(cntx & asid_mask, context_asid_map);
+ __set_bit(cntx2asid(cntx), context_asid_map);
per_cpu(reserved_context, i) = cntx;
}
@@ -102,7 +101,7 @@ static unsigned long __new_context(struct mm_struct *mm)
lockdep_assert_held(&context_lock);
if (cntx != 0) {
- unsigned long newcntx = ver | (cntx & asid_mask);
+ unsigned long newcntx = ver | cntx2asid(cntx);
/*
* If our current CONTEXT was active during a rollover, we
@@ -115,7 +114,7 @@ static unsigned long __new_context(struct mm_struct *mm)
* We had a valid CONTEXT in a previous life, so try to
* re-use it if possible.
*/
- if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
+ if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
return newcntx;
}
@@ -128,7 +127,7 @@ static unsigned long __new_context(struct mm_struct *mm)
goto set_asid;
/* We're out of ASIDs, so increment current_version */
- ver = atomic_long_add_return_relaxed(num_asids, &current_version);
+ ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);
/* Flush everything */
__flush_context();
@@ -168,7 +167,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
*/
old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
if (old_active_cntx &&
- ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
+ (cntx2version(cntx) == atomic_long_read(&current_version)) &&
atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
old_active_cntx, cntx))
goto switch_mm_fast;
@@ -177,7 +176,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
/* Check that our ASID belongs to the current_version. */
cntx = atomic_long_read(&mm->context.id);
- if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
+ if (cntx2version(cntx) != atomic_long_read(&current_version)) {
cntx = __new_context(mm);
atomic_long_set(&mm->context.id, cntx);
}
@@ -191,7 +190,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
switch_mm_fast:
csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
- ((cntx & asid_mask) << SATP_ASID_SHIFT) |
+ (cntx2asid(cntx) << SATP_ASID_SHIFT) |
satp_mode);
if (need_flush_tlb)
@@ -202,7 +201,7 @@ static void set_mm_noasid(struct mm_struct *mm)
{
/* Switch the page table and blindly nuke entire local TLB */
csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
- local_flush_tlb_all();
+ local_flush_tlb_all_asid(0);
}
static inline void set_mm(struct mm_struct *prev,
@@ -227,7 +226,7 @@ static inline void set_mm(struct mm_struct *prev,
static int __init asids_init(void)
{
- unsigned long old;
+ unsigned long asid_bits, old;
/* Figure-out number of ASID bits in HW */
old = csr_read(CSR_SATP);
@@ -247,7 +246,6 @@ static int __init asids_init(void)
/* Pre-compute ASID details */
if (asid_bits) {
num_asids = 1 << asid_bits;
- asid_mask = num_asids - 1;
}
/*
@@ -255,7 +253,7 @@ static int __init asids_init(void)
* at-least twice more than CPUs
*/
if (num_asids > (2 * num_possible_cpus())) {
- atomic_long_set(&current_version, num_asids);
+ atomic_long_set(&current_version, BIT(SATP_ASID_BITS));
context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
if (!context_asid_map)
@@ -297,21 +295,23 @@ static inline void set_mm(struct mm_struct *prev,
*
* The "cpu" argument must be the current local CPU number.
*/
-static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
+static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu,
+ struct task_struct *task)
{
#ifdef CONFIG_SMP
- cpumask_t *mask = &mm->context.icache_stale_mask;
-
- if (cpumask_test_cpu(cpu, mask)) {
- cpumask_clear_cpu(cpu, mask);
+ if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
/*
* Ensure the remote hart's writes are visible to this hart.
* This pairs with a barrier in flush_icache_mm.
*/
smp_mb();
- local_flush_icache_all();
- }
+ /*
+ * If cache will be flushed in switch_to, no need to flush here.
+ */
+ if (!(task && switch_to_should_flush_icache(task)))
+ local_flush_icache_all();
+ }
#endif
}
@@ -334,5 +334,5 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
set_mm(prev, next, cpu);
- flush_icache_deferred(next, cpu);
+ flush_icache_deferred(next, cpu, task);
}
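The cntx2asid()/cntx2version() helpers used throughout this hunk are introduced elsewhere in the series (arch/riscv/include/asm/mmu.h). A sketch of what they are assumed to expand to, splitting a context value into its ASID and version fields along SATP_ASID_MASK (the exact definitions may differ):

/* illustration only; see asm/mmu.h in this series for the real macros */
#define cntx2asid(cntx)		((cntx) & SATP_ASID_MASK)
#define cntx2version(cntx)	((cntx) & ~SATP_ASID_MASK)

With these, the version lives in the upper bits and the allocator bumps it by BIT(SATP_ASID_BITS) on rollover, which is why asid_mask could be dropped as a separate global.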
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 843107f834b2..cb89d7e0ba88 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
}
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
TAINT_CPU_OUT_OF_SPEC,
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 3ba1d4dde5dd..5224f3733802 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -292,7 +292,10 @@ void handle_page_fault(struct pt_regs *regs)
if (unlikely(access_error(cause, vma))) {
vma_end_read(vma);
- goto lock_mmap;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ tsk->thread.bad_cause = cause;
+ bad_area_nosemaphore(regs, SEGV_ACCERR, addr);
+ return;
}
fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 5ef2a6891158..0ebd968b33c9 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -399,16 +399,6 @@ static bool is_napot_size(unsigned long size)
#endif /*CONFIG_RISCV_ISA_SVNAPOT*/
-int pud_huge(pud_t pud)
-{
- return pud_leaf(pud);
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return pmd_leaf(pmd);
-}
-
static bool __hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fe8e159394d8..7e12925fc660 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/elf.h>
#endif
#include <linux/kfence.h>
+#include <linux/execmem.h>
#include <asm/fixmap.h>
#include <asm/io.h>
@@ -49,8 +50,8 @@ u64 satp_mode __ro_after_init = SATP_MODE_32;
EXPORT_SYMBOL(satp_mode);
#ifdef CONFIG_64BIT
-bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
-bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
+bool pgtable_l4_enabled __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL);
+bool pgtable_l5_enabled __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
#endif
@@ -161,11 +162,25 @@ static void print_vm_layout(void) { }
void __init mem_init(void)
{
+ bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */
- swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
+ if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb &&
+ dma_cache_alignment != 1) {
+ /*
+ * If no bouncing needed for ZONE_DMA, allocate 1MB swiotlb
+ * buffer per 1GB of RAM for kmalloc() bouncing on
+ * non-coherent platforms.
+ */
+ unsigned long size =
+ DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
+ swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
+ swiotlb = true;
+ }
+
+ swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
memblock_free_all();
print_vm_layout();
@@ -231,7 +246,7 @@ static void __init setup_bootmem(void)
* In 64-bit, any use of __va/__pa before this point is wrong as we
* did not know the start of DRAM before.
*/
- if (IS_ENABLED(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
@@ -1481,3 +1496,37 @@ void __init pgtable_cache_init(void)
preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
}
#endif
+
+#ifdef CONFIG_EXECMEM
+#ifdef CONFIG_MMU
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ [EXECMEM_KPROBES] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL_READ_EXEC,
+ .alignment = 1,
+ },
+ [EXECMEM_BPF] = {
+ .start = BPF_JIT_REGION_START,
+ .end = BPF_JIT_REGION_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = PAGE_SIZE,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 893566e004b7..9b6e86ce3867 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -7,34 +7,11 @@
#include <asm/sbi.h>
#include <asm/mmu_context.h>
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
- if (asid != FLUSH_TLB_NO_ASID)
- __asm__ __volatile__ ("sfence.vma x0, %0"
- :
- : "r" (asid)
- : "memory");
- else
- local_flush_tlb_all();
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
- unsigned long asid)
-{
- if (asid != FLUSH_TLB_NO_ASID)
- __asm__ __volatile__ ("sfence.vma %0, %1"
- :
- : "r" (addr), "r" (asid)
- : "memory");
- else
- local_flush_tlb_page(addr);
-}
-
/*
* Flush entire TLB if number of entries to be flushed is greater
* than the threshold below.
*/
-static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+unsigned long tlb_flush_all_threshold __read_mostly = 64;
static void local_flush_tlb_range_threshold_asid(unsigned long start,
unsigned long size,
@@ -79,10 +56,12 @@ static void __ipi_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
- if (riscv_use_ipi_for_rfence())
- on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
- else
+ if (num_online_cpus() < 2)
+ local_flush_tlb_all();
+ else if (riscv_use_sbi_for_rfence())
sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
+ else
+ on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}
struct flush_tlb_range_data {
@@ -99,50 +78,38 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
unsigned long start, unsigned long size,
unsigned long stride)
{
- struct flush_tlb_range_data ftd;
- bool broadcast;
+ unsigned int cpu;
if (cpumask_empty(cmask))
return;
- if (cmask != cpu_online_mask) {
- unsigned int cpuid;
+ cpu = get_cpu();
- cpuid = get_cpu();
- /* check if the tlbflush needs to be sent to other CPUs */
- broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+ /* Check if the TLB flush needs to be sent to other CPUs. */
+ if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
+ local_flush_tlb_range_asid(start, size, stride, asid);
+ } else if (riscv_use_sbi_for_rfence()) {
+ sbi_remote_sfence_vma_asid(cmask, start, size, asid);
} else {
- broadcast = true;
- }
+ struct flush_tlb_range_data ftd;
- if (broadcast) {
- if (riscv_use_ipi_for_rfence()) {
- ftd.asid = asid;
- ftd.start = start;
- ftd.size = size;
- ftd.stride = stride;
- on_each_cpu_mask(cmask,
- __ipi_flush_tlb_range_asid,
- &ftd, 1);
- } else
- sbi_remote_sfence_vma_asid(cmask,
- start, size, asid);
- } else {
- local_flush_tlb_range_asid(start, size, stride, asid);
+ ftd.asid = asid;
+ ftd.start = start;
+ ftd.size = size;
+ ftd.stride = stride;
+ on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
}
- if (cmask != cpu_online_mask)
- put_cpu();
+ put_cpu();
}
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
- return static_branch_unlikely(&use_asid_allocator) ?
- atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+ return cntx2asid(atomic_long_read(&mm->context.id));
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -200,7 +167,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+ __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
start, end - start, PAGE_SIZE);
}
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index f4b6b3b9edda..fdbf88ca8b70 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -81,6 +81,8 @@ struct rv_jit_context {
int nexentries;
unsigned long flags;
int stack_size;
+ u64 arena_vm_start;
+ u64 user_vm_start;
};
/* Convert from ninsns to bytes. */
@@ -606,7 +608,7 @@ static inline u32 rv_nop(void)
return rv_i_insn(0, 0, 0, 0, 0x13);
}
-/* RVC instrutions. */
+/* RVC instructions. */
static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
{
@@ -735,7 +737,7 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
-/* RVZBB instrutions. */
+/* RVZBB instructions. */
static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
{
return rv_i_insn(0x604, rs1, 1, rd, 0x13);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index aac190085472..79a001d5533e 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -12,12 +12,14 @@
#include <linux/stop_machine.h>
#include <asm/patch.h>
#include <asm/cfi.h>
+#include <asm/percpu.h>
#include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
+#define RV_REG_ARENA RV_REG_S7 /* For storing arena_vm_start */
static const int regmap[] = {
[BPF_REG_0] = RV_REG_A5,
@@ -255,6 +257,10 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_ld(RV_REG_ARENA, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
@@ -498,33 +504,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
break;
/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
case BPF_ADD | BPF_FETCH:
- emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
- rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
+ rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_AND | BPF_FETCH:
- emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
- rv_amoand_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
+ rv_amoand_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_OR | BPF_FETCH:
- emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
- rv_amoor_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
+ rv_amoor_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_XOR | BPF_FETCH:
- emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
- rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
+ rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
case BPF_XCHG:
- emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
- rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
+ rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
@@ -548,6 +554,7 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define REG_DONT_CLEAR_MARKER 0 /* RV_REG_ZERO unused in pt_regmap */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -555,7 +562,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
- *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
+ if (regs_offset != REG_DONT_CLEAR_MARKER)
+ *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
return true;
@@ -572,7 +580,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
off_t fixup_offset;
if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
- (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
+ (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32))
return 0;
if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
@@ -722,6 +731,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
if (ret)
return ret;
+ /* store prog start time */
+ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
@@ -729,9 +741,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
/* nop reserved for conditional jump */
emit(rv_nop(), ctx);
- /* store prog start time */
- emit_mv(RV_REG_S1, RV_REG_A0, ctx);
-
/* arg1: &args_off */
emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
if (!p->jited)
@@ -1073,6 +1082,33 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ emit_mv(RV_REG_T1, rs, ctx);
+ emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
+ emit_imm(rd, (ctx->user_vm_start >> 32) << 32, ctx);
+ emit(rv_beq(RV_REG_T1, RV_REG_ZERO, 4), ctx);
+ emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
+ emit_mv(rd, RV_REG_T1, ctx);
+ break;
+ } else if (insn_is_mov_percpu_addr(insn)) {
+ if (rd != rs)
+ emit_mv(rd, rs, ctx);
+#ifdef CONFIG_SMP
+ /* Load current CPU number in T1 */
+ emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+ RV_REG_TP, ctx);
+ /* << 3 because offsets are 8 bytes */
+ emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
+ /* Load address of __per_cpu_offset array in T2 */
+ emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+ /* Add offset of current CPU to __per_cpu_offset */
+ emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+ /* Load __per_cpu_offset[cpu] in T1 */
+ emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
+ /* Add the offset to Rd */
+ emit_add(rd, rd, RV_REG_T1, ctx);
+#endif
+ }
if (imm == 1) {
/* Special mov32 for zext */
emit_zextw(rd, rd, ctx);
@@ -1457,12 +1493,44 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool fixed_addr;
u64 addr;
+ /* Inline calls to bpf_get_smp_processor_id()
+ *
+ * RV_REG_TP holds the address of the current CPU's task_struct and thread_info is
+ * at offset 0 in task_struct.
+ * Load cpu from thread_info:
+ * Set R0 to ((struct thread_info *)(RV_REG_TP))->cpu
+ *
+ * This replicates the implementation of raw_smp_processor_id() on RISCV
+ */
+ if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+ /* Load current CPU number in R0 */
+ emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+ RV_REG_TP, ctx);
+ break;
+ }
+
mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&addr, &fixed_addr);
if (ret < 0)
return ret;
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ const struct btf_func_model *fm;
+ int idx;
+
+ fm = bpf_jit_find_kfunc_model(ctx->prog, insn);
+ if (!fm)
+ return -EINVAL;
+
+ for (idx = 0; idx < fm->nr_args; idx++) {
+ u8 reg = bpf_to_rv_reg(BPF_REG_1 + idx, ctx);
+
+ if (fm->arg_size[idx] == sizeof(int))
+ emit_sextw(reg, reg, ctx);
+ }
+ }
+
ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
@@ -1523,6 +1591,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ /* LDX | PROBE_MEM32: dst = *(unsigned size *)(src + RV_REG_ARENA + off) */
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
{
int insn_len, insns_start;
bool sign_ext;
@@ -1530,6 +1603,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit_add(RV_REG_T2, rs, RV_REG_ARENA, ctx);
+ rs = RV_REG_T2;
+ }
+
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_12b_int(off)) {
@@ -1666,6 +1744,86 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T3, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T3;
+
+ /* Load imm to a register then store it */
+ emit_imm(RV_REG_T1, imm, ctx);
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
if (is_12b_int(off)) {
@@ -1712,6 +1870,84 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_atomic(rd, rs, off, imm,
BPF_SIZE(code) == BPF_DW, ctx);
break;
+
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T2, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T2;
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
@@ -1743,6 +1979,8 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
stack_adjust += 8;
if (seen_reg(RV_REG_S6, ctx))
stack_adjust += 8;
+ if (ctx->arena_vm_start)
+ stack_adjust += 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
@@ -1794,6 +2032,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_ARENA, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
@@ -1807,6 +2049,9 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
ctx->stack_size = stack_adjust;
+
+ if (ctx->arena_vm_start)
+ emit_imm(RV_REG_ARENA, ctx->arena_vm_start, ctx);
}
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
@@ -1823,3 +2068,23 @@ bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+ return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+ switch (imm) {
+ case BPF_FUNC_get_smp_processor_id:
+ return true;
+ default:
+ return false;
+ }
+}
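As a plain-C illustration of the address arithmetic the insn_is_mov_percpu_addr() sequence above emits (current CPU number read from thread_info, then the matching __per_cpu_offset[] slot added to the register), here is a standalone sketch with made-up values rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* dst = src + __per_cpu_offset[cpu]: turn the address of a per-CPU
 * variable's template into the address of this CPU's instance. */
static uint64_t percpu_address(uint64_t src, const uint64_t *per_cpu_offset,
			       unsigned int cpu)
{
	return src + per_cpu_offset[cpu];
}

int main(void)
{
	const uint64_t offsets[2] = { 0x0, 0x4000 };	/* hypothetical offsets */

	printf("CPU 1 copy at %#llx\n",
	       (unsigned long long)percpu_address(0x80001000ULL, offsets, 1));
	return 0;
}

The JIT performs the same computation using T1/T2 as scratch registers.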
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 6b3acac30c06..0a96abdaca65 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -80,6 +80,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto skip_init_ctx;
}
+ ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
+ ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx->prog = prog;
ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (!ctx->offset) {
@@ -219,19 +221,6 @@ u64 bpf_jit_alloc_exec_limit(void)
return BPF_JIT_REGION_SIZE;
}
-void *bpf_jit_alloc_exec(unsigned long size)
-{
- return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
- BPF_JIT_REGION_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
- return vfree(addr);
-}
-
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
int ret;
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index 280b0eb352b8..f11945ee2490 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD := y
purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
purgatory-y += strcmp.o strlen.o strncmp.o
@@ -47,13 +46,6 @@ LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
targets += purgatory.ro purgatory.chk
-# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
# These are adjustments to the compiler flags used for objects that
# make up the standalone purgatory.ro
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8f01ada6845e..c59d2b54df49 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -17,6 +17,9 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
def_bool n
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+
config GENERIC_HWEIGHT
def_bool y
@@ -174,7 +177,7 @@ config S390
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
@@ -552,7 +555,7 @@ config EXPOLINE
If unsure, say N.
config EXPOLINE_EXTERN
- def_bool n
+ def_bool y if EXPOLINE
depends on EXPOLINE
depends on CC_IS_GCC && GCC_VERSION >= 110200
depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
@@ -590,18 +593,6 @@ config RELOCATABLE
Note: this option exists only for documentation purposes, please do
not remove it.
-config PIE_BUILD
- def_bool CC_IS_CLANG && !$(cc-option,-munaligned-symbols)
- help
- If the compiler is unable to generate code that can manage unaligned
- symbols, the kernel is linked as a position-independent executable
- (PIE) and includes dynamic relocations that are processed early
- during bootup.
-
- For kpatch functionality, it is recommended to build the kernel
- without the PIE_BUILD option. PIE_BUILD is only enabled when the
- compiler lacks proper support for handling unaligned symbols.
-
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image (KASLR)"
default y
@@ -611,6 +602,25 @@ config RANDOMIZE_BASE
as a security feature that deters exploit attempts relying on
knowledge of the location of kernel internals.
+config KERNEL_IMAGE_BASE
+ hex "Kernel image base address"
+ range 0x100000 0x1FFFFFE0000000 if !KASAN
+ range 0x100000 0x1BFFFFE0000000 if KASAN
+ default 0x3FFE0000000 if !KASAN
+ default 0x7FFFE0000000 if KASAN
+ help
+ This is the address at which the kernel image is loaded in case
+ Kernel Address Space Layout Randomization (KASLR) is disabled.
+
+ In case Protected virtualization guest support is enabled, the
+ Ultravisor imposes a virtual address limit. If the value of this
+ option leads to the kernel image exceeding the Ultravisor limit,
+ this option is ignored and the image is loaded below the limit.
+
+ If the value of this option leads to the kernel image overlapping
+ the virtual memory where other data structures are located, this
+ option is ignored and the image is loaded above the structures.
+
endmenu
menu "Memory setup"
@@ -724,6 +734,33 @@ config EADM_SCH
To compile this driver as a module, choose M here: the
module will be called eadm_sch.
+config AP
+ def_tristate y
+ prompt "Support for Adjunct Processors (ap)"
+ help
+ This driver allows usage of Adjunct Processor (AP) devices via
+ the ap bus, cards and queues. Supported Adjunct Processors are
+ the CryptoExpress Cards (CEX).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ap.
+
+ If unsure, say Y (default).
+
+config AP_DEBUG
+ def_bool n
+ prompt "Enable debug features for Adjunct Processor (ap) devices"
+ depends on AP
+ help
+ Say 'Y' here to enable some additional debug features for Adjunct
+ Processor (ap) devices.
+
+ There will be some more sysfs attributes displayed for ap queues.
+
+ Do not enable this on a production kernel build.
+
+ If unsure, say N.
+
config VFIO_CCW
def_tristate n
prompt "Support for VFIO-CCW subchannels"
@@ -740,7 +777,7 @@ config VFIO_AP
prompt "VFIO support for AP devices"
depends on KVM
depends on VFIO
- depends on ZCRYPT
+ depends on AP
select VFIO_MDEV
help
This driver grants access to Adjunct Processor (AP) devices
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 2dbb2d2f22f9..f2b21c7a70ef 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,14 +14,9 @@ KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
-ifdef CONFIG_PIE_BUILD
-KBUILD_CFLAGS += -fPIE
-LDFLAGS_vmlinux := -pie -z notext
-else
-KBUILD_CFLAGS += $(call cc-option,-munaligned-symbols,)
-LDFLAGS_vmlinux := --emit-relocs --discard-none
+KBUILD_CFLAGS += -fPIC
+LDFLAGS_vmlinux := -no-pie --emit-relocs --discard-none
extra_tools := relocs
-endif
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
ifndef CONFIG_AS_IS_LLVM
@@ -88,7 +83,6 @@ endif
ifdef CONFIG_EXPOLINE
ifdef CONFIG_EXPOLINE_EXTERN
- KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline/expoline.o
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk-extern
CC_FLAGS_EXPOLINE += -mfunction-return=thunk-extern
else
@@ -167,11 +161,6 @@ vdso_prepare: prepare0
vdso-install-y += arch/s390/kernel/vdso64/vdso64.so.dbg
vdso-install-$(CONFIG_COMPAT) += arch/s390/kernel/vdso32/vdso32.so.dbg
-ifdef CONFIG_EXPOLINE_EXTERN
-modules_prepare: expoline_prepare
-expoline_prepare: scripts
- $(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
-endif
endif
# Don't use tabs in echo arguments
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 294f08a8811a..070c9b2e905f 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -37,8 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += version.o pgm_check_info.o ctype.o ipl_data.o
-obj-y += $(if $(CONFIG_PIE_BUILD),machine_kexec_reloc.o,relocs.o)
+obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
@@ -49,9 +48,7 @@ targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y
targets += vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += vmlinux.bin.zst info.bin syms.bin vmlinux.syms $(obj-all)
-ifndef CONFIG_PIE_BUILD
targets += relocs.S
-endif
OBJECTS := $(addprefix $(obj)/,$(obj-y))
OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all))
@@ -110,13 +107,11 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-ifndef CONFIG_PIE_BUILD
CMD_RELOCS=arch/s390/tools/relocs
-quiet_cmd_relocs = RELOCS $@
+quiet_cmd_relocs = RELOCS $@
cmd_relocs = $(CMD_RELOCS) $< > $@
$(obj)/relocs.S: vmlinux FORCE
$(call if_changed,relocs)
-endif
suffix-$(CONFIG_KERNEL_GZIP) := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 567d60f78bbc..18027fdc92b0 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -17,7 +17,6 @@ struct machine_info {
};
struct vmlinux_info {
- unsigned long default_lma;
unsigned long entry;
unsigned long image_size; /* does not include .bss */
unsigned long bss_size; /* uncompressed image .bss size */
@@ -25,14 +24,8 @@ struct vmlinux_info {
unsigned long bootdata_size;
unsigned long bootdata_preserved_off;
unsigned long bootdata_preserved_size;
-#ifdef CONFIG_PIE_BUILD
- unsigned long dynsym_start;
- unsigned long rela_dyn_start;
- unsigned long rela_dyn_end;
-#else
unsigned long got_start;
unsigned long got_end;
-#endif
unsigned long amode31_size;
unsigned long init_mm_off;
unsigned long swapper_pg_dir_off;
@@ -74,10 +67,11 @@ void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
-void setup_vmem(unsigned long asce_limit);
+void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
+int get_random(unsigned long limit, unsigned long *value);
extern struct machine_info machine;
@@ -98,6 +92,10 @@ extern struct vmlinux_info _vmlinux_info;
#define vmlinux _vmlinux_info
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
+#define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
+#define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
+#define __identity_va(x) ((void *)((unsigned long)(x) + __identity_base))
+#define __identity_pa(x) ((unsigned long)(x) - __identity_base)
static inline bool intersects(unsigned long addr0, unsigned long size0,
unsigned long addr1, unsigned long size1)
diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
index d762733a0753..f478e8e9cbda 100644
--- a/arch/s390/boot/decompressor.c
+++ b/arch/s390/boot/decompressor.c
@@ -63,24 +63,13 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unzstd.c"
#endif
-#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
-
unsigned long mem_safe_offset(void)
{
- /*
- * due to 4MB HEAD_SIZE for bzip2
- * 'decompress_offset + vmlinux.image_size' could be larger than
- * kernel at final position + its .bss, so take the larger of two
- */
- return max(decompress_offset + vmlinux.image_size,
- vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
+ return ALIGN(free_mem_end_ptr, PAGE_SIZE);
}
-void *decompress_kernel(void)
+void deploy_kernel(void *output)
{
- void *output = (void *)decompress_offset;
-
__decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, vmlinux.image_size, NULL, error);
- return output;
}
diff --git a/arch/s390/boot/decompressor.h b/arch/s390/boot/decompressor.h
index 92b81d2ea35d..4f966f06bd65 100644
--- a/arch/s390/boot/decompressor.h
+++ b/arch/s390/boot/decompressor.h
@@ -2,11 +2,9 @@
#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
#define BOOT_COMPRESSED_DECOMPRESSOR_H
-#ifdef CONFIG_KERNEL_UNCOMPRESSED
-static inline void *decompress_kernel(void) { return NULL; }
-#else
-void *decompress_kernel(void);
-#endif
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void);
+void deploy_kernel(void *output);
+#endif
#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 90602101e2ae..bd3bf5ef472d 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -43,7 +43,7 @@ static int check_prng(void)
return PRNG_MODE_TDES;
}
-static int get_random(unsigned long limit, unsigned long *value)
+int get_random(unsigned long limit, unsigned long *value)
{
struct prng_parm prng = {
/* initial parameter block for tdes mode, copied from libica */
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index 97244cd7a206..ea96275b0380 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -153,8 +153,10 @@ void print_pgm_check_info(void)
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
- if (kaslr_enabled())
+ if (kaslr_enabled()) {
decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
+ decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
+ }
decompressor_printk("PSW : %016lx %016lx (%pS)\n",
S390_lowcore.psw_save_area.mask,
S390_lowcore.psw_save_area.addr,
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 6cf89314209a..182aac6a0f77 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -3,6 +3,7 @@
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
+#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
@@ -18,7 +19,7 @@
#include "boot.h"
#include "uv.h"
-unsigned long __bootdata_preserved(__kaslr_offset);
+struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
@@ -29,10 +30,8 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
-unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
struct machine_info machine;
@@ -109,9 +108,19 @@ static void setup_lpp(void)
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
-unsigned long mem_safe_offset(void)
+static unsigned long mem_safe_offset(void)
{
- return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
+ return (unsigned long)_compressed_start;
+}
+
+static void deploy_kernel(void *output)
+{
+ void *uncompressed_start = (void *)_compressed_start;
+
+ if (output == uncompressed_start)
+ return;
+ memmove(output, uncompressed_start, vmlinux.image_size);
+ memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif
@@ -141,70 +150,18 @@ static void copy_bootdata(void)
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
-#ifdef CONFIG_PIE_BUILD
-static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
-{
- Elf64_Rela *rela_start, *rela_end, *rela;
- int r_type, r_sym, rc;
- Elf64_Addr loc, val;
- Elf64_Sym *dynsym;
-
- rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
- rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
- dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
- for (rela = rela_start; rela < rela_end; rela++) {
- loc = rela->r_offset + offset;
- val = rela->r_addend;
- r_sym = ELF64_R_SYM(rela->r_info);
- if (r_sym) {
- if (dynsym[r_sym].st_shndx != SHN_UNDEF)
- val += dynsym[r_sym].st_value + offset;
- } else {
- /*
- * 0 == undefined symbol table index (STN_UNDEF),
- * used for R_390_RELATIVE, only add KASLR offset
- */
- val += offset;
- }
- r_type = ELF64_R_TYPE(rela->r_info);
- rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
- if (rc)
- error("Unknown relocation type");
- }
-}
-
-static void kaslr_adjust_got(unsigned long offset) {}
-static void rescue_relocs(void) {}
-static void free_relocs(void) {}
-#else
-static int *vmlinux_relocs_64_start;
-static int *vmlinux_relocs_64_end;
-
-static void rescue_relocs(void)
-{
- unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start;
-
- vmlinux_relocs_64_start = (void *)physmem_alloc_top_down(RR_RELOC, size, 0);
- vmlinux_relocs_64_end = (void *)vmlinux_relocs_64_start + size;
- memmove(vmlinux_relocs_64_start, __vmlinux_relocs_64_start, size);
-}
-
-static void free_relocs(void)
-{
- physmem_free(RR_RELOC);
-}
-
-static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
+static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
+ unsigned long offset, unsigned long phys_offset)
{
int *reloc;
long loc;
/* Adjust R_390_64 relocations */
- for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
- loc = (long)*reloc + offset;
+ for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
+ loc = (long)*reloc + phys_offset;
if (loc < min_addr || loc > max_addr)
error("64-bit relocation outside of kernel!\n");
- *(u64 *)loc += offset;
+ *(u64 *)loc += offset - __START_KERNEL;
}
}
@@ -217,9 +174,8 @@ static void kaslr_adjust_got(unsigned long offset)
* reason. Adjust the GOT entries.
*/
for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
- *entry += offset;
+ *entry += offset - __START_KERNEL;
}
-#endif
/*
* Merge information from several sources into a single ident_map_size value.
@@ -261,9 +217,26 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}
-static unsigned long setup_kernel_memory_layout(void)
+#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
+
+static unsigned long get_vmem_size(unsigned long identity_size,
+ unsigned long vmemmap_size,
+ unsigned long vmalloc_size,
+ unsigned long rte_size)
+{
+ unsigned long max_mappable, vsize;
+
+ max_mappable = max(identity_size, MAX_DCSS_ADDR);
+ vsize = round_up(SZ_2G + max_mappable, rte_size) +
+ round_up(vmemmap_size, rte_size) +
+ FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
+ return size_add(vsize, vmalloc_size);
+}
+
+static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
unsigned long vmemmap_start;
+ unsigned long kernel_start;
unsigned long asce_limit;
unsigned long rte_size;
unsigned long pages;
@@ -275,12 +248,19 @@ static unsigned long setup_kernel_memory_layout(void)
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
- vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
- MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
- vsize = size_add(vsize, vmalloc_size);
- if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
+ BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
+ BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
+ vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
+ if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
+ (vsize > _REGION2_SIZE && kaslr_enabled())) {
asce_limit = _REGION1_SIZE;
- rte_size = _REGION2_SIZE;
+ if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
+ rte_size = _REGION2_SIZE;
+ vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
+ } else {
+ rte_size = _REGION3_SIZE;
+ }
} else {
asce_limit = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
@@ -290,38 +270,67 @@ static unsigned long setup_kernel_memory_layout(void)
* Forcing modules and vmalloc area under the ultravisor
* secure storage limit, so that any vmalloc allocation
* we do could be used to back secure guest storage.
+ *
+ * Assume the secure storage limit always exceeds _REGION2_SIZE,
+ * otherwise asce_limit and rte_size would have been adjusted.
*/
vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
+ BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
/* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START);
#endif
- __memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
- __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
- sizeof(struct lowcore));
- MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
+ vsize = min(vsize, vmax);
+ if (kaslr_enabled()) {
+ unsigned long kernel_end, kaslr_len, slots, pos;
+
+ kaslr_len = max(KASLR_LEN, vmax - vsize);
+ slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
+ if (get_random(slots, &pos))
+ pos = 0;
+ kernel_end = vmax - pos * THREAD_SIZE;
+ kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+ } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
+ kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
+ decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
+ } else {
+ kernel_start = __NO_KASLR_START_KERNEL;
+ }
+ __kaslr_offset = kernel_start;
+
+ MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
- vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+ vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
+ vsize = round_down(vsize, _SEGMENT_SIZE);
vmalloc_size = min(vmalloc_size, vsize);
VMALLOC_START = VMALLOC_END - vmalloc_size;
+ __memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+ __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
+ sizeof(struct lowcore));
+
/* split remaining virtual space between 1:1 mapping & vmemmap array */
- pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+ pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
pages = SECTION_ALIGN_UP(pages);
/* keep vmemmap_start aligned to a top level region table entry */
- vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
- vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
- /* maximum mappable address as seen by arch_get_mappable_range() */
- max_mappable = vmemmap_start;
+ vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
/* make sure identity map doesn't overlay with vmemmap */
ident_map_size = min(ident_map_size, vmemmap_start);
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
- /* make sure vmemmap doesn't overlay with vmalloc area */
- VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
+ /* make sure vmemmap doesn't overlay with absolute lowcore area */
+ if (vmemmap_start + vmemmap_size > __abs_lowcore) {
+ vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
+ ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
+ }
vmemmap = (struct page *)vmemmap_start;
+ /* maximum address for which linear mapping could be created (DCSS, memory) */
+ BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
+ max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
+ max_mappable = min(max_mappable, vmemmap_start);
+ __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
return asce_limit;
}
@@ -329,9 +338,9 @@ static unsigned long setup_kernel_memory_layout(void)
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
-static void clear_bss_section(unsigned long vmlinux_lma)
+static void clear_bss_section(unsigned long kernel_start)
{
- memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+ memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}
/*
@@ -348,19 +357,12 @@ static void setup_vmalloc_size(void)
vmalloc_size = max(size, vmalloc_size);
}
-static void kaslr_adjust_vmlinux_info(unsigned long offset)
+static void kaslr_adjust_vmlinux_info(long offset)
{
- *(unsigned long *)(&vmlinux.entry) += offset;
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
-#ifdef CONFIG_PIE_BUILD
- vmlinux.rela_dyn_start += offset;
- vmlinux.rela_dyn_end += offset;
- vmlinux.dynsym_start += offset;
-#else
vmlinux.got_start += offset;
vmlinux.got_end += offset;
-#endif
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
@@ -373,23 +375,30 @@ static void kaslr_adjust_vmlinux_info(unsigned long offset)
#endif
}
+static void fixup_vmlinux_info(void)
+{
+ vmlinux.entry -= __START_KERNEL;
+ kaslr_adjust_vmlinux_info(-__START_KERNEL);
+}
+
void startup_kernel(void)
{
- unsigned long max_physmem_end;
- unsigned long vmlinux_lma = 0;
+ unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
+ unsigned long nokaslr_offset_phys = mem_safe_offset();
unsigned long amode31_lma = 0;
+ unsigned long max_physmem_end;
unsigned long asce_limit;
unsigned long safe_addr;
- void *img;
psw_t psw;
+ fixup_vmlinux_info();
setup_lpp();
- safe_addr = mem_safe_offset();
+ safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
/*
- * Reserve decompressor memory together with decompression heap, buffer and
- * memory which might be occupied by uncompressed kernel at default 1Mb
- * position (if KASLR is off or failed).
+ * Reserve decompressor memory together with decompression heap,
+ * buffer and memory which might be occupied by uncompressed kernel
+ * (if KASLR is off or failed).
*/
physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
@@ -409,40 +418,39 @@ void startup_kernel(void)
max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
- asce_limit = setup_kernel_memory_layout();
+ asce_limit = setup_kernel_memory_layout(kernel_size);
/* got final ident_map_size, physmem allocations could be performed now */
physmem_set_usable_limit(ident_map_size);
detect_physmem_online_ranges(max_physmem_end);
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
- rescue_relocs();
- if (kaslr_enabled()) {
- vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
- THREAD_SIZE, vmlinux.default_lma,
- ident_map_size);
- if (vmlinux_lma) {
- __kaslr_offset = vmlinux_lma - vmlinux.default_lma;
- kaslr_adjust_vmlinux_info(__kaslr_offset);
- }
- }
- vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
- physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
-
- if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
- img = decompress_kernel();
- memmove((void *)vmlinux_lma, img, vmlinux.image_size);
- } else if (__kaslr_offset) {
- img = (void *)vmlinux.default_lma;
- memmove((void *)vmlinux_lma, img, vmlinux.image_size);
- memset(img, 0, vmlinux.image_size);
- }
+ if (kaslr_enabled())
+ __kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+ if (!__kaslr_offset_phys)
+ __kaslr_offset_phys = nokaslr_offset_phys;
+ kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
+ physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
+ deploy_kernel((void *)__kaslr_offset_phys);
/* vmlinux decompression is done, shrink reserved low memory */
physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+
+ /*
+ * In case KASLR is enabled the randomized location of .amode31
+ * section might overlap with .vmlinux.relocs section. To avoid that
+ * the below randomize_within_range() could have been called with
+ * __vmlinux_relocs_64_end as the lower range address. However,
+ * .amode31 section is written to by the decompressed kernel - at
+ * that time the contents of .vmlinux.relocs is not needed anymore.
+ * Conversely, .vmlinux.relocs is only read by the decompressor, even
+ * before the kernel has started. Therefore, in case the two sections
+ * overlap there is no risk of corrupting any data.
+ */
if (kaslr_enabled())
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
- amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+ if (!amode31_lma)
+ amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
/*
@@ -458,23 +466,23 @@ void startup_kernel(void)
* - copy_bootdata() must follow setup_vmem() to propagate changes
* to bootdata made by setup_vmem()
*/
- clear_bss_section(vmlinux_lma);
- kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
+ clear_bss_section(__kaslr_offset_phys);
+ kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
+ __kaslr_offset, __kaslr_offset_phys);
kaslr_adjust_got(__kaslr_offset);
- free_relocs();
- setup_vmem(asce_limit);
+ setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
copy_bootdata();
/*
* Save KASLR offset for early dumps, before vmcore_info is set.
* Mark as uneven to distinguish from real vmcore_info pointer.
*/
- S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
+ S390_lowcore.vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.
*/
- psw.addr = vmlinux.entry;
+ psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS;
__load_psw(psw);
}
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 09b10bb6e4d0..96d48b7112d4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -27,6 +27,8 @@ enum populate_mode {
POPULATE_NONE,
POPULATE_DIRECT,
POPULATE_ABS_LOWCORE,
+ POPULATE_IDENTITY,
+ POPULATE_KERNEL,
#ifdef CONFIG_KASAN
POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW,
@@ -54,7 +56,7 @@ static inline void kasan_populate(unsigned long start, unsigned long end, enum p
pgtable_populate(start, end, mode);
}
-static void kasan_populate_shadow(void)
+static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
@@ -76,44 +78,20 @@ static void kasan_populate_shadow(void)
__arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
__arch_set_page_dat(kasan_early_shadow_pte, 1);
- /*
- * Current memory layout:
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan |
- * | | / | zero page |
- * +- vmalloc area -+ / | mapping |
- * | vmalloc_size | / | (untracked) |
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ | unmapped | allocated per module
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- *
- * Current memory layout (KASAN_VMALLOC):
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan zero page| (untracked)
- * | | / | mapping |
- * +- vmalloc area -+ / +----------------+
- * | vmalloc_size | / |shallow populate|
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ |shallow populate|
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- */
-
for_each_physmem_usable_range(i, &start, &end) {
- kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
- if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
- kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
+ kasan_populate((unsigned long)__identity_va(start),
+ (unsigned long)__identity_va(end),
+ POPULATE_KASAN_MAP_SHADOW);
+ if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260) {
+ kasan_populate((unsigned long)__identity_va(memgap_start),
+ (unsigned long)__identity_va(start),
+ POPULATE_KASAN_ZERO_SHADOW);
+ }
memgap_start = end;
}
+ kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
+ kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
+ kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
@@ -122,8 +100,9 @@ static void kasan_populate_shadow(void)
untracked_end = MODULES_VADDR;
}
/* populate kasan shadow for untracked memory */
- kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
- kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
+ kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
+ POPULATE_KASAN_ZERO_SHADOW);
+ kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
@@ -180,7 +159,9 @@ static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
}
#else
-static inline void kasan_populate_shadow(void) {}
+static inline void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
+{
+}
static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
@@ -263,6 +244,10 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
+ case POPULATE_KERNEL:
+ return __kernel_pa(addr);
+ case POPULATE_IDENTITY:
+ return __identity_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
addr = physmem_alloc_top_down(RR_VMEM, size, size);
@@ -274,15 +259,22 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
}
}
-static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
+static bool large_allowed(enum populate_mode mode)
+{
+ return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
+}
+
+static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
+ enum populate_mode mode)
{
- return machine.has_edat2 &&
+ return machine.has_edat2 && large_allowed(mode) &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}
-static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
+static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
+ enum populate_mode mode)
{
- return machine.has_edat1 &&
+ return machine.has_edat1 && large_allowed(mode) &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}
@@ -322,7 +314,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
- if (can_large_pmd(pmd, addr, next)) {
+ if (can_large_pmd(pmd, addr, next, mode)) {
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
if (!machine.has_nx)
@@ -355,7 +347,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
- if (can_large_pud(pud, addr, next)) {
+ if (can_large_pud(pud, addr, next, mode)) {
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL);
if (!machine.has_nx)
@@ -418,11 +410,12 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
}
}
-void setup_vmem(unsigned long asce_limit)
+void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
unsigned long start, end;
unsigned long asce_type;
unsigned long asce_bits;
+ pgd_t *init_mm_pgd;
int i;
/*
@@ -433,6 +426,15 @@ void setup_vmem(unsigned long asce_limit)
for_each_physmem_online_range(i, &start, &end)
__arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);
+ /*
+ * init_mm->pgd contains the virtual address of swapper_pg_dir.
+ * It is unusable at this stage since DAT is not yet enabled. Swap
+ * it for the physical address of swapper_pg_dir and restore
+ * the virtual address after all page tables are created.
+ */
+ init_mm_pgd = init_mm.pgd;
+ init_mm.pgd = (pgd_t *)swapper_pg_dir;
+
if (asce_limit == _REGION1_SIZE) {
asce_type = _REGION2_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@@ -453,15 +455,20 @@ void setup_vmem(unsigned long asce_limit)
* the lowcore and create the identity mapping only afterwards.
*/
pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
- for_each_physmem_usable_range(i, &start, &end)
- pgtable_populate(start, end, POPULATE_DIRECT);
+ for_each_physmem_usable_range(i, &start, &end) {
+ pgtable_populate((unsigned long)__identity_va(start),
+ (unsigned long)__identity_va(end),
+ POPULATE_IDENTITY);
+ }
+ pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
+ pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
POPULATE_NONE);
- memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
+ memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));
- kasan_populate_shadow();
+ kasan_populate_shadow(kernel_start, kernel_end);
S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
@@ -471,4 +478,5 @@ void setup_vmem(unsigned long asce_limit)
local_ctl_load(13, &S390_lowcore.kernel_asce);
init_mm.context.asce = S390_lowcore.kernel_asce.val;
+ init_mm.pgd = init_mm_pgd;
}
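
The hunks above introduce POPULATE_IDENTITY and POPULATE_KERNEL and route them through _pa(), while the new large_allowed() restricts EDAT large pages to the direct and identity mappings. The stand-alone C model below only illustrates that dispatch; the offsets and helper names (mode_to_pa, identity_base, kernel_virt, kernel_phys) are invented for the example and are not taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum populate_mode { POPULATE_DIRECT, POPULATE_IDENTITY, POPULATE_KERNEL };

/* example layout only: identity map at 1 TiB, kernel image at 2 TiB virtual / 16 MiB physical */
static const uint64_t identity_base = 1ULL << 40;
static const uint64_t kernel_virt   = 2ULL << 40;
static const uint64_t kernel_phys   = 16ULL << 20;

static uint64_t mode_to_pa(uint64_t addr, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_DIRECT:
		return addr;				/* virtual == physical */
	case POPULATE_IDENTITY:
		return addr - identity_base;		/* linear identity mapping */
	case POPULATE_KERNEL:
		return addr - kernel_virt + kernel_phys; /* relocated kernel image */
	}
	return addr;
}

/* mirrors large_allowed(): only the 1:1 style mappings may use large pages */
static bool large_allowed(enum populate_mode mode)
{
	return mode == POPULATE_DIRECT || mode == POPULATE_IDENTITY;
}

int main(void)
{
	uint64_t va = kernel_virt + 0x1000;

	printf("kernel va %#llx -> pa %#llx, large pages allowed: %d\n",
	       (unsigned long long)va,
	       (unsigned long long)mode_to_pa(va, POPULATE_KERNEL),
	       large_allowed(POPULATE_KERNEL));
	return 0;
}
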
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 3d7ea585ab99..1fe5a1d3ff60 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -99,8 +99,16 @@ SECTIONS
_decompressor_end = .;
+ . = ALIGN(4);
+ .vmlinux.relocs : {
+ __vmlinux_relocs_64_start = .;
+ *(.vmlinux.relocs_64)
+ __vmlinux_relocs_64_end = .;
+ }
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
- . = 0x100000;
+ . = ALIGN(PAGE_SIZE);
+ . += AMODE31_SIZE; /* .amode31 section */
#else
. = ALIGN(8);
#endif
@@ -110,24 +118,6 @@ SECTIONS
_compressed_end = .;
}
-#ifndef CONFIG_PIE_BUILD
- /*
- * When the kernel is built with CONFIG_KERNEL_UNCOMPRESSED, the entire
- * uncompressed vmlinux.bin is positioned in the bzImage decompressor
- * image at the default kernel LMA of 0x100000, enabling it to be
- * executed in-place. However, the size of .vmlinux.relocs could be
- * large enough to cause an overlap with the uncompressed kernel at the
- * address 0x100000. To address this issue, .vmlinux.relocs is
- * positioned after the .rodata.compressed.
- */
- . = ALIGN(4);
- .vmlinux.relocs : {
- __vmlinux_relocs_64_start = .;
- *(.vmlinux.relocs_64)
- __vmlinux_relocs_64_end = .;
- }
-#endif
-
#define SB_TRAILER_SIZE 32
/* Trailer needed for Secure Boot */
. += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index f1fb01cd5a25..145342e46ea8 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -760,7 +760,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index d33f814f78b2..dc237896f99d 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -745,7 +745,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 99f7e1f2b70a..99ea3f12c5d2 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -125,8 +125,19 @@ struct s390_pxts_ctx {
static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
- return pkey_keyblob2pkey(kb->key, kb->keylen,
- pk->protkey, &pk->len, &pk->type);
+ int i, ret = -EIO;
+
+ /* try three times in case of busy card */
+ for (i = 0; ret && i < 3; i++) {
+ if (ret == -EBUSY && in_task()) {
+ if (msleep_interruptible(1000))
+ return -EINTR;
+ }
+ ret = pkey_keyblob2pkey(kb->key, kb->keylen,
+ pk->protkey, &pk->len, &pk->type);
+ }
+
+ return ret;
}
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
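
The __paes_keyblob2pkey() change above retries the conversion up to three times and sleeps interruptibly when the card reports -EBUSY. Below is a minimal user-space sketch of the same bounded-retry shape; fake_convert() is an invented stand-in for pkey_keyblob2pkey(), and plain sleep() stands in for msleep_interruptible().

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for the key conversion: fails twice with -EBUSY, then succeeds */
static int fake_convert(int *attempts)
{
	return (*attempts)++ < 2 ? -EBUSY : 0;
}

static int convert_with_retry(void)
{
	int attempts = 0, i, ret = -EIO;

	/* try three times in case the card is busy, as in the patch */
	for (i = 0; ret && i < 3; i++) {
		if (ret == -EBUSY)
			sleep(1);	/* the kernel code sleeps interruptibly instead */
		ret = fake_convert(&attempts);
	}
	return ret;
}

int main(void)
{
	printf("result: %d\n", convert_with_retry());
	return 0;
}
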
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
index 7db046596b93..608f6287ca9c 100644
--- a/arch/s390/include/asm/alternative-asm.h
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -15,6 +15,7 @@
.long \alt_start - .
.word \feature
.byte \orig_end - \orig_start
+ .org . - ( \orig_end - \orig_start ) & 1
.org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
.org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
.endm
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 904dd049f954..dd93b92c3ab6 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -53,6 +53,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
"\t.word " __stringify(facility) "\n" /* facility bit */ \
"\t.byte " oldinstr_len "\n" /* instruction len */ \
+ "\t.org . - (" oldinstr_len ") & 1\n" \
"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
"\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n"
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 43ac4a64f49b..395b02d6a133 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -223,13 +223,18 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
* config info as returned by the ap_qci() function.
*/
struct ap_config_info {
- unsigned int apsc : 1; /* S bit */
- unsigned int apxa : 1; /* N bit */
- unsigned int qact : 1; /* C bit */
- unsigned int rc8a : 1; /* R bit */
- unsigned int : 4;
- unsigned int apsb : 1; /* B bit */
- unsigned int : 23;
+ union {
+ unsigned int flags;
+ struct {
+ unsigned int apsc : 1; /* S bit */
+ unsigned int apxa : 1; /* N bit */
+ unsigned int qact : 1; /* C bit */
+ unsigned int rc8a : 1; /* R bit */
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ };
+ };
unsigned char na; /* max # of APs - 1 */
unsigned char nd; /* max # of Domains - 1 */
unsigned char _reserved0[10];
@@ -544,15 +549,4 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
return reg1.status;
}
-/*
- * Interface to tell the AP bus code that a configuration
- * change has happened. The bus code should at least do
- * an ap bus resource rescan.
- */
-#if IS_ENABLED(CONFIG_ZCRYPT)
-void ap_bus_cfg_chg(void);
-#else
-static inline void ap_bus_cfg_chg(void){}
-#endif
-
#endif /* _ASM_S390_AP_H_ */
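
Wrapping the ap_config_info bits in an anonymous union adds a flags word that aliases the individual facility bits, so callers can compare or copy the whole configuration in one access. The user-space sketch below shows the pattern; the struct name and values are invented, and the exact bit layout is ABI dependent (it matches the hardware layout on big-endian s390).

#include <stdio.h>

struct ap_cfg {
	union {
		unsigned int flags;
		struct {
			unsigned int apsc : 1;
			unsigned int apxa : 1;
			unsigned int qact : 1;
			unsigned int rc8a : 1;
			unsigned int      : 4;
			unsigned int apsb : 1;
			unsigned int      : 23;
		};
	};
};

int main(void)
{
	struct ap_cfg old = { .flags = 0 }, cur = { .flags = 0 };

	cur.apsb = 1;	/* one facility bit flipped */
	if (old.flags != cur.flags)
		printf("config changed: %#x -> %#x\n", old.flags, cur.flags);
	return 0;
}
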
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
index 56096ae26f29..f662eb4b9246 100644
--- a/arch/s390/include/asm/asm-prototypes.h
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -4,6 +4,7 @@
#include <linux/kvm_host.h>
#include <linux/ftrace.h>
#include <asm/fpu.h>
+#include <asm/nospec-branch.h>
#include <asm-generic/asm-prototypes.h>
__int128_t __ashlti3(__int128_t a, int b);
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7138d189cc42..0c4cad7d5a5b 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,31 +15,31 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
__atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
#define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
} \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic_##op##_barrier(i, &v->counter); \
}
@@ -74,7 +74,7 @@ ATOMIC_OPS(xor)
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return __atomic_cmpxchg(&v->counter, old, new);
}
@@ -82,31 +82,31 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
#define ATOMIC64_INIT(i) { (i) }
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__atomic64_add(i, (long *)&v->counter);
}
@@ -114,20 +114,20 @@ static inline void arch_atomic64_add(s64 i, atomic64_t *v)
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
- __atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
- return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ __atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+ return __atomic64_##op##_barrier(i, (long *)&v->counter); \
}
ATOMIC64_OPS(and)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 50510e08b893..7fa5f96a553a 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,7 +8,7 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
{
int c;
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
return c;
}
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
{
asm volatile(
" st %1,%0\n"
: "=R" (v->counter) : "d" (i));
}
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
{
s64 c;
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
return c;
}
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
{
asm volatile(
" stg %1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
{ \
op_type old; \
\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
{ \
int old, new; \
\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
#undef __ATOMIC_OPS
#define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
{ \
long old, new; \
\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
asm volatile(
" cs %[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
return old;
}
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
int old_expected = old;
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
return old == old_expected;
}
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
asm volatile(
" csg %[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
return old;
}
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
long old_expected = old;
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index bb48ea380c0d..bb78159d8042 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -11,6 +11,9 @@
#include <uapi/asm/chsc.h>
+/* struct from linux/notifier.h */
+struct notifier_block;
+
/**
* Operation codes for CHSC PNSO:
* PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport
@@ -66,4 +69,16 @@ struct chsc_pnso_area {
struct chsc_pnso_naid_l2 entries[];
} __packed __aligned(PAGE_SIZE);
+/*
+ * notifier interface - registered notifiers get called on
+ * the following events:
+ * - ap config changed (CHSC_NOTIFY_AP_CFG)
+ */
+enum chsc_notify_type {
+ CHSC_NOTIFY_AP_CFG = 3,
+};
+
+int chsc_notifier_register(struct notifier_block *nb);
+int chsc_notifier_unregister(struct notifier_block *nb);
+
#endif /* _ASM_S390_CHSC_H */
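
chsc_notifier_register()/chsc_notifier_unregister() let other code subscribe to CHSC events such as CHSC_NOTIFY_AP_CFG. The fragment below is a hedged sketch of how a module might consume the new interface; the callback body and module boilerplate are illustrative only.

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/chsc.h>

static int ap_cfg_change(struct notifier_block *nb, unsigned long event, void *data)
{
	if (event == CHSC_NOTIFY_AP_CFG)
		pr_info("AP configuration changed, rescanning\n");
	return NOTIFY_OK;
}

static struct notifier_block ap_cfg_nb = {
	.notifier_call = ap_cfg_change,
};

static int __init demo_init(void)
{
	return chsc_notifier_register(&ap_cfg_nb);
}

static void __exit demo_exit(void)
{
	chsc_notifier_unregister(&ap_cfg_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
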
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index b378e2b57ad8..c786538e397c 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -166,28 +166,86 @@
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
-/**
- * cpacf_query() - check if a specific CPACF function is available
- * @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
- *
- * Executes the query function for the given crypto instruction @opcode
- * and checks if @func is available
- *
- * Returns 1 if @func is available for @opcode, 0 otherwise
+/*
+ * Prototype for a nonexistent function, used to produce a link
+ * error if __cpacf_query() or __cpacf_check_opcode() is used
+ * with an invalid compile-time constant opcode.
*/
-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+void __cpacf_bad_opcode(void);
+
+static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
+ cpacf_mask_t *mask)
{
asm volatile(
- " lghi 0,0\n" /* query function */
- " lgr 1,%[mask]\n"
- " spm 0\n" /* pckmo doesn't change the cc */
- /* Parameter regs are ignored, but must be nonzero and unique */
- "0: .insn rrf,%[opc] << 16,2,4,6,0\n"
- " brc 1,0b\n" /* handle partial completion */
- : "=m" (*mask)
- : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
- : "cc", "0", "1");
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc),
+ [r1] "i" (r1), [r2] "i" (r2)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query_rrf(u32 opc,
+ u8 r1, u8 r2, u8 r3, u8 m4,
+ cpacf_mask_t *mask)
+{
+ asm volatile(
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
+ [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ switch (opcode) {
+ case CPACF_KDSA:
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ break;
+ case CPACF_KIMD:
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ break;
+ case CPACF_KLMD:
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ break;
+ case CPACF_KM:
+ __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ break;
+ case CPACF_KMA:
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMAC:
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ break;
+ case CPACF_KMC:
+ __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ break;
+ case CPACF_KMCTR:
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMF:
+ __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ break;
+ case CPACF_KMO:
+ __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ break;
+ case CPACF_PCC:
+ __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ break;
+ case CPACF_PCKMO:
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ break;
+ case CPACF_PRNO:
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ break;
+ default:
+ __cpacf_bad_opcode();
+ }
}
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -211,10 +269,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
default:
- BUG();
+ __cpacf_bad_opcode();
+ return 0;
}
}
+/**
+ * cpacf_query() - check if a specific CPACF function is available
+ * @opcode: the opcode of the crypto instruction
+ * @func: the function code to test for
+ *
+ * Executes the query function for the given crypto instruction @opcode
+ * and checks if @func is available
+ *
+ * Returns 1 if @func is available for @opcode, 0 otherwise
+ */
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
if (__cpacf_check_opcode(opcode)) {
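
__cpacf_bad_opcode() is declared but never defined, so any call that survives compilation becomes a link error; because __cpacf_query() and __cpacf_check_opcode() are __always_inline and only called with compile-time constant opcodes, the default branch is folded away for valid opcodes and the reference disappears. The user-space sketch below shows the same guard pattern with invented names; build with optimization enabled (e.g. -O2) so the dead branch is eliminated.

#include <stdio.h>

void bad_opcode(void);	/* declared on purpose without a definition */

static inline int known_cost(int opcode)
{
	switch (opcode) {
	case 1:
		return 10;
	case 2:
		return 20;
	default:
		bad_opcode();	/* if this call survives, the link fails */
		return 0;
	}
}

int main(void)
{
	/* constant arguments let the optimizer prune the default branch,
	 * so no undefined reference to bad_opcode() remains */
	printf("%d\n", known_cost(1) + known_cost(2));
	return 0;
}
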
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
index 4f21ae561e4d..390906b8e386 100644
--- a/arch/s390/include/asm/dwarf.h
+++ b/arch/s390/include/asm/dwarf.h
@@ -9,6 +9,7 @@
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_RESTORE .cfi_restore
+#define CFI_REL_OFFSET .cfi_rel_offset
#ifdef CONFIG_AS_CFI_VAL_OFFSET
#define CFI_VAL_OFFSET .cfi_val_offset
diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h
index 568fd81bb77b..e0a06060afdd 100644
--- a/arch/s390/include/asm/extmem.h
+++ b/arch/s390/include/asm/extmem.h
@@ -8,6 +8,13 @@
#define _ASM_S390X_DCSS_H
#ifndef __ASSEMBLY__
+/*
+ * A DCSS segment is defined as a contiguous range of pages using the DEFSEG command.
+ * The range start and end are page numbers with values less than or equal to
+ * 0x7ffffff (see CP Commands and Utilities Reference).
+ */
+#define MAX_DCSS_ADDR (512UL * SZ_1G)
+
/* possible values for segment type as returned by segment_info */
#define SEG_TYPE_SW 0
#define SEG_TYPE_EW 1
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 621f23d5ae30..77e479d44f1e 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -8,12 +8,8 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CC_IS_CLANG
-/* https://llvm.org/pr41424 */
-#define ftrace_return_address(n) 0UL
-#else
-#define ftrace_return_address(n) __builtin_return_address(n)
-#endif
+unsigned long return_address(unsigned int n);
+#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 5cc46e0dde62..9725586f4259 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -146,7 +146,7 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
-int gmap_mark_unmergeable(void);
+int s390_disable_cow_sharing(void);
void s390_unlist_old_asce(struct gmap *gmap);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index deb198a61039..ce5f4fe8be4d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,11 +39,11 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_arch_1, &page->flags);
+ clear_bit(PG_arch_1, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 4453ad7c11ac..0fbc992d7a5e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -73,6 +73,21 @@ static inline void ioport_unmap(void __iomem *p)
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
+/* combine single writes by using store-block insn */
+static inline void __iowrite32_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ zpci_memcpy_toio(to, from, count * 4);
+}
+#define __iowrite32_copy __iowrite32_copy
+
+static inline void __iowrite64_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ zpci_memcpy_toio(to, from, count * 8);
+}
+#define __iowrite64_copy __iowrite64_copy
+
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
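
Defining __iowrite32_copy()/__iowrite64_copy() in terms of zpci_memcpy_toio() lets multi-word MMIO copies use a single block store instead of a loop of writel()/writeq(). The fragment below is an illustrative kernel-style usage sketch only; the helper name and its caller are invented.

#include <linux/io.h>
#include <linux/types.h>

/* push a descriptor ring entry to device MMIO with one block store */
static void push_descriptor(void __iomem *mmio, const u64 *desc, size_t qwords)
{
	__iowrite64_copy(mmio, desc, qwords);	/* qwords * 8 bytes via zpci_memcpy_toio() */
}
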
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bb1b4bef1878..4c2dc7abc285 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -32,6 +32,11 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /*
+ * The mmu context allows COW-sharing of memory pages (KSM, zeropage).
+ * Note that COW-sharing during fork() is currently always allowed.
+ */
+ unsigned int allow_cow_sharing:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 929af18b0908..a7789a9f6218 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,6 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_cow_sharing = 1;
mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index 82725cf783c7..b9c1f3cae842 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -17,6 +17,26 @@ static inline bool nospec_uses_trampoline(void)
return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
}
+#ifdef CONFIG_EXPOLINE_EXTERN
+
+void __s390_indirect_jump_r1(void);
+void __s390_indirect_jump_r2(void);
+void __s390_indirect_jump_r3(void);
+void __s390_indirect_jump_r4(void);
+void __s390_indirect_jump_r5(void);
+void __s390_indirect_jump_r6(void);
+void __s390_indirect_jump_r7(void);
+void __s390_indirect_jump_r8(void);
+void __s390_indirect_jump_r9(void);
+void __s390_indirect_jump_r10(void);
+void __s390_indirect_jump_r11(void);
+void __s390_indirect_jump_r12(void);
+void __s390_indirect_jump_r13(void);
+void __s390_indirect_jump_r14(void);
+void __s390_indirect_jump_r15(void);
+
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7a946c42ad13..cb15dd25bf21 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -16,24 +16,25 @@
*/
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
- .pushsection .text,"ax",@progbits
- __ALIGN
+ SYM_CODE_START(\name)
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
-#endif
.globl \name
.hidden \name
.type \name,@function
\name:
CFI_STARTPROC
+#endif
.endm
.macro __THUNK_EPILOG_NAME name
- CFI_ENDPROC
#ifdef CONFIG_EXPOLINE_EXTERN
- .size \name, .-\name
-#endif
+ SYM_CODE_END(\name)
+ EXPORT_SYMBOL(\name)
+#else
+ CFI_ENDPROC
.popsection
+#endif
.endm
.macro __THUNK_PROLOG_BR r1
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index a4d2e103f116..3ee9e8f5ceae 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -17,11 +17,25 @@
#define OS_INFO_VMCOREINFO 0
#define OS_INFO_REIPL_BLOCK 1
#define OS_INFO_FLAGS_ENTRY 2
+#define OS_INFO_RESERVED 3
+#define OS_INFO_IDENTITY_BASE 4
+#define OS_INFO_KASLR_OFFSET 5
+#define OS_INFO_KASLR_OFF_PHYS 6
+#define OS_INFO_VMEMMAP 7
+#define OS_INFO_AMODE31_START 8
+#define OS_INFO_AMODE31_END 9
+#define OS_INFO_IMAGE_START 10
+#define OS_INFO_IMAGE_END 11
+#define OS_INFO_IMAGE_PHYS 12
+#define OS_INFO_MAX 13
#define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0)
struct os_info_entry {
- u64 addr;
+ union {
+ u64 addr;
+ u64 val;
+ };
u64 size;
u32 csum;
} __packed;
@@ -33,17 +47,24 @@ struct os_info {
u16 version_minor;
u64 crashkernel_addr;
u64 crashkernel_size;
- struct os_info_entry entry[3];
- u8 reserved[4004];
+ struct os_info_entry entry[OS_INFO_MAX];
+ u8 reserved[3804];
} __packed;
void os_info_init(void);
-void os_info_entry_add(int nr, void *ptr, u64 len);
+void os_info_entry_add_data(int nr, void *ptr, u64 len);
+void os_info_entry_add_val(int nr, u64 val);
void os_info_crashkernel_add(unsigned long base, unsigned long size);
u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
+static inline unsigned long os_info_old_value(int nr)
+{
+ unsigned long size;
+
+ return (unsigned long)os_info_old_entry(nr, &size);
+}
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 9381879f7ecf..224ff9d433ea 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -178,19 +178,52 @@ int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
-#define __PAGE_OFFSET 0x0UL
-#define PAGE_OFFSET 0x0UL
+struct vm_layout {
+ unsigned long kaslr_offset;
+ unsigned long kaslr_offset_phys;
+ unsigned long identity_base;
+ unsigned long identity_size;
+};
-#define __pa_nodebug(x) ((unsigned long)(x))
+extern struct vm_layout vm_layout;
+
+#define __kaslr_offset vm_layout.kaslr_offset
+#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
+#define __identity_base vm_layout.identity_base
+#define ident_map_size vm_layout.identity_size
+
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
+extern int __kaslr_enabled;
+static inline int kaslr_enabled(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return __kaslr_enabled;
+ return 0;
+}
+
+#define __PAGE_OFFSET __identity_base
+#define PAGE_OFFSET __PAGE_OFFSET
#ifdef __DECOMPRESSOR
+#define __pa_nodebug(x) ((unsigned long)(x))
#define __pa(x) __pa_nodebug(x)
#define __pa32(x) __pa(x)
#define __va(x) ((void *)(unsigned long)(x))
#else /* __DECOMPRESSOR */
+static inline unsigned long __pa_nodebug(unsigned long x)
+{
+ if (x < __kaslr_offset)
+ return x - __identity_base;
+ return x - __kaslr_offset + __kaslr_offset_phys;
+}
+
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x, bool is_31bit);
@@ -206,7 +239,7 @@ static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
#define __pa(x) __phys_addr((unsigned long)(x), false)
#define __pa32(x) __phys_addr((unsigned long)(x), true)
-#define __va(x) ((void *)(unsigned long)(x))
+#define __va(x) ((void *)((unsigned long)(x) + __identity_base))
#endif /* __DECOMPRESSOR */
@@ -231,7 +264,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
-#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug(kaddr)))
+#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
@@ -240,4 +273,11 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
+#define AMODE31_SIZE (3 * PAGE_SIZE)
+
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define __START_KERNEL 0x100000
+#define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE
+#define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
+
#endif /* _S390_PAGE_H */
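
With vm_layout in place, __pa_nodebug() distinguishes two virtual regions: addresses below __kaslr_offset belong to the identity map based at __identity_base, addresses at or above it belong to the randomized kernel image, and __va() always answers through the identity map. The stand-alone model below reproduces that translation; the example offsets are invented.

#include <stdint.h>
#include <stdio.h>

struct vm_layout {
	uint64_t kaslr_offset;		/* virtual base of the kernel image */
	uint64_t kaslr_offset_phys;	/* physical load address of the image */
	uint64_t identity_base;		/* virtual base of the identity map */
};

static const struct vm_layout layout = {
	.kaslr_offset      = 0x0000038000000000ULL,	/* example values only */
	.kaslr_offset_phys = 0x0000000001000000ULL,
	.identity_base     = 0x0000010000000000ULL,
};

static uint64_t pa(uint64_t va)
{
	if (va < layout.kaslr_offset)
		return va - layout.identity_base;		/* identity-mapped address */
	return va - layout.kaslr_offset + layout.kaslr_offset_phys;	/* kernel image address */
}

static uint64_t va(uint64_t phys)
{
	return phys + layout.identity_base;	/* __va() always answers via the identity map */
}

int main(void)
{
	uint64_t image_va = layout.kaslr_offset + 0x2000;

	printf("image va %#llx -> pa %#llx\n",
	       (unsigned long long)image_va, (unsigned long long)pa(image_va));
	printf("pa 0x5000 -> identity va %#llx\n", (unsigned long long)va(0x5000));
	return 0;
}
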
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 60950e7a25f5..70b6ee557eb2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -107,6 +107,12 @@ static inline int is_module_addr(void *addr)
return 1;
}
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_LEN (1UL << 31)
+#else
+#define KASLR_LEN 0UL
+#endif
+
/*
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IPC| OS |
@@ -262,12 +268,14 @@ static inline int is_module_addr(void *addr)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
+#define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL
+#define _REGION3_ENTRY_HARDWARE_BITS_LARGE 0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
-#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
-#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
+#define _REGION3_ENTRY_WRITE 0x0002 /* SW region write bit */
+#define _REGION3_ENTRY_READ 0x0001 /* SW region read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
@@ -278,9 +286,9 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
@@ -566,10 +574,20 @@ static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
}
/*
- * In the case that a guest uses storage keys
- * faults should no longer be backed by zero pages
+ * As soon as the guest uses storage keys or enables PV, we deduplicate all
+ * mapped shared zeropages and prevent new shared zeropages from getting
+ * mapped.
*/
-#define mm_forbids_zeropage mm_has_pgste
+#define mm_forbids_zeropage mm_forbids_zeropage
+static inline int mm_forbids_zeropage(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (!mm->context.allow_cow_sharing)
+ return 1;
+#endif
+ return 0;
+}
+
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1405,6 +1423,7 @@ static inline unsigned long pud_deref(pud_t pud)
return (unsigned long)__va(pud_val(pud) & origin_mask);
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return __pa(pud_deref(pud)) >> PAGE_SHIFT;
@@ -1768,8 +1787,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ pmd_t pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
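
mm_forbids_zeropage() now keys off the new allow_cow_sharing bit instead of mm_has_pgste, so the shared zeropage is refused only once s390_disable_cow_sharing() has cleared the bit (when a guest starts using storage keys or PV). Below is a minimal user-space model of that gate; the surrounding structure is invented, while the field and predicate names follow the diff.

#include <stdbool.h>
#include <stdio.h>

struct mm_context {
	unsigned int allow_cow_sharing : 1;
};

static bool mm_forbids_zeropage(const struct mm_context *ctx)
{
	return !ctx->allow_cow_sharing;
}

int main(void)
{
	struct mm_context ctx = { .allow_cow_sharing = 1 };	/* default after init_new_context() */

	printf("zeropage forbidden: %d\n", mm_forbids_zeropage(&ctx));
	ctx.allow_cow_sharing = 0;	/* guest enabled storage keys or PV */
	printf("zeropage forbidden: %d\n", mm_forbids_zeropage(&ctx));
	return 0;
}
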
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index e747b067f8db..f45cfc8bc233 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -22,7 +22,6 @@ enum reserved_range_type {
RR_DECOMPRESSOR,
RR_INITRD,
RR_VMLINUX,
- RR_RELOC,
RR_AMODE31,
RR_IPLREPORT,
RR_CERT_COMP_LIST,
@@ -170,4 +169,7 @@ static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
return *size;
}
+#define AMODE31_START (physmem_info.reserved[RR_AMODE31].start)
+#define AMODE31_END (physmem_info.reserved[RR_AMODE31].end)
+
#endif
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index bf15da0fedbc..0e3da500e98c 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -12,12 +12,12 @@
#define PREEMPT_NEED_RESCHED 0x80000000
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
{
int old, new;
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
old, new) != old);
}
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
{
__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
{
__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
{
return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
{
/*
* With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
__atomic_add(val, &S390_lowcore.preempt_count);
}
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
{
__preempt_count_add(-val);
}
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
{
return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
#define PREEMPT_ENABLED (0)
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count);
}
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
{
S390_lowcore.preempt_count = pc;
}
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
{
}
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
{
}
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
{
return false;
}
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
{
S390_lowcore.preempt_count += val;
}
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
{
S390_lowcore.preempt_count -= val;
}
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
{
return !--S390_lowcore.preempt_count && tif_need_resched();
}
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(preempt_count() == preempt_offset &&
tif_need_resched());
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e8cd..07ad5a1df878 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -40,6 +40,7 @@
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/irqflags.h>
+#include <asm/alternative.h>
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
@@ -92,12 +93,21 @@ static inline void get_cpu_id(struct cpuid *ptr)
asm volatile("stidp %0" : "=Q" (*ptr));
}
+static __always_inline unsigned long get_cpu_timer(void)
+{
+ unsigned long timer;
+
+ asm volatile("stpt %[timer]" : [timer] "=Q" (timer));
+ return timer;
+}
+
void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
@@ -304,8 +314,8 @@ static inline void __load_psw(psw_t psw)
*/
static __always_inline void __load_psw_mask(unsigned long mask)
{
+ psw_t psw __uninitialized;
unsigned long addr;
- psw_t psw;
psw.mask = mask;
@@ -393,6 +403,11 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
return arch_irqs_disabled_flags(regs->psw.mask);
}
+static __always_inline void bpon(void)
+{
+ asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", 82));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 03bcaa8effb2..32f70873e2b7 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -127,20 +127,6 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
-extern unsigned long __kaslr_offset;
-static inline unsigned long kaslr_offset(void)
-{
- return __kaslr_offset;
-}
-
-extern int __kaslr_enabled;
-static inline int kaslr_enabled(void)
-{
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return __kaslr_enabled;
- return 0;
-}
-
struct oldmem_data {
unsigned long start;
unsigned long size;
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b14e..85b6738b826a 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H
+#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
@@ -12,6 +13,17 @@ struct stack_frame_user {
unsigned long empty2[4];
};
+struct stack_frame_vdso_wrapper {
+ struct stack_frame_user sf;
+ unsigned long return_address;
+};
+
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf);
+
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index db84942eb78f..7937765ccfa5 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -6,16 +6,13 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#define VDSO_DELTA_NOMASK 1
+
#include <asm/syscall.h>
#include <asm/timex.h>
#include <asm/unistd.h>
#include <linux/compiler.h>
-#define vdso_calc_delta __arch_vdso_calc_delta
-static __always_inline u64 __arch_vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
-{
- return (cycles - last) * mult;
-}
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index fe17e448c0c5..561c91c1a87c 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -2,8 +2,6 @@
#ifndef _S390_VTIME_H
#define _S390_VTIME_H
-#define __ARCH_HAS_VTIME_TASK_SWITCH
-
static inline void update_timer_sys(void)
{
S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index fa029d0dc28f..7241fa194709 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -11,6 +11,8 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_stacktrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_unwind_bc.o = $(CC_FLAGS_FTRACE)
endif
@@ -57,7 +59,6 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KPROBES) += kprobes_insn_page.o
obj-$(CONFIG_KPROBES) += mcount.o
obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index e7bca29f9c34..1ac5f707dd70 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -33,13 +33,6 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
-
- if (unlikely(a->instrlen % 2)) {
- WARN_ONCE(1, "cpu alternatives instructions length is "
- "odd, skipping patching\n");
- continue;
- }
-
s390_kernel_write(instr, replacement, a->instrlen);
}
}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fa5f6885c74a..f55979f64d49 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -13,7 +13,6 @@
#include <linux/purgatory.h>
#include <linux/pgtable.h>
#include <linux/ftrace.h>
-#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/stacktrace.h>
@@ -66,10 +65,10 @@ int main(void)
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
- /* idle data offsets */
- OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
- OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
- OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
+ OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
+ DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
+ OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
+ DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c
index 554447768bdd..bf983513dd33 100644
--- a/arch/s390/kernel/cert_store.c
+++ b/arch/s390/kernel/cert_store.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
#include <crypto/sha2.h>
#include <keys/user-type.h>
#include <asm/debug.h>
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d09ebb6f5262..9863ebe75019 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -465,7 +465,11 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
- ehdr->e_phnum = mem_chunk_cnt + 1;
+ /*
+ * Number of memory chunk PT_LOAD program headers plus one kernel
+ * image PT_LOAD program header plus one PT_NOTE program header.
+ */
+ ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
return ehdr + 1;
}
@@ -501,15 +505,16 @@ static int get_mem_chunk_cnt(void)
*/
static void loads_init(Elf64_Phdr *phdr)
{
+ unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
phys_addr_t start, end;
u64 idx;
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
- phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
+ phdr->p_vaddr = old_identity_base + start;
phdr->p_offset = start;
- phdr->p_vaddr = (unsigned long)__va(start);
phdr->p_paddr = start;
+ phdr->p_filesz = end - start;
phdr->p_memsz = end - start;
phdr->p_flags = PF_R | PF_W | PF_X;
phdr->p_align = PAGE_SIZE;
@@ -518,6 +523,25 @@ static void loads_init(Elf64_Phdr *phdr)
}
/*
+ * Prepare PT_LOAD type program header for kernel image region
+ */
+static void text_init(Elf64_Phdr *phdr)
+{
+ unsigned long start_phys = os_info_old_value(OS_INFO_IMAGE_PHYS);
+ unsigned long start = os_info_old_value(OS_INFO_IMAGE_START);
+ unsigned long end = os_info_old_value(OS_INFO_IMAGE_END);
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_vaddr = start;
+ phdr->p_filesz = end - start;
+ phdr->p_memsz = end - start;
+ phdr->p_offset = start_phys;
+ phdr->p_paddr = start_phys;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ phdr->p_align = PAGE_SIZE;
+}
+
+/*
* Initialize notes (new kernel)
*/
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
@@ -557,6 +581,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
size += nt_vmcoreinfo_size();
/* nt_final */
size += sizeof(Elf64_Nhdr);
+ /* PT_LOAD type program header for kernel text region */
+ size += sizeof(Elf64_Phdr);
/* PT_LOADS */
size += mem_chunk_cnt * sizeof(Elf64_Phdr);
@@ -568,7 +594,7 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
*/
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
- Elf64_Phdr *phdr_notes, *phdr_loads;
+ Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
size_t alloc_size;
int mem_chunk_cnt;
void *ptr, *hdr;
@@ -606,14 +632,19 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
/* Init program headers */
phdr_notes = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
+ phdr_text = ptr;
+ ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
phdr_loads = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
+ /* Init kernel text program header */
+ text_init(phdr_text);
/* Init loads */
- hdr_off = PTR_DIFF(ptr, hdr);
loads_init(phdr_loads);
+ /* Finalize program headers */
+ hdr_off = PTR_DIFF(ptr, hdr);
*addr = (unsigned long long) hdr;
*size = (unsigned long long) hdr_off;
BUG_ON(elfcorehdr_size > alloc_size);
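
elfcorehdr_alloc() now emits one extra PT_LOAD describing the kernel image, whose addresses come from the old kernel's os_info entries, in addition to the PT_NOTE header and one PT_LOAD per memory chunk. The stand-alone sketch below lays out that program header arrangement with the standard <elf.h> types; the addresses and chunk count are invented.

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { MEM_CHUNKS = 2 };
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr[1 + 1 + MEM_CHUNKS];	/* notes + kernel text + loads */

	memset(&ehdr, 0, sizeof(ehdr));
	memset(phdr, 0, sizeof(phdr));

	ehdr.e_phnum = MEM_CHUNKS + 1 + 1;	/* same count as the updated ehdr_init() */

	phdr[0].p_type = PT_NOTE;

	phdr[1].p_type  = PT_LOAD;		/* kernel image: virtual != physical */
	phdr[1].p_vaddr = 0x38000000000ULL;
	phdr[1].p_paddr = 0x1000000ULL;

	phdr[2].p_type  = PT_LOAD;		/* memory chunk: virtual = old identity base + start */
	phdr[2].p_vaddr = 0x10000000000ULL;
	phdr[2].p_paddr = 0x0ULL;

	printf("e_phnum=%u\n", (unsigned)ehdr.e_phnum);
	return 0;
}
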
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 787394978bc0..60cf917a7122 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -340,7 +340,8 @@ SYM_CODE_START(pgm_check_handler)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
- lg %r12,__LC_GMAP
+ ltg %r12,__LC_GMAP
+ jz 5f
clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11)
jne 5f
BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
@@ -440,29 +441,6 @@ INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
/*
- * Load idle PSW.
- */
-SYM_FUNC_START(psw_idle)
- stg %r14,(__SF_GPRS+8*8)(%r15)
- stg %r3,__SF_EMPTY(%r15)
- larl %r1,psw_idle_exit
- stg %r1,__SF_EMPTY+8(%r15)
- larl %r1,smp_cpu_mtid
- llgf %r1,0(%r1)
- ltgr %r1,%r1
- jz .Lpsw_idle_stcctm
- .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
-.Lpsw_idle_stcctm:
- oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
- BPON
- stckf __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
- lpswe __SF_EMPTY(%r15)
-SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
- BR_EX %r14
-SYM_FUNC_END(psw_idle)
-
-/*
* Machine check handler routines
*/
SYM_CODE_START(mcck_int_handler)
@@ -635,6 +613,7 @@ SYM_DATA_START_LOCAL(daton_psw)
SYM_DATA_END(daton_psw)
.section .rodata, "a"
+ .balign 8
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index c46381ea04ec..ddf2ee47cb87 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -7,13 +7,13 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
+#include <linux/execmem.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
@@ -220,7 +220,7 @@ static int __init ftrace_plt_init(void)
{
const char *start, *end;
- ftrace_plt = module_alloc(PAGE_SIZE);
+ ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
if (!ftrace_plt)
panic("cannot allocate ftrace plt\n");
@@ -296,6 +296,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index e7239aaf428b..af9c97c0ad73 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -57,9 +57,13 @@ void noinstr arch_cpu_idle(void)
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
-
- /* psw_idle() returns with interrupts disabled. */
- psw_idle(idle, psw_mask);
+ set_cpu_flag(CIF_ENABLED_WAIT);
+ if (smp_cpu_mtid)
+ stcctm(MT_DIAG, smp_cpu_mtid, (u64 *)&idle->mt_cycles_enter);
+ idle->clock_idle_enter = get_tod_clock_fast();
+ idle->timer_idle_enter = get_cpu_timer();
+ bpon();
+ __load_psw_mask(psw_mask);
}
static ssize_t show_idle_count(struct device *dev,
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 1486350a4177..3a7d6e172211 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,6 +20,7 @@
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
+#include <linux/vmalloc.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ipl.h>
@@ -266,7 +267,11 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
- strscpy(_value, buf, sizeof(_value)); \
+ if (len >= sizeof(_value)) \
+ return -E2BIG; \
+ len = strscpy(_value, buf, sizeof(_value)); \
+ if (len < 0) \
+ return len; \
strim(_value); \
return len; \
} \
@@ -275,6 +280,61 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
+#define IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+static ssize_t sys_##_prefix##_scp_data_show(struct file *filp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t off, \
+ size_t count) \
+{ \
+ size_t size = _ipl_block.scp_data_len; \
+ void *scp_data = _ipl_block.scp_data; \
+ \
+ return memory_read_from_buffer(buf, count, &off, \
+ scp_data, size); \
+}
+
+#define IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\
+static ssize_t sys_##_prefix##_scp_data_store(struct file *filp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t off, \
+ size_t count) \
+{ \
+ size_t scpdata_len = count; \
+ size_t padding; \
+ \
+ if (off) \
+ return -EINVAL; \
+ \
+ memcpy(_ipl_block.scp_data, buf, count); \
+ if (scpdata_len % 8) { \
+ padding = 8 - (scpdata_len % 8); \
+ memset(_ipl_block.scp_data + scpdata_len, \
+ 0, padding); \
+ scpdata_len += padding; \
+ } \
+ \
+ _ipl_block_hdr.len = _ipl_bp_len + scpdata_len; \
+ _ipl_block.len = _ipl_bp0_len + scpdata_len; \
+ _ipl_block.scp_data_len = scpdata_len; \
+ \
+ return count; \
+}
+
+#define DEFINE_IPL_ATTR_SCP_DATA_RO(_prefix, _ipl_block, _size) \
+IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+static struct bin_attribute sys_##_prefix##_scp_data_attr = \
+ __BIN_ATTR(scp_data, 0444, sys_##_prefix##_scp_data_show, \
+ NULL, _size)
+
+#define DEFINE_IPL_ATTR_SCP_DATA_RW(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len, _size)\
+IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\
+static struct bin_attribute sys_##_prefix##_scp_data_attr = \
+ __BIN_ATTR(scp_data, 0644, sys_##_prefix##_scp_data_show, \
+ sys_##_prefix##_scp_data_store, _size)
+
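
The RW variant above zero-pads the written SCP data up to a multiple of 8 bytes before it updates the block lengths. A small standalone demo of that rounding, not kernel code, with made-up names:

    /* Userspace demo of the 8-byte padding used by IPL_ATTR_SCP_DATA_STORE_FN. */
    #include <stdio.h>
    #include <string.h>

    static size_t pad_to_8(unsigned char *data, size_t len)
    {
        size_t padding;

        if (len % 8) {
            padding = 8 - (len % 8);
            memset(data + len, 0, padding);   /* zero-fill the tail */
            len += padding;
        }
        return len;
    }

    int main(void)
    {
        unsigned char buf[16] = "abcde";      /* 5 payload bytes */

        printf("padded length: %zu\n", pad_to_8(buf, 5));   /* prints 8 */
        return 0;
    }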
/*
* ipl section
*/
@@ -373,71 +433,38 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, 0444, sys_ipl_device_show, NULL);
-static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t sys_ipl_parameter_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return memory_read_from_buffer(buf, count, &off, &ipl_block,
ipl_block.hdr.len);
}
-static struct bin_attribute ipl_parameter_attr =
- __BIN_ATTR(binary_parameter, 0444, ipl_parameter_read, NULL,
+static struct bin_attribute sys_ipl_parameter_attr =
+ __BIN_ATTR(binary_parameter, 0444, sys_ipl_parameter_read, NULL,
PAGE_SIZE);
-static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.fcp.scp_data_len;
- void *scp_data = &ipl_block.fcp.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.nvme.scp_data_len;
- void *scp_data = &ipl_block.nvme.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.eckd.scp_data_len;
- void *scp_data = &ipl_block.eckd.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static struct bin_attribute ipl_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_scp_data_read, NULL, PAGE_SIZE);
-
-static struct bin_attribute ipl_nvme_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_nvme_scp_data_read, NULL, PAGE_SIZE);
-
-static struct bin_attribute ipl_eckd_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_eckd_scp_data_read, NULL, PAGE_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_fcp, ipl_block.fcp, PAGE_SIZE);
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_fcp_scp_data_attr,
NULL,
};
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_nvme, ipl_block.nvme, PAGE_SIZE);
+
static struct bin_attribute *ipl_nvme_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_nvme_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_nvme_scp_data_attr,
NULL,
};
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_eckd, ipl_block.eckd, PAGE_SIZE);
+
static struct bin_attribute *ipl_eckd_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_eckd_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_eckd_scp_data_attr,
NULL,
};
@@ -776,44 +803,10 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
-static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_fcp->fcp.scp_data_len;
- void *scp_data = reipl_block_fcp->fcp.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
- reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
- reipl_block_fcp->fcp.scp_data_len = scpdata_len;
-
- return count;
-}
-static struct bin_attribute sys_reipl_fcp_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_fcp_scpdata_read,
- reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_fcp, reipl_block_fcp->hdr,
+ reipl_block_fcp->fcp,
+ IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
@@ -934,44 +927,10 @@ static struct kobj_attribute sys_reipl_fcp_clear_attr =
/* NVME reipl device attributes */
-static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_nvme->nvme.scp_data_len;
- void *scp_data = reipl_block_nvme->nvme.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_nvme->nvme.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_nvme->nvme.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
- reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
- reipl_block_nvme->nvme.scp_data_len = scpdata_len;
-
- return count;
-}
-
-static struct bin_attribute sys_reipl_nvme_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_nvme_scpdata_read,
- reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_nvme, reipl_block_nvme->hdr,
+ reipl_block_nvme->nvme,
+ IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_nvme_bin_attrs[] = {
&sys_reipl_nvme_scp_data_attr,
@@ -1067,44 +1026,10 @@ static struct attribute_group reipl_ccw_attr_group_lpar = {
/* ECKD reipl device attributes */
-static ssize_t reipl_eckd_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_eckd->eckd.scp_data_len;
- void *scp_data = reipl_block_eckd->eckd.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_eckd->eckd.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_eckd->eckd.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN + scpdata_len;
- reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN + scpdata_len;
- reipl_block_eckd->eckd.scp_data_len = scpdata_len;
-
- return count;
-}
-
-static struct bin_attribute sys_reipl_eckd_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_eckd_scpdata_read,
- reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_eckd, reipl_block_eckd->hdr,
+ reipl_block_eckd->eckd,
+ IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_eckd_bin_attrs[] = {
&sys_reipl_eckd_scp_data_attr,
@@ -1209,8 +1134,8 @@ static struct attribute_group reipl_nss_attr_group = {
void set_os_info_reipl_block(void)
{
- os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
- reipl_block_actual->hdr.len);
+ os_info_entry_add_data(OS_INFO_REIPL_BLOCK, reipl_block_actual,
+ reipl_block_actual->hdr.len);
}
/* reipl type */
@@ -1648,6 +1573,11 @@ DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->fcp.devno);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_fcp, dump_block_fcp->hdr,
+ dump_block_fcp->fcp,
+ IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
&sys_dump_fcp_wwpn_attr.attr,
@@ -1657,9 +1587,15 @@ static struct attribute *dump_fcp_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_fcp_bin_attrs[] = {
+ &sys_dump_fcp_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_fcp_attr_group = {
.name = IPL_FCP_STR,
.attrs = dump_fcp_attrs,
+ .bin_attrs = dump_fcp_bin_attrs,
};
/* NVME dump device attributes */
@@ -1672,6 +1608,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
dump_block_nvme->nvme.br_lba);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_nvme, dump_block_nvme->hdr,
+ dump_block_nvme->nvme,
+ IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_nvme_attrs[] = {
&sys_dump_nvme_fid_attr.attr,
&sys_dump_nvme_nsid_attr.attr,
@@ -1680,9 +1621,15 @@ static struct attribute *dump_nvme_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_nvme_bin_attrs[] = {
+ &sys_dump_nvme_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_nvme_attr_group = {
.name = IPL_NVME_STR,
.attrs = dump_nvme_attrs,
+ .bin_attrs = dump_nvme_bin_attrs,
};
/* ECKD dump device attributes */
@@ -1696,6 +1643,11 @@ IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd);
static struct kobj_attribute sys_dump_eckd_br_chr_attr =
__ATTR(br_chr, 0644, eckd_dump_br_chr_show, eckd_dump_br_chr_store);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_eckd, dump_block_eckd->hdr,
+ dump_block_eckd->eckd,
+ IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_eckd_attrs[] = {
&sys_dump_eckd_device_attr.attr,
&sys_dump_eckd_bootprog_attr.attr,
@@ -1703,9 +1655,15 @@ static struct attribute *dump_eckd_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_eckd_bin_attrs[] = {
+ &sys_dump_eckd_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_eckd_attr_group = {
.name = IPL_ECKD_STR,
.attrs = dump_eckd_attrs,
+ .bin_attrs = dump_eckd_bin_attrs,
};
/* CCW dump device attributes */
@@ -1858,9 +1816,9 @@ static int __init dump_nvme_init(void)
}
dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
- dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
- dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
+ dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+ dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
+ dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_NVME;
return 0;
}
@@ -1940,7 +1898,7 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
reipl_type == IPL_TYPE_NSS ||
reipl_type == IPL_TYPE_UNKNOWN)
os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
- os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
+ os_info_entry_add_data(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
csum = (__force unsigned int)cksm(reipl_block_actual, reipl_block_actual->hdr.len, 0);
abs_lc = get_abs_lowcore();
abs_lc->ipib = __pa(reipl_block_actual);
@@ -1958,11 +1916,13 @@ static struct shutdown_action __refdata dump_reipl_action = {
* vmcmd shutdown action: Trigger vm command on shutdown.
*/
-static char vmcmd_on_reboot[128];
-static char vmcmd_on_panic[128];
-static char vmcmd_on_halt[128];
-static char vmcmd_on_poff[128];
-static char vmcmd_on_restart[128];
+#define VMCMD_MAX_SIZE 240
+
+static char vmcmd_on_reboot[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_panic[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_halt[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_poff[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_restart[VMCMD_MAX_SIZE + 1];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
@@ -2288,8 +2248,8 @@ static int __init vmcmd_on_reboot_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_reboot, str, 127);
- vmcmd_on_reboot[127] = 0;
+ strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE);
+ vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0;
on_reboot_trigger.action = &vmcmd_action;
return 1;
}
@@ -2299,8 +2259,8 @@ static int __init vmcmd_on_panic_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_panic, str, 127);
- vmcmd_on_panic[127] = 0;
+ strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE);
+ vmcmd_on_panic[VMCMD_MAX_SIZE] = 0;
on_panic_trigger.action = &vmcmd_action;
return 1;
}
@@ -2310,8 +2270,8 @@ static int __init vmcmd_on_halt_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_halt, str, 127);
- vmcmd_on_halt[127] = 0;
+ strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE);
+ vmcmd_on_halt[VMCMD_MAX_SIZE] = 0;
on_halt_trigger.action = &vmcmd_action;
return 1;
}
@@ -2321,8 +2281,8 @@ static int __init vmcmd_on_poff_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_poff, str, 127);
- vmcmd_on_poff[127] = 0;
+ strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE);
+ vmcmd_on_poff[VMCMD_MAX_SIZE] = 0;
on_poff_trigger.action = &vmcmd_action;
return 1;
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 6f71b0ce1068..9acc6630abd3 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -29,6 +29,7 @@
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
#include <asm/softirq_stack.h>
+#include <asm/vtime.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -150,6 +151,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
if (from_idle)
account_idle_time_irq();
+ set_cpu_flag(CIF_NOHZ_DELAY);
do {
regs->tpi_info = S390_lowcore.tpi_info;
if (S390_lowcore.tpi_info.adapter_IO)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index f0cf20d4b3c5..05c83505e979 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -9,7 +9,6 @@
#define pr_fmt(fmt) "kprobes: " fmt
-#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
@@ -21,10 +20,10 @@
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
+#include <linux/execmem.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
-#include "kprobes.h"
#include "entry.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
@@ -32,39 +31,17 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
-static int insn_page_in_use;
-
void *alloc_insn_page(void)
{
void *page;
- page = module_alloc(PAGE_SIZE);
+ page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
if (!page)
return NULL;
set_memory_rox((unsigned long)page, 1);
return page;
}
-static void *alloc_s390_insn_page(void)
-{
- if (xchg(&insn_page_in_use, 1) == 1)
- return NULL;
- return &kprobes_insn_page;
-}
-
-static void free_s390_insn_page(void *page)
-{
- xchg(&insn_page_in_use, 0);
-}
-
-struct kprobe_insn_cache kprobe_s390_insn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
- .alloc = alloc_s390_insn_page,
- .free = free_s390_insn_page,
- .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
- .insn_size = MAX_INSN_SIZE,
-};
-
static void copy_instruction(struct kprobe *p)
{
kprobe_opcode_t insn[MAX_INSN_SIZE];
@@ -78,10 +55,10 @@ static void copy_instruction(struct kprobe *p)
if (probe_is_insn_relative_long(&insn[0])) {
/*
* For pc-relative instructions in RIL-b or RIL-c format patch
- * the RI2 displacement field. We have already made sure that
- * the insn slot for the patched instruction is within the same
- * 2GB area as the original instruction (either kernel image or
- * module area). Therefore the new displacement will always fit.
+	 * the RI2 displacement field. The insn slot for the instruction
+	 * to be patched is within the same 4GB area as the original
+	 * instruction. Therefore the new displacement will always fit.
*/
disp = *(s32 *)&insn[1];
addr = (u64)(unsigned long)p->addr;
@@ -93,34 +70,6 @@ static void copy_instruction(struct kprobe *p)
}
NOKPROBE_SYMBOL(copy_instruction);
-static int s390_get_insn_slot(struct kprobe *p)
-{
- /*
- * Get an insn slot that is within the same 2GB area like the original
- * instruction. That way instructions with a 32bit signed displacement
- * field can be patched and executed within the insn slot.
- */
- p->ainsn.insn = NULL;
- if (is_kernel((unsigned long)p->addr))
- p->ainsn.insn = get_s390_insn_slot();
- else if (is_module_addr(p->addr))
- p->ainsn.insn = get_insn_slot();
- return p->ainsn.insn ? 0 : -ENOMEM;
-}
-NOKPROBE_SYMBOL(s390_get_insn_slot);
-
-static void s390_free_insn_slot(struct kprobe *p)
-{
- if (!p->ainsn.insn)
- return;
- if (is_kernel((unsigned long)p->addr))
- free_s390_insn_slot(p->ainsn.insn, 0);
- else
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
-}
-NOKPROBE_SYMBOL(s390_free_insn_slot);
-
/* Check if paddr is at an instruction boundary */
static bool can_probe(unsigned long paddr)
{
@@ -174,7 +123,8 @@ int arch_prepare_kprobe(struct kprobe *p)
/* Make sure the probe isn't going on a difficult instruction */
if (probe_is_prohibited_opcode(p->addr))
return -EINVAL;
- if (s390_get_insn_slot(p))
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
return -ENOMEM;
copy_instruction(p);
return 0;
@@ -216,7 +166,10 @@ NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
- s390_free_insn_slot(p);
+ if (!p->ainsn.insn)
+ return;
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
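
With the private s390 insn-slot cache gone, kprobes relies on the generic slot allocator, whose backing pages come from alloc_insn_page() above. A hedged sketch of that allocate/protect/free lifecycle in isolation; the helper names grab_exec_page and drop_exec_page are made up:

    /* Sketch only: allocate an executable page via execmem, make it
     * read-only + executable, and free it again. */
    #include <linux/execmem.h>
    #include <linux/mm.h>
    #include <asm/set_memory.h>

    static void *grab_exec_page(void)
    {
        void *page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);

        if (!page)
            return NULL;
        set_memory_rox((unsigned long)page, 1);   /* 1 page, RO + X */
        return page;
    }

    static void drop_exec_page(void *page)
    {
        execmem_free(page);
    }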
diff --git a/arch/s390/kernel/kprobes.h b/arch/s390/kernel/kprobes.h
deleted file mode 100644
index dc3ed5098ee7..000000000000
--- a/arch/s390/kernel/kprobes.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef _ARCH_S390_KPROBES_H
-#define _ARCH_S390_KPROBES_H
-
-#include <linux/kprobes.h>
-
-DEFINE_INSN_CACHE_OPS(s390_insn);
-
-#endif
diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S
deleted file mode 100644
index 0fe4d725e98b..000000000000
--- a/arch/s390/kernel/kprobes_insn_page.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <linux/linkage.h>
-
-/*
- * insn_page is a special 4k aligned dummy function for kprobes.
- * It will contain all kprobed instructions that are out-of-line executed.
- * The page must be within the kernel image to guarantee that the
- * out-of-line instructions are within 2GB distance of their original
- * location. Using a dummy function ensures that the insn_page is within
- * the text section of the kernel and mapped read-only/executable from
- * the beginning on, thus avoiding to split large mappings if the page
- * would be in the data section instead.
- */
- .section .kprobes.text, "ax"
- .balign 4096
-SYM_CODE_START(kprobes_insn_page)
- .rept 2048
- .word 0x07fe
- .endr
-SYM_CODE_END(kprobes_insn_page)
- .previous
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 42215f9404af..91e207b50394 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -21,6 +21,7 @@
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <linux/memory.h>
+#include <linux/execmem.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
@@ -36,47 +37,10 @@
#define PLT_ENTRY_SIZE 22
-static unsigned long get_module_load_offset(void)
-{
- static DEFINE_MUTEX(module_kaslr_mutex);
- static unsigned long module_load_offset;
-
- if (!kaslr_enabled())
- return 0;
- /*
- * Calculate the module_load_offset the first time this code
- * is called. Once calculated it stays the same until reboot.
- */
- mutex_lock(&module_kaslr_mutex);
- if (!module_load_offset)
- module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
- mutex_unlock(&module_kaslr_mutex);
- return module_load_offset;
-}
-
-void *module_alloc(unsigned long size)
-{
- gfp_t gfp_mask = GFP_KERNEL;
- void *p;
-
- if (PAGE_ALIGN(size) > MODULES_LEN)
- return NULL;
- p = __vmalloc_node_range(size, MODULE_ALIGN,
- MODULES_VADDR + get_module_load_offset(),
- MODULES_END, gfp_mask, PAGE_KERNEL,
- VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
- NUMA_NO_NODE, __builtin_return_address(0));
- if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
- vfree(p);
- return NULL;
- }
- return p;
-}
-
#ifdef CONFIG_FUNCTION_TRACER
void module_arch_cleanup(struct module *mod)
{
- module_memfree(mod->arch.trampolines_start);
+ execmem_free(mod->arch.trampolines_start);
}
#endif
@@ -510,7 +474,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
numpages = DIV_ROUND_UP(size, PAGE_SIZE);
- start = module_alloc(numpages * PAGE_SIZE);
+ start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE);
if (!start)
return -ENOMEM;
set_memory_rox((unsigned long)start, numpages);
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index c77382a67325..230d010bac9b 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -31,6 +31,7 @@
#include <asm/crw.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
+#include <asm/vtime.h>
struct mcck_struct {
unsigned int kill_task : 1;
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index d1b16d83e49a..9b8c24ebb008 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -114,10 +114,10 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
type = BRASL_EXPOLINE; /* brasl instruction */
else
continue;
- thunk = instr + (*(int *)(instr + 2)) * 2;
+ thunk = instr + (long)(*(int *)(instr + 2)) * 2;
if (thunk[0] == 0xc6 && thunk[1] == 0x00)
/* exrl %r0,<target-br> */
- br = thunk + (*(int *)(thunk + 2)) * 2;
+ br = thunk + (long)(*(int *)(thunk + 2)) * 2;
else
continue;
if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
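
The added (long) casts make the displacement arithmetic 64-bit: the RIL displacement is a signed 32-bit halfword count, and doubling it in int can overflow before the result is widened to a pointer offset. A standalone demo, with the value chosen to trigger the overflow deliberately:

    /* Demo of why the multiplication must be widened first. */
    #include <stdio.h>

    int main(void)
    {
        int disp = 0x40000000;              /* large positive displacement */
        long wrong = disp * 2;              /* overflows in int (UB, typically wraps) */
        long right = (long)disp * 2;        /* widened first: 0x80000000 */

        printf("wrong=%lx right=%lx\n",
               (unsigned long)wrong, (unsigned long)right);
        return 0;
    }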
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index a801e6bd5341..b695f980bbde 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -15,8 +15,10 @@
#include <asm/checksum.h>
#include <asm/abs_lowcore.h>
#include <asm/os_info.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/asm-offsets.h>
+#include <asm/ipl.h>
/*
* OS info structure has to be page aligned
@@ -43,9 +45,9 @@ void os_info_crashkernel_add(unsigned long base, unsigned long size)
}
/*
- * Add OS info entry and update checksum
+ * Add OS info data entry and update checksum
*/
-void os_info_entry_add(int nr, void *ptr, u64 size)
+void os_info_entry_add_data(int nr, void *ptr, u64 size)
{
os_info.entry[nr].addr = __pa(ptr);
os_info.entry[nr].size = size;
@@ -54,15 +56,36 @@ void os_info_entry_add(int nr, void *ptr, u64 size)
}
/*
+ * Add OS info value entry and update checksum
+ */
+void os_info_entry_add_val(int nr, u64 value)
+{
+ os_info.entry[nr].val = value;
+ os_info.entry[nr].size = 0;
+ os_info.entry[nr].csum = 0;
+ os_info.csum = os_info_csum(&os_info);
+}
+
+/*
* Initialize OS info structure and set lowcore pointer
*/
void __init os_info_init(void)
{
struct lowcore *abs_lc;
+ BUILD_BUG_ON(sizeof(struct os_info) != PAGE_SIZE);
os_info.version_major = OS_INFO_VERSION_MAJOR;
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
+ os_info_entry_add_val(OS_INFO_IDENTITY_BASE, __identity_base);
+ os_info_entry_add_val(OS_INFO_KASLR_OFFSET, kaslr_offset());
+ os_info_entry_add_val(OS_INFO_KASLR_OFF_PHYS, __kaslr_offset_phys);
+ os_info_entry_add_val(OS_INFO_VMEMMAP, (unsigned long)vmemmap);
+ os_info_entry_add_val(OS_INFO_AMODE31_START, AMODE31_START);
+ os_info_entry_add_val(OS_INFO_AMODE31_END, AMODE31_END);
+ os_info_entry_add_val(OS_INFO_IMAGE_START, (unsigned long)_stext);
+ os_info_entry_add_val(OS_INFO_IMAGE_END, (unsigned long)_end);
+ os_info_entry_add_val(OS_INFO_IMAGE_PHYS, __pa_symbol(_stext));
os_info.csum = os_info_csum(&os_info);
abs_lc = get_abs_lowcore();
abs_lc->os_info = __pa(&os_info);
@@ -125,7 +148,7 @@ static void os_info_old_init(void)
if (os_info_init)
return;
- if (!oldmem_data.start)
+ if (!oldmem_data.start && !is_ipl_type_dump())
goto fail;
if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr)))
goto fail;
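
os_info now carries two kinds of entries: data entries, which record the address and size of a checksummed buffer, and value entries, which store a 64-bit value inline. A sketch of how a caller would use the two helpers above; example_buffer and example_fill_os_info are made up, while the entry indices and kaslr_offset() are existing symbols:

    static u64 example_buffer[4];

    static void example_fill_os_info(void)
    {
        /* entry points at a buffer: store its address and size, checksummed */
        os_info_entry_add_data(OS_INFO_REIPL_BLOCK, example_buffer,
                               sizeof(example_buffer));
        /* entry is a plain 64-bit value stored inline */
        os_info_entry_add_val(OS_INFO_KASLR_OFFSET, kaslr_offset());
    }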
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 41ed6e0f0a2a..1434642e9cba 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -428,7 +428,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
case CPUMF_CTR_SET_CRYPTO:
if (cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 16;
- else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
+ else if (cpumf_ctr_info.csvn >= 6)
ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 0d64aafd158f..e4a6bfc91080 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -855,16 +855,11 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
}
/* Determine version specific crypto set */
- switch (ci.csvn) {
- case 1 ... 5:
+ csvn = none;
+ if (ci.csvn >= 1 && ci.csvn <= 5)
csvn = cpumcf_svn_12345_pmu_event_attr;
- break;
- case 6 ... 7:
+ else if (ci.csvn >= 6)
csvn = cpumcf_svn_67_pmu_event_attr;
- break;
- default:
- csvn = none;
- }
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2fd2e..5fff629b1a89 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- struct stack_frame_user __user *sf;
- unsigned long ip, sp;
- bool first = true;
-
- if (is_compat_task())
- return;
- perf_callchain_store(entry, instruction_pointer(regs));
- sf = (void __user *)user_stack_pointer(regs);
- pagefault_disable();
- while (entry->nr < entry->max_stack) {
- if (__get_user(sp, &sf->back_chain))
- break;
- if (__get_user(ip, &sf->gprs[8]))
- break;
- if (ip & 0x1) {
- /*
- * If the instruction address is invalid, and this
- * is the first stack frame, assume r14 has not
- * been written to the stack yet. Otherwise exit.
- */
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
- break;
- }
- perf_callchain_store(entry, ip);
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
- first = false;
- }
- pagefault_enable();
+ arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 823d652e3917..4ad472d130a3 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -90,7 +90,6 @@ static void paicrypt_event_destroy(struct perf_event *event)
event->cpu);
struct paicrypt_map *cpump = mp->mapptr;
- cpump->event = NULL;
static_branch_dec(&pai_key);
mutex_lock(&pai_reserve_mutex);
debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
@@ -356,10 +355,15 @@ static int paicrypt_add(struct perf_event *event, int flags)
static void paicrypt_stop(struct perf_event *event, int flags)
{
- if (!event->attr.sample_period) /* Counting */
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
+
+ if (!event->attr.sample_period) { /* Counting */
paicrypt_read(event);
- else /* Sampling */
+ } else { /* Sampling */
perf_sched_cb_dec(event->pmu);
+ cpump->event = NULL;
+ }
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 616a25606cd6..a6da7e0cc7a6 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -122,7 +122,6 @@ static void paiext_event_destroy(struct perf_event *event)
free_page(PAI_SAVE_AREA(event));
mutex_lock(&paiext_reserve_mutex);
- cpump->event = NULL;
if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
paiext_free(mp);
paiext_root_free();
@@ -362,10 +361,15 @@ static int paiext_add(struct perf_event *event, int flags)
static void paiext_stop(struct perf_event *event, int flags)
{
- if (!event->attr.sample_period) /* Counting */
+ struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+ struct paiext_map *cpump = mp->mapptr;
+
+ if (!event->attr.sample_period) { /* Counting */
paiext_read(event);
- else /* Sampling */
+ } else { /* Sampling */
perf_sched_cb_dec(event->pmu);
+ cpump->event = NULL;
+ }
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dd456b475861..d8740631df4b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -86,11 +86,6 @@ void arch_release_task_struct(struct task_struct *tsk)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- /*
- * Save the floating-point or vector register state of the current
- * task and set the TIF_FPU flag to lazy restore the FPU register
- * state when returning to user space.
- */
save_user_fpu_regs();
*dst = *src;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 24ed33f044ec..90c2c786bb35 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -146,16 +146,16 @@ static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
unsigned long __bootdata_preserved(max_mappable);
-unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);
-unsigned long __bootdata_preserved(__kaslr_offset);
+struct vm_layout __bootdata_preserved(vm_layout);
+EXPORT_SYMBOL_GPL(vm_layout);
int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+u64 alt_stfle_fac_list[16];
struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START;
@@ -765,7 +765,7 @@ static void __init relocate_amode31_section(void)
unsigned long amode31_size = __eamode31 - __samode31;
long amode31_offset, *ptr;
- amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31;
+ amode31_offset = AMODE31_START - (unsigned long)__samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 94f440e38303..640363b2a105 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2006
*/
+#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
@@ -62,42 +63,121 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return 0;
}
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
- const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry, bool perf,
+ unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+ if (perf) {
+ if (perf_callchain_store(entry, ip))
+ return false;
+ return true;
+ }
+#endif
+ return consume_entry(cookie, ip);
+}
+
+static inline bool ip_invalid(unsigned long ip)
+{
+ /*
+	 * Perform some basic checks to see whether an instruction address
+	 * taken from an unreliable source is invalid.
+ */
+ if (ip & 1)
+ return true;
+ if (ip < mmap_min_addr)
+ return true;
+ if (ip >= current->mm->context.asce_limit)
+ return true;
+ return false;
+}
+
+static inline bool ip_within_vdso(unsigned long ip)
{
+ return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf)
+{
+ struct stack_frame_vdso_wrapper __user *sf_vdso;
struct stack_frame_user __user *sf;
unsigned long ip, sp;
bool first = true;
if (is_compat_task())
return;
- if (!consume_entry(cookie, instruction_pointer(regs)))
+ if (!current->mm)
+ return;
+ ip = instruction_pointer(regs);
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
return;
sf = (void __user *)user_stack_pointer(regs);
pagefault_disable();
while (1) {
if (__get_user(sp, &sf->back_chain))
break;
- if (__get_user(ip, &sf->gprs[8]))
+ /*
+ * VDSO entry code has a non-standard stack frame layout.
+ * See VDSO user wrapper code for details.
+ */
+ if (!sp && ip_within_vdso(ip)) {
+ sf_vdso = (void __user *)sf;
+ if (__get_user(ip, &sf_vdso->return_address))
+ break;
+ sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+ sf = (void __user *)sp;
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ } else {
+ sf = (void __user *)sp;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ }
+ /* Sanity check: ABI requires SP to be 8 byte aligned. */
+ if (sp & 0x7)
break;
- if (ip & 0x1) {
+ if (ip_invalid(ip)) {
/*
* If the instruction address is invalid, and this
* is the first stack frame, assume r14 has not
* been written to the stack yet. Otherwise exit.
*/
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
+ if (!first)
+ break;
+ ip = regs->gprs[14];
+ if (ip_invalid(ip))
break;
}
- if (!consume_entry(cookie, ip))
- break;
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
+ return;
first = false;
}
pagefault_enable();
}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
+unsigned long return_address(unsigned int n)
+{
+ struct unwind_state state;
+ unsigned long addr;
+
+ /* Increment to skip current stack entry */
+ n++;
+
+ unwind_for_each_frame(&state, NULL, NULL, 0) {
+ addr = unwind_get_return_address(&state);
+ if (!addr)
+ break;
+ if (!n--)
+ return addr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(return_address);
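
return_address(n) walks the kernel stack via the unwinder, skipping its own frame, and returns the return address n levels up the call chain, or 0 if the stack is shorter. A sketch of a typical caller; example_report_caller is a made-up function, and %pS symbolizes the printed address:

    #include <linux/printk.h>

    static void example_report_caller(void)
    {
        pr_info("immediate caller: %pS\n", (void *)return_address(0));
        pr_info("one level further up: %pS\n", (void *)return_address(1));
    }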
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
index fb85e797946d..1bb78b9468e8 100644
--- a/arch/s390/kernel/syscalls/Makefile
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -4,8 +4,8 @@ gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
uapi := $(gen)/uapi/asm
-syscall := $(srctree)/$(src)/syscall.tbl
-systbl := $(srctree)/$(src)/syscalltbl
+syscall := $(src)/syscall.tbl
+systbl := $(src)/syscalltbl
gen-y := $(kapi)/syscall_table.h
kapi-hdrs-y := $(kapi)/unistd_nr.h
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index fc07bc39e698..265fea37e030 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -21,6 +21,7 @@
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
+EXPORT_SYMBOL(prot_virt_guest);
#endif
/*
@@ -181,36 +182,36 @@ int uv_convert_owned_from_secure(unsigned long paddr)
}
/*
- * Calculate the expected ref_count for a page that would otherwise have no
+ * Calculate the expected ref_count for a folio that would otherwise have no
* further pins. This was cribbed from similar functions in other places in
* the kernel, but with some slight modifications. We know that a secure
- * page can not be a huge page for example.
+ * folio cannot be a large folio, for example.
*/
-static int expected_page_refs(struct page *page)
+static int expected_folio_refs(struct folio *folio)
{
int res;
- res = page_mapcount(page);
- if (PageSwapCache(page)) {
+ res = folio_mapcount(folio);
+ if (folio_test_swapcache(folio)) {
res++;
- } else if (page_mapping(page)) {
+ } else if (folio_mapping(folio)) {
res++;
- if (page_has_private(page))
+ if (folio->private)
res++;
}
return res;
}
-static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
int expected, cc = 0;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
return -EAGAIN;
- expected = expected_page_refs(page);
- if (!page_ref_freeze(page, expected))
+ expected = expected_folio_refs(folio);
+ if (!folio_ref_freeze(folio, expected))
return -EBUSY;
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_arch_1, &folio->flags);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
@@ -220,9 +221,9 @@ static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
* -EAGAIN and we let the callers deal with it.
*/
cc = __uv_call(0, (u64)uvcb);
- page_ref_unfreeze(page, expected);
+ folio_ref_unfreeze(folio, expected);
/*
- * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
+ * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
* If busy or partially completed, return -EAGAIN.
*/
if (cc == UVC_CC_OK)
@@ -277,7 +278,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
bool local_drain = false;
spinlock_t *ptelock;
unsigned long uaddr;
- struct page *page;
+ struct folio *folio;
pte_t *ptep;
int rc;
@@ -306,15 +307,19 @@ again:
if (!ptep)
goto out;
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
- page = pte_page(*ptep);
+ folio = page_folio(pte_page(*ptep));
+ rc = -EINVAL;
+ if (folio_test_large(folio))
+ goto unlock;
rc = -EAGAIN;
- if (trylock_page(page)) {
+ if (folio_trylock(folio)) {
if (should_export_before_import(uvcb, gmap->mm))
- uv_convert_from_secure(page_to_phys(page));
- rc = make_page_secure(page, uvcb);
- unlock_page(page);
+ uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
+ rc = make_folio_secure(folio, uvcb);
+ folio_unlock(folio);
}
}
+unlock:
pte_unmap_unlock(ptep, ptelock);
out:
mmap_read_unlock(gmap->mm);
@@ -324,10 +329,10 @@ out:
* If we are here because the UVC returned busy or partial
* completion, this is just a useless check, but it is safe.
*/
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
} else if (rc == -EBUSY) {
/*
- * If we have tried a local drain and the page refcount
+ * If we have tried a local drain and the folio refcount
* still does not match our expected safe value, try with a
* system wide drain. This is needed if the pagevecs holding
* the page are on a different CPU.
@@ -338,7 +343,7 @@ out:
return -EAGAIN;
}
/*
- * We are here if the page refcount does not match the
+ * We are here if the folio refcount does not match the
* expected safe value. The main culprits are usually
* pagevecs. With lru_add_drain() we drain the pagevecs
* on the local CPU so that hopefully the refcount will
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c91db..2f967ac2b8e3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
return addr;
}
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
{
- unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long size;
if (is_compat_task())
- size += vdso32_end - vdso32_start;
+ size = vdso32_end - vdso32_start;
else
- size += vdso64_end - vdso64_start;
+ size = vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
+unsigned long vdso_size(void)
+{
+ return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index b12a274cbb47..2c5afb88d298 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# List of files in the vdso
-KCOV_INSTRUMENT := n
-
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
obj-vdso32 = vdso_user_wrapper-32.o note-32.o
@@ -19,8 +17,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
--hash-style=both --build-id=sha1 -melf_s390 -T
@@ -32,19 +32,13 @@ obj-y += vdso32_wrapper.o
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-# Disable gcov profiling, ubsan and kasan for VDSO code
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
+$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso_and_check)
# strip rule for the .so file
@@ -62,7 +56,7 @@ quiet_cmd_vdso32cc = VDSO32C $@
cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index ef9832726097..ba19c0ca7c87 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# List of files in the vdso
-KCOV_INSTRUMENT := n
-
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
obj-vdso64 = vdso_user_wrapper.o note.o
@@ -24,9 +22,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
+KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
ldflags-y := -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
@@ -37,12 +37,6 @@ obj-y += vdso64_wrapper.o
targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-# Disable gcov profiling, ubsan and kasan for VDSO code
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
@@ -50,7 +44,7 @@ quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
$(call if_changed,vdso_and_check)
# strip rule for the .so file
@@ -72,7 +66,7 @@ quiet_cmd_vdso64cc = VDSO64C $@
cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 57f62596e53b..e26e68675c08 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -6,8 +6,6 @@
#include <asm/dwarf.h>
#include <asm/ptrace.h>
-#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
-
/*
* Older glibc version called vdso without allocating a stackframe. This wrapper
* is just used to allocate a stackframe. See
@@ -20,14 +18,17 @@
__ALIGN
__kernel_\func:
CFI_STARTPROC
- aghi %r15,-WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
- stg %r14,STACK_FRAME_OVERHEAD(%r15)
+ aghi %r15,-STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
+ CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
+ stg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
+ CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+ xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
brasl %r14,__s390_vdso_\func
- lg %r14,STACK_FRAME_OVERHEAD(%r15)
- aghi %r15,WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+ lg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
+ CFI_RESTORE 14
+ aghi %r15,STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET STACK_FRAME_USER_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
diff --git a/arch/s390/kernel/vmcore_info.c b/arch/s390/kernel/vmcore_info.c
index d296dfc22191..23f7d7619a99 100644
--- a/arch/s390/kernel/vmcore_info.c
+++ b/arch/s390/kernel/vmcore_info.c
@@ -14,7 +14,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
+ vmcoreinfo_append_str("IDENTITYBASE=%lx\n", __identity_base);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+ vmcoreinfo_append_str("KERNELOFFPHYS=%lx\n", __kaslr_offset_phys);
abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note();
put_abs_lowcore(abs_lc);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 48de296e8905..a1ce3925ec71 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -39,7 +39,7 @@ PHDRS {
SECTIONS
{
- . = 0x100000;
+ . = __START_KERNEL;
.text : {
_stext = .; /* Start of text section */
_text = .; /* Text and read-only data */
@@ -183,7 +183,7 @@ SECTIONS
.amode31.data : {
*(.amode31.data)
}
- . = ALIGN(PAGE_SIZE);
+ . = _samode31 + AMODE31_SIZE;
_eamode31 = .;
/* early.c uses stsi, which requires page aligned data. */
@@ -192,31 +192,6 @@ SECTIONS
PERCPU_SECTION(0x100)
-#ifdef CONFIG_PIE_BUILD
- .dynsym ALIGN(8) : {
- __dynsym_start = .;
- *(.dynsym)
- __dynsym_end = .;
- }
- .rela.dyn ALIGN(8) : {
- __rela_dyn_start = .;
- *(.rela*)
- __rela_dyn_end = .;
- }
- .dynamic ALIGN(8) : {
- *(.dynamic)
- }
- .dynstr ALIGN(8) : {
- *(.dynstr)
- }
-#endif
- .hash ALIGN(8) : {
- *(.hash)
- }
- .gnu.hash ALIGN(8) : {
- *(.gnu.hash)
- }
-
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
@@ -230,7 +205,6 @@ SECTIONS
* it should match struct vmlinux_info
*/
.vmlinux.info 0 (INFO) : {
- QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
@@ -239,14 +213,8 @@ SECTIONS
QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */
QUAD(__boot_data_preserved_end -
__boot_data_preserved_start) /* bootdata_preserved_size */
-#ifdef CONFIG_PIE_BUILD
- QUAD(__dynsym_start) /* dynsym_start */
- QUAD(__rela_dyn_start) /* rela_dyn_start */
- QUAD(__rela_dyn_end) /* rela_dyn_end */
-#else
QUAD(__got_start) /* got_start */
QUAD(__got_end) /* got_end */
-#endif
QUAD(_eamode31 - _samode31) /* amode31_size */
QUAD(init_mm)
QUAD(swapper_pg_dir)
@@ -282,12 +250,10 @@ SECTIONS
*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
}
ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
-#ifndef CONFIG_PIE_BUILD
.rela.dyn : {
*(.rela.*) *(.rela_*)
}
ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
-#endif
/* Sections to be discarded */
DISCARDS
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 24a18e5ef6e8..ffc1db0cbf9c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -33,14 +33,6 @@ static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
-static inline u64 get_vtimer(void)
-{
- u64 timer;
-
- asm volatile("stpt %0" : "=Q" (timer));
- return timer;
-}
-
static inline void set_vtimer(u64 expires)
{
u64 timer;
@@ -223,7 +215,7 @@ static u64 vtime_delta(void)
{
u64 timer = S390_lowcore.last_update_timer;
- S390_lowcore.last_update_timer = get_vtimer();
+ S390_lowcore.last_update_timer = get_cpu_timer();
return timer - S390_lowcore.last_update_timer;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 5147b943a864..82e9631cd9ef 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -587,7 +587,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
- if (hpage && !kvm_is_ucontrol(kvm))
+ if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -2631,9 +2631,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
if (r)
break;
- mmap_write_lock(current->mm);
- r = gmap_mark_unmergeable();
- mmap_write_unlock(current->mm);
+ r = s390_disable_cow_sharing();
if (r)
break;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index b2c9f010f0fe..c9ecae830634 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
+#include <linux/io.h>
#include <asm/gmap.h>
#include <asm/mmu_context.h>
@@ -361,7 +362,7 @@ end:
case -EACCES:
return set_validity_icpt(scb_s, 0x003CU);
}
- scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
+ scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
@@ -1005,7 +1006,7 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (read_guest_real(vcpu, fac, &vsie_page->fac,
stfle_size() * sizeof(u64)))
return set_validity_icpt(scb_s, 0x1090U);
- scb_s->fac = (__u32)(__u64) &vsie_page->fac;
+ scb_s->fac = (u32)virt_to_phys(&vsie_page->fac);
}
return 0;
}
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 90eac15ea62a..f43f897d3fc0 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -23,4 +23,4 @@ obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-obj-$(CONFIG_EXPOLINE_EXTERN) += expoline/
+obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
diff --git a/arch/s390/lib/expoline/expoline.S b/arch/s390/lib/expoline.S
index 92ed8409a7a4..92ed8409a7a4 100644
--- a/arch/s390/lib/expoline/expoline.S
+++ b/arch/s390/lib/expoline.S
diff --git a/arch/s390/lib/expoline/Makefile b/arch/s390/lib/expoline/Makefile
deleted file mode 100644
index 854631d9cb03..000000000000
--- a/arch/s390/lib/expoline/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-y += expoline.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index c421dd44ffbe..65747f15dbec 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -75,7 +75,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
gmap = (struct gmap *)S390_lowcore.gmap;
- if (regs->cr1 == gmap->asce)
+ if (gmap && gmap->asce == regs->cr1)
return GMAP_FAULT;
return KERNEL_FAULT;
}
@@ -325,7 +325,8 @@ static void do_exception(struct pt_regs *regs, int access)
goto lock_mmap;
if (!(vma->vm_flags & access)) {
vma_end_read(vma);
- goto lock_mmap;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ return handle_fault_error_nolock(regs, SEGV_ACCERR);
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 094b43b121cd..474a25ca5c48 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2550,41 +2550,6 @@ static inline void thp_split_mm(struct mm_struct *mm)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Remove all empty zero pages from the mapping for lazy refaulting
- * - This must be called after mm->context.has_pgste is set, to avoid
- * future creation of zero pages
- * - This must be called after THP was disabled.
- *
- * mm contracts with s390, that even if mm were to remove a page table,
- * racing with the loop below and so causing pte_offset_map_lock() to fail,
- * it will never insert a page table containing empty zero pages once
- * mm_forbids_zeropage(mm) i.e. mm->context.has_pgste is set.
- */
-static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
- unsigned long end, struct mm_walk *walk)
-{
- unsigned long addr;
-
- for (addr = start; addr != end; addr += PAGE_SIZE) {
- pte_t *ptep;
- spinlock_t *ptl;
-
- ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!ptep)
- break;
- if (is_zero_pfn(pte_pfn(*ptep)))
- ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
- pte_unmap_unlock(ptep, ptl);
- }
- return 0;
-}
-
-static const struct mm_walk_ops zap_zero_walk_ops = {
- .pmd_entry = __zap_zero_pages,
- .walk_lock = PGWALK_WRLOCK,
-};
-
-/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
@@ -2601,22 +2566,142 @@ int s390_enable_sie(void)
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
mmap_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-int gmap_mark_unmergeable(void)
+static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ unsigned long *found_addr = walk->private;
+
+	/* Return 1 if the page is a zeropage. */
+ if (is_zero_pfn(pte_pfn(*pte))) {
+ /*
+ * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
+ * right thing and likely don't care: FAULT_FLAG_UNSHARE
+ * currently only works in COW mappings, which is also where
+ * mm_forbids_zeropage() is checked.
+ */
+ if (!is_cow_mapping(walk->vma->vm_flags))
+ return -EFAULT;
+
+ *found_addr = addr;
+ return 1;
+ }
+ return 0;
+}
+
+static const struct mm_walk_ops find_zeropage_ops = {
+ .pte_entry = find_zeropage_pte_entry,
+ .walk_lock = PGWALK_WRLOCK,
+};
+
+/*
+ * Unshare all shared zeropages, replacing them by anonymous pages. Note that
+ * we cannot simply zap all shared zeropages, because this could later
+ * trigger unexpected userfaultfd missing events.
+ *
+ * This must be called after mm->context.allow_cow_sharing was
+ * set to 0, to avoid future mappings of shared zeropages.
+ *
+ * mm contracts with s390, that even if mm were to remove a page table,
+ * and racing with walk_page_range_vma() calling pte_offset_map_lock()
+ * would fail, it will never insert a page table containing empty zero
+ * pages once mm_forbids_zeropage(mm) i.e.
+ * mm->context.allow_cow_sharing is set to 0.
+ */
+static int __s390_unshare_zeropages(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+ unsigned long addr;
+ vm_fault_t fault;
+ int rc;
+
+ for_each_vma(vmi, vma) {
+ /*
+		 * We could look only at COW mappings, but it is more
+		 * future-proof to catch unexpected zeropages in other mappings and
+ * fail.
+ */
+ if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
+ continue;
+ addr = vma->vm_start;
+
+retry:
+ rc = walk_page_range_vma(vma, addr, vma->vm_end,
+ &find_zeropage_ops, &addr);
+ if (rc < 0)
+ return rc;
+ else if (!rc)
+ continue;
+
+ /* addr was updated by find_zeropage_pte_entry() */
+ fault = handle_mm_fault(vma, addr,
+ FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
+ NULL);
+ if (fault & VM_FAULT_OOM)
+ return -ENOMEM;
+ /*
+ * See break_ksm(): even after handle_mm_fault() returned 0, we
+ * must start the lookup from the current address, because
+ * handle_mm_fault() may back out if there's any difficulty.
+ *
+ * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
+ * maybe they could trigger in the future on concurrent
+ * truncation. In that case, the shared zeropage would be gone
+ * and we can simply retry and make progress.
+ */
+ cond_resched();
+ goto retry;
+ }
+
+ return 0;
+}
+
+static int __s390_disable_cow_sharing(struct mm_struct *mm)
{
+ int rc;
+
+ if (!mm->context.allow_cow_sharing)
+ return 0;
+
+ mm->context.allow_cow_sharing = 0;
+
+ /* Replace all shared zeropages by anonymous pages. */
+ rc = __s390_unshare_zeropages(mm);
/*
* Make sure to disable KSM (if enabled for the whole process or
* individual VMAs). Note that nothing currently hinders user space
* from re-enabling it.
*/
- return ksm_disable(current->mm);
+ if (!rc)
+ rc = ksm_disable(mm);
+ if (rc)
+ mm->context.allow_cow_sharing = 1;
+ return rc;
+}
+
+/*
+ * Disable most COW-sharing of memory pages for the whole process:
+ * (1) Disable KSM and unmerge/unshare any KSM pages.
+ * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
+ *
+ * Note that we currently don't bother with COW-shared pages that are shared
+ * with parent/child processes due to fork().
+ */
+int s390_disable_cow_sharing(void)
+{
+ int rc;
+
+ mmap_write_lock(current->mm);
+ rc = __s390_disable_cow_sharing(current->mm);
+ mmap_write_unlock(current->mm);
+ return rc;
}
-EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
+EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
/*
* Enable storage key handling from now on and initialize the storage
@@ -2661,7 +2746,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
return 0;
start = pmd_val(*pmd) & HPAGE_MASK;
- end = start + HPAGE_SIZE - 1;
+ end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
cond_resched();
@@ -2685,7 +2770,7 @@ int s390_enable_skey(void)
goto out_up;
mm->context.uses_skeys = 1;
- rc = gmap_mark_unmergeable();
+ rc = __s390_disable_cow_sharing(mm);
if (rc) {
mm->context.uses_skeys = 0;
goto out_up;
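The gmap.c hunks above replace gmap_mark_unmergeable() with s390_disable_cow_sharing(), which clears mm->context.allow_cow_sharing, unshares any already-mapped zeropages, disables KSM, and restores the flag on failure. A minimal caller sketch, assuming a hypothetical setup path; only s390_disable_cow_sharing() itself comes from this patch:

/* Hypothetical caller sketch; only s390_disable_cow_sharing() is from the patch. */
static int example_prepare_mm_for_skeys_or_pv(void)
{
	int rc;

	/*
	 * Takes and releases mmap_write_lock(current->mm) internally and
	 * rolls back allow_cow_sharing if unsharing or ksm_disable() fails.
	 */
	rc = s390_disable_cow_sharing();
	if (rc)
		return rc;

	/* ... continue with storage-key / protected-guest setup ... */
	return 0;
}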
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index c2e8242bd15d..2675aab4acc7 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -139,7 +139,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
}
if (!test_and_set_bit(PG_arch_1, &page->flags))
- __storage_key_init_range(paddr, paddr + size - 1);
+ __storage_key_init_range(paddr, paddr + size);
}
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -233,16 +233,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return (pte_t *) pmdp;
}
-int pmd_huge(pmd_t pmd)
-{
- return pmd_leaf(pmd);
-}
-
-int pud_huge(pud_t pud)
-{
- return pud_leaf(pud);
-}
-
bool __init arch_hugetlb_valid_size(unsigned long size)
{
if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
@@ -258,14 +248,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
- info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
return vm_unmapped_area(&info);
}
@@ -274,7 +262,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
unsigned long addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -282,7 +270,6 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
@@ -328,7 +315,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
goto check_asce_limit;
}
- if (mm->get_unmapped_area == arch_get_unmapped_area)
+ if (!test_bit(MMF_TOPDOWN, &mm->flags))
addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f6391442c0c2..e769d2726f4e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -49,6 +49,7 @@
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
+#include <linux/execmem.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
@@ -302,3 +303,32 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ unsigned long module_load_offset = 0;
+ unsigned long start;
+
+ if (kaslr_enabled())
+ module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+
+ start = MODULES_VADDR + module_load_offset;
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .flags = EXECMEM_KASAN_SHADOW,
+ .start = start,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = MODULE_ALIGN,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b14fc0887654..206756946589 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -102,7 +102,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
goto check_asce_limit;
}
- info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
@@ -122,7 +121,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
@@ -185,10 +184,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
*/
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = mmap_base_legacy(random_factor);
- mm->get_unmapped_area = arch_get_unmapped_area;
+ clear_bit(MMF_TOPDOWN, &mm->flags);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ set_bit(MMF_TOPDOWN, &mm->flags);
}
}
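Two recurring conversions in the s390 mmap paths above: vm_unmapped_area_info is zero-initialized at its declaration instead of clearing individual fields, and the top-down layout decision now lives in the MMF_TOPDOWN mm flag rather than a per-mm get_unmapped_area function pointer. A condensed sketch of the resulting pattern; the wrapper name is illustrative, while the struct, flag, and helpers are the ones used in the diff:

static unsigned long example_get_unmapped_area(unsigned long len)
{
	struct vm_unmapped_area_info info = {};	/* .flags, .align_offset, ... start at 0 */

	info.length = len;
	if (test_bit(MMF_TOPDOWN, &current->mm->flags)) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = current->mm->mmap_base;
	} else {
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
	}
	return vm_unmapped_area(&info);
}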
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 85cddf904cb2..41c714e21292 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -13,7 +13,9 @@
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
+#include <asm/abs_lowcore.h>
#include <asm/cacheflush.h>
+#include <asm/maccess.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
@@ -21,6 +23,7 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
+#include <asm/physmem_info.h>
static DEFINE_MUTEX(vmem_mutex);
@@ -436,7 +439,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
return -EINVAL;
/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
- if (WARN_ON_ONCE(end > VMALLOC_START))
+ if (WARN_ON_ONCE(end > __abs_lowcore))
return -EINVAL;
for (addr = start; addr < end; addr = next) {
next = pgd_addr_end(addr, end);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index b418333bb086..4be8f5cadd02 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
* PLT for hotpatchable calls. The calling convention is the same as for the
* ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
*/
-extern const char bpf_plt[];
-extern const char bpf_plt_ret[];
-extern const char bpf_plt_target[];
-extern const char bpf_plt_end[];
-#define BPF_PLT_SIZE 32
+struct bpf_plt {
+ char code[16];
+ void *ret;
+ void *target;
+} __packed;
+extern const struct bpf_plt bpf_plt;
asm(
".pushsection .rodata\n"
" .balign 8\n"
@@ -531,15 +532,14 @@ asm(
" .balign 8\n"
"bpf_plt_ret: .quad 0\n"
"bpf_plt_target: .quad 0\n"
- "bpf_plt_end:\n"
" .popsection\n"
);
-static void bpf_jit_plt(void *plt, void *ret, void *target)
+static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{
- memcpy(plt, bpf_plt, BPF_PLT_SIZE);
- *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
- *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+ memcpy(plt, &bpf_plt, sizeof(*plt));
+ plt->ret = ret;
+ plt->target = target;
}
/*
@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
jit->prg = ALIGN(jit->prg, 8);
jit->prologue_plt = jit->prg;
if (jit->prg_buf)
- bpf_jit_plt(jit->prg_buf + jit->prg,
+ bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
jit->prg_buf + jit->prologue_plt_ret, NULL);
- jit->prg += BPF_PLT_SIZE;
+ jit->prg += sizeof(struct bpf_plt);
}
static int get_probe_mem_regno(const u8 *insn)
@@ -1427,8 +1427,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
src_reg, dst_reg, off); \
- if (is32 && (insn->imm & BPF_FETCH)) \
- EMIT_ZERO(src_reg); \
+ if (insn->imm & BPF_FETCH) { \
+ /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
+ _EMIT2(0x07e0); \
+ if (is32) \
+ EMIT_ZERO(src_reg); \
+ } \
} while (0)
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
@@ -2040,9 +2044,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
- if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
- return orig_fp;
-
if (!fp->jit_requested)
return orig_fp;
@@ -2111,7 +2112,11 @@ skip_init_ctx:
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (!fp->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ fp = orig_fp;
+ goto free_addrs;
+ }
} else {
jit_data->header = header;
jit_data->ctx = jit;
@@ -2148,14 +2153,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
+ struct bpf_plt expected_plt, current_plt, new_plt, *plt;
struct {
u16 opc;
s32 disp;
} __packed insn;
- char expected_plt[BPF_PLT_SIZE];
- char current_plt[BPF_PLT_SIZE];
- char new_plt[BPF_PLT_SIZE];
- char *plt;
char *ret;
int err;
@@ -2174,18 +2176,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
*/
} else {
/* Verify the PLT. */
- plt = (char *)ip + (insn.disp << 1);
- err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+ plt = ip + (insn.disp << 1);
+ err = copy_from_kernel_nofault(&current_plt, plt,
+ sizeof(current_plt));
if (err < 0)
return err;
ret = (char *)ip + 6;
- bpf_jit_plt(expected_plt, ret, old_addr);
- if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+ bpf_jit_plt(&expected_plt, ret, old_addr);
+ if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
return -EINVAL;
/* Adjust the call address. */
- bpf_jit_plt(new_plt, ret, new_addr);
- s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
- new_plt + (bpf_plt_target - bpf_plt),
+ bpf_jit_plt(&new_plt, ret, new_addr);
+ s390_kernel_write(&plt->target, &new_plt.target,
sizeof(void *));
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 26afde0d1ed3..0de0f6e405b5 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -250,12 +250,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return 0;
}
-/* combine single writes by using store-block insn */
-void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
-{
- zpci_memcpy_toio(to, from, count * 8);
-}
-
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index a90499c087f0..5398729bfe1b 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -169,7 +169,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+ ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
@@ -308,7 +308,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+ ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
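follow_pte() now takes the VMA instead of the mm, matching these callers, which already hold the mmap lock and have looked the VMA up. A minimal sketch of the updated calling pattern; the wrapper name and the omitted PFN handling are placeholders:

static int example_peek_mmio_pte(struct vm_area_struct *vma, unsigned long mmio_addr)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
	if (ret)
		return ret;	/* not mapped, or not a suitable mapping */
	/* ... derive the physical address from pte_pfn(*ptep) here ... */
	pte_unmap_unlock(ptep, ptl);
	return 0;
}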
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index a0b872b74fe3..0f4f1e8fc480 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -172,7 +172,6 @@ static ssize_t uid_is_unique_show(struct device *dev,
}
static DEVICE_ATTR_RO(uid_is_unique);
-#ifndef CONFIG_DMI
/* analogous to smbios index */
static ssize_t index_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -202,7 +201,6 @@ static struct attribute_group zpci_ident_attr_group = {
.attrs = zpci_ident_attrs,
.is_visible = zpci_index_is_visible,
};
-#endif
static struct bin_attribute *zpci_bin_attrs[] = {
&bin_attr_util_string,
@@ -245,8 +243,6 @@ static struct attribute_group pfip_attr_group = {
const struct attribute_group *zpci_attr_groups[] = {
&zpci_attr_group,
&pfip_attr_group,
-#ifndef CONFIG_DMI
&zpci_ident_attr_group,
-#endif
NULL,
};
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index 4e930f566878..24eccaa29337 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD := y
-
purgatory-y := head.o purgatory.o string.o sha256.o mem.o
targets += $(purgatory-y) purgatory.lds purgatory purgatory.chk purgatory.ro
@@ -15,12 +13,6 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
-KCOV_INSTRUMENT := n
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
diff --git a/arch/s390/tools/relocs.c b/arch/s390/tools/relocs.c
index 30a732c808f3..a74dbd5c9896 100644
--- a/arch/s390/tools/relocs.c
+++ b/arch/s390/tools/relocs.c
@@ -280,7 +280,7 @@ static int do_reloc(struct section *sec, Elf_Rel *rel)
case R_390_GOTOFF64:
break;
case R_390_64:
- add_reloc(&relocs64, offset);
+ add_reloc(&relocs64, offset - ehdr.e_entry);
break;
default:
die("Unsupported relocation type: %d\n", r_type);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2ad3e29f0ebe..5e6a3ead51fb 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -38,7 +38,7 @@ config SUPERH
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
- select HAVE_FAST_GUP if MMU
+ select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
@@ -125,7 +125,8 @@ config ARCH_HAS_ILOG2_U64
config NO_IOPORT_MAP
def_bool !PCI
- depends on !SH_SHMIN && !SH_HP6XX && !SH_SOLUTION_ENGINE
+ depends on !SH_SHMIN && !SH_HP6XX && !SH_SOLUTION_ENGINE && \
+ !SH_DREAMCAST
config IO_TRAPPED
bool
@@ -709,7 +710,6 @@ config ROMIMAGE_MMCIF
choice
prompt "Kernel command line"
- optional
default CMDLINE_OVERWRITE
help
Setting this option allows the kernel command line arguments
@@ -727,6 +727,11 @@ config CMDLINE_EXTEND
Given string will be concatenated with arguments passed in
by a bootloader.
+config CMDLINE_FROM_BOOTLOADER
+ bool "Use bootloader kernel arguments"
+ help
+ Uses the command-line options passed by the boot loader.
+
endchoice
config CMDLINE
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index f39c8196efdf..689ea14a6678 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -569,7 +569,7 @@ static int __init sh7757lcr_devices_setup(void)
arch_initcall(sh7757lcr_devices_setup);
/* Initialize IRQ setting */
-void __init init_sh7757lcr_IRQ(void)
+static void __init init_sh7757lcr_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index 77dad1e511b4..25c4968f0d8b 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -295,7 +295,7 @@ static int __init sh7785lcr_devices_setup(void)
device_initcall(sh7785lcr_devices_setup);
/* Initialize IRQ setting */
-void __init init_sh7785lcr_IRQ(void)
+static void __init init_sh7785lcr_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c
index 2d966c1c2cc1..daa8455549fa 100644
--- a/arch/sh/boards/mach-dreamcast/setup.c
+++ b/arch/sh/boards/mach-dreamcast/setup.c
@@ -25,10 +25,13 @@
#include <asm/irq.h>
#include <asm/rtc.h>
#include <asm/machvec.h>
+#include <cpu/addrspace.h>
#include <mach/sysasic.h>
static void __init dreamcast_setup(char **cmdline_p)
{
+ /* GAPS PCI bridge assumes P2 area relative addresses. */
+ __set_io_port_base(P2SEG);
}
static struct sh_machine_vector mv_dreamcast __initmv = {
diff --git a/arch/sh/boards/mach-highlander/pinmux-r7785rp.c b/arch/sh/boards/mach-highlander/pinmux-r7785rp.c
index 703179faf652..689bd8732d9e 100644
--- a/arch/sh/boards/mach-highlander/pinmux-r7785rp.c
+++ b/arch/sh/boards/mach-highlander/pinmux-r7785rp.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <cpu/sh7785.h>
+#include <mach/highlander.h>
void __init highlander_plat_pinmux_setup(void)
{
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index 7fb474844a2d..bc6cf995128c 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -120,7 +120,7 @@ static int set_rtc_mmss(struct rtc_time *tm)
return retval;
}
-int sh03_rtc_settimeofday(struct device *dev, struct rtc_time *tm)
+static int sh03_rtc_settimeofday(struct device *dev, struct rtc_time *tm)
{
return set_rtc_mmss(tm);
}
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index f7f3e618e85b..cc88cb8908cc 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -10,6 +10,8 @@
#include <linux/of_fdt.h>
#include <linux/clocksource.h>
#include <linux/irqchip.h>
+
+#include <asm/clock.h>
#include <asm/machvec.h>
#include <asm/rtc.h>
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 6c6c791a1d06..8bc319ff54bf 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -5,15 +5,12 @@
# create a compressed vmlinux image from the original vmlinux
#
-OBJECTS := head_32.o misc.o cache.o piggy.o \
+OBJECTS := head_32.o misc.o piggy.o \
ashiftrt.o ashldi3.o ashrsi3.o ashlsi3.o lshrsi3.o
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo $(OBJECTS)
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-
#
# IMAGE_OFFSET is the load offset of the compression loader
#
diff --git a/arch/sh/boot/compressed/cache.c b/arch/sh/boot/compressed/cache.c
deleted file mode 100644
index 31e04ff4841e..000000000000
--- a/arch/sh/boot/compressed/cache.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-int cache_control(unsigned int command)
-{
- volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
- int i;
-
- for (i = 0; i < (32 * 1024); i += 32) {
- (void)*p;
- p += (32 / sizeof(int));
- }
-
- return 0;
-}
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index ca05c99a3d5b..3690379cc86b 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -16,6 +16,8 @@
#include <asm/addrspace.h>
#include <asm/page.h>
+#include "misc.h"
+
/*
* gzip declarations
*/
@@ -26,11 +28,6 @@
#undef memcpy
#define memzero(s, n) memset ((s), 0, (n))
-/* cache.c */
-#define CACHE_ENABLE 0
-#define CACHE_DISABLE 1
-int cache_control(unsigned int command);
-
extern char input_data[];
extern int input_len;
static unsigned char *output;
@@ -139,8 +136,6 @@ void decompress_kernel(void)
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
puts("Uncompressing Linux... ");
- cache_control(CACHE_ENABLE);
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
- cache_control(CACHE_DISABLE);
puts("Ok, booting the kernel.\n");
}
diff --git a/arch/sh/boot/compressed/misc.h b/arch/sh/boot/compressed/misc.h
new file mode 100644
index 000000000000..2b4534faa305
--- /dev/null
+++ b/arch/sh/boot/compressed/misc.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MISC_H
+#define MISC_H
+
+void arch_ftrace_ops_list_func(void);
+void decompress_kernel(void);
+void ftrace_stub(void);
+
+#endif /* MISC_H */
diff --git a/arch/sh/boot/dts/j2_mimas_v2.dts b/arch/sh/boot/dts/j2_mimas_v2.dts
index fa9562f78d53..faf884f53804 100644
--- a/arch/sh/boot/dts/j2_mimas_v2.dts
+++ b/arch/sh/boot/dts/j2_mimas_v2.dts
@@ -71,8 +71,6 @@
#address-cells = <1>;
#size-cells = <0>;
- spi-max-frequency = <25000000>;
-
reg = <0x40 0x8>;
sdcard@0 {
diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
index cc909f347877..9c2644443c4d 100644
--- a/arch/sh/configs/apsh4a3a_defconfig
+++ b/arch/sh/configs/apsh4a3a_defconfig
@@ -15,6 +15,7 @@ CONFIG_MEMORY_START=0x0C000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4A3A=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_KEXEC=y
CONFIG_PREEMPT=y
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 64558bf60e10..05d21d91f41d 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -42,6 +42,7 @@ CONFIG_SECCOMP=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM=y
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index 9ee35269bee2..296ed768cbbb 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -6,7 +6,7 @@
# CONFIG_PRINTK is not set
# CONFIG_BUG is not set
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -19,6 +19,7 @@
CONFIG_CPU_SUBTYPE_SH7705=y
CONFIG_SH_EDOSK7705=y
CONFIG_SH_PCLK_FREQ=31250000
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 0c45f2a0f9bd..77e3185f63e4 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -15,6 +15,7 @@ CONFIG_SH_DMA_API=y
CONFIG_HD64461_ENABLER=y
CONFIG_PCCARD=y
CONFIG_PM=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_APM_EMULATION=y
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_SD=y
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index 541082090918..0311380160f4 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -15,6 +15,7 @@ CONFIG_KEXEC=y
CONFIG_PCI=y
CONFIG_PCCARD=y
CONFIG_YENTA=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 52937f9cc2ab..8d443749550e 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -22,6 +22,7 @@ CONFIG_SH_PCLK_FREQ=24000000
CONFIG_SH_DMA=y
CONFIG_SH_DMA_API=y
CONFIG_HEARTBEAT=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index a88cb3b77957..e4ef259425c4 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -21,6 +21,7 @@ CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SH_RSK=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_SH_TIMER_MTU2 is not set
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig
index d9a7ce783c9b..e0d1560b2bfd 100644
--- a/arch/sh/configs/rsk7269_defconfig
+++ b/arch/sh/configs/rsk7269_defconfig
@@ -10,6 +10,7 @@ CONFIG_MEMORY_SIZE=0x02000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SH_RSK=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_SH_TIMER_MTU2 is not set
CONFIG_SH_PCLK_FREQ=66700000
CONFIG_BINFMT_FLAT=y
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
index 14d0f5ead502..b5ab945f205e 100644
--- a/arch/sh/configs/se7619_defconfig
+++ b/arch/sh/configs/se7619_defconfig
@@ -4,7 +4,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_VM_EVENT_COUNTERS is not set
@@ -14,6 +14,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SH_7619_SOLUTION_ENGINE=y
CONFIG_HZ_100=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index 16a0f72f0822..1752ddc2694a 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -13,6 +13,7 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SH_ADC is not set
CONFIG_SH_SOLUTION_ENGINE=y
CONFIG_HEARTBEAT=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index dc854293da43..20f07aee5bde 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_BUG is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_SHMEM is not set
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index c891945b8a90..00862d3c030d 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_BUG is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_SHMEM is not set
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 09e455817447..5327a2f70980 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -17,6 +17,7 @@ CONFIG_SH_7722_SOLUTION_ENGINE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HEARTBEAT=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_KEXEC=y
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 5fa6239ae4ea..a1e25d7de8a6 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -15,6 +15,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_SOLUTION_ENGINE=y
CONFIG_SH_PCLK_FREQ=33333333
CONFIG_HEARTBEAT=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig
index 120176afe3f6..2f77b60e9540 100644
--- a/arch/sh/configs/secureedge5410_defconfig
+++ b/arch/sh/configs/secureedge5410_defconfig
@@ -10,6 +10,7 @@ CONFIG_SH_SECUREEDGE5410=y
CONFIG_SH_DMA=y
CONFIG_SH_DMA_API=y
CONFIG_PCI=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index 7f742729df69..99a5d0760532 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -15,6 +15,7 @@ CONFIG_MEMORY_SIZE=0x00800000
CONFIG_FLATMEM_MANUAL=y
# CONFIG_SH_ADC is not set
CONFIG_SH_PCLK_FREQ=32768000
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index cbc9389a89a8..5440bd0ca4ed 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -12,6 +12,7 @@ CONFIG_CPU_FREQ=y
CONFIG_SH_CPU_FREQ=y
CONFIG_KEXEC=y
CONFIG_KEXEC_JUMP=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_HIBERNATION=y
CONFIG_CPU_IDLE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index ee2357deba0f..4338af8d02d0 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -14,6 +14,7 @@ CONFIG_SH_CPU_FREQ=y
CONFIG_KEXEC=y
CONFIG_KEXEC_JUMP=y
CONFIG_PM=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_HIBERNATION=y
CONFIG_CPU_IDLE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 59262f42abe6..44f9b2317f09 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -32,6 +32,7 @@ CONFIG_PREEMPT=y
CONFIG_INTC_USERIMASK=y
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig
index 94381f8268ff..aec74b0e7003 100644
--- a/arch/sh/configs/sh7785lcr_defconfig
+++ b/arch/sh/configs/sh7785lcr_defconfig
@@ -17,6 +17,7 @@ CONFIG_HEARTBEAT=y
CONFIG_KEXEC=y
CONFIG_PREEMPT=y
CONFIG_PCI=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index e078b193a78a..bfeb004f130e 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -5,7 +5,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_HOTPLUG is not set
# CONFIG_BUG is not set
# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
+CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SHMEM is not set
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 445bb451a5ec..00ef62133b04 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
CONFIG_PCIEASPM_DEBUG=y
CONFIG_PCI_DEBUG=y
CONFIG_BINFMT_MISC=y
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 89cd4a3b4cca..87e5a8928873 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -41,21 +41,6 @@ struct dma_info *get_dma_info(unsigned int chan)
}
EXPORT_SYMBOL(get_dma_info);
-struct dma_info *get_dma_info_by_name(const char *dmac_name)
-{
- struct dma_info *info;
-
- list_for_each_entry(info, &registered_dmac_list, list) {
- if (dmac_name && (strcmp(dmac_name, info->name) != 0))
- continue;
- else
- return info;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(get_dma_info_by_name);
-
static unsigned int get_nr_channels(void)
{
struct dma_info *info;
@@ -101,93 +86,6 @@ int get_dma_residue(unsigned int chan)
}
EXPORT_SYMBOL(get_dma_residue);
-static int search_cap(const char **haystack, const char *needle)
-{
- const char **p;
-
- for (p = haystack; *p; p++)
- if (strcmp(*p, needle) == 0)
- return 1;
-
- return 0;
-}
-
-/**
- * request_dma_bycap - Allocate a DMA channel based on its capabilities
- * @dmac: List of DMA controllers to search
- * @caps: List of capabilities
- *
- * Search all channels of all DMA controllers to find a channel which
- * matches the requested capabilities. The result is the channel
- * number if a match is found, or %-ENODEV if no match is found.
- *
- * Note that not all DMA controllers export capabilities, in which
- * case they can never be allocated using this API, and so
- * request_dma() must be used specifying the channel number.
- */
-int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
-{
- unsigned int found = 0;
- struct dma_info *info;
- const char **p;
- int i;
-
- BUG_ON(!dmac || !caps);
-
- list_for_each_entry(info, &registered_dmac_list, list)
- if (strcmp(*dmac, info->name) == 0) {
- found = 1;
- break;
- }
-
- if (!found)
- return -ENODEV;
-
- for (i = 0; i < info->nr_channels; i++) {
- struct dma_channel *channel = &info->channels[i];
-
- if (unlikely(!channel->caps))
- continue;
-
- for (p = caps; *p; p++) {
- if (!search_cap(channel->caps, *p))
- break;
- if (request_dma(channel->chan, dev_id) == 0)
- return channel->chan;
- }
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(request_dma_bycap);
-
-int dmac_search_free_channel(const char *dev_id)
-{
- struct dma_channel *channel = { 0 };
- struct dma_info *info = get_dma_info(0);
- int i;
-
- for (i = 0; i < info->nr_channels; i++) {
- channel = &info->channels[i];
- if (unlikely(!channel))
- return -ENODEV;
-
- if (atomic_read(&channel->busy) == 0)
- break;
- }
-
- if (info->ops->request) {
- int result = info->ops->request(channel);
- if (result)
- return result;
-
- atomic_set(&channel->busy, 1);
- return channel->chan;
- }
-
- return -ENOSYS;
-}
-
int request_dma(unsigned int chan, const char *dev_id)
{
struct dma_channel *channel = { 0 };
@@ -240,35 +138,6 @@ void dma_wait_for_completion(unsigned int chan)
}
EXPORT_SYMBOL(dma_wait_for_completion);
-int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
-{
- struct dma_info *info;
- unsigned int found = 0;
- int i;
-
- list_for_each_entry(info, &registered_dmac_list, list)
- if (strcmp(dmac, info->name) == 0) {
- found = 1;
- break;
- }
-
- if (unlikely(!found))
- return -ENODEV;
-
- for (i = 0; i < info->nr_channels; i++, caps++) {
- struct dma_channel *channel;
-
- if ((info->first_channel_nr + i) != caps->ch_num)
- return -EINVAL;
-
- channel = &info->channels[i];
- channel->caps = caps->caplist;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(register_chan_caps);
-
void dma_configure_channel(unsigned int chan, unsigned long flags)
{
struct dma_info *info = get_dma_info(chan);
@@ -294,18 +163,6 @@ int dma_xfer(unsigned int chan, unsigned long from,
}
EXPORT_SYMBOL(dma_xfer);
-int dma_extend(unsigned int chan, unsigned long op, void *param)
-{
- struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = get_dma_channel(chan);
-
- if (info->ops->extend)
- return info->ops->extend(channel, op, param);
-
- return -ENOSYS;
-}
-EXPORT_SYMBOL(dma_extend);
-
static int dma_proc_show(struct seq_file *m, void *v)
{
struct dma_info *info = v;
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 6ecba5f521eb..362e4860bf52 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -91,7 +91,7 @@ err:
return ret;
}
-static int switch_drv_remove(struct platform_device *pdev)
+static void switch_drv_remove(struct platform_device *pdev)
{
struct push_switch *psw = platform_get_drvdata(pdev);
struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
@@ -106,13 +106,11 @@ static int switch_drv_remove(struct platform_device *pdev)
free_irq(irq, pdev);
kfree(psw);
-
- return 0;
}
static struct platform_driver switch_driver = {
.probe = switch_drv_probe,
- .remove = switch_drv_remove,
+ .remove_new = switch_drv_remove,
.driver = {
.name = DRV_NAME,
},
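switch_drv_remove() is converted to the void-returning platform driver remove callback, wired up through .remove_new, so the trailing return 0 disappears. A generic sketch of the converted shape; every example_* identifier is a placeholder:

#include <linux/platform_device.h>

static int example_drv_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources here */
}

static void example_drv_remove(struct platform_device *pdev)
{
	/* release resources; errors can only be logged, not returned */
}

static struct platform_driver example_driver = {
	.probe      = example_drv_probe,
	.remove_new = example_drv_remove,
	.driver = {
		.name = "example",
	},
};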
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 51112f54552b..e6642ff14889 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -104,6 +104,18 @@ void kunmap_coherent(void *kvaddr);
void cpu_cache_init(void);
+void __weak l2_cache_init(void);
+
+void __weak j2_cache_init(void);
+void __weak sh2_cache_init(void);
+void __weak sh2a_cache_init(void);
+void __weak sh3_cache_init(void);
+void __weak shx3_cache_init(void);
+void __weak sh4_cache_init(void);
+void __weak sh7705_cache_init(void);
+
+void __weak sh4__flush_region_init(void);
+
static inline void *sh_cacheop_vaddr(void *vaddr)
{
if (__in_29bit_mode())
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index c8bee3f985a2..6b6d409956d1 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -56,7 +56,6 @@ struct dma_ops {
int (*get_residue)(struct dma_channel *chan);
int (*xfer)(struct dma_channel *chan);
int (*configure)(struct dma_channel *chan, unsigned long flags);
- int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
};
struct dma_channel {
@@ -118,8 +117,6 @@ extern int dma_xfer(unsigned int chan, unsigned long from,
#define dma_read_page(chan, from, to) \
dma_read(chan, from, to, PAGE_SIZE)
-extern int request_dma_bycap(const char **dmac, const char **caps,
- const char *dev_id);
extern int get_dma_residue(unsigned int chan);
extern struct dma_info *get_dma_info(unsigned int chan);
extern struct dma_channel *get_dma_channel(unsigned int chan);
@@ -128,10 +125,6 @@ extern void dma_configure_channel(unsigned int chan, unsigned long flags);
extern int register_dmac(struct dma_info *info);
extern void unregister_dmac(struct dma_info *info);
-extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
-
-extern int dma_extend(unsigned int chan, unsigned long op, void *param);
-extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
/* arch/sh/drivers/dma/dma-sysfs.c */
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
diff --git a/arch/sh/include/asm/fb.h b/arch/sh/include/asm/fb.h
deleted file mode 100644
index 19df13ee9ca7..000000000000
--- a/arch/sh/include/asm/fb.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 04584be8986c..0379f4cce5ed 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -64,6 +64,9 @@ static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
preempt_enable();
}
+void float_raise(unsigned int flags);
+int float_rounding_mode(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FPU_H */
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index b1c1dc0cc261..1c10e1066390 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -33,6 +33,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr);
+
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
@@ -43,6 +45,14 @@ extern void *return_address(unsigned int);
#define ftrace_return_address(n) return_address(n)
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void arch_ftrace_nmi_enter(void);
+extern void arch_ftrace_nmi_exit(void);
+#else
+static inline void arch_ftrace_nmi_enter(void) { }
+static inline void arch_ftrace_nmi_exit(void) { }
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FTRACE_H */
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index 4d3ba39e681c..75028bd568ba 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -27,11 +27,11 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
return *ptep;
}
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#include <asm-generic/hugetlb.h>
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 361a0f57bdeb..74a438cea655 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -52,6 +52,8 @@ struct pmu;
/* arch/sh/kernel/hw_breakpoint.c */
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int arch_bp_generic_fields(int sh_len, int sh_type, int *gen_len,
+ int *gen_type);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw);
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index fc807011187f..84bb23a771f3 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -21,5 +21,6 @@
void sh_mv_setup(void);
void check_for_initrd(void);
void per_cpu_trap_init(void);
+void sh_fdt_init(phys_addr_t dt_phys);
#endif /* _SH_SETUP_H */
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
index 387105316d28..39240e06e8aa 100644
--- a/arch/sh/include/asm/syscalls.h
+++ b/arch/sh/include/asm/syscalls.h
@@ -8,6 +8,7 @@ asmlinkage int old_mmap(unsigned long addr, unsigned long len,
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op);
#include <asm/syscalls_32.h>
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index aeb8915e9254..ddf324bfb9a0 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -24,6 +24,10 @@ static inline void tlb_unwire_entry(void)
BUG();
}
#endif /* CONFIG_CPU_SH4 */
+
+asmlinkage int handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address);
+
#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */
diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
index c509081d90b9..fcf53f5827eb 100644
--- a/arch/sh/kernel/cpu/sh2a/opcode_helper.c
+++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
@@ -8,6 +8,8 @@
*/
#include <linux/kernel.h>
+#include <asm/processor.h>
+
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
* introduces some 32-bit instructions. Since there are no real
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 83ae1ad4a86e..d64d28c4f059 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -14,9 +14,12 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
+
+#include <asm/cacheflush.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
+
#include <cpu/sh7723.h>
/* Serial */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 0d990ab1ba2a..ef4b26a4b3d6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/notifier.h>
+#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 67e330b7ea46..2ad19a0c5e04 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -17,8 +17,11 @@
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/usb/ohci_pdriver.h>
+
#include <cpu/dma-register.h>
#include <cpu/sh7757.h>
+
+#include <asm/mmzone.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif2_platform_data = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 74620f30b19b..c048842d8a58 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -400,20 +400,6 @@ static struct platform_device *sh7786_devices[] __initdata = {
&usb_ohci_device,
};
-/*
- * Please call this function if your platform board
- * use external clock for USB
- * */
-#define USBCTL0 0xffe70858
-#define CLOCK_MODE_MASK 0xffffff7f
-#define EXT_CLOCK_MODE 0x00000080
-
-void __init sh7786_usb_use_exclock(void)
-{
- u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
- __raw_writel(val | EXT_CLOCK_MODE, USBCTL0);
-}
-
#define USBINITREG1 0xffe70094
#define USBINITREG2 0xffe7009c
#define USBINITVAL1 0x00ff0040
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index bf8682e71830..45c8ae20d109 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -344,7 +344,7 @@ out:
* dwarf_lookup_fde - locate the FDE that covers pc
* @pc: the program counter
*/
-struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+static struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
struct rb_node **rb_node = &fde_root.rb_node;
struct dwarf_fde *fde = NULL;
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index aed1ea8e2c2f..49c4ffd782d6 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -39,22 +39,17 @@ static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
- kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
+ kprobe_opcode_t opcode = *p->addr;
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = opcode;
return 0;
}
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
-}
-
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
@@ -253,7 +248,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
p = get_kprobe(addr);
if (!p) {
/* Not one of ours: let kernel handle it */
- if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@@ -301,7 +296,7 @@ static void __used kretprobe_trampoline_holder(void)
/*
* Called when we hit the probe point at __kretprobe_trampoline
*/
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
regs->pc = __kretprobe_trampoline_handler(regs, NULL);
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
index 8838094c9ff9..2ce22f11eab3 100644
--- a/arch/sh/kernel/return_address.c
+++ b/arch/sh/kernel/return_address.c
@@ -7,7 +7,9 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+
#include <asm/dwarf.h>
+#include <asm/ftrace.h>
#ifdef CONFIG_DWARF_UNWINDER
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5cf35a774dc7..108d808767fa 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -21,6 +21,8 @@
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
+#include <linux/profile.h>
+
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
@@ -170,7 +172,7 @@ void native_play_dead(void)
}
#endif
-asmlinkage void start_secondary(void)
+static asmlinkage void start_secondary(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
@@ -320,11 +322,13 @@ void smp_message_recv(unsigned int msg)
}
}
+#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+#endif
#ifdef CONFIG_MMU
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 01884054aeb2..4339c4cafa79 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -15,6 +15,8 @@
#include <linux/extable.h>
#include <linux/module.h> /* print_modules */
+
+#include <asm/ftrace.h>
#include <asm/unwinder.h>
#include <asm/traps.h>
@@ -170,14 +172,6 @@ BUILD_TRAP_HANDLER(bug)
force_sig(SIGTRAP);
}
-#ifdef CONFIG_DYNAMIC_FTRACE
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
BUILD_TRAP_HANDLER(nmi)
{
TRAP_HANDLER_DECL;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 6cdda3a621a1..1271b839a107 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -27,6 +27,7 @@
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
+#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>
@@ -568,7 +569,7 @@ uspace_segv:
/*
* SH-DSP support gerg@snapgear.com.
*/
-int is_dsp_inst(struct pt_regs *regs)
+static int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
@@ -590,7 +591,7 @@ int is_dsp_inst(struct pt_regs *regs)
return 0;
}
#else
-#define is_dsp_inst(regs) (0)
+static inline int is_dsp_inst(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 118744d349e2..cb4f0bb80c38 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -19,14 +19,14 @@ vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 -Wl,--hash-style=sysv
SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
$(obj)/vsyscall-trapa.so: \
-$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
$(call if_changed,syscall)
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vsyscall-dummy.o = -r
-$(obj)/vsyscall-dummy.o: $(src)/vsyscall.lds \
+$(obj)/vsyscall-dummy.o: $(obj)/vsyscall.lds \
$(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
$(call if_changed,syscall)
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 3e07074e0098..06fed5a21e8b 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -33,7 +33,8 @@
*/
/*
- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
+ * unsigned int csum_partial(const unsigned char *buf, int len,
+ * unsigned int sum);
*/
.text
@@ -45,31 +46,11 @@ ENTRY(csum_partial)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
+ mov r5, r1
mov r4, r0
- tst #3, r0 ! Check alignment.
- bt/s 2f ! Jump if alignment is ok.
- mov r4, r7 ! Keep a copy to check for alignment
+ tst #2, r0 ! Check alignment.
+ bt 2f ! Jump if alignment is ok.
!
- tst #1, r0 ! Check alignment.
- bt 21f ! Jump if alignment is boundary of 2bytes.
-
- ! buf is odd
- tst r5, r5
- add #-1, r5
- bt 9f
- mov.b @r4+, r0
- extu.b r0, r0
- addc r0, r6 ! t=0 from previous tst
- mov r6, r0
- shll8 r6
- shlr16 r0
- shlr8 r0
- or r0, r6
- mov r4, r0
- tst #2, r0
- bt 2f
-21:
- ! buf is 2 byte aligned (len could be 0)
add #-2, r5 ! Alignment uses up two bytes.
cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
@@ -77,17 +58,16 @@ ENTRY(csum_partial)
bra 6f
add #2, r5 ! r5 was < 2. Deal with it.
1:
+ mov r5, r1 ! Save new len for later use.
mov.w @r4+, r0
extu.w r0, r0
addc r0, r6
bf 2f
add #1, r6
2:
- ! buf is 4 byte aligned (len could be 0)
- mov r5, r1
mov #-5, r0
- shld r0, r1
- tst r1, r1
+ shld r0, r5
+ tst r5, r5
bt/s 4f ! if it's =0, go to 4f
clrt
.align 2
@@ -109,31 +89,30 @@ ENTRY(csum_partial)
addc r0, r6
addc r2, r6
movt r0
- dt r1
+ dt r5
bf/s 3b
cmp/eq #1, r0
- ! here, we know r1==0
- addc r1, r6 ! add carry to r6
+ ! here, we know r5==0
+ addc r5, r6 ! add carry to r6
4:
- mov r5, r0
+ mov r1, r0
and #0x1c, r0
tst r0, r0
- bt 6f
- ! 4 bytes or more remaining
- mov r0, r1
- shlr2 r1
+ bt/s 6f
+ mov r0, r5
+ shlr2 r5
mov #0, r2
5:
addc r2, r6
mov.l @r4+, r2
movt r0
- dt r1
+ dt r5
bf/s 5b
cmp/eq #1, r0
addc r2, r6
- addc r1, r6 ! r1==0 here, so it means add carry-bit
+ addc r5, r6 ! r5==0 here, so it means add carry-bit
6:
- ! 3 bytes or less remaining
+ mov r1, r5
mov #3, r0
and r0, r5
tst r5, r5
@@ -159,16 +138,6 @@ ENTRY(csum_partial)
mov #0, r0
addc r0, r6
9:
- ! Check if the buffer was misaligned, if so realign sum
- mov r7, r0
- tst #1, r0
- bt 10f
- mov r6, r0
- shll8 r6
- shlr16 r0
- shlr8 r0
- or r0, r6
-10:
rts
mov r6, r0
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index cdaef6501d76..b65703e06573 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -15,6 +15,8 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+
+#include <asm/fpu.h>
#include <asm/processor.h>
#include <asm/io.h>
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 862046f26981..46393b00137e 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -241,13 +241,14 @@ static void sh4_flush_cache_page(void *args)
if ((vma->vm_mm == current->active_mm))
vaddr = NULL;
else {
+ struct folio *folio = page_folio(page);
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
map_coherent = (current_cpu_data.dcache.n_aliases &&
- test_bit(PG_dcache_clean, &page->flags) &&
- page_mapcount(page));
+ test_bit(PG_dcache_clean, folio_flags(folio, 0)) &&
+ page_mapped(page));
if (map_coherent)
vaddr = kmap_coherent(page, address);
else
@@ -376,8 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
} while (--way_count != 0);
}
-extern void __weak sh4__flush_region_init(void);
-
/*
* SH-4 has virtually indexed and physically tagged cache.
*/
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index 24c58b7dc022..dec039a75664 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cache.h>
+#include <asm/cacheflush.h>
#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 9bcaa5619eab..6ebdeaff3021 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -84,7 +84,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
{
struct folio *folio = page_folio(page);
- if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
+ if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
test_bit(PG_dcache_clean, &folio->flags)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
@@ -320,30 +320,20 @@ void __init cpu_cache_init(void)
goto skip;
if (boot_cpu_data.type == CPU_J2) {
- extern void __weak j2_cache_init(void);
-
j2_cache_init();
} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
- extern void __weak sh2_cache_init(void);
-
sh2_cache_init();
}
if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
- extern void __weak sh2a_cache_init(void);
-
sh2a_cache_init();
}
if (boot_cpu_data.family == CPU_FAMILY_SH3) {
- extern void __weak sh3_cache_init(void);
-
sh3_cache_init();
if ((boot_cpu_data.type == CPU_SH7705) &&
(boot_cpu_data.dcache.sets == 512)) {
- extern void __weak sh7705_cache_init(void);
-
sh7705_cache_init();
}
}
@@ -351,14 +341,10 @@ void __init cpu_cache_init(void)
if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
(boot_cpu_data.family == CPU_FAMILY_SH4A) ||
(boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
- extern void __weak sh4_cache_init(void);
-
sh4_cache_init();
if ((boot_cpu_data.type == CPU_SH7786) ||
(boot_cpu_data.type == CPU_SHX3)) {
- extern void __weak shx3_cache_init(void);
-
shx3_cache_init();
}
}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6cb0ad73dbb9..ff209b55285a 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -70,13 +70,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte;
}
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index b82199878b45..bee329d4149a 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_colour_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
@@ -88,7 +88,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
@@ -106,7 +105,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_colour_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 78c4b6e6d33b..fa3dc9428a73 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,6 +10,8 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/uaccess.h>
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index cf7ce4b57359..3a4085ea0161 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -2,12 +2,14 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <asm/pgalloc.h>
+
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif
-void pgd_ctor(void *x)
+static void pgd_ctor(void *x)
{
pgd_t *pgd = x;
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 1c53868632ee..7d58578c15f4 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -14,6 +14,7 @@
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
+#include <asm/tlb.h>
/*
* Called with interrupts disabled.
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 2a03daa68f28..757451c3ea1d 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -59,8 +59,8 @@ endif
libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
-drivers-$(CONFIG_PM) += arch/sparc/power/
-drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
+drivers-$(CONFIG_PM) += arch/sparc/power/
+drivers-$(CONFIG_VIDEO) += arch/sparc/video/
boot := arch/sparc/boot
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index d0af82c240b7..8c1a3ca34eeb 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -38,21 +38,19 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v
/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
-unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+u8 __cmpxchg_u8(volatile u8 *m, u8 old, u8 new_);
+u16 __cmpxchg_u16(volatile u16 *m, u16 old, u16 new_);
+u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
- switch (size) {
- case 4:
- return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
- default:
- __cmpxchg_called_with_bad_pointer();
- break;
- }
- return old;
+ return
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);
}
#define arch_cmpxchg(ptr, o, n) \
@@ -63,7 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);
#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 9e85d57ac3f2..62bcafe38b1f 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -432,6 +432,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
#define VMALLOC_START _AC(0xfe600000,UL)
#define VMALLOC_END _AC(0xffc00000,UL)
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 4d1bafaba942..3fe429d73a65 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -875,6 +875,7 @@ static inline bool pud_leaf(pud_t pud)
return pte_val(pte) & _PAGE_PMD_HUGE;
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
@@ -956,7 +957,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) \
+#define move_pte(pte, old_addr, new_addr) \
({ \
pte_t newpte = (pte); \
if (tlb_type != hypervisor && pte_present(pte)) { \
diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/video.h
index 24440c0fda49..a6f48f52db58 100644
--- a/arch/sparc/include/asm/fb.h
+++ b/arch/sparc/include/asm/video.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SPARC_FB_H_
-#define _SPARC_FB_H_
+#ifndef _SPARC_VIDEO_H_
+#define _SPARC_VIDEO_H_
#include <linux/io.h>
+#include <linux/types.h>
#include <asm/page.h>
-struct fb_info;
+struct device;
#ifdef CONFIG_SPARC32
static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
@@ -18,8 +19,8 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
#define pgprot_framebuffer pgprot_framebuffer
#endif
-int fb_is_primary_device(struct fb_info *info);
-#define fb_is_primary_device fb_is_primary_device
+bool video_is_primary_device(struct device *dev);
+#define video_is_primary_device video_is_primary_device
static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
@@ -39,6 +40,6 @@ static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
}
#define fb_memset fb_memset_io
-#include <asm-generic/fb.h>
+#include <asm-generic/video.h>
-#endif /* _SPARC_FB_H_ */
+#endif /* _SPARC_VIDEO_H_ */
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 66c45a2764bc..b8c51cc23d96 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -21,36 +21,6 @@
#include "entry.h"
-#ifdef CONFIG_SPARC64
-
-#include <linux/jump_label.h>
-
-static void *module_map(unsigned long size)
-{
- if (PAGE_ALIGN(size) > MODULES_LEN)
- return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-#else
-static void *module_map(unsigned long size)
-{
- return vmalloc(size);
-}
-#endif /* CONFIG_SPARC64 */
-
-void *module_alloc(unsigned long size)
-{
- void *ret;
-
- ret = module_map(size);
- if (ret)
- memset(ret, 0, size);
-
- return ret;
-}
-
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 082a551897ed..08a19727795c 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -41,7 +41,7 @@ SYSCALL_DEFINE0(getpagesize)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
@@ -59,7 +59,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
if (!addr)
addr = TASK_UNMAPPED_BASE;
- info.flags = 0;
info.length = len;
info.low_limit = addr;
info.high_limit = TASK_SIZE;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 1e9a9e016237..d9c3b34ca744 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -93,7 +93,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
@@ -126,7 +126,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
@@ -154,7 +153,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long task_size = STACK_TOP32;
unsigned long addr = addr0;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -218,14 +217,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
- unsigned long (*get_area)(struct file *, unsigned long,
- unsigned long, unsigned long, unsigned long);
-
- get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_area(NULL, orig_addr, len, pgoff, flags);
+ return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
@@ -238,7 +233,8 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
align_goal = (64UL * 1024);
do {
- addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = mm_get_unmapped_area(current->mm, NULL, orig_addr,
+ len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
@@ -256,7 +252,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_area(NULL, orig_addr, len, pgoff, flags);
+ addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
return addr;
}
@@ -292,7 +288,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
gap == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
+ clear_bit(MMF_TOPDOWN, &mm->flags);
} else {
/* We know it's 32-bit */
unsigned long task_size = STACK_TOP32;
@@ -303,7 +299,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ set_bit(MMF_TOPDOWN, &mm->flags);
}
}
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cf80d1ae352b..8ae880ebf07a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -159,32 +159,27 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
}
EXPORT_SYMBOL(sp32___change_bit);
-unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
- unsigned long flags;
- u32 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return (unsigned long)prev;
-}
+#define CMPXCHG(T) \
+ T __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
+ \
+ return prev; \
+ }
+
+CMPXCHG(u8)
+CMPXCHG(u16)
+CMPXCHG(u32)
+CMPXCHG(u64)
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
-
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return prev;
-}
EXPORT_SYMBOL(__cmpxchg_u64);
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
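[Editorial note, not part of the patch] The CMPXCHG() macro above just stamps out one spinlock-based emulation per width, since sparc32 has no native compare-and-swap. For readability, this is roughly what CMPXCHG(u8) expands to (a sketch of the macro expansion; ATOMIC_HASH() is the existing per-address lock hash already used throughout atomic32.c):

	u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
	{
		unsigned long flags;
		u8 prev;

		/* Serialize against every other emulated atomic hashing to this lock. */
		spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
		if ((prev = *ptr) == old)
			*ptr = new;
		spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

		return prev;	/* caller checks prev == old to see if the swap happened */
	}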
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 809d993f6d88..2d1752108d77 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_EXECMEM) += execmem.o
diff --git a/arch/sparc/mm/execmem.c b/arch/sparc/mm/execmem.c
new file mode 100644
index 000000000000..0fac97dd5728
--- /dev/null
+++ b/arch/sparc/mm/execmem.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/execmem.h>
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
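[Editorial note, not part of the patch] With this range registered, sparc no longer needs its private module_map()/module_alloc() (removed from arch/sparc/kernel/module.c above); executable allocations go through the generic execmem API instead, as the BPF JIT conversion later in this patch shows. A minimal usage sketch; the helper names (jit_image_alloc/jit_image_free) are made up for the example, and it assumes EXECMEM_BPF falls back to the EXECMEM_DEFAULT range set up above when no dedicated BPF range is registered:

	#include <linux/execmem.h>

	/* Grab executable memory for generated code, then release it. */
	static void *jit_image_alloc(size_t size)
	{
		return execmem_alloc(EXECMEM_BPF, size);	/* was module_alloc(size) */
	}

	static void jit_image_free(void *image)
	{
		execmem_free(image);				/* was module_memfree(image) */
	}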
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index b432500c13a5..cc91ca7a1e18 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -31,17 +31,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
{
struct hstate *h = hstate_file(filp);
unsigned long task_size = TASK_SIZE;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
- info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -63,7 +61,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct hstate *h = hstate_file(filp);
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -73,7 +71,6 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
@@ -123,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
- if (mm->get_unmapped_area == arch_get_unmapped_area)
+ if (!test_bit(MMF_TOPDOWN, &mm->flags))
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
@@ -407,18 +404,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-int pmd_huge(pmd_t pmd)
-{
- return !pmd_none(pmd) &&
- (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
-}
-
-int pud_huge(pud_t pud)
-{
- return !pud_none(pud) &&
- (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
-}
-
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b44d79d778c7..8648a50afe88 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -183,12 +183,12 @@ static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
* hugetlb_pte_count.
*/
if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
- if (is_huge_zero_page(pmd_page(pmd)))
+ if (is_huge_zero_pmd(pmd))
mm->context.hugetlb_pte_count++;
else
mm->context.thp_pte_count++;
} else {
- if (is_huge_zero_page(pmd_page(orig)))
+ if (is_huge_zero_pmd(orig))
mm->context.hugetlb_pte_count--;
else
mm->context.thp_pte_count--;
@@ -249,6 +249,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
pmd_t old, entry;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
old = pmdp_establish(vma, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
@@ -259,7 +260,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
* Sanity check pmd before doing the actual decrement.
*/
if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
- !is_huge_zero_page(pmd_page(entry)))
+ !is_huge_zero_pmd(entry))
(vma->vm_mm)->context.thp_pte_count--;
return old;
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index da2df1e84ed4..bda2dbd3f4c5 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>
+#include <linux/execmem.h>
#include <asm/cacheflush.h>
#include <asm/ptrace.h>
@@ -713,7 +713,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
- module_memfree(image);
+ execmem_free(image);
return;
}
memcpy(image + proglen, temp, ilen);
@@ -736,7 +736,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
break;
}
if (proglen == oldproglen) {
- image = module_alloc(proglen);
+ image = execmem_alloc(EXECMEM_BPF, proglen);
if (!image)
goto out;
}
@@ -758,7 +758,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_memfree(fp->bpf_func);
+ execmem_free(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index fa0759bfe498..73bf0aea8baf 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1602,7 +1602,11 @@ skip_init_ctx:
bpf_flush_icache(header, (u8 *)header + header->size);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_off;
+ }
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index e8aef2c8ae99..243dbfc4609d 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -2,7 +2,6 @@
#
# Building vDSO images for sparc.
#
-UBSAN_SANITIZE := n
# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o
@@ -103,7 +102,6 @@ quiet_cmd_vdso = VDSO $@
cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-T $(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@'
+ sh $(src)/checkundef.sh '$(OBJDUMP)' '$@'
VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic
-GCOV_PROFILE := n
diff --git a/arch/sparc/video/Makefile b/arch/sparc/video/Makefile
index d4d83f1702c6..dcfbe7a5912c 100644
--- a/arch/sparc/video/Makefile
+++ b/arch/sparc/video/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_FB_CORE) += fbdev.o
+obj-y += video-common.o
diff --git a/arch/sparc/video/fbdev.c b/arch/sparc/video/fbdev.c
deleted file mode 100644
index bff66dd1909a..000000000000
--- a/arch/sparc/video/fbdev.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/console.h>
-#include <linux/fb.h>
-#include <linux/module.h>
-
-#include <asm/prom.h>
-
-int fb_is_primary_device(struct fb_info *info)
-{
- struct device *dev = info->device;
- struct device_node *node;
-
- if (console_set_on_cmdline)
- return 0;
-
- node = dev->of_node;
- if (node && node == of_console_device)
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL(fb_is_primary_device);
-
-MODULE_DESCRIPTION("Sparc fbdev helpers");
-MODULE_LICENSE("GPL");
diff --git a/arch/sparc/video/video-common.c b/arch/sparc/video/video-common.c
new file mode 100644
index 000000000000..2414380caadc
--- /dev/null
+++ b/arch/sparc/video/video-common.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <asm/prom.h>
+#include <asm/video.h>
+
+bool video_is_primary_device(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+
+ if (console_set_on_cmdline)
+ return false;
+
+ if (node && node == of_console_device)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(video_is_primary_device);
+
+MODULE_DESCRIPTION("Sparc video helpers");
+MODULE_LICENSE("GPL");
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index b2d834a29f3a..6fe34779291a 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -8,7 +8,6 @@ generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
generic-y += ftrace.h
generic-y += hw_irq.h
generic-y += irq_regs.h
@@ -28,3 +27,4 @@ generic-y += trace_clock.h
generic-y += kprobes.h
generic-y += mm_hooks.h
generic-y += vga.h
+generic-y += video.h
diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
index 66fe06db872f..1eb8b834fbec 100644
--- a/arch/um/include/asm/cpufeature.h
+++ b/arch/um/include/asm/cpufeature.h
@@ -38,8 +38,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
- x86_this_cpu_test_bit(bit, \
- (unsigned long __percpu *)&cpu_info.x86_capability))
+ x86_this_cpu_test_bit(bit, cpu_info.x86_capability))
/*
* This macro is for detection of features which need kernel
diff --git a/arch/um/include/shared/um_malloc.h b/arch/um/include/shared/um_malloc.h
index 13da93284c2c..bf503658f08e 100644
--- a/arch/um/include/shared/um_malloc.h
+++ b/arch/um/include/shared/um_malloc.h
@@ -11,7 +11,8 @@
extern void *uml_kmalloc(int size, int flags);
extern void kfree(const void *ptr);
-extern void *vmalloc(unsigned long size);
+extern void *vmalloc_noprof(unsigned long size);
+#define vmalloc(...) vmalloc_noprof(__VA_ARGS__)
extern void vfree(void *ptr);
#endif /* __UM_MALLOC_H__ */
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 811188be954c..f8567b933ffa 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -47,7 +47,7 @@ $(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE
$(call if_changed,quote2)
quiet_cmd_mkcapflags = MKCAP $@
- cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/../../x86/kernel/cpu/mkcapflags.sh $@ $^
+ cmd_mkcapflags = $(CONFIG_SHELL) $(src)/../../x86/kernel/cpu/mkcapflags.sh $@ $^
cpufeature = $(src)/../../x86/include/asm/cpufeatures.h
vmxfeature = $(src)/../../x86/include/asm/vmxfeatures.h
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 6a1f36df6a18..cf0ad89f5639 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -28,7 +28,7 @@ obj-y += net/
obj-$(CONFIG_KEXEC_FILE) += purgatory/
-obj-y += virt/svm/
+obj-y += virt/
# for cleaning
subdir- += boot tools
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 39886bab943a..671bc6fd557c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86_64
select SWIOTLB
select ARCH_HAS_ELFCORE_COMPAT
select ZONE_DMA32
+ select EXECMEM if DYNAMIC_FTRACE
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -62,6 +63,7 @@ config X86
select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@@ -168,6 +170,7 @@ config X86
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
select GENERIC_VDSO_TIME_NS
+ select GENERIC_VDSO_OVERFLOW_PROTECT
select GUP_GET_PXX_LOW_HIGH if X86_PAE
select HARDIRQS_SW_RESEND
select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
@@ -221,7 +224,7 @@ config X86
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP
+ select HAVE_GUP_FAST
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
@@ -464,6 +467,17 @@ config X86_X2APIC
If you don't know what to do here, say N.
+config X86_POSTED_MSI
+ bool "Enable MSI and MSI-x delivery by posted interrupts"
+ depends on X86_64 && IRQ_REMAP
+ help
+ This enables MSIs that are under interrupt remapping to be delivered as
+ posted interrupts to the host kernel. Interrupt throughput can
+ potentially be improved by coalescing CPU notifications during high
+ frequency bursts.
+
+ If you don't know what to do here, say N.
+
config X86_MPPARSE
bool "Enable MPS table" if ACPI
default y
@@ -500,12 +514,11 @@ config X86_FRED
When enabled, try to use Flexible Return and Event Delivery
instead of the legacy SYSCALL/SYSENTER/IDT architecture for
ring transitions and exception/interrupt handling if the
- system supports.
+ system supports it.
-if X86_32
config X86_BIGSMP
bool "Support for big SMP systems with more than 8 CPUs"
- depends on SMP
+ depends on SMP && X86_32
help
This option is needed for the systems that have more than 8 CPUs.
@@ -518,7 +531,10 @@ config X86_EXTENDED_PLATFORM
systems out there.)
If you enable this option then you'll be able to select support
- for the following (non-PC) 32 bit x86 platforms:
+ for the following non-PC x86 platforms, depending on the value of
+ CONFIG_64BIT.
+
+ 32-bit platforms (CONFIG_64BIT=n):
Goldfish (Android emulator)
AMD Elan
RDC R-321x SoC
@@ -526,28 +542,14 @@ config X86_EXTENDED_PLATFORM
STA2X11-based (e.g. Northville)
Moorestown MID devices
- If you have one of these systems, or if you want to build a
- generic distribution kernel, say Y here - otherwise say N.
-endif # X86_32
-
-if X86_64
-config X86_EXTENDED_PLATFORM
- bool "Support for extended (non-PC) x86 platforms"
- default y
- help
- If you disable this option then the kernel will only support
- standard PC platforms. (which covers the vast majority of
- systems out there.)
-
- If you enable this option then you'll be able to select support
- for the following (non-PC) 64 bit x86 platforms:
+ 64-bit platforms (CONFIG_64BIT=y):
Numascale NumaChip
ScaleMP vSMP
SGI Ultraviolet
If you have one of these systems, or if you want to build a
generic distribution kernel, say Y here - otherwise say N.
-endif # X86_64
+
# This is an alphabetically sorted list of 64 bit extended platforms
# Please maintain the alphabetic order if and when there are additions
config X86_NUMACHIP
@@ -2429,16 +2431,20 @@ source "kernel/livepatch/Kconfig"
endmenu
config CC_HAS_NAMED_AS
- def_bool CC_IS_GCC && GCC_VERSION >= 120100
+ def_bool CC_IS_GCC && GCC_VERSION >= 90100
+
+config CC_HAS_NAMED_AS_FIXED_SANITIZERS
+ def_bool CC_IS_GCC && GCC_VERSION >= 130300
config USE_X86_SEG_SUPPORT
def_bool y
depends on CC_HAS_NAMED_AS
#
- # -fsanitize=kernel-address (KASAN) is at the moment incompatible
- # with named address spaces - see GCC PR sanitizer/111736.
+ # -fsanitize=kernel-address (KASAN) and -fsanitize=thread
+ # (KCSAN) are incompatible with named address spaces with
+ # GCC < 13.3 - see GCC PR sanitizer/111736.
#
- depends on !KASAN
+ depends on !(KASAN || KCSAN) || CC_HAS_NAMED_AS_FIXED_SANITIZERS
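[Editorial note, not part of the patch] CC_HAS_NAMED_AS gates GCC's named address spaces, which USE_X86_SEG_SUPPORT uses so that per-CPU accesses can be emitted as plain %gs-relative loads and stores instead of inline asm. A minimal stand-alone illustration of the compiler feature (assumes an x86-64 GCC that provides __seg_gs; the function name is made up for the example):

	/* The __seg_gs qualifier puts the pointed-to object in the %gs address
	 * space, so the dereference compiles to a %gs-prefixed memory access. */
	static inline unsigned int gs_load_u32(const unsigned int __seg_gs *p)
	{
		return *p;
	}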
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
@@ -2486,17 +2492,21 @@ config PREFIX_SYMBOLS
def_bool y
depends on CALL_PADDING && !CFI_CLANG
-menuconfig SPECULATION_MITIGATIONS
- bool "Mitigations for speculative execution vulnerabilities"
+menuconfig CPU_MITIGATIONS
+ bool "Mitigations for CPU vulnerabilities"
default y
help
- Say Y here to enable options which enable mitigations for
- speculative execution hardware vulnerabilities.
+ Say Y here to enable options which enable mitigations for hardware
+ vulnerabilities (usually related to speculative execution).
+ Mitigations can be disabled or restricted to SMT systems at runtime
+ via the "mitigations" kernel parameter.
+
+ If you say N, all mitigations will be disabled. This CANNOT be
+ overridden at runtime.
- If you say N, all mitigations will be disabled. You really
- should know what you are doing to say so.
+ Say 'Y', unless you really know what you are doing.
-if SPECULATION_MITIGATIONS
+if CPU_MITIGATIONS
config MITIGATION_PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
@@ -2631,6 +2641,16 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+config MITIGATION_SPECTRE_BHI
+ bool "Mitigate Spectre-BHB (Branch History Injection)"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
+ where the branch history buffer is poisoned to speculatively steer
+ indirect branches.
+ See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
endif
config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index 8ad41da301e5..59aedf32c4ea 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -25,6 +25,16 @@ config AS_GFNI
help
Supported by binutils >= 2.30 and LLVM integrated assembler
+config AS_VAES
+ def_bool $(as-instr,vaesenc %ymm0$(comma)%ymm1$(comma)%ymm2)
+ help
+ Supported by binutils >= 2.30 and LLVM integrated assembler
+
+config AS_VPCLMULQDQ
+ def_bool $(as-instr,vpclmulqdq \$0x10$(comma)%ymm0$(comma)%ymm1$(comma)%ymm2)
+ help
+ Supported by binutils >= 2.30 and LLVM integrated assembler
+
config AS_WRUSS
def_bool $(as-instr,wrussq %rax$(comma)(%rbx))
help
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 662d9d4033e6..e71e5252763f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -251,8 +251,6 @@ archheaders:
libs-y += arch/x86/lib/
-core-y += arch/x86/virt/
-
# drivers-y are linked after core-y
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
@@ -260,7 +258,7 @@ drivers-$(CONFIG_PCI) += arch/x86/pci/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/x86/power/
-drivers-$(CONFIG_FB_CORE) += arch/x86/video/
+drivers-$(CONFIG_VIDEO) += arch/x86/video/
####
# boot loader support. Several targets are kept for legacy purposes
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3cece19b7473..9cc0ff6e9067 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,19 +9,6 @@
# Changed by many, many contributors over the years.
#
-# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KMSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-
-# Kernel does not boot with kcov instrumentation here.
-# One of the problems observed was insertion of __sanitizer_cov_trace_pc()
-# callback into middle of per-cpu data enabling code. Thus the callback observed
-# inconsistent state and crashed. We are interested mostly in syscall coverage,
-# so boot code is not interesting anyway.
-KCOV_INSTRUMENT := n
-
# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
@@ -69,8 +56,7 @@ KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
+KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
@@ -129,7 +115,7 @@ targets += mtools.conf
# genimage.sh requires bash, but it also has a bunch of other
# external dependencies.
quiet_cmd_genimage = GENIMAGE $3
-cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
+ cmd_genimage = $(BASH) $(src)/genimage.sh $2 $3 $(obj)/bzImage \
$(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index e9522c6893be..243ee86cb1b1 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -17,15 +17,6 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
-# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KMSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst
@@ -59,8 +50,6 @@ KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-GCOV_PROFILE := n
-UBSAN_SANITIZE :=n
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index 719e939050cb..876fc6d46a13 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -15,10 +15,12 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
+#include <asm/setup.h>
.code64
.text
@@ -149,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START(efi32_stub_entry)
call 1f
1: popl %ecx
+ leal (efi32_boot_args - 1b)(%ecx), %ebx
/* Clear BSS */
xorl %eax, %eax
@@ -163,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
popl %ecx
popl %edx
popl %esi
+ movl %esi, 8(%ebx)
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
@@ -239,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
*
* Arguments: %ecx image handle
* %edx EFI system table pointer
- * %esi struct bootparams pointer (or NULL when not using
- * the EFI handover protocol)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
@@ -266,9 +268,18 @@ SYM_FUNC_START_LOCAL(efi32_entry)
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
movl %edx, 4(%ebx)
- movl %esi, 8(%ebx)
movb $0x0, 12(%ebx) // efi_is64
+ /*
+ * Allocate some memory for a temporary struct boot_params, which only
+ * needs the minimal pieces that startup_32() relies on.
+ */
+ subl $PARAM_SIZE, %esp
+ movl %esp, %esi
+ movl $PAGE_SIZE, BP_kernel_alignment(%esi)
+ movl $_end - 1b, BP_init_size(%esi)
+ subl $startup_32 - 1b, BP_init_size(%esi)
+
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
@@ -294,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
- xorl %esi, %esi
- jmp efi32_entry // pass %ecx, %edx, %esi
+ jmp efi32_entry // pass %ecx, %edx
// no other registers remain live
2: popl %edi // restore callee-save registers
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index bf4a10a5794f..1dcb794c5479 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
call sev_enable
#endif
+ /* Preserve only the CR4 bits that must be preserved, and clear the rest */
+ movq %cr4, %rax
+ andl $(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
+ movq %rax, %cr4
+
/*
* configure_5level_paging() updates the number of paging levels using
* a trampoline in 32-bit addressable memory if the current number does
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index ec71846d28c9..0457a9d7e515 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -335,26 +335,6 @@ finish:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
-static void enforce_vmpl0(void)
-{
- u64 attrs;
- int err;
-
- /*
- * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
- * higher) privilege level. Here, clear the VMPL1 permission mask of the
- * GHCB page. If the guest is not running at VMPL0, this will fail.
- *
- * If the guest is running at VMPL0, it will succeed. Even if that operation
- * modifies permission bits, it is still ok to do so currently because Linux
- * SNP guests are supported only on VMPL0 so VMPL1 or higher permission masks
- * changing is a don't-care.
- */
- attrs = 1;
- if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, attrs))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
-}
-
/*
* SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
* guest side implementation for proper functioning of the guest. If any
@@ -413,6 +393,85 @@ void snp_check_features(void)
}
}
+/* Search for Confidential Computing blob in the EFI config table. */
+static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
+{
+ unsigned long cfg_table_pa;
+ unsigned int cfg_table_len;
+ int ret;
+
+ ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
+ if (ret)
+ return NULL;
+
+ return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
+ cfg_table_len,
+ EFI_CC_BLOB_GUID);
+}
+
+/*
+ * Initial set up of SNP relies on information provided by the
+ * Confidential Computing blob, which can be passed to the boot kernel
+ * by firmware/bootloader in the following ways:
+ *
+ * - via an entry in the EFI config table
+ * - via a setup_data structure, as defined by the Linux Boot Protocol
+ *
+ * Scan for the blob in that order.
+ */
+static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ cc_info = find_cc_blob_efi(bp);
+ if (cc_info)
+ goto found_cc_info;
+
+ cc_info = find_cc_blob_setup_data(bp);
+ if (!cc_info)
+ return NULL;
+
+found_cc_info:
+ if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+
+ return cc_info;
+}
+
+/*
+ * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
+ * will verify the SNP CPUID/MSR bits.
+ */
+static bool early_snp_init(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ if (!bp)
+ return false;
+
+ cc_info = find_cc_blob(bp);
+ if (!cc_info)
+ return false;
+
+ /*
+ * If a SNP-specific Confidential Computing blob is present, then
+ * firmware/bootloader have indicated SNP support. Verifying this
+ * involves CPUID checks which will be more reliable if the SNP
+ * CPUID table is used. See comments over snp_setup_cpuid_table() for
+ * more details.
+ */
+ setup_cpuid_table(cc_info);
+
+ /*
+ * Pass run-time kernel a pointer to CC info via boot_params so EFI
+ * config table doesn't need to be searched again during early startup
+ * phase.
+ */
+ bp->cc_blob_address = (u32)(unsigned long)cc_info;
+
+ return true;
+}
+
/*
* sev_check_cpu_support - Check for SEV support in the CPU capabilities
*
@@ -463,7 +522,7 @@ void sev_enable(struct boot_params *bp)
bp->cc_blob_address = 0;
/*
- * Do an initial SEV capability check before snp_init() which
+ * Do an initial SEV capability check before early_snp_init() which
* loads the CPUID page and the same checks afterwards are done
* without the hypervisor and are trustworthy.
*
@@ -478,7 +537,7 @@ void sev_enable(struct boot_params *bp)
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
*/
- snp = snp_init(bp);
+ snp = early_snp_init(bp);
/* Now repeat the checks with the SNP CPUID table. */
@@ -509,7 +568,20 @@ void sev_enable(struct boot_params *bp)
if (!(get_hv_features() & GHCB_HV_FT_SNP))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
- enforce_vmpl0();
+ /*
+ * Enforce running at VMPL0.
+ *
+ * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
+ * higher) privilege level. Here, clear the VMPL1 permission mask of the
+ * GHCB page. If the guest is not running at VMPL0, this will fail.
+ *
+ * If the guest is running at VMPL0, it will succeed. Even if that operation
+ * modifies permission bits, it is still ok to do so currently because Linux
+ * SNP guests are supported only on VMPL0, so VMPL1 or higher
+ * permission mask changes are a don't-care.
+ */
+ if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, 1))
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
@@ -535,85 +607,6 @@ u64 sev_get_status(void)
return m.q;
}
-/* Search for Confidential Computing blob in the EFI config table. */
-static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
-{
- unsigned long cfg_table_pa;
- unsigned int cfg_table_len;
- int ret;
-
- ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
- if (ret)
- return NULL;
-
- return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
- cfg_table_len,
- EFI_CC_BLOB_GUID);
-}
-
-/*
- * Initial set up of SNP relies on information provided by the
- * Confidential Computing blob, which can be passed to the boot kernel
- * by firmware/bootloader in the following ways:
- *
- * - via an entry in the EFI config table
- * - via a setup_data structure, as defined by the Linux Boot Protocol
- *
- * Scan for the blob in that order.
- */
-static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- cc_info = find_cc_blob_efi(bp);
- if (cc_info)
- goto found_cc_info;
-
- cc_info = find_cc_blob_setup_data(bp);
- if (!cc_info)
- return NULL;
-
-found_cc_info:
- if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
-
- return cc_info;
-}
-
-/*
- * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
- * will verify the SNP CPUID/MSR bits.
- */
-bool snp_init(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- if (!bp)
- return false;
-
- cc_info = find_cc_blob(bp);
- if (!cc_info)
- return false;
-
- /*
- * If a SNP-specific Confidential Computing blob is present, then
- * firmware/bootloader have indicated SNP support. Verifying this
- * involves CPUID checks which will be more reliable if the SNP
- * CPUID table is used. See comments over snp_setup_cpuid_table() for
- * more details.
- */
- setup_cpuid_table(cc_info);
-
- /*
- * Pass run-time kernel a pointer to CC info via boot_params so EFI
- * config table doesn't need to be searched again during early startup
- * phase.
- */
- bp->cc_blob_address = (u32)(unsigned long)cc_info;
-
- return true;
-}
-
void sev_prep_identity_maps(unsigned long top_level_pgt)
{
/*
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index c4ea5258ab55..9049f390d834 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -119,8 +119,8 @@ static void init_heap(void)
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
- asm("leal %P1(%%esp),%0"
- : "=r" (stack_end) : "i" (-STACK_SIZE));
+ asm("leal %n1(%%esp),%0"
+ : "=r" (stack_end) : "i" (STACK_SIZE));
heap_end = (char *)
((size_t)boot_params.hdr.heap_end_ptr + 0x200);
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index 1237beeb9540..51dc14b714f6 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -246,6 +246,7 @@ int vsprintf(char *buf, const char *fmt, va_list args)
case 'x':
flags |= SMALL;
+ fallthrough;
case 'X':
base = 16;
break;
@@ -253,6 +254,8 @@ int vsprintf(char *buf, const char *fmt, va_list args)
case 'd':
case 'i':
flags |= SIGN;
+ break;
+
case 'u':
break;
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index d07be9d05cd0..b31ef2424d19 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -3,19 +3,28 @@
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
+ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#include <linux/export.h>
#include <linux/cc_platform.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <asm/archrandom.h>
#include <asm/coco.h>
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
u64 cc_mask __ro_after_init;
+static struct cc_attr_flags {
+ __u64 host_sev_snp : 1,
+ __resv : 63;
+} cc_flags;
+
static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
switch (attr) {
@@ -89,6 +98,9 @@ static bool noinstr amd_cc_platform_has(enum cc_attr attr)
case CC_ATTR_GUEST_SEV_SNP:
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
+ case CC_ATTR_HOST_SEV_SNP:
+ return cc_flags.host_sev_snp;
+
default:
return false;
}
@@ -148,3 +160,84 @@ u64 cc_mkdec(u64 val)
}
}
EXPORT_SYMBOL_GPL(cc_mkdec);
+
+static void amd_cc_platform_clear(enum cc_attr attr)
+{
+ switch (attr) {
+ case CC_ATTR_HOST_SEV_SNP:
+ cc_flags.host_sev_snp = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+void cc_platform_clear(enum cc_attr attr)
+{
+ switch (cc_vendor) {
+ case CC_VENDOR_AMD:
+ amd_cc_platform_clear(attr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void amd_cc_platform_set(enum cc_attr attr)
+{
+ switch (attr) {
+ case CC_ATTR_HOST_SEV_SNP:
+ cc_flags.host_sev_snp = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+void cc_platform_set(enum cc_attr attr)
+{
+ switch (cc_vendor) {
+ case CC_VENDOR_AMD:
+ amd_cc_platform_set(attr);
+ break;
+ default:
+ break;
+ }
+}
+
+__init void cc_random_init(void)
+{
+ /*
+ * The seed is 32 bytes (in units of longs), which is 256 bits, which
+ * is the security level that the RNG is targeting.
+ */
+ unsigned long rng_seed[32 / sizeof(long)];
+ size_t i, longs;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ return;
+
+ /*
+ * Since the CoCo threat model includes the host, the only reliable
+ * source of entropy that can be neither observed nor manipulated is
+ * RDRAND. Usually, RDRAND failure is considered tolerable, but since
+ * CoCo guests have no other unobservable source of entropy, it's
+ * important to at least ensure the RNG gets some initial random seeds.
+ */
+ for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
+ longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
+
+ /*
+ * A zero return value means that the guest doesn't have RDRAND
+ * or the CPU is physically broken, and in both cases that
+ * means most crypto inside of the CoCo instance will be
+ * broken, defeating the purpose of CoCo in the first place. So
+ * just panic here because it's absolutely unsafe to continue
+ * executing.
+ */
+ if (longs == 0)
+ panic("RDRAND is defective.");
+ }
+ add_device_randomness(rng_seed, sizeof(rng_seed));
+ memzero_explicit(rng_seed, sizeof(rng_seed));
+}
diff --git a/arch/x86/configs/hardening.config b/arch/x86/configs/hardening.config
index 7b497f3b7bc3..de319852a1e9 100644
--- a/arch/x86/configs/hardening.config
+++ b/arch/x86/configs/hardening.config
@@ -10,5 +10,8 @@ CONFIG_INTEL_IOMMU_DEFAULT_ON=y
CONFIG_INTEL_IOMMU_SVM=y
CONFIG_AMD_IOMMU=y
+# Enforce CET Indirect Branch Tracking in the kernel.
+CONFIG_X86_KERNEL_IBT=y
+
# Enable CET Shadow Stack for userspace.
CONFIG_X86_USER_SHADOW_STACK=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 9aa46093c91b..9c5ce5613738 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -48,7 +48,8 @@ chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o \
+ aes_ctrby8_avx-x86_64.o aes-xts-avx-x86_64.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
new file mode 100644
index 000000000000..48f97b79f7a9
--- /dev/null
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AES-XTS for modern x86_64 CPUs
+ *
+ * Copyright 2024 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+/*
+ * This file implements AES-XTS for modern x86_64 CPUs. To handle the
+ * complexities of coding for x86 SIMD, e.g. where every vector length needs
+ * different code, it uses a macro to generate several implementations that
+ * share similar source code but are targeted at different CPUs, listed below:
+ *
+ * AES-NI + AVX
+ * - 128-bit vectors (1 AES block per vector)
+ * - VEX-coded instructions
+ * - xmm0-xmm15
+ * - This is for older CPUs that lack VAES but do have AVX.
+ *
+ * VAES + VPCLMULQDQ + AVX2
+ * - 256-bit vectors (2 AES blocks per vector)
+ * - VEX-coded instructions
+ * - ymm0-ymm15
+ * - This is for CPUs that have VAES but lack AVX512 or AVX10,
+ * e.g. Intel's Alder Lake and AMD's Zen 3.
+ *
+ * VAES + VPCLMULQDQ + AVX10/256 + BMI2
+ * - 256-bit vectors (2 AES blocks per vector)
+ * - EVEX-coded instructions
+ * - ymm0-ymm31
+ * - This is for CPUs that have AVX512 but where using zmm registers causes
+ * downclocking, and for CPUs that have AVX10/256 but not AVX10/512.
+ * - By "AVX10/256" we really mean (AVX512BW + AVX512VL) || AVX10/256.
+ * To avoid confusion with 512-bit, we just write AVX10/256.
+ *
+ * VAES + VPCLMULQDQ + AVX10/512 + BMI2
+ * - Same as the previous one, but upgrades to 512-bit vectors
+ * (4 AES blocks per vector) in zmm0-zmm31.
+ * - This is for CPUs that have good AVX512 or AVX10/512 support.
+ *
+ * This file doesn't have an implementation for AES-NI alone (without AVX), as
+ * the lack of VEX would make all the assembly code different.
+ *
+ * When we use VAES, we also use VPCLMULQDQ to parallelize the computation of
+ * the XTS tweaks. This avoids a bottleneck. Currently there don't seem to be
+ * any CPUs that support VAES but not VPCLMULQDQ. If that changes, we might
+ * need to start also providing an implementation using VAES alone.
+ *
+ * The AES-XTS implementations in this file support everything required by the
+ * crypto API, including support for arbitrary input lengths and multi-part
+ * processing. However, they are most heavily optimized for the common case of
+ * power-of-2 length inputs that are processed in a single part (disk sectors).
+ */
+
+#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+
+.section .rodata
+.p2align 4
+.Lgf_poly:
+ // The low 64 bits of this value represent the polynomial x^7 + x^2 + x
+ // + 1. It is the value that must be XOR'd into the low 64 bits of the
+ // tweak each time a 1 is carried out of the high 64 bits.
+ //
+ // The high 64 bits of this value is just the internal carry bit that
+ // exists when there's a carry out of the low 64 bits of the tweak.
+ .quad 0x87, 1
+
+ // This table contains constants for vpshufb and vpblendvb, used to
+ // handle variable byte shifts and blending during ciphertext stealing
+ // on CPUs that don't support AVX10-style masking.
+.Lcts_permute_table:
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+.text
+
+// Function parameters
+.set KEY, %rdi // Initially points to crypto_aes_ctx, then is
+ // advanced to point to 7th-from-last round key
+.set SRC, %rsi // Pointer to next source data
+.set DST, %rdx // Pointer to next destination data
+.set LEN, %ecx // Remaining length in bytes
+.set LEN8, %cl
+.set LEN64, %rcx
+.set TWEAK, %r8 // Pointer to next tweak
+
+// %rax holds the AES key length in bytes.
+.set KEYLEN, %eax
+.set KEYLEN64, %rax
+
+// %r9-r11 are available as temporaries.
+
+.macro _define_Vi i
+.if VL == 16
+ .set V\i, %xmm\i
+.elseif VL == 32
+ .set V\i, %ymm\i
+.elseif VL == 64
+ .set V\i, %zmm\i
+.else
+ .error "Unsupported Vector Length (VL)"
+.endif
+.endm
+
+.macro _define_aliases
+ // Define register aliases V0-V15, or V0-V31 if all 32 SIMD registers
+ // are available, that map to the xmm, ymm, or zmm registers according
+ // to the selected Vector Length (VL).
+ _define_Vi 0
+ _define_Vi 1
+ _define_Vi 2
+ _define_Vi 3
+ _define_Vi 4
+ _define_Vi 5
+ _define_Vi 6
+ _define_Vi 7
+ _define_Vi 8
+ _define_Vi 9
+ _define_Vi 10
+ _define_Vi 11
+ _define_Vi 12
+ _define_Vi 13
+ _define_Vi 14
+ _define_Vi 15
+.if USE_AVX10
+ _define_Vi 16
+ _define_Vi 17
+ _define_Vi 18
+ _define_Vi 19
+ _define_Vi 20
+ _define_Vi 21
+ _define_Vi 22
+ _define_Vi 23
+ _define_Vi 24
+ _define_Vi 25
+ _define_Vi 26
+ _define_Vi 27
+ _define_Vi 28
+ _define_Vi 29
+ _define_Vi 30
+ _define_Vi 31
+.endif
+
+ // V0-V3 hold the data blocks during the main loop, or temporary values
+ // otherwise. V4-V5 hold temporary values.
+
+ // V6-V9 hold XTS tweaks. Each 128-bit lane holds one tweak.
+ .set TWEAK0_XMM, %xmm6
+ .set TWEAK0, V6
+ .set TWEAK1_XMM, %xmm7
+ .set TWEAK1, V7
+ .set TWEAK2, V8
+ .set TWEAK3, V9
+
+ // V10-V13 are used for computing the next values of TWEAK[0-3].
+ .set NEXT_TWEAK0, V10
+ .set NEXT_TWEAK1, V11
+ .set NEXT_TWEAK2, V12
+ .set NEXT_TWEAK3, V13
+
+ // V14 holds the constant from .Lgf_poly, copied to all 128-bit lanes.
+ .set GF_POLY_XMM, %xmm14
+ .set GF_POLY, V14
+
+ // V15 holds the key for AES "round 0", copied to all 128-bit lanes.
+ .set KEY0_XMM, %xmm15
+ .set KEY0, V15
+
+ // If 32 SIMD registers are available, then V16-V29 hold the remaining
+ // AES round keys, copied to all 128-bit lanes.
+ //
+ // AES-128, AES-192, and AES-256 use different numbers of round keys.
+ // To allow handling all three variants efficiently, we align the round
+ // keys to the *end* of this register range. I.e., AES-128 uses
+ // KEY5-KEY14, AES-192 uses KEY3-KEY14, and AES-256 uses KEY1-KEY14.
+ // (All also use KEY0 for the XOR-only "round" at the beginning.)
+.if USE_AVX10
+ .set KEY1_XMM, %xmm16
+ .set KEY1, V16
+ .set KEY2_XMM, %xmm17
+ .set KEY2, V17
+ .set KEY3_XMM, %xmm18
+ .set KEY3, V18
+ .set KEY4_XMM, %xmm19
+ .set KEY4, V19
+ .set KEY5_XMM, %xmm20
+ .set KEY5, V20
+ .set KEY6_XMM, %xmm21
+ .set KEY6, V21
+ .set KEY7_XMM, %xmm22
+ .set KEY7, V22
+ .set KEY8_XMM, %xmm23
+ .set KEY8, V23
+ .set KEY9_XMM, %xmm24
+ .set KEY9, V24
+ .set KEY10_XMM, %xmm25
+ .set KEY10, V25
+ .set KEY11_XMM, %xmm26
+ .set KEY11, V26
+ .set KEY12_XMM, %xmm27
+ .set KEY12, V27
+ .set KEY13_XMM, %xmm28
+ .set KEY13, V28
+ .set KEY14_XMM, %xmm29
+ .set KEY14, V29
+.endif
+ // V30-V31 are currently unused.
+.endm
+
+// Move a vector between memory and a register.
+.macro _vmovdqu src, dst
+.if VL < 64
+ vmovdqu \src, \dst
+.else
+ vmovdqu8 \src, \dst
+.endif
+.endm
+
+// Broadcast a 128-bit value into a vector.
+.macro _vbroadcast128 src, dst
+.if VL == 16 && !USE_AVX10
+ vmovdqu \src, \dst
+.elseif VL == 32 && !USE_AVX10
+ vbroadcasti128 \src, \dst
+.else
+ vbroadcasti32x4 \src, \dst
+.endif
+.endm
+
+// XOR two vectors together.
+.macro _vpxor src1, src2, dst
+.if USE_AVX10
+ vpxord \src1, \src2, \dst
+.else
+ vpxor \src1, \src2, \dst
+.endif
+.endm
+
+// XOR three vectors together.
+.macro _xor3 src1, src2, src3_and_dst
+.if USE_AVX10
+ // vpternlogd with immediate 0x96 is a three-argument XOR.
+ vpternlogd $0x96, \src1, \src2, \src3_and_dst
+.else
+ vpxor \src1, \src3_and_dst, \src3_and_dst
+ vpxor \src2, \src3_and_dst, \src3_and_dst
+.endif
+.endm
+
+// Given a 128-bit XTS tweak in the xmm register \src, compute the next tweak
+// (by multiplying by the polynomial 'x') and write it to \dst.
+.macro _next_tweak src, tmp, dst
+ vpshufd $0x13, \src, \tmp
+ vpaddq \src, \src, \dst
+ vpsrad $31, \tmp, \tmp
+ vpand GF_POLY_XMM, \tmp, \tmp
+ vpxor \tmp, \dst, \dst
+.endm
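[Editorial note, not part of the patch] The scalar effect of _next_tweak is a multiply-by-x in GF(2^128), using the 0x87 reduction constant from .Lgf_poly. In portable C it would look roughly like this (xts_mul_x is a made-up name; t[0] holds the low 64 bits of the tweak):

	/* Shift the 128-bit tweak left by one bit; if a bit falls off the top,
	 * fold it back in by XORing 0x87 into the low 64 bits. */
	static void xts_mul_x(unsigned long long t[2])
	{
		unsigned long long carry = t[1] >> 63;

		t[1] = (t[1] << 1) | (t[0] >> 63);
		t[0] = (t[0] << 1) ^ (carry * 0x87);
	}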
+
+// Given the XTS tweak(s) in the vector \src, compute the next vector of
+// tweak(s) (by multiplying by the polynomial 'x^(VL/16)') and write it to \dst.
+//
+// If VL > 16, then there are multiple tweaks, and we use vpclmulqdq to compute
+// all tweaks in the vector in parallel. If VL=16, we just do the regular
+// computation without vpclmulqdq, as it's the faster method for a single tweak.
+.macro _next_tweakvec src, tmp1, tmp2, dst
+.if VL == 16
+ _next_tweak \src, \tmp1, \dst
+.else
+ vpsrlq $64 - VL/16, \src, \tmp1
+ vpclmulqdq $0x01, GF_POLY, \tmp1, \tmp2
+ vpslldq $8, \tmp1, \tmp1
+ vpsllq $VL/16, \src, \dst
+ _xor3 \tmp1, \tmp2, \dst
+.endif
+.endm
+
+// Given the first XTS tweak at (TWEAK), compute the first set of tweaks and
+// store them in the vector registers TWEAK0-TWEAK3. Clobbers V0-V5.
+.macro _compute_first_set_of_tweaks
+ vmovdqu (TWEAK), TWEAK0_XMM
+ _vbroadcast128 .Lgf_poly(%rip), GF_POLY
+.if VL == 16
+ // With VL=16, multiplying by x serially is fastest.
+ _next_tweak TWEAK0, %xmm0, TWEAK1
+ _next_tweak TWEAK1, %xmm0, TWEAK2
+ _next_tweak TWEAK2, %xmm0, TWEAK3
+.else
+.if VL == 32
+ // Compute the second block of TWEAK0.
+ _next_tweak TWEAK0_XMM, %xmm0, %xmm1
+ vinserti128 $1, %xmm1, TWEAK0, TWEAK0
+.elseif VL == 64
+ // Compute the remaining blocks of TWEAK0.
+ _next_tweak TWEAK0_XMM, %xmm0, %xmm1
+ _next_tweak %xmm1, %xmm0, %xmm2
+ _next_tweak %xmm2, %xmm0, %xmm3
+ vinserti32x4 $1, %xmm1, TWEAK0, TWEAK0
+ vinserti32x4 $2, %xmm2, TWEAK0, TWEAK0
+ vinserti32x4 $3, %xmm3, TWEAK0, TWEAK0
+.endif
+ // Compute TWEAK[1-3] from TWEAK0.
+ vpsrlq $64 - 1*VL/16, TWEAK0, V0
+ vpsrlq $64 - 2*VL/16, TWEAK0, V2
+ vpsrlq $64 - 3*VL/16, TWEAK0, V4
+ vpclmulqdq $0x01, GF_POLY, V0, V1
+ vpclmulqdq $0x01, GF_POLY, V2, V3
+ vpclmulqdq $0x01, GF_POLY, V4, V5
+ vpslldq $8, V0, V0
+ vpslldq $8, V2, V2
+ vpslldq $8, V4, V4
+ vpsllq $1*VL/16, TWEAK0, TWEAK1
+ vpsllq $2*VL/16, TWEAK0, TWEAK2
+ vpsllq $3*VL/16, TWEAK0, TWEAK3
+.if USE_AVX10
+ vpternlogd $0x96, V0, V1, TWEAK1
+ vpternlogd $0x96, V2, V3, TWEAK2
+ vpternlogd $0x96, V4, V5, TWEAK3
+.else
+ vpxor V0, TWEAK1, TWEAK1
+ vpxor V2, TWEAK2, TWEAK2
+ vpxor V4, TWEAK3, TWEAK3
+ vpxor V1, TWEAK1, TWEAK1
+ vpxor V3, TWEAK2, TWEAK2
+ vpxor V5, TWEAK3, TWEAK3
+.endif
+.endif
+.endm
+
+// Do one step in computing the next set of tweaks using the method of just
+// multiplying by x repeatedly (the same method _next_tweak uses).
+.macro _tweak_step_mulx i
+.if \i == 0
+ .set PREV_TWEAK, TWEAK3
+ .set NEXT_TWEAK, NEXT_TWEAK0
+.elseif \i == 5
+ .set PREV_TWEAK, NEXT_TWEAK0
+ .set NEXT_TWEAK, NEXT_TWEAK1
+.elseif \i == 10
+ .set PREV_TWEAK, NEXT_TWEAK1
+ .set NEXT_TWEAK, NEXT_TWEAK2
+.elseif \i == 15
+ .set PREV_TWEAK, NEXT_TWEAK2
+ .set NEXT_TWEAK, NEXT_TWEAK3
+.endif
+.if \i >= 0 && \i < 20 && \i % 5 == 0
+ vpshufd $0x13, PREV_TWEAK, V5
+.elseif \i >= 0 && \i < 20 && \i % 5 == 1
+ vpaddq PREV_TWEAK, PREV_TWEAK, NEXT_TWEAK
+.elseif \i >= 0 && \i < 20 && \i % 5 == 2
+ vpsrad $31, V5, V5
+.elseif \i >= 0 && \i < 20 && \i % 5 == 3
+ vpand GF_POLY, V5, V5
+.elseif \i >= 0 && \i < 20 && \i % 5 == 4
+ vpxor V5, NEXT_TWEAK, NEXT_TWEAK
+.elseif \i == 1000
+ vmovdqa NEXT_TWEAK0, TWEAK0
+ vmovdqa NEXT_TWEAK1, TWEAK1
+ vmovdqa NEXT_TWEAK2, TWEAK2
+ vmovdqa NEXT_TWEAK3, TWEAK3
+.endif
+.endm
+
+// Do one step in computing the next set of tweaks using the VPCLMULQDQ method
+// (the same method _next_tweakvec uses for VL > 16). This means multiplying
+// each tweak by x^(4*VL/16) independently. Since 4*VL/16 is a multiple of 8
+// when VL > 16 (which it is here), the needed shift amounts are byte-aligned,
+// which allows the use of vpsrldq and vpslldq to do 128-bit wide shifts.
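+//
+// For example, with VL=32 each tweak is multiplied by x^8, so the vpsrldq and
+// vpslldq shift amounts below evaluate to 15 and 1 bytes; with VL=64 the
+// multiplier is x^16 and the shifts are 14 and 2 bytes.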
+.macro _tweak_step_pclmul i
+.if \i == 0
+ vpsrldq $(128 - 4*VL/16) / 8, TWEAK0, NEXT_TWEAK0
+.elseif \i == 2
+ vpsrldq $(128 - 4*VL/16) / 8, TWEAK1, NEXT_TWEAK1
+.elseif \i == 4
+ vpsrldq $(128 - 4*VL/16) / 8, TWEAK2, NEXT_TWEAK2
+.elseif \i == 6
+ vpsrldq $(128 - 4*VL/16) / 8, TWEAK3, NEXT_TWEAK3
+.elseif \i == 8
+ vpclmulqdq $0x00, GF_POLY, NEXT_TWEAK0, NEXT_TWEAK0
+.elseif \i == 10
+ vpclmulqdq $0x00, GF_POLY, NEXT_TWEAK1, NEXT_TWEAK1
+.elseif \i == 12
+ vpclmulqdq $0x00, GF_POLY, NEXT_TWEAK2, NEXT_TWEAK2
+.elseif \i == 14
+ vpclmulqdq $0x00, GF_POLY, NEXT_TWEAK3, NEXT_TWEAK3
+.elseif \i == 1000
+ vpslldq $(4*VL/16) / 8, TWEAK0, TWEAK0
+ vpslldq $(4*VL/16) / 8, TWEAK1, TWEAK1
+ vpslldq $(4*VL/16) / 8, TWEAK2, TWEAK2
+ vpslldq $(4*VL/16) / 8, TWEAK3, TWEAK3
+ _vpxor NEXT_TWEAK0, TWEAK0, TWEAK0
+ _vpxor NEXT_TWEAK1, TWEAK1, TWEAK1
+ _vpxor NEXT_TWEAK2, TWEAK2, TWEAK2
+ _vpxor NEXT_TWEAK3, TWEAK3, TWEAK3
+.endif
+.endm
+
+// _tweak_step does one step of the computation of the next set of tweaks from
+// TWEAK[0-3]. To complete all steps, this is invoked with increasing values of
+// \i that include at least 0 through 19, then 1000 which signals the last step.
+//
+// This is used to interleave the computation of the next set of tweaks with the
+// AES en/decryptions, which increases performance in some cases.
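+//
+// Concretely, the main loop invokes this through _vaes_4x, which passes
+// \i = 2*(round-5) and 2*(round-5)+1, so AES rounds 5-14 cover steps 0-19.
+// The negative \i values produced by rounds 1-4 match none of the cases in
+// the helper macros above and are therefore no-ops.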
+.macro _tweak_step i
+.if VL == 16
+ _tweak_step_mulx \i
+.else
+ _tweak_step_pclmul \i
+.endif
+.endm
+
+.macro _setup_round_keys enc
+
+ // Select either the encryption round keys or the decryption round keys.
+.if \enc
+ .set OFFS, 0
+.else
+ .set OFFS, 240
+.endif
+
+ // Load the round key for "round 0".
+ _vbroadcast128 OFFS(KEY), KEY0
+
+ // Increment KEY to make it so that 7*16(KEY) is the last round key.
+ // For AES-128, increment by 3*16, resulting in the 10 round keys (not
+ // counting the zero-th round key which was just loaded into KEY0) being
+ // -2*16(KEY) through 7*16(KEY). For AES-192, increment by 5*16 and use
+ // 12 round keys -4*16(KEY) through 7*16(KEY). For AES-256, increment
+ // by 7*16 and use 14 round keys -6*16(KEY) through 7*16(KEY).
+ //
+ // This rebasing provides two benefits. First, it makes the offset to
+ // any round key be in the range [-96, 112], fitting in a signed byte.
+ // This shortens VEX-encoded instructions that access the later round
+ // keys which otherwise would need 4-byte offsets. Second, it makes it
+ // easy to do AES-128 and AES-192 by skipping irrelevant rounds at the
+ // beginning. Skipping rounds at the end doesn't work as well because
+ // the last round needs different instructions.
+ //
+ // An alternative approach would be to roll up all the round loops. We
+ // don't do that because it isn't compatible with caching the round keys
+ // in registers which we do when possible (see below), and also because
+ // it seems unwise to rely *too* heavily on the CPU's branch predictor.
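+ //
+ // For example, for AES-256 encryption: OFFS=0 and KEYLEN64=32, so the lea
+ // below advances KEY by 4*32 - 16 = 112 bytes, placing round key 1 at
+ // -6*16(KEY) and round key 14 (the last one) at 7*16(KEY).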
+ lea OFFS-16(KEY, KEYLEN64, 4), KEY
+
+ // If all 32 SIMD registers are available, cache all the round keys.
+.if USE_AVX10
+ cmp $24, KEYLEN
+ jl .Laes128\@
+ je .Laes192\@
+ _vbroadcast128 -6*16(KEY), KEY1
+ _vbroadcast128 -5*16(KEY), KEY2
+.Laes192\@:
+ _vbroadcast128 -4*16(KEY), KEY3
+ _vbroadcast128 -3*16(KEY), KEY4
+.Laes128\@:
+ _vbroadcast128 -2*16(KEY), KEY5
+ _vbroadcast128 -1*16(KEY), KEY6
+ _vbroadcast128 0*16(KEY), KEY7
+ _vbroadcast128 1*16(KEY), KEY8
+ _vbroadcast128 2*16(KEY), KEY9
+ _vbroadcast128 3*16(KEY), KEY10
+ _vbroadcast128 4*16(KEY), KEY11
+ _vbroadcast128 5*16(KEY), KEY12
+ _vbroadcast128 6*16(KEY), KEY13
+ _vbroadcast128 7*16(KEY), KEY14
+.endif
+.endm
+
+// Do a single round of AES encryption (if \enc==1) or decryption (if \enc==0)
+// on the block(s) in \data using the round key(s) in \key. The register length
+// determines the number of AES blocks en/decrypted.
+.macro _vaes enc, last, key, data
+.if \enc
+.if \last
+ vaesenclast \key, \data, \data
+.else
+ vaesenc \key, \data, \data
+.endif
+.else
+.if \last
+ vaesdeclast \key, \data, \data
+.else
+ vaesdec \key, \data, \data
+.endif
+.endif
+.endm
+
+// Do a single round of AES en/decryption on the block(s) in \data, using the
+// same key for all block(s). The round key is loaded from the appropriate
+// register or memory location for round \i. May clobber V4.
+.macro _vaes_1x enc, last, i, xmm_suffix, data
+.if USE_AVX10
+ _vaes \enc, \last, KEY\i\xmm_suffix, \data
+.else
+.ifnb \xmm_suffix
+ _vaes \enc, \last, (\i-7)*16(KEY), \data
+.else
+ _vbroadcast128 (\i-7)*16(KEY), V4
+ _vaes \enc, \last, V4, \data
+.endif
+.endif
+.endm
+
+// Do a single round of AES en/decryption on the blocks in registers V0-V3,
+// using the same key for all blocks. The round key is loaded from the
+// appropriate register or memory location for round \i. In addition, does two
+// steps of the computation of the next set of tweaks. May clobber V4.
+.macro _vaes_4x enc, last, i
+.if USE_AVX10
+ _tweak_step (2*(\i-5))
+ _vaes \enc, \last, KEY\i, V0
+ _vaes \enc, \last, KEY\i, V1
+ _tweak_step (2*(\i-5) + 1)
+ _vaes \enc, \last, KEY\i, V2
+ _vaes \enc, \last, KEY\i, V3
+.else
+ _vbroadcast128 (\i-7)*16(KEY), V4
+ _tweak_step (2*(\i-5))
+ _vaes \enc, \last, V4, V0
+ _vaes \enc, \last, V4, V1
+ _tweak_step (2*(\i-5) + 1)
+ _vaes \enc, \last, V4, V2
+ _vaes \enc, \last, V4, V3
+.endif
+.endm
+
+// Do tweaked AES en/decryption (i.e., XOR with \tweak, then AES en/decrypt,
+// then XOR with \tweak again) of the block(s) in \data. To process a single
+// block, use xmm registers and set \xmm_suffix=_XMM. To process a vector of
+// length VL, use V* registers and leave \xmm_suffix empty. May clobber V4.
+.macro _aes_crypt enc, xmm_suffix, tweak, data
+ _xor3 KEY0\xmm_suffix, \tweak, \data
+ cmp $24, KEYLEN
+ jl .Laes128\@
+ je .Laes192\@
+ _vaes_1x \enc, 0, 1, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 2, \xmm_suffix, \data
+.Laes192\@:
+ _vaes_1x \enc, 0, 3, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 4, \xmm_suffix, \data
+.Laes128\@:
+ _vaes_1x \enc, 0, 5, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 6, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 7, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 8, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 9, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 10, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 11, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 12, \xmm_suffix, \data
+ _vaes_1x \enc, 0, 13, \xmm_suffix, \data
+ _vaes_1x \enc, 1, 14, \xmm_suffix, \data
+ _vpxor \tweak, \data, \data
+.endm
+
+.macro _aes_xts_crypt enc
+ _define_aliases
+
+.if !\enc
+ // When decrypting a message whose length isn't a multiple of the AES
+ // block length, exclude the last full block from the main loop by
+ // subtracting 16 from LEN. This is needed because ciphertext stealing
+ // decryption uses the last two tweaks in reverse order. We'll handle
+ // the last full block and the partial block specially at the end.
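+ //
+ // For example, if LEN = 600 (37 full blocks plus 8 bytes), LEN becomes
+ // 584: the loops below then process 576 bytes, and the ciphertext
+ // stealing code at the end handles the remaining 16 + 8 = 24 bytes.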
+ lea -16(LEN), %eax
+ test $15, LEN8
+ cmovnz %eax, LEN
+.endif
+
+ // Load the AES key length: 16 (AES-128), 24 (AES-192), or 32 (AES-256).
+ movl 480(KEY), KEYLEN
+
+ // Set up the pointer to the round keys and cache as many as possible.
+ _setup_round_keys \enc
+
+ // Compute the first set of tweaks TWEAK[0-3].
+ _compute_first_set_of_tweaks
+
+ sub $4*VL, LEN
+ jl .Lhandle_remainder\@
+
+.Lmain_loop\@:
+ // This is the main loop, en/decrypting 4*VL bytes per iteration.
+
+ // XOR each source block with its tweak and the zero-th round key.
+.if USE_AVX10
+ vmovdqu8 0*VL(SRC), V0
+ vmovdqu8 1*VL(SRC), V1
+ vmovdqu8 2*VL(SRC), V2
+ vmovdqu8 3*VL(SRC), V3
+ vpternlogd $0x96, TWEAK0, KEY0, V0
+ vpternlogd $0x96, TWEAK1, KEY0, V1
+ vpternlogd $0x96, TWEAK2, KEY0, V2
+ vpternlogd $0x96, TWEAK3, KEY0, V3
+.else
+ vpxor 0*VL(SRC), KEY0, V0
+ vpxor 1*VL(SRC), KEY0, V1
+ vpxor 2*VL(SRC), KEY0, V2
+ vpxor 3*VL(SRC), KEY0, V3
+ vpxor TWEAK0, V0, V0
+ vpxor TWEAK1, V1, V1
+ vpxor TWEAK2, V2, V2
+ vpxor TWEAK3, V3, V3
+.endif
+ cmp $24, KEYLEN
+ jl .Laes128\@
+ je .Laes192\@
+ // Do all the AES rounds on the data blocks, interleaved with
+ // the computation of the next set of tweaks.
+ _vaes_4x \enc, 0, 1
+ _vaes_4x \enc, 0, 2
+.Laes192\@:
+ _vaes_4x \enc, 0, 3
+ _vaes_4x \enc, 0, 4
+.Laes128\@:
+ _vaes_4x \enc, 0, 5
+ _vaes_4x \enc, 0, 6
+ _vaes_4x \enc, 0, 7
+ _vaes_4x \enc, 0, 8
+ _vaes_4x \enc, 0, 9
+ _vaes_4x \enc, 0, 10
+ _vaes_4x \enc, 0, 11
+ _vaes_4x \enc, 0, 12
+ _vaes_4x \enc, 0, 13
+ _vaes_4x \enc, 1, 14
+
+ // XOR in the tweaks again.
+ _vpxor TWEAK0, V0, V0
+ _vpxor TWEAK1, V1, V1
+ _vpxor TWEAK2, V2, V2
+ _vpxor TWEAK3, V3, V3
+
+ // Store the destination blocks.
+ _vmovdqu V0, 0*VL(DST)
+ _vmovdqu V1, 1*VL(DST)
+ _vmovdqu V2, 2*VL(DST)
+ _vmovdqu V3, 3*VL(DST)
+
+ // Finish computing the next set of tweaks.
+ _tweak_step 1000
+
+ add $4*VL, SRC
+ add $4*VL, DST
+ sub $4*VL, LEN
+ jge .Lmain_loop\@
+
+ // Check for the uncommon case where the data length isn't a multiple of
+ // 4*VL. Handle it out-of-line in order to optimize for the common
+ // case. In the common case, just fall through to the ret.
+ test $4*VL-1, LEN8
+ jnz .Lhandle_remainder\@
+.Ldone\@:
+ // Store the next tweak back to *TWEAK to support continuation calls.
+ vmovdqu TWEAK0_XMM, (TWEAK)
+.if VL > 16
+ vzeroupper
+.endif
+ RET
+
+.Lhandle_remainder\@:
+
+ // En/decrypt any remaining full blocks, one vector at a time.
+.if VL > 16
+ add $3*VL, LEN // Undo extra sub of 4*VL, then sub VL.
+ jl .Lvec_at_a_time_done\@
+.Lvec_at_a_time\@:
+ _vmovdqu (SRC), V0
+ _aes_crypt \enc, , TWEAK0, V0
+ _vmovdqu V0, (DST)
+ _next_tweakvec TWEAK0, V0, V1, TWEAK0
+ add $VL, SRC
+ add $VL, DST
+ sub $VL, LEN
+ jge .Lvec_at_a_time\@
+.Lvec_at_a_time_done\@:
+ add $VL-16, LEN // Undo extra sub of VL, then sub 16.
+.else
+ add $4*VL-16, LEN // Undo extra sub of 4*VL, then sub 16.
+.endif
+
+ // En/decrypt any remaining full blocks, one at a time.
+ jl .Lblock_at_a_time_done\@
+.Lblock_at_a_time\@:
+ vmovdqu (SRC), %xmm0
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ vmovdqu %xmm0, (DST)
+ _next_tweak TWEAK0_XMM, %xmm0, TWEAK0_XMM
+ add $16, SRC
+ add $16, DST
+ sub $16, LEN
+ jge .Lblock_at_a_time\@
+.Lblock_at_a_time_done\@:
+ add $16, LEN // Undo the extra sub of 16.
+ // Now 0 <= LEN <= 15. If LEN is zero, we're done.
+ jz .Ldone\@
+
+ // Otherwise 1 <= LEN <= 15, but the real remaining length is 16 + LEN.
+ // Do ciphertext stealing to process the last 16 + LEN bytes.
+
+.if \enc
+ // If encrypting, the main loop already encrypted the last full block to
+ // create the CTS intermediate ciphertext. Prepare for the rest of CTS
+ // by rewinding the pointers and loading the intermediate ciphertext.
+ sub $16, SRC
+ sub $16, DST
+ vmovdqu (DST), %xmm0
+.else
+ // If decrypting, the main loop didn't decrypt the last full block
+ // because CTS decryption uses the last two tweaks in reverse order.
+ // Do it now by advancing the tweak and decrypting the last full block.
+ _next_tweak TWEAK0_XMM, %xmm0, TWEAK1_XMM
+ vmovdqu (SRC), %xmm0
+ _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0
+.endif
+
+.if USE_AVX10
+ // Create a mask that has the first LEN bits set.
+ mov $-1, %r9d
+ bzhi LEN, %r9d, %r9d
+ kmovd %r9d, %k1
+
+ // Swap the first LEN bytes of the en/decryption of the last full block
+ // with the partial block. Note that to support in-place en/decryption,
+ // the load from the src partial block must happen before the store to
+ // the dst partial block.
+ vmovdqa %xmm0, %xmm1
+ vmovdqu8 16(SRC), %xmm0{%k1}
+ vmovdqu8 %xmm1, 16(DST){%k1}
+.else
+ lea .Lcts_permute_table(%rip), %r9
+
+ // Load the src partial block, left-aligned. Note that to support
+ // in-place en/decryption, this must happen before the store to the dst
+ // partial block.
+ vmovdqu (SRC, LEN64, 1), %xmm1
+
+ // Shift the first LEN bytes of the en/decryption of the last full block
+ // to the end of a register, then store it to DST+LEN. This stores the
+ // dst partial block. It also writes to the second part of the dst last
+ // full block, but that part is overwritten later.
+ vpshufb (%r9, LEN64, 1), %xmm0, %xmm2
+ vmovdqu %xmm2, (DST, LEN64, 1)
+
+ // Make xmm3 contain [16-LEN,16-LEN+1,...,14,15,0x80,0x80,...].
+ sub LEN64, %r9
+ vmovdqu 32(%r9), %xmm3
+
+ // Shift the src partial block to the beginning of its register.
+ vpshufb %xmm3, %xmm1, %xmm1
+
+ // Do a blend to generate the src partial block followed by the second
+ // part of the en/decryption of the last full block.
+ vpblendvb %xmm3, %xmm0, %xmm1, %xmm0
+.endif
+ // En/decrypt again and store the last full block.
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ vmovdqu %xmm0, (DST)
+ jmp .Ldone\@
+.endm
+
+// void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
+// u8 iv[AES_BLOCK_SIZE]);
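+//
+// Encrypts |iv| in place with |tweak_key|, i.e. the standard XTS step of
+// computing the first tweak T = AES-Enc(tweak_key, IV) once per message.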
+SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
+ vmovdqu (%rsi), %xmm0
+ vpxor (%rdi), %xmm0, %xmm0
+ movl 480(%rdi), %eax // AES key length
+ lea -16(%rdi, %rax, 4), %rdi
+ cmp $24, %eax
+ jl .Lencrypt_iv_aes128
+ je .Lencrypt_iv_aes192
+ vaesenc -6*16(%rdi), %xmm0, %xmm0
+ vaesenc -5*16(%rdi), %xmm0, %xmm0
+.Lencrypt_iv_aes192:
+ vaesenc -4*16(%rdi), %xmm0, %xmm0
+ vaesenc -3*16(%rdi), %xmm0, %xmm0
+.Lencrypt_iv_aes128:
+ vaesenc -2*16(%rdi), %xmm0, %xmm0
+ vaesenc -1*16(%rdi), %xmm0, %xmm0
+ vaesenc 0*16(%rdi), %xmm0, %xmm0
+ vaesenc 1*16(%rdi), %xmm0, %xmm0
+ vaesenc 2*16(%rdi), %xmm0, %xmm0
+ vaesenc 3*16(%rdi), %xmm0, %xmm0
+ vaesenc 4*16(%rdi), %xmm0, %xmm0
+ vaesenc 5*16(%rdi), %xmm0, %xmm0
+ vaesenc 6*16(%rdi), %xmm0, %xmm0
+ vaesenclast 7*16(%rdi), %xmm0, %xmm0
+ vmovdqu %xmm0, (%rsi)
+ RET
+SYM_FUNC_END(aes_xts_encrypt_iv)
+
+// Below are the actual AES-XTS encryption and decryption functions,
+// instantiated from the above macro. They all have the following prototype:
+//
+// void (*xts_asm_func)(const struct crypto_aes_ctx *key,
+// const u8 *src, u8 *dst, unsigned int len,
+// u8 tweak[AES_BLOCK_SIZE]);
+//
+// |key| is the data key. |tweak| contains the next tweak; the encryption of
+// the original IV with the tweak key was already done. This function supports
+// incremental computation, but |len| must always be >= 16 (AES_BLOCK_SIZE), and
+// |len| must be a multiple of 16 except on the last call. If |len| is a
+// multiple of 16, then this function updates |tweak| to contain the next tweak.
+
+.set VL, 16
+.set USE_AVX10, 0
+SYM_TYPED_FUNC_START(aes_xts_encrypt_aesni_avx)
+ _aes_xts_crypt 1
+SYM_FUNC_END(aes_xts_encrypt_aesni_avx)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_aesni_avx)
+ _aes_xts_crypt 0
+SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
+
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+.set VL, 32
+.set USE_AVX10, 0
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
+ _aes_xts_crypt 1
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx2)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2)
+ _aes_xts_crypt 0
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx2)
+
+.set VL, 32
+.set USE_AVX10, 1
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_256)
+ _aes_xts_crypt 1
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_256)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_256)
+ _aes_xts_crypt 0
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_256)
+
+.set VL, 64
+.set USE_AVX10, 1
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_512)
+ _aes_xts_crypt 1
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_512)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_512)
+ _aes_xts_crypt 0
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_512)
+#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 411d8c83e88a..39066b57a70e 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -83,9 +83,6 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.text
-
-#define STACK_OFFSET 8*3
-
#define AadHash 16*0
#define AadLen 16*1
#define InLen (16*1)+8
@@ -116,11 +113,6 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define arg4 rcx
#define arg5 r8
#define arg6 r9
-#define arg7 STACK_OFFSET+8(%rsp)
-#define arg8 STACK_OFFSET+16(%rsp)
-#define arg9 STACK_OFFSET+24(%rsp)
-#define arg10 STACK_OFFSET+32(%rsp)
-#define arg11 STACK_OFFSET+40(%rsp)
#define keysize 2*15*16(%arg1)
#endif
@@ -1507,184 +1499,6 @@ _esb_loop_\@:
MOVADQ (%r10),\TMP1
aesenclast \TMP1,\XMM0
.endm
-/*****************************************************************************
-* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
-* struct gcm_context_data *data
-* // Context data
-* u8 *out, // Plaintext output. Encrypt in-place is allowed.
-* const u8 *in, // Ciphertext input
-* u64 plaintext_len, // Length of data in bytes for decryption.
-* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association)
-* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
-* // concatenated with 0x00000001. 16-byte aligned pointer.
-* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary.
-* const u8 *aad, // Additional Authentication Data (AAD)
-* u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
-* u8 *auth_tag, // Authenticated Tag output. The driver will compare this to the
-* // given authentication tag and only return the plaintext if they match.
-* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16
-* // (most likely), 12 or 8.
-*
-* Assumptions:
-*
-* keys:
-* keys are pre-expanded and aligned to 16 bytes. we are using the first
-* set of 11 keys in the data structure void *aes_ctx
-*
-* iv:
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | Salt (From the SA) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | Initialization Vector |
-* | (This is the sequence number from IPSec header) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x1 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-*
-*
-* AAD:
-* AAD padded to 128 bits with 0
-* for example, assume AAD is a u32 vector
-*
-* if AAD is 8 bytes:
-* AAD[3] = {A0, A1};
-* padded AAD in xmm register = {A1 A0 0 0}
-*
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | SPI (A1) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 32-bit Sequence Number (A0) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x0 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-* AAD Format with 32-bit Sequence Number
-*
-* if AAD is 12 bytes:
-* AAD[3] = {A0, A1, A2};
-* padded AAD in xmm register = {A2 A1 A0 0}
-*
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | SPI (A2) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 64-bit Extended Sequence Number {A1,A0} |
-* | |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x0 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-* AAD Format with 64-bit Extended Sequence Number
-*
-* poly = x^128 + x^127 + x^126 + x^121 + 1
-*
-*****************************************************************************/
-SYM_FUNC_START(aesni_gcm_dec)
- FUNC_SAVE
-
- GCM_INIT %arg6, arg7, arg8, arg9
- GCM_ENC_DEC dec
- GCM_COMPLETE arg10, arg11
- FUNC_RESTORE
- RET
-SYM_FUNC_END(aesni_gcm_dec)
-
-
-/*****************************************************************************
-* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
-* struct gcm_context_data *data
-* // Context data
-* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
-* const u8 *in, // Plaintext input
-* u64 plaintext_len, // Length of data in bytes for encryption.
-* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association)
-* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
-* // concatenated with 0x00000001. 16-byte aligned pointer.
-* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary.
-* const u8 *aad, // Additional Authentication Data (AAD)
-* u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
-* u8 *auth_tag, // Authenticated Tag output.
-* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
-* // 12 or 8.
-*
-* Assumptions:
-*
-* keys:
-* keys are pre-expanded and aligned to 16 bytes. we are using the
-* first set of 11 keys in the data structure void *aes_ctx
-*
-*
-* iv:
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | Salt (From the SA) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | Initialization Vector |
-* | (This is the sequence number from IPSec header) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x1 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-*
-*
-* AAD:
-* AAD padded to 128 bits with 0
-* for example, assume AAD is a u32 vector
-*
-* if AAD is 8 bytes:
-* AAD[3] = {A0, A1};
-* padded AAD in xmm register = {A1 A0 0 0}
-*
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | SPI (A1) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 32-bit Sequence Number (A0) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x0 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-* AAD Format with 32-bit Sequence Number
-*
-* if AAD is 12 bytes:
-* AAD[3] = {A0, A1, A2};
-* padded AAD in xmm register = {A2 A1 A0 0}
-*
-* 0 1 2 3
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | SPI (A2) |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 64-bit Extended Sequence Number {A1,A0} |
-* | |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-* | 0x0 |
-* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-*
-* AAD Format with 64-bit Extended Sequence Number
-*
-* poly = x^128 + x^127 + x^126 + x^121 + 1
-***************************************************************************/
-SYM_FUNC_START(aesni_gcm_enc)
- FUNC_SAVE
-
- GCM_INIT %arg6, arg7, arg8, arg9
- GCM_ENC_DEC enc
-
- GCM_COMPLETE arg10, arg11
- FUNC_RESTORE
- RET
-SYM_FUNC_END(aesni_gcm_enc)
/*****************************************************************************
* void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1820,8 +1634,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b)
SYM_FUNC_END(_key_expansion_256b)
/*
- * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
- * unsigned int key_len)
+ * void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ * unsigned int key_len)
*/
SYM_FUNC_START(aesni_set_key)
FRAME_BEGIN
@@ -1926,7 +1740,6 @@ SYM_FUNC_START(aesni_set_key)
sub $0x10, UKEYP
cmp TKEYP, KEYP
jb .Ldec_key_loop
- xor AREG, AREG
#ifndef __x86_64__
popl KEYP
#endif
@@ -2826,28 +2639,24 @@ SYM_FUNC_END(aesni_ctr_enc)
.previous
/*
- * _aesni_gf128mul_x_ble: internal ABI
- * Multiply in GF(2^128) for XTS IVs
+ * _aesni_gf128mul_x_ble: Multiply in GF(2^128) for XTS IVs
* input:
* IV: current IV
* GF128MUL_MASK == mask with 0x87 and 0x01
* output:
* IV: next IV
* changed:
- * CTR: == temporary value
+ * KEY: == temporary value
*/
-#define _aesni_gf128mul_x_ble() \
- pshufd $0x13, IV, KEY; \
- paddq IV, IV; \
- psrad $31, KEY; \
- pand GF128MUL_MASK, KEY; \
- pxor KEY, IV;
+.macro _aesni_gf128mul_x_ble
+ pshufd $0x13, IV, KEY
+ paddq IV, IV
+ psrad $31, KEY
+ pand GF128MUL_MASK, KEY
+ pxor KEY, IV
+.endm
-/*
- * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
- * const u8 *src, unsigned int len, le128 *iv)
- */
-SYM_FUNC_START(aesni_xts_encrypt)
+.macro _aesni_xts_crypt enc
FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
@@ -2866,35 +2675,46 @@ SYM_FUNC_START(aesni_xts_encrypt)
movups (IVP), IV
mov 480(KEYP), KLEN
+.if !\enc
+ add $240, KEYP
-.Lxts_enc_loop4:
+ test $15, LEN
+ jz .Lxts_loop4\@
+ sub $16, LEN
+.endif
+
+.Lxts_loop4\@:
sub $64, LEN
- jl .Lxts_enc_1x
+ jl .Lxts_1x\@
movdqa IV, STATE1
movdqu 0x00(INP), IN
pxor IN, STATE1
movdqu IV, 0x00(OUTP)
- _aesni_gf128mul_x_ble()
+ _aesni_gf128mul_x_ble
movdqa IV, STATE2
movdqu 0x10(INP), IN
pxor IN, STATE2
movdqu IV, 0x10(OUTP)
- _aesni_gf128mul_x_ble()
+ _aesni_gf128mul_x_ble
movdqa IV, STATE3
movdqu 0x20(INP), IN
pxor IN, STATE3
movdqu IV, 0x20(OUTP)
- _aesni_gf128mul_x_ble()
+ _aesni_gf128mul_x_ble
movdqa IV, STATE4
movdqu 0x30(INP), IN
pxor IN, STATE4
movdqu IV, 0x30(OUTP)
+.if \enc
call _aesni_enc4
+.else
+ call _aesni_dec4
+.endif
movdqu 0x00(OUTP), IN
pxor IN, STATE1
@@ -2912,17 +2732,17 @@ SYM_FUNC_START(aesni_xts_encrypt)
pxor IN, STATE4
movdqu STATE4, 0x30(OUTP)
- _aesni_gf128mul_x_ble()
+ _aesni_gf128mul_x_ble
add $64, INP
add $64, OUTP
test LEN, LEN
- jnz .Lxts_enc_loop4
+ jnz .Lxts_loop4\@
-.Lxts_enc_ret_iv:
+.Lxts_ret_iv\@:
movups IV, (IVP)
-.Lxts_enc_ret:
+.Lxts_ret\@:
#ifndef __x86_64__
popl KLEN
popl KEYP
@@ -2932,201 +2752,60 @@ SYM_FUNC_START(aesni_xts_encrypt)
FRAME_END
RET
-.Lxts_enc_1x:
+.Lxts_1x\@:
add $64, LEN
- jz .Lxts_enc_ret_iv
+ jz .Lxts_ret_iv\@
+.if \enc
sub $16, LEN
- jl .Lxts_enc_cts4
+ jl .Lxts_cts4\@
+.endif
-.Lxts_enc_loop1:
+.Lxts_loop1\@:
movdqu (INP), STATE
+.if \enc
pxor IV, STATE
call _aesni_enc1
- pxor IV, STATE
- _aesni_gf128mul_x_ble()
-
- test LEN, LEN
- jz .Lxts_enc_out
-
+.else
add $16, INP
sub $16, LEN
- jl .Lxts_enc_cts1
-
- movdqu STATE, (OUTP)
- add $16, OUTP
- jmp .Lxts_enc_loop1
-
-.Lxts_enc_out:
- movdqu STATE, (OUTP)
- jmp .Lxts_enc_ret_iv
-
-.Lxts_enc_cts4:
- movdqa STATE4, STATE
- sub $16, OUTP
-
-.Lxts_enc_cts1:
-#ifndef __x86_64__
- lea .Lcts_permute_table, T1
-#else
- lea .Lcts_permute_table(%rip), T1
-#endif
- add LEN, INP /* rewind input pointer */
- add $16, LEN /* # bytes in final block */
- movups (INP), IN1
-
- mov T1, IVP
- add $32, IVP
- add LEN, T1
- sub LEN, IVP
- add OUTP, LEN
-
- movups (T1), %xmm4
- movaps STATE, IN2
- pshufb %xmm4, STATE
- movups STATE, (LEN)
-
- movups (IVP), %xmm0
- pshufb %xmm0, IN1
- pblendvb IN2, IN1
- movaps IN1, STATE
-
+ jl .Lxts_cts1\@
pxor IV, STATE
- call _aesni_enc1
+ call _aesni_dec1
+.endif
pxor IV, STATE
+ _aesni_gf128mul_x_ble
- movups STATE, (OUTP)
- jmp .Lxts_enc_ret
-SYM_FUNC_END(aesni_xts_encrypt)
-
-/*
- * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
- * const u8 *src, unsigned int len, le128 *iv)
- */
-SYM_FUNC_START(aesni_xts_decrypt)
- FRAME_BEGIN
-#ifndef __x86_64__
- pushl IVP
- pushl LEN
- pushl KEYP
- pushl KLEN
- movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
- movl (FRAME_OFFSET+24)(%esp), OUTP # dst
- movl (FRAME_OFFSET+28)(%esp), INP # src
- movl (FRAME_OFFSET+32)(%esp), LEN # len
- movl (FRAME_OFFSET+36)(%esp), IVP # iv
- movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
-#else
- movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK
-#endif
- movups (IVP), IV
-
- mov 480(KEYP), KLEN
- add $240, KEYP
-
- test $15, LEN
- jz .Lxts_dec_loop4
- sub $16, LEN
-
-.Lxts_dec_loop4:
- sub $64, LEN
- jl .Lxts_dec_1x
-
- movdqa IV, STATE1
- movdqu 0x00(INP), IN
- pxor IN, STATE1
- movdqu IV, 0x00(OUTP)
-
- _aesni_gf128mul_x_ble()
- movdqa IV, STATE2
- movdqu 0x10(INP), IN
- pxor IN, STATE2
- movdqu IV, 0x10(OUTP)
-
- _aesni_gf128mul_x_ble()
- movdqa IV, STATE3
- movdqu 0x20(INP), IN
- pxor IN, STATE3
- movdqu IV, 0x20(OUTP)
-
- _aesni_gf128mul_x_ble()
- movdqa IV, STATE4
- movdqu 0x30(INP), IN
- pxor IN, STATE4
- movdqu IV, 0x30(OUTP)
-
- call _aesni_dec4
-
- movdqu 0x00(OUTP), IN
- pxor IN, STATE1
- movdqu STATE1, 0x00(OUTP)
-
- movdqu 0x10(OUTP), IN
- pxor IN, STATE2
- movdqu STATE2, 0x10(OUTP)
-
- movdqu 0x20(OUTP), IN
- pxor IN, STATE3
- movdqu STATE3, 0x20(OUTP)
-
- movdqu 0x30(OUTP), IN
- pxor IN, STATE4
- movdqu STATE4, 0x30(OUTP)
-
- _aesni_gf128mul_x_ble()
-
- add $64, INP
- add $64, OUTP
test LEN, LEN
- jnz .Lxts_dec_loop4
-
-.Lxts_dec_ret_iv:
- movups IV, (IVP)
-
-.Lxts_dec_ret:
-#ifndef __x86_64__
- popl KLEN
- popl KEYP
- popl LEN
- popl IVP
-#endif
- FRAME_END
- RET
-
-.Lxts_dec_1x:
- add $64, LEN
- jz .Lxts_dec_ret_iv
-
-.Lxts_dec_loop1:
- movdqu (INP), STATE
+ jz .Lxts_out\@
+.if \enc
add $16, INP
sub $16, LEN
- jl .Lxts_dec_cts1
-
- pxor IV, STATE
- call _aesni_dec1
- pxor IV, STATE
- _aesni_gf128mul_x_ble()
-
- test LEN, LEN
- jz .Lxts_dec_out
+ jl .Lxts_cts1\@
+.endif
movdqu STATE, (OUTP)
add $16, OUTP
- jmp .Lxts_dec_loop1
+ jmp .Lxts_loop1\@
-.Lxts_dec_out:
+.Lxts_out\@:
movdqu STATE, (OUTP)
- jmp .Lxts_dec_ret_iv
+ jmp .Lxts_ret_iv\@
-.Lxts_dec_cts1:
+.if \enc
+.Lxts_cts4\@:
+ movdqa STATE4, STATE
+ sub $16, OUTP
+.Lxts_cts1\@:
+.else
+.Lxts_cts1\@:
movdqa IV, STATE4
- _aesni_gf128mul_x_ble()
+ _aesni_gf128mul_x_ble
pxor IV, STATE
call _aesni_dec1
pxor IV, STATE
-
+.endif
#ifndef __x86_64__
lea .Lcts_permute_table, T1
#else
@@ -3152,10 +2831,32 @@ SYM_FUNC_START(aesni_xts_decrypt)
pblendvb IN2, IN1
movaps IN1, STATE
+.if \enc
+ pxor IV, STATE
+ call _aesni_enc1
+ pxor IV, STATE
+.else
pxor STATE4, STATE
call _aesni_dec1
pxor STATE4, STATE
+.endif
movups STATE, (OUTP)
- jmp .Lxts_dec_ret
-SYM_FUNC_END(aesni_xts_decrypt)
+ jmp .Lxts_ret\@
+.endm
+
+/*
+ * void aesni_xts_enc(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
+ */
+SYM_FUNC_START(aesni_xts_enc)
+ _aesni_xts_crypt 1
+SYM_FUNC_END(aesni_xts_enc)
+
+/*
+ * void aesni_xts_dec(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
+ */
+SYM_FUNC_START(aesni_xts_dec)
+ _aesni_xts_crypt 0
+SYM_FUNC_END(aesni_xts_dec)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b1d90c25975a..5b25d2a58aeb 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -40,7 +40,6 @@
#define AESNI_ALIGN 16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
-#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
@@ -87,8 +86,8 @@ static inline void *aes_align_addr(void *addr)
return PTR_ALIGN(addr, AESNI_ALIGN);
}
-asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
- unsigned int key_len);
+asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
@@ -107,11 +106,11 @@ asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
-asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_xts_enc(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
-asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_xts_dec(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
@@ -233,19 +232,17 @@ static int aes_set_key_common(struct crypto_aes_ctx *ctx,
{
int err;
- if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256)
- return -EINVAL;
-
if (!crypto_simd_usable())
- err = aes_expandkey(ctx, in_key, key_len);
- else {
- kernel_fpu_begin();
- err = aesni_set_key(ctx, in_key, key_len);
- kernel_fpu_end();
- }
+ return aes_expandkey(ctx, in_key, key_len);
- return err;
+ err = aes_check_keylen(key_len);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ aesni_set_key(ctx, in_key, key_len);
+ kernel_fpu_end();
+ return 0;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -592,23 +589,12 @@ static int xctr_crypt(struct skcipher_request *req)
return err;
}
-static int
-rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
+static int aes_gcm_derive_hash_subkey(const struct crypto_aes_ctx *aes_key,
+ u8 hash_subkey[AES_BLOCK_SIZE])
{
- struct crypto_aes_ctx ctx;
- int ret;
-
- ret = aes_expandkey(&ctx, key, key_len);
- if (ret)
- return ret;
-
- /* Clear the data in the hash sub key container to zero.*/
- /* We want to cipher all zeros to create the hash sub key. */
- memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
-
- aes_encrypt(&ctx, hash_subkey, hash_subkey);
+ static const u8 zeroes[AES_BLOCK_SIZE];
- memzero_explicit(&ctx, sizeof(ctx));
+ aes_encrypt(aes_key, hash_subkey, zeroes);
return 0;
}
@@ -626,7 +612,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
- rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+ aes_gcm_derive_hash_subkey(&ctx->aes_key_expanded,
+ ctx->hash_subkey);
}
/* This is the Integrity Check Value (aka the authentication tag) length and can
@@ -877,7 +864,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
}
#endif
-static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
@@ -898,108 +885,149 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen);
}
-static int xts_crypt(struct skcipher_request *req, bool encrypt)
+typedef void (*xts_encrypt_iv_func)(const struct crypto_aes_ctx *tweak_key,
+ u8 iv[AES_BLOCK_SIZE]);
+typedef void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
+ const u8 *src, u8 *dst, unsigned int len,
+ u8 tweak[AES_BLOCK_SIZE]);
+
+/* This handles cases where the source and/or destination span pages. */
+static noinline int
+xts_crypt_slowpath(struct skcipher_request *req, xts_crypt_func crypt_func)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
+ const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
+ struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct skcipher_walk walk;
+ struct scatterlist *src, *dst;
int err;
- if (req->cryptlen < AES_BLOCK_SIZE)
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (!walk.nbytes)
- return err;
-
- if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
- int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
-
- skcipher_walk_abort(&walk);
-
+ /*
+ * If the message length isn't divisible by the AES block size, then
+ * separate off the last full block and the partial block. This ensures
+ * that they are processed in the same call to the assembly function,
+ * which is required for ciphertext stealing.
+ */
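+ /*
+ * For example, cryptlen = 600 gives tail = 8: the subrequest covers
+ * 600 - 8 - 16 = 576 bytes, and the remaining 24 bytes are processed
+ * by the ciphertext stealing code further down.
+ */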
+ if (tail) {
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
- blocks * AES_BLOCK_SIZE, req->iv);
+ req->cryptlen - tail - AES_BLOCK_SIZE,
+ req->iv);
req = &subreq;
+ }
- err = skcipher_walk_virt(&walk, req, false);
- if (!walk.nbytes)
- return err;
- } else {
- tail = 0;
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ kernel_fpu_begin();
+ (*crypt_func)(&ctx->crypt_ctx,
+ walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes & ~(AES_BLOCK_SIZE - 1), req->iv);
+ kernel_fpu_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes & (AES_BLOCK_SIZE - 1));
}
- kernel_fpu_begin();
+ if (err || !tail)
+ return err;
- /* calculate first value of T */
- aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
+ /* Do ciphertext stealing with the last full block and partial block. */
- while (walk.nbytes > 0) {
- int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes &= ~(AES_BLOCK_SIZE - 1);
-
- if (encrypt)
- aesni_xts_encrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, walk.iv);
- else
- aesni_xts_decrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, walk.iv);
- kernel_fpu_end();
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
- if (walk.nbytes > 0)
- kernel_fpu_begin();
- }
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
- if (unlikely(tail > 0 && !err)) {
- struct scatterlist sg_src[2], sg_dst[2];
- struct scatterlist *src, *dst;
+ kernel_fpu_begin();
+ (*crypt_func)(&ctx->crypt_ctx, walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, req->iv);
+ kernel_fpu_end();
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ return skcipher_walk_done(&walk, 0);
+}
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
+/* __always_inline to avoid indirect call in fastpath */
+static __always_inline int
+xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv,
+ xts_crypt_func crypt_func)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
+ const unsigned int cryptlen = req->cryptlen;
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
- err = skcipher_walk_virt(&walk, &subreq, false);
- if (err)
- return err;
+ if (unlikely(cryptlen < AES_BLOCK_SIZE))
+ return -EINVAL;
- kernel_fpu_begin();
- if (encrypt)
- aesni_xts_encrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes, walk.iv);
- else
- aesni_xts_decrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes, walk.iv);
- kernel_fpu_end();
+ kernel_fpu_begin();
+ (*encrypt_iv)(&ctx->tweak_ctx, req->iv);
- err = skcipher_walk_done(&walk, 0);
+ /*
+ * In practice, virtually all XTS plaintexts and ciphertexts are either
+ * 512 or 4096 bytes, aligned such that they don't span page boundaries.
+ * To optimize the performance of these cases, and also any other case
+ * where no page boundary is spanned, the below fast-path handles
+ * single-page sources and destinations as efficiently as possible.
+ */
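+ /*
+ * These checks ensure that each buffer lies entirely within its first
+ * scatterlist entry and within a single page, so one kmap_local_page()
+ * mapping per buffer covers the whole request.
+ */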
+ if (likely(src->length >= cryptlen && dst->length >= cryptlen &&
+ src->offset + cryptlen <= PAGE_SIZE &&
+ dst->offset + cryptlen <= PAGE_SIZE)) {
+ struct page *src_page = sg_page(src);
+ struct page *dst_page = sg_page(dst);
+ void *src_virt = kmap_local_page(src_page) + src->offset;
+ void *dst_virt = kmap_local_page(dst_page) + dst->offset;
+
+ (*crypt_func)(&ctx->crypt_ctx, src_virt, dst_virt, cryptlen,
+ req->iv);
+ kunmap_local(dst_virt);
+ kunmap_local(src_virt);
+ kernel_fpu_end();
+ return 0;
}
- return err;
+ kernel_fpu_end();
+ return xts_crypt_slowpath(req, crypt_func);
+}
+
+static void aesni_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
+ u8 iv[AES_BLOCK_SIZE])
+{
+ aesni_enc(tweak_key, iv, iv);
+}
+
+static void aesni_xts_encrypt(const struct crypto_aes_ctx *key,
+ const u8 *src, u8 *dst, unsigned int len,
+ u8 tweak[AES_BLOCK_SIZE])
+{
+ aesni_xts_enc(key, dst, src, len, tweak);
}
-static int xts_encrypt(struct skcipher_request *req)
+static void aesni_xts_decrypt(const struct crypto_aes_ctx *key,
+ const u8 *src, u8 *dst, unsigned int len,
+ u8 tweak[AES_BLOCK_SIZE])
{
- return xts_crypt(req, true);
+ aesni_xts_dec(key, dst, src, len, tweak);
}
-static int xts_decrypt(struct skcipher_request *req)
+static int xts_encrypt_aesni(struct skcipher_request *req)
{
- return xts_crypt(req, false);
+ return xts_crypt(req, aesni_xts_encrypt_iv, aesni_xts_encrypt);
+}
+
+static int xts_decrypt_aesni(struct skcipher_request *req)
+{
+ return xts_crypt(req, aesni_xts_encrypt_iv, aesni_xts_decrypt);
}
static struct crypto_alg aesni_cipher_alg = {
@@ -1103,9 +1131,9 @@ static struct skcipher_alg aesni_skciphers[] = {
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.walksize = 2 * AES_BLOCK_SIZE,
- .setkey = xts_aesni_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
+ .setkey = xts_setkey_aesni,
+ .encrypt = xts_encrypt_aesni,
+ .decrypt = xts_decrypt_aesni,
}
};
@@ -1137,7 +1165,149 @@ static struct skcipher_alg aesni_xctr = {
};
static struct simd_skcipher_alg *aesni_simd_xctr;
-#endif /* CONFIG_X86_64 */
+
+asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
+ u8 iv[AES_BLOCK_SIZE]);
+
+#define DEFINE_XTS_ALG(suffix, driver_name, priority) \
+ \
+asmlinkage void \
+aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
+ u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+asmlinkage void \
+aes_xts_decrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
+ u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+ \
+static int xts_encrypt_##suffix(struct skcipher_request *req) \
+{ \
+ return xts_crypt(req, aes_xts_encrypt_iv, aes_xts_encrypt_##suffix); \
+} \
+ \
+static int xts_decrypt_##suffix(struct skcipher_request *req) \
+{ \
+ return xts_crypt(req, aes_xts_encrypt_iv, aes_xts_decrypt_##suffix); \
+} \
+ \
+static struct skcipher_alg aes_xts_alg_##suffix = { \
+ .base = { \
+ .cra_name = "__xts(aes)", \
+ .cra_driver_name = "__" driver_name, \
+ .cra_priority = priority, \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = AES_BLOCK_SIZE, \
+ .cra_ctxsize = XTS_AES_CTX_SIZE, \
+ .cra_module = THIS_MODULE, \
+ }, \
+ .min_keysize = 2 * AES_MIN_KEY_SIZE, \
+ .max_keysize = 2 * AES_MAX_KEY_SIZE, \
+ .ivsize = AES_BLOCK_SIZE, \
+ .walksize = 2 * AES_BLOCK_SIZE, \
+ .setkey = xts_setkey_aesni, \
+ .encrypt = xts_encrypt_##suffix, \
+ .decrypt = xts_decrypt_##suffix, \
+}; \
+ \
+static struct simd_skcipher_alg *aes_xts_simdalg_##suffix
+
+DEFINE_XTS_ALG(aesni_avx, "xts-aes-aesni-avx", 500);
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+DEFINE_XTS_ALG(vaes_avx2, "xts-aes-vaes-avx2", 600);
+DEFINE_XTS_ALG(vaes_avx10_256, "xts-aes-vaes-avx10_256", 700);
+DEFINE_XTS_ALG(vaes_avx10_512, "xts-aes-vaes-avx10_512", 800);
+#endif
+
+/*
+ * This is a list of CPU models that are known to suffer from downclocking when
+ * zmm registers (512-bit vectors) are used. On these CPUs, the AES-XTS
+ * implementation with zmm registers won't be used by default. An
+ * implementation with ymm registers (256-bit vectors) will be used instead.
+ */
+static const struct x86_cpu_id zmm_exclusion_list[] = {
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_SKYLAKE_X },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_ICELAKE_X },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_ICELAKE_D },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_ICELAKE },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_ICELAKE_L },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_ICELAKE_NNPI },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_TIGERLAKE_L },
+ { .vendor = X86_VENDOR_INTEL, .family = 6, .model = INTEL_FAM6_TIGERLAKE },
+ /* Allow Rocket Lake and later, and Sapphire Rapids and later. */
+ /* Also allow AMD CPUs (starting with Zen 4, the first with AVX-512). */
+ {},
+};
+
+static int __init register_xts_algs(void)
+{
+ int err;
+
+ if (!boot_cpu_has(X86_FEATURE_AVX))
+ return 0;
+ err = simd_register_skciphers_compat(&aes_xts_alg_aesni_avx, 1,
+ &aes_xts_simdalg_aesni_avx);
+ if (err)
+ return err;
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_VAES) ||
+ !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
+ !boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+ return 0;
+ err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx2, 1,
+ &aes_xts_simdalg_vaes_avx2);
+ if (err)
+ return err;
+
+ if (!boot_cpu_has(X86_FEATURE_AVX512BW) ||
+ !boot_cpu_has(X86_FEATURE_AVX512VL) ||
+ !boot_cpu_has(X86_FEATURE_BMI2) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
+ XFEATURE_MASK_AVX512, NULL))
+ return 0;
+
+ err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx10_256, 1,
+ &aes_xts_simdalg_vaes_avx10_256);
+ if (err)
+ return err;
+
+ if (x86_match_cpu(zmm_exclusion_list))
+ aes_xts_alg_vaes_avx10_512.base.cra_priority = 1;
+
+ err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx10_512, 1,
+ &aes_xts_simdalg_vaes_avx10_512);
+ if (err)
+ return err;
+#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
+ return 0;
+}
+
+static void unregister_xts_algs(void)
+{
+ if (aes_xts_simdalg_aesni_avx)
+ simd_unregister_skciphers(&aes_xts_alg_aesni_avx, 1,
+ &aes_xts_simdalg_aesni_avx);
+#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
+ if (aes_xts_simdalg_vaes_avx2)
+ simd_unregister_skciphers(&aes_xts_alg_vaes_avx2, 1,
+ &aes_xts_simdalg_vaes_avx2);
+ if (aes_xts_simdalg_vaes_avx10_256)
+ simd_unregister_skciphers(&aes_xts_alg_vaes_avx10_256, 1,
+ &aes_xts_simdalg_vaes_avx10_256);
+ if (aes_xts_simdalg_vaes_avx10_512)
+ simd_unregister_skciphers(&aes_xts_alg_vaes_avx10_512, 1,
+ &aes_xts_simdalg_vaes_avx10_512);
+#endif
+}
+#else /* CONFIG_X86_64 */
+static int __init register_xts_algs(void)
+{
+ return 0;
+}
+
+static void unregister_xts_algs(void)
+{
+}
+#endif /* !CONFIG_X86_64 */
#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
@@ -1146,7 +1316,8 @@ static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
- rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+ aes_gcm_derive_hash_subkey(&ctx->aes_key_expanded,
+ ctx->hash_subkey);
}
static int generic_gcmaes_encrypt(struct aead_request *req)
@@ -1276,13 +1447,21 @@ static int __init aesni_init(void)
goto unregister_aeads;
#endif /* CONFIG_X86_64 */
+ err = register_xts_algs();
+ if (err)
+ goto unregister_xts;
+
return 0;
+unregister_xts:
+ unregister_xts_algs();
#ifdef CONFIG_X86_64
+ if (aesni_simd_xctr)
+ simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
unregister_aeads:
+#endif /* CONFIG_X86_64 */
simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
aesni_simd_aeads);
-#endif /* CONFIG_X86_64 */
unregister_skciphers:
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
@@ -1303,6 +1482,7 @@ static void __exit aesni_exit(void)
if (boot_cpu_has(X86_FEATURE_AVX))
simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
#endif /* CONFIG_X86_64 */
+ unregister_xts_algs();
}
late_initcall(aesni_init);
diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
index ef73a3ab8726..791386d9a83a 100644
--- a/arch/x86/crypto/nh-avx2-x86_64.S
+++ b/arch/x86/crypto/nh-avx2-x86_64.S
@@ -154,5 +154,6 @@ SYM_TYPED_FUNC_START(nh_avx2)
vpaddq T1, T0, T0
vpaddq T4, T0, T0
vmovdqu T0, (HASH)
+ vzeroupper
RET
SYM_FUNC_END(nh_avx2)
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 9918212faf91..0ffb072be956 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -716,6 +716,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
popq %r13
popq %r12
popq %rbx
+ vzeroupper
RET
SYM_FUNC_END(sha256_transform_rorx)
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
index 537b6dcd7ed8..d515a55a3bc1 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@ -62,20 +62,41 @@
#define SHA256CONSTANTS %rax
-#define MSG %xmm0
+#define MSG %xmm0 /* sha256rnds2 implicit operand */
#define STATE0 %xmm1
#define STATE1 %xmm2
-#define MSGTMP0 %xmm3
-#define MSGTMP1 %xmm4
-#define MSGTMP2 %xmm5
-#define MSGTMP3 %xmm6
-#define MSGTMP4 %xmm7
+#define MSG0 %xmm3
+#define MSG1 %xmm4
+#define MSG2 %xmm5
+#define MSG3 %xmm6
+#define TMP %xmm7
#define SHUF_MASK %xmm8
#define ABEF_SAVE %xmm9
#define CDGH_SAVE %xmm10
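+/*
+ * Do 4 rounds of SHA-256 (two sha256rnds2 instructions).  For rounds 0-15
+ * this also loads and byte-swaps the next 16 message bytes, and for the
+ * applicable rounds it advances the message schedule with
+ * sha256msg1/sha256msg2/palignr.
+ */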
+.macro do_4rounds i, m0, m1, m2, m3
+.if \i < 16
+ movdqu \i*4(DATA_PTR), \m0
+ pshufb SHUF_MASK, \m0
+.endif
+ movdqa (\i-32)*4(SHA256CONSTANTS), MSG
+ paddd \m0, MSG
+ sha256rnds2 STATE0, STATE1
+.if \i >= 12 && \i < 60
+ movdqa \m0, TMP
+ palignr $4, \m3, TMP
+ paddd TMP, \m1
+ sha256msg2 \m0, \m1
+.endif
+ punpckhqdq MSG, MSG
+ sha256rnds2 STATE1, STATE0
+.if \i >= 4 && \i < 52
+ sha256msg1 \m0, \m3
+.endif
+.endm
+
/*
* Intel SHA Extensions optimized implementation of a SHA-256 update function
*
@@ -86,9 +107,6 @@
* store partial blocks. All message padding and hash value initialization must
* be done outside the update function.
*
- * The indented lines in the loop are instructions related to rounds processing.
- * The non-indented lines are instructions related to the message schedule.
- *
* void sha256_ni_transform(uint32_t *digest, const void *data,
uint32_t numBlocks);
* digest : pointer to digest
@@ -108,202 +126,29 @@ SYM_TYPED_FUNC_START(sha256_ni_transform)
* Need to reorder these appropriately
* DCBA, HGFE -> ABEF, CDGH
*/
- movdqu 0*16(DIGEST_PTR), STATE0
- movdqu 1*16(DIGEST_PTR), STATE1
+ movdqu 0*16(DIGEST_PTR), STATE0 /* DCBA */
+ movdqu 1*16(DIGEST_PTR), STATE1 /* HGFE */
- pshufd $0xB1, STATE0, STATE0 /* CDAB */
- pshufd $0x1B, STATE1, STATE1 /* EFGH */
- movdqa STATE0, MSGTMP4
- palignr $8, STATE1, STATE0 /* ABEF */
- pblendw $0xF0, MSGTMP4, STATE1 /* CDGH */
+ movdqa STATE0, TMP
+ punpcklqdq STATE1, STATE0 /* FEBA */
+ punpckhqdq TMP, STATE1 /* DCHG */
+ pshufd $0x1B, STATE0, STATE0 /* ABEF */
+ pshufd $0xB1, STATE1, STATE1 /* CDGH */
movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK
- lea K256(%rip), SHA256CONSTANTS
+ lea K256+32*4(%rip), SHA256CONSTANTS
.Lloop0:
/* Save hash values for addition after rounds */
movdqa STATE0, ABEF_SAVE
movdqa STATE1, CDGH_SAVE
- /* Rounds 0-3 */
- movdqu 0*16(DATA_PTR), MSG
- pshufb SHUF_MASK, MSG
- movdqa MSG, MSGTMP0
- paddd 0*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
-
- /* Rounds 4-7 */
- movdqu 1*16(DATA_PTR), MSG
- pshufb SHUF_MASK, MSG
- movdqa MSG, MSGTMP1
- paddd 1*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP1, MSGTMP0
-
- /* Rounds 8-11 */
- movdqu 2*16(DATA_PTR), MSG
- pshufb SHUF_MASK, MSG
- movdqa MSG, MSGTMP2
- paddd 2*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP2, MSGTMP1
-
- /* Rounds 12-15 */
- movdqu 3*16(DATA_PTR), MSG
- pshufb SHUF_MASK, MSG
- movdqa MSG, MSGTMP3
- paddd 3*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP3, MSGTMP4
- palignr $4, MSGTMP2, MSGTMP4
- paddd MSGTMP4, MSGTMP0
- sha256msg2 MSGTMP3, MSGTMP0
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP3, MSGTMP2
-
- /* Rounds 16-19 */
- movdqa MSGTMP0, MSG
- paddd 4*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP0, MSGTMP4
- palignr $4, MSGTMP3, MSGTMP4
- paddd MSGTMP4, MSGTMP1
- sha256msg2 MSGTMP0, MSGTMP1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP0, MSGTMP3
-
- /* Rounds 20-23 */
- movdqa MSGTMP1, MSG
- paddd 5*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP1, MSGTMP4
- palignr $4, MSGTMP0, MSGTMP4
- paddd MSGTMP4, MSGTMP2
- sha256msg2 MSGTMP1, MSGTMP2
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP1, MSGTMP0
-
- /* Rounds 24-27 */
- movdqa MSGTMP2, MSG
- paddd 6*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP2, MSGTMP4
- palignr $4, MSGTMP1, MSGTMP4
- paddd MSGTMP4, MSGTMP3
- sha256msg2 MSGTMP2, MSGTMP3
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP2, MSGTMP1
-
- /* Rounds 28-31 */
- movdqa MSGTMP3, MSG
- paddd 7*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP3, MSGTMP4
- palignr $4, MSGTMP2, MSGTMP4
- paddd MSGTMP4, MSGTMP0
- sha256msg2 MSGTMP3, MSGTMP0
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP3, MSGTMP2
-
- /* Rounds 32-35 */
- movdqa MSGTMP0, MSG
- paddd 8*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP0, MSGTMP4
- palignr $4, MSGTMP3, MSGTMP4
- paddd MSGTMP4, MSGTMP1
- sha256msg2 MSGTMP0, MSGTMP1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP0, MSGTMP3
-
- /* Rounds 36-39 */
- movdqa MSGTMP1, MSG
- paddd 9*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP1, MSGTMP4
- palignr $4, MSGTMP0, MSGTMP4
- paddd MSGTMP4, MSGTMP2
- sha256msg2 MSGTMP1, MSGTMP2
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP1, MSGTMP0
-
- /* Rounds 40-43 */
- movdqa MSGTMP2, MSG
- paddd 10*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP2, MSGTMP4
- palignr $4, MSGTMP1, MSGTMP4
- paddd MSGTMP4, MSGTMP3
- sha256msg2 MSGTMP2, MSGTMP3
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP2, MSGTMP1
-
- /* Rounds 44-47 */
- movdqa MSGTMP3, MSG
- paddd 11*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP3, MSGTMP4
- palignr $4, MSGTMP2, MSGTMP4
- paddd MSGTMP4, MSGTMP0
- sha256msg2 MSGTMP3, MSGTMP0
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP3, MSGTMP2
-
- /* Rounds 48-51 */
- movdqa MSGTMP0, MSG
- paddd 12*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP0, MSGTMP4
- palignr $4, MSGTMP3, MSGTMP4
- paddd MSGTMP4, MSGTMP1
- sha256msg2 MSGTMP0, MSGTMP1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
- sha256msg1 MSGTMP0, MSGTMP3
-
- /* Rounds 52-55 */
- movdqa MSGTMP1, MSG
- paddd 13*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP1, MSGTMP4
- palignr $4, MSGTMP0, MSGTMP4
- paddd MSGTMP4, MSGTMP2
- sha256msg2 MSGTMP1, MSGTMP2
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
-
- /* Rounds 56-59 */
- movdqa MSGTMP2, MSG
- paddd 14*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- movdqa MSGTMP2, MSGTMP4
- palignr $4, MSGTMP1, MSGTMP4
- paddd MSGTMP4, MSGTMP3
- sha256msg2 MSGTMP2, MSGTMP3
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
-
- /* Rounds 60-63 */
- movdqa MSGTMP3, MSG
- paddd 15*16(SHA256CONSTANTS), MSG
- sha256rnds2 STATE0, STATE1
- pshufd $0x0E, MSG, MSG
- sha256rnds2 STATE1, STATE0
+.irp i, 0, 16, 32, 48
+ do_4rounds (\i + 0), MSG0, MSG1, MSG2, MSG3
+ do_4rounds (\i + 4), MSG1, MSG2, MSG3, MSG0
+ do_4rounds (\i + 8), MSG2, MSG3, MSG0, MSG1
+ do_4rounds (\i + 12), MSG3, MSG0, MSG1, MSG2
+.endr
/* Add current hash values with previously saved */
paddd ABEF_SAVE, STATE0
@@ -315,14 +160,14 @@ SYM_TYPED_FUNC_START(sha256_ni_transform)
jne .Lloop0
/* Write hash values back in the correct order */
- pshufd $0x1B, STATE0, STATE0 /* FEBA */
- pshufd $0xB1, STATE1, STATE1 /* DCHG */
- movdqa STATE0, MSGTMP4
- pblendw $0xF0, STATE1, STATE0 /* DCBA */
- palignr $8, MSGTMP4, STATE1 /* HGFE */
-
- movdqu STATE0, 0*16(DIGEST_PTR)
- movdqu STATE1, 1*16(DIGEST_PTR)
+ movdqa STATE0, TMP
+ punpcklqdq STATE1, STATE0 /* GHEF */
+ punpckhqdq TMP, STATE1 /* ABCD */
+ pshufd $0xB1, STATE0, STATE0 /* HGFE */
+ pshufd $0x1B, STATE1, STATE1 /* DCBA */
+
+ movdqu STATE1, 0*16(DIGEST_PTR)
+ movdqu STATE0, 1*16(DIGEST_PTR)
.Ldone_hash:
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index f08496cd6870..24973f42c43f 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -680,6 +680,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
pop %r12
pop %rbx
+ vzeroupper
RET
SYM_FUNC_END(sha512_transform_rorx)
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index c93e7f5c2a06..ce1cc1622385 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -17,7 +17,7 @@ obj-y += common.o
obj-y += vdso/
obj-y += vsyscall/
-obj-$(CONFIG_PREEMPTION) += thunk_$(BITS).o
+obj-$(CONFIG_PREEMPTION) += thunk.o
CFLAGS_entry_fred.o += -fno-stack-protector
CFLAGS_REMOVE_entry_fred.o += -pg $(CC_FLAGS_FTRACE)
obj-$(CONFIG_X86_FRED) += entry_64_fred.o entry_fred.o
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 6356060caaf3..51cc9c7cb9bd 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -49,7 +49,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
if (likely(unr < NR_syscalls)) {
unr = array_index_nospec(unr, NR_syscalls);
- regs->ax = sys_call_table[unr](regs);
+ regs->ax = x64_sys_call(regs, unr);
return true;
}
return false;
@@ -66,7 +66,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
xnr = array_index_nospec(xnr, X32_NR_syscalls);
- regs->ax = x32_sys_call_table[xnr](regs);
+ regs->ax = x32_sys_call(regs, xnr);
return true;
}
return false;
@@ -162,7 +162,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
if (likely(unr < IA32_NR_syscalls)) {
unr = array_index_nospec(unr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call_table[unr](regs);
+ regs->ax = ia32_sys_call(regs, unr);
} else if (nr != -1) {
regs->ax = __ia32_sys_ni_syscall(regs);
}
@@ -189,7 +189,7 @@ static __always_inline bool int80_is_external(void)
}
/**
- * int80_emulation - 32-bit legacy syscall entry
+ * do_int80_emulation - 32-bit legacy syscall C entry from asm
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in
@@ -207,7 +207,7 @@ static __always_inline bool int80_is_external(void)
* eax: system call number
* ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
*/
-DEFINE_IDTENTRY_RAW(int80_emulation)
+__visible noinstr void do_int80_emulation(struct pt_regs *regs)
{
int nr;
@@ -255,6 +255,71 @@ DEFINE_IDTENTRY_RAW(int80_emulation)
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
+
+#ifdef CONFIG_X86_FRED
+/*
+ * A FRED-specific INT80 handler is warranted for the following reasons:
+ *
+ * 1) As INT instructions and hardware interrupts are separate event
+ * types, FRED does not preclude the use of vector 0x80 for external
+ * interrupts. As a result, the FRED setup code does not reserve
+ * vector 0x80 and calling int80_is_external() is not merely
+ * suboptimal but actively incorrect: it could cause a system call
+ * to be incorrectly ignored.
+ *
+ * 2) It is called only for handling vector 0x80 of event type
+ * EVENT_TYPE_SWINT and will never be called to handle any external
+ * interrupt (event type EVENT_TYPE_EXTINT).
+ *
+ * 3) FRED has separate entry flows depending on whether the event came
+ * from user space or kernel space, and because the kernel does not use
+ * INT insns, the FRED kernel entry handler fred_entry_from_kernel()
+ * falls through to fred_bad_type() if the event type is
+ * EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling
+ * an INT insn, it can only have come from user level.
+ *
+ * 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. If FRED ever needs
+ * an equivalent, it will likely take a different approach: it probably
+ * belongs in either fred_intx()/fred_other() or
+ * asm_fred_entrypoint_user(), depending on whether this ought to be
+ * done for all entries from user space or only for system calls.
+ *
+ * 5) INT $0x80 is the fast path for 32-bit system calls under FRED.
+ */
+DEFINE_FREDENTRY_RAW(int80_emulation)
+{
+ int nr;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ add_random_kstack_offset();
+
+ /*
+ * FRED pushed 0 into regs::orig_ax and regs::ax contains the
+ * syscall number.
+ *
+ * User tracing code (ptrace or signal handlers) might assume
+ * that regs::orig_ax contains a 32-bit number when invoking
+ * a 32-bit syscall.
+ *
+ * Establish the syscall convention by saving the 32-bit truncated
+ * syscall number in regs::orig_ax and by invalidating regs::ax.
+ */
+ regs->orig_ax = regs->ax & GENMASK(31, 0);
+ regs->ax = -ENOSYS;
+
+ nr = syscall_32_enter(regs);
+
+ local_irq_enable();
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+#endif
#else /* CONFIG_IA32_EMULATION */
/* Handles int $0x80 on a 32bit kernel */
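To make the event-type reasoning in the comment above concrete, here is a condensed, illustrative sketch of how a FRED user-level event can be routed without ever calling int80_is_external(). This is an assumption distilled from that comment (the real dispatch is split across fred_entry_from_user() and fred_intx()), not code from this patch:

	/* Illustrative sketch only; names follow the existing FRED entry code. */
	static __always_inline void fred_user_event_sketch(struct pt_regs *regs)
	{
		switch (regs->fred_ss.type) {
		case EVENT_TYPE_EXTINT:		/* external interrupt: never a system call */
			return fred_extint(regs);
		case EVENT_TYPE_SWINT:		/* INT insn, which can only come from user space */
			if (regs->fred_ss.vector == IA32_SYSCALL_VECTOR)
				return fred_int80_emulation(regs);
			break;			/* other INT vectors are handled by fred_intx() */
		}
	}

The hardware-reported event type already separates an INT $0x80 instruction from an external interrupt that happens to share vector 0x80, which is why the FRED path does not need int80_is_external().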
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8af2a26b24f6..1b5be07f8669 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
/* clobbers %rax, make sure it is after saving the syscall nr */
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
call do_syscall_64 /* returns with IRQs disabled */
@@ -1491,3 +1492,63 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
call make_task_dead
SYM_CODE_END(rewind_stack_and_make_dead)
.popsection
+
+/*
+ * This sequence executes branches in order to remove user branch information
+ * from the branch history tracker in the Branch Predictor, therefore removing
+ * user influence on subsequent BTB lookups.
+ *
+ * It should be used on parts prior to Alder Lake. Newer parts should use the
+ * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
+ * virtualized on newer hardware, the VMM should protect against BHI attacks by
+ * setting BHI_DIS_S for the guests.
+ *
+ * CALLs/RETs are necessary to prevent the Loop Stream Detector (LSD) from engaging
+ * and not clearing the branch history. The call tree looks like:
+ *
+ * call 1
+ * call 2
+ * call 2
+ * call 2
+ * call 2
+ * call 2
+ * ret
+ * ret
+ * ret
+ * ret
+ * ret
+ * ret
+ *
+ * This means that the stack is non-constant and ORC can't unwind it with %rsp
+ * alone. Therefore we unconditionally set up the frame pointer, which allows
+ * ORC to unwind properly.
+ *
+ * The alignment is for performance and not for safety, and may be safely
+ * refactored in the future if needed.
+ */
+SYM_FUNC_START(clear_bhb_loop)
+ push %rbp
+ mov %rsp, %rbp
+ movl $5, %ecx
+ ANNOTATE_INTRA_FUNCTION_CALL
+ call 1f
+ jmp 5f
+ .align 64, 0xcc
+ ANNOTATE_INTRA_FUNCTION_CALL
+1: call 2f
+ RET
+ .align 64, 0xcc
+2: movl $5, %eax
+3: jmp 4f
+ nop
+4: sub $1, %eax
+ jnz 3b
+ sub $1, %ecx
+ jnz 1b
+ RET
+5: lfence
+ pop %rbp
+ RET
+SYM_FUNC_END(clear_bhb_loop)
+EXPORT_SYMBOL_GPL(clear_bhb_loop)
+STACK_FRAME_NON_STANDARD(clear_bhb_loop)
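As a rough structural illustration of the call tree documented above (an assumed C analogue, not part of the patch and not a faithful model of the mitigation), the nesting of CALLs before any RET executes can be pictured as a recursive function whose inner loop supplies the conditional branches:

	/* Rough sketch: structure only, no security properties implied. */
	static void clear_bhb_sketch(int depth)
	{
		for (volatile int i = 0; i < 5; i++)
			;				/* the "3:"/"4:" taken-branch loop */
		if (depth > 1)
			clear_bhb_sketch(depth - 1);	/* "jnz 1b" re-enters through another CALL */
	}						/* each return mirrors one RET in the tree */

	/* clear_bhb_sketch(5) corresponds to "movl $5, %ecx" in the asm above. */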
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index eabf48c4d4b4..11c9b8efdc4c 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -7,7 +7,6 @@
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
-#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
@@ -92,6 +91,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
@@ -206,6 +206,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
movq %rsp, %rdi
call do_fast_syscall_32
@@ -276,3 +277,17 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
int3
SYM_CODE_END(entry_SYSCALL_compat)
+
+/*
+ * int 0x80 is used by 32-bit mode as a system call entry. Normally IDT entries
+ * point to C routines; however, since this is a system call interface the branch
+ * history needs to be scrubbed to protect against BHI attacks, and that
+ * scrubbing needs to take place in assembly code prior to entering any C
+ * routines.
+ */
+SYM_CODE_START(int80_emulation)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ CLEAR_BRANCH_HISTORY
+ jmp do_int80_emulation
+SYM_CODE_END(int80_emulation)
diff --git a/arch/x86/entry/entry_fred.c b/arch/x86/entry/entry_fred.c
index ac120cbdaaf2..f004a4dc74c2 100644
--- a/arch/x86/entry/entry_fred.c
+++ b/arch/x86/entry/entry_fred.c
@@ -28,9 +28,9 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
if (regs->fred_cs.sl > 0) {
pr_emerg("PANIC: invalid or fatal FRED event; event type %u "
"vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
- regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+ regs->fred_ss.type, regs->fred_ss.vector, error_code,
fred_event_data(regs), regs->cs, regs->ip);
- die("invalid or fatal FRED event", regs, regs->orig_ax);
+ die("invalid or fatal FRED event", regs, error_code);
panic("invalid or fatal FRED event");
} else {
unsigned long flags = oops_begin();
@@ -38,10 +38,10 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
pr_alert("BUG: invalid or fatal FRED event; event type %u "
"vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
- regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+ regs->fred_ss.type, regs->fred_ss.vector, error_code,
fred_event_data(regs), regs->cs, regs->ip);
- if (__die("Invalid or fatal FRED event", regs, regs->orig_ax))
+ if (__die("Invalid or fatal FRED event", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
@@ -66,7 +66,7 @@ static noinstr void fred_intx(struct pt_regs *regs)
/* INT80 */
case IA32_SYSCALL_VECTOR:
if (ia32_enabled())
- return int80_emulation(regs);
+ return fred_int80_emulation(regs);
fallthrough;
#endif
@@ -117,6 +117,8 @@ static idtentry_t sysvec_table[NR_SYSTEM_VECTORS] __ro_after_init = {
SYSVEC(POSTED_INTR_VECTOR, kvm_posted_intr_ipi),
SYSVEC(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi),
SYSVEC(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi),
+
+ SYSVEC(POSTED_MSI_NOTIFICATION_VECTOR, posted_msi_notification),
};
static bool fred_setup_done __initdata;
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 8cfc9bc73e7f..c2235bae17ef 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -18,8 +18,25 @@
#include <asm/syscalls_32.h>
#undef __SYSCALL
+/*
+ * The sys_call_table[] is no longer used for system calls, but
+ * kernel/trace/trace_syscalls.c still wants to know the system
+ * call address.
+ */
+#ifdef CONFIG_X86_32
#define __SYSCALL(nr, sym) __ia32_##sym,
-
-__visible const sys_call_ptr_t ia32_sys_call_table[] = {
+const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_32.h>
};
+#undef __SYSCALL
+#endif
+
+#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
+
+long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_32.h>
+ default: return __ia32_sys_ni_syscall(regs);
+ }
+};
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index be120eec1fc9..33b3f09e6f15 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -11,8 +11,23 @@
#include <asm/syscalls_64.h>
#undef __SYSCALL
+/*
+ * The sys_call_table[] is no longer used for system calls, but
+ * kernel/trace/trace_syscalls.c still wants to know the system
+ * call address.
+ */
#define __SYSCALL(nr, sym) __x64_##sym,
-
-asmlinkage const sys_call_ptr_t sys_call_table[] = {
+const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
+#undef __SYSCALL
+
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+
+long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_64.h>
+ default: return __x64_sys_ni_syscall(regs);
+ }
+};
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index bdd0e03a1265..03de4a932131 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -11,8 +11,12 @@
#include <asm/syscalls_x32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) __x64_##sym,
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
-#include <asm/syscalls_x32.h>
+long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_x32.h>
+ default: return __x64_sys_ni_syscall(regs);
+ }
};
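For readers following the dispatch conversion, the switch bodies above are produced by re-including the generated syscall headers under a redefined __SYSCALL(). The expansion below is illustrative only (the first two x86-64 entries are shown as an example; the full list comes from syscall_64.tbl):

	/* With: #define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); */
	long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
	{
		switch (nr) {
		case 0: return __x64_sys_read(regs);	/* from <asm/syscalls_64.h> */
		case 1: return __x64_sys_write(regs);
		/* ... one direct call per table entry ... */
		default: return __x64_sys_ni_syscall(regs);
		}
	}

The conversion removes the forced indirect call through sys_call_table[] on the system call path; as the comments in the hunks above note, the table itself survives only because kernel/trace/trace_syscalls.c still wants the per-syscall function addresses.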
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 7e8d46f4147f..cc78226ffc35 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,7 +374,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
-453 64 map_shadow_stack sys_map_shadow_stack
+453 common map_shadow_stack sys_map_shadow_stack
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk.S
index 119ebdc3d362..119ebdc3d362 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk.S
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
deleted file mode 100644
index da37f42f4549..000000000000
--- a/arch/x86/entry/thunk_32.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Trampoline to trace irqs off. (otherwise CALLER_ADDR1 might crash)
- * Copyright 2008 by Steven Rostedt, Red Hat, Inc
- * (inspired by Andi Kleen's thunk_64.S)
- */
-
-#include <linux/export.h>
-#include <linux/linkage.h>
-#include <asm/asm.h>
-
-#include "calling.h"
-
-THUNK preempt_schedule_thunk, preempt_schedule
-THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
-EXPORT_SYMBOL(preempt_schedule_thunk)
-EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
-
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index fd63051bbbbb..215a1b202a91 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -6,20 +6,6 @@
# Include the generic Makefile to check the built vDSO:
include $(srctree)/lib/vdso/Makefile
-# Sanitizer runtimes are unavailable and cannot be linked here.
-KASAN_SANITIZE := n
-KMSAN_SANITIZE_vclock_gettime.o := n
-KMSAN_SANITIZE_vdso32/vclock_gettime.o := n
-KMSAN_SANITIZE_vgetcpu.o := n
-KMSAN_SANITIZE_vdso32/vgetcpu.o := n
-
-UBSAN_SANITIZE := n
-KCSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
# Files to link into the vDSO:
vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
@@ -28,22 +14,12 @@ vobjs-$(CONFIG_X86_SGX) += vsgx.o
# Files to link into the kernel:
obj-y += vma.o extable.o
-KASAN_SANITIZE_vma.o := y
-UBSAN_SANITIZE_vma.o := y
-KCSAN_SANITIZE_vma.o := y
-
-OBJECT_FILES_NON_STANDARD_vma.o := n
-OBJECT_FILES_NON_STANDARD_extable.o := n
# vDSO images to build:
obj-$(CONFIG_X86_64) += vdso-image-64.o
obj-$(CONFIG_X86_X32_ABI) += vdso-image-x32.o
obj-$(CONFIG_COMPAT_32) += vdso-image-32.o vdso32-setup.o
-OBJECT_FILES_NON_STANDARD_vdso-image-32.o := n
-OBJECT_FILES_NON_STANDARD_vdso-image-64.o := n
-OBJECT_FILES_NON_STANDARD_vdso32-setup.o := n
-
vobjs := $(addprefix $(obj)/, $(vobjs-y))
vobjs32 := $(addprefix $(obj)/, $(vobjs32-y))
@@ -175,11 +151,10 @@ quiet_cmd_vdso = VDSO $@
cmd_vdso = $(LD) -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-T $(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+ sh $(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \
$(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack
-GCOV_PROFILE := n
quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index a3c0df11d0e6..2fb7d53cf333 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -98,11 +98,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
- /*
- * XXX: if access_ok, get_user, and put_user handled
- * sig_on_uaccess_err, this could go away.
- */
-
if (!access_ok((void __user *)ptr, size)) {
struct thread_struct *thread = &current->thread;
@@ -120,10 +115,8 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
bool emulate_vsyscall(unsigned long error_code,
struct pt_regs *regs, unsigned long address)
{
- struct task_struct *tsk;
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
- int prev_sig_on_uaccess_err;
long ret;
unsigned long orig_dx;
@@ -172,8 +165,6 @@ bool emulate_vsyscall(unsigned long error_code,
goto sigsegv;
}
- tsk = current;
-
/*
* Check for access_ok violations and find the syscall nr.
*
@@ -234,12 +225,8 @@ bool emulate_vsyscall(unsigned long error_code,
goto do_ret; /* skip requested */
/*
- * With a real vsyscall, page faults cause SIGSEGV. We want to
- * preserve that behavior to make writing exploits harder.
+ * With a real vsyscall, page faults cause SIGSEGV.
*/
- prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
- current->thread.sig_on_uaccess_err = 1;
-
ret = -EFAULT;
switch (vsyscall_nr) {
case 0:
@@ -262,23 +249,12 @@ bool emulate_vsyscall(unsigned long error_code,
break;
}
- current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
-
check_fault:
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall fault (exploit attempt?)");
-
- /*
- * If we failed to generate a signal for any reason,
- * generate one here. (This should be impossible.)
- */
- if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
- !sigismember(&tsk->pending.signal, SIGSEGV)))
- goto sigsegv;
-
- return true; /* Don't emulate the ret. */
+ goto sigsegv;
}
regs->ax = ret;
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index aec16e581f5b..1fc4ce44e743 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -250,7 +250,7 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
/*
* AMD Performance Monitor Family 17h and later:
*/
-static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
@@ -262,10 +262,39 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
};
+static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
+};
+
+static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120,
+};
+
static u64 amd_pmu_event_map(int hw_event)
{
- if (boot_cpu_data.x86 >= 0x17)
- return amd_f17h_perfmon_event_map[hw_event];
+ if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a)
+ return amd_zen4_perfmon_event_map[hw_event];
+
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
+ return amd_zen2_perfmon_event_map[hw_event];
+
+ if (cpu_feature_enabled(X86_FEATURE_ZEN1))
+ return amd_zen1_perfmon_event_map[hw_event];
return amd_perfmon_event_map[hw_event];
}
@@ -618,7 +647,7 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
-static inline void amd_pmu_set_global_ctl(u64 ctl)
+static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
{
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}
@@ -878,6 +907,37 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
return amd_pmu_adjust_nmi_window(handled);
}
+/*
+ * AMD-specific callback invoked through perf_snapshot_branch_stack static
+ * call, defined in include/linux/perf_event.h. See its definition for API
+ * details. It's up to the caller to provide enough space in *entries* to fit
+ * all LBR records; otherwise the result will be truncated to *cnt* entries.
+ */
+static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
+{
+ struct cpu_hw_events *cpuc;
+ unsigned long flags;
+
+ /*
+ * The sequence of steps to freeze the LBR should be completely inlined
+ * and contain no branches, to minimize contamination of the LBR snapshot.
+ */
+ local_irq_save(flags);
+ amd_pmu_core_disable_all();
+ __amd_pmu_lbr_disable();
+
+ cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ amd_pmu_lbr_read();
+ cnt = min(cnt, x86_pmu.lbr_nr);
+ memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
+
+ amd_pmu_v2_enable_all(0);
+ local_irq_restore(flags);
+
+ return cnt;
+}
+
static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -904,8 +964,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
if (!status)
goto done;
- /* Read branch records before unfreezing */
- if (status & GLOBAL_STATUS_LBRS_FROZEN) {
+ /* Read branch records */
+ if (x86_pmu.lbr_nr) {
amd_pmu_lbr_read();
status &= ~GLOBAL_STATUS_LBRS_FROZEN;
}
@@ -1414,6 +1474,10 @@ static int __init amd_core_pmu_init(void)
static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
+
+ /* Only support branch_stack snapshot on perfmon v2 */
+ if (x86_pmu.handle_irq == amd_pmu_v2_handle_irq)
+ static_call_update(perf_snapshot_branch_stack, amd_pmu_v2_snapshot_branch_stack);
} else if (!amd_brs_init()) {
/*
* BRS requires special event constraints and flushing on ctxsw.
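As a usage illustration of the snapshot callback wired up above, a hypothetical consumer (an assumption for illustration, not code from this patch) reaches it through the perf_snapshot_branch_stack static call and bounds the number of returned records with the size of its buffer:

	/* Hypothetical caller sketch; buffer capacity is the caller's choice. */
	static int snapshot_lbrs_sketch(void)
	{
		struct perf_branch_entry entries[16];	/* assumed capacity */
		int copied;

		/* Returns min(requested, x86_pmu.lbr_nr) records copied into entries[]. */
		copied = static_call(perf_snapshot_branch_stack)(entries, ARRAY_SIZE(entries));
		return copied;
	}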
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index 4a1e600314d5..19c7b76e21bc 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -310,10 +310,6 @@ int amd_pmu_lbr_hw_config(struct perf_event *event)
{
int ret = 0;
- /* LBR is not recommended in counting mode */
- if (!is_sampling_event(event))
- return -EINVAL;
-
ret = amd_pmu_lbr_setup_filter(event);
if (!ret)
event->attach_state |= PERF_ATTACH_SCHED_CB;
@@ -402,26 +398,23 @@ void amd_pmu_lbr_enable_all(void)
wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
}
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}
void amd_pmu_lbr_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- u64 dbg_ctl, dbg_extn_cfg;
if (!cpuc->lbr_users || !x86_pmu.lbr_nr)
return;
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-
- wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ __amd_pmu_lbr_disable();
}
__init int amd_pmu_lbr_init(void)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 09050641ce5d..5b0dd07b1ef1 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+ cpuc->assign[i-1] = cpuc->assign[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 326c8cd5aa2d..e64eaa8dda5a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -143,12 +143,6 @@ struct cstate_model {
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
#define KNL_CORE_C6_MSR (1UL << 1)
-struct perf_cstate_msr {
- u64 msr;
- struct perf_pmu_events_attr *attr;
-};
-
-
/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;
@@ -696,78 +690,78 @@ static const struct cstate_model srf_cstates __initconst = {
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhm_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhm_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snb_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &snb_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &snb_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hswult_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &slm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &slm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &slm_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &snb_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &snb_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &snb_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &hswult_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &hswult_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &hswult_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &hswult_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnl_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &icx_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &icx_cstates),
-
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
+ X86_MATCH_VFM(INTEL_NEHALEM, &nhm_cstates),
+ X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_cstates),
+ X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhm_cstates),
+
+ X86_MATCH_VFM(INTEL_WESTMERE, &nhm_cstates),
+ X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_cstates),
+ X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhm_cstates),
+
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_cstates),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snb_cstates),
+
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &snb_cstates),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &snb_cstates),
+
+ X86_MATCH_VFM(INTEL_HASWELL, &snb_cstates),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &snb_cstates),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &snb_cstates),
+
+ X86_MATCH_VFM(INTEL_HASWELL_L, &hswult_cstates),
+
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &slm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &slm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &slm_cstates),
+
+ X86_MATCH_VFM(INTEL_BROADWELL, &snb_cstates),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &snb_cstates),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &snb_cstates),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &snb_cstates),
+
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &snb_cstates),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &snb_cstates),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &snb_cstates),
+
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &hswult_cstates),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &hswult_cstates),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &hswult_cstates),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &hswult_cstates),
+
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnl_cstates),
+
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_cstates),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_cstates),
+
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &glm_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &srf_cstates),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &grr_cstates),
+
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_cstates),
+ X86_MATCH_VFM(INTEL_ICELAKE, &icl_cstates),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_cstates),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_cstates),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &icx_cstates),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &icx_cstates),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &icx_cstates),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &icx_cstates),
+
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &icl_cstates),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &icl_cstates),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &icl_cstates),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_cstates),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_cstates),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_cstates),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_cstates),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates),
+ X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2641ba620f12..e010bfed8417 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1237,11 +1237,11 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct pmu *pmu = event->pmu;
/*
- * Make sure we get updated with the first PEBS
- * event. It will trigger also during removal, but
- * that does not hurt:
+ * Make sure we get updated with the first PEBS event.
+ * During removal, ->pebs_data_cfg is still valid for
+ * the last PEBS event. Don't clear it.
*/
- if (cpuc->n_pebs == 1)
+ if ((cpuc->n_pebs == 1) && add)
cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) {
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 78cd5084104e..dc641b50814e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -2,6 +2,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
+#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
@@ -1457,7 +1458,7 @@ void __init intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze
* on PMU interrupt
*/
- if (boot_cpu_data.x86_model == 28
+ if (boot_cpu_data.x86_vfm == INTEL_ATOM_BONNELL
&& boot_cpu_data.x86_stepping < 10) {
pr_cont("LBR disabled due to erratum");
return;
@@ -1693,6 +1694,7 @@ void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
lbr->from = x86_pmu.lbr_from;
lbr->to = x86_pmu.lbr_to;
lbr->info = x86_pmu.lbr_info;
+ lbr->has_callstack = x86_pmu_has_lbr_callstack();
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 8e2a12235e62..14db6d9d318b 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -22,7 +22,7 @@
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include "../perf_event.h"
#include "pt.h"
@@ -211,11 +211,11 @@ static int __init pt_pmu_hw_init(void)
}
/* model-specific quirks */
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_D:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_BROADWELL_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_BROADWELL:
+ case INTEL_BROADWELL_D:
+ case INTEL_BROADWELL_G:
+ case INTEL_BROADWELL_X:
/* not setting BRANCH_EN will #GP, erratum BDM106 */
pt_pmu.branch_en_always_on = true;
break;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 258e2cdf28fa..419c517b8594 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1829,56 +1829,56 @@ static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &mtl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_uncore_init),
+ X86_MATCH_VFM(INTEL_NEHALEM, &nhm_uncore_init),
+ X86_MATCH_VFM(INTEL_WESTMERE, &nhm_uncore_init),
+ X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_uncore_init),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_uncore_init),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &ivb_uncore_init),
+ X86_MATCH_VFM(INTEL_HASWELL, &hsw_uncore_init),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &hsw_uncore_init),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &hsw_uncore_init),
+ X86_MATCH_VFM(INTEL_BROADWELL, &bdw_uncore_init),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &bdw_uncore_init),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snbep_uncore_init),
+ X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhmex_uncore_init),
+ X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhmex_uncore_init),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ivbep_uncore_init),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &hswep_uncore_init),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &bdx_uncore_init),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &bdx_uncore_init),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_uncore_init),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_uncore_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_uncore_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &skl_uncore_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_uncore_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_uncore_init),
+ X86_MATCH_VFM(INTEL_ICELAKE, &icl_uncore_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_uncore_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_uncore_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_uncore_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_uncore_init),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &rkl_uncore_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_METEORLAKE, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &snr_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index 92da8aaa5966..466833478e81 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */
+#include <asm/cpu_device_id.h>
#include "uncore.h"
/* NHM-EX event control */
@@ -1217,7 +1218,7 @@ static struct intel_uncore_type *nhmex_msr_uncores[] = {
void nhmex_uncore_cpu_init(void)
{
- if (boot_cpu_data.x86_model == 46)
+ if (boot_cpu_data.x86_vfm == INTEL_NEHALEM_EX)
uncore_nhmex = true;
else
nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 2eaf0f339849..74b8b21e8990 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
+#include <asm/cpu_device_id.h>
#include "uncore.h"
#include "uncore_discovery.h"
@@ -3285,7 +3286,7 @@ void bdx_uncore_cpu_init(void)
uncore_msr_uncores = bdx_msr_uncores;
/* Detect systems with no SBOXes */
- if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
+ if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
@@ -5394,7 +5395,7 @@ static int icx_iio_get_topology(struct intel_uncore_type *type)
static void icx_iio_set_mapping(struct intel_uncore_type *type)
{
/* Detect ICX-D system. This case is not supported */
- if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
+ if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
return;
}
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 9e237b30f017..45b1866ff051 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -2,7 +2,7 @@
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include "probe.h"
enum perf_msr_id {
@@ -43,75 +43,75 @@ static bool test_intel(int idx, void *data)
boot_cpu_data.x86 != 6)
return false;
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_NEHALEM_G:
- case INTEL_FAM6_NEHALEM_EP:
- case INTEL_FAM6_NEHALEM_EX:
-
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_WESTMERE_EP:
- case INTEL_FAM6_WESTMERE_EX:
-
- case INTEL_FAM6_SANDYBRIDGE:
- case INTEL_FAM6_SANDYBRIDGE_X:
-
- case INTEL_FAM6_IVYBRIDGE:
- case INTEL_FAM6_IVYBRIDGE_X:
-
- case INTEL_FAM6_HASWELL:
- case INTEL_FAM6_HASWELL_X:
- case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_G:
-
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_D:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_BROADWELL_X:
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
- case INTEL_FAM6_GRANITERAPIDS_X:
- case INTEL_FAM6_GRANITERAPIDS_D:
-
- case INTEL_FAM6_ATOM_SILVERMONT:
- case INTEL_FAM6_ATOM_SILVERMONT_D:
- case INTEL_FAM6_ATOM_AIRMONT:
-
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_D:
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
- case INTEL_FAM6_ATOM_TREMONT_D:
- case INTEL_FAM6_ATOM_TREMONT:
- case INTEL_FAM6_ATOM_TREMONT_L:
-
- case INTEL_FAM6_XEON_PHI_KNL:
- case INTEL_FAM6_XEON_PHI_KNM:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_NEHALEM:
+ case INTEL_NEHALEM_G:
+ case INTEL_NEHALEM_EP:
+ case INTEL_NEHALEM_EX:
+
+ case INTEL_WESTMERE:
+ case INTEL_WESTMERE_EP:
+ case INTEL_WESTMERE_EX:
+
+ case INTEL_SANDYBRIDGE:
+ case INTEL_SANDYBRIDGE_X:
+
+ case INTEL_IVYBRIDGE:
+ case INTEL_IVYBRIDGE_X:
+
+ case INTEL_HASWELL:
+ case INTEL_HASWELL_X:
+ case INTEL_HASWELL_L:
+ case INTEL_HASWELL_G:
+
+ case INTEL_BROADWELL:
+ case INTEL_BROADWELL_D:
+ case INTEL_BROADWELL_G:
+ case INTEL_BROADWELL_X:
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_GRANITERAPIDS_D:
+
+ case INTEL_ATOM_SILVERMONT:
+ case INTEL_ATOM_SILVERMONT_D:
+ case INTEL_ATOM_AIRMONT:
+
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_D:
+ case INTEL_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_TREMONT_D:
+ case INTEL_ATOM_TREMONT:
+ case INTEL_ATOM_TREMONT_L:
+
+ case INTEL_XEON_PHI_KNL:
+ case INTEL_XEON_PHI_KNM:
if (idx == PERF_MSR_SMI)
return true;
break;
- case INTEL_FAM6_SKYLAKE_L:
- case INTEL_FAM6_SKYLAKE:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_L:
- case INTEL_FAM6_KABYLAKE:
- case INTEL_FAM6_COMETLAKE_L:
- case INTEL_FAM6_COMETLAKE:
- case INTEL_FAM6_ICELAKE_L:
- case INTEL_FAM6_ICELAKE:
- case INTEL_FAM6_ICELAKE_X:
- case INTEL_FAM6_ICELAKE_D:
- case INTEL_FAM6_TIGERLAKE_L:
- case INTEL_FAM6_TIGERLAKE:
- case INTEL_FAM6_ROCKETLAKE:
- case INTEL_FAM6_ALDERLAKE:
- case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ATOM_GRACEMONT:
- case INTEL_FAM6_RAPTORLAKE:
- case INTEL_FAM6_RAPTORLAKE_P:
- case INTEL_FAM6_RAPTORLAKE_S:
- case INTEL_FAM6_METEORLAKE:
- case INTEL_FAM6_METEORLAKE_L:
+ case INTEL_SKYLAKE_L:
+ case INTEL_SKYLAKE:
+ case INTEL_SKYLAKE_X:
+ case INTEL_KABYLAKE_L:
+ case INTEL_KABYLAKE:
+ case INTEL_COMETLAKE_L:
+ case INTEL_COMETLAKE:
+ case INTEL_ICELAKE_L:
+ case INTEL_ICELAKE:
+ case INTEL_ICELAKE_X:
+ case INTEL_ICELAKE_D:
+ case INTEL_TIGERLAKE_L:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
+ case INTEL_ALDERLAKE:
+ case INTEL_ALDERLAKE_L:
+ case INTEL_ATOM_GRACEMONT:
+ case INTEL_RAPTORLAKE:
+ case INTEL_RAPTORLAKE_P:
+ case INTEL_RAPTORLAKE_S:
+ case INTEL_METEORLAKE:
+ case INTEL_METEORLAKE_L:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index fb56518356ec..72b022a1e16c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1329,6 +1329,19 @@ void amd_pmu_lbr_enable_all(void);
void amd_pmu_lbr_disable_all(void);
int amd_pmu_lbr_hw_config(struct perf_event *event);
+static __always_inline void __amd_pmu_lbr_disable(void)
+{
+ u64 dbg_ctl, dbg_extn_cfg;
+
+ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
+
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }
+}
+
#ifdef CONFIG_PERF_EVENTS_AMD_BRS
#define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index fb2b1961e5a3..46e673585560 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -114,8 +114,8 @@ struct rapl_pmu {
struct rapl_pmus {
struct pmu pmu;
- unsigned int maxdie;
- struct rapl_pmu *pmus[] __counted_by(maxdie);
+ unsigned int nr_rapl_pmu;
+ struct rapl_pmu *pmus[] __counted_by(nr_rapl_pmu);
};
enum rapl_unit_quirk {
@@ -141,13 +141,13 @@ static struct perf_msr *rapl_msrs;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- unsigned int dieid = topology_logical_die_id(cpu);
+ unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
/*
* The unsigned check also catches the '-1' return value for
* non-existent mappings in the topology map.
*/
- return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
+ return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->pmus[rapl_pmu_idx] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
@@ -658,7 +658,7 @@ static void cleanup_rapl_pmus(void)
{
int i;
- for (i = 0; i < rapl_pmus->maxdie; i++)
+ for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
@@ -674,15 +674,13 @@ static const struct attribute_group *rapl_attr_update[] = {
static int __init init_rapl_pmus(void)
{
- int maxdie = topology_max_packages() * topology_max_dies_per_package();
- size_t size;
+ int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
- size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
- rapl_pmus = kzalloc(size, GFP_KERNEL);
+ rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
if (!rapl_pmus)
return -ENOMEM;
- rapl_pmus->maxdie = maxdie;
+ rapl_pmus->nr_rapl_pmu = nr_rapl_pmu;
rapl_pmus->pmu.attr_groups = rapl_attr_groups;
rapl_pmus->pmu.attr_update = rapl_attr_update;
rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
@@ -808,6 +806,9 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &model_skl),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
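A brief note on the struct_size() conversion above, since the equivalence may not be obvious: for this flexible-array allocation it computes the same size as the removed open-coded arithmetic, but saturates on overflow. A minimal sketch using the names from the hunk:

	/*
	 * struct_size(rapl_pmus, pmus, n) is sizeof(*rapl_pmus) + n * sizeof(rapl_pmus->pmus[0]),
	 * except that an overflowing 'n' yields SIZE_MAX, so kzalloc() fails
	 * rather than returning an undersized buffer.
	 */
	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;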
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5fc45543e955..0569f579338b 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -105,7 +105,7 @@ static bool cpu_is_self(int cpu)
* IPI implementation on Hyper-V.
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
- bool exclude_self)
+ bool exclude_self)
{
struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
@@ -132,8 +132,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
- exclude_self ? cpu_is_self : NULL);
+ nr_bank = cpumask_to_vpset_skip(&ipi_arg->vp_set, mask,
+ exclude_self ? cpu_is_self : NULL);
/*
* 'nr_bank <= 0' means some CPUs in cpumask can't be
@@ -147,7 +147,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
}
status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
- ipi_arg, NULL);
+ ipi_arg, NULL);
ipi_mask_ex_done:
local_irq_restore(flags);
@@ -155,7 +155,7 @@ ipi_mask_ex_done:
}
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
- bool exclude_self)
+ bool exclude_self)
{
int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;
@@ -181,7 +181,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
return false;
}
- if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;
/*
@@ -218,7 +218,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
}
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
- ipi_arg.cpu_mask);
+ ipi_arg.cpu_mask);
return hv_result_success(status);
do_ex_hypercall:
@@ -241,7 +241,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;
}
- if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;
if (vp >= 64)
diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c
index 68a0843d4750..3fa1f2ee7b0d 100644
--- a/arch/x86/hyperv/hv_proc.c
+++ b/arch/x86/hyperv/hv_proc.c
@@ -3,7 +3,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
-#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
@@ -116,12 +115,11 @@ free_buf:
int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
- struct hv_add_logical_processor_in *input;
- struct hv_add_logical_processor_out *output;
+ struct hv_input_add_logical_processor *input;
+ struct hv_output_add_logical_processor *output;
u64 status;
unsigned long flags;
int ret = HV_STATUS_SUCCESS;
- int pxm = node_to_pxm(node);
/*
* When adding a logical processor, the hypervisor may return
@@ -137,11 +135,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
input->lp_index = lp_index;
input->apic_id = apic_id;
- input->flags = 0;
- input->proximity_domain_info.domain_id = pxm;
- input->proximity_domain_info.flags.reserved = 0;
- input->proximity_domain_info.flags.proximity_info_valid = 1;
- input->proximity_domain_info.flags.proximity_preferred = 1;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
input, output);
local_irq_restore(flags);
@@ -166,7 +160,6 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
u64 status;
unsigned long irq_flags;
int ret = HV_STATUS_SUCCESS;
- int pxm = node_to_pxm(node);
/* Root VPs don't seem to need pages deposited */
if (partition_id != hv_current_partition_id) {
@@ -185,14 +178,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
input->vp_index = vp_index;
input->flags = flags;
input->subnode_type = HvSubnodeAny;
- if (node != NUMA_NO_NODE) {
- input->proximity_domain_info.domain_id = pxm;
- input->proximity_domain_info.flags.reserved = 0;
- input->proximity_domain_info.flags.proximity_info_valid = 1;
- input->proximity_domain_info.flags.proximity_preferred = 1;
- } else {
- input->proximity_domain_info.as_uint64 = 0;
- }
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
local_irq_restore(irq_flags);
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 5c7de79423b8..04775346369c 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -34,7 +34,6 @@ void __init hv_vtl_init_platform(void)
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_mptable = x86_init_noop;
x86_init.mpparse.early_parse_smp_cfg = x86_init_noop;
- x86_init.mpparse.parse_smp_cfg = x86_init_noop;
x86_platform.get_wallclock = get_rtc_noop;
x86_platform.set_wallclock = set_rtc_noop;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index f896eed4516c..5af926c050f0 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -56,6 +56,8 @@ static inline void disable_acpi(void)
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
+extern int acpi_blacklisted(void);
+
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index fcd20c6dc7f9..ba99ef75f56c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
return dest;
}
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
- void *func)
+ void *func, void *ip)
{
return 0;
}
@@ -286,20 +286,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
: : "i" (0), ## input)
-/*
- * This is similar to alternative_input. But it has two features and
- * respective instructions.
- *
- * If CPU has feature2, newinstr2 is used.
- * Otherwise, if CPU has feature1, newinstr1 is used.
- * Otherwise, oldinstr is used.
- */
-#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
- ft_flags2, input...) \
- asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
- newinstr2, ft_flags2) \
- : : "i" (0), ## input)
-
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, ft_flags, output, input...) \
asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
@@ -307,7 +293,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
- asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
+ asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
@@ -316,12 +302,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
-#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
- output, input...) \
- asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
- "call %P[new2]", ft_flags2) \
- : output, ASM_CALL_CONSTRAINT \
- : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
+ output, input...) \
+ asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
+ "call %c[new2]", ft_flags2) \
+ : output, ASM_CALL_CONSTRAINT \
+ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)
/*
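For reference on the %P -> %c change in the alternative_call() macros above: the "%c" output operand modifier prints a constant operand with no immediate punctuation, which is what lets "call %c[old]" expand to a plain "call oldfunc". A minimal userspace illustration of the modifier (GCC/Clang, x86 AT&T syntax; the constant 42 is an arbitrary example, not anything from the patch):

#include <stdio.h>

int main(void)
{
        int x;

        /* "%c1" prints the immediate 42 with no '$' punctuation, so the
         * template adds it back by hand; the emitted insn is "movl $42, %reg".
         */
        asm("movl $%c1, %0" : "=r" (x) : "i" (42));
        printf("%d\n", x);      /* 42 */
        return 0;
}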
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 94ce0f7c9d3a..9327eb00e96d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -13,6 +13,8 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
+#include <asm/io.h>
+#include <asm/posted_intr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -91,14 +93,14 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
- alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
+ alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
static inline u32 native_apic_mem_read(u32 reg)
{
- return *((volatile u32 *)(APIC_BASE + reg));
+ return readl((void __iomem *)(APIC_BASE + reg));
}
static inline void native_apic_mem_eoi(void)
@@ -499,6 +501,11 @@ static inline bool lapic_vector_set_in_irr(unsigned int vector)
return !!(irr & (1U << (vector % 32)));
}
+static inline bool is_vector_pending(unsigned int vector)
+{
+ return lapic_vector_set_in_irr(vector) || pi_pending_this_cpu(vector);
+}
+
/*
* Warm reset vector position:
*/
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 076bf8dee702..25466c4d2134 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -14,6 +14,7 @@
#include <asm/asm.h>
#include <asm/fred.h>
#include <asm/gsseg.h>
+#include <asm/nospec-branch.h>
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index ca8eed1d496a..2bec0c89a95c 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -229,9 +229,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)
-#define _ASM_EXTABLE_CPY(from, to) \
- _ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY)
-
#define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 55a55ec04350..55b4d24356ea 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -86,11 +86,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
}
#define arch_atomic_add_return arch_atomic_add_return
-static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
-{
- return arch_atomic_add_return(-i, v);
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
@@ -98,11 +94,7 @@ static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
-static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
-{
- return xadd(&v->counter, -i);
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 3486d91b8595..8db2ec4d6cda 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,6 +14,32 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
+/*
+ * Read an atomic64_t non-atomically.
+ *
+ * This is intended to be used in cases where a subsequent atomic operation
+ * will handle the torn value, and can be used to prime the first iteration
+ * of unconditional try_cmpxchg() loops, e.g.:
+ *
+ * s64 val = arch_atomic64_read_nonatomic(v);
+ * do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
+ *
+ * This is NOT safe to use where the value is not always checked by a
+ * subsequent atomic operation, such as in conditional try_cmpxchg() loops
+ * that can break before the atomic operation, e.g.:
+ *
+ * s64 val = arch_atomic64_read_nonatomic(v);
+ * do {
+ * if (condition(val))
+ * break;
+ * } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
+ */
+static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
+{
+ /* See comment in arch_atomic_read(). */
+ return __READ_ONCE(v->counter);
+}
+
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
@@ -24,7 +50,7 @@ typedef struct {
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
- asm volatile("call %P[func]" \
+ asm volatile("call %c[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
@@ -61,12 +87,18 @@ ATOMIC64_DECL(add_unless);
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT
-static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- return arch_cmpxchg64(&v->counter, o, n);
+ return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ return arch_try_cmpxchg64(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
s64 o;
@@ -195,69 +227,62 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
- return old;
+ return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}
static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
- return old;
+ return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}
static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
- return old;
+ return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
- s64 old, c = 0;
+ s64 val = arch_atomic64_read_nonatomic(v);
- while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
- c = old;
+ do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));
- return old;
+ return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
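The conversion above follows the pattern described in the arch_atomic64_read_nonatomic() comment: seed the loop with a plain (possibly torn) read, then let a failed try_cmpxchg() refresh the expected value. A minimal userspace sketch of that loop shape, written with the GCC/Clang __atomic builtins rather than the kernel's cmpxchg8b-based primitives:

#include <stdint.h>
#include <stdio.h>

static int64_t fetch_or64(int64_t *v, int64_t i)
{
        /* Non-atomic read just to seed the loop; a torn value is corrected
         * by the first failed compare-exchange, which reloads *v into val. */
        int64_t val = *(volatile int64_t *)v;

        while (!__atomic_compare_exchange_n(v, &val, val | i, 0 /* strong */,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                ;
        return val;     /* value observed just before the successful exchange */
}

int main(void)
{
        int64_t v = 0x1;

        printf("old=%#llx new=%#llx\n",
               (unsigned long long)fetch_or64(&v, 0xf0),
               (unsigned long long)v);  /* old=0x1 new=0xf1 */
        return 0;
}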
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3165c0feedf7..ae12acae5b06 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -80,11 +80,7 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
}
#define arch_atomic64_add_return arch_atomic64_add_return
-static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
- return arch_atomic64_add_return(-i, v);
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return(i, v) arch_atomic64_add_return(-(i), v)
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -92,11 +88,7 @@ static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- return xadd(&v->counter, -i);
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), v)
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index fe1e7e3cc844..63bdc6b85219 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -79,6 +79,9 @@ do { \
#define __smp_mb__before_atomic() do { } while (0)
#define __smp_mb__after_atomic() do { } while (0)
+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm() do { } while (0)
+
#include <asm-generic/barrier.h>
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index a3e0be0470a4..3e5b111e619d 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -6,11 +6,6 @@
#include <asm/pgtable_types.h>
#include <uapi/asm/boot.h>
-/* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
- + (CONFIG_PHYSICAL_ALIGN - 1)) \
- & ~(CONFIG_PHYSICAL_ALIGN - 1))
-
/* Minimum kernel alignment, as a power of two */
#ifdef CONFIG_X86_64
# define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index b5731c51f0f4..ed2797f132ce 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -3,103 +3,150 @@
#define _ASM_X86_CMPXCHG_32_H
/*
- * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
+ * Note: if you use __cmpxchg64() or its variants,
* you need to test for the feature in boot_cpu_data.
*/
-#ifdef CONFIG_X86_CMPXCHG64
-#define arch_cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
- (unsigned long long)(n)))
-#define arch_cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
- (unsigned long long)(n)))
-#define arch_try_cmpxchg64(ptr, po, n) \
- __try_cmpxchg64((ptr), (unsigned long long *)(po), \
- (unsigned long long)(n))
-#endif
+union __u64_halves {
+ u64 full;
+ struct {
+ u32 low, high;
+ };
+};
+
+#define __arch_cmpxchg64(_ptr, _old, _new, _lock) \
+({ \
+ union __u64_halves o = { .full = (_old), }, \
+ n = { .full = (_new), }; \
+ \
+ asm volatile(_lock "cmpxchg8b %[ptr]" \
+ : [ptr] "+m" (*(_ptr)), \
+ "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high) \
+ : "memory"); \
+ \
+ o.full; \
+})
+
+
+static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+ return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
+}
-static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
- u64 prev;
- asm volatile(LOCK_PREFIX "cmpxchg8b %1"
- : "=A" (prev),
- "+m" (*ptr)
- : "b" ((u32)new),
- "c" ((u32)(new >> 32)),
- "0" (old)
- : "memory");
- return prev;
+ return __arch_cmpxchg64(ptr, old, new,);
}
-static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock) \
+({ \
+ union __u64_halves o = { .full = *(_oldp), }, \
+ n = { .full = (_new), }; \
+ bool ret; \
+ \
+ asm volatile(_lock "cmpxchg8b %[ptr]" \
+ CC_SET(e) \
+ : CC_OUT(e) (ret), \
+ [ptr] "+m" (*(_ptr)), \
+ "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high) \
+ : "memory"); \
+ \
+ if (unlikely(!ret)) \
+ *(_oldp) = o.full; \
+ \
+ likely(ret); \
+})
+
+static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
- u64 prev;
- asm volatile("cmpxchg8b %1"
- : "=A" (prev),
- "+m" (*ptr)
- : "b" ((u32)new),
- "c" ((u32)(new >> 32)),
- "0" (old)
- : "memory");
- return prev;
+ return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
}
-static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
+static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
- bool success;
- u64 old = *pold;
- asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
- CC_SET(z)
- : CC_OUT(z) (success),
- [ptr] "+m" (*ptr),
- "+A" (old)
- : "b" ((u32)new),
- "c" ((u32)(new >> 32))
- : "memory");
-
- if (unlikely(!success))
- *pold = old;
- return success;
+ return __arch_try_cmpxchg64(ptr, oldp, new,);
}
-#ifndef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CMPXCHG64
+
+#define arch_cmpxchg64 __cmpxchg64
+
+#define arch_cmpxchg64_local __cmpxchg64_local
+
+#define arch_try_cmpxchg64 __try_cmpxchg64
+
+#define arch_try_cmpxchg64_local __try_cmpxchg64_local
+
+#else
+
/*
* Building a kernel capable of running on 80386 and 80486. It may be necessary
* to simulate the cmpxchg8b on the 80386 and 80486 CPU.
*/
-#define arch_cmpxchg64(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __typeof__(*(ptr)) __old = (o); \
- __typeof__(*(ptr)) __new = (n); \
- alternative_io(LOCK_PREFIX_HERE \
- "call cmpxchg8b_emu", \
- "lock; cmpxchg8b (%%esi)" , \
- X86_FEATURE_CX8, \
- "=A" (__ret), \
- "S" ((ptr)), "0" (__old), \
- "b" ((unsigned int)__new), \
- "c" ((unsigned int)(__new>>32)) \
- : "memory"); \
- __ret; })
-
-
-#define arch_cmpxchg64_local(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __typeof__(*(ptr)) __old = (o); \
- __typeof__(*(ptr)) __new = (n); \
- alternative_io("call cmpxchg8b_emu", \
- "cmpxchg8b (%%esi)" , \
- X86_FEATURE_CX8, \
- "=A" (__ret), \
- "S" ((ptr)), "0" (__old), \
- "b" ((unsigned int)__new), \
- "c" ((unsigned int)(__new>>32)) \
- : "memory"); \
- __ret; })
+#define __arch_cmpxchg64_emu(_ptr, _old, _new, _lock_loc, _lock) \
+({ \
+ union __u64_halves o = { .full = (_old), }, \
+ n = { .full = (_new), }; \
+ \
+ asm volatile(ALTERNATIVE(_lock_loc \
+ "call cmpxchg8b_emu", \
+ _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ : [ptr] "+m" (*(_ptr)), \
+ "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ : "memory"); \
+ \
+ o.full; \
+})
+
+static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+ return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
+}
+#define arch_cmpxchg64 arch_cmpxchg64
+
+static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+{
+ return __arch_cmpxchg64_emu(ptr, old, new, ,);
+}
+#define arch_cmpxchg64_local arch_cmpxchg64_local
+
+#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new, _lock_loc, _lock) \
+({ \
+ union __u64_halves o = { .full = *(_oldp), }, \
+ n = { .full = (_new), }; \
+ bool ret; \
+ \
+ asm volatile(ALTERNATIVE(_lock_loc \
+ "call cmpxchg8b_emu", \
+ _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ CC_SET(e) \
+ : CC_OUT(e) (ret), \
+ [ptr] "+m" (*(_ptr)), \
+ "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ : "memory"); \
+ \
+ if (unlikely(!ret)) \
+ *(_oldp) = o.full; \
+ \
+ likely(ret); \
+})
+
+static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
+{
+ return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
+}
+#define arch_try_cmpxchg64 arch_try_cmpxchg64
+
+static __always_inline bool arch_try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
+{
+ return __arch_try_cmpxchg64_emu(ptr, oldp, new, ,);
+}
+#define arch_try_cmpxchg64_local arch_try_cmpxchg64_local
#endif
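The new __u64_halves union above is what feeds cmpxchg8b its register pairs: the 64-bit operand is viewed as a low/high pair of 32-bit halves, the old value in EDX:EAX and the new value in ECX:EBX. A small userspace sketch of the split, assuming the usual little-endian x86 layout (illustration only, not the kernel header):

#include <stdint.h>
#include <stdio.h>

union u64_halves {
        uint64_t full;
        struct {
                uint32_t low, high;
        };
};

int main(void)
{
        union u64_halves o = { .full = 0x1122334455667788ULL };

        /* low half -> EAX/EBX operand, high half -> EDX/ECX operand */
        printf("full=%#llx low=%#x high=%#x\n",
               (unsigned long long)o.full, o.low, o.high);
        /* prints: full=0x1122334455667788 low=0x55667788 high=0x11223344 */
        return 0;
}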
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 44b08b53ab32..5e241306db26 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -20,6 +20,12 @@
arch_try_cmpxchg((ptr), (po), (n)); \
})
+#define arch_try_cmpxchg64_local(ptr, po, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_try_cmpxchg_local((ptr), (po), (n)); \
+})
+
union __u128_halves {
u128 full;
struct {
@@ -62,7 +68,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
asm volatile(_lock "cmpxchg16b %[ptr]" \
CC_SET(e) \
: CC_OUT(e) (ret), \
- [ptr] "+m" (*ptr), \
+ [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high) \
: "memory"); \
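The one-character fix above ([ptr] "+m" (*(_ptr)) instead of *ptr) makes the macro's memory operand refer to its own _ptr argument rather than relying on a caller-scope identifier that happens to be named ptr. An illustrative userspace sketch of that macro-hygiene pitfall (the macro and variable names below are invented for the example):

#include <stdio.h>

#define READ_BUGGY(_ptr)  (*ptr)        /* silently binds to caller's "ptr" */
#define READ_FIXED(_ptr)  (*(_ptr))     /* always uses the macro argument   */

int main(void)
{
        int a = 1, b = 2;
        int *ptr = &a;          /* caller-scope name the buggy macro captures */

        printf("%d %d\n", READ_BUGGY(&b), READ_FIXED(&b));     /* prints "1 2" */
        return 0;
}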
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index fb7388bbc212..aa6c8f8ca958 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -22,8 +22,10 @@ static inline void cc_set_mask(u64 mask)
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
+void cc_random_init(void);
#else
#define cc_vendor (CC_VENDOR_NONE)
+static const u64 cc_mask = 0;
static inline u64 cc_mkenc(u64 val)
{
@@ -34,6 +36,7 @@ static inline u64 cc_mkdec(u64 val)
{
return val;
}
+static inline void cc_random_init(void) { }
#endif
#endif /* _ASM_X86_COCO_H */
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index eb8fcede9e3b..970a232009c3 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -3,6 +3,39 @@
#define _ASM_X86_CPU_DEVICE_ID
/*
+ * Can't use <linux/bitfield.h> because it generates expressions that
+ * cannot be used in structure initializers. Bitfield construction
+ * here must match the union in struct cpuinfo_x86:
+ * union {
+ * struct {
+ * __u8 x86_model;
+ * __u8 x86;
+ * __u8 x86_vendor;
+ * __u8 x86_reserved;
+ * };
+ * __u32 x86_vfm;
+ * };
+ */
+#define VFM_MODEL_BIT 0
+#define VFM_FAMILY_BIT 8
+#define VFM_VENDOR_BIT 16
+#define VFM_RSVD_BIT 24
+
+#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
+#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
+#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
+
+#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
+#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
+#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
+
+#define VFM_MAKE(_vendor, _family, _model) ( \
+ ((_model) << VFM_MODEL_BIT) | \
+ ((_family) << VFM_FAMILY_BIT) | \
+ ((_vendor) << VFM_VENDOR_BIT) \
+)
+
+/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
*
@@ -49,6 +82,16 @@
.driver_data = (unsigned long) _data \
}
+#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
+ _steppings, _feature, _data) { \
+ .vendor = _vendor, \
+ .family = _family, \
+ .model = _model, \
+ .steppings = _steppings, \
+ .feature = _feature, \
+ .driver_data = (unsigned long) _data \
+}
+
/**
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
@@ -164,6 +207,56 @@
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
steppings, X86_FEATURE_ANY, data)
+/**
+ * X86_MATCH_VFM - Match encoded vendor/family/model
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Stepping and feature are set to wildcards
+ */
+#define X86_MATCH_VFM(vfm, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @steppings: Bitmask of steppings to match
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * feature is set to wildcard
+ */
+#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ steppings, X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Steppings are set to wildcard
+ */
+#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, feature, data)
+
/*
* Match specific microcode revisions.
*
@@ -190,6 +283,14 @@ struct x86_cpu_desc {
.x86_microcode_rev = (revision), \
}
+#define AMD_CPU_DESC(fam, model, stepping, revision) { \
+ .x86_family = (fam), \
+ .x86_vendor = X86_VENDOR_AMD, \
+ .x86_model = (model), \
+ .x86_stepping = (stepping), \
+ .x86_microcode_rev = (revision), \
+}
+
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
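A minimal userspace sketch of the VFM_* packing added above: vendor, family and model each occupy one 8-bit field of a 32-bit value and can be pulled back out with the matching accessors. GENMASK() is re-created locally for the example, and the vendor number 0 is used only as a sample value:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

#define VFM_MODEL_BIT   0
#define VFM_FAMILY_BIT  8
#define VFM_VENDOR_BIT  16

#define VFM_MODEL_MASK  GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)

#define VFM_MODEL(vfm)  (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)

#define VFM_MAKE(v, f, m)       (((m) << VFM_MODEL_BIT) | \
                                 ((f) << VFM_FAMILY_BIT) | \
                                 ((v) << VFM_VENDOR_BIT))

int main(void)
{
        uint32_t vfm = VFM_MAKE(0, 6, 0x5E);    /* vendor 0, family 6, model 0x5E */

        printf("vfm=%#x family=%u model=%#x\n",
               vfm, VFM_FAMILY(vfm), VFM_MODEL(vfm));
        /* prints: vfm=0x65e family=6 model=0x5e */
        return 0;
}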
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index a1273698fc43..0b9611da6c53 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -33,6 +33,8 @@ enum cpuid_leafs
CPUID_7_EDX,
CPUID_8000_001F_EAX,
CPUID_8000_0021_EAX,
+ CPUID_LNX_5,
+ NR_CPUID_WORDS,
};
#define X86_CAP_FMT_NUM "%d:%d"
@@ -91,8 +93,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -116,8 +119,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
@@ -125,8 +129,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
- x86_this_cpu_test_bit(bit, \
- (unsigned long __percpu *)&cpu_info.x86_capability))
+ x86_this_cpu_test_bit(bit, cpu_info.x86_capability))
/*
* This macro is for detection of features which need kernel
@@ -146,8 +149,12 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
extern void setup_clear_cpu_cap(unsigned int bit);
extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
-#define setup_force_cpu_cap(bit) do { \
- set_cpu_cap(&boot_cpu_data, bit); \
+#define setup_force_cpu_cap(bit) do { \
+ \
+ if (!boot_cpu_has(bit)) \
+ WARN_ON(alternatives_patched); \
+ \
+ set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
@@ -168,11 +175,10 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
- asm goto(
- ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+ asm goto(ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
".pushsection .altinstr_aux,\"ax\"\n"
"6:\n"
- " testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
+ " testb %[bitnum], %a[cap_byte]\n"
" jnz %l[t_yes]\n"
" jmp %l[t_no]\n"
".popsection\n"
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f0337f7bcf16..3c7434329661 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 21 /* N 32-bit words worth of info */
+#define NCAPINTS 22 /* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
@@ -460,6 +460,18 @@
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0x80000022, etc., and Linux defined features.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
+#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
+#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
+#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
+#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+
+/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
@@ -507,4 +519,5 @@
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
+#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#endif /* _ASM_X86_CPUFEATURES_H */
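For reference, the numbering scheme above is word * 32 + bit, and the X86_BUG() space starts right after the last capability word (NCAPINTS, now 22). A small arithmetic sketch:

#include <stdio.h>

#define NCAPINTS        22
#define X86_BUG(x)      (NCAPINTS * 32 + (x))

int main(void)
{
        unsigned int lbr_freeze = 21 * 32 + 0;          /* X86_FEATURE_AMD_LBR_PMC_FREEZE */
        unsigned int bug_bhi    = X86_BUG(1 * 32 + 3);  /* X86_BUG_BHI */

        printf("AMD_LBR_PMC_FREEZE: nr=%u -> word %u, bit %u\n",
               lbr_freeze, lbr_freeze / 32, lbr_freeze % 32);   /* 672 -> 21, 0 */
        printf("X86_BUG_BHI: nr=%u\n", bug_bhi);                /* 22*32 + 35 = 739 */
        return 0;
}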
diff --git a/arch/x86/include/asm/crash_reserve.h b/arch/x86/include/asm/crash_reserve.h
index 152239f95541..7835b2cdff04 100644
--- a/arch/x86/include/asm/crash_reserve.h
+++ b/arch/x86/include/asm/crash_reserve.h
@@ -39,4 +39,6 @@ static inline unsigned long crash_low_size_default(void)
#endif
}
+#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+
#endif /* _X86_CRASH_RESERVE_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index da4054fbf533..c492bdc97b05 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -155,6 +155,7 @@
#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 (DISABLE_SEV_SNP)
#define DISABLED_MASK20 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index e8f58ddd06d9..2e74a7f0e935 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -17,6 +17,7 @@ extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type);
extern void e820__range_add (u64 start, u64 size, enum e820_type type);
extern u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type);
extern u64 e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type);
+extern u64 e820__range_update_table(struct e820_table *t, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type);
extern void e820__print_table(char *who);
extern int e820__update_table(struct e820_table *table);
diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h
index 7acf0383be80..906b0d5541e8 100644
--- a/arch/x86/include/asm/extable_fixup_types.h
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -36,7 +36,7 @@
#define EX_TYPE_DEFAULT 1
#define EX_TYPE_FAULT 2
#define EX_TYPE_UACCESS 3
-#define EX_TYPE_COPY 4
+/* unused, was: #define EX_TYPE_COPY 4 */
#define EX_TYPE_CLEAR_FS 5
#define EX_TYPE_FPU_RESTORE 6
#define EX_TYPE_BPF 7
diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h
deleted file mode 100644
index c3b9582de7ef..000000000000
--- a/arch/x86/include/asm/fb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_FB_H
-#define _ASM_X86_FB_H
-
-#include <asm/page.h>
-
-struct fb_info;
-
-pgprot_t pgprot_framebuffer(pgprot_t prot,
- unsigned long vm_start, unsigned long vm_end,
- unsigned long offset);
-#define pgprot_framebuffer pgprot_framebuffer
-
-int fb_is_primary_device(struct fb_info *info);
-#define fb_is_primary_device fb_is_primary_device
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_X86_FB_H */
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a2be3aefff9f..f86ad3335529 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -143,6 +143,9 @@ extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfe
extern u64 xstate_get_guest_group_perm(void);
+extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
+
+
/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index fbc7722b87d1..c67fa6ad098a 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -44,10 +44,16 @@ typedef struct {
unsigned int irq_hv_reenlightenment_count;
unsigned int hyperv_stimer0_count;
#endif
+#ifdef CONFIG_X86_POSTED_MSI
+ unsigned int posted_msi_notification_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#ifdef CONFIG_X86_POSTED_MSI
+DECLARE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
+#endif
#define __ARCH_IRQ_STAT
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 4212c00c9708..9d69f3f8dbab 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -56,17 +56,6 @@ struct stat64 {
unsigned long long st_ino;
} __attribute__((packed));
-#define IA32_STACK_TOP IA32_PAGE_OFFSET
-
-#ifdef __KERNEL__
-struct linux_binprm;
-extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
- unsigned long stack_top, int exec_stack);
-struct mm_struct;
-extern void ia32_pick_mmap_layout(struct mm_struct *mm);
-
-#endif
-
extern bool __ia32_enabled;
static __always_inline bool ia32_enabled(void)
diff --git a/arch/x86/include/asm/ia32_unistd.h b/arch/x86/include/asm/ia32_unistd.h
deleted file mode 100644
index aa065c94ccf5..000000000000
--- a/arch/x86/include/asm/ia32_unistd.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_IA32_UNISTD_H
-#define _ASM_X86_IA32_UNISTD_H
-
-/*
- * This file contains the system call numbers of the ia32 compat ABI,
- * this is for the kernel only.
- */
-#define __SYSCALL_ia32_NR(x) (x)
-#include <asm/unistd_32_ia32.h>
-
-#endif /* _ASM_X86_IA32_UNISTD_H */
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 749c7411d2f1..d4f24499b256 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -751,6 +751,12 @@ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested
# define fred_sysvec_kvm_posted_intr_nested_ipi NULL
#endif
+# ifdef CONFIG_X86_POSTED_MSI
+DECLARE_IDTENTRY_SYSVEC(POSTED_MSI_NOTIFICATION_VECTOR, sysvec_posted_msi_notification);
+#else
+# define fred_sysvec_posted_msi_notification NULL
+# endif
+
#if IS_ENABLED(CONFIG_HYPERV)
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index b56c5741581a..53e4015242b4 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -35,6 +35,8 @@
#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
#define INAT_PFX_EVEX 15 /* EVEX prefix */
+/* x86-64 REX2 prefix */
+#define INAT_PFX_REX2 16 /* 0xD5 */
#define INAT_LSTPFX_MAX 3
#define INAT_LGCPFX_MAX 11
@@ -50,7 +52,7 @@
/* Legacy prefix */
#define INAT_PFX_OFFS 0
-#define INAT_PFX_BITS 4
+#define INAT_PFX_BITS 5
#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
/* Escape opcodes */
@@ -77,6 +79,9 @@
#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
+#define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8))
+#define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9))
+#define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -128,6 +133,11 @@ static inline int inat_is_rex_prefix(insn_attr_t attr)
return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
}
+static inline int inat_is_rex2_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_REX2;
+}
+
static inline int inat_last_prefix_id(insn_attr_t attr)
{
if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
@@ -227,4 +237,9 @@ static inline int inat_must_evex(insn_attr_t attr)
{
return attr & INAT_EVEXONLY;
}
+
+static inline int inat_evex_scalable(insn_attr_t attr)
+{
+ return attr & INAT_EVEX_SCALABLE;
+}
#endif
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 1b29f58f730f..7152ea809e6a 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -112,10 +112,15 @@ struct insn {
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib) ((sib) & 0x07)
-#define X86_REX_W(rex) ((rex) & 8)
-#define X86_REX_R(rex) ((rex) & 4)
-#define X86_REX_X(rex) ((rex) & 2)
-#define X86_REX_B(rex) ((rex) & 1)
+#define X86_REX2_M(rex) ((rex) & 0x80) /* REX2 M0 */
+#define X86_REX2_R(rex) ((rex) & 0x40) /* REX2 R4 */
+#define X86_REX2_X(rex) ((rex) & 0x20) /* REX2 X4 */
+#define X86_REX2_B(rex) ((rex) & 0x10) /* REX2 B4 */
+
+#define X86_REX_W(rex) ((rex) & 8) /* REX or REX2 W */
+#define X86_REX_R(rex) ((rex) & 4) /* REX or REX2 R3 */
+#define X86_REX_X(rex) ((rex) & 2) /* REX or REX2 X3 */
+#define X86_REX_B(rex) ((rex) & 1) /* REX or REX2 B3 */
/* VEX bit flags */
#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
@@ -161,6 +166,18 @@ static inline void insn_get_attribute(struct insn *insn)
/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
+static inline int insn_is_rex2(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return insn->rex_prefix.nbytes == 2;
+}
+
+static inline insn_byte_t insn_rex2_m_bit(struct insn *insn)
+{
+ return X86_REX2_M(insn->rex_prefix.bytes[1]);
+}
+
static inline int insn_is_avx(struct insn *insn)
{
if (!insn->prefixes.got)
@@ -198,6 +215,13 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
return X86_VEX_P(insn->vex_prefix.bytes[2]);
}
+static inline insn_byte_t insn_vex_w_bit(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes < 3)
+ return 0;
+ return X86_VEX_W(insn->vex_prefix.bytes[2]);
+}
+
/* Get the last prefix id from last prefix or VEX prefix */
static inline int insn_last_prefix_id(struct insn *insn)
{
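A small userspace sketch of the REX2 payload decoding added above: the byte following the 0xD5 escape carries M0/R4/X4/B4 in its high nibble and W/R3/X3/B3 in its low nibble. The payload value below is an arbitrary example:

#include <stdio.h>

#define X86_REX2_M(rex) ((rex) & 0x80)  /* REX2 M0 */
#define X86_REX2_R(rex) ((rex) & 0x40)  /* REX2 R4 */
#define X86_REX2_X(rex) ((rex) & 0x20)  /* REX2 X4 */
#define X86_REX2_B(rex) ((rex) & 0x10)  /* REX2 B4 */
#define X86_REX_W(rex)  ((rex) & 8)     /* REX or REX2 W */
#define X86_REX_R(rex)  ((rex) & 4)     /* REX or REX2 R3 */

int main(void)
{
        unsigned char payload = 0x4C;   /* example byte: R4=1, W=1, R3=1 */

        printf("M0=%d R4=%d X4=%d B4=%d W=%d R3=%d\n",
               !!X86_REX2_M(payload), !!X86_REX2_R(payload),
               !!X86_REX2_X(payload), !!X86_REX2_B(payload),
               !!X86_REX_W(payload), !!X86_REX_R(payload));
        /* prints: M0=0 R4=1 X4=0 B4=0 W=1 R3=1 */
        return 0;
}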
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index d0941f4c2724..f81a851c46dc 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -40,137 +40,221 @@
* their own names :-(
*/
+#define IFM(_fam, _model) VFM_MAKE(X86_VENDOR_INTEL, _fam, _model)
+
/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
#define INTEL_FAM6_ANY X86_MODEL_ANY
+/* Wildcard match for FAM6 so X86_MATCH_VFM(ANY) works */
+#define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
#define INTEL_FAM6_CORE_YONAH 0x0E
+#define INTEL_CORE_YONAH IFM(6, 0x0E)
#define INTEL_FAM6_CORE2_MEROM 0x0F
+#define INTEL_CORE2_MEROM IFM(6, 0x0F)
#define INTEL_FAM6_CORE2_MEROM_L 0x16
+#define INTEL_CORE2_MEROM_L IFM(6, 0x16)
#define INTEL_FAM6_CORE2_PENRYN 0x17
+#define INTEL_CORE2_PENRYN IFM(6, 0x17)
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
+#define INTEL_CORE2_DUNNINGTON IFM(6, 0x1D)
#define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_NEHALEM IFM(6, 0x1E)
#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
+#define INTEL_NEHALEM_G IFM(6, 0x1F) /* Auburndale / Havendale */
#define INTEL_FAM6_NEHALEM_EP 0x1A
+#define INTEL_NEHALEM_EP IFM(6, 0x1A)
#define INTEL_FAM6_NEHALEM_EX 0x2E
+#define INTEL_NEHALEM_EX IFM(6, 0x2E)
#define INTEL_FAM6_WESTMERE 0x25
+#define INTEL_WESTMERE IFM(6, 0x25)
#define INTEL_FAM6_WESTMERE_EP 0x2C
+#define INTEL_WESTMERE_EP IFM(6, 0x2C)
#define INTEL_FAM6_WESTMERE_EX 0x2F
+#define INTEL_WESTMERE_EX IFM(6, 0x2F)
#define INTEL_FAM6_SANDYBRIDGE 0x2A
+#define INTEL_SANDYBRIDGE IFM(6, 0x2A)
#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
+#define INTEL_SANDYBRIDGE_X IFM(6, 0x2D)
#define INTEL_FAM6_IVYBRIDGE 0x3A
+#define INTEL_IVYBRIDGE IFM(6, 0x3A)
#define INTEL_FAM6_IVYBRIDGE_X 0x3E
+#define INTEL_IVYBRIDGE_X IFM(6, 0x3E)
#define INTEL_FAM6_HASWELL 0x3C
+#define INTEL_HASWELL IFM(6, 0x3C)
#define INTEL_FAM6_HASWELL_X 0x3F
+#define INTEL_HASWELL_X IFM(6, 0x3F)
#define INTEL_FAM6_HASWELL_L 0x45
+#define INTEL_HASWELL_L IFM(6, 0x45)
#define INTEL_FAM6_HASWELL_G 0x46
+#define INTEL_HASWELL_G IFM(6, 0x46)
#define INTEL_FAM6_BROADWELL 0x3D
+#define INTEL_BROADWELL IFM(6, 0x3D)
#define INTEL_FAM6_BROADWELL_G 0x47
+#define INTEL_BROADWELL_G IFM(6, 0x47)
#define INTEL_FAM6_BROADWELL_X 0x4F
+#define INTEL_BROADWELL_X IFM(6, 0x4F)
#define INTEL_FAM6_BROADWELL_D 0x56
+#define INTEL_BROADWELL_D IFM(6, 0x56)
#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */
+#define INTEL_SKYLAKE_L IFM(6, 0x4E) /* Sky Lake */
#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */
+#define INTEL_SKYLAKE IFM(6, 0x5E) /* Sky Lake */
#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */
+#define INTEL_SKYLAKE_X IFM(6, 0x55) /* Sky Lake */
/* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */
/* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */
#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */
+#define INTEL_KABYLAKE_L IFM(6, 0x8E) /* Sky Lake */
/* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */
/* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */
/* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */
#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */
+#define INTEL_KABYLAKE IFM(6, 0x9E) /* Sky Lake */
/* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */
#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */
+#define INTEL_COMETLAKE IFM(6, 0xA5) /* Sky Lake */
#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */
+#define INTEL_COMETLAKE_L IFM(6, 0xA6) /* Sky Lake */
#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */
+#define INTEL_CANNONLAKE_L IFM(6, 0x66) /* Palm Cove */
#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */
+#define INTEL_ICELAKE_X IFM(6, 0x6A) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */
+#define INTEL_ICELAKE_D IFM(6, 0x6C) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */
+#define INTEL_ICELAKE IFM(6, 0x7D) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */
+#define INTEL_ICELAKE_L IFM(6, 0x7E) /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */
+#define INTEL_ICELAKE_NNPI IFM(6, 0x9D) /* Sunny Cove */
#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */
+#define INTEL_ROCKETLAKE IFM(6, 0xA7) /* Cypress Cove */
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
+#define INTEL_TIGERLAKE_L IFM(6, 0x8C) /* Willow Cove */
#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
+#define INTEL_TIGERLAKE IFM(6, 0x8D) /* Willow Cove */
#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
+#define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */
#define INTEL_FAM6_EMERALDRAPIDS_X 0xCF
+#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF)
#define INTEL_FAM6_GRANITERAPIDS_X 0xAD
+#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
#define INTEL_FAM6_GRANITERAPIDS_D 0xAE
+#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
+#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
+#define INTEL_ALDERLAKE IFM(6, 0x97) /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
+#define INTEL_ALDERLAKE_L IFM(6, 0x9A) /* Golden Cove / Gracemont */
#define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */
+#define INTEL_RAPTORLAKE IFM(6, 0xB7) /* Raptor Cove / Enhanced Gracemont */
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
+#define INTEL_RAPTORLAKE_P IFM(6, 0xBA)
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
+#define INTEL_RAPTORLAKE_S IFM(6, 0xBF)
#define INTEL_FAM6_METEORLAKE 0xAC
+#define INTEL_METEORLAKE IFM(6, 0xAC)
#define INTEL_FAM6_METEORLAKE_L 0xAA
+#define INTEL_METEORLAKE_L IFM(6, 0xAA)
#define INTEL_FAM6_ARROWLAKE_H 0xC5
+#define INTEL_ARROWLAKE_H IFM(6, 0xC5)
#define INTEL_FAM6_ARROWLAKE 0xC6
+#define INTEL_ARROWLAKE IFM(6, 0xC6)
#define INTEL_FAM6_ARROWLAKE_U 0xB5
+#define INTEL_ARROWLAKE_U IFM(6, 0xB5)
#define INTEL_FAM6_LUNARLAKE_M 0xBD
+#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
+#define INTEL_ATOM_BONNELL_MID IFM(6, 0x26) /* Silverthorne, Lincroft */
#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
+#define INTEL_ATOM_SALTWELL IFM(6, 0x36) /* Cedarview */
#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
+#define INTEL_ATOM_SALTWELL_MID IFM(6, 0x27) /* Penwell */
#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
+#define INTEL_ATOM_SALTWELL_TABLET IFM(6, 0x35) /* Cloverview */
#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
+#define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */
+#define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */
#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */
+#define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
+#define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */
#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
+#define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */
#define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */
+#define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+#define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */
+#define INTEL_ATOM_GOLDMONT_D IFM(6, 0x5F) /* Denverton */
/* Note: the micro-architecture is "Goldmont Plus" */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+#define INTEL_ATOM_GOLDMONT_PLUS IFM(6, 0x7A) /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */
+#define INTEL_ATOM_TREMONT_D IFM(6, 0x86) /* Jacobsville */
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
+#define INTEL_ATOM_TREMONT IFM(6, 0x96) /* Elkhart Lake */
#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
+#define INTEL_ATOM_TREMONT_L IFM(6, 0x9C) /* Jasper Lake */
#define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */
+#define INTEL_ATOM_GRACEMONT IFM(6, 0xBE) /* Alderlake N */
#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */
+#define INTEL_ATOM_CRESTMONT_X IFM(6, 0xAF) /* Sierra Forest */
#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */
+#define INTEL_ATOM_CRESTMONT IFM(6, 0xB6) /* Grand Ridge */
#define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */
+#define INTEL_ATOM_DARKMONT_X IFM(6, 0xDD) /* Clearwater Forest */
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+#define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
+#define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */
/* Family 5 */
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
+#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 294cd2a40818..1d60427379c9 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -42,6 +42,7 @@
#include <asm/early_ioremap.h>
#include <asm/pgtable_types.h>
#include <asm/shared/io.h>
+#include <asm/special_insns.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -209,6 +210,23 @@ void memset_io(volatile void __iomem *, int, size_t);
#define memcpy_toio memcpy_toio
#define memset_io memset_io
+#ifdef CONFIG_X86_64
+/*
+ * Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for
+ * x86_64") says that circa 2006 rep movsl is noticeably faster than a copy
+ * loop.
+ */
+static inline void __iowrite32_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ asm volatile("rep ; movsl"
+ : "=&c"(count), "=&D"(to), "=&S"(from)
+ : "0"(count), "1"(to), "2"(from)
+ : "memory");
+}
+#define __iowrite32_copy __iowrite32_copy
+#endif
+
/*
* ISA space is 'always mapped' on a typical x86 system, no need to
* explicitly ioremap() it. The fact that the ISA IO space is mapped
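For reference, a portable sketch of what the rep-movsl-based __iowrite32_copy() above does, i.e. copy count 32-bit words from from to to; this only illustrates the semantics and is not the kernel helper:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void iowrite32_copy_portable(volatile uint32_t *to,
                                    const uint32_t *from, size_t count)
{
        /* one 32-bit store per iteration, same effect as "rep movsl" */
        while (count--)
                *to++ = *from++;
}

int main(void)
{
        uint32_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };

        iowrite32_copy_portable(dst, src, 4);
        printf("%u %u %u %u\n", dst[0], dst[1], dst[2], dst[3]);        /* 1 2 3 4 */
        return 0;
}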
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 7a2ed154a5e1..5036f13ab69f 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -50,6 +50,13 @@ static inline struct irq_domain *arch_get_ir_parent_domain(void)
return x86_vector_domain;
}
+extern bool enable_posted_msi;
+
+static inline bool posted_msi_supported(void)
+{
+ return enable_posted_msi && irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
#else /* CONFIG_IRQ_REMAP */
static inline bool irq_remapping_cap(enum irq_remap_cap cap) { return 0; }
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 798183867d78..b71ad173f877 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -100,7 +100,7 @@
}
#define ASM_CALL_ARG0 \
- "call %P[__func] \n" \
+ "call %c[__func] \n" \
ASM_REACHABLE
#define ASM_CALL_ARG1 \
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d18bfb238f66..13aea8fc3d45 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -97,10 +97,16 @@
#define LOCAL_TIMER_VECTOR 0xec
+/*
+ * Posted interrupt notification vector for all device MSIs delivered to
+ * the host kernel.
+ */
+#define POSTED_MSI_NOTIFICATION_VECTOR 0xeb
+
#define NR_VECTORS 256
#ifdef CONFIG_X86_LOCAL_APIC
-#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#define FIRST_SYSTEM_VECTOR POSTED_MSI_NOTIFICATION_VECTOR
#else
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 91ca9a9ee3a2..ae5482a2f0ca 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -207,18 +207,11 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
extern void kdump_nmi_shootdown_cpus(void);
#ifdef CONFIG_CRASH_HOTPLUG
-void arch_crash_handle_hotplug_event(struct kimage *image);
+void arch_crash_handle_hotplug_event(struct kimage *image, void *arg);
#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event
-#ifdef CONFIG_HOTPLUG_CPU
-int arch_crash_hotplug_cpu_support(void);
-#define crash_hotplug_cpu_support arch_crash_hotplug_cpu_support
-#endif
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_crash_hotplug_memory_support(void);
-#define crash_hotplug_memory_support arch_crash_hotplug_memory_support
-#endif
+int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags);
+#define arch_crash_hotplug_support arch_crash_hotplug_support
unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 110d7f29ca9a..5187fcf4b610 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -121,6 +121,7 @@ KVM_X86_OP(enter_smm)
KVM_X86_OP(leave_smm)
KVM_X86_OP(enable_smi_window)
#endif
+KVM_X86_OP_OPTIONAL(dev_get_attr)
KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
KVM_X86_OP_OPTIONAL(mem_enc_register_region)
KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 16e07a2eee19..ece45b3f6f20 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -254,28 +254,31 @@ enum x86_intercept_stage;
KVM_GUESTDBG_INJECT_DB | \
KVM_GUESTDBG_BLOCKIRQ)
+#define PFERR_PRESENT_MASK BIT(0)
+#define PFERR_WRITE_MASK BIT(1)
+#define PFERR_USER_MASK BIT(2)
+#define PFERR_RSVD_MASK BIT(3)
+#define PFERR_FETCH_MASK BIT(4)
+#define PFERR_PK_MASK BIT(5)
+#define PFERR_SGX_MASK BIT(15)
+#define PFERR_GUEST_RMP_MASK BIT_ULL(31)
+#define PFERR_GUEST_FINAL_MASK BIT_ULL(32)
+#define PFERR_GUEST_PAGE_MASK BIT_ULL(33)
+#define PFERR_GUEST_ENC_MASK BIT_ULL(34)
+#define PFERR_GUEST_SIZEM_MASK BIT_ULL(35)
+#define PFERR_GUEST_VMPL_MASK BIT_ULL(36)
-#define PFERR_PRESENT_BIT 0
-#define PFERR_WRITE_BIT 1
-#define PFERR_USER_BIT 2
-#define PFERR_RSVD_BIT 3
-#define PFERR_FETCH_BIT 4
-#define PFERR_PK_BIT 5
-#define PFERR_SGX_BIT 15
-#define PFERR_GUEST_FINAL_BIT 32
-#define PFERR_GUEST_PAGE_BIT 33
-#define PFERR_IMPLICIT_ACCESS_BIT 48
-
-#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT)
-#define PFERR_WRITE_MASK BIT(PFERR_WRITE_BIT)
-#define PFERR_USER_MASK BIT(PFERR_USER_BIT)
-#define PFERR_RSVD_MASK BIT(PFERR_RSVD_BIT)
-#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT)
-#define PFERR_PK_MASK BIT(PFERR_PK_BIT)
-#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
-#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
-#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
-#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
+/*
+ * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
+ * when emulating instructions that trigger implicit access.
+ */
+#define PFERR_IMPLICIT_ACCESS BIT_ULL(48)
+/*
+ * PRIVATE_ACCESS is a KVM-defined flag used to indicate that a fault occurred
+ * when the guest was accessing private memory.
+ */
+#define PFERR_PRIVATE_ACCESS BIT_ULL(49)
+#define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
PFERR_WRITE_MASK | \
@@ -855,6 +858,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
+ bool is_amd_compatible;
/*
* FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
@@ -993,9 +997,6 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
- /* set at EPT violation at this point */
- unsigned long exit_qualification;
-
/* pv related host specific info */
struct {
bool pv_unhalted;
@@ -1279,12 +1280,14 @@ enum kvm_apicv_inhibit {
};
struct kvm_arch {
- unsigned long vm_type;
unsigned long n_used_mmu_pages;
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
u8 mmu_valid_gen;
+ u8 vm_type;
+ bool has_private_mem;
+ bool has_protected_state;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
@@ -1311,6 +1314,8 @@ struct kvm_arch {
*/
spinlock_t mmu_unsync_pages_lock;
+ u64 shadow_mmio_value;
+
struct iommu_domain *iommu_domain;
bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -1778,6 +1783,7 @@ struct kvm_x86_ops {
void (*enable_smi_window)(struct kvm_vcpu *vcpu);
#endif
+ int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
@@ -1843,6 +1849,7 @@ struct kvm_arch_async_pf {
gfn_t gfn;
unsigned long cr3;
bool direct_map;
+ u64 error_code;
};
extern u32 __read_mostly kvm_nr_uret_msrs;
@@ -2139,6 +2146,10 @@ static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}
+unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ int op_64_bit, int cpl);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
@@ -2152,8 +2163,9 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
int tdp_max_root_level, int tdp_huge_page_level);
+
#ifdef CONFIG_KVM_PRIVATE_MEM
-#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM)
+#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
#else
#define kvm_arch_has_private_mem(kvm) false
#endif
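A small userspace sketch of the PFERR_* layout above, with BIT()/BIT_ULL() re-created locally: hardware-reported error code bits sit in the low words, while bits 48 and 49 are the KVM-defined "synthetic" flags grouped under PFERR_SYNTHETIC_MASK:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define BIT_ULL(n)              (1ull << (n))

#define PFERR_WRITE_MASK        BIT(1)
#define PFERR_IMPLICIT_ACCESS   BIT_ULL(48)
#define PFERR_PRIVATE_ACCESS    BIT_ULL(49)
#define PFERR_SYNTHETIC_MASK    (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)

int main(void)
{
        uint64_t err = PFERR_WRITE_MASK | PFERR_PRIVATE_ACCESS;

        printf("write=%d private=%d synthetic=%d\n",
               !!(err & PFERR_WRITE_MASK),
               !!(err & PFERR_PRIVATE_ACCESS),
               !!(err & PFERR_SYNTHETIC_MASK));  /* write=1 private=1 synthetic=1 */
        return 0;
}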
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index de3118305838..dfd2e9699bd7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -13,6 +13,7 @@
#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */
#define MCG_EXT_P BIT_ULL(9) /* Extended registers available */
#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */
+#define MCG_SEAM_NR BIT_ULL(12) /* MCG_STATUS_SEAM_NR supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
@@ -25,6 +26,7 @@
#define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */
#define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */
+#define MCG_STATUS_SEAM_NR BIT_ULL(12) /* Machine check inside SEAM non-root mode */
/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index c72c7ff78fcd..d593e52e6635 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -16,10 +16,10 @@ extern int pic_mode;
* Summit or generic (i.e. installer) kernels need lots of bus entries.
* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
*/
-#if CONFIG_BASE_SMALL == 0
-# define MAX_MP_BUSSES 260
-#else
+#ifdef CONFIG_BASE_SMALL
# define MAX_MP_BUSSES 32
+#else
+# define MAX_MP_BUSSES 260
#endif
#define MAX_IRQ_SOURCES 256
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 05956bd8bacf..e022e6eb766c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -61,10 +61,13 @@
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable Branch History Injection behavior */
+#define SPEC_CTRL_BHI_DIS_S BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)
/* A mask for bits which the kernel toggles when controlling mitigations */
#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
- | SPEC_CTRL_RRSBA_DIS_S)
+ | SPEC_CTRL_RRSBA_DIS_S \
+ | SPEC_CTRL_BHI_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -163,6 +166,14 @@
* are restricted to targets in
* kernel.
*/
+#define ARCH_CAP_BHI_NO BIT(20) /*
+ * CPU is not affected by Branch
+ * History Injection.
+ */
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+ * supported
+ */
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
@@ -185,11 +196,6 @@
* File.
*/
-#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
- * IA32_XAPIC_DISABLE_STATUS MSR
- * supported
- */
-
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
* Writeback and invalidate the
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index fc3a8a3c7ffe..ff5f1ecc7d1e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -262,11 +262,20 @@
.Lskip_rsb_\@:
.endm
+/*
+ * The CALL to srso_alias_untrain_ret() must be patched in directly at
+ * the spot where untraining must be done, ie., srso_alias_untrain_ret()
+ * must be the target of a CALL instruction instead of indirectly
+ * jumping to a wrapper which then calls it. Therefore, this macro is
+ * called outside of __UNTRAIN_RET below, for the time being, until the
+ * kernel can support nested alternatives with arbitrary nesting.
+ */
+.macro CALL_UNTRAIN_RET
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
-#define CALL_UNTRAIN_RET "call entry_untrain_ret"
-#else
-#define CALL_UNTRAIN_RET ""
+ ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
+ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
+.endm
/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
@@ -282,8 +291,8 @@
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
VALIDATE_UNRET_END
- ALTERNATIVE_3 "", \
- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET
+ ALTERNATIVE_2 "", \
"call entry_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
@@ -317,6 +326,19 @@
ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm
+#ifdef CONFIG_X86_64
+.macro CLEAR_BRANCH_HISTORY
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+.endm
+
+.macro CLEAR_BRANCH_HISTORY_VMEXIT
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
+.endm
+#else
+#define CLEAR_BRANCH_HISTORY
+#define CLEAR_BRANCH_HISTORY_VMEXIT
+#endif
+
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@@ -342,6 +364,8 @@ extern void retbleed_return_thunk(void);
static inline void retbleed_return_thunk(void) {}
#endif
+extern void srso_alias_untrain_ret(void);
+
#ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
@@ -357,6 +381,10 @@ extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
+#ifdef CONFIG_X86_64
+extern void clear_bhb_loop(void);
+#endif
+
extern void (*x86_return_thunk)(void);
extern void __warn_thunk(void);
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 9da9c8a2f1df..52f1b4ff0cc1 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -31,10 +31,12 @@
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
-#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
- CONFIG_PHYSICAL_ALIGN)
+/* Physical address where kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+#define __START_KERNEL (__START_KERNEL_map + LOAD_PHYSICAL_ADDR)
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
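The new LOAD_PHYSICAL_ADDR definition is the standard round-up-to-alignment expression (x + (a - 1)) & ~(a - 1), which is only valid when the alignment is a power of two. A minimal standalone sketch of the arithmetic, with made-up values standing in for the CONFIG_PHYSICAL_START/CONFIG_PHYSICAL_ALIGN options:

#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + (align - 1)) & ~(align - 1);
}

int main(void)
{
	unsigned long start = 0x1000000;	/* hypothetical 16 MiB load address */
	unsigned long align = 0x200000;		/* hypothetical 2 MiB alignment */

	printf("%#lx\n", align_up(start, align));	/* 0x1000000: already aligned  */
	printf("%#lx\n", align_up(start + 1, align));	/* 0x1200000: next boundary up */
	return 0;
}
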
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 44958ebaf626..3bedee1801e2 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -59,36 +59,24 @@
#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset this_cpu_read(this_cpu_off)
-#ifdef CONFIG_USE_X86_SEG_SUPPORT
-/*
- * Efficient implementation for cases in which the compiler supports
- * named address spaces. Allows the compiler to perform additional
- * optimizations that can save more instructions.
- */
-#define arch_raw_cpu_ptr(ptr) \
-({ \
- unsigned long tcp_ptr__; \
- tcp_ptr__ = __raw_cpu_read(, this_cpu_off); \
- \
- tcp_ptr__ += (unsigned long)(ptr); \
- (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
-})
-#else /* CONFIG_USE_X86_SEG_SUPPORT */
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
+ *
+ * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
+ * kernel, because games are played with CONFIG_X86_64 there and
+ * sizeof(this_cpu_off) becomes 4.
*/
-#define arch_raw_cpu_ptr(ptr) \
+#ifndef BUILD_VDSO32_64
+#define arch_raw_cpu_ptr(_ptr) \
({ \
- unsigned long tcp_ptr__; \
- asm ("mov " __percpu_arg(1) ", %0" \
- : "=r" (tcp_ptr__) \
- : "m" (__my_cpu_var(this_cpu_off))); \
- \
- tcp_ptr__ += (unsigned long)(ptr); \
- (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
+ unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \
+ tcp_ptr__ += (__force unsigned long)(_ptr); \
+ (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \
})
-#endif /* CONFIG_USE_X86_SEG_SUPPORT */
+#else
+#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
+#endif
#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel
@@ -102,8 +90,8 @@
#endif /* CONFIG_SMP */
#define __my_cpu_type(var) typeof(var) __percpu_seg_override
-#define __my_cpu_ptr(ptr) (__my_cpu_type(*ptr) *)(uintptr_t)(ptr)
-#define __my_cpu_var(var) (*__my_cpu_ptr(&var))
+#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
+#define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
@@ -230,25 +218,26 @@ do { \
})
/*
- * xchg is implemented using cmpxchg without a lock prefix. xchg is
- * expensive due to the implied lock prefix. The processor cannot prefetch
- * cachelines if xchg is used.
+ * raw_cpu_xchg() can use a load-store since
+ * it is not required to be IRQ-safe.
*/
-#define percpu_xchg_op(size, qual, _var, _nval) \
+#define raw_percpu_xchg_op(_var, _nval) \
({ \
- __pcpu_type_##size pxo_old__; \
- __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \
- asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \
- "%[oval]") \
- "\n1:\t" \
- __pcpu_op2_##size("cmpxchg", "%[nval]", \
- __percpu_arg([var])) \
- "\n\tjnz 1b" \
- : [oval] "=&a" (pxo_old__), \
- [var] "+m" (__my_cpu_var(_var)) \
- : [nval] __pcpu_reg_##size(, pxo_new__) \
- : "memory"); \
- (typeof(_var))(unsigned long) pxo_old__; \
+ typeof(_var) pxo_old__ = raw_cpu_read(_var); \
+ raw_cpu_write(_var, _nval); \
+ pxo_old__; \
+})
+
+/*
+ * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
+ * xchg is expensive due to the implied lock prefix. The processor
+ * cannot prefetch cachelines if xchg is used.
+ */
+#define this_percpu_xchg_op(_var, _nval) \
+({ \
+ typeof(_var) pxo_old__ = this_cpu_read(_var); \
+ do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
+ pxo_old__; \
})
/*
@@ -428,10 +417,6 @@ do { \
* actually per-thread variables implemented as per-CPU variables and
* thus stable for the duration of the respective task.
*/
-#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
-#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
-#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
-#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
#ifdef CONFIG_USE_X86_SEG_SUPPORT
@@ -500,6 +485,10 @@ do { \
#define this_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
#endif /* CONFIG_USE_X86_SEG_SUPPORT */
+#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
+#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
+#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
+
#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val)
@@ -509,18 +498,6 @@ do { \
#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val)
-
-/*
- * raw_cpu_xchg() can use a load-store since it is not required to be
- * IRQ-safe.
- */
-#define raw_percpu_xchg_op(var, nval) \
-({ \
- typeof(var) pxo_ret__ = raw_cpu_read(var); \
- raw_cpu_write(var, (nval)); \
- pxo_ret__; \
-})
-
#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)
@@ -534,9 +511,9 @@ do { \
#define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val)
-#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval)
-#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval)
-#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval)
+#define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
@@ -563,6 +540,8 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
+#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
+
#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val)
@@ -575,41 +554,41 @@ do { \
#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
-#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval)
+#define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
-#endif
-
-static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
- const unsigned long __percpu *addr)
-{
- unsigned long __percpu *a =
- (unsigned long __percpu *)addr + nr / BITS_PER_LONG;
-#ifdef CONFIG_X86_64
- return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
+#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)
#else
- return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
-#endif
-}
+/* There is no generic 64 bit read stable operation for 32 bit targets. */
+#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
-static inline bool x86_this_cpu_variable_test_bit(int nr,
- const unsigned long __percpu *addr)
-{
- bool oldbit;
+#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
+#endif
- asm volatile("btl "__percpu_arg(2)",%1"
- CC_SET(c)
- : CC_OUT(c) (oldbit)
- : "m" (*__my_cpu_ptr((unsigned long __percpu *)(addr))), "Ir" (nr));
+#define x86_this_cpu_constant_test_bit(_nr, _var) \
+({ \
+ unsigned long __percpu *addr__ = \
+ (unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
+ !!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__)); \
+})
- return oldbit;
-}
+#define x86_this_cpu_variable_test_bit(_nr, _var) \
+({ \
+ bool oldbit; \
+ \
+ asm volatile("btl %[nr], " __percpu_arg([var]) \
+ CC_SET(c) \
+ : CC_OUT(c) (oldbit) \
+ : [var] "m" (__my_cpu_var(_var)), \
+ [nr] "rI" (_nr)); \
+ oldbit; \
+})
-#define x86_this_cpu_test_bit(nr, addr) \
- (__builtin_constant_p((nr)) \
- ? x86_this_cpu_constant_test_bit((nr), (addr)) \
- : x86_this_cpu_variable_test_bit((nr), (addr)))
+#define x86_this_cpu_test_bit(_nr, _var) \
+ (__builtin_constant_p(_nr) \
+ ? x86_this_cpu_constant_test_bit(_nr, _var) \
+ : x86_this_cpu_variable_test_bit(_nr, _var))
#include <asm-generic/percpu.h>
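The rewritten this_percpu_xchg_op() above implements exchange as a plain read followed by a try_cmpxchg() retry loop, while the raw_ variant stays a simple read/write pair because it is not required to be IRQ-safe. A rough userspace model of the IRQ-safe pattern, using C11 atomics instead of the per-CPU accessors (the function name is illustrative, not a kernel API):

#include <stdatomic.h>

/* Exchange implemented as read + compare-and-exchange retry loop. */
static long xchg_via_cmpxchg(_Atomic long *var, long newval)
{
	long old = atomic_load_explicit(var, memory_order_relaxed);

	/* On failure, 'old' is refreshed with the current value and we retry. */
	while (!atomic_compare_exchange_weak(var, &old, newval))
		;

	return old;
}

The loop only spins when another context changed the variable between the read and the compare-and-exchange, which is the same behaviour the kernel macro relies on.
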
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 3736b8a46c04..7f1e17250546 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -555,6 +555,7 @@ struct x86_pmu_lbr {
unsigned int from;
unsigned int to;
unsigned int info;
+ bool has_callstack;
};
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 315535ffb258..65b8e5bb902c 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -234,6 +234,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
phys_addr_t pfn = pud_val(pud);
@@ -387,23 +388,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
- bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
-
-#ifdef CONFIG_DEBUG_VM
- /*
- * Having write bit for wr-protect-marked present ptes is fatal,
- * because it means the uffd-wp bit will be ignored and write will
- * just go through.
- *
- * Use any chance of pgtable walking to verify this (e.g., when
- * page swapped out or being migrated for all purposes). It means
- * something is already wrong. Tell the admin even before the
- * process crashes. We also nail it with wrong pgtable setup.
- */
- WARN_ON_ONCE(wp && pte_write(pte));
-#endif
-
- return wp;
+ return pte_flags(pte) & _PAGE_UFFD_WP;
}
static inline pte_t pte_mkuffd_wp(pte_t pte)
@@ -1200,7 +1185,6 @@ static inline int pgd_none(pgd_t pgd)
extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
-extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
unsigned long end, pgprot_t prot);
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 7e9db77231ac..3c4407271d08 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -245,6 +245,7 @@ extern void cleanup_highmap(void);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0b748ee16b3d..b78644962626 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -148,7 +148,7 @@
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | \
_PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
- _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
+ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@@ -173,6 +173,7 @@ enum page_cache_mode {
};
#endif
+#define _PAGE_CC (_AT(pteval_t, cc_mask))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
@@ -566,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+ unsigned int *level, bool *nx, bool *rw);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
diff --git a/arch/x86/include/asm/posted_intr.h b/arch/x86/include/asm/posted_intr.h
new file mode 100644
index 000000000000..de788b400fba
--- /dev/null
+++ b/arch/x86/include/asm/posted_intr.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_POSTED_INTR_H
+#define _X86_POSTED_INTR_H
+#include <asm/irq_vectors.h>
+
+#define POSTED_INTR_ON 0
+#define POSTED_INTR_SN 1
+
+#define PID_TABLE_ENTRY_VALID 1
+
+/* Posted-Interrupt Descriptor */
+struct pi_desc {
+ union {
+ u32 pir[8]; /* Posted interrupt requested */
+ u64 pir64[4];
+ };
+ union {
+ struct {
+ u16 notifications; /* Suppress and outstanding bits */
+ u8 nv;
+ u8 rsvd_2;
+ u32 ndst;
+ };
+ u64 control;
+ };
+ u32 rsvd[6];
+} __aligned(64);
+
+static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+ return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
+{
+ return test_and_clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
+static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+{
+ return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+}
+
+static inline void pi_set_sn(struct pi_desc *pi_desc)
+{
+ set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_set_on(struct pi_desc *pi_desc)
+{
+ set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_clear_on(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_on(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_sn(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
+}
+
+/* Non-atomic helpers */
+static inline void __pi_set_sn(struct pi_desc *pi_desc)
+{
+ pi_desc->notifications |= BIT(POSTED_INTR_SN);
+}
+
+static inline void __pi_clear_sn(struct pi_desc *pi_desc)
+{
+ pi_desc->notifications &= ~BIT(POSTED_INTR_SN);
+}
+
+#ifdef CONFIG_X86_POSTED_MSI
+/*
+ * Not all external vectors are subject to interrupt remapping, e.g. IOMMU's
+ * own interrupts. Here we do not distinguish them since those vector bits in
+ * PIR will always be zero.
+ */
+static inline bool pi_pending_this_cpu(unsigned int vector)
+{
+ struct pi_desc *pid = this_cpu_ptr(&posted_msi_pi_desc);
+
+ if (WARN_ON_ONCE(vector > NR_VECTORS || vector < FIRST_EXTERNAL_VECTOR))
+ return false;
+
+ return test_bit(vector, (unsigned long *)pid->pir);
+}
+
+extern void intel_posted_msi_init(void);
+#else
+static inline bool pi_pending_this_cpu(unsigned int vector) { return false; }
+
+static inline void intel_posted_msi_init(void) { }
+#endif /* CONFIG_X86_POSTED_MSI */
+
+#endif /* _X86_POSTED_INTR_H */
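The new pi_desc layout above is a 256-bit posted-interrupt request bitmap plus control bits (ON for "notification outstanding", SN for "suppress notification"). As a rough standalone model of how a sender would use such a descriptor, assuming the usual post-then-notify protocol (this sequence is not part of the patch, and the names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified model: a per-vector request bitmap plus an "outstanding
 * notification" flag, both updated atomically. */
struct pi_model {
	_Atomic uint64_t pir[4];	/* one bit per vector, 0..255 */
	atomic_bool on;
};

/* Returns true if the poster must still send a notification (e.g. an IPI). */
static bool pi_model_post(struct pi_model *pi, unsigned int vector)
{
	uint64_t mask = 1ULL << (vector % 64);
	uint64_t prev = atomic_fetch_or(&pi->pir[(vector % 256) / 64], mask);

	if (prev & mask)
		return false;	/* vector was already pending */

	/* Only the poster that flips ON from false to true notifies. */
	return !atomic_exchange(&pi->on, true);
}
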
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 811548f131f4..cb4f6c513c48 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -108,9 +108,23 @@ struct cpuinfo_topology {
};
struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
+ union {
+ /*
+ * The particular ordering (low-to-high) of (vendor,
+ * family, model) is done in case a range of models,
+ * as is usually done on AMD, needs to be compared.
+ */
+ struct {
+ __u8 x86_model;
+ /* CPU family */
+ __u8 x86;
+ /* CPU vendor */
+ __u8 x86_vendor;
+ __u8 x86_reserved;
+ };
+ /* combined vendor, family, model */
+ __u32 x86_vfm;
+ };
__u8 x86_stepping;
#ifdef CONFIG_X86_64
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@ -472,7 +486,6 @@ struct thread_struct {
unsigned long iopl_emul;
unsigned int iopl_warn:1;
- unsigned int sig_on_uaccess_err:1;
/*
* Protection Keys Register for Userspace. Loaded immediately on
@@ -587,7 +600,7 @@ extern char ignore_fpu_irq;
# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
-# define BASE_PREFETCH "prefetcht0 %P1"
+# define BASE_PREFETCH "prefetcht0 %1"
#endif
/*
@@ -598,7 +611,7 @@ extern char ignore_fpu_irq;
*/
static inline void prefetch(const void *x)
{
- alternative_input(BASE_PREFETCH, "prefetchnta %P1",
+ alternative_input(BASE_PREFETCH, "prefetchnta %1",
X86_FEATURE_XMM,
"m" (*(const char *)x));
}
@@ -610,7 +623,7 @@ static inline void prefetch(const void *x)
*/
static __always_inline void prefetchw(const void *x)
{
- alternative_input(BASE_PREFETCH, "prefetchw %P1",
+ alternative_input(BASE_PREFETCH, "prefetchw %1",
X86_FEATURE_3DNOWPREFETCH,
"m" (*(const char *)x));
}
@@ -636,12 +649,10 @@ static __always_inline void prefetchw(const void *x)
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#else
-extern unsigned long __end_init_task[];
+extern unsigned long __top_init_kernel_stack[];
#define INIT_THREAD { \
- .sp = (unsigned long)&__end_init_task - \
- TOP_OF_KERNEL_STACK_PADDING - \
- sizeof(struct pt_regs), \
+ .sp = (unsigned long)&__top_init_kernel_stack, \
}
extern unsigned long KSTK_ESP(struct task_struct *task);
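The cpuinfo_x86 change above overlays x86_model, x86 (family) and x86_vendor with a single 32-bit x86_vfm value, so vendor/family/model checks and model ranges become plain integer comparisons on the packed value. A small standalone sketch of the idea, assuming the little-endian overlay implied by the struct layout (the VFM() helper and the vendor id here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack vendor/family/model with the model in the low byte, matching the
 * little-endian overlay of the struct fields. */
#define VFM(vendor, family, model) \
	(((uint32_t)(vendor) << 16) | ((uint32_t)(family) << 8) | (uint32_t)(model))

int main(void)
{
	uint32_t cpu = VFM(2 /* hypothetical vendor id */, 0x19, 0x44);

	/* A model range within one vendor/family is a simple range check. */
	if (cpu >= VFM(2, 0x19, 0x40) && cpu <= VFM(2, 0x19, 0x4f))
		puts("model falls in the 0x40-0x4f range");
	return 0;
}
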
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 043758a2e627..365798cb4408 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -23,19 +23,14 @@ extern int of_ioapic;
extern u64 initial_dtb;
extern void add_dtb(u64 data);
void x86_of_pci_init(void);
-void x86_dtb_parse_smp_config(void);
+void x86_flattree_get_config(void);
#else
static inline void add_dtb(u64 data) { }
static inline void x86_of_pci_init(void) { }
-static inline void x86_dtb_parse_smp_config(void) { }
+static inline void x86_flattree_get_config(void) { }
#define of_ioapic 0
#endif
-#ifdef CONFIG_OF_EARLY_FLATTREE
-void x86_flattree_get_config(void);
-#else
-static inline void x86_flattree_get_config(void) { }
-#endif
extern char cmd_line[COMMAND_LINE_SIZE];
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index cde8357bb226..a053c1293975 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -85,6 +85,8 @@ DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
+ int val;
+
if (!static_branch_likely(&virt_spin_lock_key))
return false;
@@ -94,10 +96,13 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
* horrible lock 'holder' preemption issues.
*/
- do {
- while (atomic_read(&lock->val) != 0)
- cpu_relax();
- } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
+ __retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
return true;
}
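The rewritten virt_spin_lock() loop above reads the lock word first and only attempts atomic_try_cmpxchg() when it observed zero, i.e. a test-and-test-and-set spin that keeps the cacheline shared while the lock is held. A rough userspace equivalent using C11 atomics (names illustrative):

#include <stdatomic.h>

static void spin_lock_tts(_Atomic int *lock)
{
	for (;;) {
		int val = atomic_load_explicit(lock, memory_order_relaxed);

		/* Only attempt the expensive atomic when the lock looked free. */
		if (val == 0 && atomic_compare_exchange_strong(lock, &val, 1))
			return;

		/* The kernel calls cpu_relax() here before retrying. */
	}
}
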
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index ef9697f20129..0a985784be9b 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -25,9 +25,9 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
*
* void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
* {
- * u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
+ * u8 lockval = _Q_LOCKED_VAL;
*
- * if (likely(lockval == _Q_LOCKED_VAL))
+ * if (try_cmpxchg(&lock->locked, &lockval, 0))
* return;
* pv_queued_spin_unlock_slowpath(lock, lockval);
* }
@@ -40,10 +40,9 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define PV_UNLOCK_ASM \
FRAME_BEGIN \
"push %rdx\n\t" \
- "mov $0x1,%eax\n\t" \
+ "mov $" __stringify(_Q_LOCKED_VAL) ",%eax\n\t" \
"xor %edx,%edx\n\t" \
LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" \
- "cmp $0x1,%al\n\t" \
"jne .slowpath\n\t" \
"pop %rdx\n\t" \
FRAME_END \
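The reworked unlock pseudo-code in the comment above reads naturally as a try_cmpxchg(): start from the expected locked value and hand whatever was actually observed to the slow path. A standalone C11 sketch of that shape (the slow-path body is a stub, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>

#define Q_LOCKED_VAL	1

static void unlock_slowpath(_Atomic uint8_t *locked, uint8_t seen)
{
	(void)locked; (void)seen;	/* stub: wake a waiting vCPU, etc. */
}

static void pv_unlock(_Atomic uint8_t *locked)
{
	uint8_t expected = Q_LOCKED_VAL;

	/* Fast path: the lock byte still holds the plain locked value. */
	if (atomic_compare_exchange_strong(locked, &expected, 0))
		return;

	/* A hint was set while the lock was held; take the slow path. */
	unlock_slowpath(locked, expected);
}
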
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 7ba1726b71c7..e9187ddd3d1f 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,6 +99,7 @@
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index fef16e398161..42bcd42d70d1 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -9,7 +9,7 @@
#endif
#ifdef CONFIG_COMPAT
-#include <asm/ia32_unistd.h>
+#include <asm/unistd_32_ia32.h>
#define __NR_seccomp_read_32 __NR_ia32_read
#define __NR_seccomp_write_32 __NR_ia32_write
#define __NR_seccomp_exit_32 __NR_ia32_exit
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index b463fcbd4b90..5a8246dd532f 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -54,8 +54,10 @@
(((unsigned long)fn) << 32))
/* AP Reset Hold */
-#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
-#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_POS 12
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_MASK GENMASK_ULL(51, 0)
/* GHCB GPA Register */
#define GHCB_MSR_REG_GPA_REQ 0x012
@@ -99,6 +101,8 @@ enum psc_op {
/* GHCB Hypervisor Feature Request/Response */
#define GHCB_MSR_HV_FT_REQ 0x080
#define GHCB_MSR_HV_FT_RESP 0x081
+#define GHCB_MSR_HV_FT_POS 12
+#define GHCB_MSR_HV_FT_MASK GENMASK_ULL(51, 0)
#define GHCB_MSR_HV_FT_RESP_VAL(v) \
/* GHCBData[63:12] */ \
(((u64)(v) & GENMASK_ULL(63, 12)) >> 12)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 9477b4053bce..ca20cc4e5826 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -140,7 +140,7 @@ struct secrets_os_area {
#define VMPCK_KEY_LEN 32
/* See the SNP spec version 0.9 for secrets page format */
-struct snp_secrets_page_layout {
+struct snp_secrets_page {
u32 version;
u32 imien : 1,
rsvd1 : 31;
@@ -218,17 +218,16 @@ void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
+void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
-void kdump_sev_callback(void);
void sev_show_status(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -244,12 +243,12 @@ static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
+static inline void snp_dmi_setup(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
return -ENOTTY;
@@ -258,7 +257,6 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
-static inline void kdump_sev_callback(void) { }
static inline void sev_show_status(void) { }
#endif
@@ -270,6 +268,8 @@ int psmash(u64 pfn);
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
+void kdump_sev_callback(void);
+void snp_fixup_e820_tables(void);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -282,6 +282,8 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
}
static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
+static inline void kdump_sev_callback(void) { }
+static inline void snp_fixup_e820_tables(void) {}
#endif
#endif
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 1be13b2dfe8b..64df897c0ee3 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -37,8 +37,6 @@ extern int phys_to_target_node(phys_addr_t start);
#define phys_to_target_node phys_to_target_node
extern int memory_add_physaddr_to_nid(u64 start);
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-extern int numa_fill_memblks(u64 start, u64 end);
-#define numa_fill_memblks numa_fill_memblks
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2e9fc5c400cd..aec6e2d3aa1d 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -182,8 +182,8 @@ static __always_inline void clflush(volatile void *__p)
static inline void clflushopt(volatile void *__p)
{
- alternative_io(".byte 0x3e; clflush %P0",
- ".byte 0x66; clflush %P0",
+ alternative_io(".byte 0x3e; clflush %0",
+ ".byte 0x66; clflush %0",
X86_FEATURE_CLFLUSHOPT,
"+m" (*(volatile char __force *)__p));
}
@@ -205,9 +205,9 @@ static inline void clwb(volatile void *__p)
#ifdef CONFIG_X86_USER_SHADOW_STACK
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
- asm goto("1: wrussq %[val], (%[addr])\n"
+ asm goto("1: wrussq %[val], %[addr]\n"
_ASM_EXTABLE(1b, %l[fail])
- :: [addr] "r" (addr), [val] "r" (val)
+ :: [addr] "m" (*addr), [val] "r" (val)
:: fail);
return 0;
fail:
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 857d364b9888..9d0b324eab21 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -30,37 +30,40 @@ void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
- long d0, d1;
- asm volatile("rep\n\t"
- "stosw"
- : "=&c" (d0), "=&D" (d1)
- : "a" (v), "1" (s), "0" (n)
- : "memory");
- return s;
+ const __auto_type s0 = s;
+ asm volatile (
+ "rep stosw"
+ : "+D" (s), "+c" (n)
+ : "a" (v)
+ : "memory"
+ );
+ return s0;
}
#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
- long d0, d1;
- asm volatile("rep\n\t"
- "stosl"
- : "=&c" (d0), "=&D" (d1)
- : "a" (v), "1" (s), "0" (n)
- : "memory");
- return s;
+ const __auto_type s0 = s;
+ asm volatile (
+ "rep stosl"
+ : "+D" (s), "+c" (n)
+ : "a" (v)
+ : "memory"
+ );
+ return s0;
}
#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
- long d0, d1;
- asm volatile("rep\n\t"
- "stosq"
- : "=&c" (d0), "=&D" (d1)
- : "a" (v), "1" (s), "0" (n)
- : "memory");
- return s;
+ const __auto_type s0 = s;
+ asm volatile (
+ "rep stosq"
+ : "+D" (s), "+c" (n)
+ : "a" (v)
+ : "memory"
+ );
+ return s0;
}
#endif
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index f44e2f9ab65d..2fc7bc3863ff 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,19 +16,17 @@
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
+/* This is used purely for kernel/trace/trace_syscalls.c */
typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];
-#if defined(CONFIG_X86_32)
-#define ia32_sys_call_table sys_call_table
-#else
/*
* These may not exist, but still put the prototypes in so we
* can use IS_ENABLED().
*/
-extern const sys_call_ptr_t ia32_sys_call_table[];
-extern const sys_call_ptr_t x32_sys_call_table[];
-#endif
+extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
+extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
+extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
/*
* Only the low 32 bits of orig_ax are meaningful, so we return int.
@@ -127,6 +125,7 @@ static inline int syscall_get_arch(struct task_struct *task)
}
bool do_syscall_64(struct pt_regs *regs, int nr);
+void do_int80_emulation(struct pt_regs *regs);
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 345aafbc1964..6259f1937fe7 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -15,7 +15,7 @@
extern void text_poke_early(void *addr, const void *opcode, size_t len);
-extern void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);
+extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 237dc8cdd12b..0f9bab92a43d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -78,7 +78,7 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
- asm volatile("call __" #fn "_%P4" \
+ asm volatile("call __" #fn "_%c4" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
__chk_user_ptr(__ptr); \
__ptr_pu = __ptr; \
__val_pu = __x; \
- asm volatile("call __" #fn "_%P[size]" \
+ asm volatile("call __" #fn "_%c[size]" \
: "=c" (__ret_pu), \
ASM_CALL_CONSTRAINT \
: "0" (__ptr_pu), \
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 8e048ca980df..0ef36190abe6 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -300,7 +300,7 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
#define vdso_cycles_ok arch_vdso_cycles_ok
/*
- * x86 specific delta calculation.
+ * x86 specific calculation of nanoseconds for the current cycle count
*
* The regular implementation assumes that clocksource reads are globally
* monotonic. The TSC can be slightly off across sockets which can cause
@@ -308,8 +308,8 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
* jump.
*
* Therefore it needs to be verified that @cycles are greater than
- * @last. If not then use @last, which is the base time of the current
- * conversion period.
+ * @vd->cycles_last. If not then use @vd->cycles_last, which is the base
+ * time of the current conversion period.
*
* This variant also uses a custom mask because while the clocksource mask of
* all the VDSO capable clocksources on x86 is U64_MAX, the above code uses
@@ -317,25 +317,37 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
* declares everything with the MSB/Sign-bit set as invalid. Therefore the
* effective mask is S64_MAX.
*/
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
{
- /*
- * Due to the MSB/Sign-bit being used as invalid marker (see
- * arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
- */
- u64 delta = (cycles - last) & S64_MAX;
+ u64 delta = cycles - vd->cycle_last;
/*
- * Due to the above mentioned TSC wobbles, filter out negative motion.
- * Per the above masking, the effective sign bit is now bit 62.
+ * Negative motion and deltas which can cause multiplication
+ * overflow require special treatment. This check covers both as
+ * negative motion is guaranteed to be greater than @vd::max_cycles
+ * due to unsigned comparison.
+ *
+ * Due to the MSB/Sign-bit being used as invalid marker (see
+ * arch_vdso_cycles_valid() above), the effective mask is S64_MAX,
+ * but that case is also unlikely and will also take the unlikely path
+ * here.
*/
- if (unlikely(delta & (1ULL << 62)))
- return 0;
+ if (unlikely(delta > vd->max_cycles)) {
+ /*
+ * Due to the above mentioned TSC wobbles, filter out
+ * negative motion. Per the above masking, the effective
+ * sign bit is now bit 62.
+ */
+ if (delta & (1ULL << 62))
+ return base >> vd->shift;
+
+ /* Handle multiplication overflow gracefully */
+ return mul_u64_u32_add_u64_shr(delta & S64_MAX, vd->mult, base, vd->shift);
+ }
- return delta * mult;
+ return ((delta * vd->mult) + base) >> vd->shift;
}
-#define vdso_calc_delta vdso_calc_delta
+#define vdso_calc_ns vdso_calc_ns
#endif /* !__ASSEMBLY__ */
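In the common case, vdso_calc_ns() above is the usual clocksource conversion ns = ((cycles - cycle_last) * mult + base) >> shift; the unlikely branch handles negative motion and deltas large enough to overflow the 64-bit multiply. A simplified standalone sketch of that structure (the field names are made up, the sign test uses bit 63 because no S64_MAX masking is applied here, and the kernel uses the vdso_data layout plus mul_u64_u32_add_u64_shr() for the overflow case):

#include <stdint.h>

struct clock_params {
	uint64_t cycle_last;
	uint64_t max_cycles;	/* largest delta that cannot overflow delta * mult */
	uint32_t mult;
	uint32_t shift;
};

static uint64_t calc_ns(const struct clock_params *p, uint64_t cycles, uint64_t base)
{
	uint64_t delta = cycles - p->cycle_last;

	if (delta > p->max_cycles) {
		/* Negative motion: clamp to the base time of the period. */
		if (delta & (1ULL << 63))
			return base >> p->shift;

		/* Overflow: widen the multiply (GCC/Clang 128-bit extension). */
		return (uint64_t)((((unsigned __int128)delta * p->mult) + base) >> p->shift);
	}

	return ((delta * p->mult) + base) >> p->shift;
}
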
diff --git a/arch/x86/include/asm/video.h b/arch/x86/include/asm/video.h
new file mode 100644
index 000000000000..0950c9535fae
--- /dev/null
+++ b/arch/x86/include/asm/video.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_VIDEO_H
+#define _ASM_X86_VIDEO_H
+
+#include <linux/types.h>
+
+#include <asm/page.h>
+
+struct device;
+
+pgprot_t pgprot_framebuffer(pgprot_t prot,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long offset);
+#define pgprot_framebuffer pgprot_framebuffer
+
+bool video_is_primary_device(struct device *dev);
+#define video_is_primary_device video_is_primary_device
+
+#include <asm-generic/video.h>
+
+#endif /* _ASM_X86_VIDEO_H */
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 9e8ac5073ecb..62ee19909903 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -84,7 +84,7 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }
-#define free_vm86(t) do { } while(0)
+#define free_vm86(task) do { (void)(task); } while(0)
#endif /* CONFIG_VM86 */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 4dba17363008..d77a31039f24 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -71,6 +71,7 @@
#define SECONDARY_EXEC_ENCLS_EXITING VMCS_CONTROL_BIT(ENCLS_EXITING)
#define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
+#define SECONDARY_EXEC_EPT_VIOLATION_VE VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
#define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
#define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
@@ -226,6 +227,8 @@ enum vmcs_field {
VMREAD_BITMAP_HIGH = 0x00002027,
VMWRITE_BITMAP = 0x00002028,
VMWRITE_BITMAP_HIGH = 0x00002029,
+ VE_INFORMATION_ADDRESS = 0x0000202A,
+ VE_INFORMATION_ADDRESS_HIGH = 0x0000202B,
XSS_EXIT_BITMAP = 0x0000202C,
XSS_EXIT_BITMAP_HIGH = 0x0000202D,
ENCLS_EXITING_BITMAP = 0x0000202E,
@@ -514,6 +517,7 @@ enum vmcs_field {
#define VMX_EPT_IPAT_BIT (1ull << 6)
#define VMX_EPT_ACCESS_BIT (1ull << 8)
#define VMX_EPT_DIRTY_BIT (1ull << 9)
+#define VMX_EPT_SUPPRESS_VE_BIT (1ull << 63)
#define VMX_EPT_RWX_MASK (VMX_EPT_READABLE_MASK | \
VMX_EPT_WRITABLE_MASK | \
VMX_EPT_EXECUTABLE_MASK)
@@ -630,4 +634,13 @@ enum vmx_l1d_flush_state {
extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+struct vmx_ve_information {
+ u32 exit_reason;
+ u32 delivery;
+ u64 exit_qualification;
+ u64 guest_linear_address;
+ u64 guest_physical_address;
+ u16 eptp_index;
+};
+
#endif
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index b89b40f250e6..6149eabe200f 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -30,12 +30,13 @@ struct x86_init_mpparse {
* @reserve_resources: reserve the standard resources for the
* platform
* @memory_setup: platform specific memory setup
- *
+ * @dmi_setup: platform specific DMI setup
*/
struct x86_init_resources {
void (*probe_roms)(void);
void (*reserve_resources)(void);
char *(*memory_setup)(void);
+ void (*dmi_setup)(void);
};
/**
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index ad29984d5e39..9fae1b73b529 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -457,8 +457,13 @@ struct kvm_sync_regs {
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
-/* attributes for system fd (group 0) */
-#define KVM_X86_XCOMP_GUEST_SUPP 0
+/* vendor-independent attributes for system fd (group 0) */
+#define KVM_X86_GRP_SYSTEM 0
+# define KVM_X86_XCOMP_GUEST_SUPP 0
+
+/* vendor-specific groups and attributes for system fd */
+#define KVM_X86_GRP_SEV 1
+# define KVM_X86_SEV_VMSA_FEATURES 0
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -689,43 +694,62 @@ enum sev_cmd_id {
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
+ /* Second time is the charm; improved versions of the above ioctls. */
+ KVM_SEV_INIT2,
+
KVM_SEV_NR_MAX,
};
struct kvm_sev_cmd {
__u32 id;
+ __u32 pad0;
__u64 data;
__u32 error;
__u32 sev_fd;
};
+struct kvm_sev_init {
+ __u64 vmsa_features;
+ __u32 flags;
+ __u16 ghcb_version;
+ __u16 pad1;
+ __u32 pad2[8];
+};
+
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
+ __u32 pad0;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad1;
};
struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_guest_status {
@@ -738,33 +762,43 @@ struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_send_start {
__u32 policy;
+ __u32 pad0;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
+ __u32 pad1;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
+ __u32 pad2;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
+ __u32 pad3;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad4;
};
struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
struct kvm_sev_receive_start {
@@ -772,17 +806,22 @@ struct kvm_sev_receive_start {
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
+ __u32 pad0;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad1;
};
struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
@@ -833,5 +872,7 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
+#define KVM_X86_SEV_VM 2
+#define KVM_X86_SEV_ES_VM 3
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 6bc3456a8ebf..a1efa7907a0b 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -142,7 +142,6 @@ struct kvm_vcpu_pv_apf_data {
__u32 token;
__u8 pad[56];
- __u32 enabled;
};
#define KVM_PV_EOI_BIT 0
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 74077694da7d..20a0dd51700a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -40,7 +40,7 @@ KMSAN_SANITIZE_sev.o := n
KCOV_INSTRUMENT_head$(BITS).o := n
KCOV_INSTRUMENT_sev.o := n
-CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace
+CFLAGS_irq.o := -I $(src)/../include/asm/trace
obj-y += head_$(BITS).o
obj-y += head$(BITS).o
@@ -62,7 +62,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
-obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
+obj-y += pci-dma.o quirks.o kdebugfs.o
obj-y += alternative.o i8253.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += resource.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 45a280f2161c..89de61243272 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -125,6 +125,20 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
};
/*
+ * Nomenclature for variable names to simplify and clarify this code and ease
+ * any potential staring at it:
+ *
+ * @instr: source address of the original instructions in the kernel text as
+ * generated by the compiler.
+ *
+ * @buf: temporary buffer on which the patching operates. This buffer is
+ * eventually text-poked into the kernel image.
+ *
+ * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
+ * in the .altinstr_replacement section.
+ */
+
+/*
* Fill the buffer with a single effective instruction of size @len.
*
* In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
@@ -133,28 +147,28 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
* each single-byte NOPs). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
* *jump* over instead of executing long and daft NOPs.
*/
-static void add_nop(u8 *instr, unsigned int len)
+static void add_nop(u8 *buf, unsigned int len)
{
- u8 *target = instr + len;
+ u8 *target = buf + len;
if (!len)
return;
if (len <= ASM_NOP_MAX) {
- memcpy(instr, x86_nops[len], len);
+ memcpy(buf, x86_nops[len], len);
return;
}
if (len < 128) {
- __text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
- instr += JMP8_INSN_SIZE;
+ __text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
+ buf += JMP8_INSN_SIZE;
} else {
- __text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
- instr += JMP32_INSN_SIZE;
+ __text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
+ buf += JMP32_INSN_SIZE;
}
- for (;instr < target; instr++)
- *instr = INT3_INSN_OPCODE;
+ for (;buf < target; buf++)
+ *buf = INT3_INSN_OPCODE;
}
extern s32 __retpoline_sites[], __retpoline_sites_end[];
@@ -187,12 +201,12 @@ static bool insn_is_nop(struct insn *insn)
* Find the offset of the first non-NOP instruction starting at @offset
* but no further than @len.
*/
-static int skip_nops(u8 *instr, int offset, int len)
+static int skip_nops(u8 *buf, int offset, int len)
{
struct insn insn;
for (; offset < len; offset += insn.length) {
- if (insn_decode_kernel(&insn, &instr[offset]))
+ if (insn_decode_kernel(&insn, &buf[offset]))
break;
if (!insn_is_nop(&insn))
@@ -203,66 +217,32 @@ static int skip_nops(u8 *instr, int offset, int len)
}
/*
- * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
- * to the end of the NOP sequence into a single NOP.
- */
-static bool
-__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
-{
- int i = *next - insn->length;
-
- switch (insn->opcode.bytes[0]) {
- case JMP8_INSN_OPCODE:
- case JMP32_INSN_OPCODE:
- *prev = i;
- *target = *next + insn->immediate.value;
- return false;
- }
-
- if (insn_is_nop(insn)) {
- int nop = i;
-
- *next = skip_nops(instr, *next, len);
- if (*target && *next == *target)
- nop = *prev;
-
- add_nop(instr + nop, *next - nop);
- DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
- return true;
- }
-
- *target = 0;
- return false;
-}
-
-/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
-static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
- int prev, target = 0;
-
for (int next, i = 0; i < len; i = next) {
struct insn insn;
- if (insn_decode_kernel(&insn, &instr[i]))
+ if (insn_decode_kernel(&insn, &buf[i]))
return;
next = i + insn.length;
- __optimize_nops(instr, len, &insn, &next, &prev, &target);
- }
-}
+ if (insn_is_nop(&insn)) {
+ int nop = i;
-static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
-{
- unsigned long flags;
+ /* Has the NOP already been optimized? */
+ if (i + insn.length == len)
+ return;
- local_irq_save(flags);
- optimize_nops(instr, len);
- sync_core();
- local_irq_restore(flags);
+ next = skip_nops(buf, next, len);
+
+ add_nop(buf + nop, next - nop);
+ DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
+ }
+ }
}
/*
@@ -335,11 +315,9 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
return (target < src || target > src + src_len);
}
-void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
+static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
- int prev, target = 0;
-
- for (int next, i = 0; i < len; i = next) {
+ for (int next, i = 0; i < instrlen; i = next) {
struct insn insn;
if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
@@ -347,9 +325,6 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
next = i + insn.length;
- if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
- continue;
-
switch (insn.opcode.bytes[0]) {
case 0x0f:
if (insn.opcode.bytes[1] < 0x80 ||
@@ -361,10 +336,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
case JMP8_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case CALL_INSN_OPCODE:
- if (need_reloc(next + insn.immediate.value, src, src_len)) {
+ if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
apply_reloc(insn.immediate.nbytes,
buf + i + insn_offset_immediate(&insn),
- src - dest);
+ repl - instr);
}
/*
@@ -372,7 +347,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
*/
if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
s32 imm = insn.immediate.value;
- imm += src - dest;
+ imm += repl - instr;
imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
if ((imm >> 31) == (imm >> 7)) {
buf[i+0] = JMP8_INSN_OPCODE;
@@ -385,15 +360,21 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
}
if (insn_rip_relative(&insn)) {
- if (need_reloc(next + insn.displacement.value, src, src_len)) {
+ if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
apply_reloc(insn.displacement.nbytes,
buf + i + insn_offset_displacement(&insn),
- src - dest);
+ repl - instr);
}
}
}
}
+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+{
+ __apply_relocation(buf, instr, instrlen, repl, repl_len);
+ optimize_nops(instr, buf, instrlen);
+}
+
/* Low-level backend functions usable from alternative code replacements. */
DEFINE_ASM_FUNC(nop_func, "", .entry.text);
EXPORT_SYMBOL_GPL(nop_func);
@@ -464,9 +445,9 @@ static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
- struct alt_instr *a;
- u8 *instr, *replacement;
u8 insn_buff[MAX_PATCH_LEN];
+ u8 *instr, *replacement;
+ struct alt_instr *a;
DPRINTK(ALT, "alt table %px, -> %px", start, end);
@@ -504,7 +485,9 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
- optimize_nops_inplace(instr, a->instrlen);
+ memcpy(insn_buff, instr, a->instrlen);
+ optimize_nops(instr, insn_buff, a->instrlen);
+ text_poke_early(instr, insn_buff, a->instrlen);
continue;
}
@@ -526,7 +509,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
- apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
+ apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -761,7 +744,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
len = patch_retpoline(addr, &insn, bytes);
if (len == insn.length) {
- optimize_nops(bytes, len);
+ optimize_nops(addr, bytes, len);
DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
text_poke_early(addr, bytes, len);
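The add_nop() logic touched above fills short padding with a single NOP encoding and, for anything longer than ASM_NOP_MAX, emits a jump over INT3-filled padding instead of a long run of NOPs. A compact standalone model of that decision (constants simplified; the real code uses x86_nops[] and __text_gen_insn()):

#include <stdint.h>
#include <string.h>

#define NOP_MAX		8	/* stand-in for ASM_NOP_MAX */
#define JMP8_SIZE	2
#define JMP32_SIZE	5

/* Fill len bytes at buf with one effective instruction: a NOP when it fits,
 * otherwise a short/near JMP over INT3 padding. */
static void fill_nop_model(uint8_t *buf, unsigned int len)
{
	if (!len)
		return;

	if (len <= NOP_MAX) {
		memset(buf, 0x90, len);		/* simplified: single-byte NOPs */
		return;
	}

	if (len < 128) {
		buf[0] = 0xEB;				/* JMP rel8 over the padding */
		buf[1] = (uint8_t)(len - JMP8_SIZE);
		memset(buf + JMP8_SIZE, 0xCC, len - JMP8_SIZE);
	} else {
		uint32_t rel = len - JMP32_SIZE;	/* little-endian immediate */
		buf[0] = 0xE9;				/* JMP rel32 over the padding */
		memcpy(buf + 1, &rel, sizeof(rel));
		memset(buf + JMP32_SIZE, 0xCC, len - JMP32_SIZE);
	}
}
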
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 2ae98f754e59..c884deca839b 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -676,7 +676,7 @@ static const struct dma_map_ops gart_dma_ops = {
.get_sgtable = dma_common_get_sgtable,
.dma_supported = dma_direct_supported,
.get_required_mask = dma_direct_get_required_mask,
- .alloc_pages = dma_direct_alloc_pages,
+ .alloc_pages_op = dma_direct_alloc_pages,
.free_pages = dma_direct_free_pages,
};
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 5bf5f9fc5753..3cf156f70859 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -95,6 +95,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
{}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a42d8a6f7149..66fd4b2a37a3 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -497,32 +497,32 @@ static struct clock_event_device lapic_clockevent = {
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static const struct x86_cpu_id deadline_match[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
+ X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
+ X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
- X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, 0x0b000020),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
+ X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
+ X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
+ X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
+ X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
+ X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
+ X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
+ X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
- X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22),
- X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20),
- X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17),
+ X86_MATCH_VFM(INTEL_HASWELL, 0x22),
+ X86_MATCH_VFM(INTEL_HASWELL_L, 0x20),
+ X86_MATCH_VFM(INTEL_HASWELL_G, 0x17),
- X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25),
- X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17),
+ X86_MATCH_VFM(INTEL_BROADWELL, 0x25),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, 0x17),
- X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2),
- X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, 0xb2),
+ X86_MATCH_VFM(INTEL_SKYLAKE, 0xb2),
- X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52),
- X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, 0x52),
+ X86_MATCH_VFM(INTEL_KABYLAKE, 0x52),
{},
};
@@ -631,7 +631,7 @@ void lapic_update_tsc_freq(void)
static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
-static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
+static __initdata u32 lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
/*
@@ -641,7 +641,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
{
unsigned long long tsc = 0;
long tapic = apic_read(APIC_TMCCT);
- unsigned long pm = acpi_pm_read_early();
+ u32 pm = acpi_pm_read_early();
if (boot_cpu_has(X86_FEATURE_TSC))
tsc = rdtsc();
@@ -666,7 +666,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
}
static int __init
-calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
+calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc)
{
const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
const long pm_thresh = pm_100ms / 100;
@@ -677,7 +677,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
return -1;
#endif
- apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
+ apic_printk(APIC_VERBOSE, "... PM-Timer delta = %u\n", deltapm);
/* Check, if the PM timer is available */
if (!deltapm)
@@ -1687,11 +1687,11 @@ static int x2apic_state;
static bool x2apic_hw_locked(void)
{
- u64 ia32_cap;
+ u64 x86_arch_cap_msr;
u64 msr;
- ia32_cap = x86_read_arch_cap_msr();
- if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+ if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}
@@ -1771,7 +1771,7 @@ void x2apic_setup(void)
__x2apic_enable();
}
-static __init void apic_set_fixmap(void);
+static __init void apic_set_fixmap(bool read_apic);
static __init void x2apic_disable(void)
{
@@ -1793,7 +1793,12 @@ static __init void x2apic_disable(void)
}
__x2apic_disable();
- apic_set_fixmap();
+ /*
+ * Don't reread the APIC ID as it was already done from
+ * check_x2apic() and the APIC driver is still an x2APIC variant,
+ * which fails to do the read after x2APIC was disabled.
+ */
+ apic_set_fixmap(false);
}
static __init void x2apic_enable(void)
@@ -2057,13 +2062,14 @@ void __init init_apic_mappings(void)
}
}
-static __init void apic_set_fixmap(void)
+static __init void apic_set_fixmap(bool read_apic)
{
set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
apic_mmio_base = APIC_BASE;
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
apic_mmio_base, mp_lapic_addr);
- apic_read_boot_cpu_id(false);
+ if (read_apic)
+ apic_read_boot_cpu_id(false);
}
void __init register_lapic_address(unsigned long address)
@@ -2073,7 +2079,7 @@ void __init register_lapic_address(unsigned long address)
mp_lapic_addr = address;
if (!x2apic_mode)
- apic_set_fixmap();
+ apic_set_fixmap(true);
}
/*
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index d9651f15ae4f..340769242dea 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -184,7 +184,6 @@ static int x86_msi_prepare(struct irq_domain *domain, struct device *dev,
alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
return 0;
case DOMAIN_BUS_PCI_DEVICE_MSIX:
- case DOMAIN_BUS_PCI_DEVICE_IMS:
alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
return 0;
default:
@@ -229,10 +228,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
break;
- case DOMAIN_BUS_PCI_DEVICE_IMS:
- if (!(pops->supported_flags & MSI_FLAG_PCI_IMS))
- return false;
- break;
default:
WARN_ON_ONCE(1);
return false;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 185738c72766..9eec52925fa3 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -965,7 +965,7 @@ static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
lockdep_assert_held(&vector_lock);
hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) {
- unsigned int irr, vector = apicd->prev_vector;
+ unsigned int vector = apicd->prev_vector;
/*
* Paranoia: Check if the vector that needs to be cleaned
@@ -979,8 +979,7 @@ static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
* fixup_irqs() was just called to scan IRR for set bits and
* forward them to new destination CPUs via IPIs.
*/
- irr = check_irr ? apic_read(APIC_IRR + (vector / 32 * 0x10)) : 0;
- if (irr & (1U << (vector % 32))) {
+ if (check_irr && is_vector_pending(vector)) {
pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq);
rearm = true;
continue;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 567dbd2fe4b6..7db83212effb 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -178,13 +178,16 @@ static int x2apic_prepare_cpu(unsigned int cpu)
u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
u32 cluster = apic_cluster(phys_apicid);
u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
+ int node = cpu_to_node(cpu);
x86_cpu_to_logical_apicid[cpu] = logical_apicid;
- if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
+ if (alloc_clustermask(cpu, cluster, node) < 0)
return -ENOMEM;
- if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
+
+ if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
return -ENOMEM;
+
return 0;
}
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 30335182b6b0..465647456753 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -185,8 +185,7 @@ static void *patch_dest(void *dest, bool direct)
u8 *pad = dest - tsize;
memcpy(insn_buff, skl_call_thunk_template, tsize);
- apply_relocation(insn_buff, tsize, pad,
- skl_call_thunk_template, tsize);
+ apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
/* Already patched? */
if (!bcmp(pad, insn_buff, tsize))
@@ -308,13 +307,12 @@ static bool is_callthunk(void *addr)
pad = (void *)(dest - tmpl_size);
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, tmpl_size, pad,
- skl_call_thunk_template, tmpl_size);
+ apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
return !bcmp(pad, insn_buff, tmpl_size);
}
-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;
u8 insn_buff[MAX_PATCH_LEN];
@@ -327,8 +325,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
return 0;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, tmpl_size, *pprog,
- skl_call_thunk_template, tmpl_size);
+ apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index eb4dbcdf41f1..a02bba0ed6b9 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -60,7 +60,7 @@ obj-$(CONFIG_ACRN_GUEST) += acrn.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
quiet_cmd_mkcapflags = MKCAP $@
- cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $@ $^
+ cmd_mkcapflags = $(CONFIG_SHELL) $(src)/mkcapflags.sh $@ $^
cpufeature = $(src)/../../include/asm/cpufeatures.h
vmxfeature = $(src)/../../include/asm/vmxfeatures.h
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6d8677e80ddb..44df3f11e731 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -13,6 +13,7 @@
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
+#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
@@ -345,6 +346,28 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
+static void bsp_determine_snp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+ cc_vendor = CC_VENDOR_AMD;
+
+ if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
+ /*
+ * RMP table entry format is not architectural and is defined by the
+ * per-processor PPR. Restrict SNP support to the known CPU models
+ * for which the RMP table entry format is currently defined.
+ */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+ c->x86 >= 0x19 && snp_probe_rmptable_info()) {
+ cc_platform_set(CC_ATTR_HOST_SEV_SNP);
+ } else {
+ setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+ }
+ }
+#endif
+}
+
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -437,8 +460,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
case 0x1a:
switch (c->x86_model) {
- case 0x00 ... 0x0f:
- case 0x20 ... 0x2f:
+ case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
case 0x70 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
@@ -452,21 +474,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
break;
}
- if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
- /*
- * RMP table entry format is not architectural and it can vary by processor
- * and is defined by the per-processor PPR. Restrict SNP support on the
- * known CPU model and family for which the RMP table entry format is
- * currently defined for.
- */
- if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
- !boot_cpu_has(X86_FEATURE_ZEN4) &&
- !boot_cpu_has(X86_FEATURE_ZEN5))
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
- else if (!snp_probe_rmptable_info())
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
- }
-
+ bsp_determine_snp(c);
return;
warn:
@@ -527,7 +535,6 @@ clear_sev:
static void early_init_amd(struct cpuinfo_x86 *c)
{
- u64 value;
u32 dummy;
if (c->x86 >= 0xf)
@@ -595,20 +602,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
early_detect_mem_encrypt(c);
- /* Re-enable TopologyExtensions if switched off by BIOS */
- if (c->x86 == 0x15 &&
- (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
- if (msr_set_bit(0xc0011005, 54) > 0) {
- rdmsrl(0xc0011005, value);
- if (value & BIT_64(54)) {
- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
- }
- }
- }
-
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
@@ -802,6 +795,11 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
clear_rdrand_cpuid_bit(c);
}
+static const struct x86_cpu_desc erratum_1386_microcode[] = {
+ AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
+ AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
+};
+
static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
/*
@@ -811,7 +809,13 @@ static void fix_erratum_1386(struct cpuinfo_x86 *c)
*
* Affected parts all have no supervisor XSAVE states, meaning that
* the XSAVEC instruction (which works fine) is equivalent.
+ *
+ * Clear the feature flag only on microcode revisions which
+ * don't have the fix.
*/
+ if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode))
+ return;
+
clear_cpu_cap(c, X86_FEATURE_XSAVES);
}
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index fdbb5f07448f..f9a8c7b7943f 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -124,25 +124,24 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
return true;
}
-#define X86_MATCH(model) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
- INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
+#define X86_MATCH(vfm) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, NULL)
static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = {
- X86_MATCH(XEON_PHI_KNL),
- X86_MATCH(XEON_PHI_KNM),
+ X86_MATCH(INTEL_XEON_PHI_KNL),
+ X86_MATCH(INTEL_XEON_PHI_KNM),
{}
};
static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = {
- X86_MATCH(SKYLAKE_X),
+ X86_MATCH(INTEL_SKYLAKE_X),
{}
};
static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = {
- X86_MATCH(ATOM_GOLDMONT),
- X86_MATCH(ATOM_GOLDMONT_D),
- X86_MATCH(ATOM_GOLDMONT_PLUS),
+ X86_MATCH(INTEL_ATOM_GOLDMONT),
+ X86_MATCH(INTEL_ATOM_GOLDMONT_D),
+ X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS),
{}
};
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index e7ba936d798b..b6f927f6c567 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -26,7 +26,7 @@
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);
+static u64 __ro_after_init x86_arch_cap_msr;
+
static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
static void __init taa_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_OFF;
return;
@@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
* update is required.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
/*
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
- ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
/*
@@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)))
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
- if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
static void __init srbds_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
return;
@@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
* are only exposed to SRBDS when TSX is enabled or when the CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
/* Will verify below that mitigation _can_ be disabled */
/* No microcode */
- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
if (gds_mitigation == GDS_MITIGATION_FORCE) {
/*
* This only needs to be done on the boot CPU so do it
@@ -1544,20 +1538,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
+static bool __ro_after_init rrsba_disabled;
+
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
- u64 ia32_cap;
+ if (rrsba_disabled)
+ return;
- if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
+ rrsba_disabled = true;
return;
+ }
- ia32_cap = x86_read_arch_cap_msr();
+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ return;
- if (ia32_cap & ARCH_CAP_RRSBA) {
- x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
- update_spec_ctrl(x86_spec_ctrl_base);
- }
+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
@@ -1607,6 +1606,74 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
dump_stack();
}
+/*
+ * Set BHI_DIS_S to prevent indirect branches in the kernel from being influenced by
+ * branch history in userspace. Not needed if BHI_NO is set.
+ */
+static bool __init spec_ctrl_bhi_dis(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
+ return false;
+
+ x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
+
+ return true;
+}
+
+enum bhi_mitigations {
+ BHI_MITIGATION_OFF,
+ BHI_MITIGATION_ON,
+};
+
+static enum bhi_mitigations bhi_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+
+static int __init spectre_bhi_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ bhi_mitigation = BHI_MITIGATION_OFF;
+ else if (!strcmp(str, "on"))
+ bhi_mitigation = BHI_MITIGATION_ON;
+ else
+ pr_err("Ignoring unknown spectre_bhi option (%s)", str);
+
+ return 0;
+}
+early_param("spectre_bhi", spectre_bhi_parse_cmdline);
+
+static void __init bhi_select_mitigation(void)
+{
+ if (bhi_mitigation == BHI_MITIGATION_OFF)
+ return;
+
+ /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
+ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+ spec_ctrl_disable_kernel_rrsba();
+ if (rrsba_disabled)
+ return;
+ }
+
+ if (spec_ctrl_bhi_dis())
+ return;
+
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ /* Mitigate KVM by default */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
+
+ /* Mitigate syscalls when the mitigation is forced =on */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1718,6 +1785,9 @@ static void __init spectre_v2_select_mitigation(void)
mode == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
+ if (boot_cpu_has(X86_BUG_BHI))
+ bhi_select_mitigation();
+
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
@@ -1832,8 +1902,6 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.
@@ -1848,7 +1916,7 @@ static void update_mds_branch_idle(void)
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}
@@ -2323,20 +2391,20 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
if (c->x86 != 6)
return;
- switch (c->x86_model) {
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_SANDYBRIDGE:
- case INTEL_FAM6_IVYBRIDGE:
- case INTEL_FAM6_HASWELL:
- case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_G:
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_SKYLAKE_L:
- case INTEL_FAM6_SKYLAKE:
- case INTEL_FAM6_KABYLAKE_L:
- case INTEL_FAM6_KABYLAKE:
+ switch (c->x86_vfm) {
+ case INTEL_NEHALEM:
+ case INTEL_WESTMERE:
+ case INTEL_SANDYBRIDGE:
+ case INTEL_IVYBRIDGE:
+ case INTEL_HASWELL:
+ case INTEL_HASWELL_L:
+ case INTEL_HASWELL_G:
+ case INTEL_BROADWELL:
+ case INTEL_BROADWELL_G:
+ case INTEL_SKYLAKE_L:
+ case INTEL_SKYLAKE:
+ case INTEL_KABYLAKE_L:
+ case INTEL_KABYLAKE:
if (c->x86_cache_bits < 44)
c->x86_cache_bits = 44;
break;
@@ -2695,15 +2763,15 @@ static char *stibp_state(void)
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
- return ", STIBP: disabled";
+ return "; STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
- return ", STIBP: forced";
+ return "; STIBP: forced";
case SPECTRE_V2_USER_STRICT_PREFERRED:
- return ", STIBP: always-on";
+ return "; STIBP: always-on";
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
if (static_key_enabled(&switch_to_cond_stibp))
- return ", STIBP: conditional";
+ return "; STIBP: conditional";
}
return "";
}
@@ -2712,10 +2780,10 @@ static char *ibpb_state(void)
{
if (boot_cpu_has(X86_FEATURE_IBPB)) {
if (static_key_enabled(&switch_mm_always_ibpb))
- return ", IBPB: always-on";
+ return "; IBPB: always-on";
if (static_key_enabled(&switch_mm_cond_ibpb))
- return ", IBPB: conditional";
- return ", IBPB: disabled";
+ return "; IBPB: conditional";
+ return "; IBPB: disabled";
}
return "";
}
@@ -2725,14 +2793,32 @@ static char *pbrsb_eibrs_state(void)
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
- return ", PBRSB-eIBRS: SW sequence";
+ return "; PBRSB-eIBRS: SW sequence";
else
- return ", PBRSB-eIBRS: Vulnerable";
+ return "; PBRSB-eIBRS: Vulnerable";
} else {
- return ", PBRSB-eIBRS: Not affected";
+ return "; PBRSB-eIBRS: Not affected";
}
}
+static const char *spectre_bhi_state(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_BHI))
+ return "; BHI: Not affected";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
+ return "; BHI: BHI_DIS_S";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ return "; BHI: SW loop, KVM: SW loop";
+ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+ rrsba_disabled)
+ return "; BHI: Retpoline";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ return "; BHI: Vulnerable, KVM: SW loop";
+
+ return "; BHI: Vulnerable";
+}
+
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -2745,13 +2831,15 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
- return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
+ return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
stibp_state(),
- boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
pbrsb_eibrs_state(),
+ spectre_bhi_state(),
+ /* this should always be at the end */
spectre_v2_module_string());
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5c1e6d6be267..2b170da84f97 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -68,6 +68,7 @@
#include <asm/traps.h>
#include <asm/sev.h>
#include <asm/tdx.h>
+#include <asm/posted_intr.h>
#include "cpu.h"
@@ -114,17 +115,17 @@ static const struct x86_cpu_id ppin_cpuids[] = {
X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
/* Legacy models without CPUID enumeration */
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
{}
};
@@ -1053,18 +1054,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- bool vp_bits_from_cpuid = true;
if (!cpu_has(c, X86_FEATURE_CPUID) ||
- (c->extended_cpuid_level < 0x80000008))
- vp_bits_from_cpuid = false;
-
- if (vp_bits_from_cpuid) {
- cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
-
- c->x86_virt_bits = (eax >> 8) & 0xff;
- c->x86_phys_bits = eax & 0xff;
- } else {
+ (c->extended_cpuid_level < 0x80000008)) {
if (IS_ENABLED(CONFIG_X86_64)) {
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
@@ -1078,7 +1070,13 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
}
+ } else {
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
}
+
c->x86_cache_bits = c->x86_phys_bits;
c->x86_cache_alignment = c->x86_clflush_size;
}
@@ -1120,12 +1118,13 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SPECTRE_V2 BIT(8)
#define NO_MMIO BIT(9)
#define NO_EIBRS_PBRSB BIT(10)
+#define NO_BHI BIT(11)
#define VULNWL(vendor, family, model, whitelist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
-#define VULNWL_INTEL(model, whitelist) \
- VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+#define VULNWL_INTEL(vfm, whitelist) \
+ X86_MATCH_VFM(vfm, whitelist)
#define VULNWL_AMD(family, whitelist) \
VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
@@ -1142,32 +1141,32 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */
- VULNWL_INTEL(TIGERLAKE, NO_MMIO),
- VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
- VULNWL_INTEL(ALDERLAKE, NO_MMIO),
- VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
+ VULNWL_INTEL(INTEL_TIGERLAKE, NO_MMIO),
+ VULNWL_INTEL(INTEL_TIGERLAKE_L, NO_MMIO),
+ VULNWL_INTEL(INTEL_ALDERLAKE, NO_MMIO),
+ VULNWL_INTEL(INTEL_ALDERLAKE_L, NO_MMIO),
- VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(CORE_YONAH, NO_SSB),
+ VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB),
- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1177,33 +1176,31 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
- VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
- VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+ VULNWL_INTEL(INTEL_ATOM_TREMONT, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(INTEL_ATOM_TREMONT_L, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(INTEL_ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
/* Zhaoxin Family 7 */
- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
+ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
{}
};
#define VULNBL(vendor, family, model, blacklist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
-#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
- X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
- INTEL_FAM6_##model, steppings, \
- X86_FEATURE_ANY, issues)
+#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues) \
+ X86_MATCH_VFM_STEPPINGS(vfm, steppings, issues)
#define VULNBL_AMD(family, blacklist) \
VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
@@ -1228,43 +1225,43 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define RFDS BIT(7)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
- VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
- VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
- VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
- VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_HASWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
+ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
@@ -1283,25 +1280,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
u64 x86_read_arch_cap_msr(void)
{
- u64 ia32_cap = 0;
+ u64 x86_arch_cap_msr = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
- return ia32_cap;
+ return x86_arch_cap_msr;
}
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
- ia32_cap & ARCH_CAP_PSDP_NO &&
- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
}
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
- if (ia32_cap & ARCH_CAP_RFDS_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
return false;
/*
@@ -1309,7 +1306,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
* indicate that mitigation is needed because guest is running on a
* vulnerable hardware or may migrate to such hardware:
*/
- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
return true;
/* Only consult the blacklist when there is no enumeration: */
@@ -1318,11 +1315,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
+ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1334,7 +1331,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
- !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1345,17 +1342,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Don't use AutoIBRS when SNP is enabled because it degrades host
* userspace indirect branch performance.
*/
- if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
(cpu_has(c, X86_FEATURE_AUTOIBRS) &&
!cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
}
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1374,9 +1371,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* TSX_CTRL check alone is not sufficient for cases when the microcode
* update is not present or when running as a guest that doesn't get TSX_CTRL.
*/
- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
(cpu_has(c, X86_FEATURE_RTM) ||
- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
/*
@@ -1402,7 +1399,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
- if (!arch_cap_mmio_immune(ia32_cap)) {
+ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1410,7 +1407,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
@@ -1428,18 +1425,25 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* disabling AVX2. The only way to do this in HW is to clear XCR0[2],
* which means that AVX will be disabled.
*/
- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
boot_cpu_has(X86_FEATURE_AVX))
setup_force_cpu_bug(X86_BUG_GDS);
- if (vulnerable_to_rfds(ia32_cap))
+ if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
+ /* When virtualized, eIBRS could be hidden, assume vulnerable */
+ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+ setup_force_cpu_bug(X86_BUG_BHI);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
- if (ia32_cap & ARCH_CAP_RDCL_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
@@ -2219,6 +2223,8 @@ void cpu_init(void)
barrier();
x2apic_setup();
+
+ intel_posted_msi_init();
}
mmgrab(&init_mm);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index b7174209d855..b7d9f530ae16 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
+ { X86_FEATURE_VAES, X86_FEATURE_AVX },
+ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
- { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
@@ -114,6 +114,9 @@ static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
if (WARN_ON(feature >= MAX_FEATURE_BITS))
return;
+ if (boot_cpu_has(feature))
+ WARN_ON(alternatives_patched);
+
clear_feature(c, feature);
/* Collect all features to disable, handling dependencies */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index be30d7fa2e66..3c3e7e5695ba 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -228,6 +228,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c)
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
+ clear_cpu_cap(c, X86_FEATURE_TME);
return;
}
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index f18d35fe27a9..30b1d63b97f3 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -204,12 +204,12 @@ static int intel_epb_offline(unsigned int cpu)
}
static const struct x86_cpu_id intel_epb_normal[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,
- ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,
- ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,
- ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L,
+ ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,
+ ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P,
+ ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
{}
};
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index ad6776081e60..8651643bddae 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -17,8 +17,7 @@
*
* A typical table entry would be to match a specific CPU
*
- * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
- * X86_FEATURE_ANY, NULL);
+ * X86_MATCH_VFM_FEATURE(INTEL_BROADWELL, X86_FEATURE_ANY, NULL);
*
* Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
* %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
@@ -26,7 +25,7 @@
* asm/cpu_device_id.h contains a set of useful macros which are shortcuts
* for various common selections. The above can be shortened to:
*
- * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
+ * X86_MATCH_VFM(INTEL_BROADWELL, NULL);
*
* Arrays used to match for this should also be declared using
* MODULE_DEVICE_TABLE(x86cpu, ...)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b5cc557cfc37..ad0623b659ed 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -47,7 +47,7 @@
#include <linux/kexec.h>
#include <asm/fred.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
@@ -1593,6 +1593,24 @@ noinstr void do_machine_check(struct pt_regs *regs)
else
queue_task_work(&m, msg, kill_me_maybe);
+ } else if (m.mcgstatus & MCG_STATUS_SEAM_NR) {
+ /*
+ * The saved RIP on the stack makes it look like the machine check
+ * was taken in the kernel on the instruction following
+ * the entry to SEAM mode. But MCG_STATUS_SEAM_NR indicates
+ * that the machine check was taken inside SEAM non-root
+ * mode. The CPU core has already marked that guest as dead.
+ * It is OK for the kernel to resume execution at the
+ * apparent point of the machine check as the fault did
+ * not occur there. Mark the page as poisoned so it won't
+ * be added to the free list when the guest is terminated.
+ */
+ if (mce_usable_address(&m)) {
+ struct page *p = pfn_to_online_page(m.addr >> PAGE_SHIFT);
+
+ if (p)
+ SetPageHWPoison(p);
+ }
} else {
/*
* Handle an MCE which has happened in kernel space but from
@@ -1930,14 +1948,14 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
cfg->bootlog = 0;
- if (c->x86 == 6 && c->x86_model == 45)
+ if (c->x86_vfm == INTEL_SANDYBRIDGE_X)
mce_flags.snb_ifu_quirk = 1;
/*
* Skylake, Cascade Lake and Cooper Lake require a quirk on
* rep movs.
*/
- if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X)
+ if (c->x86_vfm == INTEL_SKYLAKE_X)
mce_flags.skx_repmov_quirk = 1;
}
@@ -2500,12 +2518,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
return -EINVAL;
b = &per_cpu(mce_banks_array, s->id)[bank];
-
if (!b->init)
return -ENODEV;
b->ctl = new;
+
+ mutex_lock(&mce_sysfs_mutex);
mce_restart();
+ mutex_unlock(&mce_sysfs_mutex);
return size;
}
diff --git a/arch/x86/kernel/cpu/mce/genpool.c b/arch/x86/kernel/cpu/mce/genpool.c
index fbe8b61c3413..4284749ec803 100644
--- a/arch/x86/kernel/cpu/mce/genpool.c
+++ b/arch/x86/kernel/cpu/mce/genpool.c
@@ -16,14 +16,14 @@
* used to save error information organized in a lock-less list.
*
* This memory pool is only to be used to save MCE records in MCE context.
- * MCE events are rare, so a fixed size memory pool should be enough. Use
- * 2 pages to save MCE events for now (~80 MCE records at most).
+ * MCE events are rare, so a fixed size memory pool should be enough.
+ * Allocate on a sliding scale based on number of CPUs.
*/
-#define MCE_POOLSZ (2 * PAGE_SIZE)
+#define MCE_MIN_ENTRIES 80
+#define MCE_PER_CPU 2
static struct gen_pool *mce_evt_pool;
static LLIST_HEAD(mce_event_llist);
-static char gen_pool_buf[MCE_POOLSZ];
/*
* Compare the record "t" with each of the records on list "l" to see if
@@ -118,22 +118,32 @@ int mce_gen_pool_add(struct mce *mce)
static int mce_gen_pool_create(void)
{
- struct gen_pool *tmpp;
+ int mce_numrecords, mce_poolsz, order;
+ struct gen_pool *gpool;
int ret = -ENOMEM;
-
- tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1);
- if (!tmpp)
- goto out;
-
- ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1);
+ void *mce_pool;
+
+ order = order_base_2(sizeof(struct mce_evt_llist));
+ gpool = gen_pool_create(order, -1);
+ if (!gpool)
+ return ret;
+
+ mce_numrecords = max(MCE_MIN_ENTRIES, num_possible_cpus() * MCE_PER_CPU);
+ mce_poolsz = mce_numrecords * (1 << order);
+ mce_pool = kmalloc(mce_poolsz, GFP_KERNEL);
+ if (!mce_pool) {
+ gen_pool_destroy(gpool);
+ return ret;
+ }
+ ret = gen_pool_add(gpool, (unsigned long)mce_pool, mce_poolsz, -1);
if (ret) {
- gen_pool_destroy(tmpp);
- goto out;
+ gen_pool_destroy(gpool);
+ kfree(mce_pool);
+ return ret;
}
- mce_evt_pool = tmpp;
+ mce_evt_pool = gpool;
-out:
return ret;
}
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 399b62e223d2..f6103e6bf69a 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -13,7 +13,7 @@
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
@@ -455,10 +455,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
{
u64 error_control;
- switch (c->x86_model) {
- case INTEL_FAM6_SANDYBRIDGE_X:
- case INTEL_FAM6_IVYBRIDGE_X:
- case INTEL_FAM6_HASWELL_X:
+ switch (c->x86_vfm) {
+ case INTEL_SANDYBRIDGE_X:
+ case INTEL_IVYBRIDGE_X:
+ case INTEL_HASWELL_X:
if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
return;
error_control |= 2;
@@ -484,12 +484,11 @@ bool intel_filter_mce(struct mce *m)
struct cpuinfo_x86 *c = &boot_cpu_data;
/* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */
- if ((c->x86 == 6) &&
- ((c->x86_model == INTEL_FAM6_HASWELL) ||
- (c->x86_model == INTEL_FAM6_HASWELL_L) ||
- (c->x86_model == INTEL_FAM6_BROADWELL) ||
- (c->x86_model == INTEL_FAM6_HASWELL_G) ||
- (c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
+ if ((c->x86_vfm == INTEL_HASWELL ||
+ c->x86_vfm == INTEL_HASWELL_L ||
+ c->x86_vfm == INTEL_BROADWELL ||
+ c->x86_vfm == INTEL_HASWELL_G ||
+ c->x86_vfm == INTEL_SKYLAKE_X) &&
(m->bank == 0) &&
((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
return true;
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index c4477162c07d..dac4d64dfb2a 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -12,7 +12,7 @@
#include <linux/uaccess.h>
#include <asm/mce.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
@@ -39,20 +39,20 @@ static struct severity {
u64 mask;
u64 result;
unsigned char sev;
- unsigned char mcgmask;
- unsigned char mcgres;
+ unsigned short mcgmask;
+ unsigned short mcgres;
unsigned char ser;
unsigned char context;
unsigned char excp;
unsigned char covered;
- unsigned char cpu_model;
+ unsigned int cpu_vfm;
unsigned char cpu_minstepping;
unsigned char bank_lo, bank_hi;
char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
-#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
+#define VFM_STEPPING(m, s) .cpu_vfm = m, .cpu_minstepping = s
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define KERNEL_RECOV .context = IN_KERNEL_RECOV
@@ -128,7 +128,7 @@ static struct severity {
MCESEV(
AO, "Uncorrected Patrol Scrub Error",
SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
- MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
+ VFM_STEPPING(INTEL_SKYLAKE_X, 4), BANK_RANGE(13, 18)
),
/* ignore OVER for UCNA */
@@ -174,6 +174,18 @@ static struct severity {
USER
),
MCESEV(
+ AR, "Data load error in SEAM non-root mode",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+ MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
+ KERNEL
+ ),
+ MCESEV(
+ AR, "Instruction fetch error in SEAM non-root mode",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
+ KERNEL
+ ),
+ MCESEV(
PANIC, "Data load in unrecoverable area of kernel",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
KERNEL
@@ -290,7 +302,6 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
switch (fixup_type) {
case EX_TYPE_UACCESS:
- case EX_TYPE_COPY:
if (!copy_user)
return IN_KERNEL;
m->kflags |= MCE_IN_KERNEL_COPYIN;
@@ -386,7 +397,7 @@ static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char
continue;
if (s->excp && excp != s->excp)
continue;
- if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
+ if (s->cpu_vfm && boot_cpu_data.x86_vfm != s->cpu_vfm)
continue;
if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
continue;
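
The new SEAM non-root entries slot into the same table-driven matching as the rest of severities[]: a rule applies when the masked status equals the expected result, and the first hit wins. A compact sketch of that pattern; the masks, values and messages below are made up for illustration.

#include <stdio.h>
#include <stdint.h>

struct rule {
	uint64_t mask;
	uint64_t result;
	const char *msg;
};

static const struct rule rules[] = {
	{ 0xf000, 0x8000, "uncorrected, action required" },
	{ 0x00ff, 0x0005, "corrected, log only" },
};

int main(void)
{
	uint64_t status = 0x8005;	/* pretend status register value */

	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		if ((status & rules[i].mask) == rules[i].result) {
			printf("first match: %s\n", rules[i].msg);
			break;	/* like the severity scan: first hit wins */
		}
	}
	return 0;
}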
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 13b45b9c806d..c0d56c02b8da 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -84,8 +84,6 @@ struct microcode_amd {
unsigned int mpb[];
};
-#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
-
static struct equiv_cpu_table {
unsigned int num_entries;
struct equiv_cpu_entry *entry;
@@ -465,7 +463,7 @@ static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, siz
return !__apply_microcode_amd(mc);
}
-static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
+static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct firmware fw;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 232026a239a6..b3658d11e7b6 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -60,11 +60,6 @@ module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
*/
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
-struct cpu_info_ctx {
- struct cpu_signature *cpu_sig;
- int err;
-};
-
/*
* Those patch levels cannot be updated to newer ones and thus should be final.
*/
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 5f0414452b67..815fa67356a2 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -21,7 +21,7 @@
#include <linux/uio.h>
#include <linux/mm.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
@@ -577,8 +577,7 @@ static bool is_blacklisted(unsigned int cpu)
* This behavior is documented in item BDF90, #334165 (Intel Xeon
* Processor E7-8800/4800 v4 Product Family).
*/
- if (c->x86 == 6 &&
- c->x86_model == INTEL_FAM6_BROADWELL_X &&
+ if (c->x86_vfm == INTEL_BROADWELL_X &&
c->x86_stepping == 0x01 &&
llc_size_per_core > 2621440 &&
c->microcode < 0x0b000021) {
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 422a4ddc2ab7..7b29ebda024f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -108,7 +108,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
(boot_cpu_data.x86 >= 0x0f)))
return;
- if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return;
rdmsr(MSR_AMD64_SYSCFG, lo, hi);
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 83e40341583e..a113d9aba553 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include "internal.h"
@@ -56,14 +56,9 @@ int max_name_width, max_data_width;
*/
bool rdt_alloc_capable;
-static void
-mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
-static void
-cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
-static void
-mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
+static void mba_wrmsr_intel(struct msr_param *m);
+static void cat_wrmsr(struct msr_param *m);
+static void mba_wrmsr_amd(struct msr_param *m);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
@@ -309,12 +304,11 @@ static void rdt_get_cdp_l2_config(void)
rdt_get_cdp_config(RDT_RESOURCE_L2);
}
-static void
-mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+static void mba_wrmsr_amd(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
for (i = m->low; i < m->high; i++)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
@@ -334,25 +328,22 @@ static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
return r->default_ctrl;
}
-static void
-mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r)
+static void mba_wrmsr_intel(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
/* Write the delay values for mba. */
for (i = m->low; i < m->high; i++)
- wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
+ wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res));
}
-static void
-cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+static void cat_wrmsr(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
for (i = m->low; i < m->high; i++)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
@@ -362,6 +353,8 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
struct rdt_domain *d;
+ lockdep_assert_cpus_held();
+
list_for_each_entry(d, &r->domains, list) {
/* Find the domain that contains this CPU */
if (cpumask_test_cpu(cpu, &d->cpu_mask))
@@ -378,19 +371,11 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
void rdt_ctrl_update(void *arg)
{
+ struct rdt_hw_resource *hw_res;
struct msr_param *m = arg;
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
- struct rdt_resource *r = m->res;
- int cpu = smp_processor_id();
- struct rdt_domain *d;
- d = get_domain_from_cpu(cpu, r);
- if (d) {
- hw_res->msr_update(d, m, r);
- return;
- }
- pr_warn_once("cpu %d not found in any domain for resource %s\n",
- cpu, r->name);
+ hw_res = resctrl_to_arch_res(m->res);
+ hw_res->msr_update(m);
}
/*
@@ -463,9 +448,11 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
hw_dom->ctrl_val = dc;
setup_default_ctrlval(r, dc);
+ m.res = r;
+ m.dom = d;
m.low = 0;
m.high = hw_res->num_closid;
- hw_res->msr_update(d, &m, r);
+ hw_res->msr_update(&m);
return 0;
}
@@ -821,18 +808,18 @@ static __init bool get_rdt_mon_resources(void)
static __init void __check_quirks_intel(void)
{
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_HASWELL_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_HASWELL_X:
if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
cache_alloc_hsw_probe();
break;
- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_SKYLAKE_X:
if (boot_cpu_data.x86_stepping <= 4)
set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
else
set_rdt_options("!l3cat");
fallthrough;
- case INTEL_FAM6_BROADWELL_X:
+ case INTEL_BROADWELL_X:
intel_rdt_mbm_apply_quirk();
break;
}
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 7997b47743a2..b7291f60399c 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -272,22 +272,6 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
}
}
-static bool apply_config(struct rdt_hw_domain *hw_dom,
- struct resctrl_staged_config *cfg, u32 idx,
- cpumask_var_t cpu_mask)
-{
- struct rdt_domain *dom = &hw_dom->d_resctrl;
-
- if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
- cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
- hw_dom->ctrl_val[idx] = cfg->new_ctrl;
-
- return true;
- }
-
- return false;
-}
-
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
@@ -302,9 +286,10 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
hw_dom->ctrl_val[idx] = cfg_val;
msr_param.res = r;
+ msr_param.dom = d;
msr_param.low = idx;
msr_param.high = idx + 1;
- hw_res->msr_update(d, &msr_param, r);
+ hw_res->msr_update(&msr_param);
return 0;
}
@@ -315,48 +300,39 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
struct rdt_hw_domain *hw_dom;
struct msr_param msr_param;
enum resctrl_conf_type t;
- cpumask_var_t cpu_mask;
struct rdt_domain *d;
u32 idx;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
- msr_param.res = NULL;
list_for_each_entry(d, &r->domains, list) {
hw_dom = resctrl_to_arch_dom(d);
+ msr_param.res = NULL;
for (t = 0; t < CDP_NUM_TYPES; t++) {
cfg = &hw_dom->d_resctrl.staged_config[t];
if (!cfg->have_new_ctrl)
continue;
idx = get_config_index(closid, t);
- if (!apply_config(hw_dom, cfg, idx, cpu_mask))
+ if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
continue;
+ hw_dom->ctrl_val[idx] = cfg->new_ctrl;
if (!msr_param.res) {
msr_param.low = idx;
msr_param.high = msr_param.low + 1;
msr_param.res = r;
+ msr_param.dom = d;
} else {
msr_param.low = min(msr_param.low, idx);
msr_param.high = max(msr_param.high, idx + 1);
}
}
+ if (msr_param.res)
+ smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
}
- if (cpumask_empty(cpu_mask))
- goto done;
-
- /* Update resource control msr on all the CPUs. */
- on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
-
-done:
- free_cpumask_var(cpu_mask);
-
return 0;
}
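
Across the two resctrl hunks above, the domain pointer moves into struct msr_param, so the per-CPU callback no longer looks up its domain and the caller can target one CPU per domain directly. A userspace sketch of the reshaped dispatch; the struct layouts and names below are stand-ins, not the kernel's.

#include <stdio.h>

struct domain { int id; unsigned int ctrl_val[4]; };
struct msr_param { struct domain *dom; unsigned int low, high; };

/* Stand-in for rdt_ctrl_update(), normally run on a CPU of the domain. */
static void ctrl_update(void *arg)
{
	struct msr_param *m = arg;

	for (unsigned int i = m->low; i < m->high; i++)
		printf("domain %d: write MSR slot %u = %u\n",
		       m->dom->id, i, m->dom->ctrl_val[i]);
}

int main(void)
{
	struct domain d = { .id = 1, .ctrl_val = { 3, 7, 0, 0 } };
	struct msr_param m = { .dom = &d, .low = 0, .high = 2 };

	/* The kernel hands &m to smp_call_function_any(); here we just call it. */
	ctrl_update(&m);
	return 0;
}

Carrying the domain in the parameter block is what lets the temporary cpumask allocation in the old code disappear.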
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c99f26ebe7a6..f1d926832ec8 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -78,7 +78,8 @@ cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
else
cpu = cpumask_any_but(mask, exclude_cpu);
- if (!IS_ENABLED(CONFIG_NO_HZ_FULL))
+ /* Only continue if tick_nohz_full_mask has been initialized. */
+ if (!tick_nohz_full_enabled())
return cpu;
/* If the CPU picked isn't marked nohz_full nothing more needs doing. */
@@ -378,11 +379,13 @@ static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
/**
* struct msr_param - set a range of MSRs from a domain
* @res: The resource to use
+ * @dom: The domain to update
* @low: Beginning index from base MSR
* @high: End index
*/
struct msr_param {
struct rdt_resource *res;
+ struct rdt_domain *dom;
u32 low;
u32 high;
};
@@ -442,8 +445,7 @@ struct rdt_hw_resource {
struct rdt_resource r_resctrl;
u32 num_closid;
unsigned int msr_base;
- void (*msr_update) (struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
+ void (*msr_update)(struct msr_param *m);
unsigned int mon_scale;
unsigned int mbm_width;
unsigned int mbm_cfg_mask;
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index c34a35ec0f03..2345e6836593 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -24,6 +24,7 @@
#include <asm/resctrl.h>
#include "internal.h"
+#include "trace.h"
/**
* struct rmid_entry - dirty tracking for all RMID.
@@ -354,6 +355,16 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
rmid_dirty = true;
} else {
rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
+
+ /*
+ * x86's CLOSID and RMID are independent numbers, so the entry's
+ * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
+ * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
+ * used to select the configuration. It is thus necessary to track both
+ * CLOSID and RMID because there may be dependencies between them
+ * on some architectures.
+ */
+ trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->id, val);
}
if (force_free || !rmid_dirty) {
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 884b88e25141..aacf236dfe3b 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/perf_event.h>
@@ -31,7 +31,7 @@
#include "internal.h"
#define CREATE_TRACE_POINTS
-#include "pseudo_lock_event.h"
+#include "trace.h"
/*
* The bits needed to disable hardware prefetching varies based on the
@@ -88,8 +88,8 @@ static u64 get_prefetch_disable_bits(void)
boot_cpu_data.x86 != 6)
return 0;
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_BROADWELL_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_BROADWELL_X:
/*
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* as:
@@ -100,8 +100,8 @@ static u64 get_prefetch_disable_bits(void)
* 63:4 Reserved
*/
return 0xF;
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_PLUS:
/*
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* as:
@@ -1084,9 +1084,9 @@ static int measure_l2_residency(void *_plr)
* L2_HIT 02H
* L2_MISS 10H
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_PLUS:
perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
.umask = 0x10);
perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
@@ -1123,8 +1123,8 @@ static int measure_l3_residency(void *_plr)
* MISS 41H
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_BROADWELL_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_BROADWELL_X:
/* On BDW the hit event counts references, not hits */
perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
.umask = 0x4f);
@@ -1142,7 +1142,7 @@ static int measure_l3_residency(void *_plr)
*/
counts.miss_after -= counts.miss_before;
- if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
+ if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) {
/*
* On BDW references and misses are counted, need to adjust.
* Sometimes the "hits" counter is a bit more than the
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 011e17efb1a6..02f213f1c51c 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2813,16 +2813,12 @@ static int reset_all_ctrls(struct rdt_resource *r)
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
struct rdt_hw_domain *hw_dom;
struct msr_param msr_param;
- cpumask_var_t cpu_mask;
struct rdt_domain *d;
int i;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
msr_param.res = r;
msr_param.low = 0;
msr_param.high = hw_res->num_closid;
@@ -2834,17 +2830,13 @@ static int reset_all_ctrls(struct rdt_resource *r)
*/
list_for_each_entry(d, &r->domains, list) {
hw_dom = resctrl_to_arch_dom(d);
- cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
for (i = 0; i < hw_res->num_closid; i++)
hw_dom->ctrl_val[i] = r->default_ctrl;
+ msr_param.dom = d;
+ smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
}
- /* Update CBM on all the CPUs in cpu_mask */
- on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
-
- free_cpumask_var(cpu_mask);
-
return 0;
}
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h b/arch/x86/kernel/cpu/resctrl/trace.h
index 428ebbd4270b..2a506316b303 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h
+++ b/arch/x86/kernel/cpu/resctrl/trace.h
@@ -2,8 +2,8 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM resctrl
-#if !defined(_TRACE_PSEUDO_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_PSEUDO_LOCK_H
+#if !defined(_TRACE_RESCTRL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RESCTRL_H
#include <linux/tracepoint.h>
@@ -35,9 +35,25 @@ TRACE_EVENT(pseudo_lock_l3,
TP_printk("hits=%llu miss=%llu",
__entry->l3_hits, __entry->l3_miss));
-#endif /* _TRACE_PSEUDO_LOCK_H */
+TRACE_EVENT(mon_llc_occupancy_limbo,
+ TP_PROTO(u32 ctrl_hw_id, u32 mon_hw_id, int domain_id, u64 llc_occupancy_bytes),
+ TP_ARGS(ctrl_hw_id, mon_hw_id, domain_id, llc_occupancy_bytes),
+ TP_STRUCT__entry(__field(u32, ctrl_hw_id)
+ __field(u32, mon_hw_id)
+ __field(int, domain_id)
+ __field(u64, llc_occupancy_bytes)),
+ TP_fast_assign(__entry->ctrl_hw_id = ctrl_hw_id;
+ __entry->mon_hw_id = mon_hw_id;
+ __entry->domain_id = domain_id;
+ __entry->llc_occupancy_bytes = llc_occupancy_bytes;),
+ TP_printk("ctrl_hw_id=%u mon_hw_id=%u domain_id=%d llc_occupancy_bytes=%llu",
+ __entry->ctrl_hw_id, __entry->mon_hw_id, __entry->domain_id,
+ __entry->llc_occupancy_bytes)
+ );
+
+#endif /* _TRACE_RESCTRL_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE pseudo_lock_event
+#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 0dad49a09b7a..af5aa2c754c2 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
+ { X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
@@ -49,6 +50,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+ { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index 262f5fb18d74..22b65a5f5ec6 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -113,7 +113,7 @@ static unsigned long sgx_get_unmapped_area(struct file *file,
if (flags & MAP_FIXED)
return addr;
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 166692f2d501..27892e57c4ef 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -13,6 +13,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index aaca8d235dc2..d17c9b71eb4a 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -123,7 +123,6 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
#endif
- set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
}
@@ -210,7 +209,11 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
topo_info.nr_disabled_cpus++;
}
- /* Register present and possible CPUs in the domain maps */
+ /*
+ * Register present and possible CPUs in the domain
+ * maps. cpu_possible_map will be updated in
+ * topology_init_possible_cpus() after enumeration is done.
+ */
for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++)
set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
}
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index 1a8b3ad493af..d419deed6a48 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -29,11 +29,21 @@ static bool parse_8000_0008(struct topo_scan *tscan)
if (!sft)
sft = get_count_order(ecx.cpu_nthreads + 1);
- topology_set_dom(tscan, TOPO_SMT_DOMAIN, sft, ecx.cpu_nthreads + 1);
+ /*
+ * cpu_nthreads describes the number of threads in the package
+ * sft is the number of APIC ID bits per package
+ *
+ * As the number of actual threads per core is not described in
+ * this leaf, just set the CORE domain shift and let the later
+ * parsers set SMT shift. Assume one thread per core by default
+ * which is correct if there are no other CPUID leafs to parse.
+ */
+ topology_update_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
+ topology_set_dom(tscan, TOPO_CORE_DOMAIN, sft, ecx.cpu_nthreads + 1);
return true;
}
-static void store_node(struct topo_scan *tscan, unsigned int nr_nodes, u16 node_id)
+static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id)
{
/*
* Starting with Fam 17h the DIE domain could probably be used to
@@ -48,7 +58,7 @@ static void store_node(struct topo_scan *tscan, unsigned int nr_nodes, u16 node_
tscan->amd_node_id = node_id;
}
-static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
+static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
{
struct {
// eax
@@ -73,12 +83,14 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
tscan->c->topo.initial_apicid = leaf.ext_apic_id;
/*
- * If leaf 0xb is available, then SMT shift is set already. If not
- * take it from ecx.threads_per_core and use topo_update_dom() -
- * topology_set_dom() would propagate and overwrite the already
- * propagated CORE level.
+ * If leaf 0xb is available, then the domain shifts are set
+ * already and nothing to do here.
*/
- if (!has_0xb) {
+ if (!has_topoext) {
+ /*
+ * Leaf 0x80000008 set the CORE domain shift already.
+ * Update the SMT domain, but do not propagate it.
+ */
unsigned int nthreads = leaf.core_nthreads + 1;
topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
@@ -107,64 +119,86 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
return true;
}
-static bool parse_fam10h_node_id(struct topo_scan *tscan)
+static void parse_fam10h_node_id(struct topo_scan *tscan)
{
- struct {
- union {
+ union {
+ struct {
u64 node_id : 3,
nodes_per_pkg : 3,
unused : 58;
- u64 msr;
};
+ u64 msr;
} nid;
if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
- return false;
+ return;
rdmsrl(MSR_FAM10H_NODE_ID, nid.msr);
store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id);
tscan->c->topo.llc_id = nid.node_id;
- return true;
}
static void legacy_set_llc(struct topo_scan *tscan)
{
unsigned int apicid = tscan->c->topo.initial_apicid;
- /* parse_8000_0008() set everything up except llc_id */
- tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
+ /* If none of the parsers set LLC ID then use the die ID for it. */
+ if (tscan->c->topo.llc_id == BAD_APICID)
+ tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
+}
+
+static void topoext_fixup(struct topo_scan *tscan)
+{
+ struct cpuinfo_x86 *c = tscan->c;
+ u64 msrval;
+
+ /* Try to re-enable TopologyExtensions if switched off by BIOS */
+ if (cpu_has(c, X86_FEATURE_TOPOEXT) || c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 0x15 || c->x86_model < 0x10 || c->x86_model > 0x6f)
+ return;
+
+ if (msr_set_bit(0xc0011005, 54) <= 0)
+ return;
+
+ rdmsrl(0xc0011005, msrval);
+ if (msrval & BIT_64(54)) {
+ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ }
}
static void parse_topology_amd(struct topo_scan *tscan)
{
- bool has_0xb = false;
+ bool has_topoext = false;
/*
* If the extended topology leaf 0x8000_001e is available
- * try to get SMT and CORE shift from leaf 0xb first, then
- * try to get the CORE shift from leaf 0x8000_0008.
+ * try to get SMT, CORE, TILE, and DIE shifts from extended
+ * CPUID leaf 0x8000_0026 on supported processors first. If
+ * extended CPUID leaf 0x8000_0026 is not supported, try to
+ * get SMT and CORE shift from leaf 0xb first, then try to
+ * get the CORE shift from leaf 0x8000_0008.
*/
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
- has_0xb = cpu_parse_topology_ext(tscan);
+ has_topoext = cpu_parse_topology_ext(tscan);
- if (!has_0xb && !parse_8000_0008(tscan))
+ if (!has_topoext && !parse_8000_0008(tscan))
return;
/* Prefer leaf 0x8000001e if available */
- if (parse_8000_001e(tscan, has_0xb))
+ if (parse_8000_001e(tscan, has_topoext))
return;
/* Try the NODEID MSR */
- if (parse_fam10h_node_id(tscan))
- return;
-
- legacy_set_llc(tscan);
+ parse_fam10h_node_id(tscan);
}
void cpu_parse_topology_amd(struct topo_scan *tscan)
{
tscan->amd_nodes_per_pkg = 1;
+ topoext_fixup(tscan);
parse_topology_amd(tscan);
+ legacy_set_llc(tscan);
if (tscan->amd_nodes_per_pkg > 1)
set_cpu_cap(tscan->c, X86_FEATURE_AMD_DCM);
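
The parse_fam10h_node_id() change turns the accessor inside-out because the bitfields and the raw MSR value must overlay the same storage, which needs a union containing the struct rather than a struct containing the union. A small standalone decode of the same shape, using a made-up 64-bit value (bitfield order assumed LSB-first, as on x86 GCC):

#include <stdio.h>

int main(void)
{
	union {
		struct {
			unsigned long long node_id	 : 3,
					   nodes_per_pkg : 3,
					   unused	 : 58;
		};
		unsigned long long msr;
	} nid;

	nid.msr = 0x19;	/* made-up MSR contents: node 1 of 4 */

	printf("node_id=%llu nodes_per_pkg=%llu\n",
	       (unsigned long long)nid.node_id,
	       (unsigned long long)nid.nodes_per_pkg + 1);
	return 0;
}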
diff --git a/arch/x86/kernel/cpu/topology_ext.c b/arch/x86/kernel/cpu/topology_ext.c
index e477228cd5b2..467b0326bf1a 100644
--- a/arch/x86/kernel/cpu/topology_ext.c
+++ b/arch/x86/kernel/cpu/topology_ext.c
@@ -13,7 +13,10 @@ enum topo_types {
CORE_TYPE = 2,
MAX_TYPE_0B = 3,
MODULE_TYPE = 3,
+ AMD_CCD_TYPE = 3,
TILE_TYPE = 4,
+ AMD_SOCKET_TYPE = 4,
+ MAX_TYPE_80000026 = 5,
DIE_TYPE = 5,
DIEGRP_TYPE = 6,
MAX_TYPE_1F = 7,
@@ -32,6 +35,13 @@ static const unsigned int topo_domain_map_0b_1f[MAX_TYPE_1F] = {
[DIEGRP_TYPE] = TOPO_DIEGRP_DOMAIN,
};
+static const unsigned int topo_domain_map_80000026[MAX_TYPE_80000026] = {
+ [SMT_TYPE] = TOPO_SMT_DOMAIN,
+ [CORE_TYPE] = TOPO_CORE_DOMAIN,
+ [AMD_CCD_TYPE] = TOPO_TILE_DOMAIN,
+ [AMD_SOCKET_TYPE] = TOPO_DIE_DOMAIN,
+};
+
static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,
unsigned int *last_dom)
{
@@ -56,6 +66,7 @@ static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,
switch (leaf) {
case 0x0b: maxtype = MAX_TYPE_0B; map = topo_domain_map_0b_1f; break;
case 0x1f: maxtype = MAX_TYPE_1F; map = topo_domain_map_0b_1f; break;
+ case 0x80000026: maxtype = MAX_TYPE_80000026; map = topo_domain_map_80000026; break;
default: return false;
}
@@ -125,6 +136,10 @@ bool cpu_parse_topology_ext(struct topo_scan *tscan)
if (tscan->c->cpuid_level >= 0x1f && parse_topology_leaf(tscan, 0x1f))
return true;
+ /* AMD: Try leaf 0x80000026 first. */
+ if (tscan->c->extended_cpuid_level >= 0x80000026 && parse_topology_leaf(tscan, 0x80000026))
+ return true;
+
/* Intel/AMD: Fall back to leaf 0xB if available */
return tscan->c->cpuid_level >= 0x0b && parse_topology_leaf(tscan, 0x0b);
}
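
topo_domain_map_80000026[] follows the same lookup idiom as the 0xb/0x1f map: the level type reported by the subleaf indexes an array that names the topology domain. A reduced sketch with the domains as strings; the numeric level types mirror the enum above, everything else is illustrative.

#include <stdio.h>

enum { SMT_TYPE = 1, CORE_TYPE = 2, AMD_CCD_TYPE = 3, AMD_SOCKET_TYPE = 4, MAX_TYPE = 5 };

static const char * const domain_map[MAX_TYPE] = {
	[SMT_TYPE]	  = "SMT domain",
	[CORE_TYPE]	  = "CORE domain",
	[AMD_CCD_TYPE]	  = "TILE domain",
	[AMD_SOCKET_TYPE] = "DIE domain",
};

int main(void)
{
	unsigned int type = AMD_CCD_TYPE;	/* pretend subleaf level type */

	if (type < MAX_TYPE && domain_map[type])
		printf("level type %u -> %s\n", type, domain_map[type]);
	else
		printf("level type %u -> unknown, stop parsing\n", type);
	return 0;
}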
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e74d0c4286c1..f06501445cd9 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -402,20 +402,26 @@ int crash_load_segments(struct kimage *image)
#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt
-/* These functions provide the value for the sysfs crash_hotplug nodes */
-#ifdef CONFIG_HOTPLUG_CPU
-int arch_crash_hotplug_cpu_support(void)
+int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
{
- return crash_check_update_elfcorehdr();
-}
-#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_crash_hotplug_memory_support(void)
-{
- return crash_check_update_elfcorehdr();
-}
+#ifdef CONFIG_KEXEC_FILE
+ if (image->file_mode)
+ return 1;
#endif
+ /*
+ * Initially, crash hotplug support for kexec_load was added
+ * with the KEXEC_UPDATE_ELFCOREHDR flag. Later, this
+ * functionality was expanded to accommodate multiple kexec
+ * segment updates, leading to the introduction of the
+ * KEXEC_CRASH_HOTPLUG_SUPPORT kexec flag bit. Consequently,
+ * when the kexec tool sends either of these flags, it indicates
+ * that the required kexec segment (elfcorehdr) is excluded from
+ * the SHA calculation.
+ */
+ return (kexec_flags & KEXEC_UPDATE_ELFCOREHDR ||
+ kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT);
+}
unsigned int arch_crash_get_elfcorehdr_size(void)
{
@@ -432,10 +438,12 @@ unsigned int arch_crash_get_elfcorehdr_size(void)
/**
* arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
* @image: a pointer to kexec_crash_image
+ * @arg: struct memory_notify handler for memory hotplug case and
+ * NULL for CPU hotplug case.
*
* Prepare the new elfcorehdr and replace the existing elfcorehdr.
*/
-void arch_crash_handle_hotplug_event(struct kimage *image)
+void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{
void *elfbuf = NULL, *old_elfcorehdr;
unsigned long nr_mem_ranges;
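
The comment in the crash.c hunk above reduces to a simple predicate: kexec_file_load() images always support in-kernel elfcorehdr updates, while kexec_load() images do so only when the tool opted in with one of the two flags. A trimmed-down sketch of that decision; the flag values here are invented for illustration, the real ones live in the kexec uapi headers.

#include <stdio.h>
#include <stdbool.h>

#define KEXEC_UPDATE_ELFCOREHDR		0x1	/* placeholder bit */
#define KEXEC_CRASH_HOTPLUG_SUPPORT	0x2	/* placeholder bit */

static bool hotplug_supported(bool file_mode, unsigned long kexec_flags)
{
	if (file_mode)		/* kexec_file_load(): kernel owns the segments */
		return true;

	/* kexec_load(): only if elfcorehdr was excluded from the tool's SHA */
	return kexec_flags & (KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT);
}

int main(void)
{
	printf("file_mode:         %d\n", hotplug_supported(true, 0));
	printf("old kexec_load:    %d\n", hotplug_supported(false, 0));
	printf("kexec_load + flag: %d\n",
	       hotplug_supported(false, KEXEC_CRASH_HOTPLUG_SUPPORT));
	return 0;
}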
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 003e0298f46a..8e3c53b4d070 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -24,6 +24,7 @@
#include <asm/pci_x86.h>
#include <asm/setup.h>
#include <asm/i8259.h>
+#include <asm/numa.h>
#include <asm/prom.h>
__initdata u64 initial_dtb;
@@ -137,6 +138,7 @@ static void __init dtb_cpu_setup(void)
continue;
}
topology_register_apic(apic_id, CPU_ACPIID_INVALID, true);
+ set_apicid_to_node(apic_id, of_node_to_nid(dn));
}
}
@@ -277,9 +279,18 @@ static void __init dtb_apic_setup(void)
dtb_ioapic_setup();
}
-#ifdef CONFIG_OF_EARLY_FLATTREE
+static void __init x86_dtb_parse_smp_config(void)
+{
+ if (!of_have_populated_dt())
+ return;
+
+ dtb_setup_hpet();
+ dtb_apic_setup();
+}
+
void __init x86_flattree_get_config(void)
{
+#ifdef CONFIG_OF_EARLY_FLATTREE
u32 size, map_len;
void *dt;
@@ -301,14 +312,7 @@ void __init x86_flattree_get_config(void)
if (initial_dtb)
early_memunmap(dt, map_len);
-}
#endif
-
-void __init x86_dtb_parse_smp_config(void)
-{
- if (!of_have_populated_dt())
- return;
-
- dtb_setup_hpet();
- dtb_apic_setup();
+ if (of_have_populated_dt())
+ x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 44a91ef5a23b..a7d562697e50 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -405,8 +405,8 @@ static void __die_header(const char *str, struct pt_regs *regs, long err)
pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
printk(KERN_DEFAULT
- "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
- pr,
+ "Oops: %s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff,
+ ++die_counter, pr,
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 6f1b379e3b38..68b09f718f10 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -532,9 +532,10 @@ u64 __init e820__range_update(u64 start, u64 size, enum e820_type old_type, enum
return __e820__range_update(e820_table, start, size, old_type, new_type);
}
-static u64 __init e820__range_update_kexec(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
+u64 __init e820__range_update_table(struct e820_table *t, u64 start, u64 size,
+ enum e820_type old_type, enum e820_type new_type)
{
- return __e820__range_update(e820_table_kexec, start, size, old_type, new_type);
+ return __e820__range_update(t, start, size, old_type, new_type);
}
/* Remove a range of memory from the E820 table: */
@@ -806,7 +807,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
addr = memblock_phys_alloc(size, align);
if (addr) {
- e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ e820__range_update_table(e820_table_kexec, addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
e820__update_table_kexec();
}
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index e963344b0449..53935b4d62e3 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -2,6 +2,7 @@
/*
* EISA specific code
*/
+#include <linux/cc_platform.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/io.h>
@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
{
void __iomem *p;
- if (xen_pv_domain() && !xen_initial_domain())
+ if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return 0;
p = ioremap(0x0FFFD9, 4);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 520deb411a70..1209c7aebb21 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -145,8 +145,8 @@ void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
asm volatile(
"fnclex\n\t"
"emms\n\t"
- "fildl %P[addr]" /* set F?P to defined value */
- : : [addr] "m" (fpstate));
+ "fildl %[addr]" /* set F?P to defined value */
+ : : [addr] "m" (*fpstate));
}
if (use_xsave()) {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 33a214b1a4ce..c5a026fee5e0 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -991,6 +991,7 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
return __raw_xsave_addr(xsave, xfeature_nr);
}
+EXPORT_SYMBOL_GPL(get_xsave_addr);
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -1434,8 +1435,8 @@ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor)
return rstor;
/*
- * XSAVE(S): clone(), fpu_swap_kvm_fpu()
- * XRSTORS(S): fpu_swap_kvm_fpu()
+ * XSAVE(S): clone(), fpu_swap_kvm_fpstate()
+ * XRSTORS(S): fpu_swap_kvm_fpstate()
*/
/*
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 19ca623ffa2a..05df04f39628 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -54,8 +54,6 @@ extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);
-extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
-
static inline u64 xfeatures_mask_supervisor(void)
{
return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 70139d9d2e01..8da0e66ca22d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -25,6 +25,7 @@
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/execmem.h>
#include <trace/syscall.h>
@@ -260,25 +261,14 @@ void arch_ftrace_update_code(int command)
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64
-#ifdef CONFIG_MODULES
-#include <linux/moduleloader.h>
-/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
- return module_alloc(size);
+ return execmem_alloc(EXECMEM_FTRACE, size);
}
static inline void tramp_free(void *tramp)
{
- module_memfree(tramp);
+ execmem_free(tramp);
}
-#else
-/* Trampolines can only be created if modules are supported */
-static inline void *alloc_tramp(unsigned long size)
-{
- return NULL;
-}
-static inline void tramp_free(void *tramp) { }
-#endif
/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b50f3641c4d6..2e42056d2306 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -44,9 +44,6 @@
#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
-
-#define SIZEOF_PTREGS 17*4
-
/*
* Worst-case size of the kernel mapping we need to make:
* a relocatable kernel can live anywhere in lowmem, so we need to be able
@@ -488,19 +485,13 @@ SYM_DATA_END(initial_page_table)
.data
.balign 4
-/*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
- * reliably detect the end of the stack.
- */
-SYM_DATA(initial_stack,
- .long init_thread_union + THREAD_SIZE -
- SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)
+SYM_DATA(initial_stack, .long __top_init_kernel_stack)
__INITRODATA
int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n"
-#include "../../x86/xen/xen-head.S"
+#include "../xen/xen-head.S"
/*
* The IDT and GDT 'descriptors' are a strange 48-bit object
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d8198fbd70e5..330922b328bf 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -66,7 +66,7 @@ SYM_CODE_START_NOALIGN(startup_64)
mov %rsi, %r15
/* Set up the stack for verify_cpu() */
- leaq (__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp
+ leaq __top_init_kernel_stack(%rip), %rsp
/* Setup GSBASE to allow stack canary access for C code */
movl $MSR_GS_BASE, %ecx
@@ -720,7 +720,7 @@ SYM_DATA(smpboot_control, .long 0)
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
-#include "../../x86/xen/xen-head.S"
+#include "../xen/xen-head.S"
__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index fc37c8d83daf..f445bec516a0 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -163,6 +163,9 @@ static const __initconst struct idt_data apic_idts[] = {
# endif
INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt),
INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt),
+# ifdef CONFIG_X86_POSTED_MSI
+ INTG(POSTED_MSI_NOTIFICATION_VECTOR, asm_sysvec_posted_msi_notification),
+# endif
#endif
};
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 35fde0107901..385e3a5fc304 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -22,6 +22,8 @@
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>
+#include <asm/posted_intr.h>
+#include <asm/irq_remapping.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
@@ -182,6 +184,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
irq_stats(j)->kvm_posted_intr_wakeup_ipis);
seq_puts(p, " Posted-interrupt wakeup event\n");
#endif
+#ifdef CONFIG_X86_POSTED_MSI
+ seq_printf(p, "%*s: ", prec, "PMN");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ irq_stats(j)->posted_msi_notification_count);
+ seq_puts(p, " Posted MSI notification event\n");
+#endif
return 0;
}
@@ -240,24 +249,16 @@ static __always_inline void handle_irq(struct irq_desc *desc,
__handle_irq(desc, regs);
}
-/*
- * common_interrupt() handles all normal device IRQ's (the special SMP
- * cross-CPU interrupts have their own entry points).
- */
-DEFINE_IDTENTRY_IRQ(common_interrupt)
+static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
struct irq_desc *desc;
-
- /* entry code tells RCU that we're not quiescent. Check it. */
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
+ int ret = 0;
desc = __this_cpu_read(vector_irq[vector]);
if (likely(!IS_ERR_OR_NULL(desc))) {
handle_irq(desc, regs);
} else {
- apic_eoi();
-
+ ret = -EINVAL;
if (desc == VECTOR_UNUSED) {
pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
__func__, smp_processor_id(),
@@ -267,6 +268,23 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
}
}
+ return ret;
+}
+
+/*
+ * common_interrupt() handles all normal device IRQ's (the special SMP
+ * cross-CPU interrupts have their own entry points).
+ */
+DEFINE_IDTENTRY_IRQ(common_interrupt)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ /* entry code tells RCU that we're not quiescent. Check it. */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
+
+ if (unlikely(call_irq_handler(vector, regs)))
+ apic_eoi();
+
set_irq_regs(old_regs);
}
@@ -334,12 +352,139 @@ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
}
#endif
+#ifdef CONFIG_X86_POSTED_MSI
+
+/* Posted Interrupt Descriptors for coalesced MSIs to be posted */
+DEFINE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
+
+void intel_posted_msi_init(void)
+{
+ u32 destination;
+ u32 apic_id;
+
+ this_cpu_write(posted_msi_pi_desc.nv, POSTED_MSI_NOTIFICATION_VECTOR);
+
+ /*
+ * APIC destination ID is stored in bit 8:15 while in XAPIC mode.
+ * VT-d spec. CH 9.11
+ */
+ apic_id = this_cpu_read(x86_cpu_to_apicid);
+ destination = x2apic_enabled() ? apic_id : apic_id << 8;
+ this_cpu_write(posted_msi_pi_desc.ndst, destination);
+}
+
+/*
+ * De-multiplexing posted interrupts is on the performance path, the code
+ * below is written to optimize the cache performance based on the following
+ * considerations:
+ * 1.Posted interrupt descriptor (PID) fits in a cache line that is frequently
+ * accessed by both CPU and IOMMU.
+ * 2.During posted MSI processing, the CPU needs to do 64-bit read and xchg
+ * for checking and clearing posted interrupt request (PIR), a 256 bit field
+ * within the PID.
+ * 3.On the other side, the IOMMU does atomic swaps of the entire PID cache
+ * line when posting interrupts and setting control bits.
+ * 4.The CPU can access the cache line a magnitude faster than the IOMMU.
+ * 5.Each time the IOMMU does interrupt posting to the PIR will evict the PID
+ * cache line. The cache line states after each operation are as follows:
+ *   CPU             IOMMU                 PID Cache line state
+ *   ---------------------------------------------------------------
+ *...read64                                          exclusive
+ *...lock xchg64                                      modified
+ *...                      post/atomic swap            invalid
+ *...-------------------------------------------------------------
+ *
+ * To reduce L1 data cache miss, it is important to avoid contention with
+ * IOMMU's interrupt posting/atomic swap. Therefore, a copy of PIR is used
+ * to dispatch interrupt handlers.
+ *
+ * In addition, the code is trying to keep the cache line state consistent
+ * as much as possible. e.g. when making a copy and clearing the PIR
+ * (assuming non-zero PIR bits are present in the entire PIR), it does:
+ * read, read, read, read, xchg, xchg, xchg, xchg
+ * instead of:
+ * read, xchg, read, xchg, read, xchg, read, xchg
+ */
+static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
+{
+ int i, vec = FIRST_EXTERNAL_VECTOR;
+ unsigned long pir_copy[4];
+ bool handled = false;
+
+ for (i = 0; i < 4; i++)
+ pir_copy[i] = pir[i];
+
+ for (i = 0; i < 4; i++) {
+ if (!pir_copy[i])
+ continue;
+
+ pir_copy[i] = arch_xchg(&pir[i], 0);
+ handled = true;
+ }
+
+ if (handled) {
+ for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
+ call_irq_handler(vec, regs);
+ }
+
+ return handled;
+}
+
+/*
+ * Performance data shows that 3 is good enough to harvest 90+% of the benefit
+ * on high IRQ rate workload.
+ */
+#define MAX_POSTED_MSI_COALESCING_LOOP 3
+
+/*
+ * For MSIs that are delivered as posted interrupts, the CPU notifications
+ * can be coalesced if the MSIs arrive in high frequency bursts.
+ */
+DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct pi_desc *pid;
+ int i = 0;
+
+ pid = this_cpu_ptr(&posted_msi_pi_desc);
+
+ inc_irq_stat(posted_msi_notification_count);
+ irq_enter();
+
+ /*
+ * Max coalescing count includes the extra round of handle_pending_pir
+ * after clearing the outstanding notification bit. Hence, at most
+ * MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
+ */
+ while (++i < MAX_POSTED_MSI_COALESCING_LOOP) {
+ if (!handle_pending_pir(pid->pir64, regs))
+ break;
+ }
+
+ /*
+ * Clear outstanding notification bit to allow new IRQ notifications,
+ * do this last to maximize the window of interrupt coalescing.
+ */
+ pi_clear_on(pid);
+
+ /*
+ * There could be a race of PI notification and the clearing of ON bit,
+ * process PIR bits one last time such that handling the new interrupts
+ * are not delayed until the next IRQ.
+ */
+ handle_pending_pir(pid->pir64, regs);
+
+ apic_eoi();
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+#endif /* X86_POSTED_MSI */
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
- unsigned int irr, vector;
+ unsigned int vector;
struct irq_desc *desc;
struct irq_data *data;
struct irq_chip *chip;
@@ -366,8 +511,7 @@ void fixup_irqs(void)
if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
continue;
- irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
- if (irr & (1 << (vector % 32))) {
+ if (is_vector_pending(vector)) {
desc = __this_cpu_read(vector_irq[vector]);
raw_spin_lock(&desc->lock);
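
The read-all-then-xchg-all ordering that the long comment above argues for can be exercised outside the kernel with C11 atomics. The sketch below copies the four PIR words with plain loads, clears only the non-zero words with an exchange, then walks the set bits; the word count and vector numbering mirror the hunk, everything else is simplified for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

#define PIR_WORDS 4

/* Simulated 256-bit posted-interrupt request field, normally set by the IOMMU. */
static _Atomic uint64_t pir[PIR_WORDS];

static int handle_pending_pir(void)
{
	uint64_t pir_copy[PIR_WORDS];
	int handled = 0;

	/* Pass 1: plain reads, keeping the cache line shared as long as possible. */
	for (int i = 0; i < PIR_WORDS; i++)
		pir_copy[i] = atomic_load(&pir[i]);

	/* Pass 2: atomically clear only the words that had bits set. */
	for (int i = 0; i < PIR_WORDS; i++) {
		if (!pir_copy[i])
			continue;
		pir_copy[i] = atomic_exchange(&pir[i], 0);
		handled = 1;
	}

	if (handled)
		for (int i = 0; i < PIR_WORDS; i++)
			for (int bit = 0; bit < 64; bit++)
				if (pir_copy[i] & (1ULL << bit))
					printf("dispatch vector %d\n", i * 64 + bit);
	return handled;
}

int main(void)
{
	atomic_store(&pir[0], 1ULL << 35);	/* pretend the IOMMU posted vector 35 */
	atomic_store(&pir[2], 1ULL << 3);	/* ... and vector 131 */
	printf("handled=%d\n", handle_pending_pir());
	return 0;
}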
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index fe0c859873d1..ade0043ce56e 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
+#include <linux/vmalloc.h>
#include <asm/cpu_entry_area.h>
#include <asm/softirq_stack.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d0e49bd7c6f3..72e6a45e7ec2 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -40,12 +40,12 @@
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
-#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>
#include <linux/cfi.h>
+#include <linux/execmem.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -495,7 +495,7 @@ void *alloc_insn_page(void)
{
void *page;
- page = module_alloc(PAGE_SIZE);
+ page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
if (!page)
return NULL;
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index dd2ec14adb77..15af7e98e161 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4cadfd606e8e..263f8aed4e2c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -44,7 +44,7 @@
#include <asm/svm.h>
#include <asm/e820/api.h>
-DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);
static int kvmapf = 1;
@@ -65,6 +65,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
+static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
@@ -244,7 +245,7 @@ noinstr u32 kvm_read_and_reset_apf_flags(void)
{
u32 flags = 0;
- if (__this_cpu_read(apf_reason.enabled)) {
+ if (__this_cpu_read(async_pf_enabled)) {
flags = __this_cpu_read(apf_reason.flags);
__this_cpu_write(apf_reason.flags, 0);
}
@@ -295,7 +296,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
inc_irq_stat(irq_hv_callback_count);
- if (__this_cpu_read(apf_reason.enabled)) {
+ if (__this_cpu_read(async_pf_enabled)) {
token = __this_cpu_read(apf_reason.token);
kvm_async_pf_task_wake(token);
__this_cpu_write(apf_reason.token, 0);
@@ -362,7 +363,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
- __this_cpu_write(apf_reason.enabled, 1);
+ __this_cpu_write(async_pf_enabled, true);
pr_debug("setup async PF for cpu %d\n", smp_processor_id());
}
@@ -383,11 +384,11 @@ static void kvm_guest_cpu_init(void)
static void kvm_pv_disable_apf(void)
{
- if (!__this_cpu_read(apf_reason.enabled))
+ if (!__this_cpu_read(async_pf_enabled))
return;
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
- __this_cpu_write(apf_reason.enabled, 0);
+ __this_cpu_write(async_pf_enabled, false);
pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e18914c0e38a..837450b6e882 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -36,57 +36,6 @@ do { \
} while (0)
#endif
-#ifdef CONFIG_RANDOMIZE_BASE
-static unsigned long module_load_offset;
-
-/* Mutex protects the module_load_offset. */
-static DEFINE_MUTEX(module_kaslr_mutex);
-
-static unsigned long int get_module_load_offset(void)
-{
- if (kaslr_enabled()) {
- mutex_lock(&module_kaslr_mutex);
- /*
- * Calculate the module_load_offset the first time this
- * code is called. Once calculated it stays the same until
- * reboot.
- */
- if (module_load_offset == 0)
- module_load_offset =
- get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
- mutex_unlock(&module_kaslr_mutex);
- }
- return module_load_offset;
-}
-#else
-static unsigned long int get_module_load_offset(void)
-{
- return 0;
-}
-#endif
-
-void *module_alloc(unsigned long size)
-{
- gfp_t gfp_mask = GFP_KERNEL;
- void *p;
-
- if (PAGE_ALIGN(size) > MODULES_LEN)
- return NULL;
-
- p = __vmalloc_node_range(size, MODULE_ALIGN,
- MODULES_VADDR + get_module_load_offset(),
- MODULES_END, gfp_mask, PAGE_KERNEL,
- VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
- NUMA_NO_NODE, __builtin_return_address(0));
-
- if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
- vfree(p);
- return NULL;
- }
-
- return p;
-}
-
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
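
The helper removed from module.c above computed one randomized, page-aligned module load offset and then reused it for the lifetime of the boot; that logic now lives with the generic execmem allocator. For reference, a userspace sketch of the compute-once pattern it implemented; the randomness source and the 1..1024-page range are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL

/* Compute the offset on first use, then return the cached value. */
static unsigned long get_load_offset(void)
{
	static unsigned long offset;

	if (!offset)
		offset = ((unsigned long)(rand() % 1024) + 1) * PAGE_SIZE;
	return offset;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	printf("offset #1 = %#lx\n", get_load_offset());
	printf("offset #2 = %#lx (unchanged, computed once)\n", get_load_offset());
	return 0;
}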
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 9a5b372c706f..ed163c8c8604 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -580,7 +580,7 @@ EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
static char *nmi_check_stall_msg[] = {
/*                                                                           */
-/* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler.               */
+/* +--------- nmi_seq & 0x1: CPU is currently in NMI handler.               */
/* |   +------ cpu_is_offline(cpu)                                           */
/* |   |   +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls):    */
/* |   |   |       NMI handler has been invoked.                             */
@@ -628,22 +628,26 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
nmi_seq = READ_ONCE(nsp->idt_nmi_seq);
if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) {
msgp = "CPU entered NMI handler function, but has not exited";
- } else if ((nsp->idt_nmi_seq_snap & 0x1) != (nmi_seq & 0x1)) {
- msgp = "CPU is handling NMIs";
- } else {
- idx = ((nsp->idt_seq_snap & 0x1) << 2) |
+ } else if (nsp->idt_nmi_seq_snap == nmi_seq ||
+ nsp->idt_nmi_seq_snap + 1 == nmi_seq) {
+ idx = ((nmi_seq & 0x1) << 2) |
(cpu_is_offline(cpu) << 1) |
(nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls));
msgp = nmi_check_stall_msg[idx];
if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
modp = ", but OK because ignore_nmis was set";
- if (nmi_seq & 0x1)
- msghp = " (CPU currently in NMI handler function)";
- else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
+ if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
msghp = " (CPU exited one NMI handler function)";
+ else if (nmi_seq & 0x1)
+ msghp = " (CPU currently in NMI handler function)";
+ else
+ msghp = " (CPU was never in an NMI handler function)";
+ } else {
+ msgp = "CPU is handling NMIs";
}
- pr_alert("%s: CPU %d: %s%s%s, last activity: %lu jiffies ago.\n",
- __func__, cpu, msgp, modp, msghp, j - READ_ONCE(nsp->recv_jiffies));
+ pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);
+ pr_alert("%s: last activity: %lu jiffies ago.\n",
+ __func__, j - READ_ONCE(nsp->recv_jiffies));
}
}
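
The reworked stall report packs three booleans into a 3-bit index into nmi_check_stall_msg[]. A toy version of that encoding, with the message table shortened to placeholder strings:

#include <stdio.h>
#include <stdbool.h>

/* Indexed by (in_handler << 2) | (offline << 1) | handler_ran. */
static const char * const stall_msg[8] = {
	"online, no NMI seen",
	"online, handler ran",
	"offline, no NMI seen",
	"offline, handler ran",
	"in handler, online, no NMI seen",
	"in handler, online, handler ran",
	"in handler, offline, no NMI seen",
	"in handler, offline, handler ran",
};

int main(void)
{
	bool in_handler = true, offline = false, handler_ran = true;
	unsigned int idx = ((unsigned int)in_handler << 2) |
			   ((unsigned int)offline << 1) |
			    (unsigned int)handler_ran;

	printf("idx=%u: %s\n", idx, stall_msg[idx]);
	return 0;
}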
diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
index 319fef37d9dc..cc2c34ba7228 100644
--- a/arch/x86/kernel/probe_roms.c
+++ b/arch/x86/kernel/probe_roms.c
@@ -203,16 +203,6 @@ void __init probe_roms(void)
unsigned char c;
int i;
- /*
- * The ROM memory range is not part of the e820 table and is therefore not
- * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
- * memory, and SNP requires encrypted memory to be validated before access.
- * Do that here.
- */
- snp_prep_memory(video_rom_resource.start,
- ((system_rom_resource.end + 1) - video_rom_resource.start),
- SNP_PAGE_STATE_PRIVATE);
-
/* video rom */
upper = adapter_rom_resources[0].start;
for (start = video_rom_resource.start; start < upper; start += 2048) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7062b84dd467..6d3d20e3e43a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -139,7 +139,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+ if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 2e7066980f3e..51a849a79c98 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -10,7 +10,6 @@
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/time.h>
-#include <asm/intel-mid.h>
#include <asm/setup.h>
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ef206500ed6f..05c5aa951da7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -7,9 +7,9 @@
*/
#include <linux/acpi.h>
#include <linux/console.h>
+#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
@@ -36,6 +36,7 @@
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
+#include <asm/coco.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
@@ -753,6 +754,22 @@ void __init setup_arch(char **cmdline_p)
boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ /* append boot loader cmdline to builtin */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif
+#endif
+
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
/*
* If we have OLPC OFW, we might end up relocating the fixmap due to
* reserve_top(), so do this before touching the ioremap area.
@@ -832,22 +849,6 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
- strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- if (builtin_cmdline[0]) {
- /* append boot loader cmdline to builtin */
- strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
- strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
- }
-#endif
-#endif
-
- strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
- *cmdline_p = command_line;
-
/*
* x86_configure_nx() is called before parse_early_param() to detect
* whether hardware doesn't support NX (so that the early EHCI debug
@@ -902,7 +903,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
reserve_ibft_region();
- dmi_setup();
+ x86_init.resources.dmi_setup();
/*
* VMware detection requires dmi to be available, so this
@@ -992,6 +993,7 @@ void __init setup_arch(char **cmdline_p)
* memory size.
*/
mem_encrypt_setup_arch();
+ cc_random_init();
efi_fake_memmap();
efi_find_mirror();
@@ -1106,8 +1108,6 @@ void __init setup_arch(char **cmdline_p)
*/
arch_reserve_crashkernel();
- memblock_find_dma_reserve();
-
if (!early_xdbc_setup_hardware())
early_xdbc_register_console();
@@ -1217,3 +1217,10 @@ static int __init register_kernel_offset_dumper(void)
return 0;
}
__initcall(register_kernel_offset_dumper);
+
+#ifdef CONFIG_HOTPLUG_CPU
+bool arch_cpu_is_hotpluggable(int cpu)
+{
+ return cpu > 0;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 8b04958da5e7..b4f8fa0f722c 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1203,12 +1203,14 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
break;
case SVM_EXIT_MONITOR:
- if (opcode == 0x010f && modrm == 0xc8)
+ /* MONITOR and MONITORX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
return ES_OK;
break;
case SVM_EXIT_MWAIT:
- if (opcode == 0x010f && modrm == 0xc9)
+ /* MWAIT and MWAITX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
return ES_OK;
break;
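For reference, the ModRM values checked above follow directly from the instruction encodings: MONITOR is 0F 01 C8, MWAIT is 0F 01 C9, MONITORX is 0F 01 FA and MWAITX is 0F 01 FB, and the X variants share the base instructions' exit codes as the new comments note. A standalone sketch of the same test, with the two opcode bytes 0F 01 packed little-endian into 0x010f as in vc_check_opcode_bytes():

#include <stdbool.h>
#include <stdint.h>

static bool is_monitor_like(uint16_t opcode, uint8_t modrm)
{
	return opcode == 0x010f &&
	       (modrm == 0xc8 /* MONITOR */ || modrm == 0xfa /* MONITORX */);
}

static bool is_mwait_like(uint16_t opcode, uint8_t modrm)
{
	return opcode == 0x010f &&
	       (modrm == 0xc9 /* MWAIT */ || modrm == 0xfb /* MWAITX */);
}

int main(void)
{
	/* Both the base and the X encodings must be accepted. */
	return !(is_monitor_like(0x010f, 0xc8) && is_monitor_like(0x010f, 0xfa) &&
		 is_mwait_like(0x010f, 0xc9) && is_mwait_like(0x010f, 0xfb));
}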
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index b59b09c2f284..3342ed58e168 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
+#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>
#include <asm/init.h>
@@ -647,7 +648,7 @@ static u64 __init get_secrets_page(void)
static u64 __init get_snp_jump_table_addr(void)
{
- struct snp_secrets_page_layout *layout;
+ struct snp_secrets_page *secrets;
void __iomem *mem;
u64 pa, addr;
@@ -661,9 +662,9 @@ static u64 __init get_snp_jump_table_addr(void)
return 0;
}
- layout = (__force struct snp_secrets_page_layout *)mem;
+ secrets = (__force struct snp_secrets_page *)mem;
- addr = layout->os_area.ap_jump_table_pa;
+ addr = secrets->os_area.ap_jump_table_pa;
iounmap(mem);
return addr;
@@ -795,21 +796,6 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
-{
- unsigned long vaddr, npages;
-
- vaddr = (unsigned long)__va(paddr);
- npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
-
- if (op == SNP_PAGE_STATE_PRIVATE)
- early_snp_set_memory_private(vaddr, paddr, npages);
- else if (op == SNP_PAGE_STATE_SHARED)
- early_snp_set_memory_shared(vaddr, paddr, npages);
- else
- WARN(1, "invalid memory op %d\n", op);
-}
-
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
unsigned long vaddr_end, int op)
{
@@ -952,7 +938,7 @@ static int snp_set_vmsa(void *va, bool vmsa)
#define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3)
-static void *snp_alloc_vmsa_page(void)
+static void *snp_alloc_vmsa_page(int cpu)
{
struct page *p;
@@ -964,7 +950,7 @@ static void *snp_alloc_vmsa_page(void)
*
* Allocate an 8k page which is also 8k-aligned.
*/
- p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
+ p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
if (!p)
return NULL;
@@ -1033,7 +1019,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
* #VMEXIT of that vCPU would wipe out all of the settings being done
* here.
*/
- vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
+ vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu);
if (!vmsa)
return -ENOMEM;
@@ -1355,7 +1341,7 @@ static void __init alloc_runtime_data(int cpu)
{
struct sev_es_runtime_data *data;
- data = memblock_alloc(sizeof(*data), PAGE_SIZE);
+ data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
if (!data)
panic("Can't allocate SEV-ES runtime data");
@@ -2136,6 +2122,17 @@ void __head __noreturn snp_abort(void)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
+/*
+ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
+ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
+ * ROM region can cause a crash since this region is not pre-validated.
+ */
+void __init snp_dmi_setup(void)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ dmi_setup();
+}
+
static void dump_cpuid_table(void)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
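snp_dmi_setup() works because setup_arch() now goes through the new x86_init.resources.dmi_setup hook (see the setup.c and x86_init.c hunks in this patch) instead of calling dmi_setup() directly. A hedged, compile-and-run model of that indirection; the override of the hook by the SNP code is assumed to happen elsewhere in the SEV init path and is not shown in the hunks here:

#include <stdio.h>

/* Simplified stand-in for x86_init.resources. */
struct resources_ops {
	void (*dmi_setup)(void);
};

static void dmi_setup(void)     { puts("full DMI scan (may touch legacy ROM)"); }
static void snp_dmi_setup(void) { puts("DMI scan only via EFI config tables"); }

static struct resources_ops resources = {
	.dmi_setup = dmi_setup,		/* default, as wired up in x86_init.c */
};

int main(void)
{
	/* Assumed: an SEV-SNP guest overrides the hook during early boot. */
	resources.dmi_setup = snp_dmi_setup;

	/* setup_arch() calls the hook rather than dmi_setup() directly. */
	resources.dmi_setup();
	return 0;
}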
@@ -2287,16 +2284,6 @@ static int __init snp_init_platform_device(void)
}
device_initcall(snp_init_platform_device);
-void kdump_sev_callback(void)
-{
- /*
- * Do wbinvd() on remote CPUs when SNP is enabled in order to
- * safely do SNP_SHUTDOWN on the local CPU.
- */
- if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
- wbinvd();
-}
-
void sev_show_status(void)
{
int i;
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 59e15dd8d0f8..6f1e9883f074 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -163,8 +163,8 @@ static int shstk_setup(void)
if (features_enabled(ARCH_SHSTK_SHSTK))
return 0;
- /* Also not supported for 32 bit and x32 */
- if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || in_32bit_syscall())
+ /* Also not supported for 32 bit */
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || in_ia32_syscall())
return -EOPNOTSUPP;
size = adjust_shstk_size(0);
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index c12624bc82a3..ef654530bf5a 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -34,7 +34,7 @@
#include <asm/gsseg.h>
#ifdef CONFIG_IA32_EMULATION
-#include <asm/ia32_unistd.h>
+#include <asm/unistd_32_ia32.h>
static inline void reload_segments(struct sigcontext_32 *sc)
{
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 23d8aaf8d9fd..8a94053c5444 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -315,6 +315,9 @@ int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
uc_flags = frame_uc_flags(regs);
+ if (setup_signal_shadow_stack(ksig))
+ return -EFAULT;
+
if (!user_access_begin(frame, sizeof(*frame)))
return -EFAULT;
@@ -377,6 +380,9 @@ COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
goto badframe;
+ if (restore_signal_shadow_stack())
+ goto badframe;
+
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 76bb65045c64..0c35207320cb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,9 +438,9 @@ static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
*/
static const struct x86_cpu_id intel_cod_cpu[] = {
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */
- X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */
+ X86_MATCH_VFM(INTEL_HASWELL_X, 0), /* COD */
+ X86_MATCH_VFM(INTEL_BROADWELL_X, 0), /* COD */
+ X86_MATCH_VFM(INTEL_ANY, 1), /* SNC */
{}
};
@@ -1033,20 +1033,22 @@ static __init void disable_smp(void)
void __init smp_prepare_cpus_common(void)
{
- unsigned int i;
+ unsigned int cpu, node;
/* Mark all except the boot CPU as hotpluggable */
- for_each_possible_cpu(i) {
- if (i)
- per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids;
+ for_each_possible_cpu(cpu) {
+ if (cpu)
+ per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
}
- for_each_possible_cpu(i) {
- zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL);
+ for_each_possible_cpu(cpu) {
+ node = cpu_to_node(cpu);
+
+ zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
+ zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
+ zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
+ zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
+ zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
}
set_cpu_sibling_map(0);
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index cb9fa1d5c66f..01d7cd85ef97 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -112,13 +112,21 @@ static void find_start_end(unsigned long addr, unsigned long flags,
*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
+static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
+{
+ if (vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
unsigned long begin, end;
if (flags & MAP_FIXED)
@@ -137,12 +145,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = begin;
info.high_limit = end;
- info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ info.start_gap = stack_guard_placement(vm_flags);
if (filp) {
info.align_mask = get_align_mask();
info.align_offset += get_align_bits();
@@ -151,14 +158,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/* requested length too big for entire address space */
if (len > TASK_SIZE)
@@ -192,6 +199,7 @@ get_unmapped_area:
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
+ info.start_gap = stack_guard_placement(vm_flags);
/*
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
@@ -203,7 +211,6 @@ get_unmapped_area:
if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
- info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (filp) {
info.align_mask = get_align_mask();
@@ -223,3 +230,18 @@ bottomup:
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, flags, 0);
+}
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
deleted file mode 100644
index d42c28b8bfd8..000000000000
--- a/arch/x86/kernel/topology.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Populate sysfs with topology information
- *
- * Written by: Matthew Dobson, IBM Corporation
- * Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <colpatch@us.ibm.com>
- */
-#include <linux/interrupt.h>
-#include <linux/nodemask.h>
-#include <linux/export.h>
-#include <linux/mmzone.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <asm/io_apic.h>
-#include <asm/cpu.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-bool arch_cpu_is_hotpluggable(int cpu)
-{
- return cpu > 0;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5a69a49acc96..06b170759e5b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -26,7 +26,7 @@
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
-static DEFINE_STATIC_KEY_FALSE(__use_tsc);
+static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
int tsc_clocksource_reliable;
@@ -682,7 +682,7 @@ unsigned long native_calibrate_tsc(void)
* clock.
*/
if (crystal_khz == 0 &&
- boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
+ boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
crystal_khz = 25000;
/*
@@ -713,7 +713,7 @@ unsigned long native_calibrate_tsc(void)
* For Atom SoCs TSC is the only reliable clocksource.
* Mark TSC reliable so no watchdog on it.
*/
- if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
+ if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 6555a857a1e6..deeb02825670 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -147,13 +147,13 @@ static const struct freq_desc freq_desc_lgm = {
};
static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &freq_desc_pnw),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_TABLET,&freq_desc_clv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &freq_desc_byt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &freq_desc_tng),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &freq_desc_cht),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &freq_desc_ann),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_NP, &freq_desc_lgm),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &freq_desc_pnw),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_TABLET, &freq_desc_clv),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &freq_desc_byt),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &freq_desc_tng),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &freq_desc_cht),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &freq_desc_ann),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &freq_desc_lgm),
{}
};
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 1123ef3ccf90..4334033658ed 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -193,11 +193,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
cur->warned = false;
/*
- * If a non-zero TSC value for socket 0 may be valid then the default
- * adjusted value cannot assumed to be zero either.
+ * The default adjust value cannot be assumed to be zero on any socket.
*/
- if (tsc_async_resets)
- cur->adjusted = bootval;
+ cur->adjusted = bootval;
/*
* Check whether this CPU is the first in a package to come up. In
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 56451fd2099e..3509afc6a672 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -15,11 +15,7 @@
* put it inside the section definition.
*/
-#ifdef CONFIG_X86_32
-#define LOAD_OFFSET __PAGE_OFFSET
-#else
#define LOAD_OFFSET __START_KERNEL_map
-#endif
#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
@@ -114,11 +110,10 @@ PHDRS {
SECTIONS
{
+ . = __START_KERNEL;
#ifdef CONFIG_X86_32
- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
- . = __START_KERNEL;
phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif
@@ -172,6 +167,9 @@ SECTIONS
/* init_task */
INIT_TASK_DATA(THREAD_SIZE)
+ /* equivalent to task_pt_regs(&init_task) */
+ __top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;
+
#ifdef CONFIG_X86_32
/* 32 bit has nosave before _edata */
NOSAVE_DATA
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a42830dc151b..d5dc5a92635a 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -3,6 +3,7 @@
*
* For licencing details see kernel-base/COPYING
*/
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
.probe_roms = probe_roms,
.reserve_resources = reserve_standard_io_resources,
.memory_setup = e820__memory_setup_default,
+ .dmi_setup = dmi_setup,
},
.mpparse = {
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3aaf7e86a859..d64fb2b3eb69 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -95,6 +95,19 @@ config KVM_INTEL
To compile this as a module, choose M here: the module
will be called kvm-intel.
+config KVM_INTEL_PROVE_VE
+ bool "Check that guests do not receive #VE exceptions"
+ default KVM_PROVE_MMU || DEBUG_KERNEL
+ depends on KVM_INTEL
+ help
+
+ Checks that KVM's page table management code will not incorrectly
+ let guests receive a virtualization exception. Virtualization
+ exceptions will be trapped by the hypervisor rather than injected
+ in the guest.
+
+ If unsure, say N.
+
config X86_SGX_KVM
bool "Software Guard eXtensions (SGX) Virtualization"
depends on X86_SGX && KVM_INTEL
@@ -122,6 +135,7 @@ config KVM_AMD_SEV
default y
depends on KVM_AMD && X86_64
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
+ select ARCH_HAS_CC_PLATFORM
help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a88bb14266b6..5494669a055a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,11 +3,6 @@
ccflags-y += -I $(srctree)/arch/x86/kvm
ccflags-$(CONFIG_KVM_WERROR) += -Werror
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
-OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
-endif
-
include $(srctree)/virt/kvm/Makefile.kvm
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
@@ -21,14 +16,15 @@ kvm-$(CONFIG_KVM_XEN) += xen.o
kvm-$(CONFIG_KVM_SMM) += smm.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
- vmx/nested.o vmx/posted_intr.o
+ vmx/nested.o vmx/posted_intr.o vmx/main.o
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
kvm-intel-$(CONFIG_KVM_HYPERV) += vmx/hyperv.o vmx/hyperv_evmcs.o
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
- svm/sev.o
-kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o
+
+kvm-amd-$(CONFIG_KVM_AMD_SEV) += svm/sev.o
+kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
ifdef CONFIG_HYPERV
kvm-y += kvm_onhyperv.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index adba49afb5fe..f2f2be5d1141 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -189,15 +189,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
return 0;
}
-static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
- const char *sig)
+static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
+ int nent, const char *sig)
{
struct kvm_hypervisor_cpuid cpuid = {};
struct kvm_cpuid_entry2 *entry;
u32 base;
for_each_possible_hypervisor_cpuid_base(base) {
- entry = kvm_find_cpuid_entry(vcpu, base);
+ entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
if (entry) {
u32 signature[3];
@@ -217,22 +217,29 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp
return cpuid;
}
-static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
- struct kvm_cpuid_entry2 *entries, int nent)
+static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
+ const char *sig)
{
- u32 base = vcpu->arch.kvm_cpuid.base;
-
- if (!base)
- return NULL;
+ return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent, sig);
+}
- return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
+static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
+ int nent, u32 kvm_cpuid_base)
+{
+ return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
- return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent);
+ u32 base = vcpu->arch.kvm_cpuid.base;
+
+ if (!base)
+ return NULL;
+
+ return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent, base);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
@@ -266,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
int nent)
{
struct kvm_cpuid_entry2 *best;
+ struct kvm_hypervisor_cpuid kvm_cpuid;
best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
if (best) {
@@ -292,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
- if (kvm_hlt_in_guest(vcpu->kvm) && best &&
- (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
- best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+ kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
+ if (kvm_cpuid.base) {
+ best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
+ if (kvm_hlt_in_guest(vcpu->kvm) && best)
+ best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+ }
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
@@ -366,6 +376,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_update_pv_runtime(vcpu);
+ vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
@@ -761,7 +772,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
- 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
+ 0 /* SME */ | 0 /* SEV */ | 0 /* VM_PAGE_FLUSH */ | 0 /* SEV_ES */ |
F(SME_COHERENT));
kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
@@ -1221,9 +1232,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = 0;
break;
case 0x80000008: {
- unsigned g_phys_as = (entry->eax >> 16) & 0xff;
- unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
- unsigned phys_as = entry->eax & 0xff;
+ /*
+ * GuestPhysAddrSize (EAX[23:16]) is intended for software
+ * use.
+ *
+ * KVM's ABI is to report the effective MAXPHYADDR for the
+ * guest in PhysAddrSize (phys_as), and the maximum
+ * *addressable* GPA in GuestPhysAddrSize (g_phys_as).
+ *
+ * GuestPhysAddrSize is valid if and only if TDP is enabled,
+ * in which case the max GPA that can be addressed by KVM may
+ * be less than the max GPA that can be legally generated by
+ * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't
+ * support 5-level TDP.
+ */
+ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ unsigned int phys_as, g_phys_as;
/*
* If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
@@ -1231,16 +1255,24 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
* reductions in MAXPHYADDR for memory encryption affect shadow
* paging, too.
*
- * If TDP is enabled but an explicit guest MAXPHYADDR is not
- * provided, use the raw bare metal MAXPHYADDR as reductions to
- * the HPAs do not affect GPAs.
+ * If TDP is enabled, use the raw bare metal MAXPHYADDR as
+ * reductions to the HPAs do not affect GPAs. The max
+ * addressable GPA is the same as the max effective GPA, except
+ * that it's capped at 48 bits if 5-level TDP isn't supported
+ * (hardware processes bits 51:48 only when walking the fifth
+ * level page table).
*/
- if (!tdp_enabled)
- g_phys_as = boot_cpu_data.x86_phys_bits;
- else if (!g_phys_as)
+ if (!tdp_enabled) {
+ phys_as = boot_cpu_data.x86_phys_bits;
+ g_phys_as = 0;
+ } else {
+ phys_as = entry->eax & 0xff;
g_phys_as = phys_as;
+ if (kvm_mmu_get_max_tdp_level() < 5)
+ g_phys_as = min(g_phys_as, 48);
+ }
- entry->eax = g_phys_as | (virt_as << 8);
+ entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0008_EBX);
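To make the new EAX packing concrete: PhysAddrSize occupies bits 7:0, LinAddrSize bits 15:8 and GuestPhysAddrSize bits 23:16. A worked sketch with assumed values (MAXPHYADDR of 52, 48-bit virtual addresses, TDP enabled but only 4-level TDP available), not taken from any real CPU:

#include <stdio.h>

int main(void)
{
	unsigned int phys_as = 52;	/* effective guest MAXPHYADDR        */
	unsigned int virt_as = 48;	/* already >= the 48-bit floor       */
	unsigned int g_phys_as = phys_as;
	int max_tdp_level = 4;		/* no 5-level TDP                    */

	if (max_tdp_level < 5 && g_phys_as > 48)
		g_phys_as = 48;		/* bits 51:48 need a 5th TDP level   */

	unsigned int eax = phys_as | (virt_as << 8) | (g_phys_as << 16);

	/* Prints "eax = 0x303034": GuestPhysAddrSize=48, LinAddrSize=48,
	 * PhysAddrSize=52. */
	printf("eax = 0x%06x\n", eax);
	return 0;
}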
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 856e3037e74f..23dbb9eb277c 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}
+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+ return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 5382646162a3..29ea4313e1bb 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -26,6 +26,7 @@ struct x86_exception {
bool nested_page_fault;
u64 address; /* cr2 or nested page fault gpa */
u8 async_page_fault;
+ unsigned long exit_qualification;
};
/*
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cf37586f0466..ebf41023be38 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2776,7 +2776,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
- if (r && lvt_type == APIC_LVTPC)
+ if (r && lvt_type == APIC_LVTPC &&
+ guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 60f21bb4c27b..2e454316f2a2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -100,6 +100,8 @@ static inline u8 kvm_get_shadow_phys_bits(void)
return boot_cpu_data.x86_phys_bits;
}
+u8 kvm_mmu_get_max_tdp_level(void);
+
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
@@ -213,7 +215,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
*/
u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
- int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
+ int index = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1;
u32 errcode = PFERR_PRESENT_MASK;
bool fault;
@@ -234,8 +236,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
- offset = (pfec & ~1) +
- ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
+ offset = (pfec & ~1) | ((pte_access & PT_USER_MASK) ? PFERR_RSVD_MASK : 0);
pkru_bits &= mmu->pkru_mask >> offset;
errcode |= -pkru_bits & PFERR_PK_MASK;
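The switch from '+' plus a shift to a logical OR in permission_fault() does not change the computed values; it only makes the intent explicit, namely setting the Reserved flag bit (PFERR_RSVD_BIT == 3) in an error code that never has that bit set on entry. A small self-checking sketch of the equivalence under that assumption:

#include <assert.h>
#include <stdio.h>

#define PFERR_RSVD_BIT	3
#define PFERR_RSVD_MASK	(1u << PFERR_RSVD_BIT)

int main(void)
{
	for (unsigned int pfec = 0; pfec < 32; pfec++) {
		if (pfec & PFERR_RSVD_MASK)
			continue;	/* permission_fault() never sees RSVD set */
		for (unsigned int not_smap = 0; not_smap <= 1; not_smap++) {
			unsigned int old = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
			unsigned int new = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1;
			assert(old == new);
		}
	}
	puts("arithmetic and logical index forms agree");
	return 0;
}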
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 992e651540e8..662f62dfb2aa 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -432,8 +432,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
* The idea using the light way get the spte on x86_32 guest is from
* gup_get_pte (mm/gup.c).
*
- * An spte tlb flush may be pending, because kvm_set_pte_rmap
- * coalesces them and we are running out of the MMU lock. Therefore
+ * An spte tlb flush may be pending, because such flushes are coalesced
+ * and we are running outside of the MMU lock. Therefore
* we need to protect against in-progress updates of the spte.
*
* Reading the spte while an update is in progress may get the old value
@@ -567,9 +567,9 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
if (!is_shadow_present_pte(old_spte) ||
!spte_has_volatile_bits(old_spte))
- __update_clear_spte_fast(sptep, 0ull);
+ __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
else
- old_spte = __update_clear_spte_slow(sptep, 0ull);
+ old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
if (!is_shadow_present_pte(old_spte))
return old_spte;
@@ -603,7 +603,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
*/
static void mmu_spte_clear_no_track(u64 *sptep)
{
- __update_clear_spte_fast(sptep, 0ull);
+ __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
}
static u64 mmu_spte_get_lockless(u64 *sptep)
@@ -831,6 +831,15 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
gfn_t gfn;
kvm->arch.indirect_shadow_pages++;
+ /*
+ * Ensure indirect_shadow_pages is elevated prior to re-reading guest
+ * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
+ * emulated writes are visible before re-reading guest PTEs, or that
+ * an emulated write will see the elevated count and acquire mmu_lock
+ * to update SPTEs. Pairs with the smp_mb() in kvm_mmu_track_write().
+ */
+ smp_mb();
+
gfn = sp->gfn;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
@@ -1448,49 +1457,11 @@ static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
}
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- pte_t unused)
+ struct kvm_memory_slot *slot, gfn_t gfn, int level)
{
return __kvm_zap_rmap(kvm, rmap_head, slot);
}
-static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- pte_t pte)
-{
- u64 *sptep;
- struct rmap_iterator iter;
- bool need_flush = false;
- u64 new_spte;
- kvm_pfn_t new_pfn;
-
- WARN_ON_ONCE(pte_huge(pte));
- new_pfn = pte_pfn(pte);
-
-restart:
- for_each_rmap_spte(rmap_head, &iter, sptep) {
- need_flush = true;
-
- if (pte_write(pte)) {
- kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
- goto restart;
- } else {
- new_spte = kvm_mmu_changed_pte_notifier_make_spte(
- *sptep, new_pfn);
-
- mmu_spte_clear_track_bits(kvm, sptep);
- mmu_spte_set(sptep, new_spte);
- }
- }
-
- if (need_flush && kvm_available_flush_remote_tlbs_range()) {
- kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
- return false;
- }
-
- return need_flush;
-}
-
struct slot_rmap_walk_iterator {
/* input fields. */
const struct kvm_memory_slot *slot;
@@ -1562,7 +1533,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn,
- int level, pte_t pte);
+ int level);
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
struct kvm_gfn_range *range,
@@ -1574,7 +1545,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
range->start, range->end - 1, &iterator)
ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
- iterator.level, range->arg.pte);
+ iterator.level);
return ret;
}
@@ -1596,22 +1567,8 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return flush;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- bool flush = false;
-
- if (kvm_memslots_have_rmaps(kvm))
- flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap);
-
- if (tdp_mmu_enabled)
- flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
-
- return flush;
-}
-
static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- pte_t unused)
+ struct kvm_memory_slot *slot, gfn_t gfn, int level)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1624,8 +1581,7 @@ static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
}
static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn,
- int level, pte_t unused)
+ struct kvm_memory_slot *slot, gfn_t gfn, int level)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1950,7 +1906,8 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{
- if (!sp->spt[i])
+ /* sp->spt[i] still holds the initial value from shadow page table allocation */
+ if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
return 0;
return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
@@ -2514,7 +2471,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
return kvm_mmu_prepare_zap_page(kvm, child,
invalid_list);
}
- } else if (is_mmio_spte(pte)) {
+ } else if (is_mmio_spte(kvm, pte)) {
mmu_spte_clear_no_track(spte);
}
return 0;
@@ -3314,9 +3271,19 @@ static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
{
gva_t gva = fault->is_tdp ? 0 : fault->addr;
+ if (fault->is_private) {
+ kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+ return -EFAULT;
+ }
+
vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
access & shadow_mmio_access_mask);
+ fault->slot = NULL;
+ fault->pfn = KVM_PFN_NOSLOT;
+ fault->map_writable = false;
+ fault->hva = KVM_HVA_ERR_BAD;
+
/*
* If MMIO caching is disabled, emulate immediately without
* touching the shadow page tables as attempting to install an
@@ -4196,7 +4163,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
if (WARN_ON_ONCE(reserved))
return -EINVAL;
- if (is_mmio_spte(spte)) {
+ if (is_mmio_spte(vcpu->kvm, spte)) {
gfn_t gfn = get_mmio_spte_gfn(spte);
unsigned int access = get_mmio_spte_access(spte);
@@ -4259,24 +4226,28 @@ static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
}
-static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- gfn_t gfn)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
{
struct kvm_arch_async_pf arch;
arch.token = alloc_apf_token(vcpu);
- arch.gfn = gfn;
+ arch.gfn = fault->gfn;
+ arch.error_code = fault->error_code;
arch.direct_map = vcpu->arch.mmu->root_role.direct;
arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
- return kvm_setup_async_pf(vcpu, cr2_or_gpa,
- kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+ return kvm_setup_async_pf(vcpu, fault->addr,
+ kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
}
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
+ if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
+ return;
+
if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
work->wakeup_all)
return;
@@ -4289,7 +4260,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
return;
- kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
+ kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
}
static inline u8 kvm_max_level_for_order(int order)
@@ -4309,14 +4280,6 @@ static inline u8 kvm_max_level_for_order(int order)
return PG_LEVEL_4K;
}
-static void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
- struct kvm_page_fault *fault)
-{
- kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
- PAGE_SIZE, fault->write, fault->exec,
- fault->is_private);
-}
-
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
@@ -4343,48 +4306,15 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
- struct kvm_memory_slot *slot = fault->slot;
bool async;
- /*
- * Retry the page fault if the gfn hit a memslot that is being deleted
- * or moved. This ensures any existing SPTEs for the old memslot will
- * be zapped before KVM inserts a new MMIO SPTE for the gfn.
- */
- if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
- return RET_PF_RETRY;
-
- if (!kvm_is_visible_memslot(slot)) {
- /* Don't expose private memslots to L2. */
- if (is_guest_mode(vcpu)) {
- fault->slot = NULL;
- fault->pfn = KVM_PFN_NOSLOT;
- fault->map_writable = false;
- return RET_PF_CONTINUE;
- }
- /*
- * If the APIC access page exists but is disabled, go directly
- * to emulation without caching the MMIO access or creating a
- * MMIO SPTE. That way the cache doesn't need to be purged
- * when the AVIC is re-enabled.
- */
- if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
- !kvm_apicv_activated(vcpu->kvm))
- return RET_PF_EMULATE;
- }
-
- if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
- kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
- return -EFAULT;
- }
-
if (fault->is_private)
return kvm_faultin_pfn_private(vcpu, fault);
async = false;
- fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
- fault->write, &fault->map_writable,
- &fault->hva);
+ fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, false,
+ &async, fault->write,
+ &fault->map_writable, &fault->hva);
if (!async)
return RET_PF_CONTINUE; /* *pfn has correct page already */
@@ -4394,7 +4324,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
return RET_PF_RETRY;
- } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
+ } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
return RET_PF_RETRY;
}
}
@@ -4404,17 +4334,72 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
* to wait for IO. Note, gup always bails if it is unable to quickly
* get a page and a fatal signal, i.e. SIGKILL, is pending.
*/
- fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
- fault->write, &fault->map_writable,
- &fault->hva);
+ fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
+ NULL, fault->write,
+ &fault->map_writable, &fault->hva);
return RET_PF_CONTINUE;
}
static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
unsigned int access)
{
+ struct kvm_memory_slot *slot = fault->slot;
int ret;
+ /*
+ * Note that the mmu_invalidate_seq also serves to detect a concurrent
+ * change in attributes. is_page_fault_stale() will detect an
+ * invalidation related to fault->gfn and resume the guest without
+ * installing a mapping in the page tables.
+ */
+ fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+ smp_rmb();
+
+ /*
+ * Now that we have a snapshot of mmu_invalidate_seq we can check for a
+ * private vs. shared mismatch.
+ */
+ if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+ kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+ return -EFAULT;
+ }
+
+ if (unlikely(!slot))
+ return kvm_handle_noslot_fault(vcpu, fault, access);
+
+ /*
+ * Retry the page fault if the gfn hit a memslot that is being deleted
+ * or moved. This ensures any existing SPTEs for the old memslot will
+ * be zapped before KVM inserts a new MMIO SPTE for the gfn.
+ */
+ if (slot->flags & KVM_MEMSLOT_INVALID)
+ return RET_PF_RETRY;
+
+ if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
+ /*
+ * Don't map L1's APIC access page into L2, KVM doesn't support
+ * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
+ * i.e. the access needs to be emulated. Emulating access to
+ * L1's APIC is also correct if L1 is accelerating L2's own
+ * virtual APIC, but for some reason L1 also maps _L1's_ APIC
+ * into L2. Note, vcpu_is_mmio_gpa() always treats access to
+ * the APIC as MMIO. Allow an MMIO SPTE to be created, as KVM
+ * uses different roots for L1 vs. L2, i.e. there is no danger
+ * of breaking APICv/AVIC for L1.
+ */
+ if (is_guest_mode(vcpu))
+ return kvm_handle_noslot_fault(vcpu, fault, access);
+
+ /*
+ * If the APIC access page exists but is disabled, go directly
+ * to emulation without caching the MMIO access or creating a
+ * MMIO SPTE. That way the cache doesn't need to be purged
+ * when the AVIC is re-enabled.
+ */
+ if (!kvm_apicv_activated(vcpu->kvm))
+ return RET_PF_EMULATE;
+ }
+
fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();
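The block above snapshots mmu_invalidate_seq before any of the slot or attribute checks so that a racing invalidation (including a private <-> shared attribute change, which bumps the sequence) forces a retry instead of installing a stale mapping. A schematic of the retry pattern with simplified stand-ins for the kvm->mmu_invalidate_* state, mirroring the real checks in spirit rather than in detail:

#include <stdbool.h>

struct invalidate_state {
	unsigned long seq;	/* bumped after every invalidation       */
	int in_progress;	/* non-zero while an invalidation runs   */
};

/* Taken before doing any lookups, like fault->mmu_seq above. */
static unsigned long snapshot_seq(const struct invalidate_state *s)
{
	return s->seq;
}

/* Checked again before committing the new mapping. */
static bool must_retry(const struct invalidate_state *s, unsigned long snap)
{
	return s->in_progress || s->seq != snap;
}

int main(void)
{
	struct invalidate_state s = { .seq = 42, .in_progress = 0 };
	unsigned long snap = snapshot_seq(&s);

	s.seq++;			/* an invalidation ran in between */
	return must_retry(&s, snap) ? 0 : 1;
}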
@@ -4439,8 +4424,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
* *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
* to detect retry guarantees the worst case latency for the vCPU.
*/
- if (fault->slot &&
- mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
+ if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
return RET_PF_RETRY;
ret = __kvm_faultin_pfn(vcpu, fault);
@@ -4450,7 +4434,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
if (unlikely(is_error_pfn(fault->pfn)))
return kvm_handle_error_pfn(vcpu, fault);
- if (unlikely(!fault->slot))
+ if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
return kvm_handle_noslot_fault(vcpu, fault, access);
/*
@@ -4561,6 +4545,16 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
if (WARN_ON_ONCE(fault_address >> 32))
return -EFAULT;
#endif
+ /*
+ * Legacy #PF exception only have a 32-bit error code. Simply drop the
+ * upper bits as KVM doesn't use them for #PF (because they are never
+ * set), and to ensure there are no collisions with KVM-defined bits.
+ */
+ if (WARN_ON_ONCE(error_code >> 32))
+ error_code = lower_32_bits(error_code);
+
+ /* Ensure the above sanity check also covers KVM-defined flags. */
+ BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
vcpu->arch.l1tf_flush_l1d = true;
if (!flags) {
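The BUILD_BUG_ON added above encodes the invariant that every KVM-defined ("synthetic") page-fault flag lives above bit 31, so truncating a legacy #PF error code to 32 bits can never collide with one. A minimal model of that invariant; the bit positions below are illustrative assumptions, and the only property relied on is that the synthetic bits sit in the upper half of the 64-bit error code:

#include <stdint.h>
#include <stdio.h>

#define PFERR_IMPLICIT_ACCESS	(1ULL << 48)	/* assumed position */
#define PFERR_PRIVATE_ACCESS	(1ULL << 49)	/* assumed position */
#define PFERR_SYNTHETIC_MASK	(PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)

_Static_assert((uint32_t)PFERR_SYNTHETIC_MASK == 0,
	       "synthetic PFERR bits must not overlap the legacy 32-bit code");

int main(void)
{
	uint64_t error_code = PFERR_PRIVATE_ACCESS | 0x7;	/* hypothetical */

	/* Dropping the upper bits leaves only architectural #PF bits. */
	printf("0x%llx -> 0x%x\n",
	       (unsigned long long)error_code, (unsigned int)(uint32_t)error_code);
	return 0;
}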
@@ -4812,7 +4806,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access)
{
- if (unlikely(is_mmio_spte(*sptep))) {
+ if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
if (gfn != get_mmio_spte_gfn(*sptep)) {
mmu_spte_clear_no_track(sptep);
return true;
@@ -4935,7 +4929,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
- guest_cpuid_is_amd_or_hygon(vcpu));
+ guest_cpuid_is_amd_compatible(vcpu));
}
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -5322,6 +5316,11 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
return max_tdp_level;
}
+u8 kvm_mmu_get_max_tdp_level(void)
+{
+ return tdp_root_level ? tdp_root_level : max_tdp_level;
+}
+
static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
@@ -5576,9 +5575,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
* that problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
- vcpu->arch.root_mmu.root_role.word = 0;
- vcpu->arch.guest_mmu.root_role.word = 0;
- vcpu->arch.nested_mmu.root_role.word = 0;
+ vcpu->arch.root_mmu.root_role.invalid = 1;
+ vcpu->arch.guest_mmu.root_role.invalid = 1;
+ vcpu->arch.nested_mmu.root_role.invalid = 1;
vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
@@ -5802,10 +5801,15 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
bool flush = false;
/*
- * If we don't have indirect shadow pages, it means no page is
- * write-protected, so we can exit simply.
+ * When emulating guest writes, ensure the written value is visible to
+ * any task that is handling page faults before checking whether or not
+ * KVM is shadowing a guest PTE. This ensures either KVM will create
+ * the correct SPTE in the page fault handler, or this task will see
+ * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in
+ * account_shadowed().
*/
- if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ smp_mb();
+ if (!vcpu->kvm->arch.indirect_shadow_pages)
return;
write_lock(&vcpu->kvm->mmu_lock);
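The smp_mb() here pairs with the one added to account_shadowed() earlier in this file: one side makes the emulated guest-PTE write visible before loading indirect_shadow_pages, the other elevates indirect_shadow_pages before re-reading guest PTEs, so at least one side always observes the other. A schematic of the pairing using C11 atomics and hypothetical variable names:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long guest_pte;		/* stands in for the written gPTE */
static _Atomic int indirect_shadow_pages;

/* Emulated-write side (kvm_mmu_track_write() analogue). */
static bool writer_sees_shadowing(unsigned long new_pte)
{
	atomic_store_explicit(&guest_pte, new_pte, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	return atomic_load_explicit(&indirect_shadow_pages,
				    memory_order_relaxed) != 0;
}

/* Page-fault side (account_shadowed() analogue). */
static unsigned long reader_sees_pte(void)
{
	atomic_fetch_add_explicit(&indirect_shadow_pages, 1,
				  memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	return atomic_load_explicit(&guest_pte, memory_order_relaxed);
}

int main(void)
{
	(void)reader_sees_pte();
	return writer_sees_shadowing(0x1000) ? 0 : 1;
}

With both fences in place it is impossible for the writer to miss the elevated count while the reader simultaneously misses the new PTE value, which is exactly the property the two comments describe.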
@@ -5846,30 +5850,35 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
int r, emulation_type = EMULTYPE_PF;
bool direct = vcpu->arch.mmu->root_role.direct;
- /*
- * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP
- * checks when emulating instructions that triggers implicit access.
- * WARN if hardware generates a fault with an error code that collides
- * with the KVM-defined value. Clear the flag and continue on, i.e.
- * don't terminate the VM, as KVM can't possibly be relying on a flag
- * that KVM doesn't know about.
- */
- if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS))
- error_code &= ~PFERR_IMPLICIT_ACCESS;
-
if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
return RET_PF_RETRY;
+ /*
+ * Except for reserved faults (emulated MMIO is shared-only), set the
+ * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
+ * current attributes, which are the source of truth for such VMs. Note,
+ * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
+ * currently support nested virtualization (among many other things)
+ * for software-protected VMs.
+ */
+ if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
+ !(error_code & PFERR_RSVD_MASK) &&
+ vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
+ kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
+ error_code |= PFERR_PRIVATE_ACCESS;
+
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
+ if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
+ return -EFAULT;
+
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
if (r == RET_PF_EMULATE)
goto emulate;
}
if (r == RET_PF_INVALID) {
- r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
- lower_32_bits(error_code), false,
+ r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
&emulation_type);
if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
return -EIO;
@@ -6173,7 +6182,10 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
- vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+ vcpu->arch.mmu_shadow_page_cache.init_value =
+ SHADOW_NONPRESENT_VALUE;
+ if (!vcpu->arch.mmu_shadow_page_cache.init_value)
+ vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
@@ -6316,6 +6328,7 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
void kvm_mmu_init_vm(struct kvm *kvm)
{
+ kvm->arch.shadow_mmio_value = shadow_mmio_value;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
@@ -7399,7 +7412,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* by the memslot, KVM can't use a hugepage due to the
* misaligned address regardless of memory attributes.
*/
- if (gfn >= slot->base_gfn) {
+ if (gfn >= slot->base_gfn &&
+ gfn + nr_pages <= slot->base_gfn + slot->npages) {
if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 5390a591a571..ce2fcd19ba6b 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -190,7 +190,7 @@ static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
struct kvm_page_fault {
/* arguments to kvm_mmu_do_page_fault. */
const gpa_t addr;
- const u32 error_code;
+ const u64 error_code;
const bool prefetch;
/* Derived from error_code. */
@@ -279,8 +279,16 @@ enum {
RET_PF_SPURIOUS,
};
+static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
+{
+ kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
+ PAGE_SIZE, fault->write, fault->exec,
+ fault->is_private);
+}
+
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- u32 err, bool prefetch, int *emulation_type)
+ u64 err, bool prefetch, int *emulation_type)
{
struct kvm_page_fault fault = {
.addr = cr2_or_gpa,
@@ -298,7 +306,10 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.max_level = KVM_MAX_HUGEPAGE_LEVEL,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
- .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
+ .is_private = err & PFERR_PRIVATE_ACCESS,
+
+ .pfn = KVM_PFN_ERR_FAULT,
+ .hva = KVM_HVA_ERR_BAD,
};
int r;
@@ -320,6 +331,17 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
else
r = vcpu->arch.mmu->page_fault(vcpu, &fault);
+ /*
+ * Not sure what's happening, but punt to userspace and hope that
+ * they can fix it by changing memory to shared, or they can
+ * provide a better error.
+ */
+ if (r == RET_PF_EMULATE && fault.is_private) {
+ pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n");
+ kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
+ return -EFAULT;
+ }
+
if (fault.write_fault_to_shadow_pgtable && emulation_type)
*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index ae86820cef69..195d98bc8de8 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -260,7 +260,7 @@ TRACE_EVENT(
TP_STRUCT__entry(
__field(int, vcpu_id)
__field(gpa_t, cr2_or_gpa)
- __field(u32, error_code)
+ __field(u64, error_code)
__field(u64 *, sptep)
__field(u64, old_spte)
__field(u64, new_spte)
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index f6448284c18e..561c331fd6ec 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -41,7 +41,7 @@ bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
- kvfree(slot->arch.gfn_write_track);
+ vfree(slot->arch.gfn_write_track);
slot->arch.gfn_write_track = NULL;
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 4d4e98fe4f35..d3dbcf382ed2 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -497,21 +497,21 @@ error:
* The other bits are set to 0.
*/
if (!(errcode & PFERR_RSVD_MASK)) {
- vcpu->arch.exit_qualification &= (EPT_VIOLATION_GVA_IS_VALID |
- EPT_VIOLATION_GVA_TRANSLATED);
+ walker->fault.exit_qualification = 0;
+
if (write_fault)
- vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
+ walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
if (user_fault)
- vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
+ walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ;
if (fetch_fault)
- vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
+ walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
/*
* Note, pte_access holds the raw RWX bits from the EPTE, not
* ACC_*_MASK flags!
*/
- vcpu->arch.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
- EPT_VIOLATION_RWX_SHIFT;
+ walker->fault.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
+ EPT_VIOLATION_RWX_SHIFT;
}
#endif
walker->fault.address = addr;
@@ -911,7 +911,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
gpa_t pte_gpa;
gfn_t gfn;
- if (WARN_ON_ONCE(!sp->spt[i]))
+ if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE))
return 0;
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
@@ -933,13 +933,13 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
return 0;
/*
- * Drop the SPTE if the new protections would result in a RWX=0
- * SPTE or if the gfn is changing. The RWX=0 case only affects
- * EPT with execute-only support, i.e. EPT without an effective
- * "present" bit, as all other paging modes will create a
- * read-only SPTE if pte_access is zero.
+ * Drop the SPTE if the new protections result in no effective
+ * "present" bit or if the gfn is changing. The former case
+ * only affects EPT with execute-only support with pte_access==0;
+ * all other paging modes will create a read-only SPTE if
+ * pte_access is zero.
*/
- if ((!pte_access && !shadow_present_mask) ||
+ if ((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE ||
gfn != kvm_mmu_page_get_gfn(sp, i)) {
drop_spte(vcpu->kvm, &sp->spt[i]);
return 1;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4a599130e9c9..a5e014d7bc62 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -74,10 +74,10 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
u64 spte = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
- WARN_ON_ONCE(!shadow_mmio_value);
+ WARN_ON_ONCE(!vcpu->kvm->arch.shadow_mmio_value);
access &= shadow_mmio_access_mask;
- spte |= shadow_mmio_value | access;
+ spte |= vcpu->kvm->arch.shadow_mmio_value | access;
spte |= gpa | shadow_nonpresent_or_rsvd_mask;
spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
@@ -144,19 +144,19 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 spte = SPTE_MMU_PRESENT_MASK;
bool wrprot = false;
- WARN_ON_ONCE(!pte_access && !shadow_present_mask);
+ /*
+ * For the EPT case, shadow_present_mask has no RWX bits set if
+ * exec-only page table entries are supported. In that case,
+ * ACC_USER_MASK and shadow_user_mask are used to represent
+ * read access. See FNAME(gpte_access) in paging_tmpl.h.
+ */
+ WARN_ON_ONCE((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE);
if (sp->role.ad_disabled)
spte |= SPTE_TDP_AD_DISABLED;
else if (kvm_mmu_page_ad_need_write_protect(sp))
spte |= SPTE_TDP_AD_WRPROT_ONLY;
- /*
- * For the EPT case, shadow_present_mask is 0 if hardware
- * supports exec-only page table entries. In that case,
- * ACC_USER_MASK and shadow_user_mask are used to represent
- * read access. See FNAME(gpte_access) in paging_tmpl.h.
- */
spte |= shadow_present_mask;
if (!prefetch)
spte |= spte_shadow_accessed_mask(spte);
@@ -322,22 +322,6 @@ u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
return spte;
}
-u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
-{
- u64 new_spte;
-
- new_spte = old_spte & ~SPTE_BASE_ADDR_MASK;
- new_spte |= (u64)new_pfn << PAGE_SHIFT;
-
- new_spte &= ~PT_WRITABLE_MASK;
- new_spte &= ~shadow_host_writable_mask;
- new_spte &= ~shadow_mmu_writable_mask;
-
- new_spte = mark_spte_for_access_track(new_spte);
-
- return new_spte;
-}
-
u64 mark_spte_for_access_track(u64 spte)
{
if (spte_ad_enabled(spte))
@@ -429,7 +413,9 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
shadow_nx_mask = 0ull;
shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
- shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
+ /* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
+ shadow_present_mask =
+ (has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
/*
* EPT overrides the host MTRRs, and so KVM must program the desired
* memtype directly into the SPTEs. Note, this mask is just the mask
@@ -446,7 +432,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
* of an EPT paging-structure entry is 110b (write/execute).
*/
kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
- VMX_EPT_RWX_MASK, 0);
+ VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index a129951c9a88..5dd5405fa07a 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -149,6 +149,22 @@ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);
#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
+/*
+ * Non-present SPTE value needs to set bit 63 for TDX, in order to suppress
+ * #VE and get EPT violations on non-present PTEs.  The same value can
+ * also be used without TDX, for both VMX and SVM:
+ *
+ * For SVM NPT, when an SPTE is non-present (bit 0 = 0), all other bits
+ * are ignored.
+ * For VMX EPT, bit 63 is ignored if #VE is disabled (EPT_VIOLATION_VE=0),
+ * and is the #VE suppress bit if #VE is enabled (EPT_VIOLATION_VE=1).
+ */
+#ifdef CONFIG_X86_64
+#define SHADOW_NONPRESENT_VALUE BIT_ULL(63)
+static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));
+#else
+#define SHADOW_NONPRESENT_VALUE 0ULL
+#endif
+
extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
@@ -190,11 +206,11 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
*
* Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
* both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF
- * vulnerability. Use only low bits to avoid 64-bit immediates.
+ * vulnerability.
*
* Only used by the TDP MMU.
*/
-#define REMOVED_SPTE 0x5a0ULL
+#define REMOVED_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
@@ -249,9 +265,9 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
return spte_to_child_sp(root);
}
-static inline bool is_mmio_spte(u64 spte)
+static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
{
- return (spte & shadow_mmio_mask) == shadow_mmio_value &&
+ return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
likely(enable_mmio_caching);
}
@@ -496,8 +512,6 @@ static inline u64 restore_acc_track_spte(u64 spte)
return spte;
}
-u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
-
void __init kvm_mmu_spte_module_init(void);
void kvm_mmu_reset_all_pte_masks(void);
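As an aside (not part of the patch), the invariants that the SHADOW_NONPRESENT_VALUE
and REMOVED_SPTE definitions above rely on can be checked in isolation. The sketch
below assumes SPTE_MMU_PRESENT_MASK is bit 11; the real value is not shown in this
diff, so treat it as a placeholder.

#include <assert.h>

#define BIT_ULL(n)               (1ULL << (n))
#define SHADOW_NONPRESENT_VALUE  BIT_ULL(63)
#define SPTE_MMU_PRESENT_MASK    BIT_ULL(11)   /* placeholder, see note above */
#define REMOVED_SPTE             (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

int main(void)
{
	/*
	 * Neither the generic non-present value nor the removed-SPTE marker
	 * may ever look shadow-present, or the MMU would try to walk them.
	 */
	assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));
	assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
	return 0;
}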
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d078157e62aa..1259dd63defc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -495,8 +495,8 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
* impact the guest since both the former and current SPTEs
* are nonpresent.
*/
- if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
- !is_mmio_spte(new_spte) &&
+ if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
+ !is_mmio_spte(kvm, new_spte) &&
!is_removed_spte(new_spte)))
pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
"should not be replaced with another,\n"
@@ -530,6 +530,31 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}
+static inline int __tdp_mmu_set_spte_atomic(struct tdp_iter *iter, u64 new_spte)
+{
+ u64 *sptep = rcu_dereference(iter->sptep);
+
+ /*
+ * The caller is responsible for ensuring the old SPTE is not a REMOVED
+ * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
+ * and pre-checking before inserting a new SPTE is advantageous as it
+ * avoids unnecessary work.
+ */
+ WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
+
+ /*
+ * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
+ * does not hold the mmu_lock. On failure, i.e. if a different logical
+ * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
+ * the current value, so the caller operates on fresh data, e.g. if it
+	 * retries tdp_mmu_set_spte_atomic().
+ */
+ if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
+ return -EBUSY;
+
+ return 0;
+}
+
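To illustrate the contract documented above (not part of the patch): on a failed
compare-and-exchange the caller's snapshot is refreshed with the value another CPU
installed, so a retry operates on fresh data. The sketch models try_cmpxchg64()
with C11 atomics, an assumption made so it can be compiled standalone.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct iter { _Atomic uint64_t *sptep; uint64_t old_spte; };

static int set_spte_atomic(struct iter *it, uint64_t new_spte)
{
	/* On failure, old_spte is updated with the current SPTE value. */
	if (!atomic_compare_exchange_strong(it->sptep, &it->old_spte, new_spte))
		return -EBUSY;
	return 0;
}

int main(void)
{
	_Atomic uint64_t spte = 0;
	struct iter it = { .sptep = &spte, .old_spte = 0 };

	printf("first:  %d\n", set_spte_atomic(&it, 0x123));
	it.old_spte = 0;	/* stale snapshot, as if another CPU raced */
	printf("second: %d (old_spte refreshed to %#llx)\n",
	       set_spte_atomic(&it, 0x456), (unsigned long long)it.old_spte);
	return 0;
}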
/*
* tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
* and handle the associated bookkeeping. Do not mark the page dirty
@@ -551,27 +576,13 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
struct tdp_iter *iter,
u64 new_spte)
{
- u64 *sptep = rcu_dereference(iter->sptep);
-
- /*
- * The caller is responsible for ensuring the old SPTE is not a REMOVED
- * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
- * and pre-checking before inserting a new SPTE is advantageous as it
- * avoids unnecessary work.
- */
- WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
+ int ret;
lockdep_assert_held_read(&kvm->mmu_lock);
- /*
- * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
- * does not hold the mmu_lock. On failure, i.e. if a different logical
- * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
- * the current value, so the caller operates on fresh data, e.g. if it
- * retries tdp_mmu_set_spte_atomic()
- */
- if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
- return -EBUSY;
+ ret = __tdp_mmu_set_spte_atomic(iter, new_spte);
+ if (ret)
+ return ret;
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
new_spte, iter->level, true);
@@ -584,13 +595,17 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
{
int ret;
+ lockdep_assert_held_read(&kvm->mmu_lock);
+
/*
- * Freeze the SPTE by setting it to a special,
- * non-present value. This will stop other threads from
- * immediately installing a present entry in its place
- * before the TLBs are flushed.
+ * Freeze the SPTE by setting it to a special, non-present value. This
+ * will stop other threads from immediately installing a present entry
+ * in its place before the TLBs are flushed.
+ *
+ * Delay processing of the zapped SPTE until after TLBs are flushed and
+ * the REMOVED_SPTE is replaced (see below).
*/
- ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
+ ret = __tdp_mmu_set_spte_atomic(iter, REMOVED_SPTE);
if (ret)
return ret;
@@ -599,11 +614,19 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
/*
* No other thread can overwrite the removed SPTE as they must either
* wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
- * overwrite the special removed SPTE value. No bookkeeping is needed
- * here since the SPTE is going from non-present to non-present. Use
- * the raw write helper to avoid an unnecessary check on volatile bits.
+ * overwrite the special removed SPTE value. Use the raw write helper to
+ * avoid an unnecessary check on volatile bits.
*/
- __kvm_tdp_mmu_write_spte(iter->sptep, 0);
+ __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
+
+ /*
+	 * Process the zapped SPTE after flushing TLBs, and after the
+	 * REMOVED_SPTE has been replaced with a non-present value (see above).
+	 * This minimizes the amount of time vCPUs are
+ * blocked by the REMOVED_SPTE and reduces contention on the child
+ * SPTEs.
+ */
+ handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+ 0, iter->level, true);
return 0;
}
@@ -740,8 +763,8 @@ retry:
continue;
if (!shared)
- tdp_mmu_iter_set_spte(kvm, &iter, 0);
- else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
+ tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
+ else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
goto retry;
}
}
@@ -808,8 +831,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
return false;
- tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
- sp->gfn, sp->role.level + 1);
+ tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
+ SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
return true;
}
@@ -843,7 +866,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level))
continue;
- tdp_mmu_iter_set_spte(kvm, &iter, 0);
+ tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
/*
* Zapping SPTEs in invalid roots doesn't require a TLB flush,
@@ -1028,7 +1051,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
}
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
- if (unlikely(is_mmio_spte(new_spte))) {
+ if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
vcpu->stat.pf_mmio_spte_created++;
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
new_spte);
@@ -1258,52 +1281,6 @@ bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}
-static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_gfn_range *range)
-{
- u64 new_spte;
-
- /* Huge pages aren't expected to be modified without first being zapped. */
- WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
-
- if (iter->level != PG_LEVEL_4K ||
- !is_shadow_present_pte(iter->old_spte))
- return false;
-
- /*
- * Note, when changing a read-only SPTE, it's not strictly necessary to
- * zero the SPTE before setting the new PFN, but doing so preserves the
- * invariant that the PFN of a present * leaf SPTE can never change.
- * See handle_changed_spte().
- */
- tdp_mmu_iter_set_spte(kvm, iter, 0);
-
- if (!pte_write(range->arg.pte)) {
- new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
- pte_pfn(range->arg.pte));
-
- tdp_mmu_iter_set_spte(kvm, iter, new_spte);
- }
-
- return true;
-}
-
-/*
- * Handle the changed_pte MMU notifier for the TDP MMU.
- * data is a pointer to the new pte_t mapping the HVA specified by the MMU
- * notifier.
- * Returns non-zero if a flush is needed before releasing the MMU lock.
- */
-bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- /*
- * No need to handle the remote TLB flush under RCU protection, the
- * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
- * shadow page. See the WARN on pfn_changed in handle_changed_spte().
- */
- return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
-}
-
/*
* Remove write access from all SPTEs at or above min_level that map GFNs
* [start, end). Returns true if an SPTE has been changed and the TLBs need to
@@ -1548,17 +1525,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}
-/*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
- */
+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+ /*
+ * All TDP MMU shadow pages share the same role as their root, aside
+ * from level, so it is valid to key off any shadow page to determine if
+ * write protection is needed for an entire tree.
+ */
+ return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
+
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
- u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+ const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+ shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;
@@ -1573,7 +1554,7 @@ retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (!(iter.old_spte & dbit))
@@ -1590,11 +1571,9 @@ retry:
}
/*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
+ * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
+ * memslot. Returns true if an SPTE has been changed and the TLBs need to be
+ * flushed.
*/
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
const struct kvm_memory_slot *slot)
@@ -1610,18 +1589,11 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
return spte_set;
}
-/*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
- */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
- u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
- shadow_dirty_mask;
+ const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
+ shadow_dirty_mask;
struct tdp_iter iter;
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1633,7 +1605,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;
- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (iter.level > PG_LEVEL_4K ||
@@ -1659,11 +1631,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
}
/*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
+ * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
+ * which a bit is set in mask, starting at gfn. The given memslot is expected to
+ * contain all the GFNs represented by set bits in the mask.
*/
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 6e1ea04ca885..58b55e61bd33 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -31,7 +31,6 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
const struct kvm_memory_slot *slot, int min_level);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c397b28e3d1b..a593b03c9aed 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -775,8 +775,20 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->pebs_data_cfg_mask = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
- if (vcpu->kvm->arch.enable_pmu)
- static_call(kvm_x86_pmu_refresh)(vcpu);
+ if (!vcpu->kvm->arch.enable_pmu)
+ return;
+
+ static_call(kvm_x86_pmu_refresh)(vcpu);
+
+ /*
+ * At RESET, both Intel and AMD CPUs set all enable bits for general
+ * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
+	 * was written for v1 PMUs doesn't unknowingly leave GP counters disabled
+ * in the global controls). Emulate that behavior when refreshing the
+ * PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
+ */
+ if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
+ pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
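For a concrete feel of the RESET behaviour described above (not part of the patch),
the sketch below re-derives GENMASK_ULL() locally and prints the global_ctrl value
that results for a few counter counts; the arithmetic is standard and not copied
from KVM.

#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	for (unsigned int nr_gp = 1; nr_gp <= 8; nr_gp *= 2)
		/* e.g. 4 GP counters -> 0xf: enable bits 0-3 of PERF_GLOBAL_CTRL */
		printf("%u GP counters -> global_ctrl = %#llx\n", nr_gp,
		       (unsigned long long)GENMASK_ULL(nr_gp - 1, 0));
	return 0;
}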
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index aadefcaa9561..2f4e155080ba 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -52,7 +52,7 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
-#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
/* CPUID level 0x80000007 (EDX). */
@@ -102,10 +102,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
*/
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
+ BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
@@ -126,6 +128,7 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+ KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
default:
return x86_feature;
}
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ae0ac12382b9..0623cfaa7bb0 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -23,6 +23,7 @@
#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
+#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include "mmu.h"
@@ -32,22 +33,12 @@
#include "cpuid.h"
#include "trace.h"
-#ifndef CONFIG_KVM_AMD_SEV
-/*
- * When this config is not defined, SEV feature is not supported and APIs in
- * this file are not used but this file still gets compiled into the KVM AMD
- * module.
- *
- * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
- * misc_res_type {} defined in linux/misc_cgroup.h.
- *
- * Below macros allow compilation to succeed.
- */
-#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
-#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
-#endif
+#define GHCB_VERSION_MAX 2ULL
+#define GHCB_VERSION_DEFAULT 2ULL
+#define GHCB_VERSION_MIN 1ULL
+
+#define GHCB_HV_FT_SUPPORTED GHCB_HV_FT_SNP
-#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);
@@ -57,13 +48,13 @@ static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
/* enable/disable SEV-ES DebugSwap support */
-static bool sev_es_debug_swap_enabled = false;
+static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
-#else
-#define sev_enabled false
-#define sev_es_enabled false
-#define sev_es_debug_swap_enabled false
-#endif /* CONFIG_KVM_AMD_SEV */
+static u64 sev_supported_vmsa_features;
+
+#define AP_RESET_HOLD_NONE 0
+#define AP_RESET_HOLD_NAE_EVENT 1
+#define AP_RESET_HOLD_MSR_PROTO 2
static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
@@ -84,9 +75,10 @@ struct enc_region {
};
/* Called with the sev_bitmap_lock held, or on shutdown */
-static int sev_flush_asids(int min_asid, int max_asid)
+static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
- int ret, asid, error = 0;
+ int ret, error = 0;
+ unsigned int asid;
/* Check if there are any ASIDs to reclaim before performing a flush */
asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
@@ -112,11 +104,19 @@ static int sev_flush_asids(int min_asid, int max_asid)
static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
- return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
+ return !!to_kvm_sev_info(kvm)->enc_context_owner;
+}
+
+static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+
+ return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
}
/* Must be called with the sev_bitmap_lock held */
-static bool __sev_recycle_asids(int min_asid, int max_asid)
+static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
if (sev_flush_asids(min_asid, max_asid))
return false;
@@ -143,8 +143,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
static int sev_asid_new(struct kvm_sev_info *sev)
{
- int asid, min_asid, max_asid, ret;
+ /*
+	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
+	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
+ * Note: min ASID can end up larger than the max if basic SEV support is
+ * effectively disabled by disallowing use of ASIDs for SEV guests.
+ */
+ unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
+ unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+ unsigned int asid;
bool retry = true;
+ int ret;
+
+ if (min_asid > max_asid)
+ return -ENOTTY;
WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg();
@@ -157,12 +169,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
mutex_lock(&sev_bitmap_lock);
- /*
- * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
- * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
- */
- min_asid = sev->es_active ? 1 : min_sev_asid;
- max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
if (asid > max_asid) {
@@ -179,7 +185,8 @@ again:
mutex_unlock(&sev_bitmap_lock);
- return asid;
+ sev->asid = asid;
+ return 0;
e_uncharge:
sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
@@ -187,7 +194,7 @@ e_uncharge:
return ret;
}
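The ASID partitioning encoded above can be seen with a throwaway sketch (not part
of the patch). The min/max values are made up for illustration; the real ones come
from CPUID 0x8000001F.

#include <stdio.h>

int main(void)
{
	unsigned int min_sev_asid = 100, max_sev_asid = 509;

	for (int es_active = 0; es_active <= 1; es_active++) {
		unsigned int min = es_active ? 1 : min_sev_asid;
		unsigned int max = es_active ? min_sev_asid - 1 : max_sev_asid;

		/* min > max means that flavor of guest is effectively unusable. */
		printf("%s guests: ASIDs %u..%u%s\n",
		       es_active ? "SEV-ES" : "SEV", min, max,
		       min > max ? " (none available)" : "");
	}
	return 0;
}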
-static int sev_get_asid(struct kvm *kvm)
+static unsigned int sev_get_asid(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -243,25 +250,47 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_decommission(handle);
}
-static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
+ struct kvm_sev_init *data,
+ unsigned long vm_type)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_platform_init_args init_args = {0};
- int asid, ret;
+ bool es_active = vm_type != KVM_X86_SEV_VM;
+ u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
+ int ret;
if (kvm->created_vcpus)
return -EINVAL;
- ret = -EBUSY;
+ if (data->flags)
+ return -EINVAL;
+
+ if (data->vmsa_features & ~valid_vmsa_features)
+ return -EINVAL;
+
+ if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version))
+ return -EINVAL;
+
if (unlikely(sev->active))
- return ret;
+ return -EINVAL;
sev->active = true;
- sev->es_active = argp->id == KVM_SEV_ES_INIT;
- asid = sev_asid_new(sev);
- if (asid < 0)
+ sev->es_active = es_active;
+ sev->vmsa_features = data->vmsa_features;
+ sev->ghcb_version = data->ghcb_version;
+
+ /*
+ * Currently KVM supports the full range of mandatory features defined
+ * by version 2 of the GHCB protocol, so default to that for SEV-ES
+ * guests created via KVM_SEV_INIT2.
+ */
+ if (sev->es_active && !sev->ghcb_version)
+ sev->ghcb_version = GHCB_VERSION_DEFAULT;
+
+ ret = sev_asid_new(sev);
+ if (ret)
goto e_no_asid;
- sev->asid = asid;
init_args.probe = false;
ret = sev_platform_init(&init_args);
@@ -270,6 +299,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
INIT_LIST_HEAD(&sev->regions_list);
INIT_LIST_HEAD(&sev->mirror_vms);
+ sev->need_init = false;
kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
@@ -280,15 +310,57 @@ e_free:
sev_asid_free(sev);
sev->asid = 0;
e_no_asid:
+ sev->vmsa_features = 0;
sev->es_active = false;
sev->active = false;
return ret;
}
+static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_init data = {
+ .vmsa_features = 0,
+ .ghcb_version = 0,
+ };
+ unsigned long vm_type;
+
+ if (kvm->arch.vm_type != KVM_X86_DEFAULT_VM)
+ return -EINVAL;
+
+ vm_type = (argp->id == KVM_SEV_INIT ? KVM_X86_SEV_VM : KVM_X86_SEV_ES_VM);
+
+ /*
+	 * KVM_SEV_ES_INIT has been deprecated in favor of KVM_SEV_INIT2, so it
+	 * will continue to only ever support the minimal GHCB protocol version.
+ */
+ if (vm_type == KVM_X86_SEV_ES_VM)
+ data.ghcb_version = GHCB_VERSION_MIN;
+
+ return __sev_guest_init(kvm, argp, &data, vm_type);
+}
+
+static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_init data;
+
+ if (!sev->need_init)
+ return -EINVAL;
+
+ if (kvm->arch.vm_type != KVM_X86_SEV_VM &&
+ kvm->arch.vm_type != KVM_X86_SEV_ES_VM)
+ return -EINVAL;
+
+ if (copy_from_user(&data, u64_to_user_ptr(argp->data), sizeof(data)))
+ return -EFAULT;
+
+ return __sev_guest_init(kvm, argp, &data, kvm->arch.vm_type);
+}
+
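Roughly how userspace would be expected to drive the new command (not part of the
patch): the struct and command names follow the uapi added by this series, and VM
creation with the matching KVM_X86_SEV_*_VM type plus error handling are elided,
so treat this as a sketch rather than reference code.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int sev_init2(int vm_fd, int sev_fd)
{
	struct kvm_sev_init init = {
		.vmsa_features = 0,	/* or bits advertised via KVM_X86_SEV_VMSA_FEATURES */
		.ghcb_version = 0,	/* 0 lets KVM pick its default (GHCB protocol v2) */
	};
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_INIT2,
		.data = (__u64)(unsigned long)&init,
		.sev_fd = sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}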
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
+ unsigned int asid = sev_get_asid(kvm);
struct sev_data_activate activate;
- int asid = sev_get_asid(kvm);
int ret;
/* activate ASID on the given handle */
@@ -333,7 +405,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
memset(&start, 0, sizeof(start));
@@ -377,7 +449,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* return handle to userspace */
params.handle = start.handle;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
+ if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) {
sev_unbind_asid(kvm, start.handle);
ret = -EFAULT;
goto e_free_session;
@@ -428,7 +500,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
/* Avoid using vmalloc for smaller buffers. */
size = npages * sizeof(struct page *);
if (size > PAGE_SIZE)
- pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
else
pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
@@ -516,7 +588,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
vaddr = params.uaddr;
@@ -574,7 +646,13 @@ e_unpin:
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
struct sev_es_save_area *save = svm->sev_es.vmsa;
+ struct xregs_state *xsave;
+ const u8 *s;
+ u8 *d;
+ int i;
/* Check some debug related fields before encrypting the VMSA */
if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
@@ -615,10 +693,44 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
save->xss = svm->vcpu.arch.ia32_xss;
save->dr6 = svm->vcpu.arch.dr6;
- if (sev_es_debug_swap_enabled) {
- save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
- pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
- "This will not work starting with Linux 6.10\n");
+ save->sev_features = sev->vmsa_features;
+
+ /*
+ * Skip FPU and AVX setup with KVM_SEV_ES_INIT to avoid
+ * breaking older measurements.
+ */
+ if (vcpu->kvm->arch.vm_type != KVM_X86_DEFAULT_VM) {
+ xsave = &vcpu->arch.guest_fpu.fpstate->regs.xsave;
+ save->x87_dp = xsave->i387.rdp;
+ save->mxcsr = xsave->i387.mxcsr;
+ save->x87_ftw = xsave->i387.twd;
+ save->x87_fsw = xsave->i387.swd;
+ save->x87_fcw = xsave->i387.cwd;
+ save->x87_fop = xsave->i387.fop;
+ save->x87_ds = 0;
+ save->x87_cs = 0;
+ save->x87_rip = xsave->i387.rip;
+
+ for (i = 0; i < 8; i++) {
+ /*
+ * The format of the x87 save area is undocumented and
+			 * definitely not what you would expect. It consists of
+			 * an 8*8-byte area holding bytes 0-7 of each register,
+			 * followed by an 8*2-byte area holding bytes 8-9.
+ */
+ d = save->fpreg_x87 + i * 8;
+ s = ((u8 *)xsave->i387.st_space) + i * 16;
+ memcpy(d, s, 8);
+ save->fpreg_x87[64 + i * 2] = s[8];
+ save->fpreg_x87[64 + i * 2 + 1] = s[9];
+ }
+ memcpy(save->fpreg_xmm, xsave->i387.xmm_space, 256);
+
+ s = get_xsave_addr(xsave, XFEATURE_YMM);
+ if (s)
+ memcpy(save->fpreg_ymm, s, 256);
+ else
+ memset(save->fpreg_ymm, 0, 256);
}
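The x87 repacking above can be exercised outside the kernel (not part of the patch):
the sketch assumes the xsave st_space layout of 16 bytes per register with the
80-bit value in bytes 0-9, which is what the comment above describes.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint8_t st_space[8 * 16];	/* source: 16-byte slot per ST register     */
	uint8_t fpreg_x87[80];		/* dest: 8*8 low bytes, then 8*2 high bytes */
	int i;

	for (i = 0; i < (int)sizeof(st_space); i++)
		st_space[i] = (uint8_t)i;

	for (i = 0; i < 8; i++) {
		const uint8_t *s = st_space + i * 16;

		memcpy(fpreg_x87 + i * 8, s, 8);	/* bytes 0-7 of ST(i) */
		fpreg_x87[64 + i * 2]     = s[8];	/* byte 8 of ST(i)    */
		fpreg_x87[64 + i * 2 + 1] = s[9];	/* byte 9 of ST(i)    */
	}

	printf("ST(1) low qword lands at dest offset 8, first byte %u\n", fpreg_x87[8]);
	return 0;
}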
pr_debug("Virtual Machine Save Area (VMSA):\n");
@@ -652,13 +764,20 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
vmsa.reserved = 0;
- vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+ vmsa.handle = to_kvm_sev_info(kvm)->handle;
vmsa.address = __sme_pa(svm->sev_es.vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
if (ret)
return ret;
+ /*
+ * SEV-ES guests maintain an encrypted version of their FPU
+ * state which is restored and saved on VMRUN and VMEXIT.
+ * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
+ * do xsave/xrstor on it.
+ */
+ fpstate_set_confidential(&vcpu->arch.guest_fpu);
vcpu->arch.guest_state_protected = true;
return 0;
}
@@ -689,7 +808,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- void __user *measure = (void __user *)(uintptr_t)argp->data;
+ void __user *measure = u64_to_user_ptr(argp->data);
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_measure data;
struct kvm_sev_launch_measure params;
@@ -709,7 +828,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!params.len)
goto cmd;
- p = (void __user *)(uintptr_t)params.uaddr;
+ p = u64_to_user_ptr(params.uaddr);
if (p) {
if (params.len > SEV_FW_BLOB_MAX_SIZE)
return -EINVAL;
@@ -782,7 +901,7 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
params.state = data.state;
params.handle = data.handle;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+ if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
ret = -EFAULT;
return ret;
@@ -947,7 +1066,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
+ if (copy_from_user(&debug, u64_to_user_ptr(argp->data), sizeof(debug)))
return -EFAULT;
if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
@@ -1031,7 +1150,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
@@ -1095,7 +1214,7 @@ e_unpin_memory:
static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- void __user *report = (void __user *)(uintptr_t)argp->data;
+ void __user *report = u64_to_user_ptr(argp->data);
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_attestation_report data;
struct kvm_sev_attestation_report params;
@@ -1106,7 +1225,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
memset(&data, 0, sizeof(data));
@@ -1115,7 +1234,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!params.len)
goto cmd;
- p = (void __user *)(uintptr_t)params.uaddr;
+ p = u64_to_user_ptr(params.uaddr);
if (p) {
if (params.len > SEV_FW_BLOB_MAX_SIZE)
return -EINVAL;
@@ -1168,7 +1287,7 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
params->session_len = data.session_len;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+ if (copy_to_user(u64_to_user_ptr(argp->data), params,
sizeof(struct kvm_sev_send_start)))
ret = -EFAULT;
@@ -1187,7 +1306,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data),
sizeof(struct kvm_sev_send_start)))
return -EFAULT;
@@ -1242,7 +1361,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
- if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
+ if (!ret && copy_to_user(u64_to_user_ptr(params.session_uaddr),
session_data, params.session_len)) {
ret = -EFAULT;
goto e_free_amd_cert;
@@ -1250,7 +1369,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
params.policy = data.policy;
params.session_len = data.session_len;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
+ if (copy_to_user(u64_to_user_ptr(argp->data), &params,
sizeof(struct kvm_sev_send_start)))
ret = -EFAULT;
@@ -1281,7 +1400,7 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
params->hdr_len = data.hdr_len;
params->trans_len = data.trans_len;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+ if (copy_to_user(u64_to_user_ptr(argp->data), params,
sizeof(struct kvm_sev_send_update_data)))
ret = -EFAULT;
@@ -1301,7 +1420,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data),
sizeof(struct kvm_sev_send_update_data)))
return -EFAULT;
@@ -1352,14 +1471,14 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_free_trans_data;
/* copy transport buffer to user space */
- if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
+ if (copy_to_user(u64_to_user_ptr(params.trans_uaddr),
trans_data, params.trans_len)) {
ret = -EFAULT;
goto e_free_trans_data;
}
/* Copy packet header to userspace. */
- if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+ if (copy_to_user(u64_to_user_ptr(params.hdr_uaddr), hdr,
params.hdr_len))
ret = -EFAULT;
@@ -1411,7 +1530,7 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
return -ENOTTY;
/* Get parameter from the userspace */
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data),
sizeof(struct kvm_sev_receive_start)))
return -EFAULT;
@@ -1453,7 +1572,7 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
}
params.handle = start.handle;
- if (copy_to_user((void __user *)(uintptr_t)argp->data,
+ if (copy_to_user(u64_to_user_ptr(argp->data),
&params, sizeof(struct kvm_sev_receive_start))) {
ret = -EFAULT;
sev_unbind_asid(kvm, start.handle);
@@ -1484,7 +1603,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -EINVAL;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ if (copy_from_user(&params, u64_to_user_ptr(argp->data),
sizeof(struct kvm_sev_receive_update_data)))
return -EFAULT;
@@ -1699,6 +1818,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
dst->pages_locked = src->pages_locked;
dst->enc_context_owner = src->enc_context_owner;
dst->es_active = src->es_active;
+ dst->vmsa_features = src->vmsa_features;
src->asid = 0;
src->active = false;
@@ -1806,7 +1926,8 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
if (ret)
goto out_fput;
- if (sev_guest(kvm) || !sev_guest(source_kvm)) {
+ if (kvm->arch.vm_type != source_kvm->arch.vm_type ||
+ sev_guest(kvm) || !sev_guest(source_kvm)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1855,6 +1976,21 @@ out_fput:
return ret;
}
+int sev_dev_get_attr(u32 group, u64 attr, u64 *val)
+{
+ if (group != KVM_X86_GRP_SEV)
+ return -ENXIO;
+
+ switch (attr) {
+ case KVM_X86_SEV_VMSA_FEATURES:
+ *val = sev_supported_vmsa_features;
+ return 0;
+
+ default:
+ return -ENXIO;
+ }
+}
+
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -1888,6 +2024,9 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
case KVM_SEV_INIT:
r = sev_guest_init(kvm, &sev_cmd);
break;
+ case KVM_SEV_INIT2:
+ r = sev_guest_init2(kvm, &sev_cmd);
+ break;
case KVM_SEV_LAUNCH_START:
r = sev_launch_start(kvm, &sev_cmd);
break;
@@ -2115,6 +2254,7 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
mirror_sev->asid = source_sev->asid;
mirror_sev->fd = source_sev->fd;
mirror_sev->es_active = source_sev->es_active;
+ mirror_sev->need_init = false;
mirror_sev->handle = source_sev->handle;
INIT_LIST_HEAD(&mirror_sev->regions_list);
INIT_LIST_HEAD(&mirror_sev->mirror_vms);
@@ -2180,15 +2320,18 @@ void sev_vm_destroy(struct kvm *kvm)
void __init sev_set_cpu_caps(void)
{
- if (!sev_enabled)
- kvm_cpu_cap_clear(X86_FEATURE_SEV);
- if (!sev_es_enabled)
- kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
+ if (sev_enabled) {
+ kvm_cpu_cap_set(X86_FEATURE_SEV);
+ kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_VM);
+ }
+ if (sev_es_enabled) {
+ kvm_cpu_cap_set(X86_FEATURE_SEV_ES);
+ kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_ES_VM);
+ }
}
void __init sev_hardware_setup(void)
{
-#ifdef CONFIG_KVM_AMD_SEV
unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
bool sev_es_supported = false;
bool sev_supported = false;
@@ -2240,8 +2383,10 @@ void __init sev_hardware_setup(void)
goto out;
}
- sev_asid_count = max_sev_asid - min_sev_asid + 1;
- WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+ if (min_sev_asid <= max_sev_asid) {
+ sev_asid_count = max_sev_asid - min_sev_asid + 1;
+ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+ }
sev_supported = true;
/* SEV-ES support requested? */
@@ -2272,7 +2417,9 @@ void __init sev_hardware_setup(void)
out:
if (boot_cpu_has(X86_FEATURE_SEV))
pr_info("SEV %s (ASIDs %u - %u)\n",
- sev_supported ? "enabled" : "disabled",
+ sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
+ "unusable" :
+ "disabled",
min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
@@ -2284,7 +2431,10 @@ out:
if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
!cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
sev_es_debug_swap_enabled = false;
-#endif
+
+ sev_supported_vmsa_features = 0;
+ if (sev_es_debug_swap_enabled)
+ sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
}
void sev_hardware_unsetup(void)
@@ -2320,7 +2470,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
*/
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
- int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+ unsigned int asid = sev_get_asid(vcpu->kvm);
/*
* Note! The address must be a kernel address, as regular page walk
@@ -2575,6 +2725,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_AP_HLT_LOOP:
case SVM_VMGEXIT_AP_JUMP_TABLE:
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
+ case SVM_VMGEXIT_HV_FEATURES:
+ case SVM_VMGEXIT_TERM_REQUEST:
break;
default:
reason = GHCB_ERR_INVALID_EVENT;
@@ -2605,6 +2757,9 @@ vmgexit_err:
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
+ /* Clear any indication that the vCPU is in a type of AP Reset Hold */
+ svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
+
if (!svm->sev_es.ghcb)
return;
@@ -2638,7 +2793,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
- int asid = sev_get_asid(svm->vcpu.kvm);
+ unsigned int asid = sev_get_asid(svm->vcpu.kvm);
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
@@ -2764,6 +2919,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
u64 ghcb_info;
int ret = 1;
@@ -2774,7 +2930,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
switch (ghcb_info) {
case GHCB_MSR_SEV_INFO_REQ:
- set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
+ set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
GHCB_VERSION_MIN,
sev_enc_bit));
break;
@@ -2816,6 +2972,28 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_INFO_POS);
break;
}
+ case GHCB_MSR_AP_RESET_HOLD_REQ:
+ svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
+ ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+
+ /*
+ * Preset the result to a non-SIPI return and then only set
+ * the result to non-zero when delivering a SIPI.
+ */
+ set_ghcb_msr_bits(svm, 0,
+ GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+ GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
+
+ set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+ GHCB_MSR_INFO_MASK,
+ GHCB_MSR_INFO_POS);
+ break;
+ case GHCB_MSR_HV_FT_REQ:
+ set_ghcb_msr_bits(svm, GHCB_HV_FT_SUPPORTED,
+ GHCB_MSR_HV_FT_MASK, GHCB_MSR_HV_FT_POS);
+ set_ghcb_msr_bits(svm, GHCB_MSR_HV_FT_RESP,
+ GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
+ break;
case GHCB_MSR_TERM_REQ: {
u64 reason_set, reason_code;
@@ -2915,6 +3093,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = 1;
break;
case SVM_VMGEXIT_AP_HLT_LOOP:
+ svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
ret = kvm_emulate_ap_reset_hold(vcpu);
break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
@@ -2939,6 +3118,19 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = 1;
break;
}
+ case SVM_VMGEXIT_HV_FEATURES:
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_HV_FT_SUPPORTED);
+
+ ret = 1;
+ break;
+ case SVM_VMGEXIT_TERM_REQUEST:
+ pr_info("SEV-ES guest requested termination: reason %#llx info %#llx\n",
+ control->exit_info_1, control->exit_info_2);
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = control->ghcb_gpa;
+ break;
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
@@ -3053,7 +3245,7 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
svm_set_intercept(svm, TRAP_CR8_WRITE);
vmcb->control.intercepts[INTERCEPT_DR] = 0;
- if (!sev_es_debug_swap_enabled) {
+ if (!sev_vcpu_has_debug_swap(svm)) {
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
recalc_intercepts(svm);
@@ -3099,16 +3291,19 @@ void sev_init_vmcb(struct vcpu_svm *svm)
void sev_es_vcpu_reset(struct vcpu_svm *svm)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+
/*
* Set the GHCB MSR value as per the GHCB specification when emulating
* vCPU RESET for an SEV-ES guest.
*/
- set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
+ set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
GHCB_VERSION_MIN,
sev_enc_bit));
}
-void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
+void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
{
/*
* All host state for SEV-ES guests is categorized into three swap types
@@ -3136,7 +3331,7 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
* saves and loads debug registers (Type-A).
*/
- if (sev_es_debug_swap_enabled) {
+ if (sev_vcpu_has_debug_swap(svm)) {
hostsa->dr0 = native_get_debugreg(0);
hostsa->dr1 = native_get_debugreg(1);
hostsa->dr2 = native_get_debugreg(2);
@@ -3158,15 +3353,31 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
return;
}
- /*
- * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
- * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
- * non-zero value.
- */
- if (!svm->sev_es.ghcb)
- return;
+ /* Subsequent SIPI */
+ switch (svm->sev_es.ap_reset_hold_type) {
+ case AP_RESET_HOLD_NAE_EVENT:
+ /*
+ * Return from an AP Reset Hold VMGEXIT, where the guest will
+ * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
+ */
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+ break;
+ case AP_RESET_HOLD_MSR_PROTO:
+ /*
+ * Return from an AP Reset Hold VMGEXIT, where the guest will
+ * set the CS and RIP. Set GHCB data field to a non-zero value.
+ */
+ set_ghcb_msr_bits(svm, 1,
+ GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+ GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+ set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+ GHCB_MSR_INFO_MASK,
+ GHCB_MSR_INFO_POS);
+ break;
+ default:
+ break;
+ }
}
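For the GHCB MSR protocol handling above (not part of the patch), set_ghcb_msr_bits()
is assumed to clear the target field and OR in the new value at its position; the
mask/position constants below are placeholders, not the real GHCB_MSR_* definitions.

#include <stdint.h>
#include <stdio.h>

#define INFO_MASK    0xfffULL	/* placeholder for GHCB_MSR_INFO_MASK                 */
#define INFO_POS     0		/* placeholder for GHCB_MSR_INFO_POS                  */
#define RESULT_MASK  0xffULL	/* placeholder for GHCB_MSR_AP_RESET_HOLD_RESULT_MASK */
#define RESULT_POS   12		/* placeholder for GHCB_MSR_AP_RESET_HOLD_RESULT_POS  */

static uint64_t set_bits(uint64_t msr, uint64_t val, uint64_t mask, int pos)
{
	msr &= ~(mask << pos);
	msr |= (val & mask) << pos;
	return msr;
}

int main(void)
{
	uint64_t ghcb_gpa = 0;

	/* Non-zero "SIPI delivered" result plus a response code in the info field. */
	ghcb_gpa = set_bits(ghcb_gpa, 1, RESULT_MASK, RESULT_POS);
	ghcb_gpa = set_bits(ghcb_gpa, 0x7, INFO_MASK, INFO_POS);
	printf("ghcb msr value: %#llx\n", (unsigned long long)ghcb_gpa);
	return 0;
}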
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
@@ -3174,7 +3385,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
unsigned long pfn;
struct page *p;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
/*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d1a9f9951635..c8dc25886c16 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1433,14 +1433,6 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
vmsa_page = snp_safe_alloc_page(vcpu);
if (!vmsa_page)
goto error_free_vmcb_page;
-
- /*
- * SEV-ES guests maintain an encrypted version of their FPU
- * state which is restored and saved on VMRUN and VMEXIT.
- * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
- * do xsave/xrstor on it.
- */
- fpstate_set_confidential(&vcpu->arch.guest_fpu);
}
err = avic_init_vcpu(svm);
@@ -1503,6 +1495,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+ return page_address(sd->save_area) + 0x400;
+}
+
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1516,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* or subsequent vmload of host save area.
*/
vmsave(sd->save_area_pa);
- if (sev_es_guest(vcpu->kvm)) {
- struct sev_es_save_area *hostsa;
- hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
- sev_es_prepare_switch_to_guest(hostsa);
- }
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_prepare_switch_to_guest(svm, sev_es_host_save_area(sd));
if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -2055,6 +2048,15 @@ static int npf_interception(struct kvm_vcpu *vcpu)
u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
+ /*
+ * WARN if hardware generates a fault with an error code that collides
+	 * with KVM-defined synthetic flags. Clear the flags and continue on,
+ * i.e. don't terminate the VM, as KVM can't possibly be relying on a
+ * flag that KVM doesn't know about.
+ */
+ if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
+ error_code &= ~PFERR_SYNTHETIC_MASK;
+
trace_kvm_page_fault(vcpu, fault_address, error_code);
return kvm_mmu_page_fault(vcpu, fault_address, error_code,
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
@@ -3303,7 +3305,9 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_RSM] = rsm_interception,
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
+#ifdef CONFIG_KVM_AMD_SEV
[SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
+#endif
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
@@ -4084,6 +4088,9 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
+ if (to_kvm_sev_info(vcpu->kvm)->need_init)
+ return -EINVAL;
+
return 1;
}
@@ -4101,6 +4108,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
+ struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
guest_state_enter_irqoff();
@@ -4108,7 +4116,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
amd_clear_divider();
if (sev_es_guest(vcpu->kvm))
- __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+ __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+ sev_es_host_save_area(sd));
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);
@@ -4889,6 +4898,14 @@ static void svm_vm_destroy(struct kvm *kvm)
static int svm_vm_init(struct kvm *kvm)
{
+ int type = kvm->arch.vm_type;
+
+ if (type != KVM_X86_DEFAULT_VM &&
+ type != KVM_X86_SW_PROTECTED_VM) {
+ kvm->arch.has_protected_state = (type == KVM_X86_SEV_ES_VM);
+ to_kvm_sev_info(kvm)->need_init = true;
+ }
+
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;
@@ -5023,6 +5040,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.enable_smi_window = svm_enable_smi_window,
#endif
+#ifdef CONFIG_KVM_AMD_SEV
+ .dev_get_attr = sev_dev_get_attr,
.mem_enc_ioctl = sev_mem_enc_ioctl,
.mem_enc_register_region = sev_mem_enc_register_region,
.mem_enc_unregister_region = sev_mem_enc_unregister_region,
@@ -5030,7 +5049,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
.vm_move_enc_context_from = sev_vm_move_enc_context_from,
-
+#endif
.check_emulate_instruction = svm_check_emulate_instruction,
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
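The "warn once, then scrub" handling that npf_interception() gains above can be
sketched standalone (not part of the patch). PFERR_SYNTHETIC_MASK's value is not
shown in this diff, so a placeholder bit is used, and plain stderr stands in for
WARN_ON_ONCE().

#include <stdint.h>
#include <stdio.h>

#define PFERR_SYNTHETIC_MASK  (1ULL << 62)	/* placeholder position */

static uint64_t scrub_error_code(uint64_t error_code)
{
	if (error_code & PFERR_SYNTHETIC_MASK) {
		fprintf(stderr, "unexpected synthetic #NPF bits: %#llx\n",
			(unsigned long long)(error_code & PFERR_SYNTHETIC_MASK));
		error_code &= ~PFERR_SYNTHETIC_MASK;
	}
	return error_code;
}

int main(void)
{
	/* A hardware error code polluted with a KVM-internal bit gets cleaned. */
	printf("%#llx\n", (unsigned long long)scrub_error_code((1ULL << 62) | 0x4));
	return 0;
}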
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7f1fbd874c45..be57213cd295 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -79,12 +79,15 @@ enum {
struct kvm_sev_info {
bool active; /* SEV enabled guest */
bool es_active; /* SEV-ES enabled guest */
+ bool need_init; /* waiting for SEV_INIT2 */
unsigned int asid; /* ASID used for this guest */
unsigned int handle; /* SEV firmware handle */
int fd; /* SEV device fd */
unsigned long pages_locked; /* Number of pages locked */
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
+ u64 vmsa_features;
+ u16 ghcb_version; /* Highest guest GHCB protocol version allowed */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
struct list_head mirror_vms; /* List of VMs mirroring */
struct list_head mirror_entry; /* Use as a list entry of mirrors */
@@ -197,6 +200,7 @@ struct vcpu_sev_es_state {
u8 valid_bitmap[16];
struct kvm_host_map ghcb_map;
bool received_first_sipi;
+ unsigned int ap_reset_hold_type;
/* SEV-ES scratch area support */
u64 sw_scratch;
@@ -318,6 +322,11 @@ static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
return container_of(kvm, struct kvm_svm, kvm);
}
+static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
+{
+ return &to_kvm_svm(kvm)->sev_info;
+}
+
static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
@@ -664,13 +673,16 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */
-#define GHCB_VERSION_MAX 1ULL
-#define GHCB_VERSION_MIN 1ULL
-
-
-extern unsigned int max_sev_asid;
+void pre_sev_run(struct vcpu_svm *svm, int cpu);
+void sev_init_vmcb(struct vcpu_svm *svm);
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
+int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
+void sev_es_vcpu_reset(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
+void sev_es_unmap_ghcb(struct vcpu_svm *svm);
-void sev_vm_destroy(struct kvm *kvm);
+#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
struct kvm_enc_region *range);
@@ -679,26 +691,37 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
-void pre_sev_run(struct vcpu_svm *svm, int cpu);
+/* These symbols are used in common code and are stubbed below. */
+struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
+void sev_free_vcpu(struct kvm_vcpu *vcpu);
+void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
-void sev_init_vmcb(struct vcpu_svm *svm);
-void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
-void sev_free_vcpu(struct kvm_vcpu *vcpu);
-int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
-int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_vcpu_reset(struct vcpu_svm *svm);
-void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
-void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
-void sev_es_unmap_ghcb(struct vcpu_svm *svm);
-struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
+int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
+extern unsigned int max_sev_asid;
+#else
+static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
+ return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+}
+
+static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
+static inline void sev_vm_destroy(struct kvm *kvm) {}
+static inline void __init sev_set_cpu_caps(void) {}
+static inline void __init sev_hardware_setup(void) {}
+static inline void sev_hardware_unsetup(void) {}
+static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
+static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
+#define max_sev_asid 0
+#endif
/* vmenter.S */
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+ struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
#define DEFINE_KVM_GHCB_ACCESSORS(field) \
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 187018c424bf..a0c8eb37d3e1 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
+#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
/* Same for after vmexit. */
mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
* Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
* if it was not intercepted during guest execution.
*/
- cmpb $0, (%_ASM_SP)
+ cmpb $0, \spec_ctrl_intercepted
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
*/
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
RET
RESTORE_GUEST_SPEC_CTRL_BODY
- RESTORE_HOST_SPEC_CTRL_BODY
+ RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
10: cmpb $0, _ASM_RIP(kvm_rebooting)
jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
SYM_FUNC_END(__svm_vcpu_run)
+#ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
/**
* __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
* @svm: struct vcpu_svm *
* @spec_ctrl_intercepted: bool
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
- push %_ASM_BP
-#ifdef CONFIG_X86_64
- push %r15
- push %r14
- push %r13
- push %r12
-#else
- push %edi
- push %esi
-#endif
- push %_ASM_BX
+ FRAME_BEGIN
/*
- * Save variables needed after vmexit on the stack, in inverse
- * order compared to when they are needed.
+ * Save non-volatile (callee-saved) registers to the host save area.
+ * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+ * saved on VMRUN.
*/
+ mov %rbp, SEV_ES_RBP (%rdx)
+ mov %r15, SEV_ES_R15 (%rdx)
+ mov %r14, SEV_ES_R14 (%rdx)
+ mov %r13, SEV_ES_R13 (%rdx)
+ mov %r12, SEV_ES_R12 (%rdx)
+ mov %rbx, SEV_ES_RBX (%rdx)
- /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
- push %_ASM_ARG2
-
- /* Save @svm. */
- push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
/*
- * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
- * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+ * Save volatile registers that hold arguments that are needed after
+ * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
*/
- mov %_ASM_ARG1, %_ASM_DI
-.endif
+ mov %rdi, SEV_ES_RDI (%rdx)
+ mov %rsi, SEV_ES_RSI (%rdx)
- /* Clobbers RAX, RCX, RDX. */
+ /* Clobbers RAX, RCX, RDX (@hostsa). */
RESTORE_GUEST_SPEC_CTRL
/* Get svm->current_vmcb->pa into RAX. */
- mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
- mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+ mov SVM_current_vmcb(%rdi), %rax
+ mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
sti
-1: vmrun %_ASM_AX
+1: vmrun %rax
2: cli
- /* Pop @svm to RDI, guest registers have been saved already. */
- pop %_ASM_DI
-
#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
- /* Clobbers RAX, RCX, RDX. */
+ /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
RESTORE_HOST_SPEC_CTRL
/*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
*/
UNTRAIN_RET_VM
- /* "Pop" @spec_ctrl_intercepted. */
- pop %_ASM_BX
-
- pop %_ASM_BX
-
-#ifdef CONFIG_X86_64
- pop %r12
- pop %r13
- pop %r14
- pop %r15
-#else
- pop %esi
- pop %edi
-#endif
- pop %_ASM_BP
+ FRAME_END
RET
RESTORE_GUEST_SPEC_CTRL_BODY
- RESTORE_HOST_SPEC_CTRL_BODY
+ RESTORE_HOST_SPEC_CTRL_BODY %sil
-3: cmpb $0, _ASM_RIP(kvm_rebooting)
+3: cmpb $0, kvm_rebooting(%rip)
jne 2b
ud2
_ASM_EXTABLE(1b, 3b)
SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */
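
The rewritten __svm_sev_es_vcpu_run() above no longer spills callee-saved GPRs onto the stack; it writes them straight into the SEV-ES host save area, with each SEV_ES_* macro computed as a fixed base plus a register index. Below is a minimal sketch of how such asm-visible offsets could be cross-checked against a C layout at build time; struct hostsa_example, its 0x300-byte prefix, and the locally defined WORD_SIZE/__VCPU_REGS_RBX values are assumptions made for the example, not the real struct sev_es_save_area or kvm_vcpu_regs.h definitions.

/*
 * Illustrative sketch only: cross-checking an asm-visible offset such as
 * SEV_ES_RBX against a C structure at compile time. struct hostsa_example
 * and its 0x300-byte prefix are assumptions for the example; the real
 * layout is struct sev_es_save_area, which this does not reproduce.
 */
#include <stddef.h>

#define WORD_SIZE		8	/* 64-bit build assumed */
#define __VCPU_REGS_RBX		3	/* assumed index, mirroring kvm_vcpu_regs.h ordering */
#define SEV_ES_GPRS_BASE	0x300
#define SEV_ES_RBX		(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)

struct hostsa_example {
	unsigned char	other_state[SEV_ES_GPRS_BASE];	/* state ahead of the GPR array */
	unsigned long	gprs[16];			/* RAX..R15, in __VCPU_REGS_* order */
};

/* Refuses to build if the asm offset and the C layout ever drift apart. */
_Static_assert(SEV_ES_RBX == offsetof(struct hostsa_example, gprs) +
			     __VCPU_REGS_RBX * sizeof(unsigned long),
	       "SEV_ES_RBX does not match the example layout");

int main(void)
{
	return 0;
}
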
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 88659de4d2a7..9d0b02ef307e 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
* Tracepoint for nested #vmexit because of interrupt pending
*/
TRACE_EVENT(kvm_invlpga,
- TP_PROTO(__u64 rip, int asid, u64 address),
+ TP_PROTO(__u64 rip, unsigned int asid, u64 address),
TP_ARGS(rip, asid, address),
TP_STRUCT__entry(
- __field( __u64, rip )
- __field( int, asid )
- __field( __u64, address )
+ __field( __u64, rip )
+ __field( unsigned int, asid )
+ __field( __u64, address )
),
TP_fast_assign(
@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address;
),
- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
+ TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address)
);
@@ -1074,7 +1074,7 @@ TRACE_EVENT(kvm_smm_transition,
);
/*
- * Tracepoint for VT-d posted-interrupts.
+ * Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC.
*/
TRACE_EVENT(kvm_pi_irte_update,
TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
@@ -1100,7 +1100,7 @@ TRACE_EVENT(kvm_pi_irte_update,
__entry->set = set;
),
- TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
+ TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
"gvec: 0x%x, pi_desc_addr: 0x%llx",
__entry->set ? "enabled and being updated" : "disabled",
__entry->host_irq,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
new file mode 100644
index 000000000000..d4ed681785fd
--- /dev/null
+++ b/arch/x86/kvm/vmx/main.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/moduleparam.h>
+
+#include "x86_ops.h"
+#include "vmx.h"
+#include "nested.h"
+#include "pmu.h"
+#include "posted_intr.h"
+
+#define VMX_REQUIRED_APICV_INHIBITS \
+ (BIT(APICV_INHIBIT_REASON_DISABLE)| \
+ BIT(APICV_INHIBIT_REASON_ABSENT) | \
+ BIT(APICV_INHIBIT_REASON_HYPERV) | \
+ BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
+ BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
+ BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
+ BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED))
+
+struct kvm_x86_ops vt_x86_ops __initdata = {
+ .name = KBUILD_MODNAME,
+
+ .check_processor_compatibility = vmx_check_processor_compat,
+
+ .hardware_unsetup = vmx_hardware_unsetup,
+
+ .hardware_enable = vmx_hardware_enable,
+ .hardware_disable = vmx_hardware_disable,
+ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vm_size = sizeof(struct kvm_vmx),
+ .vm_init = vmx_vm_init,
+ .vm_destroy = vmx_vm_destroy,
+
+ .vcpu_precreate = vmx_vcpu_precreate,
+ .vcpu_create = vmx_vcpu_create,
+ .vcpu_free = vmx_vcpu_free,
+ .vcpu_reset = vmx_vcpu_reset,
+
+ .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
+ .vcpu_load = vmx_vcpu_load,
+ .vcpu_put = vmx_vcpu_put,
+
+ .update_exception_bitmap = vmx_update_exception_bitmap,
+ .get_msr_feature = vmx_get_msr_feature,
+ .get_msr = vmx_get_msr,
+ .set_msr = vmx_set_msr,
+ .get_segment_base = vmx_get_segment_base,
+ .get_segment = vmx_get_segment,
+ .set_segment = vmx_set_segment,
+ .get_cpl = vmx_get_cpl,
+ .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+ .is_valid_cr0 = vmx_is_valid_cr0,
+ .set_cr0 = vmx_set_cr0,
+ .is_valid_cr4 = vmx_is_valid_cr4,
+ .set_cr4 = vmx_set_cr4,
+ .set_efer = vmx_set_efer,
+ .get_idt = vmx_get_idt,
+ .set_idt = vmx_set_idt,
+ .get_gdt = vmx_get_gdt,
+ .set_gdt = vmx_set_gdt,
+ .set_dr7 = vmx_set_dr7,
+ .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ .cache_reg = vmx_cache_reg,
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+ .get_if_flag = vmx_get_if_flag,
+
+ .flush_tlb_all = vmx_flush_tlb_all,
+ .flush_tlb_current = vmx_flush_tlb_current,
+ .flush_tlb_gva = vmx_flush_tlb_gva,
+ .flush_tlb_guest = vmx_flush_tlb_guest,
+
+ .vcpu_pre_run = vmx_vcpu_pre_run,
+ .vcpu_run = vmx_vcpu_run,
+ .handle_exit = vmx_handle_exit,
+ .skip_emulated_instruction = vmx_skip_emulated_instruction,
+ .update_emulated_instruction = vmx_update_emulated_instruction,
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .patch_hypercall = vmx_patch_hypercall,
+ .inject_irq = vmx_inject_irq,
+ .inject_nmi = vmx_inject_nmi,
+ .inject_exception = vmx_inject_exception,
+ .cancel_injection = vmx_cancel_injection,
+ .interrupt_allowed = vmx_interrupt_allowed,
+ .nmi_allowed = vmx_nmi_allowed,
+ .get_nmi_mask = vmx_get_nmi_mask,
+ .set_nmi_mask = vmx_set_nmi_mask,
+ .enable_nmi_window = vmx_enable_nmi_window,
+ .enable_irq_window = vmx_enable_irq_window,
+ .update_cr8_intercept = vmx_update_cr8_intercept,
+ .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_interrupt = vmx_deliver_interrupt,
+ .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+ .get_mt_mask = vmx_get_mt_mask,
+
+ .get_exit_info = vmx_get_exit_info,
+
+ .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
+
+ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
+ .write_tsc_offset = vmx_write_tsc_offset,
+ .write_tsc_multiplier = vmx_write_tsc_multiplier,
+
+ .load_mmu_pgd = vmx_load_mmu_pgd,
+
+ .check_intercept = vmx_check_intercept,
+ .handle_exit_irqoff = vmx_handle_exit_irqoff,
+
+ .sched_in = vmx_sched_in,
+
+ .cpu_dirty_log_size = PML_ENTITY_NUM,
+ .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
+
+ .nested_ops = &vmx_nested_ops,
+
+ .pi_update_irte = vmx_pi_update_irte,
+ .pi_start_assignment = vmx_pi_start_assignment,
+
+#ifdef CONFIG_X86_64
+ .set_hv_timer = vmx_set_hv_timer,
+ .cancel_hv_timer = vmx_cancel_hv_timer,
+#endif
+
+ .setup_mce = vmx_setup_mce,
+
+#ifdef CONFIG_KVM_SMM
+ .smi_allowed = vmx_smi_allowed,
+ .enter_smm = vmx_enter_smm,
+ .leave_smm = vmx_leave_smm,
+ .enable_smi_window = vmx_enable_smi_window,
+#endif
+
+ .check_emulate_instruction = vmx_check_emulate_instruction,
+ .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+ .migrate_timers = vmx_migrate_timers,
+
+ .msr_filter_changed = vmx_msr_filter_changed,
+ .complete_emulated_msr = kvm_complete_insn_gp,
+
+ .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+ .get_untagged_addr = vmx_get_untagged_addr,
+};
+
+struct kvm_x86_init_ops vt_init_ops __initdata = {
+ .hardware_setup = vmx_hardware_setup,
+ .handle_intel_pt_intr = NULL,
+
+ .runtime_ops = &vt_x86_ops,
+ .pmu_ops = &intel_pmu_ops,
+};
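
The new main.c owns the vendor ops tables (vt_x86_ops/vt_init_ops) that the rest of vmx.c is wired through; later hunks in hardware_setup() simply NULL out optional callbacks when a feature is absent. A compact sketch of that ops-table pattern follows; struct demo_ops, demo_hardware_setup() and friends are hypothetical stand-ins for kvm_x86_ops and are not KVM APIs.

/*
 * Illustrative sketch only: one statically initialized table names the
 * implementation, and optional callbacks are NULLed at setup time when a
 * feature is unavailable (as hardware_setup() does for vt_x86_ops).
 */
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	const char *name;
	void (*flush_tlb_all)(void);
	void (*set_hv_timer)(void);	/* optional */
};

static void demo_flush_tlb_all(void)
{
	puts("flush all TLBs");
}

static void demo_set_hv_timer(void)
{
	puts("arm preemption timer");
}

static struct demo_ops demo_x86_ops = {
	.name		= "demo",
	.flush_tlb_all	= demo_flush_tlb_all,
	.set_hv_timer	= demo_set_hv_timer,
};

static void demo_hardware_setup(int has_preemption_timer)
{
	/* Mirrors "if (!enable_preemption_timer) vt_x86_ops.set_hv_timer = NULL". */
	if (!has_preemption_timer)
		demo_x86_ops.set_hv_timer = NULL;
}

int main(void)
{
	demo_hardware_setup(0);
	demo_x86_ops.flush_tlb_all();
	if (demo_x86_ops.set_hv_timer)
		demo_x86_ops.set_hv_timer();
	else
		printf("%s: hv timer not supported\n", demo_x86_ops.name);
	return 0;
}
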
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d05ddf751491..d5b832126e34 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -409,18 +409,40 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qualification;
u32 vm_exit_reason;
- unsigned long exit_qualification = vcpu->arch.exit_qualification;
if (vmx->nested.pml_full) {
vm_exit_reason = EXIT_REASON_PML_FULL;
vmx->nested.pml_full = false;
- exit_qualification &= INTR_INFO_UNBLOCK_NMI;
+
+ /*
+ * It should be impossible to trigger a nested PML Full VM-Exit
+ * for anything other than an EPT Violation from L2. KVM *can*
+ * trigger nEPT page fault injection in response to an EPT
+ * Misconfig, e.g. if the MMIO SPTE was stale and L1's EPT
+ * tables also changed, but KVM should not treat EPT Misconfig
+ * VM-Exits as writes.
+ */
+ WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION);
+
+ /*
+ * PML Full and EPT Violation VM-Exits both use bit 12 to report
+ * "NMI unblocking due to IRET", i.e. the bit can be propagated
+ * as-is from the original EXIT_QUALIFICATION.
+ */
+ exit_qualification = vmx_get_exit_qual(vcpu) & INTR_INFO_UNBLOCK_NMI;
} else {
- if (fault->error_code & PFERR_RSVD_MASK)
+ if (fault->error_code & PFERR_RSVD_MASK) {
vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
- else
+ exit_qualification = 0;
+ } else {
+ exit_qualification = fault->exit_qualification;
+ exit_qualification |= vmx_get_exit_qual(vcpu) &
+ (EPT_VIOLATION_GVA_IS_VALID |
+ EPT_VIOLATION_GVA_TRANSLATED);
vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+ }
/*
* Although the caller (kvm_inject_emulated_page_fault) would
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 12ade343a17e..be40474de6e4 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -535,7 +535,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
perf_capabilities = vcpu_get_perf_capabilities(vcpu);
if (cpuid_model_is_consistent(vcpu) &&
(perf_capabilities & PMU_CAP_LBR_FMT))
- x86_perf_get_lbr(&lbr_desc->records);
+ memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
else
lbr_desc->records.nr = 0;
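
This hunk replaces a per-refresh x86_perf_get_lbr() call with a memcpy from vmx_lbr_caps, a module-wide snapshot captured once during setup (see the vmx_get_perf_capabilities() hunk later in this diff). The sketch below shows the query-once, copy-per-consumer pattern under stated assumptions: struct lbr_caps and query_lbr_caps() are made-up stand-ins for struct x86_pmu_lbr and x86_perf_get_lbr(), not the real perf API.

/*
 * Illustrative sketch only: capture capabilities once at init, then copy the
 * cached result on every per-vCPU refresh instead of re-querying.
 */
#include <string.h>

struct lbr_caps {
	unsigned int nr;
	int has_callstack;
};

static struct lbr_caps cached_lbr_caps;	/* written once during setup */

static void query_lbr_caps(struct lbr_caps *caps)
{
	/* Pretend this probes hardware/perf; done once at module init. */
	caps->nr = 32;
	caps->has_callstack = 1;
}

static void setup_lbr_caps(void)
{
	query_lbr_caps(&cached_lbr_caps);
	/* Like the patch: drop LBR support entirely without callstack mode. */
	if (!cached_lbr_caps.has_callstack)
		memset(&cached_lbr_caps, 0, sizeof(cached_lbr_caps));
}

static void refresh_vcpu_lbr(struct lbr_caps *vcpu_records)
{
	/* Per-vCPU path: copy the snapshot instead of re-querying. */
	memcpy(vcpu_records, &cached_lbr_caps, sizeof(cached_lbr_caps));
}

int main(void)
{
	struct lbr_caps records;

	setup_lbr_caps();
	refresh_vcpu_lbr(&records);
	return records.nr ? 0 : 1;
}
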
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index af662312fd07..ec08fa3caf43 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -107,7 +107,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
* handle task migration (@cpu != vcpu->cpu).
*/
new.ndst = dest;
- new.sn = 0;
+ __pi_clear_sn(&new);
/*
* Restore the notification vector; in the blocking case, the
@@ -157,7 +157,7 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
- WARN(pi_desc->sn, "PI descriptor SN field set before blocking");
+ WARN(pi_test_sn(pi_desc), "PI descriptor SN field set before blocking");
old.control = READ_ONCE(pi_desc->control);
do {
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 26992076552e..6b2a0226257e 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -1,98 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_POSTED_INTR_H
#define __KVM_X86_VMX_POSTED_INTR_H
-
-#define POSTED_INTR_ON 0
-#define POSTED_INTR_SN 1
-
-#define PID_TABLE_ENTRY_VALID 1
-
-/* Posted-Interrupt Descriptor */
-struct pi_desc {
- u32 pir[8]; /* Posted interrupt requested */
- union {
- struct {
- /* bit 256 - Outstanding Notification */
- u16 on : 1,
- /* bit 257 - Suppress Notification */
- sn : 1,
- /* bit 271:258 - Reserved */
- rsvd_1 : 14;
- /* bit 279:272 - Notification Vector */
- u8 nv;
- /* bit 287:280 - Reserved */
- u8 rsvd_2;
- /* bit 319:288 - Notification Destination */
- u32 ndst;
- };
- u64 control;
- };
- u32 rsvd[6];
-} __aligned(64);
-
-static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
-{
- return test_and_set_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
-{
- return test_and_clear_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
-{
- return test_and_clear_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
-{
- return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
-}
-
-static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
-{
- return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
-}
-
-static inline void pi_set_sn(struct pi_desc *pi_desc)
-{
- set_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline void pi_set_on(struct pi_desc *pi_desc)
-{
- set_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline void pi_clear_on(struct pi_desc *pi_desc)
-{
- clear_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline void pi_clear_sn(struct pi_desc *pi_desc)
-{
- clear_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline bool pi_test_on(struct pi_desc *pi_desc)
-{
- return test_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline bool pi_test_sn(struct pi_desc *pi_desc)
-{
- return test_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
+#include <asm/posted_intr.h>
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 7c1996b433e2..b25625314658 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -140,6 +140,11 @@ static inline bool is_nm_fault(u32 intr_info)
return is_exception_n(intr_info, NM_VECTOR);
}
+static inline bool is_ve_fault(u32 intr_info)
+{
+ return is_exception_n(intr_info, VE_VECTOR);
+}
+
/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 2bfbf758d061..f6986dee6f8c 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -275,6 +275,8 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
call vmx_spec_ctrl_restore_host
+ CLEAR_BRANCH_HISTORY_VMEXIT
+
/* Put return value in AX */
mov %_ASM_BX, %_ASM_AX
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c37a89eda90f..6051fad5945f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -68,8 +68,10 @@
#include "vmcs12.h"
#include "vmx.h"
#include "x86.h"
+#include "x86_ops.h"
#include "smm.h"
#include "vmx_onhyperv.h"
+#include "posted_intr.h"
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -218,6 +220,8 @@ module_param(ple_window_max, uint, 0444);
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);
+struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
+
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);
@@ -528,8 +532,6 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
static unsigned long host_idt_base;
#if IS_ENABLED(CONFIG_HYPERV)
-static struct kvm_x86_ops vmx_x86_ops __initdata;
-
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
@@ -579,9 +581,8 @@ static __init void hv_init_evmcs(void)
}
if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
- vmx_x86_ops.enable_l2_tlb_flush
+ vt_x86_ops.enable_l2_tlb_flush
= hv_enable_l2_tlb_flush;
-
} else {
enlightened_vmcs = false;
}
@@ -872,6 +873,12 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
(1u << DB_VECTOR) | (1u << AC_VECTOR);
/*
+ * #VE isn't used for VMX. To test against unexpected changes
+ * related to #VE for VMX, intercept unexpected #VE and warn on it.
+ */
+ if (IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
+ eb |= 1u << VE_VECTOR;
+ /*
* Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap.
* We intercept those #GP and allow access to them anyway
@@ -1475,7 +1482,7 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
*/
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1486,7 +1493,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vmx->host_debugctlmsr = get_debugctlmsr();
}
-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
vmx_vcpu_pi_put(vcpu);
@@ -1545,7 +1552,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmx->emulation_required = vmx_emulation_required(vcpu);
}
-static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
+bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
{
return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
}
@@ -1651,8 +1658,8 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
-static int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
- void *insn, int insn_len)
+int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len)
{
/*
* Emulation of instructions in SGX enclaves is impossible as RIP does
@@ -1736,7 +1743,7 @@ rip_updated:
* Recognizes a pending MTF VM-exit and records the nested state for later
* delivery.
*/
-static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
+void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1767,7 +1774,7 @@ static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
}
}
-static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
vmx_update_emulated_instruction(vcpu);
return skip_emulated_instruction(vcpu);
@@ -1786,7 +1793,7 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
-static void vmx_inject_exception(struct kvm_vcpu *vcpu)
+void vmx_inject_exception(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
@@ -1907,12 +1914,12 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
return kvm_caps.default_tsc_scaling_ratio;
}
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
+void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
{
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
}
-static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
+void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
{
vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
}
@@ -1955,7 +1962,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
return !(msr->data & ~valid_bits);
}
-static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
@@ -1972,7 +1979,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmx_uret_msr *msr;
@@ -2153,7 +2160,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmx_uret_msr *msr;
@@ -2456,7 +2463,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return ret;
}
-static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
unsigned long guest_owned_bits;
@@ -2604,6 +2611,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
&_cpu_based_2nd_exec_control))
return -EIO;
}
+ if (!IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
+
#ifndef CONFIG_X86_64
if (!(_cpu_based_2nd_exec_control &
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
@@ -2628,6 +2638,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return -EIO;
vmx_cap->ept = 0;
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
}
if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
vmx_cap->vpid) {
@@ -2757,7 +2768,7 @@ static bool kvm_is_vmx_supported(void)
return supported;
}
-static int vmx_check_processor_compat(void)
+int vmx_check_processor_compat(void)
{
int cpu = raw_smp_processor_id();
struct vmcs_config vmcs_conf;
@@ -2799,7 +2810,7 @@ fault:
return -EFAULT;
}
-static int vmx_hardware_enable(void)
+int vmx_hardware_enable(void)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2839,7 +2850,7 @@ static void vmclear_local_loaded_vmcss(void)
__loaded_vmcs_clear(v);
}
-static void vmx_hardware_disable(void)
+void vmx_hardware_disable(void)
{
vmclear_local_loaded_vmcss();
@@ -3153,7 +3164,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
-static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
+void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3183,7 +3194,7 @@ static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->vpid;
}
-static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
+void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
u64 root_hpa = mmu->root.hpa;
@@ -3199,7 +3210,7 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
vpid_sync_context(vmx_get_current_vpid(vcpu));
}
-static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
/*
* vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
@@ -3208,7 +3219,7 @@ static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
}
-static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
+void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
/*
* vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
@@ -3253,7 +3264,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
CPU_BASED_CR3_STORE_EXITING)
-static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (is_guest_mode(vcpu))
return nested_guest_cr0_valid(vcpu, cr0);
@@ -3374,8 +3385,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
return eptp;
}
-static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
- int root_level)
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
{
struct kvm *kvm = vcpu->kvm;
bool update_guest_cr3 = true;
@@ -3404,8 +3414,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
vmcs_writel(GUEST_CR3, guest_cr3);
}
-
-static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
/*
* We operate under the default treatment of SMM, so VMX cannot be
@@ -3521,7 +3530,7 @@ void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
var->g = (ar >> 15) & 1;
}
-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_segment s;
@@ -3598,14 +3607,14 @@ void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
__vmx_set_segment(vcpu, var, seg);
to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}
-static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
@@ -3613,25 +3622,25 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
*l = (ar >> 13) & 1;
}
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
dt->address = vmcs_readl(GUEST_IDTR_BASE);
}
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
vmcs_writel(GUEST_IDTR_BASE, dt->address);
}
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
dt->address = vmcs_readl(GUEST_GDTR_BASE);
}
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
vmcs_writel(GUEST_GDTR_BASE, dt->address);
@@ -4099,7 +4108,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
}
}
-static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
void *vapic_page;
@@ -4119,7 +4128,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
return ((rvi & 0xf0) > (vppr & 0xf0));
}
-static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
+void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 i;
@@ -4263,8 +4272,8 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
return 0;
}
-static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
- int trig_mode, int vector)
+void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
{
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -4426,7 +4435,7 @@ static u32 vmx_vmexit_ctrl(void)
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
-static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4592,6 +4601,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
enable_unrestricted_guest = 0;
}
if (!enable_unrestricted_guest)
@@ -4690,7 +4700,7 @@ static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
return 0;
}
-static int vmx_vcpu_precreate(struct kvm *kvm)
+int vmx_vcpu_precreate(struct kvm *kvm)
{
return vmx_alloc_ipiv_pid_table(kvm);
}
@@ -4715,8 +4725,12 @@ static void init_vmcs(struct vcpu_vmx *vmx)
exec_controls_set(vmx, vmx_exec_control(vmx));
- if (cpu_has_secondary_exec_ctrls())
+ if (cpu_has_secondary_exec_ctrls()) {
secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
+ if (vmx->ve_info)
+ vmcs_write64(VE_INFORMATION_ADDRESS,
+ __pa(vmx->ve_info));
+ }
if (cpu_has_tertiary_exec_ctrls())
tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
@@ -4842,10 +4856,10 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
* or POSTED_INTR_WAKEUP_VECTOR.
*/
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
- vmx->pi_desc.sn = 1;
+ __pi_set_sn(&vmx->pi_desc);
}
-static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4904,12 +4918,12 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx_update_fb_clear_dis(vcpu, vmx);
}
-static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
+void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
{
exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}
-static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
+void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
{
if (!enable_vnmi ||
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
@@ -4920,7 +4934,7 @@ static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
}
-static void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
+void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
uint32_t intr;
@@ -4948,7 +4962,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
vmx_clear_hlt(vcpu);
}
-static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
+void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5026,7 +5040,7 @@ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
GUEST_INTR_STATE_NMI));
}
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
return -EBUSY;
@@ -5048,7 +5062,7 @@ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
return -EBUSY;
@@ -5063,7 +5077,7 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !vmx_interrupt_blocked(vcpu);
}
-static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
+int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
void __user *ret;
@@ -5083,7 +5097,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
return init_rmode_tss(kvm, ret);
}
-static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
return 0;
@@ -5204,6 +5218,9 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (is_invalid_opcode(intr_info))
return handle_ud(vcpu);
+ if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
+ return -EIO;
+
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
@@ -5369,8 +5386,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
return kvm_fast_pio(vcpu, size, port, in);
}
-static void
-vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
/*
* Patch in the VMCALL instruction:
@@ -5576,7 +5592,7 @@ out:
return kvm_complete_insn_gp(vcpu, err);
}
-static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
@@ -5595,7 +5611,7 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
set_debugreg(DR6_RESERVED, 6);
}
-static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
vmcs_writel(GUEST_DR7, val);
}
@@ -5768,8 +5784,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
- vcpu->arch.exit_qualification = exit_qualification;
-
/*
* Check that the GPA doesn't exceed physical memory limits, as that is
* a guest page fault. We have to emulate the instruction here, because
@@ -5866,7 +5880,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
return 1;
}
-static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
+int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
if (vmx_emulation_required_with_pending_exception(vcpu)) {
kvm_prepare_emulation_failure_exit(vcpu);
@@ -6154,9 +6168,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
-static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
- u64 *info1, u64 *info2,
- u32 *intr_info, u32 *error_code)
+void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6414,6 +6427,24 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
pr_err("Virtual processor ID = 0x%04x\n",
vmcs_read16(VIRTUAL_PROCESSOR_ID));
+ if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
+ struct vmx_ve_information *ve_info = vmx->ve_info;
+ u64 ve_info_pa = vmcs_read64(VE_INFORMATION_ADDRESS);
+
+ /*
+ * If KVM is dumping the VMCS, then something has gone wrong
+ * already. Derefencing an address from the VMCS, which could
+ * very well be corrupted, is a terrible idea. The virtual
+ * address is known so use it.
+ */
+ pr_err("VE info address = 0x%016llx%s\n", ve_info_pa,
+ ve_info_pa == __pa(ve_info) ? "" : "(corrupted!)");
+ pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
+ ve_info->exit_reason, ve_info->delivery,
+ ve_info->exit_qualification,
+ ve_info->guest_linear_address,
+ ve_info->guest_physical_address, ve_info->eptp_index);
+ }
}
/*
@@ -6599,7 +6630,7 @@ unexpected_vmexit:
return 0;
}
-static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
int ret = __vmx_handle_exit(vcpu, exit_fastpath);
@@ -6687,7 +6718,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
: "eax", "ebx", "ecx", "edx");
}
-static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
int tpr_threshold;
@@ -6757,7 +6788,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
vmx_update_msr_bitmap_x2apic(vcpu);
}
-static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
{
const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
@@ -6826,7 +6857,7 @@ out:
kvm_release_pfn_clean(pfn);
}
-static void vmx_hwapic_isr_update(int max_isr)
+void vmx_hwapic_isr_update(int max_isr)
{
u16 status;
u8 old;
@@ -6860,7 +6891,7 @@ static void vmx_set_rvi(int vector)
}
}
-static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
/*
* When running L2, updating RVI is only relevant when
@@ -6874,7 +6905,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
vmx_set_rvi(max_irr);
}
-static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
@@ -6920,7 +6951,7 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
return max_irr;
}
-static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
if (!kvm_vcpu_apicv_active(vcpu))
return;
@@ -6931,7 +6962,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}
-static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6962,24 +6993,22 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
}
-static void handle_exception_irqoff(struct vcpu_vmx *vmx)
+static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
{
- u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
-
/* if exit due to PF check for async PF */
if (is_page_fault(intr_info))
- vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
+ vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
/* if exit due to NM, handle before interrupts are enabled */
else if (is_nm_fault(intr_info))
- handle_nm_fault_irqoff(&vmx->vcpu);
+ handle_nm_fault_irqoff(vcpu);
/* Handle machine checks before interrupts are enabled */
else if (is_machine_check(intr_info))
kvm_machine_check();
}
-static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
+ u32 intr_info)
{
- u32 intr_info = vmx_get_intr_info(vcpu);
unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
@@ -6996,7 +7025,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
vcpu->arch.at_instruction_boundary = true;
}
-static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7004,16 +7033,16 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
return;
if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
- handle_external_interrupt_irqoff(vcpu);
+ handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
- handle_exception_irqoff(vmx);
+ handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
}
/*
* The kvm parameter can be NULL (module initialization, or invocation before
* VM creation). Be sure to check the kvm parameter before using it.
*/
-static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
+bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
{
switch (index) {
case MSR_IA32_SMBASE:
@@ -7136,7 +7165,7 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
IDT_VECTORING_ERROR_CODE);
}
-static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
__vmx_complete_interrupts(vcpu,
vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
@@ -7306,7 +7335,7 @@ out:
guest_state_exit_irqoff();
}
-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
@@ -7461,7 +7490,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
}
-static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
+void vmx_vcpu_free(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7470,9 +7499,10 @@ static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
free_vpid(vmx->vpid);
nested_vmx_free_vcpu(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
+ free_page((unsigned long)vmx->ve_info);
}
-static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
+int vmx_vcpu_create(struct kvm_vcpu *vcpu)
{
struct vmx_uret_msr *tsx_ctrl;
struct vcpu_vmx *vmx;
@@ -7563,6 +7593,20 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
goto free_vmcs;
}
+ err = -ENOMEM;
+ if (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE) {
+ struct page *page;
+
+ BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
+
+ /* ve_info must be page aligned. */
+ page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!page)
+ goto free_vmcs;
+
+ vmx->ve_info = page_to_virt(page);
+ }
+
if (vmx_can_use_ipiv(vcpu))
WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
__pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
@@ -7581,7 +7625,7 @@ free_vpid:
#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
-static int vmx_vm_init(struct kvm *kvm)
+int vmx_vm_init(struct kvm *kvm)
{
if (!ple_gap)
kvm->arch.pause_in_guest = true;
@@ -7612,7 +7656,7 @@ static int vmx_vm_init(struct kvm *kvm)
return 0;
}
-static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
* memory aliases with conflicting memory types and sometimes MCEs.
@@ -7784,7 +7828,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
}
-static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7862,10 +7906,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx_update_exception_bitmap(vcpu);
}
-static u64 vmx_get_perf_capabilities(void)
+static __init u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = PMU_CAP_FW_WRITES;
- struct x86_pmu_lbr lbr;
u64 host_perf_cap = 0;
if (!enable_pmu)
@@ -7875,15 +7918,43 @@ static u64 vmx_get_perf_capabilities(void)
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
- x86_perf_get_lbr(&lbr);
- if (lbr.nr)
+ x86_perf_get_lbr(&vmx_lbr_caps);
+
+ /*
+ * KVM requires LBR callstack support, as the overhead due to
+ * context switching LBRs without said support is too high.
+ * See intel_pmu_create_guest_lbr_event() for more info.
+ */
+ if (!vmx_lbr_caps.has_callstack)
+ memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
+ else if (vmx_lbr_caps.nr)
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
}
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
- if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
- perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+
+ /*
+ * Disallow adaptive PEBS as it is functionally broken, can be
+ * used by the guest to read *host* LBRs, and can be used to
+ * bypass userspace event filters. To correctly and safely
+ * support adaptive PEBS, KVM needs to:
+ *
+ * 1. Account for the ADAPTIVE flag when (re)programming fixed
+ * counters.
+ *
+ * 2. Gain support from perf (or take direct control of counter
+ * programming) to support events without adaptive PEBS
+ * enabled for the hardware counter.
+ *
+ * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+ * adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+ *
+ * 4. Document which PMU events are effectively exposed to the
+ * guest via adaptive PEBS, and make adaptive PEBS mutually
+ * exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+ */
+ perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;
@@ -7972,10 +8043,10 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
-static int vmx_check_intercept(struct kvm_vcpu *vcpu,
- struct x86_instruction_info *info,
- enum x86_intercept_stage stage,
- struct x86_exception *exception)
+int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage,
+ struct x86_exception *exception)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -8055,8 +8126,8 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
return 0;
}
-static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
- bool *expired)
+int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired)
{
struct vcpu_vmx *vmx;
u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
@@ -8095,13 +8166,13 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
return 0;
}
-static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
+void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif
-static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
+void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
@@ -8130,7 +8201,7 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
}
-static void vmx_setup_mce(struct kvm_vcpu *vcpu)
+void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.mcg_cap & MCG_LMCE_P)
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
@@ -8141,7 +8212,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_KVM_SMM
-static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
/* we need a nested vmexit to enter SMM, postpone if run is pending */
if (to_vmx(vcpu)->nested.nested_run_pending)
@@ -8149,7 +8220,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !is_smm(vcpu);
}
-static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
+int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8170,7 +8241,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
return 0;
}
-static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
+int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -8191,18 +8262,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
return 0;
}
-static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
+void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
{
/* RSM will cause a vmexit anyway. */
}
#endif
-static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
}
-static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
+void vmx_migrate_timers(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu)) {
struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
@@ -8212,7 +8283,7 @@ static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
}
}
-static void vmx_hardware_unsetup(void)
+void vmx_hardware_unsetup(void)
{
kvm_set_posted_intr_wakeup_handler(NULL);
@@ -8222,18 +8293,7 @@ static void vmx_hardware_unsetup(void)
free_kvm_area();
}
-#define VMX_REQUIRED_APICV_INHIBITS \
-( \
- BIT(APICV_INHIBIT_REASON_DISABLE)| \
- BIT(APICV_INHIBIT_REASON_ABSENT) | \
- BIT(APICV_INHIBIT_REASON_HYPERV) | \
- BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
- BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
- BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
- BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) \
-)
-
-static void vmx_vm_destroy(struct kvm *kvm)
+void vmx_vm_destroy(struct kvm *kvm)
{
struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
@@ -8284,148 +8344,6 @@ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags
return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
}
-static struct kvm_x86_ops vmx_x86_ops __initdata = {
- .name = KBUILD_MODNAME,
-
- .check_processor_compatibility = vmx_check_processor_compat,
-
- .hardware_unsetup = vmx_hardware_unsetup,
-
- .hardware_enable = vmx_hardware_enable,
- .hardware_disable = vmx_hardware_disable,
- .has_emulated_msr = vmx_has_emulated_msr,
-
- .vm_size = sizeof(struct kvm_vmx),
- .vm_init = vmx_vm_init,
- .vm_destroy = vmx_vm_destroy,
-
- .vcpu_precreate = vmx_vcpu_precreate,
- .vcpu_create = vmx_vcpu_create,
- .vcpu_free = vmx_vcpu_free,
- .vcpu_reset = vmx_vcpu_reset,
-
- .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
- .vcpu_load = vmx_vcpu_load,
- .vcpu_put = vmx_vcpu_put,
-
- .update_exception_bitmap = vmx_update_exception_bitmap,
- .get_msr_feature = vmx_get_msr_feature,
- .get_msr = vmx_get_msr,
- .set_msr = vmx_set_msr,
- .get_segment_base = vmx_get_segment_base,
- .get_segment = vmx_get_segment,
- .set_segment = vmx_set_segment,
- .get_cpl = vmx_get_cpl,
- .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
- .is_valid_cr0 = vmx_is_valid_cr0,
- .set_cr0 = vmx_set_cr0,
- .is_valid_cr4 = vmx_is_valid_cr4,
- .set_cr4 = vmx_set_cr4,
- .set_efer = vmx_set_efer,
- .get_idt = vmx_get_idt,
- .set_idt = vmx_set_idt,
- .get_gdt = vmx_get_gdt,
- .set_gdt = vmx_set_gdt,
- .set_dr7 = vmx_set_dr7,
- .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
- .cache_reg = vmx_cache_reg,
- .get_rflags = vmx_get_rflags,
- .set_rflags = vmx_set_rflags,
- .get_if_flag = vmx_get_if_flag,
-
- .flush_tlb_all = vmx_flush_tlb_all,
- .flush_tlb_current = vmx_flush_tlb_current,
- .flush_tlb_gva = vmx_flush_tlb_gva,
- .flush_tlb_guest = vmx_flush_tlb_guest,
-
- .vcpu_pre_run = vmx_vcpu_pre_run,
- .vcpu_run = vmx_vcpu_run,
- .handle_exit = vmx_handle_exit,
- .skip_emulated_instruction = vmx_skip_emulated_instruction,
- .update_emulated_instruction = vmx_update_emulated_instruction,
- .set_interrupt_shadow = vmx_set_interrupt_shadow,
- .get_interrupt_shadow = vmx_get_interrupt_shadow,
- .patch_hypercall = vmx_patch_hypercall,
- .inject_irq = vmx_inject_irq,
- .inject_nmi = vmx_inject_nmi,
- .inject_exception = vmx_inject_exception,
- .cancel_injection = vmx_cancel_injection,
- .interrupt_allowed = vmx_interrupt_allowed,
- .nmi_allowed = vmx_nmi_allowed,
- .get_nmi_mask = vmx_get_nmi_mask,
- .set_nmi_mask = vmx_set_nmi_mask,
- .enable_nmi_window = vmx_enable_nmi_window,
- .enable_irq_window = vmx_enable_irq_window,
- .update_cr8_intercept = vmx_update_cr8_intercept,
- .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
- .load_eoi_exitmap = vmx_load_eoi_exitmap,
- .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
- .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
- .hwapic_irr_update = vmx_hwapic_irr_update,
- .hwapic_isr_update = vmx_hwapic_isr_update,
- .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
- .sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_interrupt = vmx_deliver_interrupt,
- .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
-
- .set_tss_addr = vmx_set_tss_addr,
- .set_identity_map_addr = vmx_set_identity_map_addr,
- .get_mt_mask = vmx_get_mt_mask,
-
- .get_exit_info = vmx_get_exit_info,
-
- .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
-
- .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
-
- .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
- .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
- .write_tsc_offset = vmx_write_tsc_offset,
- .write_tsc_multiplier = vmx_write_tsc_multiplier,
-
- .load_mmu_pgd = vmx_load_mmu_pgd,
-
- .check_intercept = vmx_check_intercept,
- .handle_exit_irqoff = vmx_handle_exit_irqoff,
-
- .sched_in = vmx_sched_in,
-
- .cpu_dirty_log_size = PML_ENTITY_NUM,
- .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
-
- .nested_ops = &vmx_nested_ops,
-
- .pi_update_irte = vmx_pi_update_irte,
- .pi_start_assignment = vmx_pi_start_assignment,
-
-#ifdef CONFIG_X86_64
- .set_hv_timer = vmx_set_hv_timer,
- .cancel_hv_timer = vmx_cancel_hv_timer,
-#endif
-
- .setup_mce = vmx_setup_mce,
-
-#ifdef CONFIG_KVM_SMM
- .smi_allowed = vmx_smi_allowed,
- .enter_smm = vmx_enter_smm,
- .leave_smm = vmx_leave_smm,
- .enable_smi_window = vmx_enable_smi_window,
-#endif
-
- .check_emulate_instruction = vmx_check_emulate_instruction,
- .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
- .migrate_timers = vmx_migrate_timers,
-
- .msr_filter_changed = vmx_msr_filter_changed,
- .complete_emulated_msr = kvm_complete_insn_gp,
-
- .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
-
- .get_untagged_addr = vmx_get_untagged_addr,
-};
-
static unsigned int vmx_handle_intel_pt_intr(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
@@ -8491,9 +8409,7 @@ static void __init vmx_setup_me_spte_mask(void)
kvm_mmu_set_me_spte_mask(0, me_mask);
}
-static struct kvm_x86_init_ops vmx_init_ops __initdata;
-
-static __init int hardware_setup(void)
+__init int vmx_hardware_setup(void)
{
unsigned long host_bndcfgs;
struct desc_ptr dt;
@@ -8562,16 +8478,16 @@ static __init int hardware_setup(void)
* using the APIC_ACCESS_ADDR VMCS field.
*/
if (!flexpriority_enabled)
- vmx_x86_ops.set_apic_access_page_addr = NULL;
+ vt_x86_ops.set_apic_access_page_addr = NULL;
if (!cpu_has_vmx_tpr_shadow())
- vmx_x86_ops.update_cr8_intercept = NULL;
+ vt_x86_ops.update_cr8_intercept = NULL;
#if IS_ENABLED(CONFIG_HYPERV)
if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
&& enable_ept) {
- vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
- vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
+ vt_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+ vt_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
}
#endif
@@ -8586,7 +8502,7 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
if (!enable_apicv)
- vmx_x86_ops.sync_pir_to_irr = NULL;
+ vt_x86_ops.sync_pir_to_irr = NULL;
if (!enable_apicv || !cpu_has_vmx_ipiv())
enable_ipiv = false;
@@ -8622,7 +8538,7 @@ static __init int hardware_setup(void)
enable_pml = 0;
if (!enable_pml)
- vmx_x86_ops.cpu_dirty_log_size = 0;
+ vt_x86_ops.cpu_dirty_log_size = 0;
if (!cpu_has_vmx_preemption_timer())
enable_preemption_timer = false;
@@ -8647,8 +8563,8 @@ static __init int hardware_setup(void)
}
if (!enable_preemption_timer) {
- vmx_x86_ops.set_hv_timer = NULL;
- vmx_x86_ops.cancel_hv_timer = NULL;
+ vt_x86_ops.set_hv_timer = NULL;
+ vt_x86_ops.cancel_hv_timer = NULL;
}
kvm_caps.supported_mce_cap |= MCG_LMCE_P;
@@ -8659,9 +8575,9 @@ static __init int hardware_setup(void)
if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
pt_mode = PT_MODE_SYSTEM;
if (pt_mode == PT_MODE_HOST_GUEST)
- vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
+ vt_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
else
- vmx_init_ops.handle_intel_pt_intr = NULL;
+ vt_init_ops.handle_intel_pt_intr = NULL;
setup_default_sgx_lepubkeyhash();
@@ -8684,14 +8600,6 @@ static __init int hardware_setup(void)
return r;
}
-static struct kvm_x86_init_ops vmx_init_ops __initdata = {
- .hardware_setup = hardware_setup,
- .handle_intel_pt_intr = NULL,
-
- .runtime_ops = &vmx_x86_ops,
- .pmu_ops = &intel_pmu_ops,
-};
-
static void vmx_cleanup_l1d_flush(void)
{
if (vmx_l1d_flush_pages) {
@@ -8733,7 +8641,7 @@ static int __init vmx_init(void)
*/
hv_init_evmcs();
- r = kvm_x86_vendor_init(&vmx_init_ops);
+ r = kvm_x86_vendor_init(&vt_init_ops);
if (r)
return r;
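
Several vmx.c hunks above cover the CONFIG_KVM_INTEL_PROVE_VE plumbing: each vCPU gets a zeroed, page-aligned #VE information page, its size is checked at build time, its physical address is written to VE_INFORMATION_ADDRESS, and the page is freed in vmx_vcpu_free(). The following sketch walks that lifecycle with plain userspace primitives standing in for alloc_page()/__pa()/vmcs_write64(); struct ve_info_example is a made-up layout, not the architectural #VE information format.

/*
 * Illustrative sketch only: allocate a zeroed page-aligned buffer per vCPU,
 * size-check it at build time, hand it to "hardware", free it on teardown.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE_EXAMPLE 4096

struct ve_info_example {
	uint32_t exit_reason;
	uint32_t delivery;
	uint64_t exit_qualification;
	uint64_t guest_linear_address;
	uint64_t guest_physical_address;
	uint16_t eptp_index;
};

_Static_assert(sizeof(struct ve_info_example) <= PAGE_SIZE_EXAMPLE,
	       "ve_info must fit in one page");

struct vcpu_example {
	struct ve_info_example *ve_info;
};

static int vcpu_setup_ve_info(struct vcpu_example *vcpu)
{
	/* Page-aligned and zeroed, mirroring alloc_page(... | __GFP_ZERO). */
	vcpu->ve_info = aligned_alloc(PAGE_SIZE_EXAMPLE, PAGE_SIZE_EXAMPLE);
	if (!vcpu->ve_info)
		return -1;
	memset(vcpu->ve_info, 0, PAGE_SIZE_EXAMPLE);

	/*
	 * In the real code the *physical* address of this page is written to
	 * the VE_INFORMATION_ADDRESS VMCS field; there is no userspace
	 * equivalent, so that step is only noted here.
	 */
	return 0;
}

static void vcpu_teardown_ve_info(struct vcpu_example *vcpu)
{
	free(vcpu->ve_info);	/* counterpart of free_page() in vmx_vcpu_free() */
	vcpu->ve_info = NULL;
}

int main(void)
{
	struct vcpu_example vcpu = { 0 };

	if (vcpu_setup_ve_info(&vcpu))
		return 1;
	vcpu_teardown_ve_info(&vcpu);
	return 0;
}
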
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 65786dbe7d60..7b64e271a931 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -7,14 +7,15 @@
#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
+#include <asm/posted_intr.h>
#include "capabilities.h"
#include "../kvm_cache_regs.h"
-#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
+#include "../mmu.h"
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
@@ -109,6 +110,8 @@ struct lbr_desc {
bool msr_passthrough;
};
+extern struct x86_pmu_lbr vmx_lbr_caps;
+
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -362,6 +365,9 @@ struct vcpu_vmx {
DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
} shadow_msr_intercept;
+
+ /* ve_info must be page aligned. */
+ struct vmx_ve_information *ve_info;
};
struct kvm_vmx {
@@ -574,7 +580,8 @@ static inline u8 vmx_get_rvi(void)
SECONDARY_EXEC_ENABLE_VMFUNC | \
SECONDARY_EXEC_BUS_LOCK_DETECTION | \
SECONDARY_EXEC_NOTIFY_VM_EXITING | \
- SECONDARY_EXEC_ENCLS_EXITING)
+ SECONDARY_EXEC_ENCLS_EXITING | \
+ SECONDARY_EXEC_EPT_VIOLATION_VE)
#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \
@@ -719,7 +726,8 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
if (!enable_ept)
return true;
- return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+ return allow_smaller_maxphyaddr &&
+ cpuid_maxphyaddr(vcpu) < kvm_get_shadow_phys_bits();
}
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
new file mode 100644
index 000000000000..502704596c83
--- /dev/null
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_X86_OPS_H
+#define __KVM_X86_VMX_X86_OPS_H
+
+#include <linux/kvm_host.h>
+
+#include "x86.h"
+
+__init int vmx_hardware_setup(void);
+
+extern struct kvm_x86_ops vt_x86_ops __initdata;
+extern struct kvm_x86_init_ops vt_init_ops __initdata;
+
+void vmx_hardware_unsetup(void);
+int vmx_check_processor_compat(void);
+int vmx_hardware_enable(void);
+void vmx_hardware_disable(void);
+int vmx_vm_init(struct kvm *kvm);
+void vmx_vm_destroy(struct kvm *kvm);
+int vmx_vcpu_precreate(struct kvm *kvm);
+int vmx_vcpu_create(struct kvm_vcpu *vcpu);
+int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+void vmx_vcpu_free(struct kvm_vcpu *vcpu);
+void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void vmx_vcpu_put(struct kvm_vcpu *vcpu);
+int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
+void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
+int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu);
+int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+#ifdef CONFIG_KVM_SMM
+int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
+int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram);
+int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
+void vmx_enable_smi_window(struct kvm_vcpu *vcpu);
+#endif
+int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len);
+int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage,
+ struct x86_exception *exception);
+bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
+void vmx_migrate_timers(struct kvm_vcpu *vcpu);
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
+bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
+void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
+void vmx_hwapic_isr_update(int max_isr);
+bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
+int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
+void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
+void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
+bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
+void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
+int vmx_get_msr_feature(struct kvm_msr_entry *msr);
+int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int vmx_get_cpl(struct kvm_vcpu *vcpu);
+void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
+void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
+void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool vmx_get_if_flag(struct kvm_vcpu *vcpu);
+void vmx_flush_tlb_all(struct kvm_vcpu *vcpu);
+void vmx_flush_tlb_current(struct kvm_vcpu *vcpu);
+void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
+void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu);
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall);
+void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected);
+void vmx_inject_nmi(struct kvm_vcpu *vcpu);
+void vmx_inject_exception(struct kvm_vcpu *vcpu);
+void vmx_cancel_injection(struct kvm_vcpu *vcpu);
+int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
+int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
+void vmx_enable_irq_window(struct kvm_vcpu *vcpu);
+void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr);
+void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu);
+void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
+void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
+u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
+ u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
+void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
+void vmx_request_immediate_exit(struct kvm_vcpu *vcpu);
+void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu);
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_X86_64
+int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
+void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
+#endif
+void vmx_setup_mce(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_X86_VMX_X86_OPS_H */
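
The header above only declares vt_x86_ops and vt_init_ops; their definitions are not part of this diff and presumably live in a separate vmx/main.c added elsewhere in the series, which is why vmx.c now exports its callbacks instead of owning the ops tables. A minimal standalone C sketch of that split — one unit implementing callbacks a header exposes, another unit assembling the ops table — with every name here hypothetical:

/*
 * Standalone sketch (not kernel code): one unit exports callbacks via a
 * header, another unit owns the ops table that points at them.
 */
#include <stdio.h>

/* --- "x86_ops.h": declarations only ------------------------------------ */
struct my_x86_ops {
        const char *name;
        int  (*vcpu_create)(int id);
        void (*vcpu_free)(int id);
};
int  my_vmx_vcpu_create(int id);
void my_vmx_vcpu_free(int id);
extern struct my_x86_ops my_vt_x86_ops;

/* --- "vmx.c": the callback implementations ----------------------------- */
int my_vmx_vcpu_create(int id)
{
        printf("create vcpu %d\n", id);
        return 0;
}

void my_vmx_vcpu_free(int id)
{
        printf("free vcpu %d\n", id);
}

/* --- "main.c": the single place that assembles the ops table ----------- */
struct my_x86_ops my_vt_x86_ops = {
        .name        = "vt",
        .vcpu_create = my_vmx_vcpu_create,
        .vcpu_free   = my_vmx_vcpu_free,
};

int main(void)
{
        printf("%s ops table assembled\n", my_vt_x86_ops.name);
        my_vt_x86_ops.vcpu_create(0);
        my_vt_x86_ops.vcpu_free(0);
        return 0;
}
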
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47d9f03b7778..082ac6d95a3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -92,9 +92,12 @@
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
-struct kvm_caps kvm_caps __read_mostly = {
- .supported_mce_cap = MCG_CTL_P | MCG_SER_P,
-};
+/*
+ * Note, kvm_caps fields should *never* have default values, all fields must be
+ * recomputed from scratch during vendor module load, e.g. to account for a
+ * vendor module being reloaded with different module parameters.
+ */
+struct kvm_caps kvm_caps __read_mostly;
EXPORT_SYMBOL_GPL(kvm_caps);
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
@@ -1621,7 +1624,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
+ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
static u64 kvm_get_arch_capabilities(void)
{
@@ -2230,16 +2233,13 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
/*
* Disallow writes to immutable feature MSRs after KVM_RUN. KVM does
* not support modifying the guest vCPU model on the fly, e.g. changing
- * the nVMX capabilities while L2 is running is nonsensical. Ignore
+ * the nVMX capabilities while L2 is running is nonsensical. Allow
* writes of the same value, e.g. to allow userspace to blindly stuff
* all MSRs when emulating RESET.
*/
- if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) {
- if (do_get_msr(vcpu, index, &val) || *data != val)
- return -EINVAL;
-
- return 0;
- }
+ if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index) &&
+ (do_get_msr(vcpu, index, &val) || *data != val))
+ return -EINVAL;
return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}
@@ -3470,7 +3470,7 @@ static bool is_mci_status_msr(u32 msr)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
- if (guest_cpuid_is_amd_or_hygon(vcpu))
+ if (guest_cpuid_is_amd_compatible(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
return false;
@@ -4629,9 +4629,7 @@ static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
static bool kvm_is_vm_type_supported(unsigned long type)
{
- return type == KVM_X86_DEFAULT_VM ||
- (type == KVM_X86_SW_PROTECTED_VM &&
- IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled);
+ return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -4832,9 +4830,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = kvm_caps.has_notify_vmexit;
break;
case KVM_CAP_VM_TYPES:
- r = BIT(KVM_X86_DEFAULT_VM);
- if (kvm_is_vm_type_supported(KVM_X86_SW_PROTECTED_VM))
- r |= BIT(KVM_X86_SW_PROTECTED_VM);
+ r = kvm_caps.supported_vm_types;
break;
default:
break;
@@ -4842,46 +4838,44 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
-static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
+static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
{
- void __user *uaddr = (void __user*)(unsigned long)attr->addr;
-
- if ((u64)(unsigned long)uaddr != attr->addr)
- return ERR_PTR_USR(-EFAULT);
- return uaddr;
-}
-
-static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
-{
- u64 __user *uaddr = kvm_get_attr_addr(attr);
-
- if (attr->group)
+ if (attr->group) {
+ if (kvm_x86_ops.dev_get_attr)
+ return static_call(kvm_x86_dev_get_attr)(attr->group, attr->attr, val);
return -ENXIO;
-
- if (IS_ERR(uaddr))
- return PTR_ERR(uaddr);
+ }
switch (attr->attr) {
case KVM_X86_XCOMP_GUEST_SUPP:
- if (put_user(kvm_caps.supported_xcr0, uaddr))
- return -EFAULT;
+ *val = kvm_caps.supported_xcr0;
return 0;
default:
return -ENXIO;
}
}
+static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
+{
+ u64 __user *uaddr = u64_to_user_ptr(attr->addr);
+ int r;
+ u64 val;
+
+ r = __kvm_x86_dev_get_attr(attr, &val);
+ if (r < 0)
+ return r;
+
+ if (put_user(val, uaddr))
+ return -EFAULT;
+
+ return 0;
+}
+
static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{
- if (attr->group)
- return -ENXIO;
+ u64 val;
- switch (attr->attr) {
- case KVM_X86_XCOMP_GUEST_SUPP:
- return 0;
- default:
- return -ENXIO;
- }
+ return __kvm_x86_dev_get_attr(attr, &val);
}
long kvm_arch_dev_ioctl(struct file *filp,
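
The attribute refactor above splits the lookup into __kvm_x86_dev_get_attr(), so the "get" path copies the value out with put_user() while the "has" path reuses the same lookup and discards the value. A standalone sketch of that get/has split, with stubbed types, memcpy() standing in for put_user(), and a made-up attribute name:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

#define MY_ATTR_SUPPORTED_FEATURES 1

/* Shared lookup: computes the value, or reports that the attribute is unknown. */
static int __dev_get_attr(unsigned int attr, uint64_t *val)
{
        switch (attr) {
        case MY_ATTR_SUPPORTED_FEATURES:
                *val = 0xabcdULL;       /* placeholder capability mask */
                return 0;
        default:
                return -ENXIO;
        }
}

/* "Get" copies the value out (memcpy stands in for put_user()). */
static int dev_get_attr(unsigned int attr, uint64_t *user_buf)
{
        uint64_t val;
        int r = __dev_get_attr(attr, &val);

        if (r < 0)
                return r;
        memcpy(user_buf, &val, sizeof(val));
        return 0;
}

/* "Has" only cares whether the lookup succeeds. */
static int dev_has_attr(unsigned int attr)
{
        uint64_t val;

        return __dev_get_attr(attr, &val);
}

int main(void)
{
        uint64_t out = 0;
        int r;

        printf("has: %d\n", dev_has_attr(MY_ATTR_SUPPORTED_FEATURES));
        r = dev_get_attr(MY_ATTR_SUPPORTED_FEATURES, &out);
        printf("get: %d, val=%#llx\n", r, (unsigned long long)out);
        return 0;
}
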
@@ -5557,11 +5551,15 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
- struct kvm_debugregs *dbgregs)
+static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *dbgregs)
{
unsigned int i;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
memset(dbgregs, 0, sizeof(*dbgregs));
BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
@@ -5570,6 +5568,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
+ return 0;
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -5577,6 +5576,10 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
{
unsigned int i;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
if (dbgregs->flags)
return -EINVAL;
@@ -5597,8 +5600,8 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
}
-static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
- u8 *state, unsigned int size)
+static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+ u8 *state, unsigned int size)
{
/*
* Only copy state for features that are enabled for the guest. The
@@ -5616,24 +5619,25 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
XFEATURE_MASK_FPSSE;
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
supported_xcr0, vcpu->arch.pkru);
+ return 0;
}
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
+static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
{
- kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
- sizeof(guest_xsave->region));
+ return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+ sizeof(guest_xsave->region));
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return 0;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
guest_xsave->region,
@@ -5641,18 +5645,23 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
&vcpu->arch.pkru);
}
-static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
- struct kvm_xcrs *guest_xcrs)
+static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *guest_xcrs)
{
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
guest_xcrs->nr_xcrs = 0;
- return;
+ return 0;
}
guest_xcrs->nr_xcrs = 1;
guest_xcrs->flags = 0;
guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
+ return 0;
}
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
@@ -5660,6 +5669,10 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
{
int i, r = 0;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -EINVAL;
@@ -5712,12 +5725,9 @@ static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- u64 __user *uaddr = kvm_get_attr_addr(attr);
+ u64 __user *uaddr = u64_to_user_ptr(attr->addr);
int r;
- if (IS_ERR(uaddr))
- return PTR_ERR(uaddr);
-
switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET:
r = -EFAULT;
@@ -5735,13 +5745,10 @@ static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- u64 __user *uaddr = kvm_get_attr_addr(attr);
+ u64 __user *uaddr = u64_to_user_ptr(attr->addr);
struct kvm *kvm = vcpu->kvm;
int r;
- if (IS_ERR(uaddr))
- return PTR_ERR(uaddr);
-
switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET: {
u64 offset, tsc, ns;
@@ -6048,7 +6055,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_GET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
- kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+ r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+ if (r < 0)
+ break;
r = -EFAULT;
if (copy_to_user(argp, &dbgregs,
@@ -6078,7 +6087,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!u.xsave)
break;
- kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
+ r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
+ if (r < 0)
+ break;
r = -EFAULT;
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
@@ -6107,7 +6118,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!u.xsave)
break;
- kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
+ r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
+ if (r < 0)
+ break;
r = -EFAULT;
if (copy_to_user(argp, u.xsave, size))
@@ -6123,7 +6136,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!u.xcrs)
break;
- kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
+ r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
+ if (r < 0)
+ break;
r = -EFAULT;
if (copy_to_user(argp, u.xcrs,
@@ -6267,6 +6282,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
#endif
case KVM_GET_SREGS2: {
+ r = -EINVAL;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ goto out;
+
u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
r = -ENOMEM;
if (!u.sregs2)
@@ -6279,6 +6299,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_SET_SREGS2: {
+ r = -EINVAL;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ goto out;
+
u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
if (IS_ERR(u.sregs2)) {
r = PTR_ERR(u.sregs2);
@@ -9732,6 +9757,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
return -EIO;
}
+ memset(&kvm_caps, 0, sizeof(kvm_caps));
+
x86_emulator_cache = kvm_alloc_emulator_cache();
if (!x86_emulator_cache) {
pr_err("failed to allocate cache for x86 emulator\n");
@@ -9750,6 +9777,9 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (r)
goto out_free_percpu;
+ kvm_caps.supported_vm_types = BIT(KVM_X86_DEFAULT_VM);
+ kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
+
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
@@ -9795,6 +9825,9 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
+ if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
+ kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
+
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
kvm_caps.supported_xss = 0;
@@ -9995,15 +10028,12 @@ static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
static void kvm_apicv_init(struct kvm *kvm)
{
- unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
+ enum kvm_apicv_inhibit reason = enable_apicv ? APICV_INHIBIT_REASON_ABSENT :
+ APICV_INHIBIT_REASON_DISABLE;
- init_rwsem(&kvm->arch.apicv_update_lock);
-
- set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
+ set_or_clear_apicv_inhibit(&kvm->arch.apicv_inhibit_reasons, reason, true);
- if (!enable_apicv)
- set_or_clear_apicv_inhibit(inhibits,
- APICV_INHIBIT_REASON_DISABLE, true);
+ init_rwsem(&kvm->arch.apicv_update_lock);
}
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -10051,26 +10081,15 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
-int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ int op_64_bit, int cpl)
{
- unsigned long nr, a0, a1, a2, a3, ret;
- int op_64_bit;
-
- if (kvm_xen_hypercall_enabled(vcpu->kvm))
- return kvm_xen_hypercall(vcpu);
-
- if (kvm_hv_hypercall_enabled(vcpu))
- return kvm_hv_hypercall(vcpu);
-
- nr = kvm_rax_read(vcpu);
- a0 = kvm_rbx_read(vcpu);
- a1 = kvm_rcx_read(vcpu);
- a2 = kvm_rdx_read(vcpu);
- a3 = kvm_rsi_read(vcpu);
+ unsigned long ret;
trace_kvm_hypercall(nr, a0, a1, a2, a3);
- op_64_bit = is_64_bit_hypercall(vcpu);
if (!op_64_bit) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
@@ -10079,7 +10098,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a3 &= 0xFFFFFFFF;
}
- if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
+ if (cpl) {
ret = -KVM_EPERM;
goto out;
}
@@ -10140,18 +10159,49 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+ /* stat is incremented on completion. */
return 0;
}
default:
ret = -KVM_ENOSYS;
break;
}
+
out:
+ ++vcpu->stat.hypercalls;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+{
+ unsigned long nr, a0, a1, a2, a3, ret;
+ int op_64_bit;
+ int cpl;
+
+ if (kvm_xen_hypercall_enabled(vcpu->kvm))
+ return kvm_xen_hypercall(vcpu);
+
+ if (kvm_hv_hypercall_enabled(vcpu))
+ return kvm_hv_hypercall(vcpu);
+
+ nr = kvm_rax_read(vcpu);
+ a0 = kvm_rbx_read(vcpu);
+ a1 = kvm_rcx_read(vcpu);
+ a2 = kvm_rdx_read(vcpu);
+ a3 = kvm_rsi_read(vcpu);
+ op_64_bit = is_64_bit_hypercall(vcpu);
+ cpl = static_call(kvm_x86_get_cpl)(vcpu);
+
+ ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
+ if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
+ /* MAP_GPA tosses the request to the user space. */
+ return 0;
+
if (!op_64_bit)
ret = (u32)ret;
kvm_rax_write(vcpu, ret);
- ++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
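
After this split, __kvm_emulate_hypercall() takes the hypercall number, arguments, bitness and CPL as explicit parameters, so a caller other than kvm_emulate_hypercall() (presumably a TDX exit handler elsewhere in the series) can feed in guest state from wherever it lives, reuse the common dispatch, and then decide how to complete the call. A rough standalone sketch of that calling convention, deliberately divorced from the real KVM API and omitting the MAP_GPA and stats handling:

#include <stdio.h>

#define MY_EPERM  1
#define MY_ENOSYS 1000

/* Core dispatch: no knowledge of where the arguments came from. */
static long __emulate_hypercall(unsigned long nr, unsigned long a0,
                                int op_64_bit, int cpl)
{
        if (!op_64_bit) {               /* 32-bit guests truncate everything */
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
        }
        if (cpl)                        /* only ring 0 may issue hypercalls */
                return -MY_EPERM;

        switch (nr) {
        case 1:
                printf("hypercall 1, arg %#lx\n", a0);
                return 0;
        default:
                return -MY_ENOSYS;
        }
}

/* VMX-style wrapper: pulls the arguments out of a register file. */
struct regs { unsigned long rax, rbx; int long_mode, cpl; };

static long emulate_hypercall(const struct regs *r)
{
        return __emulate_hypercall(r->rax, r->rbx, r->long_mode, r->cpl);
}

int main(void)
{
        struct regs r = { .rax = 1, .rbx = 0x1234, .long_mode = 1, .cpl = 0 };

        printf("ret = %ld\n", emulate_hypercall(&r));
        return 0;
}
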
@@ -11486,6 +11536,10 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
vcpu_load(vcpu);
__get_regs(vcpu, regs);
vcpu_put(vcpu);
@@ -11527,6 +11581,10 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
vcpu_load(vcpu);
__set_regs(vcpu, regs);
vcpu_put(vcpu);
@@ -11599,6 +11657,10 @@ static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
vcpu_load(vcpu);
__get_sregs(vcpu, sregs);
vcpu_put(vcpu);
@@ -11866,6 +11928,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
int ret;
+ if (vcpu->kvm->arch.has_protected_state &&
+ vcpu->arch.guest_state_protected)
+ return -EINVAL;
+
vcpu_load(vcpu);
ret = __set_sregs(vcpu, sregs);
vcpu_put(vcpu);
@@ -11983,7 +12049,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
struct fxregs_state *fxsave;
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return 0;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
vcpu_load(vcpu);
@@ -12006,7 +12072,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
struct fxregs_state *fxsave;
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return 0;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
vcpu_load(vcpu);
@@ -12532,6 +12598,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return -EINVAL;
kvm->arch.vm_type = type;
+ kvm->arch.has_private_mem =
+ (type == KVM_X86_SW_PROTECTED_VM);
ret = kvm_page_track_init(kvm);
if (ret)
@@ -12731,7 +12799,7 @@ static void memslot_rmap_free(struct kvm_memory_slot *slot)
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.rmap[i]);
+ vfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
}
}
@@ -12743,7 +12811,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
memslot_rmap_free(slot);
for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.lpage_info[i - 1]);
+ vfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -12835,7 +12903,7 @@ out_free:
memslot_rmap_free(slot);
for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.lpage_info[i - 1]);
+ vfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
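
Two of the x86.c hunks above work as a pair: kvm_caps loses its static initializer and is instead zeroed at the top of kvm_x86_vendor_init(), with supported_mce_cap, supported_vm_types and the rest repopulated afterwards, so reloading a vendor module with different parameters cannot inherit stale capabilities. A compact standalone sketch of that reset-then-repopulate idiom (names and values are placeholders):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

struct caps {
        uint32_t supported_vm_types;
        uint64_t supported_mce_cap;
};

static struct caps caps;        /* deliberately has no default values */

static void vendor_init(bool sw_protected_ok)
{
        /* Recompute everything from scratch on every (re)load. */
        memset(&caps, 0, sizeof(caps));

        caps.supported_vm_types = 1u << 0;      /* "default VM" always present */
        caps.supported_mce_cap  = 0x100;        /* placeholder bank flags */
        if (sw_protected_ok)
                caps.supported_vm_types |= 1u << 1;
}

int main(void)
{
        vendor_init(true);
        printf("vm types %#x, mce cap %#llx\n", caps.supported_vm_types,
               (unsigned long long)caps.supported_mce_cap);

        vendor_init(false);     /* "reload" with a different configuration */
        printf("vm types %#x after reload\n", caps.supported_vm_types);
        return 0;
}
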
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a8b71803777b..d80a4c6b5a38 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -24,6 +24,8 @@ struct kvm_caps {
bool has_bus_lock_exit;
/* notify VM exit supported? */
bool has_notify_vmexit;
+ /* bit mask of VM types */
+ u32 supported_vm_types;
u64 supported_mce_cap;
u64 supported_xcr0;
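
With supported_vm_types in kvm_caps, the VM-type check earlier in the diff becomes a plain bitmask test: type < 32 && (kvm_caps.supported_vm_types & BIT(type)). A small self-contained illustration of that pattern, using stand-in constants rather than the real KVM_X86_* types:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)                  (1U << (n))
#define MY_DEFAULT_VM           0
#define MY_SW_PROTECTED_VM      1

static uint32_t supported_vm_types = BIT(MY_DEFAULT_VM);        /* filled at init */

static bool vm_type_supported(unsigned long type)
{
        /* type indexes a u32 bitmask, so anything >= 32 is rejected up front */
        return type < 32 && (supported_vm_types & BIT(type));
}

int main(void)
{
        supported_vm_types |= BIT(MY_SW_PROTECTED_VM);  /* e.g. when the TDP MMU is on */

        printf("default: %d, sw-protected: %d, bogus(40): %d\n",
               vm_type_supported(MY_DEFAULT_VM),
               vm_type_supported(MY_SW_PROTECTED_VM),
               vm_type_supported(40));
        return 0;
}
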
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 6da73513f026..98583a9dbab3 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -53,7 +53,6 @@ ifneq ($(CONFIG_X86_CMPXCHG64),y)
lib-y += atomic64_386_32.o
endif
else
- obj-y += iomap_copy_64.o
ifneq ($(CONFIG_GENERIC_CSUM),y)
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
endif
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 6e8b7e600def..97e88e58567b 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -4,6 +4,7 @@
#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
+#include <linux/instrumented.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -61,10 +62,20 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
*/
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
- if (copy_mc_fragile_enabled)
- return copy_mc_fragile(dst, src, len);
- if (static_cpu_has(X86_FEATURE_ERMS))
- return copy_mc_enhanced_fast_string(dst, src, len);
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ instrument_memcpy_before(dst, src, len);
+ ret = copy_mc_fragile(dst, src, len);
+ instrument_memcpy_after(dst, src, len, ret);
+ return ret;
+ }
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ instrument_memcpy_before(dst, src, len);
+ ret = copy_mc_enhanced_fast_string(dst, src, len);
+ instrument_memcpy_after(dst, src, len, ret);
+ return ret;
+ }
memcpy(dst, src, len);
return 0;
}
@@ -75,6 +86,7 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
unsigned long ret;
if (copy_mc_fragile_enabled) {
+ instrument_copy_to_user(dst, src, len);
__uaccess_begin();
ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
@@ -82,6 +94,7 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
}
if (static_cpu_has(X86_FEATURE_ERMS)) {
+ instrument_copy_to_user(dst, src, len);
__uaccess_begin();
ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
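
The copy_mc.c hunks bracket the assembly copy routines with instrument_memcpy_before()/instrument_memcpy_after() (and instrument_copy_to_user() on the user path) so sanitizers such as KASAN/KMSAN see accesses that bypass the compiler-instrumented memcpy(). A standalone sketch of the wrapping pattern; the instrumentation hooks below are stubs, not the real <linux/instrumented.h> helpers:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Stubs standing in for the sanitizer hooks. */
static void instrument_copy_before(const void *dst, const void *src, size_t len)
{
        (void)dst; (void)src;
        printf("tell the sanitizer about a %zu-byte copy before it happens\n", len);
}

static void instrument_copy_after(const void *dst, const void *src, size_t len,
                                  size_t bytes_not_copied)
{
        (void)dst; (void)src; (void)len;
        printf("report %zu byte(s) left uncopied\n", bytes_not_copied);
}

/* Stand-in for an uninstrumented assembly routine; returns bytes NOT copied. */
static size_t raw_copy(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

static size_t copy_checked(void *dst, const void *src, size_t len)
{
        size_t ret;

        instrument_copy_before(dst, src, len);
        ret = raw_copy(dst, src, len);
        instrument_copy_after(dst, src, len, ret);
        return ret;
}

int main(void)
{
        char src[8] = "abcdefg", dst[8] = { 0 };

        copy_checked(dst, src, sizeof(src));
        printf("dst=%s\n", dst);
        return 0;
}
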
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 1bb155a0955b..5952ab41c60f 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -185,6 +185,17 @@ found:
if (X86_REX_W(b))
/* REX.W overrides opnd_size */
insn->opnd_bytes = 8;
+ } else if (inat_is_rex2_prefix(attr)) {
+ insn_set_byte(&insn->rex_prefix, 0, b);
+ b = peek_nbyte_next(insn_byte_t, insn, 1);
+ insn_set_byte(&insn->rex_prefix, 1, b);
+ insn->rex_prefix.nbytes = 2;
+ insn->next_byte += 2;
+ if (X86_REX_W(b))
+ /* REX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ insn->rex_prefix.got = 1;
+ goto vex_end;
}
}
insn->rex_prefix.got = 1;
@@ -283,6 +294,10 @@ int insn_get_opcode(struct insn *insn)
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
+ /* SCALABLE EVEX uses p bits to encode operand size */
+ if (inat_evex_scalable(insn->attr) && !insn_vex_w_bit(insn) &&
+ p == INAT_PFX_OPNDSZ)
+ insn->opnd_bytes = 2;
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
(!inat_accept_vex(insn->attr) &&
!inat_is_group(insn->attr))) {
@@ -294,6 +309,20 @@ int insn_get_opcode(struct insn *insn)
goto end;
}
+ /* Check if there is REX2 prefix or not */
+ if (insn_is_rex2(insn)) {
+ if (insn_rex2_m_bit(insn)) {
+ /* map 1 is escape 0x0f */
+ insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);
+
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
+ } else {
+ insn->attr = inat_get_opcode_attribute(op);
+ }
+ goto end;
+ }
+
insn->attr = inat_get_opcode_attribute(op);
while (inat_is_escape(insn->attr)) {
/* Get escaped opcode */
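
In the insn.c hunks, 0xd5 in 64-bit mode is now decoded as a two-byte REX2 prefix: both bytes land in rex_prefix, REX.W in the payload still forces 8-byte operands, and the payload's map bit selects between the one-byte opcode map and the 0x0f escape map. A simplified standalone sketch of that payload handling; the bit positions used here (W at bit 3, map-select at bit 7) are assumptions chosen to make the sketch concrete and should be checked against the APX spec, not taken from this note:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define REX2_PREFIX     0xd5    /* first byte of a REX2 prefix in 64-bit mode */
#define REX2_W          0x08    /* assumed: REX.W at bit 3 of the payload */
#define REX2_M          0x80    /* assumed: map bit, 0 = legacy map, 1 = 0x0f map */

struct decoded {
        int  opnd_bytes;        /* operand size in bytes */
        bool map_0f;            /* opcode comes from the 0x0f escape map */
        int  consumed;          /* prefix bytes eaten */
};

static struct decoded decode_rex2(const uint8_t *insn, int default_opnd)
{
        struct decoded d = { .opnd_bytes = default_opnd };

        if (insn[0] == REX2_PREFIX) {
                uint8_t payload = insn[1];

                d.consumed = 2;
                if (payload & REX2_W)
                        d.opnd_bytes = 8;       /* REX.W overrides operand size */
                d.map_0f = payload & REX2_M;
        }
        return d;
}

int main(void)
{
        const uint8_t bytes[] = { REX2_PREFIX, REX2_W | REX2_M, 0xaf };
        struct decoded d = decode_rex2(bytes, 4);

        printf("consumed=%d opnd=%d map=%s\n",
               d.consumed, d.opnd_bytes, d.map_0f ? "0x0f" : "one-byte");
        return 0;
}
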
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
deleted file mode 100644
index 6ff2f56cb0f7..000000000000
--- a/arch/x86/lib/iomap_copy_64.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2006 PathScale, Inc. All Rights Reserved.
- */
-
-#include <linux/linkage.h>
-
-/*
- * override generic version in lib/iomap_copy.c
- */
-SYM_FUNC_START(__iowrite32_copy)
- movl %edx,%ecx
- rep movsl
- RET
-SYM_FUNC_END(__iowrite32_copy)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 721b528da9ac..391059b2c6fb 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -163,6 +163,7 @@ SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
lfence
jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
.popsection
.pushsection .text..__x86.rethunk_safe
@@ -224,10 +225,16 @@ SYM_CODE_START(srso_return_thunk)
SYM_CODE_END(srso_return_thunk)
#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
#else /* !CONFIG_MITIGATION_SRSO */
+/* Dummy for the alternative in CALL_UNTRAIN_RET. */
+SYM_CODE_START(srso_alias_untrain_ret)
+ ANNOTATE_UNRET_SAFE
+ ANNOTATE_NOENDBR
+ ret
+ int3
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
#define JMP_SRSO_UNTRAIN_RET "ud2"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_SRSO */
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
@@ -319,9 +326,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
SYM_FUNC_START(entry_untrain_ret)
- ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
- JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
- JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
+ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
@@ -377,8 +382,15 @@ SYM_FUNC_END(call_depth_return_thunk)
SYM_CODE_START(__x86_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
+ defined(CONFIG_MITIGATION_SRSO) || \
+ defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
"jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
+#else
+ ANNOTATE_UNRET_SAFE
+ ret
+#endif
int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 12af572201a2..caedb3ef6688 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -23,6 +23,7 @@
#
# AVX Superscripts
# (ev): this opcode requires EVEX prefix.
+# (es): this opcode requires EVEX prefix and is SCALABLE.
# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
# (v): this opcode requires VEX prefix.
# (v1): this opcode only supports 128bit VEX.
@@ -33,6 +34,10 @@
# - (F2): the last prefix is 0xF2
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+#
+# REX2 Prefix
+# - (!REX2): REX2 is not allowed
+# - (REX2): REX2 variant e.g. JMPABS
Table: one byte opcode
Referrer:
@@ -148,7 +153,7 @@ AVXcode:
65: SEG=GS (Prefix)
66: Operand-Size (Prefix)
67: Address-Size (Prefix)
-68: PUSH Iz (d64)
+68: PUSH Iz
69: IMUL Gv,Ev,Iz
6a: PUSH Ib (d64)
6b: IMUL Gv,Ev,Ib
@@ -157,22 +162,22 @@ AVXcode:
6e: OUTS/OUTSB DX,Xb
6f: OUTS/OUTSW/OUTSD DX,Xz
# 0x70 - 0x7f
-70: JO Jb
-71: JNO Jb
-72: JB/JNAE/JC Jb
-73: JNB/JAE/JNC Jb
-74: JZ/JE Jb
-75: JNZ/JNE Jb
-76: JBE/JNA Jb
-77: JNBE/JA Jb
-78: JS Jb
-79: JNS Jb
-7a: JP/JPE Jb
-7b: JNP/JPO Jb
-7c: JL/JNGE Jb
-7d: JNL/JGE Jb
-7e: JLE/JNG Jb
-7f: JNLE/JG Jb
+70: JO Jb (!REX2)
+71: JNO Jb (!REX2)
+72: JB/JNAE/JC Jb (!REX2)
+73: JNB/JAE/JNC Jb (!REX2)
+74: JZ/JE Jb (!REX2)
+75: JNZ/JNE Jb (!REX2)
+76: JBE/JNA Jb (!REX2)
+77: JNBE/JA Jb (!REX2)
+78: JS Jb (!REX2)
+79: JNS Jb (!REX2)
+7a: JP/JPE Jb (!REX2)
+7b: JNP/JPO Jb (!REX2)
+7c: JL/JNGE Jb (!REX2)
+7d: JNL/JGE Jb (!REX2)
+7e: JLE/JNG Jb (!REX2)
+7f: JNLE/JG Jb (!REX2)
# 0x80 - 0x8f
80: Grp1 Eb,Ib (1A)
81: Grp1 Ev,Iz (1A)
@@ -208,24 +213,24 @@ AVXcode:
9e: SAHF
9f: LAHF
# 0xa0 - 0xaf
-a0: MOV AL,Ob
-a1: MOV rAX,Ov
-a2: MOV Ob,AL
-a3: MOV Ov,rAX
-a4: MOVS/B Yb,Xb
-a5: MOVS/W/D/Q Yv,Xv
-a6: CMPS/B Xb,Yb
-a7: CMPS/W/D Xv,Yv
-a8: TEST AL,Ib
-a9: TEST rAX,Iz
-aa: STOS/B Yb,AL
-ab: STOS/W/D/Q Yv,rAX
-ac: LODS/B AL,Xb
-ad: LODS/W/D/Q rAX,Xv
-ae: SCAS/B AL,Yb
+a0: MOV AL,Ob (!REX2)
+a1: MOV rAX,Ov (!REX2) | JMPABS O (REX2),(o64)
+a2: MOV Ob,AL (!REX2)
+a3: MOV Ov,rAX (!REX2)
+a4: MOVS/B Yb,Xb (!REX2)
+a5: MOVS/W/D/Q Yv,Xv (!REX2)
+a6: CMPS/B Xb,Yb (!REX2)
+a7: CMPS/W/D Xv,Yv (!REX2)
+a8: TEST AL,Ib (!REX2)
+a9: TEST rAX,Iz (!REX2)
+aa: STOS/B Yb,AL (!REX2)
+ab: STOS/W/D/Q Yv,rAX (!REX2)
+ac: LODS/B AL,Xb (!REX2)
+ad: LODS/W/D/Q rAX,Xv (!REX2)
+ae: SCAS/B AL,Yb (!REX2)
# Note: The May 2011 Intel manual shows Xv for the second parameter of the
# next instruction but Yv is correct
-af: SCAS/W/D/Q rAX,Yv
+af: SCAS/W/D/Q rAX,Yv (!REX2)
# 0xb0 - 0xbf
b0: MOV AL/R8L,Ib
b1: MOV CL/R9L,Ib
@@ -266,7 +271,7 @@ d1: Grp2 Ev,1 (1A)
d2: Grp2 Eb,CL (1A)
d3: Grp2 Ev,CL (1A)
d4: AAM Ib (i64)
-d5: AAD Ib (i64)
+d5: AAD Ib (i64) | REX2 (Prefix),(o64)
d6:
d7: XLAT/XLATB
d8: ESC
@@ -281,26 +286,26 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64)
-e1: LOOPE/LOOPZ Jb (f64)
-e2: LOOP Jb (f64)
-e3: JrCXZ Jb (f64)
-e4: IN AL,Ib
-e5: IN eAX,Ib
-e6: OUT Ib,AL
-e7: OUT Ib,eAX
+e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
+e1: LOOPE/LOOPZ Jb (f64) (!REX2)
+e2: LOOP Jb (f64) (!REX2)
+e3: JrCXZ Jb (f64) (!REX2)
+e4: IN AL,Ib (!REX2)
+e5: IN eAX,Ib (!REX2)
+e6: OUT Ib,AL (!REX2)
+e7: OUT Ib,eAX (!REX2)
# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64)
-e9: JMP-near Jz (f64)
-ea: JMP-far Ap (i64)
-eb: JMP-short Jb (f64)
-ec: IN AL,DX
-ed: IN eAX,DX
-ee: OUT DX,AL
-ef: OUT DX,eAX
+e8: CALL Jz (f64) (!REX2)
+e9: JMP-near Jz (f64) (!REX2)
+ea: JMP-far Ap (i64) (!REX2)
+eb: JMP-short Jb (f64) (!REX2)
+ec: IN AL,DX (!REX2)
+ed: IN eAX,DX (!REX2)
+ee: OUT DX,AL (!REX2)
+ef: OUT DX,eAX (!REX2)
# 0xf0 - 0xff
f0: LOCK (Prefix)
f1:
@@ -386,14 +391,14 @@ AVXcode: 1
2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
# 0x0f 0x30-0x3f
-30: WRMSR
-31: RDTSC
-32: RDMSR
-33: RDPMC
-34: SYSENTER
-35: SYSEXIT
+30: WRMSR (!REX2)
+31: RDTSC (!REX2)
+32: RDMSR (!REX2)
+33: RDPMC (!REX2)
+34: SYSENTER (!REX2)
+35: SYSEXIT (!REX2)
36:
-37: GETSEC
+37: GETSEC (!REX2)
38: escape # 3-byte escape 1
39:
3a: escape # 3-byte escape 2
@@ -473,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64)
-81: JNO Jz (f64)
-82: JB/JC/JNAE Jz (f64)
-83: JAE/JNB/JNC Jz (f64)
-84: JE/JZ Jz (f64)
-85: JNE/JNZ Jz (f64)
-86: JBE/JNA Jz (f64)
-87: JA/JNBE Jz (f64)
-88: JS Jz (f64)
-89: JNS Jz (f64)
-8a: JP/JPE Jz (f64)
-8b: JNP/JPO Jz (f64)
-8c: JL/JNGE Jz (f64)
-8d: JNL/JGE Jz (f64)
-8e: JLE/JNG Jz (f64)
-8f: JNLE/JG Jz (f64)
+80: JO Jz (f64) (!REX2)
+81: JNO Jz (f64) (!REX2)
+82: JB/JC/JNAE Jz (f64) (!REX2)
+83: JAE/JNB/JNC Jz (f64) (!REX2)
+84: JE/JZ Jz (f64) (!REX2)
+85: JNE/JNZ Jz (f64) (!REX2)
+86: JBE/JNA Jz (f64) (!REX2)
+87: JA/JNBE Jz (f64) (!REX2)
+88: JS Jz (f64) (!REX2)
+89: JNS Jz (f64) (!REX2)
+8a: JP/JPE Jz (f64) (!REX2)
+8b: JNP/JPO Jz (f64) (!REX2)
+8c: JL/JNGE Jz (f64) (!REX2)
+8d: JNL/JGE Jz (f64) (!REX2)
+8e: JLE/JNG Jz (f64) (!REX2)
+8f: JNLE/JG Jz (f64) (!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
@@ -698,17 +703,17 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-50: vpdpbusd Vx,Hx,Wx (66),(ev)
-51: vpdpbusds Vx,Hx,Wx (66),(ev)
-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+50: vpdpbusd Vx,Hx,Wx (66) | vpdpbssd Vx,Hx,Wx (F2),(v) | vpdpbsud Vx,Hx,Wx (F3),(v) | vpdpbuud Vx,Hx,Wx (v)
+51: vpdpbusds Vx,Hx,Wx (66) | vpdpbssds Vx,Hx,Wx (F2),(v) | vpdpbsuds Vx,Hx,Wx (F3),(v) | vpdpbuuds Vx,Hx,Wx (v)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
54: vpopcntb/w Vx,Wx (66),(ev)
55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-5c: TDPBF16PS Vt,Wt,Ht (F3),(v1)
+5c: TDPBF16PS Vt,Wt,Ht (F3),(v1) | TDPFP16PS Vt,Wt,Ht (F2),(v1),(o64)
# Skip 0x5d
5e: TDPBSSD Vt,Wt,Ht (F2),(v1) | TDPBSUD Vt,Wt,Ht (F3),(v1) | TDPBUSD Vt,Wt,Ht (66),(v1) | TDPBUUD Vt,Wt,Ht (v1)
# Skip 0x5f-0x61
@@ -718,10 +723,12 @@ AVXcode: 2
65: vblendmps/d Vx,Hx,Wx (66),(ev)
66: vpblendmb/w Vx,Hx,Wx (66),(ev)
68: vp2intersectd/q Kx,Hx,Wx (F2),(ev)
-# Skip 0x69-0x6f
+# Skip 0x69-0x6b
+6c: TCMMIMFP16PS Vt,Wt,Ht (66),(v1),(o64) | TCMMRLFP16PS Vt,Wt,Ht (v1),(o64)
+# Skip 0x6d-0x6f
70: vpshldvw Vx,Hx,Wx (66),(ev)
71: vpshldvd/q Vx,Hx,Wx (66),(ev)
-72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3),(ev) | vpshrdvw Vx,Hx,Wx (66),(ev)
+72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3) | vpshrdvw Vx,Hx,Wx (66),(ev)
73: vpshrdvd/q Vx,Hx,Wx (66),(ev)
75: vpermi2b/w Vx,Hx,Wx (66),(ev)
76: vpermi2d/q Vx,Hx,Wx (66),(ev)
@@ -777,8 +784,10 @@ ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+b0: vcvtneebf162ps Vx,Mx (F3),(!11B),(v) | vcvtneeph2ps Vx,Mx (66),(!11B),(v) | vcvtneobf162ps Vx,Mx (F2),(!11B),(v) | vcvtneoph2ps Vx,Mx (!11B),(v)
+b1: vbcstnebf162ps Vx,Mw (F3),(!11B),(v) | vbcstnesh2ps Vx,Mw (66),(!11B),(v)
+b4: vpmadd52luq Vx,Hx,Wx (66)
+b5: vpmadd52huq Vx,Hx,Wx (66)
b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
@@ -796,15 +805,35 @@ c7: Grp19 (1A)
c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
c9: sha1msg1 Vdq,Wdq
ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev) | vsha512rnds2 Vqq,Hqq,Udq (F2),(11B),(v)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev) | vsha512msg1 Vqq,Udq (F2),(11B),(v)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev) | vsha512msg2 Vqq,Uqq (F2),(11B),(v)
cf: vgf2p8mulb Vx,Wx (66)
+d2: vpdpwsud Vx,Hx,Wx (F3),(v) | vpdpwusd Vx,Hx,Wx (66),(v) | vpdpwuud Vx,Hx,Wx (v)
+d3: vpdpwsuds Vx,Hx,Wx (F3),(v) | vpdpwusds Vx,Hx,Wx (66),(v) | vpdpwuuds Vx,Hx,Wx (v)
+d8: AESENCWIDE128KL Qpi (F3),(000),(00B) | AESENCWIDE256KL Qpi (F3),(000),(10B) | AESDECWIDE128KL Qpi (F3),(000),(01B) | AESDECWIDE256KL Qpi (F3),(000),(11B)
+da: vsm3msg1 Vdq,Hdq,Udq (v1) | vsm3msg2 Vdq,Hdq,Udq (66),(v1) | vsm4key4 Vx,Hx,Wx (F3),(v) | vsm4rnds4 Vx,Hx,Wx (F2),(v)
db: VAESIMC Vdq,Wdq (66),(v1)
-dc: vaesenc Vx,Hx,Wx (66)
-dd: vaesenclast Vx,Hx,Wx (66)
-de: vaesdec Vx,Hx,Wx (66)
-df: vaesdeclast Vx,Hx,Wx (66)
+dc: vaesenc Vx,Hx,Wx (66) | LOADIWKEY Vx,Hx (F3) | AESENC128KL Vpd,Qpi (F3)
+dd: vaesenclast Vx,Hx,Wx (66) | AESDEC128KL Vpd,Qpi (F3)
+de: vaesdec Vx,Hx,Wx (66) | AESENC256KL Vpd,Qpi (F3)
+df: vaesdeclast Vx,Hx,Wx (66) | AESDEC256KL Vpd,Qpi (F3)
+e0: CMPOXADD My,Gy,By (66),(v1),(o64)
+e1: CMPNOXADD My,Gy,By (66),(v1),(o64)
+e2: CMPBXADD My,Gy,By (66),(v1),(o64)
+e3: CMPNBXADD My,Gy,By (66),(v1),(o64)
+e4: CMPZXADD My,Gy,By (66),(v1),(o64)
+e5: CMPNZXADD My,Gy,By (66),(v1),(o64)
+e6: CMPBEXADD My,Gy,By (66),(v1),(o64)
+e7: CMPNBEXADD My,Gy,By (66),(v1),(o64)
+e8: CMPSXADD My,Gy,By (66),(v1),(o64)
+e9: CMPNSXADD My,Gy,By (66),(v1),(o64)
+ea: CMPPXADD My,Gy,By (66),(v1),(o64)
+eb: CMPNPXADD My,Gy,By (66),(v1),(o64)
+ec: CMPLXADD My,Gy,By (66),(v1),(o64)
+ed: CMPNLXADD My,Gy,By (66),(v1),(o64)
+ee: CMPLEXADD My,Gy,By (66),(v1),(o64)
+ef: CMPNLEXADD My,Gy,By (66),(v1),(o64)
f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
f2: ANDN Gy,By,Ey (v)
@@ -812,8 +841,11 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) | WRUSSD/Q My,Gy (66)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) | WRSSD/Q My,Gy
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
-f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3) | URDMSR Rq,Gq (F2),(11B) | UWRMSR Gq,Rq (F3),(11B)
f9: MOVDIRI My,Gy
+fa: ENCODEKEY128 Ew,Ew (F3)
+fb: ENCODEKEY256 Ew,Ew (F3)
+fc: AADD My,Gy | AAND My,Gy (66) | AOR My,Gy (F2) | AXOR My,Gy (F3)
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -893,10 +925,103 @@ c2: vcmpph Vx,Hx,Wx,Ib (ev) | vcmpsh Vx,Hx,Wx,Ib (F3),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
ce: vgf2p8affineqb Vx,Wx,Ib (66)
cf: vgf2p8affineinvqb Vx,Wx,Ib (66)
+de: vsm3rnds2 Vdq,Hdq,Wdq,Ib (66),(v1)
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
f0: RORX Gy,Ey,Ib (F2),(v) | HRESET Gv,Ib (F3),(000),(11B)
EndTable
+Table: EVEX map 4
+Referrer:
+AVXcode: 4
+00: ADD Eb,Gb (ev)
+01: ADD Ev,Gv (es) | ADD Ev,Gv (66),(es)
+02: ADD Gb,Eb (ev)
+03: ADD Gv,Ev (es) | ADD Gv,Ev (66),(es)
+08: OR Eb,Gb (ev)
+09: OR Ev,Gv (es) | OR Ev,Gv (66),(es)
+0a: OR Gb,Eb (ev)
+0b: OR Gv,Ev (es) | OR Gv,Ev (66),(es)
+10: ADC Eb,Gb (ev)
+11: ADC Ev,Gv (es) | ADC Ev,Gv (66),(es)
+12: ADC Gb,Eb (ev)
+13: ADC Gv,Ev (es) | ADC Gv,Ev (66),(es)
+18: SBB Eb,Gb (ev)
+19: SBB Ev,Gv (es) | SBB Ev,Gv (66),(es)
+1a: SBB Gb,Eb (ev)
+1b: SBB Gv,Ev (es) | SBB Gv,Ev (66),(es)
+20: AND Eb,Gb (ev)
+21: AND Ev,Gv (es) | AND Ev,Gv (66),(es)
+22: AND Gb,Eb (ev)
+23: AND Gv,Ev (es) | AND Gv,Ev (66),(es)
+24: SHLD Ev,Gv,Ib (es) | SHLD Ev,Gv,Ib (66),(es)
+28: SUB Eb,Gb (ev)
+29: SUB Ev,Gv (es) | SUB Ev,Gv (66),(es)
+2a: SUB Gb,Eb (ev)
+2b: SUB Gv,Ev (es) | SUB Gv,Ev (66),(es)
+2c: SHRD Ev,Gv,Ib (es) | SHRD Ev,Gv,Ib (66),(es)
+30: XOR Eb,Gb (ev)
+31: XOR Ev,Gv (es) | XOR Ev,Gv (66),(es)
+32: XOR Gb,Eb (ev)
+33: XOR Gv,Ev (es) | XOR Gv,Ev (66),(es)
+# CCMPSCC instructions are: CCOMB, CCOMBE, CCOMF, CCOML, CCOMLE, CCOMNB, CCOMNBE, CCOMNL, CCOMNLE,
+# CCOMNO, CCOMNS, CCOMNZ, CCOMO, CCOMS, CCOMT, CCOMZ
+38: CCMPSCC Eb,Gb (ev)
+39: CCMPSCC Ev,Gv (es) | CCMPSCC Ev,Gv (66),(es)
+3a: CCMPSCC Gv,Ev (ev)
+3b: CCMPSCC Gv,Ev (es) | CCMPSCC Gv,Ev (66),(es)
+40: CMOVO Gv,Ev (es) | CMOVO Gv,Ev (66),(es) | CFCMOVO Ev,Ev (es) | CFCMOVO Ev,Ev (66),(es) | SETO Eb (F2),(ev)
+41: CMOVNO Gv,Ev (es) | CMOVNO Gv,Ev (66),(es) | CFCMOVNO Ev,Ev (es) | CFCMOVNO Ev,Ev (66),(es) | SETNO Eb (F2),(ev)
+42: CMOVB Gv,Ev (es) | CMOVB Gv,Ev (66),(es) | CFCMOVB Ev,Ev (es) | CFCMOVB Ev,Ev (66),(es) | SETB Eb (F2),(ev)
+43: CMOVNB Gv,Ev (es) | CMOVNB Gv,Ev (66),(es) | CFCMOVNB Ev,Ev (es) | CFCMOVNB Ev,Ev (66),(es) | SETNB Eb (F2),(ev)
+44: CMOVZ Gv,Ev (es) | CMOVZ Gv,Ev (66),(es) | CFCMOVZ Ev,Ev (es) | CFCMOVZ Ev,Ev (66),(es) | SETZ Eb (F2),(ev)
+45: CMOVNZ Gv,Ev (es) | CMOVNZ Gv,Ev (66),(es) | CFCMOVNZ Ev,Ev (es) | CFCMOVNZ Ev,Ev (66),(es) | SETNZ Eb (F2),(ev)
+46: CMOVBE Gv,Ev (es) | CMOVBE Gv,Ev (66),(es) | CFCMOVBE Ev,Ev (es) | CFCMOVBE Ev,Ev (66),(es) | SETBE Eb (F2),(ev)
+47: CMOVNBE Gv,Ev (es) | CMOVNBE Gv,Ev (66),(es) | CFCMOVNBE Ev,Ev (es) | CFCMOVNBE Ev,Ev (66),(es) | SETNBE Eb (F2),(ev)
+48: CMOVS Gv,Ev (es) | CMOVS Gv,Ev (66),(es) | CFCMOVS Ev,Ev (es) | CFCMOVS Ev,Ev (66),(es) | SETS Eb (F2),(ev)
+49: CMOVNS Gv,Ev (es) | CMOVNS Gv,Ev (66),(es) | CFCMOVNS Ev,Ev (es) | CFCMOVNS Ev,Ev (66),(es) | SETNS Eb (F2),(ev)
+4a: CMOVP Gv,Ev (es) | CMOVP Gv,Ev (66),(es) | CFCMOVP Ev,Ev (es) | CFCMOVP Ev,Ev (66),(es) | SETP Eb (F2),(ev)
+4b: CMOVNP Gv,Ev (es) | CMOVNP Gv,Ev (66),(es) | CFCMOVNP Ev,Ev (es) | CFCMOVNP Ev,Ev (66),(es) | SETNP Eb (F2),(ev)
+4c: CMOVL Gv,Ev (es) | CMOVL Gv,Ev (66),(es) | CFCMOVL Ev,Ev (es) | CFCMOVL Ev,Ev (66),(es) | SETL Eb (F2),(ev)
+4d: CMOVNL Gv,Ev (es) | CMOVNL Gv,Ev (66),(es) | CFCMOVNL Ev,Ev (es) | CFCMOVNL Ev,Ev (66),(es) | SETNL Eb (F2),(ev)
+4e: CMOVLE Gv,Ev (es) | CMOVLE Gv,Ev (66),(es) | CFCMOVLE Ev,Ev (es) | CFCMOVLE Ev,Ev (66),(es) | SETLE Eb (F2),(ev)
+4f: CMOVNLE Gv,Ev (es) | CMOVNLE Gv,Ev (66),(es) | CFCMOVNLE Ev,Ev (es) | CFCMOVNLE Ev,Ev (66),(es) | SETNLE Eb (F2),(ev)
+60: MOVBE Gv,Ev (es) | MOVBE Gv,Ev (66),(es)
+61: MOVBE Ev,Gv (es) | MOVBE Ev,Gv (66),(es)
+65: WRUSSD Md,Gd (66),(ev) | WRUSSQ Mq,Gq (66),(ev)
+66: ADCX Gy,Ey (66),(ev) | ADOX Gy,Ey (F3),(ev) | WRSSD Md,Gd (ev) | WRSSQ Mq,Gq (66),(ev)
+69: IMUL Gv,Ev,Iz (es) | IMUL Gv,Ev,Iz (66),(es)
+6b: IMUL Gv,Ev,Ib (es) | IMUL Gv,Ev,Ib (66),(es)
+80: Grp1 Eb,Ib (1A),(ev)
+81: Grp1 Ev,Iz (1A),(es)
+83: Grp1 Ev,Ib (1A),(es)
+# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
+# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
+84: CTESTSCC (ev)
+85: CTESTSCC (es) | CTESTSCC (66),(es)
+88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
+8f: POP2 Bq,Rq (000),(11B),(ev)
+a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
+ad: SHRD Ev,Gv,CL (es) | SHRD Ev,Gv,CL (66),(es)
+af: IMUL Gv,Ev (es) | IMUL Gv,Ev (66),(es)
+c0: Grp2 Eb,Ib (1A),(ev)
+c1: Grp2 Ev,Ib (1A),(es)
+d0: Grp2 Eb,1 (1A),(ev)
+d1: Grp2 Ev,1 (1A),(es)
+d2: Grp2 Eb,CL (1A),(ev)
+d3: Grp2 Ev,CL (1A),(es)
+f0: CRC32 Gy,Eb (es) | INVEPT Gq,Mdq (F3),(ev)
+f1: CRC32 Gy,Ey (es) | CRC32 Gy,Ey (66),(es) | INVVPID Gy,Mdq (F3),(ev)
+f2: INVPCID Gy,Mdq (F3),(ev)
+f4: TZCNT Gv,Ev (es) | TZCNT Gv,Ev (66),(es)
+f5: LZCNT Gv,Ev (es) | LZCNT Gv,Ev (66),(es)
+f6: Grp3_1 Eb (1A),(ev)
+f7: Grp3_2 Ev (1A),(es)
+f8: MOVDIR64B Gv,Mdqq (66),(ev) | ENQCMD Gv,Mdqq (F2),(ev) | ENQCMDS Gv,Mdqq (F3),(ev) | URDMSR Rq,Gq (F2),(11B),(ev) | UWRMSR Gq,Rq (F3),(11B),(ev)
+f9: MOVDIRI My,Gy (ev)
+fe: Grp4 (1A),(ev)
+ff: Grp5 (1A),(es) | PUSH2 Bq,Rq (110),(11B),(ev)
+EndTable
+
Table: EVEX map 5
Referrer:
AVXcode: 5
@@ -975,6 +1100,12 @@ d6: vfcmulcph Vx,Hx,Wx (F2),(ev) | vfmulcph Vx,Hx,Wx (F3),(ev)
d7: vfcmulcsh Vx,Hx,Wx (F2),(ev) | vfmulcsh Vx,Hx,Wx (F3),(ev)
EndTable
+Table: VEX map 7
+Referrer:
+AVXcode: 7
+f8: URDMSR Rq,Id (F2),(v1),(11B) | UWRMSR Id,Rq (F3),(v1),(11B)
+EndTable
+
GrpTable: Grp1
0: ADD
1: OR
@@ -1051,7 +1182,7 @@ GrpTable: Grp6
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) | WRMSRNS (110),(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) | WRMSRNS (110),(11B) | RDMSRLIST (F2),(110),(11B) | WRMSRLIST (F3),(110),(11B) | PBNDKB (111),(11B)
1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) | ERETU (F3),(010),(11B) | ERETS (F2),(010),(11B)
2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
@@ -1137,6 +1268,8 @@ GrpTable: Grp16
1: prefetch T0
2: prefetch T1
3: prefetch T2
+6: prefetch IT1
+7: prefetch IT0
EndTable
GrpTable: Grp17
diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c
index 1b118fd93140..39423ec409e1 100644
--- a/arch/x86/math-emu/fpu_etc.c
+++ b/arch/x86/math-emu/fpu_etc.c
@@ -120,9 +120,14 @@ static void fxam(FPU_REG *st0_ptr, u_char st0tag)
setcc(c);
}
+static void FPU_ST0_illegal(FPU_REG *st0_ptr, u_char st0_tag)
+{
+ FPU_illegal();
+}
+
static FUNC_ST0 const fp_etc_table[] = {
- fchs, fabs, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal,
- ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal
+ fchs, fabs, FPU_ST0_illegal, FPU_ST0_illegal,
+ ftst_, fxam, FPU_ST0_illegal, FPU_ST0_illegal,
};
void FPU_etc(void)
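
This math-emu hunk and the two below replace casts like (FUNC_ST0) FPU_illegal with small wrappers whose prototypes actually match the table's function type. Calling through a pointer of the wrong type is undefined behaviour in C and defeats indirect-call checking such as kCFI, which is presumably the motivation here. A tiny standalone illustration of the wrapper pattern:

#include <stdio.h>

typedef struct { double val; } FPU_REG_demo;
typedef void (*FUNC_ST0_demo)(FPU_REG_demo *st0, unsigned char tag);

static void illegal(void)
{
        puts("illegal FPU opcode");
}

/*
 * Wrapper with the exact FUNC_ST0_demo signature; the arguments are simply
 * ignored.  An indirect call through the table now matches the callee's real
 * prototype, so a CFI check at the call site is satisfied.
 */
static void st0_illegal(FPU_REG_demo *st0, unsigned char tag)
{
        (void)st0;
        (void)tag;
        illegal();
}

static void fchs_demo(FPU_REG_demo *st0, unsigned char tag)
{
        (void)tag;
        st0->val = -st0->val;
}

static const FUNC_ST0_demo table[] = { fchs_demo, st0_illegal };

int main(void)
{
        FPU_REG_demo st0 = { .val = 2.5 };

        table[0](&st0, 0);
        printf("st0 = %f\n", st0.val);
        table[1](&st0, 0);
        return 0;
}
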
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 990d847ae902..85daf98c81c3 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -433,13 +433,13 @@ static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
#endif /* PARANOID */
}
-static void fdecstp(void)
+static void fdecstp(FPU_REG *st0_ptr, u_char st0_tag)
{
clear_C1();
top--;
}
-static void fincstp(void)
+static void fincstp(FPU_REG *st0_ptr, u_char st0_tag)
{
clear_C1();
top++;
@@ -1631,7 +1631,7 @@ static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
static FUNC_ST0 const trig_table_a[] = {
f2xm1, fyl2x, fptan, fpatan,
- fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp
+ fxtract, fprem1, fdecstp, fincstp,
};
void FPU_triga(void)
diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
index 742619e94bdf..003a0b2753e6 100644
--- a/arch/x86/math-emu/reg_constant.c
+++ b/arch/x86/math-emu/reg_constant.c
@@ -108,8 +108,13 @@ static void fldz(int rc)
typedef void (*FUNC_RC) (int);
+static void FPU_RC_illegal(int unused)
+{
+ FPU_illegal();
+}
+
static FUNC_RC constants_table[] = {
- fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
+ fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, FPU_RC_illegal
};
void fconst(void)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 428048e73bd2..8d3a00e5c528 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -34,7 +34,7 @@ obj-y += pat/
CFLAGS_physaddr.o := -fno-stack-protector
CFLAGS_mem_encrypt_identity.o := -fno-stack-protector
-CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
+CFLAGS_fault.o := -I $(src)/../include/asm/trace
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b522933bfa56..51986e8a9d35 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -164,13 +164,6 @@ static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
return ex_handler_default(fixup, regs);
}
-static bool ex_handler_copy(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
-{
- WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
- return ex_handler_fault(fixup, regs, trapnr);
-}
-
static bool ex_handler_msr(const struct exception_table_entry *fixup,
struct pt_regs *regs, bool wrmsr, bool safe, int reg)
{
@@ -341,8 +334,6 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
return ex_handler_fault(e, regs, trapnr);
case EX_TYPE_UACCESS:
return ex_handler_uaccess(e, regs, trapnr, fault_addr);
- case EX_TYPE_COPY:
- return ex_handler_copy(e, regs, trapnr);
case EX_TYPE_CLEAR_FS:
return ex_handler_clear_fs(e, regs);
case EX_TYPE_FPU_RESTORE:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 622d12ec7f08..e6c469b323cc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,6 +20,7 @@
#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h> /* find_and_lock_vma() */
+#include <linux/vmalloc.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
@@ -514,18 +515,19 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
if (error_code & X86_PF_INSTR) {
unsigned int level;
+ bool nx, rw;
pgd_t *pgd;
pte_t *pte;
pgd = __va(read_cr3_pa());
pgd += pgd_index(address);
- pte = lookup_address_in_pgd(pgd, address, &level);
+ pte = lookup_address_in_pgd_attr(pgd, address, &level, &nx, &rw);
- if (pte && pte_present(*pte) && !pte_exec(*pte))
+ if (pte && pte_present(*pte) && (!pte_exec(*pte) || nx))
pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
from_kuid(&init_user_ns, current_uid()));
- if (pte && pte_present(*pte) && pte_exec(*pte) &&
+ if (pte && pte_present(*pte) && pte_exec(*pte) && !nx &&
(pgd_flags(*pgd) & _PAGE_USER) &&
(__read_cr4() & X86_CR4_SMEP))
pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
@@ -723,39 +725,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
WARN_ON_ONCE(user_mode(regs));
/* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
- /*
- * Any interrupt that takes a fault gets the fixup. This makes
- * the below recursive fault logic only apply to a faults from
- * task context.
- */
- if (in_interrupt())
- return;
-
- /*
- * Per the above we're !in_interrupt(), aka. task context.
- *
- * In this case we need to make sure we're not recursively
- * faulting through the emulate_vsyscall() logic.
- */
- if (current->thread.sig_on_uaccess_err && signal) {
- sanitize_error_code(address, &error_code);
-
- set_signal_archinfo(address, error_code);
-
- if (si_code == SEGV_PKUERR) {
- force_sig_pkuerr((void __user *)address, pkey);
- } else {
- /* XXX: hwpoison faults will set the wrong code. */
- force_sig_fault(signal, si_code, (void __user *)address);
- }
- }
-
- /*
- * Barring that, we can do the fixup and be happy.
- */
+ if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
return;
- }
/*
* AMD erratum #91 manifests as a spurious page fault on a PREFETCH
@@ -865,14 +836,17 @@ bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, u32 pkey, int si_code)
+ unsigned long address, struct mm_struct *mm,
+ struct vm_area_struct *vma, u32 pkey, int si_code)
{
- struct mm_struct *mm = current->mm;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- mmap_read_unlock(mm);
+ if (mm)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
@@ -896,7 +870,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma)
+ unsigned long address, struct mm_struct *mm,
+ struct vm_area_struct *vma)
{
/*
* This OSPKE check is not strictly necessary at runtime.
@@ -926,9 +901,9 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
*/
u32 pkey = vma_pkey(vma);
- __bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
+ __bad_area(regs, error_code, address, mm, vma, pkey, SEGV_PKUERR);
} else {
- __bad_area(regs, error_code, address, 0, SEGV_ACCERR);
+ __bad_area(regs, error_code, address, mm, vma, 0, SEGV_ACCERR);
}
}
@@ -1356,8 +1331,9 @@ void do_user_addr_fault(struct pt_regs *regs,
goto lock_mmap;
if (unlikely(access_error(error_code, vma))) {
- vma_end_read(vma);
- goto lock_mmap;
+ bad_area_access_error(regs, error_code, address, NULL, vma);
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ return;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
@@ -1393,7 +1369,7 @@ retry:
* we can handle it..
*/
if (unlikely(access_error(error_code, vma))) {
- bad_area_access_error(regs, error_code, address, vma);
+ bad_area_access_error(regs, error_code, address, mm, vma);
return;
}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 5804bbae4f01..807a5859a3c4 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -19,41 +19,14 @@
#include <asm/tlbflush.h>
#include <asm/elf.h>
-/*
- * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
- * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
- * Otherwise, returns 0.
- */
-int pmd_huge(pmd_t pmd)
-{
- return !pmd_none(pmd) &&
- (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
-}
-
-/*
- * pud_huge() returns 1 if @pud is hugetlb related entry, that is normal
- * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
- * Otherwise, returns 0.
- */
-int pud_huge(pud_t pud)
-{
-#if CONFIG_PGTABLE_LEVELS > 2
- return !pud_none(pud) &&
- (pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
-#else
- return 0;
-#endif
-}
-
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
- info.flags = 0;
info.length = len;
info.low_limit = get_mmap_base(1);
@@ -65,7 +38,6 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
return vm_unmapped_area(&info);
}
@@ -74,7 +46,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
@@ -89,7 +61,6 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
@@ -141,7 +112,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
get_unmapped_area:
- if (mm->get_unmapped_area == arch_get_unmapped_area)
+ if (!test_bit(MMF_TOPDOWN, &mm->flags))
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
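
The bottom-up/top-down choice is now a flag bit on the mm rather than a comparison against the old get_unmapped_area function pointer. A minimal sketch of the same test (hypothetical helper name):

static inline bool mm_layout_is_topdown(struct mm_struct *mm)
{
	return test_bit(MMF_TOPDOWN, &mm->flags);
}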
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index a204a332c71f..968d7005f4a7 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
- bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
- /* if this is already a gbpage, this portion is already mapped */
- if (pud_leaf(*pud))
- continue;
-
- /* Is using a gbpage allowed? */
- use_gbpage = info->direct_gbpages;
-
- /* Don't use gbpage if it maps more than the requested region. */
- /* at the begining: */
- use_gbpage &= ((addr & ~PUD_MASK) == 0);
- /* ... or at the end: */
- use_gbpage &= ((next & ~PUD_MASK) == 0);
-
- /* Never overwrite existing mappings */
- use_gbpage &= !pud_present(*pud);
-
- if (use_gbpage) {
+ if (info->direct_gbpages) {
pud_t pudval;
+ if (pud_present(*pud))
+ continue;
+
+ addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;
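
With direct_gbpages set, the rewritten loop populates the whole 1 GiB entry covering the request (addr is masked with PUD_MASK) instead of requiring the request to start and end exactly on gbpage boundaries, while still refusing to overwrite a present entry. The gating condition in isolation, as a hedged sketch (hypothetical helper name):

static bool use_gbpage_sketch(struct x86_mapping_info *info, pud_t *pud)
{
	return info->direct_gbpages && !pud_present(*pud);
}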
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 679893ea5e68..eb503f53c319 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>
+#include <linux/execmem.h>
#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
@@ -261,21 +262,17 @@ static void __init probe_page_size_mask(void)
}
}
-#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
- .family = 6, \
- .model = _model, \
- }
/*
* INVLPG may not properly flush Global entries
* on these CPUs when PCIDs are enabled.
*/
static const struct x86_cpu_id invlpg_miss_ids[] = {
- INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
- INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ),
- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, 0),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, 0),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0),
{}
};
@@ -990,53 +987,6 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-/*
- * Calculate the precise size of the DMA zone (first 16 MB of RAM),
- * and pass it to the MM layer - to help it set zone watermarks more
- * accurately.
- *
- * Done on 64-bit systems only for the time being, although 32-bit systems
- * might benefit from this as well.
- */
-void __init memblock_find_dma_reserve(void)
-{
-#ifdef CONFIG_X86_64
- u64 nr_pages = 0, nr_free_pages = 0;
- unsigned long start_pfn, end_pfn;
- phys_addr_t start_addr, end_addr;
- int i;
- u64 u;
-
- /*
- * Iterate over all memory ranges (free and reserved ones alike),
- * to calculate the total number of pages in the first 16 MB of RAM:
- */
- nr_pages = 0;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
- start_pfn = min(start_pfn, MAX_DMA_PFN);
- end_pfn = min(end_pfn, MAX_DMA_PFN);
-
- nr_pages += end_pfn - start_pfn;
- }
-
- /*
- * Iterate over free memory ranges to calculate the number of free
- * pages in the DMA zone, while not counting potential partial
- * pages at the beginning or the end of the range:
- */
- nr_free_pages = 0;
- for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
- start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
- end_pfn = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);
-
- if (start_pfn < end_pfn)
- nr_free_pages += end_pfn - start_pfn;
- }
-
- set_dma_reserve(nr_pages - nr_free_pages);
-#endif
-}
-
void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -1099,3 +1049,31 @@ unsigned long arch_max_swapfile_size(void)
return pages;
}
#endif
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ unsigned long start, offset = 0;
+
+ if (kaslr_enabled())
+ offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+
+ start = MODULES_VADDR + offset;
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .flags = EXECMEM_KASAN_SHADOW,
+ .start = start,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = MODULE_ALIGN,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
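
The table above registers a single EXECMEM_DEFAULT range that starts at a KASLR-randomized offset into the module area. A hedged sketch of the consumer side, assuming the generic execmem_alloc()/execmem_free() entry points from the same series:

static void exercise_execmem_sketch(void)
{
	/* The allocation is expected to land in [start, MODULES_END). */
	void *p = execmem_alloc(EXECMEM_DEFAULT, PAGE_SIZE);

	if (p)
		execmem_free(p);
}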
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 6f3b3e028718..0a120d85d7bb 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -102,6 +102,13 @@ void __init mem_encrypt_setup_arch(void)
phys_addr_t total_mem = memblock_phys_mem_size();
unsigned long size;
+ /*
+ * Do RMP table fixups after the e820 tables have been setup by
+ * e820__memory_setup().
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ snp_fixup_e820_tables();
+
if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
return;
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 70b91de2e053..422602f6039b 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -492,6 +492,24 @@ void __init sme_early_init(void)
*/
if (sev_status & MSR_AMD64_SEV_ENABLED)
ia32_disable();
+
+ /*
+ * Override init functions that scan the ROM region in SEV-SNP guests,
+ * as this memory is not pre-validated and would thus cause a crash.
+ */
+ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
+ x86_init.mpparse.find_mptable = x86_init_noop;
+ x86_init.pci.init_irq = x86_init_noop;
+ x86_init.resources.probe_roms = x86_init_noop;
+
+ /*
+ * DMI setup behavior for SEV-SNP guests depends on
+ * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
+ * parsed yet. snp_dmi_setup() will run after that
+ * parsing has happened.
+ */
+ x86_init.resources.dmi_setup = snp_dmi_setup;
+ }
}
void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index c90c20904a60..a2cabb1c81e1 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -129,9 +129,9 @@ static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
if (mmap_is_legacy())
- mm->get_unmapped_area = arch_get_unmapped_area;
+ clear_bit(MMF_TOPDOWN, &mm->flags);
else
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ set_bit(MMF_TOPDOWN, &mm->flags);
arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 65e9a6e391c0..ce84ba86e69e 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -929,6 +929,8 @@ int memory_add_physaddr_to_nid(u64 start)
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
static int __init cmp_memblk(const void *a, const void *b)
{
const struct numa_memblk *ma = *(const struct numa_memblk **)a;
@@ -1001,5 +1003,3 @@ int __init numa_fill_memblks(u64 start, u64 end)
}
return 0;
}
-
-#endif
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 104544359d69..65fda406e6f2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,6 +24,8 @@
#include <linux/memblock.h>
#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <asm/pgtable_areas.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 0d72183b5dd0..bdc2a240c2aa 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -39,6 +39,7 @@
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rbtree.h>
@@ -947,6 +948,61 @@ static void free_pfn_range(u64 paddr, unsigned long size)
memtype_free(paddr, paddr + size);
}
+static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
+ resource_size_t *phys)
+{
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+
+ if (follow_pte(vma, vma->vm_start, &ptep, &ptl))
+ return -EINVAL;
+
+ pte = ptep_get(ptep);
+
+ /* Never return PFNs of anon folios in COW mappings. */
+ if (vm_normal_folio(vma, vma->vm_start, pte)) {
+ pte_unmap_unlock(ptep, ptl);
+ return -EINVAL;
+ }
+
+ *prot = pgprot_val(pte_pgprot(pte));
+ *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+ pte_unmap_unlock(ptep, ptl);
+ return 0;
+}
+
+static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
+ pgprot_t *pgprot)
+{
+ unsigned long prot;
+
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
+
+ /*
+ * We need the starting PFN and cachemode used for track_pfn_remap()
+ * that covered the whole VMA. For most mappings, we can obtain that
+ * information from the page tables. For COW mappings, we might now
+ * suddenly have anon folios mapped and follow_phys() will fail.
+ *
+	 * Fall back to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
+ * detect the PFN. If we need the cachemode as well, we're out of luck
+ * for now and have to fail fork().
+ */
+ if (!follow_phys(vma, &prot, paddr)) {
+ if (pgprot)
+ *pgprot = __pgprot(prot);
+ return 0;
+ }
+ if (is_cow_mapping(vma->vm_flags)) {
+ if (pgprot)
+ return -EINVAL;
+ *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+ return 0;
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
/*
* track_pfn_copy is called when vma that is covering the pfnmap gets
* copied through copy_page_range().
@@ -957,20 +1013,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
resource_size_t paddr;
- unsigned long prot;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
if (vma->vm_flags & VM_PAT) {
- /*
- * reserve the whole chunk covered by vma. We need the
- * starting address and protection from pte.
- */
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
- WARN_ON_ONCE(1);
+ if (get_pat_info(vma, &paddr, &pgprot))
return -EINVAL;
- }
- pgprot = __pgprot(prot);
+ /* reserve the whole chunk covered by vma. */
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
@@ -1045,7 +1094,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
- unsigned long prot;
if (vma && !(vma->vm_flags & VM_PAT))
return;
@@ -1053,11 +1101,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
- WARN_ON_ONCE(1);
+ if (get_pat_info(vma, &paddr, NULL))
return;
- }
-
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
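
get_pat_info() prefers the PFN and cachemode read from the page table and, for COW mappings, can still recover the PFN from vma->vm_pgoff, but only when the caller passes a NULL pgprot. A sketch of the fork-path contract, restating the track_pfn_copy() hunk above (the function name is hypothetical):

static int pat_copy_sketch(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	pgprot_t pgprot;

	/* Needs both PFN and cachemode, so this may fail for COW mappings. */
	if (get_pat_info(vma, &paddr, &pgprot))
		return -EINVAL;

	return reserve_pfn_range(paddr, vma->vm_end - vma->vm_start, &pgprot, 1);
}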
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 80c9037ffadf..19fdfbb171ed 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
* Validate strict W^X semantics.
*/
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
- unsigned long pfn, unsigned long npg)
+ unsigned long pfn, unsigned long npg,
+ bool nx, bool rw)
{
unsigned long end;
@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
return new;
+ /* Non-leaf translation entries can disable writing or execution. */
+ if (!rw || nx)
+ return new;
+
end = start + npg * PAGE_SIZE - 1;
WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
(unsigned long long)pgprot_val(old),
@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
/*
* Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry and the level of the mapping.
+ * Return a pointer to the entry, the level of the mapping, and the effective
+ * NX and RW bits of all page table levels.
*/
-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
- unsigned int *level)
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+ unsigned int *level, bool *nx, bool *rw)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
*level = PG_LEVEL_NONE;
+ *nx = false;
+ *rw = true;
if (pgd_none(*pgd))
return NULL;
+ *nx |= pgd_flags(*pgd) & _PAGE_NX;
+ *rw &= pgd_flags(*pgd) & _PAGE_RW;
+
p4d = p4d_offset(pgd, address);
if (p4d_none(*p4d))
return NULL;
@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d;
+ *nx |= p4d_flags(*p4d) & _PAGE_NX;
+ *rw &= p4d_flags(*p4d) & _PAGE_RW;
+
pud = pud_offset(p4d, address);
if (pud_none(*pud))
return NULL;
@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud;
+ *nx |= pud_flags(*pud) & _PAGE_NX;
+ *rw &= pud_flags(*pud) & _PAGE_RW;
+
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
@@ -695,12 +712,27 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
+ *nx |= pmd_flags(*pmd) & _PAGE_NX;
+ *rw &= pmd_flags(*pmd) & _PAGE_RW;
+
*level = PG_LEVEL_4K;
return pte_offset_kernel(pmd, address);
}
/*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
+{
+ bool nx, rw;
+
+ return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
+}
+
+/*
* Lookup the page table entry for a virtual address. Return a pointer
* to the entry and the level of the mapping.
*
@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
EXPORT_SYMBOL_GPL(lookup_address);
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
- unsigned int *level)
+ unsigned int *level, bool *nx, bool *rw)
{
- if (cpa->pgd)
- return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
- address, level);
+ pgd_t *pgd;
+
+ if (!cpa->pgd)
+ pgd = pgd_offset_k(address);
+ else
+ pgd = cpa->pgd + pgd_index(address);
- return lookup_address(address, level);
+ return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
}
/*
@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, *tmp;
enum pg_level level;
+ bool nx, rw;
/*
* Check for races, another CPU might have split this page
* up already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte)
return 1;
@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
psize, CPA_DETECT);
- new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
+ new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
+ nx, rw);
/*
* If there is a conflict, split the large page.
@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
pte_t *pbase = (pte_t *)page_address(base);
unsigned int i, level;
pgprot_t ref_prot;
+ bool nx, rw;
pte_t *tmp;
spin_lock(&pgd_lock);
@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page
* up for us already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte) {
spin_unlock(&pgd_lock);
return 1;
@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
int do_split, err;
unsigned int level;
pte_t *kpte, old_pte;
+ bool nx, rw;
address = __cpa_addr(cpa, cpa->curpage);
repeat:
- kpte = _lookup_address_cpa(cpa, address, &level);
+ kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (!kpte)
return __cpa_process_fault(cpa, address, primary);
@@ -1619,7 +1658,8 @@ repeat:
new_prot = static_protections(new_prot, address, pfn, 1, 0,
CPA_PROTECT);
- new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
+ new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
+ nx, rw);
new_prot = pgprot_clear_protnone_bits(new_prot);
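
The new nx/rw outputs are the effective upper-level permissions, accumulated by OR-ing _PAGE_NX and AND-ing _PAGE_RW while walking down the tables; a mapping only counts as W+X when the leaf and every level above it allow both. A minimal predicate expressing that, as a hedged sketch (hypothetical helper name):

static bool effectively_wx_sketch(pgprot_t leaf, bool nx, bool rw)
{
	bool writable   = rw && (pgprot_val(leaf) & _PAGE_RW);
	bool executable = !nx && !(pgprot_val(leaf) & _PAGE_NX);

	return writable && executable;
}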
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d007591b8059..93e54ba91fbf 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -631,6 +631,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+
/*
* No flush is necessary. Once an invalid PTE is established, the PTE's
* access and dirty bits cannot be updated.
@@ -731,7 +733,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
return 0;
	/* Bail out if we are on a populated non-leaf entry: */
- if (pud_present(*pud) && !pud_huge(*pud))
+ if (pud_present(*pud) && !pud_leaf(*pud))
return 0;
set_pte((pte_t *)pud, pfn_pte(
@@ -760,7 +762,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
}
	/* Bail out if we are on a populated non-leaf entry: */
- if (pmd_present(*pmd) && !pmd_huge(*pmd))
+ if (pmd_present(*pmd) && !pmd_leaf(*pmd))
return 0;
set_pte((pte_t *)pmd, pfn_pte(
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a7ba8e178645..5159c7a22922 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
OPTIMIZER_HIDE_VAR(func);
- x86_call_depth_emit_accounting(pprog, func);
+ ip += x86_call_depth_emit_accounting(pprog, func, ip);
return emit_patch(pprog, func, ip, 0xE8);
}
@@ -816,9 +816,10 @@ done:
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
+ u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
u8 *prog = *pprog;
- if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
+ if (is_uimm32(imm64)) {
/*
* For emitting plain u32, where sign bit must not be
* propagated LLVM tends to load imm64 over mov32
@@ -826,6 +827,8 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
* 'mov %eax, imm32' instead.
*/
emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
+ } else if (is_simm32(imm64)) {
+ emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
} else {
/* movabsq rax, imm64 */
EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
@@ -1169,6 +1172,54 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
return 0;
}
+static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
+ u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ EMIT1(0xF0); /* lock prefix */
+ switch (size) {
+ case BPF_W:
+ EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
+ break;
+ case BPF_DW:
+ EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
+ break;
+ default:
+ pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
+ return -EFAULT;
+ }
+
+ /* emit opcode */
+ switch (atomic_op) {
+ case BPF_ADD:
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
+ EMIT1(simple_alu_opcodes[atomic_op]);
+ break;
+ case BPF_ADD | BPF_FETCH:
+ /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
+ EMIT2(0x0F, 0xC1);
+ break;
+ case BPF_XCHG:
+ /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
+ EMIT1(0x87);
+ break;
+ case BPF_CMPXCHG:
+ /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
+ EMIT2(0x0F, 0xB1);
+ break;
+ default:
+ pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+ return -EFAULT;
+ }
+ emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
+ *pprog = prog;
+ return 0;
+}
+
#define DONT_CLEAR 1
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
@@ -1351,8 +1402,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
- if (insn->off == BPF_ADDR_SPACE_CAST &&
- insn->imm == 1U << 16) {
+ if (insn_is_cast_user(insn)) {
if (dst_reg != src_reg)
/* 32-bit mov */
emit_mov_reg(&prog, false, dst_reg, src_reg);
@@ -1383,6 +1433,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
break;
+ } else if (insn_is_mov_percpu_addr(insn)) {
+ /* mov <dst>, <src> (if necessary) */
+ EMIT_mov(dst_reg, src_reg);
+#ifdef CONFIG_SMP
+ /* add <dst>, gs:[<off>] */
+ EMIT2(0x65, add_1mod(0x48, dst_reg));
+ EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
+ EMIT((u32)(unsigned long)&this_cpu_off, 4);
+#endif
+ break;
}
fallthrough;
case BPF_ALU | BPF_MOV | BPF_X:
@@ -1807,36 +1867,41 @@ populate_extable:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
/* Conservatively check that src_reg + insn->off is a kernel address:
- * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
- * src_reg is used as scratch for src_reg += insn->off and restored
- * after emit_ldx if necessary
+ * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
+ * and
+ * src_reg + insn->off < VSYSCALL_ADDR
*/
- u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
u8 *end_of_jmp;
- /* At end of these emitted checks, insn->off will have been added
- * to src_reg, so no need to do relative load with insn->off offset
- */
- insn_off = 0;
+ /* movabsq r10, VSYSCALL_ADDR */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
+ (u32)(long)VSYSCALL_ADDR);
- /* movabsq r11, limit */
- EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
- EMIT((u32)limit, 4);
- EMIT(limit >> 32, 4);
+ /* mov src_reg, r11 */
+ EMIT_mov(AUX_REG, src_reg);
if (insn->off) {
- /* add src_reg, insn->off */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
+ /* add r11, insn->off */
+ maybe_emit_1mod(&prog, AUX_REG, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
}
- /* cmp src_reg, r11 */
- maybe_emit_mod(&prog, src_reg, AUX_REG, true);
- EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+ /* sub r11, r10 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
+
+ /* movabsq r10, limit */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
+ (u32)(long)limit);
- /* if unsigned '>=', goto load */
- EMIT2(X86_JAE, 0);
+ /* cmp r10, r11 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
+
+ /* if unsigned '>', goto load */
+ EMIT2(X86_JA, 0);
end_of_jmp = prog;
/* xor dst_reg, dst_reg */
@@ -1862,18 +1927,6 @@ populate_extable:
/* populate jmp_offset for JMP above */
start_of_ldx[-1] = prog - start_of_ldx;
- if (insn->off && src_reg != dst_reg) {
- /* sub src_reg, insn->off
- * Restore src_reg after "add src_reg, insn->off" in prev
- * if statement. But if src_reg == dst_reg, emit_ldx
- * above already clobbered src_reg, so no need to restore.
- * If add src_reg, insn->off was unnecessary, no need to
- * restore either.
- */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
- }
-
if (!bpf_prog->aux->extable)
break;
@@ -1970,22 +2023,28 @@ populate_extable:
return err;
break;
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+ start_of_ldx = prog;
+ err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
+ dst_reg, src_reg, X86_REG_R12, insn->off);
+ if (err)
+ return err;
+ goto populate_extable;
+
/* call */
case BPF_JMP | BPF_CALL: {
- int offs;
+ u8 *ip = image + addrs[i - 1];
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
- if (!imm32)
- return -EINVAL;
- offs = 7 + x86_call_depth_emit_accounting(&prog, func);
- } else {
- if (!imm32)
- return -EINVAL;
- offs = x86_call_depth_emit_accounting(&prog, func);
+ ip += 7;
}
- if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+ if (!imm32)
+ return -EINVAL;
+ ip += x86_call_depth_emit_accounting(&prog, func, ip);
+ if (emit_call(&prog, func, ip))
return -EINVAL;
break;
}
@@ -2835,7 +2894,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* Direct-call fentry stub, as such it needs accounting for the
* __fentry__ call.
*/
- x86_call_depth_emit_accounting(&prog, NULL);
+ x86_call_depth_emit_accounting(&prog, NULL, image);
}
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
@@ -3004,12 +3063,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
@@ -3369,6 +3425,11 @@ bool bpf_jit_supports_subprog_tailcalls(void)
return true;
}
+bool bpf_jit_supports_percpu_insn(void)
+{
+ return true;
+}
+
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
@@ -3472,7 +3533,28 @@ bool bpf_jit_supports_arena(void)
return true;
}
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+ if (!in_arena)
+ return true;
+ switch (insn->code) {
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm == (BPF_AND | BPF_FETCH) ||
+ insn->imm == (BPF_OR | BPF_FETCH) ||
+ insn->imm == (BPF_XOR | BPF_FETCH))
+ return false;
+ }
+ return true;
+}
+
bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
+u64 bpf_arch_uaddress_limit(void)
+{
+ return 0;
+}
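
The register arithmetic emitted for BPF_PROBE_MEM above implements the window described in its comment: the probed load is only attempted for addresses above the end of user space and below the legacy vsyscall page, and dst_reg is zeroed otherwise. The equivalent C-level predicate, as a hedged sketch (hypothetical helper name):

static bool probe_addr_in_window_sketch(u64 addr)
{
	return addr > TASK_SIZE_MAX + PAGE_SIZE && addr < VSYSCALL_ADDR;
}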
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index c10083a8e68e..de0f9e5f9f73 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2600,8 +2600,7 @@ out_image:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
- if (image) {
- bpf_jit_binary_lock_ro(header);
+ if (image && !bpf_jit_binary_lock_ro(header)) {
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 87313701f069..f5dbd25651e0 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -35,12 +35,6 @@ struct sim_dev_reg {
struct sim_reg sim_reg;
};
-struct sim_reg_op {
- void (*init)(struct sim_dev_reg *reg);
- void (*read)(struct sim_dev_reg *reg, u32 value);
- void (*write)(struct sim_dev_reg *reg, u32 value);
-};
-
#define MB (1024 * 1024)
#define KB (1024)
#define SIZE_TO_MASK(size) (~(size - 1))
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 0cc9520666ef..39255f0eb14d 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -518,7 +518,34 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
{
struct resource *conflict;
- if (!early && !acpi_disabled) {
+ if (early) {
+
+ /*
+ * Don't try to do this check unless configuration type 1
+ * is available. How about type 2?
+ */
+
+ /*
+ * 946f2ee5c731 ("Check that MCFG points to an e820
+ * reserved area") added this E820 check in 2006 to work
+ * around BIOS defects.
+ *
+ * Per PCI Firmware r3.3, sec 4.1.2, ECAM space must be
+ * reserved by a PNP0C02 resource, but it need not be
+ * mentioned in E820. Before the ACPI interpreter is
+ * available, we can't check for PNP0C02 resources, so
+ * there's no reliable way to verify the region in this
+ * early check. Keep it only for the old machines that
+ * motivated 946f2ee5c731.
+ */
+ if (dmi_get_bios_year() < 2016 && raw_pci_ops)
+ return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+ "E820 entry");
+
+ return true;
+ }
+
+ if (!acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
"ACPI motherboard resource"))
return true;
@@ -551,16 +578,7 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
* For MCFG information constructed from hotpluggable host bridge's
* _CBA method, just assume it's reserved.
*/
- if (pci_mmcfg_running_state)
- return true;
-
- /* Don't try to do this check unless configuration
- type 1 is available. how about type 2 ?*/
- if (raw_pci_ops)
- return is_mmconf_reserved(e820__mapped_all, cfg, dev,
- "E820 entry");
-
- return false;
+ return pci_mmcfg_running_state;
}
static void __init pci_mmcfg_reject_broken(int early)
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
index f3aab76e357a..4b18c6404363 100644
--- a/arch/x86/pci/olpc.c
+++ b/arch/x86/pci/olpc.c
@@ -154,9 +154,6 @@ static const uint32_t ehci_hdr[] = { /* dev f function 4 - devfn = 7d */
0x0, 0x40, 0x0, 0x40a, /* CapPtr INT-D, IRQA */
0xc8020001, 0x0, 0x0, 0x0, /* Capabilities - 40 is R/O, 44 is
mask 8103 (power control) */
-#if 0
- 0x1, 0x40080000, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */
-#endif
0x01000001, 0x0, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */
0x2020, 0x0, 0x0, 0x0, /* (EHCI page 8) 60 SBRN (R/O),
61 FLADJ (R/W), PORTWAKECAP */
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index f32451bdcfdd..f8126821a94d 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -139,7 +139,6 @@ void __init x86_ce4100_early_setup(void)
x86_init.resources.probe_roms = x86_init_noop;
x86_init.mpparse.find_mptable = x86_init_noop;
x86_init.mpparse.early_parse_smp_cfg = x86_init_noop;
- x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
x86_init.pci.init = ce4100_pci_init;
x86_init.pci.init_irq = sdv_pci_init;
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index b42bfdab01a9..c5f3bbdbdcfe 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -62,11 +62,10 @@ static int iris_probe(struct platform_device *pdev)
return 0;
}
-static int iris_remove(struct platform_device *pdev)
+static void iris_remove(struct platform_device *pdev)
{
pm_power_off = old_pm_power_off;
printk(KERN_INFO "Iris power_off handler uninstalled.\n");
- return 0;
}
static struct platform_driver iris_driver = {
@@ -74,7 +73,7 @@ static struct platform_driver iris_driver = {
.name = "iris",
},
.probe = iris_probe,
- .remove = iris_remove,
+ .remove_new = iris_remove,
};
static struct resource iris_resources[] = {
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index f067ac780ba7..6a9c42de74e7 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -144,7 +144,7 @@ static int xo1_pm_probe(struct platform_device *pdev)
return 0;
}
-static int xo1_pm_remove(struct platform_device *pdev)
+static void xo1_pm_remove(struct platform_device *pdev)
{
if (strcmp(pdev->name, "cs5535-pms") == 0)
pms_base = 0;
@@ -152,7 +152,6 @@ static int xo1_pm_remove(struct platform_device *pdev)
acpi_base = 0;
pm_power_off = NULL;
- return 0;
}
static struct platform_driver cs5535_pms_driver = {
@@ -160,7 +159,7 @@ static struct platform_driver cs5535_pms_driver = {
.name = "cs5535-pms",
},
.probe = xo1_pm_probe,
- .remove = xo1_pm_remove,
+ .remove_new = xo1_pm_remove,
};
static struct platform_driver cs5535_acpi_driver = {
@@ -168,7 +167,7 @@ static struct platform_driver cs5535_acpi_driver = {
.name = "olpc-xo1-pm-acpi",
},
.probe = xo1_pm_probe,
- .remove = xo1_pm_remove,
+ .remove_new = xo1_pm_remove,
};
static int __init xo1_pm_init(void)
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 89f25af4b3c3..46d42ff6e18a 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -598,7 +598,7 @@ err_ebook:
return r;
}
-static int xo1_sci_remove(struct platform_device *pdev)
+static void xo1_sci_remove(struct platform_device *pdev)
{
free_irq(sci_irq, pdev);
cancel_work_sync(&sci_work);
@@ -608,7 +608,6 @@ static int xo1_sci_remove(struct platform_device *pdev)
free_ebook_switch();
free_power_button();
acpi_base = 0;
- return 0;
}
static struct platform_driver xo1_sci_driver = {
@@ -617,7 +616,7 @@ static struct platform_driver xo1_sci_driver = {
.dev_groups = lid_groups,
},
.probe = xo1_sci_probe,
- .remove = xo1_sci_remove,
+ .remove_new = xo1_sci_remove,
.suspend = xo1_sci_suspend,
.resume = xo1_sci_resume,
};
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index bc31863c5ee6..ebdfd7b84feb 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD := y
purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string.o
@@ -30,19 +29,12 @@ LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
targets += purgatory.ro purgatory.chk
-# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KMSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
-
# These are adjustments to the compiler flags used for objects that
# make up the standalone purgatory.ro
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS += -fpic -fvisibility=hidden
PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index f614009d3e4e..a0fb39abc5c8 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -7,15 +7,6 @@
#
#
-# Sanitizer runtimes are unavailable and cannot be linked here.
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KMSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
always-y := realmode.bin realmode.relocs
wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
@@ -76,5 +67,3 @@ KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
-I$(srctree)/arch/x86/boot
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index af38469afd14..5770c8097f32 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -64,7 +64,9 @@ BEGIN {
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
- rex_expr = "^REX(\\.[XRWB]+)*"
+ rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))"
+ rex2_expr = "\\(REX2\\)"
+ no_rex2_expr = "\\(!REX2\\)"
fpu_expr = "^ESC" # TODO
lprefix1_expr = "\\((66|!F3)\\)"
@@ -81,6 +83,8 @@ BEGIN {
vexonly_expr = "\\(v\\)"
# All opcodes with (ev) superscript supports *only* EVEX prefix
evexonly_expr = "\\(ev\\)"
+ # (es) is the same as (ev) but also "SCALABLE" i.e. W and pp determine operand size
+ evex_scalable_expr = "\\(es\\)"
prefix_expr = "\\(Prefix\\)"
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -99,6 +103,7 @@ BEGIN {
prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
prefix_num["EVEX"] = "INAT_PFX_EVEX"
+ prefix_num["REX2"] = "INAT_PFX_REX2"
clear_vars()
}
@@ -314,6 +319,10 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(ext, force64_expr))
flags = add_flags(flags, "INAT_FORCE64")
+ # check REX2 not allowed
+ if (match(ext, no_rex2_expr))
+ flags = add_flags(flags, "INAT_NO_REX2")
+
# check REX prefix
if (match(opcode, rex_expr))
flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
@@ -325,6 +334,8 @@ function convert_operands(count,opnd, i,j,imm,mod)
# check VEX codes
if (match(ext, evexonly_expr))
flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+ else if (match(ext, evex_scalable_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY | INAT_EVEX_SCALABLE")
else if (match(ext, vexonly_expr))
flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
@@ -351,6 +362,8 @@ function convert_operands(count,opnd, i,j,imm,mod)
lptable3[idx] = add_flags(lptable3[idx],flags)
variant = "INAT_VARIANT"
}
+ if (match(ext, rex2_expr))
+ table[idx] = add_flags(table[idx], "INAT_REX2_VARIANT")
if (!match(ext, lprefix_expr)){
table[idx] = add_flags(table[idx],flags)
}
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b029fb81ebee..c101bed61940 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -11,41 +11,42 @@
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
-static Elf_Ehdr ehdr;
-static unsigned long shnum;
-static unsigned int shstrndx;
-static unsigned int shsymtabndx;
-static unsigned int shxsymtabndx;
+static Elf_Ehdr ehdr;
+static unsigned long shnum;
+static unsigned int shstrndx;
+static unsigned int shsymtabndx;
+static unsigned int shxsymtabndx;
static int sym_index(Elf_Sym *sym);
struct relocs {
- uint32_t *offset;
- unsigned long count;
- unsigned long size;
+ uint32_t *offset;
+ unsigned long count;
+ unsigned long size;
};
-static struct relocs relocs16;
-static struct relocs relocs32;
+static struct relocs relocs16;
+static struct relocs relocs32;
+
#if ELF_BITS == 64
-static struct relocs relocs32neg;
-static struct relocs relocs64;
-#define FMT PRIu64
+static struct relocs relocs32neg;
+static struct relocs relocs64;
+# define FMT PRIu64
#else
-#define FMT PRIu32
+# define FMT PRIu32
#endif
struct section {
- Elf_Shdr shdr;
- struct section *link;
- Elf_Sym *symtab;
- Elf32_Word *xsymtab;
- Elf_Rel *reltab;
- char *strtab;
+ Elf_Shdr shdr;
+ struct section *link;
+ Elf_Sym *symtab;
+ Elf32_Word *xsymtab;
+ Elf_Rel *reltab;
+ char *strtab;
};
-static struct section *secs;
+static struct section *secs;
-static const char * const sym_regex_kernel[S_NSYMTYPES] = {
+static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
 * Following symbols have been audited. Their values are constant and do
* not change if bzImage is loaded at a different physical address than
@@ -115,13 +116,13 @@ static const char * const sym_regex_realmode[S_NSYMTYPES] = {
"^pa_",
};
-static const char * const *sym_regex;
+static const char * const *sym_regex;
+
+static regex_t sym_regex_c[S_NSYMTYPES];
-static regex_t sym_regex_c[S_NSYMTYPES];
static int is_reloc(enum symtype type, const char *sym_name)
{
- return sym_regex[type] &&
- !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
+ return sym_regex[type] && !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
static void regex_init(int use_real_mode)
@@ -139,8 +140,7 @@ static void regex_init(int use_real_mode)
if (!sym_regex[i])
continue;
- err = regcomp(&sym_regex_c[i], sym_regex[i],
- REG_EXTENDED|REG_NOSUB);
+ err = regcomp(&sym_regex_c[i], sym_regex[i], REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf));
@@ -163,9 +163,10 @@ static const char *sym_type(unsigned type)
#undef SYM_TYPE
};
const char *name = "unknown sym type name";
- if (type < ARRAY_SIZE(type_name)) {
+
+ if (type < ARRAY_SIZE(type_name))
name = type_name[type];
- }
+
return name;
}
@@ -179,9 +180,10 @@ static const char *sym_bind(unsigned bind)
#undef SYM_BIND
};
const char *name = "unknown sym bind name";
- if (bind < ARRAY_SIZE(bind_name)) {
+
+ if (bind < ARRAY_SIZE(bind_name))
name = bind_name[bind];
- }
+
return name;
}
@@ -196,9 +198,10 @@ static const char *sym_visibility(unsigned visibility)
#undef SYM_VISIBILITY
};
const char *name = "unknown sym visibility name";
- if (visibility < ARRAY_SIZE(visibility_name)) {
+
+ if (visibility < ARRAY_SIZE(visibility_name))
name = visibility_name[visibility];
- }
+
return name;
}
@@ -244,9 +247,10 @@ static const char *rel_type(unsigned type)
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
- if (type < ARRAY_SIZE(type_name) && type_name[type]) {
+
+ if (type < ARRAY_SIZE(type_name) && type_name[type])
name = type_name[type];
- }
+
return name;
}
@@ -256,15 +260,14 @@ static const char *sec_name(unsigned shndx)
const char *name;
sec_strtab = secs[shstrndx].strtab;
name = "<noname>";
- if (shndx < shnum) {
+
+ if (shndx < shnum)
name = sec_strtab + secs[shndx].shdr.sh_name;
- }
- else if (shndx == SHN_ABS) {
+ else if (shndx == SHN_ABS)
name = "ABSOLUTE";
- }
- else if (shndx == SHN_COMMON) {
+ else if (shndx == SHN_COMMON)
name = "COMMON";
- }
+
return name;
}
@@ -272,18 +275,19 @@ static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
{
const char *name;
name = "<noname>";
- if (sym->st_name) {
+
+ if (sym->st_name)
name = sym_strtab + sym->st_name;
- }
- else {
+ else
name = sec_name(sym_index(sym));
- }
+
return name;
}
static Elf_Sym *sym_lookup(const char *symname)
{
int i;
+
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
long nsyms;
@@ -309,14 +313,15 @@ static Elf_Sym *sym_lookup(const char *symname)
}
#if BYTE_ORDER == LITTLE_ENDIAN
-#define le16_to_cpu(val) (val)
-#define le32_to_cpu(val) (val)
-#define le64_to_cpu(val) (val)
+# define le16_to_cpu(val) (val)
+# define le32_to_cpu(val) (val)
+# define le64_to_cpu(val) (val)
#endif
+
#if BYTE_ORDER == BIG_ENDIAN
-#define le16_to_cpu(val) bswap_16(val)
-#define le32_to_cpu(val) bswap_32(val)
-#define le64_to_cpu(val) bswap_64(val)
+# define le16_to_cpu(val) bswap_16(val)
+# define le32_to_cpu(val) bswap_32(val)
+# define le64_to_cpu(val) bswap_64(val)
#endif
static uint16_t elf16_to_cpu(uint16_t val)
@@ -337,13 +342,13 @@ static uint64_t elf64_to_cpu(uint64_t val)
{
return le64_to_cpu(val);
}
-#define elf_addr_to_cpu(x) elf64_to_cpu(x)
-#define elf_off_to_cpu(x) elf64_to_cpu(x)
-#define elf_xword_to_cpu(x) elf64_to_cpu(x)
+# define elf_addr_to_cpu(x) elf64_to_cpu(x)
+# define elf_off_to_cpu(x) elf64_to_cpu(x)
+# define elf_xword_to_cpu(x) elf64_to_cpu(x)
#else
-#define elf_addr_to_cpu(x) elf32_to_cpu(x)
-#define elf_off_to_cpu(x) elf32_to_cpu(x)
-#define elf_xword_to_cpu(x) elf32_to_cpu(x)
+# define elf_addr_to_cpu(x) elf32_to_cpu(x)
+# define elf_off_to_cpu(x) elf32_to_cpu(x)
+# define elf_xword_to_cpu(x) elf32_to_cpu(x)
#endif
static int sym_index(Elf_Sym *sym)
@@ -365,22 +370,17 @@ static int sym_index(Elf_Sym *sym)
static void read_ehdr(FILE *fp)
{
- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
- die("Cannot read ELF header: %s\n",
- strerror(errno));
- }
- if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ die("Cannot read ELF header: %s\n", strerror(errno));
+ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
die("No ELF magic\n");
- }
- if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) {
+ if (ehdr.e_ident[EI_CLASS] != ELF_CLASS)
die("Not a %d bit executable\n", ELF_BITS);
- }
- if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
+ if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB)
die("Not a LSB ELF executable\n");
- }
- if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
+ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
die("Unknown ELF version\n");
- }
+
/* Convert the fields to native endian */
ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
@@ -439,19 +439,18 @@ static void read_shdrs(FILE *fp)
Elf_Shdr shdr;
secs = calloc(shnum, sizeof(struct section));
- if (!secs) {
- die("Unable to allocate %ld section headers\n",
- shnum);
- }
- if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
- die("Seek to %" FMT " failed: %s\n",
- ehdr.e_shoff, strerror(errno));
- }
+ if (!secs)
+ die("Unable to allocate %ld section headers\n", shnum);
+
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+ die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
+
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
+
if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
- die("Cannot read ELF section headers %d/%ld: %s\n",
- i, shnum, strerror(errno));
+ die("Cannot read ELF section headers %d/%ld: %s\n", i, shnum, strerror(errno));
+
sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
@@ -471,31 +470,28 @@ static void read_shdrs(FILE *fp)
static void read_strtabs(FILE *fp)
{
int i;
+
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_STRTAB) {
+
+ if (sec->shdr.sh_type != SHT_STRTAB)
continue;
- }
+
sec->strtab = malloc(sec->shdr.sh_size);
- if (!sec->strtab) {
- die("malloc of %" FMT " bytes for strtab failed\n",
- sec->shdr.sh_size);
- }
- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
- die("Seek to %" FMT " failed: %s\n",
- sec->shdr.sh_offset, strerror(errno));
- }
- if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
- != sec->shdr.sh_size) {
- die("Cannot read symbol table: %s\n",
- strerror(errno));
- }
+ if (!sec->strtab)
+ die("malloc of %" FMT " bytes for strtab failed\n", sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %" FMT " failed: %s\n", sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) != sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
}
}
static void read_symtabs(FILE *fp)
{
- int i,j;
+ int i, j;
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
@@ -504,19 +500,15 @@ static void read_symtabs(FILE *fp)
switch (sec->shdr.sh_type) {
case SHT_SYMTAB_SHNDX:
sec->xsymtab = malloc(sec->shdr.sh_size);
- if (!sec->xsymtab) {
- die("malloc of %" FMT " bytes for xsymtab failed\n",
- sec->shdr.sh_size);
- }
- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
- die("Seek to %" FMT " failed: %s\n",
- sec->shdr.sh_offset, strerror(errno));
- }
- if (fread(sec->xsymtab, 1, sec->shdr.sh_size, fp)
- != sec->shdr.sh_size) {
- die("Cannot read extended symbol table: %s\n",
- strerror(errno));
- }
+ if (!sec->xsymtab)
+ die("malloc of %" FMT " bytes for xsymtab failed\n", sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %" FMT " failed: %s\n", sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->xsymtab, 1, sec->shdr.sh_size, fp) != sec->shdr.sh_size)
+ die("Cannot read extended symbol table: %s\n", strerror(errno));
+
shxsymtabndx = i;
continue;
@@ -524,19 +516,15 @@ static void read_symtabs(FILE *fp)
num_syms = sec->shdr.sh_size / sizeof(Elf_Sym);
sec->symtab = malloc(sec->shdr.sh_size);
- if (!sec->symtab) {
- die("malloc of %" FMT " bytes for symtab failed\n",
- sec->shdr.sh_size);
- }
- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
- die("Seek to %" FMT " failed: %s\n",
- sec->shdr.sh_offset, strerror(errno));
- }
- if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
- != sec->shdr.sh_size) {
- die("Cannot read symbol table: %s\n",
- strerror(errno));
- }
+ if (!sec->symtab)
+ die("malloc of %" FMT " bytes for symtab failed\n", sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %" FMT " failed: %s\n", sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) != sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
for (j = 0; j < num_syms; j++) {
Elf_Sym *sym = &sec->symtab[j];
@@ -557,28 +545,27 @@ static void read_symtabs(FILE *fp)
static void read_relocs(FILE *fp)
{
- int i,j;
+ int i, j;
+
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_REL_TYPE) {
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
- }
+
sec->reltab = malloc(sec->shdr.sh_size);
- if (!sec->reltab) {
- die("malloc of %" FMT " bytes for relocs failed\n",
- sec->shdr.sh_size);
- }
- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
- die("Seek to %" FMT " failed: %s\n",
- sec->shdr.sh_offset, strerror(errno));
- }
- if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
- != sec->shdr.sh_size) {
- die("Cannot read symbol table: %s\n",
- strerror(errno));
- }
+ if (!sec->reltab)
+ die("malloc of %" FMT " bytes for relocs failed\n", sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %" FMT " failed: %s\n", sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel = &sec->reltab[j];
+
rel->r_offset = elf_addr_to_cpu(rel->r_offset);
rel->r_info = elf_xword_to_cpu(rel->r_info);
#if (SHT_REL_TYPE == SHT_RELA)
@@ -601,23 +588,27 @@ static void print_absolute_symbols(void)
printf("Absolute symbols\n");
printf(" Num: Value Size Type Bind Visibility Name\n");
+
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
char *sym_strtab;
int j;
- if (sec->shdr.sh_type != SHT_SYMTAB) {
+ if (sec->shdr.sh_type != SHT_SYMTAB)
continue;
- }
+
sym_strtab = sec->link->strtab;
+
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
Elf_Sym *sym;
const char *name;
+
sym = &sec->symtab[j];
name = sym_name(sym_strtab, sym);
- if (sym->st_shndx != SHN_ABS) {
+
+ if (sym->st_shndx != SHN_ABS)
continue;
- }
+
printf(format,
j, sym->st_value, sym->st_size,
sym_type(ELF_ST_TYPE(sym->st_info)),
@@ -645,34 +636,37 @@ static void print_absolute_relocs(void)
char *sym_strtab;
Elf_Sym *sh_symtab;
int j;
- if (sec->shdr.sh_type != SHT_REL_TYPE) {
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
- }
+
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
- if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
continue;
- }
+
/*
* Do not perform relocations in .notes section; any
* values there are meant for pre-boot consumption (e.g.
* startup_xen).
*/
- if (sec_applies->shdr.sh_type == SHT_NOTE) {
+ if (sec_applies->shdr.sh_type == SHT_NOTE)
continue;
- }
+
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
+
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel;
Elf_Sym *sym;
const char *name;
+
rel = &sec->reltab[j];
sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
name = sym_name(sym_strtab, sym);
- if (sym->st_shndx != SHN_ABS) {
+
+ if (sym->st_shndx != SHN_ABS)
continue;
- }
/* Absolute symbols are not relocated if bzImage is
* loaded at a non-compiled address. Display a warning
@@ -691,10 +685,8 @@ static void print_absolute_relocs(void)
continue;
if (!printed) {
- printf("WARNING: Absolute relocations"
- " present\n");
- printf("Offset Info Type Sym.Value "
- "Sym.Name\n");
+ printf("WARNING: Absolute relocations present\n");
+ printf("Offset Info Type Sym.Value Sym.Name\n");
printed = 1;
}
@@ -718,8 +710,8 @@ static void add_reloc(struct relocs *r, uint32_t offset)
void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
if (!mem)
- die("realloc of %ld entries for relocs failed\n",
- newsize);
+ die("realloc of %ld entries for relocs failed\n", newsize);
+
r->offset = mem;
r->size = newsize;
}
@@ -730,6 +722,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
+
/* Walk through the relocations */
for (i = 0; i < shnum; i++) {
char *sym_strtab;
@@ -738,16 +731,25 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
int j;
struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_REL_TYPE) {
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
- }
+
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
- if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
continue;
- }
+
+ /*
+ * Do not perform relocations in .notes sections; any
+ * values there are meant for pre-boot consumption (e.g.
+ * startup_xen).
+ */
+ if (sec_applies->shdr.sh_type == SHT_NOTE)
+ continue;
+
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
+
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel = &sec->reltab[j];
Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
@@ -781,14 +783,16 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
* kernel data and does not require special treatment.
*
*/
-static int per_cpu_shndx = -1;
+static int per_cpu_shndx = -1;
static Elf_Addr per_cpu_load_addr;
static void percpu_init(void)
{
int i;
+
for (i = 0; i < shnum; i++) {
ElfW(Sym) *sym;
+
if (strcmp(sec_name(i), ".data..percpu"))
continue;
@@ -801,6 +805,7 @@ static void percpu_init(void)
per_cpu_shndx = i;
per_cpu_load_addr = sym->st_value;
+
return;
}
}
@@ -871,8 +876,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
* Only used by jump labels
*/
if (is_percpu_sym(sym, symname))
- die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n",
- symname);
+ die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n", symname);
break;
case R_X86_64_32:
@@ -892,8 +896,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
if (is_reloc(S_ABS, symname))
break;
- die("Invalid absolute %s relocation: %s\n",
- rel_type(r_type), symname);
+ die("Invalid absolute %s relocation: %s\n", rel_type(r_type), symname);
break;
}
@@ -913,8 +916,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
break;
default:
- die("Unsupported relocation type: %s (%d)\n",
- rel_type(r_type), r_type);
+ die("Unsupported relocation type: %s (%d)\n", rel_type(r_type), r_type);
break;
}
@@ -951,8 +953,7 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
if (is_reloc(S_ABS, symname))
break;
- die("Invalid absolute %s relocation: %s\n",
- rel_type(r_type), symname);
+ die("Invalid absolute %s relocation: %s\n", rel_type(r_type), symname);
break;
}
@@ -960,16 +961,14 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
break;
default:
- die("Unsupported relocation type: %s (%d)\n",
- rel_type(r_type), r_type);
+ die("Unsupported relocation type: %s (%d)\n", rel_type(r_type), r_type);
break;
}
return 0;
}
-static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
- const char *symname)
+static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, const char *symname)
{
unsigned r_type = ELF32_R_TYPE(rel->r_info);
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
@@ -1004,9 +1003,7 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
if (!is_reloc(S_LIN, symname))
break;
}
- die("Invalid %s %s relocation: %s\n",
- shn_abs ? "absolute" : "relative",
- rel_type(r_type), symname);
+ die("Invalid %s %s relocation: %s\n", shn_abs ? "absolute" : "relative", rel_type(r_type), symname);
break;
case R_386_32:
@@ -1027,14 +1024,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
add_reloc(&relocs32, rel->r_offset);
break;
}
- die("Invalid %s %s relocation: %s\n",
- shn_abs ? "absolute" : "relative",
- rel_type(r_type), symname);
+ die("Invalid %s %s relocation: %s\n", shn_abs ? "absolute" : "relative", rel_type(r_type), symname);
break;
default:
- die("Unsupported relocation type: %s (%d)\n",
- rel_type(r_type), r_type);
+ die("Unsupported relocation type: %s (%d)\n", rel_type(r_type), r_type);
break;
}
@@ -1046,7 +1040,10 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
static int cmp_relocs(const void *va, const void *vb)
{
const uint32_t *a, *b;
- a = va; b = vb;
+
+ a = va;
+ b = vb;
+
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
@@ -1060,6 +1057,7 @@ static int write32(uint32_t v, FILE *f)
unsigned char buf[4];
put_unaligned_le32(v, buf);
+
return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}
@@ -1072,8 +1070,7 @@ static void emit_relocs(int as_text, int use_real_mode)
{
int i;
int (*write_reloc)(uint32_t, FILE *) = write32;
- int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
- const char *symname);
+ int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, const char *symname);
#if ELF_BITS == 64
if (!use_real_mode)
@@ -1160,6 +1157,7 @@ static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
rel_type(ELF_R_TYPE(rel->r_info)),
symname,
sec_name(sym_index(sym)));
+
return 0;
}
@@ -1185,19 +1183,24 @@ void process(FILE *fp, int use_real_mode, int as_text,
read_strtabs(fp);
read_symtabs(fp);
read_relocs(fp);
+
if (ELF_BITS == 64)
percpu_init();
+
if (show_absolute_syms) {
print_absolute_symbols();
return;
}
+
if (show_absolute_relocs) {
print_absolute_relocs();
return;
}
+
if (show_reloc_info) {
print_reloc_info();
return;
}
+
emit_relocs(as_text, use_real_mode);
}
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index b86d634730b2..6a77ea6434ff 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -3,12 +3,6 @@
# Building vDSO images for x86.
#
-# do not instrument on vdso because KASAN is not compatible with user mode
-KASAN_SANITIZE := n
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
VDSO64-y := y
vdso-install-$(VDSO64-y) += vdso.so
@@ -63,7 +57,6 @@ quiet_cmd_vdso = VDSO $@
cmd_vdso = $(CC) -nostdlib -o $@ \
$(CC_FLAGS_LTO) $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+ sh $(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
-GCOV_PROFILE := n
diff --git a/arch/x86/video/Makefile b/arch/x86/video/Makefile
index 5ebe48752ffc..dcfbe7a5912c 100644
--- a/arch/x86/video/Makefile
+++ b/arch/x86/video/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_FB_CORE) += fbdev.o
+
+obj-y += video-common.o
diff --git a/arch/x86/video/fbdev.c b/arch/x86/video/video-common.c
index 1dd6528cc947..81fc97a2a837 100644
--- a/arch/x86/video/fbdev.c
+++ b/arch/x86/video/video-common.c
@@ -7,11 +7,11 @@
*
*/
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <asm/fb.h>
+
+#include <asm/video.h>
pgprot_t pgprot_framebuffer(pgprot_t prot,
unsigned long vm_start, unsigned long vm_end,
@@ -25,20 +25,17 @@ pgprot_t pgprot_framebuffer(pgprot_t prot,
}
EXPORT_SYMBOL(pgprot_framebuffer);
-int fb_is_primary_device(struct fb_info *info)
+bool video_is_primary_device(struct device *dev)
{
- struct device *device = info->device;
- struct pci_dev *pci_dev;
+ struct pci_dev *pdev;
- if (!device || !dev_is_pci(device))
- return 0;
+ if (!dev_is_pci(dev))
+ return false;
- pci_dev = to_pci_dev(device);
+ pdev = to_pci_dev(dev);
- if (pci_dev == vga_default_device())
- return 1;
- return 0;
+ return (pdev == vga_default_device());
}
-EXPORT_SYMBOL(fb_is_primary_device);
+EXPORT_SYMBOL(video_is_primary_device);
MODULE_LICENSE("GPL");
diff --git a/arch/x86/virt/Makefile b/arch/x86/virt/Makefile
index 1e36502cd738..ea343fc392dc 100644
--- a/arch/x86/virt/Makefile
+++ b/arch/x86/virt/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += vmx/
+obj-y += svm/ vmx/
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index cffe1157a90a..0ae10535c699 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -77,7 +77,7 @@ static int __mfd_enable(unsigned int cpu)
{
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -98,7 +98,7 @@ static int __snp_enable(unsigned int cpu)
{
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -163,6 +163,42 @@ bool snp_probe_rmptable_info(void)
return true;
}
+static void __init __snp_fixup_e820_tables(u64 pa)
+{
+ if (IS_ALIGNED(pa, PMD_SIZE))
+ return;
+
+ /*
+ * Handle cases where the RMP table placement by the BIOS is not
+ * 2M aligned and the kexec kernel could try to allocate
+	 * from within that chunk, which then causes a fatal RMP fault.
+ *
+ * The e820_table needs to be updated as it is converted to
+ * kernel memory resources and used by KEXEC_FILE_LOAD syscall
+ * to load kexec segments.
+ *
+ * The e820_table_firmware needs to be updated as it is exposed
+ * to sysfs and used by the KEXEC_LOAD syscall to load kexec
+ * segments.
+ *
+	 * The e820_table_kexec needs to be updated as it is passed to
+ * the kexec-ed kernel.
+ */
+ pa = ALIGN_DOWN(pa, PMD_SIZE);
+ if (e820__mapped_any(pa, pa + PMD_SIZE, E820_TYPE_RAM)) {
+ pr_info("Reserving start/end of RMP table on a 2MB boundary [0x%016llx]\n", pa);
+ e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ }
+}
+
+void __init snp_fixup_e820_tables(void)
+{
+ __snp_fixup_e820_tables(probed_rmp_base);
+ __snp_fixup_e820_tables(probed_rmp_base + probed_rmp_size);
+}
+
/*
* Do the necessary preparations which are verified by the firmware as
* described in the SNP_INIT_EX firmware command description in the SNP
@@ -174,11 +210,11 @@ static int __init snp_rmptable_init(void)
u64 rmptable_size;
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
if (!amd_iommu_snp_en)
- return 0;
+ goto nosnp;
if (!probed_rmp_size)
goto nosnp;
@@ -225,7 +261,7 @@ skip_enable:
return 0;
nosnp:
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
return -ENOSYS;
}
@@ -246,7 +282,7 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
{
struct rmpentry *large_entry, *entry;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return ERR_PTR(-ENODEV);
entry = get_rmpentry(pfn);
@@ -363,7 +399,7 @@ int psmash(u64 pfn)
unsigned long paddr = pfn << PAGE_SHIFT;
int ret;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
if (!pfn_valid(pfn))
@@ -472,7 +508,7 @@ static int rmpupdate(u64 pfn, struct rmp_state *state)
unsigned long paddr = pfn << PAGE_SHIFT;
int ret, level;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
level = RMP_TO_PG_LEVEL(state->pagesize);
@@ -558,3 +594,13 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
spin_unlock(&snp_leaked_pages_list_lock);
}
EXPORT_SYMBOL_GPL(snp_leak_pages);
+
+void kdump_sev_callback(void)
+{
+ /*
+ * Do wbinvd() on remote CPUs when SNP is enabled in order to
+ * safely do SNP_SHUTDOWN on the local CPU.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ wbinvd();
+}
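
As a rough, standalone illustration of the 2MB-boundary fixup above (not the kernel implementation: reserve_2mb_chunk and the sample addresses are made up for this sketch), the alignment arithmetic works out as follows:

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE		(2ULL * 1024 * 1024)		/* 2 MB */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

/* Hypothetical stand-in for the e820 updates: just report the range. */
static void reserve_2mb_chunk(uint64_t pa)
{
	if (IS_ALIGNED(pa, PMD_SIZE))
		return;			/* edge already on a 2MB boundary */
	pa = ALIGN_DOWN(pa, PMD_SIZE);
	printf("reserve [0x%016llx - 0x%016llx)\n",
	       (unsigned long long)pa, (unsigned long long)(pa + PMD_SIZE));
}

int main(void)
{
	uint64_t rmp_base = 0x100001000ULL;	/* deliberately not 2MB aligned */
	uint64_t rmp_size = 0x000200000ULL;

	reserve_2mb_chunk(rmp_base);		/* start edge of the RMP table */
	reserve_2mb_chunk(rmp_base + rmp_size);	/* end edge of the RMP table */
	return 0;
}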
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 4d6826a76f78..49a1c6890b55 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -27,7 +27,6 @@
#include <linux/log2.h>
#include <linux/acpi.h>
#include <linux/suspend.h>
-#include <linux/acpi.h>
#include <asm/page.h>
#include <asm/special_insns.h>
#include <asm/msr-index.h>
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a01ca255b0c6..5f3a69f6ec34 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,8 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-#include <linux/memblock.h>
-#endif
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ace2eb054053..9ba53814ed6a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -219,13 +219,21 @@ static __read_mostly unsigned int cpuid_leaf5_edx_val;
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
unsigned int *cx, unsigned int *dx)
{
- unsigned maskebx = ~0;
+ unsigned int maskebx = ~0;
+ unsigned int or_ebx = 0;
/*
* Mask out inconvenient features, to try and disable as many
* unsupported kernel subsystems as possible.
*/
switch (*ax) {
+ case 0x1:
+ /* Replace initial APIC ID in bits 24-31 of EBX. */
+ /* See xen_pv_smp_config() for related topology preparations. */
+ maskebx = 0x00ffffff;
+ or_ebx = smp_processor_id() << 24;
+ break;
+
case CPUID_MWAIT_LEAF:
/* Synthesize the values.. */
*ax = 0;
@@ -248,6 +256,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
: "0" (*ax), "2" (*cx));
*bx &= maskebx;
+ *bx |= or_ebx;
}
static bool __init xen_check_mwait(void)
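
The maskebx/or_ebx handling above amounts to replacing bits 24-31 of the CPUID leaf 1 EBX value while leaving the lower bits intact. A minimal standalone sketch of that bit manipulation, with a made-up cpu_id standing in for smp_processor_id() and an invented raw EBX value:

#include <stdint.h>
#include <stdio.h>

/* Replace the initial APIC ID field (bits 24-31) of a CPUID.1 EBX value. */
static uint32_t patch_ebx_apic_id(uint32_t ebx, unsigned int cpu_id)
{
	uint32_t maskebx = 0x00ffffff;			/* keep bits 0-23 */
	uint32_t or_ebx  = (uint32_t)cpu_id << 24;	/* new APIC ID in bits 24-31 */

	return (ebx & maskebx) | or_ebx;
}

int main(void)
{
	uint32_t ebx = 0x05080800;	/* example raw CPUID.1 EBX */

	printf("patched EBX: 0x%08x\n", patch_ebx_apic_id(ebx, 3));
	return 0;
}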
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 27d1a5b7f571..ac41d83b38d3 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -154,9 +154,9 @@ static void __init xen_pv_smp_config(void)
u32 apicid = 0;
int i;
- topology_register_boot_apic(apicid++);
+ topology_register_boot_apic(apicid);
- for (i = 1; i < nr_cpu_ids; i++)
+ for (i = 0; i < nr_cpu_ids; i++)
topology_register_apic(apicid++, CPU_ACPIID_INVALID, true);
/* Pretend to be a proper enumerated system */
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 04101b984f24..758bcd47b72d 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -49,7 +49,7 @@ SYM_CODE_START(startup_xen)
ANNOTATE_NOENDBR
cld
- leaq (__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp
+ leaq __top_init_kernel_stack(%rip), %rsp
/* Set up %gs.
*
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index 720628c0d8b9..d6408c16d74e 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -10,5 +10,4 @@
obj-$(CONFIG_OF) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
# for CONFIG_OF_ALL_DTBS test
-dtstree := $(srctree)/$(src)
-dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb- := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 38bcecb0e457..a2b6bb5429f5 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -100,6 +100,10 @@ void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
unsigned long, unsigned long);
+#define flush_cache_all flush_cache_all
+#define flush_cache_range flush_cache_range
+#define flush_icache_range flush_icache_range
+#define flush_cache_page flush_cache_page
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
@@ -136,20 +140,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#else
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-
-#define flush_cache_vmap(start,end) do { } while (0)
-#define flush_cache_vmap_early(start,end) do { } while (0)
-#define flush_cache_vunmap(start,end) do { } while (0)
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-
#define flush_icache_range local_flush_icache_range
-#define flush_cache_page(vma, addr, pfn) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
#endif
@@ -162,15 +153,14 @@ void local_flush_cache_page(struct vm_area_struct *vma,
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
+#define copy_to_user_page copy_to_user_page
+#define copy_from_user_page copy_from_user_page
#else
@@ -186,4 +176,6 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
+#include <asm-generic/cacheflush.h>
+
#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index d008a153a2b9..7ed1a2085bd7 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -115,9 +115,9 @@
#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
/* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is the address within the same 1GB range as the ra
*/
-#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))
#elif defined(__XTENSA_CALL0_ABI__)
@@ -127,9 +127,9 @@
#define MAKE_RA_FOR_CALL(ra, ws) (ra)
/* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is not used as 'ra' is always the full address
*/
-#define MAKE_PC_FROM_RA(ra, sp) (ra)
+#define MAKE_PC_FROM_RA(ra, text) (ra)
#else
#error Unsupported Xtensa ABI
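
For the windowed ABI, the reworked MAKE_PC_FROM_RA keeps the low 30 bits of the return address and takes the top two bits (the 1GB region) from any kernel text address instead of the stack pointer. A standalone sketch of the same arithmetic, using made-up example values:

#include <stdio.h>

/* Mirror of the windowed-ABI conversion: low 30 bits come from ra,
 * the top 2 bits (the 1GB region) come from a text address. */
static unsigned long make_pc_from_ra(unsigned long ra, unsigned long text)
{
	return (ra & 0x3fffffffUL) | (text & 0xc0000000UL);
}

int main(void)
{
	unsigned long ra   = 0x8001d2f4UL;	/* return address with window bits */
	unsigned long text = 0xd0003000UL;	/* any address inside kernel text */

	printf("pc = 0x%08lx\n", make_pc_from_ra(ra, text));
	return 0;
}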
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index a270467556dc..86c70117371b 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -87,7 +87,7 @@ struct pt_regs {
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
- (regs)->areg[1]))
+ (regs)->pc))
# ifndef CONFIG_SMP
# define profile_pc(regs) instruction_pointer(regs)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index a815577d25fd..7bd66677f7b6 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -47,6 +47,7 @@
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>
+#include <asm/sections.h>
#include <asm/traps.h>
extern void ret_from_fork(void);
@@ -380,7 +381,7 @@ unsigned long __get_wchan(struct task_struct *p)
int count = 0;
sp = p->thread.sp;
- pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+ pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
do {
if (sp < stack_page + sizeof(struct task_struct) ||
@@ -392,7 +393,7 @@ unsigned long __get_wchan(struct task_struct *p)
/* Stack layout: sp-4: ra, sp-3: sp' */
- pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
sp = SPILL_SLOT(sp, 1);
} while (count++ < 16);
return 0;
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 831ffb648bda..ed324fdf2a2f 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -13,6 +13,7 @@
#include <linux/stacktrace.h>
#include <asm/ftrace.h>
+#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
@@ -189,7 +190,7 @@ void walk_stackframe(unsigned long *sp,
if (a1 <= (unsigned long)sp)
break;
- frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.pc = MAKE_PC_FROM_RA(a0, _text);
frame.sp = a1;
if (fn(&frame, data))
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 7ec66a79f472..23be0e7516ce 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -87,12 +87,13 @@ static inline void *coherent_kvaddr(struct page *page, unsigned long base,
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
+ struct folio *folio = page_folio(page);
unsigned long paddr;
void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
preempt_disable();
kmap_invalidate_coherent(page, vaddr);
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_arch_1, folio_flags(folio, 0));
clear_page_alias(kvaddr, paddr);
preempt_enable();
}
@@ -101,6 +102,7 @@ EXPORT_SYMBOL(clear_user_highpage);
void copy_user_highpage(struct page *dst, struct page *src,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *folio = page_folio(dst);
unsigned long dst_paddr, src_paddr;
void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
&dst_paddr);
@@ -109,7 +111,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
preempt_disable();
kmap_invalidate_coherent(dst, vaddr);
- set_bit(PG_arch_1, &dst->flags);
+ set_bit(PG_arch_1, folio_flags(folio, 0));
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
preempt_enable();
}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 4f974b74883c..d8b60d6e50a8 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -256,12 +256,13 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
dtlb ? 'D' : 'I', w, e, r0, r1, pte);
if (pte == 0 || !pte_present(__pte(pte))) {
struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
- pr_err("page refcount: %d, mapcount: %d\n",
- page_count(p),
- page_mapcount(p));
- if (!page_count(p))
+ struct folio *f = page_folio(p);
+
+ pr_err("folio refcount: %d, mapcount: %d\n",
+ folio_ref_count(f), folio_mapcount(f));
+ if (!folio_ref_count(f))
rc |= TLB_INSANE;
- else if (page_mapcount(p))
+ else if (folio_mapped(f))
rc |= TLB_SUSPICIOUS;
} else {
rc |= TLB_INSANE;
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 8896e691c051..abec44b687df 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -166,10 +166,8 @@ late_initcall(rs_init);
static void iss_console_write(struct console *co, const char *s, unsigned count)
{
- if (s && *s != 0) {
- int len = strlen(s);
- simc_write(1, s, count < len ? count : len);
- }
+ if (s && *s != 0)
+ simc_write(1, s, min(count, strlen(s)));
}
static struct tty_driver* iss_console_device(struct console *c, int *index)
diff --git a/block/Kconfig b/block/Kconfig
index 1de4682d48cc..dc12af58dbae 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -100,7 +100,6 @@ config BLK_DEV_WRITE_MOUNTED
config BLK_DEV_ZONED
bool "Zoned block device support"
- select MQ_IOSCHED_DEADLINE
help
Block layer zoned block device support. This option enables
support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
@@ -120,17 +119,6 @@ config BLK_DEV_THROTTLING
See Documentation/admin-guide/cgroup-v1/blkio-controller.rst for more information.
-config BLK_DEV_THROTTLING_LOW
- bool "Block throttling .low limit interface support (EXPERIMENTAL)"
- depends on BLK_DEV_THROTTLING
- help
- Add .low limit interface for block throttling. The low limit is a best
- effort limit to prioritize cgroups. Depending on the setting, the limit
- can be used to protect cgroups in terms of bandwidth/iops and better
- utilize disk resource.
-
- Note, this is an experimental interface and could be changed someday.
-
config BLK_WBT
bool "Enable support for block device writeback throttling"
help
@@ -198,10 +186,6 @@ config BLK_DEBUG_FS
Unless you are building a kernel for a tiny system, you should
say Y here.
-config BLK_DEBUG_FS_ZONED
- bool
- default BLK_DEBUG_FS && BLK_DEV_ZONED
-
config BLK_SED_OPAL
bool "Logic for interfacing with Opal enabled SEDs"
depends on KEYS
diff --git a/block/Makefile b/block/Makefile
index 46ada9dc8bbf..168150b9c510 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
-obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_PM) += blk-pm.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o \
diff --git a/block/bdev.c b/block/bdev.c
index 7a5f611c3d2e..353677ac49b3 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -43,6 +43,11 @@ static inline struct bdev_inode *BDEV_I(struct inode *inode)
return container_of(inode, struct bdev_inode, vfs_inode);
}
+static inline struct inode *BD_INODE(struct block_device *bdev)
+{
+ return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
+}
+
struct block_device *I_BDEV(struct inode *inode)
{
return &BDEV_I(inode)->bdev;
@@ -57,7 +62,7 @@ EXPORT_SYMBOL(file_bdev);
static void bdev_write_inode(struct block_device *bdev)
{
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = BD_INODE(bdev);
int ret;
spin_lock(&inode->i_lock);
@@ -76,7 +81,7 @@ static void bdev_write_inode(struct block_device *bdev)
/* Kill _all_ buffers and pagecache , dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping_empty(mapping))
return;
@@ -88,7 +93,7 @@ static void kill_bdev(struct block_device *bdev)
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
@@ -116,7 +121,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
goto invalidate;
}
- truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+ truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
@@ -126,7 +131,7 @@ invalidate:
* Someone else has the handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
- return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+ return invalidate_inode_pages2_range(bdev->bd_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
@@ -134,18 +139,21 @@ invalidate:
static void set_init_blocksize(struct block_device *bdev)
{
unsigned int bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
+ loff_t size = i_size_read(BD_INODE(bdev));
while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
}
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
}
-int set_blocksize(struct block_device *bdev, int size)
+int set_blocksize(struct file *file, int size)
{
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = I_BDEV(inode);
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -154,10 +162,13 @@ int set_blocksize(struct block_device *bdev, int size)
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
+ if (!file->private_data)
+ return -EINVAL;
+
/* Don't change the size if it is same as current */
- if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
+ if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
- bdev->bd_inode->i_blkbits = blksize_bits(size);
+ inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
return 0;
@@ -167,7 +178,7 @@ EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
- if (set_blocksize(sb->s_bdev, size))
+ if (set_blocksize(sb->s_bdev_file, size))
return 0;
/* If we get here, we know size is power of two
* and it's value is between 512 and PAGE_SIZE */
@@ -192,7 +203,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_flush(bdev->bd_inode->i_mapping);
+ return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
@@ -204,13 +215,13 @@ int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
- return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+ return filemap_write_and_wait_range(bdev->bd_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
@@ -411,13 +422,11 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
mutex_init(&bdev->bd_holder_lock);
- bdev->bd_partno = partno;
- bdev->bd_inode = inode;
+ atomic_set(&bdev->__bd_flags, partno);
+ bdev->bd_mapping = &inode->i_data;
bdev->bd_queue = disk->queue;
- if (partno)
- bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
- else
- bdev->bd_has_submit_bio = false;
+ if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
+ bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
bdev->bd_stats = alloc_percpu(struct disk_stats);
if (!bdev->bd_stats) {
iput(inode);
@@ -430,19 +439,30 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
- i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
+ struct inode *inode = BD_INODE(bdev);
if (bdev_stable_writes(bdev))
- mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ mapping_set_stable_writes(bdev->bd_mapping);
bdev->bd_dev = dev;
- bdev->bd_inode->i_rdev = dev;
- bdev->bd_inode->i_ino = dev;
- insert_inode_hash(bdev->bd_inode);
+ inode->i_rdev = dev;
+ inode->i_ino = dev;
+ insert_inode_hash(inode);
+}
+
+void bdev_unhash(struct block_device *bdev)
+{
+ remove_inode_hash(BD_INODE(bdev));
+}
+
+void bdev_drop(struct block_device *bdev)
+{
+ iput(BD_INODE(bdev));
}
long nr_blockdev_pages(void)
@@ -583,9 +603,6 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
mutex_unlock(&bdev->bd_holder_lock);
bd_clear_claiming(whole, holder);
mutex_unlock(&bdev_lock);
-
- if (hops && hops->get_holder)
- hops->get_holder(holder);
}
/**
@@ -608,7 +625,6 @@ EXPORT_SYMBOL(bd_abort_claiming);
static void bd_end_claim(struct block_device *bdev, void *holder)
{
struct block_device *whole = bdev_whole(bdev);
- const struct blk_holder_ops *hops = bdev->bd_holder_ops;
bool unblock = false;
/*
@@ -624,23 +640,20 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
bdev->bd_holder = NULL;
bdev->bd_holder_ops = NULL;
mutex_unlock(&bdev->bd_holder_lock);
- if (bdev->bd_write_holder)
+ if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
unblock = true;
}
if (!whole->bd_holders)
whole->bd_holder = NULL;
mutex_unlock(&bdev_lock);
- if (hops && hops->put_holder)
- hops->put_holder(holder);
-
/*
* If this was the last claim, remove holder link and unblock evpoll if
* it was a write holder.
*/
if (unblock) {
disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
+ bdev_clear_flag(bdev, BD_WRITE_HOLDER);
}
}
@@ -652,6 +665,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
bdev_write_inode(bdev);
}
+static void blkdev_put_whole(struct block_device *bdev)
+{
+ if (atomic_dec_and_test(&bdev->bd_openers))
+ blkdev_flush_mapping(bdev);
+ if (bdev->bd_disk->fops->release)
+ bdev->bd_disk->fops->release(bdev->bd_disk);
+}
+
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@@ -670,20 +691,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
if (!atomic_read(&bdev->bd_openers))
set_init_blocksize(bdev);
- if (test_bit(GD_NEED_PART_SCAN, &disk->state))
- bdev_disk_changed(disk, false);
atomic_inc(&bdev->bd_openers);
+ if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
+ /*
+ * Only return scanning errors if we are called from contexts
+ * that explicitly want them, e.g. the BLKRRPART ioctl.
+ */
+ ret = bdev_disk_changed(disk, false);
+ if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
+ blkdev_put_whole(bdev);
+ return ret;
+ }
+ }
return 0;
}
-static void blkdev_put_whole(struct block_device *bdev)
-{
- if (atomic_dec_and_test(&bdev->bd_openers))
- blkdev_flush_mapping(bdev);
- if (bdev->bd_disk->fops->release)
- bdev->bd_disk->fops->release(bdev->bd_disk);
-}
-
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
struct gendisk *disk = part->bd_disk;
@@ -776,17 +798,17 @@ void blkdev_put_no_open(struct block_device *bdev)
static bool bdev_writes_blocked(struct block_device *bdev)
{
- return bdev->bd_writers == -1;
+ return bdev->bd_writers < 0;
}
static void bdev_block_writes(struct block_device *bdev)
{
- bdev->bd_writers = -1;
+ bdev->bd_writers--;
}
static void bdev_unblock_writes(struct block_device *bdev)
{
- bdev->bd_writers = 0;
+ bdev->bd_writers++;
}
static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
@@ -813,6 +835,11 @@ static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
bdev->bd_writers++;
}
+static inline bool bdev_unclaimed(const struct file *bdev_file)
+{
+ return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
+}
+
static void bdev_yield_write_access(struct file *bdev_file)
{
struct block_device *bdev;
@@ -820,14 +847,15 @@ static void bdev_yield_write_access(struct file *bdev_file)
if (bdev_allow_write_mounted)
return;
+ if (bdev_unclaimed(bdev_file))
+ return;
+
bdev = file_bdev(bdev_file);
- /* Yield exclusive or shared write access. */
- if (bdev_file->f_mode & FMODE_WRITE) {
- if (bdev_writes_blocked(bdev))
- bdev_unblock_writes(bdev);
- else
- bdev->bd_writers--;
- }
+
+ if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
+ bdev_unblock_writes(bdev);
+ else if (bdev_file->f_mode & FMODE_WRITE)
+ bdev->bd_writers--;
}
/**
@@ -874,7 +902,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
goto abort_claiming;
ret = -EBUSY;
if (!bdev_may_open(bdev, mode))
- goto abort_claiming;
+ goto put_module;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
@@ -892,9 +920,10 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
* writeable reference is too fragile given the way @mode is
* used in blkdev_get/put().
*/
- if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
+ if ((mode & BLK_OPEN_WRITE) &&
+ !bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
(disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
- bdev->bd_write_holder = true;
+ bdev_set_flag(bdev, BD_WRITE_HOLDER);
unblock_events = false;
}
}
@@ -904,10 +933,12 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
disk_unblock_events(disk);
bdev_file->f_flags |= O_LARGEFILE;
- bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
+ bdev_file->f_mode |= FMODE_CAN_ODIRECT;
if (bdev_nowait(bdev))
bdev_file->f_mode |= FMODE_NOWAIT;
- bdev_file->f_mapping = bdev->bd_inode->i_mapping;
+ if (mode & BLK_OPEN_RESTRICT_WRITES)
+ bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
+ bdev_file->f_mapping = bdev->bd_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
@@ -969,13 +1000,13 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
return ERR_PTR(-ENXIO);
flags = blk_to_file_flags(mode);
- bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode,
+ bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
if (IS_ERR(bdev_file)) {
blkdev_put_no_open(bdev);
return bdev_file;
}
- ihold(bdev->bd_inode);
+ ihold(BD_INODE(bdev));
ret = bdev_open(bdev, mode, holder, hops, bdev_file);
if (ret) {
@@ -1012,6 +1043,20 @@ struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(bdev_file_open_by_path);
+static inline void bd_yield_claim(struct file *bdev_file)
+{
+ struct block_device *bdev = file_bdev(bdev_file);
+ void *holder = bdev_file->private_data;
+
+ lockdep_assert_held(&bdev->bd_disk->open_mutex);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
+ return;
+
+ if (!bdev_unclaimed(bdev_file))
+ bd_end_claim(bdev, holder);
+}
+
void bdev_release(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
@@ -1036,7 +1081,7 @@ void bdev_release(struct file *bdev_file)
bdev_yield_write_access(bdev_file);
if (holder)
- bd_end_claim(bdev, holder);
+ bd_yield_claim(bdev_file);
/*
* Trigger event checking and tell drivers to flush MEDIA_CHANGE
@@ -1057,6 +1102,39 @@ put_no_open:
}
/**
+ * bdev_fput - yield claim to the block device and put the file
+ * @bdev_file: open block device
+ *
+ * Yield claim on the block device and put the file. Ensure that the
+ * block device can be reclaimed before the file is closed which is a
+ * deferred operation.
+ */
+void bdev_fput(struct file *bdev_file)
+{
+ if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
+ return;
+
+ if (bdev_file->private_data) {
+ struct block_device *bdev = file_bdev(bdev_file);
+ struct gendisk *disk = bdev->bd_disk;
+
+ mutex_lock(&disk->open_mutex);
+ bdev_yield_write_access(bdev_file);
+ bd_yield_claim(bdev_file);
+ /*
+ * Tell release we already gave up our hold on the
+			 * device and, if write restrictions are available, that
+ * we already gave up write access to the device.
+ */
+ bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
+ mutex_unlock(&disk->open_mutex);
+ }
+
+ fput(bdev_file);
+}
+EXPORT_SYMBOL(bdev_fput);
+
+/**
* lookup_bdev() - Look up a struct block_device by name.
* @pathname: Name of the block device in the filesystem.
* @dev: Pointer to the block device's dev_t, if found.
@@ -1203,6 +1281,18 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
blkdev_put_no_open(bdev);
}
+bool disk_live(struct gendisk *disk)
+{
+ return !inode_unhashed(BD_INODE(disk->part0));
+}
+EXPORT_SYMBOL_GPL(disk_live);
+
+unsigned int block_size(struct block_device *bdev)
+{
+ return 1 << BD_INODE(bdev)->i_blkbits;
+}
+EXPORT_SYMBOL_GPL(block_size);
+
static int __init setup_bdev_allow_write_mounted(char *str)
{
if (kstrtobool(str, &bdev_allow_write_mounted))
diff --git a/block/bio.c b/block/bio.c
index d24420ed1c4c..e9e809a63c59 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -345,18 +345,29 @@ void bio_chain(struct bio *bio, struct bio *parent)
}
EXPORT_SYMBOL(bio_chain);
-struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
- unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
+/**
+ * bio_chain_and_submit - submit a bio after chaining it to another one
+ * @prev: bio to chain and submit
+ * @new: bio to chain to
+ *
+ * If @prev is non-NULL, chain it to @new and submit it.
+ *
+ * Return: @new.
+ */
+struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
{
- struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
-
- if (bio) {
- bio_chain(bio, new);
- submit_bio(bio);
+ if (prev) {
+ bio_chain(prev, new);
+ submit_bio(prev);
}
-
return new;
}
+
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
+{
+ return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
+}
EXPORT_SYMBOL_GPL(blk_next_bio);
static void bio_alloc_rescue(struct work_struct *work)
@@ -1125,6 +1136,7 @@ void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
WARN_ON_ONCE(off > UINT_MAX);
__bio_add_page(bio, &folio->page, len, off);
}
+EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
/**
* bio_add_folio - Attempt to add part of a folio to a bio.
@@ -1384,6 +1396,26 @@ int submit_bio_wait(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio_wait);
+static void bio_wait_end_io(struct bio *bio)
+{
+ complete(bio->bi_private);
+ bio_put(bio);
+}
+
+/*
+ * bio_await_chain - ends @bio and waits for every chained bio to complete
+ */
+void bio_await_chain(struct bio *bio)
+{
+ DECLARE_COMPLETION_ONSTACK_MAP(done,
+ bio->bi_bdev->bd_disk->lockdep_map);
+
+ bio->bi_private = &done;
+ bio->bi_end_io = bio_wait_end_io;
+ bio_endio(bio);
+ blk_wait_io(&done);
+}
+
void __bio_advance(struct bio *bio, unsigned bytes)
{
if (bio_integrity(bio))
@@ -1576,6 +1608,8 @@ again:
if (!bio_integrity_endio(bio))
return;
+ blk_zone_bio_endio(bio);
+
rq_qos_done_bio(bio);
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
@@ -1596,7 +1630,6 @@ again:
goto again;
}
- blk_throtl_bio_endio(bio);
/* release cgroup info */
bio_uninit(bio);
if (bio->bi_end_io)
diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
index 3304e841df7c..a55fb0c53558 100644
--- a/block/blk-cgroup-rwstat.c
+++ b/block/blk-cgroup-rwstat.c
@@ -9,25 +9,19 @@ int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
int i, ret;
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
+ ret = percpu_counter_init_many(rwstat->cpu_cnt, 0, gfp, BLKG_RWSTAT_NR);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
atomic64_set(&rwstat->aux_cnt[i], 0);
- }
return 0;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_init);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ percpu_counter_destroy_many(rwstat->cpu_cnt, BLKG_RWSTAT_NR);
}
EXPORT_SYMBOL_GPL(blkg_rwstat_exit);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bdbb557feb5a..4b1a35ab0ea4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -218,8 +218,7 @@ static void blkg_async_bio_workfn(struct work_struct *work)
/* as long as there are pending bios, @blkg can't go away */
spin_lock(&blkg->async_bio_lock);
- bio_list_merge(&bios, &blkg->async_bios);
- bio_list_init(&blkg->async_bios);
+ bio_list_merge_init(&bios, &blkg->async_bios);
spin_unlock(&blkg->async_bio_lock);
/* start plug only when bio_list contains at least 2 bios */
@@ -1409,6 +1408,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
return 0;
}
+void blkg_init_queue(struct request_queue *q)
+{
+ INIT_LIST_HEAD(&q->blkg_list);
+ mutex_init(&q->blkcg_mutex);
+}
+
int blkcg_init_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
@@ -1416,9 +1421,6 @@ int blkcg_init_disk(struct gendisk *disk)
bool preloaded;
int ret;
- INIT_LIST_HEAD(&q->blkg_list);
- mutex_init(&q->blkcg_mutex);
-
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
return -ENOMEM;
@@ -1441,14 +1443,8 @@ int blkcg_init_disk(struct gendisk *disk)
if (ret)
goto err_destroy_all;
- ret = blk_throtl_init(disk);
- if (ret)
- goto err_ioprio_exit;
-
return 0;
-err_ioprio_exit:
- blk_ioprio_exit(disk);
err_destroy_all:
blkg_destroy_all(disk);
return ret;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 78b74106bf10..90b3959d88cf 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -189,6 +189,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;
+void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);
@@ -482,6 +483,7 @@ struct blkcg {
};
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
diff --git a/block/blk-core.c b/block/blk-core.c
index a16b5abdbbf5..ea44b13af9af 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
init_waitqueue_head(&q->mq_freeze_wq);
mutex_init(&q->mq_freeze_lock);
+ blkg_init_queue(q);
+
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
@@ -494,7 +496,8 @@ __setup("fail_make_request=", setup_fail_make_request);
bool should_fail_request(struct block_device *part, unsigned int bytes)
{
- return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
+ return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
+ should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
@@ -514,10 +517,11 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
- if (bio->bi_bdev->bd_ro_warned)
+ if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
return;
- bio->bi_bdev->bd_ro_warned = true;
+ bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);
+
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
@@ -589,8 +593,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
return BLK_STS_NOTSUPP;
/* The bio sector must point to the start of a sequential zone */
- if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
- !bio_zone_is_seq(bio))
+ if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
return BLK_STS_IOERR;
/*
@@ -602,7 +605,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
return BLK_STS_IOERR;
/* Make sure the BIO is small enough and will not get split */
- if (nr_sectors > q->limits.max_zone_append_sectors)
+ if (nr_sectors > queue_max_zone_append_sectors(q))
return BLK_STS_IOERR;
bio->bi_opf |= REQ_NOMERGE;
@@ -615,7 +618,7 @@ static void __submit_bio(struct bio *bio)
if (unlikely(!blk_crypto_bio_prep(&bio)))
return;
- if (!bio->bi_bdev->bd_has_submit_bio) {
+ if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
blk_mq_submit_bio(bio);
} else if (likely(bio_queue_enter(bio) == 0)) {
struct gendisk *disk = bio->bi_bdev->bd_disk;
@@ -647,11 +650,13 @@ static void __submit_bio(struct bio *bio)
static void __submit_bio_noacct(struct bio *bio)
{
struct bio_list bio_list_on_stack[2];
+ struct blk_plug plug;
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
+ blk_start_plug(&plug);
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -685,19 +690,23 @@ static void __submit_bio_noacct(struct bio *bio)
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
+ blk_finish_plug(&plug);
current->bio_list = NULL;
}
static void __submit_bio_noacct_mq(struct bio *bio)
{
struct bio_list bio_list[2] = { };
+ struct blk_plug plug;
current->bio_list = bio_list;
+ blk_start_plug(&plug);
do {
__submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));
+ blk_finish_plug(&plug);
current->bio_list = NULL;
}
@@ -723,7 +732,7 @@ void submit_bio_noacct_nocheck(struct bio *bio)
*/
if (current->bio_list)
bio_list_add(&current->bio_list[0], bio);
- else if (!bio->bi_bdev->bd_has_submit_bio)
+ else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
@@ -759,7 +768,8 @@ void submit_bio_noacct(struct bio *bio)
if (!bio_flagged(bio, BIO_REMAPPED)) {
if (unlikely(bio_check_eod(bio)))
goto end_io;
- if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
+ if (bdev_is_partition(bdev) &&
+ unlikely(blk_partition_remap(bio)))
goto end_io;
}
@@ -908,12 +918,6 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
- /*
- * As the requests that require a zone lock are not plugged in the
- * first place, directly accessing the plug instead of using
- * blk_mq_plug() should not have any consequences during flushing for
- * zoned devices.
- */
blk_flush_plug(current->plug, false);
/*
@@ -985,11 +989,12 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
unsigned long stamp;
again:
stamp = READ_ONCE(part->bd_stamp);
- if (unlikely(time_after(now, stamp))) {
- if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
- __part_stat_add(part, io_ticks, end ? now - stamp : 1);
- }
- if (part->bd_partno) {
+ if (unlikely(time_after(now, stamp)) &&
+ likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
+ (end || part_in_flight(part)))
+ __part_stat_add(part, io_ticks, now - stamp);
+
+ if (bdev_is_partition(part)) {
part = bdev_whole(part);
goto again;
}
@@ -1195,6 +1200,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
if (unlikely(!rq_list_empty(plug->cached_rq)))
blk_mq_free_plug_rqs(plug);
+ plug->cur_ktime = 0;
current->flags &= ~PF_BLOCK_TS;
}
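
The update_io_ticks() change above still relies on a compare-and-swap so that only one context advances the per-device timestamp and accounts the elapsed ticks. A much-simplified userspace analogue of that pattern, assuming C11 atomics and reducing the end || part_in_flight() condition to a plain end flag (update_ticks and the sample values are invented for this sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long stamp;

/* Whoever wins the compare-exchange accounts the elapsed ticks;
 * losers simply observe the fresher stamp on the next call. */
static void update_ticks(unsigned long now, bool end, unsigned long *io_ticks)
{
	unsigned long old = atomic_load_explicit(&stamp, memory_order_relaxed);

	if (now > old &&
	    atomic_compare_exchange_strong(&stamp, &old, now) &&
	    end)
		*io_ticks += now - old;
}

int main(void)
{
	unsigned long io_ticks = 0;

	atomic_store(&stamp, 100);
	update_ticks(105, true, &io_ticks);	/* accounts 5 ticks */
	update_ticks(103, true, &io_ticks);	/* stale 'now', no change */
	printf("io_ticks = %lu\n", io_ticks);
	return 0;
}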
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b0f314f4bc14..c17cf8ed8113 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,6 +130,8 @@ static void blk_flush_restore_request(struct request *rq)
* original @rq->bio. Restore it.
*/
rq->bio = rq->biotail;
+ if (rq->bio)
+ rq->__sector = rq->bio->bi_iter.bi_sector;
/* make @rq a normal request */
rq->rq_flags &= ~RQF_FLUSH_SEQ;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 9a85bfbbc45a..690ca99dfaca 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
struct ioc *ioc = iocg->ioc;
struct blkcg_gq *blkg = iocg_to_blkg(iocg);
- u64 tdelta, delay, new_delay;
+ u64 tdelta, delay, new_delay, shift;
s64 vover, vover_pct;
u32 hwa;
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
/* calculate the current delay in effect - 1/2 every second */
tdelta = now->now - iocg->delay_at;
- if (iocg->delay)
- delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+ shift = div64_u64(tdelta, USEC_PER_SEC);
+ if (iocg->delay && shift < BITS_PER_LONG)
+ delay = iocg->delay >> shift;
else
delay = 0;
@@ -1438,8 +1439,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
lockdep_assert_held(&iocg->ioc->lock);
lockdep_assert_held(&iocg->waitq.lock);
- /* make sure that nobody messed with @iocg */
- WARN_ON_ONCE(list_empty(&iocg->active_list));
+ /*
+ * make sure that nobody messed with @iocg. Check iocg->pd.online
+	 * to avoid a warning when removing blkcg or disk.
+ */
+ WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
WARN_ON_ONCE(iocg->inuse > 1);
iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
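
The shift guard added above matters because shifting a 64-bit value by BITS_PER_LONG or more bits is undefined behaviour in C, so a sufficiently old delay must be treated as fully decayed instead. A tiny standalone illustration of the guarded halving (decayed_delay is a made-up helper, not the kernel function):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64

/* Delay halves every second; a huge elapsed time must decay to zero
 * rather than hit an out-of-range shift. */
static uint64_t decayed_delay(uint64_t delay, uint64_t elapsed_usec)
{
	uint64_t shift = elapsed_usec / 1000000;	/* whole seconds elapsed */

	if (!delay || shift >= BITS_PER_LONG)
		return 0;
	return delay >> shift;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)decayed_delay(4096, 3000000));		/* 512 */
	printf("%llu\n", (unsigned long long)decayed_delay(4096, 90ULL * 1000000));	/* 0 */
	return 0;
}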
diff --git a/block/blk-lib.c b/block/blk-lib.c
index a6954eafb8c8..442da9dad042 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -35,51 +35,39 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
-int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
- struct bio *bio = *biop;
- sector_t bs_mask;
-
- if (bdev_read_only(bdev))
- return -EPERM;
- if (!bdev_max_discard_sectors(bdev))
- return -EOPNOTSUPP;
-
- /* In case the discard granularity isn't set by buggy device driver */
- if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
- pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
- bdev);
- return -EOPNOTSUPP;
- }
-
- bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- if ((sector | nr_sects) & bs_mask)
- return -EINVAL;
+ sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
+ struct bio *bio;
- if (!nr_sects)
- return -EINVAL;
+ if (!bio_sects)
+ return NULL;
- while (nr_sects) {
- sector_t req_sects =
- min(nr_sects, bio_discard_limit(bdev, sector));
+ bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
+ if (!bio)
+ return NULL;
+ bio->bi_iter.bi_sector = *sector;
+ bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+ *sector += bio_sects;
+ *nr_sects -= bio_sects;
+ /*
+ * We can loop for a long time in here if someone does full device
+ * discards (like mkfs). Be nice and allow us to schedule out to avoid
+ * softlocking if preempt is disabled.
+ */
+ cond_resched();
+ return bio;
+}
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
- bio->bi_iter.bi_sector = sector;
- bio->bi_iter.bi_size = req_sects << 9;
- sector += req_sects;
- nr_sects -= req_sects;
-
- /*
- * We can loop for a long time in here, if someone does
- * full device discards (like mkfs). Be nice and allow
- * us to schedule out to avoid softlocking if preempt
- * is disabled.
- */
- cond_resched();
- }
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+{
+ struct bio *bio;
- *biop = bio;
+ while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
+ gfp_mask)))
+ *biop = bio_chain_and_submit(*biop, bio);
return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2a06fd33039d..8534c35e0497 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -377,6 +377,7 @@ struct bio *__bio_split_to_limits(struct bio *bio,
blkcg_bio_issue_init(split);
bio_chain(split, bio);
trace_block_split(split, bio->bi_iter.bi_sector);
+ WARN_ON_ONCE(bio_zone_write_plugging(bio));
submit_bio_noacct(bio);
return split;
}
@@ -726,7 +727,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
* which can be mixed are set in each bio and mark @rq as mixed
* merged.
*/
-void blk_rq_set_mixed_merge(struct request *rq)
+static void blk_rq_set_mixed_merge(struct request *rq)
{
blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;
@@ -779,6 +780,8 @@ static void blk_account_io_merge_request(struct request *req)
if (blk_do_io_stat(req)) {
part_stat_lock();
part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+ part_stat_local_dec(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
@@ -972,13 +975,7 @@ static void blk_account_io_merge_bio(struct request *req)
part_stat_unlock();
}
-enum bio_merge_status {
- BIO_MERGE_OK,
- BIO_MERGE_NONE,
- BIO_MERGE_FAILED,
-};
-
-static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+enum bio_merge_status bio_attempt_back_merge(struct request *req,
struct bio *bio, unsigned int nr_segs)
{
const blk_opf_t ff = bio_failfast(bio);
@@ -994,6 +991,9 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
blk_update_mixed_merge(req, bio, false);
+ if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
+ blk_zone_write_plug_bio_merged(bio);
+
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
@@ -1009,6 +1009,14 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
{
const blk_opf_t ff = bio_failfast(bio);
+ /*
+ * A front merge for writes to sequential zones of a zoned block device
+ * can happen only if the user submitted writes out of order. Do not
+ * merge such write to let it fail.
+ */
+ if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
+ return BIO_MERGE_FAILED;
+
if (!ll_front_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
@@ -1107,10 +1115,9 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
- struct blk_plug *plug;
+ struct blk_plug *plug = current->plug;
struct request *rq;
- plug = blk_mq_plug(bio);
if (!plug || rq_list_empty(plug->mq_list))
return false;
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
deleted file mode 100644
index a77b099c34b7..000000000000
--- a/block/blk-mq-debugfs-zoned.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2017 Western Digital Corporation or its affiliates.
- */
-
-#include <linux/blkdev.h>
-#include "blk-mq-debugfs.h"
-
-int queue_zone_wlock_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- unsigned int i;
-
- if (!q->disk->seq_zones_wlock)
- return 0;
-
- for (i = 0; i < q->disk->nr_zones; i++)
- if (test_bit(i, q->disk->seq_zones_wlock))
- seq_printf(m, "%u\n", i);
-
- return 0;
-}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 94668e72ab09..770c0c2b72fa 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -160,7 +160,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
{ "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
- { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
+ { "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
{ },
};
@@ -256,7 +256,6 @@ static const char *const rqf_name[] = {
RQF_NAME(HASHED),
RQF_NAME(STATS),
RQF_NAME(SPECIAL_PAYLOAD),
- RQF_NAME(ZONE_WRITE_LOCKED),
RQF_NAME(TIMED_OUT),
RQF_NAME(RESV),
};
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index 9c7d4b6117d4..c80e453e3014 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -83,10 +83,10 @@ static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
}
#endif
-#ifdef CONFIG_BLK_DEBUG_FS_ZONED
-int queue_zone_wlock_show(void *data, struct seq_file *m);
+#if defined(CONFIG_BLK_DEV_ZONED) && defined(CONFIG_BLK_DEBUG_FS)
+int queue_zone_wplugs_show(void *data, struct seq_file *m);
#else
-static inline int queue_zone_wlock_show(void *data, struct seq_file *m)
+static inline int queue_zone_wplugs_show(void *data, struct seq_file *m)
{
return 0;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 555ada922cf0..fc364a226e95 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>
+#include <linux/sched/isolation.h>
#include <trace/events/block.h>
@@ -92,7 +93,7 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv)
struct mq_inflight *mi = priv;
if (rq->part && blk_do_io_stat(rq) &&
- (!mi->part->bd_partno || rq->part == mi->part) &&
+ (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
mi->inflight[rq_data_dir(rq)]++;
@@ -690,6 +691,8 @@ static void blk_mq_finish_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ blk_zone_finish_request(rq);
+
if (rq->rq_flags & RQF_USE_SCHED) {
q->elevator->type->ops.finish_request(rq);
/*
@@ -761,36 +764,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
}
EXPORT_SYMBOL(blk_dump_rq_flags);
-static void req_bio_endio(struct request *rq, struct bio *bio,
- unsigned int nbytes, blk_status_t error)
-{
- if (unlikely(error)) {
- bio->bi_status = error;
- } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
- /*
- * Partial zone append completions cannot be supported as the
- * BIO fragments may end up not being written sequentially.
- * For such case, force the completed nbytes to be equal to
- * the BIO size so that bio_advance() sets the BIO remaining
- * size to 0 and we end up calling bio_endio() before returning.
- */
- if (bio->bi_iter.bi_size != nbytes) {
- bio->bi_status = BLK_STS_IOERR;
- nbytes = bio->bi_iter.bi_size;
- } else {
- bio->bi_iter.bi_sector = rq->__sector;
- }
- }
-
- bio_advance(bio, nbytes);
-
- if (unlikely(rq->rq_flags & RQF_QUIET))
- bio_set_flag(bio, BIO_QUIET);
- /* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
- bio_endio(bio);
-}
-
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (req->part && blk_do_io_stat(req)) {
@@ -850,8 +823,7 @@ static void blk_complete_request(struct request *req)
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- bio->bi_iter.bi_sector = req->__sector;
+ blk_zone_update_request_bio(req, bio);
if (!is_flush)
bio_endio(bio);
@@ -894,6 +866,8 @@ static void blk_complete_request(struct request *req)
bool blk_update_request(struct request *req, blk_status_t error,
unsigned int nr_bytes)
{
+ bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
+ bool quiet = req->rq_flags & RQF_QUIET;
int total_bytes;
trace_block_rq_complete(req, error, nr_bytes);
@@ -914,9 +888,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
__blk_crypto_rq_put_keyslot(req);
- if (unlikely(error && !blk_rq_is_passthrough(req) &&
- !(req->rq_flags & RQF_QUIET)) &&
- !test_bit(GD_DEAD, &req->q->disk->state)) {
+ if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
+ !test_bit(GD_DEAD, &req->q->disk->state)) {
blk_print_req_error(req, error);
trace_block_rq_error(req, error, nr_bytes);
}
@@ -928,12 +901,33 @@ bool blk_update_request(struct request *req, blk_status_t error,
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_iter.bi_size)
+ if (unlikely(error))
+ bio->bi_status = error;
+
+ if (bio_bytes == bio->bi_iter.bi_size) {
req->bio = bio->bi_next;
+ } else if (bio_is_zone_append(bio) && error == BLK_STS_OK) {
+ /*
+ * Partial zone append completions cannot be supported
+ * as the BIO fragments may end up not being written
+ * sequentially.
+ */
+ bio->bi_status = BLK_STS_IOERR;
+ }
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
- req_bio_endio(req, bio, bio_bytes, error);
+ if (unlikely(quiet))
+ bio_set_flag(bio, BIO_QUIET);
+
+ bio_advance(bio, bio_bytes);
+
+ /* Don't actually finish bio if it's part of flush sequence */
+ if (!bio->bi_iter.bi_size) {
+ blk_zone_update_request_bio(req, bio);
+ if (!is_flush)
+ bio_endio(bio);
+ }
total_bytes += bio_bytes;
nr_bytes -= bio_bytes;
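
The rule that blk_update_request() now open-codes is easier to see in isolation. A minimal userspace sketch follows (the fake_bio type and STS_* values are invented for illustration, this is not kernel code): a zone-append style operation that completes only partially is downgraded to an I/O error, while a regular operation simply advances by the completed byte count.

#include <stdbool.h>
#include <stdio.h>

enum status { STS_OK = 0, STS_IOERR = 10 };

struct fake_bio {
        unsigned int remaining;   /* bytes still to complete */
        bool is_zone_append;      /* must complete in one go */
        enum status status;
};

static void complete_bytes(struct fake_bio *bio, unsigned int done,
                           enum status error)
{
        unsigned int chunk = done < bio->remaining ? done : bio->remaining;

        if (error != STS_OK)
                bio->status = error;
        else if (bio->is_zone_append && chunk != bio->remaining)
                bio->status = STS_IOERR;   /* partial zone append: reject */

        bio->remaining -= chunk;           /* bio_advance() equivalent */
}

int main(void)
{
        struct fake_bio regular = { 8192, false, STS_OK };
        struct fake_bio zapp = { 8192, true, STS_OK };

        complete_bytes(&regular, 4096, STS_OK);
        complete_bytes(&zapp, 4096, STS_OK);
        printf("regular: remaining=%u status=%d\n", regular.remaining, regular.status);
        printf("zone append: remaining=%u status=%d\n", zapp.remaining, zapp.status);
        return 0;
}
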
@@ -1002,6 +996,8 @@ static inline void blk_account_io_done(struct request *req, u64 now)
update_io_ticks(req->part, jiffies, true);
part_stat_inc(req->part, ios[sgrp]);
part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+ part_stat_local_dec(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
@@ -1024,6 +1020,8 @@ static inline void blk_account_io_start(struct request *req)
part_stat_lock();
update_io_ticks(req->part, jiffies, false);
+ part_stat_local_inc(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
@@ -1335,11 +1333,6 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
blk_account_io_start(rq);
- /*
- * As plugging can be enabled for passthrough requests on a zoned
- * device, directly accessing the plug instead of using blk_mq_plug()
- * should not have any consequences.
- */
if (current->plug && !at_head) {
blk_add_rq_to_plug(current->plug, rq);
return;
@@ -1926,19 +1919,6 @@ static void blk_mq_handle_dev_resource(struct request *rq,
__blk_mq_requeue_request(rq);
}
-static void blk_mq_handle_zone_resource(struct request *rq,
- struct list_head *zone_list)
-{
- /*
- * If we end up here it is because we cannot dispatch a request to a
- * specific zone due to LLD level zone-write locking or other zone
- * related resource not being available. In this case, set the request
- * aside in zone_list for retrying it later.
- */
- list_add(&rq->queuelist, zone_list);
- __blk_mq_requeue_request(rq);
-}
-
enum prep_dispatch {
PREP_DISPATCH_OK,
PREP_DISPATCH_NO_TAG,
@@ -2024,7 +2004,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
struct request *rq;
int queued;
blk_status_t ret = BLK_STS_OK;
- LIST_HEAD(zone_list);
bool needs_resource = false;
if (list_empty(list))
@@ -2066,23 +2045,11 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
case BLK_STS_DEV_RESOURCE:
blk_mq_handle_dev_resource(rq, list);
goto out;
- case BLK_STS_ZONE_RESOURCE:
- /*
- * Move the request to zone_list and keep going through
- * the dispatch list to find more requests the drive can
- * accept.
- */
- blk_mq_handle_zone_resource(rq, &zone_list);
- needs_resource = true;
- break;
default:
blk_mq_end_request(rq, ret);
}
} while (!list_empty(list));
out:
- if (!list_empty(&zone_list))
- list_splice_tail_init(&zone_list, list);
-
/* If we didn't flush the entire list, we could have told the driver
* there was more coming, but that turned out to be a lie.
*/
@@ -2169,6 +2136,15 @@ static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
}
/*
+ * ->next_cpu is always calculated from hctx->cpumask, so simply use
+ * it to speed up the check
+ */
+static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)
+{
+ return hctx->next_cpu >= nr_cpu_ids;
+}
+
+/*
* It'd be great if the workqueue API had a way to pass
* in a mask and had some smarts for more clever placement.
* For now we just round-robin here, switching for every
@@ -2179,7 +2155,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
bool tried = false;
int next_cpu = hctx->next_cpu;
- if (hctx->queue->nr_hw_queues == 1)
+ /* Switch to unbound if no allowable CPUs in this hctx */
+ if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx))
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
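
A compact userspace model of the shortcut added in the two hunks above (fake_hctx and the constants are invented, not the kernel API): next_cpu is derived from the hctx cpumask, so once the mask is empty it stays at nr_cpu_ids and work placement falls back to an unbound CPU.

#include <stdio.h>

#define NR_CPUS           8
#define WORK_CPU_UNBOUND  (-1)

struct fake_hctx {
        unsigned long cpumask;  /* one bit per allowed CPU */
        int next_cpu;           /* NR_CPUS when the mask is empty */
};

static int first_cpu(unsigned long mask)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return NR_CPUS;                 /* mirrors nr_cpu_ids */
}

static int pick_cpu(struct fake_hctx *hctx)
{
        if (hctx->next_cpu >= NR_CPUS)  /* empty mask: go unbound */
                return WORK_CPU_UNBOUND;
        return hctx->next_cpu;
}

int main(void)
{
        struct fake_hctx populated = { 0x6, 0 };  /* CPUs 1 and 2 allowed */
        struct fake_hctx empty = { 0, 0 };

        populated.next_cpu = first_cpu(populated.cpumask);
        empty.next_cpu = first_cpu(empty.cpumask);

        printf("populated -> CPU %d\n", pick_cpu(&populated));
        printf("empty     -> %d (unbound)\n", pick_cpu(&empty));
        return 0;
}
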
@@ -2953,22 +2930,37 @@ static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- struct blk_plug *plug = blk_mq_plug(bio);
+ struct blk_plug *plug = current->plug;
const int is_sync = op_is_sync(bio->bi_opf);
struct blk_mq_hw_ctx *hctx;
unsigned int nr_segs = 1;
struct request *rq;
blk_status_t ret;
+ /*
+ * If the plug has a cached request for this queue, try to use it.
+ */
+ rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
+
+ /*
+ * A BIO that was released from a zone write plug has already been
+ * through the preparation in this function, already holds a reference
+ * on the queue usage counter, and is the only write BIO in-flight for
+ * the target zone. Go straight to preparing a request for it.
+ */
+ if (bio_zone_write_plugging(bio)) {
+ nr_segs = bio->__bi_nr_segments;
+ if (rq)
+ blk_queue_exit(q);
+ goto new_request;
+ }
+
bio = blk_queue_bounce(bio, q);
/*
- * If the plug has a cached request for this queue, try use it.
- *
* The cached request already holds a q_usage_counter reference and we
* don't have to acquire a new one if we use it.
*/
- rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
if (!rq) {
if (unlikely(bio_queue_enter(bio)))
return;
@@ -2985,6 +2977,10 @@ void blk_mq_submit_bio(struct bio *bio)
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
goto queue_exit;
+ if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))
+ goto queue_exit;
+
+new_request:
if (!rq) {
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq))
@@ -3007,6 +3003,9 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
+ if (bio_zone_write_plugging(bio))
+ blk_zone_write_plug_init_request(rq);
+
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
return;
@@ -3488,14 +3487,30 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
return data.has_rq;
}
-static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
- struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
+ unsigned int this_cpu)
{
- if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
- return false;
- if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
- return false;
- return true;
+ enum hctx_type type = hctx->type;
+ int cpu;
+
+ /*
+ * hctx->cpumask has to rule out isolated CPUs, but userspace still
+ * might submit IOs on these isolated CPUs, so use the queue map to
+ * check if all CPUs mapped to this hctx are offline
+ */
+ for_each_online_cpu(cpu) {
+ struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue,
+ type, cpu);
+
+ if (h != hctx)
+ continue;
+
+ /* this hctx has at least one online CPU */
+ if (this_cpu != cpu)
+ return true;
+ }
+
+ return false;
}
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
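
The reasoning in blk_mq_hctx_has_online_cpu() above can be modelled with a plain CPU-to-hctx map. A hypothetical userspace sketch (invented arrays, not the kernel queue map API): the hctx only loses its last online CPU when no other online CPU maps to it.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool hctx_has_other_online_cpu(const int map[NR_CPUS],
                                      const bool online[NR_CPUS],
                                      int hctx, int going_offline)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!online[cpu] || map[cpu] != hctx)
                        continue;
                if (cpu != going_offline)
                        return true;    /* another CPU can still run it */
        }
        return false;
}

int main(void)
{
        int map[NR_CPUS] = { 0, 0, 1, 1 };          /* CPU -> hctx */
        bool online[NR_CPUS] = { true, true, true, true };

        printf("hctx0 still served: %d\n",
               hctx_has_other_online_cpu(map, online, 0, 1));  /* 1: CPU0 */
        online[0] = false;
        printf("hctx0 still served: %d\n",
               hctx_has_other_online_cpu(map, online, 0, 1));  /* 0 */
        return 0;
}
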
@@ -3503,8 +3518,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
- if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
- !blk_mq_last_cpu_in_hctx(cpu, hctx))
+ if (blk_mq_hctx_has_online_cpu(hctx, cpu))
return 0;
/*
@@ -3912,6 +3926,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
queue_for_each_hw_ctx(q, hctx, i) {
+ int cpu;
+
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
@@ -3939,6 +3955,15 @@ static void blk_mq_map_swqueue(struct request_queue *q)
sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
/*
+ * Rule out isolated CPUs from hctx->cpumask to avoid
+ * running block kworker on isolated CPUs
+ */
+ for_each_cpu(cpu, hctx->cpumask) {
+ if (cpu_is_isolated(cpu))
+ cpumask_clear_cpu(cpu, hctx->cpumask);
+ }
+
+ /*
* Initialize batch roundrobin counts
*/
hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
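
The isolation filtering added to blk_mq_map_swqueue() amounts to clearing isolated CPUs out of a bitmask. A small userspace sketch with made-up masks (not the kernel cpumask API):

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
        unsigned long hctx_mask = 0xFF;   /* CPUs 0-7 mapped to this hctx */
        unsigned long isolated  = 0x0C;   /* CPUs 2 and 3 are isolated */

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (isolated & (1UL << cpu))
                        hctx_mask &= ~(1UL << cpu);   /* cpumask_clear_cpu() */

        printf("effective hctx cpumask: 0x%lx\n", hctx_mask);  /* 0xf3 */
        return 0;
}
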
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f75a9ecfebde..260beea8e332 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -365,37 +365,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
qmap->mq_map[cpu] = 0;
}
-/*
- * blk_mq_plug() - Get caller context plug
- * @bio : the bio being submitted by the caller context
- *
- * Plugging, by design, may delay the insertion of BIOs into the elevator in
- * order to increase BIO merging opportunities. This however can cause BIO
- * insertion order to change from the order in which submit_bio() is being
- * executed in the case of multiple contexts concurrently issuing BIOs to a
- * device, even if these context are synchronized to tightly control BIO issuing
- * order. While this is not a problem with regular block devices, this ordering
- * change can cause write BIO failures with zoned block devices as these
- * require sequential write patterns to zones. Prevent this from happening by
- * ignoring the plug state of a BIO issuing context if it is for a zoned block
- * device and the BIO to plug is a write operation.
- *
- * Return current->plug if the bio can be plugged and NULL otherwise
- */
-static inline struct blk_plug *blk_mq_plug( struct bio *bio)
-{
- /* Zoned block device write operation case: do not plug the BIO */
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
- return NULL;
-
- /*
- * For regular block devices or read operations, use the context plug
- * which may be NULL if blk_start_plug() was not executed.
- */
- return current->plug;
-}
-
/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3c7d8d638ab5..a7fe8e90240a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -146,8 +146,7 @@ static int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors > max_hw_sectors ||
- lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else {
@@ -183,16 +182,15 @@ static int blk_validate_limits(struct queue_limits *lim)
return -EINVAL;
/*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be identical to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
+ * A stacked device may have both a virtual boundary and a max segment
+ * size limit, so allow this setting for now. Longer term, the two
+ * might need to move out of the stacking limits since we have immutable
+ * bvecs and lower layer bio splitting is supposed to handle both
+ * correctly.
*/
if (lim->virt_boundary_mask) {
- if (WARN_ON_ONCE(lim->max_segment_size &&
- lim->max_segment_size != UINT_MAX))
- return -EINVAL;
- lim->max_segment_size = UINT_MAX;
+ if (!lim->max_segment_size)
+ lim->max_segment_size = UINT_MAX;
} else {
/*
* The maximum segment size has an odd historic 64k default that
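
A userspace sketch of the relaxed validation above, with invented field names and a stand-in default value: when a virt_boundary_mask is set, max_segment_size is now only defaulted to UINT_MAX if the caller left it unset, instead of being rejected.

#include <limits.h>
#include <stdio.h>

struct fake_limits {
        unsigned long virt_boundary_mask;
        unsigned int max_segment_size;
};

static void validate(struct fake_limits *lim)
{
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else if (!lim->max_segment_size) {
                lim->max_segment_size = 65536;  /* stand-in for the default */
        }
}

int main(void)
{
        struct fake_limits a = { 0xfff, 0 };       /* boundary, no seg size */
        struct fake_limits b = { 0xfff, 131072 };  /* both set: now allowed */

        validate(&a);
        validate(&b);
        printf("a.max_segment_size=%u\n", a.max_segment_size);
        printf("b.max_segment_size=%u\n", b.max_segment_size);
        return 0;
}
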
@@ -285,72 +283,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- * Force bouncing for ISA DMA ranges or highmem.
- *
- * DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
- q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- * Enables a low level driver to set a hard upper limit,
- * max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the capabilities of the I/O
- * controller.
- *
- * max_dev_sectors is a hard limit imposed by the storage device for
- * READ/WRITE requests. It is set by the disk driver.
- *
- * max_sectors is a soft limit imposed by the block layer for
- * filesystem type requests. This value can be overridden on a
- * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
- * The soft limit can not exceed max_hw_sectors.
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
- struct queue_limits *limits = &q->limits;
- unsigned int max_sectors;
-
- if ((max_hw_sectors << 9) < PAGE_SIZE) {
- max_hw_sectors = 1 << (PAGE_SHIFT - 9);
- pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
- }
-
- max_hw_sectors = round_down(max_hw_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_hw_sectors = max_hw_sectors;
-
- max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-
- if (limits->max_user_sectors)
- max_sectors = min(max_sectors, limits->max_user_sectors);
- else
- max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
-
- max_sectors = round_down(max_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_sectors = max_sectors;
-
- if (!q->disk)
- return;
- q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
-}
-EXPORT_SYMBOL(blk_queue_max_hw_sectors);
-
-/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
* @chunk_sectors: chunk sectors in the usual 512b unit
@@ -413,89 +345,38 @@ EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
* blk_queue_max_zone_append_sectors - set max sectors for a single zone append
* @q: the request queue for the device
* @max_zone_append_sectors: maximum number of sectors to write per command
+ *
+ * Sets the maximum number of sectors allowed for zone append commands.
+ * Specifying 0 for @max_zone_append_sectors indicates that the queue does
+ * not natively support zone append operations and that the block layer must
+ * emulate these operations using regular writes.
**/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors)
{
- unsigned int max_sectors;
+ unsigned int max_sectors = 0;
if (WARN_ON(!blk_queue_is_zoned(q)))
return;
- max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
- max_sectors = min(q->limits.chunk_sectors, max_sectors);
+ if (max_zone_append_sectors) {
+ max_sectors = min(q->limits.max_hw_sectors,
+ max_zone_append_sectors);
+ max_sectors = min(q->limits.chunk_sectors, max_sectors);
- /*
- * Signal eventual driver bugs resulting in the max_zone_append sectors limit
- * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
- * or the max_hw_sectors limit not set.
- */
- WARN_ON(!max_sectors);
+ /*
+ * Warn about possible driver bugs resulting in the max_zone_append
+ * sectors limit being 0 because the chunk_sectors limit (zone
+ * size) or the max_hw_sectors limit is not set.
+ */
+ WARN_ON_ONCE(!max_sectors);
+ }
q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
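
The new clamping behaviour can be summarised in a few lines of plain C (illustrative values only): a zero argument is recorded as 0, meaning zone append is emulated with regular writes, otherwise the limit is capped by both max_hw_sectors and the zone size.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int clamp_zone_append(unsigned int max_hw_sectors,
                                      unsigned int chunk_sectors,
                                      unsigned int requested)
{
        unsigned int max_sectors = 0;

        if (requested) {
                max_sectors = min_u(max_hw_sectors, requested);
                max_sectors = min_u(chunk_sectors, max_sectors);
        }
        return max_sectors;
}

int main(void)
{
        printf("%u\n", clamp_zone_append(1024, 524288, 0));     /* 0: emulate */
        printf("%u\n", clamp_zone_append(1024, 524288, 4096));  /* 1024 */
        printf("%u\n", clamp_zone_append(8192, 2048, 4096));    /* 2048 */
        return 0;
}
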
/**
- * blk_queue_max_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * hw data segments in a request.
- **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-{
- if (!max_segments) {
- max_segments = 1;
- pr_info("%s: set to minimum %u\n", __func__, max_segments);
- }
-
- q->limits.max_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_segments);
-
-/**
- * blk_queue_max_discard_segments - set max segments for discard requests
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * segments in a discard request.
- **/
-void blk_queue_max_discard_segments(struct request_queue *q,
- unsigned short max_segments)
-{
- q->limits.max_discard_segments = max_segments;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q: the request queue for the device
- * @max_size: max size of segment in bytes
- *
- * Description:
- * Enables a low level driver to set an upper limit on the size of a
- * coalesced segment
- **/
-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-{
- if (max_size < PAGE_SIZE) {
- max_size = PAGE_SIZE;
- pr_info("%s: set to minimum %u\n", __func__, max_size);
- }
-
- /* see blk_queue_virt_boundary() for the explanation */
- WARN_ON_ONCE(q->limits.virt_boundary_mask);
-
- q->limits.max_segment_size = max_size;
-}
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
-/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the logical block size, in bytes
@@ -661,29 +542,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
}
EXPORT_SYMBOL(blk_limits_io_opt);
-/**
- * blk_queue_io_opt - set optimal request size for the queue
- * @q: the request queue for the device
- * @opt: optimal request size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-{
- blk_limits_io_opt(&q->limits, opt);
- if (!q->disk)
- return;
- q->disk->bdi->ra_pages =
- max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-}
-EXPORT_SYMBOL(blk_queue_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
@@ -757,8 +615,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
- t->max_zone_append_sectors = min(t->max_zone_append_sectors,
- b->max_zone_append_sectors);
+ t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
+ queue_limits_max_zone_append_sectors(b));
t->bounce = max(t->bounce, b->bounce);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -934,81 +792,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-{
- if (mask < PAGE_SIZE - 1) {
- mask = PAGE_SIZE - 1;
- pr_info("%s: set to minimum %lx\n", __func__, mask);
- }
-
- q->limits.seg_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
- q->limits.virt_boundary_mask = mask;
-
- /*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be idential to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
- */
- if (mask)
- q->limits.max_segment_size = UINT_MAX;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * set required memory and length alignment for direct dma transactions.
- * this is used when building direct io requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(struct request_queue *q, int mask)
-{
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_update_dma_alignment - update dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * update required memory and length alignment for direct dma transactions.
- * If the requested alignment is larger than the current alignment, then
- * the current queue alignment is updated to the new value, otherwise it
- * is left alone. The design of this is to allow multiple objects
- * (driver, device, transport etc) to set their respective
- * alignments without having them interfere.
- *
- **/
-void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-{
- BUG_ON(mask > PAGE_SIZE);
-
- if (mask > q->limits.dma_alignment)
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-
-/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
* @depth: queue depth
@@ -1046,44 +829,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
/**
- * blk_queue_required_elevator_features - Set a queue required elevator features
- * @q: the request queue for the target device
- * @features: Required elevator features OR'ed together
- *
- * Tell the block layer that for the device controlled through @q, only the
- * only elevators that can be used are those that implement at least the set of
- * features specified by @features.
- */
-void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features)
-{
- q->required_elevator_features = features;
-}
-EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
-
-/**
- * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
- * @q: the request queue for the device
- * @dev: the device pointer for dma
- *
- * Tell the block layer about merging the segments by dma map of @q.
- */
-bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev)
-{
- unsigned long boundary = dma_get_merge_boundary(dev);
-
- if (!boundary)
- return false;
-
- /* No need to update max_segment_size. see blk_queue_virt_boundary() */
- blk_queue_virt_boundary(q, boundary);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-
-/**
* disk_set_zoned - indicate a zoned device
* @disk: gendisk to configure
*/
diff --git a/block/blk-stat.c b/block/blk-stat.c
index e42c263e53fb..eaf60097bbe1 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -57,9 +57,6 @@ void blk_stat_add(struct request *rq, u64 now)
value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
- if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
- blk_throtl_stat_add(rq, value);
-
rcu_read_lock();
cpu = get_cpu();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8c8f69d8ba48..f0f9314ab65c 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -224,7 +224,7 @@ static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
- unsigned long long max_sectors = q->limits.max_zone_append_sectors;
+ unsigned long long max_sectors = queue_max_zone_append_sectors(q);
return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
@@ -516,10 +516,6 @@ QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
-#endif
-
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.attr = {.name = "hw_sector_size", .mode = 0444 },
@@ -640,9 +636,6 @@ static struct attribute *queue_attrs[] = {
&queue_fua_entry.attr,
&queue_dax_entry.attr,
&queue_poll_delay_entry.attr,
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- &blk_throtl_sample_time_entry.attr,
-#endif
&queue_virt_boundary_mask_entry.attr,
&queue_dma_alignment_entry.attr,
NULL,
@@ -814,7 +807,6 @@ int blk_register_queue(struct gendisk *disk)
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
wbt_enable_default(disk);
- blk_throtl_register(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f4850a6f860b..80aaca18bfb0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -25,18 +25,6 @@
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
-#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
-#define MIN_THROTL_BPS (320 * 1024)
-#define MIN_THROTL_IOPS (10)
-#define DFL_LATENCY_TARGET (-1L)
-#define DFL_IDLE_THRESHOLD (0)
-#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
-#define LATENCY_FILTERED_SSD (0)
-/*
- * For HD, very small latency comes from sequential IO. Such IO is helpless to
- * help determine if its IO is impacted by others, hence we ignore the IO
- */
-#define LATENCY_FILTERED_HD (1000L) /* 1ms */
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
@@ -70,19 +58,6 @@ struct throtl_data
/* Work for dispatching throttled bios */
struct work_struct dispatch_work;
- unsigned int limit_index;
- bool limit_valid[LIMIT_CNT];
-
- unsigned long low_upgrade_time;
- unsigned long low_downgrade_time;
-
- unsigned int scale;
-
- struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
- struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
- struct latency_bucket __percpu *latency_buckets[2];
- unsigned long last_calculate_time;
- unsigned long filtered_latency;
bool track_bio_latency;
};
@@ -126,84 +101,24 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
return container_of(sq, struct throtl_data, service_queue);
}
-/*
- * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
- * make the IO dispatch more smooth.
- * Scale up: linearly scale up according to elapsed time since upgrade. For
- * every throtl_slice, the limit scales up 1/2 .low limit till the
- * limit hits .max limit
- * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
- */
-static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
-{
- /* arbitrary value to avoid too big scale */
- if (td->scale < 4096 && time_after_eq(jiffies,
- td->low_upgrade_time + td->scale * td->throtl_slice))
- td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
-
- return low + (low >> 1) * td->scale;
-}
-
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
struct blkcg_gq *blkg = tg_to_blkg(tg);
- struct throtl_data *td;
- uint64_t ret;
if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
return U64_MAX;
- td = tg->td;
- ret = tg->bps[rw][td->limit_index];
- if (ret == 0 && td->limit_index == LIMIT_LOW) {
- /* intermediate node or iops isn't 0 */
- if (!list_empty(&blkg->blkcg->css.children) ||
- tg->iops[rw][td->limit_index])
- return U64_MAX;
- else
- return MIN_THROTL_BPS;
- }
-
- if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
- tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
- uint64_t adjusted;
-
- adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
- ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
- }
- return ret;
+ return tg->bps[rw];
}
static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
struct blkcg_gq *blkg = tg_to_blkg(tg);
- struct throtl_data *td;
- unsigned int ret;
if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
return UINT_MAX;
- td = tg->td;
- ret = tg->iops[rw][td->limit_index];
- if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
- /* intermediate node or bps isn't 0 */
- if (!list_empty(&blkg->blkcg->css.children) ||
- tg->bps[rw][td->limit_index])
- return UINT_MAX;
- else
- return MIN_THROTL_IOPS;
- }
-
- if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
- tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
- uint64_t adjusted;
-
- adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
- if (adjusted > UINT_MAX)
- adjusted = UINT_MAX;
- ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
- }
- return ret;
+ return tg->iops[rw];
}
#define request_bucket_index(sectors) \
@@ -359,20 +274,10 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
}
RB_CLEAR_NODE(&tg->rb_node);
- tg->bps[READ][LIMIT_MAX] = U64_MAX;
- tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
- tg->iops[READ][LIMIT_MAX] = UINT_MAX;
- tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
- tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
- tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
- tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
- tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
- /* LIMIT_LOW will have default value 0 */
-
- tg->latency_target = DFL_LATENCY_TARGET;
- tg->latency_target_conf = DFL_LATENCY_TARGET;
- tg->idletime_threshold = DFL_IDLE_THRESHOLD;
- tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
+ tg->bps[READ] = U64_MAX;
+ tg->bps[WRITE] = U64_MAX;
+ tg->iops[READ] = UINT_MAX;
+ tg->iops[WRITE] = UINT_MAX;
return &tg->pd;
@@ -418,18 +323,15 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
static void tg_update_has_rules(struct throtl_grp *tg)
{
struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
- struct throtl_data *td = tg->td;
int rw;
for (rw = READ; rw <= WRITE; rw++) {
tg->has_rules_iops[rw] =
(parent_tg && parent_tg->has_rules_iops[rw]) ||
- (td->limit_valid[td->limit_index] &&
- tg_iops_limit(tg, rw) != UINT_MAX);
+ tg_iops_limit(tg, rw) != UINT_MAX;
tg->has_rules_bps[rw] =
(parent_tg && parent_tg->has_rules_bps[rw]) ||
- (td->limit_valid[td->limit_index] &&
- (tg_bps_limit(tg, rw) != U64_MAX));
+ tg_bps_limit(tg, rw) != U64_MAX;
}
}
@@ -443,49 +345,6 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
tg_update_has_rules(tg);
}
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-static void blk_throtl_update_limit_valid(struct throtl_data *td)
-{
- struct cgroup_subsys_state *pos_css;
- struct blkcg_gq *blkg;
- bool low_valid = false;
-
- rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
- struct throtl_grp *tg = blkg_to_tg(blkg);
-
- if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
- tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
- low_valid = true;
- break;
- }
- }
- rcu_read_unlock();
-
- td->limit_valid[LIMIT_LOW] = low_valid;
-}
-#else
-static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
-{
-}
-#endif
-
-static void throtl_upgrade_state(struct throtl_data *td);
-static void throtl_pd_offline(struct blkg_policy_data *pd)
-{
- struct throtl_grp *tg = pd_to_tg(pd);
-
- tg->bps[READ][LIMIT_LOW] = 0;
- tg->bps[WRITE][LIMIT_LOW] = 0;
- tg->iops[READ][LIMIT_LOW] = 0;
- tg->iops[WRITE][LIMIT_LOW] = 0;
-
- blk_throtl_update_limit_valid(tg->td);
-
- if (!tg->td->limit_valid[tg->td->limit_index])
- throtl_upgrade_state(tg->td);
-}
-
static void throtl_pd_free(struct blkg_policy_data *pd)
{
struct throtl_grp *tg = pd_to_tg(pd);
@@ -1151,8 +1010,6 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
return nr_disp;
}
-static bool throtl_can_upgrade(struct throtl_data *td,
- struct throtl_grp *this_tg);
/**
* throtl_pending_timer_fn - timer function for service_queue->pending_timer
* @t: the pending_timer member of the throtl_service_queue being serviced
@@ -1189,9 +1046,6 @@ static void throtl_pending_timer_fn(struct timer_list *t)
if (!q->root_blkg)
goto out_unlock;
- if (throtl_can_upgrade(td, NULL))
- throtl_upgrade_state(td);
-
again:
parent_sq = sq->parent_sq;
dispatched = false;
@@ -1331,22 +1185,12 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
blkg_for_each_descendant_pre(blkg, pos_css,
global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
struct throtl_grp *this_tg = blkg_to_tg(blkg);
- struct throtl_grp *parent_tg;
tg_update_has_rules(this_tg);
/* ignore root/second level */
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
!blkg->parent->parent)
continue;
- parent_tg = blkg_to_tg(blkg->parent);
- /*
- * make sure all children has lower idle time threshold and
- * higher latency target
- */
- this_tg->idletime_threshold = min(this_tg->idletime_threshold,
- parent_tg->idletime_threshold);
- this_tg->latency_target = max(this_tg->latency_target,
- parent_tg->latency_target);
}
rcu_read_unlock();
@@ -1367,6 +1211,53 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
}
}
+static int blk_throtl_init(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+ struct throtl_data *td;
+ int ret;
+
+ td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+ if (!td)
+ return -ENOMEM;
+
+ INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+ throtl_service_queue_init(&td->service_queue);
+
+ /*
+ * Freeze queue before activating policy, to synchronize with IO path,
+ * which is protected by 'q_usage_counter'.
+ */
+ blk_mq_freeze_queue(disk->queue);
+ blk_mq_quiesce_queue(disk->queue);
+
+ q->td = td;
+ td->queue = q;
+
+ /* activate policy */
+ ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
+ if (ret) {
+ q->td = NULL;
+ kfree(td);
+ goto out;
+ }
+
+ if (blk_queue_nonrot(q))
+ td->throtl_slice = DFL_THROTL_SLICE_SSD;
+ else
+ td->throtl_slice = DFL_THROTL_SLICE_HD;
+ td->track_bio_latency = !queue_is_mq(q);
+ if (!td->track_bio_latency)
+ blk_stat_enable_accounting(q);
+
+out:
+ blk_mq_unquiesce_queue(disk->queue);
+ blk_mq_unfreeze_queue(disk->queue);
+
+ return ret;
+}
+
static ssize_t tg_set_conf(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off, bool is_u64)
{
@@ -1378,6 +1269,16 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
blkg_conf_init(&ctx, buf);
+ ret = blkg_conf_open_bdev(&ctx);
+ if (ret)
+ goto out_finish;
+
+ if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+ ret = blk_throtl_init(ctx.bdev->bd_disk);
+ if (ret)
+ goto out_finish;
+ }
+
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
if (ret)
goto out_finish;
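
The lazy activation introduced here follows a common allocate-on-first-use pattern. A hypothetical userspace sketch (fake_queue and fake_td are invented stand-ins for request_queue and throtl_data): the throttle state is only allocated the first time a limit is written, rather than at queue creation.

#include <stdio.h>
#include <stdlib.h>

struct fake_td {
        unsigned long long bps_limit;
};

struct fake_queue {
        struct fake_td *td;     /* NULL until throttling is first used */
};

static int throttle_init(struct fake_queue *q)
{
        q->td = calloc(1, sizeof(*q->td));
        return q->td ? 0 : -1;
}

static int set_limit(struct fake_queue *q, unsigned long long bps)
{
        if (!q->td && throttle_init(q))        /* activate on first write */
                return -1;
        q->td->bps_limit = bps;
        return 0;
}

int main(void)
{
        struct fake_queue q = { NULL };

        printf("activated before: %s\n", q.td ? "yes" : "no");
        set_limit(&q, 10 << 20);
        printf("activated after:  %s, bps=%llu\n", q.td ? "yes" : "no",
               q.td->bps_limit);
        free(q.td);
        return 0;
}
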
@@ -1444,25 +1345,25 @@ static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
static struct cftype throtl_legacy_files[] = {
{
.name = "throttle.read_bps_device",
- .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
+ .private = offsetof(struct throtl_grp, bps[READ]),
.seq_show = tg_print_conf_u64,
.write = tg_set_conf_u64,
},
{
.name = "throttle.write_bps_device",
- .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
+ .private = offsetof(struct throtl_grp, bps[WRITE]),
.seq_show = tg_print_conf_u64,
.write = tg_set_conf_u64,
},
{
.name = "throttle.read_iops_device",
- .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
+ .private = offsetof(struct throtl_grp, iops[READ]),
.seq_show = tg_print_conf_uint,
.write = tg_set_conf_uint,
},
{
.name = "throttle.write_iops_device",
- .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
+ .private = offsetof(struct throtl_grp, iops[WRITE]),
.seq_show = tg_print_conf_uint,
.write = tg_set_conf_uint,
},
@@ -1494,61 +1395,43 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
{
struct throtl_grp *tg = pd_to_tg(pd);
const char *dname = blkg_dev_name(pd->blkg);
- char bufs[4][21] = { "max", "max", "max", "max" };
u64 bps_dft;
unsigned int iops_dft;
- char idle_time[26] = "";
- char latency_time[26] = "";
if (!dname)
return 0;
- if (off == LIMIT_LOW) {
- bps_dft = 0;
- iops_dft = 0;
- } else {
- bps_dft = U64_MAX;
- iops_dft = UINT_MAX;
- }
+ bps_dft = U64_MAX;
+ iops_dft = UINT_MAX;
- if (tg->bps_conf[READ][off] == bps_dft &&
- tg->bps_conf[WRITE][off] == bps_dft &&
- tg->iops_conf[READ][off] == iops_dft &&
- tg->iops_conf[WRITE][off] == iops_dft &&
- (off != LIMIT_LOW ||
- (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
- tg->latency_target_conf == DFL_LATENCY_TARGET)))
+ if (tg->bps_conf[READ] == bps_dft &&
+ tg->bps_conf[WRITE] == bps_dft &&
+ tg->iops_conf[READ] == iops_dft &&
+ tg->iops_conf[WRITE] == iops_dft)
return 0;
- if (tg->bps_conf[READ][off] != U64_MAX)
- snprintf(bufs[0], sizeof(bufs[0]), "%llu",
- tg->bps_conf[READ][off]);
- if (tg->bps_conf[WRITE][off] != U64_MAX)
- snprintf(bufs[1], sizeof(bufs[1]), "%llu",
- tg->bps_conf[WRITE][off]);
- if (tg->iops_conf[READ][off] != UINT_MAX)
- snprintf(bufs[2], sizeof(bufs[2]), "%u",
- tg->iops_conf[READ][off]);
- if (tg->iops_conf[WRITE][off] != UINT_MAX)
- snprintf(bufs[3], sizeof(bufs[3]), "%u",
- tg->iops_conf[WRITE][off]);
- if (off == LIMIT_LOW) {
- if (tg->idletime_threshold_conf == ULONG_MAX)
- strcpy(idle_time, " idle=max");
- else
- snprintf(idle_time, sizeof(idle_time), " idle=%lu",
- tg->idletime_threshold_conf);
+ seq_printf(sf, "%s", dname);
+ if (tg->bps_conf[READ] == U64_MAX)
+ seq_printf(sf, " rbps=max");
+ else
+ seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);
- if (tg->latency_target_conf == ULONG_MAX)
- strcpy(latency_time, " latency=max");
- else
- snprintf(latency_time, sizeof(latency_time),
- " latency=%lu", tg->latency_target_conf);
- }
+ if (tg->bps_conf[WRITE] == U64_MAX)
+ seq_printf(sf, " wbps=max");
+ else
+ seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);
+
+ if (tg->iops_conf[READ] == UINT_MAX)
+ seq_printf(sf, " riops=max");
+ else
+ seq_printf(sf, " riops=%u", tg->iops_conf[READ]);
- seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
- dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
- latency_time);
+ if (tg->iops_conf[WRITE] == UINT_MAX)
+ seq_printf(sf, " wiops=max");
+ else
+ seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);
+
+ seq_printf(sf, "\n");
return 0;
}
@@ -1566,13 +1449,20 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
struct blkg_conf_ctx ctx;
struct throtl_grp *tg;
u64 v[4];
- unsigned long idle_time;
- unsigned long latency_time;
int ret;
- int index = of_cft(of)->private;
blkg_conf_init(&ctx, buf);
+ ret = blkg_conf_open_bdev(&ctx);
+ if (ret)
+ goto out_finish;
+
+ if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+ ret = blk_throtl_init(ctx.bdev->bd_disk);
+ if (ret)
+ goto out_finish;
+ }
+
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
if (ret)
goto out_finish;
@@ -1580,13 +1470,11 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
tg = blkg_to_tg(ctx.blkg);
tg_update_carryover(tg);
- v[0] = tg->bps_conf[READ][index];
- v[1] = tg->bps_conf[WRITE][index];
- v[2] = tg->iops_conf[READ][index];
- v[3] = tg->iops_conf[WRITE][index];
+ v[0] = tg->bps[READ];
+ v[1] = tg->bps[WRITE];
+ v[2] = tg->iops[READ];
+ v[3] = tg->iops[WRITE];
- idle_time = tg->idletime_threshold_conf;
- latency_time = tg->latency_target_conf;
while (true) {
char tok[27]; /* wiops=18446744073709551616 */
char *p;
@@ -1618,60 +1506,16 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
v[2] = min_t(u64, val, UINT_MAX);
else if (!strcmp(tok, "wiops") && val > 1)
v[3] = min_t(u64, val, UINT_MAX);
- else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
- idle_time = val;
- else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
- latency_time = val;
else
goto out_finish;
}
- tg->bps_conf[READ][index] = v[0];
- tg->bps_conf[WRITE][index] = v[1];
- tg->iops_conf[READ][index] = v[2];
- tg->iops_conf[WRITE][index] = v[3];
+ tg->bps[READ] = v[0];
+ tg->bps[WRITE] = v[1];
+ tg->iops[READ] = v[2];
+ tg->iops[WRITE] = v[3];
- if (index == LIMIT_MAX) {
- tg->bps[READ][index] = v[0];
- tg->bps[WRITE][index] = v[1];
- tg->iops[READ][index] = v[2];
- tg->iops[WRITE][index] = v[3];
- }
- tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
- tg->bps_conf[READ][LIMIT_MAX]);
- tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
- tg->bps_conf[WRITE][LIMIT_MAX]);
- tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
- tg->iops_conf[READ][LIMIT_MAX]);
- tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
- tg->iops_conf[WRITE][LIMIT_MAX]);
- tg->idletime_threshold_conf = idle_time;
- tg->latency_target_conf = latency_time;
-
- /* force user to configure all settings for low limit */
- if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
- tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
- tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
- tg->latency_target_conf == DFL_LATENCY_TARGET) {
- tg->bps[READ][LIMIT_LOW] = 0;
- tg->bps[WRITE][LIMIT_LOW] = 0;
- tg->iops[READ][LIMIT_LOW] = 0;
- tg->iops[WRITE][LIMIT_LOW] = 0;
- tg->idletime_threshold = DFL_IDLE_THRESHOLD;
- tg->latency_target = DFL_LATENCY_TARGET;
- } else if (index == LIMIT_LOW) {
- tg->idletime_threshold = tg->idletime_threshold_conf;
- tg->latency_target = tg->latency_target_conf;
- }
-
- blk_throtl_update_limit_valid(tg->td);
- if (tg->td->limit_valid[LIMIT_LOW]) {
- if (index == LIMIT_LOW)
- tg->td->limit_index = LIMIT_LOW;
- } else
- tg->td->limit_index = LIMIT_MAX;
- tg_conf_updated(tg, index == LIMIT_LOW &&
- tg->td->limit_valid[LIMIT_LOW]);
+ tg_conf_updated(tg, false);
ret = 0;
out_finish:
blkg_conf_exit(&ctx);
@@ -1679,21 +1523,11 @@ out_finish:
}
static struct cftype throtl_files[] = {
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- {
- .name = "low",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = tg_print_limit,
- .write = tg_set_limit,
- .private = LIMIT_LOW,
- },
-#endif
{
.name = "max",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = tg_print_limit,
.write = tg_set_limit,
- .private = LIMIT_MAX,
},
{ } /* terminate */
};
@@ -1712,7 +1546,6 @@ struct blkcg_policy blkcg_policy_throtl = {
.pd_alloc_fn = throtl_pd_alloc,
.pd_init_fn = throtl_pd_init,
.pd_online_fn = throtl_pd_online,
- .pd_offline_fn = throtl_pd_offline,
.pd_free_fn = throtl_pd_free,
};
@@ -1722,6 +1555,9 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
struct cgroup_subsys_state *pos_css;
struct blkcg_gq *blkg;
+ if (!blk_throtl_activated(q))
+ return;
+
spin_lock_irq(&q->queue_lock);
/*
* queue_lock is held, rcu lock is not needed here technically.
@@ -1761,418 +1597,6 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
spin_unlock_irq(&q->queue_lock);
}
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
-{
- unsigned long rtime = jiffies, wtime = jiffies;
-
- if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
- rtime = tg->last_low_overflow_time[READ];
- if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
- wtime = tg->last_low_overflow_time[WRITE];
- return min(rtime, wtime);
-}
-
-static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
-{
- struct throtl_service_queue *parent_sq;
- struct throtl_grp *parent = tg;
- unsigned long ret = __tg_last_low_overflow_time(tg);
-
- while (true) {
- parent_sq = parent->service_queue.parent_sq;
- parent = sq_to_tg(parent_sq);
- if (!parent)
- break;
-
- /*
- * The parent doesn't have low limit, it always reaches low
- * limit. Its overflow time is useless for children
- */
- if (!parent->bps[READ][LIMIT_LOW] &&
- !parent->iops[READ][LIMIT_LOW] &&
- !parent->bps[WRITE][LIMIT_LOW] &&
- !parent->iops[WRITE][LIMIT_LOW])
- continue;
- if (time_after(__tg_last_low_overflow_time(parent), ret))
- ret = __tg_last_low_overflow_time(parent);
- }
- return ret;
-}
-
-static bool throtl_tg_is_idle(struct throtl_grp *tg)
-{
- /*
- * cgroup is idle if:
- * - single idle is too long, longer than a fixed value (in case user
- * configure a too big threshold) or 4 times of idletime threshold
- * - average think time is more than threshold
- * - IO latency is largely below threshold
- */
- unsigned long time;
- bool ret;
-
- time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
- ret = tg->latency_target == DFL_LATENCY_TARGET ||
- tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
- (blk_time_get_ns() >> 10) - tg->last_finish_time > time ||
- tg->avg_idletime > tg->idletime_threshold ||
- (tg->latency_target && tg->bio_cnt &&
- tg->bad_bio_cnt * 5 < tg->bio_cnt);
- throtl_log(&tg->service_queue,
- "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
- tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
- tg->bio_cnt, ret, tg->td->scale);
- return ret;
-}
-
-static bool throtl_low_limit_reached(struct throtl_grp *tg, int rw)
-{
- struct throtl_service_queue *sq = &tg->service_queue;
- bool limit = tg->bps[rw][LIMIT_LOW] || tg->iops[rw][LIMIT_LOW];
-
- /*
- * if low limit is zero, low limit is always reached.
- * if low limit is non-zero, we can check if there is any request
- * is queued to determine if low limit is reached as we throttle
- * request according to limit.
- */
- return !limit || sq->nr_queued[rw];
-}
-
-static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
-{
- /*
- * cgroup reaches low limit when low limit of READ and WRITE are
- * both reached, it's ok to upgrade to next limit if cgroup reaches
- * low limit
- */
- if (throtl_low_limit_reached(tg, READ) &&
- throtl_low_limit_reached(tg, WRITE))
- return true;
-
- if (time_after_eq(jiffies,
- tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
- throtl_tg_is_idle(tg))
- return true;
- return false;
-}
-
-static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
-{
- while (true) {
- if (throtl_tg_can_upgrade(tg))
- return true;
- tg = sq_to_tg(tg->service_queue.parent_sq);
- if (!tg || !tg_to_blkg(tg)->parent)
- return false;
- }
- return false;
-}
-
-static bool throtl_can_upgrade(struct throtl_data *td,
- struct throtl_grp *this_tg)
-{
- struct cgroup_subsys_state *pos_css;
- struct blkcg_gq *blkg;
-
- if (td->limit_index != LIMIT_LOW)
- return false;
-
- if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
- return false;
-
- rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
- struct throtl_grp *tg = blkg_to_tg(blkg);
-
- if (tg == this_tg)
- continue;
- if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
- continue;
- if (!throtl_hierarchy_can_upgrade(tg)) {
- rcu_read_unlock();
- return false;
- }
- }
- rcu_read_unlock();
- return true;
-}
-
-static void throtl_upgrade_check(struct throtl_grp *tg)
-{
- unsigned long now = jiffies;
-
- if (tg->td->limit_index != LIMIT_LOW)
- return;
-
- if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
- return;
-
- tg->last_check_time = now;
-
- if (!time_after_eq(now,
- __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
- return;
-
- if (throtl_can_upgrade(tg->td, NULL))
- throtl_upgrade_state(tg->td);
-}
-
-static void throtl_upgrade_state(struct throtl_data *td)
-{
- struct cgroup_subsys_state *pos_css;
- struct blkcg_gq *blkg;
-
- throtl_log(&td->service_queue, "upgrade to max");
- td->limit_index = LIMIT_MAX;
- td->low_upgrade_time = jiffies;
- td->scale = 0;
- rcu_read_lock();
- blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
- struct throtl_grp *tg = blkg_to_tg(blkg);
- struct throtl_service_queue *sq = &tg->service_queue;
-
- tg->disptime = jiffies - 1;
- throtl_select_dispatch(sq);
- throtl_schedule_next_dispatch(sq, true);
- }
- rcu_read_unlock();
- throtl_select_dispatch(&td->service_queue);
- throtl_schedule_next_dispatch(&td->service_queue, true);
- queue_work(kthrotld_workqueue, &td->dispatch_work);
-}
-
-static void throtl_downgrade_state(struct throtl_data *td)
-{
- td->scale /= 2;
-
- throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
- if (td->scale) {
- td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
- return;
- }
-
- td->limit_index = LIMIT_LOW;
- td->low_downgrade_time = jiffies;
-}
-
-static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
-{
- struct throtl_data *td = tg->td;
- unsigned long now = jiffies;
-
- /*
- * If cgroup is below low limit, consider downgrade and throttle other
- * cgroups
- */
- if (time_after_eq(now, tg_last_low_overflow_time(tg) +
- td->throtl_slice) &&
- (!throtl_tg_is_idle(tg) ||
- !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
- return true;
- return false;
-}
-
-static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
-{
- struct throtl_data *td = tg->td;
-
- if (time_before(jiffies, td->low_upgrade_time + td->throtl_slice))
- return false;
-
- while (true) {
- if (!throtl_tg_can_downgrade(tg))
- return false;
- tg = sq_to_tg(tg->service_queue.parent_sq);
- if (!tg || !tg_to_blkg(tg)->parent)
- break;
- }
- return true;
-}
-
-static void throtl_downgrade_check(struct throtl_grp *tg)
-{
- uint64_t bps;
- unsigned int iops;
- unsigned long elapsed_time;
- unsigned long now = jiffies;
-
- if (tg->td->limit_index != LIMIT_MAX ||
- !tg->td->limit_valid[LIMIT_LOW])
- return;
- if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
- return;
- if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
- return;
-
- elapsed_time = now - tg->last_check_time;
- tg->last_check_time = now;
-
- if (time_before(now, tg_last_low_overflow_time(tg) +
- tg->td->throtl_slice))
- return;
-
- if (tg->bps[READ][LIMIT_LOW]) {
- bps = tg->last_bytes_disp[READ] * HZ;
- do_div(bps, elapsed_time);
- if (bps >= tg->bps[READ][LIMIT_LOW])
- tg->last_low_overflow_time[READ] = now;
- }
-
- if (tg->bps[WRITE][LIMIT_LOW]) {
- bps = tg->last_bytes_disp[WRITE] * HZ;
- do_div(bps, elapsed_time);
- if (bps >= tg->bps[WRITE][LIMIT_LOW])
- tg->last_low_overflow_time[WRITE] = now;
- }
-
- if (tg->iops[READ][LIMIT_LOW]) {
- iops = tg->last_io_disp[READ] * HZ / elapsed_time;
- if (iops >= tg->iops[READ][LIMIT_LOW])
- tg->last_low_overflow_time[READ] = now;
- }
-
- if (tg->iops[WRITE][LIMIT_LOW]) {
- iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
- if (iops >= tg->iops[WRITE][LIMIT_LOW])
- tg->last_low_overflow_time[WRITE] = now;
- }
-
- /*
- * If cgroup is below low limit, consider downgrade and throttle other
- * cgroups
- */
- if (throtl_hierarchy_can_downgrade(tg))
- throtl_downgrade_state(tg->td);
-
- tg->last_bytes_disp[READ] = 0;
- tg->last_bytes_disp[WRITE] = 0;
- tg->last_io_disp[READ] = 0;
- tg->last_io_disp[WRITE] = 0;
-}
-
-static void blk_throtl_update_idletime(struct throtl_grp *tg)
-{
- unsigned long now;
- unsigned long last_finish_time = tg->last_finish_time;
-
- if (last_finish_time == 0)
- return;
-
- now = blk_time_get_ns() >> 10;
- if (now <= last_finish_time ||
- last_finish_time == tg->checked_last_finish_time)
- return;
-
- tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
- tg->checked_last_finish_time = last_finish_time;
-}
-
-static void throtl_update_latency_buckets(struct throtl_data *td)
-{
- struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
- int i, cpu, rw;
- unsigned long last_latency[2] = { 0 };
- unsigned long latency[2];
-
- if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
- return;
- if (time_before(jiffies, td->last_calculate_time + HZ))
- return;
- td->last_calculate_time = jiffies;
-
- memset(avg_latency, 0, sizeof(avg_latency));
- for (rw = READ; rw <= WRITE; rw++) {
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
-
- for_each_possible_cpu(cpu) {
- struct latency_bucket *bucket;
-
- /* this isn't race free, but ok in practice */
- bucket = per_cpu_ptr(td->latency_buckets[rw],
- cpu);
- tmp->total_latency += bucket[i].total_latency;
- tmp->samples += bucket[i].samples;
- bucket[i].total_latency = 0;
- bucket[i].samples = 0;
- }
-
- if (tmp->samples >= 32) {
- int samples = tmp->samples;
-
- latency[rw] = tmp->total_latency;
-
- tmp->total_latency = 0;
- tmp->samples = 0;
- latency[rw] /= samples;
- if (latency[rw] == 0)
- continue;
- avg_latency[rw][i].latency = latency[rw];
- }
- }
- }
-
- for (rw = READ; rw <= WRITE; rw++) {
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- if (!avg_latency[rw][i].latency) {
- if (td->avg_buckets[rw][i].latency < last_latency[rw])
- td->avg_buckets[rw][i].latency =
- last_latency[rw];
- continue;
- }
-
- if (!td->avg_buckets[rw][i].valid)
- latency[rw] = avg_latency[rw][i].latency;
- else
- latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
- avg_latency[rw][i].latency) >> 3;
-
- td->avg_buckets[rw][i].latency = max(latency[rw],
- last_latency[rw]);
- td->avg_buckets[rw][i].valid = true;
- last_latency[rw] = td->avg_buckets[rw][i].latency;
- }
- }
-
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
- throtl_log(&td->service_queue,
- "Latency bucket %d: read latency=%ld, read valid=%d, "
- "write latency=%ld, write valid=%d", i,
- td->avg_buckets[READ][i].latency,
- td->avg_buckets[READ][i].valid,
- td->avg_buckets[WRITE][i].latency,
- td->avg_buckets[WRITE][i].valid);
-}
-#else
-static inline void throtl_update_latency_buckets(struct throtl_data *td)
-{
-}
-
-static void blk_throtl_update_idletime(struct throtl_grp *tg)
-{
-}
-
-static void throtl_downgrade_check(struct throtl_grp *tg)
-{
-}
-
-static void throtl_upgrade_check(struct throtl_grp *tg)
-{
-}
-
-static bool throtl_can_upgrade(struct throtl_data *td,
- struct throtl_grp *this_tg)
-{
- return false;
-}
-
-static void throtl_upgrade_state(struct throtl_data *td)
-{
-}
-#endif
-
bool __blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -2185,21 +1609,12 @@ bool __blk_throtl_bio(struct bio *bio)
struct throtl_data *td = tg->td;
rcu_read_lock();
-
spin_lock_irq(&q->queue_lock);
-
- throtl_update_latency_buckets(td);
-
- blk_throtl_update_idletime(tg);
-
sq = &tg->service_queue;
-again:
while (true) {
if (tg->last_low_overflow_time[rw] == 0)
tg->last_low_overflow_time[rw] = jiffies;
- throtl_downgrade_check(tg);
- throtl_upgrade_check(tg);
/* throtl is FIFO - if bios are already queued, should queue */
if (sq->nr_queued[rw])
break;
@@ -2207,10 +1622,6 @@ again:
/* if above limits, break to queue */
if (!tg_may_dispatch(tg, bio, NULL)) {
tg->last_low_overflow_time[rw] = jiffies;
- if (throtl_can_upgrade(td, tg)) {
- throtl_upgrade_state(td);
- goto again;
- }
break;
}
@@ -2270,215 +1681,25 @@ again:
}
out_unlock:
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- if (throttled || !td->track_bio_latency)
- bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
-#endif
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
return throttled;
}
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-static void throtl_track_latency(struct throtl_data *td, sector_t size,
- enum req_op op, unsigned long time)
-{
- const bool rw = op_is_write(op);
- struct latency_bucket *latency;
- int index;
-
- if (!td || td->limit_index != LIMIT_LOW ||
- !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
- !blk_queue_nonrot(td->queue))
- return;
-
- index = request_bucket_index(size);
-
- latency = get_cpu_ptr(td->latency_buckets[rw]);
- latency[index].total_latency += time;
- latency[index].samples++;
- put_cpu_ptr(td->latency_buckets[rw]);
-}
-
-void blk_throtl_stat_add(struct request *rq, u64 time_ns)
-{
- struct request_queue *q = rq->q;
- struct throtl_data *td = q->td;
-
- throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
- time_ns >> 10);
-}
-
-void blk_throtl_bio_endio(struct bio *bio)
-{
- struct blkcg_gq *blkg;
- struct throtl_grp *tg;
- u64 finish_time_ns;
- unsigned long finish_time;
- unsigned long start_time;
- unsigned long lat;
- int rw = bio_data_dir(bio);
-
- blkg = bio->bi_blkg;
- if (!blkg)
- return;
- tg = blkg_to_tg(blkg);
- if (!tg->td->limit_valid[LIMIT_LOW])
- return;
-
- finish_time_ns = blk_time_get_ns();
- tg->last_finish_time = finish_time_ns >> 10;
-
- start_time = bio_issue_time(&bio->bi_issue) >> 10;
- finish_time = __bio_issue_time(finish_time_ns) >> 10;
- if (!start_time || finish_time <= start_time)
- return;
-
- lat = finish_time - start_time;
- /* this is only for bio based driver */
- if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
- throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
- bio_op(bio), lat);
-
- if (tg->latency_target && lat >= tg->td->filtered_latency) {
- int bucket;
- unsigned int threshold;
-
- bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
- threshold = tg->td->avg_buckets[rw][bucket].latency +
- tg->latency_target;
- if (lat > threshold)
- tg->bad_bio_cnt++;
- /*
- * Not race free, could get wrong count, which means cgroups
- * will be throttled
- */
- tg->bio_cnt++;
- }
-
- if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
- tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
- tg->bio_cnt /= 2;
- tg->bad_bio_cnt /= 2;
- }
-}
-#endif
-
-int blk_throtl_init(struct gendisk *disk)
-{
- struct request_queue *q = disk->queue;
- struct throtl_data *td;
- int ret;
-
- td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
- if (!td)
- return -ENOMEM;
- td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
- LATENCY_BUCKET_SIZE, __alignof__(u64));
- if (!td->latency_buckets[READ]) {
- kfree(td);
- return -ENOMEM;
- }
- td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
- LATENCY_BUCKET_SIZE, __alignof__(u64));
- if (!td->latency_buckets[WRITE]) {
- free_percpu(td->latency_buckets[READ]);
- kfree(td);
- return -ENOMEM;
- }
-
- INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
- throtl_service_queue_init(&td->service_queue);
-
- q->td = td;
- td->queue = q;
-
- td->limit_valid[LIMIT_MAX] = true;
- td->limit_index = LIMIT_MAX;
- td->low_upgrade_time = jiffies;
- td->low_downgrade_time = jiffies;
-
- /* activate policy */
- ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
- if (ret) {
- free_percpu(td->latency_buckets[READ]);
- free_percpu(td->latency_buckets[WRITE]);
- kfree(td);
- }
- return ret;
-}
-
void blk_throtl_exit(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
- BUG_ON(!q->td);
+ if (!blk_throtl_activated(q))
+ return;
+
del_timer_sync(&q->td->service_queue.pending_timer);
throtl_shutdown_wq(q);
blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
- free_percpu(q->td->latency_buckets[READ]);
- free_percpu(q->td->latency_buckets[WRITE]);
kfree(q->td);
}
-void blk_throtl_register(struct gendisk *disk)
-{
- struct request_queue *q = disk->queue;
- struct throtl_data *td;
- int i;
-
- td = q->td;
- BUG_ON(!td);
-
- if (blk_queue_nonrot(q)) {
- td->throtl_slice = DFL_THROTL_SLICE_SSD;
- td->filtered_latency = LATENCY_FILTERED_SSD;
- } else {
- td->throtl_slice = DFL_THROTL_SLICE_HD;
- td->filtered_latency = LATENCY_FILTERED_HD;
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
- td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
- }
- }
-#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
- /* if no low limit, use previous default */
- td->throtl_slice = DFL_THROTL_SLICE_HD;
-
-#else
- td->track_bio_latency = !queue_is_mq(q);
- if (!td->track_bio_latency)
- blk_stat_enable_accounting(q);
-#endif
-}
-
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
-{
- if (!q->td)
- return -EINVAL;
- return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
-}
-
-ssize_t blk_throtl_sample_time_store(struct request_queue *q,
- const char *page, size_t count)
-{
- unsigned long v;
- unsigned long t;
-
- if (!q->td)
- return -EINVAL;
- if (kstrtoul(page, 10, &v))
- return -EINVAL;
- t = msecs_to_jiffies(v);
- if (t == 0 || t > MAX_THROTL_SLICE)
- return -EINVAL;
- q->td->throtl_slice = t;
- return count;
-}
-#endif
-
static int __init throtl_init(void)
{
kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index bffbc9cfc8ab..393c3d134b96 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -58,12 +58,6 @@ enum tg_state_flags {
THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */
};
-enum {
- LIMIT_LOW,
- LIMIT_MAX,
- LIMIT_CNT,
-};
-
struct throtl_grp {
/* must be the first member */
struct blkg_policy_data pd;
@@ -102,14 +96,14 @@ struct throtl_grp {
bool has_rules_iops[2];
/* internally used bytes per second rate limits */
- uint64_t bps[2][LIMIT_CNT];
+ uint64_t bps[2];
/* user configured bps limits */
- uint64_t bps_conf[2][LIMIT_CNT];
+ uint64_t bps_conf[2];
/* internally used IOPS limits */
- unsigned int iops[2][LIMIT_CNT];
+ unsigned int iops[2];
/* user configured IOPS limits */
- unsigned int iops_conf[2][LIMIT_CNT];
+ unsigned int iops_conf[2];
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
@@ -132,22 +126,10 @@ struct throtl_grp {
unsigned long last_check_time;
- unsigned long latency_target; /* us */
- unsigned long latency_target_conf; /* us */
/* When did we start a new slice */
unsigned long slice_start[2];
unsigned long slice_end[2];
- unsigned long last_finish_time; /* ns / 1024 */
- unsigned long checked_last_finish_time; /* ns / 1024 */
- unsigned long avg_idletime; /* ns / 1024 */
- unsigned long idletime_threshold; /* us */
- unsigned long idletime_threshold_conf; /* us */
-
- unsigned int bio_cnt; /* total bios */
- unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
- unsigned long bio_cnt_reset_time;
-
struct blkg_rwstat stat_bytes;
struct blkg_rwstat stat_ios;
};
@@ -168,23 +150,33 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
* Internal throttling interface
*/
#ifndef CONFIG_BLK_DEV_THROTTLING
-static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
static inline void blk_throtl_exit(struct gendisk *disk) { }
-static inline void blk_throtl_register(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
-int blk_throtl_init(struct gendisk *disk);
void blk_throtl_exit(struct gendisk *disk);
-void blk_throtl_register(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);
+static inline bool blk_throtl_activated(struct request_queue *q)
+{
+ return q->td != NULL;
+}
+
static inline bool blk_should_throtl(struct bio *bio)
{
- struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+ struct throtl_grp *tg;
int rw = bio_data_dir(bio);
+ /*
+ * This is called under bio_queue_enter(), and it's synchronized with
+ * the activation of blk-throtl, which is protected by
+ * blk_mq_freeze_queue().
+ */
+ if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
+ return false;
+
+ tg = blkg_to_tg(bio->bi_blkg);
if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
bio_set_flag(bio, BIO_CGROUP_ACCT);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index da0f4b2a8fa0..03aa4eead39e 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -7,6 +7,7 @@
*
* Copyright (c) 2016, Damien Le Moal
* Copyright (c) 2016, Western Digital
+ * Copyright (c) 2024, Western Digital Corporation or its affiliates.
*/
#include <linux/kernel.h>
@@ -16,8 +17,13 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/mempool.h>
#include "blk.h"
+#include "blk-mq-sched.h"
+#include "blk-mq-debugfs.h"
#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
@@ -32,6 +38,64 @@ static const char *const zone_cond_name[] = {
};
#undef ZONE_COND_NAME
+/*
+ * Per-zone write plug.
+ * @node: hlist_node structure for managing the plug using a hash table.
+ * @link: To list the plug in the zone write plug error list of the disk.
+ * @ref: Zone write plug reference counter. A zone write plug reference is
+ * always at least 1 when the plug is hashed in the disk plug hash table.
+ * The reference is incremented whenever a new BIO needing plugging is
+ * submitted and when a function needs to manipulate a plug. The
+ * reference count is decremented whenever a plugged BIO completes and
+ * when a function that referenced the plug returns. The initial
+ *	 reference is dropped whenever the zone of the zone write plug is reset
+ *	 or finished, and when the zone becomes full (the last write BIO to the
+ *	 zone completes).
+ * @lock: Spinlock to atomically manipulate the plug.
+ * @flags: Flags indicating the plug state.
+ * @zone_no: The number of the zone the plug is managing.
+ * @wp_offset: The zone write pointer location relative to the start of the zone
+ * as a number of 512B sectors.
+ * @bio_list: The list of BIOs that are currently plugged.
+ * @bio_work: Work struct to handle issuing of plugged BIOs
+ * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
+ * @disk: The gendisk the plug belongs to.
+ */
+struct blk_zone_wplug {
+ struct hlist_node node;
+ struct list_head link;
+ atomic_t ref;
+ spinlock_t lock;
+ unsigned int flags;
+ unsigned int zone_no;
+ unsigned int wp_offset;
+ struct bio_list bio_list;
+ struct work_struct bio_work;
+ struct rcu_head rcu_head;
+ struct gendisk *disk;
+};
+
+/*
+ * Zone write plug flags bits:
+ * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
+ *   that is, that write BIOs are being throttled because a write BIO is
+ *   already being executed or the zone write plug bio list is not empty.
+ * - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
+ * recovered with a report zone to update the zone write pointer offset.
+ * - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
+ * from the disk hash table and that the initial reference to the zone
+ * write plug set when the plug was first added to the hash table has been
+ *   dropped. This flag is set when a zone is reset, finished or becomes full,
+ *   to prevent new references to the zone write plug from being taken for
+ * newly incoming BIOs. A zone write plug flagged with this flag will be
+ * freed once all remaining references from BIOs or functions are dropped.
+ */
+#define BLK_ZONE_WPLUG_PLUGGED (1U << 0)
+#define BLK_ZONE_WPLUG_ERROR (1U << 1)
+#define BLK_ZONE_WPLUG_UNHASHED (1U << 2)
+
+#define BLK_ZONE_WPLUG_BUSY (BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)
+
/**
* blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
* @zone_cond: BLK_ZONE_COND_XXX.
@@ -51,52 +115,6 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
-/*
- * Return true if a request is a write requests that needs zone write locking.
- */
-bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- if (!rq->q->disk->seq_zones_wlock)
- return false;
-
- return blk_rq_is_seq_zoned_write(rq);
-}
-EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
-
-bool blk_req_zone_write_trylock(struct request *rq)
-{
- unsigned int zno = blk_rq_zone_no(rq);
-
- if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
- return false;
-
- WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
- rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);
-
-void __blk_req_zone_write_lock(struct request *rq)
-{
- if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
- rq->q->disk->seq_zones_wlock)))
- return;
-
- WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
- rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
-}
-EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
-
-void __blk_req_zone_write_unlock(struct request *rq)
-{
- rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
- if (rq->q->disk->seq_zones_wlock)
- WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
- rq->q->disk->seq_zones_wlock));
-}
-EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
-
/**
* bdev_nr_zones - Get number of zones
* @bdev: Target device
@@ -398,7 +416,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
@@ -420,28 +438,1293 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
fail:
if (cmd == BLKRESETZONE)
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return ret;
}
-void disk_free_zone_bitmaps(struct gendisk *disk)
+static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
+{
+ if (!disk->conv_zones_bitmap)
+ return false;
+ return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
+}
+
+static bool disk_insert_zone_wplug(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ struct blk_zone_wplug *zwplg;
+ unsigned long flags;
+ unsigned int idx =
+ hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);
+
+ /*
+	 * Add the new zone write plug to the hash table, but carefully as we
+	 * are racing with other submission contexts, so we may already have a
+ * zone write plug for the same zone.
+ */
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
+ if (zwplg->zone_no == zwplug->zone_no) {
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ return false;
+ }
+ }
+ hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+
+ return true;
+}
+
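+/*
+ * Look up and take a reference on the zone write plug of the zone containing
+ * @sector. Return NULL if the zone has no write plug in the disk hash table.
+ */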
+static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ sector_t sector)
+{
+ unsigned int zno = disk_zone_no(disk, sector);
+ unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
+ struct blk_zone_wplug *zwplug;
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
+ if (zwplug->zone_no == zno &&
+ atomic_inc_not_zero(&zwplug->ref)) {
+ rcu_read_unlock();
+ return zwplug;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
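+/*
+ * RCU callback to free a zone write plug once a grace period has elapsed.
+ */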
+static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
+ struct blk_zone_wplug *zwplug =
+ container_of(rcu_head, struct blk_zone_wplug, rcu_head);
+
+ mempool_free(zwplug, zwplug->disk->zone_wplugs_pool);
+}
+
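+/*
+ * Drop a reference on a zone write plug. The plug is freed (RCU-deferred)
+ * once its last reference is dropped.
+ */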
+static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
+{
+ if (atomic_dec_and_test(&zwplug->ref)) {
+ WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
+ WARN_ON_ONCE(!list_empty(&zwplug->link));
+ WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
+
+ call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
+ }
+}
+
+static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ /* If the zone write plug was already removed, we are done. */
+ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
+ return false;
+
+ /* If the zone write plug is still busy, it cannot be removed. */
+ if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
+ return false;
+
+ /*
+ * Completions of BIOs with blk_zone_write_plug_bio_endio() may
+ * happen after handling a request completion with
+ * blk_zone_write_plug_finish_request() (e.g. with split BIOs
+ * that are chained). In such case, disk_zone_wplug_unplug_bio()
+ * should not attempt to remove the zone write plug until all BIO
+ * completions are seen. Check by looking at the zone write plug
+ * reference count, which is 2 when the plug is unused (one reference
+ * taken when the plug was allocated and another reference taken by the
+ * caller context).
+ */
+ if (atomic_read(&zwplug->ref) > 2)
+ return false;
+
+ /* We can remove zone write plugs for zones that are empty or full. */
+ return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity;
+}
+
+static void disk_remove_zone_wplug(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ unsigned long flags;
+
+ /* If the zone write plug was already removed, we have nothing to do. */
+ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
+ return;
+
+ /*
+ * Mark the zone write plug as unhashed and drop the extra reference we
+ * took when the plug was inserted in the hash table.
+ */
+ zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ hlist_del_init_rcu(&zwplug->node);
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ disk_put_zone_wplug(zwplug);
+}
+
+static void blk_zone_wplug_bio_work(struct work_struct *work);
+
+/*
+ * Get a reference on the write plug for the zone containing @sector.
+ * If the plug does not exist, it is allocated and hashed.
+ * Return a pointer to the zone write plug with the plug spinlock held.
+ */
+static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
+ sector_t sector, gfp_t gfp_mask,
+ unsigned long *flags)
+{
+ unsigned int zno = disk_zone_no(disk, sector);
+ struct blk_zone_wplug *zwplug;
+
+again:
+ zwplug = disk_get_zone_wplug(disk, sector);
+ if (zwplug) {
+ /*
+ * Check that a BIO completion or a zone reset or finish
+ * operation has not already removed the zone write plug from
+ * the hash table and dropped its reference count. In such case,
+ * we need to get a new plug so start over from the beginning.
+ */
+ spin_lock_irqsave(&zwplug->lock, *flags);
+ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
+ spin_unlock_irqrestore(&zwplug->lock, *flags);
+ disk_put_zone_wplug(zwplug);
+ goto again;
+ }
+ return zwplug;
+ }
+
+ /*
+ * Allocate and initialize a zone write plug with an extra reference
+ * so that it is not freed when the zone write plug becomes idle without
+ * the zone being full.
+ */
+ zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask);
+ if (!zwplug)
+ return NULL;
+
+ INIT_HLIST_NODE(&zwplug->node);
+ INIT_LIST_HEAD(&zwplug->link);
+ atomic_set(&zwplug->ref, 2);
+ spin_lock_init(&zwplug->lock);
+ zwplug->flags = 0;
+ zwplug->zone_no = zno;
+ zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
+ bio_list_init(&zwplug->bio_list);
+ INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
+ zwplug->disk = disk;
+
+ spin_lock_irqsave(&zwplug->lock, *flags);
+
+ /*
+ * Insert the new zone write plug in the hash table. This can fail only
+ * if another context already inserted a plug. Retry from the beginning
+ * in such case.
+ */
+ if (!disk_insert_zone_wplug(disk, zwplug)) {
+ spin_unlock_irqrestore(&zwplug->lock, *flags);
+ mempool_free(zwplug, disk->zone_wplugs_pool);
+ goto again;
+ }
+
+ return zwplug;
+}
+
+static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
+ struct bio *bio)
+{
+ struct request_queue *q = zwplug->disk->queue;
+
+ bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
+ bio_io_error(bio);
+ disk_put_zone_wplug(zwplug);
+ blk_queue_exit(q);
+}
+
+/*
+ * Abort (fail) all plugged BIOs of a zone write plug.
+ */
+static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
+{
+ struct bio *bio;
+
+ while ((bio = bio_list_pop(&zwplug->bio_list)))
+ blk_zone_wplug_bio_io_error(zwplug, bio);
+}
+
+/*
+ * Abort (fail) all plugged BIOs of a zone write plug that are not aligned
+ * with the assumed write pointer location of the zone when the BIO will
+ * be unplugged.
+ */
+static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ unsigned int zone_capacity = disk->zone_capacity;
+ unsigned int wp_offset = zwplug->wp_offset;
+ struct bio_list bl = BIO_EMPTY_LIST;
+ struct bio *bio;
+
+ while ((bio = bio_list_pop(&zwplug->bio_list))) {
+ if (wp_offset >= zone_capacity ||
+ (bio_op(bio) != REQ_OP_ZONE_APPEND &&
+ bio_offset_from_zone_start(bio) != wp_offset)) {
+ blk_zone_wplug_bio_io_error(zwplug, bio);
+ continue;
+ }
+
+ wp_offset += bio_sectors(bio);
+ bio_list_add(&bl, bio);
+ }
+
+ bio_list_merge(&zwplug->bio_list, &bl);
+}
+
+static inline void disk_zone_wplug_set_error(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ unsigned long flags;
+
+ if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
+ return;
+
+ /*
+ * At this point, we already have a reference on the zone write plug.
+ * However, since we are going to add the plug to the disk zone write
+ * plugs work list, increase its reference count. This reference will
+ * be dropped in disk_zone_wplugs_work() once the error state is
+ * handled, or in disk_zone_wplug_clear_error() if the zone is reset or
+ * finished.
+ */
+ zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
+ atomic_inc(&zwplug->ref);
+
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+}
+
+static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ unsigned long flags;
+
+ if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
+ return;
+
+ /*
+ * We are racing with the error handling work which drops the reference
+ * on the zone write plug after handling the error state. So remove the
+ * plug from the error list and drop its reference count only if the
+ * error handling has not yet started, that is, if the zone write plug
+ * is still listed.
+ */
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ if (!list_empty(&zwplug->link)) {
+ list_del_init(&zwplug->link);
+ zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
+ disk_put_zone_wplug(zwplug);
+ }
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+}
+
+/*
+ * Set a zone write plug write pointer offset to either 0 (zone reset case)
+ * or to the zone size (zone finish case). This aborts all plugged BIOs, which
+ * is fine because issuing a zone reset or zone finish while writes are
+ * in-flight is a user error that will most likely cause all plugged BIOs to
+ * fail anyway.
+ */
+static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug,
+ unsigned int wp_offset)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ /*
+ * Make sure that a BIO completion or another zone reset or finish
+ * operation has not already removed the plug from the hash table.
+ */
+ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ return;
+ }
+
+ /* Update the zone write pointer and abort all plugged BIOs. */
+ zwplug->wp_offset = wp_offset;
+ disk_zone_wplug_abort(zwplug);
+
+ /*
+ * Updating the write pointer offset puts back the zone
+ * in a good state. So clear the error flag and decrement the
+ * error count if we were in error state.
+ */
+ disk_zone_wplug_clear_error(disk, zwplug);
+
+ /*
+ * The zone write plug now has no BIO plugged: remove it from the
+ * hash table so that it cannot be seen. The plug will be freed
+ * when the last reference is dropped.
+ */
+ if (disk_should_remove_zone_wplug(disk, zwplug))
+ disk_remove_zone_wplug(disk, zwplug);
+
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+}
+
+static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+ unsigned int wp_offset)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ sector_t sector = bio->bi_iter.bi_sector;
+ struct blk_zone_wplug *zwplug;
+
+ /* Conventional zones cannot be reset nor finished. */
+ if (disk_zone_is_conv(disk, sector)) {
+ bio_io_error(bio);
+ return true;
+ }
+
+ /*
+ * If we have a zone write plug, set its write pointer offset to 0
+ * (reset case) or to the zone size (finish case). This will abort all
+ * BIOs plugged for the target zone. It is fine as resetting or
+ * finishing zones while writes are still in-flight will result in the
+ * writes failing anyway.
+ */
+ zwplug = disk_get_zone_wplug(disk, sector);
+ if (zwplug) {
+ disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
+ disk_put_zone_wplug(zwplug);
+ }
+
+ return false;
+}
+
+static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct blk_zone_wplug *zwplug;
+ sector_t sector;
+
+ /*
+ * Set the write pointer offset of all zone write plugs to 0. This will
+ * abort all plugged BIOs. It is fine as resetting zones while writes
+ * are still in-flight will result in the writes failing anyway.
+ */
+ for (sector = 0; sector < get_capacity(disk);
+ sector += disk->queue->limits.chunk_sectors) {
+ zwplug = disk_get_zone_wplug(disk, sector);
+ if (zwplug) {
+ disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
+ disk_put_zone_wplug(zwplug);
+ }
+ }
+
+ return false;
+}
+
+static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
+ struct bio *bio, unsigned int nr_segs)
+{
+ /*
+ * Grab an extra reference on the BIO request queue usage counter.
+	 * This reference will be reused to submit a request for the BIO for
+	 * blk-mq devices. It is dropped if the BIO is failed, and after the BIO
+	 * is issued in the case of BIO-based devices.
+ */
+ percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);
+
+ /*
+ * The BIO is being plugged and thus will have to wait for the on-going
+ * write and for all other writes already plugged. So polling makes
+ * no sense.
+ */
+ bio_clear_polled(bio);
+
+ /*
+ * Reuse the poll cookie field to store the number of segments when
+ * split to the hardware limits.
+ */
+ bio->__bi_nr_segments = nr_segs;
+
+ /*
+ * We always receive BIOs after they are split and ready to be issued.
+ * The block layer passes the parts of a split BIO in order, and the
+	 * user must also issue writes sequentially. So simply add the new BIO
+ * at the tail of the list to preserve the sequential write order.
+ */
+ bio_list_add(&zwplug->bio_list, bio);
+}
+
+/*
+ * Called from bio_attempt_back_merge() when a BIO was merged with a request.
+ */
+void blk_zone_write_plug_bio_merged(struct bio *bio)
+{
+ struct blk_zone_wplug *zwplug;
+ unsigned long flags;
+
+ /*
+ * If the BIO was already plugged, then we were called through
+ * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge().
+ * For this case, we already hold a reference on the zone write plug for
+ * the BIO and blk_zone_write_plug_init_request() will handle the
+ * zone write pointer offset update.
+ */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+ return;
+
+ bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
+
+ /*
+ * Get a reference on the zone write plug of the target zone and advance
+ * the zone write pointer offset. Given that this is a merge, we already
+ * have at least one request and one BIO referencing the zone write
+ * plug. So this should not fail.
+ */
+ zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk,
+ bio->bi_iter.bi_sector);
+ if (WARN_ON_ONCE(!zwplug))
+ return;
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+ zwplug->wp_offset += bio_sectors(bio);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+}
+
+/*
+ * Attempt to merge plugged BIOs with a newly prepared request for a BIO that
+ * already went through zone write plugging (either a new BIO or one that was
+ * unplugged).
+ */
+void blk_zone_write_plug_init_request(struct request *req)
+{
+ sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
+ struct request_queue *q = req->q;
+ struct gendisk *disk = q->disk;
+ unsigned int zone_capacity = disk->zone_capacity;
+ struct blk_zone_wplug *zwplug =
+ disk_get_zone_wplug(disk, blk_rq_pos(req));
+ unsigned long flags;
+ struct bio *bio;
+
+ if (WARN_ON_ONCE(!zwplug))
+ return;
+
+ /*
+ * Indicate that completion of this request needs to be handled with
+ * blk_zone_write_plug_finish_request(), which will drop the reference
+ * on the zone write plug we took above on entry to this function.
+ */
+ req->rq_flags |= RQF_ZONE_WRITE_PLUGGING;
+
+ if (blk_queue_nomerges(q))
+ return;
+
+ /*
+ * Walk through the list of plugged BIOs to check if they can be merged
+ * into the back of the request.
+ */
+ spin_lock_irqsave(&zwplug->lock, flags);
+ while (zwplug->wp_offset < zone_capacity) {
+ bio = bio_list_peek(&zwplug->bio_list);
+ if (!bio)
+ break;
+
+ if (bio->bi_iter.bi_sector != req_back_sector ||
+ !blk_rq_merge_ok(req, bio))
+ break;
+
+ WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
+ !bio->__bi_nr_segments);
+
+ bio_list_pop(&zwplug->bio_list);
+ if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
+ BIO_MERGE_OK) {
+ bio_list_add_head(&zwplug->bio_list, bio);
+ break;
+ }
+
+ /*
+ * Drop the extra reference on the queue usage we got when
+ * plugging the BIO and advance the write pointer offset.
+ */
+ blk_queue_exit(q);
+ zwplug->wp_offset += bio_sectors(bio);
+
+ req_back_sector += bio_sectors(bio);
+ }
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+}
+
+/*
+ * Check and prepare a BIO for submission by incrementing the write pointer
+ * offset of its zone write plug and changing zone append operations into
+ * regular write when zone append emulation is needed.
+ */
+static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
+ struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+ /*
+ * Check that the user is not attempting to write to a full zone.
+	 * We know such a BIO will fail, and that would potentially overflow our
+ * write pointer offset beyond the end of the zone.
+ */
+ if (zwplug->wp_offset >= disk->zone_capacity)
+ goto err;
+
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ /*
+ * Use a regular write starting at the current write pointer.
+ * Similarly to native zone append operations, do not allow
+ * merging.
+ */
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
+ bio->bi_iter.bi_sector += zwplug->wp_offset;
+
+ /*
+ * Remember that this BIO is in fact a zone append operation
+ * so that we can restore its operation code on completion.
+ */
+ bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
+ } else {
+ /*
+ * Check for non-sequential writes early because we avoid a
+ * whole lot of error handling trouble if we don't send it off
+ * to the driver.
+ */
+ if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
+ goto err;
+ }
+
+ /* Advance the zone write pointer offset. */
+ zwplug->wp_offset += bio_sectors(bio);
+
+ return true;
+
+err:
+ /* We detected an invalid write BIO: schedule error recovery. */
+ disk_zone_wplug_set_error(disk, zwplug);
+ kblockd_schedule_work(&disk->zone_wplugs_work);
+ return false;
+}
+
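+/*
+ * Handle zone write plugging of a write, write zeroes or emulated zone append
+ * BIO. Return true if the BIO was plugged or failed, and false if the BIO can
+ * be issued directly by the caller.
+ */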
+static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ sector_t sector = bio->bi_iter.bi_sector;
+ struct blk_zone_wplug *zwplug;
+ gfp_t gfp_mask = GFP_NOIO;
+ unsigned long flags;
+
+ /*
+ * BIOs must be fully contained within a zone so that we use the correct
+ * zone write plug for the entire BIO. For blk-mq devices, the block
+	 * layer should already have done any splitting required to ensure this,
+	 * and this BIO should thus not straddle zone boundaries. For
+ * BIO-based devices, it is the responsibility of the driver to split
+ * the bio before submitting it.
+ */
+ if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
+ bio_io_error(bio);
+ return true;
+ }
+
+ /* Conventional zones do not need write plugging. */
+ if (disk_zone_is_conv(disk, sector)) {
+ /* Zone append to conventional zones is not allowed. */
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ bio_io_error(bio);
+ return true;
+ }
+ return false;
+ }
+
+ if (bio->bi_opf & REQ_NOWAIT)
+ gfp_mask = GFP_NOWAIT;
+
+ zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
+ if (!zwplug) {
+ bio_io_error(bio);
+ return true;
+ }
+
+ /* Indicate that this BIO is being handled using zone write plugging. */
+ bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
+
+ /*
+ * If the zone is already plugged or has a pending error, add the BIO
+ * to the plug BIO list. Otherwise, plug and let the BIO execute.
+ */
+ if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
+ goto plug;
+
+ /*
+ * If an error is detected when preparing the BIO, add it to the BIO
+ * list so that error recovery can deal with it.
+ */
+ if (!blk_zone_wplug_prepare_bio(zwplug, bio))
+ goto plug;
+
+ zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ return false;
+
+plug:
+ zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+ blk_zone_wplug_add_bio(zwplug, bio, nr_segs);
+
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ return true;
+}
+
+/**
+ * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
+ * @bio: The BIO being submitted
+ * @nr_segs: The number of physical segments of @bio
+ *
+ * Handle write, write zeroes and zone append operations requiring emulation
+ * using zone write plugging.
+ *
+ * Return true whenever @bio execution needs to be delayed through the zone
+ * write plug. Otherwise, return false to let the submission path process
+ * @bio normally.
+ */
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ struct block_device *bdev = bio->bi_bdev;
+
+ if (!bdev->bd_disk->zone_wplugs_hash)
+ return false;
+
+ /*
+ * If the BIO already has the plugging flag set, then it was already
+ * handled through this path and this is a submission from the zone
+ * plug bio submit work.
+ */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+ return false;
+
+ /*
+	 * We do not need to do anything special for empty flush BIOs, e.g.
+	 * BIOs such as issued by blkdev_issue_flush(). This is because it is
+ * the responsibility of the user to first wait for the completion of
+ * write operations for flush to have any effect on the persistence of
+ * the written data.
+ */
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return false;
+
+ /*
+ * Regular writes and write zeroes need to be handled through the target
+ * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH
+ * which may need to go through the flush machinery depending on the
+ * target device capabilities. Plugging such writes is fine as the flush
+ * machinery operates at the request level, below the plug, and
+ * completion of the flush sequence will go through the regular BIO
+ * completion, which will handle zone write plugging.
+ * Zone append operations for devices that requested emulation must
+ * also be plugged so that these BIOs can be changed into regular
+ * write BIOs.
+ * Zone reset, reset all and finish commands need special treatment
+ * to correctly track the write pointer offset of zones. These commands
+ * are not plugged as we do not need serialization with write
+ * operations. It is the responsibility of the user to not issue reset
+ * and finish commands when write operations are in flight.
+ */
+ switch (bio_op(bio)) {
+ case REQ_OP_ZONE_APPEND:
+ if (!bdev_emulates_zone_append(bdev))
+ return false;
+ fallthrough;
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ return blk_zone_wplug_handle_write(bio, nr_segs);
+ case REQ_OP_ZONE_RESET:
+ return blk_zone_wplug_handle_reset_or_finish(bio, 0);
+ case REQ_OP_ZONE_FINISH:
+ return blk_zone_wplug_handle_reset_or_finish(bio,
+ bdev_zone_sectors(bdev));
+ case REQ_OP_ZONE_RESET_ALL:
+ return blk_zone_wplug_handle_reset_all(bio);
+ default:
+ return false;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
+
+static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ /*
+ * Take a reference on the zone write plug and schedule the submission
+ * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
+ * reference we take here.
+ */
+ WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
+ atomic_inc(&zwplug->ref);
+ queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
+}
+
+static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ /*
+ * If we had an error, schedule error recovery. The recovery work
+ * will restart submission of plugged BIOs.
+ */
+ if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ kblockd_schedule_work(&disk->zone_wplugs_work);
+ return;
+ }
+
+ /* Schedule submission of the next plugged BIO if we have one. */
+ if (!bio_list_empty(&zwplug->bio_list)) {
+ disk_zone_wplug_schedule_bio_work(disk, zwplug);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ return;
+ }
+
+ zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+
+ /*
+	 * If the zone is full (it was fully written or finished), or empty
+	 * (it was reset), remove its zone write plug from the hash table.
+ */
+ if (disk_should_remove_zone_wplug(disk, zwplug))
+ disk_remove_zone_wplug(disk, zwplug);
+
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+}
+
+void blk_zone_write_plug_bio_endio(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct blk_zone_wplug *zwplug =
+ disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(!zwplug))
+ return;
+
+ /* Make sure we do not see this BIO again by clearing the plug flag. */
+ bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
+
+ /*
+ * If this is a regular write emulating a zone append operation,
+ * restore the original operation code.
+ */
+ if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= REQ_OP_ZONE_APPEND;
+ }
+
+ /*
+ * If the BIO failed, mark the plug as having an error to trigger
+ * recovery.
+ */
+ if (bio->bi_status != BLK_STS_OK) {
+ spin_lock_irqsave(&zwplug->lock, flags);
+ disk_zone_wplug_set_error(disk, zwplug);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ }
+
+ /* Drop the reference we took when the BIO was issued. */
+ disk_put_zone_wplug(zwplug);
+
+ /*
+ * For BIO-based devices, blk_zone_write_plug_finish_request()
+ * is not called. So we need to schedule execution of the next
+ * plugged BIO here.
+ */
+ if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
+ disk_zone_wplug_unplug_bio(disk, zwplug);
+
+ /* Drop the reference we took when entering this function. */
+ disk_put_zone_wplug(zwplug);
+}
+
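+/*
+ * Called when a zoned write request completes: drop the zone write plug
+ * reference taken in blk_zone_write_plug_init_request() and unplug the zone
+ * so that the next plugged BIO, if any, can be submitted.
+ */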
+void blk_zone_write_plug_finish_request(struct request *req)
+{
+ struct gendisk *disk = req->q->disk;
+ struct blk_zone_wplug *zwplug;
+
+ zwplug = disk_get_zone_wplug(disk, req->__sector);
+ if (WARN_ON_ONCE(!zwplug))
+ return;
+
+ req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING;
+
+ /*
+ * Drop the reference we took when the request was initialized in
+ * blk_zone_write_plug_init_request().
+ */
+ disk_put_zone_wplug(zwplug);
+
+ disk_zone_wplug_unplug_bio(disk, zwplug);
+
+ /* Drop the reference we took when entering this function. */
+ disk_put_zone_wplug(zwplug);
+}
+
+static void blk_zone_wplug_bio_work(struct work_struct *work)
+{
+ struct blk_zone_wplug *zwplug =
+ container_of(work, struct blk_zone_wplug, bio_work);
+ struct block_device *bdev;
+ unsigned long flags;
+ struct bio *bio;
+
+ /*
+ * Submit the next plugged BIO. If we do not have any, clear
+ * the plugged flag.
+ */
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ bio = bio_list_pop(&zwplug->bio_list);
+ if (!bio) {
+ zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ goto put_zwplug;
+ }
+
+ if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
+ /* Error recovery will decide what to do with the BIO. */
+ bio_list_add_head(&zwplug->bio_list, bio);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ goto put_zwplug;
+ }
+
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ bdev = bio->bi_bdev;
+ submit_bio_noacct_nocheck(bio);
+
+ /*
+ * blk-mq devices will reuse the extra reference on the request queue
+ * usage counter we took when the BIO was plugged, but the submission
+ * path for BIO-based devices will not do that. So drop this extra
+ * reference here.
+ */
+ if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
+ blk_queue_exit(bdev->bd_disk->queue);
+
+put_zwplug:
+ /* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
+ disk_put_zone_wplug(zwplug);
+}
+
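+/*
+ * Return the write pointer offset of @zone, relative to the zone start and in
+ * units of 512B sectors, or UINT_MAX if the zone has no valid write pointer.
+ */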
+static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
+{
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ return zone->wp - zone->start;
+ case BLK_ZONE_COND_FULL:
+ return zone->len;
+ case BLK_ZONE_COND_EMPTY:
+ return 0;
+ case BLK_ZONE_COND_NOT_WP:
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ default:
+ /*
+ * Conventional, offline and read-only zones do not have a valid
+ * write pointer.
+ */
+ return UINT_MAX;
+ }
+}
+
+static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
+ unsigned int idx, void *data)
+{
+ struct blk_zone *zonep = data;
+
+ *zonep = *zone;
+ return 0;
+}
+
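+/*
+ * Recover a zone write plug that is in the error state: refresh the write
+ * pointer offset using a report zones, abort plugged BIOs that are no longer
+ * aligned with it and restart submission of the remaining plugged BIOs.
+ */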
+static void disk_zone_wplug_handle_error(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ sector_t zone_start_sector =
+ bdev_zone_sectors(disk->part0) * zwplug->zone_no;
+ unsigned int noio_flag;
+ struct blk_zone zone;
+ unsigned long flags;
+ int ret;
+
+ /* Get the current zone information from the device. */
+ noio_flag = memalloc_noio_save();
+ ret = disk->fops->report_zones(disk, zone_start_sector, 1,
+ blk_zone_wplug_report_zone_cb, &zone);
+ memalloc_noio_restore(noio_flag);
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ /*
+ * A zone reset or finish may have cleared the error already. In such
+ * case, do nothing as the report zones may have seen the "old" write
+ * pointer value before the reset/finish operation completed.
+ */
+ if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
+ goto unlock;
+
+ zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
+
+ if (ret != 1) {
+ /*
+ * We failed to get the zone information, meaning that something
+ * is likely really wrong with the device. Abort all remaining
+		 * plugged BIOs as otherwise we could end up waiting forever on
+		 * plugged BIOs to complete if there is an ongoing queue freeze.
+ */
+ disk_zone_wplug_abort(zwplug);
+ goto unplug;
+ }
+
+ /* Update the zone write pointer offset. */
+ zwplug->wp_offset = blk_zone_wp_offset(&zone);
+ disk_zone_wplug_abort_unaligned(disk, zwplug);
+
+ /* Restart BIO submission if we still have any BIO left. */
+ if (!bio_list_empty(&zwplug->bio_list)) {
+ disk_zone_wplug_schedule_bio_work(disk, zwplug);
+ goto unlock;
+ }
+
+unplug:
+ zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+ if (disk_should_remove_zone_wplug(disk, zwplug))
+ disk_remove_zone_wplug(disk, zwplug);
+
+unlock:
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+}
+
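+/*
+ * Per-disk work function processing all zone write plugs that are in the
+ * error state.
+ */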
+static void disk_zone_wplugs_work(struct work_struct *work)
+{
+ struct gendisk *disk =
+ container_of(work, struct gendisk, zone_wplugs_work);
+ struct blk_zone_wplug *zwplug;
+ unsigned long flags;
+
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+
+ while (!list_empty(&disk->zone_wplugs_err_list)) {
+ zwplug = list_first_entry(&disk->zone_wplugs_err_list,
+ struct blk_zone_wplug, link);
+ list_del_init(&zwplug->link);
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+
+ disk_zone_wplug_handle_error(disk, zwplug);
+ disk_put_zone_wplug(zwplug);
+
+ spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+}
+
+static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
+{
+ return 1U << disk->zone_wplugs_hash_bits;
+}
+
+void disk_init_zone_resources(struct gendisk *disk)
+{
+ spin_lock_init(&disk->zone_wplugs_lock);
+ INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
+ INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
+}
+
+/*
+ * For the size of a disk zone write plug hash table, use the size of the
+ * zone write plug mempool, which is the maximum of the disk open zones and
+ * active zones limits. But do not exceed 4KB (512 hlist head entries), that is,
+ * 9 bits. For a disk that has no limits, mempool size defaults to 128.
+ */
+#define BLK_ZONE_WPLUG_MAX_HASH_BITS 9
+#define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE 128
+
+static int disk_alloc_zone_resources(struct gendisk *disk,
+ unsigned int pool_size)
+{
+ unsigned int i;
+
+ disk->zone_wplugs_hash_bits =
+ min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
+
+ disk->zone_wplugs_hash =
+ kcalloc(disk_zone_wplugs_hash_size(disk),
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!disk->zone_wplugs_hash)
+ return -ENOMEM;
+
+ for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
+ INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]);
+
+ disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size,
+ sizeof(struct blk_zone_wplug));
+ if (!disk->zone_wplugs_pool)
+ goto free_hash;
+
+ disk->zone_wplugs_wq =
+ alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ pool_size, disk->disk_name);
+ if (!disk->zone_wplugs_wq)
+ goto destroy_pool;
+
+ return 0;
+
+destroy_pool:
+ mempool_destroy(disk->zone_wplugs_pool);
+ disk->zone_wplugs_pool = NULL;
+free_hash:
+ kfree(disk->zone_wplugs_hash);
+ disk->zone_wplugs_hash = NULL;
+ disk->zone_wplugs_hash_bits = 0;
+ return -ENOMEM;
+}
+
+static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
+{
+ struct blk_zone_wplug *zwplug;
+ unsigned int i;
+
+ if (!disk->zone_wplugs_hash)
+ return;
+
+ /* Free all the zone write plugs we have. */
+ for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
+ while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
+ zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
+ struct blk_zone_wplug, node);
+ atomic_inc(&zwplug->ref);
+ disk_remove_zone_wplug(disk, zwplug);
+ disk_put_zone_wplug(zwplug);
+ }
+ }
+
+ kfree(disk->zone_wplugs_hash);
+ disk->zone_wplugs_hash = NULL;
+ disk->zone_wplugs_hash_bits = 0;
+}
+
+void disk_free_zone_resources(struct gendisk *disk)
+{
+ cancel_work_sync(&disk->zone_wplugs_work);
+
+ if (disk->zone_wplugs_wq) {
+ destroy_workqueue(disk->zone_wplugs_wq);
+ disk->zone_wplugs_wq = NULL;
+ }
+
+ disk_destroy_zone_wplugs_hash_table(disk);
+
+ /*
+ * Wait for the zone write plugs to be RCU-freed before
+	 * destroying the mempool.
+ */
+ rcu_barrier();
+
+ mempool_destroy(disk->zone_wplugs_pool);
+ disk->zone_wplugs_pool = NULL;
+
kfree(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
- kfree(disk->seq_zones_wlock);
- disk->seq_zones_wlock = NULL;
+ disk->zone_capacity = 0;
+ disk->nr_zones = 0;
+}
+
+static inline bool disk_need_zone_resources(struct gendisk *disk)
+{
+ /*
+ * All mq zoned devices need zone resources so that the block layer
+ * can automatically handle write BIO plugging. BIO-based device drivers
+ * (e.g. DM devices) are normally responsible for handling zone write
+ * ordering and do not need zone resources, unless the driver requires
+ * zone append emulation.
+ */
+ return queue_is_mq(disk->queue) ||
+ queue_emulates_zone_append(disk->queue);
+}
+
+static int disk_revalidate_zone_resources(struct gendisk *disk,
+ unsigned int nr_zones)
+{
+ struct queue_limits *lim = &disk->queue->limits;
+ unsigned int pool_size;
+
+ if (!disk_need_zone_resources(disk))
+ return 0;
+
+ /*
+ * If the device has no limit on the maximum number of open and active
+ * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE.
+ */
+ pool_size = max(lim->max_open_zones, lim->max_active_zones);
+ if (!pool_size)
+ pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones);
+
+ if (!disk->zone_wplugs_hash)
+ return disk_alloc_zone_resources(disk, pool_size);
+
+ return 0;
}
struct blk_revalidate_zone_args {
struct gendisk *disk;
unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
unsigned int nr_zones;
+ unsigned int zone_capacity;
sector_t sector;
};
/*
+ * Update the disk zone resources information and device queue limits.
+ * The disk queue is frozen when this is executed.
+ */
+static int disk_update_zone_resources(struct gendisk *disk,
+ struct blk_revalidate_zone_args *args)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int nr_seq_zones, nr_conv_zones = 0;
+ unsigned int pool_size;
+ struct queue_limits lim;
+
+ disk->nr_zones = args->nr_zones;
+ disk->zone_capacity = args->zone_capacity;
+ swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
+ if (disk->conv_zones_bitmap)
+ nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
+ disk->nr_zones);
+ if (nr_conv_zones >= disk->nr_zones) {
+ pr_warn("%s: Invalid number of conventional zones %u / %u\n",
+ disk->disk_name, nr_conv_zones, disk->nr_zones);
+ return -ENODEV;
+ }
+
+ if (!disk->zone_wplugs_pool)
+ return 0;
+
+ /*
+ * If the device has no limit on the maximum number of open and active
+ * zones, set its max open zone limit to the mempool size to indicate
+ * to the user that there is a potential performance impact due to
+ * dynamic zone write plug allocation when simultaneously writing to
+ * more zones than the size of the mempool.
+ */
+ lim = queue_limits_start_update(q);
+
+ nr_seq_zones = disk->nr_zones - nr_conv_zones;
+ pool_size = max(lim.max_open_zones, lim.max_active_zones);
+ if (!pool_size)
+ pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);
+
+ mempool_resize(disk->zone_wplugs_pool, pool_size);
+
+ if (!lim.max_open_zones && !lim.max_active_zones) {
+ if (pool_size < nr_seq_zones)
+ lim.max_open_zones = pool_size;
+ else
+ lim.max_open_zones = 0;
+ }
+
+ return queue_limits_commit_update(q, &lim);
+}
+
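+/*
+ * Check a conventional zone reported by the device and, if the disk needs
+ * zone resources, mark the zone in the conventional zones bitmap.
+ */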
+static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
+ struct blk_revalidate_zone_args *args)
+{
+ struct gendisk *disk = args->disk;
+ struct request_queue *q = disk->queue;
+
+ if (zone->capacity != zone->len) {
+ pr_warn("%s: Invalid conventional zone capacity\n",
+ disk->disk_name);
+ return -ENODEV;
+ }
+
+ if (!disk_need_zone_resources(disk))
+ return 0;
+
+ if (!args->conv_zones_bitmap) {
+ args->conv_zones_bitmap =
+ blk_alloc_zone_bitmap(q->node, args->nr_zones);
+ if (!args->conv_zones_bitmap)
+ return -ENOMEM;
+ }
+
+ set_bit(idx, args->conv_zones_bitmap);
+
+ return 0;
+}
+
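+/*
+ * Check a sequential write required zone reported by the device: verify that
+ * the zone capacity is constant and pre-allocate a zone write plug if the
+ * zone is neither empty nor full.
+ */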
+static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
+ struct blk_revalidate_zone_args *args)
+{
+ struct gendisk *disk = args->disk;
+ struct blk_zone_wplug *zwplug;
+ unsigned int wp_offset;
+ unsigned long flags;
+
+ /*
+ * Remember the capacity of the first sequential zone and check
+ * if it is constant for all zones.
+ */
+ if (!args->zone_capacity)
+ args->zone_capacity = zone->capacity;
+ if (zone->capacity != args->zone_capacity) {
+ pr_warn("%s: Invalid variable zone capacity\n",
+ disk->disk_name);
+ return -ENODEV;
+ }
+
+ /*
+	 * We need to track the write pointer of all zones that are neither
+	 * empty nor full. So make sure we have a zone write plug for
+	 * such zones if the device has a zone write plug hash table.
+ */
+ if (!disk->zone_wplugs_hash)
+ return 0;
+
+ wp_offset = blk_zone_wp_offset(zone);
+ if (!wp_offset || wp_offset >= zone->capacity)
+ return 0;
+
+ zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags);
+ if (!zwplug)
+ return -ENOMEM;
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+ disk_put_zone_wplug(zwplug);
+
+ return 0;
+}
+
+/*
* Helper function to check the validity of zones of a zoned block device.
*/
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
@@ -449,9 +1732,9 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
{
struct blk_revalidate_zone_args *args = data;
struct gendisk *disk = args->disk;
- struct request_queue *q = disk->queue;
sector_t capacity = get_capacity(disk);
- sector_t zone_sectors = q->limits.chunk_sectors;
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+ int ret;
/* Check for bad zones and holes in the zone report */
if (zone->start != args->sector) {
@@ -482,66 +1765,57 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
+ if (!zone->capacity || zone->capacity > zone->len) {
+ pr_warn("%s: Invalid zone capacity\n",
+ disk->disk_name);
+ return -ENODEV;
+ }
+
/* Check zone type */
switch (zone->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
- if (!args->conv_zones_bitmap) {
- args->conv_zones_bitmap =
- blk_alloc_zone_bitmap(q->node, args->nr_zones);
- if (!args->conv_zones_bitmap)
- return -ENOMEM;
- }
- set_bit(idx, args->conv_zones_bitmap);
+ ret = blk_revalidate_conv_zone(zone, idx, args);
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
- if (!args->seq_zones_wlock) {
- args->seq_zones_wlock =
- blk_alloc_zone_bitmap(q->node, args->nr_zones);
- if (!args->seq_zones_wlock)
- return -ENOMEM;
- }
+ ret = blk_revalidate_seq_zone(zone, idx, args);
break;
case BLK_ZONE_TYPE_SEQWRITE_PREF:
default:
pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
disk->disk_name, (int)zone->type, zone->start);
- return -ENODEV;
+ ret = -ENODEV;
}
- args->sector += zone->len;
- return 0;
+ if (!ret)
+ args->sector += zone->len;
+
+ return ret;
}
/**
- * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
+ * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs
* @disk: Target disk
- * @update_driver_data: Callback to update driver data on the frozen disk
*
- * Helper function for low-level device drivers to check and (re) allocate and
- * initialize a disk request queue zone bitmaps. This functions should normally
- * be called within the disk ->revalidate method for blk-mq based drivers.
+ * Helper function for low-level device drivers to check, (re) allocate and
+ * initialize resources used for managing zoned disks. This function should
+ * normally be called by blk-mq based drivers when a zoned gendisk is probed
+ * and when the zone configuration of the gendisk changes (e.g. after a format).
* Before calling this function, the device driver must already have set the
* device zone size (chunk_sector limit) and the max zone append limit.
- * For BIO based drivers, this function cannot be used. BIO based device drivers
- * only need to set disk->nr_zones so that the sysfs exposed value is correct.
- * If the @update_driver_data callback function is not NULL, the callback is
- * executed with the device request queue frozen after all zones have been
- * checked.
+ * BIO based drivers can also use this function as long as the device queue
+ * can be safely frozen.
*/
-int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk))
+int blk_revalidate_disk_zones(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
sector_t zone_sectors = q->limits.chunk_sectors;
sector_t capacity = get_capacity(disk);
struct blk_revalidate_zone_args args = { };
unsigned int noio_flag;
- int ret;
+ int ret = -ENOMEM;
if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
return -EIO;
- if (WARN_ON_ONCE(!queue_is_mq(q)))
- return -EIO;
if (!capacity)
return -ENODEV;
@@ -556,7 +1830,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
return -ENODEV;
}
- if (!q->limits.max_zone_append_sectors) {
+ if (!queue_max_zone_append_sectors(q)) {
pr_warn("%s: Invalid 0 maximum zone append limit\n",
disk->disk_name);
return -ENODEV;
@@ -569,6 +1843,11 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
args.disk = disk;
args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
noio_flag = memalloc_noio_save();
+ ret = disk_revalidate_zone_resources(disk, args.nr_zones);
+ if (ret) {
+ memalloc_noio_restore(noio_flag);
+ return ret;
+ }
ret = disk->fops->report_zones(disk, 0, UINT_MAX,
blk_revalidate_zone_cb, &args);
if (!ret) {
@@ -588,26 +1867,59 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
}
/*
- * Install the new bitmaps and update nr_zones only once the queue is
- * stopped and all I/Os are completed (i.e. a scheduler is not
- * referencing the bitmaps).
+ * Set the new disk zone parameters only once the queue is frozen and
+ * all I/Os are completed.
*/
blk_mq_freeze_queue(q);
- if (ret > 0) {
- disk->nr_zones = args.nr_zones;
- swap(disk->seq_zones_wlock, args.seq_zones_wlock);
- swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
- if (update_driver_data)
- update_driver_data(disk);
- ret = 0;
- } else {
+ if (ret > 0)
+ ret = disk_update_zone_resources(disk, &args);
+ else
pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
- disk_free_zone_bitmaps(disk);
- }
+ if (ret)
+ disk_free_zone_resources(disk);
blk_mq_unfreeze_queue(q);
- kfree(args.seq_zones_wlock);
kfree(args.conv_zones_bitmap);
+
return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
+
+#ifdef CONFIG_BLK_DEBUG_FS
+
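+/*
+ * Show the state (zone number, flags, reference count, write pointer offset
+ * and number of plugged BIOs) of all hashed zone write plugs through debugfs.
+ */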
+int queue_zone_wplugs_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct gendisk *disk = q->disk;
+ struct blk_zone_wplug *zwplug;
+ unsigned int zwp_wp_offset, zwp_flags;
+ unsigned int zwp_zone_no, zwp_ref;
+ unsigned int zwp_bio_list_size, i;
+ unsigned long flags;
+
+ if (!disk->zone_wplugs_hash)
+ return 0;
+
+ rcu_read_lock();
+ for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
+ hlist_for_each_entry_rcu(zwplug,
+ &disk->zone_wplugs_hash[i], node) {
+ spin_lock_irqsave(&zwplug->lock, flags);
+ zwp_zone_no = zwplug->zone_no;
+ zwp_flags = zwplug->flags;
+ zwp_ref = atomic_read(&zwplug->ref);
+ zwp_wp_offset = zwplug->wp_offset;
+ zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ seq_printf(m, "%u 0x%x %u %u %u\n",
+ zwp_zone_no, zwp_flags, zwp_ref,
+ zwp_wp_offset, zwp_bio_list_size);
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+#endif
diff --git a/block/blk.h b/block/blk.h
index 5cac4e29ae17..189bc25beb50 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -38,6 +38,7 @@ void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
+void bio_await_chain(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
@@ -269,6 +270,14 @@ static inline void bio_integrity_free(struct bio *bio)
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
+enum bio_merge_status {
+ BIO_MERGE_OK,
+ BIO_MERGE_NONE,
+ BIO_MERGE_FAILED,
+};
+
+enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
@@ -339,7 +348,6 @@ int ll_back_merge_fn(struct request *req, struct bio *bio,
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
-void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
@@ -358,6 +366,7 @@ static inline bool blk_do_io_stat(struct request *rq)
}
void update_io_ticks(struct block_device *part, unsigned long now, bool end);
+unsigned int part_in_flight(struct block_device *part);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
@@ -379,17 +388,6 @@ static inline void ioc_clear_queue(struct request_queue *q)
}
#endif /* CONFIG_BLK_ICQ */
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
-extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
- const char *page, size_t count);
-extern void blk_throtl_bio_endio(struct bio *bio);
-extern void blk_throtl_stat_add(struct request *rq, u64 time);
-#else
-static inline void blk_throtl_bio_endio(struct bio *bio) { }
-static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
-#endif
-
struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
static inline bool blk_queue_may_bounce(struct request_queue *q)
@@ -408,13 +406,85 @@ static inline struct bio *blk_queue_bounce(struct bio *bio,
}
#ifdef CONFIG_BLK_DEV_ZONED
-void disk_free_zone_bitmaps(struct gendisk *disk);
+void disk_init_zone_resources(struct gendisk *disk);
+void disk_free_zone_resources(struct gendisk *disk);
+static inline bool bio_zone_write_plugging(struct bio *bio)
+{
+ return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+}
+static inline bool bio_is_zone_append(struct bio *bio)
+{
+ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+}
+void blk_zone_write_plug_bio_merged(struct bio *bio);
+void blk_zone_write_plug_init_request(struct request *rq);
+static inline void blk_zone_update_request_bio(struct request *rq,
+ struct bio *bio)
+{
+ /*
+ * For zone append requests, the request sector indicates the location
+ * at which the BIO data was written. Return this value to the BIO
+ * issuer through the BIO iter sector.
+ * For plugged zone writes, which include emulated zone append, we need
+ * the original BIO sector so that blk_zone_write_plug_bio_endio() can
+ * lookup the zone write plug.
+ */
+ if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
+ bio->bi_iter.bi_sector = rq->__sector;
+}
+void blk_zone_write_plug_bio_endio(struct bio *bio);
+static inline void blk_zone_bio_endio(struct bio *bio)
+{
+ /*
+ * For write BIOs to zoned devices, signal the completion of the BIO so
+ * that the next write BIO can be submitted by zone write plugging.
+ */
+ if (bio_zone_write_plugging(bio))
+ blk_zone_write_plug_bio_endio(bio);
+}
+
+void blk_zone_write_plug_finish_request(struct request *rq);
+static inline void blk_zone_finish_request(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
+ blk_zone_write_plug_finish_request(rq);
+}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
-static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
+static inline void disk_init_zone_resources(struct gendisk *disk)
+{
+}
+static inline void disk_free_zone_resources(struct gendisk *disk)
+{
+}
+static inline bool bio_zone_write_plugging(struct bio *bio)
+{
+ return false;
+}
+static inline bool bio_is_zone_append(struct bio *bio)
+{
+ return false;
+}
+static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
+{
+}
+static inline void blk_zone_write_plug_init_request(struct request *rq)
+{
+}
+static inline void blk_zone_update_request_bio(struct request *rq,
+ struct bio *bio)
+{
+}
+static inline void blk_zone_bio_endio(struct bio *bio)
+{
+}
+static inline void blk_zone_finish_request(struct request *rq)
+{
+}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
@@ -429,6 +499,8 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
+void bdev_unhash(struct block_device *bdev);
+void bdev_drop(struct block_device *bdev);
int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
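
The blk.h hunk above follows the usual CONFIG-gated stub pattern: real declarations under CONFIG_BLK_DEV_ZONED, empty static inlines otherwise, so call sites such as blk_zone_bio_endio() users stay free of #ifdefs. A self-contained userspace sketch of the same pattern (the names below are illustrative only, not kernel API):

/* stub_pattern.c - compile-time stub pattern, as used in block/blk.h.
 * Build: cc -Wall -o stub_pattern stub_pattern.c              (stubs)
 *        cc -Wall -DHAVE_ZONED -o stub_pattern stub_pattern.c (real code)
 */
#include <stdbool.h>
#include <stdio.h>

struct bio { bool zone_write_plugging; };

#ifdef HAVE_ZONED
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio->zone_write_plugging;
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
	if (bio_zone_write_plugging(bio))
		printf("completing plugged zone write\n");
}
#else
/* Feature compiled out: empty stubs keep callers unconditional. */
static inline bool bio_zone_write_plugging(struct bio *bio) { return false; }
static inline void blk_zone_bio_endio(struct bio *bio) { }
#endif

int main(void)
{
	struct bio bio = { .zone_write_plugging = true };

	/* No #ifdef needed here, regardless of the build configuration. */
	blk_zone_bio_endio(&bio);
	return 0;
}
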
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index bcc7dee6abce..ee738d129a9f 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = {
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
* @name: device to give bsg device
+ * @lim: queue limits for the bsg queue
* @job_fn: bsg job handler
* @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size)
{
struct bsg_set *bset;
struct blk_mq_tag_set *set;
@@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
- q = blk_mq_alloc_queue(set, NULL, NULL);
+ q = blk_mq_alloc_queue(set, lim, NULL);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
diff --git a/block/early-lookup.c b/block/early-lookup.c
index 3effbd0d35e9..3fb57f7d2b12 100644
--- a/block/early-lookup.c
+++ b/block/early-lookup.c
@@ -78,7 +78,7 @@ static int __init devt_from_partuuid(const char *uuid_str, dev_t *devt)
* to the partition number found by UUID.
*/
*devt = part_devt(dev_to_disk(dev),
- dev_to_bdev(dev)->bd_partno + offset);
+ bdev_partno(dev_to_bdev(dev)) + offset);
} else {
*devt = dev->devt;
}
diff --git a/block/elevator.c b/block/elevator.c
index 5ff093cb3cf8..f64ebd726e58 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -83,13 +83,6 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_bio_merge_ok);
-static inline bool elv_support_features(struct request_queue *q,
- const struct elevator_type *e)
-{
- return (q->required_elevator_features & e->elevator_features) ==
- q->required_elevator_features;
-}
-
/**
* elevator_match - Check whether @e's name or alias matches @name
* @e: Scheduler to test
@@ -120,7 +113,7 @@ static struct elevator_type *elevator_find_get(struct request_queue *q,
spin_lock(&elv_list_lock);
e = __elevator_find(name);
- if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
+ if (e && (!elevator_tryget(e)))
e = NULL;
spin_unlock(&elv_list_lock);
return e;
@@ -580,34 +573,8 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
}
/*
- * Get the first elevator providing the features required by the request queue.
- * Default to "none" if no matching elevator is found.
- */
-static struct elevator_type *elevator_get_by_features(struct request_queue *q)
-{
- struct elevator_type *e, *found = NULL;
-
- spin_lock(&elv_list_lock);
-
- list_for_each_entry(e, &elv_list, list) {
- if (elv_support_features(q, e)) {
- found = e;
- break;
- }
- }
-
- if (found && !elevator_tryget(found))
- found = NULL;
-
- spin_unlock(&elv_list_lock);
- return found;
-}
-
-/*
- * For a device queue that has no required features, use the default elevator
- * settings. Otherwise, use the first elevator available matching the required
- * features. If no suitable elevator is find or if the chosen elevator
- * initialization fails, fall back to the "none" elevator (no elevator).
+ * Use the default elevator settings. If the chosen elevator initialization
+ * fails, fall back to the "none" elevator (no elevator).
*/
void elevator_init_mq(struct request_queue *q)
{
@@ -622,10 +589,7 @@ void elevator_init_mq(struct request_queue *q)
if (unlikely(q->elevator))
return;
- if (!q->required_elevator_features)
- e = elevator_get_default(q);
- else
- e = elevator_get_by_features(q);
+ e = elevator_get_default(q);
if (!e)
return;
@@ -781,7 +745,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
list_for_each_entry(e, &elv_list, list) {
if (e == cur)
len += sprintf(name+len, "[%s] ", e->elevator_name);
- else if (elv_support_features(q, e))
+ else
len += sprintf(name+len, "%s ", e->elevator_name);
}
spin_unlock(&elv_list_lock);
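
With elv_support_features() and elevator_get_by_features() gone, elv_iosched_show() now lists every registered scheduler rather than only those matching the queue's required features, and any of them can be selected through the long-standing queue/scheduler sysfs attribute. A small sketch that reads or switches it (switching requires root):

/* set_scheduler.c - show or set a disk's I/O scheduler via sysfs.
 * Build: cc -Wall -o set_scheduler set_scheduler.c
 * Usage: ./set_scheduler sda              (list; current one in [brackets])
 *        ./set_scheduler sda mq-deadline  (switch, needs root)
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256], buf[256];
	FILE *f;

	if (argc < 2 || argc > 3) {
		fprintf(stderr, "usage: %s <disk> [scheduler]\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/block/%s/queue/scheduler", argv[1]);

	if (argc == 3) {
		f = fopen(path, "w");
		if (!f || fprintf(f, "%s\n", argv[2]) < 0 || fclose(f)) {
			perror(path);
			return 1;
		}
	}
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);
	fclose(f);
	return 0;
}
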
diff --git a/block/elevator.h b/block/elevator.h
index 7ca3d7b6ed82..e9a050a96e53 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -74,7 +74,6 @@ struct elevator_type
struct elv_fs_entry *elevator_attrs;
const char *elevator_name;
const char *elevator_alias;
- const unsigned int elevator_features;
struct module *elevator_owner;
#ifdef CONFIG_BLK_DEBUG_FS
const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
diff --git a/block/fops.c b/block/fops.c
index 679d9b752fe8..376265935714 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -44,18 +44,15 @@ static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
#define DIO_INLINE_BIO_VECS 4
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
- struct iov_iter *iter, unsigned int nr_pages)
+ struct iov_iter *iter, struct block_device *bdev,
+ unsigned int nr_pages)
{
- struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
loff_t pos = iocb->ki_pos;
bool should_dirty = false;
struct bio bio;
ssize_t ret;
- if (blkdev_dio_unaligned(bdev, pos, iter))
- return -EINVAL;
-
if (nr_pages <= DIO_INLINE_BIO_VECS)
vecs = inline_vecs;
else {
@@ -161,9 +158,8 @@ static void blkdev_bio_end_io(struct bio *bio)
}
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
- unsigned int nr_pages)
+ struct block_device *bdev, unsigned int nr_pages)
{
- struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
@@ -172,9 +168,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos;
int ret = 0;
- if (blkdev_dio_unaligned(bdev, pos, iter))
- return -EINVAL;
-
if (iocb->ki_flags & IOCB_ALLOC_CACHE)
opf |= REQ_ALLOC_CACHE;
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
@@ -302,9 +295,9 @@ static void blkdev_bio_end_io_async(struct bio *bio)
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
struct iov_iter *iter,
+ struct block_device *bdev,
unsigned int nr_pages)
{
- struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
bool is_read = iov_iter_rw(iter) == READ;
blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
struct blkdev_dio *dio;
@@ -312,9 +305,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
loff_t pos = iocb->ki_pos;
int ret = 0;
- if (blkdev_dio_unaligned(bdev, pos, iter))
- return -EINVAL;
-
if (iocb->ki_flags & IOCB_ALLOC_CACHE)
opf |= REQ_ALLOC_CACHE;
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
@@ -368,18 +358,23 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
+ struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
unsigned int nr_pages;
if (!iov_iter_count(iter))
return 0;
+ if (blkdev_dio_unaligned(bdev, iocb->ki_pos, iter))
+ return -EINVAL;
+
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
if (likely(nr_pages <= BIO_MAX_VECS)) {
if (is_sync_kiocb(iocb))
- return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
- return __blkdev_direct_IO_async(iocb, iter, nr_pages);
+ return __blkdev_direct_IO_simple(iocb, iter, bdev,
+ nr_pages);
+ return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
}
- return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
+ return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
@@ -390,7 +385,7 @@ static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->bdev = bdev;
iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
- if (iomap->offset >= isize)
+ if (offset >= isize)
return -EIO;
iomap->type = IOMAP_MAPPED;
iomap->addr = iomap->offset;
@@ -668,8 +663,8 @@ static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
- struct inode *bd_inode = bdev->bd_inode;
+ struct inode *bd_inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(bd_inode);
loff_t size = bdev_nr_bytes(bdev);
size_t shorted = 0;
ssize_t ret;
@@ -863,6 +858,7 @@ const struct file_operations def_blk_fops = {
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = blkdev_fallocate,
+ .fop_flags = FOP_BUFFER_RASYNC,
};
static __init int blkdev_init(void)
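
blkdev_direct_IO() now resolves the block device once and performs the blkdev_dio_unaligned() check before dispatching to the simple, async or plugged submission paths, so misaligned direct I/O fails early with -EINVAL. From userspace the rule is unchanged: with O_DIRECT, the file offset and the buffer must be aligned to the logical block size. A minimal aligned read, for illustration:

/* direct_read.c - read one logical block from a block device with O_DIRECT.
 * The offset and buffer alignment here is exactly what the kernel's
 * blkdev_dio_unaligned() check enforces.
 * Build: cc -Wall -o direct_read direct_read.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, lbs = 0;
	void *buf;
	ssize_t n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/<disk>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKSSZGET, &lbs) < 0) {	/* logical block size */
		perror("BLKSSZGET");
		return 1;
	}
	if (posix_memalign(&buf, lbs, lbs)) {	/* aligned buffer */
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	n = pread(fd, buf, lbs, 0);		/* aligned offset */
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes (logical block size %d)\n", n, lbs);
	free(buf);
	close(fd);
	return 0;
}
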
diff --git a/block/genhd.c b/block/genhd.c
index bb29a68e1d67..8f1f3c6b4d67 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -118,7 +118,7 @@ static void part_stat_read_all(struct block_device *part,
}
}
-static unsigned int part_in_flight(struct block_device *part)
+unsigned int part_in_flight(struct block_device *part)
{
unsigned int inflight = 0;
int cpu;
@@ -345,9 +345,7 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
struct file *file;
int ret = 0;
- if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
- return -EINVAL;
- if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+ if (!disk_has_partscan(disk))
return -EINVAL;
if (disk->open_partitions)
return -EBUSY;
@@ -413,7 +411,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
elevator_init_mq(disk->queue);
/* Mark bdev as having a submit_bio, if needed */
- disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL;
+ if (disk->fops->submit_bio)
+ bdev_set_flag(disk->part0, BD_HAS_SUBMIT_BIO);
/*
* If the driver provides an explicit major number it also must provide
@@ -503,8 +502,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
goto out_unregister_bdi;
/* Make sure the first partition scan will be proceed */
- if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
- !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+ if (get_capacity(disk) && disk_has_partscan(disk))
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev_add(disk->part0, ddev->devt);
@@ -656,7 +654,7 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
xa_for_each(&disk->part_tbl, idx, part)
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
mutex_unlock(&disk->open_mutex);
/*
@@ -745,7 +743,7 @@ void invalidate_disk(struct gendisk *disk)
struct block_device *bdev = disk->part0;
invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
+ bdev->bd_mapping->wb_err = 0;
set_capacity(disk, 0);
}
EXPORT_SYMBOL(invalidate_disk);
@@ -954,15 +952,10 @@ ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct block_device *bdev = dev_to_bdev(dev);
- struct request_queue *q = bdev_get_queue(bdev);
struct disk_stats stat;
unsigned int inflight;
- if (queue_is_mq(q))
- inflight = blk_mq_in_flight(q, bdev);
- else
- inflight = part_in_flight(bdev);
-
+ inflight = part_in_flight(bdev);
if (inflight) {
part_stat_lock();
update_io_ticks(bdev, jiffies, true);
@@ -1047,6 +1040,12 @@ static ssize_t diskseq_show(struct device *dev,
return sprintf(buf, "%llu\n", disk->diskseq);
}
+static ssize_t partscan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", disk_has_partscan(dev_to_disk(dev)));
+}
+
static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
@@ -1060,12 +1059,14 @@ static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
+static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
+ return sprintf(buf, "%d\n",
+ bdev_test_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL));
}
ssize_t part_fail_store(struct device *dev,
@@ -1074,9 +1075,12 @@ ssize_t part_fail_store(struct device *dev,
{
int i;
- if (count > 0 && sscanf(buf, "%d", &i) > 0)
- dev_to_bdev(dev)->bd_make_it_fail = i;
-
+ if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+ if (i)
+ bdev_set_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ else
+ bdev_clear_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ }
return count;
}
@@ -1106,6 +1110,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_events_async.attr,
&dev_attr_events_poll_msecs.attr,
&dev_attr_diskseq.attr,
+ &dev_attr_partscan.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@@ -1182,7 +1187,7 @@ static void disk_release(struct device *dev)
disk_release_events(disk);
kfree(disk->random);
- disk_free_zone_bitmaps(disk);
+ disk_free_zone_resources(disk);
xa_destroy(&disk->part_tbl);
disk->queue->disk = NULL;
@@ -1191,7 +1196,7 @@ static void disk_release(struct device *dev)
if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
disk->fops->free_disk(disk);
- iput(disk->part0->bd_inode); /* frees the disk */
+ bdev_drop(disk->part0); /* frees the disk */
}
static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -1251,11 +1256,8 @@ static int diskstats_show(struct seq_file *seqf, void *v)
xa_for_each(&gp->part_tbl, idx, hd) {
if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
continue;
- if (queue_is_mq(gp->queue))
- inflight = blk_mq_in_flight(gp->queue, hd);
- else
- inflight = part_in_flight(hd);
+ inflight = part_in_flight(hd);
if (inflight) {
part_stat_lock();
update_io_ticks(hd, jiffies, true);
@@ -1364,6 +1366,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
if (blkcg_init_disk(disk))
goto out_erase_part0;
+ disk_init_zone_resources(disk);
rand_initialize_disk(disk);
disk_to_dev(disk)->class = &block_class;
disk_to_dev(disk)->type = &disk_type;
@@ -1381,7 +1384,7 @@ out_erase_part0:
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
disk->part0->bd_disk = NULL;
- iput(disk->part0->bd_inode);
+ bdev_drop(disk->part0);
out_free_bdi:
bdi_put(disk->bdi);
out_free_bioset:
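
The new read-only partscan attribute exposes disk_has_partscan() as 0 or 1 under /sys/block/<disk>/. A trivial reader, assuming a kernel with this patch applied:

/* partscan_show.c - report whether the kernel will scan a disk for partitions.
 * Reads the /sys/block/<disk>/partscan attribute added by this patch.
 * Build: cc -Wall -o partscan_show partscan_show.c
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	unsigned int enabled;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <disk>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/block/%s/partscan", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &enabled) == 1)
		printf("%s: partition scanning %s\n", argv[1],
		       enabled ? "enabled" : "disabled");
	fclose(f);
	return 0;
}
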
diff --git a/block/ioctl.c b/block/ioctl.c
index 0c76137adcaa..d570e1695896 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -33,7 +33,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(disk, p.pno);
- if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
+ if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
return -EINVAL;
/* Check that the partition is aligned to the block size */
if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
@@ -95,9 +95,11 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
- uint64_t range[2];
- uint64_t start, len;
- struct inode *inode = bdev->bd_inode;
+ unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
+ uint64_t range[2], start, len, end;
+ struct bio *prev = NULL, *bio;
+ sector_t sector, nr_sects;
+ struct blk_plug plug;
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -105,6 +107,8 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
if (!bdev_max_discard_sectors(bdev))
return -EOPNOTSUPP;
+ if (bdev_read_only(bdev))
+ return -EPERM;
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
@@ -112,21 +116,47 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
start = range[0];
len = range[1];
- if (start & 511)
+ if (!len)
return -EINVAL;
- if (len & 511)
+ if ((start | len) & bs_mask)
return -EINVAL;
- if (start + len > bdev_nr_bytes(bdev))
+ if (check_add_overflow(start, len, &end) ||
+ end > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
goto fail;
- err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+
+ sector = start >> SECTOR_SHIFT;
+ nr_sects = len >> SECTOR_SHIFT;
+
+ blk_start_plug(&plug);
+ while (1) {
+ if (fatal_signal_pending(current)) {
+ if (prev)
+ bio_await_chain(prev);
+ err = -EINTR;
+ goto out_unplug;
+ }
+ bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
+ GFP_KERNEL);
+ if (!bio)
+ break;
+ prev = bio_chain_and_submit(prev, bio);
+ }
+ if (prev) {
+ err = submit_bio_wait(prev);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ bio_put(prev);
+ }
+out_unplug:
+ blk_finish_plug(&plug);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -151,12 +181,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -166,7 +196,6 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
{
uint64_t range[2];
uint64_t start, end, len;
- struct inode *inode = bdev->bd_inode;
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -189,7 +218,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
goto fail;
@@ -198,7 +227,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
BLKDEV_ZERO_NOUNMAP);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -402,7 +431,10 @@ static int blkdev_roset(struct block_device *bdev, unsigned cmd,
if (ret)
return ret;
}
- bdev->bd_read_only = n;
+ if (n)
+ bdev_set_flag(bdev, BD_READ_ONLY);
+ else
+ bdev_clear_flag(bdev, BD_READ_ONLY);
return 0;
}
@@ -472,11 +504,14 @@ static int compat_hdio_getgeo(struct block_device *bdev,
#endif
/* set the logical block size */
-static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
+static int blkdev_bszset(struct file *file, blk_mode_t mode,
int __user *argp)
{
+ // this one might be file_inode(file)->i_rdev - a rare valid
+ // use of file_inode() for those.
+ dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
+ struct file *excl_file;
int ret, n;
- struct file *file;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -486,13 +521,13 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
return -EFAULT;
if (mode & BLK_OPEN_EXCL)
- return set_blocksize(bdev, n);
+ return set_blocksize(file, n);
- file = bdev_file_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
- if (IS_ERR(file))
+ excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
+ if (IS_ERR(excl_file))
return -EBUSY;
- ret = set_blocksize(bdev, n);
- fput(file);
+ ret = set_blocksize(excl_file, n);
+ fput(excl_file);
return ret;
}
@@ -562,7 +597,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
- return disk_scan_partitions(bdev->bd_disk, mode);
+ return disk_scan_partitions(bdev->bd_disk,
+ mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
@@ -620,7 +656,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
return put_int(argp, block_size(bdev));
case BLKBSZSET:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64:
return put_u64(argp, bdev_nr_bytes(bdev));
@@ -680,7 +716,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return put_int(argp, bdev_logical_block_size(bdev));
case BLKBSZSET_32:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64_32:
return put_u64(argp, bdev_nr_bytes(bdev));
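
On the user side BLKDISCARD is unchanged: it still takes a uint64_t[2] of byte offset and length. The rework above only tightens validation (logical-block alignment, overflow and read-only checks) and chains the discard BIOs so a fatal signal can interrupt a long discard. An illustrative caller (destructive, use with care):

/* discard_range.c - issue BLKDISCARD for a byte range of a block device.
 * blk_ioctl_discard() now requires start and length to be logical-block
 * aligned and rejects ranges that overflow or exceed the device size.
 * DANGER: this destroys data in the given range.
 * Build: cc -Wall -o discard_range discard_range.c
 */
#include <fcntl.h>
#include <inttypes.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint64_t range[2];
	int fd;

	if (argc != 4) {
		fprintf(stderr, "usage: %s /dev/<disk> <start> <len>\n", argv[0]);
		return 1;
	}
	range[0] = strtoull(argv[2], NULL, 0);	/* start, in bytes */
	range[1] = strtoull(argv[3], NULL, 0);	/* length, in bytes */

	/* Mirror the kernel's overflow check before asking it to work. */
	if (range[1] == 0 || range[0] + range[1] < range[0]) {
		fprintf(stderr, "invalid range\n");
		return 1;
	}
	fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, range) < 0) {
		perror("BLKDISCARD");
		return 1;
	}
	printf("discarded %" PRIu64 " bytes at offset %" PRIu64 "\n",
	       range[1], range[0]);
	close(fd);
	return 0;
}
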
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 02a916ba62ee..94eede4fb9eb 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -102,7 +102,6 @@ struct deadline_data {
int prio_aging_expire;
spinlock_t lock;
- spinlock_t zone_lock;
};
/* Maps an I/O priority class to a deadline scheduler priority. */
@@ -129,36 +128,7 @@ static u8 dd_rq_ioclass(struct request *rq)
}
/*
- * get the request before `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_earlier_request(struct request *rq)
-{
- struct rb_node *node = rb_prev(&rq->rb_node);
-
- if (node)
- return rb_entry_rq(node);
-
- return NULL;
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
- struct rb_node *node = rb_next(&rq->rb_node);
-
- if (node)
- return rb_entry_rq(node);
-
- return NULL;
-}
-
-/*
- * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
- * return the first request after the start of the zone containing @pos.
+ * Return the first request for which blk_rq_pos() >= @pos.
*/
static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
enum dd_data_dir data_dir, sector_t pos)
@@ -170,14 +140,6 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
return NULL;
rq = rb_entry_rq(node);
- /*
- * A zoned write may have been requeued with a starting position that
- * is below that of the most recently dispatched request. Hence, for
- * zoned writes, start searching from the start of a zone.
- */
- if (blk_rq_is_seq_zoned_write(rq))
- pos = round_down(pos, rq->q->limits.chunk_sectors);
-
while (node) {
rq = rb_entry_rq(node);
if (blk_rq_pos(rq) >= pos) {
@@ -309,36 +271,6 @@ static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
}
/*
- * Check if rq has a sequential request preceding it.
- */
-static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
-{
- struct request *prev = deadline_earlier_request(rq);
-
- if (!prev)
- return false;
-
- return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
-}
-
-/*
- * Skip all write requests that are sequential from @rq, even if we cross
- * a zone boundary.
- */
-static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
- struct request *rq)
-{
- sector_t pos = blk_rq_pos(rq);
-
- do {
- pos += blk_rq_sectors(rq);
- rq = deadline_latter_request(rq);
- } while (rq && blk_rq_pos(rq) == pos);
-
- return rq;
-}
-
-/*
* For the specified data direction, return the next request to
* dispatch using arrival ordered lists.
*/
@@ -346,40 +278,10 @@ static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
enum dd_data_dir data_dir)
{
- struct request *rq, *rb_rq, *next;
- unsigned long flags;
-
if (list_empty(&per_prio->fifo_list[data_dir]))
return NULL;
- rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
- if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone. For some HDDs, breaking a sequential
- * write stream can lead to lower throughput, so make sure to preserve
- * sequential write streams, even if that stream crosses into the next
- * zones and these zones are unlocked.
- */
- spin_lock_irqsave(&dd->zone_lock, flags);
- list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
- queuelist) {
- /* Check whether a prior request exists for the same zone. */
- rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
- if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
- rq = rb_rq;
- if (blk_req_can_dispatch_to_zone(rq) &&
- (blk_queue_nonrot(rq->q) ||
- !deadline_is_seq_write(dd, rq)))
- goto out;
- }
- rq = NULL;
-out:
- spin_unlock_irqrestore(&dd->zone_lock, flags);
-
- return rq;
+ return rq_entry_fifo(per_prio->fifo_list[data_dir].next);
}
/*
@@ -390,36 +292,8 @@ static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
enum dd_data_dir data_dir)
{
- struct request *rq;
- unsigned long flags;
-
- rq = deadline_from_pos(per_prio, data_dir,
- per_prio->latest_pos[data_dir]);
- if (!rq)
- return NULL;
-
- if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone. For some HDDs, breaking a sequential
- * write stream can lead to lower throughput, so make sure to preserve
- * sequential write streams, even if that stream crosses into the next
- * zones and these zones are unlocked.
- */
- spin_lock_irqsave(&dd->zone_lock, flags);
- while (rq) {
- if (blk_req_can_dispatch_to_zone(rq))
- break;
- if (blk_queue_nonrot(rq->q))
- rq = deadline_latter_request(rq);
- else
- rq = deadline_skip_seq_writes(dd, rq);
- }
- spin_unlock_irqrestore(&dd->zone_lock, flags);
-
- return rq;
+ return deadline_from_pos(per_prio, data_dir,
+ per_prio->latest_pos[data_dir]);
}
/*
@@ -525,10 +399,6 @@ dispatch_find_request:
rq = next_rq;
}
- /*
- * For a zoned block device, if we only have writes queued and none of
- * them can be dispatched, rq will be NULL.
- */
if (!rq)
return NULL;
@@ -549,10 +419,6 @@ done:
prio = ioprio_class_to_prio[ioprio_class];
dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
dd->per_prio[prio].stats.dispatched++;
- /*
- * If the request needs its target zone locked, do it.
- */
- blk_req_zone_write_lock(rq);
rq->rq_flags |= RQF_STARTED;
return rq;
}
@@ -722,7 +588,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
dd->fifo_batch = fifo_batch;
dd->prio_aging_expire = prio_aging_expire;
spin_lock_init(&dd->lock);
- spin_lock_init(&dd->zone_lock);
/* We dispatch from request queue wide instead of hw queue */
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
@@ -804,12 +669,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
lockdep_assert_held(&dd->lock);
- /*
- * This may be a requeue of a write request that has locked its
- * target zone. If it is the case, this releases the zone lock.
- */
- blk_req_zone_write_unlock(rq);
-
prio = ioprio_class_to_prio[ioprio_class];
per_prio = &dd->per_prio[prio];
if (!rq->elv.priv[0]) {
@@ -841,18 +700,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
*/
rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
insert_before = &per_prio->fifo_list[data_dir];
-#ifdef CONFIG_BLK_DEV_ZONED
- /*
- * Insert zoned writes such that requests are sorted by
- * position per zone.
- */
- if (blk_rq_is_seq_zoned_write(rq)) {
- struct request *rq2 = deadline_latter_request(rq);
-
- if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
- insert_before = &rq2->queuelist;
- }
-#endif
list_add_tail(&rq->queuelist, insert_before);
}
}
@@ -887,33 +734,8 @@ static void dd_prepare_request(struct request *rq)
rq->elv.priv[0] = NULL;
}
-static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
-{
- struct deadline_data *dd = hctx->queue->elevator->elevator_data;
- enum dd_prio p;
-
- for (p = 0; p <= DD_PRIO_MAX; p++)
- if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
- return true;
-
- return false;
-}
-
/*
* Callback from inside blk_mq_free_request().
- *
- * For zoned block devices, write unlock the target zone of
- * completed write requests. Do this while holding the zone lock
- * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * or deadline_next_request() are executing. This function is called for
- * all requests, whether or not these requests complete successfully.
- *
- * For a zoned block device, __dd_dispatch_request() may have stopped
- * dispatching requests if all the queued requests are write requests directed
- * at zones that are already locked due to on-going write requests. To ensure
- * write request dispatch progress in this case, mark the queue as needing a
- * restart to ensure that the queue is run again after completion of the
- * request and zones being unlocked.
*/
static void dd_finish_request(struct request *rq)
{
@@ -928,21 +750,8 @@ static void dd_finish_request(struct request *rq)
* called dd_insert_requests(). Skip requests that bypassed I/O
* scheduling. See also blk_mq_request_bypass_insert().
*/
- if (!rq->elv.priv[0])
- return;
-
- atomic_inc(&per_prio->stats.completed);
-
- if (blk_queue_is_zoned(q)) {
- unsigned long flags;
-
- spin_lock_irqsave(&dd->zone_lock, flags);
- blk_req_zone_write_unlock(rq);
- spin_unlock_irqrestore(&dd->zone_lock, flags);
-
- if (dd_has_write_work(rq->mq_hctx))
- blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
- }
+ if (rq->elv.priv[0])
+ atomic_inc(&per_prio->stats.completed);
}
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
@@ -1266,7 +1075,6 @@ static struct elevator_type mq_deadline = {
.elevator_attrs = deadline_attrs,
.elevator_name = "mq-deadline",
.elevator_alias = "deadline",
- .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
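
mq-deadline no longer locks zones or special-cases sequential zone writes; with zone write plugging handling write ordering, a zoned disk is scheduled like any other. Whether a device is zoned at all remains visible through the existing queue/zoned sysfs attribute, for example:

/* zoned_model.c - print a block device's zoned model ("none", "host-aware"
 * or "host-managed") from the queue/zoned sysfs attribute.
 * Build: cc -Wall -o zoned_model zoned_model.c
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256], model[32];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <disk>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/block/%s/queue/zoned", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%31s", model) == 1)
		printf("%s: %s\n", argv[1], model);
	fclose(f);
	return 0;
}
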
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index c03bc105e575..152c85df92b2 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -70,8 +70,8 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
}
if (*partdef == '(') {
- int length;
- char *next = strchr(++partdef, ')');
+ partdef++;
+ char *next = strsep(&partdef, ")");
if (!next) {
pr_warn("cmdline partition format is invalid.");
@@ -79,11 +79,7 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
goto fail;
}
- length = min_t(int, next - partdef,
- sizeof(new_subpart->name) - 1);
- strscpy(new_subpart->name, partdef, length);
-
- partdef = ++next;
+ strscpy(new_subpart->name, next, sizeof(new_subpart->name));
} else
new_subpart->name[0] = '\0';
@@ -117,14 +113,12 @@ static void free_subpart(struct cmdline_parts *parts)
}
}
-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+static int parse_parts(struct cmdline_parts **parts, char *bdevdef)
{
int ret = -EINVAL;
char *next;
- int length;
struct cmdline_subpart **next_subpart;
struct cmdline_parts *newparts;
- char buf[BDEVNAME_SIZE + 32 + 4];
*parts = NULL;
@@ -132,28 +126,19 @@ static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
if (!newparts)
return -ENOMEM;
- next = strchr(bdevdef, ':');
+ next = strsep(&bdevdef, ":");
if (!next) {
pr_warn("cmdline partition has no block device.");
goto fail;
}
- length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
- strscpy(newparts->name, bdevdef, length);
+ strscpy(newparts->name, next, sizeof(newparts->name));
newparts->nr_subparts = 0;
next_subpart = &newparts->subpart;
- while (next && *(++next)) {
- bdevdef = next;
- next = strchr(bdevdef, ',');
-
- length = (!next) ? (sizeof(buf) - 1) :
- min_t(int, next - bdevdef, sizeof(buf) - 1);
-
- strscpy(buf, bdevdef, length);
-
- ret = parse_subpart(next_subpart, buf);
+ while ((next = strsep(&bdevdef, ","))) {
+ ret = parse_subpart(next_subpart, next);
if (ret)
goto fail;
@@ -199,24 +184,17 @@ static int cmdline_parts_parse(struct cmdline_parts **parts,
*parts = NULL;
- next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
if (!buf)
return -ENOMEM;
next_parts = parts;
- while (next && *pbuf) {
- next = strchr(pbuf, ';');
- if (next)
- *next = '\0';
-
- ret = parse_parts(next_parts, pbuf);
+ while ((next = strsep(&pbuf, ";"))) {
+ ret = parse_parts(next_parts, next);
if (ret)
goto fail;
- if (next)
- pbuf = ++next;
-
next_parts = &(*next_parts)->next_parts;
}
@@ -250,7 +228,6 @@ static struct cmdline_parts *bdev_parts;
static int add_part(int slot, struct cmdline_subpart *subpart,
struct parsed_partitions *state)
{
- int label_min;
struct partition_meta_info *info;
char tmp[sizeof(info->volname) + 4];
@@ -262,9 +239,7 @@ static int add_part(int slot, struct cmdline_subpart *subpart,
info = &state->parts[slot].info;
- label_min = min_t(int, sizeof(info->volname) - 1,
- sizeof(subpart->name));
- strscpy(info->volname, subpart->name, label_min);
+ strscpy(info->volname, subpart->name, sizeof(info->volname));
snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
strlcat(state->pp_buf, tmp, PAGE_SIZE);
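
The parser now relies on strsep(), which both finds the separator and NUL-terminates the token in place, removing the intermediate buffer and the error-prone length arithmetic. A userspace sketch of the same approach on a blkdevparts=-style string (the device names below are made up):

/* cmdline_parts.c - split a blkdevparts= style string with strsep(), the
 * same approach block/partitions/cmdline.c now uses.
 * Build: cc -Wall -o cmdline_parts cmdline_parts.c
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Example command line value (hypothetical device names). */
	char *cmdline = strdup("mmcblk0:1M(boot),512M(root);mmcblk1:-(data)");
	char *devdef, *partdef, *rest = cmdline;

	/* One block device definition per ';' separated field. */
	while ((devdef = strsep(&rest, ";"))) {
		/* Device name is everything up to the first ':'. */
		char *name = strsep(&devdef, ":");

		if (!name || !devdef) {
			fprintf(stderr, "definition has no block device\n");
			break;
		}
		printf("device %s:\n", name);

		/* Partitions are ',' separated; strsep() terminates each. */
		while ((partdef = strsep(&devdef, ",")))
			printf("  partition spec \"%s\"\n", partdef);
	}
	free(cmdline);
	return 0;
}
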
diff --git a/block/partitions/core.c b/block/partitions/core.c
index b11e88c82c8c..ab76e64f0f6c 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -173,7 +173,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
static ssize_t part_partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
+ return sprintf(buf, "%d\n", bdev_partno(dev_to_bdev(dev)));
}
static ssize_t part_start_show(struct device *dev,
@@ -243,14 +243,14 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
put_disk(dev_to_bdev(dev)->bd_disk);
- iput(dev_to_bdev(dev)->bd_inode);
+ bdev_drop(dev_to_bdev(dev));
}
static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct block_device *part = dev_to_bdev(dev);
- add_uevent_var(env, "PARTN=%u", part->bd_partno);
+ add_uevent_var(env, "PARTN=%u", bdev_partno(part));
if (part->bd_meta_info && part->bd_meta_info->volname[0])
add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
return 0;
@@ -267,7 +267,7 @@ void drop_partition(struct block_device *part)
{
lockdep_assert_held(&part->bd_disk->open_mutex);
- xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
+ xa_erase(&part->bd_disk->part_tbl, bdev_partno(part));
kobject_put(part->bd_holder_dir);
device_del(&part->bd_device);
@@ -338,8 +338,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
pdev->parent = ddev;
/* in consecutive minor range? */
- if (bdev->bd_partno < disk->minors) {
- devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
+ if (bdev_partno(bdev) < disk->minors) {
+ devt = MKDEV(disk->major, disk->first_minor + bdev_partno(bdev));
} else {
err = blk_alloc_ext_minor();
if (err < 0)
@@ -404,7 +404,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
rcu_read_lock();
xa_for_each_start(&disk->part_tbl, idx, part, 1) {
- if (part->bd_partno != skip_partno &&
+ if (bdev_partno(part) != skip_partno &&
start < part->bd_start_sect + bdev_nr_sectors(part) &&
start + length > part->bd_start_sect) {
overlap = true;
@@ -469,7 +469,7 @@ int bdev_del_partition(struct gendisk *disk, int partno)
* Just delete the partition and invalidate it.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
invalidate_bdev(part);
drop_partition(part);
ret = 0;
@@ -573,10 +573,7 @@ static int blk_add_partitions(struct gendisk *disk)
struct parsed_partitions *state;
int ret = -EAGAIN, p;
- if (disk->flags & GENHD_FL_NO_PART)
- return 0;
-
- if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+ if (!disk_has_partscan(disk))
return 0;
state = check_partition(disk);
@@ -655,7 +652,7 @@ rescan:
* it cannot be looked up any more even when openers
* still hold references.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
/*
* If @disk->open_partitions isn't elevated but there's
@@ -704,7 +701,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
- struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+ struct address_space *mapping = state->disk->part0->bd_mapping;
struct folio *folio;
if (n >= get_capacity(state->disk)) {
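
The bdev_partno()/bdev_unhash()/bdev_drop() conversions above are accessor changes only; the partition attributes touched here (part_partition_show(), part_start_show() and friends) still back the usual per-partition sysfs files. For reference, a reader of those files (start and size are reported in 512-byte sectors):

/* part_geometry.c - print a partition's number, start and size from sysfs.
 * Build: cc -Wall -o part_geometry part_geometry.c
 * Usage: ./part_geometry sda sda1
 */
#include <stdio.h>

static int read_u64(const char *disk, const char *part, const char *attr,
		    unsigned long long *val)
{
	char path[256];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), "/sys/block/%s/%s/%s", disk, part, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = fscanf(f, "%llu", val) == 1;
	fclose(f);
	return ok ? 0 : -1;
}

int main(int argc, char **argv)
{
	unsigned long long partno, start, size;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <disk> <partition>\n", argv[0]);
		return 1;
	}
	if (read_u64(argv[1], argv[2], "partition", &partno) ||
	    read_u64(argv[1], argv[2], "start", &start) ||
	    read_u64(argv[1], argv[2], "size", &size)) {
		fprintf(stderr, "failed to read partition attributes\n");
		return 1;
	}
	printf("%s: partition %llu, start sector %llu, %llu sectors\n",
	       argv[2], partno, start, size);
	return 0;
}
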
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 38e58960ae03..2bd42fedb907 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -131,8 +131,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
return false;
}
- strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
- toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+ strscpy_pad(toc->bitmap1_name, data + 0x24, sizeof(toc->bitmap1_name));
toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
toc->bitmap1_size = get_unaligned_be64(data + 0x36);
@@ -142,8 +141,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
TOC_BITMAP1, toc->bitmap1_name);
return false;
}
- strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
- toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+ strscpy_pad(toc->bitmap2_name, data + 0x46, sizeof(toc->bitmap2_name));
toc->bitmap2_start = get_unaligned_be64(data + 0x50);
toc->bitmap2_size = get_unaligned_be64(data + 0x58);
if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
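
strscpy_pad() guarantees NUL termination, zero-fills the rest of the destination and reports truncation, which is what the open-coded strncpy() plus manual terminator tried to do. The kernel helper lives in lib/string.c; the userspace sketch below only mirrors its documented semantics for illustration:

/* strscpy_pad_demo.c - userspace sketch of strscpy_pad() semantics: copy at
 * most size-1 bytes, always NUL-terminate, zero-fill the remainder of the
 * buffer, return -E2BIG on truncation and the copied length otherwise.
 * Build: cc -Wall -o strscpy_pad_demo strscpy_pad_demo.c
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static long strscpy_pad_demo(char *dst, const char *src, size_t size)
{
	size_t len;

	if (!size)
		return -E2BIG;
	len = strnlen(src, size);
	if (len == size) {			/* source does not fit */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* includes the NUL */
	memset(dst + len + 1, 0, size - len - 1); /* pad the remainder */
	return (long)len;
}

int main(void)
{
	char name[8];

	printf("%ld \"%s\"\n", strscpy_pad_demo(name, "config", sizeof(name)), name);
	printf("%ld \"%s\"\n", strscpy_pad_demo(name, "much-too-long", sizeof(name)), name);
	return 0;
}
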
diff --git a/certs/Makefile b/certs/Makefile
index 799ad7b9e68a..1094e3860c2a 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -13,7 +13,7 @@ CFLAGS_blacklist_hashes.o := -I $(obj)
quiet_cmd_check_and_copy_blacklist_hash_list = GEN $@
cmd_check_and_copy_blacklist_hash_list = \
$(if $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST), \
- $(AWK) -f $(srctree)/$(src)/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
+ $(AWK) -f $(src)/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
{ cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST); echo $(comma) NULL; } > $@, \
echo NULL > $@)
@@ -55,7 +55,7 @@ $(obj)/signing_key.pem: $(obj)/x509.genkey FORCE
targets += signing_key.pem
quiet_cmd_copy_x509_config = COPY $@
- cmd_copy_x509_config = cat $(srctree)/$(src)/default_x509.genkey > $@
+ cmd_copy_x509_config = cat $(src)/default_x509.genkey > $@
# You can provide your own config file. If not present, copy the default one.
$(obj)/x509.genkey:
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 2903ce19f15c..5688d42a59c2 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1456,26 +1456,6 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
already been phased out from internal use by the kernel, and are
only useful for userspace clients that still rely on them.
-config CRYPTO_STATS
- bool "Crypto usage statistics"
- depends on CRYPTO_USER
- help
- Enable the gathering of crypto stats.
-
- Enabling this option reduces the performance of the crypto API. It
- should only be enabled when there is actually a use case for it.
-
- This collects data sizes, numbers of requests, and numbers
- of errors processed by:
- - AEAD ciphers (encrypt, decrypt)
- - asymmetric key ciphers (encrypt, decrypt, verify, sign)
- - symmetric key ciphers (encrypt, decrypt)
- - compression algorithms (compress, decompress)
- - hash algorithms (hash)
- - key-agreement protocol primitives (setsecret, generate
- public key, compute shared secret)
- - RNG (generate, seed)
-
endmenu
config CRYPTO_HASH_INFO
diff --git a/crypto/Makefile b/crypto/Makefile
index 408f0a1f9ab9..edbbaa3ffef5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -20,6 +20,9 @@ crypto_skcipher-y += lskcipher.o
crypto_skcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += bpf_crypto_skcipher.o
+endif
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
@@ -69,8 +72,6 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
-crypto_user-y := crypto_user_base.o
-crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 1c682810a484..6fdf0ff9f3c0 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -93,32 +93,6 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
-static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
- struct crypto_alg *alg)
-{
- struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
- struct crypto_istat_compress *istat = comp_get_stat(calg);
- struct crypto_stat_compress racomp;
-
- memset(&racomp, 0, sizeof(racomp));
-
- strscpy(racomp.type, "acomp", sizeof(racomp.type));
- racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
- racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
- racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
- racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
- racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return __crypto_acomp_report_stat(skb, alg);
-}
-#endif
-
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
@@ -128,9 +102,6 @@ static const struct crypto_type crypto_acomp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_acomp_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -184,13 +155,9 @@ EXPORT_SYMBOL_GPL(acomp_request_free);
void comp_prepare_alg(struct comp_alg_common *alg)
{
- struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
}
int crypto_register_acomp(struct acomp_alg *alg)
diff --git a/crypto/aead.c b/crypto/aead.c
index 54906633566a..cade532413bf 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -20,15 +20,6 @@
#include "internal.h"
-static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -45,8 +36,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kfree_sensitive(buffer);
return ret;
}
@@ -90,62 +80,28 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
-static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&istat->err_cnt);
-
- return err;
-}
-
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct crypto_istat_aead *istat;
- int ret;
-
- istat = aead_get_stat(alg);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->cryptlen, &istat->encrypt_tlen);
- }
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = alg->encrypt(req);
+ return -ENOKEY;
- return crypto_aead_errstat(istat, ret);
+ return crypto_aead_alg(aead)->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct crypto_istat_aead *istat;
- int ret;
-
- istat = aead_get_stat(alg);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->cryptlen, &istat->encrypt_tlen);
- }
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (req->cryptlen < crypto_aead_authsize(aead))
- ret = -EINVAL;
- else
- ret = alg->decrypt(req);
+ return -ENOKEY;
+
+ if (req->cryptlen < crypto_aead_authsize(aead))
+ return -EINVAL;
- return crypto_aead_errstat(istat, ret);
+ return crypto_aead_alg(aead)->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -215,26 +171,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
-static int __maybe_unused crypto_aead_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct aead_alg *aead = container_of(alg, struct aead_alg, base);
- struct crypto_istat_aead *istat = aead_get_stat(aead);
- struct crypto_stat_aead raead;
-
- memset(&raead, 0, sizeof(raead));
-
- strscpy(raead.type, "aead", sizeof(raead.type));
-
- raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
- raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
- raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
- raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
- raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@@ -245,9 +181,6 @@ static const struct crypto_type crypto_aead_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_aead_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_aead_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@@ -277,7 +210,6 @@ EXPORT_SYMBOL_GPL(crypto_has_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
- struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -291,9 +223,6 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
-
return 0;
}
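
setkey_unaligned() now uses kfree_sensitive(), which wipes the temporary key copy before freeing it instead of relying on an open-coded memset() that the compiler could in principle elide. The closest userspace analogue is explicit_bzero() (glibc 2.25+, also on the BSDs); an illustrative sketch:

/* wipe_key.c - clear key material before freeing it, the userspace analogue
 * of the kfree_sensitive() conversion above. explicit_bzero() is used because
 * a plain memset() right before free() may be optimized away as a dead store.
 * Build: cc -Wall -o wipe_key wipe_key.c
 */
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <string.h>

static int use_key_copy(const unsigned char *key, size_t keylen)
{
	unsigned char *copy = malloc(keylen);

	if (!copy)
		return -1;
	memcpy(copy, key, keylen);

	/* ... hand the temporary copy to whatever needs it ... */

	explicit_bzero(copy, keylen);	/* wipe before the memory is reused */
	free(copy);
	return 0;
}

int main(void)
{
	unsigned char key[16] = { 0 };

	return use_key_copy(key, sizeof(key));
}
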
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 68cc9290cabe..18cfead0081d 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -407,7 +407,8 @@ unlock:
return err;
}
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
@@ -422,7 +423,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
if (!type)
goto unlock;
- sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
@@ -468,10 +469,10 @@ unlock:
}
EXPORT_SYMBOL_GPL(af_alg_accept);
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int alg_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
- return af_alg_accept(sock->sk, newsock, kern);
+ return af_alg_accept(sock->sk, newsock, arg);
}
static const struct proto_ops alg_proto_ops = {
@@ -847,7 +848,7 @@ void af_alg_wmem_wakeup(struct sock *sk)
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
EPOLLRDNORM |
EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
@@ -914,7 +915,7 @@ static void af_alg_data_wakeup(struct sock *sk)
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLRDNORM |
EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
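
af_alg_accept() now takes the proto_accept_arg cookie instead of a bare kern flag, but the userspace-visible AF_ALG flow is untouched: create the transform socket, bind() the algorithm name, accept() an operation socket, then write data and read the result. A minimal SHA-256 example:

/* af_alg_sha256.c - hash a buffer with the kernel crypto API via AF_ALG,
 * the socket interface implemented by crypto/af_alg.c and algif_hash.c.
 * Build: cc -Wall -o af_alg_sha256 af_alg_sha256.c
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	const char msg[] = "hello, world";
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("AF_ALG setup");
		return 1;
	}
	/* accept() returns an operation socket bound to the "sha256" tfm. */
	opfd = accept(tfmfd, NULL, NULL);
	if (opfd < 0) {
		perror("accept");
		return 1;
	}
	if (write(opfd, msg, strlen(msg)) != (ssize_t)strlen(msg) ||
	    read(opfd, digest, sizeof(digest)) != sizeof(digest)) {
		perror("hash");
		return 1;
	}
	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(opfd);
	close(tfmfd);
	return 0;
}
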
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 0ac83f7f701d..bcd9de009a91 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -27,22 +27,6 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
-{
- return hash_get_stat(&alg->halg);
-}
-
-static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&ahash_get_stat(alg)->err_cnt);
-
- return err;
-}
-
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
* algorithm), this returns the underlying shash tfm.
@@ -344,75 +328,47 @@ static void ahash_restore_req(struct ahash_request *req, int err)
int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg;
if (likely(tfm->using_shash))
return shash_ahash_update(req, ahash_request_ctx(req));
- alg = crypto_ahash_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
- return crypto_ahash_errstat(alg, alg->update(req));
+ return crypto_ahash_alg(tfm)->update(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg;
if (likely(tfm->using_shash))
return crypto_shash_final(ahash_request_ctx(req), req->result);
- alg = crypto_ahash_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
- return crypto_ahash_errstat(alg, alg->final(req));
+ return crypto_ahash_alg(tfm)->final(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg;
if (likely(tfm->using_shash))
return shash_ahash_finup(req, ahash_request_ctx(req));
- alg = crypto_ahash_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
- atomic64_inc(&istat->hash_cnt);
- atomic64_add(req->nbytes, &istat->hash_tlen);
- }
- return crypto_ahash_errstat(alg, alg->finup(req));
+ return crypto_ahash_alg(tfm)->finup(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg;
- int err;
if (likely(tfm->using_shash))
return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
- alg = crypto_ahash_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
- atomic64_inc(&istat->hash_cnt);
- atomic64_add(req->nbytes, &istat->hash_tlen);
- }
-
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- err = -ENOKEY;
- else
- err = alg->digest(req);
+ return -ENOKEY;
- return crypto_ahash_errstat(alg, err);
+ return crypto_ahash_alg(tfm)->digest(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -571,12 +527,6 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
-static int __maybe_unused crypto_ahash_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- return crypto_hash_report_stat(skb, alg, "ahash");
-}
-
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@@ -587,9 +537,6 @@ static const struct crypto_type crypto_ahash_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_ahash_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_ahash_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 52813f0b19e4..e0ff5f4dda6d 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -70,30 +70,6 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
-static int __maybe_unused crypto_akcipher_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
- struct crypto_istat_akcipher *istat;
- struct crypto_stat_akcipher rakcipher;
-
- istat = akcipher_get_stat(akcipher);
-
- memset(&rakcipher, 0, sizeof(rakcipher));
-
- strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
- rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
- rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
- rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
- rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
- rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
- rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
- rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
- sizeof(rakcipher), &rakcipher);
-}
-
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@@ -104,9 +80,6 @@ static const struct crypto_type crypto_akcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_akcipher_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -131,15 +104,11 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
- struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 0de1e6697949..1aa5f306998a 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -138,9 +138,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
goto err_free_param;
}
- if (!i)
- goto err_free_param;
-
param->tb[i + 1] = NULL;
param->type.attr.rta_len = sizeof(param->type);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index e24c829d7a01..7c7394d46a23 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -223,8 +223,8 @@ unlock:
return err ?: len;
}
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int hash_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -252,7 +252,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
if (err)
goto out_free_state;
- err = af_alg_accept(ask->parent, newsock, kern);
+ err = af_alg_accept(ask->parent, newsock, arg);
if (err)
goto out_free_state;
@@ -355,7 +355,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
int err;
@@ -363,7 +363,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
if (err)
return err;
- return hash_accept(sock, newsock, flags, kern);
+ return hash_accept(sock, newsock, arg);
}
static struct proto_ops algif_hash_ops_nokey = {
diff --git a/crypto/api.c b/crypto/api.c
index 7f402107f0cc..6aa5a3b4ed5e 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -202,18 +202,18 @@ static void crypto_start_test(struct crypto_larval *larval)
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
- long timeout;
+ long time_left;
if (!crypto_boot_test_finished())
crypto_start_test(larval);
- timeout = wait_for_completion_killable_timeout(
+ time_left = wait_for_completion_killable_timeout(
&larval->completion, 60 * HZ);
alg = larval->adult;
- if (timeout < 0)
+ if (time_left < 0)
alg = ERR_PTR(-EINTR);
- else if (!timeout)
+ else if (!time_left)
alg = ERR_PTR(-ETIMEDOUT);
else if (!alg)
alg = ERR_PTR(-ENOENT);
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 59ec726b7c77..e1345b8f39f1 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
+ select CRYPTO_SIG
select CRYPTO_HASH
help
This option provides support for asymmetric public key type handling.
@@ -85,5 +86,21 @@ config FIPS_SIGNATURE_SELFTEST
depends on ASYMMETRIC_KEY_TYPE
depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
depends on X509_CERTIFICATE_PARSER
+ depends on CRYPTO_RSA
+ depends on CRYPTO_SHA256
+
+config FIPS_SIGNATURE_SELFTEST_RSA
+ bool
+ default y
+ depends on FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_RSA=y || CRYPTO_RSA=FIPS_SIGNATURE_SELFTEST
+
+config FIPS_SIGNATURE_SELFTEST_ECDSA
+ bool
+ default y
+ depends on FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_SHA256=y || CRYPTO_SHA256=FIPS_SIGNATURE_SELFTEST
+ depends on CRYPTO_ECDSA=y || CRYPTO_ECDSA=FIPS_SIGNATURE_SELFTEST
endif # ASYMMETRIC_KEY_TYPE
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index 1a273d6df3eb..bc65d3b98dcb 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -24,6 +24,8 @@ x509_key_parser-y := \
x509_public_key.o
obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
x509_selftest-y += selftest.o
+x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_RSA) += selftest_rsa.o
+x509_selftest-$(CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA) += selftest_ecdsa.o
$(obj)/x509_cert_parser.o: \
$(obj)/x509.asn1.h \
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 05402ef8964e..8aecbe4637f3 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,6 +75,9 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
+ case OID_sha1:
+ ctx->digest_algo = "sha1";
+ break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 5b08c50722d0..231ad7b3789d 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,6 +227,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
+ case OID_sha1:
+ ctx->sinfo->sig->hash_algo = "sha1";
+ break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
@@ -278,6 +281,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
+ case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e5f22691febd..3474fb34ded9 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -115,7 +115,8 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (!hash_algo)
return -EINVAL;
- if (strcmp(hash_algo, "sha224") != 0 &&
+ if (strcmp(hash_algo, "sha1") != 0 &&
+ strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
strcmp(hash_algo, "sha512") != 0 &&
@@ -233,6 +234,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
info->key_size = len * 8;
if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
+ int slen = len;
/*
* ECDSA key sizes are much smaller than RSA, and thus could
* operate on (hashed) inputs that are larger than key size.
@@ -246,8 +248,19 @@ static int software_key_query(const struct kernel_pkey_params *params,
* Verify takes ECDSA-Sig (described in RFC 5480) as input,
* which is actually 2 'key_size'-bit integers encoded in
* ASN.1. Account for the ASN.1 encoding overhead here.
+ *
+ * NIST P192/256/384 may prepend a '0' to a coordinate to
+ * indicate a positive integer. NIST P521 never needs it.
*/
- info->max_sig_size = 2 * (len + 3) + 2;
+ if (strcmp(pkey->pkey_algo, "ecdsa-nist-p521") != 0)
+ slen += 1;
+ /* Length of encoding the x & y coordinates */
+ slen = 2 * (slen + 2);
+ /*
+ * If coordinate encoding takes at least 128 bytes then an
+ * additional byte for length encoding is needed.
+ */
+ info->max_sig_size = 1 + (slen >= 128) + 1 + slen;
} else {
info->max_data_size = len;
info->max_sig_size = len;
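
For a concrete check of the bound above: with P-256, len = 32, so slen becomes 33 and then 2 * (33 + 2) = 70, giving 1 + 0 + 1 + 70 = 72 bytes; with P-521, len = 66, slen stays 66, 2 * (66 + 2) = 136 >= 128, giving 1 + 1 + 1 + 136 = 139 bytes. The helper below only restates the same arithmetic for illustration (ecdsa_max_sig_size() is a made-up name, not a kernel symbol):

/*
 * Illustrative re-statement of the DER size bound computed above.
 * "len" is the coordinate size in bytes.
 */
static unsigned int ecdsa_max_sig_size(unsigned int len, int is_p521)
{
	unsigned int slen = len;

	if (!is_p521)
		slen += 1;		/* room for a leading 0x00 on r or s */
	slen = 2 * (slen + 2);		/* two INTEGER TLVs (tag + length + value) */
	/* outer SEQUENCE: tag, 1- or 2-byte length, then the payload */
	return 1 + (slen >= 128) + 1 + slen;
}

/* P-256: len = 32 -> 72 bytes.  P-521: len = 66 -> 139 bytes. */
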
diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
index c50da7ef90ae..98dc5cdfdebe 100644
--- a/crypto/asymmetric_keys/selftest.c
+++ b/crypto/asymmetric_keys/selftest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* Self-testing for signature checking.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
@@ -9,179 +10,18 @@
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/module.h>
+#include "selftest.h"
#include "x509_parser.h"
-struct certs_test {
- const u8 *data;
- size_t data_len;
- const u8 *pkcs7;
- size_t pkcs7_len;
-};
-
-/*
- * Set of X.509 certificates to provide public keys for the tests. These will
- * be loaded into a temporary keyring for the duration of the testing.
- */
-static const __initconst u8 certs_selftest_keys[] = {
- "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
- "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
- "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
- "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
- "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
- "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
- "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
- "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
- "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
- "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
- "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
- "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
- "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
- "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
- "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
- "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
- "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
- "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
- "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
- "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
- "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
- "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
- "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
- "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
- "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
- "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
- "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
- "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
- "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
- "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
- "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
- "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
- "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
- "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
- "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
- "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
- "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
- "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
- "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
- "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
- "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
- "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
- "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
- "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
- "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
- "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
- "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
- "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
- "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
- "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
- "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
- "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
- "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
- "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
- "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
- "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
- "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
- "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
- "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
- "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
- "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
- "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
- "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
- "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
- "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
- "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
- "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
- "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
- "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
- "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
- "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
- "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
- "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
- "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
- "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
- "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
- "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
- "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
- "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
- "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
- "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
- "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
- "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
- "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
- "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
- "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
-};
-
-/*
- * Signed data and detached signature blobs that form the verification tests.
- */
-static const __initconst u8 certs_selftest_1_data[] = {
- "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
- "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
- "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
- "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
- "\x61\x74\x69\x6f\x6e\x2e\x0a"
-};
-
-static const __initconst u8 certs_selftest_1_pkcs7[] = {
- "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
- "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
- "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
- "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
- "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
- "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
- "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
- "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
- "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
- "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
- "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
- "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
- "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
- "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
- "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
- "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
- "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
- "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
- "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
- "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
- "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
- "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
- "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
- "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
- "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
- "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
- "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
- "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
- "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
- "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
- "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
- "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
- "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
- "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
- "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
- "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
- "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
- "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
- "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
- "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
- "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
- "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
- "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
-};
-
-/*
- * List of tests to be run.
- */
-#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 }
-static const struct certs_test certs_tests[] __initconst = {
- TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
-};
-
-static int __init fips_signature_selftest(void)
+void fips_signature_selftest(const char *name,
+ const u8 *keys, size_t keys_len,
+ const u8 *data, size_t data_len,
+ const u8 *sig, size_t sig_len)
{
struct key *keyring;
- int ret, i;
+ int ret;
- pr_notice("Running certificate verification selftests\n");
+ pr_notice("Running certificate verification %s selftest\n", name);
keyring = keyring_alloc(".certs_selftest",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
@@ -191,40 +31,41 @@ static int __init fips_signature_selftest(void)
KEY_ALLOC_NOT_IN_QUOTA,
NULL, NULL);
if (IS_ERR(keyring))
- panic("Can't allocate certs selftest keyring: %ld\n",
- PTR_ERR(keyring));
+ panic("Can't allocate certs %s selftest keyring: %ld\n", name, PTR_ERR(keyring));
- ret = x509_load_certificate_list(certs_selftest_keys,
- sizeof(certs_selftest_keys) - 1, keyring);
+ ret = x509_load_certificate_list(keys, keys_len, keyring);
if (ret < 0)
- panic("Can't allocate certs selftest keyring: %d\n", ret);
+ panic("Can't allocate certs %s selftest keyring: %d\n", name, ret);
- for (i = 0; i < ARRAY_SIZE(certs_tests); i++) {
- const struct certs_test *test = &certs_tests[i];
- struct pkcs7_message *pkcs7;
+ struct pkcs7_message *pkcs7;
- pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len);
- if (IS_ERR(pkcs7))
- panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret);
+ pkcs7 = pkcs7_parse_message(sig, sig_len);
+ if (IS_ERR(pkcs7))
+ panic("Certs %s selftest: pkcs7_parse_message() = %ld\n", name, PTR_ERR(pkcs7));
- pkcs7_supply_detached_data(pkcs7, test->data, test->data_len);
+ pkcs7_supply_detached_data(pkcs7, data, data_len);
- ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
- if (ret < 0)
- panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret);
+ ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
+ if (ret < 0)
+ panic("Certs %s selftest: pkcs7_verify() = %d\n", name, ret);
- ret = pkcs7_validate_trust(pkcs7, keyring);
- if (ret < 0)
- panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret);
+ ret = pkcs7_validate_trust(pkcs7, keyring);
+ if (ret < 0)
+ panic("Certs %s selftest: pkcs7_validate_trust() = %d\n", name, ret);
- pkcs7_free_message(pkcs7);
- }
+ pkcs7_free_message(pkcs7);
key_put(keyring);
+}
+
+static int __init fips_signature_selftest_init(void)
+{
+ fips_signature_selftest_rsa();
+ fips_signature_selftest_ecdsa();
return 0;
}
-late_initcall(fips_signature_selftest);
+late_initcall(fips_signature_selftest_init);
MODULE_DESCRIPTION("X.509 self tests");
MODULE_AUTHOR("Red Hat, Inc.");
diff --git a/crypto/asymmetric_keys/selftest.h b/crypto/asymmetric_keys/selftest.h
new file mode 100644
index 000000000000..4139f05906cb
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Helper function for self-testing PKCS#7 signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+void fips_signature_selftest(const char *name,
+ const u8 *keys, size_t keys_len,
+ const u8 *data, size_t data_len,
+ const u8 *sig, size_t sig_len);
+
+#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_RSA
+void __init fips_signature_selftest_rsa(void);
+#else
+static inline void __init fips_signature_selftest_rsa(void) { }
+#endif
+
+#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST_ECDSA
+void __init fips_signature_selftest_ecdsa(void);
+#else
+static inline void __init fips_signature_selftest_ecdsa(void) { }
+#endif
diff --git a/crypto/asymmetric_keys/selftest_ecdsa.c b/crypto/asymmetric_keys/selftest_ecdsa.c
new file mode 100644
index 000000000000..20a0868b30de
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest_ecdsa.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Self-tests for PKCS#7 ECDSA signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+#include <linux/module.h>
+#include "selftest.h"
+
+/*
+ * Set of X.509 certificates to provide public keys for the tests. These will
+ * be loaded into a temporary keyring for the duration of the testing.
+ */
+static const u8 certs_selftest_ecdsa_keys[] __initconst = {
+ /* P-256 ECDSA certificate */
+ "\x30\x82\x01\xd4\x30\x82\x01\x7b\xa0\x03\x02\x01\x02\x02\x14\x2e"
+ "\xea\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0"
+ "\xf8\x6f\xde\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x30"
+ "\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74"
+ "\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61"
+ "\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d"
+ "\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32"
+ "\x34\x30\x34\x31\x33\x32\x32\x31\x36\x32\x36\x5a\x18\x0f\x32\x31"
+ "\x32\x34\x30\x33\x32\x30\x32\x32\x31\x36\x32\x36\x5a\x30\x3a\x31"
+ "\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72\x74\x69\x66"
+ "\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69"
+ "\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66\x2d\x74\x65"
+ "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x59\x30\x13\x06\x07\x2a"
+ "\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07"
+ "\x03\x42\x00\x04\x07\xe5\x6b\x51\xaf\xfc\x19\x41\x2c\x88\x92\x6b"
+ "\x77\x57\x71\x03\x9e\xe2\xfe\x6e\x6a\x71\x4e\xc7\x29\x9f\x90\xe1"
+ "\x77\x18\x9f\xc2\xe7\x0a\x82\xd0\x8a\xe1\x81\xa9\x71\x7c\x5a\x73"
+ "\xfb\x25\xb9\x5b\x1e\x24\x8c\x73\x9f\xf8\x38\xf8\x48\xb4\xad\x16"
+ "\x19\xc0\x22\xc6\xa3\x5d\x30\x5b\x30\x1d\x06\x03\x55\x1d\x0e\x04"
+ "\x16\x04\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2\x3d"
+ "\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x1f\x06\x03\x55\x1d\x23\x04\x18"
+ "\x30\x16\x80\x14\x29\x00\xbc\xea\x1d\xeb\x7b\xc8\x47\x9a\x84\xa2"
+ "\x3d\x75\x8e\xfd\xfd\xd2\xb2\xd3\x30\x0c\x06\x03\x55\x1d\x13\x01"
+ "\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04\x04\x03"
+ "\x02\x07\x80\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x03"
+ "\x47\x00\x30\x44\x02\x20\x1a\xd7\xac\x07\xc8\x97\x38\xf4\x89\x43"
+ "\x7e\xc7\x66\x6e\xa5\x00\x7c\x12\x1d\xb4\x09\x76\x0c\x99\x6b\x8c"
+ "\x26\x5d\xe9\x70\x5c\xb4\x02\x20\x73\xb7\xc7\x7a\x5a\xdb\x67\x0a"
+ "\x96\x42\x19\xcf\x4f\x67\x4f\x35\x6a\xee\x29\x25\xf2\x4f\xc8\x10"
+ "\x14\x9d\x79\x69\x1c\x7a\xd7\x5d"
+};
+
+/*
+ * Signed data and detached signature blobs that form the verification tests.
+ */
+static const u8 certs_selftest_ecdsa_data[] __initconst = {
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
+ "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x2e\x0a"
+};
+
+static const u8 certs_selftest_ecdsa_sig[] __initconst = {
+ /* ECDSA signature using SHA-256 */
+ "\x30\x81\xf4\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0\x81"
+ "\xe6\x30\x81\xe3\x02\x01\x01\x31\x0f\x30\x0d\x06\x09\x60\x86\x48"
+ "\x01\x65\x03\x04\x02\x01\x05\x00\x30\x0b\x06\x09\x2a\x86\x48\x86"
+ "\xf7\x0d\x01\x07\x01\x31\x81\xbf\x30\x81\xbc\x02\x01\x01\x30\x52"
+ "\x30\x3a\x31\x38\x30\x36\x06\x03\x55\x04\x03\x0c\x2f\x43\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x20\x45\x43\x44\x53\x41\x20\x73\x65\x6c\x66"
+ "\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x2e\xea"
+ "\x64\x8d\x7f\x17\xe6\x2e\x9e\x58\x69\xc8\x87\xc6\x8e\x1b\xd0\xf8"
+ "\x6f\xde\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05"
+ "\x00\x30\x0a\x06\x08\x2a\x86\x48\xce\x3d\x04\x03\x02\x04\x48\x30"
+ "\x46\x02\x21\x00\x86\xd1\xf4\x06\xb6\x49\x79\xf9\x09\x5f\x35\x1a"
+ "\x94\x7e\x0e\x1a\x12\x4d\xd9\xe6\x2a\x2d\xcf\x2d\x0a\xee\x88\x76"
+ "\xe0\x35\xf3\xeb\x02\x21\x00\xdf\x11\x8a\xab\x31\xf6\x3c\x1f\x32"
+ "\x43\x94\xe2\xb8\x35\xc9\xf3\x12\x4e\x9b\x31\x08\x10\x5d\x8d\xe2"
+ "\x43\x0a\x5f\xf5\xfd\xa2\xf1"
+};
+
+void __init fips_signature_selftest_ecdsa(void)
+{
+ fips_signature_selftest("ECDSA",
+ certs_selftest_ecdsa_keys,
+ sizeof(certs_selftest_ecdsa_keys) - 1,
+ certs_selftest_ecdsa_data,
+ sizeof(certs_selftest_ecdsa_data) - 1,
+ certs_selftest_ecdsa_sig,
+ sizeof(certs_selftest_ecdsa_sig) - 1);
+}
diff --git a/crypto/asymmetric_keys/selftest_rsa.c b/crypto/asymmetric_keys/selftest_rsa.c
new file mode 100644
index 000000000000..09c9815e456a
--- /dev/null
+++ b/crypto/asymmetric_keys/selftest_rsa.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Self-tests for PKCS#7 RSA signature verification.
+ *
+ * Copyright (C) 2024 Joachim Vandersmissen <git@jvdsn.com>
+ */
+
+#include <linux/module.h>
+#include "selftest.h"
+
+/*
+ * Set of X.509 certificates to provide public keys for the tests. These will
+ * be loaded into a temporary keyring for the duration of the testing.
+ */
+static const u8 certs_selftest_rsa_keys[] __initconst = {
+ /* 4096-bit RSA certificate */
+ "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
+ "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
+ "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
+ "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
+ "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
+ "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
+ "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
+ "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
+ "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
+ "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
+ "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
+ "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
+ "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
+ "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
+ "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
+ "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
+ "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
+ "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
+ "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
+ "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
+ "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
+ "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
+ "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
+ "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
+ "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
+ "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
+ "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
+ "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
+ "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
+ "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
+ "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
+ "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
+ "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
+ "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
+ "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
+ "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
+ "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
+ "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
+ "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
+ "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
+ "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
+ "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
+ "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
+ "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
+ "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
+ "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
+ "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
+ "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
+ "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
+ "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
+ "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
+ "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
+ "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
+ "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
+ "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
+ "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
+ "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
+ "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
+ "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
+ "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
+ "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
+ "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
+ "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
+ "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
+ "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
+ "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
+ "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
+ "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
+ "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
+ "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
+ "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
+ "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
+ "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
+ "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
+ "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
+ "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
+ "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
+ "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
+ "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
+ "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
+ "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
+ "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
+ "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
+ "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
+ "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
+};
+
+/*
+ * Signed data and detached signature blobs that form the verification tests.
+ */
+static const u8 certs_selftest_rsa_data[] __initconst = {
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
+ "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
+ "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
+ "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+ "\x61\x74\x69\x6f\x6e\x2e\x0a"
+};
+
+static const u8 certs_selftest_rsa_sig[] __initconst = {
+ /* RSA signature using PKCS#1 v1.5 padding with SHA-256 */
+ "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
+ "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
+ "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
+ "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
+ "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
+ "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
+ "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
+ "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
+ "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
+ "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
+ "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
+ "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
+ "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
+ "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
+ "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
+ "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
+ "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
+ "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
+ "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
+ "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
+ "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
+ "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
+ "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
+ "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
+ "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
+ "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
+ "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
+ "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
+ "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
+ "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
+ "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
+ "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
+ "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
+ "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
+ "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
+ "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
+ "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
+ "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
+ "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
+ "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
+ "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
+ "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
+ "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
+};
+
+void __init fips_signature_selftest_rsa(void)
+{
+ fips_signature_selftest("RSA",
+ certs_selftest_rsa_keys,
+ sizeof(certs_selftest_rsa_keys) - 1,
+ certs_selftest_rsa_data,
+ sizeof(certs_selftest_rsa_data) - 1,
+ certs_selftest_rsa_sig,
+ sizeof(certs_selftest_rsa_sig) - 1);
+}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 398983be77e8..2deff81f8af5 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob);
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha512").
+ * passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 487204d39426..25cc4273472f 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -60,24 +60,23 @@ EXPORT_SYMBOL_GPL(x509_free_certificate);
*/
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
- struct x509_certificate *cert;
- struct x509_parse_context *ctx;
+ struct x509_certificate *cert __free(x509_free_certificate);
+ struct x509_parse_context *ctx __free(kfree) = NULL;
struct asymmetric_key_id *kid;
long ret;
- ret = -ENOMEM;
cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL);
if (!cert)
- goto error_no_cert;
+ return ERR_PTR(-ENOMEM);
cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
if (!cert->pub)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
if (!cert->sig)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
if (!ctx)
- goto error_no_ctx;
+ return ERR_PTR(-ENOMEM);
ctx->cert = cert;
ctx->data = (unsigned long)data;
@@ -85,7 +84,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
/* Attempt to decode the certificate */
ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen);
if (ret < 0)
- goto error_decode;
+ return ERR_PTR(ret);
/* Decode the AuthorityKeyIdentifier */
if (ctx->raw_akid) {
@@ -95,20 +94,19 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
ctx->raw_akid, ctx->raw_akid_size);
if (ret < 0) {
pr_warn("Couldn't decode AuthKeyIdentifier\n");
- goto error_decode;
+ return ERR_PTR(ret);
}
}
- ret = -ENOMEM;
cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
if (!cert->pub->key)
- goto error_decode;
+ return ERR_PTR(-ENOMEM);
cert->pub->keylen = ctx->key_size;
cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
if (!cert->pub->params)
- goto error_decode;
+ return ERR_PTR(-ENOMEM);
cert->pub->paramlen = ctx->params_size;
cert->pub->algo = ctx->key_algo;
@@ -116,33 +114,23 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
/* Grab the signature bits */
ret = x509_get_sig_params(cert);
if (ret < 0)
- goto error_decode;
+ return ERR_PTR(ret);
/* Generate cert issuer + serial number key ID */
kid = asymmetric_key_generate_id(cert->raw_serial,
cert->raw_serial_size,
cert->raw_issuer,
cert->raw_issuer_size);
- if (IS_ERR(kid)) {
- ret = PTR_ERR(kid);
- goto error_decode;
- }
+ if (IS_ERR(kid))
+ return ERR_CAST(kid);
cert->id = kid;
/* Detect self-signed certificates */
ret = x509_check_for_self_signed(cert);
if (ret < 0)
- goto error_decode;
+ return ERR_PTR(ret);
- kfree(ctx);
- return cert;
-
-error_decode:
- kfree(ctx);
-error_no_ctx:
- x509_free_certificate(cert);
-error_no_cert:
- return ERR_PTR(ret);
+ return_ptr(cert);
}
EXPORT_SYMBOL_GPL(x509_cert_parse);
@@ -198,6 +186,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
default:
return -ENOPKG; /* Unsupported combination */
+ case OID_sha1WithRSAEncryption:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto rsa_pkcs1;
+
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
@@ -214,6 +206,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+ case OID_id_ecdsa_with_sha1:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto ecdsa;
+
case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
ctx->cert->sig->hash_algo = "sha3-256";
goto rsa_pkcs1;
@@ -538,6 +534,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
case OID_id_ansip384r1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
break;
+ case OID_id_ansip521r1:
+ ctx->cert->pub->pkey_algo = "ecdsa-nist-p521";
+ break;
default:
return -ENOPKG;
}
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index 97a886cbe01c..0688c222806b 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -5,6 +5,7 @@
* Written by David Howells (dhowells@redhat.com)
*/
+#include <linux/cleanup.h>
#include <linux/time.h>
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
@@ -44,6 +45,8 @@ struct x509_certificate {
* x509_cert_parser.c
*/
extern void x509_free_certificate(struct x509_certificate *cert);
+DEFINE_FREE(x509_free_certificate, struct x509_certificate *,
+ if (!IS_ERR(_T)) x509_free_certificate(_T))
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
extern int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
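
The conversions in this series (x509_cert_parse() above and x509_key_preparse() below) lean on the scope-based cleanup helpers from <linux/cleanup.h>: __free() attaches the destructor registered with DEFINE_FREE(), and return_ptr() disarms it so ownership passes to the caller. A minimal sketch of the pattern, using a made-up struct foo rather than any kernel type:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static void foo_free(struct foo *f)
{
	kfree(f);
}
DEFINE_FREE(foo_free, struct foo *, if (!IS_ERR_OR_NULL(_T)) foo_free(_T))

static struct foo *foo_parse(int x)
{
	struct foo *f __free(foo_free) = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* guard skips foo_free(NULL) */
	if (x < 0)
		return ERR_PTR(-EINVAL);	/* f is freed automatically here */

	f->x = x;
	return_ptr(f);				/* disarm cleanup, keep f alive */
}
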
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6a4f00be22fc..00ac7159fba2 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -161,12 +161,11 @@ not_self_signed:
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
- struct asymmetric_key_ids *kids;
- struct x509_certificate *cert;
+ struct x509_certificate *cert __free(x509_free_certificate);
+ struct asymmetric_key_ids *kids __free(kfree) = NULL;
+ char *p, *desc __free(kfree) = NULL;
const char *q;
size_t srlen, sulen;
- char *desc = NULL, *p;
- int ret;
cert = x509_cert_parse(prep->data, prep->datalen);
if (IS_ERR(cert))
@@ -188,9 +187,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
}
/* Don't permit addition of blacklisted keys */
- ret = -EKEYREJECTED;
if (cert->blacklisted)
- goto error_free_cert;
+ return -EKEYREJECTED;
/* Propose a description */
sulen = strlen(cert->subject);
@@ -202,10 +200,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
q = cert->raw_serial;
}
- ret = -ENOMEM;
desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
if (!desc)
- goto error_free_cert;
+ return -ENOMEM;
p = memcpy(desc, cert->subject, sulen);
p += sulen;
*p++ = ':';
@@ -215,16 +212,14 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL);
if (!kids)
- goto error_free_desc;
+ return -ENOMEM;
kids->id[0] = cert->id;
kids->id[1] = cert->skid;
kids->id[2] = asymmetric_key_generate_id(cert->raw_subject,
cert->raw_subject_size,
"", 0);
- if (IS_ERR(kids->id[2])) {
- ret = PTR_ERR(kids->id[2]);
- goto error_free_kids;
- }
+ if (IS_ERR(kids->id[2]))
+ return PTR_ERR(kids->id[2]);
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
@@ -242,15 +237,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->sig = NULL;
desc = NULL;
kids = NULL;
- ret = 0;
-
-error_free_kids:
- kfree(kids);
-error_free_desc:
- kfree(desc);
-error_free_cert:
- x509_free_certificate(cert);
- return ret;
+ return 0;
}
static struct asymmetric_key_parser x509_key_parser = {
diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c
new file mode 100644
index 000000000000..b5e657415770
--- /dev/null
+++ b/crypto/bpf_crypto_skcipher.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Meta, Inc */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/bpf_crypto.h>
+#include <crypto/skcipher.h>
+
+static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo)
+{
+ return crypto_alloc_lskcipher(algo, 0, 0);
+}
+
+static void bpf_crypto_lskcipher_free_tfm(void *tfm)
+{
+ crypto_free_lskcipher(tfm);
+}
+
+static int bpf_crypto_lskcipher_has_algo(const char *algo)
+{
+ return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK);
+}
+
+static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen)
+{
+ return crypto_lskcipher_setkey(tfm, key, keylen);
+}
+
+static u32 bpf_crypto_lskcipher_get_flags(void *tfm)
+{
+ return crypto_lskcipher_get_flags(tfm);
+}
+
+static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm)
+{
+ return crypto_lskcipher_ivsize(tfm);
+}
+
+static unsigned int bpf_crypto_lskcipher_statesize(void *tfm)
+{
+ return crypto_lskcipher_statesize(tfm);
+}
+
+static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *siv)
+{
+ return crypto_lskcipher_encrypt(tfm, src, dst, len, siv);
+}
+
+static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *siv)
+{
+ return crypto_lskcipher_decrypt(tfm, src, dst, len, siv);
+}
+
+static const struct bpf_crypto_type bpf_crypto_lskcipher_type = {
+ .alloc_tfm = bpf_crypto_lskcipher_alloc_tfm,
+ .free_tfm = bpf_crypto_lskcipher_free_tfm,
+ .has_algo = bpf_crypto_lskcipher_has_algo,
+ .setkey = bpf_crypto_lskcipher_setkey,
+ .encrypt = bpf_crypto_lskcipher_encrypt,
+ .decrypt = bpf_crypto_lskcipher_decrypt,
+ .ivsize = bpf_crypto_lskcipher_ivsize,
+ .statesize = bpf_crypto_lskcipher_statesize,
+ .get_flags = bpf_crypto_lskcipher_get_flags,
+ .owner = THIS_MODULE,
+ .name = "skcipher",
+};
+
+static int __init bpf_crypto_skcipher_init(void)
+{
+ return bpf_crypto_register_type(&bpf_crypto_lskcipher_type);
+}
+
+static void __exit bpf_crypto_skcipher_exit(void)
+{
+ int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type);
+ WARN_ON_ONCE(err);
+}
+
+module_init(bpf_crypto_skcipher_init);
+module_exit(bpf_crypto_skcipher_exit);
+MODULE_LICENSE("GPL");
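
The type above is a thin shim over the lskcipher API. As a rough usage sketch of the calls it forwards to (not taken from the patch; the "cbc(aes)" name, buffer sizes and error handling are illustrative assumptions):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/slab.h>

static int lskcipher_demo(const u8 *key, unsigned int keylen,
			  const u8 *src, u8 *dst, unsigned int len)
{
	struct crypto_lskcipher *tfm;
	u8 *siv;	/* IV followed by the algorithm's chaining state */
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
		      crypto_lskcipher_statesize(tfm), GFP_KERNEL);
	if (!siv) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

	kfree(siv);
out_free_tfm:
	crypto_free_lskcipher(tfm);
	return err;
}
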
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 47c77a3e5978..40cae908788e 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kfree_sensitive(buffer);
return ret;
}
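
The switch to kfree_sensitive() is not just a cleanup: an open-coded memset() right before kfree() is a dead store that the compiler is allowed to drop, whereas the helper wipes the buffer with memzero_explicit() before freeing. Roughly, and simplified (the real helper also derives the allocation size itself):

/* Rough, simplified equivalent of what kfree_sensitive() guarantees. */
static void free_sensitive_buffer(void *buf, size_t len)
{
	memzero_explicit(buf, len);	/* cannot be elided as a dead store */
	kfree(buf);
}
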
diff --git a/crypto/compress.h b/crypto/compress.h
index 19f65516d699..c3cedfb5e606 100644
--- a/crypto/compress.h
+++ b/crypto/compress.h
@@ -13,14 +13,11 @@
struct acomp_req;
struct comp_alg_common;
-struct sk_buff;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
-
void comp_prepare_alg(struct comp_alg_common *alg);
#endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user.c
index 3fa20f12989f..6c571834e86a 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user.c
@@ -18,7 +18,6 @@
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
#include "internal.h"
@@ -33,7 +32,7 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
@@ -387,6 +386,13 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
return crypto_del_default_rng();
}
+static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ /* No longer supported */
+ return -ENOTSUPP;
+}
+
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
deleted file mode 100644
index d4f3d39b5137..000000000000
--- a/crypto/crypto_user_stat.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
- *
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/cryptouser.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <net/netlink.h>
-#include <net/sock.h>
-
-#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-
-struct crypto_dump_info {
- struct sk_buff *in_skb;
- struct sk_buff *out_skb;
- u32 nlmsg_seq;
- u16 nlmsg_flags;
-};
-
-static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_cipher rcipher;
-
- memset(&rcipher, 0, sizeof(rcipher));
-
- strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_stat_compress rcomp;
-
- memset(&rcomp, 0, sizeof(rcomp));
-
- strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
- return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
-static int crypto_reportstat_one(struct crypto_alg *alg,
- struct crypto_user_alg *ualg,
- struct sk_buff *skb)
-{
- memset(ualg, 0, sizeof(*ualg));
-
- strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
- strscpy(ualg->cru_driver_name, alg->cra_driver_name,
- sizeof(ualg->cru_driver_name));
- strscpy(ualg->cru_module_name, module_name(alg->cra_module),
- sizeof(ualg->cru_module_name));
-
- ualg->cru_type = 0;
- ualg->cru_mask = 0;
- ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
-
- if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
- goto nla_put_failure;
- if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
- struct crypto_stat_larval rl;
-
- memset(&rl, 0, sizeof(rl));
- strscpy(rl.type, "larval", sizeof(rl.type));
- if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
- goto nla_put_failure;
- goto out;
- }
-
- if (alg->cra_type && alg->cra_type->report_stat) {
- if (alg->cra_type->report_stat(skb, alg))
- goto nla_put_failure;
- goto out;
- }
-
- switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- if (crypto_report_comp(skb, alg))
- goto nla_put_failure;
- break;
- default:
- pr_err("ERROR: Unhandled alg %d in %s\n",
- alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
- __func__);
- }
-
-out:
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int crypto_reportstat_alg(struct crypto_alg *alg,
- struct crypto_dump_info *info)
-{
- struct sk_buff *in_skb = info->in_skb;
- struct sk_buff *skb = info->out_skb;
- struct nlmsghdr *nlh;
- struct crypto_user_alg *ualg;
- int err = 0;
-
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
- CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
- if (!nlh) {
- err = -EMSGSIZE;
- goto out;
- }
-
- ualg = nlmsg_data(nlh);
-
- err = crypto_reportstat_one(alg, ualg, skb);
- if (err) {
- nlmsg_cancel(skb, nlh);
- goto out;
- }
-
- nlmsg_end(skb, nlh);
-
-out:
- return err;
-}
-
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- struct net *net = sock_net(in_skb->sk);
- struct crypto_user_alg *p = nlmsg_data(in_nlh);
- struct crypto_alg *alg;
- struct sk_buff *skb;
- struct crypto_dump_info info;
- int err;
-
- if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
- return -EINVAL;
-
- alg = crypto_alg_match(p, 0);
- if (!alg)
- return -ENOENT;
-
- err = -ENOMEM;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
- if (!skb)
- goto drop_alg;
-
- info.in_skb = in_skb;
- info.out_skb = skb;
- info.nlmsg_seq = in_nlh->nlmsg_seq;
- info.nlmsg_flags = 0;
-
- err = crypto_reportstat_alg(alg, &info);
-
-drop_alg:
- crypto_mod_put(alg);
-
- if (err) {
- kfree_skb(skb);
- return err;
- }
-
- return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index f53fb4d6af99..fe761256e335 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -60,12 +60,36 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
return &nist_p256;
case ECC_CURVE_NIST_P384:
return &nist_p384;
+ case ECC_CURVE_NIST_P521:
+ return &nist_p521;
default:
return NULL;
}
}
EXPORT_SYMBOL(ecc_get_curve);
+void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits)
+{
+ int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
+ unsigned int o = nbytes & 7;
+ __be64 msd = 0;
+
+ /* diff > 0: not enough input bytes: set most significant digits to 0 */
+ if (diff > 0) {
+ ndigits -= diff;
+ memset(&out[ndigits], 0, diff * sizeof(u64));
+ }
+
+ if (o) {
+ memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
+ out[--ndigits] = be64_to_cpu(msd);
+ in += o;
+ }
+ ecc_swap_digits(in, out, ndigits);
+}
+EXPORT_SYMBOL(ecc_digits_from_bytes);
+
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
size_t len = ndigits * sizeof(u64);
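
A short illustration of how ecc_digits_from_bytes() lays out its output (demo_unpack_p521() is a made-up example, and the declaration is assumed to live in <crypto/internal/ecc.h> as elsewhere in this series):

#include <crypto/internal/ecc.h>

/* Sketch: unpack a big-endian P-521 coordinate (66 bytes) into 9 digits. */
static void demo_unpack_p521(const u8 coord_be[66], u64 digits[9])
{
	/*
	 * 66 & 7 == 2, so the two most significant bytes become digits[8]
	 * and the remaining 64 bytes fill digits[7..0] via ecc_swap_digits()
	 * (least significant digit first).  A shorter input, say 32 bytes,
	 * has the high digits cleared before the low ones are filled.
	 */
	ecc_digits_from_bytes(coord_be, 66, digits, 9);
}
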
@@ -689,7 +713,7 @@ static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
static void vli_mmod_fast_192(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
- const unsigned int ndigits = 3;
+ const unsigned int ndigits = ECC_CURVE_NIST_P192_DIGITS;
int carry;
vli_set(result, product, ndigits);
@@ -717,7 +741,7 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
- const unsigned int ndigits = 4;
+ const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS;
/* t */
vli_set(result, product, ndigits);
@@ -800,7 +824,7 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
- const unsigned int ndigits = 6;
+ const unsigned int ndigits = ECC_CURVE_NIST_P384_DIGITS;
/* t */
vli_set(result, product, ndigits);
@@ -902,6 +926,28 @@ static void vli_mmod_fast_384(u64 *result, const u64 *product,
#undef AND64H
#undef AND64L
+/*
+ * Computes result = product % curve_prime
+ * from "Recommendations for Discrete Logarithm-Based Cryptography:
+ * Elliptic Curve Domain Parameters" section G.1.4
+ */
+static void vli_mmod_fast_521(u64 *result, const u64 *product,
+ const u64 *curve_prime, u64 *tmp)
+{
+ const unsigned int ndigits = ECC_CURVE_NIST_P521_DIGITS;
+ size_t i;
+
+ /* Initialize result with lowest 521 bits from product */
+ vli_set(result, product, ndigits);
+ result[8] &= 0x1ff;
+
+ for (i = 0; i < ndigits; i++)
+ tmp[i] = (product[8 + i] >> 9) | (product[9 + i] << 55);
+ tmp[8] &= 0x1ff;
+
+ vli_mod_add(result, result, tmp, curve_prime, ndigits);
+}
+
/* Computes result = product % curve_prime for different curve_primes.
*
* Note that curve_primes are distinguished just by heuristic check and
@@ -932,15 +978,18 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
}
switch (ndigits) {
- case 3:
+ case ECC_CURVE_NIST_P192_DIGITS:
vli_mmod_fast_192(result, product, curve_prime, tmp);
break;
- case 4:
+ case ECC_CURVE_NIST_P256_DIGITS:
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
- case 6:
+ case ECC_CURVE_NIST_P384_DIGITS:
vli_mmod_fast_384(result, product, curve_prime, tmp);
break;
+ case ECC_CURVE_NIST_P521_DIGITS:
+ vli_mmod_fast_521(result, product, curve_prime, tmp);
+ break;
default:
pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
@@ -1295,7 +1344,10 @@ static void ecc_point_mult(struct ecc_point *result,
carry = vli_add(sk[0], scalar, curve->n, ndigits);
vli_add(sk[1], sk[0], curve->n, ndigits);
scalar = sk[!carry];
- num_bits = sizeof(u64) * ndigits * 8 + 1;
+ if (curve->nbits == 521) /* NIST P521 */
+ num_bits = curve->nbits + 2;
+ else
+ num_bits = sizeof(u64) * ndigits * 8 + 1;
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
@@ -1416,6 +1468,12 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
}
EXPORT_SYMBOL(ecc_point_mult_shamir);
+/*
+ * This function performs checks equivalent to Appendix A.4.2 of FIPS 186-5.
+ * Whereas A.4.2 results in an integer in the interval [1, n-1], this function
+ * ensures that the integer is in the range of [2, n-3]. We are slightly
+ * stricter because of the currently used scalar multiplication algorithm.
+ */
static int __ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, unsigned int ndigits)
{
@@ -1455,31 +1513,29 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
EXPORT_SYMBOL(ecc_is_key_valid);
/*
- * ECC private keys are generated using the method of extra random bits,
- * equivalent to that described in FIPS 186-4, Appendix B.4.1.
- *
- * d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer
- * than requested
- * 0 <= c mod(n-1) <= n-2 and implies that
- * 1 <= d <= n-1
+ * ECC private keys are generated using the method of rejection sampling,
+ * equivalent to that described in FIPS 186-5, Appendix A.2.2.
*
* This method generates a private key uniformly distributed in the range
- * [1, n-1].
+ * [2, n-3].
*/
-int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key)
{
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- u64 priv[ECC_MAX_DIGITS];
unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
unsigned int nbits = vli_num_bits(curve->n, ndigits);
int err;
- /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
- if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
+ /*
+ * Step 1 & 2: check that N is included in Table 1 of FIPS 186-5,
+ * section 6.1.1.
+ */
+ if (nbits < 224)
return -EINVAL;
/*
- * FIPS 186-4 recommends that the private key should be obtained from a
+ * FIPS 186-5 recommends that the private key should be obtained from a
* RBG with a security strength equal to or greater than the security
* strength associated with N.
*
@@ -1492,17 +1548,17 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
if (crypto_get_default_rng())
return -EFAULT;
- err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+ /* Step 3: obtain N returned_bits from the DRBG. */
+ err = crypto_rng_get_bytes(crypto_default_rng,
+ (u8 *)private_key, nbytes);
crypto_put_default_rng();
if (err)
return err;
- /* Make sure the private key is in the valid range. */
- if (__ecc_is_key_valid(curve, priv, ndigits))
+ /* Step 4: make sure the private key is in the valid range. */
+ if (__ecc_is_key_valid(curve, private_key, ndigits))
return -EINVAL;
- ecc_swap_digits(priv, privkey, ndigits);
-
return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);
@@ -1512,23 +1568,20 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *pk;
- u64 priv[ECC_MAX_DIGITS];
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) {
+ if (!private_key) {
ret = -EINVAL;
goto out;
}
- ecc_swap_digits(private_key, priv, ndigits);
-
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
goto out;
}
- ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
+ ecc_point_mult(pk, &curve->g, private_key, NULL, curve, ndigits);
/* SP800-56A rev 3 5.6.2.1.3 key check */
if (ecc_is_pubkey_valid_full(curve, pk)) {
@@ -1612,13 +1665,11 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *product, *pk;
- u64 priv[ECC_MAX_DIGITS];
u64 rand_z[ECC_MAX_DIGITS];
unsigned int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
- if (!private_key || !public_key || !curve ||
- ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) {
+ if (!private_key || !public_key || ndigits > ARRAY_SIZE(rand_z)) {
ret = -EINVAL;
goto out;
}
@@ -1639,15 +1690,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
if (ret)
goto err_alloc_product;
- ecc_swap_digits(private_key, priv, ndigits);
-
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
- ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
+ ecc_point_mult(product, pk, private_key, rand_z, curve, ndigits);
if (ecc_point_is_zero(product)) {
ret = -EFAULT;
@@ -1657,7 +1706,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_swap_digits(product->x, secret, ndigits);
err_validity:
- memzero_explicit(priv, sizeof(priv));
memzero_explicit(rand_z, sizeof(rand_z));
ecc_free_point(product);
err_alloc_product:
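
The new ecc_digits_from_bytes() helper turns a big-endian byte string of any length into little-endian u64 digits, zero-filling the most significant digits when fewer than ndigits * 8 bytes are supplied and folding a partial leading word through a temporary __be64. A minimal usage sketch, assuming a hypothetical 66-byte big-endian P-521 coordinate in xbuf:

	u64 x[ECC_CURVE_NIST_P521_DIGITS];

	/* 66 bytes = 8 full digits plus a 2-byte most significant digit */
	ecc_digits_from_bytes(xbuf, 66, x, ECC_CURVE_NIST_P521_DIGITS);

The companion vli_mmod_fast_521() reduction leans on p = 2^521 - 1, so 2^521 ≡ 1 (mod p): the product is split as product = hi * 2^521 + lo, the low 521 bits stay in result, hi is shifted down by 521 bits (8 digits plus 9 bits, hence the >> 9 / << 55 pair) into tmp, and the two halves are combined with vli_mod_add().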
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 9719934c9428..0ecade7d02f5 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -17,6 +17,7 @@ static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
+ .nbits = 192,
.g = {
.x = nist_p192_g_x,
.y = nist_p192_g_y,
@@ -43,6 +44,7 @@ static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
+ .nbits = 256,
.g = {
.x = nist_p256_g_x,
.y = nist_p256_g_y,
@@ -75,6 +77,7 @@ static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull,
0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull };
static struct ecc_curve nist_p384 = {
.name = "nist_384",
+ .nbits = 384,
.g = {
.x = nist_p384_g_x,
.y = nist_p384_g_y,
@@ -86,6 +89,51 @@ static struct ecc_curve nist_p384 = {
.b = nist_p384_b
};
+/* NIST P-521 */
+static u64 nist_p521_g_x[] = { 0xf97e7e31c2e5bd66ull, 0x3348b3c1856a429bull,
+ 0xfe1dc127a2ffa8deull, 0xa14b5e77efe75928ull,
+ 0xf828af606b4d3dbaull, 0x9c648139053fb521ull,
+ 0x9e3ecb662395b442ull, 0x858e06b70404e9cdull,
+ 0xc6ull };
+static u64 nist_p521_g_y[] = { 0x88be94769fd16650ull, 0x353c7086a272c240ull,
+ 0xc550b9013fad0761ull, 0x97ee72995ef42640ull,
+ 0x17afbd17273e662cull, 0x98f54449579b4468ull,
+ 0x5c8a5fb42c7d1bd9ull, 0x39296a789a3bc004ull,
+ 0x118ull };
+static u64 nist_p521_p[] = { 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_n[] = { 0xbb6fb71e91386409ull, 0x3bb5c9b8899c47aeull,
+ 0x7fcc0148f709a5d0ull, 0x51868783bf2f966bull,
+ 0xfffffffffffffffaull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_a[] = { 0xfffffffffffffffcull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0xffffffffffffffffull, 0xffffffffffffffffull,
+ 0x1ffull };
+static u64 nist_p521_b[] = { 0xef451fd46b503f00ull, 0x3573df883d2c34f1ull,
+ 0x1652c0bd3bb1bf07ull, 0x56193951ec7e937bull,
+ 0xb8b489918ef109e1ull, 0xa2da725b99b315f3ull,
+ 0x929a21a0b68540eeull, 0x953eb9618e1c9a1full,
+ 0x051ull };
+static struct ecc_curve nist_p521 = {
+ .name = "nist_521",
+ .nbits = 521,
+ .g = {
+ .x = nist_p521_g_x,
+ .y = nist_p521_g_y,
+ .ndigits = 9,
+ },
+ .p = nist_p521_p,
+ .n = nist_p521_n,
+ .a = nist_p521_a,
+ .b = nist_p521_b
+};
+
/* curve25519 */
static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
@@ -95,6 +143,7 @@ static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
static const struct ecc_curve ecc_25519 = {
.name = "curve25519",
+ .nbits = 255,
.g = {
.x = curve25519_g_x,
.ndigits = 4,
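
The new .nbits member records the exact bit width of each curve, which ndigits alone cannot express once P-521 (521 bits in 9 digits) and curve25519 (255 bits in 4 digits) are in the table. A small sketch of the size computation it enables, assuming a struct ecc_curve pointer curve (this is what ecdsa_max_size() in the crypto/ecdsa.c hunk below does):

	/* exact coordinate size in bytes: 66 for P-521, 48 for P-384, 24 for P-192 */
	unsigned int nbytes = DIV_ROUND_UP(curve->nbits, 8);

ecc_point_mult() in crypto/ecc.c likewise uses curve->nbits to pick num_bits = 523 for P-521 instead of the 577 that the digit count alone would imply.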
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 80afee3234fb..72cfd1590156 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -28,23 +28,28 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;
+ int ret = 0;
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
+ memset(ctx->private_key, 0, sizeof(ctx->private_key));
+
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
- memcpy(ctx->private_key, params.key, params.key_size);
+ ecc_digits_from_bytes(params.key, params.key_size,
+ ctx->private_key, ctx->ndigits);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
ctx->private_key, params.key_size) < 0) {
memzero_explicit(ctx->private_key, params.key_size);
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+
+ return ret;
}
static int ecdh_compute_value(struct kpp_request *req)
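
With ecc_digits_from_bytes() in the setkey path, the private key now sits in ctx->private_key in the internal little-endian digit form, which is why the ecc_swap_digits() round trips were dropped from ecc_make_pub_key() and crypto_ecdh_shared_secret() in the crypto/ecc.c hunk above; a short key gets its high digits zero-filled. A rough sketch for a hypothetical 32-byte big-endian P-256 key in kbuf:

	/* ndigits == 4 for P-256; no padding needed for a full-length key */
	ecc_digits_from_bytes(kbuf, 32, ctx->private_key, ctx->ndigits);
	/* ctx->private_key[0] now holds the last (least significant) 8 bytes of kbuf */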
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index fbd76498aba8..258fffbf623d 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -35,8 +35,8 @@ struct ecdsa_signature_ctx {
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
- size_t keylen = ndigits * sizeof(u64);
- ssize_t diff = vlen - keylen;
+ size_t bufsize = ndigits * sizeof(u64);
+ ssize_t diff = vlen - bufsize;
const char *d = value;
u8 rs[ECC_MAX_BYTES];
@@ -58,7 +58,7 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
if (diff)
return -EINVAL;
}
- if (-diff >= keylen)
+ if (-diff >= bufsize)
return -EINVAL;
if (diff) {
@@ -122,7 +122,7 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con
/* res.x = res.x mod n (if res.x > order) */
if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
- /* faster alternative for NIST p384, p256 & p192 */
+ /* faster alternative for NIST p521, p384, p256 & p192 */
vli_sub(res.x, res.x, curve->n, ndigits);
if (!vli_cmp(res.x, r, ndigits))
@@ -138,7 +138,7 @@ static int ecdsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
- size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
+ size_t bufsize = ctx->curve->g.ndigits * sizeof(u64);
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
@@ -165,14 +165,14 @@ static int ecdsa_verify(struct akcipher_request *req)
goto error;
/* if the hash is shorter then we will add leading zeros to fit to ndigits */
- diff = keylen - req->dst_len;
+ diff = bufsize - req->dst_len;
if (diff >= 0) {
if (diff)
memset(rawhash, 0, diff);
memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
} else if (diff < 0) {
/* given hash is longer, we take the left-most bytes */
- memcpy(&rawhash, buffer + req->src_len, keylen);
+ memcpy(&rawhash, buffer + req->src_len, bufsize);
}
ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
@@ -222,28 +222,32 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+ unsigned int digitlen, ndigits;
const unsigned char *d = key;
- const u64 *digits = (const u64 *)&d[1];
- unsigned int ndigits;
int ret;
ret = ecdsa_ecc_ctx_reset(ctx);
if (ret < 0)
return ret;
- if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
+ if (keylen < 1 || ((keylen - 1) & 1) != 0)
return -EINVAL;
/* we only accept uncompressed format indicated by '4' */
if (d[0] != 4)
return -EINVAL;
keylen--;
- ndigits = (keylen >> 1) / sizeof(u64);
+ digitlen = keylen >> 1;
+
+ ndigits = DIV_ROUND_UP(digitlen, sizeof(u64));
if (ndigits != ctx->curve->g.ndigits)
return -EINVAL;
- ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
- ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
+ d++;
+
+ ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits);
+ ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits);
+
ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
ctx->pub_key_set = ret == 0;
@@ -262,9 +266,31 @@ static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
- return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ return DIV_ROUND_UP(ctx->curve->nbits, 8);
+}
+
+static int ecdsa_nist_p521_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P521);
}
+static struct akcipher_alg ecdsa_nist_p521 = {
+ .verify = ecdsa_verify,
+ .set_pub_key = ecdsa_set_pub_key,
+ .max_size = ecdsa_max_size,
+ .init = ecdsa_nist_p521_init_tfm,
+ .exit = ecdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecdsa-nist-p521",
+ .cra_driver_name = "ecdsa-nist-p521-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecc_ctx),
+ },
+};
+
static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
@@ -348,8 +374,15 @@ static int __init ecdsa_init(void)
if (ret)
goto nist_p384_error;
+ ret = crypto_register_akcipher(&ecdsa_nist_p521);
+ if (ret)
+ goto nist_p521_error;
+
return 0;
+nist_p521_error:
+ crypto_unregister_akcipher(&ecdsa_nist_p384);
+
nist_p384_error:
crypto_unregister_akcipher(&ecdsa_nist_p256);
@@ -365,6 +398,7 @@ static void __exit ecdsa_exit(void)
crypto_unregister_akcipher(&ecdsa_nist_p192);
crypto_unregister_akcipher(&ecdsa_nist_p256);
crypto_unregister_akcipher(&ecdsa_nist_p384);
+ crypto_unregister_akcipher(&ecdsa_nist_p521);
}
subsys_initcall(ecdsa_init);
@@ -373,4 +407,8 @@ module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
MODULE_DESCRIPTION("ECDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p521");
MODULE_ALIAS_CRYPTO("ecdsa-generic");
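
The relaxed length check matters for P-521: an uncompressed public key is 1 + 2 * 66 = 133 bytes, and 66 is not a multiple of sizeof(u64), so the old ((keylen - 1) >> 1) % sizeof(u64) test would have rejected it. The new code only requires an even coordinate area and lets ecc_digits_from_bytes() pad each coordinate up to the digit count. Working the arithmetic for a P-521 key:

	keylen   = 133;                              /* '\x04' marker + x + y      */
	digitlen = (133 - 1) >> 1;                   /* = 66 bytes per coordinate  */
	ndigits  = DIV_ROUND_UP(66, sizeof(u64));    /* = 9 == curve->g.ndigits    */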
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index f3c6b5e15e75..3811f3805b5d 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa");
MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
index 0056335b9d03..1c2c2449e331 100644
--- a/crypto/ecrdsa_defs.h
+++ b/crypto/ecrdsa_defs.h
@@ -47,6 +47,7 @@ static u64 cp256a_b[] = {
static struct ecc_curve gost_cp256a = {
.name = "cp256a",
+ .nbits = 256,
.g = {
.x = cp256a_g_x,
.y = cp256a_g_y,
@@ -80,6 +81,7 @@ static u64 cp256b_b[] = {
static struct ecc_curve gost_cp256b = {
.name = "cp256b",
+ .nbits = 256,
.g = {
.x = cp256b_g_x,
.y = cp256b_g_y,
@@ -117,6 +119,7 @@ static u64 cp256c_b[] = {
static struct ecc_curve gost_cp256c = {
.name = "cp256c",
+ .nbits = 256,
.g = {
.x = cp256c_g_x,
.y = cp256c_g_y,
@@ -166,6 +169,7 @@ static u64 tc512a_b[] = {
static struct ecc_curve gost_tc512a = {
.name = "tc512a",
+ .nbits = 512,
.g = {
.x = tc512a_g_x,
.y = tc512a_g_y,
@@ -211,6 +215,7 @@ static u64 tc512b_b[] = {
static struct ecc_curve gost_tc512b = {
.name = "tc512b",
+ .nbits = 512,
.g = {
.x = tc512b_g_x,
.y = tc512b_g_y,
diff --git a/crypto/fips.c b/crypto/fips.c
index 92fd506abb21..8a784018ebfc 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -63,7 +63,6 @@ static struct ctl_table crypto_sysctl_table[] = {
.mode = 0444,
.proc_handler = proc_dostring
},
- {}
};
static struct ctl_table_header *crypto_sysctls;
diff --git a/crypto/hash.h b/crypto/hash.h
index 93f6ba0df263..cf9aee07f77d 100644
--- a/crypto/hash.h
+++ b/crypto/hash.h
@@ -8,39 +8,9 @@
#define _LOCAL_CRYPTO_HASH_H
#include <crypto/internal/hash.h>
-#include <linux/cryptouser.h>
#include "internal.h"
-static inline struct crypto_istat_hash *hash_get_stat(
- struct hash_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_hash_report_stat(struct sk_buff *skb,
- struct crypto_alg *alg,
- const char *type)
-{
- struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
- struct crypto_istat_hash *istat = hash_get_stat(halg);
- struct crypto_stat_hash rhash;
-
- memset(&rhash, 0, sizeof(rhash));
-
- strscpy(rhash.type, type, sizeof(rhash.type));
-
- rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
- rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
- rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
extern const struct crypto_type crypto_shash_type;
int hash_prepare_alg(struct hash_alg_common *alg);
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 76edbf8af0ac..c24d4ff2b4a8 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -61,8 +61,7 @@ void *jent_kvzalloc(unsigned int len)
void jent_kvzfree(void *ptr, unsigned int len)
{
- memzero_explicit(ptr, len);
- kvfree(ptr);
+ kvfree_sensitive(ptr, len);
}
void *jent_zalloc(unsigned int len)
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 26a9048bc893..d7056de8c0d7 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -157,8 +157,8 @@ struct rand_data {
/*
* See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B
* APT.
- * http://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf
- * In in the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)).
+ * https://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf
+ * In the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)).
* (The original formula wasn't correct because the first symbol must
* necessarily have been observed, so there is no chance of observing 0 of these
* symbols.)
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 33d44e59387f..ecc63a1a948d 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -66,29 +66,6 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
kpp->free(kpp);
}
-static int __maybe_unused crypto_kpp_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct kpp_alg *kpp = __crypto_kpp_alg(alg);
- struct crypto_istat_kpp *istat;
- struct crypto_stat_kpp rkpp;
-
- istat = kpp_get_stat(kpp);
-
- memset(&rkpp, 0, sizeof(rkpp));
-
- strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
- rkpp.stat_generate_public_key_cnt =
- atomic64_read(&istat->generate_public_key_cnt);
- rkpp.stat_compute_shared_secret_cnt =
- atomic64_read(&istat->compute_shared_secret_cnt);
- rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
@@ -99,9 +76,6 @@ static const struct crypto_type crypto_kpp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_kpp_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_kpp_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
@@ -131,15 +105,11 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
static void kpp_prepare_alg(struct kpp_alg *alg)
{
- struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 0f1bd7dcde24..cdb4897c63e6 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -29,25 +29,6 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg(
return container_of(alg, struct lskcipher_alg, co.base);
}
-static inline struct crypto_istat_cipher *lskcipher_get_stat(
- struct lskcipher_alg *alg)
-{
- return skcipher_get_stat_common(&alg->co);
-}
-
-static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
-{
- struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err)
- atomic64_inc(&istat->err_cnt);
-
- return err;
-}
-
static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -147,20 +128,13 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
u32 flags))
{
unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
- struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
- int ret;
if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
- alignmask) {
- ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
- crypt);
- goto out;
- }
+ alignmask)
+ return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+ crypt);
- ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
-
-out:
- return crypto_lskcipher_errstat(alg, ret);
+ return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
}
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
@@ -168,13 +142,6 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
{
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(len, &istat->encrypt_tlen);
- }
-
return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
@@ -184,13 +151,6 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
{
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
-
- atomic64_inc(&istat->decrypt_cnt);
- atomic64_add(len, &istat->decrypt_tlen);
- }
-
return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
@@ -320,28 +280,6 @@ static int __maybe_unused crypto_lskcipher_report(
sizeof(rblkcipher), &rblkcipher);
}
-static int __maybe_unused crypto_lskcipher_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
- struct crypto_istat_cipher *istat;
- struct crypto_stat_cipher rcipher;
-
- istat = lskcipher_get_stat(skcipher);
-
- memset(&rcipher, 0, sizeof(rcipher));
-
- strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
static const struct crypto_type crypto_lskcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_lskcipher_init_tfm,
@@ -352,9 +290,6 @@ static const struct crypto_type crypto_lskcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_lskcipher_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_lskcipher_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_LSKCIPHER,
diff --git a/crypto/rng.c b/crypto/rng.c
index 279dffdebf59..9d8804e46422 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -30,30 +30,24 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
- struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&rng_get_stat(alg)->seed_cnt);
-
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
- err = -ENOMEM;
if (!buf)
- goto out;
+ return -ENOMEM;
err = get_random_bytes_wait(buf, slen);
if (err)
- goto free_buf;
+ goto out;
seed = buf;
}
- err = alg->seed(tfm, seed, slen);
-free_buf:
- kfree_sensitive(buf);
+ err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
out:
- return crypto_rng_errstat(alg, err);
+ kfree_sensitive(buf);
+ return err;
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
@@ -91,27 +85,6 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
-static int __maybe_unused crypto_rng_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct rng_alg *rng = __crypto_rng_alg(alg);
- struct crypto_istat_rng *istat;
- struct crypto_stat_rng rrng;
-
- istat = rng_get_stat(rng);
-
- memset(&rrng, 0, sizeof(rrng));
-
- strscpy(rrng.type, "rng", sizeof(rrng.type));
-
- rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
- rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
- rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
- rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
@@ -121,9 +94,6 @@ static const struct crypto_type crypto_rng_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_rng_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_rng_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
@@ -199,7 +169,6 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
int crypto_register_rng(struct rng_alg *alg)
{
- struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
@@ -209,9 +178,6 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
-
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 60bbb7ea4060..1cef6bb06a81 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -271,9 +271,6 @@ static const struct crypto_type crypto_scomp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_scomp_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_acomp_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
diff --git a/crypto/shash.c b/crypto/shash.c
index c3f7f6a25280..301ab42bf849 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,18 +16,6 @@
#include "hash.h"
-static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
-{
- return hash_get_stat(&alg->halg);
-}
-
-static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err)
- atomic64_inc(&shash_get_stat(alg)->err_cnt);
- return err;
-}
-
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -61,29 +49,13 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey);
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct shash_alg *shash = crypto_shash_alg(desc->tfm);
- int err;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
-
- err = shash->update(desc, data, len);
-
- return crypto_shash_errstat(shash, err);
+ return crypto_shash_alg(desc->tfm)->update(desc, data, len);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
- struct shash_alg *shash = crypto_shash_alg(desc->tfm);
- int err;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&shash_get_stat(shash)->hash_cnt);
-
- err = shash->final(desc, out);
-
- return crypto_shash_errstat(shash, err);
+ return crypto_shash_alg(desc->tfm)->final(desc, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
@@ -99,20 +71,7 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- int err;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = shash_get_stat(shash);
-
- atomic64_inc(&istat->hash_cnt);
- atomic64_add(len, &istat->hash_tlen);
- }
-
- err = shash->finup(desc, data, len, out);
-
- return crypto_shash_errstat(shash, err);
+ return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
@@ -129,22 +88,11 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- int err;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = shash_get_stat(shash);
-
- atomic64_inc(&istat->hash_cnt);
- atomic64_add(len, &istat->hash_tlen);
- }
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- err = -ENOKEY;
- else
- err = shash->digest(desc, data, len, out);
+ return -ENOKEY;
- return crypto_shash_errstat(shash, err);
+ return crypto_shash_alg(tfm)->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -265,12 +213,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
-static int __maybe_unused crypto_shash_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- return crypto_hash_report_stat(skb, alg, "shash");
-}
-
const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
@@ -281,9 +223,6 @@ const struct crypto_type crypto_shash_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_shash_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_shash_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
@@ -350,7 +289,6 @@ EXPORT_SYMBOL_GPL(crypto_clone_shash);
int hash_prepare_alg(struct hash_alg_common *alg)
{
- struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
@@ -362,9 +300,6 @@ int hash_prepare_alg(struct hash_alg_common *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
-
return 0;
}
diff --git a/crypto/sig.c b/crypto/sig.c
index 224c47019297..7645bedf3a1f 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -45,16 +45,6 @@ static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
}
-static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
- struct crypto_alg *alg)
-{
- struct crypto_stat_akcipher rsig = {};
-
- strscpy(rsig.type, "sig", sizeof(rsig.type));
-
- return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
-}
-
static const struct crypto_type crypto_sig_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_sig_init_tfm,
@@ -64,9 +54,6 @@ static const struct crypto_type crypto_sig_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_sig_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_sig_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bc70e159d27d..ceed7f33a67b 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -89,25 +89,6 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
return container_of(alg, struct skcipher_alg, base);
}
-static inline struct crypto_istat_cipher *skcipher_get_stat(
- struct skcipher_alg *alg)
-{
- return skcipher_get_stat_common(&alg->co);
-}
-
-static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
-{
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&istat->err_cnt);
-
- return err;
-}
-
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -654,23 +635,12 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- int ret;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->cryptlen, &istat->encrypt_tlen);
- }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (alg->co.base.cra_type != &crypto_skcipher_type)
- ret = crypto_lskcipher_encrypt_sg(req);
- else
- ret = alg->encrypt(req);
-
- return crypto_skcipher_errstat(alg, ret);
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_encrypt_sg(req);
+ return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
@@ -678,23 +648,12 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- int ret;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- atomic64_inc(&istat->decrypt_cnt);
- atomic64_add(req->cryptlen, &istat->decrypt_tlen);
- }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (alg->co.base.cra_type != &crypto_skcipher_type)
- ret = crypto_lskcipher_decrypt_sg(req);
- else
- ret = alg->decrypt(req);
-
- return crypto_skcipher_errstat(alg, ret);
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_decrypt_sg(req);
+ return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
@@ -846,28 +805,6 @@ static int __maybe_unused crypto_skcipher_report(
sizeof(rblkcipher), &rblkcipher);
}
-static int __maybe_unused crypto_skcipher_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
- struct crypto_istat_cipher *istat;
- struct crypto_stat_cipher rcipher;
-
- istat = skcipher_get_stat(skcipher);
-
- memset(&rcipher, 0, sizeof(rcipher));
-
- strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
@@ -878,9 +815,6 @@ static const struct crypto_type crypto_skcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_skcipher_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -935,7 +869,6 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
- struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -948,9 +881,6 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
-
return 0;
}
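
The hunks above (and the matching ones in hash.h, kpp.c, lskcipher.c, rng.c, scompress.c, shash.c and sig.c) all strip the CONFIG_CRYPTO_STATS bookkeeping, leaving the fast-path entry points as plain dispatch. Reassembled from the crypto_skcipher_encrypt() hunk above, the resulting function reads:

	int crypto_skcipher_encrypt(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

		if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
			return -ENOKEY;
		if (alg->co.base.cra_type != &crypto_skcipher_type)
			return crypto_lskcipher_encrypt_sg(req);
		return alg->encrypt(req);
	}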
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
index 16c9484360da..703651367dd8 100644
--- a/crypto/skcipher.h
+++ b/crypto/skcipher.h
@@ -10,16 +10,6 @@
#include <crypto/internal/skcipher.h>
#include "internal.h"
-static inline struct crypto_istat_cipher *skcipher_get_stat_common(
- struct skcipher_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 3dddd288ca02..00f5a6cf341a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5098,6 +5098,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.akcipher = __VECS(ecdsa_nist_p384_tv_template)
}
}, {
+ .alg = "ecdsa-nist-p521",
+ .test = alg_test_akcipher,
+ .fips_allowed = 1,
+ .suite = {
+ .akcipher = __VECS(ecdsa_nist_p521_tv_template)
+ }
+ }, {
.alg = "ecrdsa",
.test = alg_test_akcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 986f331a5fc2..5350cfd9d325 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -653,6 +653,30 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+ "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+ "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+ "\x80\x6f\xa5\x79\x77\xda\xd0",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
@@ -756,6 +780,32 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+ "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+ "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+ "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+ "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
@@ -866,6 +916,36 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+ "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+ "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+ "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+ "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+ "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+ "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
@@ -991,6 +1071,152 @@ static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
},
};
+static const struct akcipher_testvec ecdsa_nist_p521_tv_template[] = {
+ {
+ .key = /* secp521r1(sha224) */
+ "\x04\x01\x4f\x43\x18\xb6\xa9\xc9\x5d\x68\xd3\xa9\x42\xf8\x98\xc0"
+ "\xd2\xd1\xa9\x50\x3b\xe8\xc4\x40\xe6\x11\x78\x88\x4b\xbd\x76\xa7"
+ "\x9a\xe0\xdd\x31\xa4\x67\x78\x45\x33\x9e\x8c\xd1\xc7\x44\xac\x61"
+ "\x68\xc8\x04\xe7\x5c\x79\xb1\xf1\x41\x0c\x71\xc0\x53\xa8\xbc\xfb"
+ "\xf5\xca\xd4\x01\x40\xfd\xa3\x45\xda\x08\xe0\xb4\xcb\x28\x3b\x0a"
+ "\x02\x35\x5f\x02\x9f\x3f\xcd\xef\x08\x22\x40\x97\x74\x65\xb7\x76"
+ "\x85\xc7\xc0\x5c\xfb\x81\xe1\xa5\xde\x0c\x4e\x8b\x12\x31\xb6\x47"
+ "\xed\x37\x0f\x99\x3f\x26\xba\xa3\x8e\xff\x79\x34\x7c\x3a\xfe\x1f"
+ "\x3b\x83\x82\x2f\x14",
+ .key_len = 133,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x23",
+ .param_len = 18,
+ .m =
+ "\xa2\x3a\x6a\x8c\x7b\x3c\xf2\x51\xf8\xbe\x5f\x4f\x3b\x15\x05\xc4"
+ "\xb5\xbc\x19\xe7\x21\x85\xe9\x23\x06\x33\x62\xfb",
+ .m_size = 28,
+ .algo = OID_id_ecdsa_with_sha224,
+ .c =
+ "\x30\x81\x86\x02\x41\x01\xd6\x43\xe7\xff\x42\xb2\xba\x74\x35\xf6"
+ "\xdc\x6d\x02\x7b\x22\xac\xe2\xef\x07\x92\xee\x60\x94\x06\xf8\x3f"
+ "\x59\x0f\x74\xf0\x3f\xd8\x18\xc6\x37\x8a\xcb\xa7\xd8\x7d\x98\x85"
+ "\x29\x88\xff\x0b\x94\x94\x6c\xa6\x9b\x89\x8b\x1e\xfd\x09\x46\x6b"
+ "\xc7\xaf\x7a\xb9\x19\x0a\x02\x41\x3a\x26\x0d\x55\xcd\x23\x1e\x7d"
+ "\xa0\x5e\xf9\x88\xf3\xd2\x32\x90\x57\x0f\xf8\x65\x97\x6b\x09\x4d"
+ "\x22\x26\x0b\x5f\x49\x32\x6b\x91\x99\x30\x90\x0f\x1c\x8f\x78\xd3"
+ "\x9f\x0e\x64\xcc\xc4\xe8\x43\xd9\x0e\x1c\xad\x22\xda\x82\x00\x35"
+ "\xa3\x50\xb1\xa5\x98\x92\x2a\xa5\x52",
+ .c_size = 137,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key = /* secp521r1(sha256) */
+ "\x04\x01\x05\x3a\x6b\x3b\x5a\x0f\xa7\xb9\xb7\x32\x53\x4e\xe2\xae"
+ "\x0a\x52\xc5\xda\xdd\x5a\x79\x1c\x30\x2d\x33\x07\x79\xd5\x70\x14"
+ "\x61\x0c\xec\x26\x4d\xd8\x35\x57\x04\x1d\x88\x33\x4d\xce\x05\x36"
+ "\xa5\xaf\x56\x84\xfa\x0b\x9e\xff\x7b\x30\x4b\x92\x1d\x06\xf8\x81"
+ "\x24\x1e\x51\x00\x09\x21\x51\xf7\x46\x0a\x77\xdb\xb5\x0c\xe7\x9c"
+ "\xff\x27\x3c\x02\x71\xd7\x85\x36\xf1\xaa\x11\x59\xd8\xb8\xdc\x09"
+ "\xdc\x6d\x5a\x6f\x63\x07\x6c\xe1\xe5\x4d\x6e\x0f\x6e\xfb\x7c\x05"
+ "\x8a\xe9\x53\xa8\xcf\xce\x43\x0e\x82\x20\x86\xbc\x88\x9c\xb7\xe3"
+ "\xe6\x77\x1e\x1f\x8a",
+ .key_len = 133,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x23",
+ .param_len = 18,
+ .m =
+ "\xcc\x97\x73\x0c\x73\xa2\x53\x2b\xfa\xd7\x83\x1d\x0c\x72\x1b\x39"
+ "\x80\x71\x8d\xdd\xc5\x9b\xff\x55\x32\x98\x25\xa2\x58\x2e\xb7\x73",
+ .m_size = 32,
+ .algo = OID_id_ecdsa_with_sha256,
+ .c =
+ "\x30\x81\x88\x02\x42\x00\xcd\xa5\x5f\x57\x52\x27\x78\x3a\xb5\x06"
+ "\x0f\xfd\x83\xfc\x0e\xd9\xce\x50\x9f\x7d\x1f\xca\x8b\xa8\x2d\x56"
+ "\x3c\xf6\xf0\xd8\xe1\xb7\x5d\x95\x35\x6f\x02\x0e\xaf\xe1\x4c\xae"
+ "\xce\x54\x76\x9a\xc2\x8f\xb8\x38\x1f\x46\x0b\x04\x64\x34\x79\xde"
+ "\x7e\xd7\x59\x10\xe9\xd9\xd5\x02\x42\x01\xcf\x50\x85\x38\xf9\x15"
+ "\x83\x18\x04\x6b\x35\xae\x65\xb5\x99\x12\x0a\xa9\x79\x24\xb9\x37"
+ "\x35\xdd\xa0\xe0\x87\x2c\x44\x4b\x5a\xee\xaf\xfa\x10\xdd\x9b\xfb"
+ "\x36\x1a\x31\x03\x42\x02\x5f\x50\xf0\xa2\x0d\x1c\x57\x56\x8f\x12"
+ "\xb7\x1d\x91\x55\x38\xb6\xf6\x34\x65\xc7\xbd",
+ .c_size = 139,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key = /* secp521r1(sha384) */
+ "\x04\x00\x2e\xd6\x21\x04\x75\xc3\xdc\x7d\xff\x0e\xf3\x70\x25\x2b"
+ "\xad\x72\xfc\x5a\x91\xf1\xd5\x9c\x64\xf3\x1f\x47\x11\x10\x62\x33"
+ "\xfd\x2e\xe8\x32\xca\x9e\x6f\x0a\x4c\x5b\x35\x9a\x46\xc5\xe7\xd4"
+ "\x38\xda\xb2\xf0\xf4\x87\xf3\x86\xf4\xea\x70\xad\x1e\xd4\x78\x8c"
+ "\x36\x18\x17\x00\xa2\xa0\x34\x1b\x2e\x6a\xdf\x06\xd6\x99\x2d\x47"
+ "\x50\x92\x1a\x8a\x72\x9c\x23\x44\xfa\xa7\xa9\xed\xa6\xef\x26\x14"
+ "\xb3\x9d\xfe\x5e\xa3\x8c\xd8\x29\xf8\xdf\xad\xa6\xab\xfc\xdd\x46"
+ "\x22\x6e\xd7\x35\xc7\x23\xb7\x13\xae\xb6\x34\xff\xd7\x80\xe5\x39"
+ "\xb3\x3b\x5b\x1b\x94",
+ .key_len = 133,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x23",
+ .param_len = 18,
+ .m =
+ "\x36\x98\xd6\x82\xfa\xad\xed\x3c\xb9\x40\xb6\x4d\x9e\xb7\x04\x26"
+ "\xad\x72\x34\x44\xd2\x81\xb4\x9b\xbe\x01\x04\x7a\xd8\x50\xf8\x59"
+ "\xba\xad\x23\x85\x6b\x59\xbe\xfb\xf6\x86\xd4\x67\xa8\x43\x28\x76",
+ .m_size = 48,
+ .algo = OID_id_ecdsa_with_sha384,
+ .c =
+ "\x30\x81\x88\x02\x42\x00\x93\x96\x76\x3c\x27\xea\xaa\x9c\x26\xec"
+ "\x51\xdc\xe8\x35\x5e\xae\x16\xf2\x4b\x64\x98\xf7\xec\xda\xc7\x7e"
+ "\x42\x71\x86\x57\x2d\xf1\x7d\xe4\xdf\x9b\x7d\x9e\x47\xca\x33\x32"
+ "\x76\x06\xd0\xf9\xc0\xe4\xe6\x84\x59\xfd\x1a\xc4\x40\xdd\x43\xb8"
+ "\x6a\xdd\xfb\xe6\x63\x4e\x28\x02\x42\x00\xff\xc3\x6a\x87\x6e\xb5"
+ "\x13\x1f\x20\x55\xce\x37\x97\xc9\x05\x51\xe5\xe4\x3c\xbc\x93\x65"
+ "\x57\x1c\x30\xda\xa7\xcd\x26\x28\x76\x3b\x52\xdf\xc4\xc0\xdb\x54"
+ "\xdb\x8a\x0d\x6a\xc3\xf3\x7a\xd1\xfa\xe7\xa7\xe5\x5a\x94\x56\xcf"
+ "\x8f\xb4\x22\xc6\x4f\xab\x2b\x62\xc1\x42\xb1",
+ .c_size = 139,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key = /* secp521r1(sha512) */
+ "\x04\x00\xc7\x65\xee\x0b\x86\x7d\x8f\x02\xf1\x74\x5b\xb0\x4c\x3f"
+ "\xa6\x35\x60\x9f\x55\x23\x11\xcc\xdf\xb8\x42\x99\xee\x6c\x96\x6a"
+ "\x27\xa2\x56\xb2\x2b\x03\xad\x0f\xe7\x97\xde\x09\x5d\xb4\xc5\x5f"
+ "\xbd\x87\x37\xbf\x5a\x16\x35\x56\x08\xfd\x6f\x06\x1a\x1c\x84\xee"
+ "\xc3\x64\xb3\x00\x9e\xbd\x6e\x60\x76\xee\x69\xfd\x3a\xb8\xcd\x7e"
+ "\x91\x68\x53\x57\x44\x13\x2e\x77\x09\x2a\xbe\x48\xbd\x91\xd8\xf6"
+ "\x21\x16\x53\x99\xd5\xf0\x40\xad\xa6\xf8\x58\x26\xb6\x9a\xf8\x77"
+ "\xfe\x3a\x05\x1a\xdb\xa9\x0f\xc0\x6c\x76\x30\x8c\xd8\xde\x44\xae"
+ "\xd0\x17\xdf\x49\x6a",
+ .key_len = 133,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x23",
+ .param_len = 18,
+ .m =
+ "\x5c\xa6\xbc\x79\xb8\xa0\x1e\x11\x83\xf7\xe9\x05\xdf\xba\xf7\x69"
+ "\x97\x22\x32\xe4\x94\x7c\x65\xbd\x74\xc6\x9a\x8b\xbd\x0d\xdc\xed"
+ "\xf5\x9c\xeb\xe1\xc5\x68\x40\xf2\xc7\x04\xde\x9e\x0d\x76\xc5\xa3"
+ "\xf9\x3c\x6c\x98\x08\x31\xbd\x39\xe8\x42\x7f\x80\x39\x6f\xfe\x68",
+ .m_size = 64,
+ .algo = OID_id_ecdsa_with_sha512,
+ .c =
+ "\x30\x81\x88\x02\x42\x01\x5c\x71\x86\x96\xac\x21\x33\x7e\x4e\xaa"
+ "\x86\xec\xa8\x05\x03\x52\x56\x63\x0e\x02\xcc\x94\xa9\x05\xb9\xfb"
+ "\x62\x1e\x42\x03\x6c\x74\x8a\x1f\x12\x3e\xb7\x7e\x51\xff\x7f\x27"
+ "\x93\xe8\x6c\x49\x7d\x28\xfc\x80\xa6\x13\xfc\xb6\x90\xf7\xbb\x28"
+ "\xb5\x04\xb0\xb6\x33\x1c\x7e\x02\x42\x01\x70\x43\x52\x1d\xe3\xc6"
+ "\xbd\x5a\x40\x95\x35\x89\x4f\x41\x5f\x9e\x19\x88\x05\x3e\x43\x39"
+ "\x01\xbd\xb7\x7a\x76\x37\x51\x47\x49\x98\x12\x71\xd0\xe9\xca\xa7"
+ "\xc0\xcb\xaa\x00\x55\xbb\x6a\xb4\x73\x00\xd2\x72\x74\x13\x63\x39"
+ "\xa6\xe5\x25\x46\x1e\x77\x44\x78\xe0\xd1\x04",
+ .c_size = 139,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
/*
* EC-RDSA test vectors are generated by gost-engine.
*/
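
Each .c field in the new ecdsa_nist_p521_tv_template entries is the DER-encoded signature (a SEQUENCE of the two INTEGERs r and s), which is where the 137-139 byte sizes come from. Reading the length octets of the sha224 vector above as a worked example:

	0x30 0x81 0x86   /* SEQUENCE, long-form length = 134 content bytes  */
	0x02 0x41 ...    /* INTEGER r, 65 bytes                             */
	0x02 0x41 ...    /* INTEGER s, 65 bytes                             */
	                 /* 3 + (2 + 65) + (2 + 65) = 137 == .c_size        */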
diff --git a/drivers/Makefile b/drivers/Makefile
index 3bf5cab4b451..fe9ceb0d2288 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -6,11 +6,6 @@
# Rewritten to use lists instead of if-statements.
#
-# Some driver Makefiles miss $(srctree)/ for include directive.
-ifdef building_out_of_srctree
-MAKEFLAGS += --include-dir=$(srctree)
-endif
-
obj-y += cache/
obj-y += irqchip/
obj-y += bus/
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index d09d29775b3f..e07e447d08d1 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -3,6 +3,8 @@
* Copyright (C) 2020-2023 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 39f6d1b98fd6..51d3f1a55d02 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
- int ret;
-
- ret = ivpu_rpm_get_if_active(vdev);
- if (ret < 0)
- return ret;
-
- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
- if (ret)
- ivpu_rpm_put(vdev);
-
- return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- ret = ivpu_get_core_clock_rate(vdev, &args->value);
+ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
- ivpu_prepare_for_reset(vdev);
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
return ret;
}
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
-err_power_down:
- ivpu_hw_power_down(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+ ivpu_shutdown(vdev);
err_xa_destroy:
xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
+ ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
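
ivpu_shutdown() now owns the PCI handling that previously depended on the d3hot_after_power_off workaround, and callers that need a reset invoke ivpu_prepare_for_reset() themselves (as ivpu_dev_fini() does above). Pieced together from this hunk, the resulting sequence is:

	int ivpu_shutdown(struct ivpu_device *vdev)
	{
		int ret;

		/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
		pci_save_state(to_pci_dev(vdev->drm.dev));

		ret = ivpu_hw_power_down(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

		return ret;
	}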
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 7be0500d9bb8..bb4374d0eaec 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
struct ivpu_wa_table {
bool punit_disabled;
bool clear_runtime_mem;
- bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index b2909168a0a6..094c659d2800 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9a0c9498baba..bd25e2d9fb0f 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
- vdev->wa.d3hot_after_power_off = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(d3hot_after_power_off);
IVPU_PRINT_WA(interrupt_clear_with_0);
}
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
- if ((config & 0xff) == PLL_RATIO_4_3)
+ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index e4eddbf5d11c..b0b88d4c8926 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 04ac4b9840fb..56ff067f63e2 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
- drmm_mutex_init(&vdev->drm, &ipc->lock);
+ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+ goto err_free_rx;
+ }
ivpu_ipc_reset(vdev);
return 0;
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 91bd640655ab..2e46b322c450 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
- return "Unknown CMDQ command";
+ return "Unknown event";
}
}
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
switch (err) {
case IVPU_MMU_CERROR_NONE:
- return "No CMDQ Error";
+ return "No error";
case IVPU_MMU_CERROR_ILL:
return "Illegal command";
case IVPU_MMU_CERROR_ABT:
- return "External abort on CMDQ read";
+ return "External abort on command queue read";
case IVPU_MMU_CERROR_ATC_INV_SYNC:
return "Sync failed to complete ATS invalidation";
default:
- return "Unknown CMDQ Error";
+ return "Unknown error";
}
}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index fe6161299236..128aef8e5a19 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
+#include <linux/vmalloc.h>
#include <drm/drm_cache.h>
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 7cce1c928a7f..4f5ea466731f 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
- /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
- pci_save_state(to_pci_dev(vdev->drm.dev));
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_shutdown(vdev);
if (ret)
- ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+ ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
return ret;
}
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
-retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 3f7f6dfde7f2..35e883515629 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,4 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
- qaic_timesync.o
+ qaic_timesync.o \
+ sahara.o
+
+qaic-$(CONFIG_DEBUG_FS) += qaic_debugfs.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 9256653b3036..02561b6cecc6 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -153,6 +153,14 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* Head of list of pages allocated by MHI bootlog device */
+ struct list_head bootlog;
+ /* MHI bootlog channel device */
+ struct mhi_device *bootlog_ch;
+ /* Work queue for tasks related to MHI bootlog device */
+ struct workqueue_struct *bootlog_wq;
+ /* Synchronizes access to pages in MHI bootlog device */
+ struct mutex bootlog_mutex;
};
struct qaic_drm_device {
@@ -280,6 +288,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);
void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 2459fe4a3f95..e86e71c1cdd8 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -1981,3 +1981,12 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->in_use = false;
wake_up(&dbc->dbc_release);
}
+
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
+{
+ if (!dbc || !head || !tail)
+ return;
+
+ *head = readl(dbc->dbc_base + REQHP_OFF);
+ *tail = readl(dbc->dbc_base + REQTP_OFF);
+}
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
new file mode 100644
index 000000000000..20b653d99e52
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_debugfs.h"
+
+#define BOOTLOG_POOL_SIZE 16
+#define BOOTLOG_MSG_SIZE 512
+#define QAIC_DBC_DIR_NAME 9
+
+struct bootlog_msg {
+ /* Buffer for bootlog messages */
+ char str[BOOTLOG_MSG_SIZE];
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Work struct to schedule work coming on QAIC_LOGGING channel */
+ struct work_struct work;
+};
+
+struct bootlog_page {
+ /* Node in list of bootlog pages maintained by root device struct */
+ struct list_head node;
+ /* Total size of the buffer that holds the bootlogs. It is PAGE_SIZE */
+ unsigned int size;
+ /* Offset for the next bootlog */
+ unsigned int offset;
+};
+
+static int bootlog_show(struct seq_file *s, void *unused)
+{
+ struct bootlog_page *page;
+ struct qaic_device *qdev;
+ void *page_end;
+ void *log;
+
+ qdev = s->private;
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry(page, &qdev->bootlog, node) {
+ log = page + 1;
+ page_end = (void *)page + page->offset;
+ while (log < page_end) {
+ seq_printf(s, "%s", (char *)log);
+ log += strlen(log) + 1;
+ }
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ return 0;
+}
+
+static int bootlog_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bootlog_show, inode->i_private);
+}
+
+static const struct file_operations bootlog_fops = {
+ .owner = THIS_MODULE,
+ .open = bootlog_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int read_dbc_fifo_size(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+
+ seq_printf(s, "%u\n", dbc->nelem);
+ return 0;
+}
+
+static int fifo_size_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, read_dbc_fifo_size, inode->i_private);
+}
+
+static const struct file_operations fifo_size_fops = {
+ .owner = THIS_MODULE,
+ .open = fifo_size_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int read_dbc_queued(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+ u32 tail = 0, head = 0;
+
+ qaic_data_get_fifo_info(dbc, &head, &tail);
+
+ if (head == U32_MAX || tail == U32_MAX)
+ seq_printf(s, "%u\n", 0);
+ else if (head > tail)
+ seq_printf(s, "%u\n", dbc->nelem - head + tail);
+ else
+ seq_printf(s, "%u\n", tail - head);
+
+ return 0;
+}
+
+static int queued_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, read_dbc_queued, inode->i_private);
+}
+
+static const struct file_operations queued_fops = {
+ .owner = THIS_MODULE,
+ .open = queued_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void qaic_debugfs_init(struct qaic_drm_device *qddev)
+{
+ struct qaic_device *qdev = qddev->qdev;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_dir;
+ char name[QAIC_DBC_DIR_NAME];
+ u32 i;
+
+ debugfs_root = to_drm(qddev)->debugfs_root;
+
+ debugfs_create_file("bootlog", 0400, debugfs_root, qdev, &bootlog_fops);
+ /*
+ * 256 dbcs per device is likely the max we will ever see and lets static checking see a
+ * reasonable range.
+ */
+ for (i = 0; i < qdev->num_dbc && i < 256; ++i) {
+ snprintf(name, QAIC_DBC_DIR_NAME, "dbc%03u", i);
+ debugfs_dir = debugfs_create_dir(name, debugfs_root);
+ debugfs_create_file("fifo_size", 0400, debugfs_dir, &qdev->dbc[i], &fifo_size_fops);
+ debugfs_create_file("queued", 0400, debugfs_dir, &qdev->dbc[i], &queued_fops);
+ }
+}
+
+static struct bootlog_page *alloc_bootlog_page(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+
+ page = (struct bootlog_page *)devm_get_free_pages(&qdev->pdev->dev, GFP_KERNEL, 0);
+ if (!page)
+ return page;
+
+ page->size = PAGE_SIZE;
+ page->offset = sizeof(*page);
+ list_add_tail(&page->node, &qdev->bootlog);
+
+ return page;
+}
+
+static int reset_bootlog(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+ struct bootlog_page *i;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry_safe(page, i, &qdev->bootlog, node) {
+ list_del(&page->node);
+ devm_free_pages(&qdev->pdev->dev, (unsigned long)page);
+ }
+
+ page = alloc_bootlog_page(qdev);
+ mutex_unlock(&qdev->bootlog_mutex);
+ if (!page)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void *bootlog_get_space(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ if (size_add(size, sizeof(*page)) > page->size)
+ return NULL;
+
+ if (page->offset + size > page->size) {
+ page = alloc_bootlog_page(qdev);
+ if (!page)
+ return NULL;
+ }
+
+ return (void *)page + page->offset;
+}
+
+static void bootlog_commit(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ page->offset += size;
+}
+
+static void bootlog_log(struct work_struct *work)
+{
+ struct bootlog_msg *msg = container_of(work, struct bootlog_msg, work);
+ unsigned int len = strlen(msg->str) + 1;
+ struct qaic_device *qdev = msg->qdev;
+ void *log;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ log = bootlog_get_space(qdev, len);
+ if (log) {
+ memcpy(log, msg, len);
+ bootlog_commit(qdev, len);
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ if (mhi_queue_buf(qdev->bootlog_ch, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT))
+ devm_kfree(&qdev->pdev->dev, msg);
+}
+
+static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct bootlog_msg *msg;
+ int i, ret;
+
+ qdev->bootlog_wq = alloc_ordered_workqueue("qaic_bootlog", 0);
+ if (!qdev->bootlog_wq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = reset_bootlog(qdev);
+ if (ret)
+ goto destroy_workqueue;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto destroy_workqueue;
+
+ for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
+ msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto mhi_unprepare;
+ }
+
+ msg->qdev = qdev;
+ INIT_WORK(&msg->work, bootlog_log);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT);
+ if (ret)
+ goto mhi_unprepare;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+ return 0;
+
+mhi_unprepare:
+ mhi_unprepare_from_transfer(mhi_dev);
+destroy_workqueue:
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+out:
+ return ret;
+}
+
+static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_unprepare_from_transfer(qdev->bootlog_ch);
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+ qdev->bootlog_ch = NULL;
+}
+
+static void qaic_bootlog_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void qaic_bootlog_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct bootlog_msg *msg = mhi_result->buf_addr;
+
+ if (mhi_result->transaction_status) {
+ devm_kfree(&qdev->pdev->dev, msg);
+ return;
+ }
+
+ /* Force a null at the end of the transferred string */
+ msg->str[mhi_result->bytes_xferd - 1] = 0;
+
+ queue_work(qdev->bootlog_wq, &msg->work);
+}
+
+static const struct mhi_device_id qaic_bootlog_mhi_match_table[] = {
+ { .chan = "QAIC_LOGGING", },
+ {},
+};
+
+static struct mhi_driver qaic_bootlog_mhi_driver = {
+ .id_table = qaic_bootlog_mhi_match_table,
+ .remove = qaic_bootlog_mhi_remove,
+ .probe = qaic_bootlog_mhi_probe,
+ .ul_xfer_cb = qaic_bootlog_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_bootlog_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_bootlog",
+ },
+};
+
+int qaic_bootlog_register(void)
+{
+ return mhi_driver_register(&qaic_bootlog_mhi_driver);
+}
+
+void qaic_bootlog_unregister(void)
+{
+ mhi_driver_unregister(&qaic_bootlog_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_debugfs.h b/drivers/accel/qaic/qaic_debugfs.h
new file mode 100644
index 000000000000..05e74f84cf9f
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __QAIC_DEBUGFS_H__
+#define __QAIC_DEBUGFS_H__
+
+#include <drm/drm_file.h>
+
+#ifdef CONFIG_DEBUG_FS
+int qaic_bootlog_register(void);
+void qaic_bootlog_unregister(void);
+void qaic_debugfs_init(struct qaic_drm_device *qddev);
+#else
+static inline int qaic_bootlog_register(void) { return 0; }
+static inline void qaic_bootlog_unregister(void) {}
+static inline void qaic_debugfs_init(struct qaic_drm_device *qddev) {}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* __QAIC_DEBUGFS_H__ */
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index d1a632dbaec6..580b29ed1902 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -28,7 +28,9 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_debugfs.h"
#include "qaic_timesync.h"
+#include "sahara.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -229,8 +231,12 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
ret = drm_dev_register(drm, 0);
- if (ret)
+ if (ret) {
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
+ return ret;
+ }
+
+ qaic_debugfs_init(qddev);
return ret;
}
@@ -382,6 +388,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
if (ret)
return NULL;
+ ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
+ if (ret)
+ return NULL;
qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
if (IS_ERR(qdev->cntl_wq))
@@ -399,6 +408,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
qddev->qdev = qdev;
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
+ INIT_LIST_HEAD(&qdev->bootlog);
INIT_LIST_HEAD(&qddev->users);
for (i = 0; i < qdev->num_dbc; ++i) {
@@ -635,12 +645,24 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = sahara_register();
+ if (ret) {
+ pr_debug("qaic: sahara_register failed %d\n", ret);
+ goto free_mhi;
+ }
+
ret = qaic_timesync_init();
if (ret)
pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+ ret = qaic_bootlog_register();
+ if (ret)
+ pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+
return 0;
+free_mhi:
+ mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
pci_unregister_driver(&qaic_pci_driver);
return ret;
@@ -664,7 +686,9 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_bootlog_unregister();
qaic_timesync_deinit();
+ sahara_unregister();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
new file mode 100644
index 000000000000..bf94bbab6be5
--- /dev/null
+++ b/drivers/accel/qaic/sahara.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/firmware.h>
+#include <linux/limits.h>
+#include <linux/mhi.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "sahara.h"
+
+#define SAHARA_HELLO_CMD 0x1 /* Min protocol version 1.0 */
+#define SAHARA_HELLO_RESP_CMD 0x2 /* Min protocol version 1.0 */
+#define SAHARA_READ_DATA_CMD 0x3 /* Min protocol version 1.0 */
+#define SAHARA_END_OF_IMAGE_CMD 0x4 /* Min protocol version 1.0 */
+#define SAHARA_DONE_CMD 0x5 /* Min protocol version 1.0 */
+#define SAHARA_DONE_RESP_CMD 0x6 /* Min protocol version 1.0 */
+#define SAHARA_RESET_CMD 0x7 /* Min protocol version 1.0 */
+#define SAHARA_RESET_RESP_CMD 0x8 /* Min protocol version 1.0 */
+#define SAHARA_MEM_DEBUG_CMD 0x9 /* Min protocol version 2.0 */
+#define SAHARA_MEM_READ_CMD 0xa /* Min protocol version 2.0 */
+#define SAHARA_CMD_READY_CMD 0xb /* Min protocol version 2.1 */
+#define SAHARA_SWITCH_MODE_CMD 0xc /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_CMD 0xd /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_RESP_CMD 0xe /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_DATA_CMD 0xf /* Min protocol version 2.1 */
+#define SAHARA_MEM_DEBUG64_CMD 0x10 /* Min protocol version 2.5 */
+#define SAHARA_MEM_READ64_CMD 0x11 /* Min protocol version 2.5 */
+#define SAHARA_READ_DATA64_CMD 0x12 /* Min protocol version 2.8 */
+#define SAHARA_RESET_STATE_CMD 0x13 /* Min protocol version 2.9 */
+#define SAHARA_WRITE_DATA_CMD 0x14 /* Min protocol version 3.0 */
+
+#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
+#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
+ SAHARA_PACKET_MAX_SIZE)
+#define SAHARA_IMAGE_ID_NONE U32_MAX
+
+#define SAHARA_VERSION 2
+#define SAHARA_SUCCESS 0
+
+#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
+#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
+#define SAHARA_MODE_MEMORY_DEBUG 0x2
+#define SAHARA_MODE_COMMAND 0x3
+
+#define SAHARA_HELLO_LENGTH 0x30
+#define SAHARA_READ_DATA_LENGTH 0x14
+#define SAHARA_END_OF_IMAGE_LENGTH 0x10
+#define SAHARA_DONE_LENGTH 0x8
+#define SAHARA_RESET_LENGTH 0x8
+
+struct sahara_packet {
+ __le32 cmd;
+ __le32 length;
+
+ union {
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 max_length;
+ __le32 mode;
+ } hello;
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 status;
+ __le32 mode;
+ } hello_resp;
+ struct {
+ __le32 image;
+ __le32 offset;
+ __le32 length;
+ } read_data;
+ struct {
+ __le32 image;
+ __le32 status;
+ } end_of_image;
+ };
+};
+
+struct sahara_context {
+ struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
+ struct sahara_packet *rx;
+ struct work_struct work;
+ struct mhi_device *mhi_dev;
+ const char **image_table;
+ u32 table_size;
+ u32 active_image_id;
+ const struct firmware *firmware;
+};
+
+static const char *aic100_image_table[] = {
+ [1] = "qcom/aic100/fw1.bin",
+ [2] = "qcom/aic100/fw2.bin",
+ [4] = "qcom/aic100/fw4.bin",
+ [5] = "qcom/aic100/fw5.bin",
+ [6] = "qcom/aic100/fw6.bin",
+ [8] = "qcom/aic100/fw8.bin",
+ [9] = "qcom/aic100/fw9.bin",
+ [10] = "qcom/aic100/fw10.bin",
+};
+
+static int sahara_find_image(struct sahara_context *context, u32 image_id)
+{
+ int ret;
+
+ if (image_id == context->active_image_id)
+ return 0;
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
+ dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
+ image_id, context->active_image_id);
+ return -EINVAL;
+ }
+
+ if (image_id >= context->table_size || !context->image_table[image_id]) {
+ dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
+ return -EINVAL;
+ }
+
+ /*
+ * This image might be optional. The device may continue without it.
+ * Only the device knows. Suppress error messages that could suggest
+ * a problem when we were actually able to continue.
+ */
+ ret = firmware_request_nowarn(&context->firmware,
+ context->image_table[image_id],
+ &context->mhi_dev->dev);
+ if (ret) {
+ dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
+ image_id, context->image_table[image_id], ret);
+ return ret;
+ }
+
+ context->active_image_id = image_id;
+
+ return 0;
+}
+
+static void sahara_release_image(struct sahara_context *context)
+{
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
+ release_firmware(context->firmware);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+}
+
+static void sahara_send_reset(struct sahara_context *context)
+{
+ int ret;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_RESET_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
+}
+
+static void sahara_hello(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->hello.version),
+ le32_to_cpu(context->rx->hello.version_compat),
+ le32_to_cpu(context->rx->hello.max_length),
+ le32_to_cpu(context->rx->hello.mode));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+ if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
+ le32_to_cpu(context->rx->hello.version));
+ return;
+ }
+
+ if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
+ le32_to_cpu(context->rx->hello.mode));
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
+ context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
+ context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_HELLO_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
+}
+
+static void sahara_read_data(struct sahara_context *context)
+{
+ u32 image_id, data_offset, data_len, pkt_data_len;
+ int ret;
+ int i;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->read_data.image),
+ le32_to_cpu(context->rx->read_data.offset),
+ le32_to_cpu(context->rx->read_data.length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ image_id = le32_to_cpu(context->rx->read_data.image);
+ data_offset = le32_to_cpu(context->rx->read_data.offset);
+ data_len = le32_to_cpu(context->rx->read_data.length);
+
+ ret = sahara_find_image(context, image_id);
+ if (ret) {
+ sahara_send_reset(context);
+ return;
+ }
+
+ /*
+ * Image is released when the device is done with it via
+ * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
+ * device to retry the operation with a modification, or decide to be
+ * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
+ * release_image() is called from SAHARA_END_OF_IMAGE_CMD processing
+ * and is not needed here on error.
+ */
+
+ if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
+ data_len, SAHARA_TRANSFER_MAX_SIZE);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (data_offset >= context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
+ data_offset, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (size_add(data_offset, data_len) > context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
+ data_offset, data_len, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
+ pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+
+ data_offset += pkt_data_len;
+ data_len -= pkt_data_len;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[i], pkt_data_len,
+ !data_len ? MHI_EOT : MHI_CHAIN);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
+static void sahara_end_of_image(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->end_of_image.image),
+ le32_to_cpu(context->rx->end_of_image.status));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
+ le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
+ le32_to_cpu(context->rx->end_of_image.image));
+ return;
+ }
+
+ sahara_release_image(context);
+
+ if (le32_to_cpu(context->rx->end_of_image.status))
+ return;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_DONE_LENGTH, MHI_EOT);
+ if (ret)
+ dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
+}
+
+static void sahara_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, work);
+ int ret;
+
+ switch (le32_to_cpu(context->rx->cmd)) {
+ case SAHARA_HELLO_CMD:
+ sahara_hello(context);
+ break;
+ case SAHARA_READ_DATA_CMD:
+ sahara_read_data(context);
+ break;
+ case SAHARA_END_OF_IMAGE_CMD:
+ sahara_end_of_image(context);
+ break;
+ case SAHARA_DONE_RESP_CMD:
+ /* Intentionally do nothing as we don't need to exit an app */
+ break;
+ default:
+ dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
+ le32_to_cpu(context->rx->cmd));
+ break;
+ }
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+}
+
+static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct sahara_context *context;
+ int ret;
+ int i;
+
+ context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->rx)
+ return -ENOMEM;
+
+ /*
+ * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
+ * will request for READ_DATA. This is larger than
+ * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
+ * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
+ * READ_DATA, it requires a transfer of the exact size requested. We
+ * can use MHI_CHAIN to link multiple buffers into a single transfer
+ * but the remote side will not consume the buffers until it sees an
+ * EOT, thus we need to allocate enough buffers to put in the tx fifo
+ * to cover an entire READ_DATA request of the max size.
+ */
+ for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->tx[i])
+ return -ENOMEM;
+ }
+
+ context->mhi_dev = mhi_dev;
+ INIT_WORK(&context->work, sahara_processing);
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+ dev_set_drvdata(&mhi_dev->dev, context);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sahara_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ cancel_work_sync(&context->work);
+ sahara_release_image(context);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status)
+ schedule_work(&context->work);
+}
+
+static const struct mhi_device_id sahara_mhi_match_table[] = {
+ { .chan = "QAIC_SAHARA", },
+ {},
+};
+
+static struct mhi_driver sahara_mhi_driver = {
+ .id_table = sahara_mhi_match_table,
+ .remove = sahara_mhi_remove,
+ .probe = sahara_mhi_probe,
+ .ul_xfer_cb = sahara_mhi_ul_xfer_cb,
+ .dl_xfer_cb = sahara_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "sahara",
+ },
+};
+
+int sahara_register(void)
+{
+ return mhi_driver_register(&sahara_mhi_driver);
+}
+
+void sahara_unregister(void)
+{
+ mhi_driver_unregister(&sahara_mhi_driver);
+}
diff --git a/drivers/accel/qaic/sahara.h b/drivers/accel/qaic/sahara.h
new file mode 100644
index 000000000000..640208acc0d1
--- /dev/null
+++ b/drivers/accel/qaic/sahara.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __SAHARA_H__
+#define __SAHARA_H__
+
+int sahara_register(void);
+void sahara_unregister(void);
+#endif /* __SAHARA_H__ */
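The TX pool sizing above works out as DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE, SAHARA_PACKET_MAX_SIZE) = DIV_ROUND_UP(0x80000, 0xffff) = 9, matching the "9x SAHARA_PACKET_MAX_SIZE" remark in sahara_mhi_probe(). A hedged, purely illustrative compile-time restatement of that arithmetic (not part of the patch) could sit next to the definitions:

	#include <linux/build_bug.h>

	/* nine 0xffff-byte packets cover one 0x80000-byte READ_DATA request */
	static_assert(SAHARA_NUM_TX_BUF == 9);
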
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921c4f..736c2eb8c0f3 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ff1689bb3124..e3a7c2aedd5f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -469,6 +469,9 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
+config ACPI_NHLT
+ bool
+
source "drivers/acpi/nfit/Kconfig"
source "drivers/acpi/numa/Kconfig"
source "drivers/acpi/apei/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 8cc8c0d9c873..39ea5cfa8326 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -14,7 +14,6 @@ tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ;
endif
obj-$(CONFIG_ACPI) += tables.o
-obj-$(CONFIG_X86) += blacklist.o
#
# ACPI Core Subsystem (Interpreter)
@@ -46,7 +45,6 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
-acpi-$(CONFIG_PCI) += acpi_lpss.o
acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
@@ -55,10 +53,6 @@ acpi-y += event.o
acpi-y += evged.o
acpi-y += sysfs.o
acpi-y += property.o
-acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
-acpi-$(CONFIG_X86) += x86/apple.o
-acpi-$(CONFIG_X86) += x86/utils.o
-acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
@@ -93,6 +87,7 @@ obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
+obj-$(CONFIG_ACPI_NHLT) += nhlt.o
obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
@@ -132,3 +127,4 @@ obj-$(CONFIG_ARM64) += arm64/
obj-$(CONFIG_ACPI_VIOT) += viot.o
obj-$(CONFIG_RISCV) += riscv/
+obj-$(CONFIG_X86) += x86/
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index 0555f68c2dfd..5fba4dab5d08 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -22,6 +22,8 @@ MODULE_LICENSE("GPL");
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT (5000)
#define ACPI_IPMI_MAX_MSG_LENGTH 64
+/* 2s should be sufficient for the SMI to be selected */
+#define ACPI_IPMI_SMI_SELECTION_TIMEOUT (2 * HZ)
struct acpi_ipmi_device {
/* the device list attached to driver_data.ipmi_devices */
@@ -54,6 +56,7 @@ struct ipmi_driver_data {
* to this selected global IPMI system interface.
*/
struct acpi_ipmi_device *selected_smi;
+ struct completion smi_selection_done;
};
struct acpi_ipmi_msg {
@@ -463,8 +466,10 @@ static void ipmi_register_bmc(int iface, struct device *dev)
if (temp->handle == handle)
goto err_lock;
}
- if (!driver_data.selected_smi)
+ if (!driver_data.selected_smi) {
driver_data.selected_smi = ipmi_device;
+ complete(&driver_data.smi_selection_done);
+ }
list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
mutex_unlock(&driver_data.ipmi_lock);
@@ -578,6 +583,20 @@ out_msg:
return status;
}
+int acpi_wait_for_acpi_ipmi(void)
+{
+ long ret;
+
+ ret = wait_for_completion_interruptible_timeout(&driver_data.smi_selection_done,
+ ACPI_IPMI_SMI_SELECTION_TIMEOUT);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_wait_for_acpi_ipmi);
+
static int __init acpi_ipmi_init(void)
{
int result;
@@ -586,6 +605,8 @@ static int __init acpi_ipmi_init(void)
if (acpi_disabled)
return 0;
+ init_completion(&driver_data.smi_selection_done);
+
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler,
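The new acpi_wait_for_acpi_ipmi() export lets other ACPI code block until an IPMI system interface has been selected (or the 2 s timeout expires). A hedged sketch of a hypothetical caller follows; no such caller is part of this hunk, and example_setup() is an invented name:

	static int example_setup(void)
	{
		int ret = acpi_wait_for_acpi_ipmi();

		if (ret)	/* -ETIMEDOUT: no BMC interface registered in time */
			return ret;

		/* IPMI operation region accesses are now backed by a BMC */
		return 0;
	}
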
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 30f3fc13c29d..8d18af396de9 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation)
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 82563b44af35..02012168a087 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -547,7 +547,7 @@ struct acpi_field_info {
struct acpi_ged_handler_info {
struct acpi_ged_handler_info *next;
- u32 int_id; /* The interrupt ID that triggers the execution ofthe evt_method. */
+ u32 int_id; /* The interrupt ID that triggers the execution of the evt_method. */
struct acpi_namespace_node *evt_method; /* The _EVT method to be executed when an interrupt with ID = int_ID is received */
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1bdfeee5d7c5..8fc02946d3cd 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -48,7 +48,7 @@
u8 descriptor_type; /* To differentiate various internal objs */\
u8 type; /* acpi_object_type */\
u16 reference_count; /* For object deletion management */\
- u8 flags;
+ u8 flags
/*
* Note: There are 3 bytes available here before the
* next natural alignment boundary (for both 32/64 cases)
@@ -71,10 +71,12 @@
*****************************************************************************/
struct acpi_object_common {
-ACPI_OBJECT_COMMON_HEADER};
+ ACPI_OBJECT_COMMON_HEADER;
+};
struct acpi_object_integer {
- ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 fill[3]; /* Prevent warning on some compilers */
u64 value;
};
@@ -86,23 +88,26 @@ struct acpi_object_integer {
*/
#define ACPI_COMMON_BUFFER_INFO(_type) \
_type *pointer; \
- u32 length;
+ u32 length
/* Null terminated, ASCII characters only */
struct acpi_object_string {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_BUFFER_INFO(char); /* String in AML stream or allocated string */
};
struct acpi_object_buffer {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_BUFFER_INFO(u8); /* Buffer in AML stream or allocated buffer */
u32 aml_length;
u8 *aml_start;
struct acpi_namespace_node *node; /* Link back to parent node */
};
struct acpi_object_package {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *node; /* Link back to parent node */
union acpi_operand_object **elements; /* Array of pointers to acpi_objects */
u8 *aml_start;
u32 aml_length;
@@ -116,11 +121,13 @@ struct acpi_object_package {
*****************************************************************************/
struct acpi_object_event {
- ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */
+ ACPI_OBJECT_COMMON_HEADER;
+ acpi_semaphore os_semaphore; /* Actual OS synchronization object */
};
struct acpi_object_mutex {
- ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
acpi_mutex os_mutex; /* Actual OS synchronization object */
acpi_thread_id thread_id; /* Current owner of the mutex */
@@ -132,7 +139,8 @@ struct acpi_object_mutex {
};
struct acpi_object_region {
- ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 space_id;
struct acpi_namespace_node *node; /* Containing namespace node */
union acpi_operand_object *handler; /* Handler for region access */
union acpi_operand_object *next;
@@ -142,7 +150,8 @@ struct acpi_object_region {
};
struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 info_flags;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 info_flags;
u8 param_count;
u8 sync_level;
union acpi_operand_object *mutex;
@@ -178,33 +187,43 @@ struct acpi_object_method {
*/
#define ACPI_COMMON_NOTIFY_INFO \
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
- union acpi_operand_object *handler; /* Handler for Address space */
+ union acpi_operand_object *handler /* Handler for Address space */
/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
struct acpi_object_notify_common {
-ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+};
struct acpi_object_device {
- ACPI_OBJECT_COMMON_HEADER
- ACPI_COMMON_NOTIFY_INFO struct acpi_gpe_block_info *gpe_block;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+ struct acpi_gpe_block_info *gpe_block;
};
struct acpi_object_power_resource {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 system_level;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+ u32 system_level;
u32 resource_order;
};
struct acpi_object_processor {
- ACPI_OBJECT_COMMON_HEADER
- /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
+ ACPI_OBJECT_COMMON_HEADER;
+
+ /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
+
u8 proc_id;
u8 length;
- ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
+ ACPI_COMMON_NOTIFY_INFO;
+ acpi_io_address address;
};
struct acpi_object_thermal_zone {
-ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+};
/******************************************************************************
*
@@ -226,17 +245,21 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_length; /* For serial regions/fields */
+ u8 access_length /* For serial regions/fields */
/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
struct acpi_object_field_common {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ u16 resource_length;
union acpi_operand_object *region_obj; /* Containing op_region object */
u8 *resource_buffer; /* resource_template for serial regions/fields */
u16 pin_number_index; /* Index relative to previous Connection/Template */
@@ -244,16 +267,20 @@ struct acpi_object_region_field {
};
struct acpi_object_bank_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ union acpi_operand_object *region_obj; /* Containing op_region object */
union acpi_operand_object *bank_obj; /* bank_select Register object */
};
struct acpi_object_index_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO
- /*
- * No "RegionObj" pointer needed since the Index and Data registers
- * are each field definitions unto themselves.
- */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+
+ /*
+ * No "RegionObj" pointer needed since the Index and Data registers
+ * are each field definitions unto themselves.
+ */
union acpi_operand_object *index_obj; /* Index register */
union acpi_operand_object *data_obj; /* Data register */
};
@@ -261,7 +288,9 @@ struct acpi_object_index_field {
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ u8 is_create_field; /* Special case for objects created by create_field() */
union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
@@ -272,7 +301,8 @@ struct acpi_object_buffer_field {
*****************************************************************************/
struct acpi_object_notify_handler {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *node; /* Parent device */
u32 handler_type; /* Type: Device/System/Both */
acpi_notify_handler handler; /* Handler address */
void *context;
@@ -280,7 +310,8 @@ struct acpi_object_notify_handler {
};
struct acpi_object_addr_handler {
- ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 space_id;
u8 handler_flags;
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
@@ -307,7 +338,8 @@ struct acpi_object_addr_handler {
* The Reference.Class differentiates these types.
*/
struct acpi_object_reference {
- ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
@@ -340,7 +372,8 @@ typedef enum {
* Currently: Region and field_unit types
*/
struct acpi_object_extra {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
@@ -350,14 +383,16 @@ struct acpi_object_extra {
/* Additional data that can be attached to namespace nodes */
struct acpi_object_data {
- ACPI_OBJECT_COMMON_HEADER acpi_object_handler handler;
+ ACPI_OBJECT_COMMON_HEADER;
+ acpi_object_handler handler;
void *pointer;
};
/* Structure used when objects are cached for reuse */
struct acpi_object_cache_list {
- ACPI_OBJECT_COMMON_HEADER union acpi_operand_object *next; /* Link for object cache and internal lists */
+ ACPI_OBJECT_COMMON_HEADER;
+ union acpi_operand_object *next; /* Link for object cache and internal lists */
};
/******************************************************************************
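The acobject.h changes above all apply one pattern: the shared-field macros (ACPI_OBJECT_COMMON_HEADER, ACPI_COMMON_NOTIFY_INFO, ACPI_COMMON_FIELD_INFO, ...) now omit their trailing semicolon, so each struct terminates the expansion itself and the use site reads like an ordinary member declaration. A minimal sketch of the idiom, with invented names:

	#define EXAMPLE_COMMON_HEADER \
		u8 descriptor_type; \
		u16 reference_count	/* deliberately no trailing ';' */

	struct example_object {
		EXAMPLE_COMMON_HEADER;	/* the use site supplies the ';' */
		u64 value;
	};
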
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index b91155ea9c34..c9131259f717 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
ACPI_FREE(buffer.pointer);
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not evaluate object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
/*
* Since this is a field unit, surround the output in braces
*/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 0dbc4d88919a..38f408cf13ce 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -413,6 +413,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
+ walk_info->count++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 44267a92bce5..3c126c6d306b 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -315,23 +315,19 @@ void acpi_tb_parse_fadt(void)
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
NULL, FALSE, TRUE, &acpi_gbl_dsdt_index);
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (!acpi_gbl_reduced_hardware) {
- if (acpi_gbl_FADT.facs) {
- acpi_tb_install_standard_table((acpi_physical_address)
- acpi_gbl_FADT.facs,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- NULL, FALSE, TRUE,
- &acpi_gbl_facs_index);
- }
- if (acpi_gbl_FADT.Xfacs) {
- acpi_tb_install_standard_table((acpi_physical_address)
- acpi_gbl_FADT.Xfacs,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- NULL, FALSE, TRUE,
- &acpi_gbl_xfacs_index);
- }
+ if (acpi_gbl_FADT.facs) {
+ acpi_tb_install_standard_table((acpi_physical_address)
+ acpi_gbl_FADT.facs,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL, FALSE, TRUE,
+ &acpi_gbl_facs_index);
+ }
+ if (acpi_gbl_FADT.Xfacs) {
+ acpi_tb_install_standard_table((acpi_physical_address)
+ acpi_gbl_FADT.Xfacs,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL, FALSE, TRUE,
+ &acpi_gbl_xfacs_index);
}
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bb4a56e5673a..15fa68a5ea6e 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -36,12 +36,7 @@ acpi_status acpi_tb_initialize_facs(void)
{
struct acpi_table_facs *facs;
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (acpi_gbl_reduced_hardware) {
- acpi_gbl_FACS = NULL;
- return (AE_OK);
- } else if (acpi_gbl_FADT.Xfacs &&
+ if (acpi_gbl_FADT.Xfacs &&
(!acpi_gbl_FADT.facs
|| !acpi_gbl_use32_bit_facs_addresses)) {
(void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index c5f6c85a3a09..3d71bd9245c7 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -62,7 +62,12 @@ void acpi_ut_track_stack_ptr(void)
acpi_size current_sp;
if (&current_sp < acpi_gbl_lowest_stack_pointer) {
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 12
+#pragma GCC diagnostic ignored "-Wdangling-pointer="
+#endif
acpi_gbl_lowest_stack_pointer = &current_sp;
+#pragma GCC diagnostic pop
}
if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 66e7f529e92f..9515bcfe5e97 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -873,8 +873,14 @@ static void __exit einj_remove(struct platform_device *pdev)
}
static struct platform_device *einj_dev;
-static struct platform_driver einj_driver = {
- .remove_new = einj_remove,
+/*
+ * einj_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver einj_driver __refdata = {
+ .remove_new = __exit_p(einj_remove),
.driver = {
.name = "acpi-einj",
},
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 512067cac170..623cc0cb4a65 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,6 +26,8 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
+#include <linux/cleanup.h>
+#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -33,6 +35,7 @@
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
+#include <linux/kfifo.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
@@ -673,6 +676,75 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries for each of the 4 event log queues */
+#define CXL_CPER_FIFO_DEPTH 32
+DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_work changes */
+static DEFINE_SPINLOCK(cxl_cper_work_lock);
+struct work_struct *cxl_cper_work;
+
+static void cxl_cper_post_event(enum cxl_event_type event_type,
+ struct cxl_cper_event_rec *rec)
+{
+ struct cxl_cper_work_data wd;
+
+ if (rec->hdr.length <= sizeof(rec->hdr) ||
+ rec->hdr.length > sizeof(*rec)) {
+ pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
+ rec->hdr.length);
+ return;
+ }
+
+ if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
+ pr_err(FW_WARN "CXL CPER invalid event\n");
+ return;
+ }
+
+ guard(spinlock_irqsave)(&cxl_cper_work_lock);
+
+ if (!cxl_cper_work)
+ return;
+
+ wd.event_type = event_type;
+ memcpy(&wd.rec, rec, sizeof(wd.rec));
+
+ if (!kfifo_put(&cxl_cper_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_work);
+}
+
+int cxl_cper_register_work(struct work_struct *work)
+{
+ if (cxl_cper_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_work_lock);
+ cxl_cper_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
+
+int cxl_cper_unregister_work(struct work_struct *work)
+{
+ if (cxl_cper_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_work_lock);
+ cxl_cper_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
+
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
+
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -707,6 +779,18 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
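The CXL CPER plumbing added to ghes.c above covers only the producer side: GHES validates the record, stashes it in cxl_cper_fifo, and schedules whatever work item was registered. A hedged sketch of the consumer side that these exports imply; the example_* names are hypothetical, and only the cxl_cper_*() helpers and struct cxl_cper_work_data come from this patch (via <linux/cxl-event.h>):

	#include <linux/cxl-event.h>
	#include <linux/workqueue.h>

	static void example_cxl_cper_work_fn(struct work_struct *work)
	{
		struct cxl_cper_work_data wd;

		/* drain everything GHES queued since the last run */
		while (cxl_cper_kfifo_get(&wd)) {
			/* hand wd.event_type / wd.rec to the CXL event code */
		}
	}
	static DECLARE_WORK(example_cxl_cper_work, example_cxl_cper_work_fn);

	static int example_register(void)
	{
		return cxl_cper_register_work(&example_cxl_cper_work);
	}
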
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index 93d796531af3..52b2abf88689 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev)
{
int ret;
u64 end, mask;
- u64 size = 0;
const struct bus_dma_region *map = NULL;
/*
@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev)
}
if (dev->coherent_dma_mask)
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ end = dev->coherent_dma_mask;
else
- size = 1ULL << 32;
+ end = (1ULL << 32) - 1;
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
- const struct bus_dma_region *r = map;
-
- for (end = 0; r->size; r++) {
- if (r->dma_start + r->size - 1 > end)
- end = r->dma_start + r->size - 1;
- }
-
- size = end + 1;
+ end = dma_range_map_max(map);
dev->dma_range_map = map;
}
if (ret == -ENODEV)
- ret = iort_dma_get_ranges(dev, &size);
+ ret = iort_dma_get_ranges(dev, &end);
if (!ret) {
/*
* Limit coherent and dma mask based on size retrieved from
* firmware.
*/
- end = size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6496ff5a6ba2..c0b1c2c19444 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1367,7 +1367,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
-static int nc_dma_get_range(struct device *dev, u64 *size)
+static int nc_dma_get_range(struct device *dev, u64 *limit)
{
struct acpi_iort_node *node;
struct acpi_iort_named_component *ncomp;
@@ -1384,13 +1384,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
return -EINVAL;
}
- *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
- 1ULL<<ncomp->memory_address_limit;
+ *limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
+ (1ULL << ncomp->memory_address_limit) - 1;
return 0;
}
-static int rc_dma_get_range(struct device *dev, u64 *size)
+static int rc_dma_get_range(struct device *dev, u64 *limit)
{
struct acpi_iort_node *node;
struct acpi_iort_root_complex *rc;
@@ -1408,8 +1408,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
return -EINVAL;
}
- *size = rc->memory_address_limit >= 64 ? U64_MAX :
- 1ULL<<rc->memory_address_limit;
+ *limit = rc->memory_address_limit >= 64 ? U64_MAX :
+ (1ULL << rc->memory_address_limit) - 1;
return 0;
}
@@ -1417,16 +1417,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
/**
* iort_dma_get_ranges() - Look up DMA addressing limit for the device
* @dev: device to lookup
- * @size: DMA range size result pointer
+ * @limit: DMA limit result pointer
*
* Return: 0 on success, an error otherwise.
*/
-int iort_dma_get_ranges(struct device *dev, u64 *size)
+int iort_dma_get_ranges(struct device *dev, u64 *limit)
{
if (dev_is_pci(dev))
- return rc_dma_get_range(dev, size);
+ return rc_dma_get_range(dev, limit);
else
- return nc_dma_get_range(dev, size);
+ return nc_dma_get_range(dev, limit);
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
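The two files above switch the ACPI arm64 DMA setup from carrying a range size to carrying an inclusive address limit. As a worked example (values assumed for illustration): an IORT node with memory_address_limit = 32 now yields a limit of (1ULL << 32) - 1 = 0xffffffff instead of a size of 1ULL << 32, and back in acpi_arch_dma_setup() that limit gives mask = DMA_BIT_MASK(ilog2(0xffffffff) + 1) = DMA_BIT_MASK(32) without the old end = size - 1 step; dma_range_map_max() likewise returns the highest inclusive DMA address of the range map, replacing the open-coded loop that was removed.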
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d9fa730416f1..787eca838410 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -112,6 +112,17 @@ int acpi_bus_get_status(struct acpi_device *device)
if (ACPI_FAILURE(status))
return -ENODEV;
+ if (!device->status.present && device->status.enabled) {
+ pr_info(FW_BUG "Device [%s] status [%08x]: not present and enabled\n",
+ device->pnp.bus_id, (u32)sta);
+ device->status.enabled = 0;
+ /*
+ * The status is clearly invalid, so clear the functional bit as
+ * well to avoid attempting to use the device.
+ */
+ device->status.functional = 0;
+ }
+
acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
@@ -316,9 +327,14 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_THERMAL))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PRMT))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_FFH))
@@ -990,25 +1006,26 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device);
-------------------------------------------------------------------------- */
/**
- * acpi_bus_register_driver - register a driver with the ACPI bus
+ * __acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
+ * @owner: owning module/driver
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
-int acpi_bus_register_driver(struct acpi_driver *driver)
+int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner)
{
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
- driver->drv.owner = driver->owner;
+ driver->drv.owner = owner;
return driver_register(&driver->drv);
}
-EXPORT_SYMBOL(acpi_bus_register_driver);
+EXPORT_SYMBOL(__acpi_bus_register_driver);
/**
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
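A hedged note on the conversion above: the matching include/acpi change is not part of this hunk, but the usual pattern for this kind of owner threading is a wrapper macro along the lines of

    /* Assumed header counterpart, not shown in this diff. */
    #define acpi_bus_register_driver(drv) \
            __acpi_bus_register_driver(drv, THIS_MODULE)

so existing callers keep using acpi_bus_register_driver() while the owning module is captured automatically. The __amba_driver_register() change later in this diff follows the same scheme.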
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 4bfbe55553f4..1d857978f5f4 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
- GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
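For illustration of the MASK_VAL fix above (values assumed): with bit_offset = 8 and bit_width = 16, the corrected macro evaluates to ((val) >> 8) & GENMASK(15, 0), so val = 0x12345678 yields 0x3456. The previous form applied GENMASK to the shift amount rather than to the shifted value, i.e. val >> (8 & GENMASK(16, 0)) = val >> 8, so the field mask was never applied to the result at all, and GENMASK(bit_width, 0) was one bit too wide in any case.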
@@ -686,8 +686,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_sb_cppc2_support_acked) {
pr_debug("CPPC v2 _OSC not acked\n");
- if (!cpc_supported_by_cpu())
+ if (!cpc_supported_by_cpu()) {
+ pr_debug("CPPC is not supported by the CPU\n");
return -ENODEV;
+ }
}
/* Parse the ACPI _CPC table for this CPU. */
@@ -1002,14 +1004,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1018,17 +1020,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
switch (size) {
case 8:
@@ -1044,8 +1051,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
@@ -1063,12 +1075,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1076,17 +1089,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
@@ -1105,8 +1123,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a7c00ef78086..34affbda295e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -88,43 +88,29 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
enum dock_callback_type cb_type)
{
struct acpi_device *adev = dd->adev;
+ acpi_hp_fixup fixup = NULL;
+ acpi_hp_uevent uevent = NULL;
+ acpi_hp_notify notify = NULL;
acpi_lock_hp_context();
- if (!adev->hp)
- goto out;
-
- if (cb_type == DOCK_CALL_FIXUP) {
- void (*fixup)(struct acpi_device *);
-
- fixup = adev->hp->fixup;
- if (fixup) {
- acpi_unlock_hp_context();
- fixup(adev);
- return;
- }
- } else if (cb_type == DOCK_CALL_UEVENT) {
- void (*uevent)(struct acpi_device *, u32);
-
- uevent = adev->hp->uevent;
- if (uevent) {
- acpi_unlock_hp_context();
- uevent(adev, event);
- return;
- }
- } else {
- int (*notify)(struct acpi_device *, u32);
-
- notify = adev->hp->notify;
- if (notify) {
- acpi_unlock_hp_context();
- notify(adev, event);
- return;
- }
+ if (adev->hp) {
+ if (cb_type == DOCK_CALL_FIXUP)
+ fixup = adev->hp->fixup;
+ else if (cb_type == DOCK_CALL_UEVENT)
+ uevent = adev->hp->uevent;
+ else
+ notify = adev->hp->notify;
}
- out:
acpi_unlock_hp_context();
+
+ if (fixup)
+ fixup(adev);
+ else if (uevent)
+ uevent(adev, event);
+ else if (notify)
+ notify(adev, event);
}
static struct dock_station *find_dock_station(acpi_handle handle)
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 654aaa53c67f..d202730fafd8 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -150,6 +150,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
{"INTC1049", 0},
{"INTC1064", 0},
+ {"INTC106B", 0},
{"INTC10A3", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index b8187babbbbb..8023b3e23315 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC1061", 0},
{"INTC1065", 0},
{"INTC1066", 0},
+ {"INTC106C", 0},
+ {"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
{"", 0},
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index b7113fa92fa6..014ada759954 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -43,6 +43,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1064"},
{"INTC1065"},
{"INTC1066"},
+ {"INTC1068"},
+ {"INTC1069"},
+ {"INTC106A"},
+ {"INTC106B"},
+ {"INTC106C"},
+ {"INTC106D"},
{"INTC10A0"},
{"INTC10A1"},
{"INTC10A2"},
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 02255795b800..e7793ee9e649 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1482,13 +1482,14 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
bool call_reg)
{
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
acpi_status status;
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
acpi_ec_enter_noirq(ec);
- status = acpi_install_address_space_handler_no_reg(ec->handle,
+ status = acpi_install_address_space_handler_no_reg(scope_handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
NULL, ec);
@@ -1497,11 +1498,10 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
return -ENODEV;
}
set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
- ec->address_space_handler_holder = ec->handle;
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
- acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
+ acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@@ -1553,10 +1553,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
static void ec_remove_handlers(struct acpi_ec *ec)
{
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(
- ec->address_space_handler_holder,
- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ scope_handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler)))
pr_err("failed to remove space handler\n");
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
@@ -1595,14 +1598,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca
{
int ret;
- ret = ec_install_handlers(ec, device, call_reg);
- if (ret)
- return ret;
-
/* First EC capable of handling transactions */
if (!first_ec)
first_ec = ec;
+ ret = ec_install_handlers(ec, device, call_reg);
+ if (ret) {
+ if (ec == first_ec)
+ first_ec = NULL;
+
+ return ret;
+ }
+
pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
ec->data_addr);
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index e7b4b4e4a55e..f89d19c922dc 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -15,6 +15,7 @@
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
+ {"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ca72a0dc5715..2a0e9fc7b74c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -69,7 +69,8 @@ void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
-#ifdef CONFIG_PCI
+
+#if defined(CONFIG_X86) && defined(CONFIG_PCI)
void acpi_lpss_init(void);
#else
static inline void acpi_lpss_init(void) {}
@@ -185,7 +186,6 @@ enum acpi_ec_event_state {
struct acpi_ec {
acpi_handle handle;
- acpi_handle address_space_handler_holder;
int gpe;
int irq;
unsigned long command_addr;
diff --git a/drivers/acpi/nhlt.c b/drivers/acpi/nhlt.c
new file mode 100644
index 000000000000..dc1bd0df9228
--- /dev/null
+++ b/drivers/acpi/nhlt.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2023-2024 Intel Corporation
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "ACPI: NHLT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/minmax.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <acpi/nhlt.h>
+
+static struct acpi_table_nhlt *acpi_gbl_nhlt;
+
+static struct acpi_table_nhlt empty_nhlt = {
+ .header = {
+ .signature = ACPI_SIG_NHLT,
+ },
+};
+
+/**
+ * acpi_nhlt_get_gbl_table - Retrieve a pointer to the first NHLT table.
+ *
+ * If there is no NHLT in the system, acpi_gbl_nhlt will instead point to an
+ * empty table.
+ *
+ * Return: ACPI status code of the operation.
+ */
+acpi_status acpi_nhlt_get_gbl_table(void)
+{
+ acpi_status status;
+
+ status = acpi_get_table(ACPI_SIG_NHLT, 0, (struct acpi_table_header **)(&acpi_gbl_nhlt));
+ if (!acpi_gbl_nhlt)
+ acpi_gbl_nhlt = &empty_nhlt;
+ return status;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_get_gbl_table);
+
+/**
+ * acpi_nhlt_put_gbl_table - Release the global NHLT table.
+ */
+void acpi_nhlt_put_gbl_table(void)
+{
+ acpi_put_table((struct acpi_table_header *)acpi_gbl_nhlt);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_put_gbl_table);
+
+/**
+ * acpi_nhlt_endpoint_match - Verify if an endpoint matches criteria.
+ * @ep: the endpoint to check.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter when matching.
+ *
+ * Return: %true if endpoint matches specified criteria or %false otherwise.
+ */
+bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ return ep &&
+ (link_type < 0 || ep->link_type == link_type) &&
+ (dev_type < 0 || ep->device_type == dev_type) &&
+ (bus_id < 0 || ep->virtual_bus_id == bus_id) &&
+ (dir < 0 || ep->direction == dir);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_match);
+
+/**
+ * acpi_nhlt_tb_find_endpoint - Search a NHLT table for an endpoint.
+ * @tb: the table to search.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to endpoint matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_endpoint *
+acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ struct acpi_nhlt_endpoint *ep;
+
+ for_each_nhlt_endpoint(tb, ep)
+ if (acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id))
+ return ep;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_endpoint);
+
+/**
+ * acpi_nhlt_find_endpoint - Search all NHLT tables for an endpoint.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to endpoint matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_endpoint *
+acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id)
+{
+ /* TODO: Currently limited to table of index 0. */
+ return acpi_nhlt_tb_find_endpoint(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_find_endpoint);
+
+/**
+ * acpi_nhlt_endpoint_find_fmtcfg - Search endpoint's formats configuration space
+ * for a specific format.
+ * @ep: the endpoint to search.
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ struct acpi_nhlt_wave_formatext *wav;
+ struct acpi_nhlt_format_config *fmt;
+
+ for_each_nhlt_endpoint_fmtcfg(ep, fmt) {
+ wav = &fmt->format;
+
+ if (wav->valid_bits_per_sample == vbps &&
+ wav->samples_per_sec == rate &&
+ wav->bits_per_sample == bps &&
+ wav->channel_count == ch)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_find_fmtcfg);
+
+/**
+ * acpi_nhlt_tb_find_fmtcfg - Search a NHLT table for a specific format.
+ * @tb: the table to search.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ struct acpi_nhlt_format_config *fmt;
+ struct acpi_nhlt_endpoint *ep;
+
+ for_each_nhlt_endpoint(tb, ep) {
+ if (!acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id))
+ continue;
+
+ fmt = acpi_nhlt_endpoint_find_fmtcfg(ep, ch, rate, vbps, bps);
+ if (fmt)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_fmtcfg);
+
+/**
+ * acpi_nhlt_find_fmtcfg - Search all NHLT tables for a specific format.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ /* TODO: Currently limited to table of index 0. */
+ return acpi_nhlt_tb_find_fmtcfg(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id,
+ ch, rate, vbps, bps);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_find_fmtcfg);
+
+static bool acpi_nhlt_config_is_micdevice(struct acpi_nhlt_config *cfg)
+{
+ return cfg->capabilities_size >= sizeof(struct acpi_nhlt_micdevice_config);
+}
+
+static bool acpi_nhlt_config_is_vendor_micdevice(struct acpi_nhlt_config *cfg)
+{
+ struct acpi_nhlt_vendor_micdevice_config *devcfg = __acpi_nhlt_config_caps(cfg);
+
+ return cfg->capabilities_size >= sizeof(*devcfg) &&
+ cfg->capabilities_size == struct_size(devcfg, mics, devcfg->mics_count);
+}
+
+/**
+ * acpi_nhlt_endpoint_mic_count - Retrieve number of digital microphones for a PDM endpoint.
+ * @ep: the endpoint to return microphones count for.
+ *
+ * Return: A number of microphones or an error code if an invalid endpoint is provided.
+ */
+int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep)
+{
+ union acpi_nhlt_device_config *devcfg;
+ struct acpi_nhlt_format_config *fmt;
+ struct acpi_nhlt_config *cfg;
+ u16 max_ch = 0;
+
+ if (!ep || ep->link_type != ACPI_NHLT_LINKTYPE_PDM)
+ return -EINVAL;
+
+ /* Find max number of channels based on formats configuration. */
+ for_each_nhlt_endpoint_fmtcfg(ep, fmt)
+ max_ch = max(fmt->format.channel_count, max_ch);
+
+ cfg = __acpi_nhlt_endpoint_config(ep);
+ devcfg = __acpi_nhlt_config_caps(cfg);
+
+ /* If @ep is not a mic array, fallback to channels count. */
+ if (!acpi_nhlt_config_is_micdevice(cfg) ||
+ devcfg->gen.config_type != ACPI_NHLT_CONFIGTYPE_MICARRAY)
+ return max_ch;
+
+ switch (devcfg->mic.array_type) {
+ case ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL:
+ case ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG:
+ return 2;
+
+ case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1:
+ case ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED:
+ case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2:
+ return 4;
+
+ case ACPI_NHLT_ARRAYTYPE_VENDOR:
+ if (!acpi_nhlt_config_is_vendor_micdevice(cfg))
+ return -EINVAL;
+ return devcfg->vendor_mic.mics_count;
+
+ default:
+ pr_warn("undefined mic array type: %#x\n", devcfg->mic.array_type);
+ return max_ch;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_mic_count);
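As a usage illustration for the new NHLT helpers (a sketch only; the example_* name is hypothetical and error handling is minimal), an audio driver could look up the microphone count behind the first PDM endpoint like this, with negative arguments meaning "match anything" for that field:

    static int example_nhlt_pdm_mic_count(void)
    {
            struct acpi_nhlt_endpoint *ep;
            int mics;

            /* Falls back to an empty table when the platform has no NHLT. */
            acpi_nhlt_get_gbl_table();

            ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM, -1, -1, -1);
            mics = acpi_nhlt_endpoint_mic_count(ep); /* -EINVAL if ep is NULL */

            acpi_nhlt_put_gbl_table();

            return mics;
    }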
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e45e64993c50..e3f26e71637a 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -208,16 +208,26 @@ int __init srat_disabled(void)
return acpi_numa < 0;
}
-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+__weak int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+static int __init acpi_parse_slit(struct acpi_table_header *table)
{
+ struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
int i, j;
+ if (!slit_valid(slit)) {
+ pr_info("SLIT table looks invalid. Not used.\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < slit->locality_count; i++) {
const int from_node = pxm_to_node(i);
@@ -234,28 +244,34 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
slit->entry[slit->locality_count * i + j]);
}
}
+
+ return 0;
}
-/*
- * Default callback for parsing of the Proximity Domain <-> Memory
- * Area mappings
- */
-int __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+static int parsed_numa_memblks __initdata;
+
+static int __init
+acpi_parse_memory_affinity(union acpi_subtable_headers *header,
+ const unsigned long table_end)
{
+ struct acpi_srat_mem_affinity *ma;
u64 start, end;
u32 hotpluggable;
int node, pxm;
+ ma = (struct acpi_srat_mem_affinity *)header;
+
+ acpi_table_print_srat_entry(&header->common);
+
if (srat_disabled())
- goto out_err;
+ return 0;
if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
pr_err("SRAT: Unexpected header length: %d\n",
ma->header.length);
goto out_err_bad_srat;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- goto out_err;
+ return 0;
hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
@@ -293,11 +309,15 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+ parsed_numa_memblks++;
+
return 0;
+
out_err_bad_srat:
+ /* Just disable SRAT, but do not fail and ignore errors. */
bad_srat();
-out_err:
- return -EINVAL;
+
+ return 0;
}
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
@@ -340,26 +360,6 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
(*fake_pxm)++;
return 0;
}
-#else
-static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
- void *arg, const unsigned long table_end)
-{
- return 0;
-}
-#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-
-static int __init acpi_parse_slit(struct acpi_table_header *table)
-{
- struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
-
- if (!slit_valid(slit)) {
- pr_info("SLIT table looks invalid. Not used.\n");
- return -EINVAL;
- }
- acpi_numa_slit_init(slit);
-
- return 0;
-}
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
@@ -450,24 +450,6 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-static int __initdata parsed_numa_memblks;
-
-static int __init
-acpi_parse_memory_affinity(union acpi_subtable_headers * header,
- const unsigned long end)
-{
- struct acpi_srat_mem_affinity *memory_affinity;
-
- memory_affinity = (struct acpi_srat_mem_affinity *)header;
-
- acpi_table_print_srat_entry(&header->common);
-
- /* let architecture-dependent part to do it */
- if (!acpi_numa_memory_affinity_init(memory_affinity))
- parsed_numa_memblks++;
- return 0;
-}
-
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d418462ab791..4a9704730224 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -136,6 +136,45 @@ void platform_profile_notify(void)
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+int platform_profile_cycle(void)
+{
+ enum platform_profile_option profile;
+ enum platform_profile_option next;
+ int err;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ err = cur_profile->profile_get(cur_profile, &profile);
+ if (err) {
+ mutex_unlock(&profile_lock);
+ return err;
+ }
+
+ next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
+ profile + 1);
+
+ if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
+ mutex_unlock(&profile_lock);
+ return -EINVAL;
+ }
+
+ err = cur_profile->profile_set(cur_profile, next);
+ mutex_unlock(&profile_lock);
+
+ if (!err)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(platform_profile_cycle);
+
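A short caller sketch for platform_profile_cycle() (hypothetical; only the exported function itself comes from the hunk above): a platform driver could advance to the next registered profile from a mode-switch hotkey handler and rely on the sysfs notification emitted on success.

    static void example_mode_hotkey_event(void)
    {
            int err = platform_profile_cycle();

            /* -ENODEV simply means no profile handler is registered. */
            if (err && err != -ENODEV)
                    pr_warn("platform profile cycle failed: %d\n", err);
    }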
int platform_profile_register(struct platform_profile_handler *pprof)
{
int err;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2b73580c9f36..80a52a4e66dd 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -31,9 +31,14 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* not defined without a warning. For instance if any of the properties
* from different GUID appear in a property list of another, it will be
* accepted by the kernel. Firmware validation tools should catch these.
+ *
+ * References:
+ *
+ * [1] UEFI DSD Guide.
+ * https://github.com/UEFI/DSD-Guide/blob/main/src/dsd-guide.adoc
*/
static const guid_t prp_guids[] = {
- /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
+ /* ACPI _DSD device properties GUID [1]: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
/* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
@@ -53,12 +58,12 @@ static const guid_t prp_guids[] = {
0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0),
};
-/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+/* ACPI _DSD data subnodes GUID [1]: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
-/* ACPI _DSD data buffer GUID: edb12dd0-363d-4085-a3d2-49522ca160c4 */
+/* ACPI _DSD data buffer GUID [1]: edb12dd0-363d-4085-a3d2-49522ca160c4 */
static const guid_t buffer_prop_guid =
GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 59423fe9d0f2..b5bf8b81a050 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -518,6 +518,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506MV */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -534,6 +541,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
+ /* XMG APEX 17 (M23) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
+ },
+ },
+ {
/* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
@@ -630,6 +643,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "X565"),
},
},
+ {
+ /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
+ {
+ /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7c157bf92695..503773707e01 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -73,8 +73,7 @@ void acpi_unlock_hp_context(void)
void acpi_initialize_hp_context(struct acpi_device *adev,
struct acpi_hotplug_context *hp,
- int (*notify)(struct acpi_device *, u32),
- void (*uevent)(struct acpi_device *, u32))
+ acpi_hp_notify notify, acpi_hp_uevent uevent)
{
acpi_lock_hp_context();
hp->notify = notify;
@@ -428,7 +427,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
} else if (adev->flags.hotplug_notify) {
error = acpi_generic_hotplug_event(adev, src);
} else {
- int (*notify)(struct acpi_device *, u32);
+ acpi_hp_notify notify;
acpi_lock_hp_context();
notify = adev->hp ? adev->hp->notify : NULL;
@@ -1298,10 +1297,10 @@ const char *acpi_device_hid(struct acpi_device *device)
{
struct acpi_hardware_id *hid;
- if (list_empty(&device->pnp.ids))
+ hid = list_first_entry_or_null(&device->pnp.ids, struct acpi_hardware_id, list);
+ if (!hid)
return dummy_hid;
- hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
return hid->id;
}
EXPORT_SYMBOL(acpi_device_hid);
@@ -1581,12 +1580,13 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
- int ret = iommu_fwspec_init(dev, fwnode, ops);
+ int ret;
- if (!ret)
- ret = iommu_fwspec_add_ids(dev, &id, 1);
+ ret = iommu_fwspec_init(dev, fwnode, ops);
+ if (ret)
+ return ret;
- return ret;
+ return iommu_fwspec_add_ids(dev, &id, 1);
}
static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
@@ -1625,12 +1625,11 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
if (!err && dev->bus)
err = iommu_probe_device(dev);
- /* Ignore all other errors apart from EPROBE_DEFER */
- if (err == -EPROBE_DEFER) {
+ if (err == -EPROBE_DEFER)
return err;
- } else if (err) {
+ if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- return -ENODEV;
+ return err;
}
if (!acpi_iommu_fwspec_ops(dev))
return -ENODEV;
@@ -1671,16 +1670,12 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
acpi_arch_dma_setup(dev);
+ /* Ignore all other errors apart from EPROBE_DEFER */
ret = acpi_iommu_configure_id(dev, input_id);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
- /*
- * Historically this routine doesn't fail driver probing due to errors
- * in acpi_iommu_configure_id()
- */
-
- arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -1843,7 +1838,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
- adev->dep_unmet++;
+ if (!dep->met)
+ adev->dep_unmet++;
}
}
}
@@ -1961,7 +1957,7 @@ bool acpi_device_is_present(const struct acpi_device *adev)
bool acpi_device_is_enabled(const struct acpi_device *adev)
{
- return adev->status.present && adev->status.enabled;
+ return adev->status.enabled;
}
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 302dce0b2b50..d67881b50bca 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -662,14 +662,15 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz,
{
int result;
- tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
- trip_table,
- trip_count,
- tz,
- &acpi_thermal_zone_ops,
- NULL,
- passive_delay,
- tz->polling_frequency * 100);
+ if (trip_count)
+ tz->thermal_zone = thermal_zone_device_register_with_trips(
+ "acpitz", trip_table, trip_count, tz,
+ &acpi_thermal_zone_ops, NULL, passive_delay,
+ tz->polling_frequency * 100);
+ else
+ tz->thermal_zone = thermal_tripless_zone_device_register(
+ "acpitz", tz, &acpi_thermal_zone_ops, NULL);
+
if (IS_ERR(tz->thermal_zone))
return PTR_ERR(tz->thermal_zone);
@@ -901,11 +902,8 @@ static int acpi_thermal_add(struct acpi_device *device)
trip++;
}
- if (trip == trip_table) {
+ if (trip == trip_table)
pr_warn(FW_BUG "No valid trip points!\n");
- result = -ENODEV;
- goto free_memory;
- }
result = acpi_thermal_register_thermal_zone(tz, trip_table,
trip - trip_table,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 9fdcc620c652..2cc3821b2b16 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -499,6 +499,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Lenovo Slim 7 16ARH7 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UX"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Lenovo ThinkPad X131e (3371 AMD version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/x86/Makefile b/drivers/acpi/x86/Makefile
new file mode 100644
index 000000000000..63c99509ed9d
--- /dev/null
+++ b/drivers/acpi/x86/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_ACPI) += acpi-x86.o
+acpi-x86-y += apple.o
+acpi-x86-y += cmos_rtc.o
+acpi-x86-$(CONFIG_PCI) += lpss.o
+acpi-x86-y += s2idle.o
+acpi-x86-y += utils.o
+
+obj-$(CONFIG_X86) += blacklist.o
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/x86/blacklist.c
index a558d24fb788..55214d0a12b1 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/x86/blacklist.c
@@ -17,7 +17,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include "internal.h"
+#include "../internal.h"
#ifdef CONFIG_DMI
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/x86/cmos_rtc.c
index 9b55d1593d16..51643ff6fe5f 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/x86/cmos_rtc.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/mc146818rtc.h>
-#include "internal.h"
+#include "../internal.h"
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
{ "PNP0B00" },
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/x86/lpss.c
index 04e273167e92..148e29c2c526 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -25,7 +25,7 @@
#include <linux/suspend.h>
#include <linux/delay.h>
-#include "internal.h"
+#include "../internal.h"
#ifdef CONFIG_X86_INTEL_LPSS
@@ -325,6 +325,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
static const struct property_entry bsw_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
+ PROPERTY_ENTRY_U32("num-cs", 2),
{ }
};
@@ -886,10 +887,8 @@ static int acpi_lpss_activate(struct device *dev)
if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
lpss_deassert_reset(pdata);
-#ifdef CONFIG_PM
if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
acpi_lpss_save_ctx(dev, pdata);
-#endif
return 0;
}
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index cd84af23f7ea..dd0b40b9bbe8 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
unsigned int func_mask;
/*
- * Avoid evaluating the same _DSM function for two
- * different UUIDs and prioritize the MSFT one.
+ * Log a message if the _DSM function sets for two
+ * different UUIDs overlap.
*/
func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
- if (func_mask) {
+ if (func_mask)
acpi_handle_info(adev->handle,
"Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
func_mask);
- lps0_dsm_func_mask &= ~func_mask;
- }
}
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 90c3d2eab9e9..7dca73417e2b 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -101,6 +101,15 @@ static const struct override_status_id override_status_ids[] = {
}),
/*
+ * The Dell XPS 15 9550 has a SMO8110 accelerometer /
+ * HDD freefall sensor which is wrongly marked as not present.
+ */
+ PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"),
+ }),
+
+ /*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
* manufacturer has solved this :| The DMI match may not seem unique,
@@ -260,9 +269,10 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
#define ACPI_QUIRK_UART1_SKIP BIT(1)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2)
-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3)
-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4)
-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5)
+#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3)
+#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4)
+#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -342,6 +352,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_PNP_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
@@ -440,14 +451,18 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
if (ret)
return 0;
- /* to not match on PNP enumerated debug UARTs */
- if (!dev_is_platform(controller_parent))
- return 0;
-
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
if (dmi_id)
quirks = (unsigned long)dmi_id->driver_data;
+ if (!dev_is_platform(controller_parent)) {
+ /* PNP enumerated UARTs */
+ if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
+ *skip = true;
+
+ return 0;
+ }
+
if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
*skip = true;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a24c152bfaac..aba3aa95b224 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -488,28 +488,31 @@ static int __init amba_stub_drv_init(void)
* waiting on amba_match(). So, register a stub driver to make sure
* amba_match() is called even if no amba driver has been registered.
*/
- return amba_driver_register(&amba_proxy_drv);
+ return __amba_driver_register(&amba_proxy_drv, NULL);
}
late_initcall_sync(amba_stub_drv_init);
/**
- * amba_driver_register - register an AMBA device driver
+ * __amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
+ * @owner: owning module/driver
*
* Register an AMBA device driver with the Linux device model
* core. If devices pre-exist, the drivers probe function will
* be called.
*/
-int amba_driver_register(struct amba_driver *drv)
+int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
if (!drv->probe)
return -EINVAL;
+ drv->drv.owner = owner;
drv->drv.bus = &amba_bustype;
return driver_register(&drv->drv);
}
-EXPORT_SYMBOL(amba_driver_register);
+EXPORT_SYMBOL(__amba_driver_register);
/**
* amba_driver_unregister - remove an AMBA device driver
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bad28cf42010..dd6923d37931 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 928ec93c6b45..b595494ab9b4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -556,7 +556,7 @@ comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the ALi ATA interfaces
@@ -566,7 +566,7 @@ config PATA_ALI
config PATA_AMD
tristate "AMD/NVidia PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the AMD and NVidia PATA
@@ -584,7 +584,7 @@ config PATA_ARASAN_CF
config PATA_ARTOP
tristate "ARTOP 6210/6260 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for ARTOP PATA controllers.
@@ -611,7 +611,7 @@ config PATA_ATP867X
config PATA_CMD64X
tristate "CMD64x PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the CMD64x series chips
@@ -658,7 +658,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the Cypress/Contaq CY82C693
@@ -706,7 +706,7 @@ config PATA_HPT366
config PATA_HPT37X
tristate "HPT 370/370A/371/372/374/302 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the majority of the later HPT
PATA controllers via the new ATA layer.
@@ -715,7 +715,7 @@ config PATA_HPT37X
config PATA_HPT3X2N
tristate "HPT 371N/372N/302N PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the N variant HPT PATA
controllers via the new ATA layer.
@@ -818,7 +818,7 @@ config PATA_MPC52xx
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Netcell Revolution RAID
PATA controller.
@@ -854,7 +854,7 @@ config PATA_OLDPIIX
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
@@ -864,7 +864,7 @@ config PATA_OPTIDMA
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
@@ -872,7 +872,7 @@ config PATA_PDC2027X
config PATA_PDC_OLD
tristate "Older Promise PATA controller support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@@ -900,7 +900,7 @@ config PATA_RDC
config PATA_SC1200
tristate "SC1200 PATA support"
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST) && HAS_IOPORT
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@@ -918,7 +918,7 @@ config PATA_SCH
config PATA_SERVERWORKS
tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Serverworks OSB4/CSB5/CSB6 and
HT1000 PATA controllers, via the new ATA layer.
@@ -1182,7 +1182,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI)
+ depends on (ISA || PCI) && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for ISA/VLB/PCI bus legacy PATA
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 562302e2e57c..6548f10e61d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -666,6 +666,87 @@ static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+ "32-bits port map masks to ignore controllers ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv, char *mask_s)
+{
+ unsigned int mask;
+
+ if (kstrtouint(mask_s, 0, &mask)) {
+ dev_err(dev, "Invalid port map mask\n");
+ return;
+ }
+
+ hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv)
+{
+ char *param, *end, *str, *mask_s;
+ char *name;
+
+ if (!strlen(ahci_mask_port_map))
+ return;
+
+ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ if (!str)
+ return;
+
+ /* Handle single mask case */
+ if (!strchr(str, '=')) {
+ ahci_apply_port_map_mask(dev, hpriv, str);
+ goto free;
+ }
+
+ /*
+ * Mask list case: parse the parameter to apply the mask only if
+ * the device name matches.
+ */
+ param = str;
+ end = param + strlen(param);
+ while (param && param < end && *param) {
+ name = param;
+ param = strchr(name, '=');
+ if (!param)
+ break;
+
+ *param = '\0';
+ param++;
+ if (param >= end)
+ break;
+
+ if (strcmp(dev_name(dev), name) != 0) {
+ param = strchr(param, ',');
+ if (param)
+ param++;
+ continue;
+ }
+
+ mask_s = param;
+ param = strchr(mask_s, ',');
+ if (param) {
+ *param = '\0';
+ param++;
+ }
+
+ ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ }
+
+free:
+ kfree(str);
+}
+
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
@@ -688,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
+ /* Handle port map masks passed as module parameter. */
+ if (ahci_mask_port_map)
+ ahci_get_port_map_mask(&pdev->dev, hpriv);
+
ahci_save_initial_config(&pdev->dev, hpriv);
}
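As a hedged usage example for the new module parameter (the PCI addresses below are made up): booting with ahci.mask_port_map=0x3 applies the mask 0x3 to the port map of every AHCI controller, while ahci.mask_port_map=0000:00:17.0=0x3,0000:00:1f.2=0x1 applies per-controller masks only to the devices named; strings that do not parse as an unsigned mask are reported by ahci_apply_port_map_mask() and ignored.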
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 344c87210d8f..8f40f75ba08c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index d4a626f87963..79a8b0aa37bf 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -30,7 +30,6 @@
#define ST_AHCI_OOBR_CIMAX_SHIFT 0
struct st_ahci_drv_data {
- struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be3412cdb22e..4f35aab81a0a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1480,19 +1480,19 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
}
/**
- * ata_exec_internal_sg - execute libata internal command
+ * ata_exec_internal - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
- * @sgl: sg list for the data buffer of the command
- * @n_elem: Number of sg entries
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
*
- * Executes libata internal command with timeout. @tf contains
- * command on entry and result on return. Timeout and error
- * conditions are reported via return value. No recovery action
- * is taken after a command times out. It's caller's duty to
+ * Executes libata internal command with timeout. @tf contains
+ * the command on entry and the result on return. Timeout and error
+ * conditions are reported via the return value. No recovery action
+ * is taken after a command times out. It is the caller's duty to
* clean up after timeout.
*
* LOCKING:
@@ -1501,34 +1501,38 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-static unsigned ata_exec_internal_sg(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sgl,
- unsigned int n_elem, unsigned int timeout)
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
u8 command = tf->command;
- int auto_timeout = 0;
struct ata_queued_cmd *qc;
+ struct scatterlist sgl;
unsigned int preempted_tag;
u32 preempted_sactive;
u64 preempted_qc_active;
int preempted_nr_active_links;
+ bool auto_timeout = false;
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
unsigned int err_mask;
int rc;
+ if (WARN_ON(dma_dir != DMA_NONE && !buf))
+ return AC_ERR_INVALID;
+
spin_lock_irqsave(ap->lock, flags);
- /* no internal command while frozen */
+ /* No internal command while frozen */
if (ata_port_is_frozen(ap)) {
spin_unlock_irqrestore(ap->lock, flags);
return AC_ERR_SYSTEM;
}
- /* initialize internal qc */
+ /* Initialize internal qc */
qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
qc->tag = ATA_TAG_INTERNAL;
@@ -1547,12 +1551,12 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = 0;
ap->nr_active_links = 0;
- /* prepare & issue qc */
+ /* Prepare and issue qc */
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
- /* some SATA bridges need us to indicate data xfer direction */
+ /* Some SATA bridges need us to indicate data xfer direction */
if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
dma_dir == DMA_FROM_DEVICE)
qc->tf.feature |= ATAPI_DMADIR;
@@ -1560,13 +1564,8 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
- unsigned int i, buflen = 0;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, n_elem, i)
- buflen += sg->length;
-
- ata_sg_init(qc, sgl, n_elem);
+ sg_init_one(&sgl, buf, buflen);
+ ata_sg_init(qc, &sgl, 1);
qc->nbytes = buflen;
}
@@ -1578,11 +1577,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
if (!timeout) {
- if (ata_probe_timeout)
+ if (ata_probe_timeout) {
timeout = ata_probe_timeout * 1000;
- else {
+ } else {
timeout = ata_internal_cmd_timeout(dev, command);
- auto_timeout = 1;
+ auto_timeout = true;
}
}
@@ -1595,30 +1594,25 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ata_sff_flush_pio_task(ap);
if (!rc) {
- spin_lock_irqsave(ap->lock, flags);
-
- /* We're racing with irq here. If we lose, the
- * following test prevents us from completing the qc
- * twice. If we win, the port is frozen and will be
- * cleaned up by ->post_internal_cmd().
+ /*
+ * We are racing with irq here. If we lose, the following test
+ * prevents us from completing the qc twice. If we win, the port
+ * is frozen and will be cleaned up by ->post_internal_cmd().
*/
+ spin_lock_irqsave(ap->lock, flags);
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
-
ata_port_freeze(ap);
-
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
}
-
spin_unlock_irqrestore(ap->lock, flags);
}
- /* do post_internal_cmd */
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
- /* perform minimal error analysis */
+ /* Perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
@@ -1632,7 +1626,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->result_tf.status |= ATA_SENSE;
}
- /* finish up */
+ /* Finish up */
spin_lock_irqsave(ap->lock, flags);
*tf = qc->result_tf;
@@ -1653,44 +1647,6 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
}
/**
- * ata_exec_internal - execute libata internal command
- * @dev: Device to which the command is sent
- * @tf: Taskfile registers for the command and the result
- * @cdb: CDB for packet command
- * @dma_dir: Data transfer direction of the command
- * @buf: Data buffer of the command
- * @buflen: Length of data buffer
- * @timeout: Timeout in msecs (0 for default)
- *
- * Wrapper around ata_exec_internal_sg() which takes simple
- * buffer instead of sg list.
- *
- * LOCKING:
- * None. Should be called with kernel context, might sleep.
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
- */
-unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout)
-{
- struct scatterlist *psg = NULL, sg;
- unsigned int n_elem = 0;
-
- if (dma_dir != DMA_NONE) {
- WARN_ON(!buf);
- sg_init_one(&sg, buf, buflen);
- psg = &sg;
- n_elem++;
- }
-
- return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
- timeout);
-}
-
-/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
@@ -2539,7 +2495,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
- if (ata_id_major_version(dev->id) < 12)
+ if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
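With the scatterlist variant folded away, libata-internal callers now pass a plain buffer and length straight to ata_exec_internal(). A minimal sketch of the new calling convention, loosely modeled on ata_dev_read_id(); the taskfile setup here is illustrative and not taken from this patch:

	struct ata_taskfile tf;
	u16 id[ATA_ID_WORDS];
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	/* Plain buffer + length; the single-entry scatterlist is built internally now. */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id), 0);
	if (err_mask)
		ata_dev_warn(dev, "IDENTIFY failed (err_mask=0x%x)\n", err_mask);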
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b0d6e69c4a5b..214b935c2ced 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -712,8 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_ncq_enabled |= 1 << devno;
/* If we are resuming, wake up the device */
- if (ap->pflags & ATA_PFLAG_RESUMING)
+ if (ap->pflags & ATA_PFLAG_RESUMING) {
+ dev->flags |= ATA_DFLAG_RESUMING;
ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ }
}
}
@@ -3169,6 +3171,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
return 0;
err:
+ dev->flags &= ~ATA_DFLAG_RESUMING;
*r_failed_dev = dev;
return rc;
}
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 0fb1934875f2..9e047bf912b1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-static ssize_t ata_ncq_prio_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_supported - Check if device supports NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @supported: Address of a boolean to store the result
+ *
+ * Helper to check if a device supports the NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @supported
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_supported;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
- spin_unlock_irq(ap->lock);
+ *supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_supported);
+
+static ssize_t ata_ncq_prio_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool supported;
+ int rc;
+
+ rc = ata_ncq_prio_supported(ap, sdev, &supported);
+ if (rc)
+ return rc;
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
+ return sysfs_emit(buf, "%d\n", supported);
}
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_enabled - Check if NCQ Priority is enabled
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enabled: Address of a boolean to store the result
+ *
+ * Helper to check if the NCQ Priority feature is enabled.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_enable;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
- spin_unlock_irq(ap->lock);
+ *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
+ return rc;
}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled);
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- long int input;
- int rc = 0;
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enabled;
+ int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = ata_ncq_prio_enabled(ap, sdev, &enabled);
if (rc)
return rc;
- if ((input < 0) || (input > 1))
- return -EINVAL;
- ap = ata_shost_to_port(sdev->host);
- dev = ata_scsi_find_dev(ap, sdev);
- if (unlikely(!dev))
- return -ENODEV;
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+
+/**
+ * ata_ncq_prio_enable - Enable/disable NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enable: true - enable NCQ Priority, false - disable NCQ Priority
+ *
+ * Helper to enable or disable the NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. NCQ Priority was enabled or disabled as requested
+ * * %-ENODEV - Failed to find the ATA device
+ * * %-EINVAL - NCQ Priority is not supported or CDL is enabled
+ */
+int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(ap->lock, flags);
- spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
rc = -EINVAL;
goto unlock;
}
- if (input) {
+ if (enable) {
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
ata_dev_err(dev,
"CDL must be disabled to enable NCQ priority\n");
@@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
}
unlock:
- spin_unlock_irq(ap->lock);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enable);
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enable;
+ int rc;
+
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ rc = ata_ncq_prio_enable(ap, sdev, enable);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
@@ -1170,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * ata_sas_device_configure - Default device_configure routine for libata
+ * devices
* @sdev: SCSI device to configure
+ * @lim: queue limits
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
- return ata_scsi_dev_config(sdev, ap->link.device);
+ return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+EXPORT_SYMBOL_GPL(ata_sas_device_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
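The three exported helpers above let SAS/ATA translation drivers expose their own NCQ priority attributes without poking at libata internals. A hedged sketch of how an LLDD sysfs store method might use ata_ncq_prio_enable(); the lldd_to_ata_port() lookup helper is hypothetical:

	static ssize_t my_ncq_prio_enable_store(struct device *device,
						struct device_attribute *attr,
						const char *buf, size_t len)
	{
		struct scsi_device *sdev = to_scsi_device(device);
		struct ata_port *ap = lldd_to_ata_port(sdev);	/* hypothetical helper */
		bool enable;
		int rc;

		rc = kstrtobool(buf, &enable);
		if (rc)
			return rc;

		rc = ata_ncq_prio_enable(ap, sdev, enable);

		return rc ? rc : len;
	}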
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0a0f483124c3..cdf29b178ddc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1021,7 +1021,8 @@ bool ata_scsi_dma_need_drain(struct request *rq)
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
@@ -1031,7 +1032,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
- blk_queue_max_hw_sectors(q, dev->max_sectors);
+ lim->max_hw_sectors = dev->max_sectors;
if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
@@ -1040,7 +1041,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
/* make room for appending the drain */
- blk_queue_max_segments(q, queue_max_segments(q) - 1);
+ lim->max_segments--;
sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
@@ -1077,7 +1078,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
- blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+ lim->dma_alignment = sdev->sector_size - 1;
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1131,8 +1132,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
/**
- * ata_scsi_slave_config - Set SCSI device attributes
+ * ata_scsi_device_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
+ * @lim: queue limits
*
* This is called before we actually start reading
* and writing to the device, to configure certain
@@ -1142,17 +1144,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_config(struct scsi_device *sdev)
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
if (dev)
- return ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, lim, dev);
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -4730,6 +4733,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
+ bool do_resume;
int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
@@ -4744,25 +4748,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
+ do_resume = dev->flags & ATA_DFLAG_RESUMING;
+
spin_unlock_irqrestore(ap->lock, flags);
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+ goto unlock_scan;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
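Drivers that used to wrap ata_scsi_slave_config() follow the same pattern with the new hook: apply their own restrictions to the passed-in queue_limits instead of calling blk_queue_*() helpers. A minimal sketch (the 511-byte alignment is only an example value):

	static int my_device_configure(struct scsi_device *sdev,
				       struct queue_limits *lim)
	{
		int rc;

		rc = ata_scsi_device_configure(sdev, lim);
		if (rc)
			return rc;

		/* Driver-specific restriction, written into the limits directly. */
		lim->dma_alignment = 511;

		return 0;
	}

The host template then wires .device_configure = my_device_configure in place of the old .slave_configure entry, as the pata_macio and sata_nv hunks below do.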
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 95a19c4ef2a1..250f7dae05fd 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3032,6 +3032,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
*/
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
+#ifdef CONFIG_HAS_IOPORT
unsigned long bmdma = pci_resource_start(pdev, 4);
u8 simplex;
@@ -3044,6 +3045,9 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
if (simplex & 0x80)
return -EOPNOTSUPP;
return 0;
+#else
+ return -ENOENT;
+#endif /* CONFIG_HAS_IOPORT */
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
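The same guard pattern applies to any helper that touches port I/O: compile the inb()/outb() path only when CONFIG_HAS_IOPORT is set and return a distinct error otherwise. A condensed, generic sketch of the shape used above (the function and port are hypothetical):

	static int my_probe_legacy_port(unsigned long ioaddr)
	{
	#ifdef CONFIG_HAS_IOPORT
		u8 val = inb(ioaddr);		/* port I/O is available */

		return val == 0xff ? -ENODEV : 0;
	#else
		return -ENOENT;			/* no port I/O in this configuration */
	#endif
	}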
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5c685bb1939e..38ce13b55474 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -50,10 +50,10 @@ extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
unsigned int tf_flags, int dld, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
-extern unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout);
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout);
extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
@@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 38795508c2e9..027cf67101ef 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -151,12 +151,6 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!host)
return -ENOMEM;
- /* Perform set up for DMA */
- if (pci_enable_device_io(pdev)) {
- dev_err(&pdev->dev, "unable to configure BAR2.\n");
- return -ENODEV;
- }
-
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 448a511cbc17..e7ac142c2423 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static struct legacy_probe probe_list[NR_HOST];
static struct legacy_data legacy_data[NR_HOST];
static struct ata_host *legacy_host[NR_HOST];
-static int nr_legacy_host;
-
/**
* legacy_probe_add - Add interface to probe list
@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
{
int i;
- for (i = 0; i < nr_legacy_host; i++) {
+ for (i = 0; i < NR_HOST; i++) {
struct legacy_data *ld = &legacy_data[i];
- ata_host_detach(legacy_host[i]);
+
+ if (legacy_host[i])
+ ata_host_detach(legacy_host[i]);
platform_device_unregister(ld->platform_dev);
}
}
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 4ac854f6b057..817838e2f70e 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_slave_config(struct scsi_device *sdev)
+static int pata_macio_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
int rc;
/* First call original */
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (rc)
return rc;
@@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
- blk_queue_update_dma_alignment(sdev->request_queue, 31);
+ lim->dma_alignment = 31;
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
@@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
- blk_queue_update_dma_alignment(sdev->request_queue, 15);
+ lim->dma_alignment = 15;
blk_queue_update_dma_pad(sdev->request_queue, 15);
/* We enable MWI and hack cache line size directly here, this
@@ -918,7 +919,7 @@ static const struct scsi_host_template pata_macio_sht = {
* use 64K minus 256
*/
.max_segment_size = MAX_DBDMA_SEG,
- .slave_configure = pata_macio_slave_config,
+ .device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -1371,9 +1372,6 @@ static struct pci_driver pata_macio_pci_driver = {
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
- .driver = {
- .owner = THIS_MODULE,
- },
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 400b22ee99c3..4c270999ba3c 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
pclk = sg->sata0_pclk;
else
pclk = sg->sata1_pclk;
- clk_enable(pclk);
+ ret = clk_enable(pclk);
+ if (ret)
+ return ret;
+
msleep(10);
/* Do not keep clocking a bridge that is not online */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e82786c63fbd..05c905827dc5 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations mv5_ops = {
@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};
-static const struct pci_device_id mv_pci_tbl[] = {
- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
- /* RocketRAID 1720/174x have different identifiers */
- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
- /* Adaptec 1430SA */
- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
- /* Marvell 7042 support */
- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
- /* Highpoint RocketRAID PCIe series */
- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
- { } /* terminate list */
-};
-
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
@@ -4303,6 +4272,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
+static const struct pci_device_id mv_pci_tbl[] = {
+ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+ /* RocketRAID 1720/174x have different identifiers */
+ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+ /* Adaptec 1430SA */
+ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+ /* Marvell 7042 support */
+ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+ /* Highpoint RocketRAID PCIe series */
+ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+ { } /* terminate list */
+};
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
@@ -4315,6 +4314,7 @@ static struct pci_driver mv_pci_driver = {
#endif
};
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
* mv_print_info - Dump key info to kernel log for perusal.
@@ -4487,7 +4487,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0a0cee755bde..36d99043ef50 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .slave_configure = nv_adma_slave_config,
+ .device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = nv_swncq_slave_config,
+ .device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_slave_config(struct scsi_device *sdev)
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}
- blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
- blk_queue_max_segments(sdev->request_queue, sg_tablesize);
+ lim->seg_boundary_mask = segment_boundary;
+ lim->max_segments = sg_tablesize;
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
@@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_slave_config(struct scsi_device *sdev)
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 142e70bfc498..72c03cbdaff4 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations sil24_ops = {
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index b51d7a9d0d90..a482741eb181 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
offset -= (idx * window_size);
idx++;
- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
- dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 9fb1575f8d88..cb00f8244e41 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -94,9 +94,6 @@
static const struct atmdev_ops fore200e_ops;
-static LIST_HEAD(fore200e_boards);
-
-
MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index caf0ea6a328a..5d95fe9fd836 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -830,7 +830,6 @@ typedef struct fore200e_vc_map {
/* per-device data */
typedef struct fore200e {
- struct list_head entry; /* next device */
const struct fore200e_bus* bus; /* bus-dependent code and data */
union fore200e_regs regs; /* bus-dependent registers */
struct atm_dev* atm_dev; /* ATM device */
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 151d95f96b11..69d2138d7efb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -16,6 +16,9 @@ menuconfig AUXDISPLAY
if AUXDISPLAY
+#
+# Character LCD section
+#
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST
help
@@ -25,12 +28,6 @@ config CHARLCD
This is some character LCD core interface that multiple drivers can
use.
-config LINEDISP
- tristate "Character line display core support" if COMPILE_TEST
- help
- This is the core support for single-line character displays, to be
- selected by drivers that use it.
-
config HD44780_COMMON
tristate "Common functions for HD44780 (and compatibles) LCD displays" if COMPILE_TEST
select CHARLCD
@@ -52,145 +49,6 @@ config HD44780
kernel and started at boot.
If you don't understand what all this is about, say N.
-config KS0108
- tristate "KS0108 LCD Controller"
- depends on PARPORT_PC
- default n
- help
- If you have a LCD controlled by one or more KS0108
- controllers, say Y. You will need also another more specific
- driver for your LCD.
-
- Depends on Parallel Port support. If you say Y at
- parport, you will be able to compile this as a module (M)
- and built-in as well (Y).
-
- To compile this as a module, choose M here:
- the module will be called ks0108.
-
- If unsure, say N.
-
-config KS0108_PORT
- hex "Parallel port where the LCD is connected"
- depends on KS0108
- default 0x378
- help
- The address of the parallel port where the LCD is connected.
-
- The first standard parallel port address is 0x378.
- The second standard parallel port address is 0x278.
- The third standard parallel port address is 0x3BC.
-
- You can specify a different address if you need.
-
- If you don't know what I'm talking about, load the parport module,
- and execute "dmesg" or "cat /proc/ioports". You can see there how
- many parallel ports are present and which address each one has.
-
- Usually you only need to use 0x378.
-
- If you compile this as a module, you can still override this
- using the module parameters.
-
-config KS0108_DELAY
- int "Delay between each control writing (microseconds)"
- depends on KS0108
- default "2"
- help
- Amount of time the ks0108 should wait between each control write
- to the parallel port.
-
- If your LCD seems to miss random writings, increment this.
-
- If you don't know what I'm talking about, ignore it.
-
- If you compile this as a module, you can still override this
- value using the module parameters.
-
-config CFAG12864B
- tristate "CFAG12864B LCD"
- depends on X86
- depends on FB
- depends on KS0108
- select FB_SYSMEM_HELPERS
- default n
- help
- If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
- say Y. You also need the ks0108 LCD Controller driver.
-
- For help about how to wire your LCD to the parallel port,
- check Documentation/admin-guide/auxdisplay/cfag12864b.rst
-
- Depends on the x86 arch and the framebuffer support.
-
- The LCD framebuffer driver can be attached to a console.
- It will work fine. However, you can't attach it to the fbdev driver
- of the xorg server.
-
- To compile this as a module, choose M here:
- the modules will be called cfag12864b and cfag12864bfb.
-
- If unsure, say N.
-
-config CFAG12864B_RATE
- int "Refresh rate (hertz)"
- depends on CFAG12864B
- default "20"
- help
- Refresh rate of the LCD.
-
- As the LCD is not memory mapped, the driver has to make the work by
- software. This means you should be careful setting this value higher.
- If your CPUs are really slow or you feel the system is slowed down,
- decrease the value.
-
- Be careful modifying this value to a very high value:
- You can freeze the computer, or the LCD maybe can't draw as fast as you
- are requesting.
-
- If you don't know what I'm talking about, ignore it.
-
- If you compile this as a module, you can still override this
- value using the module parameters.
-
-config IMG_ASCII_LCD
- tristate "Imagination Technologies ASCII LCD Display"
- depends on HAS_IOMEM
- default y if MIPS_MALTA
- select MFD_SYSCON
- select LINEDISP
- help
- Enable this to support the simple ASCII LCD displays found on
- development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
- from Imagination Technologies.
-
-config HT16K33
- tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
- select FB_SYSMEM_HELPERS
- select INPUT_MATRIXKMAP
- select FB_BACKLIGHT
- select NEW_LEDS
- select LEDS_CLASS
- select LINEDISP
- help
- Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
- LED controller driver with keyscan.
-
-config MAX6959
- tristate "Maxim MAX6958/6959 7-segment LED controller"
- depends on I2C
- select REGMAP_I2C
- select LINEDISP
- help
- If you say yes here you get support for the following Maxim chips
- (I2C 7-segment LED display controller):
- - MAX6958
- - MAX6959 (input support)
-
- This driver can also be built as a module. If so, the module
- will be called max6959.
-
config LCD2S
tristate "lcd2s 20x4 character display over I2C console"
depends on I2C
@@ -201,27 +59,6 @@ config LCD2S
is a simple single color character display. You have to connect it
to an I2C bus.
-config ARM_CHARLCD
- bool "ARM Ltd. Character LCD Driver"
- depends on PLAT_VERSATILE
- help
- This is a driver for the character LCD found on the ARM Ltd.
- Versatile and RealView Platform Baseboards. It doesn't do
- very much more than display the text "ARM Linux" on the first
- line and the Linux version on the second line, but that's
- still useful.
-
-config SEG_LED_GPIO
- tristate "Generic 7-segment LED display"
- depends on GPIOLIB || COMPILE_TEST
- select LINEDISP
- help
- This driver supports a generic 7-segment LED display made up
- of GPIO pins connected to the individual segments.
-
- This driver can also be built as a module. If so, the module
- will be called seg-led-gpio.
-
menuconfig PARPORT_PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
@@ -480,7 +317,6 @@ endif # PARPORT_PANEL
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
depends on CHARLCD
- default "n"
help
This allows you to replace the boot message indicating the kernel version
and the driver version with a custom message. This is useful on appliances
@@ -529,8 +365,184 @@ choice
endchoice
+#
+# Samsung KS0108 LCD controller section
+#
+config KS0108
+ tristate "KS0108 LCD Controller"
+ depends on PARPORT_PC
+ help
+ If you have a LCD controlled by one or more KS0108
+ controllers, say Y. You will need also another more specific
+ driver for your LCD.
+
+ Depends on Parallel Port support. If you say Y at
+ parport, you will be able to compile this as a module (M)
+ and built-in as well (Y).
+
+ To compile this as a module, choose M here:
+ the module will be called ks0108.
+
+ If unsure, say N.
+
+config KS0108_PORT
+ hex "Parallel port where the LCD is connected"
+ depends on KS0108
+ default 0x378
+ help
+ The address of the parallel port where the LCD is connected.
+
+ The first standard parallel port address is 0x378.
+ The second standard parallel port address is 0x278.
+ The third standard parallel port address is 0x3BC.
+
+ You can specify a different address if you need.
+
+ If you don't know what I'm talking about, load the parport module,
+ and execute "dmesg" or "cat /proc/ioports". You can see there how
+ many parallel ports are present and which address each one has.
+
+ Usually you only need to use 0x378.
+
+ If you compile this as a module, you can still override this
+ using the module parameters.
+
+config KS0108_DELAY
+ int "Delay between each control writing (microseconds)"
+ depends on KS0108
+ default "2"
+ help
+ Amount of time the ks0108 should wait between each control write
+ to the parallel port.
+
+ If your LCD seems to miss random writings, increment this.
+
+ If you don't know what I'm talking about, ignore it.
+
+ If you compile this as a module, you can still override this
+ value using the module parameters.
+
+config CFAG12864B
+ tristate "CFAG12864B LCD"
+ depends on X86
+ depends on FB
+ depends on KS0108
+ select FB_SYSMEM_HELPERS
+ help
+ If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
+ say Y. You also need the ks0108 LCD Controller driver.
+
+ For help about how to wire your LCD to the parallel port,
+ check Documentation/admin-guide/auxdisplay/cfag12864b.rst
+
+ Depends on the x86 arch and the framebuffer support.
+
+ The LCD framebuffer driver can be attached to a console.
+ It will work fine. However, you can't attach it to the fbdev driver
+ of the xorg server.
+
+ To compile this as a module, choose M here:
+ the modules will be called cfag12864b and cfag12864bfb.
+
+ If unsure, say N.
+
+config CFAG12864B_RATE
+ int "Refresh rate (hertz)"
+ depends on CFAG12864B
+ default "20"
+ help
+ Refresh rate of the LCD.
+
+ As the LCD is not memory mapped, the driver has to make the work by
+ software. This means you should be careful setting this value higher.
+ If your CPUs are really slow or you feel the system is slowed down,
+ decrease the value.
+
+ Be careful modifying this value to a very high value:
+ You can freeze the computer, or the LCD maybe can't draw as fast as you
+ are requesting.
+
+ If you don't know what I'm talking about, ignore it.
+
+ If you compile this as a module, you can still override this
+ value using the module parameters.
+
+#
+# Single character line display section
+#
+config LINEDISP
+ tristate "Character line display core support" if COMPILE_TEST
+ help
+ This is the core support for single-line character displays, to be
+ selected by drivers that use it.
+
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
+ default y if MIPS_MALTA
+ select MFD_SYSCON
+ select LINEDISP
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
+config HT16K33
+ tristate "Holtek Ht16K33 LED controller with keyscan"
+ depends on FB && I2C && INPUT
+ select FB_SYSMEM_HELPERS
+ select INPUT_MATRIXKMAP
+ select FB_BACKLIGHT
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LINEDISP
+ help
+ Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
+ LED controller driver with keyscan.
+
+config MAX6959
+ tristate "Maxim MAX6958/6959 7-segment LED controller"
+ depends on I2C
+ select REGMAP_I2C
+ select LINEDISP
+ help
+ If you say yes here you get support for the following Maxim chips
+ (I2C 7-segment LED display controller):
+ - MAX6958
+ - MAX6959 (input support)
+
+ This driver can also be built as a module. If so, the module
+ will be called max6959.
+
+config SEG_LED_GPIO
+ tristate "Generic 7-segment LED display"
+ depends on GPIOLIB || COMPILE_TEST
+ select LINEDISP
+ help
+ This driver supports a generic 7-segment LED display made up
+ of GPIO pins connected to the individual segments.
+
+ This driver can also be built as a module. If so, the module
+ will be called seg-led-gpio.
+
+#
+# Character LCD with non-conforming interface section
+#
+config ARM_CHARLCD
+ bool "ARM Ltd. Character LCD Driver"
+ depends on PLAT_VERSATILE
+ help
+ This is a driver for the character LCD found on the ARM Ltd.
+ Versatile and RealView Platform Baseboards. It doesn't do
+ very much more than display the text "ARM Linux" on the first
+ line and the Linux version on the second line, but that's
+ still useful.
+
endif # AUXDISPLAY
+#
+# Deprecated options
+#
config PANEL
tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
depends on PARPORT
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 4a8ea41b0550..f5c13ed1cd4f 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -3,16 +3,16 @@
# Makefile for the kernel auxiliary displays device drivers.
#
-obj-$(CONFIG_CHARLCD) += charlcd.o
-obj-$(CONFIG_HD44780_COMMON) += hd44780_common.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
-obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_CHARLCD) += charlcd.o
+obj-$(CONFIG_HD44780_COMMON) += hd44780_common.o
obj-$(CONFIG_HD44780) += hd44780.o
obj-$(CONFIG_HT16K33) += ht16k33.o
-obj-$(CONFIG_PARPORT_PANEL) += panel.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_LCD2S) += lcd2s.o
obj-$(CONFIG_LINEDISP) += line-display.o
obj-$(CONFIG_MAX6959) += max6959.o
+obj-$(CONFIG_PARPORT_PANEL) += panel.o
obj-$(CONFIG_SEG_LED_GPIO) += seg-led-gpio.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 6d309e4971b6..bb9463814454 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -17,7 +17,9 @@
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
+#endif
#include "charlcd.h"
@@ -678,4 +680,5 @@ int charlcd_unregister(struct charlcd *lcd)
}
EXPORT_SYMBOL_GPL(charlcd_unregister);
+MODULE_DESCRIPTION("Character LCD core support");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index 35a8dbb1e9d2..183ab3011cbb 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -81,14 +81,12 @@ static int seg_led_probe(struct platform_device *pdev)
return linedisp_register(&priv->linedisp, dev, 1, &seg_led_linedisp_ops);
}
-static int seg_led_remove(struct platform_device *pdev)
+static void seg_led_remove(struct platform_device *pdev)
{
struct seg_led_priv *priv = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&priv->work);
linedisp_unregister(&priv->linedisp);
-
- return 0;
}
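Converting to .remove_new means the remove callback returns void, so teardown errors can no longer be (pointlessly) propagated to the driver core. A hedged sketch for a hypothetical platform driver, showing only the remove wiring:

	static void my_pdrv_remove(struct platform_device *pdev)
	{
		/* Clean up; a void return means no error code can be reported here. */
	}

	static struct platform_driver my_pdrv = {
		.remove_new	= my_pdrv_remove,	/* was: .remove returning int */
		.driver		= {
			.name	= "my-pdrv",		/* hypothetical name */
		},
	};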
static const struct of_device_id seg_led_of_match[] = {
@@ -99,7 +97,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);
static struct platform_driver seg_led_driver = {
.probe = seg_led_probe,
- .remove = seg_led_remove,
+ .remove_new = seg_led_remove,
.driver = {
.name = "seg-led-gpio",
.of_match_table = seg_led_of_match,
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 024b78a0cfc1..c66d070207a0 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,7 +22,7 @@
#include <linux/units.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/thermal_pressure.h>
+#include <trace/events/hw_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
@@ -160,26 +160,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
-DEFINE_PER_CPU(unsigned long, thermal_pressure);
+DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
- * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * topology_update_hw_pressure() - Update HW pressure for CPUs
* @cpus : The related CPUs for which capacity has been reduced
* @capped_freq : The maximum allowed frequency that CPUs can run at
*
- * Update the value of thermal pressure for all @cpus in the mask. The
+ * Update the value of HW pressure for all @cpus in the mask. The
* cpumask should include all (online+offline) affected CPUs, to avoid
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
- * thermal capping. It might be also a boost frequency value, which is bigger
+ * HW capping. It might be also a boost frequency value, which is bigger
* than the internal 'capacity_freq_ref' max frequency. In such case the
* pressure value should simply be removed, since this is an indication that
- * there is no thermal throttling. The @capped_freq must be provided in kHz.
+ * there is no HW throttling. The @capped_freq must be provided in kHz.
*/
-void topology_update_thermal_pressure(const struct cpumask *cpus,
+void topology_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
{
- unsigned long max_capacity, capacity, th_pressure;
+ unsigned long max_capacity, capacity, pressure;
u32 max_freq;
int cpu;
@@ -189,21 +189,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
/*
* Handle properly the boost frequencies, which should simply clean
- * the thermal pressure value.
+ * the HW pressure value.
*/
if (max_freq <= capped_freq)
capacity = max_capacity;
else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
- th_pressure = max_capacity - capacity;
+ pressure = max_capacity - capacity;
- trace_thermal_pressure_update(cpu, th_pressure);
+ trace_hw_pressure_update(cpu, pressure);
for_each_cpu(cpu, cpus)
- WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+ WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
-EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
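Callers follow the rename mechanically; a cpufreq or thermal driver reporting a frequency cap would now do something like the following (the policy and frequency variables are assumed from the surrounding driver):

	static void my_throttle_notify(struct cpufreq_policy *policy,
				       unsigned long capped_freq_khz)
	{
		/*
		 * Formerly topology_update_thermal_pressure(); the semantics are
		 * unchanged, only the naming is now the generic "HW pressure".
		 */
		topology_update_hw_pressure(policy->related_cpus, capped_freq_khz);
	}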
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b93f3c5716ae..5f4e03336e68 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
@@ -533,12 +534,26 @@ static void devlink_dev_release(struct device *dev)
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
- * supplier devices get deleted when it runs, so put it into the "long"
- * workqueue.
+ * supplier devices get deleted when it runs, so put it into the
+ * dedicated workqueue.
*/
- queue_work(system_long_wq, &link->rm_work);
+ queue_work(device_link_wq, &link->rm_work);
}
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+ /*
+ * devlink removal jobs are queued in the dedicated work queue.
+ * To be sure that all removal jobs are terminated, ensure that any
+ * scheduled work has run to completion.
+ */
+ flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
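A subsystem that deletes device links and then frees data those links may still reference can use the new flush point. A hedged sketch, assuming link came from an earlier device_link_add() and my_link_private is hypothetical per-link data:

	/* Queue the SRCU-deferred link release... */
	device_link_del(link);

	/* ...and make sure device_link_wq has drained before freeing state. */
	device_link_wait_removal();
	kfree(my_link_private);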
static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
@@ -4164,9 +4179,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
+ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ if (!device_link_wq)
+ goto wq_err;
return 0;
+ wq_err:
+ kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 56fba44ba391..c61ecb0c2ae2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -306,7 +306,7 @@ static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
#endif
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 7e2d1f0d903a..82aeb09b3d1b 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -305,6 +305,29 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
}
/**
+ * dev_coredump_put - remove device coredump
+ * @dev: the struct device for the crashed device
+ *
+ * dev_coredump_put() removes the coredump for a given device from the
+ * file system, if one exists, and frees its associated data; otherwise it
+ * does nothing.
+ *
+ * It is useful for modules that do not want to keep the coredump
+ * available after they are unloaded.
+ */
+void dev_coredump_put(struct device *dev)
+{
+ struct device *existing;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ devcd_free(existing, NULL);
+ put_device(existing);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_coredump_put);
+
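Drivers that generate coredumps but do not want them to outlive the driver can now drop them explicitly on unbind. A minimal sketch for a hypothetical driver that created a dump earlier via dev_coredumpv() or friends:

	static void my_drv_remove(struct platform_device *pdev)
	{
		/* Remove any coredump this device created and free its data. */
		dev_coredump_put(&pdev->dev);
	}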
+/**
* dev_coredumpm - create device coredump with read/free methods
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c0436f46cfb7..67858eeb92ed 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -535,7 +535,7 @@ static DEVICE_ATTR_RW(auto_online_blocks);
static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5679f966f676..4a67e83300e1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -208,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -223,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
return;
rettime = ktime_get();
- dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
@@ -1927,7 +1927,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
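The switch from %pS to %ps drops the offset from the printed symbol, which reads better for function pointers that always point at a function's start. Roughly:

	/* %pS: "acpi_subsys_suspend+0x0/0x60"  (symbol + offset/size) */
	/* %ps: "acpi_subsys_suspend"           (symbol only)          */
	dev_info(dev, "calling %ps @ %i\n", cb, task_pid_nr(current));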
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a917219feea6..752b417e8129 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
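Callers that previously checked the return value of device_wakeup_disable() can simply drop the check, since a failure now just means there was nothing to detach. A hedged before/after fragment:

	/* Before: if (device_wakeup_disable(dev)) dev_warn(dev, "..."); */

	/* After: the call is unconditional and cannot fail. */
	device_wakeup_disable(dev);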
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index bcdb25bec77c..83acccdc1008 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -326,20 +326,22 @@ struct regmap_ram_data {
* Create a test register map with data stored in RAM, not intended
* for practical use.
*/
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+#define regmap_init_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data)
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_raw_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+#define regmap_init_raw_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data)
#endif
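The RAM-backed test regmaps now take a struct device, which the KUnit tests supply from a kunit-managed device. A hedged sketch of the updated call site; priv->dev, config and data are assumed to come from the test fixture (e.g. via kunit_device_register()):

	struct regmap *map;

	map = regmap_init_ram(priv->dev, &config, &data);
	if (IS_ERR(map))
		return PTR_ERR(map);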
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 41edd6a430eb..e42433404854 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
- int ret;
+ int ret = 0;
lower = NULL;
upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max + 1],
+ upper = kmemdup(&entry[max - mas.index + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
- int ret;
+ int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
@@ -294,7 +294,7 @@ static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, 0, UINT_MAX);
- unsigned int *entry;;
+ unsigned int *entry;
/* if we've already been called then just return */
if (!mt)
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 0328b0b34284..b5300b7c477e 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -56,5 +56,5 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
-MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_DESCRIPTION("regmap I3C Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index bb2ab6129f38..9c5314785fc2 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -4,11 +4,26 @@
//
// Copyright 2023 Arm Ltd
+#include <kunit/device.h>
+#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"
#define BLOCK_TEST_SIZE 12
+KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
+
+struct regmap_test_priv {
+ struct device *dev;
+};
+
+struct regmap_test_param {
+ enum regcache_type cache;
+ enum regmap_endian val_endian;
+
+ unsigned int from_reg;
+};
+
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
@@ -27,57 +42,128 @@ static void get_changed_bytes(void *orig, void *new, size_t size)
}
static const struct regmap_config test_regmap_config = {
- .max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
.val_bits = sizeof(unsigned int) * 8,
};
-struct regcache_types {
- enum regcache_type type;
- const char *name;
-};
+static const char *regcache_type_name(enum regcache_type type)
+{
+ switch (type) {
+ case REGCACHE_NONE:
+ return "none";
+ case REGCACHE_FLAT:
+ return "flat";
+ case REGCACHE_RBTREE:
+ return "rbtree";
+ case REGCACHE_MAPLE:
+ return "maple";
+ default:
+ return NULL;
+ }
+}
+
+static const char *regmap_endian_name(enum regmap_endian endian)
+{
+ switch (endian) {
+ case REGMAP_ENDIAN_BIG:
+ return "big";
+ case REGMAP_ENDIAN_LITTLE:
+ return "little";
+ case REGMAP_ENDIAN_DEFAULT:
+ return "default";
+ case REGMAP_ENDIAN_NATIVE:
+ return "native";
+ default:
+ return NULL;
+ }
+}
-static void case_to_desc(const struct regcache_types *t, char *desc)
+static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- strcpy(desc, t->name);
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
+ regcache_type_name(param->cache),
+ regmap_endian_name(param->val_endian),
+ param->from_reg);
}
-static const struct regcache_types regcache_types_list[] = {
- { REGCACHE_NONE, "none" },
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param regcache_types_list[] = {
+ { .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
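For illustration, param_to_desc() turns each entry just registered into the name KUnit reports for the case: the regcache_types_list entries above leave val_endian and from_reg at zero, so they come out roughly as "none-default @0", "flat-default @0" and so on, while the real_cache_types_list entries below with non-zero from_reg read like "rbtree-default @0x2001" (from_reg is printed with %#x). A sketch, with some_case as a hypothetical test, of how a parameterised case consumes the generated params, matching the KUNIT_CASE_PARAM() usage at the end of this file:

	static void some_case(struct kunit *test)
	{
		const struct regmap_test_param *param = test->param_value;

		/*
		 * param->cache, param->val_endian and param->from_reg
		 * describe the variant being run.
		 */
	}

	/* in regmap_test_cases[]: */
	KUNIT_CASE_PARAM(some_case, regcache_types_gen_params),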
-static const struct regcache_types real_cache_types_list[] = {
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param real_cache_types_only_list[] = {
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
+
+static const struct regmap_test_param real_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
+};
-static const struct regcache_types sparse_cache_types_list[] = {
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
+
+static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};
-KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
-static struct regmap *gen_regmap(struct regmap_config *config,
+static struct regmap *gen_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
struct regmap *ret;
- size_t size = (config->max_register + 1) * sizeof(unsigned int);
+ size_t size;
int i;
struct reg_default *defaults;
+ config->cache_type = param->cache;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ if (config->max_register == 0) {
+ config->max_register = param->from_reg;
+ if (config->num_reg_defaults)
+ config->max_register += (config->num_reg_defaults - 1) *
+ config->reg_stride;
+ else
+ config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
+ }
+
+ size = (config->max_register + 1) * sizeof(unsigned int);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -98,37 +184,40 @@ static struct regmap *gen_regmap(struct regmap_config *config,
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
- defaults[i].reg = i * config->reg_stride;
- defaults[i].def = buf[i * config->reg_stride];
+ defaults[i].reg = param->from_reg + (i * config->reg_stride);
+ defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
}
}
- ret = regmap_init_ram(config, *data);
+ ret = regmap_init_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
}
-static bool reg_5_false(struct device *context, unsigned int reg)
+static bool reg_5_false(struct device *dev, unsigned int reg)
{
- return reg != 5;
+ struct kunit *test = dev_get_drvdata(dev);
+ const struct regmap_test_param *param = test->param_value;
+
+ return reg != (param->from_reg + 5);
}
static void basic_read_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -141,14 +230,11 @@ static void basic_read_write(struct kunit *test)
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
static void bulk_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -156,9 +242,8 @@ static void bulk_write(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -178,14 +263,11 @@ static void bulk_write(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void bulk_read(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -193,9 +275,8 @@ static void bulk_read(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -211,14 +292,140 @@ static void bulk_read(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
- regmap_exit(map);
+static void read_bypassed(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only regmap_read_bypassed() should return the register
+ * value and leave the map in cache-only.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_NE(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+}
+
+static void read_bypassed_volatile(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+ /* All registers except #5 volatile */
+ config.volatile_reg = reg_5_false;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only regmap_read_bypassed() should return the register
+ * value and leave the map in cache-only.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Register #5 is non-volatile so should read from cache */
+ KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
+ regmap_read(map, param->from_reg + i, &rval));
+
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ if (i == 5)
+ continue;
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
}
static void write_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -226,11 +433,10 @@ static void write_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -247,13 +453,10 @@ static void write_readonly(struct kunit *test)
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -261,10 +464,9 @@ static void read_writeonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.readable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -277,7 +479,7 @@ static void read_writeonly(struct kunit *test)
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- if (t->type != REGCACHE_FLAT) {
+ if (config.cache_type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
@@ -287,13 +489,10 @@ static void read_writeonly(struct kunit *test)
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
-
- regmap_exit(map);
}
static void reg_defaults(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -301,10 +500,9 @@ static void reg_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -316,12 +514,11 @@ static void reg_defaults(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void reg_defaults_read_dev(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -329,17 +526,16 @@ static void reg_defaults_read_dev(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* We should have read the cache defaults back from the map */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
@@ -350,12 +546,11 @@ static void reg_defaults_read_dev(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void register_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -365,10 +560,9 @@ static void register_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -401,13 +595,10 @@ static void register_patch(struct kunit *test)
break;
}
}
-
- regmap_exit(map);
}
static void stride(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -415,11 +606,10 @@ static void stride(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -437,15 +627,13 @@ static void stride(struct kunit *test)
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
-
- regmap_exit(map);
}
static struct regmap_range_cfg test_range = {
@@ -481,7 +669,6 @@ static bool test_range_all_volatile(struct device *dev, unsigned int reg)
static void basic_ranges(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -489,13 +676,12 @@ static void basic_ranges(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -546,14 +732,11 @@ static void basic_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -562,10 +745,9 @@ static void stress_insert(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.max_register = 300;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -599,24 +781,21 @@ static void stress_insert(struct kunit *test)
for (i = 0; i < config.max_register; i ++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, rval, vals[i]);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
-
- regmap_exit(map);
}
static void cache_bypass(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -624,28 +803,26 @@ static void cache_bypass(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
/* Read the bypassed value */
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
- KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
-static void cache_sync(struct kunit *test)
+static void cache_sync_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -653,9 +830,8 @@ static void cache_sync(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -663,10 +839,10 @@ static void cache_sync(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
@@ -674,16 +850,63 @@ static void cache_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
- KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE];
+ unsigned int val_mask;
+ int i;
- regmap_exit(map);
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ val_mask = GENMASK(config.val_bits - 1, 0);
+ get_random_bytes(&val, sizeof(val));
+
+ /* Put some data into the cache */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ /* Set cache-only and change the values */
+ regcache_cache_only(map, true);
+ for (i = 0; i < ARRAY_SIZE(val); ++i)
+ val[i] = ~val[i] & val_mask;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+
+ KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+
+ /* Exit cache-only and sync the cache without marking hardware registers dirty */
+ regcache_cache_only(map, false);
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just write the correct data out? */
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
-static void cache_sync_defaults(struct kunit *test)
+static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -691,10 +914,9 @@ static void cache_sync_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -702,24 +924,85 @@ static void cache_sync_defaults(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
/* Resync */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+ KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);
+
+ /* Rewrite registers back to their defaults */
+ for (i = 0; i < config.num_reg_defaults; ++i)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
+ config.reg_defaults[i].def));
- regmap_exit(map);
+ /*
+ * Resync after regcache_mark_dirty() should not write out registers
+ * that are at default value
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ regcache_mark_dirty(map);
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_default_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int orig_val;
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));
+
+ /* Enter cache-only and change the value of one register */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
+
+ /* Exit cache-only and resync, should write out the changed register */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Was the register written out? */
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);
+
+ /* Enter cache-only and write register back to its default value */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
+
+ /* Resync should write out the new value */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
static void cache_sync_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -727,40 +1010,37 @@ static void cache_sync_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
+ KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
static void cache_sync_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -770,23 +1050,22 @@ static void cache_sync_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
- patch[0].reg = 2;
+ patch[0].reg = param->from_reg + 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
- patch[1].reg = 5;
+ patch[1].reg = param->from_reg + 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
@@ -795,33 +1074,31 @@ static void cache_sync_patch(struct kunit *test)
/* Sync the cache */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
switch (i) {
case 2:
case 5:
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
break;
default:
- KUNIT_EXPECT_EQ(test, false, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
break;
}
}
-
- regmap_exit(map);
}
static void cache_drop(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -829,41 +1106,267 @@ static void cache_drop(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Ensure the data is read from the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_FALSE(test, data->read[i]);
- data->read[i] = false;
+ KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
+ data->read[param->from_reg + i] = false;
}
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Drop some registers */
- KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
+ param->from_reg + 5));
/* Reread and check only the dropped registers hit the device. */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+}
+
+static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[4][BLOCK_TEST_SIZE];
+ unsigned int reg;
+ const int num_ranges = ARRAY_SIZE(val) * 2;
+ int rangeidx, i;
- regmap_exit(map);
+ static_assert(ARRAY_SIZE(val) == 4);
+
+ config = test_regmap_config;
+ config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < config.max_register + 1; i++)
+ data->written[i] = false;
+
+ /* Create non-contiguous cache blocks by writing every other range */
+ get_random_bytes(&val, sizeof(val));
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
+ &val[rangeidx / 2],
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Drop range 2 */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
+
+ /* Drop part of range 4 */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
+
+ /* Mark dirty and reset mock registers to 0 */
+ regcache_mark_dirty(map);
+ for (i = 0; i < config.max_register + 1; i++) {
+ data->vals[i] = 0;
+ data->written[i] = false;
+ }
+
+ /* The registers that were dropped from range 4 should now remain at 0 */
+ val[4 / 2][3] = 0;
+ val[4 / 2][4] = 0;
+ val[4 / 2][5] = 0;
+
+ /* Sync and check that the expected register ranges were written */
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Check that even ranges (except 2 and 4) were written */
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ if ((rangeidx == 2) || (rangeidx == 4))
+ continue;
+
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that range 2 wasn't written */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+
+ /* Check that range 4 was partially written */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));
+
+ /* Nothing before param->from_reg should have been written */
+ for (i = 0; i < param->from_reg; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /* Mark dirty and cache sync should not write anything. */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
}
static void cache_present(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -871,39 +1374,35 @@ static void cache_present(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
+ data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, data->read[i]);
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Now everything should be cached */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
-
- regmap_exit(map);
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -911,13 +1410,12 @@ static void cache_range_window_reg(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_window_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -953,41 +1451,29 @@ static void cache_range_window_reg(struct kunit *test)
KUNIT_ASSERT_EQ(test, val, 2);
}
-struct raw_test_types {
- const char *name;
-
- enum regcache_type cache_type;
- enum regmap_endian val_endian;
-};
-
-static void raw_to_desc(const struct raw_test_types *t, char *desc)
-{
- strcpy(desc, t->name);
-}
-
-static const struct raw_test_types raw_types_list[] = {
- { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
- { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_types_list[] = {
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
-static const struct raw_test_types raw_cache_types_list[] = {
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
@@ -997,18 +1483,20 @@ static const struct regmap_config raw_regmap_config = {
.val_bits = 16,
};
-static struct regmap *gen_raw_regmap(struct regmap_config *config,
- struct raw_test_types *test_type,
+static struct regmap *gen_raw_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ struct regmap_test_priv *priv = test->priv;
+ const struct regmap_test_param *param = test->param_value;
u16 *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * config->reg_bits / 8;
int i;
struct reg_default *defaults;
- config->cache_type = test_type->cache_type;
- config->val_format_endian = test_type->val_endian;
+ config->cache_type = param->cache;
+ config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
@@ -1033,7 +1521,7 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
- switch (test_type->val_endian) {
+ switch (param->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
@@ -1052,10 +1540,12 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
- ret = regmap_init_raw_ram(config, *data);
+ ret = regmap_init_raw_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
@@ -1063,7 +1553,6 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
static void raw_read_defaults_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1072,7 +1561,7 @@ static void raw_read_defaults_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1082,13 +1571,10 @@ static void raw_read_defaults_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
-
- regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1099,35 +1585,31 @@ static void raw_read_defaults(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
val_len = sizeof(*rval) * (config.max_register + 1);
- rval = kmalloc(val_len, GFP_KERNEL);
+ rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
-
+
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
- KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
} else {
- KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
}
}
-
- kfree(rval);
- regmap_exit(map);
}
static void raw_write_read_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1136,7 +1618,7 @@ static void raw_write_read_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1147,13 +1629,10 @@ static void raw_write_read_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1164,7 +1643,7 @@ static void raw_write(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1185,10 +1664,10 @@ static void raw_write(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu((__force __be16)val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu((__force __le16)val[i % 2]));
}
break;
default:
@@ -1199,8 +1678,6 @@ static void raw_write(struct kunit *test)
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
-
- regmap_exit(map);
}
static bool reg_zero(struct device *dev, unsigned int reg)
@@ -1215,7 +1692,6 @@ static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
static void raw_noinc_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1228,7 +1704,7 @@ static void raw_noinc_write(struct kunit *test)
config.writeable_noinc_reg = reg_zero;
config.readable_noinc_reg = reg_zero;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1259,13 +1735,10 @@ static void raw_noinc_write(struct kunit *test)
/* Make sure we didn't touch the register after the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_ASSERT_EQ(test, val_test, val);
-
- regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1276,7 +1749,7 @@ static void raw_sync(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1300,10 +1773,10 @@ static void raw_sync(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i - 2]));
+ be16_to_cpu((__force __be16)val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i - 2]));
+ le16_to_cpu((__force __le16)val[i - 2]));
}
break;
case 4:
@@ -1323,7 +1796,7 @@ static void raw_sync(struct kunit *test)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
-
+
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
@@ -1337,13 +1810,10 @@ static void raw_sync(struct kunit *test)
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
-
- regmap_exit(map);
}
static void raw_ranges(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1356,7 +1826,7 @@ static void raw_ranges(struct kunit *test)
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1402,12 +1872,12 @@ static void raw_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
@@ -1419,13 +1889,19 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
@@ -1437,8 +1913,40 @@ static struct kunit_case regmap_test_cases[] = {
{}
};
+static int regmap_test_init(struct kunit *test)
+{
+ struct regmap_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ test->priv = priv;
+
+ dev = kunit_device_register(test, "regmap_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ priv->dev = get_device(dev);
+ dev_set_drvdata(dev, test);
+
+ return 0;
+}
+
+static void regmap_test_exit(struct kunit *test)
+{
+ struct regmap_test_priv *priv = test->priv;
+
+ /* Destroy the dummy struct device */
+ if (priv && priv->dev)
+ put_device(priv->dev);
+}
+
static struct kunit_suite regmap_test_suite = {
.name = "regmap",
+ .init = regmap_test_init,
+ .exit = regmap_test_exit,
.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6aa6a2409478..9573bf3b52f4 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -117,5 +117,5 @@ struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
-MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_DESCRIPTION("regmap MDIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 192d6b131dff..5b4cbf982a11 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -53,7 +53,8 @@ static const struct regmap_bus regmap_ram = {
.free_context = regmap_ram_free_context,
};
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -75,7 +76,7 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
if (!data->written)
return ERR_PTR(-ENOMEM);
- map = __regmap_init(NULL, &regmap_ram, data, config,
+ map = __regmap_init(dev, &regmap_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 93ae07b503fd..69eabfb89eda 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -107,7 +107,8 @@ static const struct regmap_bus regmap_raw_ram = {
.free_context = regmap_raw_ram_free_context,
};
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -134,7 +135,7 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
data->reg_endian = config->reg_format_endian;
- map = __regmap_init(NULL, &regmap_raw_ram, data, config,
+ map = __regmap_init(dev, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 388c3a087bd9..c99eada83780 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -97,5 +97,5 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
-MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
+MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 159c0b740b00..ea631ac7c7ec 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -98,5 +98,5 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
-MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_DESCRIPTION("regmap SoundWire Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37ab23a9d034..094cf2a2ca3c 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -165,4 +165,5 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
+MODULE_DESCRIPTION("regmap SPI Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 5cb425f6f02d..0a34dd3c4f38 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2839,6 +2839,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_read);
/**
+ * regmap_read_bypassed() - Read a value from a single register directly
+ * from the device, bypassing the cache
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+ bool bypass, cache_only;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+ cache_only = map->cache_only;
+ map->cache_bypass = true;
+ map->cache_only = false;
+
+ ret = _regmap_read(map, reg, val);
+
+ map->cache_bypass = bypass;
+ map->cache_only = cache_only;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read_bypassed);
+
+/**
* regmap_raw_read() - Read raw data from the device
*
* @map: Register map to read from
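
A minimal usage sketch for the new regmap_read_bypassed() helper; "map" is a driver-private regmap and CHIP_REV is a hypothetical register, both assumed for illustration. The helper is intended for reads that must reach the hardware even while the map is in cache-only mode, for example before the device is fully powered and synced:

	unsigned int rev;
	int ret;

	/* CHIP_REV is a placeholder register; the read goes straight to the
	 * device, ignoring the register cache and any cache_only setting.
	 */
	ret = regmap_read_bypassed(map, CHIP_REV, &rev);
	if (ret)
		return ret;
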
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 90d5bdc12e03..8ae0b918e740 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -240,15 +240,13 @@ err_unmap_mmio:
return err;
}
-static int bcma_host_soc_remove(struct platform_device *pdev)
+static void bcma_host_soc_remove(struct platform_device *pdev)
{
struct bcma_bus *bus = platform_get_drvdata(pdev);
bcma_bus_unregister(bus);
iounmap(bus->mmio);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const struct of_device_id bcma_host_soc_of_match[] = {
@@ -263,7 +261,7 @@ static struct platform_driver bcma_host_soc_driver = {
.of_match_table = bcma_host_soc_of_match,
},
.probe = bcma_host_soc_probe,
- .remove = bcma_host_soc_remove,
+ .remove_new = bcma_host_soc_remove,
};
int __init bcma_host_soc_register_driver(void)
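
The bcma change above follows the tree-wide platform driver conversion: the remove callback now returns void and is wired up through .remove_new. A generic sketch of the pattern, with foo_* names as placeholders:

	static void foo_remove(struct platform_device *pdev)
	{
		struct foo *foo = platform_get_drvdata(pdev);

		foo_teardown(foo);	/* nothing useful to return to the core */
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
		.probe = foo_probe,
		.remove_new = foo_remove,
	};
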
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index e322cef6596b..b900fe9e0030 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -29,10 +29,7 @@
/*
* Each block ramdisk device has an xarray brd_pages that stores
- * the pages containing the block device's contents. A brd page's ->index is
- * its offset in PAGE_SIZE units. This is similar to, but in no way connected
- * with, the kernel's pagecache or buffer cache (which sit above our block
- * device).
+ * the pages containing the block device's contents.
*/
struct brd_device {
int brd_number;
@@ -51,15 +48,7 @@ struct brd_device {
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- pgoff_t idx;
- struct page *page;
-
- idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
- page = xa_load(&brd->brd_pages, idx);
-
- BUG_ON(page && page->index != idx);
-
- return page;
+ return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
}
/*
@@ -67,8 +56,8 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
*/
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
- pgoff_t idx;
- struct page *page, *cur;
+ pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
+ struct page *page;
int ret = 0;
page = brd_lookup_page(brd, sector);
@@ -80,23 +69,16 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
return -ENOMEM;
xa_lock(&brd->brd_pages);
-
- idx = sector >> PAGE_SECTORS_SHIFT;
- page->index = idx;
-
- cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp);
-
- if (unlikely(cur)) {
- __free_page(page);
- ret = xa_err(cur);
- if (!ret && (cur->index != idx))
- ret = -EIO;
- } else {
+ ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
+ if (!ret)
brd->brd_nr_pages++;
- }
-
xa_unlock(&brd->brd_pages);
+ if (ret < 0) {
+ __free_page(page);
+ if (ret == -EBUSY)
+ ret = 0;
+ }
return ret;
}
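
The rewritten brd_insert_page() relies on the xarray insert-if-absent semantics: __xa_insert() fails with -EBUSY when the slot is already populated, and the caller treats that as success. A condensed sketch of the pattern, assuming an xarray "xa", an index "idx" and a freshly allocated "page":

	xa_lock(&xa);
	err = __xa_insert(&xa, idx, page, gfp);
	xa_unlock(&xa);

	if (err < 0) {
		__free_page(page);	/* not inserted, drop our page */
		if (err == -EBUSY)
			err = 0;	/* an existing page is fine */
	}
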
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28a95fd366fe..93780f41646b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -445,9 +445,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
if (rw == ITER_SOURCE)
- ret = call_write_iter(file, &cmd->iocb, &iter);
+ ret = file->f_op->write_iter(&cmd->iocb, &iter);
else
- ret = call_read_iter(file, &cmd->iocb, &iter);
+ ret = file->f_op->read_iter(&cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 71c39bcd872c..4005a8b685e8 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -225,6 +225,10 @@ static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+static bool g_fua = true;
+module_param_named(fua, g_fua, bool, 0444);
+MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
+
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
@@ -253,6 +257,11 @@ static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
+static int g_zone_append_max_sectors = INT_MAX;
+module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
+MODULE_PARM_DESC(zone_append_max_sectors,
+ "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -436,10 +445,12 @@ NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
+NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
+NULLB_DEVICE_ATTR(fua, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -580,12 +591,14 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_readonly,
&nullb_device_attr_zone_offline,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_fua,
NULL,
};
@@ -664,14 +677,14 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,"
+ "badblocks,blocking,blocksize,cache_size,fua,"
"completion_nsec,discard,home_node,hw_queue_depth,"
"irqmode,max_sectors,mbps,memory_backed,no_sched,"
"poll_queues,power,queue_mode,shared_tag_bitmap,"
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size\n");
+ "zone_size,zone_append_max_sectors\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -751,10 +764,13 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_nr_conv = g_zone_nr_conv;
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
+ dev->zone_append_max_sectors = g_zone_append_max_sectors;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
+ dev->fua = g_fua;
+
return dev;
}
@@ -1151,7 +1167,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
return BLK_STS_OK;
}
-static int null_handle_flush(struct nullb *nullb)
+static blk_status_t null_handle_flush(struct nullb *nullb)
{
int err;
@@ -1168,7 +1184,7 @@ static int null_handle_flush(struct nullb *nullb)
WARN_ON(!radix_tree_empty(&nullb->dev->cache));
spin_unlock_irq(&nullb->lock);
- return err;
+ return errno_to_blk_status(err);
}
static int null_transfer(struct nullb *nullb, struct page *page,
@@ -1206,7 +1222,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
- int err;
+ int err = 0;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
struct req_iterator iter;
@@ -1218,15 +1234,13 @@ static int null_handle_rq(struct nullb_cmd *cmd)
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
- if (err) {
- spin_unlock_irq(&nullb->lock);
- return err;
- }
+ if (err)
+ break;
sector += len >> SECTOR_SHIFT;
}
spin_unlock_irq(&nullb->lock);
- return 0;
+ return errno_to_blk_status(err);
}
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
@@ -1273,8 +1287,8 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return errno_to_blk_status(null_handle_rq(cmd));
+ return null_handle_rq(cmd);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1343,7 +1357,7 @@ static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
blk_status_t sts;
if (op == REQ_OP_FLUSH) {
- cmd->error = errno_to_blk_status(null_handle_flush(nullb));
+ cmd->error = null_handle_flush(nullb);
goto out;
}
@@ -1912,7 +1926,7 @@ static int null_add_dev(struct nullb_device *dev)
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
- blk_queue_write_cache(nullb->q, true, true);
+ blk_queue_write_cache(nullb->q, true, dev->fua);
}
nullb->q->queuedata = nullb;
@@ -1965,10 +1979,10 @@ static int null_add_dev(struct nullb_device *dev)
out_ida_free:
ida_free(&nullb_indexes, nullb->index);
-out_cleanup_zone:
- null_free_zoned_dev(dev);
out_cleanup_disk:
put_disk(nullb->disk);
+out_cleanup_zone:
+ null_free_zoned_dev(dev);
out_cleanup_tags:
if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@@ -2113,10 +2127,13 @@ static void __exit null_exit(void)
if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
+
+ mutex_destroy(&lock);
}
module_init(null_init);
module_exit(null_exit);
MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
+MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 477b97746823..3234e6c85eed 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -82,6 +82,7 @@ struct nullb_device {
unsigned int zone_nr_conv; /* number of conventional zones */
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
+ unsigned int zone_append_max_sectors; /* Max sectors per zone append command */
unsigned int submit_queues; /* number of submission queues */
unsigned int prev_submit_queues; /* number of submission queues before change */
unsigned int poll_queues; /* number of IOPOLL submission queues */
@@ -104,6 +105,7 @@ struct nullb_device {
bool no_sched; /* no IO scheduler for the device */
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
+ bool fua; /* Support FUA */
};
struct nullb {
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 1689e2584104..5b5a63adacc1 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -9,6 +9,8 @@
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
+#define NULL_ZONE_INVALID_WP ((sector_t)-1)
+
static inline sector_t mb_to_sects(unsigned long mb)
{
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
@@ -19,18 +21,6 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
return sect >> ilog2(dev->zone_size_sects);
}
-static inline void null_lock_zone_res(struct nullb_device *dev)
-{
- if (dev->need_zone_res_mgmt)
- spin_lock_irq(&dev->zone_res_lock);
-}
-
-static inline void null_unlock_zone_res(struct nullb_device *dev)
-{
- if (dev->need_zone_res_mgmt)
- spin_unlock_irq(&dev->zone_res_lock);
-}
-
static inline void null_init_zone_lock(struct nullb_device *dev,
struct nullb_zone *zone)
{
@@ -103,6 +93,11 @@ int null_init_zoned_dev(struct nullb_device *dev,
dev->zone_nr_conv);
}
+ dev->zone_append_max_sectors =
+ min(ALIGN_DOWN(dev->zone_append_max_sectors,
+ dev->blocksize >> SECTOR_SHIFT),
+ zone_capacity_sects);
+
/* Max active zones has to be < nbr of seq zones in order to be enforceable */
if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_active = 0;
@@ -154,7 +149,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->zoned = true;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_size_sects;
+ lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
@@ -163,11 +158,16 @@ int null_init_zoned_dev(struct nullb_device *dev,
int null_register_zoned_dev(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
+ struct gendisk *disk = nullb->disk;
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
- nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- return blk_revalidate_disk_zones(nullb->disk, NULL);
+ disk->nr_zones = bdev_nr_zones(disk->part0);
+
+ pr_info("%s: using %s zone append\n",
+ disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
+
+ return blk_revalidate_disk_zones(disk);
}
void null_free_zoned_dev(struct nullb_device *dev)
@@ -241,35 +241,6 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
return (zone->wp - sector) << SECTOR_SHIFT;
}
-static blk_status_t __null_close_zone(struct nullb_device *dev,
- struct nullb_zone *zone)
-{
- switch (zone->cond) {
- case BLK_ZONE_COND_CLOSED:
- /* close operation on closed is not an error */
- return BLK_STS_OK;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_FULL:
- default:
- return BLK_STS_IOERR;
- }
-
- if (zone->wp == zone->start) {
- zone->cond = BLK_ZONE_COND_EMPTY;
- } else {
- zone->cond = BLK_ZONE_COND_CLOSED;
- dev->nr_zones_closed++;
- }
-
- return BLK_STS_OK;
-}
-
static void null_close_imp_open_zone(struct nullb_device *dev)
{
struct nullb_zone *zone;
@@ -286,7 +257,13 @@ static void null_close_imp_open_zone(struct nullb_device *dev)
zno = dev->zone_nr_conv;
if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
- __null_close_zone(dev, zone);
+ dev->nr_zones_imp_open--;
+ if (zone->wp == zone->start) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ dev->nr_zones_closed++;
+ }
dev->imp_close_zone_no = zno;
return;
}
@@ -374,73 +351,73 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
null_lock_zone(dev, zone);
- if (zone->cond == BLK_ZONE_COND_FULL ||
- zone->cond == BLK_ZONE_COND_READONLY ||
- zone->cond == BLK_ZONE_COND_OFFLINE) {
- /* Cannot write to the zone */
- ret = BLK_STS_IOERR;
- goto unlock;
- }
-
/*
- * Regular writes must be at the write pointer position.
- * Zone append writes are automatically issued at the write
- * pointer and the position returned using the request or BIO
- * sector.
+ * Regular writes must be at the write pointer position. Zone append
+ * writes are automatically issued at the write pointer and the position
+ * returned using the request sector. Note that we do not check the zone
+ * condition because for FULL, READONLY and OFFLINE zones, the sector
+ * check against the zone write pointer will always result in failing
+ * the command.
*/
if (append) {
+ if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
+ zone->wp == NULL_ZONE_INVALID_WP) {
+ ret = BLK_STS_IOERR;
+ goto unlock_zone;
+ }
sector = zone->wp;
blk_mq_rq_from_pdu(cmd)->__sector = sector;
- } else if (sector != zone->wp) {
- ret = BLK_STS_IOERR;
- goto unlock;
}
- if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ if (sector != zone->wp ||
+ zone->wp + nr_sectors > zone->start + zone->capacity) {
ret = BLK_STS_IOERR;
- goto unlock;
+ goto unlock_zone;
}
if (zone->cond == BLK_ZONE_COND_CLOSED ||
zone->cond == BLK_ZONE_COND_EMPTY) {
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK) {
- null_unlock_zone_res(dev);
- goto unlock;
- }
- if (zone->cond == BLK_ZONE_COND_CLOSED) {
- dev->nr_zones_closed--;
- dev->nr_zones_imp_open++;
- } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
- dev->nr_zones_imp_open++;
- }
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ goto unlock_zone;
+ }
+ if (zone->cond == BLK_ZONE_COND_CLOSED) {
+ dev->nr_zones_closed--;
+ dev->nr_zones_imp_open++;
+ } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+ dev->nr_zones_imp_open++;
+ }
- if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
- zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ spin_unlock(&dev->zone_res_lock);
+ }
- null_unlock_zone_res(dev);
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
if (ret != BLK_STS_OK)
- goto unlock;
+ goto unlock_zone;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
- null_lock_zone_res(dev);
- if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
- dev->nr_zones_exp_open--;
- else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
- dev->nr_zones_imp_open--;
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
+ if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
+ dev->nr_zones_exp_open--;
+ else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
+ dev->nr_zones_imp_open--;
+ spin_unlock(&dev->zone_res_lock);
+ }
zone->cond = BLK_ZONE_COND_FULL;
- null_unlock_zone_res(dev);
}
ret = BLK_STS_OK;
-unlock:
+unlock_zone:
null_unlock_zone(dev, zone);
return ret;
@@ -454,54 +431,100 @@ static blk_status_t null_open_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
-
switch (zone->cond) {
case BLK_ZONE_COND_EXP_OPEN:
- /* open operation on exp open is not an error */
- goto unlock;
+ /* Open operation on exp open is not an error */
+ return BLK_STS_OK;
case BLK_ZONE_COND_EMPTY:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- break;
case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
case BLK_ZONE_COND_CLOSED:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
default:
- ret = BLK_STS_IOERR;
- goto unlock;
+ return BLK_STS_IOERR;
}
- zone->cond = BLK_ZONE_COND_EXP_OPEN;
- dev->nr_zones_exp_open++;
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
-unlock:
- null_unlock_zone_res(dev);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ dev->nr_zones_closed--;
+ break;
+ default:
+ break;
+ }
- return ret;
+ dev->nr_zones_exp_open++;
+
+ spin_unlock(&dev->zone_res_lock);
+ }
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+
+ return BLK_STS_OK;
}
static blk_status_t null_close_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
- blk_status_t ret;
-
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
- ret = __null_close_zone(dev, zone);
- null_unlock_zone_res(dev);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ /* close operation on closed is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- return ret;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ default:
+ break;
+ }
+
+ if (zone->wp > zone->start)
+ dev->nr_zones_closed++;
+
+ spin_unlock(&dev->zone_res_lock);
+ }
+
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+
+ return BLK_STS_OK;
}
static blk_status_t null_finish_zone(struct nullb_device *dev,
@@ -512,41 +535,47 @@ static blk_status_t null_finish_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- switch (zone->cond) {
- case BLK_ZONE_COND_FULL:
- /* finish operation on full is not an error */
- goto unlock;
- case BLK_ZONE_COND_EMPTY:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- break;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_CLOSED:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- dev->nr_zones_closed--;
- break;
- default:
- ret = BLK_STS_IOERR;
- goto unlock;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* Finish operation on full is not an error */
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ dev->nr_zones_closed--;
+ break;
+ default:
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_IOERR;
+ }
+
+ spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_FULL;
zone->wp = zone->start + zone->len;
-unlock:
- null_unlock_zone_res(dev);
-
- return ret;
+ return BLK_STS_OK;
}
static blk_status_t null_reset_zone(struct nullb_device *dev,
@@ -555,34 +584,33 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- switch (zone->cond) {
- case BLK_ZONE_COND_EMPTY:
- /* reset operation on empty is not an error */
- null_unlock_zone_res(dev);
- return BLK_STS_OK;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_CLOSED:
- dev->nr_zones_closed--;
- break;
- case BLK_ZONE_COND_FULL:
- break;
- default:
- null_unlock_zone_res(dev);
- return BLK_STS_IOERR;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ break;
+ default:
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_IOERR;
+ }
+
+ spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
- null_unlock_zone_res(dev);
-
if (dev->memory_backed)
return null_handle_discard(dev, zone->start, zone->len);
@@ -711,7 +739,7 @@ static void null_set_zone_cond(struct nullb_device *dev,
zone->cond != BLK_ZONE_COND_OFFLINE)
null_finish_zone(dev, zone);
zone->cond = cond;
- zone->wp = (sector_t)-1;
+ zone->wp = NULL_ZONE_INVALID_WP;
}
null_unlock_zone(dev, zone);
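
The common thread in the rewritten zone helpers above is that the open/active zone accounting is only touched when resource management is enabled, and zone_res_lock is never held across the condition update itself. A condensed sketch of that structure, with the condition handling elided and BLK_ZONE_COND_EMPTY used as an illustrative target state:

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);
		/* adjust nr_zones_imp_open / nr_zones_exp_open /
		 * nr_zones_closed for the transition being made
		 */
		spin_unlock(&dev->zone_res_lock);
	}

	/* the zone condition itself is updated outside the resource lock */
	zone->cond = BLK_ZONE_COND_EMPTY;
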
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 21728e9ea5c3..8a2ce8070010 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2215,6 +2215,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
dev_info(ddev, "%lukB available on disc\n", lba << 1);
}
+ set_blocksize(bdev_file, CD_FRAMESIZE);
return 0;
@@ -2278,11 +2279,6 @@ static int pkt_open(struct gendisk *disk, blk_mode_t mode)
ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
if (ret)
goto out_dec;
- /*
- * needed here as well, since ext2 (among others) may change
- * the blocksize at mount time
- */
- set_blocksize(disk->part0, CD_FRAMESIZE);
}
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
@@ -2526,7 +2522,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
__module_get(THIS_MODULE);
pd->bdev_file = bdev_file;
- set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE);
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index bea3d5cf8a83..176657dce3e3 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -221,7 +221,7 @@ static int ublk_get_nr_zones(const struct ublk_device *ub)
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
- return blk_revalidate_disk_zones(ub->ub_disk, NULL);
+ return blk_revalidate_disk_zones(ub->ub_disk);
}
static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
@@ -249,8 +249,7 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
- blk_queue_required_elevator_features(ub->ub_disk->queue,
- ELEVATOR_F_ZBD_SEQ_WRITE);
+
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@@ -2177,7 +2176,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
.max_hw_sectors = p->max_sectors,
.chunk_sectors = p->chunk_sectors,
.virt_boundary_mask = p->virt_boundary_mask,
-
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
};
struct gendisk *disk;
int ret = -EINVAL;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42dea7601d87..c1af0a7d56c8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1543,7 +1543,7 @@ static int virtblk_probe(struct virtio_device *vdev)
*/
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
- err = blk_revalidate_disk_zones(vblk->disk, NULL);
+ err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f0639df6cd18..3acd7006ad2c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -426,11 +426,10 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
- fput(zram->bdev_file);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev_file = NULL;
+ zram->bdev = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -473,10 +472,8 @@ static ssize_t backing_dev_store(struct device *dev,
size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
- struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct file *bdev_file = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -497,15 +494,14 @@ static ssize_t backing_dev_store(struct device *dev,
if (sz > 0 && file_name[sz - 1] == '\n')
file_name[sz - 1] = 0x00;
- backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
if (IS_ERR(backing_dev)) {
err = PTR_ERR(backing_dev);
backing_dev = NULL;
goto out;
}
- mapping = backing_dev->f_mapping;
- inode = mapping->host;
+ inode = backing_dev->f_mapping->host;
/* Support only block device in this moment */
if (!S_ISBLK(inode->i_mode)) {
@@ -513,14 +509,6 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev_file = bdev_file_open_by_dev(inode->i_rdev,
- BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
- if (IS_ERR(bdev_file)) {
- err = PTR_ERR(bdev_file);
- bdev_file = NULL;
- goto out;
- }
-
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
@@ -531,7 +519,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev_file = bdev_file;
+ zram->bdev = I_BDEV(inode);
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -544,9 +532,6 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev_file)
- fput(bdev_file);
-
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -587,7 +572,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@@ -703,7 +688,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
+ bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -785,7 +770,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
+ bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
@@ -1568,7 +1553,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
* Corresponding ZRAM slot should be locked.
*/
static int zram_recompress(struct zram *zram, u32 index, struct page *page,
- u32 threshold, u32 prio, u32 prio_max)
+ u64 *num_recomp_pages, u32 threshold, u32 prio,
+ u32 prio_max)
{
struct zcomp_strm *zstrm = NULL;
unsigned long handle_old;
@@ -1645,6 +1631,15 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (!zstrm)
return 0;
+ /*
+ * Decrement the limit (if set) on pages we can recompress, even
+ * when current recompression was unsuccessful or did not compress
+ * the page below the threshold, because we still spent resources
+ * on it.
+ */
+ if (*num_recomp_pages)
+ *num_recomp_pages -= 1;
+
if (class_index_new >= class_index_old) {
/*
* Secondary algorithms failed to re-compress the page
@@ -1710,6 +1705,7 @@ static ssize_t recompress_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
+ u64 num_recomp_pages = ULLONG_MAX;
u32 mode = 0, threshold = 0;
unsigned long index;
struct page *page;
@@ -1732,6 +1728,17 @@ static ssize_t recompress_store(struct device *dev,
continue;
}
+ if (!strcmp(param, "max_pages")) {
+ /*
+ * Limit the number of entries (pages) we attempt to
+ * recompress.
+ */
+ ret = kstrtoull(val, 10, &num_recomp_pages);
+ if (ret)
+ return ret;
+ continue;
+ }
+
if (!strcmp(param, "threshold")) {
/*
* We will re-compress only idle objects equal or
@@ -1788,6 +1795,9 @@ static ssize_t recompress_store(struct device *dev,
for (index = 0; index < nr_pages; index++) {
int err = 0;
+ if (!num_recomp_pages)
+ break;
+
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
@@ -1807,8 +1817,8 @@ static ssize_t recompress_store(struct device *dev,
zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
- err = zram_recompress(zram, index, page, threshold,
- prio, prio_max);
+ err = zram_recompress(zram, index, page, &num_recomp_pages,
+ threshold, prio, prio_max);
next:
zram_slot_unlock(zram, index);
if (err) {
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 37bf29f34d26..35e322144629 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
- struct file *bdev_file;
+ struct block_device *bdev;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index bc211c324206..0b5f218ac505 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -478,5 +478,16 @@ config BT_NXPUART
Say Y here to compile support for NXP Bluetooth UART device into
the kernel, or say M here to compile as a module (btnxpuart).
+config BT_INTEL_PCIE
+ tristate "Intel HCI PCIe driver"
+ depends on PCI
+ select BT_INTEL
+ select FW_LOADER
+ help
+ Intel Bluetooth transport driver for PCIe.
+ This driver is required if you want to use an Intel Bluetooth
+ device with a PCIe interface.
+ Say Y here to compile support for Intel Bluetooth PCIe devices
+ into the kernel, or say M to compile it as a module (btintel_pcie).
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 7a5967e9ac48..0730d6684d1a 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
obj-$(CONFIG_BT_INTEL) += btintel.o
+obj-$(CONFIG_BT_INTEL_PCIE) += btintel_pcie.o btintel.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 88262d3a9392..ce97b336fbfb 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -3,7 +3,6 @@
* Copyright (c) 2008-2009 Atheros Communications Inc.
*/
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
* for AR3012
*/
static const struct usb_device_id ath3k_blist_tbl[] = {
-
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
@@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size,
#define TIMEGAP_USEC_MAX 100
static int ath3k_load_firmware(struct usb_device *udev,
- const struct firmware *firmware)
+ const struct firmware *firmware)
{
u8 *send_buf;
int len = 0;
@@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
- &len, 3000);
+ &len, 3000);
- if (err || (len != size)) {
+ if (err || len != size) {
ath3k_log_failed_loading(err, len, size, count);
goto error;
}
@@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
}
static int ath3k_get_version(struct usb_device *udev,
- struct ath3k_version *version)
+ struct ath3k_version *version)
{
return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
@@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev,
}
static int ath3k_load_fwfile(struct usb_device *udev,
- const struct firmware *firmware)
+ const struct firmware *firmware)
{
u8 *send_buf;
int len = 0;
@@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
- &len, 3000);
- if (err || (len != size)) {
+ &len, 3000);
+ if (err || len != size) {
ath3k_log_failed_loading(err, len, size, count);
kfree(send_buf);
return err;
@@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
switch (fw_version.ref_clock) {
-
case ATH3K_XTAL_FREQ_26M:
clk_value = 26;
break;
@@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
- le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
@@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
static int ath3k_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
@@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf,
if (ret < 0) {
if (ret == -ENOENT)
BT_ERR("Firmware file \"%s\" not found",
- ATH3K_FIRMWARE);
+ ATH3K_FIRMWARE);
else
BT_ERR("Firmware file \"%s\" request failed (err=%d)",
- ATH3K_FIRMWARE, ret);
+ ATH3K_FIRMWARE, ret);
return ret;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 6ba7f5d1b837..0c855c3ee1c1 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -245,7 +245,7 @@ static int btintel_set_diag_combined(struct hci_dev *hdev, bool enable)
return ret;
}
-static void btintel_hw_error(struct hci_dev *hdev, u8 code)
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
struct sk_buff *skb;
u8 type = 0x00;
@@ -277,6 +277,7 @@ static void btintel_hw_error(struct hci_dev *hdev, u8 code)
kfree_skb(skb);
}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
{
@@ -455,8 +456,8 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
}
EXPORT_SYMBOL_GPL(btintel_read_version);
-static int btintel_version_info_tlv(struct hci_dev *hdev,
- struct intel_version_tlv *version)
+int btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version)
{
const char *variant;
@@ -481,6 +482,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x19: /* Slr-F */
case 0x1b: /* Mgr */
case 0x1c: /* Gale Peak (GaP) */
+ case 0x1e: /* BlazarI (Bzr) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -489,7 +491,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
}
switch (version->img_type) {
- case 0x01:
+ case BTINTEL_IMG_BOOTLOADER:
variant = "Bootloader";
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
@@ -521,7 +523,10 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
break;
- case 0x03:
+ case BTINTEL_IMG_IML:
+ variant = "Intermediate loader";
+ break;
+ case BTINTEL_IMG_OP:
variant = "Firmware";
break;
default:
@@ -535,15 +540,16 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
2000 + (version->timestamp >> 8), version->timestamp & 0xff,
version->build_type, version->build_num);
- if (version->img_type == 0x03)
+ if (version->img_type == BTINTEL_IMG_OP)
bt_dev_info(hdev, "Firmware SHA1: 0x%8.8x", version->git_sha1);
return 0;
}
+EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
-static int btintel_parse_version_tlv(struct hci_dev *hdev,
- struct intel_version_tlv *version,
- struct sk_buff *skb)
+int btintel_parse_version_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version,
+ struct sk_buff *skb)
{
/* Consume Command Complete Status field */
skb_pull(skb, 1);
@@ -645,6 +651,7 @@ static int btintel_parse_version_tlv(struct hci_dev *hdev,
return 0;
}
+EXPORT_SYMBOL_GPL(btintel_parse_version_tlv);
static int btintel_read_version_tlv(struct hci_dev *hdev,
struct intel_version_tlv *version)
@@ -1172,7 +1179,7 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev,
* If the firmware version has changed that means it needs to be reset
* to bootloader when operational so the new firmware can be loaded.
*/
- if (ver->img_type == 0x03)
+ if (ver->img_type == BTINTEL_IMG_OP)
return -EINVAL;
/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
@@ -2194,10 +2201,26 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver,
char *fw_name, size_t len,
const char *suffix)
{
+ const char *format;
/* The firmware file name for new generation controllers will be
* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
*/
- snprintf(fw_name, len, "intel/ibt-%04x-%04x.%s",
+ switch (ver->cnvi_top & 0xfff) {
+ /* Only Blazar product supports downloading of intermediate loader
+ * image
+ */
+ case BTINTEL_CNVI_BLAZARI:
+ if (ver->img_type == BTINTEL_IMG_BOOTLOADER)
+ format = "intel/ibt-%04x-%04x-iml.%s";
+ else
+ format = "intel/ibt-%04x-%04x.%s";
+ break;
+ default:
+ format = "intel/ibt-%04x-%04x.%s";
+ break;
+ }
+
+ snprintf(fw_name, len, format,
INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
INTEL_CNVX_TOP_STEP(ver->cnvi_top)),
INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
@@ -2230,7 +2253,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
* It is not possible to use the Secure Boot Parameters in this
* case since that command is only available in bootloader mode.
*/
- if (ver->img_type == 0x03) {
+ if (ver->img_type == BTINTEL_IMG_OP) {
btintel_clear_flag(hdev, INTEL_BOOTLOADER);
btintel_check_bdaddr(hdev);
} else {
@@ -2577,8 +2600,8 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev,
data->acpi_reset_method = btintel_acpi_reset_method;
}
-static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
- struct intel_version_tlv *ver)
+int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
{
u32 boot_param;
char ddcname[64];
@@ -2600,13 +2623,30 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
return err;
/* check if controller is already having an operational firmware */
- if (ver->img_type == 0x03)
+ if (ver->img_type == BTINTEL_IMG_OP)
goto finish;
err = btintel_boot(hdev, boot_param);
if (err)
return err;
+ err = btintel_read_version_tlv(hdev, ver);
+ if (err)
+ return err;
+
+ /* If the image type returned is BTINTEL_IMG_IML, then the controller
+ * supports an intermediate loader image
+ */
+ if (ver->img_type == BTINTEL_IMG_IML) {
+ err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
+ if (err)
+ return err;
+
+ err = btintel_boot(hdev, boot_param);
+ if (err)
+ return err;
+ }
+
btintel_clear_flag(hdev, INTEL_BOOTLOADER);
btintel_get_fw_name_tlv(ver, ddcname, sizeof(ddcname), "ddc");
@@ -2645,8 +2685,9 @@ finish:
return 0;
}
+EXPORT_SYMBOL_GPL(btintel_bootloader_setup_tlv);
-static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
+void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
{
switch (hw_variant) {
/* Legacy bootloader devices that supports MSFT Extension */
@@ -2662,6 +2703,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x19:
case 0x1b:
case 0x1c:
+ case 0x1e:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2669,6 +2711,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
break;
}
}
+EXPORT_SYMBOL_GPL(btintel_set_msft_opcode);
static void btintel_print_fseq_info(struct hci_dev *hdev)
{
@@ -2920,6 +2963,11 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = -EINVAL;
}
+ hci_set_hw_info(hdev,
+ "INTEL platform=%u variant=%u revision=%u",
+ ver.hw_platform, ver.hw_variant,
+ ver.hw_revision);
+
goto exit_error;
}
@@ -2996,6 +3044,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x19:
case 0x1b:
case 0x1c:
+ case 0x1e:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -3024,13 +3073,17 @@ static int btintel_setup_combined(struct hci_dev *hdev)
break;
}
+ hci_set_hw_info(hdev, "INTEL platform=%u variant=%u",
+ INTEL_HW_PLATFORM(ver_tlv.cnvi_bt),
+ INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+
exit_error:
kfree_skb(skb);
return err;
}
-static int btintel_shutdown_combined(struct hci_dev *hdev)
+int btintel_shutdown_combined(struct hci_dev *hdev)
{
struct sk_buff *skb;
int ret;
@@ -3064,6 +3117,7 @@ static int btintel_shutdown_combined(struct hci_dev *hdev)
return 0;
}
+EXPORT_SYMBOL_GPL(btintel_shutdown_combined);
int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
{
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index d19fcdb9ff0b..b5fea735e260 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -51,6 +51,12 @@ struct intel_tlv {
u8 val[];
} __packed;
+#define BTINTEL_CNVI_BLAZARI 0x900
+
+#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
+#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
+#define BTINTEL_IMG_OP 0x03 /* Operational image */
+
struct intel_version_tlv {
u32 cnvi_top;
u32 cnvr_top;
@@ -203,7 +209,7 @@ struct btintel_data {
#define btintel_wait_on_flag_timeout(hdev, nr, m, to) \
wait_on_bit_timeout(btintel_get_flag(hdev), (nr), m, to)
-#if IS_ENABLED(CONFIG_BT_INTEL)
+#if IS_ENABLED(CONFIG_BT_INTEL) || IS_ENABLED(CONFIG_BT_INTEL_PCIE)
int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_enter_mfg(struct hci_dev *hdev);
@@ -228,6 +234,16 @@ void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
int btintel_set_quality_report(struct hci_dev *hdev, bool enable);
+int btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version);
+int btintel_parse_version_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version,
+ struct sk_buff *skb);
+void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant);
+int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *ver);
+int btintel_shutdown_combined(struct hci_dev *hdev);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -324,4 +340,37 @@ static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
{
return -ENODEV;
}
+
+static inline int btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_parse_version_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
+
+{
+}
+
+static inline int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+ return -ENODEV;
+}
+
+static inline int btintel_shutdown_combined(struct hci_dev *hdev)
+{
+ return -ENODEV;
+}
+
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
#endif
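
The helpers exported here let a transport driver (such as the PCIe one added next) run the common Intel setup flow. A minimal sketch, assuming the transport has already received the version TLV command complete into "skb":

	struct intel_version_tlv ver;
	int err;

	err = btintel_parse_version_tlv(hdev, &ver, skb);
	if (err)
		return err;

	err = btintel_version_info_tlv(hdev, &ver);
	if (err)
		return err;

	btintel_set_msft_opcode(hdev, INTEL_HW_VARIANT(ver.cnvi_bt));

	return btintel_bootloader_setup_tlv(hdev, &ver);
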
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
new file mode 100644
index 000000000000..5b6805d87fcf
--- /dev/null
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Bluetooth support for Intel PCIe devices
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btintel.h"
+#include "btintel_pcie.h"
+
+#define VERSION "0.1"
+
+#define BTINTEL_PCI_DEVICE(dev, subdev) \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = (dev), \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = (subdev), \
+ .driver_data = 0
+
+#define POLL_INTERVAL_US 10
+
+/* Intel Bluetooth PCIe device id table */
+static const struct pci_device_id btintel_pcie_table[] = {
+ { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+
+/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
+#define BTINTEL_PCIE_HCI_TYPE_LEN 4
+#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
+#define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
+#define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
+#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
+
+static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
+ u16 queue_num)
+{
+ bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
+ queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
+ ia->tr_hia[queue_num], ia->tr_tia[queue_num],
+ ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
+}
+
+static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
+ u16 index)
+{
+ bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
+ index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
+}
+
+static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
+ u32 bits, u32 mask, int timeout_us)
+{
+ int t = 0;
+ u32 reg;
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, offset);
+
+ if ((reg & mask) == (bits & mask))
+ return t;
+ udelay(POLL_INTERVAL_US);
+ t += POLL_INTERVAL_US;
+ } while (t < timeout_us);
+
+ return -ETIMEDOUT;
+}
+
+static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
+{
+ u8 queue = entry->entry;
+ struct msix_entry *entries = entry - queue;
+
+ return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
+}
+
+/* Set the doorbell for TXQ to notify the device that @index (actually index-1)
+ * of the TFD is updated and ready to transmit.
+ */
+static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
+{
+ u32 val;
+
+ val = index;
+ val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
+}
+
+/* Copy the data to the next (@tfd_index) data buffer and update the TFD (transfer
+ * descriptor) with the data length and the DMA address of the data buffer.
+ */
+static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
+ struct sk_buff *skb)
+{
+ struct data_buf *buf;
+ struct tfd *tfd;
+
+ tfd = &txq->tfds[tfd_index];
+ memset(tfd, 0, sizeof(*tfd));
+
+ buf = &txq->bufs[tfd_index];
+
+ tfd->size = skb->len;
+ tfd->addr = buf->data_p_addr;
+
+ /* Copy the outgoing data to DMA buffer */
+ memcpy(buf->data, skb->data, tfd->size);
+}
+
+static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
+ struct sk_buff *skb)
+{
+ int ret;
+ u16 tfd_index;
+ struct txq *txq = &data->txq;
+
+ tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
+
+ if (tfd_index > txq->count)
+ return -ERANGE;
+
+ /* Prepare for TX. It updates the TFD with the length of data and
+ * address of the DMA buffer, and copy the data to the DMA buffer
+ */
+ btintel_pcie_prepare_tx(txq, tfd_index, skb);
+
+ tfd_index = (tfd_index + 1) % txq->count;
+ data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
+
+ /* Arm wait event condition */
+ data->tx_wait_done = false;
+
+ /* Set the doorbell to notify the device */
+ btintel_pcie_set_tx_db(data, tfd_index);
+
+ /* Wait for the complete interrupt - URBD0 */
+ ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
+ msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
+ if (!ret)
+ return -ETIME;
+
+ return 0;
+}
+
+/* Set the doorbell for RXQ to notify the device that @index (actually index-1)
+ * is available to receive the data
+ */
+static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
+{
+ u32 val;
+
+ val = index;
+ val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
+}
+
+/* Update the FRBD (free buffer descriptor) with the @frbd_index and the
+ * DMA address of the free buffer.
+ */
+static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
+{
+ struct data_buf *buf;
+ struct frbd *frbd;
+
+ /* Get the buffer of the FRBD for DMA */
+ buf = &rxq->bufs[frbd_index];
+
+ frbd = &rxq->frbds[frbd_index];
+ memset(frbd, 0, sizeof(*frbd));
+
+ /* Update FRBD */
+ frbd->tag = frbd_index;
+ frbd->addr = buf->data_p_addr;
+}
+
+static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
+{
+ u16 frbd_index;
+ struct rxq *rxq = &data->rxq;
+
+ frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
+
+ if (frbd_index > rxq->count)
+ return -ERANGE;
+
+ /* Prepare for RX submit. It updates the FRBD with the address of DMA
+ * buffer
+ */
+ btintel_pcie_prepare_rx(rxq, frbd_index);
+
+ frbd_index = (frbd_index + 1) % rxq->count;
+ data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
+ ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
+
+ /* Set the doorbell to notify the device */
+ btintel_pcie_set_rx_db(data, frbd_index);
+
+ return 0;
+}
+
+static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ ret = btintel_pcie_submit_rx(data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
+{
+ memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
+ memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
+ memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
+ memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
+}
+
+static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+{
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+}
+
+/* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT
+ * bit in the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for an MSI-X interrupt
+ * with BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
+ * The host then reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
+ * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
+ */
+static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
+{
+ int err;
+
+ data->gp0_received = false;
+
+ /* Update the DMA address of CI struct to CSR */
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
+ data->ci_p_addr & 0xffffffff);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
+ (u64)data->ci_p_addr >> 32);
+
+ /* Reset the cached value of the boot stage. It is updated by the MSI-X
+ * gp0 interrupt handler.
+ */
+ data->boot_stage_cache = 0x0;
+
+ /* Set MAC_INIT bit to start primary bootloader */
+ btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+
+ /* Wait until MAC_ACCESS is granted */
+ err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
+ BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
+ if (err < 0)
+ return -ENODEV;
+
+ /* MAC is ready. Enable BT FUNC */
+ btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+
+ btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ /* Wait for an interrupt from the device after it boots up to the
+ * primary bootloader.
+ */
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
+ if (!err)
+ return -ETIME;
+
+ /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
+ if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
+ * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
+ */
+static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ /* This interrupt can be raised for three different causes and it is not
+ * easy to know which one fired. So compare each register value with the
+ * cached value and update the cache before waking up the wait queue.
+ */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ if (reg != data->boot_stage_cache)
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
+ if (reg != data->img_resp_cache)
+ data->img_resp_cache = reg;
+
+ data->gp0_received = true;
+
+ /* If the boot stage is OP or IML, reset IA and start RX again */
+ if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
+ data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_start_rx(data);
+ }
+
+ wake_up(&data->gp0_wait_q);
+}
+
+/* This function handles the MSI-X interrupt for rx queue 0, which carries TX completions
+ */
+static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
+{
+ u16 cr_tia, cr_hia;
+ struct txq *txq;
+ struct urbd0 *urbd0;
+
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+
+ if (cr_tia == cr_hia)
+ return;
+
+ txq = &data->txq;
+
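+ /* Walk the completion ring from the tail to the head index, waking the
+ * synchronous sender for each completed URBD0 and advancing cr_tia.
+ */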
+ while (cr_tia != cr_hia) {
+ data->tx_wait_done = true;
+ wake_up(&data->tx_wait_q);
+
+ urbd0 = &txq->urbd0s[cr_tia];
+
+ if (urbd0->tfd_index > txq->count)
+ return;
+
+ cr_tia = (cr_tia + 1) % txq->count;
+ data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
+ ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
+ }
+}
+
+/* Process the received rx data.
+ * It checks the frame header to identify the data type, creates an skb
+ * and passes it to the HCI layer.
+ */
+static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
+ struct sk_buff *skb)
+{
+ int ret;
+ u8 pkt_type;
+ u16 plen;
+ u32 pcie_pkt_type;
+ struct sk_buff *new_skb;
+ void *pdata;
+ struct hci_dev *hdev = data->hdev;
+
+ spin_lock(&data->hci_rx_lock);
+
+ /* The first 4 bytes indicate the Intel PCIe specific packet type */
+ pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
+ if (!pdata) {
+ bt_dev_err(hdev, "Corrupted packet received");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+
+ pcie_pkt_type = get_unaligned_le32(pdata);
+
+ switch (pcie_pkt_type) {
+ case BTINTEL_PCIE_HCI_ACL_PKT:
+ if (skb->len >= HCI_ACL_HDR_SIZE) {
+ plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
+ pkt_type = HCI_ACLDATA_PKT;
+ } else {
+ bt_dev_err(hdev, "ACL packet is too short");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+ break;
+
+ case BTINTEL_PCIE_HCI_SCO_PKT:
+ if (skb->len >= HCI_SCO_HDR_SIZE) {
+ plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
+ pkt_type = HCI_SCODATA_PKT;
+ } else {
+ bt_dev_err(hdev, "SCO packet is too short");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+ break;
+
+ case BTINTEL_PCIE_HCI_EVT_PKT:
+ if (skb->len >= HCI_EVENT_HDR_SIZE) {
+ plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
+ pkt_type = HCI_EVENT_PKT;
+ } else {
+ bt_dev_err(hdev, "Event packet is too short");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+ break;
+ default:
+ bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
+ pcie_pkt_type);
+ ret = -EINVAL;
+ goto exit_error;
+ }
+
+ if (skb->len < plen) {
+ bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
+ pkt_type);
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+
+ bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
+
+ new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
+ if (!new_skb) {
+ bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
+ skb->len);
+ ret = -ENOMEM;
+ goto exit_error;
+ }
+
+ hci_skb_pkt_type(new_skb) = pkt_type;
+ skb_put_data(new_skb, skb->data, plen);
+ hdev->stat.byte_rx += plen;
+
+ if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
+ ret = btintel_recv_event(hdev, new_skb);
+ else
+ ret = hci_recv_frame(hdev, new_skb);
+
+exit_error:
+ if (ret)
+ hdev->stat.err_rx++;
+
+ spin_unlock(&data->hci_rx_lock);
+
+ return ret;
+}
+
+static void btintel_pcie_rx_work(struct work_struct *work)
+{
+ struct btintel_pcie_data *data = container_of(work,
+ struct btintel_pcie_data, rx_work);
+ struct sk_buff *skb;
+ int err;
+ struct hci_dev *hdev = data->hdev;
+
+ /* Process the sk_buffs in the queue and send them to the HCI layer */
+ while ((skb = skb_dequeue(&data->rx_skb_q))) {
+ err = btintel_pcie_recv_frame(data, skb);
+ if (err)
+ bt_dev_err(hdev, "Failed to send received frame: %d",
+ err);
+ kfree_skb(skb);
+ }
+}
+
+/* Create an sk_buff with the data, add it to the queue and schedule the RX work */
+static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
+ void *buf)
+{
+ int ret, len;
+ struct rfh_hdr *rfh_hdr;
+ struct sk_buff *skb;
+
+ rfh_hdr = buf;
+
+ len = rfh_hdr->packet_len;
+ if (len <= 0) {
+ ret = -EINVAL;
+ goto resubmit;
+ }
+
+ /* Remove RFH header */
+ buf += sizeof(*rfh_hdr);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto resubmit;
+ }
+
+ skb_put_data(skb, buf, len);
+ skb_queue_tail(&data->rx_skb_q, skb);
+ queue_work(data->workqueue, &data->rx_work);
+
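+ /* Hand the FRBD back to the device whether or not the payload was
+ * queued, so the RX ring stays replenished.
+ */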
+resubmit:
+ ret = btintel_pcie_submit_rx(data);
+
+ return ret;
+}
+
+/* Handles the MSI-X interrupt for rx queue 1 which is for RX */
+static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
+{
+ u16 cr_hia, cr_tia;
+ struct rxq *rxq;
+ struct urbd1 *urbd1;
+ struct data_buf *buf;
+ int ret;
+ struct hci_dev *hdev = data->hdev;
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+
+ bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
+
+ /* Check CR_TIA and CR_HIA for change */
+ if (cr_tia == cr_hia) {
+ bt_dev_warn(hdev, "RXQ: no new CD found");
+ return;
+ }
+
+ rxq = &data->rxq;
+
+ /* The firmware may send multiple CDs for a single MSI-X interrupt, so all
+ * received CDs need to be processed here.
+ */
+ while (cr_tia != cr_hia) {
+ urbd1 = &rxq->urbd1s[cr_tia];
+ ipc_print_urbd1(data->hdev, urbd1, cr_tia);
+
+ buf = &rxq->bufs[urbd1->frbd_tag];
+ if (!buf) {
+ bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
+ urbd1->frbd_tag);
+ return;
+ }
+
+ ret = btintel_pcie_submit_rx_work(data, urbd1->status,
+ buf->data);
+ if (ret) {
+ bt_dev_err(hdev, "RXQ: failed to submit rx request");
+ return;
+ }
+
+ cr_tia = (cr_tia + 1) % rxq->count;
+ data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
+ ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
+ }
+}
+
+static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
+{
+ struct msix_entry *entry = dev_id;
+ struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
+ u32 intr_fh, intr_hw;
+
+ spin_lock(&data->irq_lock);
+ intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
+ intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
+
+ /* Clear the cause registers to avoid handling the same cause again */
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
+ spin_unlock(&data->irq_lock);
+
+ if (unlikely(!(intr_fh | intr_hw))) {
+ /* Ignore interrupt, inta == 0 */
+ return IRQ_NONE;
+ }
+
+ /* This interrupt is triggered by the firmware after updating
+ * the boot_stage and image_response registers
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
+ /* For TX */
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ btintel_pcie_msix_tx_handle(data);
+
+ /* For RX */
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ btintel_pcie_msix_rx_handle(data);
+
+ /*
+ * Before sending the interrupt the HW disables it to prevent a nested
+ * interrupt. This is done by writing 1 to the corresponding bit in
+ * the mask register. After handling the interrupt, it should be
+ * re-enabled by clearing this bit. This register is defined as write 1
+ * clear (W1C) register, meaning that it's cleared by writing 1
+ * to the bit.
+ */
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
+ BIT(entry->entry));
+
+ return IRQ_HANDLED;
+}
+
+/* This function requests the IRQs for MSI-X and registers a handler per IRQ.
+ * Currently, only one IRQ is requested for all interrupt causes.
+ */
+static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
+{
+ int err;
+ int num_irqs, i;
+
+ for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
+ data->msix_entries[i].entry = i;
+
+ num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
+ BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
+ if (num_irqs < 0)
+ return num_irqs;
+
+ data->alloc_vecs = num_irqs;
+ data->msix_enabled = 1;
+ data->def_irq = 0;
+
+ /* setup irq handler */
+ for (i = 0; i < data->alloc_vecs; i++) {
+ struct msix_entry *msix_entry;
+
+ msix_entry = &data->msix_entries[i];
+ msix_entry->vector = pci_irq_vector(data->pdev, i);
+
+ err = devm_request_threaded_irq(&data->pdev->dev,
+ msix_entry->vector,
+ btintel_pcie_msix_isr,
+ btintel_pcie_irq_msix_handler,
+ IRQF_SHARED,
+ KBUILD_MODNAME,
+ msix_entry);
+ if (err) {
+ pci_free_irq_vectors(data->pdev);
+ data->alloc_vecs = 0;
+ return err;
+ }
+ }
+ return 0;
+}
+
+struct btintel_pcie_causes_list {
+ u32 cause;
+ u32 mask_reg;
+ u8 cause_num;
+};
+
+static struct btintel_pcie_causes_list causes_list[] = {
+ { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
+ { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+};
+
+/* This function configures the interrupt masks for both HW_INT_CAUSES and
+ * FH_INT_CAUSES which are meaningful to us.
+ *
+ * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
+ * needs to call this function again since the masks are reset to 0xFFFFFFFF
+ * after reset.
+ */
+static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
+{
+ int i;
+ int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
+
+ /* Set Non Auto Clear Cause */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ btintel_pcie_wr_reg8(data,
+ BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
+ val);
+ btintel_pcie_clr_reg_bits(data,
+ causes_list[i].mask_reg,
+ causes_list[i].cause);
+ }
+
+ /* Save the initial interrupt mask */
+ data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
+ data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
+}
+
+static int btintel_pcie_config_pcie(struct pci_dev *pdev,
+ struct btintel_pcie_data *data)
+{
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ data->base_addr = pcim_iomap_table(pdev)[0];
+ if (!data->base_addr)
+ return -ENODEV;
+
+ err = btintel_pcie_setup_irq(data);
+ if (err)
+ return err;
+
+ /* Configure MSI-X with causes list */
+ btintel_pcie_config_msix(data);
+
+ return 0;
+}
+
+static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
+ struct ctx_info *ci)
+{
+ ci->version = 0x1;
+ ci->size = sizeof(*ci);
+ ci->config = 0x0000;
+ ci->addr_cr_hia = data->ia.cr_hia_p_addr;
+ ci->addr_tr_tia = data->ia.tr_tia_p_addr;
+ ci->addr_cr_tia = data->ia.cr_tia_p_addr;
+ ci->addr_tr_hia = data->ia.tr_hia_p_addr;
+ ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
+ ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
+ ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
+ ci->addr_tfdq = data->txq.tfds_p_addr;
+ ci->num_tfdq = data->txq.count;
+ ci->num_urbdq0 = data->txq.count;
+ ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
+ ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
+ ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
+ ci->addr_frbdq = data->rxq.frbds_p_addr;
+ ci->num_frbdq = data->rxq.count;
+ ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+ ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
+ ci->num_urbdq1 = data->rxq.count;
+ ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+}
+
+static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
+ struct txq *txq)
+{
+ /* Free data buffers first */
+ dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
+ txq->buf_v_addr, txq->buf_p_addr);
+ kfree(txq->bufs);
+}
+
+static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
+ struct txq *txq)
+{
+ int i;
+ struct data_buf *buf;
+
+ /* Allocate the same number of buffers as there are descriptors */
+ txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
+ if (!txq->bufs)
+ return -ENOMEM;
+
+ /* Allocate the full chunk of DMA data buffer first, then do the indexing
+ * and initialization, so it can be freed easily
+ */
+ txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
+ txq->count * BTINTEL_PCIE_BUFFER_SIZE,
+ &txq->buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!txq->buf_v_addr) {
+ kfree(txq->bufs);
+ return -ENOMEM;
+ }
+ memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE);
+
+ /* Assign each slice of the allocated DMA buffer to bufs. Each data_buf
+ * holds the virtual and the physical address of its slice
+ */
+ for (i = 0; i < txq->count; i++) {
+ buf = &txq->bufs[i];
+ buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
+ buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
+ }
+
+ return 0;
+}
+
+static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
+ struct rxq *rxq)
+{
+ /* Free data buffers first */
+ dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
+ rxq->buf_v_addr, rxq->buf_p_addr);
+ kfree(rxq->bufs);
+}
+
+static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
+ struct rxq *rxq)
+{
+ int i;
+ struct data_buf *buf;
+
+ /* Allocate the same number of buffers as there are descriptors */
+ rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
+ if (!rxq->bufs)
+ return -ENOMEM;
+
+ /* Allocate the full chunk of DMA data buffer first, then do the indexing
+ * and initialization, so it can be freed easily
+ */
+ rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
+ rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
+ &rxq->buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rxq->buf_v_addr) {
+ kfree(rxq->bufs);
+ return -ENOMEM;
+ }
+ memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE);
+
+ /* Assign each slice of the allocated DMA buffer to bufs. Each data_buf
+ * holds the virtual and the physical address of its slice
+ */
+ for (i = 0; i < rxq->count; i++) {
+ buf = &rxq->bufs[i];
+ buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
+ buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
+ }
+
+ return 0;
+}
+
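+/* The index arrays live in one contiguous DMA block, laid out as four
+ * consecutive arrays of BTINTEL_PCIE_NUM_QUEUES u16 entries each:
+ * [tr_hia][tr_tia][cr_hia][cr_tia].
+ */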
+static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
+ dma_addr_t p_addr, void *v_addr,
+ struct ia *ia)
+{
+ /* TR Head Index Array */
+ ia->tr_hia_p_addr = p_addr;
+ ia->tr_hia = v_addr;
+
+ /* TR Tail Index Array */
+ ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
+ ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
+
+ /* CR Head index Array */
+ ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
+ ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
+
+ /* CR Tail Index Array */
+ ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
+ ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
+}
+
+static void btintel_pcie_free(struct btintel_pcie_data *data)
+{
+ btintel_pcie_free_rxq_bufs(data, &data->rxq);
+ btintel_pcie_free_txq_bufs(data, &data->txq);
+
+ dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
+ dma_pool_destroy(data->dma_pool);
+}
+
+/* Allocate tx and rx queues, any related data structures and buffers.
+ */
+static int btintel_pcie_alloc(struct btintel_pcie_data *data)
+{
+ int err = 0;
+ size_t total;
+ dma_addr_t p_addr;
+ void *v_addr;
+
+ /* Allocate the chunk of DMA memory for descriptors, index array, and
+ * context information, instead of allocating individually.
+ * The DMA memory for the data buffers is allocated while setting up
+ * each queue.
+ *
+ * Total size is the sum of the following:
+ * + size of TFD * Number of descriptors in queue
+ * + size of URBD0 * Number of descriptors in queue
+ * + size of FRBD * Number of descriptors in queue
+ * + size of URBD1 * Number of descriptors in queue
+ * + size of index * Number of queues(2) * type of index array(4)
+ * + size of context information
+ */
+ total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
+ + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+
+ /* Add the size of the index arrays and the size of the ci struct */
+ total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
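+
+ /* The allocated block is carved up below in this order:
+ * [tfds][urbd0s][frbds][urbd1s][index arrays][ctx_info]
+ */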
+
+ /* Allocate DMA Pool */
+ data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
+ total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
+ if (!data->dma_pool) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
+ &p_addr);
+ if (!v_addr) {
+ dma_pool_destroy(data->dma_pool);
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ data->dma_p_addr = p_addr;
+ data->dma_v_addr = v_addr;
+
+ /* Setup descriptor count */
+ data->txq.count = BTINTEL_DESCS_COUNT;
+ data->rxq.count = BTINTEL_DESCS_COUNT;
+
+ /* Setup tfds */
+ data->txq.tfds_p_addr = p_addr;
+ data->txq.tfds = v_addr;
+
+ p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+
+ /* Setup urbd0 */
+ data->txq.urbd0s_p_addr = p_addr;
+ data->txq.urbd0s = v_addr;
+
+ p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+
+ /* Setup FRBD */
+ data->rxq.frbds_p_addr = p_addr;
+ data->rxq.frbds = v_addr;
+
+ p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+
+ /* Setup urbd1 */
+ data->rxq.urbd1s_p_addr = p_addr;
+ data->rxq.urbd1s = v_addr;
+
+ p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+
+ /* Setup data buffers for txq */
+ err = btintel_pcie_setup_txq_bufs(data, &data->txq);
+ if (err)
+ goto exit_error_pool;
+
+ /* Setup data buffers for rxq */
+ err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
+ if (err)
+ goto exit_error_txq;
+
+ /* Setup Index Array */
+ btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
+
+ /* Setup Context Information */
+ p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
+ v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
+
+ data->ci = v_addr;
+ data->ci_p_addr = p_addr;
+
+ /* Initialize the CI */
+ btintel_pcie_init_ci(data, data->ci);
+
+ return 0;
+
+exit_error_txq:
+ btintel_pcie_free_txq_bufs(data, &data->txq);
+exit_error_pool:
+ dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
+ dma_pool_destroy(data->dma_pool);
+exit_error:
+ return err;
+}
+
+static int btintel_pcie_open(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "");
+
+ return 0;
+}
+
+static int btintel_pcie_close(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "");
+
+ return 0;
+}
+
+static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+ struct sk_buff *skb;
+ struct hci_event_hdr *hdr;
+ struct hci_ev_cmd_complete *evt;
+
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->evt = HCI_EV_CMD_COMPLETE;
+ hdr->plen = sizeof(*evt) + 1;
+
+ evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt->ncmd = 0x01;
+ evt->opcode = cpu_to_le16(opcode);
+
+ *(u8 *)skb_put(skb, 1) = 0x00;
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ int ret;
+ u32 type;
+
+ /* Due to a firmware limitation, the packet type header must be 4 bytes,
+ * unlike the 1 byte used for UART. For UART, the firmware can read the
+ * first byte to get the packet type and redirect the rest of the packet
+ * to the right handler.
+ *
+ * But for PCIe, the THF (Transfer Flow Handler) fetches the 4 bytes of
+ * data from DMA memory, and by the time it reads the first 4 bytes it has
+ * already consumed some part of the packet. Thus the packet type
+ * indicator for iBT PCIe is 4 bytes.
+ *
+ * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
+ * head room for profile and driver use, so the iBT PCIe packet type is
+ * prepended here before sending the data to the device.
+ */
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ type = BTINTEL_PCIE_HCI_CMD_PKT;
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ struct hci_command_hdr *cmd = (void *)skb->data;
+ __u16 opcode = le16_to_cpu(cmd->opcode);
+
+ /* When the 0xfc01 command is issued to boot into
+ * the operational firmware, it will actually not
+ * send a command complete event. To keep the flow
+ * control working, inject that event here.
+ */
+ if (opcode == 0xfc01)
+ btintel_pcie_inject_cmd_complete(hdev, opcode);
+ }
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ type = BTINTEL_PCIE_HCI_ACL_PKT;
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ type = BTINTEL_PCIE_HCI_SCO_PKT;
+ hdev->stat.sco_tx++;
+ break;
+ default:
+ bt_dev_err(hdev, "Unknown HCI packet type");
+ return -EILSEQ;
+ }
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
+ ret = btintel_pcie_send_sync(data, skb);
+ if (ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "Failed to send frame (%d)", ret);
+ goto exit_error;
+ }
+ hdev->stat.byte_tx += skb->len;
+ kfree_skb(skb);
+
+exit_error:
+ return ret;
+}
+
+static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
+{
+ struct hci_dev *hdev;
+
+ hdev = data->hdev;
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ data->hdev = NULL;
+}
+
+static int btintel_pcie_setup(struct hci_dev *hdev)
+{
+ const u8 param[1] = { 0xFF };
+ struct intel_version_tlv ver_tlv;
+ struct sk_buff *skb;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ /* Check the status */
+ if (skb->data[0]) {
+ bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
+ skb->data[0]);
+ err = -EIO;
+ goto exit_error;
+ }
+
+ /* Apply the common HCI quirks for Intel device */
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+
+ /* Set up the quality report callback for Intel devices */
+ hdev->set_quality_report = btintel_set_quality_report;
+
+ memset(&ver_tlv, 0, sizeof(ver_tlv));
+ /* For TLV type device, parse the tlv data */
+ err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
+ if (err) {
+ bt_dev_err(hdev, "Failed to parse TLV version information");
+ goto exit_error;
+ }
+
+ switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
+ case 0x37:
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
+ INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
+ err = -EINVAL;
+ goto exit_error;
+ }
+
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come
+ * along.
+ */
+ switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
+ case 0x1e: /* BzrI */
+ /* Display version information of TLV type */
+ btintel_version_info_tlv(hdev, &ver_tlv);
+
+ /* Apply the device specific HCI quirks for TLV based devices
+ *
+ * All TLV based devices support WBS
+ */
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
+ /* Apply LE States quirk from solar onwards */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
+ /* Setup MSFT Extension support */
+ btintel_set_msft_opcode(hdev,
+ INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+
+ err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+ if (err)
+ goto exit_error;
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
+ INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+ err = -EINVAL;
+ break;
+ }
+
+exit_error:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
+{
+ int err;
+ struct hci_dev *hdev;
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->bus = HCI_PCI;
+ hci_set_drvdata(hdev, data);
+
+ data->hdev = hdev;
+ SET_HCIDEV_DEV(hdev, &data->pdev->dev);
+
+ hdev->manufacturer = 2;
+ hdev->open = btintel_pcie_open;
+ hdev->close = btintel_pcie_close;
+ hdev->send = btintel_pcie_send_frame;
+ hdev->setup = btintel_pcie_setup;
+ hdev->shutdown = btintel_shutdown_combined;
+ hdev->hw_error = btintel_hw_error;
+ hdev->set_diag = btintel_set_diag;
+ hdev->set_bdaddr = btintel_set_bdaddr;
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
+ BT_ERR("Failed to register to hdev (%d)", err);
+ goto exit_error;
+ }
+
+ return 0;
+
+exit_error:
+ hci_free_dev(hdev);
+ return err;
+}
+
+static int btintel_pcie_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+ struct btintel_pcie_data *data;
+
+ if (!pdev)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ spin_lock_init(&data->irq_lock);
+ spin_lock_init(&data->hci_rx_lock);
+
+ init_waitqueue_head(&data->gp0_wait_q);
+ data->gp0_received = false;
+
+ init_waitqueue_head(&data->tx_wait_q);
+ data->tx_wait_done = false;
+
+ data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
+ if (!data->workqueue)
+ return -ENOMEM;
+
+ skb_queue_head_init(&data->rx_skb_q);
+ INIT_WORK(&data->rx_work, btintel_pcie_rx_work);
+
+ data->boot_stage_cache = 0x00;
+ data->img_resp_cache = 0x00;
+
+ err = btintel_pcie_config_pcie(pdev, data);
+ if (err)
+ goto exit_error;
+
+ pci_set_drvdata(pdev, data);
+
+ err = btintel_pcie_alloc(data);
+ if (err)
+ goto exit_error;
+
+ err = btintel_pcie_enable_bt(data);
+ if (err)
+ goto exit_error;
+
+ /* CNV information (CNVi and CNVr) is in CSR */
+ data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);
+
+ data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);
+
+ err = btintel_pcie_start_rx(data);
+ if (err)
+ goto exit_error;
+
+ err = btintel_pcie_setup_hdev(data);
+ if (err)
+ goto exit_error;
+
+ bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
+ data->cnvr);
+ return 0;
+
+exit_error:
+ /* reset device before exit */
+ btintel_pcie_reset_bt(data);
+
+ pci_clear_master(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void btintel_pcie_remove(struct pci_dev *pdev)
+{
+ struct btintel_pcie_data *data;
+
+ data = pci_get_drvdata(pdev);
+
+ btintel_pcie_reset_bt(data);
+
+ pci_free_irq_vectors(pdev);
+
+ btintel_pcie_release_hdev(data);
+
+ flush_work(&data->rx_work);
+
+ destroy_workqueue(data->workqueue);
+
+ btintel_pcie_free(data);
+
+ pci_clear_master(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver btintel_pcie_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = btintel_pcie_table,
+ .probe = btintel_pcie_probe,
+ .remove = btintel_pcie_remove,
+};
+module_pci_driver(btintel_pcie_driver);
+
+MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
+MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
new file mode 100644
index 000000000000..baaff70420f5
--- /dev/null
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * Bluetooth support for Intel PCIe devices
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+/* Control and Status Register (BTINTEL_PCIE_CSR) */
+#define BTINTEL_PCIE_CSR_BASE (0x000)
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_REG (BTINTEL_PCIE_CSR_BASE + 0x024)
+#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
+#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
+#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
+#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
+
+/* BTINTEL_PCIE_CSR Function Control Register */
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA (BIT(0))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
+
+/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM (BIT(0))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML (BIT(1))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
+
+/* Registers for MSI-X */
+#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000)
+#define BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0800)
+#define BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0804)
+#define BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0808)
+#define BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK (BTINTEL_PCIE_CSR_MSIX_BASE + 0x080C)
+#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0810)
+#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_EN (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0814)
+#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
+#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
+
+/* Causes for the FH register interrupts */
+enum msix_fh_int_causes {
+ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */
+ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1 = BIT(1), /* cause 1 */
+};
+
+/* Causes for the HW register interrupts */
+enum msix_hw_int_causes {
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+};
+
+#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
+
+/* Minimum and maximum number of MSI-X vectors.
+ * Intel Bluetooth PCIe supports only 1 vector.
+ */
+#define BTINTEL_PCIE_MSIX_VEC_MAX 1
+#define BTINTEL_PCIE_MSIX_VEC_MIN 1
+
+/* Default poll time for MAC access during init */
+#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000
+
+/* Default interrupt timeout in msec */
+#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000
+
+/* The number of descriptors in TX/RX queues */
+#define BTINTEL_DESCS_COUNT 16
+
+/* Queue numbers for TX and RX.
+ * They indicate the index into the IA (Index Array).
+ */
+enum {
+ BTINTEL_PCIE_TXQ_NUM = 0,
+ BTINTEL_PCIE_RXQ_NUM = 1,
+ BTINTEL_PCIE_NUM_QUEUES = 2,
+};
+
+/* The size of DMA buffer for TX and RX in bytes */
+#define BTINTEL_PCIE_BUFFER_SIZE 4096
+
+/* DMA allocation alignment */
+#define BTINTEL_PCIE_DMA_POOL_ALIGNMENT 256
+
+#define BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS 500
+
+/* Doorbell vector for TFD */
+#define BTINTEL_PCIE_TX_DB_VEC 0
+
+/* Number of pending RX requests for downlink */
+#define BTINTEL_PCIE_RX_MAX_QUEUE 6
+
+/* Doorbell vector for FRBD */
+#define BTINTEL_PCIE_RX_DB_VEC 513
+
+/* RBD buffer size mapping */
+#define BTINTEL_PCIE_RBD_SIZE_4K 0x04
+
+/*
+ * Struct for Context Information (v2)
+ *
+ * All members are write-only for host and read-only for device.
+ *
+ * @version: Version of context information
+ * @size: Size of context information
+ * @config: Config with which host wants peripheral to execute
+ * Subset of capability register published by device
+ * @addr_tr_hia: Address of TR Head Index Array
+ * @addr_tr_tia: Address of TR Tail Index Array
+ * @addr_cr_hia: Address of CR Head Index Array
+ * @addr_cr_tia: Address of CR Tail Index Array
+ * @num_tr_ia: Number of entries in TR Index Arrays
+ * @num_cr_ia: Number of entries in CR Index Arrays
+ * @rbd_size: RBD Size { 0x4=4K }
+ * @addr_tfdq: Address of TFD Queue(tx)
+ * @addr_urbdq0: Address of URBD Queue(tx)
+ * @num_tfdq: Number of TFD in TFD Queue(tx)
+ * @num_urbdq0: Number of URBD in URBD Queue(tx)
+ * @tfdq_db_vec: Queue number of TFD
+ * @urbdq0_db_vec: Queue number of URBD
+ * @addr_frbdq: Address of FRBD Queue(rx)
+ * @addr_urbdq1: Address of URBD Queue(rx)
+ * @num_frbdq: Number of FRBD in FRBD Queue(rx)
+ * @frbdq_db_vec: Queue number of FRBD
+ * @num_urbdq1: Number of URBD in URBD Queue(rx)
+ * @urbdq_db_vec: Queue number of URBDQ1
+ * @tr_msi_vec: Transfer Ring MSI-X Vector
+ * @cr_msi_vec: Completion Ring MSI-X Vector
+ * @dbgc_addr: DBGC first fragment address
+ * @dbgc_size: DBGC buffer size
+ * @early_enable: Early debug enable
+ * @dbg_output_mode: Debug output mode
+ * Bit[4] DBGC O/P { 0=SRAM, 1=DRAM(not relevant for NPK) }
+ * Bit[5] DBGC I/P { 0=BDBG, 1=DBGI }
+ * Bits[6:7] DBGI O/P(relevant if bit[5] = 1)
+ * 0=BT DBGC, 1=WiFi DBGC, 2=NPK }
+ * @dbg_preset: Debug preset
+ * @ext_addr: Address of context information extension
+ * @ext_size: Size of context information part
+ *
+ * Total 38 DWords
+ */
+struct ctx_info {
+ u16 version;
+ u16 size;
+ u32 config;
+ u32 reserved_dw02;
+ u32 reserved_dw03;
+ u64 addr_tr_hia;
+ u64 addr_tr_tia;
+ u64 addr_cr_hia;
+ u64 addr_cr_tia;
+ u16 num_tr_ia;
+ u16 num_cr_ia;
+ u32 rbd_size:4,
+ reserved_dw13:28;
+ u64 addr_tfdq;
+ u64 addr_urbdq0;
+ u16 num_tfdq;
+ u16 num_urbdq0;
+ u16 tfdq_db_vec;
+ u16 urbdq0_db_vec;
+ u64 addr_frbdq;
+ u64 addr_urbdq1;
+ u16 num_frbdq;
+ u16 frbdq_db_vec;
+ u16 num_urbdq1;
+ u16 urbdq_db_vec;
+ u16 tr_msi_vec;
+ u16 cr_msi_vec;
+ u32 reserved_dw27;
+ u64 dbgc_addr;
+ u32 dbgc_size;
+ u32 early_enable:1,
+ reserved_dw31:3,
+ dbg_output_mode:4,
+ dbg_preset:8,
+ reserved2_dw31:16;
+ u64 ext_addr;
+ u32 ext_size;
+ u32 test_param;
+ u32 reserved_dw36;
+ u32 reserved_dw37;
+} __packed;
+
+/* Transfer Descriptor for TX
+ * @type: Not in use. Set to 0x0
+ * @size: Size of data in the buffer
+ * @addr: DMA Address of buffer
+ */
+struct tfd {
+ u8 type;
+ u16 size;
+ u8 reserved;
+ u64 addr;
+ u32 reserved1;
+} __packed;
+
+/* URB Descriptor for TX
+ * @tfd_index: Index of TFD in TFDQ + 1
+ * @num_txq: Queue index of TFD Queue
+ * @cmpl_count: Completion count. Always 0x01
+ * @immediate_cmpl: Immediate completion flag: Always 0x01
+ */
+struct urbd0 {
+ u32 tfd_index:16,
+ num_txq:8,
+ cmpl_count:4,
+ reserved:3,
+ immediate_cmpl:1;
+} __packed;
+
+/* FRB Descriptor for RX
+ * @tag: RX buffer tag (index of RX buffer queue)
+ * @addr: Address of buffer
+ */
+struct frbd {
+ u32 tag:16,
+ reserved:16;
+ u32 reserved2;
+ u64 addr;
+} __packed;
+
+/* URB Descriptor for RX
+ * @frbd_tag: Tag from FRBD
+ * @status: Status
+ */
+struct urbd1 {
+ u32 frbd_tag:16,
+ status:1,
+ reserved:14,
+ fixed:1;
+} __packed;
+
+/* RFH header in RX packet
+ * @packet_len: Length of the data in the buffer
+ * @rxq: RX Queue number
+ * @cmd_id: Command ID. Not in Use
+ */
+struct rfh_hdr {
+ u64 packet_len:16,
+ rxq:6,
+ reserved:10,
+ cmd_id:16,
+ reserved1:16;
+} __packed;
+
+/* Internal data buffer
+ * @data: pointer to the data buffer
+ * @p_addr: physical address of data buffer
+ */
+struct data_buf {
+ u8 *data;
+ dma_addr_t data_p_addr;
+};
+
+/* Index Array */
+struct ia {
+ dma_addr_t tr_hia_p_addr;
+ u16 *tr_hia;
+ dma_addr_t tr_tia_p_addr;
+ u16 *tr_tia;
+ dma_addr_t cr_hia_p_addr;
+ u16 *cr_hia;
+ dma_addr_t cr_tia_p_addr;
+ u16 *cr_tia;
+};
+
+/* Structure for TX Queue
+ * @count: Number of descriptors
+ * @tfds: Array of TFD
+ * @urbd0s: Array of URBD0
+ * @bufs: Array of data_buf structures
+ */
+struct txq {
+ u16 count;
+
+ dma_addr_t tfds_p_addr;
+ struct tfd *tfds;
+
+ dma_addr_t urbd0s_p_addr;
+ struct urbd0 *urbd0s;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+/* Structure for RX Queue
+ * @count: Number of descriptors
+ * @frbds: Array of FRBD
+ * @urbd1s: Array of URBD1
+ * @bufs: Array of data_buf structures
+ */
+struct rxq {
+ u16 count;
+
+ dma_addr_t frbds_p_addr;
+ struct frbd *frbds;
+
+ dma_addr_t urbd1s_p_addr;
+ struct urbd1 *urbd1s;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+/* struct btintel_pcie_data
+ * @pdev: pci device
+ * @hdev: hdev device
+ * @flags: driver state
+ * @irq_lock: spinlock for MSI-X
+ * @hci_rx_lock: spinlock for HCI RX flow
+ * @base_addr: pci base address (from BAR)
+ * @msix_entries: array of MSI-X entries
+ * @msix_enabled: true if MSI-X is enabled
+ * @alloc_vecs: number of interrupt vectors allocated
+ * @def_irq: default irq for all causes
+ * @fh_init_mask: initial unmasked rxq causes
+ * @hw_init_mask: initial unmasked hw causes
+ * @boot_stage_cache: cached value of boot stage register
+ * @img_resp_cache: cached value of image response register
+ * @cnvi: CNVi register value
+ * @cnvr: CNVr register value
+ * @gp0_received: condition for gp0 interrupt
+ * @gp0_wait_q: wait_q for gp0 interrupt
+ * @tx_wait_done: condition for tx interrupt
+ * @tx_wait_q: wait_q for tx interrupt
+ * @workqueue: workqueue for RX work
+ * @rx_skb_q: SKB queue for RX packet
+ * @rx_work: RX work struct to process the RX packet in @rx_skb_q
+ * @dma_pool: DMA pool for descriptors, index array and ci
+ * @dma_p_addr: DMA address for pool
+ * @dma_v_addr: address of pool
+ * @ci_p_addr: DMA address for CI struct
+ * @ci: CI struct
+ * @ia: Index Array struct
+ * @txq: TX Queue struct
+ * @rxq: RX Queue struct
+ */
+struct btintel_pcie_data {
+ struct pci_dev *pdev;
+ struct hci_dev *hdev;
+
+ unsigned long flags;
+ /* lock used in MSI-X interrupt */
+ spinlock_t irq_lock;
+ /* lock to serialize rx events */
+ spinlock_t hci_rx_lock;
+
+ void __iomem *base_addr;
+
+ struct msix_entry msix_entries[BTINTEL_PCIE_MSIX_VEC_MAX];
+ bool msix_enabled;
+ u32 alloc_vecs;
+ u32 def_irq;
+
+ u32 fh_init_mask;
+ u32 hw_init_mask;
+
+ u32 boot_stage_cache;
+ u32 img_resp_cache;
+
+ u32 cnvi;
+ u32 cnvr;
+
+ bool gp0_received;
+ wait_queue_head_t gp0_wait_q;
+
+ bool tx_wait_done;
+ wait_queue_head_t tx_wait_q;
+
+ struct workqueue_struct *workqueue;
+ struct sk_buff_head rx_skb_q;
+ struct work_struct rx_work;
+
+ struct dma_pool *dma_pool;
+ dma_addr_t dma_p_addr;
+ void *dma_v_addr;
+
+ dma_addr_t ci_p_addr;
+ struct ctx_info *ci;
+ struct ia ia;
+ struct txq txq;
+ struct rxq rxq;
+};
+
+static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
+ u32 offset)
+{
+ return ioread32(data->base_addr + offset);
+}
+
+static inline void btintel_pcie_wr_reg8(struct btintel_pcie_data *data,
+ u32 offset, u8 val)
+{
+ iowrite8(val, data->base_addr + offset);
+}
+
+static inline void btintel_pcie_wr_reg32(struct btintel_pcie_data *data,
+ u32 offset, u32 val)
+{
+ iowrite32(val, data->base_addr + offset);
+}
+
+static inline void btintel_pcie_set_reg_bits(struct btintel_pcie_data *data,
+ u32 offset, u32 bits)
+{
+ u32 r;
+
+ r = ioread32(data->base_addr + offset);
+ r |= bits;
+ iowrite32(r, data->base_addr + offset);
+}
+
+static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
+ u32 offset, u32 bits)
+{
+ u32 r;
+
+ r = ioread32(data->base_addr + offset);
+ r &= ~bits;
+ iowrite32(r, data->base_addr + offset);
+}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9658b33c824a..18f34998a120 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
((event->data[2] == MODULE_BROUGHT_UP) ||
(event->data[2] == MODULE_ALREADY_UP)) ?
"Bring-up succeed" : "Bring-up failed");
-
- if (event->length > 3 && event->data[3])
- priv->btmrvl_dev.dev_type = HCI_AMP;
- else
- priv->btmrvl_dev.dev_type = HCI_PRIMARY;
-
- BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
- hdev->dev_type = priv->btmrvl_dev.dev_type;
-
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d76c799553aa..85b7f2bb4259 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1736,7 +1736,6 @@ static struct sdio_driver bt_mrvl_sdio = {
.probe = btmrvl_sdio_probe,
.remove = btmrvl_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.coredump = btmrvl_sdio_coredump,
.pm = &btmrvl_sdio_pm_ops,
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index ac8ebccd3507..812fd2a8f853 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
break;
+ }
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- if (err < 0)
- kfree_skb(skb);
-
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index ff4868c83cd8..8ded9ef8089a 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1519,7 +1519,6 @@ static struct sdio_driver btmtksdio_driver = {
.remove = btmtksdio_remove,
.id_table = btmtksdio_table,
.drv = {
- .owner = THIS_MODULE,
.pm = BTMTKSDIO_PM_OPS,
}
};
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index b40b32fa7f1c..dfbbac92242a 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -13,8 +13,6 @@
#include "btqca.h"
-#define VERSION "0.1"
-
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type soc_type)
{
@@ -55,11 +53,6 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl) {
- bt_dev_err(hdev, "QCA TLV with no header");
- err = -EILSEQ;
- goto out;
- }
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != rtype) {
@@ -99,7 +92,8 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
- char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+ char *build_label;
+ char cmd;
int build_lbl_len, err = 0;
bt_dev_dbg(hdev, "QCA read fw build info");
@@ -114,13 +108,13 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
return err;
}
- edl = (struct edl_event_hdr *)(skb->data);
- if (!edl) {
- bt_dev_err(hdev, "QCA read fw build info with no header");
+ if (skb->len < sizeof(*edl)) {
err = -EILSEQ;
goto out;
}
+ edl = (struct edl_event_hdr *)(skb->data);
+
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_GET_BUILD_INFO_CMD) {
bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
@@ -129,14 +123,27 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
goto out;
}
+ if (skb->len < sizeof(*edl) + 1) {
+ err = -EILSEQ;
+ goto out;
+ }
+
build_lbl_len = edl->data[0];
- if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
- memcpy(build_label, edl->data + 1, build_lbl_len);
- *(build_label + build_lbl_len) = '\0';
+
+ if (skb->len < sizeof(*edl) + 1 + build_lbl_len) {
+ err = -EILSEQ;
+ goto out;
+ }
+
+ build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
+ if (!build_label) {
+ err = -ENOMEM;
+ goto out;
}
hci_set_fw_info(hdev, "%s", build_label);
+ kfree(build_label);
out:
kfree_skb(skb);
return err;
@@ -166,11 +173,6 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl) {
- bt_dev_err(hdev, "QCA Patch config with no header");
- err = -EILSEQ;
- goto out;
- }
if (edl->cresp != EDL_PATCH_CONFIG_RES_EVT || edl->rtype != EDL_PATCH_CONFIG_CMD) {
bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
@@ -235,6 +237,11 @@ static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid)
goto out;
}
+ if (skb->len < 3) {
+ err = -EILSEQ;
+ goto out;
+ }
+
*bid = (edl->data[1] << 8) + edl->data[2];
bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid);
@@ -265,9 +272,10 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct hci_dev *hdev,
+static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
- u8 *fw_data, enum qca_btsoc_type soc_type)
+ u8 *fw_data, size_t fw_size,
+ enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -277,12 +285,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
struct tlv_type_patch *tlv_patch;
struct tlv_type_nvm *tlv_nvm;
uint8_t nvm_baud_rate = config->user_baud_rate;
+ u8 type;
config->dnld_mode = QCA_SKIP_EVT_NONE;
config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
case ELF_TYPE_PATCH:
+ if (fw_size < 7)
+ return -EINVAL;
+
config->dnld_mode = QCA_SKIP_EVT_VSE_CC;
config->dnld_type = QCA_SKIP_EVT_VSE_CC;
@@ -291,6 +303,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]);
break;
case TLV_TYPE_PATCH:
+ if (fw_size < sizeof(struct tlv_type_hdr) + sizeof(struct tlv_type_patch))
+ return -EINVAL;
+
tlv = (struct tlv_type_hdr *)fw_data;
type_len = le32_to_cpu(tlv->type_len);
tlv_patch = (struct tlv_type_patch *)tlv->data;
@@ -330,25 +345,64 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
case TLV_TYPE_NVM:
+ if (fw_size < sizeof(struct tlv_type_hdr))
+ return -EINVAL;
+
tlv = (struct tlv_type_hdr *)fw_data;
type_len = le32_to_cpu(tlv->type_len);
- length = (type_len >> 8) & 0x00ffffff;
+ length = type_len >> 8;
+ type = type_len & 0xff;
- BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+ /* Some NVM files have more than one set of tags; only parse
+ * the first set when it has type 2 for now. When there is
+ * more than one set, there is an enclosing header of type 4.
+ */
+ if (type == 4) {
+ if (fw_size < 2 * sizeof(struct tlv_type_hdr))
+ return -EINVAL;
+
+ tlv++;
+
+ type_len = le32_to_cpu(tlv->type_len);
+ length = type_len >> 8;
+ type = type_len & 0xff;
+ }
+
+ BT_DBG("TLV Type\t\t : 0x%x", type);
BT_DBG("Length\t\t : %d bytes", length);
+ if (type != 2)
+ break;
+
+ if (fw_size < length + (tlv->data - fw_data))
+ return -EINVAL;
+
idx = 0;
data = tlv->data;
- while (idx < length) {
+ while (idx < length - sizeof(struct tlv_type_nvm)) {
tlv_nvm = (struct tlv_type_nvm *)(data + idx);
tag_id = le16_to_cpu(tlv_nvm->tag_id);
tag_len = le16_to_cpu(tlv_nvm->tag_len);
+ if (length < idx + sizeof(struct tlv_type_nvm) + tag_len)
+ return -EINVAL;
+
/* Update NVM tags as needed */
switch (tag_id) {
+ case EDL_TAG_ID_BD_ADDR:
+ if (tag_len != sizeof(bdaddr_t))
+ return -EINVAL;
+
+ memcpy(&config->bdaddr, tlv_nvm->data, sizeof(bdaddr_t));
+
+ break;
+
case EDL_TAG_ID_HCI:
+ if (tag_len < 3)
+ return -EINVAL;
+
/* HCI transport layer parameters
* enabling software inband sleep
* onto controller side.
@@ -364,6 +418,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
case EDL_TAG_ID_DEEP_SLEEP:
+ if (tag_len < 1)
+ return -EINVAL;
+
/* Sleep enable mask
* enabling deep sleep feature on controller.
*/
@@ -372,14 +429,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
}
- idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+ idx += sizeof(struct tlv_type_nvm) + tag_len;
}
break;
default:
BT_ERR("Unknown TLV type %d", config->type);
- break;
+ return -EINVAL;
}
+
+ return 0;
}
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
@@ -428,11 +487,6 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl) {
- bt_dev_err(hdev, "TLV with no header");
- err = -EILSEQ;
- goto out;
- }
if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
@@ -529,7 +583,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
memcpy(data, fw->data, size);
release_firmware(fw);
- qca_tlv_check_data(hdev, config, data, soc_type);
+ ret = qca_tlv_check_data(hdev, config, data, size, soc_type);
+ if (ret)
+ goto out;
segment = data;
remain = size;
@@ -612,6 +668,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *config)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read device address (%d)", err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ bt_dev_err(hdev, "Device address length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bacmp(&bda->bdaddr, &config->bdaddr))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
@@ -629,11 +717,24 @@ static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
}
+static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
+ const char *stem, u8 rom_ver, u16 bid)
+{
+ if (bid == 0x0)
+ snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
+ else if (bid & 0xff00)
+ snprintf(cfg->fwname, sizeof(cfg->fwname),
+ "qca/%snv%02x.b%x", stem, rom_ver, bid);
+ else
+ snprintf(cfg->fwname, sizeof(cfg->fwname),
+ "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+}
+
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
const char *firmware_name)
{
- struct qca_fw_config config;
+ struct qca_fw_config config = {};
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -709,7 +810,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Give the controller some time to get ready to receive the NVM */
msleep(10);
- if (soc_type == QCA_QCA2066)
+ if (soc_type == QCA_QCA2066 || soc_type == QCA_WCN7850)
qca_read_fw_board_id(hdev, &boardid);
/* Download NVM configuration */
@@ -751,8 +852,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/hpnv%02x.bin", rom_ver);
break;
case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtnv%02x.bin", rom_ver);
+ qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
break;
default:
@@ -818,6 +918,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
break;
}
+ err = qca_check_bdaddr(hdev, &config);
+ if (err)
+ return err;
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
@@ -826,11 +930,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
+ bdaddr_t bdaddr_swapped;
struct sk_buff *skb;
int err;
- skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ baswap(&bdaddr_swapped, bdaddr);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+ &bdaddr_swapped, HCI_EV_VENDOR,
+ HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
@@ -845,6 +953,5 @@ EXPORT_SYMBOL_GPL(qca_set_bdaddr);
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
-MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family");
MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index dc31984f71dc..bb5207d7a8c7 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -5,32 +5,33 @@
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*/
-#define EDL_PATCH_CMD_OPCODE (0xFC00)
-#define EDL_NVM_ACCESS_OPCODE (0xFC0B)
-#define EDL_WRITE_BD_ADDR_OPCODE (0xFC14)
-#define EDL_PATCH_CMD_LEN (1)
-#define EDL_PATCH_VER_REQ_CMD (0x19)
-#define EDL_PATCH_TLV_REQ_CMD (0x1E)
-#define EDL_GET_BUILD_INFO_CMD (0x20)
-#define EDL_GET_BID_REQ_CMD (0x23)
-#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
-#define EDL_PATCH_CONFIG_CMD (0x28)
-#define MAX_SIZE_PER_TLV_SEGMENT (243)
-#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
-#define QCA_DISABLE_LOGGING (0xFC17)
-
-#define EDL_CMD_REQ_RES_EVT (0x00)
-#define EDL_PATCH_VER_RES_EVT (0x19)
-#define EDL_APP_VER_RES_EVT (0x02)
-#define EDL_TVL_DNLD_RES_EVT (0x04)
-#define EDL_CMD_EXE_STATUS_EVT (0x00)
-#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
-#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
-#define EDL_PATCH_CONFIG_RES_EVT (0x00)
-#define QCA_DISABLE_LOGGING_SUB_OP (0x14)
-
-#define EDL_TAG_ID_HCI (17)
-#define EDL_TAG_ID_DEEP_SLEEP (27)
+#define EDL_PATCH_CMD_OPCODE 0xFC00
+#define EDL_NVM_ACCESS_OPCODE 0xFC0B
+#define EDL_WRITE_BD_ADDR_OPCODE 0xFC14
+#define EDL_PATCH_CMD_LEN 1
+#define EDL_PATCH_VER_REQ_CMD 0x19
+#define EDL_PATCH_TLV_REQ_CMD 0x1E
+#define EDL_GET_BUILD_INFO_CMD 0x20
+#define EDL_GET_BID_REQ_CMD 0x23
+#define EDL_NVM_ACCESS_SET_REQ_CMD 0x01
+#define EDL_PATCH_CONFIG_CMD 0x28
+#define MAX_SIZE_PER_TLV_SEGMENT 243
+#define QCA_PRE_SHUTDOWN_CMD 0xFC08
+#define QCA_DISABLE_LOGGING 0xFC17
+
+#define EDL_CMD_REQ_RES_EVT 0x00
+#define EDL_PATCH_VER_RES_EVT 0x19
+#define EDL_APP_VER_RES_EVT 0x02
+#define EDL_TVL_DNLD_RES_EVT 0x04
+#define EDL_CMD_EXE_STATUS_EVT 0x00
+#define EDL_SET_BAUDRATE_RSP_EVT 0x92
+#define EDL_NVM_ACCESS_CODE_EVT 0x0B
+#define EDL_PATCH_CONFIG_RES_EVT 0x00
+#define QCA_DISABLE_LOGGING_SUB_OP 0x14
+
+#define EDL_TAG_ID_BD_ADDR 2
+#define EDL_TAG_ID_HCI 17
+#define EDL_TAG_ID_DEEP_SLEEP 27
#define QCA_WCN3990_POWERON_PULSE 0xFC
#define QCA_WCN3990_POWEROFF_PULSE 0xC0
@@ -38,7 +39,7 @@
#define QCA_HCI_CC_OPCODE 0xFC00
#define QCA_HCI_CC_SUCCESS 0x00
-#define QCA_WCN3991_SOC_ID (0x40014320)
+#define QCA_WCN3991_SOC_ID 0x40014320
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
@@ -47,12 +48,11 @@
#define get_soc_ver(soc_id, rom_ver) \
((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver)))
-#define QCA_FW_BUILD_VER_LEN 255
-#define QCA_HSP_GF_SOC_ID 0x1200
-#define QCA_HSP_GF_SOC_MASK 0x0000ff00
+#define QCA_HSP_GF_SOC_ID 0x1200
+#define QCA_HSP_GF_SOC_MASK 0x0000ff00
enum qca_baudrate {
- QCA_BAUDRATE_115200 = 0,
+ QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
QCA_BAUDRATE_38400,
QCA_BAUDRATE_19200,
@@ -71,7 +71,7 @@ enum qca_baudrate {
QCA_BAUDRATE_1600000,
QCA_BAUDRATE_3200000,
QCA_BAUDRATE_3500000,
- QCA_BAUDRATE_AUTO = 0xFE,
+ QCA_BAUDRATE_AUTO = 0xFE,
QCA_BAUDRATE_RESERVED
};
@@ -94,6 +94,7 @@ struct qca_fw_config {
uint8_t user_baud_rate;
enum qca_tlv_dnld_mode dnld_mode;
enum qca_tlv_dnld_mode dnld_type;
+ bdaddr_t bdaddr;
};
struct edl_event_hdr {
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 11c7e04bf394..88dbb2f3fabf 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -197,7 +197,7 @@ destroy_acl_channel:
return ret;
}
-static int btqcomsmd_remove(struct platform_device *pdev)
+static void btqcomsmd_remove(struct platform_device *pdev)
{
struct btqcomsmd *btq = platform_get_drvdata(pdev);
@@ -206,8 +206,6 @@ static int btqcomsmd_remove(struct platform_device *pdev)
rpmsg_destroy_ept(btq->cmd_channel);
rpmsg_destroy_ept(btq->acl_channel);
-
- return 0;
}
static const struct of_device_id btqcomsmd_of_match[] = {
@@ -218,7 +216,7 @@ MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
static struct platform_driver btqcomsmd_driver = {
.probe = btqcomsmd_probe,
- .remove = btqcomsmd_remove,
+ .remove_new = btqcomsmd_remove,
.driver = {
.name = "btqcomsmd",
.of_match_table = btqcomsmd_of_match,
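The same .remove_new conversion recurs below for hci_bcm and hci_intel: the platform_driver remove callback now returns void, so the unconditional "return 0" boilerplate disappears. A generic sketch of the resulting pattern follows; the foo names and struct foo_data are hypothetical stand-ins, not taken from these drivers:

static void foo_remove(struct platform_device *pdev)
{
	struct foo_data *foo = platform_get_drvdata(pdev);

	/* Tear down resources; remove-time errors cannot be reported anyway */
	foo_teardown(foo);
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove_new = foo_remove,	/* void-returning remove callback */
	.driver = {
		.name = "foo",
	},
};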
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 634cf8f5ed2d..0c91d7635ac3 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, h_adapter);
- hdev->dev_type = HCI_PRIMARY;
hdev->open = rsi_hci_open;
hdev->close = rsi_hci_close;
hdev->flush = rsi_hci_flush;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index cc50de69e8dc..4f1e37b4f780 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1339,6 +1339,13 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
+ hci_set_hw_info(hdev,
+ "RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
+ btrtl_dev->ic_info->lmp_subver,
+ btrtl_dev->ic_info->hci_rev,
+ btrtl_dev->ic_info->hci_ver,
+ btrtl_dev->ic_info->hci_bus);
+
btrtl_free(btrtl_dev);
return ret;
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index f19d31ee37ea..fdcfe9c50313 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = {
/* Generic Bluetooth Type-B SDIO device */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
- /* Generic Bluetooth AMP controller */
- { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
-
{ } /* Terminating entry */
};
@@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func,
hdev->bus = HCI_SDIO;
hci_set_drvdata(hdev, data);
- if (id->class == SDIO_CLASS_BT_AMP)
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &func->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 06e915b57283..e384ef6ff050 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -477,6 +477,7 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
@@ -542,6 +543,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -586,6 +589,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -595,6 +601,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -610,10 +619,12 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
+ { USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+
+ /* MediaTek MT7922 Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -624,12 +635,6 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
- { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -654,14 +659,38 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -2949,7 +2978,7 @@ static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val)
err = usb_control_msg(data->udev, pipe, 0x01,
0xDE,
reg >> 16, reg & 0xffff,
- buf, 4, USB_CTRL_SET_TIMEOUT);
+ buf, 4, USB_CTRL_GET_TIMEOUT);
if (err < 0) {
bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
goto err_free_buf;
@@ -2977,7 +3006,7 @@ static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val)
err = usb_control_msg(data->udev, pipe, 0x63,
USB_TYPE_VENDOR | USB_DIR_IN,
reg >> 16, reg & 0xffff,
- buf, size, USB_CTRL_SET_TIMEOUT);
+ buf, size, USB_CTRL_GET_TIMEOUT);
if (err < 0)
goto err_free_buf;
@@ -3116,6 +3145,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
return err;
}
+ fw_flavor = (fw_flavor & 0x00000080) >> 7;
}
mediatek = hci_get_priv(hdev);
@@ -3480,13 +3510,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
/*
@@ -3692,7 +3721,7 @@ static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
*/
pipe = usb_rcvctrlpipe(udev, 0);
err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
- 0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ 0, 0, buf, size, USB_CTRL_GET_TIMEOUT);
if (err < 0) {
dev_err(&udev->dev, "Failed to access otp area (%d)", err);
goto done;
@@ -4330,11 +4359,6 @@ static int btusb_probe(struct usb_interface *intf,
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
- if (id->driver_info & BTUSB_AMP)
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 874d23089b39..89d4c2224546 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1293,7 +1293,7 @@ static int bcm_probe(struct platform_device *pdev)
return 0;
}
-static int bcm_remove(struct platform_device *pdev)
+static void bcm_remove(struct platform_device *pdev)
{
struct bcm_device *dev = platform_get_drvdata(pdev);
@@ -1302,8 +1302,6 @@ static int bcm_remove(struct platform_device *pdev)
mutex_unlock(&bcm_device_lock);
dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
-
- return 0;
}
static const struct hci_uart_proto bcm_proto = {
@@ -1487,7 +1485,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2EA1" },
{ "BCM2EA2", (long)&bcm43430_device_data },
{ "BCM2EA3", (long)&bcm43430_device_data },
- { "BCM2EA4" },
+ { "BCM2EA4", (long)&bcm43430_device_data }, /* bcm43455 */
{ "BCM2EA5" },
{ "BCM2EA6" },
{ "BCM2EA7" },
@@ -1509,7 +1507,7 @@ static const struct dev_pm_ops bcm_pm_ops = {
static struct platform_driver bcm_driver = {
.probe = bcm_probe,
- .remove = bcm_remove,
+ .remove_new = bcm_remove,
.driver = {
.name = "hci_bcm",
.acpi_match_table = ACPI_PTR(bcm_acpi_match),
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 9a7243d5db71..0c2f15235b4c 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -2361,7 +2361,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bcm4377->hdev = hdev;
hdev->bus = HCI_PCI;
- hdev->dev_type = HCI_PRIMARY;
hdev->open = bcm4377_hci_open;
hdev->close = bcm4377_hci_close;
hdev->send = bcm4377_hci_send_frame;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 78afb9a348e7..999ccd5bb4f2 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -537,7 +537,7 @@ static int intel_setup(struct hci_uart *hu)
int speed_change = 0;
int err;
- bt_dev_dbg(hdev, "start intel_setup");
+ bt_dev_dbg(hdev, "");
hu->hdev->set_diag = btintel_set_diag;
hu->hdev->set_bdaddr = btintel_set_bdaddr;
@@ -591,12 +591,12 @@ static int intel_setup(struct hci_uart *hu)
return -EINVAL;
}
- /* Check for supported iBT hardware variants of this firmware
- * loading method.
- *
- * This check has been put in place to ensure correct forward
- * compatibility options when newer hardware variants come along.
- */
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
+ */
switch (ver.hw_variant) {
case 0x0b: /* LnP */
case 0x0c: /* WsP */
@@ -777,7 +777,7 @@ static int intel_setup(struct hci_uart *hu)
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
@@ -822,7 +822,7 @@ done:
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
bt_dev_info(hdev, "Device booted in %llu usecs", duration);
@@ -977,6 +977,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
int err = PTR_ERR(intel->rx_skb);
+
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
intel->rx_skb = NULL;
return err;
@@ -1190,7 +1191,7 @@ no_irq:
return 0;
}
-static int intel_remove(struct platform_device *pdev)
+static void intel_remove(struct platform_device *pdev)
{
struct intel_device *idev = platform_get_drvdata(pdev);
@@ -1201,13 +1202,11 @@ static int intel_remove(struct platform_device *pdev)
mutex_unlock(&intel_device_list_lock);
dev_info(&pdev->dev, "unregistered.\n");
-
- return 0;
}
static struct platform_driver intel_driver = {
.probe = intel_probe,
- .remove = intel_remove,
+ .remove_new = intel_remove,
.driver = {
.name = "hci_intel",
.acpi_match_table = ACPI_PTR(intel_acpi_match),
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index a26367e9fb19..17a2f158a0df 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu)
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
*/
@@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
{
unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
BIT(HCI_UART_RESET_ON_INIT) |
- BIT(HCI_UART_CREATE_AMP) |
BIT(HCI_UART_INIT_PENDING) |
BIT(HCI_UART_EXT_CONFIG) |
BIT(HCI_UART_VND_DETECT);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 8a60ad7acd70..0c9c9ee56592 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,7 +7,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
+ bool bdaddr_property_broken;
const char *firmware_name;
};
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@@ -1843,6 +1846,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
+ struct qca_serdev *qcadev;
const char *soc_name;
ret = qca_check_speeds(hu);
@@ -1904,16 +1908,9 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
-
- /* Set BDA quirk bit for reading BDA value from fwnode property
- * only if that property exist in DT.
- */
- if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
- } else {
- bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
- }
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->bdaddr_property_broken)
+ set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -1961,8 +1958,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
- hu->hdev->wakeup = qca_wakeup;
+ if (hu->serdev) {
+ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+ hu->hdev->wakeup = qca_wakeup;
+ }
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@@ -2295,6 +2294,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
+ qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+ "qcom,local-bd-address-broken");
+
if (data)
qcadev->btsoc_type = data->soc_type;
else
@@ -2330,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2358,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 85c0d9b68f5f..89a22e9b3253 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -366,11 +366,6 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 68c8c7e95d64..00bf7ae82c5b 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -37,7 +37,6 @@
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
-#define HCI_UART_CREATE_AMP 2
#define HCI_UART_INIT_PENDING 3
#define HCI_UART_EXT_CONFIG 4
#define HCI_UART_VND_DETECT 5
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 572d68d52965..28750a40f0ed 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -384,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
struct sk_buff *skb;
- __u8 dev_type;
if (data->hdev)
return -EBADFD;
- /* bits 0-1 are dev_type (Primary or AMP) */
- dev_type = opcode & 0x03;
-
- if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
- return -EINVAL;
-
/* bits 2-5 are reserved (must be zero) */
if (opcode & 0x3c)
return -EINVAL;
@@ -412,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
- hdev->dev_type = dev_type;
hci_set_drvdata(hdev, data);
hdev->open = vhci_open_dev;
@@ -634,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work)
struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work);
- vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY);
+ vhci_create_device(data, 0x00);
}
static int vhci_open(struct inode *inode, struct file *file)
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 2ac70b560c46..18208e152a36 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev)
switch (type) {
case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
- case VIRTIO_BT_CONFIG_TYPE_AMP:
break;
default:
return -EINVAL;
@@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev)
vbt->hdev = hdev;
hdev->bus = HCI_VIRTIO;
- hdev->dev_type = type;
hci_set_drvdata(hdev, vbt);
hdev->open = virtbt_open;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d5e7fa9173a1..64cd2ee03aa3 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -163,6 +163,16 @@ config QCOM_SSC_BLOCK_BUS
i2c/spi/uart controllers, a hexagon core, and a clock controller
which provides clocks for the above.
+config STM32_FIREWALL
+ bool "STM32 Firewall framework"
+ depends on (ARCH_STM32 || COMPILE_TEST) && OF
+ select OF_DYNAMIC
+ help
+ Say y to enable the STM32 firewall framework and its services. Firewall
+ controllers can register with the framework. Access to hardware
+ resources linked to a firewall controller can then be requested
+ through this framework.
+
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
default ARM64
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index d90eed189a65..cddd4984d6af 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
+obj-$(CONFIG_STM32_FIREWALL) += stm32_firewall.o stm32_rifsc.o stm32_etzpc.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_OF) += simple-pm-bus.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 65ae758f3194..ee29162da4ee 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -410,6 +410,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
{ },
};
+MODULE_DEVICE_TABLE(of, brcmstb_gisb_arb_of_match);
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 5fe49311b8eb..aaad40a07f69 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -80,6 +80,7 @@ enum dev_st_transition {
DEV_ST_TRANSITION_FP,
DEV_ST_TRANSITION_SYS_ERR,
DEV_ST_TRANSITION_DISABLE,
+ DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE,
DEV_ST_TRANSITION_MAX,
};
@@ -90,7 +91,8 @@ enum dev_st_transition {
dev_st_trans(MISSION_MODE, "MISSION MODE") \
dev_st_trans(FP, "FLASH PROGRAMMER") \
dev_st_trans(SYS_ERR, "SYS ERROR") \
- dev_st_trans_end(DISABLE, "DISABLE")
+ dev_st_trans(DISABLE, "DISABLE") \
+ dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)")
extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 8b40d3f01acc..11c0e751f223 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -468,7 +468,8 @@ error_mission_mode:
}
/* Handle shutdown transitions */
-static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ bool destroy_device)
{
enum mhi_pm_state cur_state;
struct mhi_event *mhi_event;
@@ -530,8 +531,16 @@ skip_mhi_reset:
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
- device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+ /*
+ * Only destroy the 'struct device' for channels if indicated by the
+ * 'destroy_device' flag: during system suspend or hibernation there is
+ * no need to destroy the 'struct device', as the endpoint device is
+ * still physically attached to the machine.
+ */
+ if (destroy_device) {
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+ }
mutex_lock(&mhi_cntrl->pm_mutex);
@@ -821,7 +830,10 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_pm_sys_error_transition(mhi_cntrl);
break;
case DEV_ST_TRANSITION_DISABLE:
- mhi_pm_disable_transition(mhi_cntrl);
+ mhi_pm_disable_transition(mhi_cntrl, false);
+ break;
+ case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE:
+ mhi_pm_disable_transition(mhi_cntrl, true);
break;
default:
break;
@@ -1175,7 +1187,8 @@ error_exit:
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
-void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
+ bool destroy_device)
{
enum mhi_pm_state cur_state, transition_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -1211,15 +1224,32 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
- mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+ if (destroy_device)
+ mhi_queue_state_transition(mhi_cntrl,
+ DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
+ else
+ mhi_queue_state_transition(mhi_cntrl,
+ DEV_ST_TRANSITION_DISABLE);
/* Wait for shutdown to complete */
flush_work(&mhi_cntrl->st_worker);
disable_irq(mhi_cntrl->irq[0]);
}
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ __mhi_power_down(mhi_cntrl, graceful, true);
+}
EXPORT_SYMBOL_GPL(mhi_power_down);
+void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl,
+ bool graceful)
+{
+ __mhi_power_down(mhi_cntrl, graceful, false);
+}
+EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev);
+
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
int ret = mhi_async_power_up(mhi_cntrl);
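The newly exported mhi_power_down_keep_dev() takes the same arguments as mhi_power_down() but queues DEV_ST_TRANSITION_DISABLE, so the client 'struct device' instances survive, while mhi_power_down() keeps the old destroy-on-shutdown behaviour via DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE. A minimal sketch of how a controller driver might pair the two helpers; the my_mhi_ctrl wrapper and the suspend/resume pairing shown are assumptions for illustration, not taken from this patch:

struct my_mhi_ctrl {
	struct mhi_controller *mhi_cntrl;	/* hypothetical controller wrapper */
};

static int my_mhi_suspend(struct my_mhi_ctrl *ctrl)
{
	/* Power down the link but keep MHI client devices registered */
	mhi_power_down_keep_dev(ctrl->mhi_cntrl, true);
	return 0;
}

static int my_mhi_resume(struct my_mhi_ctrl *ctrl)
{
	/* Client devices were never destroyed, so just power back up */
	return mhi_sync_power_up(ctrl->mhi_cntrl);
}

static void my_mhi_remove(struct my_mhi_ctrl *ctrl)
{
	/* Full teardown: destroy client devices as before */
	mhi_power_down(ctrl->mhi_cntrl, true);
}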
diff --git a/drivers/bus/stm32_etzpc.c b/drivers/bus/stm32_etzpc.c
new file mode 100644
index 000000000000..7fc0f16960be
--- /dev/null
+++ b/drivers/bus/stm32_etzpc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * ETZPC registers
+ */
+#define ETZPC_DECPROT 0x10
+#define ETZPC_HWCFGR 0x3F0
+
+/*
+ * HWCFGR register
+ */
+#define ETZPC_HWCFGR_NUM_TZMA GENMASK(7, 0)
+#define ETZPC_HWCFGR_NUM_PER_SEC GENMASK(15, 8)
+#define ETZPC_HWCFGR_NUM_AHB_SEC GENMASK(23, 16)
+#define ETZPC_HWCFGR_CHUNKS1N4 GENMASK(31, 24)
+
+/*
+ * ETZPC miscellaneous
+ */
+#define ETZPC_PROT_MASK GENMASK(1, 0)
+#define ETZPC_PROT_A7NS 0x3
+#define ETZPC_DECPROT_SHIFT 1
+
+#define IDS_PER_DECPROT_REGS 16
+
+static int stm32_etzpc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ u32 offset, reg_offset, sec_val;
+
+ if (firewall_id >= ctrl->max_entries) {
+ dev_err(ctrl->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /* Check access configuration, 16 peripherals per register */
+ reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS);
+ offset = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT;
+
+ /* Verify peripheral is non-secure and attributed to cortex A7 */
+ sec_val = (readl(ctrl->mmio + reg_offset) >> offset) & ETZPC_PROT_MASK;
+ if (sec_val != ETZPC_PROT_A7NS) {
+ dev_dbg(ctrl->dev, "Invalid bus configuration: reg_offset %#x, value %d\n",
+ reg_offset, sec_val);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static void stm32_etzpc_release_access(struct stm32_firewall_controller *ctrl __maybe_unused,
+ u32 firewall_id __maybe_unused)
+{
+}
+
+static int stm32_etzpc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *etzpc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 nb_per, nb_master;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ etzpc_controller = devm_kzalloc(&pdev->dev, sizeof(*etzpc_controller), GFP_KERNEL);
+ if (!etzpc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ etzpc_controller->dev = &pdev->dev;
+ etzpc_controller->mmio = mmio;
+ etzpc_controller->name = dev_driver_string(etzpc_controller->dev);
+ etzpc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ etzpc_controller->grant_access = stm32_etzpc_grant_access;
+ etzpc_controller->release_access = stm32_etzpc_release_access;
+
+ /* Get number of ETZPC entries */
+ nb_per = FIELD_GET(ETZPC_HWCFGR_NUM_PER_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ nb_master = FIELD_GET(ETZPC_HWCFGR_NUM_AHB_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ etzpc_controller->max_entries = nb_per + nb_master;
+
+ platform_set_drvdata(pdev, etzpc_controller);
+
+ rc = stm32_firewall_controller_register(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't populate ETZPC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_etzpc_of_match[] = {
+ { .compatible = "st,stm32-etzpc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_etzpc_of_match);
+
+static struct platform_driver stm32_etzpc_driver = {
+ .probe = stm32_etzpc_probe,
+ .driver = {
+ .name = "stm32-etzpc",
+ .of_match_table = stm32_etzpc_of_match,
+ },
+};
+module_platform_driver(stm32_etzpc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ETZPC driver");
+MODULE_LICENSE("GPL");
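For reference, the DECPROT addressing above packs 16 peripherals into each 32-bit register at 2 bits apiece. An illustrative helper (not part of the patch) spelling out the arithmetic used by stm32_etzpc_grant_access(): firewall_id 21 resolves to register offset 0x14 (DECPROT1) and bit shift 10, and access is granted only when that 2-bit field reads back ETZPC_PROT_A7NS (0x3).

static u32 etzpc_decprot_field(void __iomem *base, u32 firewall_id)
{
	/* One 32-bit DECPROT register per 16 IDs, 2 bits per peripheral */
	u32 reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS);
	u32 shift = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT;

	return (readl(base + reg_offset) >> shift) & ETZPC_PROT_MASK;
}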
diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c
new file mode 100644
index 000000000000..2fc9761dadec
--- /dev/null
+++ b/drivers/bus/stm32_firewall.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "stm32_firewall.h"
+
+/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */
+#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1)
+
+static LIST_HEAD(firewall_controller_list);
+static DEFINE_MUTEX(firewall_controller_list_lock);
+
+/* Firewall device API */
+
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ struct stm32_firewall_controller *ctrl;
+ struct of_phandle_iterator it;
+ unsigned int i, j = 0;
+ int err;
+
+ if (!firewall || !nb_firewall)
+ return -EINVAL;
+
+ /* Parse property with phandle parsed out */
+ of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) {
+ struct of_phandle_args provider_args;
+ struct device_node *provider = it.node;
+ const char *fw_entry;
+ bool match = false;
+
+ if (err) {
+ pr_err("Unable to get access-controllers property for node %s\n, err: %d",
+ np->full_name, err);
+ of_node_put(provider);
+ return err;
+ }
+
+ if (j >= nb_firewall) {
+ pr_err("Too many firewall controllers");
+ of_node_put(provider);
+ return -EINVAL;
+ }
+
+ provider_args.args_count = of_phandle_iterator_args(&it, provider_args.args,
+ STM32_FIREWALL_MAX_ARGS);
+
+ /* Check if the parsed phandle corresponds to a registered firewall controller */
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl->dev->of_node->phandle == it.phandle) {
+ match = true;
+ firewall[j].firewall_ctrl = ctrl;
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!match) {
+ firewall[j].firewall_ctrl = NULL;
+ pr_err("No firewall controller registered for %s\n", np->full_name);
+ of_node_put(provider);
+ return -ENODEV;
+ }
+
+ err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry);
+ if (err == 0)
+ firewall[j].entry = fw_entry;
+
+ /* Handle the case when there are no arguments given along with the phandle */
+ if (provider_args.args_count < 0 ||
+ provider_args.args_count > STM32_FIREWALL_MAX_ARGS) {
+ of_node_put(provider);
+ return -EINVAL;
+ } else if (provider_args.args_count == 0) {
+ firewall[j].extra_args_size = 0;
+ firewall[j].firewall_id = U32_MAX;
+ j++;
+ continue;
+ }
+
+ /* The firewall ID is always the first argument */
+ firewall[j].firewall_id = provider_args.args[0];
+
+ /* Extra args start at the second argument */
+ for (i = 0; i < provider_args.args_count - 1; i++)
+ firewall[j].extra_args[i] = provider_args.args[i + 1];
+
+ /* Remove the firewall ID arg that is not an extra argument */
+ firewall[j].extra_args_size = provider_args.args_count - 1;
+
+ j++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall);
+
+int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access);
+
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id);
+
+void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access\n");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release\n");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access);
+
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id);
+
+/* Firewall controller API */
+
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ pr_info("Registering %s firewall controller\n", firewall_controller->name);
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ pr_debug("%s firewall controller already registered\n",
+ firewall_controller->name);
+ mutex_unlock(&firewall_controller_list_lock);
+ return 0;
+ }
+ }
+ list_add_tail(&firewall_controller->entry, &firewall_controller_list);
+ mutex_unlock(&firewall_controller_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_register);
+
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+ bool controller_removed = false;
+
+ if (!firewall_controller) {
+ pr_debug("Null reference while unregistering firewall controller\n");
+ return;
+ }
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ controller_removed = true;
+ list_del_init(&ctrl->entry);
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!controller_removed)
+ pr_debug("There was no firewall controller named %s to unregister\n",
+ firewall_controller->name);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister);
+
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall *firewalls;
+ struct device_node *child;
+ struct device *parent;
+ unsigned int i;
+ int len;
+ int err;
+
+ parent = firewall_controller->dev;
+
+ dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev));
+
+ for_each_available_child_of_node(dev_of_node(parent), child) {
+ /* The access-controllers property is mandatory for firewall bus devices */
+ len = of_count_phandle_with_args(child, "access-controllers",
+ "#access-controller-cells");
+ if (len <= 0) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL);
+ if (!firewalls) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len);
+ if (err) {
+ kfree(firewalls);
+ of_node_put(child);
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (firewall_controller->grant_access(firewall_controller,
+ firewalls[i].firewall_id)) {
+ /*
+ * Peripheral access not allowed or not defined.
+ * Mark the node as populated so platform bus won't probe it
+ */
+ of_detach_node(child);
+ dev_err(parent, "%s: Device driver will not be probed\n",
+ child->full_name);
+ }
+ }
+
+ kfree(firewalls);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus);
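A minimal consumer-side sketch (not part of the patch) of the device API exported above, as a peripheral driver sitting behind one of these firewalls might call it from probe(); the foo_periph_probe() name, the single-firewall assumption and the immediate release are illustrative only:

#include <linux/bus/stm32_firewall_device.h>
#include <linux/platform_device.h>

static int foo_periph_probe(struct platform_device *pdev)
{
	struct stm32_firewall firewall;
	int ret;

	/* Resolve the "access-controllers" phandle of this node (one entry) */
	ret = stm32_firewall_get_firewall(pdev->dev.of_node, &firewall, 1);
	if (ret)
		return ret;

	/* Ask the firewall controller whether the peripheral is accessible */
	ret = stm32_firewall_grant_access(&firewall);
	if (ret)
		return ret;

	/* ... normal peripheral setup would go here ... */

	/* Drop the (semaphore-based) claim once access is no longer needed */
	stm32_firewall_release_access(&firewall);

	return 0;
}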
diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h
new file mode 100644
index 000000000000..e5fac85fe346
--- /dev/null
+++ b/drivers/bus/stm32_firewall.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef _STM32_FIREWALL_H
+#define _STM32_FIREWALL_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/**
+ * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals
+ * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory
+ * zones
+ * STM32_NOTYPE_FIREWALL: Undefined firewall type
+ */
+
+#define STM32_PERIPHERAL_FIREWALL BIT(1)
+#define STM32_MEMORY_FIREWALL BIT(2)
+#define STM32_NOTYPE_FIREWALL BIT(3)
+
+/**
+ * struct stm32_firewall_controller - Information on firewall controller supplying services
+ *
+ * @name: Name of the firewall controller
+ * @dev: Device reference of the firewall controller
+ * @mmio: Base address of the firewall controller
+ * @entry: List entry of the firewall controller list
+ * @type: Type of firewall
+ * @max_entries: Number of entries covered by the firewall
+ * @grant_access: Callback used to grant access for a device access against a
+ * firewall controller
+ * @release_access: Callback used to release resources taken by a device when access was
+ * granted
+ * @grant_memory_range_access: Callback used to grant access for a device to a given memory region
+ */
+struct stm32_firewall_controller {
+ const char *name;
+ struct device *dev;
+ void __iomem *mmio;
+ struct list_head entry;
+ unsigned int type;
+ unsigned int max_entries;
+
+ int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr,
+ size_t size);
+};
+
+/**
+ * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall
+ * framework
+ * @firewall_controller: Firewall controller to register
+ *
+ * Returns 0 in case of success or -ENODEV if no controller was given.
+ */
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32
+ * firewall framework
+ * @firewall_controller: Firewall controller to unregister
+ */
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall
+ * configuration. This is used at boot-time only, as a sanity check
+ * between the device tree and the firewall hardware configuration to
+ * prevent a kernel crash when a device driver is not granted access
+ *
+ * @firewall_controller: Firewall controller which nodes will be populated or not
+ *
+ * Returns 0 in case of success or appropriate errno code if error occurred.
+ */
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller);
+
+#endif /* _STM32_FIREWALL_H */
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
new file mode 100644
index 000000000000..4cf1b60014b7
--- /dev/null
+++ b/drivers/bus/stm32_rifsc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * RIFSC offset register
+ */
+#define RIFSC_RISC_SECCFGR0 0x10
+#define RIFSC_RISC_PRIVCFGR0 0x30
+#define RIFSC_RISC_PER0_CIDCFGR 0x100
+#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_HWCFGR2 0xFEC
+
+/*
+ * SEMCR register
+ */
+#define SEMCR_MUTEX BIT(0)
+
+/*
+ * HWCFGR2 register
+ */
+#define HWCFGR2_CONF1_MASK GENMASK(15, 0)
+#define HWCFGR2_CONF2_MASK GENMASK(23, 16)
+#define HWCFGR2_CONF3_MASK GENMASK(31, 24)
+
+/*
+ * RIFSC miscellaneous
+ */
+#define RIFSC_RISC_CFEN_MASK BIT(0)
+#define RIFSC_RISC_SEM_EN_MASK BIT(1)
+#define RIFSC_RISC_SCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SEML_SHIFT 16
+#define RIFSC_RISC_SEMWL_MASK GENMASK(23, 16)
+#define RIFSC_RISC_PER_ID_MASK GENMASK(31, 24)
+
+#define RIFSC_RISC_PERx_CID_MASK (RIFSC_RISC_CFEN_MASK | \
+ RIFSC_RISC_SEM_EN_MASK | \
+ RIFSC_RISC_SCID_MASK | \
+ RIFSC_RISC_SEMWL_MASK)
+
+#define IDS_PER_RISC_SEC_PRIV_REGS 32
+
+/* RIF miscellaneous */
+/*
+ * CIDCFGR register fields
+ */
+#define CIDCFGR_CFEN BIT(0)
+#define CIDCFGR_SEMEN BIT(1)
+#define CIDCFGR_SEMWL(x) BIT(RIFSC_RISC_SEML_SHIFT + (x))
+
+#define SEMWL_SHIFT 16
+
+/* Compartment IDs */
+#define RIF_CID0 0x0
+#define RIF_CID1 0x1
+
+static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
+{
+ return !(readl(addr) & SEMCR_MUTEX);
+}
+
+static int stm32_rif_acquire_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Check that CID1 has the semaphore */
+ if (stm32_rifsc_is_semaphore_available(addr) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) != RIF_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
+static void stm32_rif_release_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ if (stm32_rifsc_is_semaphore_available(addr))
+ return;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Ok if another compartment takes the semaphore before the check */
+ WARN_ON(!stm32_rifsc_is_semaphore_available(addr) &&
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) == RIF_CID1);
+}
+
+static int stm32_rifsc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ struct stm32_firewall_controller *rifsc_controller = ctrl;
+ u32 reg_offset, reg_id, sec_reg_value, cid_reg_value;
+ int rc;
+
+ if (firewall_id >= rifsc_controller->max_entries) {
+ dev_err(rifsc_controller->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /*
+ * RIFSC_RISC_PRIVCFGRx and RIFSC_RISC_SECCFGRx both handle configuration access for
+ * 32 peripherals. On the other hand, there is one _RIFSC_RISC_PERx_CIDCFGR register
+ * per peripheral
+ */
+ reg_id = firewall_id / IDS_PER_RISC_SEC_PRIV_REGS;
+ reg_offset = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS;
+ sec_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ cid_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id);
+
+ /* First check conditions for semaphore mode, which doesn't take into account static CID. */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ if (cid_reg_value & BIT(RIF_CID1 + SEMWL_SHIFT)) {
+ /* Static CID is irrelevant if semaphore mode */
+ goto skip_cid_check;
+ } else {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid bus semaphore configuration: index %d\n", firewall_id);
+ return -EACCES;
+ }
+ }
+
+ /*
+ * Skip CID check if CID filtering isn't enabled or filtering is enabled on CID0, which
+ * corresponds to whatever CID.
+ */
+ if (!(cid_reg_value & CIDCFGR_CFEN) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) == RIF_CID0)
+ goto skip_cid_check;
+
+ /* Coherency check with the CID configuration */
+ if (FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) != RIF_CID1) {
+ dev_dbg(rifsc_controller->dev, "Invalid CID configuration for peripheral: %d\n",
+ firewall_id);
+ return -EACCES;
+ }
+
+skip_cid_check:
+ /* Check security configuration */
+ if (sec_reg_value & BIT(reg_offset)) {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid security configuration for peripheral: %d\n", firewall_id);
+ return -EACCES;
+ }
+
+ /*
+ * If the peripheral is in semaphore mode, take the semaphore so that
+ * the CID1 has the ownership.
+ */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ rc = stm32_rif_acquire_semaphore(rifsc_controller, firewall_id);
+ if (rc) {
+ dev_err(rifsc_controller->dev,
+ "Couldn't acquire semaphore for peripheral: %d\n", firewall_id);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void stm32_rifsc_release_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ stm32_rif_release_semaphore(ctrl, firewall_id);
+}
+
+static int stm32_rifsc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *rifsc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 nb_risup, nb_rimu, nb_risal;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ rifsc_controller = devm_kzalloc(&pdev->dev, sizeof(*rifsc_controller), GFP_KERNEL);
+ if (!rifsc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ rifsc_controller->dev = &pdev->dev;
+ rifsc_controller->mmio = mmio;
+ rifsc_controller->name = dev_driver_string(rifsc_controller->dev);
+ rifsc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ rifsc_controller->grant_access = stm32_rifsc_grant_access;
+ rifsc_controller->release_access = stm32_rifsc_release_access;
+
+ /* Get number of RIFSC entries */
+ nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
+ nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
+ nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
+
+ platform_set_drvdata(pdev, rifsc_controller);
+
+ rc = stm32_firewall_controller_register(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't populate RIFSC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_rifsc_of_match[] = {
+ { .compatible = "st,stm32mp25-rifsc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
+
+static struct platform_driver stm32_rifsc_driver = {
+ .probe = stm32_rifsc_probe,
+ .driver = {
+ .name = "stm32-rifsc",
+ .of_match_table = stm32_rifsc_of_match,
+ },
+};
+module_platform_driver(stm32_rifsc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics RIFSC driver");
+MODULE_LICENSE("GPL");
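As with the ETZPC sketch earlier, the RIFSC register indexing can be spelled out for reference (illustrative, not part of the patch): SECCFGR/PRIVCFGR pack 32 peripherals per 32-bit register, while each peripheral has its own CIDCFGR/SEMCR pair at an 8-byte stride, so firewall_id 44 maps to SECCFGR1 (0x10 + 0x4), bit 12, and CIDCFGR at 0x100 + 8 * 44 = 0x260.

static void rifsc_offsets(u32 firewall_id, u32 *sec_reg, u32 *sec_bit, u32 *cid_reg)
{
	/* 32 peripherals per SECCFGR/PRIVCFGR word */
	*sec_reg = RIFSC_RISC_SECCFGR0 + 0x4 * (firewall_id / IDS_PER_RISC_SEC_PRIV_REGS);
	*sec_bit = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS;
	/* One CIDCFGR/SEMCR pair per peripheral, 8 bytes apart */
	*cid_reg = RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id;
}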
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 41d33f39efe5..8767e04d6c89 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ti-sysc.c - Texas Instruments sysc interconnect target driver
+ *
+ * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper
+ * IP manages clock gating, resets, and PM capabilities for the connected devices.
+ *
+ * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Many features are based on the earlier omap_hwmod arch code with thanks to all
+ * the people who developed and debugged the code over the years:
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/io.h>
@@ -1458,8 +1469,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (!ddata->enabled)
@@ -1477,8 +1487,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
@@ -1529,19 +1538,6 @@ struct sysc_revision_quirk {
}
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
- /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- /* Uarts on omap4 and later */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
-
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
@@ -1599,6 +1595,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -2145,8 +2152,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
if (ddata->legacy_mode ||
- ddata->cap->regbits->srst_shift < 0 ||
- ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ ddata->cap->regbits->srst_shift < 0)
return 0;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
@@ -2240,12 +2246,14 @@ static int sysc_init_module(struct sysc *ddata)
goto err_main_clocks;
}
- error = sysc_reset(ddata);
- if (error)
- dev_err(ddata->dev, "Reset failed with %d\n", error);
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
- if (error && !ddata->legacy_mode)
- sysc_disable_module(ddata->dev);
+ if (error && !ddata->legacy_mode)
+ sysc_disable_module(ddata->dev);
+ }
err_main_clocks:
if (error)
@@ -2447,89 +2455,6 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#ifdef CONFIG_PM_SLEEP
-static int sysc_child_suspend_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- error = pm_generic_suspend_noirq(dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- if (!pm_runtime_status_suspended(dev)) {
- error = pm_generic_runtime_suspend(dev);
- if (error) {
- dev_dbg(dev, "%s busy at %i: %i\n",
- __func__, __LINE__, error);
-
- return 0;
- }
-
- error = sysc_runtime_suspend(ddata->dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- ddata->child_needs_resume = true;
- }
-
- return 0;
-}
-
-static int sysc_child_resume_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- if (ddata->child_needs_resume) {
- ddata->child_needs_resume = false;
-
- error = sysc_runtime_resume(ddata->dev);
- if (error)
- dev_err(ddata->dev,
- "%s runtime resume error: %i\n",
- __func__, error);
-
- error = pm_generic_runtime_resume(dev);
- if (error)
- dev_err(ddata->dev,
- "%s generic runtime resume: %i\n",
- __func__, error);
- }
-
- return pm_generic_resume_noirq(dev);
-}
-#endif
-
-static struct dev_pm_domain sysc_child_pm_domain = {
- .ops = {
- SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
- sysc_child_runtime_resume,
- NULL)
- USE_PLATFORM_PM_SLEEP_OPS
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
- sysc_child_resume_noirq)
- }
-};
-
/* Caller needs to take list_lock if ever used outside of cpu_pm */
static void sysc_reinit_modules(struct sysc_soc_info *soc)
{
@@ -2600,25 +2525,6 @@ out_unlock:
mutex_unlock(&sysc_soc->list_lock);
}
-/**
- * sysc_legacy_idle_quirk - handle children in omap_device compatible way
- * @ddata: device driver data
- * @child: child device driver
- *
- * Allow idle for child devices as done with _od_runtime_suspend().
- * Otherwise many child devices will not idle because of the permanent
- * parent usecount set in pm_runtime_irq_safe().
- *
- * Note that the long term solution is to just modify the child device
- * drivers to not set pm_runtime_irq_safe() and then this can be just
- * dropped.
- */
-static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
-{
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
- dev_pm_domain_set(child, &sysc_child_pm_domain);
-}
-
static int sysc_notifier_call(struct notifier_block *nb,
unsigned long event, void *device)
{
@@ -2635,7 +2541,6 @@ static int sysc_notifier_call(struct notifier_block *nb,
error = sysc_child_add_clocks(ddata, dev);
if (error)
return error;
- sysc_legacy_idle_quirk(ddata, dev);
break;
default:
break;
@@ -2859,8 +2764,7 @@ static const struct sysc_capabilities sysc_34xx_sr = {
.type = TI_SYSC_OMAP34XX_SR,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
.regbits = &sysc_regbits_omap34xx_sr,
- .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
- SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
};
/*
@@ -2881,13 +2785,12 @@ static const struct sysc_capabilities sysc_36xx_sr = {
.type = TI_SYSC_OMAP36XX_SR,
.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_UNCACHED,
};
static const struct sysc_capabilities sysc_omap4_sr = {
.type = TI_SYSC_OMAP4_SR,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
/*
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 89ed6cd6b059..6874b72ec59d 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/device.h>
#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
return IRQ_HANDLED;
}
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long quirks;
+ int intr_num, rc;
+
+ quirks = (unsigned long)device_get_match_data(dev);
+
+ intr_num = platform_irq_count(pdev);
+ if (!intr_num)
+ return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+ for (int i = 0; i < intr_num; i++) {
+ if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+ continue;
+
+ g_irq[i] = platform_get_irq(pdev, i);
+ if (g_irq[i] < 0)
+ return g_irq[i];
+
+ rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver sifive_ccache_driver = {
+ .probe = sifive_ccache_probe,
+ .driver = {
+ .name = "sifive_ccache",
+ .of_match_table = sifive_ccache_ids,
+ },
+};
+
static int __init sifive_ccache_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc, intr_num;
const struct of_device_id *match;
- unsigned long quirks;
+ unsigned long quirks __maybe_unused;
+ int rc;
np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
goto err_unmap;
}
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("No interrupts property\n");
- rc = -ENODEV;
- goto err_unmap;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
-
- if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
- continue;
-
- rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
- NULL);
- if (rc) {
- pr_err("Could not request IRQ %d\n", g_irq[i]);
- goto err_free_irq;
- }
- }
- of_node_put(np);
-
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
+
+ rc = platform_driver_register(&sifive_ccache_driver);
+ if (rc)
+ goto err_unmap;
+
+ of_node_put(np);
+
return 0;
-err_free_irq:
- while (--i >= 0)
- free_irq(g_irq[i], NULL);
err_unmap:
iounmap(ccache_base);
err_node_put:
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index c9bf2c219841..f0d0c044731c 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -149,7 +149,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
struct agp_bridge_data *alpha_bridge;
-int __init
+static int __init
alpha_core_agp_setup(void)
{
alpha_agp_info *agp = alpha_mv.agp_info();
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3bbdd6e60fc..f5c71a617a99 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -382,7 +382,7 @@ static ssize_t rng_current_show(struct device *dev,
if (IS_ERR(rng))
return PTR_ERR(rng);
- ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+ ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
put_rng(rng);
return ret;
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 07ec000e4cd7..94ee18a1120a 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -131,7 +131,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng)
__raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
}
-static int __init mxc_rnga_probe(struct platform_device *pdev)
+static int mxc_rnga_probe(struct platform_device *pdev)
{
int err;
struct mxc_rng *mxc_rng;
@@ -176,7 +176,7 @@ err_ioremap:
return err;
}
-static void __exit mxc_rnga_remove(struct platform_device *pdev)
+static void mxc_rnga_remove(struct platform_device *pdev)
{
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
@@ -197,10 +197,11 @@ static struct platform_driver mxc_rnga_driver = {
.name = "mxc_rnga",
.of_match_table = mxc_rnga_of_match,
},
- .remove_new = __exit_p(mxc_rnga_remove),
+ .probe = mxc_rnga_probe,
+ .remove_new = mxc_rnga_remove,
};
-module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
+module_platform_driver(mxc_rnga_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a2009fc4ad3c..f2a2aa7a531c 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -78,7 +78,6 @@ MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
static struct amba_driver nmk_rng_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "rng",
},
.probe = nmk_rng_probe,
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 379bc245c520..0e903d6e22e3 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -220,7 +220,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error\n");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -238,7 +239,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -250,6 +252,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
+exit_rpm:
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
@@ -353,13 +356,15 @@ static int stm32_rng_init(struct hwrng *rng)
err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
reg & RNG_SR_DRDY,
10, 100000);
- if (err | (reg & ~RNG_SR_DRDY)) {
+ if (err || (reg & ~RNG_SR_DRDY)) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout:%x SR: %x!\n", __func__, err, reg);
return -EINVAL;
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -384,6 +389,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
static int __maybe_unused stm32_rng_suspend(struct device *dev)
{
struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
if (priv->data->has_cond_reset) {
priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
@@ -465,6 +475,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 3c6670cf905f..9b80e622ae80 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -544,7 +544,7 @@ static unsigned long get_unmapped_area_zero(struct file *file,
}
/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
#else
return -ENOSYS;
#endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 456be28ba67c..2597cb43f438 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 927088b2c3d3..e63a6a17793c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -27,6 +27,20 @@ menuconfig TCG_TPM
if TCG_TPM
+config TCG_TPM2_HMAC
+ bool "Use HMAC and encrypted transactions on the TPM bus"
+ default y
+ select CRYPTO_ECDH
+ select CRYPTO_LIB_AESCFB
+ select CRYPTO_LIB_SHA256
+ help
+ Setting this causes us to deploy a scheme which uses request
+ and response HMACs in addition to encryption for
+ communicating with the TPM to prevent or detect bus snooping
+ and interposer attacks (see tpm-security.rst). Saying Y
+ here adds some encryption overhead to all kernel-to-TPM
+ transactions.
+
config HW_RANDOM_TPM
bool "TPM HW Random Number Generator support"
depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
@@ -149,6 +163,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
depends on PPC64 || HAS_IOPORT_MAP
+ depends on HAS_IOPORT
help
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
@@ -156,7 +171,7 @@ config TCG_ATMEL
config TCG_INFINEON
tristate "Infineon Technologies TPM Interface"
- depends on PNP
+ depends on PNP || COMPILE_TEST
help
If you have a TPM security chip from Infineon Technologies
(either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 0222b1ddb310..4c695b0388f3 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -15,7 +15,9 @@ tpm-y += tpm-sysfs.o
tpm-y += eventlog/common.o
tpm-y += eventlog/tpm1.o
tpm-y += eventlog/tpm2.o
+tpm-y += tpm-buf.o
+tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index bd757d836c5c..69533d0bfb51 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -142,7 +142,6 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
log->bios_event_log_end = log->bios_event_log + len;
- ret = -EIO;
virt = acpi_os_map_iomem(start, len);
if (!virt) {
dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
new file mode 100644
index 000000000000..647c6ca92ac3
--- /dev/null
+++ b/drivers/char/tpm/tpm-buf.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handling of TPM command and other buffers.
+ */
+
+#include <linux/tpm_command.h>
+#include <linux/module.h>
+#include <linux/tpm.h>
+
+/**
+ * tpm_buf_init() - Allocate and initialize a TPM command
+ * @buf: A &tpm_buf
+ * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS
+ * @ordinal: A command ordinal
+ *
+ * Return: 0 or -ENOMEM
+ */
+int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ tpm_buf_reset(buf, tag, ordinal);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init);
+
+/**
+ * tpm_buf_reset() - Initialize a TPM command
+ * @buf: A &tpm_buf
+ * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS
+ * @ordinal: A command ordinal
+ */
+void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ WARN_ON(tag != TPM_TAG_RQU_COMMAND && tag != TPM2_ST_NO_SESSIONS &&
+ tag != TPM2_ST_SESSIONS && tag != 0);
+
+ buf->flags = 0;
+ buf->length = sizeof(*head);
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+ buf->handles = 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_reset);
+
+/**
+ * tpm_buf_init_sized() - Allocate and initialize a sized (TPM2B) buffer
+ * @buf: A &tpm_buf
+ *
+ * Return: 0 or -ENOMEM
+ */
+int tpm_buf_init_sized(struct tpm_buf *buf)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ tpm_buf_reset_sized(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init_sized);
+
+/**
+ * tpm_buf_reset_sized() - Initialize a sized buffer
+ * @buf: A &tpm_buf
+ */
+void tpm_buf_reset_sized(struct tpm_buf *buf)
+{
+ buf->flags = TPM_BUF_TPM2B;
+ buf->length = 2;
+ buf->data[0] = 0;
+ buf->data[1] = 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_reset_sized);
+
+void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ free_page((unsigned long)buf->data);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_destroy);
+
+/**
+ * tpm_buf_length() - Return the number of bytes consumed by the data
+ * @buf: A &tpm_buf
+ *
+ * Return: The number of bytes consumed by the buffer
+ */
+u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ return buf->length;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_length);
+
+/**
+ * tpm_buf_append() - Append data to an initialized buffer
+ * @buf: A &tpm_buf
+ * @new_data: A data blob
+ * @new_length: Size of the appended data
+ */
+void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length)
+{
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((buf->length + new_length) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: write overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[buf->length], new_data, new_length);
+ buf->length += new_length;
+
+ if (buf->flags & TPM_BUF_TPM2B)
+ ((__be16 *)buf->data)[0] = cpu_to_be16(buf->length - 2);
+ else
+ ((struct tpm_header *)buf->data)->length = cpu_to_be32(buf->length);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append);
+
+void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u8);
+
+void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *)&value2, 2);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u16);
+
+void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *)&value2, 4);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
+
+/**
+ * tpm_buf_read() - Read from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ * @count: the number of bytes to read
+ * @output: the output buffer
+ */
+static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void *output)
+{
+ off_t next_offset;
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_BOUNDARY_ERROR)
+ return;
+
+ next_offset = *offset + count;
+ if (next_offset > buf->length) {
+ WARN(1, "tpm_buf: read out of boundary\n");
+ buf->flags |= TPM_BUF_BOUNDARY_ERROR;
+ return;
+ }
+
+ memcpy(output, &buf->data[*offset], count);
+ *offset = next_offset;
+}
+
+/**
+ * tpm_buf_read_u8() - Read 8-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 8-bit word
+ */
+u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
+{
+ u8 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
+
+/**
+ * tpm_buf_read_u16() - Read 16-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 16-bit word
+ */
+u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
+{
+ u16 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return be16_to_cpu(value);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
+
+/**
+ * tpm_buf_read_u32() - Read 32-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 32-bit word
+ */
+u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
+{
+ u32 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return be32_to_cpu(value);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
+
+static u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+/**
+ * tpm_buf_parameters() - return the TPM response parameters area of the tpm_buf
+ * @buf: tpm_buf to use
+ *
+ * Where the parameters are located depends on the tag of a TPM
+ * command (it's immediately after the header for TPM2_ST_NO_SESSIONS
+ * or 4 bytes after for TPM2_ST_SESSIONS). Evaluate this and return a
+ * pointer to the first byte of the parameters area.
+ *
+ * Return: pointer to parameters area
+ */
+u8 *tpm_buf_parameters(struct tpm_buf *buf)
+{
+ int offset = TPM_HEADER_SIZE;
+
+ if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
+ offset += 4;
+
+ return &buf->data[offset];
+}
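
Editorial sketch, not part of the patch: the helpers in tpm-buf.c above are meant to be composed by command code such as tpm2-cmd.c. The short caller below is built only from functions shown in this series and loosely follows the tpm2_get_random() conversion later in the diff; the function name example_get_random() and its abbreviated error handling are illustrative assumptions.

static int example_get_random(struct tpm_chip *chip, u8 *dest, u16 num_bytes)
{
	struct tpm_buf buf;
	off_t offset = TPM_HEADER_SIZE;
	u16 recd;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	/* the only parameter of TPM2_GetRandom is the requested byte count */
	tpm_buf_append_u16(&buf, num_bytes);

	rc = tpm_transmit_cmd(chip, &buf, 2, "attempting get random");
	if (rc) {
		if (rc > 0)
			rc = -EIO;	/* TPM returned an error code */
		goto out;
	}

	/* with TPM2_ST_NO_SESSIONS the TPM2B reply follows the header */
	recd = tpm_buf_read_u16(&buf, &offset);
	if (offset + recd > tpm_buf_length(&buf)) {
		rc = -EFAULT;
		goto out;
	}
	memcpy(dest, &buf.data[offset], min(recd, num_bytes));
out:
	tpm_buf_destroy(&buf);
	return rc;
}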
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 42b1062e33cd..854546000c92 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -158,6 +158,9 @@ int tpm_try_get_ops(struct tpm_chip *chip)
{
int rc = -EIO;
+ if (chip->flags & TPM_CHIP_FLAG_DISABLE)
+ return rc;
+
get_device(&chip->dev);
down_read(&chip->ops_sem);
@@ -275,6 +278,9 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
kfree(chip->allocated_banks);
+#ifdef CONFIG_TCG_TPM2_HMAC
+ kfree(chip->auth);
+#endif
kfree(chip);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 757336324c90..5da134f12c9a 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -232,6 +232,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
if (len < min_rsp_body_length + TPM_HEADER_SIZE)
return -EFAULT;
+ buf->length = len;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
@@ -342,31 +343,6 @@ out:
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
-/**
- * tpm_send - send a TPM command
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @cmd: a TPM command buffer
- * @buflen: the length of the TPM command buffer
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
-{
- struct tpm_buf buf;
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip)
- return -ENODEV;
-
- buf.data = cmd;
- rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command");
-
- tpm_put_ops(chip);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_send);
-
int tpm_auto_startup(struct tpm_chip *chip)
{
int rc;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 54c71473aa29..94231f052ea7 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -309,6 +309,21 @@ static ssize_t tpm_version_major_show(struct device *dev,
}
static DEVICE_ATTR_RO(tpm_version_major);
+#ifdef CONFIG_TCG_TPM2_HMAC
+static ssize_t null_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ int size = TPM2_NAME_SIZE;
+
+ bin2hex(buf, chip->null_key_name, size);
+ size *= 2;
+ buf[size++] = '\n';
+ return size;
+}
+static DEVICE_ATTR_RO(null_name);
+#endif
+
static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
@@ -326,6 +341,9 @@ static struct attribute *tpm1_dev_attrs[] = {
static struct attribute *tpm2_dev_attrs[] = {
&dev_attr_tpm_version_major.attr,
+#ifdef CONFIG_TCG_TPM2_HMAC
+ &dev_attr_null_name.attr,
+#endif
NULL
};
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 61445f1dc46d..6b8b9956ba69 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -312,9 +312,23 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
int tpm_devs_add(struct tpm_chip *chip);
void tpm_devs_remove(struct tpm_chip *chip);
+int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset);
+int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
int tpm_dev_common_init(void);
void tpm_dev_common_exit(void);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+int tpm2_sessions_init(struct tpm_chip *chip);
+#else
+static inline int tpm2_sessions_init(struct tpm_chip *chip)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 93545be190a5..0cdf892ec2a7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -216,13 +216,6 @@ out:
return rc;
}
-struct tpm2_null_auth_area {
- __be32 handle;
- __be16 nonce_size;
- u8 attributes;
- __be16 auth_size;
-} __packed;
-
/**
* tpm2_pcr_extend() - extend a PCR value
*
@@ -236,24 +229,22 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests)
{
struct tpm_buf buf;
- struct tpm2_null_auth_area auth_area;
int rc;
int i;
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ rc = tpm2_start_auth_session(chip);
if (rc)
return rc;
- tpm_buf_append_u32(&buf, pcr_idx);
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ if (rc) {
+ tpm2_end_auth_session(chip);
+ return rc;
+ }
- auth_area.handle = cpu_to_be32(TPM2_RS_PW);
- auth_area.nonce_size = 0;
- auth_area.attributes = 0;
- auth_area.auth_size = 0;
+ tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
- tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
- tpm_buf_append(&buf, (const unsigned char *)&auth_area,
- sizeof(auth_area));
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
for (i = 0; i < chip->nr_allocated_banks; i++) {
@@ -262,7 +253,9 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
+ tpm_buf_fill_hmac_session(chip, &buf);
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
@@ -299,25 +292,35 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- err = tpm_buf_init(&buf, 0, 0);
+ err = tpm2_start_auth_session(chip);
if (err)
return err;
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err) {
+ tpm2_end_auth_session(chip);
+ return err;
+ }
+
do {
- tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
+ | TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
tpm_buf_append_u16(&buf, num_bytes);
+ tpm_buf_fill_hmac_session(chip, &buf);
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
"attempting get random");
+ err = tpm_buf_check_hmac_response(chip, &buf, err);
if (err) {
if (err > 0)
err = -EIO;
goto out;
}
- out = (struct tpm2_get_random_out *)
- &buf.data[TPM_HEADER_SIZE];
+ out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
@@ -334,9 +337,12 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
+ tpm2_end_auth_session(chip);
+
return total ? total : -EIO;
out:
tpm_buf_destroy(&buf);
+ tpm2_end_auth_session(chip);
return err;
}
@@ -759,6 +765,11 @@ int tpm2_auto_startup(struct tpm_chip *chip)
rc = 0;
}
+ if (rc)
+ goto out;
+
+ rc = tpm2_sessions_init(chip);
+
out:
/*
* Infineon TPM in field upgrade mode will return no data for the number
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
new file mode 100644
index 000000000000..ea8860661876
--- /dev/null
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -0,0 +1,1286 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2018 James.Bottomley@HansenPartnership.com
+ *
+ * Cryptographic helper routines for handling TPM2 sessions for
+ * authorization HMAC and request response encryption.
+ *
+ * The idea is to ensure that every TPM command is HMAC protected by a
+ * session, meaning in-flight tampering would be detected and in
+ * addition all sensitive inputs and responses should be encrypted.
+ *
+ * The basic way this works is to use a TPM feature called salted
+ * sessions where a random secret used in session construction is
+ * encrypted to the public part of a known TPM key. The problem is we
+ * have no known keys, so initially a primary Elliptic Curve key is
+ * derived from the NULL seed (we use EC because most TPMs generate
+ * these keys much faster than RSA ones). The curve used is NIST_P256
+ * because that's now mandated to be present in 'TCG TPM v2.0
+ * Provisioning Guidance'.
+ *
+ * Threat problems: the initial TPM2_CreatePrimary is not (and cannot
+ * be) session protected, so a clever Man in the Middle could return a
+ * public key they control to this command and from there intercept
+ * and decode all subsequent session based transactions. The kernel
+ * cannot mitigate this threat but, after boot, userspace can get
+ * proof this has not happened by asking the TPM to certify the NULL
+ * key. This certification would chain back to the TPM Endorsement
+ * Certificate and prove the NULL seed primary had not been tampered
+ * with and thus all sessions must have been cryptographically secure.
+ * To assist with this, the initial NULL seed public key name is made
+ * available in a sysfs file.
+ *
+ * Use of these functions:
+ *
+ * The design is that all the crypto, hash and HMAC gunk is confined in this
+ * file and never needs to be seen even by the kernel-internal user. To
+ * the user there's an init function tpm2_sessions_init() that needs to
+ * be called once per TPM which generates the NULL seed primary key.
+ *
+ * These are the usage functions:
+ *
+ * tpm2_start_auth_session() which allocates the opaque auth structure
+ * and gets a session from the TPM. This must be called before
+ * any of the following functions. The session is protected by a
+ * session_key which is derived from a random salt value
+ * encrypted to the NULL seed.
+ * tpm2_end_auth_session() kills the session and frees the resources.
+ * Under normal operation this function is done by
+ * tpm_buf_check_hmac_response(), so this is only to be used on
+ * error legs where the latter is not executed.
+ * tpm_buf_append_name() to add a handle to the buffer. This must be
+ * used in place of the usual tpm_buf_append_u32() for adding
+ * handles because handles have to be processed specially when
+ * calculating the HMAC. In particular, for NV, volatile and
+ * permanent objects you now need to provide the name.
+ * tpm_buf_append_hmac_session() which appends the hmac session to the
+ *      buf in the same way tpm_buf_append_auth() does.
+ * tpm_buf_fill_hmac_session() This calculates the correct hash and
+ * places it in the buffer. It must be called after the complete
+ * command buffer is finalized so it can fill in the correct HMAC
+ * based on the parameters.
+ * tpm_buf_check_hmac_response() which checks the session response in
+ * the buffer and calculates what it should be. If there's a
+ * mismatch it will log a warning and return an error. If
+ * tpm_buf_append_hmac_session() did not specify
+ *      TPM2_SA_CONTINUE_SESSION then the session will be closed (if it
+ * hasn't been consumed) and the auth structure freed.
+ */
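
Editorial sketch, not part of the patch: put concretely, the call sequence the comment above describes is the pattern used by the tpm2_pcr_extend() conversion earlier in this series. The wrapper name below is invented and the command parameters are elided.

static int example_hmac_protected_extend(struct tpm_chip *chip, u32 pcr_idx)
{
	struct tpm_buf buf;
	int rc;

	/* allocate the session and derive the salted session key */
	rc = tpm2_start_auth_session(chip);
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
	if (rc) {
		/* error leg: the response check never runs, close by hand */
		tpm2_end_auth_session(chip);
		return rc;
	}

	/* handles go in via tpm_buf_append_name(), never tpm_buf_append_u32() */
	tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
	tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);

	/* ... append the command parameters here ... */

	/* finalize the HMAC only after the whole command body is in place */
	tpm_buf_fill_hmac_session(chip, &buf);
	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
	/* verifies the response HMAC and, if required, closes the session */
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);

	tpm_buf_destroy(&buf);
	return rc;
}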
+
+#include "tpm.h"
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+
+/* maximum number of names the TPM must remember for authorization */
+#define AUTH_MAX_NAMES 3
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name);
+
+/*
+ * This is the structure that carries all the auth information (like
+ * session handle, nonces, session key and auth) from use to use. It is
+ * designed to be opaque to anything outside.
+ */
+struct tpm2_auth {
+ u32 handle;
+ /*
+ * This has two meanings: before tpm_buf_fill_hmac_session()
+ * it marks the offset in the buffer of the start of the
+ * sessions (i.e. after all the handles). Once the buffer has
+ * been filled it markes the session number of our auth
+ * session so we can find it again in the response buffer.
+ *
+ * The two cases are distinguished because the first offset
+ * must always be greater than TPM_HEADER_SIZE and the second
+ * must be less than or equal to 5.
+ */
+ u32 session;
+ /*
+ * the size here is variable and set by the size of our_nonce
+ * which must be between 16 and the name hash length. We set
+ * the maximum sha256 size for the greatest protection
+ */
+ u8 our_nonce[SHA256_DIGEST_SIZE];
+ u8 tpm_nonce[SHA256_DIGEST_SIZE];
+ /*
+ * the salt is only used across the session command/response;
+ * after that it can be used as a scratch area
+ */
+ union {
+ u8 salt[EC_PT_SZ];
+ /* scratch for key + IV */
+ u8 scratch[AES_KEY_BYTES + AES_BLOCK_SIZE];
+ };
+ /*
+ * the session key and passphrase are the same size as the
+ * name digest (sha256 again). The session key is constant
+ * for the use of the session and the passphrase can change
+ * with every invocation.
+ *
+ * Note: these fields must be adjacent and in this order
+ * because several HMAC/KDF schemes use the combination of the
+ * session_key and passphrase.
+ */
+ u8 session_key[SHA256_DIGEST_SIZE];
+ u8 passphrase[SHA256_DIGEST_SIZE];
+ int passphrase_len;
+ struct crypto_aes_ctx aes_ctx;
+ /* saved session attributes: */
+ u8 attrs;
+ __be32 ordinal;
+
+ /*
+ * memory for three authorization handles. We know them by
+ * handle, but they are part of the session by name, which
+ * we must compute and remember
+ */
+ u32 name_h[AUTH_MAX_NAMES];
+ u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
+};
+
+/*
+ * Name Size based on TPM algorithm (assumes no hash bigger than 255)
+ */
+static u8 name_size(const u8 *name)
+{
+ static u8 size_map[] = {
+ [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
+ [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
+ [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
+ [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
+ };
+ u16 alg = get_unaligned_be16(name);
+ return size_map[alg] + 2;
+}
+
+/*
+ * It turns out the crypto hmac(sha256) is hard for us to consume
+ * because it assumes a fixed key and the TPM seems to change the key
+ * on every operation, so we weld the hmac init and final functions in
+ * here to give it the same usage characteristics as a regular hash
+ */
+static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
+{
+ u8 pad[SHA256_BLOCK_SIZE];
+ int i;
+
+ sha256_init(sctx);
+ for (i = 0; i < sizeof(pad); i++) {
+ if (i < key_len)
+ pad[i] = key[i];
+ else
+ pad[i] = 0;
+ pad[i] ^= HMAC_IPAD_VALUE;
+ }
+ sha256_update(sctx, pad, sizeof(pad));
+}
+
+static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
+ u8 *out)
+{
+ u8 pad[SHA256_BLOCK_SIZE];
+ int i;
+
+ for (i = 0; i < sizeof(pad); i++) {
+ if (i < key_len)
+ pad[i] = key[i];
+ else
+ pad[i] = 0;
+ pad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ /* collect the final hash; use out as temporary storage */
+ sha256_final(sctx, out);
+
+ sha256_init(sctx);
+ sha256_update(sctx, pad, sizeof(pad));
+ sha256_update(sctx, out, SHA256_DIGEST_SIZE);
+ sha256_final(sctx, out);
+}
+
+/*
+ * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
+ * otherwise standard tpm2_KDFa. Note output is in bytes not bits.
+ */
+static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
+ u8 *v, u32 bytes, u8 *out)
+{
+ u32 counter = 1;
+ const __be32 bits = cpu_to_be32(bytes * 8);
+
+ while (bytes > 0) {
+ struct sha256_state sctx;
+ __be32 c = cpu_to_be32(counter);
+
+ tpm2_hmac_init(&sctx, key, key_len);
+ sha256_update(&sctx, (u8 *)&c, sizeof(c));
+ sha256_update(&sctx, label, strlen(label)+1);
+ sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
+ sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
+ sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
+ tpm2_hmac_final(&sctx, key, key_len, out);
+
+ bytes -= SHA256_DIGEST_SIZE;
+ counter++;
+ out += SHA256_DIGEST_SIZE;
+ }
+}
+
+/*
+ * Somewhat of a bastardization of the real KDFe. We're assuming
+ * we're working with known point sizes for the input parameters and
+ * the hash algorithm is fixed at sha256. Because we know that the
+ * point size is 32 bytes like the hash size, there's no need to loop
+ * in this KDF.
+ */
+static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
+ u8 *out)
+{
+ struct sha256_state sctx;
+ /*
+ * this should be an iterative counter, but because we know
+ * we're only taking 32 bytes for the point using a sha256
+ * hash which is also 32 bytes, there's only one loop
+ */
+ __be32 c = cpu_to_be32(1);
+
+ sha256_init(&sctx);
+ /* counter (BE) */
+ sha256_update(&sctx, (u8 *)&c, sizeof(c));
+ /* secret value */
+ sha256_update(&sctx, z, EC_PT_SZ);
+ /* string including trailing zero */
+ sha256_update(&sctx, str, strlen(str)+1);
+ sha256_update(&sctx, pt_u, EC_PT_SZ);
+ sha256_update(&sctx, pt_v, EC_PT_SZ);
+ sha256_final(&sctx, out);
+}
+
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+{
+ struct crypto_kpp *kpp;
+ struct kpp_request *req;
+ struct scatterlist s[2], d[1];
+ struct ecdh p = {0};
+ u8 encoded_key[EC_PT_SZ], *x, *y;
+ unsigned int buf_len;
+
+ /* secret is two sized points */
+ tpm_buf_append_u16(buf, (EC_PT_SZ + 2)*2);
+ /*
+ * we cheat here and append uninitialized data to form
+ * the points. All we care about is getting the two
+ * co-ordinate pointers, which will be used to overwrite
+ * the uninitialized data
+ */
+ tpm_buf_append_u16(buf, EC_PT_SZ);
+ x = &buf->data[tpm_buf_length(buf)];
+ tpm_buf_append(buf, encoded_key, EC_PT_SZ);
+ tpm_buf_append_u16(buf, EC_PT_SZ);
+ y = &buf->data[tpm_buf_length(buf)];
+ tpm_buf_append(buf, encoded_key, EC_PT_SZ);
+ sg_init_table(s, 2);
+ sg_set_buf(&s[0], x, EC_PT_SZ);
+ sg_set_buf(&s[1], y, EC_PT_SZ);
+
+ kpp = crypto_alloc_kpp("ecdh-nist-p256", CRYPTO_ALG_INTERNAL, 0);
+ if (IS_ERR(kpp)) {
+ dev_err(&chip->dev, "crypto ecdh allocation failed\n");
+ return;
+ }
+
+ buf_len = crypto_ecdh_key_len(&p);
+ if (sizeof(encoded_key) < buf_len) {
+ dev_err(&chip->dev, "salt buffer too small needs %d\n",
+ buf_len);
+ goto out;
+ }
+ crypto_ecdh_encode_key(encoded_key, buf_len, &p);
+ /* this generates a random private key */
+ crypto_kpp_set_secret(kpp, encoded_key, buf_len);
+
+ /* salt is now the public point of this private key */
+ req = kpp_request_alloc(kpp, GFP_KERNEL);
+ if (!req)
+ goto out;
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, s, EC_PT_SZ*2);
+ crypto_kpp_generate_public_key(req);
+ /*
+ * we're not done: now we have to compute the shared secret
+ * which is our private key multiplied by the tpm_key public
+ * point. We actually only take the x point and discard the y
+ * point and feed it through KDFe to get the final secret salt
+ */
+ sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
+ sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
+ kpp_request_set_input(req, s, EC_PT_SZ*2);
+ sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ kpp_request_set_output(req, d, EC_PT_SZ);
+ crypto_kpp_compute_shared_secret(req);
+ kpp_request_free(req);
+
+ /*
+ * pass the shared secret through KDFe for salt. Note salt
+ * area is used both for input shared secret and output salt.
+ * This works because KDFe fully consumes the secret before it
+ * writes the salt
+ */
+ tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
+ chip->auth->salt);
+
+ out:
+ crypto_free_kpp(kpp);
+}
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete. The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffer's first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphrase_len)
+{
+ u8 nonce[SHA256_DIGEST_SIZE];
+ u32 len;
+ struct tpm2_auth *auth = chip->auth;
+
+ /*
+ * The Architecture Guide requires us to strip trailing zeros
+ * before computing the HMAC
+ */
+ while (passphrase && passphrase_len > 0
+ && passphrase[passphrase_len - 1] == '\0')
+ passphrase_len--;
+
+ auth->attrs = attributes;
+ auth->passphrase_len = passphrase_len;
+ if (passphrase_len)
+ memcpy(auth->passphrase, passphrase, passphrase_len);
+
+ if (auth->session != tpm_buf_length(buf)) {
+ /* we're not the first session */
+ len = get_unaligned_be32(&buf->data[auth->session]);
+ if (4 + len + auth->session != tpm_buf_length(buf)) {
+ WARN(1, "session length mismatch, cannot append");
+ return;
+ }
+
+ /* add our new session */
+ len += 9 + 2 * SHA256_DIGEST_SIZE;
+ put_unaligned_be32(len, &buf->data[auth->session]);
+ } else {
+ tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+ }
+
+ /* random number for our nonce */
+ get_random_bytes(nonce, sizeof(nonce));
+ memcpy(auth->our_nonce, nonce, sizeof(nonce));
+ tpm_buf_append_u32(buf, auth->handle);
+ /* our new nonce */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+ tpm_buf_append_u8(buf, auth->attrs);
+ /* and put a placeholder for the hmac */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+}
+EXPORT_SYMBOL(tpm_buf_append_hmac_session);
+
+/**
+ * tpm_buf_fill_hmac_session() - finalize the session HMAC
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ *
+ * This command must not be called until all of the parameters have
+ * been appended to @buf otherwise the computed HMAC will be
+ * incorrect.
+ *
+ * This function computes and fills in the session HMAC using the
+ * session key and, if TPM2_SA_DECRYPT was specified, computes the
+ * encryption key and encrypts the first parameter of the command
+ * buffer with it.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+{
+ u32 cc, handles, val;
+ struct tpm2_auth *auth = chip->auth;
+ int i;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset_s = TPM_HEADER_SIZE, offset_p;
+ u8 *hmac = NULL;
+ u32 attrs;
+ u8 cphash[SHA256_DIGEST_SIZE];
+ struct sha256_state sctx;
+
+ /* save the command code in BE format */
+ auth->ordinal = head->ordinal;
+
+ cc = be32_to_cpu(head->ordinal);
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0) {
+ dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
+ return;
+ }
+ attrs = chip->cc_attrs_tbl[i];
+
+ handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
+
+ /*
+ * Just check the names; it's easy to make mistakes. This
+ * would happen if someone added a handle via
+ * tpm_buf_append_u32() instead of tpm_buf_append_name()
+ */
+ for (i = 0; i < handles; i++) {
+ u32 handle = tpm_buf_read_u32(buf, &offset_s);
+
+ if (auth->name_h[i] != handle) {
+ dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
+ i);
+ return;
+ }
+ }
+ /* point offset_s to the start of the sessions */
+ val = tpm_buf_read_u32(buf, &offset_s);
+ /* point offset_p to the start of the parameters */
+ offset_p = offset_s + val;
+ for (i = 1; offset_s < offset_p; i++) {
+ u32 handle = tpm_buf_read_u32(buf, &offset_s);
+ u16 len;
+ u8 a;
+
+ /* nonce (already in auth) */
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len;
+
+ a = tpm_buf_read_u8(buf, &offset_s);
+
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (handle == auth->handle && auth->attrs == a) {
+ hmac = &buf->data[offset_s];
+ /*
+ * save our session number so we know which
+ * session in the response belongs to us
+ */
+ auth->session = i;
+ }
+
+ offset_s += len;
+ }
+ if (offset_s != offset_p) {
+ dev_err(&chip->dev, "TPM session length is incorrect\n");
+ return;
+ }
+ if (!hmac) {
+ dev_err(&chip->dev, "TPM could not find HMAC session\n");
+ return;
+ }
+
+ /* encrypt before HMAC */
+ if (auth->attrs & TPM2_SA_DECRYPT) {
+ u16 len;
+
+ /* need key and IV */
+ tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE
+ + auth->passphrase_len, "CFB", auth->our_nonce,
+ auth->tpm_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE,
+ auth->scratch);
+
+ len = tpm_buf_read_u16(buf, &offset_p);
+ aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
+ aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p],
+ &buf->data[offset_p], len,
+ auth->scratch + AES_KEY_BYTES);
+ /* reset p to beginning of parameters for HMAC */
+ offset_p -= 2;
+ }
+
+ sha256_init(&sctx);
+ /* ordinal is already BE */
+ sha256_update(&sctx, (u8 *)&head->ordinal, sizeof(head->ordinal));
+ /* add the handle names */
+ for (i = 0; i < handles; i++) {
+ enum tpm2_mso_type mso = tpm2_handle_mso(auth->name_h[i]);
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ sha256_update(&sctx, auth->name[i],
+ name_size(auth->name[i]));
+ } else {
+ __be32 h = cpu_to_be32(auth->name_h[i]);
+
+ sha256_update(&sctx, (u8 *)&h, 4);
+ }
+ }
+ if (offset_s != tpm_buf_length(buf))
+ sha256_update(&sctx, &buf->data[offset_s],
+ tpm_buf_length(buf) - offset_s);
+ sha256_final(&sctx, cphash);
+
+ /* now calculate the hmac */
+ tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len);
+ sha256_update(&sctx, cphash, sizeof(cphash));
+ sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
+ sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ sha256_update(&sctx, &auth->attrs, 1);
+ tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len, hmac);
+}
+EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
+
+static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset = TPM_HEADER_SIZE;
+ u32 tot_len = be32_to_cpu(head->length);
+ u32 val;
+
+ /* we're starting after the header so adjust the length */
+ tot_len -= TPM_HEADER_SIZE;
+
+ /* skip public */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val > tot_len)
+ return -EINVAL;
+ offset += val;
+ /* name */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val != name_size(&buf->data[offset]))
+ return -EINVAL;
+ memcpy(name, &buf->data[offset], val);
+ /* forget the rest */
+ return 0;
+}
+
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, handle);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_read_public(name, &buf);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles. For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the two byte algorithm id followed by
+ * the hash (according to the name algorithm, which should be set to
+ * sha256) of the public area. For these
+ * objects, the @name pointer should point to this. If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
+{
+ enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+ struct tpm2_auth *auth = chip->auth;
+ int slot;
+
+ slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
+ if (slot >= AUTH_MAX_NAMES) {
+ dev_err(&chip->dev, "TPM: too many handles\n");
+ return;
+ }
+ WARN(auth->session != tpm_buf_length(buf),
+ "name added in wrong place\n");
+ tpm_buf_append_u32(buf, handle);
+ auth->session += 4;
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ if (!name)
+ tpm2_read_public(chip, handle, auth->name[slot]);
+ } else {
+ if (name)
+ dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ }
+
+ auth->name_h[slot] = handle;
+ if (name)
+ memcpy(auth->name[slot], name, name_size(name));
+}
+EXPORT_SYMBOL(tpm_buf_append_name);
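
Editorial sketch, not part of the patch: the name format described in the kernel-doc above is the same computation tpm2_parse_create_primary() performs further down. Here 'pub' and 'pub_len' are assumed to be the object's marshalled public area.

static void example_compute_name(const u8 *pub, u16 pub_len,
				 u8 name[2 + SHA256_DIGEST_SIZE])
{
	/* two byte name algorithm identifier first ... */
	put_unaligned_be16(TPM_ALG_SHA256, name);
	/* ... followed by the hash of the public area */
	sha256(pub, pub_len, name + 2);
}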
+
+/**
+ * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
+ * @chip: the TPM chip structure
+ * @buf: the original command buffer (which now contains the response)
+ * @rc: the return code from tpm_transmit_cmd
+ *
+ * If @rc is non-zero, @buf may not contain an actual return, so @rc
+ * is passed through as the return and the session cleaned up and
+ * de-allocated if required (this is required if
+ * TPM2_SA_CONTINUE_SESSION was not specified as a session flag).
+ *
+ * If @rc is zero, the response HMAC is computed against the returned
+ * @buf and matched to the TPM one in the session area. If there is a
+ * mismatch, an error is logged and -EINVAL returned.
+ *
+ * The reason for this is that the command issue and HMAC check
+ * sequence should look like:
+ *
+ * rc = tpm_transmit_cmd(...);
+ * rc = tpm_buf_check_hmac_response(&buf, auth, rc);
+ * if (rc)
+ * ...
+ *
+ * This is easily layered into the current control flow.
+ *
+ * Returns: 0 on success or an error.
+ */
+int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
+ int rc)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ struct tpm2_auth *auth = chip->auth;
+ off_t offset_s, offset_p;
+ u8 rphash[SHA256_DIGEST_SIZE];
+ u32 attrs;
+ struct sha256_state sctx;
+ u16 tag = be16_to_cpu(head->tag);
+ u32 cc = be32_to_cpu(auth->ordinal);
+ int parm_len, len, i, handles;
+
+ if (auth->session >= TPM_HEADER_SIZE) {
+ WARN(1, "tpm session not filled correctly\n");
+ goto out;
+ }
+
+ if (rc != 0)
+ /* pass non success rc through and close the session */
+ goto out;
+
+ rc = -EINVAL;
+ if (tag != TPM2_ST_SESSIONS) {
+ dev_err(&chip->dev, "TPM: HMAC response check has no sessions tag\n");
+ goto out;
+ }
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0)
+ goto out;
+ attrs = chip->cc_attrs_tbl[i];
+ handles = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1;
+
+ /* point to area beyond handles */
+ offset_s = TPM_HEADER_SIZE + handles * 4;
+ parm_len = tpm_buf_read_u32(buf, &offset_s);
+ offset_p = offset_s;
+ offset_s += parm_len;
+ /* skip over any sessions before ours */
+ for (i = 0; i < auth->session - 1; i++) {
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len + 1;
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len;
+ }
+ /* TPM nonce */
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (offset_s + len > tpm_buf_length(buf))
+ goto out;
+ if (len != SHA256_DIGEST_SIZE)
+ goto out;
+ memcpy(auth->tpm_nonce, &buf->data[offset_s], len);
+ offset_s += len;
+ attrs = tpm_buf_read_u8(buf, &offset_s);
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (offset_s + len != tpm_buf_length(buf))
+ goto out;
+ if (len != SHA256_DIGEST_SIZE)
+ goto out;
+ /*
+ * offset_s points to the HMAC. Now calculate the comparison, beginning
+ * with rphash
+ */
+ sha256_init(&sctx);
+ /* yes, I know this is now zero, but it's what the standard says */
+ sha256_update(&sctx, (u8 *)&head->return_code,
+ sizeof(head->return_code));
+ /* ordinal is already BE */
+ sha256_update(&sctx, (u8 *)&auth->ordinal, sizeof(auth->ordinal));
+ sha256_update(&sctx, &buf->data[offset_p], parm_len);
+ sha256_final(&sctx, rphash);
+
+ /* now calculate the hmac */
+ tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len);
+ sha256_update(&sctx, rphash, sizeof(rphash));
+ sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
+ sha256_update(&sctx, &auth->attrs, 1);
+ /* we're done with the rphash, so put our idea of the hmac there */
+ tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len, rphash);
+ if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
+ rc = 0;
+ } else {
+ dev_err(&chip->dev, "TPM: HMAC check failed\n");
+ goto out;
+ }
+
+ /* now do response decryption */
+ if (auth->attrs & TPM2_SA_ENCRYPT) {
+ /* need key and IV */
+ tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE
+ + auth->passphrase_len, "CFB", auth->tpm_nonce,
+ auth->our_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE,
+ auth->scratch);
+
+ len = tpm_buf_read_u16(buf, &offset_p);
+ aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
+ aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p],
+ &buf->data[offset_p], len,
+ auth->scratch + AES_KEY_BYTES);
+ }
+
+ out:
+ if ((auth->attrs & TPM2_SA_CONTINUE_SESSION) == 0) {
+ if (rc)
+ /* manually close the session if it wasn't consumed */
+ tpm2_flush_context(chip, auth->handle);
+ memzero_explicit(auth, sizeof(*auth));
+ } else {
+ /* reset for next use */
+ auth->session = TPM_HEADER_SIZE;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(tpm_buf_check_hmac_response);
+
+/**
+ * tpm2_end_auth_session() - kill the allocated auth session
+ * @chip: the TPM chip structure
+ *
+ * Ends the session started by tpm2_start_auth_session() and frees all
+ * the resources. Under normal conditions,
+ * tpm_buf_check_hmac_response() will correctly end the session if
+ * required, so this function is only for use in error legs that will
+ * bypass the normal invocation of tpm_buf_check_hmac_response().
+ */
+void tpm2_end_auth_session(struct tpm_chip *chip)
+{
+ tpm2_flush_context(chip, chip->auth->handle);
+ memzero_explicit(chip->auth, sizeof(*chip->auth));
+}
+EXPORT_SYMBOL(tpm2_end_auth_session);
+
+static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
+ struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ u32 tot_len = be32_to_cpu(head->length);
+ off_t offset = TPM_HEADER_SIZE;
+ u32 val;
+
+ /* we're starting after the header so adjust the length */
+ tot_len -= TPM_HEADER_SIZE;
+
+ /* should have handle plus nonce */
+ if (tot_len != 4 + 2 + sizeof(auth->tpm_nonce))
+ return -EINVAL;
+
+ auth->handle = tpm_buf_read_u32(buf, &offset);
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val != sizeof(auth->tpm_nonce))
+ return -EINVAL;
+ memcpy(auth->tpm_nonce, &buf->data[offset], sizeof(auth->tpm_nonce));
+ /* now compute the session key from the nonces */
+ tpm2_KDFa(auth->salt, sizeof(auth->salt), "ATH", auth->tpm_nonce,
+ auth->our_nonce, sizeof(auth->session_key),
+ auth->session_key);
+
+ return 0;
+}
+
+static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
+{
+ int rc;
+ unsigned int offset = 0; /* dummy offset for null seed context */
+ u8 name[SHA256_DIGEST_SIZE + 2];
+
+ rc = tpm2_load_context(chip, chip->null_key_context, &offset,
+ null_key);
+ if (rc != -EINVAL)
+ return rc;
+
+ /* an integrity failure may mean the TPM has been reset */
+ dev_err(&chip->dev, "NULL key integrity failure!\n");
+ /* check the null name against what we know */
+ tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
+ if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
+ /* name unchanged, assume transient integrity failure */
+ return rc;
+ /*
+ * Fatal TPM failure: the NULL seed has actually changed, so
+ * the TPM must have been illegally reset. All in-kernel TPM
+ * operations will fail because the NULL primary can't be
+ * loaded to salt the sessions, but disable the TPM anyway so
+ * userspace programmes can't be compromised by it.
+ */
+ dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+
+ return rc;
+}
+
+/**
+ * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
+ * @chip: the TPM chip structure to create the session with
+ *
+ * This function loads the NULL seed from its saved context and starts
+ * an authentication session on the null seed, fills in the
+ * @chip->auth structure to contain all the session details necessary
+ * for performing the HMAC, encrypt and decrypt operations and
+ * returns. The NULL seed is flushed before this function returns.
+ *
+ * Return: zero on success or actual error encountered.
+ */
+int tpm2_start_auth_session(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ struct tpm2_auth *auth = chip->auth;
+ int rc;
+ u32 null_key;
+
+ rc = tpm2_load_null(chip, &null_key);
+ if (rc)
+ goto out;
+
+ auth->session = TPM_HEADER_SIZE;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_START_AUTH_SESS);
+ if (rc)
+ goto out;
+
+ /* salt key handle */
+ tpm_buf_append_u32(&buf, null_key);
+ /* bind key handle */
+ tpm_buf_append_u32(&buf, TPM2_RH_NULL);
+ /* nonce caller */
+ get_random_bytes(auth->our_nonce, sizeof(auth->our_nonce));
+ tpm_buf_append_u16(&buf, sizeof(auth->our_nonce));
+ tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
+
+ /* append encrypted salt and squirrel away unencrypted in auth */
+ tpm_buf_append_salt(&buf, chip);
+ /* session type (HMAC, audit or policy) */
+ tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
+
+ /* symmetric encryption parameters */
+ /* symmetric algorithm */
+ tpm_buf_append_u16(&buf, TPM_ALG_AES);
+ /* bits for symmetric algorithm */
+ tpm_buf_append_u16(&buf, AES_KEY_BITS);
+ /* symmetric algorithm mode (must be CFB) */
+ tpm_buf_append_u16(&buf, TPM_ALG_CFB);
+ /* hash algorithm for session */
+ tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ tpm2_flush_context(chip, null_key);
+
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_start_auth_session(auth, &buf);
+
+ tpm_buf_destroy(&buf);
+
+ out:
+ return rc;
+}
+EXPORT_SYMBOL(tpm2_start_auth_session);
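A caller is expected to pair this with the HMAC session helpers introduced elsewhere in this series; the sketch below is illustrative only, and the helper names (tpm_buf_append_hmac_session() and friends) are assumptions taken from the rest of the series rather than shown in this hunk:

    /* hypothetical caller, error handling trimmed */
    rc = tpm2_start_auth_session(chip);
    if (rc)
        return rc;

    tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
    /* assumed helpers: append the session, then HMAC/encrypt the command */
    tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION, NULL, 0);
    /* ... append command parameters ... */
    tpm_buf_fill_hmac_session(chip, &buf);
    rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
    /* verify the response HMAC and decrypt any encrypted response parameter */
    rc = tpm_buf_check_hmac_response(chip, &buf, rc);
    tpm_buf_destroy(&buf);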
+
+/**
+ * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
+ *
+ * @chip: The TPM the primary was created under
+ * @buf: The response buffer from the chip
+ * @handle: pointer to be filled in with the return handle of the primary
+ * @hierarchy: The hierarchy the primary was created for
+ * @name: pointer to be filled in with the primary key name
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 *handle, u32 hierarchy, u8 *name)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset_r = TPM_HEADER_SIZE, offset_t;
+ u16 len = TPM_HEADER_SIZE;
+ u32 total_len = be32_to_cpu(head->length);
+ u32 val, param_len, keyhandle;
+
+ keyhandle = tpm_buf_read_u32(buf, &offset_r);
+ if (handle)
+ *handle = keyhandle;
+ else
+ tpm2_flush_context(chip, keyhandle);
+
+ param_len = tpm_buf_read_u32(buf, &offset_r);
+ /*
+ * param_len doesn't include the header, but all the other
+ * lengths and offsets do, so add the header size to param_len to
+ * make the comparisons easier
+ */
+ param_len += TPM_HEADER_SIZE;
+
+ if (param_len + 8 > total_len)
+ return -EINVAL;
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_t = offset_r;
+ if (name) {
+ /*
+ * now we have the public area, compute the name of
+ * the object
+ */
+ put_unaligned_be16(TPM_ALG_SHA256, name);
+ sha256(&buf->data[offset_r], len, name + 2);
+ }
+
+ /* validate the public key */
+ val = tpm_buf_read_u16(buf, &offset_t);
+
+ /* key type (must be what we asked for) */
+ if (val != TPM_ALG_ECC)
+ return -EINVAL;
+ val = tpm_buf_read_u16(buf, &offset_t);
+
+ /* name algorithm */
+ if (val != TPM_ALG_SHA256)
+ return -EINVAL;
+ val = tpm_buf_read_u32(buf, &offset_t);
+
+ /* object properties */
+ if (val != TPM2_OA_TMPL)
+ return -EINVAL;
+
+ /* auth policy (empty) */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != 0)
+ return -EINVAL;
+
+ /* symmetric key parameters */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_AES)
+ return -EINVAL;
+
+ /* symmetric key length */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != AES_KEY_BITS)
+ return -EINVAL;
+
+ /* symmetric encryption scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_CFB)
+ return -EINVAL;
+
+ /* signing scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_NULL)
+ return -EINVAL;
+
+ /* ECC Curve */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM2_ECC_NIST_P256)
+ return -EINVAL;
+
+ /* KDF Scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_NULL)
+ return -EINVAL;
+
+ /* extract public key (x and y points) */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != EC_PT_SZ)
+ return -EINVAL;
+ memcpy(chip->null_ec_key_x, &buf->data[offset_t], val);
+ offset_t += val;
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != EC_PT_SZ)
+ return -EINVAL;
+ memcpy(chip->null_ec_key_y, &buf->data[offset_t], val);
+ offset_t += val;
+
+ /* advance past the TPM2B public body using its original declared length */
+ offset_r += len;
+
+ /* should have exactly consumed the TPM2B public structure */
+ if (offset_t != offset_r)
+ return -EINVAL;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /* creation data (skip) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /* creation digest (must be sha256) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (len != SHA256_DIGEST_SIZE || offset_r > param_len)
+ return -EINVAL;
+
+ /* TPMT_TK_CREATION follows */
+ /* tag, must be TPM2_ST_CREATION (0x8021) */
+ val = tpm_buf_read_u16(buf, &offset_r);
+ if (val != TPM2_ST_CREATION || offset_r > param_len)
+ return -EINVAL;
+
+ /* hierarchy */
+ val = tpm_buf_read_u32(buf, &offset_r);
+ if (val != hierarchy || offset_r > param_len)
+ return -EINVAL;
+
+ /* the ticket digest HMAC (might not be sha256) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /*
+ * finally we have the name, which is a 2 byte algorithm
+ * identifier followed by a sha256 digest
+ */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ if (offset_r + len != param_len + 8)
+ return -EINVAL;
+ if (len != SHA256_DIGEST_SIZE + 2)
+ return -EINVAL;
+
+ if (memcmp(chip->null_key_name, &buf->data[offset_r],
+ SHA256_DIGEST_SIZE + 2) != 0) {
+ dev_err(&chip->dev, "NULL Seed name comparison failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
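The name comparison above relies on the TPM 2.0 object name construction, which is simply the name algorithm identifier followed by that algorithm's digest of the marshalled public area; a minimal sketch mirroring the sha256() call earlier in this function (public_offset and public_len stand in for the offsets computed above):

    /* name = be16(nameAlg) || SHA-256(publicArea) */
    u8 name[SHA256_DIGEST_SIZE + 2];

    put_unaligned_be16(TPM_ALG_SHA256, name);
    sha256(&buf->data[public_offset], public_len, name + 2);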
+
+/**
+ * tpm2_create_primary() - create a primary key using a fixed P-256 template
+ *
+ * @chip: the TPM chip to create under
+ * @hierarchy: The hierarchy handle to create under
+ * @handle: The returned volatile handle on success
+ * @name: The name of the returned key
+ *
+ * For platforms that might not have a persistent primary, this can be
+ * used to create one quickly on the fly (it uses Elliptic Curve rather
+ * than RSA, so even slow TPMs can create one fast). The template is the
+ * TCG mandated H template for non-endorsement ECC primaries, i.e. the
+ * P-256 elliptic curve (currently the only curve all TPM2s are required
+ * to support), a sha256 name hash and no policy.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name)
+{
+ int rc;
+ struct tpm_buf buf;
+ struct tpm_buf template;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE_PRIMARY);
+ if (rc)
+ return rc;
+
+ rc = tpm_buf_init_sized(&template);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+
+ /*
+ * create the template. Note: in order for userspace to
+ * verify the security of the system, it will have to create
+ * and certify this NULL primary, meaning all the template
+ * parameters will have to be identical, so conform exactly to
+ * the TCG TPM v2.0 Provisioning Guidance for the SRK ECC
+ * key H template (H has zero size unique points)
+ */
+
+ /* key type */
+ tpm_buf_append_u16(&template, TPM_ALG_ECC);
+
+ /* name algorithm */
+ tpm_buf_append_u16(&template, TPM_ALG_SHA256);
+
+ /* object properties */
+ tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+
+ /* auth policy (empty) */
+ tpm_buf_append_u16(&template, 0);
+
+ /* BEGIN parameters: key specific; for ECC */
+
+ /* symmetric algorithm */
+ tpm_buf_append_u16(&template, TPM_ALG_AES);
+
+ /* bits for symmetric algorithm */
+ tpm_buf_append_u16(&template, AES_KEY_BITS);
+
+ /* algorithm mode (must be CFB) */
+ tpm_buf_append_u16(&template, TPM_ALG_CFB);
+
+ /* scheme (NULL means any scheme) */
+ tpm_buf_append_u16(&template, TPM_ALG_NULL);
+
+ /* ECC Curve ID */
+ tpm_buf_append_u16(&template, TPM2_ECC_NIST_P256);
+
+ /* KDF Scheme */
+ tpm_buf_append_u16(&template, TPM_ALG_NULL);
+
+ /* unique: key specific; for ECC it is two zero size points */
+ tpm_buf_append_u16(&template, 0);
+ tpm_buf_append_u16(&template, 0);
+
+ /* END parameters */
+
+ /* primary handle */
+ tpm_buf_append_u32(&buf, hierarchy);
+ tpm_buf_append_empty_auth(&buf, TPM2_RS_PW);
+
+ /* sensitive create size is 4 for two empty buffers */
+ tpm_buf_append_u16(&buf, 4);
+
+ /* sensitive create auth data (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* sensitive create sensitive data (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* the public template */
+ tpm_buf_append(&buf, template.data, template.length);
+ tpm_buf_destroy(&template);
+
+ /* outside info (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR (none) */
+ tpm_buf_append_u32(&buf, 0);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0,
+ "attempting to create NULL primary");
+
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_create_primary(chip, &buf, handle, hierarchy,
+ name);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
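For reference, the command body assembled above follows the TPM2_CreatePrimary parameter order; a condensed view using the specification's field names (for illustration, not new code):

    /*
     *   primaryHandle  (u32)                 hierarchy, e.g. TPM2_RH_NULL
     *   authArea                             empty password auth (TPM2_RS_PW)
     *   inSensitive    (TPM2B, size 4)       empty userAuth + empty data
     *   inPublic       (TPM2B)               the H template built above
     *   outsideInfo    (TPM2B)               empty
     *   creationPCR    (TPML_PCR_SELECTION)  count 0
     */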
+
+static int tpm2_create_null_primary(struct tpm_chip *chip)
+{
+ u32 null_key;
+ int rc;
+
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &null_key,
+ chip->null_key_name);
+
+ if (rc == TPM2_RC_SUCCESS) {
+ unsigned int offset = 0; /* dummy offset for null key context */
+
+ rc = tpm2_save_context(chip, null_key, chip->null_key_context,
+ sizeof(chip->null_key_context), &offset);
+ tpm2_flush_context(chip, null_key);
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_sessions_init() - start of day initialization for the sessions code
+ * @chip: TPM chip
+ *
+ * Derive and context save the null primary and allocate memory in the
+ * struct tpm_chip for the authorizations.
+ */
+int tpm2_sessions_init(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm2_create_null_primary(chip);
+ if (rc)
+ dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
+
+ chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
+ if (!chip->auth)
+ return -ENOMEM;
+
+ return rc;
+}
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 363afdd4d1d3..4892d491da8d 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -68,8 +68,8 @@ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
kfree(space->session_buf);
}
-static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
- unsigned int *offset, u32 *handle)
+int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle)
{
struct tpm_buf tbuf;
struct tpm2_context *ctx;
@@ -105,6 +105,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
*handle = 0;
tpm_buf_destroy(&tbuf);
return -ENOENT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) {
+ tpm_buf_destroy(&tbuf);
+ return -EINVAL;
} else if (rc > 0) {
dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
__func__, rc);
@@ -119,8 +122,8 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
return 0;
}
-static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
- unsigned int buf_size, unsigned int *offset)
+int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset)
{
struct tpm_buf tbuf;
unsigned int body_size;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 9c924a1440a9..2d2ae37153ba 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -51,34 +51,40 @@ static struct tpm_inf_dev tpm_dev;
static inline void tpm_data_out(unsigned char data, unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
outb(data, tpm_dev.data_regs + offset);
else
+#endif
writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
static inline unsigned char tpm_data_in(unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
return inb(tpm_dev.data_regs + offset);
- else
- return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+#endif
+ return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
static inline void tpm_config_out(unsigned char data, unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
outb(data, tpm_dev.config_port + offset);
else
+#endif
writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
}
static inline unsigned char tpm_config_in(unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
return inb(tpm_dev.config_port + offset);
- else
- return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+#endif
+ return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
}
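The four accessors above all follow the same idiom for making port I/O compile-time optional: when CONFIG_HAS_IOPORT is unset the inb()/outb() family is unavailable, so the port branch is compiled out and only the MMIO path remains. A generic sketch of the pattern (struct my_dev and use_port_io are hypothetical, not from this driver):

    static u8 my_read(struct my_dev *d, unsigned int off)
    {
    #ifdef CONFIG_HAS_IOPORT
        if (d->use_port_io)
            return inb(d->io_base + off);
    #endif
        return readb(d->mmio_base + off);
    }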
/* TPM header definitions */
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 714070ebb6e7..176cd8dbf1db 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1057,11 +1057,6 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
clkrun_val &= ~LPC_CLKRUN_EN;
iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
} else {
data->clkrun_enabled--;
if (data->clkrun_enabled)
@@ -1072,13 +1067,15 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
/* Enable LPC CLKRUN# */
clkrun_val |= LPC_CLKRUN_EN;
iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
}
+
+#ifdef CONFIG_HAS_IOPORT
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+#endif
}
static const struct tpm_class_ops tpm_tis = {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 50af5fc7f570..3e9099504fad 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -451,8 +451,8 @@ config COMMON_CLK_FIXED_MMIO
config COMMON_CLK_K210
bool "Clock driver for the Canaan Kendryte K210 SoC"
- depends on OF && RISCV && SOC_CANAAN
- default SOC_CANAAN
+ depends on OF && RISCV && SOC_CANAAN_K210
+ default SOC_CANAAN_K210
help
Support for the Canaan Kendryte K210 RISC-V SoC clocks.
@@ -489,6 +489,7 @@ source "drivers/clk/rockchip/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
+source "drivers/clk/sophgo/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 14fa8d4ecc1f..4abe16c8ccdf 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
+obj-y += sophgo/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index e4fbbf3c40fe..3cb235df9d37 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -56,6 +56,8 @@ static int clk_dvp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ data->num = NR_CLOCKS;
+
data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev,
"hdmi0-108MHz",
&clk_dvp_parent, 0,
@@ -76,7 +78,6 @@ static int clk_dvp_probe(struct platform_device *pdev)
goto unregister_clk0;
}
- data->num = NR_CLOCKS;
ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
data);
if (ret)
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 829406dc44a2..4d411408e4af 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -371,8 +371,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
if (IS_ERR(hw))
return PTR_ERR(hw);
- data->hws[clks->id] = hw;
data->num = clks->id + 1;
+ data->hws[clks->id] = hw;
}
clks++;
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 7cde328495e2..ccc394692671 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -3,14 +3,16 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <dt-bindings/clock/en7523-clk.h>
#define REG_PCI_CONTROL 0x88
#define REG_PCI_CONTROL_PERSTOUT BIT(29)
#define REG_PCI_CONTROL_PERSTOUT1 BIT(26)
+#define REG_PCI_CONTROL_REFCLK_EN0 BIT(23)
#define REG_PCI_CONTROL_REFCLK_EN1 BIT(22)
+#define REG_PCI_CONTROL_PERSTOUT2 BIT(16)
#define REG_GSW_CLK_DIV_SEL 0x1b4
#define REG_EMI_CLK_DIV_SEL 0x1b8
#define REG_BUS_CLK_DIV_SEL 0x1bc
@@ -18,10 +20,25 @@
#define REG_SPI_CLK_FREQ_SEL 0x1c8
#define REG_NPU_CLK_DIV_SEL 0x1fc
#define REG_CRYPTO_CLKSRC 0x200
-#define REG_RESET_CONTROL 0x834
+#define REG_RESET_CONTROL2 0x830
+#define REG_RESET2_CONTROL_PCIE2 BIT(27)
+#define REG_RESET_CONTROL1 0x834
#define REG_RESET_CONTROL_PCIEHB BIT(29)
#define REG_RESET_CONTROL_PCIE1 BIT(27)
#define REG_RESET_CONTROL_PCIE2 BIT(26)
+/* EN7581 */
+#define REG_PCIE0_MEM 0x00
+#define REG_PCIE0_MEM_MASK 0x04
+#define REG_PCIE1_MEM 0x08
+#define REG_PCIE1_MEM_MASK 0x0c
+#define REG_PCIE2_MEM 0x10
+#define REG_PCIE2_MEM_MASK 0x14
+#define REG_PCIE_RESET_OPEN_DRAIN 0x018c
+#define REG_PCIE_RESET_OPEN_DRAIN_MASK GENMASK(2, 0)
+#define REG_NP_SCU_PCIC 0x88
+#define REG_NP_SCU_SSTR 0x9c
+#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
+#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
struct en_clk_desc {
int id;
@@ -47,6 +64,12 @@ struct en_clk_gate {
struct clk_hw hw;
};
+struct en_clk_soc_data {
+ const struct clk_ops pcie_ops;
+ int (*hw_init)(struct platform_device *pdev, void __iomem *base,
+ void __iomem *np_base);
+};
+
static const u32 gsw_base[] = { 400000000, 500000000 };
static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
@@ -145,11 +168,6 @@ static const struct en_clk_desc en7523_base_clks[] = {
}
};
-static const struct of_device_id of_match_clk_en7523[] = {
- { .compatible = "airoha,en7523-scu", },
- { /* sentinel */ }
-};
-
static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
{
const struct en_clk_desc *desc = &en7523_base_clks[i];
@@ -212,14 +230,14 @@ static int en7523_pci_prepare(struct clk_hw *hw)
usleep_range(1000, 2000);
/* Reset to default */
- val = readl(np_base + REG_RESET_CONTROL);
+ val = readl(np_base + REG_RESET_CONTROL1);
mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
REG_RESET_CONTROL_PCIEHB;
- writel(val & ~mask, np_base + REG_RESET_CONTROL);
+ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
usleep_range(1000, 2000);
- writel(val | mask, np_base + REG_RESET_CONTROL);
+ writel(val | mask, np_base + REG_RESET_CONTROL1);
msleep(100);
- writel(val & ~mask, np_base + REG_RESET_CONTROL);
+ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
usleep_range(5000, 10000);
/* Release device */
@@ -247,14 +265,10 @@ static void en7523_pci_unprepare(struct clk_hw *hw)
static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
void __iomem *np_base)
{
- static const struct clk_ops pcie_gate_ops = {
- .is_enabled = en7523_pci_is_enabled,
- .prepare = en7523_pci_prepare,
- .unprepare = en7523_pci_unprepare,
- };
+ const struct en_clk_soc_data *soc_data = device_get_match_data(dev);
struct clk_init_data init = {
.name = "pcie",
- .ops = &pcie_gate_ops,
+ .ops = &soc_data->pcie_ops,
};
struct en_clk_gate *cg;
@@ -264,7 +278,10 @@ static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
cg->base = np_base;
cg->hw.init = &init;
- en7523_pci_unprepare(&cg->hw);
+
+ if (init.ops->disable)
+ init.ops->disable(&cg->hw);
+ init.ops->unprepare(&cg->hw);
if (clk_hw_register(dev, &cg->hw))
return NULL;
@@ -272,6 +289,111 @@ static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
return &cg->hw;
}
+static int en7581_pci_is_enabled(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ u32 val, mask;
+
+ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1;
+ val = readl(cg->base + REG_PCI_CONTROL);
+ return (val & mask) == mask;
+}
+
+static int en7581_pci_prepare(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val, mask;
+
+ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+ REG_RESET_CONTROL_PCIEHB;
+ val = readl(np_base + REG_RESET_CONTROL1);
+ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
+ val = readl(np_base + REG_RESET_CONTROL2);
+ writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int en7581_pci_enable(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val, mask;
+
+ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
+ REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
+ REG_PCI_CONTROL_PERSTOUT;
+ val = readl(np_base + REG_PCI_CONTROL);
+ writel(val | mask, np_base + REG_PCI_CONTROL);
+ msleep(250);
+
+ return 0;
+}
+
+static void en7581_pci_unprepare(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val, mask;
+
+ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+ REG_RESET_CONTROL_PCIEHB;
+ val = readl(np_base + REG_RESET_CONTROL1);
+ writel(val | mask, np_base + REG_RESET_CONTROL1);
+ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
+ writel(val | mask, np_base + REG_RESET_CONTROL1);
+ val = readl(np_base + REG_RESET_CONTROL2);
+ writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
+ msleep(100);
+}
+
+static void en7581_pci_disable(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val, mask;
+
+ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
+ REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
+ REG_PCI_CONTROL_PERSTOUT;
+ val = readl(np_base + REG_PCI_CONTROL);
+ writel(val & ~mask, np_base + REG_PCI_CONTROL);
+ usleep_range(1000, 2000);
+}
+
+static int en7581_clk_hw_init(struct platform_device *pdev,
+ void __iomem *base,
+ void __iomem *np_base)
+{
+ void __iomem *pb_base;
+ u32 val;
+
+ pb_base = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(pb_base))
+ return PTR_ERR(pb_base);
+
+ val = readl(np_base + REG_NP_SCU_SSTR);
+ val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
+ writel(val, np_base + REG_NP_SCU_SSTR);
+ val = readl(np_base + REG_NP_SCU_PCIC);
+ writel(val | 3, np_base + REG_NP_SCU_PCIC);
+
+ writel(0x20000000, pb_base + REG_PCIE0_MEM);
+ writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
+ writel(0x24000000, pb_base + REG_PCIE1_MEM);
+ writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
+ writel(0x28000000, pb_base + REG_PCIE2_MEM);
+ writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
+
+ val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
+ writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
+ base + REG_PCIE_RESET_OPEN_DRAIN);
+
+ return 0;
+}
+
static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
void __iomem *base, void __iomem *np_base)
{
@@ -304,6 +426,7 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
static int en7523_clk_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct en_clk_soc_data *soc_data;
struct clk_hw_onecell_data *clk_data;
void __iomem *base, *np_base;
int r;
@@ -316,6 +439,13 @@ static int en7523_clk_probe(struct platform_device *pdev)
if (IS_ERR(np_base))
return PTR_ERR(np_base);
+ soc_data = device_get_match_data(&pdev->dev);
+ if (soc_data->hw_init) {
+ r = soc_data->hw_init(pdev, base, np_base);
+ if (r)
+ return r;
+ }
+
clk_data = devm_kzalloc(&pdev->dev,
struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
GFP_KERNEL);
@@ -333,6 +463,31 @@ static int en7523_clk_probe(struct platform_device *pdev)
return r;
}
+static const struct en_clk_soc_data en7523_data = {
+ .pcie_ops = {
+ .is_enabled = en7523_pci_is_enabled,
+ .prepare = en7523_pci_prepare,
+ .unprepare = en7523_pci_unprepare,
+ },
+};
+
+static const struct en_clk_soc_data en7581_data = {
+ .pcie_ops = {
+ .is_enabled = en7581_pci_is_enabled,
+ .prepare = en7581_pci_prepare,
+ .enable = en7581_pci_enable,
+ .unprepare = en7581_pci_unprepare,
+ .disable = en7581_pci_disable,
+ },
+ .hw_init = en7581_clk_hw_init,
+};
+
+static const struct of_device_id of_match_clk_en7523[] = {
+ { .compatible = "airoha,en7523-scu", .data = &en7523_data },
+ { .compatible = "airoha,en7581-scu", .data = &en7581_data },
+ { /* sentinel */ }
+};
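Taken together with the probe changes above, the match table now carries per-SoC data instead of bare compatibles; a condensed sketch of how it is consumed (restating the code in this hunk):

    soc_data = device_get_match_data(&pdev->dev); /* &en7523_data or &en7581_data */
    if (soc_data->hw_init)                        /* EN7581 only */
        soc_data->hw_init(pdev, base, np_base);
    /* later, en7523_register_pcie_clk() uses soc_data->pcie_ops for "pcie" */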
+
static struct platform_driver clk_en7523_drv = {
.probe = en7523_clk_probe,
.driver = {
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index ba0ff01bf4dc..856b008e07c6 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -67,12 +67,10 @@ struct gemini_gate_data {
* struct clk_gemini_pci - Gemini PCI clock
* @hw: corresponding clock hardware entry
* @map: regmap to access the registers
- * @rate: current rate
*/
struct clk_gemini_pci {
struct clk_hw hw;
struct regmap *map;
- unsigned long rate;
};
/**
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 2a0cea2946f9..6e68a41a70a1 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -37,7 +37,6 @@
struct hb_clk {
struct clk_hw hw;
void __iomem *reg;
- char *parent_name;
};
#define to_hb_clk(p) container_of(p, struct hb_clk, hw)
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index bacdcbb287ac..820bb1e9e3b7 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,317 +13,348 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-#define LOONGSON2_PLL_MULT_SHIFT 32
-#define LOONGSON2_PLL_MULT_WIDTH 10
-#define LOONGSON2_PLL_DIV_SHIFT 26
-#define LOONGSON2_PLL_DIV_WIDTH 6
-#define LOONGSON2_APB_FREQSCALE_SHIFT 20
-#define LOONGSON2_APB_FREQSCALE_WIDTH 3
-#define LOONGSON2_USB_FREQSCALE_SHIFT 16
-#define LOONGSON2_USB_FREQSCALE_WIDTH 3
-#define LOONGSON2_SATA_FREQSCALE_SHIFT 12
-#define LOONGSON2_SATA_FREQSCALE_WIDTH 3
-#define LOONGSON2_BOOT_FREQSCALE_SHIFT 8
-#define LOONGSON2_BOOT_FREQSCALE_WIDTH 3
-
-static void __iomem *loongson2_pll_base;
-
static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m",},
+ { .fw_name = "ref_100m", },
};
-static struct clk_hw *loongson2_clk_register(struct device *dev,
- const char *name,
- const char *parent_name,
- const struct clk_ops *ops,
- unsigned long flags)
-{
- int ret;
- struct clk_hw *hw;
- struct clk_init_data init = { };
-
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = ops;
- init.flags = flags;
- init.num_parents = 1;
-
- if (!parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &parent_name;
-
- hw->init = &init;
-
- ret = devm_clk_hw_register(dev, hw);
- if (ret)
- hw = ERR_PTR(ret);
-
- return hw;
-}
-
-static unsigned long loongson2_calc_pll_rate(int offset, unsigned long rate)
-{
- u64 val;
- u32 mult, div;
-
- val = readq(loongson2_pll_base + offset);
-
- mult = (val >> LOONGSON2_PLL_MULT_SHIFT) &
- clk_div_mask(LOONGSON2_PLL_MULT_WIDTH);
- div = (val >> LOONGSON2_PLL_DIV_SHIFT) &
- clk_div_mask(LOONGSON2_PLL_DIV_WIDTH);
-
- return div_u64((u64)rate * mult, div);
-}
+enum loongson2_clk_type {
+ CLK_TYPE_PLL,
+ CLK_TYPE_SCALE,
+ CLK_TYPE_DIVIDER,
+ CLK_TYPE_GATE,
+ CLK_TYPE_FIXED,
+ CLK_TYPE_NONE,
+};
-static unsigned long loongson2_node_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return loongson2_calc_pll_rate(0x0, parent_rate);
-}
+struct loongson2_clk_provider {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_hw_onecell_data clk_data;
+ spinlock_t clk_lock; /* protect access to DIV registers */
+};
-static const struct clk_ops loongson2_node_clk_ops = {
- .recalc_rate = loongson2_node_recalc_rate,
+struct loongson2_clk_data {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 div_shift;
+ u8 div_width;
+ u8 mult_shift;
+ u8 mult_width;
};
-static unsigned long loongson2_ddr_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return loongson2_calc_pll_rate(0x10, parent_rate);
-}
+struct loongson2_clk_board_info {
+ u8 id;
+ enum loongson2_clk_type type;
+ const char *name;
+ const char *parent_name;
+ unsigned long fixed_rate;
+ u8 reg_offset;
+ u8 div_shift;
+ u8 div_width;
+ u8 mult_shift;
+ u8 mult_width;
+ u8 bit_idx;
+};
-static const struct clk_ops loongson2_ddr_clk_ops = {
- .recalc_rate = loongson2_ddr_recalc_rate,
+#define CLK_DIV(_id, _name, _pname, _offset, _dshift, _dwidth) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_DIVIDER, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ }
+
+#define CLK_PLL(_id, _name, _offset, _mshift, _mwidth, \
+ _dshift, _dwidth) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_PLL, \
+ .name = _name, \
+ .parent_name = NULL, \
+ .reg_offset = _offset, \
+ .mult_shift = _mshift, \
+ .mult_width = _mwidth, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ }
+
+#define CLK_SCALE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ }
+
+#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ }
+
+#define CLK_FIXED(_id, _name, _pname, _rate) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_FIXED, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .fixed_rate = _rate, \
+ }
+
+static const struct loongson2_clk_board_info ls2k0500_clks[] = {
+ CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
+ CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
+ CLK_PLL(LOONGSON2_DC_PLL, "pll_soc", 0x10, 16, 8, 8, 6),
+ CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x18, 16, 8, 8, 6),
+ CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x20, 16, 8, 8, 6),
+ CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "pll_node", 0, 24, 6),
+ CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "pll_ddr", 0x8, 24, 6),
+ CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "pll_ddr", 0xc, 8, 6),
+ CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "pll_soc", 0x10, 24, 6),
+ CLK_DIV(LOONGSON2_DC_CLK, "clk_sb", "pll_soc", 0x14, 0, 6),
+ CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "pll_soc", 0x14, 8, 6),
+ CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x18, 24, 6),
+ CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x20, 24, 6),
+ CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", "clk_sb", 0x28, 8, 3),
+ CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_sb", 0x28, 12, 3),
+ CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_sb", 0x28, 16, 3),
+ CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_sb", 0x28, 20, 3),
+ { /* Sentinel */ },
};
-static unsigned long loongson2_dc_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return loongson2_calc_pll_rate(0x20, parent_rate);
-}
+static const struct loongson2_clk_board_info ls2k1000_clks[] = {
+ CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 32, 10, 26, 6),
+ CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x10, 32, 10, 26, 6),
+ CLK_PLL(LOONGSON2_DC_PLL, "pll_dc", 0x20, 32, 10, 26, 6),
+ CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x30, 32, 10, 26, 6),
+ CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x40, 32, 10, 26, 6),
+ CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "pll_node", 0x8, 0, 6),
+ CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "pll_ddr", 0x18, 0, 6),
+ CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "pll_ddr", 0x18, 22, 6),
+ /*
+ * The hda clk divisor is in the upper 32 bits and the clk-provider
+ * layer code doesn't support 64-bit I/O operations, so a conversion
+ * is required: subtract 32 from the shift and add 4 bytes to the
+ * hda register address
+ */
+ CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "pll_ddr", 0x22, 12, 7),
+ CLK_DIV(LOONGSON2_DC_CLK, "clk_dc", "pll_dc", 0x28, 0, 6),
+ CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "pll_dc", 0x28, 22, 6),
+ CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x38, 0, 6),
+ CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x38, 0, 6),
+ CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", NULL, 0x50, 8, 3),
+ CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_gmac", 0x50, 12, 3),
+ CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_gmac", 0x50, 16, 3),
+ CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_gmac", 0x50, 20, 3),
+ { /* Sentinel */ },
+};
-static const struct clk_ops loongson2_dc_clk_ops = {
- .recalc_rate = loongson2_dc_recalc_rate,
+static const struct loongson2_clk_board_info ls2k2000_clks[] = {
+ CLK_PLL(LOONGSON2_DC_PLL, "pll_0", 0, 21, 9, 32, 6),
+ CLK_PLL(LOONGSON2_DDR_PLL, "pll_1", 0x10, 21, 9, 32, 6),
+ CLK_PLL(LOONGSON2_NODE_PLL, "pll_2", 0x20, 21, 9, 32, 6),
+ CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x30, 21, 9, 32, 6),
+ CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x40, 21, 9, 32, 6),
+ CLK_GATE(LOONGSON2_OUT0_GATE, "out0_gate", "pll_0", 0, 40),
+ CLK_GATE(LOONGSON2_GMAC_GATE, "gmac_gate", "pll_0", 0, 41),
+ CLK_GATE(LOONGSON2_RIO_GATE, "rio_gate", "pll_0", 0, 42),
+ CLK_GATE(LOONGSON2_DC_GATE, "dc_gate", "pll_1", 0x10, 40),
+ CLK_GATE(LOONGSON2_DDR_GATE, "ddr_gate", "pll_1", 0x10, 41),
+ CLK_GATE(LOONGSON2_GPU_GATE, "gpu_gate", "pll_1", 0x10, 42),
+ CLK_GATE(LOONGSON2_HDA_GATE, "hda_gate", "pll_2", 0x20, 40),
+ CLK_GATE(LOONGSON2_NODE_GATE, "node_gate", "pll_2", 0x20, 41),
+ CLK_GATE(LOONGSON2_EMMC_GATE, "emmc_gate", "pll_2", 0x20, 42),
+ CLK_GATE(LOONGSON2_PIX0_GATE, "pix0_gate", "pll_pix0", 0x30, 40),
+ CLK_GATE(LOONGSON2_PIX1_GATE, "pix1_gate", "pll_pix1", 0x40, 40),
+ CLK_DIV(LOONGSON2_OUT0_CLK, "clk_out0", "out0_gate", 0, 0, 6),
+ CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "gmac_gate", 0, 7, 6),
+ CLK_DIV(LOONGSON2_RIO_CLK, "clk_rio", "rio_gate", 0, 14, 6),
+ CLK_DIV(LOONGSON2_DC_CLK, "clk_dc", "dc_gate", 0x10, 0, 6),
+ CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "gpu_gate", 0x10, 7, 6),
+ CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "ddr_gate", 0x10, 14, 6),
+ CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "hda_gate", 0x20, 0, 6),
+ CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "node_gate", 0x20, 7, 6),
+ CLK_DIV(LOONGSON2_EMMC_CLK, "clk_emmc", "emmc_gate", 0x20, 14, 6),
+ CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x30, 0, 6),
+ CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x40, 0, 6),
+ CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_out0", 0x50, 12, 3),
+ CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_out0", 0x50, 16, 3),
+ CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_node", 0x50, 20, 3),
+ CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", NULL, 0x50, 23, 3),
+ CLK_SCALE(LOONGSON2_DES_CLK, "clk_des", "clk_node", 0x50, 40, 3),
+ CLK_SCALE(LOONGSON2_I2S_CLK, "clk_i2s", "clk_node", 0x50, 44, 3),
+ CLK_FIXED(LOONGSON2_MISC_CLK, "clk_misc", NULL, 50000000),
+ { /* Sentinel */ },
};
-static unsigned long loongson2_pix0_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static inline struct loongson2_clk_data *to_loongson2_clk(struct clk_hw *hw)
{
- return loongson2_calc_pll_rate(0x30, parent_rate);
+ return container_of(hw, struct loongson2_clk_data, hw);
}
-static const struct clk_ops loongson2_pix0_clk_ops = {
- .recalc_rate = loongson2_pix0_recalc_rate,
-};
-
-static unsigned long loongson2_pix1_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static inline unsigned long loongson2_rate_part(u64 val, u8 shift, u8 width)
{
- return loongson2_calc_pll_rate(0x40, parent_rate);
+ return (val & GENMASK(shift + width - 1, shift)) >> shift;
}
-static const struct clk_ops loongson2_pix1_clk_ops = {
- .recalc_rate = loongson2_pix1_recalc_rate,
-};
-
-static unsigned long loongson2_calc_rate(unsigned long rate,
- int shift, int width)
+static unsigned long loongson2_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- u64 val;
- u32 mult;
-
- val = readq(loongson2_pll_base + 0x50);
+ u64 val, mult, div;
+ struct loongson2_clk_data *clk = to_loongson2_clk(hw);
- mult = (val >> shift) & clk_div_mask(width);
+ val = readq(clk->reg);
+ mult = loongson2_rate_part(val, clk->mult_shift, clk->mult_width);
+ div = loongson2_rate_part(val, clk->div_shift, clk->div_width);
- return div_u64((u64)rate * (mult + 1), 8);
-}
-
-static unsigned long loongson2_boot_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return loongson2_calc_rate(parent_rate,
- LOONGSON2_BOOT_FREQSCALE_SHIFT,
- LOONGSON2_BOOT_FREQSCALE_WIDTH);
+ return div_u64((u64)parent_rate * mult, div);
}
-static const struct clk_ops loongson2_boot_clk_ops = {
- .recalc_rate = loongson2_boot_recalc_rate,
+static const struct clk_ops loongson2_pll_recalc_ops = {
+ .recalc_rate = loongson2_pll_recalc_rate,
};
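As a worked example of the recalc above (register field values are hypothetical): with a 100 MHz reference, mult = 48 and div = 4, the PLL rate comes out as 100 MHz * 48 / 4 = 1200 MHz.

    /* hypothetical field values: mult = 48, div = 4, parent = 100 MHz */
    rate = div_u64((u64)100000000 * 48, 4);    /* 1200000000 Hz */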
-static unsigned long loongson2_apb_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- return loongson2_calc_rate(parent_rate,
- LOONGSON2_APB_FREQSCALE_SHIFT,
- LOONGSON2_APB_FREQSCALE_WIDTH);
-}
+ u64 val, mult;
+ struct loongson2_clk_data *clk = to_loongson2_clk(hw);
-static const struct clk_ops loongson2_apb_clk_ops = {
- .recalc_rate = loongson2_apb_recalc_rate,
-};
+ val = readq(clk->reg);
+ mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
-static unsigned long loongson2_usb_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return loongson2_calc_rate(parent_rate,
- LOONGSON2_USB_FREQSCALE_SHIFT,
- LOONGSON2_USB_FREQSCALE_WIDTH);
+ return div_u64((u64)parent_rate * mult, 8);
}
-static const struct clk_ops loongson2_usb_clk_ops = {
- .recalc_rate = loongson2_usb_recalc_rate,
+static const struct clk_ops loongson2_freqscale_recalc_ops = {
+ .recalc_rate = loongson2_freqscale_recalc_rate,
};
-static unsigned long loongson2_sata_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+ const struct loongson2_clk_board_info *cld,
+ const struct clk_ops *ops)
{
- return loongson2_calc_rate(parent_rate,
- LOONGSON2_SATA_FREQSCALE_SHIFT,
- LOONGSON2_SATA_FREQSCALE_WIDTH);
-}
+ int ret;
+ struct clk_hw *hw;
+ struct loongson2_clk_data *clk;
+ struct clk_init_data init = { };
-static const struct clk_ops loongson2_sata_clk_ops = {
- .recalc_rate = loongson2_sata_recalc_rate,
-};
+ clk = devm_kzalloc(clp->dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
-static inline int loongson2_check_clk_hws(struct clk_hw *clks[], unsigned int count)
-{
- unsigned int i;
+ init.name = cld->name;
+ init.ops = ops;
+ init.flags = 0;
+ init.num_parents = 1;
- for (i = 0; i < count; i++)
- if (IS_ERR(clks[i])) {
- pr_err("Loongson2 clk %u: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- return PTR_ERR(clks[i]);
- }
+ if (!cld->parent_name)
+ init.parent_data = pdata;
+ else
+ init.parent_names = &cld->parent_name;
+
+ clk->reg = clp->base + cld->reg_offset;
+ clk->div_shift = cld->div_shift;
+ clk->div_width = cld->div_width;
+ clk->mult_shift = cld->mult_shift;
+ clk->mult_width = cld->mult_width;
+ clk->hw.init = &init;
- return 0;
+ hw = &clk->hw;
+ ret = devm_clk_hw_register(clp->dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hw;
}
static int loongson2_clk_probe(struct platform_device *pdev)
{
- int ret;
- struct clk_hw **hws;
- struct clk_hw_onecell_data *clk_hw_data;
- spinlock_t loongson2_clk_lock;
+ int i, clks_num = 0;
+ struct clk_hw *hw;
struct device *dev = &pdev->dev;
+ struct loongson2_clk_provider *clp;
+ const struct loongson2_clk_board_info *p, *data;
- loongson2_pll_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(loongson2_pll_base))
- return PTR_ERR(loongson2_pll_base);
-
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, LOONGSON2_CLK_END),
- GFP_KERNEL);
- if (WARN_ON(!clk_hw_data))
- return -ENOMEM;
-
- clk_hw_data->num = LOONGSON2_CLK_END;
- hws = clk_hw_data->hws;
-
- hws[LOONGSON2_NODE_PLL] = loongson2_clk_register(dev, "node_pll",
- NULL,
- &loongson2_node_clk_ops, 0);
-
- hws[LOONGSON2_DDR_PLL] = loongson2_clk_register(dev, "ddr_pll",
- NULL,
- &loongson2_ddr_clk_ops, 0);
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
- hws[LOONGSON2_DC_PLL] = loongson2_clk_register(dev, "dc_pll",
- NULL,
- &loongson2_dc_clk_ops, 0);
+ for (p = data; p->name; p++)
+ clks_num++;
- hws[LOONGSON2_PIX0_PLL] = loongson2_clk_register(dev, "pix0_pll",
- NULL,
- &loongson2_pix0_clk_ops, 0);
-
- hws[LOONGSON2_PIX1_PLL] = loongson2_clk_register(dev, "pix1_pll",
- NULL,
- &loongson2_pix1_clk_ops, 0);
+ clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
+ GFP_KERNEL);
+ if (!clp)
+ return -ENOMEM;
- hws[LOONGSON2_BOOT_CLK] = loongson2_clk_register(dev, "boot",
- NULL,
- &loongson2_boot_clk_ops, 0);
+ clp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clp->base))
+ return PTR_ERR(clp->base);
+
+ spin_lock_init(&clp->clk_lock);
+ clp->clk_data.num = clks_num + 1;
+ clp->dev = dev;
+
+ for (i = 0; i < clks_num; i++) {
+ p = &data[i];
+ switch (p->type) {
+ case CLK_TYPE_PLL:
+ hw = loongson2_clk_register(clp, p,
+ &loongson2_pll_recalc_ops);
+ break;
+ case CLK_TYPE_SCALE:
+ hw = loongson2_clk_register(clp, p,
+ &loongson2_freqscale_recalc_ops);
+ break;
+ case CLK_TYPE_DIVIDER:
+ hw = devm_clk_hw_register_divider(dev, p->name,
+ p->parent_name, 0,
+ clp->base + p->reg_offset,
+ p->div_shift, p->div_width,
+ CLK_DIVIDER_ONE_BASED,
+ &clp->clk_lock);
+ break;
+ case CLK_TYPE_GATE:
+ hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ clp->base + p->reg_offset,
+ p->bit_idx, 0,
+ &clp->clk_lock);
+ break;
+ case CLK_TYPE_FIXED:
+ hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
+ 0, p->fixed_rate);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
+ }
- hws[LOONGSON2_NODE_CLK] = devm_clk_hw_register_divider(dev, "node",
- "node_pll", 0,
- loongson2_pll_base + 0x8, 0,
- 6, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw),
+ "Register clk: %s, type: %u failed!\n",
+ p->name, p->type);
- /*
- * The hda clk divisor in the upper 32bits and the clk-prodiver
- * layer code doesn't support 64bit io operation thus a conversion
- * is required that subtract shift by 32 and add 4byte to the hda
- * address
- */
- hws[LOONGSON2_HDA_CLK] = devm_clk_hw_register_divider(dev, "hda",
- "ddr_pll", 0,
- loongson2_pll_base + 0x22, 12,
- 7, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_GPU_CLK] = devm_clk_hw_register_divider(dev, "gpu",
- "ddr_pll", 0,
- loongson2_pll_base + 0x18, 22,
- 6, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_DDR_CLK] = devm_clk_hw_register_divider(dev, "ddr",
- "ddr_pll", 0,
- loongson2_pll_base + 0x18, 0,
- 6, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_GMAC_CLK] = devm_clk_hw_register_divider(dev, "gmac",
- "dc_pll", 0,
- loongson2_pll_base + 0x28, 22,
- 6, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_DC_CLK] = devm_clk_hw_register_divider(dev, "dc",
- "dc_pll", 0,
- loongson2_pll_base + 0x28, 0,
- 6, CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_APB_CLK] = loongson2_clk_register(dev, "apb",
- "gmac",
- &loongson2_apb_clk_ops, 0);
-
- hws[LOONGSON2_USB_CLK] = loongson2_clk_register(dev, "usb",
- "gmac",
- &loongson2_usb_clk_ops, 0);
-
- hws[LOONGSON2_SATA_CLK] = loongson2_clk_register(dev, "sata",
- "gmac",
- &loongson2_sata_clk_ops, 0);
-
- hws[LOONGSON2_PIX0_CLK] = clk_hw_register_divider(NULL, "pix0",
- "pix0_pll", 0,
- loongson2_pll_base + 0x38, 0, 6,
- CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- hws[LOONGSON2_PIX1_CLK] = clk_hw_register_divider(NULL, "pix1",
- "pix1_pll", 0,
- loongson2_pll_base + 0x48, 0, 6,
- CLK_DIVIDER_ONE_BASED,
- &loongson2_clk_lock);
-
- ret = loongson2_check_clk_hws(hws, LOONGSON2_CLK_END);
- if (ret)
- return ret;
+ clp->clk_data.hws[p->id] = hw;
+ }
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clp->clk_data);
}
static const struct of_device_id loongson2_clk_match_table[] = {
- { .compatible = "loongson,ls2k-clk" },
+ { .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
+ { .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
+ { .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
{ }
};
MODULE_DEVICE_TABLE(of, loongson2_clk_match_table);
@@ -338,4 +369,5 @@ static struct platform_driver loongson2_clk_driver = {
module_platform_driver(loongson2_clk_driver);
MODULE_DESCRIPTION("Loongson2 clock driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index 53e21ac302e6..4c3a5e4eb77a 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -25,10 +25,12 @@
#define RS9_REG_SS_AMP_0V7 0x1
#define RS9_REG_SS_AMP_0V8 0x2
#define RS9_REG_SS_AMP_0V9 0x3
+#define RS9_REG_SS_AMP_DEFAULT RS9_REG_SS_AMP_0V8
#define RS9_REG_SS_AMP_MASK 0x3
#define RS9_REG_SS_SSC_100 0
#define RS9_REG_SS_SSC_M025 (1 << 3)
#define RS9_REG_SS_SSC_M050 (3 << 3)
+#define RS9_REG_SS_SSC_DEFAULT RS9_REG_SS_SSC_100
#define RS9_REG_SS_SSC_MASK (3 << 3)
#define RS9_REG_SS_SSC_LOCK BIT(5)
#define RS9_REG_SR 0x2
@@ -205,8 +207,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9)
int ret;
/* Set defaults */
- rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
- rs9->pll_ssc = RS9_REG_SS_SSC_100;
+ rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT;
+ rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT;
/* Output clock amplitude */
ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
@@ -247,13 +249,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
int i;
/* If amplitude is non-default, update it. */
- if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
+ if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
rs9->pll_amplitude);
}
/* If SSC is non-default, update it. */
- if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
+ if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
rs9->pll_ssc);
}
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 8cbe24789c24..d86a02563f6c 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -2,9 +2,10 @@
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
- * Copyright (C) 2018-2022 ARM Ltd.
+ * Copyright (C) 2018-2024 ARM Ltd.
*/
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -16,6 +17,17 @@
#define NOT_ATOMIC false
#define ATOMIC true
+enum scmi_clk_feats {
+ SCMI_CLK_ATOMIC_SUPPORTED,
+ SCMI_CLK_STATE_CTRL_SUPPORTED,
+ SCMI_CLK_RATE_CTRL_SUPPORTED,
+ SCMI_CLK_PARENT_CTRL_SUPPORTED,
+ SCMI_CLK_DUTY_CYCLE_SUPPORTED,
+ SCMI_CLK_FEATS_COUNT
+};
+
+#define SCMI_MAX_CLK_OPS BIT(SCMI_CLK_FEATS_COUNT)
+
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
struct scmi_clk {
@@ -158,41 +170,44 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
return !!enabled;
}
-/*
- * We can provide enable/disable/is_enabled atomic callbacks only if the
- * underlying SCMI transport for an SCMI instance is configured to handle
- * SCMI commands in an atomic manner.
- *
- * When no SCMI atomic transport support is available we instead provide only
- * the prepare/unprepare API, as allowed by the clock framework when atomic
- * calls are not available.
- *
- * Two distinct sets of clk_ops are provided since we could have multiple SCMI
- * instances with different underlying transport quality, so they cannot be
- * shared.
- */
-static const struct clk_ops scmi_clk_ops = {
- .recalc_rate = scmi_clk_recalc_rate,
- .round_rate = scmi_clk_round_rate,
- .set_rate = scmi_clk_set_rate,
- .prepare = scmi_clk_enable,
- .unprepare = scmi_clk_disable,
- .set_parent = scmi_clk_set_parent,
- .get_parent = scmi_clk_get_parent,
- .determine_rate = scmi_clk_determine_rate,
-};
+static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+ int ret;
+ u32 val;
+ struct scmi_clk *clk = to_scmi_clk(hw);
-static const struct clk_ops scmi_atomic_clk_ops = {
- .recalc_rate = scmi_clk_recalc_rate,
- .round_rate = scmi_clk_round_rate,
- .set_rate = scmi_clk_set_rate,
- .enable = scmi_clk_atomic_enable,
- .disable = scmi_clk_atomic_disable,
- .is_enabled = scmi_clk_atomic_is_enabled,
- .set_parent = scmi_clk_set_parent,
- .get_parent = scmi_clk_get_parent,
- .determine_rate = scmi_clk_determine_rate,
-};
+ ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret) {
+ duty->num = val;
+ duty->den = 100;
+ } else {
+ dev_warn(clk->dev,
+ "Failed to get duty cycle for clock ID %d\n", clk->id);
+ }
+
+ return ret;
+}
+
+static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+ int ret;
+ u32 val;
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ /* SCMI OEM Duty Cycle is expressed as a percentage */
+ val = (duty->num * 100) / duty->den;
+ ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ val, false);
+ if (ret)
+ dev_warn(clk->dev,
+ "Failed to set duty cycle(%u/%u) for clock ID %d\n",
+ duty->num, duty->den, clk->id);
+
+ return ret;
+}
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
const struct clk_ops *scmi_ops)
@@ -230,17 +245,153 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
return ret;
}
+/**
+ * scmi_clk_ops_alloc() - Alloc and configure clock operations
+ * @dev: A device reference for devres
+ * @feats_key: A bitmap representing the desired clk_ops capabilities
+ *
+ * Allocate and configure a proper set of clock operations depending on the
+ * specifically required SCMI clock features.
+ *
+ * Return: A pointer to the allocated and configured clk_ops on success,
+ * or NULL on allocation failure.
+ */
+static const struct clk_ops *
+scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
+{
+ struct clk_ops *ops;
+
+ ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return NULL;
+ /*
+ * We can provide enable/disable/is_enabled atomic callbacks only if the
+ * underlying SCMI transport for an SCMI instance is configured to
+ * handle SCMI commands in an atomic manner.
+ *
+ * When no SCMI atomic transport support is available we instead provide
+ * only the prepare/unprepare API, as allowed by the clock framework
+ * when atomic calls are not available.
+ */
+ if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
+ if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
+ ops->enable = scmi_clk_atomic_enable;
+ ops->disable = scmi_clk_atomic_disable;
+ } else {
+ ops->prepare = scmi_clk_enable;
+ ops->unprepare = scmi_clk_disable;
+ }
+ }
+
+ if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
+ ops->is_enabled = scmi_clk_atomic_is_enabled;
+
+ /* Rate ops */
+ ops->recalc_rate = scmi_clk_recalc_rate;
+ ops->round_rate = scmi_clk_round_rate;
+ ops->determine_rate = scmi_clk_determine_rate;
+ if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
+ ops->set_rate = scmi_clk_set_rate;
+
+ /* Parent ops */
+ ops->get_parent = scmi_clk_get_parent;
+ if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED))
+ ops->set_parent = scmi_clk_set_parent;
+
+ /* Duty cycle */
+ if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) {
+ ops->get_duty_cycle = scmi_clk_get_duty_cycle;
+ ops->set_duty_cycle = scmi_clk_set_duty_cycle;
+ }
+
+ return ops;
+}
+
+/**
+ * scmi_clk_ops_select() - Select a proper set of clock operations
+ * @sclk: A reference to an SCMI clock descriptor
+ * @atomic_capable: A flag to indicate if atomic mode is supported by the
+ * transport
+ * @atomic_threshold_us: Platform atomic threshold value in microseconds:
+ * clk_ops are atomic when clock enable latency is less
+ * than this threshold
+ * @clk_ops_db: A reference to the array used as a database to store all the
+ * created clock operations combinations.
+ * @db_size: Maximum number of entries held by @clk_ops_db
+ *
+ * After having built a bitmap descriptor to represent the set of features
+ * needed by this SCMI clock, at first use it to lookup into the set of
+ * previously allocated clk_ops to check if a suitable combination of clock
+ * operations was already created; when no match is found allocate a brand new
+ * set of clk_ops satisfying the required combination of features and save it
+ * for future references.
+ *
+ * In this way only one set of clk_ops is ever created for each different
+ * combination that is effectively needed by a driver instance.
+ *
+ * Return: A pointer to the allocated and configured clk_ops on success, or
+ * NULL otherwise.
+ */
+static const struct clk_ops *
+scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
+ unsigned int atomic_threshold_us,
+ const struct clk_ops **clk_ops_db, size_t db_size)
+{
+ const struct scmi_clock_info *ci = sclk->info;
+ unsigned int feats_key = 0;
+ const struct clk_ops *ops;
+
+ /*
+ * Note that when transport is atomic but SCMI protocol did not
+ * specify (or support) an enable_latency associated with a
+ * clock, we default to use atomic operations mode.
+ */
+ if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
+ feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
+
+ if (!ci->state_ctrl_forbidden)
+ feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);
+
+ if (!ci->rate_ctrl_forbidden)
+ feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);
+
+ if (!ci->parent_ctrl_forbidden)
+ feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
+
+ if (ci->extended_config)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+
+ if (WARN_ON(feats_key >= db_size))
+ return NULL;
+
+ /* Lookup previously allocated ops */
+ ops = clk_ops_db[feats_key];
+ if (ops)
+ return ops;
+
+ /* Did not find a pre-allocated clock_ops */
+ ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
+ if (!ops)
+ return NULL;
+
+ /* Store new ops combinations */
+ clk_ops_db[feats_key] = ops;
+
+ return ops;
+}
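To make the lookup concrete, a sketch of the key construction for one hypothetical clock on an atomic-capable transport that allows state and rate control but forbids parent changes and has no extended config:

    feats_key = BIT(SCMI_CLK_ATOMIC_SUPPORTED) |
                BIT(SCMI_CLK_STATE_CTRL_SUPPORTED) |
                BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);

    ops = clk_ops_db[feats_key];    /* NULL the first time this combination appears */
    if (!ops)
        ops = scmi_clk_ops_alloc(sclk->dev, feats_key); /* then cached for reuse */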
+
static int scmi_clocks_probe(struct scmi_device *sdev)
{
int idx, count, err;
- unsigned int atomic_threshold;
- bool is_atomic;
+ unsigned int atomic_threshold_us;
+ bool transport_is_atomic;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
+ const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
if (!handle)
return -ENODEV;
@@ -264,7 +415,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
clk_data->num = count;
hws = clk_data->hws;
- is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
+ transport_is_atomic = handle->is_transport_atomic(handle,
+ &atomic_threshold_us);
for (idx = 0; idx < count; idx++) {
struct scmi_clk *sclk;
@@ -286,15 +438,17 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->dev = dev;
/*
- * Note that when transport is atomic but SCMI protocol did not
- * specify (or support) an enable_latency associated with a
- * clock, we default to use atomic operations mode.
+ * Note that the scmi_clk_ops_db is on the stack, not global,
+ * because it cannot be shared between multiple probe sequences,
+ * to avoid sharing the devm_ allocated clk_ops between multiple
+ * SCMI clk driver instances.
*/
- if (is_atomic &&
- sclk->info->enable_latency <= atomic_threshold)
- scmi_ops = &scmi_atomic_clk_ops;
- else
- scmi_ops = &scmi_clk_ops;
+ scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
+ atomic_threshold_us,
+ scmi_clk_ops_db,
+ ARRAY_SIZE(scmi_clk_ops_db));
+ if (!scmi_ops)
+ return -ENOMEM;
/* Initialize clock parent data. */
if (sclk->info->num_parents > 0) {
@@ -318,8 +472,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
- scmi_ops == &scmi_atomic_clk_ops ?
- " (atomic ops)" : "");
+ scmi_ops->enable ? " (atomic ops)" : "");
hws[idx] = &sclk->hw;
}
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 25371c91a58f..8cca52be993f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
-
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
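
The clk.c changes above move runtime-PM handling out of the tree walk: clk_pm_runtime_get_all() resumes every registered provider before prepare_lock is taken, so a provider's resume callback can never block on prepare_lock while the walk blocks on the resume. Below is a hedged, userspace-only sketch of that lock-ordering rule; the provider list, refcounts and helper names are invented for illustration and are not the kernel API.

/* Illustrative only: take all per-device references, then the global lock. */
#include <pthread.h>
#include <stdio.h>

#define NR_PROVIDERS 3

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount[NR_PROVIDERS];	/* stand-in for runtime-PM usage counts */

/* "Resume" every provider before anyone holds prepare_lock. */
static int get_all(void)
{
	int i;

	pthread_mutex_lock(&list_lock);	/* freeze the provider list */
	for (i = 0; i < NR_PROVIDERS; i++)
		refcount[i]++;		/* would be pm_runtime_get() */
	return 0;			/* returns with list_lock held */
}

static void put_all(void)
{
	int i;

	for (i = 0; i < NR_PROVIDERS; i++)
		refcount[i]--;		/* would be pm_runtime_put() */
	pthread_mutex_unlock(&list_lock);
}

static void disable_unused(void)
{
	/*
	 * Ordering is the whole point: references first, prepare_lock
	 * second, so no resume callback can block on prepare_lock while
	 * we hold it.
	 */
	if (get_all())
		return;

	pthread_mutex_lock(&prepare_lock);
	printf("walking clk tree with all providers resumed\n");
	pthread_mutex_unlock(&prepare_lock);

	put_all();
}

int main(void)
{
	disable_unused();
	return 0;
}
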
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9cd80522ca2d..6a77d7e201a9 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -158,23 +158,54 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup_alloc *cla;
+ struct va_format vaf;
+ const char *failure;
+ va_list ap_copy;
+ size_t max_size;
+ ssize_t res;
cla = kzalloc(sizeof(*cla), GFP_KERNEL);
if (!cla)
return NULL;
+ va_copy(ap_copy, ap);
+
cla->cl.clk_hw = hw;
if (con_id) {
- strscpy(cla->con_id, con_id, sizeof(cla->con_id));
+ res = strscpy(cla->con_id, con_id, sizeof(cla->con_id));
+ if (res < 0) {
+ max_size = sizeof(cla->con_id);
+ failure = "connection";
+ goto fail;
+ }
cla->cl.con_id = cla->con_id;
}
if (dev_fmt) {
- vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ res = vsnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ if (res >= sizeof(cla->dev_id)) {
+ max_size = sizeof(cla->dev_id);
+ failure = "device";
+ goto fail;
+ }
cla->cl.dev_id = cla->dev_id;
}
+ va_end(ap_copy);
+
return &cla->cl;
+
+fail:
+ if (dev_fmt)
+ vaf.fmt = dev_fmt;
+ else
+ vaf.fmt = "null-device";
+ vaf.va = &ap_copy;
+ pr_err("%pV:%s: %s ID is greater than %zu\n",
+ &vaf, con_id, failure, max_size);
+ va_end(ap_copy);
+ kfree(cla);
+ return NULL;
}
static struct clk_lookup *
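
The vclkdev_alloc() rework above rejects connection and device IDs that no longer fit their fixed-size buffers instead of silently truncating them: strscpy() signals truncation with a negative return, and vsnprintf() signals it by returning the length it would have needed. A small standalone sketch of both checks follows, with a local strscpy()-style helper and made-up buffer sizes standing in for the kernel APIs.

/* Standalone illustration of detecting (rather than ignoring) truncation. */
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's strscpy(): negative on truncation. */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return -1;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;		/* truncated */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char con_id[8], dev_id[8];
	long res;
	int n;

	res = strscpy_like(con_id, "way-too-long-connection-id", sizeof(con_id));
	if (res < 0)
		fprintf(stderr, "connection ID longer than %zu, rejecting\n",
			sizeof(con_id));

	/* snprintf() returns the length it wanted; >= size means truncation. */
	n = snprintf(dev_id, sizeof(dev_id), "%s.%d", "some-device", 42);
	if (n >= (int)sizeof(dev_id))
		fprintf(stderr, "device ID longer than %zu, rejecting\n",
			sizeof(dev_id));

	return 0;
}
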
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index db3bca5f4ec9..6da0fba68225 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -114,6 +114,13 @@ config CLK_IMX93
help
Build the driver for i.MX93 CCM Clock Driver
+config CLK_IMX95_BLK_CTL
+ tristate "IMX95 Clock Driver for BLK CTL"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
+ help
+ Build the clock driver for i.MX95 BLK CTL
+
config CLK_IMXRT1050
tristate "IMXRT1050 CCM Clock Driver"
depends on SOC_IMXRT || COMPILE_TEST
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index d4b8e10b1970..03f2b2a1ab63 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o clk-imx8mp-audiomix.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX93) += clk-imx93.o
+obj-$(CONFIG_CLK_IMX95_BLK_CTL) += clk-imx95-blk-ctl.o
obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o clk-imx-lpcg-scu.o clk-imx-acm.o
clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o \
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index 55ed211a5e0b..b381d6f784c8 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -7,10 +7,12 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/clock/imx8mp-clock.h>
@@ -18,6 +20,7 @@
#define CLKEN0 0x000
#define CLKEN1 0x004
+#define EARC 0x200
#define SAI1_MCLK_SEL 0x300
#define SAI2_MCLK_SEL 0x304
#define SAI3_MCLK_SEL 0x308
@@ -26,6 +29,11 @@
#define SAI7_MCLK_SEL 0x314
#define PDM_SEL 0x318
#define SAI_PLL_GNRL_CTL 0x400
+#define SAI_PLL_FDIVL_CTL0 0x404
+#define SAI_PLL_FDIVL_CTL1 0x408
+#define SAI_PLL_SSCG_CTL 0x40C
+#define SAI_PLL_MNIT_CTL 0x410
+#define IPG_LP_CTRL 0x504
#define SAIn_MCLK1_PARENT(n) \
static const struct clk_parent_data \
@@ -182,26 +190,82 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_SAIn(7)
};
+static const u16 audiomix_regs[] = {
+ CLKEN0,
+ CLKEN1,
+ EARC,
+ SAI1_MCLK_SEL,
+ SAI2_MCLK_SEL,
+ SAI3_MCLK_SEL,
+ SAI5_MCLK_SEL,
+ SAI6_MCLK_SEL,
+ SAI7_MCLK_SEL,
+ PDM_SEL,
+ SAI_PLL_GNRL_CTL,
+ SAI_PLL_FDIVL_CTL0,
+ SAI_PLL_FDIVL_CTL1,
+ SAI_PLL_SSCG_CTL,
+ SAI_PLL_MNIT_CTL,
+ IPG_LP_CTRL,
+};
+
+struct clk_imx8mp_audiomix_priv {
+ void __iomem *base;
+ u32 regs_save[ARRAY_SIZE(audiomix_regs)];
+
+ /* Must be last */
+ struct clk_hw_onecell_data clk_data;
+};
+
+static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save)
+{
+ struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+ int i;
+
+ if (save) {
+ for (i = 0; i < ARRAY_SIZE(audiomix_regs); i++)
+ priv->regs_save[i] = readl(base + audiomix_regs[i]);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(audiomix_regs); i++)
+ writel(priv->regs_save[i], base + audiomix_regs[i]);
+ }
+}
+
static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
{
- struct clk_hw_onecell_data *priv;
+ struct clk_imx8mp_audiomix_priv *priv;
+ struct clk_hw_onecell_data *clk_hw_data;
struct device *dev = &pdev->dev;
void __iomem *base;
struct clk_hw *hw;
- int i;
+ int i, ret;
priv = devm_kzalloc(dev,
- struct_size(priv, hws, IMX8MP_CLK_AUDIOMIX_END),
+ struct_size(priv, clk_data.hws, IMX8MP_CLK_AUDIOMIX_END),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = IMX8MP_CLK_AUDIOMIX_END;
+ clk_hw_data = &priv->clk_data;
+ clk_hw_data->num = IMX8MP_CLK_AUDIOMIX_END;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->base = base;
+ dev_set_drvdata(dev, priv);
+
+ /*
+	 * pm_runtime_enable() needs to be called before the clocks are
+	 * registered, so that core->rpm_enabled is set for each of them.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
for (i = 0; i < ARRAY_SIZE(sels); i++) {
if (sels[i].num_parents == 1) {
hw = devm_clk_hw_register_gate_parent_data(dev,
@@ -216,10 +280,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
0, NULL, NULL);
}
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_clk_register;
+ }
- priv->hws[sels[i].clkid] = hw;
+ clk_hw_data->hws[sels[i].clkid] = hw;
}
/* SAI PLL */
@@ -228,39 +294,84 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
ARRAY_SIZE(clk_imx8mp_audiomix_pll_parents),
CLK_SET_RATE_NO_REPARENT, base + SAI_PLL_GNRL_CTL,
0, 2, 0, NULL, NULL);
- priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw;
+ clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw;
hw = imx_dev_clk_hw_pll14xx(dev, "sai_pll", "sai_pll_ref_sel",
base + 0x400, &imx_1443x_pll);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
- priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw;
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_clk_register;
+ }
+ clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
"sai_pll_bypass", clk_imx8mp_audiomix_pll_bypass_sels,
ARRAY_SIZE(clk_imx8mp_audiomix_pll_bypass_sels),
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
base + SAI_PLL_GNRL_CTL, 16, 1, 0, NULL, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
- priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_clk_register;
+ }
+
+ clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
0, base + SAI_PLL_GNRL_CTL, 13,
0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
- priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_clk_register;
+ }
+ clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
"sai_pll_out", 0, 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_clk_register;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ clk_hw_data);
+ if (ret)
+ goto err_clk_register;
+
+ pm_runtime_put_sync(dev);
+ return 0;
+
+err_clk_register:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void clk_imx8mp_audiomix_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int clk_imx8mp_audiomix_runtime_suspend(struct device *dev)
+{
+ clk_imx8mp_audiomix_save_restore(dev, true);
- return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
- priv);
+ return 0;
}
+static int clk_imx8mp_audiomix_runtime_resume(struct device *dev)
+{
+ clk_imx8mp_audiomix_save_restore(dev, false);
+
+ return 0;
+}
+
+static const struct dev_pm_ops clk_imx8mp_audiomix_pm_ops = {
+ RUNTIME_PM_OPS(clk_imx8mp_audiomix_runtime_suspend,
+ clk_imx8mp_audiomix_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static const struct of_device_id clk_imx8mp_audiomix_of_match[] = {
{ .compatible = "fsl,imx8mp-audio-blk-ctrl" },
{ /* sentinel */ }
@@ -269,9 +380,11 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
static struct platform_driver clk_imx8mp_audiomix_driver = {
.probe = clk_imx8mp_audiomix_probe,
+ .remove_new = clk_imx8mp_audiomix_remove,
.driver = {
.name = "imx8mp-audio-blk-ctrl",
.of_match_table = clk_imx8mp_audiomix_of_match,
+ .pm = pm_ptr(&clk_imx8mp_audiomix_pm_ops),
},
};
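
The audiomix probe above embeds the clk_hw_onecell_data (which ends in a flexible hws[] array) as the last member of a driver-private struct and sizes the whole allocation with struct_size(priv, clk_data.hws, ...); the "Must be last" comment exists because anything placed after the flexible array would alias its storage. The standalone sketch below open-codes roughly the same sizing for userspace; the struct names are illustrative, and the layout relies on the same GNU C extension the kernel uses (a struct with a flexible array embedded at the end of another struct).

/* Illustrative: private state wrapping a trailing flexible-array struct. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct onecell {
	unsigned int num;
	void *hws[];		/* flexible array: must end the enclosing struct */
};

struct priv {
	void *base;		/* driver-private state comes first */
	unsigned int saved_reg;
	struct onecell clk_data;	/* must be last */
};

int main(void)
{
	unsigned int num = 16;
	/* Roughly what struct_size(priv, clk_data.hws, num) computes. */
	size_t sz = offsetof(struct priv, clk_data.hws) + num * sizeof(void *);
	struct priv *priv = calloc(1, sz);

	if (!priv)
		return 1;

	priv->clk_data.num = num;
	printf("allocated %zu bytes for %u clocks\n", sz, num);
	free(priv);
	return 0;
}
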
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
new file mode 100644
index 000000000000..74f595f9e5e3
--- /dev/null
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <dt-bindings/clock/nxp,imx95-clock.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+enum {
+ CLK_GATE,
+ CLK_DIVIDER,
+ CLK_MUX,
+};
+
+struct imx95_blk_ctl {
+ struct device *dev;
+ spinlock_t lock;
+ struct clk *clk_apb;
+
+ void __iomem *base;
+ /* clock gate register */
+ u32 clk_reg_restore;
+};
+
+struct imx95_blk_ctl_clk_dev_data {
+ const char *name;
+ const char * const *parent_names;
+ u32 num_parents;
+ u32 reg;
+ u32 bit_idx;
+ u32 bit_width;
+ u32 clk_type;
+ u32 flags;
+ u32 flags2;
+ u32 type;
+};
+
+struct imx95_blk_ctl_dev_data {
+ const struct imx95_blk_ctl_clk_dev_data *clk_dev_data;
+ u32 num_clks;
+ bool rpm_enabled;
+ u32 clk_reg_offset;
+};
+
+static const struct imx95_blk_ctl_clk_dev_data vpublk_clk_dev_data[] = {
+ [IMX95_CLK_VPUBLK_WAVE] = {
+ .name = "vpublk_wave_vpu",
+ .parent_names = (const char *[]){ "vpu", },
+ .num_parents = 1,
+ .reg = 8,
+ .bit_idx = 0,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_VPUBLK_JPEG_ENC] = {
+ .name = "vpublk_jpeg_enc",
+ .parent_names = (const char *[]){ "vpujpeg", },
+ .num_parents = 1,
+ .reg = 8,
+ .bit_idx = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_VPUBLK_JPEG_DEC] = {
+ .name = "vpublk_jpeg_dec",
+ .parent_names = (const char *[]){ "vpujpeg", },
+ .num_parents = 1,
+ .reg = 8,
+ .bit_idx = 2,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data vpublk_dev_data = {
+ .num_clks = ARRAY_SIZE(vpublk_clk_dev_data),
+ .clk_dev_data = vpublk_clk_dev_data,
+ .rpm_enabled = true,
+ .clk_reg_offset = 8,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data camblk_clk_dev_data[] = {
+ [IMX95_CLK_CAMBLK_CSI2_FOR0] = {
+ .name = "camblk_csi2_for0",
+ .parent_names = (const char *[]){ "camisi", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 0,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_CAMBLK_CSI2_FOR1] = {
+ .name = "camblk_csi2_for1",
+ .parent_names = (const char *[]){ "camisi", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_CAMBLK_ISP_AXI] = {
+ .name = "camblk_isp_axi",
+ .parent_names = (const char *[]){ "camaxi", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 4,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_CAMBLK_ISP_PIXEL] = {
+ .name = "camblk_isp_pixel",
+ .parent_names = (const char *[]){ "camisi", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 5,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_CAMBLK_ISP] = {
+ .name = "camblk_isp",
+ .parent_names = (const char *[]){ "camisi", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 6,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data camblk_dev_data = {
+ .num_clks = ARRAY_SIZE(camblk_clk_dev_data),
+ .clk_dev_data = camblk_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
+ [IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = {
+ .name = "ldb_phy_div",
+ .parent_names = (const char *[]){ "ldbpll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 0,
+ .bit_width = 1,
+ .type = CLK_DIVIDER,
+ .flags2 = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ [IMX95_CLK_DISPMIX_LVDS_CH0_GATE] = {
+ .name = "lvds_ch0_gate",
+ .parent_names = (const char *[]){ "ldb_phy_div", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_DISPMIX_LVDS_CH1_GATE] = {
+ .name = "lvds_ch1_gate",
+ .parent_names = (const char *[]){ "ldb_phy_div", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 2,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_DISPMIX_PIX_DI0_GATE] = {
+ .name = "lvds_di0_gate",
+ .parent_names = (const char *[]){ "ldb_pll_div7", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 3,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+ [IMX95_CLK_DISPMIX_PIX_DI1_GATE] = {
+ .name = "lvds_di1_gate",
+ .parent_names = (const char *[]){ "ldb_pll_div7", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 4,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(lvds_clk_dev_data),
+ .clk_dev_data = lvds_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+ [IMX95_CLK_DISPMIX_ENG0_SEL] = {
+ .name = "disp_engine0_sel",
+ .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
+ .num_parents = 4,
+ .reg = 0,
+ .bit_idx = 0,
+ .bit_width = 2,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+ [IMX95_CLK_DISPMIX_ENG1_SEL] = {
+ .name = "disp_engine1_sel",
+ .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
+ .num_parents = 4,
+ .reg = 0,
+ .bit_idx = 2,
+ .bit_width = 2,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data),
+ .clk_dev_data = dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
+static int imx95_bc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct imx95_blk_ctl_dev_data *bc_data;
+ struct imx95_blk_ctl *bc;
+ struct clk_hw_onecell_data *clk_hw_data;
+ struct clk_hw **hws;
+ void __iomem *base;
+ int i, ret;
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+ bc->dev = dev;
+ dev_set_drvdata(&pdev->dev, bc);
+
+ spin_lock_init(&bc->lock);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ bc->base = base;
+ bc->clk_apb = devm_clk_get(dev, NULL);
+ if (IS_ERR(bc->clk_apb))
+ return dev_err_probe(dev, PTR_ERR(bc->clk_apb), "failed to get APB clock\n");
+
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret) {
+ dev_err(dev, "failed to enable apb clock: %d\n", ret);
+ return ret;
+ }
+
+ bc_data = of_device_get_match_data(dev);
+ if (!bc_data)
+ return devm_of_platform_populate(dev);
+
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ GFP_KERNEL);
+ if (!clk_hw_data)
+ return -ENOMEM;
+
+ if (bc_data->rpm_enabled)
+ pm_runtime_enable(&pdev->dev);
+
+ clk_hw_data->num = bc_data->num_clks;
+ hws = clk_hw_data->hws;
+
+ for (i = 0; i < bc_data->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ void __iomem *reg = base + data->reg;
+
+ if (data->type == CLK_MUX) {
+ hws[i] = clk_hw_register_mux(dev, data->name, data->parent_names,
+ data->num_parents, data->flags, reg,
+ data->bit_idx, data->bit_width,
+ data->flags2, &bc->lock);
+ } else if (data->type == CLK_DIVIDER) {
+ hws[i] = clk_hw_register_divider(dev, data->name, data->parent_names[0],
+ data->flags, reg, data->bit_idx,
+ data->bit_width, data->flags2, &bc->lock);
+ } else {
+ hws[i] = clk_hw_register_gate(dev, data->name, data->parent_names[0],
+ data->flags, reg, data->bit_idx,
+ data->flags2, &bc->lock);
+ }
+ if (IS_ERR(hws[i])) {
+ ret = PTR_ERR(hws[i]);
+ dev_err(dev, "failed to register: %s:%d\n", data->name, ret);
+ goto cleanup;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, clk_hw_data);
+ if (ret)
+ goto cleanup;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ of_clk_del_provider(dev->of_node);
+ goto cleanup;
+ }
+
+ if (pm_runtime_enabled(bc->dev))
+ clk_disable_unprepare(bc->clk_apb);
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < bc_data->num_clks; i++) {
+ if (IS_ERR_OR_NULL(hws[i]))
+ continue;
+ clk_hw_unregister(hws[i]);
+ }
+
+ if (bc_data->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int imx95_bc_runtime_suspend(struct device *dev)
+{
+ struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(bc->clk_apb);
+ return 0;
+}
+
+static int imx95_bc_runtime_resume(struct device *dev)
+{
+ struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(bc->clk_apb);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int imx95_bc_suspend(struct device *dev)
+{
+ struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
+
+ bc_data = of_device_get_match_data(dev);
+ if (!bc_data)
+ return 0;
+
+ if (bc_data->rpm_enabled) {
+ ret = pm_runtime_get_sync(bc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->dev);
+ return ret;
+ }
+ }
+
+ bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+
+ return 0;
+}
+
+static int imx95_bc_resume(struct device *dev)
+{
+ struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ const struct imx95_blk_ctl_dev_data *bc_data;
+
+ bc_data = of_device_get_match_data(dev);
+ if (!bc_data)
+ return 0;
+
+ writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+
+ if (bc_data->rpm_enabled)
+ pm_runtime_put(bc->dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx95_bc_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx95_bc_runtime_suspend, imx95_bc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(imx95_bc_suspend, imx95_bc_resume)
+};
+
+static const struct of_device_id imx95_bc_of_match[] = {
+ { .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data },
+ { .compatible = "nxp,imx95-display-master-csr", },
+ { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
+ { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx95_bc_of_match);
+
+static struct platform_driver imx95_bc_driver = {
+ .probe = imx95_bc_probe,
+ .driver = {
+ .name = "imx95-blk-ctl",
+ .of_match_table = imx95_bc_of_match,
+ .pm = &imx95_bc_pm_ops,
+ },
+};
+module_platform_driver(imx95_bc_driver);
+
+MODULE_DESCRIPTION("NXP i.MX95 blk ctl driver");
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 449041f8abbc..c8c023afe3e5 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
- "csw_infra_f26m_sel", 9),
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 01a2ef8f594e..3f62ec750733 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
- GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
+ GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20),
GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc4d8..bd37ab4d1a9b 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+	 * Do a pm_runtime_resume_and_get() to work around a possible
+	 * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
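
The clk-mtk.c hunk above brackets the whole simple-probe body with pm_runtime_resume_and_get() and pm_runtime_put(), so the device is already resumed before any clk_register() call can race with the genpd framework, and the reference is dropped on both the success and the error path. A trivial userspace sketch of that bracket follows; every *_stub() helper is a stand-in, not a kernel API.

/* Illustrative probe-time bracket: resume the device around registration. */
#include <stdio.h>

/* Stubs standing in for the runtime-PM and clk registration calls. */
static int pm_runtime_resume_and_get_stub(void) { return 0; }
static void pm_runtime_put_stub(void) { }
static int register_clocks_stub(void) { return 0; }

static int probe(void)
{
	int r;

	/* Resume first so no genpd callback runs during registration. */
	r = pm_runtime_resume_and_get_stub();
	if (r)
		return r;

	r = register_clocks_stub();

	/* Drop the reference on both the success and the error path. */
	pm_runtime_put_stub();
	return r;
}

int main(void)
{
	return probe();
}
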
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 3a2b3f90be25..094ec8a26d66 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -68,7 +68,7 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
node = of_find_compatible_node(NULL, NULL, compatible_node);
if (!node) {
- pr_err("cannot find \"%s\"\n", compatible_node);
+ pr_warn("cannot find \"%s\"\n", compatible_node);
return;
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 29ffd14d267b..59a40a49f8e1 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -30,6 +30,10 @@ config COMMON_CLK_MESON_VID_PLL_DIV
tristate
select COMMON_CLK_MESON_REGMAP
+config COMMON_CLK_MESON_VCLK
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
config COMMON_CLK_MESON_CLKC_UTILS
tristate
@@ -140,6 +144,7 @@ config COMMON_CLK_G12A
select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_VID_PLL_DIV
+ select COMMON_CLK_MESON_VCLK
select MFD_SYSCON
help
Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 9ee4b954c896..9ba43fe7a07a 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
+obj-$(CONFIG_COMMON_CLK_MESON_VCLK) += vclk.o
# Amlogic Clock controllers
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index e2a1f12f9175..621af1e6e4b2 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -2187,6 +2187,7 @@ static struct regmap_config a1_periphs_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .max_register = DMC_CLK_CTRL,
};
static struct meson_clk_hw_data a1_periphs_clks = {
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 4325e8a6a3ef..90b0aeeb049c 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -299,6 +299,7 @@ static struct regmap_config a1_pll_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .max_register = ANACTRL_HIFIPLL_STS,
};
static struct meson_clk_hw_data a1_pll_clks = {
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index d80ab4728f7a..e4d0f46f47f5 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -340,4 +340,4 @@ static struct platform_driver axg_aoclkc_driver = {
};
module_platform_driver(axg_aoclkc_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index ac3482960903..e03a5bf899c0 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1877,4 +1877,4 @@ module_platform_driver(axg_audio_driver);
MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 5f60f2bcca59..52d610110e44 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -2185,4 +2185,4 @@ static struct platform_driver axg_driver = {
};
module_platform_driver(axg_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index 8778c149d26a..aa824b030cb8 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -69,4 +69,4 @@ EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops);
MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index feae49a8f6dc..d46c02b51be5 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -140,4 +140,4 @@ EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
MODULE_DESCRIPTION("Amlogic dual divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 20255e129b37..eae9b7dc5a6c 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -177,4 +177,4 @@ EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
MODULE_DESCRIPTION("Amlogic MPLL driver");
MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index a6763439f7d2..ff3f0b1a3ed1 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -183,4 +183,4 @@ EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops);
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 6fa7639a3050..07db8b5c3000 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -436,8 +436,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
ret = meson_clk_pll_enable(hw);
if (ret) {
- pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
- __func__, old_rate);
+ pr_warn("%s: pll %s didn't lock, trying to set old rate %lu\n",
+ __func__, clk_hw_get_name(hw), old_rate);
/*
* FIXME: Do we really need/want this HACK ?
* It looks unsafe. what happens if the clock gets into a
@@ -486,4 +486,4 @@ EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
MODULE_DESCRIPTION("Amlogic PLL driver");
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index 8ad8977cf1c2..ad116d24f700 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -183,4 +183,4 @@ EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index c6b1d55cd7c8..58976ed8b92a 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -475,4 +475,4 @@ static struct platform_driver g12a_aoclkc_driver = {
};
module_platform_driver(g12a_aoclkc_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 90f4c6103014..56e66ecc306e 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -22,6 +22,7 @@
#include "clk-regmap.h"
#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
+#include "vclk.h"
#include "meson-eeclk.h"
#include "g12a.h"
@@ -3165,7 +3166,7 @@ static struct clk_regmap g12a_vclk2_sel = {
.ops = &clk_regmap_mux_ops,
.parent_hws = g12a_vclk_parent_hws,
.num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3193,7 +3194,6 @@ static struct clk_regmap g12a_vclk2_input = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_sel.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
@@ -3215,19 +3215,32 @@ static struct clk_regmap g12a_vclk_div = {
};
static struct clk_regmap g12a_vclk2_div = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_VIID_CLK_DIV,
- .shift = 0,
- .width = 8,
+ .data = &(struct meson_vclk_div_data){
+ .div = {
+ .reg_off = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .enable = {
+ .reg_off = HHI_VIID_CLK_DIV,
+ .shift = 16,
+ .width = 1,
+ },
+ .reset = {
+ .reg_off = HHI_VIID_CLK_DIV,
+ .shift = 17,
+ .width = 1,
+ },
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div",
- .ops = &clk_regmap_divider_ops,
+ .ops = &meson_vclk_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&g12a_vclk2_input.hw
},
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ .flags = CLK_SET_RATE_GATE,
},
};
@@ -3246,16 +3259,24 @@ static struct clk_regmap g12a_vclk = {
};
static struct clk_regmap g12a_vclk2 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_VIID_CLK_CNTL,
- .bit_idx = 19,
+ .data = &(struct meson_vclk_gate_data){
+ .enable = {
+ .reg_off = HHI_VIID_CLK_CNTL,
+ .shift = 19,
+ .width = 1,
+ },
+ .reset = {
+ .reg_off = HHI_VIID_CLK_CNTL,
+ .shift = 15,
+ .width = 1,
+ },
},
.hw.init = &(struct clk_init_data) {
.name = "vclk2",
- .ops = &clk_regmap_gate_ops,
+ .ops = &meson_vclk_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_div.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3339,7 +3360,7 @@ static struct clk_regmap g12a_vclk2_div1 = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3353,7 +3374,7 @@ static struct clk_regmap g12a_vclk2_div2_en = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3367,7 +3388,7 @@ static struct clk_regmap g12a_vclk2_div4_en = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3381,7 +3402,7 @@ static struct clk_regmap g12a_vclk2_div6_en = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3395,7 +3416,7 @@ static struct clk_regmap g12a_vclk2_div12_en = {
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3461,6 +3482,7 @@ static struct clk_fixed_factor g12a_vclk2_div2 = {
&g12a_vclk2_div2_en.hw
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3474,6 +3496,7 @@ static struct clk_fixed_factor g12a_vclk2_div4 = {
&g12a_vclk2_div4_en.hw
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3487,6 +3510,7 @@ static struct clk_fixed_factor g12a_vclk2_div6 = {
&g12a_vclk2_div6_en.hw
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3500,6 +3524,7 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
&g12a_vclk2_div12_en.hw
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3561,7 +3586,7 @@ static struct clk_regmap g12a_cts_encl_sel = {
.ops = &clk_regmap_mux_ops,
.parent_hws = g12a_cts_parent_hws,
.num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3717,15 +3742,26 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
.ops = &clk_regmap_mux_ops,
.parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
.num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
},
};
+/*
+ * FIXME: Forced as bypass by using a single /1 table entry, so setting a rate
+ * with this node in the clock path does not depend on the divider's boot
+ * value, but this does not guarantee the divider is at /1 at boot until a
+ * rate is actually set.
+ */
+static const struct clk_div_table g12a_mipi_dsi_pxclk_div_table[] = {
+ { .val = 0, .div = 1 },
+ { /* sentinel */ },
+};
+
static struct clk_regmap g12a_mipi_dsi_pxclk_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MIPIDSI_PHY_CLK_CNTL,
.shift = 0,
.width = 7,
+ .table = g12a_mipi_dsi_pxclk_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "mipi_dsi_pxclk_div",
@@ -5578,4 +5614,4 @@ static struct platform_driver g12a_driver = {
};
module_platform_driver(g12a_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 4aec1740ac34..dbda563729db 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -300,4 +300,4 @@ static struct platform_driver gxbb_aoclkc_driver = {
},
};
module_platform_driver(gxbb_aoclkc_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 1b1279d94781..29507b8c4304 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -3569,4 +3569,4 @@ static struct platform_driver gxbb_driver = {
};
module_platform_driver(gxbb_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index bf466fef263c..b8a9d59e6726 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -89,4 +89,4 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
EXPORT_SYMBOL_GPL(meson_aoclkc_probe);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 845ca8bfa346..3cbc7f233bba 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -58,4 +58,4 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
EXPORT_SYMBOL_GPL(meson_eeclkc_probe);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index 6c35de3d536f..5e17ca50ab09 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -3751,6 +3751,7 @@ static struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .max_register = CLKCTRL_DEMOD_CLK_CTRL,
};
static struct meson_clk_hw_data s4_periphs_clks = {
@@ -3799,6 +3800,7 @@ static const struct of_device_id clkc_match_table[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver s4_driver = {
.probe = meson_s4_periphs_probe,
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index 8dfaeccaadc2..d2650d96400c 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -798,6 +798,7 @@ static struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .max_register = ANACTRL_HDMIPLL_CTRL0,
};
static struct meson_clk_hw_data s4_pll_clks = {
@@ -853,6 +854,7 @@ static const struct of_device_id clkc_match_table[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver s4_driver = {
.probe = meson_s4_pll_probe,
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index d12c45c4c261..987f5b06587c 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -251,4 +251,4 @@ EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
new file mode 100644
index 000000000000..e886df55d6e3
--- /dev/null
+++ b/drivers/clk/meson/vclk.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Neil Armstrong <neil.armstrong@linaro.org>
+ */
+
+#include <linux/module.h>
+#include "vclk.h"
+
+/* The VCLK gate has a supplementary reset bit to pulse after ungating */
+
+static inline struct meson_vclk_gate_data *
+clk_get_meson_vclk_gate_data(struct clk_regmap *clk)
+{
+ return (struct meson_vclk_gate_data *)clk->data;
+}
+
+static int meson_vclk_gate_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk);
+
+ meson_parm_write(clk->map, &vclk->enable, 1);
+
+ /* Do a reset pulse */
+ meson_parm_write(clk->map, &vclk->reset, 1);
+ meson_parm_write(clk->map, &vclk->reset, 0);
+
+ return 0;
+}
+
+static void meson_vclk_gate_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk);
+
+ meson_parm_write(clk->map, &vclk->enable, 0);
+}
+
+static int meson_vclk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk);
+
+ return meson_parm_read(clk->map, &vclk->enable);
+}
+
+const struct clk_ops meson_vclk_gate_ops = {
+ .enable = meson_vclk_gate_enable,
+ .disable = meson_vclk_gate_disable,
+ .is_enabled = meson_vclk_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(meson_vclk_gate_ops);
+
+/* The VCLK Divider has supplementary reset & enable bits */
+
+static inline struct meson_vclk_div_data *
+clk_get_meson_vclk_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_vclk_div_data *)clk->data;
+}
+
+static unsigned long meson_vclk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+
+ return divider_recalc_rate(hw, prate, meson_parm_read(clk->map, &vclk->div),
+ vclk->table, vclk->flags, vclk->div.width);
+}
+
+static int meson_vclk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+
+ return divider_determine_rate(hw, req, vclk->table, vclk->div.width,
+ vclk->flags);
+}
+
+static int meson_vclk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+ int ret;
+
+ ret = divider_get_val(rate, parent_rate, vclk->table, vclk->div.width,
+ vclk->flags);
+ if (ret < 0)
+ return ret;
+
+ meson_parm_write(clk->map, &vclk->div, ret);
+
+ return 0;
+};
+
+static int meson_vclk_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+
+ /* Unreset the divider when ungating */
+ meson_parm_write(clk->map, &vclk->reset, 0);
+ meson_parm_write(clk->map, &vclk->enable, 1);
+
+ return 0;
+}
+
+static void meson_vclk_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+
+ /* Reset the divider when gating */
+ meson_parm_write(clk->map, &vclk->enable, 0);
+ meson_parm_write(clk->map, &vclk->reset, 1);
+}
+
+static int meson_vclk_div_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk);
+
+ return meson_parm_read(clk->map, &vclk->enable);
+}
+
+const struct clk_ops meson_vclk_div_ops = {
+ .recalc_rate = meson_vclk_div_recalc_rate,
+ .determine_rate = meson_vclk_div_determine_rate,
+ .set_rate = meson_vclk_div_set_rate,
+ .enable = meson_vclk_div_enable,
+ .disable = meson_vclk_div_disable,
+ .is_enabled = meson_vclk_div_is_enabled,
+};
+EXPORT_SYMBOL_GPL(meson_vclk_div_ops);
+
+MODULE_DESCRIPTION("Amlogic vclk clock driver");
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/vclk.h b/drivers/clk/meson/vclk.h
new file mode 100644
index 000000000000..20b0b181db09
--- /dev/null
+++ b/drivers/clk/meson/vclk.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Neil Armstrong <neil.armstrong@linaro.org>
+ */
+
+#ifndef __VCLK_H
+#define __VCLK_H
+
+#include "clk-regmap.h"
+#include "parm.h"
+
+/**
+ * struct meson_vclk_gate_data - vclk_gate regmap backed specific data
+ *
+ * @enable: vclk enable field
+ * @reset: vclk reset field
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_gate except CLK_GATE_HIWORD_MASK which is ignored
+ */
+struct meson_vclk_gate_data {
+ struct parm enable;
+ struct parm reset;
+ u8 flags;
+};
+
+extern const struct clk_ops meson_vclk_gate_ops;
+
+/**
+ * struct meson_vclk_div_data - vclk_div regmap backed specific data
+ *
+ * @div: divider field
+ * @enable: vclk divider enable field
+ * @reset: vclk divider reset field
+ * @table: array of value/divider pairs, last entry should have div = 0
+ *
+ * Flags:
+ * Same as clk_divider except CLK_DIVIDER_HIWORD_MASK which is ignored
+ */
+struct meson_vclk_div_data {
+ struct parm div;
+ struct parm enable;
+ struct parm reset;
+ const struct clk_div_table *table;
+ u8 flags;
+};
+
+extern const struct clk_ops meson_vclk_div_ops;
+
+#endif /* __VCLK_H */
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index daff235bc763..ee129f86794d 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -96,4 +96,4 @@ EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
MODULE_DESCRIPTION("Amlogic video pll divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 22eab91a6712..28ec0da88cb3 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -4,12 +4,10 @@
*
* Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
-#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
#include <soc/microchip/mpfs.h>
@@ -361,93 +359,6 @@ static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_c
return 0;
}
-/*
- * Peripheral clock resets
- */
-
-#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
-
-u32 mpfs_reset_read(struct device *dev)
-{
- struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
-
- return readl_relaxed(clock_data->base + REG_SUBBLK_RESET_CR);
-}
-EXPORT_SYMBOL_NS_GPL(mpfs_reset_read, MCHP_CLK_MPFS);
-
-void mpfs_reset_write(struct device *dev, u32 val)
-{
- struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
-
- writel_relaxed(val, clock_data->base + REG_SUBBLK_RESET_CR);
-}
-EXPORT_SYMBOL_NS_GPL(mpfs_reset_write, MCHP_CLK_MPFS);
-
-static void mpfs_reset_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-static void mpfs_reset_adev_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
-
- kfree(adev);
-}
-
-static struct auxiliary_device *mpfs_reset_adev_alloc(struct mpfs_clock_data *clk_data)
-{
- struct auxiliary_device *adev;
- int ret;
-
- adev = kzalloc(sizeof(*adev), GFP_KERNEL);
- if (!adev)
- return ERR_PTR(-ENOMEM);
-
- adev->name = "reset-mpfs";
- adev->dev.parent = clk_data->dev;
- adev->dev.release = mpfs_reset_adev_release;
- adev->id = 666u;
-
- ret = auxiliary_device_init(adev);
- if (ret) {
- kfree(adev);
- return ERR_PTR(ret);
- }
-
- return adev;
-}
-
-static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
-{
- struct auxiliary_device *adev;
- int ret;
-
- adev = mpfs_reset_adev_alloc(clk_data);
- if (IS_ERR(adev))
- return PTR_ERR(adev);
-
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(clk_data->dev, mpfs_reset_unregister_adev, adev);
-}
-
-#else /* !CONFIG_RESET_CONTROLLER */
-
-static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
-{
- return 0;
-}
-
-#endif /* !CONFIG_RESET_CONTROLLER */
-
static int mpfs_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -499,7 +410,7 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- return mpfs_reset_controller_register(clk_data);
+ return mpfs_reset_controller_register(dev, clk_data->base + REG_SUBBLK_RESET_CR);
}
static const struct of_device_id mpfs_clk_of_match_table[] = {
@@ -532,3 +443,4 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_IMPORT_NS(MCHP_CLK_MPFS);
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 69ebf65081b8..81efa885069b 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -250,7 +250,6 @@ static struct lpc18xx_cgu_base_clk lpc18xx_cgu_base_clks[] = {
struct lpc18xx_pll {
struct clk_hw hw;
void __iomem *reg;
- spinlock_t *lock;
u8 flags;
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 8ab08e7b5b6c..1bb51a058872 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -474,6 +474,7 @@ config SC_CAMCC_7280
config SC_CAMCC_8280XP
tristate "SC8280XP Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_8280XP
help
Support for the camera clock controller on Qualcomm Technologies, Inc
@@ -1094,6 +1095,7 @@ config SM_GPUCC_8550
config SM_GPUCC_8650
tristate "SM8650 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8650
help
Support for the graphics clock controller on SM8650 devices.
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index 678b805f13d4..5f7f537e4ecb 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -8,61 +8,54 @@
#include "clk-alpha-pll.h"
-/*
- * Even though APSS PLL type is of existing one (like Huayra), its offsets
- * are different from the one mentioned in the clk-alpha-pll.c, since the
- * PLL is specific to APSS, so lets the define the same.
- */
-static const u8 ipq_pll_offsets[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
- [PLL_OFF_L_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL] = 0x10,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_CONFIG_CTL_U] = 0x24,
- [PLL_OFF_STATUS] = 0x28,
- [PLL_OFF_TEST_CTL] = 0x30,
- [PLL_OFF_TEST_CTL_U] = 0x34,
- },
- [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
- [PLL_OFF_L_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL] = 0x10,
- [PLL_OFF_ALPHA_VAL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_USER_CTL_U] = 0x1c,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_STATUS] = 0x28,
- [PLL_OFF_TEST_CTL] = 0x30,
- [PLL_OFF_TEST_CTL_U] = 0x34,
+static struct clk_alpha_pll ipq_pll_huayra = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "a53pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
},
};
-static struct clk_alpha_pll ipq_pll_huayra = {
+static struct clk_alpha_pll ipq_pll_stromer = {
.offset = 0x0,
- .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_HUAYRA],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "a53pll",
.parent_data = &(const struct clk_parent_data) {
.fw_name = "xo",
},
.num_parents = 1,
- .ops = &clk_alpha_pll_huayra_ops,
+ .ops = &clk_alpha_pll_stromer_ops,
},
},
};
static struct clk_alpha_pll ipq_pll_stromer_plus = {
.offset = 0x0,
- .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ /*
+ * The register offsets of the Stromer Plus PLL used in IPQ5332
+ * are the same as the Stromer PLL's offsets.
+ */
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "a53pll",
.parent_data = &(const struct clk_parent_data) {
.fw_name = "xo",
@@ -73,8 +66,9 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
},
};
+/* 1.008 GHz configuration */
static const struct alpha_pll_config ipq5018_pll_config = {
- .l = 0x32,
+ .l = 0x2a,
.config_ctl_val = 0x4001075b,
.config_ctl_hi_val = 0x304,
.main_output_mask = BIT(0),
@@ -144,30 +138,30 @@ struct apss_pll_data {
};
static const struct apss_pll_data ipq5018_pll_data = {
- .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
- .pll = &ipq_pll_stromer_plus,
+ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER,
+ .pll = &ipq_pll_stromer,
.pll_config = &ipq5018_pll_config,
};
-static struct apss_pll_data ipq5332_pll_data = {
+static const struct apss_pll_data ipq5332_pll_data = {
.pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
.pll = &ipq_pll_stromer_plus,
.pll_config = &ipq5332_pll_config,
};
-static struct apss_pll_data ipq8074_pll_data = {
+static const struct apss_pll_data ipq8074_pll_data = {
.pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
.pll = &ipq_pll_huayra,
.pll_config = &ipq8074_pll_config,
};
-static struct apss_pll_data ipq6018_pll_data = {
+static const struct apss_pll_data ipq6018_pll_data = {
.pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
.pll = &ipq_pll_huayra,
.pll_config = &ipq6018_pll_config,
};
-static struct apss_pll_data ipq9574_pll_data = {
+static const struct apss_pll_data ipq9574_pll_data = {
.pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
.pll = &ipq_pll_huayra,
.pll_config = &ipq9574_pll_config,
@@ -203,7 +197,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA)
clk_alpha_pll_configure(data->pll, regmap, data->pll_config);
- else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
+ else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER ||
+ data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
clk_stromer_pll_configure(data->pll, regmap, data->pll_config);
ret = devm_clk_register_regmap(dev, &data->pll->clkr);
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 8a412ef47e16..d4227909d1fe 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -83,6 +83,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
+ [PLL_OFF_L_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_STATUS] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ },
[CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
@@ -213,10 +223,9 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_USER_CTL] = 0x18,
[PLL_OFF_USER_CTL_U] = 0x1c,
[PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_CONFIG_CTL_U] = 0xff,
+ [PLL_OFF_STATUS] = 0x28,
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
- [PLL_OFF_STATUS] = 0x28,
},
[CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
[PLL_OFF_L_VAL] = 0x04,
@@ -2114,6 +2123,15 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
{
u32 lval = config->l;
+ /*
+ * If the bootloader left the PLL enabled it's likely that there are
+ * RCGs that will lock up if we disable the PLL below.
+ */
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("Lucid Evo PLL is already enabled, skipping configuration\n");
+ return;
+ }
+
lval |= TRION_PLL_CAL_VAL << LUCID_EVO_PLL_CAL_L_VAL_SHIFT;
clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), lval);
clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
@@ -2490,6 +2508,8 @@ static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate,
rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH);
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ a <<= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
a >> ALPHA_BITWIDTH);
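
The added shift lines the fractional value up with the register layout before it is split across the two alpha registers: the rounded alpha is widened from the rounding width to the full register width, so the low word goes to PLL_ALPHA_VAL and the remaining top bits to PLL_ALPHA_VAL_U. A minimal sketch of that split, assuming the driver's usual ALPHA_REG_BITWIDTH = 40 and ALPHA_BITWIDTH = 32 defines (the sample value is illustrative only):

	/* Illustrative only: split a widened alpha value across the two
	 * 32-bit alpha registers, assuming 40-bit register width and
	 * 32-bit rounding width.
	 */
	u64 a = 0x12345678;		/* as returned by alpha_pll_round_rate() */
	u32 alpha_lo, alpha_hi;

	a <<= 40 - 32;			/* align to the 40-bit register layout */
	alpha_lo = a;			/* low 32 bits -> PLL_ALPHA_VAL */
	alpha_hi = a >> 32;		/* upper 8 bits -> PLL_ALPHA_VAL_U */
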
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index fb6d50263bb9..c7055b6c42f1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -15,6 +15,7 @@
enum {
CLK_ALPHA_PLL_TYPE_DEFAULT,
CLK_ALPHA_PLL_TYPE_HUAYRA,
+ CLK_ALPHA_PLL_TYPE_HUAYRA_APSS,
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
@@ -73,8 +74,10 @@ struct pll_vco {
/**
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
- * @vco_table: array of VCO settings
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
+ * @vco_table: array of VCO settings
+ * @num_vco: number of VCO settings in @vco_table
+ * @flags: bitmask to indicate features supported by the hardware
* @clkr: regmap clock handle
*/
struct clk_alpha_pll {
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index fe24b4abeab4..76bf523431b8 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -41,17 +41,6 @@ enum {
#define CBF_PLL_OFFSET 0xf000
-static const u8 cbf_pll_regs[PLL_OFF_MAX_REGS] = {
- [PLL_OFF_L_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL] = 0x10,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_CONFIG_CTL_U] = 0x24,
- [PLL_OFF_TEST_CTL] = 0x30,
- [PLL_OFF_TEST_CTL_U] = 0x34,
- [PLL_OFF_STATUS] = 0x28,
-};
-
static struct alpha_pll_config cbfpll_config = {
.l = 72,
.config_ctl_val = 0x200d4828,
@@ -67,7 +56,7 @@ static struct alpha_pll_config cbfpll_config = {
static struct clk_alpha_pll cbf_pll = {
.offset = CBF_PLL_OFFSET,
- .regs = cbf_pll_regs,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS],
.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data){
.name = "cbf_pll",
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e6d84c8c7989..d7414361e432 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -17,6 +17,23 @@ struct freq_tbl {
u16 n;
};
+#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) }
+#define FM(f, confs) { (f), ARRAY_SIZE(confs), (confs) }
+#define FMS(f, s, h, m, n) { (f), 1, (const struct freq_conf []){ C(s, h, m, n) } }
+
+struct freq_conf {
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+struct freq_multi_tbl {
+ unsigned long freq;
+ size_t num_confs;
+ const struct freq_conf *confs;
+};
+
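
The new helpers mirror the existing F() macro: C() packs one source/pre-divider/M/N configuration (encoding a half-integer divider as 2*div-1, exactly as F() does), FMS() wraps a single configuration for a frequency, and FM() points a frequency at an array of alternative configurations. A minimal sketch of a table built with them; the parent indices (P_XO, P_UNIPHY0_RX, P_UNIPHY1_RX) are the ones used by the IPQ8074 conversion later in this series, while the table names themselves are illustrative:

	/* Two ways to reach 25 MHz, one fixed entry for 19.2 MHz. */
	static const struct freq_conf ftbl_example_25[] = {
		C(P_UNIPHY1_RX, 12.5, 0, 0),	/* 312.5 MHz parent / 12.5 */
		C(P_UNIPHY0_RX, 5, 0, 0),	/* 125 MHz parent / 5 */
	};

	static const struct freq_multi_tbl ftbl_example[] = {
		FMS(19200000, P_XO, 1, 0, 0),	/* single configuration */
		FM(25000000, ftbl_example_25),	/* multiple configurations */
		{ }				/* sentinel */
	};

A clock using such a table assigns it to .freq_multi_tbl (the new union member of struct clk_rcg2) and switches its ops to clk_rcg2_fm_ops.
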
/**
* struct mn - M/N:D counter
* @mnctr_en_bit: bit to enable mn counter
@@ -138,6 +155,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @safe_src_index: safe src index value
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
 * @freq_multi_tbl: frequency table for clocks reachable with multiple RCG configurations
* @clkr: regmap clock handle
* @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
* @parked_cfg: cached value of the CFG register for parked RCGs
@@ -149,7 +167,10 @@ struct clk_rcg2 {
u8 hid_width;
u8 safe_src_index;
const struct parent_map *parent_map;
- const struct freq_tbl *freq_tbl;
+ union {
+ const struct freq_tbl *freq_tbl;
+ const struct freq_multi_tbl *freq_multi_tbl;
+ };
struct clk_regmap clkr;
u8 cfg_off;
u32 parked_cfg;
@@ -169,6 +190,7 @@ struct clk_rcg2_gfx3d {
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_rcg2_fm_ops;
extern const struct clk_ops clk_rcg2_mux_closest_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 5183c74b074f..9b3aaa7f20ac 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -260,6 +260,115 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
return 0;
}
+static const struct freq_conf *
+__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
+ unsigned long req_rate)
+{
+ unsigned long rate_diff, best_rate_diff = ULONG_MAX;
+ const struct freq_conf *conf, *best_conf = NULL;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ unsigned long parent_rate, rate;
+ struct clk_hw *p;
+ int index, i;
+
+ /* Exit early if only one config is defined */
+ if (f->num_confs == 1) {
+ best_conf = f->confs;
+ goto exit;
+ }
+
+	/* Search the provided configs for the one closest to the wanted rate */
+ for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
+ index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
+ if (index < 0)
+ continue;
+
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (!p)
+ continue;
+
+ parent_rate = clk_hw_get_rate(p);
+		rate = calc_rate(parent_rate, conf->m, conf->n, conf->n, conf->pre_div);
+
+ if (rate == req_rate) {
+ best_conf = conf;
+ goto exit;
+ }
+
+ rate_diff = abs_diff(req_rate, rate);
+ if (rate_diff < best_rate_diff) {
+ best_rate_diff = rate_diff;
+ best_conf = conf;
+ }
+ }
+
+ /*
+	 * Very unlikely. Warn if we could not find a suitable config
+	 * because the parent could not be resolved for any config.
+ */
+ if (unlikely(!best_conf)) {
+ WARN(1, "%s: can't find a configuration for rate %lu\n",
+ name, req_rate);
+ return ERR_PTR(-EINVAL);
+ }
+
+exit:
+ return best_conf;
+}
+
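
The selector recomputes the rate each alternative configuration would actually produce from its parent's current rate and keeps the closest match, returning immediately on an exact hit; this is what lets the same nominal frequency be served by whichever UNIPHY parent happens to be running. A reduced, standalone sketch of that closest-match loop, with the RCG plumbing stripped out (pick_closest and achievable[] are illustrative stand-ins for the per-configuration calc_rate() results):

	/* Pick the candidate whose achievable rate is closest to req_rate. */
	static const struct freq_conf *
	pick_closest(const struct freq_conf *confs, size_t n,
		     const unsigned long *achievable, unsigned long req_rate)
	{
		unsigned long best_diff = ULONG_MAX;
		const struct freq_conf *best = NULL;
		size_t i;

		for (i = 0; i < n; i++) {
			unsigned long diff = abs_diff(req_rate, achievable[i]);

			if (diff < best_diff) {
				best_diff = diff;
				best = &confs[i];
			}
		}

		return best;	/* NULL only if n == 0 */
	}
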
+static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
+ struct clk_rate_request *req)
+{
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_conf *conf;
+ struct clk_hw *p;
+ int index;
+
+ f = qcom_find_freq_multi(f, rate);
+ if (!f || !f->confs)
+ return -EINVAL;
+
+ conf = __clk_rcg2_select_conf(hw, f, rate);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+ index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
+ if (index < 0)
+ return index;
+
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
+ if (!p)
+ return -EINVAL;
+
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
+ if (conf->pre_div) {
+ if (!rate)
+ rate = req->rate;
+ rate /= 2;
+ rate *= conf->pre_div + 1;
+ }
+
+ if (conf->n) {
+ u64 tmp = rate;
+
+ tmp = tmp * conf->n;
+ do_div(tmp, conf->m);
+ rate = tmp;
+ }
+ } else {
+ rate = clk_hw_get_rate(p);
+ }
+
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
+
+ return 0;
+}
+
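
When CLK_SET_RATE_PARENT is set, the function has to report which parent rate would make the chosen configuration hit f->freq, so it inverts the RCG formula: undo the half-integer pre-divider (stored as 2*div-1, hence rate/2 * (pre_div+1)) and then scale by N/M. A hedged arithmetic sketch of that inversion, with example numbers only:

	/*
	 * Illustrative only: recover the parent rate needed for a 125 MHz
	 * target through a 2.5 pre-divider (stored as 2*2.5 - 1 = 4).
	 */
	unsigned long target = 125000000;
	u8 stored_pre_div = 4;			/* from C(..., 2.5, 0, 0) */
	unsigned long parent;

	parent = target / 2 * (stored_pre_div + 1);	/* 62.5 MHz * 5 = 312.5 MHz */
	/* With M/N active, the parent rate would further scale by n/m. */
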
static int clk_rcg2_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -276,6 +385,14 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
+static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
+}
+
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
u32 *_cfg)
{
@@ -371,6 +488,30 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
return clk_rcg2_configure(rcg, f);
}
+static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_multi_tbl *f;
+ const struct freq_conf *conf;
+ struct freq_tbl f_tbl = {};
+
+ f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
+ if (!f || !f->confs)
+ return -EINVAL;
+
+ conf = __clk_rcg2_select_conf(hw, f, rate);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
+ f_tbl.freq = f->freq;
+ f_tbl.src = conf->src;
+ f_tbl.pre_div = conf->pre_div;
+ f_tbl.m = conf->m;
+ f_tbl.n = conf->n;
+
+ return clk_rcg2_configure(rcg, &f_tbl);
+}
+
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -383,6 +524,12 @@ static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
+static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_fm_set_rate(hw, rate);
+}
+
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
@@ -395,6 +542,12 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
+static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_fm_set_rate(hw, rate);
+}
+
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -505,6 +658,19 @@ const struct clk_ops clk_rcg2_floor_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+const struct clk_ops clk_rcg2_fm_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_fm_determine_rate,
+ .set_rate = clk_rcg2_fm_set_rate,
+ .set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
+
const struct clk_ops clk_rcg2_mux_closest_ops = {
.determine_rate = __clk_mux_determine_rate_closest,
.get_parent = clk_rcg2_get_parent,
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 745026ef4d9c..9da034f8f2ff 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -98,7 +98,6 @@ struct clk_rpm {
};
struct rpm_cc {
- struct qcom_rpm *rpm;
struct clk_rpm **clks;
size_t num_clks;
u32 xo_buffer_value;
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 8602c02047d0..45c5255bcd11 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -768,6 +768,7 @@ static struct clk_smd_rpm *msm8976_clks[] = {
static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
.clks = msm8976_clks,
+ .num_clks = ARRAY_SIZE(msm8976_clks),
.icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 75f09e6e057e..48f81e3a5e80 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -41,6 +41,24 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
}
EXPORT_SYMBOL_GPL(qcom_find_freq);
+const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
+ unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ if (!f->freq)
+ return f;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ /* Default to our fastest rate */
+ return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_multi);
+
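
Like qcom_find_freq(), the multi-table lookup walks the zero-terminated array and returns the first entry whose frequency is at or above the request, falling back to the fastest entry; a table whose first frequency is zero is treated as matching any rate. A small usage sketch, reusing the illustrative table from the clk-rcg.h note above:

	const struct freq_multi_tbl *entry;

	entry = qcom_find_freq_multi(ftbl_example, 24000000);
	if (entry)
		/* entry->freq == 25000000, entry->confs lists both options */
		pr_debug("picked %lu Hz with %zu configuration(s)\n",
			 entry->freq, entry->num_confs);
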
const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
unsigned long rate)
{
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 9c8f7b798d9f..2d4a8a837e6c 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -45,6 +45,8 @@ extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
unsigned long rate);
extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
unsigned long rate);
+extern const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
+ unsigned long rate);
extern void
qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 839435362010..e4b7464c4d0e 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
- F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
.cmd_rcgr = 0x10f8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_link_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index 92e9c4e7b13d..49bb4f58c391 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x819c,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 3672c73ac11c..38ecea805503 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -345,26 +345,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x8170,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -418,13 +409,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -478,13 +468,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -538,13 +527,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
index 9539db0d9114..3eb64bcad487 100644
--- a/drivers/clk/qcom/dispcc-sm8650.c
+++ b/drivers/clk/qcom/dispcc-sm8650.c
@@ -343,26 +343,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x8170,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -416,13 +407,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -476,13 +466,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -536,13 +525,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 7bc679871f32..d2be56c5892d 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1677,15 +1677,23 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_RX, 5, 0, 0),
- F(78125000, P_UNIPHY1_RX, 4, 0, 0),
- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_RX, 1, 0, 0),
- F(156250000, P_UNIPHY1_RX, 2, 0, 0),
- F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_RX, 12.5, 0, 0),
+ C(P_UNIPHY0_RX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_RX, 2.5, 0, 0),
+ C(P_UNIPHY0_RX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = {
+ FMS(19200000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
};
@@ -1712,14 +1720,14 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_rx_clk_src = {
.cmd_rcgr = 0x68060,
- .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_rx_clk_src",
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
.num_parents = ARRAY_SIZE(gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
@@ -1739,15 +1747,23 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_TX, 5, 0, 0),
- F(78125000, P_UNIPHY1_TX, 4, 0, 0),
- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_TX, 1, 0, 0),
- F(156250000, P_UNIPHY1_TX, 2, 0, 0),
- F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_TX, 12.5, 0, 0),
+ C(P_UNIPHY0_TX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_TX, 2.5, 0, 0),
+ C(P_UNIPHY0_TX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = {
+ FMS(19200000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
};
@@ -1774,14 +1790,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_tx_clk_src = {
.cmd_rcgr = 0x68068,
- .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_tx_clk_src",
.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
.num_parents = ARRAY_SIZE(gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
@@ -1801,15 +1817,23 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY2_RX, 5, 0, 0),
- F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
- F(78125000, P_UNIPHY2_RX, 4, 0, 0),
- F(125000000, P_UNIPHY2_RX, 1, 0, 0),
- F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
- F(156250000, P_UNIPHY2_RX, 2, 0, 0),
- F(312500000, P_UNIPHY2_RX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port6_rx_clk_src_25[] = {
+ C(P_UNIPHY2_RX, 5, 0, 0),
+ C(P_UNIPHY2_RX, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port6_rx_clk_src_125[] = {
+ C(P_UNIPHY2_RX, 1, 0, 0),
+ C(P_UNIPHY2_RX, 2.5, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port6_rx_clk_src[] = {
+ FMS(19200000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port6_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port6_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_RX, 1, 0, 0),
{ }
};
@@ -1831,14 +1855,14 @@ static const struct parent_map gcc_xo_uniphy2_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port6_rx_clk_src = {
.cmd_rcgr = 0x68070,
- .freq_tbl = ftbl_nss_port6_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port6_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy2_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port6_rx_clk_src",
.parent_data = gcc_xo_uniphy2_rx_tx_ubi32_bias,
.num_parents = ARRAY_SIZE(gcc_xo_uniphy2_rx_tx_ubi32_bias),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
@@ -1858,15 +1882,23 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY2_TX, 5, 0, 0),
- F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
- F(78125000, P_UNIPHY2_TX, 4, 0, 0),
- F(125000000, P_UNIPHY2_TX, 1, 0, 0),
- F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
- F(156250000, P_UNIPHY2_TX, 2, 0, 0),
- F(312500000, P_UNIPHY2_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port6_tx_clk_src_25[] = {
+ C(P_UNIPHY2_TX, 5, 0, 0),
+ C(P_UNIPHY2_TX, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
+ C(P_UNIPHY2_TX, 1, 0, 0),
+ C(P_UNIPHY2_TX, 2.5, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
+ FMS(19200000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port6_tx_clk_src_25),
+	FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
+	FM(125000000, ftbl_nss_port6_tx_clk_src_125),
+	FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
+	FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
{ }
};
@@ -1888,14 +1920,14 @@ static const struct parent_map gcc_xo_uniphy2_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port6_tx_clk_src = {
.cmd_rcgr = 0x68078,
- .freq_tbl = ftbl_nss_port6_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port6_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy2_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port6_tx_clk_src",
.parent_data = gcc_xo_uniphy2_tx_rx_ubi32_bias,
.num_parents = ARRAY_SIZE(gcc_xo_uniphy2_tx_rx_ubi32_bias),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
index f2dd132e2fb1..f2b8729e4198 100644
--- a/drivers/clk/qcom/gcc-msm8917.c
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -3278,6 +3278,7 @@ static const struct of_device_id gcc_msm8917_match_table[] = {
{ .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
{},
};
+MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table);
static struct platform_driver gcc_msm8917_driver = {
.probe = gcc_msm8917_probe,
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 68359534ff25..7563bff58118 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -4227,6 +4227,7 @@ static const struct of_device_id gcc_msm8953_match_table[] = {
{ .compatible = "qcom,gcc-msm8953" },
{},
};
+MODULE_DEVICE_TABLE(of, gcc_msm8953_match_table);
static struct platform_driver gcc_msm8953_driver = {
.probe = gcc_msm8953_probe,
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index a47ef9dfa808..1f748141d12c 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -207,28 +207,6 @@ static const struct clk_parent_data gcc_parents_7[] = {
{ .hw = &gpll0_out_even.clkr.hw },
};
-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
- .cmd_rcgr = 0x48014,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
@@ -1361,24 +1339,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
},
};
-static struct clk_branch gcc_cpuss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw },
- .num_parents = 1,
- /* required for cpuss */
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_cpuss_dvm_bus_clk = {
.halt_reg = 0x48190,
.halt_check = BRANCH_HALT,
@@ -2685,24 +2645,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
- .halt_reg = 0x4819c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw },
- .num_parents = 1,
- /* required for cpuss */
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_tsif_ahb_clk = {
.halt_reg = 0x36004,
.halt_check = BRANCH_HALT,
@@ -3550,8 +3492,6 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
[GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
@@ -3669,7 +3609,6 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
[GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index e7a4068b9f39..df9618ab7eea 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -487,9 +487,14 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i] || !scs[i]->supply)
continue;
- scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
- if (IS_ERR(scs[i]->rsupply))
- return PTR_ERR(scs[i]->rsupply);
+ scs[i]->rsupply = devm_regulator_get_optional(dev, scs[i]->supply);
+ if (IS_ERR(scs[i]->rsupply)) {
+ ret = PTR_ERR(scs[i]->rsupply);
+ if (ret != -ENODEV)
+ return ret;
+
+ scs[i]->rsupply = NULL;
+ }
}
data->num_domains = num;
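
The gdsc change above switches to the optional regulator API so a missing supply is no longer fatal: devm_regulator_get_optional() returns -ENODEV when the supply is simply not described, and only other errors (such as -EPROBE_DEFER) are propagated. The same pattern in isolation, as a hedged sketch where the supply name "vdd-gfx" is purely illustrative:

	struct regulator *supply;

	supply = devm_regulator_get_optional(dev, "vdd-gfx");
	if (IS_ERR(supply)) {
		int err = PTR_ERR(supply);

		if (err != -ENODEV)	/* real error, e.g. -EPROBE_DEFER */
			return err;

		supply = NULL;		/* supply genuinely absent: run without it */
	}
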
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index dac27e31ef60..b0b0cb074b4a 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -14,7 +14,7 @@
#include "clk-regmap.h"
#include "clk-hfpll.h"
-static const struct hfpll_data hdata = {
+static const struct hfpll_data qcs404 = {
.mode_reg = 0x00,
.l_reg = 0x04,
.m_reg = 0x08,
@@ -84,10 +84,12 @@ static const struct hfpll_data msm8976_cci = {
};
static const struct of_device_id qcom_hfpll_match_table[] = {
- { .compatible = "qcom,hfpll", .data = &hdata },
{ .compatible = "qcom,msm8976-hfpll-a53", .data = &msm8976_a53 },
{ .compatible = "qcom,msm8976-hfpll-a72", .data = &msm8976_a72 },
{ .compatible = "qcom,msm8976-hfpll-cci", .data = &msm8976_cci },
+ { .compatible = "qcom,qcs404-hfpll", .data = &qcs404 },
+ /* Deprecated in bindings */
+ { .compatible = "qcom,hfpll", .data = &qcs404 },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 1180e48c687a..275fb3b71ede 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -2535,6 +2535,8 @@ static struct clk_branch vmem_ahb_clk = {
static struct gdsc video_top_gdsc = {
.gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
+ .cxc_count = 3,
.pd = {
.name = "video_top",
},
@@ -2543,20 +2545,26 @@ static struct gdsc video_top_gdsc = {
static struct gdsc video_subcore0_gdsc = {
.gdscr = 0x1040,
+ .cxcs = (unsigned int []){ 0x1048 },
+ .cxc_count = 1,
.pd = {
.name = "video_subcore0",
},
.parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc video_subcore1_gdsc = {
.gdscr = 0x1044,
+ .cxcs = (unsigned int []){ 0x104c },
+ .cxc_count = 1,
.pd = {
.name = "video_subcore1",
},
.parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc mdss_gdsc = {
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index f45c2c45808b..4b1815147f77 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -30,8 +30,6 @@ struct r8a73a4_cpg {
#define CPG_PLL2HCR 0xe4
#define CPG_PLL2SCR 0xf4
-#define CLK_ENABLE_ON_INIT BIT(0)
-
struct div4_clk {
const char *name;
unsigned int reg;
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 3ee3f57e4e9a..22e9be7240bb 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -26,28 +26,25 @@ struct r8a7740_cpg {
#define CPG_USBCKCR 0x8c
#define CPG_FRQCRC 0xe0
-#define CLK_ENABLE_ON_INIT BIT(0)
-
struct div4_clk {
const char *name;
unsigned int reg;
unsigned int shift;
- int flags;
};
static struct div4_clk div4_clks[] = {
- { "i", CPG_FRQCRA, 20, CLK_ENABLE_ON_INIT },
- { "zg", CPG_FRQCRA, 16, CLK_ENABLE_ON_INIT },
- { "b", CPG_FRQCRA, 8, CLK_ENABLE_ON_INIT },
- { "m1", CPG_FRQCRA, 4, CLK_ENABLE_ON_INIT },
- { "hp", CPG_FRQCRB, 4, 0 },
- { "hpp", CPG_FRQCRC, 20, 0 },
- { "usbp", CPG_FRQCRC, 16, 0 },
- { "s", CPG_FRQCRC, 12, 0 },
- { "zb", CPG_FRQCRC, 8, 0 },
- { "m3", CPG_FRQCRC, 4, 0 },
- { "cp", CPG_FRQCRC, 0, 0 },
- { NULL, 0, 0, 0 },
+ { "i", CPG_FRQCRA, 20 },
+ { "zg", CPG_FRQCRA, 16 },
+ { "b", CPG_FRQCRA, 8 },
+ { "m1", CPG_FRQCRA, 4 },
+ { "hp", CPG_FRQCRB, 4 },
+ { "hpp", CPG_FRQCRC, 20 },
+ { "usbp", CPG_FRQCRC, 16 },
+ { "s", CPG_FRQCRC, 12 },
+ { "zb", CPG_FRQCRC, 8 },
+ { "m3", CPG_FRQCRC, 4 },
+ { "cp", CPG_FRQCRC, 0 },
+ { NULL, 0, 0 },
};
static const struct clk_div_table div4_div_table[] = {
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 8c51090f13e1..47fc99ccd283 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -34,8 +34,6 @@ struct sh73a0_cpg {
#define CPG_DSI0PHYCR 0x6c
#define CPG_DSI1PHYCR 0x70
-#define CLK_ENABLE_ON_INIT BIT(0)
-
struct div4_clk {
const char *name;
const char *parent;
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 4c2872f45387..ff3f85e906fe 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2),
DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2),
DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2),
- DEF_MOD("canfd0", 328, R8A779A0_CLK_CANFD),
+ DEF_MOD("canfd0", 328, R8A779A0_CLK_S3D2),
DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 71f67a1c86d8..079b55b30b23 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -184,14 +184,35 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
DEF_MOD("i2c1", 519, R8A779H0_CLK_S0D6_PER),
DEF_MOD("i2c2", 520, R8A779H0_CLK_S0D6_PER),
DEF_MOD("i2c3", 521, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("irqc", 611, R8A779H0_CLK_CL16M),
+ DEF_MOD("msi0", 618, R8A779H0_CLK_MSO),
+ DEF_MOD("msi1", 619, R8A779H0_CLK_MSO),
+ DEF_MOD("msi2", 620, R8A779H0_CLK_MSO),
+ DEF_MOD("msi3", 621, R8A779H0_CLK_MSO),
+ DEF_MOD("msi4", 622, R8A779H0_CLK_MSO),
+ DEF_MOD("msi5", 623, R8A779H0_CLK_MSO),
DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2),
+ DEF_MOD("scif0", 702, R8A779H0_CLK_SASYNCPERD4),
+ DEF_MOD("scif1", 703, R8A779H0_CLK_SASYNCPERD4),
+ DEF_MOD("scif3", 704, R8A779H0_CLK_SASYNCPERD4),
+ DEF_MOD("scif4", 705, R8A779H0_CLK_SASYNCPERD4),
DEF_MOD("sdhi0", 706, R8A779H0_CLK_SD0),
DEF_MOD("sydm1", 709, R8A779H0_CLK_S0D6_PER),
DEF_MOD("sydm2", 710, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("tmu0", 713, R8A779H0_CLK_SASYNCRT),
+ DEF_MOD("tmu1", 714, R8A779H0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu2", 715, R8A779H0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu3", 716, R8A779H0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu4", 717, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
+ DEF_MOD("cmt0", 910, R8A779H0_CLK_R),
+ DEF_MOD("cmt1", 911, R8A779H0_CLK_R),
+ DEF_MOD("cmt2", 912, R8A779H0_CLK_R),
+ DEF_MOD("cmt3", 913, R8A779H0_CLK_R),
DEF_MOD("pfc0", 915, R8A779H0_CLK_CP),
DEF_MOD("pfc1", 916, R8A779H0_CLK_CP),
DEF_MOD("pfc2", 917, R8A779H0_CLK_CP),
+ DEF_MOD("tsc2:tsc1", 919, R8A779H0_CLK_CL16M),
};
/*
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 33532673d25d..16acc95f3c62 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -149,7 +149,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
#endif
};
-static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
0x514, 0),
@@ -280,9 +280,13 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
0x5a8, 1),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
0x5ac, 0),
+#ifdef CONFIG_RISCV
+ DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
+ 0x608, 0),
+#endif
};
-static struct rzg2l_reset r9a07g043_resets[] = {
+static const struct rzg2l_reset r9a07g043_resets[] = {
#ifdef CONFIG_ARM64
DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1),
@@ -338,6 +342,10 @@ static struct rzg2l_reset r9a07g043_resets[] = {
DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
+#ifdef CONFIG_RISCV
+ DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0),
+#endif
+
};
static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
@@ -347,6 +355,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
#endif
#ifdef CONFIG_RISCV
MOD_CLK_BASE + R9A07G043_IAX45_CLK,
+ MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK,
#endif
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 48404cafea3f..f6df3f7a31b5 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -368,7 +368,7 @@ static const struct {
#endif
};
-static struct rzg2l_reset r9a07g044_resets[] = {
+static const struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index c3e6da2de197..b068733b145f 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -240,6 +240,43 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_DMAC_ACLK,
};
+static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
+	/* Keep the always-on domain in the first position for proper domain registration. */
+ DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON,
+ DEF_REG_CONF(0, 0),
+ RZG2L_PD_F_ALWAYS_ON),
+ DEF_PD("gic", R9A08G045_PD_GIC,
+ DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)),
+ RZG2L_PD_F_ALWAYS_ON),
+ DEF_PD("ia55", R9A08G045_PD_IA55,
+ DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)),
+ RZG2L_PD_F_ALWAYS_ON),
+ DEF_PD("dmac", R9A08G045_PD_DMAC,
+ DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)),
+ RZG2L_PD_F_ALWAYS_ON),
+ DEF_PD("wdt0", R9A08G045_PD_WDT0,
+ DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("sdhi0", R9A08G045_PD_SDHI0,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("sdhi1", R9A08G045_PD_SDHI1,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("eth0", R9A08G045_PD_ETHER0,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("eth1", R9A08G045_PD_ETHER1,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("scif0", R9A08G045_PD_SCIF0,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)),
+ RZG2L_PD_F_NONE),
+};
+
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
/* Core Clocks */
.core_clks = r9a08g045_core_clks,
@@ -260,5 +297,9 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
+ /* Power domains */
+ .pm_domains = r9a08g045_pm_domains,
+ .num_pm_domains = ARRAY_SIZE(r9a08g045_pm_domains),
+
.has_clk_mon_regs = true,
};
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 3d2daa4ba2a4..04b78064d4e0 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -139,7 +139,6 @@ struct rzg2l_pll5_mux_dsi_div_param {
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @info: Pointer to platform data
- * @genpd: PM domain
* @mux_dsi_div_params: pll5 mux and dsi div parameters
*/
struct rzg2l_cpg_priv {
@@ -156,8 +155,6 @@ struct rzg2l_cpg_priv {
const struct rzg2l_cpg_info *info;
- struct generic_pm_domain genpd;
-
struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
@@ -1559,9 +1556,34 @@ static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
return true;
}
+/**
+ * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
+ * @onecell_data: cell data
+ * @domains: generic PM domains
+ */
+struct rzg2l_cpg_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[];
+};
+
+/**
+ * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
+ * @genpd: generic PM domain
+ * @priv: pointer to CPG private data structure
+ * @conf: CPG PM domain configuration info
+ * @id: RZ/G2L power domain ID
+ */
+struct rzg2l_cpg_pd {
+ struct generic_pm_domain genpd;
+ struct rzg2l_cpg_priv *priv;
+ struct rzg2l_cpg_pm_domain_conf conf;
+ u16 id;
+};
+
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
- struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
+ struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+ struct rzg2l_cpg_priv *priv = pd->priv;
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
@@ -1618,30 +1640,179 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
static void rzg2l_cpg_genpd_remove(void *data)
{
+ struct genpd_onecell_data *celldata = data;
+
+ for (unsigned int i = 0; i < celldata->num_domains; i++)
+ pm_genpd_remove(celldata->domains[i]);
+}
+
+static void rzg2l_cpg_genpd_remove_simple(void *data)
+{
pm_genpd_remove(data);
}
+static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
+{
+ struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+ struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
+ struct rzg2l_cpg_priv *priv = pd->priv;
+
+ /* Set MSTOP. */
+ if (mstop.mask)
+ writel(mstop.mask << 16, priv->base + mstop.off);
+
+ return 0;
+}
+
+static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
+{
+ struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+ struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
+ struct rzg2l_cpg_priv *priv = pd->priv;
+
+ /* Set MSTOP. */
+ if (mstop.mask)
+ writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);
+
+ return 0;
+}
+
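
Both callbacks drive the MSTOP registers through their write-enable convention: the upper 16 bits select which module-stop bits the write affects and the lower 16 bits carry the new value, so power-on writes mask << 16 (clear the selected stop bits) while power-off writes mask | (mask << 16) (set them). A reduced sketch of that convention; the offset, mask and base pointer below are placeholders standing in for a DEF_REG_CONF() entry and the mapped CPG registers:

	/* Placeholder values; real ones come from DEF_REG_CONF() entries. */
	u16 off = 0xB6C;			/* e.g. CPG_BUS_PERI_COM_MSTOP */
	u16 mask = BIT(0);

	/* Power on: write-enable the bits in 'mask' and clear them. */
	writel((u32)mask << 16, base + off);

	/* Power off: write-enable the same bits and set them. */
	writel(((u32)mask << 16) | mask, base + off);
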
+static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
+{
+ struct dev_power_governor *governor;
+
+ pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
+ pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
+ if (always_on) {
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
+ governor = &pm_domain_always_on_gov;
+ } else {
+ pd->genpd.power_on = rzg2l_cpg_power_on;
+ pd->genpd.power_off = rzg2l_cpg_power_off;
+ governor = &simple_qos_governor;
+ }
+
+ return pm_genpd_init(&pd->genpd, governor, !always_on);
+}
+
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- struct generic_pm_domain *genpd = &priv->genpd;
+ struct rzg2l_cpg_pd *pd;
+ int ret;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->genpd.name = np->name;
+ pd->priv = priv;
+ ret = rzg2l_cpg_pd_setup(pd, true);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
+ if (ret)
+ return ret;
+
+ return of_genpd_add_provider_simple(np, &pd->genpd);
+}
+
+static struct generic_pm_domain *
+rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
+{
+ struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+ struct genpd_onecell_data *genpd = data;
+
+ if (spec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ for (unsigned int i = 0; i < genpd->num_domains; i++) {
+ struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
+ genpd);
+
+ if (pd->id == spec->args[0]) {
+ domain = &pd->genpd;
+ break;
+ }
+ }
+
+ return domain;
+}
+
+static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
+{
+ const struct rzg2l_cpg_info *info = priv->info;
+ struct device *dev = priv->dev;
+ struct device_node *np = dev->of_node;
+ struct rzg2l_cpg_pm_domains *domains;
+ struct generic_pm_domain *parent;
+ u32 ncells;
int ret;
- genpd->name = np->name;
- genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
- GENPD_FLAG_ACTIVE_WAKEUP;
- genpd->attach_dev = rzg2l_cpg_attach_dev;
- genpd->detach_dev = rzg2l_cpg_detach_dev;
- ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
+ if (ret)
+ return ret;
+
+ /* For backward compatibility. */
+ if (!ncells)
+ return rzg2l_cpg_add_clk_domain(priv);
+
+ domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
+ GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = info->num_pm_domains;
+ domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
+
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
if (ret)
return ret;
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
+ for (unsigned int i = 0; i < info->num_pm_domains; i++) {
+ bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
+ struct rzg2l_cpg_pd *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->genpd.name = info->pm_domains[i].name;
+ pd->conf = info->pm_domains[i].conf;
+ pd->id = info->pm_domains[i].id;
+ pd->priv = priv;
+
+ ret = rzg2l_cpg_pd_setup(pd, always_on);
+ if (ret)
+ return ret;
+
+ if (always_on) {
+ ret = rzg2l_cpg_power_on(&pd->genpd);
+ if (ret)
+ return ret;
+ }
+
+ domains->domains[i] = &pd->genpd;
+		/* The parent is expected to be the very first entry of info->pm_domains[]. */
+ if (!i) {
+ parent = &pd->genpd;
+ continue;
+ }
+
+ ret = pm_genpd_add_subdomain(parent, &pd->genpd);
+ if (ret)
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
if (ret)
return ret;
- return of_genpd_add_provider_simple(np, genpd);
+ return 0;
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
@@ -1697,7 +1868,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
- error = rzg2l_cpg_add_clk_domain(priv);
+ error = rzg2l_cpg_add_pm_domains(priv);
if (error)
return error;
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 6e38c8fc888c..ecfe7e7ea8a1 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -27,6 +27,18 @@
#define CPG_PL6_ETH_SSEL (0x418)
#define CPG_PL5_SDIV (0x420)
#define CPG_RST_MON (0x680)
+#define CPG_BUS_ACPU_MSTOP (0xB60)
+#define CPG_BUS_MCPU1_MSTOP (0xB64)
+#define CPG_BUS_MCPU2_MSTOP (0xB68)
+#define CPG_BUS_PERI_COM_MSTOP (0xB6C)
+#define CPG_BUS_PERI_CPU_MSTOP (0xB70)
+#define CPG_BUS_PERI_DDR_MSTOP (0xB74)
+#define CPG_BUS_REG0_MSTOP (0xB7C)
+#define CPG_BUS_REG1_MSTOP (0xB80)
+#define CPG_BUS_TZCDDR_MSTOP (0xB84)
+#define CPG_MHU_MSTOP (0xB88)
+#define CPG_BUS_MCPU3_MSTOP (0xB90)
+#define CPG_BUS_PERI_CPU2_MSTOP (0xB94)
#define CPG_OTHERFUNC1_REG (0xBE8)
#define CPG_SIPLL5_STBY_RESETB BIT(0)
@@ -235,6 +247,55 @@ struct rzg2l_reset {
DEF_RST_MON(_id, _off, _bit, -1)
/**
+ * struct rzg2l_cpg_reg_conf - RZ/G2L register configuration data structure
+ * @off: register offset
+ * @mask: register mask
+ */
+struct rzg2l_cpg_reg_conf {
+ u16 off;
+ u16 mask;
+};
+
+#define DEF_REG_CONF(_off, _mask) ((struct rzg2l_cpg_reg_conf) { .off = (_off), .mask = (_mask) })
+
+/**
+ * struct rzg2l_cpg_pm_domain_conf - PM domain configuration data structure
+ * @mstop: MSTOP register configuration
+ */
+struct rzg2l_cpg_pm_domain_conf {
+ struct rzg2l_cpg_reg_conf mstop;
+};
+
+/**
+ * struct rzg2l_cpg_pm_domain_init_data - PM domain init data
+ * @name: PM domain name
+ * @conf: PM domain configuration
+ * @flags: RZG2L PM domain flags (see RZG2L_PD_F_*)
+ * @id: PM domain ID (similar to the ones defined in
+ * include/dt-bindings/clock/<soc-id>-cpg.h)
+ */
+struct rzg2l_cpg_pm_domain_init_data {
+ const char * const name;
+ struct rzg2l_cpg_pm_domain_conf conf;
+ u32 flags;
+ u16 id;
+};
+
+#define DEF_PD(_name, _id, _mstop_conf, _flags) \
+ { \
+ .name = (_name), \
+ .id = (_id), \
+ .conf = { \
+ .mstop = (_mstop_conf), \
+ }, \
+ .flags = (_flags), \
+ }
+
+/* Power domain flags. */
+#define RZG2L_PD_F_ALWAYS_ON BIT(0)
+#define RZG2L_PD_F_NONE (0)
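+
+/*
+ * A minimal, hypothetical example of how an SoC driver could describe its
+ * PM domains with the helpers above (the names, IDs and MSTOP bits here are
+ * made up for illustration; real tables live in the SoC-specific drivers,
+ * with the parent/always-on domain as the first entry):
+ *
+ *	static const struct rzg2l_cpg_pm_domain_init_data foo_pm_domains[] = {
+ *		DEF_PD("always-on", 0, DEF_REG_CONF(0, 0), RZG2L_PD_F_ALWAYS_ON),
+ *		DEF_PD("sdhi", 1,
+ *		       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)),
+ *		       RZG2L_PD_F_NONE),
+ *	};
+ */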
+
+/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
*
* @core_clks: Array of Core Clock definitions
@@ -252,6 +313,8 @@ struct rzg2l_reset {
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
+ * @pm_domains: PM domains init data array
+ * @num_pm_domains: Number of PM domains
* @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers
*/
struct rzg2l_cpg_info {
@@ -278,6 +341,10 @@ struct rzg2l_cpg_info {
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
+ /* Power domain. */
+ const struct rzg2l_cpg_pm_domain_init_data *pm_domains;
+ unsigned int num_pm_domains;
+
bool has_clk_mon_regs;
};
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 975454a3dd72..91012078681b 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -14,7 +14,6 @@
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
- int id;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 8cb21d10beca..53d10b1c627b 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(724000000, 3, 181, 2, 1, 1, 0),
RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0),
RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0),
RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
@@ -215,6 +216,7 @@ static const struct rockchip_cpuclk_reg_data rk3568_cpuclk_data = {
PNAME(mux_pll_p) = { "xin24m" };
PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc_32k" };
+PNAME(mux_usb480m_phy_p) = { "clk_usbphy0_480m", "clk_usbphy1_480m"};
PNAME(mux_armclk_p) = { "apll", "gpll" };
PNAME(clk_i2s0_8ch_tx_p) = { "clk_i2s0_8ch_tx_src", "clk_i2s0_8ch_tx_frac", "i2s0_mclkin", "xin_osc0_half" };
PNAME(clk_i2s0_8ch_rx_p) = { "clk_i2s0_8ch_rx_src", "clk_i2s0_8ch_rx_frac", "i2s0_mclkin", "xin_osc0_half" };
@@ -485,6 +487,9 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
RK3568_MODE_CON0, 14, 2, MFLAGS),
+ MUX(USB480M_PHY, "usb480m_phy", mux_usb480m_phy_p, CLK_SET_RATE_PARENT,
+ RK3568_MISC_CON2, 15, 1, MFLAGS),
+
/* PD_CORE */
COMPOSITE(0, "sclk_core_src", apll_gpll_npll_p, CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
diff --git a/drivers/clk/rockchip/rst-rk3588.c b/drivers/clk/rockchip/rst-rk3588.c
index e855bb8d5413..c4ebc01f1c9c 100644
--- a/drivers/clk/rockchip/rst-rk3588.c
+++ b/drivers/clk/rockchip/rst-rk3588.c
@@ -577,6 +577,7 @@ static const int rk3588_register_offset[] = {
/* SOFTRST_CON59 */
RK3588_CRU_RESET_OFFSET(SRST_A_HDCP1_BIU, 59, 6),
+ RK3588_CRU_RESET_OFFSET(SRST_A_HDMIRX_BIU, 59, 7),
RK3588_CRU_RESET_OFFSET(SRST_A_VO1_BIU, 59, 8),
RK3588_CRU_RESET_OFFSET(SRST_H_VOP1_BIU, 59, 9),
RK3588_CRU_RESET_OFFSET(SRST_H_VOP1_S_BIU, 59, 10),
diff --git a/drivers/clk/samsung/clk-exynos-arm64.c b/drivers/clk/samsung/clk-exynos-arm64.c
index 6fb7194df7ab..bf7de21f329e 100644
--- a/drivers/clk/samsung/clk-exynos-arm64.c
+++ b/drivers/clk/samsung/clk-exynos-arm64.c
@@ -17,10 +17,17 @@
#include "clk-exynos-arm64.h"
+/* PLL register bits */
+#define PLL_CON1_MANUAL BIT(1)
+
/* Gate register bits */
#define GATE_MANUAL BIT(20)
#define GATE_ENABLE_HWACG BIT(28)
+/* PLL_CONx_PLL register offsets range */
+#define PLL_CON_OFF_START 0x100
+#define PLL_CON_OFF_END 0x600
+
/* Gate register offsets range */
#define GATE_OFF_START 0x2000
#define GATE_OFF_END 0x2fff
@@ -38,17 +45,36 @@ struct exynos_arm64_cmu_data {
struct samsung_clk_provider *ctx;
};
+/* Check if the register offset is a GATE register */
+static bool is_gate_reg(unsigned long off)
+{
+ return off >= GATE_OFF_START && off <= GATE_OFF_END;
+}
+
+/* Check if the register offset is a PLL_CONx register */
+static bool is_pll_conx_reg(unsigned long off)
+{
+ return off >= PLL_CON_OFF_START && off <= PLL_CON_OFF_END;
+}
+
+/* Check if the register offset is a PLL_CON1 register */
+static bool is_pll_con1_reg(unsigned long off)
+{
+ return is_pll_conx_reg(off) && (off & 0xf) == 0x4 && !(off & 0x10);
+}
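+
+/*
+ * For example, within the 0x100..0x600 PLL_CON window an offset of 0x104 or
+ * 0x144 (PLL_CON1) matches, while 0x10c (PLL_CON3) and 0x114 do not.
+ */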
+
/**
* exynos_arm64_init_clocks - Set clocks initial configuration
- * @np: CMU device tree node with "reg" property (CMU addr)
- * @reg_offs: Register offsets array for clocks to init
- * @reg_offs_len: Number of register offsets in reg_offs array
+ * @np: CMU device tree node with "reg" property (CMU addr)
+ * @cmu: CMU data
*
- * Set manual control mode for all gate clocks.
+ * Set manual control mode for all gate and PLL clocks.
*/
static void __init exynos_arm64_init_clocks(struct device_node *np,
- const unsigned long *reg_offs, size_t reg_offs_len)
+ const struct samsung_cmu_info *cmu)
{
+ const unsigned long *reg_offs = cmu->clk_regs;
+ size_t reg_offs_len = cmu->nr_clk_regs;
void __iomem *reg_base;
size_t i;
@@ -60,14 +86,14 @@ static void __init exynos_arm64_init_clocks(struct device_node *np,
void __iomem *reg = reg_base + reg_offs[i];
u32 val;
- /* Modify only gate clock registers */
- if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
- continue;
-
- val = readl(reg);
- val |= GATE_MANUAL;
- val &= ~GATE_ENABLE_HWACG;
- writel(val, reg);
+ if (cmu->manual_plls && is_pll_con1_reg(reg_offs[i])) {
+ writel(PLL_CON1_MANUAL, reg);
+ } else if (is_gate_reg(reg_offs[i])) {
+ val = readl(reg);
+ val |= GATE_MANUAL;
+ val &= ~GATE_ENABLE_HWACG;
+ writel(val, reg);
+ }
}
iounmap(reg_base);
@@ -177,7 +203,7 @@ void __init exynos_arm64_register_cmu(struct device *dev,
pr_err("%s: could not enable bus clock %s; err = %d\n",
__func__, cmu->clk_name, err);
- exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+ exynos_arm64_init_clocks(np, cmu);
samsung_cmu_register_one(np, cmu);
}
@@ -224,7 +250,7 @@ int __init exynos_arm64_register_cmu_pm(struct platform_device *pdev,
__func__, cmu->clk_name, ret);
if (set_manual)
- exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+ exynos_arm64_init_clocks(np, cmu);
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 3484e6cc80ad..503c6f5b20d5 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -13,9 +13,9 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-#include <linux/property.h>
#define EXYNOS_CLKOUT_NR_CLKS 1
#define EXYNOS_CLKOUT_PARENTS 32
@@ -84,17 +84,24 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
if (!dev->parent) {
dev_err(dev, "not instantiated from MFD\n");
return -EINVAL;
}
- variant = device_get_match_data(dev->parent);
- if (!variant) {
+	/*
+	 * The 'exynos_clkout_ids' array is not the table that dev->parent
+	 * was matched against, so of_device_get_match_data() or
+	 * device_get_match_data() cannot be used here.
+	 */
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
dev_err(dev, "cannot match parent device\n");
return -EINVAL;
}
+ variant = match->data;
*mux_mask = variant->mux_mask;
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 82cfa22c0788..6215471c4ac6 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -14,13 +14,16 @@
#include <dt-bindings/clock/exynos850.h>
#include "clk.h"
+#include "clk-cpu.h"
#include "clk-exynos-arm64.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_DOUT_G3D_SWITCH + 1)
+#define CLKS_NR_TOP (CLK_DOUT_CPUCL1_SWITCH + 1)
#define CLKS_NR_APM (CLK_GOUT_SYSREG_APM_PCLK + 1)
#define CLKS_NR_AUD (CLK_GOUT_AUD_CMU_AUD_PCLK + 1)
#define CLKS_NR_CMGP (CLK_GOUT_SYSREG_CMGP_PCLK + 1)
+#define CLKS_NR_CPUCL0 (CLK_CLUSTER0_SCLK + 1)
+#define CLKS_NR_CPUCL1 (CLK_CLUSTER1_SCLK + 1)
#define CLKS_NR_G3D (CLK_GOUT_G3D_SYSREG_PCLK + 1)
#define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1)
#define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1)
@@ -47,6 +50,10 @@
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
#define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1030
#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034
#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1038
#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
@@ -69,6 +76,10 @@
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
#define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL1_DBG 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
#define CLK_CON_DIV_CLKCMU_DPU 0x1840
#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1844
#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
@@ -97,6 +108,10 @@
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
#define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2038
#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c
#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2040
#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
@@ -130,6 +145,10 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
CLK_CON_MUX_MUX_CLKCMU_CORE_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_DPU,
CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
@@ -152,6 +171,10 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
CLK_CON_DIV_CLKCMU_CORE_SSS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_DBG,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
CLK_CON_DIV_CLKCMU_DPU,
CLK_CON_DIV_CLKCMU_G3D_SWITCH,
CLK_CON_DIV_CLKCMU_HSI_BUS,
@@ -180,6 +203,10 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_DPU,
CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
@@ -234,6 +261,14 @@ PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2",
"oscclk", "oscclk" };
PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL0 */
+PNAME(mout_cpucl0_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_cpucl0_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL1 */
+PNAME(mout_cpucl1_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_cpucl1_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_G3D */
PNAME(mout_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
@@ -300,6 +335,18 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CORE_SSS, "mout_core_sss", mout_core_sss_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, 0, 2),
+ /* CPUCL0 */
+ MUX(CLK_MOUT_CPUCL0_DBG, "mout_cpucl0_dbg", mout_cpucl0_dbg_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, 0, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH, "mout_cpucl0_switch", mout_cpucl0_switch_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, 0, 2),
+
+ /* CPUCL1 */
+ MUX(CLK_MOUT_CPUCL1_DBG, "mout_cpucl1_dbg", mout_cpucl1_dbg_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG, 0, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH, "mout_cpucl1_switch", mout_cpucl1_switch_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, 0, 2),
+
/* DPU */
MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p,
CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2),
@@ -378,6 +425,18 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_CORE_SSS, "dout_core_sss", "gout_core_sss",
CLK_CON_DIV_CLKCMU_CORE_SSS, 0, 4),
+ /* CPUCL0 */
+ DIV(CLK_DOUT_CPUCL0_DBG, "dout_cpucl0_dbg", "gout_cpucl0_dbg",
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_SWITCH, "dout_cpucl0_switch", "gout_cpucl0_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+
+ /* CPUCL1 */
+ DIV(CLK_DOUT_CPUCL1_DBG, "dout_cpucl1_dbg", "gout_cpucl1_dbg",
+ CLK_CON_DIV_CLKCMU_CPUCL1_DBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL1_SWITCH, "dout_cpucl1_switch", "gout_cpucl1_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+
/* DPU */
DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu",
CLK_CON_DIV_CLKCMU_DPU, 0, 4),
@@ -442,6 +501,18 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_AUD, "gout_aud", "mout_aud",
CLK_CON_GAT_GATE_CLKCMU_AUD, 21, 0, 0),
+ /* CPUCL0 */
+ GATE(CLK_GOUT_CPUCL0_DBG, "gout_cpucl0_dbg", "mout_cpucl0_dbg",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG, 21, 0, 0),
+ GATE(CLK_GOUT_CPUCL0_SWITCH, "gout_cpucl0_switch", "mout_cpucl0_switch",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, 21, 0, 0),
+
+ /* CPUCL1 */
+ GATE(CLK_GOUT_CPUCL1_DBG, "gout_cpucl1_dbg", "mout_cpucl1_dbg",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG, 21, 0, 0),
+ GATE(CLK_GOUT_CPUCL1_SWITCH, "gout_cpucl1_switch", "mout_cpucl1_switch",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, 21, 0, 0),
+
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
@@ -1030,6 +1101,373 @@ static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
.clk_name = "gout_clkcmu_cmgp_bus",
};
+/* ---- CMU_CPUCL0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL0 (0x10900000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_PLL 0x100c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF 0x1814
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_CPU 0x1818
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK 0x181c
+#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK 0x2000
+#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK 0x2004
+#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK 0x2008
+#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK 0x200c
+#define CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK 0x2010
+#define CLK_CON_GAT_GATE_CLK_CPUCL0_CPU 0x2020
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_PLL,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_CPU,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK,
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK,
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK,
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK,
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK,
+ CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK,
+ CLK_CON_GAT_GATE_CLK_CPUCL0_CPU,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_cpucl0_switch" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_cpucl0_dbg" };
+PNAME(mout_cpucl0_pll_p) = { "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user" };
+
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 2210000000U, 255, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 2106000000U, 243, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 2002000000U, 231, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1846000000U, 213, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1742000000U, 201, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1586000000U, 183, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1456000000U, 168, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1300000000U, 150, 3, 0),
+ PLL_35XX_RATE(26 * MHZ, 1157000000U, 267, 3, 1),
+ PLL_35XX_RATE(26 * MHZ, 1053000000U, 243, 3, 1),
+ PLL_35XX_RATE(26 * MHZ, 949000000U, 219, 3, 1),
+ PLL_35XX_RATE(26 * MHZ, 806000000U, 186, 3, 1),
+ PLL_35XX_RATE(26 * MHZ, 650000000U, 150, 3, 1),
+ PLL_35XX_RATE(26 * MHZ, 546000000U, 252, 3, 2),
+ PLL_35XX_RATE(26 * MHZ, 442000000U, 204, 3, 2),
+ PLL_35XX_RATE(26 * MHZ, 351000000U, 162, 3, 2),
+ PLL_35XX_RATE(26 * MHZ, 247000000U, 114, 3, 2),
+ PLL_35XX_RATE(26 * MHZ, 182000000U, 168, 3, 3),
+ PLL_35XX_RATE(26 * MHZ, 130000000U, 120, 3, 3),
+};
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ PLL(pll_0822x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX_F(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX_F(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user",
+ mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user",
+ mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL0_PLL, "mout_cpucl0_pll", mout_cpucl0_pll_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV_F(CLK_DOUT_CPUCL0_CPU, "dout_cpucl0_cpu", "mout_cpucl0_pll",
+ CLK_CON_DIV_DIV_CLK_CPUCL0_CPU, 0, 1,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CPUCL0_CMUREF, "dout_cpucl0_cmuref", "dout_cpucl0_cpu",
+ CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF, 0, 3,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CPUCL0_PCLK, "dout_cpucl0_pclk", "dout_cpucl0_cpu",
+ CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+
+ /* EMBEDDED_CMU_CPUCL0 */
+ DIV_F(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk", "gout_cluster0_cpu",
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER0_PCLKDBG, "dout_cluster0_pclkdbg",
+ "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+};
+
+static const struct samsung_gate_clock cpucl0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL0_CMU_CPUCL0_PCLK, "gout_cpucl0_cmu_cpucl0_pclk",
+ "dout_cpucl0_pclk",
+ CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+
+ /* EMBEDDED_CMU_CPUCL0 */
+ GATE(CLK_GOUT_CLUSTER0_CPU, "gout_cluster0_cpu", "dout_cpucl0_cpu",
+ CLK_CON_GAT_GATE_CLK_CPUCL0_CPU, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER0_SCLK, "gout_cluster0_sclk", "gout_cluster0_cpu",
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER0_ATCLK, "gout_cluster0_atclk",
+ "dout_cluster0_atclk",
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER0_PERIPHCLK, "gout_cluster0_periphclk",
+ "dout_cluster0_periphclk",
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER0_PCLK, "gout_cluster0_pclk",
+ "dout_cluster0_pclkdbg",
+ CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+/*
+ * Each parameter is written into the corresponding DIV register, so the
+ * actual divider value for each parameter will be 1/(param+1). All these
+ * parameters must be in the range 0..15, as the divider range for all of
+ * these DIV clocks is 1..16. The default values for these dividers are
+ * (1, 3, 3, 1).
+ */
+#define E850_CPU_DIV0(aclk, atclk, pclkdbg, periphclk) \
+ (((aclk) << 16) | ((atclk) << 12) | ((pclkdbg) << 8) | \
+ ((periphclk) << 4))
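+
+/*
+ * For example, the default E850_CPU_DIV0(1, 3, 3, 1) used in the table below
+ * encodes dividers of 2 (ACLK), 4 (ATCLK), 4 (PCLKDBG) and 2 (PERIPHCLK).
+ */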
+
+static const struct exynos_cpuclk_cfg_data exynos850_cluster_clk_d[] __initconst
+= {
+ { 2210000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 2106000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 2002000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1846000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1742000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1586000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1456000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1300000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1157000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 1053000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 949000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 806000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 650000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 546000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 442000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 351000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 247000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 182000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 130000, E850_CPU_DIV0(1, 3, 3, 1) },
+ { 0 }
+};
+
+static const struct samsung_cpu_clock cpucl0_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_CLUSTER0_SCLK, "cluster0_clk", CLK_MOUT_PLL_CPUCL0,
+ CLK_MOUT_CPUCL0_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL0,
+ exynos850_cluster_clk_d),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .gate_clks = cpucl0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cpucl0_gate_clks),
+ .cpu_clks = cpucl0_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(cpucl0_cpu_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "dout_cpucl0_switch",
+ .manual_plls = true,
+};
+
+static void __init exynos850_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynos850_cmu_cpucl0, "samsung,exynos850-cmu-cpucl0",
+ exynos850_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x10800000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_PLL 0x1000
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF 0x1814
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_CPU 0x1818
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK 0x181c
+#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK 0x2000
+#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK 0x2004
+#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK 0x2008
+#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK 0x200c
+#define CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK 0x2010
+#define CLK_CON_GAT_GATE_CLK_CPUCL1_CPU 0x2020
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_PLL,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_CPU,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK,
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK,
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK,
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK,
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK,
+ CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK,
+ CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_cpucl1_switch" };
+PNAME(mout_cpucl1_dbg_user_p) = { "oscclk", "dout_cpucl1_dbg" };
+PNAME(mout_cpucl1_pll_p) = { "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user" };
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ PLL(pll_0822x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX_F(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX_F(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user",
+ mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_CPUCL1_DBG_USER, "mout_cpucl1_dbg_user",
+ mout_cpucl1_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL1_PLL, "mout_cpucl1_pll", mout_cpucl1_pll_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV_F(CLK_DOUT_CPUCL1_CPU, "dout_cpucl1_cpu", "mout_cpucl1_pll",
+ CLK_CON_DIV_DIV_CLK_CPUCL1_CPU, 0, 1,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CPUCL1_CMUREF, "dout_cpucl1_cmuref", "dout_cpucl1_cpu",
+ CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF, 0, 3,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CPUCL1_PCLK, "dout_cpucl1_pclk", "dout_cpucl1_cpu",
+ CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+
+ /* EMBEDDED_CMU_CPUCL1 */
+ DIV_F(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk", "gout_cluster1_cpu",
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER1_PCLKDBG, "dout_cluster1_pclkdbg",
+ "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+};
+
+static const struct samsung_gate_clock cpucl1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL1_CMU_CPUCL1_PCLK, "gout_cpucl1_cmu_cpucl1_pclk",
+ "dout_cpucl1_pclk",
+ CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+
+ /* EMBEDDED_CMU_CPUCL1 */
+ GATE(CLK_GOUT_CLUSTER1_CPU, "gout_cluster1_cpu", "dout_cpucl1_cpu",
+ CLK_CON_GAT_GATE_CLK_CPUCL1_CPU, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER1_SCLK, "gout_cluster1_sclk", "gout_cluster1_cpu",
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER1_ATCLK, "gout_cluster1_atclk",
+ "dout_cluster1_atclk",
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER1_PERIPHCLK, "gout_cluster1_periphclk",
+ "dout_cluster1_periphclk",
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CLUSTER1_PCLK, "gout_cluster1_pclk",
+ "dout_cluster1_pclkdbg",
+ CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cpu_clock cpucl1_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_CLUSTER1_SCLK, "cluster1_clk", CLK_MOUT_PLL_CPUCL1,
+ CLK_MOUT_CPUCL1_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL1,
+ exynos850_cluster_clk_d),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .gate_clks = cpucl1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cpucl1_gate_clks),
+ .cpu_clks = cpucl1_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(cpucl1_cpu_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "dout_cpucl1_switch",
+ .manual_plls = true,
+};
+
+static void __init exynos850_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynos850_cmu_cpucl1, "samsung,exynos850-cmu-cpucl1",
+ exynos850_cmu_cpucl1_init);
+
/* ---- CMU_G3D ------------------------------------------------------------- */
/* Register Offset definitions for CMU_G3D (0x11400000) */
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index e9c06eb93e66..f04bacacab2c 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -352,13 +352,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
/* CMU_TOP_PURECLKCOMP */
PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index d065e343a85d..ba9570f7a5fa 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -15,10 +15,13 @@
#include "clk.h"
#include "clk-exynos-arm64.h"
+#include "clk-pll.h"
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (CLK_GOUT_CMU_TPU_UART + 1)
#define CLKS_NR_APM (CLK_APM_PLL_DIV16_APM + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_P_HSI0_ACLK + 1)
+#define CLKS_NR_HSI2 (CLK_GOUT_HSI2_XIU_P_HSI2_ACLK + 1)
#define CLKS_NR_MISC (CLK_GOUT_MISC_XIU_D_MISC_ACLK + 1)
#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK + 1)
#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK + 1)
@@ -1893,16 +1896,16 @@ static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_PCLK, 21, 0, 0),
GATE(CLK_GOUT_APM_UASC_P_APM_ACLK,
"gout_apm_uasc_p_apm_aclk", "gout_apm_func",
- CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_APM_UASC_P_APM_PCLK,
"gout_apm_uasc_p_apm_pclk", "gout_apm_func",
- CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_APM_WDT_APM_PCLK,
"gout_apm_wdt_apm_pclk", "gout_apm_func",
CLK_CON_GAT_GOUT_BLK_APM_UID_WDT_APM_IPCLKPORT_PCLK, 21, 0, 0),
GATE(CLK_GOUT_APM_XIU_DP_APM_ACLK,
"gout_apm_xiu_dp_apm_aclk", "gout_apm_func",
- CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK, 21, CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info apm_cmu_info __initconst = {
@@ -1919,6 +1922,958 @@ static const struct samsung_cmu_info apm_cmu_info __initconst = {
.nr_clk_regs = ARRAY_SIZE(apm_clk_regs),
};
+/* ---- CMU_HSI0 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI0 (0x11000000) */
+#define PLL_LOCKTIME_PLL_USB 0x0004
+#define PLL_CON0_PLL_USB 0x0140
+#define PLL_CON1_PLL_USB 0x0144
+#define PLL_CON2_PLL_USB 0x0148
+#define PLL_CON3_PLL_USB 0x014c
+#define PLL_CON4_PLL_USB 0x0150
+#define PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_HSI0_ALT_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_HSI0_BUS_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_HSI0_TCXO_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_HSI0_USB20_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_HSI0_USPDPDBG_USER 0x0664
+#define HSI0_CMU_HSI0_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_HSI0_CMU_HSI0_CLKOUT0 0x0810
+#define CLK_CON_MUX_MUX_CLK_HSI0_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF 0x1004
+#define CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD 0x1008
+#define CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD 0x1800
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26 0x2004
+#define CLK_CON_GAT_CLK_HSI0_ALT 0x2008
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2054
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x206c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2070
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26 0x2074
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2078
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x207c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK 0x2098
+#define DMYQCH_CON_USB31DRD_QCH 0x3000
+#define DMYQCH_CON_USB31DRD_QCH_REF 0x3004
+#define PCH_CON_LHM_AXI_G_ETR_HSI0_PCH 0x3008
+#define PCH_CON_LHM_AXI_P_AOCHSI0_PCH 0x300c
+#define PCH_CON_LHM_AXI_P_HSI0_PCH 0x3010
+#define PCH_CON_LHS_ACEL_D_HSI0_PCH 0x3014
+#define PCH_CON_LHS_AXI_D_HSI0AOC_PCH 0x3018
+#define QCH_CON_DP_LINK_QCH_GTC_CLK 0x301c
+#define QCH_CON_DP_LINK_QCH_PCLK 0x3020
+#define QCH_CON_D_TZPC_HSI0_QCH 0x3024
+#define QCH_CON_ETR_MIU_QCH_ACLK 0x3028
+#define QCH_CON_ETR_MIU_QCH_PCLK 0x302c
+#define QCH_CON_GPC_HSI0_QCH 0x3030
+#define QCH_CON_HSI0_CMU_HSI0_QCH 0x3034
+#define QCH_CON_LHM_AXI_G_ETR_HSI0_QCH 0x3038
+#define QCH_CON_LHM_AXI_P_AOCHSI0_QCH 0x303c
+#define QCH_CON_LHM_AXI_P_HSI0_QCH 0x3040
+#define QCH_CON_LHS_ACEL_D_HSI0_QCH 0x3044
+#define QCH_CON_LHS_AXI_D_HSI0AOC_QCH 0x3048
+#define QCH_CON_PPMU_HSI0_AOC_QCH 0x304c
+#define QCH_CON_PPMU_HSI0_BUS0_QCH 0x3050
+#define QCH_CON_SSMT_USB_QCH 0x3054
+#define QCH_CON_SYSMMU_USB_QCH 0x3058
+#define QCH_CON_SYSREG_HSI0_QCH 0x305c
+#define QCH_CON_UASC_HSI0_CTRL_QCH 0x3060
+#define QCH_CON_UASC_HSI0_LINK_QCH 0x3064
+#define QCH_CON_USB31DRD_QCH_APB 0x3068
+#define QCH_CON_USB31DRD_QCH_DBG 0x306c
+#define QCH_CON_USB31DRD_QCH_PCS 0x3070
+#define QCH_CON_USB31DRD_QCH_SLV_CTRL 0x3074
+#define QCH_CON_USB31DRD_QCH_SLV_LINK 0x3078
+#define QUEUE_CTRL_REG_BLK_HSI0_CMU_HSI0 0x3c00
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_USB,
+ PLL_CON0_PLL_USB,
+ PLL_CON1_PLL_USB,
+ PLL_CON2_PLL_USB,
+ PLL_CON3_PLL_USB,
+ PLL_CON4_PLL_USB,
+ PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_ALT_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_TCXO_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_USB20_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_USPDPDBG_USER,
+ HSI0_CMU_HSI0_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_HSI0_CMU_HSI0_CLKOUT0,
+ CLK_CON_MUX_MUX_CLK_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD,
+ CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26,
+ CLK_CON_GAT_CLK_HSI0_ALT,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK,
+ DMYQCH_CON_USB31DRD_QCH,
+ DMYQCH_CON_USB31DRD_QCH_REF,
+ PCH_CON_LHM_AXI_G_ETR_HSI0_PCH,
+ PCH_CON_LHM_AXI_P_AOCHSI0_PCH,
+ PCH_CON_LHM_AXI_P_HSI0_PCH,
+ PCH_CON_LHS_ACEL_D_HSI0_PCH,
+ PCH_CON_LHS_AXI_D_HSI0AOC_PCH,
+ QCH_CON_DP_LINK_QCH_GTC_CLK,
+ QCH_CON_DP_LINK_QCH_PCLK,
+ QCH_CON_D_TZPC_HSI0_QCH,
+ QCH_CON_ETR_MIU_QCH_ACLK,
+ QCH_CON_ETR_MIU_QCH_PCLK,
+ QCH_CON_GPC_HSI0_QCH,
+ QCH_CON_HSI0_CMU_HSI0_QCH,
+ QCH_CON_LHM_AXI_G_ETR_HSI0_QCH,
+ QCH_CON_LHM_AXI_P_AOCHSI0_QCH,
+ QCH_CON_LHM_AXI_P_HSI0_QCH,
+ QCH_CON_LHS_ACEL_D_HSI0_QCH,
+ QCH_CON_LHS_AXI_D_HSI0AOC_QCH,
+ QCH_CON_PPMU_HSI0_AOC_QCH,
+ QCH_CON_PPMU_HSI0_BUS0_QCH,
+ QCH_CON_SSMT_USB_QCH,
+ QCH_CON_SYSMMU_USB_QCH,
+ QCH_CON_SYSREG_HSI0_QCH,
+ QCH_CON_UASC_HSI0_CTRL_QCH,
+ QCH_CON_UASC_HSI0_LINK_QCH,
+ QCH_CON_USB31DRD_QCH_APB,
+ QCH_CON_USB31DRD_QCH_DBG,
+ QCH_CON_USB31DRD_QCH_PCS,
+ QCH_CON_USB31DRD_QCH_SLV_CTRL,
+ QCH_CON_USB31DRD_QCH_SLV_LINK,
+ QUEUE_CTRL_REG_BLK_HSI0_CMU_HSI0,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI0 */
+PNAME(mout_pll_usb_p) = { "oscclk", "fout_usb_pll" };
+PNAME(mout_hsi0_alt_user_p) = { "oscclk",
+ "gout_hsi0_clk_hsi0_alt" };
+PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
+PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+PNAME(mout_hsi0_tcxo_user_p) = { "oscclk", "tcxo_hsi1_hsi0" };
+PNAME(mout_hsi0_usb20_user_p) = { "oscclk", "usb20phy_phy_clock" };
+PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usb31drd" };
+PNAME(mout_hsi0_usbdpdbg_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usbdpdbg" };
+PNAME(mout_hsi0_bus_p) = { "mout_hsi0_bus_user",
+ "mout_hsi0_alt_user" };
+PNAME(mout_hsi0_usb20_ref_p) = { "fout_usb_pll",
+ "mout_hsi0_tcxo_user" };
+PNAME(mout_hsi0_usb31drd_p) = { "fout_usb_pll",
+ "mout_hsi0_usb31drd_user",
+ "dout_hsi0_usb31drd",
+ "fout_usb_pll" };
+
+static const struct samsung_pll_rate_table cmu_hsi0_usb_pll_rates[] __initconst = {
+ PLL_35XX_RATE(24576000, 19200000, 150, 6, 5),
+ { /* sentinel */ }
+};
+
+static const struct samsung_pll_clock cmu_hsi0_pll_clks[] __initconst = {
+ PLL(pll_0518x, CLK_FOUT_USB_PLL, "fout_usb_pll", "oscclk",
+ PLL_LOCKTIME_PLL_USB, PLL_CON3_PLL_USB,
+ cmu_hsi0_usb_pll_rates),
+};
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_USB,
+ "mout_pll_usb", mout_pll_usb_p,
+ PLL_CON0_PLL_USB, 4, 1),
+ MUX(CLK_MOUT_HSI0_ALT_USER,
+ "mout_hsi0_alt_user", mout_hsi0_alt_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_BUS_USER,
+ "mout_hsi0_bus_user", mout_hsi0_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_DPGTC_USER,
+ "mout_hsi0_dpgtc_user", mout_hsi0_dpgtc_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_TCXO_USER,
+ "mout_hsi0_tcxo_user", mout_hsi0_tcxo_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_USB20_USER,
+ "mout_hsi0_usb20_user", mout_hsi0_usb20_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD_USER,
+ "mout_hsi0_usb31drd_user", mout_hsi0_usb31drd_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_USBDPDBG_USER,
+ "mout_hsi0_usbdpdbg_user", mout_hsi0_usbdpdbg_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_BUS,
+ "mout_hsi0_bus", mout_hsi0_bus_p,
+ CLK_CON_MUX_MUX_CLK_HSI0_BUS, 0, 1),
+ MUX(CLK_MOUT_HSI0_USB20_REF,
+ "mout_hsi0_usb20_ref", mout_hsi0_usb20_ref_p,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF, 0, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD,
+ "mout_hsi0_usb31drd", mout_hsi0_usb31drd_p,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD, 0, 2),
+};
+
+static const struct samsung_div_clock hsi0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI0_USB31DRD,
+ "dout_hsi0_usb31drd", "mout_hsi0_usb20_user",
+ CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD, 0, 3),
+};
+
+static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_PCLK,
+ "gout_hsi0_hsi0_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26,
+ "gout_hsi0_usb31drd_i_usb31drd_suspend_clk_26",
+ "mout_hsi0_usb20_ref",
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_ALT,
+ "gout_hsi0_clk_hsi0_alt", "ioclk_clk_hsi0_alt",
+ CLK_CON_GAT_CLK_HSI0_ALT, 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_I_DP_GTC_CLK,
+ "gout_hsi0_dp_link_i_dp_gtc_clk", "mout_hsi0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_I_PCLK,
+ "gout_hsi0_dp_link_i_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK,
+ "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_ETR_MIU_I_ACLK,
+ "gout_hsi0_etr_miu_i_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_ETR_MIU_I_PCLK,
+ "gout_hsi0_etr_miu_i_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_GPC_HSI0_PCLK,
+ "gout_hsi0_gpc_hsi0_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_G_ETR_HSI0_I_CLK,
+ "gout_hsi0_lhm_axi_g_etr_hsi0_i_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_AOCHSI0_I_CLK,
+ "gout_hsi0_lhm_axi_p_aochsi0_i_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_I_CLK,
+ "gout_hsi0_lhm_axi_p_hsi0_i_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_I_CLK,
+ "gout_hsi0_lhs_acel_d_hsi0_i_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_LHS_AXI_D_HSI0AOC_I_CLK,
+ "gout_hsi0_lhs_axi_d_hsi0aoc_i_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_AOC_ACLK,
+ "gout_hsi0_ppmu_hsi0_aoc_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_AOC_PCLK,
+ "gout_hsi0_ppmu_hsi0_aoc_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS0_ACLK,
+ "gout_hsi0_ppmu_hsi0_bus0_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS0_PCLK,
+ "gout_hsi0_ppmu_hsi0_bus0_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK,
+ "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_SSMT_USB_ACLK,
+ "gout_hsi0_ssmt_usb_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_SSMT_USB_PCLK,
+ "gout_hsi0_ssmt_usb_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2,
+ "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK,
+ "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK,
+ "gout_hsi0_uasc_hsi0_ctrl_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK,
+ "gout_hsi0_uasc_hsi0_ctrl_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK,
+ "gout_hsi0_uasc_hsi0_link_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK,
+ "gout_hsi0_uasc_hsi0_link_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL,
+ "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY,
+ "gout_hsi0_usb31drd_bus_clk_early", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26,
+ "gout_hsi0_usb31drd_i_usb20_phy_refclk_26", "mout_hsi0_usb20_ref",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40,
+ "gout_hsi0_usb31drd_i_usb31drd_ref_clk_40", "mout_hsi0_usb31drd",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_REF_SOC_PLL,
+ "gout_hsi0_usb31drd_i_usbdpphy_ref_soc_pll",
+ "mout_hsi0_usbdpdbg_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK,
+ "gout_hsi0_usb31drd_i_usbdpphy_scl_apb_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_I_USBPCS_APB_CLK,
+ "gout_hsi0_usb31drd_i_usbpcs_apb_clk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_I_ACLK,
+ "gout_hsi0_usb31drd_usbdpphy_i_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_UDBG_I_APB_PCLK,
+ "gout_hsi0_usb31drd_usbdpphy_udbg_i_apb_pclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK,
+ 21, 0, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_XIU_D0_HSI0_ACLK,
+ "gout_hsi0_xiu_d0_hsi0_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_XIU_D1_HSI0_ACLK,
+ "gout_hsi0_xiu_d1_hsi0_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI0_XIU_P_HSI0_ACLK,
+ "gout_hsi0_xiu_p_hsi0_aclk", "mout_hsi0_bus",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_fixed_rate_clock hsi0_fixed_clks[] __initconst = {
+ FRATE(0, "tcxo_hsi1_hsi0", NULL, 0, 26000000),
+ FRATE(0, "usb20phy_phy_clock", NULL, 0, 120000000),
+ /* until we implement APMGSA */
+ FRATE(0, "ioclk_clk_hsi0_alt", NULL, 0, 213000000),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .pll_clks = cmu_hsi0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_hsi0_pll_clks),
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .div_clks = hsi0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi0_div_clks),
+ .gate_clks = hsi0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks),
+ .fixed_clks = hsi0_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(hsi0_fixed_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_HSI2 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI2 (0x14400000) */
+#define PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_HSI2_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_HSI2_MMC_CARD_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_HSI2_PCIE_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x0634
+#define HSI2_CMU_HSI2_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_HSI2_CMU_HSI2_CLKOUT0 0x0810
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN 0x2000
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN 0x2004
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2038
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x204c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2050
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2054
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK 0x20e0
+#define DMYQCH_CON_PCIE_GEN4_1_QCH_SCLK_1 0x3000
+#define PCH_CON_LHM_AXI_P_HSI2_PCH 0x3008
+#define PCH_CON_LHS_ACEL_D_HSI2_PCH 0x300c
+#define QCH_CON_D_TZPC_HSI2_QCH 0x3010
+#define QCH_CON_GPC_HSI2_QCH 0x3014
+#define QCH_CON_GPIO_HSI2_QCH 0x3018
+#define QCH_CON_HSI2_CMU_HSI2_QCH 0x301c
+#define QCH_CON_LHM_AXI_P_HSI2_QCH 0x3020
+#define QCH_CON_LHS_ACEL_D_HSI2_QCH 0x3024
+#define QCH_CON_MMC_CARD_QCH 0x3028
+#define QCH_CON_PCIE_GEN4_1_QCH_APB_1 0x302c
+#define QCH_CON_PCIE_GEN4_1_QCH_APB_2 0x3030
+#define QCH_CON_PCIE_GEN4_1_QCH_AXI_1 0x3034
+#define QCH_CON_PCIE_GEN4_1_QCH_AXI_2 0x3038
+#define QCH_CON_PCIE_GEN4_1_QCH_DBG_1 0x303c
+#define QCH_CON_PCIE_GEN4_1_QCH_DBG_2 0x3040
+#define QCH_CON_PCIE_GEN4_1_QCH_PCS_APB 0x3044
+#define QCH_CON_PCIE_GEN4_1_QCH_PMA_APB 0x3048
+#define QCH_CON_PCIE_GEN4_1_QCH_UDBG 0x304c
+#define QCH_CON_PCIE_IA_GEN4A_1_QCH 0x3050
+#define QCH_CON_PCIE_IA_GEN4B_1_QCH 0x3054
+#define QCH_CON_PPMU_HSI2_QCH 0x3058
+#define QCH_CON_QE_MMC_CARD_HSI2_QCH 0x305c
+#define QCH_CON_QE_PCIE_GEN4A_HSI2_QCH 0x3060
+#define QCH_CON_QE_PCIE_GEN4B_HSI2_QCH 0x3064
+#define QCH_CON_QE_UFS_EMBD_HSI2_QCH 0x3068
+#define QCH_CON_SSMT_HSI2_QCH 0x306c
+#define QCH_CON_SSMT_PCIE_IA_GEN4A_1_QCH 0x3070
+#define QCH_CON_SSMT_PCIE_IA_GEN4B_1_QCH 0x3074
+#define QCH_CON_SYSMMU_HSI2_QCH 0x3078
+#define QCH_CON_SYSREG_HSI2_QCH 0x307c
+#define QCH_CON_UASC_PCIE_GEN4A_DBI_1_QCH 0x3080
+#define QCH_CON_UASC_PCIE_GEN4A_SLV_1_QCH 0x3084
+#define QCH_CON_UASC_PCIE_GEN4B_DBI_1_QCH 0x3088
+#define QCH_CON_UASC_PCIE_GEN4B_SLV_1_QCH 0x308c
+#define QCH_CON_UFS_EMBD_QCH 0x3090
+#define QCH_CON_UFS_EMBD_QCH_FMP 0x3094
+#define QUEUE_CTRL_REG_BLK_HSI2_CMU_HSI2 0x3c00
+
+static const unsigned long cmu_hsi2_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_HSI2_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER,
+ PLL_CON1_MUX_CLKCMU_HSI2_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER,
+ PLL_CON1_MUX_CLKCMU_HSI2_PCIE_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ PLL_CON1_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ HSI2_CMU_HSI2_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_HSI2_CMU_HSI2_CLKOUT0,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK,
+ DMYQCH_CON_PCIE_GEN4_1_QCH_SCLK_1,
+ PCH_CON_LHM_AXI_P_HSI2_PCH,
+ PCH_CON_LHS_ACEL_D_HSI2_PCH,
+ QCH_CON_D_TZPC_HSI2_QCH,
+ QCH_CON_GPC_HSI2_QCH,
+ QCH_CON_GPIO_HSI2_QCH,
+ QCH_CON_HSI2_CMU_HSI2_QCH,
+ QCH_CON_LHM_AXI_P_HSI2_QCH,
+ QCH_CON_LHS_ACEL_D_HSI2_QCH,
+ QCH_CON_MMC_CARD_QCH,
+ QCH_CON_PCIE_GEN4_1_QCH_APB_1,
+ QCH_CON_PCIE_GEN4_1_QCH_APB_2,
+ QCH_CON_PCIE_GEN4_1_QCH_AXI_1,
+ QCH_CON_PCIE_GEN4_1_QCH_AXI_2,
+ QCH_CON_PCIE_GEN4_1_QCH_DBG_1,
+ QCH_CON_PCIE_GEN4_1_QCH_DBG_2,
+ QCH_CON_PCIE_GEN4_1_QCH_PCS_APB,
+ QCH_CON_PCIE_GEN4_1_QCH_PMA_APB,
+ QCH_CON_PCIE_GEN4_1_QCH_UDBG,
+ QCH_CON_PCIE_IA_GEN4A_1_QCH,
+ QCH_CON_PCIE_IA_GEN4B_1_QCH,
+ QCH_CON_PPMU_HSI2_QCH,
+ QCH_CON_QE_MMC_CARD_HSI2_QCH,
+ QCH_CON_QE_PCIE_GEN4A_HSI2_QCH,
+ QCH_CON_QE_PCIE_GEN4B_HSI2_QCH,
+ QCH_CON_QE_UFS_EMBD_HSI2_QCH,
+ QCH_CON_SSMT_HSI2_QCH,
+ QCH_CON_SSMT_PCIE_IA_GEN4A_1_QCH,
+ QCH_CON_SSMT_PCIE_IA_GEN4B_1_QCH,
+ QCH_CON_SYSMMU_HSI2_QCH,
+ QCH_CON_SYSREG_HSI2_QCH,
+ QCH_CON_UASC_PCIE_GEN4A_DBI_1_QCH,
+ QCH_CON_UASC_PCIE_GEN4A_SLV_1_QCH,
+ QCH_CON_UASC_PCIE_GEN4B_DBI_1_QCH,
+ QCH_CON_UASC_PCIE_GEN4B_SLV_1_QCH,
+ QCH_CON_UFS_EMBD_QCH,
+ QCH_CON_UFS_EMBD_QCH_FMP,
+ QUEUE_CTRL_REG_BLK_HSI2_CMU_HSI2,
+};
+
+PNAME(mout_hsi2_bus_user_p) = { "oscclk", "dout_cmu_hsi2_bus" };
+PNAME(mout_hsi2_mmc_card_user_p) = { "oscclk", "dout_cmu_hsi2_mmc_card" };
+PNAME(mout_hsi2_pcie_user_p) = { "oscclk", "dout_cmu_hsi2_pcie" };
+PNAME(mout_hsi2_ufs_embd_user_p) = { "oscclk", "dout_cmu_hsi2_ufs_embd" };
+
+static const struct samsung_mux_clock hsi2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI2_BUS_USER, "mout_hsi2_bus_user", mout_hsi2_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_MMC_CARD_USER, "mout_hsi2_mmc_card_user",
+ mout_hsi2_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI2_PCIE_USER, "mout_hsi2_pcie_user",
+ mout_hsi2_pcie_user_p, PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI2_UFS_EMBD_USER, "mout_hsi2_ufs_embd_user",
+ mout_hsi2_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_PHY_REFCLK_IN,
+ "gout_hsi2_pcie_gen4_1_pcie_003_phy_refclk_in",
+ "mout_hsi2_pcie_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_PHY_REFCLK_IN,
+ "gout_hsi2_pcie_gen4_1_pcie_004_phy_refclk_in",
+ "mout_hsi2_pcie_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_ACLK,
+ "gout_hsi2_ssmt_pcie_ia_gen4a_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_PCLK,
+ "gout_hsi2_ssmt_pcie_ia_gen4a_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_ACLK,
+ "gout_hsi2_ssmt_pcie_ia_gen4b_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_PCLK,
+ "gout_hsi2_ssmt_pcie_ia_gen4b_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_D_TZPC_HSI2_PCLK,
+ "gout_hsi2_d_tzpc_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_GPC_HSI2_PCLK,
+ "gout_hsi2_gpc_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_GPIO_HSI2_PCLK,
+ "gout_hsi2_gpio_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK, 21,
+ CLK_IGNORE_UNUSED, 0),
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_HSI2_HSI2_CMU_HSI2_PCLK,
+ "gout_hsi2_hsi2_cmu_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_HSI2_LHM_AXI_P_HSI2_I_CLK,
+ "gout_hsi2_lhm_axi_p_hsi2_i_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_LHS_ACEL_D_HSI2_I_CLK,
+ "gout_hsi2_lhs_acel_d_hsi2_i_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI2_MMC_CARD_I_ACLK,
+ "gout_hsi2_mmc_card_i_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_MMC_CARD_SDCLKIN,
+ "gout_hsi2_mmc_card_sdclkin", "mout_hsi2_mmc_card_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_DBI_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_003_dbi_aclk_ug", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_MSTR_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_003_mstr_aclk_ug",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_SLV_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_003_slv_aclk_ug", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_I_DRIVER_APB_CLK,
+ "gout_hsi2_pcie_gen4_1_pcie_003_i_driver_apb_clk",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_DBI_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_004_dbi_aclk_ug", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_MSTR_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_004_mstr_aclk_ug",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_SLV_ACLK_UG,
+ "gout_hsi2_pcie_gen4_1_pcie_004_slv_aclk_ug", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_I_DRIVER_APB_CLK,
+ "gout_hsi2_pcie_gen4_1_pcie_004_i_driver_apb_clk",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PHY_UDBG_I_APB_PCLK,
+ "gout_hsi2_pcie_gen4_1_pcs_pma_phy_udbg_i_apb_pclk",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PIPE_PAL_PCIE_I_APB_PCLK,
+ "gout_hsi2_pcie_gen4_1_pcs_pma_pipe_pal_pcie_i_apb_pclk",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PCIEPHY210X2_QCH_I_APB_PCLK,
+ "gout_hsi2_pcie_gen4_1_pcs_pma_pciephy210x2_qch_i_apb_pclk",
+ "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_IA_GEN4A_1_I_CLK,
+ "gout_hsi2_pcie_ia_gen4a_1_i_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PCIE_IA_GEN4B_1_I_CLK,
+ "gout_hsi2_pcie_ia_gen4b_1_i_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PPMU_HSI2_ACLK,
+ "gout_hsi2_ppmu_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_PPMU_HSI2_PCLK,
+ "gout_hsi2_ppmu_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_ACLK,
+ "gout_hsi2_qe_mmc_card_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_PCLK,
+ "gout_hsi2_qe_mmc_card_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_ACLK,
+ "gout_hsi2_qe_pcie_gen4a_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_PCLK,
+ "gout_hsi2_qe_pcie_gen4a_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_ACLK,
+ "gout_hsi2_qe_pcie_gen4b_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_PCLK,
+ "gout_hsi2_qe_pcie_gen4b_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK,
+ "gout_hsi2_qe_ufs_embd_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK,
+ "gout_hsi2_qe_ufs_embd_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK,
+ "gout_hsi2_clk_hsi2_bus_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_HSI2_CLK_HSI2_OSCCLK_CLK,
+ "gout_hsi2_clk_hsi2_oscclk_clk", "oscclk",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_SSMT_HSI2_ACLK,
+ "gout_hsi2_ssmt_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_SSMT_HSI2_PCLK,
+ "gout_hsi2_ssmt_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_SYSMMU_HSI2_CLK_S2,
+ "gout_hsi2_sysmmu_hsi2_clk_s2", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI2_SYSREG_HSI2_PCLK,
+ "gout_hsi2_sysreg_hsi2_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK,
+ "gout_hsi2_uasc_pcie_gen4a_dbi_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_PCLK,
+ "gout_hsi2_uasc_pcie_gen4a_dbi_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_ACLK,
+ "gout_hsi2_uasc_pcie_gen4a_slv_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_PCLK,
+ "gout_hsi2_uasc_pcie_gen4a_slv_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_ACLK,
+ "gout_hsi2_uasc_pcie_gen4b_dbi_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_PCLK,
+ "gout_hsi2_uasc_pcie_gen4b_dbi_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_ACLK,
+ "gout_hsi2_uasc_pcie_gen4b_slv_1_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_PCLK,
+ "gout_hsi2_uasc_pcie_gen4b_slv_1_pclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UFS_EMBD_I_ACLK,
+ "gout_hsi2_ufs_embd_i_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO,
+ "gout_hsi2_ufs_embd_i_clk_unipro", "mout_hsi2_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK,
+ "gout_hsi2_ufs_embd_i_fmp_clk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ 21, 0, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_XIU_D_HSI2_ACLK,
+ "gout_hsi2_xiu_d_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* TODO: should have a driver for this */
+ GATE(CLK_GOUT_HSI2_XIU_P_HSI2_ACLK,
+ "gout_hsi2_xiu_p_hsi2_aclk", "mout_hsi2_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info hsi2_cmu_info __initconst = {
+ .mux_clks = hsi2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi2_mux_clks),
+ .gate_clks = hsi2_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi2_gate_clks),
+ .nr_clk_ids = CLKS_NR_HSI2,
+ .clk_regs = cmu_hsi2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_hsi2_clk_regs),
+ .clk_name = "bus",
+};
+
/* ---- CMU_MISC ------------------------------------------------------------ */
/* Register Offset definitions for CMU_MISC (0x10010000) */
@@ -2763,33 +3718,33 @@ static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
MUX(CLK_MOUT_PERIC0_USI0_UART_USER,
"mout_peric0_usi0_uart_user", mout_peric0_usi0_uart_user_p,
PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI14_USI_USER,
- "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI1_USI_USER,
- "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI2_USI_USER,
- "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI3_USI_USER,
- "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI4_USI_USER,
- "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI5_USI_USER,
- "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI6_USI_USER,
- "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI7_USI_USER,
- "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI8_USI_USER,
- "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI14_USI_USER,
+ "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI1_USI_USER,
+ "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI2_USI_USER,
+ "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI3_USI_USER,
+ "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI4_USI_USER,
+ "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI5_USI_USER,
+ "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI6_USI_USER,
+ "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI7_USI_USER,
+ "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI8_USI_USER,
+ "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
};
static const struct samsung_div_clock peric0_div_clks[] __initconst = {
@@ -2798,33 +3753,42 @@ static const struct samsung_div_clock peric0_div_clks[] __initconst = {
DIV(CLK_DOUT_PERIC0_USI0_UART,
"dout_peric0_usi0_uart", "mout_peric0_usi0_uart_user",
CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI14_USI,
- "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI1_USI,
- "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI2_USI,
- "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI3_USI,
- "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI4_USI,
- "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI5_USI,
- "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI6_USI,
- "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI7_USI,
- "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI8_USI,
- "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4),
+ DIV_F(CLK_DOUT_PERIC0_USI14_USI,
+ "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI1_USI,
+ "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI2_USI,
+ "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI3_USI,
+ "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI4_USI,
+ "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI5_USI,
+ "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI6_USI,
+ "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI7_USI,
+ "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI8_USI,
+ "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
@@ -2857,11 +3821,11 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0,
"gout_peric0_peric0_top0_ipclk_0", "dout_peric0_usi1_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1,
"gout_peric0_peric0_top0_ipclk_1", "dout_peric0_usi2_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10,
"gout_peric0_peric0_top0_ipclk_10", "dout_peric0_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
@@ -2889,27 +3853,27 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2,
"gout_peric0_peric0_top0_ipclk_2", "dout_peric0_usi3_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3,
"gout_peric0_peric0_top0_ipclk_3", "dout_peric0_usi4_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4,
"gout_peric0_peric0_top0_ipclk_4", "dout_peric0_usi5_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5,
"gout_peric0_peric0_top0_ipclk_5", "dout_peric0_usi6_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6,
"gout_peric0_peric0_top0_ipclk_6", "dout_peric0_usi7_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7,
"gout_peric0_peric0_top0_ipclk_7", "dout_peric0_usi8_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8,
"gout_peric0_peric0_top0_ipclk_8", "dout_peric0_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
@@ -2990,7 +3954,7 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2,
"gout_peric0_peric0_top1_ipclk_2", "dout_peric0_usi14_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
/* Disabling this clock makes the system hang. Mark the clock as critical. */
GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0,
"gout_peric0_peric0_top1_pclk_0", "mout_peric0_bus_user",
@@ -3230,47 +4194,53 @@ static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
MUX(CLK_MOUT_PERIC1_I3C_USER,
"mout_peric1_i3c_user", mout_peric1_nonbususer_p,
PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI0_USI_USER,
- "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI10_USI_USER,
- "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI11_USI_USER,
- "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI12_USI_USER,
- "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI13_USI_USER,
- "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI9_USI_USER,
- "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI0_USI_USER,
+ "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI10_USI_USER,
+ "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI11_USI_USER,
+ "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI12_USI_USER,
+ "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI13_USI_USER,
+ "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI9_USI_USER,
+ "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
};
static const struct samsung_div_clock peric1_div_clks[] __initconst = {
DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", "mout_peric1_i3c_user",
CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI0_USI,
- "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI10_USI,
- "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI11_USI,
- "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI12_USI,
- "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI13_USI,
- "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI9_USI,
- "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4),
+ DIV_F(CLK_DOUT_PERIC1_USI0_USI,
+ "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI10_USI,
+ "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI11_USI,
+ "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI12_USI,
+ "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI13_USI,
+ "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI9_USI,
+ "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
@@ -3305,27 +4275,27 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1,
"gout_peric1_peric1_top0_ipclk_1", "dout_peric1_usi0_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2,
"gout_peric1_peric1_top0_ipclk_2", "dout_peric1_usi9_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3,
"gout_peric1_peric1_top0_ipclk_3", "dout_peric1_usi10_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4,
"gout_peric1_peric1_top0_ipclk_4", "dout_peric1_usi11_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5,
"gout_peric1_peric1_top0_ipclk_5", "dout_peric1_usi12_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6,
"gout_peric1_peric1_top0_ipclk_6", "dout_peric1_usi13_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8,
"gout_peric1_peric1_top0_ipclk_8", "dout_peric1_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
@@ -3427,6 +4397,12 @@ static const struct of_device_id gs101_cmu_of_match[] = {
.compatible = "google,gs101-cmu-apm",
.data = &apm_cmu_info,
}, {
+ .compatible = "google,gs101-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "google,gs101-cmu-hsi2",
+ .data = &hsi2_cmu_info,
+ }, {
.compatible = "google,gs101-cmu-peric0",
.data = &peric0_cmu_info,
}, {
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index b6701905f254..afa5760ed3a1 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -139,7 +139,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
@@ -151,15 +151,6 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
- /*
- * Unconditionally add a clock lookup for the fixed rate clocks.
- * There are not many of these on any of Samsung platforms.
- */
- ret = clk_hw_register_clkdev(clk_hw, list->name, NULL);
- if (ret)
- pr_err("%s: failed to register clock lookup for %s",
- __func__, list->name);
}
}
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index a763309e6f12..fb06caa71f0a 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -133,7 +133,7 @@ struct samsung_mux_clock {
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
- .flags = (f) | CLK_SET_RATE_NO_REPARENT, \
+ .flags = f, \
.offset = o, \
.shift = s, \
.width = w, \
@@ -141,9 +141,16 @@ struct samsung_mux_clock {
}
#define MUX(_id, cname, pnames, o, s, w) \
- __MUX(_id, cname, pnames, o, s, w, 0, 0)
+ __MUX(_id, cname, pnames, o, s, w, CLK_SET_RATE_NO_REPARENT, 0)
#define MUX_F(_id, cname, pnames, o, s, w, f, mf) \
+ __MUX(_id, cname, pnames, o, s, w, (f) | CLK_SET_RATE_NO_REPARENT, mf)
+
+/* Used by MUX clocks where reparenting on clock rate change is allowed. */
+#define nMUX(_id, cname, pnames, o, s, w) \
+ __MUX(_id, cname, pnames, o, s, w, 0, 0)
+
+#define nMUX_F(_id, cname, pnames, o, s, w, f, mf) \
__MUX(_id, cname, pnames, o, s, w, f, mf)
/**
@@ -330,6 +337,7 @@ struct samsung_clock_reg_cache {
* @suspend_regs: list of clock registers to set before suspend
* @nr_suspend_regs: count of clock registers in @suspend_regs
* @clk_name: name of the parent clock needed for CMU register access
+ * @manual_plls: Enable manual control for PLL clocks
*/
struct samsung_cmu_info {
const struct samsung_pll_clock *pll_clks;
@@ -354,6 +362,9 @@ struct samsung_cmu_info {
const struct samsung_clk_reg_dump *suspend_regs;
unsigned int nr_suspend_regs;
const char *clk_name;
+
+ /* ARM64 Exynos CMUs */
+ bool manual_plls;
};
struct samsung_clk_provider *samsung_clk_init(struct device *dev,
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
new file mode 100644
index 000000000000..1cc49be71bdb
--- /dev/null
+++ b/drivers/clk/sophgo/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Common clock support for the Sophgo SoC family.
+
+config CLK_SOPHGO_CV1800
+ tristate "Support for the Sophgo CV1800 series SoCs clock controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller of the Sophgo CV18XX series
+ SoCs. The driver requires a 25 MHz oscillator to generate its clocks.
+ It includes PLLs, common clock functions and some vendor-specific
+ clocks for the IP blocks of the CV18XX series SoCs.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
new file mode 100644
index 000000000000..a50320764200
--- /dev/null
+++ b/drivers/clk/sophgo/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CLK_SOPHGO_CV1800) += clk-sophgo-cv1800.o
+
+clk-sophgo-cv1800-y += clk-cv1800.o
+clk-sophgo-cv1800-y += clk-cv18xx-common.o
+clk-sophgo-cv1800-y += clk-cv18xx-ip.o
+clk-sophgo-cv1800-y += clk-cv18xx-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
new file mode 100644
index 000000000000..2da4c24621cf
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -0,0 +1,1537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include "clk-cv1800.h"
+
+#include "clk-cv18xx-common.h"
+#include "clk-cv18xx-ip.h"
+#include "clk-cv18xx-pll.h"
+
+struct cv1800_clk_ctrl;
+
+struct cv1800_clk_desc {
+ struct clk_hw_onecell_data *clks_data;
+
+ int (*pre_init)(struct device *dev, void __iomem *base,
+ struct cv1800_clk_ctrl *ctrl,
+ const struct cv1800_clk_desc *desc);
+};
+
+struct cv1800_clk_ctrl {
+ const struct cv1800_clk_desc *desc;
+ spinlock_t lock;
+};
+
+#define CV1800_DIV_FLAG \
+ (CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST)
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct cv1800_clk_pll_limit pll_limits[] = {
+ {
+ .pre_div = _CV1800_PLL_LIMIT(1, 127),
+ .div = _CV1800_PLL_LIMIT(6, 127),
+ .post_div = _CV1800_PLL_LIMIT(1, 127),
+ .ictrl = _CV1800_PLL_LIMIT(0, 7),
+ .mode = _CV1800_PLL_LIMIT(0, 3),
+ },
+ {
+ .pre_div = _CV1800_PLL_LIMIT(1, 127),
+ .div = _CV1800_PLL_LIMIT(6, 127),
+ .post_div = _CV1800_PLL_LIMIT(1, 127),
+ .ictrl = _CV1800_PLL_LIMIT(0, 7),
+ .mode = _CV1800_PLL_LIMIT(0, 3),
+ },
+};
+
+static CV1800_INTEGRAL_PLL(clk_fpll, osc_parents,
+ REG_FPLL_CSR,
+ REG_PLL_G6_CTRL, 8,
+ REG_PLL_G6_STATUS, 2,
+ pll_limits,
+ CLK_IS_CRITICAL);
+
+static CV1800_INTEGRAL_PLL(clk_mipimpll, osc_parents,
+ REG_MIPIMPLL_CSR,
+ REG_PLL_G2_CTRL, 0,
+ REG_PLL_G2_STATUS, 0,
+ pll_limits,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_mipimpll_parents[] = {
+ { .hw = &clk_mipimpll.common.hw },
+};
+static const struct clk_parent_data clk_bypass_mipimpll_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_mipimpll.common.hw },
+};
+static const struct clk_parent_data clk_bypass_fpll_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_fpll.common.hw },
+};
+
+static struct cv1800_clk_pll_synthesizer clk_mpll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 2),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 0),
+ .ctrl = REG_MPLL_SSC_SYN_CTRL,
+ .set = REG_MPLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_mpll, clk_bypass_mipimpll_parents,
+ REG_MPLL_CSR,
+ REG_PLL_G6_CTRL, 0,
+ REG_PLL_G6_STATUS, 0,
+ pll_limits,
+ &clk_mpll_synthesizer,
+ CLK_IS_CRITICAL);
+
+static struct cv1800_clk_pll_synthesizer clk_tpll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 3),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 0),
+ .ctrl = REG_TPLL_SSC_SYN_CTRL,
+ .set = REG_TPLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_tpll, clk_bypass_mipimpll_parents,
+ REG_TPLL_CSR,
+ REG_PLL_G6_CTRL, 4,
+ REG_PLL_G6_STATUS, 1,
+ pll_limits,
+ &clk_tpll_synthesizer,
+ CLK_IS_CRITICAL);
+
+static struct cv1800_clk_pll_synthesizer clk_a0pll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 2),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0),
+ .ctrl = REG_A0PLL_SSC_SYN_CTRL,
+ .set = REG_A0PLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_a0pll, clk_bypass_mipimpll_parents,
+ REG_A0PLL_CSR,
+ REG_PLL_G2_CTRL, 4,
+ REG_PLL_G2_STATUS, 1,
+ pll_limits,
+ &clk_a0pll_synthesizer,
+ CLK_IS_CRITICAL);
+
+static struct cv1800_clk_pll_synthesizer clk_disppll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 3),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0),
+ .ctrl = REG_DISPPLL_SSC_SYN_CTRL,
+ .set = REG_DISPPLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_disppll, clk_bypass_mipimpll_parents,
+ REG_DISPPLL_CSR,
+ REG_PLL_G2_CTRL, 8,
+ REG_PLL_G2_STATUS, 2,
+ pll_limits,
+ &clk_disppll_synthesizer,
+ CLK_IS_CRITICAL);
+
+static struct cv1800_clk_pll_synthesizer clk_cam0pll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 4),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0),
+ .ctrl = REG_CAM0PLL_SSC_SYN_CTRL,
+ .set = REG_CAM0PLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_cam0pll, clk_bypass_mipimpll_parents,
+ REG_CAM0PLL_CSR,
+ REG_PLL_G2_CTRL, 12,
+ REG_PLL_G2_STATUS, 3,
+ pll_limits,
+ &clk_cam0pll_synthesizer,
+ CLK_IGNORE_UNUSED);
+
+static struct cv1800_clk_pll_synthesizer clk_cam1pll_synthesizer = {
+ .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 5),
+ .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0),
+ .ctrl = REG_CAM1PLL_SSC_SYN_CTRL,
+ .set = REG_CAM1PLL_SSC_SYN_SET,
+};
+static CV1800_FACTIONAL_PLL(clk_cam1pll, clk_bypass_mipimpll_parents,
+ REG_CAM1PLL_CSR,
+ REG_PLL_G2_CTRL, 16,
+ REG_PLL_G2_STATUS, 4,
+ pll_limits,
+ &clk_cam1pll_synthesizer,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_cam0pll_parents[] = {
+ { .hw = &clk_cam0pll.common.hw },
+};
+
+/* G2D */
+static CV1800_FIXED_DIV(clk_cam0pll_d2, clk_cam0pll_parents,
+ REG_CAM0PLL_CLK_CSR, 1,
+ 2,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED);
+static CV1800_FIXED_DIV(clk_cam0pll_d3, clk_cam0pll_parents,
+ REG_CAM0PLL_CLK_CSR, 2,
+ 3,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED);
+static CV1800_FIXED_DIV(clk_mipimpll_d3, clk_mipimpll_parents,
+ REG_MIPIMPLL_CLK_CSR, 2,
+ 3,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED);
+
+/* TPU */
+static const struct clk_parent_data clk_tpu_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_tpll.common.hw },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_tpu, clk_tpu_parents,
+ REG_CLK_EN_0, 4,
+ REG_DIV_CLK_TPU, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_DIV_CLK_TPU, 8, 2,
+ REG_CLK_BYP_0, 3,
+ 0);
+static CV1800_GATE(clk_tpu_fab, clk_mipimpll_parents,
+ REG_CLK_EN_0, 5,
+ 0);
+
+/* FABRIC_AXI6 */
+static CV1800_BYPASS_DIV(clk_axi6, clk_bypass_fpll_parents,
+ REG_CLK_EN_2, 2,
+ REG_DIV_CLK_AXI6, 16, 4, 15, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 20,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_axi6_bus_parents[] = {
+ { .hw = &clk_axi6.div.common.hw },
+};
+static const struct clk_parent_data clk_bypass_axi6_bus_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_axi6.div.common.hw },
+};
+
+/* FABRIC_AXI4 */
+static const struct clk_parent_data clk_axi4_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_fpll.common.hw },
+ { .hw = &clk_disppll.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_axi4, clk_axi4_parents,
+ REG_CLK_EN_2, 1,
+ REG_DIV_CLK_AXI4, 16, 4, 5, CV1800_DIV_FLAG,
+ REG_DIV_CLK_AXI4, 8, 2,
+ REG_CLK_BYP_0, 19,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_axi4_bus_parents[] = {
+ { .hw = &clk_axi4.mux.common.hw },
+};
+
+/* XTAL_MISC */
+static CV1800_GATE(clk_xtal_misc, osc_parents,
+ REG_CLK_EN_0, 14,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_timer_parents[] = {
+ { .hw = &clk_xtal_misc.common.hw },
+};
+
+/* TOP */
+static const struct clk_parent_data clk_cam0_200_parents[] = {
+ { .index = 0 },
+ { .index = 0 },
+ { .hw = &clk_disppll.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_cam0_200, clk_cam0_200_parents,
+ REG_CLK_EN_1, 13,
+ REG_DIV_CLK_CAM0_200, 16, 4, 1, CV1800_DIV_FLAG,
+ REG_DIV_CLK_CAM0_200, 8, 2,
+ REG_CLK_BYP_0, 16,
+ CLK_IS_CRITICAL);
+static CV1800_DIV(clk_1m, osc_parents,
+ REG_CLK_EN_3, 5,
+ REG_DIV_CLK_1M, 16, 6, 25, CV1800_DIV_FLAG,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_pm, clk_axi6_bus_parents,
+ REG_CLK_EN_3, 8,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer0, clk_timer_parents,
+ REG_CLK_EN_3, 9,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer1, clk_timer_parents,
+ REG_CLK_EN_3, 10,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer2, clk_timer_parents,
+ REG_CLK_EN_3, 11,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer3, clk_timer_parents,
+ REG_CLK_EN_3, 12,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer4, clk_timer_parents,
+ REG_CLK_EN_3, 13,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer5, clk_timer_parents,
+ REG_CLK_EN_3, 14,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer6, clk_timer_parents,
+ REG_CLK_EN_3, 15,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_timer7, clk_timer_parents,
+ REG_CLK_EN_3, 16,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_parents_1m[] = {
+ { .hw = &clk_1m.common.hw },
+};
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &clk_cam0_200.mux.common.hw },
+};
+
+/* AHB ROM */
+static CV1800_GATE(clk_ahb_rom, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 6,
+ 0);
+
+/* RTC */
+static CV1800_GATE(clk_rtc_25m, osc_parents,
+ REG_CLK_EN_0, 8,
+ CLK_IS_CRITICAL);
+static CV1800_BYPASS_DIV(clk_src_rtc_sys_0, clk_bypass_fpll_parents,
+ REG_CLK_EN_4, 6,
+ REG_DIV_CLK_RTCSYS_SRC_0, 16, 4, 5, CV1800_DIV_FLAG,
+ REG_CLK_BYP_1, 5,
+ CLK_IS_CRITICAL);
+
+/* TEMPSEN */
+static CV1800_GATE(clk_tempsen, osc_parents,
+ REG_CLK_EN_0, 9,
+ 0);
+
+/* SARADC */
+static CV1800_GATE(clk_saradc, osc_parents,
+ REG_CLK_EN_0, 10,
+ 0);
+
+/* EFUSE */
+static CV1800_GATE(clk_efuse, osc_parents,
+ REG_CLK_EN_0, 11,
+ 0);
+static CV1800_GATE(clk_apb_efuse, osc_parents,
+ REG_CLK_EN_0, 12,
+ 0);
+
+/* WDT */
+static CV1800_GATE(clk_apb_wdt, osc_parents,
+ REG_CLK_EN_1, 7,
+ CLK_IS_CRITICAL);
+
+/* WGN */
+static CV1800_GATE(clk_wgn, osc_parents,
+ REG_CLK_EN_3, 22,
+ 0);
+static CV1800_GATE(clk_wgn0, osc_parents,
+ REG_CLK_EN_3, 23,
+ 0);
+static CV1800_GATE(clk_wgn1, osc_parents,
+ REG_CLK_EN_3, 24,
+ 0);
+static CV1800_GATE(clk_wgn2, osc_parents,
+ REG_CLK_EN_3, 25,
+ 0);
+
+/* KEYSCAN */
+static CV1800_GATE(clk_keyscan, osc_parents,
+ REG_CLK_EN_3, 26,
+ 0);
+
+/* EMMC */
+static CV1800_GATE(clk_axi4_emmc, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 15,
+ 0);
+static CV1800_BYPASS_MUX(clk_emmc, clk_axi4_parents,
+ REG_CLK_EN_0, 16,
+ REG_DIV_CLK_EMMC, 16, 5, 15, CV1800_DIV_FLAG,
+ REG_DIV_CLK_EMMC, 8, 2,
+ REG_CLK_BYP_0, 5,
+ 0);
+static CV1800_DIV(clk_emmc_100k, clk_parents_1m,
+ REG_CLK_EN_0, 17,
+ REG_DIV_CLK_EMMC_100K, 16, 8, 10, CV1800_DIV_FLAG,
+ 0);
+
+/* SD */
+static CV1800_GATE(clk_axi4_sd0, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 18,
+ 0);
+static CV1800_BYPASS_MUX(clk_sd0, clk_axi4_parents,
+ REG_CLK_EN_0, 19,
+ REG_DIV_CLK_SD0, 16, 5, 15, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SD0, 8, 2,
+ REG_CLK_BYP_0, 6,
+ 0);
+static CV1800_DIV(clk_sd0_100k, clk_parents_1m,
+ REG_CLK_EN_0, 20,
+ REG_DIV_CLK_SD0_100K, 16, 8, 10, CV1800_DIV_FLAG,
+ 0);
+static CV1800_GATE(clk_axi4_sd1, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 21,
+ 0);
+static CV1800_BYPASS_MUX(clk_sd1, clk_axi4_parents,
+ REG_CLK_EN_0, 22,
+ REG_DIV_CLK_SD1, 16, 5, 15, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SD1, 8, 2,
+ REG_CLK_BYP_0, 7,
+ 0);
+static CV1800_DIV(clk_sd1_100k, clk_parents_1m,
+ REG_CLK_EN_0, 23,
+ REG_DIV_CLK_SD1_100K, 16, 8, 10, CV1800_DIV_FLAG,
+ 0);
+
+/* SPI NAND */
+static CV1800_BYPASS_MUX(clk_spi_nand, clk_axi4_parents,
+ REG_CLK_EN_0, 24,
+ REG_DIV_CLK_SPI_NAND, 16, 5, 8, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SPI_NAND, 8, 2,
+ REG_CLK_BYP_0, 8,
+ 0);
+
+/* GPIO */
+static CV1800_DIV(clk_gpio_db, clk_parents_1m,
+ REG_CLK_EN_0, 31,
+ REG_DIV_CLK_GPIO_DB, 16, 16, 10, CV1800_DIV_FLAG,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_apb_gpio, clk_axi6_bus_parents,
+ REG_CLK_EN_0, 29,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_apb_gpio_intr, clk_axi6_bus_parents,
+ REG_CLK_EN_0, 30,
+ CLK_IS_CRITICAL);
+
+/* ETH */
+static CV1800_BYPASS_DIV(clk_eth0_500m, clk_bypass_fpll_parents,
+ REG_CLK_EN_0, 25,
+ REG_DIV_CLK_GPIO_DB, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 9,
+ 0);
+static CV1800_GATE(clk_axi4_eth0, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 26,
+ 0);
+static CV1800_BYPASS_DIV(clk_eth1_500m, clk_bypass_fpll_parents,
+ REG_CLK_EN_0, 27,
+ REG_DIV_CLK_GPIO_DB, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 10,
+ 0);
+static CV1800_GATE(clk_axi4_eth1, clk_axi4_bus_parents,
+ REG_CLK_EN_0, 28,
+ 0);
+
+/* SF */
+static CV1800_GATE(clk_ahb_sf, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 0,
+ 0);
+static CV1800_GATE(clk_ahb_sf1, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 27,
+ 0);
+
+/* AUDSRC */
+static CV1800_ACLK(clk_a24m, clk_mipimpll_parents,
+ REG_APLL_FRAC_DIV_CTRL, 0,
+ REG_APLL_FRAC_DIV_CTRL, 3,
+ REG_APLL_FRAC_DIV_CTRL, 1,
+ REG_APLL_FRAC_DIV_CTRL, 2,
+ REG_APLL_FRAC_DIV_M, 0, 22, CV1800_DIV_FLAG,
+ REG_APLL_FRAC_DIV_N, 0, 22, CV1800_DIV_FLAG,
+ 24576000,
+ 0);
+
+static const struct clk_parent_data clk_aud_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_a24m.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_audsrc, clk_aud_parents,
+ REG_CLK_EN_4, 1,
+ REG_DIV_CLK_AUDSRC, 16, 8, 18, CV1800_DIV_FLAG,
+ REG_DIV_CLK_AUDSRC, 8, 2,
+ REG_CLK_BYP_1, 2,
+ 0);
+static CV1800_GATE(clk_apb_audsrc, clk_axi4_bus_parents,
+ REG_CLK_EN_4, 2,
+ 0);
+
+/* SDMA */
+static CV1800_GATE(clk_sdma_axi, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 1,
+ 0);
+static CV1800_BYPASS_MUX(clk_sdma_aud0, clk_aud_parents,
+ REG_CLK_EN_1, 2,
+ REG_DIV_CLK_SDMA_AUD0, 16, 8, 18, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SDMA_AUD0, 8, 2,
+ REG_CLK_BYP_0, 11,
+ 0);
+static CV1800_BYPASS_MUX(clk_sdma_aud1, clk_aud_parents,
+ REG_CLK_EN_1, 3,
+ REG_DIV_CLK_SDMA_AUD1, 16, 8, 18, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SDMA_AUD1, 8, 2,
+ REG_CLK_BYP_0, 12,
+ 0);
+static CV1800_BYPASS_MUX(clk_sdma_aud2, clk_aud_parents,
+ REG_CLK_EN_1, 3,
+ REG_DIV_CLK_SDMA_AUD2, 16, 8, 18, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SDMA_AUD2, 8, 2,
+ REG_CLK_BYP_0, 13,
+ 0);
+static CV1800_BYPASS_MUX(clk_sdma_aud3, clk_aud_parents,
+ REG_CLK_EN_1, 3,
+ REG_DIV_CLK_SDMA_AUD3, 16, 8, 18, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SDMA_AUD3, 8, 2,
+ REG_CLK_BYP_0, 14,
+ 0);
+
+/* SPI */
+static CV1800_GATE(clk_apb_spi0, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 9,
+ 0);
+static CV1800_GATE(clk_apb_spi1, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 10,
+ 0);
+static CV1800_GATE(clk_apb_spi2, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 11,
+ 0);
+static CV1800_GATE(clk_apb_spi3, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 12,
+ 0);
+static CV1800_BYPASS_DIV(clk_spi, clk_bypass_fpll_parents,
+ REG_CLK_EN_3, 6,
+ REG_DIV_CLK_SPI, 16, 6, 8, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 30,
+ 0);
+
+/* UART */
+static CV1800_GATE(clk_uart0, clk_uart_parents,
+ REG_CLK_EN_1, 14,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_apb_uart0, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 15,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_uart1, clk_uart_parents,
+ REG_CLK_EN_1, 16,
+ 0);
+static CV1800_GATE(clk_apb_uart1, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 17,
+ 0);
+static CV1800_GATE(clk_uart2, clk_uart_parents,
+ REG_CLK_EN_1, 18,
+ 0);
+static CV1800_GATE(clk_apb_uart2, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 19,
+ 0);
+static CV1800_GATE(clk_uart3, clk_uart_parents,
+ REG_CLK_EN_1, 20,
+ 0);
+static CV1800_GATE(clk_apb_uart3, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 21,
+ 0);
+static CV1800_GATE(clk_uart4, clk_uart_parents,
+ REG_CLK_EN_1, 22,
+ 0);
+static CV1800_GATE(clk_apb_uart4, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 23,
+ 0);
+
+/* I2S */
+static CV1800_GATE(clk_apb_i2s0, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 24,
+ 0);
+static CV1800_GATE(clk_apb_i2s1, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 25,
+ 0);
+static CV1800_GATE(clk_apb_i2s2, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 26,
+ 0);
+static CV1800_GATE(clk_apb_i2s3, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 27,
+ 0);
+
+/* DEBUG */
+static CV1800_GATE(clk_debug, osc_parents,
+ REG_CLK_EN_0, 13,
+ CLK_IS_CRITICAL);
+static CV1800_BYPASS_DIV(clk_ap_debug, clk_bypass_fpll_parents,
+ REG_CLK_EN_4, 5,
+ REG_DIV_CLK_AP_DEBUG, 16, 4, 5, CV1800_DIV_FLAG,
+ REG_CLK_BYP_1, 4,
+ CLK_IS_CRITICAL);
+
+/* DDR */
+static CV1800_GATE(clk_ddr_axi_reg, clk_axi6_bus_parents,
+ REG_CLK_EN_0, 7,
+ CLK_IS_CRITICAL);
+
+/* I2C */
+static CV1800_GATE(clk_apb_i2c, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 6,
+ 0);
+static CV1800_BYPASS_DIV(clk_i2c, clk_bypass_axi6_bus_parents,
+ REG_CLK_EN_3, 7,
+ REG_DIV_CLK_I2C, 16, 4, 1, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 31,
+ 0);
+static CV1800_GATE(clk_apb_i2c0, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 17,
+ 0);
+static CV1800_GATE(clk_apb_i2c1, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 18,
+ 0);
+static CV1800_GATE(clk_apb_i2c2, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 19,
+ 0);
+static CV1800_GATE(clk_apb_i2c3, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 20,
+ 0);
+static CV1800_GATE(clk_apb_i2c4, clk_axi4_bus_parents,
+ REG_CLK_EN_3, 21,
+ 0);
+
+/* USB */
+static CV1800_GATE(clk_axi4_usb, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 28,
+ 0);
+static CV1800_GATE(clk_apb_usb, clk_axi4_bus_parents,
+ REG_CLK_EN_1, 29,
+ 0);
+static CV1800_BYPASS_FIXED_DIV(clk_usb_125m, clk_bypass_fpll_parents,
+ REG_CLK_EN_1, 30,
+ 12,
+ REG_CLK_BYP_0, 17,
+ CLK_SET_RATE_PARENT);
+static CV1800_FIXED_DIV(clk_usb_33k, clk_parents_1m,
+ REG_CLK_EN_1, 31,
+ 3,
+ 0);
+static CV1800_BYPASS_FIXED_DIV(clk_usb_12m, clk_bypass_fpll_parents,
+ REG_CLK_EN_2, 0,
+ 125,
+ REG_CLK_BYP_0, 18,
+ CLK_SET_RATE_PARENT);
+
+/* VIP SYS */
+static const struct clk_parent_data clk_vip_sys_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_cam0pll.common.hw },
+ { .hw = &clk_disppll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+static const struct clk_parent_data clk_disp_vip_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_disppll.common.hw },
+};
+
+static CV1800_BYPASS_DIV(clk_dsi_esc, clk_bypass_axi6_bus_parents,
+ REG_CLK_EN_2, 3,
+ REG_DIV_CLK_DSI_ESC, 16, 4, 5, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 21,
+ 0);
+static CV1800_BYPASS_MUX(clk_axi_vip, clk_vip_sys_parents,
+ REG_CLK_EN_2, 4,
+ REG_DIV_CLK_AXI_VIP, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_DIV_CLK_AXI_VIP, 8, 2,
+ REG_CLK_BYP_0, 22,
+ 0);
+
+static const struct clk_parent_data clk_axi_vip_bus_parents[] = {
+ { .hw = &clk_axi_vip.mux.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_src_vip_sys_0, clk_vip_sys_parents,
+ REG_CLK_EN_2, 5,
+ REG_DIV_CLK_SRC_VIP_SYS_0, 16, 4, 6, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SRC_VIP_SYS_0, 8, 2,
+ REG_CLK_BYP_0, 23,
+ 0);
+static CV1800_BYPASS_MUX(clk_src_vip_sys_1, clk_vip_sys_parents,
+ REG_CLK_EN_2, 6,
+ REG_DIV_CLK_SRC_VIP_SYS_1, 16, 4, 6, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SRC_VIP_SYS_1, 8, 2,
+ REG_CLK_BYP_0, 24,
+ 0);
+static CV1800_BYPASS_DIV(clk_disp_src_vip, clk_disp_vip_parents,
+ REG_CLK_EN_2, 7,
+ REG_DIV_CLK_DISP_SRC_VIP, 16, 4, 8, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 25,
+ 0);
+static CV1800_BYPASS_MUX(clk_src_vip_sys_2, clk_vip_sys_parents,
+ REG_CLK_EN_3, 29,
+ REG_DIV_CLK_SRC_VIP_SYS_2, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SRC_VIP_SYS_2, 8, 2,
+ REG_CLK_BYP_1, 1,
+ 0);
+static CV1800_GATE(clk_csi_mac0_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 18,
+ 0);
+static CV1800_GATE(clk_csi_mac1_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 19,
+ 0);
+static CV1800_GATE(clk_isp_top_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 20,
+ 0);
+static CV1800_GATE(clk_img_d_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 21,
+ 0);
+static CV1800_GATE(clk_img_v_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 22,
+ 0);
+static CV1800_GATE(clk_sc_top_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 23,
+ 0);
+static CV1800_GATE(clk_sc_d_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 24,
+ 0);
+static CV1800_GATE(clk_sc_v1_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 25,
+ 0);
+static CV1800_GATE(clk_sc_v2_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 26,
+ 0);
+static CV1800_GATE(clk_sc_v3_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 27,
+ 0);
+static CV1800_GATE(clk_dwa_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 28,
+ 0);
+static CV1800_GATE(clk_bt_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 29,
+ 0);
+static CV1800_GATE(clk_disp_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 30,
+ 0);
+static CV1800_GATE(clk_dsi_mac_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_2, 31,
+ 0);
+static CV1800_GATE(clk_lvds0_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 0,
+ 0);
+static CV1800_GATE(clk_lvds1_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 1,
+ 0);
+static CV1800_GATE(clk_csi0_rx_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 2,
+ 0);
+static CV1800_GATE(clk_csi1_rx_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 3,
+ 0);
+static CV1800_GATE(clk_pad_vi_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 4,
+ 0);
+static CV1800_GATE(clk_pad_vi1_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_3, 30,
+ 0);
+static CV1800_GATE(clk_cfg_reg_vip, clk_axi6_bus_parents,
+ REG_CLK_EN_3, 31,
+ 0);
+static CV1800_GATE(clk_pad_vi2_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 7,
+ 0);
+static CV1800_GATE(clk_csi_be_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 8,
+ 0);
+static CV1800_GATE(clk_vip_ip0, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 9,
+ 0);
+static CV1800_GATE(clk_vip_ip1, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 10,
+ 0);
+static CV1800_GATE(clk_vip_ip2, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 11,
+ 0);
+static CV1800_GATE(clk_vip_ip3, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 12,
+ 0);
+static CV1800_BYPASS_MUX(clk_src_vip_sys_3, clk_vip_sys_parents,
+ REG_CLK_EN_4, 15,
+ REG_DIV_CLK_SRC_VIP_SYS_3, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SRC_VIP_SYS_3, 8, 2,
+ REG_CLK_BYP_1, 8,
+ 0);
+static CV1800_BYPASS_MUX(clk_src_vip_sys_4, clk_vip_sys_parents,
+ REG_CLK_EN_4, 16,
+ REG_DIV_CLK_SRC_VIP_SYS_4, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_DIV_CLK_SRC_VIP_SYS_4, 8, 2,
+ REG_CLK_BYP_1, 9,
+ 0);
+static CV1800_GATE(clk_ive_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 17,
+ 0);
+static CV1800_GATE(clk_raw_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 18,
+ 0);
+static CV1800_GATE(clk_osdc_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 19,
+ 0);
+static CV1800_GATE(clk_csi_mac2_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 20,
+ 0);
+static CV1800_GATE(clk_cam0_vip, clk_axi_vip_bus_parents,
+ REG_CLK_EN_4, 21,
+ 0);
+
+/* CAM OUT */
+static const struct clk_parent_data clk_cam_parents[] = {
+ { .hw = &clk_cam0pll.common.hw },
+ { .hw = &clk_cam0pll_d2.common.hw },
+ { .hw = &clk_cam0pll_d3.common.hw },
+ { .hw = &clk_mipimpll_d3.common.hw },
+};
+
+static CV1800_MUX(clk_cam0, clk_cam_parents,
+ REG_CLK_EN_2, 16,
+ REG_CLK_CAM0_SRC_DIV, 16, 6, 0, CV1800_DIV_FLAG,
+ REG_CLK_CAM0_SRC_DIV, 8, 2,
+ CLK_IGNORE_UNUSED);
+static CV1800_MUX(clk_cam1, clk_cam_parents,
+ REG_CLK_EN_2, 17,
+ REG_CLK_CAM1_SRC_DIV, 16, 6, 0, CV1800_DIV_FLAG,
+ REG_CLK_CAM1_SRC_DIV, 8, 2,
+ CLK_IGNORE_UNUSED);
+
+/* VIDEO SUBSYS */
+static const struct clk_parent_data clk_axi_video_codec_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_cam1pll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+static const struct clk_parent_data clk_vc_src0_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_disppll.common.hw },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_cam1pll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+static const struct clk_parent_data clk_vc_src1_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_cam1pll.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_axi_video_codec, clk_axi_video_codec_parents,
+ REG_CLK_EN_2, 8,
+ REG_DIV_CLK_AXI_VIDEO_CODEC, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_AXI_VIDEO_CODEC, 8, 2,
+ REG_CLK_BYP_0, 26,
+ 0);
+
+static const struct clk_parent_data clk_axi_video_codec_bus_parents[] = {
+ { .hw = &clk_axi_video_codec.mux.common.hw },
+};
+
+static CV1800_BYPASS_MUX(clk_vc_src0, clk_vc_src0_parents,
+ REG_CLK_EN_2, 9,
+ REG_DIV_CLK_VC_SRC0, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_VC_SRC0, 8, 2,
+ REG_CLK_BYP_0, 27,
+ 0);
+
+static CV1800_GATE(clk_h264c, clk_axi_video_codec_bus_parents,
+ REG_CLK_EN_2, 10,
+ 0);
+static CV1800_GATE(clk_h265c, clk_axi_video_codec_bus_parents,
+ REG_CLK_EN_2, 11,
+ 0);
+static CV1800_GATE(clk_jpeg, clk_axi_video_codec_bus_parents,
+ REG_CLK_EN_2, 12,
+ CLK_IGNORE_UNUSED);
+static CV1800_GATE(clk_apb_jpeg, clk_axi6_bus_parents,
+ REG_CLK_EN_2, 13,
+ CLK_IGNORE_UNUSED);
+static CV1800_GATE(clk_apb_h264c, clk_axi6_bus_parents,
+ REG_CLK_EN_2, 14,
+ 0);
+static CV1800_GATE(clk_apb_h265c, clk_axi6_bus_parents,
+ REG_CLK_EN_2, 15,
+ 0);
+static CV1800_BYPASS_FIXED_DIV(clk_vc_src1, clk_vc_src1_parents,
+ REG_CLK_EN_3, 28,
+ 2,
+ REG_CLK_BYP_1, 0,
+ CLK_SET_RATE_PARENT);
+static CV1800_BYPASS_FIXED_DIV(clk_vc_src2, clk_bypass_fpll_parents,
+ REG_CLK_EN_4, 3,
+ 3,
+ REG_CLK_BYP_1, 3,
+ CLK_SET_RATE_PARENT);
+
+/* VC SYS */
+static CV1800_GATE(clk_cfg_reg_vc, clk_axi6_bus_parents,
+ REG_CLK_EN_4, 0,
+ CLK_IGNORE_UNUSED);
+
+/* PWM */
+static CV1800_BYPASS_MUX(clk_pwm_src, clk_axi4_parents,
+ REG_CLK_EN_4, 4,
+ REG_DIV_CLK_PWM_SRC_0, 16, 6, 10, CV1800_DIV_FLAG,
+ REG_DIV_CLK_PWM_SRC_0, 8, 2,
+ REG_CLK_BYP_0, 15,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_pwm_parents[] = {
+ { .hw = &clk_pwm_src.mux.common.hw },
+};
+
+static CV1800_GATE(clk_pwm, clk_pwm_parents,
+ REG_CLK_EN_1, 8,
+ CLK_IS_CRITICAL);
+
+/* C906 */
+static const struct clk_parent_data clk_c906_0_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_tpll.common.hw },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_mpll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+static const struct clk_parent_data clk_c906_1_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_tpll.common.hw },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_disppll.common.hw },
+ { .hw = &clk_mpll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+
+static const s8 clk_c906_parent2sel[] = {
+ -1, /* osc */
+ 0, /* mux 0: clk_tpll(c906_0), clk_tpll(c906_1) */
+ 0, /* mux 0: clk_a0pll(c906_0), clk_a0pll(c906_1) */
+ 0, /* mux 0: clk_mipimpll(c906_0), clk_disppll(c906_1) */
+ 0, /* mux 0: clk_mpll(c906_0), clk_mpll(c906_1) */
+ 1 /* mux 1: clk_fpll(c906_0), clk_fpll(c906_1) */
+};
+
+static const u8 clk_c906_sel2parent[2][4] = {
+ [0] = {
+ 1,
+ 2,
+ 3,
+ 4
+ },
+ [1] = {
+ 5,
+ 5,
+ 5,
+ 5
+ },
+};
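
[Editor's note] The two tables above capture the indirection behind the multiplexed-mux (MMUX) clocks: clk_c906_parent2sel maps a clock-framework parent index to one of the two hardware mux groups (-1 meaning the bypassed oscillator), and clk_c906_sel2parent maps a hardware mux field value back to a parent index. The driver performs these lookups in mmux_get_parent()/mmux_set_parent() later in this patch; the standalone sketch below only illustrates the round trip, reusing the C906 tables verbatim (main() and its printout are illustrative, not driver code).

#include <stdio.h>

/* Tables copied from the C906 clock definitions above. */
static const signed char parent2sel[] = { -1, 0, 0, 0, 0, 1 };
static const unsigned char sel2parent[2][4] = {
	{ 1, 2, 3, 4 },		/* mux group 0: per-core PLL choices */
	{ 5, 5, 5, 5 },		/* mux group 1: clk_fpll only */
};

int main(void)
{
	int parent = 3;			/* e.g. clk_mipimpll on clk_c906_0 */
	int sel = parent2sel[parent];	/* -> mux group 0 */

	if (sel < 0) {
		printf("parent %d uses the bypass path\n", parent);
		return 0;
	}

	/* The mux field is programmed so the reverse lookup yields the same parent. */
	for (int field = 0; field < 4; field++)
		if (sel2parent[sel][field] == parent)
			printf("parent %d -> mux %d, field %d\n", parent, sel, field);

	return 0;
}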
+
+static CV1800_MMUX(clk_c906_0, clk_c906_0_parents,
+ REG_CLK_EN_4, 13,
+ REG_DIV_CLK_C906_0_0, 16, 4, 1, CV1800_DIV_FLAG,
+ REG_DIV_CLK_C906_0_1, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_C906_0_0, 8, 2,
+ REG_DIV_CLK_C906_0_1, 8, 2,
+ REG_CLK_BYP_1, 6,
+ REG_CLK_SEL_0, 23,
+ clk_c906_parent2sel,
+ clk_c906_sel2parent[0], clk_c906_sel2parent[1],
+ CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+static CV1800_MMUX(clk_c906_1, clk_c906_1_parents,
+ REG_CLK_EN_4, 14,
+ REG_DIV_CLK_C906_1_0, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_C906_1_1, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_DIV_CLK_C906_1_0, 8, 2,
+ REG_DIV_CLK_C906_1_1, 8, 2,
+ REG_CLK_BYP_1, 7,
+ REG_CLK_SEL_0, 24,
+ clk_c906_parent2sel,
+ clk_c906_sel2parent[0], clk_c906_sel2parent[1],
+ CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+
+/* A53 */
+static CV1800_BYPASS_DIV(clk_cpu_axi0, clk_axi4_parents,
+ REG_CLK_EN_0, 1,
+ REG_DIV_CLK_CPU_AXI0, 16, 4, 3, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 1,
+ CLK_IS_CRITICAL);
+static CV1800_BYPASS_DIV(clk_cpu_gic, clk_bypass_fpll_parents,
+ REG_CLK_EN_0, 2,
+ REG_DIV_CLK_CPU_GIC, 16, 4, 5, CV1800_DIV_FLAG,
+ REG_CLK_BYP_0, 2,
+ CLK_IS_CRITICAL);
+static CV1800_GATE(clk_xtal_ap, osc_parents,
+ REG_CLK_EN_0, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data clk_a53_parents[] = {
+ { .index = 0 },
+ { .hw = &clk_tpll.common.hw },
+ { .hw = &clk_a0pll.common.hw },
+ { .hw = &clk_mipimpll.common.hw },
+ { .hw = &clk_mpll.common.hw },
+ { .hw = &clk_fpll.common.hw },
+};
+
+static const s8 clk_a53_parent2sel[] = {
+ -1, /* osc */
+ 0, /* mux 0: clk_tpll */
+ 0, /* mux 0: clk_a0pll */
+ 0, /* mux 0: clk_mipimpll */
+ 0, /* mux 0: clk_mpll */
+ 1 /* mux 1: clk_fpll */
+};
+
+static const u8 clk_a53_sel2parent[2][4] = {
+ [0] = {
+ 1,
+ 2,
+ 3,
+ 4
+ },
+ [1] = {
+ 5,
+ 5,
+ 5,
+ 5
+ },
+};
+
+/*
+ * Clock for the A53 CPU in the CV18XX/SG200X series.
+ * On the CV180X and CV181X series this clock is unused, but it cannot
+ * be set to bypass mode, or the SoC will hang.
+ */
+static CV1800_MMUX(clk_a53, clk_a53_parents,
+ REG_CLK_EN_0, 0,
+ REG_DIV_CLK_A53_0, 16, 4, 1, CV1800_DIV_FLAG,
+ REG_DIV_CLK_A53_1, 16, 4, 2, CV1800_DIV_FLAG,
+ REG_DIV_CLK_A53_0, 8, 2,
+ REG_DIV_CLK_A53_1, 8, 2,
+ REG_CLK_BYP_0, 0,
+ REG_CLK_SEL_0, 0,
+ clk_a53_parent2sel,
+ clk_a53_sel2parent[0], clk_a53_sel2parent[1],
+ CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+
+static struct clk_hw_onecell_data cv1800_hw_clks = {
+ .num = CV1800_CLK_MAX,
+ .hws = {
+ [CLK_MPLL] = &clk_mpll.common.hw,
+ [CLK_TPLL] = &clk_tpll.common.hw,
+ [CLK_FPLL] = &clk_fpll.common.hw,
+ [CLK_MIPIMPLL] = &clk_mipimpll.common.hw,
+ [CLK_A0PLL] = &clk_a0pll.common.hw,
+ [CLK_DISPPLL] = &clk_disppll.common.hw,
+ [CLK_CAM0PLL] = &clk_cam0pll.common.hw,
+ [CLK_CAM1PLL] = &clk_cam1pll.common.hw,
+
+ [CLK_MIPIMPLL_D3] = &clk_mipimpll_d3.common.hw,
+ [CLK_CAM0PLL_D2] = &clk_cam0pll_d2.common.hw,
+ [CLK_CAM0PLL_D3] = &clk_cam0pll_d3.common.hw,
+
+ [CLK_TPU] = &clk_tpu.mux.common.hw,
+ [CLK_TPU_FAB] = &clk_tpu_fab.common.hw,
+ [CLK_AHB_ROM] = &clk_ahb_rom.common.hw,
+ [CLK_DDR_AXI_REG] = &clk_ddr_axi_reg.common.hw,
+ [CLK_RTC_25M] = &clk_rtc_25m.common.hw,
+ [CLK_SRC_RTC_SYS_0] = &clk_src_rtc_sys_0.div.common.hw,
+ [CLK_TEMPSEN] = &clk_tempsen.common.hw,
+ [CLK_SARADC] = &clk_saradc.common.hw,
+ [CLK_EFUSE] = &clk_efuse.common.hw,
+ [CLK_APB_EFUSE] = &clk_apb_efuse.common.hw,
+ [CLK_DEBUG] = &clk_debug.common.hw,
+ [CLK_AP_DEBUG] = &clk_ap_debug.div.common.hw,
+ [CLK_XTAL_MISC] = &clk_xtal_misc.common.hw,
+ [CLK_AXI4_EMMC] = &clk_axi4_emmc.common.hw,
+ [CLK_EMMC] = &clk_emmc.mux.common.hw,
+ [CLK_EMMC_100K] = &clk_emmc_100k.common.hw,
+ [CLK_AXI4_SD0] = &clk_axi4_sd0.common.hw,
+ [CLK_SD0] = &clk_sd0.mux.common.hw,
+ [CLK_SD0_100K] = &clk_sd0_100k.common.hw,
+ [CLK_AXI4_SD1] = &clk_axi4_sd1.common.hw,
+ [CLK_SD1] = &clk_sd1.mux.common.hw,
+ [CLK_SD1_100K] = &clk_sd1_100k.common.hw,
+ [CLK_SPI_NAND] = &clk_spi_nand.mux.common.hw,
+ [CLK_ETH0_500M] = &clk_eth0_500m.div.common.hw,
+ [CLK_AXI4_ETH0] = &clk_axi4_eth0.common.hw,
+ [CLK_ETH1_500M] = &clk_eth1_500m.div.common.hw,
+ [CLK_AXI4_ETH1] = &clk_axi4_eth1.common.hw,
+ [CLK_APB_GPIO] = &clk_apb_gpio.common.hw,
+ [CLK_APB_GPIO_INTR] = &clk_apb_gpio_intr.common.hw,
+ [CLK_GPIO_DB] = &clk_gpio_db.common.hw,
+ [CLK_AHB_SF] = &clk_ahb_sf.common.hw,
+ [CLK_AHB_SF1] = &clk_ahb_sf1.common.hw,
+ [CLK_A24M] = &clk_a24m.common.hw,
+ [CLK_AUDSRC] = &clk_audsrc.mux.common.hw,
+ [CLK_APB_AUDSRC] = &clk_apb_audsrc.common.hw,
+ [CLK_SDMA_AXI] = &clk_sdma_axi.common.hw,
+ [CLK_SDMA_AUD0] = &clk_sdma_aud0.mux.common.hw,
+ [CLK_SDMA_AUD1] = &clk_sdma_aud1.mux.common.hw,
+ [CLK_SDMA_AUD2] = &clk_sdma_aud2.mux.common.hw,
+ [CLK_SDMA_AUD3] = &clk_sdma_aud3.mux.common.hw,
+ [CLK_I2C] = &clk_i2c.div.common.hw,
+ [CLK_APB_I2C] = &clk_apb_i2c.common.hw,
+ [CLK_APB_I2C0] = &clk_apb_i2c0.common.hw,
+ [CLK_APB_I2C1] = &clk_apb_i2c1.common.hw,
+ [CLK_APB_I2C2] = &clk_apb_i2c2.common.hw,
+ [CLK_APB_I2C3] = &clk_apb_i2c3.common.hw,
+ [CLK_APB_I2C4] = &clk_apb_i2c4.common.hw,
+ [CLK_APB_WDT] = &clk_apb_wdt.common.hw,
+ [CLK_PWM_SRC] = &clk_pwm_src.mux.common.hw,
+ [CLK_PWM] = &clk_pwm.common.hw,
+ [CLK_SPI] = &clk_spi.div.common.hw,
+ [CLK_APB_SPI0] = &clk_apb_spi0.common.hw,
+ [CLK_APB_SPI1] = &clk_apb_spi1.common.hw,
+ [CLK_APB_SPI2] = &clk_apb_spi2.common.hw,
+ [CLK_APB_SPI3] = &clk_apb_spi3.common.hw,
+ [CLK_1M] = &clk_1m.common.hw,
+ [CLK_CAM0_200] = &clk_cam0_200.mux.common.hw,
+ [CLK_PM] = &clk_pm.common.hw,
+ [CLK_TIMER0] = &clk_timer0.common.hw,
+ [CLK_TIMER1] = &clk_timer1.common.hw,
+ [CLK_TIMER2] = &clk_timer2.common.hw,
+ [CLK_TIMER3] = &clk_timer3.common.hw,
+ [CLK_TIMER4] = &clk_timer4.common.hw,
+ [CLK_TIMER5] = &clk_timer5.common.hw,
+ [CLK_TIMER6] = &clk_timer6.common.hw,
+ [CLK_TIMER7] = &clk_timer7.common.hw,
+ [CLK_UART0] = &clk_uart0.common.hw,
+ [CLK_APB_UART0] = &clk_apb_uart0.common.hw,
+ [CLK_UART1] = &clk_uart1.common.hw,
+ [CLK_APB_UART1] = &clk_apb_uart1.common.hw,
+ [CLK_UART2] = &clk_uart2.common.hw,
+ [CLK_APB_UART2] = &clk_apb_uart2.common.hw,
+ [CLK_UART3] = &clk_uart3.common.hw,
+ [CLK_APB_UART3] = &clk_apb_uart3.common.hw,
+ [CLK_UART4] = &clk_uart4.common.hw,
+ [CLK_APB_UART4] = &clk_apb_uart4.common.hw,
+ [CLK_APB_I2S0] = &clk_apb_i2s0.common.hw,
+ [CLK_APB_I2S1] = &clk_apb_i2s1.common.hw,
+ [CLK_APB_I2S2] = &clk_apb_i2s2.common.hw,
+ [CLK_APB_I2S3] = &clk_apb_i2s3.common.hw,
+ [CLK_AXI4_USB] = &clk_axi4_usb.common.hw,
+ [CLK_APB_USB] = &clk_apb_usb.common.hw,
+ [CLK_USB_125M] = &clk_usb_125m.div.common.hw,
+ [CLK_USB_33K] = &clk_usb_33k.common.hw,
+ [CLK_USB_12M] = &clk_usb_12m.div.common.hw,
+ [CLK_AXI4] = &clk_axi4.mux.common.hw,
+ [CLK_AXI6] = &clk_axi6.div.common.hw,
+ [CLK_DSI_ESC] = &clk_dsi_esc.div.common.hw,
+ [CLK_AXI_VIP] = &clk_axi_vip.mux.common.hw,
+ [CLK_SRC_VIP_SYS_0] = &clk_src_vip_sys_0.mux.common.hw,
+ [CLK_SRC_VIP_SYS_1] = &clk_src_vip_sys_1.mux.common.hw,
+ [CLK_SRC_VIP_SYS_2] = &clk_src_vip_sys_2.mux.common.hw,
+ [CLK_SRC_VIP_SYS_3] = &clk_src_vip_sys_3.mux.common.hw,
+ [CLK_SRC_VIP_SYS_4] = &clk_src_vip_sys_4.mux.common.hw,
+ [CLK_CSI_BE_VIP] = &clk_csi_be_vip.common.hw,
+ [CLK_CSI_MAC0_VIP] = &clk_csi_mac0_vip.common.hw,
+ [CLK_CSI_MAC1_VIP] = &clk_csi_mac1_vip.common.hw,
+ [CLK_CSI_MAC2_VIP] = &clk_csi_mac2_vip.common.hw,
+ [CLK_CSI0_RX_VIP] = &clk_csi0_rx_vip.common.hw,
+ [CLK_CSI1_RX_VIP] = &clk_csi1_rx_vip.common.hw,
+ [CLK_ISP_TOP_VIP] = &clk_isp_top_vip.common.hw,
+ [CLK_IMG_D_VIP] = &clk_img_d_vip.common.hw,
+ [CLK_IMG_V_VIP] = &clk_img_v_vip.common.hw,
+ [CLK_SC_TOP_VIP] = &clk_sc_top_vip.common.hw,
+ [CLK_SC_D_VIP] = &clk_sc_d_vip.common.hw,
+ [CLK_SC_V1_VIP] = &clk_sc_v1_vip.common.hw,
+ [CLK_SC_V2_VIP] = &clk_sc_v2_vip.common.hw,
+ [CLK_SC_V3_VIP] = &clk_sc_v3_vip.common.hw,
+ [CLK_DWA_VIP] = &clk_dwa_vip.common.hw,
+ [CLK_BT_VIP] = &clk_bt_vip.common.hw,
+ [CLK_DISP_VIP] = &clk_disp_vip.common.hw,
+ [CLK_DSI_MAC_VIP] = &clk_dsi_mac_vip.common.hw,
+ [CLK_LVDS0_VIP] = &clk_lvds0_vip.common.hw,
+ [CLK_LVDS1_VIP] = &clk_lvds1_vip.common.hw,
+ [CLK_PAD_VI_VIP] = &clk_pad_vi_vip.common.hw,
+ [CLK_PAD_VI1_VIP] = &clk_pad_vi1_vip.common.hw,
+ [CLK_PAD_VI2_VIP] = &clk_pad_vi2_vip.common.hw,
+ [CLK_CFG_REG_VIP] = &clk_cfg_reg_vip.common.hw,
+ [CLK_VIP_IP0] = &clk_vip_ip0.common.hw,
+ [CLK_VIP_IP1] = &clk_vip_ip1.common.hw,
+ [CLK_VIP_IP2] = &clk_vip_ip2.common.hw,
+ [CLK_VIP_IP3] = &clk_vip_ip3.common.hw,
+ [CLK_IVE_VIP] = &clk_ive_vip.common.hw,
+ [CLK_RAW_VIP] = &clk_raw_vip.common.hw,
+ [CLK_OSDC_VIP] = &clk_osdc_vip.common.hw,
+ [CLK_CAM0_VIP] = &clk_cam0_vip.common.hw,
+ [CLK_AXI_VIDEO_CODEC] = &clk_axi_video_codec.mux.common.hw,
+ [CLK_VC_SRC0] = &clk_vc_src0.mux.common.hw,
+ [CLK_VC_SRC1] = &clk_vc_src1.div.common.hw,
+ [CLK_VC_SRC2] = &clk_vc_src2.div.common.hw,
+ [CLK_H264C] = &clk_h264c.common.hw,
+ [CLK_APB_H264C] = &clk_apb_h264c.common.hw,
+ [CLK_H265C] = &clk_h265c.common.hw,
+ [CLK_APB_H265C] = &clk_apb_h265c.common.hw,
+ [CLK_JPEG] = &clk_jpeg.common.hw,
+ [CLK_APB_JPEG] = &clk_apb_jpeg.common.hw,
+ [CLK_CAM0] = &clk_cam0.common.hw,
+ [CLK_CAM1] = &clk_cam1.common.hw,
+ [CLK_WGN] = &clk_wgn.common.hw,
+ [CLK_WGN0] = &clk_wgn0.common.hw,
+ [CLK_WGN1] = &clk_wgn1.common.hw,
+ [CLK_WGN2] = &clk_wgn2.common.hw,
+ [CLK_KEYSCAN] = &clk_keyscan.common.hw,
+ [CLK_CFG_REG_VC] = &clk_cfg_reg_vc.common.hw,
+ [CLK_C906_0] = &clk_c906_0.common.hw,
+ [CLK_C906_1] = &clk_c906_1.common.hw,
+ [CLK_A53] = &clk_a53.common.hw,
+ [CLK_CPU_AXI0] = &clk_cpu_axi0.div.common.hw,
+ [CLK_CPU_GIC] = &clk_cpu_gic.div.common.hw,
+ [CLK_XTAL_AP] = &clk_xtal_ap.common.hw,
+ },
+};
+
+static void cv18xx_clk_disable_auto_pd(void __iomem *base)
+{
+ static const u16 CV1800_PD_CLK[] = {
+ REG_MIPIMPLL_CLK_CSR,
+ REG_A0PLL_CLK_CSR,
+ REG_DISPPLL_CLK_CSR,
+ REG_CAM0PLL_CLK_CSR,
+ REG_CAM1PLL_CLK_CSR,
+ };
+
+ u32 val;
+ int i;
+
+ /* disable auto power down */
+ for (i = 0; i < ARRAY_SIZE(CV1800_PD_CLK); i++) {
+ u32 reg = CV1800_PD_CLK[i];
+
+ val = readl(base + reg);
+ val |= GENMASK(12, 9);
+ val &= ~BIT(8);
+ writel(val, base + reg);
+ }
+}
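
[Editor's note] As a quick check of the mask arithmetic above (the meaning of the CSR bits is taken on trust from the "disable auto power down" comment): GENMASK(12, 9) is 0x1e00 and BIT(8) is 0x100, so each *PLL_CLK_CSR register gets bits 12..9 set and bit 8 cleared. A minimal userspace sketch with stand-ins for the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's BIT()/GENMASK() helpers (64-bit long assumed). */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	uint32_t val = 0x00000123;	/* arbitrary example CSR value */

	val |= GENMASK(12, 9);		/* set bits 12..9: 0x1e00 */
	val &= ~BIT(8);			/* clear bit 8:    0x0100 */

	/* prints: mask=0x1e00 bit=0x100 result=0x00001e23 */
	printf("mask=0x%lx bit=0x%lx result=0x%08x\n",
	       GENMASK(12, 9), BIT(8), val);
	return 0;
}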
+
+static void cv18xx_clk_disable_a53(void __iomem *base)
+{
+ u32 val = readl(base + REG_CLK_BYP_0);
+
+ /* Set bypass clock for clk_a53 */
+ val |= BIT(0);
+
+ /* Set bypass clock for clk_cpu_axi0 */
+ val |= BIT(1);
+
+ /* Set bypass clock for clk_cpu_gic */
+ val |= BIT(2);
+
+ writel(val, base + REG_CLK_BYP_0);
+}
+
+static int cv1800_pre_init(struct device *dev, void __iomem *base,
+ struct cv1800_clk_ctrl *ctrl,
+ const struct cv1800_clk_desc *desc)
+{
+ u32 val = readl(base + REG_CLK_EN_2);
+
+ /* disable unsupported clk_disp_src_vip */
+ val &= ~BIT(7);
+
+ writel(val, base + REG_CLK_EN_2);
+
+ cv18xx_clk_disable_a53(base);
+ cv18xx_clk_disable_auto_pd(base);
+
+ return 0;
+}
+
+static const struct cv1800_clk_desc cv1800_desc = {
+ .clks_data = &cv1800_hw_clks,
+ .pre_init = cv1800_pre_init,
+};
+
+static struct clk_hw_onecell_data cv1810_hw_clks = {
+ .num = CV1810_CLK_MAX,
+ .hws = {
+ [CLK_MPLL] = &clk_mpll.common.hw,
+ [CLK_TPLL] = &clk_tpll.common.hw,
+ [CLK_FPLL] = &clk_fpll.common.hw,
+ [CLK_MIPIMPLL] = &clk_mipimpll.common.hw,
+ [CLK_A0PLL] = &clk_a0pll.common.hw,
+ [CLK_DISPPLL] = &clk_disppll.common.hw,
+ [CLK_CAM0PLL] = &clk_cam0pll.common.hw,
+ [CLK_CAM1PLL] = &clk_cam1pll.common.hw,
+
+ [CLK_MIPIMPLL_D3] = &clk_mipimpll_d3.common.hw,
+ [CLK_CAM0PLL_D2] = &clk_cam0pll_d2.common.hw,
+ [CLK_CAM0PLL_D3] = &clk_cam0pll_d3.common.hw,
+
+ [CLK_TPU] = &clk_tpu.mux.common.hw,
+ [CLK_TPU_FAB] = &clk_tpu_fab.common.hw,
+ [CLK_AHB_ROM] = &clk_ahb_rom.common.hw,
+ [CLK_DDR_AXI_REG] = &clk_ddr_axi_reg.common.hw,
+ [CLK_RTC_25M] = &clk_rtc_25m.common.hw,
+ [CLK_SRC_RTC_SYS_0] = &clk_src_rtc_sys_0.div.common.hw,
+ [CLK_TEMPSEN] = &clk_tempsen.common.hw,
+ [CLK_SARADC] = &clk_saradc.common.hw,
+ [CLK_EFUSE] = &clk_efuse.common.hw,
+ [CLK_APB_EFUSE] = &clk_apb_efuse.common.hw,
+ [CLK_DEBUG] = &clk_debug.common.hw,
+ [CLK_AP_DEBUG] = &clk_ap_debug.div.common.hw,
+ [CLK_XTAL_MISC] = &clk_xtal_misc.common.hw,
+ [CLK_AXI4_EMMC] = &clk_axi4_emmc.common.hw,
+ [CLK_EMMC] = &clk_emmc.mux.common.hw,
+ [CLK_EMMC_100K] = &clk_emmc_100k.common.hw,
+ [CLK_AXI4_SD0] = &clk_axi4_sd0.common.hw,
+ [CLK_SD0] = &clk_sd0.mux.common.hw,
+ [CLK_SD0_100K] = &clk_sd0_100k.common.hw,
+ [CLK_AXI4_SD1] = &clk_axi4_sd1.common.hw,
+ [CLK_SD1] = &clk_sd1.mux.common.hw,
+ [CLK_SD1_100K] = &clk_sd1_100k.common.hw,
+ [CLK_SPI_NAND] = &clk_spi_nand.mux.common.hw,
+ [CLK_ETH0_500M] = &clk_eth0_500m.div.common.hw,
+ [CLK_AXI4_ETH0] = &clk_axi4_eth0.common.hw,
+ [CLK_ETH1_500M] = &clk_eth1_500m.div.common.hw,
+ [CLK_AXI4_ETH1] = &clk_axi4_eth1.common.hw,
+ [CLK_APB_GPIO] = &clk_apb_gpio.common.hw,
+ [CLK_APB_GPIO_INTR] = &clk_apb_gpio_intr.common.hw,
+ [CLK_GPIO_DB] = &clk_gpio_db.common.hw,
+ [CLK_AHB_SF] = &clk_ahb_sf.common.hw,
+ [CLK_AHB_SF1] = &clk_ahb_sf1.common.hw,
+ [CLK_A24M] = &clk_a24m.common.hw,
+ [CLK_AUDSRC] = &clk_audsrc.mux.common.hw,
+ [CLK_APB_AUDSRC] = &clk_apb_audsrc.common.hw,
+ [CLK_SDMA_AXI] = &clk_sdma_axi.common.hw,
+ [CLK_SDMA_AUD0] = &clk_sdma_aud0.mux.common.hw,
+ [CLK_SDMA_AUD1] = &clk_sdma_aud1.mux.common.hw,
+ [CLK_SDMA_AUD2] = &clk_sdma_aud2.mux.common.hw,
+ [CLK_SDMA_AUD3] = &clk_sdma_aud3.mux.common.hw,
+ [CLK_I2C] = &clk_i2c.div.common.hw,
+ [CLK_APB_I2C] = &clk_apb_i2c.common.hw,
+ [CLK_APB_I2C0] = &clk_apb_i2c0.common.hw,
+ [CLK_APB_I2C1] = &clk_apb_i2c1.common.hw,
+ [CLK_APB_I2C2] = &clk_apb_i2c2.common.hw,
+ [CLK_APB_I2C3] = &clk_apb_i2c3.common.hw,
+ [CLK_APB_I2C4] = &clk_apb_i2c4.common.hw,
+ [CLK_APB_WDT] = &clk_apb_wdt.common.hw,
+ [CLK_PWM_SRC] = &clk_pwm_src.mux.common.hw,
+ [CLK_PWM] = &clk_pwm.common.hw,
+ [CLK_SPI] = &clk_spi.div.common.hw,
+ [CLK_APB_SPI0] = &clk_apb_spi0.common.hw,
+ [CLK_APB_SPI1] = &clk_apb_spi1.common.hw,
+ [CLK_APB_SPI2] = &clk_apb_spi2.common.hw,
+ [CLK_APB_SPI3] = &clk_apb_spi3.common.hw,
+ [CLK_1M] = &clk_1m.common.hw,
+ [CLK_CAM0_200] = &clk_cam0_200.mux.common.hw,
+ [CLK_PM] = &clk_pm.common.hw,
+ [CLK_TIMER0] = &clk_timer0.common.hw,
+ [CLK_TIMER1] = &clk_timer1.common.hw,
+ [CLK_TIMER2] = &clk_timer2.common.hw,
+ [CLK_TIMER3] = &clk_timer3.common.hw,
+ [CLK_TIMER4] = &clk_timer4.common.hw,
+ [CLK_TIMER5] = &clk_timer5.common.hw,
+ [CLK_TIMER6] = &clk_timer6.common.hw,
+ [CLK_TIMER7] = &clk_timer7.common.hw,
+ [CLK_UART0] = &clk_uart0.common.hw,
+ [CLK_APB_UART0] = &clk_apb_uart0.common.hw,
+ [CLK_UART1] = &clk_uart1.common.hw,
+ [CLK_APB_UART1] = &clk_apb_uart1.common.hw,
+ [CLK_UART2] = &clk_uart2.common.hw,
+ [CLK_APB_UART2] = &clk_apb_uart2.common.hw,
+ [CLK_UART3] = &clk_uart3.common.hw,
+ [CLK_APB_UART3] = &clk_apb_uart3.common.hw,
+ [CLK_UART4] = &clk_uart4.common.hw,
+ [CLK_APB_UART4] = &clk_apb_uart4.common.hw,
+ [CLK_APB_I2S0] = &clk_apb_i2s0.common.hw,
+ [CLK_APB_I2S1] = &clk_apb_i2s1.common.hw,
+ [CLK_APB_I2S2] = &clk_apb_i2s2.common.hw,
+ [CLK_APB_I2S3] = &clk_apb_i2s3.common.hw,
+ [CLK_AXI4_USB] = &clk_axi4_usb.common.hw,
+ [CLK_APB_USB] = &clk_apb_usb.common.hw,
+ [CLK_USB_125M] = &clk_usb_125m.div.common.hw,
+ [CLK_USB_33K] = &clk_usb_33k.common.hw,
+ [CLK_USB_12M] = &clk_usb_12m.div.common.hw,
+ [CLK_AXI4] = &clk_axi4.mux.common.hw,
+ [CLK_AXI6] = &clk_axi6.div.common.hw,
+ [CLK_DSI_ESC] = &clk_dsi_esc.div.common.hw,
+ [CLK_AXI_VIP] = &clk_axi_vip.mux.common.hw,
+ [CLK_SRC_VIP_SYS_0] = &clk_src_vip_sys_0.mux.common.hw,
+ [CLK_SRC_VIP_SYS_1] = &clk_src_vip_sys_1.mux.common.hw,
+ [CLK_SRC_VIP_SYS_2] = &clk_src_vip_sys_2.mux.common.hw,
+ [CLK_SRC_VIP_SYS_3] = &clk_src_vip_sys_3.mux.common.hw,
+ [CLK_SRC_VIP_SYS_4] = &clk_src_vip_sys_4.mux.common.hw,
+ [CLK_CSI_BE_VIP] = &clk_csi_be_vip.common.hw,
+ [CLK_CSI_MAC0_VIP] = &clk_csi_mac0_vip.common.hw,
+ [CLK_CSI_MAC1_VIP] = &clk_csi_mac1_vip.common.hw,
+ [CLK_CSI_MAC2_VIP] = &clk_csi_mac2_vip.common.hw,
+ [CLK_CSI0_RX_VIP] = &clk_csi0_rx_vip.common.hw,
+ [CLK_CSI1_RX_VIP] = &clk_csi1_rx_vip.common.hw,
+ [CLK_ISP_TOP_VIP] = &clk_isp_top_vip.common.hw,
+ [CLK_IMG_D_VIP] = &clk_img_d_vip.common.hw,
+ [CLK_IMG_V_VIP] = &clk_img_v_vip.common.hw,
+ [CLK_SC_TOP_VIP] = &clk_sc_top_vip.common.hw,
+ [CLK_SC_D_VIP] = &clk_sc_d_vip.common.hw,
+ [CLK_SC_V1_VIP] = &clk_sc_v1_vip.common.hw,
+ [CLK_SC_V2_VIP] = &clk_sc_v2_vip.common.hw,
+ [CLK_SC_V3_VIP] = &clk_sc_v3_vip.common.hw,
+ [CLK_DWA_VIP] = &clk_dwa_vip.common.hw,
+ [CLK_BT_VIP] = &clk_bt_vip.common.hw,
+ [CLK_DISP_VIP] = &clk_disp_vip.common.hw,
+ [CLK_DSI_MAC_VIP] = &clk_dsi_mac_vip.common.hw,
+ [CLK_LVDS0_VIP] = &clk_lvds0_vip.common.hw,
+ [CLK_LVDS1_VIP] = &clk_lvds1_vip.common.hw,
+ [CLK_PAD_VI_VIP] = &clk_pad_vi_vip.common.hw,
+ [CLK_PAD_VI1_VIP] = &clk_pad_vi1_vip.common.hw,
+ [CLK_PAD_VI2_VIP] = &clk_pad_vi2_vip.common.hw,
+ [CLK_CFG_REG_VIP] = &clk_cfg_reg_vip.common.hw,
+ [CLK_VIP_IP0] = &clk_vip_ip0.common.hw,
+ [CLK_VIP_IP1] = &clk_vip_ip1.common.hw,
+ [CLK_VIP_IP2] = &clk_vip_ip2.common.hw,
+ [CLK_VIP_IP3] = &clk_vip_ip3.common.hw,
+ [CLK_IVE_VIP] = &clk_ive_vip.common.hw,
+ [CLK_RAW_VIP] = &clk_raw_vip.common.hw,
+ [CLK_OSDC_VIP] = &clk_osdc_vip.common.hw,
+ [CLK_CAM0_VIP] = &clk_cam0_vip.common.hw,
+ [CLK_AXI_VIDEO_CODEC] = &clk_axi_video_codec.mux.common.hw,
+ [CLK_VC_SRC0] = &clk_vc_src0.mux.common.hw,
+ [CLK_VC_SRC1] = &clk_vc_src1.div.common.hw,
+ [CLK_VC_SRC2] = &clk_vc_src2.div.common.hw,
+ [CLK_H264C] = &clk_h264c.common.hw,
+ [CLK_APB_H264C] = &clk_apb_h264c.common.hw,
+ [CLK_H265C] = &clk_h265c.common.hw,
+ [CLK_APB_H265C] = &clk_apb_h265c.common.hw,
+ [CLK_JPEG] = &clk_jpeg.common.hw,
+ [CLK_APB_JPEG] = &clk_apb_jpeg.common.hw,
+ [CLK_CAM0] = &clk_cam0.common.hw,
+ [CLK_CAM1] = &clk_cam1.common.hw,
+ [CLK_WGN] = &clk_wgn.common.hw,
+ [CLK_WGN0] = &clk_wgn0.common.hw,
+ [CLK_WGN1] = &clk_wgn1.common.hw,
+ [CLK_WGN2] = &clk_wgn2.common.hw,
+ [CLK_KEYSCAN] = &clk_keyscan.common.hw,
+ [CLK_CFG_REG_VC] = &clk_cfg_reg_vc.common.hw,
+ [CLK_C906_0] = &clk_c906_0.common.hw,
+ [CLK_C906_1] = &clk_c906_1.common.hw,
+ [CLK_A53] = &clk_a53.common.hw,
+ [CLK_CPU_AXI0] = &clk_cpu_axi0.div.common.hw,
+ [CLK_CPU_GIC] = &clk_cpu_gic.div.common.hw,
+ [CLK_XTAL_AP] = &clk_xtal_ap.common.hw,
+ [CLK_DISP_SRC_VIP] = &clk_disp_src_vip.div.common.hw,
+ },
+};
+
+static int cv1810_pre_init(struct device *dev, void __iomem *base,
+ struct cv1800_clk_ctrl *ctrl,
+ const struct cv1800_clk_desc *desc)
+{
+ cv18xx_clk_disable_a53(base);
+ cv18xx_clk_disable_auto_pd(base);
+
+ return 0;
+}
+
+static const struct cv1800_clk_desc cv1810_desc = {
+ .clks_data = &cv1810_hw_clks,
+ .pre_init = cv1810_pre_init,
+};
+
+static int sg2000_pre_init(struct device *dev, void __iomem *base,
+ struct cv1800_clk_ctrl *ctrl,
+ const struct cv1800_clk_desc *desc)
+{
+ cv18xx_clk_disable_auto_pd(base);
+
+ return 0;
+}
+
+static const struct cv1800_clk_desc sg2000_desc = {
+ .clks_data = &cv1810_hw_clks,
+ .pre_init = sg2000_pre_init,
+};
+
+static int cv1800_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct cv1800_clk_ctrl *ctrl,
+ const struct cv1800_clk_desc *desc)
+{
+ int i, ret;
+
+ ctrl->desc = desc;
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->clks_data->num; i++) {
+ struct clk_hw *hw = desc->clks_data->hws[i];
+ struct cv1800_clk_common *common;
+ const char *name;
+
+ if (!hw)
+ continue;
+
+ name = hw->init->name;
+
+ common = hw_to_cv1800_clk_common(hw);
+ common->base = reg;
+ common->lock = &ctrl->lock;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ desc->clks_data);
+}
+
+static int cv1800_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *reg;
+ int ret;
+ const struct cv1800_clk_desc *desc;
+ struct cv1800_clk_ctrl *ctrl;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc) {
+ dev_err(dev, "no match data for platform\n");
+ return -EINVAL;
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ if (desc->pre_init) {
+ ret = desc->pre_init(dev, reg, ctrl, desc);
+ if (ret)
+ return ret;
+ }
+
+ return cv1800_clk_init_ctrl(dev, reg, ctrl, desc);
+}
+
+static const struct of_device_id cv1800_clk_ids[] = {
+ { .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_clk_ids);
+
+static struct platform_driver cv1800_clk_driver = {
+ .probe = cv1800_clk_probe,
+ .driver = {
+ .name = "cv1800-clk",
+ .suppress_bind_attrs = true,
+ .of_match_table = cv1800_clk_ids,
+ },
+};
+module_platform_driver(cv1800_clk_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sophgo/clk-cv1800.h b/drivers/clk/sophgo/clk-cv1800.h
new file mode 100644
index 000000000000..1e7107b5d05e
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv1800.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _CLK_SOPHGO_CV1800_H_
+#define _CLK_SOPHGO_CV1800_H_
+
+#include <dt-bindings/clock/sophgo,cv1800.h>
+
+#define CV1800_CLK_MAX (CLK_XTAL_AP + 1)
+#define CV1810_CLK_MAX (CLK_DISP_SRC_VIP + 1)
+
+#define REG_PLL_G2_CTRL 0x800
+#define REG_PLL_G2_STATUS 0x804
+#define REG_MIPIMPLL_CSR 0x808
+#define REG_A0PLL_CSR 0x80C
+#define REG_DISPPLL_CSR 0x810
+#define REG_CAM0PLL_CSR 0x814
+#define REG_CAM1PLL_CSR 0x818
+#define REG_PLL_G2_SSC_SYN_CTRL 0x840
+#define REG_A0PLL_SSC_SYN_CTRL 0x850
+#define REG_A0PLL_SSC_SYN_SET 0x854
+#define REG_A0PLL_SSC_SYN_SPAN 0x858
+#define REG_A0PLL_SSC_SYN_STEP 0x85C
+#define REG_DISPPLL_SSC_SYN_CTRL 0x860
+#define REG_DISPPLL_SSC_SYN_SET 0x864
+#define REG_DISPPLL_SSC_SYN_SPAN 0x868
+#define REG_DISPPLL_SSC_SYN_STEP 0x86C
+#define REG_CAM0PLL_SSC_SYN_CTRL 0x870
+#define REG_CAM0PLL_SSC_SYN_SET 0x874
+#define REG_CAM0PLL_SSC_SYN_SPAN 0x878
+#define REG_CAM0PLL_SSC_SYN_STEP 0x87C
+#define REG_CAM1PLL_SSC_SYN_CTRL 0x880
+#define REG_CAM1PLL_SSC_SYN_SET 0x884
+#define REG_CAM1PLL_SSC_SYN_SPAN 0x888
+#define REG_CAM1PLL_SSC_SYN_STEP 0x88C
+#define REG_APLL_FRAC_DIV_CTRL 0x890
+#define REG_APLL_FRAC_DIV_M 0x894
+#define REG_APLL_FRAC_DIV_N 0x898
+#define REG_MIPIMPLL_CLK_CSR 0x8A0
+#define REG_A0PLL_CLK_CSR 0x8A4
+#define REG_DISPPLL_CLK_CSR 0x8A8
+#define REG_CAM0PLL_CLK_CSR 0x8AC
+#define REG_CAM1PLL_CLK_CSR 0x8B0
+#define REG_CLK_CAM0_SRC_DIV 0x8C0
+#define REG_CLK_CAM1_SRC_DIV 0x8C4
+
+/* top_pll_g6 */
+#define REG_PLL_G6_CTRL 0x900
+#define REG_PLL_G6_STATUS 0x904
+#define REG_MPLL_CSR 0x908
+#define REG_TPLL_CSR 0x90C
+#define REG_FPLL_CSR 0x910
+#define REG_PLL_G6_SSC_SYN_CTRL 0x940
+#define REG_DPLL_SSC_SYN_CTRL 0x950
+#define REG_DPLL_SSC_SYN_SET 0x954
+#define REG_DPLL_SSC_SYN_SPAN 0x958
+#define REG_DPLL_SSC_SYN_STEP 0x95C
+#define REG_MPLL_SSC_SYN_CTRL 0x960
+#define REG_MPLL_SSC_SYN_SET 0x964
+#define REG_MPLL_SSC_SYN_SPAN 0x968
+#define REG_MPLL_SSC_SYN_STEP 0x96C
+#define REG_TPLL_SSC_SYN_CTRL 0x970
+#define REG_TPLL_SSC_SYN_SET 0x974
+#define REG_TPLL_SSC_SYN_SPAN 0x978
+#define REG_TPLL_SSC_SYN_STEP 0x97C
+
+/* clkgen */
+#define REG_CLK_EN_0 0x000
+#define REG_CLK_EN_1 0x004
+#define REG_CLK_EN_2 0x008
+#define REG_CLK_EN_3 0x00C
+#define REG_CLK_EN_4 0x010
+#define REG_CLK_SEL_0 0x020
+#define REG_CLK_BYP_0 0x030
+#define REG_CLK_BYP_1 0x034
+
+#define REG_DIV_CLK_A53_0 0x040
+#define REG_DIV_CLK_A53_1 0x044
+#define REG_DIV_CLK_CPU_AXI0 0x048
+#define REG_DIV_CLK_CPU_GIC 0x050
+#define REG_DIV_CLK_TPU 0x054
+#define REG_DIV_CLK_EMMC 0x064
+#define REG_DIV_CLK_EMMC_100K 0x06C
+#define REG_DIV_CLK_SD0 0x070
+#define REG_DIV_CLK_SD0_100K 0x078
+#define REG_DIV_CLK_SD1 0x07C
+#define REG_DIV_CLK_SD1_100K 0x084
+#define REG_DIV_CLK_SPI_NAND 0x088
+#define REG_DIV_CLK_ETH0_500M 0x08C
+#define REG_DIV_CLK_ETH1_500M 0x090
+#define REG_DIV_CLK_GPIO_DB 0x094
+#define REG_DIV_CLK_SDMA_AUD0 0x098
+#define REG_DIV_CLK_SDMA_AUD1 0x09C
+#define REG_DIV_CLK_SDMA_AUD2 0x0A0
+#define REG_DIV_CLK_SDMA_AUD3 0x0A4
+#define REG_DIV_CLK_CAM0_200 0x0A8
+#define REG_DIV_CLK_AXI4 0x0B8
+#define REG_DIV_CLK_AXI6 0x0BC
+#define REG_DIV_CLK_DSI_ESC 0x0C4
+#define REG_DIV_CLK_AXI_VIP 0x0C8
+#define REG_DIV_CLK_SRC_VIP_SYS_0 0x0D0
+#define REG_DIV_CLK_SRC_VIP_SYS_1 0x0D8
+#define REG_DIV_CLK_DISP_SRC_VIP 0x0E0
+#define REG_DIV_CLK_AXI_VIDEO_CODEC 0x0E4
+#define REG_DIV_CLK_VC_SRC0 0x0EC
+#define REG_DIV_CLK_1M 0x0FC
+#define REG_DIV_CLK_SPI 0x100
+#define REG_DIV_CLK_I2C 0x104
+#define REG_DIV_CLK_SRC_VIP_SYS_2 0x110
+#define REG_DIV_CLK_AUDSRC 0x118
+#define REG_DIV_CLK_PWM_SRC_0 0x120
+#define REG_DIV_CLK_AP_DEBUG 0x128
+#define REG_DIV_CLK_RTCSYS_SRC_0 0x12C
+#define REG_DIV_CLK_C906_0_0 0x130
+#define REG_DIV_CLK_C906_0_1 0x134
+#define REG_DIV_CLK_C906_1_0 0x138
+#define REG_DIV_CLK_C906_1_1 0x13C
+#define REG_DIV_CLK_SRC_VIP_SYS_3 0x140
+#define REG_DIV_CLK_SRC_VIP_SYS_4 0x144
+
+#endif /* _CLK_SOPHGO_CV1800_H_ */
diff --git a/drivers/clk/sophgo/clk-cv18xx-common.c b/drivers/clk/sophgo/clk-cv18xx-common.c
new file mode 100644
index 000000000000..cbcdd88f0e23
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-common.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/spinlock.h>
+#include <linux/bug.h>
+
+#include "clk-cv18xx-common.h"
+
+int cv1800_clk_setbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field)
+{
+ u32 mask = BIT(field->shift);
+ u32 value;
+ unsigned long flags;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ value = readl(common->base + field->reg);
+ writel(value | mask, common->base + field->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
+
+int cv1800_clk_clearbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field)
+{
+ u32 mask = BIT(field->shift);
+ u32 value;
+ unsigned long flags;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ value = readl(common->base + field->reg);
+ writel(value & ~mask, common->base + field->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
+
+int cv1800_clk_checkbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field)
+{
+ return readl(common->base + field->reg) & BIT(field->shift);
+}
+
+#define PLL_LOCK_TIMEOUT_US (200 * 1000)
+
+void cv1800_clk_wait_for_lock(struct cv1800_clk_common *common,
+ u32 reg, u32 lock)
+{
+ void __iomem *addr = common->base + reg;
+ u32 regval;
+
+ if (!lock)
+ return;
+
+ WARN_ON(readl_relaxed_poll_timeout(addr, regval, regval & lock,
+ 100, PLL_LOCK_TIMEOUT_US));
+}
diff --git a/drivers/clk/sophgo/clk-cv18xx-common.h b/drivers/clk/sophgo/clk-cv18xx-common.h
new file mode 100644
index 000000000000..2bfda02b2064
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-common.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _CLK_SOPHGO_CV18XX_IP_H_
+#define _CLK_SOPHGO_CV18XX_IP_H_
+
+#include <linux/compiler.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+
+struct cv1800_clk_common {
+ void __iomem *base;
+ spinlock_t *lock;
+ struct clk_hw hw;
+ unsigned long features;
+};
+
+#define CV1800_CLK_COMMON(_name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, _flags), \
+ }
+
+static inline struct cv1800_clk_common *
+hw_to_cv1800_clk_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct cv1800_clk_common, hw);
+}
+
+struct cv1800_clk_regbit {
+ u16 reg;
+ s8 shift;
+};
+
+struct cv1800_clk_regfield {
+ u16 reg;
+ u8 shift;
+ u8 width;
+ s16 initval;
+ unsigned long flags;
+};
+
+#define CV1800_CLK_BIT(_reg, _shift) \
+ { \
+ .reg = _reg, \
+ .shift = _shift, \
+ }
+
+#define CV1800_CLK_REG(_reg, _shift, _width, _initval, _flags) \
+ { \
+ .reg = _reg, \
+ .shift = _shift, \
+ .width = _width, \
+ .initval = _initval, \
+ .flags = _flags, \
+ }
+
+#define cv1800_clk_regfield_genmask(_reg) \
+ GENMASK((_reg)->shift + (_reg)->width - 1, (_reg)->shift)
+#define cv1800_clk_regfield_get(_val, _reg) \
+ (((_val) >> (_reg)->shift) & GENMASK((_reg)->width - 1, 0))
+#define cv1800_clk_regfield_set(_val, _new, _reg) \
+ (((_val) & ~cv1800_clk_regfield_genmask((_reg))) | \
+ (((_new) & GENMASK((_reg)->width - 1, 0)) << (_reg)->shift))
+
+#define _CV1800_SET_FIELD(_reg, _val, _field) \
+ (((_reg) & ~(_field)) | FIELD_PREP((_field), (_val)))
+
+int cv1800_clk_setbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field);
+int cv1800_clk_clearbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field);
+int cv1800_clk_checkbit(struct cv1800_clk_common *common,
+ struct cv1800_clk_regbit *field);
+
+void cv1800_clk_wait_for_lock(struct cv1800_clk_common *common,
+ u32 reg, u32 lock);
+
+#endif // _CLK_SOPHGO_CV18XX_IP_H_
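
[Editor's note] To make the register-field helpers in this header concrete, the sketch below re-implements cv1800_clk_regfield_genmask()/_get()/_set() in plain C and applies them to a field at shift 16 with width 4, the layout most divider registers in clk-cv1800.c above use. The struct, macros and sample register value are illustrative stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

struct regfield { uint8_t shift, width; };

#define regfield_genmask(f)	GENMASK((f)->shift + (f)->width - 1, (f)->shift)
#define regfield_get(val, f)	(((val) >> (f)->shift) & GENMASK((f)->width - 1, 0))
#define regfield_set(val, newval, f) \
	(((val) & ~regfield_genmask(f)) | \
	 (((newval) & GENMASK((f)->width - 1, 0)) << (f)->shift))

int main(void)
{
	const struct regfield div = { .shift = 16, .width = 4 };
	uint32_t reg = 0x00050000;	/* hypothetical divider register: field = 5 */

	/* prints: mask=0x000f0000 get=5 set(2)=0x00020000 */
	printf("mask=0x%08x get=%u set(2)=0x%08x\n",
	       regfield_genmask(&div), regfield_get(reg, &div),
	       regfield_set(reg, 2, &div));
	return 0;
}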
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c
new file mode 100644
index 000000000000..805f561725ae
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/gcd.h>
+#include <linux/spinlock.h>
+
+#include "clk-cv18xx-ip.h"
+
+/* GATE */
+static inline struct cv1800_clk_gate *hw_to_cv1800_clk_gate(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_gate, common);
+}
+
+static int gate_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);
+
+ return cv1800_clk_setbit(&gate->common, &gate->gate);
+}
+
+static void gate_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);
+
+ cv1800_clk_clearbit(&gate->common, &gate->gate);
+}
+
+static int gate_is_enabled(struct clk_hw *hw)
+{
+ struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);
+
+ return cv1800_clk_checkbit(&gate->common, &gate->gate);
+}
+
+static unsigned long gate_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate;
+}
+
+static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return *parent_rate;
+}
+
+static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+const struct clk_ops cv1800_clk_gate_ops = {
+ .disable = gate_disable,
+ .enable = gate_enable,
+ .is_enabled = gate_is_enabled,
+
+ .recalc_rate = gate_recalc_rate,
+ .round_rate = gate_round_rate,
+ .set_rate = gate_set_rate,
+};
+
+/* DIV */
+#define _DIV_EN_CLK_DIV_FACTOR_FIELD BIT(3)
+
+#define DIV_GET_EN_CLK_DIV_FACTOR(_reg) \
+ FIELD_GET(_DIV_EN_CLK_DIV_FACTOR_FIELD, _reg)
+
+#define DIV_SET_EN_DIV_FACTOR(_reg) \
+ _CV1800_SET_FIELD(_reg, 1, _DIV_EN_CLK_DIV_FACTOR_FIELD)
+
+static inline struct cv1800_clk_div *hw_to_cv1800_clk_div(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_div, common);
+}
+
+static int div_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+
+ return cv1800_clk_setbit(&div->common, &div->gate);
+}
+
+static void div_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+
+ cv1800_clk_clearbit(&div->common, &div->gate);
+}
+
+static int div_is_enabled(struct clk_hw *hw)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+
+ return cv1800_clk_checkbit(&div->common, &div->gate);
+}
+
+static int div_helper_set_rate(struct cv1800_clk_common *common,
+ struct cv1800_clk_regfield *div,
+ unsigned long val)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (div->width == 0)
+ return 0;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ reg = readl(common->base + div->reg);
+ reg = cv1800_clk_regfield_set(reg, val, div);
+ if (div->initval > 0)
+ reg = DIV_SET_EN_DIV_FACTOR(reg);
+
+ writel(reg, common->base + div->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
+
+static u32 div_helper_get_clockdiv(struct cv1800_clk_common *common,
+ struct cv1800_clk_regfield *div)
+{
+ u32 clockdiv = 1;
+ u32 reg;
+
+ if (!div || div->initval < 0 || (div->width == 0 && div->initval <= 0))
+ return 1;
+
+ if (div->width == 0 && div->initval > 0)
+ return div->initval;
+
+ reg = readl(common->base + div->reg);
+
+ if (div->initval == 0 || DIV_GET_EN_CLK_DIV_FACTOR(reg))
+ clockdiv = cv1800_clk_regfield_get(reg, div);
+ else if (div->initval > 0)
+ clockdiv = div->initval;
+
+ return clockdiv;
+}
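
[Editor's note] The way initval doubles as both a flag and a fallback in div_helper_get_clockdiv() above is easy to misread, so here is the same decision tree restated as a standalone function (NULL guard and MMIO read dropped). The field layout is hypothetical; bit 3 plays the role of _DIV_EN_CLK_DIV_FACTOR_FIELD defined earlier in this file.

#include <stdint.h>
#include <stdio.h>

struct field { uint8_t shift, width; int16_t initval; };

/* Same decision tree as div_helper_get_clockdiv(), on an already-read register value. */
static uint32_t effective_div(const struct field *f, uint32_t reg)
{
	uint32_t from_reg = (reg >> f->shift) & ((1U << f->width) - 1);
	int reg_valid = reg & (1U << 3);	/* "divide factor enable" bit */

	if (f->initval < 0 || (f->width == 0 && f->initval <= 0))
		return 1;		/* no divider: pass parent through */
	if (f->width == 0)
		return f->initval;	/* fixed divider, no register field */
	if (f->initval == 0 || reg_valid)
		return from_reg;	/* register field is authoritative */
	return f->initval;		/* reset default until the enable bit is set */
}

int main(void)
{
	struct field f = { .shift = 16, .width = 4, .initval = 5 };

	printf("%u\n", effective_div(&f, 0x00020000));	/* enable bit clear -> 5 */
	printf("%u\n", effective_div(&f, 0x00020008));	/* enable bit set   -> 2 */
	return 0;
}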
+
+static u32 div_helper_round_rate(struct cv1800_clk_regfield *div,
+ struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate)
+{
+ if (div->width == 0) {
+ if (div->initval <= 0)
+ return DIV_ROUND_UP_ULL(*prate, 1);
+ else
+ return DIV_ROUND_UP_ULL(*prate, div->initval);
+ }
+
+ return divider_round_rate_parent(hw, parent, rate, prate, NULL,
+ div->width, div->flags);
+}
+
+static long div_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
+ unsigned long rate, int id, void *data)
+{
+ struct cv1800_clk_div *div = data;
+
+ return div_helper_round_rate(&div->div, &div->common.hw, parent,
+ rate, parent_rate);
+}
+
+static bool div_is_better_rate(struct cv1800_clk_common *common,
+ unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ if (common->features & CLK_DIVIDER_ROUND_CLOSEST)
+ return abs_diff(target, now) < abs_diff(target, best);
+
+ return now <= target && now > best;
+}
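
[Editor's note] div_is_better_rate() above is the tie-breaker used by the parent/divider search in mux_helper_determine_rate() that follows: without CLK_DIVIDER_ROUND_CLOSEST a candidate may never overshoot the target, with the flag the smallest absolute error wins. A small worked example with made-up rates; the flag is passed as a plain bool here instead of being read from common->features.

#include <stdbool.h>
#include <stdio.h>

static unsigned long abs_diff(unsigned long a, unsigned long b)
{
	return a > b ? a - b : b - a;
}

/* Same comparison as div_is_better_rate(). */
static bool is_better(bool round_closest, unsigned long target,
		      unsigned long now, unsigned long best)
{
	if (round_closest)
		return abs_diff(target, now) < abs_diff(target, best);
	return now <= target && now > best;
}

int main(void)
{
	/* target 100 MHz; the best candidate found so far is 90 MHz */
	printf("%d\n", is_better(false, 100000000, 110000000, 90000000)); /* 0: overshoots */
	printf("%d\n", is_better(true,  100000000, 105000000, 90000000)); /* 1: smaller error */
	printf("%d\n", is_better(false, 100000000,  96000000, 90000000)); /* 1: closer from below */
	return 0;
}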
+
+static int mux_helper_determine_rate(struct cv1800_clk_common *common,
+ struct clk_rate_request *req,
+ long (*round)(struct clk_hw *,
+ unsigned long *,
+ unsigned long,
+ int,
+ void *),
+ void *data)
+{
+ unsigned long best_parent_rate = 0, best_rate = 0;
+ struct clk_hw *best_parent, *hw = &common->hw;
+ unsigned int i;
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
+ unsigned long adj_parent_rate;
+
+ best_parent = clk_hw_get_parent(hw);
+ best_parent_rate = clk_hw_get_rate(best_parent);
+
+ best_rate = round(best_parent, &adj_parent_rate,
+ req->rate, -1, data);
+
+ goto find;
+ }
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ unsigned long tmp_rate, parent_rate;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ tmp_rate = round(parent, &parent_rate, req->rate, i, data);
+
+ if (tmp_rate == req->rate) {
+ best_parent = parent;
+ best_parent_rate = parent_rate;
+ best_rate = tmp_rate;
+ goto find;
+ }
+
+ if (div_is_better_rate(common, req->rate,
+ tmp_rate, best_rate)) {
+ best_parent = parent;
+ best_parent_rate = parent_rate;
+ best_rate = tmp_rate;
+ }
+ }
+
+ if (best_rate == 0)
+ return -EINVAL;
+
+find:
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_parent_rate;
+ req->rate = best_rate;
+ return 0;
+}
+
+static int div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+
+ return mux_helper_determine_rate(&div->common, req,
+ div_round_rate, div);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+ unsigned long val;
+
+ val = div_helper_get_clockdiv(&div->common, &div->div);
+ if (val == 0)
+ return 0;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ div->div.flags, div->div.width);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+ unsigned long val;
+
+ val = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ return div_helper_set_rate(&div->common, &div->div, val);
+}
+
+const struct clk_ops cv1800_clk_div_ops = {
+ .disable = div_disable,
+ .enable = div_enable,
+ .is_enabled = div_is_enabled,
+
+ .determine_rate = div_determine_rate,
+ .recalc_rate = div_recalc_rate,
+ .set_rate = div_set_rate,
+};
+
+static inline struct cv1800_clk_bypass_div *
+hw_to_cv1800_clk_bypass_div(struct clk_hw *hw)
+{
+ struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
+
+ return container_of(div, struct cv1800_clk_bypass_div, div);
+}
+
+static long bypass_div_round_rate(struct clk_hw *parent,
+ unsigned long *parent_rate,
+ unsigned long rate, int id, void *data)
+{
+ struct cv1800_clk_bypass_div *div = data;
+
+ if (id == -1) {
+ if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
+ return *parent_rate;
+ else
+ return div_round_rate(parent, parent_rate, rate,
+ -1, &div->div);
+ }
+
+ if (id == 0)
+ return *parent_rate;
+
+ return div_round_rate(parent, parent_rate, rate, id - 1, &div->div);
+}
+
+static int bypass_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);
+
+ return mux_helper_determine_rate(&div->div.common, req,
+ bypass_div_round_rate, div);
+}
+
+static unsigned long bypass_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);
+
+ if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
+ return parent_rate;
+
+ return div_recalc_rate(hw, parent_rate);
+}
+
+static int bypass_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);
+
+ if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
+ return 0;
+
+ return div_set_rate(hw, rate, parent_rate);
+}
+
+static u8 bypass_div_get_parent(struct clk_hw *hw)
+{
+ struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);
+
+ if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
+ return 0;
+
+ return 1;
+}
+
+static int bypass_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);
+
+ if (index)
+ return cv1800_clk_clearbit(&div->div.common, &div->bypass);
+
+ return cv1800_clk_setbit(&div->div.common, &div->bypass);
+}
+
+const struct clk_ops cv1800_clk_bypass_div_ops = {
+ .disable = div_disable,
+ .enable = div_enable,
+ .is_enabled = div_is_enabled,
+
+ .determine_rate = bypass_div_determine_rate,
+ .recalc_rate = bypass_div_recalc_rate,
+ .set_rate = bypass_div_set_rate,
+
+ .set_parent = bypass_div_set_parent,
+ .get_parent = bypass_div_get_parent,
+};
+
+/* MUX */
+static inline struct cv1800_clk_mux *hw_to_cv1800_clk_mux(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_mux, common);
+}
+
+static int mux_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+
+ return cv1800_clk_setbit(&mux->common, &mux->gate);
+}
+
+static void mux_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+
+ cv1800_clk_clearbit(&mux->common, &mux->gate);
+}
+
+static int mux_is_enabled(struct clk_hw *hw)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+
+ return cv1800_clk_checkbit(&mux->common, &mux->gate);
+}
+
+static long mux_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
+ unsigned long rate, int id, void *data)
+{
+ struct cv1800_clk_mux *mux = data;
+
+ return div_helper_round_rate(&mux->div, &mux->common.hw, parent,
+ rate, parent_rate);
+}
+
+static int mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+
+ return mux_helper_determine_rate(&mux->common, req,
+ mux_round_rate, mux);
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+ unsigned long val;
+
+ val = div_helper_get_clockdiv(&mux->common, &mux->div);
+ if (val == 0)
+ return 0;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ mux->div.flags, mux->div.width);
+}
+
+static int mux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+ unsigned long val;
+
+ val = divider_get_val(rate, parent_rate, NULL,
+ mux->div.width, mux->div.flags);
+
+ return div_helper_set_rate(&mux->common, &mux->div, val);
+}
+
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+ u32 reg = readl(mux->common.base + mux->mux.reg);
+
+ return cv1800_clk_regfield_get(reg, &mux->mux);
+}
+
+static int _mux_set_parent(struct cv1800_clk_mux *mux, u8 index)
+{
+ u32 reg;
+
+ reg = readl(mux->common.base + mux->mux.reg);
+ reg = cv1800_clk_regfield_set(reg, index, &mux->mux);
+ writel(reg, mux->common.base + mux->mux.reg);
+
+ return 0;
+}
+
+static int mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(mux->common.lock, flags);
+
+ _mux_set_parent(mux, index);
+
+ spin_unlock_irqrestore(mux->common.lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops cv1800_clk_mux_ops = {
+ .disable = mux_disable,
+ .enable = mux_enable,
+ .is_enabled = mux_is_enabled,
+
+ .determine_rate = mux_determine_rate,
+ .recalc_rate = mux_recalc_rate,
+ .set_rate = mux_set_rate,
+
+ .set_parent = mux_set_parent,
+ .get_parent = mux_get_parent,
+};
+
+static inline struct cv1800_clk_bypass_mux *
+hw_to_cv1800_clk_bypass_mux(struct clk_hw *hw)
+{
+ struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
+
+ return container_of(mux, struct cv1800_clk_bypass_mux, mux);
+}
+
+static long bypass_mux_round_rate(struct clk_hw *parent,
+ unsigned long *parent_rate,
+ unsigned long rate, int id, void *data)
+{
+ struct cv1800_clk_bypass_mux *mux = data;
+
+ if (id == -1) {
+ if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
+ return *parent_rate;
+ else
+ return mux_round_rate(parent, parent_rate, rate,
+ -1, &mux->mux);
+ }
+
+ if (id == 0)
+ return *parent_rate;
+
+ return mux_round_rate(parent, parent_rate, rate, id - 1, &mux->mux);
+}
+
+static int bypass_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);
+
+ return mux_helper_determine_rate(&mux->mux.common, req,
+ bypass_mux_round_rate, mux);
+}
+
+static unsigned long bypass_mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);
+
+ if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
+ return parent_rate;
+
+ return mux_recalc_rate(hw, parent_rate);
+}
+
+static int bypass_mux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);
+
+ if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
+ return 0;
+
+ return mux_set_rate(hw, rate, parent_rate);
+}
+
+static u8 bypass_mux_get_parent(struct clk_hw *hw)
+{
+ struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);
+
+ if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
+ return 0;
+
+ return mux_get_parent(hw) + 1;
+}
+
+static int bypass_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);
+
+ if (index == 0)
+ return cv1800_clk_setbit(&mux->mux.common, &mux->bypass);
+
+ return cv1800_clk_clearbit(&mux->mux.common, &mux->bypass);
+}
+
+const struct clk_ops cv1800_clk_bypass_mux_ops = {
+ .disable = mux_disable,
+ .enable = mux_enable,
+ .is_enabled = mux_is_enabled,
+
+ .determine_rate = bypass_mux_determine_rate,
+ .recalc_rate = bypass_mux_recalc_rate,
+ .set_rate = bypass_mux_set_rate,
+
+ .set_parent = bypass_mux_set_parent,
+ .get_parent = bypass_mux_get_parent,
+};
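
[Editor's note] Both bypass variants above expose the bypass source as clock-framework parent 0 and shift the real mux inputs up by one (see the "+ 1" in bypass_mux_get_parent() and the "id - 1" in the round-rate helpers). The sketch below spells out that index correspondence on its own; to_hw_field() is a hypothetical helper, not part of the driver.

#include <stdio.h>

/*
 * Parent index convention of the bypass mux/div ops:
 *   framework index 0    -> bypass bit set, hardware mux field left alone
 *   framework index n > 0 -> bypass bit cleared, corresponds to mux field n - 1
 */
static int to_hw_field(unsigned int ccf_index, int *bypass)
{
	if (ccf_index == 0) {
		*bypass = 1;
		return -1;
	}
	*bypass = 0;
	return ccf_index - 1;
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++) {
		int bypass, field = to_hw_field(i, &bypass);

		printf("ccf parent %u -> bypass=%d, mux field=%d\n", i, bypass, field);
	}
	return 0;
}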
+
+/* MMUX */
+static inline struct cv1800_clk_mmux *hw_to_cv1800_clk_mmux(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_mmux, common);
+}
+
+static u8 mmux_get_parent_id(struct cv1800_clk_mmux *mmux)
+{
+ struct clk_hw *hw = &mmux->common.hw;
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ if (parent == clk_hw_get_parent_by_index(hw, i))
+ return i;
+ }
+
+ unreachable();
+}
+
+static int mmux_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+
+ return cv1800_clk_setbit(&mmux->common, &mmux->gate);
+}
+
+static void mmux_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+
+ cv1800_clk_clearbit(&mmux->common, &mmux->gate);
+}
+
+static int mmux_is_enabled(struct clk_hw *hw)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+
+ return cv1800_clk_checkbit(&mmux->common, &mmux->gate);
+}
+
+static long mmux_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
+ unsigned long rate, int id, void *data)
+{
+ struct cv1800_clk_mmux *mmux = data;
+ s8 div_id;
+
+ if (id == -1) {
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
+ return *parent_rate;
+
+ id = mmux_get_parent_id(mmux);
+ }
+
+ div_id = mmux->parent2sel[id];
+
+ if (div_id < 0)
+ return *parent_rate;
+
+ return div_helper_round_rate(&mmux->div[div_id],
+ &mmux->common.hw, parent,
+ rate, parent_rate);
+}
+
+static int mmux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+
+ return mux_helper_determine_rate(&mmux->common, req,
+ mmux_round_rate, mmux);
+}
+
+static unsigned long mmux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+ unsigned long val;
+ struct cv1800_clk_regfield *div;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
+ return parent_rate;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
+ div = &mmux->div[0];
+ else
+ div = &mmux->div[1];
+
+ val = div_helper_get_clockdiv(&mmux->common, div);
+ if (val == 0)
+ return 0;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static int mmux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+ struct cv1800_clk_regfield *div;
+ unsigned long val;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
+ return parent_rate;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
+ div = &mmux->div[0];
+ else
+ div = &mmux->div[1];
+
+ val = divider_get_val(rate, parent_rate, NULL,
+ div->width, div->flags);
+
+ return div_helper_set_rate(&mmux->common, div, val);
+}
+
+static u8 mmux_get_parent(struct clk_hw *hw)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+ struct cv1800_clk_regfield *mux;
+ u32 reg;
+ s8 clk_sel;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
+ return 0;
+
+ if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
+ clk_sel = 0;
+ else
+ clk_sel = 1;
+ mux = &mmux->mux[clk_sel];
+
+ reg = readl(mmux->common.base + mux->reg);
+
+ return mmux->sel2parent[clk_sel][cv1800_clk_regfield_get(reg, mux)];
+}
+
+static int mmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
+ struct cv1800_clk_regfield *mux;
+ unsigned long flags;
+ u32 reg;
+ s8 clk_sel = mmux->parent2sel[index];
+
+ if (index == 0 || clk_sel == -1) {
+ cv1800_clk_setbit(&mmux->common, &mmux->bypass);
+ goto release;
+ }
+
+ cv1800_clk_clearbit(&mmux->common, &mmux->bypass);
+
+ if (clk_sel)
+ cv1800_clk_clearbit(&mmux->common, &mmux->clk_sel);
+ else
+ cv1800_clk_setbit(&mmux->common, &mmux->clk_sel);
+
+ spin_lock_irqsave(mmux->common.lock, flags);
+
+ mux = &mmux->mux[clk_sel];
+ reg = readl(mmux->common.base + mux->reg);
+ reg = cv1800_clk_regfield_set(reg, index, mux);
+
+ writel(reg, mmux->common.base + mux->reg);
+
+ spin_unlock_irqrestore(mmux->common.lock, flags);
+
+release:
+ return 0;
+}
+
+const struct clk_ops cv1800_clk_mmux_ops = {
+ .disable = mmux_disable,
+ .enable = mmux_enable,
+ .is_enabled = mmux_is_enabled,
+
+ .determine_rate = mmux_determine_rate,
+ .recalc_rate = mmux_recalc_rate,
+ .set_rate = mmux_set_rate,
+
+ .set_parent = mmux_set_parent,
+ .get_parent = mmux_get_parent,
+};
+
+/* AUDIO CLK */
+static inline struct cv1800_clk_audio *
+hw_to_cv1800_clk_audio(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_audio, common);
+}
+
+static int aclk_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+
+ cv1800_clk_setbit(&aclk->common, &aclk->src_en);
+ return cv1800_clk_setbit(&aclk->common, &aclk->output_en);
+}
+
+static void aclk_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+
+ cv1800_clk_clearbit(&aclk->common, &aclk->output_en);
+ cv1800_clk_clearbit(&aclk->common, &aclk->src_en);
+}
+
+static int aclk_is_enabled(struct clk_hw *hw)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+
+ return cv1800_clk_checkbit(&aclk->common, &aclk->output_en);
+}
+
+static int aclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+
+ req->rate = aclk->target_rate;
+
+ return 0;
+}
+
+static unsigned long aclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+ u64 rate = parent_rate;
+ u64 factor = 2;
+ u32 regval;
+
+ if (!cv1800_clk_checkbit(&aclk->common, &aclk->div_en))
+ return 0;
+
+ regval = readl(aclk->common.base + aclk->m.reg);
+ factor *= cv1800_clk_regfield_get(regval, &aclk->m);
+
+ regval = readl(aclk->common.base + aclk->n.reg);
+ rate *= cv1800_clk_regfield_get(regval, &aclk->n);
+
+ return DIV64_U64_ROUND_UP(rate, factor);
+}
+
+static void aclk_determine_mn(unsigned long parent_rate, unsigned long rate,
+ u32 *m, u32 *n)
+{
+ u32 tm = parent_rate / 2;
+ u32 tn = rate;
+ u32 tcommon = gcd(tm, tn);
+ *m = tm / tcommon;
+ *n = tn / tcommon;
+}
+
+static int aclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
+ unsigned long flags;
+ u32 m, n;
+
+ aclk_determine_mn(parent_rate, rate,
+ &m, &n);
+
+ spin_lock_irqsave(aclk->common.lock, flags);
+
+ writel(m, aclk->common.base + aclk->m.reg);
+ writel(n, aclk->common.base + aclk->n.reg);
+
+ cv1800_clk_setbit(&aclk->common, &aclk->div_en);
+ cv1800_clk_setbit(&aclk->common, &aclk->div_up);
+
+ spin_unlock_irqrestore(aclk->common.lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops cv1800_clk_audio_ops = {
+ .disable = aclk_disable,
+ .enable = aclk_enable,
+ .is_enabled = aclk_is_enabled,
+
+ .determine_rate = aclk_determine_rate,
+ .recalc_rate = aclk_recalc_rate,
+ .set_rate = aclk_set_rate,
+};
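
The audio clock above realises a fractional divider of the form rate = parent_rate * N / (2 * M), where aclk_determine_mn() obtains M and N by reducing parent_rate/2 and the requested rate through their GCD, and aclk_recalc_rate() reads the result back with a round-up division. The following stand-alone, user-space sketch mirrors that arithmetic only; it is not driver code, and the example rates are arbitrary.

#include <stdio.h>
#include <stdint.h>

static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	uint32_t parent_rate = 24576000;	/* illustrative input, not from the patch */
	uint32_t target = 12288000;

	uint32_t tm = parent_rate / 2;
	uint32_t tn = target;
	uint32_t g = gcd_u32(tm, tn);
	uint32_t m = tm / g, n = tn / g;

	/* what aclk_recalc_rate() computes: round_up(parent * N / (2 * M)) */
	uint64_t out = ((uint64_t)parent_rate * n + 2ULL * m - 1) / (2ULL * m);

	printf("M=%u N=%u -> %llu Hz\n", m, n, (unsigned long long)out);
	return 0;
}
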
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.h b/drivers/clk/sophgo/clk-cv18xx-ip.h
new file mode 100644
index 000000000000..b37ba42bfde3
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _CLK_SOPHGO_CV1800_IP_H_
+#define _CLK_SOPHGO_CV1800_IP_H_
+
+#include "clk-cv18xx-common.h"
+
+struct cv1800_clk_gate {
+ struct cv1800_clk_common common;
+ struct cv1800_clk_regbit gate;
+};
+
+struct cv1800_clk_div_data {
+ u32 reg;
+ u32 mask;
+ u32 width;
+ u32 init;
+ u32 flags;
+};
+
+struct cv1800_clk_div {
+ struct cv1800_clk_common common;
+ struct cv1800_clk_regbit gate;
+ struct cv1800_clk_regfield div;
+};
+
+struct cv1800_clk_bypass_div {
+ struct cv1800_clk_div div;
+ struct cv1800_clk_regbit bypass;
+};
+
+struct cv1800_clk_mux {
+ struct cv1800_clk_common common;
+ struct cv1800_clk_regbit gate;
+ struct cv1800_clk_regfield div;
+ struct cv1800_clk_regfield mux;
+};
+
+struct cv1800_clk_bypass_mux {
+ struct cv1800_clk_mux mux;
+ struct cv1800_clk_regbit bypass;
+};
+
+struct cv1800_clk_mmux {
+ struct cv1800_clk_common common;
+ struct cv1800_clk_regbit gate;
+ struct cv1800_clk_regfield div[2];
+ struct cv1800_clk_regfield mux[2];
+ struct cv1800_clk_regbit bypass;
+ struct cv1800_clk_regbit clk_sel;
+ const s8 *parent2sel;
+ const u8 *sel2parent[2];
+};
+
+struct cv1800_clk_audio {
+ struct cv1800_clk_common common;
+ struct cv1800_clk_regbit src_en;
+ struct cv1800_clk_regbit output_en;
+ struct cv1800_clk_regbit div_en;
+ struct cv1800_clk_regbit div_up;
+ struct cv1800_clk_regfield m;
+ struct cv1800_clk_regfield n;
+ u32 target_rate;
+};
+
+#define CV1800_GATE(_name, _parent, _gate_reg, _gate_shift, _flags) \
+ struct cv1800_clk_gate _name = { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ &cv1800_clk_gate_ops, \
+ _flags), \
+ .gate = CV1800_CLK_BIT(_gate_reg, _gate_shift), \
+ }
+
+#define _CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, _ops, _flags) \
+ { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ _ops, _flags), \
+ .gate = CV1800_CLK_BIT(_gate_reg, \
+ _gate_shift), \
+ .div = CV1800_CLK_REG(_div_reg, _div_shift, \
+ _div_width, _div_init, \
+ _div_flag), \
+ }
+
+#define _CV1800_FIXED_DIV_FLAG \
+ (CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST)
+
+#define _CV1800_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _fix_div, _ops, _flags) \
+ { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ _ops, _flags), \
+ .gate = CV1800_CLK_BIT(_gate_reg, \
+ _gate_shift), \
+ .div = CV1800_CLK_REG(0, 0, 0, \
+ _fix_div, \
+ _CV1800_FIXED_DIV_FLAG),\
+ }
+
+#define CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, _flags) \
+ struct cv1800_clk_div _name = \
+ _CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init,\
+ _div_flag, &cv1800_clk_div_ops, _flags)
+
+#define CV1800_BYPASS_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, _bypass_reg, _bypass_shift, _flags)\
+ struct cv1800_clk_bypass_div _name = { \
+ .div = _CV1800_DIV(_name, _parent, \
+ _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, \
+ _div_width, _div_init, _div_flag, \
+ &cv1800_clk_bypass_div_ops, \
+ _flags), \
+ .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \
+ }
+
+#define CV1800_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _fix_div, _flags) \
+ struct cv1800_clk_div _name = \
+ _CV1800_FIXED_DIV(_name, _parent, \
+ _gate_reg, _gate_shift, \
+ _fix_div, \
+ &cv1800_clk_div_ops, _flags) \
+
+#define CV1800_BYPASS_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \
+ _fix_div, _bypass_reg, _bypass_shift, \
+ _flags) \
+ struct cv1800_clk_bypass_div _name = { \
+ .div = _CV1800_FIXED_DIV(_name, _parent, \
+ _gate_reg, _gate_shift, \
+ _fix_div, \
+ &cv1800_clk_bypass_div_ops, \
+ _flags), \
+ .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \
+ }
+
+#define _CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _ops, _flags) \
+ { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ _ops, _flags), \
+ .gate = CV1800_CLK_BIT(_gate_reg, \
+ _gate_shift), \
+ .div = CV1800_CLK_REG(_div_reg, _div_shift, \
+ _div_width, _div_init, \
+ _div_flag), \
+ .mux = CV1800_CLK_REG(_mux_reg, _mux_shift, \
+ _mux_width, 0, 0), \
+ }
+
+#define CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, \
+ _mux_reg, _mux_shift, _mux_width, _flags) \
+ struct cv1800_clk_mux _name = \
+ _CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init,\
+ _div_flag, _mux_reg, _mux_shift, _mux_width,\
+ &cv1800_clk_mux_ops, _flags)
+
+#define CV1800_BYPASS_MUX(_name, _parent, _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, _div_init, \
+ _div_flag, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _bypass_reg, _bypass_shift, _flags) \
+ struct cv1800_clk_bypass_mux _name = { \
+ .mux = _CV1800_MUX(_name, _parent, \
+ _gate_reg, _gate_shift, \
+ _div_reg, _div_shift, _div_width, \
+ _div_init, _div_flag, \
+ _mux_reg, _mux_shift, _mux_width, \
+ &cv1800_clk_bypass_mux_ops, \
+ _flags), \
+ .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \
+ }
+
+#define CV1800_MMUX(_name, _parent, _gate_reg, _gate_shift, \
+ _div0_reg, _div0_shift, _div0_width, _div0_init, \
+ _div0_flag, \
+ _div1_reg, _div1_shift, _div1_width, _div1_init, \
+ _div1_flag, \
+ _mux0_reg, _mux0_shift, _mux0_width, \
+ _mux1_reg, _mux1_shift, _mux1_width, \
+ _bypass_reg, _bypass_shift, \
+ _clk_sel_reg, _clk_sel_shift, \
+ _parent2sel, _sel2parent0, _sel2parent1, _flags) \
+ struct cv1800_clk_mmux _name = { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ &cv1800_clk_mmux_ops,\
+ _flags), \
+ .gate = CV1800_CLK_BIT(_gate_reg, _gate_shift),\
+ .div = { \
+ CV1800_CLK_REG(_div0_reg, _div0_shift, \
+ _div0_width, _div0_init, \
+ _div0_flag), \
+ CV1800_CLK_REG(_div1_reg, _div1_shift, \
+ _div1_width, _div1_init, \
+ _div1_flag), \
+ }, \
+ .mux = { \
+ CV1800_CLK_REG(_mux0_reg, _mux0_shift, \
+ _mux0_width, 0, 0), \
+ CV1800_CLK_REG(_mux1_reg, _mux1_shift, \
+ _mux1_width, 0, 0), \
+ }, \
+ .bypass = CV1800_CLK_BIT(_bypass_reg, \
+ _bypass_shift), \
+ .clk_sel = CV1800_CLK_BIT(_clk_sel_reg, \
+ _clk_sel_shift), \
+ .parent2sel = _parent2sel, \
+ .sel2parent = { _sel2parent0, _sel2parent1 }, \
+ }
+
+#define CV1800_ACLK(_name, _parent, \
+ _src_en_reg, _src_en_reg_shift, \
+ _output_en_reg, _output_en_shift, \
+ _div_en_reg, _div_en_reg_shift, \
+ _div_up_reg, _div_up_reg_shift, \
+ _m_reg, _m_shift, _m_width, _m_flag, \
+ _n_reg, _n_shift, _n_width, _n_flag, \
+ _target_rate, _flags) \
+ struct cv1800_clk_audio _name = { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ &cv1800_clk_audio_ops,\
+ _flags), \
+ .src_en = CV1800_CLK_BIT(_src_en_reg, \
+ _src_en_reg_shift), \
+ .output_en = CV1800_CLK_BIT(_output_en_reg, \
+ _output_en_shift), \
+ .div_en = CV1800_CLK_BIT(_div_en_reg, \
+ _div_en_reg_shift), \
+ .div_up = CV1800_CLK_BIT(_div_up_reg, \
+ _div_up_reg_shift), \
+ .m = CV1800_CLK_REG(_m_reg, _m_shift, \
+ _m_width, 0, _m_flag), \
+ .n = CV1800_CLK_REG(_n_reg, _n_shift, \
+ _n_width, 0, _n_flag), \
+ .target_rate = _target_rate, \
+ }
+
+extern const struct clk_ops cv1800_clk_gate_ops;
+extern const struct clk_ops cv1800_clk_div_ops;
+extern const struct clk_ops cv1800_clk_bypass_div_ops;
+extern const struct clk_ops cv1800_clk_mux_ops;
+extern const struct clk_ops cv1800_clk_bypass_mux_ops;
+extern const struct clk_ops cv1800_clk_mmux_ops;
+extern const struct clk_ops cv1800_clk_audio_ops;
+
+#endif // _CLK_SOPHGO_CV1800_IP_H_
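
struct cv1800_clk_mmux above carries two translation tables: parent2sel[] maps a clock-framework parent index to one of the two hardware selector fields (a negative entry marking a parent that is only reachable through the bypass bit), while sel2parent[] maps a selector plus its register field value back to a parent index, as walked by mmux_get_parent()/mmux_set_parent() in clk-cv18xx-ip.c. The snippet below is a self-contained illustration of that double lookup with made-up tables; it is not the mapping the driver ships.

#include <stdio.h>

static const signed char parent2sel[]    = { -1, 0, 0, 1, 1 };	/* index 0: bypass */
static const unsigned char sel2parent0[] = { 1, 2 };		/* selector 0, field 0..1 */
static const unsigned char sel2parent1[] = { 3, 4 };		/* selector 1, field 0..1 */
static const unsigned char *sel2parent[2] = { sel2parent0, sel2parent1 };

int main(void)
{
	int index = 3;			/* parent requested by the framework */
	int sel = parent2sel[index];	/* forward direction: which mux field to use */

	if (sel < 0)
		printf("parent %d: bypass path, no mux field is touched\n", index);
	else
		printf("parent %d is selected through hardware mux field %d\n",
		       index, sel);

	/* reverse direction, as mmux_get_parent() does: field value -> parent index */
	printf("mux field 1 holding value 0 reports parent %d\n", sel2parent[1][0]);

	return 0;
}
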
diff --git a/drivers/clk/sophgo/clk-cv18xx-pll.c b/drivers/clk/sophgo/clk-cv18xx-pll.c
new file mode 100644
index 000000000000..29e24098bf5f
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-pll.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/limits.h>
+#include <linux/spinlock.h>
+
+#include "clk-cv18xx-pll.h"
+
+static inline struct cv1800_clk_pll *hw_to_cv1800_clk_pll(struct clk_hw *hw)
+{
+ struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);
+
+ return container_of(common, struct cv1800_clk_pll, common);
+}
+
+static unsigned long ipll_calc_rate(unsigned long parent_rate,
+ unsigned long pre_div_sel,
+ unsigned long div_sel,
+ unsigned long post_div_sel)
+{
+ uint64_t rate = parent_rate;
+
+ rate *= div_sel;
+ do_div(rate, pre_div_sel * post_div_sel);
+
+ return rate;
+}
+
+static unsigned long ipll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+ u32 value;
+
+ value = readl(pll->common.base + pll->pll_reg);
+
+ return ipll_calc_rate(parent_rate,
+ PLL_GET_PRE_DIV_SEL(value),
+ PLL_GET_DIV_SEL(value),
+ PLL_GET_POST_DIV_SEL(value));
+}
+
+static int ipll_find_rate(const struct cv1800_clk_pll_limit *limit,
+ unsigned long prate, unsigned long *rate,
+ u32 *value)
+{
+ unsigned long best_rate = 0;
+ unsigned long trate = *rate;
+ unsigned long pre_div_sel = 0, div_sel = 0, post_div_sel = 0;
+ unsigned long pre, div, post;
+ u32 detected = *value;
+ unsigned long tmp;
+
+ for_each_pll_limit_range(pre, &limit->pre_div) {
+ for_each_pll_limit_range(div, &limit->div) {
+ for_each_pll_limit_range(post, &limit->post_div) {
+ tmp = ipll_calc_rate(prate, pre, div, post);
+
+ if (tmp > trate)
+ continue;
+
+ if ((trate - tmp) < (trate - best_rate)) {
+ best_rate = tmp;
+ pre_div_sel = pre;
+ div_sel = div;
+ post_div_sel = post;
+ }
+ }
+ }
+ }
+
+ if (best_rate) {
+ detected = PLL_SET_PRE_DIV_SEL(detected, pre_div_sel);
+ detected = PLL_SET_POST_DIV_SEL(detected, post_div_sel);
+ detected = PLL_SET_DIV_SEL(detected, div_sel);
+ *value = detected;
+ *rate = best_rate;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ipll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ u32 val;
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ return ipll_find_rate(pll->pll_limit, req->best_parent_rate,
+ &req->rate, &val);
+}
+
+static void pll_get_mode_ctrl(unsigned long div_sel,
+ bool (*mode_ctrl_check)(unsigned long,
+ unsigned long,
+ unsigned long),
+ const struct cv1800_clk_pll_limit *limit,
+ u32 *value)
+{
+ unsigned long ictrl = 0, mode = 0;
+ u32 detected = *value;
+
+ for_each_pll_limit_range(mode, &limit->mode) {
+ for_each_pll_limit_range(ictrl, &limit->ictrl) {
+ if (mode_ctrl_check(div_sel, ictrl, mode)) {
+ detected = PLL_SET_SEL_MODE(detected, mode);
+ detected = PLL_SET_ICTRL(detected, ictrl);
+ *value = detected;
+ return;
+ }
+ }
+ }
+}
+
+static bool ipll_check_mode_ctrl_restrict(unsigned long div_sel,
+ unsigned long ictrl,
+ unsigned long mode)
+{
+ unsigned long left_rest = 20 * div_sel;
+ unsigned long right_rest = 35 * div_sel;
+ unsigned long test = 184 * (1 + mode) * (1 + ictrl) / 2;
+
+ return test > left_rest && test <= right_rest;
+}
+
+static int ipll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 regval, detected = 0;
+ unsigned long flags;
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ ipll_find_rate(pll->pll_limit, parent_rate, &rate, &detected);
+ pll_get_mode_ctrl(PLL_GET_DIV_SEL(detected),
+ ipll_check_mode_ctrl_restrict,
+ pll->pll_limit, &detected);
+
+ spin_lock_irqsave(pll->common.lock, flags);
+
+ regval = readl(pll->common.base + pll->pll_reg);
+ regval = PLL_COPY_REG(regval, detected);
+
+ writel(regval, pll->common.base + pll->pll_reg);
+
+ spin_unlock_irqrestore(pll->common.lock, flags);
+
+ cv1800_clk_wait_for_lock(&pll->common, pll->pll_status.reg,
+ BIT(pll->pll_status.shift));
+
+ return 0;
+}
+
+static int pll_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ return cv1800_clk_clearbit(&pll->common, &pll->pll_pwd);
+}
+
+static void pll_disable(struct clk_hw *hw)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ cv1800_clk_setbit(&pll->common, &pll->pll_pwd);
+}
+
+static int pll_is_enable(struct clk_hw *hw)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ return cv1800_clk_checkbit(&pll->common, &pll->pll_pwd) == 0;
+}
+
+const struct clk_ops cv1800_clk_ipll_ops = {
+ .disable = pll_disable,
+ .enable = pll_enable,
+ .is_enabled = pll_is_enable,
+
+ .recalc_rate = ipll_recalc_rate,
+ .determine_rate = ipll_determine_rate,
+ .set_rate = ipll_set_rate,
+};
+
+#define PLL_SYN_FACTOR_DOT_POS 26
+#define PLL_SYN_FACTOR_MINIMUM ((4 << PLL_SYN_FACTOR_DOT_POS) + 1)
+
+static bool fpll_is_factional_mode(struct cv1800_clk_pll *pll)
+{
+ return cv1800_clk_checkbit(&pll->common, &pll->pll_syn->en);
+}
+
+static unsigned long fpll_calc_rate(unsigned long parent_rate,
+ unsigned long pre_div_sel,
+ unsigned long div_sel,
+ unsigned long post_div_sel,
+ unsigned long ssc_syn_set,
+ bool is_full_parent)
+{
+ u64 dividend = parent_rate * div_sel;
+ u64 factor = ssc_syn_set * pre_div_sel * post_div_sel;
+ unsigned long rate;
+
+ dividend <<= PLL_SYN_FACTOR_DOT_POS - 1;
+ rate = div64_u64_rem(dividend, factor, &dividend);
+
+ if (is_full_parent) {
+ dividend <<= 1;
+ rate <<= 1;
+ }
+
+ rate += DIV64_U64_ROUND_CLOSEST(dividend, factor);
+
+ return rate;
+}
+
+static unsigned long fpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+ u32 value;
+ bool clk_full;
+ u32 syn_set;
+
+ if (!fpll_is_factional_mode(pll))
+ return ipll_recalc_rate(hw, parent_rate);
+
+ syn_set = readl(pll->common.base + pll->pll_syn->set);
+
+ if (syn_set == 0)
+ return 0;
+
+ clk_full = cv1800_clk_checkbit(&pll->common,
+ &pll->pll_syn->clk_half);
+
+ value = readl(pll->common.base + pll->pll_reg);
+
+ return fpll_calc_rate(parent_rate,
+ PLL_GET_PRE_DIV_SEL(value),
+ PLL_GET_DIV_SEL(value),
+ PLL_GET_POST_DIV_SEL(value),
+ syn_set, clk_full);
+}
+
+static unsigned long fpll_find_synthesizer(unsigned long parent,
+ unsigned long rate,
+ unsigned long pre_div,
+ unsigned long div,
+ unsigned long post_div,
+ bool is_full_parent,
+ u32 *ssc_syn_set)
+{
+ u32 test_max = U32_MAX, test_min = PLL_SYN_FACTOR_MINIMUM;
+ unsigned long trate;
+
+ while (test_min < test_max) {
+ u32 tssc = (test_max + test_min) / 2;
+
+ trate = fpll_calc_rate(parent, pre_div, div, post_div,
+ tssc, is_full_parent);
+
+ if (trate == rate) {
+ test_min = tssc;
+ break;
+ }
+
+ if (trate > rate)
+ test_min = tssc + 1;
+ else
+ test_max = tssc - 1;
+ }
+
+ if (trate != 0)
+ *ssc_syn_set = test_min;
+
+ return trate;
+}
+
+static int fpll_find_rate(struct cv1800_clk_pll *pll,
+ const struct cv1800_clk_pll_limit *limit,
+ unsigned long prate,
+ unsigned long *rate,
+ u32 *value, u32 *ssc_syn_set)
+{
+ unsigned long best_rate = 0;
+ unsigned long pre_div_sel = 0, div_sel = 0, post_div_sel = 0;
+ unsigned long pre, div, post;
+ unsigned long trate = *rate;
+ u32 detected = *value;
+ unsigned long tmp;
+ bool clk_full = cv1800_clk_checkbit(&pll->common,
+ &pll->pll_syn->clk_half);
+
+ for_each_pll_limit_range(pre, &limit->pre_div) {
+ for_each_pll_limit_range(post, &limit->post_div) {
+ for_each_pll_limit_range(div, &limit->div) {
+ tmp = fpll_find_synthesizer(prate, trate,
+ pre, div, post,
+ clk_full,
+ ssc_syn_set);
+
+ if ((trate - tmp) < (trate - best_rate)) {
+ best_rate = tmp;
+ pre_div_sel = pre;
+ div_sel = div;
+ post_div_sel = post;
+ }
+ }
+ }
+ }
+
+ if (best_rate) {
+ detected = PLL_SET_PRE_DIV_SEL(detected, pre_div_sel);
+ detected = PLL_SET_POST_DIV_SEL(detected, post_div_sel);
+ detected = PLL_SET_DIV_SEL(detected, div_sel);
+ *value = detected;
+ *rate = best_rate;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+ u32 val, ssc_syn_set;
+
+ if (!fpll_is_factional_mode(pll))
+ return ipll_determine_rate(hw, req);
+
+ fpll_find_rate(pll, &pll->pll_limit[2], req->best_parent_rate,
+ &req->rate, &val, &ssc_syn_set);
+
+ return 0;
+}
+
+static bool fpll_check_mode_ctrl_restrict(unsigned long div_sel,
+ unsigned long ictrl,
+ unsigned long mode)
+{
+ unsigned long left_rest = 10 * div_sel;
+ unsigned long right_rest = 24 * div_sel;
+ unsigned long test = 184 * (1 + mode) * (1 + ictrl) / 2;
+
+ return test > left_rest && test <= right_rest;
+}
+
+static int fpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 regval;
+ u32 detected = 0, detected_ssc = 0;
+ unsigned long flags;
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ if (!fpll_is_factional_mode(pll))
+ return ipll_set_rate(hw, rate, parent_rate);
+
+ fpll_find_rate(pll, &pll->pll_limit[2], parent_rate,
+ &rate, &detected, &detected_ssc);
+ pll_get_mode_ctrl(PLL_GET_DIV_SEL(detected),
+ fpll_check_mode_ctrl_restrict,
+ pll->pll_limit, &detected);
+
+ spin_lock_irqsave(pll->common.lock, flags);
+
+ writel(detected_ssc, pll->common.base + pll->pll_syn->set);
+
+ regval = readl(pll->common.base + pll->pll_reg);
+ regval = PLL_COPY_REG(regval, detected);
+
+ writel(regval, pll->common.base + pll->pll_reg);
+
+ spin_unlock_irqrestore(pll->common.lock, flags);
+
+ cv1800_clk_wait_for_lock(&pll->common, pll->pll_status.reg,
+ BIT(pll->pll_status.shift));
+
+ return 0;
+}
+
+static u8 fpll_get_parent(struct clk_hw *hw)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ if (fpll_is_factional_mode(pll))
+ return 1;
+
+ return 0;
+}
+
+static int fpll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw);
+
+ if (index)
+ cv1800_clk_setbit(&pll->common, &pll->pll_syn->en);
+ else
+ cv1800_clk_clearbit(&pll->common, &pll->pll_syn->en);
+
+ return 0;
+}
+
+const struct clk_ops cv1800_clk_fpll_ops = {
+ .disable = pll_disable,
+ .enable = pll_enable,
+ .is_enabled = pll_is_enable,
+
+ .recalc_rate = fpll_recalc_rate,
+ .determine_rate = fpll_determine_rate,
+ .set_rate = fpll_set_rate,
+
+ .set_parent = fpll_set_parent,
+ .get_parent = fpll_get_parent,
+};
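
ipll_find_rate() above is an exhaustive search over the three divider fields: every candidate rate = parent * div / (pre * post) that stays at or below the request is considered, and the closest one wins. A stand-alone rendering of the same loop is shown below; the limit values are illustrative, not the ones the driver uses.

#include <stdio.h>
#include <stdint.h>

struct range { unsigned int min, max; };

int main(void)
{
	const struct range pre  = { 1, 7 };
	const struct range div  = { 6, 127 };
	const struct range post = { 1, 7 };
	uint64_t parent = 25000000, target = 1600000000;
	uint64_t best = 0;
	unsigned int bp = 0, bd = 0, bo = 0;

	for (unsigned int p = pre.min; p <= pre.max; p++)
		for (unsigned int d = div.min; d <= div.max; d++)
			for (unsigned int o = post.min; o <= post.max; o++) {
				uint64_t r = parent * d / (p * o);

				if (r > target)
					continue;	/* only candidates at or below the request */
				if (target - r < target - best) {
					best = r;
					bp = p; bd = d; bo = o;
				}
			}

	printf("best %llu Hz with pre=%u div=%u post=%u\n",
	       (unsigned long long)best, bp, bd, bo);
	return 0;
}
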
diff --git a/drivers/clk/sophgo/clk-cv18xx-pll.h b/drivers/clk/sophgo/clk-cv18xx-pll.h
new file mode 100644
index 000000000000..7a33f3da2d64
--- /dev/null
+++ b/drivers/clk/sophgo/clk-cv18xx-pll.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _CLK_SOPHGO_CV1800_PLL_H_
+#define _CLK_SOPHGO_CV1800_PLL_H_
+
+#include "clk-cv18xx-common.h"
+
+struct cv1800_clk_pll_limit {
+ struct {
+ u8 min;
+ u8 max;
+ } pre_div, div, post_div, ictrl, mode;
+};
+
+#define _CV1800_PLL_LIMIT(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ } \
+
+#define for_each_pll_limit_range(_var, _restrict) \
+ for (_var = (_restrict)->min; _var <= (_restrict)->max; _var++)
+
+struct cv1800_clk_pll_synthesizer {
+ struct cv1800_clk_regbit en;
+ struct cv1800_clk_regbit clk_half;
+ u32 ctrl;
+ u32 set;
+};
+
+#define _PLL_PRE_DIV_SEL_FIELD GENMASK(6, 0)
+#define _PLL_POST_DIV_SEL_FIELD GENMASK(14, 8)
+#define _PLL_SEL_MODE_FIELD GENMASK(16, 15)
+#define _PLL_DIV_SEL_FIELD GENMASK(23, 17)
+#define _PLL_ICTRL_FIELD GENMASK(26, 24)
+
+#define _PLL_ALL_FIELD_MASK \
+ (_PLL_PRE_DIV_SEL_FIELD | \
+ _PLL_POST_DIV_SEL_FIELD | \
+ _PLL_SEL_MODE_FIELD | \
+ _PLL_DIV_SEL_FIELD | \
+ _PLL_ICTRL_FIELD)
+
+#define PLL_COPY_REG(_dest, _src) \
+ (((_dest) & (~_PLL_ALL_FIELD_MASK)) | ((_src) & _PLL_ALL_FIELD_MASK))
+
+#define PLL_GET_PRE_DIV_SEL(_reg) \
+ FIELD_GET(_PLL_PRE_DIV_SEL_FIELD, (_reg))
+#define PLL_GET_POST_DIV_SEL(_reg) \
+ FIELD_GET(_PLL_POST_DIV_SEL_FIELD, (_reg))
+#define PLL_GET_SEL_MODE(_reg) \
+ FIELD_GET(_PLL_SEL_MODE_FIELD, (_reg))
+#define PLL_GET_DIV_SEL(_reg) \
+ FIELD_GET(_PLL_DIV_SEL_FIELD, (_reg))
+#define PLL_GET_ICTRL(_reg) \
+ FIELD_GET(_PLL_ICTRL_FIELD, (_reg))
+
+#define PLL_SET_PRE_DIV_SEL(_reg, _val) \
+ _CV1800_SET_FIELD((_reg), (_val), _PLL_PRE_DIV_SEL_FIELD)
+#define PLL_SET_POST_DIV_SEL(_reg, _val) \
+ _CV1800_SET_FIELD((_reg), (_val), _PLL_POST_DIV_SEL_FIELD)
+#define PLL_SET_SEL_MODE(_reg, _val) \
+ _CV1800_SET_FIELD((_reg), (_val), _PLL_SEL_MODE_FIELD)
+#define PLL_SET_DIV_SEL(_reg, _val) \
+ _CV1800_SET_FIELD((_reg), (_val), _PLL_DIV_SEL_FIELD)
+#define PLL_SET_ICTRL(_reg, _val) \
+ _CV1800_SET_FIELD((_reg), (_val), _PLL_ICTRL_FIELD)
+
+struct cv1800_clk_pll {
+ struct cv1800_clk_common common;
+ u32 pll_reg;
+ struct cv1800_clk_regbit pll_pwd;
+ struct cv1800_clk_regbit pll_status;
+ const struct cv1800_clk_pll_limit *pll_limit;
+ struct cv1800_clk_pll_synthesizer *pll_syn;
+};
+
+#define CV1800_INTEGRAL_PLL(_name, _parent, _pll_reg, \
+ _pll_pwd_reg, _pll_pwd_shift, \
+ _pll_status_reg, _pll_status_shift, \
+ _pll_limit, _flags) \
+ struct cv1800_clk_pll _name = { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ &cv1800_clk_ipll_ops,\
+ _flags), \
+ .pll_reg = _pll_reg, \
+ .pll_pwd = CV1800_CLK_BIT(_pll_pwd_reg, \
+ _pll_pwd_shift), \
+ .pll_status = CV1800_CLK_BIT(_pll_status_reg, \
+ _pll_status_shift), \
+ .pll_limit = _pll_limit, \
+ .pll_syn = NULL, \
+ }
+
+#define CV1800_FACTIONAL_PLL(_name, _parent, _pll_reg, \
+ _pll_pwd_reg, _pll_pwd_shift, \
+ _pll_status_reg, _pll_status_shift, \
+ _pll_limit, _pll_syn, _flags) \
+ struct cv1800_clk_pll _name = { \
+ .common = CV1800_CLK_COMMON(#_name, _parent, \
+ &cv1800_clk_fpll_ops,\
+ _flags), \
+ .pll_reg = _pll_reg, \
+ .pll_pwd = CV1800_CLK_BIT(_pll_pwd_reg, \
+ _pll_pwd_shift), \
+ .pll_status = CV1800_CLK_BIT(_pll_status_reg, \
+ _pll_status_shift), \
+ .pll_limit = _pll_limit, \
+ .pll_syn = _pll_syn, \
+ }
+
+extern const struct clk_ops cv1800_clk_ipll_ops;
+extern const struct clk_ops cv1800_clk_fpll_ops;
+
+#endif // _CLK_SOPHGO_CV1800_PLL_H_
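
The GENMASK-based macros above pack five values into one PLL configuration register (pre_div bits 6:0, post_div 14:8, sel_mode 16:15, div_sel 23:17, ictrl 26:24), and PLL_COPY_REG() overwrites only those fields while preserving every other bit of the destination word. The user-space sketch below mirrors that layout with plain shift-and-mask code; the field values and the stray high bits are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

#define FIELD(hi, lo)	(((1u << ((hi) - (lo) + 1)) - 1) << (lo))
#define PRE_DIV		FIELD(6, 0)
#define POST_DIV	FIELD(14, 8)
#define SEL_MODE	FIELD(16, 15)
#define DIV_SEL		FIELD(23, 17)
#define ICTRL		FIELD(26, 24)
#define ALL_FIELDS	(PRE_DIV | POST_DIV | SEL_MODE | DIV_SEL | ICTRL)

/* roughly what the PLL_SET_*() helpers above do: place a value into one field */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | ((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint32_t detected = 0;

	detected = set_field(detected, PRE_DIV, 1);
	detected = set_field(detected, DIV_SEL, 64);
	detected = set_field(detected, POST_DIV, 1);

	/* PLL_COPY_REG(): keep the destination's other bits, replace the fields */
	uint32_t reg = 0xf8000000;	/* pretend bits outside the fields hold state */
	uint32_t merged = (reg & ~ALL_FIELDS) | (detected & ALL_FIELDS);

	printf("detected=0x%08x merged=0x%08x\n",
	       (unsigned int)detected, (unsigned int)merged);
	return 0;
}
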
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index 3c8493a94a11..dca409d52652 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -25,5 +25,12 @@ config COMMON_CLK_STM32MP157
help
Support for stm32mp15x SoC family clocks.
+config COMMON_CLK_STM32MP257
+ bool "Clock driver for stm32mp25x clocks"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Support for stm32mp25x SoC family clocks.
+
endif
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 5ced7fe3ddec..0a627164fcce 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 58705fcad334..1721a3ed7386 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -25,7 +25,6 @@ static int stm32_rcc_clock_init(struct device *dev,
{
const struct stm32_rcc_match_data *data = match->data;
struct clk_hw_onecell_data *clk_data = data->hw_clks;
- struct device_node *np = dev_of_node(dev);
struct clk_hw **hws;
int n, max_binding;
@@ -64,7 +63,7 @@ static int stm32_rcc_clock_init(struct device *dev,
hws[cfg_clock->id] = hw;
}
- return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
}
int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
@@ -638,7 +637,7 @@ struct clk_hw *clk_stm32_mux_register(struct device *dev,
mux->lock = lock;
mux->clock_data = data->clock_data;
- err = clk_hw_register(dev, hw);
+ err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
@@ -659,7 +658,7 @@ struct clk_hw *clk_stm32_gate_register(struct device *dev,
gate->lock = lock;
gate->clock_data = data->clock_data;
- err = clk_hw_register(dev, hw);
+ err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
@@ -680,7 +679,7 @@ struct clk_hw *clk_stm32_div_register(struct device *dev,
div->lock = lock;
div->clock_data = data->clock_data;
- err = clk_hw_register(dev, hw);
+ err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
@@ -701,7 +700,7 @@ struct clk_hw *clk_stm32_composite_register(struct device *dev,
composite->lock = lock;
composite->clock_data = data->clock_data;
- err = clk_hw_register(dev, hw);
+ err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c
index d4ecb3c34a1b..bf81d7491708 100644
--- a/drivers/clk/stm32/clk-stm32mp13.c
+++ b/drivers/clk/stm32/clk-stm32mp13.c
@@ -1536,77 +1536,16 @@ static const struct of_device_id stm32mp13_match_data[] = {
};
MODULE_DEVICE_TABLE(of, stm32mp13_match_data);
-static int stm32mp1_rcc_init(struct device *dev)
-{
- void __iomem *rcc_base;
- int ret = -ENOMEM;
-
- rcc_base = of_iomap(dev_of_node(dev), 0);
- if (!rcc_base) {
- dev_err(dev, "%pOFn: unable to map resource", dev_of_node(dev));
- goto out;
- }
-
- ret = stm32_rcc_init(dev, stm32mp13_match_data, rcc_base);
-out:
- if (ret) {
- if (rcc_base)
- iounmap(rcc_base);
-
- of_node_put(dev_of_node(dev));
- }
-
- return ret;
-}
-
-static int get_clock_deps(struct device *dev)
-{
- static const char * const clock_deps_name[] = {
- "hsi", "hse", "csi", "lsi", "lse",
- };
- size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name);
- struct clk **clk_deps;
- int i;
-
- clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL);
- if (!clk_deps)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) {
- struct clk *clk = of_clk_get_by_name(dev_of_node(dev),
- clock_deps_name[i]);
-
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT)
- return PTR_ERR(clk);
- } else {
- /* Device gets a reference count on the clock */
- clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk));
- clk_put(clk);
- }
- }
-
- return 0;
-}
-
static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int ret = get_clock_deps(dev);
+ void __iomem *base;
- if (!ret)
- ret = stm32mp1_rcc_init(dev);
-
- return ret;
-}
-
-static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *child, *np = dev_of_node(dev);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
- for_each_available_child_of_node(np, child)
- of_clk_del_provider(child);
+ return stm32_rcc_init(dev, stm32mp13_match_data, base);
}
static struct platform_driver stm32mp13_rcc_clocks_driver = {
@@ -1615,7 +1554,6 @@ static struct platform_driver stm32mp13_rcc_clocks_driver = {
.of_match_table = stm32mp13_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
- .remove_new = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp13_clocks_init(void)
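
The stm32mp13 hunks above replace the hand-rolled of_iomap()/of_clk_del_provider() lifecycle with managed helpers, which is why the .remove_new callback disappears. Below is a minimal sketch of that devm-based probe pattern, not the STM32 code itself; the compatible string, driver name and single-entry clock table are placeholders.

// SPDX-License-Identifier: GPL-2.0
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_rcc_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;
	void __iomem *base;

	/* replaces of_iomap() + iounmap(): unmapped automatically on unbind */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk_data = devm_kzalloc(&pdev->dev,
				sizeof(*clk_data) + sizeof(*clk_data->hws),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = 1;

	/* register hardware clocks against "base" here with devm_clk_hw_register() */

	/* replaces of_clk_add_hw_provider(): no manual removal needed */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					   clk_data);
}

static const struct of_device_id example_rcc_match[] = {
	{ .compatible = "example,rcc" },	/* placeholder compatible string */
	{ }
};
MODULE_DEVICE_TABLE(of, example_rcc_match);

static struct platform_driver example_rcc_driver = {
	.probe = example_rcc_probe,
	.driver = {
		.name = "example-rcc",
		.of_match_table = example_rcc_match,
	},
};
module_platform_driver(example_rcc_driver);

MODULE_DESCRIPTION("Illustrative devm-based clock controller probe");
MODULE_LICENSE("GPL");
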
diff --git a/drivers/clk/stm32/clk-stm32mp25.c b/drivers/clk/stm32/clk-stm32mp25.c
new file mode 100644
index 000000000000..210b75b39e50
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp25.c
@@ -0,0 +1,1875 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+#include "stm32mp25_rcc.h"
+
+#include <dt-bindings/clock/st,stm32mp25-rcc.h>
+#include <dt-bindings/reset/st,stm32mp25-rcc.h>
+
+enum {
+ HSE,
+ HSI,
+ MSI,
+ LSE,
+ LSI,
+ HSE_DIV2,
+ ICN_HS_MCU,
+ ICN_LS_MCU,
+ ICN_SDMMC,
+ ICN_DDR,
+ ICN_DISPLAY,
+ ICN_HSL,
+ ICN_NIC,
+ ICN_VID,
+ FLEXGEN_07,
+ FLEXGEN_08,
+ FLEXGEN_09,
+ FLEXGEN_10,
+ FLEXGEN_11,
+ FLEXGEN_12,
+ FLEXGEN_13,
+ FLEXGEN_14,
+ FLEXGEN_15,
+ FLEXGEN_16,
+ FLEXGEN_17,
+ FLEXGEN_18,
+ FLEXGEN_19,
+ FLEXGEN_20,
+ FLEXGEN_21,
+ FLEXGEN_22,
+ FLEXGEN_23,
+ FLEXGEN_24,
+ FLEXGEN_25,
+ FLEXGEN_26,
+ FLEXGEN_27,
+ FLEXGEN_28,
+ FLEXGEN_29,
+ FLEXGEN_30,
+ FLEXGEN_31,
+ FLEXGEN_32,
+ FLEXGEN_33,
+ FLEXGEN_34,
+ FLEXGEN_35,
+ FLEXGEN_36,
+ FLEXGEN_37,
+ FLEXGEN_38,
+ FLEXGEN_39,
+ FLEXGEN_40,
+ FLEXGEN_41,
+ FLEXGEN_42,
+ FLEXGEN_43,
+ FLEXGEN_44,
+ FLEXGEN_45,
+ FLEXGEN_46,
+ FLEXGEN_47,
+ FLEXGEN_48,
+ FLEXGEN_49,
+ FLEXGEN_50,
+ FLEXGEN_51,
+ FLEXGEN_52,
+ FLEXGEN_53,
+ FLEXGEN_54,
+ FLEXGEN_55,
+ FLEXGEN_56,
+ FLEXGEN_57,
+ FLEXGEN_58,
+ FLEXGEN_59,
+ FLEXGEN_60,
+ FLEXGEN_61,
+ FLEXGEN_62,
+ FLEXGEN_63,
+ ICN_APB1,
+ ICN_APB2,
+ ICN_APB3,
+ ICN_APB4,
+ ICN_APBDBG,
+ TIMG1,
+ TIMG2,
+ PLL3,
+ DSI_TXBYTE,
+};
+
+static const struct clk_parent_data adc12_src[] = {
+ { .index = FLEXGEN_46 },
+ { .index = ICN_LS_MCU },
+};
+
+static const struct clk_parent_data adc3_src[] = {
+ { .index = FLEXGEN_47 },
+ { .index = ICN_LS_MCU },
+ { .index = FLEXGEN_46 },
+};
+
+static const struct clk_parent_data usb2phy1_src[] = {
+ { .index = FLEXGEN_57 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb2phy2_src[] = {
+ { .index = FLEXGEN_58 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb3pciphy_src[] = {
+ { .index = FLEXGEN_34 },
+ { .index = HSE_DIV2 },
+};
+
+static struct clk_stm32_gate ck_ker_ltdc;
+
+static const struct clk_parent_data dsiblane_src[] = {
+ { .index = DSI_TXBYTE },
+ { .hw = &ck_ker_ltdc.hw },
+};
+
+static const struct clk_parent_data dsiphy_src[] = {
+ { .index = FLEXGEN_28 },
+ { .index = HSE },
+};
+
+static const struct clk_parent_data lvdsphy_src[] = {
+ { .index = FLEXGEN_32 },
+ { .index = HSE },
+};
+
+static const struct clk_parent_data dts_src[] = {
+ { .index = HSI },
+ { .index = HSE },
+ { .index = MSI },
+};
+
+static const struct clk_parent_data mco1_src[] = {
+ { .index = FLEXGEN_61 },
+};
+
+static const struct clk_parent_data mco2_src[] = {
+ { .index = FLEXGEN_62 },
+};
+
+enum enum_mux_cfg {
+ MUX_ADC12,
+ MUX_ADC3,
+ MUX_DSIBLANE,
+ MUX_DSIPHY,
+ MUX_DTS,
+ MUX_LVDSPHY,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_USB2PHY1,
+ MUX_USB2PHY2,
+ MUX_USB3PCIEPHY,
+ MUX_NB
+};
+
+#define MUX_CFG(id, _offset, _shift, _width) \
+ [id] = { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }
+
+static const struct stm32_mux_cfg stm32mp25_muxes[MUX_NB] = {
+ MUX_CFG(MUX_ADC12, RCC_ADC12CFGR, 12, 1),
+ MUX_CFG(MUX_ADC3, RCC_ADC3CFGR, 12, 2),
+ MUX_CFG(MUX_DSIBLANE, RCC_DSICFGR, 12, 1),
+ MUX_CFG(MUX_DSIPHY, RCC_DSICFGR, 15, 1),
+ MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2),
+ MUX_CFG(MUX_LVDSPHY, RCC_LVDSCFGR, 15, 1),
+ MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1),
+ MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1),
+ MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1),
+ MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1),
+ MUX_CFG(MUX_USB3PCIEPHY, RCC_USB3PCIEPHYCFGR, 15, 1),
+};
+
+enum enum_gate_cfg {
+ GATE_ADC12,
+ GATE_ADC3,
+ GATE_ADF1,
+ GATE_CCI,
+ GATE_CRC,
+ GATE_CRYP1,
+ GATE_CRYP2,
+ GATE_CSI,
+ GATE_DCMIPP,
+ GATE_DSI,
+ GATE_DTS,
+ GATE_ETH1,
+ GATE_ETH1MAC,
+ GATE_ETH1RX,
+ GATE_ETH1STP,
+ GATE_ETH1TX,
+ GATE_ETH2,
+ GATE_ETH2MAC,
+ GATE_ETH2RX,
+ GATE_ETH2STP,
+ GATE_ETH2TX,
+ GATE_ETHSW,
+ GATE_ETHSWACMCFG,
+ GATE_ETHSWACMMSG,
+ GATE_ETHSWMAC,
+ GATE_ETHSWREF,
+ GATE_FDCAN,
+ GATE_GPU,
+ GATE_HASH,
+ GATE_HDP,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_I2C3,
+ GATE_I2C4,
+ GATE_I2C5,
+ GATE_I2C6,
+ GATE_I2C7,
+ GATE_I2C8,
+ GATE_I3C1,
+ GATE_I3C2,
+ GATE_I3C3,
+ GATE_I3C4,
+ GATE_IS2M,
+ GATE_IWDG1,
+ GATE_IWDG2,
+ GATE_IWDG3,
+ GATE_IWDG4,
+ GATE_IWDG5,
+ GATE_LPTIM1,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_LPUART1,
+ GATE_LTDC,
+ GATE_LVDS,
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_MDF1,
+ GATE_OSPIIOM,
+ GATE_PCIE,
+ GATE_PKA,
+ GATE_RNG,
+ GATE_SAES,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_SAI3,
+ GATE_SAI4,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_SDMMC3,
+ GATE_SERC,
+ GATE_SPDIFRX,
+ GATE_SPI1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_SPI6,
+ GATE_SPI7,
+ GATE_SPI8,
+ GATE_TIM1,
+ GATE_TIM10,
+ GATE_TIM11,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_TIM2,
+ GATE_TIM20,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_TIM8,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_UART8,
+ GATE_UART9,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_USART3,
+ GATE_USART6,
+ GATE_USBH,
+ GATE_USB2PHY1,
+ GATE_USB2PHY2,
+ GATE_USB3DR,
+ GATE_USB3PCIEPHY,
+ GATE_USBTC,
+ GATE_VDEC,
+ GATE_VENC,
+ GATE_VREF,
+ GATE_WWDG1,
+ GATE_WWDG2,
+ GATE_NB
+};
+
+#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \
+ [id] = { \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_offset_clr), \
+ }
+
+static const struct stm32_gate_cfg stm32mp25_gates[GATE_NB] = {
+ GATE_CFG(GATE_ADC12, RCC_ADC12CFGR, 1, 0),
+ GATE_CFG(GATE_ADC3, RCC_ADC3CFGR, 1, 0),
+ GATE_CFG(GATE_ADF1, RCC_ADF1CFGR, 1, 0),
+ GATE_CFG(GATE_CCI, RCC_CCICFGR, 1, 0),
+ GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0),
+ GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0),
+ GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0),
+ GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0),
+ GATE_CFG(GATE_DSI, RCC_DSICFGR, 1, 0),
+ GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0),
+ GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0),
+ GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0),
+ GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0),
+ GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0),
+ GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0),
+ GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0),
+ GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0),
+ GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0),
+ GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0),
+ GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0),
+ GATE_CFG(GATE_ETHSW, RCC_ETHSWCFGR, 5, 0),
+ GATE_CFG(GATE_ETHSWACMCFG, RCC_ETHSWACMCFGR, 1, 0),
+ GATE_CFG(GATE_ETHSWACMMSG, RCC_ETHSWACMMSGCFGR, 1, 0),
+ GATE_CFG(GATE_ETHSWMAC, RCC_ETHSWCFGR, 1, 0),
+ GATE_CFG(GATE_ETHSWREF, RCC_ETHSWCFGR, 21, 0),
+ GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0),
+ GATE_CFG(GATE_GPU, RCC_GPUCFGR, 1, 0),
+ GATE_CFG(GATE_HASH, RCC_HASHCFGR, 1, 0),
+ GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0),
+ GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0),
+ GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0),
+ GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0),
+ GATE_CFG(GATE_I2C4, RCC_I2C4CFGR, 1, 0),
+ GATE_CFG(GATE_I2C5, RCC_I2C5CFGR, 1, 0),
+ GATE_CFG(GATE_I2C6, RCC_I2C6CFGR, 1, 0),
+ GATE_CFG(GATE_I2C7, RCC_I2C7CFGR, 1, 0),
+ GATE_CFG(GATE_I2C8, RCC_I2C8CFGR, 1, 0),
+ GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0),
+ GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0),
+ GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0),
+ GATE_CFG(GATE_I3C4, RCC_I3C4CFGR, 1, 0),
+ GATE_CFG(GATE_IS2M, RCC_IS2MCFGR, 1, 0),
+ GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG5, RCC_IWDG5CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0),
+ GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0),
+ GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0),
+ GATE_CFG(GATE_LVDS, RCC_LVDSCFGR, 1, 0),
+ GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0),
+ GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0),
+ GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0),
+ GATE_CFG(GATE_OSPIIOM, RCC_OSPIIOMCFGR, 1, 0),
+ GATE_CFG(GATE_PCIE, RCC_PCIECFGR, 1, 0),
+ GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0),
+ GATE_CFG(GATE_RNG, RCC_RNGCFGR, 1, 0),
+ GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0),
+ GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0),
+ GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0),
+ GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0),
+ GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0),
+ GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0),
+ GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0),
+ GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0),
+ GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0),
+ GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0),
+ GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0),
+ GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0),
+ GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0),
+ GATE_CFG(GATE_SPI7, RCC_SPI7CFGR, 1, 0),
+ GATE_CFG(GATE_SPI8, RCC_SPI8CFGR, 1, 0),
+ GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0),
+ GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0),
+ GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0),
+ GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0),
+ GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0),
+ GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0),
+ GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0),
+ GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0),
+ GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0),
+ GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0),
+ GATE_CFG(GATE_TIM20, RCC_TIM20CFGR, 1, 0),
+ GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0),
+ GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0),
+ GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0),
+ GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0),
+ GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0),
+ GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0),
+ GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0),
+ GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0),
+ GATE_CFG(GATE_UART8, RCC_UART8CFGR, 1, 0),
+ GATE_CFG(GATE_UART9, RCC_UART9CFGR, 1, 0),
+ GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0),
+ GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0),
+ GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0),
+ GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0),
+ GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0),
+ GATE_CFG(GATE_USB3DR, RCC_USB3DRCFGR, 1, 0),
+ GATE_CFG(GATE_USB3PCIEPHY, RCC_USB3PCIEPHYCFGR, 1, 0),
+ GATE_CFG(GATE_USBTC, RCC_USBTCCFGR, 1, 0),
+ GATE_CFG(GATE_VDEC, RCC_VDECCFGR, 1, 0),
+ GATE_CFG(GATE_VENC, RCC_VENCCFGR, 1, 0),
+ GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0),
+ GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_WWDG2, RCC_WWDG2CFGR, 1, 0),
+};
+
+#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .index = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/* ADC */
+static struct clk_stm32_gate ck_icn_p_adc12 = {
+ .gate_id = GATE_ADC12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc12", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc12 = {
+ .gate_id = GATE_ADC12,
+ .mux_id = MUX_ADC12,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc12", adc12_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_adc3 = {
+ .gate_id = GATE_ADC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc3 = {
+ .gate_id = GATE_ADC3,
+ .mux_id = MUX_ADC3,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc3", adc3_src, &clk_stm32_composite_ops, 0),
+};
+
+/* ADF */
+static struct clk_stm32_gate ck_icn_p_adf1 = {
+ .gate_id = GATE_ADF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_adf1 = {
+ .gate_id = GATE_ADF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_adf1", FLEXGEN_42, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMI */
+static struct clk_stm32_gate ck_icn_p_cci = {
+ .gate_id = GATE_CCI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cci", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-HOST */
+static struct clk_stm32_gate ck_icn_p_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csitxesc = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-PHY */
+static struct clk_stm32_gate ck_ker_csiphy = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMIPP */
+static struct clk_stm32_gate ck_icn_p_dcmipp = {
+ .gate_id = GATE_DCMIPP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* CRC */
+static struct clk_stm32_gate ck_icn_p_crc = {
+ .gate_id = GATE_CRC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CRYP */
+static struct clk_stm32_gate ck_icn_p_cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_cryp2 = {
+ .gate_id = GATE_CRYP2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DBG & TRACE */
+/* Trace and debug clocks are managed by SCMI */
+
+/* LTDC */
+static struct clk_stm32_gate ck_icn_p_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* DSI */
+static struct clk_stm32_gate ck_icn_p_dsi = {
+ .gate_id = GATE_DSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dsi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite clk_lanebyte = {
+ .gate_id = GATE_DSI,
+ .mux_id = MUX_DSIBLANE,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("clk_lanebyte", dsiblane_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* LVDS */
+static struct clk_stm32_gate ck_icn_p_lvds = {
+ .gate_id = GATE_LVDS,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lvds", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* DSI PHY */
+static struct clk_stm32_composite clk_phy_dsi = {
+ .gate_id = GATE_DSI,
+ .mux_id = MUX_DSIPHY,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("clk_phy_dsi", dsiphy_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* LVDS PHY */
+static struct clk_stm32_composite ck_ker_lvdsphy = {
+ .gate_id = GATE_LVDS,
+ .mux_id = MUX_LVDSPHY,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_lvdsphy", lvdsphy_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* DTS */
+static struct clk_stm32_composite ck_ker_dts = {
+ .gate_id = GATE_DTS,
+ .mux_id = MUX_DTS,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* ETHERNET */
+static struct clk_stm32_gate ck_icn_p_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1ptp = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2ptp = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_ethsw = {
+ .gate_id = GATE_ETHSWMAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ethsw = {
+ .gate_id = GATE_ETHSW,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ethsw", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ethswref = {
+ .gate_id = GATE_ETHSWREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ethswref", FLEXGEN_60, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_ethsw_acm_cfg = {
+ .gate_id = GATE_ETHSWACMCFG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw_acm_cfg", ICN_LS_MCU,
+ &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_ethsw_acm_msg = {
+ .gate_id = GATE_ETHSWACMMSG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw_acm_msg", ICN_LS_MCU,
+ &clk_stm32_gate_ops, 0),
+};
+
+/* FDCAN */
+static struct clk_stm32_gate ck_icn_p_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0),
+};
+
+/* GPU */
+static struct clk_stm32_gate ck_icn_m_gpu = {
+ .gate_id = GATE_GPU,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_gpu", FLEXGEN_59, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_gpu = {
+ .gate_id = GATE_GPU,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_gpu", PLL3, &clk_stm32_gate_ops, 0),
+};
+
+/* HASH */
+static struct clk_stm32_gate ck_icn_p_hash = {
+ .gate_id = GATE_HASH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* HDP */
+static struct clk_stm32_gate ck_icn_p_hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* I2C */
+static struct clk_stm32_gate ck_icn_p_i2c8 = {
+ .gate_id = GATE_I2C8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c8", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c4 = {
+ .gate_id = GATE_I2C4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c5 = {
+ .gate_id = GATE_I2C5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c6 = {
+ .gate_id = GATE_I2C6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c7 = {
+ .gate_id = GATE_I2C7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c5 = {
+ .gate_id = GATE_I2C5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c5", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c4 = {
+ .gate_id = GATE_I2C4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c4", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c6 = {
+ .gate_id = GATE_I2C6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c6", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c7 = {
+ .gate_id = GATE_I2C7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c7", FLEXGEN_15, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c8 = {
+ .gate_id = GATE_I2C8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c8", FLEXGEN_38, &clk_stm32_gate_ops, 0),
+};
+
+/* I3C */
+static struct clk_stm32_gate ck_icn_p_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c4 = {
+ .gate_id = GATE_I3C4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c4 = {
+ .gate_id = GATE_I3C4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c4", FLEXGEN_36, &clk_stm32_gate_ops, 0),
+};
+
+/* I2S */
+static struct clk_stm32_gate ck_icn_p_is2m = {
+ .gate_id = GATE_IS2M,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_is2m", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* IWDG */
+static struct clk_stm32_gate ck_icn_p_iwdg2 = {
+ .gate_id = GATE_IWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg3 = {
+ .gate_id = GATE_IWDG3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg4 = {
+ .gate_id = GATE_IWDG4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg5 = {
+ .gate_id = GATE_IWDG5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* LPTIM */
+static struct clk_stm32_gate ck_icn_p_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+/* LPUART */
+static struct clk_stm32_gate ck_icn_p_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0),
+};
+
+/* MCO1 & MCO2 */
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* MDF */
+static struct clk_stm32_gate ck_icn_p_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_23, &clk_stm32_gate_ops, 0),
+};
+
+/* OSPI */
+static struct clk_stm32_gate ck_icn_p_ospiiom = {
+ .gate_id = GATE_OSPIIOM,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ospiiom", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* PCIE */
+static struct clk_stm32_gate ck_icn_p_pcie = {
+ .gate_id = GATE_PCIE,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pcie", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAI */
+static struct clk_stm32_gate ck_icn_p_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_23, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_24, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* SDMMC */
+static struct clk_stm32_gate ck_icn_m_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0),
+};
+
+/* SPDIF */
+static struct clk_stm32_gate ck_icn_p_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_11, &clk_stm32_gate_ops, 0),
+};
+
+/* SPI */
+static struct clk_stm32_gate ck_icn_p_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi7 = {
+ .gate_id = GATE_SPI7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi8 = {
+ .gate_id = GATE_SPI8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi8", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi7 = {
+ .gate_id = GATE_SPI7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi7", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi8 = {
+ .gate_id = GATE_SPI8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi8", FLEXGEN_37, &clk_stm32_gate_ops, 0),
+};
+
+/* Timers */
+static struct clk_stm32_gate ck_icn_p_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim20 = {
+ .gate_id = GATE_TIM20,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim20", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim20 = {
+ .gate_id = GATE_TIM20,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim20", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+/* UART/USART */
+static struct clk_stm32_gate ck_icn_p_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart8 = {
+ .gate_id = GATE_UART8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart9 = {
+ .gate_id = GATE_UART9,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart9", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_19, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_20, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart8 = {
+ .gate_id = GATE_UART8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart8", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart9 = {
+ .gate_id = GATE_UART9,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart9", FLEXGEN_22, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY1 */
+static struct clk_stm32_composite ck_ker_usb2phy1 = {
+ .gate_id = GATE_USB2PHY1,
+ .mux_id = MUX_USB2PHY1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USB2H */
+static struct clk_stm32_gate ck_icn_m_usb2ehci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb2ehci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_usb2ohci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb2ohci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY2 */
+static struct clk_stm32_composite ck_ker_usb2phy2_en = {
+ .gate_id = GATE_USB2PHY2,
+ .mux_id = MUX_USB2PHY2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USB3 PCIe COMBOPHY */
+static struct clk_stm32_gate ck_icn_p_usb3pciephy = {
+ .gate_id = GATE_USB3PCIEPHY,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usb3pciephy", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_usb3pciephy = {
+ .gate_id = GATE_USB3PCIEPHY,
+ .mux_id = MUX_USB3PCIEPHY,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb3pciephy", usb3pciphy_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USB3 DRD */
+static struct clk_stm32_gate ck_icn_m_usb3dr = {
+ .gate_id = GATE_USB3DR,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb3dr", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usb2phy2 = {
+ .gate_id = GATE_USB3DR,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usb2phy2", FLEXGEN_58, &clk_stm32_gate_ops, 0),
+};
+
+/* USBTC */
+static struct clk_stm32_gate ck_icn_p_usbtc = {
+ .gate_id = GATE_USBTC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usbtc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usbtc = {
+ .gate_id = GATE_USBTC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usbtc", FLEXGEN_35, &clk_stm32_gate_ops, 0),
+};
+
+/* VDEC / VENC */
+static struct clk_stm32_gate ck_icn_p_vdec = {
+ .gate_id = GATE_VDEC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vdec", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_venc = {
+ .gate_id = GATE_VENC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_venc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* VREF */
+static struct clk_stm32_gate ck_icn_p_vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* WWDG */
+static struct clk_stm32_gate ck_icn_p_wwdg1 = {
+ .gate_id = GATE_WWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_wwdg2 = {
+ .gate_id = GATE_WWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+#define SECF_NONE -1
+
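+/*
+ * Clocks exposed through the device tree binding: each entry ties a clock
+ * identifier to one of the gate or composite definitions above, all
+ * registered here without an RCC security filter (SECF_NONE).
+ */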
+static const struct clock_config stm32mp25_clock_cfg[] = {
+ STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_PCIE, ck_icn_p_pcie, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ETHSW, ck_icn_p_ethsw, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ADC12, ck_icn_p_adc12, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ADC3, ck_icn_p_adc3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_CCI, ck_icn_p_cci, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_CRC, ck_icn_p_crc, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_OSPIIOM, ck_icn_p_ospiiom, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_HASH, ck_icn_p_hash, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ADF1, ck_icn_p_adf1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI8, ck_icn_p_spi8, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C8, ck_icn_p_i2c8, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_IWDG5, ck_icn_p_iwdg5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_WWDG2, ck_icn_p_wwdg2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I3C4, ck_icn_p_i3c4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USB2OHCI, ck_icn_m_usb2ohci, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USB2EHCI, ck_icn_m_usb2ehci, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USB3DR, ck_icn_m_usb3dr, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C4, ck_icn_p_i2c4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C5, ck_icn_p_i2c5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C6, ck_icn_p_i2c6, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I2C7, ck_icn_p_i2c7, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_TIM20, ck_icn_p_tim20, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_UART8, ck_icn_p_uart8, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_UART9, ck_icn_p_uart9, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_SPI7, ck_icn_p_spi7, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_IS2M, ck_icn_p_is2m, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_DSI, ck_icn_p_dsi, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_LVDS, ck_icn_p_lvds, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USBTC, ck_icn_p_usbtc, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_USB3PCIEPHY, ck_icn_p_usb3pciephy, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_VDEC, ck_icn_p_vdec, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_VENC, ck_icn_p_venc, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_TIM20, ck_ker_tim20, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C5, ck_ker_i2c5, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C4, ck_ker_i2c4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C6, ck_ker_i2c6, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C7, ck_ker_i2c7, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI7, ck_ker_spi7, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_UART8, ck_ker_uart8, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_UART9, ck_ker_uart9, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USBTC, ck_ker_usbtc, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I3C4, ck_ker_i3c4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SPI8, ck_ker_spi8, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_I2C8, ck_ker_i2c8, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ADF1, ck_ker_adf1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETHSW, ck_ker_ethsw, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_GPU, ck_icn_m_gpu, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_GPU, ck_ker_gpu, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_ETHSWREF, ck_ker_ethswref, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ETHSWACMCFG, ck_icn_p_ethsw_acm_cfg, SECF_NONE),
+ STM32_GATE_CFG(CK_BUS_ETHSWACMMSG, ck_icn_p_ethsw_acm_msg, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SECF_NONE),
+ STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_ADC12, ck_ker_adc12, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_ADC3, ck_ker_adc3, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_USB2PHY2, ck_ker_usb2phy2, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_USB3PCIEPHY, ck_ker_usb3pciephy, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_DSIBLANE, clk_lanebyte, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_DSIPHY, clk_phy_dsi, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_LVDSPHY, ck_ker_lvdsphy, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SECF_NONE),
+ STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SECF_NONE),
+};
+
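+/* One reset line: RCC register offset, bit index and whether a separate set/clear register pair is used. */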
+#define RESET_MP25(id, _offset, _bit_idx, _set_clr) \
+ [id] = &(struct stm32_reset_cfg){ \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_set_clr), \
+ }
+
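+/*
+ * Most peripheral reset lines sit in bit 0 of their RCC xxxCFGR register;
+ * the SDMMC DLL resets use bit 16 and the IWDG kernel resets go through
+ * dedicated set/clear registers.
+ */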
+static const struct stm32_reset_cfg *stm32mp25_reset_cfg[STM32MP25_LAST_RESET] = {
+ RESET_MP25(TIM1_R, RCC_TIM1CFGR, 0, 0),
+ RESET_MP25(TIM2_R, RCC_TIM2CFGR, 0, 0),
+ RESET_MP25(TIM3_R, RCC_TIM3CFGR, 0, 0),
+ RESET_MP25(TIM4_R, RCC_TIM4CFGR, 0, 0),
+ RESET_MP25(TIM5_R, RCC_TIM5CFGR, 0, 0),
+ RESET_MP25(TIM6_R, RCC_TIM6CFGR, 0, 0),
+ RESET_MP25(TIM7_R, RCC_TIM7CFGR, 0, 0),
+ RESET_MP25(TIM8_R, RCC_TIM8CFGR, 0, 0),
+ RESET_MP25(TIM10_R, RCC_TIM10CFGR, 0, 0),
+ RESET_MP25(TIM11_R, RCC_TIM11CFGR, 0, 0),
+ RESET_MP25(TIM12_R, RCC_TIM12CFGR, 0, 0),
+ RESET_MP25(TIM13_R, RCC_TIM13CFGR, 0, 0),
+ RESET_MP25(TIM14_R, RCC_TIM14CFGR, 0, 0),
+ RESET_MP25(TIM15_R, RCC_TIM15CFGR, 0, 0),
+ RESET_MP25(TIM16_R, RCC_TIM16CFGR, 0, 0),
+ RESET_MP25(TIM17_R, RCC_TIM17CFGR, 0, 0),
+ RESET_MP25(TIM20_R, RCC_TIM20CFGR, 0, 0),
+ RESET_MP25(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0),
+ RESET_MP25(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0),
+ RESET_MP25(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0),
+ RESET_MP25(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0),
+ RESET_MP25(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0),
+ RESET_MP25(SPI1_R, RCC_SPI1CFGR, 0, 0),
+ RESET_MP25(SPI2_R, RCC_SPI2CFGR, 0, 0),
+ RESET_MP25(SPI3_R, RCC_SPI3CFGR, 0, 0),
+ RESET_MP25(SPI4_R, RCC_SPI4CFGR, 0, 0),
+ RESET_MP25(SPI5_R, RCC_SPI5CFGR, 0, 0),
+ RESET_MP25(SPI6_R, RCC_SPI6CFGR, 0, 0),
+ RESET_MP25(SPI7_R, RCC_SPI7CFGR, 0, 0),
+ RESET_MP25(SPI8_R, RCC_SPI8CFGR, 0, 0),
+ RESET_MP25(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0),
+ RESET_MP25(USART1_R, RCC_USART1CFGR, 0, 0),
+ RESET_MP25(USART2_R, RCC_USART2CFGR, 0, 0),
+ RESET_MP25(USART3_R, RCC_USART3CFGR, 0, 0),
+ RESET_MP25(UART4_R, RCC_UART4CFGR, 0, 0),
+ RESET_MP25(UART5_R, RCC_UART5CFGR, 0, 0),
+ RESET_MP25(USART6_R, RCC_USART6CFGR, 0, 0),
+ RESET_MP25(UART7_R, RCC_UART7CFGR, 0, 0),
+ RESET_MP25(UART8_R, RCC_UART8CFGR, 0, 0),
+ RESET_MP25(UART9_R, RCC_UART9CFGR, 0, 0),
+ RESET_MP25(LPUART1_R, RCC_LPUART1CFGR, 0, 0),
+ RESET_MP25(IS2M_R, RCC_IS2MCFGR, 0, 0),
+ RESET_MP25(I2C1_R, RCC_I2C1CFGR, 0, 0),
+ RESET_MP25(I2C2_R, RCC_I2C2CFGR, 0, 0),
+ RESET_MP25(I2C3_R, RCC_I2C3CFGR, 0, 0),
+ RESET_MP25(I2C4_R, RCC_I2C4CFGR, 0, 0),
+ RESET_MP25(I2C5_R, RCC_I2C5CFGR, 0, 0),
+ RESET_MP25(I2C6_R, RCC_I2C6CFGR, 0, 0),
+ RESET_MP25(I2C7_R, RCC_I2C7CFGR, 0, 0),
+ RESET_MP25(I2C8_R, RCC_I2C8CFGR, 0, 0),
+ RESET_MP25(SAI1_R, RCC_SAI1CFGR, 0, 0),
+ RESET_MP25(SAI2_R, RCC_SAI2CFGR, 0, 0),
+ RESET_MP25(SAI3_R, RCC_SAI3CFGR, 0, 0),
+ RESET_MP25(SAI4_R, RCC_SAI4CFGR, 0, 0),
+ RESET_MP25(MDF1_R, RCC_MDF1CFGR, 0, 0),
+ RESET_MP25(MDF2_R, RCC_ADF1CFGR, 0, 0),
+ RESET_MP25(FDCAN_R, RCC_FDCANCFGR, 0, 0),
+ RESET_MP25(HDP_R, RCC_HDPCFGR, 0, 0),
+ RESET_MP25(ADC12_R, RCC_ADC12CFGR, 0, 0),
+ RESET_MP25(ADC3_R, RCC_ADC3CFGR, 0, 0),
+ RESET_MP25(ETH1_R, RCC_ETH1CFGR, 0, 0),
+ RESET_MP25(ETH2_R, RCC_ETH2CFGR, 0, 0),
+ RESET_MP25(USBH_R, RCC_USBHCFGR, 0, 0),
+ RESET_MP25(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0),
+ RESET_MP25(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0),
+ RESET_MP25(USB3DR_R, RCC_USB3DRCFGR, 0, 0),
+ RESET_MP25(USB3PCIEPHY_R, RCC_USB3PCIEPHYCFGR, 0, 0),
+ RESET_MP25(USBTC_R, RCC_USBTCCFGR, 0, 0),
+ RESET_MP25(ETHSW_R, RCC_ETHSWCFGR, 0, 0),
+ RESET_MP25(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0),
+ RESET_MP25(SDMMC1DLL_R, RCC_SDMMC1CFGR, 16, 0),
+ RESET_MP25(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0),
+ RESET_MP25(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0),
+ RESET_MP25(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0),
+ RESET_MP25(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0),
+ RESET_MP25(GPU_R, RCC_GPUCFGR, 0, 0),
+ RESET_MP25(LTDC_R, RCC_LTDCCFGR, 0, 0),
+ RESET_MP25(DSI_R, RCC_DSICFGR, 0, 0),
+ RESET_MP25(LVDS_R, RCC_LVDSCFGR, 0, 0),
+ RESET_MP25(CSI_R, RCC_CSICFGR, 0, 0),
+ RESET_MP25(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0),
+ RESET_MP25(CCI_R, RCC_CCICFGR, 0, 0),
+ RESET_MP25(VDEC_R, RCC_VDECCFGR, 0, 0),
+ RESET_MP25(VENC_R, RCC_VENCCFGR, 0, 0),
+ RESET_MP25(WWDG1_R, RCC_WWDG1CFGR, 0, 0),
+ RESET_MP25(WWDG2_R, RCC_WWDG2CFGR, 0, 0),
+ RESET_MP25(VREF_R, RCC_VREFCFGR, 0, 0),
+ RESET_MP25(DTS_R, RCC_DTSCFGR, 0, 0),
+ RESET_MP25(CRC_R, RCC_CRCCFGR, 0, 0),
+ RESET_MP25(SERC_R, RCC_SERCCFGR, 0, 0),
+ RESET_MP25(OSPIIOM_R, RCC_OSPIIOMCFGR, 0, 0),
+ RESET_MP25(I3C1_R, RCC_I3C1CFGR, 0, 0),
+ RESET_MP25(I3C2_R, RCC_I3C2CFGR, 0, 0),
+ RESET_MP25(I3C3_R, RCC_I3C3CFGR, 0, 0),
+ RESET_MP25(I3C4_R, RCC_I3C4CFGR, 0, 0),
+ RESET_MP25(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1),
+ RESET_MP25(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1),
+ RESET_MP25(RNG_R, RCC_RNGCFGR, 0, 0),
+ RESET_MP25(PKA_R, RCC_PKACFGR, 0, 0),
+ RESET_MP25(SAES_R, RCC_SAESCFGR, 0, 0),
+ RESET_MP25(HASH_R, RCC_HASHCFGR, 0, 0),
+ RESET_MP25(CRYP1_R, RCC_CRYP1CFGR, 0, 0),
+ RESET_MP25(CRYP2_R, RCC_CRYP2CFGR, 0, 0),
+ RESET_MP25(PCIE_R, RCC_PCIECFGR, 0, 0),
+};
+
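+/*
+ * Per-gate counters used by the shared STM32 gate code; several clocks above
+ * reuse the same gate ID (e.g. ck_icn_m_usb2ehci and ck_icn_m_usb2ohci both
+ * use GATE_USBH).
+ */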
+static u16 stm32mp25_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp25_clock_data = {
+ .gate_cpt = stm32mp25_cpt_gate,
+ .gates = stm32mp25_gates,
+ .muxes = stm32mp25_muxes,
+};
+
+static struct clk_stm32_reset_data stm32mp25_reset_data = {
+ .reset_lines = stm32mp25_reset_cfg,
+ .nr_lines = ARRAY_SIZE(stm32mp25_reset_cfg),
+};
+
+static const struct stm32_rcc_match_data stm32mp25_data = {
+ .tab_clocks = stm32mp25_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp25_clock_cfg),
+ .maxbinding = STM32MP25_LAST_CLK,
+ .clock_data = &stm32mp25_clock_data,
+ .reset_data = &stm32mp25_reset_data,
+};
+
+static const struct of_device_id stm32mp25_match_data[] = {
+ { .compatible = "st,stm32mp25-rcc", .data = &stm32mp25_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp25_match_data);
+
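+/* Map the RCC register area and hand the SoC clock and reset data to the common STM32 RCC core code. */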
+static int stm32mp25_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return stm32_rcc_init(dev, stm32mp25_match_data, base);
+}
+
+static struct platform_driver stm32mp25_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp25_rcc",
+ .of_match_table = stm32mp25_match_data,
+ },
+ .probe = stm32mp25_rcc_clocks_probe,
+};
+
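+/* Register the driver at core_initcall time so the RCC clocks are available early in boot. */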
+static int __init stm32mp25_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp25_rcc_clocks_driver);
+}
+
+core_initcall(stm32mp25_clocks_init);
diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c
index 14c2ee1eebee..5a8f525842ce 100644
--- a/drivers/clk/stm32/reset-stm32.c
+++ b/drivers/clk/stm32/reset-stm32.c
@@ -19,6 +19,7 @@ struct stm32_reset_data {
struct reset_controller_dev rcdev;
void __iomem *membase;
u32 clear_offset;
+ const struct stm32_reset_cfg **reset_lines;
};
static inline struct stm32_reset_data *
@@ -27,22 +28,46 @@ to_stm32_reset_data(struct reset_controller_dev *rcdev)
return container_of(rcdev, struct stm32_reset_data, rcdev);
}
+static const struct stm32_reset_cfg *stm32_get_reset_line(struct reset_controller_dev *rcdev,
+ unsigned long id,
+ struct stm32_reset_cfg *line)
+{
+ struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
+
+ if (!data->reset_lines) {
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+
+ line->offset = bank * reg_width;
+ line->bit_idx = offset;
+ line->set_clr = (data->clear_offset ? true : false);
+
+ return line;
+ }
+
+ return data->reset_lines[id];
+}
+
static int stm32_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
+ struct stm32_reset_cfg line_reset;
+ const struct stm32_reset_cfg *ptr_line;
- if (data->clear_offset) {
+ ptr_line = stm32_get_reset_line(rcdev, id, &line_reset);
+ if (!ptr_line)
+ return -EPERM;
+
+ if (ptr_line->set_clr) {
void __iomem *addr;
- addr = data->membase + (bank * reg_width);
+ addr = data->membase + ptr_line->offset;
if (!assert)
addr += data->clear_offset;
- writel(BIT(offset), addr);
+ writel(BIT(ptr_line->bit_idx), addr);
} else {
unsigned long flags;
@@ -50,14 +75,14 @@ static int stm32_reset_update(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + (bank * reg_width));
+ reg = readl(data->membase + ptr_line->offset);
if (assert)
- reg |= BIT(offset);
+ reg |= BIT(ptr_line->bit_idx);
else
- reg &= ~BIT(offset);
+ reg &= ~BIT(ptr_line->bit_idx);
- writel(reg, data->membase + (bank * reg_width));
+ writel(reg, data->membase + ptr_line->offset);
spin_unlock_irqrestore(&data->lock, flags);
}
@@ -81,14 +106,17 @@ static int stm32_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
+ struct stm32_reset_cfg line_reset;
+ const struct stm32_reset_cfg *ptr_line;
u32 reg;
- reg = readl(data->membase + (bank * reg_width));
+ ptr_line = stm32_get_reset_line(rcdev, id, &line_reset);
+ if (!ptr_line)
+ return -EPERM;
+
+ reg = readl(data->membase + ptr_line->offset);
- return !!(reg & BIT(offset));
+ return !!(reg & BIT(ptr_line->bit_idx));
}
static const struct reset_control_ops stm32_reset_ops = {
@@ -113,6 +141,7 @@ int stm32_rcc_reset_init(struct device *dev, struct clk_stm32_reset_data *data,
reset_data->rcdev.ops = &stm32_reset_ops;
reset_data->rcdev.of_node = dev_of_node(dev);
reset_data->rcdev.nr_resets = data->nr_lines;
+ reset_data->reset_lines = data->reset_lines;
reset_data->clear_offset = data->clear_offset;
return reset_controller_register(&reset_data->rcdev);
diff --git a/drivers/clk/stm32/reset-stm32.h b/drivers/clk/stm32/reset-stm32.h
index 8cf1cc9be480..f79cad21dfd6 100644
--- a/drivers/clk/stm32/reset-stm32.h
+++ b/drivers/clk/stm32/reset-stm32.h
@@ -4,8 +4,15 @@
* Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
*/
+struct stm32_reset_cfg {
+ u16 offset;
+ u8 bit_idx;
+ bool set_clr;
+};
+
struct clk_stm32_reset_data {
const struct reset_control_ops *ops;
+ const struct stm32_reset_cfg **reset_lines;
unsigned int nr_lines;
u32 clear_offset;
};
diff --git a/drivers/clk/stm32/stm32mp25_rcc.h b/drivers/clk/stm32/stm32mp25_rcc.h
new file mode 100644
index 000000000000..687bc6a78627
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp25_rcc.h
@@ -0,0 +1,712 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef STM32MP25_RCC_H
+#define STM32MP25_RCC_H
+
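+/*
+ * RCC register offsets: resource security/privilege/CID filtering, reset and
+ * boot status control, oscillator and bus divider configuration,
+ * per-peripheral xxxCFGR registers, and the flexgen crossbar/pre-divider/
+ * final-divider blocks.
+ */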
+#define RCC_SECCFGR0 0x0
+#define RCC_SECCFGR1 0x4
+#define RCC_SECCFGR2 0x8
+#define RCC_SECCFGR3 0xC
+#define RCC_PRIVCFGR0 0x10
+#define RCC_PRIVCFGR1 0x14
+#define RCC_PRIVCFGR2 0x18
+#define RCC_PRIVCFGR3 0x1C
+#define RCC_RCFGLOCKR0 0x20
+#define RCC_RCFGLOCKR1 0x24
+#define RCC_RCFGLOCKR2 0x28
+#define RCC_RCFGLOCKR3 0x2C
+#define RCC_R0CIDCFGR 0x30
+#define RCC_R0SEMCR 0x34
+#define RCC_R1CIDCFGR 0x38
+#define RCC_R1SEMCR 0x3C
+#define RCC_R2CIDCFGR 0x40
+#define RCC_R2SEMCR 0x44
+#define RCC_R3CIDCFGR 0x48
+#define RCC_R3SEMCR 0x4C
+#define RCC_R4CIDCFGR 0x50
+#define RCC_R4SEMCR 0x54
+#define RCC_R5CIDCFGR 0x58
+#define RCC_R5SEMCR 0x5C
+#define RCC_R6CIDCFGR 0x60
+#define RCC_R6SEMCR 0x64
+#define RCC_R7CIDCFGR 0x68
+#define RCC_R7SEMCR 0x6C
+#define RCC_R8CIDCFGR 0x70
+#define RCC_R8SEMCR 0x74
+#define RCC_R9CIDCFGR 0x78
+#define RCC_R9SEMCR 0x7C
+#define RCC_R10CIDCFGR 0x80
+#define RCC_R10SEMCR 0x84
+#define RCC_R11CIDCFGR 0x88
+#define RCC_R11SEMCR 0x8C
+#define RCC_R12CIDCFGR 0x90
+#define RCC_R12SEMCR 0x94
+#define RCC_R13CIDCFGR 0x98
+#define RCC_R13SEMCR 0x9C
+#define RCC_R14CIDCFGR 0xA0
+#define RCC_R14SEMCR 0xA4
+#define RCC_R15CIDCFGR 0xA8
+#define RCC_R15SEMCR 0xAC
+#define RCC_R16CIDCFGR 0xB0
+#define RCC_R16SEMCR 0xB4
+#define RCC_R17CIDCFGR 0xB8
+#define RCC_R17SEMCR 0xBC
+#define RCC_R18CIDCFGR 0xC0
+#define RCC_R18SEMCR 0xC4
+#define RCC_R19CIDCFGR 0xC8
+#define RCC_R19SEMCR 0xCC
+#define RCC_R20CIDCFGR 0xD0
+#define RCC_R20SEMCR 0xD4
+#define RCC_R21CIDCFGR 0xD8
+#define RCC_R21SEMCR 0xDC
+#define RCC_R22CIDCFGR 0xE0
+#define RCC_R22SEMCR 0xE4
+#define RCC_R23CIDCFGR 0xE8
+#define RCC_R23SEMCR 0xEC
+#define RCC_R24CIDCFGR 0xF0
+#define RCC_R24SEMCR 0xF4
+#define RCC_R25CIDCFGR 0xF8
+#define RCC_R25SEMCR 0xFC
+#define RCC_R26CIDCFGR 0x100
+#define RCC_R26SEMCR 0x104
+#define RCC_R27CIDCFGR 0x108
+#define RCC_R27SEMCR 0x10C
+#define RCC_R28CIDCFGR 0x110
+#define RCC_R28SEMCR 0x114
+#define RCC_R29CIDCFGR 0x118
+#define RCC_R29SEMCR 0x11C
+#define RCC_R30CIDCFGR 0x120
+#define RCC_R30SEMCR 0x124
+#define RCC_R31CIDCFGR 0x128
+#define RCC_R31SEMCR 0x12C
+#define RCC_R32CIDCFGR 0x130
+#define RCC_R32SEMCR 0x134
+#define RCC_R33CIDCFGR 0x138
+#define RCC_R33SEMCR 0x13C
+#define RCC_R34CIDCFGR 0x140
+#define RCC_R34SEMCR 0x144
+#define RCC_R35CIDCFGR 0x148
+#define RCC_R35SEMCR 0x14C
+#define RCC_R36CIDCFGR 0x150
+#define RCC_R36SEMCR 0x154
+#define RCC_R37CIDCFGR 0x158
+#define RCC_R37SEMCR 0x15C
+#define RCC_R38CIDCFGR 0x160
+#define RCC_R38SEMCR 0x164
+#define RCC_R39CIDCFGR 0x168
+#define RCC_R39SEMCR 0x16C
+#define RCC_R40CIDCFGR 0x170
+#define RCC_R40SEMCR 0x174
+#define RCC_R41CIDCFGR 0x178
+#define RCC_R41SEMCR 0x17C
+#define RCC_R42CIDCFGR 0x180
+#define RCC_R42SEMCR 0x184
+#define RCC_R43CIDCFGR 0x188
+#define RCC_R43SEMCR 0x18C
+#define RCC_R44CIDCFGR 0x190
+#define RCC_R44SEMCR 0x194
+#define RCC_R45CIDCFGR 0x198
+#define RCC_R45SEMCR 0x19C
+#define RCC_R46CIDCFGR 0x1A0
+#define RCC_R46SEMCR 0x1A4
+#define RCC_R47CIDCFGR 0x1A8
+#define RCC_R47SEMCR 0x1AC
+#define RCC_R48CIDCFGR 0x1B0
+#define RCC_R48SEMCR 0x1B4
+#define RCC_R49CIDCFGR 0x1B8
+#define RCC_R49SEMCR 0x1BC
+#define RCC_R50CIDCFGR 0x1C0
+#define RCC_R50SEMCR 0x1C4
+#define RCC_R51CIDCFGR 0x1C8
+#define RCC_R51SEMCR 0x1CC
+#define RCC_R52CIDCFGR 0x1D0
+#define RCC_R52SEMCR 0x1D4
+#define RCC_R53CIDCFGR 0x1D8
+#define RCC_R53SEMCR 0x1DC
+#define RCC_R54CIDCFGR 0x1E0
+#define RCC_R54SEMCR 0x1E4
+#define RCC_R55CIDCFGR 0x1E8
+#define RCC_R55SEMCR 0x1EC
+#define RCC_R56CIDCFGR 0x1F0
+#define RCC_R56SEMCR 0x1F4
+#define RCC_R57CIDCFGR 0x1F8
+#define RCC_R57SEMCR 0x1FC
+#define RCC_R58CIDCFGR 0x200
+#define RCC_R58SEMCR 0x204
+#define RCC_R59CIDCFGR 0x208
+#define RCC_R59SEMCR 0x20C
+#define RCC_R60CIDCFGR 0x210
+#define RCC_R60SEMCR 0x214
+#define RCC_R61CIDCFGR 0x218
+#define RCC_R61SEMCR 0x21C
+#define RCC_R62CIDCFGR 0x220
+#define RCC_R62SEMCR 0x224
+#define RCC_R63CIDCFGR 0x228
+#define RCC_R63SEMCR 0x22C
+#define RCC_R64CIDCFGR 0x230
+#define RCC_R64SEMCR 0x234
+#define RCC_R65CIDCFGR 0x238
+#define RCC_R65SEMCR 0x23C
+#define RCC_R66CIDCFGR 0x240
+#define RCC_R66SEMCR 0x244
+#define RCC_R67CIDCFGR 0x248
+#define RCC_R67SEMCR 0x24C
+#define RCC_R68CIDCFGR 0x250
+#define RCC_R68SEMCR 0x254
+#define RCC_R69CIDCFGR 0x258
+#define RCC_R69SEMCR 0x25C
+#define RCC_R70CIDCFGR 0x260
+#define RCC_R70SEMCR 0x264
+#define RCC_R71CIDCFGR 0x268
+#define RCC_R71SEMCR 0x26C
+#define RCC_R72CIDCFGR 0x270
+#define RCC_R72SEMCR 0x274
+#define RCC_R73CIDCFGR 0x278
+#define RCC_R73SEMCR 0x27C
+#define RCC_R74CIDCFGR 0x280
+#define RCC_R74SEMCR 0x284
+#define RCC_R75CIDCFGR 0x288
+#define RCC_R75SEMCR 0x28C
+#define RCC_R76CIDCFGR 0x290
+#define RCC_R76SEMCR 0x294
+#define RCC_R77CIDCFGR 0x298
+#define RCC_R77SEMCR 0x29C
+#define RCC_R78CIDCFGR 0x2A0
+#define RCC_R78SEMCR 0x2A4
+#define RCC_R79CIDCFGR 0x2A8
+#define RCC_R79SEMCR 0x2AC
+#define RCC_R80CIDCFGR 0x2B0
+#define RCC_R80SEMCR 0x2B4
+#define RCC_R81CIDCFGR 0x2B8
+#define RCC_R81SEMCR 0x2BC
+#define RCC_R82CIDCFGR 0x2C0
+#define RCC_R82SEMCR 0x2C4
+#define RCC_R83CIDCFGR 0x2C8
+#define RCC_R83SEMCR 0x2CC
+#define RCC_R84CIDCFGR 0x2D0
+#define RCC_R84SEMCR 0x2D4
+#define RCC_R85CIDCFGR 0x2D8
+#define RCC_R85SEMCR 0x2DC
+#define RCC_R86CIDCFGR 0x2E0
+#define RCC_R86SEMCR 0x2E4
+#define RCC_R87CIDCFGR 0x2E8
+#define RCC_R87SEMCR 0x2EC
+#define RCC_R88CIDCFGR 0x2F0
+#define RCC_R88SEMCR 0x2F4
+#define RCC_R89CIDCFGR 0x2F8
+#define RCC_R89SEMCR 0x2FC
+#define RCC_R90CIDCFGR 0x300
+#define RCC_R90SEMCR 0x304
+#define RCC_R91CIDCFGR 0x308
+#define RCC_R91SEMCR 0x30C
+#define RCC_R92CIDCFGR 0x310
+#define RCC_R92SEMCR 0x314
+#define RCC_R93CIDCFGR 0x318
+#define RCC_R93SEMCR 0x31C
+#define RCC_R94CIDCFGR 0x320
+#define RCC_R94SEMCR 0x324
+#define RCC_R95CIDCFGR 0x328
+#define RCC_R95SEMCR 0x32C
+#define RCC_R96CIDCFGR 0x330
+#define RCC_R96SEMCR 0x334
+#define RCC_R97CIDCFGR 0x338
+#define RCC_R97SEMCR 0x33C
+#define RCC_R98CIDCFGR 0x340
+#define RCC_R98SEMCR 0x344
+#define RCC_R99CIDCFGR 0x348
+#define RCC_R99SEMCR 0x34C
+#define RCC_R100CIDCFGR 0x350
+#define RCC_R100SEMCR 0x354
+#define RCC_R101CIDCFGR 0x358
+#define RCC_R101SEMCR 0x35C
+#define RCC_R102CIDCFGR 0x360
+#define RCC_R102SEMCR 0x364
+#define RCC_R103CIDCFGR 0x368
+#define RCC_R103SEMCR 0x36C
+#define RCC_R104CIDCFGR 0x370
+#define RCC_R104SEMCR 0x374
+#define RCC_R105CIDCFGR 0x378
+#define RCC_R105SEMCR 0x37C
+#define RCC_R106CIDCFGR 0x380
+#define RCC_R106SEMCR 0x384
+#define RCC_R107CIDCFGR 0x388
+#define RCC_R107SEMCR 0x38C
+#define RCC_R108CIDCFGR 0x390
+#define RCC_R108SEMCR 0x394
+#define RCC_R109CIDCFGR 0x398
+#define RCC_R109SEMCR 0x39C
+#define RCC_R110CIDCFGR 0x3A0
+#define RCC_R110SEMCR 0x3A4
+#define RCC_R111CIDCFGR 0x3A8
+#define RCC_R111SEMCR 0x3AC
+#define RCC_R112CIDCFGR 0x3B0
+#define RCC_R112SEMCR 0x3B4
+#define RCC_R113CIDCFGR 0x3B8
+#define RCC_R113SEMCR 0x3BC
+#define RCC_GRSTCSETR 0x400
+#define RCC_C1RSTCSETR 0x404
+#define RCC_C1P1RSTCSETR 0x408
+#define RCC_C2RSTCSETR 0x40C
+#define RCC_HWRSTSCLRR 0x410
+#define RCC_C1HWRSTSCLRR 0x414
+#define RCC_C2HWRSTSCLRR 0x418
+#define RCC_C1BOOTRSTSSETR 0x41C
+#define RCC_C1BOOTRSTSCLRR 0x420
+#define RCC_C2BOOTRSTSSETR 0x424
+#define RCC_C2BOOTRSTSCLRR 0x428
+#define RCC_C1SREQSETR 0x42C
+#define RCC_C1SREQCLRR 0x430
+#define RCC_CPUBOOTCR 0x434
+#define RCC_STBYBOOTCR 0x438
+#define RCC_LEGBOOTCR 0x43C
+#define RCC_BDCR 0x440
+#define RCC_D3DCR 0x444
+#define RCC_D3DSR 0x448
+#define RCC_RDCR 0x44C
+#define RCC_C1MSRDCR 0x450
+#define RCC_PWRLPDLYCR 0x454
+#define RCC_C1CIESETR 0x458
+#define RCC_C1CIFCLRR 0x45C
+#define RCC_C2CIESETR 0x460
+#define RCC_C2CIFCLRR 0x464
+#define RCC_IWDGC1FZSETR 0x468
+#define RCC_IWDGC1FZCLRR 0x46C
+#define RCC_IWDGC1CFGSETR 0x470
+#define RCC_IWDGC1CFGCLRR 0x474
+#define RCC_IWDGC2FZSETR 0x478
+#define RCC_IWDGC2FZCLRR 0x47C
+#define RCC_IWDGC2CFGSETR 0x480
+#define RCC_IWDGC2CFGCLRR 0x484
+#define RCC_IWDGC3CFGSETR 0x488
+#define RCC_IWDGC3CFGCLRR 0x48C
+#define RCC_C3CFGR 0x490
+#define RCC_MCO1CFGR 0x494
+#define RCC_MCO2CFGR 0x498
+#define RCC_OCENSETR 0x49C
+#define RCC_OCENCLRR 0x4A0
+#define RCC_OCRDYR 0x4A4
+#define RCC_HSICFGR 0x4A8
+#define RCC_MSICFGR 0x4AC
+#define RCC_RTCDIVR 0x4B0
+#define RCC_APB1DIVR 0x4B4
+#define RCC_APB2DIVR 0x4B8
+#define RCC_APB3DIVR 0x4BC
+#define RCC_APB4DIVR 0x4C0
+#define RCC_APBDBGDIVR 0x4C4
+#define RCC_TIMG1PRER 0x4C8
+#define RCC_TIMG2PRER 0x4CC
+#define RCC_LSMCUDIVR 0x4D0
+#define RCC_DDRCPCFGR 0x4D4
+#define RCC_DDRCAPBCFGR 0x4D8
+#define RCC_DDRPHYCAPBCFGR 0x4DC
+#define RCC_DDRPHYCCFGR 0x4E0
+#define RCC_DDRCFGR 0x4E4
+#define RCC_DDRITFCFGR 0x4E8
+#define RCC_SYSRAMCFGR 0x4F0
+#define RCC_VDERAMCFGR 0x4F4
+#define RCC_SRAM1CFGR 0x4F8
+#define RCC_SRAM2CFGR 0x4FC
+#define RCC_RETRAMCFGR 0x500
+#define RCC_BKPSRAMCFGR 0x504
+#define RCC_LPSRAM1CFGR 0x508
+#define RCC_LPSRAM2CFGR 0x50C
+#define RCC_LPSRAM3CFGR 0x510
+#define RCC_OSPI1CFGR 0x514
+#define RCC_OSPI2CFGR 0x518
+#define RCC_FMCCFGR 0x51C
+#define RCC_DBGCFGR 0x520
+#define RCC_STM500CFGR 0x524
+#define RCC_ETRCFGR 0x528
+#define RCC_GPIOACFGR 0x52C
+#define RCC_GPIOBCFGR 0x530
+#define RCC_GPIOCCFGR 0x534
+#define RCC_GPIODCFGR 0x538
+#define RCC_GPIOECFGR 0x53C
+#define RCC_GPIOFCFGR 0x540
+#define RCC_GPIOGCFGR 0x544
+#define RCC_GPIOHCFGR 0x548
+#define RCC_GPIOICFGR 0x54C
+#define RCC_GPIOJCFGR 0x550
+#define RCC_GPIOKCFGR 0x554
+#define RCC_GPIOZCFGR 0x558
+#define RCC_HPDMA1CFGR 0x55C
+#define RCC_HPDMA2CFGR 0x560
+#define RCC_HPDMA3CFGR 0x564
+#define RCC_LPDMACFGR 0x568
+#define RCC_HSEMCFGR 0x56C
+#define RCC_IPCC1CFGR 0x570
+#define RCC_IPCC2CFGR 0x574
+#define RCC_RTCCFGR 0x578
+#define RCC_SYSCPU1CFGR 0x580
+#define RCC_BSECCFGR 0x584
+#define RCC_IS2MCFGR 0x58C
+#define RCC_PLL2CFGR1 0x590
+#define RCC_PLL2CFGR2 0x594
+#define RCC_PLL2CFGR3 0x598
+#define RCC_PLL2CFGR4 0x59C
+#define RCC_PLL2CFGR5 0x5A0
+#define RCC_PLL2CFGR6 0x5A8
+#define RCC_PLL2CFGR7 0x5AC
+#define RCC_PLL3CFGR1 0x5B8
+#define RCC_PLL3CFGR2 0x5BC
+#define RCC_PLL3CFGR3 0x5C0
+#define RCC_PLL3CFGR4 0x5C4
+#define RCC_PLL3CFGR5 0x5C8
+#define RCC_PLL3CFGR6 0x5D0
+#define RCC_PLL3CFGR7 0x5D4
+#define RCC_HSIFMONCR 0x5E0
+#define RCC_HSIFVALR 0x5E4
+#define RCC_TIM1CFGR 0x700
+#define RCC_TIM2CFGR 0x704
+#define RCC_TIM3CFGR 0x708
+#define RCC_TIM4CFGR 0x70C
+#define RCC_TIM5CFGR 0x710
+#define RCC_TIM6CFGR 0x714
+#define RCC_TIM7CFGR 0x718
+#define RCC_TIM8CFGR 0x71C
+#define RCC_TIM10CFGR 0x720
+#define RCC_TIM11CFGR 0x724
+#define RCC_TIM12CFGR 0x728
+#define RCC_TIM13CFGR 0x72C
+#define RCC_TIM14CFGR 0x730
+#define RCC_TIM15CFGR 0x734
+#define RCC_TIM16CFGR 0x738
+#define RCC_TIM17CFGR 0x73C
+#define RCC_TIM20CFGR 0x740
+#define RCC_LPTIM1CFGR 0x744
+#define RCC_LPTIM2CFGR 0x748
+#define RCC_LPTIM3CFGR 0x74C
+#define RCC_LPTIM4CFGR 0x750
+#define RCC_LPTIM5CFGR 0x754
+#define RCC_SPI1CFGR 0x758
+#define RCC_SPI2CFGR 0x75C
+#define RCC_SPI3CFGR 0x760
+#define RCC_SPI4CFGR 0x764
+#define RCC_SPI5CFGR 0x768
+#define RCC_SPI6CFGR 0x76C
+#define RCC_SPI7CFGR 0x770
+#define RCC_SPI8CFGR 0x774
+#define RCC_SPDIFRXCFGR 0x778
+#define RCC_USART1CFGR 0x77C
+#define RCC_USART2CFGR 0x780
+#define RCC_USART3CFGR 0x784
+#define RCC_UART4CFGR 0x788
+#define RCC_UART5CFGR 0x78C
+#define RCC_USART6CFGR 0x790
+#define RCC_UART7CFGR 0x794
+#define RCC_UART8CFGR 0x798
+#define RCC_UART9CFGR 0x79C
+#define RCC_LPUART1CFGR 0x7A0
+#define RCC_I2C1CFGR 0x7A4
+#define RCC_I2C2CFGR 0x7A8
+#define RCC_I2C3CFGR 0x7AC
+#define RCC_I2C4CFGR 0x7B0
+#define RCC_I2C5CFGR 0x7B4
+#define RCC_I2C6CFGR 0x7B8
+#define RCC_I2C7CFGR 0x7BC
+#define RCC_I2C8CFGR 0x7C0
+#define RCC_SAI1CFGR 0x7C4
+#define RCC_SAI2CFGR 0x7C8
+#define RCC_SAI3CFGR 0x7CC
+#define RCC_SAI4CFGR 0x7D0
+#define RCC_MDF1CFGR 0x7D8
+#define RCC_ADF1CFGR 0x7DC
+#define RCC_FDCANCFGR 0x7E0
+#define RCC_HDPCFGR 0x7E4
+#define RCC_ADC12CFGR 0x7E8
+#define RCC_ADC3CFGR 0x7EC
+#define RCC_ETH1CFGR 0x7F0
+#define RCC_ETH2CFGR 0x7F4
+#define RCC_USBHCFGR 0x7FC
+#define RCC_USB2PHY1CFGR 0x800
+#define RCC_USB2PHY2CFGR 0x804
+#define RCC_USB3DRCFGR 0x808
+#define RCC_USB3PCIEPHYCFGR 0x80C
+#define RCC_PCIECFGR 0x810
+#define RCC_USBTCCFGR 0x814
+#define RCC_ETHSWCFGR 0x818
+#define RCC_ETHSWACMCFGR 0x81C
+#define RCC_ETHSWACMMSGCFGR 0x820
+#define RCC_STGENCFGR 0x824
+#define RCC_SDMMC1CFGR 0x830
+#define RCC_SDMMC2CFGR 0x834
+#define RCC_SDMMC3CFGR 0x838
+#define RCC_GPUCFGR 0x83C
+#define RCC_LTDCCFGR 0x840
+#define RCC_DSICFGR 0x844
+#define RCC_LVDSCFGR 0x850
+#define RCC_CSICFGR 0x858
+#define RCC_DCMIPPCFGR 0x85C
+#define RCC_CCICFGR 0x860
+#define RCC_VDECCFGR 0x864
+#define RCC_VENCCFGR 0x868
+#define RCC_RNGCFGR 0x870
+#define RCC_PKACFGR 0x874
+#define RCC_SAESCFGR 0x878
+#define RCC_HASHCFGR 0x87C
+#define RCC_CRYP1CFGR 0x880
+#define RCC_CRYP2CFGR 0x884
+#define RCC_IWDG1CFGR 0x888
+#define RCC_IWDG2CFGR 0x88C
+#define RCC_IWDG3CFGR 0x890
+#define RCC_IWDG4CFGR 0x894
+#define RCC_IWDG5CFGR 0x898
+#define RCC_WWDG1CFGR 0x89C
+#define RCC_WWDG2CFGR 0x8A0
+#define RCC_VREFCFGR 0x8A8
+#define RCC_DTSCFGR 0x8AC
+#define RCC_CRCCFGR 0x8B4
+#define RCC_SERCCFGR 0x8B8
+#define RCC_OSPIIOMCFGR 0x8BC
+#define RCC_GICV2MCFGR 0x8C0
+#define RCC_I3C1CFGR 0x8C8
+#define RCC_I3C2CFGR 0x8CC
+#define RCC_I3C3CFGR 0x8D0
+#define RCC_I3C4CFGR 0x8D4
+#define RCC_MUXSELCFGR 0x1000
+#define RCC_XBAR0CFGR 0x1018
+#define RCC_XBAR1CFGR 0x101C
+#define RCC_XBAR2CFGR 0x1020
+#define RCC_XBAR3CFGR 0x1024
+#define RCC_XBAR4CFGR 0x1028
+#define RCC_XBAR5CFGR 0x102C
+#define RCC_XBAR6CFGR 0x1030
+#define RCC_XBAR7CFGR 0x1034
+#define RCC_XBAR8CFGR 0x1038
+#define RCC_XBAR9CFGR 0x103C
+#define RCC_XBAR10CFGR 0x1040
+#define RCC_XBAR11CFGR 0x1044
+#define RCC_XBAR12CFGR 0x1048
+#define RCC_XBAR13CFGR 0x104C
+#define RCC_XBAR14CFGR 0x1050
+#define RCC_XBAR15CFGR 0x1054
+#define RCC_XBAR16CFGR 0x1058
+#define RCC_XBAR17CFGR 0x105C
+#define RCC_XBAR18CFGR 0x1060
+#define RCC_XBAR19CFGR 0x1064
+#define RCC_XBAR20CFGR 0x1068
+#define RCC_XBAR21CFGR 0x106C
+#define RCC_XBAR22CFGR 0x1070
+#define RCC_XBAR23CFGR 0x1074
+#define RCC_XBAR24CFGR 0x1078
+#define RCC_XBAR25CFGR 0x107C
+#define RCC_XBAR26CFGR 0x1080
+#define RCC_XBAR27CFGR 0x1084
+#define RCC_XBAR28CFGR 0x1088
+#define RCC_XBAR29CFGR 0x108C
+#define RCC_XBAR30CFGR 0x1090
+#define RCC_XBAR31CFGR 0x1094
+#define RCC_XBAR32CFGR 0x1098
+#define RCC_XBAR33CFGR 0x109C
+#define RCC_XBAR34CFGR 0x10A0
+#define RCC_XBAR35CFGR 0x10A4
+#define RCC_XBAR36CFGR 0x10A8
+#define RCC_XBAR37CFGR 0x10AC
+#define RCC_XBAR38CFGR 0x10B0
+#define RCC_XBAR39CFGR 0x10B4
+#define RCC_XBAR40CFGR 0x10B8
+#define RCC_XBAR41CFGR 0x10BC
+#define RCC_XBAR42CFGR 0x10C0
+#define RCC_XBAR43CFGR 0x10C4
+#define RCC_XBAR44CFGR 0x10C8
+#define RCC_XBAR45CFGR 0x10CC
+#define RCC_XBAR46CFGR 0x10D0
+#define RCC_XBAR47CFGR 0x10D4
+#define RCC_XBAR48CFGR 0x10D8
+#define RCC_XBAR49CFGR 0x10DC
+#define RCC_XBAR50CFGR 0x10E0
+#define RCC_XBAR51CFGR 0x10E4
+#define RCC_XBAR52CFGR 0x10E8
+#define RCC_XBAR53CFGR 0x10EC
+#define RCC_XBAR54CFGR 0x10F0
+#define RCC_XBAR55CFGR 0x10F4
+#define RCC_XBAR56CFGR 0x10F8
+#define RCC_XBAR57CFGR 0x10FC
+#define RCC_XBAR58CFGR 0x1100
+#define RCC_XBAR59CFGR 0x1104
+#define RCC_XBAR60CFGR 0x1108
+#define RCC_XBAR61CFGR 0x110C
+#define RCC_XBAR62CFGR 0x1110
+#define RCC_XBAR63CFGR 0x1114
+#define RCC_PREDIV0CFGR 0x1118
+#define RCC_PREDIV1CFGR 0x111C
+#define RCC_PREDIV2CFGR 0x1120
+#define RCC_PREDIV3CFGR 0x1124
+#define RCC_PREDIV4CFGR 0x1128
+#define RCC_PREDIV5CFGR 0x112C
+#define RCC_PREDIV6CFGR 0x1130
+#define RCC_PREDIV7CFGR 0x1134
+#define RCC_PREDIV8CFGR 0x1138
+#define RCC_PREDIV9CFGR 0x113C
+#define RCC_PREDIV10CFGR 0x1140
+#define RCC_PREDIV11CFGR 0x1144
+#define RCC_PREDIV12CFGR 0x1148
+#define RCC_PREDIV13CFGR 0x114C
+#define RCC_PREDIV14CFGR 0x1150
+#define RCC_PREDIV15CFGR 0x1154
+#define RCC_PREDIV16CFGR 0x1158
+#define RCC_PREDIV17CFGR 0x115C
+#define RCC_PREDIV18CFGR 0x1160
+#define RCC_PREDIV19CFGR 0x1164
+#define RCC_PREDIV20CFGR 0x1168
+#define RCC_PREDIV21CFGR 0x116C
+#define RCC_PREDIV22CFGR 0x1170
+#define RCC_PREDIV23CFGR 0x1174
+#define RCC_PREDIV24CFGR 0x1178
+#define RCC_PREDIV25CFGR 0x117C
+#define RCC_PREDIV26CFGR 0x1180
+#define RCC_PREDIV27CFGR 0x1184
+#define RCC_PREDIV28CFGR 0x1188
+#define RCC_PREDIV29CFGR 0x118C
+#define RCC_PREDIV30CFGR 0x1190
+#define RCC_PREDIV31CFGR 0x1194
+#define RCC_PREDIV32CFGR 0x1198
+#define RCC_PREDIV33CFGR 0x119C
+#define RCC_PREDIV34CFGR 0x11A0
+#define RCC_PREDIV35CFGR 0x11A4
+#define RCC_PREDIV36CFGR 0x11A8
+#define RCC_PREDIV37CFGR 0x11AC
+#define RCC_PREDIV38CFGR 0x11B0
+#define RCC_PREDIV39CFGR 0x11B4
+#define RCC_PREDIV40CFGR 0x11B8
+#define RCC_PREDIV41CFGR 0x11BC
+#define RCC_PREDIV42CFGR 0x11C0
+#define RCC_PREDIV43CFGR 0x11C4
+#define RCC_PREDIV44CFGR 0x11C8
+#define RCC_PREDIV45CFGR 0x11CC
+#define RCC_PREDIV46CFGR 0x11D0
+#define RCC_PREDIV47CFGR 0x11D4
+#define RCC_PREDIV48CFGR 0x11D8
+#define RCC_PREDIV49CFGR 0x11DC
+#define RCC_PREDIV50CFGR 0x11E0
+#define RCC_PREDIV51CFGR 0x11E4
+#define RCC_PREDIV52CFGR 0x11E8
+#define RCC_PREDIV53CFGR 0x11EC
+#define RCC_PREDIV54CFGR 0x11F0
+#define RCC_PREDIV55CFGR 0x11F4
+#define RCC_PREDIV56CFGR 0x11F8
+#define RCC_PREDIV57CFGR 0x11FC
+#define RCC_PREDIV58CFGR 0x1200
+#define RCC_PREDIV59CFGR 0x1204
+#define RCC_PREDIV60CFGR 0x1208
+#define RCC_PREDIV61CFGR 0x120C
+#define RCC_PREDIV62CFGR 0x1210
+#define RCC_PREDIV63CFGR 0x1214
+#define RCC_PREDIVSR1 0x1218
+#define RCC_PREDIVSR2 0x121C
+#define RCC_FINDIV0CFGR 0x1224
+#define RCC_FINDIV1CFGR 0x1228
+#define RCC_FINDIV2CFGR 0x122C
+#define RCC_FINDIV3CFGR 0x1230
+#define RCC_FINDIV4CFGR 0x1234
+#define RCC_FINDIV5CFGR 0x1238
+#define RCC_FINDIV6CFGR 0x123C
+#define RCC_FINDIV7CFGR 0x1240
+#define RCC_FINDIV8CFGR 0x1244
+#define RCC_FINDIV9CFGR 0x1248
+#define RCC_FINDIV10CFGR 0x124C
+#define RCC_FINDIV11CFGR 0x1250
+#define RCC_FINDIV12CFGR 0x1254
+#define RCC_FINDIV13CFGR 0x1258
+#define RCC_FINDIV14CFGR 0x125C
+#define RCC_FINDIV15CFGR 0x1260
+#define RCC_FINDIV16CFGR 0x1264
+#define RCC_FINDIV17CFGR 0x1268
+#define RCC_FINDIV18CFGR 0x126C
+#define RCC_FINDIV19CFGR 0x1270
+#define RCC_FINDIV20CFGR 0x1274
+#define RCC_FINDIV21CFGR 0x1278
+#define RCC_FINDIV22CFGR 0x127C
+#define RCC_FINDIV23CFGR 0x1280
+#define RCC_FINDIV24CFGR 0x1284
+#define RCC_FINDIV25CFGR 0x1288
+#define RCC_FINDIV26CFGR 0x128C
+#define RCC_FINDIV27CFGR 0x1290
+#define RCC_FINDIV28CFGR 0x1294
+#define RCC_FINDIV29CFGR 0x1298
+#define RCC_FINDIV30CFGR 0x129C
+#define RCC_FINDIV31CFGR 0x12A0
+#define RCC_FINDIV32CFGR 0x12A4
+#define RCC_FINDIV33CFGR 0x12A8
+#define RCC_FINDIV34CFGR 0x12AC
+#define RCC_FINDIV35CFGR 0x12B0
+#define RCC_FINDIV36CFGR 0x12B4
+#define RCC_FINDIV37CFGR 0x12B8
+#define RCC_FINDIV38CFGR 0x12BC
+#define RCC_FINDIV39CFGR 0x12C0
+#define RCC_FINDIV40CFGR 0x12C4
+#define RCC_FINDIV41CFGR 0x12C8
+#define RCC_FINDIV42CFGR 0x12CC
+#define RCC_FINDIV43CFGR 0x12D0
+#define RCC_FINDIV44CFGR 0x12D4
+#define RCC_FINDIV45CFGR 0x12D8
+#define RCC_FINDIV46CFGR 0x12DC
+#define RCC_FINDIV47CFGR 0x12E0
+#define RCC_FINDIV48CFGR 0x12E4
+#define RCC_FINDIV49CFGR 0x12E8
+#define RCC_FINDIV50CFGR 0x12EC
+#define RCC_FINDIV51CFGR 0x12F0
+#define RCC_FINDIV52CFGR 0x12F4
+#define RCC_FINDIV53CFGR 0x12F8
+#define RCC_FINDIV54CFGR 0x12FC
+#define RCC_FINDIV55CFGR 0x1300
+#define RCC_FINDIV56CFGR 0x1304
+#define RCC_FINDIV57CFGR 0x1308
+#define RCC_FINDIV58CFGR 0x130C
+#define RCC_FINDIV59CFGR 0x1310
+#define RCC_FINDIV60CFGR 0x1314
+#define RCC_FINDIV61CFGR 0x1318
+#define RCC_FINDIV62CFGR 0x131C
+#define RCC_FINDIV63CFGR 0x1320
+#define RCC_FINDIVSR1 0x1324
+#define RCC_FINDIVSR2 0x1328
+#define RCC_FCALCOBS0CFGR 0x1340
+#define RCC_FCALCOBS1CFGR 0x1344
+#define RCC_FCALCREFCFGR 0x1348
+#define RCC_FCALCCR1 0x134C
+#define RCC_FCALCCR2 0x1354
+#define RCC_FCALCSR 0x1358
+#define RCC_PLL4CFGR1 0x1360
+#define RCC_PLL4CFGR2 0x1364
+#define RCC_PLL4CFGR3 0x1368
+#define RCC_PLL4CFGR4 0x136C
+#define RCC_PLL4CFGR5 0x1370
+#define RCC_PLL4CFGR6 0x1378
+#define RCC_PLL4CFGR7 0x137C
+#define RCC_PLL5CFGR1 0x1388
+#define RCC_PLL5CFGR2 0x138C
+#define RCC_PLL5CFGR3 0x1390
+#define RCC_PLL5CFGR4 0x1394
+#define RCC_PLL5CFGR5 0x1398
+#define RCC_PLL5CFGR6 0x13A0
+#define RCC_PLL5CFGR7 0x13A4
+#define RCC_PLL6CFGR1 0x13B0
+#define RCC_PLL6CFGR2 0x13B4
+#define RCC_PLL6CFGR3 0x13B8
+#define RCC_PLL6CFGR4 0x13BC
+#define RCC_PLL6CFGR5 0x13C0
+#define RCC_PLL6CFGR6 0x13C8
+#define RCC_PLL6CFGR7 0x13CC
+#define RCC_PLL7CFGR1 0x13D8
+#define RCC_PLL7CFGR2 0x13DC
+#define RCC_PLL7CFGR3 0x13E0
+#define RCC_PLL7CFGR4 0x13E4
+#define RCC_PLL7CFGR5 0x13E8
+#define RCC_PLL7CFGR6 0x13F0
+#define RCC_PLL7CFGR7 0x13F4
+#define RCC_PLL8CFGR1 0x1400
+#define RCC_PLL8CFGR2 0x1404
+#define RCC_PLL8CFGR3 0x1408
+#define RCC_PLL8CFGR4 0x140C
+#define RCC_PLL8CFGR5 0x1410
+#define RCC_PLL8CFGR6 0x1418
+#define RCC_PLL8CFGR7 0x141C
+#define RCC_VERR 0xFFF4
+#define RCC_IDR 0xFFF8
+#define RCC_SIDR 0xFFFC
+
+#endif /* STM32MP25_RCC_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
index 9d3ffd3fb2c1..0736f6c81269 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
@@ -125,6 +125,7 @@ static const struct of_device_id sun20i_d1_r_ccu_ids[] = {
{ .compatible = "allwinner,sun20i-d1-r-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun20i_d1_r_ccu_ids);
static struct platform_driver sun20i_d1_r_ccu_driver = {
.probe = sun20i_d1_r_ccu_probe,
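The MODULE_DEVICE_TABLE(of, ...) lines added to each sunxi-ng CCU driver below all follow one pattern: they export the driver's OF match table into the module alias table so that, when these CCUs are built as modules, userspace can autoload them from the MODALIAS uevent of the matching device-tree node. A minimal, hedged sketch of that pattern with generic names (not one of the drivers touched here):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_ccu_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_ccu_ids[] = {
	{ .compatible = "vendor,example-ccu" },	/* hypothetical compatible string */
	{ }
};
/* Emits an "of:...Cvendor,example-ccu" module alias used for autoloading. */
MODULE_DEVICE_TABLE(of, example_ccu_ids);

static struct platform_driver example_ccu_driver = {
	.probe	= example_ccu_probe,
	.driver	= {
		.name		= "example-ccu",
		.of_match_table	= example_ccu_ids,
	},
};
module_platform_driver(example_ccu_driver);

MODULE_LICENSE("GPL");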
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 48a8fb2c43b7..60756aadfad6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -1394,6 +1394,7 @@ static const struct of_device_id sun20i_d1_ccu_ids[] = {
{ .compatible = "allwinner,sun20i-d1-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun20i_d1_ccu_ids);
static struct platform_driver sun20i_d1_ccu_driver = {
.probe = sun20i_d1_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 451ebb7c99a3..14f5c3da652b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1481,6 +1481,7 @@ static const struct of_device_id sun4i_a10_ccu_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sun4i_a10_ccu_ids);
static struct platform_driver sun4i_a10_ccu_driver = {
.probe = sun4i_a10_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index fddd6c877cec..3b983bb59bd9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -202,6 +202,7 @@ static const struct of_device_id sun50i_a100_r_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-a100-r-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_a100_r_ccu_ids);
static struct platform_driver sun50i_a100_r_ccu_driver = {
.probe = sun50i_a100_r_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 5f93b5526e13..38aa6e5f298e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -1264,6 +1264,7 @@ static const struct of_device_id sun50i_a100_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-a100-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_a100_ccu_ids);
static struct platform_driver sun50i_a100_ccu_driver = {
.probe = sun50i_a100_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 8951ffc14ff5..491b16cfe368 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -171,17 +171,21 @@ static struct ccu_nkm pll_mipi_clk = {
* user manual, and by experiments the PLL doesn't work without
* these bits toggled.
*/
- .enable = BIT(31) | BIT(23) | BIT(22),
- .lock = BIT(28),
- .n = _SUNXI_CCU_MULT(8, 4),
- .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
- .m = _SUNXI_CCU_DIV(0, 4),
+ .enable = BIT(31) | BIT(23) | BIT(22),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 4),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .m = _SUNXI_CCU_DIV(0, 4),
+ .max_m_n_ratio = 3,
+ .min_parent_m_ratio = 24000000,
.common = {
.reg = 0x040,
.hw.init = CLK_HW_INIT("pll-mipi", "pll-video0",
&ccu_nkm_ops,
CLK_SET_RATE_UNGATE | CLK_SET_RATE_PARENT),
.features = CCU_FEATURE_CLOSEST_RATE,
+ .min_rate = 500000000,
+ .max_rate = 1400000000,
},
};
@@ -978,6 +982,7 @@ static const struct of_device_id sun50i_a64_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-a64-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_a64_ccu_ids);
static struct platform_driver sun50i_a64_ccu_driver = {
.probe = sun50i_a64_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 02b28cfc5525..e2dc29fa99e7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -244,6 +244,7 @@ static const struct of_device_id sun50i_h6_r_ccu_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_h6_r_ccu_ids);
static struct platform_driver sun50i_h6_r_ccu_driver = {
.probe = sun50i_h6_r_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 42568c616181..e6421c2ba122 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1181,11 +1181,18 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H6_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h6_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1,
+ .bypass_index = 0, /* index of 24 MHz oscillator */
+};
+
static int sun50i_h6_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int i, ret;
u32 val;
- int i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1252,13 +1259,22 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during PLL CPUX rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h6_cpu_nb);
+
+ return 0;
}
static const struct of_device_id sun50i_h6_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-h6-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_h6_ccu_ids);
static struct platform_driver sun50i_h6_ccu_driver = {
.probe = sun50i_h6_ccu_probe,
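The sun50i_h6_cpu_nb notifier registered at the end of probe above covers a classic hazard: the CPU cannot keep running from PLL-CPUX while that PLL relocks to a new rate. ccu_mux_notifier_register() hooks a rate-change notifier on the PLL clock so the CPUX mux is parked on the 24 MHz oscillator (bypass_index 0) before the change and restored afterwards, with a 1 us settle delay. A hedged sketch of what such a callback does conceptually; the real handler lives in the sunxi-ng mux code, and the helper and parent indices below are illustrative only:

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/notifier.h>

/* Hypothetical stand-ins for this sketch; the real code pokes the CCU mux register. */
#define EXAMPLE_BYPASS_INDEX	0	/* 24 MHz oscillator, as in sun50i_h6_cpu_nb */
#define EXAMPLE_ORIGINAL_INDEX	3	/* hypothetical: the PLL-CPUX input */
static void example_mux_set_parent(unsigned int index) { }

static int example_cpu_mux_notify(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	switch (event) {
	case PRE_RATE_CHANGE:
		/* Park the CPU on the stable bypass parent before the PLL moves. */
		example_mux_set_parent(EXAMPLE_BYPASS_INDEX);
		udelay(1);		/* corresponds to delay_us in the notifier data */
		break;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		/* The PLL is locked (or untouched) again: switch back. */
		example_mux_set_parent(EXAMPLE_ORIGINAL_INDEX);
		udelay(1);
		break;
	}

	return NOTIFY_DONE;
}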
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 21e918582aa5..45aae1ae5178 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1154,6 +1154,7 @@ static const struct of_device_id sun50i_h616_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-h616-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun50i_h616_ccu_ids);
static struct platform_driver sun50i_h616_ccu_driver = {
.probe = sun50i_h616_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 0762deffb33c..8cb8cbbdbafb 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1271,6 +1271,7 @@ static const struct of_device_id sun6i_a31_ccu_ids[] = {
{ .compatible = "allwinner,sun6i-a31-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun6i_a31_ccu_ids);
static struct platform_driver sun6i_a31_ccu_driver = {
.probe = sun6i_a31_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index fdc8ccc586c9..5a98c4e9e667 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -336,6 +336,7 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match);
int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
{
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index e80cc3864e44..e748ad612b8f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -751,6 +751,7 @@ static const struct of_device_id sun8i_a23_ccu_ids[] = {
{ .compatible = "allwinner,sun8i-a23-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_a23_ccu_ids);
static struct platform_driver sun8i_a23_ccu_driver = {
.probe = sun8i_a23_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index d12878a1ba9e..8a27a1777600 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -823,6 +823,7 @@ static const struct of_device_id sun8i_a33_ccu_ids[] = {
{ .compatible = "allwinner,sun8i-a33-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_a33_ccu_ids);
static struct platform_driver sun8i_a33_ccu_driver = {
.probe = sun8i_a33_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 76cbd9e9e89f..93eca47935cf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -911,6 +911,7 @@ static const struct of_device_id sun8i_a83t_ccu_ids[] = {
{ .compatible = "allwinner,sun8i-a83t-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_a83t_ccu_ids);
static struct platform_driver sun8i_a83t_ccu_driver = {
.probe = sun8i_a83t_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 6a043a0a9dd6..b0b8dba239ae 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -337,6 +337,7 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
static struct platform_driver sunxi_de2_clk_driver = {
.probe = sunxi_de2_clk_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 74274c17efb3..ca5739fa04f7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -1082,6 +1082,7 @@ static const struct of_device_id sun8i_h3_ccu_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_h3_ccu_ids);
static struct platform_driver sun8i_h3_ccu_driver = {
.probe = sun8i_h3_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4890a976b1a0..bac7e737db98 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -262,6 +262,7 @@ static const struct of_device_id sun8i_r_ccu_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_r_ccu_ids);
static struct platform_driver sun8i_r_ccu_driver = {
.probe = sun8i_r_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 31eca0d3bc1e..3774b293e74c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -1363,6 +1363,7 @@ static const struct of_device_id sun8i_r40_ccu_ids[] = {
{ .compatible = "allwinner,sun8i-r40-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_r40_ccu_ids);
static struct platform_driver sun8i_r40_ccu_driver = {
.probe = sun8i_r40_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index f3ce8664b288..994258a3ad2e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -768,6 +768,7 @@ static const struct of_device_id sun8i_v3s_ccu_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sun8i_v3s_ccu_ids);
static struct platform_driver sun8i_v3s_ccu_driver = {
.probe = sun8i_v3s_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 1d8b1ae1619d..ae7939d3f502 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -254,6 +254,7 @@ static const struct of_device_id sun9i_a80_de_clk_ids[] = {
{ .compatible = "allwinner,sun9i-a80-de-clks" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun9i_a80_de_clk_ids);
static struct platform_driver sun9i_a80_de_clk_driver = {
.probe = sun9i_a80_de_clk_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index a0fb0da8f356..bfa2ff9d52a4 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -127,6 +127,7 @@ static const struct of_device_id sun9i_a80_usb_clk_ids[] = {
{ .compatible = "allwinner,sun9i-a80-usb-clks" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun9i_a80_usb_clk_ids);
static struct platform_driver sun9i_a80_usb_clk_driver = {
.probe = sun9i_a80_usb_clk_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 730fd8e28014..c05805e4ad22 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1236,6 +1236,7 @@ static const struct of_device_id sun9i_a80_ccu_ids[] = {
{ .compatible = "allwinner,sun9i-a80-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, sun9i_a80_ccu_ids);
static struct platform_driver sun9i_a80_ccu_driver = {
.probe = sun9i_a80_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 0d5b60b123b7..76d3d070b2a7 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -565,6 +565,7 @@ static const struct of_device_id suniv_f1c100s_ccu_ids[] = {
{ .compatible = "allwinner,suniv-f1c100s-ccu" },
{ }
};
+MODULE_DEVICE_TABLE(of, suniv_f1c100s_ccu_ids);
static struct platform_driver suniv_f1c100s_ccu_driver = {
.probe = suniv_f1c100s_ccu_probe,
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 8babce55302f..ac0091b4ce24 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -44,6 +44,16 @@ bool ccu_is_better_rate(struct ccu_common *common,
unsigned long current_rate,
unsigned long best_rate)
{
+ unsigned long min_rate, max_rate;
+
+ clk_hw_get_rate_range(&common->hw, &min_rate, &max_rate);
+
+ if (current_rate > max_rate)
+ return false;
+
+ if (current_rate < min_rate)
+ return false;
+
if (common->features & CCU_FEATURE_CLOSEST_RATE)
return abs(current_rate - target_rate) < abs(best_rate - target_rate);
@@ -122,6 +132,7 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
for (i = 0; i < desc->hw_clks->num ; i++) {
struct clk_hw *hw = desc->hw_clks->hws[i];
+ struct ccu_common *common = hw_to_ccu_common(hw);
const char *name;
if (!hw)
@@ -136,6 +147,14 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
pr_err("Couldn't register clock %d - %s\n", i, name);
goto err_clk_unreg;
}
+
+ if (common->max_rate)
+ clk_hw_set_rate_range(hw, common->min_rate,
+ common->max_rate);
+ else
+ WARN(common->min_rate,
+ "No max_rate, ignoring min_rate of clock %d - %s\n",
+ i, name);
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 942a72c09437..329734f8cf42 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -31,6 +31,9 @@ struct ccu_common {
u16 lock_reg;
u32 prediv;
+ unsigned long min_rate;
+ unsigned long max_rate;
+
unsigned long features;
spinlock_t *lock;
struct clk_hw hw;
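Taken together, the ccu_common changes above let an individual clock declare min_rate/max_rate limits: sunxi_ccu_probe() hands them to the clk framework with clk_hw_set_rate_range(), and ccu_is_better_rate() rejects any candidate outside the range reported by clk_hw_get_rate_range(). A small standalone sketch of that selection rule in plain C, using the 500 MHz to 1.4 GHz limits the A64 pll-mipi declares earlier in this patch:

#include <stdbool.h>
#include <stdio.h>

static unsigned long absdiff(unsigned long a, unsigned long b)
{
	return a > b ? a - b : b - a;
}

/* Mirrors the updated ccu_is_better_rate(): range check first, then the policy. */
static bool is_better_rate(bool closest, unsigned long target,
			   unsigned long current, unsigned long best,
			   unsigned long min, unsigned long max)
{
	if (current > max || current < min)
		return false;		/* outside the clock's declared range */

	if (closest)			/* CCU_FEATURE_CLOSEST_RATE behaviour */
		return absdiff(current, target) < absdiff(best, target);

	return current <= target && current > best;
}

int main(void)
{
	const unsigned long min = 500000000UL, max = 1400000000UL;

	/* 1.5 GHz is numerically closest to a 1.45 GHz target but is rejected
	 * because it exceeds max_rate; 1.4 GHz is then the better candidate. */
	printf("%d\n", is_better_rate(true, 1450000000UL, 1500000000UL,
				      1400000000UL, min, max));	/* prints 0 */
	printf("%d\n", is_better_rate(true, 1450000000UL, 1400000000UL,
				      0UL, min, max));		/* prints 1 */
	return 0;
}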
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 853f84398e2b..1168d894d636 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -16,6 +16,20 @@ struct _ccu_nkm {
unsigned long m, min_m, max_m;
};
+static bool ccu_nkm_is_valid_rate(struct ccu_common *common, unsigned long parent,
+ unsigned long n, unsigned long m)
+{
+ struct ccu_nkm *nkm = container_of(common, struct ccu_nkm, common);
+
+ if (nkm->max_m_n_ratio && (m > nkm->max_m_n_ratio * n))
+ return false;
+
+ if (nkm->min_parent_m_ratio && (parent < nkm->min_parent_m_ratio * m))
+ return false;
+
+ return true;
+}
+
static unsigned long ccu_nkm_find_best_with_parent_adj(struct ccu_common *common,
struct clk_hw *parent_hw,
unsigned long *parent, unsigned long rate,
@@ -31,6 +45,10 @@ static unsigned long ccu_nkm_find_best_with_parent_adj(struct ccu_common *common
unsigned long tmp_rate, tmp_parent;
tmp_parent = clk_hw_round_rate(parent_hw, rate * _m / (_n * _k));
+
+ if (!ccu_nkm_is_valid_rate(common, tmp_parent, _n, _m))
+ continue;
+
tmp_rate = tmp_parent * _n * _k / _m;
if (ccu_is_better_rate(common, rate, tmp_rate, best_rate) ||
@@ -64,6 +82,9 @@ static unsigned long ccu_nkm_find_best(unsigned long parent, unsigned long rate,
for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
+ if (!ccu_nkm_is_valid_rate(common, parent, _n, _m))
+ continue;
+
unsigned long tmp_rate;
tmp_rate = parent * _n * _k / _m;
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
index 6601defb3f38..c409212ee40e 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.h
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -27,6 +27,8 @@ struct ccu_nkm {
struct ccu_mux_internal mux;
unsigned int fixed_post_div;
+ unsigned long max_m_n_ratio;
+ unsigned long min_parent_m_ratio;
struct ccu_common common;
};
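The two new fields give ccu_nkm_is_valid_rate() its limits: M may not exceed max_m_n_ratio times N, and the parent rate must be at least min_parent_m_ratio times M; the A64 pll-mipi above uses M <= 3*N and parent >= 24 MHz * M. A standalone sketch of the check with worked, hypothetical factor values:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors ccu_nkm_is_valid_rate(); a zero limit means "no constraint". */
static bool nkm_is_valid(unsigned long parent, unsigned long n, unsigned long m,
			 unsigned long max_m_n_ratio, unsigned long min_parent_m_ratio)
{
	if (max_m_n_ratio && m > max_m_n_ratio * n)
		return false;
	if (min_parent_m_ratio && parent < min_parent_m_ratio * m)
		return false;
	return true;
}

int main(void)
{
	/* A64 pll-mipi style limits: m <= 3 * n, parent >= 24 MHz * m. */
	const unsigned long max_ratio = 3, min_parent_per_m = 24000000UL;

	/* n = 1, m = 4 violates m <= 3 * n. */
	printf("%d\n", nkm_is_valid(297000000UL, 1, 4,
				    max_ratio, min_parent_per_m));	/* prints 0 */
	/* n = 7, m = 2 with a 297 MHz parent satisfies both constraints. */
	printf("%d\n", nkm_is_valid(297000000UL, 7, 2,
				    max_ratio, min_parent_per_m));	/* prints 1 */
	return 0;
}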
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 403ec81f561b..3386bd1903df 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -34,8 +34,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.save_context = &omap3_core_dpll_save_context,
.restore_context = &omap3_core_dpll_restore_context,
};
-#else
-static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
@@ -95,11 +93,7 @@ static const struct clk_ops omap3_dpll_core_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.round_rate = &omap2_dpll_round_rate,
};
-#else
-static const struct clk_ops omap3_dpll_core_ck_ops = {};
-#endif
-#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
@@ -137,9 +131,13 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
};
#endif
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+ defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_x2_ck_ops = {
.recalc_rate = &omap3_clkoutx2_recalc,
};
+#endif
/**
* _register_dpll - low level registration of a DPLL clock
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 8d4a52056684..5bb43cc1a8df 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -331,7 +331,7 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void)
return __hisi_161010101_read_reg(cntvct_el0);
}
-static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
+static const struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
/*
* Note that trailing spaces are required to properly match
* the OEM table information.
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 8da972dc1713..3fcbd02b2483 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -210,6 +210,7 @@ static int __init ostm_init(struct device_node *np)
pr_info("%pOF: used for clock events\n", np);
}
+ of_node_set_flag(np, OF_POPULATED);
return 0;
err_cleanup:
@@ -224,7 +225,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_RZG2L
+#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 09fd292eb83d..0bdd9d7ec545 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -251,7 +251,7 @@ static int __init clint_timer_init_dt(struct device_node *np)
}
irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
- riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+ riscv_ipi_set_virq_range(rc, BITS_PER_BYTE);
clint_clear_ipi();
#endif
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 56acf2617262..b7a34b1a975e 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -129,7 +129,6 @@ struct dmtimer {
void __iomem *func_base; /* function register base */
atomic_t enabled;
- unsigned long rate;
unsigned reserved:1;
unsigned posted:1;
unsigned omap1:1;
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f65b..84dce5184a77 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 2015c9fcc3c9..1b7e82a0ad2e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -50,7 +50,8 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
-#define AMD_PSTATE_PREFCORE_THRESHOLD 166
+#define CPPC_HIGHEST_PERF_PERFORMANCE 196
+#define CPPC_HIGHEST_PERF_DEFAULT 166
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -67,6 +68,7 @@ static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
* AMD Energy Preference Performance (EPP)
@@ -111,6 +113,41 @@ static unsigned int epp_values[] = {
typedef int (*cppc_mode_transition_fn)(int);
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /*
+ * Match the broken BIOS on family 17h processors that support CPPC V2:
+ * such a BIOS lacks the nominal_freq and lowest_freq capability
+ * definitions in the ACPI tables.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
@@ -290,6 +327,21 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
+static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /*
+ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+ * the highest performance level is set to 196.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+ */
+ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
+ return CPPC_HIGHEST_PERF_PERFORMANCE;
+
+ return CPPC_HIGHEST_PERF_DEFAULT;
+}
+
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -306,7 +358,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
* the default max perf.
*/
if (cpudata->hw_prefcore)
- highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
else
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
@@ -330,7 +382,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
if (cpudata->hw_prefcore)
- highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
else
highest_perf = cppc_perf.highest_perf;
@@ -604,78 +656,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cpufreq_cpu_put(policy);
}
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
-}
-
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return max_freq * 1000;
-}
-
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
-}
-
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
-
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
-}
-
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -828,9 +808,93 @@ free_cpufreq_put:
mutex_unlock(&amd_pstate_driver_lock);
}
+/*
+ * Get the P-state transition delay time from the ACPI tables that the
+ * firmware populated, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
+{
+ u32 transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_DELAY;
+
+ return transition_delay_ns / NSEC_PER_USEC;
+}
+
+/*
+ * Get the P-state transition latency value from the ACPI tables that the
+ * firmware populated, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
+{
+ u32 transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_LATENCY;
+
+ return transition_latency;
+}
+
+/*
+ * amd_pstate_init_freq: Initialize the max_freq, min_freq,
+ * nominal_freq and lowest_nonlinear_freq for
+ * the @cpudata object.
+ *
+ * Requires: highest_perf, lowest_perf, nominal_perf and
+ * lowest_nonlinear_perf members of @cpudata to be
+ * initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+{
+ int ret;
+ u32 min_freq;
+ u32 highest_perf, max_freq;
+ u32 nominal_perf, nominal_freq;
+ u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 boost_ratio, lowest_nonlinear_ratio;
+ struct cppc_perf_caps cppc_perf;
+
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ if (quirks && quirks->lowest_freq)
+ min_freq = quirks->lowest_freq * 1000;
+ else
+ min_freq = cppc_perf.lowest_freq * 1000;
+
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
+ else
+ nominal_freq = cppc_perf.nominal_freq;
+
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+ lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ WRITE_ONCE(cpudata->min_freq, min_freq);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+ WRITE_ONCE(cpudata->max_freq, max_freq);
+
+ return 0;
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
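amd_pstate_init_freq() above replaces the four per-value helpers removed earlier in this patch: it reads the CPPC capabilities once, honours the DMI quirk values when present, and derives max_freq and lowest_nonlinear_freq by scaling nominal_freq with performance ratios in SCHED_CAPACITY_SHIFT (2^10) fixed point. A standalone worked example of that arithmetic, using the 7K62 quirk's 2600 MHz nominal frequency and otherwise hypothetical performance levels:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* the kernel's fixed-point shift */

int main(void)
{
	/* Nominal frequency from _CPC or the DMI quirk, in MHz. */
	unsigned long long nominal_freq = 2600;
	/* Hypothetical CPPC performance levels, only for the arithmetic. */
	unsigned long long nominal_perf = 120;
	unsigned long long highest_perf = 166;
	unsigned long long lowest_nonlinear_perf = 85;

	unsigned long long boost_ratio =
		(highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
	unsigned long long max_freq =
		((nominal_freq * boost_ratio) >> SCHED_CAPACITY_SHIFT) * 1000; /* kHz */

	unsigned long long lowest_nonlinear_ratio =
		(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
	unsigned long long lowest_nonlinear_freq =
		((nominal_freq * lowest_nonlinear_ratio) >> SCHED_CAPACITY_SHIFT) * 1000;

	/* For these inputs: 3595000 kHz and 1840000 kHz. */
	printf("max_freq = %llu kHz\n", max_freq);
	printf("lowest_nonlinear_freq = %llu kHz\n", lowest_nonlinear_freq);
	return 0;
}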
@@ -855,20 +919,25 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
policy->min = min_freq;
policy->max = max_freq;
@@ -896,13 +965,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
@@ -966,7 +1030,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- max_freq = amd_get_max_freq(cpudata);
+ max_freq = READ_ONCE(cpudata->max_freq);
if (max_freq < 0)
return max_freq;
@@ -979,7 +1043,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
+ freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
if (freq < 0)
return freq;
@@ -1290,7 +1354,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1317,13 +1381,18 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
@@ -1333,12 +1402,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
@@ -1378,6 +1441,13 @@ free_cpudata1:
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata) {
+ kfree(cpudata);
+ policy->driver_data = NULL;
+ }
+
pr_debug("CPU %d exiting\n", policy->cpu);
return 0;
}
@@ -1656,6 +1726,11 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ quirks = NULL;
+
+ /* check if this machine needs CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
switch (cppc_state) {
case AMD_PSTATE_UNDEFINED:
/* Disable on the following configs by default:
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 1a1857b0a6f4..ea8438550b49 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,9 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct private_data *priv;
+
if (!policy)
return 0;
- struct private_data *priv = policy->driver_data;
+
+ priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 64420d9cfd1e..15f1d41920a3 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
@@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 desired_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_desired_perf(cpu, &desired_perf);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b993a498084b..c74dd1e01e0d 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = {
*/
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "allwinner,sun50i-h616", },
+ { .compatible = "allwinner,sun50i-h618", },
+ { .compatible = "allwinner,sun50i-h700", },
{ .compatible = "apple,arm-platform", },
@@ -195,19 +198,18 @@ static const struct of_device_id blocklist[] __initconst = {
static bool __init cpu0_node_has_opp_v2_prop(void)
{
- struct device_node *np = of_cpu_device_node_get(0);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
- of_node_put(np);
return ret;
}
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
const void *data = NULL;
@@ -223,11 +225,9 @@ static int __init cpufreq_dt_platdev_init(void)
if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
goto create_pdev;
- of_node_put(np);
return -ENODEV;
create_pdev:
- of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 2d83bbc65dd0..907e22632fda 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -68,12 +68,9 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
*/
static const char *find_supply_name(struct device *dev)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
struct property *pp;
int cpu = dev->id;
- const char *name = NULL;
-
- np = of_node_get(dev->of_node);
/* This must be valid for sure */
if (WARN_ON(!np))
@@ -82,22 +79,16 @@ static const char *find_supply_name(struct device *dev)
/* Try "cpu0" for older DTs */
if (!cpu) {
pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp) {
- name = "cpu0";
- goto node_put;
- }
+ if (pp)
+ return "cpu0";
}
pp = of_find_property(np, "cpu-supply", NULL);
- if (pp) {
- name = "cpu";
- goto node_put;
- }
+ if (pp)
+ return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
- of_node_put(np);
- return name;
+ return NULL;
}
static int cpufreq_init(struct cpufreq_policy *policy)
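Both cpufreq-dt-platdev.c and cpufreq-dt.c above move to the scope-based cleanup helper from <linux/cleanup.h>: declaring the pointer with __free(device_node) makes of_node_put() run automatically when the variable leaves scope, which is why the explicit of_node_put() calls and the node_put label can go away. A hedged sketch of the idiom with a generic function, not taken from these files:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/of.h>

/* Illustrative lookup: every return path releases the node automatically. */
static const char *example_supply_name(struct device *dev)
{
	struct device_node *np __free(device_node) = of_node_get(dev->of_node);

	if (!np)
		return NULL;

	if (of_property_present(np, "cpu-supply"))
		return "cpu";		/* of_node_put(np) runs here */

	return NULL;			/* ...and here */
}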
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66e10a19d76a..a45aac17c20f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1679,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
- } else if (cpufreq_driver->exit) {
- cpufreq_driver->exit(policy);
- policy->freq_table = NULL;
+ return;
}
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
@@ -1740,7 +1743,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
}
/* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline)
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
@@ -2582,6 +2585,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
+DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
+
+/**
+ * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
+ * @policy: cpufreq policy of the CPUs.
+ *
+ * Update the value of cpufreq pressure for all CPUs in the policy.
+ */
+static void cpufreq_update_pressure(struct cpufreq_policy *policy)
+{
+ unsigned long max_capacity, capped_freq, pressure;
+ u32 max_freq;
+ int cpu;
+
+ cpu = cpumask_first(policy->related_cpus);
+ max_freq = arch_scale_freq_ref(cpu);
+ capped_freq = policy->max;
+
+ /*
+ * Handle boost frequencies properly: they should simply clear
+ * the cpufreq pressure value.
+ */
+ if (max_freq <= capped_freq) {
+ pressure = 0;
+ } else {
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ pressure = max_capacity -
+ mult_frac(max_capacity, capped_freq, max_freq);
+ }
+
+ for_each_cpu(cpu, policy->related_cpus)
+ WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
+}
+
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
@@ -2637,6 +2674,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
+ cpufreq_update_pressure(policy);
+
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
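cpufreq_update_pressure() above turns a frequency cap into a per-CPU capacity-pressure value: when the policy maximum is at or above the reference frequency the pressure is zero, otherwise it is the share of maximum capacity lost to the cap, written into cpufreq_pressure for every CPU of the policy. A standalone worked example of the same computation with hypothetical frequencies and a 1024 maximum capacity:

#include <stdio.h>

/* Same formula as cpufreq_update_pressure(): capacity lost to a frequency cap. */
static unsigned long pressure(unsigned long max_capacity,
			      unsigned long capped_freq, unsigned long max_freq)
{
	if (max_freq <= capped_freq)
		return 0;	/* boost or uncapped: no pressure */

	return max_capacity - (unsigned long)((unsigned long long)max_capacity *
					      capped_freq / max_freq);
}

int main(void)
{
	/* Hypothetical CPU: 3.0 GHz reference frequency, capacity 1024. */
	printf("%lu\n", pressure(1024, 3000000, 3000000));	/* prints 0   */
	printf("%lu\n", pressure(1024, 2000000, 3000000));	/* prints 342 */
	printf("%lu\n", pressure(1024, 1500000, 3000000));	/* prints 512 */
	return 0;
}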
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c17dc51a5a02..10e80d912b8d 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
- unsigned int freq, next_larger = ~0;
+ unsigned int freq, prev_smaller = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
@@ -86,12 +86,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
break;
}
- if ((next_larger > freq) && (freq > policy->max))
- next_larger = freq;
+ if ((prev_smaller < freq) && (freq <= policy->max))
+ prev_smaller = freq;
}
if (!found) {
- policy->max = next_larger;
+ policy->max = prev_smaller;
cpufreq_verify_within_cpu_limits(policy);
}
@@ -194,7 +194,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
- WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ WARN(1, "Invalid frequency table: %u\n", policy->cpu);
return 0;
}
@@ -254,7 +254,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", pos->frequency);
+ count += sprintf(&buf[count], "%u ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
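The freq_table.c change above flips the fallback direction in cpufreq_frequency_table_verify(): when no table entry lands inside the requested [min, max] window, policy->max is now pulled down to the largest table frequency that does not exceed it, instead of being pushed up to the next larger entry, so verification can no longer raise the effective limit above what the policy asked for. A standalone sketch of the new selection with a hypothetical table in kHz:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned int table[] = { 1000000, 1400000, 1800000 };	/* kHz */
	unsigned int policy_min = 1100000, policy_max = 1200000;
	unsigned int prev_smaller = 0;
	bool found = false;

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		unsigned int freq = table[i];

		if (freq >= policy_min && freq <= policy_max) {
			found = true;
			break;
		}
		if (prev_smaller < freq && freq <= policy_max)
			prev_smaller = freq;	/* closest entry below the cap */
	}

	if (!found)
		policy_max = prev_smaller;	/* the old code picked 1400000 here */

	printf("policy_max = %u kHz\n", policy_max);	/* prints 1000000 */
	return 0;
}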
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dbbf299f4219..4b986c044741 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -173,7 +173,6 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -182,7 +181,6 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -213,7 +211,7 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_cached Cached HWP energy-performance preference value
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
@@ -292,11 +290,11 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -594,12 +592,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
{
u64 misc_en;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static int min_perf_pct_min(void)
@@ -1154,12 +1153,15 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ intel_pstate_get_hwp_cap(cpudata);
+
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
@@ -1171,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
cpufreq_cpu_release(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
{
- mutex_lock(&intel_pstate_driver_lock);
-
- update_turbo_state();
- /*
- * If turbo has been turned on or off globally, policy limits for
- * all CPUs need to be updated to reflect that.
- */
- if (global.turbo_disabled_mf != global.turbo_disabled) {
- global.turbo_disabled_mf = global.turbo_disabled;
- arch_set_max_freq_ratio(global.turbo_disabled);
- for_each_possible_cpu(cpu)
- intel_pstate_update_max_freq(cpu);
- } else {
- cpufreq_update_policy(cpu);
- }
+ int cpu;
- mutex_unlock(&intel_pstate_driver_lock);
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_limits(cpu);
}
/************************** sysfs begin ************************/
@@ -1287,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return -EAGAIN;
}
- update_turbo_state();
- if (global.turbo_disabled)
- ret = sprintf(buf, "%u\n", global.turbo_disabled);
- else
- ret = sprintf(buf, "%u\n", global.no_turbo);
+ ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1302,32 +1287,34 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
- int ret;
+ bool no_turbo;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
+ if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
- return -EAGAIN;
+ count = -EAGAIN;
+ goto unlock_driver;
}
- mutex_lock(&intel_pstate_limits_lock);
+ no_turbo = !!clamp_t(int, input, 0, 1);
+
+ if (no_turbo == global.no_turbo)
+ goto unlock_driver;
- update_turbo_state();
if (global.turbo_disabled) {
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
- mutex_unlock(&intel_pstate_limits_lock);
- mutex_unlock(&intel_pstate_driver_lock);
- return -EPERM;
+ count = -EPERM;
+ goto unlock_driver;
}
- global.no_turbo = clamp_t(int, input, 0, 1);
+ WRITE_ONCE(global.no_turbo, no_turbo);
- if (global.no_turbo) {
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
@@ -1338,9 +1325,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
- arch_set_max_freq_ratio(global.no_turbo);
+ intel_pstate_update_limits_for_all();
+ arch_set_max_freq_ratio(no_turbo);
+unlock_driver:
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1621,7 +1609,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
- intel_pstate_get_hwp_cap(cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
@@ -1636,11 +1623,10 @@ static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
unsigned long flags;
u64 value;
- if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1652,24 +1638,8 @@ void notify_hwp_interrupt(void)
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
- /*
- * Currently we never free all_cpu_data. And we can't reach here
- * without this allocated. But for safety for future changes, added
- * check.
- */
- if (unlikely(!READ_ONCE(all_cpu_data)))
- goto ack_intr;
-
- /*
- * The free is done during cleanup, when cpufreq registry is failed.
- * We wouldn't be here if it fails on init or switch status. But for
- * future changes, added check.
- */
- cpudata = READ_ONCE(all_cpu_data[this_cpu]);
- if (unlikely(!cpudata))
- goto ack_intr;
-
- schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
spin_unlock_irqrestore(&hwp_notify_lock, flags);
@@ -1682,7 +1652,7 @@ ack_intr:
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
- unsigned long flags;
+ bool cancel_work;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
@@ -1690,22 +1660,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irqsave(&hwp_notify_lock, flags);
- if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
- cancel_delayed_work(&cpudata->hwp_notify_work);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- unsigned long flags;
-
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_unlock_irq(&hwp_notify_lock);
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
@@ -1791,7 +1761,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1956,7 +1926,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
return val;
@@ -2029,14 +1999,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
- update_turbo_state();
- intel_pstate_set_pstate(cpu, pstate);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2262,7 +2224,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = global.no_turbo || global.turbo_disabled ?
+ target = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2306,8 +2268,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
struct sample *sample;
int target_pstate;
- update_turbo_state();
-
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2437,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
@@ -2445,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
+#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
X86_MATCH(KABYLAKE, core_funcs),
@@ -2526,7 +2488,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
- return global.turbo_disabled || global.no_turbo ?
+ return READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
@@ -2611,12 +2573,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
- intel_pstate_max_within_limits(cpu);
+ intel_pstate_set_pstate(cpu, pstate);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
@@ -2659,10 +2623,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
int max_freq;
- update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_freq = global.no_turbo || global.turbo_disabled ?
+ max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2756,9 +2719,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
- update_turbo_state();
- global.turbo_disabled_mf = global.turbo_disabled;
- policy->cpuinfo.max_freq = global.turbo_disabled ?
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
@@ -2923,8 +2884,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
- update_turbo_state();
-
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -2946,8 +2905,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- update_turbo_state();
-
target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2965,9 +2922,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
- update_turbo_state();
- cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
- HWP_HIGHEST_PERF(hwp_cap);
+ cap_pstate = READ_ONCE(global.no_turbo) ?
+ HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3135,10 +3092,8 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
- spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
- spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3155,6 +3110,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
+ global.turbo_disabled = turbo_is_disabled();
+ global.no_turbo = global.turbo_disabled;
+
+ arch_set_max_freq_ratio(global.turbo_disabled);
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3466,7 +3425,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- WRITE_ONCE(hwp_active, 1);
+ hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
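Note on the pattern used in the intel_pstate hunks above: plain reads of global.no_turbo become READ_ONCE() so that lockless readers pair with the locked WRITE_ONCE() in the sysfs store path. A minimal, self-contained sketch of that pairing, not part of the patch; the demo_* names and the no_turbo_demo flag are illustrative:

/* Illustrative only: a flag updated under a mutex but read locklessly. */
#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_lock);
static bool no_turbo_demo;	/* hypothetical stand-in for global.no_turbo */

static void demo_store_no_turbo(bool val)
{
	mutex_lock(&demo_lock);
	/* WRITE_ONCE() pairs with the lockless READ_ONCE() readers below. */
	WRITE_ONCE(no_turbo_demo, val);
	mutex_unlock(&demo_lock);
}

static bool demo_no_turbo(void)
{
	/* Hot-path readers skip the lock; READ_ONCE() avoids torn or cached reads. */
	return READ_ONCE(no_turbo_demo);
}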
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index a0a61919bc4c..518606adf14e 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -707,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 900000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -740,6 +749,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index df3567c1e93b..6c9f0888a2a7 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 &= ~0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
}
#ifdef CONFIG_PPC_BOOK3S_32
@@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 |= 0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
/* ramping down, set voltage last */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 70b0f21968a0..ec8df5496a0c 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ /* Update HW pressure (the boost frequencies are accepted) */
+ arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 32a9c88f8ff6..0b882765cd66 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -18,26 +19,155 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define MAX_NAME_LEN 7
-
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+struct sunxi_cpufreq_data {
+ u32 (*efuse_xlate)(u32 speedbin);
+};
+
+static u32 sun50i_h6_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ return efuse_value - 1;
+ else
+ return 0;
+}
+
+static int get_soc_id_revision(void)
+{
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ return arm_smccc_get_soc_id_revision();
+#else
+ return SMCCC_RET_NOT_SUPPORTED;
+#endif
+}
+
+/*
+ * Judging by the OPP tables in the vendor BSP, the quality order of the
+ * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best.
+ * 0 and 2 seem identical from the OPP tables' point of view.
+ */
+static u32 sun50i_h616_efuse_xlate(u32 speedbin)
+{
+ int ver_bits = get_soc_id_revision();
+ u32 value = 0;
+
+ switch (speedbin & 0xffff) {
+ case 0x2000:
+ value = 0;
+ break;
+ case 0x2400:
+ case 0x7400:
+ case 0x2c00:
+ case 0x7c00:
+ if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) {
+ /* IC version A/B */
+ value = 1;
+ } else {
+ /* IC version C and later */
+ value = 2;
+ }
+ break;
+ case 0x5000:
+ case 0x5400:
+ case 0x6000:
+ value = 3;
+ break;
+ case 0x5c00:
+ value = 4;
+ break;
+ case 0x5d00:
+ value = 0;
+ break;
+ default:
+ pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
+ speedbin & 0xffff);
+ value = 0;
+ break;
+ }
+
+ return value;
+}
+
+static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
+ .efuse_xlate = sun50i_h6_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
+ .efuse_xlate = sun50i_h616_efuse_xlate,
+};
+
+static const struct of_device_id cpu_opp_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6-operating-points",
+ .data = &sun50i_h6_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-h616-operating-points",
+ .data = &sun50i_h616_cpufreq_data,
+ },
+ {}
+};
+
+/**
+ * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw
+ *
+ * If we ask the cpufreq framework to use the opp-supported-hw feature, it
+ * will ignore every OPP node without that DT property. If none of the OPPs
+ * have it, the driver will fail probing due to the lack of OPPs.
+ *
+ * Returns true if we have at least one OPP with the opp-supported-hw property.
+ */
+static bool dt_has_supported_hw(void)
+{
+ bool has_opp_supported_hw = false;
+ struct device_node *np, *opp;
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return false;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return false;
+
+ for_each_child_of_node(np, opp) {
+ if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ has_opp_supported_hw = true;
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return has_opp_supported_hw;
+}
+
/**
* sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
- * @versions: Set to the value parsed from efuse
*
- * Returns 0 if success.
+ * Returns a non-negative speed bin index on success, or a negative error
+ * value otherwise.
*/
-static int sun50i_cpufreq_get_efuse(u32 *versions)
+static int sun50i_cpufreq_get_efuse(void)
{
+ const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
+ const struct of_device_id *match;
struct device_node *np;
struct device *cpu_dev;
- u32 *speedbin, efuse_value;
- size_t len;
+ u32 *speedbin;
int ret;
cpu_dev = get_cpu_device(0);
@@ -48,12 +178,12 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np,
- "allwinner,sun50i-h6-operating-points");
- if (!ret) {
+ match = of_match_node(cpu_opp_match_list, np);
+ if (!match) {
of_node_put(np);
return -ENOENT;
}
+ opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
@@ -61,33 +191,25 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
nvmem_cell_put(speedbin_nvmem);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
- efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
-
- /*
- * We treat unexpected efuse values as if the SoC was from
- * the slowest bin. Expected efuse values are 1-3, slowest
- * to fastest.
- */
- if (efuse_value >= 1 && efuse_value <= 3)
- *versions = efuse_value - 1;
- else
- *versions = 0;
+ ret = opp_data->efuse_xlate(*speedbin);
kfree(speedbin);
- return 0;
+
+ return ret;
};
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
int *opp_tokens;
- char name[MAX_NAME_LEN];
- unsigned int cpu;
- u32 speed = 0;
+ char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */
+ unsigned int cpu, supported_hw;
+ struct dev_pm_opp_config config = {};
+ int speed;
int ret;
opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
@@ -95,13 +217,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
if (!opp_tokens)
return -ENOMEM;
- ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret) {
+ speed = sun50i_cpufreq_get_efuse();
+ if (speed < 0) {
kfree(opp_tokens);
- return ret;
+ return speed;
}
- snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+ /*
+ * We need at least one OPP with the "opp-supported-hw" property,
+ * or else the upper layers will ignore every OPP and will bail out.
+ */
+ if (dt_has_supported_hw()) {
+ supported_hw = 1U << speed;
+ config.supported_hw = &supported_hw;
+ config.supported_hw_count = 1;
+ }
+
+ snprintf(name, sizeof(name), "speed%d", speed);
+ config.prop_name = name;
for_each_possible_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
@@ -111,12 +244,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (opp_tokens[cpu] < 0) {
- ret = opp_tokens[cpu];
- pr_err("Failed to set prop name\n");
+ ret = dev_pm_opp_set_config(cpu_dev, &config);
+ if (ret < 0)
goto free_opp;
- }
+
+ opp_tokens[cpu] = ret;
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -131,7 +263,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
return ret;
@@ -145,7 +277,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
}
@@ -160,6 +292,9 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-h616" },
+ { .compatible = "allwinner,sun50i-h618" },
+ { .compatible = "allwinner,sun50i-h700" },
{}
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
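The sun50i conversion above moves from dev_pm_opp_set_prop_name() to dev_pm_opp_set_config(). A minimal sketch of that API as used here, with a hypothetical demo_apply_speed_bin() helper; the OPP core copies prop_name and supported_hw internally, so stack buffers are acceptable:

/* Illustrative consumer of dev_pm_opp_set_config()/dev_pm_opp_clear_config(). */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int demo_apply_speed_bin(unsigned int speed)
{
	struct device *cpu_dev = get_cpu_device(0);
	struct dev_pm_opp_config config = {};
	u32 supported_hw = 1U << speed;
	char name[20];
	int token;

	if (!cpu_dev)
		return -ENODEV;

	snprintf(name, sizeof(name), "speed%u", speed);
	config.prop_name = name;		/* selects e.g. opp-microvolt-speed1 */
	config.supported_hw = &supported_hw;	/* matched against opp-supported-hw */
	config.supported_hw_count = 1;

	token = dev_pm_opp_set_config(cpu_dev, &config);
	if (token < 0)
		return token;

	/* ... use the OPPs; on teardown: */
	dev_pm_opp_clear_config(token);
	return 0;
}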
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index aae951d4e77c..514146d98bca 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -52,12 +52,15 @@ out:
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
- struct device_node *np;
struct device *cpu_dev;
struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
+ if (!np)
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = of_cpu_device_node_get(0);
- if (!np)
- return -ENODEV;
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
- if (IS_ERR(priv->cpu_clk)) {
- ret = PTR_ERR(priv->cpu_clk);
- goto out_put_np;
- }
+ if (IS_ERR(priv->cpu_clk))
+ return PTR_ERR(priv->cpu_clk);
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
if (IS_ERR(priv->dfll_clk)) {
@@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- of_node_put(np);
-
return 0;
out_put_pllp_clk:
@@ -122,8 +117,6 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_np:
- of_node_put(np);
return ret;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 46c41e2ca727..714ed53753fa 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -347,12 +347,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id *ti_cpufreq_match_node(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
- np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
return match;
}
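The tegra124 and ti-cpufreq hunks above both switch to scope-based OF node cleanup from <linux/cleanup.h>. A minimal sketch of the pattern with a hypothetical demo_count_root_children() helper:

/* Illustrative: __free(device_node) calls of_node_put() when np leaves scope. */
#include <linux/cleanup.h>
#include <linux/of.h>

static int demo_count_root_children(void)
{
	struct device_node *np __free(device_node) = of_find_node_by_path("/");

	if (!np)
		return -ENODEV;

	/* No explicit of_node_put() is needed on any return path. */
	return of_get_child_count(np);
}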
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 9acde71558d5..bb8761c8a42e 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -439,13 +439,8 @@ static int cpuidle_coupled_clear_pokes(int cpu)
static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
- cpumask_t cpus;
- int ret;
-
- cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
- ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
-
- return ret;
+ return cpumask_first_and_and(cpu_online_mask, &coupled->coupled_cpus,
+ &cpuidle_coupled_poke_pending) < nr_cpu_ids;
}
/**
@@ -626,9 +621,7 @@ out:
static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
- cpumask_t cpus;
- cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
- coupled->online_count = cpumask_weight(&cpus);
+ coupled->online_count = cpumask_weight_and(cpu_online_mask, &coupled->coupled_cpus);
}
/**
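The coupled-idle hunks above drop on-stack cpumask_t temporaries in favour of combined helpers. A minimal sketch of the same idea with cpumask_weight_and(); the demo_* helpers and group mask are illustrative:

#include <linux/cpumask.h>

/* Illustrative: work on the intersection of two masks without a temporary. */
static unsigned int demo_online_count(const struct cpumask *group)
{
	return cpumask_weight_and(cpu_online_mask, group);
}

static bool demo_any_online(const struct cpumask *group)
{
	return demo_online_count(group) > 0;
}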
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 13bf743f885b..602c4dfdd7e2 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -59,15 +59,14 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
return cpuidle_register(&kirkwood_idle_driver, NULL);
}
-static int kirkwood_cpuidle_remove(struct platform_device *pdev)
+static void kirkwood_cpuidle_remove(struct platform_device *pdev)
{
cpuidle_unregister(&kirkwood_idle_driver);
- return 0;
}
static struct platform_driver kirkwood_cpuidle_driver = {
.probe = kirkwood_cpuidle_probe,
- .remove = kirkwood_cpuidle_remove,
+ .remove_new = kirkwood_cpuidle_remove,
.driver = {
.name = "kirkwood_cpuidle",
},
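The kirkwood change above adopts the void-returning platform remove callback. A minimal sketch of a driver using it; the demo_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

/*
 * .remove_new returns void: the driver core cannot act on a remove-time
 * error anyway, so the callback just performs its cleanup.
 */
static void demo_remove(struct platform_device *pdev)
{
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo-cpuidle",
	},
};
module_platform_driver(demo_driver);
MODULE_DESCRIPTION("Illustrative platform driver skeleton");
MODULE_LICENSE("GPL");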
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index b88af1262f1a..fae958794339 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -20,6 +20,7 @@
#include <linux/string.h>
#include "cpuidle-psci.h"
+#include "dt_idle_genpd.h"
struct psci_pd_provider {
struct list_head link;
@@ -200,4 +201,4 @@ static int __init psci_idle_init_domains(void)
{
return platform_driver_register(&psci_cpuidle_domain_driver);
}
-subsys_initcall(psci_idle_init_domains);
+core_initcall(psci_idle_init_domains);
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index bf68920d038a..782030a27703 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -28,6 +28,7 @@
#include "cpuidle-psci.h"
#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
struct psci_cpuidle_data {
u32 *psci_states;
@@ -224,7 +225,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (IS_ENABLED(CONFIG_PREEMPT_RT))
return 0;
- data->dev = psci_dt_attach_cpu(cpu);
+ data->dev = dt_idle_attach_cpu(cpu, "psci");
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
@@ -311,7 +312,7 @@ static void psci_cpu_deinit_idle(int cpu)
{
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
- psci_dt_detach_cpu(data->dev);
+ dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_cpuhp = false;
}
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index 4e132640ed64..ef004ec7a7c5 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -3,29 +3,9 @@
#ifndef __CPUIDLE_PSCI_H
#define __CPUIDLE_PSCI_H
-struct device;
struct device_node;
void psci_set_domain_state(u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
-#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-
-#include "dt_idle_genpd.h"
-
-static inline struct device *psci_dt_attach_cpu(int cpu)
-{
- return dt_idle_attach_cpu(cpu, "psci");
-}
-
-static inline void psci_dt_detach_cpu(struct device *dev)
-{
- dt_idle_detach_cpu(dev);
-}
-
-#else
-static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
-static inline void psci_dt_detach_cpu(struct device *dev) { }
-#endif
-
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 8e9058c4ea63..6617eb494a11 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -44,6 +44,7 @@ static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
/**
* ladder_do_selection - prepares private data for a state change
+ * @dev: the CPU
* @ldev: the ladder device
* @old_idx: the current state index
* @new_idx: the new target state index
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3d02702456a5..94f23c6fc93b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -67,6 +67,7 @@ config CRYPTO_DEV_GEODE
config ZCRYPT
tristate "Support for s390 cryptographic adapters"
depends on S390
+ depends on AP
select HW_RANDOM
help
Select this option if you want to enable support for
@@ -74,23 +75,6 @@ config ZCRYPT
to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
or Accelerator (CEXxA) mode.
-config ZCRYPT_DEBUG
- bool "Enable debug features for s390 cryptographic adapters"
- default n
- depends on DEBUG_KERNEL
- depends on ZCRYPT
- help
- Say 'Y' here to enable some additional debug features on the
- s390 cryptographic adapters driver.
-
- There will be some more sysfs attributes displayed for ap cards
- and queues and some flags on crypto requests are interpreted as
- debugging messages to force error injection.
-
- Do not enable on production level kernel build.
-
- If unsure, say N.
-
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
@@ -660,6 +644,14 @@ config CRYPTO_DEV_ROCKCHIP_DEBUG
This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
the number of requests per algorithm and other internal stats.
+config CRYPTO_DEV_TEGRA
+ tristate "Enable Tegra Security Engine"
+ depends on TEGRA_HOST1X
+ select CRYPTO_ENGINE
+
+ help
+ Select this to enable Tegra Security Engine which accelerates various
+ AES encryption/decryption and HASH algorithms.
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 95331bc6456b..ad4ccef67d12 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 83a9093eff25..a895e4289efa 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -51,7 +51,7 @@ static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
*__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
}
-void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
+void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd)
{
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
@@ -68,7 +68,31 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
cmd->msecs = MAX_EXEC_TIME_READ;
cmd->rxsize = READ_RSP_SIZE;
}
-EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
+EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
+
+int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
+{
+ if (addr > OTP_ZONE_SIZE)
+ return -EINVAL;
+
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read a word from the OTP zone, which may contain e.g. serial
+ * numbers or similar data if the zone has been pre-initialized and locked.
+ */
+ cmd->param1 = OTP_ZONE;
+ cmd->param2 = cpu_to_le16(addr);
+ cmd->count = READ_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_init_read_otp_cmd);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
{
@@ -301,7 +325,7 @@ static int device_sanity_check(struct i2c_client *client)
if (!cmd)
return -ENOMEM;
- atmel_i2c_init_read_cmd(cmd);
+ atmel_i2c_init_read_config_cmd(cmd);
ret = atmel_i2c_send_receive(client, cmd);
if (ret)
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index c0bd429ee2c7..72f04c15682f 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -64,6 +64,10 @@ struct atmel_i2c_cmd {
/* Definitions for eeprom organization */
#define CONFIGURATION_ZONE 0
+#define OTP_ZONE 1
+
+/* Definitions for eeprom zone sizes */
+#define OTP_ZONE_SIZE 64
/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX 1 /* buffer index of data in response */
@@ -124,6 +128,7 @@ struct atmel_ecc_driver_data {
* @wake_token : wake token array of zeros
* @wake_token_sz : size in bytes of the wake_token
* @tfm_count : number of active crypto transformations on i2c client
+ * @hwrng : the hardware random number generator (hwrng) instance
*
* Reads and writes from/to the i2c client are sequential. The first byte
* transmitted to the device is treated as the byte size. Any attempt to send
@@ -177,7 +182,8 @@ void atmel_i2c_flush_queue(void);
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
-void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd);
+int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index c77f482d2a97..24ffdf505023 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -91,6 +91,62 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
return max;
}
+static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
+{
+ struct atmel_i2c_cmd cmd;
+ int ret = -1;
+
+ if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
+ dev_err(&client->dev, "failed, invalid otp address %04X\n",
+ addr);
+ return ret;
+ }
+
+ ret = atmel_i2c_send_receive(client, &cmd);
+
+ if (cmd.data[0] == 0xff) {
+ dev_err(&client->dev, "failed, device not ready\n");
+ return -EINVAL;
+ }
+
+ memcpy(otp, cmd.data+1, 4);
+
+ return ret;
+}
+
+static ssize_t otp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u16 addr;
+ u8 otp[OTP_ZONE_SIZE];
+ char *str = buf;
+ struct i2c_client *client = to_i2c_client(dev);
+ int i;
+
+ for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) {
+ if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
+ dev_err(dev, "failed to read otp zone\n");
+ break;
+ }
+ }
+
+ for (i = 0; i < addr*2; i++)
+ str += sprintf(str, "%02X", otp[i]);
+ str += sprintf(str, "\n");
+ return str - buf;
+}
+static DEVICE_ATTR_RO(otp);
+
+static struct attribute *atmel_sha204a_attrs[] = {
+ &dev_attr_otp.attr,
+ NULL
+};
+
+static const struct attribute_group atmel_sha204a_groups = {
+ .name = "atsha204a",
+ .attrs = atmel_sha204a_attrs,
+};
+
static int atmel_sha204a_probe(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv;
@@ -111,6 +167,16 @@ static int atmel_sha204a_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
+ /* otp read out */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups);
+ if (ret) {
+ dev_err(&client->dev, "failed to register sysfs entry\n");
+ return ret;
+ }
+
return ret;
}
@@ -123,6 +189,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
return;
}
+ sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);
+
kfree((void *)i2c_priv->hwrng.priv);
}
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 07989bb8c220..3fdc64b5a65e 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
if (hash_iv_len) {
packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
packet_dump(" hash IV: ", ptr, hash_iv_len);
- ptr += ciph_key_len;
+ ptr += hash_iv_len;
}
if (ciph_iv_len) {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bdf367f3f679..bd418dea586d 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -512,6 +512,7 @@ static const struct of_device_id caam_match[] = {
MODULE_DEVICE_TABLE(of, caam_match);
struct caam_imx_data {
+ bool page0_access;
const struct clk_bulk_data *clks;
int num_clks;
};
@@ -524,6 +525,7 @@ static const struct clk_bulk_data caam_imx6_clks[] = {
};
static const struct caam_imx_data caam_imx6_data = {
+ .page0_access = true,
.clks = caam_imx6_clks,
.num_clks = ARRAY_SIZE(caam_imx6_clks),
};
@@ -534,6 +536,7 @@ static const struct clk_bulk_data caam_imx7_clks[] = {
};
static const struct caam_imx_data caam_imx7_data = {
+ .page0_access = true,
.clks = caam_imx7_clks,
.num_clks = ARRAY_SIZE(caam_imx7_clks),
};
@@ -545,6 +548,7 @@ static const struct clk_bulk_data caam_imx6ul_clks[] = {
};
static const struct caam_imx_data caam_imx6ul_data = {
+ .page0_access = true,
.clks = caam_imx6ul_clks,
.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};
@@ -554,15 +558,19 @@ static const struct clk_bulk_data caam_vf610_clks[] = {
};
static const struct caam_imx_data caam_vf610_data = {
+ .page0_access = true,
.clks = caam_vf610_clks,
.num_clks = ARRAY_SIZE(caam_vf610_clks),
};
+static const struct caam_imx_data caam_imx8ulp_data;
+
static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -860,6 +868,7 @@ static int caam_probe(struct platform_device *pdev)
int pg_size;
int BLOCK_OFFSET = 0;
bool reg_access = true;
+ const struct caam_imx_data *imx_soc_data;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -894,12 +903,20 @@ static int caam_probe(struct platform_device *pdev)
return -EINVAL;
}
+ imx_soc_data = imx_soc_match->data;
+ reg_access = reg_access && imx_soc_data->page0_access;
+ /*
+ * CAAM clocks cannot be controlled from the kernel.
+ */
+ if (!imx_soc_data->num_clks)
+ goto iomap_ctrl;
+
ret = init_clocks(dev, imx_soc_match->data);
if (ret)
return ret;
}
-
+iomap_ctrl:
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = devm_of_iomap(dev, nprop, 0, NULL);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f44efbb89c34..2102377f727b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
void *arg = &data;
int cmd, rc = 0;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 473301237760..ff6ceb4feee0 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
},
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
const struct of_device_id *match;
match = of_match_node(sp_of_match, pdev->dev.of_node);
if (match && match->data)
return (struct sp_dev_vdata *)match->data;
-#endif
+
return NULL;
}
static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
-#ifdef CONFIG_ACPI
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
return (struct sp_dev_vdata *)match->driver_data;
-#endif
+
return NULL;
}
@@ -212,12 +206,8 @@ static int sp_platform_resume(struct platform_device *pdev)
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
-#ifdef CONFIG_ACPI
.acpi_match_table = sp_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = sp_of_match,
-#endif
},
.probe = sp_platform_probe,
.remove_new = sp_platform_remove,
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index cd67fa348ca7..1b9b7bccdeff 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -13,6 +13,7 @@
#define QM_DFX_COMMON_LEN 0xC3
#define QM_DFX_REGS_LEN 4UL
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_XQC_ADDR_MASK GENMASK(31, 0)
#define CURRENT_FUN_MASK GENMASK(5, 0)
#define CURRENT_Q_MASK GENMASK(31, 16)
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
@@ -167,7 +168,6 @@ static void dump_show(struct hisi_qm *qm, void *info,
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc_curr;
struct qm_sqc sqc;
u32 qp_id;
int ret;
@@ -183,6 +183,8 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
if (!ret) {
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
return 0;
@@ -190,9 +192,10 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->sqc) {
- sqc_curr = qm->sqc + qp_id;
-
- dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
+ memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
@@ -202,7 +205,6 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_cqc *cqc_curr;
struct qm_cqc cqc;
u32 qp_id;
int ret;
@@ -218,6 +220,8 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
if (!ret) {
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
return 0;
@@ -225,9 +229,10 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->cqc) {
- cqc_curr = qm->cqc + qp_id;
-
- dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
+ memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
@@ -263,6 +268,10 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
if (ret)
return ret;
+ aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, xeqc, size, name);
return ret;
@@ -310,27 +319,26 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
{
- u16 sq_depth = qm->qp_array->cq_depth;
- void *sqe, *sqe_curr;
+ u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
+ void *sqe;
int ret;
ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
- sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
- memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
+ memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- dump_show(qm, sqe_curr, qm->sqe_size, name);
+ dump_show(qm, sqe, qm->sqe_size, name);
kfree(sqe);
@@ -809,8 +817,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
{
int i;
+ if (!dregs)
+ return;
+
/* Setting the pointer is NULL to prevent double free */
for (i = 0; i < reg_len; i++) {
+ if (!dregs[i].regs)
+ continue;
+
kfree(dregs[i].regs);
dregs[i].regs = NULL;
}
@@ -860,14 +874,21 @@ alloc_error:
static int qm_diff_regs_init(struct hisi_qm *qm,
struct dfx_diff_registers *dregs, u32 reg_len)
{
+ int ret;
+
qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- if (IS_ERR(qm->debug.qm_diff_regs))
- return PTR_ERR(qm->debug.qm_diff_regs);
+ if (IS_ERR(qm->debug.qm_diff_regs)) {
+ ret = PTR_ERR(qm->debug.qm_diff_regs);
+ qm->debug.qm_diff_regs = NULL;
+ return ret;
+ }
qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
if (IS_ERR(qm->debug.acc_diff_regs)) {
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- return PTR_ERR(qm->debug.acc_diff_regs);
+ ret = PTR_ERR(qm->debug.acc_diff_regs);
+ qm->debug.acc_diff_regs = NULL;
+ return ret;
}
return 0;
@@ -908,7 +929,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
{
dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+ qm->debug.acc_diff_regs = NULL;
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ qm->debug.qm_diff_regs = NULL;
}
/**
@@ -1075,12 +1098,12 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
{
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
- &qm_debug_fops);
-
file->index = index;
mutex_init(&file->lock);
file->debug = &qm->debug;
+
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+ &qm_debug_fops);
}
static int qm_debugfs_atomic64_set(void *data, u64 val)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index d93aa6630a57..10aa4da93323 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -106,7 +106,7 @@
#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
-#define HPRE_SQE_MASK_LEN 24
+#define HPRE_SQE_MASK_LEN 44
#define HPRE_CTX_Q_NUM_DEF 1
#define HPRE_DFX_BASE 0x301000
@@ -1074,41 +1074,40 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- hpre_debugfs_root);
-
- qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init HPRE diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
ret = hpre_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hpre_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 92f0a1d9b4a6..3dac8d8e8568 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -645,6 +645,9 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
tmp_xqc = qm->xqc_buf.aeqc;
xqc_dma = qm->xqc_buf.aeqc_dma;
break;
+ default:
+ dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
+ return -EINVAL;
}
/* Setting xqc will fail if master OOO is blocked. */
@@ -2893,12 +2896,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
+ qm_remove_uacce(qm);
qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 93a972fcbf63..0558f98e221f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -481,8 +481,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
if (ctx->pbuf_supported)
sec_free_pbuf_resource(dev, qp_ctx->res);
- if (ctx->alg_type == SEC_AEAD)
+ if (ctx->alg_type == SEC_AEAD) {
sec_free_mac_resource(dev, qp_ctx->res);
+ sec_free_aiv_resource(dev, qp_ctx->res);
+ }
}
static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c290d8937b19..75aad04ffe5e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -99,8 +99,8 @@
#define SEC_DBGFS_VAL_MAX_LEN 20
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-#define SEC_SQE_MASK_OFFSET 64
-#define SEC_SQE_MASK_LEN 48
+#define SEC_SQE_MASK_OFFSET 16
+#define SEC_SQE_MASK_LEN 108
#define SEC_SHAPER_TYPE_RATE 400
#define SEC_DFX_BASE 0x301000
@@ -152,7 +152,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
- {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
@@ -901,37 +901,36 @@ static int sec_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- sec_debugfs_root);
- qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
-
ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init SEC diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
ret = sec_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(sec_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
return ret;
}
static void sec_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}
static int sec_show_last_regs_init(struct hisi_qm *qm)
@@ -1324,7 +1323,8 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = hisi_qm_sriov_configure,
+ .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
+ hisi_qm_sriov_configure : NULL,
.shutdown = hisi_qm_dev_shutdown,
.driver.pm = &sec_pm_ops,
};
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 0beca257c20b..568acd0aee3f 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -161,9 +161,6 @@ static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
struct mem_block *block;
u32 block_index, offset;
- if (!pool || !hw_sgl_dma || index >= pool->count)
- return ERR_PTR(-EINVAL);
-
block = pool->mem_block;
block_index = index / pool->sgl_num_per_block;
offset = index % pool->sgl_num_per_block;
@@ -230,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sg;
int sg_n;
- if (!dev || !sgl || !pool || !hw_sgl_dma)
+ if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c065fd867161..c94a7b20d07e 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -887,36 +887,34 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
-
- qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
- qm->debug.debug_root = dev_d;
ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init ZIP diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hzip_debugfs_root);
+
hisi_qm_debug_init(qm);
if (qm->fun_type == QM_HW_PF) {
ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hisi_zip_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(hzip_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
return ret;
}
@@ -940,10 +938,10 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+
if (qm->fun_type == QM_HW_PF) {
hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 2524091a5f70..56985e395263 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -49,10 +49,10 @@ struct iaa_wq {
struct iaa_device *iaa_device;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct iaa_device_compression_mode {
@@ -73,10 +73,10 @@ struct iaa_device {
int n_wq;
struct list_head wqs;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct wq_table_entry {
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 1cd304de5388..e810d286ee8c 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -347,18 +347,16 @@ int add_iaa_compression_mode(const char *name,
goto free;
if (ll_table) {
- mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
+ mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL);
if (!mode->ll_table)
goto free;
- memcpy(mode->ll_table, ll_table, ll_table_size);
mode->ll_table_size = ll_table_size;
}
if (d_table) {
- mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
+ mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL);
if (!mode->d_table)
goto free;
- memcpy(mode->d_table, d_table, d_table_size);
mode->d_table_size = d_table_size;
}
@@ -806,6 +804,8 @@ static int save_iaa_wq(struct idxd_wq *wq)
return -EINVAL;
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
out:
return 0;
}
@@ -821,10 +821,12 @@ static void remove_iaa_wq(struct idxd_wq *wq)
}
}
- if (nr_iaa)
+ if (nr_iaa) {
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
- else
- cpus_per_iaa = 0;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else
+ cpus_per_iaa = 1;
}
static int wq_table_add_wqs(int iaa, int cpu)
@@ -918,7 +920,7 @@ static void rebalance_wq_table(void)
for_each_node_with_cpus(node) {
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
+ for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
int node_cpu = cpumask_nth(cpu, node_cpus);
if (WARN_ON(node_cpu >= nr_cpu_ids)) {
@@ -1075,8 +1077,8 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
update_total_comp_bytes_out(ctx->req->dlen);
update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
} else {
- update_total_decomp_bytes_in(ctx->req->dlen);
- update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
+ update_total_decomp_bytes_in(ctx->req->slen);
+ update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen);
}
if (ctx->compress && compression_ctx->verify_compress) {
@@ -1494,7 +1496,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- u64 start_time_ns;
int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1568,10 +1569,8 @@ static int iaa_comp_acompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
&req->dlen, &compression_crc, disable_async);
- update_max_comp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1618,7 +1617,6 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
struct iaa_wq *iaa_wq;
struct device *dev;
struct idxd_wq *wq;
- u64 start_time_ns;
int order = -1;
cpu = get_cpu();
@@ -1675,10 +1673,8 @@ alloc_dest:
dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, true);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EOVERFLOW) {
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
req->dlen *= 2;
@@ -1709,7 +1705,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
struct device *dev;
- u64 start_time_ns;
struct idxd_wq *wq;
if (!iaa_crypto_enabled) {
@@ -1769,10 +1764,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, false);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -2010,7 +2003,7 @@ static int __init iaa_crypto_init_module(void)
int ret = 0;
int node;
- nr_cpus = num_online_cpus();
+ nr_cpus = num_possible_cpus();
for_each_node_with_cpus(node)
nr_nodes++;
if (!nr_nodes) {
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
index c9f83af4b307..f5cc3d29ca19 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -17,141 +17,117 @@
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"
-static u64 total_comp_calls;
-static u64 total_decomp_calls;
-static u64 total_sw_decomp_calls;
-static u64 max_comp_delay_ns;
-static u64 max_decomp_delay_ns;
-static u64 total_comp_bytes_out;
-static u64 total_decomp_bytes_in;
-static u64 total_completion_einval_errors;
-static u64 total_completion_timeout_errors;
-static u64 total_completion_comp_buf_overflow_errors;
+static atomic64_t total_comp_calls;
+static atomic64_t total_decomp_calls;
+static atomic64_t total_sw_decomp_calls;
+static atomic64_t total_comp_bytes_out;
+static atomic64_t total_decomp_bytes_in;
+static atomic64_t total_completion_einval_errors;
+static atomic64_t total_completion_timeout_errors;
+static atomic64_t total_completion_comp_buf_overflow_errors;
static struct dentry *iaa_crypto_debugfs_root;
void update_total_comp_calls(void)
{
- total_comp_calls++;
+ atomic64_inc(&total_comp_calls);
}
void update_total_comp_bytes_out(int n)
{
- total_comp_bytes_out += n;
+ atomic64_add(n, &total_comp_bytes_out);
}
void update_total_decomp_calls(void)
{
- total_decomp_calls++;
+ atomic64_inc(&total_decomp_calls);
}
void update_total_sw_decomp_calls(void)
{
- total_sw_decomp_calls++;
+ atomic64_inc(&total_sw_decomp_calls);
}
void update_total_decomp_bytes_in(int n)
{
- total_decomp_bytes_in += n;
+ atomic64_add(n, &total_decomp_bytes_in);
}
void update_completion_einval_errs(void)
{
- total_completion_einval_errors++;
+ atomic64_inc(&total_completion_einval_errors);
}
void update_completion_timeout_errs(void)
{
- total_completion_timeout_errors++;
+ atomic64_inc(&total_completion_timeout_errors);
}
void update_completion_comp_buf_overflow_errs(void)
{
- total_completion_comp_buf_overflow_errors++;
-}
-
-void update_max_comp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_comp_delay_ns)
- max_comp_delay_ns = time_diff;
-}
-
-void update_max_decomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_decomp_delay_ns)
- max_decomp_delay_ns = time_diff;
+ atomic64_inc(&total_completion_comp_buf_overflow_errors);
}
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_calls++;
- wq->iaa_device->comp_calls++;
+ atomic64_inc(&wq->comp_calls);
+ atomic64_inc(&wq->iaa_device->comp_calls);
}
void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_bytes += n;
- wq->iaa_device->comp_bytes += n;
+ atomic64_add(n, &wq->comp_bytes);
+ atomic64_add(n, &wq->iaa_device->comp_bytes);
}
void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_calls++;
- wq->iaa_device->decomp_calls++;
+ atomic64_inc(&wq->decomp_calls);
+ atomic64_inc(&wq->iaa_device->decomp_calls);
}
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_bytes += n;
- wq->iaa_device->decomp_bytes += n;
+ atomic64_add(n, &wq->decomp_bytes);
+ atomic64_add(n, &wq->iaa_device->decomp_bytes);
}
static void reset_iaa_crypto_stats(void)
{
- total_comp_calls = 0;
- total_decomp_calls = 0;
- total_sw_decomp_calls = 0;
- max_comp_delay_ns = 0;
- max_decomp_delay_ns = 0;
- total_comp_bytes_out = 0;
- total_decomp_bytes_in = 0;
- total_completion_einval_errors = 0;
- total_completion_timeout_errors = 0;
- total_completion_comp_buf_overflow_errors = 0;
+ atomic64_set(&total_comp_calls, 0);
+ atomic64_set(&total_decomp_calls, 0);
+ atomic64_set(&total_sw_decomp_calls, 0);
+ atomic64_set(&total_comp_bytes_out, 0);
+ atomic64_set(&total_decomp_bytes_in, 0);
+ atomic64_set(&total_completion_einval_errors, 0);
+ atomic64_set(&total_completion_timeout_errors, 0);
+ atomic64_set(&total_completion_comp_buf_overflow_errors, 0);
}
static void reset_wq_stats(struct iaa_wq *wq)
{
- wq->comp_calls = 0;
- wq->comp_bytes = 0;
- wq->decomp_calls = 0;
- wq->decomp_bytes = 0;
+ atomic64_set(&wq->comp_calls, 0);
+ atomic64_set(&wq->comp_bytes, 0);
+ atomic64_set(&wq->decomp_calls, 0);
+ atomic64_set(&wq->decomp_bytes, 0);
}
static void reset_device_stats(struct iaa_device *iaa_device)
{
struct iaa_wq *iaa_wq;
- iaa_device->comp_calls = 0;
- iaa_device->comp_bytes = 0;
- iaa_device->decomp_calls = 0;
- iaa_device->decomp_bytes = 0;
+ atomic64_set(&iaa_device->comp_calls, 0);
+ atomic64_set(&iaa_device->comp_bytes, 0);
+ atomic64_set(&iaa_device->decomp_calls, 0);
+ atomic64_set(&iaa_device->decomp_bytes, 0);
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
reset_wq_stats(iaa_wq);
@@ -160,10 +136,14 @@ static void reset_device_stats(struct iaa_device *iaa_device)
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
seq_printf(m, " name: %s\n", iaa_wq->wq->name);
- seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_wq->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_wq->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_wq->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n\n",
+ atomic64_read(&iaa_wq->decomp_bytes));
}
static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
@@ -173,30 +153,41 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
seq_puts(m, "iaa device:\n");
seq_printf(m, " id: %d\n", iaa_device->idxd->id);
seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
- seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_device->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_device->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_device->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n",
+ atomic64_read(&iaa_device->decomp_bytes));
seq_puts(m, " wqs:\n");
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
wq_show(m, iaa_wq);
}
-static void global_stats_show(struct seq_file *m)
+static int global_stats_show(struct seq_file *m, void *v)
{
seq_puts(m, "global stats:\n");
- seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
- seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
- seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
- seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
- seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
+ seq_printf(m, " total_comp_calls: %llu\n",
+ atomic64_read(&total_comp_calls));
+ seq_printf(m, " total_decomp_calls: %llu\n",
+ atomic64_read(&total_decomp_calls));
+ seq_printf(m, " total_sw_decomp_calls: %llu\n",
+ atomic64_read(&total_sw_decomp_calls));
+ seq_printf(m, " total_comp_bytes_out: %llu\n",
+ atomic64_read(&total_comp_bytes_out));
+ seq_printf(m, " total_decomp_bytes_in: %llu\n",
+ atomic64_read(&total_decomp_bytes_in));
seq_printf(m, " total_completion_einval_errors: %llu\n",
- total_completion_einval_errors);
+ atomic64_read(&total_completion_einval_errors));
seq_printf(m, " total_completion_timeout_errors: %llu\n",
- total_completion_timeout_errors);
+ atomic64_read(&total_completion_timeout_errors));
seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
- total_completion_comp_buf_overflow_errors);
+ atomic64_read(&total_completion_comp_buf_overflow_errors));
+
+ return 0;
}
static int wq_stats_show(struct seq_file *m, void *v)
@@ -205,8 +196,6 @@ static int wq_stats_show(struct seq_file *m, void *v)
mutex_lock(&iaa_devices_lock);
- global_stats_show(m);
-
list_for_each_entry(iaa_device, &iaa_devices, list)
device_stats_show(m, iaa_device);
@@ -243,6 +232,18 @@ static const struct file_operations wq_stats_fops = {
.release = single_release,
};
+static int global_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, global_stats_show, file);
+}
+
+static const struct file_operations global_stats_fops = {
+ .open = global_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");
int __init iaa_crypto_debugfs_init(void)
@@ -252,20 +253,8 @@ int __init iaa_crypto_debugfs_init(void)
iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
- debugfs_create_u64("max_comp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_comp_delay_ns);
- debugfs_create_u64("max_decomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_decomp_delay_ns);
- debugfs_create_u64("total_comp_calls", 0644,
- iaa_crypto_debugfs_root, &total_comp_calls);
- debugfs_create_u64("total_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_decomp_calls);
- debugfs_create_u64("total_sw_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_sw_decomp_calls);
- debugfs_create_u64("total_comp_bytes_out", 0644,
- iaa_crypto_debugfs_root, &total_comp_bytes_out);
- debugfs_create_u64("total_decomp_bytes_in", 0644,
- iaa_crypto_debugfs_root, &total_decomp_bytes_in);
+ debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL,
+ &global_stats_fops);
debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
&wq_stats_fops);
debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c916ca83f070..3787a5f507eb 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -13,8 +13,6 @@ void update_total_comp_bytes_out(int n);
void update_total_decomp_calls(void);
void update_total_sw_decomp_calls(void);
void update_total_decomp_bytes_in(int n);
-void update_max_comp_delay_ns(u64 start_time_ns);
-void update_max_decomp_delay_ns(u64 start_time_ns);
void update_completion_einval_errs(void);
void update_completion_timeout_errs(void);
void update_completion_comp_buf_overflow_errs(void);
@@ -24,8 +22,6 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
-static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
-
#else
static inline int iaa_crypto_debugfs_init(void) { return 0; }
static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -35,8 +31,6 @@ static inline void update_total_comp_bytes_out(int n) {}
static inline void update_total_decomp_calls(void) {}
static inline void update_total_sw_decomp_calls(void) {}
static inline void update_total_decomp_bytes_in(int n) {}
-static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
static inline void update_completion_einval_errs(void) {}
static inline void update_completion_timeout_errs(void) {}
static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -46,8 +40,6 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
-static inline u64 iaa_get_ts(void) { return 0; }
-
#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
#endif
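The header keeps every stats helper callable whether or not CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS is enabled by supplying empty static inline stubs in the disabled case, so call sites need no #ifdefs of their own. The pattern in miniature, with placeholder names (CONFIG_FOO_STATS, foo_update_calls):

#ifdef CONFIG_FOO_STATS
void foo_update_calls(void);			/* real implementation in foo_stats.c */
#else
static inline void foo_update_calls(void) {}	/* compiles away when stats are off */
#endif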
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index a90fbe00b3c8..45728659fbc4 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 1102c47f8293..78f0ea49254d 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -296,7 +298,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
@@ -487,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index ff9c8b5897ea..9ba202079a22 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 927506cf271d..9fd7ec53b9f3 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -208,7 +210,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
@@ -454,6 +456,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->bank_state_save = adf_gen4_bank_state_save;
+ hw_data->bank_state_restore = adf_gen4_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -469,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 9762f2bf7727..d26564cebdec 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -197,7 +197,9 @@ module_pci_driver(adf_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_402XX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 92ef416ccc78..7a06ad519bfc 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index a882e0ea2279..201f9412c582 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index b6d76825a92c..7ef633058c4f 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04de..a512ca4efd3f 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index d581f7c87d6c..cc9255b3b198 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 48cf3eb7c734..6b5b0cf9c7c7 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 446c3d638605..256786662d60 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57fc7..4aaaaf921734 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 5915cde8a7aa..6f9266edc9f1 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -14,16 +14,20 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
+ adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
adf_gen4_config.o \
+ adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_vf_mig.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
adf_gen4_ras.o \
adf_gen4_timer.o \
adf_clock.o \
+ adf_mstate_mgr.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -52,6 +56,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 08658c3a01e9..7830ecb1a1f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
@@ -140,6 +141,40 @@ struct admin_info {
u32 mailbox_offset;
};
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
@@ -150,22 +185,49 @@ struct adf_hw_csr_ops {
u32 ring);
void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
u32 ring, dma_addr_t addr);
+ u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
+ u32 bank, u32 value);
+ u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+ u32 bank);
void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
u32 bank, u32 value);
+ u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*get_int_col_ctl_enable_mask)(void);
};
struct adf_cfg_device_data;
@@ -197,6 +259,20 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};
+struct qat_migdev_ops {
+ int (*init)(struct qat_mig_dev *mdev);
+ void (*cleanup)(struct qat_mig_dev *mdev);
+ void (*reset)(struct qat_mig_dev *mdev);
+ int (*open)(struct qat_mig_dev *mdev);
+ void (*close)(struct qat_mig_dev *mdev);
+ int (*suspend)(struct qat_mig_dev *mdev);
+ int (*resume)(struct qat_mig_dev *mdev);
+ int (*save_state)(struct qat_mig_dev *mdev);
+ int (*save_setup)(struct qat_mig_dev *mdev);
+ int (*load_state)(struct qat_mig_dev *mdev);
+ int (*load_setup)(struct qat_mig_dev *mdev, int size);
+};
+
struct adf_dev_err_mask {
u32 cppagentcmdpar_mask;
u32 parerr_ath_cph_mask;
@@ -244,6 +320,10 @@ struct adf_hw_device_data {
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+ int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+ int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -260,6 +340,7 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
+ struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -316,6 +397,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
@@ -330,11 +412,17 @@ struct adf_fw_loader_data {
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev;
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
bool restarting;
u8 vf_compat_ver;
+ /*
+ * Private area used for device migration.
+ * Memory allocation and freeing are managed by the migration driver.
+ */
+ void *mig_priv;
};
struct adf_dc_data {
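The new qat_migdev_ops table and the GET_VFMIG_OPS() accessor let generic migration code invoke per-generation save/restore hooks without knowing the hardware flavour. The actual call sites are not part of this hunk; a hedged sketch of how a caller might use the table, with an illustrative function name:

static int demo_suspend_and_save(struct adf_accel_dev *accel_dev,
				 struct qat_mig_dev *mdev)
{
	struct qat_migdev_ops *ops = GET_VFMIG_OPS(accel_dev);
	int ret;

	/* the ops are optional; older generations leave them unset */
	if (!ops->suspend || !ops->save_state)
		return -EOPNOTSUPP;

	ret = ops->suspend(mdev);
	if (ret)
		return ret;

	return ops->save_state(mdev);
}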
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 9da2278bd5b7..04260f61d042 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -130,8 +130,7 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_restart(accel_dev)) {
/* The device hung and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
@@ -147,16 +146,8 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /*
- * The dev is back alive. Notify the caller if in sync mode
- *
- * If device restart will take a more time than expected,
- * the schedule_reset() function can timeout and exit. This can be
- * detected by calling the completion_done() function. In this case
- * the reset_data structure needs to be freed here.
- */
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
else
complete(&reset_data->compl);
@@ -191,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
if (!timeout) {
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
+ cancel_work_sync(&reset_data->reset_work);
ret = -EFAULT;
- } else {
- kfree(reset_data);
}
+ kfree(reset_data);
return ret;
}
return 0;
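The adf_aer.c hunk replaces the completion_done() heuristic with a simpler ownership rule: for asynchronous resets the worker frees reset_data, while for synchronous resets the scheduling thread always frees it, cancelling the work first when the wait times out so the worker can no longer touch freed memory. A condensed sketch of that pattern with placeholder names (demo_reset, demo_reset_worker):

#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_reset {
	struct work_struct work;
	struct completion done;
	bool async;
};

static void demo_reset_worker(struct work_struct *work)
{
	struct demo_reset *rd = container_of(work, struct demo_reset, work);

	/* ... perform the reset ... */

	if (rd->async)
		kfree(rd);		/* async: the worker owns the allocation */
	else
		complete(&rd->done);	/* sync: the caller still owns it */
}

static int demo_schedule_sync_reset(struct demo_reset *rd, unsigned long timeout)
{
	int ret = 0;

	schedule_work(&rd->work);
	if (!wait_for_completion_timeout(&rd->done, timeout)) {
		cancel_work_sync(&rd->work);	/* worker finished or never ran */
		ret = -EFAULT;
	}
	kfree(rd);				/* sync path: caller always frees */
	return ret;
}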
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 57328249c89e..3bec9e20bad0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
return pmisc->virt_addr;
}
+static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *etr;
+
+ etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+
+ return etr->virt_addr;
+}
+
static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
new file mode 100644
index 000000000000..650c9edd8a66
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen2_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
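Moving the CSR accessors into adf_gen2_hw_csr_data.c keeps the transport layer generation-agnostic: callers go through the adf_hw_csr_ops table obtained with GET_CSR_OPS() rather than the gen-specific WRITE_CSR_* macros. A minimal, hypothetical caller:

static void demo_ring_kick(struct adf_accel_dev *accel_dev,
			   void __iomem *csr_base, u32 bank, u32 ring, u32 tail)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);

	/* the same call works on gen2 and gen4 once the ops are initialized */
	csr_ops->write_csr_ring_tail(csr_base, bank, ring, tail);
}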
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
new file mode 100644
index 000000000000..55058b0f9e52
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN2_HW_CSR_DATA_H_
+#define ADF_GEN2_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)((value) & 0xFFFFFFFF); \
+ u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * (index)), value)
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5a1..1f64bf49b221 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
- u32 ring, u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
-
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 6bd341061de4..708e9186127b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -6,78 +6,9 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
-#define BUILD_RING_BASE_ADDR(addr, size) \
- (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- u32 l_base = 0, u_base = 0; \
- l_base = (u32)((value) & 0xFFFFFFFF); \
- u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
/* AE to function map */
#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
@@ -106,12 +37,6 @@ do { \
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
#define ADF_POWERGATE_DC BIT(23)
@@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
new file mode 100644
index 000000000000..6609c248aaba
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen4_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_UO_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NE_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NF_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_F_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_C_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_CTL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+static u32 get_int_col_ctl_enable_mask(void)
+{
+ return ADF_RING_CSR_INT_COL_CTL_ENABLE;
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_stat = read_csr_stat;
+ csr_ops->read_csr_uo_stat = read_csr_uo_stat;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->read_csr_ne_stat = read_csr_ne_stat;
+ csr_ops->read_csr_nf_stat = read_csr_nf_stat;
+ csr_ops->read_csr_f_stat = read_csr_f_stat;
+ csr_ops->read_csr_c_stat = read_csr_c_stat;
+ csr_ops->read_csr_exp_stat = read_csr_exp_stat;
+ csr_ops->read_csr_exp_int_en = read_csr_exp_int_en;
+ csr_ops->write_csr_exp_int_en = write_csr_exp_int_en;
+ csr_ops->read_csr_ring_config = read_csr_ring_config;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->read_csr_ring_base = read_csr_ring_base;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->read_csr_int_en = read_csr_int_en;
+ csr_ops->write_csr_int_en = write_csr_int_en;
+ csr_ops->read_csr_int_flag = read_csr_int_flag;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->read_csr_int_srcsel = read_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val;
+ csr_ops->read_csr_int_col_en = read_csr_int_col_en;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+ csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
new file mode 100644
index 000000000000..6f33e7c87c2c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
+#define ADF_RING_CSR_RING_CONFIG 0x1000
+#define ADF_RING_CSR_RING_LBASE 0x1040
+#define ADF_RING_CSR_RING_UBASE 0x1080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_STAT 0x140
+#define ADF_RING_CSR_UO_STAT 0x148
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_NE_STAT 0x150
+#define ADF_RING_CSR_NF_STAT 0x154
+#define ADF_RING_CSR_F_STAT 0x158
+#define ADF_RING_CSR_C_STAT 0x15C
+#define ADF_RING_CSR_INT_FLAG_EN 0x16C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_EXP_STAT 0x188
+#define ADF_RING_CSR_EXP_INT_EN 0x18C
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_CSR_ADDR_OFFSET 0x100000
+#define ADF_RING_BUNDLE_SIZE 0x2000
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT)
+#define READ_CSR_UO_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT)
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT)
+#define READ_CSR_NF_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT)
+#define READ_CSR_F_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT)
+#define READ_CSR_C_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT)
+#define READ_CSR_EXP_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT)
+#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN)
+#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_INT_EN, value)
+#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2))
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ void __iomem *_csr_base_addr = csr_base_addr; \
+ u32 _bank = bank; \
+ u32 _ring = ring; \
+ dma_addr_t _value = value; \
+ u32 l_base = 0, u_base = 0; \
+ l_base = lower_32_bits(_value); \
+ u_base = upper_32_bits(_value); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
+} while (0)
+
+static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ u32 l_base, u_base;
+
+ /*
+ * Use special IO wrapper for ring base as LBASE and UBASE are
+ * not physically contiguous
+ */
+ l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_LBASE + (ring << 2));
+ u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_UBASE + (ring << 2));
+
+ return (u64)u_base << 32 | (u64)l_base;
+}
+
+#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
+ read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring))
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define READ_CSR_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN)
+#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_EN, (value))
+#define READ_CSR_INT_FLAG(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG, (value))
+#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, (value))
+#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_EN, (value))
+#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN)
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN, (value))
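+
+/*
+ * Worked offset example (illustrative only, using the values these
+ * constants carry elsewhere in this patch: ADF_RING_CSR_ADDR_OFFSET =
+ * 0x100000, ADF_RING_BUNDLE_SIZE = 0x2000, ADF_RING_CSR_RING_HEAD = 0x0C0):
+ * the head register of ring 3 in bank 2 is accessed at
+ *   0x100000 + 2 * 0x2000 + 0x0C0 + (3 << 2) = 0x1040CC
+ * relative to the ETR BAR.
+ */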
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index d28e1921940a..41a0979e68c1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
+#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
@@ -8,103 +9,6 @@
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
-
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_GEN4_ACCELERATORS_MASK;
@@ -321,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
- void __iomem *csr;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
int ret;
if (bank_number >= hw_data->num_banks)
@@ -331,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
dev_dbg(&GET_DEV(accel_dev),
"ring pair reset for bank:%d\n", bank_number);
- csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
ret = reset_ring_pair(csr, bank_number);
if (ret)
dev_err(&GET_DEV(accel_dev),
@@ -489,3 +391,281 @@ set_mask:
return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
+
+/**
+ * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
+ * @accel_dev: Pointer to the device structure
+ * @bank_idx: Offset to the bank within this device
+ * @timeout_ms: Timeout in milliseconds for the operation
+ *
+ * This function tries to quiesce the coalesced interrupt timer of a bank if
+ * it has been enabled and triggered.
+ *
+ * Returns 0 on success, error code otherwise
+ *
+ */
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
+ void __iomem *csr_etr = adf_get_etr_base(accel_dev);
+ u32 int_col_ctl, int_col_mask, int_col_en;
+ u32 e_stat, intsrc;
+ u64 wait_us;
+ int ret;
+
+ if (timeout_ms < 0)
+ return -EINVAL;
+
+ int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
+ int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
+ if (!(int_col_ctl & int_col_mask))
+ return 0;
+
+ int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
+ int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);
+
+ e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
+ if (!(~e_stat & int_col_en))
+ return 0;
+
+ wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
+ do_div(wait_us, hw_data->clock_frequency);
+ wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
+ dev_dbg(&GET_DEV(accel_dev),
+ "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
+ bank_idx, wait_us, timeout_ms, e_stat, int_col_en);
+
+ ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
+ ADF_COALESCED_POLL_DELAY_US, wait_us, true,
+ csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev),
+ "coalesced timer for bank %d expired (%llu us)\n",
+ bank_idx, wait_us);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);
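+
+/*
+ * Illustrative calculation of the wait used above (assumed figures, not
+ * taken from a specific device): with a clock_frequency of 500 MHz and a
+ * coalescing timer field of 500 ticks in IAINTCOLCTL,
+ *   wait_us = 2 * (500 << 8) * USEC_PER_SEC / 500000000 = 512 us,
+ * capped at timeout_ms; the poll on RPINTSOU then waits at most that long
+ * for the coalesced interrupt to fire.
+ */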
+
+static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
+{
+ u32 status;
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_DRAIN);
+
+ return read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+}
+
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+}
+
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);
+
+ ret = drain_bank(csr, bank_number, timeout_us);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");
+
+ return ret;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+#define CHECK_STAT(op, expect_val, name, args...) \
+({ \
+ u32 __expect_val = (expect_val); \
+ u32 actual_val = op(args); \
+ (__expect_val == actual_val) ? 0 : \
+ (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
+ name, __expect_val, actual_val), -EINVAL); \
+})
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+ /*
+ * The TX ring head is written a second time so that the HW does
+ * not consider an empty ring to be full and so that the status
+ * flags are set to match the recovered state.
+ */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ /*
+ * The RX ring tail is written a second time so that the HW does
+ * not consider a full ring to be empty and so that the status
+ * flags are set to match the recovered state.
+ */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ val = state->ringexpstat;
+ if (tmp_val && !val) {
+ pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
+
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index c6e80df5a85a..8b10926cedba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
+#ifndef ADF_GEN4_HW_DATA_H_
+#define ADF_GEN4_HW_DATA_H_
#include <linux/units.h>
@@ -54,95 +54,6 @@
#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
-#define ADF_RING_CSR_RING_CONFIG 0x1000
-#define ADF_RING_CSR_RING_LBASE 0x1040
-#define ADF_RING_CSR_RING_UBASE 0x1080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_ADDR_OFFSET 0x100000
-#define ADF_RING_BUNDLE_SIZE 0x2000
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
- ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- void __iomem *_csr_base_addr = csr_base_addr; \
- u32 _bank = bank; \
- u32 _ring = ring; \
- dma_addr_t _value = value; \
- u32 l_base = 0, u_base = 0; \
- l_base = lower_32_bits(_value); \
- u_base = upper_32_bits(_value); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_EN, (value))
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, (value))
-
-/* Arbiter configuration */
-#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-
/* Default ring mapping */
#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
(ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
@@ -166,10 +77,20 @@ do { \
#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_RPRESET_POLL_DELAY_US 20
#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2)
#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Ring interrupt */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
+#define ADF_COALESCED_POLL_DELAY_US 1000
+#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
+#define ADF_WQM_CSR_RP_IDX_RX 1
+
/* Error source registers */
#define ADF_GEN4_ERRSOU0 (0x41A200)
#define ADF_GEN4_ERRSOU1 (0x41A204)
@@ -197,6 +118,19 @@ do { \
/* Arbiter threads mask with error value */
#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+/* PF2VM communication channel */
+#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20)
+#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20)
+#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20)
+#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20)
+#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20)
+#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20)
+
+struct adf_gen4_vfmig {
+ struct adf_mstate_mgr *mstate_mgr;
+ bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF];
+};
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
enum icp_qat_gen4_slice_mask {
@@ -230,11 +164,20 @@ u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms);
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us);
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number);
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3ee..21474d402d09 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
@@ -6,12 +6,10 @@
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_hw_data.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
-
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
@@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = {
static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
{
- return ADF_4XXX_PF2VM_OFFSET(i);
+ return ADF_GEN4_PF2VM_OFFSET(i);
}
static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
{
- return ADF_4XXX_VM2PF_OFFSET(i);
+ return ADF_GEN4_VM2PF_OFFSET(i);
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
index 7fc7a77f6aed..c7ad8cf07863 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->sl_exec_counters = sl_exec_counters;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
new file mode 100644
index 000000000000..a62eb5e8dbe6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_utils.h"
+#include "adf_mstate_mgr.h"
+#include "adf_gen4_vf_mig.h"
+
+#define ADF_GEN4_VF_MSTATE_SIZE 4096
+#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev);
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len);
+
+static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev)
+{
+ u8 *state;
+
+ state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mdev->state = state;
+ mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev)
+{
+ kfree(mdev->state);
+ mdev->state = NULL;
+}
+
+static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev)
+{
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+}
+
+static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+
+ vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
+ if (!vfmig)
+ return -ENOMEM;
+
+ vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size);
+ if (!vfmig->mstate_mgr) {
+ kfree(vfmig);
+ return -ENOMEM;
+ }
+ vf_info->mig_priv = vfmig;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+ if (vf_info->mig_priv) {
+ vfmig = vf_info->mig_priv;
+ adf_mstate_mgr_destroy(vfmig->mstate_mgr);
+ kfree(vfmig);
+ vf_info->mig_priv = NULL;
+ }
+}
+
+static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int ret, i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ /* Stop all inflight jobs */
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr,
+ ADF_RPRESET_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to drain bank %d for vf_nr %d\n", i,
+ vf_nr);
+ return ret;
+ }
+ vf_mig->bank_stopped[i] = true;
+
+ adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr,
+ ADF_COALESCED_POLL_TIMEOUT_US);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ if (vf_mig->bank_stopped[i]) {
+ adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr);
+ vf_mig->bank_stopped[i] = false;
+ }
+ }
+
+ return 0;
+}
+
+struct adf_vf_bank_info {
+ struct adf_accel_dev *accel_dev;
+ u32 vf_nr;
+ u32 bank_nr;
+};
+
+struct mig_user_sla {
+ enum adf_base_services srv;
+ u64 rp_mask;
+ u32 cir;
+ u32 pir;
+};
+
+static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf,
+ u32 src_size, void *opaque)
+{
+ struct adf_mstate_vreginfo _sinfo = { src_buf, src_size };
+ struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque;
+ u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla);
+ u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla);
+ struct mig_user_sla *src_slas = sinfo->addr;
+ struct mig_user_sla *dst_slas = dinfo->addr;
+ int i, j;
+
+ for (i = 0; i < src_sla_cnt; i++) {
+ for (j = 0; j < dst_sla_cnt; j++) {
+ if (src_slas[i].srv != dst_slas[j].srv ||
+ src_slas[i].rp_mask != dst_slas[j].rp_mask)
+ continue;
+
+ if (src_slas[i].cir > dst_slas[j].cir ||
+ src_slas[i].pir > dst_slas[j].pir) {
+ pr_err("QAT: DST VF rate limiting mismatch.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (j == dst_sla_cnt) {
+ pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n",
+ src_slas[i].srv, src_slas[i].rp_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz)
+{
+ if (src_sz > max_sz || dst_sz > max_sz)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr,
+ u8 *src_buf, u32 src_sz, void *opaque)
+{
+ struct adf_mstate_vreginfo *info = opaque;
+ u8 compat = 0;
+ u8 *pcompat;
+
+ if (src_sz != info->size) {
+ pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n",
+ src_sz, info->size);
+ return -EINVAL;
+ }
+
+ memcpy(info->addr, src_buf, info->size);
+ pcompat = info->addr;
+ if (*pcompat == 0) {
+ pr_warn("QAT: Unable to determine the version of VF\n");
+ return 0;
+ }
+
+ compat = adf_vf_compat_checker(*pcompat);
+ if (compat == ADF_PF2VF_VF_INCOMPATIBLE) {
+ pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ }
+
+ if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN)
+ pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ return 0;
+}
+
+/**
+ * adf_mstate_capmask_compare() - compare QAT device capability mask
+ * @sinfo: Pointer to source capability info
+ * @dinfo: Pointer to target capability info
+ *
+ * This function compares the capability mask between source VF and target VF
+ *
+ * Returns: 0 if target capability mask is identical to source capability mask,
+ * 1 if target mask can represent all the capabilities represented by source mask,
+ * -1 if target mask can't represent all the capabilities represented by source
+ * mask.
+ */
+static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
+ struct adf_mstate_vreginfo *dinfo)
+{
+ u64 src = 0, dst = 0;
+
+ if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) {
+ pr_debug("QAT: Unexpected capability size %u %u %zu\n",
+ sinfo->size, dinfo->size, sizeof(u64));
+ return -1;
+ }
+
+ memcpy(&src, sinfo->addr, sinfo->size);
+ memcpy(&dst, dinfo->addr, dinfo->size);
+
+ pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst);
+
+ if (src == dst)
+ return 0;
+
+ if ((src | dst) == dst)
+ return 1;
+
+ return -1;
+}
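+
+/*
+ * For example (arbitrary masks): src = 0x6 and dst = 0xE gives
+ * (src | dst) == dst, so the destination is a superset and the function
+ * returns 1; src == dst returns 0; anything else returns -1. The two
+ * wrappers below map these results onto the superset and equality checks
+ * applied when loading the config section.
+ */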
+
+static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) >= 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo *info = opa;
+
+ if (size != info->size) {
+ pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size);
+ return -EINVAL;
+ }
+ memcpy(info->addr, buf, info->size);
+
+ return 0;
+}
+
+static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ struct mig_user_sla *pmig_slas)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla **sla_type_arr = NULL;
+ u64 rp_mask, rp_index;
+ u32 max_num_sla;
+ u32 sla_cnt = 0;
+ int i, j;
+
+ if (!accel_dev->rate_limiting)
+ return 0;
+
+ rp_index = vf_nr * hw_data->num_banks_per_vf;
+ max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr);
+
+ for (i = 0; i < max_num_sla; i++) {
+ if (!sla_type_arr[i])
+ continue;
+
+ rp_mask = 0;
+ for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++)
+ rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]);
+
+ if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) {
+ pmig_slas->rp_mask = rp_mask;
+ pmig_slas->cir = sla_type_arr[i]->cir;
+ pmig_slas->pir = sla_type_arr[i]->pir;
+ pmig_slas->srv = sla_type_arr[i]->srv;
+ pmig_slas++;
+ sla_cnt++;
+ }
+ }
+
+ return sla_cnt;
+}
+
+static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
+ u8 *state, u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+ ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+ subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to lookup sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_load_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 ofs;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ misc_states[i].id,
+ adf_mstate_set_vreg,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load sec %s\n", misc_states[i].id);
+ return -EINVAL;
+ }
+ ADF_CSR_WR(csr, misc_states[i].ofs, regv);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ u32 dst_sla_cnt;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == dst_slas) {
+ dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas);
+ gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ gen_states[i].id,
+ gen_states[i].action,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id,
+ setups[i].action, &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
+ u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr;
+ pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+
+ ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return sizeof(struct bank_state);
+}
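+
+/*
+ * Populate callback convention (as used here): return the number of bytes
+ * written into the section state - sizeof(struct bank_state) - on success,
+ * or a negative errno on failure, which the section manager is expected to
+ * use when sizing the ETR_REGS section.
+ */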
+
+static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_vf_bank_info vf_bank_info;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+
+ subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ vf_bank_info.accel_dev = accel_dev;
+ vf_bank_info.vf_nr = vf_nr;
+ vf_bank_info.bank_nr = bank_nr;
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_save_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 offset;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ ktime_t time_exp;
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
+ time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US);
+ while (!mutex_trylock(&vf_info->pfvf_mig_lock)) {
+ if (ktime_after(ktime_get(), time_exp)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(500, 1000);
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ regv = ADF_CSR_RD(csr, misc_states[i].offset);
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ misc_states[i].id,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ misc_states[i].id);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ u32 src_sla_cnt;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, {src_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == src_slas) {
+ src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas);
+ gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ gen_states[i].id,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id,
+ &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_save_setup(mdev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save setup for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size,
+ mdev->state_size - mdev->setup_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save generic state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr,
+ mdev->state + mdev->remote_setup_size,
+ mdev->state_size - mdev->remote_setup_size,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load general state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->setup_size)
+ return 0;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id);
+ if (ret)
+ return ret;
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+ mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr);
+
+ return 0;
+}
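+
+/*
+ * Resulting layout of mdev->state on the save side: save_setup() serializes
+ * a self-contained region (preamble + config section) at the start of the
+ * buffer and records its length in mdev->setup_size; save_state() then
+ * appends a second self-contained region (preamble + generic, misc BAR and
+ * ETR BAR sections) at mdev->state + mdev->setup_size.
+ */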
+
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ u32 setup_size;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->remote_setup_size)
+ return 0;
+
+ if (len < sizeof(struct adf_mstate_preh))
+ return -EAGAIN;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr);
+ if (setup_size > mdev->state_size)
+ return -EINVAL;
+
+ if (len < setup_size)
+ return -EAGAIN;
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state,
+ setup_size, NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ mdev->remote_setup_size = setup_size;
+
+ ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load config for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ vfmig_ops->init = adf_gen4_vfmig_init_device;
+ vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device;
+ vfmig_ops->reset = adf_gen4_vfmig_reset_device;
+ vfmig_ops->open = adf_gen4_vfmig_open_device;
+ vfmig_ops->close = adf_gen4_vfmig_close_device;
+ vfmig_ops->suspend = adf_gen4_vfmig_suspend_device;
+ vfmig_ops->resume = adf_gen4_vfmig_resume_device;
+ vfmig_ops->save_state = adf_gen4_vfmig_save_state;
+ vfmig_ops->load_state = adf_gen4_vfmig_load_state;
+ vfmig_ops->load_setup = adf_gen4_vfmig_load_setup;
+ vfmig_ops->save_setup = adf_gen4_vfmig_save_setup;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
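+
+/*
+ * Usage sketch: the per-device GEN4 code is expected to call
+ * adf_gen4_init_vf_mig_ops() on the struct qat_migdev_ops it exposes, so
+ * that the component driving VF live migration (the consumer of struct
+ * qat_mig_dev) reaches the save/load/suspend/resume callbacks above; the
+ * actual hook-up is outside this file.
+ */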
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
new file mode 100644
index 000000000000..72216d078ee1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_VF_MIG_H_
+#define ADF_GEN4_VF_MIG_H_
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
new file mode 100644
index 000000000000..41cc763a74aa
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_mstate_mgr.h"
+
+#define ADF_MSTATE_MAGIC 0xADF5CAEA
+#define ADF_MSTATE_VERSION 0x1
+
+struct adf_mstate_sect_h {
+ u8 id[ADF_MSTATE_ID_LEN];
+ u32 size;
+ u32 sub_sects;
+ u8 state[];
+};
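+
+/*
+ * Serialized layout implied by the helpers below: the buffer starts with a
+ * struct adf_mstate_preh preamble, followed by n_sects sections. Each
+ * section is a struct adf_mstate_sect_h header followed by size bytes of
+ * state; sub-sections are serialized inside the parent's state[] and
+ * counted in sub_sects.
+ */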
+
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr)
+{
+ return mgr->state - mgr->buf;
+}
+
+static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr)
+{
+ return mgr->buf + mgr->size - mgr->state;
+}
+
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size)
+{
+ mgr->buf = buf;
+ mgr->state = buf;
+ mgr->size = size;
+ mgr->n_sects = 0;
+};
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size)
+{
+ struct adf_mstate_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+
+ return mgr;
+}
+
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr)
+{
+ kfree(mgr);
+}
+
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr)
+{
+ adf_mstate_mgr_init(mgr, p_mgr->state,
+ p_mgr->size - adf_mstate_state_size(p_mgr));
+}
+
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect)
+{
+ adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size);
+ mgr->n_sects = p_sect->sub_sects;
+}
+
+static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble)
+{
+ preamble->magic = ADF_MSTATE_MAGIC;
+ preamble->version = ADF_MSTATE_VERSION;
+ preamble->preh_len = sizeof(*preamble);
+ preamble->size = 0;
+ preamble->n_sects = 0;
+}
+
+/* default preamble checker */
+static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble,
+ void *opaque)
+{
+ struct adf_mstate_mgr *mgr = opaque;
+
+ if (preamble->magic != ADF_MSTATE_MAGIC ||
+ preamble->version > ADF_MSTATE_VERSION ||
+ preamble->preh_len > mgr->size) {
+ pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n",
+ preamble->magic, preamble->version, preamble->preh_len,
+ mgr->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf;
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*pre)) {
+ pr_err("QAT: LM - Not enough space for preamble\n");
+ return NULL;
+ }
+
+ adf_mstate_preamble_init(pre);
+ mgr->state += pre->preh_len;
+
+ return pre;
+}
+
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf;
+
+ preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len;
+ preamble->n_sects = mgr->n_sects;
+
+ return 0;
+}
+
+static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect,
+ const char *prefix)
+{
+ pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id);
+ print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect,
+ sizeof(*sect), true);
+ print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state,
+ sect->size, true);
+}
+
+static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *sect,
+ u32 size,
+ u32 n_subsects)
+{
+ sect->size += size;
+ sect->sub_sects += n_subsects;
+ mgr->n_sects++;
+ mgr->state += sect->size;
+
+ adf_mstate_dump_sect(sect, "Add");
+}
+
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect)
+{
+ __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr),
+ curr_mgr->n_sects);
+}
+
+static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr,
+ const char *id)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state);
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*sect)) {
+ pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id);
+ return NULL;
+ }
+
+ strscpy(sect->id, id, sizeof(sect->id));
+ sect->size = 0;
+ sect->sub_sects = 0;
+ mgr->state += sizeof(*sect);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info)
+{
+ struct adf_mstate_sect_h *sect;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (adf_mstate_avail_room(mgr) < info->size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, info->size);
+ return NULL;
+ }
+
+ memcpy(sect->state, info->addr, info->size);
+ __adf_mstate_sect_update(mgr, sect, info->size, 0);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque)
+{
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *sect;
+ int avail_room, size;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (!populate)
+ return sect;
+
+ avail_room = adf_mstate_avail_room(mgr);
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr);
+
+ size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque);
+ if (size < 0)
+ return NULL;
+
+ size += adf_mstate_state_size(&sub_sects_mgr);
+ if (avail_room < size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, size);
+ return NULL;
+ }
+ __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects);
+
+ return sect;
+}
+
+static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_sect_h *sect = start;
+ u64 end;
+ int i;
+
+ end = (uintptr_t)mgr->buf + mgr->size;
+ for (i = 0; i < mgr->n_sects; i++) {
+ uintptr_t s_start = (uintptr_t)sect->state;
+ uintptr_t s_end = s_start + sect->size;
+
+ if (s_end < s_start || s_end > end) {
+ pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n",
+ i, sect->size, mgr->size, mgr->n_sects);
+ return -EINVAL;
+ }
+ sect = (struct adf_mstate_sect_h *)s_end;
+ }
+
+ pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n",
+ start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start),
+ mgr->size, mgr->n_sects);
+
+ return 0;
+}
+
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf;
+
+ return preh->preh_len + preh->size;
+}
+
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size,
+ adf_mstate_preamble_checker pre_checker,
+ void *opaque)
+{
+ struct adf_mstate_preh *pre;
+ int ret;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+ pre = (struct adf_mstate_preh *)(mgr->buf);
+
+ pr_debug("QAT: LM - Dump state preambles\n");
+ print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0);
+
+ if (pre_checker)
+ ret = (*pre_checker)(pre, opaque);
+ else
+ ret = adf_mstate_preamble_def_checker(pre, mgr);
+ if (ret)
+ return ret;
+
+ mgr->state = mgr->buf + pre->preh_len;
+ mgr->n_sects = pre->n_sects;
+
+ return adf_mstate_sect_validate(mgr);
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_mgr sub_sects_mgr;
+ int i, ret;
+
+ for (i = 0; i < mgr->n_sects; i++) {
+ if (!strncmp(sect->id, id, sizeof(sect->id)))
+ goto found;
+
+ sect = (struct adf_mstate_sect_h *)(sect->state + sect->size);
+ }
+
+ return NULL;
+
+found:
+ adf_mstate_dump_sect(sect, "Found");
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect);
+ if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr))
+ return NULL;
+
+ if (!action)
+ return sect;
+
+ ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque);
+ if (ret)
+ return NULL;
+
+ return sect;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
new file mode 100644
index 000000000000..81d263a596c5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+
+#ifndef ADF_MSTATE_MGR_H
+#define ADF_MSTATE_MGR_H
+
+#define ADF_MSTATE_ID_LEN 8
+
+#define ADF_MSTATE_ETRB_IDS "ETRBAR"
+#define ADF_MSTATE_MISCB_IDS "MISCBAR"
+#define ADF_MSTATE_EXTB_IDS "EXTBAR"
+#define ADF_MSTATE_GEN_IDS "GENER"
+#define ADF_MSTATE_CONFIG_IDS "CONFIG"
+#define ADF_MSTATE_SECTION_NUM 5
+
+#define ADF_MSTATE_BANK_IDX_IDS "bnk"
+
+#define ADF_MSTATE_ETR_REGS_IDS "mregs"
+#define ADF_MSTATE_VINTSRC_IDS "visrc"
+#define ADF_MSTATE_VINTMSK_IDS "vimsk"
+#define ADF_MSTATE_SLA_IDS "sla"
+#define ADF_MSTATE_IOV_INIT_IDS "iovinit"
+#define ADF_MSTATE_COMPAT_VER_IDS "compver"
+#define ADF_MSTATE_GEN_CAP_IDS "gencap"
+#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap"
+#define ADF_MSTATE_GEN_EXTDC_IDS "extdc"
+#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv"
+#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv"
+#define ADF_MSTATE_VM2PF_IDS "vm2pf"
+#define ADF_MSTATE_PF2VM_IDS "pf2vm"
+
+struct adf_mstate_mgr {
+ u8 *buf;
+ u8 *state;
+ u32 size;
+ u32 n_sects;
+};
+
+struct adf_mstate_preh {
+ u32 magic;
+ u32 version;
+ u16 preh_len;
+ u16 n_sects;
+ u32 size;
+};
+
+struct adf_mstate_vreginfo {
+ void *addr;
+ u32 size;
+};
+
+struct adf_mstate_sect_h;
+
+typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa);
+typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa);
+typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size,
+ void *opa);
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size);
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr);
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size);
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr);
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect);
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr,
+ u8 *buf, u32 size,
+ adf_mstate_preamble_checker checker,
+ void *opaque);
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr);
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr);
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect);
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info);
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque);
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque);
+#endif
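For orientation (an illustrative sketch, not part of the patch): on the save side, a caller strings a preamble and one or more sections into a caller-supplied buffer using the functions declared above. The buffer, the "regs" section id and the -ENOSPC choice below are assumptions made for the example; only the adf_mstate_* calls and struct adf_mstate_vreginfo come from this header.

/* Illustrative only: serialize one register blob into a state buffer. */
static int example_save_regs(u8 *buf, u32 buf_size, void *regs, u32 regs_size)
{
	struct adf_mstate_vreginfo info = { .addr = regs, .size = regs_size };
	struct adf_mstate_mgr *mgr;
	int ret = 0;

	mgr = adf_mstate_mgr_new(buf, buf_size);
	if (!mgr)
		return -ENOMEM;

	/* Both helpers return NULL when the buffer is too small. */
	if (!adf_mstate_preamble_add(mgr) ||
	    !adf_mstate_sect_add_vreg(mgr, "regs", &info)) {
		ret = -ENOSPC;
		goto out;
	}

	/* Fix up total size and section count in the preamble. */
	ret = adf_mstate_preamble_update(mgr);
out:
	adf_mstate_mgr_destroy(mgr);
	return ret;
}

On the restore side, adf_mstate_mgr_init_from_remote() validates the preamble and section layout before adf_mstate_sect_lookup() is used to locate the same section id again.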
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 9ab93fbfefde..b9b5e744a3f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
"VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- if (vf_compat_ver == 0)
- compat = ADF_PF2VF_VF_INCOMPATIBLE;
- else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
- compat = ADF_PF2VF_VF_COMPATIBLE;
- else
- compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
-
+ compat = adf_vf_compat_checker(vf_compat_ver);
vf_info->vf_compat_ver = vf_compat_ver;
resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
index 2be048e2287b..1a044297d873 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
@@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg
struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
const struct pfvf_csr_format *fmt);
+static inline u8 adf_vf_compat_checker(u8 vf_compat_ver)
+{
+ if (vf_compat_ver == 0)
+ return ADF_PF2VF_VF_INCOMPATIBLE;
+
+ if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ return ADF_PF2VF_VF_COMPATIBLE;
+
+ return ADF_PF2VF_VF_COMPAT_UNKNOWN;
+}
+
#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index d4f2db3c53d8..346ef8bee99d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s
}
/**
- * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
+ * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
* @type: SLA type
* @sla_arr: pointer to variable where requested pointer will be stored
*
* Return: Max number of elements allowed for the returned array
*/
-static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
struct rl_sla ***sla_arr)
{
switch (type) {
@@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
rp_in_use[sla->ring_pairs_ids[i]] = false;
update_budget(sla, old_cir, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
assign_node_to_parent(rl_data->accel_dev, sla, true);
adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
mark_rps_usage(sla, rl_data->rp_in_use, false);
@@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev,
if (!is_update) {
mark_rps_usage(sla, rl_data->rp_in_use, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
sla_type_arr[sla->node_id] = sla;
rl_data->sla[sla->sla_id] = sla;
}
@@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
/* Unregister and remove all SLAs */
for (j = RL_LEAF; j >= end_type; j--) {
- max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);
+ max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr);
for (i = 0; i < max_id; i++) {
if (!sla_type_arr[i])
@@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev)
}
if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
- dev_info(&GET_DEV(accel_dev), "not supported\n");
+ dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n");
ret = -EOPNOTSUPP;
goto ret_free;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index 269c6656fb90..bfe750ea0e83 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -151,6 +151,8 @@ struct rl_sla {
u16 ring_pairs_cnt;
};
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+ struct rl_sla ***sla_arr);
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 87a70c00c41e..8d645e7e04aa 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work)
u32 vf_nr = vf_info->vf_nr;
bool ret;
+ mutex_lock(&vf_info->pfvf_mig_lock);
ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
if (ret)
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
kfree(pf2vf_resp);
}
@@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
vf_info->vf_nr = i;
mutex_init(&vf_info->pf2vf_lock);
+ mutex_init(&vf_info->pfvf_mig_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
ADF_VF2PF_RATELIMIT_INTERVAL,
ADF_VF2PF_RATELIMIT_BURST);
@@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, false);
- for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
mutex_destroy(&vf->pf2vf_lock);
+ mutex_destroy(&vf->pfvf_mig_lock);
+ }
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
kfree(accel_dev->pf.vf_info);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 2ff714d11bd2..74fb0c2ed241 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data)
return 0;
}
+static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
+ u8 max_slices_per_type)
+{
+ u8 *sl_counter = (u8 *)slice_count;
+ int i;
+
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ if (sl_counter[i] > max_slices_per_type)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
+ if (ret) {
+ dev_err(dev, "invalid value returned by FW\n");
+ adf_send_admin_tl_stop(accel_dev);
+ return ret;
+ }
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index 9be81cd3b886..e54a406cc1b4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -40,6 +40,7 @@ struct adf_tl_hw_data {
u8 num_dev_counters;
u8 num_rp_counters;
u8 max_rp;
+ u8 max_sl_cnt;
};
struct adf_telemetry {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4e0..1efdf46490f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
@@ -474,7 +474,6 @@ err:
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
u32 size;
u32 num_banks = 0;
@@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
}
accel_dev->transport = etr_data;
- i = hw_data->get_etr_bar_id(hw_data);
- csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+ csr_addr = adf_get_etr_base(accel_dev);
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 4128200a9032..85c682e248fb 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -110,6 +110,8 @@ struct qat_dh_ctx {
unsigned int p_size;
bool g2;
struct qat_crypto_instance *inst;
+ struct crypto_kpp *ftfm;
+ bool fallback;
} __packed __aligned(64);
struct qat_asym_request {
@@ -381,6 +383,36 @@ unmap_src:
return ret;
}
+static int qat_dh_generate_public_key(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_generate_public_key(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
+static int qat_dh_compute_shared_secret(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_compute_shared_secret(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
static int qat_dh_check_params_length(unsigned int p_len)
{
switch (p_len) {
@@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (qat_dh_check_params_length(params->p_size << 3))
- return -EINVAL;
-
ctx->p_size = params->p_size;
ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
@@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
+ if (qat_dh_check_params_length(params.p_size << 3)) {
+ ctx->fallback = true;
+ return crypto_kpp_set_secret(ctx->ftfm, buf, len);
+ }
+
+ ctx->fallback = false;
+
/* Free old secret if any */
qat_dh_clear_ctx(dev, ctx);
@@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->ftfm);
+
return ctx->p_size;
}
@@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(numa_node_id());
+ const char *alg = kpp_alg_name(tfm);
+ unsigned int reqsize;
if (!inst)
return -EINVAL;
- kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+ ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm));
+
+ reqsize = max(sizeof(struct qat_asym_request) + 64,
+ sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm));
+
+ kpp_set_reqsize(tfm, reqsize);
ctx->p_size = 0;
ctx->g2 = false;
@@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ if (ctx->ftfm)
+ crypto_free_kpp(ctx->ftfm);
+
qat_dh_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
}
@@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = {
static struct kpp_alg dh = {
.set_secret = qat_dh_set_secret,
- .generate_public_key = qat_dh_compute_value,
- .compute_shared_secret = qat_dh_compute_value,
+ .generate_public_key = qat_dh_generate_public_key,
+ .compute_shared_secret = qat_dh_compute_shared_secret,
.max_size = qat_dh_max_size,
.init = qat_dh_init_tfm,
.exit = qat_dh_exit_tfm,
@@ -1276,6 +1329,7 @@ static struct kpp_alg dh = {
.cra_priority = 1000,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct qat_dh_ctx),
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 76baed0a76c0..338acf29c487 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!bufl))
return -ENOMEM;
} else {
- bufl = &buf->sgl_src.sgl_hdr;
+ bufl = container_of(&buf->sgl_src.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(bufl, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_src_valid = true;
}
@@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!buflout))
goto err_in;
} else {
- buflout = &buf->sgl_dst.sgl_hdr;
+ buflout = container_of(&buf->sgl_dst.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(buflout, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_dst_valid = true;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index d87e4f35ac39..85bc32a9ec0e 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -15,14 +15,17 @@ struct qat_alg_buf {
} __packed;
struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
+ /* New members must be added within the __struct_group() macro below. */
+ __struct_group(qat_alg_buf_list_hdr, hdr, __packed,
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ );
struct qat_alg_buf buffers[];
} __packed;
struct qat_alg_fixed_buf_list {
- struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf_list_hdr sgl_hdr;
struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
new file mode 100644
index 000000000000..892c2283a50e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id)
+{
+ struct adf_accel_dev *accel_dev;
+ struct qat_migdev_ops *ops;
+ struct qat_mig_dev *mdev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ if (!accel_dev)
+ return ERR_PTR(-ENODEV);
+
+ ops = GET_VFMIG_OPS(accel_dev);
+ if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open ||
+ !ops->close || !ops->suspend || !ops->resume || !ops->save_state ||
+ !ops->load_state || !ops->save_setup || !ops->load_setup)
+ return ERR_PTR(-EINVAL);
+
+ mdev = kmalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ mdev->vf_id = vf_id;
+ mdev->parent_accel_dev = accel_dev;
+
+ return mdev;
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_create);
+
+int qat_vfmig_init(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->init(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_init);
+
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->cleanup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_cleanup);
+
+void qat_vfmig_reset(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->reset(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_reset);
+
+int qat_vfmig_open(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->open(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_open);
+
+void qat_vfmig_close(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ GET_VFMIG_OPS(accel_dev)->close(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_close);
+
+int qat_vfmig_suspend(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->suspend(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend);
+
+int qat_vfmig_resume(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->resume(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_resume);
+
+int qat_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_state);
+
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_setup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_setup);
+
+int qat_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_state);
+
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_setup);
+
+void qat_vfmig_destroy(struct qat_mig_dev *mdev)
+{
+ kfree(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_destroy);
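To make the intended call ordering of these thin wrappers explicit, here is a sketch under assumptions (not part of the patch): the vfio-side consumer that would actually issue these calls is not shown in this series, so example_save_vf() below is hypothetical; every qat_vfmig_* call and <linux/qat/qat_mig_dev.h> come from the file above. Error handling is kept minimal for readability.

/* Illustrative lifecycle of the migration device on the source side. */
static int example_save_vf(struct pci_dev *pdev, int vf_id)
{
	struct qat_mig_dev *mdev;
	int ret;

	mdev = qat_vfmig_create(pdev, vf_id);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto destroy;

	ret = qat_vfmig_open(mdev);
	if (ret)
		goto cleanup;

	ret = qat_vfmig_suspend(mdev);		/* quiesce the VF */
	if (!ret)
		ret = qat_vfmig_save_state(mdev);	/* capture the VF state */

	qat_vfmig_close(mdev);
cleanup:
	qat_vfmig_cleanup(mdev);
destroy:
	qat_vfmig_destroy(mdev);
	return ret;
}

On the destination, the mirror sequence would use qat_vfmig_load_setup() and qat_vfmig_load_state(), followed by qat_vfmig_resume().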
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 38d6f8e1624a..cfd3bd757715 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index af14090cc4be..6e24d57e6b98 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -5,6 +5,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 0153c85ce743..64b54e92b2b4 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16ece..f4ee4c2e00da 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 79b4e74804f6..6bfc59e67747 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return -ENOMEM;
cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, cptr_dma)) {
+ kfree(hctx);
+ return -ENOMEM;
+ }
cn10k_cpt_hw_ctx_set(hctx, 1);
er_ctx->hw_ctx = hctx;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index d2b8d26db968..215a1a8ba7e9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -4,7 +4,8 @@
#include "otx2_cpt_devlink.h"
static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@@ -13,7 +14,8 @@ static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@@ -45,7 +47,8 @@ static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 2b3ebe0db3a6..057d73c370b7 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
+#include <soc/fsl/dcp.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
@@ -101,6 +102,7 @@ struct dcp_async_ctx {
struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
+ bool key_referenced;
};
struct dcp_aes_req_ctx {
@@ -155,6 +157,7 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
+#define MXS_DCP_CONTROL0_OTP_KEY (1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
@@ -168,6 +171,8 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
+#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT 8
+
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
int dma_err;
@@ -224,13 +229,16 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ bool key_referenced = actx->key_referenced;
int ret;
- key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
- 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
+ if (!key_referenced) {
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
+ }
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -255,8 +263,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- /* Payload contains the key. */
- desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ if (key_referenced)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
+ else
+ /* Payload contains the key. */
+ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
@@ -270,6 +282,9 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
else
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+ if (key_referenced)
+ desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;
+
desc->next_cmd_addr = 0;
desc->source = src_phys;
desc->destination = dst_phys;
@@ -284,9 +299,9 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
- DMA_TO_DEVICE);
-
+ if (!key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
return ret;
}
@@ -453,7 +468,7 @@ static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
- if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
return mxs_dcp_block_fallback(req, enc);
rctx->enc = enc;
@@ -500,6 +515,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
* there can still be an operation in progress.
*/
actx->key_len = len;
+ actx->key_referenced = false;
if (len == AES_KEYSIZE_128) {
memcpy(actx->key, key, len);
return 0;
@@ -516,6 +532,32 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(actx->fallback, key, len);
}
+static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
+
+ if (len != DCP_PAES_KEYSIZE)
+ return -EINVAL;
+
+ switch (key[0]) {
+ case DCP_PAES_KEY_SLOT0:
+ case DCP_PAES_KEY_SLOT1:
+ case DCP_PAES_KEY_SLOT2:
+ case DCP_PAES_KEY_SLOT3:
+ case DCP_PAES_KEY_UNIQUE:
+ case DCP_PAES_KEY_OTP:
+ memcpy(actx->key, key, len);
+ actx->key_len = len;
+ actx->key_referenced = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
@@ -539,6 +581,13 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(actx->fallback);
}
+static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+
+ return 0;
+}
+
/*
* Hashing (SHA1/SHA256)
*/
@@ -889,6 +938,39 @@ static struct skcipher_alg dcp_aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.init = mxs_dcp_aes_fallback_init_tfm,
.exit = mxs_dcp_aes_fallback_exit_tfm,
+ }, {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_paes_init_tfm,
+ }, {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_paes_init_tfm,
},
};
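As a reading aid (a hedged sketch, not part of the patch): the new "paes" entries take a small reference-key blob rather than raw key material, and mxs_dcp_aes_setrefkey() above only checks the blob length and the slot selector in byte 0. The example assumes the tfm is already bound to one of the paes algorithms registered above (note their CRYPTO_ALG_INTERNAL flag) and that DCP_PAES_KEYSIZE and DCP_PAES_KEY_OTP come from the newly included <soc/fsl/dcp.h>; their values are not shown in this diff.

/* Illustrative only: select the hardware OTP key via a paes reference key. */
u8 refkey[DCP_PAES_KEYSIZE] = { DCP_PAES_KEY_OTP };	/* byte 0 picks the key slot */
int err;

err = crypto_skcipher_setkey(tfm, refkey, sizeof(refkey));
if (err)
	pr_err("paes setkey failed: %d\n", err);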
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 2ab90ec10e61..82214cde2bcd 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -251,7 +251,9 @@ int nx842_crypto_compress(struct crypto_tfm *tfm,
u8 *dst, unsigned int *dlen)
{
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_header *hdr =
+ container_of(&ctx->header,
+ struct nx842_crypto_header, hdr);
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
unsigned int groups, hdrsize, h;
@@ -490,7 +492,7 @@ int nx842_crypto_decompress(struct crypto_tfm *tfm,
}
memcpy(&ctx->header, src, hdr_len);
- hdr = &ctx->header;
+ hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr);
for (n = 0; n < hdr->groups; n++) {
/* ignore applies to last group */
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 7590bfb24d79..25fa70b2112c 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -157,9 +157,11 @@ struct nx842_crypto_header_group {
} __packed;
struct nx842_crypto_header {
- __be16 magic; /* NX842_CRYPTO_MAGIC */
- __be16 ignore; /* decompressed end bytes to ignore */
- u8 groups; /* total groups in this header */
+ struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ );
struct nx842_crypto_header_group group[];
} __packed;
@@ -171,7 +173,7 @@ struct nx842_crypto_ctx {
u8 *wmem;
u8 *sbounce, *dbounce;
- struct nx842_crypto_header header;
+ struct nx842_crypto_header_hdr header;
struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
struct nx842_driver *driver;
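The struct_group_tagged()/__struct_group() conversions above (here and in qat_bl.h earlier in this series) keep the binary layout unchanged while giving the fixed header fields their own tagged type, so a struct ending in a flexible array is no longer embedded inside another struct. A toy illustration of the pattern, not taken from the patch (names are invented; struct_group_tagged() and container_of() are the kernel helpers from <linux/stddef.h> and <linux/container_of.h>):

/* The tagged group defines struct msg_hdr covering only the fixed fields. */
struct msg {
	struct_group_tagged(msg_hdr, hdr,
		__be16 magic;
		u8 count;
	);
	u32 items[];			/* flexible array stays only in the full struct */
} __packed;

struct msg_ctx {
	struct msg_hdr hdr;		/* fixed-size header, legal to embed */
	u32 items[8];			/* bounded storage laid out right after it */
} __packed;

/* A full struct msg view is recovered where the old code used &ctx->header. */
static inline struct msg *msg_of(struct msg_ctx *ctx)
{
	return container_of(&ctx->hdr, struct msg, hdr);
}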
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 3423b5cde1c7..96d4af5d48a6 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -559,7 +559,7 @@ static int sahara_aes_process(struct skcipher_request *req)
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
int ret;
- unsigned long timeout;
+ unsigned long time_left;
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
@@ -597,15 +597,15 @@ static int sahara_aes_process(struct skcipher_request *req)
if (ret)
return -EINVAL;
- timeout = wait_for_completion_timeout(&dev->dma_completion,
- msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (!timeout) {
+ if (!time_left) {
dev_err(dev->device, "AES timeout\n");
return -ETIMEDOUT;
}
@@ -931,7 +931,7 @@ static int sahara_sha_process(struct ahash_request *req)
struct sahara_dev *dev = dev_ptr;
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
int ret;
- unsigned long timeout;
+ unsigned long time_left;
ret = sahara_sha_prepare_request(req);
if (!ret)
@@ -963,14 +963,14 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
- timeout = wait_for_completion_timeout(&dev->dma_completion,
- msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (!timeout) {
+ if (!time_left) {
dev_err(dev->device, "SHA timeout\n");
return -ETIMEDOUT;
}
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index cb59357b58b2..0fe389e9f932 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -14,6 +14,10 @@ config CRYPTO_DEV_JH7110
select CRYPTO_RSA
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_GCM
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
help
Support for StarFive JH7110 crypto hardware acceleration engine.
This module provides acceleration for public key algo,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 1ac15cc4ef3c..86a1a1fa9f8f 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -78,7 +78,7 @@ static inline int is_gcm(struct starfive_cryp_dev *cryp)
return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
}
-static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+static inline bool is_encrypt(struct starfive_cryp_dev *cryp)
{
return cryp->flags & FLG_ENCRYPT;
}
@@ -103,16 +103,6 @@ static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mod
}
}
-static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
-{
- struct starfive_cryp_dev *cryp = ctx->cryp;
-
- if (is_gcm(cryp))
- writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
- else
- writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
-}
-
static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -261,7 +251,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
rctx->csr.aes.mode = hw_mode;
rctx->csr.aes.cmode = !is_encrypt(cryp);
- rctx->csr.aes.ie = 1;
rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
if (cryp->side_chan) {
@@ -279,7 +268,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
case STARFIVE_AES_MODE_GCM:
starfive_aes_set_alen(ctx);
starfive_aes_set_mlen(ctx);
- starfive_aes_set_ivlen(ctx);
+ writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
starfive_aes_aead_hw_start(ctx, hw_mode);
starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
break;
@@ -300,52 +289,49 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
return cryp->err;
}
-static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+static int starfive_aes_read_authtag(struct starfive_cryp_ctx *ctx)
{
- int i, start_addr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ int i;
if (starfive_aes_wait_busy(cryp))
return dev_err_probe(cryp->dev, -ETIMEDOUT,
"Timeout waiting for tag generation.");
- start_addr = STARFIVE_AES_NONCE0;
-
- if (is_gcm(cryp))
- for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
- cryp->tag_out[i] = readl(cryp->base + start_addr);
- else
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM) {
+ cryp->tag_out[0] = readl(cryp->base + STARFIVE_AES_NONCE0);
+ cryp->tag_out[1] = readl(cryp->base + STARFIVE_AES_NONCE1);
+ cryp->tag_out[2] = readl(cryp->base + STARFIVE_AES_NONCE2);
+ cryp->tag_out[3] = readl(cryp->base + STARFIVE_AES_NONCE3);
+ } else {
for (i = 0; i < AES_BLOCK_32; i++)
cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+ }
if (is_encrypt(cryp)) {
- scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+ scatterwalk_map_and_copy(cryp->tag_out, rctx->out_sg,
+ cryp->total_in, cryp->authsize, 1);
} else {
- scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
-
if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
- return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+ return -EBADMSG;
}
return 0;
}
-static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+static void starfive_aes_finish_req(struct starfive_cryp_ctx *ctx)
{
- union starfive_aes_csr csr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
int err = cryp->err;
if (!err && cryp->authsize)
- err = starfive_aes_read_authtag(cryp);
+ err = starfive_aes_read_authtag(ctx);
if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
(cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
- /* reset irq flags*/
- csr.v = 0;
- csr.aesrst = 1;
- writel(csr.v, cryp->base + STARFIVE_AES_CSR);
-
if (cryp->authsize)
crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
else
@@ -353,39 +339,6 @@ static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
err);
}
-void starfive_aes_done_task(unsigned long param)
-{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int i;
-
- for (i = 0; i < AES_BLOCK_32; i++)
- block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
-
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_out), 1);
-
- cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
-
- if (!cryp->total_out) {
- starfive_aes_finish_req(cryp);
- return;
- }
-
- memset(block, 0, AES_BLOCK_SIZE);
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
-}
-
static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -451,60 +404,165 @@ static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
return 0;
}
-static int starfive_aes_prepare_req(struct skcipher_request *req,
- struct aead_request *areq)
+static void starfive_aes_dma_done(void *param)
{
- struct starfive_cryp_ctx *ctx;
- struct starfive_cryp_request_ctx *rctx;
- struct starfive_cryp_dev *cryp;
+ struct starfive_cryp_dev *cryp = param;
- if (!req && !areq)
- return -EINVAL;
+ complete(&cryp->dma_done);
+}
- ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
- crypto_aead_ctx(crypto_aead_reqtfm(areq));
+static void starfive_aes_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.direction = DMA_MEM_TO_DEV;
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- cryp = ctx->cryp;
- rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+ dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- if (req) {
- cryp->req.sreq = req;
- cryp->total_in = req->cryptlen;
- cryp->total_out = req->cryptlen;
- cryp->assoclen = 0;
- cryp->authsize = 0;
- } else {
- cryp->req.areq = areq;
- cryp->assoclen = areq->assoclen;
- cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
- if (is_encrypt(cryp)) {
- cryp->total_in = areq->cryptlen;
- cryp->total_out = areq->cryptlen;
- } else {
- cryp->total_in = areq->cryptlen - cryp->authsize;
- cryp->total_out = cryp->total_in;
- }
- }
+ cryp->cfg_out.direction = DMA_DEV_TO_MEM;
+ cryp->cfg_out.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_out.dst_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_out.src_maxburst = 4;
+ cryp->cfg_out.dst_maxburst = 4;
+ cryp->cfg_out.src_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- rctx->in_sg = req ? req->src : areq->src;
- scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+ dmaengine_slave_config(cryp->rx, &cryp->cfg_out);
- rctx->out_sg = req ? req->dst : areq->dst;
- scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+ init_completion(&cryp->dma_done);
+}
- if (cryp->assoclen) {
- rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
- if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+static int starfive_aes_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ int len)
+{
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+ union starfive_alg_cr alg_cr;
+ int ret = 0, in_save, out_save;
+
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.aes_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ in_save = sg_dma_len(src);
+ out_save = sg_dma_len(dst);
- scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
- scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_OUT_LEN_OFFSET);
+
+ sg_dma_len(src) = ALIGN(len, AES_BLOCK_SIZE);
+ sg_dma_len(dst) = ALIGN(len, AES_BLOCK_SIZE);
+
+ out_desc = dmaengine_prep_slave_sg(cryp->rx, dst, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc) {
+ ret = -EINVAL;
+ goto dma_err;
}
- ctx->rctx = rctx;
+ out_desc->callback = starfive_aes_dma_done;
+ out_desc->callback_param = cryp;
+
+ reinit_completion(&cryp->dma_done);
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(cryp->rx);
+
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, src, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto dma_err;
+ }
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+dma_err:
+ sg_dma_len(src) = in_save;
+ sg_dma_len(dst) = out_save;
+
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
+}
+
+static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *stsg, *dtsg;
+ struct scatterlist _src[2], _dst[2];
+ unsigned int remain = cryp->total_in;
+ unsigned int len, src_nents, dst_nents;
+ int ret;
+
+ if (src == dst) {
+ for (stsg = src, dtsg = dst; remain > 0;
+ stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
+
+ dst_nents = src_nents;
+ len = min(sg_dma_len(stsg), remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ }
+ } else {
+ for (stsg = src, dtsg = dst;;) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg src error\n");
+
+ dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (dst_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg dst error\n");
+
+ len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
+ len = min(len, remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ if (remain == 0)
+ break;
+
+ if (sg_dma_len(stsg) - len) {
+ stsg = scatterwalk_ffwd(_src, stsg, len);
+ dtsg = sg_next(dtsg);
+ } else if (sg_dma_len(dtsg) - len) {
+ dtsg = scatterwalk_ffwd(_dst, dtsg, len);
+ stsg = sg_next(stsg);
+ } else {
+ stsg = sg_next(stsg);
+ dtsg = sg_next(dtsg);
+ }
+ }
+ }
- return starfive_aes_hw_init(ctx);
+ return 0;
}
static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
@@ -513,35 +571,42 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
container_of(areq, struct skcipher_request, base);
struct starfive_cryp_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = skcipher_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ int ret;
- err = starfive_aes_prepare_req(req, NULL);
- if (err)
- return err;
+ cryp->req.sreq = req;
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ cryp->assoclen = 0;
+ cryp->authsize = 0;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+ if (!cryp->total_in)
+ goto finish_req;
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
+
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
+
+finish_req:
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -549,12 +614,26 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
if (!ctx->cryp)
return -ENODEV;
+ ctx->skcipher_fbk = crypto_alloc_skcipher(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->skcipher_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
+
crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
- sizeof(struct skcipher_request));
+ crypto_skcipher_reqsize(ctx->skcipher_fbk));
return 0;
}
+static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->skcipher_fbk);
+}
+
static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req =
@@ -562,79 +641,99 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
struct starfive_cryp_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
- struct starfive_cryp_request_ctx *rctx;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ struct starfive_cryp_request_ctx *rctx = aead_request_ctx(req);
+ struct scatterlist _src[2], _dst[2];
+ int ret;
+
+ cryp->req.areq = req;
+ cryp->assoclen = req->assoclen;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ rctx->in_sg = scatterwalk_ffwd(_src, req->src, cryp->assoclen);
+ if (req->src == req->dst)
+ rctx->out_sg = rctx->in_sg;
+ else
+ rctx->out_sg = scatterwalk_ffwd(_dst, req->dst, cryp->assoclen);
+
+ if (is_encrypt(cryp)) {
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ } else {
+ cryp->total_in = req->cryptlen - cryp->authsize;
+ cryp->total_out = cryp->total_in;
+ scatterwalk_map_and_copy(cryp->tag_in, req->src,
+ cryp->total_in + cryp->assoclen,
+ cryp->authsize, 0);
+ }
- err = starfive_aes_prepare_req(NULL, req);
- if (err)
- return err;
+ if (cryp->assoclen) {
+ rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!rctx->adata)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "Failed to alloc memory for adata");
+
+ if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
+ rctx->adata, cryp->assoclen) != cryp->assoclen)
+ return -EINVAL;
+ }
+
+ if (cryp->total_in)
+ sg_zero_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
+ sg_dma_len(rctx->in_sg) - cryp->total_in,
+ cryp->total_in);
- rctx = ctx->rctx;
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
if (!cryp->assoclen)
goto write_text;
if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
- cryp->err = starfive_aes_ccm_write_adata(ctx);
+ ret = starfive_aes_ccm_write_adata(ctx);
else
- cryp->err = starfive_aes_gcm_write_adata(ctx);
+ ret = starfive_aes_gcm_write_adata(ctx);
kfree(rctx->adata);
- if (cryp->err)
- return cryp->err;
+ if (ret)
+ return ret;
write_text:
if (!cryp->total_in)
goto finish_req;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
- return 0;
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
finish_req:
- starfive_aes_finish_req(cryp);
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct crypto_tfm *aead = crypto_aead_tfm(tfm);
- struct crypto_alg *alg = aead->__crt_alg;
ctx->cryp = starfive_cryp_find_dev(ctx);
if (!ctx->cryp)
return -ENODEV;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->aead_fbk))
- return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk),
- "%s() failed to allocate fallback for %s\n",
- __func__, alg->cra_name);
- }
+ ctx->aead_fbk = crypto_alloc_aead(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->aead_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
- crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
- sizeof(struct aead_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ crypto_aead_reqsize(ctx->aead_fbk));
return 0;
}
@@ -646,6 +745,46 @@ static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->aead_fbk);
}
+static bool starfive_aes_check_unaligned(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *tsg;
+ int i;
+
+ for_each_sg(src, tsg, sg_nents(src), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ if (src != dst)
+ for_each_sg(dst, tsg, sg_nents(dst), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ return false;
+}
+
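+/*
+ * Hand the request to the software fallback skcipher when the scatterlists
+ * cannot be handled by the DMA path (see starfive_aes_check_unaligned()).
+ */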
+static int starfive_aes_do_fallback(struct skcipher_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(subreq, ctx->skcipher_fbk);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+}
+
static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -660,32 +799,54 @@ static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
if (req->cryptlen & blocksize_align)
return -EINVAL;
+ if (starfive_aes_check_unaligned(cryp, req->src, req->dst))
+ return starfive_aes_do_fallback(req, is_encrypt(cryp));
+
return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
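+/* AEAD counterpart of the fallback path; it also forwards the associated data. */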
+static int starfive_aes_aead_do_fallback(struct aead_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->aead_fbk);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *src, *dst, _src[2], _dst[2];
cryp->flags = flags;
- /*
- * HW engine could not perform CCM tag verification on
- * non-blocksize aligned text, use fallback algo instead
+	/* The hardware cannot perform CCM tag verification on text that is not
+	 * block-size aligned, so use the software fallback for CCM decryption.
*/
- if (ctx->aead_fbk && !is_encrypt(cryp)) {
- struct aead_request *subreq = aead_request_ctx(req);
+ if (((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) &&
+ !is_encrypt(cryp))
+ return starfive_aes_aead_do_fallback(req, 0);
- aead_request_set_tfm(subreq, ctx->aead_fbk);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src,
- req->dst, req->cryptlen, req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ src = scatterwalk_ffwd(_src, req->src, req->assoclen);
- return crypto_aead_decrypt(subreq);
- }
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(_dst, req->dst, req->assoclen);
+
+ if (starfive_aes_check_unaligned(cryp, src, dst))
+ return starfive_aes_aead_do_fallback(req, is_encrypt(cryp));
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
@@ -706,7 +867,7 @@ static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- return 0;
+ return crypto_skcipher_setkey(ctx->skcipher_fbk, key, keylen);
}
static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -725,16 +886,20 @@ static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- if (ctx->aead_fbk)
- return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
-
- return 0;
+ return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
}
static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- return crypto_gcm_check_authsize(authsize);
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
}
static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -820,9 +985,35 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req)
return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
}
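+/*
+ * Per-mode init helpers so each registered algorithm can name the exact
+ * generic implementation used as its software fallback.
+ */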
+static int starfive_aes_ecb_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ecb(aes-generic)");
+}
+
+static int starfive_aes_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "cbc(aes-generic)");
+}
+
+static int starfive_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ctr(aes-generic)");
+}
+
+static int starfive_aes_ccm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-generic),cbcmac(aes-generic))");
+}
+
+static int starfive_aes_gcm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-generic),ghash-generic)");
+}
+
static struct skcipher_engine_alg skcipher_algs[] = {
{
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ecb_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ecb_encrypt,
.base.decrypt = starfive_aes_ecb_decrypt,
@@ -832,7 +1023,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "starfive-ecb-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -842,7 +1034,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_cbc_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_cbc_encrypt,
.base.decrypt = starfive_aes_cbc_decrypt,
@@ -853,7 +1046,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "starfive-cbc-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -863,7 +1057,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ctr_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ctr_encrypt,
.base.decrypt = starfive_aes_ctr_decrypt,
@@ -874,7 +1069,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "starfive-ctr-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -892,7 +1088,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_gcm_setauthsize,
.base.encrypt = starfive_aes_gcm_encrypt,
.base.decrypt = starfive_aes_gcm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_gcm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = GCM_AES_IV_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
@@ -900,7 +1096,8 @@ static struct aead_engine_alg aead_algs[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "starfive-gcm-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -914,7 +1111,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_ccm_setauthsize,
.base.encrypt = starfive_aes_ccm_encrypt,
.base.decrypt = starfive_aes_ccm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_ccm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = AES_BLOCK_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 425fddf3a8ab..e4dfed7ee0b0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -89,34 +89,10 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
dma_release_channel(cryp->rx);
}
-static irqreturn_t starfive_cryp_irq(int irq, void *priv)
-{
- u32 status;
- u32 mask;
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
-
- mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
- if (status & STARFIVE_IE_FLAG_AES_DONE) {
- mask |= STARFIVE_IE_MASK_AES_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->aes_done);
- }
-
- if (status & STARFIVE_IE_FLAG_HASH_DONE) {
- mask |= STARFIVE_IE_MASK_HASH_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->hash_done);
- }
-
- return IRQ_HANDLED;
-}
-
static int starfive_cryp_probe(struct platform_device *pdev)
{
struct starfive_cryp_dev *cryp;
struct resource *res;
- int irq;
int ret;
cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
@@ -131,9 +107,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
"Error remapping memory for platform device\n");
- tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
- tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
-
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
cryp->side_chan = side_chan;
@@ -153,16 +126,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
"Error getting hardware reset line\n");
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
- (void *)cryp);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to register interrupt handler\n");
-
clk_prepare_enable(cryp->hclk);
clk_prepare_enable(cryp->ahb);
reset_control_deassert(cryp->rst);
@@ -219,9 +182,6 @@ err_dma_init:
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
return ret;
}
@@ -233,9 +193,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 6cdf6db5d904..494a74f52706 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -91,6 +91,7 @@ union starfive_hash_csr {
#define STARFIVE_HASH_KEY_DONE BIT(13)
u32 key_done :1;
u32 key_flag :1;
+#define STARFIVE_HASH_HMAC_DONE BIT(15)
u32 hmac_done :1;
#define STARFIVE_HASH_BUSY BIT(16)
u32 busy :1;
@@ -168,6 +169,7 @@ struct starfive_cryp_ctx {
struct crypto_akcipher *akcipher_fbk;
struct crypto_ahash *ahash_fbk;
struct crypto_aead *aead_fbk;
+ struct crypto_skcipher *skcipher_fbk;
};
struct starfive_cryp_dev {
@@ -185,11 +187,8 @@ struct starfive_cryp_dev {
struct dma_chan *rx;
struct dma_slave_config cfg_in;
struct dma_slave_config cfg_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
struct crypto_engine *engine;
- struct tasklet_struct aes_done;
- struct tasklet_struct hash_done;
+ struct completion dma_done;
size_t assoclen;
size_t total_in;
size_t total_out;
@@ -236,7 +235,4 @@ void starfive_rsa_unregister_algs(void);
int starfive_aes_register_algs(void);
void starfive_aes_unregister_algs(void);
-
-void starfive_hash_done_task(unsigned long param);
-void starfive_aes_done_task(unsigned long param);
#endif
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index b6d1808012ca..2c60a1047bc3 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -36,15 +36,22 @@
#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
#define STARFIVE_HASH_RESET 0x2
-static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
+static inline int starfive_hash_wait_busy(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = ctx->cryp;
u32 status;
return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
!(status & STARFIVE_HASH_BUSY), 10, 100000);
}
+static inline int starfive_hash_wait_hmac_done(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ (status & STARFIVE_HASH_HMAC_DONE), 10, 100000);
+}
+
static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -84,64 +91,26 @@ static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
return 0;
}
-static void starfive_hash_start(void *param)
+static void starfive_hash_start(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_ctx *ctx = param;
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- union starfive_alg_cr alg_cr;
union starfive_hash_csr csr;
- u32 stat;
-
- dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
-
- alg_cr.v = 0;
- alg_cr.clear = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
csr.firstb = 0;
csr.final = 1;
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_HASH_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
}
-static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
+static void starfive_hash_dma_callback(void *param)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct dma_async_tx_descriptor *in_desc;
- union starfive_alg_cr alg_cr;
- int total_len;
- int ret;
-
- if (!rctx->total) {
- starfive_hash_start(ctx);
- return 0;
- }
+ struct starfive_cryp_dev *cryp = param;
- writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
-
- total_len = rctx->total;
- total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len;
- sg_dma_len(rctx->in_sg) = total_len;
-
- alg_cr.v = 0;
- alg_cr.start = 1;
- alg_cr.hash_dma_en = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
-
- ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
- if (!ret)
- return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
+ complete(&cryp->dma_done);
+}
- cryp->cfg_in.direction = DMA_MEM_TO_DEV;
- cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+static void starfive_hash_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
@@ -149,50 +118,48 @@ static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
- ret, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!in_desc)
- return -EINVAL;
-
- in_desc->callback = starfive_hash_start;
- in_desc->callback_param = ctx;
-
- dmaengine_submit(in_desc);
- dma_async_issue_pending(cryp->tx);
-
- return 0;
+ init_completion(&cryp->dma_done);
}
-static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
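+/*
+ * Feed one scatterlist entry to the hash engine over DMA and wait up to one
+ * second for the completion raised by the DMA callback.
+ */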
+static int starfive_hash_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *sg)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct dma_async_tx_descriptor *in_desc;
+ union starfive_alg_cr alg_cr;
int ret = 0;
- rctx->csr.hash.v = 0;
- rctx->csr.hash.reset = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
-
- if (starfive_hash_wait_busy(ctx))
- return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.hash_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
- rctx->csr.hash.v = 0;
- rctx->csr.hash.mode = ctx->hash_mode;
- rctx->csr.hash.ie = 1;
+ writel(sg_dma_len(sg), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ sg_dma_len(sg) = ALIGN(sg_dma_len(sg), sizeof(u32));
- if (ctx->is_hmac) {
- ret = starfive_hash_hmac_key(ctx);
- if (ret)
- return ret;
- } else {
- rctx->csr.hash.start = 1;
- rctx->csr.hash.firstb = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto end;
}
- return starfive_hash_xmit_dma(ctx);
+ reinit_completion(&cryp->dma_done);
+ in_desc->callback = starfive_hash_dma_callback;
+ in_desc->callback_param = cryp;
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+end:
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
}
static int starfive_hash_copy_hash(struct ahash_request *req)
@@ -215,58 +182,74 @@ static int starfive_hash_copy_hash(struct ahash_request *req)
return 0;
}
-void starfive_hash_done_task(unsigned long param)
+static void starfive_hash_done_task(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
int err = cryp->err;
if (!err)
err = starfive_hash_copy_hash(cryp->req.hreq);
- /* Reset to clear hash_done in irq register*/
- writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
-
crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
}
-static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
+static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
- int len = 0;
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *tsg;
+ int ret, src_nents, i;
- if (!total)
- return 0;
+ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
- if (!IS_ALIGNED(total, align))
- return -EINVAL;
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting hardware\n");
- while (sg) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return -EINVAL;
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.mode = ctx->hash_mode;
- if (!IS_ALIGNED(sg->length, align))
- return -EINVAL;
+ if (ctx->is_hmac) {
+ ret = starfive_hash_hmac_key(ctx);
+ if (ret)
+ return ret;
+ } else {
+ rctx->csr.hash.start = 1;
+ rctx->csr.hash.firstb = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ }
+
+ /* No input message, get digest and end. */
+ if (!rctx->total)
+ goto hash_start;
+
+ starfive_hash_dma_init(cryp);
+
+ for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
+ src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
- len += sg->length;
- sg = sg_next(sg);
+ ret = starfive_hash_dma_xfer(cryp, tsg);
+ dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
}
- if (len != total)
- return -EINVAL;
+hash_start:
+ starfive_hash_start(cryp);
- return 0;
-}
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error generating digest\n");
-static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ if (ctx->is_hmac)
+ cryp->err = starfive_hash_wait_hmac_done(cryp);
- if (!cryp)
- return -ENODEV;
+ starfive_hash_done_task(cryp);
- return starfive_hash_xmit(ctx);
+ return 0;
}
static int starfive_hash_init(struct ahash_request *req)
@@ -337,22 +320,6 @@ static int starfive_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->ahash_fbk_req);
}
-static int starfive_hash_digest_fb(struct ahash_request *req)
-{
- struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
- ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
- req->base.complete, req->base.data);
-
- ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
- req->result, req->nbytes);
-
- return crypto_ahash_digest(&rctx->ahash_fbk_req);
-}
-
static int starfive_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -370,9 +337,6 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
ctx->rctx = rctx;
- if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
- return starfive_hash_digest_fb(req);
-
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
}
@@ -406,7 +370,8 @@ static int starfive_hash_import(struct ahash_request *req, const void *in)
static int starfive_hash_init_tfm(struct crypto_ahash *hash,
const char *alg_name,
- unsigned int mode)
+ unsigned int mode,
+ bool is_hmac)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
@@ -426,7 +391,7 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
crypto_ahash_reqsize(ctx->ahash_fbk));
- ctx->keylen = 0;
+ ctx->is_hmac = is_hmac;
ctx->hash_mode = mode;
return 0;
@@ -529,81 +494,61 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha224-generic",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha256-generic",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha384-generic",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha512-generic",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 0);
}
static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sm3-generic",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 0);
}
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 1);
}
static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 1);
}
static struct ahash_engine_alg algs_sha2_sm3[] = {
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index cf8bda7f0855..33093ba4b13a 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -45,6 +45,9 @@ static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
static void starfive_rsa_free_key(struct starfive_rsa_key *key)
{
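+	/* A key_sz of zero means no key material has been allocated yet. */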
+ if (!key->key_sz)
+ return;
+
kfree_sensitive(key->d);
kfree_sensitive(key->e);
kfree_sensitive(key->n);
@@ -273,7 +276,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
err_rsa_crypt:
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- kfree(rctx->rsa_data);
return ret;
}
@@ -534,16 +536,14 @@ static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->akcipher_fbk))
return PTR_ERR(ctx->akcipher_fbk);
- ctx->cryp = starfive_cryp_find_dev(ctx);
- if (!ctx->cryp) {
- crypto_free_akcipher(ctx->akcipher_fbk);
- return -ENODEV;
- }
-
akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
sizeof(struct crypto_akcipher) + 32);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 34e0d7e381a8..351827372ea6 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -94,6 +94,7 @@
#define HASH_FLAGS_ERRORS BIT(21)
#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
+#define HASH_FLAGS_SGS_COPIED BIT(24)
#define HASH_OP_UPDATE 1
#define HASH_OP_FINAL 2
@@ -145,7 +146,7 @@ struct stm32_hash_state {
u16 bufcnt;
u16 blocklen;
- u8 buffer[HASH_BUFLEN] __aligned(4);
+ u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
/* hash state */
u32 hw_context[3 + HASH_CSR_NB_MAX];
@@ -158,8 +159,8 @@ struct stm32_hash_request_ctx {
u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
- /* DMA */
struct scatterlist *sg;
+ struct scatterlist sgl[2]; /* scatterlist used to realize alignment */
unsigned int offset;
unsigned int total;
struct scatterlist sg_key;
@@ -184,6 +185,7 @@ struct stm32_hash_pdata {
size_t algs_info_size;
bool has_sr;
bool has_mdmat;
+ bool context_secured;
bool broken_emptymsg;
bool ux500;
};
@@ -195,6 +197,7 @@ struct stm32_hash_dev {
struct reset_control *rst;
void __iomem *io_base;
phys_addr_t phys_base;
+ u8 xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
u32 dma_mode;
bool polled;
@@ -220,6 +223,8 @@ static struct stm32_hash_drv stm32_hash = {
};
static void stm32_hash_dma_callback(void *param);
+static int stm32_hash_prepare_request(struct ahash_request *req);
+static void stm32_hash_unprepare_request(struct ahash_request *req);
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
@@ -232,6 +237,11 @@ static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
writel_relaxed(value, hdev->io_base + offset);
}
+/**
+ * stm32_hash_wait_busy - wait until the hash processor is available. Returns
+ * an error if the hash core keeps processing a block of data for more than
+ * 10 ms.
+ * @hdev: the stm32_hash_dev device.
+ */
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
u32 status;
@@ -245,6 +255,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
!(status & HASH_SR_BUSY), 10, 10000);
}
+/**
+ * stm32_hash_set_nblw - set the number of valid bytes in the last word.
+ * @hdev: the stm32_hash_dev device.
+ * @length: the length of the final word.
+ */
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
u32 reg;
@@ -282,6 +297,11 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
+/**
+ * stm32_hash_write_ctrl - Initialize the hash processor, only if
+ * HASH_FLAGS_INIT is set.
+ * @hdev: the stm32_hash_dev device
+ */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
@@ -469,9 +489,7 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
- u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
- int i, swap_reg;
dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
@@ -495,34 +513,23 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
}
- if (!(hdev->flags & HASH_FLAGS_INIT))
- return 0;
-
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
-
- swap_reg = hash_swap_reg(rctx);
-
- if (!hdev->pdata->ux500)
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
- *preg++ = stm32_hash_read(hdev, HASH_STR);
- *preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < swap_reg; i++)
- *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
- state->flags |= HASH_FLAGS_INIT;
-
return err;
}
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
- struct scatterlist *sg, int length, int mdma)
+ struct scatterlist *sg, int length, int mdmat)
{
struct dma_async_tx_descriptor *in_desc;
dma_cookie_t cookie;
u32 reg;
int err;
+ dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
+
+ /* do not use dma if there is no data to send */
+ if (length <= 0)
+ return 0;
+
in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
@@ -535,13 +542,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
in_desc->callback = stm32_hash_dma_callback;
in_desc->callback_param = hdev;
- hdev->flags |= HASH_FLAGS_FINAL;
hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
reg = stm32_hash_read(hdev, HASH_CR);
if (hdev->pdata->has_mdmat) {
- if (mdma)
+ if (mdmat)
reg |= HASH_CR_MDMAT;
else
reg &= ~HASH_CR_MDMAT;
@@ -550,7 +556,6 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
stm32_hash_write(hdev, HASH_CR, reg);
- stm32_hash_set_nblw(hdev, length);
cookie = dmaengine_submit(in_desc);
err = dma_submit_error(cookie);
@@ -590,7 +595,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
int err;
- if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
+ if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
err = stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -655,18 +660,20 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
struct scatterlist sg[1], *tsg;
int err = 0, reg, ncp = 0;
unsigned int i, len = 0, bufcnt = 0;
+ bool final = hdev->flags & HASH_FLAGS_FINAL;
bool is_last = false;
+ u32 last_word;
- rctx->sg = hdev->req->src;
- rctx->total = hdev->req->nbytes;
+ dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
+ __func__, rctx->total, rctx->state.bufcnt, final);
- rctx->nents = sg_nents(rctx->sg);
if (rctx->nents < 0)
return -EINVAL;
stm32_hash_write_ctrl(hdev);
- if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
+ hdev->flags |= HASH_FLAGS_HMAC_KEY;
err = stm32_hash_hmac_dma_send(hdev);
if (err != -EINPROGRESS)
return err;
@@ -677,22 +684,36 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
len = sg->length;
if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
- sg->length = rctx->total - bufcnt;
- is_last = true;
- if (hdev->dma_mode == 1) {
- len = (ALIGN(sg->length, 16) - 16);
-
- ncp = sg_pcopy_to_buffer(
- rctx->sg, rctx->nents,
- rctx->state.buffer, sg->length - len,
- rctx->total - sg->length + len);
-
- sg->length = len;
+ if (!final) {
+ /* Always manually put the last word of a non-final transfer. */
+ len -= sizeof(u32);
+ sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
+ sg->length -= sizeof(u32);
} else {
- if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
- len = sg->length;
- sg->length = ALIGN(sg->length,
- sizeof(u32));
+ /*
+ * In Multiple DMA mode, DMA must be aborted before the final
+ * transfer.
+ */
+ sg->length = rctx->total - bufcnt;
+ if (hdev->dma_mode > 0) {
+ len = (ALIGN(sg->length, 16) - 16);
+
+ ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
+ rctx->state.buffer,
+ sg->length - len,
+ rctx->total - sg->length + len);
+
+ if (!len)
+ break;
+
+ sg->length = len;
+ } else {
+ is_last = true;
+ if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+ len = sg->length;
+ sg->length = ALIGN(sg->length,
+ sizeof(u32));
+ }
}
}
}
@@ -706,43 +727,67 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
+	/* The last word of a non-final transfer is sent manually. */
+ if (!final) {
+ stm32_hash_write(hdev, HASH_DIN, last_word);
+ len += sizeof(u32);
+ }
+
+ rctx->total -= len;
+
bufcnt += sg[0].length;
dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
- if (err == -ENOMEM)
+ if (err == -ENOMEM || err == -ETIMEDOUT)
return err;
if (is_last)
break;
}
- if (hdev->dma_mode == 1) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- reg = stm32_hash_read(hdev, HASH_CR);
- reg &= ~HASH_CR_DMAE;
- reg |= HASH_CR_DMAA;
- stm32_hash_write(hdev, HASH_CR, reg);
+ /*
+	 * When the second-to-last block transfer of 4 words is performed by the DMA,
+ * the software must set the DMA Abort bit (DMAA) to 1 before completing the
+ * last transfer of 4 words or less.
+ */
+ if (final) {
+ if (hdev->dma_mode > 0) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ reg = stm32_hash_read(hdev, HASH_CR);
+ reg &= ~HASH_CR_DMAE;
+ reg |= HASH_CR_DMAA;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ if (ncp) {
+ memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
+ writesl(hdev->io_base + HASH_DIN, buffer,
+ DIV_ROUND_UP(ncp, sizeof(u32)));
+ }
- if (ncp) {
- memset(buffer + ncp, 0,
- DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
- writesl(hdev->io_base + HASH_DIN, buffer,
- DIV_ROUND_UP(ncp, sizeof(u32)));
+ stm32_hash_set_nblw(hdev, ncp);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ err = -EINPROGRESS;
}
- stm32_hash_set_nblw(hdev, ncp);
- reg = stm32_hash_read(hdev, HASH_STR);
- reg |= HASH_STR_DCAL;
- stm32_hash_write(hdev, HASH_STR, reg);
- err = -EINPROGRESS;
- }
- if (hdev->flags & HASH_FLAGS_HMAC) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- err = stm32_hash_hmac_dma_send(hdev);
+ /*
+ * The hash processor needs the key to be loaded a second time in order
+ * to process the HMAC.
+ */
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ err = stm32_hash_hmac_dma_send(hdev);
+ }
+
+ return err;
}
- return err;
+ if (err != -EINPROGRESS)
+ return err;
+
+ return 0;
}
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
@@ -765,33 +810,6 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
return hdev;
}
-static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
-{
- struct scatterlist *sg;
- struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- int i;
-
- if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
- return false;
-
- if (sg_nents(req->src) > 1) {
- if (hdev->dma_mode == 1)
- return false;
- for_each_sg(req->src, sg, sg_nents(req->src), i) {
- if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
- (!sg_is_last(sg)))
- return false;
- }
- }
-
- if (req->src->offset % 4)
- return false;
-
- return true;
-}
-
static int stm32_hash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -802,8 +820,10 @@ static int stm32_hash_init(struct ahash_request *req)
bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
rctx->hdev = hdev;
+ state->flags = 0;
- state->flags = HASH_FLAGS_CPU;
+ if (!(hdev->dma_lch && hdev->pdata->has_mdmat))
+ state->flags |= HASH_FLAGS_CPU;
if (sha3_mode)
state->flags |= HASH_FLAGS_SHA3_MODE;
@@ -857,6 +877,7 @@ static int stm32_hash_init(struct ahash_request *req)
dev_err(hdev->dev, "Error, block too large");
return -EINVAL;
}
+ rctx->nents = 0;
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
@@ -874,6 +895,9 @@ static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
+ dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
+ rctx->total, rctx->digcnt);
+
if (!(state->flags & HASH_FLAGS_CPU))
return stm32_hash_dma_send(hdev);
@@ -887,6 +911,11 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
struct stm32_hash_state *state = &rctx->state;
int buflen = state->bufcnt;
+ if (!(state->flags & HASH_FLAGS_CPU)) {
+ hdev->flags |= HASH_FLAGS_FINAL;
+ return stm32_hash_dma_send(hdev);
+ }
+
if (state->flags & HASH_FLAGS_FINUP)
return stm32_hash_update_req(hdev);
@@ -968,15 +997,21 @@ static int stm32_hash_finish(struct ahash_request *req)
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
struct stm32_hash_dev *hdev = rctx->hdev;
+ if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
+ state->flags |= HASH_FLAGS_DMA_ACTIVE;
+ else
+ state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+
if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
stm32_hash_copy_hash(req);
err = stm32_hash_finish(req);
}
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
+	/* Finalized requests must be unprepared here */
+ stm32_hash_unprepare_request(req);
crypto_finalize_hash_request(hdev->engine, req, err);
}
@@ -1006,6 +1041,10 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
pm_runtime_get_sync(hdev->dev);
+ err = stm32_hash_prepare_request(req);
+ if (err)
+ return err;
+
hdev->req = req;
hdev->flags = 0;
swap_reg = hash_swap_reg(rctx);
@@ -1030,6 +1069,12 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
if (state->flags & HASH_FLAGS_HMAC)
hdev->flags |= HASH_FLAGS_HMAC |
HASH_FLAGS_HMAC_KEY;
+
+ if (state->flags & HASH_FLAGS_CPU)
+ hdev->flags |= HASH_FLAGS_CPU;
+
+ if (state->flags & HASH_FLAGS_DMA_ACTIVE)
+ hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
}
if (rctx->op == HASH_OP_UPDATE)
@@ -1054,6 +1099,284 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
return 0;
}
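+/*
+ * stm32_hash_copy_sgs - copy the buffered bytes and the unaligned scatterlist
+ * data into freshly allocated pages so the DMA only ever sees a single,
+ * properly aligned buffer.
+ */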
+static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
+{
+ struct stm32_hash_state *state = &rctx->state;
+ int pages;
+ void *buf;
+
+ pages = get_order(new_len);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ pr_err("Couldn't allocate pages for unaligned cases.\n");
+ return -ENOMEM;
+ }
+
+ if (state->bufcnt)
+ memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
+
+ scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
+ min(new_len, rctx->total) - state->bufcnt, 0);
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, buf, new_len);
+ rctx->sg = rctx->sgl;
+ state->flags |= HASH_FLAGS_SGS_COPIED;
+ rctx->nents = 1;
+ rctx->offset += new_len - state->bufcnt;
+ state->bufcnt = 0;
+ rctx->total = new_len;
+
+ return 0;
+}
+
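+/*
+ * stm32_hash_align_sgs - check whether the source scatterlist can be fed to
+ * the DMA as-is (32-bit aligned offsets, block-multiple lengths); if not,
+ * fall back to stm32_hash_copy_sgs(). Non-final transfers are trimmed to a
+ * whole number of blocks so the remainder can be buffered.
+ */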
+static int stm32_hash_align_sgs(struct scatterlist *sg,
+ int nbytes, int bs, bool init, bool final,
+ struct stm32_hash_request_ctx *rctx)
+{
+ struct stm32_hash_state *state = &rctx->state;
+ struct stm32_hash_dev *hdev = rctx->hdev;
+ struct scatterlist *sg_tmp = sg;
+ int offset = rctx->offset;
+ int new_len;
+ int n = 0;
+ int bufcnt = state->bufcnt;
+ bool secure_ctx = hdev->pdata->context_secured;
+ bool aligned = true;
+
+ if (!sg || !sg->length || !nbytes) {
+ if (bufcnt) {
+ bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
+ rctx->sg = rctx->sgl;
+ rctx->nents = 1;
+ }
+
+ return 0;
+ }
+
+ new_len = nbytes;
+
+ if (offset)
+ aligned = false;
+
+ if (final) {
+ new_len = DIV_ROUND_UP(new_len, bs) * bs;
+ } else {
+		new_len = (new_len - 1) / bs * bs;	/* keep the last (partial) block for a later transfer */
+
+ /*
+ * Context save in some version of HASH IP can only be done when the
+ * FIFO is ready to get a new block. This implies to send n block plus a
+ * 32 bit word in the first DMA send.
+ */
+ if (init && secure_ctx) {
+ new_len += sizeof(u32);
+ if (unlikely(new_len > nbytes))
+ new_len -= bs;
+ }
+ }
+
+ if (!new_len)
+ return 0;
+
+ if (nbytes != new_len)
+ aligned = false;
+
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ aligned = false;
+
+ continue;
+ }
+
+ if (offset < sg_tmp->length) {
+ if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+ aligned = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+ aligned = false;
+ break;
+ }
+ }
+
+ if (offset) {
+ offset -= sg_tmp->length;
+ if (offset < 0) {
+ nbytes += offset;
+ offset = 0;
+ }
+ } else {
+ nbytes -= sg_tmp->length;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+
+ if (nbytes < 0) {
+ aligned = false;
+ break;
+ }
+ }
+
+ if (!aligned)
+ return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
+
+ rctx->total = new_len;
+ rctx->offset += new_len;
+ rctx->nents = n;
+ if (state->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
+
+ return 0;
+}
+
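+/*
+ * stm32_hash_prepare_request - DMA path only: work out how many bytes can be
+ * processed now, top the block buffer up with new request data and keep any
+ * trailing partial block for a later update.
+ */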
+static int stm32_hash_prepare_request(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_state *state = &rctx->state;
+ unsigned int nbytes;
+ int ret, hash_later, bs;
+ bool update = rctx->op & HASH_OP_UPDATE;
+ bool init = !(state->flags & HASH_FLAGS_INIT);
+ bool finup = state->flags & HASH_FLAGS_FINUP;
+ bool final = state->flags & HASH_FLAGS_FINAL;
+
+ if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
+ return 0;
+
+ bs = crypto_ahash_blocksize(tfm);
+
+ nbytes = state->bufcnt;
+
+ /*
+	 * For an update or finup request, nbytes is the data already buffered
+	 * plus the part of the request that has not yet been copied into the
+	 * buffer (i.e. the bytes past rctx->offset).
+ */
+ if (update || finup)
+ nbytes += req->nbytes - rctx->offset;
+
+ dev_dbg(hdev->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
+
+ if (!nbytes)
+ return 0;
+
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
+ int len = bs - state->bufcnt % bs;
+
+ if (len > req->nbytes)
+ len = req->nbytes;
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+ 0, len, 0);
+ state->bufcnt += len;
+ rctx->offset = len;
+ }
+
+ /* copy buffer in a temporary one that is used for sg alignment */
+ if (state->bufcnt)
+ memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
+
+ ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
+ if (ret)
+ return ret;
+
+ hash_later = nbytes - rctx->total;
+ if (hash_later < 0)
+ hash_later = 0;
+
+ if (hash_later && hash_later <= state->blocklen) {
+ scatterwalk_map_and_copy(state->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+
+ state->bufcnt = hash_later;
+ } else {
+ state->bufcnt = 0;
+ }
+
+ if (hash_later > state->blocklen) {
+ /* FIXME: add support of this case */
+ pr_err("Buffer contains more than one block.\n");
+ return -ENOMEM;
+ }
+
+ rctx->total = min(nbytes, rctx->total);
+
+ return 0;
+}
+
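+/*
+ * stm32_hash_unprepare_request - stop any in-flight DMA, release pages used
+ * for copied scatterlists and, if the hash was started, save the IMR, STR, CR
+ * and context-swap registers so the operation can be resumed later, then drop
+ * the runtime PM reference.
+ */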
+static void stm32_hash_unprepare_request(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ u32 *preg = state->hw_context;
+ int swap_reg, i;
+
+ if (hdev->dma_lch)
+ dmaengine_terminate_sync(hdev->dma_lch);
+
+ if (state->flags & HASH_FLAGS_SGS_COPIED)
+ free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
+
+ rctx->sg = NULL;
+ rctx->offset = 0;
+
+ state->flags &= ~(HASH_FLAGS_SGS_COPIED);
+
+ if (!(hdev->flags & HASH_FLAGS_INIT))
+ goto pm_runtime;
+
+ state->flags |= HASH_FLAGS_INIT;
+
+ if (stm32_hash_wait_busy(hdev)) {
+ dev_warn(hdev->dev, "Wait busy failed.");
+ return;
+ }
+
+ swap_reg = hash_swap_reg(rctx);
+
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < swap_reg; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+pm_runtime:
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+}
+
static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
@@ -1070,16 +1393,26 @@ static int stm32_hash_update(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_state *state = &rctx->state;
- if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
+ if (!req->nbytes)
return 0;
- rctx->total = req->nbytes;
- rctx->sg = req->src;
- rctx->offset = 0;
- if ((state->bufcnt + rctx->total < state->blocklen)) {
- stm32_hash_append_sg(rctx);
- return 0;
+ if (state->flags & HASH_FLAGS_CPU) {
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ if ((state->bufcnt + rctx->total < state->blocklen)) {
+ stm32_hash_append_sg(rctx);
+ return 0;
+ }
+ } else { /* DMA mode */
+ if (state->bufcnt + req->nbytes <= state->blocklen) {
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ state->bufcnt += req->nbytes;
+ return 0;
+ }
}
return stm32_hash_enqueue(req, HASH_OP_UPDATE);
@@ -1098,20 +1431,18 @@ static int stm32_hash_final(struct ahash_request *req)
static int stm32_hash_finup(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
if (!req->nbytes)
goto out;
state->flags |= HASH_FLAGS_FINUP;
- rctx->total = req->nbytes;
- rctx->sg = req->src;
- rctx->offset = 0;
- if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
- state->flags &= ~HASH_FLAGS_CPU;
+ if ((state->flags & HASH_FLAGS_CPU)) {
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+ }
out:
return stm32_hash_final(req);
@@ -1215,7 +1546,6 @@ static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
HASH_FLAGS_HMAC);
}
-
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1228,14 +1558,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
- if (HASH_FLAGS_CPU & hdev->flags) {
- if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
- goto finish;
- }
- } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
- goto finish;
+ if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+ goto finish;
}
return IRQ_HANDLED;
@@ -1984,6 +2309,7 @@ static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
.has_sr = true,
.has_mdmat = true,
+ .context_secured = true,
};
static const struct of_device_id stm32_hash_of_match[] = {
diff --git a/drivers/crypto/tegra/Makefile b/drivers/crypto/tegra/Makefile
new file mode 100644
index 000000000000..a32001e58eb2
--- /dev/null
+++ b/drivers/crypto/tegra/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+tegra-se-objs := tegra-se-key.o tegra-se-main.o
+
+tegra-se-y += tegra-se-aes.o
+tegra-se-y += tegra-se-hash.o
+
+obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra-se.o
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
new file mode 100644
index 000000000000..ae7a0f8435fc
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -0,0 +1,1933 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "tegra-se.h"
+
+struct tegra_aes_ctx {
+ struct tegra_se *se;
+ u32 alg;
+ u32 ivsize;
+ u32 key1_id;
+ u32 key2_id;
+};
+
+struct tegra_aes_reqctx {
+ struct tegra_se_datbuf datbuf;
+ bool encrypt;
+ u32 config;
+ u32 crypto_config;
+ u32 len;
+ u32 *iv;
+};
+
+struct tegra_aead_ctx {
+ struct tegra_se *se;
+ unsigned int authsize;
+ u32 alg;
+ u32 keylen;
+ u32 key_id;
+};
+
+struct tegra_aead_reqctx {
+ struct tegra_se_datbuf inbuf;
+ struct tegra_se_datbuf outbuf;
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ unsigned int assoclen;
+ unsigned int cryptlen;
+ unsigned int authsize;
+ bool encrypt;
+ u32 config;
+ u32 crypto_config;
+ u32 key_id;
+ u32 iv[4];
+ u8 authdata[16];
+};
+
+struct tegra_cmac_ctx {
+ struct tegra_se *se;
+ unsigned int alg;
+ u32 key_id;
+ struct crypto_shash *fallback_tfm;
+};
+
+struct tegra_cmac_reqctx {
+ struct scatterlist *src_sg;
+ struct tegra_se_datbuf datbuf;
+ struct tegra_se_datbuf residue;
+ unsigned int total_len;
+ unsigned int blk_size;
+ unsigned int task;
+ u32 crypto_config;
+ u32 config;
+ u32 key_id;
+ u32 *iv;
+ u32 result[CMAC_RESULT_REG_COUNT];
+};
+
+/* increment counter (128-bit int) */
+static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
+{
+ do {
+ --bits;
+ nums += counter[bits];
+ counter[bits] = nums & 0xff;
+ nums >>= 8;
+ } while (bits && nums);
+}
+
+static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
+{
+ struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
+ unsigned int offset;
+
+ offset = req->cryptlen - ctx->ivsize;
+
+ if (rctx->encrypt)
+ memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
+}
+
+static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
+{
+ int num;
+
+ if (ctx->alg == SE_ALG_CBC) {
+ tegra_cbc_iv_copyback(req, ctx);
+ } else if (ctx->alg == SE_ALG_CTR) {
+ num = req->cryptlen / ctx->ivsize;
+ if (req->cryptlen % ctx->ivsize)
+ num++;
+
+ ctr_iv_inc(req->iv, ctx->ivsize, num);
+ }
+}
+
+static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
+{
+ switch (alg) {
+ case SE_ALG_CMAC:
+ case SE_ALG_GMAC:
+ case SE_ALG_GCM:
+ case SE_ALG_GCM_FINAL:
+ return 0;
+ case SE_ALG_CBC:
+ if (encrypt)
+ return SE_CRYPTO_CFG_CBC_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_CBC_DECRYPT;
+ case SE_ALG_ECB:
+ if (encrypt)
+ return SE_CRYPTO_CFG_ECB_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_ECB_DECRYPT;
+ case SE_ALG_XTS:
+ if (encrypt)
+ return SE_CRYPTO_CFG_XTS_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_XTS_DECRYPT;
+
+ case SE_ALG_CTR:
+ return SE_CRYPTO_CFG_CTR;
+ case SE_ALG_CBC_MAC:
+ return SE_CRYPTO_CFG_CBC_MAC;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra234_aes_cfg(u32 alg, bool encrypt)
+{
+ switch (alg) {
+ case SE_ALG_CBC:
+ case SE_ALG_ECB:
+ case SE_ALG_XTS:
+ case SE_ALG_CTR:
+ if (encrypt)
+ return SE_CFG_AES_ENCRYPT;
+ else
+ return SE_CFG_AES_DECRYPT;
+
+ case SE_ALG_GMAC:
+ if (encrypt)
+ return SE_CFG_GMAC_ENCRYPT;
+ else
+ return SE_CFG_GMAC_DECRYPT;
+
+ case SE_ALG_GCM:
+ if (encrypt)
+ return SE_CFG_GCM_ENCRYPT;
+ else
+ return SE_CFG_GCM_DECRYPT;
+
+ case SE_ALG_GCM_FINAL:
+ if (encrypt)
+ return SE_CFG_GCM_FINAL_ENCRYPT;
+ else
+ return SE_CFG_GCM_FINAL_DECRYPT;
+
+ case SE_ALG_CMAC:
+ return SE_CFG_CMAC;
+
+ case SE_ALG_CBC_MAC:
+ return SE_AES_ENC_ALG_AES_ENC |
+ SE_AES_DST_HASH_REG;
+ }
+ return -EINVAL;
+}
+
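+/*
+ * Build the host1x command stream for one AES operation: program the counter
+ * registers when an IV is present, set the last-block/residual-bit values,
+ * the config words and the source/destination addresses, then issue the
+ * start opcode followed by a sync point increment.
+ */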
+static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
+ struct tegra_aes_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ dma_addr_t addr = rctx->datbuf.addr;
+
+ data_count = rctx->len / AES_BLOCK_SIZE;
+ res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
+
+ /*
+ * Hardware processes data_count + 1 blocks.
+ * Reduce 1 block if there is no residue
+ */
+ if (!res_bits)
+ data_count--;
+
+ if (rctx->iv) {
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+ }
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
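+ /*
+ * The engine operates in place: the same DMA buffer is
+ * programmed as both source and destination below.
+ */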
+ /* Source address setting */
+ cpuvaddr[i++] = lower_32_bits(addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
+
+ /* Destination address setting */
+ cpuvaddr[i++] = lower_32_bits(addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
+ SE_ADDR_HI_SZ(rctx->len);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+
+ return i;
+}
+
+static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen;
+ int ret;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
+ rctx->datbuf.size = SE_AES_BUFLEN;
+ rctx->iv = (u32 *)req->iv;
+ rctx->len = req->cryptlen;
+
+ /* Pad input to AES Block size */
+ if (ctx->alg != SE_ALG_XTS) {
+ if (rctx->len % AES_BLOCK_SIZE)
+ rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
+ }
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+
+ /* Prepare the command and submit for execution */
+ cmdlen = tegra_aes_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+
+ /* Copy the result */
+ tegra_aes_update_iv(req, ctx);
+ scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+
+ /* Free the buffer */
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+
+ crypto_finalize_skcipher_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
+
+ ctx->ivsize = crypto_skcipher_ivsize(tfm);
+ ctx->se = se_alg->se_dev;
+ ctx->key1_id = 0;
+ ctx->key2_id = 0;
+
+ algname = crypto_tfm_alg_name(&tfm->base);
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (ctx->key1_id)
+ tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
+
+ if (ctx->key2_id)
+ tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
+}
+
+static int tegra_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+}
+
+static int tegra_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 len = keylen / 2;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret || aes_check_keylen(len)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ ret = tegra_key_submit(ctx->se, key, len,
+ ctx->alg, &ctx->key1_id);
+ if (ret)
+ return ret;
+
+ return tegra_key_submit(ctx->se, key + len, len,
+ ctx->alg, &ctx->key2_id);
+}
+
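+ /*
+ * Build the key manifest word for the key slot: the owner/user
+ * field, the purpose derived from the algorithm and the key size.
+ */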
+static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
+{
+ int manifest;
+
+ manifest = SE_KAC_USER_NS;
+
+ switch (alg) {
+ case SE_ALG_CBC:
+ case SE_ALG_ECB:
+ case SE_ALG_CTR:
+ manifest |= SE_KAC_ENC;
+ break;
+ case SE_ALG_XTS:
+ manifest |= SE_KAC_XTS;
+ break;
+ case SE_ALG_GCM:
+ manifest |= SE_KAC_GCM;
+ break;
+ case SE_ALG_CMAC:
+ manifest |= SE_KAC_CMAC;
+ break;
+ case SE_ALG_CBC_MAC:
+ manifest |= SE_KAC_ENC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ manifest |= SE_KAC_SIZE_128;
+ break;
+ case AES_KEYSIZE_192:
+ manifest |= SE_KAC_SIZE_192;
+ break;
+ case AES_KEYSIZE_256:
+ manifest |= SE_KAC_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return manifest;
+}
+
+static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
+
+{
+ struct crypto_skcipher *tfm;
+ struct tegra_aes_ctx *ctx;
+ struct tegra_aes_reqctx *rctx;
+
+ tfm = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(tfm);
+ rctx = skcipher_request_ctx(req);
+
+ if (ctx->alg != SE_ALG_XTS) {
+ if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
+ dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
+ return -EINVAL;
+ }
+ } else if (req->cryptlen < XTS_BLOCK_SIZE) {
+ dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
+ return -EINVAL;
+ }
+
+ if (!req->cryptlen)
+ return 0;
+
+ rctx->encrypt = encrypt;
+ rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
+ rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
+
+ if (ctx->key2_id)
+ rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
+
+ return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_aes_encrypt(struct skcipher_request *req)
+{
+ return tegra_aes_crypt(req, true);
+}
+
+static int tegra_aes_decrypt(struct skcipher_request *req)
+{
+ return tegra_aes_crypt(req, false);
+}
+
+static struct tegra_se_alg tegra_aes_algs[] = {
+ {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_xts_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = (__alignof__(u64) - 1),
+ .cra_module = THIS_MODULE,
+ },
+ }
+ },
+};
+
+static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ data_count = (rctx->assoclen / AES_BLOCK_SIZE);
+ res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
+
+ /*
+ * The hardware processes data_count + 1 blocks, so subtract
+ * one block when there is no residue.
+ */
+ if (!res_bits)
+ data_count--;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->assoclen);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr, op;
+
+ data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
+ res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ /*
+ * If there is no associated data, this command also carries
+ * the INIT operation.
+ */
+ if (!rctx->assoclen)
+ op |= SE_AES_OP_INIT;
+
+ /*
+ * The hardware processes data_count + 1 blocks, so subtract
+ * one block when there is no residue.
+ */
+ if (!res_bits)
+ data_count--;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->cryptlen);
+
+ /* Destination Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->cryptlen);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+ return i;
+}
+
+static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int i = 0, j;
+ u32 op;
+
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ /* Set INIT for a zero-sized input */
+ if (!rctx->assoclen && !rctx->cryptlen)
+ op |= SE_AES_OP_INIT;
+
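+ /* AAD and message lengths are programmed in bits */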
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
+ cpuvaddr[i++] = rctx->assoclen * 8;
+ cpuvaddr[i++] = 0;
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
+ cpuvaddr[i++] = rctx->cryptlen * 8;
+ cpuvaddr[i++] = 0;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+
+ /* Destination Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+
+ return i;
+}
+
+static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf,
+ rctx->src_sg, 0, rctx->assoclen, 0);
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
+
+ return tegra_se_host1x_submit(se, cmdlen);
+}
+
+static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ int cmdlen, ret;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
+ rctx->assoclen, rctx->cryptlen, 0);
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ /* Copy the result */
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ rctx->assoclen, rctx->cryptlen, 1);
+
+ return 0;
+}
+
+static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ int cmdlen, ret, offset;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ if (rctx->encrypt) {
+ /* Copy the result */
+ offset = rctx->assoclen + rctx->cryptlen;
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ offset, rctx->authsize, 1);
+ }
+
+ return 0;
+}
+
+static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ unsigned int offset;
+ u8 mac[16];
+
+ offset = rctx->assoclen + rctx->cryptlen;
+ scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
+
+ if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static inline int tegra_ccm_check_iv(const u8 *iv)
+{
+ /*
+ * iv[0] holds q - 1:
+ * 2 <= q <= 8 in NIST SP 800-38C notation, i.e.
+ * 2 <= L <= 8 and hence 1 <= L' <= 7 in RFC 3610 notation.
+ */
+ if (iv[0] < 1 || iv[0] > 7) {
+ pr_debug("ccm_check_iv failed %d\n", iv[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, i = 0;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source address setting */
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ /* Destination address setting */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
+ rctx->config, rctx->crypto_config);
+
+ return i;
+}
+
+static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ int cmdlen;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
+ rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
+
+ return tegra_se_host1x_submit(se, cmdlen);
+}
+
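+ /*
+ * Write msglen right-aligned as a csize-byte big-endian value,
+ * e.g. msglen 0x1234 with csize 2 is stored as the bytes
+ * 0x12 0x34. This fills the Q field at the end of the CCM B_0
+ * block.
+ */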
+static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
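+ /*
+ * Build the CCM B_0 block: Flags || N || Q. The flags octet
+ * carries the Adata bit, the encoded tag length (t - 2) / 2 and
+ * q - 1, e.g. q = 2, t = 16 with associated data present gives
+ * 0x40 | 0x38 | 0x01 = 0x79.
+ */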
+static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
+{
+ unsigned int q, t;
+ u8 *q_ptr, *iv = (u8 *)rctx->iv;
+
+ memcpy(nonce, rctx->iv, 16);
+
+ /*** 1. Prepare Flags Octet ***/
+
+ /* Encode t (mac length) */
+ t = rctx->authsize;
+ nonce[0] |= (((t - 2) / 2) << 3);
+
+ /* Adata */
+ if (rctx->assoclen)
+ nonce[0] |= (1 << 6);
+
+ /*** Encode Q - message length ***/
+ q = iv[0] + 1;
+ q_ptr = nonce + 16 - q;
+
+ return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
+}
+
+static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
+{
+ int len = 0;
+
+ /*
+ * Encode the associated data length as specified by
+ * RFC 3610 and NIST Special Publication 800-38C.
+ */
+ if (a < 65280) {
+ *(__be16 *)adata = cpu_to_be16(a);
+ len = 2;
+ } else {
+ *(__be16 *)adata = cpu_to_be16(0xfffe);
+ *(__be32 *)&adata[2] = cpu_to_be32(a);
+ len = 6;
+ }
+
+ return len;
+}
+
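+ /*
+ * Zero-pad the buffer up to the next 16-byte boundary and return
+ * the number of pad bytes added (0 if already aligned).
+ */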
+static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
+{
+ unsigned int padlen = 16 - (len % 16);
+ u8 padding[16] = {0};
+
+ if (padlen == 16)
+ return 0;
+
+ memcpy(buf, padding, padlen);
+
+ return padlen;
+}
+
+static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
+{
+ unsigned int alen = 0, offset = 0;
+ u8 nonce[16], adata[16];
+ int ret;
+
+ ret = tegra_ccm_format_nonce(rctx, nonce);
+ if (ret)
+ return ret;
+
+ memcpy(rctx->inbuf.buf, nonce, 16);
+ offset = 16;
+
+ if (rctx->assoclen) {
+ alen = tegra_ccm_format_adata(adata, rctx->assoclen);
+ memcpy(rctx->inbuf.buf + offset, adata, alen);
+ offset += alen;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
+ rctx->src_sg, 0, rctx->assoclen, 0);
+
+ offset += rctx->assoclen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
+ rctx->assoclen + alen);
+ }
+
+ return offset;
+}
+
+static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ u32 result[16];
+ int i, ret;
+
+ /* Read and clear Result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ if (rctx->encrypt) {
+ memcpy(rctx->authdata, result, rctx->authsize);
+ } else {
+ ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
+ if (ret)
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ /* Copy result */
+ scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
+ rctx->assoclen, rctx->cryptlen, 1);
+
+ if (rctx->encrypt)
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ rctx->assoclen + rctx->cryptlen,
+ rctx->authsize, 1);
+ else
+ memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
+
+ return 0;
+}
+
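+ /*
+ * CBC-MAC input layout: B_0, then the encoded associated data
+ * length plus the associated data (zero-padded), then the
+ * payload (zero-padded).
+ */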
+static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ struct scatterlist *sg;
+ int offset, ret;
+
+ offset = tegra_ccm_format_blocks(rctx);
+ if (offset < 0)
+ return -EINVAL;
+
+ /* Copy plain text to the buffer */
+ sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
+ sg, rctx->assoclen,
+ rctx->cryptlen, 0);
+ offset += rctx->cryptlen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
+
+ rctx->inbuf.size = offset;
+
+ ret = tegra_ccm_do_cbcmac(ctx, rctx);
+ if (ret)
+ return ret;
+
+ return tegra_ccm_mac_result(se, rctx);
+}
+
+static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen, offset = 0;
+ struct scatterlist *sg = rctx->src_sg;
+ int ret;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
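+ /*
+ * CTR input layout: one block holding the MAC, then the padded
+ * payload. Since crypt_init left ctr_0 with a zero counter, the
+ * first CTR block transforms the MAC and the following blocks
+ * transform the payload.
+ */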
+ /* Copy the authdata to the start of the buffer for encryption/decryption */
+ if (rctx->encrypt)
+ memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
+ else
+ scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
+ rctx->assoclen + rctx->cryptlen,
+ rctx->authsize, 0);
+
+ offset += rctx->authsize;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
+
+ /* Copy the payload, if any, before submitting the task */
+ if (rctx->cryptlen) {
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
+ rctx->assoclen, rctx->cryptlen, 0);
+ offset += rctx->cryptlen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
+ }
+
+ rctx->inbuf.size = offset;
+
+ /* Prepare command and submit */
+ cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ return tegra_ccm_ctr_result(se, rctx);
+}
+
+static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
+ struct tegra_aead_reqctx *rctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u8 *iv = (u8 *)rctx->iv;
+ int ret, i;
+
+ rctx->src_sg = req->src;
+ rctx->dst_sg = req->dst;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = crypto_aead_authsize(tfm);
+
+ memcpy(iv, req->iv, 16);
+
+ ret = tegra_ccm_check_iv(iv);
+ if (ret)
+ return ret;
+
+ /*
+ * Note: RFC 3610 and NIST SP 800-38C require the counter portion
+ * of ctr_0 to be zero when encrypting the auth tag.
+ * req->iv then holds the formatted ctr_0 (i.e. Flags || N || 0).
+ */
+ memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
+static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int ret;
+
+ /* Allocate buffers required */
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf)
+ return -ENOMEM;
+
+ rctx->inbuf.size = SE_AES_BUFLEN;
+
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto outbuf_err;
+ }
+
+ rctx->outbuf.size = SE_AES_BUFLEN;
+
+ ret = tegra_ccm_crypt_init(req, se, rctx);
+ if (ret)
+ goto out;
+
+ if (rctx->encrypt) {
+ rctx->cryptlen = req->cryptlen;
+
+ /* CBC MAC Operation */
+ ret = tegra_ccm_compute_auth(ctx, rctx);
+ if (ret)
+ goto out;
+
+ /* CTR operation */
+ ret = tegra_ccm_do_ctr(ctx, rctx);
+ if (ret)
+ goto out;
+ } else {
+ rctx->cryptlen = req->cryptlen - ctx->authsize;
+
+ /* CTR operation */
+ ret = tegra_ccm_do_ctr(ctx, rctx);
+ if (ret)
+ goto out;
+
+ /* CBC MAC Operation */
+ ret = tegra_ccm_compute_auth(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+out:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.buf, rctx->outbuf.addr);
+
+outbuf_err:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.buf, rctx->inbuf.addr);
+
+ crypto_finalize_aead_request(ctx->se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+ int ret;
+
+ /* Allocate buffers required */
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf)
+ return -ENOMEM;
+
+ rctx->inbuf.size = SE_AES_BUFLEN;
+
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto outbuf_err;
+ }
+
+ rctx->outbuf.size = SE_AES_BUFLEN;
+
+ rctx->src_sg = req->src;
+ rctx->dst_sg = req->dst;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = crypto_aead_authsize(tfm);
+
+ if (rctx->encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - ctx->authsize;
+
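+ /*
+ * Build J0 = IV || 0^31 || 1: the 96-bit IV followed by a 32-bit
+ * block counter starting at 1 (big-endian).
+ */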
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+ rctx->iv[3] = (1 << 24);
+
+ /* If there is associated data, perform the GMAC operation */
+ if (rctx->assoclen) {
+ ret = tegra_gcm_do_gmac(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+ /* GCM Encryption/Decryption operation */
+ if (rctx->cryptlen) {
+ ret = tegra_gcm_do_crypt(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+ /* GCM_FINAL operation */
+ ret = tegra_gcm_do_final(ctx, rctx);
+ if (ret)
+ goto out;
+
+ if (!rctx->encrypt)
+ ret = tegra_gcm_do_verify(ctx->se, rctx);
+
+out:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.buf, rctx->outbuf.addr);
+
+outbuf_err:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.buf, rctx->inbuf.addr);
+
+ /* Finalize the request, propagating any error to the caller */
+ crypto_finalize_aead_request(ctx->se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(&tfm->base);
+
+ se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
+static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
+static void tegra_aead_cra_exit(struct crypto_aead *tfm)
+{
+ struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (ctx->key_id)
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+
+ rctx->encrypt = encrypt;
+
+ return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_aead_encrypt(struct aead_request *req)
+{
+ return tegra_aead_crypt(req, true);
+}
+
+static int tegra_aead_decrypt(struct aead_request *req)
+{
+ return tegra_aead_crypt(req, false);
+}
+
+static int tegra_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
+ struct tegra_cmac_reqctx *rctx)
+{
+ unsigned int data_count, res_bits = 0, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr, op;
+
+ data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
+
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
+
+ if (!(rctx->task & SHA_UPDATE)) {
+ op |= SE_AES_OP_FINAL;
+ res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
+ }
+
+ if (!res_bits && data_count)
+ data_count--;
+
+ if (rctx->task & SHA_FIRST) {
+ rctx->task &= ~SHA_FIRST;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ /* Load 0 IV */
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = 0;
+ }
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->datbuf.size);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+}
+
+static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(rctx->result[i],
+ se->base + se->hw->regs->result + (i * 4));
+}
+
+static int tegra_cmac_do_update(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ unsigned int nblks, nresidue, cmdlen;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
+ nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
+
+ /*
+ * Reserve the last block as residue, to be processed in final().
+ */
+ if (!nresidue && nblks) {
+ nresidue += rctx->blk_size;
+ nblks--;
+ }
+
+ rctx->src_sg = req->src;
+ rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
+ rctx->total_len += rctx->datbuf.size;
+ rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+
+ /*
+ * If the data fits within a single block, keep it in the
+ * residue buffer and return; it will be processed in final().
+ */
+ if (nblks < 1) {
+ scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes, 0);
+
+ rctx->residue.size += req->nbytes;
+ return 0;
+ }
+
+ /* Copy the previous residue first */
+ if (rctx->residue.size)
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes - nresidue, 0);
+
+ scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
+ req->nbytes - nresidue, nresidue, 0);
+
+ /* Update residue value with the residue after current block */
+ rctx->residue.size = nresidue;
+
+ /*
+ * If this is not the first 'update' call, paste the previous copied
+ * intermediate results to the registers so that it gets picked up.
+ * This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_cmac_paste_result(ctx->se, rctx);
+
+ cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ /*
+ * If this is not the final update, copy the intermediate results
+ * from the registers so that it can be used in the next 'update'
+ * call. This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FINAL))
+ tegra_cmac_copy_result(ctx->se, rctx);
+
+ return ret;
+}
+
+static int tegra_cmac_do_final(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ u32 *result = (u32 *)req->result;
+ int ret = 0, i, cmdlen;
+
+ if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
+ ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
+ rctx->datbuf.buf, 0, req->result);
+ goto out;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ rctx->datbuf.size = rctx->residue.size;
+ rctx->total_len += rctx->residue.size;
+ rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ goto out;
+
+ /* Read and clear Result register */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+out:
+ dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+ dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
+ rctx->residue.buf, rctx->residue.addr);
+ return ret;
+}
+
+static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int ret = 0;
+
+ if (rctx->task & SHA_UPDATE) {
+ ret = tegra_cmac_do_update(req);
+ rctx->task &= ~SHA_UPDATE;
+ }
+
+ if (rctx->task & SHA_FINAL) {
+ ret = tegra_cmac_do_final(req);
+ rctx->task &= ~SHA_FINAL;
+ }
+
+ crypto_finalize_hash_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
+ const char *algname)
+{
+ unsigned int statesize;
+
+ ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
+ ctx->fallback_tfm = NULL;
+ return;
+ }
+
+ statesize = crypto_shash_statesize(ctx->fallback_tfm);
+
+ if (statesize > sizeof(struct tegra_cmac_reqctx))
+ crypto_ahash_set_statesize(tfm, statesize);
+}
+
+static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(tfm);
+ se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
+
+ crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
+
+ return 0;
+}
+
+static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback_tfm)
+ crypto_free_shash(ctx->fallback_tfm);
+
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->task = SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->residue.size = 0;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ goto datbuf_fail;
+
+ rctx->datbuf.size = 0;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+
+datbuf_fail:
+ dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ return -ENOMEM;
+}
+
+static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ if (ctx->fallback_tfm)
+ crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static int tegra_cmac_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_UPDATE;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
+
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_export(struct ahash_request *req, void *out)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int tegra_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct tegra_se_alg tegra_aead_algs[] = {
+ {
+ .alg.aead.op.do_one_request = tegra_gcm_do_one_req,
+ .alg.aead.base = {
+ .init = tegra_aead_cra_init,
+ .exit = tegra_aead_cra_exit,
+ .setkey = tegra_aead_setkey,
+ .setauthsize = tegra_gcm_setauthsize,
+ .encrypt = tegra_aead_encrypt,
+ .decrypt = tegra_aead_decrypt,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.aead.op.do_one_request = tegra_ccm_do_one_req,
+ .alg.aead.base = {
+ .init = tegra_aead_cra_init,
+ .exit = tegra_aead_cra_exit,
+ .setkey = tegra_aead_setkey,
+ .setauthsize = tegra_ccm_setauthsize,
+ .encrypt = tegra_aead_encrypt,
+ .decrypt = tegra_aead_decrypt,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }
+};
+
+static struct tegra_se_alg tegra_cmac_algs[] = {
+ {
+ .alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_cmac_init,
+ .setkey = tegra_cmac_setkey,
+ .update = tegra_cmac_update,
+ .final = tegra_cmac_final,
+ .finup = tegra_cmac_finup,
+ .digest = tegra_cmac_digest,
+ .export = tegra_cmac_export,
+ .import = tegra_cmac_import,
+ .halg.digestsize = AES_BLOCK_SIZE,
+ .halg.statesize = sizeof(struct tegra_cmac_reqctx),
+ .halg.base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "tegra-se-cmac",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_cmac_cra_init,
+ .cra_exit = tegra_cmac_cra_exit,
+ }
+ }
+ }
+};
+
+int tegra_init_aes(struct tegra_se *se)
+{
+ struct aead_engine_alg *aead_alg;
+ struct ahash_engine_alg *ahash_alg;
+ struct skcipher_engine_alg *sk_alg;
+ int i, ret;
+
+ se->manifest = tegra_aes_kac_manifest;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
+ sk_alg = &tegra_aes_algs[i].alg.skcipher;
+ tegra_aes_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_skcipher(sk_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ sk_alg->base.base.cra_name);
+ goto err_aes;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
+ aead_alg = &tegra_aead_algs[i].alg.aead;
+ tegra_aead_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_aead(aead_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ aead_alg->base.base.cra_name);
+ goto err_aead;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
+ ahash_alg = &tegra_cmac_algs[i].alg.ahash;
+ tegra_cmac_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_ahash(ahash_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ ahash_alg->base.halg.base.cra_name);
+ goto err_cmac;
+ }
+ }
+
+ return 0;
+
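+ /*
+ * Error unwind: each label unregisters the entries registered so
+ * far, then resets 'i' and falls through to unwind the earlier,
+ * fully registered arrays.
+ */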
+err_cmac:
+ while (i--)
+ crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
+
+ i = ARRAY_SIZE(tegra_aead_algs);
+err_aead:
+ while (i--)
+ crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
+
+ i = ARRAY_SIZE(tegra_aes_algs);
+err_aes:
+ while (i--)
+ crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
+
+ return ret;
+}
+
+void tegra_deinit_aes(struct tegra_se *se)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
+ crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
+ crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
+ crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
+}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
new file mode 100644
index 000000000000..4d4bd727f498
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
+#include <crypto/internal/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "tegra-se.h"
+
+struct tegra_sha_ctx {
+ struct tegra_se *se;
+ unsigned int alg;
+ bool fallback;
+ u32 key_id;
+ struct crypto_ahash *fallback_tfm;
+};
+
+struct tegra_sha_reqctx {
+ struct scatterlist *src_sg;
+ struct tegra_se_datbuf datbuf;
+ struct tegra_se_datbuf residue;
+ struct tegra_se_datbuf digest;
+ unsigned int alg;
+ unsigned int config;
+ unsigned int total_len;
+ unsigned int blk_size;
+ unsigned int task;
+ u32 key_id;
+ u32 result[HASH_RESULT_REG_COUNT];
+ struct ahash_request fallback_req;
+};
+
+static int tegra_sha_get_config(u32 alg)
+{
+ int cfg = 0;
+
+ switch (alg) {
+ case SE_ALG_SHA1:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA1;
+ break;
+
+ case SE_ALG_HMAC_SHA224:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA224:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA224;
+ break;
+
+ case SE_ALG_HMAC_SHA256:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA256:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA256;
+ break;
+
+ case SE_ALG_HMAC_SHA384:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA384:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA384;
+ break;
+
+ case SE_ALG_HMAC_SHA512:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA512:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA512;
+ break;
+
+ case SE_ALG_SHA3_224:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_224;
+ break;
+ case SE_ALG_SHA3_256:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_256;
+ break;
+ case SE_ALG_SHA3_384:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_384;
+ break;
+ case SE_ALG_SHA3_512:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cfg;
+}
+
+static int tegra_sha_fallback_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_update(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_finup(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_digest(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ u64 msg_len, msg_left;
+ int i = 0;
+
+ msg_len = rctx->total_len * 8;
+ msg_left = rctx->datbuf.size * 8;
+
+ /*
+ * If IN_ADDR_HI_0.SZ is greater than SHA_MSG_LEFT_[0-3], the
+ * HASH engine treats the buffer as the last one and processes
+ * the data. Therefore, add an extra byte to msg_left when this
+ * is not the last buffer.
+ */
+ if (rctx->task & SHA_UPDATE) {
+ msg_left += 8;
+ msg_len += 8;
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(8);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_MSG_LENGTH);
+ cpuvaddr[i++] = lower_32_bits(msg_len);
+ cpuvaddr[i++] = upper_32_bits(msg_len);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = lower_32_bits(msg_left);
+ cpuvaddr[i++] = upper_32_bits(msg_left);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
+ cpuvaddr[i++] = rctx->config;
+
+ if (rctx->task & SHA_FIRST) {
+ cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
+ rctx->task &= ~SHA_FIRST;
+ } else {
+ cpuvaddr[i++] = 0;
+ }
+
+ cpuvaddr[i++] = rctx->datbuf.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->datbuf.size));
+ cpuvaddr[i++] = rctx->digest.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
+ if (rctx->key_id) {
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
+ cpuvaddr[i++] = SE_AES_KEY_INDEX(rctx->key_id);
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
+ SE_SHA_OP_START |
+ SE_SHA_OP_LASTBUF;
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
+ msg_len, msg_left, rctx->config);
+
+ return i;
+}
+
+static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
+ rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+}
+
+static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
+ writel(rctx->result[i],
+ se->base + se->hw->regs->result + (i * 4));
+}
+
+static int tegra_sha_do_update(struct ahash_request *req)
+{
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned int nblks, nresidue, size;
+ int ret;
+ u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+
+ nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
+ nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
+
+ /*
+ * If the total data is an exact multiple of the block size,
+ * reserve the last block as residue, to be processed in final().
+ */
+ if (!nresidue && nblks) {
+ nresidue = rctx->blk_size;
+ nblks--;
+ }
+
+ rctx->src_sg = req->src;
+ rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
+ rctx->total_len += rctx->datbuf.size;
+
+ /*
+ * If the data is less than one block, copy it to the residue
+ * buffer and return; it will be processed in final().
+ */
+ if (nblks < 1) {
+ scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes, 0);
+
+ rctx->residue.size += req->nbytes;
+ return 0;
+ }
+
+ /* Copy the previous residue first */
+ if (rctx->residue.size)
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes - nresidue, 0);
+
+ scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
+ req->nbytes - nresidue, nresidue, 0);
+
+ /* Update residue value with the residue after current block */
+ rctx->residue.size = nresidue;
+
+ rctx->config = tegra_sha_get_config(rctx->alg) |
+ SE_SHA_DST_HASH_REG;
+
+ /*
+ * If this is not the first 'update' call, paste the previous copied
+ * intermediate results to the registers so that it gets picked up.
+ * This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_sha_paste_hash_result(ctx->se, rctx);
+
+ size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+
+ ret = tegra_se_host1x_submit(ctx->se, size);
+
+ /*
+ * If this is not the final update, copy the intermediate results
+ * from the registers so that they can be used in the next 'update'
+ * call. This supports the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FINAL))
+ tegra_sha_copy_hash_result(ctx->se, rctx);
+
+ return ret;
+}
+
+static int tegra_sha_do_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ int size, ret = 0;
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ rctx->datbuf.size = rctx->residue.size;
+ rctx->total_len += rctx->residue.size;
+
+ rctx->config = tegra_sha_get_config(rctx->alg) |
+ SE_SHA_DST_MEMORY;
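+ /*
+ * Unlike update(), which keeps the digest in the hash registers, the
+ * final result is written to memory so it can be copied to req->result.
+ */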
+
+ size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+
+ ret = tegra_se_host1x_submit(se, size);
+ if (ret)
+ goto out;
+
+ /* Copy result */
+ memcpy(req->result, rctx->digest.buf, rctx->digest.size);
+
+out:
+ dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+ dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
+ rctx->residue.buf, rctx->residue.addr);
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+ return ret;
+}
+
+static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int ret = 0;
+
+ if (rctx->task & SHA_UPDATE) {
+ ret = tegra_sha_do_update(req);
+ rctx->task &= ~SHA_UPDATE;
+ }
+
+ if (rctx->task & SHA_FINAL) {
+ ret = tegra_sha_do_final(req);
+ rctx->task &= ~SHA_FINAL;
+ }
+
+ crypto_finalize_hash_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static void tegra_sha_init_fallback(struct crypto_ahash *tfm, struct tegra_sha_ctx *ctx,
+ const char *algname)
+{
+ unsigned int statesize;
+
+ ctx->fallback_tfm = crypto_alloc_ahash(algname, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_warn(ctx->se->dev,
+ "failed to allocate fallback for %s\n", algname);
+ ctx->fallback_tfm = NULL;
+ return;
+ }
+
+ statesize = crypto_ahash_statesize(ctx->fallback_tfm);
+
+ if (statesize > sizeof(struct tegra_sha_reqctx))
+ crypto_ahash_set_statesize(tfm, statesize);
+
+ /* Update reqsize if fallback is added */
+ crypto_ahash_set_reqsize(tfm,
+ sizeof(struct tegra_sha_reqctx) +
+ crypto_ahash_reqsize(ctx->fallback_tfm));
+}
+
+static int tegra_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(tfm);
+ se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
+
+ crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_sha_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->fallback = false;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ if (se_alg->alg_base)
+ tegra_sha_init_fallback(ahash_tfm, ctx, algname);
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback_tfm)
+ crypto_free_ahash(ctx->fallback_tfm);
+
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task = SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ goto datbuf_fail;
+
+ return 0;
+
+datbuf_fail:
+ dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
+}
+
+static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ if (!ctx->fallback_tfm) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ ctx->fallback = true;
+ return crypto_ahash_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
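+ /*
+ * The hardware keyslots only take AES-sized keys (16/24/32 bytes);
+ * any other HMAC key length is handled by the software fallback.
+ */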
+ if (aes_check_keylen(keylen))
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
+ ctx->fallback = false;
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static int tegra_sha_update(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_update(req);
+
+ rctx->task |= SHA_UPDATE;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_final(req);
+
+ rctx->task |= SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_finup(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_finup(req);
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_digest(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_digest(req);
+
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_export(struct ahash_request *req, void *out)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_export(req, out);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int tegra_sha_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_import(req, in);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct tegra_se_alg tegra_hash_algs[] = {
+ {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "tegra-se-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "tegra-se-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "tegra-se-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "tegra-se-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "tegra-se-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "tegra-se-sha3-224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "tegra-se-sha3-256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "tegra-se-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "tegra-se-sha3-512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha224",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "tegra-se-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha256",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "tegra-se-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha384",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "tegra-se-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha512",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "tegra-se-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }
+};
+
+static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen)
+{
+ int manifest;
+
+ manifest = SE_KAC_USER_NS;
+
+ switch (alg) {
+ case SE_ALG_HMAC_SHA224:
+ case SE_ALG_HMAC_SHA256:
+ case SE_ALG_HMAC_SHA384:
+ case SE_ALG_HMAC_SHA512:
+ manifest |= SE_KAC_HMAC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ manifest |= SE_KAC_SIZE_128;
+ break;
+ case AES_KEYSIZE_192:
+ manifest |= SE_KAC_SIZE_192;
+ break;
+ case AES_KEYSIZE_256:
+ default:
+ manifest |= SE_KAC_SIZE_256;
+ break;
+ }
+
+ return manifest;
+}
+
+int tegra_init_hash(struct tegra_se *se)
+{
+ struct ahash_engine_alg *alg;
+ int i, ret;
+
+ se->manifest = tegra_hash_kac_manifest;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) {
+ tegra_hash_algs[i].se_dev = se;
+ alg = &tegra_hash_algs[i].alg.ahash;
+
+ ret = crypto_engine_register_ahash(alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ alg->base.halg.base.cra_name);
+ goto sha_err;
+ }
+ }
+
+ return 0;
+
+sha_err:
+ while (i--)
+ crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash);
+
+ return ret;
+}
+
+void tegra_deinit_hash(struct tegra_se *se)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++)
+ crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash);
+}
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
new file mode 100644
index 000000000000..ac14678dbd30
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver file to manage keys for the NVIDIA Security Engine.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <crypto/aes.h>
+
+#include "tegra-se.h"
+
+#define SE_KEY_FULL_MASK GENMASK(SE_MAX_KEYSLOT, 0)
+
+/* Reserve keyslots 0, 14 and 15 */
+#define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15))
+#define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK)
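+/* With SE_MAX_KEYSLOT = 15, this leaves keyslots 1-13 usable (mask 0x3ffe) */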
+
+/* Mutex lock to guard keyslots */
+static DEFINE_MUTEX(kslt_lock);
+
+/* Keyslot bitmask (0 = available, 1 = in use/not available) */
+static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK;
+
+static u16 tegra_keyslot_alloc(void)
+{
+ u16 keyid;
+
+ mutex_lock(&kslt_lock);
+ /* Check if all key slots are full */
+ if (tegra_se_keyslots == SE_KEY_FULL_MASK) {
+ mutex_unlock(&kslt_lock);
+ return 0;
+ }
+
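+ /* ffz() returns the lowest clear bit, i.e. the first free keyslot */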
+ keyid = ffz(tegra_se_keyslots);
+ tegra_se_keyslots |= BIT(keyid);
+
+ mutex_unlock(&kslt_lock);
+
+ return keyid;
+}
+
+static void tegra_keyslot_free(u16 slot)
+{
+ mutex_lock(&kslt_lock);
+ tegra_se_keyslots &= ~(BIT(slot));
+ mutex_unlock(&kslt_lock);
+}
+
+static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ const u32 *key, u32 keylen, u16 slot, u32 alg)
+{
+ int i = 0, j;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest);
+ cpuvaddr[i++] = se->manifest(se->owner, alg, keylen);
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst);
+
+ cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot);
+
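+ /*
+ * Program the key one 32-bit word at a time: select the word index
+ * through KEY_ADDR, then write its value through KEY_DATA.
+ */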
+ for (j = 0; j < keylen / 4; j++) {
+ /* Set key address */
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_addr);
+ cpuvaddr[i++] = j;
+
+ /* Set key data */
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_data);
+ cpuvaddr[i++] = key[j];
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config);
+ cpuvaddr[i++] = SE_CFG_INS;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START |
+ SE_AES_OP_LASTBUF;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "key-slot %u key-manifest %#x\n",
+ slot, se->manifest(se->owner, alg, keylen));
+
+ return i;
+}
+
+static bool tegra_key_in_kslt(u32 keyid)
+{
+ bool ret;
+
+ if (keyid > SE_MAX_KEYSLOT)
+ return false;
+
+ mutex_lock(&kslt_lock);
+ ret = ((BIT(keyid) & SE_KEY_VALID_MASK) &&
+ (BIT(keyid) & tegra_se_keyslots));
+ mutex_unlock(&kslt_lock);
+
+ return ret;
+}
+
+static int tegra_key_insert(struct tegra_se *se, const u8 *key,
+ u32 keylen, u16 slot, u32 alg)
+{
+ const u32 *keyval = (u32 *)key;
+ u32 *addr = se->cmdbuf->addr, size;
+
+ size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+
+ return tegra_se_host1x_submit(se, size);
+}
+
+void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
+{
+ u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+ if (!keyid)
+ return;
+
+ /* Overwrite the key with 0s */
+ tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+
+ tegra_keyslot_free(keyid);
+}
+
+int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
+{
+ int ret;
+
+ /* Use the existing slot if it is already allocated */
+ if (!tegra_key_in_kslt(*keyid)) {
+ *keyid = tegra_keyslot_alloc();
+ if (!(*keyid)) {
+ dev_err(se->dev, "failed to allocate key slot\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = tegra_key_insert(se, key, keylen, *keyid, alg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
new file mode 100644
index 000000000000..9955874b3dc3
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver for the NVIDIA Security Engine in Tegra chips.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+#include <crypto/engine.h>
+
+#include "tegra-se.h"
+
+static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ kref_get(&cmdbuf->ref);
+
+ return host_bo;
+}
+
+static void tegra_se_cmdbuf_release(struct kref *ref)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref);
+
+ dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr,
+ cmdbuf->iova, 0);
+
+ kfree(cmdbuf);
+}
+
+static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release);
+}
+
+static struct host1x_bo_mapping *
+tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo);
+ struct host1x_bo_mapping *map;
+ int err;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
+ cmdbuf->iova, cmdbuf->words * 4);
+ if (err)
+ goto free_sgt;
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->size = cmdbuf->words * 4;
+ map->chunks = err;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+free:
+ kfree(map);
+ return ERR_PTR(err);
+}
+
+static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
+{
+ if (!map)
+ return;
+
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ host1x_bo_put(map->bo);
+
+ kfree(map);
+}
+
+static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ return cmdbuf->addr;
+}
+
+static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr)
+{
+}
+
+static const struct host1x_bo_ops tegra_se_cmdbuf_ops = {
+ .get = tegra_se_cmdbuf_get,
+ .put = tegra_se_cmdbuf_put,
+ .pin = tegra_se_cmdbuf_pin,
+ .unpin = tegra_se_cmdbuf_unpin,
+ .mmap = tegra_se_cmdbuf_mmap,
+ .munmap = tegra_se_cmdbuf_munmap,
+};
+
+static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
+{
+ struct tegra_se_cmdbuf *cmdbuf;
+ struct device *dev = se->dev->parent;
+
+ cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
+ if (!cmdbuf)
+ return NULL;
+
+ cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova,
+ GFP_KERNEL, 0);
+ if (!cmdbuf->addr) {
+ kfree(cmdbuf);
+ return NULL;
+ }
+
+ cmdbuf->size = size;
+ cmdbuf->dev = dev;
+
+ host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops);
+ kref_init(&cmdbuf->ref);
+
+ return cmdbuf;
+}
+
+int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+{
+ struct host1x_job *job;
+ int ret;
+
+ job = host1x_job_alloc(se->channel, 1, 0, true);
+ if (!job) {
+ dev_err(se->dev, "failed to allocate host1x job\n");
+ return -ENOMEM;
+ }
+
+ job->syncpt = host1x_syncpt_get(se->syncpt);
+ job->syncpt_incrs = 1;
+ job->client = &se->client;
+ job->class = se->client.class;
+ job->serialize = true;
+ job->engine_fallback_streamid = se->stream_id;
+ job->engine_streamid_offset = SE_STREAM_ID;
+
+ se->cmdbuf->words = size;
+
+ host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+
+ ret = host1x_job_pin(job, se->dev);
+ if (ret) {
+ dev_err(se->dev, "failed to pin host1x job\n");
+ goto job_put;
+ }
+
+ ret = host1x_job_submit(job);
+ if (ret) {
+ dev_err(se->dev, "failed to submit host1x job\n");
+ goto job_unpin;
+ }
+
+ ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end,
+ MAX_SCHEDULE_TIMEOUT, NULL);
+ if (ret) {
+ dev_err(se->dev, "host1x job timed out\n");
+ return ret;
+ }
+
+ host1x_job_put(job);
+ return 0;
+
+job_unpin:
+ host1x_job_unpin(job);
+job_put:
+ host1x_job_put(job);
+
+ return ret;
+}
+
+static int tegra_se_client_init(struct host1x_client *client)
+{
+ struct tegra_se *se = container_of(client, struct tegra_se, client);
+ int ret;
+
+ se->channel = host1x_channel_request(&se->client);
+ if (!se->channel) {
+ dev_err(se->dev, "host1x channel map failed\n");
+ return -ENODEV;
+ }
+
+ se->syncpt = host1x_syncpt_request(&se->client, 0);
+ if (!se->syncpt) {
+ dev_err(se->dev, "host1x syncpt allocation failed\n");
+ ret = -EINVAL;
+ goto channel_put;
+ }
+
+ se->syncpt_id = host1x_syncpt_id(se->syncpt);
+
+ se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->cmdbuf) {
+ ret = -ENOMEM;
+ goto syncpt_put;
+ }
+
+ ret = se->hw->init_alg(se);
+ if (ret) {
+ dev_err(se->dev, "failed to register algorithms\n");
+ goto cmdbuf_put;
+ }
+
+ return 0;
+
+cmdbuf_put:
+ tegra_se_cmdbuf_put(&se->cmdbuf->bo);
+syncpt_put:
+ host1x_syncpt_put(se->syncpt);
+channel_put:
+ host1x_channel_put(se->channel);
+
+ return ret;
+}
+
+static int tegra_se_client_deinit(struct host1x_client *client)
+{
+ struct tegra_se *se = container_of(client, struct tegra_se, client);
+
+ se->hw->deinit_alg(se);
+ tegra_se_cmdbuf_put(&se->cmdbuf->bo);
+ host1x_syncpt_put(se->syncpt);
+ host1x_channel_put(se->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops tegra_se_client_ops = {
+ .init = tegra_se_client_init,
+ .exit = tegra_se_client_deinit,
+};
+
+static int tegra_se_host1x_register(struct tegra_se *se)
+{
+ INIT_LIST_HEAD(&se->client.list);
+ se->client.dev = se->dev;
+ se->client.ops = &tegra_se_client_ops;
+ se->client.class = se->hw->host1x_class;
+ se->client.num_syncpts = 1;
+
+ host1x_client_register(&se->client);
+
+ return 0;
+}
+
+static int tegra_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_se *se;
+ int ret;
+
+ se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL);
+ if (!se)
+ return -ENOMEM;
+
+ se->dev = dev;
+ se->owner = TEGRA_GPSE_ID;
+ se->hw = device_get_match_data(&pdev->dev);
+
+ se->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(se->base))
+ return PTR_ERR(se->base);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+ platform_set_drvdata(pdev, se);
+
+ se->clk = devm_clk_get_enabled(se->dev, NULL);
+ if (IS_ERR(se->clk))
+ return dev_err_probe(dev, PTR_ERR(se->clk),
+ "failed to enable clocks\n");
+
+ if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id))
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get IOMMU stream ID\n");
+
+ writel(se->stream_id, se->base + SE_STREAM_ID);
+
+ se->engine = crypto_engine_alloc_init(dev, 0);
+ if (!se->engine)
+ return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+
+ ret = crypto_engine_start(se->engine);
+ if (ret) {
+ crypto_engine_exit(se->engine);
+ return dev_err_probe(dev, ret, "failed to start crypto engine\n");
+ }
+
+ ret = tegra_se_host1x_register(se);
+ if (ret) {
+ crypto_engine_stop(se->engine);
+ crypto_engine_exit(se->engine);
+ return dev_err_probe(dev, ret, "failed to init host1x params\n");
+ }
+
+ return 0;
+}
+
+static void tegra_se_remove(struct platform_device *pdev)
+{
+ struct tegra_se *se = platform_get_drvdata(pdev);
+
+ crypto_engine_stop(se->engine);
+ crypto_engine_exit(se->engine);
+ iommu_fwspec_free(se->dev);
+ host1x_client_unregister(&se->client);
+}
+
+static const struct tegra_se_regs tegra234_aes1_regs = {
+ .config = SE_AES1_CFG,
+ .op = SE_AES1_OPERATION,
+ .last_blk = SE_AES1_LAST_BLOCK,
+ .linear_ctr = SE_AES1_LINEAR_CTR,
+ .aad_len = SE_AES1_AAD_LEN,
+ .cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN,
+ .manifest = SE_AES1_KEYMANIFEST,
+ .key_addr = SE_AES1_KEY_ADDR,
+ .key_data = SE_AES1_KEY_DATA,
+ .key_dst = SE_AES1_KEY_DST,
+ .result = SE_AES1_CMAC_RESULT,
+};
+
+static const struct tegra_se_regs tegra234_hash_regs = {
+ .config = SE_SHA_CFG,
+ .op = SE_SHA_OPERATION,
+ .manifest = SE_SHA_KEYMANIFEST,
+ .key_addr = SE_SHA_KEY_ADDR,
+ .key_data = SE_SHA_KEY_DATA,
+ .key_dst = SE_SHA_KEY_DST,
+ .result = SE_SHA_HASH_RESULT,
+};
+
+static const struct tegra_se_hw tegra234_aes_hw = {
+ .regs = &tegra234_aes1_regs,
+ .kac_ver = 1,
+ .host1x_class = 0x3b,
+ .init_alg = tegra_init_aes,
+ .deinit_alg = tegra_deinit_aes,
+};
+
+static const struct tegra_se_hw tegra234_hash_hw = {
+ .regs = &tegra234_hash_regs,
+ .kac_ver = 1,
+ .host1x_class = 0x3d,
+ .init_alg = tegra_init_hash,
+ .deinit_alg = tegra_deinit_hash,
+};
+
+static const struct of_device_id tegra_se_of_match[] = {
+ {
+ .compatible = "nvidia,tegra234-se-aes",
+ .data = &tegra234_aes_hw
+ }, {
+ .compatible = "nvidia,tegra234-se-hash",
+ .data = &tegra234_hash_hw,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_se_of_match);
+
+static struct platform_driver tegra_se_driver = {
+ .driver = {
+ .name = "tegra-se",
+ .of_match_table = tegra_se_of_match,
+ },
+ .probe = tegra_se_probe,
+ .remove_new = tegra_se_remove,
+};
+
+static int tegra_se_host1x_probe(struct host1x_device *dev)
+{
+ return host1x_device_init(dev);
+}
+
+static int tegra_se_host1x_remove(struct host1x_device *dev)
+{
+ host1x_device_exit(dev);
+
+ return 0;
+}
+
+static struct host1x_driver tegra_se_host1x_driver = {
+ .driver = {
+ .name = "tegra-se-host1x",
+ },
+ .probe = tegra_se_host1x_probe,
+ .remove = tegra_se_host1x_remove,
+ .subdevs = tegra_se_of_match,
+};
+
+static int __init tegra_se_module_init(void)
+{
+ int ret;
+
+ ret = host1x_driver_register(&tegra_se_host1x_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&tegra_se_driver);
+}
+
+static void __exit tegra_se_module_exit(void)
+{
+ host1x_driver_unregister(&tegra_se_host1x_driver);
+ platform_driver_unregister(&tegra_se_driver);
+}
+
+module_init(tegra_se_module_init);
+module_exit(tegra_se_module_exit);
+
+MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver");
+MODULE_AUTHOR("Akhil R <akhilrajeev@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
new file mode 100644
index 000000000000..b9dd7ceb8783
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * Header file for NVIDIA Security Engine driver.
+ */
+
+#ifndef _TEGRA_SE_H
+#define _TEGRA_SE_H
+
+#include <linux/bitfield.h>
+#include <linux/iommu.h>
+#include <linux/host1x.h>
+#include <crypto/aead.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <crypto/sha3.h>
+#include <crypto/skcipher.h>
+
+#define SE_OWNERSHIP 0x14
+#define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x)
+#define TEGRA_GPSE_ID 3
+
+#define SE_STREAM_ID 0x90
+
+#define SE_SHA_CFG 0x4004
+#define SE_SHA_KEY_ADDR 0x4094
+#define SE_SHA_KEY_DATA 0x4098
+#define SE_SHA_KEYMANIFEST 0x409c
+#define SE_SHA_CRYPTO_CFG 0x40a4
+#define SE_SHA_KEY_DST 0x40a8
+#define SE_SHA_SRC_KSLT 0x4180
+#define SE_SHA_TGT_KSLT 0x4184
+#define SE_SHA_MSG_LENGTH 0x401c
+#define SE_SHA_OPERATION 0x407c
+#define SE_SHA_HASH_RESULT 0x40b0
+
+#define SE_SHA_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_SHA_ENC_MODE_SHA1 SE_SHA_ENC_MODE(0)
+#define SE_SHA_ENC_MODE_SHA224 SE_SHA_ENC_MODE(4)
+#define SE_SHA_ENC_MODE_SHA256 SE_SHA_ENC_MODE(5)
+#define SE_SHA_ENC_MODE_SHA384 SE_SHA_ENC_MODE(6)
+#define SE_SHA_ENC_MODE_SHA512 SE_SHA_ENC_MODE(7)
+#define SE_SHA_ENC_MODE_SHA_CTX_INTEGRITY SE_SHA_ENC_MODE(8)
+#define SE_SHA_ENC_MODE_SHA3_224 SE_SHA_ENC_MODE(9)
+#define SE_SHA_ENC_MODE_SHA3_256 SE_SHA_ENC_MODE(10)
+#define SE_SHA_ENC_MODE_SHA3_384 SE_SHA_ENC_MODE(11)
+#define SE_SHA_ENC_MODE_SHA3_512 SE_SHA_ENC_MODE(12)
+#define SE_SHA_ENC_MODE_SHAKE128 SE_SHA_ENC_MODE(13)
+#define SE_SHA_ENC_MODE_SHAKE256 SE_SHA_ENC_MODE(14)
+#define SE_SHA_ENC_MODE_HMAC_SHA256_1KEY SE_SHA_ENC_MODE(0)
+#define SE_SHA_ENC_MODE_HMAC_SHA256_2KEY SE_SHA_ENC_MODE(1)
+#define SE_SHA_ENC_MODE_SM3_256 SE_SHA_ENC_MODE(0)
+
+#define SE_SHA_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
+#define SE_SHA_ENC_ALG_NOP SE_SHA_CFG_ENC_ALG(0)
+#define SE_SHA_ENC_ALG_SHA_ENC SE_SHA_CFG_ENC_ALG(1)
+#define SE_SHA_ENC_ALG_RNG SE_SHA_CFG_ENC_ALG(2)
+#define SE_SHA_ENC_ALG_SHA SE_SHA_CFG_ENC_ALG(3)
+#define SE_SHA_ENC_ALG_SM3 SE_SHA_CFG_ENC_ALG(4)
+#define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7)
+#define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8)
+#define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10)
+#define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12)
+#define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13)
+
+#define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1)
+#define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1)
+
+#define SE_SHA_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
+#define SE_SHA_OP_START SE_SHA_OP_OP(1)
+#define SE_SHA_OP_RESTART_OUT SE_SHA_OP_OP(2)
+#define SE_SHA_OP_RESTART_IN SE_SHA_OP_OP(4)
+#define SE_SHA_OP_RESTART_INOUT SE_SHA_OP_OP(5)
+#define SE_SHA_OP_DUMMY SE_SHA_OP_OP(6)
+
+#define SE_SHA_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_SHA_DEC_ALG_NOP SE_SHA_CFG_DEC_ALG(0)
+#define SE_SHA_DEC_ALG_AES_DEC SE_SHA_CFG_DEC_ALG(1)
+#define SE_SHA_DEC_ALG_HMAC SE_SHA_CFG_DEC_ALG(7)
+#define SE_SHA_DEC_ALG_HMAC_VERIFY SE_SHA_CFG_DEC_ALG(9)
+
+#define SE_SHA_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
+#define SE_SHA_DST_MEMORY SE_SHA_CFG_DST(0)
+#define SE_SHA_DST_HASH_REG SE_SHA_CFG_DST(1)
+#define SE_SHA_DST_KEYTABLE SE_SHA_CFG_DST(2)
+#define SE_SHA_DST_SRK SE_SHA_CFG_DST(3)
+
+#define SE_SHA_TASK_HASH_INIT BIT(0)
+
+/* AES Configuration */
+#define SE_AES0_CFG 0x1004
+#define SE_AES0_CRYPTO_CONFIG 0x1008
+#define SE_AES0_KEY_DST 0x1030
+#define SE_AES0_OPERATION 0x1038
+#define SE_AES0_LINEAR_CTR 0x101c
+#define SE_AES0_LAST_BLOCK 0x102c
+#define SE_AES0_KEY_ADDR 0x10bc
+#define SE_AES0_KEY_DATA 0x10c0
+#define SE_AES0_CMAC_RESULT 0x10c4
+#define SE_AES0_SRC_KSLT 0x1100
+#define SE_AES0_TGT_KSLT 0x1104
+#define SE_AES0_KEYMANIFEST 0x1114
+#define SE_AES0_AAD_LEN 0x112c
+#define SE_AES0_CRYPTO_MSG_LEN 0x1134
+
+#define SE_AES1_CFG 0x2004
+#define SE_AES1_CRYPTO_CONFIG 0x2008
+#define SE_AES1_KEY_DST 0x2030
+#define SE_AES1_OPERATION 0x2038
+#define SE_AES1_LINEAR_CTR 0x201c
+#define SE_AES1_LAST_BLOCK 0x202c
+#define SE_AES1_KEY_ADDR 0x20bc
+#define SE_AES1_KEY_DATA 0x20c0
+#define SE_AES1_CMAC_RESULT 0x20c4
+#define SE_AES1_SRC_KSLT 0x2100
+#define SE_AES1_TGT_KSLT 0x2104
+#define SE_AES1_KEYMANIFEST 0x2114
+#define SE_AES1_AAD_LEN 0x212c
+#define SE_AES1_CRYPTO_MSG_LEN 0x2134
+
+#define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3)
+#define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4)
+#define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5)
+#define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7)
+#define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12)
+
+#define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x)
+#define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3)
+#define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4)
+#define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5)
+#define SE_AES_DEC_MODE_CBC_MAC SE_AES_CFG_DEC_MODE(12)
+
+#define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
+#define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0)
+#define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1)
+#define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2)
+#define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3)
+#define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7)
+#define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8)
+#define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13)
+
+#define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0)
+#define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1)
+
+#define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
+#define SE_AES_DST_MEMORY SE_AES_CFG_DST(0)
+#define SE_AES_DST_HASH_REG SE_AES_CFG_DST(1)
+#define SE_AES_DST_KEYTABLE SE_AES_CFG_DST(2)
+#define SE_AES_DST_SRK SE_AES_CFG_DST(3)
+
+/* AES Crypto Configuration */
+#define SE_AES_KEY2_INDEX(x) FIELD_PREP(GENMASK(31, 28), x)
+#define SE_AES_KEY_INDEX(x) FIELD_PREP(GENMASK(27, 24), x)
+
+#define SE_AES_CRYPTO_CFG_SCC_DIS FIELD_PREP(BIT(20), 1)
+
+#define SE_AES_CRYPTO_CFG_CTR_CNTN(x) FIELD_PREP(GENMASK(18, 11), x)
+
+#define SE_AES_CRYPTO_CFG_IV_MODE(x) FIELD_PREP(BIT(10), x)
+#define SE_AES_IV_MODE_SWIV SE_AES_CRYPTO_CFG_IV_MODE(0)
+#define SE_AES_IV_MODE_HWIV SE_AES_CRYPTO_CFG_IV_MODE(1)
+
+#define SE_AES_CRYPTO_CFG_CORE_SEL(x) FIELD_PREP(BIT(9), x)
+#define SE_AES_CORE_SEL_DECRYPT SE_AES_CRYPTO_CFG_CORE_SEL(0)
+#define SE_AES_CORE_SEL_ENCRYPT SE_AES_CRYPTO_CFG_CORE_SEL(1)
+
+#define SE_AES_CRYPTO_CFG_IV_SEL(x) FIELD_PREP(GENMASK(8, 7), x)
+#define SE_AES_IV_SEL_UPDATED SE_AES_CRYPTO_CFG_IV_SEL(1)
+#define SE_AES_IV_SEL_REG SE_AES_CRYPTO_CFG_IV_SEL(2)
+#define SE_AES_IV_SEL_RANDOM SE_AES_CRYPTO_CFG_IV_SEL(3)
+
+#define SE_AES_CRYPTO_CFG_VCTRAM_SEL(x) FIELD_PREP(GENMASK(6, 5), x)
+#define SE_AES_VCTRAM_SEL_MEMORY SE_AES_CRYPTO_CFG_VCTRAM_SEL(0)
+#define SE_AES_VCTRAM_SEL_TWEAK SE_AES_CRYPTO_CFG_VCTRAM_SEL(1)
+#define SE_AES_VCTRAM_SEL_AESOUT SE_AES_CRYPTO_CFG_VCTRAM_SEL(2)
+#define SE_AES_VCTRAM_SEL_PREV_MEM SE_AES_CRYPTO_CFG_VCTRAM_SEL(3)
+
+#define SE_AES_CRYPTO_CFG_INPUT_SEL(x) FIELD_PREP(GENMASK(4, 3), x)
+#define SE_AES_INPUT_SEL_MEMORY SE_AES_CRYPTO_CFG_INPUT_SEL(0)
+#define SE_AES_INPUT_SEL_RANDOM SE_AES_CRYPTO_CFG_INPUT_SEL(1)
+#define SE_AES_INPUT_SEL_AESOUT SE_AES_CRYPTO_CFG_INPUT_SEL(2)
+#define SE_AES_INPUT_SEL_LINEAR_CTR SE_AES_CRYPTO_CFG_INPUT_SEL(3)
+#define SE_AES_INPUT_SEL_REG SE_AES_CRYPTO_CFG_INPUT_SEL(1)
+
+#define SE_AES_CRYPTO_CFG_XOR_POS(x) FIELD_PREP(GENMASK(2, 1), x)
+#define SE_AES_XOR_POS_BYPASS SE_AES_CRYPTO_CFG_XOR_POS(0)
+#define SE_AES_XOR_POS_BOTH SE_AES_CRYPTO_CFG_XOR_POS(1)
+#define SE_AES_XOR_POS_TOP SE_AES_CRYPTO_CFG_XOR_POS(2)
+#define SE_AES_XOR_POS_BOTTOM SE_AES_CRYPTO_CFG_XOR_POS(3)
+
+#define SE_AES_CRYPTO_CFG_HASH_EN(x) FIELD_PREP(BIT(0), x)
+#define SE_AES_HASH_DISABLE SE_AES_CRYPTO_CFG_HASH_EN(0)
+#define SE_AES_HASH_ENABLE SE_AES_CRYPTO_CFG_HASH_EN(1)
+
+#define SE_LAST_BLOCK_VAL(x) FIELD_PREP(GENMASK(19, 0), x)
+#define SE_LAST_BLOCK_RES_BITS(x) FIELD_PREP(GENMASK(26, 20), x)
+
+#define SE_AES_OP_LASTBUF FIELD_PREP(BIT(16), 1)
+#define SE_AES_OP_WRSTALL FIELD_PREP(BIT(15), 1)
+#define SE_AES_OP_FINAL FIELD_PREP(BIT(5), 1)
+#define SE_AES_OP_INIT FIELD_PREP(BIT(4), 1)
+
+#define SE_AES_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
+#define SE_AES_OP_START SE_AES_OP_OP(1)
+#define SE_AES_OP_RESTART_OUT SE_AES_OP_OP(2)
+#define SE_AES_OP_RESTART_IN SE_AES_OP_OP(4)
+#define SE_AES_OP_RESTART_INOUT SE_AES_OP_OP(5)
+#define SE_AES_OP_DUMMY SE_AES_OP_OP(6)
+
+#define SE_KAC_SIZE(x) FIELD_PREP(GENMASK(15, 14), x)
+#define SE_KAC_SIZE_128 SE_KAC_SIZE(0)
+#define SE_KAC_SIZE_192 SE_KAC_SIZE(1)
+#define SE_KAC_SIZE_256 SE_KAC_SIZE(2)
+
+#define SE_KAC_EXPORTABLE FIELD_PREP(BIT(12), 1)
+
+#define SE_KAC_PURPOSE(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_KAC_ENC SE_KAC_PURPOSE(0)
+#define SE_KAC_CMAC SE_KAC_PURPOSE(1)
+#define SE_KAC_HMAC SE_KAC_PURPOSE(2)
+#define SE_KAC_GCM_KW SE_KAC_PURPOSE(3)
+#define SE_KAC_HMAC_KDK SE_KAC_PURPOSE(6)
+#define SE_KAC_HMAC_KDD SE_KAC_PURPOSE(7)
+#define SE_KAC_HMAC_KDD_KUW SE_KAC_PURPOSE(8)
+#define SE_KAC_XTS SE_KAC_PURPOSE(9)
+#define SE_KAC_GCM SE_KAC_PURPOSE(10)
+
+#define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3)
+
+#define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x)
+
+#define SE_CFG_AES_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_AES_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GMAC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GMAC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GCM | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GCM | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GCM_FINAL | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GCM_FINAL | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_ENC_MODE_CMAC | \
+ SE_AES_DST_HASH_REG)
+
+#define SE_CFG_CBC_MAC (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_ENC_MODE_CBC_MAC)
+
+#define SE_CFG_INS (SE_AES_ENC_ALG_INS | \
+ SE_AES_DEC_ALG_NOP)
+
+#define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_XOR_POS_BYPASS | \
+ SE_AES_CORE_SEL_ENCRYPT)
+
+#define SE_CRYPTO_CFG_ECB_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_XOR_POS_BYPASS | \
+ SE_AES_CORE_SEL_DECRYPT)
+
+#define SE_CRYPTO_CFG_CBC_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_AESOUT | \
+ SE_AES_XOR_POS_TOP | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CBC_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_PREV_MEM | \
+ SE_AES_XOR_POS_BOTTOM | \
+ SE_AES_CORE_SEL_DECRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \
+ SE_AES_VCTRAM_SEL_MEMORY | \
+ SE_AES_XOR_POS_BOTTOM | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_CRYPTO_CFG_CTR_CNTN(1) | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_XTS_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_TWEAK | \
+ SE_AES_XOR_POS_BOTH | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_XTS_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_TWEAK | \
+ SE_AES_XOR_POS_BOTH | \
+ SE_AES_CORE_SEL_DECRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CBC_MAC (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_AESOUT | \
+ SE_AES_XOR_POS_TOP | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_HASH_ENABLE | \
+ SE_AES_IV_SEL_REG)
+
+#define HASH_RESULT_REG_COUNT 50
+#define CMAC_RESULT_REG_COUNT 4
+
+#define SE_CRYPTO_CTR_REG_COUNT 4
+#define SE_MAX_KEYSLOT 15
+#define SE_MAX_MEM_ALLOC SZ_4M
+#define SE_AES_BUFLEN 0x8000
+#define SE_SHA_BUFLEN 0x2000
+
+#define SHA_FIRST BIT(0)
+#define SHA_UPDATE BIT(1)
+#define SHA_FINAL BIT(2)
+
+/* Security Engine operation modes */
+enum se_aes_alg {
+ SE_ALG_CBC, /* Cipher Block Chaining (CBC) mode */
+ SE_ALG_ECB, /* Electronic Codebook (ECB) mode */
+ SE_ALG_CTR, /* Counter (CTR) mode */
+ SE_ALG_XTS, /* XTS mode */
+ SE_ALG_GMAC, /* GMAC mode */
+ SE_ALG_GCM, /* GCM mode */
+ SE_ALG_GCM_FINAL, /* GCM FINAL mode */
+ SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */
+ SE_ALG_CBC_MAC, /* CBC MAC mode */
+};
+
+enum se_hash_alg {
+ SE_ALG_RNG_DRBG, /* Deterministic Random Bit Generator */
+ SE_ALG_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
+ SE_ALG_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
+ SE_ALG_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
+ SE_ALG_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
+ SE_ALG_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */
+ SE_ALG_SHA3_224, /* Secure Hash Algorithm3-224 (SHA3-224) mode */
+ SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */
+ SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */
+ SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */
+ SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */
+ SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */
+ SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */
+ SE_ALG_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */
+ SE_ALG_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */
+ SE_ALG_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */
+};
+
+struct tegra_se_alg {
+ struct tegra_se *se_dev;
+ const char *alg_base;
+
+ union {
+ struct skcipher_engine_alg skcipher;
+ struct aead_engine_alg aead;
+ struct ahash_engine_alg ahash;
+ } alg;
+};
+
+struct tegra_se_regs {
+ u32 op;
+ u32 config;
+ u32 last_blk;
+ u32 linear_ctr;
+ u32 out_addr;
+ u32 aad_len;
+ u32 cryp_msg_len;
+ u32 manifest;
+ u32 key_addr;
+ u32 key_data;
+ u32 key_dst;
+ u32 result;
+};
+
+struct tegra_se_hw {
+ const struct tegra_se_regs *regs;
+ int (*init_alg)(struct tegra_se *se);
+ void (*deinit_alg)(struct tegra_se *se);
+ bool support_sm_alg;
+ u32 host1x_class;
+ u32 kac_ver;
+};
+
+struct tegra_se {
+ int (*manifest)(u32 user, u32 alg, u32 keylen);
+ const struct tegra_se_hw *hw;
+ struct host1x_client client;
+ struct host1x_channel *channel;
+ struct tegra_se_cmdbuf *cmdbuf;
+ struct crypto_engine *engine;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+ struct clk *clk;
+ unsigned int opcode_addr;
+ unsigned int stream_id;
+ unsigned int syncpt_id;
+ void __iomem *base;
+ u32 owner;
+};
+
+struct tegra_se_cmdbuf {
+ dma_addr_t iova;
+ u32 *addr;
+ struct device *dev;
+ struct kref ref;
+ struct host1x_bo bo;
+ ssize_t size;
+ u32 words;
+};
+
+struct tegra_se_datbuf {
+ u8 *buf;
+ dma_addr_t addr;
+ ssize_t size;
+};
+
+static inline int se_algname_to_algid(const char *name)
+{
+ if (!strcmp(name, "cbc(aes)"))
+ return SE_ALG_CBC;
+ else if (!strcmp(name, "ecb(aes)"))
+ return SE_ALG_ECB;
+ else if (!strcmp(name, "ctr(aes)"))
+ return SE_ALG_CTR;
+ else if (!strcmp(name, "xts(aes)"))
+ return SE_ALG_XTS;
+ else if (!strcmp(name, "cmac(aes)"))
+ return SE_ALG_CMAC;
+ else if (!strcmp(name, "gcm(aes)"))
+ return SE_ALG_GCM;
+ else if (!strcmp(name, "ccm(aes)"))
+ return SE_ALG_CBC_MAC;
+
+ else if (!strcmp(name, "sha1"))
+ return SE_ALG_SHA1;
+ else if (!strcmp(name, "sha224"))
+ return SE_ALG_SHA224;
+ else if (!strcmp(name, "sha256"))
+ return SE_ALG_SHA256;
+ else if (!strcmp(name, "sha384"))
+ return SE_ALG_SHA384;
+ else if (!strcmp(name, "sha512"))
+ return SE_ALG_SHA512;
+ else if (!strcmp(name, "sha3-224"))
+ return SE_ALG_SHA3_224;
+ else if (!strcmp(name, "sha3-256"))
+ return SE_ALG_SHA3_256;
+ else if (!strcmp(name, "sha3-384"))
+ return SE_ALG_SHA3_384;
+ else if (!strcmp(name, "sha3-512"))
+ return SE_ALG_SHA3_512;
+ else if (!strcmp(name, "hmac(sha224)"))
+ return SE_ALG_HMAC_SHA224;
+ else if (!strcmp(name, "hmac(sha256)"))
+ return SE_ALG_HMAC_SHA256;
+ else if (!strcmp(name, "hmac(sha384)"))
+ return SE_ALG_HMAC_SHA384;
+ else if (!strcmp(name, "hmac(sha512)"))
+ return SE_ALG_HMAC_SHA512;
+ else
+ return -EINVAL;
+}
+
+/* Functions */
+int tegra_init_aes(struct tegra_se *se);
+int tegra_init_hash(struct tegra_se *se);
+void tegra_deinit_aes(struct tegra_se *se);
+void tegra_deinit_hash(struct tegra_se *se);
+int tegra_key_submit(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid);
+void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+
+/* HOST1x OPCODES */
+static inline u32 host1x_opcode_setpayload(unsigned int payload)
+{
+ return (9 << 28) | payload;
+}
+
+static inline u32 host1x_opcode_incr_w(unsigned int offset)
+{
+ /* 22-bit offset supported */
+ return (10 << 28) | offset;
+}
+
+static inline u32 host1x_opcode_nonincr_w(unsigned int offset)
+{
+ /* 22-bit offset supported */
+ return (11 << 28) | offset;
+}
+
+static inline u32 host1x_opcode_incr(unsigned int offset, unsigned int count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned int offset, unsigned int count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 10;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+
+#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4)
+#define se_host1x_opcode_nonincr_w(x) host1x_opcode_nonincr_w((x) / 4)
+#define se_host1x_opcode_incr(x, y) host1x_opcode_incr((x) / 4, y)
+#define se_host1x_opcode_nonincr(x, y) host1x_opcode_nonincr((x) / 4, y)
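+
+/*
+ * The SE_* register defines above are byte offsets, while the host1x *_w
+ * opcodes take word offsets, hence the division by 4. For example,
+ * se_host1x_opcode_incr_w(SE_SHA_CFG) targets word offset 0x4004 / 4 = 0x1001.
+ */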
+
+#endif /*_TEGRA_SE_H*/
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 67998dbd1d46..99b5c25be079 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,6 +6,7 @@ menuconfig CXL_BUS
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
+ select NUMA_KEEP_MEMINFO if (NUMA && X86)
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -144,17 +145,4 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
-config CXL_PMU
- tristate "CXL Performance Monitoring Unit"
- default CXL_BUS
- depends on PERF_EVENTS
- help
- Support performance monitoring as defined in CXL rev 3.0
- section 13.2: Performance Monitoring. CXL components may have
- one or more CXL Performance Monitoring Units (CPMUs).
-
- Say 'y/m' to enable a driver that will attach to performance
- monitoring units and provide standard perf based interfaces.
-
- If unsure say 'm'.
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index af5cb818f84d..571069863c62 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,28 +316,59 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
+static void del_cxl_resource(struct resource *res)
+{
+ if (!res)
+ return;
+ kfree(res->name);
+ kfree(res);
+}
+
+static struct resource *alloc_cxl_resource(resource_size_t base,
+ resource_size_t n, int id)
+{
+ struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res)
+ return NULL;
+
+ res->start = base;
+ res->end = base + n - 1;
+ res->flags = IORESOURCE_MEM;
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
+ if (!res->name)
+ return NULL;
+
+ return no_free_ptr(res);
+}
+
+static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
+{
+ int rc = insert_resource(parent, res);
+
+ if (rc)
+ del_cxl_resource(res);
+ return rc;
+}
+
+DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
+ if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
- struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
- struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
- struct resource *res;
int rc;
rc = cxl_acpi_cfmws_verify(dev, cfmws);
- if (rc) {
- dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
- cfmws->base_hpa,
- cfmws->base_hpa + cfmws->window_size - 1);
+ if (rc)
return rc;
- }
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
if (rc)
@@ -348,29 +379,23 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- res = kzalloc(sizeof(*res), GFP_KERNEL);
+ struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
+ cfmws->base_hpa, cfmws->window_size, ctx->id++);
if (!res)
return -ENOMEM;
- res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
- if (!res->name)
- goto err_name;
-
- res->start = cfmws->base_hpa;
- res->end = cfmws->base_hpa + cfmws->window_size - 1;
- res->flags = IORESOURCE_MEM;
-
/* add to the local resource tracking to establish a sort order */
- rc = insert_resource(cxl_res, res);
+ rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
if (rc)
- goto err_insert;
+ return rc;
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO)
cxl_calc_hb = cxl_hb_modulo;
else
cxl_calc_hb = cxl_hb_xor;
- cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
+ struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
+ cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
return PTR_ERR(cxlrd);
@@ -378,8 +403,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
- .start = res->start,
- .end = res->end,
+ .start = cfmws->base_hpa,
+ .end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
/*
@@ -399,11 +424,10 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
cxl_parse_cxims, &cxims_ctx);
if (rc < 0)
- goto err_xormap;
+ return rc;
if (!cxlrd->platform_data) {
dev_err(dev, "No CXIMS for HBIG %u\n", ig);
- rc = -EINVAL;
- goto err_xormap;
+ return -EINVAL;
}
}
}
@@ -411,18 +435,9 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->qos_class = cfmws->qtg_id;
rc = cxl_decoder_add(cxld, target_map);
-err_xormap:
if (rc)
- put_device(&cxld->dev);
- else
- rc = cxl_decoder_autoremove(dev, cxld);
- return rc;
-
-err_insert:
- kfree(res->name);
-err_name:
- kfree(res);
- return -ENOMEM;
+ return rc;
+ return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -525,22 +540,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
- int rc;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
- rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
- if (rc < 0)
- return rc;
-
- /* Adjust back to picoseconds from nanoseconds */
- for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- dport->hb_coord[i].read_latency *= 1000;
- dport->hb_coord[i].write_latency *= 1000;
- }
-
- return 0;
+ return acpi_get_genport_coordinates(uid, dport->coord);
}
static int add_host_bridge_dport(struct device *match, void *arg)
@@ -694,12 +698,6 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
-static void del_cxl_resource(struct resource *res)
-{
- kfree(res->name);
- kfree(res);
-}
-
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
priv->desc = (unsigned long) pub;
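
The acpi.c rework above leans on the kernel's scope-based cleanup helpers from <linux/cleanup.h>. As a rough, self-contained sketch of that idiom — struct demo_buf and the demo_* names are invented for illustration and are not part of the driver — an allocation arms a destructor with __free() and only hands ownership out via no_free_ptr() once construction fully succeeds:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_buf {
        char *name;
};

static void demo_buf_free(struct demo_buf *b)
{
        kfree(b->name);         /* kfree(NULL) is a no-op */
        kfree(b);
}
DEFINE_FREE(demo_buf, struct demo_buf *, if (_T) demo_buf_free(_T))

static struct demo_buf *demo_buf_alloc(void)
{
        /* armed destructor: freed automatically on any early return */
        struct demo_buf *b __free(demo_buf) = kzalloc(sizeof(*b), GFP_KERNEL);

        if (!b)
                return NULL;

        b->name = kstrdup("demo window", GFP_KERNEL);
        if (!b->name)
                return NULL;    /* __free() reclaims b here */

        /* success: disarm the cleanup and pass ownership to the caller */
        return no_free_ptr(b);
}

alloc_cxl_resource()/add_or_reset_cxl_resource() and the put_cxlrd guard above follow the same shape: every early return releases the partially built object, and only a fully constructed one escapes the scope.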
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index eddbbe21450c..bb83867d9fec 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -14,12 +14,42 @@
struct dsmas_entry {
struct range dpa_range;
u8 handle;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid and overflow values
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * CDAT fields follow the format of HMAT fields. See table 5 Device
+ * Scoped Latency and Bandwidth Information Structure in Coherent Device
+ * Attribute Table (CDAT) Specification v1.01.
+ */
+ value = entry * base;
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
return 0;
}
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
- int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
{
switch (access) {
case ACPI_HMAT_ACCESS_LATENCY:
@@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
}
}
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_access_coordinate_set(&coord[i], access, val);
+}
+
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
__le16 le_val;
u64 val;
u16 len;
- int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
@@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)dslbis->entry_base_unit;
le_val = (__force __le16)dslbis->entry[0];
- rc = check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val);
- if (rc)
- pr_warn("DSLBIS value overflowed.\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ dslbis->data_type);
- cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
return 0;
}
@@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
- struct access_coordinate ep_c;
- struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
- rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
return rc;
}
- rc = cxl_hb_get_perf_coordinates(port, coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return rc;
- }
-
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
if (!cxl_root)
@@ -193,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
- /*
- * Keeping the host bridge coordinates separate from the dsmas
- * coordinates in order to allow calculation of access class
- * 0 and 1 for region later.
- */
- cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU],
- &coord[ACCESS_COORDINATE_CPU],
- &dent->coord);
+ cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root,
- &coord[ACCESS_COORDINATE_CPU],
+ &dent->coord[ACCESS_COORDINATE_CPU],
1, &qos_class);
if (rc != 1)
continue;
@@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ dpa_perf->coord[i] = dent->coord[i];
dpa_perf->dpa_range = dent->dpa_range;
- dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
- dent->coord.read_bandwidth, dent->coord.write_bandwidth,
- dent->coord.read_latency, dent->coord.write_latency);
+ dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+ dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
- if (check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val))
- dev_warn(dev, "SSLBIS value overflowed!\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ sslbis->data_type);
xa_for_each(&port->dports, index, dport) {
if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id)
- cxl_access_coordinate_set(&dport->sw_coord,
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
sslbis->data_type,
val);
+ }
}
}
@@ -493,6 +514,21 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
/**
* cxl_coordinates_combine - Combine the two input coordinates
*
@@ -504,15 +540,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2)
{
- if (c1->write_bandwidth && c2->write_bandwidth)
- out->write_bandwidth = min(c1->write_bandwidth,
- c2->write_bandwidth);
- out->write_latency = c1->write_latency + c2->write_latency;
-
- if (c1->read_bandwidth && c2->read_bandwidth)
- out->read_bandwidth = min(c1->read_bandwidth,
- c2->read_bandwidth);
- out->read_latency = c1->read_latency + c2->read_latency;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}
MODULE_IMPORT_NS(CXL);
@@ -521,17 +550,13 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlmd->endpoint;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
- struct access_coordinate coord;
struct range dpa = {
.start = cxled->dpa_res->start,
.end = cxled->dpa_res->end,
};
struct cxl_dpa_perf *perf;
- int rc;
switch (cxlr->mode) {
case CXL_DECODER_RAM:
@@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
if (!range_contains(&perf->dpa_range, &dpa))
return;
- rc = cxl_hb_get_perf_coordinates(port, hb_coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return;
- }
-
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- /* Pickup the host bridge coords */
- cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord);
-
/* Get total bandwidth and the worst latency for the cxl region */
cxlr->coord[i].read_latency = max_t(unsigned int,
cxlr->coord[i].read_latency,
- coord.read_latency);
+ perf->coord[i].read_latency);
cxlr->coord[i].write_latency = max_t(unsigned int,
cxlr->coord[i].write_latency,
- coord.write_latency);
- cxlr->coord[i].read_bandwidth += coord.read_bandwidth;
- cxlr->coord[i].write_bandwidth += coord.write_bandwidth;
-
- /*
- * Convert latency to nanosec from picosec to be consistent
- * with the resulting latency coordinates computed by the
- * HMAT_REPORTING code.
- */
- cxlr->coord[i].read_latency =
- DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000);
- cxlr->coord[i].write_latency =
- DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000);
+ perf->coord[i].write_latency);
+ cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
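
cdat_normalize() above replaces the old check_mul_overflow() warnings with a clamped multiply plus a picosecond-to-nanosecond conversion for the latency-type entries. The throwaway userspace program below models that arithmetic with made-up values; it is not driver code and redefines DIV_ROUND_UP locally:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static uint32_t normalize(uint16_t entry, uint64_t base, int is_latency)
{
        uint64_t value;

        if (entry == 0xffff || entry == 0)      /* invalid per CDAT */
                return 0;
        if (base > UINT32_MAX / entry)          /* product would overflow u32 */
                return 0;

        value = (uint64_t)entry * base;
        if (is_latency)                         /* picoseconds -> nanoseconds */
                value = DIV_ROUND_UP(value, 1000);
        return (uint32_t)value;
}

int main(void)
{
        /* entry_base_unit = 100 ps, entry = 1500 -> 150000 ps -> 150 ns */
        printf("latency:   %u\n", normalize(1500, 100, 1));
        /* bandwidth entries keep their raw units */
        printf("bandwidth: %u\n", normalize(256, 100, 0));
        return 0;
}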
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index bc5a95665aa0..625394486459 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -27,7 +27,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa);
+
#else
+static inline u64
+cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return ULLONG_MAX;
+}
+static inline
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return NULL;
+}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 7d97790b893d..784843fa2a22 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -319,8 +319,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
else if (resource_contains(&cxlds->ram_res, res))
cxled->mode = CXL_DECODER_RAM;
else {
- dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
- cxled->cxld.id, cxled->dpa_res);
+ dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
cxled->mode = CXL_DECODER_MIXED;
}
@@ -519,8 +519,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
- &avail);
+ cxl_decoder_mode_name(cxled->mode), &avail);
rc = -ENOSPC;
goto out;
}
@@ -888,8 +887,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
- if (rc)
+ if (rc) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
+ port->id, cxld->id, ctrl);
return rc;
+ }
dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 9adda4795eb7..2626f3fff201 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
+ CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
@@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
return false;
break;
}
+ case CXL_MBOX_OP_CLEAR_LOG: {
+ const uuid_t *uuid = (uuid_t *)payload_in;
+
+ /*
+ * Restrict the 'Clear log' action to only apply to
+ * Vendor debug logs.
+ */
+ return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
+ }
default:
break;
}
@@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
{
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
- trace_cxl_general_media(cxlmd, type, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, &evt->dram);
- else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+ if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
- else
+ return;
+ }
+ if (event_type == CXL_CPER_EVENT_GENERIC) {
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+ return;
+ }
+
+ if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
+ u64 dpa, hpa = ULLONG_MAX;
+ struct cxl_region *cxlr;
+
+ /*
+ * These trace points are annotated with HPA and region
+ * translations. Take topology mutation locks and look up
+ * { HPA, REGION } from { DPA, MEMDEV } in the event record.
+ */
+ guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
+
+ dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);
+
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+ &evt->gen_media);
+ else if (event_type == CXL_CPER_EVENT_DRAM)
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
@@ -915,7 +951,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -946,24 +982,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .size_out = mds->payload_size,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -1296,7 +1330,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1308,16 +1341,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
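
The only behavioural tweak in the cxl_clear_event_record() hunk is the dev_dbg() index: the handle is stored with a post-increment, so handles[i] already names the next, still-empty slot. A throwaway userspace illustration with a made-up handle value:

#include <stdio.h>

int main(void)
{
        unsigned short handles[4] = { 0 };
        int i = 0;

        handles[i++] = 0x1234;  /* store at index 0, then i becomes 1 */

        /* handles[i] is the next, still-empty slot... */
        printf("handles[i]     = %#x\n", handles[i]);           /* 0 */
        /* ...handles[i - 1] is the entry that was just queued */
        printf("handles[i - 1] = %#x\n", handles[i - 1]);       /* 0x1234 */
        return 0;
}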
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index d4e259f3a7e9..0277726afd04 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
-struct cxl_dpa_to_region_context {
- struct cxl_region *cxlr;
- u64 dpa;
-};
-
-static int __cxl_dpa_to_region(struct device *dev, void *arg)
-{
- struct cxl_dpa_to_region_context *ctx = arg;
- struct cxl_endpoint_decoder *cxled;
- u64 dpa = ctx->dpa;
-
- if (!is_endpoint_decoder(dev))
- return 0;
-
- cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return 0;
-
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
- return 0;
-
- dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
- dev_name(&cxled->cxld.region->dev));
-
- ctx->cxlr = cxled->cxld.region;
-
- return 1;
-}
-
-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
-{
- struct cxl_dpa_to_region_context ctx;
- struct cxl_port *port;
-
- ctx = (struct cxl_dpa_to_region_context) {
- .dpa = dpa,
- };
- port = cxlmd->endpoint;
- if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
- device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
-
- return ctx.cxlr;
-}
-
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 0df09bd79408..8567dd11eaac 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -525,7 +525,7 @@ static int cxl_cdat_get_length(struct device *dev,
__le32 response[2];
int rc;
- rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS,
&request, sizeof(request),
&response, sizeof(response));
@@ -555,7 +555,7 @@ static int cxl_cdat_read_table(struct device *dev,
__le32 request = CDAT_DOE_REQ(entry_handle);
int rc;
- rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS,
&request, sizeof(request),
rsp, sizeof(*rsp) + remaining);
@@ -640,7 +640,7 @@ void read_cdat_data(struct cxl_port *port)
if (!pdev)
return;
- doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+ doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS);
if (!doe_mb) {
dev_dbg(dev, "No CDAT mailbox\n");
@@ -1045,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev)
return cxl_flit_size(pdev) * MEGA / bw;
}
+
+static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
+{
+ struct cxl_port *port = data;
+ struct cxl_decoder *cxld;
+ struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+ return 0;
+
+ cxlhdm = dev_get_drvdata(&port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+
+ return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
+}
+
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
+{
+ return device_for_each_child(&port->dev, port,
+ __cxl_endpoint_decoder_reset_detected);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 2b0cab556072..887ed6e358fb 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- * and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
{
- struct cxl_port *iter = port;
- struct cxl_dport *dport;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_latency += latency;
+ c[i].read_latency += latency;
+ }
+}
- if (!is_cxl_endpoint(port))
- return -EINVAL;
+static bool coordinates_valid(struct access_coordinate *c)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (c[i].read_bandwidth && c[i].write_bandwidth &&
+ c[i].read_latency && c[i].write_latency)
+ continue;
+ return false;
+ }
- dport = iter->parent_dport;
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- iter = to_cxl_port(iter->dev.parent);
- dport = iter->parent_dport;
+ return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+ c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
}
+}
- coord[ACCESS_COORDINATE_LOCAL] =
- dport->hb_coord[ACCESS_COORDINATE_LOCAL];
- coord[ACCESS_COORDINATE_CPU] =
- dport->hb_coord[ACCESS_COORDINATE_CPU];
+static void set_access_coordinates(struct access_coordinate *out,
+ struct access_coordinate *in)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ out[i] = in[i];
+}
- return 0;
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
@@ -2176,47 +2184,76 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port,
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
- struct access_coordinate c = {
- .read_bandwidth = UINT_MAX,
- .write_bandwidth = UINT_MAX,
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct access_coordinate c[] = {
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
};
struct cxl_port *iter = port;
struct cxl_dport *dport;
struct pci_dev *pdev;
+ struct device *dev;
unsigned int bw;
+ bool is_cxl_root;
if (!is_cxl_endpoint(port))
return -EINVAL;
- dport = iter->parent_dport;
-
/*
- * Exit the loop when the parent port of the current port is cxl root.
- * The iterative loop starts at the endpoint and gathers the
- * latency of the CXL link from the current iter to the next downstream
- * port each iteration. If the parent is cxl root then there is
- * nothing to gather.
+ * Skip the calculation for RCD. The expectation is that HMAT already
+ * covers the RCD case, since RCH does not support hotplug.
*/
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- cxl_coordinates_combine(&c, &c, &dport->sw_coord);
- c.write_latency += dport->link_latency;
- c.read_latency += dport->link_latency;
+ if (cxlmd->cxlds->rcd)
+ return 0;
- iter = to_cxl_port(iter->dev.parent);
+ /*
+ * Exit the loop when the parent port of the current iter port is cxl
+ * root. The iterative loop starts at the endpoint and gathers the
+ * latency of the CXL link from the current device/port to the connected
+ * downstream port each iteration.
+ */
+ do {
dport = iter->parent_dport;
- }
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There's no valid access_coordinate for a root port since RPs do not
+ * have CDAT and therefore need to be skipped.
+ */
+ if (!is_cxl_root) {
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+ }
+ add_latency(c, dport->link_latency);
+ } while (!is_cxl_root);
+
+ dport = iter->parent_dport;
+ /* Retrieve HB coords */
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+
+ dev = port->uport_dev->parent;
+ if (!dev_is_pci(dev))
+ return -ENODEV;
/* Get the calculated PCI paths bandwidth */
- pdev = to_pci_dev(port->uport_dev->parent);
+ pdev = to_pci_dev(dev);
bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
if (bw == 0)
return -ENXIO;
bw /= BITS_PER_BYTE;
- c.write_bandwidth = min(c.write_bandwidth, bw);
- c.read_bandwidth = min(c.read_bandwidth, bw);
-
- *coord = c;
+ set_min_bandwidth(c, bw);
+ set_access_coordinates(coord, c);
return 0;
}
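
The reworked cxl_endpoint_get_perf_coordinates() walks from the endpoint toward the CXL root, combining each hop's coordinates (latencies add, bandwidth takes the minimum of the path) and finally clamping bandwidth to what the PCIe link reports. The standalone model below mirrors only that combine-and-clamp arithmetic; the hop values and units are invented and it does not attempt the port traversal itself:

#include <stdio.h>

struct coord {
        unsigned int read_bw, write_bw, read_lat, write_lat;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static void combine(struct coord *out, const struct coord *hop)
{
        /* latency accumulates along the path, bandwidth is the narrowest link */
        out->read_lat += hop->read_lat;
        out->write_lat += hop->write_lat;
        out->read_bw = min_u(out->read_bw, hop->read_bw);
        out->write_bw = min_u(out->write_bw, hop->write_bw);
}

int main(void)
{
        struct coord c = { .read_bw = ~0u, .write_bw = ~0u };  /* like the UINT_MAX seed */
        struct coord hops[] = {
                { 16000, 16000, 200, 200 },     /* e.g. switch downstream port */
                { 32000, 32000, 150, 150 },     /* e.g. host bridge */
        };
        unsigned int pcie_bw = 8000;            /* stand-in for the PCIe path */

        for (unsigned int i = 0; i < sizeof(hops) / sizeof(hops[0]); i++)
                combine(&c, &hops[i]);

        /* final clamp, as set_min_bandwidth() does above */
        c.read_bw = min_u(c.read_bw, pcie_bw);
        c.write_bw = min_u(c.write_bw, pcie_bw);

        printf("lat %u/%u, bw %u/%u (made-up units)\n",
               c.read_lat, c.write_lat, c.read_bw, c.write_bw);
        return 0;
}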
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 5c186e0a39b9..00a9f0eef8dd 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2679,28 +2679,158 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
return rc;
}
+struct cxl_dpa_to_region_context {
+ struct cxl_region *cxlr;
+ u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+ struct cxl_dpa_to_region_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ u64 dpa = ctx->dpa;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return 0;
+
+ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ return 0;
+
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxled->cxld.region->dev));
+
+ ctx->cxlr = cxled->cxld.region;
+
+ return 1;
+}
+
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dpa_to_region_context ctx;
+ struct cxl_port *port;
+
+ ctx = (struct cxl_dpa_to_region_context) {
+ .dpa = dpa,
+ };
+ port = cxlmd->endpoint;
+ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+ return ctx.cxlr;
+}
+
+static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int gran = p->interleave_granularity;
+ int ways = p->interleave_ways;
+ u64 offset;
+
+ /* Is the hpa within this region at all */
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in region\n", hpa);
+ return false;
+ }
+
+ /* Is the hpa in an expected chunk for its pos(-ition) */
+ offset = hpa - p->res->start;
+ offset = do_div(offset, gran * ways);
+ if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
+ return true;
+
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
+
+ return false;
+}
+
+static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
+ struct cxl_region_params *p = &cxlr->params;
+ int pos = cxled->pos;
+ u16 eig = 0;
+ u8 eiw = 0;
+
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ /*
+ * The device position in the region interleave set was removed
+ * from the offset at HPA->DPA translation. To reconstruct the
+ * HPA, place the 'pos' in the offset.
+ *
+ * The placement of 'pos' in the HPA is determined by interleave
+ * ways and granularity and is defined in the CXL Spec 3.0 Section
+ * 8.2.4.19.13 Implementation Note: Device Decode Logic
+ */
+
+ /* Remove the dpa base */
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+
+ mask_upper = GENMASK_ULL(51, eig + 8);
+
+ if (eiw < 8) {
+ hpa_offset = (dpa_offset & mask_upper) << eiw;
+ hpa_offset |= pos << (eig + 8);
+ } else {
+ bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
+ bits_upper = bits_upper * 3;
+ hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
+ }
+
+ /* The lower bits remain unchanged */
+ hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+
+ /* Apply the hpa_offset to the region base address */
+ hpa = hpa_offset + p->res->start;
+
+ if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
+ return ULLONG_MAX;
+
+ return hpa;
+}
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxlmd == cxled_to_memdev(cxled))
+ break;
+ }
+ if (!cxled || cxlmd != cxled_to_memdev(cxled))
+ return ULLONG_MAX;
+
+ return cxl_dpa_to_hpa(dpa, cxlr, cxled);
+}
+
static struct lock_class_key cxl_pmem_region_key;
-static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_nvdimm_bridge *cxl_nvb;
- struct cxl_pmem_region *cxlr_pmem;
struct device *dev;
int i;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_pmem = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return -ENXIO;
- cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
- GFP_KERNEL);
- if (!cxlr_pmem) {
- cxlr_pmem = ERR_PTR(-ENOMEM);
- goto out;
- }
+ struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+ kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
+ if (!cxlr_pmem)
+ return -ENOMEM;
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
@@ -2718,10 +2848,8 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
*/
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
- if (!cxl_nvb) {
- cxlr_pmem = ERR_PTR(-ENODEV);
- goto out;
- }
+ if (!cxl_nvb)
+ return -ENODEV;
cxlr->cxl_nvb = cxl_nvb;
}
m->cxlmd = cxlmd;
@@ -2732,18 +2860,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
}
dev = &cxlr_pmem->dev;
- cxlr_pmem->cxlr = cxlr;
- cxlr->cxlr_pmem = cxlr_pmem;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
device_set_pm_not_required(dev);
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_pmem_region_type;
-out:
- up_read(&cxl_region_rwsem);
+ cxlr_pmem->cxlr = cxlr;
+ cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
- return cxlr_pmem;
+ return 0;
}
static void cxl_dax_region_release(struct device *dev)
@@ -2860,9 +2986,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
struct device *dev;
int rc;
- cxlr_pmem = cxl_pmem_region_alloc(cxlr);
- if (IS_ERR(cxlr_pmem))
- return PTR_ERR(cxlr_pmem);
+ rc = cxl_pmem_region_alloc(cxlr);
+ if (rc)
+ return rc;
+ cxlr_pmem = cxlr->cxlr_pmem;
cxl_nvb = cxlr->cxl_nvb;
dev = &cxlr_pmem->dev;
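
cxl_dpa_to_hpa() and cxl_trace_hpa(), moved into region.c above, rebuild an HPA by splicing the decoder's interleave position back into the offset. The userspace model below reproduces only that bit manipulation, with example parameters (2-way interleave so eiw = 1, 256-byte granularity so eig = 0, device at position 1); GENMASK_ULL is re-derived locally and every number is illustrative:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t dpa_to_hpa_offset(uint64_t dpa_offset, int pos, int eiw, int eig)
{
        uint64_t mask_upper = GENMASK_ULL(51, eig + 8);
        uint64_t hpa_offset;

        if (eiw < 8) {
                /* shift the upper bits up by eiw and splice 'pos' back in */
                hpa_offset = (dpa_offset & mask_upper) << eiw;
                hpa_offset |= (uint64_t)pos << (eig + 8);
        } else {
                /* 3-, 6- and 12-way interleave encodings */
                uint64_t bits_upper = (dpa_offset & mask_upper) >> (eig + 8);

                bits_upper *= 3;
                hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
        }

        /* bits below the interleave granularity pass through unchanged */
        hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
        return hpa_offset;
}

int main(void)
{
        /* DPA offset 0x300 on device 1 of a 2-way, 256B-granularity set */
        uint64_t hpa_offset = dpa_to_hpa_offset(0x300, 1, 1, 0);

        /* expect 0x700: region chunk 7, and 7 % 2 == 1, 7 / 2 * 256 == 0x300 */
        printf("hpa_offset = %#llx\n", (unsigned long long)hpa_offset);
        return 0;
}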
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f80955..e1082e749c69 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
@@ -313,7 +314,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
.resource = CXL_RESOURCE_NONE,
};
- regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+ regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
CXL_DVSEC_REG_LOCATOR);
if (!regloc)
return -ENXIO;
diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c
index d0403dc3c8ab..7f2a9dd0d0e3 100644
--- a/drivers/cxl/core/trace.c
+++ b/drivers/cxl/core/trace.c
@@ -6,94 +6,3 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-
-static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
-{
- struct cxl_region_params *p = &cxlr->params;
- int gran = p->interleave_granularity;
- int ways = p->interleave_ways;
- u64 offset;
-
- /* Is the hpa within this region at all */
- if (hpa < p->res->start || hpa > p->res->end) {
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in region\n", hpa);
- return false;
- }
-
- /* Is the hpa in an expected chunk for its pos(-ition) */
- offset = hpa - p->res->start;
- offset = do_div(offset, gran * ways);
- if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
- return true;
-
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
-
- return false;
-}
-
-static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
-{
- u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
- struct cxl_region_params *p = &cxlr->params;
- int pos = cxled->pos;
- u16 eig = 0;
- u8 eiw = 0;
-
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
-
- /*
- * The device position in the region interleave set was removed
- * from the offset at HPA->DPA translation. To reconstruct the
- * HPA, place the 'pos' in the offset.
- *
- * The placement of 'pos' in the HPA is determined by interleave
- * ways and granularity and is defined in the CXL Spec 3.0 Section
- * 8.2.4.19.13 Implementation Note: Device Decode Logic
- */
-
- /* Remove the dpa base */
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
- mask_upper = GENMASK_ULL(51, eig + 8);
-
- if (eiw < 8) {
- hpa_offset = (dpa_offset & mask_upper) << eiw;
- hpa_offset |= pos << (eig + 8);
- } else {
- bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
- bits_upper = bits_upper * 3;
- hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
- }
-
- /* The lower bits remain unchanged */
- hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
-
- /* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
-
- if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
- return ULLONG_MAX;
-
- return hpa;
-}
-
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
- u64 dpa)
-{
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_endpoint_decoder *cxled = NULL;
-
- for (int i = 0; i < p->nr_targets; i++) {
- cxled = p->targets[i];
- if (cxlmd == cxled_to_memdev(cxled))
- break;
- }
- if (!cxled || cxlmd != cxled_to_memdev(cxled))
- return ULLONG_MAX;
-
- return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index e5f13260fc52..07a0394b1d99 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
-#define CXL_DPA_FLAGS_MASK 0x3F
-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
+#define CXL_DPA_MASK GENMASK_ULL(63, 6)
#define CXL_DPA_VOLATILE BIT(0)
#define CXL_DPA_NOT_REPAIRABLE BIT(1)
@@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media,
__field(u8, channel)
__field(u32, device)
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
- __field(u16, validity_flags)
/* Following are out of order to pack trace record */
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
+ __field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media,
memcpy(__entry->comp_id, &rec->component_id,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s'",
+ "device=%x comp_id=%s validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
- show_valid_flags(__entry->validity_flags)
+ show_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram,
__field(u32, nibble_mask)
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram,
__entry->column = get_unaligned_le16(rec->column);
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
- "validity_flags='%s'",
+ "validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
@@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
- show_dram_valid_flags(__entry->validity_flags)
+ show_dram_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module,
#define cxl_poison_overflow(flags, time) \
(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
-
TRACE_EVENT(cxl_poison,
TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 534e25e2f0a4..603c0120cff8 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -12,6 +12,8 @@
#include <linux/node.h>
#include <linux/io.h>
+extern const struct nvdimm_security_ops *cxl_security_ops;
+
/**
* DOC: cxl objects
*
@@ -663,8 +665,7 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
@@ -675,8 +676,7 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
- struct access_coordinate sw_coord;
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
};
@@ -781,6 +781,11 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
+static inline int cxl_root_decoder_autoremove(struct device *host,
+ struct cxl_root_decoder *cxlrd)
+{
+ return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld);
+}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
/**
@@ -884,8 +889,6 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled);
@@ -895,6 +898,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2);
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 20fb3b35e89e..19aba81cdf13 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -401,7 +401,7 @@ enum cxl_devtype {
*/
struct cxl_dpa_perf {
struct range dpa_range;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
@@ -527,6 +527,9 @@ enum cxl_opcode {
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
+ CXL_MBOX_OP_GET_LOG_CAPS = 0x0402,
+ CXL_MBOX_OP_CLEAR_LOG = 0x0403,
+ CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 93992a1c8eec..4da07727ab9c 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -13,7 +13,6 @@
* "DVSEC" redundancies removed. When obvious, abbreviations may be used.
*/
#define PCI_DVSEC_HEADER1_LENGTH_MASK GENMASK(31, 20)
-#define PCI_DVSEC_VENDOR_ID_CXL 0x1E98
/* CXL 2.0 8.1.3: PCIe DVSEC for CXL Device */
#define CXL_DVSEC_PCIE_DEVICE 0
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 2ff361e756d6..e53646e9f2fb 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -817,7 +817,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->rcd = is_cxl_restricted(pdev);
cxlds->serial = pci_get_dsn(pdev);
cxlds->cxl_dvsec = pci_find_dvsec_capability(
- pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
+ pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
if (!cxlds->cxl_dvsec)
dev_warn(&pdev->dev,
"Device DVSEC not present, skip CXL.mem init\n");
@@ -957,11 +957,33 @@ static void cxl_error_resume(struct pci_dev *pdev)
dev->driver ? "successful" : "failed");
}
+static void cxl_reset_done(struct pci_dev *pdev)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct device *dev = &pdev->dev;
+
+ /*
+ * FLR is not expected to touch the HDM decoders and related
+ * registers. SBR, however, will wipe all device configurations.
+ * Issue a warning if there was an active decoder before the reset
+ * that no longer exists.
+ */
+ guard(device)(&cxlmd->dev);
+ if (cxlmd->endpoint &&
+ cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
+ dev_crit(dev, "SBR happened without memory regions removal.\n");
+ dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ }
+}
+
static const struct pci_error_handlers cxl_error_handlers = {
.error_detected = cxl_error_detected,
.slot_reset = cxl_slot_reset,
.resume = cxl_error_resume,
.cor_error_detected = cxl_cor_error_detected,
+ .reset_done = cxl_reset_done,
};
static struct pci_driver cxl_pci_driver = {
@@ -974,6 +996,75 @@ static struct pci_driver cxl_pci_driver = {
},
};
-module_pci_driver(cxl_pci_driver);
+#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
+static void cxl_handle_cper_event(enum cxl_event_type ev_type,
+ struct cxl_cper_event_rec *rec)
+{
+ struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ enum cxl_event_log_type log_type;
+ struct cxl_dev_state *cxlds;
+ unsigned int devfn;
+ u32 hdr_flags;
+
+ pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type,
+ device_id->segment_num, device_id->bus_num,
+ device_id->device_num, device_id->func_num);
+
+ devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
+ pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
+ device_id->bus_num, devfn);
+ if (!pdev)
+ return;
+
+ guard(device)(&pdev->dev);
+ if (pdev->driver != &cxl_pci_driver)
+ return;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ /* Fabricate a log type */
+ hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
+ log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
+
+ cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
+ &uuid_null, &rec->event);
+}
+
+static void cxl_cper_work_fn(struct work_struct *work)
+{
+ struct cxl_cper_work_data wd;
+
+ while (cxl_cper_kfifo_get(&wd))
+ cxl_handle_cper_event(wd.event_type, &wd.rec);
+}
+static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);
+
+static int __init cxl_pci_driver_init(void)
+{
+ int rc;
+
+ rc = pci_register_driver(&cxl_pci_driver);
+ if (rc)
+ return rc;
+
+ rc = cxl_cper_register_work(&cxl_cper_work);
+ if (rc)
+ pci_unregister_driver(&cxl_pci_driver);
+
+ return rc;
+}
+
+static void __exit cxl_pci_driver_exit(void)
+{
+ cxl_cper_unregister_work(&cxl_cper_work);
+ cancel_work_sync(&cxl_cper_work);
+ pci_unregister_driver(&cxl_pci_driver);
+}
+
+module_init(cxl_pci_driver_init);
+module_exit(cxl_pci_driver_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 7cb8994f8809..2ecdaee63021 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -11,8 +11,6 @@
#include "cxlmem.h"
#include "cxl.h"
-extern const struct nvdimm_security_ops *cxl_security_ops;
-
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *mds)
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 797e1ebff299..3ef9550bd2ca 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -192,7 +192,7 @@ static u64 dev_dax_size(struct dev_dax *dev_dax)
u64 size = 0;
int i;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
+ lockdep_assert_held(&dax_dev_rwsem);
for (i = 0; i < dev_dax->nr_range; i++)
size += range_len(&dev_dax->ranges[i].range);
@@ -302,7 +302,7 @@ static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
resource_size_t size = resource_size(&dax_region->res);
struct resource *res;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held(&dax_region_rwsem);
for_each_dax_region_resource(dax_region, res)
size -= resource_size(res);
@@ -447,7 +447,7 @@ static void trim_dev_dax_range(struct dev_dax *dev_dax)
struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
(unsigned long long)range->start,
(unsigned long long)range->end);
@@ -465,26 +465,17 @@ static void free_dev_dax_ranges(struct dev_dax *dev_dax)
trim_dev_dax_range(dev_dax);
}
-static void __unregister_dev_dax(void *dev)
+static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
dev_dbg(dev, "%s\n", __func__);
+ down_write(&dax_region_rwsem);
kill_dev_dax(dev_dax);
device_del(dev);
free_dev_dax_ranges(dev_dax);
put_device(dev);
-}
-
-static void unregister_dev_dax(void *dev)
-{
- if (rwsem_is_locked(&dax_region_rwsem))
- return __unregister_dev_dax(dev);
-
- if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
- return;
- __unregister_dev_dax(dev);
up_write(&dax_region_rwsem);
}
@@ -507,7 +498,7 @@ static int __free_dev_dax_id(struct dev_dax *dev_dax)
struct dax_region *dax_region;
int rc = dev_dax->id;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
+ lockdep_assert_held_write(&dax_dev_rwsem);
if (!dev_dax->dyn_id || dev_dax->id < 0)
return -1;
@@ -560,15 +551,10 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
if (!victim)
return -ENXIO;
- rc = down_write_killable(&dax_region_rwsem);
- if (rc)
- return rc;
- rc = down_write_killable(&dax_dev_rwsem);
- if (rc) {
- up_write(&dax_region_rwsem);
- return rc;
- }
+ device_lock(dev);
+ device_lock(victim);
dev_dax = to_dev_dax(victim);
+ down_write(&dax_dev_rwsem);
if (victim->driver || dev_dax_size(dev_dax))
rc = -EBUSY;
else {
@@ -589,11 +575,12 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
rc = -EBUSY;
}
up_write(&dax_dev_rwsem);
+ device_unlock(victim);
/* won the race to invalidate the device, clean it up */
if (do_del)
devm_release_action(dev, unregister_dev_dax, victim);
- up_write(&dax_region_rwsem);
+ device_unlock(dev);
put_device(victim);
return rc;
@@ -705,7 +692,7 @@ static void dax_mapping_release(struct device *dev)
put_device(parent);
}
-static void __unregister_dax_mapping(void *data)
+static void unregister_dax_mapping(void *data)
{
struct device *dev = data;
struct dax_mapping *mapping = to_dax_mapping(dev);
@@ -713,25 +700,12 @@ static void __unregister_dax_mapping(void *data)
dev_dbg(dev, "%s\n", __func__);
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
-
dev_dax->ranges[mapping->range_id].mapping = NULL;
mapping->range_id = -1;
device_unregister(dev);
}
-static void unregister_dax_mapping(void *data)
-{
- if (rwsem_is_locked(&dax_region_rwsem))
- return __unregister_dax_mapping(data);
-
- if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
- return;
- __unregister_dax_mapping(data);
- up_write(&dax_region_rwsem);
-}
-
static struct dev_dax_range *get_dax_range(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
@@ -818,7 +792,7 @@ static const struct attribute_group *dax_mapping_attribute_groups[] = {
NULL,
};
-static struct device_type dax_mapping_type = {
+static const struct device_type dax_mapping_type = {
.release = dax_mapping_release,
.groups = dax_mapping_attribute_groups,
};
@@ -830,7 +804,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
struct device *dev;
int rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
"region disabled\n"))
@@ -876,7 +850,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
struct resource *alloc;
int i, rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
/* handle the seed alloc special case */
if (!size) {
@@ -935,7 +909,7 @@ static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, r
struct device *dev = &dev_dax->dev;
int rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
return -EINVAL;
@@ -963,11 +937,11 @@ static ssize_t size_show(struct device *dev,
unsigned long long size;
int rc;
- rc = down_write_killable(&dax_dev_rwsem);
+ rc = down_read_interruptible(&dax_dev_rwsem);
if (rc)
return rc;
size = dev_dax_size(dev_dax);
- up_write(&dax_dev_rwsem);
+ up_read(&dax_dev_rwsem);
return sysfs_emit(buf, "%llu\n", size);
}
@@ -1204,7 +1178,6 @@ static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- rc = -ENXIO;
rc = down_write_killable(&dax_region_rwsem);
if (rc)
return rc;
@@ -1566,12 +1539,8 @@ err_id:
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
struct dev_dax *dev_dax;
- int rc;
-
- rc = down_write_killable(&dax_region_rwsem);
- if (rc)
- return ERR_PTR(rc);
+ down_write(&dax_region_rwsem);
dev_dax = __devm_create_dev_dax(data);
up_write(&dax_region_rwsem);
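
The dax/bus.c hunks above trade open-coded rwsem_is_locked() checks for lockdep annotations. As a hedged sketch with invented demo_* names: lockdep_assert_held() insists that the current task holds the lock (and compiles away without lockdep), while rwsem_is_locked() only says that somebody does, which is why the read/write-specific asserts are the stronger check:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_rwsem);
static unsigned long demo_size;

static unsigned long demo_read_size(void)
{
        /* warns (with lockdep enabled) unless *this* task holds demo_rwsem */
        lockdep_assert_held(&demo_rwsem);
        return demo_size;
}

static void demo_update_size(unsigned long new_size)
{
        /* the write-side variant used by trim_dev_dax_range() and friends */
        lockdep_assert_held_write(&demo_rwsem);
        demo_size = new_size;
}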
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 93ebedc5ec8c..eb61598247a9 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -329,14 +329,14 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
if ((off + len_align) < off)
goto out;
- addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
- pgoff, flags);
+ addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align,
+ pgoff, flags);
if (!IS_ERR_VALUE(addr_align)) {
addr_align += (off - addr_align) & (align - 1);
return addr_align;
}
out:
- return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
@@ -377,7 +377,7 @@ static const struct file_operations dax_fops = {
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
- .mmap_supported_flags = MAP_SYNC,
+ .fop_flags = FOP_MMAP_SYNC,
};
static void dev_dax_cdev_del(void *cdev)
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 42ee360cf4e3..4fe9d040e375 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -55,36 +55,14 @@ static LIST_HEAD(kmem_memory_types);
static struct memory_dev_type *kmem_find_alloc_memory_type(int adist)
{
- bool found = false;
- struct memory_dev_type *mtype;
-
- mutex_lock(&kmem_memory_type_lock);
- list_for_each_entry(mtype, &kmem_memory_types, list) {
- if (mtype->adistance == adist) {
- found = true;
- break;
- }
- }
- if (!found) {
- mtype = alloc_memory_type(adist);
- if (!IS_ERR(mtype))
- list_add(&mtype->list, &kmem_memory_types);
- }
- mutex_unlock(&kmem_memory_type_lock);
-
- return mtype;
+ guard(mutex)(&kmem_memory_type_lock);
+ return mt_find_alloc_memory_type(adist, &kmem_memory_types);
}
static void kmem_put_memory_types(void)
{
- struct memory_dev_type *mtype, *mtn;
-
- mutex_lock(&kmem_memory_type_lock);
- list_for_each_entry_safe(mtype, mtn, &kmem_memory_types, list) {
- list_del(&mtype->list);
- put_memory_type(mtype);
- }
- mutex_unlock(&kmem_memory_type_lock);
+ guard(mutex)(&kmem_memory_type_lock);
+ mt_put_memory_types(&kmem_memory_types);
}
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
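
The kmem conversion above relies on scope-based cleanup; a minimal sketch (hypothetical names, not part of this patch) of how guard(mutex) behaves:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

static int example_next_id(void)
{
	/* Unlocked automatically on every return path of this scope. */
	guard(mutex)(&example_lock);
	return ++example_count;
}

This is why the converted helpers above can return directly without an explicit mutex_unlock().
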
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index c1cc23bcb995..5edc522f715c 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -275,18 +275,16 @@ static int exynos_nocp_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_nocp_remove(struct platform_device *pdev)
+static void exynos_nocp_remove(struct platform_device *pdev)
{
struct exynos_nocp *nocp = platform_get_drvdata(pdev);
clk_disable_unprepare(nocp->clk);
-
- return 0;
}
static struct platform_driver exynos_nocp_driver = {
.probe = exynos_nocp_probe,
- .remove = exynos_nocp_remove,
+ .remove_new = exynos_nocp_remove,
.driver = {
.name = "exynos-nocp",
.of_match_table = exynos_nocp_id_match,
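
The devfreq conversions in this series all follow the same shape; a hedged, self-contained sketch of the .remove_new callback style (hypothetical example_* names):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* void return: the driver core ignores remove() errors anyway. */
}

static struct platform_driver example_driver = {
	.probe      = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
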
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 56bac4702006..7002df20a49e 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -692,18 +692,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_ppmu_remove(struct platform_device *pdev)
+static void exynos_ppmu_remove(struct platform_device *pdev)
{
struct exynos_ppmu *info = platform_get_drvdata(pdev);
clk_disable_unprepare(info->ppmu.clk);
-
- return 0;
}
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
- .remove = exynos_ppmu_remove,
+ .remove_new = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 245898f1a88e..00118580905a 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -467,7 +467,6 @@ static void exynos_bus_shutdown(struct platform_device *pdev)
devfreq_suspend_device(bus->devfreq);
}
-#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
@@ -495,11 +494,9 @@ static int exynos_bus_suspend(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_bus_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_bus_pm,
+ exynos_bus_suspend, exynos_bus_resume);
static const struct of_device_id exynos_bus_of_match[] = {
{ .compatible = "samsung,exynos-bus", },
@@ -512,7 +509,7 @@ static struct platform_driver exynos_bus_platdrv = {
.shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
- .pm = &exynos_bus_pm,
+ .pm = pm_sleep_ptr(&exynos_bus_pm),
.of_match_table = exynos_bus_of_match,
},
};
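
A small sketch (not part of the patch; example_* names are made up) of the DEFINE_SIMPLE_DEV_PM_OPS/pm_sleep_ptr pairing the exynos-bus hunks switch to, which lets the #ifdef CONFIG_PM_SLEEP guards go away:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_suspend, example_resume);

static struct platform_driver example_pm_driver = {
	.driver = {
		.name = "example-pm",
		/* NULL when CONFIG_PM_SLEEP is off; the unused callbacks
		 * are then discarded by the compiler, no #ifdef needed. */
		.pm   = pm_sleep_ptr(&example_pm_ops),
	},
};
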
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 11bc3d03494c..7ad5225b0381 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -392,7 +392,7 @@ out_free_resources:
return ret;
}
-static int mtk_ccifreq_remove(struct platform_device *pdev)
+static void mtk_ccifreq_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ccifreq_drv *drv;
@@ -405,8 +405,6 @@ static int mtk_ccifreq_remove(struct platform_device *pdev)
regulator_disable(drv->proc_reg);
if (drv->sram_reg)
regulator_disable(drv->sram_reg);
-
- return 0;
}
static const struct mtk_ccifreq_platform_data mt8183_platform_data = {
@@ -432,7 +430,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
static struct platform_driver mtk_ccifreq_platdrv = {
.probe = mtk_ccifreq_probe,
- .remove = mtk_ccifreq_remove,
+ .remove_new = mtk_ccifreq_remove,
.driver = {
.name = "mtk-ccifreq",
.of_match_table = mtk_ccifreq_machines,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index fd2c5ffedf41..d405cee92c25 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -459,13 +459,11 @@ err_edev:
return ret;
}
-static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+static void rk3399_dmcfreq_remove(struct platform_device *pdev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
devfreq_event_disable_edev(dmcfreq->edev);
-
- return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
@@ -476,7 +474,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
- .remove = rk3399_dmcfreq_remove,
+ .remove_new = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index 13d32213139f..bcf654f4ff96 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -458,7 +458,7 @@ err_disable_bus:
return dev_err_probe(dev, ret, err);
}
-static int sun8i_a33_mbus_remove(struct platform_device *pdev)
+static void sun8i_a33_mbus_remove(struct platform_device *pdev)
{
struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
unsigned long initial_freq = priv->profile.initial_freq;
@@ -475,8 +475,6 @@ static int sun8i_a33_mbus_remove(struct platform_device *pdev)
clk_rate_exclusive_put(priv->clk_mbus);
clk_rate_exclusive_put(priv->clk_dram);
clk_disable_unprepare(priv->clk_bus);
-
- return 0;
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
@@ -497,7 +495,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
static struct platform_driver sun8i_a33_mbus_driver = {
.probe = sun8i_a33_mbus_probe,
- .remove = sun8i_a33_mbus_remove,
+ .remove_new = sun8i_a33_mbus_remove,
.driver = {
.name = "sun8i-a33-mbus",
.of_match_table = sun8i_a33_mbus_of_match,
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8fe5aa67b167..8892bc701a66 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -35,12 +35,35 @@
static inline int is_dma_buf_file(struct file *);
-struct dma_buf_list {
- struct list_head head;
- struct mutex lock;
-};
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static DEFINE_MUTEX(debugfs_list_mutex);
+static LIST_HEAD(debugfs_list);
-static struct dma_buf_list db_list;
+static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+{
+ mutex_lock(&debugfs_list_mutex);
+ list_add(&dmabuf->list_node, &debugfs_list);
+ mutex_unlock(&debugfs_list_mutex);
+}
+
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+{
+ if (!dmabuf)
+ return;
+
+ mutex_lock(&debugfs_list_mutex);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&debugfs_list_mutex);
+}
+#else
+static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+{
+}
+
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+{
+}
+#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -89,17 +112,10 @@ static void dma_buf_release(struct dentry *dentry)
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
- struct dma_buf *dmabuf;
-
if (!is_dma_buf_file(file))
return -EINVAL;
- dmabuf = file->private_data;
- if (dmabuf) {
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
- }
+ __dma_buf_debugfs_list_del(file->private_data);
return 0;
}
@@ -672,9 +688,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- mutex_lock(&db_list.lock);
- list_add(&dmabuf->list_node, &db_list.head);
- mutex_unlock(&db_list.lock);
+ __dma_buf_debugfs_list_add(dmabuf);
return dmabuf;
@@ -1611,7 +1625,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&db_list.lock);
+ ret = mutex_lock_interruptible(&debugfs_list_mutex);
if (ret)
return ret;
@@ -1620,7 +1634,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &db_list.head, list_node) {
+ list_for_each_entry(buf_obj, &debugfs_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1657,11 +1671,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&debugfs_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&debugfs_list_mutex);
return ret;
}
@@ -1718,8 +1732,6 @@ static int __init dma_buf_init(void)
if (IS_ERR(dma_buf_mnt))
return PTR_ERR(dma_buf_mnt);
- mutex_init(&db_list.lock);
- INIT_LIST_HEAD(&db_list.head);
dma_buf_init_debugfs();
return 0;
}
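
The dma-buf change compiles the debugfs bookkeeping out entirely when debugfs is disabled; a minimal illustration of that stub pattern (hypothetical example_track_buf, not part of the patch):

#include <linux/dma-buf.h>

#if IS_ENABLED(CONFIG_DEBUG_FS)
static void example_track_buf(struct dma_buf *dmabuf)
{
	/* Real list bookkeeping lives here, as in the hunks above. */
}
#else
static void example_track_buf(struct dma_buf *dmabuf)
{
}
#endif

Callers stay free of #ifdefs either way.
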
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 9c2a0c082a76..ed4b323886e4 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
return -ENOMEM;
chain = mock_chain(NULL, f, 1);
- if (!chain)
+ if (chain)
+ dma_fence_enable_sw_signaling(chain);
+ else
err = -ENOMEM;
- dma_fence_enable_sw_signaling(chain);
-
dma_fence_signal(f);
dma_fence_put(f);
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index dfd40d14e408..802ca916f05f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o
+CFLAGS_fsl-edma-trace.o := -I$(src)
obj-$(CONFIG_FSL_DMA) += fsldma.o
-fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fbf048f432bf..73a5cfb4da8a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2855,8 +2855,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
- GFP_KERNEL);
+ pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans),
+ GFP_KERNEL);
if (!pl08x->phy_chans) {
ret = -ENOMEM;
goto out_no_phychans;
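
The pl08x hunk is the usual kzalloc(n * size) to kcalloc(n, size) conversion; a trivial sketch of the difference (hypothetical example_chan):

#include <linux/slab.h>

struct example_chan {
	int id;
};

static struct example_chan *example_alloc_channels(unsigned int n)
{
	/* kcalloc() checks n * sizeof() for overflow and returns NULL,
	 * instead of silently allocating an undersized buffer. */
	return kcalloc(n, sizeof(struct example_chan), GFP_KERNEL);
}
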
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 4e339c04fc1e..bdb752f11869 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
return 0;
}
+static void axi_dmac_tasklet_kill(void *task)
+{
+ tasklet_kill(task);
+}
+
+static void axi_dmac_free_dma_controller(void *of_node)
+{
+ of_dma_controller_free(of_node);
+}
+
static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
@@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->clk = devm_clk_get(&pdev->dev, NULL);
+ dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dmac->clk))
return PTR_ERR(dmac->clk);
- ret = clk_prepare_enable(dmac->clk);
- if (ret < 0)
- return ret;
-
version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
@@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_parse_dt(&pdev->dev, dmac);
if (ret < 0)
- goto err_clk_disable;
+ return ret;
INIT_LIST_HEAD(&dmac->chan.active_descs);
@@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_detect_caps(dmac, version);
if (ret)
- goto err_clk_disable;
+ return ret;
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
@@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev)
!AXI_DMAC_DST_COHERENT_GET(ret)) {
dev_err(dmac->dma_dev.dev,
"Coherent DMA not supported in hardware");
- ret = -EINVAL;
- goto err_clk_disable;
+ return -EINVAL;
}
}
- ret = dma_async_device_register(dma_dev);
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Put the action in here so it gets done before unregistering the DMA
+ * device.
+ */
+ ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
+ &dmac->chan.vchan.task);
if (ret)
- goto err_clk_disable;
+ return ret;
ret = of_dma_controller_register(pdev->dev.of_node,
of_dma_xlate_by_chan_id, dma_dev);
if (ret)
- goto err_unregister_device;
+ return ret;
- ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
- dev_name(&pdev->dev), dmac);
+ ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller,
+ pdev->dev.of_node);
if (ret)
- goto err_unregister_of;
+ return ret;
- platform_set_drvdata(pdev, dmac);
+ ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), dmac);
+ if (ret)
+ return ret;
regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
&axi_dmac_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_free_irq;
- }
-
- return 0;
-
-err_free_irq:
- free_irq(dmac->irq, dmac);
-err_unregister_of:
- of_dma_controller_free(pdev->dev.of_node);
-err_unregister_device:
- dma_async_device_unregister(&dmac->dma_dev);
-err_clk_disable:
- clk_disable_unprepare(dmac->clk);
-
- return ret;
-}
-
-static void axi_dmac_remove(struct platform_device *pdev)
-{
- struct axi_dmac *dmac = platform_get_drvdata(pdev);
- of_dma_controller_free(pdev->dev.of_node);
- free_irq(dmac->irq, dmac);
- tasklet_kill(&dmac->chan.vchan.task);
- dma_async_device_unregister(&dmac->dma_dev);
- clk_disable_unprepare(dmac->clk);
+ return PTR_ERR_OR_ZERO(regmap);
}
static const struct of_device_id axi_dmac_of_match_table[] = {
@@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = {
.of_match_table = axi_dmac_of_match_table,
},
.probe = axi_dmac_probe,
- .remove_new = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);
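
The axi-dmac rework drops its remove() callback by registering teardown steps with the devres machinery; a minimal sketch of that pattern (example_* names are hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>

static void example_kill_tasklet(void *data)
{
	tasklet_kill(data);
}

static int example_register_teardown(struct device *dev,
				     struct tasklet_struct *t)
{
	/* Runs example_kill_tasklet() automatically on unbind, in reverse
	 * registration order; on failure it runs immediately and the error
	 * is returned, so probe() needs no goto-based unwind. */
	return devm_add_action_or_reset(dev, example_kill_tasklet, t);
}
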
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a86a81ff0caa..fffafa86d964 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc);
return NULL;
}
+ desc->nr_hw_descs = num;
return desc;
}
@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- int count = atomic_read(&chan->descs_allocated);
+ int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc;
int descs_put;
@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */
list_del(&vd->node);
vchan_cookie_complete(vd);
-
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
}
out:
@@ -1445,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return 0;
}
+static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
+{
+ int irq_count = platform_irq_count(pdev);
+ int ret;
+
+ for (int i = 0; i < irq_count; i++) {
+ chip->irq[i] = platform_get_irq(pdev, i);
+ if (chip->irq[i] < 0)
+ return chip->irq[i];
+ ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dw_probe(struct platform_device *pdev)
{
struct axi_dma_chip *chip;
@@ -1471,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
chip->dw->hdata = hdata;
- chip->irq = platform_get_irq(pdev, 0);
- if (chip->irq < 0)
- return chip->irq;
-
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
@@ -1515,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev)
if (!dw->chan)
return -ENOMEM;
- ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ ret = axi_req_irqs(pdev, chip);
if (ret)
return ret;
@@ -1629,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev)
pm_runtime_disable(chip->dev);
axi_dma_suspend(chip);
- devm_free_irq(chip->dev, chip->irq, chip);
+ for (i = 0; i < DMAC_MAX_CHANNELS; i++)
+ if (chip->irq[i] > 0)
+ devm_free_irq(chip->dev, chip->irq[i], chip);
of_dma_controller_free(chip->dev->of_node);
@@ -1653,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = {
}, {
.compatible = "starfive,jh7110-axi-dma",
.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+ }, {
+ .compatible = "starfive,jh8100-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_RESETS,
},
{}
};
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 454904d99654..b842e6a8d90d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -65,7 +65,7 @@ struct dw_axi_dma {
struct axi_dma_chip {
struct device *dev;
- int irq;
+ int irq[DMAC_MAX_CHANNELS];
void __iomem *regs;
void __iomem *apb_regs;
struct clk *core_clk;
@@ -104,6 +104,7 @@ struct axi_dma_desc {
u32 completed_blocks;
u32 length;
u32 period_len;
+ u32 nr_hw_descs;
};
struct axi_dma_chan_config {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 5a8061a307cd..36384d019263 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
for (i = 0; i < priv->num_pairs; i++) {
err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- i, &priv->rx_queue_attr[i]);
+ i, 0, &priv->rx_queue_attr[i]);
if (err) {
dev_err(dev, "dpdmai_get_rx_queue() failed\n");
goto exit;
@@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- i, &priv->tx_fqid[i]);
+ i, 0, &priv->tx_queue_attr[i]);
if (err) {
dev_err(dev, "dpdmai_get_tx_queue() failed\n");
goto exit;
}
- ppriv->req_fqid = priv->tx_fqid[i];
- ppriv->prio = i;
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY;
ppriv->priv = priv;
ppriv++;
}
@@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
rx_queue_cfg.dest_cfg.priority = ppriv->prio;
err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- rx_queue_cfg.dest_cfg.priority,
+ rx_queue_cfg.dest_cfg.priority, 0,
&rx_queue_cfg);
if (err) {
dev_err(dev, "dpdmai_set_rx_queue() failed\n");
@@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
for (i = 0; i < dpaa2_qdma->n_chans; i++) {
dpaa2_chan = &dpaa2_qdma->chans[i];
dpaa2_chan->qdma = dpaa2_qdma;
- dpaa2_chan->fqid = priv->tx_fqid[i % num];
+ dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid;
dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
spin_lock_init(&dpaa2_chan->queue_lock);
@@ -802,7 +802,7 @@ static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
dpaa2_dpdmai_dpio_unbind(priv);
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
- dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
+ dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle);
}
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 03e2f4e0baca..2c80077cb7c0 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -6,6 +6,7 @@
#define DPAA2_QDMA_STORE_SIZE 16
#define NUM_CH 8
+#define DPAA2_QDMA_DEFAULT_PRIORITY 0
struct dpaa2_qdma_sd_d {
u32 rsv:32;
@@ -122,8 +123,8 @@ struct dpaa2_qdma_priv {
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv_per_prio *ppriv;
- struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
- u32 tx_fqid[DPDMAI_PRIO_NUM];
+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
+ struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
};
struct dpaa2_qdma_priv_per_prio {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index 878662aaa1c2..36897b41ee7e 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -1,47 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "dpdmai.h"
+#define DEST_TYPE_MASK 0xF
+
struct dpdmai_rsp_get_attributes {
__le32 id;
u8 num_of_priorities;
- u8 pad0[3];
+ u8 num_of_queues;
+ u8 pad0[2];
__le16 major;
__le16 minor;
};
struct dpdmai_cmd_queue {
__le32 dest_id;
- u8 priority;
- u8 queue;
+ u8 dest_priority;
+ union {
+ u8 queue;
+ u8 pri;
+ };
u8 dest_type;
- u8 pad;
+ u8 queue_idx;
__le64 user_ctx;
union {
__le32 options;
__le32 fqid;
};
-};
+} __packed;
struct dpdmai_rsp_get_tx_queue {
__le64 pad;
__le32 fqid;
};
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+struct dpdmai_cmd_open {
+ __le32 dpdmai_id;
+} __packed;
-/* cmd, param, offset, width, type, arg_name */
-#define DPDMAI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
- MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
-} while (0)
+struct dpdmai_cmd_destroy {
+ __le32 dpdmai_id;
+} __packed;
static inline u64 mc_enc(int lsoffset, int width, u64 val)
{
@@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val)
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token)
{
+ struct dpdmai_cmd_open *cmd_params;
struct fsl_mc_command cmd = { 0 };
- __le64 *cmd_dpdmai_id;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
cmd_flags, 0);
- cmd_dpdmai_id = cmd.params;
- *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+ cmd_params = (struct dpdmai_cmd_open *)&cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -116,65 +121,26 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
EXPORT_SYMBOL_GPL(dpdmai_close);
/**
- * dpdmai_create() - Create the DPDMAI object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
- *
- * Create the DPDMAI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpdmai_open() function to get an authentication
- * token first.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
- const struct dpdmai_cfg *cfg, u16 *token)
-{
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
- cmd_flags, 0);
- DPDMAI_CMD_CREATE(cmd, cfg);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
-
- return 0;
-}
-
-/**
* dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: The object id; it must be a valid id within the container that created this object;
* @token: Token of DPDMAI object
*
* Return: '0' on Success; error code otherwise.
*/
-int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token)
{
+ struct dpdmai_cmd_destroy *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
cmd_flags, token);
+ cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
@@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
attr->version.major = le16_to_cpu(rsp_params->major);
attr->version.minor = le16_to_cpu(rsp_params->minor);
attr->num_of_priorities = rsp_params->num_of_priorities;
+ attr->num_of_queues = rsp_params->num_of_queues;
return 0;
}
@@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @cfg: Rx queue configuration
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
{
struct dpdmai_cmd_queue *cmd_params;
@@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
- cmd_params->priority = cfg->dest_cfg.priority;
- cmd_params->queue = priority;
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->pri = priority;
cmd_params->dest_type = cfg->dest_cfg.dest_type;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, struct dpdmai_rx_queue_attr *attr)
{
struct dpdmai_cmd_queue *cmd_params;
@@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* retrieve response parameters */
attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
- attr->dest_cfg.priority = cmd_params->priority;
- attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->dest_cfg.priority = cmd_params->dest_priority;
+ attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type);
attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
attr->fqid = le32_to_cpu(cmd_params->fqid);
@@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
- * @fqid: Returned Tx queue
+ * @attr: Returned DMA Tx queue attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u8 priority, u32 *fqid)
+ u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr)
{
struct dpdmai_rsp_get_tx_queue *rsp_params;
struct dpdmai_cmd_queue *cmd_params;
@@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
/* retrieve response parameters */
rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
- *fqid = le32_to_cpu(rsp_params->fqid);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
return 0;
}
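
The dest_type handling above now goes through FIELD_GET(); a small sketch of that helper with a made-up mask (EXAMPLE_DEST_TYPE_MASK is illustrative, the patch uses DEST_TYPE_MASK = 0xF):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_DEST_TYPE_MASK	GENMASK(3, 0)

static u8 example_dest_type(u8 raw)
{
	/* FIELD_GET() masks the value and shifts it down by the mask's
	 * lowest set bit, here extracting the low nibble. */
	return FIELD_GET(EXAMPLE_DEST_TYPE_MASK, raw);
}
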
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
index b13b9bf0c003..3fe7d8327366 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -5,14 +5,19 @@
#define __FSL_DPDMAI_H
/* DPDMAI Version */
-#define DPDMAI_VER_MAJOR 2
-#define DPDMAI_VER_MINOR 2
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 3
-#define DPDMAI_CMD_BASE_VERSION 0
+#define DPDMAI_CMD_BASE_VERSION 1
#define DPDMAI_CMD_ID_OFFSET 4
-#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
- DPDMAI_CMD_BASE_VERSION)
+/*
+ * Maximum number of Tx/Rx queues per DPDMAI object
+ */
+#define DPDMAI_MAX_QUEUE_NUM 8
+
+#define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v))
+#define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION)
/* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
@@ -26,18 +31,9 @@
#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
-#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
-#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
-#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
-#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
-#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
-#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
-#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
-#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
-
-#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
-#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
-#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2)
#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
@@ -49,30 +45,32 @@
* Contains initialization APIs and runtime control APIs for DPDMAI
*/
-/**
+/*
* Maximum number of Tx/Rx priorities per DPDMAI object
*/
#define DPDMAI_PRIO_NUM 2
/* DPDMAI queue modification options */
-/**
+/*
* Select to modify the user's context associated with the queue
*/
#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
-/**
+/*
* Select to modify the queue's destination
*/
#define DPDMAI_QUEUE_OPT_DEST 0x2
/**
* struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @num_queues: Number of DMA queues
* @priorities: Priorities for the DMA hardware processing; valid priorities are
* configured with values 1-8; the entry following last valid entry
* should be configured with 0
*/
struct dpdmai_cfg {
+ u8 num_queues;
u8 priorities[DPDMAI_PRIO_NUM];
};
@@ -80,20 +78,19 @@ struct dpdmai_cfg {
* struct dpdmai_attr - Structure representing DPDMAI attributes
* @id: DPDMAI object ID
* @version: DPDMAI version
+ * @version.major: DPDMAI major version
+ * @version.minor: DPDMAI minor version
* @num_of_priorities: number of priorities
+ * @num_of_queues: number of DMA queues
*/
struct dpdmai_attr {
int id;
- /**
- * struct version - DPDMAI version
- * @major: DPDMAI major version
- * @minor: DPDMAI minor version
- */
struct {
u16 major;
u16 minor;
} version;
u8 num_of_priorities;
+ u8 num_of_queues;
};
/**
@@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr {
u32 fqid;
};
+struct dpdmai_tx_queue_attr {
+ u32 fqid;
+};
+
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
- const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, struct dpdmai_attr *attr);
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+ u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- u8 priority, struct dpdmai_rx_queue_attr *attr);
+ u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr);
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u8 priority, u32 *fqid);
+ u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr);
#endif /* __FSL_DPDMAI_H */
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b18faa7cfedb..3af430787315 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -3,6 +3,8 @@
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan);
val = edma_readl_chreg(fsl_chan, ch_sbr);
- /* Remote/local swapped wrongly on iMX8 QM Audio edma */
- if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
- if (!fsl_chan->is_rxchan)
- val |= EDMA_V3_CH_SBR_RD;
- else
- val |= EDMA_V3_CH_SBR_WR;
- } else {
- if (fsl_chan->is_rxchan)
- val |= EDMA_V3_CH_SBR_RD;
- else
- val |= EDMA_V3_CH_SBR_WR;
- }
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
if (fsl_chan->is_remote)
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
@@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
csr |= EDMA_TCD_CSR_START;
fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
+
+ trace_edma_fill_tcd(fsl_chan, tcd);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -810,6 +806,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_prepare_enable(fsl_chan->clk);
+
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
@@ -838,6 +837,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_disable_unprepare(fsl_chan->clk);
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 7bf0aba471a8..ac66222c1604 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -151,7 +151,6 @@ struct fsl_edma_chan {
enum dma_status status;
enum fsl_edma_pm_state pm_state;
bool idle;
- u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct dma_slave_config cfg;
@@ -195,8 +194,6 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_HAS_PD BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
-/* imx8 QM audio edma remote local swapped */
-#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
@@ -238,7 +235,6 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
- struct clk *chclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
@@ -250,13 +246,17 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans);
};
+static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
+{
+ return fsl_chan->edma->drvdata->flags;
+}
+
#define edma_read_tcdreg_c(chan, _tcd, __name) \
-(sizeof((_tcd)->__name) == sizeof(u64) ? \
- edma_readq(chan->edma, &(_tcd)->__name) : \
- ((sizeof((_tcd)->__name) == sizeof(u32)) ? \
- edma_readl(chan->edma, &(_tcd)->__name) : \
- edma_readw(chan->edma, &(_tcd)->__name) \
- ))
+_Generic(((_tcd)->__name), \
+ __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \
+ __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \
+ __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \
+ )
#define edma_read_tcdreg(chan, __name) \
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
@@ -264,23 +264,13 @@ struct fsl_edma_engine {
edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
)
-#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
-do { \
- switch (sizeof(_tcd->__name)) { \
- case sizeof(u64): \
- edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u32): \
- edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u16): \
- edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u8): \
- edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
- break; \
- } \
-} while (0)
+#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
+_Generic((_tcd->__name), \
+ __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \
+ __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \
+ __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \
+ __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \
+ )
#define edma_write_tcdreg(chan, val, __name) \
do { \
@@ -321,9 +311,11 @@ do { \
(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_le_to_cpu(x) \
-(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \
- (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \
- le16_to_cpu((__force __le16)(x))))
+_Generic((x), \
+ __le64 : le64_to_cpu((x)), \
+ __le32 : le32_to_cpu((x)), \
+ __le16 : le16_to_cpu((x)) \
+)
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
@@ -331,19 +323,11 @@ do { \
fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
-do { \
- switch (sizeof((_tcd)->_field)) { \
- case sizeof(u64): \
- *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
- break; \
- case sizeof(u32): \
- *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
- break; \
- case sizeof(u16): \
- *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
- break; \
- } \
-} while (0)
+_Generic(((_tcd)->_field), \
+ __le64 : (_tcd)->_field = cpu_to_le64(_val), \
+ __le32 : (_tcd)->_field = cpu_to_le32(_val), \
+ __le16 : (_tcd)->_field = cpu_to_le16(_val) \
+)
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
do { \
@@ -353,6 +337,9 @@ do { \
fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
} while (0)
+/* Needed after struct definition */
+#include "fsl-edma-trace.h"
+
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
@@ -371,23 +358,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
h = ioread32(addr + 4);
}
+ trace_edma_readl(edma, addr, l);
+ trace_edma_readl(edma, addr + 4, h);
+
return (h << 32) | l;
}
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
+ u32 val;
+
if (edma->big_endian)
- return ioread32be(addr);
+ val = ioread32be(addr);
else
- return ioread32(addr);
+ val = ioread32(addr);
+
+ trace_edma_readl(edma, addr, val);
+
+ return val;
}
static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
+ u16 val;
+
if (edma->big_endian)
- return ioread16be(addr);
+ val = ioread16be(addr);
else
- return ioread16(addr);
+ val = ioread16(addr);
+
+ trace_edma_readw(edma, addr, val);
+
+ return val;
}
static inline void edma_writeb(struct fsl_edma_engine *edma,
@@ -398,6 +400,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma,
iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
else
iowrite8(val, addr);
+
+ trace_edma_writeb(edma, addr, val);
}
static inline void edma_writew(struct fsl_edma_engine *edma,
@@ -408,6 +412,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma,
iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
+
+ trace_edma_writew(edma, addr, val);
}
static inline void edma_writel(struct fsl_edma_engine *edma,
@@ -417,6 +423,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32be(val, addr);
else
iowrite32(val, addr);
+
+ trace_edma_writel(edma, addr, val);
}
static inline void edma_writeq(struct fsl_edma_engine *edma,
@@ -429,6 +437,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma,
iowrite32(val & 0xFFFFFFFF, addr);
iowrite32(val >> 32, addr + 4);
}
+
+ trace_edma_writel(edma, addr, val & 0xFFFFFFFF);
+ trace_edma_writel(edma, addr + 4, val >> 32);
}
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
@@ -436,11 +447,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
-static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
-{
- return fsl_chan->edma->drvdata->flags;
-}
-
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 402f0058a180..391e4f13dfeb 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 2)
return NULL;
- mutex_lock(&fsl_edma->fsl_edma_mutex);
+ guard(mutex)(&fsl_edma->fsl_edma_mutex);
+
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
@@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (chan) {
chan->device->privatecnt++;
fsl_chan = to_fsl_edma_chan(chan);
- fsl_chan->slave_id = dma_spec->args[1];
- fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
+ fsl_chan->srcid = dma_spec->args[1];
+
+ if (!fsl_chan->srcid) {
+ dev_err(&fsl_chan->pdev->dev, "Invalid srcid %d\n",
+ fsl_chan->srcid);
+ return NULL;
+ }
+
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid,
true);
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
}
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
@@ -342,10 +348,13 @@ static struct fsl_edma_drvdata imx8qm_data = {
.setup_irq = fsl_edma3_irq_init,
};
-static struct fsl_edma_drvdata imx8qm_audio_data = {
- .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+static struct fsl_edma_drvdata imx8ulp_data = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
+ FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
+ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
+ .mux_skip = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
@@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
- { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
@@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
u32 chan_mask[2] = {0, 0};
+ char clk_name[36];
struct edma_regs *regs;
int chans;
int ret, i;
@@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
}
- if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
- fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
- if (IS_ERR(fsl_edma->chclk)) {
- dev_err(&pdev->dev, "Missing MP block clock.\n");
- return PTR_ERR(fsl_edma->chclk);
- }
- }
-
ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
if (ret > 0) {
@@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
- fsl_chan->slave_id = 0;
+ fsl_chan->srcid = 0;
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
@@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
+ fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
+ (const char *)clk_name);
+
+ if (IS_ERR(fsl_chan->clk))
+ return PTR_ERR(fsl_chan->clk);
+ }
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_disable_unprepare(fsl_chan->clk);
}
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
@@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev)
continue;
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
- if (fsl_chan->slave_id != 0)
- fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
+ if (fsl_chan->srcid != 0)
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
}
if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
diff --git a/drivers/dma/fsl-edma-trace.c b/drivers/dma/fsl-edma-trace.c
new file mode 100644
index 000000000000..28300ad80bb7
--- /dev/null
+++ b/drivers/dma/fsl-edma-trace.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define CREATE_TRACE_POINTS
+#include "fsl-edma-common.h"
diff --git a/drivers/dma/fsl-edma-trace.h b/drivers/dma/fsl-edma-trace.h
new file mode 100644
index 000000000000..d3541301a247
--- /dev/null
+++ b/drivers/dma/fsl-edma-trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2023 NXP.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsl_edma
+
+#if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __LINUX_FSL_EDMA_TRACE
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(edma_log_io,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value),
+ TP_STRUCT__entry(
+ __field(struct fsl_edma_engine *, edma)
+ __field(void __iomem *, addr)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->edma = edma;
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("offset %08x: value %08x",
+ (u32)(__entry->addr - __entry->edma->membase), __entry->value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readl,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writel,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readw,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writew,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readb,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writeb,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DECLARE_EVENT_CLASS(edma_log_tcd,
+ TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
+ TP_ARGS(chan, tcd),
+ TP_STRUCT__entry(
+ __field(u64, saddr)
+ __field(u16, soff)
+ __field(u16, attr)
+ __field(u32, nbytes)
+ __field(u64, slast)
+ __field(u64, daddr)
+ __field(u16, doff)
+ __field(u16, citer)
+ __field(u64, dlast_sga)
+ __field(u16, csr)
+ __field(u16, biter)
+
+ ),
+ TP_fast_assign(
+ __entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr),
+ __entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff),
+ __entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr),
+ __entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes),
+ __entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast),
+ __entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr),
+ __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff),
+ __entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer),
+ __entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga),
+ __entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr),
+ __entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter);
+ ),
+ TP_printk("\n==== TCD =====\n"
+ " saddr: 0x%016llx\n"
+ " soff: 0x%04x\n"
+ " attr: 0x%04x\n"
+ " nbytes: 0x%08x\n"
+ " slast: 0x%016llx\n"
+ " daddr: 0x%016llx\n"
+ " doff: 0x%04x\n"
+ " citer: 0x%04x\n"
+ " dlast: 0x%016llx\n"
+ " csr: 0x%04x\n"
+ " biter: 0x%04x\n",
+ __entry->saddr,
+ __entry->soff,
+ __entry->attr,
+ __entry->nbytes,
+ __entry->slast,
+ __entry->daddr,
+ __entry->doff,
+ __entry->citer,
+ __entry->dlast_sga,
+ __entry->csr,
+ __entry->biter)
+);
+
+DEFINE_EVENT(edma_log_tcd, edma_fill_tcd,
+ TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
+ TP_ARGS(chan, tcd)
+);
+
+#endif
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE fsl-edma-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..e3505e56784b 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
@@ -594,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ if (ret)
+ return ret;
ret = dma_async_device_register(&idma64->dma);
if (ret)
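
The idma64 hunk adds the standard guard for shared interrupt lines when the device may be powered down; a minimal hypothetical handler showing the same check:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/limits.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;
	u32 status = readl(regs);

	/* An all-ones read usually means the device is powered off or gone;
	 * on a shared line, return IRQ_NONE so other handlers still run. */
	if (status == U32_MAX)
		return IRQ_NONE;

	/* ... acknowledge and handle the status bits ... */
	return IRQ_HANDLED;
}
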
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..57f1bf2ab20b 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -401,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
int rc;
dev_dbg(&pdev->dev, "%s called\n", __func__);
+
+ /*
+ * Due to an erratum in some of the devices supported by the driver,
+ * direct user submission to the device can be unsafe.
+ * (See the INTEL-SA-01084 security advisory)
+ *
+ * For the devices that exhibit this behavior, require that the user
+ * has CAP_SYS_RAWIO capabilities.
+ */
+ if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -415,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
+ struct dsa_hw_desc __user *udesc)
+{
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
+ const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
+ void __iomem *portal = idxd_wq_portal_addr(wq);
+ struct dsa_hw_desc descriptor __aligned(64);
+ int rc;
+
+ rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
+ if (rc)
+ return -EFAULT;
+
+ /*
+ * DSA devices are capable of indirect ("batch") command submission.
+ * On devices where direct user submissions are not safe, we cannot
+ * allow this since there is no good way for us to verify these
+ * indirect commands.
+ */
+ if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
+ !wq->idxd->user_submission_safe)
+ return -EINVAL;
+ /*
+ * As per the programming specification, the completion address must be
+ * aligned to 32 or 64 bytes. If this is violated the hardware
+ * engine can get very confused (security issue).
+ */
+ if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
+ return -EINVAL;
+
+ if (wq_dedicated(wq))
+ iosubmit_cmds512(portal, &descriptor, 1);
+ else {
+ descriptor.priv = 0;
+ descriptor.pasid = ctx->pasid;
+ rc = idxd_enqcmds(wq, portal, &descriptor);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *unused)
+{
+ struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
+ struct idxd_user_context *ctx = filp->private_data;
+ ssize_t written = 0;
+ int i;
+
+ for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+ int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+
+ if (rc)
+ return written ? written : rc;
+
+ written += sizeof(struct dsa_hw_desc);
+ }
+
+ return written;
+}
+
static __poll_t idxd_cdev_poll(struct file *filp,
struct poll_table_struct *wait)
{
@@ -437,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
.open = idxd_cdev_open,
.release = idxd_cdev_release,
.mmap = idxd_cdev_mmap,
+ .write = idxd_cdev_write,
.poll = idxd_cdev_poll,
};
@@ -501,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev;
- ida_destroy(&file_ida);
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
@@ -517,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO;
+ mutex_lock(&wq->wq_lock);
+
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ rc = -ENODEV;
+ goto wq_err;
+ }
+
/*
* User type WQ is enabled only when SVA is enabled for two reasons:
* - If no IOMMU or IOMMU Passthrough without SVA, userspace
@@ -532,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
dev_dbg(&idxd->pdev->dev,
"User type WQ cannot be enabled without SVA.\n");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&wq->wq_lock);
-
- if (!idxd_wq_driver_name_match(wq, dev)) {
- idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
- rc = -ENODEV;
+ rc = -EOPNOTSUPP;
goto wq_err;
}
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..868b724a3b75 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -288,12 +288,13 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ bool user_submission_safe;
load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -374,6 +375,8 @@ struct idxd_device {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_evl_file;
+
+ bool user_submission_safe;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..a7295943fa22 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 32,
.dev_type = &dsa_device_type,
.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct dsa_completion_record, status),
.cr_result_off = offsetof(struct dsa_completion_record, result),
},
@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 64,
.dev_type = &iax_device_type,
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -354,7 +356,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
+ idxd->user_submission_safe = data->user_submission_safe;
+
return 0;
err_dev_register:
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 315c004f58e4..e16dbf9ab324 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,9 +6,6 @@
#include <uapi/linux/idxd.h>
/* PCI Config */
-#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
-#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
-
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7f28f01be672..f706eae0e76b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
+{
+ ssize_t pos;
+ int i;
+
+ pos = 0;
+ for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
+ unsigned long val = opcap_bmap[i];
+
+ /* On systems where direct user submissions are not safe, we need to clear out
+ * the BATCH capability from the capability mask in sysfs since we cannot support
+ * that command on such systems.
+ */
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ clear_bit(DSA_OPCODE_BATCH % 64, &val);
+
+ pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+ pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
+ }
+
+ return pos;
+}
+
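op_cap_show_common() prints the operation-capability bitmap 64 bits at a time, highest word first, and hides the BATCH opcode from the sysfs view when user submission is not safe. A small sketch of the index arithmetic, assuming DSA_OPCODE_BATCH == 1 as in the uapi enum (so word 0, bit 1); like the show path above, it clears the bit only in a local copy, leaving the device's real capability mask untouched.

/* Hypothetical helper mirroring the masking done in op_cap_show_common(). */
#include <linux/bitops.h>
#include <uapi/linux/idxd.h>

static bool batch_opcode_visible(const unsigned long *opcap_bmap,
                                 bool user_submission_safe)
{
        /* opcode N lives in 64-bit word N / 64, at bit N % 64 */
        unsigned long val = opcap_bmap[DSA_OPCODE_BATCH / 64];

        if (!user_submission_safe)
                clear_bit(DSA_OPCODE_BATCH % 64, &val);

        return test_bit(DSA_OPCODE_BATCH % 64, &val);
}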
static ssize_t wq_op_config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+ return op_cap_show_common(dev, buf, wq->opcap_bmap);
}
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
+ return op_cap_show_common(dev, buf, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 9b42f5e96b1e..003e1580b902 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
@@ -137,7 +138,11 @@
* 0: Source on AIPS
* 12 Destination Bit(DP) 1: Destination on SPBA
* 0: Destination on AIPS
- * 13-15 --------- MUST BE 0
+ * 13 Source FIFO 1: Source is dual FIFO
+ * 0: Source is single FIFO
+ * 14 Destination FIFO 1: Destination is dual FIFO
+ * 0: Destination is single FIFO
+ * 15 --------- MUST BE 0
* 16-23 Higher WML HWML
* 24-27 N Total number of samples after
* which Pad adding/Swallowing
@@ -168,6 +173,8 @@
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12)
+#define SDMA_WATERMARK_LEVEL_SD BIT(13)
+#define SDMA_WATERMARK_LEVEL_DD BIT(14)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
@@ -175,6 +182,7 @@
#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
@@ -232,20 +240,23 @@ struct sdma_script_start_addrs {
s32 utra_addr;
s32 ram_code_start_addr;
/* End of v1 array */
- s32 mcu_2_ssish_addr;
+ union { s32 v1_end; s32 mcu_2_ssish_addr; };
s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr;
/* End of v2 array */
- s32 zcanfd_2_mcu_addr;
+ union { s32 v2_end; s32 zcanfd_2_mcu_addr; };
s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
s32 uart_2_mcu_rom_addr;
s32 uartsh_2_mcu_rom_addr;
+ s32 i2c_2_mcu_addr;
+ s32 mcu_2_i2c_addr;
/* End of v3 array */
- s32 mcu_2_zqspi_addr;
+ union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
+ s32 v4_end[0];
};
/*
@@ -531,6 +542,7 @@ struct sdma_engine {
/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
bool clk_ratio;
bool fw_loaded;
+ struct gen_pool *iram_pool;
};
static int sdma_config_write(struct dma_chan *chan,
@@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break;
+ case IMX_DMATYPE_I2C:
+ per_2_emi = sdma->script_addrs->i2c_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_i2c_addr;
+ sdmac->is_ram_script = true;
+ break;
case IMX_DMATYPE_HDMI:
emi_2_per = sdma->script_addrs->hdmi_dma_addr;
sdmac->is_ram_script = true;
@@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+
+ /*
+ * Limitation: The p2p script support dual fifos in maximum,
+	 * Limitation: the p2p script supports at most dual FIFOs, so when
+	 * the FIFO number is larger than 1, force-enable the dual FIFO
+	 * configuration.
+ if (sdmac->n_fifos_src > 1)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
+ if (sdmac->n_fifos_dst > 1)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
}
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
@@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ if (sdma->iram_pool)
+ sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool,
+ sizeof(struct sdma_buffer_descriptor),
+ &sdma->bd0_phys);
+ else
+ sdma->bd0 = dma_alloc_coherent(sdma->dev,
+ sizeof(struct sdma_buffer_descriptor),
+ &sdma->bd0_phys, GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1379,10 +1412,14 @@ out:
static int sdma_alloc_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ struct sdma_engine *sdma = desc->sdmac->sdma;
int ret = 0;
- desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
- &desc->bd_phys, GFP_NOWAIT);
+ if (sdma->iram_pool)
+ desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys);
+ else
+ desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT);
+
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1394,9 +1431,12 @@ out:
static void sdma_free_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ struct sdma_engine *sdma = desc->sdmac->sdma;
- dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
- desc->bd_phys);
+ if (sdma->iram_pool)
+ gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size);
+ else
+ dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys);
}
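With these hunks the buffer descriptors (and, further down, channel 0's bd0 and the channel control blocks) come from an on-chip IRAM gen_pool when the device tree provides one, falling back to dma_alloc_coherent() otherwise. A condensed sketch of that allocate/free pattern, with generic names:

/* Sketch of the "IRAM pool if available, coherent DMA otherwise" pattern. */
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>

static void *buf_alloc(struct device *dev, struct gen_pool *iram_pool,
		       size_t size, dma_addr_t *phys)
{
	if (iram_pool)
		return gen_pool_dma_alloc(iram_pool, size, phys);
	return dma_alloc_coherent(dev, size, phys, GFP_NOWAIT);
}

static void buf_free(struct device *dev, struct gen_pool *iram_pool,
		     void *cpu, dma_addr_t phys, size_t size)
{
	if (iram_pool)
		gen_pool_free(iram_pool, (unsigned long)cpu, size);
	else
		dma_free_coherent(dev, size, cpu, phys);
}

The pool pointer itself is obtained once in probe via of_gen_pool_get(np, "iram", 0), as the hunk near the end of sdma_probe() shows.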
static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
if (count & 3 || sg->dma_address & 3)
goto err_bd_out;
break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ bd->mode.command = 3;
+ break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1)
@@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \
+(offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \
+(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \
+(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \
+(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32))
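Deriving the per-version array sizes from offsetof() on union end markers keeps the counts in lockstep with the structure layout, so adding a script address can no longer silently desynchronize the hand-maintained constants. A standalone sketch of the same pattern on a hypothetical struct:

/* Minimal sketch of the offsetof end-marker pattern (hypothetical struct). */
#include <stddef.h>
#include <stdint.h>

struct fw_addrs {
	int32_t a;
	int32_t b;
	/* End of v1 layout */
	union { int32_t v1_end; int32_t c; };
	int32_t d;
	/* End of v2 layout */
	int32_t v2_end[0];
};

#define FW_ADDRS_V1_COUNT (offsetof(struct fw_addrs, v1_end) / sizeof(int32_t)) /* == 2 */
#define FW_ADDRS_V2_COUNT (offsetof(struct fw_addrs, v2_end) / sizeof(int32_t)) /* == 4 */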
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma)
{
int i, ret;
dma_addr_t ccb_phys;
+ int ccbsize;
ret = clk_enable(sdma->clk_ipg);
if (ret)
@@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma)
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(sdma->dev,
- MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
- sizeof(struct sdma_context_data),
- &ccb_phys, GFP_KERNEL);
+ ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
+ + sizeof(struct sdma_context_data));
+
+ if (sdma->iram_pool)
+ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+ else
+ sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys,
+ GFP_KERNEL);
if (!sdma->channel_control) {
ret = -ENOMEM;
@@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev)
vchan_init(&sdmac->vc, &sdma->dma_device);
}
+ if (np) {
+ sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
+ if (sdma->iram_pool)
+ dev_info(&pdev->dev, "alloc bd from iram.\n");
+ }
+
ret = sdma_init(sdma);
if (ret)
goto err_init;
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index dba631783876..78c606f6d002 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
mcf_chan->edma = mcf_edma;
- mcf_chan->slave_id = i;
+ mcf_chan->srcid = i;
mcf_chan->idle = true;
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
@@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
if (chan->device->dev->driver == &mcf_edma_driver.driver) {
struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
- return (mcf_chan->slave_id == (uintptr_t)param);
+ return (mcf_chan->srcid == (uintptr_t)param);
}
return false;
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
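Both pchan_update() and dma_update() perform a read-modify-write of a control register; the fix writes back the merged value (regval) rather than the bare mask (val), which previously clobbered every other bit in the register. A generic sketch of the corrected helper:

/* Corrected read-modify-write helper, in generic form. */
#include <linux/io.h>

static void reg_update_bits(void __iomem *addr, u32 mask, bool set)
{
	u32 regval = readl(addr);

	if (set)
		regval |= mask;
	else
		regval &= ~mask;

	writel(regval, addr);	/* write the merged value, not the mask */
}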
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c359decc07a3..6b2793b07694 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static inline struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
-
static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..60c4de8dac1d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
@@ -3265,7 +3262,6 @@ MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "dma-pl330",
.pm = &pl330_pm,
},
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 202ac95227cb..721b4ac0857a 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,7 +50,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
-static const struct of_device_id hidma_match[] = {
- {.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
- {.compatible = "qcom,hidma-1.2",
- .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
- {},
-};
-MODULE_DEVICE_TABLE(of, hidma_match);
-
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
.remove_new = hidma_remove,
.shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
- .of_match_table = hidma_match,
.acpi_match_table = ACPI_PTR(hidma_acpi_ids),
},
};
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 1d675f31252b..bb883e138ebf 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -7,12 +7,7 @@
#include <linux/dmaengine.h>
#include <linux/acpi.h>
-#include <linux/of.h>
#include <linux/property.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
-static const struct of_device_id hidma_mgmt_match[] = {
- {.compatible = "qcom,hidma-mgmt-1.0",},
- {},
-};
-MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
-
static struct platform_driver hidma_mgmt_driver = {
.probe = hidma_mgmt_probe,
.driver = {
.name = "hidma-mgmt",
- .of_match_table = hidma_mgmt_match,
.acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
},
};
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
-static int object_counter;
-
-static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
-{
- struct platform_device *pdev_parent = of_find_device_by_node(np);
- struct platform_device_info pdevinfo;
- struct device_node *child;
- struct resource *res;
- int ret = 0;
-
- /* allocate a resource array */
- res = kcalloc(3, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- for_each_available_child_of_node(np, child) {
- struct platform_device *new_pdev;
-
- ret = of_address_to_resource(child, 0, &res[0]);
- if (!ret)
- goto out;
-
- ret = of_address_to_resource(child, 1, &res[1]);
- if (!ret)
- goto out;
-
- ret = of_irq_to_resource(child, 0, &res[2]);
- if (ret <= 0)
- goto out;
-
- memset(&pdevinfo, 0, sizeof(pdevinfo));
- pdevinfo.fwnode = &child->fwnode;
- pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
- pdevinfo.name = child->name;
- pdevinfo.id = object_counter++;
- pdevinfo.res = res;
- pdevinfo.num_res = 3;
- pdevinfo.data = NULL;
- pdevinfo.size_data = 0;
- pdevinfo.dma_mask = DMA_BIT_MASK(64);
- new_pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(new_pdev)) {
- ret = PTR_ERR(new_pdev);
- goto out;
- }
- new_pdev->dev.of_node = child;
- of_dma_configure(&new_pdev->dev, child, true);
- /*
- * It is assumed that calling of_msi_configure is safe on
- * platforms with or without MSI support.
- */
- of_msi_configure(&new_pdev->dev, child);
- }
-
- kfree(res);
-
- return ret;
-
-out:
- of_node_put(child);
- kfree(res);
-
- return ret;
-}
-#endif
-
-static int __init hidma_mgmt_init(void)
-{
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
- struct device_node *child;
-
- for_each_matching_node(child, hidma_mgmt_match) {
- /* device tree based firmware here */
- hidma_mgmt_of_populate_channels(child);
- }
-#endif
- /*
- * We do not check for return value here, as it is assumed that
- * platform_driver_register must not fail. The reason for this is that
- * the (potential) hidma_mgmt_of_populate_channels calls above are not
- * cleaned up if it does fail, and to do this work is quite
- * complicated. In particular, various calls of of_address_to_resource,
- * of_irq_to_resource, platform_device_register_full, of_dma_configure,
- * and of_msi_configure which then call other functions and so on, must
- * be cleaned up - this is not a trivial exercise.
- *
- * Currently, this module is not intended to be unloaded, and there is
- * no module_exit function defined which does the needed cleanup. For
- * this reason, we have to assume success here.
- */
- platform_driver_register(&hidma_mgmt_driver);
-
- return 0;
-}
-module_init(hidma_mgmt_init);
+module_platform_driver(hidma_mgmt_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
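A worked example of why the early return is needed: with bytes_req = 4096 and bytes_xfer = 1024, the formula gives 4096 - (1024 % 4096) = 3072, which is the expected residual; but once the transfer has fully completed (bytes_xfer = 4096) the modulo collapses to 0 and the formula reports 4096 for a finished transfer. The new bytes_req == bytes_xfer check returns 0 in that case.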
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index e9f5250fbe4d..59d9eabc8b67 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
*/
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
+ lockdep_assert_held(&vc->lock);
+
list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
return !list_empty(&vc->desc_issued);
}
@@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
dma_cookie_t cookie;
+ lockdep_assert_held(&vc->lock);
+
cookie = vd->tx.cookie;
dma_cookie_complete(&vd->tx);
dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
@@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ lockdep_assert_held(&vc->lock);
+
list_add_tail(&vd->node, &vc->desc_terminated);
if (vc->cyclic == vd)
@@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
*/
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
+ lockdep_assert_held(&vc->lock);
+
return list_first_entry_or_null(&vc->desc_issued,
struct virt_dma_desc, node);
}
@@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
+ lockdep_assert_held(&vc->lock);
+
list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
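The lockdep_assert_held() annotations make the locking contract of these helpers explicit: every caller must already hold vc->lock. A minimal sketch of the expected calling pattern from a driver's issue_pending callback; the foo_* names are hypothetical.

/* Expected locking pattern around the asserted helpers (hypothetical driver). */
#include "virt-dma.h"

static void foo_dma_start_next(struct virt_dma_chan *vc)
{
	/* hypothetical: program the next issued descriptor into hardware */
}

static void foo_dma_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	if (vchan_issue_pending(vc))	/* lockdep now checks vc->lock is held */
		foo_dma_start_next(vc);
	spin_unlock_irqrestore(&vc->lock, flags);
}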
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..e143a7330816 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
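The terminate/synchronize path now works as a small handshake: terminate_all marks stop_requested, the channel ISR completes last_interrupt when the engine raises its final interrupt, and xdma_synchronize() waits for that completion (bounded to one second) while the status register still reports the engine busy. A condensed sketch of the handshake with generic names; the one-second bound mirrors the value used above.

/* Condensed sketch of the stop/last-interrupt handshake (generic names). */
#include <linux/completion.h>
#include <linux/jiffies.h>

struct chan_state {
	struct completion last_interrupt;
	bool stop_requested;
};

static void chan_init(struct chan_state *c)
{
	init_completion(&c->last_interrupt);
	c->stop_requested = false;
}

static void chan_terminate(struct chan_state *c)
{
	c->stop_requested = true;	/* under the channel lock in the driver */
}

static void chan_isr(struct chan_state *c)
{
	if (c->stop_requested)
		complete(&c->last_interrupt);
	/* normal descriptor completion handling follows */
}

static void chan_synchronize(struct chan_state *c, bool hw_busy)
{
	if (hw_busy)
		wait_for_completion_timeout(&c->last_interrupt,
					    msecs_to_jiffies(1000));
}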
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
@@ -1295,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = {
{ "xdma", 0},
{ },
};
+MODULE_DEVICE_TABLE(platform, xdma_id_table);
static struct platform_driver xdma_driver = {
.driver = {
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..36bd4825d389 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1042,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *active;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
xilinx_dpdma_debugfs_desc_done_irq(chan);
@@ -1056,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
"chan%u: DONE IRQ with no active descriptor!\n",
chan->id);
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
}
/**
@@ -1071,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *pending;
struct xilinx_dpdma_sw_desc *sw_desc;
- unsigned long flags;
u32 desc_id;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
pending = chan->desc.pending;
if (!chan->running || !pending)
@@ -1097,15 +1096,17 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
}
/**
@@ -1264,10 +1265,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
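issue_pending, the vsync handler and the error tasklet now all take chan->lock first and nest chan->vchan.lock inside it, matching the ordering documented in the struct comment; taking the two locks in one consistent order is what rules out an ABBA deadlock between the IRQ path and the submission path. A generic sketch of that nesting:

/* Lock-nesting sketch: channel lock outer, virt-dma lock inner (generic names). */
#include <linux/spinlock.h>
#include "virt-dma.h"

struct foo_chan {
	spinlock_t lock;		/* outer: protects channel state */
	struct virt_dma_chan vchan;	/* inner: vchan.lock */
};

static void foo_chan_submit(struct foo_chan *chan)	/* process context */
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	/* queue the next transfer */
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void foo_chan_irq(struct foo_chan *chan)	/* hard IRQ, interrupts off */
{
	spin_lock(&chan->lock);
	spin_lock(&chan->vchan.lock);
	/* complete the active descriptor, promote the pending one */
	spin_unlock(&chan->vchan.lock);
	spin_unlock(&chan->lock);
}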
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1498,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index a4cae73f20d3..20607ed54243 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -4,4 +4,4 @@
#
config DPLL
- bool
+ bool
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 64eaca80d736..32019dc33cca 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
struct list_head list;
const struct dpll_pin_ops *ops;
void *priv;
+ void *cookie;
};
struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
list_for_each_entry(reg, &ref->registration_list, list) {
- if (reg->ops == ops && reg->priv == priv)
+ if (reg->ops == ops && reg->priv == priv &&
+ reg->cookie == cookie)
return reg;
}
return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return;
list_del(&reg->list);
@@ -442,7 +449,7 @@ static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
freq_size, GFP_KERNEL);
- if (!src->freq_supported)
+ if (!dst->freq_supported)
return -ENOMEM;
}
if (src->board_label) {
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
int ret;
- ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
if (ret)
return ret;
- ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
if (ret)
goto ref_pin_del;
xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
return ret;
ref_pin_del:
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
return ret;
}
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
dpll->clock_id == pin->clock_id)))
ret = -EINVAL;
else
- ret = __dpll_pin_register(dpll, pin, ops, priv);
+ ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
- dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- __dpll_pin_unregister(dpll, pin, ops, priv);
+ __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
return -EINVAL;
mutex_lock(&dpll_lock);
- ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
- ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
stop = i;
goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
dpll_unregister:
xa_for_each(&parent->dpll_refs, i, ref)
if (i < stop) {
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+ parent);
dpll_pin_delete_ntf(pin);
}
refcount_dec(&pin->refcount);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index ae17ce4d9722..fe89f5c4837f 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -737,8 +737,7 @@ static int altr_edac_device_probe(struct platform_device *pdev)
}
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
- 1, ecc_name, 1, 0, NULL, 0,
- dev_instance++);
+ 1, ecc_name, 1, 0, dev_instance++);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1514,7 +1513,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
/* Create the PortB EDAC device */
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, 1,
- ecc_name, 1, 0, NULL, 0, edac_idx);
+ ecc_name, 1, 0, edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: Unable to allocate PortB EDAC device\n",
@@ -1921,8 +1920,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
- 1, ecc_name, 1, 0, NULL, 0,
- edac_idx);
+ 1, ecc_name, 1, 0, edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 1665f7932bac..b879b12971e7 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -345,7 +345,6 @@ struct amd64_pvt {
u32 dchr1; /* DRAM Configuration High DCT1 reg */
u32 nbcap; /* North Bridge Capabilities */
u32 nbcfg; /* F10 North Bridge Configuration */
- u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
u32 dhar; /* DRAM Hoist reg */
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index ca718f63fcbc..a6d3013d5823 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -366,8 +366,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
- NULL, 0, 0,
- NULL, 0, dev_info->edac_idx);
+ NULL, 0, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
ret = -ENOMEM;
goto err_dev_put;
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index 25517c99b3ea..589bc81f1249 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -523,7 +523,7 @@ static int aurora_l2_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "data ECC is not enabled\n");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
- "cpu", 1, "L", 1, 2, NULL, 0, 0);
+ "cpu", 1, "L", 1, 2, 0);
if (!dci)
return -ENOMEM;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 5075dc7526e3..eb702bc3aa29 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -797,7 +797,7 @@ static void cpc925_add_edac_devices(void __iomem *vbase)
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name,
- 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
+ 1, NULL, 0, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
cpc925_printk(KERN_ERR, "No memory for edac device\n");
goto err1;
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 0689e1510721..621dc2a5d034 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -56,14 +56,12 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
struct edac_device_ctl_info *
edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
char *blk_name, unsigned nr_blocks, unsigned off_val,
- struct edac_dev_sysfs_block_attribute *attrib_spec,
- unsigned nr_attrib, int device_index)
+ int device_index)
{
- struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
struct edac_device_block *dev_blk, *blk_p, *blk;
struct edac_device_instance *dev_inst, *inst;
struct edac_device_ctl_info *dev_ctl;
- unsigned instance, block, attr;
+ unsigned instance, block;
void *pvt;
int err;
@@ -85,15 +83,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance
dev_ctl->blocks = dev_blk;
- if (nr_attrib) {
- dev_attrib = kcalloc(nr_attrib, sizeof(struct edac_dev_sysfs_block_attribute),
- GFP_KERNEL);
- if (!dev_attrib)
- goto free;
-
- dev_ctl->attribs = dev_attrib;
- }
-
if (pvt_sz) {
pvt = kzalloc(pvt_sz, GFP_KERNEL);
if (!pvt)
@@ -132,44 +121,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance
edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
instance, inst, block, blk, blk->name);
-
- /* if there are NO attributes OR no attribute pointer
- * then continue on to next block iteration
- */
- if ((nr_attrib == 0) || (attrib_spec == NULL))
- continue;
-
- /* setup the attribute array for this block */
- blk->nr_attribs = nr_attrib;
- attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
- blk->block_attributes = attrib_p;
-
- edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
- blk->block_attributes);
-
- /* Initialize every user specified attribute in this
- * block with the data the caller passed in
- * Each block gets its own copy of pointers,
- * and its unique 'value'
- */
- for (attr = 0; attr < nr_attrib; attr++) {
- attrib = &attrib_p[attr];
-
- /* populate the unique per attrib
- * with the code pointers and info
- */
- attrib->attr = attrib_spec[attr].attr;
- attrib->show = attrib_spec[attr].show;
- attrib->store = attrib_spec[attr].store;
-
- attrib->block = blk; /* up link */
-
- edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
- attrib, attrib->attr.name,
- &attrib_spec[attr],
- attrib_spec[attr].attr.name
- );
- }
}
}
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index 7db22a4c83ef..034711d71ebf 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -22,7 +22,6 @@
#ifndef _EDAC_DEVICE_H_
#define _EDAC_DEVICE_H_
-#include <linux/completion.h>
#include <linux/device.h>
#include <linux/edac.h>
#include <linux/kobject.h>
@@ -95,22 +94,13 @@ struct edac_dev_sysfs_attribute {
*
* used in leaf 'block' nodes for adding controls/attributes
*
- * each block in each instance of the containing control structure
- * can have an array of the following. The show and store functions
- * will be filled in with the show/store function in the
- * low level driver.
- *
- * The 'value' field will be the actual value field used for
- * counting
+ * each block in each instance of the containing control structure can
+ * have an array of the following. The show function will be filled
+ * in with the low level driver's show function.
*/
struct edac_dev_sysfs_block_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *, struct attribute *, char *);
- ssize_t (*store)(struct kobject *, struct attribute *,
- const char *, size_t);
- struct edac_device_block *block;
-
- unsigned int value;
};
/* device block control structure */
@@ -200,8 +190,6 @@ struct edac_device_ctl_info {
unsigned long start_time; /* edac_device load start time (jiffies) */
- struct completion removal_complete;
-
/* sysfs top name under 'edac' directory
* and instance name:
* cpu/cpu0/...
@@ -217,7 +205,6 @@ struct edac_device_ctl_info {
u32 nr_instances;
struct edac_device_instance *instances;
struct edac_device_block *blocks;
- struct edac_dev_sysfs_block_attribute *attribs;
/* Event counters for the this whole EDAC Device */
struct edac_device_counter counters;
@@ -245,8 +232,6 @@ extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
char *edac_device_name, unsigned nr_instances,
char *edac_block_name, unsigned nr_blocks,
unsigned offset_value,
- struct edac_dev_sysfs_block_attribute *block_attributes,
- unsigned nr_attribs,
int device_index);
/* The offset value can be:
@@ -356,7 +341,6 @@ static inline void __edac_device_free_ctl_info(struct edac_device_ctl_info *ci)
{
if (ci) {
kfree(ci->pvt_info);
- kfree(ci->attribs);
kfree(ci->blocks);
kfree(ci->instances);
kfree(ci);
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 237a542e045a..fcebc4ffea26 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -457,35 +457,19 @@ static ssize_t edac_dev_block_show(struct kobject *kobj,
return -EIO;
}
-/* Function to 'store' fields into the edac_dev 'block' structure */
-static ssize_t edac_dev_block_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_dev_sysfs_block_attribute *block_attr;
-
- block_attr = to_block_attr(attr);
-
- if (block_attr->store)
- return block_attr->store(kobj, attr, buffer, count);
- return -EIO;
-}
-
/* edac_dev file operations for a 'block' */
static const struct sysfs_ops device_block_ops = {
.show = edac_dev_block_show,
- .store = edac_dev_block_store
};
-#define BLOCK_ATTR(_name,_mode,_show,_store) \
+#define BLOCK_ATTR(_name,_mode,_show) \
static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
- .store = _store, \
};
-BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
-BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show);
/* list of edac_dev 'block' attributes */
static struct attribute *device_block_attrs[] = {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5116873c3330..4200aec04831 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -146,7 +146,7 @@ static ssize_t csrow_ue_count_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%u\n", csrow->ue_count);
+ return sysfs_emit(data, "%u\n", csrow->ue_count);
}
static ssize_t csrow_ce_count_show(struct device *dev,
@@ -154,7 +154,7 @@ static ssize_t csrow_ce_count_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%u\n", csrow->ce_count);
+ return sysfs_emit(data, "%u\n", csrow->ce_count);
}
static ssize_t csrow_size_show(struct device *dev,
@@ -166,7 +166,7 @@ static ssize_t csrow_size_show(struct device *dev,
for (i = 0; i < csrow->nr_channels; i++)
nr_pages += csrow->channels[i]->dimm->nr_pages;
- return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct device *dev,
@@ -174,7 +174,7 @@ static ssize_t csrow_mem_type_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
+ return sysfs_emit(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct device *dev,
@@ -182,7 +182,7 @@ static ssize_t csrow_dev_type_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
+ return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct device *dev,
@@ -191,7 +191,7 @@ static ssize_t csrow_edac_mode_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
+ return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -207,8 +207,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
if (!rank->dimm->label[0])
return 0;
- return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
- rank->dimm->label);
+ return sysfs_emit(data, "%s\n", rank->dimm->label);
}
static ssize_t channel_dimm_label_store(struct device *dev,
@@ -243,7 +242,7 @@ static ssize_t channel_ce_count_show(struct device *dev,
unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
- return sprintf(data, "%u\n", rank->ce_count);
+ return sysfs_emit(data, "%u\n", rank->ce_count);
}
/* cwrow<id>/attribute files */
@@ -515,7 +514,7 @@ static ssize_t dimmdev_label_show(struct device *dev,
if (!dimm->label[0])
return 0;
- return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
+ return sysfs_emit(data, "%s\n", dimm->label);
}
static ssize_t dimmdev_label_store(struct device *dev,
@@ -546,7 +545,7 @@ static ssize_t dimmdev_size_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}
static ssize_t dimmdev_mem_type_show(struct device *dev,
@@ -554,7 +553,7 @@ static ssize_t dimmdev_mem_type_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]);
+ return sysfs_emit(data, "%s\n", edac_mem_types[dimm->mtype]);
}
static ssize_t dimmdev_dev_type_show(struct device *dev,
@@ -562,7 +561,7 @@ static ssize_t dimmdev_dev_type_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", dev_types[dimm->dtype]);
+ return sysfs_emit(data, "%s\n", dev_types[dimm->dtype]);
}
static ssize_t dimmdev_edac_mode_show(struct device *dev,
@@ -571,7 +570,7 @@ static ssize_t dimmdev_edac_mode_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
+ return sysfs_emit(data, "%s\n", edac_caps[dimm->edac_mode]);
}
static ssize_t dimmdev_ce_count_show(struct device *dev,
@@ -580,7 +579,7 @@ static ssize_t dimmdev_ce_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", dimm->ce_count);
+ return sysfs_emit(data, "%u\n", dimm->ce_count);
}
static ssize_t dimmdev_ue_count_show(struct device *dev,
@@ -589,7 +588,7 @@ static ssize_t dimmdev_ue_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", dimm->ue_count);
+ return sysfs_emit(data, "%u\n", dimm->ue_count);
}
/* dimm/rank attribute files */
@@ -758,7 +757,7 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
return bandwidth;
}
- return sprintf(data, "%d\n", bandwidth);
+ return sysfs_emit(data, "%d\n", bandwidth);
}
/* default attribute files for the MCI object */
@@ -768,7 +767,7 @@ static ssize_t mci_ue_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ue_mc);
+ return sysfs_emit(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
@@ -777,7 +776,7 @@ static ssize_t mci_ce_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ce_mc);
+ return sysfs_emit(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
@@ -786,7 +785,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ce_noinfo_count);
+ return sysfs_emit(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
@@ -795,7 +794,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ue_noinfo_count);
+ return sysfs_emit(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
@@ -804,7 +803,7 @@ static ssize_t mci_seconds_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+ return sysfs_emit(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}
static ssize_t mci_ctl_name_show(struct device *dev,
@@ -813,7 +812,7 @@ static ssize_t mci_ctl_name_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%s\n", mci->ctl_name);
+ return sysfs_emit(data, "%s\n", mci->ctl_name);
}
static ssize_t mci_size_mb_show(struct device *dev,
@@ -833,7 +832,7 @@ static ssize_t mci_size_mb_show(struct device *dev,
}
}
- return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(total_pages));
}
static ssize_t mci_max_location_show(struct device *dev,
diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h
index 5175f5724cfa..3f47cd9b2b03 100644
--- a/drivers/edac/edac_pci.h
+++ b/drivers/edac/edac_pci.h
@@ -22,7 +22,6 @@
#ifndef _EDAC_PCI_H_
#define _EDAC_PCI_H_
-#include <linux/completion.h>
#include <linux/device.h>
#include <linux/edac.h>
#include <linux/kobject.h>
@@ -48,8 +47,6 @@ struct edac_pci_ctl_info {
int pci_idx;
- struct bus_type *edac_subsys; /* pointer to subsystem */
-
/* the internal state of this controller instance */
int op_state;
/* work struct for this instance */
@@ -72,8 +69,6 @@ struct edac_pci_ctl_info {
unsigned long start_time; /* edac_pci load start time (jiffies) */
- struct completion complete;
-
/* sysfs top name under 'edac' directory
* and instance name:
* cpu/cpu0/...
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 5646c049a934..282ca6535f8f 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -54,7 +54,7 @@ static int highbank_l2_err_probe(struct platform_device *pdev)
int res = 0;
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
- 1, "L", 1, 2, NULL, 0, 0);
+ 1, "L", 1, 2, 0);
if (!dci)
return -ENOMEM;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index c1bc53f4e184..e8945d4adbad 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -496,7 +496,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
return -ENOMEM;
edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
- "cpu", 1, "L", 1, 2, NULL, 0,
+ "cpu", 1, "L", 1, 2,
edac_dev_idx);
if (!edac_dev) {
devres_release_group(&op->dev, mpc85xx_l2_err_probe);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 4015eb9af6fe..919095d10528 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -138,7 +138,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
/* 'Tags' are block 0, 'Data' is block 1*/
l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
- NULL, 0, edac_device_alloc_index());
+ edac_device_alloc_index());
if (!l2c)
return -ENOMEM;
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index ea8a8e337b1e..b8404cc7b65f 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -92,7 +92,7 @@ static int co_cache_error_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
- "cache", 2, 0, NULL, 0,
+ "cache", 2, 0,
edac_device_alloc_index());
if (!p->ed)
goto err;
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 5539917c01dd..d3cd4cc54ace 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -349,7 +349,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
llcc_driv_data->num_banks, 1,
- NULL, 0,
edac_device_alloc_index());
if (!edev_ctl)
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index b844e2626fd5..a2b193dc6604 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -52,8 +52,7 @@ static int ecc_register(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
- 1, 1, NULL, 0,
- edac_device_alloc_index());
+ 1, 1, edac_device_alloc_index());
if (!p->dci)
return -ENOMEM;
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 9c5b6f8bd8bd..27996b7924c8 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -648,7 +648,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
- if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
+ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
return NOTIFY_DONE;
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 5527055b0964..ea7a9a342dd3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -9,6 +9,7 @@
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -299,6 +300,7 @@ struct synps_ecc_status {
/**
* struct synps_edac_priv - DDR memory controller private instance data.
* @baseaddr: Base address of the DDR controller.
+ * @reglock: Concurrent CSRs access lock.
* @message: Buffer for framing the event specific info.
* @stat: ECC status information.
* @p_data: Platform data.
@@ -313,6 +315,7 @@ struct synps_ecc_status {
*/
struct synps_edac_priv {
void __iomem *baseaddr;
+ spinlock_t reglock;
char message[SYNPS_EDAC_MSG_SIZE];
struct synps_ecc_status stat;
const struct synps_platform_data *p_data;
@@ -408,7 +411,8 @@ out:
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
struct synps_ecc_status *p;
- u32 regval, clearval = 0;
+ u32 regval, clearval;
+ unsigned long flags;
void __iomem *base;
base = priv->baseaddr;
@@ -452,10 +456,14 @@ ue_err:
p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ clearval = readl(base + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
writel(clearval, base + ECC_CLR_OFST);
- writel(0x0, base + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
return 0;
}
@@ -515,24 +523,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
static void enable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Enable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(DDR_UE_MASK | DDR_CE_MASK,
- priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(DDR_UE_MASK | DDR_CE_MASK,
+ priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
static void disable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Disable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(0, priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
/**
@@ -576,8 +601,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- else
- enable_intr(priv);
return IRQ_HANDLED;
}
@@ -1357,6 +1380,7 @@ static int mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
+ spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
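For context on the synopsys_edac hunks above: on controllers with the DDR_ECC_INTR_SELF_CLEAR quirk, ECC_CLR_OFST carries both the interrupt-enable bits and the error/counter clear bits, so every update becomes a read-modify-write guarded by the new priv->reglock with interrupts disabled. A minimal sketch of that pattern; the helper name is invented and this is not the driver's actual code:

/*
 * Illustrative only. The interrupt handler and the enable/disable paths
 * both touch ECC_CLR_OFST, so each update preserves the other bits and
 * runs under priv->reglock with interrupts disabled.
 */
static void demo_set_clr_bits(struct synps_edac_priv *priv, u32 bits)
{
        unsigned long flags;
        u32 regval;

        spin_lock_irqsave(&priv->reglock, flags);

        regval = readl(priv->baseaddr + ECC_CLR_OFST);
        writel(regval | bits, priv->baseaddr + ECC_CLR_OFST);

        spin_unlock_irqrestore(&priv->reglock, flags);
}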
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 90d46e5c4ff0..fab9891e569a 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1365,8 +1365,7 @@ static int thunderx_ocx_probe(struct pci_dev *pdev,
idx = edac_device_alloc_index();
snprintf(name, sizeof(name), "OCX%d", idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
- name, 1, "CCPI", 1,
- 0, NULL, 0, idx);
+ name, 1, "CCPI", 1, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
@@ -2004,8 +2003,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), fmt, idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
- name, 1, "L2C", 1, 0,
- NULL, 0, idx);
+ name, 1, "L2C", 1, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 1688a5050f63..a556d23e8261 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -425,7 +425,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
convert_to_physical(priv, pinf), pinf.burstpos);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
- priv->ce_cnt, 0, 0, 0, 0, 0, -1,
+ 1, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
@@ -438,7 +438,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
convert_to_physical(priv, pinf), pinf.burstpos);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- priv->ue_cnt, 0, 0, 0, 0, 0, -1,
+ 1, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
@@ -865,6 +865,9 @@ static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
for (i = 0; i < NUM_UE_BITPOS; i++)
token[i] = strsep(&pbuf, ",");
+ if (!token[0] || !token[1])
+ return -EFAULT;
+
ret = kstrtou8(token[0], 0, &ue0);
if (ret)
return ret;
@@ -1135,8 +1138,7 @@ static int mc_probe(struct platform_device *pdev)
}
rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
- XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
- XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
+ XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR,
false, err_callback, mci);
if (rc) {
if (rc == -EACCES)
@@ -1173,8 +1175,6 @@ static void mc_remove(struct platform_device *pdev)
xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
XPM_EVENT_ERROR_MASK_DDRMC_CR |
- XPM_EVENT_ERROR_MASK_NOC_CR |
- XPM_EVENT_ERROR_MASK_NOC_NCR |
XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 1b50f8160013..fd87f1b2c145 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -913,8 +913,8 @@ static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- edac_name, 1, "l2c", 1, 2, NULL,
- 0, edac_device_alloc_index());
+ edac_name, 1, "l2c", 1, 2,
+ edac_device_alloc_index());
if (!edac_dev) {
rc = -ENOMEM;
goto err_group;
@@ -1208,8 +1208,7 @@ static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- "l3c", 1, "l3c", 1, 0, NULL, 0,
- edac_idx);
+ "l3c", 1, "l3c", 1, 0, edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
@@ -1748,8 +1747,7 @@ static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- "SOC", 1, "SOC", 1, 2, NULL, 0,
- edac_idx);
+ "SOC", 1, "SOC", 1, 2, edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index 2d9a5cfd8931..c9dc78d8c824 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -381,7 +381,7 @@ static int edac_probe(struct platform_device *pdev)
}
dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
- 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0,
edac_device_alloc_index());
if (!dci)
return -ENOMEM;
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c8bbf90209f5..a66b3be502a9 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -44,17 +44,16 @@ config EISA_PCI_EISA
When in doubt, say Y.
-# Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86 may lead to crashes...
+# Using EISA_VIRTUAL_ROOT on something other than an X86 may lead
+# to crashes...
config EISA_VIRTUAL_ROOT
bool "EISA virtual root device"
- depends on EISA && (ALPHA || X86)
+ depends on EISA && X86
default y
help
Activate this option if your system only has an EISA bus
- (no PCI slots). The Alpha Jensen is an example of such
- a system.
+ (no PCI slots).
When in doubt, say Y.
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 37e6dd219c37..cd9515d9d8f0 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -13,7 +13,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
-#if defined(CONFIG_ALPHA_JENSEN) || defined(CONFIG_EISA_VLB_PRIMING)
+#if defined(CONFIG_EISA_VLB_PRIMING)
#define EISA_FORCE_PROBE_DEFAULT 1
#else
#define EISA_FORCE_PROBE_DEFAULT 0
diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig
index 76444a2d5e12..60d9e7c35417 100644
--- a/drivers/firewire/.kunitconfig
+++ b/drivers/firewire/.kunitconfig
@@ -3,3 +3,4 @@ CONFIG_PCI=y
CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
+CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 552a39df8cbd..869598b20e3a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -50,6 +50,22 @@ config FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
+config FIREWIRE_KUNIT_PACKET_SERDES_TEST
+ tristate "KUnit tests for packet serialization/deserialization" if !KUNIT_ALL_TESTS
+ depends on FIREWIRE && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the KUnit tests for packet serialization and
+ deserialization.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). They are only useful
+ for kernel developers running the KUnit test harness and are not
+ intended for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general, refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index b24b2879ac34..75c47d046925 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux IEEE 1394 implementation
#
-firewire-core-y += core-card.o core-cdev.o core-device.o \
+firewire-core-y += core-trace.o core-card.o core-cdev.o core-device.o \
core-iso.o core-topology.o core-transaction.o
firewire-ohci-y += ohci.o
firewire-sbp2-y += sbp2.o
@@ -16,5 +16,5 @@ obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
-firewire-uapi-test-objs += uapi-test.o
-obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += firewire-uapi-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 401a77e3b5fa..127d87e3a153 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -23,6 +23,7 @@
#include <asm/byteorder.h>
#include "core.h"
+#include <trace/events/firewire.h>
#define define_fw_printk_level(func, kern_level) \
void func(const struct fw_card *card, const char *fmt, ...) \
@@ -221,11 +222,15 @@ static int reset_bus(struct fw_card *card, bool short_reset)
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+ trace_bus_reset_initiate(card->generation, short_reset);
+
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
+ trace_bus_reset_schedule(card->generation, short_reset);
+
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
@@ -244,6 +249,8 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ trace_bus_reset_postpone(card->generation, card->br_short);
+
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
fw_card_put(card);
return;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 6274b86eb943..55993c9e0b90 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -35,6 +35,7 @@
#include "core.h"
+#include <trace/events/firewire.h>
/*
* ABI version history is documented in linux/firewire-cdev.h.
@@ -1558,6 +1559,9 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
struct client *e_client = e->client;
u32 rcode;
+ trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
+ packet->timestamp);
+
switch (status) {
// expected:
case ACK_COMPLETE:
@@ -1655,6 +1659,9 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
memcpy(pp->data, a->data, sizeof(a->data));
}
+ trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
+ e->p.header[2]);
+
card->driver->send_request(card, &e->p);
return 0;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index f40c81534381..837cc44d8d9f 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include "core.h"
+#include <trace/events/firewire.h>
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
@@ -507,6 +508,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
+ trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count);
+
spin_lock_irqsave(&card->lock, flags);
/*
diff --git a/drivers/firewire/core-trace.c b/drivers/firewire/core-trace.c
new file mode 100644
index 000000000000..96cbd9d384dc
--- /dev/null
+++ b/drivers/firewire/core-trace.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Takashi Sakamoto
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/firewire.h>
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 130b95aca629..571fdff65c2b 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -29,29 +29,11 @@
#include <asm/byteorder.h>
#include "core.h"
+#include <trace/events/firewire.h>
+#include "packet-header-definitions.h"
-#define HEADER_PRI(pri) ((pri) << 0)
-#define HEADER_TCODE(tcode) ((tcode) << 4)
-#define HEADER_RETRY(retry) ((retry) << 8)
-#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
-#define HEADER_DESTINATION(destination) ((destination) << 16)
-#define HEADER_SOURCE(source) ((source) << 16)
-#define HEADER_RCODE(rcode) ((rcode) << 12)
-#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
-#define HEADER_DATA_LENGTH(length) ((length) << 16)
-#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
-
-#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
-#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
-#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
-#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
-
-#define HEADER_DESTINATION_IS_BROADCAST(q) \
- (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
+#define HEADER_DESTINATION_IS_BROADCAST(header) \
+ ((async_header_get_destination(header) & 0x3f) == 0x3f)
#define PHY_PACKET_CONFIG 0x0
#define PHY_PACKET_LINK_ON 0x1
@@ -192,6 +174,9 @@ static void transmit_complete_callback(struct fw_packet *packet,
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
+ trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed,
+ status, packet->timestamp);
+
switch (status) {
case ACK_COMPLETE:
close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
@@ -231,10 +216,11 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int ext_tcode;
if (tcode == TCODE_STREAM_DATA) {
- packet->header[0] =
- HEADER_DATA_LENGTH(length) |
- destination_id |
- HEADER_TCODE(TCODE_STREAM_DATA);
+ // The destination_id argument should carry the tag, channel, and sy fields,
+ // just as an isochronous packet header does.
+ packet->header[0] = destination_id;
+ isoc_header_set_data_length(packet->header, length);
+ isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA);
packet->header_length = 4;
packet->payload = payload;
packet->payload_length = length;
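A hedged illustration of the comment in this hunk: the destination_id passed in for TCODE_STREAM_DATA is expected to already encode the tag, channel, and sy fields, which can be composed with the isoc_header_set_*() helpers introduced later in this series. The helper function below is invented for the example and is not part of the patch:

#include "packet-header-definitions.h"

/* Example values only; not the driver's actual code. */
static u32 demo_stream_destination_id(unsigned int tag, unsigned int channel,
                                      unsigned int sy)
{
        u32 quadlet = 0;

        isoc_header_set_tag(&quadlet, tag);
        isoc_header_set_channel(&quadlet, channel);
        isoc_header_set_sy(&quadlet, sy);

        /* data_length and tcode are filled in by fw_fill_request() itself. */
        return quadlet;
}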
@@ -248,28 +234,24 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
} else
ext_tcode = 0;
- packet->header[0] =
- HEADER_RETRY(RETRY_X) |
- HEADER_TLABEL(tlabel) |
- HEADER_TCODE(tcode) |
- HEADER_DESTINATION(destination_id);
- packet->header[1] =
- HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
- packet->header[2] =
- offset;
+ async_header_set_retry(packet->header, RETRY_X);
+ async_header_set_tlabel(packet->header, tlabel);
+ async_header_set_tcode(packet->header, tcode);
+ async_header_set_destination(packet->header, destination_id);
+ async_header_set_source(packet->header, source_id);
+ async_header_set_offset(packet->header, offset);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
- packet->header[3] = *(u32 *)payload;
+ async_header_set_quadlet_data(packet->header, *(u32 *)payload);
packet->header_length = 16;
packet->payload_length = 0;
break;
case TCODE_LOCK_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
- packet->header[3] =
- HEADER_DATA_LENGTH(length) |
- HEADER_EXTENDED_TCODE(ext_tcode);
+ async_header_set_data_length(packet->header, length);
+ async_header_set_extended_tcode(packet->header, ext_tcode);
packet->header_length = 16;
packet->payload = payload;
packet->payload_length = length;
@@ -281,9 +263,8 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
case TCODE_READ_BLOCK_REQUEST:
- packet->header[3] =
- HEADER_DATA_LENGTH(length) |
- HEADER_EXTENDED_TCODE(ext_tcode);
+ async_header_set_data_length(packet->header, length);
+ async_header_set_extended_tcode(packet->header, ext_tcode);
packet->header_length = 16;
packet->payload_length = 0;
break;
@@ -417,6 +398,9 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
spin_unlock_irqrestore(&card->lock, flags);
+ trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload,
+ tcode_is_read_request(tcode) ? 0 : length / 4);
+
card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);
@@ -479,6 +463,8 @@ static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
+ trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status,
+ packet->timestamp);
complete(&phy_config_done);
}
@@ -517,6 +503,10 @@ void fw_send_phy_config(struct fw_card *card,
phy_config_packet.generation = generation;
reinit_completion(&phy_config_done);
+ trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet,
+ phy_config_packet.generation, phy_config_packet.header[1],
+ phy_config_packet.header[2]);
+
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
@@ -655,7 +645,7 @@ EXPORT_SYMBOL(fw_core_remove_address_handler);
struct fw_request {
struct kref kref;
struct fw_packet response;
- u32 request_header[4];
+ u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
int ack;
u32 timestamp;
u32 length;
@@ -684,6 +674,9 @@ static void free_response_callback(struct fw_packet *packet,
{
struct fw_request *request = container_of(packet, struct fw_request, response);
+ trace_async_response_outbound_complete((uintptr_t)request, packet->generation,
+ packet->speed, status, packet->timestamp);
+
// Decrease the reference count since the packet is no longer in flight.
fw_request_put(request);
@@ -695,7 +688,7 @@ int fw_get_response_length(struct fw_request *r)
{
int tcode, ext_tcode, data_length;
- tcode = HEADER_GET_TCODE(r->request_header[0]);
+ tcode = async_header_get_tcode(r->request_header);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
@@ -706,12 +699,12 @@ int fw_get_response_length(struct fw_request *r)
return 4;
case TCODE_READ_BLOCK_REQUEST:
- data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ data_length = async_header_get_data_length(r->request_header);
return data_length;
case TCODE_LOCK_REQUEST:
- ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
- data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ ext_tcode = async_header_get_extended_tcode(r->request_header);
+ data_length = async_header_get_data_length(r->request_header);
switch (ext_tcode) {
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
@@ -731,46 +724,42 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
{
int tcode, tlabel, extended_tcode, source, destination;
- tcode = HEADER_GET_TCODE(request_header[0]);
- tlabel = HEADER_GET_TLABEL(request_header[0]);
- source = HEADER_GET_DESTINATION(request_header[0]);
- destination = HEADER_GET_SOURCE(request_header[1]);
- extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
-
- response->header[0] =
- HEADER_RETRY(RETRY_1) |
- HEADER_TLABEL(tlabel) |
- HEADER_DESTINATION(destination);
- response->header[1] =
- HEADER_SOURCE(source) |
- HEADER_RCODE(rcode);
- response->header[2] = 0;
+ tcode = async_header_get_tcode(request_header);
+ tlabel = async_header_get_tlabel(request_header);
+ source = async_header_get_destination(request_header); // Exchange.
+ destination = async_header_get_source(request_header); // Exchange.
+ extended_tcode = async_header_get_extended_tcode(request_header);
+
+ async_header_set_retry(response->header, RETRY_1);
+ async_header_set_tlabel(response->header, tlabel);
+ async_header_set_destination(response->header, destination);
+ async_header_set_source(response->header, source);
+ async_header_set_rcode(response->header, rcode);
+ response->header[2] = 0; // The field is reserved.
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
- response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
+ async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE);
response->header_length = 12;
response->payload_length = 0;
break;
case TCODE_READ_QUADLET_REQUEST:
- response->header[0] |=
- HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
+ async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE);
if (payload != NULL)
- response->header[3] = *(u32 *)payload;
+ async_header_set_quadlet_data(response->header, *(u32 *)payload);
else
- response->header[3] = 0;
+ async_header_set_quadlet_data(response->header, 0);
response->header_length = 16;
response->payload_length = 0;
break;
case TCODE_READ_BLOCK_REQUEST:
case TCODE_LOCK_REQUEST:
- response->header[0] |= HEADER_TCODE(tcode + 2);
- response->header[3] =
- HEADER_DATA_LENGTH(length) |
- HEADER_EXTENDED_TCODE(extended_tcode);
+ async_header_set_tcode(response->header, tcode + 2);
+ async_header_set_data_length(response->header, length);
+ async_header_set_extended_tcode(response->header, extended_tcode);
response->header_length = 16;
response->payload = payload;
response->payload_length = length;
@@ -807,7 +796,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
u32 *data, length;
int request_tcode;
- request_tcode = HEADER_GET_TCODE(p->header[0]);
+ request_tcode = async_header_get_tcode(p->header);
switch (request_tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
data = &p->header[3];
@@ -817,7 +806,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_LOCK_REQUEST:
data = p->payload;
- length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ length = async_header_get_data_length(p->header);
break;
case TCODE_READ_QUADLET_REQUEST:
@@ -827,7 +816,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
case TCODE_READ_BLOCK_REQUEST:
data = NULL;
- length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ length = async_header_get_data_length(p->header);
break;
default:
@@ -870,24 +859,30 @@ static struct fw_request *allocate_request(struct fw_card *card,
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
+ u32 *data = NULL;
+ unsigned int data_length = 0;
+
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
- HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
+ HEADER_DESTINATION_IS_BROADCAST(request->request_header)) {
fw_request_put(request);
return;
}
- if (rcode == RCODE_COMPLETE)
- fw_fill_response(&request->response, request->request_header,
- rcode, request->data,
- fw_get_response_length(request));
- else
- fw_fill_response(&request->response, request->request_header,
- rcode, NULL, 0);
+ if (rcode == RCODE_COMPLETE) {
+ data = request->data;
+ data_length = fw_get_response_length(request);
+ }
+
+ fw_fill_response(&request->response, request->request_header, rcode, data, data_length);
// Increase the reference count so that the object is kept while the response is in flight.
fw_request_get(request);
+ trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation,
+ request->response.speed, request->response.header,
+ data, data ? data_length / 4 : 0);
+
card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
@@ -926,11 +921,11 @@ static void handle_exclusive_region_request(struct fw_card *card,
struct fw_address_handler *handler;
int tcode, destination, source;
- destination = HEADER_GET_DESTINATION(p->header[0]);
- source = HEADER_GET_SOURCE(p->header[1]);
- tcode = HEADER_GET_TCODE(p->header[0]);
+ destination = async_header_get_destination(p->header);
+ source = async_header_get_source(p->header);
+ tcode = async_header_get_tcode(p->header);
if (tcode == TCODE_LOCK_REQUEST)
- tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
+ tcode = 0x10 + async_header_get_extended_tcode(p->header);
rcu_read_lock();
handler = lookup_enclosing_address_handler(&address_handler_list,
@@ -963,9 +958,9 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
- tcode = HEADER_GET_TCODE(p->header[0]);
- destination = HEADER_GET_DESTINATION(p->header[0]);
- source = HEADER_GET_SOURCE(p->header[1]);
+ tcode = async_header_get_tcode(p->header);
+ destination = async_header_get_destination(p->header);
+ source = async_header_get_source(p->header);
if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
tcode != TCODE_WRITE_BLOCK_REQUEST) {
@@ -993,11 +988,15 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
struct fw_request *request;
unsigned long long offset;
+ unsigned int tcode;
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
- if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
+ tcode = async_header_get_tcode(p->header);
+ if (tcode_is_link_internal(tcode)) {
+ trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp,
+ p->header[1], p->header[2]);
fw_cdev_handle_phy_packet(card, p);
return;
}
@@ -1008,8 +1007,11 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
return;
}
- offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
- p->header[2];
+ trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack,
+ p->timestamp, p->header, request->data,
+ tcode_is_read_request(tcode) ? 0 : request->length / 4);
+
+ offset = async_header_get_offset(p->header);
if (!is_in_fcp_region(offset, request->length))
handle_exclusive_region_request(card, p, request, offset);
@@ -1027,37 +1029,15 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
size_t data_length;
int tcode, tlabel, source, rcode;
- tcode = HEADER_GET_TCODE(p->header[0]);
- tlabel = HEADER_GET_TLABEL(p->header[0]);
- source = HEADER_GET_SOURCE(p->header[1]);
- rcode = HEADER_GET_RCODE(p->header[1]);
+ tcode = async_header_get_tcode(p->header);
+ tlabel = async_header_get_tlabel(p->header);
+ source = async_header_get_source(p->header);
+ rcode = async_header_get_rcode(p->header);
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter->node_id == source && iter->tlabel == tlabel) {
- if (!try_cancel_split_timeout(iter)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
- }
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- break;
- }
- }
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (!t) {
- timed_out:
- fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
- source, tlabel);
- return;
- }
-
- /*
- * FIXME: sanity check packet, is length correct, does tcodes
- * and addresses match.
- */
+ // FIXME: sanity-check the packet: is the length correct, and do the tcode
+ // and addresses match the transaction request looked up later?
+ //
+ // For the tracepoint event, decode the header here despite that concern.
switch (tcode) {
case TCODE_READ_QUADLET_RESPONSE:
@@ -1073,7 +1053,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_RESPONSE:
data = p->payload;
- data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ data_length = async_header_get_data_length(p->header);
break;
default:
@@ -1083,6 +1063,31 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (!try_cancel_split_timeout(iter)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp,
+ p->header, data, data_length / 4);
+
+ if (!t) {
+ timed_out:
+ fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
+ source, tlabel);
+ return;
+ }
+
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
@@ -1135,7 +1140,7 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
{
int start;
- if (!TCODE_IS_READ_REQUEST(tcode)) {
+ if (!tcode_is_read_request(tcode)) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
return;
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 95c10f3d2282..7c36d2628e37 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -225,13 +225,20 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define TCODE_LINK_INTERNAL 0xe
-#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
-#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
-#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL)
-#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
-#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
-#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
-#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
+static inline bool tcode_is_read_request(unsigned int tcode)
+{
+ return (tcode & ~1u) == 4u;
+}
+
+static inline bool tcode_is_block_packet(unsigned int tcode)
+{
+ return (tcode & 1u) != 0u;
+}
+
+static inline bool tcode_is_link_internal(unsigned int tcode)
+{
+ return (tcode == TCODE_LINK_INTERNAL);
+}
#define LOCAL_BUS 0xffc0
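The TCODE_IS_* macros removed above become type-checked inline helpers with identical semantics. A small sanity sketch, not from the patch, using the tcode constants from <linux/firewire-constants.h> and the helpers defined in core.h:

#include <linux/bug.h>
#include <linux/firewire-constants.h>
#include "core.h"       /* tcode_is_*() helpers shown above */

static void demo_tcode_checks(void)
{
        /* Read requests are tcodes 4 and 5. */
        WARN_ON(!tcode_is_read_request(TCODE_READ_QUADLET_REQUEST));
        WARN_ON(!tcode_is_read_request(TCODE_READ_BLOCK_REQUEST));

        /* Odd tcodes carry a data block, e.g. TCODE_WRITE_BLOCK_REQUEST (1). */
        WARN_ON(!tcode_is_block_packet(TCODE_WRITE_BLOCK_REQUEST));
        WARN_ON(tcode_is_block_packet(TCODE_WRITE_QUADLET_REQUEST));
}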
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index b0d671db178a..ea31ac7ac1ca 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
if (atomic_read(&buffer->size) == 0)
return -ENODEV;
- /* FIXME: Check length <= user_length. */
+ length = buffer->head->length;
+
+ if (length > user_length)
+ return 0;
end = buffer->data + buffer->capacity;
- length = buffer->head->length;
if (&buffer->head->data[length] < end) {
if (copy_to_user(data, buffer->head->data, length))
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7bc71f4be64a..f6de0b3a9a55 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -40,6 +40,7 @@
#include "core.h"
#include "ohci.h"
+#include "packet-header-definitions.h"
#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
@@ -393,7 +394,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
-#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
@@ -401,7 +401,6 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
static bool param_remote_dma;
@@ -410,12 +409,7 @@ MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
- if (likely(!(param_debug &
- (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
- return;
-
- if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
- !(evt & OHCI1394_busReset))
+ if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
return;
ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
@@ -516,14 +510,14 @@ static const char *tcodes[] = {
static void log_ar_at_event(struct fw_ohci *ohci,
char dir, int speed, u32 *header, int evt)
{
- int tcode = header[0] >> 4 & 0xf;
+ int tcode = async_header_get_tcode(header);
char specific[12];
if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
return;
if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
+ evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
@@ -532,20 +526,27 @@ static void log_ar_at_event(struct fw_ohci *ohci,
}
switch (tcode) {
- case 0x0: case 0x6: case 0x8:
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_READ_QUADLET_RESPONSE:
+ case TCODE_CYCLE_START:
snprintf(specific, sizeof(specific), " = %08x",
be32_to_cpu((__force __be32)header[3]));
break;
- case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_READ_BLOCK_RESPONSE:
+ case TCODE_LOCK_REQUEST:
+ case TCODE_LOCK_RESPONSE:
snprintf(specific, sizeof(specific), " %x,%x",
- header[3] >> 16, header[3] & 0xffff);
+ async_header_get_data_length(header),
+ async_header_get_extended_tcode(header));
break;
default:
specific[0] = '\0';
}
switch (tcode) {
- case 0xa:
+ case TCODE_STREAM_DATA:
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
@@ -553,19 +554,23 @@ static void log_ar_at_event(struct fw_ohci *ohci,
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]);
break;
- case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_QUADLET_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_LOCK_REQUEST:
ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], header[1] & 0xffff, header[2], specific);
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
+ dir, speed, async_header_get_tlabel(header),
+ async_header_get_source(header), async_header_get_destination(header),
+ evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
break;
default:
ohci_notice(ohci,
"A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], specific);
+ dir, speed, async_header_get_tlabel(header),
+ async_header_get_source(header), async_header_get_destination(header),
+ evts[evt], tcodes[tcode], specific);
}
}
@@ -853,7 +858,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.header[1] = cond_le32_to_cpu(buffer[1]);
p.header[2] = cond_le32_to_cpu(buffer[2]);
- tcode = (p.header[0] >> 4) & 0x0f;
+ tcode = async_header_get_tcode(p.header);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_READ_QUADLET_RESPONSE:
@@ -874,7 +879,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_LOCK_RESPONSE:
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
- p.payload_length = p.header[3] >> 16;
+ p.payload_length = async_header_get_data_length(p.header);
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
ar_context_abort(ctx, "invalid packet length");
return NULL;
@@ -911,8 +916,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
*/
- if (evt == OHCI1394_evt_no_status &&
- (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+ if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
p.ack = ACK_COMPLETE;
/*
@@ -1353,7 +1357,7 @@ static int at_context_queue_packet(struct context *ctx,
* accordingly.
*/
- tcode = (packet->header[0] >> 4) & 0x0f;
+ tcode = async_header_get_tcode(packet->header);
header = (__le32 *) &d[1];
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
@@ -1371,7 +1375,7 @@ static int at_context_queue_packet(struct context *ctx,
(packet->header[0] & 0xffff0000));
header[2] = cpu_to_le32(packet->header[2]);
- if (TCODE_IS_BLOCK_PACKET(tcode))
+ if (tcode_is_block_packet(tcode))
header[3] = cpu_to_le32(packet->header[3]);
else
header[3] = (__force __le32) packet->header[3];
@@ -1550,11 +1554,7 @@ static int handle_at_packet(struct context *context,
return 1;
}
-#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
-#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+static u32 get_cycle_time(struct fw_ohci *ohci);
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
@@ -1562,9 +1562,9 @@ static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet response;
int tcode, length, i;
- tcode = HEADER_GET_TCODE(packet->header[0]);
- if (TCODE_IS_BLOCK_PACKET(tcode))
- length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ tcode = async_header_get_tcode(packet->header);
+ if (tcode_is_block_packet(tcode))
+ length = async_header_get_data_length(packet->header);
else
length = 4;
@@ -1572,7 +1572,7 @@ static void handle_local_rom(struct fw_ohci *ohci,
if (i + length > CONFIG_ROM_SIZE) {
fw_fill_response(&response, packet->header,
RCODE_ADDRESS_ERROR, NULL, 0);
- } else if (!TCODE_IS_READ_REQUEST(tcode)) {
+ } else if (!tcode_is_read_request(tcode)) {
fw_fill_response(&response, packet->header,
RCODE_TYPE_ERROR, NULL, 0);
} else {
@@ -1580,6 +1580,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
(void *) ohci->config_rom + i, length);
}
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1591,10 +1593,10 @@ static void handle_local_lock(struct fw_ohci *ohci,
__be32 *payload, lock_old;
u32 lock_arg, lock_data;
- tcode = HEADER_GET_TCODE(packet->header[0]);
- length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ tcode = async_header_get_tcode(packet->header);
+ length = async_header_get_data_length(packet->header);
payload = packet->payload;
- ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+ ext_tcode = async_header_get_extended_tcode(packet->header);
if (tcode == TCODE_LOCK_REQUEST &&
ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
@@ -1628,6 +1630,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1640,10 +1644,7 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
- offset =
- ((unsigned long long)
- HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
- packet->header[2];
+ offset = async_header_get_offset(packet->header);
csr = offset - CSR_REGISTER_BASE;
/* Handle config rom reads. */
@@ -1670,8 +1671,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
-static u32 get_cycle_time(struct fw_ohci *ohci);
-
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
@@ -1679,7 +1678,7 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
spin_lock_irqsave(&ctx->ohci->lock, flags);
- if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+ if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
ctx->ohci->generation == packet->generation) {
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
@@ -2060,6 +2059,7 @@ static void bus_reset_work(struct work_struct *work)
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
@@ -2125,12 +2125,15 @@ static irqreturn_t irq_handler(int irq, void *data)
return IRQ_NONE;
/*
- * busReset and postedWriteErr must not be cleared yet
+ * busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(ohci, event);
+ // The busReset interrupt is unmasked again in bus_reset_work(), which is scheduled by the selfIDComplete event.
+ if (event & OHCI1394_busReset)
+ reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);
@@ -2468,9 +2471,8 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_cycleInconsistent |
OHCI1394_unrecoverableError |
OHCI1394_cycleTooLong |
- OHCI1394_masterIntEnable;
- if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
- irqs |= OHCI1394_busReset;
+ OHCI1394_masterIntEnable |
+ OHCI1394_busReset;
reg_write(ohci, OHCI1394_IntMaskSet, irqs);
reg_write(ohci, OHCI1394_HCControlSet,
@@ -3623,7 +3625,7 @@ static int pci_probe(struct pci_dev *dev,
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int i, err;
+ int i, flags, irq, err;
size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
@@ -3748,18 +3750,29 @@ static int pci_probe(struct pci_dev *dev,
guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
reg_read(ohci, OHCI1394_GUIDLo);
+ flags = PCI_IRQ_INTX;
if (!(ohci->quirks & QUIRK_NO_MSI))
- pci_enable_msi(dev);
- err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
- pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
+ flags |= PCI_IRQ_MSI;
+ err = pci_alloc_irq_vectors(dev, 1, 1, flags);
+ if (err < 0)
+ return err;
+ irq = pci_irq_vector(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto fail_msi;
+ }
+
+ err = request_threaded_irq(irq, irq_handler, NULL,
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
+ ohci);
if (err < 0) {
- ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
+ ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
goto fail_msi;
}
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
- goto fail_msi;
+ goto fail_irq;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
@@ -3772,9 +3785,10 @@ static int pci_probe(struct pci_dev *dev,
return 0;
+ fail_irq:
+ free_irq(irq, ohci);
fail_msi:
- devm_free_irq(&dev->dev, dev->irq, ohci);
- pci_disable_msi(dev);
+ pci_free_irq_vectors(dev);
return err;
}
@@ -3782,6 +3796,7 @@ static int pci_probe(struct pci_dev *dev,
static void pci_remove(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
+ int irq;
/*
* If the removal is happening from the suspend state, LPS won't be
@@ -3801,8 +3816,10 @@ static void pci_remove(struct pci_dev *dev)
software_reset(ohci);
- devm_free_irq(&dev->dev, dev->irq, ohci);
- pci_disable_msi(dev);
+ irq = pci_irq_vector(dev, 0);
+ if (irq >= 0)
+ free_irq(irq, ohci);
+ pci_free_irq_vectors(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
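The probe/remove hunks above move the driver from pci_enable_msi()/devm_request_irq() to the pci_alloc_irq_vectors() API with explicit teardown. A condensed sketch of the allocation/teardown pairing; this is an illustration with the error handling folded into one helper, not the driver's exact code:

/* Condensed illustration of the new IRQ setup. */
static int demo_setup_irq(struct pci_dev *dev, struct fw_ohci *ohci)
{
        unsigned int flags = PCI_IRQ_INTX;
        int irq, err;

        if (!(ohci->quirks & QUIRK_NO_MSI))
                flags |= PCI_IRQ_MSI;

        err = pci_alloc_irq_vectors(dev, 1, 1, flags);
        if (err < 0)
                return err;

        irq = pci_irq_vector(dev, 0);
        if (irq < 0) {
                pci_free_irq_vectors(dev);
                return irq;
        }

        err = request_threaded_irq(irq, irq_handler, NULL,
                                   pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
                                   ohci_driver_name, ohci);
        if (err < 0)
                pci_free_irq_vectors(dev);

        return err;
}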
diff --git a/drivers/firewire/packet-header-definitions.h b/drivers/firewire/packet-header-definitions.h
new file mode 100644
index 000000000000..ab9d0fa790d4
--- /dev/null
+++ b/drivers/firewire/packet-header-definitions.h
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// packet-header-definitions.h - The definitions of header fields for IEEE 1394 packets.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
+#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
+
+#define ASYNC_HEADER_QUADLET_COUNT 4
+
+#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16
+#define ASYNC_HEADER_Q0_DESTINATION_MASK 0xffff0000
+#define ASYNC_HEADER_Q0_TLABEL_SHIFT 10
+#define ASYNC_HEADER_Q0_TLABEL_MASK 0x0000fc00
+#define ASYNC_HEADER_Q0_RETRY_SHIFT 8
+#define ASYNC_HEADER_Q0_RETRY_MASK 0x00000300
+#define ASYNC_HEADER_Q0_TCODE_SHIFT 4
+#define ASYNC_HEADER_Q0_TCODE_MASK 0x000000f0
+#define ASYNC_HEADER_Q0_PRIORITY_SHIFT 0
+#define ASYNC_HEADER_Q0_PRIORITY_MASK 0x0000000f
+#define ASYNC_HEADER_Q1_SOURCE_SHIFT 16
+#define ASYNC_HEADER_Q1_SOURCE_MASK 0xffff0000
+#define ASYNC_HEADER_Q1_RCODE_SHIFT 12
+#define ASYNC_HEADER_Q1_RCODE_MASK 0x0000f000
+#define ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT 0
+#define ASYNC_HEADER_Q1_OFFSET_HIGH_MASK 0x0000ffff
+#define ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT 16
+#define ASYNC_HEADER_Q3_DATA_LENGTH_MASK 0xffff0000
+#define ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT 0
+#define ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK 0x0000ffff
+
+static inline unsigned int async_header_get_destination(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT;
+}
+
+static inline unsigned int async_header_get_tlabel(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_TLABEL_MASK) >> ASYNC_HEADER_Q0_TLABEL_SHIFT;
+}
+
+static inline unsigned int async_header_get_retry(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_RETRY_MASK) >> ASYNC_HEADER_Q0_RETRY_SHIFT;
+}
+
+static inline unsigned int async_header_get_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_TCODE_MASK) >> ASYNC_HEADER_Q0_TCODE_SHIFT;
+}
+
+static inline unsigned int async_header_get_priority(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[0] & ASYNC_HEADER_Q0_PRIORITY_MASK) >> ASYNC_HEADER_Q0_PRIORITY_SHIFT;
+}
+
+static inline unsigned int async_header_get_source(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[1] & ASYNC_HEADER_Q1_SOURCE_MASK) >> ASYNC_HEADER_Q1_SOURCE_SHIFT;
+}
+
+static inline unsigned int async_header_get_rcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[1] & ASYNC_HEADER_Q1_RCODE_MASK) >> ASYNC_HEADER_Q1_RCODE_SHIFT;
+}
+
+static inline u64 async_header_get_offset(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ u32 hi = (header[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT;
+ return (((u64)hi) << 32) | ((u64)header[2]);
+}
+
+static inline u32 async_header_get_quadlet_data(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return header[3];
+}
+
+static inline unsigned int async_header_get_data_length(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[3] & ASYNC_HEADER_Q3_DATA_LENGTH_MASK) >> ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT;
+}
+
+static inline unsigned int async_header_get_extended_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
+{
+ return (header[3] & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK) >> ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT;
+}
+
+static inline void async_header_set_destination(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int destination)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_DESTINATION_MASK;
+ header[0] |= (((u32)destination) << ASYNC_HEADER_Q0_DESTINATION_SHIFT) & ASYNC_HEADER_Q0_DESTINATION_MASK;
+}
+
+static inline void async_header_set_tlabel(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int tlabel)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_TLABEL_MASK;
+ header[0] |= (((u32)tlabel) << ASYNC_HEADER_Q0_TLABEL_SHIFT) & ASYNC_HEADER_Q0_TLABEL_MASK;
+}
+
+static inline void async_header_set_retry(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int retry)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_RETRY_MASK;
+ header[0] |= (((u32)retry) << ASYNC_HEADER_Q0_RETRY_SHIFT) & ASYNC_HEADER_Q0_RETRY_MASK;
+}
+
+static inline void async_header_set_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int tcode)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_TCODE_MASK;
+ header[0] |= (((u32)tcode) << ASYNC_HEADER_Q0_TCODE_SHIFT) & ASYNC_HEADER_Q0_TCODE_MASK;
+}
+
+static inline void async_header_set_priority(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int priority)
+{
+ header[0] &= ~ASYNC_HEADER_Q0_PRIORITY_MASK;
+ header[0] |= (((u32)priority) << ASYNC_HEADER_Q0_PRIORITY_SHIFT) & ASYNC_HEADER_Q0_PRIORITY_MASK;
+}
+
+static inline void async_header_set_source(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int source)
+{
+ header[1] &= ~ASYNC_HEADER_Q1_SOURCE_MASK;
+ header[1] |= (((u32)source) << ASYNC_HEADER_Q1_SOURCE_SHIFT) & ASYNC_HEADER_Q1_SOURCE_MASK;
+}
+
+static inline void async_header_set_rcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int rcode)
+{
+ header[1] &= ~ASYNC_HEADER_Q1_RCODE_MASK;
+ header[1] |= (((u32)rcode) << ASYNC_HEADER_Q1_RCODE_SHIFT) & ASYNC_HEADER_Q1_RCODE_MASK;
+}
+
+static inline void async_header_set_offset(u32 header[ASYNC_HEADER_QUADLET_COUNT], u64 offset)
+{
+ u32 hi = (u32)(offset >> 32);
+ header[1] &= ~ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
+ header[1] |= (hi << ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
+ header[2] = (u32)(offset & 0x00000000ffffffff);
+}
+
+static inline void async_header_set_quadlet_data(u32 header[ASYNC_HEADER_QUADLET_COUNT], u32 quadlet_data)
+{
+ header[3] = quadlet_data;
+}
+
+static inline void async_header_set_data_length(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int data_length)
+{
+ header[3] &= ~ASYNC_HEADER_Q3_DATA_LENGTH_MASK;
+ header[3] |= (((u32)data_length) << ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT) & ASYNC_HEADER_Q3_DATA_LENGTH_MASK;
+}
+
+static inline void async_header_set_extended_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int extended_tcode)
+{
+ header[3] &= ~ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK;
+ header[3] |= (((u32)extended_tcode) << ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT) & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK;
+}
+
+#define ISOC_HEADER_DATA_LENGTH_SHIFT 16
+#define ISOC_HEADER_DATA_LENGTH_MASK 0xffff0000
+#define ISOC_HEADER_TAG_SHIFT 14
+#define ISOC_HEADER_TAG_MASK 0x0000c000
+#define ISOC_HEADER_CHANNEL_SHIFT 8
+#define ISOC_HEADER_CHANNEL_MASK 0x00003f00
+#define ISOC_HEADER_TCODE_SHIFT 4
+#define ISOC_HEADER_TCODE_MASK 0x000000f0
+#define ISOC_HEADER_SY_SHIFT 0
+#define ISOC_HEADER_SY_MASK 0x0000000f
+
+static inline unsigned int isoc_header_get_data_length(u32 header)
+{
+ return (header & ISOC_HEADER_DATA_LENGTH_MASK) >> ISOC_HEADER_DATA_LENGTH_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_tag(u32 header)
+{
+ return (header & ISOC_HEADER_TAG_MASK) >> ISOC_HEADER_TAG_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_channel(u32 header)
+{
+ return (header & ISOC_HEADER_CHANNEL_MASK) >> ISOC_HEADER_CHANNEL_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_tcode(u32 header)
+{
+ return (header & ISOC_HEADER_TCODE_MASK) >> ISOC_HEADER_TCODE_SHIFT;
+}
+
+static inline unsigned int isoc_header_get_sy(u32 header)
+{
+ return (header & ISOC_HEADER_SY_MASK) >> ISOC_HEADER_SY_SHIFT;
+}
+
+static inline void isoc_header_set_data_length(u32 *header, unsigned int data_length)
+{
+ *header &= ~ISOC_HEADER_DATA_LENGTH_MASK;
+ *header |= (((u32)data_length) << ISOC_HEADER_DATA_LENGTH_SHIFT) & ISOC_HEADER_DATA_LENGTH_MASK;
+}
+
+static inline void isoc_header_set_tag(u32 *header, unsigned int tag)
+{
+ *header &= ~ISOC_HEADER_TAG_MASK;
+ *header |= (((u32)tag) << ISOC_HEADER_TAG_SHIFT) & ISOC_HEADER_TAG_MASK;
+}
+
+static inline void isoc_header_set_channel(u32 *header, unsigned int channel)
+{
+ *header &= ~ISOC_HEADER_CHANNEL_MASK;
+ *header |= (((u32)channel) << ISOC_HEADER_CHANNEL_SHIFT) & ISOC_HEADER_CHANNEL_MASK;
+}
+
+static inline void isoc_header_set_tcode(u32 *header, unsigned int tcode)
+{
+ *header &= ~ISOC_HEADER_TCODE_MASK;
+ *header |= (((u32)tcode) << ISOC_HEADER_TCODE_SHIFT) & ISOC_HEADER_TCODE_MASK;
+}
+
+static inline void isoc_header_set_sy(u32 *header, unsigned int sy)
+{
+ *header &= ~ISOC_HEADER_SY_MASK;
+ *header |= (((u32)sy) << ISOC_HEADER_SY_SHIFT) & ISOC_HEADER_SY_MASK;
+}
+
+#endif // _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
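
A minimal sketch of how the isochronous helpers above compose and decompose a packet header quadlet (hypothetical code, assuming only the definitions in this file):

/*
 * Hypothetical illustration only: pack an isochronous packet header with the
 * setters above, then read fields back with the getters.
 */
static inline void example_isoc_header_round_trip(void)
{
	u32 header = 0;

	isoc_header_set_data_length(&header, 0x00d0);
	isoc_header_set_tag(&header, 0x02);
	isoc_header_set_channel(&header, 0x0d);
	isoc_header_set_tcode(&header, 0x0e);
	isoc_header_set_sy(&header, 0x0c);

	/* header now holds 0x00d08dec; isoc_header_get_channel(header)
	 * returns 0x0d and isoc_header_get_sy(header) returns 0x0c. */
}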
diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c
new file mode 100644
index 000000000000..f93c966e794d
--- /dev/null
+++ b/drivers/firewire/packet-serdes-test.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// packet-serdes-test.c - An application of Kunit to check serialization/deserialization of packets
+// defined by IEEE 1394.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#include <kunit/test.h>
+
+#include <linux/firewire-constants.h>
+
+#include "packet-header-definitions.h"
+
+static void serialize_async_header_common(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id)
+{
+ async_header_set_destination(header, dst_id);
+ async_header_set_tlabel(header, tlabel);
+ async_header_set_retry(header, retry);
+ async_header_set_tcode(header, tcode);
+ async_header_set_priority(header, priority);
+ async_header_set_source(header, src_id);
+}
+
+static void serialize_async_header_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id, u64 offset)
+{
+ serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id);
+ async_header_set_offset(header, offset);
+}
+
+static void serialize_async_header_quadlet_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id,
+ u64 offset)
+{
+ serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset);
+}
+
+static void serialize_async_header_block_request(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id,
+ u64 offset, unsigned int data_length,
+ unsigned int extended_tcode)
+{
+ serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset);
+ async_header_set_data_length(header, data_length);
+ async_header_set_extended_tcode(header, extended_tcode);
+}
+
+static void serialize_async_header_response(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id,
+ unsigned int rcode)
+{
+ serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id);
+ async_header_set_rcode(header, rcode);
+}
+
+static void serialize_async_header_quadlet_response(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id,
+ unsigned int rcode)
+{
+ serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ rcode);
+}
+
+static void serialize_async_header_block_response(u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int dst_id, unsigned int tlabel,
+ unsigned int retry, unsigned int tcode,
+ unsigned int priority, unsigned int src_id,
+ unsigned int rcode, unsigned int data_length,
+ unsigned int extended_tcode)
+{
+ serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ rcode);
+ async_header_set_data_length(header, data_length);
+ async_header_set_extended_tcode(header, extended_tcode);
+}
+
+static void deserialize_async_header_common(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id)
+{
+ *dst_id = async_header_get_destination(header);
+ *tlabel = async_header_get_tlabel(header);
+ *retry = async_header_get_retry(header);
+ *tcode = async_header_get_tcode(header);
+ *priority = async_header_get_priority(header);
+ *src_id = async_header_get_source(header);
+}
+
+static void deserialize_async_header_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ u64 *offset)
+{
+ deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id);
+ *offset = async_header_get_offset(header);
+}
+
+static void deserialize_async_header_quadlet_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ u64 *offset)
+{
+ deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset);
+}
+
+static void deserialize_async_header_block_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ u64 *offset,
+ unsigned int *data_length,
+ unsigned int *extended_tcode)
+{
+ deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset);
+ *data_length = async_header_get_data_length(header);
+ *extended_tcode = async_header_get_extended_tcode(header);
+}
+
+static void deserialize_async_header_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ unsigned int *rcode)
+{
+ deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id);
+ *rcode = async_header_get_rcode(header);
+}
+
+static void deserialize_async_header_quadlet_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ unsigned int *rcode)
+{
+ deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode);
+}
+
+static void deserialize_async_header_block_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT],
+ unsigned int *dst_id, unsigned int *tlabel,
+ unsigned int *retry, unsigned int *tcode,
+ unsigned int *priority, unsigned int *src_id,
+ unsigned int *rcode, unsigned int *data_length,
+ unsigned int *extended_tcode)
+{
+ deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode);
+ *data_length = async_header_get_data_length(header);
+ *extended_tcode = async_header_get_extended_tcode(header);
+}
+
+static void serialize_isoc_header(u32 *header, unsigned int data_length, unsigned int tag,
+ unsigned int channel, unsigned int tcode, unsigned int sy)
+{
+ isoc_header_set_data_length(header, data_length);
+ isoc_header_set_tag(header, tag);
+ isoc_header_set_channel(header, channel);
+ isoc_header_set_tcode(header, tcode);
+ isoc_header_set_sy(header, sy);
+}
+
+static void deserialize_isoc_header(u32 header, unsigned int *data_length, unsigned int *tag,
+ unsigned int *channel, unsigned int *tcode, unsigned int *sy)
+{
+ *data_length = isoc_header_get_data_length(header);
+ *tag = isoc_header_get_tag(header);
+ *channel = isoc_header_get_channel(header);
+ *tcode = isoc_header_get_tcode(header);
+ *sy = isoc_header_get_sy(header);
+}
+
+static void test_async_header_write_quadlet_request(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc05100,
+ 0xffc1ffff,
+ 0xf0000234,
+ 0x1f0000c0,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ u64 offset;
+ u32 quadlet_data;
+
+ deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &offset);
+ quadlet_data = async_header_get_quadlet_data(expected);
+
+ KUNIT_EXPECT_EQ(test, 0xffc0, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x14, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_WRITE_QUADLET_REQUEST, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc1, src_id);
+ KUNIT_EXPECT_EQ(test, 0xfffff0000234, offset);
+ KUNIT_EXPECT_EQ(test, 0x1f0000c0, quadlet_data);
+
+ serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, offset);
+ async_header_set_quadlet_data(header, quadlet_data);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_write_block_request(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc06510,
+ 0xffc1ecc0,
+ 0x00000000,
+ 0x00180000,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ u64 offset;
+ unsigned int data_length;
+ unsigned int extended_tcode;
+
+ deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &offset, &data_length,
+ &extended_tcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc0, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x19, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_WRITE_BLOCK_REQUEST, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc1, src_id);
+ KUNIT_EXPECT_EQ(test, 0xecc000000000, offset);
+ KUNIT_EXPECT_EQ(test, 0x0018, data_length);
+ KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode);
+
+ serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset, data_length, extended_tcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_write_response(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc15120,
+ 0xffc00000,
+ 0x00000000,
+ 0x00000000,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ unsigned int rcode;
+
+ deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &rcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc1, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x14, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_WRITE_RESPONSE, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc0, src_id);
+ KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode);
+
+ serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, rcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected) - sizeof(expected[0]));
+}
+
+static void test_async_header_read_quadlet_request(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc0f140,
+ 0xffc1ffff,
+ 0xf0000984,
+ 0x00000000,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ u64 offset;
+
+ deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &offset);
+
+ KUNIT_EXPECT_EQ(test, 0xffc0, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x3c, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_REQUEST, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc1, src_id);
+ KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset);
+
+ serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, offset);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_read_quadlet_response(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc1f160,
+ 0xffc00000,
+ 0x00000000,
+ 0x00000180,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ unsigned int rcode;
+ u32 quadlet_data;
+
+ deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &rcode);
+ quadlet_data = async_header_get_quadlet_data(expected);
+
+ KUNIT_EXPECT_EQ(test, 0xffc1, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x3c, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_RESPONSE, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc0, src_id);
+ KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode);
+ KUNIT_EXPECT_EQ(test, 0x00000180, quadlet_data);
+
+ serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, rcode);
+ async_header_set_quadlet_data(header, quadlet_data);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_read_block_request(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc0e150,
+ 0xffc1ffff,
+ 0xf0000400,
+ 0x00200000,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ u64 offset;
+ unsigned int data_length;
+ unsigned int extended_tcode;
+
+ deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &offset, &data_length,
+ &extended_tcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc0, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x38, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_REQUEST, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc1, src_id);
+ KUNIT_EXPECT_EQ(test, 0xfffff0000400, offset);
+ KUNIT_EXPECT_EQ(test, 0x0020, data_length);
+ KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode);
+
+ serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset, data_length, extended_tcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_read_block_response(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc1e170,
+ 0xffc00000,
+ 0x00000000,
+ 0x00200000,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ unsigned int rcode;
+ unsigned int data_length;
+ unsigned int extended_tcode;
+
+ deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &rcode, &data_length,
+ &extended_tcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc1, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x38, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_RESPONSE, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc0, src_id);
+ KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode);
+ KUNIT_EXPECT_EQ(test, 0x0020, data_length);
+ KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode);
+
+ serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, rcode, data_length, extended_tcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_lock_request(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc02d90,
+ 0xffc1ffff,
+ 0xf0000984,
+ 0x00080002,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ u64 offset;
+ unsigned int data_length;
+ unsigned int extended_tcode;
+
+ deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &offset, &data_length,
+ &extended_tcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc0, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x0b, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_LOCK_REQUEST, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc1, src_id);
+ KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset);
+ KUNIT_EXPECT_EQ(test, 0x0008, data_length);
+ KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode);
+
+ serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id,
+ offset, data_length, extended_tcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_async_header_lock_response(struct kunit *test)
+{
+ static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
+ 0xffc12db0,
+ 0xffc00000,
+ 0x00000000,
+ 0x00040002,
+ };
+ u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0};
+
+ unsigned int dst_id;
+ unsigned int tlabel;
+ unsigned int retry;
+ unsigned int tcode;
+ unsigned int priority;
+ unsigned int src_id;
+ unsigned int rcode;
+ unsigned int data_length;
+ unsigned int extended_tcode;
+
+ deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode,
+ &priority, &src_id, &rcode, &data_length,
+ &extended_tcode);
+
+ KUNIT_EXPECT_EQ(test, 0xffc1, dst_id);
+ KUNIT_EXPECT_EQ(test, 0x0b, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x01, retry);
+ KUNIT_EXPECT_EQ(test, TCODE_LOCK_RESPONSE, tcode);
+ KUNIT_EXPECT_EQ(test, 0x00, priority);
+ KUNIT_EXPECT_EQ(test, 0xffc0, src_id);
+ KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode);
+ KUNIT_EXPECT_EQ(test, 0x0004, data_length);
+ KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode);
+
+ serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority,
+ src_id, rcode, data_length, extended_tcode);
+
+ KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected));
+}
+
+static void test_isoc_header(struct kunit *test)
+{
+ const u32 expected = 0x00d08dec;
+ u32 header = 0;
+
+ unsigned int data_length;
+ unsigned int tag;
+ unsigned int channel;
+ unsigned int tcode;
+ unsigned int sy;
+
+ deserialize_isoc_header(expected, &data_length, &tag, &channel, &tcode, &sy);
+
+ KUNIT_EXPECT_EQ(test, 0xd0, data_length);
+ KUNIT_EXPECT_EQ(test, 0x02, tag);
+ KUNIT_EXPECT_EQ(test, 0x0d, channel);
+ KUNIT_EXPECT_EQ(test, 0x0e, tcode);
+ KUNIT_EXPECT_EQ(test, 0x0c, sy);
+
+ serialize_isoc_header(&header, data_length, tag, channel, tcode, sy);
+
+ KUNIT_EXPECT_EQ(test, header, expected);
+}
+
+static struct kunit_case packet_serdes_test_cases[] = {
+ KUNIT_CASE(test_async_header_write_quadlet_request),
+ KUNIT_CASE(test_async_header_write_block_request),
+ KUNIT_CASE(test_async_header_write_response),
+ KUNIT_CASE(test_async_header_read_quadlet_request),
+ KUNIT_CASE(test_async_header_read_quadlet_response),
+ KUNIT_CASE(test_async_header_read_block_request),
+ KUNIT_CASE(test_async_header_read_block_response),
+ KUNIT_CASE(test_async_header_lock_request),
+ KUNIT_CASE(test_async_header_lock_response),
+ KUNIT_CASE(test_isoc_header),
+ {}
+};
+
+static struct kunit_suite packet_serdes_test_suite = {
+ .name = "firewire-packet-serdes",
+ .test_cases = packet_serdes_test_cases,
+};
+kunit_test_suite(packet_serdes_test_suite);
+
+MODULE_LICENSE("GPL");
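
Further packet layouts can be covered with the same round-trip pattern used above: deserialize a known-good header, check each field, re-serialize, and compare against the original buffer. A hedged sketch of one more case (the name and header value are purely illustrative) and how it would be wired in:

/* Hypothetical extra case reusing the helpers defined in this file. */
static void test_isoc_header_alternate(struct kunit *test)
{
	const u32 expected = 0x0008a1e1;
	u32 header = 0;
	unsigned int data_length, tag, channel, tcode, sy;

	deserialize_isoc_header(expected, &data_length, &tag, &channel, &tcode, &sy);
	serialize_isoc_header(&header, data_length, tag, channel, tcode, sy);

	KUNIT_EXPECT_EQ(test, header, expected);
}

/* ...plus a KUNIT_CASE(test_isoc_header_alternate) entry in
 * packet_serdes_test_cases[] before the terminating {}. */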
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index e779d866022b..827dee0f57dd 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1500,19 +1500,14 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
- /*
- * SBP-2 does not require any alignment, but we set it anyway
- * for compatibility with earlier versions of this driver.
- */
- blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
return 0;
}
-static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+static int sbp2_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1538,7 +1533,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->start_stop_pwr_cond = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
- blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
+ lim->max_hw_sectors = 128 * 1024 / 512;
return 0;
}
@@ -1596,7 +1591,7 @@ static const struct scsi_host_template scsi_driver_template = {
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
- .slave_configure = sbp2_scsi_slave_configure,
+ .device_configure = sbp2_scsi_device_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
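
The sbp2 hunks above follow the queue_limits-based configure hook: limits are set on the structure passed to .device_configure instead of calling blk_queue_*() helpers on sdev->request_queue. A hedged sketch of the same pattern in a hypothetical driver (names and the 128 KiB cap are illustrative):

/* Hypothetical SCSI LLD snippet using the queue_limits-based hook. */
static int example_device_configure(struct scsi_device *sdev,
				    struct queue_limits *lim)
{
	lim->max_hw_sectors = 128 * 1024 / 512;	/* cap transfers at 128 KiB */
	return 0;
}

static const struct scsi_host_template example_template = {
	.device_configure	= example_device_configure,
	/* remaining mandatory fields omitted in this sketch */
};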
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e9401..1609247cfafc 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -101,11 +101,12 @@ struct ffa_drv_info {
bool bitmap_created;
bool notif_enabled;
unsigned int sched_recv_irq;
+ unsigned int notif_pend_irq;
unsigned int cpuhp_state;
struct ffa_pcpu_irq __percpu *irq_pcpu;
struct workqueue_struct *notif_pcpu_wq;
struct work_struct notif_pcpu_work;
- struct work_struct irq_work;
+ struct work_struct sched_recv_irq_work;
struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
@@ -344,6 +345,38 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
+static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+{
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ struct ffa_indirect_msg_hdr *msg;
+ ffa_value_t ret;
+ int retval = 0;
+
+ if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg)))
+ return -ERANGE;
+
+ mutex_lock(&drv_info->tx_lock);
+
+ msg = drv_info->tx_buffer;
+ msg->flags = 0;
+ msg->res0 = 0;
+ msg->offset = sizeof(*msg);
+ msg->send_recv_id = src_dst_ids;
+ msg->size = sz;
+ memcpy((u8 *)msg + msg->offset, buf, sz);
+
+ /* flags = 0, sender VMID = 0 works for both physical/virtual NS */
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ retval = ffa_to_linux_errno((int)ret.a2);
+
+ mutex_unlock(&drv_info->tx_lock);
+ return retval;
+}
+
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
u32 frag_len, u32 len, u64 *handle)
{
@@ -790,7 +823,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
- if (!ids_count[list]) { /* Global Notification */
+ if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
@@ -870,6 +903,11 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
dev->mode_32bit, data);
}
+static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
+{
+ return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+}
+
static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
if (drv_info->mem_ops_native)
@@ -1108,7 +1146,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_pcpu_irq_work_fn(struct work_struct *work)
+static void notif_get_and_handle(void *unused)
{
int rc;
struct ffa_notify_bitmaps bitmaps;
@@ -1131,10 +1169,17 @@ ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
struct ffa_drv_info *info = cb_data;
if (!is_per_vcpu)
- notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
+ notif_get_and_handle(info);
else
- queue_work_on(vcpu, info->notif_pcpu_wq,
- &info->notif_pcpu_work);
+ smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+ struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
+ notif_pcpu_work);
+
+ ffa_self_notif_handle(smp_processor_id(), true, info);
}
static const struct ffa_info_ops ffa_drv_info_ops = {
@@ -1145,6 +1190,7 @@ static const struct ffa_info_ops ffa_drv_info_ops = {
static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
+ .indirect_send = ffa_indirect_msg_send,
};
static const struct ffa_mem_ops ffa_drv_mem_ops = {
@@ -1227,6 +1273,8 @@ static int ffa_setup_partitions(void)
continue;
}
+ ffa_dev->properties = tpbuf->properties;
+
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
@@ -1291,12 +1339,23 @@ static void ffa_partitions_cleanup(void)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
#define FFA_FEAT_MANAGED_EXIT_INT (3)
-static irqreturn_t irq_handler(int irq, void *irq_data)
+static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
+{
+ struct ffa_pcpu_irq *pcpu = irq_data;
+ struct ffa_drv_info *info = pcpu->info;
+
+ queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
{
struct ffa_pcpu_irq *pcpu = irq_data;
struct ffa_drv_info *info = pcpu->info;
- queue_work(info->notif_pcpu_wq, &info->irq_work);
+ queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
+ &info->notif_pcpu_work);
return IRQ_HANDLED;
}
@@ -1306,15 +1365,23 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
ffa_notification_info_get();
}
-static int ffa_sched_recv_irq_map(void)
+static int ffa_irq_map(u32 id)
{
- int ret, irq, sr_intid;
+ char *err_str;
+ int ret, irq, intid;
- /* The returned sr_intid is assumed to be SGI donated to NS world */
- ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
+ if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
+ err_str = "Notification Pending Interrupt";
+ else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
+ err_str = "Schedule Receiver Interrupt";
+ else
+ err_str = "Unknown ID";
+
+ /* The returned intid is assumed to be SGI donated to NS world */
+ ret = ffa_features(id, 0, &intid, NULL);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
- pr_err("Failed to retrieve scheduler Rx interrupt\n");
+ pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
return ret;
}
@@ -1329,12 +1396,12 @@ static int ffa_sched_recv_irq_map(void)
oirq.np = gic;
oirq.args_count = 1;
- oirq.args[0] = sr_intid;
+ oirq.args[0] = intid;
irq = irq_create_of_mapping(&oirq);
of_node_put(gic);
#ifdef CONFIG_ACPI
} else {
- irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
+ irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
ACPI_ACTIVE_HIGH);
#endif
}
@@ -1347,23 +1414,28 @@ static int ffa_sched_recv_irq_map(void)
return irq;
}
-static void ffa_sched_recv_irq_unmap(void)
+static void ffa_irq_unmap(unsigned int irq)
{
- if (drv_info->sched_recv_irq) {
- irq_dispose_mapping(drv_info->sched_recv_irq);
- drv_info->sched_recv_irq = 0;
- }
+ if (!irq)
+ return;
+ irq_dispose_mapping(irq);
}
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
- enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->sched_recv_irq)
+ enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->notif_pend_irq)
+ enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
return 0;
}
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
- disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->sched_recv_irq)
+ disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->notif_pend_irq)
+ disable_percpu_irq(drv_info->notif_pend_irq);
return 0;
}
@@ -1382,13 +1454,16 @@ static void ffa_uninit_pcpu_irq(void)
if (drv_info->sched_recv_irq)
free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
+ if (drv_info->notif_pend_irq)
+ free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
+
if (drv_info->irq_pcpu) {
free_percpu(drv_info->irq_pcpu);
drv_info->irq_pcpu = NULL;
}
}
-static int ffa_init_pcpu_irq(unsigned int irq)
+static int ffa_init_pcpu_irq(void)
{
struct ffa_pcpu_irq __percpu *irq_pcpu;
int ret, cpu;
@@ -1402,13 +1477,31 @@ static int ffa_init_pcpu_irq(unsigned int irq)
drv_info->irq_pcpu = irq_pcpu;
- ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
- if (ret) {
- pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
- return ret;
+ if (drv_info->sched_recv_irq) {
+ ret = request_percpu_irq(drv_info->sched_recv_irq,
+ ffa_sched_recv_irq_handler,
+ "ARM-FFA-SRI", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering percpu SRI nIRQ %d : %d\n",
+ drv_info->sched_recv_irq, ret);
+ drv_info->sched_recv_irq = 0;
+ return ret;
+ }
}
- INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
+ if (drv_info->notif_pend_irq) {
+ ret = request_percpu_irq(drv_info->notif_pend_irq,
+ notif_pend_irq_handler,
+ "ARM-FFA-NPI", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering percpu NPI nIRQ %d : %d\n",
+ drv_info->notif_pend_irq, ret);
+ drv_info->notif_pend_irq = 0;
+ return ret;
+ }
+ }
+
+ INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
if (!drv_info->notif_pcpu_wq)
@@ -1428,7 +1521,10 @@ static int ffa_init_pcpu_irq(unsigned int irq)
static void ffa_notifications_cleanup(void)
{
ffa_uninit_pcpu_irq();
- ffa_sched_recv_irq_unmap();
+ ffa_irq_unmap(drv_info->sched_recv_irq);
+ drv_info->sched_recv_irq = 0;
+ ffa_irq_unmap(drv_info->notif_pend_irq);
+ drv_info->notif_pend_irq = 0;
if (drv_info->bitmap_created) {
ffa_notification_bitmap_destroy();
@@ -1439,30 +1535,31 @@ static void ffa_notifications_cleanup(void)
static void ffa_notifications_setup(void)
{
- int ret, irq;
+ int ret;
ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
- if (ret) {
- pr_info("Notifications not supported, continuing with it ..\n");
- return;
- }
+ if (!ret) {
+ ret = ffa_notification_bitmap_create();
+ if (ret) {
+ pr_err("Notification bitmap create error %d\n", ret);
+ return;
+ }
- ret = ffa_notification_bitmap_create();
- if (ret) {
- pr_info("Notification bitmap create error %d\n", ret);
- return;
+ drv_info->bitmap_created = true;
}
- drv_info->bitmap_created = true;
- irq = ffa_sched_recv_irq_map();
- if (irq <= 0) {
- ret = irq;
- goto cleanup;
- }
+ ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
+ if (ret > 0)
+ drv_info->sched_recv_irq = ret;
+
+ ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
+ if (ret > 0)
+ drv_info->notif_pend_irq = ret;
- drv_info->sched_recv_irq = irq;
+ if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
+ goto cleanup;
- ret = ffa_init_pcpu_irq(irq);
+ ret = ffa_init_pcpu_irq();
if (ret)
goto cleanup;
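
The new indirect_send operation added above is reached from FF-A client drivers through the device's ops table. A hedged sketch (hypothetical partition driver; payload and error handling are illustrative):

/* Hypothetical FF-A driver snippet using the FFA_MSG_SEND2 based op. */
static int example_send_greeting(struct ffa_device *ffa_dev)
{
	char greeting[] = "hello";

	if (!ffa_dev->ops->msg_ops->indirect_send)
		return -EOPNOTSUPP;

	/* Copies the payload into the TX buffer and issues FFA_MSG_SEND2. */
	return ffa_dev->ops->msg_ops->indirect_send(ffa_dev, greeting,
						    sizeof(greeting));
}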
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index a7bc4796519c..fd59f58ce8a2 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -10,7 +10,8 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
+scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
+scmi-protocols-y += pinctrl.o
scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 6affbfdd1dec..b5ac25dbc1ca 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -301,6 +301,17 @@ extern const struct scmi_desc scmi_optee_desc;
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
+enum scmi_bad_msg {
+ MSG_UNEXPECTED = -1,
+ MSG_INVALID = -2,
+ MSG_UNKNOWN = -3,
+ MSG_NOMEM = -4,
+ MSG_MBOX_SPURIOUS = -5,
+};
+
+void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ enum scmi_bad_msg err);
+
/* shmem related declarations */
struct scmi_shared_mem;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 2709598f3008..6b6957f4743f 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -33,6 +33,7 @@
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include "common.h"
#include "notify.h"
@@ -44,8 +45,7 @@
static DEFINE_IDA(scmi_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+static DEFINE_XARRAY(scmi_protocols);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
@@ -194,11 +194,94 @@ struct scmi_info {
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+static unsigned long
+scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
{
- const struct scmi_protocol *proto;
+ char *signature, *p;
+ unsigned long hash = 0;
- proto = idr_find(&scmi_protocols, protocol_id);
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
+ vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
+ if (!signature)
+ return 0;
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash;
+}
+
+static unsigned long
+scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
+ return protocol_id;
+ else
+ return scmi_vendor_protocol_signature(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+}
+
+static const struct scmi_protocol *
+__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ unsigned long key;
+ struct scmi_protocol *proto = NULL;
+
+ key = scmi_protocol_key_calculate(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+ if (key)
+ proto = xa_load(&scmi_protocols, key);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
+scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ const struct scmi_protocol *proto = NULL;
+
+ /* Searching for closest match ...*/
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+ if (proto)
+ return proto;
+
+ /* Any match just on vendor/sub_vendor ? */
+ if (impl_ver) {
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ sub_vendor_id, 0);
+ if (proto)
+ return proto;
+ }
+
+ /* Any match just on the vendor ? */
+ if (sub_vendor_id)
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ NULL, 0);
+ return proto;
+}
+
+static const struct scmi_protocol *
+scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto = NULL;
+
+ if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
+ proto = xa_load(&scmi_protocols, protocol_id);
+ else
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -206,21 +289,46 @@ static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+ if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
return proto;
}
-static void scmi_protocol_put(int protocol_id)
+static void scmi_protocol_put(const struct scmi_protocol *proto)
{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
if (proto)
module_put(proto->owner);
}
+static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
+{
+ if (!proto->vendor_id) {
+ pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
+ pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ if (proto->sub_vendor_id &&
+ strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
+ pr_err("malformed sub_vendor_id for protocol 0x%x\n",
+ proto->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int scmi_protocol_register(const struct scmi_protocol *proto)
{
int ret;
+ unsigned long key;
if (!proto) {
pr_err("invalid protocol\n");
@@ -232,12 +340,23 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return -EINVAL;
}
- spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, (void *)proto,
- proto->id, proto->id + 1, GFP_ATOMIC);
- spin_unlock(&protocol_lock);
- if (ret != proto->id) {
- pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
+ scmi_vendor_protocol_check(proto))
+ return -EINVAL;
+
+ /*
+ * Calculate a protocol key to register this protocol with the core;
+ * key value 0 is considered invalid.
+ */
+ key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
+ proto->sub_vendor_id,
+ proto->impl_ver);
+ if (!key)
+ return -EINVAL;
+
+ ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
+ if (ret) {
+ pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
proto->id, ret);
return ret;
}
@@ -250,9 +369,15 @@ EXPORT_SYMBOL_GPL(scmi_protocol_register);
void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
- spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, proto->id);
- spin_unlock(&protocol_lock);
+ unsigned long key;
+
+ key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
+ proto->sub_vendor_id,
+ proto->impl_ver);
+ if (!key)
+ return;
+
+ xa_erase(&scmi_protocols, key);
pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
@@ -697,6 +822,45 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
}
/**
+ * scmi_bad_message_trace - A helper to trace weird messages
+ *
+ * @cinfo: A reference to the channel descriptor on which the message was
+ * received
+ * @msg_hdr: Message header to track
+ * @err: A specific error code used as a status value in traces.
+ *
+ * This helper can be used to trace any kind of weird, incomplete, unexpected,
+ * timed-out message that arrives and as such, can be traced only referring to
+ * the header content, since the payload is missing/unreliable.
+ */
+void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ enum scmi_bad_msg err)
+{
+ char *tag;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+
+ switch (MSG_XTRACT_TYPE(msg_hdr)) {
+ case MSG_TYPE_COMMAND:
+ tag = "!RESP";
+ break;
+ case MSG_TYPE_DELAYED_RESP:
+ tag = "!DLYD";
+ break;
+ case MSG_TYPE_NOTIFICATION:
+ tag = "!NOTI";
+ break;
+ default:
+ tag = "!UNKN";
+ break;
+ }
+
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ MSG_XTRACT_PROT_ID(msg_hdr),
+ MSG_XTRACT_ID(msg_hdr), tag,
+ MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
+}
+
+/**
* scmi_msg_response_validate - Validate message type against state of related
* xfer
*
@@ -822,6 +986,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
"Message for %d type %d is not expected!\n",
xfer_id, msg_type);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
+
return xfer;
}
refcount_inc(&xfer->users);
@@ -846,6 +1013,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
dev_err(cinfo->dev,
"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
msg_type, xfer_id, msg_hdr, xfer->state);
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
+
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
xfer = ERR_PTR(-EINVAL);
@@ -882,6 +1052,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
+
scmi_clear_channel(info, cinfo);
return;
}
@@ -1001,6 +1174,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
break;
default:
WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
break;
}
}
@@ -1489,6 +1663,20 @@ out:
}
/**
+ * scmi_common_get_max_msg_size - Get maximum message size
+ * @ph: A protocol handle reference.
+ *
+ * Return: Maximum message size for the current protocol.
+ */
+static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ return info->desc->max_msg_size;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1799,6 +1987,7 @@ static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
+ .get_max_msg_size = scmi_common_get_max_msg_size,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
.protocol_msg_check = scmi_protocol_msg_check,
@@ -1891,7 +2080,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
/* Protocol specific devres group */
gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
if (!gid) {
- scmi_protocol_put(proto->id);
+ scmi_protocol_put(proto);
goto out;
}
@@ -1955,7 +2144,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
clean:
/* Take care to put the protocol module's owner before releasing all */
- scmi_protocol_put(proto->id);
+ scmi_protocol_put(proto);
devres_release_group(handle->dev, gid);
out:
return ERR_PTR(ret);
@@ -1989,7 +2178,7 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
const struct scmi_protocol *proto;
/* Fails if protocol not registered on bus */
- proto = scmi_protocol_get(protocol_id);
+ proto = scmi_protocol_get(protocol_id, &info->version);
if (proto)
pi = scmi_alloc_init_protocol_instance(info, proto);
else
@@ -2044,7 +2233,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
idr_remove(&info->protocols, protocol_id);
- scmi_protocol_put(protocol_id);
+ scmi_protocol_put(pi->proto);
devres_release_group(handle->dev, gid);
dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
@@ -2491,6 +2680,10 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
ret = 0;
}
+ if (ret)
+ dev_err(info->dev,
+ "failed to setup channel for protocol:0x%X\n", prot_id);
+
return ret;
}
@@ -2760,6 +2953,7 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
static int scmi_probe(struct platform_device *pdev)
{
int ret;
+ char *err_str = "probe failure\n";
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
@@ -2810,27 +3004,37 @@ static int scmi_probe(struct platform_device *pdev)
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
- if (ret)
+ if (ret) {
+ err_str = "transport not ready\n";
goto clear_ida;
+ }
}
/* Setup all channels described in the DT at first */
ret = scmi_channels_setup(info);
- if (ret)
+ if (ret) {
+ err_str = "failed to setup channels\n";
goto clear_ida;
+ }
ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
- if (ret)
+ if (ret) {
+ err_str = "failed to register bus notifier\n";
goto clear_txrx_setup;
+ }
ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
&info->dev_req_nb);
- if (ret)
+ if (ret) {
+ err_str = "failed to register device notifier\n";
goto clear_bus_notifier;
+ }
ret = scmi_xfer_info_init(info);
- if (ret)
+ if (ret) {
+ err_str = "failed to init xfers pool\n";
goto clear_dev_req_notifier;
+ }
if (scmi_top_dentry) {
info->dbg = scmi_debugfs_common_setup(info);
@@ -2867,9 +3071,11 @@ static int scmi_probe(struct platform_device *pdev)
*/
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
- dev_err(dev, "unable to communicate with SCMI\n");
- if (coex)
+ err_str = "unable to communicate with SCMI\n";
+ if (coex) {
+ dev_err(dev, "%s", err_str);
return 0;
+ }
goto notification_exit;
}
@@ -2923,7 +3129,8 @@ clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
clear_ida:
ida_free(&scmi_id, info->id);
- return ret;
+
+ return dev_err_probe(dev, ret, "%s", err_str);
}
static void scmi_remove(struct platform_device *pdev)
@@ -3127,6 +3334,7 @@ static int __init scmi_driver_init(void)
scmi_voltage_register();
scmi_system_register();
scmi_powercap_register();
+ scmi_pinctrl_register();
return platform_driver_register(&scmi_driver);
}
@@ -3144,6 +3352,7 @@ static void __exit scmi_driver_exit(void)
scmi_voltage_unregister();
scmi_system_unregister();
scmi_powercap_unregister();
+ scmi_pinctrl_unregister();
scmi_transports_exit();
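
With the xarray-based registry above, vendor protocols (IDs at or above SCMI_PROTOCOL_VENDOR_BASE) are keyed by a hash of protocol ID, vendor identification and implementation version, and lookups fall back from the exact triplet to a vendor/sub-vendor match and finally a vendor-only match. A hedged sketch of the descriptor fields such a registration relies on (names and values purely illustrative; init/ops hooks as for standard protocols):

/* Hypothetical vendor protocol descriptor; only the fields consumed by the
 * new key calculation and sanity checks are shown. */
static const struct scmi_protocol example_vendor_proto = {
	.id		= 0x81,		/* >= SCMI_PROTOCOL_VENDOR_BASE */
	.owner		= THIS_MODULE,
	.vendor_id	= "ACME",	/* mandatory, shorter than SCMI_SHORT_NAME_MAX_SIZE */
	.sub_vendor_id	= "SOCX",	/* optional */
	.impl_ver	= 0x00010000,	/* optional; 0 matches any version */
};

/* scmi_protocol_register(&example_vendor_proto) stores it under the derived
 * key; scmi_protocol_get() later retries the lookup with impl_ver == 0 and
 * then with a NULL sub_vendor_id when no exact match is registered. */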
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index b8d470417e8f..615a3b2ad83d 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -56,6 +56,9 @@ static void rx_callback(struct mbox_client *cl, void *m)
*/
if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+ scmi_bad_message_trace(smbox->cinfo,
+ shmem_read_header(smbox->shmem),
+ MSG_MBOX_SPURIOUS);
return;
}
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 27c52531194d..e160ecb22948 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1513,17 +1513,12 @@ static int scmi_devm_notifier_register(struct scmi_device *sdev,
static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
{
struct scmi_notifier_devres *dres = res;
- struct scmi_notifier_devres *xres = data;
+ struct notifier_block *nb = data;
- if (WARN_ON(!dres || !xres))
+ if (WARN_ON(!dres || !nb))
return 0;
- return dres->proto_id == xres->proto_id &&
- dres->evt_id == xres->evt_id &&
- dres->nb == xres->nb &&
- ((!dres->src_id && !xres->src_id) ||
- (dres->src_id && xres->src_id &&
- dres->__src_id == xres->__src_id));
+ return dres->nb == nb;
}
/**
@@ -1531,10 +1526,6 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* notifier_block for an event
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
- * @proto_id: Protocol ID
- * @evt_id: Event ID
- * @src_id: Source ID, when NULL register for events coming form ALL possible
- * sources
* @nb: A standard notifier block to register for the specified event
*
* Generic devres managed helper to explicitly un-register a notifier_block
@@ -1544,25 +1535,12 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* Return: 0 on Success
*/
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
- u8 proto_id, u8 evt_id,
- const u32 *src_id,
struct notifier_block *nb)
{
int ret;
- struct scmi_notifier_devres dres;
-
- dres.handle = sdev->handle;
- dres.proto_id = proto_id;
- dres.evt_id = evt_id;
- if (src_id) {
- dres.__src_id = *src_id;
- dres.src_id = &dres.__src_id;
- } else {
- dres.src_id = NULL;
- }
ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
- scmi_devm_notifier_match, &dres);
+ scmi_devm_notifier_match, nb);
WARN_ON(ret);
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 345fff167b52..4b7f1cbb9b04 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -387,8 +387,8 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
}
static inline void
@@ -405,8 +405,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -417,8 +417,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
GFP_KERNEL);
if (ret)
dev_warn(dev,
- "Failed to add opps_by_idx at %d - ret:%d\n",
- opp->level_index, ret);
+ "Failed to add opps_by_idx at %d for %s - ret:%d\n",
+ opp->level_index, dom->info.name, ret);
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
@@ -879,7 +879,8 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
ret = dev_pm_opp_add_dynamic(dev, &data);
if (ret) {
- dev_warn(dev, "failed to add opp %luHz\n", freq);
+ dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
+ domain, dom->info.name, idx, freq);
dev_pm_opp_remove_all_dynamic(dev);
return ret;
}
diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c
new file mode 100644
index 000000000000..a2a7f880d6a3
--- /dev/null
+++ b/drivers/firmware/arm_scmi/pinctrl.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Pinctrl Protocol
+ *
+ * Copyright (C) 2024 EPAM
+ * Copyright 2024 NXP
+ */
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "common.h"
+#include "protocols.h"
+
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+#define GET_GROUPS_NR(x) le32_get_bits((x), GENMASK(31, 16))
+#define GET_PINS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+#define GET_FUNCTIONS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+
+#define EXT_NAME_FLAG(x) le32_get_bits((x), BIT(31))
+#define NUM_ELEMS(x) le32_get_bits((x), GENMASK(15, 0))
+
+#define REMAINING(x) le32_get_bits((x), GENMASK(31, 16))
+#define RETURNED(x) le32_get_bits((x), GENMASK(11, 0))
+
+#define CONFIG_FLAG_MASK GENMASK(19, 18)
+#define SELECTOR_MASK GENMASK(17, 16)
+#define SKIP_CONFIGS_MASK GENMASK(15, 8)
+#define CONFIG_TYPE_MASK GENMASK(7, 0)
+
+enum scmi_pinctrl_protocol_cmd {
+ PINCTRL_ATTRIBUTES = 0x3,
+ PINCTRL_LIST_ASSOCIATIONS = 0x4,
+ PINCTRL_SETTINGS_GET = 0x5,
+ PINCTRL_SETTINGS_CONFIGURE = 0x6,
+ PINCTRL_REQUEST = 0x7,
+ PINCTRL_RELEASE = 0x8,
+ PINCTRL_NAME_GET = 0x9,
+ PINCTRL_SET_PERMISSIONS = 0xa,
+};
+
+struct scmi_msg_settings_conf {
+ __le32 identifier;
+ __le32 function_id;
+ __le32 attributes;
+ __le32 configs[];
+};
+
+struct scmi_msg_settings_get {
+ __le32 identifier;
+ __le32 attributes;
+};
+
+struct scmi_resp_settings_get {
+ __le32 function_selected;
+ __le32 num_configs;
+ __le32 configs[];
+};
+
+struct scmi_msg_pinctrl_protocol_attributes {
+ __le32 attributes_low;
+ __le32 attributes_high;
+};
+
+struct scmi_msg_pinctrl_attributes {
+ __le32 identifier;
+ __le32 flags;
+};
+
+struct scmi_resp_pinctrl_attributes {
+ __le32 attributes;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_pinctrl_list_assoc {
+ __le32 identifier;
+ __le32 flags;
+ __le32 index;
+};
+
+struct scmi_resp_pinctrl_list_assoc {
+ __le32 flags;
+ __le16 array[];
+};
+
+struct scmi_msg_request {
+ __le32 identifier;
+ __le32 flags;
+};
+
+struct scmi_group_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+ u32 *group_pins;
+ u32 nr_pins;
+};
+
+struct scmi_function_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+ u32 *groups;
+ u32 nr_groups;
+};
+
+struct scmi_pin_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+};
+
+struct scmi_pinctrl_info {
+ u32 version;
+ int nr_groups;
+ int nr_functions;
+ int nr_pins;
+ struct scmi_group_info *groups;
+ struct scmi_function_info *functions;
+ struct scmi_pin_info *pins;
+};
+
+static int scmi_pinctrl_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_pinctrl_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_pinctrl_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ pi->nr_functions = GET_FUNCTIONS_NR(attr->attributes_high);
+ pi->nr_groups = GET_GROUPS_NR(attr->attributes_low);
+ pi->nr_pins = GET_PINS_NR(attr->attributes_low);
+ if (pi->nr_pins == 0) {
+ dev_warn(ph->dev, "returned zero pins\n");
+ ret = -EINVAL;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_pinctrl_count_get(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ switch (type) {
+ case PIN_TYPE:
+ return pi->nr_pins;
+ case GROUP_TYPE:
+ return pi->nr_groups;
+ case FUNCTION_TYPE:
+ return pi->nr_functions;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int scmi_pinctrl_validate_id(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type)
+{
+ int value;
+
+ value = scmi_pinctrl_count_get(ph, type);
+ if (value < 0)
+ return value;
+
+ if (selector >= value || value == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_pinctrl_attributes(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type,
+ u32 selector, char *name,
+ u32 *n_elems)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_pinctrl_attributes *tx;
+ struct scmi_resp_pinctrl_attributes *rx;
+ bool ext_name_flag;
+
+ if (!name)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_ATTRIBUTES, sizeof(*tx),
+ sizeof(*rx), &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ rx = t->rx.buf;
+ tx->identifier = cpu_to_le32(selector);
+ tx->flags = cpu_to_le32(type);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ if (n_elems)
+ *n_elems = NUM_ELEMS(rx->attributes);
+
+ strscpy(name, rx->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ ext_name_flag = !!EXT_NAME_FLAG(rx->attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ if (ret)
+ return ret;
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (ext_name_flag)
+ ret = ph->hops->extended_name_get(ph, PINCTRL_NAME_GET,
+ selector, (u32 *)&type, name,
+ SCMI_MAX_STR_SIZE);
+ return ret;
+}
+
+struct scmi_pinctrl_ipriv {
+ u32 selector;
+ enum scmi_pinctrl_selector_type type;
+ u32 *array;
+};
+
+static void iter_pinctrl_assoc_prepare_message(void *message,
+ u32 desc_index,
+ const void *priv)
+{
+ struct scmi_msg_pinctrl_list_assoc *msg = message;
+ const struct scmi_pinctrl_ipriv *p = priv;
+
+ msg->identifier = cpu_to_le32(p->selector);
+ msg->flags = cpu_to_le32(p->type);
+ msg->index = cpu_to_le32(desc_index);
+}
+
+static int iter_pinctrl_assoc_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_resp_pinctrl_list_assoc *r = response;
+
+ st->num_returned = RETURNED(r->flags);
+ st->num_remaining = REMAINING(r->flags);
+
+ return 0;
+}
+
+static int
+iter_pinctrl_assoc_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ const struct scmi_resp_pinctrl_list_assoc *r = response;
+ struct scmi_pinctrl_ipriv *p = priv;
+
+ p->array[st->desc_index + st->loop_idx] =
+ le16_to_cpu(r->array[st->loop_idx]);
+
+ return 0;
+}
+
+static int scmi_pinctrl_list_associations(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ u16 size, u32 *array)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_pinctrl_assoc_prepare_message,
+ .update_state = iter_pinctrl_assoc_update_state,
+ .process_response = iter_pinctrl_assoc_process_response,
+ };
+ struct scmi_pinctrl_ipriv ipriv = {
+ .selector = selector,
+ .type = type,
+ .array = array,
+ };
+
+ if (!array || !size || type == PIN_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ iter = ph->hops->iter_response_init(ph, &ops, size,
+ PINCTRL_LIST_ASSOCIATIONS,
+ sizeof(struct scmi_msg_pinctrl_list_assoc),
+ &ipriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+struct scmi_settings_get_ipriv {
+ u32 selector;
+ enum scmi_pinctrl_selector_type type;
+ bool get_all;
+ unsigned int *nr_configs;
+ enum scmi_pinctrl_conf_type *config_types;
+ u32 *config_values;
+};
+
+static void
+iter_pinctrl_settings_get_prepare_message(void *message, u32 desc_index,
+ const void *priv)
+{
+ struct scmi_msg_settings_get *msg = message;
+ const struct scmi_settings_get_ipriv *p = priv;
+ u32 attributes;
+
+ attributes = FIELD_PREP(SELECTOR_MASK, p->type);
+
+ if (p->get_all) {
+ attributes |= FIELD_PREP(CONFIG_FLAG_MASK, 1) |
+ FIELD_PREP(SKIP_CONFIGS_MASK, desc_index);
+ } else {
+ attributes |= FIELD_PREP(CONFIG_TYPE_MASK, p->config_types[0]);
+ }
+
+ msg->attributes = cpu_to_le32(attributes);
+ msg->identifier = cpu_to_le32(p->selector);
+}
+
+static int
+iter_pinctrl_settings_get_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_resp_settings_get *r = response;
+ struct scmi_settings_get_ipriv *p = priv;
+
+ if (p->get_all) {
+ st->num_returned = le32_get_bits(r->num_configs, GENMASK(7, 0));
+ st->num_remaining = le32_get_bits(r->num_configs, GENMASK(31, 24));
+ } else {
+ st->num_returned = 1;
+ st->num_remaining = 0;
+ }
+
+ return 0;
+}
+
+static int
+iter_pinctrl_settings_get_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st,
+ void *priv)
+{
+ const struct scmi_resp_settings_get *r = response;
+ struct scmi_settings_get_ipriv *p = priv;
+ u32 type = le32_get_bits(r->configs[st->loop_idx * 2], GENMASK(7, 0));
+ u32 val = le32_to_cpu(r->configs[st->loop_idx * 2 + 1]);
+
+ if (p->get_all) {
+ p->config_types[st->desc_index + st->loop_idx] = type;
+ } else {
+ if (p->config_types[0] != type)
+ return -EINVAL;
+ }
+
+ p->config_values[st->desc_index + st->loop_idx] = val;
+ ++*p->nr_configs;
+
+ return 0;
+}
+
+static int
+scmi_pinctrl_settings_get(const struct scmi_protocol_handle *ph, u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values)
+{
+ int ret;
+ void *iter;
+ unsigned int max_configs = *nr_configs;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_pinctrl_settings_get_prepare_message,
+ .update_state = iter_pinctrl_settings_get_update_state,
+ .process_response = iter_pinctrl_settings_get_process_response,
+ };
+ struct scmi_settings_get_ipriv ipriv = {
+ .selector = selector,
+ .type = type,
+ .get_all = (max_configs > 1),
+ .nr_configs = nr_configs,
+ .config_types = config_types,
+ .config_values = config_values,
+ };
+
+ if (!config_types || !config_values || type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ /* Prepare to count returned configs */
+ *nr_configs = 0;
+ iter = ph->hops->iter_response_init(ph, &ops, max_configs,
+ PINCTRL_SETTINGS_GET,
+ sizeof(struct scmi_msg_settings_get),
+ &ipriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+static int scmi_pinctrl_settings_get_one(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_conf_type config_type,
+ u32 *config_value)
+{
+ unsigned int nr_configs = 1;
+
+ return scmi_pinctrl_settings_get(ph, selector, type, &nr_configs,
+ &config_type, config_value);
+}
+
+static int scmi_pinctrl_settings_get_all(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values)
+{
+ if (!nr_configs || *nr_configs == 0)
+ return -EINVAL;
+
+ return scmi_pinctrl_settings_get(ph, selector, type, nr_configs,
+ config_types, config_values);
+}
+
+static int
+scmi_pinctrl_settings_conf(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ u32 nr_configs,
+ enum scmi_pinctrl_conf_type *config_type,
+ u32 *config_value)
+{
+ struct scmi_xfer *t;
+ struct scmi_msg_settings_conf *tx;
+ u32 attributes;
+ int ret, i;
+ u32 configs_in_chunk, conf_num = 0;
+ u32 chunk;
+ int max_msg_size = ph->hops->get_max_msg_size(ph);
+
+ if (!config_type || !config_value || type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ configs_in_chunk = (max_msg_size - sizeof(*tx)) / (sizeof(__le32) * 2);
+ while (conf_num < nr_configs) {
+ chunk = (nr_configs - conf_num > configs_in_chunk) ?
+ configs_in_chunk : nr_configs - conf_num;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE,
+ sizeof(*tx) +
+ chunk * 2 * sizeof(__le32), 0, &t);
+ if (ret)
+ break;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(selector);
+ tx->function_id = cpu_to_le32(0xFFFFFFFF);
+ attributes = FIELD_PREP(GENMASK(1, 0), type) |
+ FIELD_PREP(GENMASK(9, 2), chunk);
+ tx->attributes = cpu_to_le32(attributes);
+
+ for (i = 0; i < chunk; i++) {
+ tx->configs[i * 2] =
+ cpu_to_le32(config_type[conf_num + i]);
+ tx->configs[i * 2 + 1] =
+ cpu_to_le32(config_value[conf_num + i]);
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ if (ret)
+ break;
+
+ conf_num += chunk;
+ }
+
+ return ret;
+}
+
+static int scmi_pinctrl_function_select(const struct scmi_protocol_handle *ph,
+ u32 group,
+ enum scmi_pinctrl_selector_type type,
+ u32 function_id)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_settings_conf *tx;
+ u32 attributes;
+
+ ret = scmi_pinctrl_validate_id(ph, group, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE,
+ sizeof(*tx), 0, &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(group);
+ tx->function_id = cpu_to_le32(function_id);
+ attributes = FIELD_PREP(GENMASK(1, 0), type) | BIT(10);
+ tx->attributes = cpu_to_le32(attributes);
+
+ ret = ph->xops->do_xfer(ph, t);
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_pinctrl_request_free(const struct scmi_protocol_handle *ph,
+ u32 identifier,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_protocol_cmd cmd)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_request *tx;
+
+ if (type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ if (cmd != PINCTRL_REQUEST && cmd != PINCTRL_RELEASE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, identifier, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, cmd, sizeof(*tx), 0, &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(identifier);
+ tx->flags = cpu_to_le32(type);
+
+ ret = ph->xops->do_xfer(ph, t);
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_pinctrl_pin_request(const struct scmi_protocol_handle *ph,
+ u32 pin)
+{
+ return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_REQUEST);
+}
+
+static int scmi_pinctrl_pin_free(const struct scmi_protocol_handle *ph, u32 pin)
+{
+ return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_RELEASE);
+}
+
+static int scmi_pinctrl_get_group_info(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ struct scmi_group_info *group)
+{
+ int ret;
+
+ ret = scmi_pinctrl_attributes(ph, GROUP_TYPE, selector, group->name,
+ &group->nr_pins);
+ if (ret)
+ return ret;
+
+ if (!group->nr_pins) {
+ dev_err(ph->dev, "Group %d has 0 elements", selector);
+ return -ENODATA;
+ }
+
+ group->group_pins = kmalloc_array(group->nr_pins,
+ sizeof(*group->group_pins),
+ GFP_KERNEL);
+ if (!group->group_pins)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_list_associations(ph, selector, GROUP_TYPE,
+ group->nr_pins, group->group_pins);
+ if (ret) {
+ kfree(group->group_pins);
+ return ret;
+ }
+
+ group->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_group_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_groups || pi->nr_groups == 0)
+ return -EINVAL;
+
+ if (!pi->groups[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_group_info(ph, selector,
+ &pi->groups[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->groups[selector].name;
+
+ return 0;
+}
+
+static int scmi_pinctrl_group_pins_get(const struct scmi_protocol_handle *ph,
+ u32 selector, const u32 **pins,
+ u32 *nr_pins)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!pins || !nr_pins)
+ return -EINVAL;
+
+ if (selector >= pi->nr_groups || pi->nr_groups == 0)
+ return -EINVAL;
+
+ if (!pi->groups[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_group_info(ph, selector,
+ &pi->groups[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *pins = pi->groups[selector].group_pins;
+ *nr_pins = pi->groups[selector].nr_pins;
+
+ return 0;
+}
+
+static int scmi_pinctrl_get_function_info(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ struct scmi_function_info *func)
+{
+ int ret;
+
+ ret = scmi_pinctrl_attributes(ph, FUNCTION_TYPE, selector, func->name,
+ &func->nr_groups);
+ if (ret)
+ return ret;
+
+ if (!func->nr_groups) {
+ dev_err(ph->dev, "Function %d has 0 elements", selector);
+ return -ENODATA;
+ }
+
+ func->groups = kmalloc_array(func->nr_groups, sizeof(*func->groups),
+ GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_list_associations(ph, selector, FUNCTION_TYPE,
+ func->nr_groups, func->groups);
+ if (ret) {
+ kfree(func->groups);
+ return ret;
+ }
+
+ func->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_function_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_functions || pi->nr_functions == 0)
+ return -EINVAL;
+
+ if (!pi->functions[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_function_info(ph, selector,
+ &pi->functions[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->functions[selector].name;
+ return 0;
+}
+
+static int
+scmi_pinctrl_function_groups_get(const struct scmi_protocol_handle *ph,
+ u32 selector, u32 *nr_groups,
+ const u32 **groups)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!groups || !nr_groups)
+ return -EINVAL;
+
+ if (selector >= pi->nr_functions || pi->nr_functions == 0)
+ return -EINVAL;
+
+ if (!pi->functions[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_function_info(ph, selector,
+ &pi->functions[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *groups = pi->functions[selector].groups;
+ *nr_groups = pi->functions[selector].nr_groups;
+
+ return 0;
+}
+
+static int scmi_pinctrl_mux_set(const struct scmi_protocol_handle *ph,
+ u32 selector, u32 group)
+{
+ return scmi_pinctrl_function_select(ph, group, GROUP_TYPE, selector);
+}
+
+static int scmi_pinctrl_get_pin_info(const struct scmi_protocol_handle *ph,
+ u32 selector, struct scmi_pin_info *pin)
+{
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_attributes(ph, PIN_TYPE, selector, pin->name, NULL);
+ if (ret)
+ return ret;
+
+ pin->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_pin_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_pins)
+ return -EINVAL;
+
+ if (!pi->pins[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_pin_info(ph, selector, &pi->pins[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->pins[selector].name;
+
+ return 0;
+}
+
+static int scmi_pinctrl_name_get(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ const char **name)
+{
+ switch (type) {
+ case PIN_TYPE:
+ return scmi_pinctrl_get_pin_name(ph, selector, name);
+ case GROUP_TYPE:
+ return scmi_pinctrl_get_group_name(ph, selector, name);
+ case FUNCTION_TYPE:
+ return scmi_pinctrl_get_function_name(ph, selector, name);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct scmi_pinctrl_proto_ops pinctrl_proto_ops = {
+ .count_get = scmi_pinctrl_count_get,
+ .name_get = scmi_pinctrl_name_get,
+ .group_pins_get = scmi_pinctrl_group_pins_get,
+ .function_groups_get = scmi_pinctrl_function_groups_get,
+ .mux_set = scmi_pinctrl_mux_set,
+ .settings_get_one = scmi_pinctrl_settings_get_one,
+ .settings_get_all = scmi_pinctrl_settings_get_all,
+ .settings_conf = scmi_pinctrl_settings_conf,
+ .pin_request = scmi_pinctrl_pin_request,
+ .pin_free = scmi_pinctrl_pin_free,
+};
+
+static int scmi_pinctrl_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ u32 version;
+ struct scmi_pinctrl_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Pinctrl Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->pins = devm_kcalloc(ph->dev, pinfo->nr_pins,
+ sizeof(*pinfo->pins), GFP_KERNEL);
+ if (!pinfo->pins)
+ return -ENOMEM;
+
+ pinfo->groups = devm_kcalloc(ph->dev, pinfo->nr_groups,
+ sizeof(*pinfo->groups), GFP_KERNEL);
+ if (!pinfo->groups)
+ return -ENOMEM;
+
+ pinfo->functions = devm_kcalloc(ph->dev, pinfo->nr_functions,
+ sizeof(*pinfo->functions), GFP_KERNEL);
+ if (!pinfo->functions)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo, version);
+}
+
+static int scmi_pinctrl_protocol_deinit(const struct scmi_protocol_handle *ph)
+{
+ int i;
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ /* Free group_pins allocated in scmi_pinctrl_get_group_info */
+ for (i = 0; i < pi->nr_groups; i++) {
+ if (pi->groups[i].present) {
+ kfree(pi->groups[i].group_pins);
+ pi->groups[i].present = false;
+ }
+ }
+
+ /* Free groups allocated in scmi_pinctrl_get_function_info */
+ for (i = 0; i < pi->nr_functions; i++) {
+ if (pi->functions[i].present) {
+ kfree(pi->functions[i].groups);
+ pi->functions[i].present = false;
+ }
+ }
+
+ return 0;
+}
+
+static const struct scmi_protocol scmi_pinctrl = {
+ .id = SCMI_PROTOCOL_PINCTRL,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_pinctrl_protocol_init,
+ .instance_deinit = &scmi_pinctrl_protocol_deinit,
+ .ops = &pinctrl_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+};
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl)
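As a side note, scmi_pinctrl_settings_conf() above splits the (type, value) pairs across as many PINCTRL_SETTINGS_CONFIGURE messages as fit in one transport message. A minimal user-space sketch of that chunking arithmetic follows; the 128-byte message size, 12-byte header and 40 configs are assumed example values, not properties of any real SCMI transport.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const size_t max_msg_size = 128;	/* assumed transport limit */
	const size_t header_size  = 12;		/* assumed sizeof(*tx) header */
	const size_t pair_size    = 2 * sizeof(uint32_t); /* type + value */
	size_t nr_configs = 40;			/* total settings to send */

	size_t configs_in_chunk = (max_msg_size - header_size) / pair_size;
	size_t conf_num = 0;

	while (conf_num < nr_configs) {
		size_t chunk = nr_configs - conf_num;

		if (chunk > configs_in_chunk)
			chunk = configs_in_chunk;

		printf("message: configs %zu..%zu (%zu pairs)\n",
		       conf_num, conf_num + chunk - 1, chunk);
		conf_num += chunk;
	}
	return 0;
}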
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index ea9201e7044c..1fa79bba492e 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
&fc[POWERCAP_FC_PAI].get_addr, NULL,
- &fc[POWERCAP_PAI_GET].rate_limit);
+ &fc[POWERCAP_FC_PAI].rate_limit);
*p_fc = fc;
}
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 317d3fb32676..8e95f53bd7b7 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -29,6 +29,8 @@
#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
+#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
@@ -258,6 +260,7 @@ struct scmi_fc_info {
* @fastchannel_init: A common helper used to initialize FC descriptors by
* gathering FC descriptions from the SCMI platform server.
* @fastchannel_db_ring: A common helper to ring a FC doorbell.
+ * @get_max_msg_size: A common helper to get the maximum message size.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -277,6 +280,7 @@ struct scmi_proto_helpers_ops {
struct scmi_fc_db_info **p_db,
u32 *rate_limit);
void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
+ int (*get_max_msg_size)(const struct scmi_protocol_handle *ph);
};
/**
@@ -323,6 +327,16 @@ typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
* protocol by the agent. Each protocol implementation
* in the agent is supposed to downgrade to match the
* protocol version supported by the platform.
+ * @vendor_id: A firmware vendor string used for vendor protocol matching.
+ * Ignored when @id identifies a standard protocol; cannot be NULL
+ * otherwise.
+ * @sub_vendor_id: A firmware sub_vendor string used for vendor protocol matching.
+ * Ignored if NULL or when @id identifies a standard protocol.
+ * @impl_ver: A firmware implementation version used for vendor protocol matching.
+ * Ignored if zero or if @id identifies a standard protocol.
+ *
+ * Note that vendor protocol matching at load time is performed by attempting
+ * the closest match first against the tuple (vendor, sub_vendor, impl_ver).
*/
struct scmi_protocol {
const u8 id;
@@ -332,6 +346,9 @@ struct scmi_protocol {
const void *ops;
const struct scmi_protocol_events *events;
unsigned int supported_version;
+ char *vendor_id;
+ char *sub_vendor_id;
+ u32 impl_ver;
};
#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
@@ -353,6 +370,7 @@ void __exit scmi_##name##_unregister(void) \
DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(pinctrl);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
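For illustration only, a hypothetical vendor protocol descriptor showing how the new matching fields might be filled. The id, strings and version below are invented, and such code would live inside the SCMI stack where the driver-internal protocols.h (and THIS_MODULE via linux/module.h) are visible.

static int scmi_acme_foo_init(const struct scmi_protocol_handle *ph)
{
	return 0;	/* no per-instance state in this sketch */
}

static const struct scmi_protocol scmi_acme_foo = {
	.id			= 0x81,		/* >= SCMI_PROTOCOL_VENDOR_BASE */
	.owner			= THIS_MODULE,
	.instance_init		= &scmi_acme_foo_init,
	.supported_version	= 0x10000,
	.vendor_id		= "ACME",	/* assumed firmware-reported strings */
	.sub_vendor_id		= "ACME-SUB",
	.impl_ver		= 0x1,
};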
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 350573518503..130d13e9cd6b 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
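The no_llseek/nonseekable_open() pairing applied above is a generic pattern; a minimal sketch of it for an unrelated, hypothetical file (all names made up) follows.

#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return nonseekable_open(inode, filp);	/* mark the file non-seekable */
}

static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.read	= example_read,
	.llseek	= no_llseek,		/* lseek() fails with -ESPIPE */
};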
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 9f3d665cfdcf..0d139e4de37c 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -819,6 +819,33 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
+/**
+ * cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control
+ * @ctl: pointer to coefficient control
+ * @off: word offset at which data should be written
+ * @buf: the buffer to write to the given control
+ * @len: the length of the buffer in bytes
+ *
+ * Same as cs_dsp_coeff_write_ctrl() but takes pwr_lock.
+ *
+ * Return: A negative number on error, 1 when the control value changed and 0 when it has not.
+ */
+int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, const void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ int ret;
+
+ lockdep_assert_not_held(&dsp->pwr_lock);
+
+ mutex_lock(&dsp->pwr_lock);
+ ret = cs_dsp_coeff_write_ctrl(ctl, off, buf, len);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_write_ctrl);
+
static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, void *buf, size_t len)
{
@@ -891,6 +918,33 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
+/**
+ * cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer
+ * @ctl: pointer to coefficient control
+ * @off: word offset at which data should be read
+ * @buf: the buffer to store to the given control
+ * @len: the length of the buffer in bytes
+ *
+ * Same as cs_dsp_coeff_read_ctrl() but takes pwr_lock.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ int ret;
+
+ lockdep_assert_not_held(&dsp->pwr_lock);
+
+ mutex_lock(&dsp->pwr_lock);
+ ret = cs_dsp_coeff_read_ctrl(ctl, off, buf, len);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_read_ctrl);
+
static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
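A hedged usage sketch of the new locked wrappers: a caller that does not already hold pwr_lock can call them directly instead of open-coding the mutex around cs_dsp_coeff_write_ctrl(). The control name, algorithm id and payload below are assumptions for illustration; module callers of cs_dsp_get_ctl() also need MODULE_IMPORT_NS(FW_CS_DSP).

#include <linux/errno.h>
#include <linux/firmware/cirrus/cs_dsp.h>

static int example_update_coeff(struct cs_dsp *dsp)
{
	static const u8 value[4] = { 0, 0, 0, 1 };	/* one packed DSP word */
	struct cs_dsp_coeff_ctl *ctl;

	/* look up a control by name/type/algorithm id (values are made up) */
	ctl = cs_dsp_get_ctl(dsp, "EXAMPLE_CTL", 0, 0xB0000);
	if (!ctl)
		return -ENODEV;

	/* takes pwr_lock internally; the caller must not already hold it */
	return cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, value, sizeof(value));
}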
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 5f3a3e913d28..d19c78a78ae3 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static void dmi_dev_release(struct device *dev)
+{
+ kfree(dev);
+}
+
static struct class dmi_class = {
.name = "dmi",
- .dev_release = (void(*)(struct device *)) kfree,
+ .dev_release = dmi_dev_release,
.dev_uevent = dmi_dev_uevent,
};
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 015c95a825d3..68231c638c55 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -42,6 +42,7 @@ static struct dmi_memdev_info {
u8 type; /* DDR2, DDR3, DDR4 etc */
} *dmi_memdev;
static int dmi_memdev_nr;
+static int dmi_memdev_populated_nr __initdata;
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
@@ -102,6 +103,17 @@ static void dmi_decode_table(u8 *buf,
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
+ * If a short entry is found (less than 4 bytes), not only is it
+ * invalid, but we also cannot reliably locate the next entry.
+ */
+ if (dm->length < sizeof(struct dmi_header)) {
+ pr_warn(FW_BUG
+ "Corrupted DMI table, offset %zd (only %d entries processed)\n",
+ data - buf, i);
+ break;
+ }
+
+ /*
* We want to know the total length (formatted area and
* strings) before decoding to make sure we won't run off the
* table in dmi_decode or dmi_string
@@ -448,6 +460,9 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
else
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
+ if (bytes)
+ dmi_memdev_populated_nr++;
+
dmi_memdev[nr].size = bytes;
nr++;
}
@@ -824,6 +839,8 @@ void __init dmi_setup(void)
return;
dmi_memdev_walk();
+ pr_info("DMI: Memory slots populated: %d/%d\n",
+ dmi_memdev_populated_nr, dmi_memdev_nr);
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
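The added length check follows a general rule for parsing self-describing records: if a record's declared length is shorter than its fixed header, the offset of the next record cannot be trusted and parsing must stop. A standalone sketch of that guard (not the kernel's dmi_decode_table(); it also ignores the trailing string-set that each real DMI record carries):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct hdr {
	uint8_t type;
	uint8_t length;		/* formatted-area length, >= 4 if valid */
	uint16_t handle;
};

static void parse(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct hdr) <= len) {
		const struct hdr *h = (const struct hdr *)(buf + off);

		if (h->length < sizeof(struct hdr)) {
			fprintf(stderr, "corrupt entry at offset %zu\n", off);
			break;		/* cannot locate the next entry */
		}
		printf("entry type %u, length %u\n",
		       (unsigned int)h->type, (unsigned int)h->length);
		off += h->length;	/* real DMI also skips the string-set */
	}
}

int main(void)
{
	const uint8_t table[] = { 4, 8, 0, 0, 0, 0, 0, 0,   1, 2, 0, 0 };

	parse(table, sizeof(table));
	return 0;
}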
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 833cbb995dd3..5b9dc26e6bcb 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -162,7 +162,15 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
efi_status_t status;
for (;;) {
- varname_size = 1024;
+ /*
+ * A small set of old UEFI implementations reject sizes
+ * above a certain threshold; the lowest seen in the wild
+ * is 512.
+ *
+ * TODO: Commonize with the iteration implementation in
+ * fs/efivarfs to keep all the quirks in one place.
+ */
+ varname_size = 512;
/*
* If this is the first read() call in the pstore enumeration,
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 31eb1e287ce1..06f0428a723c 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -56,17 +56,6 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
-GCOV_PROFILE := n
-# Sanitizer runtimes are unavailable and cannot be linked here.
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-KMSAN_SANITIZE := n
-UBSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
-
-# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
-KCOV_INSTRUMENT := n
-
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 70e9789ff9de..6a337f1f8787 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -335,8 +335,8 @@ fail_free_new_fdt:
fail:
efi_free(fdt_size, fdt_addr);
-
- efi_bs_call(free_pool, priv.runtime_map);
+ if (!efi_novamap)
+ efi_bs_call(free_pool, priv.runtime_map);
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 7e1852859550..c41e7b2091cd 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
continue;
}
- target = round_up(max(md->phys_addr, alloc_min), align) + target_slot * align;
+ target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 6a6ffc6707bd..1983fd3bf392 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -496,6 +496,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->vid_mode = 0xffff;
hdr->type_of_loader = 0x21;
+ hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
@@ -775,6 +776,26 @@ static void error(char *str)
efi_warn("Decompression failed: %s\n", str);
}
+static const char *cmdline_memmap_override;
+
+static efi_status_t parse_options(const char *cmdline)
+{
+ static const char opts[][14] = {
+ "mem=", "memmap=", "efi_fake_mem=", "hugepages="
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(opts); i++) {
+ const char *p = strstr(cmdline, opts[i]);
+
+ if (p == cmdline || (p > cmdline && isspace(p[-1]))) {
+ cmdline_memmap_override = opts[i];
+ break;
+ }
+ }
+
+ return efi_parse_options(cmdline);
+}
+
static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
@@ -806,6 +827,10 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
!memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
seed[0] = 0;
+ } else if (cmdline_memmap_override) {
+ efi_info("%s detected on the kernel command line - disabling physical KASLR\n",
+ cmdline_memmap_override);
+ seed[0] = 0;
}
boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
@@ -882,7 +907,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
}
#ifdef CONFIG_CMDLINE_BOOL
- status = efi_parse_options(CONFIG_CMDLINE);
+ status = parse_options(CONFIG_CMDLINE);
if (status != EFI_SUCCESS) {
efi_err("Failed to parse options\n");
goto fail;
@@ -891,7 +916,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
((u64)boot_params->ext_cmd_line_ptr << 32));
- status = efi_parse_options((char *)cmdline_paddr);
+ status = parse_options((char *)cmdline_paddr);
if (status != EFI_SUCCESS) {
efi_err("Failed to parse options\n");
goto fail;
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 5b439d04079c..50f6503fe49f 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -4,6 +4,7 @@
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
+#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>
/* Protects unaccepted memory bitmap and accepting_list */
@@ -149,6 +150,9 @@ retry:
}
list_del(&range.list);
+
+ touch_softlockup_watchdog();
+
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index f654e6f6af87..4056ba7f3440 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -215,7 +215,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
if (data_size > 0) {
status = check_var_size(nonblocking, attr,
- data_size + ucs2_strsize(name, 1024));
+ data_size + ucs2_strsize(name, EFI_VAR_NAME_LEN));
if (status != EFI_SUCCESS)
return status;
}
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index c2bffdc352a3..6f810d720f4d 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -124,7 +124,6 @@ static struct coreboot_driver cbmem_entry_driver = {
.probe = cbmem_entry_probe,
.drv = {
.name = "cbmem",
- .owner = THIS_MODULE,
.dev_groups = dev_groups,
},
.id_table = cbmem_ids,
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index d4b6e581a6c6..fa7752f6e89b 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -85,13 +85,15 @@ static void coreboot_device_release(struct device *dev)
kfree(device);
}
-int coreboot_driver_register(struct coreboot_driver *driver)
+int __coreboot_driver_register(struct coreboot_driver *driver,
+ struct module *owner)
{
driver->drv.bus = &coreboot_bus_type;
+ driver->drv.owner = owner;
return driver_register(&driver->drv);
}
-EXPORT_SYMBOL(coreboot_driver_register);
+EXPORT_SYMBOL(__coreboot_driver_register);
void coreboot_driver_unregister(struct coreboot_driver *driver)
{
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 86427989c57f..bb6f0f7299b4 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -97,8 +97,12 @@ struct coreboot_driver {
const struct coreboot_device_id *id_table;
};
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define coreboot_driver_register(driver) \
+ __coreboot_driver_register(driver, THIS_MODULE)
/* Register a driver that uses the data from a coreboot table. */
-int coreboot_driver_register(struct coreboot_driver *driver);
+int __coreboot_driver_register(struct coreboot_driver *driver,
+ struct module *owner);
/* Unregister a driver that uses the data from a coreboot table. */
void coreboot_driver_unregister(struct coreboot_driver *driver);
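The same THIS_MODULE-capturing macro pattern can be applied to any registration API; a generic sketch with made-up names follows. Because the macro expands in the caller's translation unit, the recorded owner is the caller's module rather than the bus core.

#include <linux/device/driver.h>
#include <linux/module.h>

int __example_driver_register(struct device_driver *drv, struct module *owner)
{
	drv->owner = owner;		/* recorded for module refcounting */
	return driver_register(drv);
}

/*
 * Normally lives in a header; expands at the call site, so THIS_MODULE
 * refers to the caller's module.
 */
#define example_driver_register(drv) \
	__example_driver_register(drv, THIS_MODULE)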
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index fbeeaee4ac85..835a19a7a3a0 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -206,10 +206,12 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
if (ret | response->resp_status) {
dev_warn(priv->dev, "Verification of Upgrade Image failed!\n");
ret = ret ? ret : -EBADMSG;
+ goto free_message;
}
dev_info(priv->dev, "Verification of Upgrade Image passed!\n");
+free_message:
devm_kfree(priv->dev, message);
free_response:
devm_kfree(priv->dev, response);
@@ -265,7 +267,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv
AUTO_UPDATE_DIRECTORY_WIDTH);
memset(buffer + AUTO_UPDATE_BLANK_DIRECTORY, 0x0, AUTO_UPDATE_DIRECTORY_WIDTH);
- dev_info(priv->dev, "Writing the image address (%x) to the flash directory (%llx)\n",
+ dev_info(priv->dev, "Writing the image address (0x%x) to the flash directory (0x%llx)\n",
image_address, directory_address);
ret = mtd_write(priv->flash, 0x0, erase_size, &bytes_written, (u_char *)buffer);
@@ -313,7 +315,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
erase.len = round_up(size, (size_t)priv->flash->erasesize);
erase.addr = image_address;
- dev_info(priv->dev, "Erasing the flash at address (%x)\n", image_address);
+ dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address);
ret = mtd_erase(priv->flash, &erase);
if (ret)
goto out;
@@ -323,7 +325,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
* will do all of that itself - including verifying that the bitstream
* is valid.
*/
- dev_info(priv->dev, "Writing the image to the flash at address (%x)\n", image_address);
+ dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address);
ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data);
if (ret)
goto out;
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098ef3..bc550ad0dbe0 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 bytes (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed to
+ * it is structured and aligned. This applies in particular to the
+ * request/response setup used for QSEE_CMD_UEFI_GET_VARIABLE. While
+ * qcom_qseecom_app_send(), in theory, accepts separate buffers/addresses for
+ * the request and response parts, in practice it seems to expect both to be
+ * part of a larger contiguous block. We initially allocated separate buffers
+ * for the request and response, but this caused the QSEE_CMD_UEFI_GET_VARIABLE
+ * command to either not write any response to the response buffer or outright
+ * crash the device. Therefore, we now allocate a single contiguous block of
+ * DMA memory for both and properly align the data using the macros below. In
+ * particular, request and response structs are aligned at 8 bytes (via
+ * __reqdata_offs()), following the driver that this has been
+ * reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
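The offsets produced by __reqdata_offs() above amount to packing both structures into one allocation at 8-byte aligned offsets, so a single qseecom_dma_alloc() call covers request and response. A user-space sketch of that arithmetic, with arbitrary example sizes:

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t req_size = 52;	/* example request size  */
	size_t rsp_size = 30;	/* example response size */

	size_t req_offs = 0;	/* first field, already aligned */
	size_t rsp_offs = ALIGN_UP(req_offs + req_size, 8);
	size_t cmd_buf_size = ALIGN_UP(rsp_offs + rsp_size, 8);

	printf("req at %zu, rsp at %zu, total %zu bytes\n",
	       req_offs, rsp_offs, cmd_buf_size);
	return 0;
}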
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b5633a..68f4df7e6c3c 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -4,6 +4,8 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -114,6 +116,10 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+#define QCOM_DLOAD_MASK GENMASK(5, 4)
+#define QCOM_DLOAD_NODUMP 0
+#define QCOM_DLOAD_FULLDUMP 1
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -163,9 +169,6 @@ static int qcom_scm_bw_enable(void)
if (!__scm->path)
return 0;
- if (IS_ERR(__scm->path))
- return -EINVAL;
-
mutex_lock(&__scm->scm_bw_lock);
if (!__scm->scm_vote_count) {
ret = icc_set_bw(__scm->path, 0, UINT_MAX);
@@ -183,7 +186,7 @@ err_bw:
static void qcom_scm_bw_disable(void)
{
- if (IS_ERR_OR_NULL(__scm->path))
+ if (!__scm->path)
return;
mutex_lock(&__scm->scm_bw_lock);
@@ -496,19 +499,32 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
+static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
+{
+ unsigned int old;
+ unsigned int new;
+ int ret;
+
+ ret = qcom_scm_io_readl(addr, &old);
+ if (ret)
+ return ret;
+
+ new = (old & ~mask) | (val & mask);
+
+ return qcom_scm_io_writel(addr, new);
+}
+
static void qcom_scm_set_download_mode(bool enable)
{
- bool avail;
+ u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
int ret = 0;
- avail = __qcom_scm_is_call_available(__scm->dev,
- QCOM_SCM_SVC_BOOT,
- QCOM_SCM_BOOT_SET_DLOAD_MODE);
- if (avail) {
+ if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
+ FIELD_PREP(QCOM_DLOAD_MASK, val));
+ } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
- } else if (__scm->dload_mode_addr) {
- ret = qcom_scm_io_writel(__scm->dload_mode_addr,
- enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
} else {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
@@ -557,10 +573,9 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL);
- if (!mdata_buf) {
- dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ if (!mdata_buf)
return -ENOMEM;
- }
+
memcpy(mdata_buf, metadata, size);
ret = qcom_scm_clk_enable();
@@ -569,13 +584,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
out:
@@ -637,10 +653,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -672,10 +690,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -706,11 +726,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -1576,9 +1597,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1589,33 +1610,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1624,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
@@ -1649,8 +1645,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* We do not yet support re-entrant calls via the qseecom interface. To prevent
* any potential issues with this, only allow validated machines for now.
*/
-static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
+static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "qcom,sc8180x-primus" },
{ }
};
@@ -1738,7 +1736,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!__scm;
+ return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
@@ -1769,7 +1767,7 @@ int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
return 0;
}
-static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
int ret;
@@ -1801,7 +1799,7 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
goto out;
}
- ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ ret = qcom_scm_waitq_wakeup(wq_ctx);
if (ret)
goto out;
} while (more_pending);
@@ -1819,10 +1817,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (!scm)
return -ENOMEM;
+ scm->dev = &pdev->dev;
ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
if (ret < 0)
return ret;
+ init_completion(&scm->waitq_comp);
mutex_init(&scm->scm_bw_lock);
scm->path = devm_of_icc_get(&pdev->dev, NULL);
@@ -1854,10 +1854,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- __scm = scm;
- __scm->dev = &pdev->dev;
-
- init_completion(&__scm->waitq_comp);
+ /* Let all above stores be available after this */
+ smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
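The smp_store_release()/READ_ONCE() pairing introduced above is the usual pointer-publication idiom: complete all initialization first, then publish the pointer, so any reader that observes it non-NULL also observes the initialized fields. A minimal sketch with hypothetical names:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct example_ctx {
	int ready_value;
};

static struct example_ctx *example_ctx;		/* published pointer */

static void example_publish(struct example_ctx *ctx)
{
	ctx->ready_value = 42;			/* finish all initialization... */
	smp_store_release(&example_ctx, ctx);	/* ...then make the pointer visible */
}

static bool example_is_available(void)
{
	return !!READ_ONCE(example_ctx);	/* cheap, lock-free availability probe */
}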
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 322aada20f74..ac34876a97f8 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
if (size & 3)
return -EINVAL;
- buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
- GFP_ATOMIC);
+ buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
+ &bus_addr, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
@@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
ret = -EINVAL;
}
- dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+ dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
return ret;
}
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index db818f9dcb8e..d670635914ec 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -69,6 +69,7 @@ s32 arm_smccc_get_soc_id_revision(void)
{
return smccc_soc_id_revision;
}
+EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
static int __init smccc_devices_init(void)
{
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 8b9a2556de16..160968301b1f 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -87,7 +87,6 @@ struct ti_sci_desc {
* struct ti_sci_info - Structure representing a TI SCI instance
* @dev: Device pointer
* @desc: SoC description for this instance
- * @nb: Reboot Notifier block
* @d: Debugfs file entry
* @debug_region: Memory region where the debug message are available
* @debug_region_size: Debug region size
@@ -103,7 +102,6 @@ struct ti_sci_desc {
*/
struct ti_sci_info {
struct device *dev;
- struct notifier_block nb;
const struct ti_sci_desc *desc;
struct dentry *d;
void __iomem *debug_region;
@@ -122,7 +120,6 @@ struct ti_sci_info {
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
-#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
#ifdef CONFIG_DEBUG_FS
@@ -3254,10 +3251,9 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
-static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
- void *cmd)
+static int tisci_reboot_handler(struct sys_off_data *data)
{
- struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ struct ti_sci_info *info = data->cb_data;
const struct ti_sci_handle *handle = &info->handle;
ti_sci_cmd_core_reboot(handle);
@@ -3303,7 +3299,6 @@ static int ti_sci_probe(struct platform_device *pdev)
struct mbox_client *cl;
int ret = -EINVAL;
int i;
- int reboot = 0;
u32 h_id;
desc = device_get_match_data(dev);
@@ -3327,8 +3322,6 @@ static int ti_sci_probe(struct platform_device *pdev)
}
}
- reboot = of_property_read_bool(dev->of_node,
- "ti,system-reboot-controller");
INIT_LIST_HEAD(&info->node);
minfo = &info->minfo;
@@ -3399,15 +3392,10 @@ static int ti_sci_probe(struct platform_device *pdev)
ti_sci_setup_ops(info);
- if (reboot) {
- info->nb.notifier_call = tisci_reboot_handler;
- info->nb.priority = 128;
-
- ret = register_restart_handler(&info->nb);
- if (ret) {
- dev_err(dev, "reboot registration fail(%d)\n", ret);
- goto out;
- }
+ ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ goto out;
}
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 98b8fd16183e..80cac3a5f976 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -78,6 +78,7 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_D5005 0x138d
#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
@@ -102,6 +103,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 89851b133709..7ac9f9f5af12 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -529,11 +529,12 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
const u8 *data, u32 size)
{
struct m10bmc_sec *sec = fwl->dd_handle;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 ret;
sec->cancel_request = false;
- if (!size || size > M10BMC_STAGING_SIZE)
+ if (!size || size > csr_map->staging_size)
return FW_UPLOAD_ERR_INVALID_SIZE;
if (sec->m10bmc->flash_bulk_ops)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b50d0b470849..3dbddec07028 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -103,6 +103,15 @@ config GPIO_REGMAP
select REGMAP
tristate
+config GPIO_SWNODE_UNDEFINED
+ bool
+ help
+ This adds a special placeholder for software nodes to contain an
+ undefined GPIO reference. It is primarily used by SPI to allow a
+ list of GPIO chip selects to mark a certain chip select as being
+ controlled by the SPI device's internal chip select mechanism and
+ not by a GPIO.
+
# put drivers in the right section, in alphabetical order
# This symbol is selected by both I2C and SPI expanders
@@ -312,6 +321,24 @@ config GPIO_GENERIC_PLATFORM
help
Say yes here to support basic platform_device memory-mapped GPIO controllers.
+config GPIO_GRANITERAPIDS
+ tristate "Intel Granite Rapids-D vGPIO support"
+ depends on X86 || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
+ help
+ Select this to enable virtual GPIO support on platforms with the
+ following SoCs:
+
+ - Intel Granite Rapids-D
+
+ The driver enables basic GPIO functionality and implements interrupt
+ support. The virtual GPIO driver controls GPIO lines via a firmware
+ interface. The physical GPIO pins reside on a device external to
+ the main SoC package, such as a BMC or a CPLD.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-graniterapids.
+
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
depends on OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fdd28c58d890..e2a53013780e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o
obj-$(CONFIG_GPIO_FXL6408) += gpio-fxl6408.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
+obj-$(CONFIG_GPIO_GRANITERAPIDS) += gpio-graniterapids.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index a789af4a5c85..8dce78ea7139 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -50,7 +50,6 @@ struct brcmstb_gpio_priv {
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
int parent_irq;
- int gpio_base;
int num_gpios;
int parent_wake_irq;
};
@@ -92,7 +91,7 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
struct brcmstb_gpio_bank *bank)
{
- return hwirq - (bank->gc.base - bank->parent_priv->gpio_base);
+ return hwirq - bank->gc.offset;
}
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
@@ -118,7 +117,7 @@ static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
/* gc_offset is relative to this gpio_chip; want real offset */
- int hwirq = offset + (gc->base - priv->gpio_base);
+ int hwirq = offset + gc->offset;
if (hwirq >= priv->num_gpios)
return -ENXIO;
@@ -263,7 +262,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct irq_domain *domain = priv->irq_domain;
- int hwbase = bank->gc.base - priv->gpio_base;
+ int hwbase = bank->gc.offset;
unsigned long status;
while ((status = brcmstb_gpio_get_active_irqs(bank))) {
@@ -412,7 +411,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- offset = gpiospec->args[0] - (gc->base - priv->gpio_base);
+ offset = gpiospec->args[0] - bank->gc.offset;
if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
@@ -596,8 +595,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
const __be32 *p;
u32 bank_width;
int num_banks = 0;
+ int num_gpios = 0;
int err;
- static int gpio_base;
unsigned long flags = 0;
bool need_wakeup_event = false;
@@ -611,7 +610,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- priv->gpio_base = gpio_base;
priv->reg_base = reg_base;
priv->pdev = pdev;
@@ -651,7 +649,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
dev_dbg(dev, "Width 0 found: Empty bank @ %d\n",
num_banks);
num_banks++;
- gpio_base += MAX_GPIO_PER_BANK;
+ num_gpios += MAX_GPIO_PER_BANK;
continue;
}
@@ -691,12 +689,13 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
err = -ENOMEM;
goto fail;
}
- gc->base = gpio_base;
gc->of_gpio_n_cells = 2;
gc->of_xlate = brcmstb_gpio_of_xlate;
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
gc->offset = bank->id * MAX_GPIO_PER_BANK;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
if (priv->parent_irq > 0)
gc->to_irq = brcmstb_gpio_to_irq;
@@ -713,7 +712,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->id);
goto fail;
}
- gpio_base += gc->ngpio;
+ num_gpios += gc->ngpio;
dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
gc->base, gc->ngpio, bank->width);
@@ -724,7 +723,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
num_banks++;
}
- priv->num_gpios = gpio_base - priv->gpio_base;
+ priv->num_gpios = num_gpios;
if (priv->parent_irq > 0) {
err = brcmstb_gpio_irq_setup(pdev, priv);
if (err)
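
A quick worked example of the offset arithmetic above (MAX_GPIO_PER_BANK is 32 in this driver; the bank and line numbers are illustrative):

	bank->id = 2            ->  gc->offset = 2 * 32 = 64
	line 3 of that bank     ->  hwirq = 3 + 64 = 67

The old code derived the same 64 from (gc->base - priv->gpio_base), which only worked because the driver tracked its own running gpio_base; gc->offset keeps the mapping independent of the dynamically assigned global GPIO numbers.
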
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 842e1c060414..0c09bb54dc0c 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -197,11 +198,18 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, gc, cros_ec);
}
+static const struct platform_device_id cros_ec_gpio_id[] = {
+ { "cros-ec-gpio", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_gpio_id);
+
static struct platform_driver cros_ec_gpio_driver = {
.probe = cros_ec_gpio_probe,
.driver = {
.name = "cros-ec-gpio",
},
+ .id_table = cros_ec_gpio_id,
};
module_platform_driver(cros_ec_gpio_driver);
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 1ee62cd58582..25db014494a4 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
case 0x5e:
return GPIOPANELCTL;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
new file mode 100644
index 000000000000..c693fe05d50f
--- /dev/null
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Granite Rapids-D vGPIO driver
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ *
+ * Author: Aapo Vienamo <aapo.vienamo@linux.intel.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
+
+#define GNR_NUM_PINS 128
+#define GNR_PINS_PER_REG 32
+#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)
+
+#define GNR_CFG_BAR 0x00
+#define GNR_CFG_LOCK_OFFSET 0x04
+#define GNR_GPI_STATUS_OFFSET 0x20
+#define GNR_GPI_ENABLE_OFFSET 0x24
+
+#define GNR_CFG_DW_RX_MASK GENMASK(25, 22)
+#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
+#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
+#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
+#define GNR_CFG_DW_RXDIS BIT(4)
+#define GNR_CFG_DW_TXDIS BIT(3)
+#define GNR_CFG_DW_RXSTATE BIT(1)
+#define GNR_CFG_DW_TXSTATE BIT(0)
+
+/**
+ * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
+ * @gc: GPIO controller interface
+ * @reg_base: base address of the GPIO registers
+ * @ro_bitmap: bitmap of read-only pins
+ * @lock: guard the registers
+ * @pad_backup: backup of the register state for suspend
+ */
+struct gnr_gpio {
+ struct gpio_chip gc;
+ void __iomem *reg_base;
+ DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
+ raw_spinlock_t lock;
+ u32 pad_backup[];
+};
+
+static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
+ unsigned int gpio)
+{
+ return priv->reg_base + gpio * sizeof(u32);
+}
+
+static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
+ u32 clear_mask, u32 set_mask)
+{
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ void __iomem *addr = gnr_gpio_get_padcfg_addr(priv, gpio);
+ u32 dw;
+
+ if (test_bit(gpio, priv->ro_bitmap))
+ return -EACCES;
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ dw = readl(addr);
+ dw &= ~clear_mask;
+ dw |= set_mask;
+ writel(dw, addr);
+
+ return 0;
+}
+
+static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ const struct gnr_gpio *priv = gpiochip_get_data(gc);
+ u32 dw;
+
+ dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
+
+ return !!(dw & GNR_CFG_DW_RXSTATE);
+}
+
+static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ u32 clear = 0;
+ u32 set = 0;
+
+ if (value)
+ set = GNR_CFG_DW_TXSTATE;
+ else
+ clear = GNR_CFG_DW_TXSTATE;
+
+ gnr_gpio_configure_line(gc, gpio, clear, set);
+}
+
+static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ u32 dw;
+
+ dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
+
+ if (dw & GNR_CFG_DW_TXDIS)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int gnr_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ return gnr_gpio_configure_line(gc, gpio, GNR_CFG_DW_RXDIS, 0);
+}
+
+static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ u32 clear = GNR_CFG_DW_TXDIS;
+ u32 set = value ? GNR_CFG_DW_TXSTATE : 0;
+
+ return gnr_gpio_configure_line(gc, gpio, clear, set);
+}
+
+static const struct gpio_chip gnr_gpio_chip = {
+ .owner = THIS_MODULE,
+ .get = gnr_gpio_get,
+ .set = gnr_gpio_set,
+ .get_direction = gnr_gpio_get_direction,
+ .direction_input = gnr_gpio_direction_input,
+ .direction_output = gnr_gpio_direction_output,
+};
+
+static void __iomem *gnr_gpio_get_reg_addr(const struct gnr_gpio *priv,
+ unsigned int base,
+ unsigned int gpio)
+{
+ return priv->reg_base + base + gpio * sizeof(u32);
+}
+
+static void gnr_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
+ unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
+ void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_STATUS_OFFSET, reg_idx);
+ u32 reg;
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ reg = readl(addr);
+ reg &= ~BIT(bit_idx);
+ writel(reg, addr);
+}
+
+static void gnr_gpio_irq_mask_unmask(struct gpio_chip *gc, unsigned long gpio, bool mask)
+{
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ unsigned int reg_idx = gpio / GNR_PINS_PER_REG;
+ unsigned int bit_idx = gpio % GNR_PINS_PER_REG;
+ void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_ENABLE_OFFSET, reg_idx);
+ u32 reg;
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ reg = readl(addr);
+ if (mask)
+ reg &= ~BIT(bit_idx);
+ else
+ reg |= BIT(bit_idx);
+ writel(reg, addr);
+}
+
+static void gnr_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gnr_gpio_irq_mask_unmask(gc, hwirq, true);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void gnr_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ gnr_gpio_irq_mask_unmask(gc, hwirq, false);
+}
+
+static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t pin = irqd_to_hwirq(d);
+ u32 mask = GNR_CFG_DW_RX_MASK;
+ u32 set;
+
+ /* Falling edge and level low triggers are not supported by the GPIO controller */
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ set = GNR_CFG_DW_RX_DISABLE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ set = GNR_CFG_DW_RX_EDGE;
+ irq_set_handler_locked(d, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ set = GNR_CFG_DW_RX_LEVEL;
+ irq_set_handler_locked(d, handle_level_irq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return gnr_gpio_configure_line(gc, pin, mask, set);
+}
+
+static const struct irq_chip gnr_gpio_irq_chip = {
+ .irq_ack = gnr_gpio_irq_ack,
+ .irq_mask = gnr_gpio_irq_mask,
+ .irq_unmask = gnr_gpio_irq_unmask,
+ .irq_set_type = gnr_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void gnr_gpio_init_pin_ro_bits(struct device *dev,
+ const void __iomem *cfg_lock_base,
+ unsigned long *ro_bitmap)
+{
+ u32 tmp[GNR_NUM_REGS];
+
+ memcpy_fromio(tmp, cfg_lock_base, sizeof(tmp));
+ bitmap_from_arr32(ro_bitmap, tmp, GNR_NUM_PINS);
+}
+
+static irqreturn_t gnr_gpio_irq(int irq, void *data)
+{
+ struct gnr_gpio *priv = data;
+ unsigned int handled = 0;
+
+ for (unsigned int i = 0; i < GNR_NUM_REGS; i++) {
+ const void __iomem *reg = priv->reg_base + i * sizeof(u32);
+ unsigned long pending;
+ unsigned long enabled;
+ unsigned int bit_idx;
+
+ scoped_guard(raw_spinlock, &priv->lock) {
+ pending = readl(reg + GNR_GPI_STATUS_OFFSET);
+ enabled = readl(reg + GNR_GPI_ENABLE_OFFSET);
+ }
+
+ /* Only enabled interrupts */
+ pending &= enabled;
+
+ for_each_set_bit(bit_idx, &pending, GNR_PINS_PER_REG) {
+ unsigned int hwirq = i * GNR_PINS_PER_REG + bit_idx;
+
+ generic_handle_domain_irq(priv->gc.irq.domain, hwirq);
+ }
+
+ handled += pending ? 1 : 0;
+
+ }
+ return IRQ_RETVAL(handled);
+}
+
+static int gnr_gpio_probe(struct platform_device *pdev)
+{
+ size_t num_backup_pins = IS_ENABLED(CONFIG_PM_SLEEP) ? GNR_NUM_PINS : 0;
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
+ struct gnr_gpio *priv;
+ void __iomem *regs;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, gnr_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD,
+ dev_name(dev), priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+
+ priv->reg_base = regs + readl(regs + GNR_CFG_BAR);
+
+ gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
+ priv->ro_bitmap);
+
+ priv->gc = gnr_gpio_chip;
+ priv->gc.label = dev_name(dev);
+ priv->gc.parent = dev;
+ priv->gc.ngpio = GNR_NUM_PINS;
+ priv->gc.base = -1;
+
+ girq = &priv->gc.irq;
+ gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
+ girq->chip->name = dev_name(dev);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_gpiochip_add_data(dev, &priv->gc, priv);
+}
+
+static int gnr_gpio_suspend(struct device *dev)
+{
+ struct gnr_gpio *priv = dev_get_drvdata(dev);
+ unsigned int i;
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
+ priv->pad_backup[i] = readl(gnr_gpio_get_padcfg_addr(priv, i));
+
+ return 0;
+}
+
+static int gnr_gpio_resume(struct device *dev)
+{
+ struct gnr_gpio *priv = dev_get_drvdata(dev);
+ unsigned int i;
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio)
+ writel(priv->pad_backup[i], gnr_gpio_get_padcfg_addr(priv, i));
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(gnr_gpio_pm_ops, gnr_gpio_suspend, gnr_gpio_resume);
+
+static const struct acpi_device_id gnr_gpio_acpi_match[] = {
+ { "INTC1109" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, gnr_gpio_acpi_match);
+
+static struct platform_driver gnr_gpio_driver = {
+ .driver = {
+ .name = "gpio-graniterapids",
+ .pm = pm_sleep_ptr(&gnr_gpio_pm_ops),
+ .acpi_match_table = gnr_gpio_acpi_match,
+ },
+ .probe = gnr_gpio_probe,
+};
+module_platform_driver(gnr_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Aapo Vienamo <aapo.vienamo@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Granite Rapids-D vGPIO driver");
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5ef8af824980..c097e310c9e8 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index d31788b43abc..260570614543 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -434,7 +434,7 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *ic = irq_desc_get_chip(desc);
struct npcm_sgpio *gpio = gpiochip_get_data(gc);
- unsigned int i, j, girq;
+ unsigned int i, j;
unsigned long reg;
chained_irq_enter(ic, desc);
@@ -443,11 +443,9 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i];
reg = ioread8(bank_reg(gpio, bank, EVENT_STS));
- for_each_set_bit(j, &reg, 8) {
- girq = irq_find_mapping(gc->irq.domain,
- i * 8 + gpio->nout_sgpio + j);
- generic_handle_domain_irq(gc->irq.domain, girq);
- }
+ for_each_set_bit(j, &reg, 8)
+ generic_handle_domain_irq(gc->irq.domain,
+ i * 8 + gpio->nout_sgpio + j);
}
chained_irq_exit(ic, desc);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 00ffa168e405..77a2812f2974 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -144,7 +144,7 @@ static int pca953x_acpi_get_irq(struct device *dev)
if (ret)
dev_warn(dev, "can't add GPIO ACPI mapping\n");
- ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0);
if (ret < 0)
return ret;
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 2efd1b1a0805..7f7f95ad4343 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -267,7 +267,7 @@ static int idio_24_reg_mask_xlate(struct gpio_regmap *const gpio, const unsigned
case IDIO_24_CONTROL_REG:
/* We can only set direction for TTL/CMOS lines */
if (offset < 48)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
*reg = IDIO_24_CONTROL_REG;
*mask = CONTROL_REG_OUT_MODE;
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index c08c8e528867..71684dee2ca5 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -129,7 +129,7 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
base = gpio_regmap_addr(gpio->reg_dir_in_base);
invert = 1;
} else {
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
@@ -160,7 +160,7 @@ static int gpio_regmap_set_direction(struct gpio_chip *chip,
base = gpio_regmap_addr(gpio->reg_dir_in_base);
invert = 1;
} else {
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index e48392074e4b..ff0341b1222f 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -38,8 +38,8 @@
struct sch_gpio {
struct gpio_chip chip;
+ void __iomem *regs;
spinlock_t lock;
- unsigned short iobase;
unsigned short resume_base;
/* GPE handling */
@@ -75,7 +75,7 @@ static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned int gpio, unsigned in
offset = sch_gpio_offset(sch, gpio, reg);
bit = sch_gpio_bit(sch, gpio);
- reg_val = !!(inb(sch->iobase + offset) & BIT(bit));
+ reg_val = !!(ioread8(sch->regs + offset) & BIT(bit));
return reg_val;
}
@@ -89,12 +89,14 @@ static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned int gpio, unsigned i
offset = sch_gpio_offset(sch, gpio, reg);
bit = sch_gpio_bit(sch, gpio);
- reg_val = inb(sch->iobase + offset);
+ reg_val = ioread8(sch->regs + offset);
if (val)
- outb(reg_val | BIT(bit), sch->iobase + offset);
+ reg_val |= BIT(bit);
else
- outb((reg_val & ~BIT(bit)), sch->iobase + offset);
+ reg_val &= ~BIT(bit);
+
+ iowrite8(reg_val, sch->regs + offset);
}
static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned int gpio_num)
@@ -267,8 +269,8 @@ static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
spin_lock_irqsave(&sch->lock, flags);
- core_status = inl(sch->iobase + CORE_BANK_OFFSET + GTS);
- resume_status = inl(sch->iobase + RESUME_BANK_OFFSET + GTS);
+ core_status = ioread32(sch->regs + CORE_BANK_OFFSET + GTS);
+ resume_status = ioread32(sch->regs + RESUME_BANK_OFFSET + GTS);
spin_unlock_irqrestore(&sch->lock, flags);
@@ -319,12 +321,14 @@ static int sch_gpio_install_gpe_handler(struct sch_gpio *sch)
static int sch_gpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct sch_gpio *sch;
struct resource *res;
+ void __iomem *regs;
int ret;
- sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
+ sch = devm_kzalloc(dev, sizeof(*sch), GFP_KERNEL);
if (!sch)
return -ENOMEM;
@@ -332,15 +336,16 @@ static int sch_gpio_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
+ regs = devm_ioport_map(dev, res->start, resource_size(res));
+ if (!regs)
return -EBUSY;
+ sch->regs = regs;
+
spin_lock_init(&sch->lock);
- sch->iobase = res->start;
sch->chip = sch_gpio_chip;
- sch->chip.label = dev_name(&pdev->dev);
- sch->chip.parent = &pdev->dev;
+ sch->chip.label = dev_name(dev);
+ sch->chip.parent = dev;
switch (pdev->id) {
case PCI_DEVICE_ID_INTEL_SCH_LPC:
@@ -394,9 +399,9 @@ static int sch_gpio_probe(struct platform_device *pdev)
ret = sch_gpio_install_gpe_handler(sch);
if (ret)
- dev_warn(&pdev->dev, "Can't setup GPE, no IRQ support\n");
+ dev_warn(dev, "Can't setup GPE, no IRQ support\n");
- return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
+ return devm_gpiochip_add_data(dev, &sch->chip, sch);
}
static struct platform_driver sch_gpio_driver = {
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index b75e0b12087a..4b29abafecf6 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -195,7 +195,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
void __iomem *gisr;
u8 shift;
@@ -227,7 +228,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -236,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db40d..9130c691a2dd 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+ * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all the
+ * registers for the given GPIO pin.
+ * When SCR_SEC_[R|W]EN is set, the accompanying SCR_SEC_G1[R|W] bit must also
+ * be checked to determine read/write access to the registers for the given
+ * GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
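
An illustrative restatement of the rule described in the comment above (not driver code; the helper name is made up), which may be easier to read than the combined condition:

	#include <linux/types.h>

	/*
	 * One side (read or write) is accessible when its *_EN bit is clear,
	 * or when the *_EN bit is set together with the matching G1 grant bit.
	 */
	static bool scr_side_allowed(u32 scr, u32 en_bit, u32 grant_bit)
	{
		return !(scr & en_bit) || (scr & grant_bit);
	}

	/*
	 * accessible = scr_side_allowed(value, TEGRA186_GPIO_SCR_SEC_REN,
	 *                               TEGRA186_GPIO_SCR_SEC_G1R) &&
	 *              scr_side_allowed(value, TEGRA186_GPIO_SCR_SEC_WEN,
	 *                               TEGRA186_GPIO_SCR_SEC_G1W);
	 */
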
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index c18b6b47384f..94ca9d03c094 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
if (gpio >= WCOVE_GPIO_NUM)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
return reg + gpio;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 7f140df40f35..bb063b81cee6 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -128,7 +128,24 @@ static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
- return device_match_acpi_handle(&gc->gpiodev->dev, data);
+ /* First check the actual GPIO device */
+ if (device_match_acpi_handle(&gc->gpiodev->dev, data))
+ return true;
+
+ /*
+ * When the ACPI device is artificially split into banks of GPIOs,
+ * each of them represented by a separate GPIO device, the firmware
+ * node of the physical device may not be shared among the banks, as
+ * they may require different values for the same property, e.g. the
+ * number of GPIOs in a certain bank. In such a case the ACPI handle
+ * of a GPIO device is NULL and cannot be used. Hence we have to check
+ * the parent device to be sure that there is no match before bailing
+ * out.
+ */
+ if (gc->parent)
+ return device_match_acpi_handle(gc->parent, data);
+
+ return false;
}
/**
@@ -873,9 +890,6 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
struct acpi_gpio_lookup lookup;
int ret;
- if (!adev)
- return ERR_PTR(-ENODEV);
-
memset(&lookup, 0, sizeof(lookup));
lookup.index = index;
@@ -941,6 +955,10 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
const char *con_id)
{
+ /* If there is no ACPI device, there is no _CRS to fall back to */
+ if (!adev)
+ return false;
+
/* Never allow fallback if the device has properties */
if (acpi_dev_has_props(adev) || adev->driver_gpios)
return false;
@@ -948,14 +966,11 @@ static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
return con_id == NULL;
}
-struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags *dflags,
- unsigned long *lookupflags)
+static struct gpio_desc *
+__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx,
+ bool can_fallback, struct acpi_gpio_info *info)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
int i;
@@ -972,25 +987,38 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
if (adev)
desc = acpi_get_gpiod_by_index(adev,
- propname, idx, &info);
+ propname, idx, info);
else
desc = acpi_get_gpiod_from_data(fwnode,
- propname, idx, &info);
- if (!IS_ERR(desc))
- break;
+ propname, idx, info);
if (PTR_ERR(desc) == -EPROBE_DEFER)
return ERR_CAST(desc);
+
+ if (!IS_ERR(desc))
+ return desc;
}
/* Then from plain _CRS GPIOs */
- if (IS_ERR(desc)) {
- if (!adev || !acpi_can_fallback_to_crs(adev, con_id))
- return ERR_PTR(-ENOENT);
+ if (can_fallback)
+ return acpi_get_gpiod_by_index(adev, NULL, idx, info);
- desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
- if (IS_ERR(desc))
- return desc;
- }
+ return ERR_PTR(-ENOENT);
+}
+
+struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *dflags,
+ unsigned long *lookupflags)
+{
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
+ bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
+ struct acpi_gpio_info info;
+ struct gpio_desc *desc;
+
+ desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
+ if (IS_ERR(desc))
+ return desc;
if (info.gpioint &&
(*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
@@ -1006,7 +1034,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
/**
* acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
* @adev: pointer to a ACPI device to get IRQ from
- * @name: optional name of GpioInt resource
+ * @con_id: optional name of GpioInt resource
* @index: index of GpioInt resource (starting from %0)
* @wake_capable: Set to true if the IRQ is wake capable
*
@@ -1017,17 +1045,18 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
* The function is idempotent, though each time it runs it will configure GPIO
* pin direction according to the flags in GpioInt resource.
*
- * The function takes optional @name parameter. If the resource has a property
- * name, then only those will be taken into account.
+ * The function takes optional @con_id parameter. If the resource has
+ * a @con_id in a property, then only those will be taken into account.
*
* The GPIO is considered wake capable if the GpioInt resource specifies
* SharedAndWake or ExclusiveAndWake.
*
* Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
-int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
bool *wake_capable)
{
+ struct fwnode_handle *fwnode = acpi_fwnode_handle(adev);
int idx, i;
unsigned int irq_flags;
int ret;
@@ -1036,9 +1065,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in
struct acpi_gpio_info info;
struct gpio_desc *desc;
- desc = acpi_get_gpiod_by_index(adev, name, i, &info);
-
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
+ desc = __acpi_find_gpio(fwnode, con_id, i, true, &info);
if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
return PTR_ERR(desc);
@@ -1058,7 +1086,11 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in
acpi_gpio_update_gpiod_flags(&dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
- snprintf(label, sizeof(label), "GpioInt() %d", index);
+ snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index);
+ ret = gpiod_set_consumer_name(desc, con_id ?: label);
+ if (ret)
+ return ret;
+
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret < 0)
return ret;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f384fa278764..9dad67ea2597 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -728,6 +728,25 @@ static u32 line_event_id(int level)
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}
+static inline char *make_irq_label(const char *orig)
+{
+ char *new;
+
+ if (!orig)
+ return NULL;
+
+ new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ return new;
+}
+
+static inline void free_irq_label(const char *label)
+{
+ kfree(label);
+}
+
#ifdef CONFIG_HTE
static enum hte_return process_hw_ts_thread(void *p)
@@ -1015,6 +1034,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
+ char *label;
/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
@@ -1037,11 +1057,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
if (irq < 0)
return -ENXIO;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return -ENOMEM;
+
irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
+ label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1086,7 +1112,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
static void edge_detector_stop(struct line *line)
{
if (line->irq) {
- free_irq(line->irq, line);
+ free_irq_label(free_irq(line->irq, line));
line->irq = 0;
}
@@ -1110,6 +1136,7 @@ static int edge_detector_setup(struct line *line,
unsigned long irqflags = 0;
u64 eflags;
int irq, ret;
+ char *label;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
@@ -1146,11 +1173,17 @@ static int edge_detector_setup(struct line *line,
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
- irqflags, line->req->label, line);
- if (ret)
+ irqflags, label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
return 0;
@@ -1160,6 +1193,8 @@ static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
{
+ u64 eflags;
+ int ret;
u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
@@ -1171,6 +1206,18 @@ static int edge_detector_update(struct line *line,
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
line_set_debounce_period(line, debounce_period_us);
+ /*
+ * ensure event fifo is initialised if edge detection
+ * is now enabled.
+ */
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
+ if (eflags && !kfifo_initialized(&line->req->events)) {
+ ret = kfifo_alloc(&line->req->events,
+ line->req->event_buffer_size,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1973,7 +2020,7 @@ static void lineevent_free(struct lineevent_state *le)
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
&le->device_unregistered_nb);
if (le->irq)
- free_irq(le->irq, le);
+ free_irq_label(free_irq(le->irq, le));
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
@@ -2114,6 +2161,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
int fd;
int ret;
int irq, irqflags = 0;
+ char *label;
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
@@ -2198,15 +2246,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ label = make_irq_label(le->label);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ goto out_free_le;
+ }
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
- le->label,
+ label,
le);
- if (ret)
+ if (ret) {
+ free_irq_label(label);
goto out_free_le;
+ }
le->irq = irq;
@@ -2309,7 +2365,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
dflags = READ_ONCE(desc->flags);
- scoped_guard(srcu, &desc->srcu) {
+ scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
if (label && test_bit(FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
@@ -2757,11 +2813,11 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- bitmap_free(cdev->watched_lines);
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
+ bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index b138682fec3d..5a9911ae9125 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -28,10 +28,9 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
struct gpio_desc *desc;
int err;
- desc = gpio_to_desc(gpio);
-
/* Compatibility: assume unavailable "valid" GPIOs will appear later */
- if (!desc && gpio_is_valid(gpio))
+ desc = gpio_to_desc(gpio);
+ if (!desc)
return -EPROBE_DEFER;
err = gpiod_request(desc, label);
@@ -63,51 +62,13 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
*/
int gpio_request(unsigned gpio, const char *label)
{
- struct gpio_desc *desc = gpio_to_desc(gpio);
+ struct gpio_desc *desc;
/* Compatibility: assume unavailable "valid" GPIOs will appear later */
- if (!desc && gpio_is_valid(gpio))
+ desc = gpio_to_desc(gpio);
+ if (!desc)
return -EPROBE_DEFER;
return gpiod_request(desc, label);
}
EXPORT_SYMBOL_GPL(gpio_request);
-
-/**
- * gpio_request_array - request multiple GPIOs in a single call
- * @array: array of the 'struct gpio'
- * @num: how many GPIOs in the array
- *
- * **DEPRECATED** This function is deprecated and must not be used in new code.
- */
-int gpio_request_array(const struct gpio *array, size_t num)
-{
- int i, err;
-
- for (i = 0; i < num; i++, array++) {
- err = gpio_request_one(array->gpio, array->flags, array->label);
- if (err)
- goto err_free;
- }
- return 0;
-
-err_free:
- while (i--)
- gpio_free((--array)->gpio);
- return err;
-}
-EXPORT_SYMBOL_GPL(gpio_request_array);
-
-/**
- * gpio_free_array - release multiple GPIOs in a single call
- * @array: array of the 'struct gpio'
- * @num: how many GPIOs in the array
- *
- * **DEPRECATED** This function is deprecated and must not be used in new code.
- */
-void gpio_free_array(const struct gpio *array, size_t num)
-{
- while (num--)
- gpio_free((array++)->gpio);
-}
-EXPORT_SYMBOL_GPL(gpio_free_array);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index cb0cefaec37e..d75f6ee37028 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1037,7 +1037,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
struct of_phandle_args pinspec;
struct pinctrl_dev *pctldev;
struct device_node *np;
- int index = 0, ret;
+ int index = 0, ret, trim;
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
struct property *group_names;
@@ -1059,7 +1059,14 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!pctldev)
return -EPROBE_DEFER;
+ /* Ignore ranges outside of this GPIO chip */
+ if (pinspec.args[0] >= (chip->offset + chip->ngpio))
+ continue;
+ if (pinspec.args[0] + pinspec.args[2] <= chip->offset)
+ continue;
+
if (pinspec.args[2]) {
+ /* npins != 0: linear range */
if (group_names) {
of_property_read_string_index(np,
group_names_propname,
@@ -1070,7 +1077,19 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
break;
}
}
- /* npins != 0: linear range */
+
+ /* Trim the range to fit this GPIO chip */
+ if (chip->offset > pinspec.args[0]) {
+ trim = chip->offset - pinspec.args[0];
+ pinspec.args[2] -= trim;
+ pinspec.args[1] += trim;
+ pinspec.args[0] = 0;
+ } else {
+ pinspec.args[0] -= chip->offset;
+ }
+ if ((pinspec.args[0] + pinspec.args[2]) > chip->ngpio)
+ pinspec.args[2] = chip->ngpio - pinspec.args[0];
+
ret = gpiochip_add_pin_range(chip,
pinctrl_dev_get_devname(pctldev),
pinspec.args[0],
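
A worked example of the trimming above, with made-up numbers: take a chip with chip->offset = 32 and chip->ngpio = 32, and a gpio-ranges entry of <gpio 24, pin 100, npins 16>:

	24 >= 32 + 32 ?             no  ->  range is not past this chip
	24 + 16 <= 32 ?             no  ->  range overlaps this chip
	chip->offset (32) > 24      ->   trim = 8
	                                 npins     = 16 - 8  = 8
	                                 pin base  = 100 + 8 = 108
	                                 gpio base = 0
	0 + 8 > 32 ?                no  ->  no tail clamp needed

so GPIOs 0..7 of this chip end up mapped to pins 108..115.
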
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index fa52bdb1a29a..cec1ab878af8 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -4,8 +4,13 @@
*
* Copyright 2022 Google LLC
*/
+
+#define pr_fmt(fmt) "gpiolib: swnode: " fmt
+
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/property.h>
@@ -17,6 +22,8 @@
#include "gpiolib.h"
#include "gpiolib-swnode.h"
+#define GPIOLIB_SWNODE_UNDEFINED_NAME "swnode-gpio-undefined"
+
static void swnode_format_propname(const char *con_id, char *propname,
size_t max_size)
{
@@ -40,6 +47,14 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
if (!gdev_node || !gdev_node->name)
return ERR_PTR(-EINVAL);
+ /*
+ * Check for a special node that identifies undefined GPIOs; this is
+ * primarily used as a key for internal chip selects in SPI bindings.
+ */
+ if (IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) &&
+ !strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
+ return ERR_PTR(-ENOENT);
+
gdev = gpio_device_find_by_label(gdev_node->name);
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}
@@ -121,3 +136,32 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
return count ?: -ENOENT;
}
+
+#if IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED)
+/*
+ * A special node that identifies undefined GPIOs; it is primarily used as
+ * a key for internal chip selects in SPI bindings.
+ */
+const struct software_node swnode_gpio_undefined = {
+ .name = GPIOLIB_SWNODE_UNDEFINED_NAME,
+};
+EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, GPIO_SWNODE);
+
+static int __init swnode_gpio_init(void)
+{
+ int ret;
+
+ ret = software_node_register(&swnode_gpio_undefined);
+ if (ret < 0)
+ pr_err("failed to register swnode: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(swnode_gpio_init);
+
+static void __exit swnode_gpio_cleanup(void)
+{
+ software_node_unregister(&swnode_gpio_undefined);
+}
+__exitcall(swnode_gpio_cleanup);
+#endif
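
A rough sketch of how the placeholder is meant to be consumed (the property array and all my_* names are hypothetical, and the exact SPI-side plumbing is outside this diff): a software-node description can point a chip-select reference at swnode_gpio_undefined, and the lookup above then returns -ENOENT for that line instead of deferring forever on a GPIO controller that will never appear.

	#include <linux/property.h>

	/*
	 * Declared by gpiolib (exported in the GPIO_SWNODE namespace above);
	 * repeated here only to keep the sketch self-contained. A module
	 * would also need MODULE_IMPORT_NS(GPIO_SWNODE).
	 */
	extern const struct software_node swnode_gpio_undefined;

	/*
	 * Hypothetical properties for a SPI controller whose chip select 0 is
	 * driven by the controller itself: the "cs-gpios" reference resolves
	 * to the undefined-GPIO node, so gpiolib reports -ENOENT for it. The
	 * two trailing arguments (index, flags) are don't-cares here.
	 */
	static const struct property_entry my_spi_ctrl_props[] = {
		PROPERTY_ENTRY_REF("cs-gpios", &swnode_gpio_undefined, 0, 0),
		{ }
	};
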
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 6853ecd98bcb..26202586fd39 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -412,7 +412,7 @@ static ssize_t base_show(struct device *dev,
{
const struct gpio_device *gdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%d\n", gdev->base);
+ return sysfs_emit(buf, "%u\n", gdev->base);
}
static DEVICE_ATTR_RO(base);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index ce94e37bcbee..fa62367ee929 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -101,6 +101,7 @@ static bool gpiolib_initialized;
const char *gpiod_get_label(struct gpio_desc *desc)
{
+ struct gpio_desc_label *label;
unsigned long flags;
flags = READ_ONCE(desc->flags);
@@ -108,23 +109,36 @@ const char *gpiod_get_label(struct gpio_desc *desc)
!test_bit(FLAG_REQUESTED, &flags))
return "interrupt";
- return test_bit(FLAG_REQUESTED, &flags) ?
- srcu_dereference(desc->label, &desc->srcu) : NULL;
+ if (!test_bit(FLAG_REQUESTED, &flags))
+ return NULL;
+
+ label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
+ srcu_read_lock_held(&desc->gdev->desc_srcu));
+
+ return label->str;
+}
+
+static void desc_free_label(struct rcu_head *rh)
+{
+ kfree(container_of(rh, struct gpio_desc_label, rh));
}
static int desc_set_label(struct gpio_desc *desc, const char *label)
{
- const char *new = NULL, *old;
+ struct gpio_desc_label *new = NULL, *old;
if (label) {
- new = kstrdup_const(label, GFP_KERNEL);
+ new = kzalloc(struct_size(new, str, strlen(label) + 1),
+ GFP_KERNEL);
if (!new)
return -ENOMEM;
+
+ strcpy(new->str, label);
}
old = rcu_replace_pointer(desc->label, new, 1);
- synchronize_srcu(&desc->srcu);
- kfree_const(old);
+ if (old)
+ call_srcu(&desc->gdev->desc_srcu, &old->rh, desc_free_label);
return 0;
}
@@ -150,9 +164,6 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
}
}
- if (!gpio_is_valid(gpio))
- pr_warn("invalid GPIO %d\n", gpio);
-
return NULL;
}
EXPORT_SYMBOL_GPL(gpio_to_desc);
@@ -297,10 +308,10 @@ struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev)
EXPORT_SYMBOL_GPL(gpio_device_get_chip);
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
-static int gpiochip_find_base_unlocked(int ngpio)
+static int gpiochip_find_base_unlocked(u16 ngpio)
{
+ unsigned int base = GPIO_DYNAMIC_BASE;
struct gpio_device *gdev;
- int base = GPIO_DYNAMIC_BASE;
list_for_each_entry_srcu(gdev, &gpio_devices, list,
lockdep_is_held(&gpio_devices_lock)) {
@@ -311,9 +322,11 @@ static int gpiochip_find_base_unlocked(int ngpio)
base = gdev->base + gdev->ngpio;
if (base < GPIO_DYNAMIC_BASE)
base = GPIO_DYNAMIC_BASE;
+ if (base > GPIO_DYNAMIC_MAX - ngpio)
+ break;
}
- if (gpio_is_valid(base)) {
+ if (base <= GPIO_DYNAMIC_MAX - ngpio) {
pr_debug("%s: found new base at %d\n", __func__, base);
return base;
} else {
@@ -365,7 +378,10 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret < 0)
return ret;
- /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */
+ /*
+ * GPIO_LINE_DIRECTION_IN or other positive,
+ * otherwise GPIO_LINE_DIRECTION_OUT.
+ */
if (ret > 0)
ret = 1;
@@ -695,10 +711,10 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
- unsigned int i;
- for (i = 0; i < gdev->ngpio; i++)
- cleanup_srcu_struct(&gdev->descs[i].srcu);
+ /* Call pending kfree()s for descriptor labels. */
+ synchronize_srcu(&gdev->desc_srcu);
+ cleanup_srcu_struct(&gdev->desc_srcu);
ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
@@ -746,7 +762,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
if (ret)
goto err_remove_device;
- dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
+ dev_dbg(&gdev->dev, "registered GPIOs %u to %u on %s\n", gdev->base,
gdev->base + gdev->ngpio - 1, gdev->label);
return 0;
@@ -975,6 +991,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_remove_from_list;
+ ret = init_srcu_struct(&gdev->desc_srcu);
+ if (ret)
+ goto err_cleanup_gdev_srcu;
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -982,23 +1002,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (gc->names) {
ret = gpiochip_set_desc_names(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
}
ret = gpiochip_set_names(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
ret = gpiochip_init_valid_mask(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
- ret = init_srcu_struct(&desc->srcu);
- if (ret)
- goto err_cleanup_desc_srcu;
-
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->get_direction(gc, desc_index));
@@ -1010,7 +1026,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = of_gpiochip_add(gc);
if (ret)
- goto err_cleanup_desc_srcu;
+ goto err_free_valid_mask;
ret = gpiochip_add_pin_ranges(gc);
if (ret)
@@ -1057,10 +1073,10 @@ err_free_hogs:
gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
of_gpiochip_remove(gc);
-err_cleanup_desc_srcu:
- while (desc_index--)
- cleanup_srcu_struct(&gdev->descs[desc_index].srcu);
+err_free_valid_mask:
gpiochip_free_valid_mask(gc);
+err_cleanup_desc_srcu:
+ cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
@@ -1175,6 +1191,9 @@ struct gpio_device *gpio_device_find(const void *data,
list_for_each_entry_srcu(gdev, &gpio_devices, list,
srcu_read_lock_held(&gpio_devices_srcu)) {
+ if (!device_is_registered(&gdev->dev))
+ continue;
+
guard(srcu)(&gdev->srcu);
gc = srcu_dereference(gdev->chip, &gdev->srcu);
@@ -2387,7 +2406,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (!test_bit(FLAG_REQUESTED, &desc->flags))
return NULL;
- guard(srcu)(&desc->srcu);
+ guard(srcu)(&desc->gdev->desc_srcu);
label = kstrdup(gpiod_get_label(desc), GFP_KERNEL);
if (!label)
@@ -2397,6 +2416,11 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);
+static inline const char *function_name_or_default(const char *con_id)
+{
+ return con_id ?: "(default)";
+}
+
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
* @gc: GPIO chip
@@ -2425,10 +2449,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags)
{
struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
+ const char *name = function_name_or_default(label);
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2438,8 +2463,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
+ chip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4153,19 +4178,17 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
enum gpiod_flags *flags,
unsigned long *lookupflags)
{
+ const char *name = function_name_or_default(con_id);
struct gpio_desc *desc = ERR_PTR(-ENOENT);
if (is_of_node(fwnode)) {
- dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);
} else if (is_acpi_node(fwnode)) {
- dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);
} else if (is_software_node(fwnode)) {
- dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);
}
@@ -4181,6 +4204,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ const char *name = function_name_or_default(con_id);
/*
* scoped_guard() is implemented as a for loop, meaning static
* analyzers will complain about these two not being initialized.
@@ -4203,8 +4227,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
}
if (IS_ERR(desc)) {
- dev_dbg(consumer, "No GPIO consumer %s found\n",
- con_id);
+ dev_dbg(consumer, "No GPIO consumer %s found\n", name);
return desc;
}
@@ -4226,15 +4249,14 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
*
* FIXME: Make this more sane and safe.
*/
- dev_info(consumer,
- "nonexclusive access to GPIO for %s\n", con_id);
+ dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
return desc;
}
ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (ret < 0) {
- dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
+ dev_err(consumer, "setup of GPIO %s failed: %d\n", name, ret);
return ERR_PTR(ret);
}
@@ -4350,6 +4372,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
+ const char *name = function_name_or_default(con_id);
int ret;
if (lflags & GPIO_ACTIVE_LOW)
@@ -4393,7 +4416,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- gpiod_dbg(desc, "no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for GPIO %s\n", name);
return 0;
}
@@ -4774,21 +4797,21 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
}
for_each_gpio_desc(gc, desc) {
- guard(srcu)(&desc->srcu);
+ guard(srcu)(&desc->gdev->desc_srcu);
if (test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_get_direction(desc);
is_out = test_bit(FLAG_IS_OUT, &desc->flags);
value = gpio_chip_get_value(gc, desc);
is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
- seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s\n",
+ seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
value >= 0 ? (value ? "hi" : "lo") : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
} else if (desc->name) {
- seq_printf(s, " gpio-%-3d (%-20.20s)\n", gpio, desc->name);
+ seq_printf(s, " gpio-%-3u (%-20.20s)\n", gpio, desc->name);
}
gpio++;
@@ -4860,7 +4883,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
return 0;
}
- seq_printf(s, "%s%s: GPIOs %d-%d", priv->newline ? "\n" : "",
+ seq_printf(s, "%s%s: GPIOs %u-%u", priv->newline ? "\n" : "",
dev_name(&gdev->dev),
gdev->base, gdev->base + gdev->ngpio - 1);
parent = gc->parent;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index f67d5991ab1c..48e086c2f416 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -31,6 +31,7 @@
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
* @descs: array of ngpio descriptors.
+ * @desc_srcu: ensures consistent state of GPIO descriptors exposed to users
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
* @can_sleep: indicate whether the GPIO chip driver's callbacks can sleep
@@ -61,7 +62,8 @@ struct gpio_device {
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
- int base;
+ struct srcu_struct desc_srcu;
+ unsigned int base;
u16 ngpio;
bool can_sleep;
const char *label;
@@ -137,6 +139,11 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
+struct gpio_desc_label {
+ struct rcu_head rh;
+ char str[];
+};
+
/**
* struct gpio_desc - Opaque descriptor for a GPIO
*
@@ -145,7 +152,6 @@ void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
- * @srcu: SRCU struct protecting the label pointer.
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -177,13 +183,12 @@ struct gpio_desc {
#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
- const char __rcu *label;
+ struct gpio_desc_label __rcu *label;
/* Name of the GPIO */
const char *name;
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
- struct srcu_struct srcu;
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
@@ -251,7 +256,7 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
#define gpiod_err(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
@@ -259,7 +264,7 @@ do { \
#define gpiod_warn(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
@@ -267,7 +272,7 @@ do { \
#define gpiod_dbg(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
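As background for the gpiolib.h changes above (per-device desc_srcu plus a flexible-array label), the following is a minimal, hypothetical sketch of how such a label could be swapped under RCU and released with kfree_rcu(); the helper name and the assumption that writers are serialized by the caller are illustrative only, and the struct is restated purely to keep the snippet self-contained.

#include <linux/overflow.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct gpio_desc_label {
	struct rcu_head rh;
	char str[];
};

/* Hypothetical helper: publish a new label, defer-free the old one. */
static int example_desc_set_label(struct gpio_desc_label __rcu **slot,
				  const char *text)
{
	struct gpio_desc_label *new, *old;

	new = kzalloc(struct_size(new, str, strlen(text) + 1), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	strcpy(new->str, text);

	/* Writers are assumed to be serialized by the caller. */
	old = rcu_replace_pointer(*slot, new, true);
	if (old)
		kfree_rcu(old, rh);

	return 0;
}

Readers would then dereference the pointer inside an SRCU read-side section on the device's desc_srcu, as the gpiod_dbg/gpiod_warn/gpiod_err macros above now do.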
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5a0c476361c3..026444eeb5c6 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,6 +29,8 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+if DRM
+
config DRM_MIPI_DBI
tristate
depends on DRM
@@ -102,6 +104,38 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
+config DRM_PANIC
+ bool "Display a user-friendly message when a kernel panic occurs"
+ depends on DRM && !FRAMEBUFFER_CONSOLE
+ select DRM_KMS_HELPER
+ select FONT_SUPPORT
+ help
+ Enable a drm panic handler, which will display a user-friendly message
+ when a kernel panic occurs. It's useful when using a user-space
+ console instead of fbcon.
+	  It will only work if your graphics driver supports this feature.
+	  To support Hi-DPI displays, you can enable bigger fonts like
+	  FONT_TER16x32.
+
+config DRM_PANIC_FOREGROUND_COLOR
+ hex "Drm panic screen foreground color, in RGB"
+ depends on DRM_PANIC
+ default 0xffffff
+
+config DRM_PANIC_BACKGROUND_COLOR
+ hex "Drm panic screen background color, in RGB"
+ depends on DRM_PANIC
+ default 0x000000
+
+config DRM_PANIC_DEBUG
+ bool "Add a debug fs entry to trigger drm_panic"
+ depends on DRM_PANIC && DEBUG_FS
+ help
+ Add dri/[device]/drm_panic_plane_x in the kernel debugfs, to force the
+ panic handler to write the panic message to this plane scanout buffer.
+ This is unsafe and should not be enabled on a production build.
+ If in doubt, say "N".
+
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
@@ -371,6 +405,8 @@ source "drivers/gpu/drm/lima/Kconfig"
source "drivers/gpu/drm/panfrost/Kconfig"
+source "drivers/gpu/drm/panthor/Kconfig"
+
source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
@@ -403,10 +439,6 @@ config DRM_HYPERV
config DRM_EXPORT_FOR_TESTS
bool
-# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
-config DRM_PANEL_ORIENTATION_QUIRKS
- tristate
-
config DRM_LIB_RANDOM
bool
default n
@@ -414,3 +446,22 @@ config DRM_LIB_RANDOM
config DRM_PRIVACY_SCREEN
bool
default n
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+endif
+
+# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
+config DRM_PANEL_ORIENTATION_QUIRKS
+ tristate
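As a side note on the two color options above: hex Kconfig symbols are emitted as ordinary CONFIG_* macros, so a panic screen could pick up the configured 0xRRGGBB values roughly as sketched below. This is an illustrative snippet only, not code from drm_panic; the helper names are made up for the example.

#include <linux/kconfig.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_DRM_PANIC)
/* 0xRRGGBB values taken straight from Kconfig. */
static const u32 example_panic_fg = CONFIG_DRM_PANIC_FOREGROUND_COLOR;
static const u32 example_panic_bg = CONFIG_DRM_PANIC_BACKGROUND_COLOR;

static inline u8 example_red(u32 rgb)   { return (rgb >> 16) & 0xff; }
static inline u8 example_green(u32 rgb) { return (rgb >> 8) & 0xff; }
static inline u8 example_blue(u32 rgb)  { return rgb & 0xff; }
#endif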
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 104b42df2e95..f9ca4f8fa6c5 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,6 +5,34 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+# Unconditionally enable W=1 warnings locally
+# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += $(call cc-option, -Wrestrict)
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
+subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
+# FIXME: fix -Wformat-truncation warnings and uncomment
+#subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
+subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+# The following turn off the warnings enabled by -Wextra
+ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-shift-negative-value
+endif
+ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-sign-compare
+endif
+# --- end copy-paste
+
+# Enable -Werror in CI and development
+subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
+
drm-y := \
drm_aperture.o \
drm_atomic.o \
@@ -60,6 +88,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen.o \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
+drm-$(CONFIG_DRM_PANIC) += drm_panic.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
@@ -179,6 +208,7 @@ obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_PANTHOR) += panthor/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 4536c8ad0e11..078f588e99eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=$(srctree)/$(src)/..
+FULL_AMD_PATH=$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
@@ -70,7 +70,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
- amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \
+ amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
@@ -80,7 +81,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -247,7 +248,8 @@ amdgpu-y += \
smuio_v11_0_6.o \
smuio_v13_0.o \
smuio_v13_0_3.o \
- smuio_v13_0_6.o
+ smuio_v13_0_6.o \
+ smuio_v14_0_2.o
# add reset block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 576067d66bb9..d0a8da67dc2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -97,7 +97,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
- return r;
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9c62552bec34..f87d53e183c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -139,6 +139,14 @@ enum amdgpu_ss {
AMDGPU_SS_DRV_UNLOAD
};
+struct amdgpu_hwip_reg_entry {
+ u32 hwip;
+ u32 inst;
+ u32 seg;
+ u32 reg_offset;
+ const char *reg_name;
+};
+
struct amdgpu_watchdog_timer {
bool timeout_fatal_disable;
uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
@@ -210,6 +218,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
@@ -493,6 +502,7 @@ struct amdgpu_wb {
uint64_t gpu_addr;
u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+ spinlock_t lock;
};
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
@@ -605,7 +615,7 @@ struct amdgpu_asic_funcs {
/* PCIe replay counter */
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
/* device supports BACO */
- bool (*supports_baco)(struct amdgpu_device *adev);
+ int (*supports_baco)(struct amdgpu_device *adev);
/* pre asic_init quirks */
void (*pre_asic_init)(struct amdgpu_device *adev);
/* enter/exit umd stable pstate */
@@ -1407,7 +1417,8 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-bool amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_supports_baco(struct drm_device *dev);
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 493982f94649..c50202215f6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -28,7 +28,7 @@
#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
-typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data);
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
struct aca_banks {
int nr_banks;
@@ -86,7 +86,7 @@ static void aca_banks_release(struct aca_banks *banks)
}
}
-static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count)
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count)
{
struct amdgpu_aca *aca = &adev->aca;
const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
@@ -116,20 +116,22 @@ static struct aca_regs_dump {
{"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
};
-static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank)
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank,
+ struct ras_query_context *qctx)
{
+ u64 event_id = qctx ? qctx->event_id : 0ULL;
int i;
- dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
- dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
- idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
}
-static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type,
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
- struct aca_banks *banks)
+ struct aca_banks *banks, struct ras_query_context *qctx)
{
struct amdgpu_aca *aca = &adev->aca;
const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
@@ -143,13 +145,12 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro
return -EOPNOTSUPP;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
max_count = smu_funcs->max_ue_bank_count;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
max_count = smu_funcs->max_ce_bank_count;
break;
- case ACA_ERROR_TYPE_DEFERRED:
default:
return -EINVAL;
}
@@ -164,7 +165,9 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro
if (ret)
return ret;
- aca_smu_bank_dump(adev, i, count, &bank);
+ bank.type = type;
+
+ aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
if (ret)
@@ -195,7 +198,7 @@ static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type t
return hwip->hwid == hwid && hwip->mcatype == mcatype;
}
-static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type)
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -273,59 +276,49 @@ static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_
return new_bank_error(aerr, info);
}
-static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type,
- struct aca_bank_report *report)
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count)
{
struct aca_error_cache *error_cache = &handle->error_cache;
struct aca_bank_error *bank_error;
struct aca_error *aerr;
- if (!handle || !report)
+ if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT)
return -EINVAL;
- if (!report->count[type])
+ if (!count)
return 0;
aerr = &error_cache->errors[type];
- bank_error = get_bank_error(aerr, &report->info);
+ bank_error = get_bank_error(aerr, info);
if (!bank_error)
return -ENOMEM;
- bank_error->count[type] += report->count[type];
+ bank_error->count += count;
return 0;
}
-static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, struct aca_bank_report *report)
+static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
- if (!bank || !report)
+ if (!bank)
return -EINVAL;
- if (!bank_ops->aca_bank_generate_report)
+ if (!bank_ops->aca_bank_parser)
return -EOPNOTSUPP;
- memset(report, 0, sizeof(*report));
- return bank_ops->aca_bank_generate_report(handle, bank, type,
- report, handle->data);
+ return bank_ops->aca_bank_parser(handle, bank, type,
+ handle->data);
}
static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
- struct aca_bank_report report;
int ret;
- ret = aca_generate_bank_report(handle, bank, type, &report);
- if (ret)
- return ret;
-
- if (!report.count[type])
- return 0;
-
- ret = aca_log_errors(handle, type, &report);
+ ret = aca_bank_parser(handle, bank, type);
if (ret)
return ret;
@@ -333,7 +326,7 @@ static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank
}
static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
- enum aca_error_type type, bank_handler_t handler, void *data)
+ enum aca_smu_type type, bank_handler_t handler, void *data)
{
struct aca_handle *handle;
int ret;
@@ -354,7 +347,7 @@ static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *ba
}
static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
- enum aca_error_type type, bank_handler_t handler, void *data)
+ enum aca_smu_type type, bank_handler_t handler, void *data)
{
struct aca_bank_node *node;
struct aca_bank *bank;
@@ -378,8 +371,28 @@ static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *
return 0;
}
-static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type,
- bank_handler_t handler, void *data)
+static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ bool ret = true;
+
+ /*
+	 * Because the UE valid MCA count is only cleared after reset,
+	 * the ACA bank is updated only once during the GPU recovery stage
+	 * to avoid counting the same errors repeatedly.
+ */
+ if (type == ACA_SMU_TYPE_UE) {
+ if (amdgpu_ras_intr_triggered())
+ ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0;
+ else
+ atomic_set(&aca->ue_update_flag, 0);
+ }
+
+ return ret;
+}
+
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
+ bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
struct amdgpu_aca *aca = &adev->aca;
struct aca_banks banks;
@@ -389,9 +402,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type
if (list_empty(&aca->mgr.list))
return 0;
- /* NOTE: pmfw is only support UE and CE */
- if (type == ACA_ERROR_TYPE_DEFERRED)
- type = ACA_ERROR_TYPE_CE;
+ if (!aca_bank_should_update(adev, type))
+ return 0;
ret = aca_smu_get_valid_aca_count(adev, type, &count);
if (ret)
@@ -402,7 +414,7 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type
aca_banks_init(&banks);
- ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks);
+ ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx);
if (ret)
goto err_release_banks;
@@ -431,7 +443,7 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
if (type >= ACA_ERROR_TYPE_COUNT)
return -EINVAL;
- count = bank_error->count[type];
+ count = bank_error->count;
if (!count)
return 0;
@@ -447,6 +459,8 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
break;
case ACA_ERROR_TYPE_DEFERRED:
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+ break;
default:
break;
}
@@ -477,12 +491,25 @@ out_unlock:
}
static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
- struct ras_err_data *err_data)
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
{
+ enum aca_smu_type smu_type;
int ret;
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ smu_type = ACA_SMU_TYPE_UE;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ case ACA_ERROR_TYPE_DEFERRED:
+ smu_type = ACA_SMU_TYPE_CE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* udpate aca bank to aca source error_cache first */
- ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL);
+ ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
if (ret)
return ret;
@@ -498,10 +525,9 @@ static bool aca_handle_is_valid(struct aca_handle *handle)
}
int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
- enum aca_error_type type, void *data)
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
- struct ras_err_data *err_data = (struct ras_err_data *)data;
-
if (!handle || !err_data)
return -EINVAL;
@@ -511,7 +537,7 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han
if (!(BIT(type) & handle->mask))
return 0;
- return __aca_get_error_data(adev, handle, type, err_data);
+ return __aca_get_error_data(adev, handle, type, err_data, qctx);
}
static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
@@ -668,6 +694,8 @@ int amdgpu_aca_init(struct amdgpu_device *adev)
struct amdgpu_aca *aca = &adev->aca;
int ret;
+ atomic_set(&aca->ue_update_flag, 0);
+
ret = aca_manager_init(&aca->mgr);
if (ret)
return ret;
@@ -680,6 +708,8 @@ void amdgpu_aca_fini(struct amdgpu_device *adev)
struct amdgpu_aca *aca = &adev->aca;
aca_manager_fini(&aca->mgr);
+
+ atomic_set(&aca->ue_update_flag, 0);
}
int amdgpu_aca_reset(struct amdgpu_device *adev)
@@ -723,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
- int error_code;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
- error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
- return error_code & 0xff;
- }
- break;
- default:
- break;
- }
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
- /* NOTE: the true error code is encoded in status.errorcode[0:7] */
- error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+ if (!smu_funcs || !smu_funcs->parse_error_code)
+ return -EOPNOTSUPP;
- return error_code & 0xff;
+ return smu_funcs->parse_error_code(adev, bank);
}
int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
@@ -750,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank
return -EINVAL;
error_code = aca_bank_get_error_code(adev, bank);
+ if (error_code < 0)
+ return error_code;
+
for (i = 0; i < size; i++) {
if (err_codes[i] == error_code)
return 0;
@@ -784,7 +807,7 @@ static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
return 0;
}
-static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx)
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx)
{
struct aca_bank_info info;
int i, ret;
@@ -793,7 +816,7 @@ static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_e
if (ret)
return;
- seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? "UE" : "CE");
seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
@@ -807,7 +830,7 @@ struct aca_dump_context {
};
static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
struct aca_dump_context *ctx = (struct aca_dump_context *)data;
@@ -816,7 +839,7 @@ static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *ban
return handler_aca_log_bank_error(handle, bank, type, NULL);
}
-static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
+static int aca_dump_show(struct seq_file *m, enum aca_smu_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct aca_dump_context context = {
@@ -824,12 +847,12 @@ static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
.idx = 0,
};
- return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context);
+ return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context);
}
static int aca_dump_ce_show(struct seq_file *m, void *unused)
{
- return aca_dump_show(m, ACA_ERROR_TYPE_CE);
+ return aca_dump_show(m, ACA_SMU_TYPE_CE);
}
static int aca_dump_ce_open(struct inode *inode, struct file *file)
@@ -847,7 +870,7 @@ static const struct file_operations aca_ce_dump_debug_fops = {
static int aca_dump_ue_show(struct seq_file *m, void *unused)
{
- return aca_dump_show(m, ACA_ERROR_TYPE_UE);
+ return aca_dump_show(m, ACA_SMU_TYPE_UE);
}
static int aca_dump_ue_open(struct inode *inode, struct file *file)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 2da50e095883..5ef6b745f222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -26,6 +26,9 @@
#include <linux/list.h>
+struct ras_err_data;
+struct ras_query_context;
+
#define ACA_MAX_REGS_COUNT (16)
#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
@@ -99,7 +102,14 @@ enum aca_error_type {
ACA_ERROR_TYPE_COUNT
};
+enum aca_smu_type {
+ ACA_SMU_TYPE_UE = 0,
+ ACA_SMU_TYPE_CE,
+ ACA_SMU_TYPE_COUNT,
+};
+
struct aca_bank {
+ enum aca_smu_type type;
u64 regs[ACA_MAX_REGS_COUNT];
};
@@ -115,15 +125,10 @@ struct aca_bank_info {
int mcatype;
};
-struct aca_bank_report {
- struct aca_bank_info info;
- u64 count[ACA_ERROR_TYPE_COUNT];
-};
-
struct aca_bank_error {
struct list_head node;
struct aca_bank_info info;
- u64 count[ACA_ERROR_TYPE_COUNT];
+ u64 count;
};
struct aca_error {
@@ -157,9 +162,8 @@ struct aca_handle {
};
struct aca_bank_ops {
- int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data);
- bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ int (*aca_bank_parser)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
+ bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type,
void *data);
};
@@ -167,13 +171,15 @@ struct aca_smu_funcs {
int max_ue_bank_count;
int max_ce_bank_count;
int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
- int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count);
- int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank);
+ int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count);
+ int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank);
+ int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank);
};
struct amdgpu_aca {
struct aca_handle_manager mgr;
const struct aca_smu_funcs *smu_funcs;
+ atomic_t ue_update_flag;
bool is_enabled;
};
@@ -196,7 +202,10 @@ int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
const char *name, const struct aca_info *aca_info, void *data);
void amdgpu_aca_remove_handle(struct aca_handle *handle);
int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
- enum aca_error_type type, void *data);
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx);
int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count);
#endif
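To illustrate how the reworked aca_bank_ops interface above might be consumed, here is a hedged sketch of a driver-side parser callback. The function and variable names are hypothetical, and the policy of logging a single error per bank is an assumption of the example, not code from any in-tree IP block.

#include "amdgpu.h"
#include "amdgpu_aca.h"

/* Hypothetical parser: decode the bank, then cache one error of the
 * matching logical type via the new aca_error_cache_log_bank_error().
 */
static int example_aca_bank_parser(struct aca_handle *handle,
				   struct aca_bank *bank,
				   enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	enum aca_error_type err_type;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	err_type = (type == ACA_SMU_TYPE_UE) ? ACA_ERROR_TYPE_UE :
					       ACA_ERROR_TYPE_CE;

	return aca_error_cache_log_bank_error(handle, &info, err_type, 1ULL);
}

static const struct aca_bank_ops example_bank_ops = {
	.aca_bank_parser = example_aca_bank_parser,
	/* .aca_bank_is_valid omitted for brevity in this sketch. */
};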
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 6d72355ac492..bf6c4a0d0525 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -637,6 +637,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version acp_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 35dd6effa9a3..7ba05f030dd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -747,10 +747,17 @@ bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
return amdgpu_ras_get_fed_status(adev);
}
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
+}
+
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+ enum amdgpu_ras_block block, uint32_t reset)
{
- amdgpu_umc_poison_handler(adev, block, reset);
+ amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
}
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
@@ -769,12 +776,20 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst, int hub_type)
{
- if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
- return adev->gfx.ras->query_utcl2_poison_status(adev);
- else
- return false;
+ if (!hub_type) {
+ if (adev->gfxhub.funcs->query_utcl2_poison_status)
+ return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+ else
+ return false;
+ } else {
+ if (adev->mmhub.funcs->query_utcl2_poison_status)
+ return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+ else
+ return false;
+ }
}
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0ef223c2affb..1de021ebdd46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -336,12 +336,18 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
+
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 69810b3f1c63..3ab6c3aa0ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -881,6 +881,7 @@ uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
}
#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
+#define SQ_WATCH_STRIDE (mmSQ_WATCH1_ADDR_H - mmSQ_WATCH0_ADDR_H)
uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint64_t watch_address,
uint32_t watch_address_mask,
@@ -889,55 +890,93 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint32_t debug_vmid,
uint32_t inst)
{
+ /* SQ_WATCH?_ADDR_* and TCP_WATCH?_ADDR_* are programmed with the
+ * same values.
+ */
uint32_t watch_address_high;
uint32_t watch_address_low;
- uint32_t watch_address_cntl;
-
- watch_address_cntl = 0;
+ uint32_t tcp_watch_address_cntl;
+ uint32_t sq_watch_address_cntl;
watch_address_low = lower_32_bits(watch_address);
watch_address_high = upper_32_bits(watch_address) & 0xffff;
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = 0;
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VMID,
debug_vmid);
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
MODE,
watch_mode);
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
MASK,
watch_address_mask >> 7);
+ sq_watch_address_cntl = 0;
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
/* Turning off this watch point until we set all the registers */
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
0);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
- watch_address_cntl);
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 0);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
+ /* Program {TCP,SQ}_WATCH?_ADDR* */
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_H) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_high);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_L) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_low);
+
/* Enable the watch point */
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
1);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
- watch_address_cntl);
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 1);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
return 0;
}
@@ -953,8 +992,14 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
(watch_id * TCP_WATCH_STRIDE)),
watch_address_cntl);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_cntl);
+
return 0;
}
+#undef TCP_WATCH_STRIDE
+#undef SQ_WATCH_STRIDE
/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df58a6a1a67e..e4d4e55c08ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -220,7 +220,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt)) {
+ vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
ret = -ENOMEM;
goto release;
}
@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate BOs and map them to GPUVM (update VM page tables). */
+ /* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
- if (failed_size)
- pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
- /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
- * validations above would invalidate DMABuf imports again.
- */
- ret = process_validate_vms(process_info, &exec.ticket);
- if (ret) {
- pr_debug("Validating VMs failed, ret: %d\n", ret);
- goto validate_map_fail;
- }
-
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 6857c586ded7..a6d64bdbbb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -34,6 +34,7 @@ union firmware_info {
struct atom_firmware_info_v3_2 v32;
struct atom_firmware_info_v3_3 v33;
struct atom_firmware_info_v3_4 v34;
+ struct atom_firmware_info_v3_5 v35;
};
/*
@@ -872,6 +873,10 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
fw_reserved_fb_size =
(firmware_info->v34.fw_reserved_size_in_kb << 10);
break;
+ case 5:
+ fw_reserved_fb_size =
+ (firmware_info->v35.fw_reserved_size_in_kb << 10);
+ break;
default:
fw_reserved_fb_size = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index edc6377ec5ff..199693369c7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false, false, false);
+ false, false, 0);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0a4b09709cfb..ec888fc6ead8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5d0fa207a88..b62ae3c91a9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2065,12 +2065,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
uint32_t *new = NULL, *tmp = NULL;
- int ret, i = 0, len = 0;
+ unsigned int len = 0;
+ int ret, i = 0;
do {
memset(reg_offset, 0, 11);
if (copy_from_user(reg_offset, buf + len,
- min(10, ((int)size-len)))) {
+ min(10, (size-len)))) {
ret = -EFAULT;
goto error_free;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
new file mode 100644
index 000000000000..c1cb62683695
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/devcoredump.h>
+#include "amdgpu_dev_coredump.h"
+#include "atom.h"
+
+#ifndef CONFIG_DEV_COREDUMP
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context)
+{
+}
+#else
+
+const char *hw_ip_names[MAX_HWIP] = {
+ [GC_HWIP] = "GC",
+ [HDP_HWIP] = "HDP",
+ [SDMA0_HWIP] = "SDMA0",
+ [SDMA1_HWIP] = "SDMA1",
+ [SDMA2_HWIP] = "SDMA2",
+ [SDMA3_HWIP] = "SDMA3",
+ [SDMA4_HWIP] = "SDMA4",
+ [SDMA5_HWIP] = "SDMA5",
+ [SDMA6_HWIP] = "SDMA6",
+ [SDMA7_HWIP] = "SDMA7",
+ [LSDMA_HWIP] = "LSDMA",
+ [MMHUB_HWIP] = "MMHUB",
+ [ATHUB_HWIP] = "ATHUB",
+ [NBIO_HWIP] = "NBIO",
+ [MP0_HWIP] = "MP0",
+ [MP1_HWIP] = "MP1",
+ [UVD_HWIP] = "UVD/JPEG/VCN",
+ [VCN1_HWIP] = "VCN1",
+ [VCE_HWIP] = "VCE",
+ [VPE_HWIP] = "VPE",
+ [DF_HWIP] = "DF",
+ [DCE_HWIP] = "DCE",
+ [OSSSYS_HWIP] = "OSSSYS",
+ [SMUIO_HWIP] = "SMUIO",
+ [PWR_HWIP] = "PWR",
+ [NBIF_HWIP] = "NBIF",
+ [THM_HWIP] = "THM",
+ [CLK_HWIP] = "CLK",
+ [UMC_HWIP] = "UMC",
+ [RSMU_HWIP] = "RSMU",
+ [XGMI_HWIP] = "XGMI",
+ [DCI_HWIP] = "DCI",
+ [PCIE_HWIP] = "PCIE",
+};
+
+static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev,
+ struct drm_printer *p)
+{
+ uint32_t version;
+ uint32_t feature;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n",
+ adev->vce.fb_version, adev->vce.fw_version);
+ drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0,
+ adev->uvd.fw_version);
+ drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gmc.fw_version);
+ drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.me_feature_version, adev->gfx.me_fw_version);
+ drm_printf(p, "PFP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.pfp_feature_version, adev->gfx.pfp_fw_version);
+ drm_printf(p, "CE feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.ce_feature_version, adev->gfx.ce_fw_version);
+ drm_printf(p, "RLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_feature_version, adev->gfx.rlc_fw_version);
+
+ drm_printf(p, "RLC SRLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlc_feature_version,
+ adev->gfx.rlc_srlc_fw_version);
+ drm_printf(p, "RLC SRLG feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlg_feature_version,
+ adev->gfx.rlc_srlg_fw_version);
+ drm_printf(p, "RLC SRLS feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srls_feature_version,
+ adev->gfx.rlc_srls_fw_version);
+ drm_printf(p, "RLCP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcp_ucode_feature_version,
+ adev->gfx.rlcp_ucode_version);
+ drm_printf(p, "RLCV feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcv_ucode_feature_version,
+ adev->gfx.rlcv_ucode_version);
+ drm_printf(p, "MEC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec_feature_version, adev->gfx.mec_fw_version);
+
+ if (adev->gfx.mec2_fw)
+ drm_printf(p, "MEC2 feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec2_feature_version,
+ adev->gfx.mec2_fw_version);
+
+ drm_printf(p, "IMU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gfx.imu_fw_version);
+ drm_printf(p, "PSP SOS feature version: %u, fw version: 0x%08x\n",
+ adev->psp.sos.feature_version, adev->psp.sos.fw_version);
+ drm_printf(p, "PSP ASD feature version: %u, fw version: 0x%08x\n",
+ adev->psp.asd_context.bin_desc.feature_version,
+ adev->psp.asd_context.bin_desc.fw_version);
+
+ drm_printf(p, "TA XGMI feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.xgmi_context.context.bin_desc.feature_version,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAS feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.ras_context.context.bin_desc.feature_version,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA HDCP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.hdcp_context.context.bin_desc.feature_version,
+ adev->psp.hdcp_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA DTM feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.dtm_context.context.bin_desc.feature_version,
+ adev->psp.dtm_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.rap_context.context.bin_desc.feature_version,
+ adev->psp.rap_context.context.bin_desc.fw_version);
+ drm_printf(p,
+ "TA SECURE DISPLAY feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.securedisplay_context.context.bin_desc.feature_version,
+ adev->psp.securedisplay_context.context.bin_desc.fw_version);
+
+ /* SMC firmware */
+ version = adev->pm.fw_version;
+
+ smu_program = (version >> 24) & 0xff;
+ smu_major = (version >> 16) & 0xff;
+ smu_minor = (version >> 8) & 0xff;
+ smu_debug = (version >> 0) & 0xff;
+ drm_printf(p,
+ "SMC feature version: %u, program: %d, fw version: 0x%08x (%d.%d.%d)\n",
+ 0, smu_program, version, smu_major, smu_minor, smu_debug);
+
+ /* SDMA firmware */
+ for (int i = 0; i < adev->sdma.num_instances; i++) {
+ drm_printf(p,
+ "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, adev->sdma.instance[i].feature_version,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ drm_printf(p, "VCN feature version: %u, fw version: 0x%08x\n", 0,
+ adev->vcn.fw_version);
+ drm_printf(p, "DMCU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcu_fw_version);
+ drm_printf(p, "DMCUB feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcub_fw_version);
+ drm_printf(p, "PSP TOC feature version: %u, fw version: 0x%08x\n",
+ adev->psp.toc.feature_version, adev->psp.toc.fw_version);
+
+ version = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES_KIQ feature version: %u, fw version: 0x%08x\n",
+ feature, version);
+
+ version = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES feature version: %u, fw version: 0x%08x\n", feature,
+ version);
+
+ drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n",
+ adev->vpe.feature_version, adev->vpe.fw_version);
+
+ drm_printf(p, "\nVBIOS Information\n");
+ drm_printf(p, "vbios name : %s\n", ctx->name);
+ drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
+ drm_printf(p, "vbios version : %d\n", ctx->version);
+ drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
+ drm_printf(p, "vbios date : %s\n", ctx->date);
+}
+
+static ssize_t
+amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen)
+{
+ struct drm_printer p;
+ struct amdgpu_coredump_info *coredump = data;
+ struct drm_print_iterator iter;
+ struct amdgpu_vm_fault_info *fault_info;
+ int i, ver;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
+ drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
+ coredump->reset_time.tv_nsec);
+
+ if (coredump->reset_task_info.pid)
+ drm_printf(&p, "process_name: %s PID: %d\n",
+ coredump->reset_task_info.process_name,
+ coredump->reset_task_info.pid);
+
+ /* GPU IP's information of the SOC */
+ drm_printf(&p, "\nIP Information\n");
+ drm_printf(&p, "SOC Family: %d\n", coredump->adev->family);
+ drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id);
+ drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id);
+
+ for (int i = 1; i < MAX_HWIP; i++) {
+ for (int j = 0; j < HWIP_MAX_INSTANCE; j++) {
+ ver = coredump->adev->ip_versions[i][j];
+ if (ver)
+ drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n",
+ hw_ip_names[i], i, j,
+ IP_VERSION_MAJ(ver),
+ IP_VERSION_MIN(ver),
+ IP_VERSION_REV(ver),
+ IP_VERSION_VARIANT(ver),
+ IP_VERSION_SUBREV(ver));
+ }
+ }
+
+ /* IP firmware information */
+ drm_printf(&p, "\nIP Firmwares\n");
+ amdgpu_devcoredump_fw_info(coredump->adev, &p);
+
+ if (coredump->ring) {
+ drm_printf(&p, "\nRing timed out details\n");
+ drm_printf(&p, "IP Type: %d Ring Name: %s\n",
+ coredump->ring->funcs->type,
+ coredump->ring->name);
+ }
+
+ /* Add page fault information */
+ fault_info = &coredump->adev->vm_manager.fault_info;
+ drm_printf(&p, "\n[%s] Page fault observed\n",
+ fault_info->vmhub ? "mmhub" : "gfxhub");
+ drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr);
+ drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status);
+
+ /* dump the ip state for each ip */
+ drm_printf(&p, "IP Dump\n");
+ for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
+ if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n",
+ coredump->adev->ip_blocks[i]
+ .version->funcs->name);
+ coredump->adev->ip_blocks[i]
+ .version->funcs->print_ip_state(
+ (void *)coredump->adev, &p);
+ drm_printf(&p, "\n");
+ }
+ }
+
+ /* Add ring buffer information */
+ drm_printf(&p, "Ring buffer information\n");
+ for (int i = 0; i < coredump->adev->num_rings; i++) {
+ int j = 0;
+ struct amdgpu_ring *ring = coredump->adev->rings[i];
+
+ drm_printf(&p, "ring name: %s\n", ring->name);
+ drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n",
+ amdgpu_ring_get_rptr(ring),
+ amdgpu_ring_get_wptr(ring),
+ ring->buf_mask);
+ drm_printf(&p, "Ring size in dwords: %d\n",
+ ring->ring_size / 4);
+ drm_printf(&p, "Ring contents\n");
+ drm_printf(&p, "Offset \t Value\n");
+
+ while (j < ring->ring_size) {
+ drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j / 4]);
+ j += 4;
+ }
+ }
+
+ if (coredump->reset_vram_lost)
+ drm_printf(&p, "VRAM is lost due to GPU reset!\n");
+ if (coredump->adev->reset_info.num_regs) {
+ drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
+
+ for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
+ drm_printf(&p, "0x%08x: 0x%08x\n",
+ coredump->adev->reset_info.reset_dump_reg_list[i],
+ coredump->adev->reset_info.reset_dump_reg_value[i]);
+ }
+
+ return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+ kfree(data);
+}
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_coredump_info *coredump;
+ struct drm_device *dev = adev_to_drm(adev);
+ struct amdgpu_job *job = reset_context->job;
+ struct drm_sched_job *s_job;
+
+ coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
+
+ if (!coredump) {
+ DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
+ return;
+ }
+
+ coredump->reset_vram_lost = vram_lost;
+
+ if (reset_context->job && reset_context->job->vm) {
+ struct amdgpu_task_info *ti;
+ struct amdgpu_vm *vm = reset_context->job->vm;
+
+ ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ti) {
+ coredump->reset_task_info = *ti;
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
+ if (job) {
+ s_job = &job->base;
+ coredump->ring = to_amdgpu_ring(s_job->sched);
+ }
+
+ coredump->adev = adev;
+
+ ktime_get_ts64(&coredump->reset_time);
+
+ dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
+ amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
new file mode 100644
index 000000000000..52459512cb2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DEV_COREDUMP_H__
+#define __AMDGPU_DEV_COREDUMP_H__
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+
+#define AMDGPU_COREDUMP_VERSION "1"
+
+struct amdgpu_coredump_info {
+ struct amdgpu_device *adev;
+ struct amdgpu_task_info reset_task_info;
+ struct timespec64 reset_time;
+ bool reset_vram_lost;
+ struct amdgpu_ring *ring;
+};
+#endif
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context);
+
+#endif
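Once dev_coredumpm() has registered the dump as in the new files above, it is exposed through the devcoredump class in sysfs. A rough userspace sketch for pulling the first pending dump follows; the glob pattern and buffer size are assumptions of this example.

#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	char buf[4096];
	size_t n;
	FILE *f;

	/* devcoredump exposes each pending dump as devcdN/data in sysfs. */
	if (glob("/sys/class/devcoredump/devcd*/data", 0, NULL, &g) != 0 ||
	    g.gl_pathc == 0)
		return 1;

	f = fopen(g.gl_pathv[0], "r");
	if (!f) {
		globfree(&g);
		return 1;
	}

	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);

	fclose(f);
	globfree(&g);
	return 0;
}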
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5dc24c971b41..861ccff78af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -74,6 +74,7 @@
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
+#include "amdgpu_dev_coredump.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
@@ -143,6 +144,8 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+
/**
* DOC: pcie_replay_count
*
@@ -335,16 +338,93 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
*
* @dev: drm_device pointer
*
- * Returns true if the device supporte BACO,
- * otherwise return false.
+ * Return:
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported);
+ * otherwise return 0.
*/
-bool amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_asic_supports_baco(adev);
}
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
+{
+ struct drm_device *dev;
+ int bamaco_support;
+
+ dev = adev_to_drm(adev);
+
+ adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
+ bamaco_support = amdgpu_device_supports_baco(dev);
+
+ switch (amdgpu_runtime_pm) {
+ case 2:
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
+ } else if (bamaco_support == BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
+ }
+ break;
+ case 1:
+ if (bamaco_support & BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ dev_info(adev->dev, "Forcing BACO for runtime pm\n");
+ }
+ break;
+ case -1:
+ case -2:
+ if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
+ dev_info(adev->dev, "Using ATPX for runtime pm\n");
+ } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
+ dev_info(adev->dev, "Using BOCO for runtime pm\n");
+ } else {
+ if (!bamaco_support)
+ goto no_runtime_pm;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+				/* BACO is not supported on vega20 and arcturus */
+ break;
+ case CHIP_VEGA10:
+ /* enable BACO as runpm mode if noretry=0 */
+ if (!adev->gmc.noretry)
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ default:
+ /* enable BACO as runpm mode on CI+ */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ }
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Using BAMACO for runtime pm\n");
+ } else {
+ dev_info(adev->dev, "Using BACO for runtime pm\n");
+ }
+ }
+ }
+ break;
+ case 0:
+ dev_info(adev->dev, "runtime pm is manually disabled\n");
+ break;
+ default:
+ break;
+ }
+
+no_runtime_pm:
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
+ dev_info(adev->dev, "Runtime PM not available\n");
+}
/**
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
@@ -1402,13 +1482,17 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
*/
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
- unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
+ unsigned long flags, offset;
+ spin_lock_irqsave(&adev->wb.lock, flags);
+ offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
*wb = offset << 3; /* convert to dw offset */
return 0;
} else {
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
return -EINVAL;
}
}
@@ -1423,9 +1507,13 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
*/
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
+ unsigned long flags;
+
wb >>= 3;
+ spin_lock_irqsave(&adev->wb.lock, flags);
if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used);
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
}
/**
@@ -1455,7 +1543,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space,please check!!\n");
+ DRM_WARN("System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -3981,6 +4069,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->wb.lock);
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
@@ -4069,6 +4158,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ /* VF MMIO access (except mailbox range) from CPU
+ * will be blocked during sriov runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
amdgpu_gmc_noretry_set(adev);
/* Need to get xgmi info early to decide the reset behavior*/
if (adev->gmc.xgmi.supported) {
@@ -4135,18 +4231,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
+ } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev)) {
+ r = psp_gpu_reset(adev);
} else {
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
- }
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ }
+
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
}
}
@@ -4539,6 +4639,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
if (r)
goto unprepare;
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -4968,12 +5070,15 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
+ amdgpu_device_stop_pending_resets(adev);
+
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
r = amdgpu_virt_reset_gpu(adev);
if (r)
return r;
+ amdgpu_ras_set_fed(adev, false);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5257,11 +5362,21 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ uint32_t i;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
- amdgpu_reset_reg_dumps(tmp_adev);
+
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ amdgpu_reset_reg_dumps(tmp_adev);
+
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)tmp_adev);
+ }
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
@@ -5334,7 +5449,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
+ amdgpu_coredump(tmp_adev, vram_lost, reset_context);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5532,6 +5648,23 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
}
+static int amdgpu_device_health_check(struct list_head *device_list_handle)
+{
+ struct amdgpu_device *tmp_adev;
+ int ret = 0;
+ u32 status;
+
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
+ if (PCI_POSSIBLE_ERROR(status)) {
+ dev_err(tmp_adev->dev, "device lost from bus!");
+ ret = -ENODEV;
+ }
+ }
+
+ return ret;
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -5603,6 +5736,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(device_list_handle);
+ if (r)
+ goto end_reset;
+ }
+
/* We need to lock reset domain only once both for XGMI and single device */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
@@ -5685,11 +5824,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
tmp_adev->asic_reset_res = r;
}
- /*
- * Drop all pending non scheduler resets. Scheduler resets
- * were already dropped during drm_sched_stop
- */
- amdgpu_device_stop_pending_resets(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ /*
+ * Drop all pending non scheduler resets. Scheduler resets
+ * were already dropped during drm_sched_stop
+ */
+ amdgpu_device_stop_pending_resets(tmp_adev);
}
/* Actual ASIC resets if needed.*/
@@ -5768,6 +5908,7 @@ skip_sched_resume:
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
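
The amdgpu_device.c changes above close a race in writeback-slot allocation by taking the new adev->wb.lock spinlock around the bitmap search in amdgpu_device_wb_get()/amdgpu_device_wb_free(). A minimal sketch of that allocate-under-lock pattern, using hypothetical demo_* names rather than the driver's structures:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define DEMO_NUM_SLOTS 256

static DEFINE_SPINLOCK(demo_slot_lock);
static DECLARE_BITMAP(demo_slot_used, DEMO_NUM_SLOTS);

/* Find a free slot and mark it used, all under the lock. */
static int demo_slot_get(u32 *slot)
{
	unsigned long flags, offset;

	spin_lock_irqsave(&demo_slot_lock, flags);
	offset = find_first_zero_bit(demo_slot_used, DEMO_NUM_SLOTS);
	if (offset < DEMO_NUM_SLOTS)
		__set_bit(offset, demo_slot_used);
	spin_unlock_irqrestore(&demo_slot_lock, flags);

	if (offset >= DEMO_NUM_SLOTS)
		return -EINVAL;

	*slot = offset;
	return 0;
}

/* Release a slot; the same lock keeps the bitmap consistent with demo_slot_get(). */
static void demo_slot_free(u32 slot)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_slot_lock, flags);
	if (slot < DEMO_NUM_SLOTS)
		__clear_bit(slot, demo_slot_used);
	spin_unlock_irqrestore(&demo_slot_lock, flags);
}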
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a07e4b87d4ca..0e31bdb4b7cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -97,6 +97,7 @@
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "jpeg_v5_0_0.h"
@@ -245,6 +246,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
return -ENOENT;
}
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
@@ -259,14 +263,14 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* wait for this to complete. Once the C2PMSG is updated, we can
* continue.
*/
- if (dev_is_removable(&adev->pdev->dev)) {
- for (i = 0; i < 1000; i++) {
- msg = RREG32(mmMP0_SMN_C2PMSG_33);
- if (msg & 0x80000000)
- break;
- msleep(1);
- }
+
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
}
+
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) {
@@ -1896,6 +1900,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2237,6 +2244,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
if (amdgpu_umsch_mm & 0x1) {
amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
adev->enable_umsch_mm = true;
@@ -2676,6 +2684,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 1):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 80b9642f2bc4..ea14f1c8f430 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@@ -668,6 +669,15 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
/**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+ "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
+/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
* (0 = disabled (default), 1 = enabled)
@@ -915,7 +925,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+module_param_named(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold is specifies the
@@ -2471,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
/* Use a common context, just need to make sure full reset is done */
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
r = amdgpu_do_asic_reset(&device_list, &reset_context);
if (r) {
@@ -2734,7 +2745,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
/* nothing to do */
- } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
amdgpu_device_baco_enter(drm_dev);
}
@@ -2774,7 +2786,8 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
* PCI core handles it for _PR3.
*/
pci_set_master(pdev);
- } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 55d5508987ff..1d955652f3ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1206,7 +1206,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
break;
default:
- break;
+ dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
+ return;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 8fcf889ddce9..64f197bbc866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -259,7 +259,6 @@ struct amdgpu_cu_info {
struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
@@ -434,6 +433,10 @@ struct amdgpu_gfx {
uint32_t num_xcc_per_xcp;
struct mutex partition_mutex;
bool mcbp; /* mid command buffer preemption */
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
+ uint32_t reg_count;
};
struct amdgpu_gfx_ras_reg_entry {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index c7b44aeb671b..103a837ccc71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -38,6 +38,8 @@ struct amdgpu_gfxhub_funcs {
void (*mode2_save_regs)(struct amdgpu_device *adev);
void (*mode2_restore_regs)(struct amdgpu_device *adev);
void (*halt)(struct amdgpu_device *adev);
+ bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
+ int xcc_id);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index d79cb13e1aa8..00d6211e0fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -279,7 +279,7 @@ amdgpu_i2c_lookup(struct amdgpu_device *adev,
return NULL;
}
-static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val)
@@ -304,16 +304,18 @@ static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
- *val = in_buf[0];
- DRM_DEBUG("val = 0x%02x\n", *val);
- } else {
- DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
- addr, *val);
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) != 2) {
+ DRM_DEBUG("i2c 0x%02x read failed\n", addr);
+ return -EIO;
}
+
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+
+ return 0;
}
-static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 val)
@@ -329,9 +331,12 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = val;
- if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
- addr, val);
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) {
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
}
/* ddc router switching */
@@ -346,16 +351,18 @@ amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connecto
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
val |= amdgpu_connector->router.ddc_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
@@ -375,16 +382,18 @@ amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
val |= amdgpu_connector->router.cd_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7e6d09730e6d..013ff373e067 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -279,7 +279,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (!amdgpu_msi_ok(adev))
- flags = PCI_IRQ_LEGACY;
+ flags = PCI_IRQ_INTX;
else
flags = PCI_IRQ_ALL_TYPES;
@@ -445,6 +445,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
entry.ih = ih;
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+
+ /*
+ * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
+ * si and tonga), so initialize timestamp and timestamp_src to 0
+ */
+ entry.timestamp = 0;
+ entry.timestamp_src = 0;
+
amdgpu_ih_decode_iv(adev, &entry);
trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4b3000c21ef2..e4742b65032d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
- DRM_INFO("Skip scheduling IBs!\n");
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
}
job->job_run_counter++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a2df3025a754..a0ea6fe8d060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -149,38 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- if (amdgpu_device_supports_px(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
- adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
- dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
- dev_info(adev->dev, "Using BOCO for runtime pm\n");
- } else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm != 0)) {
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- /* enable BACO as runpm mode if runpm=1 */
- if (amdgpu_runtime_pm > 0)
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- case CHIP_VEGA10:
- /* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- default:
- /* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- }
-
- if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
- dev_info(adev->dev, "Using BACO for runtime pm\n");
- }
+ amdgpu_device_detect_runtime_pm_mode(adev);
/* Call ACPI methods: require modeset init
* but failure is not fatal
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 24ad4b97177b..0734490347db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -210,22 +210,26 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
return -EOPNOTSUPP;
}
-static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
+static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
+ struct ras_query_context *qctx)
{
- dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
- dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_STATUS]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_ADDR]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_MISC0]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_IPID]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_SYND]);
+ u64 event_id = qctx->event_id;
+
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_STATUS]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_ADDR]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_MISC0]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_IPID]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_SYND]);
}
-int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
{
struct amdgpu_smuio_mcm_config_info mcm_info;
struct ras_err_addr err_addr = {0};
@@ -244,7 +248,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
list_for_each_entry(node, &mca_set.list, node) {
entry = &node->entry;
- amdgpu_mca_smu_mca_bank_dump(adev, i++, entry);
+ amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx);
count = 0;
ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index b964110ed1e0..e5bf07ce3451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -169,6 +169,7 @@ void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set);
int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry);
void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set);
-int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data);
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx);
#endif
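
The MCA changes above thread a ras_query_context, which carries a per-query event id, through the bank-dump and error-logging paths so all log lines from one query can be correlated. A minimal sketch of that context-threading pattern, with hypothetical demo_* names standing in for the driver's types and for RAS_EVENT_LOG():

#include <linux/printk.h>
#include <linux/types.h>

struct demo_query_context {
	u64 event_id;	/* acquired once per query, reused for every log line */
};

/* Stand-in for RAS_EVENT_LOG(): prefix each message with the event id. */
static void demo_log_bank(const struct demo_query_context *qctx,
			  int idx, u64 status)
{
	pr_info("{%llu} bank[%02d].STATUS=0x%016llx\n",
		qctx->event_id, idx, status);
}

static void demo_dump_banks(const struct demo_query_context *qctx,
			    const u64 *status_regs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		demo_log_bank(qctx, i, status_regs[i]);
}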
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a98e03e0a51f..5ca5c47ab54e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -32,6 +32,18 @@
#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8
+signed long amdgpu_mes_fence_wait_polling(u64 *fence,
+ u64 wait_seq,
+ signed long timeout)
+{
+
+ while ((s64)(wait_seq - *fence) > 0 && timeout > 0) {
+ udelay(2);
+ timeout -= 2;
+ }
+ return timeout > 0 ? timeout : 0;
+}
+
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
return roundup(AMDGPU_ONE_DOORBELL_SIZE *
@@ -40,7 +52,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
}
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- struct amdgpu_mes_process *process,
int ip_type, uint64_t *doorbell_index)
{
unsigned int offset, found;
@@ -65,7 +76,6 @@ static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
}
static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- struct amdgpu_mes_process *process,
uint32_t doorbell_index)
{
unsigned int old, rel_index;
@@ -102,7 +112,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ if (!amdgpu_mes_log_enable)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -653,7 +666,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
*queue_id = queue->queue_id = r;
/* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
+ r = amdgpu_mes_kernel_doorbell_get(adev,
qprops->queue_type,
&qprops->doorbell_off);
if (r)
@@ -711,8 +724,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
return 0;
clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, gang->process,
- qprops->doorbell_off);
+ amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
@@ -766,8 +778,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
queue_id);
list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, gang->process,
- queue->doorbell_off);
+ amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
amdgpu_mes_unlock(&adev->mes);
amdgpu_mes_queue_free_mqd(queue);
@@ -775,6 +786,28 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ struct mes_map_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+
+ r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to map legacy queue\n");
+
+ return r;
+}
+
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
@@ -1129,6 +1162,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
@@ -1471,7 +1505,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
const struct mes_firmware_header_v1_0 *mes_hdr;
struct amdgpu_firmware_info *info;
char ucode_prefix[30];
- char fw_name[40];
+ char fw_name[50];
bool need_retry = false;
int r;
@@ -1549,12 +1583,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
- mem, PAGE_SIZE, false);
+ mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@@ -1565,7 +1598,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
- if (adev->enable_mes)
+ if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7d4f93fea937..df9f0404d842 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
@@ -140,6 +141,12 @@ struct amdgpu_mes {
/* ip specific functions */
const struct amdgpu_mes_funcs *funcs;
+
+ /* mes resource_1 bo */
+ struct amdgpu_bo *resource_1;
+ uint64_t resource_1_gpu_addr;
+ void *resource_1_addr;
+
};
struct amdgpu_mes_process {
@@ -241,6 +248,15 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
+struct mes_map_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+};
+
struct mes_unmap_legacy_queue_input {
enum amdgpu_unmap_queues_action action;
uint32_t queue_type;
@@ -317,6 +333,9 @@ struct amdgpu_mes_funcs {
int (*remove_hw_queue)(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input);
+ int (*map_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_map_legacy_queue_input *input);
+
int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input);
@@ -333,6 +352,10 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
+signed long amdgpu_mes_fence_wait_polling(u64 *fence,
+ u64 wait_seq,
+ signed long timeout);
+
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
@@ -356,6 +379,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1ca9d4ed8063..95d676ee207f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -63,6 +63,8 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
+ bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
+ int hub_inst);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 010b0cb7693c..8d8c39be6129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
/**
* DOC: amdgpu_object
@@ -153,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ if (abo->tbo.type == ttm_bo_type_kernel &&
+ flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
c++;
}
@@ -173,6 +176,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
+ /*
+ * When GTT is just an alternative to VRAM, make sure that we
+ * only use it as a fallback and still try to fill up VRAM first.
+ */
+ if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -595,8 +604,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- if (adev->ras_enabled)
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
@@ -605,6 +613,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)
@@ -617,8 +627,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -628,7 +637,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+ r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -758,7 +767,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
- true, false, false);
+ true, false, 0);
}
/**
@@ -960,6 +969,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+ bo->placements[i].mem_type == TTM_PL_VRAM)
+ bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -1242,14 +1255,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new resource for backing the BO
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
if (!amdgpu_bo_is_amdgpu_bo(bo))
@@ -1261,34 +1278,36 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->resource->mem_type != TTM_PL_SYSTEM)
+ old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
- /* remember the eviction */
- if (evict)
- atomic64_inc(&adev->num_evictions);
+ /* move_notify is called before move happens */
+ trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
+ old_mem ? old_mem->mem_type : -1);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@@ -1359,8 +1378,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
return;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
if (!WARN_ON(r)) {
+ amdgpu_vram_mgr_set_cleared(bo->resource);
amdgpu_bo_fence(abo, fence, false);
dma_fence_put(fence);
}
@@ -1389,10 +1409,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1432,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1596,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1587,10 +1605,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be679c42b0b8..bc42ccbde659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
@@ -350,7 +328,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 94b310fdb719..4bd4602d11b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1053,6 +1053,11 @@ static int psp_asd_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
return 0;
+ /* bypass asd if display hardware is not available */
+ if (!amdgpu_device_has_display_hardware(psp->adev) &&
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
+ return 0;
+
psp->asd_context.mem_context.shared_mc_addr = 0;
psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
@@ -2260,6 +2265,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
+ (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
+ ret = psp_bootloader_load_ipkeymgr_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2617,7 +2631,8 @@ static int psp_load_p2s_table(struct psp_context *psp)
struct amdgpu_firmware_info *ucode =
&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
- if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
return 0;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
@@ -2647,7 +2662,8 @@ static int psp_load_smu_fw(struct psp_context *psp)
* Skip SMU FW reloading in case of using BACO for runpm only,
* as SMU is always alive.
*/
- if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
return 0;
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
@@ -3273,6 +3289,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
+ psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ipkeymgr_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index ee16f134ae92..3635303e6548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -73,8 +73,10 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
+ PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV,
PSP_BL__LOAD_INTFDRV = 0xD0000,
- PSP_BL__LOAD_RASDRV = 0xE0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
+ PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -117,6 +119,7 @@ struct psp_funcs {
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
+ int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -336,6 +339,7 @@ struct psp_context {
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
+ struct psp_bin_desc ipkeymgr_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -424,6 +428,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ras_drv(psp) \
((psp)->funcs->bootloader_load_ras_drv ? \
(psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
+#define psp_bootloader_load_ipkeymgr_drv(psp) \
+ ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
+ (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 8ebab6f22e5a..1adc81a55734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -122,6 +122,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms
+#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -1045,6 +1047,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct ras_manager *ras_mgr,
struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
const char *blk_name,
bool is_ue,
bool is_de)
@@ -1052,27 +1055,28 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct amdgpu_smuio_mcm_config_info *mcm_info;
struct ras_err_node *err_node;
struct ras_err_info *err_info;
+ u64 event_id = qctx->event_id;
if (is_ue) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ue_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new uncorrectable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new uncorrectable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ue_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld uncorrectable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld uncorrectable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
}
} else {
@@ -1081,44 +1085,44 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->de_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new deferred hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new deferred hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->de_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld deferred hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id,
- err_info->de_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld deferred hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->de_count, blk_name);
}
} else {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new correctable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ce_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id,
- err_info->ce_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld correctable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->ce_count, blk_name);
}
}
}
@@ -1131,77 +1135,79 @@ static inline bool err_data_has_source_info(struct ras_err_data *data)
static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
struct ras_query_if *query_if,
- struct ras_err_data *err_data)
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->event_id;
if (err_data->ce_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, false, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld correctable hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld correctable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ce_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld correctable hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ce_count,
+ blk_name);
}
}
if (err_data->ue_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, true, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld uncorrectable hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ue_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ue_count,
+ blk_name);
}
}
if (err_data->de_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, false, true);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld deferred hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld deferred hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.de_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld deferred hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.de_count,
+ blk_name);
}
}
}
@@ -1244,6 +1250,10 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
{
struct ras_manager *obj;
+ /* in resume phase, no need to create aca fs node */
+ if (adev->in_suspend || amdgpu_in_reset(adev))
+ return 0;
+
obj = get_ras_manager(adev, blk);
if (!obj)
return -EINVAL;
@@ -1265,7 +1275,8 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
}
static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
- enum aca_error_type type, struct ras_err_data *err_data)
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
struct ras_manager *obj;
@@ -1273,7 +1284,7 @@ static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu
if (!obj)
return -EINVAL;
- return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
+ return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
@@ -1287,13 +1298,14 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
- return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
- "ce", info.ce_count);
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.ue_count);
}
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
struct ras_query_if *info,
struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
unsigned int error_query_mode)
{
enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
@@ -1329,17 +1341,21 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
} else {
if (amdgpu_aca_is_enabled(adev)) {
- ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
if (ret)
return ret;
- ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
if (ret)
return ret;
} else {
/* FIXME: add code to check return value later */
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
}
}
@@ -1351,6 +1367,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data;
+ struct ras_query_context qctx;
unsigned int error_query_mode;
int ret;
@@ -1364,8 +1381,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
return -EINVAL;
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
ret = amdgpu_ras_query_error_status_helper(adev, info,
&err_data,
+ &qctx,
error_query_mode);
if (ret)
goto out_fini_err_data;
@@ -1376,7 +1397,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
info->ce_count = obj->err_data.ce_count;
info->de_count = obj->err_data.de_count;
- amdgpu_ras_error_generate_report(adev, info, &err_data);
+ amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
out_fini_err_data:
amdgpu_ras_error_data_fini(&err_data);
@@ -2041,7 +2062,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- amdgpu_umc_poison_handler(adev, obj->head.block, false);
+ amdgpu_umc_poison_handler(adev, obj->head.block, 0);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -2061,6 +2082,17 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
{
dev_info(obj->adev->dev,
"Poison is created\n");
+
+ if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
+
+ amdgpu_ras_put_poison_req(obj->adev,
+ AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false);
+
+ atomic_inc(&con->page_retirement_req_cnt);
+
+ wake_up(&con->page_retirement_wq);
+ }
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2371,7 +2403,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page);
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
else if (status == -ENOENT)
@@ -2384,6 +2416,19 @@ out:
return ret;
}
+static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive, bool status)
+{
+ struct amdgpu_device *tmp_adev;
+
+ if (hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ amdgpu_ras_set_fed(tmp_adev, status);
+ } else {
+ amdgpu_ras_set_fed(adev, status);
+ }
+}
+
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
@@ -2393,8 +2438,21 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
+ if (hive) {
atomic_set(&hive->ras_recovery, 1);
+
+ /* If any device in the hive received a RAS fatal error
+ * interrupt, set the fatal error status on all of them. This
+ * condition requires a recovery, and the flag is cleared as
+ * part of that recovery.
+ */
+ list_for_each_entry(remote_adev, &hive->device_list,
+ gmc.xgmi.head)
+ if (amdgpu_ras_get_fed_status(remote_adev)) {
+ amdgpu_ras_set_fed_all(adev, hive, true);
+ break;
+ }
+ }
if (!ras->disable_ras_err_cnt_harvest) {
/* Build list of devices to query RAS related errors */
@@ -2439,18 +2497,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- /* For any RAS error that needs a full reset to
- * recover, set the fatal error status
- */
- if (hive) {
- list_for_each_entry(remote_adev,
- &hive->device_list,
- gmc.xgmi.head)
- amdgpu_ras_set_fed(remote_adev,
- true);
- } else {
- amdgpu_ras_set_fed(adev, true);
- }
psp_fatal_error_recovery_quirk(&adev->psp);
}
}
@@ -2516,9 +2562,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_GPU_PAGE_SIZE);
+ amdgpu_ras_reserve_page(adev, bps[i].retired_page);
memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
data->count++;
@@ -2674,10 +2718,167 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
}
}
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ int ret = 0;
+ struct ras_poison_msg poison_msg;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ memset(&poison_msg, 0, sizeof(poison_msg));
+ poison_msg.block = block;
+ poison_msg.pasid = pasid;
+ poison_msg.reset = reset;
+ poison_msg.pasid_fn = pasid_fn;
+ poison_msg.data = data;
+
+ ret = kfifo_put(&con->poison_fifo, poison_msg);
+ if (!ret) {
+ dev_err(adev->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
+ struct ras_poison_msg *poison_msg)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return kfifo_get(&con->poison_fifo, poison_msg);
+}
+
+static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
+{
+ mutex_init(&ecc_log->lock);
+
+ /* Any constant value works as the siphash key */
+ memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
+
+ INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
+ ecc_log->de_updated = false;
+}
+
+static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ struct ras_ecc_err *ecc_err;
+
+ mutex_lock(&ecc_log->lock);
+ radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
+ ecc_err = radix_tree_deref_slot(slot);
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
+ }
+ mutex_unlock(&ecc_log->lock);
+
+ mutex_destroy(&ecc_log->lock);
+ ecc_log->de_updated = false;
+}
+
+static void amdgpu_ras_do_page_retirement(struct work_struct *work)
+{
+ struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+ page_retirement_dwork.work);
+ struct amdgpu_device *adev = con->adev;
+ struct ras_err_data err_data;
+
+ if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery))
+ return;
+
+ amdgpu_ras_error_data_init(&err_data);
+
+ amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ mutex_lock(&con->umc_ecc_log.lock);
+ if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
+ UMC_ECC_NEW_DETECTED_TAG))
+ schedule_delayed_work(&con->page_retirement_dwork,
+ msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL));
+ mutex_unlock(&con->umc_ecc_log.lock);
+}
+
+static int amdgpu_ras_query_ecc_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block ras_block, uint32_t timeout_ms)
+{
+ int ret = 0;
+ struct ras_ecc_log_info *ecc_log;
+ struct ras_query_if info;
+ uint32_t timeout = timeout_ms;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ memset(&info, 0, sizeof(info));
+ info.head.block = ras_block;
+
+ ecc_log = &ras->umc_ecc_log;
+ ecc_log->de_updated = false;
+ do {
+ ret = amdgpu_ras_query_error_status(adev, &info);
+ if (ret) {
+ dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret);
+ return ret;
+ }
+
+ if (timeout && !ecc_log->de_updated) {
+ msleep(1);
+ timeout--;
+ }
+ } while (timeout && !ecc_log->de_updated);
+
+ if (timeout_ms && !timeout) {
+ dev_warn(adev->dev, "Can't find deferred error\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
+ uint32_t timeout)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ ret = amdgpu_ras_query_ecc_status(adev, AMDGPU_RAS_BLOCK__UMC, timeout);
+ if (!ret)
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
+}
+
+static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ struct ras_poison_msg *poison_msg)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint32_t reset = poison_msg->reset;
+ uint16_t pasid = poison_msg->pasid;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (poison_msg->pasid_fn)
+ poison_msg->pasid_fn(adev, pasid, poison_msg->data);
+
+ if (reset) {
+ flush_delayed_work(&con->page_retirement_dwork);
+
+ con->gpu_reset_flags |= reset;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ return 0;
+}
+
static int amdgpu_ras_page_retirement_thread(void *param)
{
struct amdgpu_device *adev = (struct amdgpu_device *)param;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_poison_msg poison_msg;
+ enum amdgpu_ras_block ras_block;
+ bool poison_creation_is_handled = false;
while (!kthread_should_stop()) {
@@ -2688,13 +2889,34 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
- dev_info(adev->dev, "Start processing page retirement. request:%d\n",
- atomic_read(&con->page_retirement_req_cnt));
-
atomic_dec(&con->page_retirement_req_cnt);
- amdgpu_umc_bad_page_polling_timeout(adev,
- false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ if (!amdgpu_ras_get_poison_req(adev, &poison_msg))
+ continue;
+
+ ras_block = poison_msg.block;
+
+ dev_info(adev->dev, "Start processing ras block %s(%d)\n",
+ ras_block_str(ras_block), ras_block);
+
+ if (ras_block == AMDGPU_RAS_BLOCK__UMC) {
+ amdgpu_ras_poison_creation_handler(adev,
+ MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ poison_creation_is_handled = true;
+ } else {
+ /* poison_creation_is_handled:
+ * false: no poison creation interrupt was seen, only a
+ * poison consumption interrupt.
+ * true: a poison creation interrupt was handled earlier,
+ * but none preceded this consumption interrupt.
+ */
+ amdgpu_ras_poison_creation_handler(adev,
+ poison_creation_is_handled ?
+ 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC);
+
+ amdgpu_ras_poison_consumption_handler(adev, &poison_msg);
+ poison_creation_is_handled = false;
+ }
}
return 0;
@@ -2763,6 +2985,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
}
}
+ mutex_init(&con->page_rsv_lock);
+ INIT_KFIFO(con->poison_fifo);
mutex_init(&con->page_retirement_lock);
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
@@ -2773,6 +2997,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
}
+ INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
+ amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
#ifdef CONFIG_X86_MCE_AMD
if ((adev->asic_type == CHIP_ALDEBARAN) &&
(adev->gmc.xgmi.connected_to_cpu))
@@ -2813,8 +3039,14 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
atomic_set(&con->page_retirement_req_cnt, 0);
+ mutex_destroy(&con->page_rsv_lock);
+
cancel_work_sync(&con->recovery_work);
+ cancel_delayed_work_sync(&con->page_retirement_dwork);
+
+ amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
+
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
@@ -3036,6 +3268,35 @@ static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
AMDGPU_RAS_ERROR__PARITY;
}
+static void ras_event_mgr_init(struct ras_event_manager *mgr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++)
+ atomic64_set(&mgr->seqnos[i], 0);
+}
+
+static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_hive_info *hive;
+
+ if (!ras)
+ return;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_in_reset(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_event_mgr_init(ras->event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -3356,6 +3617,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_ras_event_mgr_init(adev);
+
if (amdgpu_aca_is_enabled(adev)) {
if (amdgpu_in_reset(adev))
r = amdgpu_aca_reset(adev);
@@ -3472,14 +3735,39 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
atomic_set(&ras->fed, !!status);
}
+bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id)
+{
+ return !(id & BIT_ULL(63));
+}
+
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u64 id;
+
+ switch (type) {
+ case RAS_EVENT_TYPE_ISR:
+ id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]);
+ break;
+ case RAS_EVENT_TYPE_INVALID:
+ default:
+ id = BIT_ULL(63) | 0ULL;
+ break;
+ }
+
+ return id;
+}
+
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]);
- dev_info(adev->dev, "uncorrectable hardware error"
- "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ amdgpu_ras_set_fed(adev, true);
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
@@ -3998,6 +4286,8 @@ void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_a
{
struct ras_err_addr *mca_err_addr;
+ /* This function is obsolete and will be removed. */
+ return;
mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
if (!mca_err_addr)
return;
@@ -4195,3 +4485,19 @@ void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
}
}
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
+ int ret = 0;
+
+ mutex_lock(&con->page_rsv_lock);
+ ret = amdgpu_vram_mgr_query_page_status(mgr, start);
+ if (ret == -ENOENT)
+ ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
+ mutex_unlock(&con->page_rsv_lock);
+
+ return ret;
+}
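A minimal sketch, assuming the usual amdgpu driver context, of how a caller could feed the poison fifo added above and wake the page-retirement thread; it mirrors the UMC poison-creation path in this patch, and the helper name below is hypothetical, not part of the series.

static void example_queue_umc_poison(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/* Queue a poison-creation request with no pasid callback and no
	 * reset requested (0); amdgpu_ras_put_poison_req() returns 0 on
	 * success and -ENOSPC when the fifo is full.
	 */
	if (!amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__UMC, 0,
				       NULL, NULL, 0)) {
		atomic_inc(&con->page_retirement_req_cnt);
		wake_up(&con->page_retirement_wq);
	}
}

The page-retirement kthread later drains the fifo with amdgpu_ras_get_poison_req() and decides whether to poll for deferred errors or to trigger a reset.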
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index e0f8ce9d8440..c8980d5f6540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -26,6 +26,9 @@
#include <linux/debugfs.h>
#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/radix-tree.h>
+#include <linux/siphash.h>
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
@@ -64,6 +67,14 @@ struct amdgpu_iv_entry;
/* The high three bits indicates socketid */
#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+#define RAS_EVENT_LOG(_adev, _id, _fmt, ...) \
+do { \
+ if (amdgpu_ras_event_id_is_valid((_adev), (_id))) \
+ dev_info((_adev)->dev, "{%llu}" _fmt, (_id), ##__VA_ARGS__); \
+ else \
+ dev_info((_adev)->dev, _fmt, ##__VA_ARGS__); \
+} while (0)
+
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -419,6 +430,52 @@ struct umc_ecc_info {
int record_ce_addr_supported;
};
+enum ras_event_type {
+ RAS_EVENT_TYPE_INVALID = -1,
+ RAS_EVENT_TYPE_ISR = 0,
+ RAS_EVENT_TYPE_COUNT,
+};
+
+struct ras_event_manager {
+ atomic64_t seqnos[RAS_EVENT_TYPE_COUNT];
+};
+
+struct ras_query_context {
+ enum ras_event_type type;
+ u64 event_id;
+};
+
+typedef int (*pasid_notify)(struct amdgpu_device *adev,
+ uint16_t pasid, void *data);
+
+struct ras_poison_msg {
+ enum amdgpu_ras_block block;
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+};
+
+struct ras_err_pages {
+ uint32_t count;
+ uint64_t *pfn;
+};
+
+struct ras_ecc_err {
+ u64 hash_index;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+ struct ras_err_pages err_pages;
+};
+
+struct ras_ecc_log_info {
+ struct mutex lock;
+ siphash_key_t ecc_key;
+ struct radix_tree_root de_page_tree;
+ bool de_updated;
+};
+
struct amdgpu_ras {
/* ras infrastructure */
/* for ras itself. */
@@ -477,8 +534,18 @@ struct amdgpu_ras {
wait_queue_head_t page_retirement_wq;
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
+ struct mutex page_rsv_lock;
+ DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
+ struct ras_ecc_log_info umc_ecc_log;
+ struct delayed_work page_retirement_dwork;
+
/* Fatal error detected flag */
atomic_t fed;
+
+ /* RAS event manager */
+ struct ras_event_manager __event_mgr;
+ struct ras_event_manager *event_mgr;
+
};
struct ras_fs_data {
@@ -512,6 +579,7 @@ struct ras_err_data {
unsigned long de_count;
unsigned long err_addr_cnt;
struct eeprom_table_record *err_addr;
+ unsigned long err_addr_len;
u32 err_list_count;
struct list_head err_node_list;
};
@@ -879,4 +947,13 @@ void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id);
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
#endif
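A sketch of how the new event-id plumbing in this header ties together, assuming the amdgpu driver context: acquire an event id for the current query, stash it in a ras_query_context, and log through RAS_EVENT_LOG, which falls back to a plain dev_info() when the id is invalid. The function name is illustrative only.

static void example_log_query(struct amdgpu_device *adev)
{
	struct ras_query_context qctx;

	memset(&qctx, 0, sizeof(qctx));
	/* Tag ISR-driven queries with the ISR sequence number */
	qctx.type = amdgpu_ras_intr_triggered() ?
		    RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID;
	qctx.event_id = amdgpu_ras_acquire_event_id(adev, qctx.type);

	RAS_EVENT_LOG(adev, qctx.event_id, "example query across %d blocks\n",
		      AMDGPU_RAS_BLOCK_COUNT);
}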
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index b12808c0c331..06a62a8a992e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -404,6 +404,22 @@ static int amdgpu_ras_eeprom_correct_header_tag(
return res;
}
+static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 10, 0):
+ case IP_VERSION(12, 0, 0):
+ hdr->version = RAS_TABLE_VER_V2_1;
+ return;
+ default:
+ hdr->version = RAS_TABLE_VER_V1;
+ return;
+ }
+}
+
/**
* amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table
* @control: pointer to control structure
@@ -423,11 +439,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
mutex_lock(&control->ras_tbl_mutex);
hdr->header = RAS_TABLE_HDR_VAL;
- if (adev->umc.ras &&
- adev->umc.ras->set_eeprom_table_version)
- adev->umc.ras->set_eeprom_table_version(hdr);
- else
- hdr->version = RAS_TABLE_VER_V1;
+ amdgpu_ras_set_eeprom_table_version(control);
if (hdr->version == RAS_TABLE_VER_V2_1) {
hdr->first_rec_offset = RAS_RECORD_START_V2_1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 381101d2bf05..50fcd86e1033 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -164,4 +164,29 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
}
}
+/**
+ * amdgpu_res_cleared - check if blocks are cleared
+ *
+ * @cur: the cursor to extract the block
+ *
+ * Check if the @cur block is cleared
+ */
+static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
+{
+ struct drm_buddy_block *block;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ if (!amdgpu_vram_mgr_is_cleared(block))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
#endif
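A small sketch of the skip-already-cleared pattern that amdgpu_res_cleared() enables, assuming the cursor API above; amdgpu_ttm_clear_buffer() later in this series uses the same walk. The helper name is hypothetical.

static u64 example_bytes_needing_clear(struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;
	u64 dirty = 0;

	/* Walk the resource and count only blocks the VRAM manager has
	 * not already marked as cleared.
	 */
	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if (!amdgpu_res_cleared(&cursor))
			dirty += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return dirty;
}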
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 147100c27c2d..ea4873f6ccd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -21,9 +21,6 @@
*
*/
-#include <linux/devcoredump.h>
-#include <generated/utsrelease.h>
-
#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
@@ -161,105 +158,3 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
atomic_set(&reset_domain->in_gpu_reset, 0);
up_write(&reset_domain->sem);
}
-
-#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
-{
-}
-#else
-static ssize_t
-amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen)
-{
- struct drm_printer p;
- struct amdgpu_coredump_info *coredump = data;
- struct drm_print_iterator iter;
- int i;
-
- iter.data = buffer;
- iter.offset = 0;
- iter.start = offset;
- iter.remain = count;
-
- p = drm_coredump_printer(&iter);
-
- drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
- drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
- drm_printf(&p, "kernel: " UTS_RELEASE "\n");
- drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
-
- if (coredump->reset_task_info.pid)
- drm_printf(&p, "process_name: %s PID: %d\n",
- coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
-
- if (coredump->ring) {
- drm_printf(&p, "\nRing timed out details\n");
- drm_printf(&p, "IP Type: %d Ring Name: %s\n",
- coredump->ring->funcs->type,
- coredump->ring->name);
- }
-
- if (coredump->reset_vram_lost)
- drm_printf(&p, "VRAM is lost due to GPU reset!\n");
- if (coredump->adev->reset_info.num_regs) {
- drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
-
- for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
- drm_printf(&p, "0x%08x: 0x%08x\n",
- coredump->adev->reset_info.reset_dump_reg_list[i],
- coredump->adev->reset_info.reset_dump_reg_value[i]);
- }
-
- return count - iter.remain;
-}
-
-static void amdgpu_devcoredump_free(void *data)
-{
- kfree(data);
-}
-
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
-{
- struct amdgpu_coredump_info *coredump;
- struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
- struct drm_sched_job *s_job;
-
- coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
-
- if (!coredump) {
- DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
- return;
- }
-
- coredump->reset_vram_lost = vram_lost;
-
- if (reset_context->job && reset_context->job->vm) {
- struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
-
- ti = amdgpu_vm_get_task_info_vm(vm);
- if (ti) {
- coredump->reset_task_info = *ti;
- amdgpu_vm_put_task_info(ti);
- }
- }
-
- if (job) {
- s_job = &job->base;
- coredump->ring = to_amdgpu_ring(s_job->sched);
- }
-
- coredump->adev = adev;
-
- ktime_get_ts64(&coredump->reset_time);
-
- dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
- amdgpu_devcoredump_read, amdgpu_devcoredump_free);
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 60522963aaca..b11d190ece53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_COREDUMP = 2,
};
struct amdgpu_reset_context {
@@ -88,19 +89,6 @@ struct amdgpu_reset_domain {
atomic_t reset_res;
};
-#ifdef CONFIG_DEV_COREDUMP
-
-#define AMDGPU_COREDUMP_VERSION "1"
-
-struct amdgpu_coredump_info {
- struct amdgpu_device *adev;
- struct amdgpu_task_info reset_task_info;
- struct timespec64 reset_time;
- bool reset_vram_lost;
- struct amdgpu_ring *ring;
-};
-#endif
-
int amdgpu_reset_init(struct amdgpu_device *adev);
int amdgpu_reset_fini(struct amdgpu_device *adev);
@@ -141,9 +129,6 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
#define for_each_handler(i, handler, reset_ctl) \
for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
(handler = (*reset_ctl->reset_handlers)[i]); \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5505d646f43a..06f0a6534a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
volatile u32 *mqd;
- int r;
+ u32 *kbuf;
+ int r, i;
uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- result = 0;
+ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- return r;
+ goto err_free;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
+ if (r)
+ goto err_unreserve;
+ /*
+ * Copy to a local buffer to avoid calling put_user(), which might
+ * fault and acquire mmap_sem, while holding reservation_ww_class_mutex.
+ */
+ for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+ kbuf[i] = mqd[i];
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ result = 0;
while (size) {
if (*pos >= ring->mqd_size)
- goto done;
+ break;
- value = mqd[*pos/4];
+ value = kbuf[*pos/4];
r = put_user(value, (uint32_t *)buf);
if (r)
- goto done;
+ goto err_free;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
-done:
- amdgpu_bo_kunmap(ring->mqd_obj);
- mqd = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r)
- return r;
-
+ kfree(kbuf);
return result;
+
+err_unreserve:
+ amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+ kfree(kbuf);
+ return r;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 173a2a308078..b51a82e711df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -132,7 +132,7 @@ struct amdgpu_buffer_funcs {
uint64_t dst_offset,
/* number of byte to transfer */
uint32_t byte_count,
- bool tmz);
+ uint32_t copy_flags);
/* maximum bytes in a single operation */
uint32_t fill_max_bytes;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index ff4435181055..ec9d12f85f39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -44,6 +44,7 @@ struct amdgpu_smuio_funcs {
u32 (*get_socket_id)(struct amdgpu_device *adev);
enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev);
bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
+ u64 (*get_gpu_clock_counter)(struct amdgpu_device *adev);
};
struct amdgpu_smuio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fc418e670fda..e785f128411d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -236,7 +236,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes, false);
+ dst_addr, num_bytes, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -296,6 +296,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct dma_fence *fence = NULL;
int r = 0;
+ uint32_t copy_flags = 0;
+
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
@@ -323,8 +325,11 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
if (r)
goto error;
- r = amdgpu_copy_buffer(ring, from, to, cur_size,
- resv, &next, false, true, tmz);
+ if (tmz)
+ copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
+
+ r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+ &next, false, true, copy_flags);
if (r)
goto error;
@@ -378,11 +383,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
- false);
+ r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
+ false);
if (r) {
goto error;
} else if (wipe_fence) {
+ amdgpu_vram_mgr_set_cleared(bo->resource);
dma_fence_put(fence);
fence = wipe_fence;
}
@@ -403,40 +409,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
@@ -466,14 +487,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
bo->ttm == NULL)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
(new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if ((old_mem->mem_type == TTM_PL_TT ||
old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
@@ -483,9 +506,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
- goto out;
+ return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -497,8 +521,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_OA ||
new_mem->mem_type == AMDGPU_PL_DOORBELL) {
/* Nothing to save here */
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if (bo->type == ttm_bo_type_device &&
@@ -510,27 +535,28 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
- if (adev->mman.buffer_funcs_enabled) {
- if (((old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_VRAM) ||
- (old_mem->mem_type == TTM_PL_VRAM &&
- new_mem->mem_type == TTM_PL_SYSTEM))) {
- hop->fpfn = 0;
- hop->lpfn = 0;
- hop->mem_type = TTM_PL_TT;
- hop->flags = TTM_PL_FLAG_TEMPORARY;
- return -EMULTIHOP;
- }
+ if (adev->mman.buffer_funcs_enabled &&
+ ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) ||
+ (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM))) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ if (adev->mman.buffer_funcs_enabled)
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
- } else {
+ else
r = -ENODEV;
- }
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -540,11 +566,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
-out:
- /* update statistics */
+ /* update statistics after the move */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
atomic64_add(bo->base.size, &adev->num_bytes_moved);
- amdgpu_bo_move_notify(bo, evict);
return 0;
}
@@ -557,7 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -568,9 +592,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -1477,7 +1498,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
swap(src_addr, dst_addr);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
- PAGE_SIZE, false);
+ PAGE_SIZE, 0);
amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -1548,7 +1569,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- amdgpu_bo_move_notify(bo, false);
+ amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_device_funcs amdgpu_bo_driver = {
@@ -2128,7 +2149,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush, bool tmz)
+ bool vm_needs_flush, uint32_t copy_flags)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2154,8 +2175,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes, tmz);
-
+ dst_offset, cur_size_in_bytes, copy_flags);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
@@ -2215,6 +2235,71 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
return 0;
}
+/**
+ * amdgpu_ttm_clear_buffer - clear memory buffers
+ * @bo: amdgpu buffer object
+ * @resv: reservation object
+ * @fence: dma_fence associated with the operation
+ *
+ * Clear the memory buffer resource.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_res_cursor cursor;
+ u64 addr;
+ int r;
+
+ if (!adev->mman.buffer_funcs_enabled)
+ return -EINVAL;
+
+ if (!fence)
+ return -EINVAL;
+
+ *fence = dma_fence_get_stub();
+
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (cursor.remaining) {
+ struct dma_fence *next = NULL;
+ u64 size;
+
+ if (amdgpu_res_cleared(&cursor)) {
+ amdgpu_res_next(&cursor, cursor.size);
+ continue;
+ }
+
+ /* Never clear more than 256MiB at once to avoid timeouts */
+ size = min(cursor.size, 256ULL << 20);
+
+ r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+ 1, ring, false, &size, &addr);
+ if (r)
+ goto err;
+
+ r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+ &next, true, true);
+ if (r)
+ goto err;
+
+ dma_fence_put(*fence);
+ *fence = next;
+
+ amdgpu_res_next(&cursor, size);
+ }
+err:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+
+ return r;
+}
+
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e..b6f53129dea3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -38,8 +38,6 @@
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
-#define AMDGPU_POISON 0xd0bed0be
-
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
@@ -111,6 +109,8 @@ struct amdgpu_copy_mem {
unsigned long offset;
};
+#define AMDGPU_COPY_FLAGS_TMZ (1 << 0)
+
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
@@ -148,13 +151,16 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush, bool tmz);
+ bool vm_needs_flush, uint32_t copy_flags);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const struct amdgpu_copy_mem *src,
const struct amdgpu_copy_mem *dst,
uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f);
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 619445760037..105d4de0613a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -125,6 +125,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
+ PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 20436f81856a..540e0f066b26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -21,10 +21,13 @@
*
*/
+#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
+#define MAX_UMC_HASH_STRING_SIZE 256
+
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
uint32_t ch_inst, uint32_t umc_inst)
@@ -63,6 +66,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
goto out_fini_err_data;
}
+ err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
/*
* Translate UMC channel address to Physical address
*/
@@ -86,7 +91,7 @@ out_fini_err_data:
return ret;
}
-static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -118,6 +123,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if(!err_data->err_addr)
dev_warn(adev->dev, "Failed to alloc memory for "
"umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -143,6 +150,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if(!err_data->err_addr)
dev_warn(adev->dev, "Failed to alloc memory for "
"umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -170,6 +179,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
}
kfree(err_data->err_addr);
+ err_data->err_addr = NULL;
mutex_unlock(&con->page_retirement_lock);
}
@@ -177,7 +187,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry,
- bool reset)
+ uint32_t reset)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -186,9 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if (err_data->ue_count && reset) {
- /* use mode-2 reset for poison consumption */
- if (!entry)
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -196,7 +204,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
}
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms)
+ uint32_t reset, uint32_t timeout_ms)
{
struct ras_err_data err_data;
struct ras_common_if head = {
@@ -238,16 +246,16 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
if (reset) {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- /* use mode-2 reset for poison consumption */
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
return 0;
}
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
{
int ret = AMDGPU_RAS_SUCCESS;
@@ -285,16 +293,14 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
amdgpu_ras_error_data_fini(&err_data);
} else {
- if (reset) {
- amdgpu_umc_bad_page_polling_timeout(adev,
- reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
- } else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ amdgpu_ras_put_poison_req(adev,
+ block, pasid, pasid_fn, data, reset);
+
atomic_inc(&con->page_retirement_req_cnt);
wake_up(&con->page_retirement_wq);
- }
}
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
@@ -307,11 +313,19 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset)
+{
+ return amdgpu_umc_pasid_poison_handler(adev,
+ block, 0, NULL, NULL, reset);
+}
+
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry)
{
- return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
+ AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
@@ -388,14 +402,20 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
uint64_t err_addr,
uint64_t retired_page,
uint32_t channel_index,
uint32_t umc_inst)
{
- struct eeprom_table_record *err_rec =
- &err_data->err_addr[err_data->err_addr_cnt];
+ struct eeprom_table_record *err_rec;
+
+ if (!err_data ||
+ !err_data->err_addr ||
+ (err_data->err_addr_cnt >= err_data->err_addr_len))
+ return -EINVAL;
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
err_rec->address = err_addr;
/* page frame address is saved */
@@ -407,6 +427,8 @@ void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
err_rec->mcumc_id = umc_inst;
err_data->err_addr_cnt++;
+
+ return 0;
}
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
@@ -439,3 +461,76 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
return 0;
}
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ if (adev->umc.ras->update_ecc_status)
+ return adev->umc.ras->update_ecc_status(adev,
+ status, ipid, addr);
+ return 0;
+}
+
+static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
+{
+ uint64_t *addr_a = (uint64_t *)a;
+ uint64_t *addr_b = (uint64_t *)b;
+
+ if (*addr_a > *addr_b)
+ return 1;
+ else if (*addr_a < *addr_b)
+ return -1;
+ else
+ return 0;
+}
+
+/* Use string hash to avoid logging the same bad pages repeatedly */
+int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
+ uint64_t *pfns, int len, uint64_t *val)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
+ int offset = 0, i = 0;
+ uint64_t hash_val;
+
+ if (!pfns || !len)
+ return -EINVAL;
+
+ sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
+
+ for (i = 0; i < len; i++)
+ offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
+
+ hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
+
+ *val = hash_val;
+
+ return 0;
+}
+
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_log_info *ecc_log;
+ int ret;
+
+ ecc_log = &con->umc_ecc_log;
+
+ mutex_lock(&ecc_log->lock);
+ ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
+ if (!ret) {
+ struct ras_err_pages *err_pages = &ecc_err->err_pages;
+ int i;
+
+ /* Reserve memory */
+ for (i = 0; i < err_pages->count; i++)
+ amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
+
+ radix_tree_tag_set(ecc_tree,
+ ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ecc_log->lock);
+
+ return ret;
+}
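A sketch combining the two helpers added above, under the assumption of the amdgpu driver context and the ras_ecc_err layout from amdgpu_ras.h: hash the retired pfns so the same page set is logged once, then insert the entry into the per-device deferred-error radix tree. The function name is illustrative and error handling is kept minimal.

static int example_log_deferred(struct amdgpu_device *adev,
				uint64_t *pfns, int count,
				uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *err;
	int ret;

	err = kzalloc(sizeof(*err), GFP_KERNEL);
	if (!err)
		return -ENOMEM;

	/* Hash over the sorted pfn list to de-duplicate log entries */
	ret = amdgpu_umc_build_pages_hash(adev, pfns, count, &err->hash_index);
	if (ret)
		goto out;

	err->status = status;
	err->ipid = ipid;
	err->addr = addr;
	err->err_pages.count = count;
	err->err_pages.pfn = kmemdup(pfns, count * sizeof(*pfns), GFP_KERNEL);
	if (!err->err_pages.pfn) {
		ret = -ENOMEM;
		goto out;
	}

	/* -EEXIST from the radix tree means this page set was already logged */
	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, err);
out:
	if (ret) {
		kfree(err->err_pages.pfn);
		kfree(err);
	}
	return ret;
}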
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 26d2ae498daf..5f50c69c3cec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -52,6 +52,8 @@
#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
+/* Page retirement tag */
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -66,8 +68,8 @@ struct amdgpu_umc_ras {
void *ras_error_status);
bool (*check_ecc_err_status)(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, void *ras_error_status);
- /* support different eeprom table version for different asic */
- void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
+ int (*update_ecc_status)(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
};
struct amdgpu_umc_funcs {
@@ -103,11 +105,14 @@ struct amdgpu_umc {
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
uint64_t err_addr,
uint64_t retired_page,
uint32_t channel_index,
@@ -123,5 +128,15 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms);
+ uint32_t reset, uint32_t timeout_ms);
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
+int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
+ uint64_t *pfns, int len, uint64_t *val);
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
+
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status);
#endif
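A sketch of the pasid-aware consumption path a client block might use with the new amdgpu_umc_pasid_poison_handler(), assuming the amdgpu driver context; the callback body, the choice of RAS block, and the reset mode are illustrative only.

static int example_notify_pasid(struct amdgpu_device *adev,
				uint16_t pasid, void *data)
{
	dev_info(adev->dev, "poison consumed by pasid %d\n", pasid);
	return 0;
}

static void example_report_consumption(struct amdgpu_device *adev,
				       uint16_t pasid)
{
	/* Queue the consumption event; the page-retirement thread will
	 * invoke the callback and request the mode-2 reset.
	 */
	amdgpu_umc_pasid_poison_handler(adev, AMDGPU_RAS_BLOCK__SDMA, pasid,
					example_notify_pasid, NULL,
					AMDGPU_RAS_GPU_RESET_MODE2_RESET);
}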
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index ab820cf52668..e01c1c8e64c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
mqd->rptr_val = 0;
mqd->unmapped = 1;
+ if (adev->vpe.collaborate_mode)
+ memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
+
qinfo->mqd_addr = test->mqd_data_gpu_addr;
qinfo->csa_addr = test->ctx_data_gpu_addr +
offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
+ qinfo->doorbell_offset_0 = 0;
qinfo->doorbell_offset_1 = 0;
}
@@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
ring[5] = 0;
mqd->wptr_val = (6 << 2);
- // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
+ if (adev->vpe.collaborate_mode)
+ (++mqd)->wptr_val = (6 << 2);
+
+ WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
for (i = 0; i < adev->usec_timeout; i++) {
if (*fence == test_pattern)
@@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
fw_name = "amdgpu/umsch_mm_4_0_0.bin";
break;
default:
@@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
break;
default:
@@ -766,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
@@ -867,6 +878,8 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
.hw_fini = umsch_mm_hw_fini,
.suspend = umsch_mm_suspend,
.resume = umsch_mm_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
index 8258a43a6236..5014b5af95fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
@@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE {
UMSCH_SWIP_ENGINE_TYPE_MAX
};
-enum UMSCH_SWIP_AFFINITY_TYPE {
- UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
- UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
- UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
- UMSCH_SWIP_AFFINITY_TYPE_MAX
-};
-
enum UMSCH_CONTEXT_PRIORITY_LEVEL {
CONTEXT_PRIORITY_LEVEL_IDLE = 0,
CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
@@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL {
struct umsch_mm_set_resource_input {
uint32_t vmid_mask_mm_vcn;
uint32_t vmid_mask_mm_vpe;
+ uint32_t collaboration_mask_vpe;
uint32_t logging_vmid;
uint32_t engine_mask;
union {
struct {
uint32_t disable_reset : 1;
uint32_t disable_umsch_mm_log : 1;
- uint32_t reserved : 30;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 29;
};
uint32_t uint32_all;
};
@@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input {
uint32_t doorbell_offset_1;
enum UMSCH_SWIP_ENGINE_TYPE engine_type;
uint32_t affinity;
- enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
uint64_t mqd_addr;
uint64_t h_context;
uint64_t h_queue;
uint32_t vm_context_cntl;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
+
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
};
@@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input {
uint32_t doorbell_offset_0;
uint32_t doorbell_offset_1;
uint64_t context_csa_addr;
+ uint32_t context_csa_array_index;
};
struct MQD_INFO {
@@ -103,6 +102,7 @@ struct MQD_INFO {
uint32_t wptr_val;
uint32_t rptr_val;
uint32_t unmapped;
+ uint32_t vmid;
};
struct amdgpu_umsch_mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59acf424a078..968ca2c84ef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -743,7 +743,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
- uint32_t *size = &tmp;
+ uint32_t dummy = 0xffffffff;
+ uint32_t *size = &dummy;
unsigned int idx;
int i, r = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9c514a606a2f..677eb141554e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -93,7 +93,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
- char ucode_prefix[30];
+ char ucode_prefix[25];
char fw_name[40];
int r, i;
@@ -185,7 +185,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
+ log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
+ } else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index a418393d89ec..9f06def236fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -454,6 +454,16 @@ struct amdgpu_vcn_rb_metadata {
uint8_t pad[26];
};
+struct amdgpu_vcn5_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[12];
+ struct amdgpu_fw_shared_unified_queue_struct sq;
+ uint8_t pad1[8];
+ struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ uint8_t pad2[4];
+};
+
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 7a4eae36778a..54ab51a4ada7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
@@ -424,7 +425,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
return -EINVAL;
if (pf2vf_info->size > 1024) {
- DRM_ERROR("invalid pf2vf message size\n");
+ dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
return -EINVAL;
}
@@ -435,7 +436,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
adev->virt.fw_reserve.checksum_key, checksum);
if (checksum != checkval) {
- DRM_ERROR("invalid pf2vf message\n");
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
return -EINVAL;
}
@@ -449,7 +452,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
0, checksum);
if (checksum != checkval) {
- DRM_ERROR("invalid pf2vf message\n");
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
return -EINVAL;
}
@@ -485,7 +490,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
break;
default:
- DRM_ERROR("invalid pf2vf version\n");
+ dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
return -EINVAL;
}
@@ -571,6 +576,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->decode_usage = 0;
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
+
+ if (adev->mes.resource_1) {
+ vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
+ }
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
@@ -584,8 +594,22 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
int ret;
ret = amdgpu_virt_read_pf2vf_data(adev);
- if (ret)
+ if (ret) {
+ adev->virt.vf2pf_update_retry_cnt++;
+ if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
+ amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) {
+ amdgpu_ras_set_fed(adev, true);
+ if (amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work))
+ return;
+ else
+ dev_err(adev->dev, "Failed to queue work! at %s", __func__);
+ }
+
goto out;
+ }
+
+ adev->virt.vf2pf_update_retry_cnt = 0;
amdgpu_virt_write_vf2pf_data(adev);
out:
@@ -606,6 +630,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
+ adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
@@ -705,12 +730,6 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
- if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
- /* VF MMIO access (except mailbox range) from CPU
- * will be blocked during sriov runtime
- */
- adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
-
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {
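
Note: the amdgpu_virt.c hunks above make the vf2pf worker count consecutive pf2vf read failures and, once the retry limit is reached under SR-IOV runtime and outside of a reset, flag a fatal error and hand off to the FLR work item; any successful read resets the counter. A self-contained sketch of that escalation pattern, with poll_once() and schedule_recovery() as illustrative stand-ins rather than driver API:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RETRY_LIMIT 5  /* mirrors AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT */

    struct channel {
        int retry_cnt;         /* consecutive failures, like vf2pf_update_retry_cnt */
        bool in_recovery;
    };

    /* stand-in for the pf2vf read: returns 0 on success */
    static int poll_once(int tick)
    {
        return (tick < 7) ? -1 : 0;    /* fail the first 7 polls, then succeed */
    }

    static void schedule_recovery(struct channel *ch)
    {
        ch->in_recovery = true;        /* the driver would queue flr_work here */
        printf("retry limit reached, scheduling recovery\n");
    }

    static void periodic_update(struct channel *ch, int tick)
    {
        if (poll_once(tick)) {
            if (++ch->retry_cnt >= MAX_RETRY_LIMIT && !ch->in_recovery)
                schedule_recovery(ch);
            return;                    /* skip the write-back path on failure */
        }
        ch->retry_cnt = 0;             /* any success restarts the count */
        printf("tick %d: handshake ok\n", tick);
    }

    int main(void)
    {
        struct channel ch = {0};
        for (int t = 0; t < 10; t++)
            periodic_update(&ch, t);
        return 0;
    }

The key property is that only an unbroken run of failures triggers recovery; a single good handshake starts the count again from zero.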
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3f59b7b5523f..642f1fd287d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -52,6 +52,8 @@
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -130,6 +132,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
/* VCN RB decouple */
AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
+ /* MES info */
+ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -257,6 +261,7 @@ struct amdgpu_virt {
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
+ int vf2pf_update_retry_cnt;
/* multimedia bandwidth config */
bool is_mm_bw_enabled;
@@ -332,6 +337,8 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
+#define amdgpu_sriov_is_mes_info_enable(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 8baa2e0935cc..e30eecd02ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -658,6 +658,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.soft_reset = amdgpu_vkms_soft_reset,
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
.set_powergating_state = amdgpu_vkms_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4299ce386322..4e2391c83d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -886,6 +886,44 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
}
/**
+ * amdgpu_vm_tlb_flush - prepare TLB flush
+ *
+ * @params: parameters for update
+ * @fence: input fence to sync TLB flush with
+ * @tlb_cb: the callback structure
+ *
+ * Increments the tlb sequence to make sure that future CS execute a VM flush.
+ */
+static void
+amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
+ struct dma_fence **fence,
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb)
+{
+ struct amdgpu_vm *vm = params->vm;
+
+ if (!fence || !*fence)
+ return;
+
+ tlb_cb->vm = vm;
+ if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
+ amdgpu_vm_tlb_seq_cb)) {
+ dma_fence_put(vm->last_tlb_flush);
+ vm->last_tlb_flush = dma_fence_get(*fence);
+ } else {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
+ }
+
+ /* Prepare a TLB flush fence to be attached to PTs */
+ if (!params->unlocked && vm->is_compute_context) {
+ amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
+
+ /* Makes sure no PD/PT is freed before the flush */
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+}
+
+/**
* amdgpu_vm_update_range - update a range in the vm page table
*
* @adev: amdgpu_device pointer to use for commands
@@ -916,8 +954,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence)
{
- struct amdgpu_vm_update_params params;
struct amdgpu_vm_tlb_seq_struct *tlb_cb;
+ struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
int r, idx;
@@ -927,8 +965,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
if (!tlb_cb) {
- r = -ENOMEM;
- goto error_unlock;
+ drm_dev_exit(idx);
+ return -ENOMEM;
}
/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
@@ -948,7 +986,9 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.immediate = immediate;
params.pages_addr = pages_addr;
params.unlocked = unlocked;
+ params.needs_flush = flush_tlb;
params.allow_override = allow_override;
+ INIT_LIST_HEAD(&params.tlb_flush_waitlist);
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@@ -1031,24 +1071,18 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = vm->update_funcs->commit(&params, fence);
+ if (r)
+ goto error_free;
- if (flush_tlb || params.table_freed) {
- tlb_cb->vm = vm;
- if (fence && *fence &&
- !dma_fence_add_callback(*fence, &tlb_cb->cb,
- amdgpu_vm_tlb_seq_cb)) {
- dma_fence_put(vm->last_tlb_flush);
- vm->last_tlb_flush = dma_fence_get(*fence);
- } else {
- amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
- }
+ if (params.needs_flush) {
+ amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
tlb_cb = NULL;
}
+ amdgpu_vm_pt_free_list(adev, &params);
+
error_free:
kfree(tlb_cb);
-
-error_unlock:
amdgpu_vm_eviction_unlock(vm);
drm_dev_exit(idx);
return r;
@@ -1613,6 +1647,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure last pfn not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1639,21 +1704,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1706,17 +1764,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1780,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1817,10 +1867,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -2391,6 +2445,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
mutex_init(&vm->eviction_lock);
vm->evicting = false;
+ vm->tlb_fence_context = dma_fence_context_alloc(1);
r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
false, &root, xcp_id);
@@ -2924,6 +2979,14 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
if (vm && status) {
vm->fault_info.addr = addr;
vm->fault_info.status = status;
+ /*
+ * Update the fault information globally for later usage
+ * when vm could be stale or freed.
+ */
+ adev->vm_manager.fault_info.addr = addr;
+ adev->vm_manager.fault_info.vmhub = vmhub;
+ adev->vm_manager.fault_info.status = status;
+
if (AMDGPU_IS_GFXHUB(vmhub)) {
vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
vm->fault_info.vmhub |=
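
Note: amdgpu_vm_bo_map(), amdgpu_vm_bo_replace_map() and amdgpu_vm_bo_clear_mappings() now share amdgpu_vm_verify_parameters(), which rejects unaligned addresses, wrap-around in saddr+size or offset+size, zero-sized ranges, mappings that run past the BO, and last pages beyond max_pfn. A userspace sketch of the same checks, using the GCC/Clang __builtin_add_overflow() builtin in place of the kernel's check_add_overflow(); the page size, pfn limit and test values are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE  4096ULL
    #define GPU_PAGE_MASK  (GPU_PAGE_SIZE - 1)
    #define GPU_PAGE_SHIFT 12
    #define MAX_PFN        (1ULL << 36)    /* illustrative address-space limit */

    /* returns 0 if the (saddr, offset, size) triple describes a sane mapping;
     * bo_size == 0 means there is no backing object to bound-check */
    static int verify_params(uint64_t saddr, uint64_t offset, uint64_t size,
                             uint64_t bo_size)
    {
        uint64_t tmp, lpfn;

        if ((saddr | offset | size) & GPU_PAGE_MASK)
            return -1;                         /* not GPU-page aligned */

        if (__builtin_add_overflow(saddr, size, &tmp) ||
            __builtin_add_overflow(offset, size, &tmp) ||
            size == 0)
            return -1;                         /* wrap-around or empty range */

        if (bo_size && offset + size > bo_size)
            return -1;                         /* runs past the object */

        lpfn = (saddr + size - 1) >> GPU_PAGE_SHIFT;
        if (lpfn >= MAX_PFN)
            return -1;                         /* beyond the address space */

        return 0;
    }

    int main(void)
    {
        printf("%d\n", verify_params(0x1000, 0, 0x2000, 0x4000));    /* 0: ok */
        printf("%d\n", verify_params(~0ULL & ~GPU_PAGE_MASK, 0,
                                     0x1000, 0));                    /* -1: overflow */
        return 0;
    }

Folding the checks into one helper keeps the three map paths from drifting apart instead of each open-coding its own validation.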
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 047ec1930d12..54d7da396de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -257,15 +257,20 @@ struct amdgpu_vm_update_params {
unsigned int num_dw_left;
/**
- * @table_freed: return true if page table is freed when updating
+ * @needs_flush: true whenever we need to invalidate the TLB
*/
- bool table_freed;
+ bool needs_flush;
/**
* @allow_override: true for memory that is not uncached: allows MTYPE
* to be overridden for NUMA local memory.
*/
bool allow_override;
+
+ /**
+ * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
+ */
+ struct list_head tlb_flush_waitlist;
};
struct amdgpu_vm_update_funcs {
@@ -342,6 +347,7 @@ struct amdgpu_vm {
atomic64_t tlb_seq;
struct dma_fence *last_tlb_flush;
atomic64_t kfd_last_flushed_seq;
+ uint64_t tlb_fence_context;
/* How many times we had to re-generate the page tables */
uint64_t generation;
@@ -422,6 +428,8 @@ struct amdgpu_vm_manager {
* look up VM of a page fault
*/
struct xarray pasids;
+ /* Global registration of recent page fault information */
+ struct amdgpu_vm_fault_info fault_info;
};
struct amdgpu_bo_va_mapping;
@@ -544,6 +552,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
@@ -609,5 +619,8 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
uint64_t addr,
uint32_t status,
unsigned int vmhub);
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 6e31621452de..3895bd7d176a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -108,7 +108,9 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
- /* Flush HDP */
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
mb();
amdgpu_device_flush_hdp(p->adev, NULL);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 124389a6bf48..7fdd306a48a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -622,40 +622,58 @@ void amdgpu_vm_pt_free_work(struct work_struct *work)
}
/**
- * amdgpu_vm_pt_free_dfs - free PD/PT levels
+ * amdgpu_vm_pt_free_list - free PD/PT levels
*
* @adev: amdgpu device structure
- * @vm: amdgpu vm structure
- * @start: optional cursor where to start freeing PDs/PTs
- * @unlocked: vm resv unlock status
+ * @params: see amdgpu_vm_update_params definition
*
- * Free the page directory or page table level and all sub levels.
+ * Free the page directory objects saved in the flush list
*/
-static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start,
- bool unlocked)
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params)
{
- struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm = params->vm;
+ bool unlocked = params->unlocked;
+
+ if (list_empty(&params->tlb_flush_waitlist))
+ return;
if (unlocked) {
spin_lock(&vm->status_lock);
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
- list_move(&entry->vm_status, &vm->pt_freed);
-
- if (start)
- list_move(&start->entry->vm_status, &vm->pt_freed);
+ list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
spin_unlock(&vm->status_lock);
schedule_work(&vm->pt_free_work);
return;
}
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
amdgpu_vm_pt_free(entry);
+}
- if (start)
- amdgpu_vm_pt_free(start->entry);
+/**
+ * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
+ *
+ * @params: parameters for the update
+ * @cursor: first PT entry to start DF search from, non NULL
+ *
+ * This list will be freed after TLB flush.
+ */
+static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ struct amdgpu_vm_pt_cursor seek;
+ struct amdgpu_vm_bo_base *entry;
+
+ spin_lock(&params->vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
+ if (entry && entry->bo)
+ list_move(&entry->vm_status, &params->tlb_flush_waitlist);
+ }
+
+ /* enter start node now */
+ list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
+ spin_unlock(&params->vm->status_lock);
}
/**
@@ -667,7 +685,13 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+ if (entry)
+ amdgpu_vm_pt_free(entry);
+ }
}
/**
@@ -972,10 +996,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
while (cursor.pfn < frag_start) {
/* Make sure previous mapping is freed */
if (cursor.entry->bo) {
- params->table_freed = true;
- amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor,
- params->unlocked);
+ params->needs_flush = true;
+ amdgpu_vm_pt_add_list(params, &cursor);
}
amdgpu_vm_pt_next(adev, &cursor);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 349416e176a1..66e8a016126b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -126,6 +126,10 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
+
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
WARN_ON(ib->length_dw > p->num_dw_left);
f = amdgpu_job_submit(p->job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
new file mode 100644
index 000000000000..51cddfa3f1e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gmc.h"
+
+struct amdgpu_tlb_fence {
+ struct dma_fence base;
+ struct amdgpu_device *adev;
+ struct dma_fence *dependency;
+ struct work_struct work;
+ spinlock_t lock;
+ uint16_t pasid;
+
+};
+
+static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu tlb fence";
+}
+
+static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "amdgpu tlb timeline";
+}
+
+static void amdgpu_tlb_fence_work(struct work_struct *work)
+{
+ struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
+ int r;
+
+ if (f->dependency) {
+ dma_fence_wait(f->dependency, false);
+ dma_fence_put(f->dependency);
+ f->dependency = NULL;
+ }
+
+ r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
+ if (r) {
+ dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n",
+ f->pasid);
+ dma_fence_set_error(&f->base, r);
+ }
+
+ dma_fence_signal(&f->base);
+ dma_fence_put(&f->base);
+}
+
+static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_tlb_fence_get_driver_name,
+ .get_timeline_name = amdgpu_tlb_fence_get_timeline_name
+};
+
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct dma_fence **fence)
+{
+ struct amdgpu_tlb_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f) {
+ /*
+ * We can't fail since the PDEs and PTEs are already updated, so
+ * just block for the dependency and execute the TLB flush
+ */
+ if (*fence)
+ dma_fence_wait(*fence, false);
+
+ amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
+ *fence = dma_fence_get_stub();
+ return;
+ }
+
+ f->adev = adev;
+ f->dependency = *fence;
+ f->pasid = vm->pasid;
+ INIT_WORK(&f->work, amdgpu_tlb_fence_work);
+ spin_lock_init(&f->lock);
+
+ dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+
+ /* TODO: We probably need a separate wq here */
+ dma_fence_get(&f->base);
+ schedule_work(&f->work);
+
+ *fence = &f->base;
+}
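
Note: the new amdgpu_vm_tlb_fence.c above wraps a deferred TLB flush in a dma_fence: the work item waits for the page-table update fence, flushes the TLB for the VM's PASID, then signals, so anything that waits on the returned fence only proceeds once the flush has happened. A rough userspace sketch of that ordering contract, using a pthread condition variable as a stand-in for dma_fence (build with -pthread; all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct fence {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int signaled;
    };

    static void fence_init(struct fence *f)
    {
        pthread_mutex_init(&f->lock, NULL);
        pthread_cond_init(&f->cond, NULL);
        f->signaled = 0;
    }

    static void fence_signal(struct fence *f)
    {
        pthread_mutex_lock(&f->lock);
        f->signaled = 1;
        pthread_cond_broadcast(&f->cond);
        pthread_mutex_unlock(&f->lock);
    }

    static void fence_wait(struct fence *f)
    {
        pthread_mutex_lock(&f->lock);
        while (!f->signaled)
            pthread_cond_wait(&f->cond, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }

    struct tlb_flush_job {
        struct fence *dependency;   /* page-table update fence */
        struct fence  done;         /* what callers wait on instead */
    };

    static void *tlb_flush_worker(void *arg)
    {
        struct tlb_flush_job *job = arg;

        fence_wait(job->dependency);         /* PTEs must be written first */
        printf("flushing TLB for pasid\n");  /* the per-PASID flush goes here */
        fence_signal(&job->done);            /* now the mapping is really usable */
        return NULL;
    }

    int main(void)
    {
        struct fence pt_update;
        struct tlb_flush_job job = { .dependency = &pt_update };
        pthread_t worker;

        fence_init(&pt_update);
        fence_init(&job.done);
        pthread_create(&worker, NULL, tlb_flush_worker, &job);

        printf("page tables updated\n");
        fence_signal(&pt_update);            /* unblock the deferred flush */

        fence_wait(&job.done);               /* consumers wait on the TLB fence */
        pthread_join(worker, NULL);
        return 0;
    }

On allocation failure the driver falls back to waiting on the dependency and flushing synchronously, which preserves the same ordering at the cost of blocking the caller.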
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 7a65a2b128ec..c23d97d34b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
- return 0;
+ return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
@@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle)
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
+ /* Power on VPE */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
ret = vpe_load_microcode(vpe);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 8db880244324..6c30eceec896 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -450,6 +450,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
u64 vis_usage = 0, max_bytes, min_block_size;
struct amdgpu_vram_mgr_resource *vres;
u64 size, remaining_size, lpfn, fpfn;
@@ -468,7 +469,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
pages_per_block = ~0ul;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -477,7 +478,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* default to 2MB */
pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
- pages_per_block = max_t(uint32_t, pages_per_block,
+ pages_per_block = max_t(u32, pages_per_block,
tbo->page_alignment);
}
@@ -498,9 +499,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
+ vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
+
if (fpfn || lpfn != mgr->mm.size)
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -514,21 +518,31 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
min_block_size = mgr->default_page_size;
- BUG_ON(min_block_size < mm->chunk_size);
-
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
- !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
+ BUG_ON(min_block_size < mm->chunk_size);
+
r = drm_buddy_alloc_blocks(mm, fpfn,
lpfn,
size,
min_block_size,
&vres->blocks,
vres->flags);
+
+ if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
+ !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
+ vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+ pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
+ tbo->page_alignment);
+
+ continue;
+ }
+
if (unlikely(r))
goto error_free_blocks;
@@ -571,7 +585,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
return 0;
error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -604,7 +618,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
amdgpu_vram_mgr_do_reserve(man);
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, vres->flags);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -912,7 +926,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
- drm_buddy_free_list(&mgr->mm, &rsv->allocated);
+ drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
kfree(rsv);
}
if (!adev->gmc.is_app_apu)
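
Note: in amdgpu_vram_mgr_new() the contiguous and cleared requests now come from the BO creation flags rather than the TTM placement, and a contiguous attempt that fails with -ENOSPC is retried as a scattered allocation when the placement itself did not demand contiguity. A small sketch of that preferred-versus-required fallback, with buddy_alloc() as a stub that pretends large contiguous requests cannot be satisfied:

    #include <errno.h>
    #include <stdio.h>

    #define ALLOC_CONTIGUOUS (1u << 0)   /* stands in for DRM_BUDDY_CONTIGUOUS_ALLOCATION */

    /* stub allocator: contiguous requests above 64 MiB always fail */
    static int buddy_alloc(unsigned long size, unsigned int flags)
    {
        if ((flags & ALLOC_CONTIGUOUS) && size > (64ul << 20))
            return -ENOSPC;
        return 0;
    }

    static int vram_alloc(unsigned long size, int prefer_contig, int require_contig)
    {
        unsigned int flags = (prefer_contig || require_contig) ? ALLOC_CONTIGUOUS : 0;
        int r;

        r = buddy_alloc(size, flags);
        if (r == -ENOSPC && (flags & ALLOC_CONTIGUOUS) && !require_contig) {
            flags &= ~ALLOC_CONTIGUOUS;      /* fall back to scattered blocks */
            r = buddy_alloc(size, flags);
        }
        return r;
    }

    int main(void)
    {
        printf("%d\n", vram_alloc(256ul << 20, 1, 0));   /* 0: falls back */
        printf("%d\n", vram_alloc(256ul << 20, 1, 1));   /* -28: must stay contiguous */
        return 0;
    }

The cleared-buffer request follows the same path in the patch: AMDGPU_GEM_CREATE_VRAM_CLEARED simply adds DRM_BUDDY_CLEAR_ALLOCATION to the same flags word before the allocation, and the flag is handed back to drm_buddy_free_list() on release.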
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 0e04e42cf809..b256cbc2bc27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -53,10 +53,20 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
return (u64)PAGE_SIZE << drm_buddy_block_order(block);
}
+static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_is_clear(block);
+}
+
static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
return container_of(res, struct amdgpu_vram_mgr_resource, base);
}
+static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
+{
+ to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 20d51f6c9bb8..dd2ec48cf5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1035,15 +1035,16 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return 0;
}
-static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
struct amdgpu_device *adev = handle->adev;
+ struct aca_bank_info info;
const char *error_str;
- u64 status;
+ u64 status, count;
int ret, ext_error_code;
- ret = aca_bank_info_decode(bank, &report->info);
+ ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
@@ -1055,15 +1056,28 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc
if (error_str)
dev_info(adev->dev, "%s detected\n", error_str);
- if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) ||
- (type == ACA_ERROR_TYPE_CE && ext_error_code == 6))
- report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+ count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
- return 0;
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
+ break;
+ case ACA_SMU_TYPE_CE:
+ count = ext_error_code == 6 ? count : 0ULL;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
- .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report,
+ .aca_bank_parser = xgmi_v6_4_0_aca_bank_parser,
};
static const struct aca_info xgmi_v6_4_0_aca_info = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 1592c63b3099..a3bfc16de6d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -44,6 +44,7 @@ struct amdgpu_hive_info {
struct amdgpu_reset_domain *reset_domain;
atomic_t ras_recovery;
+ struct ras_event_manager event_mgr;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 51a14f6d93bd..fb2b394bb9c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -94,7 +94,8 @@ union amd_sriov_msg_feature_flags {
uint32_t reg_indirect_acc : 1;
uint32_t av1_support : 1;
uint32_t vcn_rb_decouple : 1;
- uint32_t reserved : 24;
+ uint32_t mes_info_enable : 1;
+ uint32_t reserved : 23;
} flags;
uint32_t all;
};
@@ -157,7 +158,7 @@ struct amd_sriov_msg_pf2vf_info_header {
uint32_t reserved[2];
};
-#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (48)
+#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49)
struct amd_sriov_msg_pf2vf_info {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
@@ -208,6 +209,8 @@ struct amd_sriov_msg_pf2vf_info {
struct amd_sriov_msg_uuid_info uuid_info;
/* PCIE atomic ops support flag */
uint32_t pcie_atomic_ops_support_flags;
+ /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */
+ uint32_t gpu_capacity;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
};
@@ -221,7 +224,7 @@ struct amd_sriov_msg_vf2pf_info_header {
uint32_t reserved[2];
};
-#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (70)
+#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (73)
struct amd_sriov_msg_vf2pf_info {
/* header contains size and version */
struct amd_sriov_msg_vf2pf_info_header header;
@@ -265,7 +268,9 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t version;
} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
uint64_t dummy_page_addr;
-
+ /* FB allocated for guest MES to record UQ info */
+ uint64_t mes_info_addr;
+ uint32_t mes_info_size;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index d6f808acfb17..414ea3f560a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
+static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
+{
+ return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+}
+
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
- /* VCN is shared by two partitions under CPX MODE */
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}
@@ -623,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
- u32 mask, inst_mask = adev->sdma.sdma_mask;
+ u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
int ret, i;
/* generally 1 AID supports 4 instances */
@@ -635,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
- if ((inst_mask & mask) == mask)
+ avail_inst = inst_mask & mask;
+ if (avail_inst == mask || avail_inst == 0x3 ||
+ avail_inst == 0xc)
adev->aid_mask |= (1 << i);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 72362df352f6..d552e013354c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1243,6 +1243,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
+ ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
ectx.ws_size = ws;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a3a643254d7a..cf1d5d462b67 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1375,14 +1375,14 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+static int cik_asic_supports_baco(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
return amdgpu_dpm_is_baco_supported(adev);
default:
- return false;
+ return 0;
}
}
@@ -2210,6 +2210,8 @@ static const struct amd_ip_funcs cik_common_ip_funcs = {
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version cik_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index f24e34dc33d1..576baa9dbb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -435,6 +435,8 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
.soft_reset = cik_ih_soft_reset,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cik_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a3fccc4c1f43..6948ebda0fa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1228,6 +1228,8 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.soft_reset = cik_sdma_soft_reset,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
@@ -1290,7 +1292,7 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: is this a secure operation
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (CIK).
* Used by the amdgpu ttm implementation to move pages if
@@ -1300,7 +1302,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index c19681492efa..072643787384 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -433,6 +433,8 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
.soft_reset = cz_ih_soft_reset,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cz_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 221af054d874..b44fce44c066 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3333,6 +3333,8 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 69e8b0db6cf7..80b2e7f79acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3464,6 +3464,8 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.soft_reset = dce_v11_0_soft_reset,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 60d40201fdd1..db20012600f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -3154,6 +3154,8 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5a5fcc45e452..5b56100ec902 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3242,6 +3242,8 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.soft_reset = dce_v8_0_soft_reset,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f90905ef32c7..536287ddd2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -276,6 +276,99 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin");
+static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS3),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_HPD_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQG_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_3),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_4),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_GPM_STAT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SPP_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_A),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_B),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
@@ -3964,7 +4057,7 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[40];
+ char fw_name[53];
char ucode_prefix[30];
const char *wks = "";
int err;
@@ -4490,6 +4583,22 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
+static void gfx_v10_0_alloc_dump_mem(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+ uint32_t *ptr;
+
+ ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr == NULL) {
+ DRM_ERROR("Failed to allocate memory for IP Dump\n");
+ adev->gfx.ip_dump = NULL;
+ adev->gfx.reg_count = 0;
+ } else {
+ adev->gfx.ip_dump = ptr;
+ adev->gfx.reg_count = reg_count;
+ }
+}
+
static int gfx_v10_0_sw_init(void *handle)
{
int i, j, k, r, ring_id = 0;
@@ -4518,7 +4627,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_pipe_per_me = 2;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -4642,6 +4751,8 @@ static int gfx_v10_0_sw_init(void *handle)
gfx_v10_0_gpu_early_init(adev);
+ gfx_v10_0_alloc_dump_mem(adev);
+
return 0;
}
@@ -4694,6 +4805,8 @@ static int gfx_v10_0_sw_fini(void *handle)
gfx_v10_0_free_microcode(adev);
+ kfree(adev->gfx.ip_dump);
+
return 0;
}
@@ -8317,7 +8430,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
reg_mem_engine = 0;
} else {
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
reg_mem_engine = 1; /* pfp */
}
@@ -9154,6 +9267,36 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+
+ if (!adev->gfx.ip_dump)
+ return;
+
+ for (i = 0; i < reg_count; i++)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_reg_list_10_1[i].reg_name,
+ adev->gfx.ip_dump[i]);
+}
+
+static void gfx_v10_ip_dump(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+
+ if (!adev->gfx.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < reg_count; i++)
+ adev->gfx.ip_dump[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_10_1[i]));
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9170,6 +9313,8 @@ static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.set_clockgating_state = gfx_v10_0_set_clockgating_state,
.set_powergating_state = gfx_v10_0_set_powergating_state,
.get_clockgating_state = gfx_v10_0_get_clockgating_state,
+ .dump_ip_state = gfx_v10_ip_dump,
+ .print_ip_state = gfx_v10_ip_print,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
@@ -9186,7 +9331,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9421,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
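
Note: the new gfx_v10 dump_ip_state/print_ip_state pair snapshots every register named in gc_reg_list_10_1 into a buffer allocated at sw_init and prints it later as name/value pairs. A self-contained sketch of that table-driven snapshot-then-print split; the register names, offsets and read_reg() are made up, and in the driver the read loop is bracketed by amdgpu_gfx_off_ctrl() so GFXOFF cannot power the block down mid-read:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct reg_entry {
        const char *name;
        uint32_t offset;
    };

    /* stand-in for gc_reg_list_10_1[] */
    static const struct reg_entry reg_list[] = {
        { "GRBM_STATUS",  0x2004 },
        { "GRBM_STATUS2", 0x2002 },
        { "CP_BUSY_STAT", 0x21b4 },
    };

    /* stand-in for a register read: pretend to latch a value from hardware */
    static uint32_t read_reg(uint32_t offset)
    {
        return 0xdead0000u | offset;
    }

    struct ip_dump {
        uint32_t *vals;
        size_t count;
    };

    static int ip_dump_snapshot(struct ip_dump *d)
    {
        d->count = ARRAY_SIZE(reg_list);
        d->vals = calloc(d->count, sizeof(*d->vals));
        if (!d->vals)
            return -1;                       /* the driver just skips dumping */
        for (size_t i = 0; i < d->count; i++)
            d->vals[i] = read_reg(reg_list[i].offset);
        return 0;
    }

    static void ip_dump_print(const struct ip_dump *d)
    {
        for (size_t i = 0; i < d->count; i++)
            printf("%-20s 0x%08x\n", reg_list[i].name, d->vals[i]);
    }

    int main(void)
    {
        struct ip_dump d;
        if (ip_dump_snapshot(&d) == 0) {
            ip_dump_print(&d);
            free(d.vals);
        }
        return 0;
    }

Keeping capture and printing as two separate callbacks presumably lets the snapshot be taken at hang time and formatted later without touching the hardware again.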
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 1770e496c1b7..ad6431013c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -510,7 +510,7 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
- char ucode_prefix[30];
+ char ucode_prefix[25];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle)
gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
- WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+ soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
@@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle)
for (i = 0; i < adev->gfx.me.num_me; ++i) {
for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
- WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+ soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
}
}
}
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
@@ -5465,6 +5460,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
+ ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}
@@ -6173,6 +6169,8 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
@@ -6191,7 +6189,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6277,7 +6275,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 34f9211b2679..d0992ce9fb47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3457,6 +3457,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.soft_reset = gfx_v6_0_soft_reset,
.set_clockgating_state = gfx_v6_0_set_clockgating_state,
.set_powergating_state = gfx_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 86a4865b1ae5..541dbd70d8c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4977,6 +4977,8 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.soft_reset = gfx_v7_0_soft_reset,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 202ddda57f98..2f0e72caee1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6878,6 +6878,8 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
.get_clockgating_state = gfx_v8_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6f97a6d0e6d0..3c8c5abf35ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1249,7 +1249,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[50];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
@@ -1282,7 +1282,7 @@ out:
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[53];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -1337,7 +1337,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[50];
int err;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
@@ -6856,6 +6856,8 @@ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.set_clockgating_state = gfx_v9_0_set_clockgating_state,
.set_powergating_state = gfx_v9_0_set_powergating_state,
.get_clockgating_state = gfx_v9_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
@@ -6981,7 +6983,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7020,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 065b2bd5f5a6..3f4fd2f08163 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1909,18 +1909,7 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
-static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
@@ -1934,5 +1923,4 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
.hw_ops = &gfx_v9_4_2_ras_ops,
},
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
- .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index b53c8fd4e8cf..7b16e8cca86a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -431,16 +431,16 @@ out:
static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[15];
int r;
- chip_name = "gc_9_4_3";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
@@ -680,38 +680,44 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};
-static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_smu_type type,
+ void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
u32 instlo;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
+ /* NOTE: overwrite info.die_id with xcd id for gfx */
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
- /* NOTE: overwrite info.die_id with xcd id for gfx */
- instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
- instlo &= GENMASK(31, 1);
- report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info,
+ ACA_ERROR_TYPE_UE, 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info,
+ ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -730,7 +736,7 @@ static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_b
}
static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
- .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
+ .aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};
@@ -2398,10 +2404,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable cgcg FSM(0x0000363F) */
+ /* CGCG Hysteresis: 400us */
def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
- data = (0x36
+ data = (0x2710
<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
@@ -2410,10 +2416,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
- /* set IDLE_POLL_COUNT(0x00900100) */
+ /* set IDLE_POLL_COUNT(0x33450100)*/
def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
- (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
} else {
@@ -4010,6 +4016,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
.set_powergating_state = gfx_v9_4_3_set_powergating_state,
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 22175da0e16a..d200310d1731 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -443,6 +443,22 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev)
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
+static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ u32 status = 0;
+ struct amdgpu_vmhub *hub;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
+ return false;
+
+ hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ /* reset page fault status */
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+ return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+}
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
@@ -452,4 +468,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+ .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 49aecdcee006..77df8c9cbad2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -620,6 +620,20 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ u32 fed, status;
+
+ status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+ /* reset page fault status */
+ WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+
+ return fed;
+}
+
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
@@ -628,6 +642,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
+ .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 23b478639921..3e38d8bfcb69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1115,6 +1115,8 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.soft_reset = gmc_v6_0_soft_reset,
.set_clockgating_state = gmc_v6_0_set_clockgating_state,
.set_powergating_state = gmc_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3da7b6a2b00d..85df8fc81065 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1354,6 +1354,8 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.soft_reset = gmc_v7_0_soft_reset,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d20e5f20ee31..fc97757e33d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1717,6 +1717,8 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
.get_clockgating_state = gmc_v8_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 47b63a4ce68b..c4ec1358f3aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -548,7 +548,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
{
bool retry_fault = !!(entry->src_data[1] & 0x80);
bool write_fault = !!(entry->src_data[1] & 0x20);
- uint32_t status = 0, cid = 0, rw = 0;
+ uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
const char *mmhub_cid;
@@ -664,6 +664,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
status = RREG32(hub->vm_l2_pro_fault_status);
cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+
+ /* for fed error, kfd will handle it, return directly */
+ if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
+ return 0;
+
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
@@ -1450,7 +1457,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
adev->umc.active_mask = adev->aid_mask;
adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
- adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 2c02ae69883d..07984f7c3ae7 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -425,6 +425,8 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.soft_reset = iceland_ih_soft_reset,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index ad4ad39f128f..3cb64c8f7175 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+ /* Redirect the interrupts to IH RB1 for dGPU */
+ if (adev->irq.ih1.ring_size) {
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
+ SOURCE_ID_MATCH_ENABLE, 0x1);
+
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ }
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -549,8 +564,15 @@ static int ih_v6_0_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- adev->irq.ih1.ring_size = 0;
- adev->irq.ih2.ring_size = 0;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
+ use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ }
/* initialize ih control register offset */
ih_v6_0_init_register_offset(adev);
@@ -748,6 +770,8 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.set_clockgating_state = ih_v6_0_set_clockgating_state,
.set_powergating_state = ih_v6_0_set_powergating_state,
.get_clockgating_state = ih_v6_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index b8da0fc29378..0fbf5fa7b0f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+ /* Redirect the interrupts to IH RB1 for dGPU */
+ if (adev->irq.ih1.ring_size) {
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
+ SOURCE_ID_MATCH_ENABLE, 0x1);
+
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ }
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -550,8 +565,15 @@ static int ih_v6_1_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- adev->irq.ih1.ring_size = 0;
- adev->irq.ih2.ring_size = 0;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
+ use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ }
/* initialize ih control register offset */
ih_v6_1_init_register_offset(adev);
@@ -753,6 +775,8 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.set_clockgating_state = ih_v6_1_set_clockgating_state,
.set_powergating_state = ih_v6_1_set_powergating_state,
.get_clockgating_state = ih_v6_1_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 7aed96fa10a9..aa6235dd4f2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -749,6 +749,8 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.set_clockgating_state = ih_v7_0_set_clockgating_state,
.set_powergating_state = ih_v7_0_set_powergating_state,
.get_clockgating_state = ih_v7_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 1c8116d75f63..ef3e42f6b841 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -759,6 +759,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 99cd49ee8ef6..afeaf3c64e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -632,6 +632,8 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
@@ -652,6 +654,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a92481da60cd..1c7cf4800bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -557,6 +557,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 88ea58d5c4ab..237fe5df5a8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -719,6 +719,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 32caeb37cef9..d66af11aa66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -1053,6 +1053,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index edf5bcdd2bc9..da6bb9022b80 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -762,6 +762,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index e70200f97555..64c856bfe0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -513,6 +513,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 1e5ad1e08d2a..a626bf904926 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -1176,6 +1176,8 @@ static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.hw_fini = mes_v10_1_hw_fini,
.suspend = mes_v10_1_suspend,
.resume = mes_v10_1_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v10_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 072c478665ad..0d1407f25005 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -100,18 +100,76 @@ static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
};
+static const char *mes_v11_0_opcodes[] = {
+ "SET_HW_RSRC",
+ "SET_SCHEDULING_CONFIG",
+ "ADD_QUEUE",
+ "REMOVE_QUEUE",
+ "PERFORM_YIELD",
+ "SET_GANG_PRIORITY_LEVEL",
+ "SUSPEND",
+ "RESUME",
+ "RESET",
+ "SET_LOG_BUFFER",
+ "CHANGE_GANG_PRORITY",
+ "QUERY_SCHEDULER_STATUS",
+ "PROGRAM_GDS",
+ "SET_DEBUG_VMID",
+ "MISC",
+ "UPDATE_ROOT_PAGE_TABLE",
+ "AMD_LOG",
+};
+
+static const char *mes_v11_0_misc_opcodes[] = {
+ "WRITE_REG",
+ "INV_GART",
+ "QUERY_STATUS",
+ "READ_REG",
+ "WAIT_REG_MEM",
+ "SET_SHADER_DEBUGGER",
+};
+
+static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt)
+{
+ const char *op_str = NULL;
+
+ if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes))
+ op_str = mes_v11_0_opcodes[x_pkt->header.opcode];
+
+ return op_str;
+}
+
+static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
+{
+ const char *op_str = NULL;
+
+ if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
+ (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes)))
+ op_str = mes_v11_0_misc_opcodes[x_pkt->opcode];
+
+ return op_str;
+}
+
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
int ndw = size / 4;
signed long r;
- union MESAPI__ADD_QUEUE *x_pkt = pkt;
+ union MESAPI__MISC *x_pkt = pkt;
struct MES_API_STATUS *api_status;
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
unsigned long flags;
- signed long timeout = adev->usec_timeout;
+ signed long timeout = 3000000; /* 3000 ms */
+ const char *op_str, *misc_op_str;
+ u32 fence_offset;
+ u64 fence_gpu_addr;
+ u64 *fence_ptr;
+ int ret;
+
+ if (x_pkt->header.opcode >= MES_SCH_API_MAX)
+ return -EINVAL;
if (amdgpu_emu_mode) {
timeout *= 100;
@@ -121,27 +179,52 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
}
BUG_ON(size % 4 != 0);
+ ret = amdgpu_device_wb_get(adev, &fence_offset);
+ if (ret)
+ return ret;
+ fence_gpu_addr =
+ adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
spin_lock_irqsave(&mes->ring_lock, flags);
if (amdgpu_ring_alloc(ring, ndw)) {
spin_unlock_irqrestore(&mes->ring_lock, flags);
+ amdgpu_device_wb_free(adev, fence_offset);
return -ENOMEM;
}
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
- api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;
+ api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_value = 1;
amdgpu_ring_write_multiple(ring, pkt, ndw);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
- DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
+ op_str = mes_v11_0_get_op_string(x_pkt);
+ misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
+
+ if (misc_op_str)
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ else if (op_str)
+ dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
+ else
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
- timeout);
+ r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
+ amdgpu_device_wb_free(adev, fence_offset);
if (r < 1) {
- DRM_ERROR("MES failed to response msg=%d\n",
- x_pkt->header.opcode);
+
+ if (misc_op_str)
+ dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
+ op_str, misc_op_str);
+ else if (op_str)
+ dev_err(adev->dev, "MES failed to respond to msg=%s\n",
+ op_str);
+ else
+ dev_err(adev->dev, "MES failed to respond to msg=%d\n",
+ x_pkt->header.opcode);
while (halt_if_hws_hang)
schedule();
@@ -411,14 +494,47 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
- mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ if (amdgpu_mes_log_enable) {
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+ mes->event_log_gpu_addr;
+ }
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
+static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
+{
+ int size = 128 * PAGE_SIZE;
+ int ret = 0;
+ struct amdgpu_device *adev = mes->adev;
+ union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
+ memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
+
+ mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
+ mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_set_hw_res_pkt.enable_mes_info_ctx = 1;
+
+ ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &mes->resource_1,
+ &mes->resource_1_gpu_addr,
+ &mes->resource_1_addr);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
+ return ret;
+ }
+
+ mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
+ mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
+ offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -1200,6 +1316,14 @@ static int mes_v11_0_hw_init(void *handle)
if (r)
goto failure;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+ DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+ goto failure;
+ }
+ }
+
r = mes_v11_0_query_sched_status(&adev->mes);
if (r) {
DRM_ERROR("MES is busy\n");
@@ -1223,6 +1347,11 @@ failure:
static int mes_v11_0_hw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
+ &adev->mes.resource_1_addr);
+ }
return 0;
}
@@ -1288,6 +1417,8 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.hw_fini = mes_v11_0_hw_fini,
.suspend = mes_v11_0_suspend,
.resume = mes_v11_0_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
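The reworked MES submit path above no longer reuses the ring's fence sequence: it allocates a private write-back slot, points the packet's API status fence address at it with an expected value of 1, and polls that 64-bit word until the firmware writes it or the 3000 ms budget runs out. A self-contained sketch of the shape of that wait, with a plain memory word standing in for the write-back slot and a simple poll budget standing in for the real timeout handling:

#include <stdint.h>
#include <stdio.h>

/* Poll a 64-bit completion word until it reaches `seq` or the poll budget is
 * exhausted. This mirrors the shape of the new write-back fence wait; the
 * driver itself uses amdgpu_mes_fence_wait_polling() on a slot the MES
 * firmware writes to. */
static int fence_wait_polling(volatile uint64_t *fence, uint64_t seq,
			      long max_polls)
{
	while (max_polls-- > 0) {
		if (*fence >= seq)
			return 0;       /* firmware wrote the completion value */
	}
	return -1;                      /* gave up: treat as a timeout */
}

int main(void)
{
	volatile uint64_t fence = 0;

	fence = 1;   /* pretend the firmware signalled completion */
	printf("wait: %d\n", fence_wait_polling(&fence, 1, 3000000)); /* 0 */

	fence = 0;   /* never signalled: the wait gives up */
	printf("wait: %d\n", fence_wait_polling(&fence, 1, 1000));    /* -1 */
	return 0;
}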
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index c0fc44cdd658..7a1ff298417a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -559,6 +559,20 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
}
+static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst)
+{
+ u32 fed, status;
+
+ status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+ /* reset page fault status */
+ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
+ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+
+ return fed;
+}
+
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.get_fb_location = mmhub_v1_8_get_fb_location,
.init = mmhub_v1_8_init,
@@ -568,6 +582,7 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
+ .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
@@ -706,28 +721,32 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};
-static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
-
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
-
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
/* reference to smu driver if header file */
@@ -741,7 +760,7 @@ static int mmhub_v1_8_err_codes[] = {
};
static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -760,7 +779,7 @@ static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_b
}
static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
- .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report,
+ .aca_bank_parser = mmhub_v1_8_aca_bank_parser,
.aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index a2bd2c3b1ef9..0c7275bca8f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
timeout -= 10;
} while (timeout > 1);
+ dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 77f5b55decf9..aba00d961627 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -309,6 +309,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
timeout -= 10;
} while (timeout > 1);
+ dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
@@ -444,7 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
amdgpu_virt_fini_data_exchange(adev);
xgpu_nv_send_access_requests_with_param(adev,
IDH_RAS_POISON, block, 0, 0);
- amdgpu_virt_init_data_exchange(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4178f4e5dad7..b281462093f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -713,6 +713,8 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.set_clockgating_state = navi10_ih_set_clockgating_state,
.set_powergating_state = navi10_ih_set_powergating_state,
.get_clockgating_state = navi10_ih_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4d7976b77767..12e54047bf79 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -110,7 +110,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -121,7 +121,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -199,7 +199,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -1131,4 +1131,6 @@ static const struct amd_ip_funcs nv_common_ip_funcs = {
.set_clockgating_state = nv_common_set_clockgating_state,
.set_powergating_state = nv_common_set_powergating_state,
.get_clockgating_state = nv_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 78a95f8f370b..f08a32c18694 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -169,7 +169,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
{
- return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
+ /* dbg_drv was renamed to had_drv in psp v14 */
+ return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV);
}
static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
@@ -177,6 +178,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
+static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV);
+}
static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
{
@@ -653,6 +658,7 @@ static const struct psp_funcs psp_v14_0_funcs = {
.bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
+ .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv,
.bootloader_load_sos = psp_v14_0_bootloader_load_sos,
.ring_create = psp_v14_0_ring_create,
.ring_stop = psp_v14_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 07e19caf2bc1..ac8a9b9b3e52 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -1113,6 +1113,8 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
@@ -1176,7 +1178,7 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: unused
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -1186,7 +1188,7 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2ad615be4bb3..b8ebdc4ae6f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1553,6 +1553,8 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
.get_clockgating_state = sdma_v3_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
@@ -1616,7 +1618,7 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: unused
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -1626,7 +1628,7 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 43775cb67ff5..101038395c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2021,6 +2021,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return instance;
+
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
@@ -2448,7 +2451,7 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine (VEGA10/12).
* Used by the amdgpu ttm implementation to move pages if
@@ -2458,11 +2461,11 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 34237a1b1f2e..341b24d8320b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@ -1602,19 +1603,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
- DRAM_ECC_INT_ENABLE, 0);
- WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
- break;
- /* sdma ecc interrupt is enabled by default
- * driver doesn't need to do anything to
- * enable the interrupt */
- case AMDGPU_IRQ_STATE_ENABLE:
- default:
- break;
- }
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}
@@ -1954,7 +1945,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1964,11 +1955,11 @@ static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
@@ -2189,35 +2180,39 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
};
-static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
-
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
/* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */
static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -2236,7 +2231,7 @@ static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_
}
static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
- .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report,
+ .aca_bank_parser = sdma_v4_4_2_aca_bank_parser,
.aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
};
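The ACA bank parsers introduced above for gfx_v9_4_3, mmhub_v1_8 and sdma_v4_4_2 all share the same shape: decode the bank info, then log one uncorrectable event per UE bank but take the correctable count from the MISC0 error-count field. A reduced sketch of that dispatch, with simplified types in place of the aca_* structures and an assumed MISC0 layout (error count in bits [43:32], for illustration only):

#include <stdint.h>
#include <stdio.h>

enum smu_type { SMU_TYPE_UE, SMU_TYPE_CE };

/* Assumed MISC0 layout for illustration: error count in bits [43:32]. */
static uint64_t misc0_errcnt(uint64_t misc0)
{
	return (misc0 >> 32) & 0xfff;
}

/* UE banks are logged as a single fatal event; CE banks contribute their
 * hardware-maintained count. Unknown types are rejected, as in the driver. */
static int parse_bank(enum smu_type type, uint64_t misc0, uint64_t *count)
{
	switch (type) {
	case SMU_TYPE_UE:
		*count = 1;
		return 0;
	case SMU_TYPE_CE:
		*count = misc0_errcnt(misc0);
		return 0;
	default:
		return -22; /* -EINVAL */
	}
}

int main(void)
{
	uint64_t count;

	parse_bank(SMU_TYPE_CE, (uint64_t)5 << 32, &count);
	printf("ce count=%llu\n", (unsigned long long)count); /* 5 */
	parse_bank(SMU_TYPE_UE, 0, &count);
	printf("ue count=%llu\n", (unsigned long long)count); /* 1 */
	return 0;
}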
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 883e8a1b8a40..b7d33d78bce0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -999,7 +999,8 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1805,7 +1806,7 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine (NAVI10).
* Used by the amdgpu ttm implementation to move pages if
@@ -1815,11 +1816,11 @@ static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 42f4bd250def..cc9e961f0078 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
@@ -835,7 +839,8 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1747,7 +1752,7 @@ static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1757,11 +1762,11 @@ static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 361835a61f2e..c833b6b8373b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -507,6 +507,13 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
/* set minor_ptr_update to 0 after wptr programed */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
/* Set up RESP_MODE to non-copy addresses */
temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
@@ -854,7 +861,8 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1567,7 +1575,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1577,11 +1585,11 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
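Across the SDMA generations above, the copy callback now takes a copy_flags bitmask instead of a single tmz bool, presumably so further per-copy attributes can be added without another signature change; the TMZ test itself stays a plain bit check. A trivial sketch of that check, assuming only a hypothetical TMZ flag bit (the real value comes from the amdgpu headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bit only; the driver uses AMDGPU_COPY_FLAGS_TMZ. */
#define COPY_FLAGS_TMZ (1u << 0)

/* Returns the TMZ header bit to OR into the copy packet header. */
static uint32_t tmz_header_bit(uint32_t copy_flags)
{
	return (copy_flags & COPY_FLAGS_TMZ) ? 1 : 0;
}

int main(void)
{
	printf("%u\n", tmz_header_bit(COPY_FLAGS_TMZ)); /* 1 */
	printf("%u\n", tmz_header_bit(0));              /* 0 */
	return 0;
}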
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 23e4ef4fff7c..85235470e872 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1409,9 +1409,9 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool si_asic_supports_baco(struct amdgpu_device *adev)
+static int si_asic_supports_baco(struct amdgpu_device *adev)
{
- return false;
+ return 0;
}
static enum amd_reset_method
@@ -2706,6 +2706,8 @@ static const struct amd_ip_funcs si_common_ip_funcs = {
.soft_reset = si_common_soft_reset,
.set_clockgating_state = si_common_set_clockgating_state,
.set_powergating_state = si_common_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version si_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9aa0e11ee673..11db5b755832 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -708,6 +708,8 @@ static const struct amd_ip_funcs si_dma_ip_funcs = {
.soft_reset = si_dma_soft_reset,
.set_clockgating_state = si_dma_set_clockgating_state,
.set_powergating_state = si_dma_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
@@ -761,7 +763,7 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: is this a secure operation
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -771,7 +773,7 @@ static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, byte_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index cada9f300a7f..5237395e4fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -296,6 +296,8 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
.soft_reset = si_ih_soft_reset,
.set_clockgating_state = si_ih_set_clockgating_state,
.set_powergating_state = si_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs si_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 93f6772d1b24..481217c32d85 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -92,7 +92,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
- return r;
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c
new file mode 100644
index 000000000000..2a51a70d4846
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v14_0_2.h"
+#include "smuio/smuio_14_0_2_offset.h"
+#include "smuio/smuio_14_0_2_sh_mask.h"
+#include <linux/preempt.h>
+
+static u32 smuio_v14_0_2_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
+}
+
+static u32 smuio_v14_0_2_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
+}
+
+static u64 smuio_v14_0_2_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ u64 clock;
+ u64 clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
+
+ preempt_disable();
+ clock_counter_hi_pre = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ /* the clock counter may be updated while polling the counters */
+ clock_counter_hi_after = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ if (clock_counter_hi_pre != clock_counter_hi_after)
+ clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ preempt_enable();
+
+ clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
+
+ return clock;
+}
+
+const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs = {
+ .get_rom_index_offset = smuio_v14_0_2_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v14_0_2_get_rom_data_offset,
+ .get_gpu_clock_counter = smuio_v14_0_2_get_gpu_clock_counter,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h
new file mode 100644
index 000000000000..6e617f832d90
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V14_0_2_H__
+#define __SMUIO_V14_0_2_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs;
+
+#endif /* __SMUIO_V14_0_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index dec81ccf6240..170f02e96717 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -143,7 +143,7 @@ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -156,7 +156,7 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode =
static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -502,7 +502,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
- bool baco_reset = false;
+ int baco_reset = 0;
bool connected_to_cpu = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -540,7 +540,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (ras && adev->ras_enabled &&
adev->pm.fw_version <= 0x283400)
- baco_reset = false;
+ baco_reset = 0;
} else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
}
@@ -620,7 +620,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
}
}
-static bool soc15_supports_baco(struct amdgpu_device *adev)
+static int soc15_supports_baco(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
@@ -628,13 +628,13 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20) {
if (adev->psp.sos.fw_version >= 0x80067)
return amdgpu_dpm_is_baco_supported(adev);
- return false;
+ return 0;
} else {
return amdgpu_dpm_is_baco_supported(adev);
}
break;
default:
- return false;
+ return 0;
}
}
@@ -1501,4 +1501,6 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = {
.set_clockgating_state = soc15_common_set_clockgating_state,
.set_powergating_state = soc15_common_set_powergating_state,
.get_clockgating_state= soc15_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 1444b7765e4b..282584a48be0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -88,6 +88,8 @@ struct soc15_ras_field_entry {
};
#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+#define SOC15_REG_ENTRY_STR(ip, inst, reg) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, #reg }
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
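
SOC15_REG_ENTRY_STR extends the existing entry macro with a stringified register name (the trailing #reg), which is what lets register-dump tables report entries by name. A hedged, self-contained illustration of the same stringification trick follows; the struct and the regFOO_* tokens are made up for the example.

#include <stdio.h>

struct reg_entry {
	unsigned int offset;
	const char *name;
};

/* #reg turns the register token into a printable string at compile time. */
#define REG_ENTRY_STR(reg)	{ reg##_OFFSET, #reg }

/* Hypothetical register offsets, for illustration only. */
#define regFOO_CTRL_OFFSET	0x1234
#define regFOO_STATUS_OFFSET	0x1238

static const struct reg_entry dump_list[] = {
	REG_ENTRY_STR(regFOO_CTRL),
	REG_ENTRY_STR(regFOO_STATUS),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(dump_list) / sizeof(dump_list[0]); i++)
		printf("%s @ 0x%x\n", dump_list[i].name, dump_list[i].offset);
	return 0;
}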
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 581a3bd11481..fb6797467571 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -72,7 +72,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = {
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -80,7 +80,7 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x1;
+ else
+ adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset dGPU side.
+ * 2) S3 suspend got aborted and TOS is active.
+ */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend aborted, resetting...");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
@@ -959,4 +985,6 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = {
.set_clockgating_state = soc21_common_set_clockgating_state,
.set_powergating_state = soc21_common_set_powergating_state,
.get_clockgating_state = soc21_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
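
soc21_need_reset_on_resume decides whether to reset by sampling the PSP sign-of-life register twice with a delay: if the value changed, firmware is still active after an aborted S3 suspend and the ASIC is reset before re-init. A simplified userspace sketch of that decision is shown below; sample_sol_reg() and sol_value are hypothetical stand-ins for the MP0 C2PMSG_81 read.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the firmware sign-of-life register read. */
static uint32_t sol_value;
static uint32_t sample_sol_reg(void) { return sol_value; }

/* True when the register is still ticking, i.e. the suspend was aborted
 * with firmware active and a reset is needed before re-init. */
static bool need_reset_on_resume(bool is_dgpu, bool in_s3, bool suspend_complete)
{
	if (!is_dgpu || !in_s3 || suspend_complete)
		return false;

	uint32_t first = sample_sol_reg();
	usleep(100 * 1000);		/* mirrors the msleep(100) in the driver */
	uint32_t second = sample_sol_reg();

	return first != second;
}

int main(void)
{
	printf("reset needed: %d\n", need_reset_on_resume(true, true, false));
	return 0;
}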
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 056d4df8fa1f..3ac56a9645eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -146,6 +146,7 @@ struct ta_ras_mca_addr {
uint32_t ch_inst;
uint32_t umc_inst;
uint32_t node_inst;
+ uint32_t socket_id;
};
struct ta_ras_phy_addr {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 450b6e831509..24d49d813607 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -486,6 +486,8 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 77af4e25ff46..bfe61d86ee6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -28,27 +28,7 @@
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"
-const uint32_t
- umc_v12_0_channel_idx_tbl[]
- [UMC_V12_0_UMC_INSTANCE_NUM]
- [UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
- {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
- {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
- {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
- {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
- {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
- {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
- {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
- {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
- };
-
-/* mapping of MCA error address to normalized address */
-static const uint32_t umc_v12_0_ma2na_mapping[] = {
- 0, 5, 6, 8, 9, 14, 12, 13,
- 10, 11, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28,
- 24, 7, 29, 30,
-};
+#define MAX_ECC_NUM_PER_RETIREMENT 32
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
uint32_t node_inst,
@@ -192,99 +172,74 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static bool umc_v12_0_bit_wise_xor(uint32_t val)
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in)
{
- bool result = 0;
- int i;
+ uint32_t col, row, row_xor, bank, channel_index;
+ uint64_t soc_pa, retired_page, column, err_addr;
+ struct ta_ras_query_address_output addr_out;
- for (i = 0; i < 32; i++)
- result = result ^ ((val >> i) & 0x1);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
- return result;
-}
+ return;
+ }
+
+ soc_pa = addr_out.pa.pa;
+ bank = addr_out.pa.bank;
+ channel_index = addr_out.pa.channel_idx;
-static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst,
- struct ta_ras_query_address_output *addr_out)
-{
- uint32_t channel_index, i;
- uint64_t na, soc_pa;
- uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
- uint32_t bank0, bank1, bank2, bank3, bank;
-
- bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
- bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
- bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
- bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
+ row_xor = row ^ (0x1ULL << 13);
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+ /* include column bit 0 and 1 */
+ col &= 0x3;
+ col |= (column << 2);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
- /* apply bank hash algorithm */
- bank0 =
- bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
- bank1 =
- bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
- bank2 =
- bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
- bank3 =
- bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
-
- bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
- err_addr &= ~0x3c0ULL;
- err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
-
- na = 0x0;
- /* convert mca error address to normalized address */
- for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
- na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
-
- channel_index =
- adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
- adev->umc.channel_inst_num +
- umc_inst * adev->umc.channel_inst_num +
- ch_inst];
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_32KB_BLOCK(na) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(na);
-
- /* the umc channel bits are not original values, they are hashed */
- UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
-
- addr_out->pa.pa = soc_pa;
- addr_out->pa.bank = bank;
- addr_out->pa.channel_idx = channel_index;
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
+ }
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint64_t err_addr,
- uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst)
+static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
+ struct ta_ras_query_address_input *addr_in,
+ uint64_t *pfns, int len)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
- struct ta_ras_query_address_input addr_in;
+ uint64_t soc_pa, retired_page, column, err_addr;
struct ta_ras_query_address_output addr_out;
+ uint32_t pos = 0;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch_inst;
- addr_in.ma.umc_inst = umc_inst;
- addr_in.ma.node_inst = node_inst;
-
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
- /* fallback to old path if fail to get pa from psp */
- umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
- node_inst, &addr_out);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
+ return 0;
+ }
soc_pa = addr_out.pa.pa;
bank = addr_out.pa.bank;
@@ -302,33 +257,42 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
}
+
+ return pos;
}
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ struct ta_ras_query_address_input addr_in;
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
uint64_t mc_umc_addrt0;
- struct ras_err_data *err_data = (struct ras_err_data *)data;
uint64_t umc_reg_offset =
get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
@@ -357,8 +321,19 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- umc_v12_0_convert_error_address(adev, err_data, err_addr,
- ch_inst, umc_inst, node_inst);
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
+ else
+ addr_in.ma.socket_id = 0;
+
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch_inst;
+ addr_in.ma.umc_inst = umc_inst;
+ addr_in.ma.node_inst = node_inst;
+
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in);
}
/* clear umc status */
@@ -401,13 +376,20 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
return 0;
}
+#ifdef TO_BE_REMOVED
static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
+ struct ras_query_context qctx;
+
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
+
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx);
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx);
}
static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
@@ -418,12 +400,16 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
struct ras_err_info *err_info;
struct ras_err_addr *mca_err_addr, *tmp;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct ta_ras_query_address_input addr_in;
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
if (list_empty(&err_info->err_addr_list))
continue;
+ addr_in.ma.node_inst = err_info->mcm_info.die_id;
+ addr_in.ma.socket_id = err_info->mcm_info.socket_id;
+
list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
mc_umc_status = mca_err_addr->err_status;
if (mc_umc_status &&
@@ -439,6 +425,10 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo);
+ addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo);
+
dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
mca_ipid,
err_info->mcm_info.die_id,
@@ -447,10 +437,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
err_addr);
umc_v12_0_convert_error_address(adev,
- err_data, err_addr,
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- err_info->mcm_info.die_id);
+ err_data, &addr_in);
}
/* Delete error address node from list and free memory */
@@ -458,6 +445,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
}
}
}
+#endif
static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, void *ras_error_status)
@@ -498,43 +486,49 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
.query_ras_error_address = umc_v12_0_query_ras_error_address,
};
-static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
struct amdgpu_device *adev = handle->adev;
- u64 status;
+ struct aca_bank_info info;
+ enum aca_error_type err_type;
+ u64 status, count;
+ u32 ext_error_code;
int ret;
- ret = aca_bank_info_decode(bank, &report->info);
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if (umc_v12_0_is_deferred_error(adev, status))
+ err_type = ACA_ERROR_TYPE_DEFERRED;
+ else if (umc_v12_0_is_uncorrectable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_UE;
+ else if (umc_v12_0_is_correctable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_CE;
+ else
+ return 0;
+
+ ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- switch (type) {
- case ACA_ERROR_TYPE_UE:
- if (umc_v12_0_is_uncorrectable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- case ACA_ERROR_TYPE_CE:
- if (umc_v12_0_is_correctable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- default:
- return -EINVAL;
- }
+ amdgpu_umc_update_ecc_status(adev,
+ bank->regs[ACA_REG_IDX_STATUS],
+ bank->regs[ACA_REG_IDX_IPID],
+ bank->regs[ACA_REG_IDX_ADDR]);
- return 0;
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+
+ return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
- .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+ .aca_bank_parser = umc_v12_0_aca_bank_parser,
};
const struct aca_info umc_v12_0_aca_info = {
.hwip = ACA_HWIP_TYPE_UMC,
- .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
.bank_ops = &umc_v12_0_aca_bank_ops,
};
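
The reworked bank parser above first classifies the bank from its STATUS value (deferred, then uncorrectable, then correctable), and only then decodes the bank info and logs a count, taking the count from MISC0 when the extended error code is zero and 1 otherwise. A compact sketch of that ordering is given below; the bit positions in the predicates are hypothetical, not the real MCA status layout.

#include <stdint.h>
#include <stdio.h>

enum err_type { ERR_NONE, ERR_DEFERRED, ERR_UE, ERR_CE };

/* Hypothetical status decoders; the driver checks real MCA status bits. */
static int is_deferred(uint64_t s)      { return (int)((s >> 62) & 1); }
static int is_uncorrectable(uint64_t s) { return (int)((s >> 61) & 1); }
static int is_correctable(uint64_t s)   { return (int)((s >> 60) & 1); }

static enum err_type classify_bank(uint64_t status)
{
	/* Order matters: deferred takes precedence over UE, UE over CE. */
	if (is_deferred(status))
		return ERR_DEFERRED;
	if (is_uncorrectable(status))
		return ERR_UE;
	if (is_correctable(status))
		return ERR_CE;
	return ERR_NONE;	/* nothing to log for this bank */
}

static uint64_t bank_error_count(uint32_t ext_error_code, uint64_t misc0_errcnt)
{
	/* Extended error code 0 carries a hardware count; otherwise count 1. */
	return ext_error_code == 0 ? misc0_errcnt : 1ULL;
}

int main(void)
{
	printf("type=%d count=%llu\n", classify_bank(1ULL << 62),
	       (unsigned long long)bank_error_count(0, 5));
	return 0;
}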
@@ -554,6 +548,152 @@ static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common
return 0;
}
+static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint16_t hwid, mcatype;
+ struct ta_ras_query_address_input addr_in;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ uint64_t err_addr, hash_val = 0;
+ struct ras_ecc_err *ecc_err;
+ int count;
+ int ret;
+
+ hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
+ mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
+
+ if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ return 0;
+
+ if (!status)
+ return 0;
+
+ if (!umc_v12_0_is_deferred_error(adev, status))
+ return 0;
+
+ err_addr = REG_GET_FIELD(addr,
+ MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ dev_info(adev->dev,
+ "UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
+ ipid,
+ MCA_IPID_2_SOCKET_ID(ipid),
+ MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_UMC_INST(ipid),
+ MCA_IPID_2_UMC_CH(ipid),
+ err_addr);
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
+ addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
+ addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
+
+ count = umc_v12_0_convert_err_addr(adev,
+ &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ ret = amdgpu_umc_build_pages_hash(adev,
+ page_pfn, count, &hash_val);
+ if (ret) {
+ dev_err(adev->dev, "Fail to build error pages hash\n");
+ return ret;
+ }
+
+ ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
+ if (!ecc_err)
+ return -ENOMEM;
+
+ ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
+ if (!ecc_err->err_pages.pfn) {
+ kfree(ecc_err);
+ return -ENOMEM;
+ }
+
+ memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
+ ecc_err->err_pages.count = count;
+
+ ecc_err->hash_index = hash_val;
+ ecc_err->status = status;
+ ecc_err->ipid = ipid;
+ ecc_err->addr = addr;
+
+ ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
+ if (ret) {
+ if (ret == -EEXIST)
+ con->umc_ecc_log.de_updated = true;
+ else
+ dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
+
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ return ret;
+ }
+
+ con->umc_ecc_log.de_updated = true;
+
+ return 0;
+}
+
+static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
+ struct ras_ecc_err *ecc_err, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t i = 0;
+ int ret = 0;
+
+ if (!err_data || !ecc_err)
+ return -EINVAL;
+
+ for (i = 0; i < ecc_err->err_pages.count; i++) {
+ ret = amdgpu_umc_fill_error_record(err_data,
+ ecc_err->addr,
+ ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ MCA_IPID_2_UMC_INST(ecc_err->ipid));
+ if (ret)
+ break;
+ }
+
+ err_data->de_count++;
+
+ return ret;
+}
+
+static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ struct radix_tree_root *ecc_tree;
+ int new_detected, ret, i;
+
+ ecc_tree = &con->umc_ecc_log.de_page_tree;
+
+ mutex_lock(&con->umc_ecc_log.lock);
+ new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
+ 0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
+ if (ret) {
+ dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
+ break;
+ }
+ radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&con->umc_ecc_log.lock);
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -561,8 +701,8 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
- .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
- .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+ .ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
+ .update_ecc_status = umc_v12_0_update_ecc_status,
};
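
umc_v12_0_convert_err_addr expands one bad SoC physical address into a set of retired pages by clearing the C2..C4 column bits, enumerating all eight [C4 C3 C2] combinations, and additionally flipping R13 for each, which is why UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL is twice UMC_V12_0_NA_MAP_PA_NUM. The standalone sketch below mirrors that expansion; the page shift and the example address are assumptions for illustration, and it simply returns the number of PFNs written rather than reproducing the driver's error handling.

#include <stdint.h>
#include <stdio.h>

#define PA_C2_BIT	15
#define PA_C4_BIT	21
#define PA_R13_BIT	35
#define NA_MAP_PA_NUM	8
#define PAGE_SHIFT	12	/* assumed GPU page shift for the sketch */

/* Expand one SoC physical address into candidate retired-page PFNs. */
static int expand_bad_pages(uint64_t soc_pa, uint64_t *pfns, int len)
{
	int pos = 0;

	/* Clear [C4 C3 C2] so the loop can enumerate every combination. */
	soc_pa &= ~(0x3ULL << PA_C2_BIT);
	soc_pa &= ~(0x1ULL << PA_C4_BIT);

	for (uint64_t column = 0; column < NA_MAP_PA_NUM; column++) {
		uint64_t page = soc_pa | ((column & 0x3) << PA_C2_BIT);

		page |= ((column >> 2) & 0x1) << PA_C4_BIT;
		if (pos >= len)
			return pos;
		pfns[pos++] = page >> PAGE_SHIFT;

		/* The row hash means R13 may differ too, so record both. */
		page ^= 1ULL << PA_R13_BIT;
		if (pos >= len)
			return pos;
		pfns[pos++] = page >> PAGE_SHIFT;
	}
	return pos;
}

int main(void)
{
	uint64_t pfns[NA_MAP_PA_NUM * 2];
	int n = expand_bad_pages(0x123456789ULL, pfns, NA_MAP_PA_NUM * 2);

	for (int i = 0; i < n; i++)
		printf("pfn[%d] = 0x%llx\n", i, (unsigned long long)pfns[i]);
	return 0;
}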
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 5973bfb14fce..b4974793850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,83 +55,38 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* bank bits in MCA error address */
-#define UMC_V12_0_MCA_B0_BIT 6
-#define UMC_V12_0_MCA_B1_BIT 7
-#define UMC_V12_0_MCA_B2_BIT 8
-#define UMC_V12_0_MCA_B3_BIT 9
+
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R13_BIT 35
-/* channel index bits in SOC physical address */
-#define UMC_V12_0_PA_CH4_BIT 12
-#define UMC_V12_0_PA_CH5_BIT 13
-#define UMC_V12_0_PA_CH6_BIT 14
-
-/* bank hash settings */
-#define UMC_V12_0_XOR_EN0 1
-#define UMC_V12_0_XOR_EN1 1
-#define UMC_V12_0_XOR_EN2 1
-#define UMC_V12_0_XOR_EN3 1
-#define UMC_V12_0_COL_XOR0 0x0
-#define UMC_V12_0_COL_XOR1 0x0
-#define UMC_V12_0_COL_XOR2 0x800
-#define UMC_V12_0_COL_XOR3 0x1000
-#define UMC_V12_0_ROW_XOR0 0x11111
-#define UMC_V12_0_ROW_XOR1 0x22222
-#define UMC_V12_0_ROW_XOR2 0x4444
-#define UMC_V12_0_ROW_XOR3 0x8888
-
-/* channel hash settings */
-#define UMC_V12_0_HASH_4K 0
-#define UMC_V12_0_HASH_64K 1
-#define UMC_V12_0_HASH_2M 1
-#define UMC_V12_0_HASH_1G 1
-#define UMC_V12_0_HASH_1T 1
-
-/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA),
- * hash bit is only effective when related setting is enabled
- */
-#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
- (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
- (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
- (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
- (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K))
-#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
- (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
- } while (0)
+
+#define MCA_UMC_HWID_V12_0 0x96
+#define MCA_UMC_MCATYPE_V12_0 0x0
#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
(((_ipid_lo) >> 12) & 0xF))
#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+#define MCA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+
+#define MCA_IPID_2_UMC_CH(ipid) \
+ (MCA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define MCA_IPID_2_UMC_INST(ipid) \
+ (MCA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define MCA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
-extern const uint32_t
- umc_v12_0_channel_idx_tbl[]
- [UMC_V12_0_UMC_INSTANCE_NUM]
- [UMC_V12_0_CHANNEL_INSTANCE_NUM];
-
extern struct amdgpu_umc_ras umc_v12_0_ras;
#endif
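
The new MCA_IPID_2_* helpers all compose on top of field extractions of InstanceIdHi/InstanceIdLo, so the die, UMC instance and channel can be recovered from a single IPID value. The sketch below shows the same composition style; the hi/lo field masks and the example IPID are placeholders rather than the real MCMP1_IPIDT0 layout, while the channel/instance bit picks simply mirror the macros above.

#include <stdint.h>
#include <stdio.h>

/* Placeholder field positions; the real layout comes from MCMP1_IPIDT0. */
#define INSTANCE_ID_LO_MASK	0x00000000ffffffffULL
#define INSTANCE_ID_LO_SHIFT	0
#define INSTANCE_ID_HI_MASK	0xffffffff00000000ULL
#define INSTANCE_ID_HI_SHIFT	32

#define GET_FIELD(val, mask, shift)	(((val) & (mask)) >> (shift))

#define IPID_2_INST_LO(ipid)	GET_FIELD(ipid, INSTANCE_ID_LO_MASK, INSTANCE_ID_LO_SHIFT)
#define IPID_2_INST_HI(ipid)	GET_FIELD(ipid, INSTANCE_ID_HI_MASK, INSTANCE_ID_HI_SHIFT)

/* Small helpers built from the two instance-id fields. */
#define IPID_2_UMC_INST(ipid)	((IPID_2_INST_LO(ipid) >> 21) & 0x7)
#define IPID_2_UMC_CH(ipid)	((((IPID_2_INST_LO(ipid) >> 20) & 0x1) * 4) + \
				 ((IPID_2_INST_LO(ipid) >> 12) & 0xF))
#define IPID_2_DIE_ID(ipid)	((IPID_2_INST_HI(ipid) >> 2) & 0x3)

int main(void)
{
	uint64_t ipid = 0x0000000400321000ULL;	/* made-up example value */

	printf("die %llu inst %llu ch %llu\n",
	       (unsigned long long)IPID_2_DIE_ID(ipid),
	       (unsigned long long)IPID_2_UMC_INST(ipid),
	       (unsigned long long)IPID_2_UMC_CH(ipid));
	return 0;
}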
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index c4c77257710c..a32f87992f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -442,11 +442,6 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *ade
umc_v8_10_ecc_info_query_error_address, ras_error_status);
}
-static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr)
-{
- hdr->version = RAS_TABLE_VER_V2_1;
-}
-
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -460,5 +455,4 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
- .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
index 8e7b763cfdb7..bd57896ab85d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
@@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+ ring->wptr = 0;
+
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
@@ -248,7 +250,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -271,6 +273,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
+ set_hw_resources.collaboration_mask_vpe =
+ adev->vpe.collaborate_mode ? 0x3 : 0x0;
set_hw_resources.engine_mask = umsch->engine_mask;
set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
@@ -346,6 +350,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
add_queue.h_queue = input_ptr->h_queue;
add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
add_queue.is_context_suspended = input_ptr->is_context_suspended;
+ add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index a6006f231c65..805d6662c88b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -819,6 +819,8 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.soft_reset = uvd_v3_1_soft_reset,
.set_clockgating_state = uvd_v3_1_set_clockgating_state,
.set_powergating_state = uvd_v3_1_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1aa09ad7bbe3..3f19c606f4de 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -769,6 +769,8 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.soft_reset = uvd_v4_2_soft_reset,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index f8b229b75435..efd903c21d48 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -877,6 +877,8 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
.get_clockgating_state = uvd_v5_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index a9a6880f44e3..495de5068455 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1545,6 +1545,8 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
.get_clockgating_state = uvd_v6_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index a08e7abca423..66fada199bda 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -626,6 +626,8 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.soft_reset = vce_v2_0_soft_reset,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index f4760748d349..32517c364cf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -913,6 +913,8 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
.get_clockgating_state = vce_v3_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index aaceecd558cf..cb253bd3a2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1902,6 +1902,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e357d8cf0c01..f18fd61c435e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -2008,6 +2008,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 1cd8a94b0fbc..baec14bde2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1901,6 +1901,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -1921,6 +1923,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 8f82fb887e9c..6b31cf4b8aac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -359,6 +359,7 @@ static int vcn_v3_0_hw_init(void *handle)
}
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -2230,6 +2231,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 832d15f7b5f6..ac1b8ead03b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -288,6 +288,7 @@ static int vcn_v4_0_hw_init(void *handle)
}
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -2130,6 +2131,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 203fa988322b..2279d8fce03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -1660,6 +1660,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 501e53e69f2a..81fb99729f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -237,6 +237,7 @@ static int vcn_v4_0_5_hw_init(void *handle)
goto done;
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -1752,6 +1753,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index bc60c554eb32..851975b5ce29 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -95,7 +95,7 @@ static int vcn_v5_0_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -154,7 +154,7 @@ static int vcn_v5_0_0_sw_fini(void *handle)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -203,6 +203,7 @@ static int vcn_v5_0_0_hw_init(void *handle)
goto done;
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -334,7 +335,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
/**
@@ -438,7 +439,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
@@ -615,7 +616,7 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
*/
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -712,7 +713,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
*/
static int vcn_v5_0_0_start(struct amdgpu_device *adev)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int i, j, k, r;
@@ -893,7 +894,7 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
*/
static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int i, r = 0;
@@ -1328,6 +1329,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1a98812981f4..d39c670f6220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -897,7 +897,7 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool vi_asic_supports_baco(struct amdgpu_device *adev)
+static int vi_asic_supports_baco(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
@@ -908,14 +908,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_TOPAZ:
return amdgpu_dpm_is_baco_supported(adev);
default:
- return false;
+ return 0;
}
}
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- bool baco_reset;
+ int baco_reset;
if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO)
@@ -935,7 +935,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
default:
- baco_reset = false;
+ baco_reset = 0;
break;
}
@@ -2058,6 +2058,8 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
.get_clockgating_state = vi_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 769eb8f7bb3c..09315dd5a1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
+ /* setup collaborate mode */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
+ /* setup DPM */
+ if (amdgpu_vpe_configure_dpm(vpe))
+ dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
/*
* For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
* Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
- amdgpu_vpe_psp_update_sram(adev);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
-
- return 0;
+ return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index dfa8c69532d4..fdf171ad4a3c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -371,6 +371,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
err = -EINVAL;
goto err_wptr_map_gart;
}
+ if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ err = -EINVAL;
+ goto err_wptr_map_gart;
+ }
err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
if (err) {
@@ -779,8 +784,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
@@ -1139,7 +1144,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = dev->adev->rmmio_remap.bus_addr;
- if (!offset) {
+ if (!offset || (PAGE_SIZE > 4096)) {
err = -ENOMEM;
goto err_unlock;
}
@@ -1523,7 +1528,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
/* Find a KFD GPU device that supports the get_dmabuf_info query */
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
- if (dev)
+ if (dev && !kfd_devcgroup_check_permission(dev))
break;
if (!dev)
return -EINVAL;
@@ -1545,7 +1550,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
if (xcp_id >= 0)
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
else
- args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
+ args->gpu_id = dev->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
@@ -2307,7 +2312,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
}
offset = pdd->dev->adev->rmmio_remap.bus_addr;
- if (!offset) {
+ if (!offset || (PAGE_SIZE > 4096)) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
return -ENOMEM;
}
@@ -3349,6 +3354,9 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
+ if (PAGE_SIZE > 4096)
+ return -EINVAL;
+
address = dev->adev->rmmio_remap.bus_addr;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 041ec3de55e7..9596bca57212 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -435,12 +435,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!f2g) {
if (amdgpu_ip_version(adev, GC_HWIP, 0))
- dev_err(kfd_device,
+ dev_info(kfd_device,
"GC IP %06x %s not supported in kfd\n",
amdgpu_ip_version(adev, GC_HWIP, 0),
vf ? "VF" : "");
else
- dev_err(kfd_device, "%s %s not supported in kfd\n",
+ dev_info(kfd_device, "%s %s not supported in kfd\n",
amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
- int count;
if (!kfd->init_complete)
return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = ++kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
/* For first KFD device suspend all the KFD processes */
- if (count == 1)
+ if (++kfd_locked == 1)
kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count, i;
+ int ret, i;
if (!kfd->init_complete)
return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = --kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
+ if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
}
return ret;
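
The kgd2kfd_suspend/resume change above moves the kfd_locked update and the "first/last device" check under a single mutex hold, so the decision to suspend or resume all processes can no longer race with another device doing the opposite. A small pthread-based sketch of that pattern follows; the names are illustrative, not KFD API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int locked_count;	/* counterpart of kfd_locked */

static void suspend_all_processes(void) { printf("suspend all\n"); }
static void resume_all_processes(void)  { printf("resume all\n"); }

/* First suspender quiesces everything; check and update share one lock hold. */
static void device_suspend(void)
{
	pthread_mutex_lock(&lock);
	if (++locked_count == 1)
		suspend_all_processes();
	pthread_mutex_unlock(&lock);
}

/* Last resumer wakes everything back up, again without dropping the lock
 * between the decrement and the decision. */
static void device_resume(void)
{
	pthread_mutex_lock(&lock);
	if (--locked_count == 0)
		resume_all_processes();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	device_suspend();
	device_resume();
	return 0;
}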
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f4d395e38683..c08b6ee25289 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1997,10 +1997,10 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
- if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
- dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 9a06c6fb6605..8e0d0356e810 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -134,6 +134,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -153,6 +154,8 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
@@ -160,6 +163,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
break;
@@ -170,17 +174,16 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
* resetting queue fails, fallback to gpu reset solution
*/
- if (!ret) {
+ if (!ret)
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
+ else
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
static bool event_interrupt_isr_v10(struct kfd_node *dev,
@@ -339,7 +342,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
@@ -367,10 +371,25 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ /* gfxhub */
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (hub_inst < 0)
+ hub_inst = 0;
+ }
+
+ /* mmhub */
+ if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+ hub_inst = node_id / 4;
+
+ if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+ hub_inst, vmid_type)) {
event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 7e2859736a55..f524a55eee11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -193,6 +193,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -212,10 +213,13 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC21_INTSRC_SDMA_ECC:
default:
block = AMDGPU_RAS_BLOCK__GFX;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
}
@@ -223,10 +227,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
resetting queue fails, fallback to gpu reset solution */
- if (!ret)
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- else
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
@@ -328,7 +329,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 91dd5e045b51..e1c21d250611 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -144,7 +144,8 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
enum amdgpu_ras_block block = 0;
- int old_poison, ret = -EINVAL;
+ int old_poison;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -162,8 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE2SH:
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
- ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ break;
+ case SOC15_IH_CLIENTID_VMC:
+ case SOC15_IH_CLIENTID_VMC1:
+ block = AMDGPU_RAS_BLOCK__MMHUB;
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
@@ -171,27 +177,21 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
- break;
+ dev_warn(dev->adev->dev,
+ "client %d does not support poison consumption\n", client_id);
+ return;
}
kfd_signal_poison_consumed_event(dev, pasid);
- /* resetting queue passes, do page retirement without gpu reset
- * resetting queue fails, fallback to gpu reset solution
- */
- if (!ret) {
- dev_warn(dev->adev->dev,
- "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
- client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
- dev_warn(dev->adev->dev,
- "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
- client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+ dev_warn(dev->adev->dev,
+ "poison is consumed by client %d, kick off gpu reset flow\n", client_id);
+
+ amdgpu_amdkfd_ras_pasid_poison_consumption_handler(dev->adev,
+ block, pasid, NULL, NULL, reset);
}
static bool context_id_expected(struct kfd_dev *dev)
@@ -388,7 +388,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
@@ -413,10 +414,25 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ /* gfxhub */
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (hub_inst < 0)
+ hub_inst = 0;
+ }
+
+ /* mmhub */
+ if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+ hub_inst = node_id / 4;
+
+ if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+ hub_inst, vmid_type)) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index dd3c43c1ad70..9b6b6e882593 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -104,6 +104,8 @@ void kfd_interrupt_exit(struct kfd_node *node)
*/
flush_workqueue(node->ih_wq);
+ destroy_workqueue(node->ih_wq);
+
kfifo_free(&node->ih_fifo);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index bdc01ca9609a..4bcfbeac48fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -77,7 +77,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes, false);
+ dst_addr, num_bytes, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -153,7 +153,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
- NULL, &next, false, true, false);
+ NULL, &next, false, true, 0);
if (r) {
dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
+
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
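
The svm_migrate_ram_to_vram() change above brackets the VRAM allocation with reserve/unreserve calls on the memory limit, so a failed or partial migration cannot leak reserved budget. A standalone sketch of that reserve -> allocate -> goto-out -> unreserve shape, with illustrative stand-ins rather than the kernel helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static long reserved;		/* stand-in for the device-wide memory limit */
static const long limit = 1024;

static int reserve_mem_limit(long bytes)
{
	if (reserved + bytes > limit)
		return -ENOSPC;
	reserved += bytes;
	return 0;
}

static void unreserve_mem_limit(long bytes)
{
	reserved -= bytes;
}

static int alloc_vram_node(long bytes, bool fail)
{
	(void)bytes;
	return fail ? -ENOMEM : 0;
}

/* mirrors the new flow: reserve, allocate, always unreserve on the way out */
static int migrate_ram_to_vram(long bytes, bool make_alloc_fail)
{
	int r;

	r = reserve_mem_limit(bytes);
	if (r) {
		printf("failed to reserve VRAM: %d\n", r);
		return -ENOSPC;
	}

	r = alloc_vram_node(bytes, make_alloc_fail);
	if (r)
		goto out;

	/* ... copy pages, etc. ... */
out:
	unreserve_mem_limit(bytes);
	return r < 0 ? r : 0;
}

int main(void)
{
	printf("ok path:       %d, reserved after: %ld\n",
	       migrate_ram_to_vram(256, false), reserved);
	printf("alloc failure: %d, reserved after: %ld\n",
	       migrate_ram_to_vram(256, true), reserved);
	return 0;
}
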
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 050a6936ff84..8746a61a852d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -290,3 +290,21 @@ uint64_t kfd_mqd_stride(struct mqd_manager *mm,
{
return mm->mqd_size;
}
+
+bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
+ uint32_t inst)
+{
+ if (doorbell_id) {
+ struct device *dev = node->adev->dev;
+
+ if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0)
+ dev_err(dev, "XCC %d: Queue preemption failed for queue with doorbell_id: %x\n",
+ inst, doorbell_id);
+ else
+ dev_err(dev, "Queue preemption failed for queue with doorbell_id: %x\n",
+ doorbell_id);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index e5cc697a3ca8..17cc1f25c8d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -119,7 +119,7 @@ struct mqd_manager {
#if defined(CONFIG_DEBUG_FS)
int (*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
- uint32_t (*read_doorbell_id)(void *mqd);
+ bool (*check_preemption_failed)(struct mqd_manager *mm, void *mqd);
uint64_t (*mqd_stride)(struct mqd_manager *mm,
struct queue_properties *p);
@@ -198,4 +198,6 @@ void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
struct queue_properties *q);
+bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
+ uint32_t inst);
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 1a4a69943c71..05f3ac2eaef9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -206,11 +206,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct cik_mqd *m = (struct cik_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
@@ -423,7 +423,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 22cbfa1bdadd..2eff37aaf827 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -224,11 +224,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -488,7 +488,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 826bc4f6c8a7..68dbc0399c87 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -278,11 +278,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -517,7 +517,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 697b6d530d12..6bddc16808d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -316,11 +316,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v9_mqd *m = (struct v9_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -607,6 +607,24 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
return err;
}
+static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
+{
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int inst = 0, xcc_id;
+ struct v9_mqd *m;
+ bool ret = false;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ m = get_mqd(mqd + hiq_mqd_size * inst);
+ ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
+ m->queue_doorbell_id0, inst);
+ ++inst;
+ }
+
+ return ret;
+}
+
static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
struct kfd_mem_obj *xcc_mqd_mem_obj,
uint64_t offset)
@@ -881,15 +899,16 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
+ mqd->check_preemption_failed = check_preemption_failed_v9_4_3;
} else {
mqd->init_mqd = init_mqd_hiq;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->destroy_mqd = destroy_hiq_mqd;
+ mqd->check_preemption_failed = check_preemption_failed;
}
break;
case KFD_MQD_TYPE_DIQ:
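
For GC 9.4.3 the HIQ MQDs of all XCC instances sit back to back in one allocation, so check_preemption_failed_v9_4_3() above walks the xcc_mask, offsets into the stack by the HIQ MQD stride, and OR-reduces the per-instance doorbell check. A standalone sketch of that reduction, with for_each_inst() replaced by a plain bit loop and purely illustrative sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct v9_mqd_model {
	uint32_t queue_doorbell_id0;
};

static bool check_doorbell_id(uint32_t doorbell_id, uint32_t inst)
{
	if (doorbell_id)
		printf("XCC %u: doorbell_id 0x%x not cleared\n", inst, doorbell_id);
	return doorbell_id != 0;
}

static bool check_preemption_failed_multi_xcc(const uint8_t *mqd_stack,
					      size_t hiq_mqd_stride,
					      uint32_t xcc_mask)
{
	bool failed = false;
	uint32_t inst = 0;

	/* plain replacement for for_each_inst(xcc_id, xcc_mask) */
	for (uint32_t bit = 0; bit < 32; bit++) {
		if (!(xcc_mask & (1u << bit)))
			continue;
		const struct v9_mqd_model *m = (const struct v9_mqd_model *)
			(mqd_stack + hiq_mqd_stride * inst);
		failed |= check_doorbell_id(m->queue_doorbell_id0, inst);
		inst++;
	}
	return failed;
}

int main(void)
{
	/* two XCC instances stacked back to back; the second has a stuck queue */
	struct { struct v9_mqd_model m; uint8_t pad[60]; } stack[2] = {0};

	stack[1].m.queue_doorbell_id0 = 0x8;
	printf("failed = %d\n",
	       check_preemption_failed_multi_xcc((const uint8_t *)stack,
						 sizeof(stack[0]), 0x3));
	return 0;
}
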
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 3e1a574d4ea6..c1fafc502515 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -237,11 +237,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct vi_mqd *m = (struct vi_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
@@ -482,7 +482,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 42d40560cd30..a81ef232fdef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 717a60d7a4ea..451bb058cc62 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
@@ -829,6 +829,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -1922,6 +1930,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1959,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
- if (!signal_eviction_fence(p))
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- else
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2020,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
}
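
The reworked evict_process_worker() above leans on mod_delayed_work(): in the kernel that helper returns true when the delayed work was already pending (only its timer is modified) and false when it was idle and freshly queued, so the queues are restored immediately either when the eviction fence cannot be signalled or when a restore was already scheduled. A tiny user-space model of just that decision, with the kernel primitives replaced by stubs:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct delayed_work_model {
	bool pending;
};

/* returns true if the work was already pending, false if freshly queued */
static bool mod_delayed_work_model(struct delayed_work_model *w)
{
	bool was_pending = w->pending;

	w->pending = true;
	return was_pending;
}

static int signal_eviction_fence_model(bool have_fence)
{
	return have_fence ? 0 : -EINVAL;
}

static void evict_process_model(struct delayed_work_model *restore_work,
				bool have_fence)
{
	if (signal_eviction_fence_model(have_fence) ||
	    mod_delayed_work_model(restore_work))
		printf("restore queues immediately\n");
	else
		printf("restore deferred to delayed work\n");
}

int main(void)
{
	struct delayed_work_model restore = { .pending = false };

	evict_process_model(&restore, true);	/* freshly queued -> deferred */
	evict_process_model(&restore, true);	/* already pending -> immediate */
	evict_process_model(&restore, false);	/* no fence -> immediate */
	return 0;
}
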
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f0f7f48af413..386875e6eb96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
- return r;
+ return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c51f131eaa2f..bc9eb847ecfe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1997,9 +1997,8 @@ int kfd_topology_add_device(struct kfd_node *gpu)
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
- /* On multi-partition nodes, node id = location_id[31:28] */
- if (gpu->kfd->num_nodes > 1)
- dev->node_props.location_id |= (dev->gpu->node_id << 28);
+ if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ dev->node_props.location_id |= dev->gpu->node_id;
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 92a5c5efcf92..9a5bcafbf730 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2851719d7121..f1d67c6f4b98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -271,7 +274,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- u32 v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
struct amdgpu_crtc *acrtc = NULL;
struct dc *dc = adev->dm.dc;
@@ -845,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
*/
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
- struct dmub_notification notify;
+ struct dmub_notification notify = {0};
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
@@ -1227,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
break;
}
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ break;
+ default:
+ break;
+ }
+
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@@ -1723,8 +1735,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- init_data.flags.disable_ips_in_vpb = 1;
+ init_data.flags.disable_ips_in_vpb = 0;
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -2626,6 +2640,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
memset(del_streams, 0, sizeof(del_streams));
@@ -2652,7 +2667,9 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_streams(dc, context->streams, context->stream_count);
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
fail:
dc_state_release(context);
@@ -2874,6 +2891,7 @@ static int dm_resume(void *handle)
struct dc_state *dc_state;
int i, r, j, ret;
bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
if (dm->dc->caps.ips_support) {
dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
@@ -2923,7 +2941,9 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2940,7 +2960,7 @@ static int dm_resume(void *handle)
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
- dm_state->context = dc_state_create(dm->dc);
+ dm_state->context = dc_state_create(dm->dc, NULL);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
/* Before powering on DC we need to re-initialize DMUB. */
@@ -3026,6 +3046,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -3044,6 +3065,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -3096,6 +3121,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version dm_ip_block = {
@@ -4529,15 +4556,18 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Determine whether to enable Replay support by default. */
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
- case IP_VERSION(3, 1, 4):
- case IP_VERSION(3, 1, 5):
- case IP_VERSION(3, 1, 6):
- case IP_VERSION(3, 2, 0):
- case IP_VERSION(3, 2, 1):
- case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
- replay_feature_enabled = true;
- break;
+/*
+ * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
+ * case IP_VERSION(3, 1, 4):
+ * case IP_VERSION(3, 1, 5):
+ * case IP_VERSION(3, 1, 6):
+ * case IP_VERSION(3, 2, 0):
+ * case IP_VERSION(3, 2, 1):
+ * case IP_VERSION(3, 5, 0):
+ * case IP_VERSION(3, 5, 1):
+ * replay_feature_enabled = true;
+ * break;
+ */
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
@@ -4820,9 +4850,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -5700,8 +5732,8 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
if (!adjust_colour_depth_from_display_info(timing_out, info) &&
drm_mode_is_420_also(info, mode_in) &&
@@ -5921,6 +5953,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6305,27 +6340,22 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- stream->signal == SIGNAL_TYPE_EDP) {
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
- if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
- if (stream->link->psr_settings.psr_feature_enabled)
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -6792,7 +6822,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
if (!dc_plane_state)
goto cleanup;
- dc_state = dc_state_create(dc);
+ dc_state = dc_state_create(dc, NULL);
if (!dc_state)
goto cleanup;
@@ -7181,7 +7211,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
int i, j, ret;
- int vcpi, pbn_div, pbn, slot_num = 0;
+ int vcpi, pbn_div, pbn = 0, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -8394,13 +8424,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
- bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
- bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
- bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
- bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
}
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
@@ -8613,7 +8643,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.output_csc_transform =
&acrtc_state->stream->csc_color_matrix;
bundle->stream_update.out_transfer_func =
- acrtc_state->stream->out_transfer_func;
+ &acrtc_state->stream->out_transfer_func;
bundle->stream_update.lut3d_func =
(struct dc_3dlut *) acrtc_state->stream->lut3d_func;
bundle->stream_update.func_shaper =
@@ -8764,10 +8794,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
-notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8847,6 +8877,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -8983,7 +9014,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -10587,7 +10618,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
trace_amdgpu_dm_atomic_check_begin(state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index c87b64e464ed..ebabfe3a512f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -571,7 +571,7 @@ static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = stream->out_transfer_func;
+ struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -954,8 +954,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,7 +963,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
regamma_size, has_rom);
if (r)
return r;
@@ -1034,7 +1034,7 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
&degamma_size);
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
/*
* This case isn't fully correct, but also fairly
@@ -1061,12 +1061,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.tf = tf;
else
- dc_plane_state->in_transfer_func->tf =
+ dc_plane_state->in_transfer_func.tf =
TRANSFER_FUNCTION_LINEAR;
- r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (r)
return r;
@@ -1075,12 +1075,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* For legacy gamma support we need the regamma input
* in linear space. Assume that the input is sRGB.
*/
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func.tf = tf;
if (tf != TRANSFER_FUNCTION_SRGB &&
!mod_color_calculate_degamma_params(caps,
- dc_plane_state->in_transfer_func,
+ &dc_plane_state->in_transfer_func,
NULL, false))
return -ENOMEM;
}
@@ -1114,24 +1114,24 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
return -EINVAL;
- dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf);
+ dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf);
if (has_degamma_lut) {
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_DISTRIBUTED_POINTS;
- ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func,
+ ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (ret)
return ret;
} else {
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_PREDEFINED;
if (!mod_color_calculate_degamma_params(color_caps,
- dc_plane_state->in_transfer_func, NULL, false))
+ &dc_plane_state->in_transfer_func, NULL, false))
return -ENOMEM;
}
return 0;
@@ -1156,11 +1156,11 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
lut3d_size = lut3d != NULL ? lut3d_size : 0;
- amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func);
+ amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
amdgpu_tf_to_dc_tf(shaper_tf),
shaper_size,
- dc_plane_state->in_shaper_func);
+ &dc_plane_state->in_shaper_func);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d shaper LUT failed.\n",
@@ -1175,7 +1175,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
amdgpu_tf_to_dc_tf(blend_tf),
- blend_size, dc_plane_state->blend_tf);
+ blend_size, &dc_plane_state->blend_tf);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d gamma lut failed.\n",
@@ -1221,8 +1221,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
color_caps = &dc_plane_state->ctx->dc->caps.color;
/* Initially, we can just bypass the DGM block. */
- dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR;
/* After, we start to update values according to color props */
has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index eee4945653e2..4d7a5d470b1e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
size_t size, loff_t *pos)
{
int r;
- uint8_t data[36];
+ uint8_t data[36] = {0};
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dm_crtc_state *acrtc_state;
uint32_t write_size = 36;
@@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2936,7 +2960,7 @@ static int psr_read_residency(void *data, u64 *val)
{
struct amdgpu_dm_connector *connector = data;
struct dc_link *link = connector->dc_link;
- u32 residency;
+ u32 residency = 0;
link->dc->link_srv->edp_get_psr_residency(link, &residency);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 941e96f100f4..35733d5c5193 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -791,25 +791,12 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
-{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
-
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
-
- return fec_overhead_multiplier_x1000;
-}
-
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static int kbps_to_peak_pbn(int kbps)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+ peak_kbps = div_u64(peak_kbps, 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
@@ -910,12 +897,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1011,7 +997,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1041,7 +1026,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1054,7 +1039,8 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(
+ params[next_index].bw_range.max_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1083,7 +1069,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -1148,7 +1133,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1167,7 +1152,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1175,7 +1160,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1219,8 +1204,10 @@ static bool is_dsc_need_re_compute(
if (dc_link->type != dc_connection_mst_branch)
return false;
- if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
- dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
+ /* add a check for older MST DSC with no virtual DPCDs */
+ if (needs_dsc_aux_workaround(dc_link) &&
+ (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
return false;
for (i = 0; i < MAX_PIPES; i++)
@@ -1240,7 +1227,15 @@ static bool is_dsc_need_re_compute(
continue;
aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
- if (!aconnector)
+ if (!aconnector || !aconnector->dsc_aux)
+ continue;
+
+ /*
+ * check if cached virtual MST DSC caps are available and DSC is supported
+ * as per specifications in their Virtual DPCD registers.
+ */
+ if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
@@ -1601,7 +1596,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int bpp, pbn, branch_max_throughput_mps = 0;
+ int pbn, branch_max_throughput_mps = 0;
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1651,11 +1646,34 @@ enum dc_status dm_dp_mst_is_port_support_mode(
}
}
} else {
- /* check if mode could be supported within full_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
- if (pbn > aconnector->mst_output_port->full_pbn)
+ /* Check if mode could be supported within max slot
+ * number of current mst link and full_pbn of mst links.
+ */
+ int pbn_div, slot_num, max_slot_num;
+ enum dc_link_encoding_format link_encoding;
+ uint32_t stream_kbps =
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->link));
+
+ pbn = kbps_to_peak_pbn(stream_kbps);
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ link_encoding = dc_link_get_highest_encoding_format(stream->link);
+ if (link_encoding == DC_LINK_ENCODING_DP_8b_10b)
+ max_slot_num = 63;
+ else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ max_slot_num = 64;
+ else {
+ DRM_DEBUG_DRIVER("Invalid link encoding format\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ if (slot_num > max_slot_num ||
+ pbn > aconnector->mst_output_port->full_pbn) {
+ DRM_DEBUG_DRIVER("Mode can not be supported within mst links!");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
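
The simplified kbps_to_peak_pbn() above drops the per-encoding FEC overhead multiplier and keeps only the 0.6% downspread margin (x1006/1000) plus the conversion to PBN units (multiples of 54/64 MBps). A standalone restatement with an illustrative input:

#include <stdint.h>
#include <stdio.h>

static int kbps_to_peak_pbn(int kbps)
{
	uint64_t peak_kbps = (uint64_t)kbps * 1006 / 1000;

	/* DIV64_U64_ROUND_UP(peak_kbps * 64, 54 * 8 * 1000) */
	return (int)((peak_kbps * 64 + (54 * 8 * 1000 - 1)) / (54 * 8 * 1000));
}

int main(void)
{
	/* 1 Gbps stream: 1,006,000 * 64 / 432,000 = 149.04..., rounded up to 150 */
	printf("pbn(1000000 kbps) = %d\n", kbps_to_peak_pbn(1000000));
	return 0;
}
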
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 37c820ab0fdb..fa84d34b7373 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,9 +46,6 @@
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
-#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
-#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
-
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 1f08c6564c3b..bfa090432ce2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
* amdgpu_dm_psr_enable() - enable psr f/w
* @stream: stream state
*
- * Return: true if success
*/
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
power_opt |= psr_power_opt_z10_static_screen;
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
}
/*
@@ -210,7 +212,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
}
/*
- * amdgpu_dm_psr_disable() - disable psr f/w
+ * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
* if psr is enabled on any stream
*
* Return: true if success
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84b..1fdfd183c0d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index d9e33c6bccd9..0005f5f8f34f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+{
+}
+
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+{
+}
+
/**** power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630..08c494a7a21b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
- dev->mode_config.max_height);
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 7991ae468f75..4e9fb1742877 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 6450853fea94..bc16db69a663 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -44,8 +44,6 @@
#include "bios_parser_common.h"
-#include "dc.h"
-
#define THREE_PERCENT_OF_10000 300
#define LAST_RECORD_TYPE 0xff
@@ -1731,6 +1729,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
return 0;
}
+
/**
* get_ss_entry_number_from_internal_ss_info_tbl_V3_1
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 05f392501c0a..9fe0020bcb9c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1594,8 +1594,6 @@ static bool bios_parser_is_device_id_supported(
return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
break;
}
-
- return false;
}
static uint32_t bios_parser_get_ss_entry_number(
@@ -2948,6 +2946,7 @@ static enum bp_result construct_integrated_info(
result = get_integrated_info_v2_1(bp, info);
break;
case 2:
+ case 3:
result = get_integrated_info_v2_2(bp, info);
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 86f9198e7501..2bcae0643e61 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -399,7 +399,7 @@ static enum bp_result transmitter_control_v1_6(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
frev, crev) == false)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index cbae1be7b009..cc000833d300 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -225,7 +225,7 @@ static enum bp_result transmitter_control_fallback(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 9f0f25aee426..a2b4ff2cff16 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
- if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ if (ctx->dce_version == DCN_VERSION_2_01) {
dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@@ -329,15 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
case AMDGPU_FAMILY_GC_11_0_0: {
- struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
- if (clk_mgr == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
- dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base;
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+ dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
}
case AMDGPU_FAMILY_GC_11_0_1: {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index b77804cfde0f..2a5dd3a296b2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,8 +131,8 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz;
- int target_div = 600000;
+ int dp_ref_clk_khz = 600000;
+ int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 2a74e2d74909..369421e46c52 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -23,7 +23,6 @@
*
*/
-#include "reg_helper.h"
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 89b79dd39628..19897fa52e7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -26,7 +26,6 @@
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
-#include <linux/delay.h>
#include "rv1_clk_mgr_vbios_smu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 5ee87965a078..bb4f3bd7532e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
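
Aside (not part of the patch): the loop bound change above makes the scan match the size of the per-link phyclk request table instead of a pipe-derived count. A compilable sketch with a made-up MAX_LINKS value, not the driver's:

    /* Illustrative sketch only; MAX_LINKS below is hypothetical. */
    #include <stdint.h>

    #define MAX_LINKS 12   /* assumed size of the per-link phyclk request table */

    uint32_t max_phyclk_req_khz(const uint32_t cur_phyclk_req_table[MAX_LINKS])
    {
        uint32_t max_req = 0;
        int i;

        /* walk exactly the entries the table holds, no more */
        for (i = 0; i < MAX_LINKS; i++)
            if (cur_phyclk_req_table[i] > max_req)
                max_req = cur_phyclk_req_table[i];

        return max_req;
    }
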
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 9c90090e7351..f77840dd051e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dispclk_khz == 0 ||
dc->debug.force_clock_mode & 0x1) {
+ /* This path is taken on resume or boot up; if the forced_clock cfg option
+ * is used, we bypass programming dispclk and DPPCLK, but still need to set
+ * them for S3.
+ */
+
force_reset = true;
+ /* force_clock_mode 0x1: force a clock reset even if it is the
+ * same clock, as long as it is in the Passive level.
+ */
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
@@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
+ // if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
+ // always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index e3e1940198a9..5ef0879f6ad9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
j = -1;
- ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
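
Aside (not part of the patch): the ASSERT-to-static_assert change above moves this bound check from runtime to compile time. A minimal standalone sketch with hypothetical stand-in constants; in the kernel itself static_assert comes from <linux/build_bug.h>, and the <assert.h> include is only so the sketch builds on its own:

    /* Sketch only; the constants below are hypothetical stand-ins. */
    #include <assert.h>   /* C11 static_assert */

    #define PP_SMU_NUM_FCLK_DPM_LEVELS 4
    #define MAX_NUM_DPM_LVL            8

    /* The build fails, instead of tripping an assert at runtime, if the
     * reported FCLK DPM level count ever exceeds the table capacity. */
    static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
                  "number of reported FCLK DPM levels exceeds maximum");
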
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3271c8c7905d..8083a553c60e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
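
Aside (not part of the patch): the two hunks above add the usual allocation-failure guard after the kzalloc and GART allocations. A simplified, standalone sketch of the pattern, with plain stand-ins instead of DC types and BREAK_TO_DEBUGGER:

    /* Sketch only; real code uses kzalloc()/BREAK_TO_DEBUGGER() and DC types. */
    #include <stdlib.h>

    struct bw_params { int dummy; };
    struct clk_mgr   { struct bw_params *bw_params; };

    int clk_mgr_construct(struct clk_mgr *clk_mgr)
    {
        clk_mgr->bw_params = calloc(1, sizeof(*clk_mgr->bw_params));
        if (!clk_mgr->bw_params)
            return -1;   /* bail out early rather than dereference NULL later */

        /* ... continue construction, checking each allocation the same way ... */
        return 0;
    }
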
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index bdbf18306698..3253115a153d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "dcn30_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
@@ -54,6 +53,7 @@
*/
static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- /* Log? */
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
@@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
if (IS_SMU_TIMEOUT(result)) {
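
Aside (not part of the patch): the TRACE_SMU_DELAY argument above is the per-poll delay multiplied by how many retries were consumed. A standalone sketch with the register access simulated so the loop terminates; the numbers are arbitrary:

    /* Sketch only; the register read is simulated and udelay() is elided. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_read(void)
    {
        static int calls;
        return ++calls >= 5 ? 1 : 0;   /* "respond" on the 5th poll */
    }

    uint32_t poll_response(unsigned int delay_us, unsigned int max_retries)
    {
        const uint32_t initial_max_retries = max_retries;
        uint32_t reg = 0;

        do {
            reg = fake_read();   /* stands in for REG_READ(DAL_RESP_REG) */
            if (reg)
                break;
            /* udelay(delay_us) in the real driver */
        } while (max_retries--);

        /* total wait = per-poll delay times the number of delays consumed */
        printf("waited ~%u us\n", delay_us * (initial_max_retries - max_retries));
        return reg;
    }

    int main(void)
    {
        poll_response(10, 200000);   /* same delay/retry numbers as the driver */
        return 0;
    }
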
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index aa9fd1dc550a..191d8b969d19 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index ce1386e22576..12a7752758b8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 6904e95113c1..f201628e4e98 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index 047d19ea919c..78ca1e5c5e9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -37,34 +37,34 @@ typedef enum {
} WCK_RATIO_e;
typedef struct {
- uint32_t FClk;
- uint32_t MemClk;
- uint32_t Voltage;
- uint8_t WckRatio;
- uint8_t Spare[3];
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
} DfPstateTable314_t;
//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
- uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
- uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
- uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
- uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
- uint32_t VClocks[NUM_VCN_DPM_LEVELS];
- uint32_t DClocks[NUM_VCN_DPM_LEVELS];
- uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
- DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
- uint8_t NumDcfClkLevelsEnabled;
- uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
- uint8_t NumSocClkLevelsEnabled;
- uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
- uint8_t NumDfPstatesEnabled;
- uint8_t spare[3];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
- uint32_t MinGfxClk;
- uint32_t MaxGfxClk;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
} DpmClocks314_t;
struct dcn314_watermarks {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 644da4637320..5506cf9b3672 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
*/
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
if (safe_to_lower) {
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn315_get_active_display_cnt_wa(dc, context);
@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
} else {
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 879f1494c4cd..2d14346b680e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dcn315_smu.h"
#include "mp/mp_13_0_5_offset.h"
+#include "logger_types.h"
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
@@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define REG_NBIO(reg_name) \
(NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8..20ca7afa9cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
@@ -480,7 +485,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index bec252e1dd27..ff5fdc7b1198 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -29,6 +29,7 @@
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
+#include "dcn32/dcn32_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -40,7 +41,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#include "dcn32/dcn32_clk_mgr.h"
#include "dml/dcn32/dcn32_fpu.h"
#define DCN_BASE__INST0_SEG1 0x000000C0
@@ -712,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
* since we calculate mode support based on softmax being the max UCLK
* frequency.
*/
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ if (dc->debug.disable_dc_mode_overwrite) {
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ } else
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
}
@@ -746,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- !dc->work_arounds.clock_update_disable_mask.uclk)
+ !dc->work_arounds.clock_update_disable_mask.uclk) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+ max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
+
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+ }
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
@@ -829,7 +838,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
- if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
+ if (dc->config.enable_auto_dpm_test_logs) {
dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
@@ -1199,11 +1208,19 @@ void dcn32_clk_mgr_construct(
clk_mgr->smu_present = false;
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index df244b175fdb..f2f60478b1a6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -49,6 +49,7 @@
*/
static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
if (param_out)
@@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
+ TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+
return reg;
}
@@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index c76352a817de..5c44ab0e8667 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -37,10 +37,9 @@
#define DALSMC_Result_OK 0x1
void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
-void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
-void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index c378b879c76d..6c9b4e6491a5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,14 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
#define REG(reg_name) \
(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -244,7 +252,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, false);
+ if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
+ dcn35_smu_set_dtbclk(clk_mgr, false);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -409,11 +418,25 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
static void init_clk_states(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -423,7 +446,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
+
+ // adjust the dp_dto reference clock if SSC is enabled, otherwise apply dprefclk
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -512,6 +544,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -709,7 +763,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
@@ -836,35 +890,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
-static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
-
- if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
- dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- }
-
- if (!allow_idle) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- }
-
- dcn35_smu_write_ips_scratch(clk_mgr, val);
-}
-
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -884,13 +909,6 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
return ips_supported;
}
-static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-
- return dcn35_smu_read_ips_scratch(clk_mgr);
-}
-
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
init_clk_states(clk_mgr);
@@ -978,8 +996,6 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
- .set_idle_state = dcn35_set_ips_idle_state,
- .get_idle_state = dcn35_get_ips_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -1056,6 +1072,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 9e588c56c570..1399b41dfd1c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -487,24 +487,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
return retv;
}
-
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
-{
- if (!clk_mgr->smu_present)
- return;
-
- REG_WRITE(MP1_SMN_C2PMSG_71, param);
- //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
-}
-
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
-{
- uint32_t retv;
-
- if (!clk_mgr->smu_present)
- return 0;
-
- retv = REG_READ(MP1_SMN_C2PMSG_71);
- //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
- return retv;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 2b8e6959a03d..06cd3cc6d36e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_35_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e7dc128f6284..236876d95185 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,6 +36,7 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -212,7 +213,8 @@ static bool create_links(
connectors_num,
num_virtual_links);
- for (i = 0; i < connectors_num; i++) {
+ // condition loop on link_count to allow skipping invalid indices
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -386,6 +388,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
*perf_trace = NULL;
}
+static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
+{
+ if (!dc || !stream || !adjust)
+ return false;
+
+ if (!dc->current_state)
+ return false;
+
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ if (dc->hwss.set_long_vtotal)
+ dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
* @dc: dc reference
@@ -420,6 +446,15 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
stream->adjust.v_total_min = adjust->v_total_min;
+ stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
+
+ if (dc->caps.max_v_total != 0 &&
+ (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ if (adjust->allow_otg_v_count_halt)
+ return set_long_vtotal(dc, stream, adjust);
+ else
+ return false;
+ }
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1055,8 +1090,7 @@ static bool dc_construct(struct dc *dc,
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
-
- dc->current_state = dc_state_create(dc);
+ dc->current_state = dc_state_create(dc, NULL);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
@@ -1272,7 +1306,7 @@ static void disable_vbios_mode_if_required(
if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
unsigned int enc_inst, tg_inst = 0;
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (enc_inst != ENGINE_ID_UNKNOWN) {
@@ -1759,7 +1793,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
if (dc_is_dp_signal(link->connector_signal)) {
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
uint32_t numOdmPipes = 1;
uint32_t id_src[4] = {0};
@@ -1801,6 +1835,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
@@ -2085,15 +2122,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
-static bool commit_minimal_transition_state_legacy(struct dc *dc,
+static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context);
/**
* dc_commit_streams - Commit current stream state
*
* @dc: DC object with the commit state to be configured in the hardware
- * @streams: Array with a list of stream state
- * @stream_count: Total of streams
+ * @params: Parameters for the commit, including the streams to be committed
*
* Function responsible for commit streams change to the hardware.
*
@@ -2101,9 +2137,7 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
* Return DC_OK if everything work as expected, otherwise, return a dc_status
* code.
*/
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count)
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
{
int i, j;
struct dc_state *context;
@@ -2112,18 +2146,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;
+ if (!params)
+ return DC_ERROR_UNEXPECTED;
+
if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
return res;
- if (!streams_changed(dc, streams, stream_count))
+ if (!streams_changed(dc, params->streams, params->stream_count) &&
+ dc->current_state->power_source == params->power_source)
return res;
dc_exit_ips_for_hw_access(dc);
- DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
+ DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
- for (i = 0; i < stream_count; i++) {
- struct dc_stream_state *stream = streams[i];
+ for (i = 0; i < params->stream_count; i++) {
+ struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
dc_stream_log(dc, stream);
@@ -2141,7 +2179,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
* scenario, it uses extra pipes than needed to reduce power consumption
* We need to switch off this feature to make room for new streams.
*/
- if (stream_count > dc->current_state->stream_count &&
+ if (params->stream_count > dc->current_state->stream_count &&
dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2151,13 +2189,15 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
if (handle_exit_odm2to1)
- res = commit_minimal_transition_state_legacy(dc, dc->current_state);
+ res = commit_minimal_transition_state(dc, dc->current_state);
context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- res = dc_validate_with_context(dc, set, stream_count, context, false);
+ context->power_source = params->power_source;
+
+ res = dc_validate_with_context(dc, set, params->stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -2165,16 +2205,16 @@ enum dc_status dc_commit_streams(struct dc *dc,
res = dc_commit_state_no_check(dc, context);
- for (i = 0; i < stream_count; i++) {
+ for (i = 0; i < params->stream_count; i++) {
for (j = 0; j < context->stream_count; j++) {
- if (streams[i]->stream_id == context->streams[j]->stream_id)
- streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+ if (params->streams[i]->stream_id == context->streams[j]->stream_id)
+ params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
- if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
+ if (dc_is_embedded_signal(params->streams[i]->signal)) {
+ struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
if (dc->hwss.is_abm_supported)
- status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
else
status->is_abm_supported = true;
}
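
Aside (not part of the patch): the hunk above folds the stream list, the count and the new power_source input into a single dc_commit_streams_params argument. A sketch of that parameter-struct style with simplified stand-in types, not dc's actual definitions:

    /* Sketch only; the types and names are illustrative stand-ins. */
    #include <stdint.h>
    #include <stddef.h>

    struct stream;   /* opaque stream handle for the sketch */

    enum power_source { POWER_SOURCE_AC, POWER_SOURCE_DC_BATTERY };

    struct commit_streams_params {
        struct stream **streams;   /* streams to commit */
        uint8_t stream_count;
        enum power_source power_source;
    };

    int commit_streams(const struct commit_streams_params *params)
    {
        if (!params)
            return -1;   /* mirrors the DC_ERROR_UNEXPECTED guard */

        /* ... validate and program params->stream_count streams ... */
        return 0;
    }

Packing the arguments into a struct is what lets inputs such as power_source be added without changing the function signature for every caller again.
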
@@ -2818,55 +2858,45 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->layer_index;
}
- if (srf_update->gamma &&
- (surface->gamma_correction !=
- srf_update->gamma)) {
- memcpy(&surface->gamma_correction->entries,
+ if (srf_update->gamma) {
+ memcpy(&surface->gamma_correction.entries,
&srf_update->gamma->entries,
sizeof(struct dc_gamma_entries));
- surface->gamma_correction->is_identity =
+ surface->gamma_correction.is_identity =
srf_update->gamma->is_identity;
- surface->gamma_correction->num_entries =
+ surface->gamma_correction.num_entries =
srf_update->gamma->num_entries;
- surface->gamma_correction->type =
+ surface->gamma_correction.type =
srf_update->gamma->type;
}
- if (srf_update->in_transfer_func &&
- (surface->in_transfer_func !=
- srf_update->in_transfer_func)) {
- surface->in_transfer_func->sdr_ref_white_level =
+ if (srf_update->in_transfer_func) {
+ surface->in_transfer_func.sdr_ref_white_level =
srf_update->in_transfer_func->sdr_ref_white_level;
- surface->in_transfer_func->tf =
+ surface->in_transfer_func.tf =
srf_update->in_transfer_func->tf;
- surface->in_transfer_func->type =
+ surface->in_transfer_func.type =
srf_update->in_transfer_func->type;
- memcpy(&surface->in_transfer_func->tf_pts,
+ memcpy(&surface->in_transfer_func.tf_pts,
&srf_update->in_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->func_shaper &&
- (surface->in_shaper_func !=
- srf_update->func_shaper))
- memcpy(surface->in_shaper_func, srf_update->func_shaper,
- sizeof(*surface->in_shaper_func));
+ if (srf_update->func_shaper)
+ memcpy(&surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(surface->in_shaper_func));
- if (srf_update->lut3d_func &&
- (surface->lut3d_func !=
- srf_update->lut3d_func))
- memcpy(surface->lut3d_func, srf_update->lut3d_func,
- sizeof(*surface->lut3d_func));
+ if (srf_update->lut3d_func)
+ memcpy(&surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(surface->lut3d_func));
if (srf_update->hdr_mult.value)
surface->hdr_mult =
srf_update->hdr_mult;
- if (srf_update->blend_tf &&
- (surface->blend_tf !=
- srf_update->blend_tf))
- memcpy(surface->blend_tf, srf_update->blend_tf,
- sizeof(*surface->blend_tf));
+ if (srf_update->blend_tf)
+ memcpy(&surface->blend_tf, srf_update->blend_tf,
+ sizeof(surface->blend_tf));
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
@@ -2897,14 +2927,13 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dst.height && update->dst.width)
stream->dst = update->dst;
- if (update->out_transfer_func &&
- stream->out_transfer_func != update->out_transfer_func) {
- stream->out_transfer_func->sdr_ref_white_level =
+ if (update->out_transfer_func) {
+ stream->out_transfer_func.sdr_ref_white_level =
update->out_transfer_func->sdr_ref_white_level;
- stream->out_transfer_func->tf = update->out_transfer_func->tf;
- stream->out_transfer_func->type =
+ stream->out_transfer_func.tf = update->out_transfer_func->tf;
+ stream->out_transfer_func.type =
update->out_transfer_func->type;
- memcpy(&stream->out_transfer_func->tf_pts,
+ memcpy(&stream->out_transfer_func.tf_pts,
&update->out_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
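
Aside (not part of the patch): this hunk and the surface one above switch from copying through separately allocated transfer-func pointers to copying into members embedded in the stream/plane state. A trimmed sketch of the difference; the struct layout is a stand-in:

    /* Sketch only; dc_transfer_func here is a trimmed stand-in. */
    #include <string.h>

    struct dc_transfer_func { int type; int tf; };

    /* Before: a pointer member, so copies had to check for aliasing and
     * rely on a separate allocation that could be missing. */
    struct stream_old { struct dc_transfer_func *out_transfer_func; };

    /* After: an embedded member, so a plain copy is always valid and there
     * is no separate allocation to fail or dangle. */
    struct stream_new { struct dc_transfer_func out_transfer_func; };

    void apply_update(struct stream_new *stream, const struct dc_transfer_func *update)
    {
        if (update)
            memcpy(&stream->out_transfer_func, update,
                   sizeof(stream->out_transfer_func));
    }
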
@@ -3017,14 +3046,8 @@ static void backup_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
scratch->plane_states[i] = *status->plane_states[i];
- scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
- scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
- scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
- scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
- scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
}
scratch->stream_state = *stream;
- scratch->out_transfer_func = *stream->out_transfer_func;
}
static void restore_planes_and_stream_state(
@@ -3039,16 +3062,67 @@ static void restore_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
*status->plane_states[i] = scratch->plane_states[i];
- *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
- *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
- *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
- *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
- *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
}
*stream = scratch->stream_state;
- *stream->out_transfer_func = scratch->out_transfer_func;
}
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have an update
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth. Important to note that it is expected UEFI will
+ * only light up a single display on POST, therefore we only expect
+ * one stream with seamless boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+/**
+ * update_planes_and_stream_state() - The function takes planes and stream
+ * updates as inputs and determines the appropriate update type. If update type
+ * is FULL, the function allocates a new context, populates and validates it.
+ * Otherwise, it updates current dc context. The function will return both
+ * new_context and new_update_type back to the caller. The function also backs
+ * up both current and new contexts into corresponding dc state scratch memory.
+ * TODO: The function does too many things, and even conditionally allocates dc
+ * context memory implicitly. We should consider to break it down.
+ *
+ * @dc: Current DC state
+ * @srf_updates: an array of surface updates
+ * @surface_count: surface update count
+ * @stream: Corresponding stream to be updated
+ * @stream_update: stream update
+ * @new_update_type: [out] determined update type by the function
+ * @new_context: [out] new context allocated and validated if update type is
+ * FULL, reference to current context if update type is less than FULL.
+ *
+ * Return: true if a valid update is populated into new_context, false
+ * otherwise.
+ */
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -3072,9 +3146,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- backup_planes_and_stream_state(&dc->current_state->scratch, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
copy_stream_update_to_stream(dc, context, stream, stream_update);
@@ -3143,7 +3218,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_type >= UPDATE_TYPE_MED) {
+ if (update_type != UPDATE_TYPE_MED)
+ continue;
+ if (surface->update_flags.bits.clip_size_change ||
+ surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3160,19 +3238,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
BREAK_TO_DEBUGGER();
goto fail;
}
-
- for (i = 0; i < context->stream_count; i++) {
- struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
- context->streams[i]);
-
- if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
- resource_build_test_pattern_params(&context->res_ctx, otg_master);
- }
}
+ update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
*new_update_type = update_type;
- backup_planes_and_stream_state(&context->scratch, stream);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
@@ -3261,12 +3333,26 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->pending_test_pattern) {
- dc_link_dp_set_test_pattern(stream->link,
+ /*
+ * Test pattern params depend on ODM topology
+ * changes that we could be applying to the front
+ * end. Since front end changes are not yet
+ * applied at this stage, we can only apply the
+ * test pattern in hw based on the current state
+ * and populate the final test pattern params in
+ * the new state. If the current and new test
+ * pattern params differ as a result of a
+ * different ODM topology being used, that will be
+ * detected and handled during the front end
+ * programming update.
+ */
+ dc->link_srv->dp_set_test_pattern(stream->link,
stream->test_pattern.type,
stream->test_pattern.color_space,
stream->test_pattern.p_link_settings,
stream->test_pattern.p_custom_pattern,
stream->test_pattern.cust_pattern_size);
+ resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
}
if (stream_update->dpms_off) {
@@ -3363,6 +3449,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (srf_updates[i].surface->flip_immediate)
continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
sizeof(flip_addr->dirty_rects));
@@ -3479,6 +3566,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+
dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
@@ -3536,7 +3624,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->block_sequence,
&(context->block_sequence_steps),
top_pipe_to_program,
- stream_status);
+ stream_status,
+ context);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -4065,24 +4154,14 @@ struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
enum pipe_split_policy mpc_policy;
+ char force_odm[MAX_PIPES];
};
-static void release_minimal_transition_state(struct dc *dc,
- struct dc_state *context, struct pipe_split_policy_backup *policy)
-{
- dc_state_release(context);
- /* restore previous pipe split and odm policy */
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = policy->mpc_policy;
- dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
- dc->debug.force_disable_subvp = policy->subvp_policy;
-}
-
-static struct dc_state *create_minimal_transition_state(struct dc *dc,
- struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = NULL;
- unsigned int i, j;
+ int i;
if (!dc->config.is_vmin_only_asic) {
policy->mpc_policy = dc->debug.pipe_split_policy;
@@ -4092,97 +4171,257 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
dc->debug.enable_single_display_2to1_odm_policy = false;
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *minimal_transition_context,
+ struct dc_state *base_context,
+ struct pipe_split_policy_backup *policy)
+{
+ restore_minimal_pipe_split_policy(dc, base_context, policy);
+ dc_state_release(minimal_transition_context);
+}
+
+static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
+{
+ uint8_t i;
+ int j;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < context->stream_count; i++) {
+ stream_status = &context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++)
+ stream_status->plane_states[j]->flip_immediate = false;
+ }
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = NULL;
minimal_transition_context = dc_state_create_copy(base_context);
if (!minimal_transition_context)
return NULL;
+ backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
- for (i = 0; i < minimal_transition_context->stream_count; i++) {
- struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
-
- for (j = 0; j < stream_status->plane_count; j++) {
- struct dc_plane_state *plane_state = stream_status->plane_states[j];
-
- /* force vsync flip when reconfiguring pipes to prevent underflow
- * and corruption
- */
- plane_state->flip_immediate = false;
- }
- }
+ /* prevent underflow and corruption when reconfiguring pipes */
+ force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
- /* this should never happen */
- release_minimal_transition_state(dc, minimal_transition_context, policy);
+ /*
+ * This should never happen, minimal transition state should
+ * always be validated first before adding pipe split features.
+ */
+ release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
BREAK_TO_DEBUGGER();
minimal_transition_context = NULL;
}
return minimal_transition_context;
}
+static bool is_pipe_topology_transition_seamless_with_intermediate_step(
+ struct dc *dc,
+ struct dc_state *initial_state,
+ struct dc_state *intermediate_state,
+ struct dc_state *final_state)
+{
+ return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
+ intermediate_state) &&
+ dc->hwss.is_pipe_topology_transition_seamless(dc,
+ intermediate_state, final_state);
+}
+
+static void swap_and_release_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+
+ int i;
+ struct dc_state *old = dc->current_state;
+ struct pipe_ctx *pipe_ctx;
+
+ /* Since memory free requires elevated IRQ, an interrupt
+ * request is generated by mem free. If this happens
+ * between freeing and reassigning the context, our vsync
+ * interrupt will call into dc and cause a memory
+ * corruption. Hence, we first reassign the context,
+ * then free the old context.
+ */
+ dc->current_state = new_context;
+ dc_state_release(old);
+
+ // clear any forced full updates
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+}
+
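+/* Populate @srf_updates with empty updates that only reference the plane
+ * states currently attached to @stream. Returns the number of updates
+ * written, or 0 if the stream has no status.
+ */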
+static int initialize_empty_surface_updates(
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates)
+{
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+ int i;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < status->plane_count; i++)
+ srf_updates[i].surface = status->plane_states[i];
+
+ return status->plane_count;
+}
+
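+/* Derive a minimal transition state from @new_context and commit it if the
+ * pipe topology can move seamlessly from the current state through it to
+ * @new_context. Returns true if the intermediate state was committed.
+ */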
+static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context =
+ create_minimal_transition_state(dc, new_context,
+ &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = new state\n");
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(
+ dc, intermediate_context, new_context, &policy);
+ }
+ return success;
+}
+
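+/* Same as commit_minimal_transition_based_on_new_context(), but the minimal
+ * transition state is derived from the current state. Stream and plane
+ * states are temporarily restored from scratch so the current context is
+ * valid while the intermediate state is built and committed.
+ */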
+static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context;
+ struct dc_state *old_current_state = dc->current_state;
+ struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ int surface_count;
+
+ /*
+ * Both current and new contexts share the same stream and plane state
+ * pointers. When the new context is validated, the stream and planes
+ * get populated with new updates such as new plane addresses. This
+ * makes the current context no longer valid because the stream and
+ * planes are modified from the original. We back up the current
+ * stream and plane states into scratch space whenever we populate a
+ * new context, so we can restore the original values by calling the
+ * restore function here. This brings back the original stream and
+ * plane states associated with the current state.
+ */
+ restore_planes_and_stream_state(&dc->scratch.current_state, stream);
+ dc_state_retain(old_current_state);
+ intermediate_context = create_minimal_transition_state(dc,
+ old_current_state, &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = current state\n");
+ surface_count = initialize_empty_surface_updates(
+ stream, srf_updates);
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(dc, intermediate_context,
+ old_current_state, &policy);
+ }
+ dc_state_release(old_current_state);
+ /*
+ * Restore stream and plane states back to the values associated with
+ * new context.
+ */
+ restore_planes_and_stream_state(&dc->scratch.new_state, stream);
+ return success;
+}
/**
- * commit_minimal_transition_state - Commit a minimal state based on current or new context
+ * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
+ * on current or new context
*
* @dc: DC structure, used to get the current state
- * @context: New context
+ * @new_context: New context
* @stream: Stream getting the update for the flip
+ * @srf_updates: Surface updates
+ * @surface_count: Number of surfaces
*
- * The function takes in current state and new state and determine a minimal transition state
- * as the intermediate step which could make the transition between current and new states
- * seamless. If found, it will commit the minimal transition state and update current state to
- * this minimal transition state and return true, if not, it will return false.
+ * The function takes in the current state and the new state and determines
+ * a minimal transition state as the intermediate step that can make the
+ * transition between the current and new states seamless. If found, it
+ * commits the minimal transition state, updates the current state to it and
+ * returns true; otherwise it returns false.
*
* Return:
* Return True if the minimal transition succeeded, false otherwise
*/
-static bool commit_minimal_transition_state(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *stream)
-{
- bool success = false;
- struct dc_state *minimal_transition_context;
- struct pipe_split_policy_backup policy;
-
- /* commit based on new context */
- minimal_transition_context = create_minimal_transition_state(dc,
- context, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = new state\n", __func__);
-
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
- }
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
- }
-
- if (!success) {
- /* commit based on current context */
- restore_planes_and_stream_state(&dc->current_state->scratch, stream);
- minimal_transition_context = create_minimal_transition_state(dc,
- dc->current_state, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = current state\n", __func__);
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
- }
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
- }
- restore_planes_and_stream_state(&context->scratch, stream);
- }
-
- ASSERT(success);
+static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = commit_minimal_transition_based_on_new_context(
+ dc, new_context, stream, srf_updates,
+ surface_count);
+ if (!success)
+ success = commit_minimal_transition_based_on_current_context(dc,
+ new_context, stream);
+ if (!success)
+ DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
return success;
}
/**
- * commit_minimal_transition_state_legacy - Create a transition pipe split state
+ * commit_minimal_transition_state - Create a transition pipe split state
*
* @dc: Used to get the current state status
* @transition_base_context: New transition state
@@ -4199,7 +4438,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Return:
* Return false if something is wrong in the transition state.
*/
-static bool commit_minimal_transition_state_legacy(struct dc *dc,
+static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context;
@@ -4260,12 +4499,14 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
"Unknown");
+ dc_state_retain(transition_base_context);
transition_context = create_minimal_transition_state(dc,
transition_base_context, &policy);
if (transition_context) {
ret = dc_commit_state_no_check(dc, transition_context);
- release_minimal_transition_state(dc, transition_context, &policy);
+ release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
}
+ dc_state_release(transition_base_context);
if (ret != DC_OK) {
/* this should never happen */
@@ -4283,41 +4524,6 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
return true;
}
-/**
- * update_seamless_boot_flags() - Helper function for updating seamless boot flags
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @surface_count: Number of surfaces that have an updated
- * @stream: Corresponding stream to be updated in the current flip
- *
- * Updating seamless boot flags do not need to be part of the commit sequence. This
- * helper function will update the seamless boot flags on each flip (if required)
- * outside of the HW commit sequence (fast or slow).
- *
- * Return: void
- */
-static void update_seamless_boot_flags(struct dc *dc,
- struct dc_state *context,
- int surface_count,
- struct dc_stream_state *stream)
-{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
- /* Optimize seamless boot flag keeps clocks and watermarks high until
- * first flip. After first flip, optimization is required to lower
- * bandwidth. Important to note that it is expected UEFI will
- * only light up a single display on POST, therefore we only expect
- * one stream with seamless boot flag set.
- */
- if (stream->apply_seamless_boot_optimization) {
- stream->apply_seamless_boot_optimization = false;
-
- if (get_seamless_boot_stream_count(context) == 0)
- dc->optimized_required = true;
- }
- }
-}
-
static void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4437,123 +4643,9 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-bool dc_update_planes_and_stream(struct dc *dc,
+static bool update_planes_and_stream_v1(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update)
-{
- struct dc_state *context;
- enum surface_update_type update_type;
- int i;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- /* In cases where MPO and split or ODM are used transitions can
- * cause underflow. Apply stream configuration with minimal pipe
- * split first to avoid unsupported transitions for active pipes.
- */
- bool force_minimal_pipe_splitting = 0;
- bool is_plane_addition = 0;
- bool is_fast_update_only;
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
- surface_count, stream_update, stream);
- force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
- dc,
- stream,
- srf_updates,
- surface_count,
- &is_plane_addition);
-
- /* on plane addition, minimal state is the current one */
- if (force_minimal_pipe_splitting && is_plane_addition &&
- !commit_minimal_transition_state_legacy(dc, dc->current_state))
- return false;
-
- if (!update_planes_and_stream_state(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- &update_type,
- &context))
- return false;
-
- /* on plane removal, minimal state is the new one */
- if (force_minimal_pipe_splitting && !is_plane_addition) {
- if (!commit_minimal_transition_state_legacy(dc, context)) {
- dc_state_release(context);
- return false;
- }
- update_type = UPDATE_TYPE_FULL;
- }
-
- if (dc->hwss.is_pipe_topology_transition_seamless &&
- !dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context)) {
- commit_minimal_transition_state(dc,
- context, stream);
- }
- update_seamless_boot_flags(dc, context, surface_count, stream);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- if (!stream_update &&
- dc->hwss.is_pipe_topology_transition_seamless &&
- !dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context)) {
- DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
- BREAK_TO_DEBUGGER();
- }
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
-
- if (dc->current_state != context) {
-
- /* Since memory free requires elevated IRQL, an interrupt
- * request is generated by mem free. If this happens
- * between freeing and reassigning the context, our vsync
- * interrupt will call into dc and cause a memory
- * corruption BSOD. Hence, we first reassign the context,
- * then free the old context.
- */
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- // clear any forced full updates
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
- return true;
-}
-
-void dc_commit_updates_for_stream(struct dc *dc,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
@@ -4573,35 +4665,13 @@ void dc_commit_updates_for_stream(struct dc *dc,
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs.
- */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
-
- dc_update_planes_and_stream(dc, srf_updates,
- surface_count, stream,
- stream_update);
- return;
- }
-
- if (update_type >= update_surface_trace_level)
- update_surface_trace(dc, srf_updates, surface_count);
-
-
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
- return;
+ return false;
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4618,7 +4688,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
}
-
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
@@ -4643,13 +4712,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
- return;
+ return false;
}
}
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
- update_seamless_boot_flags(dc, context, surface_count, stream);
if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
@@ -4690,9 +4758,252 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
}
+ return true;
+}
+
+static bool update_planes_and_stream_v2(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *context;
+ enum surface_update_type update_type;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ /* In cases where MPO and split or ODM are used transitions can
+ * cause underflow. Apply stream configuration with minimal pipe
+ * split first to avoid unsupported transitions for active pipes.
+ */
+ bool force_minimal_pipe_splitting = 0;
+ bool is_plane_addition = 0;
+ bool is_fast_update_only;
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ srf_updates,
+ surface_count,
+ &is_plane_addition);
+
+ /* on plane addition, minimal state is the current one */
+ if (force_minimal_pipe_splitting && is_plane_addition &&
+ !commit_minimal_transition_state(dc, dc->current_state))
+ return false;
+
+ if (!update_planes_and_stream_state(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_type,
+ &context))
+ return false;
+
+ /* on plane removal, minimal state is the new one */
+ if (force_minimal_pipe_splitting && !is_plane_addition) {
+ if (!commit_minimal_transition_state(dc, context)) {
+ dc_state_release(context);
+ return false;
+ }
+ update_type = UPDATE_TYPE_FULL;
+ }
- return;
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ commit_minimal_transition_state_in_dc_update(dc, context, stream,
+ srf_updates, surface_count);
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ if (!stream_update &&
+ dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ if (dc->current_state != context)
+ swap_and_release_current_context(dc, context, stream);
+ return true;
+}
+
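+/* Commit a non-full update (update_type < UPDATE_TYPE_FULL) directly on the
+ * current context, using the fast commit path when only fast updates are
+ * present and the legacy fast update path is not enabled.
+ */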
+static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type)
+{
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ ASSERT(update_type < UPDATE_TYPE_FULL);
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update)
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+ else
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+}
+
+static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *new_context)
+{
+ ASSERT(update_type >= UPDATE_TYPE_FULL);
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
+ dc->current_state, new_context))
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes, such as
+ * dynamic ODM or SubVP, shall only be enabled when they can be
+ * transitioned seamlessly to AND from their minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled, so it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we will insert the
+ * minimal transition state A' or B' in between so seamless
+ * transition between A and B can be made possible.
+ */
+ commit_minimal_transition_state_in_dc_update(dc, new_context,
+ stream, srf_updates, surface_count);
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ new_context);
+}
+
+static bool update_planes_and_stream_v3(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *new_context;
+ enum surface_update_type update_type;
+
+ /*
+ * When this function returns true and new_context is not equal to
+ * current state, the function allocates and validates a new dc state
+ * and assigns it to new_context. The caller is responsible for freeing
+ * this memory when new_context is no longer used. We swap the current
+ * context with the new one and free the current one instead, so
+ * new_context's memory lives until the next full update replaces it
+ * with a newer context. Refer to the use of
+ * swap_and_release_current_context below.
+ */
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
+ &new_context))
+ return false;
+
+ if (new_context == dc->current_state) {
+ commit_planes_and_stream_update_on_current_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type);
+ } else {
+ commit_planes_and_stream_update_with_new_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
+ }
+
+ return true;
+}
+
+bool dc_update_planes_and_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /*
+ * update planes and stream version 3 separates FULL and FAST updates
+ * into their own sequences. It aims to clean up frequent checks for
+ * update type that result in unnecessary branching in the logic flow. It
+ * also adds a new commit minimal transition sequence, which detects the
+ * need for a minimal transition based on the actual comparison of current
+ * and new states instead of "predicting" it based on per-feature software
+ * policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
+ * minimal transition sequence is made universal to any power saving
+ * features that would use extra free pipes such as Dynamic ODM/MPC
+ * Combine, MPO or SubVP. Therefore there is no longer a need to
+ * specially handle compatibility problems with transitions among those
+ * features as they are now transparent to the new sequence.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ return update_planes_and_stream_v3(dc, srf_updates,
+ surface_count, stream, stream_update);
+ return update_planes_and_stream_v2(dc, srf_updates,
+ surface_count, stream, stream_update);
+}
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /* TODO: Since change commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -4735,8 +5046,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
void dc_power_down_on_boot(struct dc *dc)
{
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
- dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot) {
+
+ if (dc->caps.ips_support)
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.power_down_on_boot(dc);
+ }
}
void dc_set_power_state(
@@ -4874,11 +5190,15 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active)
return true;
}
-void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (allow != dc->idle_optimizations_allowed)
+ DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
+ dc->idle_optimizations_allowed, allow, caller_name);
+
if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return;
@@ -4893,10 +5213,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
-void dc_exit_ips_for_hw_access(struct dc *dc)
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
{
if (dc->caps.ips_support)
- dc_allow_idle_optimizations(dc, false);
+ dc_allow_idle_optimizations_internal(dc, false, caller_name);
}
bool dc_dmub_is_ips_idle_state(struct dc *dc)
@@ -5030,10 +5350,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
}
dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr)
{
- if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
return true;
return false;
}
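For orientation, here is a minimal caller-side sketch of the reworked update entry point (illustrative only, not part of this patch; the helper name example_resubmit_current_planes is hypothetical). It builds surface updates that merely reference the stream's existing planes and lets dc_update_planes_and_stream() pick the v2 or v3 sequence internally based on the ASIC version; real callers also populate flip addresses and plane/scaling info.

static bool example_resubmit_current_planes(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
	struct dc_stream_status *status = dc_stream_get_status(stream);
	int i;

	if (!status || status->plane_count == 0)
		return false;

	/* reference the planes already attached to the stream */
	for (i = 0; i < status->plane_count; i++)
		srf_updates[i].surface = status->plane_states[i];

	/* v3 on ASICs newer than DCN 3.5.1, v2 otherwise */
	return dc_update_planes_and_stream(dc, srf_updates,
			status->plane_count, stream, NULL);
}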
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 9c05b1a07142..5c1d3017aefd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -392,10 +392,10 @@ void get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
@@ -403,10 +403,10 @@ void get_hdr_visual_confirm_color(
is_sdr = true;
break;
case PIXEL_FORMAT_FP16:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
} else
@@ -558,9 +558,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status)
+ struct dc_stream_status *stream_status,
+ struct dc_state *context)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ec4bf9432bdb..15819416a2f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -340,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
return res_pool;
}
-void dc_destroy_resource_pool(struct dc *dc)
+void dc_destroy_resource_pool(struct dc *dc)
{
if (dc) {
if (dc->res_pool)
@@ -1457,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
controller_color_space = convert_dp_to_controller_color_space(
otg_master->stream->test_pattern.color_space);
+ if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE)
+ return;
+
odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
odm_slice_width = h_active / odm_cnt;
@@ -1485,6 +1488,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
+
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
@@ -1496,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return false;
}
- pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
- pipe_ctx->plane_state->format);
-
/* Timing borders are part of vactive that we are also supposed to skip in addition
* to any stream dst offset. Since dm logic assumes dst is in addressable
* space we need to add the left and top borders to dst offsets temporarily.
@@ -1510,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
/* depends on h_active */
calculate_recout(pipe_ctx);
@@ -1794,6 +1797,30 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
return free_pipe_idx;
}
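+
+/* Find a pipe that is used only as a secondary DPP pipe (not an OPP head)
+ * in the current context and is free in the new context. Returns
+ * FREE_PIPE_INDEX_NOT_FOUND if no such pipe exists.
+ */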
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+ !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
+ }
+
+ return free_pipe_idx;
+}
+
int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
@@ -2168,50 +2195,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
}
}
-void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
+ struct pipe_ctx *otg_master, int stream_idx)
{
- struct pipe_ctx *otg_master;
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
- int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
+ int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
bool is_primary;
DC_LOGGER_INIT(dc->ctx->logger);
+ slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+ for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ plane_idx = -1;
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &state->res_ctx,
+ dpp_pipes);
+ for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+ is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
+ dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
+ if (is_primary)
+ plane_idx++;
+ resource_log_pipe(dc, dpp_pipes[dpp_idx],
+ stream_idx, slice_idx,
+ plane_idx, slice_count,
+ is_primary);
+ }
+ } else {
+ resource_log_pipe(dc, opp_heads[slice_idx],
+ stream_idx, slice_idx, plane_idx,
+ slice_count, true);
+ }
+
+ }
+}
+
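+/* Map @stream to its index in @state's stream array. Asserts and returns 0
+ * if the stream is not part of the state.
+ */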
+static int resource_stream_to_stream_idx(struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i, stream_idx = -1;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_idx = i;
+ break;
+ }
+
+ /* never return negative array index */
+ if (stream_idx == -1) {
+ ASSERT(0);
+ return 0;
+ }
+
+ return stream_idx;
+}
+
+void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+{
+ struct pipe_ctx *otg_master;
+ int stream_idx, phantom_stream_idx;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->streams[stream_idx]->is_phantom)
+ continue;
+
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
- if (!otg_master || otg_master->stream_res.tg == NULL) {
- DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
- return;
- }
- slice_count = resource_get_opp_heads_for_otg_master(otg_master,
- &state->res_ctx, opp_heads);
- for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
- plane_idx = -1;
- if (opp_heads[slice_idx]->plane_state) {
- dpp_count = resource_get_dpp_pipes_for_opp_head(
- opp_heads[slice_idx],
- &state->res_ctx,
- dpp_pipes);
- for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
- is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
- dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
- if (is_primary)
- plane_idx++;
- resource_log_pipe(dc, dpp_pipes[dpp_idx],
- stream_idx, slice_idx,
- plane_idx, slice_count,
- is_primary);
- }
- } else {
- resource_log_pipe(dc, opp_heads[slice_idx],
- stream_idx, slice_idx, plane_idx,
- slice_count, true);
- }
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ }
+ if (state->phantom_stream_count > 0) {
+ DC_LOG_DC(" | (phantom pipes) |");
+ for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
+ continue;
+ phantom_stream_idx = resource_stream_to_stream_idx(state,
+ state->stream_status[stream_idx].mall_stream_config.paired_stream);
+ otg_master = resource_get_otg_master_for_stream(
+ &state->res_ctx, state->streams[phantom_stream_idx]);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2266,6 +2334,9 @@ static bool update_pipe_params_after_odm_slice_count_change(
if (pool->funcs->build_pipe_pix_clk_params)
pool->funcs->build_pipe_pix_clk_params(otg_master);
+
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+
return result;
}
@@ -2624,13 +2695,19 @@ bool resource_append_dpp_pipes_for_plane_composition(
struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state)
{
+ bool success;
if (otg_master_pipe->plane_state == NULL)
- return add_plane_to_opp_head_pipes(otg_master_pipe,
+ success = add_plane_to_opp_head_pipes(otg_master_pipe,
plane_state, new_ctx);
else
- return acquire_secondary_dpp_pipes_and_add_plane(
+ success = acquire_secondary_dpp_pipes_and_add_plane(
otg_master_pipe, plane_state, new_ctx,
cur_ctx, pool);
+ if (success)
+ /* when appending a plane, the MPC slice count changes from 0 to 1 */
+ success = update_pipe_params_after_mpc_slice_count_change(
+ plane_state, new_ctx, pool);
+ return success;
}
void resource_remove_dpp_pipes_for_plane_composition(
@@ -2965,7 +3042,7 @@ bool resource_update_pipes_for_plane_with_slice_count(
int i;
int dpp_pipe_count;
int cur_slice_count;
- struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
bool result = true;
dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane,
@@ -3117,6 +3194,9 @@ static struct audio *find_first_free_audio(
{
int i, available_audio_count;
+ if (id == ENGINE_ID_UNKNOWN)
+ return NULL;
+
available_audio_count = pool->audio_count;
for (i = 0; i < available_audio_count; i++) {
@@ -3371,11 +3451,31 @@ static bool acquire_otg_master_pipe_for_stream(
* any free pipes already used in current context as this could tear
* down exiting ODM/MPC/MPO configuration unnecessarily.
*/
+
+ /*
+ * Try to acquire the same OTG master already in use. This is not
+ * optimal because resetting an enabled OTG master pipe for a new stream
+ * requires an extra frame of wait. However, there are test automation
+ * and eDP assumptions that rely on reusing the same OTG master pipe
+ * during mode change. We have to keep this logic as is for now.
+ */
pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * Try to acquire a pipe not used in current resource context to avoid
+ * pipe swapping.
+ */
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * If pipe swapping is unavoidable, try to acquire a pipe used as a
+ * secondary DPP pipe in the current state, as we prioritize supporting
+ * more streams over supporting MPO planes.
+ */
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
@@ -3990,7 +4090,7 @@ static void set_avi_info_frame(
}
if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
}
@@ -4992,3 +5092,39 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
return false;
}
+
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
+{
+ dml2_options->callbacks.dc = dc;
+ dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params;
+ dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
+ dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
+ dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
+ dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
+ dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count;
+ dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
+ dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count;
+ dml2_options->callbacks.get_opp_head = &resource_get_opp_head;
+ dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream;
+ dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master;
+ dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane;
+ dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
+ dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
+
+ dml2_options->svp_pstate.callbacks.dc = dc;
+ dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
+ dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
+ dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
+ dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 5f6392ae31a6..cd6570a1e20e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
- notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 5cc7f8da209c..76bb05f4d6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -188,8 +188,11 @@ static void init_state(struct dc *dc, struct dc_state *state)
}
/* Public dc_state functions */
-struct dc_state *dc_state_create(struct dc *dc)
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_options;
+#endif
struct dc_state *state = kvzalloc(sizeof(struct dc_state),
GFP_KERNEL);
@@ -198,10 +201,16 @@ struct dc_state *dc_state_create(struct dc *dc)
init_state(dc, state);
dc_state_construct(dc, state);
+ state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC;
#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2)
- dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+ if (dc->debug.using_dml2) {
+ dml2_opt->use_clock_dc_limits = false;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
+
+ dml2_opt->use_clock_dc_limits = true;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
+ }
#endif
kref_init(&state->refcount);
@@ -214,6 +223,7 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
struct kref refcount = dst_state->refcount;
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+ struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source;
#endif
dc_state_copy_internal(dst_state, src_state);
@@ -222,6 +232,10 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
dst_state->bw_ctx.dml2 = dst_dml2;
if (src_state->bw_ctx.dml2)
dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+
+ dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source;
+ if (src_state->bw_ctx.dml2_dc_power_source)
+ dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source);
#endif
/* context refcount should not be overridden */
@@ -245,6 +259,12 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_release(new_state);
return NULL;
}
+
+ if (src_state->bw_ctx.dml2_dc_power_source &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
#endif
kref_init(&new_state->refcount);
@@ -310,7 +330,6 @@ void dc_state_destruct(struct dc_state *state)
memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
state->dmub_cmd_count = 0;
memset(&state->perf_params, 0, sizeof(state->perf_params));
- memset(&state->scratch, 0, sizeof(state->scratch));
}
void dc_state_retain(struct dc_state *state)
@@ -327,6 +346,9 @@ static void dc_state_free(struct kref *kref)
#ifdef CONFIG_DRM_AMD_DC_FP
dml2_destroy(state->bw_ctx.dml2);
state->bw_ctx.dml2 = 0;
+
+ dml2_destroy(state->bw_ctx.dml2_dc_power_source);
+ state->bw_ctx.dml2_dc_power_source = 0;
#endif
kvfree(state);
@@ -341,7 +363,7 @@ void dc_state_release(struct dc_state *state)
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_state_add_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -370,7 +392,7 @@ enum dc_status dc_state_add_stream(
* dc_state_remove_stream() - Remove a stream from a dc_state.
*/
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -436,6 +458,15 @@ bool dc_state_add_plane(
goto out;
}
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes.
+ * We reset the ODM slice count back to 1 when all planes have
+ * been removed to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)
@@ -586,7 +617,7 @@ bool dc_state_add_all_planes_for_stream(
*/
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream)
+ const struct dc_stream_state *stream)
{
uint8_t i;
@@ -680,7 +711,7 @@ void dc_state_release_phantom_stream(const struct dc *dc,
dc_stream_release(phantom_stream);
}
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane)
{
@@ -716,7 +747,7 @@ void dc_state_release_phantom_plane(const struct dc *dc,
}
/* add phantom streams to context and generate correct meta inside dc_state */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream)
@@ -742,7 +773,7 @@ enum dc_status dc_state_add_phantom_stream(struct dc *dc,
return res;
}
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream)
{
@@ -836,7 +867,7 @@ bool dc_state_add_all_phantom_planes_for_stream(
}
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -858,7 +889,7 @@ bool dc_state_remove_phantom_streams_and_planes(
}
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -869,3 +900,19 @@ void dc_state_release_phantom_streams_and_planes(
for (i = 0; i < state->phantom_plane_count; i++)
dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
}
+
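+/* Look up a stream in @state by its stream_id. Returns NULL if no stream
+ * with the given id exists in the state.
+ */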
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
+{
+ struct dc_stream_state *stream = NULL;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] && state->streams[i]->stream_id == id) {
+ stream = state->streams[i];
+ break;
+ }
+ }
+
+ return stream;
+}
+
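As a quick illustration of the reworked dc_state_create() signature (a hypothetical fragment, not part of this patch), callers now pass an optional parameter block that tags the new state with a power source (DC_POWER_SOURCE_AC is used when params is NULL):

	struct dc_state_create_params params = {
		.power_source = DC_POWER_SOURCE_AC,
	};
	struct dc_state *state = dc_state_create(dc, &params);

	if (state)
		dc_state_release(state); /* drop the reference when done */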
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 51a970fcb5d0..5c7e4884cac2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -116,12 +116,7 @@ bool dc_stream_construct(struct dc_stream_state *stream,
update_stream_signal(stream, dc_sink_data);
- stream->out_transfer_func = dc_create_transfer_func();
- if (stream->out_transfer_func == NULL) {
- dc_sink_release(dc_sink_data);
- return false;
- }
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func.type = TF_TYPE_BYPASS;
dc_stream_assign_stream_id(stream);
@@ -131,10 +126,6 @@ bool dc_stream_construct(struct dc_stream_state *stream,
void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
- if (stream->out_transfer_func != NULL) {
- dc_transfer_func_release(stream->out_transfer_func);
- stream->out_transfer_func = NULL;
- }
}
void dc_stream_assign_stream_id(struct dc_stream_state *stream)
@@ -201,9 +192,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->sink)
dc_sink_retain(new_stream->sink);
- if (new_stream->out_transfer_func)
- dc_transfer_func_retain(new_stream->out_transfer_func);
-
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
@@ -319,7 +307,7 @@ bool dc_stream_set_cursor_attributes(
program_cursor_attributes(dc, stream, attributes);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -394,7 +382,7 @@ bool dc_stream_set_cursor_position(
program_cursor_position(dc, stream, position);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -425,7 +413,7 @@ bool dc_stream_add_writeback(struct dc *dc,
dc_exit_ips_for_hw_access(dc);
- wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
+ wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
dwb->dwb_is_drc = false;
@@ -507,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ unsigned int i, j;
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 19140fb65787..067f6555cfdf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -41,25 +41,15 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
{
plane_state->ctx = ctx;
- plane_state->gamma_correction = dc_create_gamma();
- if (plane_state->gamma_correction != NULL)
- plane_state->gamma_correction->is_identity = true;
+ plane_state->gamma_correction.is_identity = true;
- plane_state->in_transfer_func = dc_create_transfer_func();
- if (plane_state->in_transfer_func != NULL) {
- plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- }
- plane_state->in_shaper_func = dc_create_transfer_func();
- if (plane_state->in_shaper_func != NULL) {
- plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
- }
+ plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
- plane_state->lut3d_func = dc_create_3dlut_func();
+ plane_state->in_shaper_func.type = TF_TYPE_BYPASS;
- plane_state->blend_tf = dc_create_transfer_func();
- if (plane_state->blend_tf != NULL) {
- plane_state->blend_tf->type = TF_TYPE_BYPASS;
- }
+ plane_state->lut3d_func.state.raw = 0;
+
+ plane_state->blend_tf.type = TF_TYPE_BYPASS;
plane_state->pre_multiplied_alpha = true;
@@ -67,30 +57,27 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
void dc_plane_destruct(struct dc_plane_state *plane_state)
{
- if (plane_state->gamma_correction != NULL) {
- dc_gamma_release(&plane_state->gamma_correction);
- }
- if (plane_state->in_transfer_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_transfer_func);
- plane_state->in_transfer_func = NULL;
- }
- if (plane_state->in_shaper_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_shaper_func);
- plane_state->in_shaper_func = NULL;
- }
- if (plane_state->lut3d_func != NULL) {
- dc_3dlut_func_release(
- plane_state->lut3d_func);
- plane_state->lut3d_func = NULL;
- }
- if (plane_state->blend_tf != NULL) {
- dc_transfer_func_release(
- plane_state->blend_tf);
- plane_state->blend_tf = NULL;
+ // no more pointers to free within dc_plane_state
+}
+
+
+/* dc_state is passed in separately since it may differ from the current dc
+ * state accessible from plane_state, e.g. if the driver is doing an update
+ * from an old context to a new one and the caller wants the pipe mask for
+ * the new context rather than the existing one.
+ */
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state)
+{
+ uint8_t pipe_mask = 0;
+ int i;
+
+ for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp)
+ pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst;
}
+ return pipe_mask;
}
/*******************************************************************************
@@ -103,7 +90,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
/*register_flip_interrupt(surface);*/
}
-struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index ee8453bf958f..3c33c3bcbe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,6 +44,8 @@
#include "dml2/dml2_wrapper.h"
+#include "dmub/inc/dmub_cmd.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -51,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.273"
+#define DC_VER "3.2.281"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -219,6 +221,7 @@ struct dc_dmub_caps {
bool mclk_sw;
bool subvp_psr;
bool gecc_enable;
+ uint8_t fams_ver;
};
struct dc_caps {
@@ -306,12 +309,12 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
- //These bitfields to be used starting with DCN
+ //These bitfields to be used starting with DCN 3.0
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
- uint32_t dcc_256_128_128 : 1; //available starting with DCN
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
} dcc_controls;
};
@@ -435,6 +438,9 @@ struct dc_config {
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
bool usb4_bw_alloc_support;
+ bool allow_0_dtb_clk;
+ bool use_assr_psp_message;
+ bool support_edp0_on_dp1;
};
enum visual_confirm {
@@ -693,6 +699,8 @@ enum pg_hw_pipe_resources {
PG_MPCC,
PG_OPP,
PG_OPTC,
+ PG_DPSTREAM,
+ PG_HDMISTREAM,
PG_HW_PIPE_RESOURCES_NUM_ELEMENT
};
@@ -987,14 +995,17 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool optimize_ips_handshake;
bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
+ bool enable_idle_reg_checks;
unsigned int static_screen_wait_frames;
bool force_chroma_subsampling_1tap;
+ bool disable_422_left_edge_pixel;
+ unsigned int force_cositing;
};
-struct gpu_info_soc_bounding_box_v1_0;
/* Generic structure that can be used to query properties of DC. More fields
* can be added as required.
@@ -1003,76 +1014,6 @@ struct dc_current_properties {
unsigned int cursor_size_limit;
};
-struct dc {
- struct dc_debug_options debug;
- struct dc_versions versions;
- struct dc_caps caps;
- struct dc_cap_funcs cap_funcs;
- struct dc_config config;
- struct dc_bounding_box_overrides bb_overrides;
- struct dc_bug_wa work_arounds;
- struct dc_context *ctx;
- struct dc_phy_addr_space_config vm_pa_config;
-
- uint8_t link_count;
- struct dc_link *links[MAX_PIPES * 2];
- struct link_service *link_srv;
-
- struct dc_state *current_state;
- struct resource_pool *res_pool;
-
- struct clk_mgr *clk_mgr;
-
- /* Display Engine Clock levels */
- struct dm_pp_clock_levels sclk_lvls;
-
- /* Inputs into BW and WM calculations. */
- struct bw_calcs_dceip *bw_dceip;
- struct bw_calcs_vbios *bw_vbios;
- struct dcn_soc_bounding_box *dcn_soc;
- struct dcn_ip_params *dcn_ip;
- struct display_mode_lib dml;
-
- /* HW functions */
- struct hw_sequencer_funcs hwss;
- struct dce_hwseq *hwseq;
-
- /* Require to optimize clocks and bandwidth for added/removed planes */
- bool optimized_required;
- bool wm_optimized_required;
- bool idle_optimizations_allowed;
- bool enable_c20_dtm_b0;
-
- /* Require to maintain clocks and bandwidth for UEFI enabled HW */
-
- /* FBC compressor */
- struct compressor *fbc_compressor;
-
- struct dc_debug_data debug_data;
- struct dpcd_vendor_signature vendor_signature;
-
- const char *build_id;
- struct vm_helper *vm_helper;
-
- uint32_t *dcn_reg_offsets;
- uint32_t *nbio_reg_offsets;
- uint32_t *clk_reg_offsets;
-
- /* Scratch memory */
- struct {
- struct {
- /*
- * For matching clock_limits table in driver with table
- * from PMFW.
- */
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- } update_bw_bounding_box;
- } scratch;
-
- struct dml2_configuration_options dml2_options;
- enum dc_acpi_cm_power_state power_state;
-};
-
enum frame_buffer_mode {
FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
FRAME_BUFFER_MODE_ZFB_ONLY,
@@ -1277,6 +1218,8 @@ union surface_update_flags {
uint32_t raw;
};
+#define DC_REMOVE_PLANE_POINTERS 1
+
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
@@ -1291,8 +1234,8 @@ struct dc_plane_state {
struct dc_plane_dcc_param dcc;
- struct dc_gamma *gamma_correction;
- struct dc_transfer_func *in_transfer_func;
+ struct dc_gamma gamma_correction;
+ struct dc_transfer_func in_transfer_func;
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
@@ -1304,9 +1247,9 @@ struct dc_plane_state {
enum dc_color_space color_space;
- struct dc_3dlut *lut3d_func;
- struct dc_transfer_func *in_shaper_func;
- struct dc_transfer_func *blend_tf;
+ struct dc_3dlut lut3d_func;
+ struct dc_transfer_func in_shaper_func;
+ struct dc_transfer_func blend_tf;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
@@ -1342,6 +1285,7 @@ struct dc_plane_state {
struct tg_color visual_confirm_color;
bool is_statically_allocated;
+ enum chroma_cositing cositing;
};
struct dc_plane_info {
@@ -1360,6 +1304,96 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
+ enum chroma_cositing cositing;
+};
+
+#include "dc_stream.h"
+
+struct dc_scratch_space {
+ /* used to temporarily back up the plane states of a stream during a
+ * dc update. Plane states are overwritten with surface updates during
+ * the update; once they are overwritten, the current state is no longer
+ * valid. Temporarily storing the current plane states here lets us
+ * recover a valid current state during the dc update.
+ */
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+
+ struct dc_stream_state stream_state;
+};
+
+struct dc {
+ struct dc_debug_options debug;
+ struct dc_versions versions;
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_config config;
+ struct dc_bounding_box_overrides bb_overrides;
+ struct dc_bug_wa work_arounds;
+ struct dc_context *ctx;
+ struct dc_phy_addr_space_config vm_pa_config;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_LINKS];
+ struct link_service *link_srv;
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ struct clk_mgr *clk_mgr;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* Require to optimize clocks and bandwidth for added/removed planes */
+ bool optimized_required;
+ bool wm_optimized_required;
+ bool idle_optimizations_allowed;
+ bool enable_c20_dtm_b0;
+
+ /* Require to maintain clocks and bandwidth for UEFI enabled HW */
+
+ /* FBC compressor */
+ struct compressor *fbc_compressor;
+
+ struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
+
+ const char *build_id;
+ struct vm_helper *vm_helper;
+
+ uint32_t *dcn_reg_offsets;
+ uint32_t *nbio_reg_offsets;
+ uint32_t *clk_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ struct dc_scratch_space current_state;
+ struct dc_scratch_space new_state;
+ struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ } scratch;
+
+ struct dml2_configuration_options dml2_options;
+ enum dc_acpi_cm_power_state power_state;
+
};
struct dc_scaling_info {
@@ -1476,10 +1510,15 @@ bool dc_acquire_release_mpc_3dlut(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count);
+/*
+ * Set up the streams and links associated with the sinks to be driven.
+ * The streams parameter is the absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder and Timing Generator are programmed and enabled.
+ * New streams are enabled with a blank stream; no memory is read yet.
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
@@ -2335,11 +2374,17 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
- struct dc_cursor_attributes *cursor_attr);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr);
+
+#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__)
+#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__)
-void dc_allow_idle_optimizations(struct dc *dc, bool allow);
-void dc_exit_ips_for_hw_access(struct dc *dc);
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name);
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
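Not part of the patch above; the hunk converts dc_allow_idle_optimizations() and dc_exit_ips_for_hw_access() into macros so that __func__ at each call site is forwarded to an *_internal() helper for logging. Below is a minimal standalone sketch of that caller-name-capturing pattern; the demo_* names are illustrative only and not part of the DC API.

#include <stdio.h>
#include <stdbool.h>

static void demo_allow_idle_internal(bool allow, const char *caller_name)
{
        /* The _internal helper receives the caller's function name for logging. */
        printf("allow_idle=%d requested by %s\n", allow, caller_name);
}

/* The public name stays the same; __func__ is captured at each call site. */
#define demo_allow_idle(allow) demo_allow_idle_internal(allow, __func__)

static void some_hw_path(void)
{
        demo_allow_idle(false);   /* logs "requested by some_hw_path" */
}

int main(void)
{
        some_hw_path();
        return 0;
}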
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6083b1dcf050..2293a92df3be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -23,6 +23,7 @@
*
*/
+#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
@@ -34,6 +35,7 @@
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -198,6 +200,11 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ if (!dmub->debug.timeout_occured) {
+ dmub->debug.timeout_occured = true;
+ dmub->debug.timeout_cmd = *cmd_list;
+ dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ }
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
@@ -904,12 +911,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_diagnostic_data diag_data = {0};
+ uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
+ DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
+
if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
@@ -933,7 +943,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
- DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
@@ -1199,8 +1210,23 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
return true;
}
+static int count_active_streams(const struct dc *dc)
+{
+ int i, count = 0;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (stream && !stream->dpms_off)
+ count += 1;
+ }
+
+ return count;
+}
+
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ volatile const struct dmub_shared_state_ips_fw *ips_fw;
struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
@@ -1211,6 +1237,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
return;
dc_dmub_srv = dc->ctx->dmub_srv;
+ ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
@@ -1226,6 +1253,12 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals new_signals;
+ DC_LOG_IPS(
+ "%s wait idle (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1245,19 +1278,46 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
+ /* TODO: Move this logic out to hwseq */
+ if (count_active_streams(dc) == 0) {
+ /* IPS2 - Display off */
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else {
+ /* RCG only */
+ new_signals.bits.allow_pg = 0;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 0;
+ new_signals.bits.allow_z10 = 0;
+ }
}
ips_driver->signals = new_signals;
}
+ DC_LOG_IPS(
+ "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ allow_idle,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Register access should stop at this point. */
+ if (allow_idle)
+ dc_dmub_srv->needs_idle_wake = true;
}
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1274,40 +1334,113 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
+ rcg_exit_count = ips_fw->rcg_exit_count;
+ ips1_exit_count = ips_fw->ips1_exit_count;
+ ips2_exit_count = ips_fw->ips2_exit_count;
+
ips_driver->signals.all = 0;
- if (prev_driver_signals.bits.allow_ips2) {
- udelay(dc->debug.ips2_eval_delay_us);
+ DC_LOG_IPS(
+ "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ ips_driver->signals.bits.allow_ips1,
+ ips_driver->signals.bits.allow_ips2,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->rcg_entry_count,
+ ips_fw->ips1_entry_count,
+ ips_fw->ips2_entry_count);
+
+ /* Note: register access has technically not resumed for DCN here, but we
+ * need to message PMFW through our standard register interface.
+ */
+ dc_dmub_srv->needs_idle_wake = false;
+
+ if (prev_driver_signals.bits.allow_ips2 &&
+ (!dc->debug.optimize_ips_handshake ||
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ DC_LOG_IPS(
+ "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
+ udelay(dc->debug.ips2_eval_delay_us);
if (ips_fw->signals.bits.ips2_commit) {
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
+ DC_LOG_IPS(
+ "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips2_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+ DC_LOG_IPS(
+ "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1) {
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+
+ DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ rcg_exit_count,
+ ips1_exit_count,
+ ips2_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
@@ -1335,21 +1468,42 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
if (dc_dmub_srv->idle_allowed == allow_idle)
return;
+ DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
+
/*
* Entering a low power state requires a driver notification.
* Powering up the hardware requires notifying PMFW and DMCUB.
* Clearing the driver idle allow requires a DMCUB command.
* DMCUB commands requires the DMCUB to be powered up and restored.
- *
- * Exit out early to prevent an infinite loop of DMCUB commands
- * triggering exit low power - use software state to track this.
*/
- dc_dmub_srv->idle_allowed = allow_idle;
- if (!allow_idle)
+ if (!allow_idle) {
+ dc_dmub_srv->idle_exit_counter += 1;
+
dc_dmub_srv_exit_low_power_state(dc);
- else
+ /*
+ * Idle is considered fully exited only after the sequence above
+ * fully completes. If we have a race of two threads exiting
+ * at the same time then it's safe to perform the sequence
+ * twice as long as we're not re-entering.
+ *
+ * Infinite command submission is avoided by using the
+ * dm_execute_dmub_cmd submission instead of the "wake" helpers.
+ */
+ dc_dmub_srv->idle_allowed = false;
+
+ dc_dmub_srv->idle_exit_counter -= 1;
+ if (dc_dmub_srv->idle_exit_counter < 0) {
+ ASSERT(0);
+ dc_dmub_srv->idle_exit_counter = 0;
+ }
+ } else {
+ /* Consider idle as notified prior to the actual submission to
+ * prevent multiple entries. */
+ dc_dmub_srv->idle_allowed = true;
+
dc_dmub_srv_notify_idle(dc, allow_idle);
+ }
}
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
@@ -1384,7 +1538,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1433,8 +1588,10 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
}
+
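Not part of the patch above; a simplified, single-threaded sketch of the idle_exit_counter guard introduced in dc_dmub_srv_apply_idle_power_optimizations(): idle is only re-allowed after a command when no exit sequence is in flight. The demo_* names are stand-ins, not the driver's API.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_srv {
        int32_t idle_exit_counter;
        bool idle_allowed;
};

static void demo_apply_idle(struct demo_srv *srv, bool allow)
{
        if (srv->idle_allowed == allow)
                return;

        if (!allow) {
                srv->idle_exit_counter += 1;
                /* ... exit-low-power sequence would run here ... */
                srv->idle_allowed = false;
                srv->idle_exit_counter -= 1;
                if (srv->idle_exit_counter < 0) {
                        assert(0);
                        srv->idle_exit_counter = 0;
                }
        } else {
                /* Mark idle as notified before submitting to avoid re-entry. */
                srv->idle_allowed = true;
                /* ... notify-idle command would be submitted here ... */
        }
}

static void demo_cmd_done(struct demo_srv *srv, bool reallow_idle)
{
        /* Mirror of the guarded re-allow: skip while an exit is in flight. */
        if (reallow_idle && srv->idle_exit_counter == 0)
                demo_apply_idle(srv, true);
}

int main(void)
{
        struct demo_srv srv = { .idle_exit_counter = 0, .idle_allowed = true };

        demo_apply_idle(&srv, false);  /* exit idle for HW access */
        demo_cmd_done(&srv, true);     /* safe to re-allow: counter is zero */
        printf("idle_allowed=%d\n", srv.idle_allowed);
        return 0;
}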
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 952bfb368886..2c5866211f60 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -35,6 +35,7 @@ struct pipe_ctx;
struct dc_crtc_timing_adjust;
struct dc_crtc_timing;
struct dc_state;
+struct dc_surface_update;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -51,7 +52,9 @@ struct dc_dmub_srv {
struct dc_context *ctx;
void *dm;
+ int32_t idle_exit_counter;
bool idle_allowed;
+ bool needs_idle_wake;
};
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 1cb7765f593a..519c3df78ee5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -137,8 +137,13 @@ enum dp_link_encoding {
enum dp_test_link_rate {
DP_TEST_LINK_RATE_RBR = 0x06,
+ DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
DP_TEST_LINK_RATE_HBR = 0x0A,
+ DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
DP_TEST_LINK_RATE_HBR2 = 0x14,
+ DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane
DP_TEST_LINK_RATE_HBR3 = 0x1E,
DP_TEST_LINK_RATE_UHBR10 = 0x01,
DP_TEST_LINK_RATE_UHBR20 = 0x02,
@@ -917,16 +922,6 @@ struct dpcd_usb4_dp_tunneling_info {
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
-#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
-#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
-#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256
-#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250
-#endif
-
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1232,8 +1227,7 @@ union replay_enable_and_configuration {
unsigned char FREESYNC_PANEL_REPLAY_MODE :1;
unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1;
unsigned char STATE_TRANSITION_ERROR_DETECTION :1;
- unsigned char RESERVED0 :1;
- unsigned char RESERVED1 :4;
+ unsigned char RESERVED :5;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index aae2f3a2660d..2ad7f60805f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -738,6 +738,13 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
+enum chroma_cositing {
+ CHROMA_COSITING_NONE,
+ CHROMA_COSITING_LEFT,
+ CHROMA_COSITING_TOPLEFT,
+ CHROMA_COSITING_COUNT
+};
+
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
@@ -974,6 +981,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_max;
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
+ uint32_t allow_otg_v_count_halt;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index ef380cae816a..44afcd989224 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "dc_hw_types.h"
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
index 9ee184c1df00..ab13335f1d01 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -30,5 +30,6 @@
void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
void dc_plane_destruct(struct dc_plane_state *plane_state);
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state);
#endif /* _DC_PLANE_PRIV_H_ */
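Not part of the patch above; a hypothetical consumer of a pipe mask like the one returned by the new dc_plane_get_pipe_mask() (each set bit names a HUBP instance driving the plane, as computed in the dc_plane.c hunk earlier). demo_program_hubp() is a stand-in, not a real DC function.

#include <stdint.h>
#include <stdio.h>

static void demo_program_hubp(unsigned int inst)
{
        printf("programming HUBP instance %u\n", inst);
}

static void demo_for_each_hubp(uint8_t pipe_mask)
{
        unsigned int inst;

        for (inst = 0; inst < 8; inst++)
                if (pipe_mask & (1u << inst))
                        demo_program_hubp(inst);
}

int main(void)
{
        demo_for_each_hubp(0x05); /* e.g. plane on HUBP instances 0 and 2 */
        return 0;
}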
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
index d167fdbfa8a9..caa45db50232 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "inc/core_status.h"
-struct dc_state *dc_state_create(struct dc *dc);
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params);
void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
struct dc_state *dc_state_create_copy(struct dc_state *src_state);
void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
@@ -39,12 +39,12 @@ void dc_state_destruct(struct dc_state *state);
void dc_state_retain(struct dc_state *state);
void dc_state_release(struct dc_state *state);
-enum dc_status dc_state_add_stream(struct dc *dc,
+enum dc_status dc_state_add_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
@@ -74,5 +74,5 @@ bool dc_state_add_all_planes_for_stream(
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream);
+ const struct dc_stream_state *stream);
#endif /* _DC_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index c1f44e09a6c1..615086d74d32 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -29,6 +29,8 @@
#include "dc_state.h"
#include "dc_stream.h"
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id);
+
/* Get the type of the provided resource (none, phantom, main) based on the provided
* context. If the context is unavailable, determine only if phantom or not.
*/
@@ -45,7 +47,7 @@ struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *
struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
@@ -58,11 +60,11 @@ void dc_state_release_phantom_plane(const struct dc *dc,
struct dc_plane_state *phantom_plane);
/* add/remove phantom stream to context and generate subvp meta data */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream);
@@ -92,11 +94,11 @@ bool dc_state_add_all_phantom_planes_for_stream(
struct dc_state *state);
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index ee10941caa59..e5dbbc6089a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -190,7 +190,7 @@ struct dc_stream_state {
PHYSICAL_ADDRESS_LOC dmdata_address;
bool use_dynamic_meta;
- struct dc_transfer_func *out_transfer_func;
+ struct dc_transfer_func out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -428,14 +428,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
/*
- * Set up streams and links associated to drive sinks
- * The streams parameter is an absolute set of all active streams.
- *
- * After this call:
- * Phy, Encoder, Timing Generator are programmed and enabled.
- * New streams are enabled with blank stream; no memory read.
- */
-/*
* Enable stereo when commit_streams is not required,
* for example, frame alternate.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index be2ac5c442a4..0f66d00ef80f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -422,7 +422,7 @@ struct dc_dwb_params {
enum dwb_capture_rate capture_rate; /* controls the frame capture rate */
struct scaling_taps scaler_taps; /* Scaling taps */
enum dwb_subsample_position subsample_position;
- struct dc_transfer_func *out_transfer_func;
+ const struct dc_transfer_func *out_transfer_func;
};
/* audio*/
@@ -1050,6 +1050,8 @@ union replay_error_status {
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
+ /* Replay caps support DPCD & EDID caps*/
+ bool replay_cap_support;
/* Power opt flags that are supported */
unsigned int replay_power_opt_supported;
/* SMU optimization is supported */
@@ -1175,4 +1177,20 @@ enum mall_stream_type {
SUBVP_MAIN, // subvp in use, this stream is main stream
SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
};
+
+enum dc_power_source_type {
+ DC_POWER_SOURCE_AC, // wall power
+ DC_POWER_SOURCE_DC, // battery power
+};
+
+struct dc_state_create_params {
+ enum dc_power_source_type power_source;
+};
+
+struct dc_commit_streams_params {
+ struct dc_stream_state **streams;
+ uint8_t stream_count;
+ enum dc_power_source_type power_source;
+};
+
#endif /* DC_TYPES_H_ */
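Not part of the patch above; a self-contained sketch (demo_* types only, not the DC API) of the calling-convention change to dc_commit_streams(): instead of passing a streams[] array and a count directly, callers now fill a params struct, which also carries the power source added in this header.

#include <stdint.h>
#include <stdio.h>

enum demo_power_source { DEMO_POWER_AC, DEMO_POWER_DC };

struct demo_stream { int id; };

struct demo_commit_params {
        struct demo_stream **streams;    /* absolute set of active streams */
        uint8_t stream_count;
        enum demo_power_source power_source;
};

static int demo_commit_streams(const struct demo_commit_params *params)
{
        uint8_t i;

        for (i = 0; i < params->stream_count; i++)
                printf("committing stream %d (power=%d)\n",
                       params->streams[i]->id, params->power_source);
        return 0;
}

int main(void)
{
        struct demo_stream s0 = { .id = 0 }, s1 = { .id = 1 };
        struct demo_stream *streams[] = { &s0, &s1 };
        struct demo_commit_params params = {
                .streams = streams,
                .stream_count = 2,
                .power_source = DEMO_POWER_AC,
        };

        return demo_commit_streams(&params);
}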
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd..b5e0289d2fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ // Apply SS (spread spectrum) dpref clock for eDP only.
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+ && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+ && encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
- dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index a2f48d46d199..ee601a6897a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -22,9 +22,6 @@
* Authors: AMD
*
*/
-
-#include <linux/delay.h>
-
#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
@@ -315,9 +312,6 @@ static bool setup_engine(
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
-
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index f98400efdd9b..e34e445a4013 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -181,6 +181,7 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index bf1ffc3629c7..3d9be87aae45 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,6 +111,7 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 670d5ab9d998..2b1673d69ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default(
static void program_pwl(struct dce_transform *xfm_dce,
const struct pwl_params *params)
{
- int retval;
+ uint32_t retval;
uint8_t max_tries = 10;
uint8_t counter = 0;
uint32_t i = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index f9d6a181164a..b851fc65f5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -34,11 +34,7 @@
#include "reg_helper.h"
#include "fixed31_32.h"
-#ifdef _WIN32
-#include "atombios.h"
-#else
#include "atom.h"
-#endif
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index b010814706fe..4f559a025cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint16_t param = (uint16_t)(panel_inst << 8);
if (is_alpm)
- param |= REPLAY_RESIDENCY_MODE_ALPM;
+ param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM;
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index f0777d61c2cb..c307f040e48f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_opp_regamma_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 7e92effec894..683866797709 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
DCE112 = dce112_compressor.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1e3ef68a452a..8f508e662748 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,7 +24,7 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
DCE120 = dce120_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331accc0e..eede83ad91fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 7eefffbdc925..fba189d26652 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
DCE80 = dce80_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index ae6a131be71b..8dc7938c36d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,9 +24,9 @@
DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_dpp.o dcn10_opp.o \
+ dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
- dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index b7e57aa27361..0b49362f71b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -24,7 +24,7 @@
*/
#include "dc.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "dcn10_cm_common.h"
#include "custom_float.h"
@@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS) {
+ DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+ i, TRANSFER_FUNC_POINTS);
+ return false;
+ }
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index d51f1ce02874..6dd355a03033 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high(
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
- unsigned int debug_data;
+ unsigned int debug_data = 0;
unsigned int i;
if (forced_pstate_allow) {
@@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks(
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks(
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks(
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 4201b7627030..d1f9e63944c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -409,7 +409,7 @@ struct dcn10_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
@@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub,
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 09784222cc03..69119b2fdce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -692,6 +692,7 @@ struct dcn_hubp_state {
uint32_t primary_meta_addr_hi;
uint32_t uclk_pstate_force;
uint32_t hubp_cntl;
+ uint32_t flip_control;
};
struct dcn10_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 9033b39e0e0c..c51b717e5622 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in
remaining_buffer -= chars_printed;
pBuf += chars_printed;
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 377f1ba1a81b..4d0eed7598b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d980e6bd6c66..b7a89c39f445 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -167,7 +167,6 @@ struct dcn10_link_enc_registers {
uint32_t DIO_LINKD_CNTL;
uint32_t DIO_LINKE_CNTL;
uint32_t DIO_LINKF_CNTL;
- uint32_t DIG_FIFO_CTRL0;
uint32_t DIO_CLK_CNTL;
uint32_t DIG_BE_CLK_CNTL;
};
@@ -475,9 +474,6 @@ struct dcn10_link_enc_registers {
type HPO_DP_ENC_SEL;\
type HPO_HDMI_ENC_SEL
-#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \
- type DIG_FIFO_OUTPUT_PIXEL_MODE
-
#define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_BE_ENABLE;\
type DIG_RB_SWITCH_EN;\
@@ -512,7 +508,6 @@ struct dcn10_link_enc_shift {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
};
@@ -521,7 +516,6 @@ struct dcn10_link_enc_mask {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 5838a11efd00..71e9288d60ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding(
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
@@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding(
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 2c0ecfa5a643..c87de68a509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -79,6 +79,8 @@
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index c429590f1298..1b96972b9d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_1;
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
- uint32_t DIG_FE_CNTL2;
uint32_t DIG_FIFO_STATUS;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
@@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP11_ENABLE;\
type DP_SEC_GSP11_LINE_NUM
-#define SE_REG_FIELD_LIST_DCN3_2(type) \
+#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \
type DIG_FIFO_OUTPUT_PIXEL_MODE;\
type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\
type DIG_SYMCLK_FE_ON;\
@@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift {
uint8_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
SE_REG_FIELD_LIST_DCN3_0(uint8_t);
- SE_REG_FIELD_LIST_DCN3_2(uint8_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t);
};
@@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask {
uint32_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
SE_REG_FIELD_LIST_DCN3_0(uint32_t);
- SE_REG_FIELD_LIST_DCN3_2(uint32_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t);
};
@@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 3dae3943b056..9b6070c99794 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
-DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f8667be57046..80779e85e2c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
}
+
+ if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) {
+ /* Swap double buffered coefficient set */
+ uint32_t wbscl_mode = REG_READ(WBSCL_MODE);
+ bool coef_ram_current = get_reg_field_value_ex(
+ wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT,
+ dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT);
+
+ REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current);
+ }
+
}
static const struct dwbc_funcs dcn20_dwbc_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 6eebcb22e317..c6f859871d11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 2f6146bf1d32..24a9c45988ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -85,7 +85,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 89c3bf0fe0c9..6bba020ad6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1331,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp)
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+ if (REG(DCHUBP_CNTL))
+ s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
static void hubp2_validate_dml_output(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index efa2adf4f83d..8da3084d933f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -147,7 +147,7 @@
uint32_t DCN_CUR1_TTU_CNTL1;\
uint32_t VMID_SETTINGS_0
-
+/* shared with DCN 3.x */
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index b2b266953d18..c34e04cac9a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -147,7 +147,8 @@
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
@@ -231,6 +232,8 @@
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 16b5ff208d14..ea73473b970a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl(
MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
-
}
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
@@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
-
- REG_SEQ_SUBMIT();
- PERF_TRACE();
- REG_SEQ_WAIT_DONE();
- PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index 2b0b4f32e13b..3880db59e457 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
DCN201 = dcn201_hubbub.o\
- dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \
dcn201_dccg.o dcn201_link_encoder.o
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
index 037d265431c6..63798132ed95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
@@ -52,7 +52,7 @@
static bool hubbub201_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
- hubbub->detile_buf_size = 164 * 1024;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
index 35dd4bac242a..cd2bfcc51276 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ /* no need to program PTE */
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
@@ -99,6 +100,10 @@ static void hubp201_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
+ /*
+ * OTG is locked when this function is called. Registers are double buffered,
+ * so disabling the requestors is not needed.
+ */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp201_program_requestor(hubp, rq_regs);
hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
index 8b95ef251332..be25e8dc0636 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
@@ -30,6 +30,10 @@
#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
@@ -44,7 +48,15 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
#define DPCS_DCN201_REG_LIST(id) \
- DPCS_DCN2_CMN_REG_LIST(id)
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id)
void dcn201_link_encoder_construct(
struct dcn20_link_encoder *enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index aeb0e0d9b70a..2546224b326a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks(
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks(
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks(
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index d8eb2bb7282c..ab2ce0313529 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index b5b2aa3b3783..c6ca70f3c061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -25,13 +25,11 @@
DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
- dcn30_dpp.o \
dcn30_dccg.o \
dcn30_mpc.o dcn30_vpg.o \
dcn30_afmt.o \
dcn30_dio_stream_encoder.o \
dcn30_dwb.o \
- dcn30_dpp_cm.o \
dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index ddb344056d40..b8327237ed44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"
#include "custom_float.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
index 35a613bb08bf..3f1da7f3a91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
@@ -29,15 +29,9 @@
#include "dcn20/dcn20_dccg.h"
-#define DCCG_REG_LIST_DCN3AG() \
- DCCG_COMMON_REG_LIST_DCN_BASE(),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL)
-
-
#define DCCG_REG_LIST_DCN30() \
DCCG_REG_LIST_DCN2(),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
@@ -46,19 +40,10 @@
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL)
-#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \
- DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
-
#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
DCCG_MASK_SH_LIST_DCN2(mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 1fb8fd7afc95..b8e31b5ea114 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -30,8 +30,6 @@
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
-/* #include "dcn3ag/dcn3ag_phy_fw.h" */
-
#include "gpio_service_interface.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index f2d90f2b8bf1..5b6177c2ae98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -55,7 +55,8 @@
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh)
#define DPCS_DCN3_MASK_SH_LIST(mask_sh)\
DPCS_DCN2_MASK_SH_LIST(mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 005dbe099a7a..425b830b88d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -29,9 +29,6 @@
#include "reg_helper.h"
#include "hw_shared.h"
#include "dc.h"
-#include "core_types.h"
-#include <linux/delay.h>
-
#define DC_LOGGER \
enc1->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 1b9d9495f76d..fae98cf52020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
- .dwb_program_output_csc = NULL,
.dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
- .dwb_set_scaler = NULL,
};
void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index 332634b76aac..0f3f7c5fbaec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -217,6 +217,7 @@
SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\
SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
@@ -524,6 +525,7 @@
type DWB_OGAM_LUT_DATA;\
type DWB_OGAM_LUT_WRITE_COLOR_MASK;\
type DWB_OGAM_LUT_READ_COLOR_SEL;\
+ type DWB_OGAM_LUT_READ_DBG;\
type DWB_OGAM_LUT_HOST_SEL;\
type DWB_OGAM_LUT_CONFIG_MODE;\
type DWB_OGAM_LUT_STATUS;\
@@ -710,7 +712,7 @@
type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\
type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\
type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\
- type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS;
+ type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS
struct dcn30_dwbc_registers {
/* DCN3AG */
@@ -733,6 +735,10 @@ struct dcn30_dwbc_registers {
uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT;
uint32_t DWB_HOST_READ_CONTROL;
uint32_t DWB_SOFT_RESET;
+ uint32_t DWB_DEBUG_CTRL;
+ uint32_t DWB_DEBUG;
+ uint32_t DWB_TEST_DEBUG_INDEX;
+ uint32_t DWB_TEST_DEBUG_DATA;
/* DWBSCL */
uint32_t DWBSCL_COEF_RAM_TAP_SELECT;
@@ -747,6 +753,9 @@ struct dcn30_dwbc_registers {
uint32_t DWBSCL_DEST_SIZE;
uint32_t DWBSCL_OVERFLOW_STATUS;
uint32_t DWBSCL_OVERFLOW_COUNTER;
+ uint32_t DWBSCL_DEBUG;
+ uint32_t DWBSCL_TEST_DEBUG_INDEX;
+ uint32_t DWBSCL_TEST_DEBUG_DATA;
/* DWBCP */
uint32_t DWB_HDR_MULT_COEF;
@@ -838,6 +847,9 @@ struct dcn30_dwbc_registers {
uint32_t DWB_OGAM_RAMB_REGION_28_29;
uint32_t DWB_OGAM_RAMB_REGION_30_31;
uint32_t DWB_OGAM_RAMB_REGION_32_33;
+ uint32_t DWBCP_DEBUG;
+ uint32_t DWBCP_TEST_DEBUG_INDEX;
+ uint32_t DWBCP_TEST_DEBUG_DATA;
};
/* Internal enums / structs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
index 152c9c5733f1..6a5af3da4b45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
@@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
index 7b597908b937..ca6233e8f1f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
@@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 75547ce86c09..60a64d290352 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp)
if (REG(DCHUBP_CNTL))
s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
void hubp3_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index bf3386cd444d..fca94e50ae93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+void mpc3_mpc_init(struct mpc *mpc)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int opp_id;
+
+ mpc1_mpc_init(mpc);
+
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+ }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+ /* assuming mpc out mux is connected to opp with the same index at this
+ * point in time (e.g. transitioning from vbios to driver)
+ */
+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control)
-{
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_RATE_CONTROL_DISABLE, !enable,
- MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
- if (flow_control)
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1172,7 +1183,7 @@ void mpc3_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
int select;
read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
@@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc1_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init = mpc3_mpc_init,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 9cb96ae95a2f..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc3_mpc_init(
+ struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
@@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control);
-
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
index ed9a5549c389..466ba20b9c61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN30_VPG_H__
#define __DAL_DCN30_VPG_H__
+#include "vpg.h"
#define DCN30_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn30_vpg, base)
@@ -132,28 +133,6 @@ struct dcn30_vpg_mask {
VPG_DCN3_REG_FIELD_LIST(uint32_t);
};
-struct vpg;
-
-struct vpg_funcs {
- void (*update_generic_info_packet)(
- struct vpg *vpg,
- uint32_t packet_index,
- const struct dc_info_packet *info_packet,
- bool immediate_update);
-
- void (*vpg_poweron)(
- struct vpg *vpg);
-
- void (*vpg_powerdown)(
- struct vpg *vpg);
-};
-
-struct vpg {
- const struct vpg_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dcn30_vpg {
struct vpg base;
const struct dcn30_vpg_registers *regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
index 73db962dbc03..067e49cb238e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
@@ -56,10 +56,4 @@ struct dccg *dccg301_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-struct dccg *dccg301_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *dccg_shift,
- const struct dccg_mask *dccg_mask);
-
#endif //__DCN301_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
index a046664e2031..c1959672df50 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
@@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
+ .init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index e3caaacf7493..e3be0bab4007 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -34,12 +34,14 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
SR(PHYASYMCLK_CLOCK_CNTL),\
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL),\
SR(PHYDSYMCLK_CLOCK_CNTL),\
SR(PHYESYMCLK_CLOCK_CNTL),\
SR(DPSTREAMCLK_CNTL),\
+ SR(HDMISTREAMCLK_CNTL),\
SR(SYMCLK32_SE_CNTL),\
SR(SYMCLK32_LE_CNTL),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
@@ -78,6 +80,8 @@
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
@@ -92,6 +96,8 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 26be5fee7411..b2cea59ba5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux(
}
}
-static void enc31_hw_init(struct link_encoder *enc)
+void enc31_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
index 221671563a0b..ee78ba80797c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -89,6 +89,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -222,6 +223,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -283,4 +285,6 @@ bool dcn31_link_encoder_is_in_alt_mode(
void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings);
+void enc31_hw_init(struct link_encoder *enc);
+
#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 5b7ad38f85e0..03b4ac2f1991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -377,7 +377,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
*/
REG_WAIT(DP_DPHY_SYM32_STATUS,
SAT_UPDATE_PENDING, 0,
- 10, DP_SAT_UPDATE_MAX_RETRY);
+ 100, DP_SAT_UPDATE_MAX_RETRY);
}
void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
x),
25));
+ // If y rounds up to an integer, carry it over to x.
+ if (y >> 25) {
+ x += 1;
+ y = 0;
+ }
+
switch (stream_encoder_inst) {
case 0:
REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
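
For context on the carry handling added above: the surrounding code suggests the throttled VCP size is packed as an integer part (x) plus a fraction with 25 fractional bits (y), so when rounding pushes the fraction up to 1.0, bit 25 of y becomes set and that unit has to be folded back into x. A minimal stand-alone sketch of that check, with illustrative names that are not the driver's own:

    #include <stdint.h>

    /* Fold a rounded-up 25-bit fraction back into the integer part.
     * Mirrors the `if (y >> 25)` carry in the hunk above; purely illustrative.
     */
    static void carry_vcp_fraction(uint32_t *x, uint32_t *y)
    {
            if (*y >> 25) {    /* fraction reached 1.0 after rounding */
                    *x += 1;   /* carry the unit into the integer part */
                    *y = 0;    /* clear the fraction */
            }
    }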
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 45143459eedd..678db949cfe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
&info_frame->hdrsmd,
true);
+ /* packetIndex 4 is used for sending immediate SDP messages; please
+ * use other packetIndex values (such as 5, 6) for other info packets
+ */
+
if (info_frame->adaptive_sync.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 5b5b5e0775fa..b906db6e7355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -172,7 +172,7 @@ static uint32_t convert_and_clamp(
static bool hubbub31_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks(
static bool hubbub31_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -635,7 +635,7 @@ static bool hubbub31_program_stutter_watermarks(
static bool hubbub31_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks(
static bool hubbub31_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 281be20b1a10..20c6fe48567f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -173,5 +173,12 @@ void dcn31_panel_cntl_construct(
break;
}
- dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ // If supported, the power sequencer mapping shall follow the DIG instance
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ else
+ /* If not supported, pwrseq will be assigned in order,
+ * so the first pwrseq will be assigned to the first panel instance (legacy behavior)
+ */
+ dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
index f1deb1c3c363..cfb923d85630 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg)
{
struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
- if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ uint32_t vpg_gsp_mem_pwr_state;
+
+ REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false &&
+ vpg_gsp_mem_pwr_state == 0)
return;
REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
index 0e76eabce441..609e58dbd056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN31_VPG_H__
#define __DAL_DCN31_VPG_H__
+#include "vpg.h"
#define DCN31_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn31_vpg, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 5314770fff1c..a58c37165f5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -11,7 +11,7 @@
# Makefile for dcn32.
DCN32 = dcn32_hubbub.o dcn32_dccg.o \
- dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+ dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \
dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68a..d9ff95cd2dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -251,8 +251,6 @@ void dcn32_link_encoder_construct(
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 2d5f25290ed1..35d23d9db45e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -26,15 +26,7 @@
#ifndef __DC_LINK_ENCODER__DCN32_H__
#define __DC_LINK_ENCODER__DCN32_H__
-#include "dcn31/dcn31_dio_link_encoder.h"
-
-#define LE_DCN32_REG_LIST(id)\
- LE_DCN31_REG_LIST(id),\
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-#define LINK_ENCODER_MASK_SH_LIST_DCN32(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh),\
- LE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+#include "dcn30/dcn30_dio_link_encoder.h"
void dcn32_link_encoder_construct(
struct dcn20_link_encoder *enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 1be5410cce97..ca53d39561d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -177,11 +177,12 @@
SE_SF(DIG0_DIG_FE_CNTL, DIG_SYMCLK_FE_ON, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh)
+
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 88dfc907553d..515c4c2b4c21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -167,7 +167,7 @@ static uint32_t convert_and_clamp(
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks(
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks(
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks(
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
static bool hubbub32_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index f073839a4b6d..e439ba0fa30f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -118,25 +118,25 @@
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index e789e654c387..e408e859b355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
+ mpc3_mpc_init(mpc);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc32_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc32_program_shaper,
.program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index f98def6c8c2d..fbcd6f7bc993 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -35,25 +35,6 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
-
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes)
-{
- uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
-
- /* add 2 lines for worst case alignment */
- cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
-
- total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- return num_ways;
-}
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -112,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(
if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
if (dc->debug.force_subvp_num_ways) {
return dc->debug.force_subvp_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
} else {
- return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
+ return 0;
}
} else {
return 0;
@@ -399,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
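
For reference, the bytes-to-ways conversion that the removed dcn32_helper_mall_bytes_to_ways performed, and that a resource pool's calculate_mall_ways_from_bytes hook is presumably expected to reproduce, is a ceiling division of the cache lines used (padded by two lines of alignment slack) by the number of cache lines per way. A self-contained sketch of the same arithmetic, with the dc->caps fields passed in explicitly:

    #include <stdint.h>

    /* Equivalent of the removed helper: convert a MALL allocation in bytes
     * into the number of cache ways it occupies, rounding up.
     */
    static uint32_t mall_bytes_to_ways(uint32_t total_bytes,
                                       uint32_t cache_line_size,
                                       uint32_t max_cab_allocation_bytes,
                                       uint32_t cache_num_ways)
    {
            /* add 2 lines for worst case alignment */
            uint32_t lines_used = total_bytes / cache_line_size + 2;
            uint32_t total_lines = max_cab_allocation_bytes / cache_line_size;
            uint32_t lines_per_way = total_lines / cache_num_ways;

            /* num_ways = ceil(lines_used / lines_per_way) */
            return (lines_used + lines_per_way - 1) / lines_per_way;
    }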
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 13be5f06d987..05783daa62ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -127,11 +127,6 @@ void dcn321_link_encoder_construct(
* while doing the DP sink detect
*/
-/* if (dal_adapter_service_is_feature_supported(as,
- FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
- enc10->base.features.flags.bits.
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/
-
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
@@ -191,7 +186,6 @@ void dcn321_link_encoder_construct(
__func__,
result);
}
- if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ if (enc10->base.ctx->dc->debug.hdmi20_disable)
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index 0e317e0c36a0..d5b4533d2f62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -13,7 +13,7 @@
DCN35 = dcn35_dio_stream_encoder.o \
dcn35_dio_link_encoder.o dcn35_dccg.o \
dcn35_hubp.o dcn35_hubbub.o \
- dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
+ dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o
AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index f1ba7bb792ea..58dd3c5bbff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
@@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
+ uint32_t dpp_inst, uint32_t enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ return;
+
+ switch (dpp_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ break;
+ default:
+ break;
+ }
+}
+
static void dccg35_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -333,21 +367,67 @@ static void dccg35_set_dpstreamclk(
/* enabled to select one of the DTBCLKs for pipe */
switch (dp_hpo_inst) {
case 0:
- REG_UPDATE_2(DPSTREAMCLK_CNTL,
- DPSTREAMCLK0_EN,
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+
+static void dccg35_set_dpstreamclk_root_clock_gating(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (dp_hpo_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 1 : 0);
+ }
break;
default:
BREAK_TO_DEBUGGER();
@@ -355,6 +435,8 @@ static void dccg35_set_dpstreamclk(
}
}
+
+
static void dccg35_set_physymclk_root_clock_gating(
struct dccg *dccg,
int phy_inst,
@@ -369,22 +451,32 @@ static void dccg35_set_physymclk_root_clock_gating(
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -407,10 +499,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 1,
PHYASYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 0,
PHYASYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 1:
@@ -418,10 +516,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 1,
PHYBSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 0,
PHYBSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 2:
@@ -429,10 +533,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 1,
PHYCSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 0,
PHYCSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 3:
@@ -440,10 +550,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 1,
PHYDSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 0,
PHYDSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 4:
@@ -451,10 +567,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 1,
PHYESYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 0,
PHYESYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
default:
@@ -491,12 +613,12 @@ static void dccg35_dpp_root_clock_control(
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
- dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0xFF,
DPPCLK0_DTO_MODULO, 0xFF);
} else {
- dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
/* turn on the DTO to generate a 0hz clock */
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
@@ -575,18 +697,32 @@ void dccg35_init(struct dccg *dccg)
dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- for (otg_inst = 0; otg_inst < 2; otg_inst++)
+ for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ }
+
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// for (otg_inst = 0; otg_inst < 4; otg_inst++)
+// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst);
+
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
- for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst,
+ for (otg_inst = 0; otg_inst < 4; otg_inst++) {
+ dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
+ dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ }
if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
for (otg_inst = 0; otg_inst < 5; otg_inst++)
dccg35_set_physymclk_root_clock_gating(dccg, otg_inst,
false);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ for (otg_inst = 0; otg_inst < 4; otg_inst++)
+ dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0);
+
/*
dccg35_enable_global_fgcg_rep(
dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -611,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -650,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0);
break;
default:
return;
@@ -682,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1);
break;
}
@@ -706,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 1,
SYMCLKA_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 1,
SYMCLKB_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 1,
SYMCLKC_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 1,
SYMCLKD_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 1,
SYMCLKE_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1);
break;
}
}
@@ -786,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 0,
SYMCLKA_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 0,
SYMCLKB_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 0,
SYMCLKC_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 0,
SYMCLKD_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 0,
SYMCLKE_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0);
break;
}
@@ -818,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0);
break;
}
}
@@ -845,6 +1037,7 @@ static const struct dccg_funcs dccg35_funcs = {
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
+ .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..20f810a6646c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn35_link_encoder_setup(
@@ -119,7 +118,7 @@ void dcn35_link_encoder_setup(
void dcn35_link_encoder_init(struct link_encoder *enc)
{
- enc32_hw_init(enc);
+ enc31_hw_init(enc);
dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio);
}
@@ -184,6 +183,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +239,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
index e1e560732a9d..d546a3676304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
@@ -37,7 +37,9 @@
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
@@ -114,7 +116,15 @@
LE_SF(DIO_CLK_CNTL, SYMCLK_FE_G_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_R_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_G_GATE_DIS, mask_sh),\
- LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh)
+ LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, DISPCLK_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKA_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKB_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKC_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKD_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKE_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKF_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKG_G_HDCP_GATE_DIS, mask_sh)
void dcn35_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
index 499052329ebb..1212fcee38f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
@@ -28,7 +28,6 @@
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "stream_encoder.h"
-#include "dcn10/dcn10_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
/* Register bit field name change */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
index 339bf0c722dd..6293173ba2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
@@ -111,7 +111,7 @@ static uint32_t convert_and_clamp(
static bool hubbub35_program_stutter_z8_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub35_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 6d7a15dcf8a7..34adae7ab6e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -36,6 +36,7 @@
struct dc_dp_mst_stream_allocation_table;
struct aux_payload;
enum aux_return_code_type;
+enum set_config_status;
/*
* Allocate memory accessible by the GPU
@@ -200,7 +201,7 @@ int dm_helper_dmub_aux_transfer_sync(
const struct dc_link *link,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
-enum set_config_status;
+
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index d0eed3b4771e..9405c47ee2a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -275,6 +275,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
#define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX)
/*
+ * SMU message tracing
+ */
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
+
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
+#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+
+
+/*
* DMUB Interfaces
*/
bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
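The tracing hooks added above are implemented by the DM layer; as a hedged, standalone sketch (stubbed, not driver code) of how a caller could bracket an SMU request with them — the printf hook bodies and the hard-coded delay are illustrative assumptions only:

/* Standalone sketch: bracketing an SMU request with the tracing hooks. */
#include <stdint.h>
#include <stdio.h>

struct dc_context { int unused; };

static void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
{
	(void)ctx;
	printf("SMU msg 0x%x, param 0x%x\n", (unsigned)msg_id, (unsigned)param_in);
}

static void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
{
	(void)ctx;
	printf("SMU response delay %u us\n", (unsigned)delay);
}

#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)

int main(void)
{
	struct dc_context ctx = { 0 };
	uint32_t delay_us = 15;	/* hypothetical measured response time */

	TRACE_SMU_MSG(0x3, 0x200, &ctx);	/* before sending the message */
	/* ... send the message and poll for the response here ... */
	TRACE_SMU_DELAY(delay_us, &ctx);	/* after the response arrives */
	return 0;
}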
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 38ab9ad60ef8..74da9ebda016 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1085,6 +1085,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
@@ -1092,8 +1095,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
- return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else if (is_pwrseq0 && (is_psr || is_replay))
+ return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else {
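For reference, the reshaped decision above reduces to a small ladder: a long stutter period on power sequencer 0 allows full z-state entry, PSR/Replay on the same sequencer restricts it to Z8/Z10, and everything else falls back to the Z8-only/disallow split. A standalone sketch of that ladder follows; the enum is trimmed to the values used here and the thresholds are treated as assumptions, not the driver's tuning:

/* Standalone sketch of the z-state decision shape used above. */
#include <stdbool.h>
#include <stdio.h>

enum dcn_zstate_support_state {
	DCN_ZSTATE_SUPPORT_DISALLOW,
	DCN_ZSTATE_SUPPORT_ALLOW,
	DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY,
	DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,
};

static enum dcn_zstate_support_state decide_zstate(bool is_pwrseq0, bool is_psr,
						    bool is_replay,
						    double stutter_period_us,
						    double min_z8_residency_us)
{
	bool allow_z8 = stutter_period_us > min_z8_residency_us;

	if (is_pwrseq0 && stutter_period_us > 5000.0)
		return DCN_ZSTATE_SUPPORT_ALLOW;
	else if (is_pwrseq0 && (is_psr || is_replay))
		return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
	else
		return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
}

int main(void)
{
	/* eDP on power sequencer 0 with PSR enabled but a short stutter period */
	printf("%d\n", decide_zstate(true, true, false, 1200.0, 1000.0));
	return 0;
}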
@@ -2369,7 +2372,7 @@ validate_out:
static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{
- struct _vcs_dpi_voltage_scaling_st low_pstate_lvl;
+ struct _vcs_dpi_voltage_scaling_st low_pstate_lvl = {0};
int i;
low_pstate_lvl.state = 1;
@@ -2474,7 +2477,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index ccb4ad78f667..81f7b90849ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -260,7 +260,7 @@ void dcn30_fpu_populate_dml_writeback_from_context(
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e7f4a2d491cc..e0b52db2c210 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3535,7 +3535,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index deb6d162a2d5..94317b2e4a85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
.num_chans = 4,
.dummy_pstate_latency_us = 10.0
};
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2500.0,
};
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
@@ -485,6 +487,7 @@ void dcn31_calculate_wm_and_dlg_fp(
{
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ uint32_t cstate_enter_plus_exit_z8_ns;
dc_assert_fp_enabled();
@@ -504,6 +507,13 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ cstate_enter_plus_exit_z8_ns =
+ get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000;
+
/* Set A:
* All clocks min required
*
@@ -514,7 +524,7 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
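The new clamp above raises the Z8 enter+exit watermark whenever the computed stutter period falls below the debug minimum-residency threshold. A standalone sketch of that clamp, with illustrative numbers that are not taken from any real configuration:

/* Standalone sketch of the Z8 enter+exit watermark clamp (values in ns/us). */
#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_z8_enter_exit_ns(uint32_t computed_ns,
					double stutter_period_us,
					uint32_t min_z8_residency_us)
{
	if (stutter_period_us < min_z8_residency_us &&
	    computed_ns < min_z8_residency_us * 1000)
		return min_z8_residency_us * 1000;	/* raise to the floor */
	return computed_ns;
}

int main(void)
{
	/* 280 us enter+exit, 900 us stutter period, 1000 us minimum residency */
	printf("%u ns\n", (unsigned)clamp_z8_enter_exit_ns(280000, 900.0, 1000));
	return 0;
}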
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 8f9c8faed260..d2ae43a82ba5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -30,6 +30,7 @@
#define DCN3_15_DEFAULT_DET_SIZE 192
#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_DEFAULT_DET_SIZE 192
+#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index adea459e7d36..33cf824c5da1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3679,7 +3679,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index fb21572750e8..21f637ae4add 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 88e56889a68c..3242957d00c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3788,7 +3788,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a0a65e099104..f6fe0a64beac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -180,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
+static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context,
+ bool *repopulate_pipes, int *split, bool *merge);
+
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
{
/* defaults */
@@ -622,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
@@ -720,7 +723,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
- struct pipe_ctx *subvp_pipes[2];
+ struct pipe_ctx *subvp_pipes[2] = {0};
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
@@ -1425,13 +1428,14 @@ static bool is_test_pattern_enabled(
return false;
}
-static void dcn32_full_validate_bw_helper(struct dc *dc,
+static bool dcn32_full_validate_bw_helper(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *vlevel,
int *split,
bool *merge,
- int *pipe_cnt)
+ int *pipe_cnt,
+ bool *repopulate_pipes)
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
@@ -1461,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
}
+ /* Apply split and merge flags before checking for subvp */
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge))
+ return false;
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
* 2. Full update (i.e. !fast_validate)
@@ -1475,19 +1485,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
- dcn32_merge_pipes_for_subvp(dc, context);
- memset(merge, 0, MAX_PIPES * sizeof(bool));
-
vlevel_temp = *vlevel;
- /* to re-initialize viewport after the pipe merge */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state || !pipe_ctx->stream)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1576,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* add phantom pipes. If pipe split (ODM / MPC) is required, both the main
* and phantom pipes will be split in the regular pipe splitting sequence.
*/
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
@@ -1590,6 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
+ return true;
}
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -1929,106 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm(
return true;
}
-bool dcn32_internal_validate_bw(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
+static bool dcn32_apply_merge_split_flags_helper(
+ struct dc *dc,
+ struct dc_state *context,
+ bool *repopulate_pipes,
+ int *split,
+ bool *merge)
{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
+ int i, pipe_idx;
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx;
- int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
- dc_assert_fp_enabled();
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dc_state_remove_phantom_streams_and_planes(dc, context);
- dc_state_release_phantom_streams_and_planes(dc, context);
-
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
-
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
-
- if (!fast_validate)
- dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
-
- if (fast_validate ||
- (dc->debug.dml_disallow_alternate_prefetch_modes &&
- (vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
- /*
- * If dml_disallow_alternate_prefetch_modes is false, then we have already
- * tried alternate prefetch modes during full validation.
- *
- * If mode is unsupported or there is no p-state support, then
- * fall back to favouring voltage.
- *
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
- * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
- */
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_none;
-
- context->bw_ctx.dml.validate_max_state = fast_validate;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.validate_max_state = false;
-
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
- vba->VoltageLevel = vlevel;
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && !dc->config.enable_windowed_mpo_odm
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_state->clip_rect,
- &pipe->stream->src,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
if (dc->config.enable_windowed_mpo_odm) {
- repopulate_pipes = update_pipes_with_split_flags(
- dc, context, vba, split, merge);
+ if (update_pipes_with_split_flags(
+ dc, context, vba, split, merge))
+ *repopulate_pipes = true;
} else {
+
/* the code below will be removed once windowed mpo odm is fully
* enabled.
*/
@@ -2085,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2101,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -2140,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc,
hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe, odm))
- goto validate_fail;
+ return false;
newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
+ *repopulate_pipes = true;
}
if (split[i] == 4) {
struct pipe_ctx *pipe_4to1;
@@ -2163,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
@@ -2182,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
}
if (odm)
@@ -2198,11 +2112,122 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->plane_state) {
if (!resource_build_scaling_params(pipe))
- goto validate_fail;
+ return false;
}
}
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
+ context->streams[i]);
+
+ if (otg_master)
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+ }
+ }
+ return true;
+}
+
+bool dcn32_internal_validate_bw(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ dc_assert_fp_enabled();
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ /* For each full update, remove all existing phantom pipes first */
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
+
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
+
+ if (!fast_validate) {
+ if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
+ &pipe_cnt, &repopulate_pipes))
+ goto validate_fail;
+ }
+
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
+ /*
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favouring voltage.
+ *
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ */
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_none;
+
+ context->bw_ctx.dml.validate_max_state = fast_validate;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ context->bw_ctx.dml.validate_max_state = false;
+
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */
+ vba->VoltageLevel = vlevel;
+ }
}
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && !dc->config.enable_windowed_mpo_odm
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge))
+ goto validate_fail;
+
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
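The extracted helper above reports failure through its bool return value, and the caller translates that into its single validate_fail path instead of sharing goto labels across functions. A standalone sketch of that error-propagation shape, with hypothetical step names standing in for the real split/merge work:

/* Standalone sketch: helper returns bool, caller keeps the single fail path. */
#include <stdbool.h>
#include <stdio.h>

static bool step_a(void) { return true; }
static bool step_b(void) { return false; }	/* pretend this step fails */

static bool apply_flags_helper(void)
{
	if (!step_a())
		return false;
	if (!step_b())
		return false;
	return true;
}

static bool validate(void)
{
	bool out = false;

	if (!apply_flags_helper())
		goto validate_fail;

	out = true;
validate_fail:
	return out;
}

int main(void)
{
	printf("validate: %s\n", validate() ? "ok" : "fail");
	return 0;
}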
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 80fccd4999a5..ba1310c8fd77 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP(
MaxLinkBPP = 2 * MaxLinkBPP;
}
+ *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP)
@@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP(
else
return DesiredBPP;
}
-
- *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
-
- return BPP_INVALID;
} // TruncToValidBPP
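The hunk above hoists the RequiredSlots assignment ahead of the early returns so the output parameter is always written. As a standalone sketch of the arithmetic itself — the number of 64ths-of-the-link timeslots is ceil(DesiredBPP / MaxLinkBPP * 64) — with arbitrary example inputs:

/* Standalone sketch of the RequiredSlots computation. Link with -lm. */
#include <math.h>
#include <stdio.h>

static unsigned int required_slots(double desired_bpp, double max_link_bpp)
{
	return (unsigned int)ceil(desired_bpp / max_link_bpp * 64.0);
}

int main(void)
{
	/* e.g. 12 bpp desired on a link that tops out at 36 bpp -> 22 slots */
	printf("%u\n", required_slots(12.0, 36.0));
	return 0;
}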
double dml32_RequiredDTBCLK(
@@ -1975,8 +1973,8 @@ void dml32_CalculateVMRowAndSwath(
unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX] = {0};
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX] = {0};
unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
@@ -4291,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int i, j, k;
unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
unsigned int DRAMClockChangeSupportNumber = 0;
- unsigned int LastSurfaceWithoutMargin;
+ unsigned int LastSurfaceWithoutMargin = 0;
unsigned int DRAMClockChangeMethod = 0;
bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
double MinActiveFCLKChangeMargin = 0.;
@@ -5656,9 +5654,9 @@ void dml32_CalculateStutterEfficiency(
double LastZ8StutterPeriod = 0.0;
double LastStutterPeriod = 0.0;
unsigned int TotalNumberOfActiveOTG = 0;
- double doublePixelClock;
- unsigned int doubleHTotal;
- unsigned int doubleVTotal;
+ double doublePixelClock = 0;
+ unsigned int doubleHTotal = 0;
+ unsigned int doubleVTotal = 0;
bool SameTiming = true;
double DETBufferingTimeY;
double SwathWidthYCriticalSurface = 0.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 80bebfc268db..60f251cf973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.num_states = 5,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 0,
+ .do_urgent_latency_adjustment = 1,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -439,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
@@ -577,6 +577,7 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
{
enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
unsigned int i, plane_count = 0;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
@@ -602,11 +603,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && (is_psr || is_replay))
- support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
}
+ DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support,
+ (int)context->bw_ctx.dml.vba.StutterPeriod);
+
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index dc9e1b758ed6..e4f333d4fb54 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -98,55 +98,114 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.clock_limits = {
{
.state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
+ .dscclk_mhz = 200.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
+ .dscclk_mhz = 320.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
.dtbclk_mhz = 600.0,
},
},
- .num_states = 5,
+ .num_states = 8,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -177,6 +236,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
};
/*
@@ -340,6 +401,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -352,6 +415,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
@@ -408,7 +473,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
@@ -551,6 +616,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
+
/*dcn351 does not support z9/z10*/
if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
@@ -564,11 +630,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
-
/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
- if (is_pwrseq0 && (is_psr || is_replay))
+ if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW;
-
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index acff3449b8d7..1c9498a72520 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -67,6 +67,7 @@ frame_warn_flag := -Wframe-larger-than=2048
endif
endif
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 9be5ebf3a8c0..3e919f5c00ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -31,6 +31,8 @@
#include "dml_assert.h"
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
+#define TB_BORROWED_MAX 400
+
// ---------------------------
// Declaration Begins
// ---------------------------
@@ -2782,6 +2784,8 @@ static dml_float_t TruncToValidBPP(
}
}
+ *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
@@ -2810,10 +2814,6 @@ static dml_float_t TruncToValidBPP(
return DesiredBPP;
}
}
-
- *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
-
- return __DML_DPP_INVALID__;
} // TruncToValidBPP
static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
@@ -3790,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat
dml_bool_t FoundCriticalSurface = false;
dml_uint_t TotalNumberOfActiveOTG = 0;
- dml_float_t SinglePixelClock;
- dml_uint_t SingleHTotal;
- dml_uint_t SingleVTotal;
+ dml_float_t SinglePixelClock = 0;
+ dml_uint_t SingleHTotal = 0;
+ dml_uint_t SingleVTotal = 0;
dml_bool_t SameTiming = true;
dml_float_t LastStutterPeriod = 0.0;
@@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
/* Copy the calculated watermarks to mp.Watermark as the getter functions are
* implemented by the DML team to copy the calculated values from the mp.Watermark interface.
+ * &mode_lib->mp.Watermark and &locals->Watermark are the same address, memcpy may lead to
+ * unexpected behavior. memmove should be used.
*/
- memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
+ memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) {
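The comment in the hunk above notes that the destination and source watermark structs alias the same storage, which is exactly the case memcpy() does not cover. A minimal standalone sketch of the distinction, using a made-up watermarks struct:

/* Standalone sketch: overlapping copies are defined for memmove(), not memcpy(). */
#include <stdio.h>
#include <string.h>

struct watermarks { double urgent; double pstate; };

int main(void)
{
	struct watermarks wm = { 4.0, 9.0 };
	struct watermarks *dst = &wm;
	struct watermarks *src = &wm;	/* overlapping: same address */

	memmove(dst, src, sizeof(*dst));	/* safe even when dst == src */
	printf("%.1f %.1f\n", dst->urgent, dst->pstate);
	return 0;
}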
@@ -10214,6 +10216,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m
dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency);
dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep);
dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
+dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark);
dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency);
dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank);
dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
index 8452485684f5..3116b88e99dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
@@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t);
dml_get_var_decl(urgent_latency, dml_float_t);
dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t);
+dml_get_var_decl(wm_writeback_urgent, dml_float_t);
dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t);
dml_get_var_decl(stutter_efficiency, dml_float_t);
dml_get_var_decl(stutter_efficiency_z8, dml_float_t);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
index de63364be01d..14d389525296 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
@@ -41,6 +41,7 @@
#define DCN_DML__VM_PRESENT__1 1
#define DCN_DML__HOST_VM_PRESENT 1
#define DCN_DML__HOST_VM_PRESENT__1 1
+#define DCN_DML__DWB 1
#include "dml_depedencies.h"
@@ -59,6 +60,7 @@
#define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE
#define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR
#define __DML_DPP_INVALID__ 0
+#define __DML_NUM_DMB__ DCN_DML__DWB
#define __DML_PIPE_NO_PLANE__ 99
#define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a52c594e1ba4..ad2a6b4769fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -88,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
@@ -100,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
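The lookup helpers above now assert and return a fixed sentinel (__DML2_WRAPPER_MAX_STREAMS_PLANES__) instead of -1, so a missed lookup is loud in debug builds and never hands callers a negative array index. A standalone sketch of the pattern with a made-up table size:

/* Standalone sketch: fixed sentinel plus assert instead of returning -1. */
#include <assert.h>
#include <stdio.h>

#define MAX_ENTRIES 6

static int find_idx_by_id(const unsigned int *ids, int count, unsigned int id)
{
	int i;

	for (i = 0; i < count; i++)
		if (ids[i] == id)
			return i;

	assert(0 && "id not found");
	return MAX_ENTRIES;	/* sentinel, flagged by the assert above */
}

int main(void)
{
	unsigned int ids[MAX_ENTRIES] = { 10, 11, 12, 13, 14, 15 };

	printf("%d\n", find_idx_by_id(ids, MAX_ENTRIES, 12));	/* prints 2 */
	return 0;
}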
// The master pipe of a stream is defined as the top pipe in odm slice 0
@@ -793,8 +795,8 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index);
}
-static unsigned int get_mpc_factor(struct dml2_context *ctx,
- const struct dc_state *state,
+static unsigned int get_target_mpc_factor(struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_status *status,
@@ -805,10 +807,10 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
unsigned int cfg_idx;
unsigned int mpc_factor;
- get_plane_id(ctx, state, status->plane_states[plane_idx],
- stream->stream_id, plane_idx, &plane_id);
- cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
if (ctx->architecture == dml2_architecture_20) {
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
+ cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
} else {
mpc_factor = 1;
@@ -822,16 +824,18 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
return mpc_factor;
}
-static unsigned int get_odm_factor(
+static unsigned int get_target_odm_factor(
const struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_state *stream)
{
- unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id(
- mapping, stream->stream_id);
+ unsigned int cfg_idx;
- if (ctx->architecture == dml2_architecture_20)
+ if (ctx->architecture == dml2_architecture_20) {
+ cfg_idx = find_disp_cfg_idx_by_stream_id(
+ mapping, stream->stream_id);
switch (disp_cfg->hw.ODMMode[cfg_idx]) {
case dml_odm_mode_bypass:
return 1;
@@ -842,83 +846,122 @@ static unsigned int get_odm_factor(
default:
break;
}
+ }
ASSERT(false);
return 1;
}
+static unsigned int get_source_odm_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream);
+
+ return ctx->config.callbacks.get_odm_slice_count(otg_master);
+}
+
+static unsigned int get_source_mpc_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_plane_state *plane)
+{
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
+ int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane,
+ &state->res_ctx, dpp_pipes);
+
+ ASSERT(dpp_pipe_count > 0);
+ return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]);
+}
+
+
static void populate_mpc_factors_for_stream(
struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
+ struct dc_state *state,
unsigned int stream_idx,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
int i;
- for (i = 0; i < status->plane_count; i++)
- if (odm_factor == 1)
- mpc_factors[i] = get_mpc_factor(
- ctx, state, disp_cfg, mapping, status,
- state->streams[stream_idx], i);
- else
- mpc_factors[i] = 1;
+ for (i = 0; i < status->plane_count; i++) {
+ mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]);
+ mpc_factors[i].target = (odm_factor.target == 1) ?
+ get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1;
+ }
}
static void populate_odm_factors(const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
- unsigned int odm_factors[MAX_PIPES])
+ struct dc_state *state,
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES])
{
int i;
- for (i = 0; i < state->stream_count; i++)
- odm_factors[i] = get_odm_factor(
- ctx, disp_cfg, mapping, state->streams[i]);
+ for (i = 0; i < state->stream_count; i++) {
+ odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]);
+ odm_factors[i].target = get_target_odm_factor(
+ ctx, state, disp_cfg, mapping, state->streams[i]);
+ }
}
-static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state,
const struct dc_state *existing_state,
const struct dc_stream_state *stream,
const struct dc_stream_status *status,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
int plane_idx;
bool result = true;
- if (odm_factor == 1)
- /*
- * ODM and MPC combines are by DML design mutually exclusive.
- * ODM factor of 1 means MPC factors may be greater than 1.
- * In this case, we want to set ODM factor to 1 first to free up
- * pipe resources from previous ODM configuration before setting
- * up MPC combine to acquire more pipe resources.
- */
+ for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
+ if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target < odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
+ return result;
+}
+
+static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_state *existing_state,
+ const struct dc_stream_state *stream,
+ const struct dc_stream_status *status,
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
+{
+ int plane_idx;
+ bool result = true;
+
for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
- result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
- state,
- existing_state,
- ctx->config.callbacks.dc->res_pool,
- status->plane_states[plane_idx],
- mpc_factors[plane_idx]);
- if (odm_factor > 1)
+ if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target > odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
return result;
}
@@ -928,20 +971,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *existing_state)
{
- unsigned int odm_factors[MAX_PIPES];
- unsigned int mpc_factors_for_stream[MAX_PIPES];
int i;
bool result = true;
- populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors);
- for (i = 0; i < state->stream_count; i++) {
+ populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors);
+ for (i = 0; i < state->stream_count; i++)
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
- i, odm_factors[i], mpc_factors_for_stream);
- result &= map_dc_pipes_for_stream(ctx, state, existing_state,
- state->streams[i],
- &state->stream_status[i],
- odm_factors[i], mpc_factors_for_stream);
- }
+ i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+
return result;
}
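The rework above splits the pipe-combine update into an unmap pass (only slice-count reductions) followed by a map pass (only increases), so pipes released by shrinking one stream are available when another grows. A toy standalone model of that two-pass ordering, with a simple counter standing in for the pipe pool:

/* Standalone sketch: shrink first to release pipes, then grow. */
#include <stdio.h>

struct factor { int source; int target; };

static int free_pipes;

static void shrink(struct factor *f)
{
	if (f->target < f->source) {
		free_pipes += f->source - f->target;
		f->source = f->target;
	}
}

static void grow(struct factor *f)
{
	if (f->target > f->source && free_pipes >= f->target - f->source) {
		free_pipes -= f->target - f->source;
		f->source = f->target;
	}
}

int main(void)
{
	struct factor odm[2] = { { .source = 2, .target = 1 },	/* stream 0 drops ODM 2->1 */
				 { .source = 1, .target = 2 } };/* stream 1 wants ODM 1->2 */
	int i;

	for (i = 0; i < 2; i++)
		shrink(&odm[i]);	/* release first... */
	for (i = 0; i < 2; i++)
		grow(&odm[i]);		/* ...then acquire */

	printf("free pipes left: %d\n", free_pipes);
	return 0;
}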
@@ -1037,6 +1080,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
ASSERT(false);
}
}
+
+ if (ctx->config.callbacks.build_test_pattern_params &&
+ pipe->stream &&
+ pipe->prev_odm_pipe == NULL &&
+ pipe->top_pipe == NULL)
+ ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
index 2f91244a7b01..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
@@ -30,6 +30,8 @@
#include "dml2_dc_types.h"
struct dml2_context;
+struct dml2_dml_to_dc_pipe_mapping;
+struct dml_display_cfg_st;
/*
* dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index 1cf8a884c0fb..9dab4e43c511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -109,10 +109,21 @@ enum dml2_architecture {
dml2_architecture_20,
};
+struct dml2_pipe_combine_factor {
+ unsigned int source;
+ unsigned int target;
+};
+
+struct dml2_pipe_combine_scratch {
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES];
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES];
+};
+
struct dml2_context {
enum dml2_architecture architecture;
struct dml2_configuration_options config;
struct dml2_helper_det_policy_scratch det_helper_scratch;
+ struct dml2_pipe_combine_scratch pipe_combine_scratch;
union {
struct {
struct display_mode_lib_st dml_core_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 17a58f41fc6a..a41812598ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -29,6 +29,7 @@
#include "dml2_translation_helper.h"
#define NUM_DCFCLK_STAS 5
+#define NUM_DCFCLK_STAS_NEW 8
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
@@ -228,17 +229,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
break;
case dml_project_dcn35:
+ case dml_project_dcn351:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
- case dml_project_dcn351:
- out->num_chans = 16;
- out->round_trip_ping_latency_dcfclk_cycles = 1100;
- out->smn_latency_us = 2;
- break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
@@ -253,12 +250,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
{
struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch;
struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params;
- unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS];
+ unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
+ unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0};
+ unsigned int dml_project = dml2->v20.dml_core_ctx.project;
+
unsigned int i = 0;
unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type
- p->dcfclk_stas_mhz = dcfclk_stas_mhz;
- p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ if (dml_project == dml_project_dcn351) {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz_new;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW;
+ } else {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ }
+
p->in_bbox = in_bbox;
p->out_states = out;
p->in_states = &dml2->v20.scratch.create_scratch.in_states;
@@ -436,8 +442,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
- dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
// Override last out_state with data from last in_state
// This will ensure that out_state contains max fclk
memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
@@ -1056,7 +1061,46 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
plane_index = 0;
}
}
-
+static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
+ unsigned int location, const struct dc_stream_state *in)
+{
+ if (in->num_wb_info > 0) {
+ for (int i = 0; i < __DML_NUM_DMB__; i++) {
+ const struct dc_writeback_info *wb_info = &in->writeback_info[i];
+ /* current DML supports only one DWB per stream (limitation) */
+ if (wb_info->wb_enabled) {
+ out->WritebackEnable[location] = wb_info->wb_enabled;
+ out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width;
+ out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width;
+ out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height;
+
+ out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+
+ out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ /*current design does not have chroma scaling, need to follow up*/
+ out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.h_taps : 1;
+ out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.v_taps : 1;
+
+ out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ }
+ }
+ }
+}
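The writeback ratios populated above are simply the (cropped or full) source extent divided by the destination extent, per axis. A standalone sketch of that computation with illustrative sizes:

/* Standalone sketch of the writeback scaling-ratio computation. */
#include <stdbool.h>
#include <stdio.h>

static double wb_ratio(bool crop_en, unsigned int crop, unsigned int src, unsigned int dest)
{
	return crop_en ? (double)crop / (double)dest
		       : (double)src / (double)dest;
}

int main(void)
{
	/* 3840-wide source cropped to 1920, written back to a 960-wide buffer */
	printf("h-ratio: %.2f\n", wb_ratio(true, 1920, 3840, 960));	/* 2.00 */
	printf("v-ratio: %.2f\n", wb_ratio(false, 0, 2160, 1080));	/* 2.00 */
	return 0;
}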
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1101,6 +1145,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
+ /*Call site for populate_dml_writeback_cfg_from_stream_state*/
+ populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
+ disp_cfg_stream_location, context->streams[i]);
+
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index f15d1dbad6a9..0f8b3336e26d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -224,7 +224,7 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int
static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane,
unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
{
- int i, j;
+ unsigned int i, j;
bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
if (!plane_id)
@@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
}
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]);
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
@@ -374,10 +376,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz
* 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz
* 1000;
+
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ }
}
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx)
@@ -396,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display
watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000;
}
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark)
+{
+ unsigned int time_per_byte = 0;
+ unsigned int total_free_entry = 0xb40;
+ unsigned int buf_lh_capability;
+ unsigned int max_scaled_time;
+
+ if (mode == PACKED_444) /* packed mode 32 bpp */
+ time_per_byte = time_per_pixel / 4;
+ else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
+ time_per_byte = time_per_pixel / 8;
+
+ if (time_per_byte == 0)
+ time_per_byte = 1;
+
+ buf_lh_capability = (total_free_entry * time_per_byte * 32) >> 6; /* time_per_byte is in u6.6 */
+ max_scaled_time = buf_lh_capability - urgent_watermark;
+ return max_scaled_time;
+}
+
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx)
+{
+ int i, j = 0;
+ struct mcif_arb_params *wb_arb_params = NULL;
+ struct dcn_bw_writeback *bw_writeback = NULL;
+ enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/
+
+ if (context->stream_count != 0) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->num_wb_info != 0)
+ j++;
+ }
+ }
+ if (j == 0) /*no dwb */
+ return;
+ for (i = 0; i < __DML_NUM_DMB__; i++) {
+ bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback;
+ wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i];
+
+ for (j = 0 ; j < 4; j++) {
+ /* current DML only has one set of watermarks, need to follow up */
+ bw_writeback->mcif_wb_arb[i].cli_watermark[j] =
+ dml_get_wm_writeback_urgent(dml_core_ctx) * 1000;
+ bw_writeback->mcif_wb_arb[i].pstate_watermark[j] =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+ }
+ if (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) {
+ /* time_per_pixel should be in u6.6 format */
+ bw_writeback->mcif_wb_arb[i].time_per_pixel =
+ (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+ }
+ bw_writeback->mcif_wb_arb[i].slice_lines = 32;
+ bw_writeback->mcif_wb_arb[i].arbitration_slice = 2;
+ bw_writeback->mcif_wb_arb[i].max_scaled_time =
+ dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel,
+ wbif_mode, wb_arb_params->cli_watermark[0]);
+ /*not required any more*/
+ bw_writeback->mcif_wb_arb[i].dram_speed_change_duration =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+
+ }
+}
void dml2_initialize_det_scratch(struct dml2_context *in_ctx)
{
int i;
@@ -468,6 +541,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
else
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
+
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx);
if (total_det_allocated > max_det_size) {
need_recalculation = true;
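Side note on the fixed-point math in dml2_calc_max_scaled_time() above: time_per_pixel arrives in u6.6 format (dml2_extract_writeback_wm() derives it as (1000000 << 6) / phy_pix_clk), so the >> 6 after scaling by the 32-byte MCIF entry size drops the fractional bits again. A standalone sketch of the same arithmetic, using an invented pixel clock and watermark rather than driver values, and assuming phy_pix_clk is in kHz:

	#include <stdio.h>

	int main(void)
	{
		unsigned int phy_pix_clk_khz = 300000;  /* hypothetical 300 MHz pixel clock */
		unsigned int time_per_pixel = (1000000 << 6) / phy_pix_clk_khz; /* u6.6, as in dml2_extract_writeback_wm() */
		unsigned int time_per_byte = time_per_pixel / 8; /* PACKED_444_FP16: 64 bpp = 8 bytes per pixel */
		unsigned int total_free_entry = 0xb40;
		unsigned int urgent_watermark = 0x100;  /* hypothetical */
		unsigned int buf_lh_capability;

		if (time_per_byte == 0)
			time_per_byte = 1;

		/* scale by 32-byte entries, then shift out the u6.6 fraction */
		buf_lh_capability = (total_free_entry * time_per_byte * 32) >> 6;
		printf("max_scaled_time = %u\n", buf_lh_capability - urgent_watermark);
		return 0;
	}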
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
index 5842d6d3c4b6..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
@@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig
unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled);
void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context);
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx);
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
bool dml2_is_stereo_timing(const struct dc_stream_state *stream);
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark);
/*
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 72cca367062e..9412d5384a41 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -570,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
struct dml2_dcn_clocks out_clks;
unsigned int result = 0;
bool need_recalculation = false;
+ uint32_t cstate_enter_plus_exit_z8_ns;
if (!context || context->stream_count == 0)
return true;
@@ -639,8 +640,17 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
+
+ cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+
+ if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000;
+
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
}
return result;
@@ -681,13 +691,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
{
bool out = false;
- if (!(context->bw_ctx.dml2))
+ if (!dml2)
return false;
- dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);
+ dml2_apply_debug_options(in_dc, dml2);
/* Use dml_validate_only for fast_validate path */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index cc662d682fd4..4a8bd2f4195e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -71,6 +71,7 @@ struct dml2_dcn_clocks {
struct dml2_dc_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
+ void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master);
bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context);
bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm);
bool (*update_pipes_for_stream_with_slice_count)(
@@ -86,8 +87,23 @@ struct dml2_dc_callbacks {
const struct dc_plane_state *plane,
int slice_count);
int (*get_odm_slice_index)(const struct pipe_ctx *opp_head);
+ int (*get_odm_slice_count)(const struct pipe_ctx *opp_head);
int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe);
+ int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe);
struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *(*get_otg_master_for_stream)(
+ struct resource_context *res_ctx,
+ const struct dc_stream_state *stream);
+ int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *opp_heads[MAX_PIPES]);
+ int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *dpp_pipes[MAX_PIPES]);
+ struct dc_stream_status *(*get_stream_status)(
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id);
};
struct dml2_dc_svp_callbacks {
@@ -96,10 +112,10 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
- struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
- enum dc_status (*add_phantom_stream)(struct dc *dc,
+ enum dc_status (*add_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
@@ -108,7 +124,7 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state *stream,
struct dc_plane_state *plane_state,
struct dc_state *context);
- enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ enum dc_status (*remove_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
void (*release_phantom_plane)(const struct dc *dc,
@@ -121,6 +137,15 @@ struct dml2_dc_svp_callbacks {
enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
+ bool (*remove_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ void (*release_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
};
struct dml2_clks_table_entry {
@@ -191,6 +216,8 @@ struct dml2_configuration_options {
unsigned int max_segments_per_hubp;
unsigned int det_segment_size;
bool map_dc_pipes_with_callbacks;
+
+ bool use_clock_dc_limits;
};
/*
@@ -244,6 +271,7 @@ void dml2_reinit(const struct dc *in_dc,
*/
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
+ struct dml2_context *dml2,
bool fast_validate);
/*
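Because dml2_validate() now takes the dml2 context as an explicit argument instead of reading context->bw_ctx.dml2 internally, existing call sites need a matching one-line update. A hypothetical sketch of such a caller (the wrapper function below is illustrative only, not a hook added by this patch):

	/* Hypothetical caller: pass the dml2 context explicitly. */
	static bool example_validate_bandwidth(struct dc *dc, struct dc_state *context,
					       bool fast_validate)
	{
		/* before: dml2_validate(dc, context, fast_validate) */
		return dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate);
	}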
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
new file mode 100644
index 000000000000..99bd36073561
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
@@ -0,0 +1,77 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'dpp' sub-component of DAL.
+#
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o
+
+AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10)
+
+###############################################################################
+
+DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o
+
+AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20)
+
+###############################################################################
+
+DPP_DCN201 = dcn201_dpp.o
+
+AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201)
+
+###############################################################################
+
+DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o
+
+AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30)
+
+###############################################################################
+
+DPP_DCN32 = dcn32_dpp.o
+
+AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32)
+
+###############################################################################
+
+DPP_DCN35 = dcn35_dpp.o
+
+AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35)
+
+###############################################################################
+
+endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
new file mode 100644
index 000000000000..1318c6fba3e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
@@ -0,0 +1,6 @@
+dal3_subdirectory_sources(
+ dcn10_dpp.c
+ dcn10_dpp_cm.c
+ dcn10_dpp_dscl.c
+ dcn10_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 4e391fd1d71c..e1da48b05d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index a039eedc7c24..c48139bed11f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1090,7 +1090,8 @@
type DPP_CLOCK_ENABLE; \
type CM_HDR_MULT_COEF; \
type CUR0_FP_BIAS; \
- type CUR0_FP_SCALE;
+ type CUR0_FP_SCALE; \
+ type DISPCLK_R_GATE_DISABLE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
index 2f994a3a0b9c..006e23842016 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
@@ -28,9 +28,9 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
-#include "dcn10_cm_common.h"
+#include "dcn10/dcn10_cm_common.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
@@ -234,7 +234,7 @@ void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
enum gamut_remap_select select;
read_gamut_remap(dpp, arr_reg_val, &select);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
index 5ca9ab8a76e8..808bca9fb804 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
new file mode 100644
index 000000000000..9c2d7096348e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn20_dpp.c
+ dcn20_dpp_cm.c
+ dcn20_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
index 1516c0a48726..56ebd7164dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index 672cde46c4b9..49cb25c9cb36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -736,7 +736,7 @@ bool dpp20_program_shaper(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void dpp2_cnv_set_alpha_keyer(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
index 58dc69926e8a..31613372e214 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
@@ -274,7 +274,7 @@ void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
enum dcn20_gamut_remap_select select;
read_gamut_remap(dpp, arr_reg_val, &select);
@@ -1114,15 +1114,15 @@ static void dpp20_select_3dlut_ram_mask(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
new file mode 100644
index 000000000000..7711cd3c47a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn201_dpp.c
+ dcn201_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
index f809a7d21033..345202fee40f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn201_dpp.h"
+#include "dcn201/dcn201_dpp.h"
#include "basics/conversion.h"
#define REG(reg)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
index cbd5b47b4acf..cbd5b47b4acf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
new file mode 100644
index 000000000000..0faee2a1e32b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn30_dpp.c
+ dcn30_dpp_cm.c
+ dcn30_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index a3a769aad042..f8c0cee34080 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -293,9 +293,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
@@ -319,9 +321,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
@@ -1384,15 +1388,15 @@ static void dpp3_select_3dlut_ram_mask(
}
static bool dpp3_program_3dlut(struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index 2ac8045a87a1..269f437c1633 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -132,6 +132,8 @@
SRI(CM_POST_CSC_B_C33_C34, CM, id), \
SRI(CM_MEM_PWR_CTRL, CM, id), \
SRI(CM_CONTROL, CM, id), \
+ SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI(CM_TEST_DEBUG_DATA, CM, id), \
SRI(FORMAT_CONTROL, CNVC_CFG, id), \
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -294,6 +296,7 @@
TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
+ TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -426,6 +429,7 @@
type CM_GAMCOR_LUT_DATA; \
type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \
type CM_GAMCOR_LUT_READ_COLOR_SEL; \
+ type CM_GAMCOR_LUT_READ_DBG; \
type CM_GAMCOR_LUT_HOST_SEL; \
type CM_GAMCOR_LUT_CONFIG_MODE; \
type CM_GAMCOR_LUT_STATUS; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 2f5b3fbd3507..82eca0e7b7d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -445,7 +445,7 @@ void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
int select;
read_gamut_remap(dpp, arr_reg_val, &select);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
new file mode 100644
index 000000000000..7743edc4599f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn32_dpp.c
+ dcn32_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index 681e75c6dbaf..41679997b44d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn32_dpp.h"
+#include "dcn32/dcn32_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
index 572958d287eb..572958d287eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
new file mode 100644
index 000000000000..91df5db26435
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn35_dpp.c
+ dcn35_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
new file mode 100644
index 000000000000..e16274fee31d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35/dcn35_dpp.h"
+#include "reg_helper.h"
+
+#define REG(reg) dpp->tf_regs->reg
+
+#define CTX dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
+ ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
+
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (enable) {
+ if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL,
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 1,
+ DISPCLK_R_GATE_DISABLE, 1);
+ } else
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 0,
+ DISPCLK_R_GATE_DISABLE, 0);
+}
+
+static struct dpp_funcs dcn35_dpp_funcs = {
+ .dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
+ .dpp_read_state = dpp30_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = NULL,
+ .dpp_set_pre_degam = dpp3_set_pre_degam,
+ .dpp_program_input_lut = NULL,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp3_cnv_setup,
+ .dpp_program_degamma_pwl = NULL,
+ .dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
+ .dpp_program_cm_bias = dpp3_program_cm_bias,
+
+ .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP
+ .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
+ .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
+
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp3_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp35_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+};
+
+
+bool dpp35_construct(
+ struct dcn3_dpp *dpp, struct dc_context *ctx,
+ uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn35_dpp_shift *tf_shift,
+ const struct dcn35_dpp_mask *tf_mask)
+{
+ bool ret = dpp32_construct(dpp, ctx, inst, tf_regs,
+ (const struct dcn3_dpp_shift *)(tf_shift),
+ (const struct dcn3_dpp_mask *)(tf_mask));
+
+ dpp->base.funcs = &dcn35_dpp_funcs;
+ return ret;
+}
+
+void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
+{
+ REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
index 09b84307cd9e..135872d88219 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
@@ -31,7 +31,9 @@
#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \
DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \
- TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh)
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh)
#define DPP_REG_FIELD_LIST_DCN35(type) \
struct { \
@@ -47,6 +49,11 @@ struct dcn35_dpp_mask {
DPP_REG_FIELD_LIST_DCN35(uint32_t);
};
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable);
+
bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx,
uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
const struct dcn35_dpp_shift *tf_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index ac41f9c0a283..150ef23440a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -458,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
- struct dc_dsc_config config;
+ struct dc_dsc_config config = {0};
struct dc_dsc_config_options options = {0};
options.dsc_min_slice_height_override = dsc_min_slice_height_override;
@@ -868,9 +868,9 @@ static bool setup_dsc_config(
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
- int max_slices_h;
- int min_slices_h;
- int num_slices_h;
+ int max_slices_h = 0;
+ int min_slices_h = 0;
+ int num_slices_h = 0;
int pic_width;
int slice_width;
int target_bpp;
@@ -1055,7 +1055,12 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- dsc_cfg->num_slices_v = pic_height/slice_height;
+ if (slice_height > 0) {
+ dsc_cfg->num_slices_v = pic_height / slice_height;
+ } else {
+ is_dsc_possible = false;
+ goto done;
+ }
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 36d6c1646a51..59864130cf83 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
{
int ret;
struct drm_dsc_config dsc_cfg;
- unsigned long long tmp;
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
@@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
- tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
- do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
- dsc_params->bytes_per_pixel = (uint32_t)tmp;
+ dsc_params->bytes_per_pixel =
+ (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)),
+ (uint32_t)dsc_cfg.slice_width)); /* Round-up */
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
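The bytes_per_pixel change above is a round-up division into 4.28 fixed point (0x10000000 == 2^28), routed through div_u64() since plain 64-bit '/' relies on libgcc helpers that are not available in 32-bit kernel builds. A standalone illustration with invented slice numbers (not taken from any real DSC configuration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t slice_chunk_size = 1080; /* hypothetical bytes per slice line */
		uint32_t slice_width = 960;       /* hypothetical slice width in pixels */
		/* round up: add (divisor - 1) before dividing */
		uint64_t num = (uint64_t)slice_chunk_size * 0x10000000 + (slice_width - 1);
		uint32_t bytes_per_pixel = (uint32_t)(num / slice_width);

		printf("bytes_per_pixel (4.28 fixed point) = 0x%08x (~%.3f)\n",
		       bytes_per_pixel, bytes_per_pixel / (double)0x10000000);
		return 0;
	}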
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index d734e3a134d1..2840ed5c57d8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -95,10 +95,6 @@ static bool offset_to_id(
return true;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
return false;
}
break;
@@ -184,11 +180,6 @@ static bool offset_to_id(
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): not exista */
-#ifdef PALLADIUM_SUPPORTED
- *id = GPIO_ID_HPD;
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
ASSERT_CRITICAL(false);
return false;
}
@@ -308,10 +299,6 @@ static bool id_to_offset(
break;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
- result = true;
-#endif
result = false;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 3ede6e02c3a7..663c17f52779 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -128,7 +128,7 @@ struct gpio *dal_gpio_service_create_irq(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
@@ -144,7 +144,7 @@ struct gpio *dal_gpio_service_create_generic_mux(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
struct gpio *generic;
@@ -178,7 +178,7 @@ struct gpio_pin_info dal_gpio_get_generic_pin_info(
enum gpio_id id,
uint32_t en)
{
- struct gpio_pin_info pin;
+ struct gpio_pin_info pin = {0};
if (service->translate.funcs->id_to_offset) {
service->translate.funcs->id_to_offset(id, en, &pin);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 9e8e9de51a92..cf8aa23b4415 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -180,7 +180,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
-HWSS_DCN351 = dcn351_init.o
+HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o
AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351))
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 9d5df4c0da59..0d3ea291eeee 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -249,7 +249,7 @@ static bool dce110_enable_display_power_gating(
return false;
}
-static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+static void dce110_prescale_params(struct ipp_prescale_params *prescale_params,
const struct dc_plane_state *plane_state)
{
prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
@@ -289,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (ipp == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- build_prescale_params(&prescale_params, plane_state);
+ dce110_prescale_params(&prescale_params, plane_state);
ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
- if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity &&
+ if (!plane_state->gamma_correction.is_identity &&
dce_use_lut(plane_state->format))
- ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+ ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction);
if (tf == NULL) {
/* Default case if no input transfer function specified */
@@ -614,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
xfm->funcs->opp_power_on_regamma_lut(xfm, true);
xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) {
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) {
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
- } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func,
+ } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func,
&xfm->regamma_params)) {
xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
@@ -1185,22 +1182,13 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- /* TODO: This looks like a bug to me as we are disabling HPO IO when
- * we are just disabling a single HPO stream. Shouldn't we disable HPO
- * HW control only when HPOs for all streams are disabled?
- */
- if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
- pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
- pipe_ctx->stream->ctx->dc->hwseq, false);
- }
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1549,7 +1537,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
}
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2200,7 +2188,7 @@ static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
- int i;
+ unsigned int i;
/* program audio wall clock. use HDMI as clock source if HDMI
* audio active. Otherwise, use DP as clock source
@@ -2272,7 +2260,7 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2287,6 +2275,19 @@ static void dce110_setup_audio_dto(
}
}
+static bool dce110_is_hpo_enabled(struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
@@ -2295,6 +2296,8 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
+ bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
+ bool is_hpo_enabled = dce110_is_hpo_enabled(context);
/* reset syncd pipes from disabled pipes */
if (dc->config.use_pipe_ctx_sync_logic)
@@ -2337,6 +2340,10 @@ enum dc_status dce110_apply_ctx_to_hw(
dce110_setup_audio_dto(dc, context);
+ if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) {
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);
+ }
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 314798400b16..0c4aef8ffe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -367,7 +367,7 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.dpp.ocsc);
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -1366,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -1488,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
/* Power gate DSCs */
if (hws->funcs.dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1813,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- if (plane_state->gamma_correction &&
- !dpp_base->ctx->dc->debug.always_use_regamma
- && !plane_state->gamma_correction->is_identity
+ if (!dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction.is_identity
&& dce_use_lut(plane_state->format))
- dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
+ dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
if (tf == NULL)
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
@@ -1861,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
#define MAX_NUM_HW_POINTS 0x200
static void log_tf(struct dc_context *ctx,
- struct dc_transfer_func *tf, uint32_t hw_points_num)
+ const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
// DC_LOG_GAMMA is default logging of all hw points
// DC_LOG_ALL_GAMMA logs all points, not only hw points
@@ -1898,16 +1912,15 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
@@ -1915,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream->ctx &&
- stream->out_transfer_func) {
+ if (stream->ctx) {
log_tf(stream->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
}
@@ -2173,7 +2185,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
- unsigned int pclk;
+ unsigned int pclk = 0;
uint32_t embedded_pix_clk_100hz;
uint16_t embedded_h_total;
@@ -2264,7 +2276,7 @@ void dcn10_enable_vblanks_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height, master;
+ int i, width = 0, height = 0, master;
DC_LOGGER_INIT(dc_ctx->logger);
@@ -2330,7 +2342,7 @@ void dcn10_enable_timing_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height;
+ int i, width = 0, height = 0;
DC_LOGGER_INIT(dc_ctx->logger);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 8b3536c380b8..7d833fa6dd77 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -155,7 +155,7 @@ void dcn20_log_color_state(struct dc *dc,
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
" OGAM mode\n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -403,7 +403,7 @@ void dcn20_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
@@ -873,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+
+ if (dccg->funcs->set_dtbclk_p_src)
+ dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
@@ -959,22 +975,6 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- if (dccg->funcs->set_dtbclk_p_src)
- dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
-
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- }
-
return DC_OK;
}
@@ -1011,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
/*
* program OGAM only for the top pipe
* if there is a pipe split then fix diagnostic is required:
@@ -1022,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (mpc->funcs->power_on_mpc_mem_pwr)
mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->top_pipe == NULL
- && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/*
* there is no ROM
*/
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
/*
@@ -1050,17 +1050,15 @@ bool dcn20_set_blend_lut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -1072,24 +1070,21 @@ bool dcn20_set_shaper_3dlut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *shaper_lut = NULL;
-
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- shaper_lut = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- shaper_lut = &dpp_base->shaper_params;
- }
+ const struct pwl_params *shaper_lut = NULL;
+
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ shaper_lut = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
}
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->state.bits.initialized == 1)
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
- &plane_state->lut3d_func->lut_3d);
+ &plane_state->lut3d_func.lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
@@ -1112,15 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc,
hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
hws->funcs.set_blend_lut(pipe_ctx, plane_state);
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
-
-
- if (tf == NULL) {
- dpp_base->funcs->dpp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_BYPASS);
- return true;
- }
+ tf = &plane_state->in_transfer_func;
if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
use_degamma_ram = true;
@@ -1917,9 +1904,11 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
- if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
- dc->res_pool->hubbub->funcs->program_det_size(
- dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2080,9 +2069,11 @@ void dcn20_program_front_end_for_ctx(
* turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
* DET allocation.
*/
- if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2893,11 +2884,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- if (dc->hwseq->funcs.setup_hpo_hw_control)
- dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
- }
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323338..ef6488165b8f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
+ .log_color_state = dcn20_log_color_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d5769f38874f..6be846635a79 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -167,7 +167,7 @@ void dcn201_init_blank(
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 7252f5f781f0..804be977ea47 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -66,7 +66,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c
int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 8bc3d01537bb..ed9141a67db3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -166,7 +166,7 @@ void dcn30_log_color_state(struct dc *dc,
"C21 C22 C23 C24 "
"C31 C32 C33 C34 \n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -223,16 +223,14 @@ bool dcn30_set_blend_lut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(
- plane_state->blend_tf, &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(
+ &plane_state->blend_tf, &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -300,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc,
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (dpp_base == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -378,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (pipe_ctx->top_pipe == NULL) {
/*program rmu shaper and 3dlut in MPC*/
ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -804,7 +799,7 @@ void dcn30_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -818,7 +813,7 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
enable);
/* Wait for two frames to make sure AV mute is sent out */
- if (enable) {
+ if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
@@ -890,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
uint32_t tmr_delay = 0, tmr_scale = 0;
- struct dc_cursor_attributes cursor_attr;
+ struct dc_cursor_attributes cursor_attr = {0};
bool cursor_cache_enable = false;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
@@ -946,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
plane->address.page_table_base.quad_part == 0 &&
dc->hwss.does_plane_fit_in_mall &&
- dc->hwss.does_plane_fit_in_mall(dc, plane,
+ dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch,
+ plane->plane_size.surface_size.height, plane->format,
cursor_cache_enable ? &cursor_attr : NULL)) {
unsigned int v_total = stream->adjust.v_total_max ?
stream->adjust.v_total_max : stream->timing.v_total;
@@ -1076,11 +1072,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr)
{
// add meta size?
- unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int surface_size = pitch * height *
+ (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
unsigned int mall_size = dc->caps.mall_size_total;
unsigned int cursor_size = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 638f018a3cb5..76b16839486a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -71,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index a760f0c6fe98..1c8abb417b6e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -273,7 +273,7 @@ void dcn31_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn31_dsc_pg_control(
@@ -479,7 +479,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c07..0d8a05cf8b1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -105,7 +82,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index c0b526cf1786..b8e884368dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -239,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
// Convert number of cache lines required to number of ways
if (dc->debug.force_mall_ss_num_ways > 0) {
num_ways = dc->debug.force_mall_ss_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
} else {
- num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
+ num_ways = 0;
}
return num_ways;
@@ -261,7 +263,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
for (i = 0; i < dc->current_state->stream_count; i++) {
/* MALL SS messaging is not supported with PSR at this time */
if (dc->current_state->streams[i] != NULL &&
- dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off &&
+ dc->current_state->stream_status[i].plane_count > 0)))
return false;
}
@@ -475,39 +479,35 @@ bool dcn32_set_mcm_luts(
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = true;
- struct pwl_params *lut_params = NULL;
+ const struct pwl_params *lut_params = NULL;
// 1D LUT
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
}
result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
lut_params = NULL;
// Shaper
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
- cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
}
result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
- if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1)
- result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id);
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
+ result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
else
result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
@@ -524,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (mpc == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -562,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
/*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -956,39 +953,16 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
if (dc->ctx->dmub_srv->dmub->fw_version <
- DMUB_FW_VERSION(7, 0, 35)) {
+ DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1015,7 +989,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1103,10 +1077,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -1119,20 +1089,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1586,7 +1542,7 @@ void dcn32_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
uint32_t i;
/* program opp dpg blank color */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 4b92df23ff0d..5295f52e4fc8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -349,7 +349,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
if (dc->res_pool->pg_cntl) {
@@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -396,7 +373,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -536,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
}
}
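+/*
+ * Gate or ungate the DPSTREAMCLK root clock for a DP HPO instance via DCCG.
+ * This is a no-op unless debug.root_clock_optimization.bits.dpstream is set
+ * and the DCCG implements set_dpstreamclk_root_clock_gating.
+ */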
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
+{
+ if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ return;
+
+ if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
+ hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
+ hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
+ }
+}
+
void dcn35_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
@@ -679,22 +649,43 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
if (enable) {
- dc_get_edp_links(dc, edp_links, &edp_num);
- if (edp_num == 0 || edp_num > 1)
- return false;
+ uint32_t num_active_edp = 0;
+ int i;
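+ /* Idle optimizations are only allowed when every active stream is an eDP
+ * on PWRSEQ0 with PSR or Replay enabled, and at most one eDP is active.
+ */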
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
+ struct dc_link *link = stream->link;
+ bool is_psr = link && !link->panel_config.psr.disable_psr &&
+ (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+
+ /* Ignore streams that are disabled. */
+ if (stream->dpms_off)
+ continue;
+
+ /* Active external displays block idle optimizations. */
+ if (!dc_is_embedded_signal(stream->signal))
+ return false;
+
+ /* If the link is not on PWRSEQ0, idle optimizations can't be entered. */
+ if (link && link->link_index != 0)
+ return false;
- if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ /* Check for panel power features required for idle optimizations. */
+ if (!is_psr && !is_replay)
return false;
+
+ num_active_edp += 1;
}
+
+ /* Disallow idle optimizations if more than one eDP is active. */
+ if (num_active_edp > 1)
+ return false;
}
// TODO: review other cases when idle optimization is allowed
@@ -720,6 +711,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
struct hubbub *hubbub = dc->res_pool->hubbub;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -801,6 +793,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -842,6 +835,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
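+ /* If an enabled OPP's bottom MPCC points at a pipe whose TG is not
+ * enabled, the tree reference is cleared below so a stale blend chain
+ * is not carried forward.
+ */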
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
if (pg_cntl != NULL) {
if (pg_cntl->funcs->dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1002,6 +1009,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1019,8 +1029,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
- if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
- pipe_ctx->plane_res.mpcc_inst >= 0)
+ if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
if (pipe_ctx->stream_res.dsc)
@@ -1028,6 +1037,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
}
/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
@@ -1085,6 +1097,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (j == PG_OPTC && new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
} else if (cur_pipe->plane_state == new_pipe->plane_state ||
cur_pipe == new_pipe) {
@@ -1114,6 +1129,11 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM &&
+ cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
+ new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
}
}
@@ -1129,6 +1149,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
}
/**
@@ -1253,14 +1276,19 @@ void dcn35_root_clock_control(struct dc *dc,
if (!pg_cntl)
return;
/*enable root clock first when power up*/
- if (power_on)
+ if (power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
@@ -1273,14 +1301,19 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
/*disable root clock first when power down*/
- if (!power_on)
+ if (!power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
}
void dcn35_prepare_bandwidth(
@@ -1321,22 +1354,6 @@ void dcn35_optimize_bandwidth(
}
}
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->set_idle_state)
- dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle);
-}
-
-uint32_t dcn35_get_idle_state(const struct dc *dc)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->get_idle_state)
- return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
-
- return 0;
-}
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust)
{
@@ -1394,3 +1411,31 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
+
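+/*
+ * Program a long vertical blank (extended VTOTAL range) on each pipe's
+ * timing generator; vertical_blank_start is derived from the stream timing
+ * as v_total - v_front_porch.
+ */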
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
+{
+ int i = 0;
+ struct long_vtotal_params params = {0};
+
+ params.vertical_total_max = v_total_max;
+ params.vertical_total_min = v_total_min;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (!pipe_ctx[i])
+ continue;
+
+ if (pipe_ctx[i]->stream) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+
+ if (timing)
+ params.vertical_blank_start = timing->v_total - timing->v_front_porch;
+ else
+ params.vertical_blank_start = 0;
+
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index c354efa6c1b2..a731c8880d60 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow
void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on);
+
void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable);
@@ -84,13 +86,13 @@ void dcn35_dsc_pg_control(
unsigned int dsc_inst,
bool power_on);
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
-uint32_t dcn35_get_idle_state(const struct dc *dc);
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust);
void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params);
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a93073055e7b..df3bf77f3fb4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -121,8 +121,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
+ .set_long_vtotal = dcn35_set_long_vblank,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -148,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
index b24ad27fe6ef..a4b3c1e99ec6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -1,16 +1,27 @@
#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
+# Copyright (c) 2022-2024 Advanced Micro Devices, Inc.
#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
#
-# Authors: AMD
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
#
# Makefile for DCN351.
-DCN351 = dcn351_init.o
+DCN351 = dcn351_hwseq.o dcn351_init.o
AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
new file mode 100644
index 000000000000..93fe5b262a3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "resource.h"
+#include "dcn351_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#define DC_LOGGER_INIT(logger) \
+ struct dal_logger *dc_logger = logger
+
+#define DC_LOGGER \
+ dc_logger
+
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_gate(dc, context, update_state);
+
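+ /* If any higher-indexed HUBP/DPP pair must stay powered, keep every
+ * lower-indexed pair ungated as well.
+ */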
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ !update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+
+ break;
+ }
+ }
+}
+
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_ungate(dc, context, update_state);
+
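+ /* If any higher-indexed HUBP/DPP pair is being ungated, ungate every
+ * lower-indexed pair as well (mirror of dcn351_calc_blocks_to_gate).
+ */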
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+
+ break;
+ }
+ }
+}
+
+/**
+ * dcn351_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ }
+ }
+
+ // domain25 currently always on.
+
+ /* This requires all clients to unregister their OPTC interrupts; let DMUB FW handle it. */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
+
+ // domain23 currently always on.
+ // domain22 currently always on.
+}
+
+/**
+ * dcn351_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 11, DCPG 19: dsc3
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ // domain22 currently always on.
+ // domain23 currently always on.
+
+ /* This requires all clients to unregister their OPTC interrupts; let DMUB FW handle it. */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ // domain25 currently always on.
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
index 3341ef71009b..6d8f3bfb668e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,30 +24,18 @@
*
*/
-#include "core_types.h"
-#include "dcn35_dpp.h"
-#include "reg_helper.h"
+#ifndef __DC_HWSS_DCN351_H__
+#define __DC_HWSS_DCN351_H__
-#define REG(reg) dpp->tf_regs->reg
+#include "hw_sequencer_private.h"
-#define CTX dpp->base.ctx
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
-#undef FN
-#define FN(reg_name, field_name) \
- ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
- ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
-
-bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx,
- uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
- const struct dcn35_dpp_shift *tf_shift,
- const struct dcn35_dpp_mask *tf_mask)
-{
- return dpp32_construct(dpp, ctx, inst, tf_regs,
- (const struct dcn3_dpp_shift *)(tf_shift),
- (const struct dcn3_dpp_mask *)(tf_mask));
-}
-
-void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
-{
- REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
-}
+#endif /* __DC_HWSS_DCN351_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index ab17fa1c64e8..a53092cd619b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -32,6 +32,7 @@
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
#include "dcn35/dcn35_hwseq.h"
+#include "dcn351/dcn351_hwseq.h"
#include "dcn351_init.h"
@@ -67,7 +68,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
@@ -120,8 +121,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -147,6 +146,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index f89f205e42a1..7c339e7e7117 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -377,7 +377,10 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
- bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+ bool (*does_plane_fit_in_mall)(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
@@ -424,11 +427,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
- void (*set_idle_state)(const struct dc *dc, bool allow_idle);
- uint32_t (*get_idle_state)(const struct dc *dc);
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
+ void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
};
void color_space_to_black_color(
@@ -478,9 +480,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status);
+ struct dc_stream_status *stream_status,
+ struct dc_state *context);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 554cfab5ab24..341219cf4144 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -120,6 +120,10 @@ struct hwseq_private_funcs {
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool clock_on);
+ void (*dpstream_root_clock_control)(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
void (*dpp_pg_control)(struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b1b72e688f74..028b2f971e36 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -90,6 +90,9 @@ struct resource_funcs {
void (*update_soc_for_wm_a)(
struct dc *dc, struct dc_state *context);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
/**
* @populate_dml_pipes - Populate pipe data struct
*
@@ -336,7 +339,9 @@ struct stream_resource {
};
struct plane_resource {
+ /* scl_data is scratch space required to program a plane */
struct scaler_data scl_data;
+ /* The HW object pointers below are required to enable the plane */
struct hubp *hubp;
struct mem_input *mi;
struct input_pixel_processor *ipp;
@@ -496,7 +501,7 @@ struct dcn_bw_writeback {
struct dcn_bw_output {
struct dc_clocks clk;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
unsigned int mall_ss_size_bytes;
@@ -515,6 +520,7 @@ struct bw_context {
union bw_output bw;
struct display_mode_lib dml;
struct dml2_context *dml2;
+ struct dml2_context *dml2_dc_power_source;
};
struct dc_dmub_cmd {
@@ -522,25 +528,6 @@ struct dc_dmub_cmd {
enum dm_dmub_wait_type wait_type;
};
-struct dc_scratch_space {
- /* used to temporarily backup plane states of a stream during
- * dc update. The reason is that plane states are overwritten
- * with surface updates in dc update. Once they are overwritten
- * current state is no longer valid. We want to temporarily
- * store current value in plane states so we can still recover
- * a valid current state during dc update.
- */
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
- struct dc_gamma gamma_correction[MAX_SURFACE_NUM];
- struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM];
- struct dc_3dlut lut3d_func[MAX_SURFACE_NUM];
- struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM];
- struct dc_transfer_func blend_tf[MAX_SURFACE_NUM];
-
- struct dc_stream_state stream_state;
- struct dc_transfer_func out_transfer_func;
-};
-
/**
* struct dc_state - The full description of a state requested by users
*/
@@ -623,8 +610,7 @@ struct dc_state {
unsigned int stutter_period_us;
} perf_params;
-
- struct dc_scratch_space scratch;
+ enum dc_power_source_type power_source;
};
struct replay_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 9e4ddc985240..55529c5f471c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,7 +31,7 @@
#define __DCN_CALCS_H__
#include "bw_fixed.h"
-#include "../dml/display_mode_lib.h"
+#include "dml/display_mode_lib.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 17e014d3bdc8..4f7480f60c85 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -281,8 +281,6 @@ struct clk_mgr_funcs {
void (*set_low_power_state)(struct clk_mgr *clk_mgr);
void (*exit_low_power_state)(struct clk_mgr *clk_mgr);
bool (*is_ips_supported)(struct clk_mgr *clk_mgr);
- void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle);
- uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr);
void (*init_clocks)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index f4d4a68c91dc..4ba18ea57aad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -349,7 +349,7 @@ struct clk_mgr_internal {
enum dm_pp_clocks_state cur_min_clks_state;
bool periodic_retraining_disabled;
- unsigned int cur_phyclk_req_table[MAX_PIPES * 2];
+ unsigned int cur_phyclk_req_table[MAX_LINKS];
bool smu_present;
void *wm_range_table;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index b9a06bf84cc9..d4c7885fc916 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,6 +59,7 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
+ enum streamclk_source clk_src;
uint64_t pixclk_hz;
uint64_t refclk_hz;
};
@@ -105,6 +106,10 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
+ void (*set_dpstreamclk_root_clock_gating)(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable);
void (*set_dpstreamclk)(
struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 2ae7484d18af..305fdc127bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -160,7 +160,7 @@ struct hubbub_funcs {
bool (*program_watermarks)(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0f24afbf4388..ca8de345d039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -322,7 +322,7 @@ struct dpp_funcs {
const struct pwl_params *params);
bool (*dpp_program_3dlut)(
struct dpp *dpp,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 729ca0064e94..063efc8128a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -147,9 +147,10 @@ struct dwb_caps {
unsigned int support_ogam :1;
unsigned int support_wbscl :1;
unsigned int support_ocsc :1;
- unsigned int support_stereo :1;
+ unsigned int support_stereo :1;
+ unsigned int support_4k_120p :1;
} caps;
- unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */
+ unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. */
};
struct dwbc {
@@ -166,8 +167,9 @@ struct dwbc {
bool dwb_is_drc;
int wb_src_plane_inst;/*hubp, mpcc, inst*/
uint32_t mask_id;
- int otg_inst;
- bool mvc_cfg;
+ int otg_inst;
+ bool mvc_cfg;
+ struct dc_dwb_params params;
};
struct dwbc_funcs {
@@ -192,6 +194,10 @@ struct dwbc_funcs {
struct dwbc *dwbc,
enum dwb_frame_capture_enable enable);
+ void (*dwb_set_scaler)(
+ struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
@@ -205,9 +211,11 @@ struct dwbc_funcs {
struct dwbc *dwbc,
struct dwb_warmup_params *warmup_params);
-
+ bool (*dwb_get_mcifbuf_line)(
+ struct dwbc *dwbc, unsigned int *buf_idx,
+ unsigned int *cur_line,
+ unsigned int *over_run);
#if defined(CONFIG_DRM_AMD_DC_FP)
-
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
enum dc_color_space color_space,
@@ -216,17 +224,17 @@ struct dwbc_funcs {
bool (*dwb_ogam_set_output_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-
+#endif
//TODO: merge with output_transfer_func?
bool (*dwb_ogam_set_input_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-#endif
+
+ void (*get_drr_time_stamp)(
+ struct dwbc *dwbc, uint32_t *time_stamp);
+
bool (*get_dwb_status)(
struct dwbc *dwbc);
- void (*dwb_set_scaler)(
- struct dwbc *dwbc,
- struct dc_dwb_params *params);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index dcae23faeee3..c80ebb407add 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,10 +44,11 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
+#define MAX_LINKS (MAX_PIPES * 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
-#define MAX_HPO_DP2_LINK_ENCODERS 2
+#define MAX_HPO_DP2_LINK_ENCODERS 4
struct gamma_curve {
uint32_t offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index dbe7afa9d3a2..af9183f5d69b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -163,12 +163,11 @@ struct link_encoder_funcs {
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
+
void (*set_dio_phy_mux)(
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
- void (*set_dig_output_mode)(
- struct link_encoder *enc, uint8_t pix_per_container);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index b72fb314d804..86c12cd6f47d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -50,11 +50,13 @@ struct dcn_watermarks {
uint32_t usr_retraining_ns;
};
-struct dcn_watermark_set {
- struct dcn_watermarks a;
- struct dcn_watermarks b;
- struct dcn_watermarks c;
- struct dcn_watermarks d;
+union dcn_watermark_set {
+ struct {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+ }; // legacy
};
struct dce_watermarks {
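
The hunk above turns dcn_watermark_set into a union whose anonymous struct keeps the legacy a/b/c/d accessors compiling unchanged. A minimal standalone sketch of that anonymous-struct-in-union pattern, assuming a hypothetical indexed view of the same storage (the array member and all names below are illustrative, not part of this patch):

#include <stdio.h>

struct wm { unsigned int urgent_ns; };

union wm_set {
	struct {
		struct wm a;
		struct wm b;
		struct wm c;
		struct wm d;
	};                     /* legacy named access, as in the patch */
	struct wm sets[4];     /* hypothetical indexed view of the same storage */
};

int main(void)
{
	union wm_set ws = {0};

	ws.a.urgent_ns = 100;                   /* write through the legacy name */
	printf("%u\n", ws.sets[0].urgent_ns);   /* prints 100: same bytes, indexed view */
	return 0;
}
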
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 9a8bf6ec70ea..8d32e525f05a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -93,6 +93,8 @@ struct dcn_otg_state {
uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_double_buffer_control;
};
void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index a15efadb9183..75b9ec21f297 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -178,10 +178,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index ffad8fe16c54..cd68ecc242c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -64,6 +64,12 @@ struct drr_params {
bool immediate_flip;
};
+struct long_vtotal_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ uint32_t vertical_blank_start;
+};
+
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
@@ -331,6 +337,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
new file mode 100644
index 000000000000..51da368f5c3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef __DC_VPG_H__
+#define __DC_VPG_H__
+
+struct dc_context;
+struct dc_info_packet;
+
+struct vpg;
+
+struct vpg_funcs {
+ void (*update_generic_info_packet)(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
+ void (*vpg_poweron)(
+ struct vpg *vpg);
+
+ void (*vpg_powerdown)(
+ struct vpg *vpg);
+};
+
+struct vpg {
+ const struct vpg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+#endif /* __DC_VPG_H__ */
\ No newline at end of file
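
vpg.h above only declares the ops table; as a rough illustration of how a consumer would drive it, here is a trimmed standalone mock with stub callbacks. The callback bodies and the caller are hypothetical; only the table layout loosely mirrors the header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dc_info_packet { uint8_t payload[32]; };

struct vpg;

struct vpg_funcs {
	void (*update_generic_info_packet)(struct vpg *vpg, uint32_t packet_index,
					   const struct dc_info_packet *info_packet,
					   bool immediate_update);
	void (*vpg_poweron)(struct vpg *vpg);
	void (*vpg_powerdown)(struct vpg *vpg);
};

struct vpg {
	const struct vpg_funcs *funcs;
	int inst;
};

static void demo_update(struct vpg *vpg, uint32_t idx,
			const struct dc_info_packet *pkt, bool now)
{
	(void)pkt;
	printf("vpg%d: packet %u, immediate=%d\n", vpg->inst, (unsigned)idx, now);
}

static void demo_poweron(struct vpg *vpg)   { printf("vpg%d: power on\n", vpg->inst); }
static void demo_powerdown(struct vpg *vpg) { printf("vpg%d: power off\n", vpg->inst); }

int main(void)
{
	static const struct vpg_funcs funcs = {
		.update_generic_info_packet = demo_update,
		.vpg_poweron = demo_poweron,
		.vpg_powerdown = demo_powerdown,
	};
	struct vpg vpg = { .funcs = &funcs, .inst = 0 };
	struct dc_info_packet pkt = {0};

	vpg.funcs->vpg_poweron(&vpg);
	vpg.funcs->update_generic_info_packet(&vpg, 0, &pkt, true);
	vpg.funcs->vpg_powerdown(&vpg);
	return 0;
}
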
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index bf29fc58ea6a..7ab8ba5e23ed 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -288,7 +288,7 @@ struct link_service {
struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
- const bool is_alpm);
+ const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
const unsigned int *power_opts, uint32_t coasting_vtotal);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 77a60aa9f27b..361ad6b16b96 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -510,6 +510,17 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
/*
* Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in current resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
* pipe in any MPCC combine in current resource context.
* return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
* pipe idx of the free pipe
@@ -573,13 +584,6 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_FP)
-struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
- const struct resource_context *res_ctx,
- const struct resource_pool *pool,
- const struct dc_link *link);
-#endif
-
void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
struct dc_state *context);
@@ -615,4 +619,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
+
+/* Setup dc callbacks for dml2
+ * @dc: the display core structure
+ * @dml2_options: struct to hold callbacks
+ */
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1c0d89e675da..bb576a9c5fdb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
info->ext_id);
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
- struct timing_generator *tg =
- dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ struct timing_generator *tg;
+
+ if (pipe_offset >= MAX_PIPES)
+ return false;
+
+ tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
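
The dce110_vblank_set change above range-checks an index derived from the IRQ source before it touches pipe_ctx[]. A small standalone sketch of that defensive pattern, with made-up types:

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES 6

struct pipe { void *tg; };

static bool vblank_set(struct pipe pipes[MAX_PIPES], unsigned int irq_src,
		       unsigned int first_vblank_src)
{
	unsigned int pipe_offset = irq_src - first_vblank_src;

	if (pipe_offset >= MAX_PIPES)      /* reject out-of-range sources early */
		return false;

	return pipes[pipe_offset].tg != NULL;
}

int main(void)
{
	struct pipe pipes[MAX_PIPES] = {0};

	/* offset 2 is in range, but no timing generator is attached -> false */
	return vblank_set(pipes, 100, 98) ? 1 : 0;
}
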
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 22b24749c9d2..8d1a1cc94a8b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -884,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc,
{
int i;
struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
+ struct dc_stream_state *link_stream = NULL;
struct dc_link_settings store_settings = *link_setting;
link->preferred_link_setting = store_settings;
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index fbcd8fb58ea8..c8c55f196f8d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -24,7 +24,6 @@
*/
#include "link_dp_trace.h"
#include "link/protocols/link_dpcd.h"
-#include "link.h"
void dp_trace_init(struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index b8c4a04dd175..0d523dc43d02 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -516,8 +516,8 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = {0};
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index a72de44a5747..b53ad18dbfbc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -55,6 +55,8 @@
#include "dccg.h"
#include "clk_mgr.h"
#include "atomfirmware.h"
+#include "vpg.h"
+
#define DC_LOGGER \
dc_logger
#define DC_LOGGER_INIT(logger) \
@@ -67,7 +69,6 @@
#define RETIMER_REDRIVER_INFO(...) \
DC_LOG_RETIMER_REDRIVER( \
__VA_ARGS__)
-#include "dc/dcn30/dcn30_vpg.h"
#define MAX_MTP_SLOT_COUNT 64
#define LINK_TRAINING_ATTEMPTS 4
@@ -127,7 +128,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
link->link_enc->funcs->get_dig_frontend &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
- unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (fe != ENGINE_ID_UNKNOWN)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
@@ -725,7 +726,7 @@ static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
static void enable_mst_on_sink(struct dc_link *link, bool enable)
{
- unsigned char mstmCntl;
+ unsigned char mstmCntl = 0;
core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
if (enable)
@@ -803,7 +804,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1575,7 +1576,7 @@ static bool write_128b_132b_sst_payload_allocation_table(
break;
}
} else {
- union dpcd_rev dpcdRev;
+ union dpcd_rev dpcdRev = {0};
if (core_link_read_dpcd(
link,
@@ -2119,7 +2120,7 @@ static enum dc_status enable_link_dp_mst(
struct pipe_ctx *pipe_ctx)
{
struct dc_link *link = pipe_ctx->stream->link;
- unsigned char mstm_cntl;
+ unsigned char mstm_cntl = 0;
/* sink signal type after MST branch is MST. Multiple MST sinks
* share one link. Link DP PHY is enable or training only once.
@@ -2285,6 +2286,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2311,6 +2313,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
+
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 289f5d133342..a01d0842bf8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -992,7 +992,7 @@ enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link
static void read_dp_device_vendor_id(struct dc_link *link)
{
- struct dp_device_vendor_id dp_id;
+ struct dp_device_vendor_id dp_id = {0};
/* read IEEE branch device id */
core_link_read_dpcd(
@@ -1087,7 +1087,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
+ uint8_t det_caps[16] = {0}; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -1172,7 +1172,7 @@ static void get_active_converter_info(
set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision = {0};
core_link_read_dpcd(
link,
@@ -1242,7 +1242,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
{
- uint8_t dpcd_data[16];
+ uint8_t dpcd_data[16] = {0};
uint32_t read_dpcd_retry_cnt = 3;
enum dc_status status = DC_ERROR_UNEXPECTED;
union dp_downstream_port_present ds_port = { 0 };
@@ -1408,7 +1408,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
static void retrieve_cable_id(struct dc_link *link)
{
- union dp_cable_id usbc_cable_id;
+ union dp_cable_id usbc_cable_id = {0};
link->dpcd_caps.cable_id.raw = 0;
core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
@@ -1475,7 +1475,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
+ uint8_t lttpr_dpcd_data[8] = {0};
enum dc_status status;
bool is_lttpr_present;
@@ -1931,8 +1931,8 @@ void detect_edp_sink_caps(struct dc_link *link)
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
- uint8_t backlight_adj_cap;
- uint8_t general_edp_cap;
+ uint8_t backlight_adj_cap = 0;
+ uint8_t general_edp_cap = 0;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
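
Most of the link_dp_capability.c changes above zero-initialize locals that are filled by DPCD reads. The point is that the read helper can fail without writing the buffer, so an uninitialized local would then be parsed as garbage; a standalone sketch with a stand-in for core_link_read_dpcd():

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool fake_dpcd_read(uint8_t *buf, size_t len, bool link_up)
{
	if (!link_up)
		return false;          /* failure path: buf is left untouched */
	memset(buf, 0x11, len);        /* pretend the sink returned data */
	return true;
}

static uint8_t read_backlight_cap(bool link_up)
{
	uint8_t backlight_adj_cap = 0; /* safe default even if the read fails */

	fake_dpcd_read(&backlight_adj_cap, sizeof(backlight_adj_cap), link_up);
	return backlight_adj_cap;      /* 0x11 on success, 0 on failure */
}

int main(void)
{
	/* a failed read must yield the safe default, not stack garbage */
	return read_backlight_cap(false) == 0 ? 0 : 1;
}
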
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 5491b707cec8..0f1c411523a2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -166,7 +166,7 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
uint8_t idx = 0xFF;
int i;
- for (i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_LINKS; ++i) {
if (!dc_struct->links[i] ||
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
@@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
+ for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+ if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
ret = true;
init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+
+ /*
+ * During DP tunnel creation, the CM preallocates BW and reduces the estimated BW of the
+ * other DPIAs. The CM releases the preallocation only when allocation is complete, so do a
+ * zero allocation here to make the CM release the preallocation and update the estimated
+ * BW correctly for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
}
}
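
The DPIA bandwidth code above switches its loop bounds from the repeated MAX_PIPES * 2 expression to the new MAX_LINKS define from hw_shared.h, so the array size and every iteration bound come from one place. A standalone sketch of that idea with illustrative types (the real loops also skip non-DPIA endpoints and pair primary/secondary links):

#define MAX_PIPES 6
#define MAX_LINKS (MAX_PIPES * 2)

struct fake_link { int is_dpia; int estimated_bw; };

static int total_dpia_estimated_bw(const struct fake_link links[MAX_LINKS])
{
	int total = 0;

	for (int i = 0; i < MAX_LINKS; ++i)   /* one bound, defined next to the array size */
		if (links[i].is_dpia)
			total += links[i].estimated_bw;

	return total;
}

int main(void)
{
	struct fake_link links[MAX_LINKS] = { [0] = { .is_dpia = 1, .estimated_bw = 2500 } };

	return total_dpia_estimated_bw(links) == 2500 ? 0 : 1;
}
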
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ba69874be5a4..0fcf0b8530ac 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -120,7 +120,7 @@ bool dp_parse_link_loss_status(
static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
- union dpcd_psr_configuration psr_configuration;
+ union dpcd_psr_configuration psr_configuration = {0};
if (!link->psr_settings.psr_feature_enabled)
return false;
@@ -186,9 +186,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
static void handle_hpd_irq_replay_sink(struct dc_link *link)
{
- union dpcd_replay_configuration replay_configuration;
+ union dpcd_replay_configuration replay_configuration = {0};
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
- union psr_error_status replay_error_status;
+ union psr_error_status replay_error_status = {0};
if (!link->replay_settings.replay_feature_enabled)
return;
@@ -280,7 +280,7 @@ void dp_handle_link_loss(struct dc_link *link)
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
- union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
retval = core_link_read_dpcd(
link,
@@ -320,7 +320,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0};
retval = core_link_read_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index e538c67d3ed9..1818970b8eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1071,7 +1071,7 @@ enum dc_status dpcd_set_link_settings(
* MUX chip gets link rate set back before link training.
*/
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- uint8_t supported_link_rates[16];
+ uint8_t supported_link_rates[16] = {0};
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
@@ -1587,21 +1587,7 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr) {
- /* ASSR is bound to fail with unsigned PSP
- * verstage used during devlopment phase.
- * Report and continue with eDP panel mode to
- * perform eDP link training with right settings
- */
- bool result;
- result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
- if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode, true);
dp_set_panel_mode(link, panel_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 5d36bab0029c..edb21d21952a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
@@ -617,7 +617,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
uint32_t retries_eq = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
enum dc_dp_training_pattern tr_pattern;
uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index c5de6ed5bf58..a72c898b64fa 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3
* XXX: Do not allow any two address ranges in this array to overlap
*/
static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
- { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};
+ { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }};
/*
* extend addresses to read all mandatory blocks together
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 3baa2bdd6dd6..ad9aca790dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -38,6 +38,7 @@
#include "dc/dc_dmub_srv.h"
#include "dce/dmub_replay.h"
#include "abm.h"
+#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
#define DC_LOGGER_INIT(logger)
@@ -320,8 +321,8 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
struct dc_link_settings link_setting;
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t req_bw;
union lane_count_set lane_count_set = {0};
@@ -1055,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
}
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm)
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1064,8 +1065,11 @@ bool edp_replay_residency(const struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
+ if (!residency)
+ return false;
+
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm);
+ replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode);
else
*residency = 0;
@@ -1145,3 +1149,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+
+static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
+ struct link_resource *link_res, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ bool use_hpo_dp_link_enc = false;
+ uint8_t link_enc_index = 0;
+ uint8_t phy_type = 0;
+ uint8_t phy_id = 0;
+
+ if (!pDC->config.use_assr_psp_message)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ if (link_res->hpo_dp_link_enc) {
+ link_enc_index = link_res->hpo_dp_link_enc->inst;
+ use_hpo_dp_link_enc = true;
+ }
+
+ if (enable)
+ phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 1 : 0);
+
+ phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter);
+
+ cmd.assr_enable.header.type = DMUB_CMD__PSP;
+ cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
+ cmd.assr_enable.assr_data.enable = enable;
+ cmd.assr_enable.assr_data.phy_port_type = phy_type;
+ cmd.assr_enable.assr_data.phy_port_id = phy_id;
+ cmd.assr_enable.assr_data.link_enc_index = link_enc_index;
+ cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc;
+
+ dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable)
+{
+ struct link_resource *link_res = &pipe_ctx->link_res;
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+
+ if (*panel_mode != DP_PANEL_MODE_EDP)
+ return;
+
+ if (link->dc->config.use_assr_psp_message) {
+ edp_set_assr_enable(link->dc, link, link_res, enable);
+ } else if (cp_psp && cp_psp->funcs.enable_assr && enable) {
+ /* ASSR is bound to fail with an unsigned PSP
+ * verstage used during the development phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with the right settings
+ */
+ bool result;
+
+ result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+
+ if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
+ *panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+}
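
The new edp_set_panel_assr() helper above is called from two places in this series: with enable=true before eDP link training (link_dp_training.c) and with enable=false at stream power-down (link_dpms.c). A standalone sketch of that call pairing; the helper body is a stub, only the call pattern follows the patch:

#include <stdbool.h>
#include <stdio.h>

enum dp_panel_mode { DP_PANEL_MODE_DEFAULT, DP_PANEL_MODE_EDP };

static void set_panel_assr(enum dp_panel_mode *panel_mode, bool enable)
{
	if (*panel_mode != DP_PANEL_MODE_EDP)
		return;                       /* ASSR only applies to eDP panel mode */
	printf("ASSR %s\n", enable ? "enable" : "disable");
	/* the real helper may fall back to DP_PANEL_MODE_DEFAULT on a failed enable */
}

int main(void)
{
	enum dp_panel_mode mode = DP_PANEL_MODE_EDP;

	set_panel_assr(&mode, true);   /* before eDP link training */
	/* ... link training and normal operation ... */
	set_panel_assr(&mode, false);  /* at stream power-down */
	return 0;
}
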
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index a158c6234d42..cb6d95cc36e4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -61,7 +61,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
union dmub_replay_cmd_set *cmd_data);
bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm);
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
const unsigned int *power_opts, uint32_t coasting_vtotal);
@@ -76,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
void edp_set_panel_power(struct dc_link *link, bool powerOn);
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable);
#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
index e3d729ab5b9f..caa617883f62 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -35,7 +35,7 @@
bool link_get_hpd_state(struct dc_link *link)
{
- uint32_t state;
+ uint32_t state = 0;
dal_gpio_lock_pin(link->hpd_gpio);
dal_gpio_get_value(link->hpd_gpio, &state);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 0e8f4f36c87c..5574bc628053 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -945,10 +945,19 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
- }
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
}
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
@@ -1383,6 +1392,9 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
+
+ s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
bool optc1_get_otg_active_size(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 6c2e84d3967f..2f3bd7648ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -129,6 +129,8 @@ struct dcn_optc_registers {
uint32_t OTG_V_TOTAL_MID;
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL2;
uint32_t OTG_TRIGA_CNTL;
uint32_t OTG_TRIGA_MANUAL_TRIG;
uint32_t OTG_MANUAL_FLOW_CONTROL;
@@ -515,12 +517,15 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
+#define V_TOTAL_REGS(type)
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
type OTG_V_SYNC_MODE;\
type OTG_DRR_TRIGGER_WINDOW_START_X;\
type OTG_DRR_TRIGGER_WINDOW_END_X;\
type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\
+ V_TOTAL_REGS(type)\
type OTG_OUT_MUX;\
type OTG_M_CONST_DTO_PHASE;\
type OTG_M_CONST_DTO_MODULO;\
@@ -581,7 +586,9 @@ struct dcn_optc_registers {
type OTG_CRC1_WINDOWB_X_END_READBACK;\
type OTG_CRC1_WINDOWB_Y_START_READBACK;\
type OTG_CRC1_WINDOWB_Y_END_READBACK;\
- type OPTC_FGCG_REP_DIS;
+ type OPTC_FGCG_REP_DIS;\
+ type OTG_V_COUNT_STOP;\
+ type OTG_V_COUNT_STOP_TIMER;
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
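
The OTG_V_COUNT_STOP fields above are added through the header's field-list macros, which name each register field once and are instantiated with different storage types for the shift and mask tables. A standalone sketch of that X-macro pattern; the shift and mask values below are placeholders, not the hardware's:

#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_FIELD_LIST(type) \
	type OTG_V_COUNT_STOP;    \
	type OTG_V_COUNT_STOP_TIMER;

struct demo_shift { DEMO_REG_FIELD_LIST(uint8_t) };   /* bit positions */
struct demo_mask  { DEMO_REG_FIELD_LIST(uint32_t) };  /* bit masks */

int main(void)
{
	/* shift/mask values are placeholders; the real ones come from the ASIC headers */
	struct demo_shift sh = { .OTG_V_COUNT_STOP = 0, .OTG_V_COUNT_STOP_TIMER = 0 };
	struct demo_mask mk = { .OTG_V_COUNT_STOP = 0x7fff, .OTG_V_COUNT_STOP_TIMER = 0xffffffff };

	printf("OTG_V_COUNT_STOP: shift %u, mask 0x%x\n",
	       (unsigned)sh.OTG_V_COUNT_STOP, (unsigned)mk.OTG_V_COUNT_STOP);
	return 0;
}
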
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 58bdbd859bf9..d6f095b4555d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -462,16 +462,6 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* Set the min/max selectors unconditionally so that
- * DMCUB fw may change OTG timings when necessary
- * TODO: Remove the w/a after fixing the issue in DMCUB firmware
- */
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_FORCE_LOCK_ON_EVENT, 0,
- OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index f07a4c7e48bc..52eab8fccb7f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5b1547508850..d393be30dff8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -32,6 +32,7 @@
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
#define REG(reg)\
optc1->tg_regs->reg
@@ -213,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc,
return true;
}
+static void optc35_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ struct dc *dc;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ dc = optc->ctx->dc;
+
+ if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
+ dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst);
+ else {
+ /*
+ * MIN_MASK_EN is gone and MASK is now always enabled.
+ *
+ * To get it to work with manual trigger we need to make sure
+ * we program the correct bit.
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+ // Setup manual flow control for EOF via TRIG_A
+ if (optc->funcs && optc->funcs->setup_manual_trigger)
+ optc->funcs->setup_manual_trigger(optc);
+ }
+}
+
+void optc35_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1;
+ uint32_t max_otg_v_total;
+
+ if (!optc || !params)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, params->vertical_total_max - 1);
+ optc35_setup_manual_trigger(optc);
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+}
+
+static void optc35_set_long_vtotal(
+ struct timing_generator *optc,
+ const struct long_vtotal_params *params)
+{
+ struct optc *optc1;
+ uint32_t vcount_stop_timer = 0, vcount_stop = 0;
+ uint32_t max_otg_v_total;
+
+ if (!optc || !params)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+ max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total)
+ return;
+
+ if (params->vertical_total_max == 0 || params->vertical_total_min == 0) {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ } else if (params->vertical_total_max == params->vertical_total_min) {
+ vcount_stop = params->vertical_blank_start;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ } else {
+ // Variable rate, keep DRR trigger mask
+ if (params->vertical_total_min > max_otg_v_total) {
+ // cannot be supported
+ // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max,
+ // DRR trigger will drop the vtotal counting directly to a new frame.
+ // But it should trigger between v_total_min and v_total_max.
+ ASSERT(0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+ } else {
+ // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT
+ vcount_stop = params->vertical_total_min;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
+
+ // Example:
+ // params->vertical_total_min 1000
+ // params->vertical_total_max 2000
+ // MAX_OTG_V_COUNT_STOP = 1500
+ //
+ // If DRR event not happened,
+ // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999
+ // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1399
+ // vcount2 0,1,2,3,4,..499,
+ // else (DRR event happened, ex : at line 1004)
+ // time 0,1,2,3,4,...1000,1001.....1004, 0
+ // vcount 0,1,2,3,4....1000,.............. 0 (new frame)
+ // vcount2 0,1,2, 3, -
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, max_otg_v_total);
+ optc35_setup_manual_trigger(optc);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ }
+ }
+}
+
static struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -245,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr,
+ .set_drr = optc35_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
@@ -275,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
+ .set_long_vtotal = optc35_set_long_vtotal,
};
void dcn35_timing_generator_init(struct optc *optc1)
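
For the variable-rate branch of optc35_set_long_vtotal() above, the split between the paused OTG counter and the stop timer can be checked against the numbers in the in-code example (min 1000, max 2000, counter limit 1500); a standalone arithmetic sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t vertical_total_min = 1000;   /* example values from the comment above */
	uint32_t vertical_total_max = 2000;
	uint32_t max_otg_v_total = 1500;      /* hardware OTG counter limit in the example */

	uint32_t vcount_stop = vertical_total_min;
	uint32_t vcount_stop_timer = vertical_total_max - max_otg_v_total;

	assert(vcount_stop == 1000);          /* OTG vcount pauses at line 1000 */
	assert(vcount_stop_timer == 500);     /* matches "vcount2 0,1,2,...499" */
	return 0;
}
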
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index 1f422e4c468f..d077e2392379 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -65,10 +65,14 @@
SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\
- SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh)
+ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable);
+void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
#endif /* __DC_OPTC_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 184b1f23aa77..db9048974d74 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -102,10 +102,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21)
###############################################################################
-###############################################################################
-
-###############################################################################
-
RESOURCE_DCN30 = dcn30_resource.o
AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30))
@@ -202,6 +198,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
-###############################################################################
-
endif
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index d1edac46c9a0..88afb2a30eef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll(
default:
return NULL;
}
-
- return NULL;
}
static enum dc_status build_mapped_resource(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 20662edd0ae4..621825a51f46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -1060,7 +1060,7 @@ static bool dce120_resource_construct(
struct irq_service_init_data irq_init_data;
static const struct resource_create_funcs *res_funcs;
bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
- uint32_t pipe_fuses;
+ uint32_t pipe_fuses = 0;
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 35a2cce0c2b8..56ee45e12b46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -56,7 +56,6 @@
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
-/* TODO remove this include */
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index d08d10969251..563c5eec83ff 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -513,7 +513,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = true
+ .p010 = false
},
.max_upscale_factor = {
@@ -569,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_legacy_fast_update = true,
};
static void dcn10_dpp_destroy(struct dpp **dpp)
@@ -1631,6 +1632,7 @@ static bool dcn10_resource_construct(
/* valid pipe num */
pool->base.pipe_count = j;
pool->base.timing_generator_count = j;
+ pool->base.mpcc_count = j;
/* within dml lib, it is hard code to 4. If ASIC pipe is fused,
* the value may be changed
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index a2387cea1af9..0a939437e19f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -62,6 +62,9 @@
#include "dcn20/dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
+#include "dcn20/dcn20_dwb.h"
+#include "dcn20/dcn20_mmhubbub.h"
+
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -71,9 +74,6 @@
#include "nbio/nbio_2_3_offset.h"
-#include "dcn20/dcn20_dwb.h"
-#include "dcn20/dcn20_mmhubbub.h"
-
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
@@ -83,11 +83,10 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
-#include "link_enc_cfg.h"
-
-#include "amdgpu_socbb.h"
+#include "link_enc_cfg.h"
#include "link.h"
+
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -1282,8 +1281,13 @@ void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
- dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool;
+
+ if (pool->funcs->build_pipe_pix_clk_params) {
+ pool->funcs->build_pipe_pix_clk_params(pipe_ctx);
+ } else {
+ dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ }
pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
@@ -2449,6 +2453,7 @@ static bool dcn20_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
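
build_pipe_hw_param() above now prefers a per-pool build_pipe_pix_clk_params hook and only falls back to the dcn20 implementation when no override is installed. A standalone sketch of that hook-with-fallback pattern, with illustrative types and a made-up default value:

#include <stdio.h>

struct pipe_params { int pix_clk_khz; };

struct pool_funcs {
	void (*build_pix_clk_params)(struct pipe_params *p); /* optional per-ASIC override */
};

static void common_build_pix_clk_params(struct pipe_params *p)
{
	p->pix_clk_khz = 148500; /* made-up default for the shared path */
}

static void build_hw_param(const struct pool_funcs *funcs, struct pipe_params *p)
{
	if (funcs->build_pix_clk_params)
		funcs->build_pix_clk_params(p);     /* ASIC-specific hook */
	else
		common_build_pix_clk_params(p);     /* shared fallback */
}

int main(void)
{
	struct pool_funcs funcs = { .build_pix_clk_params = NULL };
	struct pipe_params p = {0};

	build_hw_param(&funcs, &p);
	printf("%d\n", p.pix_clk_khz); /* 148500 via the fallback */
	return 0;
}
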
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 914b234d7f6b..070a4efb308b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -55,7 +55,6 @@
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
-#include "dcn201/dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
@@ -182,6 +181,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.socclk_mhz = 1254.0,
.dram_speed_mts = 14000.0,
},
+ /* state4 is not an actual state, it just defines the unsupported case for dml */
{
.state = 4,
.dscclk_mhz = 400.0,
@@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = {
.num_audio = 2,
.num_stream_encoder = 2,
.num_pll = 2,
+ .num_dwb = 0,
+ .num_dsc = 0,
.num_ddc = 2,
};
@@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_tri_buf = false,
+ .enable_tri_buf = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 65d337731f56..8663cbc3d1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = {
.num_dsc = 3,
};
-#ifdef DIAGS_BUILD
-static const struct resource_caps res_cap_rn_FPGA_4pipe = {
- .num_timing_generator = 4,
- .num_opp = 4,
- .num_video_plane = 4,
- .num_audio = 7,
- .num_stream_encoder = 4,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 0,
-};
-
-static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
- .num_timing_generator = 2,
- .num_opp = 2,
- .num_video_plane = 2,
- .num_audio = 7,
- .num_stream_encoder = 2,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 2,
-};
-#endif
-
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
@@ -1415,16 +1389,11 @@ static bool dcn21_resource_construct(
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
- uint32_t num_pipes;
+ uint32_t num_pipes = 0;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_rn;
-#ifdef DIAGS_BUILD
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc;
- pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
-#endif
pool->base.funcs = &dcn21_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index ecc477ef8e3b..f35cc307830b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw(
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx, vlevel = 0;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
ASSERT(pipes);
@@ -2050,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 25cd6236b054..8bc1bcaeaa47 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1143,7 +1143,7 @@ static bool dcn303_resource_construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
- struct ddc_service_init_data ddc_init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 04d142f97474..d4c3e2754f51 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -75,7 +75,6 @@
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
-// TODO: change include headers /amd/include/asic_reg after upstream
#include "yellow_carp_offset.h"
#include "dcn/dcn_3_1_2_offset.h"
#include "dcn/dcn_3_1_2_sh_mask.h"
@@ -892,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
.using_dml2 = false,
};
@@ -1311,6 +1310,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1645,7 +1646,7 @@ int dcn31_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool upscaled = false;
DC_FP_START();
@@ -1767,11 +1768,14 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
fast_validate = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index c97391edb5ff..ff50f43e4c00 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -925,27 +925,10 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
+ .enable_legacy_fast_update = true,
.using_dml2 = false,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1384,6 +1367,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1744,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
if (filter_modes_for_single_channel_workaround(dc, context))
goto validate_fail;
@@ -1938,8 +1926,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
/* Disable pipe power gating */
dc->debug.disable_dpp_power_gate = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 515ba435f759..4ce0f4bf1d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index b9753d4606f8..5fd52c5fcee4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -125,7 +125,6 @@
#include "link_enc_cfg.h"
#define DCN3_16_MAX_DET_SIZE 384
-#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_CRB_SEGMENT_SIZE_KB 64
enum dcn31_clk_src_array_id {
@@ -1306,6 +1305,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1614,7 +1615,7 @@ static int dcn316_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index ce1754cc1f46..abd76345d1e4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1304,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1751,6 +1753,9 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
@@ -1799,7 +1804,9 @@ bool dcn32_validate_bandwidth(struct dc *dc,
bool out = false;
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
else
out = dml1_validate(dc, context, fast_validate);
return out;
@@ -1815,9 +1822,48 @@ int dcn32_populate_dml_pipes_from_context(
struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
+ int subvp_main_pipe_index = -1;
+ enum mall_stream_type mall_type;
+ bool single_display_subvp = false;
+ struct dc_stream_state *stream = NULL;
+ int num_subvp_main = 0;
+ int num_subvp_phantom = 0;
+ int num_subvp_none = 0;
+ int odm_slice_count;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* For single display subvp, look for the subvp main pipe so that if there
+ * is a phantom pipe, its odm policy can be set to match the main pipe
+ */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+ mall_type = dc_state_get_stream_subvp_type(context, stream);
+ if (mall_type == SUBVP_MAIN)
+ num_subvp_main++;
+ else if (mall_type == SUBVP_PHANTOM)
+ num_subvp_phantom++;
+ else
+ num_subvp_none++;
+ }
+ if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0)
+ single_display_subvp = true;
+
+ if (single_display_subvp) {
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &res_ctx->pipe_ctx[i];
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (mall_type == SUBVP_MAIN) {
+ if (resource_is_pipe_type(pipe, OTG_MASTER))
+ subvp_main_pipe_index = i;
+ }
+ pipe_cnt++;
+ }
+ }
+
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1832,7 +1878,21 @@ int dcn32_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
if (dc->config.enable_windowed_mpo_odm &&
dc->debug.enable_single_display_2to1_odm_policy) {
- switch (resource_get_odm_slice_count(pipe)) {
+ /* For single display subvp, if the pipe is a phantom pipe,
+ * copy the odm policy from the subvp main pipe
+ */
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) {
+ if (subvp_main_pipe_index < 0) {
+ odm_slice_count = -1;
+ ASSERT(0);
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]);
+ }
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(pipe);
+ }
+ switch (odm_slice_count) {
case 2:
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
break;
@@ -1845,6 +1905,7 @@ int dcn32_populate_dml_pipes_from_context(
} else {
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
}
+
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
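The two passes added above only take effect for a single-display SubVP configuration, i.e. when exactly one stream is classified SUBVP_MAIN, one SUBVP_PHANTOM and none SUBVP_NONE. In that case the index of the main pipe's OTG master is remembered so that, further down, the phantom pipe inherits the main pipe's ODM slice count rather than computing a policy from its own pipe, keeping phantom and main on the same ODM combine policy; the ASSERT(0) branch only fires if no main OTG master was recorded, which is not expected for a well-formed SubVP state.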
@@ -1912,6 +1973,22 @@ int dcn32_populate_dml_pipes_from_context(
return pipe_cnt;
}
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes)
+{
+ uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+ /* add 2 lines for worst case alignment */
+ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+ total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+ lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
+ num_ways = cache_lines_used / lines_per_way;
+ if (cache_lines_used % lines_per_way > 0)
+ num_ways++;
+
+ return num_ways;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = dcn32_subvp_in_use,
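For reference, the ways computation introduced in dcn32_calculate_mall_ways_from_bytes() above is plain ceiling division. A minimal standalone sketch follows; the cache_line_size = 64 and cache_num_ways = 16 values match what the resource_construct hunks in this patch program, while the 32 MiB CAB size and 5 MiB MALL allocation are made-up example inputs.

#include <stdio.h>
#include <stdint.h>

/* Mirrors dcn32_calculate_mall_ways_from_bytes(): ceiling division of the
 * used cache lines (plus 2 alignment lines) over the lines available per way. */
static unsigned int mall_ways_from_bytes(uint64_t mall_bytes,
                                         uint32_t cache_line_size,
                                         uint32_t cache_num_ways,
                                         uint64_t max_cab_allocation_bytes)
{
        uint64_t cache_lines_used = mall_bytes / cache_line_size + 2;
        uint64_t total_cache_lines = max_cab_allocation_bytes / cache_line_size;
        uint64_t lines_per_way = total_cache_lines / cache_num_ways;
        uint64_t num_ways = cache_lines_used / lines_per_way;

        if (cache_lines_used % lines_per_way)
                num_ways++;
        return (unsigned int)num_ways;
}

int main(void)
{
        /* Example only: a 32 MiB CAB with 16 ways and 64-byte lines gives
         * 32768 lines per way; a 5 MiB MALL allocation uses 81922 lines,
         * which rounds up to 3 ways. Prints "3". */
        printf("%u\n", mall_ways_from_bytes(5ull << 20, 64, 16, 32ull << 20));
        return 0;
}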
@@ -1929,10 +2006,20 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+
+ dml2_opt.use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
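Together with the dml2_validate() change earlier in this file, dcn32 now maintains two DML2 instances per state: bw_ctx.dml2 is reinitialized here with use_clock_dc_limits = false, while bw_ctx.dml2_dc_power_source is reinitialized with use_clock_dc_limits = true. dcn32_validate_bandwidth() then selects whichever instance matches context->power_source, so validation while running on DC (battery) power is done against the DC clock limits.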
@@ -1960,6 +2047,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2048,7 +2136,8 @@ static bool dcn32_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
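As a worked example of the comment above: with mall_size_per_mem_channel = 4 and, say, an 8-channel VRAM configuration reported by the VBIOS (the channel count here is only an assumed example), mall_size_total becomes 4 * 8 * 1048576 = 33554432 bytes, i.e. 32 MiB of MALL.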
@@ -2362,30 +2451,10 @@ static bool dcn32_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
@@ -2483,7 +2552,7 @@ struct resource_pool *dcn32_create_resource_pool(
* full update which delays the flip for 1 frame. If we use the original pipe
* we don't have to toggle its power. So we can flip faster.
*/
-static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
const struct resource_pool *pool,
@@ -2666,7 +2735,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
- free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 2258c5c7212d..fee67fbab8e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -113,10 +113,6 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel);
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes);
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -141,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *new_opp_head);
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
@@ -184,6 +186,8 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 296a0a8e7145..e4b360d89b3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1288,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1579,10 +1581,20 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+
+ dml2_opt.use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
@@ -1610,6 +1622,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1697,7 +1710,9 @@ static bool dcn321_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
@@ -1998,30 +2013,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 5d52853cac96..2df8a742516c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
.disable_optc_power_gate = true, /*should the same as above two*/
- .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
+ .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -764,12 +764,12 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .minimum_z8_residency_time = 2100,
+ .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
- .enable_single_display_2to1_odm_policy = false,
+ .enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
@@ -783,7 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
- .disable_dmub_reallow_idle = true,
+ .disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
};
@@ -1368,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1734,7 +1736,9 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
{
bool out = false;
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
if (fast_validate)
return out;
@@ -2138,15 +2142,9 @@ static bool dcn35_resource_construct(
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index a51c4a9eaafe..f97bb4cb3761 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -240,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool(
SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\
SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\
SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 5b486400dfdb..ddf9560ab772 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /*should be the same as the above two*/
+ .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 2100,
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -755,11 +758,13 @@ static const struct dc_debug_options debug_defaults_drv = {
//must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
.enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
- .disable_z10 = true,
+ .disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1343,6 +1348,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1709,19 +1716,20 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
{
bool out = false;
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
if (fast_validate)
return out;
DC_FP_START();
- dcn351_decide_zstate_support(dc, context);
+ dcn35_decide_zstate_support(dc, context);
DC_FP_END();
return out;
}
-
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1864,6 +1872,9 @@ static bool dcn351_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ /* Use psp mailbox to enable assr */
+ dc->config.use_assr_psp_message = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1883,6 +1894,8 @@ static bool dcn351_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
+ /*HW default is to have all the FGCG enabled; SW does not need to program them*/
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2113,15 +2126,9 @@ static bool dcn351_resource_construct(
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 7785908a6676..2fde1f043d50 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -71,6 +71,8 @@
extern "C" {
#endif
+#define DMUB_PC_SNAPSHOT_COUNT 10
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
@@ -295,18 +297,30 @@ struct dmub_srv_hw_params {
bool dpia_hpd_int_enable_supported;
bool disable_clock_gate;
bool disallow_dispclk_dppclk_ds;
+ bool ips_sequential_ono;
enum dmub_memory_access_type mem_access_type;
enum dmub_ips_disable_type disable_ips;
};
/**
+ * struct dmub_srv_debug - Debug info for dmub_srv
+ * @timeout_occured: Indicates a timeout occured on any message from driver to dmub
+ * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
+ */
+struct dmub_srv_debug {
+ bool timeout_occured;
+ union dmub_rb_cmd timeout_cmd;
+ unsigned long long timestamp;
+};
+
+/**
* struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for
* debugging purposes, including logging, crash analysis, etc.
*/
struct dmub_diagnostic_data {
uint32_t dmcub_version;
uint32_t scratch[17];
- uint32_t pc;
+ uint32_t pc[DMUB_PC_SNAPSHOT_COUNT];
uint32_t undefined_address_fault_addr;
uint32_t inst_fetch_fault_addr;
uint32_t data_write_fault_addr;
@@ -317,6 +331,7 @@ struct dmub_diagnostic_data {
uint32_t inbox0_wptr;
uint32_t inbox0_size;
uint32_t gpint_datain0;
+ struct dmub_srv_debug timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -506,6 +521,7 @@ struct dmub_srv {
struct dmub_visual_confirm_color visual_confirm_color;
enum dmub_srv_power_state_type power_state;
+ struct dmub_srv_debug debug;
};
/**
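A minimal sketch of the "only the first timed-out command is stored" behaviour documented for struct dmub_srv_debug above; the stand-in types and the way the command and timestamp reach this helper are assumptions, not the real DMUB plumbing.

#include <stdbool.h>

struct fake_cmd { unsigned char raw[64]; };  /* stand-in for union dmub_rb_cmd */

struct fake_srv_debug {                      /* stand-in for struct dmub_srv_debug */
        bool timeout_occured;
        struct fake_cmd timeout_cmd;
        unsigned long long timestamp;
};

/* Record only the first timeout; later timeouts leave the stored command intact. */
static void record_timeout(struct fake_srv_debug *dbg, const struct fake_cmd *cmd,
                           unsigned long long now)
{
        if (dbg->timeout_occured)
                return;
        dbg->timeout_occured = true;
        dbg->timeout_cmd = *cmd;
        dbg->timestamp = now;
}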
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index af3fe8bb0728..e85fd3ac52c7 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -97,6 +97,9 @@
/* Maximum number of planes on any ASIC. */
#define DMUB_MAX_PLANES 6
+/* Maximum number of phantom planes on any ASIC */
+#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
+
/* Trace buffer offset for entry */
#define TRACE_BUFFER_ENTRY_OFFSET 16
@@ -194,6 +197,11 @@ union abm_flags {
* of user backlight level.
*/
unsigned int abm_gradual_bl_change : 1;
+
+ /**
+ * @abm_new_frame: Indicates if a new frame update is needed for ABM to ramp up into steady state
+ */
+ unsigned int abm_new_frame : 1;
} bitfields;
unsigned int u32All;
@@ -461,7 +469,7 @@ struct dmub_feature_caps {
* Max PSR version supported by FW.
*/
uint8_t psr;
- uint8_t fw_assisted_mclk_switch;
+ uint8_t fw_assisted_mclk_switch_ver;
uint8_t reserved[4];
uint8_t subvp_psr_support;
uint8_t gecc_enable;
@@ -619,6 +627,7 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2 = 3,
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
+ DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -653,6 +662,7 @@ union dmub_fw_boot_options {
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
uint32_t ips_disable: 3; /* options to disable ips support*/
+ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t reserved : 9; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
@@ -695,7 +705,8 @@ union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
- uint32_t reserved_bits : 30; /**< Reversed */
+ uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
+ uint32_t reserved_bits : 29; /**< Reserved */
} bits;
uint32_t all;
};
@@ -724,7 +735,13 @@ union dmub_shared_state_ips_driver_signals {
*/
struct dmub_shared_state_ips_fw {
union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
- uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t rcg_entry_count; /**< Entry counter for RCG */
+ uint32_t rcg_exit_count; /**< Exit counter for RCG */
+ uint32_t ips1_entry_count; /**< Entry counter for IPS1 */
+ uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
+ uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
+ uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
+ uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
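The six new counters preserve the fixed 248-byte layout called out in the trailing comment: one 32-bit signals word plus six 32-bit counters plus 55 reserved 32-bit words is 62 words, i.e. 248 bytes, exactly matching the previous 1 + 61 word split.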
@@ -812,6 +829,10 @@ enum dmub_cmd_vbios_type {
*/
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
/**
+ * Control PHY FSM
+ */
+ DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29,
+ /**
* Controls domain power gating
*/
DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
@@ -1186,6 +1207,11 @@ enum dmub_cmd_type {
*/
DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+ /**
+ * Command type used for all PSP commands.
+ */
+ DMUB_CMD__PSP = 88,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1588,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t pad[1];
+ uint8_t reserved[59];
};
/**
@@ -2309,6 +2335,11 @@ enum phy_link_rate {
* UHBR10 - 20.0 Gbps/Lane
*/
PHY_RATE_2000 = 11,
+
+ /**
+ * Rate 12 - 6.75 Gbps/Lane
+ */
+ PHY_RATE_675 = 12,
};
/**
@@ -2327,6 +2358,7 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_POWER_DOWN,
DMUB_PHY_FSM_PLL_EN,
DMUB_PHY_FSM_TX_EN,
+ DMUB_PHY_FSM_TX_EN_TEST_MODE,
DMUB_PHY_FSM_FAST_LP,
DMUB_PHY_FSM_P2_PLL_OFF_CPM,
DMUB_PHY_FSM_P2_PLL_OFF_PG,
@@ -2931,18 +2963,49 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+/**
+ * Definition of Replay Residency GPINT command.
+ * Bit[0] - Residency mode for Revision 0
+ * Bit[1] - Enable/Disable state
+ * Bit[2-3] - Revision number
+ * Bit[4-7] - Residency mode for Revision 1
+ * Bit[8] - Panel instance
+ * Bit[9-15] - Reserved
+ */
+
+enum pr_residency_mode {
+ PR_RESIDENCY_MODE_PHY = 0x0,
+ PR_RESIDENCY_MODE_ALPM,
+ PR_RESIDENCY_MODE_IPS2,
+ PR_RESIDENCY_MODE_FRAME_CNT,
+ PR_RESIDENCY_MODE_ENABLEMENT_PERIOD,
+};
+
#define REPLAY_RESIDENCY_MODE_SHIFT (0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT (1)
+#define REPLAY_RESIDENCY_REVISION_SHIFT (2)
+#define REPLAY_RESIDENCY_MODE2_SHIFT (4)
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_IPS 0x10
+# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+
+#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT)
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT)
+
+/**
+ * Definition of a replay_state.
+ */
enum replay_state {
REPLAY_STATE_0 = 0x0,
REPLAY_STATE_1 = 0x10,
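As a worked example of the Replay Residency GPINT bit layout described above, the standalone snippet below packs a revision-1, frame-count-mode residency query with collection enabled; the shift macros are copied from this header so the snippet compiles on its own, the panel-instance bit (bit 8) has no macro here and is shifted in by hand, and the chosen field values are purely illustrative.

#include <stdio.h>

#define REPLAY_RESIDENCY_MODE_SHIFT     (0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT   (1)
#define REPLAY_RESIDENCY_REVISION_SHIFT (2)
#define REPLAY_RESIDENCY_MODE2_SHIFT    (4)

int main(void)
{
        unsigned int param = 0;

        param |= 0x1 << REPLAY_RESIDENCY_REVISION_SHIFT; /* revision 1               */
        param |= 0x2 << REPLAY_RESIDENCY_MODE2_SHIFT;    /* mode2: frame count       */
        param |= 0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT;   /* enable residency capture */
        param |= 0x0 << 8;                               /* panel instance 0         */

        printf("GPINT parameter: 0x%04x\n", param);      /* prints 0x0026 */
        return 0;
}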
@@ -3004,6 +3067,11 @@ enum dmub_cmd_replay_type {
* Set pseudo vtotal
*/
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
+ /**
+ * Set adaptive sync sdp enabled
+ */
+ DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
+
};
/**
@@ -3205,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal {
*/
uint8_t pad;
};
+struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * force_disabled: force the adaptive sync sdp to be disabled
+ */
+ uint8_t force_disabled;
+
+ uint8_t pad[2];
+};
/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
@@ -3309,6 +3391,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3363,6 +3459,11 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
*/
struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
+
};
/**
@@ -3445,7 +3546,7 @@ enum hw_lock_client {
/**
* Replay is the client of HW Lock Manager.
*/
- HW_LOCK_CLIENT_REPLAY = 4,
+ HW_LOCK_CLIENT_REPLAY = 4,
/**
* Invalid client.
*/
@@ -4038,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type {
* Queries backlight info for the embedded panel.
*/
DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1,
+ /**
+ * Sets the PWM Freq as per user's requirement.
+ */
+ DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2,
};
/**
@@ -4139,6 +4244,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt {
struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
};
+struct phy_test_mode {
+ uint8_t mode;
+ uint8_t pat0;
+ uint8_t pad[2];
+};
+
+/**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm_data {
+ uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t mode; /**< HDMI/DP/DP2 etc */
+ uint8_t lane_num; /**< Number of lanes */
+ uint32_t symclk_100Hz; /**< PLL symclock in 100hz */
+ struct phy_test_mode test_mode;
+ enum dmub_phy_fsm_state state;
+ uint32_t status;
+ uint8_t pad;
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */
+};
+
/**
* Maximum number of bytes a chunk sent to DMUB for parsing
*/
@@ -4261,6 +4394,65 @@ struct dmub_rb_cmd_secure_display {
};
/**
+ * Command type of a DMUB_CMD__PSP command
+ */
+enum dmub_cmd_psp_type {
+ DMUB_CMD__PSP_ASSR_ENABLE = 0
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_cmd_assr_enable_data {
+ /**
+ * ASSR enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * PHY port type.
+ * Indicates eDP / non-eDP port type
+ */
+ uint8_t phy_port_type;
+ /**
+ * PHY port ID.
+ */
+ uint8_t phy_port_id;
+ /**
+ * Link encoder index.
+ */
+ uint8_t link_enc_index;
+ /**
+ * HPO mode.
+ */
+ uint8_t hpo_mode;
+
+ /**
+ * Reserved field.
+ */
+ uint8_t reserved[7];
+};
+
+/**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_rb_cmd_assr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Assr data.
+ */
+ struct dmub_cmd_assr_enable_data assr_data;
+
+ /**
+ * Reserved field.
+ */
+ uint32_t reserved[3];
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -4451,6 +4643,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm;
+ /**
* Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/
struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
@@ -4518,6 +4714,15 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
*/
struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+ struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp;
+ /**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+ struct dmub_rb_cmd_assr_enable assr_enable;
+
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cae96fba6349..e500ca9ae09c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -472,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 2bcf5fb87dd9..662c34e9495c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -466,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 0d521eeda050..e1da270502cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -478,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 53f359f3fae2..70e63aeb8f89 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds;
boot_options.bits.disable_clk_gate = params->disable_clock_gate;
boot_options.bits.ips_disable = params->disable_ips;
+ boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -516,6 +517,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 1c6f24cb1d2f..447768dec887 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -27,7 +27,6 @@
#define __DAL_TYPES_H__
#include "signal_types.h"
-#include "dc_types.h"
struct dal_logger;
struct dc_bios;
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c6bbd262f1ac..08ee0350b31f 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -226,8 +226,8 @@ enum dp_alt_mode {
struct graphics_object_id {
uint32_t id:8;
- uint32_t enum_id:4;
- uint32_t type:4;
+ enum object_enum_id enum_id;
+ enum object_type type;
uint32_t reserved:16; /* for padding. total size should be u32 */
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 92dbff22a7c6..1867aac57cf2 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -73,7 +73,6 @@ struct link_training_settings {
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
bool should_set_fec_ready;
- /* TODO - factor lane_settings out because it changes during LT */
union dc_dp_ffe_preset *ffe_preset;
uint16_t cr_pattern_time;
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f39e2785e618..83479951732a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -64,6 +64,7 @@
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
+#define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__)
struct dc_log_buffer_ctx {
char *buf;
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index 1b14b17a79c7..a10d6b988aab 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+static inline bool dc_is_tmds_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return true;
+ break;
+ default:
+ return false;
+ }
+}
+
static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 8b5c27857671..3699e633801d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 min_display;
struct fixed31_32 max_content;
struct fixed31_32 clip = dc_fixpt_one;
- struct fixed31_32 output;
+ struct fixed31_32 output = dc_fixpt_zero;
bool use_eetf = false;
bool is_clipped = false;
struct fixed31_32 sdr_white_level;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 3955b7e4b2e2..d09627c15b9c 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -158,13 +158,13 @@ static unsigned int calc_v_total_from_duration(
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
- if (dc_is_hdmi_signal(stream->signal)) {
+ if (dc_is_hdmi_signal(stream->signal)) { // change for HDMI to comply with spec
uint32_t h_total_up_scaled;
h_total_up_scaled = stream->timing.h_total * 10000;
v_total = div_u64((unsigned long long)duration_in_us
* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
- h_total_up_scaled);
+ h_total_up_scaled); //ceiling for MMax and MMin for MVRR
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
@@ -1057,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->fixed_refresh_in_uhz = 0;
refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
-+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
in_out_vrr->supported = true;
}
@@ -1126,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
}
+
+ in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED) ? true : false;
}
void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
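The HDMI branch of calc_v_total_from_duration() in the first hunk of this file is a ceiling division, v_total = ceil(duration_us * pix_clk_100hz / (h_total * 10000)). A self-contained sketch with an assumed CEA 1080p timing (148.5 MHz pixel clock, h_total = 2200) illustrates the rounding:

#include <stdint.h>
#include <stdio.h>

/* Ceiling form of the HDMI v_total computation from calc_v_total_from_duration(). */
static uint64_t v_total_from_duration(uint64_t duration_in_us,
                                      uint64_t pix_clk_100hz,
                                      uint64_t h_total)
{
        uint64_t h_total_up_scaled = h_total * 10000;

        return (duration_in_us * pix_clk_100hz + (h_total_up_scaled - 1)) /
               h_total_up_scaled;
}

int main(void)
{
        /* Example CEA 1080p timing: 148.5 MHz pixel clock, h_total 2200.
         * A 16667 us frame rounds up to 1126 total lines because of the ceiling,
         * while 16666 us yields the nominal 1125. */
        printf("%llu\n",
               (unsigned long long)v_total_from_duration(16667, 1485000, 2200));
        return 0;
}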
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 733f22bed021..c996365e84b0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -151,7 +151,7 @@ out:
static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE;
uint8_t size;
uint16_t max_wait = 20; // units of ms
uint16_t num_polls = 5;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index f7b5583ee609..8e9caae7c955 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+ return MOD_HDCP_STATUS_DDC_FAILURE;
+ }
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+ return MOD_HDCP_STATUS_DDC_FAILURE;
+ }
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 738ee763f24a..a344e2e49b0e 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_feature_enabled) {
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
- }
-
- if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
@@ -539,8 +536,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
break;
case FREESYNC_TYPE_PCON_IN_WHITELIST:
- mod_build_adaptive_sync_infopacket_v1(info_packet);
- break;
case ADAPTIVE_SYNC_TYPE_EDP:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b0a6256e89f4..7536c173a546 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -24,6 +24,7 @@
#define __AMD_SHARED_H__
#include <drm/amd_asic_type.h>
+#include <drm/drm_print.h>
#define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */
@@ -321,6 +322,8 @@ struct amd_ip_funcs {
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
+ void (*dump_ip_state)(void *handle);
+ void (*print_ip_state)(void *handle, struct drm_printer *p);
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
index f2f8f9b39c6b..fc72c2267060 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
@@ -311,6 +311,10 @@
#define mmPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define mmPHYFSYMCLK_CLOCK_CNTL 0x0057
#define mmPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
+#define mmHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define mmHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
@@ -4513,6 +4517,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5201,6 +5209,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5888,6 +5900,10 @@
#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee
#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6576,6 +6592,10 @@
#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259
#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x125b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7264,6 +7284,10 @@
#define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4
#define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5
+#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6
+#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7952,6 +7976,10 @@
#define mmCM5_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM5_CM_3DLUT_OUT_OFFSET_B 0x152f
#define mmCM5_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM5_CM_TEST_DEBUG_INDEX 0x1530
+#define mmCM5_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM5_CM_TEST_DEBUG_DATA 0x1531
+#define mmCM5_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp5_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
index e0a447351623..daf71e82f0ba 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
@@ -1189,6 +1189,11 @@
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000010L
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
@@ -16739,6 +16744,15 @@
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON12_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
index b45a35aae241..bf84f97d9162 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
@@ -4466,6 +4466,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5154,6 +5158,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5841,6 +5849,10 @@
#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee
#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6529,6 +6541,10 @@
#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259
#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x125b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7217,6 +7233,10 @@
#define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4
#define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5
+#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6
+#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
index 3dae29f9581e..56cdb219874a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
@@ -15676,6 +15676,14 @@
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
index daa8130636f0..8b0d2638a6b0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
@@ -3110,6 +3110,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -3798,6 +3802,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5687,6 +5695,16 @@
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -5817,6 +5835,16 @@
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
index 5c469cf635e5..53f1705f8d99 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
@@ -10701,6 +10701,13 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
//CM0_CM_SHAPER_CONTROL
#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_MODE_CURRENT__SHIFT 0x2
@@ -22258,7 +22265,9 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -22631,6 +22640,15 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
index f268d33c4744..7fd906f10803 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
@@ -424,6 +424,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -434,6 +436,8 @@
#define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYESYMCLK_CLOCK_CNTL 0x0056
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
index cf3398f15666..07fbfafe6056 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
@@ -1372,6 +1372,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -1397,6 +1402,13 @@
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x4
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00000010L
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
@@ -46978,6 +46990,13 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+
+
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
index 50c34d88c17c..16a69d17bb1e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
@@ -213,6 +213,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -233,6 +235,8 @@
#define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2
#define regDTBCLK_DTO_DBUF_EN 0x0063
#define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
index 295e0dac9ffa..6473362e39a8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
@@ -886,6 +886,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -911,6 +916,11 @@
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
index 14c29ce4c7b3..78cb61d5800a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
@@ -1719,6 +1719,10 @@
#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
#define regFMON_CTRL 0x0541
#define regFMON_CTRL_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542
+#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543
+#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dchubbubl_hubbub_sdpif_dispdec
@@ -3574,6 +3578,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec
@@ -3960,6 +3968,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp1_dispdec_dpp_top_dispdec
@@ -4346,6 +4358,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp2_dispdec_dpp_top_dispdec
@@ -4732,6 +4748,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp3_dispdec_dpp_top_dispdec
@@ -11780,6 +11800,16 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c
+#define regDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d
+#define regDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e
+#define regDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec
@@ -11888,6 +11918,16 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098
+#define regDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099
+#define regDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a
+#define regDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc1_dispdec_dsccif_dispdec
@@ -11996,6 +12036,16 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA1 0x30f4
+#define regDSCC2_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA2 0x30f5
+#define regDSCC2_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA3 0x30f6
+#define regDSCC2_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc2_dispdec_dsccif_dispdec
@@ -12104,6 +12154,16 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA1 0x3150
+#define regDSCC3_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA2 0x3151
+#define regDSCC3_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA3 0x3152
+#define regDSCC3_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc3_dispdec_dsccif_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
index 0691e328d0f0..1093105ca35b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
@@ -11544,6 +11544,11 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
// addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec
@@ -42267,6 +42272,18 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_INDEX2
+#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2_MASK 0x000000FFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec
@@ -42300,6 +42317,16 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
index 3bd8792fd7b3..a04b8c32c564 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
@@ -1719,6 +1719,10 @@
#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
#define regFMON_CTRL 0x0541
#define regFMON_CTRL_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542
+#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543
+#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
@@ -3573,6 +3577,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
@@ -3959,6 +3967,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
@@ -4345,6 +4357,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
@@ -4731,6 +4747,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
@@ -11789,6 +11809,10 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -11897,6 +11921,10 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
@@ -12005,7 +12033,10 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
-
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
// base address: 0x2e0
@@ -12113,6 +12144,10 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
index e82dffc2b9b0..ce773fca621f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
@@ -11547,6 +11547,11 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
@@ -42315,6 +42320,15 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -42348,7 +42362,9 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
index 0bb47e06eee8..081e726afbf0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
@@ -24,6 +24,8 @@
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
@@ -50,6 +52,8 @@
#define mmRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
@@ -120,6 +124,8 @@
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
@@ -146,6 +152,8 @@
#define mmRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
@@ -216,6 +224,8 @@
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
@@ -242,6 +252,8 @@
#define mmRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
@@ -312,6 +324,8 @@
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
@@ -338,6 +352,8 @@
#define mmRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
@@ -408,6 +424,8 @@
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
@@ -434,6 +452,8 @@
#define mmRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
@@ -504,6 +524,8 @@
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
@@ -530,6 +552,8 @@
#define mmRDPCSTX5_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74
#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
index 23fa1121a967..1f846fa6c1a2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
@@ -70,7 +70,9 @@
//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
-
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
//RDPCSTX0_RDPCSTX_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
index 55743d06f728..e55ff0e8d74c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
@@ -70,7 +70,9 @@
//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
-
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
//RDPCSTX0_RDPCSTX_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
index 01a56556cde1..5b4fdeda1040 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
@@ -155,6 +155,8 @@
#define regRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
#define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
#define regRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
@@ -239,6 +241,8 @@
#define regRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
#define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
#define regRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
@@ -323,6 +327,8 @@
#define regRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
#define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
#define regRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
@@ -407,6 +413,8 @@
#define regRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
#define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
#define regRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
@@ -491,6 +499,8 @@
#define regRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
#define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
#define regRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 4908044f7409..4c8e7fdb6976 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -4830,6 +4830,8 @@
#define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
#define mmGB_EDC_MODE 0x1e1e
#define mmGB_EDC_MODE_BASE_IDX 0
+#define mmCP_DEBUG 0x1e1f
+#define mmCP_DEBUG_BASE_IDX 0
#define mmCP_FETCHER_SOURCE 0x1e22
#define mmCP_FETCHER_SOURCE_BASE_IDX 0
#define mmCP_PQ_WPTR_POLL_CNTL 0x1e23
@@ -7778,6 +7780,8 @@
#define mmCP_MES_DOORBELL_CONTROL5_BASE_IDX 1
#define mmCP_MES_DOORBELL_CONTROL6 0x2841
#define mmCP_MES_DOORBELL_CONTROL6_BASE_IDX 1
+#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR 0x2842
+#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR_BASE_IDX 1
#define mmCP_MES_GP0_LO 0x2843
#define mmCP_MES_GP0_LO_BASE_IDX 1
#define mmCP_MES_GP0_HI 0x2844
@@ -9332,10 +9336,16 @@
#define mmRLC_LB_CNTR_INIT_1_BASE_IDX 1
#define mmRLC_LB_CNTR_1 0x4c1c
#define mmRLC_LB_CNTR_1_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_ADDR 0x4c1d
+#define mmRLC_GPM_DEBUG_INST_ADDR_BASE_IDX 1
#define mmRLC_JUMP_TABLE_RESTORE 0x4c1e
#define mmRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
#define mmRLC_PG_DELAY_2 0x4c1f
#define mmRLC_PG_DELAY_2_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_A 0x4c22
+#define mmRLC_GPM_DEBUG_INST_A_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_B 0x4c23
+#define mmRLC_GPM_DEBUG_INST_B_BASE_IDX 1
#define mmRLC_GPU_CLOCK_COUNT_LSB 0x4c24
#define mmRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
#define mmRLC_GPU_CLOCK_COUNT_MSB 0x4c25
@@ -9720,6 +9730,8 @@
#define mmRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1
#define mmRLC_LB_CNTR_2 0x4de7
#define mmRLC_LB_CNTR_2_BASE_IDX 1
+#define mmRLC_LX6_CORE_PDEBUG_INST 0x4deb
+#define mmRLC_LX6_CORE_PDEBUG_INST_BASE_IDX 1
#define mmRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1
#define mmRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1
#define mmRLC_CPAXI_DOORBELL_MON_STAT 0x4df2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index efc16ddf274a..2dfa0e5b1aa3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6822,6 +6822,8 @@
#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
@@ -6832,6 +6834,8 @@
#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
//VM_L2_PROTECTION_FAULT_ADDR_LO32
#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
index 8b931bbabe70..969e006b859b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
@@ -237,6 +237,10 @@
#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
#define regIH_CLIENT_CFG 0x0184
#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_INDEX 0x0185
+#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA 0x0186
+#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
#define regIH_CLIENT_CFG_INDEX 0x0188
#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
#define regIH_CLIENT_CFG_DATA 0x0189
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
index f262f44fa68c..a672a91e58f0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
@@ -888,6 +888,16 @@
//IH_CLIENT_CFG
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL
+//IH_RING1_CLIENT_CFG_INDEX
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
+//IH_RING1_CLIENT_CFG_DATA
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
//IH_CLIENT_CFG_INDEX
#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h
new file mode 100644
index 000000000000..da7e31fedd58
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_14_0_2_OFFSET_HEADER
+#define _smuio_14_0_2_OFFSET_HEADER
+
+
+
+// addressBlock: smuio_smuio_tsc_SmuSmuioDec
+// base address: 0x5a8a0
+#define regPWROK_REFCLK_GAP_CYCLES 0x0028
+#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1
+#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b
+#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1
+#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c
+#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1
+#define regGOLDEN_TSC_COUNT_UPPER 0x002d
+#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1
+#define regGOLDEN_TSC_COUNT_LOWER 0x002e
+#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define regSOC_GAP_PWROK 0x0031
+#define regSOC_GAP_PWROK_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+// base address: 0x5aca8
+#define regPWR_VIRT_RESET_REQ 0x012a
+#define regPWR_VIRT_RESET_REQ_BASE_IDX 1
+#define regPWR_DISP_TIMER_CONTROL 0x012b
+#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 1
+#define regPWR_DISP_TIMER_DEBUG 0x012c
+#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 1
+#define regPWR_DISP_TIMER2_CONTROL 0x012d
+#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 1
+#define regPWR_DISP_TIMER2_DEBUG 0x012e
+#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 1
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x012f
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
+#define regPWR_IH_CONTROL 0x0130
+#define regPWR_IH_CONTROL_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+// base address: 0x5a000
+#define regSMUIO_MCM_CONFIG 0x0023
+#define regSMUIO_MCM_CONFIG_BASE_IDX 0
+#define regIP_DISCOVERY_VERSION 0x0000
+#define regIP_DISCOVERY_VERSION_BASE_IDX 1
+#define regSCRATCH_REGISTER0 0x01bd
+#define regSCRATCH_REGISTER0_BASE_IDX 1
+#define regSCRATCH_REGISTER1 0x01be
+#define regSCRATCH_REGISTER1_BASE_IDX 1
+#define regSCRATCH_REGISTER2 0x01bf
+#define regSCRATCH_REGISTER2_BASE_IDX 1
+#define regSCRATCH_REGISTER3 0x01c0
+#define regSCRATCH_REGISTER3_BASE_IDX 1
+#define regSCRATCH_REGISTER4 0x01c1
+#define regSCRATCH_REGISTER4_BASE_IDX 1
+#define regSCRATCH_REGISTER5 0x01c2
+#define regSCRATCH_REGISTER5_BASE_IDX 1
+#define regSCRATCH_REGISTER6 0x01c3
+#define regSCRATCH_REGISTER6_BASE_IDX 1
+#define regSCRATCH_REGISTER7 0x01c4
+#define regSCRATCH_REGISTER7_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_i2c_SmuSmuioDec
+// base address: 0x5a100
+#define regCKSVII2C_IC_CON 0x0040
+#define regCKSVII2C_IC_CON_BASE_IDX 0
+#define regCKSVII2C_IC_TAR 0x0041
+#define regCKSVII2C_IC_TAR_BASE_IDX 0
+#define regCKSVII2C_IC_SAR 0x0042
+#define regCKSVII2C_IC_SAR_BASE_IDX 0
+#define regCKSVII2C_IC_HS_MADDR 0x0043
+#define regCKSVII2C_IC_HS_MADDR_BASE_IDX 0
+#define regCKSVII2C_IC_DATA_CMD 0x0044
+#define regCKSVII2C_IC_DATA_CMD_BASE_IDX 0
+#define regCKSVII2C_IC_SS_SCL_HCNT 0x0045
+#define regCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_SS_SCL_LCNT 0x0046
+#define regCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SCL_HCNT 0x0047
+#define regCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SCL_LCNT 0x0048
+#define regCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SCL_HCNT 0x0049
+#define regCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SCL_LCNT 0x004a
+#define regCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_INTR_STAT 0x004b
+#define regCKSVII2C_IC_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C_IC_INTR_MASK 0x004c
+#define regCKSVII2C_IC_INTR_MASK_BASE_IDX 0
+#define regCKSVII2C_IC_RAW_INTR_STAT 0x004d
+#define regCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C_IC_RX_TL 0x004e
+#define regCKSVII2C_IC_RX_TL_BASE_IDX 0
+#define regCKSVII2C_IC_TX_TL 0x004f
+#define regCKSVII2C_IC_TX_TL_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_INTR 0x0050
+#define regCKSVII2C_IC_CLR_INTR_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_UNDER 0x0051
+#define regCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_OVER 0x0052
+#define regCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_TX_OVER 0x0053
+#define regCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RD_REQ 0x0054
+#define regCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_TX_ABRT 0x0055
+#define regCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_DONE 0x0056
+#define regCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_ACTIVITY 0x0057
+#define regCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_STOP_DET 0x0058
+#define regCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_START_DET 0x0059
+#define regCKSVII2C_IC_CLR_START_DET_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_GEN_CALL 0x005a
+#define regCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0
+#define regCKSVII2C_IC_ENABLE 0x005b
+#define regCKSVII2C_IC_ENABLE_BASE_IDX 0
+#define regCKSVII2C_IC_STATUS 0x005c
+#define regCKSVII2C_IC_STATUS_BASE_IDX 0
+#define regCKSVII2C_IC_TXFLR 0x005d
+#define regCKSVII2C_IC_TXFLR_BASE_IDX 0
+#define regCKSVII2C_IC_RXFLR 0x005e
+#define regCKSVII2C_IC_RXFLR_BASE_IDX 0
+#define regCKSVII2C_IC_SDA_HOLD 0x005f
+#define regCKSVII2C_IC_SDA_HOLD_BASE_IDX 0
+#define regCKSVII2C_IC_TX_ABRT_SOURCE 0x0060
+#define regCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061
+#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_CR 0x0062
+#define regCKSVII2C_IC_DMA_CR_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_TDLR 0x0063
+#define regCKSVII2C_IC_DMA_TDLR_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_RDLR 0x0064
+#define regCKSVII2C_IC_DMA_RDLR_BASE_IDX 0
+#define regCKSVII2C_IC_SDA_SETUP 0x0065
+#define regCKSVII2C_IC_SDA_SETUP_BASE_IDX 0
+#define regCKSVII2C_IC_ACK_GENERAL_CALL 0x0066
+#define regCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define regCKSVII2C_IC_ENABLE_STATUS 0x0067
+#define regCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SPKLEN 0x0068
+#define regCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SPKLEN 0x0069
+#define regCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RESTART_DET 0x006a
+#define regCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_PARAM_1 0x006d
+#define regCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_VERSION 0x006e
+#define regCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_TYPE 0x006f
+#define regCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define regCKSVII2C1_IC_CON 0x0080
+#define regCKSVII2C1_IC_CON_BASE_IDX 0
+#define regCKSVII2C1_IC_TAR 0x0081
+#define regCKSVII2C1_IC_TAR_BASE_IDX 0
+#define regCKSVII2C1_IC_SAR 0x0082
+#define regCKSVII2C1_IC_SAR_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_MADDR 0x0083
+#define regCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define regCKSVII2C1_IC_DATA_CMD 0x0084
+#define regCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define regCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define regCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define regCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define regCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define regCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define regCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define regCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_INTR_STAT 0x008b
+#define regCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C1_IC_INTR_MASK 0x008c
+#define regCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define regCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define regCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C1_IC_RX_TL 0x008e
+#define regCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define regCKSVII2C1_IC_TX_TL 0x008f
+#define regCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_INTR 0x0090
+#define regCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define regCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define regCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define regCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define regCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define regCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define regCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define regCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define regCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_START_DET 0x0099
+#define regCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define regCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define regCKSVII2C1_IC_ENABLE 0x009b
+#define regCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define regCKSVII2C1_IC_STATUS 0x009c
+#define regCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define regCKSVII2C1_IC_TXFLR 0x009d
+#define regCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define regCKSVII2C1_IC_RXFLR 0x009e
+#define regCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define regCKSVII2C1_IC_SDA_HOLD 0x009f
+#define regCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define regCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define regCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_CR 0x00a2
+#define regCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define regCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define regCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define regCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define regCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define regCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define regCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define regCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define regCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define regCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define regCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define regCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_PARAM_1 0x00ad
+#define regCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_VERSION 0x00ae
+#define regCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_TYPE 0x00af
+#define regCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
+#define regSMUIO_PWRMGT 0x018c
+#define regSMUIO_PWRMGT_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_rom_SmuSmuioDec
+// base address: 0x5a380
+#define regROM_CNTL 0x00e0
+#define regROM_CNTL_BASE_IDX 0
+#define regPAGE_MIRROR_CNTL 0x00e1
+#define regPAGE_MIRROR_CNTL_BASE_IDX 0
+#define regROM_STATUS 0x00e2
+#define regROM_STATUS_BASE_IDX 0
+#define regCGTT_ROM_CLK_CTRL0 0x00e3
+#define regCGTT_ROM_CLK_CTRL0_BASE_IDX 0
+#define regROM_INDEX 0x00e4
+#define regROM_INDEX_BASE_IDX 0
+#define regROM_DATA 0x00e5
+#define regROM_DATA_BASE_IDX 0
+#define regROM_START 0x00e6
+#define regROM_START_BASE_IDX 0
+#define regROM_SW_CNTL 0x00e8
+#define regROM_SW_CNTL_BASE_IDX 0
+#define regROM_SW_STATUS 0x00e9
+#define regROM_SW_STATUS_BASE_IDX 0
+#define regROM_SW_COMMAND 0x00ea
+#define regROM_SW_COMMAND_BASE_IDX 0
+#define regROM_SW_DATA_1 0x00ec
+#define regROM_SW_DATA_1_BASE_IDX 0
+#define regROM_SW_DATA_2 0x00ed
+#define regROM_SW_DATA_2_BASE_IDX 0
+#define regROM_SW_DATA_3 0x00ee
+#define regROM_SW_DATA_3_BASE_IDX 0
+#define regROM_SW_DATA_4 0x00ef
+#define regROM_SW_DATA_4_BASE_IDX 0
+#define regROM_SW_DATA_5 0x00f0
+#define regROM_SW_DATA_5_BASE_IDX 0
+#define regROM_SW_DATA_6 0x00f1
+#define regROM_SW_DATA_6_BASE_IDX 0
+#define regROM_SW_DATA_7 0x00f2
+#define regROM_SW_DATA_7_BASE_IDX 0
+#define regROM_SW_DATA_8 0x00f3
+#define regROM_SW_DATA_8_BASE_IDX 0
+#define regROM_SW_DATA_9 0x00f4
+#define regROM_SW_DATA_9_BASE_IDX 0
+#define regROM_SW_DATA_10 0x00f5
+#define regROM_SW_DATA_10_BASE_IDX 0
+#define regROM_SW_DATA_11 0x00f6
+#define regROM_SW_DATA_11_BASE_IDX 0
+#define regROM_SW_DATA_12 0x00f7
+#define regROM_SW_DATA_12_BASE_IDX 0
+#define regROM_SW_DATA_13 0x00f8
+#define regROM_SW_DATA_13_BASE_IDX 0
+#define regROM_SW_DATA_14 0x00f9
+#define regROM_SW_DATA_14_BASE_IDX 0
+#define regROM_SW_DATA_15 0x00fa
+#define regROM_SW_DATA_15_BASE_IDX 0
+#define regROM_SW_DATA_16 0x00fb
+#define regROM_SW_DATA_16_BASE_IDX 0
+#define regROM_SW_DATA_17 0x00fc
+#define regROM_SW_DATA_17_BASE_IDX 0
+#define regROM_SW_DATA_18 0x00fd
+#define regROM_SW_DATA_18_BASE_IDX 0
+#define regROM_SW_DATA_19 0x00fe
+#define regROM_SW_DATA_19_BASE_IDX 0
+#define regROM_SW_DATA_20 0x00ff
+#define regROM_SW_DATA_20_BASE_IDX 0
+#define regROM_SW_DATA_21 0x0100
+#define regROM_SW_DATA_21_BASE_IDX 0
+#define regROM_SW_DATA_22 0x0101
+#define regROM_SW_DATA_22_BASE_IDX 0
+#define regROM_SW_DATA_23 0x0102
+#define regROM_SW_DATA_23_BASE_IDX 0
+#define regROM_SW_DATA_24 0x0103
+#define regROM_SW_DATA_24_BASE_IDX 0
+#define regROM_SW_DATA_25 0x0104
+#define regROM_SW_DATA_25_BASE_IDX 0
+#define regROM_SW_DATA_26 0x0105
+#define regROM_SW_DATA_26_BASE_IDX 0
+#define regROM_SW_DATA_27 0x0106
+#define regROM_SW_DATA_27_BASE_IDX 0
+#define regROM_SW_DATA_28 0x0107
+#define regROM_SW_DATA_28_BASE_IDX 0
+#define regROM_SW_DATA_29 0x0108
+#define regROM_SW_DATA_29_BASE_IDX 0
+#define regROM_SW_DATA_30 0x0109
+#define regROM_SW_DATA_30_BASE_IDX 0
+#define regROM_SW_DATA_31 0x010a
+#define regROM_SW_DATA_31_BASE_IDX 0
+#define regROM_SW_DATA_32 0x010b
+#define regROM_SW_DATA_32_BASE_IDX 0
+#define regROM_SW_DATA_33 0x010c
+#define regROM_SW_DATA_33_BASE_IDX 0
+#define regROM_SW_DATA_34 0x010d
+#define regROM_SW_DATA_34_BASE_IDX 0
+#define regROM_SW_DATA_35 0x010e
+#define regROM_SW_DATA_35_BASE_IDX 0
+#define regROM_SW_DATA_36 0x010f
+#define regROM_SW_DATA_36_BASE_IDX 0
+#define regROM_SW_DATA_37 0x0110
+#define regROM_SW_DATA_37_BASE_IDX 0
+#define regROM_SW_DATA_38 0x0111
+#define regROM_SW_DATA_38_BASE_IDX 0
+#define regROM_SW_DATA_39 0x0112
+#define regROM_SW_DATA_39_BASE_IDX 0
+#define regROM_SW_DATA_40 0x0113
+#define regROM_SW_DATA_40_BASE_IDX 0
+#define regROM_SW_DATA_41 0x0114
+#define regROM_SW_DATA_41_BASE_IDX 0
+#define regROM_SW_DATA_42 0x0115
+#define regROM_SW_DATA_42_BASE_IDX 0
+#define regROM_SW_DATA_43 0x0116
+#define regROM_SW_DATA_43_BASE_IDX 0
+#define regROM_SW_DATA_44 0x0117
+#define regROM_SW_DATA_44_BASE_IDX 0
+#define regROM_SW_DATA_45 0x0118
+#define regROM_SW_DATA_45_BASE_IDX 0
+#define regROM_SW_DATA_46 0x0119
+#define regROM_SW_DATA_46_BASE_IDX 0
+#define regROM_SW_DATA_47 0x011a
+#define regROM_SW_DATA_47_BASE_IDX 0
+#define regROM_SW_DATA_48 0x011b
+#define regROM_SW_DATA_48_BASE_IDX 0
+#define regROM_SW_DATA_49 0x011c
+#define regROM_SW_DATA_49_BASE_IDX 0
+#define regROM_SW_DATA_50 0x011d
+#define regROM_SW_DATA_50_BASE_IDX 0
+#define regROM_SW_DATA_51 0x011e
+#define regROM_SW_DATA_51_BASE_IDX 0
+#define regROM_SW_DATA_52 0x011f
+#define regROM_SW_DATA_52_BASE_IDX 0
+#define regROM_SW_DATA_53 0x0120
+#define regROM_SW_DATA_53_BASE_IDX 0
+#define regROM_SW_DATA_54 0x0121
+#define regROM_SW_DATA_54_BASE_IDX 0
+#define regROM_SW_DATA_55 0x0122
+#define regROM_SW_DATA_55_BASE_IDX 0
+#define regROM_SW_DATA_56 0x0123
+#define regROM_SW_DATA_56_BASE_IDX 0
+#define regROM_SW_DATA_57 0x0124
+#define regROM_SW_DATA_57_BASE_IDX 0
+#define regROM_SW_DATA_58 0x0125
+#define regROM_SW_DATA_58_BASE_IDX 0
+#define regROM_SW_DATA_59 0x0126
+#define regROM_SW_DATA_59_BASE_IDX 0
+#define regROM_SW_DATA_60 0x0127
+#define regROM_SW_DATA_60_BASE_IDX 0
+#define regROM_SW_DATA_61 0x0128
+#define regROM_SW_DATA_61_BASE_IDX 0
+#define regROM_SW_DATA_62 0x0129
+#define regROM_SW_DATA_62_BASE_IDX 0
+#define regROM_SW_DATA_63 0x012a
+#define regROM_SW_DATA_63_BASE_IDX 0
+#define regROM_SW_DATA_64 0x012b
+#define regROM_SW_DATA_64_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_gpio_SmuSmuioDec
+// base address: 0x5a500
+#define regSMU_GPIOPAD_SW_INT_STAT 0x0140
+#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MASK 0x0141
+#define regSMU_GPIOPAD_MASK_BASE_IDX 0
+#define regSMU_GPIOPAD_A 0x0142
+#define regSMU_GPIOPAD_A_BASE_IDX 0
+#define regSMU_GPIOPAD_TXIMPSEL 0x0143
+#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 0
+#define regSMU_GPIOPAD_EN 0x0144
+#define regSMU_GPIOPAD_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_Y 0x0145
+#define regSMU_GPIOPAD_Y_BASE_IDX 0
+#define regSMU_GPIOPAD_RXEN 0x0146
+#define regSMU_GPIOPAD_RXEN_BASE_IDX 0
+#define regSMU_GPIOPAD_RCVR_SEL0 0x0147
+#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 0
+#define regSMU_GPIOPAD_RCVR_SEL1 0x0148
+#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 0
+#define regSMU_GPIOPAD_PU_EN 0x0149
+#define regSMU_GPIOPAD_PU_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_PD_EN 0x014a
+#define regSMU_GPIOPAD_PD_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_PINSTRAPS 0x014b
+#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 0
+#define regDFT_PINSTRAPS 0x014c
+#define regDFT_PINSTRAPS_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT_EN 0x014d
+#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT 0x014e
+#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT_AK 0x014f
+#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_EN 0x0150
+#define regSMU_GPIOPAD_INT_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_TYPE 0x0151
+#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_POLARITY 0x0152
+#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 0
+#define regSMUIO_PCC_GPIO_SELECT 0x0155
+#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 0
+#define regSMU_GPIOPAD_S0 0x0156
+#define regSMU_GPIOPAD_S0_BASE_IDX 0
+#define regSMU_GPIOPAD_S1 0x0157
+#define regSMU_GPIOPAD_S1_BASE_IDX 0
+#define regSMU_GPIOPAD_SCHMEN 0x0158
+#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 0
+#define regSMU_GPIOPAD_SCL_EN 0x0159
+#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_SDA_EN 0x015a
+#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 0
+#define regSMUIO_GPIO_INT0_SELECT 0x015b
+#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT1_SELECT 0x015c
+#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT2_SELECT 0x015d
+#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT3_SELECT 0x015e
+#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f
+#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160
+#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161
+#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162
+#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 0
+#define regSMIO_INDEX 0x0163
+#define regSMIO_INDEX_BASE_IDX 0
+#define regS0_VID_SMIO_CNTL 0x0164
+#define regS0_VID_SMIO_CNTL_BASE_IDX 0
+#define regS1_VID_SMIO_CNTL 0x0165
+#define regS1_VID_SMIO_CNTL_BASE_IDX 0
+#define regOPEN_DRAIN_SELECT 0x0166
+#define regOPEN_DRAIN_SELECT_BASE_IDX 0
+#define regSMIO_ENABLE 0x0167
+#define regSMIO_ENABLE_BASE_IDX 0
+
+#endif
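(Editorial note, not part of the patch: the offset header above pairs each register with a word offset and a _BASE_IDX selector, while the *_sh_mask.h file added next defines per-field __SHIFT/_MASK pairs. As a minimal, hypothetical sketch of how such pairs are conventionally consumed, the snippet below copies two of the definitions that appear in the following file and extracts/inserts a field from a raw 32-bit value. field_get/field_set are illustrative helpers only; they are not amdgpu driver code, and the driver's own register-access macros are not shown here.)

/* Illustrative only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Copied from smuio_14_0_2_sh_mask.h (added below) so this compiles standalone. */
#define ROM_CNTL__SPI_FAST_MODE__SHIFT   0x15
#define ROM_CNTL__SPI_FAST_MODE_MASK     0x00200000L

/* Read a field out of a raw 32-bit register value. */
static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Write a field into a raw 32-bit register value, preserving other bits. */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t rom_cntl = 0;	/* stand-in for a value read over MMIO */

	rom_cntl = field_set(rom_cntl, ROM_CNTL__SPI_FAST_MODE_MASK,
			     ROM_CNTL__SPI_FAST_MODE__SHIFT, 1);
	printf("SPI_FAST_MODE = %u\n",
	       field_get(rom_cntl, ROM_CNTL__SPI_FAST_MODE_MASK,
			 ROM_CNTL__SPI_FAST_MODE__SHIFT));
	return 0;
}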
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h
new file mode 100644
index 000000000000..6204505e553b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h
@@ -0,0 +1,1106 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_14_0_2_SH_MASK_HEADER
+#define _smuio_14_0_2_SH_MASK_HEADER
+
+
+// addressBlock: smuio_smuio_tsc_SmuSmuioDec
+//PWROK_REFCLK_GAP_CYCLES
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
+//GOLDEN_TSC_INCREMENT_UPPER
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_INCREMENT_LOWER
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
+//GOLDEN_TSC_COUNT_UPPER
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_COUNT_LOWER
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_UPPER
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_LOWER
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GAP_PWROK
+#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
+#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
+
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+//PWR_VIRT_RESET_REQ
+#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
+#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
+//PWR_DISP_TIMER_CONTROL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER_DEBUG
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER2_CONTROL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER2_DEBUG
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER_GLOBAL_CONTROL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
+//PWR_IH_CONTROL
+#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
+#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
+
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+//SMUIO_MCM_CONFIG
+#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
+#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
+#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc
+#define SMUIO_MCM_CONFIG__DIE_CONFIG__SHIFT 0xd
+#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
+#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
+#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
+#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
+#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000300L
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L
+#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
+#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
+//IP_DISCOVERY_VERSION
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER0
+#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
+#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER1
+#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
+#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER2
+#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
+#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER3
+#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
+#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER4
+#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
+#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER5
+#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
+#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER6
+#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
+#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER7
+#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
+#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: smuio_smuio_i2c_SmuSmuioDec
+//CKSVII2C_IC_CON
+#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL__SHIFT 0xb
+#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C_IC_TAR
+#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0
+#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa
+#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL
+#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L
+#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C_IC_SAR
+#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0
+#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL
+//CKSVII2C_IC_HS_MADDR
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L
+//CKSVII2C_IC_DATA_CMD
+#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0
+#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8
+#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9
+#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa
+#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE__SHIFT 0xb
+#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL
+#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L
+#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L
+#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L
+//CKSVII2C_IC_SS_SCL_HCNT
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_SS_SCL_LCNT
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_HCNT
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_LCNT
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_HCNT
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_LCNT
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_INTR_STAT
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_INTR_MASK
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_RAW_INTR_STAT
+//CKSVII2C_IC_RX_TL
+#define CKSVII2C_IC_RX_TL__RX_TL__SHIFT 0x0
+//CKSVII2C_IC_TX_TL
+#define CKSVII2C_IC_TX_TL__TX_TL__SHIFT 0x0
+//CKSVII2C_IC_CLR_INTR
+//CKSVII2C_IC_CLR_RX_UNDER
+//CKSVII2C_IC_CLR_RX_OVER
+//CKSVII2C_IC_CLR_TX_OVER
+//CKSVII2C_IC_CLR_RD_REQ
+//CKSVII2C_IC_CLR_TX_ABRT
+//CKSVII2C_IC_CLR_RX_DONE
+//CKSVII2C_IC_CLR_ACTIVITY
+//CKSVII2C_IC_CLR_STOP_DET
+//CKSVII2C_IC_CLR_START_DET
+//CKSVII2C_IC_CLR_GEN_CALL
+//CKSVII2C_IC_ENABLE
+#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK__SHIFT 0x2
+#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE__SHIFT 0x3
+#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L
+#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L
+//CKSVII2C_IC_STATUS
+#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0
+#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1
+#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2
+#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3
+#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6
+#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
+#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL__SHIFT 0x8
+#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
+#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL__SHIFT 0xa
+#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED__SHIFT 0xb
+#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L
+#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L
+#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L
+#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L
+#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L
+//CKSVII2C_IC_TXFLR
+#define CKSVII2C_IC_TXFLR__TXFLR__SHIFT 0x0
+//CKSVII2C_IC_RXFLR
+#define CKSVII2C_IC_RXFLR__RXFLR__SHIFT 0x0
+//CKSVII2C_IC_SDA_HOLD
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD__SHIFT 0x0
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD__SHIFT 0x10
+//CKSVII2C_IC_TX_ABRT_SOURCE
+//CKSVII2C_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C_IC_DMA_CR
+//CKSVII2C_IC_DMA_TDLR
+//CKSVII2C_IC_DMA_RDLR
+//CKSVII2C_IC_SDA_SETUP
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL
+//CKSVII2C_IC_ACK_GENERAL_CALL
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C_IC_ENABLE_STATUS
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST__SHIFT 0x2
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L
+//CKSVII2C_IC_FS_SPKLEN
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_HS_SPKLEN
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_CLR_RESTART_DET
+//CKSVII2C_IC_COMP_PARAM_1
+#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH__SHIFT 0x0
+#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE__SHIFT 0x2
+#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES__SHIFT 0x4
+#define CKSVII2C_IC_COMP_PARAM_1__INTR_IO__SHIFT 0x5
+#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA__SHIFT 0x6
+#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS__SHIFT 0x7
+#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH__SHIFT 0x8
+#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH__SHIFT 0x10
+//CKSVII2C_IC_COMP_VERSION
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL
+//CKSVII2C_IC_COMP_TYPE
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1__SHIFT 0xb
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE__SHIFT 0xb
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+#define CKSVII2C1_IC_RX_TL__RX1_TL__SHIFT 0x0
+//CKSVII2C1_IC_TX_TL
+#define CKSVII2C1_IC_TX_TL__TX1_TL__SHIFT 0x0
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE__SHIFT 0x3
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
+#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL__SHIFT 0x8
+#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
+#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL__SHIFT 0xa
+#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED__SHIFT 0xb
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+#define CKSVII2C1_IC_TXFLR__TXFLR1__SHIFT 0x0
+//CKSVII2C1_IC_RXFLR
+#define CKSVII2C1_IC_RXFLR__RXFLR1__SHIFT 0x0
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD__SHIFT 0x10
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+//CKSVII2C1_IC_FS_SPKLEN
+#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN__SHIFT 0x0
+//CKSVII2C1_IC_HS_SPKLEN
+#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN__SHIFT 0x0
+//CKSVII2C1_IC_CLR_RESTART_DET
+//CKSVII2C1_IC_COMP_PARAM_1
+#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH__SHIFT 0x0
+#define CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE__SHIFT 0x2
+#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES__SHIFT 0x4
+#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO__SHIFT 0x5
+#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA__SHIFT 0x6
+#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS__SHIFT 0x7
+#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH__SHIFT 0x8
+#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH__SHIFT 0x10
+//CKSVII2C1_IC_COMP_VERSION
+#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION__SHIFT 0x0
+//CKSVII2C1_IC_COMP_TYPE
+#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE__SHIFT 0x0
+//SMUIO_PWRMGT
+#define SMUIO_PWRMGT__i2c_clk_gate_en__SHIFT 0x0
+#define SMUIO_PWRMGT__i2c1_clk_gate_en__SHIFT 0x4
+#define SMUIO_PWRMGT__i2c_clk_gate_en_MASK 0x00000001L
+#define SMUIO_PWRMGT__i2c1_clk_gate_en_MASK 0x00000010L
+
+
+// addressBlock: smuio_smuio_rom_SmuSmuioDec
+//ROM_CNTL
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x0
+#define ROM_CNTL__READ_MODE__SHIFT 0x1
+#define ROM_CNTL__READ_MODE_OVERRIDE__SHIFT 0x3
+#define ROM_CNTL__SPI_TIMING_RELAX_SCK__SHIFT 0x4
+#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE__SHIFT 0x5
+#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE__SHIFT 0x6
+#define ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT 0x8
+#define ROM_CNTL__SPI_TIMING_RELAX__SHIFT 0x13
+#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE__SHIFT 0x14
+#define ROM_CNTL__SPI_FAST_MODE__SHIFT 0x15
+#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE__SHIFT 0x16
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x17
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE__SHIFT 0x1c
+#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE__SHIFT 0x1d
+#define ROM_CNTL__PAD_SAMPLE_MODE__SHIFT 0x1e
+#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE__SHIFT 0x1f
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x00000001L
+#define ROM_CNTL__SPI_TIMING_RELAX_MASK 0x00080000L
+#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE_MASK 0x00100000L
+#define ROM_CNTL__SPI_FAST_MODE_MASK 0x00200000L
+#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE_MASK 0x00400000L
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0x0F800000L
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE_MASK 0x10000000L
+//PAGE_MIRROR_CNTL
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0x01FFFFFFL
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x02000000L
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0x0C000000L
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x10000000L
+//ROM_STATUS
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define ROM_STATUS__ROM_BUSY_MASK 0x00000001L
+//CGTT_ROM_CLK_CTRL0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//ROM_INDEX
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
+//ROM_DATA
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL
+//ROM_START
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0x01FFFFFFL
+//ROM_SW_CNTL
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x13
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0x0000FFFFL
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x00070000L
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x00080000L
+//ROM_SW_STATUS
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x00000001L
+//ROM_SW_COMMAND
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0x000000FFL
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xFFFFFF00L
+//ROM_SW_DATA_1
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_2
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_3
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_4
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_5
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_6
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_7
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_8
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_9
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_10
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_11
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_12
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_13
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_14
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_15
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_16
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_17
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_18
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_19
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_20
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_21
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_22
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_23
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_24
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_25
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_26
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_27
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_28
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_29
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_30
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_31
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_32
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_33
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_34
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_35
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_36
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_37
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_38
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_39
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_40
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_41
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_42
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_43
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_44
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_45
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_46
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_47
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_48
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_49
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_50
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_51
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_52
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_53
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_54
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_55
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_56
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_57
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_58
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_59
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_60
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_61
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_62
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_63
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_64
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: smuio_smuio_gpio_SmuSmuioDec
+//SMU_GPIOPAD_SW_INT_STAT
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
+//SMU_GPIOPAD_MASK
+#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_A
+#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_TXIMPSEL
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_EN
+#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_Y
+#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RXEN
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL1
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PU_EN
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PD_EN
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PINSTRAPS
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
+//DFT_PINSTRAPS
+#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0
+#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000000FFL
+//SMU_GPIOPAD_INT_STAT_EN
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT_AK
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
+//SMU_GPIOPAD_INT_EN
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_TYPE
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
+//SMU_GPIOPAD_INT_POLARITY
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
+//SMUIO_PCC_GPIO_SELECT
+#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_S0
+#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0
+#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_S1
+#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0
+#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCHMEN
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCL_EN
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SDA_EN
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL
+//SMUIO_GPIO_INT0_SELECT
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT1_SELECT
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT2_SELECT
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT3_SELECT
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_MP_INT0_STAT
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT1_STAT
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT2_STAT
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT3_STAT
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL
+//SMIO_INDEX
+#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0
+#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L
+//S0_VID_SMIO_CNTL
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL
+//S1_VID_SMIO_CNTL
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL
+//OPEN_DRAIN_SELECT
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0
+#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL
+#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L
+//SMIO_ENABLE
+#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0
+#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 32054ecf0b87..805c9d37a2b4 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -150,6 +150,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ AMDGPU_PP_SENSOR_VCN_LOAD,
};
enum amd_pp_task {
@@ -420,7 +421,7 @@ struct amd_pm_funcs {
int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
- bool (*get_asic_baco_capability)(void *handle);
+ int (*get_asic_baco_capability)(void *handle);
int (*get_asic_baco_state)(void *handle, int *state);
int (*set_asic_baco_state)(void *handle, int state);
int (*get_ppfeature_status)(void *handle, char *buf);
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index ec5b9ab67c5e..b72d5d362251 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -61,6 +61,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_MISC = 14,
MES_SCH_API_UPDATE_ROOT_PAGE_TABLE = 15,
MES_SCH_API_AMD_LOG = 16,
+ MES_SCH_API_SET_HW_RSRC_1 = 19,
MES_SCH_API_MAX = 0xFF
};
@@ -238,6 +239,26 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+union MESAPI_SET_HW_RESOURCES_1 {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ uint64_t timestamp;
+ union {
+ struct {
+ uint32_t enable_mes_info_ctx : 1;
+ uint32_t reserved : 31;
+ };
+ uint32_t uint32_all;
+ };
+ uint64_t mes_info_ctx_mc_addr;
+ uint32_t mes_info_ctx_size;
+ uint32_t mes_kiq_unmap_timeout; // unit is 100ms
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
union MESAPI__ADD_QUEUE {
struct {
union MES_API_HEADER header;
@@ -278,10 +299,21 @@ union MESAPI__ADD_QUEUE {
uint32_t skip_process_ctx_clear : 1;
uint32_t map_legacy_kq : 1;
uint32_t exclusively_scheduled : 1;
- uint32_t reserved : 17;
+ uint32_t is_long_running : 1;
+ uint32_t is_dwm_queue : 1;
+ uint32_t is_video_blit_queue : 1;
+ uint32_t reserved : 14;
};
- struct MES_API_STATUS api_status;
- uint64_t tma_addr;
+ struct MES_API_STATUS api_status;
+ uint64_t tma_addr;
+ uint32_t sch_id;
+ uint64_t timestamp;
+ uint32_t process_context_array_index;
+ uint32_t gang_context_array_index;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t alignment_mode_setting;
+ uint64_t unmap_flag_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
index beadb9e42850..ca83e9e5c3ff 100644
--- a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
+++ b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
@@ -234,7 +234,8 @@ union UMSCHAPI__SET_HW_RESOURCES {
uint32_t enable_level_process_quantum_check : 1;
uint32_t is_vcn0_enabled : 1;
uint32_t is_vcn1_enabled : 1;
- uint32_t reserved : 27;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 26;
};
uint32_t uint32_all;
};
@@ -297,9 +298,12 @@ union UMSCHAPI__ADD_QUEUE {
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
struct UMSCH_API_STATUS api_status;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -314,6 +318,7 @@ union UMSCHAPI__REMOVE_QUEUE {
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -337,6 +342,7 @@ union UMSCHAPI__SUSPEND {
uint32_t suspend_fence_value;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -356,6 +362,7 @@ union UMSCHAPI__RESUME {
enum UMSCH_ENGINE_TYPE engine_type;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -404,6 +411,7 @@ union UMSCHAPI__UPDATE_AFFINITY {
union UMSCH_AFFINITY affinity;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -417,6 +425,7 @@ union UMSCHAPI__CHANGE_CONTEXT_PRIORITY_LEVEL {
uint64_t context_quantum;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index f84bfed50681..eee919577b44 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -199,14 +199,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
return ret;
}
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- bool ret;
+ int ret;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
- return false;
+ return 0;
/* Don't use baco for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
@@ -217,7 +217,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
* devices. Needs more investigation.
*/
if (adev->in_s3)
- return false;
+ return 0;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..c11952a4389b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
#define MAX_NUM_OF_SUBSETS 8
+#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)
+
struct od_attribute {
struct kobj_attribute attribute;
struct list_head entry;
@@ -1582,6 +1584,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
}
/**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage. The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ unsigned int value;
+ int r;
+
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
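As an aside, a minimal user-space sketch of how the new vcn_busy_percent attribute described in the DOC block above could be consumed (the /sys/class/drm/card0 path and the tiny program below are assumptions for illustration, not part of this patch):

#include <stdio.h>

int main(void)
{
	/* vcn_busy_percent reports the VCN load as an integer percentage. */
	FILE *f = fopen("/sys/class/drm/card0/device/vcn_busy_percent", "r");
	unsigned int busy;

	if (!f) {
		perror("vcn_busy_percent");
		return 1;
	}
	if (fscanf(f, "%u", &busy) == 1)
		printf("VCN load: %u %%\n", busy);
	fclose(f);
	return 0;
}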
+
+/**
* DOC: pcie_bw
*
* The amdgpu driver provides a sysfs API for estimating how much data
@@ -2091,6 +2117,99 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
return 0;
}
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
+ uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+ *states = ATTR_STATE_SUPPORTED;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (gc_ver < IP_VERSION(9, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (mp1_ver < IP_VERSION(10, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3))
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ switch (gc_ver) {
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* setting should not be allowed from VF if not in one VF mode */
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+
+ return 0;
+}
+
/* Following items will be read out to indicate current plpd policy:
* - -1: none
* - 0: disallow
@@ -2162,17 +2281,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_dcefclk_attr_update),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2180,6 +2308,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
.attr_update = pp_od_clk_voltage_attr_update),
AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2201,28 +2330,28 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
- uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- const char *attr_name = dev_attr->attr.name;
if (!(attr->flags & mask)) {
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
-#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
-
- if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (gc_ver < IP_VERSION(9, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (mp1_ver < IP_VERSION(10, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (DEVICE_ATTR_IS(mem_busy_percent)) {
if ((adev->flags & AMD_IS_APU &&
gc_ver != IP_VERSION(9, 4, 3)) ||
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0)))
+ *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
if (adev->flags & AMD_IS_APU ||
@@ -2253,36 +2382,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
@@ -2304,23 +2403,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
- if (gc_ver == IP_VERSION(9, 4, 2) ||
- gc_ver == IP_VERSION(9, 4, 3))
- *states = ATTR_STATE_UNSUPPORTED;
}
switch (gc_ver) {
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
- if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
- DEVICE_ATTR_IS(pp_dpm_socclk) ||
- DEVICE_ATTR_IS(pp_dpm_fclk)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- break;
case IP_VERSION(10, 3, 0):
if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
amdgpu_sriov_vf(adev)) {
@@ -2332,14 +2417,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
break;
}
- /* setting should not be allowed from VF if not in one VF mode */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
-
-#undef DEVICE_ATTR_IS
-
return 0;
}
@@ -4261,6 +4338,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
@@ -4322,6 +4406,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = amdgpu_od_set_init(adev);
if (ret)
goto err_out1;
+ } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
+ dev_info(adev->dev, "overdrive feature is not supported\n");
}
adev->pm.sysfs_initialized = true;
@@ -4429,6 +4515,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* MEM Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
seq_printf(m, "MEM Load: %u %%\n", value);
+ /* VCN Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
+ seq_printf(m, "VCN Load: %u %%\n", value);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 621200e0823f..501f8c726e8d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -50,8 +50,12 @@ enum amdgpu_runpm_mode {
AMDGPU_RUNPM_PX,
AMDGPU_RUNPM_BOCO,
AMDGPU_RUNPM_BACO,
+ AMDGPU_RUNPM_BAMACO,
};
+#define BACO_SUPPORT (1<<0)
+#define MACO_SUPPORT (1<<1)
+
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -407,7 +411,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
index eec816f0cbf9..448ba3a14584 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
@@ -43,8 +43,48 @@ enum amdgpu_device_attr_states {
ATTR_STATE_SUPPORTED,
};
+enum amdgpu_device_attr_id {
+ device_attr_id__unknown = -1,
+ device_attr_id__power_dpm_state = 0,
+ device_attr_id__power_dpm_force_performance_level,
+ device_attr_id__pp_num_states,
+ device_attr_id__pp_cur_state,
+ device_attr_id__pp_force_state,
+ device_attr_id__pp_table,
+ device_attr_id__pp_dpm_sclk,
+ device_attr_id__pp_dpm_mclk,
+ device_attr_id__pp_dpm_socclk,
+ device_attr_id__pp_dpm_fclk,
+ device_attr_id__pp_dpm_vclk,
+ device_attr_id__pp_dpm_vclk1,
+ device_attr_id__pp_dpm_dclk,
+ device_attr_id__pp_dpm_dclk1,
+ device_attr_id__pp_dpm_dcefclk,
+ device_attr_id__pp_dpm_pcie,
+ device_attr_id__pp_sclk_od,
+ device_attr_id__pp_mclk_od,
+ device_attr_id__pp_power_profile_mode,
+ device_attr_id__pp_od_clk_voltage,
+ device_attr_id__gpu_busy_percent,
+ device_attr_id__mem_busy_percent,
+ device_attr_id__vcn_busy_percent,
+ device_attr_id__pcie_bw,
+ device_attr_id__pp_features,
+ device_attr_id__unique_id,
+ device_attr_id__thermal_throttling_logging,
+ device_attr_id__apu_thermal_cap,
+ device_attr_id__gpu_metrics,
+ device_attr_id__smartshift_apu_power,
+ device_attr_id__smartshift_dgpu_power,
+ device_attr_id__smartshift_bias,
+ device_attr_id__xgmi_plpd_policy,
+ device_attr_id__pm_metrics,
+ device_attr_id__count,
+};
+
struct amdgpu_device_attr {
struct device_attribute dev_attr;
+ enum amdgpu_device_attr_id attr_id;
enum amdgpu_device_attr_flags flags;
int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states);
@@ -61,6 +101,7 @@ struct amdgpu_device_attr_entry {
#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .attr_id = device_attr_id__##_name, \
.flags = _flags, \
##__VA_ARGS__, }
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5cb4725c773f..6bb42d04b247 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3316,6 +3316,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.soft_reset = kv_dpm_soft_reset,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version kv_smu_ip_block = {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index eb4da3666e05..f245fc0bc6d3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -8060,6 +8060,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = {
.soft_reset = si_dpm_soft_reset,
.set_clockgating_state = si_dpm_set_clockgating_state,
.set_powergating_state = si_dpm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version si_smu_ip_block =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index aed0e2cefbf9..5fb21a0508cd 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -302,6 +302,8 @@ static const struct amd_ip_funcs pp_ip_funcs = {
.soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
@@ -1371,7 +1373,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
return phm_set_active_display_count(hwmgr, count);
}
-static bool pp_get_asic_baco_capability(void *handle)
+static int pp_get_asic_baco_capability(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
@@ -1379,10 +1381,10 @@ static bool pp_get_asic_baco_capability(void *handle)
return false;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
- !hwmgr->hwmgr_func->get_asic_baco_capability)
+ !hwmgr->hwmgr_func->get_bamaco_support)
return false;
- return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr);
+ return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
}
static int pp_get_asic_baco_state(void *handle, int *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
index e8a9471c1898..ad60918aaae1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
@@ -33,7 +33,7 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
+int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
@@ -44,9 +44,9 @@ bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
- return false;
+ return 0;
}
int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
index 73a773f4ce2e..750082ea74d8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index aa91730e4eaf..1fcd4451001f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5791,7 +5791,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
- .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_bamaco_support = smu7_get_bamaco_support,
.get_asic_baco_state = smu7_baco_get_state,
.set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
index c66ef9741535..c1ce1d7cae48 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
@@ -28,13 +28,13 @@
#include "vega10_inc.h"
#include "smu9_baco.h"
-bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
+int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg, data;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return false;
+ return 0;
WREG32(0x12074, 0xFFF0003B);
data = RREG32(0x12075);
@@ -43,10 +43,10 @@ bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
}
- return false;
+ return 0;
}
int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
index 9ff7c2ea1b58..2c100482084c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 6d6bc6a380b3..9f5bd998c6bf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5756,7 +5756,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
- .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_bamaco_support = smu9_get_bamaco_support,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 460067933de2..c223e3a6bfca 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2966,7 +2966,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
- .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_bamaco_support = smu9_get_bamaco_support,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index dad4c80aee58..424e4ec9e389 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
@@ -36,22 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr)
+int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return false;
+ return 0;
if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) {
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
}
- return false;
+ return 0;
}
int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
index bdad9c915631..0f2dd8c008ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 3b33af30eb0f..f9efb0bad807 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -4422,7 +4422,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
/* BACO related */
- .get_asic_baco_capability = vega20_baco_get_capability,
+ .get_bamaco_support = vega20_get_bamaco_support,
.get_asic_baco_state = vega20_baco_get_state,
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 6f536159df4d..69928a4a074b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -351,7 +351,7 @@ struct pp_hwmgr_func {
int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
- bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr);
+ int (*get_bamaco_support)(struct pp_hwmgr *hwmgr);
int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 246b211b1e85..7789b313285c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -45,6 +45,7 @@
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
+#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"
/*
@@ -715,6 +716,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 1):
smu_v14_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu_v14_0_2_set_ppt_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -735,8 +740,9 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
+ smu->smu_baco.maco_support = false;
smu->user_dpm_profile.fan_mode = -1;
mutex_init(&smu->message_lock);
@@ -1966,10 +1972,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev))
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+
+ return ret;
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1987,7 +2008,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
@@ -3200,17 +3229,17 @@ static int smu_set_xgmi_pstate(void *handle,
return ret;
}
-static bool smu_get_baco_capability(void *handle)
+static int smu_get_baco_capability(void *handle)
{
struct smu_context *smu = handle;
if (!smu->pm_enabled)
return false;
- if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
+ if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
return false;
- return smu->ppt_funcs->baco_is_support(smu);
+ return smu->ppt_funcs->get_bamaco_support(smu);
}
static int smu_baco_set_state(void *handle, int state)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a870bdd49a4e..0917dec8efe3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
@@ -458,7 +459,7 @@ struct smu_umd_pstate_table {
struct cmn2asic_msg_mapping {
int valid_mapping;
int map_to;
- int valid_in_vf;
+ uint32_t flags;
};
struct cmn2asic_mapping {
@@ -538,6 +539,7 @@ struct smu_context {
uint32_t smc_driver_if_version;
uint32_t smc_fw_if_version;
uint32_t smc_fw_version;
+ uint32_t smc_fw_caps;
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
@@ -1173,9 +1175,11 @@ struct pptable_funcs {
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
/**
- * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+ * @get_bamaco_support: Check if GPU supports BACO/MACO
+ * BACO: Bus Active, Chip Off
+ * MACO: Memory Active, Chip Off
*/
- bool (*baco_is_support)(struct smu_context *smu);
+ int (*get_bamaco_support)(struct smu_context *smu);
/**
* @baco_get_state: Get the current BACO state.
@@ -1482,8 +1486,8 @@ enum smu_baco_seq {
BACO_SEQ_COUNT,
};
-#define MSG_MAP(msg, index, valid_in_vf) \
- [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
+#define MSG_MAP(msg, index, flags) \
+ [SMU_MSG_##msg] = {1, (index), (flags)}
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
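For context, the get_bamaco_support callback introduced above returns a bitmask rather than a bool, built from the BACO_SUPPORT and MACO_SUPPORT defines added to amdgpu_dpm.h earlier in this patch. A minimal sketch of how a caller might decode that value (the helper below is hypothetical and not part of this patch):

/* Mirrors the defines added to amdgpu_dpm.h in this series. */
#define BACO_SUPPORT (1 << 0)
#define MACO_SUPPORT (1 << 1)

static const char *bamaco_support_to_str(int support)
{
	if ((support & BACO_SUPPORT) && (support & MACO_SUPPORT))
		return "BACO and MACO (BAMACO)";
	if (support & BACO_SUPPORT)
		return "BACO only";
	return "none";
}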
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
new file mode 100644
index 000000000000..97a29b80fb13
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -0,0 +1,1836 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU14_DRIVER_IF_V14_0_H
+#define SMU14_DRIVER_IF_V14_0_H
+
+//Increment this version if SkuTable_t or BoardTable_t change
+#define PPTABLE_VERSION 0x18
+
+#define NUM_GFXCLK_DPM_LEVELS 16
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_MP0CLK_DPM_LEVELS 2
+#define NUM_DCLK_DPM_LEVELS 8
+#define NUM_VCLK_DPM_LEVELS 8
+#define NUM_DISPCLK_DPM_LEVELS 8
+#define NUM_DPPCLK_DPM_LEVELS 8
+#define NUM_DPREFCLK_DPM_LEVELS 8
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_DTBCLK_DPM_LEVELS 8
+#define NUM_UCLK_DPM_LEVELS 6
+#define NUM_LINK_LEVELS 3
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_OD_FAN_MAX_POINTS 6
+
+// Feature Control Defines
+#define FEATURE_FW_DATA_READ_BIT 0
+#define FEATURE_DPM_GFXCLK_BIT 1
+#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2
+#define FEATURE_DPM_UCLK_BIT 3
+#define FEATURE_DPM_FCLK_BIT 4
+#define FEATURE_DPM_SOCCLK_BIT 5
+#define FEATURE_DPM_LINK_BIT 6
+#define FEATURE_DPM_DCN_BIT 7
+#define FEATURE_VMEMP_SCALING_BIT 8
+#define FEATURE_VDDIO_MEM_SCALING_BIT 9
+#define FEATURE_DS_GFXCLK_BIT 10
+#define FEATURE_DS_SOCCLK_BIT 11
+#define FEATURE_DS_FCLK_BIT 12
+#define FEATURE_DS_LCLK_BIT 13
+#define FEATURE_DS_DCFCLK_BIT 14
+#define FEATURE_DS_UCLK_BIT 15
+#define FEATURE_GFX_ULV_BIT 16
+#define FEATURE_FW_DSTATE_BIT 17
+#define FEATURE_GFXOFF_BIT 18
+#define FEATURE_BACO_BIT 19
+#define FEATURE_MM_DPM_BIT 20
+#define FEATURE_SOC_MPCLK_DS_BIT 21
+#define FEATURE_BACO_MPCLK_DS_BIT 22
+#define FEATURE_THROTTLERS_BIT 23
+#define FEATURE_SMARTSHIFT_BIT 24
+#define FEATURE_GTHR_BIT 25
+#define FEATURE_ACDC_BIT 26
+#define FEATURE_VR0HOT_BIT 27
+#define FEATURE_FW_CTF_BIT 28
+#define FEATURE_FAN_CONTROL_BIT 29
+#define FEATURE_GFX_DCS_BIT 30
+#define FEATURE_GFX_READ_MARGIN_BIT 31
+#define FEATURE_LED_DISPLAY_BIT 32
+#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33
+#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34
+#define FEATURE_OPTIMIZED_VMIN_BIT 35
+#define FEATURE_GFX_IMU_BIT 36
+#define FEATURE_BOOT_TIME_CAL_BIT 37
+#define FEATURE_GFX_PCC_DFLL_BIT 38
+#define FEATURE_SOC_CG_BIT 39
+#define FEATURE_DF_CSTATE_BIT 40
+#define FEATURE_GFX_EDC_BIT 41
+#define FEATURE_BOOT_POWER_OPT_BIT 42
+#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43
+#define FEATURE_DS_VCN_BIT 44
+#define FEATURE_BACO_CG_BIT 45
+#define FEATURE_MEM_TEMP_READ_BIT 46
+#define FEATURE_ATHUB_MMHUB_PG_BIT 47
+#define FEATURE_SOC_PCC_BIT 48
+#define FEATURE_EDC_PWRBRK_BIT 49
+#define FEATURE_SOC_EDC_XVMIN_BIT 50
+#define FEATURE_GFX_PSM_DIDT_BIT 51
+#define FEATURE_APT_ALL_ENABLE_BIT 52
+#define FEATURE_APT_SQ_THROTTLE_BIT 53
+#define FEATURE_APT_PF_DCS_BIT 54
+#define FEATURE_GFX_EDC_XVMIN_BIT 55
+#define FEATURE_GFX_DIDT_XVMIN_BIT 56
+#define FEATURE_FAN_ABNORMAL_BIT 57
+#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58
+#define FEATURE_SPARE_59_BIT 59
+#define FEATURE_SPARE_60_BIT 60
+#define FEATURE_SPARE_61_BIT 61
+#define FEATURE_SPARE_62_BIT 62
+#define FEATURE_SPARE_63_BIT 63
+#define NUM_FEATURES 64
+
+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT)
+
+
+//For use with feature control messages
+typedef enum {
+ FEATURE_PWR_ALL,
+ FEATURE_PWR_S5,
+ FEATURE_PWR_BACO,
+ FEATURE_PWR_SOC,
+ FEATURE_PWR_GFX,
+ FEATURE_PWR_DOMAIN_COUNT,
+} FEATURE_PWR_DOMAIN_e;
+
+//For use with feature control + BTC save restore
+typedef enum {
+ FEATURE_BTC_NOP,
+ FEATURE_BTC_SAVE,
+ FEATURE_BTC_RESTORE,
+ FEATURE_BTC_COUNT,
+} FEATURE_BTC_e;
+
+// Debug Overrides Bitmask
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
+#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010
+#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020
+#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040
+#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080
+#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100
+#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
+#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
+#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
+#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
+#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
+#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
+#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+
+// VR Mapping Bit Defines
+#define VR_MAPPING_VR_SELECT_MASK 0x01
+#define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+#define VR_MAPPING_PLANE_SELECT_MASK 0x02
+#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
+
+// PSI Bit Defines
+#define PSI_SEL_VR0_PLANE0_PSI0 0x01
+#define PSI_SEL_VR0_PLANE0_PSI1 0x02
+#define PSI_SEL_VR0_PLANE1_PSI0 0x04
+#define PSI_SEL_VR0_PLANE1_PSI1 0x08
+#define PSI_SEL_VR1_PLANE0_PSI0 0x10
+#define PSI_SEL_VR1_PLANE0_PSI1 0x20
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40
+#define PSI_SEL_VR1_PLANE1_PSI1 0x80
+
+typedef enum {
+ SVI_PSI_0, // Full phase count (default)
+ SVI_PSI_1, // Phase count 1st level
+ SVI_PSI_2, // Phase count 2nd level
+ SVI_PSI_3, // Single phase operation + active diode emulation
+ SVI_PSI_4, // Single phase operation + passive diode emulation *optional*
+ SVI_PSI_5, // Reserved
+ SVI_PSI_6, // Power down to 0V (voltage regulation disabled)
+ SVI_PSI_7, // Automated phase shedding and diode emulation
+} SVI_PSI_e;
+
+// Throttler Control/Status Bits
+#define THROTTLER_TEMP_EDGE_BIT 0
+#define THROTTLER_TEMP_HOTSPOT_BIT 1
+#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2
+#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3
+#define THROTTLER_TEMP_MEM_BIT 4
+#define THROTTLER_TEMP_VR_GFX_BIT 5
+#define THROTTLER_TEMP_VR_SOC_BIT 6
+#define THROTTLER_TEMP_VR_MEM0_BIT 7
+#define THROTTLER_TEMP_VR_MEM1_BIT 8
+#define THROTTLER_TEMP_LIQUID0_BIT 9
+#define THROTTLER_TEMP_LIQUID1_BIT 10
+#define THROTTLER_TEMP_PLX_BIT 11
+#define THROTTLER_TDC_GFX_BIT 12
+#define THROTTLER_TDC_SOC_BIT 13
+#define THROTTLER_PPT0_BIT 14
+#define THROTTLER_PPT1_BIT 15
+#define THROTTLER_PPT2_BIT 16
+#define THROTTLER_PPT3_BIT 17
+#define THROTTLER_FIT_BIT 18
+#define THROTTLER_GFX_APCC_PLUS_BIT 19
+#define THROTTLER_GFX_DVO_BIT 20
+#define THROTTLER_COUNT 21
+
+// FW DState Features Control Bits
+#define FW_DSTATE_SOC_ULV_BIT 0
+#define FW_DSTATE_G6_HSR_BIT 1
+#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2
+#define FW_DSTATE_SMN_DS_BIT 3
+#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4
+#define FW_DSTATE_SOC_LIV_MIN_BIT 5
+#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6
+#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7
+#define FW_DSTATE_MALL_ALLOC_BIT 8
+#define FW_DSTATE_MEM_PSI_BIT 9
+#define FW_DSTATE_HSR_NON_STROBE_BIT 10
+#define FW_DSTATE_MP0_ENTER_WFI_BIT 11
+#define FW_DSTATE_MALL_FLUSH_BIT 12
+#define FW_DSTATE_SOC_PSI_BIT 13
+#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14
+#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15
+#define FW_DSTATE_CLDO_PRG_BIT 16
+#define FW_DSTATE_DF_PLL_PWRDN_BIT 17
+
+//LED Display Mask & Control Bits
+#define LED_DISPLAY_GFX_DPM_BIT 0
+#define LED_DISPLAY_PCIE_BIT 1
+#define LED_DISPLAY_ERROR_BIT 2
+
+
+#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0
+#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1
+#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2
+
+typedef enum {
+ SMARTSHIFT_VERSION_1,
+ SMARTSHIFT_VERSION_2,
+ SMARTSHIFT_VERSION_3,
+} SMARTSHIFT_VERSION_e;
+
+typedef enum {
+ FOPT_CALC_AC_CALC_DC,
+ FOPT_PPTABLE_AC_CALC_DC,
+ FOPT_CALC_AC_PPTABLE_DC,
+ FOPT_PPTABLE_AC_PPTABLE_DC,
+} FOPT_CALC_e;
+
+typedef enum {
+ DRAM_BIT_WIDTH_DISABLED = 0,
+ DRAM_BIT_WIDTH_X_8 = 8,
+ DRAM_BIT_WIDTH_X_16 = 16,
+ DRAM_BIT_WIDTH_X_32 = 32,
+ DRAM_BIT_WIDTH_X_64 = 64,
+ DRAM_BIT_WIDTH_X_128 = 128,
+ DRAM_BIT_WIDTH_COUNT,
+} DRAM_BIT_WIDTH_TYPE_e;
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_VMEMP,
+ I2C_CONTROLLER_NAME_VR_VDDIO,
+ I2C_CONTROLLER_NAME_LIQUID0,
+ I2C_CONTROLLER_NAME_LIQUID1,
+ I2C_CONTROLLER_NAME_PLX,
+ I2C_CONTROLLER_NAME_FAN_INTAKE,
+ I2C_CONTROLLER_NAME_COUNT,
+} I2cControllerName_e;
+
+typedef enum {
+ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
+ I2C_CONTROLLER_THROTTLER_VR_GFX,
+ I2C_CONTROLLER_THROTTLER_VR_SOC,
+ I2C_CONTROLLER_THROTTLER_VR_VMEMP,
+ I2C_CONTROLLER_THROTTLER_VR_VDDIO,
+ I2C_CONTROLLER_THROTTLER_LIQUID0,
+ I2C_CONTROLLER_THROTTLER_LIQUID1,
+ I2C_CONTROLLER_THROTTLER_PLX,
+ I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
+ I2C_CONTROLLER_THROTTLER_INA3221,
+ I2C_CONTROLLER_THROTTLER_COUNT,
+} I2cControllerThrottler_e;
+
+typedef enum {
+ I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
+ I2C_CONTROLLER_PROTOCOL_VR_IR35217,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
+ I2C_CONTROLLER_PROTOCOL_INA3221,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
+ I2C_CONTROLLER_PROTOCOL_COUNT,
+} I2cControllerProtocol_e;
+
+typedef struct {
+ uint8_t Enabled;
+ uint8_t Speed;
+ uint8_t SlaveAddress;
+ uint8_t ControllerPort;
+ uint8_t ControllerName;
+ uint8_t ThermalThrotter;
+ uint8_t I2cProtocol;
+ uint8_t PaddingConfig;
+} I2cControllerConfig_t;
+
+typedef enum {
+ I2C_PORT_SVD_SCL = 0,
+ I2C_PORT_GPIO,
+} I2cPort_e;
+
+typedef enum {
+ I2C_SPEED_FAST_50K = 0, //50 Kbits/s
+ I2C_SPEED_FAST_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
+ I2C_SPEED_HIGH_2M, //2.3 Mbits/s
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ = 0,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+typedef struct {
+ uint8_t ReadWriteData; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
+ uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
+ uint8_t SlaveAddress; //Slave address of device
+ uint8_t NumCmds; //Number of commands
+
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t; // SW I2C Request Table
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+
+ uint32_t Spare[8];
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SwI2cRequestExternal_t;
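To illustrate the SW I2C command layout defined above, here is a hypothetical helper that fills a request for a write-offset-then-read-one-byte transaction; the controller port, speed and the exact stop/restart placement are assumptions for illustration and are not taken from this header:

/* Illustrative only: populate a SwI2cRequest_t using the types above. */
static void build_example_read(SwI2cRequest_t *req, uint8_t slave, uint8_t reg)
{
	req->I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
	req->I2CSpeed = I2C_SPEED_FAST_100K;
	req->SlaveAddress = slave;
	req->NumCmds = 2;

	/* Command 0: write the register offset (READWRITE bit set = write). */
	req->SwI2cCmds[0].ReadWriteData = reg;
	req->SwI2cCmds[0].CmdConfig = CMDCONFIG_READWRITE_MASK | CMDCONFIG_RESTART_MASK;

	/* Command 1: read one byte back and terminate with a stop. */
	req->SwI2cCmds[1].ReadWriteData = 0;
	req->SwI2cCmds[1].CmdConfig = CMDCONFIG_STOP_MASK;
}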
+
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[24];
+} EccInfoTable_t;
+
+//D3HOT sequences
+typedef enum {
+ BACO_SEQUENCE,
+ MSR_SEQUENCE,
+ BAMACO_SEQUENCE,
+ ULPS_SEQUENCE,
+ D3HOT_SEQUENCE_COUNT,
+} D3HOTSequence_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_DYNAMIC_MODE = 0,
+ PG_STATIC_MODE,
+} PowerGatingMode_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_POWER_DOWN = 0,
+ PG_POWER_UP,
+} PowerGatingSettings_e;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} QuadraticInt_t;
+
+typedef struct {
+ uint32_t m; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+} LinearInt_t;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} DroopInt_t;
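The coefficients in the structs above are carried as raw IEEE-754 bit patterns inside uint32_t fields, so driver-side packing and unpacking can be sketched as follows (illustrative only; the helper names are not from this header):

#include <string.h>
#include <stdint.h>

static uint32_t float_to_u32(float v)
{
	uint32_t out;

	memcpy(&out, &v, sizeof(out));	/* reinterpret the IEEE-754 bits */
	return out;
}

static float u32_to_float(uint32_t v)
{
	float out;

	memcpy(&out, &v, sizeof(out));
	return out;
}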
+
+typedef enum {
+ DCS_ARCH_DISABLED,
+ DCS_ARCH_FADCS,
+ DCS_ARCH_ASYNC,
+} DCS_ARCH_e;
+
+//Only Clks that have DPM descriptors are listed here
+typedef enum {
+ PPCLK_GFXCLK = 0,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_DCLK_0,
+ PPCLK_VCLK_0,
+ PPCLK_DISPCLK,
+ PPCLK_DPPCLK,
+ PPCLK_DPREFCLK,
+ PPCLK_DCFCLK,
+ PPCLK_DTBCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ VOLTAGE_MODE_PPTABLE = 0,
+ VOLTAGE_MODE_FUSES,
+ VOLTAGE_MODE_COUNT,
+} VOLTAGE_MODE_e;
+
+typedef enum {
+ AVFS_VOLTAGE_GFX = 0,
+ AVFS_VOLTAGE_SOC,
+ AVFS_VOLTAGE_COUNT,
+} AVFS_VOLTAGE_TYPE_e;
+
+typedef enum {
+ AVFS_TEMP_COLD = 0,
+ AVFS_TEMP_HOT,
+ AVFS_TEMP_COUNT,
+} AVFS_TEMP_e;
+
+typedef enum {
+ AVFS_D_G,
+ AVFS_D_COUNT,
+} AVFS_D_e;
+
+
+typedef enum {
+ UCLK_DIV_BY_1 = 0,
+ UCLK_DIV_BY_2,
+ UCLK_DIV_BY_4,
+ UCLK_DIV_BY_8,
+} UCLK_DIV_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW = 0,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+typedef enum {
+ PWR_CONFIG_TDP = 0,
+ PWR_CONFIG_TGP,
+ PWR_CONFIG_TCP_ESTIMATED,
+ PWR_CONFIG_TCP_MEASURED,
+ PWR_CONFIG_TBP_DESKTOP,
+ PWR_CONFIG_TBP_MOBILE,
+} PwrConfig_e;
+
+typedef struct {
+ uint8_t Padding;
+ uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
+ uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
+ uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e
+ LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
+ uint32_t Padding3[3];
+ uint16_t Padding4;
+ uint16_t FoptimalDc; //Foptimal frequency in DC power mode.
+ uint16_t FoptimalAc; //Foptimal frequency in AC power mode.
+ uint16_t Padding2;
+} DpmDescriptor_t;
+
+typedef enum {
+ PPT_THROTTLER_PPT0,
+ PPT_THROTTLER_PPT1,
+ PPT_THROTTLER_PPT2,
+ PPT_THROTTLER_PPT3,
+ PPT_THROTTLER_COUNT
+} PPT_THROTTLER_e;
+
+typedef enum {
+ TEMP_EDGE,
+ TEMP_HOTSPOT,
+ TEMP_HOTSPOT_GFX,
+ TEMP_HOTSPOT_SOC,
+ TEMP_MEM,
+ TEMP_VR_GFX,
+ TEMP_VR_SOC,
+ TEMP_VR_MEM0,
+ TEMP_VR_MEM1,
+ TEMP_LIQUID0,
+ TEMP_LIQUID1,
+ TEMP_PLX,
+ TEMP_COUNT,
+} TEMP_e;
+
+typedef enum {
+ TDC_THROTTLER_GFX,
+ TDC_THROTTLER_SOC,
+ TDC_THROTTLER_COUNT
+} TDC_THROTTLER_e;
+
+typedef enum {
+ SVI_PLANE_VDD_GFX,
+ SVI_PLANE_VDD_SOC,
+ SVI_PLANE_VDDCI_MEM,
+ SVI_PLANE_VDDIO_MEM,
+ SVI_PLANE_COUNT,
+} SVI_PLANE_e;
+
+typedef enum {
+ PMFW_VOLT_PLANE_GFX,
+ PMFW_VOLT_PLANE_SOC,
+ PMFW_VOLT_PLANE_COUNT
+} PMFW_VOLT_PLANE_e;
+
+typedef enum {
+ CUSTOMER_VARIANT_ROW,
+ CUSTOMER_VARIANT_FALCON,
+ CUSTOMER_VARIANT_COUNT,
+} CUSTOMER_VARIANT_e;
+
+typedef enum {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
+typedef enum {
+ MEM_VENDOR_PLACEHOLDER0, // 0
+ MEM_VENDOR_SAMSUNG, // 1
+ MEM_VENDOR_INFINEON, // 2
+ MEM_VENDOR_ELPIDA, // 3
+ MEM_VENDOR_ETRON, // 4
+ MEM_VENDOR_NANYA, // 5
+ MEM_VENDOR_HYNIX, // 6
+ MEM_VENDOR_MOSEL, // 7
+ MEM_VENDOR_WINBOND, // 8
+ MEM_VENDOR_ESMT, // 9
+ MEM_VENDOR_PLACEHOLDER1, // 10
+ MEM_VENDOR_PLACEHOLDER2, // 11
+ MEM_VENDOR_PLACEHOLDER3, // 12
+ MEM_VENDOR_PLACEHOLDER4, // 13
+ MEM_VENDOR_PLACEHOLDER5, // 14
+ MEM_VENDOR_MICRON, // 15
+ MEM_VENDOR_COUNT,
+} MEM_VENDOR_e;
+
+typedef enum {
+ PP_GRTAVFS_HW_CPO_CTL_ZONE0,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE1,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE2,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE3,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE4,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4,
+ PP_GRTAVFS_HW_ZONE0_VF,
+ PP_GRTAVFS_HW_ZONE1_VF1,
+ PP_GRTAVFS_HW_ZONE2_VF2,
+ PP_GRTAVFS_HW_ZONE3_VF3,
+ PP_GRTAVFS_HW_VOLTAGE_GB,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4,
+ PP_GRTAVFS_HW_RESERVED_0,
+ PP_GRTAVFS_HW_RESERVED_1,
+ PP_GRTAVFS_HW_RESERVED_2,
+ PP_GRTAVFS_HW_RESERVED_3,
+ PP_GRTAVFS_HW_RESERVED_4,
+ PP_GRTAVFS_HW_RESERVED_5,
+ PP_GRTAVFS_HW_RESERVED_6,
+ PP_GRTAVFS_HW_FUSE_COUNT,
+} PP_GRTAVFS_HW_FUSE_e;
+
+typedef enum {
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4,
+ PP_GRTAVFS_FW_COMMON_FUSE_COUNT,
+} PP_GRTAVFS_FW_COMMON_FUSE_e;
+
+typedef enum {
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4,
+ PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY,
+ PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4,
+ PP_GRTAVFS_FW_SEP_FUSE_COUNT,
+} PP_GRTAVFS_FW_SEP_FUSE_e;
+
+#define PP_NUM_RTAVFS_PWL_ZONES 5
+
+
+// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
+// Slope Q1.7, Offset Q1.2
+typedef struct {
+ int8_t Offset; // in Amps
+ uint8_t Padding;
+ uint16_t MaxCurrent; // in Amps
+} SviTelemetryScale_t;
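The Qm.n annotations here (and the mV(Q2) voltages used throughout these tables) denote fixed-point values with n fractional bits. A small conversion sketch under that assumption (helper names are illustrative):

/* Qm.n with n fractional bits: Q1.7 => raw / 128.0, Q1.2 => raw / 4.0, mV(Q2) => raw / 4.0 mV */
static float q_to_float(int32_t raw, unsigned int frac_bits)
{
	return (float)raw / (float)(1u << frac_bits);
}

static int32_t float_to_q(float value, unsigned int frac_bits)
{
	return (int32_t)(value * (float)(1u << frac_bits));
}

/* e.g. a Q1.7 slope of 0x40 decodes to q_to_float(0x40, 7) == 0.5,
 * and a voltage of 3400 in mV(Q2) decodes to 850.0 mV */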
+
+#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
+
+#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
+#define PP_OD_FEATURE_GFX_VMAX_BIT 1
+#define PP_OD_FEATURE_SOC_VMAX_BIT 2
+#define PP_OD_FEATURE_PPT_BIT 3
+#define PP_OD_FEATURE_FAN_CURVE_BIT 4
+#define PP_OD_FEATURE_FAN_LEGACY_BIT 5
+#define PP_OD_FEATURE_FULL_CTRL_BIT 6
+#define PP_OD_FEATURE_TDC_BIT 7
+#define PP_OD_FEATURE_GFXCLK_BIT 8
+#define PP_OD_FEATURE_UCLK_BIT 9
+#define PP_OD_FEATURE_FCLK_BIT 10
+#define PP_OD_FEATURE_ZERO_FAN_BIT 11
+#define PP_OD_FEATURE_TEMPERATURE_BIT 12
+#define PP_OD_FEATURE_EDC_BIT 13
+#define PP_OD_FEATURE_COUNT 14
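FeatureCtrlMask in the overdrive tables defined below is a plain bitmap over these PP_OD_FEATURE_*_BIT positions. A minimal sketch of composing such a mask (the function name is illustrative):

/* Illustrative: mark the GFXCLK and UCLK overdrive settings as user-specified. */
static uint32_t od_clock_feature_mask(void)
{
	return (1u << PP_OD_FEATURE_GFXCLK_BIT) | (1u << PP_OD_FEATURE_UCLK_BIT);
}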
+
+typedef enum {
+ PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
+ PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING,
+ PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
+} PP_OD_POWER_FEATURE_e;
+
+typedef enum {
+ FAN_MODE_AUTO = 0,
+ FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
+typedef enum {
+ OD_NO_ERROR,
+ OD_REQUEST_ADVANCED_NOT_SUPPORTED,
+ OD_UNSUPPORTED_FEATURE,
+ OD_INVALID_FEATURE_COMBO_ERROR,
+ OD_GFXCLK_VF_CURVE_OFFSET_ERROR,
+ OD_VDD_GFX_VMAX_ERROR,
+ OD_VDD_SOC_VMAX_ERROR,
+ OD_PPT_ERROR,
+ OD_FAN_MIN_PWM_ERROR,
+ OD_FAN_ACOUSTIC_TARGET_ERROR,
+ OD_FAN_ACOUSTIC_LIMIT_ERROR,
+ OD_FAN_TARGET_TEMP_ERROR,
+ OD_FAN_ZERO_RPM_STOP_TEMP_ERROR,
+ OD_FAN_CURVE_PWM_ERROR,
+ OD_FAN_CURVE_TEMP_ERROR,
+ OD_FULL_CTRL_GFXCLK_ERROR,
+ OD_FULL_CTRL_UCLK_ERROR,
+ OD_FULL_CTRL_FCLK_ERROR,
+ OD_FULL_CTRL_VDD_GFX_ERROR,
+ OD_FULL_CTRL_VDD_SOC_ERROR,
+ OD_TDC_ERROR,
+ OD_GFXCLK_ERROR,
+ OD_UCLK_ERROR,
+ OD_FCLK_ERROR,
+ OD_OP_TEMP_ERROR,
+ OD_OP_GFX_EDC_ERROR,
+ OD_OP_GFX_PCC_ERROR,
+ OD_POWER_FEATURE_CTRL_ERROR,
+} OD_FAIL_e;
+
+typedef struct {
+ uint32_t FeatureCtrlMask;
+
+ //Voltage control
+ int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
+
+ uint16_t VddGfxVmax; // in mV
+ uint16_t VddSocVmax;
+
+ uint8_t IdlePwrSavingFeaturesCtrl;
+ uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint16_t Padding;
+
+ //Frequency changes
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
+ uint16_t UclkFmin; // MHz
+ uint16_t UclkFmax; // MHz
+ uint16_t FclkFmin;
+ uint16_t FclkFmax;
+
+ //PPT
+ int16_t Ppt; // %
+ int16_t Tdc;
+
+ //Fan control
+ uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
+ uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
+ uint16_t FanMinimumPwm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanTargetTemperature; // Degrees Celsius
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanZeroRpmStopTemp;
+ uint8_t FanMode;
+ uint8_t MaxOpTemp;
+
+ uint8_t AdvancedOdModeEnabled;
+ uint8_t Padding1[3];
+
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t SocVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ uint16_t FclkFullCtrlMode;
+ uint16_t Padding2;
+
+ int16_t GfxEdc;
+ int16_t GfxPccLimitControl;
+
+ uint32_t Spare[10];
+ uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
+} OverDriveTable_t;
+
+typedef struct {
+ OverDriveTable_t OverDriveTable;
+
+} OverDriveTableExternal_t;
+
+typedef struct {
+ uint32_t FeatureCtrlMask;
+
+ //Gfx Vf Curve
+ int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
+ //gfx Vmax
+ uint16_t VddGfxVmax; // in mV
+ //soc Vmax
+ uint16_t VddSocVmax;
+
+ //gfxclk
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
+ //uclk
+ uint16_t UclkFmin; // MHz
+ uint16_t UclkFmax; // MHz
+ //fclk
+ uint16_t FclkFmin;
+ uint16_t FclkFmax;
+
+ //PPT
+ int16_t Ppt; // %
+ //TDC
+ int16_t Tdc;
+
+ //Fan Curve
+ uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
+ uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
+ //Fan Legacy
+ uint16_t FanMinimumPwm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanTargetTemperature; // Degrees Celsius
+ //zero fan
+ uint8_t FanZeroRpmEnable;
+ //temperature
+ uint8_t MaxOpTemp;
+ uint8_t Padding[2];
+
+ //Full Ctrl
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t SocVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ uint16_t FclkFullCtrlMode;
+ //EDC
+ int16_t GfxEdc;
+ int16_t GfxPccLimitControl;
+ int16_t Padding1;
+
+ uint32_t Spare[5];
+} OverDriveLimits_t;
+
+typedef enum {
+ BOARD_GPIO_SMUIO_0,
+ BOARD_GPIO_SMUIO_1,
+ BOARD_GPIO_SMUIO_2,
+ BOARD_GPIO_SMUIO_3,
+ BOARD_GPIO_SMUIO_4,
+ BOARD_GPIO_SMUIO_5,
+ BOARD_GPIO_SMUIO_6,
+ BOARD_GPIO_SMUIO_7,
+ BOARD_GPIO_SMUIO_8,
+ BOARD_GPIO_SMUIO_9,
+ BOARD_GPIO_SMUIO_10,
+ BOARD_GPIO_SMUIO_11,
+ BOARD_GPIO_SMUIO_12,
+ BOARD_GPIO_SMUIO_13,
+ BOARD_GPIO_SMUIO_14,
+ BOARD_GPIO_SMUIO_15,
+ BOARD_GPIO_SMUIO_16,
+ BOARD_GPIO_SMUIO_17,
+ BOARD_GPIO_SMUIO_18,
+ BOARD_GPIO_SMUIO_19,
+ BOARD_GPIO_SMUIO_20,
+ BOARD_GPIO_SMUIO_21,
+ BOARD_GPIO_SMUIO_22,
+ BOARD_GPIO_SMUIO_23,
+ BOARD_GPIO_SMUIO_24,
+ BOARD_GPIO_SMUIO_25,
+ BOARD_GPIO_SMUIO_26,
+ BOARD_GPIO_SMUIO_27,
+ BOARD_GPIO_SMUIO_28,
+ BOARD_GPIO_SMUIO_29,
+ BOARD_GPIO_SMUIO_30,
+ BOARD_GPIO_SMUIO_31,
+ MAX_BOARD_GPIO_SMUIO_NUM,
+ BOARD_GPIO_DC_GEN_A,
+ BOARD_GPIO_DC_GEN_B,
+ BOARD_GPIO_DC_GEN_C,
+ BOARD_GPIO_DC_GEN_D,
+ BOARD_GPIO_DC_GEN_E,
+ BOARD_GPIO_DC_GEN_F,
+ BOARD_GPIO_DC_GEN_G,
+ BOARD_GPIO_DC_GENLK_CLK,
+ BOARD_GPIO_DC_GENLK_VSYNC,
+ BOARD_GPIO_DC_SWAPLOCK_A,
+ BOARD_GPIO_DC_SWAPLOCK_B,
+ MAX_BOARD_DC_GPIO_NUM,
+ BOARD_GPIO_LV_EN,
+} BOARD_GPIO_TYPE_e;
+
+#define INVALID_BOARD_GPIO 0xFF
+
+
+typedef struct {
+ //PLL 0
+ uint16_t InitImuClk;
+ uint16_t InitSocclk;
+ uint16_t InitMpioclk;
+ uint16_t InitSmnclk;
+ //PLL 1
+ uint16_t InitDispClk;
+ uint16_t InitDppClk;
+ uint16_t InitDprefclk;
+ uint16_t InitDcfclk;
+ uint16_t InitDtbclk;
+ uint16_t InitDbguSocClk;
+ //PLL 2
+ uint16_t InitGfxclk_bypass;
+ uint16_t InitMp1clk;
+ uint16_t InitLclk;
+ uint16_t InitDbguBacoClk;
+ uint16_t InitBaco400clk;
+ uint16_t InitBaco1200clk_bypass;
+ uint16_t InitBaco700clk_bypass;
+ uint16_t InitBaco500clk;
+ // PLL 3
+ uint16_t InitDclk0;
+ uint16_t InitVclk0;
+ // PLL 4
+ uint16_t InitFclk;
+ uint16_t Padding1;
+ // PLL 5
+ //UCLK clocks, assumed all UCLK instances will be the same.
+ uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk
+
+ uint8_t Padding[3];
+
+ uint32_t InitVcoFreqPll0; //smu_socclk_t
+ uint32_t InitVcoFreqPll1; //smu_displayclk_t
+ uint32_t InitVcoFreqPll2; //smu_nbioclk_t
+ uint32_t InitVcoFreqPll3; //smu_vcnclk_t
+ uint32_t InitVcoFreqPll4; //smu_fclk_t
+ uint32_t InitVcoFreqPll5; //smu_uclk_01_t
+ uint32_t InitVcoFreqPll6; //smu_uclk_23_t
+ uint32_t InitVcoFreqPll7; //smu_uclk_45_t
+ uint32_t InitVcoFreqPll8; //smu_uclk_67_t
+
+ //encoding will be SVI3
+ uint16_t InitGfx; // In mV(Q2), should be 0?
+ uint16_t InitSoc; // In mV(Q2)
+ uint16_t InitVddIoMem; // In mV(Q2) MemVdd
+ uint16_t InitVddCiMem; // In mV(Q2) VMemP
+
+ //uint16_t Padding2;
+
+ uint32_t Spare[8];
+} BootValues_t;
+
+typedef struct {
+ uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts
+ uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps
+
+ uint16_t Temperature[TEMP_COUNT]; // Celsius
+
+ uint8_t PwmLimitMin;
+ uint8_t PwmLimitMax;
+ uint8_t FanTargetTemperature;
+ uint8_t Spare1[1];
+
+ uint16_t AcousticTargetRpmThresholdMin;
+ uint16_t AcousticTargetRpmThresholdMax;
+
+ uint16_t AcousticLimitRpmThresholdMin;
+ uint16_t AcousticLimitRpmThresholdMax;
+
+ uint16_t PccLimitMin;
+ uint16_t PccLimitMax;
+
+ uint16_t FanStopTempMin;
+ uint16_t FanStopTempMax;
+ uint16_t FanStartTempMin;
+ uint16_t FanStartTempMax;
+
+ uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
+ uint32_t Spare[11];
+} MsgLimits_t;
+
+typedef struct {
+ uint16_t BaseClockAc;
+ uint16_t GameClockAc;
+ uint16_t BoostClockAc;
+ uint16_t BaseClockDc;
+ uint16_t GameClockDc;
+ uint16_t BoostClockDc;
+
+ uint32_t Reserved[4];
+} DriverReportedClocks_t;
+
+typedef struct {
+ uint8_t DcBtcEnabled;
+ uint8_t Padding[3];
+
+ uint16_t DcTol; // mV Q2
+ uint16_t DcBtcGb; // mV Q2
+
+ uint16_t DcBtcMin; // mV Q2
+ uint16_t DcBtcMax; // mV Q2
+
+ LinearInt_t DcBtcGbScalar;
+} AvfsDcBtcParams_t;
+
+typedef struct {
+ uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C
+ uint16_t VftFMin; // in MHz
+ uint16_t VInversion; // in mV Q2
+ QuadraticInt_t qVft[AVFS_TEMP_COUNT];
+ QuadraticInt_t qAvfsGb;
+ QuadraticInt_t qAvfsGb2;
+} AvfsFuseOverride_t;
+
+//all settings maintained by PFE team
+typedef struct {
+ uint8_t Version;
+ uint8_t Spare8[3];
+ // SECTION: Feature Control
+ uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping
+ // SECTION: FW DSTATE Settings
+ uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping
+ // SECTION: Advanced Options
+ uint32_t DebugOverrides;
+
+ uint32_t Spare[2];
+} PFE_Settings_t;
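FeaturesToRun is a packed bitmap indexed by the FEATURE_*_BIT values from the matching PMFW header: word = bit / 32, position = bit % 32. A sketch under that reading (helper names are illustrative):

/* Illustrative: request that PMFW attempt to enable one feature. */
static void pfe_request_feature(PFE_Settings_t *pfe, unsigned int feature_bit)
{
	pfe->FeaturesToRun[feature_bit / 32] |= 1u << (feature_bit % 32);
}

/* Illustrative: check whether a feature is requested. */
static int pfe_feature_requested(const PFE_Settings_t *pfe, unsigned int feature_bit)
{
	return (pfe->FeaturesToRun[feature_bit / 32] >> (feature_bit % 32)) & 1u;
}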
+
+typedef struct {
+ // SECTION: Version
+ uint32_t Version; // should be unique to each SKU (i.e., if any value in the structure below changes, this value must be different)
+
+ // SECTION: Miscellaneous Configuration
+ uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e
+ uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e
+ uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT
+ uint8_t SmartShiftVersion; // Determines which SmartShift feature version is supported. Use defines from SMARTSHIFT_VERSION_e
+
+ // SECTION: Infrastructure Limits
+ uint8_t SocketPowerLimitSpare[10];
+
+ //if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programming (i.e. absolute power). If 0, all except index 0 will be scalars
+ //relative to index 0
+ uint8_t EnableLegacyPptLimit;
+ uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support
+
+ uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting
+
+ uint8_t PaddingPpt[7];
+
+ uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only
+
+ uint16_t PaddingInfra;
+
+ // Per-year normalized Vmax state failure rates (sum of the two domains divided by lifetime in years)
+ uint32_t FitControllerFailureRateLimit; //in IEEE float
+ //Expected GFX Duty Cycle at Vmax.
+ uint32_t FitControllerGfxDutyCycle; // in IEEE float
+ //Expected SOC Duty Cycle at Vmax.
+ uint32_t FitControllerSocDutyCycle; // in IEEE float
+
+ //This offset will be deducted from the controller output before it goes through the SOC Vset limiter block.
+ uint32_t FitControllerSocOffset; //in IEEE float
+
+ uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value
+
+ // SECTION: Throttler settings
+ uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping
+
+
+ // SECTION: Voltage Control Parameters
+ uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE)
+
+ uint8_t Padding[2];
+ uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE
+
+ // Voltage Limits
+ uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled
+ uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled
+
+ //Vmin Optimizations
+ int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin
+ int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin
+ uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot.
+ uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold.
+ uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
+ uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
+ uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
+ uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset applied to T0 Hot
+ uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset applied to T0 Cold
+
+ //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
+ uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
+ //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
+ uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT];
+ //Scalar coefficient of the PSM aging degradation function
+ uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM
+ //Exponential coefficient of the PSM aging degradation function
+ uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM
+ //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
+ uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN
+ //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
+ uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN
+
+ uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT];
+ uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT];
+
+ uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
+ uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
+
+ QuadraticInt_t Gfx_Vmin_droop;
+ QuadraticInt_t Soc_Vmin_droop;
+ uint32_t SpareVmin[6];
+
+ //SECTION: DPM Configuration 1
+ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
+ uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
+
+ uint16_t GfxclkAibFmax;
+ uint16_t GfxclkFreqCap;
+
+ //GFX Idle Power Settings
+ uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in MHz
+ uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in MHz
+ uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in MHz
+ uint16_t GfxclkThrottleClock; //Used primarily in DCS
+ uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages
+ uint8_t GfxIdlePadding;
+
+ uint8_t SmsRepairWRCKClkDivEn;
+ uint8_t SmsRepairWRCKClkDivVal;
+ uint8_t GfxOffEntryEarlyMGCGEn;
+ uint8_t GfxOffEntryForceCGCGEn;
+ uint8_t GfxOffEntryForceCGCGDelayEn;
+ uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds
+
+ uint16_t GfxclkFreqGfxUlv; // in MHz
+ uint8_t GfxIdlePadding2[2];
+ uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry
+ uint32_t GfxoffSpare[15];
+
+ // DFLL
+ uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
+ uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
+ uint32_t DfllBtcMasterScalerM;
+ int32_t DfllBtcMasterScalerB;
+ uint32_t DfllBtcSlaveScalerM;
+ int32_t DfllBtcSlaveScalerB;
+
+ uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+ uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+ uint32_t GfxDfllSpare[9];
+
+ // DVO
+ uint32_t DvoPsmDownThresholdVoltage; //Voltage float
+ uint32_t DvoPsmUpThresholdVoltage; //Voltage float
+ uint32_t DvoFmaxLowScaler; //Unitless float
+
+ // GFX DCS
+ uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
+ uint16_t PaddingDcs;
+
+ uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
+ uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
+
+ uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS.
+
+ uint16_t DcsExitHysteresis; //The minimum amount of time the power credit accumulator must hold a value > 0 before SMU exits the DCS throttling phase.
+ uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
+
+ uint32_t DcsPfGfxFopt; //Default to GFX FMIN
+ uint32_t DcsPfUclkFopt; //Default to UCLK FMIN
+
+ uint8_t FoptEnabled;
+ uint8_t DcsSpare2[3];
+ uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsSpare[9];
+
+ // UCLK section
+ uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
+ uint8_t PaddingMem[3];
+
+ uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow)
+ uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow)
+ uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t DalDcModeMaxUclkFreq;
+ uint8_t PaddingsMem[2];
+ //FCLK Section
+ uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 to use FW calculated value
+ uint16_t PaddingFclk;
+
+ // Link DPM Settings
+ uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
+ uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint16_t LclkFreq[NUM_LINK_LEVELS];
+
+ // SECTION: VDD_GFX AVFS
+ uint8_t OverrideGfxAvfsFuses;
+ uint8_t GfxAvfsPadding[3];
+
+ uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
+ uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
+ //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+ uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+ uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+
+ uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+
+ uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t dGbV_dT_vmin;
+ uint32_t dGbV_dT_vmax;
+
+ //Unused: PMFW-9370
+ uint32_t V2F_vmin_range_low;
+ uint32_t V2F_vmin_range_high;
+ uint32_t V2F_vmax_range_low;
+ uint32_t V2F_vmax_range_high;
+
+ AvfsDcBtcParams_t DcBtcGfxParams;
+ QuadraticInt_t SSCurve_GFX;
+ uint32_t GfxAvfsSpare[29];
+
+ //SECTION: VDD_SOC AVFS
+ uint8_t OverrideSocAvfsFuses;
+ uint8_t MinSocAvfsRevision;
+ uint8_t SocAvfsPadding[2];
+
+ AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT];
+
+ DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb
+
+ LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V
+
+ QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V
+
+ AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT];
+
+ QuadraticInt_t SSCurve_SOC;
+ uint32_t SocAvfsSpare[29];
+
+ //SECTION: Boot clock and voltage values
+ BootValues_t BootValues;
+
+ //SECTION: Driver Reported Clocks
+ DriverReportedClocks_t DriverReportedClocks;
+
+ //SECTION: Message Limits
+ MsgLimits_t MsgLimits;
+
+ //SECTION: OverDrive Limits
+ OverDriveLimits_t OverDriveLimitsBasicMin;
+ OverDriveLimits_t OverDriveLimitsBasicMax;
+ OverDriveLimits_t OverDriveLimitsAdvancedMin;
+ OverDriveLimits_t OverDriveLimitsAdvancedMax;
+
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[1];
+ uint16_t TotalBoardPowerRoc;
+
+ //PMFW-11158
+ QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
+ // APT GFX to UCLK mapping
+ int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6];
+ uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6];
+ uint32_t AptPadding;
+
+ // Xvmin didt
+ QuadraticInt_t GfxXvminDidtDroopThresh;
+ uint32_t GfxXvminDidtResetDDWait;
+ uint32_t GfxXvminDidtClkStopWait;
+ uint32_t GfxXvminDidtFcsStepCtrl;
+ uint32_t GfxXvminDidtFcsWaitCtrl;
+
+ // PSM based didt controller
+ uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode
+ uint32_t P2v_a; // floating point in U32 format
+ uint32_t P2v_b;
+ uint32_t P2v_c;
+ uint32_t T2p_a;
+ uint32_t T2p_b;
+ uint32_t T2p_c;
+ uint32_t P2vTemp;
+ QuadraticInt_t PsmDidtStaticSettings;
+ QuadraticInt_t PsmDidtDynamicSettings;
+ uint8_t PsmDidtAvgDiv;
+ uint8_t PsmDidtForceStall;
+ uint16_t PsmDidtReleaseTimer;
+ uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
+ // CAC EDC
+ uint32_t Leakage_C0; // in IEEE float
+ uint32_t Leakage_C1; // in IEEE float
+ uint32_t Leakage_C2; // in IEEE float
+ uint32_t Leakage_C3; // in IEEE float
+ uint32_t Leakage_C4; // in IEEE float
+ uint32_t Leakage_C5; // in IEEE float
+ uint32_t GFX_CLK_SCALAR; // in IEEE float
+ uint32_t GFX_CLK_INTERCEPT; // in IEEE float
+ uint32_t GFX_CAC_M; // in IEEE float
+ uint32_t GFX_CAC_B; // in IEEE float
+ uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
+ uint32_t DynToTotalCacScalar; // in IEEE float
+ // GFX EDC XVMIN
+ uint32_t XVmin_Gfx_EdcThreshScalar;
+ uint32_t XVmin_Gfx_EdcEnableFreq;
+ uint32_t XVmin_Gfx_EdcPccAsStepCtrl;
+ uint32_t XVmin_Gfx_EdcPccAsWaitCtrl;
+ uint16_t XVmin_Gfx_EdcThreshold;
+ uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl;
+ // SOC EDC XVMIN
+ uint32_t XVmin_Soc_EdcThreshScalar;
+ uint32_t XVmin_Soc_EdcEnableFreq;
+ uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react.
+ uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted.
+ uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted.
+ uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, First Pcc Step number that will be applied when PCC asserts.
+ uint8_t PaddingSocEdc[3];
+
+ // Fuse Override for SOC and GFX XVMIN
+ uint8_t GfxXvminFuseOverride;
+ uint8_t SocXvminFuseOverride;
+ uint8_t PaddingXvminFuseOverride[2];
+ uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
+ uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
+ uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
+ uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
+
+
+ uint16_t GfxXvminFddVolt0; // low voltage, in VID
+ uint16_t GfxXvminFddVolt1; // mid voltage, in VID
+ uint16_t GfxXvminFddVolt2; // high voltage, in VID
+ uint16_t SocXvminFddVolt0; // low voltage, in VID
+ uint16_t SocXvminFddVolt1; // mid voltage, in VID
+ uint16_t SocXvminFddVolt2; // high voltage, in VID
+ uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse
+ uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse
+ uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse
+
+ // SECTION: Sku Reserved
+ uint32_t Spare;
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} SkuTable_t;
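The GfxXvminFddTemp*/SocXvminFddTemp* fields near the end of SkuTable_t use a sign-magnitude byte (bit 7 sign, bits 0-6 absolute value) rather than two's complement. A decoding sketch under that reading of the comment (the helper name is illustrative):

/* Illustrative: decode a sign-magnitude temperature byte into a signed value. */
static int decode_sign_magnitude_temp(uint8_t raw)
{
	int magnitude = raw & 0x7F;

	return (raw & 0x80) ? -magnitude : magnitude;
}
/* e.g. decode_sign_magnitude_temp(0x05) == 5, decode_sign_magnitude_temp(0x85) == -5 */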
+
+typedef struct {
+ uint8_t SlewRateConditions;
+ uint8_t LoadLineAdjust;
+ uint8_t VoutOffset;
+ uint8_t VidMax;
+ uint8_t VidMin;
+ uint8_t TenBitTelEn;
+ uint8_t SixteenBitTelEn;
+ uint8_t OcpThresh;
+ uint8_t OcpWarnThresh;
+ uint8_t OcpSettings;
+ uint8_t VrhotThresh;
+ uint8_t OtpThresh;
+ uint8_t UvpOvpDeltaRef;
+ uint8_t PhaseShed;
+ uint8_t Padding[10];
+ uint32_t SettingOverrideMask;
+} Svi3RegulatorSettings_t;
+
+typedef struct {
+ // SECTION: Version
+ uint32_t Version; //should be unique to each board type
+
+ // SECTION: I2C Control
+ I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
+
+ //SECTION SVI3 Board Parameters
+ uint8_t SlaveAddrMapping[SVI_PLANE_COUNT];
+ uint8_t VrPsiSupport[SVI_PLANE_COUNT];
+
+ uint32_t Svi3SvcSpeed;
+ uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3
+
+ // SECTION: Voltage Regulator Settings
+ Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT];
+
+ // SECTION: GPIO Settings
+ uint8_t LedOffGpio;
+ uint8_t FanOffGpio;
+ uint8_t GfxVrPowerStageOffGpio;
+
+ uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
+ uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+
+ uint8_t GthrGpio; // GPIO pin configured for GTHR Event
+ uint8_t GthrPolarity; // replace GPIO polarity for GTHR
+
+ // LED Display Settings
+ uint8_t LedPin0; // GPIO number for LedPin[0]
+ uint8_t LedPin1; // GPIO number for LedPin[1]
+ uint8_t LedPin2; // GPIO number for LedPin[2]
+ uint8_t LedEnableMask;
+
+ uint8_t LedPcie; // GPIO number for PCIE results
+ uint8_t LedError; // GPIO number for Error Cases
+ uint8_t PaddingLed;
+
+ // SECTION: Clock Spread Spectrum
+
+ // UCLK Spread Spectrum
+ uint8_t UclkTrainingModeSpreadPercent; // Q4.4
+ uint8_t UclkSpreadPadding;
+ uint16_t UclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];
+
+ // DFLL Spread Spectrum
+ uint8_t GfxclkSpreadEnable;
+
+ // FCLK Spread Spectrum
+ uint8_t FclkSpreadPercent; // Q4.4
+ uint16_t FclkSpreadFreq; // kHz
+
+ // Section: Memory Config
+ uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
+ uint8_t PaddingMem1[7];
+
+ // SECTION: UMC feature flags
+ uint8_t HsrEnabled;
+ uint8_t VddqOffEnabled;
+ uint8_t PaddingUmcFlags[2];
+
+ uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
+
+ uint8_t FuseWritePowerMuxPresent;
+ uint8_t FuseWritePadding[3];
+
+ // SECTION: EDC Params
+ uint32_t LoadlineGfx;
+ uint32_t LoadlineSoc;
+ uint32_t GfxEdcLimit;
+ uint32_t SocEdcLimit;
+
+ uint32_t RestBoardPower; //power consumed by the board that is not captured by the SVI3 input telemetry
+ uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors
+
+ uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0
+ uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1
+ uint8_t PaddingEpcs[2];
+
+ // SECTION: Board Reserved
+ uint32_t BoardSpare[52];
+
+ // SECTION: Structure Padding
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} BoardTable_t;
+
+typedef struct {
+ // SECTION: Infrastructure Limits
+ uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported
+
+ uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
+ uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input
+
+ // SECTION: Fan Control
+ uint16_t FanStopTemp[TEMP_COUNT]; //Celsius
+ uint16_t FanStartTemp[TEMP_COUNT]; //Celsius
+
+ uint16_t FanGain[TEMP_COUNT];
+
+ uint16_t FanPwmMin;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanMaximumRpm;
+ uint16_t MGpuAcousticLimitRpmThreshold;
+ uint16_t FanTargetGfxclk;
+ uint32_t TempInputSelectMask;
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanTachEdgePerRev;
+ uint16_t FanPadding;
+ uint16_t FanTargetTemperature[TEMP_COUNT];
+
+ // The following are AFC override parameters. Leave at 0 to use FW defaults.
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t FuzzyFan_Reserved;
+
+ uint16_t FwCtfLimit[TEMP_COUNT];
+
+ uint16_t IntakeTempEnableRPM;
+ int16_t IntakeTempOffsetTemp;
+ uint16_t IntakeTempReleaseTemp;
+ uint16_t IntakeTempHighIntakeAcousticLimit;
+
+ uint16_t IntakeTempAcouticLimitReleaseRate;
+ int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset
+ uint16_t FanStalledTriggerRpm; //
+ uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm
+
+ uint16_t FanSpare[1];
+ uint8_t FanIntakeSensorSupport;
+ uint8_t FanIntakePadding;
+ uint32_t FanAmbientPerfBoostThreshold;
+ uint32_t FanSpare2[12];
+
+ uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
+ uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
+ uint16_t TemperatureFwCtfLimit_Hynix;
+ uint16_t TemperatureFwCtfLimit_Micron;
+
+ // SECTION: Board Reserved
+ uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with platform maximum temperature per VR current rail
+ uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported
+ uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used by SmartShift
+ uint16_t CustomSkuSpare16b;
+ uint32_t CustomSkuSpare32b[10];
+
+ // SECTION: Structure Padding
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} CustomSkuTable_t;
+
+typedef struct {
+ PFE_Settings_t PFE_Settings;
+ SkuTable_t SkuTable;
+ CustomSkuTable_t CustomSkuTable;
+ BoardTable_t BoardTable;
+} PPTable_t;
+
+typedef struct {
+ // Time constant parameters for clock averages in ms
+ uint16_t GfxclkAverageLpfTau;
+ uint16_t FclkAverageLpfTau;
+ uint16_t UclkAverageLpfTau;
+ uint16_t GfxActivityLpfTau;
+ uint16_t UclkActivityLpfTau;
+ uint16_t UclkMaxActivityLpfTau;
+ uint16_t SocketPowerLpfTau;
+ uint16_t VcnClkAverageLpfTau;
+ uint16_t VcnUsageAverageLpfTau;
+ uint16_t PcieActivityLpTau;
+} DriverSmuConfig_t;
+
+typedef struct {
+ DriverSmuConfig_t DriverSmuConfig;
+
+ uint32_t Spare[8];
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DriverSmuConfigExternal_t;
+
+
+typedef struct {
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
+ uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
+
+ uint16_t Padding;
+
+ uint32_t Spare[32];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} DriverInfoTable_t;
+
+typedef struct {
+ uint32_t CurrClock[PPCLK_COUNT];
+
+ uint16_t AverageGfxclkFrequencyTarget;
+ uint16_t AverageGfxclkFrequencyPreDs;
+ uint16_t AverageGfxclkFrequencyPostDs;
+ uint16_t AverageFclkFrequencyPreDs;
+ uint16_t AverageFclkFrequencyPostDs;
+ uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock
+ uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock
+ uint16_t AverageVclk0Frequency ;
+ uint16_t AverageDclk0Frequency ;
+ uint16_t AverageVclk1Frequency ;
+ uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy ;
+ uint16_t dGPU_W_MAX ;
+ uint16_t padding ;
+
+ uint32_t MetricsCounter ;
+
+ uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
+
+ uint16_t AverageGfxActivity ;
+ uint16_t AverageUclkActivity ;
+ uint16_t Vcn0ActivityPercentage ;
+ uint16_t Vcn1ActivityPercentage ;
+
+ uint32_t EnergyAccumulator;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
+ uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t AvgTemperatureFanIntake;
+
+ uint8_t PcieRate ;
+ uint8_t PcieWidth ;
+
+ uint8_t AvgFanPwm;
+ uint8_t Padding[1];
+ uint16_t AvgFanRpm;
+
+
+ uint8_t ThrottlingPercentage[THROTTLER_COUNT];
+ uint8_t padding1[3];
+
+ //metrics for D3hot entry/exit and driver ARM msgs
+ uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+ uint16_t ApuSTAPMSmartShiftLimit;
+ uint16_t ApuSTAPMLimit;
+ uint16_t AvgApuSocketPower;
+
+ uint16_t AverageUclkActivity_MAX;
+
+ uint32_t PublicSerialNumberLower;
+ uint32_t PublicSerialNumberUpper;
+
+} SmuMetrics_t;
+
+typedef struct {
+ SmuMetrics_t SmuMetrics;
+ uint32_t Spare[30];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SmuMetricsExternal_t;
+
+typedef struct {
+ uint8_t WmSetting;
+ uint8_t Flags;
+ uint8_t Padding[2];
+
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+
+typedef enum {
+ WATERMARKS_CLOCK_RANGE = 0,
+ WATERMARKS_DUMMY_PSTATE,
+ WATERMARKS_MALL,
+ WATERMARKS_COUNT,
+} WATERMARKS_FLAGS_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
+} Watermarks_t;
+
+typedef struct {
+ Watermarks_t Watermarks;
+ uint32_t Spare[16];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} WatermarksExternal_t;
+
+typedef struct {
+ uint16_t avgPsmCount[76];
+ uint16_t minPsmCount[76];
+ uint16_t maxPsmCount[76];
+ float avgPsmVoltage[76];
+ float minPsmVoltage[76];
+ float maxPsmVoltage[76];
+} AvfsDebugTable_t;
+
+typedef struct {
+ AvfsDebugTable_t AvfsDebugTable;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsDebugTableExternal_t;
+
+
+typedef struct {
+ uint8_t Gfx_ActiveHystLimit;
+ uint8_t Gfx_IdleHystLimit;
+ uint8_t Gfx_FPS;
+ uint8_t Gfx_MinActiveFreqType;
+ uint8_t Gfx_BoosterFreqType;
+ uint8_t PaddingGfx;
+ uint16_t Gfx_MinActiveFreq; // MHz
+ uint16_t Gfx_BoosterFreq; // MHz
+ uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Gfx_PD_Data_limit_a; // Q16
+ uint32_t Gfx_PD_Data_limit_b; // Q16
+ uint32_t Gfx_PD_Data_limit_c; // Q16
+ uint32_t Gfx_PD_Data_error_coeff; // Q16
+ uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
+
+ uint8_t Fclk_ActiveHystLimit;
+ uint8_t Fclk_IdleHystLimit;
+ uint8_t Fclk_FPS;
+ uint8_t Fclk_MinActiveFreqType;
+ uint8_t Fclk_BoosterFreqType;
+ uint8_t PaddingFclk;
+ uint16_t Fclk_MinActiveFreq; // MHz
+ uint16_t Fclk_BoosterFreq; // MHz
+ uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Fclk_PD_Data_limit_a; // Q16
+ uint32_t Fclk_PD_Data_limit_b; // Q16
+ uint32_t Fclk_PD_Data_limit_c; // Q16
+ uint32_t Fclk_PD_Data_error_coeff; // Q16
+ uint32_t Fclk_PD_Data_error_rate_coeff; // Q16
+
+ uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16
+ uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS];
+ uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS];
+ uint16_t Mem_Fps;
+
+} DpmActivityMonitorCoeffInt_t;
+
+
+typedef struct {
+ DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt;
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DpmActivityMonitorCoeffIntExternal_t;
+
+
+
+// Workload bits
+#define WORKLOAD_PPLIB_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7
+#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8
+#define WORKLOAD_PPLIB_CGVDI_BIT 9
+#define WORKLOAD_PPLIB_COUNT 10
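These WORKLOAD_PPLIB_*_BIT positions form the bitmask a driver passes when selecting a power profile (for example through a SetWorkloadMask-style message). A sketch; send_smc_msg() is a placeholder, not a real driver API:

/* Illustrative: build the mask for the full-screen 3D profile. */
static uint32_t workload_mask_fullscreen_3d(void)
{
	return 1u << WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
}

/* usage sketch (placeholder call):
 *   send_smc_msg(PPSMC_MSG_SetWorkloadMask, workload_mask_fullscreen_3d());
 */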
+
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+
+// Table transfer status
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+#define TABLE_TRANSFER_PENDING 0xAB
+
+// Table types
+#define TABLE_PPTABLE 0
+#define TABLE_COMBO_PPTABLE 1
+#define TABLE_WATERMARKS 2
+#define TABLE_AVFS_PSM_DEBUG 3
+#define TABLE_PMSTATUSLOG 4
+#define TABLE_SMU_METRICS 5
+#define TABLE_DRIVER_SMU_CONFIG 6
+#define TABLE_ACTIVITY_MONITOR_COEFF 7
+#define TABLE_OVERDRIVE 8
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_DRIVER_INFO 10
+#define TABLE_ECCINFO 11
+#define TABLE_CUSTOM_SKUTABLE 12
+#define TABLE_COUNT 13
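The table IDs select which structure SMC_MSG_TransferTableDram2Smu / SMC_MSG_TransferTableSmu2Dram moves, and the status values above report the outcome. A loose sketch of one possible flow; send_smc_msg() and read_transfer_status() are placeholders, not real driver APIs, and the actual driver sequences this differently:

/* Illustrative only: ask the SMU to copy its metrics table to DRAM and
 * check the reported transfer status. */
static int fetch_smu_metrics_table(void)
{
	uint8_t status;

	send_smc_msg(SMC_MSG_TransferTableSmu2Dram, TABLE_SMU_METRICS);

	do {
		status = read_transfer_status();
	} while (status == TABLE_TRANSFER_PENDING);

	return (status == TABLE_TRANSFER_OK) ? 0 : -1;
}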
+
+//IH Interrupt ID
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2
+#define IH_INTERRUPT_CONTEXT_ID_AC 0x3
+#define IH_INTERRUPT_CONTEXT_ID_DC 0x4
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
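When PMFW raises interrupt source 0xFE to the driver, the context ID identifies the event being reported. A dispatch sketch; the handle_*() callees are placeholders, not existing driver functions:

static void dispatch_smu_irq(uint32_t src_id, uint32_t ctxt_id)
{
	if (src_id != IH_INTERRUPT_ID_TO_DRIVER)
		return;

	switch (ctxt_id) {
	case IH_INTERRUPT_CONTEXT_ID_AC:
		handle_ac_power_event();          /* placeholder */
		break;
	case IH_INTERRUPT_CONTEXT_ID_DC:
		handle_dc_power_event();          /* placeholder */
		break;
	case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
		handle_thermal_throttle_event();  /* placeholder */
		break;
	default:
		break;
	}
}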
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 5bb7a63c0602..97522c085258 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare;
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
-#define TABLE_SPARE0 5 // Unused
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 7b812b9994d7..0b3c2f54a343 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xB
+#define SMU_METRICS_TABLE_VERSION 0xC
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -223,6 +223,10 @@ typedef struct __attribute__((packed, aligned(4))) {
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[32];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
index 356e0f57a426..ddb625860083 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
-#define FEATURE_VCN_DPM_BIT 10
+#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
-#define FEATURE_SPARE0_BIT 26 //SPARE
+#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
-#define FEATURE_SMART_L3_RINSER_BIT 34
-#define FEATURE_SPARE1_BIT 35 //SPARE
+#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
-#define FEATURE_DS_VCN_BIT 45
+#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
-#define FEATURE_SPARE2_BIT 49 //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
-#define FEATURE_SPARE_61 61
-#define FEATURE_FP_DIDT 62
+#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/
+#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t DpmHandlerID : 8;
+ uint32_t ActivityMonitorID : 8;
+ uint32_t DpmTimerID : 8;
+ uint32_t DpmHubID : 4;
+ uint32_t DpmHubTask : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t CclkSyncStatus : 8;
+ uint32_t ZstateStatus : 4;
+ uint32_t Cpu1VddOff : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
+ uint32_t GfxOffStatus : 2;
+ uint32_t Cpu0Off : 2;
+ uint32_t Cpu1Off : 2;
+ uint32_t Cpu0VddOff : 2;
+ // MP1_EXT_SCRATCH2
+ uint32_t P2JobHandler :32;
+ // MP1_EXT_SCRATCH3
+ uint32_t PostCode :32;
+ // MP1_EXT_SCRATCH4
+ uint32_t MsgPortBusy :15;
+ uint32_t RsmuPmiP1Pending : 1;
+ uint32_t RsmuPmiP2PendingCnt : 8;
+ uint32_t DfCstateExitPending : 1;
+ uint32_t Pc6EntryPending : 1;
+ uint32_t Pc6ExitPending : 1;
+ uint32_t WarmResetPending : 1;
+ uint32_t Mp0ClkPending : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t spare2 : 2;
+ // MP1_EXT_SCRATCH5
+ uint32_t IdleMask :32;
+ // MP1_EXT_SCRATCH6 = RTOS threads' status
+ // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
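FwStatus_t_v14_0_1 overlays the MP1_EXT_SCRATCH0..5 registers, so each 32-bit scratch value is reinterpreted through the bitfields above. A sketch of extracting one field (how the scratch values are read is an assumption, and the usual bitfield-layout portability caveats apply):

#include <string.h>

/* Illustrative: view six scratch register values through the status layout
 * and return the post code carried in MP1_EXT_SCRATCH3. */
static uint32_t fw_status_postcode(const uint32_t *scratch /* MP1_EXT_SCRATCH0..5 */)
{
	FwStatus_t_v14_0_1 st;

	memcpy(&st, scratch, sizeof(st));
	return st.PostCode;
}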
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index 8a8a57c56bc0..c4dc5881d8df 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -54,14 +54,14 @@
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
-#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
-#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
-#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
-#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
-#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -71,36 +71,32 @@
#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
-#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-
+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17 0x17
-#define PPSMC_MSG_spare_0x18 0x18
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
-
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
-#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_spare_0x20 0x20
-#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
-#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default
-
+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
-#define PPSMC_MSG_Reserved 0x26 ///< Not used
-#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp
-#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp
+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
new file mode 100644
index 000000000000..de2e442281ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_V14_0_2_PPSMC_H
+#define SMU_V14_0_2_PPSMC_H
+
+#define PPSMC_VERSION 0x1
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x15
+#define PPSMC_MSG_ExitBaco 0x16
+#define PPSMC_MSG_ArmD3 0x17
+#define PPSMC_MSG_BacoAudioD3PME 0x18
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x19
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1A
+#define PPSMC_MSG_SetHardMinByFreq 0x1B
+#define PPSMC_MSG_SetHardMaxByFreq 0x1C
+#define PPSMC_MSG_GetMinDpmFreq 0x1D
+#define PPSMC_MSG_GetMaxDpmFreq 0x1E
+#define PPSMC_MSG_GetDpmFreqByIndex 0x1F
+#define PPSMC_MSG_OverridePcieParameters 0x20
+
+//DramLog Set DramAddr
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x22
+#define PPSMC_MSG_DramLogSetDramSize 0x23
+#define PPSMC_MSG_SetWorkloadMask 0x24
+
+#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed
+#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
+
+//Power Gating
+#define PPSMC_MSG_AllowGfxOff 0x28
+#define PPSMC_MSG_DisallowGfxOff 0x29
+#define PPSMC_MSG_PowerUpVcn 0x2A
+#define PPSMC_MSG_PowerDownVcn 0x2B
+#define PPSMC_MSG_PowerUpJpeg 0x2C
+#define PPSMC_MSG_PowerDownJpeg 0x2D
+
+//Resets
+#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
+#define PPSMC_MSG_Mode1Reset 0x2F
+
+//Set SystemVirtual DramAddrHigh
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31
+//ACDC Power Source
+#define PPSMC_MSG_SetPptLimit 0x32
+#define PPSMC_MSG_GetPptLimit 0x33
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x34
+#define PPSMC_MSG_NotifyPowerSource 0x35
+
+//BTC
+#define PPSMC_MSG_RunDcBtc 0x36
+
+// 0x37
+
+//Others
+#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed
+#define PPSMC_MSG_SetFwDstatesMask 0x39
+#define PPSMC_MSG_SetThrottlerMask 0x3A
+
+#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B
+
+#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C
+
+//STB to dram log
+#define PPSMC_MSG_DumpSTBtoDram 0x3D
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40
+#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41
+
+#define PPSMC_MSG_AllowGfxDcs 0x43
+#define PPSMC_MSG_DisallowGfxDcs 0x44
+#define PPSMC_MSG_EnableAudioStutterWA 0x45
+#define PPSMC_MSG_PowerUpUmsch 0x46
+#define PPSMC_MSG_PowerDownUmsch 0x47
+#define PPSMC_MSG_SetDcsArch 0x48
+#define PPSMC_MSG_TriggerVFFLR 0x49
+#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A
+#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
+#define PPSMC_MSG_SetPriorityDeltaGain 0x4C
+#define PPSMC_MSG_AllowIHHostInterrupt 0x4D
+#define PPSMC_MSG_Mode3Reset 0x4F
+#define PPSMC_Message_Count 0x50
+#endif
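
The raw PPSMC IDs above only become reachable once a swsmu ppt driver maps the common SMU_MSG_* indices onto them with MSG_MAP(), the same pattern visible in the smu_v13_0_6 table further down this patch. A hedged sketch of what such an entry looks like (the real smu_v14_0_2 mapping table lives in smu_v14_0_2_ppt.c, which this series adds but this hunk does not show):

static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,		PPSMC_MSG_TestMessage,		1),
	MSG_MAP(GetSmuVersion,		PPSMC_MSG_GetSmuVersion,	1),
	MSG_MAP(EnterBaco,		PPSMC_MSG_EnterBaco,		0),
	/* ... remaining messages follow the same pattern ... */
};
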
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a941fdbf78b6..c48214e3dc8e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -115,6 +115,10 @@
__SMU_DUMMY_MAP(PowerDownVcn), \
__SMU_DUMMY_MAP(PowerUpJpeg), \
__SMU_DUMMY_MAP(PowerDownJpeg), \
+ __SMU_DUMMY_MAP(PowerUpJpeg0), \
+ __SMU_DUMMY_MAP(PowerDownJpeg0), \
+ __SMU_DUMMY_MAP(PowerUpJpeg1), \
+ __SMU_DUMMY_MAP(PowerDownJpeg1), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
__SMU_DUMMY_MAP(RunDcBtc), \
@@ -135,6 +139,8 @@
__SMU_DUMMY_MAP(PowerUpSdma), \
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinVcn), \
+ __SMU_DUMMY_MAP(SetHardMinVcn0), \
+ __SMU_DUMMY_MAP(SetHardMinVcn1), \
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
__SMU_DUMMY_MAP(ActiveProcessNotify), \
@@ -150,6 +156,8 @@
__SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetSoftMinVcn), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn1), \
__SMU_DUMMY_MAP(EnablePostCode), \
__SMU_DUMMY_MAP(GetGfxclkFrequency), \
__SMU_DUMMY_MAP(GetFclkFrequency), \
@@ -161,6 +169,8 @@
__SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxVcn), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn1), \
__SMU_DUMMY_MAP(PowerGateMmHub), \
__SMU_DUMMY_MAP(UpdatePmeRestore), \
__SMU_DUMMY_MAP(GpuChangeState), \
@@ -435,4 +445,11 @@ enum smu_feature_mask {
SMU_FEATURE_COUNT,
};
+/* Message category flags */
+#define SMU_MSG_VF_FLAG (1U << 0)
+#define SMU_MSG_RAS_PRI (1U << 1)
+
+/* Firmware capability flags */
+#define SMU_FW_CAP_RAS_PRI (1U << 0)
+
#endif
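
SMU_MSG_VF_FLAG, SMU_MSG_RAS_PRI and SMU_FW_CAP_RAS_PRI are plain single-bit masks; this patch sets SMU_FW_CAP_RAS_PRI on smu->smc_fw_caps for smu_v13_0_6 and tags a handful of MCA/reset messages with SMU_MSG_RAS_PRI. A minimal sketch of how a send path could gate on them (the helper name and the block_non_ras policy are hypothetical, not part of this patch):

static bool smu_msg_allowed(struct smu_context *smu, u32 msg_flags,
			    bool block_non_ras)
{
	/* Nothing blocked: every message may go out. */
	if (!block_non_ras)
		return true;

	/* Otherwise only RAS-priority messages pass, and only if the
	 * firmware advertises the matching capability. */
	return (msg_flags & SMU_MSG_RAS_PRI) &&
	       (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI);
}
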
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index a0e5ad0381d6..c2ab336bb530 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-bool smu_v11_0_baco_is_support(struct smu_context *smu);
+int smu_v11_0_get_bamaco_support(struct smu_context *smu);
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index fbd57fa1a004..d9700a3f28d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -210,7 +210,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-bool smu_v13_0_baco_is_support(struct smu_context *smu);
+int smu_v13_0_get_bamaco_support(struct smu_context *smu);
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 3f7463c1c1a9..1fc4557e6fb4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -27,7 +27,8 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -38,7 +39,8 @@
#define MP1_SRAM 0x03c00004
/* address block */
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
+#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
#define MAX_DPM_LEVELS 16
@@ -159,7 +161,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu);
int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq);
-bool smu_v14_0_baco_is_support(struct smu_context *smu);
+int smu_v14_0_get_bamaco_support(struct smu_context *smu);
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
new file mode 100644
index 000000000000..4a3fde89aed7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_14_0_2_PPTABLE_H
+#define SMU_14_0_2_PPTABLE_H
+
+
+#pragma pack(push, 1)
+
+#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
+
+// POWERPLAYTABLE::ulPlatformCaps
+#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC needs to show the Powerplay page.
+#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS.
+#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly.
+#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry.
+#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry.
+#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate.
+#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED.
+#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive.
+
+// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type
+#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0
+
+#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
+#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
+
+enum SMU_14_0_2_OD_SW_FEATURE_CAP
+{
+ SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0,
+ SMU_14_0_2_ODCAP_POWER_MODE = 1,
+ SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2,
+ SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3,
+ SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4,
+ SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5,
+ SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6,
+ SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7,
+ SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8,
+ SMU_14_0_2_ODCAP_COUNT = 9,
+};
+
+enum SMU_14_0_2_OD_SW_FEATURE_ID
+{
+ SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM
+ SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode
+ SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK
+ SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK
+ SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK
+ SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning
+ SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning
+ SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning
+ SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Under Volt VDDSOC
+};
+
+#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features
+
+enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID
+{
+ SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0,
+ SMU_14_0_2_ODSETTING_POWER_MODE = 1,
+ SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2,
+ SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3,
+ SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4,
+ SMU_14_0_2_ODSETTING_ACTIMING = 5,
+ SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6,
+ SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7,
+ SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8,
+ SMU_14_0_2_ODSETTING_COUNT = 9,
+};
+#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings
+
+enum SMU_14_0_2_PWRMODE_SETTING
+{
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
+};
+#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings
+
+enum SMU_14_0_2_overdrive_table_id
+{
+ SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0,
+ SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1,
+ SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2,
+};
+
+struct smu_14_0_2_overdrive_table
+{
+ uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION
+ uint8_t reserve[3]; // Zero filled field reserved for future use
+ uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags
+ int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings
+ int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings
+ int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
+};
+
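+
As a hedged illustration of how the cap[][] array and the feature enums above fit together (the helper name is hypothetical and not part of this patch):

/* Illustration only: does the basic OD table advertise a given feature? */
static inline bool smu_14_0_2_od_basic_has(const struct smu_14_0_2_overdrive_table *od,
					   enum SMU_14_0_2_OD_SW_FEATURE_CAP cap)
{
	return cap < SMU_14_0_2_MAX_ODFEATURE &&
	       od->cap[SMU_14_0_2_OVERDRIVE_TABLE_BASIC][cap];
}
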
+struct smu_14_0_2_powerplay_table
+{
+ struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
+ uint8_t table_revision; // PPGen use only: table_revision = 3
+ uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
+ uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
+ uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
+ uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
+ uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
+ uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
+ uint16_t pmfw_board_table_size; // The size of BoardTable_t.
+ uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
+ uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
+ uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base
+ uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base
+ uint16_t format_id; // PPGen use only: PPTable for different ASICs.
+ uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit; // For Gemini boards: when the slave adapter is in BACO mode, the master adapter uses this boost power limit instead of the default power limit.
+ uint16_t software_shutdown_temp;
+
+ uint8_t reserve[143]; // Zero filled field reserved for future use
+
+ struct smu_14_0_2_overdrive_table overdrive_table;
+
+ PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
+};
+
+#pragma pack(pop)
+
+#endif
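
Since the structure above is overlaid directly on VBIOS data, the #pragma pack(push, 1) layout is load-bearing. A hedged compile-time check of the offsets the field comments call out (illustrative only, assuming the usual 4-byte atom_common_table_header; not part of the patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* table_revision must follow the 4-byte atom header, and the pmfw start
 * offset must sit at byte 6, as the field comments require. */
static_assert(offsetof(struct smu_14_0_2_powerplay_table, table_revision) == 4);
static_assert(offsetof(struct smu_14_0_2_powerplay_table,
		       pmfw_pptable_start_offset) == 6);
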
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 0c2d04f978ac..6d334a2aff67 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2387,7 +2387,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = smu_v11_0_baco_enter,
.baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 836b1df79928..5a68d365967f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3538,7 +3538,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = navi10_baco_enter,
.baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 1f18b61884f3..e426f457a017 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4431,7 +4431,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = sienna_cichlid_baco_enter,
.baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index f6545093bfc1..9d5ab2ea643a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[30];
+ char ucode_prefix[25];
char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -1557,23 +1557,27 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
-bool smu_v11_0_baco_is_support(struct smu_context *smu)
+int smu_v11_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return bamaco_support |= BACO_SUPPORT;
/* Arcturus does not support this bit mask */
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
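
The return type change lets callers tell BACO-only parts apart from BACO+MACO parts instead of collapsing both into a bool. A hedged sketch of decoding the result (assuming BACO_SUPPORT and MACO_SUPPORT are the single-bit flags this series defines elsewhere):

static void report_bamaco_caps(struct smu_context *smu)
{
	int caps = smu_v11_0_get_bamaco_support(smu);

	if (!caps)
		dev_dbg(smu->adev->dev, "neither BACO nor BAMACO supported\n");
	else if (caps & MACO_SUPPORT)
		dev_dbg(smu->adev->dev, "BAMACO (BACO + MACO) supported\n");
	else
		dev_dbg(smu->adev->dev, "plain BACO supported\n");
}
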
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1603,7 +1607,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- if (amdgpu_runtime_pm == 2)
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
D3HOT_BAMACO_SEQUENCE,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index da1f43999d09..379e44eb0019 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -301,7 +301,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) /
@@ -1507,6 +1507,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f41ac6465f2a..ce941fbb9cfb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -759,8 +759,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK");
- fallthrough;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
+ *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ return 0;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
if (ret) {
@@ -788,8 +791,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK");
- fallthrough;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
+ *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ return 0;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
if (ret) {
@@ -850,7 +856,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
}
switch (type) {
- case SMU_OD_SCLK:
case SMU_SCLK:
for (i = 0; i < display_levels; i++) {
clock_mhz = freq_values[i];
@@ -863,7 +868,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
}
break;
- case SMU_OD_MCLK:
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_FCLK:
@@ -1581,11 +1585,11 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
-static bool aldebaran_is_baco_supported(struct smu_context *smu)
+static int aldebaran_get_bamaco_support(struct smu_context *smu)
{
/* aldebaran is not support baco */
- return false;
+ return 0;
}
static int aldebaran_set_df_cstate(struct smu_context *smu,
@@ -2059,7 +2063,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.register_irq_handler = smu_v13_0_register_irq_handler,
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = aldebaran_is_baco_supported,
+ .get_bamaco_support = aldebaran_get_bamaco_support,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 48170bb5112e..a8d34adc7d3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -93,7 +93,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char fw_name[30];
- char ucode_prefix[30];
+ char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -2247,7 +2247,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
@@ -2268,33 +2268,36 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu,
return ret;
}
-bool smu_v13_0_baco_is_support(struct smu_context *smu)
+int smu_v13_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return bamaco_support |= BACO_SUPPORT;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
int smu_v13_0_baco_enter(struct smu_context *smu)
{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO);
} else {
ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9c03296f92cd..1e09d5f2d82f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
@@ -3070,7 +3076,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.deep_sleep_control = smu_v13_0_deep_sleep_control,
.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
- .baco_is_support = smu_v13_0_baco_is_support,
+ .get_bamaco_support = smu_v13_0_get_bamaco_support,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bb98156b2fa1..bc241b593db1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && adev->in_s4) {
+ /* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message, to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
@@ -318,7 +328,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->AverageSocketPower << 8) / 1000;
@@ -572,6 +582,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_4_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_4_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 0dce672ac1b9..218f209c3775 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -286,7 +286,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
@@ -332,6 +332,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = smu_v13_0_5_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3957af057d54..4d3eca2fc3f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -138,13 +138,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -167,10 +167,10 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
- MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0),
- MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0),
- MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
- MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
};
@@ -1010,8 +1010,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
- fallthrough;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ break;
case SMU_SCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
&now);
@@ -1052,8 +1055,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
- fallthrough;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ break;
case SMU_MCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
&now);
@@ -1670,6 +1676,11 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_type == SMU_UCLK) {
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
+ /* For VF, only allowed in FW versions 85.102 or greater */
+ if (amdgpu_sriov_vf(adev) &&
+ ((smu->smc_fw_version < 0x556600) ||
+ (adev->flags & AMD_IS_APU)))
+ return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, max);
@@ -2077,11 +2088,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
adev->unique_id = pptable->PublicSerialNumber_AID;
}
-static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
+static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu)
{
/* smu_13_0_6 does not support baco */
- return false;
+ return 0;
}
static const char *const throttling_logging_label[] = {
@@ -2228,7 +2239,15 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
- if (!amdgpu_sriov_vf(adev)) {
+ /* For SMU version 85.99.0 or higher, the PCIe link speed and width are
+ * reported from the PMFW metrics table for both the PF and one VF;
+ * otherwise they are read from registers, and only for the PF.
+ */
+ if (smu->smc_fw_version >= 0x556300) {
+ gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
+ gpu_metrics->pcie_link_speed =
+ pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
+ } else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
link_width_level = 0;
@@ -2238,6 +2257,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
}
+
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
@@ -2294,6 +2314,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(adev->pdev, i * 4,
+ adev->pdev->saved_config_space[i]);
+ pci_restore_msi_state(adev->pdev);
+}
+
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2346,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
+ /* Certain platforms have switches that assign virtual BAR values to the
+ * devices behind them: the OS sees the virtual BARs while the device is
+ * assigned different ones, and config space reads are answered by the
+ * switch with the virtual values. A mode-2 reset is invisible to the
+ * switch, so it keeps returning the same virtual values to the OS. This
+ * defeats pci_restore_config_space(), which skips writing a saved value
+ * when the current config space read already matches it. As a
+ * workaround, always restore the config space explicitly.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ smu_v13_0_6_restore_pci_config(smu);
+
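
For context, the restriction being worked around is that the generic restore path compares each saved dword against a fresh config-space read and skips the write when they match; a hedged paraphrase of that behaviour (not a copy of the PCI core code):

static void restore_dword_if_changed(struct pci_dev *pdev, int off, u32 saved)
{
	u32 val;

	pci_read_config_dword(pdev, off, &val);
	/* A switch echoing a stale virtual BAR makes this look "unchanged",
	 * so the write below never happens -- hence the forced restore. */
	if (val != saved)
		pci_write_config_dword(pdev, off, saved);
}
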
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);
@@ -2671,6 +2716,11 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
umc_v12_0_is_correctable_error(adev, status0))
*count = (ext_error_code == 0) ? odecc_err_cnt : 1;
+ amdgpu_umc_update_ecc_status(adev,
+ entry->regs[MCA_REG_IDX_STATUS],
+ entry->regs[MCA_REG_IDX_IPID],
+ entry->regs[MCA_REG_IDX_ADDR]);
+
return 0;
}
@@ -2684,7 +2734,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ (ext_error_code == 0 || ext_error_code == 9))
*count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
*count = err_cnt;
@@ -2975,7 +3026,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
return smu_v13_0_6_mca_set_debug_mode(smu, enable);
}
-static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count)
{
uint32_t msg;
int ret;
@@ -2984,10 +3035,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err
return -EINVAL;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
msg = SMU_MSG_QueryValidMcaCount;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
msg = SMU_MSG_QueryValidMcaCeCount;
break;
default:
@@ -3004,14 +3055,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err
}
static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
- enum aca_error_type type, u32 *count)
+ enum aca_smu_type type, u32 *count)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
switch (type) {
- case ACA_ERROR_TYPE_UE:
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_UE:
+ case ACA_SMU_TYPE_CE:
ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
break;
default:
@@ -3022,16 +3073,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
return ret;
}
-static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val)
{
uint32_t msg, param;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
msg = SMU_MSG_McaBankDumpDW;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
msg = SMU_MSG_McaBankCeDumpDW;
break;
default:
@@ -3043,7 +3094,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t
return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
}
-static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val, int count)
{
int ret, i;
@@ -3060,7 +3111,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ
return 0;
}
-static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type,
int idx, int reg_idx, u64 *val)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3077,13 +3128,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ
*val = (u64)data[1] << 32 | data[0];
dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
- type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+ type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
return 0;
}
static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
- enum aca_error_type type, int idx, struct aca_bank *bank)
+ enum aca_smu_type type, int idx, struct aca_bank *bank)
{
int i, ret, count;
@@ -3097,12 +3148,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
return 0;
}
+static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+ int error_code;
+
+ if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
+ else
+ error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+
+ return error_code & 0xff;
+}
+
static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.max_ue_bank_count = 12,
.max_ce_bank_count = 12,
.set_debug_mode = aca_smu_set_debug_mode,
.get_valid_aca_count = aca_smu_get_valid_aca_count,
.get_valid_aca_bank = aca_smu_get_valid_aca_bank,
+ .parse_error_code = aca_smu_parse_error_code,
};
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
.setup_pptable = smu_v13_0_6_setup_pptable,
- .baco_is_support = smu_v13_0_6_is_baco_supported,
+ .get_bamaco_support = smu_v13_0_6_get_bamaco_support,
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
@@ -3208,6 +3272,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 7318964f1f14..e996a0a4d33e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2650,7 +2650,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
- .baco_is_support = smu_v13_0_baco_is_support,
+ .get_bamaco_support = smu_v13_0_get_bamaco_support,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 2d1736234b4a..d8bcf765a803 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -363,7 +363,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
@@ -423,6 +423,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = yellow_carp_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = yellow_carp_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
index ddbac5c655f7..4593e29e8ff8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o
+SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o
AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index b06a3cc43305..68b9bf822e8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -38,8 +38,13 @@
#include "amdgpu_ras.h"
#include "smu_cmn.h"
-#include "asic_reg/mp/mp_14_0_0_offset.h"
-#include "asic_reg/mp/mp_14_0_0_sh_mask.h"
+#include "asic_reg/mp/mp_14_0_2_offset.h"
+#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
+
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -52,6 +57,7 @@
#undef pr_debug
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
@@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu)
int smu_v14_0_load_microcode(struct smu_context *smu)
{
-#if 0
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
const struct smc_firmware_header_v1_0 *hdr;
@@ -131,8 +136,13 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
for (i = 0; i < adev->usec_timeout; i++) {
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
break;
@@ -142,9 +152,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
if (i == adev->usec_timeout)
return -ETIME;
-#endif
return 0;
-
}
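
The firmware-flags register moved between MP1 14.0.0/14.0.1 and 14.0.2, so the same IP-version test now appears in both smu_v14_0_load_microcode() and smu_v14_0_check_fw_status(). A hedged sketch of folding that choice into one helper (the function name is hypothetical and not part of the patch):

static u32 smu_v14_0_fw_flags_offset(struct amdgpu_device *adev)
{
	/* 14.0.0/14.0.1 keep the flags at the older SMN offset. */
	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
		return smnMP1_FIRMWARE_FLAGS_14_0_0;

	return smnMP1_FIRMWARE_FLAGS;
}

Either caller would then read RREG32_PCIE(MP1_Public | (smu_v14_0_fw_flags_offset(adev) & 0xffffffff)).
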
int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
@@ -165,6 +173,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -198,7 +210,12 @@ int smu_v14_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
@@ -227,16 +244,16 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
adev->pm.fw_version = smu_version;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 2):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
- break;
case IP_VERSION(14, 0, 0):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
-
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
amdgpu_ip_version(adev, MP1_HWIP, 0));
@@ -738,9 +755,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
struct amdgpu_device *adev = smu->adev;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -841,9 +858,16 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
break;
case AMDGPU_IRQ_STATE_ENABLE:
@@ -851,14 +875,26 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
-
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
break;
default:
@@ -868,11 +904,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
+#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
static int smu_v14_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- // TODO
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == SOC15_IH_CLIENTID_THM) {
+ switch (src_id) {
+ case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ break;
+ case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+ break;
+ default:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+ src_id);
+ break;
+ }
+ }
return 0;
}
@@ -894,7 +951,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
irq_src->num_types = 1;
irq_src->funcs = &smu_v14_0_irq_funcs;
- // TODO: THM related
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_L2H,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_H2L,
+ irq_src);
+ if (ret)
+ return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
SMU_IH_INTERRUPT_ID_TO_DRIVER,
@@ -1402,9 +1469,22 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
if (adev->vcn.harvest_config & (1 << i))
continue;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
if (ret)
return ret;
}
@@ -1415,9 +1495,34 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
bool enable)
{
- return smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
- 0, NULL);
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
int smu_v14_0_run_btc(struct smu_context *smu)
@@ -1552,23 +1657,27 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
return 0;
}
-bool smu_v14_0_baco_is_support(struct smu_context *smu)
+int smu_v14_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) ||
!smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
@@ -1591,7 +1700,7 @@ int smu_v14_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- smu_baco->maco_support ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 9310c4758e38..e4419e1561ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
- MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
+ MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
+ MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
+ MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
+ MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
+ MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
- MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
+ MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
@@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
- MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
+ MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
+ MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
+ MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
+ MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
+ MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
@@ -154,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -164,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@@ -355,6 +362,12 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -586,6 +599,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
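+/*
+ * Look up a DPM level frequency from the v14.0.1 clocks table. Unlike the
+ * v14.0.0 variant, this also covers the second VCN instance (SMU_VCLK1/
+ * SMU_DCLK1) backed by the VClocks1/DClocks1 arrays.
+ */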
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks0[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks0[dpm_level];
+ break;
+ case SMU_VCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks1[dpm_level];
+ break;
+ case SMU_DCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks1[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumMemPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->FclkClocks_Freq[dpm_level];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@@ -630,6 +697,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
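+/*
+ * The common wrappers below dispatch to the per-IP-version implementation
+ * based on the MP1 HWIP version (14.0.0 vs 14.0.1) and propagate any error
+ * returned by the selected helper.
+ */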
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+						   enum smu_clk_type clk_type,
+						   uint32_t dpm_level,
+						   uint32_t *freq)
+{
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+	return ret;
+}
+
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@@ -650,6 +730,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@@ -659,6 +741,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
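+/*
+ * Report the minimum/maximum frequency for a clock on v14.0.1: fall back to
+ * the boot clock (in MHz) when the corresponding DPM feature is disabled,
+ * otherwise read the bounding DPM levels from the clocks table.
+ */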
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* clock in MHz units */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+ break;
+ case SMU_FCLK:
+ min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
+
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@@ -729,7 +931,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@@ -761,7 +963,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@@ -771,6 +973,19 @@ failed:
return ret;
}
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+						   enum smu_clk_type clk_type,
+						   uint32_t *min,
+						   uint32_t *max)
+{
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		ret = smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+	return ret;
+}
+
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -804,6 +1019,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
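+/* Number of enabled DPM levels per clock, read from the v14.0.1 clocks table. */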
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ *count = clk_table->Vcn0ClkLevelsEnabled;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ *count = clk_table->Vcn1ClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumMemPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumFclkLevelsEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@@ -833,6 +1079,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+						 enum smu_clk_type clk_type,
+						 uint32_t *count)
+{
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+	return ret;
+}
+
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -859,18 +1117,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+ ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@@ -933,8 +1193,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
- msg_set_min = SMU_MSG_SetHardMinVcn;
- msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ msg_set_min = SMU_MSG_SetHardMinVcn0;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ msg_set_min = SMU_MSG_SetHardMinVcn1;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@@ -964,11 +1229,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@@ -993,25 +1258,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1060,6 +1325,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
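+/*
+ * Seed the fine-grain gfx clock limits from the v14.0.1 clocks table; the
+ * actual hard-min/soft-max values start out cleared until they are set later.
+ */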
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1072,6 +1349,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		ret = smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+	return ret;
+}
+
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@@ -1088,6 +1375,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
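+/*
+ * Export the SOC and VPE clock levels from the v14.0.1 clocks table in the
+ * generic dpm_clocks layout; voltage fields are not reported and stay zero.
+ */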
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+	/* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+		clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+		clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
+
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1107,6 +1413,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		ret = smu_14_0_0_get_dpm_table(smu, clock_table);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_14_0_1_get_dpm_table(smu, clock_table);
+
+	return ret;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1128,16 +1444,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
- .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
- .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+ .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
- .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+ .get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
new file mode 100644
index 000000000000..706265220292
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -0,0 +1,1796 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "smu14_driver_if_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "smu_v14_0_2_ppt.h"
+#include "smu_v14_0_2_pptable.h"
+#include "smu_v14_0_2_ppsmc.h"
+#include "mp/mp_14_0_2_offset.h"
+#include "mp/mp_14_0_2_sh_mask.h"
+
+#include "smu_cmn.h"
+#include "amdgpu_ras.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
+
+#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
+
+static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
+ MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
+ MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
+ PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
+ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, PPCLK_GFXCLK),
+ CLK_MAP(SCLK, PPCLK_GFXCLK),
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK_0),
+ CLK_MAP(DCLK, PPCLK_DCLK_0),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(FW_DATA_READ),
+ FEA_MAP(DPM_GFXCLK),
+ FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
+ FEA_MAP(DPM_UCLK),
+ FEA_MAP(DPM_FCLK),
+ FEA_MAP(DPM_SOCCLK),
+ FEA_MAP(DPM_LINK),
+ FEA_MAP(DPM_DCN),
+ FEA_MAP(VMEMP_SCALING),
+ FEA_MAP(VDDIO_MEM_SCALING),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_FCLK),
+ FEA_MAP(DS_LCLK),
+ FEA_MAP(DS_DCFCLK),
+ FEA_MAP(DS_UCLK),
+ FEA_MAP(GFX_ULV),
+ FEA_MAP(FW_DSTATE),
+ FEA_MAP(GFXOFF),
+ FEA_MAP(BACO),
+ FEA_MAP(MM_DPM),
+ FEA_MAP(SOC_MPCLK_DS),
+ FEA_MAP(BACO_MPCLK_DS),
+ FEA_MAP(THROTTLERS),
+ FEA_MAP(SMARTSHIFT),
+ FEA_MAP(GTHR),
+ FEA_MAP(ACDC),
+ FEA_MAP(VR0HOT),
+ FEA_MAP(FW_CTF),
+ FEA_MAP(FAN_CONTROL),
+ FEA_MAP(GFX_DCS),
+ FEA_MAP(GFX_READ_MARGIN),
+ FEA_MAP(LED_DISPLAY),
+ FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
+ FEA_MAP(OUT_OF_BAND_MONITOR),
+ FEA_MAP(OPTIMIZED_VMIN),
+ FEA_MAP(GFX_IMU),
+ FEA_MAP(BOOT_TIME_CAL),
+ FEA_MAP(GFX_PCC_DFLL),
+ FEA_MAP(SOC_CG),
+ FEA_MAP(DF_CSTATE),
+ FEA_MAP(GFX_EDC),
+ FEA_MAP(BOOT_POWER_OPT),
+ FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
+ FEA_MAP(DS_VCN),
+ FEA_MAP(BACO_CG),
+ FEA_MAP(MEM_TEMP_READ),
+ FEA_MAP(ATHUB_MMHUB_PG),
+ FEA_MAP(SOC_PCC),
+ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PPTABLE),
+ TAB_MAP(WATERMARKS),
+ TAB_MAP(AVFS_PSM_DEBUG),
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(DRIVER_SMU_CONFIG),
+ TAB_MAP(ACTIVITY_MONITOR_COEFF),
+ [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
+ TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+ PWR_MAP(AC),
+ PWR_MAP(DC),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
+};
+
+#if 0
+static const uint8_t smu_v14_0_2_throttler_map[] = {
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
+ [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
+ [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
+ [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
+ [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
+ [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
+ [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
+};
+#endif
+
+static int
+smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ struct amdgpu_device *adev = smu->adev;
+ /*u32 smu_version;*/
+
+ if (num > 2)
+ return -EINVAL;
+
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ }
+#if 0
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ /* PMFW 78.58 contains a critical fix for gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ }
+
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ }
+
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+#endif
+
+ return 0;
+}
+
+static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
+ smu_baco->maco_support = true;
+ }
+
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+ !overdrive_upperlimits->FeatureCtrlMask)
+ smu->od_enabled = false;
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
+ /*
+	 * Instead of having its own buffer space with a copy of overdrive_table,
+	 * smu->od_settings just points to the actual overdrive_table.
+ */
+ smu->od_settings = &powerplay_table->overdrive_table;
+
+ smu->adev->pm.no_fan =
+ !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
+
+ return 0;
+}
+
+static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
+ sizeof(PPTable_t));
+
+ return 0;
+}
+
+#ifndef atom_smc_dpm_info_table_14_0_0
+struct atom_smc_dpm_info_table_14_0_0 {
+ struct atom_common_table_header table_header;
+ BoardTable_t BoardTable;
+};
+#endif
+
+static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
+ BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+ int index, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
+
+ return 0;
+}
+
+#if 0
+static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_14_0_powerplay_table);
+
+ return 0;
+}
+#endif
+
+static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_14_0_2_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!adev->scpm_enabled)
+ ret = smu_v14_0_setup_pptable(smu);
+ else
+ ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
+
+ ret = smu_v14_0_2_store_powerplay_table(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * With SCPM enabled, the operation below will be handled
+	 * by PSP, so driver involvement is unnecessary.
+ */
+ if (!adev->scpm_enabled) {
+ ret = smu_v14_0_2_append_powerplay_table(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_v14_0_2_check_powerplay_table(smu);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_2_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+ smu_table->metrics_time = 0;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ goto err2_out;
+
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
+ return 0;
+
+err3_out:
+ kfree(smu_table->watermarks_table);
+err2_out:
+ kfree(smu_table->gpu_metrics_table);
+err1_out:
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_2_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v14_0_2_allocate_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return smu_v14_0_init_smc_tables(smu);
+}
+
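+/*
+ * Populate the driver DPM tables: clocks with their DPM feature enabled get
+ * their levels from the PMFW, the rest fall back to a single level at the
+ * boot clock. The PCIe table is derived from the SKU table link levels.
+ */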
+static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ struct smu_14_0_dpm_table *dpm_table;
+ struct smu_14_0_pcie_table *pcie_table;
+ uint32_t link_level;
+ int ret = 0;
+
+ /* socclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_SOCCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* gfxclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_GFXCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+
+ /*
+		 * Update the reported maximum shader clock to the value
+		 * which is guaranteed to be achievable on all cards. This
+		 * is aligned with the Windows setting. Since that value
+		 * might not be the peak frequency the card can reach, it
+		 * is normal for a real-time clock reading to exceed this
+		 * labelled maximum clock frequency (for example in the
+		 * pp_dpm_sclk sysfs output).
+ */
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* uclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_UCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* fclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_FCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* vclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_VCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* dclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_DCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* lclk dpm table setup */
+ pcie_table = &dpm_context->dpm_tables.pcie_table;
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ return 0;
+}
+
+static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint64_t feature_enabled;
+
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;
+
+ dev_info(smu->adev->dev, "Dumped PPTable:\n");
+
+ dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
+ dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
+ dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
+}
+
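+/*
+ * Collapse the per-throttler ThrottlingPercentage[] readings into a bitmask:
+ * bit i is set whenever throttler i reports a non-zero percentage.
+ */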
+static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
+{
+ uint32_t throttler_status = 0;
+ int i;
+
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
+
+ return throttler_status;
+}
+
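+/*
+ * When the reported activity is at or below this threshold (in percent), the
+ * post-deep-sleep average clock values are returned instead of the
+ * pre-deep-sleep ones.
+ */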
+#define SMU_14_0_2_BUSY_THRESHOLD 5
+static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
+ return ret;
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->CurrClock[PPCLK_GFXCLK];
+ break;
+ case METRICS_CURR_SOCCLK:
+ *value = metrics->CurrClock[PPCLK_SOCCLK];
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->CurrClock[PPCLK_UCLK];
+ break;
+ case METRICS_CURR_VCLK:
+ *value = metrics->CurrClock[PPCLK_VCLK_0];
+ break;
+ case METRICS_CURR_DCLK:
+ *value = metrics->CurrClock[PPCLK_DCLK_0];
+ break;
+ case METRICS_CURR_FCLK:
+ *value = metrics->CurrClock[PPCLK_FCLK];
+ break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
+ case METRICS_AVERAGE_GFXCLK:
+ if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageGfxclkFrequencyPostDs;
+ else
+ *value = metrics->AverageGfxclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_FCLK:
+ if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageFclkFrequencyPostDs;
+ else
+ *value = metrics->AverageFclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_UCLK:
+ if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageMemclkFrequencyPostDs;
+ else
+ *value = metrics->AverageMemclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->AverageVclk0Frequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->AverageDclk0Frequency;
+ break;
+ case METRICS_AVERAGE_VCLK1:
+ *value = metrics->AverageVclk1Frequency;
+ break;
+ case METRICS_AVERAGE_DCLK1:
+ *value = metrics->AverageDclk1Frequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->AverageGfxActivity;
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = metrics->AverageUclkActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = metrics->AverageSocketPower << 8;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->AvgTemperature[TEMP_EDGE] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = metrics->AvgTemperature[TEMP_MEM] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_VRGFX:
+ *value = metrics->AvgTemperature[TEMP_VR_GFX] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = metrics->AvgTemperature[TEMP_VR_SOC] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = smu_v14_0_2_get_throttler_status(metrics);
+ break;
+ case METRICS_CURR_FANSPEED:
+ *value = metrics->AvgFanRpm;
+ break;
+ case METRICS_CURR_FANPWM:
+ *value = metrics->AvgFanPwm;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
+ break;
+ case METRICS_PCIE_RATE:
+ *value = metrics->PcieRate;
+ break;
+ case METRICS_PCIE_WIDTH:
+ *value = metrics->PcieWidth;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *dpm_table;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
+static int smu_v14_0_2_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data,
+ uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_MEMACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_MEM,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+ int clk_id = 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return -EINVAL;
+
+ switch (clk_id) {
+ case PPCLK_GFXCLK:
+ member_type = METRICS_AVERAGE_GFXCLK;
+ break;
+ case PPCLK_UCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case PPCLK_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ case PPCLK_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case PPCLK_VCLK_0:
+ member_type = METRICS_AVERAGE_VCLK;
+ break;
+ case PPCLK_DCLK_0:
+ member_type = METRICS_AVERAGE_DCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ member_type,
+ value);
+}
+
+static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_14_0_dpm_table *single_dpm_table;
+ int i, curr_freq, size = 0;
+ int ret = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
+
+ switch (clk_type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ break;
+ case SMU_MCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get current clock freq!");
+ return ret;
+ }
+
+ if (single_dpm_table->is_fine_grained) {
+ /*
+ * For fine grained dpms, there are only two dpm levels:
+ * - level 0 -> min clock freq
+ * - level 1 -> max clock freq
+ * And the current clock frequency can be any value between them.
+ * So, if the current clock frequency is not at level 0 or level 1,
+ * we will fake it as three dpm levels:
+ * - level 0 -> min clock freq
+ * - level 1 -> current actual clock freq
+ * - level 2 -> max clock freq
+ */
+ if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
+ (single_dpm_table->dpm_levels[1].value != curr_freq)) {
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+ single_dpm_table->dpm_levels[0].value);
+ size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
+ curr_freq);
+ size += sysfs_emit_at(buf, size, "2: %uMhz\n",
+ single_dpm_table->dpm_levels[1].value);
+ } else {
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+ single_dpm_table->dpm_levels[0].value,
+ single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ single_dpm_table->dpm_levels[1].value,
+ single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
+ }
+ } else {
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
+ }
+ break;
+ case SMU_PCIE:
+ // TODO
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
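+/*
+ * The mask encodes the requested DPM levels: the lowest set bit selects the
+ * soft minimum level and the highest set bit the soft maximum level.
+ */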
+static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_14_0_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level;
+ uint32_t min_freq, max_freq;
+ int ret = 0;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ if (single_dpm_table->is_fine_grained) {
+			/* There are only two levels for fine-grained DPM */
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ } else {
+ if ((soft_max_level >= single_dpm_table->count) ||
+ (soft_min_level >= single_dpm_table->count))
+ return -EINVAL;
+ }
+
+ min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
+ max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ clk_type,
+ min_freq,
+ max_freq);
+ break;
+ case SMU_DCEFCLK:
+ case SMU_PCIE:
+ default:
+ break;
+ }
+
+ return ret;
+}
+
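+/*
+ * Clamp every PCIe link level to the platform gen/width caps and push the
+ * result to the PMFW, encoded as (level << 16) | (gen << 8) | lanes.
+ */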
+static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_14_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+ for (i = 0; i < pcie_table->num_of_link_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ // TODO
+
+ return 0;
+}
+
+static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
+{
+ // TODO
+
+ return 0;
+}
+
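+/*
+ * Build the 64-bit unique id from the serial number halves reported in the
+ * metrics table; if the table read fails, the id falls back to zero.
+ */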
+static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t upper32 = 0, lower32 = 0;
+ int ret;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ goto out;
+
+ upper32 = metrics->PublicSerialNumberUpper;
+ lower32 = metrics->PublicSerialNumberLower;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+}
+
+static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
+{
+ // TODO
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ int16_t workload_type = 0;
+ uint32_t i, size = 0;
+ int result = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9]);
+
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
+ /* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0)
+ return -EINVAL;
+
+ result = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type,
+ (void *)(&activity_monitor_external),
+ false);
+ if (result) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+
+ size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor->Gfx_FPS,
+ activity_monitor->Gfx_MinActiveFreqType,
+ activity_monitor->Gfx_MinActiveFreq,
+ activity_monitor->Gfx_BoosterFreqType,
+ activity_monitor->Gfx_BoosterFreq,
+ activity_monitor->Gfx_PD_Data_limit_c,
+ activity_monitor->Gfx_PD_Data_error_coeff,
+ activity_monitor->Gfx_PD_Data_error_rate_coeff);
+
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "FCLK",
+ activity_monitor->Fclk_FPS,
+ activity_monitor->Fclk_MinActiveFreqType,
+ activity_monitor->Fclk_MinActiveFreq,
+ activity_monitor->Fclk_BoosterFreqType,
+ activity_monitor->Fclk_BoosterFreq,
+ activity_monitor->Fclk_PD_Data_limit_c,
+ activity_monitor->Fclk_PD_Data_error_coeff,
+ activity_monitor->Fclk_PD_Data_error_rate_coeff);
+ }
+
+ return size;
+}
+
+static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+ long *input,
+ uint32_t size)
+{
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
+ int workload_type, ret = 0;
+
+ smu->power_profile_mode = input[size];
+
+ if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[1];
+ activity_monitor->Gfx_MinActiveFreqType = input[2];
+ activity_monitor->Gfx_MinActiveFreq = input[3];
+ activity_monitor->Gfx_BoosterFreqType = input[4];
+ activity_monitor->Gfx_BoosterFreq = input[5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
+ break;
+ case 1: /* Fclk */
+ activity_monitor->Fclk_FPS = input[1];
+ activity_monitor->Fclk_MinActiveFreqType = input[2];
+ activity_monitor->Fclk_MinActiveFreq = input[3];
+ activity_monitor->Fclk_BoosterFreqType = input[4];
+ activity_monitor->Fclk_BoosterFreq = input[5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
+ break;
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
+}
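
As a rough illustration of what the custom-profile path above consumes (pieced together from the switch on input[0] and the input[size] read, so an assumption rather than an authoritative ABI description): input[0] selects the clock domain (0 = GFXCLK, 1 = FCLK), input[1..8] carry the activity-monitor coefficients in the order printed by smu_v14_0_2_get_power_profile_mode(), and input[size] holds the requested profile index. A hypothetical layout, with placeholder values only:

#include <stdio.h>

int main(void)
{
	/* Placeholder values: domain 0 (GFXCLK), eight coefficients,
	 * then the profile index (6, assuming that is the CUSTOM profile).
	 */
	long input[] = { 0, 30, 1, 800, 0, 0, 50, 80, 20, 6 };
	unsigned int size = sizeof(input) / sizeof(input[0]) - 1;

	printf("domain=%ld coeffs=%u profile=%ld\n",
	       input[0], size - 1, input[size]);
	return 0;
}
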
+
+static int smu_v14_0_2_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v14_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v14_0_baco_enter(smu);
+}
+
+static int smu_v14_0_2_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for the PMFW to handle the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v14_0_baco_exit(smu);
+ }
+}
+
+static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
+{
+ // TODO
+
+ return true;
+}
+
+static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+ /* The direction changes. */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert a STOP if this is the last byte of either the last
+ * message of the transaction or a message for which the client
+ * explicitly requires a STOP.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
+ .master_xfer = smu_v14_0_2_i2c_xfer,
+ .functionality = smu_v14_0_2_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v14_0_2_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v14_0_2_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+ /* XXX ideally this would be something in a vbios data table */
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
+static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ break;
+ default:
+ /* Ignore others */
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
+static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
+ FEATURE_PWR_GFX, NULL);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
+}
+
+static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message the SMU to update the bad page number on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetNumBadMemoryPagesRetired,
+ size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update bad memory pages number\n",
+ __func__);
+
+ return ret;
+}
+
+static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message the SMU to update the bad channel info on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
+ size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update bad memory pages channel info\n",
+ __func__);
+
+ return ret;
+}
+
+static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
+ .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
+ .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
+ .i2c_init = smu_v14_0_2_i2c_control_init,
+ .i2c_fini = smu_v14_0_2_i2c_control_fini,
+ .is_dpm_running = smu_v14_0_2_is_dpm_running,
+ .dump_pptable = smu_v14_0_2_dump_pptable,
+ .init_microcode = smu_v14_0_init_microcode,
+ .load_microcode = smu_v14_0_load_microcode,
+ .fini_microcode = smu_v14_0_fini_microcode,
+ .init_smc_tables = smu_v14_0_2_init_smc_tables,
+ .fini_smc_tables = smu_v14_0_fini_smc_tables,
+ .init_power = smu_v14_0_init_power,
+ .fini_power = smu_v14_0_fini_power,
+ .check_fw_status = smu_v14_0_check_fw_status,
+ .setup_pptable = smu_v14_0_2_setup_pptable,
+ .check_fw_version = smu_v14_0_check_fw_version,
+ .write_pptable = smu_cmn_write_pptable,
+ .set_driver_table_location = smu_v14_0_set_driver_table_location,
+ .system_features_control = smu_v14_0_system_features_control,
+ .set_allowed_mask = smu_v14_0_set_allowed_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
+ .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
+ .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
+ .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
+ .read_sensor = smu_v14_0_2_read_sensor,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .print_clk_levels = smu_v14_0_2_print_clk_levels,
+ .force_clk_levels = smu_v14_0_2_force_clk_levels,
+ .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
+ .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
+ .register_irq_handler = smu_v14_0_register_irq_handler,
+ .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
+ .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
+ .init_pptable_microcode = smu_v14_0_init_pptable_microcode,
+ .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
+ .set_performance_level = smu_v14_0_set_performance_level,
+ .gfx_off_control = smu_v14_0_gfx_off_control,
+ .get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_power_limit = smu_v14_0_2_get_power_limit,
+ .set_power_limit = smu_v14_0_set_power_limit,
+ .set_power_source = smu_v14_0_set_power_source,
+ .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
+ .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
+ .run_btc = smu_v14_0_run_btc,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .set_tool_table_location = smu_v14_0_set_tool_table_location,
+ .deep_sleep_control = smu_v14_0_deep_sleep_control,
+ .gfx_ulv_control = smu_v14_0_gfx_ulv_control,
+ .get_bamaco_support = smu_v14_0_get_bamaco_support,
+ .baco_get_state = smu_v14_0_baco_get_state,
+ .baco_set_state = smu_v14_0_baco_set_state,
+ .baco_enter = smu_v14_0_2_baco_enter,
+ .baco_exit = smu_v14_0_2_baco_exit,
+ .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
+ .mode1_reset = smu_v14_0_2_mode1_reset,
+ .mode2_reset = smu_v14_0_2_mode2_reset,
+ .enable_gfx_features = smu_v14_0_2_enable_gfx_features,
+ .set_mp1_state = smu_v14_0_2_set_mp1_state,
+ .set_df_cstate = smu_v14_0_2_set_df_cstate,
+ .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
+ .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
+ .gpo_control = smu_v14_0_gpo_control,
+ .get_ecc_info = smu_v14_0_2_get_ecc_info,
+};
+
+void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
+ smu->message_map = smu_v14_0_2_message_map;
+ smu->clock_map = smu_v14_0_2_clk_map;
+ smu->feature_map = smu_v14_0_2_feature_mask_map;
+ smu->table_map = smu_v14_0_2_table_map;
+ smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
+ smu->workload_map = smu_v14_0_2_workload_map;
+ smu_v14_0_2_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h
new file mode 100644
index 000000000000..b83729e5d6f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_2_PPT_H__
+#define __SMU_V14_0_2_PPT_H__
+
+extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index b8dbd4e25348..6d1c3af927ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -235,6 +235,50 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
WREG32(smu->msg_reg, msg);
}
+static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
+ enum smu_message_type msg)
+{
+ return smu->message_map[msg].flags;
+}
+
+static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
+ enum smu_message_type msg, bool *poll)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t flags, resp;
+ bool fed_status;
+
+ flags = __smu_cmn_get_msg_flags(smu, msg);
+ *poll = true;
+
+ /* When there is a RAS fatal error, the FW won't process non-RAS-priority
+ * messages. Don't allow any messages other than RAS priority messages.
+ */
+ fed_status = amdgpu_ras_get_fed_status(adev);
+ if (fed_status) {
+ if (!(flags & SMU_MSG_RAS_PRI)) {
+ dev_dbg(adev->dev,
+ "RAS error detected, skip sending %s",
+ smu_get_message_name(smu, msg));
+ return -EACCES;
+ }
+
+ /* The FW will ignore non-priority messages when a RAS fatal error
+ * is detected, so a previous message may not have received a
+ * response. Allow priority messages to continue without polling
+ * for the response status.
+ */
+ resp = RREG32(smu->resp_reg);
+ dev_dbg(adev->dev,
+ "Sending RAS priority message %s response status: %x",
+ smu_get_message_name(smu, msg), resp);
+ if (resp == 0)
+ *poll = false;
+ }
+
+ return 0;
+}
+
static int __smu_cmn_send_debug_msg(struct smu_context *smu,
u32 msg,
u32 param)
@@ -354,6 +398,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
int res, index;
+ bool poll = true;
u32 reg;
if (adev->no_hw_access)
@@ -366,12 +411,20 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- res == -EREMOTEIO) {
- __smu_cmn_reg_print_error(smu, reg, index, param, msg);
- goto Out;
+
+ if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
+ res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
+ if (res)
+ goto Out;
+ }
+
+ if (poll) {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
+ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+ goto Out;
+ }
}
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
@@ -437,7 +490,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return -EINVAL;
if (amdgpu_sriov_vf(smu->adev) &&
- !msg_mapping.valid_in_vf)
+ !(msg_mapping.flags & SMU_MSG_VF_FLAG))
return -EACCES;
return msg_mapping.map_to;
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 1931a7fa1a14..cf5287fcbbc2 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := \
- -I $(srctree)/$(src)/../include \
- -I $(srctree)/$(src)
+ -I $(src)/../include \
+ -I $(src)
komeda-y := \
komeda_drv.o \
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 42510fdea27e..67e5d3b4190f 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -4,6 +4,8 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+
+#include <linux/seq_file.h>
#include "d71_dev.h"
#include "komeda_kms.h"
#include "malidp_io.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 4b7d94961527..00f5864a0495 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -5,6 +5,7 @@
*
*/
#include <linux/of.h>
+#include <linux/seq_file.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 626709bec6f5..2577f0cef8fc 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ connector->state = NULL;
+
+ if (mw_state)
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index 29f4b52e3c8d..a763349dd89f 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -5,6 +5,7 @@
*/
#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 5a53ce51fb24..d794c076bc24 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,14 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o
+ast-y := \
+ ast_ddc.o \
+ ast_dp501.o \
+ ast_dp.o \
+ ast_drv.o \
+ ast_main.o \
+ ast_mm.o \
+ ast_mode.o \
+ ast_post.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_ddc.c
index e5d3f7121de4..29cf5d157f34 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_ddc.c
@@ -21,20 +21,31 @@
* of the Software.
*/
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include "ast_ddc.h"
#include "ast_drv.h"
-static void ast_i2c_setsda(void *i2c_priv, int data)
+struct ast_ddc {
+ struct ast_device *ast;
+
+ struct i2c_algo_bit_data bit;
+ struct i2c_adapter adapter;
+};
+
+static void ast_ddc_algo_bit_data_setsda(void *data, int state)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ujcrb7 = ((state & 0x01) ? 0 : 1) << 2;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x04);
if (ujcrb7 == jtemp)
@@ -42,15 +53,15 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
}
}
-static void ast_i2c_setscl(void *i2c_priv, int clock)
+static void ast_ddc_algo_bit_data_setscl(void *data, int state)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ujcrb7 = ((state & 0x01) ? 0 : 1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x01);
if (ujcrb7 == jtemp)
@@ -58,10 +69,32 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
}
}
-static int ast_i2c_getsda(void *i2c_priv)
+static int ast_ddc_algo_bit_data_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct ast_ddc *ddc = i2c_get_adapdata(adapter);
+ struct ast_device *ast = ddc->ast;
+
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&ast->modeset_lock);
+
+ return 0;
+}
+
+static void ast_ddc_algo_bit_data_post_xfer(struct i2c_adapter *adapter)
+{
+ struct ast_ddc *ddc = i2c_get_adapdata(adapter);
+ struct ast_device *ast = ddc->ast;
+
+ mutex_unlock(&ast->modeset_lock);
+}
+
+static int ast_ddc_algo_bit_data_getsda(void *data)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
uint32_t val, val2, count, pass;
count = 0;
@@ -80,10 +113,10 @@ static int ast_i2c_getsda(void *i2c_priv)
return val & 1 ? 1 : 0;
}
-static int ast_i2c_getscl(void *i2c_priv)
+static int ast_ddc_algo_bit_data_getscl(void *data)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
uint32_t val, val2, count, pass;
count = 0;
@@ -102,50 +135,53 @@ static int ast_i2c_getscl(void *i2c_priv)
return val & 1 ? 1 : 0;
}
-static void ast_i2c_release(struct drm_device *dev, void *res)
+static void ast_ddc_release(struct drm_device *dev, void *res)
{
- struct ast_i2c_chan *i2c = res;
+ struct ast_ddc *ddc = res;
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
+ i2c_del_adapter(&ddc->adapter);
}
-struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+struct i2c_adapter *ast_ddc_create(struct ast_device *ast)
{
- struct ast_i2c_chan *i2c;
+ struct drm_device *dev = &ast->base;
+ struct ast_ddc *ddc;
+ struct i2c_adapter *adapter;
+ struct i2c_algo_bit_data *bit;
int ret;
- i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
- if (!i2c)
- return NULL;
-
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.dev.parent = dev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "AST i2c bit bus");
- i2c->adapter.algo_data = &i2c->bit;
-
- i2c->bit.udelay = 20;
- i2c->bit.timeout = 2;
- i2c->bit.data = i2c;
- i2c->bit.setsda = ast_i2c_setsda;
- i2c->bit.setscl = ast_i2c_setscl;
- i2c->bit.getsda = ast_i2c_getsda;
- i2c->bit.getscl = ast_i2c_getscl;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ ddc = drmm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return ERR_PTR(-ENOMEM);
+ ddc->ast = ast;
+
+ bit = &ddc->bit;
+ bit->data = ddc;
+ bit->setsda = ast_ddc_algo_bit_data_setsda;
+ bit->setscl = ast_ddc_algo_bit_data_setscl;
+ bit->getsda = ast_ddc_algo_bit_data_getsda;
+ bit->getscl = ast_ddc_algo_bit_data_getscl;
+ bit->pre_xfer = ast_ddc_algo_bit_data_pre_xfer;
+ bit->post_xfer = ast_ddc_algo_bit_data_post_xfer;
+ bit->udelay = 20;
+ bit->timeout = usecs_to_jiffies(2200);
+
+ adapter = &ddc->adapter;
+ adapter->owner = THIS_MODULE;
+ adapter->algo_data = bit;
+ adapter->dev.parent = dev->dev;
+ snprintf(adapter->name, sizeof(adapter->name), "AST DDC bus");
+ i2c_set_adapdata(adapter, ddc);
+
+ ret = i2c_bit_add_bus(adapter);
if (ret) {
drm_err(dev, "Failed to register bit i2c\n");
- goto out_kfree;
+ return ERR_PTR(ret);
}
- ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
+ ret = drmm_add_action_or_reset(dev, ast_ddc_release, ddc);
if (ret)
- return NULL;
- return i2c;
+ return ERR_PTR(ret);
-out_kfree:
- kfree(i2c);
- return NULL;
+ return &ddc->adapter;
}
diff --git a/drivers/gpu/drm/ast/ast_ddc.h b/drivers/gpu/drm/ast/ast_ddc.h
new file mode 100644
index 000000000000..85c93edc9ae1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ddc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __AST_DDC_H__
+#define __AST_DDC_H__
+
+struct ast_device;
+struct i2c_adapter;
+
+struct i2c_adapter *ast_ddc_create(struct ast_device *ast);
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index ebb6d8ebd44e..1e9259416980 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
+ if (++i > 200)
+ break;
}
}
}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 90bcb1eb9cd9..f8c49ba68e78 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -27,6 +27,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 3be5ccf1f5f4..ba3d86973995 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,8 +28,6 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/types.h>
@@ -149,37 +147,9 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
}
/*
- * Connector with i2c channel
+ * BMC
*/
-struct ast_i2c_chan {
- struct i2c_adapter adapter;
- struct drm_device *dev;
- struct i2c_algo_bit_data bit;
-};
-
-struct ast_vga_connector {
- struct drm_connector base;
- struct ast_i2c_chan *i2c;
-};
-
-static inline struct ast_vga_connector *
-to_ast_vga_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct ast_vga_connector, base);
-}
-
-struct ast_sil164_connector {
- struct drm_connector base;
- struct ast_i2c_chan *i2c;
-};
-
-static inline struct ast_sil164_connector *
-to_ast_sil164_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct ast_sil164_connector, base);
-}
-
struct ast_bmc_connector {
struct drm_connector base;
struct drm_connector *physical_connector;
@@ -222,11 +192,11 @@ struct ast_device {
struct {
struct {
struct drm_encoder encoder;
- struct ast_vga_connector vga_connector;
+ struct drm_connector connector;
} vga;
struct {
struct drm_encoder encoder;
- struct ast_sil164_connector sil164_connector;
+ struct drm_connector connector;
} sil164;
struct {
struct drm_encoder encoder;
@@ -498,9 +468,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
-/* ast_i2c.c */
-struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
-
/* aspeed DP */
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 2f3ad5f949fc..0637abb70361 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -26,6 +26,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a718646a66b8..6695af70768f 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -43,9 +43,11 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "ast_ddc.h"
#include "ast_drv.h"
#include "ast_tables.h"
@@ -700,12 +702,29 @@ static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x1, 0xdf, 0x20);
}
+static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+
+ if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ return 0;
+ }
+ return -ENODEV;
+}
+
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
+ .get_scanout_buffer = ast_primary_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs ast_primary_plane_funcs = {
@@ -1343,43 +1362,9 @@ static int ast_crtc_init(struct drm_device *dev)
* VGA Connector
*/
-static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
- struct edid *edid;
- int count;
-
- if (!ast_vga_connector->i2c)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
- if (!edid)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
- .get_modes = ast_vga_connector_helper_get_modes,
+ .get_modes = drm_connector_helper_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs ast_vga_connector_funcs = {
@@ -1390,23 +1375,21 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_vga_connector_init(struct drm_device *dev,
- struct ast_vga_connector *ast_vga_connector)
+static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
- struct drm_connector *connector = &ast_vga_connector->base;
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
int ret;
- ast_vga_connector->i2c = ast_i2c_create(dev);
- if (!ast_vga_connector->i2c)
- drm_err(dev, "failed to add ddc bus for connector\n");
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
- if (ast_vga_connector->i2c)
- ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &ast_vga_connector->i2c->adapter);
- else
- ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
if (ret)
return ret;
@@ -1415,7 +1398,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1425,8 +1408,7 @@ static int ast_vga_output_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.vga.encoder;
- struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector;
- struct drm_connector *connector = &ast_vga_connector->base;
+ struct drm_connector *connector = &ast->output.vga.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
@@ -1434,7 +1416,7 @@ static int ast_vga_output_init(struct ast_device *ast)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_vga_connector_init(dev, ast_vga_connector);
+ ret = ast_vga_connector_init(dev, connector);
if (ret)
return ret;
@@ -1449,43 +1431,9 @@ static int ast_vga_output_init(struct ast_device *ast)
* SIL164 Connector
*/
-static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
- struct edid *edid;
- int count;
-
- if (!ast_sil164_connector->i2c)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
- if (!edid)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
- .get_modes = ast_sil164_connector_helper_get_modes,
+ .get_modes = drm_connector_helper_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs ast_sil164_connector_funcs = {
@@ -1496,23 +1444,21 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_sil164_connector_init(struct drm_device *dev,
- struct ast_sil164_connector *ast_sil164_connector)
+static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
- struct drm_connector *connector = &ast_sil164_connector->base;
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
int ret;
- ast_sil164_connector->i2c = ast_i2c_create(dev);
- if (!ast_sil164_connector->i2c)
- drm_err(dev, "failed to add ddc bus for connector\n");
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
- if (ast_sil164_connector->i2c)
- ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII,
- &ast_sil164_connector->i2c->adapter);
- else
- ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII);
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII, ddc);
if (ret)
return ret;
@@ -1521,7 +1467,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1531,8 +1477,7 @@ static int ast_sil164_output_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.sil164.encoder;
- struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector;
- struct drm_connector *connector = &ast_sil164_connector->base;
+ struct drm_connector *connector = &ast->output.sil164.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1540,7 +1485,7 @@ static int ast_sil164_output_init(struct ast_device *ast)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_sil164_connector_init(dev, ast_sil164_connector);
+ ret = ast_sil164_connector_init(dev, connector);
if (ret)
return ret;
@@ -1952,13 +1897,13 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.vga.vga_connector.base;
+ physical_connector = &ast->output.vga.connector;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.sil164.sil164_connector.base;
+ physical_connector = &ast->output.sil164.connector;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
@@ -1978,7 +1923,9 @@ int ast_mode_config_init(struct ast_device *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
+ ret = drmm_kms_helper_poll_init(dev);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index efd996f6c138..c621be1a99a8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -96,9 +96,8 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
- select DRM_DP_HELPER
select EXTCON
select CRYPTO
select CRYPTO_HASH
@@ -190,6 +189,13 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
+config DRM_MICROCHIP_LVDS_SERIALIZER
+ tristate "Microchip LVDS serializer support"
+ depends on OF
+ depends on DRM_ATMEL_HLCDC
+ help
+ Support for Microchip's LVDS serializer.
+
config DRM_NWL_MIPI_DSI
tristate "Northwest Logic MIPI DSI Host controller"
depends on DRM
@@ -229,7 +235,7 @@ config DRM_PARADE_PS8640
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@@ -389,7 +395,7 @@ config DRM_TI_SN65DSI86
select DRM_PANEL
select DRM_MIPI_DSI
select AUXILIARY_BUS
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 017b5832733b..7df87b582dca 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
+obj-$(CONFIG_DRM_MICROCHIP_LVDS_SERIALIZER) += microchip-lvds.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 39c9ece373b0..ea271f62b214 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -356,6 +356,7 @@ struct adv7511 {
enum drm_connector_status status;
bool powered;
+ struct drm_bridge *next_bridge;
struct drm_display_mode curr_mode;
unsigned int f_tmds;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index b5518ff97165..dd21b81bd28f 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -477,6 +478,11 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
if (ret < 0)
return ret;
+ /* If there is no IRQ to handle, exit indicating no IRQ data */
+ if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) &&
+ !(irq1 & ADV7511_INT1_DDC_ERROR))
+ return -ENODATA;
+
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
@@ -946,6 +952,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
+ if (adv->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, flags);
+ if (ret)
+ return ret;
+ }
+
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
ret = adv7511_connector_init(adv);
if (ret < 0)
@@ -1216,6 +1228,11 @@ static int adv7511_probe(struct i2c_client *i2c)
memset(&link_config, 0, sizeof(link_config));
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+ &adv7511->next_bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+
if (adv7511->info->link_config)
ret = adv7511_parse_dt(dev->of_node, &link_config);
else
@@ -1318,7 +1335,8 @@ static int adv7511_probe(struct i2c_client *i2c)
ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(dev),
adv7511);
if (ret)
goto err_unregister_audio;
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index 173dada218ec..4846b2e9be7c 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -37,7 +37,7 @@ config DRM_ANALOGIX_ANX7625
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_MIPI_DSI
help
ANX7625 is an ultra-low power 4K mobile HD transmitter
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 9d96d28d6fe8..59e9ad349969 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2066,10 +2066,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
};
host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
@@ -2471,15 +2469,22 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
mutex_unlock(&ctx->aux_lock);
}
+static void
+anx7625_audio_update_connector_status(struct anx7625_data *ctx,
+ enum drm_connector_status status);
+
static enum drm_connector_status
anx7625_bridge_detect(struct drm_bridge *bridge)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
+ enum drm_connector_status status;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
- return anx7625_sink_detect(ctx);
+ status = anx7625_sink_detect(ctx);
+ anx7625_audio_update_connector_status(ctx, status);
+ return status;
}
static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index e226acc5c15e..8a91ef0ae065 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2059,6 +2059,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+ if (!mhdp_state->current_mode)
+ return;
+
drm_mode_set_name(mhdp_state->current_mode);
dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 82d23e4df09e..9eecac457dcf 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) {
@@ -783,7 +781,6 @@ static struct mipi_dsi_driver chipone_dsi_driver = {
.remove = chipone_dsi_remove,
.driver = {
.name = "chipone-icn6211",
- .owner = THIS_MODULE,
.of_match_table = chipone_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 5965e8027529..8dd89efa8ea7 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -8,8 +8,8 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
- select DRM_IMX8MP_HDMI_PVI
- select PHY_FSL_SAMSUNG_HDMI_PHY
+ imply DRM_IMX8MP_HDMI_PVI
+ imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index f2a09c879e3d..073e64dc200c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -173,15 +173,13 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
return 0;
}
-static int imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
+static void imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi_pvi *pvi = platform_get_drvdata(pdev);
drm_bridge_remove(&pvi->bridge);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id imx8mp_hdmi_pvi_match[] = {
@@ -195,7 +193,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match);
static struct platform_driver imx8mp_hdmi_pvi_driver = {
.probe = imx8mp_hdmi_pvi_probe,
- .remove = imx8mp_hdmi_pvi_remove,
+ .remove_new = imx8mp_hdmi_pvi_remove,
.driver = {
.name = "imx-hdmi-pvi",
.of_match_table = imx8mp_hdmi_pvi_match,
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 89fc432ac611..13bc570c5473 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -104,13 +104,11 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int imx8mp_dw_hdmi_remove(struct platform_device *pdev)
+static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
dw_hdmi_remove(hdmi->dw_hdmi);
-
- return 0;
}
static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev)
@@ -140,7 +138,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table);
static struct platform_driver imx8mp_dw_hdmi_platform_driver = {
.probe = imx8mp_dw_hdmi_probe,
- .remove = imx8mp_dw_hdmi_remove,
+ .remove_new = imx8mp_dw_hdmi_remove,
.driver = {
.name = "imx8mp-dw-hdmi-tx",
.of_match_table = imx8mp_dw_hdmi_of_table,
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 27334173e911..3f68c82888c2 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3,6 +3,7 @@
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 1c3433b5e366..925e42f46cd8 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1540,12 +1540,6 @@ static int it66121_probe(struct i2c_client *client)
return -EINVAL;
}
- if (!of_device_is_available(ep)) {
- of_node_put(ep);
- dev_err(ctx->dev, "The remote device is disabled\n");
- return -ENODEV;
- }
-
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
if (!ctx->next_bridge) {
@@ -1586,13 +1580,18 @@ static int it66121_probe(struct i2c_client *client)
ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
-
- ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler,
- IRQF_ONESHOT, dev_name(dev), ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
- return ret;
+ ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ if (client->irq > 0) {
+ ctx->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ it66121_irq_threaded_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
+ return ret;
+ }
}
it66121_audio_codec_init(ctx, dev);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 4b2ae27f0a57..1a9defa15663 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -494,10 +494,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
};
host = of_find_mipi_dsi_host_by_node(lt->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a9c7e2b07ea1..b99fe87ec738 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -761,10 +761,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(lt9611->dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4f593ad8f79..ab702471f3ab 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -266,10 +266,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
new file mode 100644
index 000000000000..b8313dad6072
--- /dev/null
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Manikandan Muralidharan <manikandan.m@microchip.com>
+ * Author: Dharma Balasubiramani <dharma.b@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define LVDS_POLL_TIMEOUT_MS 1000
+
+/* LVDSC register offsets */
+#define LVDSC_CR 0x00
+#define LVDSC_CFGR 0x04
+#define LVDSC_SR 0x0C
+#define LVDSC_WPMR 0xE4
+
+/* Bitfields in LVDSC_CR (Control Register) */
+#define LVDSC_CR_SER_EN BIT(0)
+
+/* Bitfields in LVDSC_CFGR (Configuration Register) */
+#define LVDSC_CFGR_PIXSIZE_24BITS 0
+#define LVDSC_CFGR_DEN_POL_HIGH 0
+#define LVDSC_CFGR_DC_UNBALANCED 0
+#define LVDSC_CFGR_MAPPING_JEIDA BIT(6)
+
+/* Bitfields in LVDSC_SR (Status Register) */
+#define LVDSC_SR_CS BIT(0)
+
+/* Bitfields in LVDSC_WPMR (Write Protection Mode Register) */
+#define LVDSC_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define LVDSC_WPMR_WPKEY_PSSWD 0x4C5644
+
+struct mchp_lvds {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *pclk;
+ struct drm_panel *panel;
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static inline struct mchp_lvds *bridge_to_lvds(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct mchp_lvds, bridge);
+}
+
+static inline u32 lvds_readl(struct mchp_lvds *lvds, u32 offset)
+{
+ return readl_relaxed(lvds->regs + offset);
+}
+
+static inline void lvds_writel(struct mchp_lvds *lvds, u32 offset, u32 val)
+{
+ writel_relaxed(val, lvds->regs + offset);
+}
+
+static void lvds_serialiser_on(struct mchp_lvds *lvds)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(LVDS_POLL_TIMEOUT_MS);
+
+ /* The LVDSC registers can only be written if WPEN is cleared */
+ lvds_writel(lvds, LVDSC_WPMR, (LVDSC_WPMR_WPKEY_PSSWD &
+ LVDSC_WPMR_WPKEY_MASK));
+
+ /* Wait for the configuration status bit (LVDSC_SR_CS) to clear */
+ while (lvds_readl(lvds, LVDSC_SR) & LVDSC_SR_CS) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(lvds->dev, "%s: timeout error\n", __func__);
+ return;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ /* Configure the LVDSC */
+ lvds_writel(lvds, LVDSC_CFGR, (LVDSC_CFGR_MAPPING_JEIDA |
+ LVDSC_CFGR_DC_UNBALANCED |
+ LVDSC_CFGR_DEN_POL_HIGH |
+ LVDSC_CFGR_PIXSIZE_24BITS));
+
+ /* Enable the LVDS serializer */
+ lvds_writel(lvds, LVDSC_CR, LVDSC_CR_SER_EN);
+}
+
+static int mchp_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ bridge, flags);
+}
+
+static void mchp_lvds_enable(struct drm_bridge *bridge)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+ int ret;
+
+ ret = clk_prepare_enable(lvds->pclk);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ return;
+ }
+
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ return;
+ }
+
+ lvds_serialiser_on(lvds);
+}
+
+static void mchp_lvds_disable(struct drm_bridge *bridge)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+
+ pm_runtime_put(lvds->dev);
+ clk_disable_unprepare(lvds->pclk);
+}
+
+static const struct drm_bridge_funcs mchp_lvds_bridge_funcs = {
+ .attach = mchp_lvds_attach,
+ .enable = mchp_lvds_enable,
+ .disable = mchp_lvds_disable,
+};
+
+static int mchp_lvds_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_lvds *lvds;
+ struct device_node *port;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = dev;
+
+ lvds->regs = devm_ioremap_resource(lvds->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(lvds->dev, "pclk");
+ if (IS_ERR(lvds->pclk))
+ return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk),
+ "could not get pclk_lvds\n");
+
+ port = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (!port) {
+ dev_err(dev,
+ "can't find port point, please init lvds panel port!\n");
+ return -ENODEV;
+ }
+
+ lvds->panel = of_drm_find_panel(port);
+ of_node_put(port);
+
+ if (IS_ERR(lvds->panel))
+ return -EPROBE_DEFER;
+
+ lvds->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+
+ if (IS_ERR(lvds->panel_bridge))
+ return PTR_ERR(lvds->panel_bridge);
+
+ lvds->bridge.of_node = dev->of_node;
+ lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
+ lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
+
+ dev_set_drvdata(dev, lvds);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to enable pm runtime: %d\n", ret);
+ return ret;
+ }
+
+ drm_bridge_add(&lvds->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_lvds_dt_ids[] = {
+ {
+ .compatible = "microchip,sam9x75-lvds",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mchp_lvds_dt_ids);
+
+static struct platform_driver mchp_lvds_driver = {
+ .probe = mchp_lvds_probe,
+ .driver = {
+ .name = "microchip-lvds",
+ .of_match_table = mchp_lvds_dt_ids,
+ },
+};
+module_platform_driver(mchp_lvds_driver);
+
+MODULE_AUTHOR("Manikandan Muralidharan <manikandan.m@microchip.com>");
+MODULE_AUTHOR("Dharma Balasubiramani <dharma.b@microchip.com>");
+MODULE_DESCRIPTION("Low Voltage Differential Signaling Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 7f41525f7a6e..32506524d9a2 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,6 +4,8 @@
* Copyright (C) 2017 Broadcom
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 8f84e98249c7..2fbeda9025bf 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1092,7 +1092,7 @@ static int sii902x_init(struct sii902x *sii902x)
}
sii902x->i2cmux->priv = sii902x;
- ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0);
if (ret)
goto err_unreg_audio;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index cceb5aab6c83..9f2bc932c371 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3291,40 +3291,17 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi)
static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
{
- struct device_node *endpoint;
struct device_node *remote;
if (!hdmi->plat_data->output_port)
return 0;
- endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node,
- hdmi->plat_data->output_port,
- -1);
- if (!endpoint) {
- /*
- * On platforms whose bindings don't make the output port
- * mandatory (such as Rockchip) the plat_data->output_port
- * field isn't set, so it's safe to make this a fatal error.
- */
- dev_err(hdmi->dev, "Missing endpoint in port@%u\n",
- hdmi->plat_data->output_port);
- return -ENODEV;
- }
- remote = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!remote) {
- dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n",
- hdmi->plat_data->output_port);
+ remote = of_graph_get_remote_node(hdmi->dev->of_node,
+ hdmi->plat_data->output_port,
+ -1);
+ if (!remote)
return -ENODEV;
- }
-
- if (!of_device_is_available(remote)) {
- dev_err(hdmi->dev, "port@%u remote device is disabled\n",
- hdmi->plat_data->output_port);
- of_node_put(remote);
- return -ENODEV;
- }
hdmi->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
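of_graph_get_remote_node() already returns NULL when the endpoint is missing or the remote device is marked disabled, which is why the separate of_device_is_available() check could be dropped above. A condensed sketch of the resulting lookup; the port number and helper name are placeholders:

/* Sketch: resolve the downstream bridge through the OF graph. */
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>

static struct drm_bridge *demo_find_next_bridge(struct device_node *np)
{
	struct device_node *remote;
	struct drm_bridge *bridge;

	/* Port 1, any endpoint; NULL covers missing and disabled remotes. */
	remote = of_graph_get_remote_node(np, 1, -1);
	if (!remote)
		return ERR_PTR(-ENODEV);

	bridge = of_drm_find_bridge(remote);
	of_node_put(remote);

	/* Bridge not registered yet: let the caller defer probing. */
	return bridge ?: ERR_PTR(-EPROBE_DEFER);
}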
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index deccb3995022..3d3d135b4348 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -401,7 +401,6 @@ static struct mipi_dsi_driver tc358764_driver = {
.remove = tc358764_remove,
.driver = {
.name = "tc358764",
- .owner = THIS_MODULE,
.of_match_table = tc358764_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 90a89d70d832..3b7cc3be2ccd 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -107,6 +108,7 @@
#define RDPKTLN 0x0404 /* Command Read Packet Length */
#define VPCTRL 0x0450 /* Video Path Control */
+#define EVTMODE BIT(5) /* Video event mode enable, tc35876x only */
#define HTIM1 0x0454 /* Horizontal Timing Control 1 */
#define HTIM2 0x0458 /* Horizontal Timing Control 2 */
#define VTIM1 0x045C /* Vertical Timing Control 1 */
@@ -254,6 +256,11 @@ enum tc358775_ports {
TC358775_LVDS_OUT1,
};
+enum tc3587x5_type {
+ TC358765 = 0x65,
+ TC358775 = 0x75,
+};
+
struct tc_data {
struct i2c_client *i2c;
struct device *dev;
@@ -271,6 +278,8 @@ struct tc_data {
struct gpio_desc *stby_gpio;
u8 lvds_link; /* single-link or dual-link */
u8 bpc;
+
+ enum tc3587x5_type type;
};
static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
@@ -424,10 +433,16 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, PPI_STARTPPI, PPI_START_FUNCTION);
d2l_write(tc->i2c, DSI_STARTDSI, DSI_RX_START);
+ /* Video event mode vs pulse mode bit, does not exist for tc358775 */
+ if (tc->type == TC358765)
+ val = EVTMODE;
+ else
+ val = 0;
+
if (tc->bpc == 8)
- val = TC358775_VPCTRL_OPXLFMT(1);
+ val |= TC358775_VPCTRL_OPXLFMT(1);
else /* bpc = 6; */
- val = TC358775_VPCTRL_MSF(1);
+ val |= TC358775_VPCTRL_MSF(1);
dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000;
clkdiv = dsiclk / (tc->lvds_link == DUAL_LINK ? DIVIDE_BY_6 : DIVIDE_BY_3);
@@ -454,10 +469,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
connector->display_info.bus_formats[0],
tc->bpc);
- /*
- * Default hardware register settings of tc358775 configured
- * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
- */
if (connector->display_info.bus_formats[0] ==
MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
/* VESA-24 */
@@ -468,14 +479,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
- } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
- d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
- d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
- d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
- d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
- d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
- d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
- d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
+ } else {
+ /* JEIDA-18 and JEIDA-24 */
+ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
+ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
+ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
+ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
}
d2l_write(tc->i2c, VFUEN, VFUEN_EN);
@@ -528,27 +540,24 @@ tc_mode_valid(struct drm_bridge *bridge,
static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
{
struct device_node *endpoint;
- struct device_node *parent;
struct device_node *remote;
int dsi_lanes = -1;
- /*
- * To get the data-lanes of dsi, we need to access the dsi0_out of port1
- * of dsi0 endpoint from bridge port0 of d2l_in
- */
endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node,
TC358775_DSI_IN, -1);
- if (endpoint) {
- /* dsi0_out node */
- parent = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (parent) {
- /* dsi0 port 1 */
- dsi_lanes = drm_of_get_data_lanes_count_ep(parent, 1, -1, 1, 4);
- of_node_put(parent);
- }
+ dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
+
+ /* Quirk old dtb: Use data lanes from the DSI host side instead of bridge */
+ if (dsi_lanes == -EINVAL || dsi_lanes == -ENODEV) {
+ remote = of_graph_get_remote_endpoint(endpoint);
+ dsi_lanes = drm_of_get_data_lanes_count(remote, 1, 4);
+ of_node_put(remote);
+ if (dsi_lanes >= 1)
+ dev_warn(tc->dev, "no dsi-lanes for the bridge, using host lanes\n");
}
+ of_node_put(endpoint);
+
if (dsi_lanes < 0)
return dsi_lanes;
@@ -610,10 +619,8 @@ static int tc_attach_host(struct tc_data *tc)
};
host = of_find_mipi_dsi_host_by_node(tc->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
@@ -625,7 +632,21 @@ static int tc_attach_host(struct tc_data *tc)
dsi->lanes = tc->num_dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ /*
+ * The hs_rate and lp_rate are data rate values. The HS mode is
+ * differential, while the LP mode is single ended. As the HS mode
+ * uses DDR, the DSI clock frequency is half the hs_rate. The 10 Mb/s
+ * data rate for LP mode is not specified in the bridge data sheet,
+ * but seems to be part of the MIPI DSI spec.
+ */
+ if (tc->type == TC358765)
+ dsi->hs_rate = 800000000;
+ else
+ dsi->hs_rate = 1000000000;
+ dsi->lp_rate = 10000000;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
@@ -648,6 +669,7 @@ static int tc_probe(struct i2c_client *client)
tc->dev = dev;
tc->i2c = client;
+ tc->type = (enum tc3587x5_type)(unsigned long)of_device_get_match_data(dev);
tc->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node,
TC358775_LVDS_OUT0, 0);
@@ -672,12 +694,9 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->stby_gpio = devm_gpiod_get(dev, "stby", GPIOD_OUT_HIGH);
- if (IS_ERR(tc->stby_gpio)) {
- ret = PTR_ERR(tc->stby_gpio);
- dev_err(dev, "cannot get stby-gpio %d\n", ret);
- return ret;
- }
+ tc->stby_gpio = devm_gpiod_get_optional(dev, "stby", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->stby_gpio))
+ return PTR_ERR(tc->stby_gpio);
tc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(tc->reset_gpio)) {
@@ -688,6 +707,7 @@ static int tc_probe(struct i2c_client *client)
tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
+ tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
i2c_set_clientdata(client, tc);
@@ -711,13 +731,15 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
- { "tc358775", 0 },
+ { "tc358765", TC358765, },
+ { "tc358775", TC358775, },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358775_i2c_ids);
static const struct of_device_id tc358775_of_ids[] = {
- { .compatible = "toshiba,tc358775", },
+ { .compatible = "toshiba,tc358765", .data = (void *)TC358765, },
+ { .compatible = "toshiba,tc358775", .data = (void *)TC358775, },
{ }
};
MODULE_DEVICE_TABLE(of, tc358775_of_ids);
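The new TC358765 support keys the per-variant behaviour (EVTMODE, hs_rate) off the match data carried in the OF and I2C ID tables, recovered in probe with of_device_get_match_data(). A minimal sketch of that pattern with invented compatible strings:

/* Sketch: encoding a chip variant in the match table's .data field. */
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_device.h>

enum demo_variant { DEMO_CHIP_65 = 0x65, DEMO_CHIP_75 = 0x75 };

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "vendor,demo-chip65", .data = (void *)DEMO_CHIP_65 },
	{ .compatible = "vendor,demo-chip75", .data = (void *)DEMO_CHIP_75 },
	{ }
};

static enum demo_variant demo_get_variant(struct device *dev)
{
	/* Returns the matching entry's .data, or NULL if nothing matched. */
	return (enum demo_variant)(unsigned long)of_device_get_match_data(dev);
}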
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index d4c1a601bbb5..674efc489e3a 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -123,26 +123,11 @@ static int thc63_parse_dt(struct thc63_dev *thc63)
struct device_node *endpoint;
struct device_node *remote;
- endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
- THC63_RGB_OUT0, -1);
- if (!endpoint) {
- dev_err(thc63->dev, "Missing endpoint in port@%u\n",
- THC63_RGB_OUT0);
- return -ENODEV;
- }
-
- remote = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
+ remote = of_graph_get_remote_node(thc63->dev->of_node,
+ THC63_RGB_OUT0, -1);
if (!remote) {
- dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
- THC63_RGB_OUT0);
- return -ENODEV;
- }
-
- if (!of_device_is_available(remote)) {
- dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
+ dev_err(thc63->dev, "No remote endpoint for port@%u\n",
THC63_RGB_OUT0);
- of_node_put(remote);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index ca3348109bcd..6b559e071301 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
.channel = 0,
.node = NULL,
};
+ int ret;
host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dlpc->dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dlpc->dsi)) {
@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
dlpc->dsi->lanes = dlpc->dsi_lanes;
- return devm_mipi_dsi_attach(dev, dlpc->dsi);
+ ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+
+ return ret;
}
static int dlpc3433_probe(struct i2c_client *client)
@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
drm_bridge_add(&dlpc->bridge);
ret = dlpc_host_attach(dlpc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+ if (ret)
goto err_remove_bridge;
- }
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 4814b7b6d1fd..57a7ed13f996 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
/* On failure, disable PLL again and exit. */
regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
- regulator_disable(ctx->vcc);
return;
}
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 0857773e5c5f..8bc63912fddb 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -252,11 +252,11 @@ i915:cml:
i915:tgl:
extends:
- .i915
- parallel: 8
+ parallel: 5
variables:
- DEVICE_TYPE: asus-cx9400-volteer
+ DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
GPU_VERSION: tgl
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
+ RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
.amdgpu:
extends:
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index c0f56888c328..864a6488bfdf 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -1,15 +1,36 @@
# SPDX-License-Identifier: MIT
-config DRM_DP_AUX_BUS
+config DRM_DISPLAY_HELPER
tristate
depends on DRM
- depends on OF || COMPILE_TEST
+ help
+ DRM helpers for display adapters.
-config DRM_DISPLAY_HELPER
+config DRM_DISPLAY_DP_AUX_BUS
tristate
depends on DRM
+ depends on OF || COMPILE_TEST
+
+config DRM_DISPLAY_DP_AUX_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ depends on DRM && DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select CEC_CORE
help
- DRM helpers for display adapters.
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even for those
+ that do support this they often do not hook up the CEC pin.
+
+config DRM_DISPLAY_DP_AUX_CHARDEV
+ bool "DRM DP AUX Interface"
+ depends on DRM && DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ help
+ Choose this option to enable a /dev/drm_dp_auxN node that allows to
+ read and write values to arbitrary DPCD registers on the DP aux
+ channel.
config DRM_DISPLAY_DP_HELPER
bool
@@ -25,7 +46,7 @@ config DRM_DISPLAY_DP_TUNNEL
DP tunnel features like the Bandwidth Allocation mode to maximize the
BW utilization for display streams on Thunderbolt links.
-config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
bool "Enable debugging the DP tunnel state"
depends on REF_TRACKER
depends on DRM_DISPLAY_DP_TUNNEL
@@ -49,24 +70,3 @@ config DRM_DISPLAY_HDMI_HELPER
depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDMI.
-
-config DRM_DP_AUX_CHARDEV
- bool "DRM DP AUX Interface"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- help
- Choose this option to enable a /dev/drm_dp_auxN node that allows to
- read and write values to arbitrary DPCD registers on the DP aux
- channel.
-
-config DRM_DP_CEC
- bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
- help
- Choose this option if you want to enable HDMI CEC support for
- DisplayPort/USB-C to HDMI adapters.
-
- Note: not all adapters support this feature, and even for those
- that do support this they often do not hook up the CEC pin.
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 7ca61333c669..17d2cc73ff56 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
-obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
+obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
@@ -14,7 +14,7 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
-drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
-drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index bd61e20770a5..14a2a8473682 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -52,7 +52,7 @@
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_read);
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 266826eac4a7..79a615667aab 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
* call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
* Functions which don't follow this will likely Oops when
- * %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ * %CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV is enabled.
*
* For devices where the AUX channel is a device that exists independently of
* the &drm_device that uses it, such as SoCs and bridge devices, it is
@@ -2281,6 +2281,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
/* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
+ /* MediaTek panels (at least in U3224KBA) require DSC for modes with a short HBLANK on UHBR links. */
+ { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
@@ -2948,6 +2950,43 @@ void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+void drm_dp_as_sdp_log(struct drm_printer *p, const struct drm_dp_as_sdp *as_sdp)
+{
+ drm_printf(p, "DP SDP: AS_SDP, revision %u, length %u\n",
+ as_sdp->revision, as_sdp->length);
+ drm_printf(p, " vtotal: %d\n", as_sdp->vtotal);
+ drm_printf(p, " target_rr: %d\n", as_sdp->target_rr);
+ drm_printf(p, " duration_incr_ms: %d\n", as_sdp->duration_incr_ms);
+ drm_printf(p, " duration_decr_ms: %d\n", as_sdp->duration_decr_ms);
+ drm_printf(p, " operation_mode: %d\n", as_sdp->mode);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_log);
+
+/**
+ * drm_dp_as_sdp_supported() - check if adaptive sync sdp is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if adaptive sync sdp is supported, else returns false
+ */
+bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev,
+ "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
+ return false;
+ }
+
+ return (rx_feature & DP_ADAPTIVE_SYNC_SDP_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_supported);
+
/**
* drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
* @aux: DisplayPort AUX channel
@@ -4111,6 +4150,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
u32 overhead = 1000000;
int symbol_cycles;
+ if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+ DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+ lane_count, hactive,
+ bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+ return 0;
+ }
+
/*
* DP Standard v2.1 2.6.4.1
* SSC downspread and ref clock variation margin:
diff --git a/drivers/gpu/drm/display/drm_dp_helper_internal.h b/drivers/gpu/drm/display/drm_dp_helper_internal.h
index 8917fc3af9ec..737949a2820f 100644
--- a/drivers/gpu/drm/display/drm_dp_helper_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_helper_internal.h
@@ -5,7 +5,7 @@
struct drm_dp_aux;
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 03d528209426..3577786b5db2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2274,7 +2274,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0)
+ drm_dp_mst_port_is_logical(port))
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
@@ -3608,24 +3608,30 @@ fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
/**
- * drm_dp_read_mst_cap() - check whether or not a sink supports MST
+ * drm_dp_read_mst_cap() - Read the sink's MST mode capability
* @aux: The DP AUX channel to use
* @dpcd: A cached copy of the DPCD capabilities for this sink
*
- * Returns: %True if the sink supports MST, %false otherwise
+ * Returns: enum drm_dp_mst_mode to indicate MST mode capability
*/
-bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 mstm_cap;
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
- return false;
+ return DRM_DP_SST;
if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
- return false;
+ return DRM_DP_SST;
+
+ if (mstm_cap & DP_MST_CAP)
+ return DRM_DP_MST;
- return mstm_cap & DP_MST_CAP;
+ if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
+ return DRM_DP_SST_SIDEBAND_MSG;
+
+ return DRM_DP_SST;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);
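With drm_dp_read_mst_cap() now returning an enum, callers must compare against DRM_DP_MST explicitly; treating the return value as a boolean would wrongly count DRM_DP_SST_SIDEBAND_MSG (sideband messaging without MST) as full MST. A sketch of an adapted caller, using the enum values this change introduces:

/* Sketch: caller updated for the enum-returning drm_dp_read_mst_cap(). */
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>

static bool demo_sink_supports_mst(struct drm_dp_aux *aux,
				   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	/* DRM_DP_SST and DRM_DP_SST_SIDEBAND_MSG both stay single-stream. */
	return drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST;
}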
@@ -4213,7 +4219,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
- if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
+ if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -5977,7 +5983,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
return false;
/* Virtual DP Sink (Internal Display Panel) */
- if (port->port_num >= 8)
+ if (drm_dp_mst_port_is_logical(port))
return true;
/* DP-to-HDMI Protocol Converter */
@@ -6005,6 +6011,22 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
}
/**
+ * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
+ * @port: MST port whose parent's AUX device is returned
+ *
+ * Return the AUX device for @port's parent or NULL if port's parent is the
+ * root port.
+ */
+struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
+{
+ if (!port->parent || !port->parent->port_parent)
+ return NULL;
+
+ return &port->parent->port_parent->aux;
+}
+EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);
+
+/**
* drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
* @port: The port to check. A leaf of the MST tree with an attached display.
*
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
index a785ccbfdd73..f41c34e26be2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
@@ -10,7 +10,9 @@
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
-#include <drm/display/drm_dp_mst_helper.h>
+struct drm_dp_sideband_msg_req_body;
+struct drm_dp_sideband_msg_tx;
+struct drm_printer;
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 120e0de674c1..48b2df120086 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -191,7 +191,7 @@ struct drm_dp_tunnel_mgr {
struct drm_dp_tunnel_group *groups;
wait_queue_head_t bw_req_queue;
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
struct ref_tracker_dir ref_tracker;
#endif
};
@@ -385,7 +385,7 @@ static void tunnel_put(struct drm_dp_tunnel *tunnel)
kref_put(&tunnel->kref, free_tunnel);
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
struct ref_tracker **tracker)
{
@@ -436,8 +436,8 @@ EXPORT_SYMBOL(drm_dp_tunnel_get);
/**
* drm_dp_tunnel_put - Put a reference for a DP tunnel
- * @tunnel - Tunnel object
- * @tracker - Debug tracker for the reference
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
*
* Put a reference for @tunnel along with its debug *@tracker, which
* was obtained with drm_dp_tunnel_get().
@@ -1170,7 +1170,7 @@ int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
/**
- * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel
+ * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
* @tunnel: Tunnel object
*
* Get the current BW allocated for @tunnel. After the tunnel is created /
@@ -1603,7 +1603,7 @@ static void cleanup_group(struct drm_dp_tunnel_group *group)
drm_atomic_private_obj_fini(&group->base);
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
const struct drm_dp_tunnel_state *tunnel_state;
@@ -1881,7 +1881,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_exit(&mgr->ref_tracker);
#endif
@@ -1892,6 +1892,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
/**
* drm_dp_tunnel_mgr_create - Create a DP tunnel manager
* @dev: DRM device object
+ * @max_group_count: Maximum number of tunnel groups
*
* Creates a DP tunnel manager for @dev.
*
@@ -1918,7 +1919,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
return NULL;
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
#endif
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 39ef0a6addeb..fb97b51b38f1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -3016,6 +3017,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
int i, ret;
+ unsigned long flags;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
@@ -3099,6 +3101,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
}
}
+ drm_panic_lock(state->dev, flags);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);
@@ -3108,6 +3111,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
state->planes[i].state = old_plane_state;
plane->state = new_plane_state;
}
+ drm_panic_unlock(state->dev, flags);
for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
WARN_ON(obj->state != old_obj_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 29d4940188d4..fc16fddee5c5 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -145,10 +145,10 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
&state->mode, blob->data);
if (ret) {
drm_dbg_atomic(crtc->dev,
- "[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ "[CRTC:%d:%s] invalid mode (%s, %pe): " DRM_MODE_FMT "\n",
crtc->base.id, crtc->name,
- ret, drm_get_mode_status_name(state->mode.status));
- drm_mode_debug_printmodeline(&state->mode);
+ drm_get_mode_status_name(state->mode.status),
+ ERR_PTR(ret), DRM_MODE_ARG(&state->mode));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 521a71c61b16..28abe9aa99ca 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -657,6 +657,13 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* bridge will be called before the previous one to reverse the @pre_enable
* calling direction.
*
+ * Example:
+ * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
+ *
+ * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
+ * resulting @post_disable order would be:
+ * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -687,11 +694,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
*/
list_for_each_entry_from(next, &encoder->bridge_chain,
chain_node) {
- if (next->pre_enable_prev_first) {
+ if (!next->pre_enable_prev_first) {
next = list_prev_entry(next, chain_node);
limit = next;
break;
}
+
+ if (list_is_last(&next->chain_node,
+ &encoder->bridge_chain)) {
+ limit = next;
+ break;
+ }
}
/* Call these bridges in reverse order */
@@ -747,6 +760,13 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* If a bridge sets @pre_enable_prev_first, then the pre_enable for the
* prev bridge will be called before pre_enable of this bridge.
*
+ * Example:
+ * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
+ *
+ * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
+ * resulting @pre_enable order would be:
+ * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
@@ -774,7 +794,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
/* Found first bridge that does NOT
* request prev to be enabled first
*/
- limit = list_prev_entry(next, chain_node);
+ limit = next;
break;
}
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 5ebdd6f8f36e..1daf778cf6fa 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -57,6 +57,16 @@ static void list_insert_sorted(struct drm_buddy *mm,
__list_add(&block->link, node->link.prev, &node->link);
}
+static void clear_reset(struct drm_buddy_block *block)
+{
+ block->header &= ~DRM_BUDDY_HEADER_CLEAR;
+}
+
+static void mark_cleared(struct drm_buddy_block *block)
+{
+ block->header |= DRM_BUDDY_HEADER_CLEAR;
+}
+
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -82,6 +92,133 @@ static void mark_split(struct drm_buddy_block *block)
list_del(&block->link);
}
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+static struct drm_buddy_block *
+__get_buddy(struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static unsigned int __drm_buddy_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ bool force_merge)
+{
+ struct drm_buddy_block *parent;
+ unsigned int order;
+
+ while ((parent = block->parent)) {
+ struct drm_buddy_block *buddy;
+
+ buddy = __get_buddy(block);
+
+ if (!drm_buddy_block_is_free(buddy))
+ break;
+
+ if (!force_merge) {
+ /*
+ * Check the clear state of the block and its buddy and exit
+ * the merge loop if the two states differ.
+ */
+ if (drm_buddy_block_is_clear(block) !=
+ drm_buddy_block_is_clear(buddy))
+ break;
+
+ if (drm_buddy_block_is_clear(block))
+ mark_cleared(parent);
+ }
+
+ list_del(&buddy->link);
+ if (force_merge && drm_buddy_block_is_clear(buddy))
+ mm->clear_avail -= drm_buddy_block_size(mm, buddy);
+
+ drm_block_free(mm, block);
+ drm_block_free(mm, buddy);
+
+ block = parent;
+ }
+
+ order = drm_buddy_block_order(block);
+ mark_free(mm, block);
+
+ return order;
+}
+
+static int __force_merge(struct drm_buddy *mm,
+ u64 start,
+ u64 end,
+ unsigned int min_order)
+{
+ unsigned int order;
+ int i;
+
+ if (!min_order)
+ return -ENOMEM;
+
+ if (min_order > mm->max_order)
+ return -EINVAL;
+
+ for (i = min_order - 1; i >= 0; i--) {
+ struct drm_buddy_block *block, *prev;
+
+ list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
+ struct drm_buddy_block *buddy;
+ u64 block_start, block_end;
+
+ if (!block->parent)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+ if (!contains(start, end, block_start, block_end))
+ continue;
+
+ buddy = __get_buddy(block);
+ if (!drm_buddy_block_is_free(buddy))
+ continue;
+
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
+
+ /*
+ * If the prev block is the same as the buddy, don't access
+ * it in the next iteration, since it is freed below together
+ * with this block as part of __drm_buddy_free().
+ */
+ if (prev == buddy)
+ prev = list_prev_entry(prev, link);
+
+ list_del(&block->link);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+ order = __drm_buddy_free(mm, block, true);
+ if (order >= min_order)
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
/**
* drm_buddy_init - init memory manager
*
@@ -112,6 +249,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mm->size = size;
mm->avail = size;
+ mm->clear_avail = 0;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
@@ -186,11 +324,21 @@ EXPORT_SYMBOL(drm_buddy_init);
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
+ u64 root_size, size;
+ unsigned int order;
int i;
+ size = mm->size;
+
for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ __force_merge(mm, 0, size, order);
+
WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
}
WARN_ON(mm->avail != mm->size);
@@ -223,26 +371,17 @@ static int split_block(struct drm_buddy *mm,
mark_free(mm, block->left);
mark_free(mm, block->right);
+ if (drm_buddy_block_is_clear(block)) {
+ mark_cleared(block->left);
+ mark_cleared(block->right);
+ clear_reset(block);
+ }
+
mark_split(block);
return 0;
}
-static struct drm_buddy_block *
-__get_buddy(struct drm_buddy_block *block)
-{
- struct drm_buddy_block *parent;
-
- parent = block->parent;
- if (!parent)
- return NULL;
-
- if (parent->left == block)
- return parent->right;
-
- return parent->left;
-}
-
/**
* drm_get_buddy - get buddy address
*
@@ -260,30 +399,6 @@ drm_get_buddy(struct drm_buddy_block *block)
}
EXPORT_SYMBOL(drm_get_buddy);
-static void __drm_buddy_free(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *parent;
-
- while ((parent = block->parent)) {
- struct drm_buddy_block *buddy;
-
- buddy = __get_buddy(block);
-
- if (!drm_buddy_block_is_free(buddy))
- break;
-
- list_del(&buddy->link);
-
- drm_block_free(mm, block);
- drm_block_free(mm, buddy);
-
- block = parent;
- }
-
- mark_free(mm, block);
-}
-
/**
* drm_buddy_free_block - free a block
*
@@ -295,42 +410,74 @@ void drm_buddy_free_block(struct drm_buddy *mm,
{
BUG_ON(!drm_buddy_block_is_allocated(block));
mm->avail += drm_buddy_block_size(mm, block);
- __drm_buddy_free(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+
+ __drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);
-/**
- * drm_buddy_free_list - free blocks
- *
- * @mm: DRM buddy manager
- * @objects: input list head to free blocks
- */
-void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
+static void __drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ bool mark_clear,
+ bool mark_dirty)
{
struct drm_buddy_block *block, *on;
+ WARN_ON(mark_dirty && mark_clear);
+
list_for_each_entry_safe(block, on, objects, link) {
+ if (mark_clear)
+ mark_cleared(block);
+ else if (mark_dirty)
+ clear_reset(block);
drm_buddy_free_block(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
-EXPORT_SYMBOL(drm_buddy_free_list);
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+static void drm_buddy_free_list_internal(struct drm_buddy *mm,
+ struct list_head *objects)
{
- return s1 <= e2 && e1 >= s2;
+ /*
+ * Don't touch the clear/dirty bit, since allocation is still internal
+ * at this point. For example we might have just failed part of the
+ * allocation.
+ */
+ __drm_buddy_free_list(mm, objects, false, false);
}
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+/**
+ * drm_buddy_free_list - free blocks
+ *
+ * @mm: DRM buddy manager
+ * @objects: input list head to free blocks
+ * @flags: optional flags like DRM_BUDDY_CLEARED
+ */
+void drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags)
{
- return s1 <= s2 && e1 >= e2;
+ bool mark_clear = flags & DRM_BUDDY_CLEARED;
+
+ __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
+}
+EXPORT_SYMBOL(drm_buddy_free_list);
+
+static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
+{
+ bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;
+
+ return needs_clear != drm_buddy_block_is_clear(block);
}
static struct drm_buddy_block *
-alloc_range_bias(struct drm_buddy *mm,
- u64 start, u64 end,
- unsigned int order)
+__alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags,
+ bool fallback)
{
u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
@@ -379,6 +526,9 @@ alloc_range_bias(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
+ if (!fallback && block_incompatible(block, flags))
+ continue;
+
/*
* Find the free block within the range.
*/
@@ -410,30 +560,57 @@ err_undo:
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
return ERR_PTR(err);
}
static struct drm_buddy_block *
-get_maxblock(struct drm_buddy *mm, unsigned int order)
+__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags)
+{
+ struct drm_buddy_block *block;
+ bool fallback = false;
+
+ block = __alloc_range_bias(mm, start, end, order,
+ flags, fallback);
+ if (IS_ERR(block))
+ return __alloc_range_bias(mm, start, end, order,
+ flags, !fallback);
+
+ return block;
+}
+
+static struct drm_buddy_block *
+get_maxblock(struct drm_buddy *mm, unsigned int order,
+ unsigned long flags)
{
- struct drm_buddy_block *max_block = NULL, *node;
+ struct drm_buddy_block *max_block = NULL, *block = NULL;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
- if (!list_empty(&mm->free_list[i])) {
- node = list_last_entry(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (!max_block) {
- max_block = node;
+ struct drm_buddy_block *tmp_block;
+
+ list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
+ if (block_incompatible(tmp_block, flags))
continue;
- }
- if (drm_buddy_block_offset(node) >
- drm_buddy_block_offset(max_block)) {
- max_block = node;
- }
+ block = tmp_block;
+ break;
+ }
+
+ if (!block)
+ continue;
+
+ if (!max_block) {
+ max_block = block;
+ continue;
+ }
+
+ if (drm_buddy_block_offset(block) >
+ drm_buddy_block_offset(max_block)) {
+ max_block = block;
}
}
@@ -450,12 +627,30 @@ alloc_from_freelist(struct drm_buddy *mm,
int err;
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(mm, order);
+ block = get_maxblock(mm, order, flags);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ struct drm_buddy_block *tmp_block;
+
+ list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
+ if (block_incompatible(tmp_block, flags))
+ continue;
+
+ block = tmp_block;
+ break;
+ }
+
+ if (block)
+ break;
+ }
+ }
+
+ if (!block) {
+ /* Fallback method */
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
if (!list_empty(&mm->free_list[tmp])) {
block = list_last_entry(&mm->free_list[tmp],
struct drm_buddy_block,
@@ -464,10 +659,10 @@ alloc_from_freelist(struct drm_buddy *mm,
break;
}
}
- }
- if (!block)
- return ERR_PTR(-ENOSPC);
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+ }
BUG_ON(!drm_buddy_block_is_free(block));
@@ -483,7 +678,7 @@ alloc_from_freelist(struct drm_buddy *mm,
err_undo:
if (tmp != order)
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
return ERR_PTR(err);
}
@@ -526,16 +721,18 @@ static int __alloc_range(struct drm_buddy *mm,
}
if (contains(start, end, block_start, block_end)) {
- if (!drm_buddy_block_is_free(block)) {
+ if (drm_buddy_block_is_free(block)) {
+ mark_allocated(block);
+ total_allocated += drm_buddy_block_size(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ } else if (!mm->clear_avail) {
err = -ENOSPC;
goto err_free;
}
-
- mark_allocated(block);
- total_allocated += drm_buddy_block_size(mm, block);
- mm->avail -= drm_buddy_block_size(mm, block);
- list_add_tail(&block->link, &allocated);
- continue;
}
if (!drm_buddy_block_is_split(block)) {
@@ -567,14 +764,14 @@ err_undo:
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
err_free:
if (err == -ENOSPC && total_allocated_on_err) {
list_splice_tail(&allocated, blocks);
*total_allocated_on_err = total_allocated;
} else {
- drm_buddy_free_list(mm, &allocated);
+ drm_buddy_free_list_internal(mm, &allocated);
}
return err;
@@ -640,11 +837,11 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
list_splice(&blocks_lhs, blocks);
return 0;
} else if (err != -ENOSPC) {
- drm_buddy_free_list(mm, blocks);
+ drm_buddy_free_list_internal(mm, blocks);
return err;
}
/* Free blocks for the next iteration */
- drm_buddy_free_list(mm, blocks);
+ drm_buddy_free_list_internal(mm, blocks);
}
return -ENOSPC;
@@ -700,6 +897,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_del(&block->link);
mark_free(mm, block);
mm->avail += drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail += drm_buddy_block_size(mm, block);
/* Prevent recursively freeing this node */
parent = block->parent;
@@ -711,6 +910,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
if (err) {
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
list_add(&block->link, blocks);
}
@@ -719,13 +920,28 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
}
EXPORT_SYMBOL(drm_buddy_block_trim);
+static struct drm_buddy_block *
+__drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags)
+{
+ if (flags & DRM_BUDDY_RANGE_ALLOCATION)
+ /* Allocate traversing within the range */
+ return __drm_buddy_alloc_range_bias(mm, start, end,
+ order, flags);
+ else
+ /* Allocate from freelist */
+ return alloc_from_freelist(mm, order, flags);
+}
+
/**
* drm_buddy_alloc_blocks - allocate power-of-two blocks
*
* @mm: DRM buddy manager to allocate from
* @start: start of the allowed range for this block
* @end: end of the allowed range for this block
- * @size: size of the allocation
+ * @size: size of the allocation in bytes
* @min_block_size: alignment of the allocation
* @blocks: output list head to add allocated blocks
* @flags: DRM_BUDDY_*_ALLOCATION flags
@@ -800,23 +1016,33 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
BUG_ON(order < min_order);
do {
- if (flags & DRM_BUDDY_RANGE_ALLOCATION)
- /* Allocate traversing within the range */
- block = alloc_range_bias(mm, start, end, order);
- else
- /* Allocate from freelist */
- block = alloc_from_freelist(mm, order, flags);
-
+ block = __drm_buddy_alloc_blocks(mm, start,
+ end,
+ order,
+ flags);
if (!IS_ERR(block))
break;
if (order-- == min_order) {
+ /* Try allocation through force merge method */
+ if (mm->clear_avail &&
+ !__force_merge(mm, start, end, min_order)) {
+ block = __drm_buddy_alloc_blocks(mm, start,
+ end,
+ min_order,
+ flags);
+ if (!IS_ERR(block)) {
+ order = min_order;
+ break;
+ }
+ }
+
+ /*
+ * Try contiguous block allocation through
+ * try harder method.
+ */
if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
!(flags & DRM_BUDDY_RANGE_ALLOCATION))
- /*
- * Try contiguous block allocation through
- * try harder method
- */
return __alloc_contig_try_harder(mm,
original_size,
original_min_size,
@@ -828,6 +1054,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
kmemleak_update_trace(block);
list_add_tail(&block->link, &allocated);
@@ -866,7 +1094,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return 0;
err_free:
- drm_buddy_free_list(mm, &allocated);
+ drm_buddy_free_list_internal(mm, &allocated);
return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
@@ -899,8 +1127,8 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
int order;
- drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
- mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
+ drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
struct drm_buddy_block *block;
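The buddy allocator now tracks a per-block cleared bit and a clear_avail counter: DRM_BUDDY_CLEAR_ALLOCATION asks for blocks whose contents are known to be zero (with a dirty fallback plus force-merge when none fit), and DRM_BUDDY_CLEARED on free feeds zeroed memory back into that pool. A condensed usage sketch against the updated interface; the sizes and the caller are hypothetical:

/* Sketch: allocate preferably-cleared blocks and return them as cleared. */
#include <linux/list.h>
#include <linux/sizes.h>
#include <drm/drm_buddy.h>

static int demo_clear_aware_alloc(struct drm_buddy *mm)
{
	LIST_HEAD(blocks);
	int err;

	/* Prefer blocks already known to be zeroed. */
	err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_2M, SZ_4K,
				     &blocks, DRM_BUDDY_CLEAR_ALLOCATION);
	if (err)
		return err;

	/* ... use the memory, zero it again before handing it back ... */

	/* Freeing with DRM_BUDDY_CLEARED grows mm->clear_avail. */
	drm_buddy_free_list(mm, &blocks, DRM_BUDDY_CLEARED);
	return 0;
}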
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 9403b3f576f7..2803ac111bbd 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -172,6 +172,18 @@ void drm_client_release(struct drm_client_dev *client)
}
EXPORT_SYMBOL(drm_client_release);
+/**
+ * drm_client_dev_unregister - Unregister clients
+ * @dev: DRM device
+ *
+ * This function releases all clients by calling each client's
+ * &drm_client_funcs.unregister callback. The callback function
+ * is responsible for releasing all resources, including the client
+ * itself.
+ *
+ * The helper drm_dev_unregister() calls this function. Drivers
+ * that use it don't need to call this function themselves.
+ */
void drm_client_dev_unregister(struct drm_device *dev)
{
struct drm_client_dev *client, *tmp;
@@ -191,6 +203,7 @@ void drm_client_dev_unregister(struct drm_device *dev)
}
mutex_unlock(&dev->clientlist_mutex);
}
+EXPORT_SYMBOL(drm_client_dev_unregister);
/**
* drm_client_dev_hotplug - Send hotplug event to clients
@@ -305,6 +318,66 @@ err_delete:
}
/**
+ * drm_client_buffer_vmap_local - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ * @map_copy: Returns the mapped memory's address
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the existing mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap_local() should be closely followed by a call to
+ * drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for
+ * long-term mappings.
+ *
+ * The returned address is a copy of the internal value. In contrast to
+ * other vmap interfaces, you don't need it for the client's vunmap
+ * function. So you can modify it at will during blit and draw operations.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
+{
+ struct drm_gem_object *gem = buffer->gem;
+ struct iosys_map *map = &buffer->map;
+ int ret;
+
+ drm_gem_lock(gem);
+
+ ret = drm_gem_vmap(gem, map);
+ if (ret)
+ goto err_drm_gem_vmap_unlocked;
+ *map_copy = *map;
+
+ return 0;
+
+err_drm_gem_vmap_unlocked:
+ drm_gem_unlock(gem);
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap_local);
+
+/**
+ * drm_client_buffer_vunmap_local - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping established
+ * with drm_client_buffer_vmap_local(). Calling this function is only
+ * required by clients that manage their buffer mappings by themselves.
+ */
+void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
+{
+ struct drm_gem_object *gem = buffer->gem;
+ struct iosys_map *map = &buffer->map;
+
+ drm_gem_vunmap(gem, map);
+ drm_gem_unlock(gem);
+}
+EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
+
+/**
* drm_client_buffer_vmap - Map DRM client buffer into address space
* @buffer: DRM client buffer
* @map_copy: Returns the mapped memory's address
@@ -328,24 +401,30 @@ int
drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
+ struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
int ret;
- /*
- * FIXME: The dependency on GEM here isn't required, we could
- * convert the driver handle to a dma-buf instead and use the
- * backend-agnostic dma-buf vmap support instead. This would
- * require that the handle2fd prime ioctl is reworked to pull the
- * fd_install step out of the driver backend hooks, to make that
- * final step optional for internal users.
- */
- ret = drm_gem_vmap_unlocked(buffer->gem, map);
+ drm_gem_lock(gem);
+
+ ret = drm_gem_pin_locked(gem);
if (ret)
- return ret;
+ goto err_drm_gem_pin_locked;
+ ret = drm_gem_vmap(gem, map);
+ if (ret)
+ goto err_drm_gem_vmap;
+
+ drm_gem_unlock(gem);
*map_copy = *map;
return 0;
+
+err_drm_gem_vmap:
+ drm_gem_unpin_locked(buffer->gem);
+err_drm_gem_pin_locked:
+ drm_gem_unlock(gem);
+ return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -359,9 +438,13 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
+ struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap_unlocked(buffer->gem, map);
+ drm_gem_lock(gem);
+ drm_gem_vunmap(gem, map);
+ drm_gem_unpin_locked(gem);
+ drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
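
As a usage sketch (not part of the patch), the new _local pair brackets a short copy into the buffer, mirroring what drm_fbdev_generic does further below; my_blit and its parameters are hypothetical.

#include <linux/iosys-map.h>

#include <drm/drm_client.h>

/* Hypothetical helper: copy pixel data through a short-term mapping. */
static int my_blit(struct drm_client_buffer *buffer, const void *src, size_t len)
{
        struct iosys_map map;
        int ret;

        ret = drm_client_buffer_vmap_local(buffer, &map);
        if (ret)
                return ret;

        iosys_map_memcpy_to(&map, 0, src, len); /* write into the mapped buffer */

        drm_client_buffer_vunmap_local(buffer);

        return 0;
}
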
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 871e4e2129d6..31af5cf37a09 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -242,8 +242,10 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i]));
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] enabled? %s\n",
+ connector->base.id, connector->name,
+ connector->display_info.non_desktop ?
+ "non desktop" : str_yes_no(enabled[i]));
any_enabled |= enabled[i];
}
@@ -303,7 +305,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
}
if (can_clone) {
- DRM_DEBUG_KMS("can clone using command line\n");
+ drm_dbg_kms(dev, "can clone using command line\n");
return true;
}
@@ -332,15 +334,16 @@ static bool drm_client_target_cloned(struct drm_device *dev,
kfree(dmt_mode);
if (can_clone) {
- DRM_DEBUG_KMS("can clone using 1024x768\n");
+ drm_dbg_kms(dev, "can clone using 1024x768\n");
return true;
}
fail:
- DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ drm_info(dev, "kms: can't enable cloning when we probably wanted to.\n");
return false;
}
-static int drm_client_get_tile_offsets(struct drm_connector **connectors,
+static int drm_client_get_tile_offsets(struct drm_device *dev,
+ struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
@@ -357,8 +360,9 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors,
continue;
if (!modes[i] && (h_idx || v_idx)) {
- DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
- connector->base.id);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] no modes for connector tiled %d\n",
+ connector->base.id, connector->name, i);
continue;
}
if (connector->tile_h_loc < h_idx)
@@ -369,11 +373,12 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors,
}
offsets[idx].x = hoffset;
offsets[idx].y = voffset;
- DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ drm_dbg_kms(dev, "returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
return 0;
}
-static bool drm_client_target_preferred(struct drm_connector **connectors,
+static bool drm_client_target_preferred(struct drm_device *dev,
+ struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
@@ -423,17 +428,19 @@ retry:
* find the tile offsets for this pass - need to find
* all tiles left and above
*/
- drm_client_get_tile_offsets(connectors, connector_count, modes, offsets, i,
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
+ connector->base.id, connector->name);
/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
- connector->base.id, connector->tile_group ? connector->tile_group->id : 0);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
+ connector->base.id, connector->name,
+ connector->tile_group ? connector->tile_group->id : 0);
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
@@ -455,16 +462,18 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
+ connector->base.id, connector->name);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
} else {
modes[i] = drm_connector_get_tiled_mode(connector);
}
}
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
+ connector->base.id, connector->name,
+ modes[i] ? modes[i]->name : "none");
conn_configured |= BIT_ULL(i);
}
@@ -585,7 +594,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
if (!drm_drv_uses_atomic_modeset(dev))
return false;
- if (WARN_ON(count <= 0))
+ if (drm_WARN_ON(dev, count <= 0))
return false;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
@@ -624,26 +633,26 @@ retry:
num_connectors_detected++;
if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] not enabled, skipping\n",
+ connector->base.id, connector->name);
conn_configured |= BIT(i);
continue;
}
if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disabled by user, skipping\n",
+ connector->base.id, connector->name);
enabled[i] = false;
continue;
}
encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (!encoder || drm_WARN_ON(dev, !connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] has no encoder or crtc, skipping\n",
+ connector->base.id, connector->name);
enabled[i] = false;
conn_configured |= BIT(i);
continue;
@@ -660,28 +669,30 @@ retry:
*/
for (j = 0; j < count; j++) {
if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ drm_dbg_kms(dev, "fallback: cloned configuration\n");
goto bail;
}
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
+ connector->base.id, connector->name);
/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
+ connector->base.id, connector->name,
+ str_yes_no(connector->has_tile));
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
+ connector->base.id, connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
@@ -700,8 +711,8 @@ retry:
* This is crtc->mode and not crtc->state->mode for the
* fastboot check to work correctly.
*/
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
+ connector->base.id, connector->name);
modes[i] = &connector->state->crtc->mode;
}
/*
@@ -710,18 +721,18 @@ retry:
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
+ connector->base.id, connector->name);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->base.id, connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
fallback = false;
conn_configured |= BIT(i);
@@ -737,15 +748,15 @@ retry:
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < dev->mode_config.num_crtc) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
+ drm_dbg_kms(dev, "fallback: Not all outputs enabled\n");
+ drm_dbg_kms(dev, "Enabled: %i, detected: %i\n",
+ num_connectors_enabled, num_connectors_detected);
fallback = true;
}
if (fallback) {
bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
+ drm_dbg_kms(dev, "Not using firmware configuration\n");
memcpy(enabled, save_enabled, count);
ret = false;
}
@@ -777,12 +788,13 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(dev, "\n");
if (!width)
width = dev->mode_config.max_width;
@@ -813,7 +825,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
offsets = kcalloc(connector_count, sizeof(*offsets), GFP_KERNEL);
enabled = kcalloc(connector_count, sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
- DRM_ERROR("Memory allocation failed\n");
ret = -ENOMEM;
goto out;
}
@@ -824,7 +835,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
for (i = 0; i < connector_count; i++)
total_modes_count += connectors[i]->funcs->fill_modes(connectors[i], width, height);
if (!total_modes_count)
- DRM_DEBUG_KMS("No connectors reported connected with modes\n");
+ drm_dbg_kms(dev, "No connectors reported connected with modes\n");
drm_client_connectors_enabled(connectors, connector_count, enabled);
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
@@ -835,17 +846,16 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_target_cloned(dev, connectors, connector_count, modes,
offsets, enabled, width, height) &&
- !drm_client_target_preferred(connectors, connector_count, modes,
+ !drm_client_target_preferred(dev, connectors, connector_count, modes,
offsets, enabled, width, height))
- DRM_ERROR("Unable to find initial modes\n");
+ drm_err(dev, "Unable to find initial modes\n");
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
- width, height);
+ drm_dbg_kms(dev, "picking CRTCs for %dx%d config\n",
+ width, height);
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -858,11 +868,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
struct drm_mode_set *modeset = drm_client_find_modeset(client, crtc);
struct drm_connector *connector = connectors[i];
- DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
- mode->name, crtc->base.id, offset->x, offset->y);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] desired mode %s set (%d,%d)\n",
+ crtc->base.id, crtc->name,
+ mode->name, offset->x, offset->y);
- if (WARN_ON_ONCE(modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
- (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
+ if (drm_WARN_ON_ONCE(dev, modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
+ (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
ret = -EINVAL;
break;
}
@@ -875,6 +886,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b0516505f7ae..4d2df7f64dc5 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2940,7 +2940,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
dev->mode_config.max_width,
dev->mode_config.max_height);
else
- drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
+ drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n",
connector->base.id, connector->name);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82c665d3e74b..483969b84a30 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -716,10 +716,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ drm_dbg_kms(dev, "Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
}
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
plane = crtc->primary;
@@ -742,7 +742,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
old_fb = plane->fb;
if (!old_fb) {
- DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ drm_dbg_kms(dev, "CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
@@ -753,8 +753,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
} else {
fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
- DRM_DEBUG_KMS("Unknown FB ID%d\n",
- crtc_req->fb_id);
+ drm_dbg_kms(dev, "Unknown FB ID%d\n",
+ crtc_req->fb_id);
ret = -ENOENT;
goto out;
}
@@ -767,7 +767,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
if (!file_priv->aspect_ratio_allowed &&
(crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) {
- DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n");
+ drm_dbg_kms(dev, "Unexpected aspect-ratio flag bits\n");
ret = -EINVAL;
goto out;
}
@@ -775,9 +775,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
- ret, drm_get_mode_status_name(mode->status));
- drm_mode_debug_printmodeline(mode);
+ drm_dbg_kms(dev, "Invalid mode (%s, %pe): " DRM_MODE_FMT "\n",
+ drm_get_mode_status_name(mode->status),
+ ERR_PTR(ret), DRM_MODE_ARG(mode));
goto out;
}
@@ -793,9 +793,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
fb->format->format,
fb->modifier);
if (ret) {
- DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
- &fb->format->format,
- fb->modifier);
+ drm_dbg_kms(dev, "Invalid pixel format %p4cc, modifier 0x%llx\n",
+ &fb->format->format, fb->modifier);
goto out;
}
}
@@ -808,14 +807,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
if (crtc_req->count_connectors == 0 && mode) {
- DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ drm_dbg_kms(dev, "Count connectors is 0 but mode set\n");
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
- DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
+ drm_dbg_kms(dev, "Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
ret = -EINVAL;
goto out;
}
@@ -847,14 +846,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
- DRM_DEBUG_KMS("Connector id %d unknown\n",
- out_id);
+ drm_dbg_kms(dev, "Connector id %d unknown\n",
+ out_id);
ret = -ENOENT;
goto out;
}
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
connector_set[i] = connector;
num_connectors++;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2dafc39a27cb..0955f1c385dd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -110,15 +110,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress) {
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
@@ -150,14 +150,14 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
@@ -230,7 +230,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
@@ -294,7 +294,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_encoder *encoder;
bool ret = true;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_warn_on_modeset_not_all_locked(dev);
@@ -338,7 +338,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
- DRM_DEBUG_KMS("Encoder fixup failed\n");
+ drm_dbg_kms(dev, "[ENCODER:%d:%s] mode fixup failed\n",
+ encoder->base.id, encoder->name);
goto done;
}
}
@@ -347,11 +348,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (crtc_funcs->mode_fixup) {
if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
adjusted_mode))) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] mode fixup failed\n",
+ crtc->base.id, crtc->name);
goto done;
}
}
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
drm_mode_copy(&crtc->hwmode, adjusted_mode);
@@ -390,8 +392,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n",
- encoder->base.id, encoder->name, mode->name);
+ drm_dbg_kms(dev, "[ENCODER:%d:%s] set [MODE:%s]\n",
+ encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
@@ -503,7 +505,7 @@ drm_connector_get_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_WARN_ON(connector->dev, hweight32(connector->possible_encoders) > 1);
drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
@@ -564,8 +566,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
int ret;
int i;
- DRM_DEBUG_KMS("\n");
-
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
@@ -577,19 +577,22 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
crtc_funcs = set->crtc->helper_private;
dev = set->crtc->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
+ drm_dbg_kms(dev, "\n");
+
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
if (!set->mode)
set->fb = NULL;
if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->crtc->name,
- set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->crtc->name,
+ set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
} else {
- DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
- set->crtc->base.id, set->crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] [NOFB]\n",
+ set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc);
return 0;
}
@@ -639,7 +642,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->primary->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] no fb, full mode set\n",
+ set->crtc->base.id, set->crtc->name);
mode_changed = true;
} else if (set->fb->format != set->crtc->primary->fb->format) {
mode_changed = true;
@@ -651,9 +655,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
fb_changed = true;
if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
- DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] modes are different, full mode set:\n",
+ set->crtc->base.id, set->crtc->name);
+ drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(&set->crtc->mode));
+ drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(set->mode));
mode_changed = true;
}
@@ -687,7 +692,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
fail = 1;
if (connector->dpms != DRM_MODE_DPMS_ON) {
- DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] DPMS not on, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
}
@@ -696,7 +702,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
}
if (new_encoder != connector->encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
/* If the encoder is reused for another connector, then
* the appropriate crtc will be set later.
@@ -737,17 +744,18 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
goto fail;
}
if (new_crtc != connector->encoder->crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] CRTC changed, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
if (new_crtc) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
- connector->base.id, connector->name,
- new_crtc->base.id, new_crtc->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
+ new_crtc->base.id, new_crtc->name);
} else {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, connector->name);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -758,23 +766,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
if (mode_changed) {
if (drm_helper_crtc_in_use(set->crtc)) {
- DRM_DEBUG_KMS("attempting to set mode from"
- " userspace\n");
- drm_mode_debug_printmodeline(set->mode);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] attempting to set mode from userspace: " DRM_MODE_FMT "\n",
+ set->crtc->base.id, set->crtc->name, DRM_MODE_ARG(set->mode));
set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
- set->crtc->base.id, set->crtc->name);
+ drm_err(dev, "[CRTC:%d:%s] failed to set mode\n",
+ set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] Setting connector DPMS state to on\n",
+ set->crtc->base.id, set->crtc->name);
for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- set->connectors[i]->name);
+ drm_dbg_kms(dev, "\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
@@ -823,7 +831,7 @@ fail:
if (mode_changed &&
!drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
save_set.y, save_set.fb))
- DRM_ERROR("failed to restore config after modeset failure\n");
+ drm_err(dev, "failed to restore config after modeset failure\n");
kfree(save_connector_encoders);
kfree(save_encoder_crtcs);
@@ -905,7 +913,7 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
- WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
+ drm_WARN_ON(connector->dev, drm_drv_uses_atomic_modeset(connector->dev));
if (mode == connector->dpms)
return 0;
@@ -980,7 +988,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
int encoder_dpms;
bool ret;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
@@ -993,7 +1001,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
/* Restoring the old config should never fail! */
if (ret == false)
- DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+ drm_err(dev, "failed to set mode on crtc %p\n", crtc);
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 28e04e750130..8059f65c5d6c 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -26,10 +26,15 @@
* implementation details and are not exported to drivers.
*/
-#include <drm/drm_connector.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_modes.h>
+#ifndef __DRM_CRTC_HELPER_INTERNAL_H__
+#define __DRM_CRTC_HELPER_INTERNAL_H__
+
+enum drm_mode_status;
+struct drm_connector;
+struct drm_crtc;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_modeset_acquire_ctx;
/* drm_probe_helper.c */
enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
@@ -44,3 +49,5 @@ drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *
drm_connector_get_single_encoder(struct drm_connector *connector);
+
+#endif /* __DRM_CRTC_HELPER_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a514d5207e41..25aaae937ceb 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -32,6 +32,10 @@
* and are not exported to drivers.
*/
+#ifndef __DRM_CRTC_INTERNAL_H__
+#define __DRM_CRTC_INTERNAL_H__
+
+#include <linux/err.h>
#include <linux/types.h>
enum drm_color_encoding;
@@ -39,12 +43,14 @@ enum drm_color_range;
enum drm_connector_force;
enum drm_mode_status;
+struct cea_sad;
struct drm_atomic_state;
struct drm_bridge;
struct drm_connector;
struct drm_crtc;
struct drm_device;
struct drm_display_mode;
+struct drm_edid;
struct drm_file;
struct drm_framebuffer;
struct drm_mode_create_dumb;
@@ -54,6 +60,7 @@ struct drm_mode_object;
struct drm_mode_set;
struct drm_plane;
struct drm_plane_state;
+struct drm_printer;
struct drm_property;
struct edid;
struct fwnode_handle;
@@ -292,6 +299,10 @@ void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m);
int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size);
int drm_edid_override_reset(struct drm_connector *connector);
+const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid,
+ int ext_id, int *ext_index);
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
/* drm_edid_load.c */
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
@@ -303,3 +314,5 @@ drm_edid_load_firmware(struct drm_connector *connector)
return ERR_PTR(-ENOENT);
}
#endif
+
+#endif /* __DRM_CRTC_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 9edc111be7ee..9d01d762801f 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -3,10 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_displayid.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include "drm_crtc_internal.h"
+#include "drm_displayid_internal.h"
+
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
@@ -53,9 +55,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
int *length, int *idx,
int *ext_index)
{
- const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
+ const u8 *displayid;
+ displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index);
if (!displayid)
return NULL;
diff --git a/include/drm/drm_displayid.h b/drivers/gpu/drm/drm_displayid_internal.h
index 566497eeb3b8..aee1b86a73c1 100644
--- a/include/drm/drm_displayid.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -19,8 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef DRM_DISPLAYID_H
-#define DRM_DISPLAYID_H
+
+#ifndef __DRM_DISPLAYID_INTERNAL_H__
+#define __DRM_DISPLAYID_INTERNAL_H__
#include <linux/types.h>
#include <linux/bits.h>
@@ -30,7 +31,6 @@ struct drm_edid;
#define VESA_IEEE_OUI 0x3a0292
/* DisplayID Structure versions */
-#define DISPLAY_ID_STRUCTURE_VER_12 0x12
#define DISPLAY_ID_STRUCTURE_VER_20 0x20
/* DisplayID Structure v1r2 Data Blocks */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 243cacb3575c..535b624d4c9d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>
@@ -638,6 +639,7 @@ static int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ raw_spin_lock_init(&dev->mode_config.panic_lock);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
@@ -943,6 +945,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_unload;
}
+ drm_panic_register(dev);
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -987,6 +990,8 @@ void drm_dev_unregister(struct drm_device *dev)
{
dev->registered = false;
+ drm_panic_unregister(dev);
+
drm_client_dev_unregister(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 923c4423151c..4f54c91b31b2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,16 +29,17 @@
*/
#include <linux/bitfield.h>
+#include <linux/byteorder/generic.h>
#include <linux/cec.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_displayid.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
@@ -46,6 +47,7 @@
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_displayid_internal.h"
#include "drm_internal.h"
static int oui(u8 first, u8 second, u8 third)
@@ -102,6 +104,11 @@ struct detailed_mode_closure {
int modes;
};
+struct drm_edid_match_closure {
+ const struct drm_edid_ident *ident;
+ bool matched;
+};
+
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_GTF2 2
@@ -109,13 +116,15 @@ struct detailed_mode_closure {
#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
{ \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, \
+ vend_chr_2, product_id), \
+ }, \
.quirks = _quirks \
}
static const struct edid_quirk {
- u32 panel_id;
+ const struct drm_edid_ident ident;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
@@ -1811,36 +1820,25 @@ static bool edid_block_is_zero(const void *edid)
return !memchr_inv(edid, 0, EDID_LENGTH);
}
-/**
- * drm_edid_are_equal - compare two edid blobs.
- * @edid1: pointer to first blob
- * @edid2: pointer to second blob
- * This helper can be used during probing to determine if
- * edid had changed.
- */
-bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
+static bool drm_edid_eq(const struct drm_edid *drm_edid,
+ const void *raw_edid, size_t raw_edid_size)
{
- int edid1_len, edid2_len;
- bool edid1_present = edid1 != NULL;
- bool edid2_present = edid2 != NULL;
+ bool edid1_present = drm_edid && drm_edid->edid && drm_edid->size;
+ bool edid2_present = raw_edid && raw_edid_size;
if (edid1_present != edid2_present)
return false;
- if (edid1) {
- edid1_len = edid_size(edid1);
- edid2_len = edid_size(edid2);
-
- if (edid1_len != edid2_len)
+ if (edid1_present) {
+ if (drm_edid->size != raw_edid_size)
return false;
- if (memcmp(edid1, edid2, edid1_len))
+ if (memcmp(drm_edid->edid, raw_edid, drm_edid->size))
return false;
}
return true;
}
-EXPORT_SYMBOL(drm_edid_are_equal);
enum edid_block_status {
EDID_BLOCK_OK = 0,
@@ -2749,8 +2747,84 @@ const struct drm_edid *drm_edid_read(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_edid_read);
-static u32 edid_extract_panel_id(const struct edid *edid)
+/**
+ * drm_edid_get_product_id - Get the vendor and product identification
+ * @drm_edid: EDID
+ * @id: Where to place the product id
+ */
+void drm_edid_get_product_id(const struct drm_edid *drm_edid,
+ struct drm_edid_product_id *id)
+{
+ if (drm_edid && drm_edid->edid && drm_edid->size >= EDID_LENGTH)
+ memcpy(id, &drm_edid->edid->product_id, sizeof(*id));
+ else
+ memset(id, 0, sizeof(*id));
+}
+EXPORT_SYMBOL(drm_edid_get_product_id);
+
+static void decode_date(struct seq_buf *s, const struct drm_edid_product_id *id)
+{
+ int week = id->week_of_manufacture;
+ int year = id->year_of_manufacture + 1990;
+
+ if (week == 0xff)
+ seq_buf_printf(s, "model year: %d", year);
+ else if (!week)
+ seq_buf_printf(s, "year of manufacture: %d", year);
+ else
+ seq_buf_printf(s, "week/year of manufacture: %d/%d", week, year);
+}
+
+/**
+ * drm_edid_print_product_id - Print decoded product id to printer
+ * @p: drm printer
+ * @id: EDID product id
+ * @raw: If true, also print the raw hex
+ *
+ * See VESA E-EDID 1.4 section 3.4.
+ */
+void drm_edid_print_product_id(struct drm_printer *p,
+ const struct drm_edid_product_id *id, bool raw)
+{
+ DECLARE_SEQ_BUF(date, 40);
+ char vend[4];
+
+ drm_edid_decode_mfg_id(be16_to_cpu(id->manufacturer_name), vend);
+
+ decode_date(&date, id);
+
+ drm_printf(p, "manufacturer name: %s, product code: %u, serial number: %u, %s\n",
+ vend, le16_to_cpu(id->product_code),
+ le32_to_cpu(id->serial_number), seq_buf_str(&date));
+
+ if (raw)
+ drm_printf(p, "raw product id: %*ph\n", (int)sizeof(*id), id);
+
+ WARN_ON(seq_buf_has_overflowed(&date));
+}
+EXPORT_SYMBOL(drm_edid_print_product_id);
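
For illustration, a small sketch (not from this patch) of how a driver might combine the two new helpers to log an EDID's vendor/product block; my_dump_product_id is a hypothetical debug helper.

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>

/* Hypothetical debug helper: log the vendor/product block of an EDID. */
static void my_dump_product_id(struct drm_connector *connector,
                               const struct drm_edid *drm_edid)
{
        struct drm_printer p = drm_info_printer(connector->dev->dev);
        struct drm_edid_product_id id;

        drm_edid_get_product_id(drm_edid, &id);
        drm_edid_print_product_id(&p, &id, true);
}
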
+
+/**
+ * drm_edid_get_panel_id - Get a panel's ID from EDID
+ * @drm_edid: EDID that contains panel ID.
+ *
+ * This function uses the first block of the EDID of a panel and (assuming
+ * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
+ * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
+ * supposed to be different for each different model of panel.
+ *
+ * Return: A 32-bit ID that should be different for each make/model of panel.
+ * See the functions drm_edid_encode_panel_id() and
+ * drm_edid_decode_panel_id() for some details on the structure of this
+ * ID. Returns 0 if the EDID is smaller than a base block.
+ */
+u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid)
{
+ const struct edid *edid = drm_edid->edid;
+
+ if (drm_edid->size < EDID_LENGTH)
+ return 0;
+
/*
* We represent the ID as a 32-bit number so it can easily be compared
* with "==".
@@ -2768,60 +2842,54 @@ static u32 edid_extract_panel_id(const struct edid *edid)
(u32)edid->mfg_id[1] << 16 |
(u32)EDID_PRODUCT_ID(edid);
}
+EXPORT_SYMBOL(drm_edid_get_panel_id);
/**
- * drm_edid_get_panel_id - Get a panel's ID through DDC
+ * drm_edid_read_base_block - Get a panel's EDID base block
* @adapter: I2C adapter to use for DDC
*
- * This function reads the first block of the EDID of a panel and (assuming
- * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
- * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
- * supposed to be different for each different modem of panel.
+ * This function returns the drm_edid containing the first block of the EDID of
+ * a panel.
*
* This function is intended to be used during early probing on devices where
* more than one panel might be present. Because of its intended use it must
- * assume that the EDID of the panel is correct, at least as far as the ID
- * is concerned (in other words, we don't process any overrides here).
+ * assume that the EDID of the panel is correct, at least as far as the base
+ * block is concerned (in other words, we don't process any overrides here).
+ *
+ * Caller should call drm_edid_free() after use.
*
* NOTE: it's expected that this function and drm_do_get_edid() will both
* read the EDID, but there is no caching between them. Since we're only
* reading the first block, hopefully this extra overhead won't be too big.
*
- * Return: A 32-bit ID that should be different for each make/model of panel.
- * See the functions drm_edid_encode_panel_id() and
- * drm_edid_decode_panel_id() for some details on the structure of this
- * ID.
+ * WARNING: Only use this function when the connector is unknown, for example
+ * during the early probe of a panel. The EDID read by this function is
+ * temporary and should be replaced by the full EDID returned by the other
+ * drm_edid_read*() functions.
+ *
+ * Return: Pointer to allocated EDID base block, or NULL on any failure.
*/
-
-u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter)
{
enum edid_block_status status;
void *base_block;
- u32 panel_id = 0;
-
- /*
- * There are no manufacturer IDs of 0, so if there is a problem reading
- * the EDID then we'll just return 0.
- */
base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
- return 0;
+ return NULL;
status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
edid_block_status_print(status, base_block, 0);
- if (edid_block_status_valid(status, edid_block_tag(base_block)))
- panel_id = edid_extract_panel_id(base_block);
- else
+ if (!edid_block_status_valid(status, edid_block_tag(base_block))) {
edid_block_dump(KERN_NOTICE, base_block, 0);
+ kfree(base_block);
+ return NULL;
+ }
- kfree(base_block);
-
- return panel_id;
+ return _drm_edid_alloc(base_block, EDID_LENGTH);
}
-EXPORT_SYMBOL(drm_edid_get_panel_id);
+EXPORT_SYMBOL(drm_edid_read_base_block);
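
As a usage sketch (not part of the patch), an early-probe helper that reads the temporary base block, extracts the panel ID, and frees the block again; my_early_panel_id is hypothetical.

#include <linux/i2c.h>

#include <drm/drm_edid.h>

/* Hypothetical early-probe helper: peek at the panel ID before the
 * connector exists, then drop the temporary base block.
 */
static u32 my_early_panel_id(struct i2c_adapter *ddc)
{
        const struct drm_edid *drm_edid;
        u32 panel_id;

        drm_edid = drm_edid_read_base_block(ddc);
        if (!drm_edid)
                return 0;

        panel_id = drm_edid_get_panel_id(drm_edid);
        drm_edid_free(drm_edid);

        return panel_id;
}
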
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
@@ -2903,16 +2971,17 @@ EXPORT_SYMBOL(drm_edid_duplicate);
* @drm_edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
+ *
+ * Return: A u32 representing the quirks to apply.
*/
static u32 edid_get_quirks(const struct drm_edid *drm_edid)
{
- u32 panel_id = edid_extract_panel_id(drm_edid->edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
- if (quirk->panel_id == panel_id)
+ if (drm_edid_match(drm_edid, &quirk->ident))
return quirk->quirks;
}
@@ -4120,7 +4189,7 @@ static int add_detailed_modes(struct drm_connector *connector,
*
* FIXME: Prefer not returning pointers to raw EDID data.
*/
-const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
+const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index)
{
const u8 *edid_ext = NULL;
@@ -4150,11 +4219,21 @@ static bool drm_edid_has_cta_extension(const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
- int ext_index = 0;
+ struct drm_edid_iter edid_iter;
+ const u8 *ext;
bool found = false;
/* Look for a top level CEA extension block */
- if (drm_find_edid_extension(drm_edid, CEA_EXT, &ext_index))
+ drm_edid_iter_begin(drm_edid, &edid_iter);
+ drm_edid_iter_for_each(ext, &edid_iter) {
+ if (ext[0] == CEA_EXT) {
+ found = true;
+ break;
+ }
+ }
+ drm_edid_iter_end(&edid_iter);
+
+ if (found)
return true;
/* CEA blocks can also be found embedded in a DisplayID block */
@@ -5443,6 +5522,66 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
}
static void
+match_identity(const struct detailed_timing *timing, void *data)
+{
+ struct drm_edid_match_closure *closure = data;
+ unsigned int i;
+ const char *name = closure->ident->name;
+ unsigned int name_len = strlen(name);
+ const char *desc = timing->data.other_data.data.str.str;
+ unsigned int desc_len = ARRAY_SIZE(timing->data.other_data.data.str.str);
+
+ if (name_len > desc_len ||
+ !(is_display_descriptor(timing, EDID_DETAIL_MONITOR_NAME) ||
+ is_display_descriptor(timing, EDID_DETAIL_MONITOR_STRING)))
+ return;
+
+ if (strncmp(name, desc, name_len))
+ return;
+
+ for (i = name_len; i < desc_len; i++) {
+ if (desc[i] == '\n')
+ break;
+ /* Allow white space before EDID string terminator. */
+ if (!isspace(desc[i]))
+ return;
+ }
+
+ closure->matched = true;
+}
+
+/**
+ * drm_edid_match - match drm_edid with given identity
+ * @drm_edid: EDID
+ * @ident: the EDID identity to match with
+ *
+ * Check if the EDID matches with the given identity.
+ *
+ * Return: True if the given identity matched with EDID, false otherwise.
+ */
+bool drm_edid_match(const struct drm_edid *drm_edid,
+ const struct drm_edid_ident *ident)
+{
+ if (!drm_edid || drm_edid_get_panel_id(drm_edid) != ident->panel_id)
+ return false;
+
+ /* Match with name only if it's not NULL. */
+ if (ident->name) {
+ struct drm_edid_match_closure closure = {
+ .ident = ident,
+ .matched = false,
+ };
+
+ drm_for_each_detailed_block(drm_edid, match_identity, &closure);
+
+ return closure.matched;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_match);
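
For illustration, a sketch (not from this patch) of matching an EDID against a fixed identity, as the quirk table above now does; the panel ID and name values here are made up.

#include <drm/drm_edid.h>

/* Hypothetical identity; the vendor characters, product code and name
 * are illustrative only.
 */
static const struct drm_edid_ident my_panel_ident = {
        .panel_id = drm_edid_encode_panel_id('A', 'B', 'C', 0x1234),
        .name = "MY PANEL 13",
};

static bool my_panel_needs_quirk(const struct drm_edid *drm_edid)
{
        return drm_edid_match(drm_edid, &my_panel_ident);
}
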
+
+static void
monitor_name(const struct detailed_timing *timing, void *data)
{
const char **res = data;
@@ -6787,15 +6926,14 @@ static int _drm_edid_connector_property_update(struct drm_connector *connector,
int ret;
if (connector->edid_blob_ptr) {
- const struct edid *old_edid = connector->edid_blob_ptr->data;
-
- if (old_edid) {
- if (!drm_edid_are_equal(drm_edid ? drm_edid->edid : NULL, old_edid)) {
- connector->epoch_counter++;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
- connector->base.id, connector->name,
- connector->epoch_counter);
- }
+ const void *old_edid = connector->edid_blob_ptr->data;
+ size_t old_edid_size = connector->edid_blob_ptr->length;
+
+ if (old_edid && !drm_edid_eq(drm_edid, old_edid, old_edid_size)) {
+ connector->epoch_counter++;
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
+ connector->base.id, connector->name,
+ connector->epoch_counter);
}
}
@@ -7324,7 +7462,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
const struct displayid_block *block)
{
- return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_TILED_DISPLAY) ||
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c
index 5177991aa272..c0428d07de53 100644
--- a/drivers/gpu/drm/drm_eld.c
+++ b/drivers/gpu/drm/drm_eld.c
@@ -3,10 +3,12 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/**
* drm_eld_sad_get - get SAD from ELD to struct cea_sad
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 3b535ad1b07c..e1d61a65210b 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -15,6 +15,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -148,3 +149,47 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
}
}
EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent);
+
+/**
+ * drm_fb_dma_get_scanout_buffer - Provide a scanout buffer in case of panic
+ * @plane: DRM primary plane
+ * @sb: scanout buffer for the panic handler
+ * Returns: 0 or negative error code
+ *
+ * Generic get_scanout_buffer() implementation, for drivers that use the
+ * drm_fb_dma_helper. It won't call vmap in the panic context, so the driver
+ * should make sure the primary plane is vmapped, otherwise the panic screen
+ * won't get displayed.
+ */
+int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_framebuffer *fb;
+
+ if (!plane->state || !plane->state->fb)
+ return -EINVAL;
+
+ fb = plane->state->fb;
+ /* Only support linear modifier */
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ return -ENODEV;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+
+ /* Buffer should be accessible from the CPU */
+ if (dma_obj->base.import_attach)
+ return -ENODEV;
+
+ /* Buffer should be already mapped to CPU */
+ if (!dma_obj->vaddr)
+ return -ENODEV;
+
+ iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);
+ sb->format = fb->format;
+ sb->height = fb->height;
+ sb->width = fb->width;
+ sb->pitch[0] = fb->pitches[0];
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_dma_get_scanout_buffer);
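
For illustration, a sketch (not from this patch) of how a DMA-helper based driver might wire this helper into its primary plane, assuming the get_scanout_buffer plane hook added elsewhere in the drm_panic work; my_primary_plane_helper_funcs is hypothetical.

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Only the panic hook is shown; a real driver also fills in its
 * atomic_check/atomic_update callbacks here.
 */
static const struct drm_plane_helper_funcs my_primary_plane_helper_funcs = {
        .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
};
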
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index d647d89764cb..97e579c33d84 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -113,7 +113,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;
- info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
info->fix.smem_len = screen_size;
/* deferred I/O */
@@ -197,14 +196,14 @@ static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
*/
mutex_lock(&fb_helper->lock);
- ret = drm_client_buffer_vmap(buffer, &map);
+ ret = drm_client_buffer_vmap_local(buffer, &map);
if (ret)
goto out;
dst = map;
drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
- drm_client_buffer_vunmap(buffer);
+ drm_client_buffer_vunmap_local(buffer);
out:
mutex_unlock(&fb_helper->lock);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 44a948b80ee1..d4bbc5d109c8 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1161,7 +1161,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin(struct drm_gem_object *obj)
+int drm_gem_pin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->pin)
return obj->funcs->pin(obj);
@@ -1169,12 +1169,30 @@ int drm_gem_pin(struct drm_gem_object *obj)
return 0;
}
-void drm_gem_unpin(struct drm_gem_object *obj)
+void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->unpin)
obj->funcs->unpin(obj);
}
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ int ret;
+
+ dma_resv_lock(obj->resv, NULL);
+ ret = drm_gem_pin_locked(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
+}
+
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ dma_resv_lock(obj->resv, NULL);
+ drm_gem_unpin_locked(obj);
+ dma_resv_unlock(obj->resv);
+}
+
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1209,6 +1227,18 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
+void drm_gem_lock(struct drm_gem_object *obj)
+{
+ dma_resv_lock(obj->resv, NULL);
+}
+EXPORT_SYMBOL(drm_gem_lock);
+
+void drm_gem_unlock(struct drm_gem_object *obj)
+{
+ dma_resv_unlock(obj->resv);
+}
+EXPORT_SYMBOL(drm_gem_unlock);
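
As a usage sketch (not part of the patch), the new lock helpers let a caller hold the reservation lock across the whole map/use/unmap sequence, like drm_client_buffer_vmap_local() above; my_fill_bo is hypothetical.

#include <linux/iosys-map.h>

#include <drm/drm_gem.h>

/* Hypothetical helper: fill a GEM object under its reservation lock. */
static int my_fill_bo(struct drm_gem_object *obj, u8 value)
{
        struct iosys_map map;
        int ret;

        drm_gem_lock(obj);

        ret = drm_gem_vmap(obj, &map);
        if (ret)
                goto out_unlock;

        iosys_map_memset(&map, 0, value, obj->size);
        drm_gem_vunmap(obj, &map);

out_unlock:
        drm_gem_unlock(obj);
        return ret;
}
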
+
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e440f458b663..93337543aac3 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
- drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
- &new_shadow_plane_state->fmtcnv_state);
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e435f986cd13..177773bcdbfd 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,7 +10,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -228,7 +227,7 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
-static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
int ret;
@@ -238,13 +237,15 @@ static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
return ret;
}
+EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
-static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
drm_gem_shmem_put_pages(shmem);
}
+EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
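
For illustration, a sketch (not from this patch) of a driver using the newly exported _locked variant while it already holds the reservation lock, e.g. on a job-submission path; my_pin_for_hw is hypothetical.

#include <linux/dma-resv.h>

#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical: pin the backing pages under the reservation lock. */
static int my_pin_for_hw(struct drm_gem_shmem_object *shmem)
{
        int ret;

        dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_pin_locked(shmem);
        dma_resv_unlock(shmem->base.resv);

        return ret;
}
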
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 1ac284a9e8ee..6027584406af 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -282,6 +282,8 @@ static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
struct ttm_operation_ctx ctx = { false, false };
int ret;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (gbo->bo.pin_count)
goto out;
@@ -337,6 +339,8 @@ EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
+ dma_resv_assert_held(gbo->bo.base.resv);
+
ttm_bo_unpin(&gbo->bo);
}
@@ -363,11 +367,28 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
-static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
+/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ * @map: Returns the kernel virtual address of the VRAM GEM object's backing
+ * store.
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned objects cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * Returns:
+ * 0 on success, or a negative error code otherwise.
+ */
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
{
int ret;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (gbo->vmap_use_count > 0)
goto out;
@@ -388,12 +409,23 @@ out:
return 0;
}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
-static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @map: Kernel virtual address where the VRAM GEM object was mapped
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
+ struct iosys_map *map)
{
struct drm_device *dev = gbo->bo.base.dev;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
@@ -410,60 +442,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
* from memory. See drm_gem_vram_bo_driver_move_notify().
*/
}
-
-/**
- * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
- * space
- * @gbo: The GEM VRAM object to map
- * @map: Returns the kernel virtual address of the VRAM GEM object's backing
- * store.
- *
- * The vmap function pins a GEM VRAM object to its current location, either
- * system or video memory, and maps its buffer into kernel address space.
- * As pinned object cannot be relocated, you should avoid pinning objects
- * permanently. Call drm_gem_vram_vunmap() with the returned address to
- * unmap and unpin the GEM VRAM object.
- *
- * Returns:
- * 0 on success, or a negative error code otherwise.
- */
-int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
-{
- int ret;
-
- dma_resv_assert_held(gbo->bo.base.resv);
-
- ret = drm_gem_vram_pin_locked(gbo, 0);
- if (ret)
- return ret;
- ret = drm_gem_vram_kmap_locked(gbo, map);
- if (ret)
- goto err_drm_gem_vram_unpin_locked;
-
- return 0;
-
-err_drm_gem_vram_unpin_locked:
- drm_gem_vram_unpin_locked(gbo);
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_vram_vmap);
-
-/**
- * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
- * @gbo: The GEM VRAM object to unmap
- * @map: Kernel virtual address where the VRAM GEM object was mapped
- *
- * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
- * the documentation for drm_gem_vram_vmap() for more information.
- */
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
-{
- dma_resv_assert_held(gbo->bo.base.resv);
-
- drm_gem_vram_kunmap_locked(gbo, map);
- drm_gem_vram_unpin_locked(gbo);
-}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
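/*
 * Illustrative sketch, not part of the patch: with this change the caller is
 * responsible for holding the buffer's reservation lock around the vmap and
 * vunmap calls. The helper below is hypothetical and only shows the expected
 * calling convention.
 */
static int example_fill_vram_bo(struct drm_gem_vram_object *gbo)
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock(gbo->bo.base.resv, NULL);
	if (ret)
		return ret;

	ret = drm_gem_vram_vmap(gbo, &map); /* pins and maps the buffer */
	if (ret)
		goto out_unlock;

	/* ... access the backing store through the iosys_map helpers ... */

	drm_gem_vram_vunmap(gbo, &map); /* unmaps and unpins again */

out_unlock:
	dma_resv_unlock(gbo->bo.base.resv);
	return ret;
}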
/**
@@ -768,7 +746,8 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- /* Fbdev console emulation is the use case of these PRIME
+ /*
+ * Fbdev console emulation is the use case of these PRIME
* helpers. This may involve updating a hardware buffer from
* a shadow FB. We pin the buffer to its current location
* (either video RAM or system memory) to prevent it from
@@ -776,7 +755,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
* the buffer to be pinned to VRAM, implement a callback that
* sets the flags accordingly.
*/
- return drm_gem_vram_pin(gbo, 0);
+ return drm_gem_vram_pin_locked(gbo, 0);
}
/**
@@ -787,7 +766,7 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_unpin_locked(gbo);
}
/**
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8e4faf0a28e6..690505a1f7a5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,6 +21,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#ifndef __DRM_INTERNAL_H__
+#define __DRM_INTERNAL_H__
+
#include <linux/kthread.h>
#include <linux/types.h>
@@ -32,7 +35,6 @@
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-struct cea_sad;
struct dentry;
struct dma_buf;
struct iosys_map;
@@ -170,6 +172,8 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
+int drm_gem_pin_locked(struct drm_gem_object *obj);
+void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
@@ -273,6 +277,4 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
void drm_framebuffer_debugfs_init(struct drm_device *dev);
-/* drm_edid.c */
-void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
-void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
+#endif /* __DRM_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index ef6e416522f8..795001bb7ff1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -645,29 +645,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
/**
- * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * mipi_dsi_compression_mode_ext() - enable/disable DSC on the peripheral
* @dsi: DSI peripheral device
* @enable: Whether to enable or disable the DSC
+ * @algo: Selected compression algorithm
+ * @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries
*
- * Enable or disable Display Stream Compression on the peripheral using the
- * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ * Enable or disable Display Stream Compression on the peripheral.
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector)
{
- /* Note: Needs updating for non-default PPS or algorithm */
- u8 tx[2] = { enable << 0, 0 };
+ u8 tx[2] = { };
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_COMPRESSION_MODE,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
- int ret = mipi_dsi_device_transfer(dsi, &msg);
+ int ret;
+
+ if (algo > 3 || pps_selector > 3)
+ return -EINVAL;
+
+ tx[0] = (enable << 0) |
+ (algo << 1) |
+ (pps_selector << 4);
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
+EXPORT_SYMBOL(mipi_dsi_compression_mode_ext);
+
+/**
+ * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * @dsi: DSI peripheral device
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+{
+ return mipi_dsi_compression_mode_ext(dsi, enable, MIPI_DSI_COMPRESSION_DSC, 0);
+}
EXPORT_SYMBOL(mipi_dsi_compression_mode);
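/*
 * Illustrative sketch, not part of the patch: a panel driver that needs a
 * non-default PPS table entry can call the extended helper directly. The
 * function name and the PPS selector value 1 are made up for the example;
 * mipi_dsi_compression_mode() keeps the old behaviour (DSC, selector 0).
 */
static int example_panel_enable_dsc(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_compression_mode_ext(dsi, true,
					     MIPI_DSI_COMPRESSION_DSC, 1);
}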
/**
@@ -679,8 +706,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps)
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 48fd2d67f352..568972258222 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -372,6 +372,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.modifiers_property = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "SIZE_HINTS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.size_hints_property = prop;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c4f88c3a93b7..2d8b0371619d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -373,8 +373,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hact_duration_ns < params->hact_ns.min ||
hact_duration_ns > params->hact_ns.max)) {
- DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
- hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
+ drm_err(dev, "Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
+ hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
return -EINVAL;
}
@@ -385,8 +385,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hblk_duration_ns < params->hblk_ns.min ||
hblk_duration_ns > params->hblk_ns.max)) {
- DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
- hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
+ drm_err(dev, "Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
+ hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
return -EINVAL;
}
@@ -397,8 +397,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hslen_duration_ns < params->hslen_ns.min ||
hslen_duration_ns > params->hslen_ns.max)) {
- DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
- hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
+ drm_err(dev, "Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
+ hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
return -EINVAL;
}
@@ -409,7 +409,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
- DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
+ drm_err(dev, "Invalid horizontal porches duration: %uns\n",
+ porches_duration_ns);
return -EINVAL;
}
@@ -431,8 +432,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hfp_duration_ns < params->hfp_ns.min ||
hfp_duration_ns > params->hfp_ns.max)) {
- DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
- hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
+ drm_err(dev, "Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
+ hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
return -EINVAL;
}
@@ -443,8 +444,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hbp_duration_ns < params->hbp_ns.min ||
hbp_duration_ns > params->hbp_ns.max)) {
- DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
- hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
+ drm_err(dev, "Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
+ hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
return -EINVAL;
}
@@ -495,8 +496,8 @@ static int fill_analog_mode(struct drm_device *dev,
vtotal = vactive + vfp + vslen + vbp;
if (params->num_lines != vtotal) {
- DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
- vtotal, params->num_lines);
+ drm_err(dev, "Invalid vertical total: %upx (expected %upx)\n",
+ vtotal, params->num_lines);
return -EINVAL;
}
@@ -1200,9 +1201,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%pOF: got %dx%d display mode\n",
- np, vm.hactive, vm.vactive);
- drm_mode_debug_printmodeline(dmode);
+ pr_debug("%pOF: got %dx%d display mode: " DRM_MODE_FMT "\n",
+ np, vm.hactive, vm.vactive, DRM_MODE_ARG(dmode));
return 0;
}
@@ -1250,7 +1250,7 @@ int of_get_drm_panel_display_mode(struct device_node *np,
dmode->width_mm = width_mm;
dmode->height_mm = height_mm;
- drm_mode_debug_printmodeline(dmode);
+ pr_debug(DRM_MODE_FMT "\n", DRM_MODE_ARG(dmode));
return 0;
}
@@ -1812,10 +1812,8 @@ void drm_mode_prune_invalid(struct drm_device *dev,
DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
if (verbose) {
- drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode: %s\n",
- mode->name,
- drm_get_mode_status_name(mode->status));
+ drm_dbg_kms(dev, "Rejected mode: " DRM_MODE_FMT " (%s)\n",
+ DRM_MODE_ARG(mode), drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
new file mode 100644
index 000000000000..7ece67086cec
--- /dev/null
+++ b/drivers/gpu/drm/drm_panic.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ * inspired by the drm_log driver from David Herrmann <dh.herrmann@gmail.com>
+ * Tux Ascii art taken from cowsay written by Tony Monroe
+ */
+
+#include <linux/font.h>
+#include <linux/iosys-map.h>
+#include <linux/kdebug.h>
+#include <linux/kmsg_dump.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+
+MODULE_AUTHOR("Jocelyn Falempe");
+MODULE_DESCRIPTION("DRM panic handler");
+MODULE_LICENSE("GPL");
+
+/**
+ * DOC: overview
+ *
+ * To enable DRM panic for a driver, the primary plane must implement a
+ * &drm_plane_helper_funcs.get_scanout_buffer helper function. It is then
+ * automatically registered with the DRM panic handler.
+ * When a panic occurs, the &drm_plane_helper_funcs.get_scanout_buffer will be
+ * called, and the driver can provide a framebuffer so the panic handler can
+ * draw the panic screen on it. Currently only linear buffers and a few color
+ * formats are supported.
+ * Optionally the driver can also provide a &drm_plane_helper_funcs.panic_flush
+ * callback, which will be called afterwards to send additional commands to
+ * the hardware and make the scanout buffer visible.
+ */
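
/*
 * Illustrative driver-side sketch, not part of the patch: a driver whose
 * primary plane scans out of a permanently kernel-mapped linear buffer could
 * hook into the panic handler roughly as below. All "example_*" names and
 * fields are hypothetical; the struct drm_scanout_buffer members match the
 * ones used further down in this file.
 */
static int example_get_scanout_buffer(struct drm_plane *plane,
				      struct drm_scanout_buffer *sb)
{
	struct example_device *edev = to_example_device(plane->dev);

	/* Must not allocate or take locks here; reuse a pre-existing mapping. */
	if (iosys_map_is_null(&edev->fb_map))
		return -ENODEV;

	sb->format = edev->fb->format;
	sb->width = edev->fb->width;
	sb->height = edev->fb->height;
	sb->pitch[0] = edev->fb->pitches[0];
	sb->map[0] = edev->fb_map;

	return 0;
}

static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
	/* ... the usual atomic helpers ... */
	.get_scanout_buffer = example_get_scanout_buffer,
};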
+
+/*
+ * This module displays a user-friendly message on screen when a kernel panic
+ * occurs. This conflicts with fbcon, so it can only be enabled when fbcon is
+ * disabled.
+ * It's intended for end users, so it shows minimal technical/debug information.
+ *
+ * Implementation details:
+ *
+ * It is a panic handler, so it can't take locks, allocate memory, run
+ * tasks/IRQs, or attempt to sleep. It's best effort, and it may not be able
+ * to display the message in all situations (e.g. if the panic occurs in the
+ * middle of a modeset).
+ * It will display only one static frame, so performance optimizations are a
+ * low priority as the machine is already in an unusable state.
+ */
+
+struct drm_panic_line {
+ u32 len;
+ const char *txt;
+};
+
+#define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s}
+
+static struct drm_panic_line panic_msg[] = {
+ PANIC_LINE("KERNEL PANIC !"),
+ PANIC_LINE(""),
+ PANIC_LINE("Please reboot your computer."),
+};
+
+static const struct drm_panic_line logo[] = {
+ PANIC_LINE(" .--. _"),
+ PANIC_LINE(" |o_o | | |"),
+ PANIC_LINE(" |:_/ | | |"),
+ PANIC_LINE(" // \\ \\ |_|"),
+ PANIC_LINE(" (| | ) _"),
+ PANIC_LINE(" /'\\_ _/`\\ (_)"),
+ PANIC_LINE(" \\___)=(___/"),
+};
+
+/*
+ * Color conversion
+ */
+
+static u16 convert_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
+}
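/*
 * Worked example (added for illustration): XRGB8888 white, 0x00ffffff, packs
 * to 0xf800 | 0x07e0 | 0x001f = 0xffff, i.e. the top 5/6/5 bits of the red,
 * green and blue channels are kept and the low bits are dropped.
 */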
+
+static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+}
+
+static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u32 convert_xrgb8888_to_argb8888(u32 pix)
+{
+ return pix | GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+}
+
+static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+/*
+ * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+ * @color: input color, in xrgb8888 format
+ * @format: output format
+ *
+ * Returns:
+ * Color in the specified format, cast to u32,
+ * or 0 if the format is not supported.
+ */
+static u32 convert_from_xrgb8888(u32 color, u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ return convert_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+ return convert_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+ return convert_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+ return convert_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+ return convert_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+ return convert_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+ return convert_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+ return convert_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+ return convert_xrgb8888_to_argb2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+ }
+}
+
+/*
+ * Blit & Fill
+ */
+static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u16 fg16, u16 bg16)
+{
+ unsigned int y, x;
+ u16 val16;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ val16 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg16 : bg16;
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, val16);
+ }
+ }
+}
+
+static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg32, u32 bg32)
+{
+ unsigned int y, x;
+ u32 val32;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ u32 off = y * dpitch + x * 3;
+
+ val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg32 : bg32;
+
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (val32 & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (val32 & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (val32 & 0x00FF0000) >> 16);
+ }
+ }
+}
+
+static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg32, u32 bg32)
+{
+ unsigned int y, x;
+ u32 val32;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg32 : bg32;
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, val32);
+ }
+ }
+}
+
+/*
+ * drm_panic_blit - convert a monochrome image to a linear framebuffer
+ * @dmap: destination iosys_map
+ * @dpitch: destination pitch in bytes
+ * @sbuf8: source buffer, in monochrome format, 8 pixels per byte.
+ * @spitch: source pitch in bytes
+ * @height: height of the image to copy, in pixels
+ * @width: width of the image to copy, in pixels
+ * @fg_color: foreground color, in destination format
+ * @bg_color: background color, in destination format
+ * @pixel_width: pixel width in bytes.
+ *
+ * This can be used to draw a font character, which is a monochrome image, to a
+ * framebuffer in any other supported format.
+ */
+static void drm_panic_blit(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg_color, u32 bg_color,
+ unsigned int pixel_width)
+{
+ switch (pixel_width) {
+ case 2:
+ drm_panic_blit16(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ case 3:
+ drm_panic_blit24(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ case 4:
+ drm_panic_blit32(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", pixel_width);
+ }
+}
+
+static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+}
+
+static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ unsigned int off = y * dpitch + x * 3;
+
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+ }
+ }
+}
+
+static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+}
+
+/*
+ * drm_panic_fill - Fill a rectangle with a color
+ * @dmap: destination iosys_map, pointing to the top left corner of the rectangle
+ * @dpitch: destination pitch in bytes
+ * @height: height of the rectangle, in pixels
+ * @width: width of the rectangle, in pixels
+ * @color: color to fill the rectangle.
+ * @pixel_width: pixel width in bytes
+ *
+ * Fill a rectangle with a color, in a linear framebuffer.
+ */
+static void drm_panic_fill(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color, unsigned int pixel_width)
+{
+ switch (pixel_width) {
+ case 2:
+ drm_panic_fill16(dmap, dpitch, height, width, color);
+ break;
+ case 3:
+ drm_panic_fill24(dmap, dpitch, height, width, color);
+ break;
+ case 4:
+ drm_panic_fill32(dmap, dpitch, height, width, color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't fill with pixel width %d\n", pixel_width);
+ }
+}
+
+static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
+{
+ return font->data + (c * font->height) * font_pitch;
+}
+
+static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
+{
+ int i;
+ unsigned int max = 0;
+
+ for (i = 0; i < len; i++)
+ max = max(lines[i].len, max);
+ return max;
+}
+
+/*
+ * Draw text in a rectangle on a framebuffer. The text is truncated if it overflows the rectangle.
+ */
+static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
+ const struct font_desc *font,
+ const struct drm_panic_line *msg,
+ unsigned int msg_lines,
+ bool centered,
+ struct drm_rect *clip,
+ u32 fg_color,
+ u32 bg_color)
+{
+ int i, j;
+ const u8 *src;
+ size_t font_pitch = DIV_ROUND_UP(font->width, 8);
+ struct iosys_map dst;
+ unsigned int px_width = sb->format->cpp[0];
+ int left = 0;
+
+ msg_lines = min(msg_lines, drm_rect_height(clip) / font->height);
+ for (i = 0; i < msg_lines; i++) {
+ size_t line_len = min(msg[i].len, drm_rect_width(clip) / font->width);
+
+ if (centered)
+ left = (drm_rect_width(clip) - (line_len * font->width)) / 2;
+
+ dst = sb->map[0];
+ iosys_map_incr(&dst, (clip->y1 + i * font->height) * sb->pitch[0] +
+ (clip->x1 + left) * px_width);
+ for (j = 0; j < line_len; j++) {
+ src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ drm_panic_blit(&dst, sb->pitch[0], src, font_pitch,
+ font->height, font->width,
+ fg_color, bg_color, px_width);
+ iosys_map_incr(&dst, font->width * px_width);
+ }
+ }
+}
+
+/*
+ * Draw the panic message at the center of the screen
+ */
+static void draw_panic_static(struct drm_scanout_buffer *sb)
+{
+ size_t msg_lines = ARRAY_SIZE(panic_msg);
+ size_t logo_lines = ARRAY_SIZE(logo);
+ u32 fg_color = CONFIG_DRM_PANIC_FOREGROUND_COLOR;
+ u32 bg_color = CONFIG_DRM_PANIC_BACKGROUND_COLOR;
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_logo, r_msg;
+
+ if (!font)
+ return;
+
+ fg_color = convert_from_xrgb8888(fg_color, sb->format->format);
+ bg_color = convert_from_xrgb8888(bg_color, sb->format->format);
+
+ r_logo = DRM_RECT_INIT(0, 0,
+ get_max_line_len(logo, logo_lines) * font->width,
+ logo_lines * font->height);
+ r_msg = DRM_RECT_INIT(0, 0,
+ min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width),
+ min(msg_lines * font->height, sb->height));
+
+ /* Center the panic message */
+ drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2);
+
+ /* Fill with the background color, and draw text on top */
+ drm_panic_fill(&sb->map[0], sb->pitch[0], sb->height, sb->width,
+ bg_color, sb->format->cpp[0]);
+
+ if ((r_msg.x1 >= drm_rect_width(&r_logo) || r_msg.y1 >= drm_rect_height(&r_logo)) &&
+ drm_rect_width(&r_logo) < sb->width && drm_rect_height(&r_logo) < sb->height) {
+ draw_txt_rectangle(sb, font, logo, logo_lines, false, &r_logo, fg_color, bg_color);
+ }
+ draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color, bg_color);
+}
+
+/*
+ * drm_panic_is_format_supported()
+ * @format: a fourcc color code
+ * Returns: true if supported, false otherwise.
+ *
+ * Check if drm_panic will be able to use this color format.
+ */
+static bool drm_panic_is_format_supported(const struct drm_format_info *format)
+{
+ if (format->num_planes != 1)
+ return false;
+ return convert_from_xrgb8888(0xffffff, format->format) != 0;
+}
+
+static void draw_panic_plane(struct drm_plane *plane)
+{
+ struct drm_scanout_buffer sb;
+ int ret;
+ unsigned long flags;
+
+ if (!drm_panic_trylock(plane->dev, flags))
+ return;
+
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (!ret && drm_panic_is_format_supported(sb.format)) {
+ draw_panic_static(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
+ }
+ drm_panic_unlock(plane->dev, flags);
+}
+
+static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd)
+{
+ return container_of(kd, struct drm_plane, kmsg_panic);
+}
+
+static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
+{
+ struct drm_plane *plane = to_drm_plane(dumper);
+
+ if (reason == KMSG_DUMP_PANIC)
+ draw_panic_plane(plane);
+}
+
+
+/*
+ * Debugfs support. This is currently unsafe.
+ * Create one file per plane, so it's possible to debug one plane at a time.
+ * TODO: It would be better to emulate an NMI context.
+ */
+#ifdef CONFIG_DRM_PANIC_DEBUG
+#include <linux/debugfs.h>
+
+static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ bool run;
+
+ if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) {
+ struct drm_plane *plane = file->private_data;
+
+ draw_panic_plane(plane);
+ }
+ return count;
+}
+
+static const struct file_operations dbg_drm_panic_ops = {
+ .owner = THIS_MODULE,
+ .write = debugfs_trigger_write,
+ .open = simple_open,
+};
+
+static void debugfs_register_plane(struct drm_plane *plane, int index)
+{
+ char fname[32];
+
+ snprintf(fname, 32, "drm_panic_plane_%d", index);
+ debugfs_create_file(fname, 0200, plane->dev->debugfs_root,
+ plane, &dbg_drm_panic_ops);
+}
+#else
+static void debugfs_register_plane(struct drm_plane *plane, int index) {}
+#endif /* CONFIG_DRM_PANIC_DEBUG */
+
+/**
+ * drm_panic_register() - Initialize DRM panic for a device
+ * @dev: the drm device on which the panic screen will be displayed.
+ */
+void drm_panic_register(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+ int registered_plane = 0;
+
+ if (!dev->mode_config.num_total_plane)
+ return;
+
+ drm_for_each_plane(plane, dev) {
+ if (!plane->helper_private || !plane->helper_private->get_scanout_buffer)
+ continue;
+ plane->kmsg_panic.dump = drm_panic;
+ plane->kmsg_panic.max_reason = KMSG_DUMP_PANIC;
+ if (kmsg_dump_register(&plane->kmsg_panic))
+ drm_warn(dev, "Failed to register panic handler\n");
+ else {
+ debugfs_register_plane(plane, registered_plane);
+ registered_plane++;
+ }
+ }
+ if (registered_plane)
+ drm_info(dev, "Registered %d planes with drm panic\n", registered_plane);
+}
+EXPORT_SYMBOL(drm_panic_register);
+
+/**
+ * drm_panic_unregister() - Remove the DRM panic handlers of a device
+ * @dev: the drm device previously registered.
+ */
+void drm_panic_unregister(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ if (!dev->mode_config.num_total_plane)
+ return;
+
+ drm_for_each_plane(plane, dev) {
+ if (!plane->helper_private || !plane->helper_private->get_scanout_buffer)
+ continue;
+ kmsg_dump_unregister(&plane->kmsg_panic);
+ }
+}
+EXPORT_SYMBOL(drm_panic_unregister);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 672c655c7a8e..57662a1fd345 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -140,6 +140,25 @@
* DRM_FORMAT_MOD_LINEAR. Before linux kernel release v5.1 there have been
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
+ *
+ * SIZE_HINTS:
+ * Blob property which contains the set of recommended plane sizes
+ * which can be used for simple "cursor like" use cases (eg. no scaling).
+ * Using these hints frees userspace from extensive probing of
+ * supported plane sizes through atomic/setcursor ioctls.
+ *
+ * The blob contains an array of struct drm_plane_size_hint, in
+ * order of preference. For optimal usage userspace should pick
+ * the first size that satisfies its own requirements.
+ *
+ * Drivers should only attach this property to planes that
+ * support a very limited set of sizes.
+ *
+ * Note that property value 0 (ie. no blob) is reserved for potential
+ * future use. Current userspace is expected to ignore the property
+ * if the value is 0, and fall back to some other means (eg.
+ * &DRM_CAP_CURSOR_WIDTH and &DRM_CAP_CURSOR_HEIGHT) to determine
+ * the appropriate plane size to use.
*/
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -1729,3 +1748,40 @@ int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_create_scaling_filter_property);
+
+/**
+ * drm_plane_add_size_hints_property - create a size hints property
+ *
+ * @plane: drm plane
+ * @hints: size hints
+ * @num_hints: number of size hints
+ *
+ * Create a size hints property for the plane.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_add_size_hints_property(struct drm_plane *plane,
+ const struct drm_plane_size_hint *hints,
+ int num_hints)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
+
+ /* extending to other plane types needs actual thought */
+ if (drm_WARN_ON(dev, plane->type != DRM_PLANE_TYPE_CURSOR))
+ return -EINVAL;
+
+ blob = drm_property_create_blob(dev,
+ array_size(sizeof(hints[0]), num_hints),
+ hints);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_object_attach_property(&plane->base, config->size_hints_property,
+ blob->base.id);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_add_size_hints_property);
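/*
 * Illustrative sketch, not part of the patch: a driver whose cursor plane only
 * supports a handful of sizes could advertise them as below. The sizes and
 * "example_*" names are made up, and struct drm_plane_size_hint is assumed to
 * carry a width/height pair as described in the SIZE_HINTS property docs.
 */
static const struct drm_plane_size_hint example_cursor_size_hints[] = {
	{ .width = 64,  .height = 64 },
	{ .width = 128, .height = 128 },
	{ .width = 256, .height = 256 },
};

static int example_add_cursor_size_hints(struct drm_plane *cursor)
{
	return drm_plane_add_size_hints_property(cursor,
						 example_cursor_size_hints,
						 ARRAY_SIZE(example_cursor_size_hints));
}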
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7352bde299d5..03bd3c7bd0dc 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
- if (!obj->funcs->get_sg_table)
+ /*
+ * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
+ * that implement their own ->map_dma_buf() do not.
+ */
+ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
+ !obj->funcs->get_sg_table)
return -ENOSYS;
return drm_gem_pin(obj);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 699b7dbffd7b..cf2efb44722c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,13 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#include <linux/stdarg.h>
-
+#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/dynamic_debug.h>
+#include <linux/stdarg.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bf2dd1f46b6c..4f75a1cfd820 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -566,8 +567,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_modeset_acquire_init(&ctx, 0);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
@@ -610,11 +611,10 @@ retry:
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
/*
* The hotplug event code might call into the fb
@@ -637,8 +637,8 @@ retry:
drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, connector->name);
drm_connector_update_edid_property(connector, NULL);
drm_mode_prune_invalid(dev, &connector->modes, false);
goto exit;
@@ -696,11 +696,13 @@ exit:
drm_mode_sort(&connector->modes);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] probed modes:\n",
+ connector->base.id, connector->name);
+
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_debug_printmodeline(mode);
+ drm_dbg_kms(dev, "Probed mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(mode));
}
return count;
@@ -833,14 +835,12 @@ static void output_poll_execute(struct work_struct *work)
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
- "status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- old, new);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
- connector->base.id, connector->name,
- old_epoch_counter, connector->epoch_counter);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id, connector->name,
+ old, new);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
+ connector->base.id, connector->name,
+ old_epoch_counter, connector->epoch_counter);
changed = true;
}
@@ -951,6 +951,32 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
+{
+ drm_kms_helper_poll_fini(dev);
+}
+
+/**
+ * drmm_kms_helper_poll_init - initialize and enable output polling
+ * @dev: drm_device
+ *
+ * This function initializes and then also enables output polling support for
+ * @dev similar to drm_kms_helper_poll_init(). Polling will automatically be
+ * cleaned up when the DRM device goes away.
+ *
+ * See drm_kms_helper_poll_init() for more information.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drmm_kms_helper_poll_init(struct drm_device *dev)
+{
+ drm_kms_helper_poll_init(dev);
+
+ return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+}
+EXPORT_SYMBOL(drmm_kms_helper_poll_init);
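/*
 * Illustrative sketch, not part of the patch: with the managed variant a
 * driver no longer needs an explicit drm_kms_helper_poll_fini() call in its
 * teardown path. The function below is hypothetical.
 */
static int example_modeset_init(struct drm_device *dev)
{
	/* ... create connectors, encoders, planes and CRTCs ... */

	return drmm_kms_helper_poll_init(dev);
}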
+
static bool check_connector_changed(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1279,3 +1305,32 @@ int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
return i;
}
EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
+
+/**
+ * drm_connector_helper_detect_from_ddc - Read EDID and detect connector status.
+ * @connector: The connector
+ * @ctx: Acquire context
+ * @force: Perform screen-destructive operations, if necessary
+ *
+ * Detects the connector status by reading the EDID using drm_probe_ddc(),
+ * which requires connector->ddc to be set. Returns connector_status_connected
+ * on success or connector_status_disconnected on failure.
+ *
+ * Returns:
+ * The connector status as defined by enum drm_connector_status.
+ */
+int drm_connector_helper_detect_from_ddc(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct i2c_adapter *ddc = connector->ddc;
+
+ if (!ddc)
+ return connector_status_unknown;
+
+ if (drm_probe_ddc(ddc))
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+EXPORT_SYMBOL(drm_connector_helper_detect_from_ddc);
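/*
 * Illustrative sketch, not part of the patch: connectors that registered an
 * i2c DDC adapter via connector->ddc can plug the new helper straight into
 * their .detect_ctx hook; the other callbacks remain driver specific.
 */
static const struct drm_connector_helper_funcs example_connector_helper_funcs = {
	/* ... .get_modes and friends ... */
	.detect_ctx = drm_connector_helper_detect_from_ddc,
};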
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index a953f69a34b6..bd9b8ab4f82b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -209,10 +209,9 @@ static ssize_t status_store(struct device *device,
ret = -EINVAL;
if (old_force != connector->force || !connector->force) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
- connector->base.id,
- connector->name,
- old_force, connector->force);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
+ connector->base.id, connector->name,
+ old_force, connector->force);
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
@@ -383,8 +382,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
if (r)
goto err_free;
- DRM_DEBUG("adding \"%s\" to sysfs\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] adding connector to sysfs\n",
+ connector->base.id, connector->name);
r = device_add(kdev);
if (r) {
@@ -430,8 +429,9 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
if (dev_fwnode(connector->kdev))
component_del(connector->kdev, &typec_connector_ops);
- DRM_DEBUG("removing \"%s\" from sysfs\n",
- connector->name);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] removing connector from sysfs\n",
+ connector->base.id, connector->name);
device_unregister(connector->kdev);
connector->kdev = NULL;
@@ -442,7 +442,7 @@ void drm_sysfs_lease_event(struct drm_device *dev)
char *event_string = "LEASE=1";
char *envp[] = { event_string, NULL };
- DRM_DEBUG("generating lease event\n");
+ drm_dbg_lease(dev, "generating lease event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
@@ -463,7 +463,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
char *event_string = "HOTPLUG=1";
char *envp[] = { event_string, NULL };
- DRM_DEBUG("generating hotplug event\n");
+ drm_dbg_kms(dev, "generating hotplug event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 702a12bc93bd..cc3571e25a9a 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -166,11 +166,24 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+static struct drm_vblank_crtc *
+drm_vblank_crtc(struct drm_device *dev, unsigned int pipe)
+{
+ return &dev->vblank[pipe];
+}
+
+struct drm_vblank_crtc *
+drm_crtc_vblank_crtc(struct drm_crtc *crtc)
+{
+ return drm_vblank_crtc(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_crtc);
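/*
 * Illustrative sketch, not part of the patch: driver code that used to
 * open-code &crtc->dev->vblank[drm_crtc_index(crtc)] can use the new accessor
 * instead. The wrapper below is hypothetical.
 */
static bool example_crtc_vblank_enabled(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	return READ_ONCE(vblank->enabled);
}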
+
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
ktime_t t_vblank, u32 last)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
assert_spin_locked(&dev->vblank_time_lock);
@@ -184,7 +197,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
return vblank->max_vblank_count ?: dev->max_vblank_count;
}
@@ -273,7 +286,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u32 cur_vblank, diff;
bool rc;
ktime_t t_vblank;
@@ -364,7 +377,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u64 count;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
@@ -438,7 +451,7 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
*/
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
assert_spin_locked(&dev->vbl_lock);
@@ -600,7 +613,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
int linedur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
@@ -930,7 +943,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
ktime_t *vblanktime)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u64 vblank_count;
unsigned int seq;
@@ -985,7 +998,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
*/
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
- unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank;
struct drm_display_mode *mode;
u64 vblank_start;
@@ -993,7 +1005,7 @@ int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
if (!drm_dev_has_vblank(crtc->dev))
return -EINVAL;
- vblank = &crtc->dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
mode = &vblank->hwmode;
if (!vblank->framedur_ns || !vblank->linedur_ns)
@@ -1147,7 +1159,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
@@ -1185,7 +1197,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
int ret = 0;
@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1274,7 +1286,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
*/
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
int ret;
u64 last;
@@ -1327,7 +1339,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1405,8 +1417,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
spin_lock_irq(&dev->vbl_lock);
/*
@@ -1445,8 +1456,7 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
drm_WARN_ON(dev, dev->max_vblank_count);
drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));
@@ -1469,7 +1479,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1512,7 +1522,7 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vbl_lock);
assert_spin_locked(&dev->vblank_time_lock);
- vblank = &dev->vblank[pipe];
+ vblank = drm_vblank_crtc(dev, pipe);
drm_WARN_ONCE(dev,
drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
@@ -1564,7 +1574,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
struct drm_pending_vblank_event *e;
ktime_t now;
u64 seq;
@@ -1872,7 +1882,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
*/
bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
bool disable_irq;
@@ -1981,7 +1991,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
- vblank = &dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
@@ -2046,7 +2056,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
- vblank = &dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 43cd5c0f4f6f..4fe9b1d3b00f 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -245,7 +245,7 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
{
kthread_init_work(&work->base, func);
INIT_LIST_HEAD(&work->node);
- work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+ work->vblank = drm_crtc_vblank_crtc(crtc);
}
EXPORT_SYMBOL(drm_vblank_work_init);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 734412aae94d..a9bf426f69b3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
- case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
- *value = gpu->identity.nn_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
- *value = gpu->identity.nn_mad_per_core;
- break;
-
- case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
- *value = gpu->identity.tp_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
- *value = gpu->identity.on_chip_sram_size;
- break;
-
- case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
- *value = gpu->identity.axi_sram_size;
- break;
-
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7d5e9158e13c..197e0037732e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
- /* Number of MAD units per Neural Network core. */
- u32 nn_mad_per_core;
-
- /* Number of Tensor Processing cores. */
- u32 tp_core_count;
-
- /* Size in bytes of the SRAM inside the NPU. */
- u32 on_chip_sram_size;
-
- /* Size in bytes of the SRAM across the AXI bus. */
- u32 axi_sram_size;
-
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index d8e7334de8ce..8665f2658d51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
- .nn_mad_per_core = 64,
- .tp_core_count = 4,
- .on_chip_sram_size = 524288,
- .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
- .nn_mad_per_core = 64,
- .tp_core_count = 3,
- .on_chip_sram_size = 262144,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 2fe0e5f3f638..bf16deaae68b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -184,7 +184,6 @@ struct platform_driver dsi_driver = {
.remove_new = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
- .owner = THIS_MODULE,
.pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e81a576de398..142184c8c3bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1412,7 +1412,6 @@ struct platform_driver fimc_driver = {
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
- .owner = THIS_MODULE,
.pm = pm_ptr(&fimc_pm_ops),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index f2145227a1e0..f57df8c48139 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1326,7 +1326,6 @@ struct platform_driver fimd_driver = {
.remove_new = fimd_remove,
.driver = {
.name = "exynos4-fb",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f3138423612e..3a3b2c00e400 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1610,7 +1610,6 @@ struct platform_driver g2d_driver = {
.remove_new = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
- .owner = THIS_MODULE,
.pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 180507a47700..1b111e2c3347 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1423,7 +1423,6 @@ struct platform_driver gsc_driver = {
.remove_new = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
- .owner = THIS_MODULE,
.pm = &gsc_pm_ops,
.of_match_table = exynos_drm_gsc_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e2920960180f..d61ec451807c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -464,7 +464,6 @@ struct platform_driver mic_driver = {
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
- .owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 5f7516655b08..2eb0b701672f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -454,7 +454,6 @@ struct platform_driver rotator_driver = {
.remove_new = rotator_remove,
.driver = {
.name = "exynos-rotator",
- .owner = THIS_MODULE,
.pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 392f721f13ab..a9d469896824 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -722,7 +722,6 @@ struct platform_driver scaler_driver = {
.remove_new = scaler_remove,
.driver = {
.name = "exynos-scaler",
- .owner = THIS_MODULE,
.pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5bbba9ad225..fab135308b70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -479,7 +479,6 @@ struct platform_driver vidi_driver = {
.remove_new = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
- .owner = THIS_MODULE,
.dev_groups = vidi_groups,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b1d02dec3774..e968824a4c72 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1919,10 +1919,9 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
static int hdmi_get_phy_io(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4212-hdmiphy";
- struct device_node *np;
- int ret = 0;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, compatible_str);
- np = of_find_compatible_node(NULL, NULL, compatible_str);
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
@@ -1937,21 +1936,17 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (!hdata->regs_hdmiphy) {
DRM_DEV_ERROR(hdata->dev,
"failed to ioremap hdmi phy\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
} else {
hdata->hdmiphy_port = of_find_i2c_device_by_node(np);
if (!hdata->hdmiphy_port) {
DRM_INFO("Failed to get hdmi phy i2c client\n");
- ret = -EPROBE_DEFER;
- goto out;
+ return -EPROBE_DEFER;
}
}
-out:
- of_node_put(np);
- return ret;
+ return 0;
}
static int hdmi_probe(struct platform_device *pdev)
@@ -2126,7 +2121,6 @@ struct platform_driver hdmi_driver = {
.remove_new = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
- .owner = THIS_MODULE,
.pm = &exynos_hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6822333fd0e6..1db955f00044 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1331,7 +1331,6 @@ static const struct dev_pm_ops exynos_mixer_pm_ops = {
struct platform_driver mixer_driver = {
.driver = {
.name = "exynos-mixer",
- .owner = THIS_MODULE,
.pm = &exynos_mixer_pm_ops,
.of_match_table = mixer_match_types,
},
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e1a6..58fed80c7392 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index a70b01ccdf70..4d78b33eaa82 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -5,6 +5,7 @@
**************************************************************************/
#include <linux/highmem.h>
+#include <linux/vmalloc.h>
#include "mmu.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index d974d0c60d2a..72191d6f0d06 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -11,8 +11,6 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
-#include <asm/intel-mid.h>
-
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89d4a..6dece8f0e380 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa4cc..83c17689c454 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe392636..000000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 034e78360d4f..0f07d77c5d52 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -221,7 +221,7 @@ static int gud_connector_get_modes(struct drm_connector *connector)
struct gud_display_mode_req *reqmodes = NULL;
struct gud_connector_get_edid_ctx edid_ctx;
unsigned int i, num_modes = 0;
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
int idx, ret;
if (!drm_dev_enter(connector->dev, &idx))
@@ -238,13 +238,13 @@ static int gud_connector_get_modes(struct drm_connector *connector)
gud_conn_err(connector, "Invalid EDID size", ret);
} else if (ret > 0) {
edid_ctx.len = ret;
- edid = drm_do_get_edid(connector, gud_connector_get_edid_block, &edid_ctx);
+ drm_edid = drm_edid_read_custom(connector, gud_connector_get_edid_block, &edid_ctx);
}
kfree(edid_ctx.buf);
- drm_connector_update_edid_property(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
- if (edid && edid_ctx.edid_override)
+ if (drm_edid && edid_ctx.edid_override)
goto out;
reqmodes = kmalloc_array(GUD_CONNECTOR_MAX_NUM_MODES, sizeof(*reqmodes), GFP_KERNEL);
@@ -276,10 +276,10 @@ static int gud_connector_get_modes(struct drm_connector *connector)
}
out:
if (!num_modes)
- num_modes = drm_add_edid_modes(connector, edid);
+ num_modes = drm_edid_connector_add_modes(connector);
kfree(reqmodes);
- kfree(edid);
+ drm_edid_free(drm_edid);
drm_dev_exit(idx);
return num_modes;
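The gud_connector_get_modes() conversion above moves from the raw struct edid helpers to the opaque struct drm_edid API. Below is a rough sketch of the read/update/add-modes/free flow a .get_modes() implementation would follow; read_block_cb and ctx are placeholders for a transport-specific block reader, and the function name is invented.

#include <linux/types.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

/* Illustrative only: not the gud driver code. */
static int example_get_modes(struct drm_connector *connector,
			     int (*read_block_cb)(void *ctx, u8 *buf,
						  unsigned int block, size_t len),
			     void *ctx)
{
	const struct drm_edid *drm_edid;
	int num_modes;

	drm_edid = drm_edid_read_custom(connector, read_block_cb, ctx);

	/* Updates the EDID property and connector info, even for a NULL EDID. */
	drm_edid_connector_update(connector, drm_edid);

	/* Adds modes from the EDID previously attached to the connector. */
	num_modes = drm_edid_connector_add_modes(connector);

	drm_edid_free(drm_edid);

	return num_modes;
}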
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index bc18e2d9ea05..d8397065c3f0 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,8 +27,8 @@ config DRM_I915_DEBUG
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
- select DRM_DP_AUX_CHARDEV
- select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
+ select DRM_DISPLAY_DP_AUX_CHARDEV
+ select DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3ef6ed41e62b..c8c8b31da4fb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,16 +32,11 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
-# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
-subdir-ccflags-y += -I$(srctree)/$(src)
+subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -118,6 +113,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
@@ -270,6 +266,7 @@ i915-y += \
display/intel_display_rps.o \
display/intel_display_wa.o \
display/intel_dmc.o \
+ display/intel_dmc_wl.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
@@ -433,7 +430,7 @@ no-header-test := \
always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
- $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+ $(shell cd $(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \
diff --git a/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
new file mode 100644
index 000000000000..275f4d9c3fb0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __BXT_DPIO_PHY_REGS_H__
+#define __BXT_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* BXT PHY registers */
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define _BXT_PHY2_BASE 0x163000
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
+#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
+ ((lane) & 1) * 0x80)
+#define _MMIO_BXT_PHY_CH_LN(phy, ch, lane, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) + _BXT_LANE_OFFSET(lane))
+
+/* BXT PHY PLL registers */
+#define _PORT_PLL_A 0x46074
+#define _PORT_PLL_B 0x46078
+#define _PORT_PLL_C 0x4607c
+#define PORT_PLL_ENABLE REG_BIT(31)
+#define PORT_PLL_LOCK REG_BIT(30)
+#define PORT_PLL_REF_SEL REG_BIT(27)
+#define PORT_PLL_POWER_ENABLE REG_BIT(26)
+#define PORT_PLL_POWER_STATE REG_BIT(25)
+#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+
+#define _PORT_PLL_EBB_0_A 0x162034
+#define _PORT_PLL_EBB_0_B 0x6C034
+#define _PORT_PLL_EBB_0_C 0x6C340
+#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
+#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
+#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
+
+#define _PORT_PLL_EBB_4_A 0x162038
+#define _PORT_PLL_EBB_4_B 0x6C038
+#define _PORT_PLL_EBB_4_C 0x6C344
+#define PORT_PLL_RECALIBRATE REG_BIT(14)
+#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
+
+#define _PORT_PLL_0_A 0x162100
+#define _PORT_PLL_0_B 0x6C100
+#define _PORT_PLL_0_C 0x6C380
+/* PORT_PLL_0_A */
+#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
+#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
+/* PORT_PLL_1_A */
+#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
+#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
+/* PORT_PLL_2_A */
+#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
+#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
+/* PORT_PLL_3_A */
+#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
+/* PORT_PLL_6_A */
+#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
+#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
+#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
+#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
+/* PORT_PLL_8_A */
+#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
+/* PORT_PLL_9_A */
+#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
+/* PORT_PLL_10_A */
+#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
+#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
+#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
+
+/* BXT PHY common lane registers */
+#define _PORT_CL1CM_DW0_A 0x162000
+#define _PORT_CL1CM_DW0_BC 0x6C000
+#define PHY_POWER_GOOD REG_BIT(16)
+#define PHY_RESERVED REG_BIT(7)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
+
+#define _PORT_CL1CM_DW9_A 0x162024
+#define _PORT_CL1CM_DW9_BC 0x6C024
+#define IREF0RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF0RC_OFFSET(x) REG_FIELD_PREP(IREF0RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
+
+#define _PORT_CL1CM_DW10_A 0x162028
+#define _PORT_CL1CM_DW10_BC 0x6C028
+#define IREF1RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF1RC_OFFSET(x) REG_FIELD_PREP(IREF1RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+
+#define _PORT_CL1CM_DW28_A 0x162070
+#define _PORT_CL1CM_DW28_BC 0x6C070
+#define OCL1_POWER_DOWN_EN REG_BIT(23)
+#define DW28_OLDO_DYN_PWR_DOWN_EN REG_BIT(22)
+#define SUS_CLK_CONFIG REG_GENMASK(1, 0)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
+
+#define _PORT_CL1CM_DW30_A 0x162078
+#define _PORT_CL1CM_DW30_BC 0x6C078
+#define OCL2_LDOFUSE_PWR_DIS REG_BIT(6)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
+#define DW6_OLDO_DYN_PWR_DOWN_EN REG_BIT(28)
+
+/* BXT PHY Ref registers */
+#define _PORT_REF_DW3_A 0x16218C
+#define _PORT_REF_DW3_BC 0x6C18C
+#define GRC_DONE REG_BIT(22)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
+
+#define _PORT_REF_DW6_A 0x162198
+#define _PORT_REF_DW6_BC 0x6C198
+#define GRC_CODE_MASK REG_GENMASK(31, 24)
+#define GRC_CODE(x) REG_FIELD_PREP(GRC_CODE_MASK, (x))
+#define GRC_CODE_FAST_MASK REG_GENMASK(23, 16)
+#define GRC_CODE_FAST(x) REG_FIELD_PREP(GRC_CODE_FAST_MASK, (x))
+#define GRC_CODE_SLOW_MASK REG_GENMASK(15, 8)
+#define GRC_CODE_SLOW(x) REG_FIELD_PREP(GRC_CODE_SLOW_MASK, (x))
+#define GRC_CODE_NOM_MASK REG_GENMASK(7, 0)
+#define GRC_CODE_NOM(x) REG_FIELD_PREP(GRC_CODE_NOM_MASK, (x))
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
+
+#define _PORT_REF_DW8_A 0x1621A0
+#define _PORT_REF_DW8_BC 0x6C1A0
+#define GRC_DIS REG_BIT(15)
+#define GRC_RDY_OVRD REG_BIT(1)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
+
+/* BXT PHY PCS registers */
+#define _PORT_PCS_DW10_LN01_A 0x162428
+#define _PORT_PCS_DW10_LN01_B 0x6C428
+#define _PORT_PCS_DW10_LN01_C 0x6C828
+#define _PORT_PCS_DW10_GRP_A 0x162C28
+#define _PORT_PCS_DW10_GRP_B 0x6CC28
+#define _PORT_PCS_DW10_GRP_C 0x6CE28
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
+#define TX2_SWING_CALC_INIT REG_BIT(31)
+#define TX1_SWING_CALC_INIT REG_BIT(30)
+
+#define _PORT_PCS_DW12_LN01_A 0x162430
+#define _PORT_PCS_DW12_LN01_B 0x6C430
+#define _PORT_PCS_DW12_LN01_C 0x6C830
+#define _PORT_PCS_DW12_LN23_A 0x162630
+#define _PORT_PCS_DW12_LN23_B 0x6C630
+#define _PORT_PCS_DW12_LN23_C 0x6CA30
+#define _PORT_PCS_DW12_GRP_A 0x162c30
+#define _PORT_PCS_DW12_GRP_B 0x6CC30
+#define _PORT_PCS_DW12_GRP_C 0x6CE30
+#define LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define LANE_STAGGER_MASK REG_GENMASK(4, 0)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
+
+/* BXT PHY TX registers */
+#define _PORT_TX_DW2_LN0_A 0x162508
+#define _PORT_TX_DW2_LN0_B 0x6C508
+#define _PORT_TX_DW2_LN0_C 0x6C908
+#define _PORT_TX_DW2_GRP_A 0x162D08
+#define _PORT_TX_DW2_GRP_B 0x6CD08
+#define _PORT_TX_DW2_GRP_C 0x6CF08
+#define BXT_PORT_TX_DW2_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
+#define MARGIN_000_MASK REG_GENMASK(23, 16)
+#define MARGIN_000(x) REG_FIELD_PREP(MARGIN_000_MASK, (x))
+#define UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(UNIQ_TRANS_SCALE_MASK, (x))
+
+#define _PORT_TX_DW3_LN0_A 0x16250C
+#define _PORT_TX_DW3_LN0_B 0x6C50C
+#define _PORT_TX_DW3_LN0_C 0x6C90C
+#define _PORT_TX_DW3_GRP_A 0x162D0C
+#define _PORT_TX_DW3_GRP_B 0x6CD0C
+#define _PORT_TX_DW3_GRP_C 0x6CF0C
+#define BXT_PORT_TX_DW3_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
+#define SCALE_DCOMP_METHOD REG_BIT(26)
+#define UNIQUE_TRANGE_EN_METHOD REG_BIT(27)
+
+#define _PORT_TX_DW4_LN0_A 0x162510
+#define _PORT_TX_DW4_LN0_B 0x6C510
+#define _PORT_TX_DW4_LN0_C 0x6C910
+#define _PORT_TX_DW4_GRP_A 0x162D10
+#define _PORT_TX_DW4_GRP_B 0x6CD10
+#define _PORT_TX_DW4_GRP_C 0x6CF10
+#define BXT_PORT_TX_DW4_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
+#define DE_EMPHASIS_MASK REG_GENMASK(31, 24)
+#define DE_EMPHASIS(x) REG_FIELD_PREP(DE_EMPHASIS_MASK, (x))
+
+#define _PORT_TX_DW5_LN0_A 0x162514
+#define _PORT_TX_DW5_LN0_B 0x6C514
+#define _PORT_TX_DW5_LN0_C 0x6C914
+#define _PORT_TX_DW5_GRP_A 0x162D14
+#define _PORT_TX_DW5_GRP_B 0x6CD14
+#define _PORT_TX_DW5_GRP_C 0x6CF14
+#define BXT_PORT_TX_DW5_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW5_LN0_B, \
+ _PORT_TX_DW5_LN0_C)
+#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_GRP_B, \
+ _PORT_TX_DW5_GRP_C)
+#define DCC_DELAY_RANGE_1 REG_BIT(9)
+#define DCC_DELAY_RANGE_2 REG_BIT(8)
+
+#define _PORT_TX_DW14_LN0_A 0x162538
+#define _PORT_TX_DW14_LN0_B 0x6C538
+#define _PORT_TX_DW14_LN0_C 0x6C938
+#define LATENCY_OPTIM REG_BIT(30)
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C)
+
+#endif /* __BXT_DPIO_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dfe0b07a122d..06ec04e667e3 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index eda4a8b88590..79ecfc339430 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1615,8 +1616,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 2bb270f82932..7a77ae3dc394 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -62,7 +62,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_digital_connector_state *intel_conn_state =
+ const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
if (property == dev_priv->display.properties.force_audio)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 07e0c73204f3..ed81e1466c4b 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -76,19 +76,6 @@ struct intel_audio_funcs {
struct intel_crtc_state *crtc_state);
};
-/* DP N/M table */
-#define LC_810M 810000
-#define LC_540M 540000
-#define LC_270M 270000
-#define LC_162M 162000
-
-struct dp_aud_n_m {
- int sample_rate;
- int clock;
- u16 m;
- u16 n;
-};
-
struct hdmi_aud_ncts {
int sample_rate;
int clock;
@@ -96,60 +83,6 @@ struct hdmi_aud_ncts {
int cts;
};
-/* Values according to DP 1.4 Table 2-104 */
-static const struct dp_aud_n_m dp_aud_n_m[] = {
- { 32000, LC_162M, 1024, 10125 },
- { 44100, LC_162M, 784, 5625 },
- { 48000, LC_162M, 512, 3375 },
- { 64000, LC_162M, 2048, 10125 },
- { 88200, LC_162M, 1568, 5625 },
- { 96000, LC_162M, 1024, 3375 },
- { 128000, LC_162M, 4096, 10125 },
- { 176400, LC_162M, 3136, 5625 },
- { 192000, LC_162M, 2048, 3375 },
- { 32000, LC_270M, 1024, 16875 },
- { 44100, LC_270M, 784, 9375 },
- { 48000, LC_270M, 512, 5625 },
- { 64000, LC_270M, 2048, 16875 },
- { 88200, LC_270M, 1568, 9375 },
- { 96000, LC_270M, 1024, 5625 },
- { 128000, LC_270M, 4096, 16875 },
- { 176400, LC_270M, 3136, 9375 },
- { 192000, LC_270M, 2048, 5625 },
- { 32000, LC_540M, 1024, 33750 },
- { 44100, LC_540M, 784, 18750 },
- { 48000, LC_540M, 512, 11250 },
- { 64000, LC_540M, 2048, 33750 },
- { 88200, LC_540M, 1568, 18750 },
- { 96000, LC_540M, 1024, 11250 },
- { 128000, LC_540M, 4096, 33750 },
- { 176400, LC_540M, 3136, 18750 },
- { 192000, LC_540M, 2048, 11250 },
- { 32000, LC_810M, 1024, 50625 },
- { 44100, LC_810M, 784, 28125 },
- { 48000, LC_810M, 512, 16875 },
- { 64000, LC_810M, 2048, 50625 },
- { 88200, LC_810M, 1568, 28125 },
- { 96000, LC_810M, 1024, 16875 },
- { 128000, LC_810M, 4096, 50625 },
- { 176400, LC_810M, 3136, 28125 },
- { 192000, LC_810M, 2048, 16875 },
-};
-
-static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
- if (rate == dp_aud_n_m[i].sample_rate &&
- crtc_state->port_clock == dp_aud_n_m[i].clock)
- return &dp_aud_n_m[i];
- }
-
- return NULL;
-}
-
static const struct {
int clock;
u32 config;
@@ -387,47 +320,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = encoder->port;
- const struct dp_aud_n_m *nm;
- int rate;
- u32 tmp;
-
- rate = acomp ? acomp->aud_sample_rate[port] : 0;
- nm = audio_config_dp_get_n_m(crtc_state, rate);
- if (nm)
- drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
- nm->n);
- else
- drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
-
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- if (nm) {
- tmp &= ~AUD_CONFIG_N_MASK;
- tmp |= AUD_CONFIG_N(nm->n);
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- }
-
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
-
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
- tmp &= ~AUD_CONFIG_M_MASK;
- tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
-
- if (nm) {
- tmp |= nm->m;
- tmp |= AUD_M_CTS_M_VALUE_INDEX;
- tmp |= AUD_M_CTS_M_PROG_ENABLE;
- }
+ /* Enable time stamps. Let HW calculate Maud/Naud values */
+ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
+ AUD_CONFIG_UPPER_N_MASK |
+ AUD_CONFIG_LOWER_N_MASK |
+ AUD_CONFIG_N_PROG_ENABLE,
+ AUD_CONFIG_N_VALUE_INDEX);
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
index 616e7b1275c4..88ea2740365d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -148,4 +148,20 @@
#define HBLANK_START_COUNT_96 4
#define HBLANK_START_COUNT_128 5
+/* LPE Audio */
+#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
+#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
+
+#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
+#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
+
+#define _VLV_AUD_PORT_EN_B_DBG 0x62F20
+#define _VLV_AUD_PORT_EN_C_DBG 0x62F30
+#define _VLV_AUD_PORT_EN_D_DBG 0x62F34
+#define VLV_AUD_PORT_EN_DBG(port) _MMIO_BASE_PORT3(VLV_DISPLAY_BASE, (port) - PORT_B, \
+ _VLV_AUD_PORT_EN_B_DBG, \
+ _VLV_AUD_PORT_EN_C_DBG, \
+ _VLV_AUD_PORT_EN_D_DBG)
+#define VLV_AMP_MUTE (1 << 1)
+
#endif /* __INTEL_AUDIO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 1946d7fb3c2e..071668bfe5d1 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness < 0)
+ if (display->params.invert_brightness < 0)
return val;
- if (i915->display.params.invert_brightness > 0 ||
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
+ if (display->params.invert_brightness > 0 ||
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -126,15 +126,15 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness > 0 ||
- (i915->display.params.invert_brightness == 0 &&
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (display->params.invert_brightness > 0 ||
+ (display->params.invert_brightness == 0 &&
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -761,8 +761,8 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level <= panel->backlight.min) {
- panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.level < panel->backlight.min) {
+ panel->backlight.level = panel->backlight.min;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
scale_hw_to_user(connector,
@@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
else
props.power = FB_BLANK_POWERDOWN;
- name = kstrdup("intel_backlight", GFP_KERNEL);
+ name = kstrdup_const("intel_backlight", GFP_KERNEL);
if (!name)
return -ENOMEM;
@@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
* compatibility. Use unique names for subsequent backlight devices as a
* fallback when the default name already exists.
*/
- kfree(name);
+ kfree_const(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
@@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
connector->base.base.id, connector->base.name, name);
out:
- kfree(name);
+ kfree_const(name);
return ret;
}
@@ -1642,17 +1642,17 @@ void intel_backlight_update(struct intel_atomic_state *state,
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_has_quirk(display, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
connector->base.base.id, connector->base.name);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT\n",
connector->base.base.id, connector->base.name);
return 0;
@@ -1660,16 +1660,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs))
+ if (drm_WARN_ON(display->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] failed to setup backlight\n",
connector->base.base.id, connector->base.name);
return ret;
@@ -1677,7 +1677,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
panel->backlight.present = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
@@ -1821,7 +1821,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
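The kstrdup_const()/kfree_const() switch in intel_backlight_device_register() above avoids copying a string literal: when the source lives in kernel .rodata, kstrdup_const() returns the original pointer and kfree_const() is a no-op; otherwise the pair behaves like kstrdup()/kfree(). A small illustrative sketch, with an invented helper name:

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: shows when a copy is (not) made. */
static const char *example_dup_name(const char *dynamic_name)
{
	const char *name;

	/* String literal lives in .rodata: no allocation, pointer reused. */
	name = kstrdup_const("intel_backlight", GFP_KERNEL);
	if (!name)
		return NULL;

	if (dynamic_name) {
		kfree_const(name);	/* no-op for the .rodata case */
		/* Heap/stack source: this call really allocates a copy. */
		name = kstrdup_const(dynamic_name, GFP_KERNEL);
	}

	return name;	/* callers must release with kfree_const() */
}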
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fe52c06271ef..5fb48b6129b6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/firmware.h>
+
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
@@ -593,11 +595,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
-static const struct lvds_pnp_id *
+static const struct drm_edid_product_id *
get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
+ /* These two are supposed to have the same layout in memory. */
+ BUILD_BUG_ON(sizeof(struct lvds_pnp_id) != sizeof(struct drm_edid_product_id));
+
return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
}
@@ -611,19 +616,6 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
-static void dump_pnp_id(struct drm_i915_private *i915,
- const struct lvds_pnp_id *pnp_id,
- const char *name)
-{
- u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
- char vend[4];
-
- drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
- name, drm_edid_decode_mfg_id(mfg_name, vend),
- pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
- pnp_id->mfg_week, pnp_id->mfg_year + 1990);
-}
-
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
@@ -662,21 +654,21 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *edid_id;
- struct lvds_pnp_id edid_id_nodate;
- const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
+ struct drm_edid_product_id product_id, product_id_nodate;
+ struct drm_printer p;
int i, best = -1;
- if (!edid)
+ if (!drm_edid)
return -1;
- edid_id = (const void *)&edid->mfg_id[0];
+ drm_edid_get_product_id(drm_edid, &product_id);
- edid_id_nodate = *edid_id;
- edid_id_nodate.mfg_week = 0;
- edid_id_nodate.mfg_year = 0;
+ product_id_nodate = product_id;
+ product_id_nodate.week_of_manufacture = 0;
+ product_id_nodate.year_of_manufacture = 0;
- dump_pnp_id(i915, edid_id, "EDID");
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID");
+ drm_edid_print_product_id(&p, &product_id, true);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
@@ -687,11 +679,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
return -1;
for (i = 0; i < 16; i++) {
- const struct lvds_pnp_id *vbt_id =
+ const struct drm_edid_product_id *vbt_id =
get_lvds_pnp_id(data, ptrs, i);
/* full match? */
- if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ if (!memcmp(vbt_id, &product_id, sizeof(*vbt_id)))
return i;
/*
@@ -699,7 +691,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
* and the VBT entry does not specify a date.
*/
if (best < 0 &&
- !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ !memcmp(vbt_id, &product_id_nodate, sizeof(*vbt_id)))
best = i;
}
@@ -885,7 +877,8 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *pnp_id;
+ const struct drm_edid_product_id *pnp_id;
+ struct drm_printer p;
int panel_type = panel->vbt.panel_type;
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -900,7 +893,9 @@ parse_lfp_data(struct drm_i915_private *i915,
parse_lfp_panel_dtd(i915, panel, data, ptrs);
pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
- dump_pnp_id(i915, pnp_id, "Panel");
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel");
+ drm_edid_print_product_id(&p, pnp_id, false);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
@@ -1042,22 +1037,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
- size_t exp_size;
+ const struct lfp_backlight_control_method *method;
- if (i915->display.vbt.version >= 236)
- exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->display.vbt.version >= 234)
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
- else
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
-
- if (get_blocksize(backlight_data) >= exp_size) {
- const struct lfp_backlight_control_method *method;
-
- method = &backlight_data->backlight_control[panel_type];
- panel->vbt.backlight.type = method->type;
- panel->vbt.backlight.controller = method->controller;
- }
+ method = &backlight_data->backlight_control[panel_type];
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
}
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
@@ -1955,16 +1939,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
- struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
- /* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(i915))
- return;
-
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
@@ -2000,6 +1980,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ * (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ * or examine the contents of the sequences to
+ * avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+ }
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (DISPLAY_VER(i915) >= 11)
+ icl_fixup_mipi_sequences(i915, panel);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_fixup_mipi_sequences(i915, panel);
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
@@ -2699,6 +2714,57 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
print_ddi_port(devdata);
}
+static int child_device_expected_size(u16 version)
+{
+ BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
+
+ if (version > 256)
+ return -ENOENT;
+ else if (version >= 256)
+ return 40;
+ else if (version >= 216)
+ return 39;
+ else if (version >= 196)
+ return 38;
+ else if (version >= 195)
+ return 37;
+ else if (version >= 111)
+ return LEGACY_CHILD_DEVICE_CONFIG_SIZE;
+ else if (version >= 106)
+ return 27;
+ else
+ return 22;
+}
+
+static bool child_device_size_valid(struct drm_i915_private *i915, int size)
+{
+ int expected_size;
+
+ expected_size = child_device_expected_size(i915->display.vbt.version);
+ if (expected_size < 0) {
+ expected_size = sizeof(struct child_device_config);
+ drm_dbg(&i915->drm,
+ "Expected child device config size for VBT version %u not known; assuming %d\n",
+ i915->display.vbt.version, expected_size);
+ }
+
+ /* Flag an error for unexpected size, but continue anyway. */
+ if (size != expected_size)
+ drm_err(&i915->drm,
+ "Unexpected child device config size %d (expected %d for VBT version %u)\n",
+ size, expected_size, i915->display.vbt.version);
+
+ /* The legacy sized child device config is the minimum we need. */
+ if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
+ drm_dbg_kms(&i915->drm,
+ "Child device config size %d is too small.\n",
+ size);
+ return false;
+ }
+
+ return true;
+}
+
static void
parse_general_definitions(struct drm_i915_private *i915)
{
@@ -2706,7 +2772,6 @@ parse_general_definitions(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
int i, child_device_num;
- u8 expected_size;
u16 block_size;
int bus_pin;
@@ -2730,39 +2795,8 @@ parse_general_definitions(struct drm_i915_private *i915)
if (intel_gmbus_is_valid_pin(i915, bus_pin))
i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->display.vbt.version < 106) {
- expected_size = 22;
- } else if (i915->display.vbt.version < 111) {
- expected_size = 27;
- } else if (i915->display.vbt.version < 195) {
- expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->display.vbt.version == 195) {
- expected_size = 37;
- } else if (i915->display.vbt.version <= 215) {
- expected_size = 38;
- } else if (i915->display.vbt.version <= 250) {
- expected_size = 39;
- } else {
- expected_size = sizeof(*child);
- BUILD_BUG_ON(sizeof(*child) < 39);
- drm_dbg(&i915->drm,
- "Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->display.vbt.version, expected_size);
- }
-
- /* Flag an error for unexpected size, but continue anyway. */
- if (defs->child_dev_size != expected_size)
- drm_err(&i915->drm,
- "Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->display.vbt.version);
-
- /* The legacy sized child device config is the minimum we need. */
- if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&i915->drm,
- "Child device config size %u is too small.\n",
- defs->child_dev_size);
+ if (!child_device_size_valid(i915, defs->child_dev_size))
return;
- }
/* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
@@ -2838,9 +2872,8 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
{
+ unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
enum port port;
- int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
- BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
@@ -2950,6 +2983,43 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
return vbt;
}
+static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
+{
+ struct vbt_header *vbt = NULL;
+ const struct firmware *fw = NULL;
+ const char *name = i915->display.params.vbt_firmware;
+ int ret;
+
+ if (!name || !*name)
+ return NULL;
+
+ ret = request_firmware(&fw, name, i915->drm.dev);
+ if (ret) {
+ drm_err(&i915->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
+ return NULL;
+ }
+
+ if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
+ vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (vbt) {
+ drm_dbg_kms(&i915->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
+ if (size)
+ *size = fw->size;
+ }
+ } else {
+ drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
+ }
+
+ release_firmware(fw);
+
+ return vbt;
+}
+
static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
{
intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset);
@@ -2957,7 +3027,8 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
}
-static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
{
u32 count, data, found, store = 0;
u32 static_region, oprom_offset;
@@ -3000,6 +3071,9 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+ if (size)
+ *size = vbt_size;
+
return (struct vbt_header *)vbt;
err_free_vbt:
@@ -3008,7 +3082,8 @@ err_not_found:
return NULL;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
void __iomem *p = NULL, *oprom;
@@ -3057,6 +3132,9 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
pci_unmap_rom(pdev, oprom);
+ if (sizep)
+ *sizep = vbt_size;
+
drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
return vbt;
@@ -3069,6 +3147,32 @@ err_unmap_oprom:
return NULL;
}
+static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
+{
+ const struct vbt_header *vbt = NULL;
+ intel_wakeref_t wakeref;
+
+ vbt = firmware_get_vbt(i915, sizep);
+
+ if (!vbt)
+ vbt = intel_opregion_get_vbt(i915, sizep);
+
+ /*
+ * If the OpRegion does not have VBT, look in SPI flash
+ * through MMIO or PCI mapping
+ */
+ if (!vbt && IS_DGFX(i915))
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = spi_oprom_get_vbt(i915, sizep);
+
+ if (!vbt)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = oprom_get_vbt(i915, sizep);
+
+ return vbt;
+}
+
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @i915: i915 device instance
@@ -3080,7 +3184,6 @@ err_unmap_oprom:
void intel_bios_init(struct drm_i915_private *i915)
{
const struct vbt_header *vbt;
- struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
INIT_LIST_HEAD(&i915->display.vbt.display_devices);
@@ -3094,21 +3197,7 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
- vbt = intel_opregion_get_vbt(i915, NULL);
-
- /*
- * If the OpRegion does not have VBT, look in SPI flash through MMIO or
- * PCI mapping
- */
- if (!vbt && IS_DGFX(i915)) {
- oprom_vbt = spi_oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
-
- if (!vbt) {
- oprom_vbt = oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
+ vbt = intel_bios_get_vbt(i915, NULL);
if (!vbt)
goto out;
@@ -3141,7 +3230,7 @@ out:
parse_sdvo_device_mapping(i915);
parse_ddi_ports(i915);
- kfree(oprom_vbt);
+ kfree(vbt);
}
static void intel_bios_init_panel(struct drm_i915_private *i915,
@@ -3313,8 +3402,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (intel_opregion_get_vbt(i915, NULL))
- return true;
+ return intel_opregion_vbt_present(i915);
}
return false;
@@ -3351,6 +3439,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
const struct child_device_config *child = &devdata->child;
+ if (!devdata)
+ return false;
+
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
@@ -3672,13 +3763,12 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
const void *vbt;
size_t vbt_size;
- /*
- * FIXME: VBT might originate from other places than opregion, and then
- * this would be incorrect.
- */
- vbt = intel_opregion_get_vbt(i915, &vbt_size);
- if (vbt)
+ vbt = intel_bios_get_vbt(i915, &vbt_size);
+
+ if (vbt) {
seq_write(m, vbt, vbt_size);
+ kfree(vbt);
+ }
return 0;
}
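firmware_get_vbt() above follows the usual request_firmware() override pattern: request the named blob, validate it, copy the payload out with kmemdup(), and release the firmware handle before returning. A stripped-down sketch under the same assumptions; validate_blob() stands in for the caller's own check (intel_bios_is_valid_vbt() in the driver) and the function name is a placeholder.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/slab.h>

/* Illustrative only: names are placeholders, not driver API. */
static void *example_load_override(struct device *dev, const char *name,
				   size_t *size,
				   bool (*validate_blob)(const void *, size_t))
{
	const struct firmware *fw;
	void *blob = NULL;

	if (!name || !*name)
		return NULL;

	if (request_firmware(&fw, name, dev))
		return NULL;

	if (validate_blob(fw->data, fw->size)) {
		blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (blob && size)
			*size = fw->size;
	}

	release_firmware(fw);	/* fw->data must not be used past this point */
	return blob;		/* caller frees with kfree() */
}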
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7f2a50b4f494..972ea887e232 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -162,7 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
+ drm_err(&dev_priv->drm,
+ "Failed to disable qgv points (0x%x) points: 0x%x\n",
+ ret, points_mask);
return ret;
}
@@ -290,8 +292,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp = &qi->points[i];
ret = intel_read_qgv_point_info(dev_priv, sp, i);
- if (ret)
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
return ret;
+ }
drm_dbg_kms(&dev_priv->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
@@ -659,6 +663,22 @@ static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
return bi->psf_bw[psf_gv_point];
}
+static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+ int num_active_planes, int qgv_point)
+{
+ unsigned int idx;
+
+ if (DISPLAY_VER(i915) >= 12)
+ idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ else
+ idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+
+ if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ return 0;
+
+ return i915->display.bw.max[idx].deratedbw[qgv_point];
+}
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@@ -735,6 +755,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -804,6 +825,80 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+ int num_active_planes)
+{
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int max_bw_point = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate =
+ icl_qgv_bw(i915, num_active_planes, i);
+
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that moment we already
+ * have those, as it is calculated earlier in
+ * intel_atomic_check,
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = BIT(i);
+ max_bw = max_data_rate;
+ }
+ }
+
+ return max_bw_point;
+}
+
+static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+ unsigned int qgv_points,
+ unsigned int psf_points)
+{
+ return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+}
+
+static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int max_bw_point_mask = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+ if (max_data_rate > max_bw) {
+ max_bw_point_mask = BIT(i);
+ max_bw = max_data_rate;
+ } else if (max_data_rate == max_bw) {
+ max_bw_point_mask |= BIT(i);
+ }
+ }
+
+ return max_bw_point_mask;
+}
+
+static void icl_force_disable_sagv(struct drm_i915_private *i915,
+ struct intel_bw_state *bw_state)
+{
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
+
+ drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ bw_state->qgv_points_mask);
+
+ icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+}
+
static int mtl_find_qgv_points(struct drm_i915_private *i915,
unsigned int data_rate,
unsigned int num_active_planes,
@@ -881,8 +976,6 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int max_bw_point = 0;
- unsigned int max_bw = 0;
unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 psf_points = 0;
@@ -895,31 +988,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int idx;
- unsigned int max_data_rate;
-
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, i);
- else
- idx = icl_max_bw_index(i915, num_active_planes, i);
-
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
- continue;
-
- max_data_rate = i915->display.bw.max[idx].deratedbw[i];
-
- /*
- * We need to know which qgv point gives us
- * maximum bandwidth in order to disable SAGV
- * if we find that we exceed SAGV block time
- * with watermarks. By that moment we already
- * have those, as it is calculated earlier in
- * intel_atomic_check,
- */
- if (max_data_rate > max_bw) {
- max_bw_point = i;
- max_bw = max_data_rate;
- }
+ unsigned int max_data_rate = icl_qgv_bw(i915,
+ num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
@@ -963,20 +1033,18 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* cause.
*/
if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = BIT(max_bw_point);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
- max_bw_point);
+ qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
+ drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ qgv_points);
}
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask =
- ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) &
- icl_qgv_points_mask(i915);
-
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
@@ -1272,8 +1340,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state))
+ (intel_can_enable_sagv(i915, old_bw_state) !=
+ intel_can_enable_sagv(i915, new_bw_state) ||
+ new_bw_state->force_check_qgv))
changed = true;
/*
@@ -1287,6 +1356,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_bw_state->force_check_qgv = false;
+
return 0;
}
@@ -1313,7 +1384,7 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *dev_priv)
+int intel_bw_init(struct drm_i915_private *i915)
{
struct intel_bw_state *state;
@@ -1321,8 +1392,15 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
+ intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
&state->base, &intel_bw_funcs);
+ /*
+ * Limit this only if we have SAGV. From Display version 14 onwards,
+ * SAGV is handled through pmdemand requests.
+ */
+ if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
+ icl_force_disable_sagv(i915, state);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 59cb4fc5db76..161813cca473 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -47,12 +47,19 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
+ /*
+ * Flag to force the QGV comparison in atomic check right after the
+ * hw state readout
+ */
+ bool force_check_qgv;
+
int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
-#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+#define to_intel_bw_state(global_state) \
+ container_of_const((global_state), struct intel_bw_state, base)
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed89b86ea625..7a833b5f2de2 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -39,6 +39,8 @@
#include "intel_pcode.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
+#include "skl_watermark.h"
+#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
/**
@@ -63,6 +65,32 @@
* DMC will not change the active CDCLK frequency however, so that part
* will still be performed by the driver directly.
*
+ * There are multiple components involved in the generation of the CDCLK
+ * frequency:
+ *
+ * - We have the CDCLK PLL, which generates an output clock based on a
+ * reference clock and a ratio parameter.
+ * - The CD2X Divider, which divides the output of the PLL based on a
+ * divisor selected from a set of pre-defined choices.
+ * - The CD2X Squasher, which further divides the output based on a
+ * waveform represented as a sequence of bits where each zero
+ * "squashes out" a clock cycle.
+ * - And, finally, a fixed divider that divides the output frequency by 2.
+ *
+ * As such, the resulting CDCLK frequency can be calculated with the
+ * following formula:
+ *
+ * cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
+ *
+ * , where vco is the frequency generated by the PLL; cd2x_div
+ * represents the CD2X Divider; sq_len and sq_div are the bit length
+ * and the number of high bits for the CD2X Squasher waveform, respectively;
+ * and 2 represents the fixed divider.
+ *
+ * Note that some older platforms do not contain the CD2X Divider
+ * and/or CD2X Squasher, in which case we can ignore their respective
+ * factors in the formula above.
+ *
* Several methods exist to change the CDCLK frequency, which ones are
* supported depends on the platform:
*
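Note: to make the formula above concrete, here is a sketch (not the driver's actual helper) plus a worked example using the first xe2lpd_cdclk_table entry added further down in this patch; the CD2X divider of 1 is an assumption for that entry:

	/* all frequencies in kHz; the waveform fields in the tables here are 16 bits wide */
	static unsigned int cdclk_from_pll(unsigned int vco, unsigned int cd2x_div,
					   u16 waveform)
	{
		unsigned int sq_len = 16;
		unsigned int sq_div = hweight16(waveform);	/* number of high bits */

		return DIV_ROUND_CLOSEST(vco * sq_div, cd2x_div * sq_len * 2);
	}

	/*
	 * refclk 38400 kHz * ratio 16 = 614400 kHz VCO, waveform 0xaaaa
	 * (8 of 16 bits set), CD2X divider 1:
	 *
	 *	cdclk = 614400 / 1 / (16 / 8) / 2 = 153600 kHz
	 *
	 * which matches the .cdclk = 153600 entry in the table.
	 */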
@@ -993,15 +1021,14 @@ static int skl_cdclk_decimal(int cdclk)
return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
- int vco)
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco)
{
- bool changed = dev_priv->skl_preferred_vco_freq != vco;
+ bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco;
- dev_priv->skl_preferred_vco_freq = vco;
+ i915->display.cdclk.skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(dev_priv);
+ intel_update_max_cdclk(i915);
}
static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
@@ -1205,7 +1232,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
- if (dev_priv->skl_preferred_vco_freq == 0)
+ if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
dev_priv->display.cdclk.hw.vco);
return;
@@ -1213,7 +1240,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
cdclk_config = dev_priv->display.cdclk.hw;
- cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
cdclk_config.vco = 8100000;
cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
@@ -1391,7 +1418,7 @@ static const struct intel_cdclk_vals mtl_cdclk_table[] = {
{}
};
-static const struct intel_cdclk_vals lnl_cdclk_table[] = {
+static const struct intel_cdclk_vals xe2lpd_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
@@ -1656,6 +1683,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
out:
+ if (DISPLAY_VER(dev_priv) >= 20)
+ cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
@@ -1850,6 +1879,37 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 20;
+}
+
+static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return MDCLK_SOURCE_SEL_CDCLK_PLL;
+
+ return MDCLK_SOURCE_SEL_CD2XCLK;
+}
+
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk);
+
+ /* Otherwise, source for MDCLK is CD2XCLK. */
+ return 2;
+}
+
+static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_mdclk_cdclk_ratio(i915, cdclk_config),
+ cdclk_config->joined_mbus);
+}
+
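Note: plugging in the same example numbers used for the CDCLK formula above (table values from this patch, CD2X divider of 1 assumed):

	/* Xe2LPD (display ver >= 20): MDCLK is fed from the CDCLK PLL */
	intel_mdclk_cdclk_ratio()  ==  DIV_ROUND_UP(614400, 153600)  ==  4

	/* older platforms: MDCLK is CD2XCLK, so the ratio is fixed at 2 */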
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
@@ -1954,7 +2014,7 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (DISPLAY_VER(i915) >= 20)
- val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+ val |= xe2lpd_mdclk_source_sel(i915);
else
val |= skl_cdclk_decimal(cdclk);
@@ -1967,7 +2027,6 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u16 waveform;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1982,10 +2041,11 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
} else
bxt_cdclk_pll_update(dev_priv, vco);
- waveform = cdclk_squash_waveform(dev_priv, cdclk);
+ if (HAS_CDCLK_SQUASH(dev_priv)) {
+ u16 waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
+ }
intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
@@ -2030,6 +2090,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
cdclk_config, &mid_cdclk_config)) {
_bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
@@ -2038,6 +2101,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
_bxt_set_cdclk(dev_priv, cdclk_config, pipe);
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (DISPLAY_VER(dev_priv) >= 14)
/*
* NOOP - No Pcode communication needed for
@@ -2260,16 +2326,15 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_needs_modeset - Determine if changong between the CDCLK
- * configurations requires a modeset on all pipes
+ * intel_cdclk_clock_changed - Check whether the clock changed
* @a: first CDCLK configuration
* @b: second CDCLK configuration
*
* Returns:
- * True if changing between the two CDCLK configurations
- * requires all pipes to be off, false if not.
+ * True if CDCLK changed in a way that requires re-programming and
+ * False otherwise.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
@@ -2322,7 +2387,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
- return intel_cdclk_needs_modeset(a, b) ||
+ return intel_cdclk_clock_changed(a, b) ||
a->voltage_level != b->voltage_level;
}
@@ -2368,18 +2433,9 @@ static void intel_pcode_notify(struct drm_i915_private *i915,
ret);
}
-/**
- * intel_set_cdclk - Push the CDCLK configuration to the hardware
- * @dev_priv: i915 device
- * @cdclk_config: new CDCLK configuration
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware based on the passed in CDCLK state,
- * if necessary.
- */
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe)
+ enum pipe pipe, const char *context)
{
struct intel_encoder *encoder;
@@ -2389,7 +2445,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
- intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
+ intel_cdclk_dump_config(dev_priv, cdclk_config, context);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2519,6 +2575,17 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
update_cdclk, update_pipe_count);
}
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+
+ return new_cdclk_state && !new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk;
+}
+
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
* @state: intel atomic state
@@ -2534,7 +2601,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2543,12 +2611,32 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ /*
+ * mbus joining will be changed later by
+ * intel_dbuf_mbus_{pre,post}_ddb_update()
+ */
+ cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus;
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe,
+ "Pre changing CDCLK to");
}
/**
@@ -2566,7 +2654,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2575,12 +2663,16 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe,
+ "Post changing CDCLK to");
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2731,25 +2823,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->dsc.compression_enable)
min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
- /*
- * HACK. Currently for TGL/DG2 platforms we calculate
- * min_cdclk initially based on pixel_rate divided
- * by 2, accounting for also plane requirements,
- * however in some cases the lowest possible CDCLK
- * doesn't work and causing the underruns.
- * Explicitly stating here that this seems to be currently
- * rather a Hack, than final solution.
- */
- if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
- /*
- * Clamp to max_cdclk_freq in case pixel rate is higher,
- * in order not to break an 8K, but still leave W/A at place.
- */
- min_cdclk = max_t(int, min_cdclk,
- min_t(int, crtc_state->pixel_rate,
- dev_priv->display.cdclk.max_cdclk_freq));
- }
-
return min_cdclk;
}
@@ -2937,7 +3010,7 @@ static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
vco = cdclk_state->logical.vco;
if (!vco)
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.enable)
@@ -3058,6 +3131,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3121,6 +3195,20 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
return 0;
}
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->actual.joined_mbus = joined_mbus;
+ cdclk_state->logical.joined_mbus = joined_mbus;
+
+ return intel_atomic_lock_global_state(&cdclk_state->base);
+}
+
int intel_cdclk_init(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state *cdclk_state;
@@ -3229,17 +3317,28 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes_late(state, "CDCLK change");
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
+ if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) !=
+ intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) {
+ int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual);
+
+ ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio);
+ if (ret)
+ return ret;
+ }
+
drm_dbg_kms(&dev_priv->drm,
"New cdclk calculated to be logical %u kHz, actual %u kHz\n",
new_cdclk_state->logical.cdclk,
@@ -3297,7 +3396,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
@@ -3339,13 +3438,13 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
- dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+ dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ dev_priv->display.cdclk.max_dotclk_freq);
}
/**
@@ -3519,7 +3618,7 @@ static int i915_cdclk_info_show(struct seq_file *m, void *unused)
seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq);
return 0;
}
@@ -3534,13 +3633,6 @@ void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
i915, &i915_cdclk_info_fops);
}
-static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
- .get_cdclk = bxt_get_cdclk,
- .set_cdclk = bxt_set_cdclk,
- .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = rplu_calc_voltage_level,
-};
-
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3684,10 +3776,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
- dev_priv->display.cdclk.table = lnl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+ dev_priv->display.cdclk.table = xe2lpd_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 14) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
dev_priv->display.cdclk.table = mtl_cdclk_table;
} else if (IS_DG2(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0cd..cfdcdec07a4d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -18,6 +18,8 @@ struct intel_crtc_state;
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
+ /* This field is only valid for Xe2LPD and above. */
+ bool joined_mbus;
};
struct intel_cdclk_state {
@@ -51,6 +53,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
@@ -60,8 +65,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b);
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config);
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state);
void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct drm_i915_private *i915,
@@ -72,10 +80,13 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
bool *need_cdclk_calc);
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
-#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define to_intel_cdclk_state(global_state) \
+ container_of_const((global_state), struct intel_cdclk_state, base)
+
#define intel_atomic_get_old_cdclk_state(state) \
to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index ca7112b32cb3..d23163dc64d4 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -616,19 +616,19 @@ static void vlv_load_wgc_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe),
csc->coeff[1] << 16 | csc->coeff[0]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe),
csc->coeff[2]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe),
csc->coeff[4] << 16 | csc->coeff[3]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe),
csc->coeff[5]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe),
csc->coeff[7] << 16 | csc->coeff[6]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe),
csc->coeff[8]);
}
@@ -639,25 +639,25 @@ static void vlv_read_wgc_csc(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe));
csc->coeff[0] = tmp & 0xffff;
csc->coeff[1] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe));
csc->coeff[2] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe));
csc->coeff[3] = tmp & 0xffff;
csc->coeff[4] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe));
csc->coeff[5] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe));
csc->coeff[6] = tmp & 0xffff;
csc->coeff[7] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe));
csc->coeff[8] = tmp & 0xffff;
}
@@ -1227,7 +1227,7 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(dev_priv, PALETTE(pipe, i),
+ intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1240,9 +1240,11 @@ static void i9xx_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i9xx_lut_10_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i9xx_lut_10_udw(&lut[i]));
}
}
@@ -1274,9 +1276,11 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
}
@@ -3150,7 +3154,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -3176,8 +3181,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i9xx_lut_10_pack(&lut[i], ldw, udw);
}
@@ -3224,8 +3231,10 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ u32 ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 9f4ae58f3e7e..bb99ea533842 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -8,7 +8,35 @@
#include "intel_display_reg_defs.h"
-/* legacy palette */
+/* GMCH palette */
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode ldw */
+#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
+#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
+#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode udw */
+#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
+#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
+#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
+#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
+#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
+#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
+#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
+#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
+#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
+#define PALETTE(dev_priv, pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
+
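Note: ignoring the DISPLAY_MMIO_BASE(dev_priv) offset that is added on top, the new PALETTE() macro resolves to the following per-pipe addresses (pipe C exists only on CHV):

	PALETTE(dev_priv, PIPE_A, i)  ->  0xa000 + (i) * 4
	PALETTE(dev_priv, PIPE_B, i)  ->  0xa800 + (i) * 4
	PALETTE(dev_priv, PIPE_C, i)  ->  0xc000 + (i) * 4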
+/* ilk+ palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
/* see PALETTE_* for the bits */
@@ -228,12 +256,12 @@
#define _PIPE_A_WGC_C21_C20 0x600C0 /* s2.10 */
#define _PIPE_A_WGC_C22 0x600C4 /* s2.10 */
-#define PIPE_WGC_C01_C00(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C01_C00)
-#define PIPE_WGC_C02(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C02)
-#define PIPE_WGC_C11_C10(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C11_C10)
-#define PIPE_WGC_C12(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C12)
-#define PIPE_WGC_C21_C20(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C21_C20)
-#define PIPE_WGC_C22(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C22)
+#define PIPE_WGC_C01_C00(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C01_C00)
+#define PIPE_WGC_C02(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C02)
+#define PIPE_WGC_C11_C10(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C11_C10)
+#define PIPE_WGC_C12(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C12)
+#define PIPE_WGC_C21_C20(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C21_C20)
+#define PIPE_WGC_C22(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C22)
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index b0983edccf3f..0964e392d02c 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -25,28 +25,26 @@
4 * (dw))
#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
+#define CL_POWER_DOWN_ENABLE REG_BIT(4)
+#define SUS_CLOCK_CONFIG REG_GENMASK(1, 0)
#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-#define EDP4K2K_MODE_OVRD_EN (1 << 3)
-#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
+#define PG_SEQ_DELAY_OVERRIDE_MASK REG_GENMASK(26, 25)
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE REG_BIT(24)
+#define PWR_DOWN_LN_MASK REG_GENMASK(7, 4)
+#define PWR_UP_ALL_LANES REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x0)
+#define PWR_DOWN_LN_3_2_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xe)
+#define PWR_DOWN_LN_3_2 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xc)
+#define PWR_DOWN_LN_3 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x8)
+#define PWR_DOWN_LN_2_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x7)
+#define PWR_DOWN_LN_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x3)
+#define PWR_DOWN_LN_3_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xa)
+#define PWR_DOWN_LN_3_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xb)
+#define EDP4K2K_MODE_OVRD_EN REG_BIT(3)
+#define EDP4K2K_MODE_OVRD_OPTIMIZED REG_BIT(2)
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
-#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_LANE_ENABLE_AUX REG_BIT(0)
/* ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
@@ -54,24 +52,22 @@
_ICL_PORT_COMP + 4 * (dw))
#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
-#define COMP_INIT (1 << 31)
+#define COMP_INIT REG_BIT(31)
#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
+#define PROCESS_INFO_MASK REG_GENMASK(28, 26)
+#define PROCESS_INFO_DOT_0 REG_FIELD_PREP(PROCESS_INFO_MASK, 0)
+#define PROCESS_INFO_DOT_1 REG_FIELD_PREP(PROCESS_INFO_MASK, 1)
+#define PROCESS_INFO_DOT_4 REG_FIELD_PREP(PROCESS_INFO_MASK, 2)
+#define VOLTAGE_INFO_MASK REG_GENMASK(25, 24)
+#define VOLTAGE_INFO_0_85V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 0)
+#define VOLTAGE_INFO_0_95V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 1)
+#define VOLTAGE_INFO_1_05V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 2)
#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
-#define IREFGEN (1 << 24)
+#define IREFGEN REG_BIT(24)
#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
@@ -92,9 +88,9 @@
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
-#define COMMON_KEEPER_EN (1 << 26)
-#define LATENCY_OPTIM_MASK (0x3 << 2)
-#define LATENCY_OPTIM_VAL(x) ((x) << 2)
+#define COMMON_KEEPER_EN REG_BIT(26)
+#define LATENCY_OPTIM_MASK REG_GENMASK(3, 2)
+#define LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(LATENCY_OPTIM_MASK, (x))
/* ICL Port TX registers */
#define _ICL_PORT_TX_AUX 0x380
@@ -111,42 +107,49 @@
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
-#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
-#define SWING_SEL_UPPER_MASK (1 << 15)
-#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
-#define SWING_SEL_LOWER_MASK (0x7 << 11)
-#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
-#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
-#define RCOMP_SCALAR(x) ((x) << 0)
-#define RCOMP_SCALAR_MASK (0xFF << 0)
+#define SWING_SEL_UPPER_MASK REG_BIT(15)
+#define SWING_SEL_UPPER(x) REG_FIELD_PREP(SWING_SEL_UPPER_MASK, (x) >> 3)
+#define SWING_SEL_LOWER_MASK REG_GENMASK(13, 11)
+#define SWING_SEL_LOWER(x) REG_FIELD_PREP(SWING_SEL_LOWER_MASK, (x) & 0x7)
+#define FRC_LATENCY_OPTIM_MASK REG_GENMASK(10, 8)
+#define FRC_LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(FRC_LATENCY_OPTIM_MASK, (x))
+#define RCOMP_SCALAR_MASK REG_GENMASK(7, 0)
+#define RCOMP_SCALAR(x) REG_FIELD_PREP(RCOMP_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
-#define LOADGEN_SELECT (1 << 31)
-#define POST_CURSOR_1(x) ((x) << 12)
-#define POST_CURSOR_1_MASK (0x3F << 12)
-#define POST_CURSOR_2(x) ((x) << 6)
-#define POST_CURSOR_2_MASK (0x3F << 6)
-#define CURSOR_COEFF(x) ((x) << 0)
-#define CURSOR_COEFF_MASK (0x3F << 0)
+#define LOADGEN_SELECT REG_BIT(31)
+#define POST_CURSOR_1_MASK REG_GENMASK(17, 12)
+#define POST_CURSOR_1(x) REG_FIELD_PREP(POST_CURSOR_1_MASK, (x))
+#define POST_CURSOR_2_MASK REG_GENMASK(11, 6)
+#define POST_CURSOR_2(x) REG_FIELD_PREP(POST_CURSOR_2_MASK, (x))
+#define CURSOR_COEFF_MASK REG_GENMASK(5, 0)
+#define CURSOR_COEFF(x) REG_FIELD_PREP(CURSOR_COEFF_MASK, (x))
#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
-#define TX_TRAINING_EN (1 << 31)
-#define TAP2_DISABLE (1 << 30)
-#define TAP3_DISABLE (1 << 29)
-#define SCALING_MODE_SEL(x) ((x) << 18)
-#define SCALING_MODE_SEL_MASK (0x7 << 18)
-#define RTERM_SELECT(x) ((x) << 3)
-#define RTERM_SELECT_MASK (0x7 << 3)
+#define TX_TRAINING_EN REG_BIT(31)
+#define TAP2_DISABLE REG_BIT(30)
+#define TAP3_DISABLE REG_BIT(29)
+#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
+#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
+#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
+#define RTERM_SELECT(x) REG_FIELD_PREP(RTERM_SELECT_MASK, (x))
+
+#define ICL_PORT_TX_DW6_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(6, phy))
+#define ICL_PORT_TX_DW6_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(6, phy))
+#define ICL_PORT_TX_DW6_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(6, ln, phy))
+#define O_FUNC_OVRD_EN REG_BIT(7)
+#define O_LDO_REF_SEL_CRI REG_GENMASK(6, 1)
+#define O_LDO_BYPASS_CRI REG_BIT(0)
#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
-#define N_SCALAR(x) ((x) << 24)
-#define N_SCALAR_MASK (0x7F << 24)
+#define N_SCALAR_MASK REG_GENMASK(30, 24)
+#define N_SCALAR(x) REG_FIELD_PREP(N_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
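Note: all of the conversions in this header follow the same pattern: an open-coded shift pair becomes a REG_GENMASK() mask plus a REG_FIELD_PREP() setter. Taking SCALING_MODE_SEL as a representative example (the sample value is for illustration only):

	/* before */
	#define SCALING_MODE_SEL_MASK	(0x7 << 18)
	#define SCALING_MODE_SEL(x)	((x) << 18)

	/*
	 * after: same bits, but the field position/width lives in the mask
	 * and REG_FIELD_PREP() can check a constant value against it
	 */
	#define SCALING_MODE_SEL_MASK	REG_GENMASK(20, 18)
	#define SCALING_MODE_SEL(x)	REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))

	/* e.g. SCALING_MODE_SEL(0x2) == 0x2 << 18 == 0x80000 either way */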
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 93479db0f89f..10e95dc425a6 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -348,7 +348,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -356,9 +356,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4bcf446c75f4..ccaa4cb2809b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -12,33 +12,31 @@
#include "intel_hdmi.h"
#include "intel_vrr.h"
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+static void intel_dump_crtc_timings(struct drm_printer *p,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
- "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
- "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
- "flags=0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
- mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
- mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->flags);
+ drm_printf(p, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+intel_dump_m_n_config(struct drm_printer *p,
+ const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
- drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->data_m, m_n->data_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ drm_printf(p, "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -52,17 +50,7 @@ intel_dump_infoframe(struct drm_i915_private *i915,
}
static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
- const struct drm_dp_vsc_sdp *vsc)
-{
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
-
- drm_dp_vsc_sdp_log(&p, vsc);
-}
-
-static void
-intel_dump_buffer(struct drm_i915_private *i915,
- const char *prefix, const u8 *buf, size_t len)
+intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@@ -130,71 +118,66 @@ const char *intel_output_format_name(enum intel_output_format format)
return output_format_str[format];
}
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+static void intel_dump_plane_state(struct drm_printer *p,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fb) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
return;
}
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
+ drm_printf(p, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
- drm_dbg_kms(&i915->drm,
- "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_printf(p, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void
-ilk_dump_csc(struct drm_i915_private *i915, const char *name,
+ilk_dump_csc(struct drm_i915_private *i915,
+ struct drm_printer *p,
+ const char *name,
const struct intel_csc_matrix *csc)
{
int i;
- drm_dbg_kms(&i915->drm,
- "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->preoff[0], csc->preoff[1], csc->preoff[2]);
+ drm_printf(p, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->preoff[0], csc->preoff[1], csc->preoff[2]);
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
if (DISPLAY_VER(i915) < 7)
return;
- drm_dbg_kms(&i915->drm,
- "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->postoff[0], csc->postoff[1], csc->postoff[2]);
+ drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->postoff[0], csc->postoff[1], csc->postoff[2]);
}
static void
-vlv_dump_csc(struct drm_i915_private *i915, const char *name,
+vlv_dump_csc(struct drm_printer *p, const char *name,
const struct intel_csc_matrix *csc)
{
int i;
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
}
void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
@@ -205,85 +188,86 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct drm_printer p;
char buf[64];
int i;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
- crtc->base.base.id, crtc->base.name,
- str_yes_no(pipe_config->hw.enable), context);
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- drm_dbg_kms(&i915->drm,
- "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
- str_yes_no(pipe_config->hw.active),
- buf, pipe_config->output_types,
- intel_output_format_name(pipe_config->output_format),
- intel_output_format_name(pipe_config->sink_format));
-
- drm_dbg_kms(&i915->drm,
- "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
-
- drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
-
- drm_dbg_kms(&i915->drm,
- "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
- transcoder_name(pipe_config->master_transcoder),
- pipe_config->sync_mode_slaves_mask);
-
- drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
- intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
- intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
- pipe_config->bigjoiner_pipes);
-
- drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
- str_enabled_disabled(pipe_config->splitter.enable),
- pipe_config->splitter.link_count,
- pipe_config->splitter.pixel_overlap);
+ drm_printf(&p, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ intel_output_format_name(pipe_config->output_format),
+ intel_output_format_name(pipe_config->sink_format));
+
+ drm_printf(&p, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_printf(&p, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_printf(&p, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_printf(&p, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_printf(&p, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
if (pipe_config->has_pch_encoder)
- intel_dump_m_n_config(pipe_config, "fdi",
+ intel_dump_m_n_config(&p, pipe_config, "fdi",
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
if (intel_crtc_has_dp_encoder(pipe_config)) {
- intel_dump_m_n_config(pipe_config, "dp m_n",
+ intel_dump_m_n_config(&p, pipe_config, "dp m_n",
pipe_config->lane_count,
&pipe_config->dp_m_n);
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ intel_dump_m_n_config(&p, pipe_config, "dp m2_n2",
pipe_config->lane_count,
&pipe_config->dp_m2_n2);
- drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
- str_enabled_disabled(pipe_config->fec_enable),
- str_enabled_disabled(pipe_config->enhanced_framing));
-
- drm_dbg_kms(&i915->drm, "sdp split: %s\n",
- str_enabled_disabled(pipe_config->sdp_split_enable));
-
- drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
- str_enabled_disabled(pipe_config->has_psr),
- str_enabled_disabled(pipe_config->has_psr2),
- str_enabled_disabled(pipe_config->has_panel_replay),
- str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "fec: %s, enhanced framing: %s\n",
+ str_enabled_disabled(pipe_config->fec_enable),
+ str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_printf(&p, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_printf(&p, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
- drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_dbg_kms(&i915->drm,
- "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
- pipe_config->infoframes.gcp);
+ drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
@@ -301,91 +285,88 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
- intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ intel_dump_buffer("ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
-
- drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.mode));
- drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
- drm_dbg_kms(&i915->drm,
- "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
- pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
-
- drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
- pipe_config->linetime, pipe_config->ips_linetime);
+ drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_printf(&p, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.adjusted_mode);
+ drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_printf(&p, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (DISPLAY_VER(i915) >= 9)
- drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id,
- pipe_config->hw.scaling_filter);
+ drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
- drm_dbg_kms(&i915->drm,
- "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- drm_dbg_kms(&i915->drm,
- "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
- DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- str_enabled_disabled(pipe_config->pch_pfit.enabled),
- str_yes_no(pipe_config->pch_pfit.force_thru));
+ drm_printf(&p, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
- drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide,
- pipe_config->has_drrs);
+ drm_printf(&p, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
- intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+ intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(i915))
- drm_dbg_kms(&i915->drm,
- "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- drm_dbg_kms(&i915->drm,
- "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
-
- drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
- pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
- pipe_config->pre_csc_lut ?
- drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
- pipe_config->post_csc_lut ?
- drm_color_lut_size(pipe_config->post_csc_lut) : 0);
+ drm_printf(&p, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
+ pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
+ i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ pipe_config->pre_csc_lut ?
+ drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
+ pipe_config->post_csc_lut ?
+ drm_color_lut_size(pipe_config->post_csc_lut) : 0);
if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, "output csc", &pipe_config->output_csc);
+ ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, "pipe csc", &pipe_config->csc);
+ ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
else if (IS_CHERRYVIEW(i915))
- vlv_dump_csc(i915, "cgm csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
else if (IS_VALLEYVIEW(i915))
- vlv_dump_csc(i915, "wgc csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
dump_planes:
if (!state)
@@ -393,6 +374,6 @@ dump_planes:
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe == crtc->pipe)
- intel_dump_plane_state(plane_state);
+ intel_dump_plane_state(&p, plane_state);
}
}
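Note: the drm_printer conversion above reduces to one idiom: check drm_debug_enabled() once, build a single printer with drm_dbg_printer(), then hand &p to every dump helper instead of the i915 pointer. Condensed from the code in this hunk:

	struct drm_printer p;

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
	drm_printf(&p, "[CRTC:%d:%s] enable: %s\n",
		   crtc->base.base.id, crtc->base.name,
		   str_yes_no(pipe_config->hw.enable));
	intel_dump_plane_state(&p, plane_state);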
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index f8b33999d43f..23a122ee20c9 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = i915_gem_object_get_dma_address(obj, 0);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -511,6 +509,24 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
+static void wa_16021440873(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 ctl = plane_state->ctl;
+ int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
+ enum pipe pipe = plane->pipe;
+
+ ctl &= ~MCURSOR_MODE_MASK;
+ ctl |= MCURSOR_MODE_64_2B;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), ctl);
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
+ PIPESRC_HEIGHT(et_y_position));
+}
+
static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -531,7 +547,11 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
} else {
- i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ /* Wa_16021440873 */
+ if (crtc_state->enable_psr2_su_region_et)
+ wa_16021440873(plane, crtc_state, plane_state);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
}
}
@@ -823,6 +843,28 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.format_mod_supported = intel_cursor_format_mod_supported,
};
+static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_mode_config *config = &i915->drm.mode_config;
+ struct drm_plane_size_hint hints[4];
+ int size, max_size, num_hints = 0;
+
+ max_size = min(config->cursor_width, config->cursor_height);
+
+ /* for simplicity only enumerate the supported square+POT sizes */
+ for (size = 64; size <= max_size; size *= 2) {
+ if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints)))
+ break;
+
+ hints[num_hints].width = size;
+ hints[num_hints].height = size;
+ num_hints++;
+ }
+
+ drm_plane_add_size_hints_property(&plane->base, hints, num_hints);
+}
+
struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
@@ -881,6 +923,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ intel_cursor_add_size_hints_property(cursor);
+
zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
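
intel_cursor_add_size_hints_property() above advertises only square, power-of-two cursor sizes, starting at 64x64 and stopping at the smaller of the mode_config cursor width/height limits. With a 256x256 maximum, for example, the loop emits 64x64, 128x128 and 256x256 and stays well within the four-entry hints[] array. A standalone sketch of the same enumeration (plain C, names invented for illustration):

#include <stdio.h>

int main(void)
{
        int max_size = 256;     /* stands in for min(cursor_width, cursor_height) */
        int size;

        /* same square power-of-two walk as the size-hints loop above */
        for (size = 64; size <= max_size; size *= 2)
                printf("%dx%d\n", size, size);  /* 64x64 128x128 256x256 */

        return 0;
}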
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 64e0f820a789..8e3b13884bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -29,8 +29,11 @@
#define INTEL_CX0_LANE1 BIT(1)
#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
-bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
+
if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
@@ -46,8 +49,7 @@ static int lane_mask_to_lane(u8 lane_mask)
return ilog2(lane_mask);
}
-static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
- struct intel_encoder *encoder)
+static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -114,16 +116,20 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
- enum port port, int lane)
+static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
@@ -135,20 +141,22 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
return;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
+static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val)
{
- enum phy phy = intel_port_to_phy(i915, port);
-
- if (__intel_de_wait_for_register(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+
+ if (intel_de_wait_custom(i915,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_FAST_US,
+ XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -158,31 +166,33 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
"PHY %c Hardware did not detect a timeout\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (*val & XELPDP_PORT_P2M_ERROR_SET) {
drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
return 0;
}
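
intel_cx0_wait_for_ack() above waits for the P2M message-bus status register to report a response (judging by the call site, intel_de_wait_custom() takes the register, mask, expected value, fast and slow timeouts, and an optional pointer that receives the final value; that reading is inferred from the arguments, not verified here), then resets the bus and bails out on a timeout, an error flag, or an unexpected command type. The sketch below shows only the generic poll-until-deadline shape of such a wait, not the i915 implementation:

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Illustrative busy-poll; a real implementation would sleep or relax
 * between reads.  read_status() stands in for the register read. */
static int wait_for_ready(uint32_t (*read_status)(void), uint32_t ready_bit,
                          long timeout_us, uint32_t *out)
{
        struct timespec ts;
        long long start_ns, now_ns;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        start_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;

        for (;;) {
                uint32_t val = read_status();

                if (val & ready_bit) {
                        *out = val;
                        return 0;
                }

                clock_gettime(CLOCK_MONOTONIC, &ts);
                now_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
                if ((now_ns - start_ns) / 1000 > timeout_us)
                        return -ETIMEDOUT;      /* caller resets the bus */
        }
}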
-static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_read_once(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -191,7 +201,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -200,33 +210,34 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
if (ack < 0)
return ack;
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
-static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 __intel_cx0_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to read successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_read_once(i915, port, lane, addr);
+ status = __intel_cx0_read_once(encoder, lane, addr);
if (status >= 0)
return status;
@@ -238,18 +249,20 @@ static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
return 0;
}
-static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 intel_cx0_read(struct intel_encoder *encoder,
u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
- return __intel_cx0_read(i915, port, lane, addr);
+ return __intel_cx0_read(encoder, lane, addr);
}
-static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_write_once(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -258,7 +271,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -274,45 +287,46 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (committed) {
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return 0;
}
-static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_write(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to write successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
+ status = __intel_cx0_write_once(encoder, lane, addr, data, committed);
if (status == 0)
return;
@@ -322,63 +336,66 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_write(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_write(i915, port, lane, addr, data, committed);
+ __intel_cx0_write(encoder, lane, addr, data, committed);
}
-static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
+static void intel_c20_sram_write(struct intel_encoder *encoder,
int lane, u16 addr, u16 data)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
}
-static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
+static u16 intel_c20_sram_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u16 val;
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
- val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
+ val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H);
val <<= 8;
- val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
+ val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L);
return val;
}
-static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_rmw(struct intel_encoder *encoder,
int lane, u16 addr, u8 clear, u8 set, bool committed)
{
u8 old, val;
- old = __intel_cx0_read(i915, port, lane, addr);
+ old = __intel_cx0_read(encoder, lane, addr);
val = (old & ~clear) | set;
if (val != old)
- __intel_cx0_write(i915, port, lane, addr, val, committed);
+ __intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_rmw(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
+ __intel_cx0_rmw(encoder, lane, addr, clear, set, committed);
}
static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
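
__intel_cx0_rmw() above follows the usual read-modify-write shape: read the old value over the message bus, compute (old & ~clear) | set, and skip the write entirely when the value is unchanged, avoiding a needless message-bus write transaction. A minimal standalone version of that arithmetic (helper name invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Returns true if a write is needed; *new_val receives the updated value. */
static bool rmw_needed(uint8_t old, uint8_t clear, uint8_t set, uint8_t *new_val)
{
        *new_val = (uint8_t)((old & ~clear) | set);
        return *new_val != old;
}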
@@ -414,7 +431,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
@@ -423,7 +439,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
return;
- owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -433,14 +449,14 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
return;
}
- if (intel_is_c10phy(i915, phy)) {
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder)) {
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3),
C10_CMN3_TXVBOOST_MASK,
C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
MB_WRITE_UNCOMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1),
C10_TX1_TERMCTL_MASK,
C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
MB_WRITE_COMMITTED);
@@ -455,27 +471,27 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (!(lane_mask & owned_lane_mask))
continue;
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
MB_WRITE_COMMITTED);
}
/* Write Override enables in 0xD71 */
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD,
0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
MB_WRITE_COMMITTED);
- if (intel_is_c10phy(i915, phy))
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -1811,7 +1827,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state;
+ struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
int i;
if (intel_crtc_has_dp_encoder(crtc_state)) {
@@ -1843,7 +1859,7 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c10 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
intel_c10pll_update_pll(crtc_state, encoder);
return 0;
@@ -1856,7 +1872,6 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
intel_wakeref_t wakeref;
int i;
@@ -1867,16 +1882,15 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
* According to C10 VDR Register programming Sequence we need
* to do this to read PHY internal registers from MsgBus.
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, lane, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
- PHY_C10_VDR_PLL(i));
+ pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i));
- pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
- pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+ pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0));
+ pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -1885,31 +1899,31 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
+ const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10;
int i;
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
/* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
pll_state->pll[i],
(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
}
@@ -2037,10 +2051,8 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
{
struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
- struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(&dig_port->base))
return intel_c10_phy_check_hdmi_link_rate(clock);
return intel_c20_phy_check_hdmi_link_rate(clock);
}
@@ -2067,7 +2079,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
/* try computed C20 HDMI tables before using consolidated tables */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock,
- &crtc_state->cx0pll_state.c20) == 0)
+ &crtc_state->dpll_hw_state.cx0pll.c20) == 0)
return 0;
}
@@ -2077,7 +2089,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c20 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
return 0;
}
}
@@ -2088,10 +2100,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_state(crtc_state, encoder);
return intel_c20pll_calc_state(crtc_state, encoder);
}
@@ -2149,7 +2158,6 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c20pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
intel_wakeref_t wakeref;
int i;
@@ -2157,25 +2165,25 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
/* Read Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_TX_CNTX_CFG(i));
else
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_TX_CNTX_CFG(i));
}
/* Read common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_CMN_CNTX_CFG(i));
else
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_CMN_CNTX_CFG(i));
}
@@ -2183,20 +2191,20 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i));
else
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i));
}
} else {
/* MPLLA configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i));
else
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i));
}
}
@@ -2327,7 +2335,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
+ const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
u32 clock = crtc_state->port_clock;
@@ -2338,7 +2346,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
dp = true;
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2347,7 +2355,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
*/
if (intel_c20_protocol_switch_valid(encoder)) {
for (i = 0; i < 4; i++)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
usleep_range(4000, 4100);
}
@@ -2355,63 +2363,63 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 3.1 Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
}
/* 3.2 common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
}
/* 3.3 mpllb or mplla configuration */
if (intel_c20phy_use_mpllb(pll_state)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
}
/* 4. Program custom width to match the link protocol */
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2420,7 +2428,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
@@ -2476,9 +2484,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
@@ -2508,11 +2516,12 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
- enum port port,
+static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
u8 lane_mask, u8 state)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
@@ -2528,7 +2537,7 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
}
intel_de_rmw(i915, buf_ctl2_reg,
@@ -2536,15 +2545,18 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask), 0,
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
-static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
+static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
@@ -2577,13 +2589,13 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
return val;
}
-static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
- struct intel_encoder *encoder,
+static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
bool lane_reversal)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(i915, port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
@@ -2593,19 +2605,19 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ lane_phy_current_status, lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
@@ -2613,16 +2625,16 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
- intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_RESET);
- intel_cx0_setup_powerdown(i915, port);
+ intel_cx0_setup_powerdown(encoder);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
@@ -2640,11 +2652,10 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
int i;
u8 disables;
bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
- enum port port = encoder->port;
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
@@ -2666,14 +2677,14 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
if (!(owned_lane_mask & lane_mask))
continue;
- intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
CONTROL2_DISABLE_SINGLE_TX,
disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
MB_WRITE_COMMITTED);
}
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
@@ -2705,7 +2716,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
@@ -2719,13 +2730,13 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
/* 2. Bring PHY out of reset. */
- intel_cx0_phy_lane_reset(i915, encoder, lane_reversal);
+ intel_cx0_phy_lane_reset(encoder, lane_reversal);
/*
* 3. Change Phy power state to Ready.
* TODO: For DP alt mode use only one lane.
*/
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_READY);
/*
@@ -2735,7 +2746,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
*/
/* 5. Program PHY internal PLL internal registers. */
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10_pll_program(i915, crtc_state, encoder);
else
intel_c20_pll_program(i915, crtc_state, encoder);
@@ -2767,10 +2778,10 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
@@ -2831,7 +2842,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
/*
@@ -2858,10 +2869,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK,
+ XELPDP_TBT_CLOCK_ACK,
+ 100, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -2892,12 +2903,12 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
static void intel_cx0pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_c10 = intel_is_c10phy(i915, phy);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ bool is_c10 = intel_encoder_is_c10phy(encoder);
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Change owned PHY lane power to Disable state. */
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
is_c10 ? CX0_P2PG_STATE_DISABLE :
CX0_P4PG_STATE_DISABLE);
@@ -2920,10 +2931,10 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
@@ -2944,7 +2955,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
/*
* 1. Follow the Display Voltage Frequency Switching Sequence Before
@@ -2958,8 +2969,8 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -3014,7 +3025,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
struct intel_c10pll_state *mpllb_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10;
int i;
if (intel_crtc_needs_fastset(state))
@@ -3043,10 +3054,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
else
intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
@@ -3055,10 +3063,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
@@ -3070,7 +3075,7 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
struct intel_c20pll_state *mpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20;
bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
@@ -3124,7 +3129,6 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- enum phy phy;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3138,14 +3142,13 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
return;
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
- phy = intel_port_to_phy(i915, encoder->port);
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
else
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c6682677253a..3e03af3e006c 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -11,7 +11,6 @@
#include <linux/bits.h>
enum icl_port_dpll_id;
-enum phy;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -22,7 +21,7 @@ struct intel_crtc_state;
struct intel_encoder;
struct intel_hdmi;
-bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mtl_pll_disable(struct intel_encoder *encoder);
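
The recurring pattern across intel_cx0_phy.c (and continued in intel_ddi.c below) is that helpers stop taking (i915, port) or (i915, phy) pairs and take the intel_encoder instead, deriving whatever they need from it via to_i915(encoder->base.dev), encoder->port, intel_encoder_to_phy() and intel_encoder_to_tc(). A condensed sketch of that shape, modelled on intel_encoder_is_c10phy() above (the example name is invented, and the remaining platform checks are not visible in this excerpt):

/* Sketch of the parameter-trimming pattern: derive device, port and PHY
 * from the encoder instead of threading them through every call chain. */
static bool example_is_c10phy(struct intel_encoder *encoder)
{
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_encoder_to_phy(encoder);

        if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
                return true;

        return false;   /* illustrative fallback; real checks not shown here */
}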
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c587a8efeafc..3c3fc53376ce 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -200,10 +200,10 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
port_name(port));
}
-static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
- enum port port)
+static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
int timeout_us;
int ret;
@@ -218,7 +218,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
timeout_us = 1200;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
timeout_us = 3000;
else
timeout_us = 1000;
@@ -331,7 +331,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
intel_dp->DP = dig_port->saved_port_bits |
@@ -345,7 +344,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -632,6 +631,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -662,10 +662,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
+ if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- drm_dbg_kms(&dev_priv->drm,
- "Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -895,7 +894,6 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
/*
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -914,7 +912,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
else if (DISPLAY_VER(i915) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(i915, phy)))
+ intel_encoder_is_tc(&dig_port->base)))
return intel_aux_power_domain(dig_port);
else
return POWER_DOMAIN_INVALID;
@@ -984,7 +982,7 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
@@ -1113,7 +1111,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
@@ -1176,7 +1174,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1227,7 +1225,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1328,7 +1326,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1526,7 +1524,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1540,7 +1538,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1549,7 +1547,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder)
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1558,7 +1556,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
@@ -1570,7 +1568,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1584,7 +1582,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1593,7 +1591,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1602,7 +1600,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1614,7 +1612,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1637,7 +1635,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1646,7 +1644,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1655,7 +1653,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
enum intel_dpll_id id;
u32 val;
@@ -1680,7 +1678,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1694,7 +1692,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1703,7 +1701,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1712,7 +1710,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1767,7 +1765,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
if (drm_WARN_ON(&i915->drm, !pll))
@@ -1787,7 +1785,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
mutex_lock(&i915->display.dpll.lock);
@@ -1803,7 +1801,7 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
@@ -1820,7 +1818,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
@@ -2086,12 +2084,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (!intel_phy_is_tc(dev_priv, phy) ||
+ if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
@@ -2327,9 +2324,9 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_combo(i915, phy)) {
+ if (intel_encoder_is_combo(encoder)) {
+ enum phy phy = intel_encoder_to_phy(encoder);
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -2339,10 +2336,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
-/* Splitter enable for eDP MSO is limited to certain pipes. */
+/*
+ * Splitter enable for eDP MSO is limited to certain pipes, on certain
+ * platforms.
+ */
static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(i915) > 20)
+ return ~0;
+ else if (IS_ALDERLAKE_P(i915))
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2812,15 +2814,14 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv)) {
+ if (HAS_DP20(dev_priv))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
- if (crtc_state->has_panel_replay)
- drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
- DP_PANEL_REPLAY_ENABLE);
- }
+
+ /* Panel replay has to be enabled in sink dpcd before link training. */
+ if (crtc_state->has_panel_replay)
+ intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3095,39 +3096,48 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void intel_ddi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *slave_crtc;
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
- intel_disable_transcoder(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
- intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_dsc_disable(old_pipe_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_pipe_crtc_state);
else
- ilk_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_pipe_crtc_state);
}
+}
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
- const struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- intel_crtc_vblank_off(old_slave_crtc_state);
-
- intel_dsc_disable(old_slave_crtc_state);
- skl_scaler_disable(old_slave_crtc_state);
- }
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_post_disable_hdmi_or_sst(state, encoder, old_crtc_state,
+ old_conn_state);
/*
* When called from DP MST code:
@@ -3155,14 +3165,11 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
main_link_aux_power_domain_put(dig_port, old_crtc_state);
- if (is_tc_port)
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_put_link(dig_port);
}
@@ -3263,7 +3270,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -3347,14 +3353,14 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3362,10 +3368,10 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3374,7 +3380,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
- intel_crtc_vblank_on(crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
@@ -3470,19 +3482,17 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc *slave_crtc;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy))
+ if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
return;
- intel_update_active_dpll(state, crtc, encoder);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state))
- intel_update_active_dpll(state, slave_crtc, encoder);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state))
+ intel_update_active_dpll(state, pipe_crtc, encoder);
}
static void
@@ -3493,8 +3503,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ bool is_tc_port = intel_encoder_is_tc(encoder);
if (is_tc_port) {
struct intel_crtc *master_crtc =
@@ -3513,14 +3522,14 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_phy_set_lane_optim_mask(encoder,
- crtc_state->lane_lat_optim_mask);
+ bxt_dpio_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
}
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
int ln;
for (ln = 0; ln < 2; ln++)
@@ -3574,7 +3583,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3624,7 +3633,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -3681,7 +3690,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
if (intel_de_wait_for_set(dev_priv,
dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 1))
+ DP_TP_STATUS_IDLE_DONE, 2))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
}
@@ -3946,7 +3955,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+ bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -3972,6 +3981,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_ADAPTIVE_SYNC);
intel_audio_codec_get_config(encoder, pipe_config);
}
@@ -4006,8 +4016,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
} else {
- intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4016,8 +4026,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
static void dg2_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
- crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+ intel_mpllb_readout_hw_state(encoder, &crtc_state->dpll_hw_state.mpllb);
+ crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->dpll_hw_state.mpllb);
intel_ddi_get_config(encoder, crtc_state);
}
@@ -4144,10 +4154,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
@@ -4159,10 +4166,9 @@ static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
bool fastset = true;
- if (intel_phy_is_tc(i915, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -4226,7 +4232,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -4256,7 +4262,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
@@ -4348,10 +4359,9 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp_encoder_flush_work(encoder);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_cleanup(dig_port);
intel_display_power_flush_work(i915);
@@ -4362,16 +4372,14 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp->reset_link_params = true;
intel_pps_encoder_reset(intel_dp);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_init_mode(dig_port);
}
@@ -4538,11 +4546,9 @@ static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc = intel_phy_is_tc(i915, phy);
+ bool is_tc = intel_encoder_is_tc(encoder);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
@@ -4824,10 +4830,7 @@ static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- return init_dp || intel_phy_is_tc(i915, phy);
+ return init_dp || intel_encoder_is_tc(encoder);
}
static bool assert_has_icl_dsi(struct drm_i915_private *i915)
@@ -5071,17 +5074,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
}
@@ -5126,7 +5129,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
goto err;
}
- if (intel_phy_is_tc(dev_priv, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
@@ -5155,7 +5158,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
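Illustrative sketch (not part of the patch): the hunks above apply one mechanical conversion throughout intel_ddi.c — call sites stop deriving the PHY or TC port from the (i915, encoder->port) pair and instead ask the encoder directly. The sketch only uses helpers the series itself introduces (intel_encoder_is_tc()) or removes from these call sites (intel_port_to_phy(), intel_phy_is_tc()); the wrapper names old_style_is_tc/new_style_is_tc are hypothetical.

	/* before: the device pointer is needed just to classify the port */
	static bool old_style_is_tc(struct intel_encoder *encoder)
	{
		struct drm_i915_private *i915 = to_i915(encoder->base.dev);
		enum phy phy = intel_port_to_phy(i915, encoder->port);

		return intel_phy_is_tc(i915, phy);
	}

	/* after: the encoder-based helper hides the i915/port-to-phy plumbing */
	static bool new_style_is_tc(struct intel_encoder *encoder)
	{
		return intel_encoder_is_tc(encoder);
	}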
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index de809e2d9cac..4d21ce734343 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1691,14 +1691,11 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_is_c10phy(i915, phy))
+ else if (!intel_encoder_is_c10phy(encoder))
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
@@ -1707,14 +1704,13 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
if (DISPLAY_VER(i915) >= 14) {
encoder->get_buf_trans = mtl_get_cx0_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
@@ -1725,16 +1721,16 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
} else if (IS_DG1(i915)) {
encoder->get_buf_trans = dg1_get_combo_buf_trans;
} else if (DISPLAY_VER(i915) >= 12) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = tgl_get_combo_buf_trans;
else
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
} else if (DISPLAY_VER(i915) == 11) {
- if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
+ if (IS_JASPERLAKE(i915))
encoder->get_buf_trans = jsl_get_combo_buf_trans;
- else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+ else if (IS_ELKHARTLAKE(i915))
encoder->get_buf_trans = ehl_get_combo_buf_trans;
- else if (intel_phy_is_combo(i915, phy))
+ else if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = icl_get_combo_buf_trans;
else
encoder->get_buf_trans = icl_get_mg_buf_trans;
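For context, a minimal usage sketch of the vtable that intel_ddi_buf_trans_init() populates above; the caller shown here is hypothetical and assumes the usual i915 buf-trans table type returned by these hooks.

	/* hypothetical caller: the per-platform choice made above is
	 * invisible at the point of use */
	const struct intel_ddi_buf_trans *trans;
	int n_entries;

	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (!trans || n_entries == 0)
		return -EINVAL;	/* no table for this platform/link config */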
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 42552d8c151e..e881bfeafb47 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -10,80 +10,185 @@
#include "i915_trace.h"
#include "intel_uncore.h"
+static inline struct intel_uncore *__to_uncore(struct intel_display *display)
+{
+ return &to_i915(display->drm)->uncore;
+}
+
static inline u32
-intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read(&i915->uncore, reg);
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
-intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read8(&i915->uncore, reg);
+ u8 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read8(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read8(p,...) __intel_de_read8(__to_intel_display(p), __VA_ARGS__)
static inline u64
-intel_de_read64_2x32(struct drm_i915_private *i915,
- i915_reg_t lower_reg, i915_reg_t upper_reg)
+__intel_de_read64_2x32(struct intel_display *display,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
{
- return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+ u64 val;
+
+ intel_dmc_wl_get(display, lower_reg);
+ intel_dmc_wl_get(display, upper_reg);
+
+ val = intel_uncore_read64_2x32(__to_uncore(display), lower_reg,
+ upper_reg);
+
+ intel_dmc_wl_put(display, upper_reg);
+ intel_dmc_wl_put(display, lower_reg);
+
+ return val;
}
+#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
- intel_uncore_posting_read(&i915->uncore, reg);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_posting_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
- intel_uncore_write(&i915->uncore, reg, val);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_write(__to_uncore(display), reg, val);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
+ u32 clear, u32 set)
{
- return intel_uncore_rmw(&i915->uncore, reg, clear, set);
+ return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
}
+#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__)
+
+static inline u32
+__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
+ u32 set)
+{
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = __intel_de_rmw_nowl(display, reg, clear, set);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
+}
+#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+____intel_de_wait_for_register_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+ return intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout);
}
+#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+__intel_de_wait(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
+ timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return __intel_wait_for_register(&i915->uncore, reg, mask, value,
- fast_timeout_us, slow_timeout_ms, out_value);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
{
- return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value,
+ fast_timeout_us, slow_timeout_ms, out_value);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout)
{
- return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout);
+}
+#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
+
+static inline int
+__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait(display, reg, mask, 0, timeout);
}
+#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -94,33 +199,38 @@ intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
- val = intel_uncore_read_fw(&i915->uncore, reg);
+ val = intel_uncore_read_fw(__to_uncore(display), reg);
trace_i915_reg_rw(false, reg, val, sizeof(val), true);
return val;
}
+#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
- intel_uncore_write_fw(&i915->uncore, reg, val);
+ intel_uncore_write_fw(__to_uncore(display), reg, val);
}
+#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read_notrace(&i915->uncore, reg);
+ return intel_uncore_read_notrace(__to_uncore(display), reg);
}
+#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg,
+ u32 val)
{
- intel_uncore_write_notrace(&i915->uncore, reg, val);
+ intel_uncore_write_notrace(__to_uncore(display), reg, val);
}
+#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__)
#endif /* __INTEL_DE_H__ */
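Every accessor in the reworked intel_de.h above follows the same shape: take a struct intel_display pointer, bracket the uncore access with intel_dmc_wl_get()/intel_dmc_wl_put(), and wrap the function in a macro that accepts either pointer type through __to_intel_display(). A sketch of a hypothetical additional accessor, only to illustrate that shape — the 16-bit write variant is not part of this patch and assumes a matching intel_uncore_write16() helper exists:

	static inline void
	__intel_de_write16(struct intel_display *display, i915_reg_t reg, u16 val)
	{
		intel_dmc_wl_get(display, reg);

		intel_uncore_write16(__to_uncore(display), reg, val);

		intel_dmc_wl_put(display, reg);
	}
	#define intel_de_write16(p,...) __intel_de_write16(__to_intel_display(p), __VA_ARGS__)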
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ab2f52d21bad..273323f30ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -85,7 +85,6 @@
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -120,6 +119,7 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -275,6 +275,13 @@ static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
return hweight8(crtc_state->bigjoiner_pipes);
}
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return BIT(crtc->pipe) | crtc_state->bigjoiner_pipes;
+}
+
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
@@ -383,8 +390,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
break;
}
- if (intel_de_wait_for_register(dev_priv, dpll_reg,
- port_mask, expected_mask, 1000))
+ if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -430,6 +436,18 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
+ u32 set = 0;
+
+ if (DISPLAY_VER(dev_priv) == 14)
+ set |= DP_FEC_BS_JITTER_WA;
+
+ intel_de_rmw(dev_priv,
+ hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ clear, set);
+ }
+
val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
@@ -437,6 +455,14 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
return;
}
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ new_crtc_state->dsc.compression_enable) {
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+ val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
+ TRANSCONF_PIXEL_COUNT_SCALING_X4);
+ }
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
@@ -483,6 +509,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ old_crtc_state->dsc.compression_enable)
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
@@ -535,7 +566,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
- (plane->fbc &&
+ (plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -1552,18 +1583,21 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool apply)
+/* Display WA #1180: WaDisableScalarClockGating: glk */
+static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
- u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (apply)
- val |= mask;
- else
- val &= ~mask;
+ return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+}
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
+static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
@@ -1586,24 +1620,6 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
-static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
-
- /*
- * Enable sequence steps 1-7 on bigjoiner master
- */
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_pll_enable(state, master_crtc);
-
- if (crtc_state->shared_dpll)
- intel_enable_shared_dpll(crtc_state);
-
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_enable(state, master_crtc);
-}
-
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1639,90 +1655,107 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
- bool psl_clkgate_wa;
+ struct intel_crtc *pipe_crtc;
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state))
+ intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
- if (!new_crtc_state->bigjoiner_pipes) {
- intel_encoders_pre_pll_enable(state, crtc);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (new_crtc_state->shared_dpll)
- intel_enable_shared_dpll(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_encoders_pre_enable(state, crtc);
- } else {
- icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
+ if (pipe_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(pipe_crtc_state);
}
- intel_dsc_enable(new_crtc_state);
+ intel_encoders_pre_enable(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 13)
- intel_uncompressed_joiner_enable(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_set_pipe_src_size(new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipe_misc(new_crtc_state);
+ intel_dsc_enable(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(pipe_crtc_state);
+
+ intel_set_pipe_src_size(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipe_misc(pipe_crtc_state);
+ }
- if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
- !transcoder_is_dsi(cpu_transcoder))
+ if (!transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
- crtc->active = true;
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- /* Display WA #1180: WaDisableScalarClockGating: glk */
- psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
- new_crtc_state->pch_pfit.enabled;
- if (psl_clkgate_wa)
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+ pipe_crtc->active = true;
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_pfit_enable(new_crtc_state);
- else
- ilk_pfit_enable(new_crtc_state);
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- /*
- * On ILK+ LUT must be loaded before the pipe is running but with
- * clocks enabled
- */
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma/csc for pipe bottom color */
- if (DISPLAY_VER(dev_priv) < 9)
- intel_disable_primary_plane(new_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_pfit_enable(pipe_crtc_state);
+ else
+ ilk_pfit_enable(pipe_crtc_state);
- hsw_set_linetime_wm(new_crtc_state);
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_color_load_luts(pipe_crtc_state);
+ intel_color_commit_noarm(pipe_crtc_state);
+ intel_color_commit_arm(pipe_crtc_state);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (DISPLAY_VER(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_set_pipe_chicken(new_crtc_state);
+ hsw_set_linetime_wm(pipe_crtc_state);
- intel_initial_watermarks(state, crtc);
+ if (DISPLAY_VER(dev_priv) >= 11)
+ icl_set_pipe_chicken(pipe_crtc_state);
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- intel_crtc_vblank_on(new_crtc_state);
+ intel_initial_watermarks(state, pipe_crtc);
+ }
intel_encoders_enable(state, crtc);
- if (psl_clkgate_wa) {
- intel_crtc_wait_for_next_vblank(crtc);
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
- }
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+ enum pipe hsw_workaround_pipe;
- /* If we change the relative order between pipe/planes enabling, we need
- * to change the workaround. */
- hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- struct intel_crtc *wa_crtc;
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
+ intel_crtc_wait_for_next_vblank(pipe_crtc);
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
+ }
- wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+ /*
+ * If we change the relative order between pipe/planes
+ * enabling, we need to change the workaround.
+ */
+ hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ struct intel_crtc *wa_crtc =
+ intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
- intel_crtc_wait_for_next_vblank(wa_crtc);
- intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ }
}
}
@@ -1786,29 +1819,28 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *pipe_crtc;
/*
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- intel_encoders_disable(state, crtc);
- intel_encoders_post_disable(state, crtc);
- }
-
- intel_disable_shared_dpll(old_crtc_state);
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- intel_encoders_post_pll_disable(state, crtc);
+ intel_disable_shared_dpll(old_pipe_crtc_state);
+ }
- intel_dmc_disable_pipe(i915, crtc->pipe);
+ intel_encoders_post_pll_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
- intel_dmc_disable_pipe(i915, slave_crtc->pipe);
- }
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -1836,6 +1868,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
+/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
@@ -1857,6 +1890,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1877,6 +1911,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1886,6 +1921,7 @@ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
+/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
@@ -1903,6 +1939,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
return PHY_A + port - PORT_A;
}
+/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
@@ -1914,6 +1951,41 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
return TC_PORT_1 + port - PORT_C;
}
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_phy(i915, encoder->port);
+}
+
+bool intel_encoder_is_combo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_snps(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+}
+
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_tc(i915, encoder->port);
+}
+
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
@@ -2381,7 +2453,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->max_dotclk_freq;
+ int clock_limit = i915->display.cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2405,7 +2477,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->max_dotclk_freq;
+ clock_limit = i915->display.cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
@@ -2709,15 +2781,6 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
-
- if (!crtc_state->enable_psr2_su_region_et)
- return;
-
- width = drm_rect_width(&crtc_state->psr2_su_area);
- height = drm_rect_height(&crtc_state->psr2_su_area);
-
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
- PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -3008,19 +3071,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(pipe_config);
+ i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
+
if (DISPLAY_VER(dev_priv) >= 4) {
- /* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
- else
- tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -3030,26 +3090,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
- DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
- FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
- FP1(crtc->pipe));
- } else {
- /* Mask out read-only status bits. */
- pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
- DPLL_PORTC_READY_MASK |
- DPLL_PORTB_READY_MASK);
- }
if (IS_CHERRYVIEW(dev_priv))
- chv_crtc_clock_get(crtc, pipe_config);
+ chv_crtc_clock_get(pipe_config);
else if (IS_VALLEYVIEW(dev_priv))
- vlv_crtc_clock_get(crtc, pipe_config);
+ vlv_crtc_clock_get(pipe_config);
else
- i9xx_crtc_clock_get(crtc, pipe_config);
+ i9xx_crtc_clock_get(pipe_config);
/*
* Normally the dotclock is filled in by the encoder .get_config()
@@ -3675,8 +3722,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3702,11 +3749,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -4723,8 +4770,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_bigjoiner_adjust_pipe_src(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4792,42 +4837,92 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
}
static bool
+intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
+ return a->vtotal == b->vtotal &&
+ a->target_rr == b->target_rr &&
+ a->duration_incr_ms == b->duration_incr_ms &&
+ a->duration_decr_ms == b->duration_decr_ms &&
+ a->mode == b->mode;
+}
+
+static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
return memcmp(a, b, len) == 0;
}
+static void __printf(5, 6)
+pipe_config_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (fastset)
+ drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+ else
+ drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+
+ va_end(args);
+}
+
static void
-pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
- bool fastset, const char *name,
+pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const char *loglevel;
+
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s infoframe\n", name);
- drm_dbg_kms(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg_kms(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ loglevel = KERN_DEBUG;
} else {
- drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
- drm_err(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ loglevel = KERN_ERR;
}
+
+ pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
+
+ drm_printf(p, "expected:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ drm_printf(p, "found:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, b);
}
static void
-pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
+ pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+
+ drm_printf(p, "expected:\n");
+ drm_dp_vsc_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_vsc_sdp_log(p, b);
+}
+
+static void
+pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
+ bool fastset, const char *name,
+ const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
struct drm_printer p;
if (fastset) {
@@ -4841,9 +4936,9 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
}
drm_printf(&p, "expected:\n");
- drm_dp_vsc_sdp_log(&p, a);
+ drm_dp_as_sdp_log(&p, a);
drm_printf(&p, "found:\n");
- drm_dp_vsc_sdp_log(&p, b);
+ drm_dp_as_sdp_log(&p, b);
}
/* Returns the length up to and including the last differing byte */
@@ -4861,64 +4956,35 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
}
static void
-pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
+pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const char *loglevel;
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_DEBUG;
} else {
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_ERR;
}
-}
-static void __printf(4, 5)
-pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
- const char *name, const char *format, ...)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
+ pipe_config_mismatch(p, fastset, crtc, name, "buffer");
- if (fastset)
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
- else
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
- va_end(args);
+ print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
}
static void
-pipe_config_pll_mismatch(bool fastset,
+pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_dpll_hw_state *a,
@@ -4926,25 +4992,12 @@ pipe_config_pll_mismatch(bool fastset,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_dbg_kms(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_dbg_kms(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- } else {
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_err(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_err(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- }
+ drm_printf(p, "expected:\n");
+ intel_dpll_dump_hw_state(i915, p, a);
+ drm_printf(p, "found:\n");
+ intel_dpll_dump_hw_state(i915, p, b);
}
bool
@@ -4954,13 +5007,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
{
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_printer p;
bool ret = true;
+ if (fastset)
+ p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ else
+ p = drm_err_printer(&dev_priv->drm, NULL);
+
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
@@ -4972,7 +5031,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -4984,7 +5043,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
@@ -4996,7 +5055,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
__stringify(name) " is not bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
str_yes_no(pipe_config->name)); \
@@ -5006,7 +5065,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
@@ -5017,7 +5076,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
current_config->name.tu, \
@@ -5037,7 +5096,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_PLL(name) do { \
if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
&pipe_config->name)) { \
- pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
@@ -5070,7 +5129,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
@@ -5082,7 +5141,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5092,7 +5151,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
+ if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5103,7 +5172,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
- pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
current_config->name, \
pipe_config->name, \
(len)); \
@@ -5116,7 +5185,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!intel_color_lut_equal(current_config, \
current_config->lut, pipe_config->lut, \
is_pre_csc_lut)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(lut), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
"hw_state doesn't match sw_state"); \
ret = false; \
} \
@@ -5245,6 +5314,18 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
+ /*
+ * Panel replay has to be enabled before link training. PSR doesn't have
+ * this requirement -> check these only if using panel replay
+ */
+ if (current_config->has_panel_replay || pipe_config->has_panel_replay) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
+ PIPE_CONF_CHECK_BOOL(has_panel_replay);
+ }
+
PIPE_CONF_CHECK_BOOL(double_wide);
if (dev_priv->display.dpll.mgr)
@@ -5280,6 +5361,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -5331,6 +5413,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.flipline);
PIPE_CONF_CHECK_I(vrr.pipeline_full);
PIPE_CONF_CHECK_I(vrr.guardband);
+ PIPE_CONF_CHECK_I(vrr.vsync_start);
+ PIPE_CONF_CHECK_I(vrr.vsync_end);
}
#undef PIPE_CONF_CHECK_X
@@ -5576,14 +5660,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
- drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ crtc->base.base.id, crtc->base.name);
else
new_crtc_state->uapi.mode_changed = false;
@@ -6237,27 +6323,37 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
- drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable);
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
continue;
- }
ret = intel_crtc_prepare_cleared_state(state, crtc);
if (ret)
- break;
+ goto fail;
if (!new_crtc_state->hw.enable)
continue;
ret = intel_modeset_pipe_config(state, crtc, limits);
if (ret)
- break;
+ goto fail;
+ }
- ret = intel_atomic_check_bigjoiner(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
+ continue;
+
+ if (!new_crtc_state->hw.enable)
+ continue;
+
+ ret = intel_modeset_pipe_config_late(state, crtc);
if (ret)
- break;
+ goto fail;
}
+fail:
if (ret)
*failed_pipe = crtc->pipe;
@@ -6353,16 +6449,26 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ continue;
+ }
+
+ ret = intel_atomic_check_bigjoiner(state, crtc);
+ if (ret)
+ goto fail;
+ }
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (new_crtc_state->hw.enable) {
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
- }
+ intel_bigjoiner_adjust_pipe_src(new_crtc_state);
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6644,17 +6750,21 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- /* VRR will be enabled later, if required */
- intel_crtc_update_active_timings(new_crtc_state, false);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+ /* VRR will be enabled later, if required */
+ intel_crtc_update_active_timings(pipe_crtc_state, false);
+ }
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- return;
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -6746,31 +6856,42 @@ static void intel_update_crtc(struct intel_atomic_state *state,
}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- intel_crtc_disable_pipe_crc(crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_crtc_disable_pipe_crc(pipe_crtc);
dev_priv->display.funcs.display->crtc_disable(state, crtc);
- crtc->active = false;
- intel_fbc_disable(crtc);
- if (!new_crtc_state->hw.active)
- intel_initial_watermarks(state, crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *new_pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ pipe_crtc->active = false;
+ intel_fbc_disable(pipe_crtc);
+
+ if (!new_pipe_crtc_state->hw.active)
+ intel_initial_watermarks(state, pipe_crtc);
+ }
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- u32 handled = 0;
+ u8 disable_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -6778,21 +6899,31 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
+ /*
+ * Needs to be done even for pipes
+ * that weren't enabled previously.
+ */
intel_pre_plane_update(state, crtc);
if (!old_crtc_state->hw.active)
continue;
+ disable_pipes |= BIT(crtc->pipe);
+ }
+
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
+ continue;
+
intel_crtc_disable_planes(state, crtc);
}
/* Only disable port sync and MST slaves */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
/* In case of Transcoder port Sync master slave CRTCs can be
@@ -6801,28 +6932,28 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
* Slave vblanks are masked till Master Vblanks.
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
- !intel_dp_mst_is_slave_trans(old_crtc_state) &&
- !intel_crtc_is_bigjoiner_slave(old_crtc_state))
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
- handled |= BIT(crtc->pipe);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
/* Disable everything else left on */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state) ||
- (handled & BIT(crtc->pipe)))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
+
+ drm_WARN_ON(&i915->drm, disable_pipes);
}
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -6889,9 +7020,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
+ intel_dbuf_mbus_pre_ddb_update(state);
+
while (update_pipes) {
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ /*
+ * Commit in reverse order to make bigjoiner master
+ * send the uapi events after slaves are done.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -6919,6 +7056,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
+ intel_dbuf_mbus_post_ddb_update(state);
+
update_pipes = modeset_pipes;
/*
@@ -6931,12 +7070,14 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_master(new_crtc_state) ||
- intel_crtc_is_bigjoiner_master(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- modeset_pipes &= ~BIT(pipe);
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6951,7 +7092,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- modeset_pipes &= ~BIT(pipe);
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6968,7 +7112,11 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ /*
+ * Commit in reverse order to make bigjoiner master
+ * send the uapi events after slaves are done.
+ */
+ for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -7165,7 +7313,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
- intel_mbus_dbox_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
@@ -7690,7 +7837,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->max_dotclk_freq;
+ int max_dotclock = i915->display.cdclk.max_dotclk_freq;
/* icl+ might use bigjoiner */
if (DISPLAY_VER(i915) >= 11)
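
The intel_display.c changes above revolve around picking a drm_printer once in intel_pipe_config_compare() (drm_dbg_printer() for fastset checks, drm_err_printer() otherwise) and threading it through every mismatch helper. A user-space analogue of that pattern, with made-up names, is sketched below:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Pick the sink once (debug vs. error), then let every mismatch helper
 * print through it. Not the i915 code, just the shape of the pattern. */
struct printer {
	void (*print)(const char *fmt, va_list ap);
};

static void print_err(const char *fmt, va_list ap) { vfprintf(stderr, fmt, ap); }
static void print_dbg(const char *fmt, va_list ap) { vfprintf(stdout, fmt, ap); }

static void p_printf(struct printer *p, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	p->print(fmt, ap);
	va_end(ap);
}

/* Mimics PIPE_CONF_CHECK_I(): report through the chosen printer and
 * remember that the two states disagree. */
#define CHECK_I(p, ret, cur, new, field) do { \
	if ((cur)->field != (new)->field) { \
		p_printf((p), "%s: expected %d, found %d\n", \
			 #field, (cur)->field, (new)->field); \
		(ret) = false; \
	} \
} while (0)

struct state { int pixel_rate; };

int main(void)
{
	struct state cur = { .pixel_rate = 148500 }, new = { .pixel_rate = 297000 };
	bool fastset = true, ret = true;
	struct printer p = { fastset ? print_dbg : print_err };

	CHECK_I(&p, ret, &cur, &new, pixel_rate);
	return ret ? 0 : 1;
}
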
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f4a0773f0fca..56d1c0e3e62c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -280,6 +280,12 @@ enum phy_fia {
base.head) \
for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
+#define for_each_intel_crtc_in_pipe_mask_reverse(dev, intel_crtc, pipe_mask) \
+ list_for_each_entry_reverse((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((pipe_mask) & BIT((intel_crtc)->pipe))
+
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
@@ -344,6 +350,14 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_new_intel_crtc_in_state_reverse(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -408,6 +422,7 @@ intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
@@ -448,6 +463,13 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
+
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
+bool intel_encoder_is_combo(struct intel_encoder *encoder);
+bool intel_encoder_is_snps(struct intel_encoder *encoder);
+bool intel_encoder_is_tc(struct intel_encoder *encoder);
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
+
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
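
The new reverse iterators above walk CRTCs and pipes from the end so that bigjoiner masters are visited after their slaves. A simplified, stand-alone sketch of a mask-filtered reverse iterator (illustrative only; the real macros walk the drm crtc list and the atomic state arrays):

#include <stdio.h>

/* Same trick the kernel's for_each_if() uses to filter loop bodies. */
#define for_each_if(cond) if (!(cond)) {} else

#define for_each_pipe_in_mask_reverse(pipe, npipes, mask) \
	for ((pipe) = (npipes) - 1; (pipe) >= 0; (pipe)--) \
		for_each_if((mask) & (1u << (pipe)))

int main(void)
{
	unsigned int mask = 0x5;	/* pipes A and C set */
	int pipe;

	for_each_pipe_in_mask_reverse(pipe, 4, mask)
		printf("pipe %d\n", pipe);	/* prints 2, then 0 */
	return 0;
}
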
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
new file mode 100644
index 000000000000..ad8545c8055d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+/*
+ * This header is for transitional struct intel_display conversion helpers only.
+ */
+
+#ifndef __INTEL_DISPLAY_CONVERSION__
+#define __INTEL_DISPLAY_CONVERSION__
+
+/*
+ * Transitional macro to optionally convert struct drm_i915_private * to struct
+ * intel_display *, also accepting the latter.
+ */
+#define __to_intel_display(p) \
+ _Generic(p, \
+ const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
+ struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct intel_display *: (p), \
+ struct intel_display *: (p))
+
+#endif /* __INTEL_DISPLAY_CONVERSION__ */
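
A stand-alone illustration of the C11 _Generic dispatch that __to_intel_display() relies on, using simplified stand-in types rather than the i915 ones (the real macro also handles the const variants):

#include <stdio.h>

struct display { int ver; };
struct i915 { struct display display; };

/* Accept either the wrapper type or the wrapped type, always yield the
 * latter; the association is resolved at compile time. */
#define to_display(p) \
	_Generic((p), \
		 struct i915 *: (&((struct i915 *)(p))->display), \
		 struct display *: ((struct display *)(p)))

int main(void)
{
	struct i915 dev = { .display = { .ver = 14 } };
	struct display *d1 = to_display(&dev);	/* wrapper in */
	struct display *d2 = to_display(d1);	/* wrapped type passes through */

	printf("%d %d\n", d1->ver, d2->ver);
	return 0;
}
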
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 2167dbee5eea..7715fc329057 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -26,6 +26,7 @@
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
+#include "intel_dmc_wl.h"
#include "intel_wm_types.h"
struct task_struct;
@@ -282,6 +283,9 @@ struct intel_wm {
};
struct intel_display {
+ /* drm device backpointer */
+ struct drm_device *drm;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -345,6 +349,8 @@ struct intel_display {
struct intel_global_obj obj;
unsigned int max_cdclk_freq;
+ unsigned int max_dotclk_freq;
+ unsigned int skl_preferred_vco_freq;
} cdclk;
struct {
@@ -446,6 +452,16 @@ struct intel_display {
} ips;
struct {
+ bool display_irqs_enabled;
+
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
+
+ u32 de_irq_mask[I915_MAX_PIPES];
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
+ } irq;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -534,6 +550,7 @@ struct intel_display {
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;
+ struct intel_dmc_wl wl;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index b99c024b0934..35f9f86ef70f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -31,6 +31,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_wm.h"
@@ -191,7 +192,7 @@ static void intel_hdcp_info(struct seq_file *m,
struct intel_connector *intel_connector,
bool remote_req)
{
- bool hdcp_cap, hdcp2_cap;
+ bool hdcp_cap = false, hdcp2_cap = false;
if (!intel_connector->hdcp.shim) {
seq_puts(m, "No Connector Support");
@@ -252,9 +253,6 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
const struct drm_display_mode *mode;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
@@ -271,28 +269,23 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!encoder)
- return;
-
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
+ if (intel_connector->mst_port)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
+ intel_hdmi_info(m, intel_connector);
break;
default:
break;
}
seq_puts(m, "\tHDCP version: ");
- if (intel_encoder_is_mst(encoder)) {
+ if (intel_connector->mst_port) {
intel_hdcp_info(m, intel_connector, true);
seq_puts(m, "\tMST Hub HDCP version: ");
}
@@ -645,51 +638,24 @@ static int i915_display_capabilities(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
struct intel_shared_dpll *pll;
int i;
drm_modeset_lock_all(&dev_priv->drm);
- seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
dev_priv->display.dpll.ref_clks.nssc,
dev_priv->display.dpll.ref_clks.ssc);
for_each_shared_dpll(dev_priv, pll, i) {
- seq_printf(m, "DPLL%i: %s, id: %i\n", pll->index,
+ drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
- seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask,
str_yes_no(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ drm_printf(&p, " tracked hardware state:\n");
+ intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state);
}
drm_modeset_unlock_all(&dev_priv->drm);
@@ -1103,27 +1069,6 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_params(i915);
}
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-
- if (connector->base.status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->pps.panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->pps.panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->pps.backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->pps.backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -1402,20 +1347,6 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
-static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
-
- return 0;
-}
-
static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -1437,30 +1368,6 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
return len;
}
-static ssize_t i915_bigjoiner_enable_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
- bool bigjoiner_en = 0;
- int ret;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
- if (ret < 0)
- return ret;
-
- connector->force_bigjoiner_enable = bigjoiner_en;
- *offp += len;
-
- return len;
-}
-
static int i915_dsc_output_format_open(struct inode *inode,
struct file *file)
{
@@ -1554,8 +1461,6 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
.write = i915_dsc_fractional_bpp_write
};
-DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
-
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1608,12 +1513,9 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
return;
intel_drrs_connector_debugfs_add(connector);
+ intel_pps_connector_debugfs_add(connector);
intel_psr_connector_debugfs_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", 0444, root,
- connector, &i915_panel_fops);
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
@@ -1640,8 +1542,8 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
if (DISPLAY_VER(i915) >= 11 &&
(connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
- debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
- connector, &i915_bigjoiner_enable_fops);
+ debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root,
+ &connector->force_bigjoiner_enable);
}
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
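
The debugfs hunk above replaces a hand-rolled show/write fops pair with debugfs_create_bool(), which binds a bool variable directly to a read/write debugfs file. The general shape, as a kernel-side sketch with hypothetical names (not runnable outside a driver):

#include <linux/debugfs.h>

static bool force_feature;

static void my_driver_debugfs_init(struct dentry *root)
{
	/* Reads report Y/N; writes accept 0/1, y/n, on/off. */
	debugfs_create_bool("force_feature", 0644, root, &force_feature);
}
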
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index c02d79b50006..120e209ee74a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -17,6 +17,9 @@
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
+
static const struct intel_display_device_info no_display = {};
#define PIPE_A_OFFSET 0x70000
@@ -768,6 +771,8 @@ static const struct intel_display_device_info xe2_lpd_display = {
BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D),
};
+__diag_pop();
+
/*
* Separate detection for no display cases to keep the display id array simple.
*
@@ -922,6 +927,9 @@ void intel_display_device_probe(struct drm_i915_private *i915)
const struct intel_display_device_info *info;
u16 ver, rel, step;
+ /* Add drm device backpointer as early as possible. */
+ i915->display.drm = &i915->drm;
+
if (HAS_GMD_ID(i915))
info = probe_gmdid_display(i915, &ver, &rel, &step);
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index fe4268813786..17ddf82f0b6e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_i915_private;
@@ -47,6 +48,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
@@ -68,6 +70,7 @@ struct drm_printer;
#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
+#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
@@ -98,8 +101,8 @@ struct drm_printer;
(IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__i915), (from), (until)))
-#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info)
+#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 87dd07e0d138..89bd032ed995 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -11,6 +11,7 @@
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
@@ -98,7 +99,6 @@ void intel_display_driver_init_hw(struct drm_i915_private *i915)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.get_format_info = intel_fb_get_format_info,
- .output_poll_changed = intel_fbdev_output_poll_changed,
.mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
@@ -198,11 +198,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
+ intel_dmc_wl_init(&i915->display);
}
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
int ret;
if (i915_inject_probe_failure(i915))
@@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- intel_init_quirks(i915);
+ intel_init_quirks(display);
intel_fbc_init(i915);
@@ -514,10 +516,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
intel_overlay_setup(i915);
- ret = intel_fbdev_init(&i915->drm);
- if (ret)
- return ret;
-
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -545,16 +543,6 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_debugfs_register(i915);
/*
- * Some ports require correctly set-up hpd registers for
- * detection to work properly (leading to ghost connected
- * connector status), e.g. VGA on gm45. Hence we can only set
- * up the initial fbdev config after hpd irqs are fully
- * enabled. We do it last so that the async config cannot run
- * before the connectors are registered.
- */
- intel_fbdev_initial_config_async(i915);
-
- /*
* We need to coordinate the hotplugs with the asynchronous
* fbdev configuration, for which we use the
* fbdev->async_cookie.
@@ -562,6 +550,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
drm_kms_helper_poll_init(&i915->drm);
intel_hpd_poll_disable(i915);
+ intel_fbdev_setup(i915);
+
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
}
@@ -597,9 +587,6 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(i915);
-
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
@@ -638,7 +625,8 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_fbdev_unregister(i915);
+ drm_client_dev_unregister(&i915->drm);
+
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), then flush
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index f846c5b108b5..c337e0597541 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -117,13 +117,14 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->de_irq_mask[pipe];
+ new_val = dev_priv->display.irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->de_irq_mask[pipe]) {
- dev_priv->de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
+ dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe]);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -179,7 +180,7 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+ u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -233,10 +234,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -256,10 +257,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -401,7 +402,7 @@ void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->pipestat_irq_mask[pipe] = 0;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
}
}
@@ -412,7 +413,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled) {
+ if (!dev_priv->display.irq.display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -445,7 +446,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->pipestat_irq_mask[pipe];
+ status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
@@ -1203,7 +1204,7 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
/*
* Vblank interrupts fail to wake the device up from C2+.
@@ -1211,8 +1212,8 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* the problem. There is a small power cost so we do this
* only when vblank interrupts are actually enabled.
*/
- if (dev_priv->vblank_enabled++ == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (i915->display.irq.vblank_enabled++ == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -1315,12 +1316,12 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->vblank_enabled == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (--i915->display.irq.vblank_enabled == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -1497,8 +1498,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
- ~dev_priv->de_irq_mask[pipe] | extra_ier);
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1558,10 +1559,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = true;
+ dev_priv->display.irq.display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
vlv_display_irq_reset(dev_priv);
@@ -1573,10 +1574,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled)
+ if (!dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = false;
+ dev_priv->display.irq.display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
vlv_display_irq_reset(dev_priv);
@@ -1694,12 +1695,12 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
for_each_pipe(dev_priv, pipe) {
- dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
+ dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
+ dev_priv->display.irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -1770,9 +1771,9 @@ void intel_display_irq_init(struct drm_i915_private *i915)
* domain. We defer setting up the display irqs in this case to the
* runtime pm.
*/
- i915->display_irqs_enabled = true;
+ i915->display.irq.display_irqs_enabled = true;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display_irqs_enabled = false;
+ i915->display.irq.display_irqs_enabled = false;
intel_hotplug_irq_init(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 11e03cfb774d..1799a6643128 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -27,6 +27,10 @@ static struct intel_display_params intel_display_modparams __read_mostly = {
* debugfs mode to 0.
*/
+intel_display_param_named_unsafe(dmc_firmware_path, charp, 0400,
+ "DMC firmware path to use instead of the default one. "
+ "Use /dev/null to disable DMC and runtime PM.");
+
intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
@@ -116,6 +120,11 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
+intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+ "Enable DMC wakelock "
+ "(0=disabled, 1=enabled) "
+ "Default: 0");
+
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
const char *name, bool val)
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 6206cc51df04..1208a62c16d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -24,6 +24,7 @@ struct drm_i915_private;
* debugfs file
*/
#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, vbt_firmware, NULL, 0400) \
param(int, lvds_channel_mode, 0, 0400) \
param(int, panel_use_ssc, -1, 0600) \
@@ -46,6 +47,7 @@ struct drm_i915_private;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
+ param(bool, enable_dmc_wl, false, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
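
intel_display_params uses an X-macro list: INTEL_DISPLAY_PARAMS_FOR_EACH() is expanded once with MEMBER() to declare the struct, and again elsewhere to build defaults and dump routines. A minimal stand-alone version of the pattern (parameter names and signatures made up):

#include <stdbool.h>
#include <stdio.h>

#define PARAMS_FOR_EACH(param) \
	param(int, panel_use_ssc, -1) \
	param(bool, psr_safest_params, false)

/* One expansion declares the members... */
#define MEMBER(T, name, def) T name;
struct params {
	PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

/* ...another builds the default values... */
#define INIT(T, name, def) .name = (def),
static const struct params defaults = {
	PARAMS_FOR_EACH(INIT)
};
#undef INIT

/* ...and a third generates a dump routine. */
#define DUMP(T, name, def) printf(#name "=%d\n", (int)p->name);
static void dump(const struct params *p)
{
	PARAMS_FOR_EACH(DUMP)
}
#undef DUMP

int main(void)
{
	dump(&defaults);
	return 0;
}
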
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 6fd4fa52253a..03dc7edcc443 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -640,13 +640,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
- /*
- * The caller must hold already raw wakeref, upgrade that to a proper
- * wakeref to make the state checker happy about the HW access during
- * power well disabling.
- */
- assert_rpm_raw_wakeref_held(rpm);
- wakeref = intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get_noresume(rpm);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 06900ff307b2..83f616097a29 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -17,6 +17,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -26,6 +27,7 @@
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -199,6 +201,9 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
+#define ICL_AUX_PW_TO_PHY(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
+
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
@@ -217,27 +222,22 @@ static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = NULL;
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+
/* We'll check the MST primary port */
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
dig_port = enc_to_dig_port(encoder);
- if (!dig_port)
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
- break;
+ if (dig_port && dig_port->aux_ch == aux_ch)
+ return dig_port;
}
- return dig_port;
+ return NULL;
}
static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
@@ -253,7 +253,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
* as HDMI-only and routed to a combo PHY, the encoder either won't be
* present at all or it will not have an aux_ch assigned.
*/
- return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
+ return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,17 +396,11 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&i915->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_EDP &&
- encoder->port == port)
- return true;
- }
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return false;
+ return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
@@ -415,24 +409,25 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- 0, ICL_LANE_ENABLE_AUX);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_port_is_edp(dev_priv, (enum port)phy))
- intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
- 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
+ !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
@@ -441,14 +436,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- ICL_LANE_ENABLE_AUX, 0);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
@@ -827,6 +823,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -856,6 +854,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -906,39 +906,39 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
if (IS_GEMINILAKE(dev_priv)) {
power_well = lookup_power_well(dev_priv,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
}
}
@@ -976,16 +976,18 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ intel_dmc_wl_disable(&dev_priv->display);
+
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
+ intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
+ bxt_verify_dpio_phy_power_wells(dev_priv);
if (DISPLAY_VER(dev_priv) >= 11)
/*
@@ -1396,8 +1398,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
@@ -1441,9 +1443,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
@@ -1519,9 +1521,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
return;
if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
+ reg = CHV_CMN_DW0_CH0;
else
- reg = _CHV_CMN_DW6_CH1;
+ reg = CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
val = vlv_dpio_read(dev_priv, phy, reg);
@@ -1552,10 +1554,11 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
}
if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
+ DPIO_ALLDL_POWERDOWN_CH0, val);
else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
+ DPIO_ALLDL_POWERDOWN_CH1, val);
drm_WARN(&dev_priv->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 2f07b7afa3bf..b83ad06f2ea7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -29,21 +29,21 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PIPE3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PORT3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->pipe_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->pipe_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_TRANS2(tran, reg) _MMIO(DISPLAY_INFO(dev_priv)->trans_offsets[(tran)] - \
- DISPLAY_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_CURSOR2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->cursor_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->cursor_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_PIPE2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->pipe_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->pipe_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_TRANS2(display, tran, reg) _MMIO(DISPLAY_INFO(display)->trans_offsets[(tran)] - \
+ DISPLAY_INFO(display)->trans_offsets[TRANSCODER_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_CURSOR2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->cursor_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->cursor_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
#endif /* __INTEL_DISPLAY_REG_DEFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e67cd5b02e84..62f7a30c37dc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -661,7 +661,8 @@ struct intel_digital_connector_state {
int broadcast_rgb;
};
-#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
+#define to_intel_digital_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_digital_connector_state, base)
struct dpll {
/* given values */
@@ -727,6 +728,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1002,18 +1004,6 @@ enum intel_output_format {
INTEL_OUTPUT_FORMAT_YCBCR444,
};
-struct intel_mpllb_state {
- u32 clock; /* in KHz */
- u32 ref_control;
- u32 mpllb_cp;
- u32 mpllb_div;
- u32 mpllb_div2;
- u32 mpllb_fracn1;
- u32 mpllb_fracn2;
- u32 mpllb_sscen;
- u32 mpllb_sscstep;
-};
-
/* Used by dp and fdi links */
struct intel_link_m_n {
u32 tu;
@@ -1029,31 +1019,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-struct intel_c10pll_state {
- u32 clock; /* in KHz */
- u8 tx;
- u8 cmn;
- u8 pll[20];
-};
-
-struct intel_c20pll_state {
- u32 clock; /* in kHz */
- u16 tx[3];
- u16 cmn[4];
- union {
- u16 mplla[10];
- u16 mpllb[11];
- };
-};
-
-struct intel_cx0pll_state {
- union {
- struct intel_c10pll_state c10;
- struct intel_c20pll_state c20;
- };
- bool ssc_enabled;
-};
-
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1198,11 +1163,7 @@ struct intel_crtc_state {
struct intel_shared_dpll *shared_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
- union {
- struct intel_dpll_hw_state dpll_hw_state;
- struct intel_mpllb_state mpllb_state;
- struct intel_cx0pll_state cx0pll_state;
- };
+ struct intel_dpll_hw_state dpll_hw_state;
/*
* ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
@@ -1345,6 +1306,7 @@ struct intel_crtc_state {
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
struct drm_dp_vsc_sdp vsc;
+ struct drm_dp_as_sdp as_sdp;
} infoframes;
u8 eld[MAX_ELD_BYTES];
@@ -1422,6 +1384,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ u32 pipe_srcsz_early_tpt;
+
struct drm_rect psr2_su_area;
/* Variable Refresh Rate state */
@@ -1429,6 +1393,7 @@ struct intel_crtc_state {
bool enable, in_range;
u8 pipeline_full;
u16 flipline, vmin, vmax, guardband;
+ u32 vsync_end, vsync_start;
} vrr;
/* Stream Splitter for eDP MSO */
@@ -1617,12 +1582,17 @@ struct intel_watermark_params {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
-#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
+
+#define to_intel_crtc_state(crtc_state) \
+ container_of_const((crtc_state), struct intel_crtc_state, uapi)
+#define to_intel_plane_state(plane_state) \
+ container_of_const((plane_state), struct intel_plane_state, uapi)
+#define to_intel_framebuffer(fb) \
+ container_of_const((fb), struct intel_framebuffer, base)
+
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1737,6 +1707,8 @@ struct intel_psr {
/* LNL and beyond */
u8 check_entry_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
} alpm_parameters;
ktime_t last_entry_attempt;
@@ -1798,6 +1770,7 @@ struct intel_dp {
bool is_mst;
int active_mst_links;
+ enum drm_dp_mst_mode mst_detect;
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
@@ -2183,4 +2156,41 @@ static inline int to_bpp_x16(int bpp)
return bpp << 4;
}
+/*
+ * Conversion functions/macros from various pointer types to struct
+ * intel_display pointer.
+ */
+#define __drm_device_to_intel_display(p) \
+ (&to_i915(p)->display)
+#define __intel_connector_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->uapi.crtc->dev)
+#define __intel_digital_port_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.base.dev)
+#define __intel_dp_to_intel_display(p) \
+ __drm_device_to_intel_display(dp_to_dig_port(p)->base.base.dev)
+#define __intel_encoder_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_hdmi_to_intel_display(p) \
+ __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev)
+
+/* Helper for generic association. Map types to conversion functions/macros. */
+#define __assoc(type, p) \
+ struct type: __##type##_to_intel_display((struct type *)(p))
+
+/* Convert various pointer types to struct intel_display pointer. */
+#define to_intel_display(p) \
+ _Generic(*p, \
+ __assoc(drm_device, p), \
+ __assoc(intel_connector, p), \
+ __assoc(intel_crtc, p), \
+ __assoc(intel_crtc_state, p), \
+ __assoc(intel_digital_port, p), \
+ __assoc(intel_dp, p), \
+ __assoc(intel_encoder, p), \
+ __assoc(intel_hdmi, p))
+
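The to_intel_display() helper relies on C11 _Generic selection: the controlling expression *p is not evaluated, only its type is used to pick one association, and the (struct type *) cast inside __assoc() keeps every branch well-formed no matter which pointer type is actually passed. A caller can therefore write to_intel_display(encoder), to_intel_display(intel_dp), to_intel_display(crtc_state) and so on with a single macro name. A small self-contained sketch of the same idiom (the screen/panel/output types below are illustrative only, not part of the driver):

#include <stdio.h>

struct screen { int id; };
struct panel { struct screen base; };
struct output { struct screen *scr; };

#define __panel_to_screen(p)  (&(p)->base)
#define __output_to_screen(p) ((p)->scr)

/* Type-based dispatch: the matching branch is chosen at compile time;
 * the casts keep the non-matching branch valid for type checking. */
#define to_screen(p) _Generic(*(p), \
	struct panel:  __panel_to_screen((struct panel *)(p)), \
	struct output: __output_to_screen((struct output *)(p)))

int main(void)
{
	struct panel pnl = { .base = { .id = 1 } };
	struct output out = { .scr = &pnl.base };

	printf("%d %d\n", to_screen(&pnl)->id, to_screen(&out)->id);
	return 0;
}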
#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index ac136fd992ba..e5a8022db664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -10,20 +10,12 @@
static void gen11_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14010594013 */
intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
static void xe_d_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14013723622 */
intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 835781624482..cbd2ac5671b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include "i915_drv.h"
@@ -38,6 +39,8 @@
* low-power state and comes back to normal.
*/
+#define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
+
enum intel_dmc_id {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
@@ -71,6 +74,21 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
return i915->display.dmc.dmc;
}
+static const char *dmc_firmware_param(struct drm_i915_private *i915)
+{
+ const char *p = i915->display.params.dmc_firmware_path;
+
+ return p && *p ? p : NULL;
+}
+
+static bool dmc_firmware_param_disabled(struct drm_i915_private *i915)
+{
+ const char *p = dmc_firmware_param(i915);
+
+ /* Magic path to indicate disabled */
+ return p && !strcmp(p, "/dev/null");
+}
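With this, DMC firmware loading (and with it DC states and the runtime PM that depends on them) is disabled by pointing the existing module parameter at the magic path instead of passing an empty string as before, e.g. on the kernel command line:

	i915.dmc_firmware_path=/dev/null

An empty or unset parameter now simply falls back to the per-platform default firmware.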
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
@@ -89,10 +107,14 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
__stringify(major) "_" \
__stringify(minor) ".bin"
+#define XE2LPD_DMC_MAX_FW_SIZE 0x8000
#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd)
+MODULE_FIRMWARE(XE2LPD_DMC_PATH);
+
#define MTL_DMC_PATH DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);
@@ -136,6 +158,59 @@ MODULE_FIRMWARE(SKL_DMC_PATH);
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
+static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size)
+{
+ const char *fw_path = NULL;
+ u32 max_fw_size = 0;
+
+ if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) {
+ fw_path = XE2LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
+ fw_path = MTL_DMC_PATH;
+ max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
+ fw_path = DG2_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(i915)) {
+ fw_path = ADLP_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_S(i915)) {
+ fw_path = ADLS_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_DG1(i915)) {
+ fw_path = DG1_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_ROCKETLAKE(i915)) {
+ fw_path = RKL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_TIGERLAKE(i915)) {
+ fw_path = TGL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER(i915) == 11) {
+ fw_path = ICL_DMC_PATH;
+ max_fw_size = ICL_DMC_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(i915)) {
+ fw_path = GLK_DMC_PATH;
+ max_fw_size = GLK_DMC_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ fw_path = KBL_DMC_PATH;
+ max_fw_size = KBL_DMC_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(i915)) {
+ fw_path = SKL_DMC_PATH;
+ max_fw_size = SKL_DMC_MAX_FW_SIZE;
+ } else if (IS_BROXTON(i915)) {
+ fw_path = BXT_DMC_PATH;
+ max_fw_size = BXT_DMC_MAX_FW_SIZE;
+ }
+
+ *size = max_fw_size;
+
+ return fw_path;
+}
+
#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES 20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
@@ -546,6 +621,8 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
pipedmc_clock_gating_wa(i915, true);
disable_all_event_handlers(i915);
pipedmc_clock_gating_wa(i915, false);
+
+ intel_dmc_wl_disable(&i915->display);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
@@ -845,7 +922,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
+static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
@@ -858,13 +935,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
u32 r, offset;
if (!fw)
- return;
+ return -EINVAL;
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
r = parse_dmc_fw_css(dmc, css_header, fw->size);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -872,7 +949,7 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
package_header = (struct intel_package_header *)&fw->data[readcount];
r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -889,6 +966,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
+
+ if (!intel_dmc_has_payload(i915)) {
+ drm_err(&i915->drm, "DMC firmware main program not found\n");
+ return -ENOENT;
+ }
+
+ return 0;
}
static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
@@ -923,7 +1007,7 @@ static void dmc_load_work_fn(struct work_struct *work)
err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
- if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ if (err == -ENOENT && !dmc_firmware_param(i915)) {
fallback_path = dmc_fallback_path(i915);
if (fallback_path) {
drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
@@ -934,24 +1018,31 @@ static void dmc_load_work_fn(struct work_struct *work)
}
}
- parse_dmc_fw(dmc, fw);
-
- if (intel_dmc_has_payload(i915)) {
- intel_dmc_load_program(i915);
- intel_dmc_runtime_pm_put(i915);
-
- drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
- dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
- } else {
+ if (err) {
drm_notice(&i915->drm,
- "Failed to load DMC firmware %s."
- " Disabling runtime power management.\n",
- dmc->fw_path);
+ "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
drm_notice(&i915->drm, "DMC firmware homepage: %s",
- INTEL_UC_FIRMWARE_URL);
+ INTEL_DMC_FIRMWARE_URL);
+ return;
}
+ err = parse_dmc_fw(dmc, fw);
+ if (err) {
+ drm_notice(&i915->drm,
+ "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
+ goto out;
+ }
+
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
+
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+
+out:
release_firmware(fw);
}
@@ -987,56 +1078,16 @@ void intel_dmc_init(struct drm_i915_private *i915)
INIT_WORK(&dmc->work, dmc_load_work_fn);
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
- dmc->fw_path = MTL_DMC_PATH;
- dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
- dmc->fw_path = DG2_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
- dmc->fw_path = ADLP_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
- dmc->fw_path = ADLS_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
- dmc->fw_path = DG1_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
- dmc->fw_path = RKL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
- dmc->fw_path = TGL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(i915) == 11) {
- dmc->fw_path = ICL_DMC_PATH;
- dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
- dmc->fw_path = GLK_DMC_PATH;
- dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
- dmc->fw_path = KBL_DMC_PATH;
- dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
- dmc->fw_path = SKL_DMC_PATH;
- dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
- dmc->fw_path = BXT_DMC_PATH;
- dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
- }
-
- if (i915->params.dmc_firmware_path) {
- if (strlen(i915->params.dmc_firmware_path) == 0) {
- drm_info(&i915->drm,
- "Disabling DMC firmware and runtime PM\n");
- goto out;
- }
+ dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size);
- dmc->fw_path = i915->params.dmc_firmware_path;
+ if (dmc_firmware_param_disabled(i915)) {
+ drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n");
+ goto out;
}
+ if (dmc_firmware_param(i915))
+ dmc->fw_path = dmc_firmware_param(i915);
+
if (!dmc->fw_path) {
drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
@@ -1072,6 +1123,8 @@ void intel_dmc_suspend(struct drm_i915_private *i915)
if (dmc)
flush_work(&dmc->work);
+ intel_dmc_wl_disable(&i915->display);
+
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(i915))
intel_dmc_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 90d0dbb41cfe..1bf446f96a10 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -97,4 +97,10 @@
#define TGL_DMC_DEBUG3 _MMIO(0x101090)
#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
+#define DMC_WAKELOCK_CFG _MMIO(0x8F1B0)
+#define DMC_WAKELOCK_CFG_ENABLE REG_BIT(31)
+#define DMC_WAKELOCK1_CTL _MMIO(0x8F140)
+#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
+#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+
#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
new file mode 100644
index 000000000000..d9864b9cc429
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "intel_de.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dmc_wl.h"
+
+/**
+ * DOC: DMC wakelock support
+ *
+ * The wake lock is the mechanism by which software makes the display
+ * engine exit DC states, so that registers which are powered down in
+ * those states can be programmed. On previous projects the display
+ * engine exited DC states automatically and implicitly when it
+ * detected register programming; now the driver controls the exit
+ * explicitly by asserting a wakelock in DMC, which forces the display
+ * engine out of the DC state until the wakelock is deasserted. This
+ * improves system performance and system interactions and better fits
+ * the flip queue style of programming. The wake lock is only required
+ * when DC5, DC6, or DC6v have been enabled in DC_STATE_EN and the
+ * wake lock mode of operation has been enabled.
+ *
+ * The mechanism can be enabled and disabled by writing to the
+ * DMC_WAKELOCK_CFG register. There are also 13 control registers
+ * that can be used to hold and release different wakelocks. In the
+ * current implementation only one wakelock is needed, so only
+ * DMC_WAKELOCK1_CTL is used. The other definitions are here for
+ * potential future use.
+ */
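As a hedged sketch of the intended calling pattern (the real hook points are expected to be the driver's MMIO accessor wrappers; the mmio_base plumbing and the example_dc_safe_read() name below are illustrative only, not driver API):

static u32 example_dc_safe_read(struct intel_display *display,
				void __iomem *mmio_base, i915_reg_t reg)
{
	u32 val;

	/* Ref-counted; asserts DMC_WAKELOCK1_CTL only when reg falls in a
	 * wakelock range, forcing the display engine out of DC5/DC6. */
	intel_dmc_wl_get(display, reg);

	val = readl(mmio_base + i915_mmio_reg_offset(reg));

	/* Drop the reference; the actual deassert is deferred by
	 * DMC_WAKELOCK_HOLD_TIME via delayed work. */
	intel_dmc_wl_put(display, reg);

	return val;
}

The release is intentionally lazy: intel_dmc_wl_put() only schedules the delayed work, so back-to-back accesses within the hold time find the wakelock still taken and skip the request/ack handshake.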
+
+#define DMC_WAKELOCK_CTL_TIMEOUT 5
+#define DMC_WAKELOCK_HOLD_TIME 50
+
+struct intel_dmc_wl_range {
+ u32 start;
+ u32 end;
+};
+
+static struct intel_dmc_wl_range lnl_wl_range[] = {
+ { .start = 0x60000, .end = 0x7ffff },
+};
+
+static void __intel_dmc_wl_release(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_dmc_wl *wl = &display->wl;
+
+ WARN_ON(refcount_read(&wl->refcount));
+
+ queue_delayed_work(i915->unordered_wq, &wl->work,
+ msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
+}
+
+static void intel_dmc_wl_work(struct work_struct *work)
+{
+ struct intel_dmc_wl *wl =
+ container_of(work, struct intel_dmc_wl, work.work);
+ struct intel_display *display =
+ container_of(wl, struct intel_display, wl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ /* Bail out if refcount reached zero while waiting for the spinlock */
+ if (!refcount_read(&wl->refcount))
+ goto out_unlock;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock release timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+static bool intel_dmc_wl_check_range(u32 address)
+{
+ int i;
+ bool wl_needed = false;
+
+ for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
+ if (address >= lnl_wl_range[i].start &&
+ address <= lnl_wl_range[i].end) {
+ wl_needed = true;
+ break;
+ }
+ }
+
+ return wl_needed;
+}
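Given the single LNL range above, only offsets in 0x60000-0x7ffff take the wakelock path, while the wakelock control register itself stays outside it:

	intel_dmc_wl_check_range(0x60000);	/* true: inside the LNL range */
	intel_dmc_wl_check_range(0x8f140);	/* false: DMC_WAKELOCK1_CTL itself */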
+
+static bool __intel_dmc_wl_supported(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_dmc_has_payload(i915) ||
+ !display->params.enable_dmc_wl)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_wl_init(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
+ if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ return;
+
+ INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
+ spin_lock_init(&wl->lock);
+ refcount_set(&wl->refcount, 0);
+}
+
+void intel_dmc_wl_enable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (wl->enabled)
+ goto out_unlock;
+
+ /*
+ * Enable wakelock in DMC. We shouldn't try to take the
+ * wakelock, because we're just enabling it, so call the
+ * non-locking version directly here.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
+
+ wl->enabled = true;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_disable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ flush_delayed_work(&wl->work);
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ /* Disable wakelock in DMC */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
+
+ refcount_set(&wl->refcount, 0);
+ wl->enabled = false;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ cancel_delayed_work(&wl->work);
+
+ if (refcount_inc_not_zero(&wl->refcount))
+ goto out_unlock;
+
+ refcount_set(&wl->refcount, 1);
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may be already taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (!wl->taken) {
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
+ "Tried to put wakelock with refcount zero\n"))
+ goto out_unlock;
+
+ if (refcount_dec_and_test(&wl->refcount)) {
+ __intel_dmc_wl_release(display);
+
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
new file mode 100644
index 000000000000..adab51208d0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_WAKELOCK_H__
+#define __INTEL_WAKELOCK_H__
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+
+#include "i915_reg_defs.h"
+
+struct intel_display;
+
+struct intel_dmc_wl {
+ spinlock_t lock; /* protects enabled, taken and refcount */
+ bool enabled;
+ bool taken;
+ refcount_t refcount;
+ struct delayed_work work;
+};
+
+void intel_dmc_wl_init(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+
+#endif /* __INTEL_WAKELOCK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0c3ed37b350..e05e25cd4a94 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -67,6 +67,7 @@
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -88,6 +89,9 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+/* Max DSC line buffer depth supported by HW. */
+#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
+
/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
@@ -122,6 +126,14 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return HAS_AS_SDP(i915) &&
+ drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
+}
+
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
@@ -213,7 +225,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
* Sink rates for 128b/132b. If set, sink should support all 8b/10b
* rates and 10 Gbps.
*/
- if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
u8 uhbr_rates = 0;
BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
@@ -424,7 +436,7 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
return max_rate;
}
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -442,11 +454,9 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -462,11 +472,9 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return 810000;
return 2000000;
@@ -498,7 +506,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* The values must be in increasing order */
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
- 810000, 1000000, 1350000, 2000000,
+ 810000, 1000000, 2000000,
};
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
@@ -1197,15 +1205,15 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
}
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_connector *connector = intel_dp->attached_connector;
- if (!intel_dp_can_bigjoiner(intel_dp))
+ if (!intel_dp_has_bigjoiner(intel_dp))
return false;
- return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+ return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
connector->force_bigjoiner_enable;
}
@@ -1219,7 +1227,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
@@ -1232,6 +1240,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
status = intel_panel_mode_valid(connector, mode);
@@ -1241,10 +1252,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
}
@@ -1305,11 +1314,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1421,7 +1426,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@@ -1702,7 +1708,6 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- u8 line_buf_depth;
int ret;
/*
@@ -1731,20 +1736,14 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd);
- if (!line_buf_depth) {
+ vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
+ drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
+ if (!vdsc_cfg->line_buf_depth) {
drm_dbg_kms(&i915->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
-
vdsc_cfg->block_pred_enable =
connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
@@ -1916,8 +1915,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
@@ -2398,6 +2398,16 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
+{
+ /*
+ * Pipe joiner needs compression up to display 12 due to bandwidth
+ * limitation. DG2 onwards pipe joiner can be enabled without
+ * compression.
+ */
+ return DISPLAY_VER(i915) < 13 && use_joiner;
+}
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2406,30 +2416,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
- bool joiner_needs_dsc = false;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;
- if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
- /*
- * Pipe joiner needs compression up to display 12 due to bandwidth
- * limitation. DG2 onwards pipe joiner can be enabled without
- * compression.
- */
- joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2612,6 +2617,29 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
+static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->vrr.enable ||
+ !intel_dp_as_sdp_supported(intel_dp))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+
+ /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
+ as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
+ as_sdp->length = 0x9;
+ as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->vtotal = adjusted_mode->vtotal;
+ as_sdp->target_rr = 0;
+ as_sdp->duration_incr_ms = 0;
+}
+
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2683,15 +2711,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
- return true;
-
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
-}
-
static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
@@ -2714,7 +2733,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2731,7 +2750,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -2972,6 +2995,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
+ intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
@@ -3364,6 +3388,14 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
+ if (CAN_PANEL_REPLAY(intel_dp)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
return fastset;
}
@@ -4047,39 +4079,84 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp->downstream_ports) == 0;
}
-static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
+{
+ if (mst_mode == DRM_DP_MST)
+ return "MST";
+ else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
+ return "SST w/ sideband messaging";
+ else
+ return "SST";
+}
+
+static enum drm_dp_mst_mode
+intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
+ enum drm_dp_mst_mode sink_mst_mode)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->display.params.enable_dp_mst &&
- intel_dp_mst_source_support(intel_dp) &&
- drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ if (!i915->display.params.enable_dp_mst)
+ return DRM_DP_SST;
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ return DRM_DP_SST;
+
+ if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
+ !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
+ return DRM_DP_SST;
+
+ return sink_mst_mode;
}
-static void
-intel_dp_configure_mst(struct intel_dp *intel_dp)
+static enum drm_dp_mst_mode
+intel_dp_mst_detect(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_encoder *encoder =
- &dp_to_dig_port(intel_dp)->base;
- bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum drm_dp_mst_mode sink_mst_mode;
+ enum drm_dp_mst_mode mst_detect;
+
+ sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+
+ mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
- str_yes_no(sink_can_mst),
- str_yes_no(i915->display.params.enable_dp_mst));
+ intel_dp_mst_mode_str(sink_mst_mode),
+ str_yes_no(i915->display.params.enable_dp_mst),
+ intel_dp_mst_mode_str(mst_detect));
+
+ return mst_detect;
+}
+static void
+intel_dp_mst_configure(struct intel_dp *intel_dp)
+{
if (!intel_dp_mst_source_support(intel_dp))
return;
- intel_dp->is_mst = sink_can_mst &&
- i915->display.params.enable_dp_mst;
+ intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+
+ /* Avoid stale info on the next detect cycle. */
+ intel_dp->mst_detect = DRM_DP_SST;
+}
+
+static void
+intel_dp_mst_disconnect(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp->is_mst)
+ return;
+
+ drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
static bool
@@ -4127,6 +4204,32 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
+static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
+ struct dp_sdp *sdp, size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ /* Prepare AS (Adaptive Sync) SDP Header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = as_sdp->sdp_type;
+ sdp->sdp_header.HB2 = 0x02;
+ sdp->sdp_header.HB3 = as_sdp->length;
+
+ /* Fill AS (Adaptive Sync) SDP Payload */
+ sdp->db[0] = as_sdp->mode;
+ sdp->db[1] = as_sdp->vtotal & 0xFF;
+ sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
+ sdp->db[3] = as_sdp->target_rr & 0xFF;
+ sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;
+
+ return length;
+}
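As a worked example of the layout packed above: a fixed-vtotal configuration with vtotal = 2160 (0x870) and target_rr = 0 ends up as HB1 = DP_SDP_ADAPTIVE_SYNC, HB2 = 0x02, HB3 = 0x09, db[0] = DP_AS_SDP_AVT_FIXED_VTOTAL, db[1] = 0x70, db[2] = 0x08 and db[3] = db[4] = 0x00.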
+
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
const struct hdmi_drm_infoframe *drm_infoframe,
@@ -4226,6 +4329,10 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
+ sizeof(sdp));
+ break;
default:
MISSING_CASE(type);
return;
@@ -4247,6 +4354,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+
+ if (HAS_AS_SDP(dev_priv))
+ dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
+
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
@@ -4264,10 +4375,42 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
return;
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
+static
+int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(as_sdp, 0, sizeof(*as_sdp));
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB2 != 0x02)
+ return -EINVAL;
+
+ if ((sdp->sdp_header.HB3 & 0x3F) != 9)
+ return -EINVAL;
+
+ as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
+ as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
+ as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
+ as_sdp->target_rr = (u64)sdp->db[3] | (((u64)sdp->db[4] & 0x3) << 8);
+
+ return 0;
+}
+
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
const void *buffer, size_t size)
{
@@ -4338,6 +4481,29 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
return 0;
}
+static void
+intel_read_dp_as_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_as_sdp *as_sdp)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_ADAPTIVE_SYNC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+}
+
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
const void *buffer, size_t size)
@@ -4444,6 +4610,10 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
&crtc_state->infoframes.drm.drm);
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ intel_read_dp_as_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.as_sdp);
+ break;
default:
MISSING_CASE(type);
break;
@@ -5371,6 +5541,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
+ intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);
+
/* if there's no downstream port, we're done */
if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
@@ -5382,7 +5554,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
connector_status_connected : connector_status_disconnected;
}
- if (intel_dp_can_mst(intel_dp))
+ if (intel_dp->mst_detect == DRM_DP_MST)
return connector_status_connected;
/* If no HPD, poke DDC gently */
@@ -5687,15 +5859,7 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
- if (intel_dp->is_mst) {
- drm_dbg_kms(&dev_priv->drm,
- "MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
- }
+ intel_dp_mst_disconnect(intel_dp);
intel_dp_tunnel_disconnect(intel_dp);
@@ -5714,7 +5878,7 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
- intel_dp_configure_mst(intel_dp);
+ intel_dp_mst_configure(intel_dp);
/*
* TODO: Reset link params when switching to MST mode, until MST
@@ -6497,7 +6661,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -6522,7 +6685,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
@@ -6565,6 +6728,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index c540d3a73fe7..106ecfde36d9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -88,6 +88,7 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
@@ -119,7 +120,8 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner);
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
@@ -149,6 +151,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4f4a0e3b3114..b8a53bb174da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -61,9 +61,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
u32 status;
int ret;
- ret = __intel_de_wait_for_register(i915, ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY, 0,
- 2, timeout_ms, &status);
+ ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ 2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(&i915->drm,
@@ -143,9 +142,15 @@ static int intel_dp_aux_sync_len(void)
return precharge + preamble;
}
-static int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(void)
{
- int precharge = 10; /* 10-16 */
+ /*
+ * We faced some glitches on a Dell Precision 5490 MTL laptop with the
+ * "Manufacturer: AUO, Model: 63898" panel when using the HW default of 18
+ * (10 + 8). Using 20 (12 + 8) fixes those glitches and stays within the
+ * range allowed by the eDP specification.
+ */
+ int precharge = 12; /* 10-16 */
int preamble = 8;
return precharge + preamble;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 8447f3e601fe..76d1f2ed7c2f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -20,5 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
+int intel_dp_aux_fw_sync_len(void);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b98a87883fef..92b03073acdd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
+ *hdcp_capable = false;
+ *hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
@@ -766,11 +769,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
- stream_enc_status,
- enable ? stream_enc_status : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status, enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
@@ -801,11 +802,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
- STREAM_ENCRYPTION_STATUS,
- enable ? STREAM_ENCRYPTION_STATUS : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index fb84ca98bb7a..947575140059 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -334,7 +334,7 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 11;
+ DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 53aec023ce92..c772ba19c547 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -51,25 +51,39 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
-static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
- const struct drm_display_mode *adjusted_mode,
- struct intel_crtc_state *crtc_state,
- bool dsc)
+static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
+ bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
- int output_bpp = bpp;
- /* DisplayPort 2 128b/132b, bits per lane is always 32 */
- int symbol_clock = crtc_state->port_clock / 32;
-
- if (output_bpp * adjusted_mode->crtc_clock >=
- symbol_clock * 72) {
- drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n",
- output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72);
- return -EINVAL;
- }
- }
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- return 0;
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ return INT_MAX;
+
+ /*
+ * DSC->DPT interface width:
+ * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
+ * LNL+: 144 bits (not a bottleneck in any config)
+ *
+ * Bspec/49259 suggests that the FEC overhead needs to be
+ * applied here, though HW people claim that neither this FEC
+ * nor any other overhead is applicable here (that is, the actual
+ * available_bw is just symbol_clock * 72). However, based on
+ * testing on MTL-P with the
+ * - DELL U3224KBA display
+ * - Unigraf UCD-500 CTS test sink
+ * devices and the
+ * - 5120x2880/995.59MHz
+ * - 6016x3384/1357.23MHz
+ * - 6144x3456/1413.39MHz
+ * modes (all the ones having a DPT limit on the above devices),
+ * both the channel coding efficiency and an additional 3%
+ * overhead need to be accounted for.
+ */
+ return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
+ drm_dp_bw_channel_coding_efficiency(true)),
+ mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
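As a rough cross-check of the arithmetic in intel_dp_mst_max_dpt_bpp() above, here is a stand-alone sketch for one of the quoted cases (a UHBR10 link driving the 5120x2880/995.59 MHz mode). The link-rate/symbol-size convention and the 96.71% channel coding efficiency constant are assumptions used for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t port_clock = 1000000;			/* kHz, assumed UHBR10 */
	uint64_t symbol_clock = port_clock * 10 / 32;	/* 32-bit link symbols -> 312500 kHz */
	uint64_t coding_eff = 967100;			/* ~96.71%, scaled by 1e6 */
	uint64_t pixel_clock = 995590;			/* kHz, the 5120x2880 mode above */

	/* DSC->DPT capacity (72 bits/symbol) vs. pixel rate, 1e6 scale cancels out */
	uint64_t max_dpt_bpp = symbol_clock * 72 * coding_eff /
			       (pixel_clock * 1030000ull);

	printf("max DPT bpp ~ %llu\n", (unsigned long long)max_dpt_bpp);
	return 0;
}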
@@ -88,11 +102,10 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
if (dsc) {
flags |= DRM_DP_BW_OVERHEAD_DSC;
- /* TODO: add support for bigjoiner */
dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
adjusted_mode->clock,
adjusted_mode->hdisplay,
- false);
+ crtc_state->bigjoiner_pipes);
}
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
@@ -158,6 +171,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int max_dpt_bpp;
int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
@@ -178,6 +192,13 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->port_clock,
crtc_state->lane_count);
+ max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
+ if (max_bpp > max_dpt_bpp) {
+ drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ max_bpp, max_dpt_bpp);
+ max_bpp = max_dpt_bpp;
+ }
+
drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
@@ -189,10 +210,6 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
- ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
- if (ret)
- continue;
-
link_bpp_x16 = to_bpp_x16(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
@@ -404,15 +421,22 @@ static int mode_hblank_period_ns(const struct drm_display_mode *mode)
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ bool is_uhbr_sink = connector->mst_port &&
+ drm_dp_128b132b_supported(connector->mst_port->dpcd);
+ int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
return false;
- if (mode_hblank_period_ns(adjusted_mode) > 300)
+ if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
return true;
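A small worked example of the threshold this helper applies: the horizontal blanking period is (htotal - hdisplay) divided by the pixel clock, and per the change above the quirk only kicks in below 300 ns (500 ns for UHBR-capable sinks). The mode timings below are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed mode timings, not taken from any real EDID. */
	uint32_t hdisplay = 3840;
	uint32_t htotal = 3920;		/* 80 pixels of horizontal blanking */
	uint32_t clock_khz = 594000;	/* pixel clock in kHz */
	bool uhbr_sink = false;

	/* pixel period is 1e6 / clock_khz ns, so hblank = blanking pixels * 1e6 / clock */
	uint64_t hblank_ns = (uint64_t)(htotal - hdisplay) * 1000000 / clock_khz;
	uint32_t limit_ns = uhbr_sink ? 500 : 300;

	printf("hblank period %llu ns, limit %u ns -> quirk %s\n",
	       (unsigned long long)hblank_ns, limit_ns,
	       hblank_ns <= limit_ns ? "needed" : "not needed");
	return 0;
}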
@@ -428,7 +452,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
- if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
return true;
if (!dsc) {
@@ -525,14 +549,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
@@ -542,11 +567,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
+ pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- dsc_needed = intel_dp->force_dsc_en ||
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->bigjoiner_pipes);
+
+ dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
@@ -566,8 +598,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n",
- str_yes_no(ret),
+ drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_mst_dsc_source_support(pipe_config))
@@ -613,7 +645,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
@@ -954,6 +986,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_crtc *pipe_crtc;
bool last_mst_stream;
intel_dp->active_mst_links--;
@@ -962,7 +995,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
intel_disable_transcoder(old_crtc_state);
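The loops added above walk every pipe in the joined-pipe mask instead of only the primary CRTC, matching the GENMASK(crtc->pipe + 1, crtc->pipe) bigjoiner mask set earlier in compute_config. A minimal sketch of that pattern follows; the two-pipe mask width and the pipe count are assumptions for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Two-pipe mask starting at 'pipe', mirroring GENMASK(pipe + 1, pipe). */
static uint32_t joined_pipe_mask(unsigned int pipe)
{
	return 0x3u << pipe;
}

int main(void)
{
	unsigned int primary_pipe = 1;	/* assumed: pipe B drives a bigjoiner config */
	uint32_t mask = joined_pipe_mask(primary_pipe);

	/* Equivalent of iterating the CRTCs in the joined pipe mask. */
	for (unsigned int pipe = 0; pipe < 4; pipe++) {
		if (!(mask & (1u << pipe)))
			continue;
		printf("disable vblank on pipe %c\n", 'A' + pipe);
	}
	return 0;
}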
@@ -980,12 +1019,18 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
- else
- ilk_pfit_disable(old_crtc_state);
+ intel_dsc_disable(old_pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_scaler_disable(old_pipe_crtc_state);
+ else
+ ilk_pfit_disable(old_pipe_crtc_state);
+ }
/*
* Power down mst path before disabling the port, otherwise we end
@@ -1117,6 +1162,39 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
+static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 clear = 0;
+ u32 set = 0;
+
+ if (!IS_ALDERLAKE_P(i915))
+ return;
+
+ if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ return;
+
+ /* Wa_14013163432:adlp */
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
+
+ /* Wa_14014143976:adlp */
+ if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+ else if (crtc_state->fec_enable)
+ clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
+ }
+
+ if (!clear && !set)
+ return;
+
+ intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+}
+
static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -1131,6 +1209,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
+ struct intel_crtc *pipe_crtc;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -1145,6 +1224,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
+ enable_bs_jitter_was(pipe_config);
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
clear_act_sent(encoder, pipe_config);
@@ -1172,7 +1253,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_enable_transcoder(pipe_config);
- intel_crtc_vblank_on(pipe_config);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(pipe_config)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
@@ -1285,7 +1372,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false, bigjoiner = false;
@@ -1302,8 +1389,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (*status != MODE_OK)
return 0;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- *status = MODE_NO_DBLESCAN;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
+
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
return 0;
}
@@ -1314,10 +1406,6 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
- ret = drm_modeset_lock(&mgr->base.lock, ctx);
- if (ret)
- return ret;
-
/*
* TODO:
* - Also check if compression would allow for the mode
@@ -1330,32 +1418,23 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
- *status = MODE_CLOCK_HIGH;
- return 0;
- }
-
- if (mode->clock < 10000) {
- *status = MODE_CLOCK_LOW;
- return 0;
- }
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
- *status = MODE_H_ILLEGAL;
- return 0;
- }
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, intel_connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
+ }
+
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
- /* TODO: add support for bigjoiner */
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1383,11 +1462,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1397,7 +1472,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
return 0;
}
@@ -1509,24 +1584,41 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- if (!connector->dp.dsc_decompression_aux)
+ if (!aux)
return false;
- if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
- &desc, true) < 0)
+ /*
+ * A logical port's OUI (at least for affected sinks) is all 0, so the
+ * parent port's OUI is used for identification instead.
+ */
+ if (drm_dp_mst_port_is_logical(connector->port)) {
+ aux = drm_dp_mst_aux_for_parent(connector->port);
+ if (!aux)
+ aux = &connector->mst_port->aux;
+ }
+
+ if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
return false;
- if (!drm_dp_has_quirk(&desc,
- DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
return false;
- if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
return false;
- if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ /*
+ * UHBR (MST sink) devices requiring this quirk don't advertise
+ * HBLANK expansion support. Presumably they either perform HBLANK
+ * expansion internally, or are affected by this issue on modes with a
+ * short HBLANK for other reasons.
+ */
+ if (!drm_dp_128b132b_supported(dpcd) &&
+ !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 75d76f91ecbd..6503abdc2b98 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -348,7 +348,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
out_err:
drm_dbg_kms(&i915->drm,
- "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 4ca910874a4f..d20e4e9cf7f7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -29,6 +30,7 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
/**
@@ -123,9 +125,9 @@
*/
/**
- * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy
*/
-struct bxt_ddi_phy_info {
+struct bxt_dpio_phy_info {
/**
* @dual_channel: true if this phy has a second channel.
*/
@@ -161,7 +163,7 @@ struct bxt_ddi_phy_info {
} channel[2];
};
-static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
@@ -183,7 +185,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = false,
.rcomp_phy = DPIO_PHY1,
@@ -216,23 +218,23 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
if (IS_GEMINILAKE(dev_priv)) {
- *count = ARRAY_SIZE(glk_ddi_phy_info);
- return glk_ddi_phy_info;
+ *count = ARRAY_SIZE(glk_dpio_phy_info);
+ return glk_dpio_phy_info;
} else {
- *count = ARRAY_SIZE(bxt_ddi_phy_info);
- return bxt_ddi_phy_info;
+ *count = ARRAY_SIZE(bxt_dpio_phy_info);
+ return bxt_dpio_phy_info;
}
}
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
int count;
- const struct bxt_ddi_phy_info *phy_list =
+ const struct bxt_dpio_phy_info *phy_list =
bxt_get_phy_list(dev_priv, &count);
return &phy_list[phy];
@@ -241,7 +243,7 @@ bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
- const struct bxt_ddi_phy_info *phy_info, *phys;
+ const struct bxt_dpio_phy_info *phy_info, *phys;
int i, count;
phys = bxt_get_phy_list(dev_priv, &count);
@@ -269,16 +271,32 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
*ch = DPIO_CH0;
}
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+/*
+ * Like intel_de_rmw() but reads from a single per-lane register and
+ * writes to the group register to write the same value to all the lanes.
+ */
+static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915,
+ i915_reg_t reg_single,
+ i915_reg_t reg_group,
+ u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read(i915, reg_single);
+ val = (old & ~clear) | set;
+ intel_de_write(i915, reg_group, val);
+
+ return old;
+}
+
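A sketch of what the bxt_dpio_phy_rmw_grp() helper above does, using plain arrays to stand in for the per-lane and group registers: read one lane register, apply the clear/set masks, and broadcast the result through the group register so every lane ends up with the same value. The register model is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 4

static uint32_t lane_reg[NUM_LANES];	/* per-lane registers */

/* Writing the "group" register fans the value out to all lanes. */
static void write_group(uint32_t val)
{
	for (int i = 0; i < NUM_LANES; i++)
		lane_reg[i] = val;
}

/* Read a single lane register, modify, write back via the group register. */
static uint32_t rmw_grp(int lane, uint32_t clear, uint32_t set)
{
	uint32_t old = lane_reg[lane];

	write_group((old & ~clear) | set);
	return old;
}

int main(void)
{
	for (int i = 0; i < NUM_LANES; i++)
		lane_reg[i] = 0x80000011;

	rmw_grp(0, 0x00000011, 0x00000300);	/* clear two bits, set a field */

	for (int i = 0; i < NUM_LANES; i++)
		printf("lane %d: 0x%08x\n", i, lane_reg[i]);
	return 0;
}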
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
enum dpio_phy phy;
- int n_entries;
- u32 val;
+ int lane, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
@@ -290,41 +308,51 @@ void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
- trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
- val &= ~SCALE_DCOMP_METHOD;
- if (trans->entries[level].bxt.enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- drm_err(&dev_priv->drm,
- "Disabled scaling while ouniqetrangenmethod was set");
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
- val &= ~DE_EMPHASIS;
- val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane),
+ MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
+ MARGIN_000(trans->entries[level].bxt.margin) |
+ UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+ u32 val;
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane),
+ SCALE_DCOMP_METHOD,
+ trans->entries[level].bxt.enable ?
+ SCALE_DCOMP_METHOD : 0);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane));
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane),
+ DE_EMPHASIS_MASK,
+ DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
+ }
+
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -353,7 +381,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ return REG_FIELD_GET(GRC_CODE_MASK, val);
}
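The conversions in this file replace open-coded shift-and-mask code with mask-based field helpers, as in the GRC readout just above and the GRC_CODE_FAST/SLOW/NOM packing further below. A stand-alone sketch of that idiom; the field masks here are made up and do not reproduce the real GRC_CODE_* layout.

#include <stdint.h>
#include <stdio.h>

/* Extract/insert a field described only by its mask (GCC/Clang __builtin_ctz). */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* Assumed layout: three 8-bit code fields packed into one register. */
	const uint32_t FAST_MASK = 0x00ff0000;
	const uint32_t SLOW_MASK = 0x0000ff00;
	const uint32_t NOM_MASK  = 0x000000ff;
	uint32_t code = 0xe4;

	uint32_t reg = field_prep(FAST_MASK, code) |
		       field_prep(SLOW_MASK, code) |
		       field_prep(NOM_MASK, code);

	printf("reg=0x%08x fast=0x%02x\n", reg, field_get(FAST_MASK, reg));
	return 0;
}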
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
@@ -365,20 +393,20 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
phy);
}
-static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_dpio_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ if (bxt_dpio_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
@@ -399,20 +427,17 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(&dev_priv->uncore,
- BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD,
- PHY_POWER_GOOD,
- 1))
+ if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
phy);
/* Program PLL Rcomp code offset */
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
- 0xE4 << IREF0RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
- 0xE4 << IREF1RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));
/* Program power gating */
intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
@@ -435,9 +460,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
dev_priv->display.state.bxt_phy_grc = val;
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
+ grc_code = GRC_CODE_FAST(val) |
+ GRC_CODE_SLOW(val) |
+ GRC_CODE_NOM(val);
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
0, GRC_DIS | GRC_RDY_OVRD);
@@ -449,9 +474,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -460,9 +485,9 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info =
+ const struct bxt_dpio_phy_info *phy_info =
bxt_get_phy_info(dev_priv, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
@@ -471,19 +496,19 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
was_enabled = true;
if (rcomp_phy != -1)
- was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+ was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
- _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ _bxt_dpio_phy_init(dev_priv, rcomp_phy);
- _bxt_ddi_phy_init(dev_priv, phy);
+ _bxt_dpio_phy_init(dev_priv, phy);
if (!was_enabled)
- bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+ bxt_dpio_phy_uninit(dev_priv, rcomp_phy);
}
static bool __printf(6, 7)
@@ -513,10 +538,10 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 mask;
bool ok;
@@ -526,23 +551,23 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ if (!bxt_dpio_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy_info->dual_channel)
ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
@@ -552,9 +577,9 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
if (phy_info->rcomp_phy != -1) {
u32 grc_code = dev_priv->display.state.bxt_phy_grc;
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
+ grc_code = GRC_CODE_FAST(grc_code) |
+ GRC_CODE_SLOW(grc_code) |
+ GRC_CODE_NOM(grc_code);
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
@@ -562,7 +587,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
- "BXT_PORT_REF_DW8(%d)", phy);
+ "BXT_PORT_REF_DW8(%d)", phy);
}
return ok;
@@ -570,7 +595,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
u8
-bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@@ -586,8 +611,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
}
}
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -598,24 +623,18 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = intel_de_read(dev_priv,
- BXT_PORT_TX_DW14_LN(phy, ch, lane));
-
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
- val &= ~LATENCY_OPTIM;
- if (lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
- val);
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ LATENCY_OPTIM,
+ lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
}
}
u8
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -701,9 +720,8 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
@@ -740,7 +758,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
@@ -749,15 +767,15 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+ val |= DPIO_SWING_MARGIN000(margin_reg_value);
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+ val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
+ val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
@@ -796,9 +814,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
@@ -843,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -866,39 +884,39 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
@@ -908,10 +926,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
+ if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
+ else
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
@@ -923,9 +941,8 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
@@ -946,11 +963,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
if (crtc_state->lane_count == 1)
- data = 0x0;
+ data = 0;
else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
+ data = (i == 1) ? 0 : DPIO_UPAR;
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -1012,21 +1028,21 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1050,24 +1066,23 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1077,26 +1092,25 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ DPIO_PCS_CLK_DATAWIDTH_8_10 |
+ DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1108,26 +1122,23 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
+ val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
+ val |= 0xc4;
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
vlv_dpio_put(dev_priv);
}
@@ -1137,12 +1148,11 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 9adc4e8c1738..226994dcb89b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -29,18 +29,18 @@ enum dpio_phy {
#ifdef I915
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
@@ -77,35 +77,35 @@ static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, en
enum dpio_phy *phy, enum dpio_channel *ch)
{
}
-static inline void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
}
-static inline void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return false;
}
-static inline bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return true;
}
-static inline u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+static inline u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
return 0;
}
-static inline void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+static inline void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
}
-static inline u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+static inline u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 3038655377ea..a981f45facb3 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -20,6 +20,7 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
struct intel_dpll_funcs {
@@ -369,38 +370,68 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
+static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
+ if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return i915->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(i915))
return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(i915) != 2)
return 96000;
else
return 48000;
}
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ u32 tmp;
+
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ else
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+
+ hw_state->dpll_md = tmp;
+ }
+
+ hw_state->dpll = intel_de_read(dev_priv, DPLL(crtc->pipe));
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ hw_state->dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
+}
+
/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ u32 dpll = hw_state->dpll;
u32 fp;
struct dpll clock;
int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
+ int refclk = i9xx_pll_refclk(crtc_state);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
+ fp = hw_state->fp0;
else
- fp = pipe_config->dpll_hw_state.fp1;
+ fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev_priv)) {
@@ -475,68 +506,69 @@ void i9xx_crtc_clock_get(struct intel_crtc *crtc,
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
- pipe_config->port_clock = port_clock;
+ crtc_state->port_clock = port_clock;
}
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- struct dpll clock;
- u32 mdiv;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
int refclk = 100000;
+ struct dpll clock;
+ u32 tmp;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+ clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
+ clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
+ clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
+ clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
+ clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
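For a rough sense of the numbers being read back here, a sketch of the divider-to-clock arithmetic, assuming the usual VLV-style convention (m = m1*m2, p = p1*p2, vco = refclk*m/n, dot = vco/p). The divider values below are illustrative, not read from hardware.

#include <stdio.h>

struct dpll_sketch {
	unsigned int m1, m2, n, p1, p2;
};

int main(void)
{
	unsigned int refclk = 100000;	/* kHz, as in the readout above */
	struct dpll_sketch clk = { .m1 = 3, .m2 = 180, .n = 10, .p1 = 4, .p2 = 5 };

	unsigned long long m = (unsigned long long)clk.m1 * clk.m2;
	unsigned long long vco = refclk * m / clk.n;				/* 5.4 GHz */
	unsigned long long dot = vco / ((unsigned long long)clk.p1 * clk.p2);	/* 270 MHz */

	printf("vco=%llu kHz dot=%llu kHz\n", vco, dot);
	return 0;
}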
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
+ clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+ clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
+ clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
+ clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
+ clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
/*
@@ -958,37 +990,20 @@ static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
return (1 << dpll->n) << 16 | dpll->m2;
}
-static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(clock);
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(clock);
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1047,27 +1062,40 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
+ return dpll;
}
-static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
+ if (IS_PINEVIEW(dev_priv)) {
+ hw_state->fp0 = pnv_dpll_compute_fp(clock);
+ hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
- dpll = DPLL_VGA_MODE_DIS;
+static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -1104,8 +1132,19 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
+ return dpll;
+}
+
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+
+ hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
}
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1185,62 +1224,54 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
return 0;
}
+static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ return 25;
+
+ if (crtc_state->sdvo_tv_clock)
+ return 20;
+
+ return 21;
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
}
-static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
+ u32 fp;
fp = i9xx_dpll_compute_fp(clock);
if (ilk_needs_fb_cb_tune(clock, factor))
fp |= FP_CB_TUNE;
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- if (ilk_needs_fb_cb_tune(reduced_clock, factor))
- fp2 |= FP_CB_TUNE;
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return fp;
}
-static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = 0;
+ dpll = DPLL_VCO_ENABLE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1302,9 +1333,20 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
+ return dpll;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ int factor = ilk_fb_cb_factor(crtc_state);
+
+ hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
+ hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
- crtc_state->dpll_hw_state.dpll = dpll;
+ hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
}
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
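
Aside (not part of the patch): the ilk_fb_cb_factor()/ilk_dpll_compute_fp() split in the hunk above keeps the original rule that FP_CB_TUNE is set when m < factor * n, with factor 25 in the LVDS special cases, 20 for an SDVO TV clock and 21 otherwise. A minimal stand-alone sketch of that check, using a made-up divider pair rather than real driver values:

#include <stdio.h>

/*
 * Hypothetical stand-in for the m < factor * n test done by
 * ilk_needs_fb_cb_tune() above; struct and numbers are invented.
 */
struct dpll_sketch {
	int n, m;
};

static int needs_fb_cb_tune(const struct dpll_sketch *d, int factor)
{
	return d->m < factor * d->n;
}

int main(void)
{
	struct dpll_sketch d = { .n = 2, .m = 40 };	/* made-up example */

	/* Default factor 21: 40 < 42, so tune. SDVO TV factor 20: 40 < 40 is false. */
	printf("factor 21 -> %d, factor 20 -> %d\n",
	       needs_fb_cb_tune(&d, 21), needs_fb_cb_tune(&d, 20));
	return 0;
}
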
@@ -1377,39 +1419,56 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
return intel_reserve_shared_dplls(state, crtc, NULL);
}
-void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
}
-void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = vlv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
+
+static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
+}
+
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = chv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
}
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1765,7 +1824,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll = crtc_state->dpll_hw_state.dpll;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1775,157 +1834,152 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
if (i9xx_has_pps(dev_priv))
assert_pps_unlocked(dev_priv, pipe);
- intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
+ intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+ enum dpio_phy phy, enum dpio_channel ch)
{
- u32 reg_val;
+ u32 tmp;
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ tmp |= 0x00000030;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0x8c000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0xb0000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
- u32 mdiv;
- u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val;
+ u32 tmp, coreclk;
vlv_dpio_get(dev_priv);
- bestn = crtc_state->dpll.n;
- bestm1 = crtc_state->dpll.m1;
- bestm2 = crtc_state->dpll.m2;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
-
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy);
+ vlv_pllb_recal_opamp(dev_priv, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
- reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
- mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
- mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
- mdiv |= ((bestn << DPIO_N_SHIFT));
- mdiv |= (1 << DPIO_K_SHIFT);
+ tmp = DPIO_M1_DIV(clock->m1) |
+ DPIO_M2_DIV(clock->m2) |
+ DPIO_P1_DIV(clock->p1) |
+ DPIO_P2_DIV(clock->p2) |
+ DPIO_N_DIV(clock->n) |
+ DPIO_K_DIV(1);
/*
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
* but we don't support that).
* Note: don't use the DAC post divider as it seems unstable.
*/
- mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
- mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
vlv_dpio_put(dev_priv);
}
@@ -1934,9 +1988,10 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
@@ -1948,6 +2003,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -1957,16 +2013,14 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
vlv_prepare_pll(crtc_state);
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1974,93 +2028,87 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- u32 loopfilter, tribuf_calcntr;
- u32 bestm2, bestp1, bestp2, bestm2_frac;
- u32 dpio_val;
- int vco;
-
- bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
- bestm2 = crtc_state->dpll.m2 >> 22;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
- vco = crtc_state->dpll.vco;
- dpio_val = 0;
- loopfilter = 0;
+ u32 tmp, loopfilter, tribuf_calcntr;
+ u32 m2_frac;
+
+ m2_frac = clock->m2 & 0x3fffff;
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
- 5 << DPIO_CHV_S1_DIV_SHIFT |
- bestp1 << DPIO_CHV_P1_DIV_SHIFT |
- bestp2 << DPIO_CHV_P2_DIV_SHIFT |
- 1 << DPIO_CHV_K_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ DPIO_CHV_S1_DIV(5) |
+ DPIO_CHV_P1_DIV(clock->p1) |
+ DPIO_CHV_P2_DIV(clock->p2) |
+ DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
- DPIO_CHV_M1_DIV_BY_2 |
- 1 << DPIO_CHV_N_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
+ DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
- dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
- dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
- if (bestm2_frac)
- dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
+ if (m2_frac)
+ tmp |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
- dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
- DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
- dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
- if (!bestm2_frac)
- dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
+ if (!m2_frac)
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
- if (vco == 5400000) {
- loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ if (clock->vco == 5400000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
+ DPIO_CHV_INT_COEFF(0x8) |
+ DPIO_CHV_GAIN_CTRL(0x1);
tribuf_calcntr = 0x9;
- } else if (vco <= 6200000) {
- loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6200000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
+ DPIO_CHV_INT_COEFF(0xB) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x9;
- } else if (vco <= 6480000) {
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6480000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x8;
} else {
/* Not supported. Apply the same limits as in the max case */
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
- dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
- dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
- DPIO_AFC_RECAL);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
}
@@ -2069,17 +2117,18 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum pipe pipe = crtc->pipe;
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
vlv_dpio_put(dev_priv);
@@ -2089,7 +2138,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
@@ -2100,6 +2149,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -2109,9 +2159,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ hw_state->dpll & ~DPLL_VCO_ENABLE);
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
chv_prepare_pll(crtc_state);
_chv_enable_pll(crtc_state);
}
@@ -2124,10 +2174,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* the value from DPLLBMD to either pipe B or C.
*/
intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B), hw_state->dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
@@ -2137,8 +2186,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
(intel_de_read(dev_priv, DPLL(PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -2199,7 +2247,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2217,9 +2265,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
vlv_dpio_put(dev_priv);
}
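
Aside (not part of the patch): most of the intel_dpll.c hunks above replace open-coded shift/mask arithmetic with REG_FIELD_GET() and parameterized DPIO_*_DIV() helpers. The snippet below is a minimal, self-contained sketch of that mask-driven pattern; it is not the kernel's implementation (the real macros add compile-time mask checks), and the example mask is invented.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the field get/prep macros: the mask alone
 * describes both the position (its lowest set bit) and the width of a field. */
#define FIELD_GET_SKETCH(mask, val)  (((val) & (mask)) / ((mask) & -(mask)))
#define FIELD_PREP_SKETCH(mask, val) (((val) * ((mask) & -(mask))) & (mask))

/* Hypothetical 3-bit P1 divider field at bits 15:13 (not a real DPIO layout). */
#define EXAMPLE_P1_DIV_MASK 0x0000e000u

int main(void)
{
	uint32_t reg = FIELD_PREP_SKETCH(EXAMPLE_P1_DIV_MASK, 5);

	/* Prints "p1 = 5": the value round-trips through the register image. */
	printf("p1 = %u\n", (unsigned)FIELD_GET_SKETCH(EXAMPLE_P1_DIV_MASK, reg));
	return 0;
}
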
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index ac01bb19cc6c..a86a79408af0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_hw_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
@@ -22,6 +23,8 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -39,12 +42,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ff480f171f75..90998b037349 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,6 +24,7 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -64,7 +65,8 @@ struct intel_shared_dpll_funcs {
* the pll is not already enabled.
*/
void (*enable)(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll);
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for disabling the pll, called from intel_disable_shared_dpll()
@@ -81,7 +83,7 @@ struct intel_shared_dpll_funcs {
*/
bool (*get_hw_state)(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for calculating the pll's output frequency based on its passed
@@ -89,7 +91,7 @@ struct intel_shared_dpll_funcs {
*/
int (*get_freq)(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
};
struct intel_dpll_mgr {
@@ -107,8 +109,8 @@ struct intel_dpll_mgr {
struct intel_crtc *crtc,
struct intel_encoder *encoder);
void (*update_ref_clks)(struct drm_i915_private *i915);
- void (*dump_hw_state)(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ void (*dump_hw_state)(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
};
@@ -227,7 +229,7 @@ static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
- pll->info->funcs->enable(i915, pll);
+ pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
pll->on = true;
}
@@ -352,7 +354,7 @@ intel_dpll_mask_all(struct drm_i915_private *i915)
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *pll_state,
+ const struct intel_dpll_hw_state *dpll_hw_state,
unsigned long dpll_mask)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -379,9 +381,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
continue;
}
- if (memcmp(pll_state,
+ if (memcmp(dpll_hw_state,
&shared_dpll[pll->index].hw_state,
- sizeof(*pll_state)) == 0) {
+ sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
@@ -430,14 +432,14 @@ static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_shared_dpll_state *shared_dpll;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *pll_state;
+ shared_dpll[pll->index].hw_state = *dpll_hw_state;
intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
@@ -519,8 +521,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -553,17 +556,19 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
}
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(i915);
- intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
- intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
+ intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(i915, PCH_DPLL(id));
@@ -574,7 +579,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
*
* So write it again.
*/
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
intel_de_posting_read(i915, PCH_DPLL(id));
udelay(200);
}
@@ -634,21 +639,25 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static void ibx_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void ibx_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
-static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct i9xx_dpll_hw_state *a = &_a->i9xx;
+ const struct i9xx_dpll_hw_state *b = &_b->i9xx;
+
return a->dpll == b->dpll &&
a->dpll_md == b->dpll_md &&
a->fp0 == b->fp0 &&
@@ -677,19 +686,24 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
- intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
intel_de_posting_read(i915, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ intel_de_write(i915, SPLL_CTL, hw_state->spll);
intel_de_posting_read(i915, SPLL_CTL);
udelay(20);
}
@@ -728,8 +742,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -749,8 +764,9 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
intel_wakeref_t wakeref;
u32 val;
@@ -975,11 +991,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int refclk;
int n, p, r;
- u32 wrpll = pll_state->wrpll;
+ u32 wrpll = hw_state->wrpll;
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
@@ -1020,11 +1037,12 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- crtc_state->dpll_hw_state.wrpll =
+ hw_state->wrpll =
WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
@@ -1099,7 +1117,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1127,11 +1145,12 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return -EINVAL;
- crtc_state->dpll_hw_state.spll =
+ hw_state->spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
return 0;
@@ -1150,11 +1169,12 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int link_clock = 0;
- switch (pll_state->spll & SPLL_FREQ_MASK) {
+ switch (hw_state->spll & SPLL_FREQ_MASK) {
case SPLL_FREQ_810MHz:
link_clock = 81000;
break;
@@ -1225,16 +1245,21 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = 135000;
}
-static void hsw_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void hsw_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
-static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct hsw_dpll_hw_state *a = &_a->hsw;
+ const struct hsw_dpll_hw_state *b = &_b->hsw;
+
return a->wrpll == b->wrpll &&
a->spll == b->spll;
}
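
Aside (not part of the patch): the ibx_/hsw_ dump and compare hooks above now reach into dpll_hw_state.i9xx and dpll_hw_state.hsw instead of a flat structure. The sketch below models that container as a union of per-platform sub-structures purely for illustration; the actual intel_dpll_hw_state definition in the i915 headers may be laid out differently, and only members visible in this diff are shown.

#include <stdint.h>
#include <stdio.h>

/* Assumed, trimmed-down model of the per-platform hw-state container. */
struct i9xx_dpll_hw_state_sketch { uint32_t dpll, dpll_md, fp0, fp1; };
struct hsw_dpll_hw_state_sketch  { uint32_t wrpll, spll; };

struct dpll_hw_state_sketch {
	union {
		struct i9xx_dpll_hw_state_sketch i9xx;
		struct hsw_dpll_hw_state_sketch hsw;
		/* skl, bxt, icl, cx0pll, ... */
	};
};

/* Each platform's compare hook only inspects its own members, mirroring
 * hsw_compare_hw_state() above. */
static int hsw_compare_sketch(const struct dpll_hw_state_sketch *a,
			      const struct dpll_hw_state_sketch *b)
{
	return a->hsw.wrpll == b->hsw.wrpll && a->hsw.spll == b->hsw.spll;
}

int main(void)
{
	struct dpll_hw_state_sketch a = { .hsw = { .wrpll = 0x1, .spll = 0x2 } };
	struct dpll_hw_state_sketch b = a;

	printf("equal = %d\n", hsw_compare_sketch(&a, &b));	/* prints 1 */
	return 0;
}
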
@@ -1254,7 +1279,8 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *hw_state)
{
}
@@ -1265,7 +1291,7 @@ static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
@@ -1332,26 +1358,31 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(i915, DPLL_CTRL1,
- DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
- pll->state.hw_state.ctrl1 << (id * 6));
+ DPLL_CTRL1_HDMI_MODE(id) |
+ DPLL_CTRL1_SSC(id) |
+ DPLL_CTRL1_LINK_RATE_MASK(id),
+ hw_state->ctrl1 << (id * 6));
intel_de_posting_read(i915, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
- skl_ddi_pll_write_ctrl1(i915, pll);
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
- intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
+ intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
intel_de_posting_read(i915, regs[id].cfgcr1);
intel_de_posting_read(i915, regs[id].cfgcr2);
@@ -1363,9 +1394,12 @@ static void skl_ddi_pll_enable(struct drm_i915_private *i915,
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- skl_ddi_pll_write_ctrl1(i915, pll);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
}
static void skl_ddi_pll_disable(struct drm_i915_private *i915,
@@ -1386,13 +1420,14 @@ static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 val;
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret;
+ u32 val;
wakeref = intel_display_power_get_if_enabled(i915,
POWER_DOMAIN_DISPLAY_CORE);
@@ -1423,8 +1458,9 @@ out:
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -1695,16 +1731,17 @@ skip_remaining_dividers:
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+ p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1752,10 +1789,10 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return 0;
}
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
ref_clock;
- dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
@@ -1767,37 +1804,35 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
int ret;
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ hw_state->ctrl1 =
+ DPLL_CTRL1_OVERRIDE(0) |
+ DPLL_CTRL1_HDMI_MODE(0);
+
+ hw_state->cfgcr1 =
+ DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ hw_state->cfgcr2 =
+ DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
&crtc_state->dpll_hw_state);
@@ -1807,6 +1842,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
u32 ctrl1;
/*
@@ -1836,18 +1872,19 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
break;
}
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ hw_state->ctrl1 = ctrl1;
return 0;
}
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int link_clock = 0;
- switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
@@ -1921,16 +1958,18 @@ static int skl_get_dpll(struct intel_atomic_state *state,
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
/*
* ctrl1 register is already shifted for each pll, just use 0 to get
* the internal shift for each field
*/
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
- return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
+ if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
else
- return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
+ return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
}
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -1939,19 +1978,21 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void skl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void skl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: "
- "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- hw_state->ctrl1,
- hw_state->cfgcr1,
- hw_state->cfgcr2);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
}
-static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct skl_dpll_hw_state *a = &_a->skl;
+ const struct skl_dpll_hw_state *b = &_b->skl;
+
return a->ctrl1 == b->ctrl1 &&
a->cfgcr1 == b->cfgcr1 &&
a->cfgcr2 == b->cfgcr2;
@@ -1991,12 +2032,14 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 temp;
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
+ u32 temp;
bxt_port_to_phy_channel(i915, port, &phy, &ch);
@@ -2019,43 +2062,43 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
/* Write P1 & P2 */
intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
- PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
/* Write M2 integer */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
- PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
+ PORT_PLL_M2_INT_MASK, hw_state->pll0);
/* Write N */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
- PORT_PLL_N_MASK, pll->state.hw_state.pll1);
+ PORT_PLL_N_MASK, hw_state->pll1);
/* Write M2 fraction */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
- PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
+ PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
/* Write M2 fraction enable */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
- PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
+ PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
/* Write coeff */
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->state.hw_state.pll6;
+ temp |= hw_state->pll6;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
- PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
+ PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
- PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
+ PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->state.hw_state.pll10;
+ temp |= hw_state->pll10;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
@@ -2063,7 +2106,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp |= PORT_PLL_RECALIBRATE;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->state.hw_state.ebb4;
+ temp |= hw_state->ebb4;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
@@ -2075,7 +2118,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
drm_err(&i915->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(i915)) {
- temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
temp |= DCC_DELAY_RANGE_2;
intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
@@ -2087,7 +2130,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->state.hw_state.pcsdw12;
+ temp |= hw_state->pcsdw12;
intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
@@ -2112,8 +2155,9 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
enum dpio_phy phy;
@@ -2245,7 +2289,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
@@ -2283,45 +2327,47 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
else
lanestagger = 0x02;
- dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
- dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
- dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
- dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
+ hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+ hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
+ hw_state->pll1 = PORT_PLL_N(clk_div->n);
+ hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
if (clk_div->m2 & 0x3fffff)
- dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
+ hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
- dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
+ hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
PORT_PLL_INT_COEFF(int_coef) |
PORT_PLL_GAIN_CTL(gain_ctl);
- dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
+ hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
- dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
+ hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
- dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
+ hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
PORT_PLL_DCO_AMP_OVR_EN_H;
- dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+ hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
- dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
+ hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
return 0;
}
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
struct dpll clock;
clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
+ if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
+ hw_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}
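
Aside (not part of the patch): bxt_ddi_pll_get_freq() above rebuilds clock.m2 from two register fields, the integer part shifted left by 22 and a 22-bit fraction, mirroring how bxt_ddi_set_dpll_hw_state() splits it (m2 >> 22 and m2 & 0x3fffff). A tiny stand-alone illustration of that fixed-point split, using an invented value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical M2 with a 22-bit fractional part: integer 32, fraction 0.5. */
	uint32_t m2 = (32u << 22) | 0x200000u;

	uint32_t m2_int  = m2 >> 22;		/* what the integer field would carry */
	uint32_t m2_frac = m2 & 0x3fffff;	/* what the fraction field would carry */

	printf("m2 = %u + %u/4194304 (reassembled 0x%x)\n",
	       (unsigned)m2_int, (unsigned)m2_frac,
	       (unsigned)((m2_int << 22) | m2_frac));
	return 0;
}
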
@@ -2402,28 +2448,26 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
/* DSI non-SSC ref 19.2MHz */
}
-static void bxt_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void bxt_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
+
+ drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0, hw_state->ebb4,
+ hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
+ hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
+ hw_state->pcsdw12);
}
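
Aside (not part of the patch): the dump_hw_state() hooks in this file now take a struct drm_printer and call drm_printf() instead of logging directly via drm_dbg_kms(), so the caller chooses where the dump ends up. The snippet below sketches that inversion with a generic callback printer; it illustrates the design choice only and is not the drm_printer API.

#include <stdio.h>

/* Hypothetical minimal "printer": the dumper formats, the sink decides where
 * the text goes (stdout here; a debug log or debugfs file in a real driver). */
struct printer_sketch {
	void (*emit)(const char *line);
};

static void emit_stdout(const char *line)
{
	fputs(line, stdout);
}

static void dump_hw_state_sketch(struct printer_sketch *p,
				 unsigned int wrpll, unsigned int spll)
{
	char buf[64];

	snprintf(buf, sizeof(buf),
		 "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", wrpll, spll);
	p->emit(buf);
}

int main(void)
{
	struct printer_sketch p = { .emit = emit_stdout };

	dump_hw_state_sketch(&p, 0x40000000, 0x0);
	return 0;
}
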
-static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct bxt_dpll_hw_state *a = &_a->bxt;
+ const struct bxt_dpll_hw_state *b = &_b->bxt;
+
return a->ebb0 == b->ebb0 &&
a->ebb4 == b->ebb4 &&
a->pll0 == b->pll0 &&
@@ -2554,7 +2598,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
@@ -2706,7 +2750,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
* The PLL outputs multiple frequencies at the same time, selection is
@@ -2777,17 +2821,18 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int ref_clock = icl_wrpll_ref_clock(i915);
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+ p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -2819,10 +2864,10 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
break;
}
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
ref_clock;
- dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
if (ehl_combo_pll_div_frac_wa_needed(i915))
@@ -2838,33 +2883,34 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 dco_fraction = pll_params->dco_fraction;
if (ehl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
- pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
+ hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
pll_params->dco_integer;
- pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
+ hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
if (DISPLAY_VER(i915) >= 12)
- pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
- pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
if (i915->display.vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
+ hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state,
+ struct icl_dpll_hw_state *hw_state,
bool is_dkl)
{
static const u8 div1_vals[] = { 7, 5, 3, 2 };
@@ -2920,12 +2966,12 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
*target_dco_khz = dco;
- state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+ hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
- state->mg_clktop2_coreclkctl1 =
+ hw_state->mg_clktop2_coreclkctl1 =
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
- state->mg_clktop2_hsclkctl =
+ hw_state->mg_clktop2_hsclkctl =
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
hsdiv |
@@ -2943,9 +2989,10 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* adapted to integer-only calculation, that's why it looks so different.
*/
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int refclk_khz = i915->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2960,7 +3007,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
int ret;
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state, is_dkl);
+ hw_state, is_dkl);
if (ret)
return ret;
@@ -3050,61 +3097,61 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
/* write pll_state calculations */
if (is_dkl) {
- pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
if (i915->display.vbt.override_afc_startup) {
u8 val = i915->display.vbt.override_afc_startup_val;
- pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
- pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
- pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
(use_ssc ? DKL_PLL_SSC_EN : 0);
- pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
} else {
- pll_state->mg_pll_div0 =
+ hw_state->mg_pll_div0 =
(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
MG_PLL_DIV0_FBDIV_INT(m2div_int);
- pll_state->mg_pll_div1 =
+ hw_state->mg_pll_div1 =
MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
MG_PLL_DIV1_DITHER_DIV_2 |
MG_PLL_DIV1_NDIVRATIO(1) |
MG_PLL_DIV1_FBPREDIV(m1div);
- pll_state->mg_pll_lf =
+ hw_state->mg_pll_lf =
MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
MG_PLL_LF_AFCCNTSEL_512 |
MG_PLL_LF_GAINCTRL(1) |
MG_PLL_LF_INT_COEFF(int_coeff) |
MG_PLL_LF_PROP_COEFF(prop_coeff);
- pll_state->mg_pll_frac_lock =
+ hw_state->mg_pll_frac_lock =
MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
MG_PLL_FRAC_LOCK_DCODITHEREN |
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |=
+ hw_state->mg_pll_frac_lock |=
MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
- pll_state->mg_pll_ssc =
+ hw_state->mg_pll_ssc =
(use_ssc ? MG_PLL_SSC_EN : 0) |
MG_PLL_SSC_TYPE(2) |
MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
@@ -3112,14 +3159,14 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
MG_PLL_TDC_COLDST_COLDSTART |
MG_PLL_TDC_COLDST_IREFINT_EN |
MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
MG_PLL_TDC_TDCOVCCORR_EN |
MG_PLL_TDC_TDCSEL(3);
- pll_state->mg_pll_bias =
+ hw_state->mg_pll_bias =
MG_PLL_BIAS_BIAS_GB_SEL(3) |
MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
MG_PLL_BIAS_BIAS_BONUS(10) |
@@ -3129,17 +3176,17 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_BIAS_IREFTRIM(iref_trim);
if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask =
+ hw_state->mg_pll_tdc_coldst_bias_mask =
MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ hw_state->mg_pll_bias_mask = 0;
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
}
- pll_state->mg_pll_tdc_coldst_bias &=
- pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ hw_state->mg_pll_tdc_coldst_bias &=
+ hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
}
return 0;
@@ -3147,31 +3194,32 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
ref_clock = i915->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(i915) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+ m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
+ if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = hw_state->mg_pll_bias &
DKL_PLL_BIAS_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
} else {
m2_frac = 0;
}
} else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
+ if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = hw_state->mg_pll_div0 &
MG_PLL_DIV0_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
} else {
@@ -3179,7 +3227,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
}
}
- switch (pll_state->mg_clktop2_hsclkctl &
+ switch (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
@@ -3194,11 +3242,11 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
div1 = 7;
break;
default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (pll_state->mg_clktop2_hsclkctl &
+ div2 = (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
@@ -3389,7 +3437,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
@@ -3408,8 +3455,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
- encoder->port));
+ dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(dpll_id));
@@ -3435,15 +3481,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_compute_combo_phy_dpll(state, crtc);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_compute_tc_phy_dplls(state, crtc);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return 0;
}
@@ -3452,15 +3495,12 @@ static int icl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_get_combo_phy_dpll(state, crtc, encoder);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_get_tc_phy_dplls(state, crtc, encoder);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return -EINVAL;
}
@@ -3493,8 +3533,9 @@ static void icl_put_dplls(struct intel_atomic_state *state,
static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3559,8 +3600,9 @@ out:
static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3630,9 +3672,10 @@ out:
static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state,
+ struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret = false;
@@ -3690,24 +3733,24 @@ out:
static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
@@ -3747,9 +3790,9 @@ static void icl_dpll_write(struct drm_i915_private *i915,
}
static void icl_mg_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
/*
@@ -3790,9 +3833,9 @@ static void icl_mg_pll_write(struct drm_i915_private *i915,
}
static void dkl_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
u32 val;
@@ -3905,13 +3948,15 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
}
static void combo_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3927,11 +3972,14 @@ static void combo_pll_enable(struct drm_i915_private *i915,
}
static void tbt_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3945,16 +3993,18 @@ static void tbt_pll_enable(struct drm_i915_private *i915,
}
static void mg_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
if (DISPLAY_VER(i915) >= 12)
- dkl_pll_write(i915, pll);
+ dkl_pll_write(i915, pll, hw_state);
else
- icl_mg_pll_write(i915, pll);
+ icl_mg_pll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -4026,33 +4076,36 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void icl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void icl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->div0,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
-}
-
-static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
+ drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct icl_dpll_hw_state *a = &_a->icl;
+ const struct icl_dpll_hw_state *b = &_b->icl;
+
/* FIXME split combo vs. mg more thoroughly */
return a->cfgcr0 == b->cfgcr0 &&
a->cfgcr1 == b->cfgcr1 &&
@@ -4417,33 +4470,33 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* intel_dpll_get_freq - calculate the DPLL's output frequency
* @i915: i915 device
* @pll: DPLL for which to calculate the output frequency
- * @pll_state: DPLL state from which to calculate the output frequency
+ * @dpll_hw_state: DPLL state from which to calculate the output frequency
*
- * Return the output frequency corresponding to @pll's passed in @pll_state.
+ * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
return 0;
- return pll->info->funcs->get_freq(i915, pll, pll_state);
+ return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
}
/**
* intel_dpll_get_hw_state - readout the DPLL's hardware state
* @i915: i915 device
* @pll: DPLL for which to calculate the output frequency
- * @hw_state: DPLL's hardware state
+ * @dpll_hw_state: DPLL's hardware state
*
- * Read out @pll's hardware state into @hw_state.
+ * Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return pll->info->funcs->get_hw_state(i915, pll, hw_state);
+ return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct drm_i915_private *i915,
@@ -4514,22 +4567,24 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
}
/**
- * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * intel_dpll_dump_hw_state - dump hw_state
* @i915: i915 drm device
- * @hw_state: hw state to be written to the log
+ * @p: where to print the state to
+ * @dpll_hw_state: hw state to be dumped
*
- * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
+ * Dump out the relevant values in @dpll_hw_state.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (i915->display.dpll.mgr) {
- i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
+ i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- ibx_dump_hw_state(i915, hw_state);
+ ibx_dump_hw_state(p, dpll_hw_state);
}
}
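
As a hedged illustration (not part of the patch) of the new drm_printer-based dump path: drm_info_printer() is the standard DRM helper for routing a printer to the kernel log, while the wrapper name below is invented.

static void example_dump_dpll(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	/* any struct drm_printer works; this one prints at info level */
	struct drm_printer p = drm_info_printer(i915->drm.dev);

	intel_dpll_dump_hw_state(i915, &p, dpll_hw_state);
}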
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index cc0e1386309d..f09e513ce05b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -36,6 +36,7 @@
enum tc_port;
struct drm_i915_private;
+struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -180,18 +181,19 @@ enum icl_port_dpll_id {
ICL_PORT_DPLL_COUNT,
};
-struct intel_dpll_hw_state {
- /* i9xx, pch plls */
+struct i9xx_dpll_hw_state {
u32 dpll;
u32 dpll_md;
u32 fp0;
u32 fp1;
+};
- /* hsw, bdw */
+struct hsw_dpll_hw_state {
u32 wrpll;
u32 spll;
+};
- /* skl */
+struct skl_dpll_hw_state {
/*
 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in
* lower part of ctrl1 and they get shifted into position when writing
@@ -201,20 +203,18 @@ struct intel_dpll_hw_state {
u32 ctrl1;
/* HDMI only, 0 when used for DP */
u32 cfgcr1, cfgcr2;
+};
- /* icl */
- u32 cfgcr0;
+struct bxt_dpll_hw_state {
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
+};
+
+struct icl_dpll_hw_state {
+ u32 cfgcr0, cfgcr1;
/* tgl */
u32 div0;
- /* bxt */
- u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
-
- /*
- * ICL uses the following, already defined:
- * u32 cfgcr0, cfgcr1;
- */
u32 mg_refclkin_ctl;
u32 mg_clktop2_coreclkctl1;
u32 mg_clktop2_hsclkctl;
@@ -229,6 +229,55 @@ struct intel_dpll_hw_state {
u32 mg_pll_tdc_coldst_bias_mask;
};
+struct intel_mpllb_state {
+ u32 clock; /* in KHz */
+ u32 ref_control;
+ u32 mpllb_cp;
+ u32 mpllb_div;
+ u32 mpllb_div2;
+ u32 mpllb_fracn1;
+ u32 mpllb_fracn2;
+ u32 mpllb_sscen;
+ u32 mpllb_sscstep;
+};
+
+struct intel_c10pll_state {
+ u32 clock; /* in KHz */
+ u8 tx;
+ u8 cmn;
+ u8 pll[20];
+};
+
+struct intel_c20pll_state {
+ u32 clock; /* in kHz */
+ u16 tx[3];
+ u16 cmn[4];
+ union {
+ u16 mplla[10];
+ u16 mpllb[11];
+ };
+};
+
+struct intel_cx0pll_state {
+ union {
+ struct intel_c10pll_state c10;
+ struct intel_c20pll_state c20;
+ };
+ bool ssc_enabled;
+};
+
+struct intel_dpll_hw_state {
+ union {
+ struct i9xx_dpll_hw_state i9xx;
+ struct hsw_dpll_hw_state hsw;
+ struct skl_dpll_hw_state skl;
+ struct bxt_dpll_hw_state bxt;
+ struct icl_dpll_hw_state icl;
+ struct intel_mpllb_state mpllb;
+ struct intel_cx0pll_state cx0pll;
+ };
+};
+
/**
* struct intel_shared_dpll_state - hold the DPLL atomic state
*
@@ -364,10 +413,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
@@ -377,7 +426,8 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915);
void intel_dpll_sanitize_state(struct drm_i915_private *i915);
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
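
A minimal sketch, assuming only the structs introduced above, of how the new per-platform union is meant to be read: each platform family fills in and consumes its own member, so an ICL-only helper (the function below is illustrative, not from the patch) touches nothing but ->icl.

static u32 example_icl_cfgcr0(const struct intel_dpll_hw_state *dpll_hw_state)
{
	/* valid only when an ICL-family hook populated the union */
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	return hw_state->cfgcr0;
}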
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 169ef38ff188..597f8bd6aa1a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(crtc_state->has_drrs ||
- HAS_DOUBLE_BUFFERED_M_N(i915) ||
- intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+ str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 8ef5f93a80ff..0982f95eab72 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -9,12 +9,15 @@
#include <linux/types.h>
enum drrs_type;
+enum transcoder;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d62e050185e7..4baaa92ceaec 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -340,6 +340,18 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+ return DSB_SKIP_WAITS_EN |
+ DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return DSB_SKIP_WAITS_EN;
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
@@ -361,6 +373,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
+ intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+ dsb_chicken(crtc));
+
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index d3cf6a652221..bd5888ce4852 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -64,14 +64,11 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
drm_dbg_kms(&dev_priv->drm, "\n");
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(intel_connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c076da75b066..1840f5b59229 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -223,7 +223,7 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
int target_clock = mode->clock;
enum drm_mode_status status;
@@ -231,9 +231,6 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
/* XXX: Validate clock range */
if (fixed_mode) {
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 3ea6470d6d92..86b443433e8b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1106,7 +1106,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *i915 = to_i915(fb->dev);
unsigned int height;
- u32 alignment;
+ u32 alignment, unused;
if (DISPLAY_VER(i915) >= 12 &&
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
@@ -1128,8 +1128,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
height = ALIGN(height, intel_tile_height(fb, color_plane));
/* Catch potential overflows early */
- if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
- fb->offsets[color_plane])) {
+ if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane], &unused)) {
drm_dbg_kms(&i915->drm,
"Bad offset 0x%08x or pitch %d for color plane %d\n",
fb->offsets[color_plane], fb->pitches[color_plane],
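
For reference, a hedged sketch of the check_add_overflow() idiom from <linux/overflow.h> that replaces add_overflows_t() above; the helper name and -ERANGE return are illustrative. The macro writes the sum into its third argument and returns true when the result does not fit that argument's type.

static int example_validate_plane_layout(u32 height, u32 pitch, u32 offset)
{
	u32 end;

	/* end = height * pitch + offset, rejecting anything past 32 bits */
	if (check_add_overflow(mul_u32_u32(height, pitch), offset, &end))
		return -ERANGE;

	return 0;
}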
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d2f..b6df9baf481b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b453fcbd67da..151dcd0c45b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -54,6 +54,7 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
+#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
#define for_each_fbc_id(__dev_priv, __fbc_id) \
@@ -826,10 +827,36 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:skl,bxt
+ * Display WA #0883: skl,bxt
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_DISABLE_DUMMY0);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /*
+ * WaFbcNukeOnHostModify:skl,kbl,cfl
+ * Display WA #0873: skl,kbl,cfl
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_NUKE_ON_ANY_MODIFICATION);
+ }
+
+ /* Wa_1409120013:icl,jsl,tgl,dg1 */
+ if (IS_DISPLAY_VER(i915, 11, 12))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
- intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
- DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+ if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
new file mode 100644
index 000000000000..ae0699c3c2fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_FBC_REGS__
+#define __INTEL_FBC_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
+#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
+#define FBC_CONTROL _MMIO(0x3208)
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
+#define FBC_COMMAND _MMIO(0x320c)
+#define FBC_CMD_COMPRESS REG_BIT(0)
+#define FBC_STATUS _MMIO(0x3210)
+#define FBC_STAT_COMPRESSING REG_BIT(31)
+#define FBC_STAT_COMPRESSED REG_BIT(30)
+#define FBC_STAT_MODIFIED REG_BIT(29)
+#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
+#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
+#define FBC_CTL_FENCE_DBL REG_BIT(4)
+#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
+#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
+#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
+#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
+#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
+#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
+#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
+#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
+#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
+#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
+#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
+#define FBC_MOD_NUM_VALID REG_BIT(0)
+#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
+#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
+#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
+#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
+#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
+#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
+
+#define FBC_LL_SIZE (1536)
+
+#define DPFC_CB_BASE _MMIO(0x3200)
+#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
+#define DPFC_CONTROL _MMIO(0x3208)
+#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
+#define DPFC_CTL_EN REG_BIT(31)
+#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
+#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
+#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
+#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
+#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
+#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
+#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
+#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
+#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
+#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
+#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
+#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
+#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
+#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
+#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
+#define DPFC_RECOMP_CTL _MMIO(0x320c)
+#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
+#define DPFC_RECOMP_STALL_EN REG_BIT(27)
+#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
+#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
+#define DPFC_STATUS _MMIO(0x3210)
+#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
+#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
+#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
+#define DPFC_STATUS2 _MMIO(0x3214)
+#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
+#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
+#define DPFC_FENCE_YOFF _MMIO(0x3218)
+#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
+#define DPFC_CHICKEN _MMIO(0x3224)
+#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
+#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
+#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
+#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
+#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
+
+#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
+#define FBC_STRIDE_OVERRIDE REG_BIT(15)
+#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
+#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+
+#define ILK_FBC_RT_BASE _MMIO(0x2128)
+#define ILK_FBC_RT_VALID REG_BIT(0)
+#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
+
+#define SNB_DPFC_CTL_SA _MMIO(0x100100)
+#define SNB_DPFC_FENCE_EN REG_BIT(29)
+#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
+#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
+#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
+
+#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
+
+#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
+#define FBC_REND_NUKE REG_BIT(2)
+#define FBC_REND_CACHE_CLEAN REG_BIT(1)
+
+#endif /* __INTEL_FBC_REGS__ */
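
A hedged usage example (not part of the new header) of how these REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() definitions pair with REG_FIELD_GET() when building and decoding a register value; the numbers are arbitrary.

	u32 val, interval;

	/* enable periodic compression with an interval of 100 (bits 29:16) */
	val = FBC_CTL_EN | FBC_CTL_PERIODIC | FBC_CTL_INTERVAL(100);

	/* decode the same field back out of the register value */
	interval = REG_FIELD_GET(FBC_CTL_INTERVAL_MASK, val);	/* == 100 */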
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 99894a855ef0..bda702c2cab8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -24,7 +24,6 @@
* David Airlie
*/
-#include <linux/async.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -39,6 +38,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -58,7 +58,6 @@ struct intel_fbdev {
struct intel_framebuffer *fb;
struct i915_vma *vma;
unsigned long vma_flags;
- async_cookie_t cookie;
int preferred_bpp;
/* Whether or not fbdev hpd processing is temporarily suspended */
@@ -135,6 +134,29 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
return i915_gem_fb_mmap(obj, vma);
}
+static void intel_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ /*
+ * We rely on the object-free to release the VMA pinning for
+ * the info->screen_base mapping. Leaking the VMA is simpler than
+ * trying to rectify all the possible error paths leading here.
+ */
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
+ drm_framebuffer_remove(&ifbdev->fb->base);
+
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ kfree(ifbdev);
+}
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops");
+
static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -144,8 +166,11 @@ static const struct fb_ops intelfb_ops = {
.fb_pan_display = intel_fbdev_pan_display,
__FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev),
.fb_mmap = intel_fbdev_mmap,
+ .fb_destroy = intel_fbdev_fb_destroy,
};
+__diag_pop();
+
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -153,7 +178,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -245,7 +269,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- vga_switcheroo_client_fb_set(pdev, info);
+
return 0;
out_unpin:
@@ -271,25 +295,6 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_dirty = intelfb_dirty,
};
-static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
-{
- /* We rely on the object-free to release the VMA pinning for
- * the info->screen_base mmaping. Leaking the VMA is simpler than
- * trying to rectify all the possible error paths leading here.
- */
-
- drm_fb_helper_fini(&ifbdev->helper);
-
- if (ifbdev->vma)
- intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
-
- if (ifbdev->fb)
- drm_framebuffer_remove(&ifbdev->fb->base);
-
- drm_fb_helper_unprepare(&ifbdev->helper);
- kfree(ifbdev);
-}
-
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
@@ -453,93 +458,6 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
true);
}
-int intel_fbdev_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev;
- int ret;
-
- if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
- return -ENODEV;
-
- ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
- if (ifbdev == NULL)
- return -ENOMEM;
-
- mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
-
- if (intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
- else
- ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
-
- ret = drm_fb_helper_init(dev, &ifbdev->helper);
- if (ret) {
- kfree(ifbdev);
- return ret;
- }
-
- dev_priv->display.fbdev.fbdev = ifbdev;
- INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
-
- return 0;
-}
-
-static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
-{
- struct intel_fbdev *ifbdev = data;
-
- /* Due to peculiar init order wrt to hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper))
- intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-}
-
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
-}
-
-static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
-{
- if (!ifbdev->cookie)
- return;
-
- /* Only serialises with all preceding async calls, hence +1 */
- async_synchronize_cookie(ifbdev->cookie + 1);
- ifbdev->cookie = 0;
-}
-
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
-
- if (!current_is_async())
- intel_fbdev_sync(ifbdev);
-
- drm_fb_helper_unregister_info(&ifbdev->helper);
-}
-
-void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
-
- if (!ifbdev)
- return;
-
- intel_fbdev_destroy(ifbdev);
-}
-
/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
@@ -622,15 +540,13 @@ set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
-void intel_fbdev_output_poll_changed(struct drm_device *dev)
+static int intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
- return;
-
- intel_fbdev_sync(ifbdev);
+ return -EINVAL;
mutex_lock(&ifbdev->hpd_lock);
send_hpd = !ifbdev->hpd_suspended;
@@ -639,21 +555,137 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
drm_fb_helper_hotplug_event(&ifbdev->helper);
+
+ return 0;
}
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
+static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ int ret;
if (!ifbdev)
- return;
+ return -EINVAL;
- intel_fbdev_sync(ifbdev);
if (!ifbdev->vma)
+ return -ENOMEM;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
+ if (ret)
+ return ret;
+
+ intel_fbdev_invalidate(ifbdev);
+
+ return 0;
+}
+
+/*
+ * Fbdev client and struct drm_client_funcs
+ */
+
+static void intel_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = fb_helper->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (fb_helper->info) {
+ vga_switcheroo_client_fb_set(pdev, NULL);
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_fb_helper_unprepare(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int intel_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_i915_private *dev_priv = to_i915(client->dev);
+ int ret;
+
+ ret = intel_fbdev_restore_mode(dev_priv);
+ if (ret)
+ return ret;
+
+ vga_switcheroo_process_delayed_switch();
+
+ return 0;
+}
+
+static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+ if (dev->fb_helper)
+ return intel_fbdev_output_poll_changed(dev);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ vga_switcheroo_client_fb_set(pdev, fb_helper->info);
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs intel_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = intel_fbdev_client_unregister,
+ .restore = intel_fbdev_client_restore,
+ .hotplug = intel_fbdev_client_hotplug,
+};
+
+void intel_fbdev_setup(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ struct intel_fbdev *ifbdev;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
return;
- if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
- intel_fbdev_invalidate(ifbdev);
+ ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
+
+ i915->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
+ mutex_init(&ifbdev->hpd_lock);
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
+
+ ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
+ &intel_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_fb_helper_unprepare;
+ }
+
+ drm_client_register(&ifbdev->helper.client);
+
+ return;
+
+err_drm_fb_helper_unprepare:
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ mutex_destroy(&ifbdev->hpd_lock);
+ kfree(ifbdev);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
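
A hedged, generic sketch of the drm_client registration pattern the fbdev code now follows; every name below is invented, while drm_client_init(), drm_client_register() and struct drm_client_funcs are the standard DRM client API used above.

static int example_client_hotplug(struct drm_client_dev *client)
{
	/* (re)create the emulated fbdev when outputs appear or change */
	return 0;
}

static const struct drm_client_funcs example_client_funcs = {
	.owner		= THIS_MODULE,
	.hotplug	= example_client_hotplug,
};

static void example_client_setup(struct drm_device *dev)
{
	struct drm_client_dev *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return;

	if (drm_client_init(dev, client, "example-client", &example_client_funcs)) {
		kfree(client);
		return;
	}

	/* the DRM core now calls the hooks at hotplug/restore/unregister time */
	drm_client_register(client);
}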
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 04fd523a5023..08de2d5b3433 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -14,29 +14,11 @@ struct intel_fbdev;
struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
}
@@ -44,13 +26,6 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
-{
-}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/display/intel_fixed.h
index a327094de2bd..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/display/intel_fixed.h
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d3e03ed5b79c..9c8e1e91ff1c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -411,7 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
intel_de_write_fw(i915, GMBUS4(i915), 0);
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 9edac27bab26..d5ed4c7dfbc0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -369,9 +369,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
- HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
- 10, 1, &val);
+ ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
if (ret)
return ret;
else if (!(val & HDCP_KEY_LOAD_STATUS))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 302bff75b06c..35823e1f65d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -13,6 +13,12 @@
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd_in;
+ void *hdcp_cmd_out;
+};
+
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 14;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index eba2057c5a9e..5f610df61cc9 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -10,12 +10,7 @@
#include <linux/types.h>
struct drm_i915_private;
-
-struct intel_hdcp_gsc_message {
- struct i915_vma *vma;
- void *hdcp_cmd_in;
- void *hdcp_cmd_out;
-};
+struct intel_hdcp_gsc_message;
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 90d2236fede3..5f6deceaf8ba 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -114,6 +114,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GAMUT;
case DP_SDP_VSC:
return 0;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -137,6 +139,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ENABLE_AS_ADL;
case DP_SDP_PPS:
return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
@@ -164,6 +168,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_ADAPTIVE_SYNC:
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
@@ -186,6 +192,8 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ASYNC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
@@ -563,6 +571,9 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
+ if (HAS_AS_SDP(dev_priv))
+ mask |= VIDEO_DIP_ENABLE_AS_ADL;
+
return val & mask;
}
@@ -570,6 +581,7 @@ static const u8 infoframe_type_to_idx[] = {
HDMI_PACKET_TYPE_GENERAL_CONTROL,
HDMI_PACKET_TYPE_GAMUT_METADATA,
DP_SDP_VSC,
+ DP_SDP_ADAPTIVE_SYNC,
HDMI_INFOFRAME_TYPE_AVI,
HDMI_INFOFRAME_TYPE_SPD,
HDMI_INFOFRAME_TYPE_VENDOR,
@@ -1212,7 +1224,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
- VIDEO_DIP_ENABLE_DRM_GLK);
+ VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
intel_de_write(dev_priv, reg, val);
@@ -1832,7 +1844,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1854,11 +1866,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
- if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
+ if (intel_encoder_is_combo(encoder) && clock > 500000 && clock < 533200)
return MODE_CLOCK_RANGE;
/* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
- if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
+ if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
return MODE_CLOCK_RANGE;
/*
@@ -1981,7 +1993,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2664,8 +2676,9 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
drm_scdc_set_scrambling(connector, scrambling);
}
-static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2686,8 +2699,9 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 bxt_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2705,9 +2719,9 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2731,22 +2745,23 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
return GMBUS_PIN_1_BXT + port;
- else if (intel_phy_is_tc(dev_priv, phy))
- return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+ else if (intel_encoder_is_tc(encoder))
+ return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
-static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u8 ddc_pin;
switch (phy) {
@@ -2767,11 +2782,12 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_C);
+ WARN_ON(encoder->port == PORT_C);
/*
* Pin mapping for RKL depends on which PCH is present. With TGP, the
@@ -2785,11 +2801,12 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}
-static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, port == PORT_A);
+ drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2803,16 +2820,16 @@ static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port po
return GMBUS_PIN_1_BXT + phy;
}
-static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 dg1_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- return intel_port_to_phy(dev_priv, port) + 1;
+ return intel_encoder_to_phy(encoder) + 1;
}
-static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 adls_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_B || port == PORT_C);
+ WARN_ON(encoder->port == PORT_B || encoder->port == PORT_C);
/*
* Pin mapping for ADL-S requires TC pins for all combo phy outputs
@@ -2824,9 +2841,9 @@ static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port
return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
}
-static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2850,30 +2867,29 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u8 ddc_pin;
if (IS_ALDERLAKE_S(dev_priv))
- ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = adls_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
- ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = rkl_encoder_to_ddc_pin(encoder);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
- ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
- ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = mcc_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = icl_encoder_to_ddc_pin(encoder);
else if (HAS_PCH_CNP(dev_priv))
- ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = cnp_encoder_to_ddc_pin(encoder);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = bxt_encoder_to_ddc_pin(encoder);
else if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
- ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = g4x_encoder_to_ddc_pin(encoder);
return ddc_pin;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 76076509f771..d270bb7b9462 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1444,7 +1444,7 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 5863763de530..93e6cac9a4ed 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -72,7 +72,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
+#include "intel_audio_regs.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 221f5c6c871b..8b8959073466 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -392,16 +392,13 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index fcbb083318a7..68bd5101ec89 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -27,7 +27,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/firmware.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
@@ -263,7 +262,6 @@ struct intel_opregion {
struct opregion_asle *asle;
struct opregion_asle_ext *asle_ext;
void *rvda;
- void *vbt_firmware;
const void *vbt;
u32 vbt_size;
struct work_struct asle_work;
@@ -869,46 +867,6 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = dev_priv->display.opregion;
- const struct firmware *fw = NULL;
- const char *name = dev_priv->display.params.vbt_firmware;
- int ret;
-
- if (!name || !*name)
- return -ENOENT;
-
- ret = request_firmware(&fw, name, dev_priv->drm.dev);
- if (ret) {
- drm_err(&dev_priv->drm,
- "Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
- return ret;
- }
-
- if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
- opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (opregion->vbt_firmware) {
- drm_dbg_kms(&dev_priv->drm,
- "Found valid VBT firmware \"%s\"\n", name);
- opregion->vbt = opregion->vbt_firmware;
- opregion->vbt_size = fw->size;
- ret = 0;
- } else {
- ret = -ENOMEM;
- }
- } else {
- drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
- name);
- ret = -EINVAL;
- }
-
- release_firmware(fw);
-
- return ret;
-}
-
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion;
@@ -1006,9 +964,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
}
- if (intel_load_vbt_firmware(dev_priv) == 0)
- goto out;
-
if (dmi_check_system(intel_no_opregion_vbt))
goto out;
@@ -1176,6 +1131,16 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
+bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (!opregion || !opregion->vbt)
+ return false;
+
+ return true;
+}
+
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
struct intel_opregion *opregion = i915->display.opregion;
@@ -1186,7 +1151,7 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
if (size)
*size = opregion->vbt_size;
- return opregion->vbt;
+ return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
}
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
@@ -1312,7 +1277,6 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
memunmap(opregion->header);
if (opregion->rvda)
memunmap(opregion->rvda);
- kfree(opregion->vbt_firmware);
kfree(opregion);
i915->display.opregion = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 0bec224f711f..4b2b8e752632 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -53,6 +53,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
+bool intel_opregion_vbt_present(struct drm_i915_private *i915);
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -119,6 +120,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ return false;
+}
+
static inline const void *
intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2b1392d5a902..1c2099ed5514 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -972,10 +972,11 @@ static int check_overlay_dst(struct intel_overlay *overlay,
rec->dst_width, rec->dst_height);
clipped = req;
- drm_rect_intersect(&clipped, &crtc_state->pipe_src);
- if (!drm_rect_visible(&clipped) ||
- !drm_rect_equals(&clipped, &req))
+ if (!drm_rect_intersect(&clipped, &crtc_state->pipe_src))
+ return -EINVAL;
+
+ if (!drm_rect_equals(&clipped, &req))
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 073ea3166c36..6f4ff6a89c32 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -47,10 +47,12 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->display.params.panel_use_ssc >= 0)
- return i915->display.params.panel_use_ssc != 0;
- return i915->display.vbt.lvds_use_ssc &&
- !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
+ struct intel_display *display = &i915->display;
+
+ if (display->params.panel_use_ssc >= 0)
+ return display->params.panel_use_ssc != 0;
+ return display->vbt.lvds_use_ssc &&
+ !intel_has_quirk(display, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index baf679759e00..826e38a9e6a4 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -474,7 +474,7 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
- i9xx_crtc_clock_get(crtc, crtc_state);
+ i9xx_crtc_clock_get(crtc_state);
/*
* In case there is an active pipe without active ports,
@@ -529,7 +529,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
&crtc_state->dpll_hw_state);
drm_WARN_ON(&dev_priv->drm, !pll_active);
- tmp = crtc_state->dpll_hw_state.dpll;
+ tmp = crtc_state->dpll_hw_state.i9xx.dpll;
crtc_state->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 744e332fa2af..9ca981b7a12c 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -119,10 +119,11 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
if (!encoder)
return;
- phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
return;
+ phy = intel_encoder_to_phy(encoder);
+
if (set_bit)
pmdemand_state->active_combo_phys_mask |= BIT(phy);
else
@@ -222,14 +223,7 @@ static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
struct intel_encoder *encoder)
{
- enum phy phy;
-
- if (!encoder)
- return false;
-
- phy = intel_port_to_phy(i915, encoder->port);
-
- return intel_phy_is_tc(i915, phy);
+ return encoder && intel_encoder_is_tc(encoder);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index 2941a1a18b72..128fd61f8f14 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -43,9 +43,8 @@ struct intel_pmdemand_state {
struct pmdemand_params params;
};
-#define to_intel_pmdemand_state(x) container_of((x), \
- struct intel_pmdemand_state, \
- base)
+#define to_intel_pmdemand_state(global_state) \
+ container_of_const((global_state), struct intel_pmdemand_state, base)
void intel_pmdemand_init_early(struct drm_i915_private *i915);
int intel_pmdemand_init(struct drm_i915_private *i915);
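to_intel_pmdemand_state() now uses container_of_const(), which preserves the constness of its argument: a const struct intel_global_state * yields a const struct intel_pmdemand_state *, while a non-const pointer still yields a non-const result. A hedged sketch of the kind of use this enables (illustrative only; the function name is made up):

static const struct intel_pmdemand_state *
example_get_pmdemand(const struct intel_global_state *obj_state)
{
	/*
	 * With container_of_const() the const qualifier is no longer
	 * discarded, so no cast is needed here.
	 */
	return to_intel_pmdemand_state(obj_state);
}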
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 2d65a538f83e..0ccbf9a85914 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -605,8 +605,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
+ if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
drm_err(&dev_priv->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -1351,7 +1350,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
struct edp_power_seq *vbt)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
*vbt = connector->panel.vbt.edp.pps;
@@ -1364,9 +1363,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
+ if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
vbt->t11_t12);
}
@@ -1671,6 +1670,37 @@ void intel_pps_setup(struct drm_i915_private *i915)
i915->display.pps.mmio_base = PPS_BASE;
}
+static int intel_pps_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->pps.panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->pps.backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->pps.backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_pps);
+
+void intel_pps_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
+ connector, &intel_pps_fops);
+}
+
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index a2c2467e3c22..07ef96ca8da2 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -51,6 +51,8 @@ void vlv_pps_init(struct intel_encoder *encoder,
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_connector_debugfs_add(struct intel_connector *connector);
+
void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6927785fd6ff..f5b33335a9ae 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -171,14 +171,27 @@
*
* The rest of the bits are more self-explanatory and/or
* irrelevant for normal operation.
+ *
+ * Description of the intel_crtc_state variables has_psr, has_panel_replay and
+ * has_sel_update:
+ *
+ * has_psr (alone): PSR1
+ * has_psr + has_sel_update: PSR2
+ * has_psr + has_panel_replay: Panel Replay
+ * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
+ *
+ * Description of some intel_psr variables: enabled, panel_replay_enabled and
+ * sel_update_enabled:
+ *
+ * enabled (alone): PSR1
+ * enabled + sel_update_enabled: PSR2
+ * enabled + panel_replay_enabled: Panel Replay
+ * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
*/
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
-#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
- (intel_dp)->psr.source_panel_replay_support)
-
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -330,6 +343,9 @@ static void psr_irq_control(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
@@ -619,40 +635,59 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
return false;
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.panel_replay_enabled ?
+ PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
+}
+
+/*
+ * Note: Most of the bits are the same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG.
+ * We rely on the PSR definitions for these "common" bits.
+ */
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- if (intel_dp->psr.panel_replay_enabled)
- return;
-
- if (intel_dp->psr.psr2_enabled) {
+ if (crtc_state->has_psr2) {
/* Enable ALPM at sink for psr2 */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE |
- DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+ if (!crtc_state->has_panel_replay) {
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
+ if (psr2_su_region_et_valid(intel_dp))
+ dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
+ }
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
- if (psr2_su_region_et_valid(intel_dp))
- dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ if (crtc_state->has_panel_replay)
+ dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
+ DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
if (intel_dp->psr.entry_setup_frames > 0)
dpcd_val |= DP_PSR_FRAME_CAPTURE;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp),
+ dpcd_val);
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (intel_dp_is_edp(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
@@ -1126,6 +1161,141 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+/*
+ * See Bspec: 71632 for the table
+ *
+ * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
+ *
+ * Half cycle duration:
+ *
+ * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
+ * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
+ *
+ * Link rates 5.4 - 8.1
+ * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
+ * LFPS Period chosen is the mid-point of the min:max values from the table
+ * FLOOR( LFPS Period in Symbol clocks /
+ * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
+ */
+static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
+ int *silence_period,
+ int *lfps_half_cycle)
+{
+ switch (link_rate) {
+ case 162000:
+ *silence_period = 20;
+ *lfps_half_cycle = 5;
+ break;
+ case 216000:
+ *silence_period = 27;
+ *lfps_half_cycle = 7;
+ break;
+ case 243000:
+ *silence_period = 31;
+ *lfps_half_cycle = 8;
+ break;
+ case 270000:
+ *silence_period = 34;
+ *lfps_half_cycle = 9;
+ break;
+ case 324000:
+ *silence_period = 41;
+ *lfps_half_cycle = 11;
+ break;
+ case 432000:
+ *silence_period = 56;
+ *lfps_half_cycle = 15;
+ break;
+ case 540000:
+ *silence_period = 69;
+ *lfps_half_cycle = 12;
+ break;
+ case 648000:
+ *silence_period = 84;
+ *lfps_half_cycle = 15;
+ break;
+ case 675000:
+ *silence_period = 87;
+ *lfps_half_cycle = 15;
+ break;
+ case 810000:
+ *silence_period = 104;
+ *lfps_half_cycle = 19;
+ break;
+ default:
+ *silence_period = *lfps_half_cycle = -1;
+ return false;
+ }
+ return true;
+}
+
+/*
+ * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period, Max +
+ * tSilence, Max + tPHY Establishment + tCDS) / tline)
+ * For the "PHY P2 to P0" latency see the PHY Power Control page
+ * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
+ * : 12 us
+ * The tLFPS_Period, Max term is 800ns
+ * The tSilence, Max term is 180ns
+ * The tPHY Establishment (a.k.a. t1) term is 50us
+ * The tCDS term is 1 or 2 times t2
+ * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
+ * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
+ * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
+ * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
+ * within the CDS period complete within the CDS period regardless of
+ * entry into the period
+ * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
+ * TPS4 Length = 252 Symbols
+ */
+static int _lnl_compute_aux_less_wake_time(int port_clock)
+{
+ int tphy2_p2_to_p0 = 12 * 1000;
+ int tlfps_period_max = 800;
+ int tsilence_max = 180;
+ int t1 = 50 * 1000;
+ int tps4 = 252;
+ int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
+ int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
+ int t2 = num_ml_phy_lock * tml_phy_lock;
+ int tcds = 1 * t2;
+
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
+ t1 + tcds, 1000);
+}
+
+static bool _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int aux_less_wake_time, aux_less_wake_lines, silence_period,
+ lfps_half_cycle;
+
+ aux_less_wake_time =
+ _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ aux_less_wake_time);
+
+ if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
+ &silence_period,
+ &lfps_half_cycle))
+ return false;
+
+ if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
+ silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
+ lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
+ return false;
+
+ if (i915->display.params.psr_safest_params)
+ aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
+
+ intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
+ intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
+ intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+
+ return true;
+}
+
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1142,6 +1312,9 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (check_entry_lines > 15)
return false;
+ if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
+ return false;
+
if (i915->display.params.psr_safest_params)
check_entry_lines = 15;
@@ -1150,28 +1323,52 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
return true;
}
+/*
+ * The IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
+ * are a 50 us io wake time and a 32 us fast wake time. Clearly the precharge pulses
+ * are not (improperly) included in the 32 us fast wake time. 50 us - 32 us = 18 us.
+ */
+static int skl_io_buffer_wake_time(void)
+{
+ return 18;
+}
+
+static int tgl_io_buffer_wake_time(void)
+{
+ return 10;
+}
+
+static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_io_buffer_wake_time();
+ else
+ return skl_io_buffer_wake_time();
+}
+
static bool _compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ int tfw_exit_latency = 20; /* eDP spec */
+ int phy_wake = 4; /* eDP spec */
+ int preamble = 8; /* eDP spec */
+ int precharge = intel_dp_aux_fw_sync_len() - preamble;
u8 max_wake_lines;
- if (DISPLAY_VER(i915) >= 12) {
- io_wake_time = 42;
- /*
- * According to Bspec it's 42us, but based on testing
- * it is not enough -> use 45 us.
- */
- fast_wake_time = 45;
+ io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
+ preamble + phy_wake + tfw_exit_latency;
+ fast_wake_time = precharge + preamble + phy_wake +
+ tfw_exit_latency;
+ if (DISPLAY_VER(i915) >= 12)
/* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
- } else {
- io_wake_time = 50;
- fast_wake_time = 32;
+ else
max_wake_lines = 8;
- }
io_wake_lines = intel_usecs_to_scanlines(
&crtc_state->hw.adjusted_mode, io_wake_time);
@@ -1422,12 +1619,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR disabled due to bigjoiner\n");
+ return;
+ }
+
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
- else
- crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
- if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
+ crtc_state->has_psr = crtc_state->has_panel_replay ? true :
+ _psr_compute_config(intel_dp, crtc_state);
+
+ if (!crtc_state->has_psr)
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
@@ -1454,7 +1663,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
goto unlock;
if (intel_dp->psr.panel_replay_enabled) {
- pipe_config->has_panel_replay = true;
+ pipe_config->has_psr = pipe_config->has_panel_replay = true;
} else {
/*
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
@@ -1559,14 +1768,44 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
+ u32 alpm_ctl;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
+ !intel_dp_is_edp(intel_dp)))
return;
- intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
- ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
+ /*
+ * Panel Replay on eDP always uses AUX-less ALPM, i.e. there is no need
+ * to check panel support at this point.
+ */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ alpm_ctl = ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
+
+ intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ psr->alpm_parameters.silence_period_sym_clocks));
+
+ intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
+ PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms));
+ } else {
+ alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
+ }
+
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
+
+ intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1574,7 +1813,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- u32 mask;
+ u32 mask = 0;
/*
* Only HSW and BDW have PSR AUX registers that need to be setup.
@@ -1588,34 +1827,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
* can rely on frontbuffer tracking.
+ *
+ * From bspec prior to LunarLake:
+ * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
+ * panel replay mode.
+ *
+ * From bspec beyond LunarLake:
+ * Panel Replay on DP: No bits are applicable
+ * Panel Replay on eDP: All bits are applicable
*/
- mask = EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD;
+ if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ mask = EDP_PSR_DEBUG_MASK_HPD;
- /*
- * For some unknown reason on HSW non-ULT (or at least on
- * Dell Latitude E6540) external displays start to flicker
- * when PSR is enabled on the eDP. SR/PC6 residency is much
- * higher than should be possible with an external display.
- * As a workaround leave LPSP unmasked to prevent PSR entry
- * when external displays are active.
- */
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_LPSP;
+ if (intel_dp_is_edp(intel_dp)) {
+ mask |= EDP_PSR_DEBUG_MASK_MEMUP;
- if (DISPLAY_VER(dev_priv) < 20)
- mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+ /*
+ * For some unknown reason on HSW non-ULT (or at least on
+ * Dell Latitude E6540) external displays start to flicker
+ * when PSR is enabled on the eDP. SR/PC6 residency is much
+ * higher than should be possible with an external display.
+ * As a workaround leave LPSP unmasked to prevent PSR entry
+ * when external displays are active.
+ */
+ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_LPSP;
- /*
- * No separate pipe reg write mask on hsw/bdw, so have to unmask all
- * registers in order to keep the CURSURFLIVE tricks working :(
- */
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
- mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+ if (DISPLAY_VER(dev_priv) < 20)
+ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- /* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+ * registers in order to keep the CURSURFLIVE tricks working :(
+ */
+ if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ /* allow PSR with sprite enabled */
+ if (IS_HASWELL(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ }
intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
@@ -1634,7 +1885,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- lnl_alpm_configure(intel_dp);
+ if (intel_dp_is_edp(intel_dp))
+ lnl_alpm_configure(intel_dp);
/*
* Wa_16013835468
@@ -1675,6 +1927,9 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
+ if (intel_dp->psr.panel_replay_enabled)
+ goto no_err;
+
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
@@ -1692,6 +1947,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
return false;
}
+no_err:
return true;
}
@@ -1700,7 +1956,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@@ -1722,14 +1977,22 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.panel_replay_enabled) {
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
- else
+ } else {
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
- intel_psr_enable_sink(intel_dp);
+ /*
+ * Panel replay has to be enabled before link training: doing it
+ * only for PSR here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
+ }
+
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dig_port->base, true);
+
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
@@ -1799,8 +2062,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- enum phy phy = intel_port_to_phy(dev_priv,
- dp_to_dig_port(intel_dp)->base.port);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1835,12 +2096,25 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
- intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
+
+ /* Panel Replay on eDP always uses AUX-less ALPM. */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+ }
/* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp), 0);
- if (intel_dp->psr.psr2_enabled)
+ if (!intel_dp->psr.panel_replay_enabled &&
+ intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
@@ -1888,7 +2162,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1921,7 +2195,7 @@ void intel_psr_resume(struct intel_dp *intel_dp)
{
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1994,6 +2268,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
@@ -2013,6 +2288,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2051,6 +2332,25 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}
+static u32
+psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
+ bool full_update, bool cursor_in_su_area)
+{
+ int width, height;
+
+ if (!crtc_state->enable_psr2_su_region_et || full_update)
+ return 0;
+
+ if (!cursor_in_su_area)
+ return PIPESRC_WIDTH(0) |
+ PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
+}
+
static void clip_area_update(struct drm_rect *overlap_damage_area,
struct drm_rect *damage_area,
struct drm_rect *pipe_src)
@@ -2095,21 +2395,38 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
* cursor fully when cursor is in SU area.
*/
static void
-intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *cursor_state)
+intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool *cursor_in_su_area)
{
- struct drm_rect inter;
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- if (!crtc_state->enable_psr2_su_region_et ||
- !cursor_state->uapi.visible)
+ if (!crtc_state->enable_psr2_su_region_et)
return;
- inter = crtc_state->psr2_su_area;
- if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
- return;
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ struct drm_rect inter;
- clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
- &crtc_state->pipe_src);
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (plane->id != PLANE_CURSOR)
+ continue;
+
+ if (!new_plane_state->uapi.visible)
+ continue;
+
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
+ &crtc_state->pipe_src);
+ *cursor_in_su_area = true;
+ }
}
/*
@@ -2152,10 +2469,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state, *old_plane_state,
- *cursor_plane_state = NULL;
+ struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false;
+ bool full_update = false, cursor_in_su_area = false;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2238,13 +2554,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
-
- /*
- * Cursor plane new state is stored to adjust su area to cover
- * cursor are fully.
- */
- if (plane->id == PLANE_CURSOR)
- cursor_plane_state = new_plane_state;
}
/*
@@ -2273,9 +2582,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /* Adjust su area to cover cursor fully as necessary */
- if (cursor_plane_state)
- intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary (early
+ * transport). This needs to be done after
+ * drm_atomic_add_affected_planes to ensure the visible cursor is added to the
+ * affected planes even when the cursor itself is not updated.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
@@ -2338,6 +2651,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
+ crtc_state->pipe_srcsz_early_tpt =
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
+ cursor_in_su_area);
return 0;
}
@@ -2394,7 +2710,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
+ if (!crtc_state->has_psr)
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2994,6 +3310,13 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
}
}
+/*
+ * On common bits:
+ * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
+ * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
+ * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
+ * This function relies on the PSR definitions for these common bits.
+ */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3003,7 +3326,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -3017,12 +3340,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
+ if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
+ (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
+ !error_status)
drm_dbg_kms(&dev_priv->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
@@ -3042,8 +3367,10 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
- psr_alpm_check(intel_dp);
- psr_capability_changed_check(intel_dp);
+ if (!psr->panel_replay_enabled) {
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+ }
exit:
mutex_unlock(&psr->lock);
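The comment added near the top of intel_psr.c documents how has_psr, has_panel_replay and has_sel_update combine into the supported modes. Purely as an illustration of that table (this helper does not exist in the driver), the mapping could be written as:

#include <stdbool.h>

/* Hypothetical helper: maps the crtc_state flags described in the comment
 * block above to the mode they select; not part of the patch. */
static const char *psr_mode_name(bool has_psr, bool has_panel_replay,
				 bool has_sel_update)
{
	if (!has_psr)
		return "off";
	if (has_panel_replay)
		return has_sel_update ? "Panel Replay Selective Update"
				      : "Panel Replay";
	return has_sel_update ? "PSR2" : "PSR1";
}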
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index cde781df84d5..d483c85870e1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,8 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 8427a736f639..ebc22999572c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -9,7 +9,7 @@
#include "intel_display_reg_defs.h"
#include "intel_dp_aux_regs.h"
-#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
@@ -23,7 +23,7 @@
#define HSW_SRD_CTL _MMIO(0x64800)
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_CTL_A)
#define EDP_PSR_ENABLE REG_BIT(31)
#define BDW_PSR_SINGLE_FRAME REG_BIT(30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */
@@ -66,8 +66,8 @@
#define EDP_PSR_IIR _MMIO(0x64838)
#define _PSR_IMR_A 0x60814
#define _PSR_IIR_A 0x60818
-#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
-#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
#define TGL_PSR_MASK REG_GENMASK(2, 0)
@@ -86,7 +86,7 @@
#define HSW_SRD_AUX_CTL _MMIO(0x64810)
#define _SRD_AUX_CTL_A 0x60810
#define _SRD_AUX_CTL_EDP 0x6f810
-#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A)
+#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_CTL_A)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK
@@ -96,12 +96,12 @@
#define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
#define HSW_SRD_STATUS _MMIO(0x64840)
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_STATUS_A)
#define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29)
#define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0)
#define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1)
@@ -126,14 +126,14 @@
#define HSW_SRD_PERF_CNT _MMIO(0x64844)
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0)
/* PSR_MASK on SKL+ */
#define HSW_SRD_DEBUG _MMIO(0x64860)
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_DEBUG_A)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28)
#define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27)
#define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26)
@@ -153,7 +153,7 @@
#define _PSR2_CTL_A 0x60900
#define _PSR2_CTL_EDP 0x6f900
-#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE REG_BIT(31)
#define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
@@ -195,7 +195,7 @@
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
#define _PSR_EVENT_TRANS_EDP 0x6f848
-#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17)
#define PSR_EVENT_PSR2_DISABLED REG_BIT(16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15)
@@ -215,13 +215,13 @@
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
-#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
-#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(dev_priv, tran, _PSR2_SU_STATUS_A + (index) * 4)
#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
@@ -229,7 +229,7 @@
#define _PSR2_MAN_TRK_CTL_A 0x60910
#define _PSR2_MAN_TRK_CTL_EDP 0x6f910
-#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_MAN_TRK_CTL_A)
#define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
@@ -249,7 +249,7 @@
/* PSR2 Early transport */
#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
-#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
+#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(dev_priv, trans, _PIPE_SRCSZ_ERLY_TPT_A)
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
@@ -297,7 +297,7 @@
_SEL_FETCH_PLANE_BASE_1_A)
#define _ALPM_CTL_A 0x60950
-#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
+#define ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
@@ -321,7 +321,7 @@
#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
#define _ALPM_CTL2_A 0x60954
-#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
+#define ALPM_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL2_A)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
@@ -335,7 +335,7 @@
#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
#define _PORT_ALPM_CTL_A 0x16fa2c
-#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
+#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_CTL_A)
#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
@@ -345,12 +345,16 @@
#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
-#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
+#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_LFPS_CTL_A)
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
-#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
-#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK, (val) - PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#endif /* __INTEL_PSR_REGS_H__ */
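The three LFPS half cycle duration fields occupy bits 20:16, 12:8 and 4:0 of PORT_ALPM_LFPS_CTL, which is why each *_DURATION() macro must prep its value with its own mask. A standalone sketch of what REG_FIELD_PREP() effectively does for these fields (illustrative only, plain C instead of the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* Pack a 5-bit value at the given shift, mimicking REG_FIELD_PREP()
 * for a 5-bit wide field. */
static uint32_t field_prep5(unsigned int shift, uint32_t val)
{
	return (val & 0x1f) << shift;
}

int main(void)
{
	uint32_t half_cycle = 12; /* example duration in symbol clocks */
	uint32_t lfps_ctl = field_prep5(16, half_cycle) | /* generic field  */
			    field_prep5(8, half_cycle)  | /* FIRST field    */
			    field_prep5(0, half_cycle);   /* LAST field     */

	printf("PORT_ALPM_LFPS_CTL fields: 0x%08x\n", lfps_ctl);
	return 0;
}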
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a280448df771..14d5fefc9c5b 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,72 +9,72 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
-static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- i915->display.quirks.mask |= BIT(quirk);
+ display->quirks.mask |= BIT(quirk);
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
-static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+static void quirk_ssc_force_disable(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
- drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
+ intel_set_quirk(display, QUIRK_LVDS_SSC_DISABLE);
+ drm_info(display->drm, "applying lvds SSC disable quirk\n");
}
/*
* A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
* brightness value
*/
-static void quirk_invert_brightness(struct drm_i915_private *i915)
+static void quirk_invert_brightness(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
- drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
+ intel_set_quirk(display, QUIRK_INVERT_BRIGHTNESS);
+ drm_info(display->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_i915_private *i915)
+static void quirk_backlight_present(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
- drm_info(&i915->drm, "applying backlight present quirk\n");
+ intel_set_quirk(display, QUIRK_BACKLIGHT_PRESENT);
+ drm_info(display->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
* which is 300 ms greater than eDP spec T12 min.
*/
-static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+static void quirk_increase_t12_delay(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
- drm_info(&i915->drm, "Applying T12 delay quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_T12_DELAY);
+ drm_info(display->drm, "Applying T12 delay quirk\n");
}
/*
* GeminiLake NUC HDMI outputs require additional off time
* this allows the onboard retimer to correctly sync to signal
*/
-static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+static void quirk_increase_ddi_disabled_time(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
- drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME);
+ drm_info(display->drm, "Applying Increase DDI Disabled quirk\n");
}
-static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
+static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
- drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
+ intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
+ drm_info(display->drm, "Applying no pps backlight power quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
int subsystem_device;
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
};
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
const struct dmi_system_id (*dmi_id_list)[];
};
@@ -203,9 +203,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
-void intel_init_quirks(struct drm_i915_private *i915)
+void intel_init_quirks(struct intel_display *display)
{
- struct pci_dev *d = to_pci_dev(i915->drm.dev);
+ struct pci_dev *d = to_pci_dev(display->drm->dev);
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
@@ -216,15 +216,15 @@ void intel_init_quirks(struct drm_i915_private *i915)
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
- q->hook(i915);
+ q->hook(display);
}
for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(i915);
+ intel_dmi_quirks[i].hook(display);
}
}
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- return i915->display.quirks.mask & BIT(quirk);
+ return display->quirks.mask & BIT(quirk);
}
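The PCI match loop above is the usual quirk-table pattern: each subsystem ID matches either exactly or via a wildcard. A standalone sketch of that matching rule; the ANY_ID value and the second table entry are made up for illustration (the real code uses PCI_ANY_ID and the intel_quirks[] table):

#include <stdbool.h>
#include <stdio.h>

#define ANY_ID 0xffff	/* hypothetical wildcard, analogous to PCI_ANY_ID */

struct quirk {
	unsigned short device;
	unsigned short subsystem_vendor;	/* ANY_ID matches every vendor */
	unsigned short subsystem_device;	/* ANY_ID matches every device */
	const char *name;
};

static bool quirk_matches(const struct quirk *q, unsigned short device,
			  unsigned short sub_vendor, unsigned short sub_device)
{
	return q->device == device &&
	       (q->subsystem_vendor == sub_vendor || q->subsystem_vendor == ANY_ID) &&
	       (q->subsystem_device == sub_device || q->subsystem_device == ANY_ID);
}

int main(void)
{
	static const struct quirk quirks[] = {
		{ 0x0f31, 0x103c, 0x220f, "invert brightness" },
		{ 0x0f31, ANY_ID, ANY_ID, "backlight present" },	/* made-up entry */
	};

	/* Only the wildcard entry matches this made-up subsystem ID pair. */
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirk_matches(&quirks[i], 0x0f31, 0x1234, 0x5678))
			printf("matched: %s\n", quirks[i].name);

	return 0;
}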
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 10a4d163149f..151c8f4ae576 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
enum intel_quirk_id {
QUIRK_BACKLIGHT_PRESENT,
@@ -19,7 +19,7 @@ enum intel_quirk_id {
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
};
-void intel_init_quirks(struct drm_i915_private *i915);
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
+void intel_init_quirks(struct intel_display *display);
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5f9e748adc89..d0d712405129 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -193,7 +193,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
}
#define to_intel_sdvo_connector_state(conn_state) \
- container_of((conn_state), struct intel_sdvo_connector_state, base.base)
+ container_of_const((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo);
@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- encoder->audio_disable(encoder, old_crtc_state, conn_state);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -1948,7 +1944,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int clock = mode->clock;
@@ -1956,9 +1952,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -2382,7 +2375,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
u64 *val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
+ const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index bc61e736f9b3..e6df1f92def5 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -44,12 +44,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
}
}
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
- enum phy phy, bool enable)
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
- if (!intel_phy_is_snps(i915, phy))
+ if (!intel_encoder_is_snps(encoder))
return;
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
@@ -63,7 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
@@ -1809,7 +1811,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->mpllb_state = *tables[i];
+ crtc_state->dpll_hw_state.mpllb = *tables[i];
return 0;
}
}
@@ -1821,8 +1823,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1879,7 +1881,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
void intel_mpllb_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1951,7 +1953,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
struct intel_mpllb_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
@@ -1999,7 +2001,7 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_mpllb_state mpllb_hw_state = {};
- const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb;
struct intel_encoder *encoder;
if (!IS_DG2(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 515abf7c5902..bc08b92a7cd9 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -17,8 +17,8 @@ struct intel_mpllb_state;
enum phy;
void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
- enum phy phy, bool enable);
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable);
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index d7b440c8caef..36a253a19c74 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -47,6 +47,7 @@
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+#include "intel_sprite_regs.h"
static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
{
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_regs.h b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
new file mode 100644
index 000000000000..bb67705652b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_SPRITE_REGS__
+#define __INTEL_SPRITE_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE REG_BIT(31)
+#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
+#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
+#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
+#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
+#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
+#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
+#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
+#define DVS_SOURCE_KEY REG_BIT(22)
+#define DVS_RGB_ORDER_XBGR REG_BIT(20)
+#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
+#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
+#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
+#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
+#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
+#define DVS_ROTATE_180 REG_BIT(15)
+#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define DVS_TILED REG_BIT(10)
+#define DVS_DEST_KEY REG_BIT(2)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
+#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
+#define DVS_POS_X_MASK REG_GENMASK(15, 0)
+#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
+#define _DVSASIZE 0x72190
+#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
+#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
+#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
+#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define DVS_ADDR_MASK REG_GENMASK(31, 12)
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
+#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
+#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE REG_BIT(31)
+#define DVS_FILTER_MASK REG_GENMASK(30, 29)
+#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
+#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
+#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
+#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
+#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
+
+#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE REG_BIT(31)
+#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
+#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
+#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
+#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
+#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
+#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
+#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
+#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
+#define SPRITE_SOURCE_KEY REG_BIT(22)
+#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
+#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
+#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
+#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
+#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
+#define SPRITE_ROTATE_180 REG_BIT(15)
+#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
+#define SPRITE_TILED REG_BIT(10)
+#define SPRITE_DEST_KEY REG_BIT(2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
+#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
+#define _SPRA_SIZE 0x70290
+#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
+#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
+#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
+#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE REG_BIT(31)
+#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
+#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
+#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
+#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
+#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
+#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
+#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
+
+#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
+#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
+#define SP_ENABLE REG_BIT(31)
+#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SP_FORMAT_MASK REG_GENMASK(29, 26)
+#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
+#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
+#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
+#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
+#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
+#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
+#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
+#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
+#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
+#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
+#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
+#define SP_SOURCE_KEY REG_BIT(22)
+#define SP_YUV_FORMAT_BT709 REG_BIT(18)
+#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
+#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
+#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
+#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
+#define SP_ROTATE_180 REG_BIT(15)
+#define SP_TILED REG_BIT(10)
+#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
+#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
+#define SP_POS_Y_MASK REG_GENMASK(31, 16)
+#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
+#define SP_POS_X_MASK REG_GENMASK(15, 0)
+#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
+#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
+#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
+#define SP_WIDTH_MASK REG_GENMASK(15, 0)
+#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
+#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
+#define SP_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
+#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
+#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
+#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
+#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
+#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
+#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
+#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
+#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
+#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
+#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
+#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
+#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
+#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
+#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
+#define SP_SH_COS_MASK REG_GENMASK(9, 0)
+#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
+
+#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
+#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
+#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
+
+#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
+#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
+#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
+
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
+ */
+#define _MMIO_CHV_SPCSC(plane_id, reg) \
+ _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
+
+#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
+#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
+#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
+#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
+#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
+#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
+#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
+#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
+#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
+#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
+#define SPCSC_C1_MASK REG_GENMASK(30, 16)
+#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
+#define SPCSC_C0_MASK REG_GENMASK(14, 0)
+#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
+
+#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
+#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
+#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
+#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
+#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
+#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
+#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
+#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
+#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
+#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
+#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
+#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
+
+#endif /* __INTEL_SPRITE_REGS__ */
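Everything in this new header leans on the _MMIO_PIPE()/_PIPE() pattern: per-pipe register instances at evenly spaced offsets, selected by pipe index. A simplified stand-in sketch of that selection, reusing the DVS A/B control offsets defined above (the helper is illustrative, not the kernel macro):

#include <stdio.h>

/* Stand-in for _PIPE(): instance N sits at reg_a + N * (reg_b - reg_a). */
static unsigned int pipe_reg(unsigned int pipe, unsigned int reg_a, unsigned int reg_b)
{
	return reg_a + pipe * (reg_b - reg_a);
}

#define DVSACNTR	0x72180
#define DVSBCNTR	0x73180
#define DVSCNTR(pipe)	pipe_reg((pipe), DVSACNTR, DVSBCNTR)

int main(void)
{
	printf("DVSCNTR(pipe A) = 0x%05x\n", DVSCNTR(0)); /* 0x72180 */
	printf("DVSCNTR(pipe B) = 0x%05x\n", DVSCNTR(1)); /* 0x73180 */
	return 0;
}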
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6b374d481cd9..9887967b2ca5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -100,11 +100,9 @@ static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && tc->mode == mode;
+ return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
}
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
@@ -124,11 +122,9 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
+ return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
/*
@@ -254,8 +250,7 @@ assert_tc_cold_blocked(struct intel_tc_port *tc)
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -302,7 +297,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
@@ -375,9 +370,8 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
+ if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
return 4;
assert_tc_cold_blocked(tc);
@@ -458,9 +452,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum port port = tc->dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
@@ -812,7 +804,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
@@ -1635,10 +1627,7 @@ static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return false;
return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
@@ -1740,11 +1729,9 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return;
cancel_delayed_work(&tc->link_reset_work);
@@ -1861,7 +1848,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 2b77d399f1a1..9df0f1263913 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -885,7 +885,8 @@ struct intel_tv_connector_state {
bool bypass_vfilter;
};
-#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+#define to_intel_tv_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_tv_connector_state, base)
static struct drm_connector_state *
intel_tv_connector_duplicate_state(struct drm_connector *connector)
@@ -961,16 +962,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
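Both mode_valid() paths touched in this series now pull the limit from display.cdclk.max_dotclk_freq and reject anything faster. A stripped-down sketch of that filter, with a local enum standing in for the DRM mode status codes and made-up clock values:

#include <stdio.h>

/* Local stand-ins for the DRM mode status values used by mode_valid(). */
enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

static enum mode_status dotclk_mode_valid(int mode_clock_khz, int max_dotclk_khz)
{
	/* Reject any mode whose pixel clock exceeds the platform limit. */
	if (mode_clock_khz > max_dotclk_khz)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}

int main(void)
{
	/* Made-up clocks: 148.5 MHz passes a 340 MHz limit, 594 MHz does not. */
	printf("%d\n", dotclk_mode_valid(148500, 340000)); /* 0 == MODE_OK */
	printf("%d\n", dotclk_mode_valid(594000, 340000)); /* 1 == MODE_CLOCK_HIGH */
	return 0;
}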
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a9f44abfc9fc..228702c0e492 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -485,6 +485,7 @@ struct child_device_config {
u8 hdmi_iboost_level:4; /* 196+ */
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
+ u8 efp_index; /* 256+ */
} __packed;
struct bdb_general_definitions {
@@ -602,22 +603,22 @@ struct bdb_driver_features {
u8 custom_vbt_version; /* 155+ */
/* Driver Feature Flags */
- u16 rmpm_enabled:1; /* 165+ */
- u16 s2ddt_enabled:1; /* 165+ */
- u16 dpst_enabled:1; /* 165-227 */
- u16 bltclt_enabled:1; /* 165+ */
- u16 adb_enabled:1; /* 165-227 */
- u16 drrs_enabled:1; /* 165-227 */
- u16 grs_enabled:1; /* 165+ */
- u16 gpmt_enabled:1; /* 165+ */
- u16 tbt_enabled:1; /* 165+ */
+ u16 rmpm_enabled:1; /* 159+ */
+ u16 s2ddt_enabled:1; /* 159+ */
+ u16 dpst_enabled:1; /* 159-227 */
+ u16 bltclt_enabled:1; /* 159+ */
+ u16 adb_enabled:1; /* 159-227 */
+ u16 drrs_enabled:1; /* 159-227 */
+ u16 grs_enabled:1; /* 159+ */
+ u16 gpmt_enabled:1; /* 159+ */
+ u16 tbt_enabled:1; /* 159+ */
u16 psr_enabled:1; /* 165-227 */
u16 ips_enabled:1; /* 165+ */
- u16 dpfs_enabled:1; /* 165+ */
+ u16 dfps_enabled:1; /* 165+ */
u16 dmrrs_enabled:1; /* 174-227 */
u16 adt_enabled:1; /* ???-228 */
u16 hpd_wake:1; /* 201-240 */
- u16 pc_feature_valid:1;
+ u16 pc_feature_valid:1; /* 159+ */
} __packed;
/*
@@ -880,11 +881,12 @@ struct bdb_lvds_lfp_data_tail {
struct lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
- u8 obsolete1:5;
+ u8 i2c_pin:3; /* obsolete since ? */
+ u8 i2c_speed:2; /* obsolete since ? */
u16 pwm_freq_hz;
u8 min_brightness; /* ???-233 */
- u8 obsolete2;
- u8 obsolete3;
+ u8 i2c_address; /* obsolete since ? */
+ u8 i2c_command; /* obsolete since ? */
} __packed;
struct lfp_backlight_control_method {
@@ -897,16 +899,11 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
- offsetof(struct bdb_lfp_backlight_data, brightness_level)
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
- offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
-
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* ???-233 */
- struct lfp_backlight_control_method backlight_control[16];
+ u8 level[16]; /* 162-233 */
+ struct lfp_backlight_control_method backlight_control[16]; /* 191+ */
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
u8 brightness_precision_bits[16]; /* 236+ */
@@ -917,7 +914,7 @@ struct bdb_lfp_backlight_data {
* Block 44 - LFP Power Conservation Features Block
*/
struct lfp_power_features {
- u8 reserved1:1;
+ u8 dpst_support:1; /* ???-159 */
u8 power_conservation_pref:3;
u8 reserved2:1;
u8 lace_enabled_status:1; /* 210+ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5d905f932cb4..894ee97b3e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -9,6 +9,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vrr.h"
+#include "intel_dp.h"
bool intel_vrr_is_capable(struct intel_connector *connector)
{
@@ -113,10 +114,18 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
@@ -165,6 +174,14 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (crtc_state->uapi.vrr_enabled) {
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ if (intel_dp_as_sdp_supported(intel_dp)) {
+ crtc_state->vrr.vsync_start =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->vrr.vsync_end =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_end);
+ }
}
}
@@ -187,10 +204,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * TRANS_SET_CONTEXT_LATENCY with VRR enabled
- * requires this chicken bit on ADL/DG2.
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (DISPLAY_VER(dev_priv) == 13)
+ if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
@@ -239,6 +257,12 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder),
+ VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
+ VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
@@ -257,13 +281,16 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 trans_vrr_ctl;
+ u32 trans_vrr_ctl, trans_vrr_vsync;
trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
@@ -283,6 +310,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
}
- if (crtc_state->vrr.enable)
+ if (crtc_state->vrr.enable) {
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ if (HAS_AS_SDP(dev_priv)) {
+ trans_vrr_vsync =
+ intel_de_read(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder));
+ crtc_state->vrr.vsync_start =
+ REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
+ crtc_state->vrr.vsync_end =
+ REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
+ }
+ }
}
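The AS SDP vsync values programmed above are simply the sync points counted back from the end of the frame (crtc_vtotal minus vsync_start/vsync_end). A small arithmetic sketch with made-up vertical timings:

#include <stdio.h>

struct vtimings {
	int vsync_start;	/* lines from the start of the frame */
	int vsync_end;
	int vtotal;
};

int main(void)
{
	/* Made-up 1080p-like vertical timings, for illustration only. */
	struct vtimings m = { .vsync_start = 1084, .vsync_end = 1089, .vtotal = 1125 };

	/* Same arithmetic as the compute_config hunk: distance from frame end. */
	int vrr_vsync_start = m.vtotal - m.vsync_start;	/* 41 */
	int vrr_vsync_end = m.vtotal - m.vsync_end;	/* 36 */

	printf("vsync_start=%d vsync_end=%d\n", vrr_vsync_start, vrr_vsync_end);
	return 0;
}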
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 8a934bada624..baa601d27815 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -213,10 +213,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* The pipe scaler does not use all the bits of PIPESRC, at least
* on the earlier platforms. So even when we're scaling a plane
* the *pipe* source size must not be too large. For simplicity
- * we assume the limits match the scaler source size limits. Might
- * not be 100% accurate on all platforms, but good enough for now.
+ * we assume the limits match the scaler destination size limits.
+ * Might not be 100% accurate on all platforms, but good enough for
+ * now.
*/
- if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e941e2e4fd14..860574d04f88 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index c6b9be80d83c..7c6187b4479f 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,18 +6,19 @@
#include <drm/drm_blend.h>
#include "i915_drv.h"
-#include "i915_fixed.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fixed.h"
#include "intel_pcode.h"
#include "intel_wm.h"
#include "skl_watermark.h"
@@ -69,7 +70,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 9;
}
-static bool
+bool
intel_has_sagv(struct drm_i915_private *i915)
{
return HAS_SAGV(i915) &&
@@ -2601,10 +2602,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915))
+ if (HAS_MBUS_JOINING(i915)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
+ if (ret)
+ return ret;
+ }
+ }
+
for_each_intel_crtc(&i915->drm, crtc) {
enum pipe pipe = crtc->pipe;
@@ -2628,13 +2636,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes_late(state, "MBUS joining change");
- if (ret)
- return ret;
- }
-
drm_dbg_kms(&i915->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
@@ -3057,6 +3058,8 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (HAS_MBUS_JOINING(i915))
dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
+
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3530,85 +3533,6 @@ int intel_dbuf_init(struct drm_i915_private *i915)
return 0;
}
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(i915))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(i915, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(i915,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(i915,
- new_dbuf_state->enabled_slices);
-}
-
static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
@@ -3628,14 +3552,12 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
return false;
}
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
const struct intel_crtc *crtc;
u32 val = 0;
- int i;
if (DISPLAY_VER(i915) < 11)
return;
@@ -3679,12 +3601,9 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
u32 pipe_val = val;
- if (!new_crtc_state->hw.active)
- continue;
-
if (DISPLAY_VER(i915) >= 14) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
new_dbuf_state->active_pipes))
@@ -3697,6 +3616,217 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
}
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(dbuf_state))
+ return PTR_ERR(dbuf_state);
+
+ dbuf_state->mdclk_cdclk_ratio = ratio;
+
+ return intel_atomic_lock_global_state(&dbuf_state->base);
+}
+
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus)
+{
+ enum dbuf_slice slice;
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+
+ if (joined_mbus)
+ ratio *= 2;
+
+ drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ ratio, str_yes_no(joined_mbus));
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+}
+
+static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ int mdclk_cdclk_ratio;
+
+ if (intel_cdclk_is_decreasing_later(state)) {
+ /* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
+ mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
+ } else {
+ /* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
+ mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
+ }
+
+ intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ new_dbuf_state->joined_mbus);
+}
+
+static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
+ return pipe;
+ else
+ return INVALID_PIPE;
+}
+
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ u32 mbus_ctl;
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ if (new_dbuf_state->joined_mbus)
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
+ else
+ mbus_ctl = MBUS_HASHING_MODE_2x2;
+
+ if (pipe != INVALID_PIPE)
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
+ else
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+}
+
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
+{
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mbus_join_update(state, pipe);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mdclk_min_tracker_update(state);
+ }
+}
+
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mbus_join_update(state, pipe);
+
+ if (pipe != INVALID_PIPE) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ } else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
+ old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ }
+
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices;
+ new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+ new_slices = new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
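The reworked pre/post plane update hooks follow a two-phase bitmask update: before the planes are reprogrammed the union of old and new slices is enabled, and afterwards the set is trimmed to the new slices only. A standalone sketch of that sequencing, with a stand-in for gen9_dbuf_slices_update() and made-up slice masks:

#include <stdio.h>

/* Stand-in for gen9_dbuf_slices_update(): just report the requested set. */
static void dbuf_slices_update(unsigned int slices)
{
	printf("enabled slices: 0x%02x\n", slices);
}

int main(void)
{
	unsigned int old_slices = 0x3;	/* S1 | S2, made-up starting point */
	unsigned int new_slices = 0x6;	/* S2 | S3, made-up target */

	/* Pre plane update: keep every slice needed before or after. */
	dbuf_slices_update(old_slices | new_slices);	/* 0x07 */

	/* ... planes are reprogrammed here ... */

	/* Post plane update: drop the slices nothing uses anymore. */
	dbuf_slices_update(new_slices);			/* 0x06 */
	return 0;
}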
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index e3d1d74a7b17..91f92c0e706e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -25,6 +25,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool intel_can_enable_sagv(struct drm_i915_private *i915,
const struct intel_bw_state *bw_state);
+bool intel_has_sagv(struct drm_i915_private *i915);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
const struct skl_ddb_entry *entry);
@@ -58,22 +59,31 @@ struct intel_dbuf_state {
u8 slices[I915_MAX_PIPES];
u8 enabled_slices;
u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
bool joined_mbus;
};
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
+
#define intel_atomic_get_old_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio);
+
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus);
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
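The to_intel_dbuf_state() change above switches from container_of() to container_of_const(), so the old/new state accessors preserve constness: handing in a const global-state pointer now yields a const intel_dbuf_state pointer instead of quietly casting the qualifier away. A simplified stand-in of that idea (assumption: this _Generic-based wrapper is only an illustration, not the kernel's container_of_const() implementation):

#include <stddef.h>
#include <stdio.h>

struct base { int dummy; };
struct derived { int payload; struct base base; };

/* picks a const or non-const cast depending on the argument's type */
#define to_derived(ptr) _Generic((ptr),							\
	const struct base *: (const struct derived *)((const char *)(ptr) - offsetof(struct derived, base)),	\
	struct base *: (struct derived *)((char *)(ptr) - offsetof(struct derived, base)))

int main(void)
{
	struct derived d = { .payload = 42 };
	const struct base *cb = &d.base;

	/* to_derived(cb) has type const struct derived *, so writes through it
	 * are rejected at compile time, which is the point of the change */
	printf("payload = %d\n", to_derived(cb)->payload);
	return 0;
}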
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index 20b30c9a6613..269163fa3350 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -32,14 +32,16 @@
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
-#define MBUS_CTL _MMIO(0x4438C)
-#define MBUS_JOIN REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
/* Watermark register definitions for SKL */
#define _CUR_WM_A_0 0x70140
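MBUS_TRANSLATION_THROTTLE_MIN is a new 3-bit field at bits 15:13 of MBUS_CTL, built with the same REG_GENMASK()/REG_FIELD_PREP() helpers as its neighbours. A userspace re-implementation of that arithmetic, just to show what value the macro produces (assumptions: 32-bit unsigned int and the GCC/Clang __builtin_ctz(); the kernel helpers add compile-time type and range checking on top of this):

#include <stdio.h>

#define GENMASK32(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, val)	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define MBUS_TRANSLATION_THROTTLE_MIN_MASK	GENMASK32(15, 13)
#define MBUS_TRANSLATION_THROTTLE_MIN(val)	FIELD_PREP32(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)

int main(void)
{
	/* a throttle value of 3 lands in bits 15:13, i.e. 0x6000 */
	printf("mask 0x%08x, value(3) 0x%08x\n",
	       MBUS_TRANSLATION_THROTTLE_MIN_MASK,
	       MBUS_TRANSLATION_THROTTLE_MIN(3));
	return 0;
}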
diff --git a/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
new file mode 100644
index 000000000000..2b83f334b1ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __VLV_DPIO_PHY_REGS_H__
+#define __VLV_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _VLV_CMN(dw) (0x8100 + (dw) * 4)
+#define _CHV_CMN(cl, dw) (0x8100 - (cl) * 0x80 + (dw) * 4)
+#define _VLV_PLL(ch, dw) (0x8000 + (ch) * 0x20 + (dw) * 4) /* dw 0-7,16-23 */
+#define _CHV_PLL(ch, dw) (0x8000 + (ch) * 0x180 + (dw) * 4)
+#define _VLV_REF(dw) (0x80a0 + ((dw) - 8) * 4) /* dw 8-15 */
+#define _VLV_PCS(ch, spline, dw) (0x200 + (ch) * 0x2400 + (spline) * 0x200 + (dw) * 4)
+#define _VLV_PCS_GRP(ch, dw) (0x8200 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_PCS_BCAST(dw) (0xc000 + (dw) * 4)
+#define _VLV_TX(ch, lane, dw) (0x80 + (ch) * 0x2400 + (lane) * 0x200 + (dw) * 4)
+#define _VLV_TX_GRP(ch, dw) (0x8280 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_TX_BCAST(dw) (0xc080 + (dw) * 4)
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define VLV_PLL_DW3(ch) _VLV_PLL((ch), 3)
+#define DPIO_S1_DIV_MASK REG_GENMASK(30, 28)
+#define DPIO_S1_DIV(s1) REG_FIELD_PREP(DPIO_S1_DIV_MASK, (s1))
+#define DPIO_S1_DIV_DAC 0 /* 10, DAC 25-225M rate */
+#define DPIO_S1_DIV_HDMIDP 1 /* 5, DAC 225-400M rate */
+#define DPIO_S1_DIV_LVDS1 2 /* 14 */
+#define DPIO_S1_DIV_LVDS2 3 /* 7 */
+#define DPIO_K_DIV_MASK REG_GENMASK(27, 24)
+#define DPIO_K_DIV(k) REG_FIELD_PREP(DPIO_K_DIV_MASK, (k))
+#define DPIO_P1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_P1_DIV(p1) REG_FIELD_PREP(DPIO_P1_DIV_MASK, (p1))
+#define DPIO_P2_DIV_MASK REG_GENMASK(20, 16)
+#define DPIO_P2_DIV(p2) REG_FIELD_PREP(DPIO_P2_DIV_MASK, (p2))
+#define DPIO_N_DIV_MASK REG_GENMASK(15, 12)
+#define DPIO_N_DIV(n) REG_FIELD_PREP(DPIO_N_DIV_MASK, (n))
+#define DPIO_ENABLE_CALIBRATION REG_BIT(11)
+#define DPIO_M1_DIV_MASK REG_GENMASK(10, 8)
+#define DPIO_M1_DIV(m1) REG_FIELD_PREP(DPIO_M1_DIV_MASK, (m1))
+#define DPIO_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_M2_DIV(m2) REG_FIELD_PREP(DPIO_M2_DIV_MASK, (m2))
+
+#define VLV_PLL_DW5(ch) _VLV_PLL((ch), 5)
+#define DPIO_REFSEL_OVERRIDE REG_BIT(27)
+#define DPIO_PLL_MODESEL_MASK REG_GENMASK(26, 24)
+#define DPIO_BIAS_CURRENT_CTL_MASK REG_GENMASK(22, 20) /* always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_MASK REG_GENMASK(17, 16)
+#define DPIO_DRIVER_CTL_MASK REG_GENMASK(15, 12) /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_MASK REG_GENMASK(11, 8) /* always set to 0x5 */
+
+#define VLV_PLL_DW7(ch) _VLV_PLL((ch), 7)
+
+#define VLV_PLL_DW16(ch) _VLV_PLL((ch), 16)
+
+#define VLV_PLL_DW17(ch) _VLV_PLL((ch), 17)
+
+#define VLV_PLL_DW18(ch) _VLV_PLL((ch), 18)
+
+#define VLV_PLL_DW19(ch) _VLV_PLL((ch), 19)
+
+#define VLV_REF_DW11 _VLV_REF(11)
+
+#define VLV_CMN_DW0 _VLV_CMN(0)
+
+/*
+ * Per DDI channel DPIO regs
+ */
+#define VLV_PCS_DW0_GRP(ch) _VLV_PCS_GRP((ch), 0)
+#define VLV_PCS01_DW0(ch) _VLV_PCS((ch), 0, 0)
+#define VLV_PCS23_DW0(ch) _VLV_PCS((ch), 1, 0)
+#define DPIO_PCS_TX_LANE2_RESET REG_BIT(16)
+#define DPIO_PCS_TX_LANE1_RESET REG_BIT(7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 REG_BIT(4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 REG_BIT(3)
+
+#define VLV_PCS_DW1_GRP(ch) _VLV_PCS_GRP((ch), 1)
+#define VLV_PCS01_DW1(ch) _VLV_PCS((ch), 0, 1)
+#define VLV_PCS23_DW1(ch) _VLV_PCS((ch), 1, 1)
+#define CHV_PCS_REQ_SOFTRESET_EN REG_BIT(23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN REG_BIT(22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN REG_BIT(21)
+#define DPIO_PCS_CLK_DATAWIDTH_MASK REG_GENMASK(7, 6)
+#define DPIO_PCS_CLK_DATAWIDTH_8_10 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 1)
+#define DPIO_PCS_CLK_DATAWIDTH_16_20 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 2)
+#define DPIO_PCS_CLK_DATAWIDTH_32_40 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 3)
+#define DPIO_PCS_CLK_SOFT_RESET REG_BIT(5)
+
+#define VLV_PCS_DW8_GRP(ch) _VLV_PCS_GRP((ch), 8)
+#define VLV_PCS01_DW8(ch) _VLV_PCS((ch), 0, 8)
+#define VLV_PCS23_DW8(ch) _VLV_PCS((ch), 1, 8)
+#define DPIO_PCS_USEDCLKCHANNEL REG_BIT(21)
+#define DPIO_PCS_USEDCLKCHANNEL_OVRRIDE REG_BIT(20)
+
+#define VLV_PCS_DW9_GRP(ch) _VLV_PCS_GRP((ch), 9)
+#define VLV_PCS01_DW9(ch) _VLV_PCS((ch), 0, 9)
+#define VLV_PCS23_DW9(ch) _VLV_PCS((ch), 1, 9)
+#define DPIO_PCS_TX2MARGIN_MASK REG_GENMASK(15, 13)
+#define DPIO_PCS_TX2MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 0)
+#define DPIO_PCS_TX2MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 1)
+#define DPIO_PCS_TX1MARGIN_MASK REG_GENMASK(12, 10)
+#define DPIO_PCS_TX1MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 0)
+#define DPIO_PCS_TX1MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 1)
+
+#define VLV_PCS_DW10_GRP(ch) _VLV_PCS_GRP((ch), 10)
+#define VLV_PCS01_DW10(ch) _VLV_PCS((ch), 0, 10)
+#define VLV_PCS23_DW10(ch) _VLV_PCS((ch), 1, 10)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 REG_BIT(31)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 REG_BIT(30)
+#define DPIO_PCS_TX2DEEMP_MASK REG_GENMASK(27, 24)
+#define DPIO_PCS_TX2DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 0)
+#define DPIO_PCS_TX2DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 2)
+#define DPIO_PCS_TX1DEEMP_MASK REG_GENMASK(19, 16)
+#define DPIO_PCS_TX1DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 0)
+#define DPIO_PCS_TX1DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 2)
+
+#define VLV_PCS_DW11_GRP(ch) _VLV_PCS_GRP((ch), 11)
+#define VLV_PCS01_DW11(ch) _VLV_PCS((ch), 0, 11)
+#define VLV_PCS23_DW11(ch) _VLV_PCS((ch), 1, 11)
+#define DPIO_TX2_STAGGER_MASK_MASK REG_GENMASK(28, 24)
+#define DPIO_TX2_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MASK_MASK, (x))
+#define DPIO_LANEDESKEW_STRAP_OVRD REG_BIT(3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER REG_BIT(1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER REG_BIT(0)
+
+#define VLV_PCS_DW12_GRP(ch) _VLV_PCS_GRP((ch), 12)
+#define VLV_PCS01_DW12(ch) _VLV_PCS((ch), 0, 12)
+#define VLV_PCS23_DW12(ch) _VLV_PCS((ch), 1, 12)
+#define DPIO_TX2_STAGGER_MULT_MASK REG_GENMASK(22, 20)
+#define DPIO_TX2_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MULT_MASK REG_GENMASK(20, 16)
+#define DPIO_TX1_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MASK_MASK REG_GENMASK(12, 8)
+#define DPIO_TX1_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MASK_MASK, (x))
+#define DPIO_LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define DPIO_LANESTAGGER_STRAP_MASK REG_GENMASK(4, 0)
+#define DPIO_LANESTAGGER_STRAP(x) REG_FIELD_PREP(DPIO_LANESTAGGER_STRAP_MASK, (x))
+
+#define VLV_PCS_DW14_GRP(ch) _VLV_PCS_GRP((ch), 14)
+#define VLV_PCS01_DW14(ch) _VLV_PCS((ch), 0, 14)
+#define VLV_PCS23_DW14(ch) _VLV_PCS((ch), 1, 14)
+
+#define VLV_PCS_DW17_BCAST _VLV_PCS_BCAST(17)
+#define VLV_PCS_DW17_GRP(ch) _VLV_PCS_GRP((ch), 17)
+#define VLV_PCS01_DW17(ch) _VLV_PCS((ch), 0, 17)
+#define VLV_PCS23_DW17(ch) _VLV_PCS((ch), 1, 17)
+
+#define VLV_PCS_DW23_GRP(ch) _VLV_PCS_GRP((ch), 23)
+#define VLV_PCS01_DW23(ch) _VLV_PCS((ch), 0, 23)
+#define VLV_PCS23_DW23(ch) _VLV_PCS((ch), 1, 23)
+
+#define VLV_TX_DW2_GRP(ch) _VLV_TX_GRP((ch), 2)
+#define VLV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define DPIO_SWING_MARGIN000_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN000(x) REG_FIELD_PREP(DPIO_SWING_MARGIN000_MASK, (x))
+#define DPIO_UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define DPIO_UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(DPIO_UNIQ_TRANS_SCALE_MASK, (x))
+
+#define VLV_TX_DW3_GRP(ch) _VLV_TX_GRP((ch), 3)
+#define VLV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+/* The following bit for CHV phy */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN REG_BIT(27)
+#define DPIO_SWING_MARGIN101_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN101(x) REG_FIELD_PREP(DPIO_SWING_MARGIN101_MASK, (x))
+
+#define VLV_TX_DW4_GRP(ch) _VLV_TX_GRP((ch), 4)
+#define VLV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define DPIO_SWING_DEEMPH9P5_MASK REG_GENMASK(31, 24)
+#define DPIO_SWING_DEEMPH9P5(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH9P5_MASK, (x))
+#define DPIO_SWING_DEEMPH6P0_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_DEEMPH6P0(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH6P0_MASK, (x))
+
+#define VLV_TX_DW5_GRP(ch) _VLV_TX_GRP((ch), 5)
+#define VLV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define DPIO_TX_OCALINIT_EN REG_BIT(31)
+
+#define VLV_TX_DW11_GRP(ch) _VLV_TX_GRP((ch), 11)
+#define VLV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+
+#define VLV_TX_DW14_GRP(ch) _VLV_TX_GRP((ch), 14)
+#define VLV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+
+/* CHV dpPhy registers */
+#define CHV_PLL_DW0(ch) _CHV_PLL((ch), 0)
+#define DPIO_CHV_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_CHV_M2_DIV(m2) REG_FIELD_PREP(DPIO_CHV_M2_DIV_MASK, (m2))
+
+#define CHV_PLL_DW1(ch) _CHV_PLL((ch), 1)
+#define DPIO_CHV_N_DIV_MASK REG_GENMASK(11, 8)
+#define DPIO_CHV_N_DIV(n) REG_FIELD_PREP(DPIO_CHV_N_DIV_MASK, (n))
+#define DPIO_CHV_M1_DIV_MASK REG_GENMASK(2, 0)
+#define DPIO_CHV_M1_DIV(m1) REG_FIELD_PREP(DPIO_CHV_M1_DIV_MASK, (m1))
+#define DPIO_CHV_M1_DIV_BY_2 0
+
+#define CHV_PLL_DW2(ch) _CHV_PLL((ch), 2)
+#define DPIO_CHV_M2_FRAC_DIV_MASK REG_GENMASK(21, 0)
+#define DPIO_CHV_M2_FRAC_DIV(m2_frac) REG_FIELD_PREP(DPIO_CHV_M2_FRAC_DIV_MASK, (m2_frac))
+
+#define CHV_PLL_DW3(ch) _CHV_PLL((ch), 3)
+#define DPIO_CHV_FRAC_DIV_EN REG_BIT(16)
+#define DPIO_CHV_SECOND_MOD REG_BIT(8)
+#define DPIO_CHV_FEEDFWD_GAIN_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_FEEDFWD_GAIN(x) REG_FIELD_PREP(DPIO_CHV_FEEDFWD_GAIN_MASK, (x))
+
+#define CHV_PLL_DW6(ch) _CHV_PLL((ch), 6)
+#define DPIO_CHV_GAIN_CTRL_MASK REG_GENMASK(18, 16)
+#define DPIO_CHV_GAIN_CTRL(x) REG_FIELD_PREP(DPIO_CHV_GAIN_CTRL_MASK, (x))
+#define DPIO_CHV_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_INT_COEFF(x) REG_FIELD_PREP(DPIO_CHV_INT_COEFF_MASK, (x))
+#define DPIO_CHV_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_PROP_COEFF(x) REG_FIELD_PREP(DPIO_CHV_PROP_COEFF_MASK, (x))
+
+#define CHV_PLL_DW8(ch) _CHV_PLL((ch), 8)
+#define DPIO_CHV_TDC_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define DPIO_CHV_TDC_TARGET_CNT(x) REG_FIELD_PREP(DPIO_CHV_TDC_TARGET_CNT_MASK, (x))
+
+#define CHV_PLL_DW9(ch) _CHV_PLL((ch), 9)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD(x) REG_FIELD_PREP(DPIO_CHV_INT_LOCK_THRESHOLD_MASK, (x))
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE REG_BIT(0) /* 1: coarse & 0 : fine */
+
+#define CHV_CMN_DW0_CH0 _CHV_CMN(0, 0)
+#define DPIO_ALLDL_POWERDOWN_CH0 REG_BIT(19)
+#define DPIO_ANYDL_POWERDOWN_CH0 REG_BIT(18)
+#define DPIO_ALLDL_POWERDOWN BIT(1)
+#define DPIO_ANYDL_POWERDOWN BIT(0)
+
+#define CHV_CMN_DW5_CH0 _CHV_CMN(0, 5)
+#define CHV_BUFRIGHTENA1_MASK REG_GENMASK(21, 20)
+#define CHV_BUFRIGHTENA1_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 0)
+#define CHV_BUFRIGHTENA1_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 1)
+#define CHV_BUFRIGHTENA1_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 3)
+#define CHV_BUFLEFTENA1_MASK REG_GENMASK(23, 22)
+#define CHV_BUFLEFTENA1_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 0)
+#define CHV_BUFLEFTENA1_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 1)
+#define CHV_BUFLEFTENA1_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 3)
+
+#define CHV_CMN_DW13_CH0 _CHV_CMN(0, 13)
+#define CHV_CMN_DW0_CH1 _CHV_CMN(1, 0)
+#define DPIO_CHV_S1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_CHV_S1_DIV(s1) REG_FIELD_PREP(DPIO_CHV_S1_DIV_MASK, (s1))
+#define DPIO_CHV_P1_DIV_MASK REG_GENMASK(15, 13)
+#define DPIO_CHV_P1_DIV(p1) REG_FIELD_PREP(DPIO_CHV_P1_DIV_MASK, (p1))
+#define DPIO_CHV_P2_DIV_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_P2_DIV(p2) REG_FIELD_PREP(DPIO_CHV_P2_DIV_MASK, (p2))
+#define DPIO_CHV_K_DIV_MASK REG_GENMASK(7, 4)
+#define DPIO_CHV_K_DIV(k) REG_FIELD_PREP(DPIO_CHV_K_DIV_MASK, (k))
+#define DPIO_PLL_FREQLOCK REG_BIT(1)
+#define DPIO_PLL_LOCK REG_BIT(0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, CHV_CMN_DW13_CH0, CHV_CMN_DW0_CH1)
+
+#define CHV_CMN_DW14_CH0 _CHV_CMN(0, 14)
+#define CHV_CMN_DW1_CH1 _CHV_CMN(1, 1)
+#define DPIO_AFC_RECAL REG_BIT(14)
+#define DPIO_DCLKP_EN REG_BIT(13)
+#define CHV_BUFLEFTENA2_MASK REG_GENMASK(18, 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 0)
+#define CHV_BUFLEFTENA2_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 1)
+#define CHV_BUFLEFTENA2_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 3)
+#define CHV_BUFRIGHTENA2_MASK REG_GENMASK(20, 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 0)
+#define CHV_BUFRIGHTENA2_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 1)
+#define CHV_BUFRIGHTENA2_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 3)
+#define CHV_CMN_DW14(ch) _PIPE(ch, CHV_CMN_DW14_CH0, CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW19_CH0 _CHV_CMN(0, 19)
+#define CHV_CMN_DW6_CH1 _CHV_CMN(1, 6)
+#define DPIO_ALLDL_POWERDOWN_CH1 REG_BIT(30) /* CL2 DW6 only */
+#define DPIO_ANYDL_POWERDOWN_CH1 REG_BIT(29) /* CL2 DW6 only */
+#define DPIO_DYNPWRDOWNEN_CH1 REG_BIT(28) /* CL2 DW6 only */
+#define CHV_CMN_USEDCLKCHANNEL REG_BIT(13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, CHV_CMN_DW19_CH0, CHV_CMN_DW6_CH1)
+
+#define CHV_CMN_DW28 _CHV_CMN(0, 28)
+#define DPIO_CL1POWERDOWNEN REG_BIT(23)
+#define DPIO_DYNPWRDOWNEN_CH0 REG_BIT(22)
+#define DPIO_SUS_CLK_CONFIG_MASK REG_GENMASK(1, 0)
+#define DPIO_SUS_CLK_CONFIG_ON REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 0)
+#define DPIO_SUS_CLK_CONFIG_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 1)
+#define DPIO_SUS_CLK_CONFIG_GATE REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 2)
+#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 3)
+
+#define CHV_CMN_DW30 _CHV_CMN(0, 30)
+#define DPIO_CL2_LDOFUSE_PWRENB REG_BIT(6)
+#define DPIO_LRC_BYPASS REG_BIT(3)
+
+#define CHV_TX_DW0(ch, lane) _VLV_TX((ch), (lane), 0)
+#define CHV_TX_DW1(ch, lane) _VLV_TX((ch), (lane), 1)
+#define CHV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define CHV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+#define CHV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define CHV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define CHV_TX_DW6(ch, lane) _VLV_TX((ch), (lane), 6)
+#define CHV_TX_DW7(ch, lane) _VLV_TX((ch), (lane), 7)
+#define CHV_TX_DW8(ch, lane) _VLV_TX((ch), (lane), 8)
+#define CHV_TX_DW9(ch, lane) _VLV_TX((ch), (lane), 9)
+#define CHV_TX_DW10(ch, lane) _VLV_TX((ch), (lane), 10)
+
+#define CHV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+#define DPIO_FRC_LATENCY_MASK REG_GENMASK(10, 8)
+#define DPIO_FRC_LATENCY(x) REG_FIELD_PREP(DPIO_FRC_LATENCY_MASK, (x))
+
+#define CHV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+#define DPIO_UPAR REG_BIT(30)
+
+#endif /* __VLV_DPIO_PHY_REGS_H__ */
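The new header encodes the DPIO register map as small address formulas (a base offset plus channel, spline or lane, and dword strides). A quick standalone check of that addressing scheme, with the two formulas copied verbatim from the header above (userspace sketch, not driver code):

#include <stdio.h>

#define _VLV_PCS(ch, spline, dw)	(0x200 + (ch) * 0x2400 + (spline) * 0x200 + (dw) * 4)
#define _VLV_TX(ch, lane, dw)		(0x80 + (ch) * 0x2400 + (lane) * 0x200 + (dw) * 4)

int main(void)
{
	/* VLV_PCS23_DW9(ch) expands to _VLV_PCS(ch, 1, 9); VLV_TX_DW4(ch, lane) to _VLV_TX(ch, lane, 4) */
	printf("VLV_PCS23_DW9(1) -> 0x%x\n", _VLV_PCS(1, 1, 9));
	printf("VLV_TX_DW4(0, 2) -> 0x%x\n", _VLV_TX(0, 2, 4));
	return 0;
}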
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 9b33b8a74d64..ee9923c7b115 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,20 +85,18 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port),
mask, 100))
- drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
+ drm_err(display->drm, "DPI FIFOs are not empty\n");
}
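vlv_dsi_wait_for_fifo_empty() keeps its structure through the conversion: build a mask of the four FIFO-empty bits, then poll the status register until they are all set or a timeout expires. A standalone sketch of that poll-until-set idiom (assumptions: read_status(), the 0xf mask and the retry count are invented stand-ins; the real intel_de_wait_for_set() sleeps between reads and takes a timeout in milliseconds):

#include <stdbool.h>
#include <stdio.h>

static unsigned int read_status(void)
{
	/* stand-in for the MMIO read; pretend the FIFOs drain immediately */
	return 0xffffffff;
}

static bool wait_for_set(unsigned int mask, unsigned int max_tries)
{
	for (unsigned int i = 0; i < max_tries; i++) {
		if ((read_status() & mask) == mask)
			return true;
		/* the real helper backs off or sleeps between reads */
	}
	return false;
}

int main(void)
{
	if (!wait_for_set(0xf, 100))
		fprintf(stderr, "DPI FIFOs are not empty\n");
	return 0;
}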
-static void write_data(struct drm_i915_private *dev_priv,
+static void write_data(struct intel_display *display,
i915_reg_t reg,
const u8 *data, u32 len)
{
@@ -110,18 +108,18 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
}
-static void read_data(struct drm_i915_private *dev_priv,
+static void read_data(struct intel_display *display,
i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -132,8 +130,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
- struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dsi *intel_dsi = intel_dsi_host->intel_dsi;
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
@@ -148,51 +146,51 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
header = packet.header;
if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
- data_reg = MIPI_LP_GEN_DATA(port);
+ data_reg = MIPI_LP_GEN_DATA(display, port);
data_mask = LP_DATA_FIFO_FULL;
- ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_reg = MIPI_LP_GEN_CTRL(display, port);
ctrl_mask = LP_CTRL_FIFO_FULL;
} else {
- data_reg = MIPI_HS_GEN_DATA(port);
+ data_reg = MIPI_HS_GEN_DATA(display, port);
data_mask = HS_DATA_FIFO_FULL;
- ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_reg = MIPI_HS_GEN_CTRL(display, port);
ctrl_mask = HS_CTRL_FIFO_FULL;
}
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP DATA FIFO !full\n");
- write_data(dev_priv, data_reg, packet.payload,
+ write_data(display, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ intel_de_write(display, MIPI_INTR_STAT(display, port),
GEN_READ_DATA_AVAIL);
}
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
ctrl_mask, 50)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- intel_de_write(dev_priv, ctrl_reg,
+ intel_de_write(display, ctrl_reg,
header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for read data.\n");
- read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ read_data(display, data_reg, msg->rx_buf, msg->rx_len);
}
/* XXX: fix for reads and writes */
@@ -225,9 +223,7 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
/* XXX: pipe, hs */
@@ -237,18 +233,18 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
- drm_dbg_kms(&dev_priv->drm,
+ if (cmd == intel_de_read(display, MIPI_DPI_CONTROL(display, port)))
+ drm_dbg_kms(display->drm,
"Same special packet %02x twice in a row.\n", cmd);
- intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100))
+ drm_err(display->drm,
"Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -273,8 +269,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -329,7 +324,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
bool cold_boot = false;
@@ -339,29 +334,30 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
+ intel_de_rmw(display, MIPI_CTRL(display, port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ u32 tmp = intel_de_read(display, MIPI_DEVICE_READY(display, port));
+
+ intel_de_rmw(display, MIPI_CTRL(display, port),
GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
+ drm_err(display->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY);
}
return cold_boot;
@@ -369,99 +365,100 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not ON\n");
+ drm_err(display->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) {
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_ULPS_NOT_ACTIVE, 20))
- drm_err(&dev_priv->drm, "ULPS not active\n");
+ drm_err(display->drm, "ULPS not active\n");
/* Exit ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK,
ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_LP_WAKE, 0);
}
}
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_DATA_LANE_STOP_STATE, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"D-PHY not entering LP-11 state\n");
}
}
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val = intel_de_read(display, MIPI_DEVICE_READY(display, port));
val &= ~ULPS_STATE_MASK;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
}
}
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -474,7 +471,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_ENTER);
usleep_range(2500, 3000);
@@ -482,14 +479,14 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, VLV_MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY);
usleep_range(2500, 3000);
}
@@ -509,50 +506,50 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"MIPI IO Port is not powergated\n");
}
}
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -564,30 +561,31 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
{
return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
@@ -596,15 +594,15 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
- intel_de_wait_for_clear(dev_priv, port_ctrl,
+ intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
- drm_err(&dev_priv->drm, "DSI LP not going Low\n");
+ drm_err(display->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
+ intel_de_rmw(display, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -612,6 +610,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -622,11 +621,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ intel_de_rmw(display, VLV_CHICKEN_3,
PIXEL_OVERLAP_CNT_MASK,
temp << PIXEL_OVERLAP_CNT_SHIFT);
}
@@ -636,7 +635,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
- temp = intel_de_read(dev_priv, port_ctrl);
+ temp = intel_de_read(display, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -656,15 +655,15 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_write(display, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(display, port_ctrl);
}
}
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -672,11 +671,12 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
- intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
+ intel_de_posting_read(display, port_ctrl);
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -726,6 +726,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -733,7 +734,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
enum port port;
bool glk_cold_boot = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_wait_panel_power_cycle(intel_dsi);
@@ -753,16 +754,16 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -798,8 +799,8 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
*/
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv,
- MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(display,
+ MIPI_MAX_RETURN_PKT_SIZE(display, port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -871,11 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -906,12 +908,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
@@ -919,7 +921,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -935,13 +937,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -960,7 +963,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
- bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
+ bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -969,27 +972,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
+ enabled = intel_de_read(display, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = intel_de_read(dev_priv,
- MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(display,
+ MIPI_DSI_FUNC_PRG(display, port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
+ if (drm_WARN_ON(display->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1010,8 +1013,7 @@ out_put_power:
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
@@ -1033,11 +1035,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(display, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(display, MIPI_DSI_FUNC_PRG(display, port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1049,24 +1051,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
- hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port));
+ hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port));
/* harizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1083,8 +1085,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
- vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1210,12 +1212,12 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-static void set_dsi_timings(struct drm_encoder *encoder,
+static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1256,29 +1258,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_HACTIVE(port),
adjusted_mode->crtc_hdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
adjusted_mode->crtc_vtotal);
}
- intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
hactive);
- intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(display, MIPI_HFP_COUNT(display, port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_HSYNC_PADDING_COUNT(display, port),
hsync);
- intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(display, MIPI_HBP_COUNT(display, port), hbp);
/* vertical values are in terms of lines */
- intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
- intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_VFP_COUNT(display, port), vfp);
+ intel_de_write(display, MIPI_VSYNC_PADDING_COUNT(display, port),
vsync);
- intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(display, MIPI_VBP_COUNT(display, port), vbp);
}
}
@@ -1299,21 +1301,20 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1329,31 +1330,31 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(display, MIPI_CTRL(display, PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ intel_de_write(display, MIPI_CTRL(display, PORT_A),
tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_EN(display, port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_de_write(display, MIPI_DPHY_PARAM(display, port),
intel_dsi->dphy_reg);
- intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ intel_de_write(display, MIPI_DPI_RESOLUTION(display, port),
adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
@@ -1381,7 +1382,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(display, MIPI_DSI_FUNC_PRG(display, port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1402,23 +1403,23 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode == BURST_MODE) {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_de_write(display, MIPI_LP_RX_TIMEOUT(display, port),
intel_dsi->lp_rx_timeout);
- intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_de_write(display, MIPI_TURN_AROUND_TIMEOUT(display, port),
intel_dsi->turn_arnd_val);
- intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_de_write(display, MIPI_DEVICE_RESET_TIMER(display, port),
intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
@@ -1429,16 +1430,16 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- intel_de_write(dev_priv,
- MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_de_write(display,
+ MIPI_INIT_COUNT(display, port == PORT_A ? PORT_C : PORT_A),
intel_dsi->init_count);
}
/* recovery disables */
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), tmp);
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
@@ -1446,7 +1447,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_de_write(display, MIPI_HIGH_LOW_SWITCH_COUNT(display, port),
intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
@@ -1455,14 +1456,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_de_write(display, MIPI_CLK_LANE_TIMING(display, port),
intel_dsi->dphy_reg);
}
@@ -1471,10 +1472,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_de_write(display, MIPI_DBI_BW_CTRL(display, port),
intel_dsi->bw_timer);
- intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_de_write(display, MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi)) {
@@ -1502,13 +1503,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
break;
}
- intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt);
+ intel_de_write(display, MIPI_VIDEO_MODE_FORMAT(display, port), fmt);
}
}
}
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1518,17 +1520,17 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
- intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
+ intel_de_rmw(display, MIPI_DSI_FUNC_PRG(display, port), VID_MODE_FORMAT_MASK, 0);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x1);
}
}
@@ -1592,8 +1594,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
@@ -1879,10 +1880,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct intel_dsi *intel_dsi;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
struct drm_display_mode *current_mode;
const struct dmi_system_id *dmi_id;
enum port port;
@@ -1903,64 +1902,61 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_dsi)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(intel_dsi);
return;
}
- intel_encoder = &intel_dsi->base;
- encoder = &intel_encoder->base;
- intel_dsi->attached_connector = intel_connector;
-
- connector = &intel_connector->base;
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- "DSI %c", port_name(port));
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
- intel_encoder->compute_config = intel_dsi_compute_config;
- intel_encoder->pre_enable = intel_dsi_pre_enable;
+ encoder->compute_config = intel_dsi_compute_config;
+ encoder->pre_enable = intel_dsi_pre_enable;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->enable = bxt_dsi_enable;
- intel_encoder->disable = intel_dsi_disable;
- intel_encoder->post_disable = intel_dsi_post_disable;
- intel_encoder->get_hw_state = intel_dsi_get_hw_state;
- intel_encoder->get_config = intel_dsi_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_dsi_shutdown;
+ encoder->enable = bxt_dsi_enable;
+ encoder->disable = intel_dsi_disable;
+ encoder->post_disable = intel_dsi_post_disable;
+ encoder->get_hw_state = intel_dsi_get_hw_state;
+ encoder->get_config = intel_dsi_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_dsi_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- intel_encoder->port = port;
- intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
- intel_encoder->cloneable = 0;
+ encoder->port = port;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->cloneable = 0;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->pipe_mask = BIT(PIPE_A);
+ encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->pipe_mask = BIT(PIPE_B);
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL);
- if (intel_connector->panel.vbt.dsi.config->dual_link)
+ if (connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1980,7 +1976,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
/* Use clock read-back from current hw-state for fastboot */
- current_mode = intel_encoder_current_mode(intel_encoder);
+ current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
@@ -1996,22 +1992,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
intel_dsi_vbt_gpio_init(intel_dsi,
- intel_dsi_get_hw_state(intel_encoder, &pipe));
+ intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs,
+ drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
mutex_lock(&dev_priv->drm.mode_config.mutex);
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ if (!intel_panel_preferred_fixed_mode(connector)) {
drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
@@ -2024,18 +2020,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
quirk_func(intel_dsi);
}
- intel_panel_init(intel_connector, NULL);
+ intel_panel_init(connector, NULL);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
- vlv_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(connector);
return;
err_cleanup_connector:
- drm_connector_cleanup(&intel_connector->base);
+ drm_connector_cleanup(&connector->base);
err:
- drm_encoder_cleanup(&intel_encoder->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
- kfree(intel_connector);
+ kfree(connector);
}
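
A minimal sketch of the call-site convention this conversion lands on (the example function name is illustrative only; the accessors and macros are the ones used in the hunks above): register writes now take a struct intel_display pointer obtained via to_intel_display(), and the MIPI register macros take that same pointer so they can locate the per-platform MMIO base themselves instead of relying on an implicit dev_priv in scope.

	/* Sketch only: shows the converted accessor convention, not a
	 * function added by this patch. */
	static void example_program_init_count(struct intel_encoder *encoder,
					       enum port port, u32 init_count)
	{
		struct intel_display *display = to_intel_display(encoder);

		/* before: intel_de_write(dev_priv, MIPI_INIT_COUNT(port), init_count); */
		intel_de_write(display, MIPI_INIT_COUNT(display, port), init_count);
	}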
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index ae0a0b11bae3..70c5a13a3c75 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -365,13 +365,13 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- u32 temp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 temp;
- temp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ temp = intel_de_read(display, MIPI_CTRL(display, port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
@@ -570,24 +570,24 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
}
static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index abbe427e462e..c1126d170ec6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,26 +11,23 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
-#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+#define _MIPI_MMIO_BASE(display) ((display)->dsi.mmio_base)
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
-#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+#define _MMIO_MIPI(base, port, a, c) _MMIO((base) + _MIPI_PORT(port, a, c))
/* BXT MIPI mode configure */
-#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
-#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
-#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+#define _BXT_MIPIA_TRANS_HACTIVE 0xb0f8
+#define _BXT_MIPIC_TRANS_HACTIVE 0xb8f8
+#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
-#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
-#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
-#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+#define _BXT_MIPIA_TRANS_VACTIVE 0xb0fc
+#define _BXT_MIPIC_TRANS_VACTIVE 0xb8fc
+#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
-#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
-#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
-#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+#define _BXT_MIPIA_TRANS_VTOTAL 0xb100
+#define _BXT_MIPIC_TRANS_VTOTAL 0xb900
+#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -38,14 +35,14 @@
#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
#define HS_IO_CTRL_SELECT (1 << 0)
-#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define _MIPIA_PORT_CTRL 0x61190
+#define _MIPIC_PORT_CTRL 0x61700
+#define VLV_MIPI_PORT_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
/* BXT port control */
-#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
-#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
-#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+#define _BXT_MIPIA_PORT_CTRL 0xb0c0
+#define _BXT_MIPIC_PORT_CTRL 0xb8c0
+#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
@@ -87,20 +84,17 @@
#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
-#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define _MIPIA_TEARING_CTRL 0x61194
+#define _MIPIC_TEARING_CTRL 0x61704
+#define VLV_MIPI_TEARING_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
-/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
-
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
-#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
-#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
+#define _MIPIA_DEVICE_READY 0xb000
+#define _MIPIC_DEVICE_READY 0xb800
+#define MIPI_DEVICE_READY(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -108,12 +102,12 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
-#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
-#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
-#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
-#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
+#define _MIPIA_INTR_STAT 0xb004
+#define _MIPIC_INTR_STAT 0xb804
+#define MIPI_INTR_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN 0xb008
+#define _MIPIC_INTR_EN 0xb808
+#define MIPI_INTR_EN(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -147,9 +141,9 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG 0xb00c
+#define _MIPIC_DSI_FUNC_PRG 0xb80c
+#define MIPI_DSI_FUNC_PRG(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -170,77 +164,77 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT 0xb010
+#define _MIPIC_HS_TX_TIMEOUT 0xb810
+#define MIPI_HS_TX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT 0xb014
+#define _MIPIC_LP_RX_TIMEOUT 0xb814
+#define MIPI_LP_RX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT 0xb018
+#define _MIPIC_TURN_AROUND_TIMEOUT 0xb818
+#define MIPI_TURN_AROUND_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER 0xb01c
+#define _MIPIC_DEVICE_RESET_TIMER 0xb81c
+#define MIPI_DEVICE_RESET_TIMER(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
-#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION 0xb020
+#define _MIPIC_DPI_RESOLUTION 0xb820
+#define MIPI_DPI_RESOLUTION(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE 0xb024
+#define _MIPIC_DBI_FIFO_THROTTLE 0xb824
+#define MIPI_DBI_FIFO_THROTTLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT 0xb028
+#define _MIPIC_HSYNC_PADDING_COUNT 0xb828
+#define MIPI_HSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
-#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
-#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
+#define _MIPIA_HBP_COUNT 0xb02c
+#define _MIPIC_HBP_COUNT 0xb82c
+#define MIPI_HBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
-#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
-#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
+#define _MIPIA_HFP_COUNT 0xb030
+#define _MIPIC_HFP_COUNT 0xb830
+#define MIPI_HFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+#define _MIPIA_HACTIVE_AREA_COUNT 0xb034
+#define _MIPIC_HACTIVE_AREA_COUNT 0xb834
+#define MIPI_HACTIVE_AREA_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+#define _MIPIA_VSYNC_PADDING_COUNT 0xb038
+#define _MIPIC_VSYNC_PADDING_COUNT 0xb838
+#define MIPI_VSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
-#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
-#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
+#define _MIPIA_VBP_COUNT 0xb03c
+#define _MIPIC_VBP_COUNT 0xb83c
+#define MIPI_VBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
-#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
-#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
+#define _MIPIA_VFP_COUNT 0xb040
+#define _MIPIC_VFP_COUNT 0xb840
+#define MIPI_VFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT 0xb044
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT 0xb844
+#define MIPI_HIGH_LOW_SWITCH_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
-#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
-#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL 0xb048
+#define _MIPIC_DPI_CONTROL 0xb848
+#define MIPI_DPI_CONTROL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -249,28 +243,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
-#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
-#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
+#define _MIPIA_DPI_DATA 0xb04c
+#define _MIPIC_DPI_DATA 0xb84c
+#define MIPI_DPI_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
-#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
-#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
+#define _MIPIA_INIT_COUNT 0xb050
+#define _MIPIC_INIT_COUNT 0xb850
+#define MIPI_INIT_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE 0xb054
+#define _MIPIC_MAX_RETURN_PKT_SIZE 0xb854
+#define MIPI_MAX_RETURN_PKT_SIZE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT 0xb058
+#define _MIPIC_VIDEO_MODE_FORMAT 0xb858
+#define MIPI_VIDEO_MODE_FORMAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -278,9 +271,9 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
-#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
-#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE 0xb05c
+#define _MIPIC_EOT_DISABLE 0xb85c
+#define MIPI_EOT_DISABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
@@ -292,36 +285,36 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
-#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
-#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK 0xb060
+#define _MIPIC_LP_BYTECLK 0xb860
+#define MIPI_LP_BYTECLK(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
-#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+#define _MIPIA_TLPX_TIME_COUNT 0xb0a4
+#define _MIPIC_TLPX_TIME_COUNT 0xb8a4
+#define MIPI_TLPX_TIME_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
-#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+#define _MIPIA_CLK_LANE_TIMING 0xb098
+#define _MIPIC_CLK_LANE_TIMING 0xb898
+#define MIPI_CLK_LANE_TIMING(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
-#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
-#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA 0xb064
+#define _MIPIC_LP_GEN_DATA 0xb864
+#define MIPI_LP_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
-#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
-#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
-#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
-#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA 0xb068
+#define _MIPIC_HS_GEN_DATA 0xb868
+#define MIPI_HS_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL 0xb06c
+#define _MIPIC_LP_GEN_CTRL 0xb86c
+#define MIPI_LP_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL 0xb070
+#define _MIPIC_HS_GEN_CTRL 0xb870
+#define MIPI_HS_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -332,9 +325,9 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
-#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT 0xb074
+#define _MIPIC_GEN_FIFO_STAT 0xb874
+#define MIPI_GEN_FIFO_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -350,16 +343,16 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE 0xb078
+#define _MIPIC_HS_LS_DBI_ENABLE 0xb878
+#define MIPI_HS_LP_DBI_ENABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
-#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
-#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM 0xb080
+#define _MIPIC_DPHY_PARAM 0xb880
+#define MIPI_DPHY_PARAM(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -369,34 +362,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
-#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
+#define _MIPIA_DBI_BW_CTRL 0xb084
+#define _MIPIC_DBI_BW_CTRL 0xb884
+#define MIPI_DBI_BW_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT 0xb088
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT 0xb888
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
-#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL 0xb08c
+#define _MIPIC_STOP_STATE_STALL 0xb88c
+#define MIPI_STOP_STATE_STALL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
-#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
-#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 0xb090
+#define _MIPIC_INTR_STAT_REG_1 0xb890
+#define MIPI_INTR_STAT_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 0xb094
+#define _MIPIC_INTR_EN_REG_1 0xb894
+#define MIPI_INTR_EN_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL(display) (_MIPI_MMIO_BASE(display) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -409,9 +402,9 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
-#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
-#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
+#define _MIPIA_CTRL 0xb104
+#define _MIPIC_CTRL 0xb904
+#define MIPI_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -442,41 +435,41 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
-#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
-#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS 0xb108
+#define _MIPIC_DATA_ADDRESS 0xb908
+#define MIPI_DATA_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
-#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
-#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH 0xb10c
+#define _MIPIC_DATA_LENGTH 0xb90c
+#define MIPI_DATA_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
-#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS 0xb110
+#define _MIPIC_COMMAND_ADDRESS 0xb910
+#define MIPI_COMMAND_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
-#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH 0xb114
+#define _MIPIC_COMMAND_LENGTH 0xb914
+#define MIPI_COMMAND_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
-#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 0xb118
+#define _MIPIC_READ_DATA_RETURN0 0xb918
+#define MIPI_READ_DATA_RETURN(display, port, n) _MMIO_MIPI(_MIPI_MMIO_BASE(display) + 4 * (n), port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
-#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
-#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID 0xb138
+#define _MIPIC_READ_DATA_VALID 0xb938
+#define MIPI_READ_DATA_VALID(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
#endif /* __VLV_DSI_REGS_H__ */
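
For reference, a hedged walk-through of how the reworked macros compose a register offset once the MMIO base is passed in explicitly; the expansion below only restates the definitions above, with display->dsi.mmio_base standing in for whichever of VLV_MIPI_BASE or BXT_MIPI_BASE the platform selected.

	/*
	 *   MIPI_DEVICE_READY(display, PORT_C)
	 *     => _MMIO_MIPI(_MIPI_MMIO_BASE(display), PORT_C,
	 *                   _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
	 *     => _MMIO((display)->dsi.mmio_base + _MIPI_PORT(PORT_C, 0xb000, 0xb800))
	 *     => _MMIO((display)->dsi.mmio_base + 0xb800)
	 *
	 * The per-port offsets are now relative, so the same macro serves both
	 * the VLV and BXT register blocks; only the base supplied by the
	 * caller differs.
	 */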
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dcbfe32fd30c..81f65cab1330 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -879,6 +879,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = fpriv->i915;
int ret = 0;
switch (args->param) {
@@ -904,6 +905,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
+ case I915_CONTEXT_PARAM_LOW_LATENCY:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
+ else
+ ret = -EINVAL;
+ break;
+
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
@@ -992,6 +1000,9 @@ static int intel_context_set_gem(struct intel_context *ce,
if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
ret = intel_context_reconfigure_sseu(ce, sseu);
+ if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
+ __set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
+
return ret;
}
@@ -1630,6 +1641,9 @@ i915_gem_create_context(struct drm_i915_private *i915,
if (vm)
ctx->vm = vm;
+ /* Assign early so intel_context_set_gem can access these flags */
+ ctx->user_flags = pc->user_flags;
+
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
i915_gem_context_set_user_engines(ctx);
@@ -1652,8 +1666,6 @@ i915_gem_create_context(struct drm_i915_private *i915,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(i915);
- ctx->user_flags = pc->user_flags;
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 03bc7f9d191b..b6d97da63d1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -338,6 +338,7 @@ struct i915_gem_context {
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
#define UCONTEXT_PERSISTENCE 4
+#define UCONTEXT_LOW_LATENCY 5
/**
* @flags: small set of booleans
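
A hedged userspace sketch of how the new low-latency hint would be requested. The parameter is wired through the proto-context path above, so it is set at context creation via the existing CONTEXT_CREATE_EXT/SETPARAM uAPI; the function name and minimal error handling are assumptions for illustration, and a GT that does not use GuC submission returns -EINVAL, matching the hunk in set_proto_ctx_param().

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Sketch: create a context with I915_CONTEXT_PARAM_LOW_LATENCY set.
	 * Returns the new context id, or 0 if the ioctl failed. */
	static uint32_t create_low_latency_ctx(int drm_fd)
	{
		struct drm_i915_gem_context_create_ext_setparam setparam = {
			.base  = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
			.param = {
				.param = I915_CONTEXT_PARAM_LOW_LATENCY,
				.value = 1,
			},
		};
		struct drm_i915_gem_context_create_ext create = {
			.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
			.extensions = (uintptr_t)&setparam,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
			return 0;

		return create.ctx_id;
	}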
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083..42619fc05de4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -255,7 +255,6 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
- intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2457,7 +2456,7 @@ static int eb_submit(struct i915_execbuffer *eb)
* The engine index is returned.
*/
static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2465,7 +2464,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
+ get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
@@ -2686,7 +2685,6 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
- struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2710,17 +2708,10 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
- gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
- /*
- * Keep GT0 active on MTL so that i915_vma_parked() doesn't
- * free VMAs while execbuf ioctl is validating VMAs.
- */
- if (gt->info.id)
- eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2759,9 +2750,6 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
- if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
-
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@@ -2775,12 +2763,6 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
- /*
- * This works in conjunction with eb_select_engine() to prevent
- * i915_vma_parked() from interfering while execbuf validates vmas.
- */
- if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0c5cdab278b6..1495b6074492 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -386,7 +386,7 @@ struct drm_i915_gem_object {
* and kernel mode driver for caching policy control after GEN12.
* In the meantime platform specific tables are created to translate
* i915_cache_level into pat index, for more details check the macros
- * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+ * defined i915/i915_pci.c, e.g. TGL_CACHELEVEL.
* For backward compatibility, this field contains values exactly match
* the entries of enum i915_cache_level for pre-GEN12 platforms (See
* LEGACY_CACHELEVEL), so that the PTE encode functions for these
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0ba955611dfb..8780aa243105 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_cache.h>
+#include <linux/vmalloc.h>
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38b72d86560f..c5e1c718a6d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -654,7 +654,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
@@ -663,8 +663,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
resource_size_t offset;
int err;
- GEM_WARN_ON(IS_DGFX(dev_priv));
- obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ GEM_WARN_ON(IS_DGFX(i915));
+ obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 258381d1c054..dfe0db8bb1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -14,14 +14,14 @@ struct drm_i915_gem_object;
#define i915_stolen_fb drm_mm_node
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
@@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a049ca0b7980..d9eb84c1d2f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -343,12 +343,12 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -374,9 +374,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -427,11 +427,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -447,10 +447,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
@@ -459,7 +459,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 61abfb505766..09b68713ab32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -463,13 +463,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
struct drm_file *file)
{
static struct lock_class_key __maybe_unused lock_class;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object __maybe_unused *obj;
int __maybe_unused ret;
u32 __maybe_unused handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -501,7 +501,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!to_gt(dev_priv)->vm->has_read_only)
+ if (!to_gt(i915)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 3ff3d8889c6c..84d41e6ccf05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
{
struct drm_i915_private *i915 = arg;
unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
static int igt_ppgtt_64K(void *arg)
{
struct drm_i915_private *i915 = arg;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
@@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
};
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
struct i915_ppgtt *ppgtt;
int err;
- dev_priv = mock_gem_device();
- if (!dev_priv)
+ i915 = mock_gem_device();
+ if (!i915)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(i915)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void)
out_put:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mock_destroy_device(dev_priv);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 10a7847f1b04..bac15196b4d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (gen < 12)
return true;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
src_pitch = t->width; /* in dwords */
if (src->tiling == CLIENT_TILING_Y) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
} else if (src->tiling == CLIENT_TILING_X) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
dst_pitch = t->width; /* in dwords */
if (dst->tiling == CLIENT_TILING_Y) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
} else if (dst->tiling == CLIENT_TILING_X) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
v += x;
swizzle = gt->ggtt->bit_6_swizzle_x;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
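
The repeated IP_VER(12, 50) to IP_VER(12, 55) bumps in these selftests are plain constant changes because the full graphics version is packed into a single comparable integer. A small hedged illustration follows; the helper name is hypothetical, and the hex values in the comment assume the usual IP_VER(ver, rel) == ((ver) << 8 | (rel)) packing.

	/* Hypothetical helper, for illustration only. */
	static bool has_tile4(struct drm_i915_private *i915)
	{
		/* Assuming IP_VER(12, 55) == 0xc37 and IP_VER(12, 50) == 0xc32,
		 * raising the cut-off to 12.55 drops the 12.50-12.54 IP
		 * steppings from the Xe_HP-and-newer branch while keeping
		 * everything above it. */
		return GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
	}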
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d684a70f2c04..65a931ea80e9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_selftest.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "mock_context.h"
#include "mock_dmabuf.h"
@@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915,
struct file *file;
u32 *vaddr;
int err = 0, i;
+ unsigned int mode;
file = mock_file(i915);
if (IS_ERR(file))
@@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_file;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b2a5882b8f81..075657018739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
#include "mock_dmabuf.h"
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index e1bf13e3d307..e9f65f27b53f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -189,9 +189,6 @@ static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
- if (IS_PONTEVECCHIO(engine->i915))
- return false;
-
/*
* So far platforms supported by i915 having flat ccs do not require
* AUX invalidation. Check also whether the engine requires it.
@@ -743,21 +740,25 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
}
/* Wa_14014475959:dg2 */
-#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
-static u32 ccs_semaphore_offset(struct i915_request *rq)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 hold_switchout_semaphore_offset(struct i915_request *rq)
{
return i915_ggtt_offset(rq->context->state) +
- (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+ (LRC_PPHWSP_PN * PAGE_SIZE) + HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET;
}
/* Wa_14014475959:dg2 */
-static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+static u32 *hold_switchout_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
int i;
*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
MI_ATOMIC_MOVE;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
*cs++ = 1;
@@ -773,7 +774,7 @@ static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
return cs;
@@ -790,8 +791,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
cs = gen12_emit_preempt_busywait(rq, cs);
/* Wa_14014475959:dg2 */
- if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
- cs = ccs_emit_wa_busywait(rq, cs);
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (intel_engine_uses_wa_hold_switchout(rq->engine))
+ cs = hold_switchout_emit_wa_busywait(rq, cs);
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -827,7 +830,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
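
A number of hunks in this series, including the one above, raise the Xe_HP cutoff from IP_VER(12, 50) (the removed XEHPSDV IP) to IP_VER(12, 55) so that DG2 and later platforms still take the Xe_HP paths. As a rough, standalone sketch of why a plain integer comparison is enough for these checks: the packing below is a local stand-in modelled on the driver's IP_VER macro and is an assumption, not a copy of the header.

#include <stdio.h>

/* Local stand-in for the driver's version packing (assumed: ver << 8 | release). */
#define IP_VER(ver, rel) (((ver) << 8) | (rel))

int main(void)
{
	unsigned int xehpsdv = IP_VER(12, 50);	/* IP removed by this series */
	unsigned int dg2     = IP_VER(12, 55);	/* new lower bound for Xe_HP paths */
	unsigned int mtl     = IP_VER(12, 70);

	/*
	 * With the release packed into the low bits, ordinary integer
	 * comparisons order the IPs, so raising the cutoff from (12, 50)
	 * to (12, 55) drops only XEHPSDV from the ">= Xe_HP" branches.
	 */
	printf("xehpsdv >= IP_VER(12, 55)? %d\n", xehpsdv >= IP_VER(12, 55));	/* 0 */
	printf("dg2     >= IP_VER(12, 55)? %d\n", dg2 >= IP_VER(12, 55));	/* 1 */
	printf("mtl     >= IP_VER(12, 55)? %d\n", mtl >= IP_VER(12, 55));	/* 1 */
	return 0;
}
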
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index fa46d2308b0e..398d60a66410 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -500,11 +500,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
static void
-xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- struct sgt_dma *iter,
- unsigned int pat_index,
- u32 flags)
+xehp_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ struct sgt_dma *iter,
+ unsigned int pat_index,
+ u32 flags)
{
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
@@ -741,8 +741,8 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma_res);
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
- xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
+ if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55))
+ xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
else
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
} else {
@@ -781,11 +781,11 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
-static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
struct i915_page_directory * const pdp =
@@ -810,15 +810,15 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
-static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
if (flags & PTE_LM)
- return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
- pat_index, flags);
+ return xehp_ppgtt_insert_entry_lm(vm, addr, offset,
+ pat_index, flags);
return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
@@ -961,6 +961,9 @@ static int gen8_init_rsvd(struct i915_address_space *vm)
struct i915_vma *vma;
int ret;
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |
@@ -1042,7 +1045,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
if (HAS_64K_PAGES(gt->i915))
- ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
+ ppgtt->vm.insert_page = xehp_ppgtt_insert_entry;
else
ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 7eccbd70d89f..ed95a7b57cbb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -130,6 +130,7 @@ struct intel_context {
#define CONTEXT_PERMA_PIN 11
#define CONTEXT_IS_PARKING 12
#define CONTEXT_EXITING 13
+#define CONTEXT_LOW_LATENCY 14
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1ade568ffbfa..5c8e9ee3b008 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -497,9 +497,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
- if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
- __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
- engine->class == RENDER_CLASS)
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
/* features common between engines sharing EUs */
@@ -589,7 +588,7 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -610,7 +609,7 @@ u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -679,7 +678,7 @@ void intel_engines_release(struct intel_gt *gt)
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
@@ -765,14 +764,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
* and bits have disable semantics.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
- if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else {
@@ -839,38 +838,6 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
}
}
-static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_gt_info *info = &gt->info;
- unsigned long meml3_mask;
- unsigned long quad;
-
- if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
- return;
-
- meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
- meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
-
- /*
- * Link Copy engines may be fused off according to meml3_mask. Each
- * bit is a quad that houses 2 Link Copy and two Sub Copy engines.
- */
- for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
- unsigned int instance = quad * 2 + 1;
- intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
- _BCS(instance));
-
- if (mask & info->engine_mask) {
- gt_dbg(gt, "bcs%u fused off\n", instance);
- gt_dbg(gt, "bcs%u fused off\n", instance + 1);
-
- info->engine_mask &= ~mask;
- }
- }
-}
-
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
@@ -889,7 +856,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
- engine_mask_apply_copy_fuses(gt);
/*
* The only use of the GSC CS is to load and communicate with the GSC
@@ -908,6 +874,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+ * Do not create the command streamer for CCS slices beyond the first.
+ * All workloads submitted to the first engine will be shared among
+ * all the slices.
+ *
+ * Once the user is allowed to customize the CCS mode, this check
+ * will need to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+ /* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Put back in the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
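
The DG2 branch added above keeps only the first fused-in CCS engine: it clears the whole CCS0..CCS3 span of the engine mask and then puts back the lowest surviving CCS. A minimal user-space sketch of that mask arithmetic follows; BIT/GENMASK and the CCS bit positions are simplified local stand-ins chosen for the example, not the driver's definitions.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Local, simplified stand-ins; the bit positions are assumptions for the demo. */
#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define CCS0		8	/* assumed positions of the four CCS engine bits */
#define CCS1		9
#define CCS2		10
#define CCS3		11
#define _CCS(n)		(CCS0 + (n))

int main(void)
{
	/* Pretend only CCS1 and CCS3 survived fusing. */
	unsigned int engine_mask = BIT(CCS1) | BIT(CCS3);
	unsigned int ccs_mask = (engine_mask & GENMASK(CCS3, CCS0)) >> CCS0;
	unsigned int first_ccs = ffs(ccs_mask) - 1;

	/* Same shape as the DG2 branch: drop every CCS bit, re-add the first. */
	engine_mask &= ~GENMASK(CCS3, CCS0);
	engine_mask |= BIT(_CCS(first_ccs));

	printf("first CCS instance: %u\n", first_ccs);	/* 1 */
	printf("resulting mask: 0x%x\n", engine_mask);	/* only the CCS1 bit set */
	return 0;
}
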
@@ -1193,7 +1176,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = xehp_regs;
num = ARRAY_SIZE(xehp_regs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 96bdb93a948d..fb7bff27b45a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
if (engine->park)
engine->park(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 960e6be2042f..ba55c059063d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -586,7 +586,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
-#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
+#define I915_ENGINE_USES_WA_HOLD_SWITCHOUT BIT(12)
unsigned int flags;
/*
@@ -696,10 +696,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
}
/* Wa_14014475959:dg2 */
+/* Wa_16019325821 */
+/* Wa_14019159160 */
static inline bool
-intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+intel_engine_uses_wa_hold_switchout(struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
}
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42aade0faf2d..21829439e686 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq)
/* Use a fixed tag for OA and friends */
GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
ce->lrc.ccid = ce->tag;
- } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
/* We don't need a strict matching tag, just different values */
unsigned int tag = ffs(READ_ONCE(engine->context_tag));
@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
intel_engine_add_retire(engine, ce->timeline);
ccid = ce->lrc.ccid;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
ccid &= XEHP_MAX_CONTEXT_HW_ID;
} else {
@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
promote = xehp_csb_parse(csb);
else if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb);
@@ -2898,7 +2898,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
+ intel_gt_reset_engine(engine);
}
/*
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
+
+ /* Reset upon idling, or we may delay the busy wakeup. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
@@ -3479,7 +3482,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
}
}
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
if (intel_engine_has_preemption(engine))
engine->emit_bb_start = xehp_emit_bb_start;
else
@@ -3582,7 +3585,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (GRAPHICS_VER(engine->i915) >= 11 &&
- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ec1cbe229f0e..0d0a0dc9f610 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -231,11 +231,8 @@ static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
-
- intel_guc_invalidate_tlb_guc(guc);
- }
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
+ intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -246,7 +243,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
+ if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
guc_ggtt_ct_invalidate(gt);
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 6d440de8ba01..1e925c75fb08 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -103,19 +103,6 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
-static const struct gsc_def gsc_def_xehpsdv[] = {
- {
- /* HECI1 not enabled on the device. */
- },
- {
- .name = "mei-gscfi",
- .bar = DG1_GSC_HECI2_BASE,
- .bar_size = GSC_BAR_LENGTH,
- .use_polling = true,
- .slow_firmware = true,
- }
-};
-
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
@@ -188,8 +175,6 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
- } else if (IS_XEHPSDV(i915)) {
- def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a425db5ed3a2..626b166e67ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
intel_uncore_posting_read(uncore,
XELPMP_RING_FAULT_REG);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
@@ -832,7 +832,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 608f5c872928..b5e114d284ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,17 +82,18 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
-#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
- engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
-
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -123,6 +124,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return guc_to_gt(guc)->i915;
}
+static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
+{
+ return &gt->uc.guc;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 000000000000..99b71bb7da0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return 0;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (CCS_MASK(gt) & BIT(cslice))
+ /*
+ * If the cslice is available, assign it
+ * to the first available CCS engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+ * ... otherwise, mark the cslice as
+ * unavailable if no CCS dispatches here
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ return mode;
+}
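
The new intel_gt_apply_ccs_mode() above builds a per-cslice routing value: every available cslice is pointed at the first CCS engine and every fused-off cslice is marked unavailable. Below is a rough user-space sketch of the resulting register value for a hypothetical GT with cslices 0 and 2 present; the XEHP_CCS_MODE_CSLICE helpers are local re-definitions assumed to match the ones added to intel_gt_regs.h later in this patch.

#include <stdio.h>

/* Local stand-ins mirroring the XEHP_CCS_MODE helpers added in this patch. */
#define XEHP_CCS_MODE_CSLICE_MASK	0x7	/* 3 bits per cslice */
#define XEHP_CCS_MODE_CSLICE_WIDTH	3
#define XEHP_CCS_MODE_CSLICE(cslice, ccs) \
	((ccs) << ((cslice) * XEHP_CCS_MODE_CSLICE_WIDTH))
#define I915_MAX_CCS	4

int main(void)
{
	unsigned int ccs_mask = 0x5;	/* hypothetical: cslices 0 and 2 present */
	unsigned int first_ccs = 0;	/* lowest set bit of ccs_mask */
	unsigned int mode = 0;
	int cslice;

	for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
		if (ccs_mask & (1u << cslice))
			/* route this cslice to the first CCS engine */
			mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
		else
			/* mark the fused-off cslice as unavailable */
			mode |= XEHP_CCS_MODE_CSLICE(cslice,
						     XEHP_CCS_MODE_CSLICE_MASK);
	}

	printf("XEHP_CCS_MODE value: 0x%03x\n", mode);	/* prints 0xe38 with these inputs */
	return 0;
}
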
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 000000000000..55547f2ff426
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 77fb57223465..ad4c51f18d3a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -68,9 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
- return guc_irq_handler(&gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(gt), iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
- return guc_irq_handler(&media_gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(media_gt), iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
@@ -442,7 +442,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(iir)) {
gen6_rps_irq_handler(&gt->rps, iir);
- guc_irq_handler(&gt->uc.guc, iir >> 16);
+ guc_irq_handler(gt_to_guc(gt), iir >> 16);
raw_reg_write(regs, GEN8_GT_IIR(2), iir);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index e253750a51c5..b8912bd6c08e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -57,51 +57,18 @@ static const struct intel_mmio_range icl_l3bank_steering_table[] = {
* are of a "GAM" subclass that has special rules. Thus we use a separate
* GAM table farther down for those.
*/
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+static const struct intel_mmio_range dg2_mslice_steering_table[] = {
{ 0x00DD00, 0x00DDFF },
{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
{},
};
-static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
static const struct intel_mmio_range dg2_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D880, 0x00D8FF },
{},
};
-/*
- * We have several types of MCR registers on PVC where steering to (0,0)
- * will always provide us with a non-terminated value. We'll stick them
- * all in the same table for simplicity.
- */
-static const struct intel_mmio_range pvc_instance0_steering_table[] = {
- { 0x004000, 0x004AFF }, /* HALF-BSLICE */
- { 0x008800, 0x00887F }, /* CC */
- { 0x008A80, 0x008AFF }, /* TILEPSMI */
- { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
- { 0x00B100, 0x00B3FF }, /* L3BANK */
- { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
- { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
- { 0x00DD00, 0x00DDFF }, /* BSLICE */
- { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
- { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
- { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
- { 0x024180, 0x0241FF }, /* HALF-BSLICE */
- {},
-};
-
static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
{ 0x000B00, 0x000BFF }, /* SQIDI */
{ 0x001000, 0x001FFF }, /* SQIDI */
@@ -185,22 +152,16 @@ void intel_gt_mcr_init(struct intel_gt *gt)
gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
gt->steering_table[DSS] = xelpg_dss_steering_table;
- } else if (IS_PONTEVECCHIO(i915)) {
- gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
} else if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[MSLICE] = dg2_mslice_steering_table;
gt->steering_table[LNCF] = dg2_lncf_steering_table;
/*
* No need to hook up the GAM table since it has a dedicated
* steering control register on DG2 and can use implicit
* steering.
*/
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- gt->steering_table[GAM] = xehpsdv_gam_steering_table;
} else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
@@ -821,8 +782,6 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
for (int i = 0; i < NUM_STEERING_TYPES; i++)
if (gt->steering_table[i])
report_steering_type(p, gt, i, dump_table);
- } else if (IS_PONTEVECCHIO(gt->i915)) {
- report_steering_type(p, gt, INSTANCE0, dump_table);
} else if (HAS_MSLICE_STEERING(gt->i915)) {
report_steering_type(p, gt, MSLICE, dump_table);
report_steering_type(p, gt, LNCF, dump_table);
@@ -842,10 +801,7 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance)
{
- if (IS_PONTEVECCHIO(gt->i915)) {
- *group = dss / GEN_DSS_PER_CSLICE;
- *instance = dss % GEN_DSS_PER_CSLICE;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index 01ac565a56a4..a67a4c35a4fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
* the topology, so we lookup the DSS ID directly in "slice 0."
*/
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
- GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? \
intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 220ac4f92edf..c08fdb65cc69 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -159,7 +159,7 @@ static bool reset_engines(struct intel_gt *gt)
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ return intel_gt_reset_all_engines(gt) == 0;
}
static void gt_sanitize(struct intel_gt *gt, bool force)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 7114c116e928..4fcba42cfe34 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -392,10 +392,6 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
-
intel_runtime_pm_put(uncore->rpm, wakeref);
}
@@ -538,7 +534,7 @@ static bool rps_eval(void *data)
{
struct intel_gt *gt = data;
- if (intel_guc_slpc_is_used(&gt->uc.guc))
+ if (intel_guc_slpc_is_used(gt_to_guc(gt)))
return false;
else
return HAS_RPS(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 50962cfd1353..e42b3a5d4e63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -718,44 +718,11 @@
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS REG_BIT(20)
-#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
#define HSUNIT_CLKGATE_DIS REG_BIT(8)
#define VSUNIT_CLKGATE_DIS REG_BIT(3)
-#define UNSLCGCTL9440 _MMIO(0x9440)
-#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
-#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
-#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
-#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
-
-#define UNSLCGCTL9444 _MMIO(0x9444)
-#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
-#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
-#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
-#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
-#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
-#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
-#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
-#define LTCDD_CLKGATE_DIS REG_BIT(10)
-
#define GEN11_SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE MCR_REG(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
@@ -765,9 +732,6 @@
#define L3_CLKGATE_DIS REG_BIT(16)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
-#define SCCGCTL94DC MCR_REG(0x94dc)
-#define CG3DDISURB REG_BIT(14)
-
#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
@@ -989,10 +953,6 @@
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
-#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c)
-#define XEHPC_HOSTCACHEEN REG_BIT(1)
-#define XEHPC_OVRLSCCC REG_BIT(0)
-
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
/* MOCS (Memory Object Control State) registers */
@@ -1046,20 +1006,9 @@
#define XEHP_L3SQCREG5 MCR_REG(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
-#define MLTICTXCTL MCR_REG(0xb170)
-#define TDONRENDER REG_BIT(2)
-
#define XEHP_L3SCQREG7 MCR_REG(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
-#define XEHPC_L3SCRUB MCR_REG(0xb18c)
-#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
-#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
-#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
-
-#define L3SQCREG1_CCS0 MCR_REG(0xb200)
-#define FLUSHALLNONCOH REG_BIT(5)
-
#define GEN11_GLBLINVL _MMIO(0xb404)
#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
@@ -1109,7 +1058,6 @@
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
-#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
#define COMP_MOD_CTRL MCR_REG(0xcf30)
#define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */
@@ -1185,7 +1133,6 @@
#define EU_PERF_CNTL4 PERF_REG(0xe45c)
#define GEN9_ROW_CHICKEN4 MCR_REG(0xe48c)
-#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
@@ -1202,7 +1149,6 @@
#define FLOW_CONTROL_ENABLE REG_BIT(15)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
-#define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
#define STALL_DOP_GATING_DISABLE REG_BIT(5)
#define THROTTLE_12_5 REG_GENMASK(4, 2)
@@ -1215,6 +1161,7 @@
#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define XELPG_DISABLE_TDL_SVHS_GATING REG_BIT(1)
#define GEN12_DISABLE_DOP_GATING REG_BIT(0)
#define RT_CTRL MCR_REG(0xe530)
@@ -1477,8 +1424,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
@@ -1679,11 +1632,6 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
-#define GT0_PACKAGE_ENERGY_STATUS _MMIO(0x250004)
-#define GT0_PACKAGE_RAPL_LIMIT _MMIO(0x250008)
-#define GT0_PACKAGE_POWER_SKU_UNIT _MMIO(0x250068)
-#define GT0_PLATFORM_ENERGY_STATUS _MMIO(0x25006c)
-
/*
* Standalone Media's non-engine GT registers are located at their regular GT
* offsets plus 0x380000. This extra offset is stored inside the intel_uncore
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index c0b202223940..d7784650e4d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -442,7 +442,7 @@ static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
}
@@ -452,7 +452,7 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
int err;
u32 val;
@@ -573,7 +573,6 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
intel_wakeref_t wakeref;
u32 mode;
@@ -581,20 +580,12 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
* Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
* GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
*/
- if (IS_XEHPSDV(gt->i915) &&
- slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
- /*
- * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
- * the media_ratio_mode, just return the cached media ratio
- */
- mode = slpc->media_ratio_mode;
- } else {
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
- mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
- }
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
}
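
With the XEHPSDV special case removed, media_freq_factor_show() always derives the ratio from bit 13 of GEN6_RPNSWREQ. A small standalone sketch of that mapping is below; the bit position follows the comment in the hunk above, while the 256-based factor scale is an assumption about how the sysfs value is reported.

#include <stdio.h>

/* Local stand-in: bit 13 of GEN6_RPNSWREQ carries the media ratio mode. */
#define GEN12_MEDIA_FREQ_RATIO	(1u << 13)

/* Assumed reporting scale: 1:1 -> 256, 1:2 -> 128 (i.e. factor * 256). */
static unsigned int media_ratio_factor(unsigned int rpnswreq)
{
	return (rpnswreq & GEN12_MEDIA_FREQ_RATIO) ? 256 : 128;
}

int main(void)
{
	printf("bit13=0 -> factor %u/256 (1:2)\n", media_ratio_factor(0));
	printf("bit13=1 -> factor %u/256 (1:1)\n",
	       media_ratio_factor(GEN12_MEDIA_FREQ_RATIO));
	return 0;
}
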
@@ -604,7 +595,7 @@ static ssize_t media_freq_factor_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
u32 factor, mode;
int err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7811a8c9da06..30b128b1fde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt);
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7c367ba8d9dc..b387146ede98 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -546,47 +546,6 @@ static const u8 gen12_rcs_offsets[] = {
END
};
-static const u8 xehp_rcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
-
- NOP(5),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- LRI(3, POSTED),
- REG(0x1b0),
- REG16(0x5a8),
- REG16(0x5ac),
-
- NOP(6),
- LRI(1, 0),
- REG(0x0c8),
-
- END
-};
-
static const u8 dg2_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
@@ -695,8 +654,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
return mtl_rcs_offsets;
else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_rcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
- return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 11)
@@ -719,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60;
@@ -733,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x80;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x70;
@@ -748,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x84;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74;
@@ -795,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
* simply to match the RCS context image layout.
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 576e5ef0289b..6f7af4077135 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -35,9 +35,9 @@ static bool engine_supports_migration(struct intel_engine_cs *engine)
return true;
}
-static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_toggle_pdes(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -52,9 +52,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
d->offset += SZ_2M;
}
-static void xehpsdv_insert_pte(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_insert_pte(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -120,7 +120,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
* 512 entry layout using 4K GTT pages. The other two windows just map
* lmem pages and must use the new compact 32 entry layout using 64K GTT
* pages, which ensures we can address any lmem object that the user
- * throws at us. We then also use the xehpsdv_toggle_pdes as a way of
+ * throws at us. We then also use the xehp_toggle_pdes as a way of
* just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
* compact layout for each of these page-tables, that fall within the
* [CHUNK_SIZE, 3 * CHUNK_SIZE) range.
@@ -209,12 +209,12 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
if (HAS_64K_PAGES(gt->i915)) {
vm->vm.foreach(&vm->vm, base, d.offset - base,
- xehpsdv_insert_pte, &d);
+ xehp_insert_pte, &d);
d.offset = base + CHUNK_SZ;
vm->vm.foreach(&vm->vm,
d.offset,
2 * CHUNK_SZ,
- xehpsdv_toggle_pdes, &d);
+ xehp_toggle_pdes, &d);
} else {
vm->vm.foreach(&vm->vm, base, d.offset - base,
insert_pte, &d);
@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8)
ring_sz = 8;
@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 25c1023eb5f9..d791d63d49b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -53,7 +53,6 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
-#define PVC_NUM_MOCS_ENTRIES 3
#define MTL_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
@@ -367,31 +366,6 @@ static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
L3_3_WB),
};
-static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
- /* wa_1608975824 */
- MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
-
- /* UC - Coherent; GO:L3 */
- MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
- /* UC - Coherent; GO:Memory */
- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Non-Coherent; GO:Memory */
- MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
- /* UC - Non-Coherent; GO:L3 */
- MOCS_ENTRY(4, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
-
- /* HW Reserved - SW program but never use. */
- MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
- MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
- MOCS_ENTRY(60, 0, L3_1_UC),
- MOCS_ENTRY(61, 0, L3_1_UC),
- MOCS_ENTRY(62, 0, L3_1_UC),
- MOCS_ENTRY(63, 0, L3_1_UC),
-};
-
static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
/* UC - Coherent; GO:L3 */
MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
@@ -404,17 +378,6 @@ static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
-static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
- /* Error */
- MOCS_ENTRY(0, 0, L3_3_WB),
-
- /* UC */
- MOCS_ENTRY(1, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(2, 0, L3_3_WB),
-};
-
static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
/* Error - Reserved for Non-Use */
MOCS_ENTRY(0,
@@ -501,25 +464,12 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915,
table->n_entries = MTL_NUM_MOCS_ENTRIES;
table->uc_index = 9;
table->unused_entries_index = 1;
- } else if (IS_PONTEVECCHIO(i915)) {
- table->size = ARRAY_SIZE(pvc_mocs_table);
- table->table = pvc_mocs_table;
- table->n_entries = PVC_NUM_MOCS_ENTRIES;
- table->uc_index = 1;
- table->wb_index = 2;
- table->unused_entries_index = 2;
} else if (IS_DG2(i915)) {
table->size = ARRAY_SIZE(dg2_mocs_table);
table->table = dg2_mocs_table;
table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 3;
- } else if (IS_XEHPSDV(i915)) {
- table->size = ARRAY_SIZE(xehpsdv_mocs_table);
- table->table = xehpsdv_mocs_table;
- table->uc_index = 2;
- table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->unused_entries_index = 5;
} else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
@@ -670,7 +620,7 @@ static void init_l3cc_table(struct intel_gt *gt,
intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 8f4b3c8af09c..c864d101faf9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* thus allowing GuC to control RC6 entry/exit fully instead.
* We will not set the HW ENABLE and EI bits
*/
- if (!intel_guc_rc_enable(&gt->uc.guc))
+ if (!intel_guc_rc_enable(gt_to_guc(gt)))
rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
else
rc6->ctl_enable =
@@ -569,7 +569,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
struct intel_gt *gt = rc6_to_gt(rc6);
/* Take control of RC6 back from GuC */
- intel_guc_rc_disable(&gt->uc.guc);
+ intel_guc_rc_disable(gt_to_guc(gt));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c8e9aa41fdea..6161f7a3ff70 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -764,7 +764,7 @@ wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
HECI_H_GS1_ER_PREP, 0);
}
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -879,8 +879,17 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- /* For GuC mode, ensure submission is disabled before stopping ring */
- intel_uc_reset_prepare(&gt->uc);
+ /*
+ * For GuC mode with submission enabled, ensure submission
+ * is disabled before stopping the ring.
+ *
+ * For GuC mode with submission disabled, do not sanitize GuC here;
+ * that is done after the engine reset, because reset_prepare() is
+ * followed by an engine reset which in this mode requires GuC to
+ * process any CSB FIFO entries generated by the resets.
+ */
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
@@ -978,7 +987,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
@@ -1089,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ ok = intel_gt_reset_all_engines(gt) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
@@ -1133,10 +1142,10 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
}
if (err)
return err;
@@ -1227,6 +1236,9 @@ void intel_gt_reset(struct intel_gt *gt,
intel_overlay_reset(gt->i915);
+ /* sanitize uC after engine reset */
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1270,7 +1282,30 @@ error:
goto finish;
}
-static int intel_gt_reset_engine(struct intel_engine_cs *engine)
+/**
+ * intel_gt_reset_all_engines() - Reset all engines in the given gt.
+ * @gt: the GT to reset all engines for.
+ *
+ * This function resets all engines within the given gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_all_engines(struct intel_gt *gt)
+{
+ return __intel_gt_reset(gt, ALL_ENGINES);
+}
+
+/**
+ * intel_gt_reset_engine() - Reset a specific engine within a gt.
+ * @engine: engine to be reset.
+ *
+ * This function resets the specified engine within a gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index f615b30b81c5..c00de353075c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -54,7 +54,8 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
+int intel_gt_reset_engine(struct intel_engine_cs *engine);
+int intel_gt_reset_all_engines(struct intel_gt *gt);
int intel_reset_guc(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 4feef874e6d6..c9cb2a391942 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -52,7 +52,7 @@ static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- return &gt->uc.guc.slpc;
+ return &gt_to_guc(gt)->slpc;
}
static bool rps_uses_slpc(struct intel_rps *rps)
@@ -1013,6 +1013,10 @@ void intel_rps_boost(struct i915_request *rq)
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
+ /* Waitboost is not needed for contexts marked with a Freq hint */
+ if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags))
+ return;
+
/* Serializes with i915_request_retire() */
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
@@ -1086,11 +1090,7 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
- if (IS_PONTEVECCHIO(i915))
- return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
- else if (IS_XEHPSDV(i915))
- return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
- else if (IS_GEN9_LP(i915))
+ if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
else
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6a3246240e81..c8fadf58d836 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -214,13 +214,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt)
int num_compute_regs, num_geometry_regs;
int eu;
- if (IS_PONTEVECCHIO(gt->i915)) {
- num_geometry_regs = 0;
- num_compute_regs = 2;
- } else {
- num_geometry_regs = 1;
- num_compute_regs = 1;
- }
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
/*
* The concept of slice has been removed in Xe_HP. To be compatible
@@ -642,7 +637,7 @@ void intel_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
@@ -851,7 +846,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
{
if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
sseu_print_xehp_topology(sseu, p);
else
sseu_print_hsw_topology(sseu, p);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 4bb13d1890e3..756e9ebbc725 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -132,7 +132,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index d67d44611c28..5a0f1b279a80 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,12 +10,15 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "display/intel_fbc_regs.h"
+
/**
* DOC: Hardware workarounds
*
@@ -51,7 +54,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ * should be implemented in general_render_compute_wa_init(). The settings
+ * about the CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -258,12 +262,6 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
-wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
-{
- wa_mcr_write_clr_set(wal, reg, ~0, set);
-}
-
-static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
@@ -918,12 +916,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
- else if (IS_PONTEVECCHIO(i915))
- ; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
- else if (IS_XEHPSDV(i915))
- ; /* noop; none at this time */
else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -1350,9 +1344,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
gt->steering_table[MSLICE] = NULL;
}
- if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
- gt->steering_table[GAM] = NULL;
-
slice = __ffs(slice_mask);
subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
GEN_DSS_PER_GSLICE;
@@ -1380,20 +1371,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- unsigned int dss;
-
- /*
- * Setup implicit steering for COMPUTE and DSS ranges to the first
- * non-fused-off DSS. All other types of MCR registers will be
- * explicitly steered.
- */
- dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
- __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
-}
-
-static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1520,76 +1497,6 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- xehp_init_mcr(gt, wal);
-
- /* Wa_1409757795:xehpsdv */
- wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
-
- /* Wa_16011155590:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- TSGUNIT_CLKGATE_DIS);
-
- /* Wa_14011780169:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
- wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
- GAMTLBVDBOX7_CLKGATE_DIS |
- GAMTLBVDBOX6_CLKGATE_DIS |
- GAMTLBVDBOX5_CLKGATE_DIS |
- GAMTLBVDBOX4_CLKGATE_DIS |
- GAMTLBVDBOX3_CLKGATE_DIS |
- GAMTLBVDBOX2_CLKGATE_DIS |
- GAMTLBVDBOX1_CLKGATE_DIS |
- GAMTLBVDBOX0_CLKGATE_DIS |
- GAMTLBKCR_CLKGATE_DIS |
- GAMTLBGUC_CLKGATE_DIS |
- GAMTLBBLT_CLKGATE_DIS);
- wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
- GAMTLBGFXA1_CLKGATE_DIS |
- GAMTLBCOMPA0_CLKGATE_DIS |
- GAMTLBCOMPA1_CLKGATE_DIS |
- GAMTLBCOMPB0_CLKGATE_DIS |
- GAMTLBCOMPB1_CLKGATE_DIS |
- GAMTLBCOMPC0_CLKGATE_DIS |
- GAMTLBCOMPC1_CLKGATE_DIS |
- GAMTLBCOMPD0_CLKGATE_DIS |
- GAMTLBCOMPD1_CLKGATE_DIS |
- GAMTLBMERT_CLKGATE_DIS |
- GAMTLBVEBOX3_CLKGATE_DIS |
- GAMTLBVEBOX2_CLKGATE_DIS |
- GAMTLBVEBOX1_CLKGATE_DIS |
- GAMTLBVEBOX0_CLKGATE_DIS);
- }
-
- /* Wa_16012725990:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
-
- /* Wa_14011060649:xehpsdv */
- wa_14011060649(gt, wal);
-
- /* Wa_14012362059:xehpsdv */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_14014368820:xehpsdv */
- wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
- INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
-
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-}
-
-static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
xehp_init_mcr(gt, wal);
@@ -1632,27 +1539,10 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- pvc_init_mcr(gt, wal);
-
- /* Wa_14015795083 */
- wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
-
- /* Wa_18018781329 */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
-}
-
-static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1724,12 +1614,6 @@ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
- if (IS_PONTEVECCHIO(gt->i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
if (IS_DG2(gt->i915)) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
@@ -1754,12 +1638,8 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
- else if (IS_PONTEVECCHIO(i915))
- pvc_gt_workarounds_init(gt, wal);
else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
- else if (IS_XEHPSDV(i915))
- xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -2177,30 +2057,6 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
}
}
-static void blacklist_trtt(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- /*
- * Prevent read/write access to [0x4400, 0x4600) which covers
- * the TRTT range across all engines. Note that normally userspace
- * cannot access the other engines' trtt control, but for simplicity
- * we cover the entire range on each engine.
- */
- whitelist_reg_ext(w, _MMIO(0x4400),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
- whitelist_reg_ext(w, _MMIO(0x4500),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
-}
-
-static void pvc_whitelist_build(struct intel_engine_cs *engine)
-{
- /* Wa_16014440446:pvc */
- blacklist_trtt(engine);
-}
-
static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -2227,12 +2083,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
; /* none yet */
else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
- else if (IS_PONTEVECCHIO(i915))
- pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
- else if (IS_XEHPSDV(i915))
- ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2813,10 +2665,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
- /* Wa_14014999345:pvc */
- wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
- }
+ /* boilerplate for any CCS engine workaround */
}
/*
@@ -2849,10 +2698,34 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+ u32 mode;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, leads to
+ * significant challenges in utilizing load balancing among the
+ * CCS slices. Consequently, an architectural decision has been
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * After disabling automatic load balancing, we need to assign all
+ * slices to a single CCS. We will call it CCS mode 1.
+ */
+ mode = intel_gt_apply_ccs_mode(gt);
+ wa_masked_en(wal, XEHP_CCS_MODE, mode);
+}
+
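The wa_masked_en() calls above rely on the usual masked-register convention for registers such as GEN12_RCU_MODE and XEHP_CCS_MODE: the upper 16 bits of the written value select which of the lower 16 bits take effect, which is also why these workarounds need no read-modify-write. A minimal sketch of that encoding; the helper name is made up, and the real driver builds such values with the _MASKED_BIT_ENABLE() family.

/* Illustrative only: 'bits' must fit in the low 16 bits of the register. */
static inline u32 sketch_masked_bit_enable(u32 bits)
{
	return (bits << 16) | bits;	/* write-enable mask in the high half, value in the low half */
}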
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2891,10 +2764,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
+ /* Wa_14020495402 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
+ }
+
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
/*
@@ -2922,21 +2799,15 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
- IS_PONTEVECCHIO(i915) ||
IS_DG2(i915)) {
/* Wa_22014226127 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
}
- if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
- /* Wa_16015675438:dg2,pvc */
- wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
- }
-
- if (IS_DG2(i915)) {
/*
* Wa_16011620976:dg2_g11
* Wa_22015475538:dg2
@@ -2972,22 +2843,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0 /* write-only, so skip validation */,
true);
}
-
- if (IS_XEHPSDV(i915)) {
- /* Wa_1409954639 */
- wa_mcr_masked_en(wal,
- GEN8_ROW_CHICKEN,
- SYSTOLIC_DOP_CLOCK_GATING_DIS);
-
- /* Wa_1607196519 */
- wa_mcr_masked_en(wal,
- GEN9_ROW_CHICKEN4,
- GEN12_DISABLE_GRF_CLEAR);
-
- /* Wa_14010449647:xehpsdv */
- wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
- }
}
static void
@@ -3003,8 +2858,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
@@ -3068,7 +2925,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct i915_range *mcr_ranges;
int i;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
mcr_ranges = mcr_ranges_xehp;
else if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
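The IP_VER(12, 50) to IP_VER(12, 55) bumps in this file (and in the GuC/uc files below) rely on i915 packing the full graphics version as (ver << 8 | rel), so the comparison still covers every Xe_HP-and-later IP now that the 12.50 and 12.60 platforms (XeHP SDV and PVC) are removed and DG2 at 12.55 is the oldest remaining Xe_HP part. A small sketch of the packing; the function name is made up for illustration.

/* Illustrative only: mirrors how IP_VER() packs a version for numeric comparison. */
static inline bool sketch_gfx_ip_at_least(u32 ver, u32 rel, u32 min_ver, u32 min_rel)
{
	return ((ver << 8) | rel) >= ((min_ver << 8) | min_rel);
}

/* e.g. sketch_gfx_ip_at_least(12, 70, 12, 55) is true; 12.10 against 12.55 is false. */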
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0dd4d00ee894..9ce8ff1c04fe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -319,7 +319,7 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
timeout = 0;
- intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
+ intel_wedge_on_timeout(&w, gt, HZ / 5 /* 200ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index f40de408cd3a..2cfc23c58e90 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -281,7 +281,7 @@ static int igt_atomic_reset(void *arg)
awake = reset_prepare(gt);
p->critical_section_begin();
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
p->critical_section_end();
reset_finish(gt, awake);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 302d0540295d..4ecc4ae74a54 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -53,7 +53,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
int err;
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
err = slpc_set_max_freq(slpc, freq);
if (err) {
@@ -182,7 +182,7 @@ static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct {
u64 power;
int freq;
@@ -262,7 +262,7 @@ static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int run_test(struct intel_gt *gt, int test_type)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index bccc3a1200bc..1fb6ff77fd89 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 811add10c30d..c34674e797c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -207,6 +207,27 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+struct slpc_context_frequency_request {
+ u32 frequency_request:16;
+ u32 reserved:12;
+ u32 is_compute:1;
+ u32 ignore_busyness:1;
+ u32 is_minimum:1;
+ u32 is_predefined:1;
+} __packed;
+
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+
+struct slpc_optimized_strategies {
+ u32 compute:1;
+ u32 async_flip:1;
+ u32 media:1;
+ u32 vsync_flip:1;
+ u32 reserved:28;
+} __packed;
+
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
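As a quick cross-check of the bitfield layout added above: frequency_request (16 bits) plus reserved (12 bits) occupy bits 0-27, so the is_compute field lands on bit 28, matching the standalone SLPC_CTX_FREQ_REQ_IS_COMPUTE mask of REG_BIT(28). A compile-time sketch of that arithmetic; the macro name is made up for illustration.

#include <linux/build_bug.h>

/* Illustrative only: bit position of is_compute = 16 (frequency_request) + 12 (reserved). */
#define SKETCH_IS_COMPUTE_BIT	(16 + 12)

static_assert(SKETCH_IS_COMPUTE_BIT == 28);	/* agrees with REG_BIT(28) above */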
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index dabeaf4f245f..00d6402333f8 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -36,6 +36,7 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 58012edd4eb0..bebf28e3c479 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -101,4 +101,11 @@ enum {
GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
};
+/*
+ * Workaround keys:
+ */
+enum {
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e2e42b3e0d5d..3b69bc6616bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -298,7 +298,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- intel_guc_write_barrier(&gt->uc.guc);
+ intel_guc_write_barrier(gt_to_guc(gt));
i915_gem_object_unpin_map(gsc->fw.obj);
@@ -351,7 +351,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 40817ebcca71..a7d5465655f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
+ GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2b450c43bbd7..5e60a34692af 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */
@@ -294,6 +294,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ flags |= GUC_WA_RCS_CCS_SWITCHOUT;
+
/*
* Wa_14012197797
* Wa_22011391025
@@ -315,15 +320,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_G11(gt->i915))
flags |= GUC_WA_CONTEXT_ISOLATION;
- /* Wa_16015675438 */
- if (!RCS_MASK(gt))
- flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
-
- /* Wa_14018913170 */
- if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
- if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- }
+ /*
+ * Wa_14018913170: Applicable to all platforms supported by i915, so
+ * don't bother testing for all X/Y/Z platforms explicitly.
+ */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
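The firmware gate above compares GUC_FIRMWARE_VER() against MAKE_GUC_VER(70, 7, 0); that macro packs major/minor/patch into a single integer so versions compare numerically. A sketch of the packing; the helper name is made up for illustration.

/* Illustrative only: mirrors MAKE_GUC_VER()'s major.minor.patch packing. */
static inline u32 sketch_guc_ver(u32 maj, u32 min, u32 pat)
{
	return (maj << 16) | (min << 8) | pat;	/* 70.7.0 packs to 0x460700 */
}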
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index be70c46604b4..57b903132776 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -204,6 +204,8 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_waklv_size: size of workaround KLVs */
+ u32 ads_waklv_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index f7372f736a77..c606bb5e3b7b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -46,6 +46,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_waklv_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_waklv_size);
+}
+
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
@@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_capture_offset(struct intel_guc *guc)
+static u32 guc_ads_waklv_offset(struct intel_guc *guc)
{
u32 offset;
@@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_waklv_offset(guc) +
+ guc_ads_waklv_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
@@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
@@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
-#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
@@ -796,6 +815,65 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
+/* Wa_14019159160 */
+static u32 guc_waklv_ra_mode(struct intel_guc *guc, u32 offset, u32 remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+
+ size = sizeof(klv_entry);
+ GEM_BUG_ON(remain < size);
+
+ iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, size);
+
+ return size;
+}
+
+static void guc_waklv_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 offset, addr_ggtt, remain, size;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+ offset = guc_ads_waklv_offset(guc);
+ remain = guc_ads_waklv_size(guc);
+
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ size = guc_waklv_ra_mode(guc, offset, remain);
+ offset += size;
+ remain -= size;
+ }
+
+ size = guc_ads_waklv_size(guc) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt);
+ ads_blob_write(guc, ads.wa_klv_addr_hi, 0);
+ ads_blob_write(guc, ads.wa_klv_size, size);
+}
+
+static int guc_prep_waklv(struct intel_guc *guc)
+{
+ /* Fudge something chunky for now: */
+ return PAGE_SIZE;
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
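The RA-mode entry written by guc_waklv_ra_mode() above is a zero-length KLV: a single header dword with the key in the top 16 bits and the payload length, in dwords, in the bottom 16. For comparison, a hedged sketch of an entry that does carry a payload, reusing the same GUC_KLV_0_* masks and iosys_map_memcpy_to(); the key and data values here are invented for illustration.

/* Illustrative only: a KLV whose value is one dword (key/data are made up). */
static void sketch_emit_klv(struct intel_guc *guc, u32 offset)
{
	u32 klv_entry[] = {
		FIELD_PREP(GUC_KLV_0_KEY, 0x9abc) |	/* hypothetical key */
		FIELD_PREP(GUC_KLV_0_LEN, 1),		/* one data dword follows */
		0x12345678,				/* the data dword itself */
	};

	iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, sizeof(klv_entry));
}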
@@ -843,6 +921,9 @@ static void __guc_ads_init(struct intel_guc *guc)
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
+ /* Workaround KLV list */
+ guc_waklv_init(guc);
+
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
@@ -886,6 +967,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_capture_size = ret;
+ /* And don't forget the workaround KLVs: */
+ ret = guc_prep_waklv(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_waklv_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
@@ -961,7 +1048,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a1cd40d80517..9547fff672bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -51,6 +51,7 @@
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_EIR(0), 0, 0, "EIR" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
@@ -80,9 +81,6 @@
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
-#define COMMON_BASE_HAS_EU \
- { EIR, 0, 0, "EIR" }
-
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
@@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
@@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
@@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!cap || !ee->engine)
return -ENODEV;
- guc = &ee->engine->gt->uc.guc;
+ guc = gt_to_guc(ee->engine->gt);
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
@@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
if (!gt || !ce || !engine)
return false;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return false;
@@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
if (!gt || !ee || !ce)
return;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 52332bb14339..23f54c84cbab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
- if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
@@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
*success = false;
return true;
}
@@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -EPERM;
break;
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ guc_info(guc, "invalid w/a KLV entry\n");
+ ret = -EINVAL;
+ break;
+
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 8ae1846431da..14797e80bc92 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -96,8 +96,9 @@
#define GUC_WA_GAM_CREDITS BIT(10)
#define GUC_WA_DUAL_QUEUE BIT(11)
#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
-#define GUC_WA_CONTEXT_ISOLATION BIT(15)
#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
@@ -430,7 +431,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index cc9569af7f0c..b67a15f74276 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -111,7 +111,7 @@ static bool has_table(struct drm_i915_private *i915)
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!has_table(gt->i915))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 3e681ab6fbf9..706fffca698b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -537,6 +537,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -711,6 +725,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Enable SLPC Optimized Strategy for compute */
+ intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 6ac6503c39d4..1cb5fd44f05c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -45,5 +45,6 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d45..0eaa1064242c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce)
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
- return &ce->engine->gt->uc.guc;
+ return gt_to_guc(ce->engine->gt);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -1246,7 +1246,7 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
@@ -1311,7 +1311,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag, and gt->reset.mutex can be checked with mutex_is_locked().
+ * So just use those. Note that testing both is required due to the hideously
+ * complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -1576,7 +1577,7 @@ static void guc_fini_engine_stats(struct intel_guc *guc)
void intel_guc_busyness_park(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
if (!guc_submission_initialized(guc))
return;
@@ -1603,7 +1604,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
unsigned long flags;
ktime_t unused;
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
-
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
@@ -2194,7 +2189,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
- struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(rq->engine->gt);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -2220,11 +2215,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
order_base_2(ce->parallel.number_children
+ 1));
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = ida_alloc_range(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids - 1,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
@@ -2247,8 +2241,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
- ida_simple_remove(&guc->submission_state.guc_ids,
- ce->guc_id.id);
+ ida_free(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
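The conversion above moves from the deprecated ida_simple_get()/ida_simple_remove() pair to ida_alloc_range()/ida_free(). The one semantic difference worth noting is that ida_alloc_range() takes an inclusive maximum, which is why the upper bound becomes num_guc_ids - 1. A minimal stand-alone usage sketch; the IDA and function names are made up for illustration.

#include <linux/idr.h>

static DEFINE_IDA(sketch_ida);

/* Illustrative only: allocate an id from [8, 255] (max inclusive), then release it. */
static int sketch_alloc_and_free(void)
{
	int id = ida_alloc_range(&sketch_ida, 8, 255, GFP_KERNEL);

	if (id < 0)
		return id;

	ida_free(&sketch_ida, id);
	return 0;
}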
@@ -2645,6 +2639,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_CONTEXT_POLICY_ADD
@@ -2660,10 +2655,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
+ u32 slpc_ctx_freq_req = 0;
unsigned long flags;
int ret;
@@ -2675,11 +2671,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
+ slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
@@ -2736,7 +2736,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
@@ -2805,7 +2805,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
@@ -2868,7 +2868,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
@@ -4496,7 +4496,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
if (engine->class == COMPUTE_CLASS)
if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
+
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4507,7 +4513,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start;
}
@@ -4549,7 +4555,7 @@ static void guc_sched_engine_destroy(struct kref *kref)
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
@@ -5308,7 +5314,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
@@ -5370,7 +5376,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
unsigned long index;
unsigned long flags;
@@ -5822,7 +5828,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve)
return ERR_PTR(-ENOMEM);
- guc = &siblings[0]->gt->uc.guc;
+ guc = gt_to_guc(siblings[0]->gt);
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 0945b177d5f9..2d9152eb7282 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -385,7 +385,7 @@ int intel_huc_init(struct intel_huc *huc)
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
- vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
@@ -540,7 +540,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c6..7a63abf8f644 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
-
- /* XEHPSDV and PVC do not use HuC */
- if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
- i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -637,6 +633,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 756093eaf2ad..d80278eb45d7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -807,7 +807,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
- struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
@@ -1209,7 +1209,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index c900aac85adb..68feb55654f7 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -144,7 +144,7 @@ err:
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4a3f3e093b0..4be8cb65fb7e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -50,6 +50,7 @@
#include "trace.h"
#include "display/intel_display.h"
+#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0c5dfb788eb..2b7df7fcf369 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,8 +36,10 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_sprite_regs.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 313efdabee57..4140da68aabb 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -34,11 +34,14 @@
*/
#include <uapi/drm/drm_fourcc.h>
-#include "i915_drv.h"
+
#include "gvt.h"
+#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_sprite_regs.h"
+
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
int drm_format; /* Pixel format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4dd52ac2043e..d800d267f0e9 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -30,6 +30,7 @@
#include <linux/firmware.h>
#include <linux/crc32.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 094fca9b0e73..58cca4906f41 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -39,6 +39,7 @@
#include "trace.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index efcb00472be2..22fbddbe3e23 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -41,6 +41,7 @@
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -49,9 +50,11 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
@@ -2763,15 +2766,15 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5b5def6ddef7..e16e0d4c9534 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -33,10 +33,12 @@
*
*/
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_dpio_phy.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 08ad1bd651f1..63c751ca4119 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -34,6 +34,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include <linux/vmalloc.h>
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 990eaa029d9c..bc717cf544e4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/sort.h>
#include <linux/string_helpers.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include "display/intel_display_params.h"
@@ -156,18 +157,6 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
case 4: return " WB (2-Way Coh)";
default: return " not defined";
}
- } else if (IS_PONTEVECCHIO(i915)) {
- switch (obj->pat_index) {
- case 0: return " UC";
- case 1: return " WC";
- case 2: return " WT";
- case 3: return " WB";
- case 4: return " WT (CLOS1)";
- case 5: return " WB (CLOS1)";
- case 6: return " WT (CLOS2)";
- case 7: return " WT (CLOS2)";
- default: return " not defined";
- }
} else if (GRAPHICS_VER(i915) >= 12) {
switch (obj->pat_index) {
case 0: return " WB";
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 8bca02025e09..33d2dcb0de65 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/debugfs.h>
#include "i915_debugfs_params.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 9ee902d5b72c..161b21eff694 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -202,7 +202,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
unsigned int i;
for_each_gt(gt, i915, i)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
}
@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_cleanup_modeset2;
ret = intel_pxp_init(i915);
- if (ret != -ENODEV)
+ if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
@@ -920,27 +920,6 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-static void i915_driver_lastclose(struct drm_device *dev)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- intel_fbdev_restore_mode(i915);
-
- vga_switcheroo_process_delayed_switch();
-}
-
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1831,7 +1810,6 @@ static const struct drm_driver i915_drm_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.release = i915_driver_release,
.open = i915_driver_open,
- .lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
.show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e81b3b2858ac..ee0d7d5f135d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -235,25 +235,17 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
- bool display_irqs_enabled;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
- union {
- u32 irq_mask;
- u32 de_irq_mask[I915_MAX_PIPES];
- };
- u32 pipestat_irq_mask[I915_MAX_PIPES];
+ u32 irq_mask;
bool preserve_bios_swizzle;
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_preferred_vco_freq;
- unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -350,9 +342,6 @@ struct drm_i915_private {
struct intel_pxp *pxp;
- /* For i915gm/i945gm vblank irq workaround */
- u8 vblank_enabled;
-
bool irq_enabled;
struct i915_pmu pmu;
@@ -544,9 +533,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
#define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
-#define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
-#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
#define IS_LUNARLAKE(i915) 0
@@ -621,17 +608,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
- (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_PVC_BD_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_BASEDIE_STEP(__i915, since, until))
-
-#define IS_PVC_CT_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5c3fec63cb4c..a62405787e77 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -155,12 +155,18 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_FREQ_HINT:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ value = 1;
+ else
+ value = -EINVAL;
+ break;
case I915_PARAM_HAS_CONTEXT_ISOLATION:
value = intel_engines_has_context_isolation(i915);
break;
case I915_PARAM_SLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
value = sseu->slice_mask;
@@ -169,7 +175,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_SUBSLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
/* Only copy bits from the first slice */
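The new I915_PARAM_HAS_CONTEXT_FREQ_HINT above is only reported as available when GuC submission is in use; on other configurations the ioctl succeeds but returns -EINVAL in the value. A hedged userspace sketch of probing it through the standard GETPARAM ioctl; the function name and fd handling are assumed.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative only: returns 1 if the context frequency hint is supported. */
static int sketch_has_context_freq_hint(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_CONTEXT_FREQ_HINT,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* older kernels reject the unknown parameter */

	return value == 1;
}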
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a0b784ebaddd..625b3c024540 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
@@ -1245,8 +1246,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
ee->fault_reg = intel_uncore_read(engine->uncore,
XELPMP_RING_FAULT_REG);
-
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
XEHP_RING_FAULT_REG);
else if (GRAPHICS_VER(i915) >= 12)
@@ -1852,7 +1852,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0);
gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8c3f443c8347..49db3e09826c 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
/*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
- ei->reg_val_prev = reg_val;
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ei->reg_val_prev = reg_val;
- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
- hwmon->scl_shift_energy);
- mutex_unlock(&hwmon->hwmon_lock);
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
- goto unlock;
-
- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
@@ -738,12 +739,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
- } else if (IS_XEHPSDV(i915)) {
- hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT;
- hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
- hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT;
- hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS;
- hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
@@ -792,7 +787,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!IS_DGFX(i915))
return;
- hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
@@ -818,14 +813,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
hwm_get_preregistration_info(i915);
/* hwmon_dev points to device hwmon<i> */
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
- ddat,
- &hwm_chip_info,
- hwm_groups);
- if (IS_ERR(hwmon_dev)) {
- i915->hwmon = NULL;
- return;
- }
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
+ ddat,
+ &hwm_chip_info,
+ hwm_groups);
+ if (IS_ERR(hwmon_dev))
+ goto err;
ddat->hwmon_dev = hwmon_dev;
@@ -838,16 +831,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
continue;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
- ddat_gt,
- &hwm_gt_chip_info,
- NULL);
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
+ ddat_gt,
+ &hwm_gt_chip_info,
+ NULL);
if (!IS_ERR(hwmon_dev))
ddat_gt->hwmon_dev = hwmon_dev;
}
+ return;
+err:
+ i915_hwmon_unregister(i915);
}
void i915_hwmon_unregister(struct drm_i915_private *i915)
{
- fetch_and_zero(&i915->hwmon);
+ struct i915_hwmon *hwmon = i915->hwmon;
+ struct intel_gt *gt;
+ int i;
+
+ if (!hwmon)
+ return;
+
+ for_each_gt(gt, i915, i)
+ if (hwmon->ddat_gt[i].hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
+
+ if (hwmon->ddat.hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat.hwmon_dev);
+
+ mutex_destroy(&hwmon->hwmon_lock);
+
+ kfree(i915->hwmon);
+ i915->hwmon = NULL;
}
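
For context on the hwm_energy() hunk above: the hardware energy register is a 32-bit counter that wraps, so each read folds the delta since the previous sample into a 64-bit accumulator, and the diff now takes the runtime-PM wakeref before hwmon_lock around that read. The following is a minimal, standalone sketch of just the accumulation pattern, not the driver code; struct energy_acc and sample_energy are invented names, and the scaling step (mul_u64_u32_shr) is left out.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the driver's wrap handling for a 32-bit counter. */
struct energy_acc {
	uint64_t accum;	/* monotonically growing total, in raw counter units */
	uint32_t prev;	/* last raw register value seen */
};

static void sample_energy(struct energy_acc *ei, uint32_t reg_val)
{
	if (reg_val >= ei->prev)
		ei->accum += reg_val - ei->prev;	/* no wrap since last read */
	else
		ei->accum += (UINT32_MAX - ei->prev) + reg_val;	/* counter wrapped */
	ei->prev = reg_val;
}

int main(void)
{
	struct energy_acc ei = { .accum = 0, .prev = 0xfffffff0u };

	sample_energy(&ei, 0x10);	/* wrapped read; adds 0x1f with this formula */
	printf("accumulated: %llu\n", (unsigned long long)ei.accum);
	return 0;
}

The sketch deliberately uses the same UINT_MAX-based wrap formula as the hunk; it assumes at most one wrap between samples, which holds as long as the counter is read often enough.
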
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8130f043693b..678d632ed043 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -702,7 +702,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -767,7 +767,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -784,7 +784,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -838,7 +838,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index ba82277254b7..cc41974cee74 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,6 +25,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index de43048543e8..8c00169e3ab7 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -108,9 +108,6 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400,
i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
-i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
- "DMC firmware path to use instead of the default one");
-
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 1315d7fac850..2eb3f2115ff2 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -51,7 +51,6 @@ struct drm_printer;
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
- param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 8b4fdeabb12a..405ca17a990b 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,6 +38,9 @@
#include "i915_reg.h"
#include "intel_pci_config.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for device info");
+
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
.__runtime.graphics.ip.ver = (x), \
@@ -59,14 +62,6 @@
[I915_CACHE_WT] = 2, \
}
-#define PVC_CACHELEVEL \
- .cachelevel_to_pat = { \
- [I915_CACHE_NONE] = 0, \
- [I915_CACHE_LLC] = 3, \
- [I915_CACHE_L3_LLC] = 3, \
- [I915_CACHE_WT] = 2, \
- }
-
#define MTL_CACHELEVEL \
.cachelevel_to_pat = { \
[I915_CACHE_NONE] = 2, \
@@ -705,8 +700,6 @@ static const struct intel_device_info adl_p_info = {
I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .__runtime.graphics.ip.ver = 12, \
- .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
TGL_CACHELEVEL, \
.dma_mask_size = 46, \
@@ -730,32 +723,12 @@ static const struct intel_device_info adl_p_info = {
.__runtime.ppgtt_size = 48, \
.__runtime.ppgtt_type = INTEL_PPGTT_FULL
-#define XE_HPM_FEATURES \
- .__runtime.media.ip.ver = 12, \
- .__runtime.media.ip.rel = 50
-
-__maybe_unused
-static const struct intel_device_info xehpsdv_info = {
- XE_HP_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- PLATFORM(INTEL_XEHPSDV),
- .has_64k_pages = 1,
- .has_media_ratio_mode = 1,
- .platform_engine_mask =
- BIT(RCS0) | BIT(BCS0) |
- BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
- BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
- BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
-};
-
#define DG2_FEATURES \
XE_HP_FEATURES, \
- XE_HPM_FEATURES, \
DGFX_FEATURES, \
+ .__runtime.graphics.ip.ver = 12, \
.__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.ver = 12, \
.__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_64k_pages = 1, \
@@ -778,33 +751,6 @@ static const struct intel_device_info ats_m_info = {
.tuning_thread_rr_after_dep = 1,
};
-#define XE_HPC_FEATURES \
- XE_HP_FEATURES, \
- .dma_mask_size = 52, \
- .has_3d_pipeline = 0, \
- .has_guc_deprivilege = 1, \
- .has_l3_ccs_read = 1, \
- .has_mslice_steering = 0, \
- .has_one_eu_per_fuse_bit = 1
-
-__maybe_unused
-static const struct intel_device_info pvc_info = {
- XE_HPC_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- .__runtime.graphics.ip.rel = 60,
- .__runtime.media.ip.rel = 60,
- PLATFORM(INTEL_PONTEVECCHIO),
- .has_flat_ccs = 0,
- .max_pat_index = 7,
- .platform_engine_mask =
- BIT(BCS0) |
- BIT(VCS0) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
- PVC_CACHELEVEL,
-};
-
static const struct intel_gt_definition xelpmp_extra_gt[] = {
{
.type = GT_MEDIA,
@@ -842,6 +788,8 @@ static const struct intel_device_info mtl_info = {
#undef PLATFORM
+__diag_pop();
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
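
A side note on the __diag_push()/__diag_ignore_all("-Woverride-init", ...)/__diag_pop() lines added to i915_pci.c above: the device-info tables are built from feature macros that pre-set fields and then override a few of them, which trips -Woverride-init under W=1 builds. The snippet below is a hedged userspace illustration of the same technique using the raw GCC pragmas the kernel helpers roughly expand to; the struct, macro, and variable names are invented for this sketch and are not part of i915.

#include <stdio.h>

struct dev_info {
	int ver;
	int rel;
	int has_64k_pages;
};

/* Shared defaults, some of which individual entries override on purpose. */
#define BASE_FEATURES	.ver = 12, .rel = 50, .has_64k_pages = 0

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverride-init"
static const struct dev_info dg2_like_info = {
	BASE_FEATURES,
	.rel = 55,		/* deliberate override of the macro's default */
	.has_64k_pages = 1,
};
#pragma GCC diagnostic pop

int main(void)
{
	printf("ip %d.%d, 64k pages: %d\n",
	       dg2_like_info.ver, dg2_like_info.rel,
	       dg2_like_info.has_64k_pages);
	return 0;
}

Scoping the suppression with push/pop keeps the warning active for the rest of the translation unit, which is the point of wrapping only the info tables rather than passing the flag globally.
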
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bd9d812b1afa..0b1cd4c7a525 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true;
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5)
-#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
*
@@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*/
if (oa_report_ctx_invalid(stream, report) &&
- GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32);
}
@@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream)
mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
- } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
(XEHP_SW_CTX_ID_SHIFT - 32);
@@ -2881,11 +2881,11 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
+ * Wa_1508761755
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -2911,7 +2911,7 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
/*
* Initialize Super Queue Internal Cnt Register
* Set PMON Enable in order to collect valid metrics.
- * Enable byets per clock reporting in OA for XEHPSDV onward.
+ * Enable bytes per clock reporting in OA.
*/
sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
@@ -2971,10 +2971,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
u32 sqcnt1;
/*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
+ * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -4123,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
props->hold_preemption = !!value;
break;
case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
- if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
drm_dbg(&perf->i915->drm,
"SSEU config not supported on gfx %x\n",
GRAPHICS_VER_FULL(perf->i915));
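
On the repeated IP_VER(12, 50) to IP_VER(12, 55) bumps in this file: with the 12.50 platform (XEHPSDV) removed, 12.55 (DG2) becomes the lowest remaining Xe_HP graphics IP, so tightening the floor does not change behaviour for any platform that is still supported. The sketch below only illustrates why packing version and release into one integer keeps such ">=" checks well ordered; the 8-bit shift and the standalone graphics_ver_full() helper are assumptions of this example, not the driver's definitions.

#include <assert.h>
#include <stdio.h>

/* Hypothetical packing; the real macro lives in the i915 headers. */
#define SKETCH_IP_VER(ver, rel)	(((ver) << 8) | (rel))

static int graphics_ver_full(int ver, int rel)
{
	return SKETCH_IP_VER(ver, rel);
}

int main(void)
{
	/* 12.55 and a later 12.x release pass both the old and the new check. */
	assert(graphics_ver_full(12, 55) >= SKETCH_IP_VER(12, 50));
	assert(graphics_ver_full(12, 55) >= SKETCH_IP_VER(12, 55));
	assert(graphics_ver_full(12, 70) >= SKETCH_IP_VER(12, 55));

	/* Plain 12.0 hardware still fails either threshold. */
	assert(!(graphics_ver_full(12, 0) >= SKETCH_IP_VER(12, 55)));

	printf("IP_VER ordering holds\n");
	return 0;
}
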
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3baa2f54a86e..14d9ec0ed777 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915,
struct intel_engine_cs *engine;
struct i915_engine_class_instance classinstance;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return -ENODEV;
classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e00557e1a57f..e22a82a5ddd7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,367 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-/*
- * Per pipe/PLL DPIO regs
- */
-#define _VLV_PLL_DW3_CH0 0x800c
-#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
-#define DPIO_POST_DIV_DAC 0
-#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
-#define DPIO_POST_DIV_LVDS1 2
-#define DPIO_POST_DIV_LVDS2 3
-#define DPIO_K_SHIFT (24) /* 4 bits */
-#define DPIO_P1_SHIFT (21) /* 3 bits */
-#define DPIO_P2_SHIFT (16) /* 5 bits */
-#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1 << 11)
-#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
-#define DPIO_M2DIV_MASK 0xff
-#define _VLV_PLL_DW3_CH1 0x802c
-#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
-
-#define _VLV_PLL_DW5_CH0 0x8014
-#define DPIO_REFSEL_OVERRIDE 27
-#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
-#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
-#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
-#define DPIO_PLL_REFCLK_SEL_MASK 3
-#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
-#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
-#define _VLV_PLL_DW5_CH1 0x8034
-#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
-
-#define _VLV_PLL_DW7_CH0 0x801c
-#define _VLV_PLL_DW7_CH1 0x803c
-#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
-
-#define _VLV_PLL_DW8_CH0 0x8040
-#define _VLV_PLL_DW8_CH1 0x8060
-#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
-
-#define VLV_PLL_DW9_BCAST 0xc044
-#define _VLV_PLL_DW9_CH0 0x8044
-#define _VLV_PLL_DW9_CH1 0x8064
-#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
-
-#define _VLV_PLL_DW10_CH0 0x8048
-#define _VLV_PLL_DW10_CH1 0x8068
-#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
-
-#define _VLV_PLL_DW11_CH0 0x804c
-#define _VLV_PLL_DW11_CH1 0x806c
-#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
-
-/* Spec for ref block start counts at DW10 */
-#define VLV_REF_DW13 0x80ac
-
-#define VLV_CMN_DW0 0x8100
-
-/*
- * Per DDI channel DPIO regs
- */
-
-#define _VLV_PCS_DW0_CH0 0x8200
-#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
-#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
-#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
-
-#define _VLV_PCS01_DW0_CH0 0x200
-#define _VLV_PCS23_DW0_CH0 0x400
-#define _VLV_PCS01_DW0_CH1 0x2600
-#define _VLV_PCS23_DW0_CH1 0x2800
-#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
-#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
-
-#define _VLV_PCS_DW1_CH0 0x8204
-#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
-#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
-#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
-
-#define _VLV_PCS01_DW1_CH0 0x204
-#define _VLV_PCS23_DW1_CH0 0x404
-#define _VLV_PCS01_DW1_CH1 0x2604
-#define _VLV_PCS23_DW1_CH1 0x2804
-#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
-#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
-
-#define _VLV_PCS_DW8_CH0 0x8220
-#define _VLV_PCS_DW8_CH1 0x8420
-#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
-#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
-#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
-
-#define _VLV_PCS01_DW8_CH0 0x0220
-#define _VLV_PCS23_DW8_CH0 0x0420
-#define _VLV_PCS01_DW8_CH1 0x2620
-#define _VLV_PCS23_DW8_CH1 0x2820
-#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
-#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
-
-#define _VLV_PCS_DW9_CH0 0x8224
-#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
-#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
-#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
-#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
-#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
-#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
-
-#define _VLV_PCS01_DW9_CH0 0x224
-#define _VLV_PCS23_DW9_CH0 0x424
-#define _VLV_PCS01_DW9_CH1 0x2624
-#define _VLV_PCS23_DW9_CH1 0x2824
-#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
-#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
-
-#define _CHV_PCS_DW10_CH0 0x8228
-#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
-#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
-
-#define _VLV_PCS01_DW10_CH0 0x0228
-#define _VLV_PCS23_DW10_CH0 0x0428
-#define _VLV_PCS01_DW10_CH1 0x2628
-#define _VLV_PCS23_DW10_CH1 0x2828
-#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
-#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
-
-#define _VLV_PCS_DW11_CH0 0x822c
-#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
-#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
-
-#define _VLV_PCS01_DW11_CH0 0x022c
-#define _VLV_PCS23_DW11_CH0 0x042c
-#define _VLV_PCS01_DW11_CH1 0x262c
-#define _VLV_PCS23_DW11_CH1 0x282c
-#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
-#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
-
-#define _VLV_PCS01_DW12_CH0 0x0230
-#define _VLV_PCS23_DW12_CH0 0x0430
-#define _VLV_PCS01_DW12_CH1 0x2630
-#define _VLV_PCS23_DW12_CH1 0x2830
-#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
-#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
-
-#define _VLV_PCS_DW12_CH0 0x8230
-#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
-#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
-
-#define _VLV_PCS_DW14_CH0 0x8238
-#define _VLV_PCS_DW14_CH1 0x8438
-#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
-
-#define _VLV_PCS_DW23_CH0 0x825c
-#define _VLV_PCS_DW23_CH1 0x845c
-#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
-
-#define _VLV_TX_DW2_CH0 0x8288
-#define _VLV_TX_DW2_CH1 0x8488
-#define DPIO_SWING_MARGIN000_SHIFT 16
-#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
-#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
-#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
-
-#define _VLV_TX_DW3_CH0 0x828c
-#define _VLV_TX_DW3_CH1 0x848c
-/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
-#define DPIO_SWING_MARGIN101_SHIFT 16
-#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
-#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
-
-#define _VLV_TX_DW4_CH0 0x8290
-#define _VLV_TX_DW4_CH1 0x8490
-#define DPIO_SWING_DEEMPH9P5_SHIFT 24
-#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
-#define DPIO_SWING_DEEMPH6P0_SHIFT 16
-#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
-#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
-
-#define _VLV_TX3_DW4_CH0 0x690
-#define _VLV_TX3_DW4_CH1 0x2a90
-#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
-
-#define _VLV_TX_DW5_CH0 0x8294
-#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1 << 31)
-#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
-
-#define _VLV_TX_DW11_CH0 0x82ac
-#define _VLV_TX_DW11_CH1 0x84ac
-#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
-
-#define _VLV_TX_DW14_CH0 0x82b8
-#define _VLV_TX_DW14_CH1 0x84b8
-#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
-
-/* CHV dpPhy registers */
-#define _CHV_PLL_DW0_CH0 0x8000
-#define _CHV_PLL_DW0_CH1 0x8180
-#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
-
-#define _CHV_PLL_DW1_CH0 0x8004
-#define _CHV_PLL_DW1_CH1 0x8184
-#define DPIO_CHV_N_DIV_SHIFT 8
-#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
-#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
-
-#define _CHV_PLL_DW2_CH0 0x8008
-#define _CHV_PLL_DW2_CH1 0x8188
-#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
-
-#define _CHV_PLL_DW3_CH0 0x800c
-#define _CHV_PLL_DW3_CH1 0x818c
-#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
-#define DPIO_CHV_FIRST_MOD (0 << 8)
-#define DPIO_CHV_SECOND_MOD (1 << 8)
-#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
-#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
-#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
-
-#define _CHV_PLL_DW6_CH0 0x8018
-#define _CHV_PLL_DW6_CH1 0x8198
-#define DPIO_CHV_GAIN_CTRL_SHIFT 16
-#define DPIO_CHV_INT_COEFF_SHIFT 8
-#define DPIO_CHV_PROP_COEFF_SHIFT 0
-#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
-
-#define _CHV_PLL_DW8_CH0 0x8020
-#define _CHV_PLL_DW8_CH1 0x81A0
-#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
-#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
-#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
-
-#define _CHV_PLL_DW9_CH0 0x8024
-#define _CHV_PLL_DW9_CH1 0x81A4
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
-#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
-#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
-
-#define _CHV_CMN_DW0_CH0 0x8100
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
-#define DPIO_ALLDL_POWERDOWN (1 << 1)
-#define DPIO_ANYDL_POWERDOWN (1 << 0)
-
-#define _CHV_CMN_DW5_CH0 0x8114
-#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
-#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
-#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
-#define CHV_BUFRIGHTENA1_MASK (3 << 20)
-#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
-#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
-#define CHV_BUFLEFTENA1_FORCE (3 << 22)
-#define CHV_BUFLEFTENA1_MASK (3 << 22)
-
-#define _CHV_CMN_DW13_CH0 0x8134
-#define _CHV_CMN_DW0_CH1 0x8080
-#define DPIO_CHV_S1_DIV_SHIFT 21
-#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
-#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
-#define DPIO_CHV_K_DIV_SHIFT 4
-#define DPIO_PLL_FREQLOCK (1 << 1)
-#define DPIO_PLL_LOCK (1 << 0)
-#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
-
-#define _CHV_CMN_DW14_CH0 0x8138
-#define _CHV_CMN_DW1_CH1 0x8084
-#define DPIO_AFC_RECAL (1 << 14)
-#define DPIO_DCLKP_EN (1 << 13)
-#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
-#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
-
-#define _CHV_CMN_DW19_CH0 0x814c
-#define _CHV_CMN_DW6_CH1 0x8098
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
-#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
-#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
-
-#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
-
-#define CHV_CMN_DW28 0x8170
-#define DPIO_CL1POWERDOWNEN (1 << 23)
-#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
-#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
-#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
-
-#define CHV_CMN_DW30 0x8178
-#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
-#define DPIO_LRC_BYPASS (1 << 3)
-
-#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
- (lane) * 0x200 + (offset))
-
-#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
-#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
-#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
-#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
-#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
-#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
-#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
-#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
-#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
-#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
-#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
-#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
-#define DPIO_FRC_LATENCY_SHFIT 8
-#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
-#define DPIO_UPAR_SHIFT 30
-
-/* BXT PHY registers */
-#define _BXT_PHY0_BASE 0x6C000
-#define _BXT_PHY1_BASE 0x162000
-#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) \
- _PICK_EVEN_2RANGES(phy, 1, \
- _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, _BXT_PHY2_BASE)
-
-#define _BXT_PHY(phy, reg) \
- _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
-
-#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
- (reg_ch1) - _BXT_PHY0_BASE))
-#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
-
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define MIPIO_RST_CTRL (1 << 2)
@@ -577,250 +216,6 @@
_PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-/* BXT PHY PLL registers */
-#define _PORT_PLL_A 0x46074
-#define _PORT_PLL_B 0x46078
-#define _PORT_PLL_C 0x4607c
-#define PORT_PLL_ENABLE REG_BIT(31)
-#define PORT_PLL_LOCK REG_BIT(30)
-#define PORT_PLL_REF_SEL REG_BIT(27)
-#define PORT_PLL_POWER_ENABLE REG_BIT(26)
-#define PORT_PLL_POWER_STATE REG_BIT(25)
-#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
-
-#define _PORT_PLL_EBB_0_A 0x162034
-#define _PORT_PLL_EBB_0_B 0x6C034
-#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
-#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
-#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
-#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
-
-#define _PORT_PLL_EBB_4_A 0x162038
-#define _PORT_PLL_EBB_4_B 0x6C038
-#define _PORT_PLL_EBB_4_C 0x6C344
-#define PORT_PLL_RECALIBRATE REG_BIT(14)
-#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
-#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
-
-#define _PORT_PLL_0_A 0x162100
-#define _PORT_PLL_0_B 0x6C100
-#define _PORT_PLL_0_C 0x6C380
-/* PORT_PLL_0_A */
-#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
-#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
-/* PORT_PLL_1_A */
-#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
-#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
-/* PORT_PLL_2_A */
-#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
-#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
-/* PORT_PLL_3_A */
-#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
-/* PORT_PLL_6_A */
-#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
-#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
-#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
-#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
-#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
-/* PORT_PLL_8_A */
-#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
-#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
-/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
-#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
-/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
-#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
-#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
-#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
- (idx) * 4)
-
-/* BXT PHY common lane registers */
-#define _PORT_CL1CM_DW0_A 0x162000
-#define _PORT_CL1CM_DW0_BC 0x6C000
-#define PHY_POWER_GOOD (1 << 16)
-#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-
-#define _PORT_CL1CM_DW9_A 0x162024
-#define _PORT_CL1CM_DW9_BC 0x6C024
-#define IREF0RC_OFFSET_SHIFT 8
-#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
-
-#define _PORT_CL1CM_DW10_A 0x162028
-#define _PORT_CL1CM_DW10_BC 0x6C028
-#define IREF1RC_OFFSET_SHIFT 8
-#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-
-#define _PORT_CL1CM_DW28_A 0x162070
-#define _PORT_CL1CM_DW28_BC 0x6C070
-#define OCL1_POWER_DOWN_EN (1 << 23)
-#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
-#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
-
-#define _PORT_CL1CM_DW30_A 0x162078
-#define _PORT_CL1CM_DW30_BC 0x6C078
-#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-
-/* The spec defines this only for BXT PHY0, but lets assume that this
- * would exist for PHY1 too if it had a second channel.
- */
-#define _PORT_CL2CM_DW6_A 0x162358
-#define _PORT_CL2CM_DW6_BC 0x6C358
-#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
-#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-
-/* BXT PHY Ref registers */
-#define _PORT_REF_DW3_A 0x16218C
-#define _PORT_REF_DW3_BC 0x6C18C
-#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
-
-#define _PORT_REF_DW6_A 0x162198
-#define _PORT_REF_DW6_BC 0x6C198
-#define GRC_CODE_SHIFT 24
-#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
-#define GRC_CODE_FAST_SHIFT 16
-#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
-#define GRC_CODE_SLOW_SHIFT 8
-#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
-#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
-
-#define _PORT_REF_DW8_A 0x1621A0
-#define _PORT_REF_DW8_BC 0x6C1A0
-#define GRC_DIS (1 << 15)
-#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
-
-/* BXT PHY PCS registers */
-#define _PORT_PCS_DW10_LN01_A 0x162428
-#define _PORT_PCS_DW10_LN01_B 0x6C428
-#define _PORT_PCS_DW10_LN01_C 0x6C828
-#define _PORT_PCS_DW10_GRP_A 0x162C28
-#define _PORT_PCS_DW10_GRP_B 0x6CC28
-#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
-
-#define TX2_SWING_CALC_INIT (1 << 31)
-#define TX1_SWING_CALC_INIT (1 << 30)
-
-#define _PORT_PCS_DW12_LN01_A 0x162430
-#define _PORT_PCS_DW12_LN01_B 0x6C430
-#define _PORT_PCS_DW12_LN01_C 0x6C830
-#define _PORT_PCS_DW12_LN23_A 0x162630
-#define _PORT_PCS_DW12_LN23_B 0x6C630
-#define _PORT_PCS_DW12_LN23_C 0x6CA30
-#define _PORT_PCS_DW12_GRP_A 0x162c30
-#define _PORT_PCS_DW12_GRP_B 0x6CC30
-#define _PORT_PCS_DW12_GRP_C 0x6CE30
-#define LANESTAGGER_STRAP_OVRD (1 << 6)
-#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
-
-/* BXT PHY TX registers */
-#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
- ((lane) & 1) * 0x80)
-
-#define _PORT_TX_DW2_LN0_A 0x162508
-#define _PORT_TX_DW2_LN0_B 0x6C508
-#define _PORT_TX_DW2_LN0_C 0x6C908
-#define _PORT_TX_DW2_GRP_A 0x162D08
-#define _PORT_TX_DW2_GRP_B 0x6CD08
-#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
-#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define MARGIN_000_SHIFT 16
-#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
-#define UNIQ_TRANS_SCALE_SHIFT 8
-#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT)
-
-#define _PORT_TX_DW3_LN0_A 0x16250C
-#define _PORT_TX_DW3_LN0_B 0x6C50C
-#define _PORT_TX_DW3_LN0_C 0x6C90C
-#define _PORT_TX_DW3_GRP_A 0x162D0C
-#define _PORT_TX_DW3_GRP_B 0x6CD0C
-#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
-#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define SCALE_DCOMP_METHOD (1 << 26)
-#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
-
-#define _PORT_TX_DW4_LN0_A 0x162510
-#define _PORT_TX_DW4_LN0_B 0x6C510
-#define _PORT_TX_DW4_LN0_C 0x6C910
-#define _PORT_TX_DW4_GRP_A 0x162D10
-#define _PORT_TX_DW4_GRP_B 0x6CD10
-#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
-#define DEEMPH_SHIFT 24
-#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
-
-#define _PORT_TX_DW5_LN0_A 0x162514
-#define _PORT_TX_DW5_LN0_B 0x6C514
-#define _PORT_TX_DW5_LN0_C 0x6C914
-#define _PORT_TX_DW5_GRP_A 0x162D14
-#define _PORT_TX_DW5_GRP_B 0x6CD14
-#define _PORT_TX_DW5_GRP_C 0x6CF14
-#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_LN0_B, \
- _PORT_TX_DW5_LN0_C)
-#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_GRP_B, \
- _PORT_TX_DW5_GRP_C)
-#define DCC_DELAY_RANGE_1 (1 << 9)
-#define DCC_DELAY_RANGE_2 (1 << 8)
-
-#define _PORT_TX_DW14_LN0_A 0x162538
-#define _PORT_TX_DW14_LN0_B 0x6C538
-#define _PORT_TX_DW14_LN0_C 0x6C938
-#define LATENCY_OPTIM_SHIFT 30
-#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
- _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
-
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
/* SKL VccIO mask */
@@ -1228,22 +623,6 @@
#define I915_ASLE_INTERRUPT (1 << 0)
#define I915_BSD_USER_INTERRUPT (1 << 25)
-#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
-#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
-
-/* DisplayPort Audio w/ LPE */
-#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
-#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
-
-#define _VLV_AUD_PORT_EN_B_DBG (VLV_DISPLAY_BASE + 0x62F20)
-#define _VLV_AUD_PORT_EN_C_DBG (VLV_DISPLAY_BASE + 0x62F30)
-#define _VLV_AUD_PORT_EN_D_DBG (VLV_DISPLAY_BASE + 0x62F34)
-#define VLV_AUD_PORT_EN_DBG(port) _MMIO_PORT3((port) - PORT_B, \
- _VLV_AUD_PORT_EN_B_DBG, \
- _VLV_AUD_PORT_EN_C_DBG, \
- _VLV_AUD_PORT_EN_D_DBG)
-#define VLV_AMP_MUTE (1 << 1)
-
#define GEN6_BSD_RNCID _MMIO(0x12198)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
@@ -1264,109 +643,6 @@
#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
-/*
- * Framebuffer compression (915+ only)
- */
-
-#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
-#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
-#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN REG_BIT(31)
-#define FBC_CTL_PERIODIC REG_BIT(30)
-#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
-#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
-#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
-#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
-#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
-#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
-#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
-#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
-#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS REG_BIT(0)
-#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING REG_BIT(31)
-#define FBC_STAT_COMPRESSED REG_BIT(30)
-#define FBC_STAT_MODIFIED REG_BIT(29)
-#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
-#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
-#define FBC_CTL_FENCE_DBL REG_BIT(4)
-#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
-#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
-#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
-#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
-#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
-#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
-#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
-#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
-#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
-#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
-#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
-#define FBC_MOD_NUM_VALID REG_BIT(0)
-#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 reisters */
-#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
-#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
-#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
-#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
-#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
-
-#define FBC_LL_SIZE (1536)
-
-/* Framebuffer compression for GM45+ */
-#define DPFC_CB_BASE _MMIO(0x3200)
-#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
-#define DPFC_CONTROL _MMIO(0x3208)
-#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
-#define DPFC_CTL_EN REG_BIT(31)
-#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
-#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
-#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
-#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
-#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
-#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
-#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
-#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
-#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
-#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
-#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
-#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
-#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
-#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
-#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
-#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
-#define DPFC_RECOMP_STALL_EN REG_BIT(27)
-#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
-#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
-#define DPFC_STATUS _MMIO(0x3210)
-#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
-#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
-#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
-#define DPFC_STATUS2 _MMIO(0x3214)
-#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
-#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
-#define DPFC_FENCE_YOFF _MMIO(0x3218)
-#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
-#define DPFC_CHICKEN _MMIO(0x3224)
-#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
-#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
-#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
-#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
-#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
-#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
-
-#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
-#define FBC_STRIDE_OVERRIDE REG_BIT(15)
-#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
-#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
-
-#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID REG_BIT(0)
-#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
-
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS REG_BIT(22)
#define ILK_PABSTRETCH_DIS REG_BIT(21)
@@ -1382,37 +658,18 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-
-/*
- * Framebuffer compression for Sandybridge
- *
- * The following two registers are of type GTTMMADR
- */
-#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_DPFC_FENCE_EN REG_BIT(29)
-#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
-#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
-#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
-
-/* Framebuffer compression for Ivybridge */
-#define IVB_FBC_RT_BASE _MMIO(0x7020)
-#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
-
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE REG_BIT(31)
#define IPS_FALSE_COLOR REG_BIT(4)
-#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
-#define FBC_REND_NUKE REG_BIT(2)
-#define FBC_REND_CACHE_CLEAN REG_BIT(1)
-
/*
* Clock control & power management
*/
-#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
-#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
-#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
-#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
#define VGA1 _MMIO(0x6004)
@@ -1508,10 +765,11 @@
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
-#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
-#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
-#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -1716,42 +974,10 @@
#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-/*
- * Palette regs
- */
-#define _PALETTE_A 0xa000
-#define _PALETTE_B 0xa800
-#define _CHV_PALETTE_C 0xc000
-/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
-#define PALETTE_RED_MASK REG_GENMASK(23, 16)
-#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
-#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode ldw */
-#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
-#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
-#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode udw */
-#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
-#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
-#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
-#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
-#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
-#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
-#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
-#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
-#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK_EVEN_2RANGES(pipe, 2, \
- _PALETTE_A, _PALETTE_B, \
- _CHV_PALETTE_C, _CHV_PALETTE_C) + \
- (i) * 4)
-
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
-#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
-#define PVC_RP_STATE_CAP _MMIO(0x281014)
#define MTL_RP_STATE_CAP _MMIO(0x138000)
#define MTL_MEDIAP_STATE_CAP _MMIO(0x138020)
@@ -1911,18 +1137,18 @@
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
-#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
-#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
-#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
-#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
-#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
-#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
+#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_CTL_A)
+#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_1_A_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_2_A_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_3_A_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_4_A_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_5_A_IVB)
-#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
-#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
-#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
-#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
-#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
+#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RED_A)
+#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_GREEN_A)
+#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_BLUE_A)
+#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES1_A_I915)
+#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES2_A_G4X)
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
@@ -1991,23 +1217,23 @@
#define _TRANS_VSYNC_DSI1 0x6b814
#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
-#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
-#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
-#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
-#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
-#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
-#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
-#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
-#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
-#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
#define VRR_CTL_VRR_ENABLE REG_BIT(31)
#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
@@ -2021,21 +1247,21 @@
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VMAXSHIFT_A)
#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
#define VRR_VMAXSHIFT_DEC REG_BIT(16)
@@ -2045,7 +1271,7 @@
#define _TRANS_VRR_STATUS_B 0x6142C
#define _TRANS_VRR_STATUS_C 0x6242C
#define _TRANS_VRR_STATUS_D 0x6342C
-#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
@@ -2065,7 +1291,7 @@
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VTOTAL_PREV_A)
#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
@@ -2076,7 +1302,7 @@
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_FLIPLINE_A)
#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
@@ -2084,17 +1310,24 @@
#define _TRANS_VRR_STATUS2_B 0x6143C
#define _TRANS_VRR_STATUS2_C 0x6243C
#define _TRANS_VRR_STATUS2_D 0x6343C
-#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60A70
#define _TRANS_PUSH_B 0x61A70
#define _TRANS_PUSH_C 0x62A70
#define _TRANS_PUSH_D 0x63A70
-#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
#define TRANS_PUSH_EN REG_BIT(31)
#define TRANS_PUSH_SEND REG_BIT(30)
+#define _TRANS_VRR_VSYNC_A 0x60078
+#define TRANS_VRR_VSYNC(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -2312,6 +1545,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
@@ -2350,6 +1584,8 @@
#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -2588,6 +1824,9 @@
#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -2639,18 +1878,18 @@
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
-#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
-#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
-#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define TRANSCONF(trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define PIPEDSL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(dev_priv, pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
#define _PIPE_MISC_A 0x70030
@@ -2694,7 +1933,7 @@
#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
#define _ICL_PIPE_A_STATUS 0x70058
-#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS)
#define PIPE_STATUS_UNDERRUN REG_BIT(31)
#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
@@ -2969,8 +2208,8 @@
#define _WM0_PIPEA_ILK 0x45100
#define _WM0_PIPEB_ILK 0x45104
#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
@@ -3024,8 +2263,8 @@
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_G4X 0x70040
#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
-#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
+#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
/* Cursor A & B regs */
#define _CURACNTR 0x70080
@@ -3053,6 +2292,7 @@
#define MCURSOR_MODE_DISABLE 0x00
#define MCURSOR_MODE_128_32B_AX 0x02
#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_2B 0x04
#define MCURSOR_MODE_64_32B_AX 0x07
#define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX)
#define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX)
@@ -3085,14 +2325,14 @@
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
-#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
-#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
-#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
-#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
-#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
-#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
-#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
+#define CURCNTR(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURACNTR)
+#define CURBASE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURABASE)
+#define CURPOS(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS)
+#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS_ERLY_TPT)
+#define CURSIZE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASIZE)
+#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_FBC_CTL_A)
+#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_CHICKEN_A)
+#define CURSURFLIVE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASURFLIVE)
/* Display A control */
#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
@@ -3149,18 +2389,18 @@
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
-#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
-#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
-#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
-#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
-#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
-#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
-#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
-#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR_VLV)
+#define DSPCNTR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPACNTR)
+#define DSPADDR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASTRIDE)
+#define DSPPOS(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAPOS)
+#define DSPSIZE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASIZE)
+#define DSPSURF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURF)
+#define DSPTILEOFF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
-#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
-#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
+#define DSPOFFSET(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO_PIPE2(dev_priv, plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -3187,11 +2427,11 @@
#define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha))
-#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
-#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
-#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
-#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
-#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)
+#define CHV_BLEND(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMCNSTALPHA_A)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3241,346 +2481,6 @@
#define _PIPEDSI0CONF 0x7b008
#define _PIPEDSI1CONF 0x7b808
-/* Sprite A control */
-#define _DVSACNTR 0x72180
-#define DVS_ENABLE REG_BIT(31)
-#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
-#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
-#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
-#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
-#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
-#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
-#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
-#define DVS_SOURCE_KEY REG_BIT(22)
-#define DVS_RGB_ORDER_XBGR REG_BIT(20)
-#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
-#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
-#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
-#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
-#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
-#define DVS_ROTATE_180 REG_BIT(15)
-#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define DVS_TILED REG_BIT(10)
-#define DVS_DEST_KEY REG_BIT(2)
-#define _DVSALINOFF 0x72184
-#define _DVSASTRIDE 0x72188
-#define _DVSAPOS 0x7218c
-#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
-#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
-#define DVS_POS_X_MASK REG_GENMASK(15, 0)
-#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
-#define _DVSASIZE 0x72190
-#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
-#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
-#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
-#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
-#define _DVSAKEYVAL 0x72194
-#define _DVSAKEYMSK 0x72198
-#define _DVSASURF 0x7219c
-#define DVS_ADDR_MASK REG_GENMASK(31, 12)
-#define _DVSAKEYMAXVAL 0x721a0
-#define _DVSATILEOFF 0x721a4
-#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
-#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
-#define _DVSASURFLIVE 0x721ac
-#define _DVSAGAMC_G4X 0x721e0 /* g4x */
-#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE REG_BIT(31)
-#define DVS_FILTER_MASK REG_GENMASK(30, 29)
-#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
-#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
-#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
-#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
-#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
-#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
-#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
-
-#define _DVSBCNTR 0x73180
-#define _DVSBLINOFF 0x73184
-#define _DVSBSTRIDE 0x73188
-#define _DVSBPOS 0x7318c
-#define _DVSBSIZE 0x73190
-#define _DVSBKEYVAL 0x73194
-#define _DVSBKEYMSK 0x73198
-#define _DVSBSURF 0x7319c
-#define _DVSBKEYMAXVAL 0x731a0
-#define _DVSBTILEOFF 0x731a4
-#define _DVSBSURFLIVE 0x731ac
-#define _DVSBGAMC_G4X 0x731e0 /* g4x */
-#define _DVSBSCALE 0x73204
-#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
-#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
-
-#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
-#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
-#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
-#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
-#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
-#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
-#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
-#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
-#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
-#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
-#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
-#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
-#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
-#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
-
-#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE REG_BIT(31)
-#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
-#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
-#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
-#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
-#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
-#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
-#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
-#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
-#define SPRITE_SOURCE_KEY REG_BIT(22)
-#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
-#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
-#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
-#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
-#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
-#define SPRITE_ROTATE_180 REG_BIT(15)
-#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
-#define SPRITE_TILED REG_BIT(10)
-#define SPRITE_DEST_KEY REG_BIT(2)
-#define _SPRA_LINOFF 0x70284
-#define _SPRA_STRIDE 0x70288
-#define _SPRA_POS 0x7028c
-#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
-#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
-#define _SPRA_SIZE 0x70290
-#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
-#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
-#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
-#define _SPRA_KEYVAL 0x70294
-#define _SPRA_KEYMSK 0x70298
-#define _SPRA_SURF 0x7029c
-#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPRA_KEYMAX 0x702a0
-#define _SPRA_TILEOFF 0x702a4
-#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
-#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
-#define _SPRA_OFFSET 0x702a4
-#define _SPRA_SURFLIVE 0x702ac
-#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE REG_BIT(31)
-#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
-#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
-#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
-#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
-#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
-#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
-#define _SPRA_GAMC 0x70400
-#define _SPRA_GAMC16 0x70440
-#define _SPRA_GAMC17 0x7044c
-
-#define _SPRB_CTL 0x71280
-#define _SPRB_LINOFF 0x71284
-#define _SPRB_STRIDE 0x71288
-#define _SPRB_POS 0x7128c
-#define _SPRB_SIZE 0x71290
-#define _SPRB_KEYVAL 0x71294
-#define _SPRB_KEYMSK 0x71298
-#define _SPRB_SURF 0x7129c
-#define _SPRB_KEYMAX 0x712a0
-#define _SPRB_TILEOFF 0x712a4
-#define _SPRB_OFFSET 0x712a4
-#define _SPRB_SURFLIVE 0x712ac
-#define _SPRB_SCALE 0x71304
-#define _SPRB_GAMC 0x71400
-#define _SPRB_GAMC16 0x71440
-#define _SPRB_GAMC17 0x7144c
-
-#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
-#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
-#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
-#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
-#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
-#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
-#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
-#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
-#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
-#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
-#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
-#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
-#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
-#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
-
-#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE REG_BIT(31)
-#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SP_FORMAT_MASK REG_GENMASK(29, 26)
-#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
-#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
-#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
-#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
-#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
-#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
-#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
-#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
-#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
-#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
-#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
-#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
-#define SP_SOURCE_KEY REG_BIT(22)
-#define SP_YUV_FORMAT_BT709 REG_BIT(18)
-#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
-#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
-#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
-#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
-#define SP_ROTATE_180 REG_BIT(15)
-#define SP_TILED REG_BIT(10)
-#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
-#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
-#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
-#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
-#define SP_POS_Y_MASK REG_GENMASK(31, 16)
-#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
-#define SP_POS_X_MASK REG_GENMASK(15, 0)
-#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
-#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
-#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
-#define SP_WIDTH_MASK REG_GENMASK(15, 0)
-#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
-#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
-#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
-#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
-#define SP_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
-#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
-#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
-#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
-#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
-#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
-#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
-#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
-#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
-#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
-#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
-#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
-#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
-#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
-#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
-#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
-#define SP_SH_COS_MASK REG_GENMASK(9, 0)
-#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
-
-#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
-#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
-#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
-#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
-#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
-#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
-#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
-#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
-#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
-#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
-#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
-#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
-#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
-#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
-
-#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
-#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
-
-#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
-#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
-#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
-
-/*
- * CHV pipe B sprite CSC
- *
- * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
- * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
- * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
- */
-#define _MMIO_CHV_SPCSC(plane_id, reg) \
- _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
-
-#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
-#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
-#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
-#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
-#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
-#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
-#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
-#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
-#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
-#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
-#define SPCSC_C1_MASK REG_GENMASK(30, 16)
-#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
-#define SPCSC_C0_MASK REG_GENMASK(14, 0)
-#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
-
-#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
-#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
-#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
-#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
-#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
-#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
-#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
-#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
-#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
-#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
-#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
-#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
-
/* Skylake plane registers */
#define _PLANE_CTL_1_A 0x70180
@@ -3990,14 +2890,14 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
-#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
-#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
-#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
-#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
-#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
-#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
-#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
+#define PIPE_DATA_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4555,6 +3455,11 @@
#define GLK_CL1_PWR_DOWN REG_BIT(11)
#define GLK_CL0_PWR_DOWN REG_BIT(10)
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
@@ -4599,7 +3504,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
@@ -4611,7 +3516,9 @@
#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
@@ -5010,27 +3917,29 @@
#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
-#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-
-#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
-#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
-
-#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
-#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
-
-#define VLV_TVIDEO_DIP_CTL(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
-#define VLV_TVIDEO_DIP_DATA(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
-#define VLV_TVIDEO_DIP_GCP(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
@@ -5040,6 +3949,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
@@ -5054,6 +3964,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
@@ -5073,22 +3984,25 @@
#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
-#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+/* ADLP and later: */
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, \
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
#define _HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
+#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -5401,7 +4315,7 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
-#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
@@ -5566,15 +4480,6 @@ enum skl_power_gate {
((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-#define _ICL_AUX_ANAOVRD1_A 0x162398
-#define _ICL_AUX_ANAOVRD1_B 0x6C398
-#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
- _ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B))
-#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
-#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -5583,7 +4488,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
+#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
@@ -5638,7 +4543,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
@@ -5651,7 +4556,7 @@ enum skl_power_gate {
#define _DP_TP_CTL_B 0x64140
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -5677,7 +4582,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_B 0x64144
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -5858,14 +4763,14 @@ enum skl_power_gate {
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
+#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
/* See DP_MSA_MISC_* for the bit definitions */
#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
@@ -5900,7 +4805,9 @@ enum skl_power_gate {
#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_BIT(25)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
@@ -6317,7 +5224,7 @@ enum skl_power_gate {
#define _VLV_PIPE_MSA_MISC_A 0x70048
#define VLV_PIPE_MSA_MISC(pipe) \
- _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
+ _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
@@ -6390,7 +5297,7 @@ enum skl_power_gate {
#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 0d735d5c2b35..942345548bc3 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -126,7 +126,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
return 0;
err_free_blocks:
- drm_buddy_free_list(mm, &bman_res->blocks);
+ drm_buddy_free_list(mm, &bman_res->blocks, 0);
mutex_unlock(&bman->lock);
err_free_res:
ttm_resource_fini(man, &bman_res->base);
@@ -141,7 +141,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
- drm_buddy_free_list(&bman->mm, &bman_res->blocks);
+ drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
bman->visible_avail += bman_res->used_visible_size;
mutex_unlock(&bman->lock);
@@ -345,7 +345,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
- drm_buddy_free_list(mm, &bman->reserved);
+ drm_buddy_free_list(mm, &bman->reserved, 0);
drm_buddy_fini(mm);
bman->visible_avail += bman->visible_reserved;
WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b45ef0560611..06ec6ceb61d5 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -73,20 +73,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows_t(T, A, B) \
- __builtin_add_overflow_p((A), (B), (T)0)
-#else
-#define add_overflows_t(T, A, B) ({ \
- typeof(A) a = (A); \
- typeof(B) b = (B); \
- (T)(a + b) < a; \
-})
-#endif
-
-#define add_overflows(A, B) \
- add_overflows_t(typeof((A) + (B)), (A), (B))
-
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34ba37..d2f064d2525c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise the GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
@@ -1740,8 +1776,6 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt,
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
- /* Wait for async active retire */
- i915_active_wait(&vma->active);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 9c21ce69bd98..1dc5281b2ade 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -28,6 +28,7 @@
#include "display/intel_de.h"
#include "display/intel_display.h"
#include "display/intel_display_trace.h"
+#include "display/intel_fbc_regs.h"
#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
@@ -105,12 +106,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: bxt
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:bxt
- * Display WA #0883: bxt
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *i915)
@@ -349,13 +344,6 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *i915,
intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl);
}
-static void xehpsdv_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_22010146351:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-}
-
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
/* Wa_22010954014:dg2 */
@@ -363,17 +351,6 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGSI_SIDECLK_DIS);
}
-static void pvc_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_14012385139:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-
- /* Wa_22010954014:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
-}
-
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
if (!HAS_PCH_CNP(i915))
@@ -396,13 +373,6 @@ static void cfl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: cfl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:cfl
- * Display WA #0873: cfl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *i915)
@@ -427,13 +397,6 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: kbl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:kbl
- * Display WA #0873: kbl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *i915)
@@ -452,19 +415,6 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: skl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:skl
- * Display WA #0873: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:skl
- * Display WA #0883: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *i915)
@@ -762,9 +712,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
-CG_FUNCS(pvc);
CG_FUNCS(dg2);
-CG_FUNCS(xehpsdv);
CG_FUNCS(cfl);
CG_FUNCS(skl);
CG_FUNCS(kbl);
@@ -797,12 +745,8 @@ CG_FUNCS(nop);
*/
void intel_clock_gating_hooks_init(struct drm_i915_private *i915)
{
- if (IS_PONTEVECCHIO(i915))
- i915->clock_gating_funcs = &pvc_clock_gating_funcs;
- else if (IS_DG2(i915))
+ if (IS_DG2(i915))
i915->clock_gating_funcs = &dg2_clock_gating_funcs;
- else if (IS_XEHPSDV(i915))
- i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
i915->clock_gating_funcs = &cfl_clock_gating_funcs;
else if (IS_SKYLAKE(i915))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 59bea1398c91..a0a43ea07f11 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -70,9 +70,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(DG1),
PLATFORM_NAME(ALDERLAKE_S),
PLATFORM_NAME(ALDERLAKE_P),
- PLATFORM_NAME(XEHPSDV),
PLATFORM_NAME(DG2),
- PLATFORM_NAME(PONTEVECCHIO),
PLATFORM_NAME(METEORLAKE),
};
#undef PLATFORM_NAME
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index eba2f0b919c8..d1a2abc7e513 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -87,9 +87,7 @@ enum intel_platform {
INTEL_DG1,
INTEL_ALDERLAKE_S,
INTEL_ALDERLAKE_P,
- INTEL_XEHPSDV,
INTEL_DG2,
- INTEL_PONTEVECCHIO,
INTEL_METEORLAKE,
INTEL_MAX_PLATFORMS
};
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 9b6d87c8b583..5a01d60e5186 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -28,6 +28,7 @@
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"
+#include <linux/vmalloc.h>
/**
* DOC: Intel GVT-g host support
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ecc5104fd9..e1a35f70b544 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
@@ -10,9 +11,11 @@
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_engine_regs.h"
@@ -1155,11 +1158,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
@@ -1180,11 +1183,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
@@ -1205,11 +1208,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d4e844128826..2d0647aca964 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -272,15 +272,11 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure
*
- * This function grabs a device-level runtime pm reference (mostly used for GEM
- * code to ensure the GTT or GT is on).
+ * This function grabs a device-level runtime pm reference.
*
- * It will _not_ power up the device but instead only check that it's powered
- * on. Therefore it is only valid to call this functions from contexts where
- * the device is known to be powered up and where trying to power it up would
- * result in hilarity and deadlocks. That pretty much means only the system
- * suspend/resume code where this is used to grab runtime pm references for
- * delayed setup down in work items.
+ * It will _not_ resume the device but instead only get an extra wakeref.
+ * Therefore it is only valid to call this function from contexts where
+ * the device is known to be active and another wakeref is already held.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@@ -289,7 +285,7 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
*/
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- assert_rpm_wakelock_held(rpm);
+ assert_rpm_raw_wakeref_held(rpm);
pm_runtime_get_noresume(rpm->kdev);
intel_runtime_pm_acquire(rpm, true);
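
The reworked kerneldoc above tightens the contract of intel_runtime_pm_get_noresume(): it only takes an extra reference and is valid only while the device is already awake under another wakeref. A minimal sketch of that calling pattern follows; it is illustrative only and not part of the patch, and example_noresume_pattern is a hypothetical helper name (the intel_runtime_pm_* calls themselves are the real i915 API used elsewhere in this diff).

/* Sketch: pair a real wakeref with a no-resume reference. */
static void example_noresume_pattern(struct drm_i915_private *i915)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref, extra;

	/* Resume the device and hold a tracked wakeref first. */
	wakeref = intel_runtime_pm_get(rpm);

	/* Legal now: the device is awake, so this only bumps the refcount. */
	extra = intel_runtime_pm_get_noresume(rpm);

	/* ... touch the hardware ... */

	/* Release both references symmetrically. */
	intel_runtime_pm_put(rpm, extra);
	intel_runtime_pm_put(rpm, wakeref);
}
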
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index b4162f1be765..a5adfb5d8fd2 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -102,13 +102,6 @@ static const struct intel_step_info adlp_revids[] = {
[0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
-static const struct intel_step_info xehpsdv_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0) },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1) },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0) },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0) },
-};
-
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
[0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
@@ -153,8 +146,6 @@ static u8 gmd_to_intel_step(struct drm_i915_private *i915,
return step;
}
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
-
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -178,10 +169,7 @@ void intel_step_init(struct drm_i915_private *i915)
return;
}
- if (IS_PONTEVECCHIO(i915)) {
- pvc_step_init(i915, revid);
- return;
- } else if (IS_DG2_G10(i915)) {
+ if (IS_DG2_G10(i915)) {
revids = dg2_g10_revid_step_tbl;
size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
} else if (IS_DG2_G11(i915)) {
@@ -190,9 +178,6 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_DG2_G12(i915)) {
revids = dg2_g12_revid_step_tbl;
size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
- } else if (IS_XEHPSDV(i915)) {
- revids = xehpsdv_revids;
- size = ARRAY_SIZE(xehpsdv_revids);
} else if (IS_ALDERLAKE_P_N(i915)) {
revids = adlp_n_revids;
size = ARRAY_SIZE(adlp_n_revids);
@@ -277,69 +262,6 @@ void intel_step_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->step = step;
}
-#define PVC_BD_REVID GENMASK(5, 3)
-#define PVC_CT_REVID GENMASK(2, 0)
-
-static const int pvc_bd_subids[] = {
- [0x0] = STEP_A0,
- [0x3] = STEP_B0,
- [0x4] = STEP_B1,
- [0x5] = STEP_B3,
-};
-
-static const int pvc_ct_subids[] = {
- [0x3] = STEP_A0,
- [0x5] = STEP_B0,
- [0x6] = STEP_B1,
- [0x7] = STEP_C0,
-};
-
-static int
-pvc_step_lookup(struct drm_i915_private *i915, const char *type,
- const int *table, int size, int subid)
-{
- if (subid < size && table[subid] != STEP_NONE)
- return table[subid];
-
- drm_warn(&i915->drm, "Unknown %s id 0x%02x\n", type, subid);
-
- /*
- * As on other platforms, try to use the next higher ID if we land on a
- * gap in the table.
- */
- while (subid < size && table[subid] == STEP_NONE)
- subid++;
-
- if (subid < size) {
- drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n",
- type, subid);
- return table[subid];
- }
-
- drm_dbg(&i915->drm, "Using future steppings\n");
- return STEP_FUTURE;
-}
-
-/*
- * PVC needs special handling since we don't lookup the
- * revid in a table, but rather specific bitfields within
- * the revid for various components.
- */
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid)
-{
- int ct_subid, bd_subid;
-
- bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid);
- ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid);
-
- RUNTIME_INFO(i915)->step.basedie_step =
- pvc_step_lookup(i915, "Base Die", pvc_bd_subids,
- ARRAY_SIZE(pvc_bd_subids), bd_subid);
- RUNTIME_INFO(i915)->step.graphics_step =
- pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids,
- ARRAY_SIZE(pvc_ct_subids), ct_subid);
-}
-
#define STEP_NAME_CASE(name) \
case STEP_##name: \
return #name;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 76400e9c40f0..729409a4bada 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1106,45 +1106,6 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range pvc_shadowed_regs[] = {
- { .start = 0x2030, .end = 0x2030 },
- { .start = 0x2510, .end = 0x2550 },
- { .start = 0xA008, .end = 0xA00C },
- { .start = 0xA188, .end = 0xA188 },
- { .start = 0xA278, .end = 0xA278 },
- { .start = 0xA540, .end = 0xA56C },
- { .start = 0xC4C8, .end = 0xC4C8 },
- { .start = 0xC4E0, .end = 0xC4E0 },
- { .start = 0xC600, .end = 0xC600 },
- { .start = 0xC658, .end = 0xC658 },
- { .start = 0x22030, .end = 0x22030 },
- { .start = 0x22510, .end = 0x22550 },
- { .start = 0x1C0030, .end = 0x1C0030 },
- { .start = 0x1C0510, .end = 0x1C0550 },
- { .start = 0x1C4030, .end = 0x1C4030 },
- { .start = 0x1C4510, .end = 0x1C4550 },
- { .start = 0x1C8030, .end = 0x1C8030 },
- { .start = 0x1C8510, .end = 0x1C8550 },
- { .start = 0x1D0030, .end = 0x1D0030 },
- { .start = 0x1D0510, .end = 0x1D0550 },
- { .start = 0x1D4030, .end = 0x1D4030 },
- { .start = 0x1D4510, .end = 0x1D4550 },
- { .start = 0x1D8030, .end = 0x1D8030 },
- { .start = 0x1D8510, .end = 0x1D8550 },
- { .start = 0x1E0030, .end = 0x1E0030 },
- { .start = 0x1E0510, .end = 0x1E0550 },
- { .start = 0x1E4030, .end = 0x1E4030 },
- { .start = 0x1E4510, .end = 0x1E4550 },
- { .start = 0x1E8030, .end = 0x1E8030 },
- { .start = 0x1E8510, .end = 0x1E8550 },
- { .start = 0x1F0030, .end = 0x1F0030 },
- { .start = 0x1F0510, .end = 0x1F0550 },
- { .start = 0x1F4030, .end = 0x1F4030 },
- { .start = 0x1F4510, .end = 0x1F4550 },
- { .start = 0x1F8030, .end = 0x1F8030 },
- { .start = 0x1F8510, .end = 0x1F8550 },
-};
-
static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
@@ -1471,195 +1432,31 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
0x1d3f00 - 0x1d3fff: VD2 */
};
-/*
- * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
- * switching it from the GT domain to the render domain.
- */
-#define XEHP_FWRANGES(FW_RANGE_D800) \
- GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
- 0x0 - 0xaff: reserved \
- 0xb00 - 0x1fff: always on */ \
- GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
- 0x4b00 - 0x4fff: reserved \
- 0x5000 - 0x51ff: always on */ \
- GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
- 0x8160 - 0x817f: reserved \
- 0x8180 - 0x81ff: always on */ \
- GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
- 0x8500 - 0x87ff: gt \
- 0x8800 - 0x8c7f: reserved \
- 0x8c80 - 0x8cff: gt (DG2 only) */ \
- GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
- 0x8d00 - 0x8dff: render (DG2 only) \
- 0x8e00 - 0x8fff: reserved */ \
- GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
- 0x9000 - 0x947f: gt \
- 0x9480 - 0x94cf: reserved */ \
- GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
- 0x9560 - 0x95ff: always on \
- 0x9600 - 0x967f: reserved */ \
- GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
- 0x9680 - 0x96ff: render (DG2 only) \
- 0x9700 - 0x97ff: reserved */ \
- GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
- 0x9800 - 0xb4ff: gt \
- 0xb500 - 0xbfff: reserved \
- 0xc000 - 0xcfff: gt */ \
- GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
- GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
- GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
- 0xdd00 - 0xddff: gt \
- 0xde00 - 0xde7f: reserved */ \
- GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
- 0xde80 - 0xdfff: render \
- 0xe000 - 0xe0ff: reserved \
- 0xe100 - 0xe8ff: render */ \
- GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
- 0xe900 - 0xe9ff: gt \
- 0xea00 - 0xefff: reserved \
- 0xf000 - 0xffff: gt */ \
- GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
- 0x10000 - 0x11fff: reserved \
- 0x12000 - 0x127ff: always on \
- 0x12800 - 0x12fff: reserved */ \
- GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
- GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x13200 - 0x133ff: VD2 (DG2 only) \
- 0x13400 - 0x13fff: reserved */ \
- GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
- 0x15000 - 0x15fff: gt (DG2 only) \
- 0x16000 - 0x16dff: reserved */ \
- GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
- 0x21000 - 0x21fff: reserved */ \
- GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
- 0x24000 - 0x2407f: always on \
- 0x24080 - 0x2417f: reserved */ \
- GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
- 0x24180 - 0x241ff: gt \
- 0x24200 - 0x249ff: reserved */ \
- GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
- 0x24a00 - 0x24a7f: render \
- 0x24a80 - 0x251ff: reserved */ \
- GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
- 0x25200 - 0x252ff: gt \
- 0x25300 - 0x25fff: reserved */ \
- GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
- 0x26000 - 0x27fff: render \
- 0x28000 - 0x29fff: reserved \
- 0x2a000 - 0x2ffff: undocumented */ \
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
- GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x1c0000 - 0x1c2bff: VD0 \
- 0x1c2c00 - 0x1c2cff: reserved \
- 0x1c2d00 - 0x1c2dff: VD0 \
- 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
- 0x1c3f00 - 0x1c3fff: VD0 */ \
- GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
- 0x1c4000 - 0x1c6bff: VD1 \
- 0x1c6c00 - 0x1c6cff: reserved \
- 0x1c6d00 - 0x1c6dff: VD1 \
- 0x1c6e00 - 0x1c7fff: reserved */ \
- GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
- 0x1c8000 - 0x1ca0ff: VE0 \
- 0x1ca100 - 0x1cbfff: reserved */ \
- GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
- GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
- GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
- GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
- GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x1d0000 - 0x1d2bff: VD2 \
- 0x1d2c00 - 0x1d2cff: reserved \
- 0x1d2d00 - 0x1d2dff: VD2 \
- 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
- 0x1d3e00 - 0x1d3eff: reserved \
- 0x1d3f00 - 0x1d3fff: VD2 */ \
- GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
- 0x1d4000 - 0x1d6bff: VD3 \
- 0x1d6c00 - 0x1d6cff: reserved \
- 0x1d6d00 - 0x1d6dff: VD3 \
- 0x1d6e00 - 0x1d7fff: reserved */ \
- GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
- 0x1d8000 - 0x1da0ff: VE1 \
- 0x1da100 - 0x1dffff: reserved */ \
- GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
- 0x1e0000 - 0x1e2bff: VD4 \
- 0x1e2c00 - 0x1e2cff: reserved \
- 0x1e2d00 - 0x1e2dff: VD4 \
- 0x1e2e00 - 0x1e3eff: reserved \
- 0x1e3f00 - 0x1e3fff: VD4 */ \
- GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
- 0x1e4000 - 0x1e6bff: VD5 \
- 0x1e6c00 - 0x1e6cff: reserved \
- 0x1e6d00 - 0x1e6dff: VD5 \
- 0x1e6e00 - 0x1e7fff: reserved */ \
- GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
- 0x1e8000 - 0x1ea0ff: VE2 \
- 0x1ea100 - 0x1effff: reserved */ \
- GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
- 0x1f0000 - 0x1f2bff: VD6 \
- 0x1f2c00 - 0x1f2cff: reserved \
- 0x1f2d00 - 0x1f2dff: VD6 \
- 0x1f2e00 - 0x1f3eff: reserved \
- 0x1f3f00 - 0x1f3fff: VD6 */ \
- GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
- 0x1f4000 - 0x1f6bff: VD7 \
- 0x1f6c00 - 0x1f6cff: reserved \
- 0x1f6d00 - 0x1f6dff: VD7 \
- 0x1f6e00 - 0x1f7fff: reserved */ \
- GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
-
-static const struct intel_forcewake_range __xehp_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_GT)
-};
-
static const struct intel_forcewake_range __dg2_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_RENDER)
-};
-
-static const struct intel_forcewake_range __pvc_fw_ranges[] = {
- GEN_FW_RANGE(0x0, 0xaff, 0),
- GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
- GEN_FW_RANGE(0xc00, 0xfff, 0),
- GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x0, 0x1fff, 0), /*
+ 0x0 - 0xaff: reserved
+ 0xb00 - 0x1fff: always on */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
- GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
- 0x4000 - 0x4aff: gt
+ GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
0x4b00 - 0x4fff: reserved
- 0x5000 - 0x51ff: gt
- 0x5200 - 0x52ff: reserved
- 0x5300 - 0x53ff: gt
- 0x5400 - 0x7fff: reserved
- 0x8000 - 0x813f: gt */
- GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8180, 0x81ff, 0),
- GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
- 0x8200 - 0x82ff: gt
- 0x8300 - 0x84ff: reserved
- 0x8500 - 0x887f: gt
- 0x8880 - 0x8a7f: reserved
- 0x8a80 - 0x8aff: gt
- 0x8b00 - 0x8fff: reserved
+ 0x5000 - 0x51ff: always on */
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
+ 0x8160 - 0x817f: reserved
+ 0x8180 - 0x81ff: always on */
+ GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
+ 0x8500 - 0x87ff: gt
+ 0x8800 - 0x8c7f: reserved
+ 0x8c80 - 0x8cff: gt (DG2 only) */
+ GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
+ 0x8d00 - 0x8dff: render (DG2 only)
+ 0x8e00 - 0x8fff: reserved */
+ GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
0x9000 - 0x947f: gt
0x9480 - 0x94cf: reserved */
GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
@@ -1673,65 +1470,114 @@ static const struct intel_forcewake_range __pvc_fw_ranges[] = {
0x9800 - 0xb4ff: gt
0xb500 - 0xbfff: reserved
0xc000 - 0xcfff: gt */
- GEN_FW_RANGE(0xd000, 0xd3ff, 0),
- GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xd000, 0xd7ff, 0),
+ GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
0xdd00 - 0xddff: gt
0xde00 - 0xde7f: reserved */
GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
- 0xde80 - 0xdeff: render
- 0xdf00 - 0xe1ff: reserved
- 0xe200 - 0xe7ff: render
- 0xe800 - 0xe8ff: reserved */
- GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
- 0xe900 - 0xe9ff: gt
- 0xea00 - 0xebff: reserved
- 0xec00 - 0xffff: gt
- 0x10000 - 0x11fff: reserved */
- GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
+ 0xde80 - 0xdfff: render
+ 0xe000 - 0xe0ff: reserved
+ 0xe100 - 0xe8ff: render */
+ GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
+ 0xe900 - 0xe9ff: gt
+ 0xea00 - 0xefff: reserved
+ 0xf000 - 0xffff: gt */
+ GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
+ 0x10000 - 0x11fff: reserved
0x12000 - 0x127ff: always on
0x12800 - 0x12fff: reserved */
- GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
- 0x13000 - 0x135ff: gt
- 0x13600 - 0x147ff: reserved
- 0x14800 - 0x153ff: gt
- 0x15400 - 0x19fff: reserved */
- GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
- 0x1a000 - 0x1ffff: render
+ GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x13200 - 0x133ff: VD2 (DG2 only)
+ 0x13400 - 0x147ff: reserved */
+ GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
+ 0x15000 - 0x15fff: gt (DG2 only)
+ 0x16000 - 0x16dff: reserved */
+ GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
+ 0x16e00 - 0x1ffff: render
0x20000 - 0x21fff: reserved */
GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
- 24000 - 0x2407f: always on
- 24080 - 0x2417f: reserved */
- GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
+ 0x24000 - 0x2407f: always on
+ 0x24080 - 0x2417f: reserved */
+ GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
0x24180 - 0x241ff: gt
- 0x24200 - 0x251ff: reserved
+ 0x24200 - 0x249ff: reserved */
+ GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
+ 0x24a00 - 0x24a7f: render
+ 0x24a80 - 0x251ff: reserved */
+ GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
0x25200 - 0x252ff: gt
0x25300 - 0x25fff: reserved */
GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
0x26000 - 0x27fff: render
- 0x28000 - 0x2ffff: reserved */
+ 0x28000 - 0x29fff: reserved
+ 0x2a000 - 0x2ffff: undocumented */
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
0x1c0000 - 0x1c2bff: VD0
0x1c2c00 - 0x1c2cff: reserved
0x1c2d00 - 0x1c2dff: VD0
- 0x1c2e00 - 0x1c3eff: reserved
+ 0x1c2e00 - 0x1c3eff: VD0
0x1c3f00 - 0x1c3fff: VD0 */
- GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
- 0x1c4000 - 0x1c6aff: VD1
- 0x1c6b00 - 0x1c7eff: reserved
- 0x1c7f00 - 0x1c7fff: VD1
- 0x1c8000 - 0x1cffff: reserved */
- GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
- 0x1d0000 - 0x1d2aff: VD2
- 0x1d2b00 - 0x1d3eff: reserved
- 0x1d3f00 - 0x1d3fff: VD2
- 0x1d4000 - 0x23ffff: reserved */
- GEN_FW_RANGE(0x240000, 0x3dffff, 0),
- GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
+ 0x1c4000 - 0x1c6bff: VD1
+ 0x1c6c00 - 0x1c6cff: reserved
+ 0x1c6d00 - 0x1c6dff: VD1
+ 0x1c6e00 - 0x1c7fff: reserved */
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
+ 0x1c8000 - 0x1ca0ff: VE0
+ 0x1ca100 - 0x1cbfff: reserved */
+ GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
+ GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x1d0000 - 0x1d2bff: VD2
+ 0x1d2c00 - 0x1d2cff: reserved
+ 0x1d2d00 - 0x1d2dff: VD2
+ 0x1d2e00 - 0x1d3dff: VD2
+ 0x1d3e00 - 0x1d3eff: reserved
+ 0x1d3f00 - 0x1d3fff: VD2 */
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
+ 0x1d4000 - 0x1d6bff: VD3
+ 0x1d6c00 - 0x1d6cff: reserved
+ 0x1d6d00 - 0x1d6dff: VD3
+ 0x1d6e00 - 0x1d7fff: reserved */
+ GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
+ 0x1d8000 - 0x1da0ff: VE1
+ 0x1da100 - 0x1dffff: reserved */
+ GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
+ 0x1e0000 - 0x1e2bff: VD4
+ 0x1e2c00 - 0x1e2cff: reserved
+ 0x1e2d00 - 0x1e2dff: VD4
+ 0x1e2e00 - 0x1e3eff: reserved
+ 0x1e3f00 - 0x1e3fff: VD4 */
+ GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
+ 0x1e4000 - 0x1e6bff: VD5
+ 0x1e6c00 - 0x1e6cff: reserved
+ 0x1e6d00 - 0x1e6dff: VD5
+ 0x1e6e00 - 0x1e7fff: reserved */
+ GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
+ 0x1e8000 - 0x1ea0ff: VE2
+ 0x1ea100 - 0x1effff: reserved */
+ GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
+ 0x1f0000 - 0x1f2bff: VD6
+ 0x1f2c00 - 0x1f2cff: reserved
+ 0x1f2d00 - 0x1f2dff: VD6
+ 0x1f2e00 - 0x1f3eff: reserved
+ 0x1f3f00 - 0x1f3fff: VD6 */
+ GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
+ 0x1f4000 - 0x1f6bff: VD7
+ 0x1f6c00 - 0x1f6cff: reserved
+ 0x1f6d00 - 0x1f6dff: VD7
+ 0x1f6e00 - 0x1f7fff: reserved */
+ GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
};
static const struct intel_forcewake_range __mtl_fw_ranges[] = {
@@ -2576,18 +2422,10 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) >= 12) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
@@ -2734,7 +2572,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
* the forcewake domain if any of the other engines
* in the same media slice are present.
*/
- if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
continue;
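
The forcewake tables touched above (the removed PVC/XeHP variants and the DG2 table that now carries the former macro contents inline) are all consumed by the same kind of lookup: an MMIO offset is matched against a sorted, non-overlapping range list to pick the forcewake domains, if any, that must be held. A minimal sketch of that lookup, with hypothetical names rather than the i915 implementation:

#include <stddef.h>
#include <stdint.h>

struct fw_range {
	uint32_t start;
	uint32_t end;
	uint32_t domains;	/* 0: no forcewake needed for this range */
};

/* Binary search over a sorted, non-overlapping range table. */
static uint32_t fw_domains_for_offset(const struct fw_range *ranges,
				      size_t count, uint32_t offset)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (offset < ranges[mid].start)
			hi = mid;
		else if (offset > ranges[mid].end)
			lo = mid + 1;
		else
			return ranges[mid].domains;
	}
	return 0;	/* offsets not listed need no forcewake */
}
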
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index ee79e0809a6d..fee76c1d2f45 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -154,6 +154,30 @@ __wait_gsc_proxy_completed(struct drm_i915_private *i915)
pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n");
}
+static void
+__wait_gsc_huc_load_completed(struct drm_i915_private *i915)
+{
+ /* this only applies to DG2, so we only care about GT0 */
+ struct intel_huc *huc = &to_gt(i915)->uc.huc;
+ bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_PXP) &&
+ intel_huc_wait_required(huc));
+ /*
+ * The GSC and PXP mei bringup depends on the kernel boot ordering, so
+ * to account for the worst case scenario the HuC code waits for up to
+ * 10s for the GSC driver to load and then another 5s for the PXP
+ * component to bind before giving up, even though those steps normally
+ * complete in less than a second from the i915 load. We match that
+ * timeout here, but we expect to bail early due to the fence being
+ * signalled even in a failure case, as it is extremely unlikely that
+ * both components will use their full timeout.
+ */
+ unsigned long timeout_ms = 15000;
+
+ if (need_to_wait &&
+ wait_for(i915_sw_fence_done(&huc->delayed_load.fence), timeout_ms))
+ pr_warn(DRIVER_NAME "Timed out waiting for huc load via GSC!\n");
+}
+
static int __run_selftests(const char *name,
struct selftest *st,
unsigned int count,
@@ -228,14 +252,16 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.live)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(live, pdev_to_i915(pdev));
+ err = run_selftests(live, i915);
if (err) {
i915_selftest.live = err;
return err;
@@ -251,14 +277,16 @@ int i915_live_selftests(struct pci_dev *pdev)
int i915_perf_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.perf)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(perf, pdev_to_i915(pdev));
+ err = run_selftests(perf, i915);
if (err) {
i915_selftest.perf = err;
return err;
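
The new __wait_gsc_huc_load_completed() above is a bounded wait: poll a completion condition, bail out early once it signals, and warn only if the full 15 s budget elapses. A generic userspace sketch of that pattern (a hypothetical helper, not i915's wait_for() macro):

#include <stdbool.h>
#include <time.h>

/* Poll done(arg) roughly once per millisecond until it returns true or
 * timeout_ms milliseconds have elapsed; returns whether it completed. */
static bool poll_until(bool (*done)(void *), void *arg, unsigned long timeout_ms)
{
	const struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	while (timeout_ms--) {
		if (done(arg))
			return true;	/* early exit once the fence signals */
		nanosleep(&one_ms, NULL);
	}
	return done(arg);	/* final check at the deadline */
}
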
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 4f98aa8a861e..41eaa9b7f67d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -71,7 +71,6 @@ static int intel_shadow_table_check(void)
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
- { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
@@ -119,8 +118,6 @@ int intel_uncore_mock_selftests(void)
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
- { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
- { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
};
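
The mock selftests above only drop the PVC/XeHP entries; what they verify for the remaining tables is essentially that each range list is well formed before the driver trusts it. A hypothetical version of that sanity check, reusing the fw_range shape sketched earlier:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fw_range { uint32_t start, end, domains; };

/* Reject tables that are inverted, unsorted, or overlapping. */
static bool fw_table_is_valid(const struct fw_range *r, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if (r[i].start > r[i].end)
			return false;
		if (i > 0 && r[i].start <= r[i - 1].end)
			return false;
	}
	return true;
}
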
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 15492b69f698..e3287f1de774 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -681,6 +681,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
if (ret)
return;
+ drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
+
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index ffa195560d0d..68291412f4cb 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -9,7 +9,6 @@
#include "vlv_sideband.h"
#include "display/intel_dpio_phy.h"
-#include "display/intel_display_types.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index ec6db8e9b403..9bc6a3884c22 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(srctree)/$(src)
+subdir-ccflags-y := -I$(src)
powervr-y := \
pvr_ccb.o \
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
index 408dbe63a90c..a0c5c41c8aa2 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -7,13 +7,14 @@
#include "pvr_rogue_mips.h"
#include <asm/page.h>
+#include <linux/math.h>
#include <linux/types.h>
/* Forward declaration from pvr_gem.h. */
struct pvr_gem_object;
-#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
- >> PAGE_SHIFT)
+#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE)
+
/**
* struct pvr_fw_mips_data - MIPS-specific data
*/
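
The PVR_MIPS_PT_PAGE_COUNT change above matters on kernels whose PAGE_SIZE is larger than the 4K MIPS firmware pagetable pages: a plain right shift truncates the backing-page count to zero, while DIV_ROUND_UP still reserves one page. A small standalone illustration (the 4-page figure below is made up for the example):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pt_bytes = 4 * 4096UL;	/* pretend: 4 pagetable pages of 4K */
	unsigned long page_size = 65536UL;	/* e.g. a 64K PAGE_SIZE kernel */
	unsigned int page_shift = 16;

	printf("shift:    %lu backing pages\n", pt_bytes >> page_shift);	      /* 0 */
	printf("round up: %lu backing pages\n", DIV_ROUND_UP(pt_bytes, page_size));  /* 1 */
	return 0;
}
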
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 31199e45b72e..73707daa4e52 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -12,6 +12,7 @@
#include <linux/build_bug.h>
#include <linux/dcache.h>
+#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index b7fef3c797e6..94af854547d6 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
/**
* pvr_vm_mips_init() - Initialise MIPS FW pagetable
@@ -46,7 +47,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
if (!mips_data)
return -ENOMEM;
- for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+ for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!mips_data->pt_pages[page_nr]) {
err = -ENOMEM;
@@ -102,7 +103,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
int page_nr;
vunmap(mips_data->pt);
- for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+ for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index dade8b59feae..704c549750f9 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -773,6 +773,13 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
.atomic_update = ipu_plane_atomic_update,
};
+static const struct drm_plane_helper_funcs ipu_primary_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
+ .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
@@ -916,7 +923,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
- drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&ipu_plane->base, &ipu_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index fbc43f243c54..6d000504e1a4 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
}
+int lima_bcast_mask_irq(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
+ return 0;
+}
+
+int lima_bcast_reset(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
int i;
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index 465ee587bceb..cd08841e4787 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
void lima_bcast_enable(struct lima_device *dev, int num_pp);
+int lima_bcast_mask_irq(struct lima_ip *ip);
+int lima_bcast_reset(struct lima_ip *ip);
+
#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 10fd9154cc46..739c865b556f 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -371,6 +371,7 @@ static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
struct drm_device *ddev;
+ const struct lima_compatible *comp;
int err;
err = lima_sched_slab_init();
@@ -384,7 +385,13 @@ static int lima_pdev_probe(struct platform_device *pdev)
}
ldev->dev = &pdev->dev;
- ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+ comp = of_device_get_match_data(&pdev->dev);
+ if (!comp) {
+ err = -ENODEV;
+ goto err_out0;
+ }
+
+ ldev->id = comp->id;
platform_set_drvdata(pdev, ldev);
@@ -459,9 +466,17 @@ static void lima_pdev_remove(struct platform_device *pdev)
lima_sched_slab_fini();
}
+static const struct lima_compatible lima_mali400_data = {
+ .id = lima_gpu_mali400,
+};
+
+static const struct lima_compatible lima_mali450_data = {
+ .id = lima_gpu_mali450,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
- { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+ { .compatible = "arm,mali-400", .data = &lima_mali400_data },
+ { .compatible = "arm,mali-450", .data = &lima_mali450_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
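
The lima probe change above swaps an enum smuggled through the OF match .data pointer for a pointer to a const descriptor struct, which also lets probe fail cleanly when no match data is attached. The shape of that pattern, sketched with made-up names:

#include <errno.h>

struct gpu_desc {
	int id;
	/* room for more per-compatible fields later */
};

static const struct gpu_desc mali400_desc = { .id = 400 };
static const struct gpu_desc mali450_desc = { .id = 450 };

struct of_match_entry {
	const char *compatible;
	const void *data;
};

static const struct of_match_entry matches[] = {
	{ .compatible = "arm,mali-400", .data = &mali400_desc },
	{ .compatible = "arm,mali-450", .data = &mali450_desc },
	{ /* sentinel */ }
};

/* Probe side: reject the device if no descriptor was attached. */
static int probe_example(const struct gpu_desc *desc)
{
	if (!desc)
		return -ENODEV;
	return desc->id;
}
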
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index c738d288547b..6706c19b166e 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -7,6 +7,7 @@
#include <drm/drm_file.h>
#include "lima_ctx.h"
+#include "lima_device.h"
extern int lima_sched_timeout_ms;
extern uint lima_heap_init_nr_pages;
@@ -39,6 +40,10 @@ struct lima_submit {
struct lima_sched_task *task;
};
+struct lima_compatible {
+ enum lima_gpu_id id;
+};
+
static inline struct lima_drm_priv *
to_lima_drm_priv(struct drm_file *file)
{
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index 6b354e2fb61d..3282997a0358 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -233,6 +233,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+}
+
static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
@@ -338,7 +345,9 @@ int lima_gp_init(struct lima_ip *ip)
void lima_gp_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
int lima_gp_pipe_init(struct lima_device *dev)
@@ -365,6 +374,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
pipe->task_recover = lima_gp_task_recover;
+ pipe->task_mask_irq = lima_gp_task_mask_irq;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index e18317c5ca8c..6611e2836bf0 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
void lima_mmu_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
void lima_mmu_flush_tlb(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index d0d2db0ef1ce..eaab4788dff4 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -286,7 +286,9 @@ int lima_pp_init(struct lima_ip *ip)
void lima_pp_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
int lima_pp_bcast_resume(struct lima_ip *ip)
@@ -319,7 +321,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
void lima_pp_bcast_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
@@ -429,6 +433,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
lima_pp_hard_reset(ip);
}
+
+ if (pipe->bcast_processor)
+ lima_bcast_reset(pipe->bcast_processor);
}
static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
@@ -437,6 +444,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ if (pipe->bcast_processor)
+ lima_bcast_mask_irq(pipe->bcast_processor);
+}
+
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;
@@ -468,6 +489,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_pp_task_fini;
pipe->task_error = lima_pp_task_error;
pipe->task_mmu_error = lima_pp_task_mmu_error;
+ pipe->task_mask_irq = lima_pp_task_mask_irq;
return 0;
}
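
The lima *_fini() changes above all add an explicit devm_free_irq() so the interrupt handler is released in the fini path itself rather than later, at device-managed cleanup time, when the state the handler touches may already be gone. Sketched with hypothetical names, assuming the usual devm_free_irq() signature:

#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical fini helper: quiesce the IRQ before the rest of teardown. */
static void example_ip_fini(struct device *dev, unsigned int irq, void *cookie)
{
	/* Releases the devm-managed handler now, not at device unbind. */
	devm_free_irq(dev, irq, cookie);
}
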
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 00b19adfc888..bbf3f8feab94 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -422,12 +422,21 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
for (i = 0; i < pipe->num_processor; i++)
synchronize_irq(pipe->processor[i]->irq);
+ if (pipe->bcast_processor)
+ synchronize_irq(pipe->bcast_processor->irq);
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
return DRM_GPU_SCHED_STAT_NOMINAL;
}
+ /*
+ * The task might still finish while this timeout handler runs.
+ * To prevent a race condition on its completion, mask all irqs
+ * on the running core until the next hard reset completes.
+ */
+ pipe->task_mask_irq(pipe);
+
if (!pipe->error)
DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 6bd4f3b70109..85b23ba901d5 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -80,6 +80,7 @@ struct lima_sched_pipe {
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
int (*task_recover)(struct lima_sched_pipe *pipe);
+ void (*task_mask_irq)(struct lima_sched_pipe *pipe);
struct work_struct recover_work;
};
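
Putting the lima pieces together: each backend now supplies a task_mask_irq callback, and the shared timeout handler calls it after synchronizing IRQs so a job that completes while the timeout runs cannot fire a late interrupt into the reset path. The shape of that hook, with hypothetical names:

struct pipe_ops {
	void (*mask_irq)(void *pipe);	/* silence the core's interrupt sources */
};

static void timeout_handler(const struct pipe_ops *ops, void *pipe)
{
	/* Quiesce interrupts first so a late completion cannot race the reset... */
	if (ops->mask_irq)
		ops->mask_irq(pipe);

	/* ...then it is safe to reset the hardware and resubmit the job. */
}
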
diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c
index 827acab580fa..03958b79f251 100644
--- a/drivers/gpu/drm/loongson/lsdc_crtc.c
+++ b/drivers/gpu/drm/loongson/lsdc_crtc.c
@@ -3,6 +3,7 @@
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index 04293df2f0de..a720d8f53209 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -19,33 +19,24 @@ static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
- ret = lsdc_bo_reserve(lbo);
- if (unlikely(ret))
- return ret;
+ dma_resv_assert_held(obj->resv);
ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
lbo->sharing_count++;
- lsdc_bo_unreserve(lbo);
-
return ret;
}
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
- int ret;
- ret = lsdc_bo_reserve(lbo);
- if (unlikely(ret))
- return;
+ dma_resv_assert_held(obj->resv);
lsdc_bo_unpin(lbo);
if (lbo->sharing_count)
lbo->sharing_count--;
-
- lsdc_bo_unreserve(lbo);
}
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 76cab28e010c..96cbe020f493 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -26,7 +26,7 @@ config DRM_MEDIATEK_DP
select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
help
DRM/KMS Display Port driver for MediaTek SoCs.
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5e4436403b8d..32a2ed6c0cfe 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-mediatek-drm-y := mtk_disp_aal.o \
+mediatek-drm-y := mtk_crtc.o \
+ mtk_ddp_comp.o \
+ mtk_disp_aal.o \
mtk_disp_ccorr.o \
mtk_disp_color.o \
mtk_disp_gamma.o \
@@ -8,16 +10,14 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_disp_ovl.o \
mtk_disp_ovl_adaptor.o \
mtk_disp_rdma.o \
- mtk_drm_crtc.o \
- mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
- mtk_drm_gem.o \
- mtk_drm_plane.o \
mtk_dsi.o \
mtk_dpi.o \
mtk_ethdr.o \
+ mtk_gem.o \
mtk_mdp_rdma.o \
- mtk_padding.o
+ mtk_padding.o \
+ mtk_plane.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index a04499c4f9ca..6f34f573e127 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -19,14 +19,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_gem.h"
-#include "mtk_drm_plane.h"
+#include "mtk_gem.h"
+#include "mtk_plane.h"
/*
- * struct mtk_drm_crtc - MediaTek specific crtc structure.
+ * struct mtk_crtc - MediaTek specific crtc structure.
* @base: crtc object.
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
@@ -38,7 +38,7 @@
*
* TODO: Needs update: this header is missing a bunch of member descriptions.
*/
-struct mtk_drm_crtc {
+struct mtk_crtc {
struct drm_crtc base;
bool enabled;
@@ -80,9 +80,9 @@ struct mtk_crtc_state {
unsigned int pending_vrefresh;
};
-static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
+static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c)
{
- return container_of(c, struct mtk_drm_crtc, base);
+ return container_of(c, struct mtk_crtc, base);
}
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
@@ -90,7 +90,7 @@ static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
return container_of(s, struct mtk_crtc_state, base);
}
-static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
@@ -104,11 +104,11 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
}
}
-static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
drm_crtc_handle_vblank(&mtk_crtc->base);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
- mtk_drm_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
}
@@ -151,9 +151,9 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
}
#endif
-static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
+static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
mtk_mutex_put(mtk_crtc->mutex);
@@ -176,7 +176,7 @@ static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
-static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
+static void mtk_crtc_reset(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
@@ -191,7 +191,7 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
-static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
@@ -208,18 +208,17 @@ static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc
return &state->base;
}
-static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static void mtk_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_mtk_crtc_state(state));
}
static enum drm_mode_status
-mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
enum drm_mode_status status = MODE_OK;
int i;
@@ -231,15 +230,15 @@ mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
return status;
}
-static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
/* Nothing to do here, but this callback is mandatory. */
return true;
}
-static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
@@ -250,7 +249,7 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
state->pending_config = true;
}
-static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
+static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc)
{
int ret;
int i;
@@ -270,7 +269,7 @@ err:
return ret;
}
-static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc)
{
int i;
@@ -279,11 +278,11 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
}
static
-struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
- struct drm_plane *plane,
- unsigned int *local_layer)
+struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp;
int i, count = 0;
unsigned int local_index = plane - mtk_crtc->planes;
@@ -306,7 +305,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
struct cmdq_cb_data *data = mssg;
struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
- struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
@@ -346,7 +345,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
}
#endif
-static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
struct drm_connector *connector;
@@ -431,7 +430,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
/* should not enable layer before crtc enabled */
plane_state->pending.enable = false;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state, NULL);
@@ -446,7 +445,7 @@ err_pm_runtime_put:
return ret;
}
-static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -491,7 +490,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
struct cmdq_pkt *cmdq_handle)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
@@ -522,8 +521,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
if (!plane_state->pending.config)
continue;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
- &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
@@ -547,8 +545,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
if (!plane_state->pending.async_config)
continue;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
- &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
@@ -563,8 +560,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
}
}
-static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
- bool needs_vblank)
+static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
@@ -636,7 +632,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
static void mtk_crtc_ddp_irq(void *data)
{
struct drm_crtc *crtc = data;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
@@ -652,9 +648,9 @@ static void mtk_crtc_ddp_irq(void *data)
mtk_drm_finish_page_flip(mtk_crtc);
}
-static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+static int mtk_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_enable_vblank(comp);
@@ -662,22 +658,22 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+static void mtk_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_disable_vblank(comp);
}
-static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_update_output(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
int crtc_index = drm_crtc_index(crtc);
int i;
struct device *dev;
struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv;
unsigned int encoder_mask = crtc_state->encoder_mask;
@@ -707,33 +703,33 @@ static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
}
}
-int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
- struct mtk_plane_state *state)
+int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
{
unsigned int local_layer;
struct mtk_ddp_comp *comp;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
return mtk_ddp_comp_layer_check(comp, local_layer, state);
return 0;
}
-void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_atomic_state *state)
+void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
if (!mtk_crtc->enabled)
return;
- mtk_drm_crtc_update_config(mtk_crtc, false);
+ mtk_crtc_update_config(mtk_crtc, false);
}
-static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
@@ -745,7 +741,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- mtk_drm_crtc_update_output(crtc, state);
+ mtk_crtc_update_output(crtc, state);
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
@@ -757,10 +753,10 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
mtk_crtc->enabled = true;
}
-static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
@@ -779,7 +775,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
mtk_crtc->pending_planes = true;
- mtk_drm_crtc_update_config(mtk_crtc, false);
+ mtk_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
/* Wait for planes to be disabled by cmdq */
if (mtk_crtc->cmdq_client.chan)
@@ -797,13 +793,13 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
mtk_crtc->enabled = false;
}
-static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
unsigned long flags;
if (mtk_crtc->event && mtk_crtc_state->base.event)
@@ -821,10 +817,10 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
}
}
-static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
if (crtc->state->color_mgmt_changed)
@@ -832,33 +828,32 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
}
- mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
+ mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .destroy = mtk_drm_crtc_destroy,
- .reset = mtk_drm_crtc_reset,
- .atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
- .atomic_destroy_state = mtk_drm_crtc_destroy_state,
- .enable_vblank = mtk_drm_crtc_enable_vblank,
- .disable_vblank = mtk_drm_crtc_disable_vblank,
+ .destroy = mtk_crtc_destroy,
+ .reset = mtk_crtc_reset,
+ .atomic_duplicate_state = mtk_crtc_duplicate_state,
+ .atomic_destroy_state = mtk_crtc_destroy_state,
+ .enable_vblank = mtk_crtc_enable_vblank,
+ .disable_vblank = mtk_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
- .mode_fixup = mtk_drm_crtc_mode_fixup,
- .mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
- .mode_valid = mtk_drm_crtc_mode_valid,
- .atomic_begin = mtk_drm_crtc_atomic_begin,
- .atomic_flush = mtk_drm_crtc_atomic_flush,
- .atomic_enable = mtk_drm_crtc_atomic_enable,
- .atomic_disable = mtk_drm_crtc_atomic_disable,
+ .mode_fixup = mtk_crtc_mode_fixup,
+ .mode_set_nofb = mtk_crtc_mode_set_nofb,
+ .mode_valid = mtk_crtc_mode_valid,
+ .atomic_begin = mtk_crtc_atomic_begin,
+ .atomic_flush = mtk_crtc_atomic_flush,
+ .atomic_enable = mtk_crtc_atomic_enable,
+ .atomic_disable = mtk_crtc_atomic_disable,
};
-static int mtk_drm_crtc_init(struct drm_device *drm,
- struct mtk_drm_crtc *mtk_crtc,
- unsigned int pipe)
+static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc,
+ unsigned int pipe)
{
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
@@ -885,8 +880,7 @@ err_cleanup_crtc:
return ret;
}
-static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
- int comp_idx)
+static int mtk_crtc_num_comp_planes(struct mtk_crtc *mtk_crtc, int comp_idx)
{
struct mtk_ddp_comp *comp;
@@ -904,8 +898,8 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
-enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
- unsigned int num_planes)
+enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx,
+ unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
@@ -916,11 +910,11 @@ enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
}
-static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
- struct mtk_drm_crtc *mtk_crtc,
- int comp_idx, int pipe)
+static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_crtc *mtk_crtc,
+ int comp_idx, int pipe)
{
- int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
int i, ret;
@@ -928,8 +922,7 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
- mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
- num_planes),
+ mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp));
@@ -941,9 +934,9 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
return 0;
}
-struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
+struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = NULL;
+ struct mtk_crtc *mtk_crtc = NULL;
if (!crtc)
return NULL;
@@ -955,14 +948,14 @@ struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
return mtk_crtc->dma_dev;
}
-int mtk_drm_crtc_create(struct drm_device *drm_dev,
- const unsigned int *path, unsigned int path_len,
- int priv_data_index, const struct mtk_drm_route *conn_routes,
- unsigned int num_conn_routes)
+int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
+ unsigned int path_len, int priv_data_index,
+ const struct mtk_drm_route *conn_routes,
+ unsigned int num_conn_routes)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
- struct mtk_drm_crtc *mtk_crtc;
+ struct mtk_crtc *mtk_crtc;
unsigned int num_comp_planes = 0;
int ret;
int i;
@@ -1009,10 +1002,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
- mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
- mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
- sizeof(*mtk_crtc->ddp_comp),
- GFP_KERNEL);
+ mtk_crtc->ddp_comp = devm_kcalloc(dev,
+ mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
+ sizeof(*mtk_crtc->ddp_comp),
+ GFP_KERNEL);
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
@@ -1047,7 +1040,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+ num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i);
mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
sizeof(struct drm_plane), GFP_KERNEL);
@@ -1055,8 +1048,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return -ENOMEM;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
- ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
- crtc_i);
+ ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i);
if (ret)
return ret;
}
@@ -1068,7 +1060,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
*/
mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
+ ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i);
if (ret < 0)
return ret;
@@ -1138,7 +1130,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->num_conn_routes = num_conn_routes;
mtk_crtc->conn_routes = conn_routes;
- /* increase ddp_comp_nr at the end of mtk_drm_crtc_create */
+ /* increase ddp_comp_nr at the end of mtk_crtc_create */
mtk_crtc->ddp_comp_nr++;
}
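
One functional detail among the mediatek renames above: the ddp_comp array allocation switches to devm_kcalloc(), which zero-initialises the array (including the optional extra conn_routes slot) in addition to the overflow-checked multiply both helpers perform. A plain-C analogue of the difference (simplified; the real helpers also tie the allocation's lifetime to the device):

#include <stdlib.h>
#include <string.h>

/* kmalloc_array-style: overflow-checked multiply, contents uninitialised. */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > (size_t)-1 / size)
		return NULL;
	return malloc(n * size);
}

/* kcalloc-style: same overflow check, but the memory comes back zeroed. */
static void *alloc_array_zeroed(size_t n, size_t size)
{
	void *p = alloc_array(n, size);

	if (p)
		memset(p, 0, n * size);
	return p;
}
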
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
new file mode 100644
index 000000000000..388e900b6f4d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_CRTC_H
+#define MTK_CRTC_H
+
+#include <drm/drm_crtc.h>
+#include "mtk_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_plane.h"
+
+#define MTK_MAX_BPC 10
+#define MTK_MIN_BPC 3
+
+void mtk_crtc_commit(struct drm_crtc *crtc);
+int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
+ unsigned int path_len, int priv_data_index,
+ const struct mtk_drm_route *conn_routes,
+ unsigned int num_conn_routes);
+int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
+void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_atomic_state *plane_state);
+struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
+
+#endif /* MTK_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index a515e96cfefc..17b036411292 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -14,11 +14,11 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <drm/drm_print.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_plane.h"
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_crtc.h"
+#include "mtk_plane.h"
#define DISP_REG_DITHER_EN 0x0000
@@ -497,10 +497,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX]
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static bool mtk_drm_find_comp_in_ddp(struct device *dev,
- const unsigned int *path,
- unsigned int path_len,
- struct mtk_ddp_comp *ddp_comp)
+static bool mtk_ddp_comp_find(struct device *dev,
+ const unsigned int *path,
+ unsigned int path_len,
+ struct mtk_ddp_comp *ddp_comp)
{
unsigned int i;
@@ -514,10 +514,10 @@ static bool mtk_drm_find_comp_in_ddp(struct device *dev,
return false;
}
-static unsigned int mtk_drm_find_comp_in_ddp_conn_path(struct device *dev,
- const struct mtk_drm_route *routes,
- unsigned int num_routes,
- struct mtk_ddp_comp *ddp_comp)
+static unsigned int mtk_ddp_comp_find_in_route(struct device *dev,
+ const struct mtk_drm_route *routes,
+ unsigned int num_routes,
+ struct mtk_ddp_comp *ddp_comp)
{
int ret;
unsigned int i;
@@ -554,26 +554,31 @@ int mtk_ddp_comp_get_id(struct device_node *node,
return -EINVAL;
}
-unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct device *dev)
+unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev)
{
struct mtk_drm_private *private = drm->dev_private;
unsigned int ret = 0;
- if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
- private->ddp_comp))
+ if (mtk_ddp_comp_find(dev,
+ private->data->main_path,
+ private->data->main_len,
+ private->ddp_comp))
ret = BIT(0);
- else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
- private->data->ext_len, private->ddp_comp))
+ else if (mtk_ddp_comp_find(dev,
+ private->data->ext_path,
+ private->data->ext_len,
+ private->ddp_comp))
ret = BIT(1);
- else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
- private->data->third_len, private->ddp_comp))
+ else if (mtk_ddp_comp_find(dev,
+ private->data->third_path,
+ private->data->third_len,
+ private->ddp_comp))
ret = BIT(2);
else
- ret = mtk_drm_find_comp_in_ddp_conn_path(dev,
- private->data->conn_routes,
- private->data->num_conn_routes,
- private->ddp_comp);
+ ret = mtk_ddp_comp_find_in_route(dev,
+ private->data->conn_routes,
+ private->data->num_conn_routes,
+ private->ddp_comp);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 93d79a1366e9..26236691ce4c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -3,8 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#ifndef MTK_DRM_DDP_COMP_H
-#define MTK_DRM_DDP_COMP_H
+#ifndef MTK_DDP_COMP_H
+#define MTK_DDP_COMP_H
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -326,8 +326,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp)
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
-unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct device *dev);
+unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev);
int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
unsigned int comp_id);
enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id);
@@ -340,4 +339,4 @@ void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset, unsigned int mask);
-#endif /* MTK_DRM_DDP_COMP_H */
+#endif /* MTK_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 40fe403086c3..3ce8f32b06d5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -11,9 +11,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_AAL_EN 0x0000
@@ -223,7 +223,6 @@ struct platform_driver mtk_disp_aal_driver = {
.remove_new = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_aal_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 465cddce0d32..df35e90dd25f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -10,9 +10,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_CCORR_EN 0x0000
@@ -214,7 +214,6 @@ struct platform_driver mtk_disp_ccorr_driver = {
.remove_new = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_ccorr_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 78ea99f1444f..7f0085be5671 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -10,9 +10,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_COLOR_CFG_MAIN 0x0400
@@ -164,7 +164,6 @@ struct platform_driver mtk_disp_color_driver = {
.remove_new = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_color_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 90e64467ea8f..082ac18fe04a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -9,8 +9,8 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_drm_plane.h"
#include "mtk_mdp_rdma.h"
+#include "mtk_plane.h"
int mtk_aal_clk_enable(struct device *dev);
void mtk_aal_clk_disable(struct device *dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index c1bc8b00d938..ca8d1f3aca03 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -11,9 +11,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_GAMMA_EN 0x0000
@@ -334,7 +334,6 @@ struct platform_driver mtk_disp_gamma_driver = {
.remove_new = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_gamma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 32a29924bd54..77c057e0e671 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -10,7 +10,7 @@
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_disp_drv.h"
@@ -376,7 +376,6 @@ struct platform_driver mtk_disp_merge_driver = {
.remove_new = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_merge_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 2bffe4245466..b552a02d7eae 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -15,9 +15,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_OVL_INTEN 0x0004
@@ -659,7 +659,6 @@ struct platform_driver mtk_disp_ovl_driver = {
.remove_new = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_ovl_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 034d31824d4d..02dd7dcdfedb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -17,9 +17,9 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
@@ -629,6 +629,5 @@ struct platform_driver mtk_disp_ovl_adaptor_driver = {
.remove_new = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index faa907f2f443..7b1a6e631200 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_RDMA_INT_ENABLE 0x0000
@@ -428,7 +428,6 @@ struct platform_driver mtk_disp_rdma_driver = {
.remove_new = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_rdma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 0ba72102636a..536366956447 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2104,7 +2104,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
!mtk_dp->train_info.cable_plugged_in) {
- ret = -EAGAIN;
+ ret = -EIO;
goto err;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index beb7d9d08e97..bfe8653005db 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -26,9 +26,9 @@
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
enum mtk_dpi_out_bit_num {
@@ -805,7 +805,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
+ dpi->encoder.possible_crtcs = mtk_find_possible_crtcs(drm_dev, dpi->dev);
ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
deleted file mode 100644
index 1f988ff1bf9f..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_CRTC_H
-#define MTK_DRM_CRTC_H
-
-#include <drm/drm_crtc.h>
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_drv.h"
-#include "mtk_drm_plane.h"
-
-#define MTK_MAX_BPC 10
-#define MTK_MIN_BPC 3
-
-void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-int mtk_drm_crtc_create(struct drm_device *drm_dev,
- const unsigned int *path,
- unsigned int path_len,
- int priv_data_index,
- const struct mtk_drm_route *conn_routes,
- unsigned int num_conn_routes);
-int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
- struct mtk_plane_state *state);
-void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_atomic_state *plane_state);
-struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc);
-
-#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74832c213092..b5f605751b0a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -24,10 +24,10 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
+#include "mtk_gem.h"
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
@@ -494,24 +494,24 @@ static int mtk_drm_kms_init(struct drm_device *drm)
priv_n = private->all_drm_private[j];
if (i == CRTC_MAIN && priv_n->data->main_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->main_path,
- priv_n->data->main_len, j,
- priv_n->data->conn_routes,
- priv_n->data->num_conn_routes);
+ ret = mtk_crtc_create(drm, priv_n->data->main_path,
+ priv_n->data->main_len, j,
+ priv_n->data->conn_routes,
+ priv_n->data->num_conn_routes);
if (ret)
goto err_component_unbind;
continue;
} else if (i == CRTC_EXT && priv_n->data->ext_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->ext_path,
- priv_n->data->ext_len, j, NULL, 0);
+ ret = mtk_crtc_create(drm, priv_n->data->ext_path,
+ priv_n->data->ext_len, j, NULL, 0);
if (ret)
goto err_component_unbind;
continue;
} else if (i == CRTC_THIRD && priv_n->data->third_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->third_path,
- priv_n->data->third_len, j, NULL, 0);
+ ret = mtk_crtc_create(drm, priv_n->data->third_path,
+ priv_n->data->third_len, j, NULL, 0);
if (ret)
goto err_component_unbind;
@@ -523,7 +523,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
/* Use OVL device for all DMA memory allocations */
crtc = drm_crtc_from_index(drm, 0);
if (crtc)
- dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
+ dma_dev = mtk_crtc_dma_dev_get(crtc);
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
@@ -576,8 +576,8 @@ DEFINE_DRM_GEM_FOPS(mtk_drm_fops);
* We need to override this because the device used to import the memory is
* not dev->dev, as drm_gem_prime_import() expects.
*/
-static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+static struct drm_gem_object *mtk_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
{
struct mtk_drm_private *private = dev->dev_private;
@@ -587,9 +587,9 @@ static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
static const struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .dumb_create = mtk_drm_gem_dumb_create,
+ .dumb_create = mtk_gem_dumb_create,
- .gem_prime_import = mtk_drm_gem_prime_import,
+ .gem_prime_import = mtk_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.fops = &mtk_drm_fops,
@@ -709,6 +709,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8195-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8195-disp-merge",
.data = (void *)MTK_DISP_MERGE },
{ .compatible = "mediatek,mt2701-disp-mutex",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 33fadb08dc1c..78d698ede1bf 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -7,13 +7,13 @@
#define MTK_DRM_DRV_H
#include <linux/io.h>
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_ddp_comp.h"
#define MAX_CONNECTOR 2
#define DDP_COMPONENT_DRM_OVL_ADAPTOR (DDP_COMPONENT_ID_MAX + 1)
#define DDP_COMPONENT_DRM_ID_MAX (DDP_COMPONENT_DRM_OVL_ADAPTOR + 1)
-enum mtk_drm_crtc_path {
+enum mtk_crtc_path {
CRTC_MAIN,
CRTC_EXT,
CRTC_THIRD,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9501f4019199..c255559cc56e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -28,8 +28,8 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DSI_START 0x00
@@ -242,22 +242,23 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
- timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
+ timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = timing->da_hs_prepare + 1;
+ timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->ta_go = 4 * timing->lpx - 2;
- timing->ta_sure = timing->lpx + 2;
- timing->ta_get = 4 * timing->lpx;
- timing->da_hs_exit = 2 * timing->lpx + 1;
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
- timing->clk_hs_post = timing->clk_hs_prepare + 8;
- timing->clk_hs_trail = timing->clk_hs_prepare;
- timing->clk_hs_zero = timing->clk_hs_trail * 4;
- timing->clk_hs_exit = 2 * timing->clk_hs_trail;
+ timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
+ timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
+ timing->clk_hs_prepare;
+ timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
@@ -662,7 +663,7 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
/*
* mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * mtk_dsi_stop() should be called after mtk_crtc_atomic_disable(),
* which needs irq for vblank, and mtk_dsi_stop() will disable irq.
* mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
* after dsi is fully set.
@@ -836,7 +837,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
return ret;
}
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+ dsi->encoder.possible_crtcs = mtk_find_possible_crtcs(drm, dsi->host.dev);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 6a5d0c345aab..156c6ff547e8 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -14,8 +14,8 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
@@ -363,7 +363,6 @@ struct platform_driver mtk_ethdr_driver = {
.remove_new = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
- .owner = THIS_MODULE,
.of_match_table = mtk_ethdr_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index 4f2e3feabc0f..a172456d1d7b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -4,6 +4,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
@@ -12,37 +13,40 @@
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
+#include "mtk_gem.h"
-static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
static const struct vm_operations_struct vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
-static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
- .free = mtk_drm_gem_free_object,
+static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
+ .free = mtk_gem_free_object,
.get_sg_table = mtk_gem_prime_get_sg_table,
- .vmap = mtk_drm_gem_prime_vmap,
- .vunmap = mtk_drm_gem_prime_vunmap,
- .mmap = mtk_drm_gem_object_mmap,
+ .vmap = mtk_gem_prime_vmap,
+ .vunmap = mtk_gem_prime_vunmap,
+ .mmap = mtk_gem_object_mmap,
.vm_ops = &vm_ops,
};
-static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
- unsigned long size)
+static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
+ unsigned long size)
{
- struct mtk_drm_gem_obj *mtk_gem_obj;
+ struct mtk_gem_obj *mtk_gem_obj;
int ret;
size = round_up(size, PAGE_SIZE);
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
- mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
+ mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
if (ret < 0) {
@@ -54,15 +58,15 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
return mtk_gem_obj;
}
-struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
- size_t size, bool alloc_kmap)
+struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
+ size_t size, bool alloc_kmap)
{
struct mtk_drm_private *priv = dev->dev_private;
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
struct drm_gem_object *obj;
int ret;
- mtk_gem = mtk_drm_gem_init(dev, size);
+ mtk_gem = mtk_gem_init(dev, size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
@@ -97,9 +101,9 @@ err_gem_free:
return ERR_PTR(ret);
}
-void mtk_drm_gem_free_object(struct drm_gem_object *obj)
+void mtk_gem_free_object(struct drm_gem_object *obj)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
if (mtk_gem->sg)
@@ -114,10 +118,10 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj)
kfree(mtk_gem);
}
-int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
int ret;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
@@ -130,7 +134,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
args->size = args->pitch;
args->size *= args->height;
- mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ mtk_gem = mtk_gem_create(dev, args->size, false);
if (IS_ERR(mtk_gem))
return PTR_ERR(mtk_gem);
@@ -148,16 +152,16 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return 0;
err_handle_create:
- mtk_drm_gem_free_object(&mtk_gem->base);
+ mtk_gem_free_object(&mtk_gem->base);
return ret;
}
-static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int mtk_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
int ret;
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
/*
@@ -188,7 +192,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
*/
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
struct sg_table *sgt;
int ret;
@@ -212,7 +216,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
@@ -220,7 +224,7 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+ mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
@@ -230,9 +234,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
return &mtk_gem->base;
}
-int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct sg_table *sgt = NULL;
unsigned int npages;
@@ -270,10 +274,9 @@ out:
return 0;
}
-void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
- struct iosys_map *map)
+void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
void *vaddr = map->vaddr;
if (!mtk_gem->pages)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h
index 78f23b07a02e..66e5f154f698 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_gem.h
@@ -3,8 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#ifndef _MTK_DRM_GEM_H_
-#define _MTK_DRM_GEM_H_
+#ifndef _MTK_GEM_H_
+#define _MTK_GEM_H_
#include <drm/drm_gem.h>
@@ -22,7 +22,7 @@
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
-struct mtk_drm_gem_obj {
+struct mtk_gem_obj {
struct drm_gem_object base;
void *cookie;
void *kvaddr;
@@ -32,18 +32,17 @@ struct mtk_drm_gem_obj {
struct page **pages;
};
-#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
+#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
-void mtk_drm_gem_free_object(struct drm_gem_object *gem);
-struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
- bool alloc_kmap);
-int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+void mtk_gem_free_object(struct drm_gem_object *gem);
+struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
+ bool alloc_kmap);
+int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
-int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
- struct iosys_map *map);
+int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index c6bdc565e4a9..6e1cca97a654 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1695,7 +1695,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
return 0;
}
-static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+static int mtk_hdmi_probe(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi;
struct device *dev = &pdev->dev;
@@ -1754,7 +1754,7 @@ err_bridge_remove:
return ret;
}
-static void mtk_drm_hdmi_remove(struct platform_device *pdev)
+static void mtk_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
@@ -1798,7 +1798,7 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
.cea_modes_only = true,
};
-static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
+static const struct of_device_id mtk_hdmi_of_ids[] = {
{ .compatible = "mediatek,mt2701-hdmi",
.data = &mtk_hdmi_conf_mt2701,
},
@@ -1809,14 +1809,14 @@ static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids);
+MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
- .probe = mtk_drm_hdmi_probe,
- .remove_new = mtk_drm_hdmi_remove,
+ .probe = mtk_hdmi_probe,
+ .remove_new = mtk_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
- .of_match_table = mtk_drm_hdmi_of_ids,
+ .of_match_table = mtk_hdmi_of_ids,
.pm = &mtk_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 54e46e440e0f..52d55861f954 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -284,8 +284,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
return PTR_ERR(ddc->clk);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
+ ddc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(ddc->regs))
return PTR_ERR(ddc->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index ee9ce9b6d078..925cbb7471ec 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -346,7 +346,6 @@ struct platform_driver mtk_mdp_rdma_driver = {
.remove_new = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
- .owner = THIS_MODULE,
.of_match_table = mtk_mdp_rdma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c
index 0d6451c149b6..85bc6768b6bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_padding.c
+++ b/drivers/gpu/drm/mediatek/mtk_padding.c
@@ -11,9 +11,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#define PADDING_CONTROL_REG 0x00
#define PADDING_BYPASS BIT(0)
@@ -154,7 +154,6 @@ struct platform_driver mtk_padding_driver = {
.remove = mtk_padding_remove,
.driver = {
.name = "mediatek-disp-padding",
- .owner = THIS_MODULE,
.of_match_table = mtk_padding_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index ddc9355b06d5..4625deb21d40 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -13,11 +13,11 @@
#include <drm/drm_gem_atomic_helper.h>
#include <linux/align.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
-#include "mtk_drm_plane.h"
+#include "mtk_gem.h"
+#include "mtk_plane.h"
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
@@ -93,8 +93,8 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
return true;
}
-static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void mtk_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_mtk_plane_state(state));
@@ -117,8 +117,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
- to_mtk_plane_state(new_plane_state));
+ ret = mtk_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
@@ -135,7 +135,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
{
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_object *gem;
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
unsigned int pitch, format;
u64 modifier;
dma_addr_t addr;
@@ -232,7 +232,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
swap(plane->state->fb, new_state->fb);
wmb(); /* Make sure the above parameters are set before update */
new_plane_state->pending.async_dirty = true;
- mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+ mtk_crtc_async_update(new_state->crtc, plane, state);
}
static const struct drm_plane_funcs mtk_plane_funcs = {
@@ -241,7 +241,7 @@ static const struct drm_plane_funcs mtk_plane_funcs = {
.destroy = drm_plane_cleanup,
.reset = mtk_plane_reset,
.atomic_duplicate_state = mtk_plane_duplicate_state,
- .atomic_destroy_state = mtk_drm_plane_destroy_state,
+ .atomic_destroy_state = mtk_plane_destroy_state,
.format_mod_supported = mtk_plane_format_mod_supported,
};
@@ -260,8 +260,8 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!new_plane_state->crtc))
return 0;
- ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
- to_mtk_plane_state(new_plane_state));
+ ret = mtk_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 99aff7da0831..231bb7aac947 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -4,8 +4,8 @@
* Author: CK Hu <ck.hu@mediatek.com>
*/
-#ifndef _MTK_DRM_PLANE_H_
-#define _MTK_DRM_PLANE_H_
+#ifndef _MTK_PLANE_H_
+#define _MTK_PLANE_H_
#include <drm/drm_crtc.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 5a9538bc0e26..5565f7777529 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -106,6 +106,8 @@
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
+#define PHY_CNTL1_INIT 0x03900000
+#define PHY_INVERT BIT(17)
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
@@ -130,6 +132,8 @@ struct meson_dw_hdmi_data {
unsigned int addr);
void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data);
+ u32 cntl0_init;
+ u32 cntl1_init;
};
struct meson_dw_hdmi {
@@ -384,26 +388,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
dw_hdmi_bus_fmt_is_420(hdmi))
mode_is_420 = true;
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Bring out of reset */
- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
-
- /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
- 0x3, 0x3);
-
- /* Enable cec_clk and hdcp22_tmdsclk_en */
- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
- 0x3 << 4, 0x3 << 4);
-
- /* Enable normal output to PHY */
- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
-
/* TMDS pattern setup */
if (mode->clock > 340000 && !mode_is_420) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
@@ -425,20 +409,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
- /* Setup PHY */
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- 0xffff << 16, 0x0390 << 16);
-
- /* BIT_INVERT */
- if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- BIT(17), 0);
- else
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- BIT(17), BIT(17));
-
/* Disable clock, fifo, fifo_wr */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
@@ -492,7 +462,9 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
DRM_DEBUG_DRIVER("\n");
- regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+ /* Fallback to init mode */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init);
}
static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
@@ -610,11 +582,22 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.fast_io = true,
};
-static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = {
.top_read = dw_hdmi_top_read,
.top_write = dw_hdmi_top_write,
.dwc_read = dw_hdmi_dwc_read,
.dwc_write = dw_hdmi_dwc_write,
+ .cntl0_init = 0x0,
+ .cntl1_init = PHY_CNTL1_INIT | PHY_INVERT,
+};
+
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
+ .cntl0_init = 0x0,
+ .cntl1_init = PHY_CNTL1_INIT,
};
static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
@@ -622,6 +605,8 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
.top_write = dw_hdmi_g12a_top_write,
.dwc_read = dw_hdmi_g12a_dwc_read,
.dwc_write = dw_hdmi_g12a_dwc_write,
+ .cntl0_init = 0x000b4242, /* Bandgap */
+ .cntl1_init = PHY_CNTL1_INIT,
};
static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
@@ -656,6 +641,13 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
meson_dw_hdmi->data->top_write(meson_dw_hdmi,
HDMITX_TOP_CLK_CNTL, 0xff);
+ /* Enable normal output to PHY */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+
+ /* Setup PHY */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init);
+
/* Enable HDMI-TX Interrupt */
meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_CORE);
@@ -865,11 +857,11 @@ static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxbb_data },
{ .compatible = "amlogic,meson-gxl-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxl_data },
{ .compatible = "amlogic,meson-gxm-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxl_data },
{ .compatible = "amlogic,meson-g12a-dw-hdmi",
.data = &meson_dw_hdmi_g12a_data },
{ }
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index a6bc1bdb3d0d..a10cff3ca1fe 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ clk_disable_unprepare(mipi_dsi->px_clk);
ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
if (ret) {
@@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ ret = clk_prepare_enable(mipi_dsi->px_clk);
+ if (ret) {
+ dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret);
+ return ret;
+ }
+
switch (mipi_dsi->dsi_device->format) {
case MIPI_DSI_FMT_RGB888:
dpi_data_format = DPI_COLOR_24BIT;
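
The hunk above brackets the clk_set_rate() call on the DSI pixel clock with clk_disable_unprepare() and clk_prepare_enable(), so the rate is changed while the clock is gated and the clock is re-enabled only after the rate change succeeds. A minimal sketch of that pattern, with a placeholder clock and rate, is:

/* Hedged sketch of the gate -> set_rate -> ungate sequence used above.
 * "px_clk" and "rate_hz" are placeholders for illustration only. */
#include <linux/clk.h>
#include <linux/device.h>

static int example_set_pixel_clock(struct device *dev, struct clk *px_clk,
				   unsigned long rate_hz)
{
	int ret;

	clk_disable_unprepare(px_clk);		/* gate before changing the rate */

	ret = clk_set_rate(px_clk, rate_hz);
	if (ret) {
		dev_err(dev, "Failed to set pixel clock rate (ret %d)\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(px_clk);	/* ungate at the new rate */
	if (ret)
		dev_err(dev, "Failed to re-enable pixel clock (ret %d)\n", ret);

	return ret;
}
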
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a82119eb58e..2a942dc6a6dc 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/10)*10);
+ FREQ_1000_1001(params[i].phy_freq/1000)*1000);
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
(vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
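
The two hunks above change the granularity at which the 1000/1001 fractional variant of the PHY frequency is computed before comparison, dividing and re-multiplying by 1000 instead of 10 around FREQ_1000_1001(). A self-contained arithmetic sketch of that kind of match follows; freq_1000_1001() is an illustrative stand-in whose truncating rounding may differ from the driver's macro, and the sample frequencies are arbitrary.

/* Hedged sketch: match a requested rate against a table rate or its 1000/1001
 * fractional variant, rounded to a chosen step (1000 in the hunks above). */
#include <stdbool.h>
#include <stdio.h>

static unsigned long long freq_1000_1001(unsigned long long freq)
{
	return (freq * 1000ULL) / 1001ULL;	/* assumed rounding: truncation */
}

static bool matches_with_fraction(unsigned long long req,
				  unsigned long long table,
				  unsigned long long step)
{
	return req == table ||
	       req == freq_1000_1001(table / step) * step;
}

int main(void)
{
	/* 148500000 Hz and a fractional neighbour; prints 1 (match). */
	printf("%d\n", matches_with_fraction(148351000ULL, 148500000ULL, 1000ULL));
	return 0;
}
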
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 765e49fd8911..58a0e62eaf18 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -366,6 +366,7 @@ struct drm_crtc_state;
struct drm_display_mode;
struct drm_plane;
struct drm_atomic_state;
+struct drm_scanout_buffer;
extern const uint32_t mgag200_primary_plane_formats[];
extern const size_t mgag200_primary_plane_formats_size;
@@ -379,12 +380,16 @@ void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state);
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *old_state);
+int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
.atomic_check = mgag200_primary_plane_helper_atomic_check, \
.atomic_update = mgag200_primary_plane_helper_atomic_update, \
.atomic_enable = mgag200_primary_plane_helper_atomic_enable, \
- .atomic_disable = mgag200_primary_plane_helper_atomic_disable
+ .atomic_disable = mgag200_primary_plane_helper_atomic_disable, \
+ .get_scanout_buffer = mgag200_primary_plane_helper_get_scanout_buffer
#define MGAG200_PRIMARY_PLANE_FUNCS \
.update_plane = drm_atomic_helper_update_plane, \
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index e17cb4c5f774..fc54851d3384 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -21,6 +21,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include "mgag200_drv.h"
@@ -546,6 +547,23 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
msleep(20);
}
+int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct mga_device *mdev = to_mga_device(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
+ }
+ return -ENODEV;
+}
+
/*
* CRTC
*/
diff --git a/drivers/gpu/drm/msm/.gitignore b/drivers/gpu/drm/msm/.gitignore
new file mode 100644
index 000000000000..9ab870da897d
--- /dev/null
+++ b/drivers/gpu/drm/msm/.gitignore
@@ -0,0 +1 @@
+generated/
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f202f26adab2..1931ecf73e32 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,7 +14,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
@@ -54,6 +54,14 @@ config DRM_MSM_GPU_SUDO
Only use this if you are a driver developer. This should *not*
be enabled for production kernels. If unsure, say N.
+config DRM_MSM_VALIDATE_XML
+ bool "Validate XML register files against schema"
+ depends on DRM_MSM && EXPERT
+ depends on $(success,$(PYTHON3) -c "import lxml")
+ help
Validate XML files with register definitions against the rules-fd schema.
This option mostly targets DRM MSM developers. If unsure, say N.
+
config DRM_MSM_MDSS
bool
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index b21ae2880c71..eb788921ff4f 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,13 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/$(src)
-ccflags-y += -I $(srctree)/$(src)/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
-ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
+ccflags-y := -I $(src)
+ccflags-y += -I $(obj)/generated
+ccflags-y += -I $(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(src)/dp
-msm-y := \
+adreno-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a2xx_gpu.o \
+ adreno/a2xx_gpummu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -17,7 +19,11 @@ msm-y := \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
-msm-$(CONFIG_DRM_MSM_HDMI) += \
+adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+
+adreno-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
+msm-display-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -30,7 +36,7 @@ msm-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
-msm-$(CONFIG_DRM_MSM_MDP4) += \
+msm-display-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_crtc.o \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
@@ -41,7 +47,7 @@ msm-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
-msm-$(CONFIG_DRM_MSM_MDP5) += \
+msm-display-$(CONFIG_DRM_MSM_MDP5) += \
disp/mdp5/mdp5_cfg.o \
disp/mdp5/mdp5_cmd_encoder.o \
disp/mdp5/mdp5_ctl.o \
@@ -54,7 +60,7 @@ msm-$(CONFIG_DRM_MSM_MDP5) += \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
-msm-$(CONFIG_DRM_MSM_DPU) += \
+msm-display-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
@@ -84,14 +90,16 @@ msm-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_vbif.o \
disp/dpu1/dpu_writeback.o
-msm-$(CONFIG_DRM_MSM_MDSS) += \
+msm-display-$(CONFIG_DRM_MSM_MDSS) += \
msm_mdss.o \
-msm-y += \
+msm-display-y += \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
+
+msm-y += \
msm_atomic.o \
msm_atomic_tracepoints.o \
msm_debugfs.o \
@@ -113,14 +121,13 @@ msm-y += \
msm_ringbuffer.o \
msm_submitqueue.o \
msm_gpu_tracepoints.o \
- msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
- dp/dp_debug.o
+msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+msm-display-$(CONFIG_DEBUG_FS) += \
+ dp/dp_debug.o
-msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
+msm-display-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_catalog.o \
dp/dp_ctrl.o \
dp/dp_display.o \
@@ -130,21 +137,76 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o \
dp/dp_utils.o
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-
-msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+msm-display-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
-msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+msm-display-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/phy/dsi_phy.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
-msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
-msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
-msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
+msm-display-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-display-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm-display-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-display-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-display-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-display-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
+
+msm-y += $(adreno-y) $(msm-display-y)
obj-$(CONFIG_DRM_MSM) += msm.o
+
+ifeq (y,$(CONFIG_DRM_MSM_VALIDATE_XML))
+ headergen-opts += --validate
+else
+ headergen-opts += --no-validate
+endif
+
+quiet_cmd_headergen = GENHDR $@
+ cmd_headergen = mkdir -p $(obj)/generated && $(PYTHON3) $(src)/registers/gen_header.py \
+ $(headergen-opts) --rnn $(src)/registers --xml $< c-defines > $@
+
+$(obj)/generated/%.xml.h: $(src)/registers/adreno/%.xml \
+ $(src)/registers/adreno/adreno_common.xml \
+ $(src)/registers/adreno/adreno_pm4.xml \
+ $(src)/registers/freedreno_copyright.xml \
+ $(src)/registers/gen_header.py \
+ $(src)/registers/rules-fd.xsd \
+ FORCE
+ $(call if_changed,headergen)
+
+$(obj)/generated/%.xml.h: $(src)/registers/display/%.xml \
+ $(src)/registers/freedreno_copyright.xml \
+ $(src)/registers/gen_header.py \
+ $(src)/registers/rules-fd.xsd \
+ FORCE
+ $(call if_changed,headergen)
+
+ADRENO_HEADERS = \
+ generated/a2xx.xml.h \
+ generated/a3xx.xml.h \
+ generated/a4xx.xml.h \
+ generated/a5xx.xml.h \
+ generated/a6xx.xml.h \
+ generated/a6xx_gmu.xml.h \
+ generated/adreno_common.xml.h \
+ generated/adreno_pm4.xml.h \
+
+DISPLAY_HEADERS = \
+ generated/dsi_phy_7nm.xml.h \
+ generated/dsi_phy_10nm.xml.h \
+ generated/dsi_phy_14nm.xml.h \
+ generated/dsi_phy_20nm.xml.h \
+ generated/dsi_phy_28nm_8960.xml.h \
+ generated/dsi_phy_28nm.xml.h \
+ generated/dsi.xml.h \
+ generated/hdmi.xml.h \
+ generated/mdp4.xml.h \
+ generated/mdp5.xml.h \
+ generated/mdp_common.xml.h \
+ generated/sfpb.xml.h
+
+$(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS))
+$(addprefix $(obj)/,$(msm-display-y)): $(addprefix $(obj)/,$(DISPLAY_HEADERS))
+
+targets += $(ADRENO_HEADERS) $(DISPLAY_HEADERS)
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
deleted file mode 100644
index 23141cbcea97..000000000000
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ /dev/null
@@ -1,3251 +0,0 @@
-#ifndef A2XX_XML
-#define A2XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum a2xx_rb_dither_type {
- DITHER_PIXEL = 0,
- DITHER_SUBPIXEL = 1,
-};
-
-enum a2xx_colorformatx {
- COLORX_4_4_4_4 = 0,
- COLORX_1_5_5_5 = 1,
- COLORX_5_6_5 = 2,
- COLORX_8 = 3,
- COLORX_8_8 = 4,
- COLORX_8_8_8_8 = 5,
- COLORX_S8_8_8_8 = 6,
- COLORX_16_FLOAT = 7,
- COLORX_16_16_FLOAT = 8,
- COLORX_16_16_16_16_FLOAT = 9,
- COLORX_32_FLOAT = 10,
- COLORX_32_32_FLOAT = 11,
- COLORX_32_32_32_32_FLOAT = 12,
- COLORX_2_3_3 = 13,
- COLORX_8_8_8 = 14,
-};
-
-enum a2xx_sq_surfaceformat {
- FMT_1_REVERSE = 0,
- FMT_1 = 1,
- FMT_8 = 2,
- FMT_1_5_5_5 = 3,
- FMT_5_6_5 = 4,
- FMT_6_5_5 = 5,
- FMT_8_8_8_8 = 6,
- FMT_2_10_10_10 = 7,
- FMT_8_A = 8,
- FMT_8_B = 9,
- FMT_8_8 = 10,
- FMT_Cr_Y1_Cb_Y0 = 11,
- FMT_Y1_Cr_Y0_Cb = 12,
- FMT_5_5_5_1 = 13,
- FMT_8_8_8_8_A = 14,
- FMT_4_4_4_4 = 15,
- FMT_8_8_8 = 16,
- FMT_DXT1 = 18,
- FMT_DXT2_3 = 19,
- FMT_DXT4_5 = 20,
- FMT_10_10_10_2 = 21,
- FMT_24_8 = 22,
- FMT_16 = 24,
- FMT_16_16 = 25,
- FMT_16_16_16_16 = 26,
- FMT_16_EXPAND = 27,
- FMT_16_16_EXPAND = 28,
- FMT_16_16_16_16_EXPAND = 29,
- FMT_16_FLOAT = 30,
- FMT_16_16_FLOAT = 31,
- FMT_16_16_16_16_FLOAT = 32,
- FMT_32 = 33,
- FMT_32_32 = 34,
- FMT_32_32_32_32 = 35,
- FMT_32_FLOAT = 36,
- FMT_32_32_FLOAT = 37,
- FMT_32_32_32_32_FLOAT = 38,
- FMT_ATI_TC_RGB = 39,
- FMT_ATI_TC_RGBA = 40,
- FMT_ATI_TC_555_565_RGB = 41,
- FMT_ATI_TC_555_565_RGBA = 42,
- FMT_ATI_TC_RGBA_INTERP = 43,
- FMT_ATI_TC_555_565_RGBA_INTERP = 44,
- FMT_ETC1_RGBA_INTERP = 46,
- FMT_ETC1_RGB = 47,
- FMT_ETC1_RGBA = 48,
- FMT_DXN = 49,
- FMT_2_3_3 = 51,
- FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_10_10_2_AS_16_16_16_16 = 55,
- FMT_32_32_32_FLOAT = 57,
- FMT_DXT3A = 58,
- FMT_DXT5A = 59,
- FMT_CTX1 = 60,
-};
-
-enum a2xx_sq_ps_vtx_mode {
- POSITION_1_VECTOR = 0,
- POSITION_2_VECTORS_UNUSED = 1,
- POSITION_2_VECTORS_SPRITE = 2,
- POSITION_2_VECTORS_EDGE = 3,
- POSITION_2_VECTORS_KILL = 4,
- POSITION_2_VECTORS_SPRITE_KILL = 5,
- POSITION_2_VECTORS_EDGE_KILL = 6,
- MULTIPASS = 7,
-};
-
-enum a2xx_sq_sample_cntl {
- CENTROIDS_ONLY = 0,
- CENTERS_ONLY = 1,
- CENTROIDS_AND_CENTERS = 2,
-};
-
-enum a2xx_dx_clip_space {
- DXCLIP_OPENGL = 0,
- DXCLIP_DIRECTX = 1,
-};
-
-enum a2xx_pa_su_sc_polymode {
- POLY_DISABLED = 0,
- POLY_DUALMODE = 1,
-};
-
-enum a2xx_rb_edram_mode {
- EDRAM_NOP = 0,
- COLOR_DEPTH = 4,
- DEPTH_ONLY = 5,
- EDRAM_COPY = 6,
-};
-
-enum a2xx_pa_sc_pattern_bit_order {
- LITTLE = 0,
- BIG = 1,
-};
-
-enum a2xx_pa_sc_auto_reset_cntl {
- NEVER = 0,
- EACH_PRIMITIVE = 1,
- EACH_PACKET = 2,
-};
-
-enum a2xx_pa_pixcenter {
- PIXCENTER_D3D = 0,
- PIXCENTER_OGL = 1,
-};
-
-enum a2xx_pa_roundmode {
- TRUNCATE = 0,
- ROUND = 1,
- ROUNDTOEVEN = 2,
- ROUNDTOODD = 3,
-};
-
-enum a2xx_pa_quantmode {
- ONE_SIXTEENTH = 0,
- ONE_EIGTH = 1,
- ONE_QUARTER = 2,
- ONE_HALF = 3,
- ONE = 4,
-};
-
-enum a2xx_rb_copy_sample_select {
- SAMPLE_0 = 0,
- SAMPLE_1 = 1,
- SAMPLE_2 = 2,
- SAMPLE_3 = 3,
- SAMPLE_01 = 4,
- SAMPLE_23 = 5,
- SAMPLE_0123 = 6,
-};
-
-enum a2xx_rb_blend_opcode {
- BLEND2_DST_PLUS_SRC = 0,
- BLEND2_SRC_MINUS_DST = 1,
- BLEND2_MIN_DST_SRC = 2,
- BLEND2_MAX_DST_SRC = 3,
- BLEND2_DST_MINUS_SRC = 4,
- BLEND2_DST_PLUS_SRC_BIAS = 5,
-};
-
-enum a2xx_su_perfcnt_select {
- PERF_PAPC_PASX_REQ = 0,
- PERF_PAPC_PASX_FIRST_VECTOR = 2,
- PERF_PAPC_PASX_SECOND_VECTOR = 3,
- PERF_PAPC_PASX_FIRST_DEAD = 4,
- PERF_PAPC_PASX_SECOND_DEAD = 5,
- PERF_PAPC_PASX_VTX_KILL_DISCARD = 6,
- PERF_PAPC_PASX_VTX_NAN_DISCARD = 7,
- PERF_PAPC_PA_INPUT_PRIM = 8,
- PERF_PAPC_PA_INPUT_NULL_PRIM = 9,
- PERF_PAPC_PA_INPUT_EVENT_FLAG = 10,
- PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11,
- PERF_PAPC_PA_INPUT_END_OF_PACKET = 12,
- PERF_PAPC_CLPR_CULL_PRIM = 13,
- PERF_PAPC_CLPR_VV_CULL_PRIM = 15,
- PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17,
- PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18,
- PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19,
- PERF_PAPC_CLPR_VV_CLIP_PRIM = 21,
- PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28,
- PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29,
- PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30,
- PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31,
- PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32,
- PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33,
- PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34,
- PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35,
- PERF_PAPC_CLSM_NULL_PRIM = 36,
- PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37,
- PERF_PAPC_CLSM_CLIP_PRIM = 38,
- PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44,
- PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45,
- PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46,
- PERF_PAPC_SU_INPUT_PRIM = 47,
- PERF_PAPC_SU_INPUT_CLIP_PRIM = 48,
- PERF_PAPC_SU_INPUT_NULL_PRIM = 49,
- PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50,
- PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51,
- PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52,
- PERF_PAPC_SU_POLYMODE_FACE_CULL = 53,
- PERF_PAPC_SU_POLYMODE_BACK_CULL = 54,
- PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55,
- PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56,
- PERF_PAPC_SU_OUTPUT_PRIM = 57,
- PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58,
- PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59,
- PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60,
- PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61,
- PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62,
- PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63,
- PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64,
- PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65,
- PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66,
- PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67,
- PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68,
- PERF_PAPC_PASX_REQ_IDLE = 69,
- PERF_PAPC_PASX_REQ_BUSY = 70,
- PERF_PAPC_PASX_REQ_STALLED = 71,
- PERF_PAPC_PASX_REC_IDLE = 72,
- PERF_PAPC_PASX_REC_BUSY = 73,
- PERF_PAPC_PASX_REC_STARVED_SX = 74,
- PERF_PAPC_PASX_REC_STALLED = 75,
- PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76,
- PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77,
- PERF_PAPC_CCGSM_IDLE = 78,
- PERF_PAPC_CCGSM_BUSY = 79,
- PERF_PAPC_CCGSM_STALLED = 80,
- PERF_PAPC_CLPRIM_IDLE = 81,
- PERF_PAPC_CLPRIM_BUSY = 82,
- PERF_PAPC_CLPRIM_STALLED = 83,
- PERF_PAPC_CLPRIM_STARVED_CCGSM = 84,
- PERF_PAPC_CLIPSM_IDLE = 85,
- PERF_PAPC_CLIPSM_BUSY = 86,
- PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87,
- PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88,
- PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89,
- PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90,
- PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91,
- PERF_PAPC_CLIPGA_IDLE = 92,
- PERF_PAPC_CLIPGA_BUSY = 93,
- PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94,
- PERF_PAPC_CLIPGA_STALLED = 95,
- PERF_PAPC_CLIP_IDLE = 96,
- PERF_PAPC_CLIP_BUSY = 97,
- PERF_PAPC_SU_IDLE = 98,
- PERF_PAPC_SU_BUSY = 99,
- PERF_PAPC_SU_STARVED_CLIP = 100,
- PERF_PAPC_SU_STALLED_SC = 101,
- PERF_PAPC_SU_FACENESS_CULL = 102,
-};
-
-enum a2xx_sc_perfcnt_select {
- SC_SR_WINDOW_VALID = 0,
- SC_CW_WINDOW_VALID = 1,
- SC_QM_WINDOW_VALID = 2,
- SC_FW_WINDOW_VALID = 3,
- SC_EZ_WINDOW_VALID = 4,
- SC_IT_WINDOW_VALID = 5,
- SC_STARVED_BY_PA = 6,
- SC_STALLED_BY_RB_TILE = 7,
- SC_STALLED_BY_RB_SAMP = 8,
- SC_STARVED_BY_RB_EZ = 9,
- SC_STALLED_BY_SAMPLE_FF = 10,
- SC_STALLED_BY_SQ = 11,
- SC_STALLED_BY_SP = 12,
- SC_TOTAL_NO_PRIMS = 13,
- SC_NON_EMPTY_PRIMS = 14,
- SC_NO_TILES_PASSING_QM = 15,
- SC_NO_PIXELS_PRE_EZ = 16,
- SC_NO_PIXELS_POST_EZ = 17,
-};
-
-enum a2xx_vgt_perfcount_select {
- VGT_SQ_EVENT_WINDOW_ACTIVE = 0,
- VGT_SQ_SEND = 1,
- VGT_SQ_STALLED = 2,
- VGT_SQ_STARVED_BUSY = 3,
- VGT_SQ_STARVED_IDLE = 4,
- VGT_SQ_STATIC = 5,
- VGT_PA_EVENT_WINDOW_ACTIVE = 6,
- VGT_PA_CLIP_V_SEND = 7,
- VGT_PA_CLIP_V_STALLED = 8,
- VGT_PA_CLIP_V_STARVED_BUSY = 9,
- VGT_PA_CLIP_V_STARVED_IDLE = 10,
- VGT_PA_CLIP_V_STATIC = 11,
- VGT_PA_CLIP_P_SEND = 12,
- VGT_PA_CLIP_P_STALLED = 13,
- VGT_PA_CLIP_P_STARVED_BUSY = 14,
- VGT_PA_CLIP_P_STARVED_IDLE = 15,
- VGT_PA_CLIP_P_STATIC = 16,
- VGT_PA_CLIP_S_SEND = 17,
- VGT_PA_CLIP_S_STALLED = 18,
- VGT_PA_CLIP_S_STARVED_BUSY = 19,
- VGT_PA_CLIP_S_STARVED_IDLE = 20,
- VGT_PA_CLIP_S_STATIC = 21,
- RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22,
- RBIU_IMMED_DATA_FIFO_STARVED = 23,
- RBIU_IMMED_DATA_FIFO_STALLED = 24,
- RBIU_DMA_REQUEST_FIFO_STARVED = 25,
- RBIU_DMA_REQUEST_FIFO_STALLED = 26,
- RBIU_DRAW_INITIATOR_FIFO_STARVED = 27,
- RBIU_DRAW_INITIATOR_FIFO_STALLED = 28,
- BIN_PRIM_NEAR_CULL = 29,
- BIN_PRIM_ZERO_CULL = 30,
- BIN_PRIM_FAR_CULL = 31,
- BIN_PRIM_BIN_CULL = 32,
- BIN_PRIM_FACE_CULL = 33,
- SPARE34 = 34,
- SPARE35 = 35,
- SPARE36 = 36,
- SPARE37 = 37,
- SPARE38 = 38,
- SPARE39 = 39,
- TE_SU_IN_VALID = 40,
- TE_SU_IN_READ = 41,
- TE_SU_IN_PRIM = 42,
- TE_SU_IN_EOP = 43,
- TE_SU_IN_NULL_PRIM = 44,
- TE_WK_IN_VALID = 45,
- TE_WK_IN_READ = 46,
- TE_OUT_PRIM_VALID = 47,
- TE_OUT_PRIM_READ = 48,
-};
-
-enum a2xx_tcr_perfcount_select {
- DGMMPD_IPMUX0_STALL = 0,
- DGMMPD_IPMUX_ALL_STALL = 4,
- OPMUX0_L2_WRITES = 5,
-};
-
-enum a2xx_tp_perfcount_select {
- POINT_QUADS = 0,
- BILIN_QUADS = 1,
- ANISO_QUADS = 2,
- MIP_QUADS = 3,
- VOL_QUADS = 4,
- MIP_VOL_QUADS = 5,
- MIP_ANISO_QUADS = 6,
- VOL_ANISO_QUADS = 7,
- ANISO_2_1_QUADS = 8,
- ANISO_4_1_QUADS = 9,
- ANISO_6_1_QUADS = 10,
- ANISO_8_1_QUADS = 11,
- ANISO_10_1_QUADS = 12,
- ANISO_12_1_QUADS = 13,
- ANISO_14_1_QUADS = 14,
- ANISO_16_1_QUADS = 15,
- MIP_VOL_ANISO_QUADS = 16,
- ALIGN_2_QUADS = 17,
- ALIGN_4_QUADS = 18,
- PIX_0_QUAD = 19,
- PIX_1_QUAD = 20,
- PIX_2_QUAD = 21,
- PIX_3_QUAD = 22,
- PIX_4_QUAD = 23,
- TP_MIPMAP_LOD0 = 24,
- TP_MIPMAP_LOD1 = 25,
- TP_MIPMAP_LOD2 = 26,
- TP_MIPMAP_LOD3 = 27,
- TP_MIPMAP_LOD4 = 28,
- TP_MIPMAP_LOD5 = 29,
- TP_MIPMAP_LOD6 = 30,
- TP_MIPMAP_LOD7 = 31,
- TP_MIPMAP_LOD8 = 32,
- TP_MIPMAP_LOD9 = 33,
- TP_MIPMAP_LOD10 = 34,
- TP_MIPMAP_LOD11 = 35,
- TP_MIPMAP_LOD12 = 36,
- TP_MIPMAP_LOD13 = 37,
- TP_MIPMAP_LOD14 = 38,
-};
-
-enum a2xx_tcm_perfcount_select {
- QUAD0_RD_LAT_FIFO_EMPTY = 0,
- QUAD0_RD_LAT_FIFO_4TH_FULL = 3,
- QUAD0_RD_LAT_FIFO_HALF_FULL = 4,
- QUAD0_RD_LAT_FIFO_FULL = 5,
- QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6,
- READ_STARVED_QUAD0 = 28,
- READ_STARVED = 32,
- READ_STALLED_QUAD0 = 33,
- READ_STALLED = 37,
- VALID_READ_QUAD0 = 38,
- TC_TP_STARVED_QUAD0 = 42,
- TC_TP_STARVED = 46,
-};
-
-enum a2xx_tcf_perfcount_select {
- VALID_CYCLES = 0,
- SINGLE_PHASES = 1,
- ANISO_PHASES = 2,
- MIP_PHASES = 3,
- VOL_PHASES = 4,
- MIP_VOL_PHASES = 5,
- MIP_ANISO_PHASES = 6,
- VOL_ANISO_PHASES = 7,
- ANISO_2_1_PHASES = 8,
- ANISO_4_1_PHASES = 9,
- ANISO_6_1_PHASES = 10,
- ANISO_8_1_PHASES = 11,
- ANISO_10_1_PHASES = 12,
- ANISO_12_1_PHASES = 13,
- ANISO_14_1_PHASES = 14,
- ANISO_16_1_PHASES = 15,
- MIP_VOL_ANISO_PHASES = 16,
- ALIGN_2_PHASES = 17,
- ALIGN_4_PHASES = 18,
- TPC_BUSY = 19,
- TPC_STALLED = 20,
- TPC_STARVED = 21,
- TPC_WORKING = 22,
- TPC_WALKER_BUSY = 23,
- TPC_WALKER_STALLED = 24,
- TPC_WALKER_WORKING = 25,
- TPC_ALIGNER_BUSY = 26,
- TPC_ALIGNER_STALLED = 27,
- TPC_ALIGNER_STALLED_BY_BLEND = 28,
- TPC_ALIGNER_STALLED_BY_CACHE = 29,
- TPC_ALIGNER_WORKING = 30,
- TPC_BLEND_BUSY = 31,
- TPC_BLEND_SYNC = 32,
- TPC_BLEND_STARVED = 33,
- TPC_BLEND_WORKING = 34,
- OPCODE_0x00 = 35,
- OPCODE_0x01 = 36,
- OPCODE_0x04 = 37,
- OPCODE_0x10 = 38,
- OPCODE_0x11 = 39,
- OPCODE_0x12 = 40,
- OPCODE_0x13 = 41,
- OPCODE_0x18 = 42,
- OPCODE_0x19 = 43,
- OPCODE_0x1A = 44,
- OPCODE_OTHER = 45,
- IN_FIFO_0_EMPTY = 56,
- IN_FIFO_0_LT_HALF_FULL = 57,
- IN_FIFO_0_HALF_FULL = 58,
- IN_FIFO_0_FULL = 59,
- IN_FIFO_TPC_EMPTY = 72,
- IN_FIFO_TPC_LT_HALF_FULL = 73,
- IN_FIFO_TPC_HALF_FULL = 74,
- IN_FIFO_TPC_FULL = 75,
- TPC_TC_XFC = 76,
- TPC_TC_STATE = 77,
- TC_STALL = 78,
- QUAD0_TAPS = 79,
- QUADS = 83,
- TCA_SYNC_STALL = 84,
- TAG_STALL = 85,
- TCB_SYNC_STALL = 88,
- TCA_VALID = 89,
- PROBES_VALID = 90,
- MISS_STALL = 91,
- FETCH_FIFO_STALL = 92,
- TCO_STALL = 93,
- ANY_STALL = 94,
- TAG_MISSES = 95,
- TAG_HITS = 96,
- SUB_TAG_MISSES = 97,
- SET0_INVALIDATES = 98,
- SET1_INVALIDATES = 99,
- SET2_INVALIDATES = 100,
- SET3_INVALIDATES = 101,
- SET0_TAG_MISSES = 102,
- SET1_TAG_MISSES = 103,
- SET2_TAG_MISSES = 104,
- SET3_TAG_MISSES = 105,
- SET0_TAG_HITS = 106,
- SET1_TAG_HITS = 107,
- SET2_TAG_HITS = 108,
- SET3_TAG_HITS = 109,
- SET0_SUB_TAG_MISSES = 110,
- SET1_SUB_TAG_MISSES = 111,
- SET2_SUB_TAG_MISSES = 112,
- SET3_SUB_TAG_MISSES = 113,
- SET0_EVICT1 = 114,
- SET0_EVICT2 = 115,
- SET0_EVICT3 = 116,
- SET0_EVICT4 = 117,
- SET0_EVICT5 = 118,
- SET0_EVICT6 = 119,
- SET0_EVICT7 = 120,
- SET0_EVICT8 = 121,
- SET1_EVICT1 = 130,
- SET1_EVICT2 = 131,
- SET1_EVICT3 = 132,
- SET1_EVICT4 = 133,
- SET1_EVICT5 = 134,
- SET1_EVICT6 = 135,
- SET1_EVICT7 = 136,
- SET1_EVICT8 = 137,
- SET2_EVICT1 = 146,
- SET2_EVICT2 = 147,
- SET2_EVICT3 = 148,
- SET2_EVICT4 = 149,
- SET2_EVICT5 = 150,
- SET2_EVICT6 = 151,
- SET2_EVICT7 = 152,
- SET2_EVICT8 = 153,
- SET3_EVICT1 = 162,
- SET3_EVICT2 = 163,
- SET3_EVICT3 = 164,
- SET3_EVICT4 = 165,
- SET3_EVICT5 = 166,
- SET3_EVICT6 = 167,
- SET3_EVICT7 = 168,
- SET3_EVICT8 = 169,
- FF_EMPTY = 178,
- FF_LT_HALF_FULL = 179,
- FF_HALF_FULL = 180,
- FF_FULL = 181,
- FF_XFC = 182,
- FF_STALLED = 183,
- FG_MASKS = 184,
- FG_LEFT_MASKS = 185,
- FG_LEFT_MASK_STALLED = 186,
- FG_LEFT_NOT_DONE_STALL = 187,
- FG_LEFT_FG_STALL = 188,
- FG_LEFT_SECTORS = 189,
- FG0_REQUESTS = 195,
- FG0_STALLED = 196,
- MEM_REQ512 = 199,
- MEM_REQ_SENT = 200,
- MEM_LOCAL_READ_REQ = 202,
- TC0_MH_STALLED = 203,
-};
-
-enum a2xx_sq_perfcnt_select {
- SQ_PIXEL_VECTORS_SUB = 0,
- SQ_VERTEX_VECTORS_SUB = 1,
- SQ_ALU0_ACTIVE_VTX_SIMD0 = 2,
- SQ_ALU1_ACTIVE_VTX_SIMD0 = 3,
- SQ_ALU0_ACTIVE_PIX_SIMD0 = 4,
- SQ_ALU1_ACTIVE_PIX_SIMD0 = 5,
- SQ_ALU0_ACTIVE_VTX_SIMD1 = 6,
- SQ_ALU1_ACTIVE_VTX_SIMD1 = 7,
- SQ_ALU0_ACTIVE_PIX_SIMD1 = 8,
- SQ_ALU1_ACTIVE_PIX_SIMD1 = 9,
- SQ_EXPORT_CYCLES = 10,
- SQ_ALU_CST_WRITTEN = 11,
- SQ_TEX_CST_WRITTEN = 12,
- SQ_ALU_CST_STALL = 13,
- SQ_ALU_TEX_STALL = 14,
- SQ_INST_WRITTEN = 15,
- SQ_BOOLEAN_WRITTEN = 16,
- SQ_LOOPS_WRITTEN = 17,
- SQ_PIXEL_SWAP_IN = 18,
- SQ_PIXEL_SWAP_OUT = 19,
- SQ_VERTEX_SWAP_IN = 20,
- SQ_VERTEX_SWAP_OUT = 21,
- SQ_ALU_VTX_INST_ISSUED = 22,
- SQ_TEX_VTX_INST_ISSUED = 23,
- SQ_VC_VTX_INST_ISSUED = 24,
- SQ_CF_VTX_INST_ISSUED = 25,
- SQ_ALU_PIX_INST_ISSUED = 26,
- SQ_TEX_PIX_INST_ISSUED = 27,
- SQ_VC_PIX_INST_ISSUED = 28,
- SQ_CF_PIX_INST_ISSUED = 29,
- SQ_ALU0_FIFO_EMPTY_SIMD0 = 30,
- SQ_ALU1_FIFO_EMPTY_SIMD0 = 31,
- SQ_ALU0_FIFO_EMPTY_SIMD1 = 32,
- SQ_ALU1_FIFO_EMPTY_SIMD1 = 33,
- SQ_ALU_NOPS = 34,
- SQ_PRED_SKIP = 35,
- SQ_SYNC_ALU_STALL_SIMD0_VTX = 36,
- SQ_SYNC_ALU_STALL_SIMD1_VTX = 37,
- SQ_SYNC_TEX_STALL_VTX = 38,
- SQ_SYNC_VC_STALL_VTX = 39,
- SQ_CONSTANTS_USED_SIMD0 = 40,
- SQ_CONSTANTS_SENT_SP_SIMD0 = 41,
- SQ_GPR_STALL_VTX = 42,
- SQ_GPR_STALL_PIX = 43,
- SQ_VTX_RS_STALL = 44,
- SQ_PIX_RS_STALL = 45,
- SQ_SX_PC_FULL = 46,
- SQ_SX_EXP_BUFF_FULL = 47,
- SQ_SX_POS_BUFF_FULL = 48,
- SQ_INTERP_QUADS = 49,
- SQ_INTERP_ACTIVE = 50,
- SQ_IN_PIXEL_STALL = 51,
- SQ_IN_VTX_STALL = 52,
- SQ_VTX_CNT = 53,
- SQ_VTX_VECTOR2 = 54,
- SQ_VTX_VECTOR3 = 55,
- SQ_VTX_VECTOR4 = 56,
- SQ_PIXEL_VECTOR1 = 57,
- SQ_PIXEL_VECTOR23 = 58,
- SQ_PIXEL_VECTOR4 = 59,
- SQ_CONSTANTS_USED_SIMD1 = 60,
- SQ_CONSTANTS_SENT_SP_SIMD1 = 61,
- SQ_SX_MEM_EXP_FULL = 62,
- SQ_ALU0_ACTIVE_VTX_SIMD2 = 63,
- SQ_ALU1_ACTIVE_VTX_SIMD2 = 64,
- SQ_ALU0_ACTIVE_PIX_SIMD2 = 65,
- SQ_ALU1_ACTIVE_PIX_SIMD2 = 66,
- SQ_ALU0_ACTIVE_VTX_SIMD3 = 67,
- SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68,
- SQ_ALU0_ACTIVE_PIX_SIMD3 = 69,
- SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70,
- SQ_ALU0_FIFO_EMPTY_SIMD2 = 71,
- SQ_ALU1_FIFO_EMPTY_SIMD2 = 72,
- SQ_ALU0_FIFO_EMPTY_SIMD3 = 73,
- SQ_ALU1_FIFO_EMPTY_SIMD3 = 74,
- SQ_SYNC_ALU_STALL_SIMD2_VTX = 75,
- SQ_PERFCOUNT_VTX_POP_THREAD = 76,
- SQ_SYNC_ALU_STALL_SIMD0_PIX = 77,
- SQ_SYNC_ALU_STALL_SIMD1_PIX = 78,
- SQ_SYNC_ALU_STALL_SIMD2_PIX = 79,
- SQ_PERFCOUNT_PIX_POP_THREAD = 80,
- SQ_SYNC_TEX_STALL_PIX = 81,
- SQ_SYNC_VC_STALL_PIX = 82,
- SQ_CONSTANTS_USED_SIMD2 = 83,
- SQ_CONSTANTS_SENT_SP_SIMD2 = 84,
- SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85,
- SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86,
- SQ_ALU0_FIFO_FULL_SIMD0 = 87,
- SQ_ALU1_FIFO_FULL_SIMD0 = 88,
- SQ_ALU0_FIFO_FULL_SIMD1 = 89,
- SQ_ALU1_FIFO_FULL_SIMD1 = 90,
- SQ_ALU0_FIFO_FULL_SIMD2 = 91,
- SQ_ALU1_FIFO_FULL_SIMD2 = 92,
- SQ_ALU0_FIFO_FULL_SIMD3 = 93,
- SQ_ALU1_FIFO_FULL_SIMD3 = 94,
- VC_PERF_STATIC = 95,
- VC_PERF_STALLED = 96,
- VC_PERF_STARVED = 97,
- VC_PERF_SEND = 98,
- VC_PERF_ACTUAL_STARVED = 99,
- PIXEL_THREAD_0_ACTIVE = 100,
- VERTEX_THREAD_0_ACTIVE = 101,
- PIXEL_THREAD_0_NUMBER = 102,
- VERTEX_THREAD_0_NUMBER = 103,
- VERTEX_EVENT_NUMBER = 104,
- PIXEL_EVENT_NUMBER = 105,
- PTRBUFF_EF_PUSH = 106,
- PTRBUFF_EF_POP_EVENT = 107,
- PTRBUFF_EF_POP_NEW_VTX = 108,
- PTRBUFF_EF_POP_DEALLOC = 109,
- PTRBUFF_EF_POP_PVECTOR = 110,
- PTRBUFF_EF_POP_PVECTOR_X = 111,
- PTRBUFF_EF_POP_PVECTOR_VNZ = 112,
- PTRBUFF_PB_DEALLOC = 113,
- PTRBUFF_PI_STATE_PPB_POP = 114,
- PTRBUFF_PI_RTR = 115,
- PTRBUFF_PI_READ_EN = 116,
- PTRBUFF_PI_BUFF_SWAP = 117,
- PTRBUFF_SQ_FREE_BUFF = 118,
- PTRBUFF_SQ_DEC = 119,
- PTRBUFF_SC_VALID_CNTL_EVENT = 120,
- PTRBUFF_SC_VALID_IJ_XFER = 121,
- PTRBUFF_SC_NEW_VECTOR_1_Q = 122,
- PTRBUFF_QUAL_NEW_VECTOR = 123,
- PTRBUFF_QUAL_EVENT = 124,
- PTRBUFF_END_BUFFER = 125,
- PTRBUFF_FILL_QUAD = 126,
- VERTS_WRITTEN_SPI = 127,
- TP_FETCH_INSTR_EXEC = 128,
- TP_FETCH_INSTR_REQ = 129,
- TP_DATA_RETURN = 130,
- SPI_WRITE_CYCLES_SP = 131,
- SPI_WRITES_SP = 132,
- SP_ALU_INSTR_EXEC = 133,
- SP_CONST_ADDR_TO_SQ = 134,
- SP_PRED_KILLS_TO_SQ = 135,
- SP_EXPORT_CYCLES_TO_SX = 136,
- SP_EXPORTS_TO_SX = 137,
- SQ_CYCLES_ELAPSED = 138,
- SQ_TCFS_OPT_ALLOC_EXEC = 139,
- SQ_TCFS_NO_OPT_ALLOC = 140,
- SQ_ALU0_NO_OPT_ALLOC = 141,
- SQ_ALU1_NO_OPT_ALLOC = 142,
- SQ_TCFS_ARB_XFC_CNT = 143,
- SQ_ALU0_ARB_XFC_CNT = 144,
- SQ_ALU1_ARB_XFC_CNT = 145,
- SQ_TCFS_CFS_UPDATE_CNT = 146,
- SQ_ALU0_CFS_UPDATE_CNT = 147,
- SQ_ALU1_CFS_UPDATE_CNT = 148,
- SQ_VTX_PUSH_THREAD_CNT = 149,
- SQ_VTX_POP_THREAD_CNT = 150,
- SQ_PIX_PUSH_THREAD_CNT = 151,
- SQ_PIX_POP_THREAD_CNT = 152,
- SQ_PIX_TOTAL = 153,
- SQ_PIX_KILLED = 154,
-};
-
-enum a2xx_sx_perfcnt_select {
- SX_EXPORT_VECTORS = 0,
- SX_DUMMY_QUADS = 1,
- SX_ALPHA_FAIL = 2,
- SX_RB_QUAD_BUSY = 3,
- SX_RB_COLOR_BUSY = 4,
- SX_RB_QUAD_STALL = 5,
- SX_RB_COLOR_STALL = 6,
-};
-
-enum a2xx_rbbm_perfcount1_sel {
- RBBM1_COUNT = 0,
- RBBM1_NRT_BUSY = 1,
- RBBM1_RB_BUSY = 2,
- RBBM1_SQ_CNTX0_BUSY = 3,
- RBBM1_SQ_CNTX17_BUSY = 4,
- RBBM1_VGT_BUSY = 5,
- RBBM1_VGT_NODMA_BUSY = 6,
- RBBM1_PA_BUSY = 7,
- RBBM1_SC_CNTX_BUSY = 8,
- RBBM1_TPC_BUSY = 9,
- RBBM1_TC_BUSY = 10,
- RBBM1_SX_BUSY = 11,
- RBBM1_CP_COHER_BUSY = 12,
- RBBM1_CP_NRT_BUSY = 13,
- RBBM1_GFX_IDLE_STALL = 14,
- RBBM1_INTERRUPT = 15,
-};
-
-enum a2xx_cp_perfcount_sel {
- ALWAYS_COUNT = 0,
- TRANS_FIFO_FULL = 1,
- TRANS_FIFO_AF = 2,
- RCIU_PFPTRANS_WAIT = 3,
- RCIU_NRTTRANS_WAIT = 6,
- CSF_NRT_READ_WAIT = 8,
- CSF_I1_FIFO_FULL = 9,
- CSF_I2_FIFO_FULL = 10,
- CSF_ST_FIFO_FULL = 11,
- CSF_RING_ROQ_FULL = 13,
- CSF_I1_ROQ_FULL = 14,
- CSF_I2_ROQ_FULL = 15,
- CSF_ST_ROQ_FULL = 16,
- MIU_TAG_MEM_FULL = 18,
- MIU_WRITECLEAN = 19,
- MIU_NRT_WRITE_STALLED = 22,
- MIU_NRT_READ_STALLED = 23,
- ME_WRITE_CONFIRM_FIFO_FULL = 24,
- ME_VS_DEALLOC_FIFO_FULL = 25,
- ME_PS_DEALLOC_FIFO_FULL = 26,
- ME_REGS_VS_EVENT_FIFO_FULL = 27,
- ME_REGS_PS_EVENT_FIFO_FULL = 28,
- ME_REGS_CF_EVENT_FIFO_FULL = 29,
- ME_MICRO_RB_STARVED = 30,
- ME_MICRO_I1_STARVED = 31,
- ME_MICRO_I2_STARVED = 32,
- ME_MICRO_ST_STARVED = 33,
- RCIU_RBBM_DWORD_SENT = 40,
- ME_BUSY_CLOCKS = 41,
- ME_WAIT_CONTEXT_AVAIL = 42,
- PFP_TYPE0_PACKET = 43,
- PFP_TYPE3_PACKET = 44,
- CSF_RB_WPTR_NEQ_RPTR = 45,
- CSF_I1_SIZE_NEQ_ZERO = 46,
- CSF_I2_SIZE_NEQ_ZERO = 47,
- CSF_RBI1I2_FETCHING = 48,
-};
-
-enum a2xx_rb_perfcnt_select {
- RBPERF_CNTX_BUSY = 0,
- RBPERF_CNTX_BUSY_MAX = 1,
- RBPERF_SX_QUAD_STARVED = 2,
- RBPERF_SX_QUAD_STARVED_MAX = 3,
- RBPERF_GA_GC_CH0_SYS_REQ = 4,
- RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5,
- RBPERF_GA_GC_CH1_SYS_REQ = 6,
- RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7,
- RBPERF_MH_STARVED = 8,
- RBPERF_MH_STARVED_MAX = 9,
- RBPERF_AZ_BC_COLOR_BUSY = 10,
- RBPERF_AZ_BC_COLOR_BUSY_MAX = 11,
- RBPERF_AZ_BC_Z_BUSY = 12,
- RBPERF_AZ_BC_Z_BUSY_MAX = 13,
- RBPERF_RB_SC_TILE_RTR_N = 14,
- RBPERF_RB_SC_TILE_RTR_N_MAX = 15,
- RBPERF_RB_SC_SAMP_RTR_N = 16,
- RBPERF_RB_SC_SAMP_RTR_N_MAX = 17,
- RBPERF_RB_SX_QUAD_RTR_N = 18,
- RBPERF_RB_SX_QUAD_RTR_N_MAX = 19,
- RBPERF_RB_SX_COLOR_RTR_N = 20,
- RBPERF_RB_SX_COLOR_RTR_N_MAX = 21,
- RBPERF_RB_SC_SAMP_LZ_BUSY = 22,
- RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23,
- RBPERF_ZXP_STALL = 24,
- RBPERF_ZXP_STALL_MAX = 25,
- RBPERF_EVENT_PENDING = 26,
- RBPERF_EVENT_PENDING_MAX = 27,
- RBPERF_RB_MH_VALID = 28,
- RBPERF_RB_MH_VALID_MAX = 29,
- RBPERF_SX_RB_QUAD_SEND = 30,
- RBPERF_SX_RB_COLOR_SEND = 31,
- RBPERF_SC_RB_TILE_SEND = 32,
- RBPERF_SC_RB_SAMPLE_SEND = 33,
- RBPERF_SX_RB_MEM_EXPORT = 34,
- RBPERF_SX_RB_QUAD_EVENT = 35,
- RBPERF_SC_RB_TILE_EVENT_FILTERED = 36,
- RBPERF_SC_RB_TILE_EVENT_ALL = 37,
- RBPERF_RB_SC_EZ_SEND = 38,
- RBPERF_RB_SX_INDEX_SEND = 39,
- RBPERF_GMEM_INTFO_RD = 40,
- RBPERF_GMEM_INTF1_RD = 41,
- RBPERF_GMEM_INTFO_WR = 42,
- RBPERF_GMEM_INTF1_WR = 43,
- RBPERF_RB_CP_CONTEXT_DONE = 44,
- RBPERF_RB_CP_CACHE_FLUSH = 45,
- RBPERF_ZPASS_DONE = 46,
- RBPERF_ZCMD_VALID = 47,
- RBPERF_CCMD_VALID = 48,
- RBPERF_ACCUM_GRANT = 49,
- RBPERF_ACCUM_C0_GRANT = 50,
- RBPERF_ACCUM_C1_GRANT = 51,
- RBPERF_ACCUM_FULL_BE_WR = 52,
- RBPERF_ACCUM_REQUEST_NO_GRANT = 53,
- RBPERF_ACCUM_TIMEOUT_PULSE = 54,
- RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55,
- RBPERF_ACCUM_CAM_HIT_FLUSHING = 56,
-};
-
-enum a2xx_mh_perfcnt_select {
- CP_R0_REQUESTS = 0,
- CP_R1_REQUESTS = 1,
- CP_R2_REQUESTS = 2,
- CP_R3_REQUESTS = 3,
- CP_R4_REQUESTS = 4,
- CP_TOTAL_READ_REQUESTS = 5,
- CP_TOTAL_WRITE_REQUESTS = 6,
- CP_TOTAL_REQUESTS = 7,
- CP_DATA_BYTES_WRITTEN = 8,
- CP_WRITE_CLEAN_RESPONSES = 9,
- CP_R0_READ_BURSTS_RECEIVED = 10,
- CP_R1_READ_BURSTS_RECEIVED = 11,
- CP_R2_READ_BURSTS_RECEIVED = 12,
- CP_R3_READ_BURSTS_RECEIVED = 13,
- CP_R4_READ_BURSTS_RECEIVED = 14,
- CP_TOTAL_READ_BURSTS_RECEIVED = 15,
- CP_R0_DATA_BEATS_READ = 16,
- CP_R1_DATA_BEATS_READ = 17,
- CP_R2_DATA_BEATS_READ = 18,
- CP_R3_DATA_BEATS_READ = 19,
- CP_R4_DATA_BEATS_READ = 20,
- CP_TOTAL_DATA_BEATS_READ = 21,
- VGT_R0_REQUESTS = 22,
- VGT_R1_REQUESTS = 23,
- VGT_TOTAL_REQUESTS = 24,
- VGT_R0_READ_BURSTS_RECEIVED = 25,
- VGT_R1_READ_BURSTS_RECEIVED = 26,
- VGT_TOTAL_READ_BURSTS_RECEIVED = 27,
- VGT_R0_DATA_BEATS_READ = 28,
- VGT_R1_DATA_BEATS_READ = 29,
- VGT_TOTAL_DATA_BEATS_READ = 30,
- TC_TOTAL_REQUESTS = 31,
- TC_ROQ_REQUESTS = 32,
- TC_INFO_SENT = 33,
- TC_READ_BURSTS_RECEIVED = 34,
- TC_DATA_BEATS_READ = 35,
- TCD_BURSTS_READ = 36,
- RB_REQUESTS = 37,
- RB_DATA_BYTES_WRITTEN = 38,
- RB_WRITE_CLEAN_RESPONSES = 39,
- AXI_READ_REQUESTS_ID_0 = 40,
- AXI_READ_REQUESTS_ID_1 = 41,
- AXI_READ_REQUESTS_ID_2 = 42,
- AXI_READ_REQUESTS_ID_3 = 43,
- AXI_READ_REQUESTS_ID_4 = 44,
- AXI_READ_REQUESTS_ID_5 = 45,
- AXI_READ_REQUESTS_ID_6 = 46,
- AXI_READ_REQUESTS_ID_7 = 47,
- AXI_TOTAL_READ_REQUESTS = 48,
- AXI_WRITE_REQUESTS_ID_0 = 49,
- AXI_WRITE_REQUESTS_ID_1 = 50,
- AXI_WRITE_REQUESTS_ID_2 = 51,
- AXI_WRITE_REQUESTS_ID_3 = 52,
- AXI_WRITE_REQUESTS_ID_4 = 53,
- AXI_WRITE_REQUESTS_ID_5 = 54,
- AXI_WRITE_REQUESTS_ID_6 = 55,
- AXI_WRITE_REQUESTS_ID_7 = 56,
- AXI_TOTAL_WRITE_REQUESTS = 57,
- AXI_TOTAL_REQUESTS_ID_0 = 58,
- AXI_TOTAL_REQUESTS_ID_1 = 59,
- AXI_TOTAL_REQUESTS_ID_2 = 60,
- AXI_TOTAL_REQUESTS_ID_3 = 61,
- AXI_TOTAL_REQUESTS_ID_4 = 62,
- AXI_TOTAL_REQUESTS_ID_5 = 63,
- AXI_TOTAL_REQUESTS_ID_6 = 64,
- AXI_TOTAL_REQUESTS_ID_7 = 65,
- AXI_TOTAL_REQUESTS = 66,
- AXI_READ_CHANNEL_BURSTS_ID_0 = 67,
- AXI_READ_CHANNEL_BURSTS_ID_1 = 68,
- AXI_READ_CHANNEL_BURSTS_ID_2 = 69,
- AXI_READ_CHANNEL_BURSTS_ID_3 = 70,
- AXI_READ_CHANNEL_BURSTS_ID_4 = 71,
- AXI_READ_CHANNEL_BURSTS_ID_5 = 72,
- AXI_READ_CHANNEL_BURSTS_ID_6 = 73,
- AXI_READ_CHANNEL_BURSTS_ID_7 = 74,
- AXI_READ_CHANNEL_TOTAL_BURSTS = 75,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82,
- AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83,
- AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84,
- AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85,
- AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86,
- AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87,
- AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88,
- AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89,
- AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90,
- AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91,
- AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92,
- AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100,
- AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101,
- AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109,
- AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110,
- AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111,
- TOTAL_MMU_MISSES = 112,
- MMU_READ_MISSES = 113,
- MMU_WRITE_MISSES = 114,
- TOTAL_MMU_HITS = 115,
- MMU_READ_HITS = 116,
- MMU_WRITE_HITS = 117,
- SPLIT_MODE_TC_HITS = 118,
- SPLIT_MODE_TC_MISSES = 119,
- SPLIT_MODE_NON_TC_HITS = 120,
- SPLIT_MODE_NON_TC_MISSES = 121,
- STALL_AWAITING_TLB_MISS_FETCH = 122,
- MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123,
- MMU_TLB_MISS_DATA_BEATS_READ = 124,
- CP_CYCLES_HELD_OFF = 125,
- VGT_CYCLES_HELD_OFF = 126,
- TC_CYCLES_HELD_OFF = 127,
- TC_ROQ_CYCLES_HELD_OFF = 128,
- TC_CYCLES_HELD_OFF_TCD_FULL = 129,
- RB_CYCLES_HELD_OFF = 130,
- TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131,
- TLB_MISS_CYCLES_HELD_OFF = 132,
- AXI_READ_REQUEST_HELD_OFF = 133,
- AXI_WRITE_REQUEST_HELD_OFF = 134,
- AXI_REQUEST_HELD_OFF = 135,
- AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136,
- AXI_WRITE_DATA_HELD_OFF = 137,
- CP_SAME_PAGE_BANK_REQUESTS = 138,
- VGT_SAME_PAGE_BANK_REQUESTS = 139,
- TC_SAME_PAGE_BANK_REQUESTS = 140,
- TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141,
- RB_SAME_PAGE_BANK_REQUESTS = 142,
- TOTAL_SAME_PAGE_BANK_REQUESTS = 143,
- CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144,
- VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145,
- TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146,
- RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147,
- TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148,
- TOTAL_MH_READ_REQUESTS = 149,
- TOTAL_MH_WRITE_REQUESTS = 150,
- TOTAL_MH_REQUESTS = 151,
- MH_BUSY = 152,
- CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153,
- VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154,
- TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155,
- RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156,
- TC_ROQ_N_VALID_ENTRIES = 157,
- ARQ_N_ENTRIES = 158,
- WDB_N_ENTRIES = 159,
- MH_READ_LATENCY_OUTST_REQ_SUM = 160,
- MC_READ_LATENCY_OUTST_REQ_SUM = 161,
- MC_TOTAL_READ_REQUESTS = 162,
- ELAPSED_CYCLES_MH_GATED_CLK = 163,
- ELAPSED_CLK_CYCLES = 164,
- CP_W_16B_REQUESTS = 165,
- CP_W_32B_REQUESTS = 166,
- TC_16B_REQUESTS = 167,
- TC_32B_REQUESTS = 168,
- PA_REQUESTS = 169,
- PA_DATA_BYTES_WRITTEN = 170,
- PA_WRITE_CLEAN_RESPONSES = 171,
- PA_CYCLES_HELD_OFF = 172,
- AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173,
- AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174,
- AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175,
- AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176,
- AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177,
- AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178,
- AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179,
- AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180,
- AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181,
-};
-
-enum perf_mode_cnt {
- PERF_STATE_RESET = 0,
- PERF_STATE_ENABLE = 1,
- PERF_STATE_FREEZE = 2,
-};
-
-enum adreno_mmu_clnt_beh {
- BEH_NEVR = 0,
- BEH_TRAN_RNG = 1,
- BEH_TRAN_FLT = 2,
-};
-
-enum sq_tex_clamp {
- SQ_TEX_WRAP = 0,
- SQ_TEX_MIRROR = 1,
- SQ_TEX_CLAMP_LAST_TEXEL = 2,
- SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
- SQ_TEX_CLAMP_HALF_BORDER = 4,
- SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
- SQ_TEX_CLAMP_BORDER = 6,
- SQ_TEX_MIRROR_ONCE_BORDER = 7,
-};
-
-enum sq_tex_swiz {
- SQ_TEX_X = 0,
- SQ_TEX_Y = 1,
- SQ_TEX_Z = 2,
- SQ_TEX_W = 3,
- SQ_TEX_ZERO = 4,
- SQ_TEX_ONE = 5,
-};
-
-enum sq_tex_filter {
- SQ_TEX_FILTER_POINT = 0,
- SQ_TEX_FILTER_BILINEAR = 1,
- SQ_TEX_FILTER_BASEMAP = 2,
- SQ_TEX_FILTER_USE_FETCH_CONST = 3,
-};
-
-enum sq_tex_aniso_filter {
- SQ_TEX_ANISO_FILTER_DISABLED = 0,
- SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
- SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
- SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
- SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
- SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
- SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
-};
-
-enum sq_tex_dimension {
- SQ_TEX_DIMENSION_1D = 0,
- SQ_TEX_DIMENSION_2D = 1,
- SQ_TEX_DIMENSION_3D = 2,
- SQ_TEX_DIMENSION_CUBE = 3,
-};
-
-enum sq_tex_border_color {
- SQ_TEX_BORDER_COLOR_BLACK = 0,
- SQ_TEX_BORDER_COLOR_WHITE = 1,
- SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
- SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
-};
-
-enum sq_tex_sign {
- SQ_TEX_SIGN_UNSIGNED = 0,
- SQ_TEX_SIGN_SIGNED = 1,
- SQ_TEX_SIGN_UNSIGNED_BIASED = 2,
- SQ_TEX_SIGN_GAMMA = 3,
-};
-
-enum sq_tex_endian {
- SQ_TEX_ENDIAN_NONE = 0,
- SQ_TEX_ENDIAN_8IN16 = 1,
- SQ_TEX_ENDIAN_8IN32 = 2,
- SQ_TEX_ENDIAN_16IN32 = 3,
-};
-
-enum sq_tex_clamp_policy {
- SQ_TEX_CLAMP_POLICY_D3D = 0,
- SQ_TEX_CLAMP_POLICY_OGL = 1,
-};
-
-enum sq_tex_num_format {
- SQ_TEX_NUM_FORMAT_FRAC = 0,
- SQ_TEX_NUM_FORMAT_INT = 1,
-};
-
-enum sq_tex_type {
- SQ_TEX_TYPE_0 = 0,
- SQ_TEX_TYPE_1 = 1,
- SQ_TEX_TYPE_2 = 2,
- SQ_TEX_TYPE_3 = 3,
-};
-
-#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
-
-#define REG_A2XX_RBBM_CNTL 0x0000003b
-
-#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
-
-#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
-
-#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
-
-#define REG_A2XX_MH_MMU_CONFIG 0x00000040
-#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
-#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
-#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
-#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
-static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
-#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
-#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
-#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
-#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
-#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
-#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
-static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
-#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
-static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
-#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
-static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
-#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
-static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
-}
-#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
-#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
-static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
-}
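
A minimal sketch (not part of the removed header) of how the MH_MMU_CONFIG field helpers above are meant to be combined, assuming the surrounding header's definitions; the function name and the choice of BEH_TRAN_RNG are illustrative only:

static inline uint32_t example_mh_mmu_config(void)
{
	/* Enable the MMU and give the RB and CP write clients range-checked
	 * translation (BEH_TRAN_RNG); the remaining client fields stay 0,
	 * i.e. BEH_NEVR. Each helper shifts and masks its value into place. */
	return A2XX_MH_MMU_CONFIG_MMU_ENABLE |
	       A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
	       A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG);
}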
-
-#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
-#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
-#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
-static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
-{
- return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
-}
-#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
-#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
-static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
-{
- return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
-}
-
-#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
-
-#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
-
-#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
-
-#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
-#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
-#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
-
-#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
-
-#define REG_A2XX_MH_MMU_MPU_END 0x00000047
-
-#define REG_A2XX_NQWAIT_UNTIL 0x00000394
-
-#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395
-
-#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396
-
-#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397
-
-#define REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398
-
-#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399
-
-#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a
-
-#define REG_A2XX_RBBM_DEBUG 0x0000039b
-
-#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
-#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001
-#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002
-#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004
-#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008
-#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010
-#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020
-#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040
-#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080
-#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100
-#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200
-#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400
-#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800
-#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000
-#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000
-#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000
-#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000
-#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000
-#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000
-#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000
-#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000
-#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000
-#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000
-#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000
-#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000
-#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000
-#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000
-#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000
-#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000
-#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000
-#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000
-#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000
-#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000
-
-#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
-#define A2XX_RBBM_PM_OVERRIDE2_PA_REG_SCLK_PM_OVERRIDE 0x00000001
-#define A2XX_RBBM_PM_OVERRIDE2_PA_PA_SCLK_PM_OVERRIDE 0x00000002
-#define A2XX_RBBM_PM_OVERRIDE2_PA_AG_SCLK_PM_OVERRIDE 0x00000004
-#define A2XX_RBBM_PM_OVERRIDE2_VGT_REG_SCLK_PM_OVERRIDE 0x00000008
-#define A2XX_RBBM_PM_OVERRIDE2_VGT_FIFOS_SCLK_PM_OVERRIDE 0x00000010
-#define A2XX_RBBM_PM_OVERRIDE2_VGT_VGT_SCLK_PM_OVERRIDE 0x00000020
-#define A2XX_RBBM_PM_OVERRIDE2_DEBUG_PERF_SCLK_PM_OVERRIDE 0x00000040
-#define A2XX_RBBM_PM_OVERRIDE2_PERM_SCLK_PM_OVERRIDE 0x00000080
-#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM0_PM_OVERRIDE 0x00000100
-#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM1_PM_OVERRIDE 0x00000200
-#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM2_PM_OVERRIDE 0x00000400
-#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM3_PM_OVERRIDE 0x00000800
-
-#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
-
-#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
-
-#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
-
-#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
-#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
-#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
-#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
-
-#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
-
-#define REG_A2XX_RBBM_INT_ACK 0x000003b6
-
-#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
-#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
-#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
-#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
-#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
-
-#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
-
-#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
-
-#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
-#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK 0x00000007
-#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT 0
-static inline uint32_t A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT(enum perf_mode_cnt val)
-{
- return ((val) << A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT) & A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK;
-}
-
-#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
-
-#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
-
-#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
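
A hedged sketch (not from the removed file) of how the CP performance counter registers above would typically be programmed; gpu_write() is a hypothetical register accessor standing in for whatever write helper the driver uses:

extern void gpu_write(uint32_t reg, uint32_t value);	/* hypothetical accessor */

static void example_cp_perfcounter_start(void)
{
	/* Pick an event from enum a2xx_cp_perfcount_sel, then enable counting. */
	gpu_write(REG_A2XX_CP_PERFCOUNTER_SELECT, ALWAYS_COUNT);
	gpu_write(REG_A2XX_CP_PERFMON_CNTL,
		  A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT(PERF_STATE_ENABLE));
	/* The 64-bit result is later read back from
	 * REG_A2XX_CP_PERFCOUNTER_LO / REG_A2XX_CP_PERFCOUNTER_HI,
	 * typically after freezing with PERF_STATE_FREEZE. */
}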
-
-#define REG_A2XX_RBBM_STATUS 0x000005d0
-#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
-#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
-static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
-{
- return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
-}
-#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
-#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
-#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
-#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
-#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
-#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
-#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
-#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
-#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
-#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
-#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
-#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
-#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
-#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
-#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
-#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
-#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
-#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
-#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
-
-#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
-#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
-#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
-static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
-{
- return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
-}
-#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
-#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
-#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
-#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
-#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
-#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
-static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
-{
- return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
-}
-#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
-#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
-#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
-#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
-#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
-static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
-{
- return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
-}
-#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
-#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
-#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
-#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
-#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
-
-#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
-#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
-#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
-#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
-
-#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
-
-#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
-
-#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
-
-#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
-
-#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
-#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
-#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
-#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
-static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A2XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
-
-static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
-
-static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
-
-static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
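
A short worked example (not part of the removed header): the parameterised VSC pipe helpers above apply a three-register stride per pipe index, so for pipe 2 they resolve as:

	/* REG_A2XX_VSC_PIPE_CONFIG(2)       -> 0x00000c06 + 0x3*2 = 0x00000c0c
	 * REG_A2XX_VSC_PIPE_DATA_ADDRESS(2) -> 0x00000c07 + 0x3*2 = 0x00000c0d
	 * REG_A2XX_VSC_PIPE_DATA_LENGTH(2)  -> 0x00000c08 + 0x3*2 = 0x00000c0e
	 */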
-
-#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
-
-#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
-
-#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
-
-#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
-
-#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
-
-#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
-
-#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
-
-#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
-#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0
-#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5
-static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val)
-{
- return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK;
-}
-
-#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
-#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001
-#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0
-#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4
-static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val)
-{
- return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK;
-}
-#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000
-#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12
-static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val)
-{
- return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK;
-}
-
-#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
-
-#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
-#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff
-#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0
-static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val)
-{
- return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK;
-}
-#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000
-#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16
-static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val)
-{
- return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK;
-}
-
-#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
-
-#define REG_A2XX_SQ_INT_CNTL 0x00000d34
-
-#define REG_A2XX_SQ_INT_STATUS 0x00000d35
-
-#define REG_A2XX_SQ_INT_ACK 0x00000d36
-
-#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
-
-#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
-
-#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
-
-#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
-
-#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
-
-#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
-
-#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
-
-#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
-
-#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
-
-#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
-
-#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
-
-#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
-
-#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
-
-#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
-
-#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
-
-#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
-#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
-
-#define REG_A2XX_TP0_CHICKEN 0x00000e1e
-
-#define REG_A2XX_RB_BC_CONTROL 0x00000f01
-#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
-#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
-#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
-static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
-{
- return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
-}
-#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
-#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
-#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
-#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
-#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
-#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
-#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
-static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
-{
- return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
-}
-#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
-#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
-#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
-#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
-#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
-#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
-static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
-{
- return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
-}
-#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
-#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
-#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
-static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
-{
- return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
-}
-#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
-#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
-static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
-{
- return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
-}
-#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
-#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
-#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
-
-#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
-
-#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
-
-#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
-
-#define REG_A2XX_RB_SURFACE_INFO 0x00002000
-#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
-#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
-static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
-{
- return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
-}
-#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
-#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
-static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
-{
- return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
-}
-
-#define REG_A2XX_RB_COLOR_INFO 0x00002001
-#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
-#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
-static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
-{
- return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
-}
-#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
-#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
-static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
-{
- return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
-}
-#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
-#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
-#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
-static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
-{
- return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
-}
-#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
-#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
-static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
-{
- return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
-}
-#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
-#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
-static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
-}
-
-#define REG_A2XX_RB_DEPTH_INFO 0x00002002
-#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
-#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
-{
- return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
-}
-#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
-#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
-static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
-}
-
-#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
-
-#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
-
-#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
-#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
-#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
-}
-#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
-#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
-#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
-}
-#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
-#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
-}
-#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
-}
-#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
-
-#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
-#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
-#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
-}
-#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
-#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
-#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
-}
-#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A2XX_UNKNOWN_2010 0x00002010
-
-#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
-
-#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
-
-#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
-
-#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
-
-#define REG_A2XX_RB_COLOR_MASK 0x00002104
-#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
-#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
-#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
-#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
-
-#define REG_A2XX_RB_BLEND_RED 0x00002105
-
-#define REG_A2XX_RB_BLEND_GREEN 0x00002106
-
-#define REG_A2XX_RB_BLEND_BLUE 0x00002107
-
-#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
-
-#define REG_A2XX_RB_FOG_COLOR 0x00002109
-#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff
-#define A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0
-static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val)
-{
- return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK;
-}
-#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00
-#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8
-static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val)
-{
- return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK;
-}
-#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000
-#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16
-static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val)
-{
- return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK;
-}
-
-#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
-#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
-#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
-static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
-}
-#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
-#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
-static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
-}
-#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
-#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
-#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
-#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
-static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
-}
-#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
-#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
-static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
-}
-#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
-#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A2XX_RB_ALPHA_REF 0x0000210e
-
-#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
-#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
-}
-
-#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
-#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
-}
-
-#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
-#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
-}
-
-#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
-#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
-}
-
-#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
-#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
-}
-
-#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
-#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
-#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
-static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
-}
-
-#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
-#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
-#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
-static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
-{
- return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
-}
-#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
-#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
-static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
-{
- return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
-}
-#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
-#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
-#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
-#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
-#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
-#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
-static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
-{
- return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
-}
-#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
-#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
-static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
-{
- return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
-}
-#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
-#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
-static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
-{
- return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
-}
-#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
-
-#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
-#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
-#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
-#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
-#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
-static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
-{
- return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
-}
-#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
-#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
-static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
-{
- return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
-}
-#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
-#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
-#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
-
-#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
-#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff
-#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0
-static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val)
-{
- return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK;
-}
-#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000
-#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16
-static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val)
-{
- return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK;
-}
-
-#define REG_A2XX_SQ_WRAPPING_0 0x00002183
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK;
-}
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000
-#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28
-static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK;
-}
-
-#define REG_A2XX_SQ_WRAPPING_1 0x00002184
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK;
-}
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000
-#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28
-static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val)
-{
- return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK;
-}
-
-#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
-#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff
-#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0
-static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val)
-{
- return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK;
-}
-#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000
-#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12
-static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK;
-}
-
-#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
-#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff
-#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0
-static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val)
-{
- return ((val) << A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK;
-}
-#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000
-#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12
-static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK;
-}
-
-#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
-
-#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
-#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
-#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
-}
-#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
-#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
-}
-#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
-#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
-}
-#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
-#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
-{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
-}
-#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
-#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
-#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
-}
-
-#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
-
-#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
-#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
-#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
-#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
-#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
-#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
-#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
-static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
-#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
-#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
-#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
-#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
-#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
-#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
-#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
-#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
-}
-#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
-#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
-static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
-}
-
-#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
-#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
-#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
-#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
-#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
-#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
-#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
-#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
-{
- return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
-}
-#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
-#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
-
-#define REG_A2XX_RB_COLORCONTROL 0x00002202
-#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
-#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
-static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
-}
-#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
-#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
-#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
-#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
-#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
-#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
-}
-#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
-#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
-static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
-}
-#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
-#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
-static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
-}
-#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
-static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
-}
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
-static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
-}
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
-static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
-}
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
-#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
-static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
-{
- return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
-}
-
-#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
-}
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
-}
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
-#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
-}
-
-#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
-#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
-#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
-#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
-#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
-static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
-{
- return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
-}
-#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
-#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
-#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
-#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
-#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
-
-#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
-#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
-#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
-#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
-#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
-#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
-static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
-{
- return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
-}
-#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
-#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
-static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
-}
-#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
-#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
-static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
-}
-#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
-#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
-#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
-#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
-#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
-#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
-#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
-#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
-#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
-#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
-#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
-#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
-#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
-#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
-#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
-#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
-
-#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
-#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
-#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
-#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
-#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
-#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
-#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
-#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
-#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
-#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
-#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
-
-#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
-}
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
-}
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
-#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
-static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
-{
- return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
-}
-
-#define REG_A2XX_RB_MODECONTROL 0x00002208
-#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
-#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
-static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
-{
- return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
-}
-
-#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
-
-#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
-
-#define REG_A2XX_CLEAR_COLOR 0x0000220b
-#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
-#define A2XX_CLEAR_COLOR_RED__SHIFT 0
-static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
-{
- return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
-}
-#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
-#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
-static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
-{
- return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
-}
-#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
-#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
-static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
-{
- return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
-}
-#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
-#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
-static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
-{
- return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
-}
-
-#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
-
-#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
-#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
-#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
-static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
-}
-#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
-#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
-static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
-}
-
-#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
-#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
-#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
-static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
-}
-#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
-#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
-static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
-}
-
-#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
-#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
-#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
-static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
-}
-
-#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
-#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
-#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
-static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
-}
-#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
-#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
-static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
-}
-#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
-#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
-static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
-{
- return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
-}
-#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
-#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
-static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
-{
- return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
-}
-
-#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
-#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001
-#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e
-#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1
-static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK;
-}
-#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100
-
-#define REG_A2XX_VGT_ENHANCE 0x00002294
-
-#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
-#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
-#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
-static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
-}
-#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
-#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
-#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
-
-#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
-#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007
-#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0
-static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK;
-}
-#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000
-#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13
-static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val)
-{
- return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK;
-}
-
-#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
-#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
-#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
-static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
-{
- return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
-}
-#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
-#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
-static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
-{
- return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
-}
-#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
-#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
-static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
-{
- return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
-}
-
-#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
-#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
-#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
-static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
-}
-
-#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
-#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
-#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
-static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
-}
-
-#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
-#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
-#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
-static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
-}
-
-#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
-#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
-#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
-static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
-{
- return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
-}
-
-#define REG_A2XX_SQ_VS_CONST 0x00002307
-#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
-#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
-static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
-{
- return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
-}
-#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
-#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
-static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
-}
-
-#define REG_A2XX_SQ_PS_CONST 0x00002308
-#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
-#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
-static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
-{
- return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
-}
-#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
-#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
-static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
-}
-
-#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
-
-#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
-
-#define REG_A2XX_PA_SC_AA_MASK 0x00002312
-
-#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
-#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007
-#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0
-static inline uint32_t A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val)
-{
- return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK;
-}
-
-#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
-#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003
-#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0
-static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val)
-{
- return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK;
-}
-
-#define REG_A2XX_RB_COPY_CONTROL 0x00002318
-#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
-#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
-static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
-{
- return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
-}
-#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
-#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
-#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
-static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
-}
-
-#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
-
-#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
-#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
-#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
-static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
-}
-
-#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
-#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
-#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
-static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
-{
- return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
-}
-#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
-#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
-#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
-static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
-{
- return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
-}
-#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
-#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
-static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
-{
- return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
-}
-#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
-#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
-static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
-}
-#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
-#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
-static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
-{
- return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
-}
-#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
-#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
-#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
-#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
-
-#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
-#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
-#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
-static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
-{
- return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
-}
-#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
-#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
-static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
-{
- return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
-}
-
-#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
-
-#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
-
-#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
-
-#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
-
-#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
-
-#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
-
-#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
-
-#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381
-
-#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382
-
-#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
-
-#define REG_A2XX_SQ_CONSTANT_0 0x00004000
-
-#define REG_A2XX_SQ_FETCH_0 0x00004800
-
-#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
-
-#define REG_A2XX_SQ_CF_LOOP 0x00004908
-
-#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
-
-#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
-
-#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
-
-#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88
-
-#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89
-
-#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a
-
-#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b
-
-#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c
-
-#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d
-
-#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e
-
-#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f
-
-#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90
-
-#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91
-
-#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92
-
-#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93
-
-#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98
-
-#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99
-
-#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a
-
-#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48
-
-#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49
-
-#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a
-
-#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b
-
-#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c
-
-#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e
-
-#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50
-
-#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52
-
-#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d
-
-#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f
-
-#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51
-
-#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53
-
-#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05
-
-#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08
-
-#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06
-
-#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09
-
-#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07
-
-#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a
-
-#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f
-
-#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20
-
-#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21
-
-#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22
-
-#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23
-
-#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24
-
-#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54
-
-#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 0x00000e57
-
-#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55
-
-#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58
-
-#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56
-
-#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59
-
-#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a
-
-#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d
-
-#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60
-
-#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63
-
-#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66
-
-#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69
-
-#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c
-
-#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f
-
-#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72
-
-#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75
-
-#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78
-
-#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b
-
-#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b
-
-#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e
-
-#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61
-
-#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64
-
-#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67
-
-#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a
-
-#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d
-
-#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70
-
-#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73
-
-#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76
-
-#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79
-
-#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c
-
-#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c
-
-#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f
-
-#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62
-
-#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65
-
-#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68
-
-#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b
-
-#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e
-
-#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71
-
-#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74
-
-#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77
-
-#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a
-
-#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d
-
-#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8
-
-#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9
-
-#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca
-
-#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb
-
-#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc
-
-#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd
-
-#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce
-
-#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf
-
-#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0
-
-#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1
-
-#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2
-
-#define REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3
-
-#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4
-
-#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8
-
-#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9
-
-#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46
-
-#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a
-
-#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47
-
-#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b
-
-#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48
-
-#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c
-
-#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49
-
-#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d
-
-#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04
-
-#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05
-
-#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06
-
-#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07
-
-#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08
-
-#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09
-
-#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a
-
-#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b
-
-#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c
-
-#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d
-
-#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e
-
-#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f
-
-#define REG_A2XX_SQ_TEX_0 0x00000000
-#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
-#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
-{
- return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
-}
-#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
-#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
-static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
-{
- return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
-}
-#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
-#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
-static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
-{
- return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
-}
-#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
-#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
-static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
-{
- return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
-}
-#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
-#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
-static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
-{
- return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
-}
-#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
-#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
-static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
-{
- return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
-}
-#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
-#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
-static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
-{
- return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
-}
-#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
-#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
-static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
-{
- return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
-}
-#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
-#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
-static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
-}
-#define A2XX_SQ_TEX_0_TILED 0x80000000
-
-#define REG_A2XX_SQ_TEX_1 0x00000001
-#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
-#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
-{
- return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
-}
-#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
-#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
-static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
-{
- return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
-}
-#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
-#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
-static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
-}
-#define A2XX_SQ_TEX_1_STACKED 0x00000400
-#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
-#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
-static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
-{
- return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
-}
-#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
-#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
-static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
-}
-
-#define REG_A2XX_SQ_TEX_2 0x00000002
-#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
-#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
-}
-#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
-#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
-static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
-}
-#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
-#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
-static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
-}
-
-#define REG_A2XX_SQ_TEX_3 0x00000003
-#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
-#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
-{
- return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
-}
-#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
-#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
-static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
-{
- return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
-}
-#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
-#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
-static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
-{
- return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
-}
-#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
-#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
-static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
-{
- return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
-}
-#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
-#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
-static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
-{
- return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
-}
-#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
-#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
-static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val)
-{
- return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
-}
-#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
-#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
-static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
-{
- return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
-#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
-static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
-{
- return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
-#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
-static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
-{
- return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
-#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
-static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
-{
- return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
-#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
-static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
-}
-
-#define REG_A2XX_SQ_TEX_4 0x00000004
-#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
-#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
-{
- return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
-#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
-static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
-{
- return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
-}
-#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
-#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
-static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
-}
-#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
-#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
-static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
-}
-#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
-#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
-#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
-#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
-static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
-{
- return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
-}
-#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
-#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
-static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
-}
-#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
-#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
-static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
-}
-
-#define REG_A2XX_SQ_TEX_5 0x00000005
-#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
-#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
-{
- return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
-}
-#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
-#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
-#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
-static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
-{
- return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
-}
-#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
-#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
-static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
-{
- return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
-}
-#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
-#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
-static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
-{
- return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
-}
-#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
-#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
-#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
-static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A2XX_XML */
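
The a2xx.xml.h removal above consists entirely of the autogenerated rules-ng-ng pattern: every register field gets a __MASK/__SHIFT pair plus a static inline packer that shifts a value into position and masks it to the field width, while single-bit fields are plain constants and float fields go through fui() or a fixed-point multiply first. A minimal standalone sketch of that pattern follows; EXAMPLE_FIELD and EXAMPLE_FLAG are hypothetical names for illustration, not identifiers from the deleted header.

/* Sketch of the generated pack-helper pattern; hypothetical field names. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FLAG          0x00000001  /* single-bit field: plain constant */
#define EXAMPLE_FIELD__MASK   0x0000ff00
#define EXAMPLE_FIELD__SHIFT  8

static inline uint32_t EXAMPLE_FIELD(uint32_t val)
{
	/* place the value at its bit position, then clamp to the field width */
	return (val << EXAMPLE_FIELD__SHIFT) & EXAMPLE_FIELD__MASK;
}

int main(void)
{
	/* register words are built by OR-ing the packed fields together */
	uint32_t reg = EXAMPLE_FIELD(0x3f) | EXAMPLE_FLAG;
	printf("0x%08x\n", reg);  /* 0x00003f01 */
	return 0;
}
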
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 0d8133f3174b..0dc255ddf5ce 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+ a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
@@ -469,7 +469,7 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
static struct msm_gem_address_space *
a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
- struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
index 161a075f94af..53702f19990f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -19,4 +19,8 @@ struct a2xx_gpu {
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index f7d1945e0c9f..39641551eeb6 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -5,30 +5,33 @@
#include "msm_drv.h"
#include "msm_mmu.h"
-#include "adreno/adreno_gpu.h"
-#include "adreno/a2xx.xml.h"
-struct msm_gpummu {
+#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpummu {
struct msm_mmu base;
struct msm_gpu *gpu;
dma_addr_t pt_base;
uint32_t *table;
};
-#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+#define to_a2xx_gpummu(x) container_of(x, struct a2xx_gpummu, base)
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static void msm_gpummu_detach(struct msm_mmu *mmu)
+static void a2xx_gpummu_detach(struct msm_mmu *mmu)
{
}
-static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, size_t len, int prot)
{
- struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
@@ -53,9 +56,9 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
return 0;
}
-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
+static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
- struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
unsigned i;
@@ -68,13 +71,13 @@ static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
return 0;
}
-static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
+static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
{
}
-static void msm_gpummu_destroy(struct msm_mmu *mmu)
+static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
- struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
DMA_ATTR_FORCE_CONTIGUOUS);
@@ -83,16 +86,16 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .detach = msm_gpummu_detach,
- .map = msm_gpummu_map,
- .unmap = msm_gpummu_unmap,
- .destroy = msm_gpummu_destroy,
- .resume_translation = msm_gpummu_resume_translation,
+ .detach = a2xx_gpummu_detach,
+ .map = a2xx_gpummu_map,
+ .unmap = a2xx_gpummu_unmap,
+ .destroy = a2xx_gpummu_destroy,
+ .resume_translation = a2xx_gpummu_resume_translation,
};
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
- struct msm_gpummu *gpummu;
+ struct a2xx_gpummu *gpummu;
gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
if (!gpummu)
@@ -111,10 +114,10 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
return &gpummu->base;
}
-void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error)
{
- dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+ dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base;
*pt_base = base;
*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
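
The hunks above rename the a2xx-only GPU MMU helpers from msm_gpummu_* to a2xx_gpummu_* and move the implementation next to the a2xx GPU code. As orientation only, here is a minimal, hypothetical sketch (not part of the patch) of how the renamed entry points declared in a2xx_gpu.h are used by the a2xx driver; a2xx_setup_mmu_example is an illustrative name, and the ERR_PTR-style error handling is an assumption based on the usual convention for these constructors.

/* Illustrative only -- not part of this patch. Mirrors the call sites
 * changed in a2xx_gpu.c above, using the prototypes added to a2xx_gpu.h. */
static int a2xx_setup_mmu_example(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);	/* was msm_gpummu_new() */
	dma_addr_t pt_base, tran_error;

	if (IS_ERR(mmu))	/* assumes ERR_PTR-style failure, as is conventional */
		return PTR_ERR(mmu);

	/* was msm_gpummu_params(); returns the page-table base and the
	 * translation-error address that a2xx_hw_init() programs into the GPU */
	a2xx_gpummu_params(mmu, &pt_base, &tran_error);

	return 0;
}
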
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
deleted file mode 100644
index 5edd740ad3bb..000000000000
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ /dev/null
@@ -1,3268 +0,0 @@
-#ifndef A3XX_XML
-#define A3XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84323 bytes, from Wed Aug 23 10:39:39 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum a3xx_tile_mode {
- LINEAR = 0,
- TILE_4X4 = 1,
- TILE_32X32 = 2,
- TILE_4X2 = 3,
-};
-
-enum a3xx_state_block_id {
- HLSQ_BLOCK_ID_TP_TEX = 2,
- HLSQ_BLOCK_ID_TP_MIPMAP = 3,
- HLSQ_BLOCK_ID_SP_VS = 4,
- HLSQ_BLOCK_ID_SP_FS = 6,
-};
-
-enum a3xx_cache_opcode {
- INVALIDATE = 1,
-};
-
-enum a3xx_vtx_fmt {
- VFMT_32_FLOAT = 0,
- VFMT_32_32_FLOAT = 1,
- VFMT_32_32_32_FLOAT = 2,
- VFMT_32_32_32_32_FLOAT = 3,
- VFMT_16_FLOAT = 4,
- VFMT_16_16_FLOAT = 5,
- VFMT_16_16_16_FLOAT = 6,
- VFMT_16_16_16_16_FLOAT = 7,
- VFMT_32_FIXED = 8,
- VFMT_32_32_FIXED = 9,
- VFMT_32_32_32_FIXED = 10,
- VFMT_32_32_32_32_FIXED = 11,
- VFMT_16_SINT = 16,
- VFMT_16_16_SINT = 17,
- VFMT_16_16_16_SINT = 18,
- VFMT_16_16_16_16_SINT = 19,
- VFMT_16_UINT = 20,
- VFMT_16_16_UINT = 21,
- VFMT_16_16_16_UINT = 22,
- VFMT_16_16_16_16_UINT = 23,
- VFMT_16_SNORM = 24,
- VFMT_16_16_SNORM = 25,
- VFMT_16_16_16_SNORM = 26,
- VFMT_16_16_16_16_SNORM = 27,
- VFMT_16_UNORM = 28,
- VFMT_16_16_UNORM = 29,
- VFMT_16_16_16_UNORM = 30,
- VFMT_16_16_16_16_UNORM = 31,
- VFMT_32_UINT = 32,
- VFMT_32_32_UINT = 33,
- VFMT_32_32_32_UINT = 34,
- VFMT_32_32_32_32_UINT = 35,
- VFMT_32_SINT = 36,
- VFMT_32_32_SINT = 37,
- VFMT_32_32_32_SINT = 38,
- VFMT_32_32_32_32_SINT = 39,
- VFMT_8_UINT = 40,
- VFMT_8_8_UINT = 41,
- VFMT_8_8_8_UINT = 42,
- VFMT_8_8_8_8_UINT = 43,
- VFMT_8_UNORM = 44,
- VFMT_8_8_UNORM = 45,
- VFMT_8_8_8_UNORM = 46,
- VFMT_8_8_8_8_UNORM = 47,
- VFMT_8_SINT = 48,
- VFMT_8_8_SINT = 49,
- VFMT_8_8_8_SINT = 50,
- VFMT_8_8_8_8_SINT = 51,
- VFMT_8_SNORM = 52,
- VFMT_8_8_SNORM = 53,
- VFMT_8_8_8_SNORM = 54,
- VFMT_8_8_8_8_SNORM = 55,
- VFMT_10_10_10_2_UINT = 56,
- VFMT_10_10_10_2_UNORM = 57,
- VFMT_10_10_10_2_SINT = 58,
- VFMT_10_10_10_2_SNORM = 59,
- VFMT_2_10_10_10_UINT = 60,
- VFMT_2_10_10_10_UNORM = 61,
- VFMT_2_10_10_10_SINT = 62,
- VFMT_2_10_10_10_SNORM = 63,
- VFMT_NONE = 255,
-};
-
-enum a3xx_tex_fmt {
- TFMT_5_6_5_UNORM = 4,
- TFMT_5_5_5_1_UNORM = 5,
- TFMT_4_4_4_4_UNORM = 7,
- TFMT_Z16_UNORM = 9,
- TFMT_X8Z24_UNORM = 10,
- TFMT_Z32_FLOAT = 11,
- TFMT_UV_64X32 = 16,
- TFMT_VU_64X32 = 17,
- TFMT_Y_64X32 = 18,
- TFMT_NV12_64X32 = 19,
- TFMT_UV_LINEAR = 20,
- TFMT_VU_LINEAR = 21,
- TFMT_Y_LINEAR = 22,
- TFMT_NV12_LINEAR = 23,
- TFMT_I420_Y = 24,
- TFMT_I420_U = 26,
- TFMT_I420_V = 27,
- TFMT_ATC_RGB = 32,
- TFMT_ATC_RGBA_EXPLICIT = 33,
- TFMT_ETC1 = 34,
- TFMT_ATC_RGBA_INTERPOLATED = 35,
- TFMT_DXT1 = 36,
- TFMT_DXT3 = 37,
- TFMT_DXT5 = 38,
- TFMT_2_10_10_10_UNORM = 40,
- TFMT_10_10_10_2_UNORM = 41,
- TFMT_9_9_9_E5_FLOAT = 42,
- TFMT_11_11_10_FLOAT = 43,
- TFMT_A8_UNORM = 44,
- TFMT_L8_UNORM = 45,
- TFMT_L8_A8_UNORM = 47,
- TFMT_8_UNORM = 48,
- TFMT_8_8_UNORM = 49,
- TFMT_8_8_8_UNORM = 50,
- TFMT_8_8_8_8_UNORM = 51,
- TFMT_8_SNORM = 52,
- TFMT_8_8_SNORM = 53,
- TFMT_8_8_8_SNORM = 54,
- TFMT_8_8_8_8_SNORM = 55,
- TFMT_8_UINT = 56,
- TFMT_8_8_UINT = 57,
- TFMT_8_8_8_UINT = 58,
- TFMT_8_8_8_8_UINT = 59,
- TFMT_8_SINT = 60,
- TFMT_8_8_SINT = 61,
- TFMT_8_8_8_SINT = 62,
- TFMT_8_8_8_8_SINT = 63,
- TFMT_16_FLOAT = 64,
- TFMT_16_16_FLOAT = 65,
- TFMT_16_16_16_16_FLOAT = 67,
- TFMT_16_UINT = 68,
- TFMT_16_16_UINT = 69,
- TFMT_16_16_16_16_UINT = 71,
- TFMT_16_SINT = 72,
- TFMT_16_16_SINT = 73,
- TFMT_16_16_16_16_SINT = 75,
- TFMT_16_UNORM = 76,
- TFMT_16_16_UNORM = 77,
- TFMT_16_16_16_16_UNORM = 79,
- TFMT_16_SNORM = 80,
- TFMT_16_16_SNORM = 81,
- TFMT_16_16_16_16_SNORM = 83,
- TFMT_32_FLOAT = 84,
- TFMT_32_32_FLOAT = 85,
- TFMT_32_32_32_32_FLOAT = 87,
- TFMT_32_UINT = 88,
- TFMT_32_32_UINT = 89,
- TFMT_32_32_32_32_UINT = 91,
- TFMT_32_SINT = 92,
- TFMT_32_32_SINT = 93,
- TFMT_32_32_32_32_SINT = 95,
- TFMT_2_10_10_10_UINT = 96,
- TFMT_10_10_10_2_UINT = 97,
- TFMT_ETC2_RG11_SNORM = 112,
- TFMT_ETC2_RG11_UNORM = 113,
- TFMT_ETC2_R11_SNORM = 114,
- TFMT_ETC2_R11_UNORM = 115,
- TFMT_ETC2_RGBA8 = 116,
- TFMT_ETC2_RGB8A1 = 117,
- TFMT_ETC2_RGB8 = 118,
- TFMT_NONE = 255,
-};
-
-enum a3xx_color_fmt {
- RB_R5G6B5_UNORM = 0,
- RB_R5G5B5A1_UNORM = 1,
- RB_R4G4B4A4_UNORM = 3,
- RB_R8G8B8_UNORM = 4,
- RB_R8G8B8A8_UNORM = 8,
- RB_R8G8B8A8_SNORM = 9,
- RB_R8G8B8A8_UINT = 10,
- RB_R8G8B8A8_SINT = 11,
- RB_R8G8_UNORM = 12,
- RB_R8G8_SNORM = 13,
- RB_R8G8_UINT = 14,
- RB_R8G8_SINT = 15,
- RB_R10G10B10A2_UNORM = 16,
- RB_A2R10G10B10_UNORM = 17,
- RB_R10G10B10A2_UINT = 18,
- RB_A2R10G10B10_UINT = 19,
- RB_A8_UNORM = 20,
- RB_R8_UNORM = 21,
- RB_R16_FLOAT = 24,
- RB_R16G16_FLOAT = 25,
- RB_R16G16B16A16_FLOAT = 27,
- RB_R11G11B10_FLOAT = 28,
- RB_R16_SNORM = 32,
- RB_R16G16_SNORM = 33,
- RB_R16G16B16A16_SNORM = 35,
- RB_R16_UNORM = 36,
- RB_R16G16_UNORM = 37,
- RB_R16G16B16A16_UNORM = 39,
- RB_R16_SINT = 40,
- RB_R16G16_SINT = 41,
- RB_R16G16B16A16_SINT = 43,
- RB_R16_UINT = 44,
- RB_R16G16_UINT = 45,
- RB_R16G16B16A16_UINT = 47,
- RB_R32_FLOAT = 48,
- RB_R32G32_FLOAT = 49,
- RB_R32G32B32A32_FLOAT = 51,
- RB_R32_SINT = 52,
- RB_R32G32_SINT = 53,
- RB_R32G32B32A32_SINT = 55,
- RB_R32_UINT = 56,
- RB_R32G32_UINT = 57,
- RB_R32G32B32A32_UINT = 59,
- RB_NONE = 255,
-};
-
-enum a3xx_cp_perfcounter_select {
- CP_ALWAYS_COUNT = 0,
- CP_AHB_PFPTRANS_WAIT = 3,
- CP_AHB_NRTTRANS_WAIT = 6,
- CP_CSF_NRT_READ_WAIT = 8,
- CP_CSF_I1_FIFO_FULL = 9,
- CP_CSF_I2_FIFO_FULL = 10,
- CP_CSF_ST_FIFO_FULL = 11,
- CP_RESERVED_12 = 12,
- CP_CSF_RING_ROQ_FULL = 13,
- CP_CSF_I1_ROQ_FULL = 14,
- CP_CSF_I2_ROQ_FULL = 15,
- CP_CSF_ST_ROQ_FULL = 16,
- CP_RESERVED_17 = 17,
- CP_MIU_TAG_MEM_FULL = 18,
- CP_MIU_NRT_WRITE_STALLED = 22,
- CP_MIU_NRT_READ_STALLED = 23,
- CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
- CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
- CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
- CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
- CP_ME_MICRO_RB_STARVED = 30,
- CP_AHB_RBBM_DWORD_SENT = 40,
- CP_ME_BUSY_CLOCKS = 41,
- CP_ME_WAIT_CONTEXT_AVAIL = 42,
- CP_PFP_TYPE0_PACKET = 43,
- CP_PFP_TYPE3_PACKET = 44,
- CP_CSF_RB_WPTR_NEQ_RPTR = 45,
- CP_CSF_I1_SIZE_NEQ_ZERO = 46,
- CP_CSF_I2_SIZE_NEQ_ZERO = 47,
- CP_CSF_RBI1I2_FETCHING = 48,
-};
-
-enum a3xx_gras_tse_perfcounter_select {
- GRAS_TSEPERF_INPUT_PRIM = 0,
- GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
- GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
- GRAS_TSEPERF_CLIPPED_PRIM = 3,
- GRAS_TSEPERF_NEW_PRIM = 4,
- GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
- GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
- GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
- GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
- GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
- GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
- GRAS_TSEPERF_POST_CLIP_PRIM = 11,
- GRAS_TSEPERF_WORKING_CYCLES = 12,
- GRAS_TSEPERF_PC_STARVE = 13,
- GRAS_TSERASPERF_STALL = 14,
-};
-
-enum a3xx_gras_ras_perfcounter_select {
- GRAS_RASPERF_16X16_TILES = 0,
- GRAS_RASPERF_8X8_TILES = 1,
- GRAS_RASPERF_4X4_TILES = 2,
- GRAS_RASPERF_WORKING_CYCLES = 3,
- GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
- GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
- GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
-};
-
-enum a3xx_hlsq_perfcounter_select {
- HLSQ_PERF_SP_VS_CONSTANT = 0,
- HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
- HLSQ_PERF_SP_FS_CONSTANT = 2,
- HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
- HLSQ_PERF_TP_STATE = 4,
- HLSQ_PERF_QUADS = 5,
- HLSQ_PERF_PIXELS = 6,
- HLSQ_PERF_VERTICES = 7,
- HLSQ_PERF_FS8_THREADS = 8,
- HLSQ_PERF_FS16_THREADS = 9,
- HLSQ_PERF_FS32_THREADS = 10,
- HLSQ_PERF_VS8_THREADS = 11,
- HLSQ_PERF_VS16_THREADS = 12,
- HLSQ_PERF_SP_VS_DATA_BYTES = 13,
- HLSQ_PERF_SP_FS_DATA_BYTES = 14,
- HLSQ_PERF_ACTIVE_CYCLES = 15,
- HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
- HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
- HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
- HLSQ_PERF_STALL_CYCLES_UCHE = 19,
- HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
- HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
- HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
- HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
- HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
- HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
- HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
- HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
- HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
-};
-
-enum a3xx_pc_perfcounter_select {
- PC_PCPERF_VISIBILITY_STREAMS = 0,
- PC_PCPERF_TOTAL_INSTANCES = 1,
- PC_PCPERF_PRIMITIVES_PC_VPC = 2,
- PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
- PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
- PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
- PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
- PC_PCPERF_VERTICES_TO_VFD = 7,
- PC_PCPERF_REUSED_VERTICES = 8,
- PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
- PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
- PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
- PC_PCPERF_CYCLES_IS_WORKING = 12,
-};
-
-enum a3xx_rb_perfcounter_select {
- RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
- RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
- RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
- RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
- RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
- RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
- RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
- RB_RBPERF_RB_MARB_DATA = 7,
- RB_RBPERF_SP_RB_QUAD = 8,
- RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
- RB_RBPERF_GMEM_CH0_READ = 10,
- RB_RBPERF_GMEM_CH1_READ = 11,
- RB_RBPERF_GMEM_CH0_WRITE = 12,
- RB_RBPERF_GMEM_CH1_WRITE = 13,
- RB_RBPERF_CP_CONTEXT_DONE = 14,
- RB_RBPERF_CP_CACHE_FLUSH = 15,
- RB_RBPERF_CP_ZPASS_DONE = 16,
-};
-
-enum a3xx_rbbm_perfcounter_select {
- RBBM_ALAWYS_ON = 0,
- RBBM_VBIF_BUSY = 1,
- RBBM_TSE_BUSY = 2,
- RBBM_RAS_BUSY = 3,
- RBBM_PC_DCALL_BUSY = 4,
- RBBM_PC_VSD_BUSY = 5,
- RBBM_VFD_BUSY = 6,
- RBBM_VPC_BUSY = 7,
- RBBM_UCHE_BUSY = 8,
- RBBM_VSC_BUSY = 9,
- RBBM_HLSQ_BUSY = 10,
- RBBM_ANY_RB_BUSY = 11,
- RBBM_ANY_TEX_BUSY = 12,
- RBBM_ANY_USP_BUSY = 13,
- RBBM_ANY_MARB_BUSY = 14,
- RBBM_ANY_ARB_BUSY = 15,
- RBBM_AHB_STATUS_BUSY = 16,
- RBBM_AHB_STATUS_STALLED = 17,
- RBBM_AHB_STATUS_TXFR = 18,
- RBBM_AHB_STATUS_TXFR_SPLIT = 19,
- RBBM_AHB_STATUS_TXFR_ERROR = 20,
- RBBM_AHB_STATUS_LONG_STALL = 21,
- RBBM_RBBM_STATUS_MASKED = 22,
-};
-
-enum a3xx_sp_perfcounter_select {
- SP_LM_LOAD_INSTRUCTIONS = 0,
- SP_LM_STORE_INSTRUCTIONS = 1,
- SP_LM_ATOMICS = 2,
- SP_UCHE_LOAD_INSTRUCTIONS = 3,
- SP_UCHE_STORE_INSTRUCTIONS = 4,
- SP_UCHE_ATOMICS = 5,
- SP_VS_TEX_INSTRUCTIONS = 6,
- SP_VS_CFLOW_INSTRUCTIONS = 7,
- SP_VS_EFU_INSTRUCTIONS = 8,
- SP_VS_FULL_ALU_INSTRUCTIONS = 9,
- SP_VS_HALF_ALU_INSTRUCTIONS = 10,
- SP_FS_TEX_INSTRUCTIONS = 11,
- SP_FS_CFLOW_INSTRUCTIONS = 12,
- SP_FS_EFU_INSTRUCTIONS = 13,
- SP_FS_FULL_ALU_INSTRUCTIONS = 14,
- SP_FS_HALF_ALU_INSTRUCTIONS = 15,
- SP_FS_BARY_INSTRUCTIONS = 16,
- SP_VS_INSTRUCTIONS = 17,
- SP_FS_INSTRUCTIONS = 18,
- SP_ADDR_LOCK_COUNT = 19,
- SP_UCHE_READ_TRANS = 20,
- SP_UCHE_WRITE_TRANS = 21,
- SP_EXPORT_VPC_TRANS = 22,
- SP_EXPORT_RB_TRANS = 23,
- SP_PIXELS_KILLED = 24,
- SP_ICL1_REQUESTS = 25,
- SP_ICL1_MISSES = 26,
- SP_ICL0_REQUESTS = 27,
- SP_ICL0_MISSES = 28,
- SP_ALU_ACTIVE_CYCLES = 29,
- SP_EFU_ACTIVE_CYCLES = 30,
- SP_STALL_CYCLES_BY_VPC = 31,
- SP_STALL_CYCLES_BY_TP = 32,
- SP_STALL_CYCLES_BY_UCHE = 33,
- SP_STALL_CYCLES_BY_RB = 34,
- SP_ACTIVE_CYCLES_ANY = 35,
- SP_ACTIVE_CYCLES_ALL = 36,
-};
-
-enum a3xx_tp_perfcounter_select {
- TPL1_TPPERF_L1_REQUESTS = 0,
- TPL1_TPPERF_TP0_L1_REQUESTS = 1,
- TPL1_TPPERF_TP0_L1_MISSES = 2,
- TPL1_TPPERF_TP1_L1_REQUESTS = 3,
- TPL1_TPPERF_TP1_L1_MISSES = 4,
- TPL1_TPPERF_TP2_L1_REQUESTS = 5,
- TPL1_TPPERF_TP2_L1_MISSES = 6,
- TPL1_TPPERF_TP3_L1_REQUESTS = 7,
- TPL1_TPPERF_TP3_L1_MISSES = 8,
- TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
- TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
- TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
- TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
- TPL1_TPPERF_BILINEAR_OPS = 13,
- TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
- TPL1_TPPERF_QUADQUADS_SHADOW = 15,
- TPL1_TPPERF_QUADS_ARRAY = 16,
- TPL1_TPPERF_QUADS_PROJECTION = 17,
- TPL1_TPPERF_QUADS_GRADIENT = 18,
- TPL1_TPPERF_QUADS_1D2D = 19,
- TPL1_TPPERF_QUADS_3DCUBE = 20,
- TPL1_TPPERF_ZERO_LOD = 21,
- TPL1_TPPERF_OUTPUT_TEXELS = 22,
- TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
- TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
- TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
- TPL1_TPPERF_LATENCY = 26,
- TPL1_TPPERF_LATENCY_TRANS = 27,
-};
-
-enum a3xx_vfd_perfcounter_select {
- VFD_PERF_UCHE_BYTE_FETCHED = 0,
- VFD_PERF_UCHE_TRANS = 1,
- VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
- VFD_PERF_FETCH_INSTRUCTIONS = 3,
- VFD_PERF_DECODE_INSTRUCTIONS = 4,
- VFD_PERF_ACTIVE_CYCLES = 5,
- VFD_PERF_STALL_CYCLES_UCHE = 6,
- VFD_PERF_STALL_CYCLES_HLSQ = 7,
- VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
- VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
-};
-
-enum a3xx_vpc_perfcounter_select {
- VPC_PERF_SP_LM_PRIMITIVES = 0,
- VPC_PERF_COMPONENTS_FROM_SP = 1,
- VPC_PERF_SP_LM_COMPONENTS = 2,
- VPC_PERF_ACTIVE_CYCLES = 3,
- VPC_PERF_STALL_CYCLES_LM = 4,
- VPC_PERF_STALL_CYCLES_RAS = 5,
-};
-
-enum a3xx_uche_perfcounter_select {
- UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
- UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
- UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
- UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
- UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
- UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
- UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
- UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
- UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
- UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
- UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
- UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
- UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
- UCHE_UCHEPERF_EVICTS = 16,
- UCHE_UCHEPERF_FLUSHES = 17,
- UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
- UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
- UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
-};
-
-enum a3xx_intp_mode {
- SMOOTH = 0,
- FLAT = 1,
- ZERO = 2,
- ONE = 3,
-};
-
-enum a3xx_repl_mode {
- S = 1,
- T = 2,
- ONE_T = 3,
-};
-
-enum a3xx_tex_filter {
- A3XX_TEX_NEAREST = 0,
- A3XX_TEX_LINEAR = 1,
- A3XX_TEX_ANISO = 2,
-};
-
-enum a3xx_tex_clamp {
- A3XX_TEX_REPEAT = 0,
- A3XX_TEX_CLAMP_TO_EDGE = 1,
- A3XX_TEX_MIRROR_REPEAT = 2,
- A3XX_TEX_CLAMP_TO_BORDER = 3,
- A3XX_TEX_MIRROR_CLAMP = 4,
-};
-
-enum a3xx_tex_aniso {
- A3XX_TEX_ANISO_1 = 0,
- A3XX_TEX_ANISO_2 = 1,
- A3XX_TEX_ANISO_4 = 2,
- A3XX_TEX_ANISO_8 = 3,
- A3XX_TEX_ANISO_16 = 4,
-};
-
-enum a3xx_tex_swiz {
- A3XX_TEX_X = 0,
- A3XX_TEX_Y = 1,
- A3XX_TEX_Z = 2,
- A3XX_TEX_W = 3,
- A3XX_TEX_ZERO = 4,
- A3XX_TEX_ONE = 5,
-};
-
-enum a3xx_tex_type {
- A3XX_TEX_1D = 0,
- A3XX_TEX_2D = 1,
- A3XX_TEX_CUBE = 2,
- A3XX_TEX_3D = 3,
-};
-
-enum a3xx_tex_msaa {
- A3XX_TPL1_MSAA1X = 0,
- A3XX_TPL1_MSAA2X = 1,
- A3XX_TPL1_MSAA4X = 2,
- A3XX_TPL1_MSAA8X = 3,
-};
-
-#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
-#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
-#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
-#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
-#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
-#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
-#define A3XX_INT0_VFD_ERROR 0x00000040
-#define A3XX_INT0_CP_SW_INT 0x00000080
-#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
-#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
-#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
-#define A3XX_INT0_CP_HW_FAULT 0x00000800
-#define A3XX_INT0_CP_DMA 0x00001000
-#define A3XX_INT0_CP_IB2_INT 0x00002000
-#define A3XX_INT0_CP_IB1_INT 0x00004000
-#define A3XX_INT0_CP_RB_INT 0x00008000
-#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
-#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
-#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
-#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
-#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
-#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
-#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
-#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
-
-#define REG_A3XX_RBBM_HW_VERSION 0x00000000
-
-#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
-
-#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
-
-#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
-
-#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
-
-#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
-
-#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
-
-#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
-
-#define REG_A3XX_RBBM_AHB_CMD 0x00000022
-
-#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
-
-#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
-
-#define REG_A3XX_RBBM_STATUS 0x00000030
-#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
-#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
-#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
-#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
-#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
-#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
-#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
-#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
-#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
-#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
-#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
-#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
-#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
-#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
-#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
-#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
-#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
-#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
-#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
-#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
-#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
-
-#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
-
-#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
-
-#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
-
-#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
-
-#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
-
-#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
-
-#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
-
-#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
-#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
-#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
-#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
-#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
-#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
-
-#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
-
-#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
-
-#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
-
-#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
-
-#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
-
-#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
-
-#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
-
-#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
-
-#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
-
-#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
-
-#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
-
-#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
-
-#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
-
-#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
-
-#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
-
-#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
-
-#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
-
-#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
-
-#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
-
-#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
-
-#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
-
-#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
-
-#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
-
-#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
-
-#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
-
-#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
-
-#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
-
-#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
-
-#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
-
-#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
-
-#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
-
-#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
-
-#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
-
-#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
-
-#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
-
-#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
-
-#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
-
-#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
-
-#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
-
-#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
-
-#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
-
-#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
-
-#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
-
-#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
-
-#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
-
-#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
-
-#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
-
-#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
-
-#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
-
-#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
-
-#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
-
-#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
-
-#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
-
-#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
-
-#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
-
-#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
-
-#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
-
-#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
-
-#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
-
-#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
-
-#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
-
-#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
-
-#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
-
-#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
-
-#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
-
-#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
-
-#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
-
-#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
-
-#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
-
-#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
-
-#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
-
-#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
-
-#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
-
-#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
-
-#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
-
-#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
-
-#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
-
-#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
-
-#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
-
-#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
-
-#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
-
-#define REG_A3XX_CP_ROQ_DATA 0x000001cd
-
-#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
-
-#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
-
-#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
-
-#define REG_A3XX_CP_MEQ_ADDR 0x000001da
-
-#define REG_A3XX_CP_MEQ_DATA 0x000001db
-
-#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
-
-#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
-
-#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
-
-#define REG_A3XX_CP_HW_FAULT 0x0000045c
-
-#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
-
-#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
-
-#define REG_A3XX_CP_PROTECT(i0) (0x00000460 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
-
-#define REG_A3XX_CP_AHB_FAULT 0x0000054d
-
-#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
-
-#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
-
-#define REG_A3XX_TP0_CHICKEN 0x00000e1e
-
-#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
-
-#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
-
-#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
-#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
-#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000
-#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000
-#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000
-#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
-#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
-#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
-#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
-#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
-#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
-#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
-#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
-#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
-#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000
-#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26
-static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val)
-{
- return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
-#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
-#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
-{
- return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
-}
-#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
-#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
-static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
-{
- return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
-#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
-#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
-#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
-#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
-#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
-}
-
-#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
-#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
-#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
-static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
-{
- return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
-}
-
-#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
-#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
-#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
-static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
-}
-#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
-#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
-static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
-}
-
-#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
-#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
-#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
-static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
-{
- return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
-}
-
-#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
-#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
-#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
-static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
-{
- return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
-}
-
-#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
-#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
-#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
-{
- return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
-#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
-#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
-#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
-#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
-static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
-{
- return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
-}
-#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
-
-#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
-#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
-#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
-static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
-{
- return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
-}
-#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
-#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
-static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
-}
-#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
-#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
-static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
-}
-
-#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
-}
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
-}
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
-}
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
-}
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
-#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
-#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
-#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
-static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
-{
- return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
-}
-#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000
-#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12
-static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
-{
- return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK;
-}
-#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
-#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
-
-#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
-#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001
-#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002
-#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004
-#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
-#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
-#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
-static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
-}
-#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
-#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
-#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000
-#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14
-static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
-{
- return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
-}
-#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
-#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
-#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
-#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
-#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
-static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
-}
-#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000
-#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000
-
-#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
-#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
-#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
-#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
-static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
-}
-#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
-#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
-static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
-{
- return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
-}
-
-#define REG_A3XX_RB_ALPHA_REF 0x000020c3
-#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
-#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
-static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
-{
- return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
-}
-#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
-#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
-static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
-}
-
-#define REG_A3XX_RB_MRT(i0) (0x000020c4 + 0x4*(i0))
-
-static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
-#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
-#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
-#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
-#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
-{
- return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
-}
-#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
-#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
-static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
-}
-#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
-#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
-static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
-}
-
-static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
-#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
-#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
-{
- return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
-}
-#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
-#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
-static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
-{
- return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
-}
-#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
-}
-#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
-#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
-#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
-static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
-}
-
-static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
-#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
-#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
-static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
-}
-
-static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
-#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
-#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
-}
-#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
-
-#define REG_A3XX_RB_BLEND_RED 0x000020e4
-#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
-#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
-static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
-{
- return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
-}
-#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
-#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
-static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
-}
-
-#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
-#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
-#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
-static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
-{
- return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
-}
-#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
-#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
-static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
-}
-
-#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
-#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
-#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
-static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
-{
- return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
-}
-#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
-#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
-static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
-}
-
-#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
-#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
-#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
-static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
-{
- return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
-}
-#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
-#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
-static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
-}
-
-#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
-
-#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
-
-#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
-
-#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
-
-#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
-#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
-#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
-static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
-{
- return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
-}
-#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
-#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
-#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
-static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
-{
- return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
-}
-#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
-#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
-#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
-static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
-{
- return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
-}
-#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
-static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
-{
- assert(!(val & 0x3fff));
- return (((val >> 14)) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
-}
-
-#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
-#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
-#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
-static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
-}
-
-#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
-#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
-#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
-static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
-}
-
-#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
-#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
-#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
-}
-#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
-#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
-}
-#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
-#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
-}
-#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
-#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
-}
-#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
-#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
-}
-#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
-#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
-static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
-{
- return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
-}
-
-#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
-#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
-#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
-#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
-#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
-#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
-#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
-static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
-{
- return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
-}
-#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
-#define A3XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
-
-#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
-
-#define REG_A3XX_RB_DEPTH_INFO 0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
-{
- return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
-}
-#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
-#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
-static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
-}
-
-#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
-#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
-#define A3XX_RB_DEPTH_PITCH__SHIFT 0
-static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
-{
- assert(!(val & 0x7));
- return (((val >> 3)) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
-}
-
-#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
-#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
-#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
-#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
-#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
-#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
-#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
-#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
-#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
-#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
-#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
-#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
-}
-#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
-#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
-static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
-}
-
-#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
-
-#define REG_A3XX_RB_STENCIL_INFO 0x00002106
-#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800
-#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
-static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
-}
-
-#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
-#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff
-#define A3XX_RB_STENCIL_PITCH__SHIFT 0
-static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
-{
- assert(!(val & 0x7));
- return (((val >> 3)) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
-}
-
-#define REG_A3XX_RB_STENCILREFMASK 0x00002108
-#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
-#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
-static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
-}
-#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
-#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
-static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
-}
-#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
-#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
-#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
-#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
-static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
-}
-#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
-#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
-static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
-}
-#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
-#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
-#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
-
-#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
-#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
-#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
-}
-#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
-#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
-#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
-#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
-
-#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
-
-#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
-
-#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
-
-#define REG_A3XX_VGT_BIN_BASE 0x000021e1
-
-#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
-
-#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
-#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
-#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
-static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
-{
- return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
-}
-#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
-#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
-static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
-{
- return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
-}
-
-#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
-
-#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
-#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
-#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
-#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
-static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
-}
-#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
-#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
-static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
-}
-#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000
-#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
-#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
-#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-
-#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
-
-#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
-static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
-}
-#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
-#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
-#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
-#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
-#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
-#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
-static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
-}
-#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
-#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
-static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
-}
-#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
-#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
-#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
-#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
-
-#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
-static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
-}
-#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
-#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
-static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
-#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
-static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
-}
-
-#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
-#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
-#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
-static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
-#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
-static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
-#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
-static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
-}
-
-#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
-#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
-#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
-#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
-#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
-}
-#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
-#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
-}
-
-#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
-static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
-}
-#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
-static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
-}
-#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
-}
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
-static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
-}
-
-#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
-}
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
-static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
-}
-
-#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
-}
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
-static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
-}
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
-static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
-}
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
-#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
-static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
-{
- return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
-}
-
-#define REG_A3XX_HLSQ_CL_GLOBAL_WORK(i0) (0x0000220b + 0x2*(i0))
-
-static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-
-static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
-
-#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
-
-#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
-
-#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
-
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP(i0) (0x00002215 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
-
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
-
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
-
-#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
-
-#define REG_A3XX_VFD_CONTROL_0 0x00002240
-#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
-#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
-static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
-}
-#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
-#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
-static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
-}
-#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
-#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
-static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
-}
-#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
-#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
-static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
-}
-
-#define REG_A3XX_VFD_CONTROL_1 0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
-static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
-}
-#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
-#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
-static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
-}
-#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
-#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
-static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
-}
-#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
-#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
-static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
-}
-#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
-#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
-static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
-{
- return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
-}
-
-#define REG_A3XX_VFD_INDEX_MIN 0x00002242
-
-#define REG_A3XX_VFD_INDEX_MAX 0x00002243
-
-#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
-
-#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
-
-#define REG_A3XX_VFD_FETCH(i0) (0x00002246 + 0x2*(i0))
-
-static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
-#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
-#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
-static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
-{
- return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
-}
-#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80
-#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
-static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
-{
- return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
-}
-#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000
-#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
-#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
-#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
-static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
-{
- return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
-}
-#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
-#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
-static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
-{
- return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
-}
-
-static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
-
-#define REG_A3XX_VFD_DECODE(i0) (0x00002266 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
-#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
-#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
-static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
-{
- return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
-}
-#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
-#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
-#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
-static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
-{
- return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
-}
-#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
-#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
-static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
-{
- return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
-}
-#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
-#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
-#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
-static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
-}
-#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
-#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
-static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
-{
- return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
-}
-#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
-#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
-
-#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
-#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
-#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
-static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
-{
- return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
-}
-#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
-#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
-static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
-{
- return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
-}
-
-#define REG_A3XX_VPC_ATTR 0x00002280
-#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
-#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
-static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
-{
- return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
-}
-#define A3XX_VPC_ATTR_PSIZE 0x00000200
-#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
-#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
-static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
-{
- return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
-}
-#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
-#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
-static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
-{
- return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
-}
-
-#define REG_A3XX_VPC_PACK 0x00002281
-#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
-#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
-static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
-{
- return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
-}
-#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
-#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
-static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
-{
- return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
-}
-
-#define REG_A3XX_VPC_VARYING_INTERP(i0) (0x00002282 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
-#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
-#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
-#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
-#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
-#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
-#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
-#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
-#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
-#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
-#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
-#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
-#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
-#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
-#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
-#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
-#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
-}
-#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
-#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
-static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
-}
-
-#define REG_A3XX_VPC_VARYING_PS_REPL(i0) (0x00002286 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK;
-}
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000
-#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30
-static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val)
-{
- return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK;
-}
-
-#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
-
-#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
-
-#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
-#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
-static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
-{
- return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
-}
-#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
-#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
-#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
-static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
-{
- return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
-}
-#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
-#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
-static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
-{
- return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
-}
-
-#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
-#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
-#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
-#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
-#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
-#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
-}
-
-#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
-#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
-#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
-static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
-#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
-static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
-}
-#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
-#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
-static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
-}
-
-#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
-#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
-#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
-static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
-}
-#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
-#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
-static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
-}
-#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
-static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
-}
-
-#define REG_A3XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
-#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
-}
-#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
-#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
-#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
-static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
-#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
-}
-#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
-#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
-#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
-static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A3XX_SP_VS_VPC_DST(i0) (0x000022d0 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
-#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
-#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
-static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
-}
-#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
-
-#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x7f));
- return (((val >> 7)) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
-}
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
-}
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
-#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
-#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
-static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
-}
-#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
-#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
-static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
-}
-
-#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
-
-#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
-#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
-#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
-static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
-}
-
-#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
-#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
-#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
-#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
-#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
-#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
-#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
-#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
-#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
-#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
-}
-
-#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
-#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
-#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
-static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
-#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
-static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
-#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
-static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
-}
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
-static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
-}
-
-#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
-#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
-#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
-static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
-}
-#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
-
-#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
-}
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
-}
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
-#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
-#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
-static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
-}
-#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
-#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
-static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
-}
-
-#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
-
-#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
-
-#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
-
-#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
-#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003
-#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
-static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK;
-}
-#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
-#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
-#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
-static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
-}
-
-#define REG_A3XX_SP_FS_MRT(i0) (0x000022f0 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
-#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
-#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
-static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
-}
-#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
-#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
-#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
-
-#define REG_A3XX_SP_FS_IMAGE_OUTPUT(i0) (0x000022f4 + 0x1*(i0))
-
-static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
-#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
-#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
-static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
-{
- return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
-}
-
-#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
-#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
-#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
-static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
-}
-
-#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
-
-#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
-static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
-}
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
-static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
-}
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
-#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
-static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
-}
-
-#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
-
-#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
-static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
-}
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
-static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
-}
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
-#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
-static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
-{
- return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
-}
-
-#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
-
-#define REG_A3XX_VBIF_CLKON 0x00003001
-
-#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
-
-#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
-
-#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
-
-#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
-
-#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
-
-#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
-
-#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
-
-#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
-
-#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
-
-#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
-
-#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
-
-#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
-
-#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
-
-#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
-
-#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
-
-#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
-
-#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
-
-#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
-
-#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
-#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
-#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
-#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
-#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
-#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
-
-#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
-#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
-#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
-#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
-#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
-#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
-
-#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
-
-#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
-
-#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
-
-#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
-
-#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
-
-#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
-
-#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
-#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
-#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
-#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
-static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
-
-#define REG_A3XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
-
-static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
-#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
-#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
-static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
-{
- return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
-}
-#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
-#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
-static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
-{
- return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
-}
-#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
-#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
-static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
-{
- return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
-}
-#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
-#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
-static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
-{
- return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
-}
-
-static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
-
-static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
-
-#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
-#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
-
-#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
-
-#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
-
-#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
-
-#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
-
-#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
-
-#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
-
-#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
-
-#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
-
-#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
-
-#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
-
-#define REG_A3XX_GRAS_CL_USER_PLANE(i0) (0x00000ca0 + 0x4*(i0))
-
-static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
-
-static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
-
-static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
-
-static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
-
-#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
-
-#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
-
-#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
-
-#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
-
-#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
-#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
-#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
-static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
-{
- return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
-}
-#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
-#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
-static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
-{
- return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
-}
-
-#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
-
-#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
-
-#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
-
-#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
-
-#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
-
-#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
-
-#define REG_A3XX_UNKNOWN_0E43 0x00000e43
-
-#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
-
-#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
-
-#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
-
-#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
-
-#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
-
-#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
-
-#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
-
-#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
-
-#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
-
-#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
-
-#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
-
-#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
-
-#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
-
-#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
-#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
-#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
-static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
-{
- return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
-}
-
-#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
-#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
-#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
-static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
-{
- return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
-}
-#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
-#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
-static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
-{
- return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
-}
-#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
-
-#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
-
-#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
-
-#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
-
-#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
-
-#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
-
-#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
-
-#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
-
-#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
-
-#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
-
-#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
-
-#define REG_A3XX_UNKNOWN_0F03 0x00000f03
-
-#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
-
-#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
-
-#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
-
-#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
-
-#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
-
-#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
-
-#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
-
-#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
-
-#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
-#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
-#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
-}
-#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
-#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
-}
-#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
-#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
-}
-#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
-#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
-{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
-}
-#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
-#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
-#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
-}
-
-#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
-
-#define REG_A3XX_TEX_SAMP_0 0x00000000
-#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
-#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
-#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
-#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
-static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
-{
- return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
-}
-#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
-#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
-static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
-{
- return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
-}
-#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
-#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
-static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
-{
- return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
-}
-#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
-#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
-static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
-{
- return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
-}
-#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
-#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
-static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
-{
- return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
-}
-#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000
-#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15
-static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val)
-{
- return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK;
-}
-#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
-#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
-static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
-}
-#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
-#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
-
-#define REG_A3XX_TEX_SAMP_1 0x00000001
-#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
-#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
-static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
-{
- return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
-}
-#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
-#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
-static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
-{
- return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
-}
-#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
-#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
-static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
-{
- return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
-}
-
-#define REG_A3XX_TEX_CONST_0 0x00000000
-#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
-#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0
-static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
-{
- return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
-}
-#define A3XX_TEX_CONST_0_SRGB 0x00000004
-#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
-#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
-static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
-{
- return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
-}
-#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
-#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
-static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
-{
- return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
-}
-#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
-#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
-static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
-{
- return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
-}
-#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
-#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
-static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
-{
- return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
-}
-#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
-#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
-static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
-}
-#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
-#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
-static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
-{
- return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
-}
-#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
-#define A3XX_TEX_CONST_0_FMT__SHIFT 22
-static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
-{
- return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
-}
-#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
-#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
-#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
-static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
-{
- return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
-}
-
-#define REG_A3XX_TEX_CONST_1 0x00000001
-#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
-#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
-static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
-}
-#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
-#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
-static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
-}
-#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000
-#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28
-static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
-}
-
-#define REG_A3XX_TEX_CONST_2 0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
-#define A3XX_TEX_CONST_2_INDX__SHIFT 0
-static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
-}
-#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
-#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
-static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
-}
-#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
-#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
-static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
-}
-
-#define REG_A3XX_TEX_CONST_3 0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
-#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
-static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
-}
-#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
-#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
-static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
-{
- return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
-}
-#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
-#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
-static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
deleted file mode 100644
index 103a416a787f..000000000000
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ /dev/null
@@ -1,4379 +0,0 @@
-#ifndef A4XX_XML
-#define A4XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum a4xx_color_fmt {
- RB4_A8_UNORM = 1,
- RB4_R8_UNORM = 2,
- RB4_R8_SNORM = 3,
- RB4_R8_UINT = 4,
- RB4_R8_SINT = 5,
- RB4_R4G4B4A4_UNORM = 8,
- RB4_R5G5B5A1_UNORM = 10,
- RB4_R5G6B5_UNORM = 14,
- RB4_R8G8_UNORM = 15,
- RB4_R8G8_SNORM = 16,
- RB4_R8G8_UINT = 17,
- RB4_R8G8_SINT = 18,
- RB4_R16_UNORM = 19,
- RB4_R16_SNORM = 20,
- RB4_R16_FLOAT = 21,
- RB4_R16_UINT = 22,
- RB4_R16_SINT = 23,
- RB4_R8G8B8_UNORM = 25,
- RB4_R8G8B8A8_UNORM = 26,
- RB4_R8G8B8A8_SNORM = 28,
- RB4_R8G8B8A8_UINT = 29,
- RB4_R8G8B8A8_SINT = 30,
- RB4_R10G10B10A2_UNORM = 31,
- RB4_R10G10B10A2_UINT = 34,
- RB4_R11G11B10_FLOAT = 39,
- RB4_R16G16_UNORM = 40,
- RB4_R16G16_SNORM = 41,
- RB4_R16G16_FLOAT = 42,
- RB4_R16G16_UINT = 43,
- RB4_R16G16_SINT = 44,
- RB4_R32_FLOAT = 45,
- RB4_R32_UINT = 46,
- RB4_R32_SINT = 47,
- RB4_R16G16B16A16_UNORM = 52,
- RB4_R16G16B16A16_SNORM = 53,
- RB4_R16G16B16A16_FLOAT = 54,
- RB4_R16G16B16A16_UINT = 55,
- RB4_R16G16B16A16_SINT = 56,
- RB4_R32G32_FLOAT = 57,
- RB4_R32G32_UINT = 58,
- RB4_R32G32_SINT = 59,
- RB4_R32G32B32A32_FLOAT = 60,
- RB4_R32G32B32A32_UINT = 61,
- RB4_R32G32B32A32_SINT = 62,
- RB4_NONE = 255,
-};
-
-enum a4xx_tile_mode {
- TILE4_LINEAR = 0,
- TILE4_2 = 2,
- TILE4_3 = 3,
-};
-
-enum a4xx_vtx_fmt {
- VFMT4_32_FLOAT = 1,
- VFMT4_32_32_FLOAT = 2,
- VFMT4_32_32_32_FLOAT = 3,
- VFMT4_32_32_32_32_FLOAT = 4,
- VFMT4_16_FLOAT = 5,
- VFMT4_16_16_FLOAT = 6,
- VFMT4_16_16_16_FLOAT = 7,
- VFMT4_16_16_16_16_FLOAT = 8,
- VFMT4_32_FIXED = 9,
- VFMT4_32_32_FIXED = 10,
- VFMT4_32_32_32_FIXED = 11,
- VFMT4_32_32_32_32_FIXED = 12,
- VFMT4_11_11_10_FLOAT = 13,
- VFMT4_16_SINT = 16,
- VFMT4_16_16_SINT = 17,
- VFMT4_16_16_16_SINT = 18,
- VFMT4_16_16_16_16_SINT = 19,
- VFMT4_16_UINT = 20,
- VFMT4_16_16_UINT = 21,
- VFMT4_16_16_16_UINT = 22,
- VFMT4_16_16_16_16_UINT = 23,
- VFMT4_16_SNORM = 24,
- VFMT4_16_16_SNORM = 25,
- VFMT4_16_16_16_SNORM = 26,
- VFMT4_16_16_16_16_SNORM = 27,
- VFMT4_16_UNORM = 28,
- VFMT4_16_16_UNORM = 29,
- VFMT4_16_16_16_UNORM = 30,
- VFMT4_16_16_16_16_UNORM = 31,
- VFMT4_32_UINT = 32,
- VFMT4_32_32_UINT = 33,
- VFMT4_32_32_32_UINT = 34,
- VFMT4_32_32_32_32_UINT = 35,
- VFMT4_32_SINT = 36,
- VFMT4_32_32_SINT = 37,
- VFMT4_32_32_32_SINT = 38,
- VFMT4_32_32_32_32_SINT = 39,
- VFMT4_8_UINT = 40,
- VFMT4_8_8_UINT = 41,
- VFMT4_8_8_8_UINT = 42,
- VFMT4_8_8_8_8_UINT = 43,
- VFMT4_8_UNORM = 44,
- VFMT4_8_8_UNORM = 45,
- VFMT4_8_8_8_UNORM = 46,
- VFMT4_8_8_8_8_UNORM = 47,
- VFMT4_8_SINT = 48,
- VFMT4_8_8_SINT = 49,
- VFMT4_8_8_8_SINT = 50,
- VFMT4_8_8_8_8_SINT = 51,
- VFMT4_8_SNORM = 52,
- VFMT4_8_8_SNORM = 53,
- VFMT4_8_8_8_SNORM = 54,
- VFMT4_8_8_8_8_SNORM = 55,
- VFMT4_10_10_10_2_UINT = 56,
- VFMT4_10_10_10_2_UNORM = 57,
- VFMT4_10_10_10_2_SINT = 58,
- VFMT4_10_10_10_2_SNORM = 59,
- VFMT4_2_10_10_10_UINT = 60,
- VFMT4_2_10_10_10_UNORM = 61,
- VFMT4_2_10_10_10_SINT = 62,
- VFMT4_2_10_10_10_SNORM = 63,
- VFMT4_NONE = 255,
-};
-
-enum a4xx_tex_fmt {
- TFMT4_A8_UNORM = 3,
- TFMT4_8_UNORM = 4,
- TFMT4_8_SNORM = 5,
- TFMT4_8_UINT = 6,
- TFMT4_8_SINT = 7,
- TFMT4_4_4_4_4_UNORM = 8,
- TFMT4_5_5_5_1_UNORM = 9,
- TFMT4_5_6_5_UNORM = 11,
- TFMT4_L8_A8_UNORM = 13,
- TFMT4_8_8_UNORM = 14,
- TFMT4_8_8_SNORM = 15,
- TFMT4_8_8_UINT = 16,
- TFMT4_8_8_SINT = 17,
- TFMT4_16_UNORM = 18,
- TFMT4_16_SNORM = 19,
- TFMT4_16_FLOAT = 20,
- TFMT4_16_UINT = 21,
- TFMT4_16_SINT = 22,
- TFMT4_8_8_8_8_UNORM = 28,
- TFMT4_8_8_8_8_SNORM = 29,
- TFMT4_8_8_8_8_UINT = 30,
- TFMT4_8_8_8_8_SINT = 31,
- TFMT4_9_9_9_E5_FLOAT = 32,
- TFMT4_10_10_10_2_UNORM = 33,
- TFMT4_10_10_10_2_UINT = 34,
- TFMT4_11_11_10_FLOAT = 37,
- TFMT4_16_16_UNORM = 38,
- TFMT4_16_16_SNORM = 39,
- TFMT4_16_16_FLOAT = 40,
- TFMT4_16_16_UINT = 41,
- TFMT4_16_16_SINT = 42,
- TFMT4_32_FLOAT = 43,
- TFMT4_32_UINT = 44,
- TFMT4_32_SINT = 45,
- TFMT4_16_16_16_16_UNORM = 51,
- TFMT4_16_16_16_16_SNORM = 52,
- TFMT4_16_16_16_16_FLOAT = 53,
- TFMT4_16_16_16_16_UINT = 54,
- TFMT4_16_16_16_16_SINT = 55,
- TFMT4_32_32_FLOAT = 56,
- TFMT4_32_32_UINT = 57,
- TFMT4_32_32_SINT = 58,
- TFMT4_32_32_32_FLOAT = 59,
- TFMT4_32_32_32_UINT = 60,
- TFMT4_32_32_32_SINT = 61,
- TFMT4_32_32_32_32_FLOAT = 63,
- TFMT4_32_32_32_32_UINT = 64,
- TFMT4_32_32_32_32_SINT = 65,
- TFMT4_X8Z24_UNORM = 71,
- TFMT4_DXT1 = 86,
- TFMT4_DXT3 = 87,
- TFMT4_DXT5 = 88,
- TFMT4_RGTC1_UNORM = 90,
- TFMT4_RGTC1_SNORM = 91,
- TFMT4_RGTC2_UNORM = 94,
- TFMT4_RGTC2_SNORM = 95,
- TFMT4_BPTC_UFLOAT = 97,
- TFMT4_BPTC_FLOAT = 98,
- TFMT4_BPTC = 99,
- TFMT4_ATC_RGB = 100,
- TFMT4_ATC_RGBA_EXPLICIT = 101,
- TFMT4_ATC_RGBA_INTERPOLATED = 102,
- TFMT4_ETC2_RG11_UNORM = 103,
- TFMT4_ETC2_RG11_SNORM = 104,
- TFMT4_ETC2_R11_UNORM = 105,
- TFMT4_ETC2_R11_SNORM = 106,
- TFMT4_ETC1 = 107,
- TFMT4_ETC2_RGB8 = 108,
- TFMT4_ETC2_RGBA8 = 109,
- TFMT4_ETC2_RGB8A1 = 110,
- TFMT4_ASTC_4x4 = 111,
- TFMT4_ASTC_5x4 = 112,
- TFMT4_ASTC_5x5 = 113,
- TFMT4_ASTC_6x5 = 114,
- TFMT4_ASTC_6x6 = 115,
- TFMT4_ASTC_8x5 = 116,
- TFMT4_ASTC_8x6 = 117,
- TFMT4_ASTC_8x8 = 118,
- TFMT4_ASTC_10x5 = 119,
- TFMT4_ASTC_10x6 = 120,
- TFMT4_ASTC_10x8 = 121,
- TFMT4_ASTC_10x10 = 122,
- TFMT4_ASTC_12x10 = 123,
- TFMT4_ASTC_12x12 = 124,
- TFMT4_NONE = 255,
-};
-
-enum a4xx_depth_format {
- DEPTH4_NONE = 0,
- DEPTH4_16 = 1,
- DEPTH4_24_8 = 2,
- DEPTH4_32 = 3,
-};
-
-enum a4xx_ccu_perfcounter_select {
- CCU_BUSY_CYCLES = 0,
- CCU_RB_DEPTH_RETURN_STALL = 2,
- CCU_RB_COLOR_RETURN_STALL = 3,
- CCU_DEPTH_BLOCKS = 6,
- CCU_COLOR_BLOCKS = 7,
- CCU_DEPTH_BLOCK_HIT = 8,
- CCU_COLOR_BLOCK_HIT = 9,
- CCU_DEPTH_FLAG1_COUNT = 10,
- CCU_DEPTH_FLAG2_COUNT = 11,
- CCU_DEPTH_FLAG3_COUNT = 12,
- CCU_DEPTH_FLAG4_COUNT = 13,
- CCU_COLOR_FLAG1_COUNT = 14,
- CCU_COLOR_FLAG2_COUNT = 15,
- CCU_COLOR_FLAG3_COUNT = 16,
- CCU_COLOR_FLAG4_COUNT = 17,
- CCU_PARTIAL_BLOCK_READ = 18,
-};
-
-enum a4xx_cp_perfcounter_select {
- CP_ALWAYS_COUNT = 0,
- CP_BUSY = 1,
- CP_PFP_IDLE = 2,
- CP_PFP_BUSY_WORKING = 3,
- CP_PFP_STALL_CYCLES_ANY = 4,
- CP_PFP_STARVE_CYCLES_ANY = 5,
- CP_PFP_STARVED_PER_LOAD_ADDR = 6,
- CP_PFP_STALLED_PER_STORE_ADDR = 7,
- CP_PFP_PC_PROFILE = 8,
- CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
- CP_PFP_COND_INDIRECT_DISCARDED = 10,
- CP_LONG_RESUMPTIONS = 11,
- CP_RESUME_CYCLES = 12,
- CP_RESUME_TO_BOUNDARY_CYCLES = 13,
- CP_LONG_PREEMPTIONS = 14,
- CP_PREEMPT_CYCLES = 15,
- CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
- CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
- CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
- CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
- CP_ME_FIFO_FULL_ME_BUSY = 20,
- CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
- CP_ME_WAITING_FOR_PACKETS = 22,
- CP_ME_BUSY_WORKING = 23,
- CP_ME_STARVE_CYCLES_ANY = 24,
- CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
- CP_ME_STALL_CYCLES_PER_PROFILE = 26,
- CP_ME_PC_PROFILE = 27,
- CP_RCIU_FIFO_EMPTY = 28,
- CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
- CP_RCIU_FIFO_FULL = 30,
- CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
- CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
- CP_RCIU_FIFO_FULL_OTHER = 33,
- CP_AHB_IDLE = 34,
- CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
- CP_AHB_STALL_ON_GRANT_SPLIT = 36,
- CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
- CP_AHB_BUSY_WORKING = 38,
- CP_AHB_BUSY_STALL_ON_HRDY = 39,
- CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
-};
-
-enum a4xx_gras_ras_perfcounter_select {
- RAS_SUPER_TILES = 0,
- RAS_8X8_TILES = 1,
- RAS_4X4_TILES = 2,
- RAS_BUSY_CYCLES = 3,
- RAS_STALL_CYCLES_BY_RB = 4,
- RAS_STALL_CYCLES_BY_VSC = 5,
- RAS_STARVE_CYCLES_BY_TSE = 6,
- RAS_SUPERTILE_CYCLES = 7,
- RAS_TILE_CYCLES = 8,
- RAS_FULLY_COVERED_SUPER_TILES = 9,
- RAS_FULLY_COVERED_8X8_TILES = 10,
- RAS_4X4_PRIM = 11,
- RAS_8X4_4X8_PRIM = 12,
- RAS_8X8_PRIM = 13,
-};
-
-enum a4xx_gras_tse_perfcounter_select {
- TSE_INPUT_PRIM = 0,
- TSE_INPUT_NULL_PRIM = 1,
- TSE_TRIVAL_REJ_PRIM = 2,
- TSE_CLIPPED_PRIM = 3,
- TSE_NEW_PRIM = 4,
- TSE_ZERO_AREA_PRIM = 5,
- TSE_FACENESS_CULLED_PRIM = 6,
- TSE_ZERO_PIXEL_PRIM = 7,
- TSE_OUTPUT_NULL_PRIM = 8,
- TSE_OUTPUT_VISIBLE_PRIM = 9,
- TSE_PRE_CLIP_PRIM = 10,
- TSE_POST_CLIP_PRIM = 11,
- TSE_BUSY_CYCLES = 12,
- TSE_PC_STARVE = 13,
- TSE_RAS_STALL = 14,
- TSE_STALL_BARYPLANE_FIFO_FULL = 15,
- TSE_STALL_ZPLANE_FIFO_FULL = 16,
-};
-
-enum a4xx_hlsq_perfcounter_select {
- HLSQ_SP_VS_STAGE_CONSTANT = 0,
- HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
- HLSQ_SP_FS_STAGE_CONSTANT = 2,
- HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
- HLSQ_TP_STATE = 4,
- HLSQ_QUADS = 5,
- HLSQ_PIXELS = 6,
- HLSQ_VERTICES = 7,
- HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
- HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
- HLSQ_BUSY_CYCLES = 15,
- HLSQ_STALL_CYCLES_SP_STATE = 16,
- HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
- HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
- HLSQ_STALL_CYCLES_UCHE = 19,
- HLSQ_RBBM_LOAD_CYCLES = 20,
- HLSQ_DI_TO_VS_START_SP = 21,
- HLSQ_DI_TO_FS_START_SP = 22,
- HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
- HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
- HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
- HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
- HLSQ_UCHE_LATENCY_CYCLES = 27,
- HLSQ_UCHE_LATENCY_COUNT = 28,
- HLSQ_STARVE_CYCLES_VFD = 29,
-};
-
-enum a4xx_pc_perfcounter_select {
- PC_VIS_STREAMS_LOADED = 0,
- PC_VPC_PRIMITIVES = 2,
- PC_DEAD_PRIM = 3,
- PC_LIVE_PRIM = 4,
- PC_DEAD_DRAWCALLS = 5,
- PC_LIVE_DRAWCALLS = 6,
- PC_VERTEX_MISSES = 7,
- PC_STALL_CYCLES_VFD = 9,
- PC_STALL_CYCLES_TSE = 10,
- PC_STALL_CYCLES_UCHE = 11,
- PC_WORKING_CYCLES = 12,
- PC_IA_VERTICES = 13,
- PC_GS_PRIMITIVES = 14,
- PC_HS_INVOCATIONS = 15,
- PC_DS_INVOCATIONS = 16,
- PC_DS_PRIMITIVES = 17,
- PC_STARVE_CYCLES_FOR_INDEX = 20,
- PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
- PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
- PC_STALL_CYCLES_TESS = 23,
- PC_STARVE_CYCLES_FOR_POSITION = 24,
- PC_MODE0_DRAWCALL = 25,
- PC_MODE1_DRAWCALL = 26,
- PC_MODE2_DRAWCALL = 27,
- PC_MODE3_DRAWCALL = 28,
- PC_MODE4_DRAWCALL = 29,
- PC_PREDICATED_DEAD_DRAWCALL = 30,
- PC_STALL_CYCLES_BY_TSE_ONLY = 31,
- PC_STALL_CYCLES_BY_VPC_ONLY = 32,
- PC_VPC_POS_DATA_TRANSACTION = 33,
- PC_BUSY_CYCLES = 34,
- PC_STARVE_CYCLES_DI = 35,
- PC_STALL_CYCLES_VPC = 36,
- TESS_WORKING_CYCLES = 37,
- TESS_NUM_CYCLES_SETUP_WORKING = 38,
- TESS_NUM_CYCLES_PTGEN_WORKING = 39,
- TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
- TESS_BUSY_CYCLES = 41,
- TESS_STARVE_CYCLES_PC = 42,
- TESS_STALL_CYCLES_PC = 43,
-};
-
-enum a4xx_pwr_perfcounter_select {
- PWR_CORE_CLOCK_CYCLES = 0,
- PWR_BUSY_CLOCK_CYCLES = 1,
-};
-
-enum a4xx_rb_perfcounter_select {
- RB_BUSY_CYCLES = 0,
- RB_BUSY_CYCLES_BINNING = 1,
- RB_BUSY_CYCLES_RENDERING = 2,
- RB_BUSY_CYCLES_RESOLVE = 3,
- RB_STARVE_CYCLES_BY_SP = 4,
- RB_STARVE_CYCLES_BY_RAS = 5,
- RB_STARVE_CYCLES_BY_MARB = 6,
- RB_STALL_CYCLES_BY_MARB = 7,
- RB_STALL_CYCLES_BY_HLSQ = 8,
- RB_RB_RB_MARB_DATA = 9,
- RB_SP_RB_QUAD = 10,
- RB_RAS_RB_Z_QUADS = 11,
- RB_GMEM_CH0_READ = 12,
- RB_GMEM_CH1_READ = 13,
- RB_GMEM_CH0_WRITE = 14,
- RB_GMEM_CH1_WRITE = 15,
- RB_CP_CONTEXT_DONE = 16,
- RB_CP_CACHE_FLUSH = 17,
- RB_CP_ZPASS_DONE = 18,
- RB_STALL_FIFO0_FULL = 19,
- RB_STALL_FIFO1_FULL = 20,
- RB_STALL_FIFO2_FULL = 21,
- RB_STALL_FIFO3_FULL = 22,
- RB_RB_HLSQ_TRANSACTIONS = 23,
- RB_Z_READ = 24,
- RB_Z_WRITE = 25,
- RB_C_READ = 26,
- RB_C_WRITE = 27,
- RB_C_READ_LATENCY = 28,
- RB_Z_READ_LATENCY = 29,
- RB_STALL_BY_UCHE = 30,
- RB_MARB_UCHE_TRANSACTIONS = 31,
- RB_CACHE_STALL_MISS = 32,
- RB_CACHE_STALL_FIFO_FULL = 33,
- RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
- RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
- RB_SAMPLER_UNITS_ACTIVE = 36,
- RB_TOTAL_PASS = 38,
- RB_Z_PASS = 39,
- RB_Z_FAIL = 40,
- RB_S_FAIL = 41,
- RB_POWER0 = 42,
- RB_POWER1 = 43,
- RB_POWER2 = 44,
- RB_POWER3 = 45,
- RB_POWER4 = 46,
- RB_POWER5 = 47,
- RB_POWER6 = 48,
- RB_POWER7 = 49,
-};
-
-enum a4xx_rbbm_perfcounter_select {
- RBBM_ALWAYS_ON = 0,
- RBBM_VBIF_BUSY = 1,
- RBBM_TSE_BUSY = 2,
- RBBM_RAS_BUSY = 3,
- RBBM_PC_DCALL_BUSY = 4,
- RBBM_PC_VSD_BUSY = 5,
- RBBM_VFD_BUSY = 6,
- RBBM_VPC_BUSY = 7,
- RBBM_UCHE_BUSY = 8,
- RBBM_VSC_BUSY = 9,
- RBBM_HLSQ_BUSY = 10,
- RBBM_ANY_RB_BUSY = 11,
- RBBM_ANY_TPL1_BUSY = 12,
- RBBM_ANY_SP_BUSY = 13,
- RBBM_ANY_MARB_BUSY = 14,
- RBBM_ANY_ARB_BUSY = 15,
- RBBM_AHB_STATUS_BUSY = 16,
- RBBM_AHB_STATUS_STALLED = 17,
- RBBM_AHB_STATUS_TXFR = 18,
- RBBM_AHB_STATUS_TXFR_SPLIT = 19,
- RBBM_AHB_STATUS_TXFR_ERROR = 20,
- RBBM_AHB_STATUS_LONG_STALL = 21,
- RBBM_STATUS_MASKED = 22,
- RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
- RBBM_TESS_BUSY = 24,
- RBBM_COM_BUSY = 25,
- RBBM_DCOM_BUSY = 32,
- RBBM_ANY_CCU_BUSY = 33,
- RBBM_DPM_BUSY = 34,
-};
-
-enum a4xx_sp_perfcounter_select {
- SP_LM_LOAD_INSTRUCTIONS = 0,
- SP_LM_STORE_INSTRUCTIONS = 1,
- SP_LM_ATOMICS = 2,
- SP_GM_LOAD_INSTRUCTIONS = 3,
- SP_GM_STORE_INSTRUCTIONS = 4,
- SP_GM_ATOMICS = 5,
- SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
- SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
- SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
- SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
- SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
- SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
- SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
- SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
- SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
- SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
- SP_VS_INSTRUCTIONS = 17,
- SP_FS_INSTRUCTIONS = 18,
- SP_ADDR_LOCK_COUNT = 19,
- SP_UCHE_READ_TRANS = 20,
- SP_UCHE_WRITE_TRANS = 21,
- SP_EXPORT_VPC_TRANS = 22,
- SP_EXPORT_RB_TRANS = 23,
- SP_PIXELS_KILLED = 24,
- SP_ICL1_REQUESTS = 25,
- SP_ICL1_MISSES = 26,
- SP_ICL0_REQUESTS = 27,
- SP_ICL0_MISSES = 28,
- SP_ALU_WORKING_CYCLES = 29,
- SP_EFU_WORKING_CYCLES = 30,
- SP_STALL_CYCLES_BY_VPC = 31,
- SP_STALL_CYCLES_BY_TP = 32,
- SP_STALL_CYCLES_BY_UCHE = 33,
- SP_STALL_CYCLES_BY_RB = 34,
- SP_BUSY_CYCLES = 35,
- SP_HS_INSTRUCTIONS = 36,
- SP_DS_INSTRUCTIONS = 37,
- SP_GS_INSTRUCTIONS = 38,
- SP_CS_INSTRUCTIONS = 39,
- SP_SCHEDULER_NON_WORKING = 40,
- SP_WAVE_CONTEXTS = 41,
- SP_WAVE_CONTEXT_CYCLES = 42,
- SP_POWER0 = 43,
- SP_POWER1 = 44,
- SP_POWER2 = 45,
- SP_POWER3 = 46,
- SP_POWER4 = 47,
- SP_POWER5 = 48,
- SP_POWER6 = 49,
- SP_POWER7 = 50,
- SP_POWER8 = 51,
- SP_POWER9 = 52,
- SP_POWER10 = 53,
- SP_POWER11 = 54,
- SP_POWER12 = 55,
- SP_POWER13 = 56,
- SP_POWER14 = 57,
- SP_POWER15 = 58,
-};
-
-enum a4xx_tp_perfcounter_select {
- TP_L1_REQUESTS = 0,
- TP_L1_MISSES = 1,
- TP_QUADS_OFFSET = 8,
- TP_QUAD_SHADOW = 9,
- TP_QUADS_ARRAY = 10,
- TP_QUADS_GRADIENT = 11,
- TP_QUADS_1D2D = 12,
- TP_QUADS_3DCUBE = 13,
- TP_BUSY_CYCLES = 16,
- TP_STALL_CYCLES_BY_ARB = 17,
- TP_STATE_CACHE_REQUESTS = 20,
- TP_STATE_CACHE_MISSES = 21,
- TP_POWER0 = 22,
- TP_POWER1 = 23,
- TP_POWER2 = 24,
- TP_POWER3 = 25,
- TP_POWER4 = 26,
- TP_POWER5 = 27,
- TP_POWER6 = 28,
- TP_POWER7 = 29,
-};
-
-enum a4xx_uche_perfcounter_select {
- UCHE_VBIF_READ_BEATS_TP = 0,
- UCHE_VBIF_READ_BEATS_VFD = 1,
- UCHE_VBIF_READ_BEATS_HLSQ = 2,
- UCHE_VBIF_READ_BEATS_MARB = 3,
- UCHE_VBIF_READ_BEATS_SP = 4,
- UCHE_READ_REQUESTS_TP = 5,
- UCHE_READ_REQUESTS_VFD = 6,
- UCHE_READ_REQUESTS_HLSQ = 7,
- UCHE_READ_REQUESTS_MARB = 8,
- UCHE_READ_REQUESTS_SP = 9,
- UCHE_WRITE_REQUESTS_MARB = 10,
- UCHE_WRITE_REQUESTS_SP = 11,
- UCHE_TAG_CHECK_FAILS = 12,
- UCHE_EVICTS = 13,
- UCHE_FLUSHES = 14,
- UCHE_VBIF_LATENCY_CYCLES = 15,
- UCHE_VBIF_LATENCY_SAMPLES = 16,
- UCHE_BUSY_CYCLES = 17,
- UCHE_VBIF_READ_BEATS_PC = 18,
- UCHE_READ_REQUESTS_PC = 19,
- UCHE_WRITE_REQUESTS_VPC = 20,
- UCHE_STALL_BY_VBIF = 21,
- UCHE_WRITE_REQUESTS_VSC = 22,
- UCHE_POWER0 = 23,
- UCHE_POWER1 = 24,
- UCHE_POWER2 = 25,
- UCHE_POWER3 = 26,
- UCHE_POWER4 = 27,
- UCHE_POWER5 = 28,
- UCHE_POWER6 = 29,
- UCHE_POWER7 = 30,
-};
-
-enum a4xx_vbif_perfcounter_select {
- AXI_READ_REQUESTS_ID_0 = 0,
- AXI_READ_REQUESTS_ID_1 = 1,
- AXI_READ_REQUESTS_ID_2 = 2,
- AXI_READ_REQUESTS_ID_3 = 3,
- AXI_READ_REQUESTS_ID_4 = 4,
- AXI_READ_REQUESTS_ID_5 = 5,
- AXI_READ_REQUESTS_ID_6 = 6,
- AXI_READ_REQUESTS_ID_7 = 7,
- AXI_READ_REQUESTS_ID_8 = 8,
- AXI_READ_REQUESTS_ID_9 = 9,
- AXI_READ_REQUESTS_ID_10 = 10,
- AXI_READ_REQUESTS_ID_11 = 11,
- AXI_READ_REQUESTS_ID_12 = 12,
- AXI_READ_REQUESTS_ID_13 = 13,
- AXI_READ_REQUESTS_ID_14 = 14,
- AXI_READ_REQUESTS_ID_15 = 15,
- AXI0_READ_REQUESTS_TOTAL = 16,
- AXI1_READ_REQUESTS_TOTAL = 17,
- AXI2_READ_REQUESTS_TOTAL = 18,
- AXI3_READ_REQUESTS_TOTAL = 19,
- AXI_READ_REQUESTS_TOTAL = 20,
- AXI_WRITE_REQUESTS_ID_0 = 21,
- AXI_WRITE_REQUESTS_ID_1 = 22,
- AXI_WRITE_REQUESTS_ID_2 = 23,
- AXI_WRITE_REQUESTS_ID_3 = 24,
- AXI_WRITE_REQUESTS_ID_4 = 25,
- AXI_WRITE_REQUESTS_ID_5 = 26,
- AXI_WRITE_REQUESTS_ID_6 = 27,
- AXI_WRITE_REQUESTS_ID_7 = 28,
- AXI_WRITE_REQUESTS_ID_8 = 29,
- AXI_WRITE_REQUESTS_ID_9 = 30,
- AXI_WRITE_REQUESTS_ID_10 = 31,
- AXI_WRITE_REQUESTS_ID_11 = 32,
- AXI_WRITE_REQUESTS_ID_12 = 33,
- AXI_WRITE_REQUESTS_ID_13 = 34,
- AXI_WRITE_REQUESTS_ID_14 = 35,
- AXI_WRITE_REQUESTS_ID_15 = 36,
- AXI0_WRITE_REQUESTS_TOTAL = 37,
- AXI1_WRITE_REQUESTS_TOTAL = 38,
- AXI2_WRITE_REQUESTS_TOTAL = 39,
- AXI3_WRITE_REQUESTS_TOTAL = 40,
- AXI_WRITE_REQUESTS_TOTAL = 41,
- AXI_TOTAL_REQUESTS = 42,
- AXI_READ_DATA_BEATS_ID_0 = 43,
- AXI_READ_DATA_BEATS_ID_1 = 44,
- AXI_READ_DATA_BEATS_ID_2 = 45,
- AXI_READ_DATA_BEATS_ID_3 = 46,
- AXI_READ_DATA_BEATS_ID_4 = 47,
- AXI_READ_DATA_BEATS_ID_5 = 48,
- AXI_READ_DATA_BEATS_ID_6 = 49,
- AXI_READ_DATA_BEATS_ID_7 = 50,
- AXI_READ_DATA_BEATS_ID_8 = 51,
- AXI_READ_DATA_BEATS_ID_9 = 52,
- AXI_READ_DATA_BEATS_ID_10 = 53,
- AXI_READ_DATA_BEATS_ID_11 = 54,
- AXI_READ_DATA_BEATS_ID_12 = 55,
- AXI_READ_DATA_BEATS_ID_13 = 56,
- AXI_READ_DATA_BEATS_ID_14 = 57,
- AXI_READ_DATA_BEATS_ID_15 = 58,
- AXI0_READ_DATA_BEATS_TOTAL = 59,
- AXI1_READ_DATA_BEATS_TOTAL = 60,
- AXI2_READ_DATA_BEATS_TOTAL = 61,
- AXI3_READ_DATA_BEATS_TOTAL = 62,
- AXI_READ_DATA_BEATS_TOTAL = 63,
- AXI_WRITE_DATA_BEATS_ID_0 = 64,
- AXI_WRITE_DATA_BEATS_ID_1 = 65,
- AXI_WRITE_DATA_BEATS_ID_2 = 66,
- AXI_WRITE_DATA_BEATS_ID_3 = 67,
- AXI_WRITE_DATA_BEATS_ID_4 = 68,
- AXI_WRITE_DATA_BEATS_ID_5 = 69,
- AXI_WRITE_DATA_BEATS_ID_6 = 70,
- AXI_WRITE_DATA_BEATS_ID_7 = 71,
- AXI_WRITE_DATA_BEATS_ID_8 = 72,
- AXI_WRITE_DATA_BEATS_ID_9 = 73,
- AXI_WRITE_DATA_BEATS_ID_10 = 74,
- AXI_WRITE_DATA_BEATS_ID_11 = 75,
- AXI_WRITE_DATA_BEATS_ID_12 = 76,
- AXI_WRITE_DATA_BEATS_ID_13 = 77,
- AXI_WRITE_DATA_BEATS_ID_14 = 78,
- AXI_WRITE_DATA_BEATS_ID_15 = 79,
- AXI0_WRITE_DATA_BEATS_TOTAL = 80,
- AXI1_WRITE_DATA_BEATS_TOTAL = 81,
- AXI2_WRITE_DATA_BEATS_TOTAL = 82,
- AXI3_WRITE_DATA_BEATS_TOTAL = 83,
- AXI_WRITE_DATA_BEATS_TOTAL = 84,
- AXI_DATA_BEATS_TOTAL = 85,
- CYCLES_HELD_OFF_ID_0 = 86,
- CYCLES_HELD_OFF_ID_1 = 87,
- CYCLES_HELD_OFF_ID_2 = 88,
- CYCLES_HELD_OFF_ID_3 = 89,
- CYCLES_HELD_OFF_ID_4 = 90,
- CYCLES_HELD_OFF_ID_5 = 91,
- CYCLES_HELD_OFF_ID_6 = 92,
- CYCLES_HELD_OFF_ID_7 = 93,
- CYCLES_HELD_OFF_ID_8 = 94,
- CYCLES_HELD_OFF_ID_9 = 95,
- CYCLES_HELD_OFF_ID_10 = 96,
- CYCLES_HELD_OFF_ID_11 = 97,
- CYCLES_HELD_OFF_ID_12 = 98,
- CYCLES_HELD_OFF_ID_13 = 99,
- CYCLES_HELD_OFF_ID_14 = 100,
- CYCLES_HELD_OFF_ID_15 = 101,
- AXI_READ_REQUEST_HELD_OFF = 102,
- AXI_WRITE_REQUEST_HELD_OFF = 103,
- AXI_REQUEST_HELD_OFF = 104,
- AXI_WRITE_DATA_HELD_OFF = 105,
- OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
- OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
- OCMEM_AXI_REQUEST_HELD_OFF = 108,
- OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
- ELAPSED_CYCLES_DDR = 110,
- ELAPSED_CYCLES_OCMEM = 111,
-};
-
-enum a4xx_vfd_perfcounter_select {
- VFD_UCHE_BYTE_FETCHED = 0,
- VFD_UCHE_TRANS = 1,
- VFD_FETCH_INSTRUCTIONS = 3,
- VFD_BUSY_CYCLES = 5,
- VFD_STALL_CYCLES_UCHE = 6,
- VFD_STALL_CYCLES_HLSQ = 7,
- VFD_STALL_CYCLES_VPC_BYPASS = 8,
- VFD_STALL_CYCLES_VPC_ALLOC = 9,
- VFD_MODE_0_FIBERS = 13,
- VFD_MODE_1_FIBERS = 14,
- VFD_MODE_2_FIBERS = 15,
- VFD_MODE_3_FIBERS = 16,
- VFD_MODE_4_FIBERS = 17,
- VFD_BFIFO_STALL = 18,
- VFD_NUM_VERTICES_TOTAL = 19,
- VFD_PACKER_FULL = 20,
- VFD_UCHE_REQUEST_FIFO_FULL = 21,
- VFD_STARVE_CYCLES_PC = 22,
- VFD_STARVE_CYCLES_UCHE = 23,
-};
-
-enum a4xx_vpc_perfcounter_select {
- VPC_SP_LM_COMPONENTS = 2,
- VPC_SP0_LM_BYTES = 3,
- VPC_SP1_LM_BYTES = 4,
- VPC_SP2_LM_BYTES = 5,
- VPC_SP3_LM_BYTES = 6,
- VPC_WORKING_CYCLES = 7,
- VPC_STALL_CYCLES_LM = 8,
- VPC_STARVE_CYCLES_RAS = 9,
- VPC_STREAMOUT_CYCLES = 10,
- VPC_UCHE_TRANSACTIONS = 12,
- VPC_STALL_CYCLES_UCHE = 13,
- VPC_BUSY_CYCLES = 14,
- VPC_STARVE_CYCLES_SP = 15,
-};
-
-enum a4xx_vsc_perfcounter_select {
- VSC_BUSY_CYCLES = 0,
- VSC_WORKING_CYCLES = 1,
- VSC_STALL_CYCLES_UCHE = 2,
- VSC_STARVE_CYCLES_RAS = 3,
- VSC_EOT_NUM = 4,
-};
-
-enum a4xx_tex_filter {
- A4XX_TEX_NEAREST = 0,
- A4XX_TEX_LINEAR = 1,
- A4XX_TEX_ANISO = 2,
-};
-
-enum a4xx_tex_clamp {
- A4XX_TEX_REPEAT = 0,
- A4XX_TEX_CLAMP_TO_EDGE = 1,
- A4XX_TEX_MIRROR_REPEAT = 2,
- A4XX_TEX_CLAMP_TO_BORDER = 3,
- A4XX_TEX_MIRROR_CLAMP = 4,
-};
-
-enum a4xx_tex_aniso {
- A4XX_TEX_ANISO_1 = 0,
- A4XX_TEX_ANISO_2 = 1,
- A4XX_TEX_ANISO_4 = 2,
- A4XX_TEX_ANISO_8 = 3,
- A4XX_TEX_ANISO_16 = 4,
-};
-
-enum a4xx_tex_swiz {
- A4XX_TEX_X = 0,
- A4XX_TEX_Y = 1,
- A4XX_TEX_Z = 2,
- A4XX_TEX_W = 3,
- A4XX_TEX_ZERO = 4,
- A4XX_TEX_ONE = 5,
-};
-
-enum a4xx_tex_type {
- A4XX_TEX_1D = 0,
- A4XX_TEX_2D = 1,
- A4XX_TEX_CUBE = 2,
- A4XX_TEX_3D = 3,
- A4XX_TEX_BUFFER = 4,
-};
-
-#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
-#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
-static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
-{
- return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
-}
-
-#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
-#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
-#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
-#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
-#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
-#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
-#define A4XX_INT0_VFD_ERROR 0x00000040
-#define A4XX_INT0_CP_SW_INT 0x00000080
-#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
-#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
-#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
-#define A4XX_INT0_CP_HW_FAULT 0x00000800
-#define A4XX_INT0_CP_DMA 0x00001000
-#define A4XX_INT0_CP_IB2_INT 0x00002000
-#define A4XX_INT0_CP_IB1_INT 0x00004000
-#define A4XX_INT0_CP_RB_INT 0x00008000
-#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
-#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
-#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
-#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
-#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
-#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
-#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
-#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
-
-#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
-
-#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
-
-#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
-
-#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
-
-#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
-
-#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
-
-#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
-#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
-#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
-static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
-{
- return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
-}
-#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
-#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
-static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
-{
- return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
-}
-
-#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
-
-#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
-
-#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
-
-#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
-
-#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
-#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
-#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
-static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
-}
-#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
-#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
-static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
-}
-#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
-
-#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
-#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
-#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
-
-#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
-#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
-#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
-#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
-static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
-{
- return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
-}
-
-#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3
-#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f
-#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0
-static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK;
-}
-#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010
-#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
-#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040
-#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
-#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
-static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
-}
-#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800
-#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000
-#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000
-#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000
-#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000
-
-#define REG_A4XX_RB_MRT(i0) (0x000020a4 + 0x5*(i0))
-
-static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
-#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
-#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
-#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
-#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
-{
- return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
-}
-#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
-#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
-static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
-}
-
-static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
-#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
-#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
-{
- return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
-}
-#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
-#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
-static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val)
-{
- return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
-}
-#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
-#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
-static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
-}
-#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
-#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
-static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
-}
-#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
-static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
-}
-
-static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
-
-static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
-#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
-static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
-{
- return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
-}
-
-static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
-}
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
-}
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
-#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
-}
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
-}
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
-}
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
-#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
-#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
-}
-#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
-#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
-static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
-}
-#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
-#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
-static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
-#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
-#define A4XX_RB_BLEND_RED_F32__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
-{
- return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
-#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
-}
-#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
-#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
-static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
-}
-#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
-#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
-static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
-#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
-#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
-{
- return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
-#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
-}
-#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
-#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
-static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
-}
-#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
-#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
-static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
-#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
-#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
-{
- return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
-#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
-}
-#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
-#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
-static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
-{
- return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
-}
-#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
-#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
-static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
-}
-
-#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
-#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
-#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
-static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
-{
- return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
-}
-
-#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
-#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
-#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
-static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
-{
- return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
-}
-#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
-#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
-#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
-static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
-}
-
-#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
-#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff
-#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0
-static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
-{
- return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
-}
-#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
-#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
-#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
-static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
-{
- return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
-}
-
-#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
-#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
-#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
-#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
-static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
-}
-
-#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
-#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
-#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
-#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
-#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
-#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
-#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
-#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
-#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK;
-}
-#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
-#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
-static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
-{
- return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK;
-}
-
-#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
-#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
-#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
-static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
-{
- return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
-}
-#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
-#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
-static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
-{
- return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
-}
-#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
-#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
-static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
-{
- return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
-}
-#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
-#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
-static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
-{
- assert(!(val & 0x3fff));
- return (((val >> 14)) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
-}
-
-#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
-#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0
-#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5
-static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
-}
-
-#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
-#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
-#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
-static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
-}
-
-#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
-#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
-#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
-}
-#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
-#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
-}
-#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
-#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
-}
-#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
-#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
-}
-#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
-#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
-}
-#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
-#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
-static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
-{
- return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
-}
-
-#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
-#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f
-#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0
-static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val)
-{
- return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK;
-}
-#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
-
-#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
-#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
-#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
-#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
-#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
-#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
-static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
-{
- return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
-}
-#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
-#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
-#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
-#define A4XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
-
-#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
-
-#define REG_A4XX_RB_DEPTH_INFO 0x00002103
-#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
-#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
-{
- return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
-}
-#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
-#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
-static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
-}
-
-#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
-#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
-#define A4XX_RB_DEPTH_PITCH__SHIFT 0
-static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
-}
-
-#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
-#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
-#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
-static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
-}
-
-#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
-#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
-#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
-#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
-#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
-#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
-#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
-#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
-#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
-#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
-#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
-#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
-}
-#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
-#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
-static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
-}
-
-#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
-#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
-
-#define REG_A4XX_RB_STENCIL_INFO 0x00002108
-#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
-#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
-#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
-static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
-}
-
-#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
-#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
-#define A4XX_RB_STENCIL_PITCH__SHIFT 0
-static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
-}
-
-#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
-#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
-#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
-static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
-}
-#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
-#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
-static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
-}
-#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
-#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
-#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
-#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
-static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
-}
-#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
-#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
-static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
-}
-#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
-#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
-#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
-#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
-static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
-{
- return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
-}
-#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
-#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
-static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
-{
- return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
-}
-
-#define REG_A4XX_RB_VPORT_Z_CLAMP(i0) (0x00002120 + 0x2*(i0))
-
-static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; }
-
-static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; }
-
-#define REG_A4XX_RBBM_HW_VERSION 0x00000000
-
-#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
-
-#define REG_A4XX_RBBM_CLOCK_CTL_TP(i0) (0x00000004 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL2_TP(i0) (0x00000008 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_HYST_TP(i0) (0x0000000c + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_TP(i0) (0x00000010 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
-
-#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
-
-#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
-
-#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
-
-#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
-
-#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
-
-#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
-
-#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
-
-#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
-
-#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
-
-#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
-
-#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
-
-#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
-
-#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
-
-#define REG_A4XX_RBBM_AHB_CMD 0x00000025
-
-#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
-
-#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
-
-#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
-
-#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
-
-#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
-
-#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
-
-#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
-
-#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
-
-#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
-
-#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
-
-#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
-
-#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
-
-#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
-
-#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
-
-#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
-
-#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
-
-#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
-
-#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
-
-#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
-#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
-#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
-
-#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
-
-#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
-
-#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
-
-#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
-
-#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
-
-#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
-
-#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
-
-#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
-
-#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
-
-#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
-
-#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
-
-#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
-
-#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
-
-#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
-
-#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
-
-#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
-
-#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
-
-#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
-
-#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
-
-#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
-
-#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
-
-#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
-
-#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
-
-#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
-
-#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
-
-#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
-
-#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
-
-#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
-
-#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
-
-#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
-
-#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
-
-#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
-
-#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
-
-#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
-
-#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
-
-#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
-
-#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
-
-#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
-
-#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
-
-#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
-
-#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
-
-#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
-
-#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
-
-#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
-
-#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
-
-#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
-
-#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
-
-#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
-
-#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
-
-#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
-
-#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
-
-#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
-
-#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
-
-#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
-
-#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
-
-#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
-
-#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
-
-#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
-
-#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
-
-#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
-
-#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
-
-#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
-
-#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
-
-#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
-
-#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
-
-#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
-
-#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
-
-#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
-
-#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
-
-#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
-
-#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
-
-#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
-
-#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
-
-#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
-
-#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
-
-#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
-
-#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
-
-#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
-
-#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
-
-#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
-
-#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
-
-#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
-
-#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
-
-#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
-
-#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
-
-#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
-
-#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
-
-#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
-
-#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
-
-#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
-
-#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
-
-#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
-
-#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
-
-#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
-
-#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
-
-#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
-
-#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
-
-#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
-
-#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
-
-#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
-
-#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
-
-#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
-
-#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
-
-#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
-
-#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
-
-#define REG_A4XX_RBBM_CLOCK_CTL_SP(i0) (0x00000068 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL2_SP(i0) (0x0000006c + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_HYST_SP(i0) (0x00000070 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_SP(i0) (0x00000074 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL_RB(i0) (0x00000078 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL2_RB(i0) (0x0000007c + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i0) (0x00000082 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i0) (0x00000086 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
-
-#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
-
-#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
-
-#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
-
-#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
-
-#define REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i0) (0x0000008e + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
-
-#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
-
-#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
-
-#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
-
-#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
-
-#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
-
-#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
-
-#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
-
-#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
-
-#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
-
-#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
-
-#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
-
-#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
-
-#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
-
-#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
-
-#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
-
-#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
-
-#define REG_A4XX_RBBM_STATUS 0x00000191
-#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
-#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
-#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
-#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
-#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
-#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
-#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
-#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
-#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
-#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
-#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
-#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
-#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
-#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
-#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
-#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
-#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
-#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
-#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
-#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
-#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
-
-#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
-
-#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
-#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
-
-#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
-
-#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
-
-#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
-
-#define REG_A4XX_CP_RB_BASE 0x00000200
-
-#define REG_A4XX_CP_RB_CNTL 0x00000201
-
-#define REG_A4XX_CP_RB_WPTR 0x00000205
-
-#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
-
-#define REG_A4XX_CP_RB_RPTR 0x00000204
-
-#define REG_A4XX_CP_IB1_BASE 0x00000206
-
-#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
-
-#define REG_A4XX_CP_IB2_BASE 0x00000208
-
-#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
-
-#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c
-
-#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d
-
-#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
-
-#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
-
-#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
-
-#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
-
-#define REG_A4XX_CP_ROQ_DATA 0x0000021d
-
-#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
-
-#define REG_A4XX_CP_MEQ_DATA 0x0000021f
-
-#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
-
-#define REG_A4XX_CP_MERCIU_DATA 0x00000221
-
-#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
-
-#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
-
-#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
-
-#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
-
-#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
-
-#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
-
-#define REG_A4XX_CP_PREEMPT 0x0000022a
-
-#define REG_A4XX_CP_CNTL 0x0000022c
-
-#define REG_A4XX_CP_ME_CNTL 0x0000022d
-
-#define REG_A4XX_CP_DEBUG 0x0000022e
-
-#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
-
-#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-
-#define REG_A4XX_CP_PROTECT(i0) (0x00000240 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
-#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
-#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
-static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
-{
- return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
-}
-#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
-#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
-static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
-{
- return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
-}
-#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
-#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
-
-#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
-
-#define REG_A4XX_CP_ST_BASE 0x000004c0
-
-#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
-
-#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
-
-#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
-
-#define REG_A4XX_CP_HW_FAULT 0x000004d8
-
-#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
-
-#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
-
-#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
-
-#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
-
-#define REG_A4XX_CP_SCRATCH(i0) (0x00000578 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
-
-#define REG_A4XX_SP_VS_STATUS 0x00000ec0
-
-#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
-
-#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
-
-#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
-#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
-
-#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
-#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080
-#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100
-#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400
-
-#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
-#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
-#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
-#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
-
-#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
-#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
-}
-#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
-#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
-static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
-}
-
-#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
-#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
-#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
-static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
-}
-#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
-#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
-static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
-}
-#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
-#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
-static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
-}
-
-#define REG_A4XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
-#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
-}
-#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
-#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
-static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
-#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
-}
-#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
-#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
-static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A4XX_SP_VS_VPC_DST(i0) (0x000022d8 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
-#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
-
-#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
-
-#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
-
-#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
-
-#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
-#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
-#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
-#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
-
-#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
-#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
-}
-#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000
-#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
-#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000
-
-#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
-#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
-
-#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
-
-#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
-
-#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
-
-#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
-#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f
-#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
-static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK;
-}
-#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
-#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
-#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
-static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
-}
-#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000
-#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24
-static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
-}
-
-#define REG_A4XX_SP_FS_MRT(i0) (0x000022f1 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
-#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
-#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
-static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
-}
-#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
-#define A4XX_SP_FS_MRT_REG_COLOR_SINT 0x00000400
-#define A4XX_SP_FS_MRT_REG_COLOR_UINT 0x00000800
-#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
-#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
-static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
-{
- return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
-}
-#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
-
-#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
-#define A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A4XX_SP_CS_CTRL_REG0_VARYING 0x00000002
-#define A4XX_SP_CS_CTRL_REG0_CACHEINVALID 0x00000004
-#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
-#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A4XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00400000
-
-#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301
-
-#define REG_A4XX_SP_CS_OBJ_START 0x00002302
-
-#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303
-
-#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304
-
-#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305
-
-#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306
-
-#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
-#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A4XX_SP_HS_OBJ_START 0x0000230e
-
-#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f
-
-#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310
-
-#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
-
-#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
-#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
-#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
-static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
-}
-#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
-#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
-static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
-}
-
-#define REG_A4XX_SP_DS_OUT(i0) (0x0000231b + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
-#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
-#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
-}
-#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
-#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
-static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
-#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
-}
-#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
-#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
-static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A4XX_SP_DS_VPC_DST(i0) (0x0000232c + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
-#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A4XX_SP_DS_OBJ_START 0x00002335
-
-#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336
-
-#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337
-
-#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
-
-#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
-#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
-#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
-static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
-}
-#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
-#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
-static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
-}
-#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
-#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
-static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
-}
-
-#define REG_A4XX_SP_GS_OUT(i0) (0x00002342 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
-#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
-#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
-}
-#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
-#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
-static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
-#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
-}
-#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
-#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
-static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A4XX_SP_GS_VPC_DST(i0) (0x00002353 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
-#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
-#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
-static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
-#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
-static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A4XX_SP_GS_OBJ_START 0x0000235c
-
-#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d
-
-#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e
-
-#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
-
-#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
-
-#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
-
-#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
-
-#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
-
-#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
-
-#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
-
-#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
-
-#define REG_A4XX_VPC_ATTR 0x00002140
-#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
-#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
-static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
-{
- return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
-}
-#define A4XX_VPC_ATTR_PSIZE 0x00000200
-#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
-#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
-static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
-{
- return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
-}
-#define A4XX_VPC_ATTR_ENABLE 0x02000000
-
-#define REG_A4XX_VPC_PACK 0x00002141
-#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
-#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
-static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
-{
- return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
-}
-#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
-#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
-static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
-{
- return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
-}
-#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
-#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
-static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
-{
- return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
-}
-
-#define REG_A4XX_VPC_VARYING_INTERP(i0) (0x00002142 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
-
-#define REG_A4XX_VPC_VARYING_PS_REPL(i0) (0x0000214a + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
-
-#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
-
-#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
-#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
-#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
-#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
-static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
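The two VSC_BIN_SIZE helpers above differ from the plain field packers in this file: the assert() rejects any value that is not a multiple of 32, and the value is shifted right by 5 before packing, so the hardware field holds the bin dimension in units of 32 pixels. A minimal sketch of the resulting encoding, with made-up bin dimensions rather than values taken from the driver:

/* Illustrative only: pack a 64x32 bin size with the helpers above.
 * Both dimensions must be 32-aligned, as enforced by the assert()s. */
uint32_t bin_size = A4XX_VSC_BIN_SIZE_WIDTH(64) |   /* stored as 64 >> 5 = 2 */
                    A4XX_VSC_BIN_SIZE_HEIGHT(32);   /* stored as 32 >> 5 = 1 */
/* bin_size == 0x22: WIDTH = 2 in bits 4:0, HEIGHT = 1 in bits 9:5 */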
-#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
-
-#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
-
-#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
-
-#define REG_A4XX_VSC_PIPE_CONFIG(i0) (0x00000c08 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
-#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
-#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
-static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
-{
- return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
-}
-#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
-#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
-static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
-{
- return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
-}
-#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
-#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
-static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
-{
- return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
-}
-#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
-#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
-static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
-{
- return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
-}
-
-#define REG_A4XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000c10 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
-
-#define REG_A4XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c18 + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
-
-#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
-
-#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
-
-#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
-
-#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
-
-#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
-
-#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
-
-#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9
-
-#define REG_A4XX_VFD_CONTROL_0 0x00002200
-#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
-#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
-static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
-}
-#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
-#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
-static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
-}
-#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
-#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
-static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
-}
-#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
-#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
-static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
-}
-
-#define REG_A4XX_VFD_CONTROL_1 0x00002201
-#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
-#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
-static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
-}
-#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
-#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
-static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
-}
-#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
-#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
-static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
-}
-
-#define REG_A4XX_VFD_CONTROL_2 0x00002202
-
-#define REG_A4XX_VFD_CONTROL_3 0x00002203
-#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00
-#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8
-static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
-}
-#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
-#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
-static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
-}
-#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
-#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
-static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
-{
- return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
-}
-
-#define REG_A4XX_VFD_CONTROL_4 0x00002204
-
-#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
-
-#define REG_A4XX_VFD_FETCH(i0) (0x0000220a + 0x4*(i0))
-
-static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
-#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
-#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
-static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
-{
- return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
-}
-#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
-#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
-static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
-{
- return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
-}
-#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
-#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000
-
-static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
-
-static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
-#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff
-#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0
-static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
-{
- return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
-}
-
-static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
-#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff
-#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0
-static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
-{
- return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
-}
-
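The (i0)-parameterised macros above describe register arrays rather than single registers: element i0 of REG_A4XX_VFD_FETCH lives at the base offset plus a stride of 4 dwords, and the per-element INSTR_0..INSTR_3 helpers resolve the individual words. A minimal sketch of addressing and filling one fetch slot; the slot index and field values are illustrative only:

/* Illustrative only: fetch slot 2 occupies dwords 0x2212..0x2215
 * (base 0x220a + 4*2), and its INSTR_0 word is packed from the field helpers. */
uint32_t reg = REG_A4XX_VFD_FETCH_INSTR_0(2);          /* 0x220a + 4*2 = 0x2212 */
uint32_t val = A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(11) |  /* bits  6:0 */
               A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(12) |  /* bits 16:7 */
               A4XX_VFD_FETCH_INSTR_0_INSTANCED;       /* bit  20   */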
-#define REG_A4XX_VFD_DECODE(i0) (0x0000228a + 0x1*(i0))
-
-static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
-#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
-#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
-static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
-{
- return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
-}
-#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
-#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
-#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
-static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
-{
- return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
-}
-#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
-#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
-static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
-{
- return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
-}
-#define A4XX_VFD_DECODE_INSTR_INT 0x00100000
-#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
-#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
-static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
-}
-#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
-#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
-static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
-{
- return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
-}
-#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
-#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
-
-#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
-
-#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
-
-#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
-
-#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
-
-#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381
-#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff
-#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0
-static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK;
-}
-#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00
-#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8
-static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK;
-}
-#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000
-#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16
-static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK;
-}
-#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000
-#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24
-static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK;
-}
-
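Every multi-field register in this header follows the same pattern: a __MASK/__SHIFT pair plus an inline packing helper per field, and the complete register word is obtained by OR-ing the helpers together. A minimal sketch using the TPL1_TP_TEX_COUNT helpers above; the per-stage texture counts are made-up values:

/* Illustrative only: compose the texture-count word for all four stages. */
uint32_t tex_count = A4XX_TPL1_TP_TEX_COUNT_VS(4) |  /* bits  7:0  */
                     A4XX_TPL1_TP_TEX_COUNT_HS(0) |  /* bits 15:8  */
                     A4XX_TPL1_TP_TEX_COUNT_DS(0) |  /* bits 23:16 */
                     A4XX_TPL1_TP_TEX_COUNT_GS(0);   /* bits 31:24 */
/* tex_count == 0x00000004; the word would then be written to
 * REG_A4XX_TPL1_TP_TEX_COUNT (0x2381). */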
-#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384
-
-#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387
-
-#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a
-
-#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d
-
-#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0
-#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK 0x000000ff
-#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT 0
-static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_FS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK;
-}
-#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK 0x0000ff00
-#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT 8
-static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_CS(uint32_t val)
-{
- return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK;
-}
-
-#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1
-
-#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4
-
-#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5
-
-#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
-
-#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
-
-#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
-
-#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
-
-#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
-
-#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
-
-#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
-
-#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
-
-#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
-
-#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
-
-#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
-
-#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
-#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
-#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
-#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
-#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
-
-#define REG_A4XX_GRAS_CNTL 0x00002003
-#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001
-#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002
-
-#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
-#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
-#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
-{
- return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
-}
-#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
-#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
-static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
-{
- return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
-#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
-#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
-#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
-#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
-#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
-}
-
-#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
-#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
-#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
-static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
-{
- return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
-}
-
-#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
-#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
-#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
-static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
-}
-#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
-#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
-static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
-}
-
-#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
-#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
-#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
-static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
-{
- return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
-}
-
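Two further encodings recur above: the viewport scale/offset helpers pack a float via fui(), which is assumed here to be the driver's float-to-uint32 bit cast (its definition lies outside this excerpt), and the point min/max/size helpers store fixed-point values scaled by 16, i.e. four fractional bits. A minimal sketch with made-up viewport and point values:

/* Illustrative only: float fields carry the raw IEEE-754 bit pattern
 * (via fui(), assumed to be a bit cast); point sizes are value * 16. */
uint32_t xscale = A4XX_GRAS_CL_VPORT_XSCALE_0(960.0f);   /* == fui(960.0f)          */
uint32_t minmax = A4XX_GRAS_SU_POINT_MINMAX_MIN(1.0f) |  /* 1.0 * 16 = 0x10         */
                  A4XX_GRAS_SU_POINT_MINMAX_MAX(64.0f);  /* 64.0 * 16 = 0x400 << 16 */
uint32_t psize  = A4XX_GRAS_SU_POINT_SIZE(1.5f);         /* 1.5 * 16 = 24 (0x18)    */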
-#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
-#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
-#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
-
-#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
-#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
-#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
-static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
-{
- return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
-}
-
-#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
-#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
-#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
-{
- return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076
-#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff
-#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0
-static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val)
-{
- return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK;
-}
-
-#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
-#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
-#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
-static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
-{
- return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
-}
-
-#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
-#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
-#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
-#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
-#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
-#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
-static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
-{
- return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
-}
-#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
-#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
-#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
-
-#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
-#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
-#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
-static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
-{
- return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
-}
-#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
-#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
-static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
-}
-#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
-#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
-#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
-static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
-}
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
-}
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
-}
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
-}
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e
-#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK;
-}
-#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK;
-}
-
-#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
-#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff
-#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0
-static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK;
-}
-#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000
-#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16
-static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
-{
- return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK;
-}
-
-#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
-
-#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
-
-#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
-
-#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
-
-#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
-
-#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
-
-#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
-
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
-
-#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
-
-#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
-
-#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05
-
-#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
-
-#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
-
-#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
-#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
-#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
-static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
-}
-#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
-#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
-#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
-#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
-#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
-#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
-static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
-}
-#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
-#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
-#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
-#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
-
-#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
-#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
-#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
-static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
-}
-#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
-#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000
-#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16
-static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
-}
-#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000
-#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24
-static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
-#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
-#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
-static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
-}
-#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc
-#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2
-static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
-}
-#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00
-#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10
-static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK;
-}
-#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000
-#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18
-static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
-}
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
-}
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
-}
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
-#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
-#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
-#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
-}
-#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
-#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
-static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
-}
-
-#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
-#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
-#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
-}
-#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
-#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
-static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
-}
-#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
-#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
-#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
-#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
-static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
-}
-#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
-#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
-static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
-#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
-#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
-}
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
-}
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
-}
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
-#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
-#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
-#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
-#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
-#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
-#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
-#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
-
-#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
-#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x00000fff
-#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
-}
-#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK 0x00fff000
-#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT 12
-static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK;
-}
-#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
-#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
-static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
-#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK 0x00000fff
-#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK;
-}
-#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK 0x00fff000
-#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT 12
-static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6
-#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK 0x00000fff
-#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK;
-}
-#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK 0x00fff000
-#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT 12
-static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK;
-}
-
-#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7
-
-#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8
-
-#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9
-
-#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da
-#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK 0x00000fff
-#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(uint32_t val)
-{
- return ((val) << A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK;
-}
-
-#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
-
-#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
-#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
-
-#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
-
-#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
-
-#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
-
-#define REG_A4XX_PC_BIN_BASE 0x000021c0
-
-#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
-#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
-#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
-static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
-{
- return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
-}
-#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
-#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
-static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
-{
- return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
-}
-
-#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
-#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
-#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
-static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
-{
- return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK;
-}
-#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
-#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
-#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-
-#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
-#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
-#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
-static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
-}
-#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
-#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
-static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
-}
-#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
-
-#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
-
-#define REG_A4XX_PC_GS_PARAM 0x000021e5
-#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
-#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
-static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
-{
- return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
-}
-#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
-#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
-static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
-{
- return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
-}
-#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK;
-}
-#define A4XX_PC_GS_PARAM_LAYER 0x80000000
-
-#define REG_A4XX_PC_HS_PARAM 0x000021e7
-#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
-#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
-static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
-{
- return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK;
-}
-#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000
-#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21
-static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
-{
- return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
-}
-#define A4XX_PC_HS_PARAM_CW 0x00800000
-#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
-
-#define REG_A4XX_VBIF_VERSION 0x00003000
-
-#define REG_A4XX_VBIF_CLKON 0x00003001
-#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
-
-#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
-
-#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
-
-#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
-
-#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
-
-#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
-
-#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
-
-#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
-
-#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
-
-#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0
-
-#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1
-
-#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2
-
-#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3
-
-#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0
-
-#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1
-
-#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2
-
-#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3
-
-#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8
-
-#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9
-
-#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da
-
-#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db
-
-#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0
-
-#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1
-
-#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2
-
-#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3
-
-#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
-
-#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
-
-#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
-
-#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
-
-#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
-
-#define REG_A4XX_UNKNOWN_0D01 0x00000d01
-
-#define REG_A4XX_UNKNOWN_0E42 0x00000e42
-
-#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
-
-#define REG_A4XX_UNKNOWN_2001 0x00002001
-
-#define REG_A4XX_UNKNOWN_209B 0x0000209b
-
-#define REG_A4XX_UNKNOWN_20EF 0x000020ef
-
-#define REG_A4XX_UNKNOWN_2152 0x00002152
-
-#define REG_A4XX_UNKNOWN_2153 0x00002153
-
-#define REG_A4XX_UNKNOWN_2154 0x00002154
-
-#define REG_A4XX_UNKNOWN_2155 0x00002155
-
-#define REG_A4XX_UNKNOWN_2156 0x00002156
-
-#define REG_A4XX_UNKNOWN_2157 0x00002157
-
-#define REG_A4XX_UNKNOWN_21C3 0x000021c3
-
-#define REG_A4XX_UNKNOWN_21E6 0x000021e6
-
-#define REG_A4XX_UNKNOWN_2209 0x00002209
-
-#define REG_A4XX_UNKNOWN_22D7 0x000022d7
-
-#define REG_A4XX_UNKNOWN_2352 0x00002352
-
-#define REG_A4XX_TEX_SAMP_0 0x00000000
-#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
-#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
-#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
-static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
-{
- return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
-}
-#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
-#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
-static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
-{
- return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
-}
-#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
-#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
-static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
-{
- return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
-}
-#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
-#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
-static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
-{
- return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
-}
-#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
-#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
-static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
-{
- return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
-}
-#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
-#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14
-static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
-{
- return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
-}
-#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
-#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
-static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
-{
- return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
-}
-
-#define REG_A4XX_TEX_SAMP_1 0x00000001
-#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
-#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
-static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
-}
-#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
-#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
-#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
-#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
-#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
-static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
-}
-#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
-#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
-static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_0 0x00000000
-#define A4XX_TEX_CONST_0_TILED 0x00000001
-#define A4XX_TEX_CONST_0_SRGB 0x00000004
-#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
-#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
-static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
-{
- return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
-}
-#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
-#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
-static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
-{
- return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
-}
-#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
-#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
-static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
-{
- return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
-}
-#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
-#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
-static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
-{
- return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
-}
-#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
-#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16
-static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK;
-}
-#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
-#define A4XX_TEX_CONST_0_FMT__SHIFT 22
-static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
-{
- return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
-}
-#define A4XX_TEX_CONST_0_TYPE__MASK 0xe0000000
-#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
-static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
-{
- return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_1 0x00000001
-#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
-#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
-static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
-}
-#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
-#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
-static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_2 0x00000002
-#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
-#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
-static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK;
-}
-#define A4XX_TEX_CONST_2_BUFFER 0x00000040
-#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
-#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
-static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
-}
-#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
-#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
-static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_3 0x00000003
-#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
-#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
-static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
-}
-#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000
-#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18
-static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val)
-{
- return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_4 0x00000004
-#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f
-#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0
-static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
-}
-#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0
-#define A4XX_TEX_CONST_4_BASE__SHIFT 5
-static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
-}
-
-#define REG_A4XX_TEX_CONST_5 0x00000005
-
-#define REG_A4XX_TEX_CONST_6 0x00000006
-
-#define REG_A4XX_TEX_CONST_7 0x00000007
-
-#define REG_A4XX_SSBO_0_0 0x00000000
-#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
-#define A4XX_SSBO_0_0_BASE__SHIFT 5
-static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
-}
-
-#define REG_A4XX_SSBO_0_1 0x00000001
-#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
-#define A4XX_SSBO_0_1_PITCH__SHIFT 0
-static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
-{
- return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
-}
-
-#define REG_A4XX_SSBO_0_2 0x00000002
-#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
-#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
-static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
-}
-
-#define REG_A4XX_SSBO_0_3 0x00000003
-#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
-#define A4XX_SSBO_0_3_CPP__SHIFT 0
-static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
-{
- return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
-}
-
-#define REG_A4XX_SSBO_1_0 0x00000000
-#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
-#define A4XX_SSBO_1_0_CPP__SHIFT 0
-static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
-{
- return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
-}
-#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
-#define A4XX_SSBO_1_0_FMT__SHIFT 8
-static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
-{
- return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
-}
-#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
-#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
-static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
-{
- return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
-}
-
-#define REG_A4XX_SSBO_1_1 0x00000001
-#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
-#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
-static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
-{
- return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
-}
-#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
-#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
-static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
-{
- return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A4XX_XML */
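
An illustrative aside on the pattern the deleted header uses (not part of the patch itself): every field in these generated headers is described by a __MASK/__SHIFT pair plus a small static inline packer, and callers build a register word by OR-ing the packed fields together. Below is a minimal, self-contained sketch of that usage, reusing the A4XX_PC_GS_PARAM definitions copied verbatim from the hunk above; the field values written (max_vertices = 128, invocations = 1) are hypothetical and chosen only to show the packing arithmetic.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the deleted a4xx.xml.h above. */
#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK			0x000003ff
#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT			0
static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
{
	return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
}

#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK			0x0000f800
#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT			11
static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
{
	return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
}

int main(void)
{
	/* Each helper shifts its value into place and masks off out-of-range
	 * bits, so independent fields can simply be OR'd into one word.
	 * The field values here are hypothetical. */
	uint32_t val = A4XX_PC_GS_PARAM_MAX_VERTICES(128) |
		       A4XX_PC_GS_PARAM_INVOCATIONS(1);

	printf("PC_GS_PARAM value: 0x%08x\n", val);	/* prints 0x00000880 */
	return 0;
}

The same pattern (mask, shift, OR) applies to every register in the a5xx header whose deletion follows.
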
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
deleted file mode 100644
index d66306c14986..000000000000
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ /dev/null
@@ -1,5572 +0,0 @@
-#ifndef A5XX_XML
-#define A5XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 151693 bytes, from Wed Aug 23 10:39:39 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum a5xx_color_fmt {
- RB5_A8_UNORM = 2,
- RB5_R8_UNORM = 3,
- RB5_R8_SNORM = 4,
- RB5_R8_UINT = 5,
- RB5_R8_SINT = 6,
- RB5_R4G4B4A4_UNORM = 8,
- RB5_R5G5B5A1_UNORM = 10,
- RB5_R5G6B5_UNORM = 14,
- RB5_R8G8_UNORM = 15,
- RB5_R8G8_SNORM = 16,
- RB5_R8G8_UINT = 17,
- RB5_R8G8_SINT = 18,
- RB5_R16_UNORM = 21,
- RB5_R16_SNORM = 22,
- RB5_R16_FLOAT = 23,
- RB5_R16_UINT = 24,
- RB5_R16_SINT = 25,
- RB5_R8G8B8A8_UNORM = 48,
- RB5_R8G8B8_UNORM = 49,
- RB5_R8G8B8A8_SNORM = 50,
- RB5_R8G8B8A8_UINT = 51,
- RB5_R8G8B8A8_SINT = 52,
- RB5_R10G10B10A2_UNORM = 55,
- RB5_R10G10B10A2_UINT = 58,
- RB5_R11G11B10_FLOAT = 66,
- RB5_R16G16_UNORM = 67,
- RB5_R16G16_SNORM = 68,
- RB5_R16G16_FLOAT = 69,
- RB5_R16G16_UINT = 70,
- RB5_R16G16_SINT = 71,
- RB5_R32_FLOAT = 74,
- RB5_R32_UINT = 75,
- RB5_R32_SINT = 76,
- RB5_R16G16B16A16_UNORM = 96,
- RB5_R16G16B16A16_SNORM = 97,
- RB5_R16G16B16A16_FLOAT = 98,
- RB5_R16G16B16A16_UINT = 99,
- RB5_R16G16B16A16_SINT = 100,
- RB5_R32G32_FLOAT = 103,
- RB5_R32G32_UINT = 104,
- RB5_R32G32_SINT = 105,
- RB5_R32G32B32A32_FLOAT = 130,
- RB5_R32G32B32A32_UINT = 131,
- RB5_R32G32B32A32_SINT = 132,
- RB5_NONE = 255,
-};
-
-enum a5xx_tile_mode {
- TILE5_LINEAR = 0,
- TILE5_2 = 2,
- TILE5_3 = 3,
-};
-
-enum a5xx_vtx_fmt {
- VFMT5_8_UNORM = 3,
- VFMT5_8_SNORM = 4,
- VFMT5_8_UINT = 5,
- VFMT5_8_SINT = 6,
- VFMT5_8_8_UNORM = 15,
- VFMT5_8_8_SNORM = 16,
- VFMT5_8_8_UINT = 17,
- VFMT5_8_8_SINT = 18,
- VFMT5_16_UNORM = 21,
- VFMT5_16_SNORM = 22,
- VFMT5_16_FLOAT = 23,
- VFMT5_16_UINT = 24,
- VFMT5_16_SINT = 25,
- VFMT5_8_8_8_UNORM = 33,
- VFMT5_8_8_8_SNORM = 34,
- VFMT5_8_8_8_UINT = 35,
- VFMT5_8_8_8_SINT = 36,
- VFMT5_8_8_8_8_UNORM = 48,
- VFMT5_8_8_8_8_SNORM = 50,
- VFMT5_8_8_8_8_UINT = 51,
- VFMT5_8_8_8_8_SINT = 52,
- VFMT5_10_10_10_2_UNORM = 54,
- VFMT5_10_10_10_2_SNORM = 57,
- VFMT5_10_10_10_2_UINT = 58,
- VFMT5_10_10_10_2_SINT = 59,
- VFMT5_11_11_10_FLOAT = 66,
- VFMT5_16_16_UNORM = 67,
- VFMT5_16_16_SNORM = 68,
- VFMT5_16_16_FLOAT = 69,
- VFMT5_16_16_UINT = 70,
- VFMT5_16_16_SINT = 71,
- VFMT5_32_UNORM = 72,
- VFMT5_32_SNORM = 73,
- VFMT5_32_FLOAT = 74,
- VFMT5_32_UINT = 75,
- VFMT5_32_SINT = 76,
- VFMT5_32_FIXED = 77,
- VFMT5_16_16_16_UNORM = 88,
- VFMT5_16_16_16_SNORM = 89,
- VFMT5_16_16_16_FLOAT = 90,
- VFMT5_16_16_16_UINT = 91,
- VFMT5_16_16_16_SINT = 92,
- VFMT5_16_16_16_16_UNORM = 96,
- VFMT5_16_16_16_16_SNORM = 97,
- VFMT5_16_16_16_16_FLOAT = 98,
- VFMT5_16_16_16_16_UINT = 99,
- VFMT5_16_16_16_16_SINT = 100,
- VFMT5_32_32_UNORM = 101,
- VFMT5_32_32_SNORM = 102,
- VFMT5_32_32_FLOAT = 103,
- VFMT5_32_32_UINT = 104,
- VFMT5_32_32_SINT = 105,
- VFMT5_32_32_FIXED = 106,
- VFMT5_32_32_32_UNORM = 112,
- VFMT5_32_32_32_SNORM = 113,
- VFMT5_32_32_32_UINT = 114,
- VFMT5_32_32_32_SINT = 115,
- VFMT5_32_32_32_FLOAT = 116,
- VFMT5_32_32_32_FIXED = 117,
- VFMT5_32_32_32_32_UNORM = 128,
- VFMT5_32_32_32_32_SNORM = 129,
- VFMT5_32_32_32_32_FLOAT = 130,
- VFMT5_32_32_32_32_UINT = 131,
- VFMT5_32_32_32_32_SINT = 132,
- VFMT5_32_32_32_32_FIXED = 133,
- VFMT5_NONE = 255,
-};
-
-enum a5xx_tex_fmt {
- TFMT5_A8_UNORM = 2,
- TFMT5_8_UNORM = 3,
- TFMT5_8_SNORM = 4,
- TFMT5_8_UINT = 5,
- TFMT5_8_SINT = 6,
- TFMT5_4_4_4_4_UNORM = 8,
- TFMT5_5_5_5_1_UNORM = 10,
- TFMT5_5_6_5_UNORM = 14,
- TFMT5_8_8_UNORM = 15,
- TFMT5_8_8_SNORM = 16,
- TFMT5_8_8_UINT = 17,
- TFMT5_8_8_SINT = 18,
- TFMT5_L8_A8_UNORM = 19,
- TFMT5_16_UNORM = 21,
- TFMT5_16_SNORM = 22,
- TFMT5_16_FLOAT = 23,
- TFMT5_16_UINT = 24,
- TFMT5_16_SINT = 25,
- TFMT5_8_8_8_8_UNORM = 48,
- TFMT5_8_8_8_UNORM = 49,
- TFMT5_8_8_8_8_SNORM = 50,
- TFMT5_8_8_8_8_UINT = 51,
- TFMT5_8_8_8_8_SINT = 52,
- TFMT5_9_9_9_E5_FLOAT = 53,
- TFMT5_10_10_10_2_UNORM = 54,
- TFMT5_10_10_10_2_UINT = 58,
- TFMT5_11_11_10_FLOAT = 66,
- TFMT5_16_16_UNORM = 67,
- TFMT5_16_16_SNORM = 68,
- TFMT5_16_16_FLOAT = 69,
- TFMT5_16_16_UINT = 70,
- TFMT5_16_16_SINT = 71,
- TFMT5_32_FLOAT = 74,
- TFMT5_32_UINT = 75,
- TFMT5_32_SINT = 76,
- TFMT5_16_16_16_16_UNORM = 96,
- TFMT5_16_16_16_16_SNORM = 97,
- TFMT5_16_16_16_16_FLOAT = 98,
- TFMT5_16_16_16_16_UINT = 99,
- TFMT5_16_16_16_16_SINT = 100,
- TFMT5_32_32_FLOAT = 103,
- TFMT5_32_32_UINT = 104,
- TFMT5_32_32_SINT = 105,
- TFMT5_32_32_32_UINT = 114,
- TFMT5_32_32_32_SINT = 115,
- TFMT5_32_32_32_FLOAT = 116,
- TFMT5_32_32_32_32_FLOAT = 130,
- TFMT5_32_32_32_32_UINT = 131,
- TFMT5_32_32_32_32_SINT = 132,
- TFMT5_X8Z24_UNORM = 160,
- TFMT5_ETC2_RG11_UNORM = 171,
- TFMT5_ETC2_RG11_SNORM = 172,
- TFMT5_ETC2_R11_UNORM = 173,
- TFMT5_ETC2_R11_SNORM = 174,
- TFMT5_ETC1 = 175,
- TFMT5_ETC2_RGB8 = 176,
- TFMT5_ETC2_RGBA8 = 177,
- TFMT5_ETC2_RGB8A1 = 178,
- TFMT5_DXT1 = 179,
- TFMT5_DXT3 = 180,
- TFMT5_DXT5 = 181,
- TFMT5_RGTC1_UNORM = 183,
- TFMT5_RGTC1_SNORM = 184,
- TFMT5_RGTC2_UNORM = 187,
- TFMT5_RGTC2_SNORM = 188,
- TFMT5_BPTC_UFLOAT = 190,
- TFMT5_BPTC_FLOAT = 191,
- TFMT5_BPTC = 192,
- TFMT5_ASTC_4x4 = 193,
- TFMT5_ASTC_5x4 = 194,
- TFMT5_ASTC_5x5 = 195,
- TFMT5_ASTC_6x5 = 196,
- TFMT5_ASTC_6x6 = 197,
- TFMT5_ASTC_8x5 = 198,
- TFMT5_ASTC_8x6 = 199,
- TFMT5_ASTC_8x8 = 200,
- TFMT5_ASTC_10x5 = 201,
- TFMT5_ASTC_10x6 = 202,
- TFMT5_ASTC_10x8 = 203,
- TFMT5_ASTC_10x10 = 204,
- TFMT5_ASTC_12x10 = 205,
- TFMT5_ASTC_12x12 = 206,
- TFMT5_NONE = 255,
-};
-
-enum a5xx_depth_format {
- DEPTH5_NONE = 0,
- DEPTH5_16 = 1,
- DEPTH5_24_8 = 2,
- DEPTH5_32 = 4,
-};
-
-enum a5xx_blit_buf {
- BLIT_MRT0 = 0,
- BLIT_MRT1 = 1,
- BLIT_MRT2 = 2,
- BLIT_MRT3 = 3,
- BLIT_MRT4 = 4,
- BLIT_MRT5 = 5,
- BLIT_MRT6 = 6,
- BLIT_MRT7 = 7,
- BLIT_ZS = 8,
- BLIT_S = 9,
-};
-
-enum a5xx_cp_perfcounter_select {
- PERF_CP_ALWAYS_COUNT = 0,
- PERF_CP_BUSY_GFX_CORE_IDLE = 1,
- PERF_CP_BUSY_CYCLES = 2,
- PERF_CP_PFP_IDLE = 3,
- PERF_CP_PFP_BUSY_WORKING = 4,
- PERF_CP_PFP_STALL_CYCLES_ANY = 5,
- PERF_CP_PFP_STARVE_CYCLES_ANY = 6,
- PERF_CP_PFP_ICACHE_MISS = 7,
- PERF_CP_PFP_ICACHE_HIT = 8,
- PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
- PERF_CP_ME_BUSY_WORKING = 10,
- PERF_CP_ME_IDLE = 11,
- PERF_CP_ME_STARVE_CYCLES_ANY = 12,
- PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13,
- PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14,
- PERF_CP_ME_FIFO_FULL_ME_BUSY = 15,
- PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16,
- PERF_CP_ME_STALL_CYCLES_ANY = 17,
- PERF_CP_ME_ICACHE_MISS = 18,
- PERF_CP_ME_ICACHE_HIT = 19,
- PERF_CP_NUM_PREEMPTIONS = 20,
- PERF_CP_PREEMPTION_REACTION_DELAY = 21,
- PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22,
- PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23,
- PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24,
- PERF_CP_PREDICATED_DRAWS_KILLED = 25,
- PERF_CP_MODE_SWITCH = 26,
- PERF_CP_ZPASS_DONE = 27,
- PERF_CP_CONTEXT_DONE = 28,
- PERF_CP_CACHE_FLUSH = 29,
- PERF_CP_LONG_PREEMPTIONS = 30,
-};
-
-enum a5xx_rbbm_perfcounter_select {
- PERF_RBBM_ALWAYS_COUNT = 0,
- PERF_RBBM_ALWAYS_ON = 1,
- PERF_RBBM_TSE_BUSY = 2,
- PERF_RBBM_RAS_BUSY = 3,
- PERF_RBBM_PC_DCALL_BUSY = 4,
- PERF_RBBM_PC_VSD_BUSY = 5,
- PERF_RBBM_STATUS_MASKED = 6,
- PERF_RBBM_COM_BUSY = 7,
- PERF_RBBM_DCOM_BUSY = 8,
- PERF_RBBM_VBIF_BUSY = 9,
- PERF_RBBM_VSC_BUSY = 10,
- PERF_RBBM_TESS_BUSY = 11,
- PERF_RBBM_UCHE_BUSY = 12,
- PERF_RBBM_HLSQ_BUSY = 13,
-};
-
-enum a5xx_pc_perfcounter_select {
- PERF_PC_BUSY_CYCLES = 0,
- PERF_PC_WORKING_CYCLES = 1,
- PERF_PC_STALL_CYCLES_VFD = 2,
- PERF_PC_STALL_CYCLES_TSE = 3,
- PERF_PC_STALL_CYCLES_VPC = 4,
- PERF_PC_STALL_CYCLES_UCHE = 5,
- PERF_PC_STALL_CYCLES_TESS = 6,
- PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
- PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
- PERF_PC_PASS1_TF_STALL_CYCLES = 9,
- PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
- PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
- PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
- PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
- PERF_PC_STARVE_CYCLES_DI = 14,
- PERF_PC_VIS_STREAMS_LOADED = 15,
- PERF_PC_INSTANCES = 16,
- PERF_PC_VPC_PRIMITIVES = 17,
- PERF_PC_DEAD_PRIM = 18,
- PERF_PC_LIVE_PRIM = 19,
- PERF_PC_VERTEX_HITS = 20,
- PERF_PC_IA_VERTICES = 21,
- PERF_PC_IA_PRIMITIVES = 22,
- PERF_PC_GS_PRIMITIVES = 23,
- PERF_PC_HS_INVOCATIONS = 24,
- PERF_PC_DS_INVOCATIONS = 25,
- PERF_PC_VS_INVOCATIONS = 26,
- PERF_PC_GS_INVOCATIONS = 27,
- PERF_PC_DS_PRIMITIVES = 28,
- PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
- PERF_PC_3D_DRAWCALLS = 30,
- PERF_PC_2D_DRAWCALLS = 31,
- PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
- PERF_TESS_BUSY_CYCLES = 33,
- PERF_TESS_WORKING_CYCLES = 34,
- PERF_TESS_STALL_CYCLES_PC = 35,
- PERF_TESS_STARVE_CYCLES_PC = 36,
-};
-
-enum a5xx_vfd_perfcounter_select {
- PERF_VFD_BUSY_CYCLES = 0,
- PERF_VFD_STALL_CYCLES_UCHE = 1,
- PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
- PERF_VFD_STALL_CYCLES_MISS_VB = 3,
- PERF_VFD_STALL_CYCLES_MISS_Q = 4,
- PERF_VFD_STALL_CYCLES_SP_INFO = 5,
- PERF_VFD_STALL_CYCLES_SP_ATTR = 6,
- PERF_VFD_STALL_CYCLES_VFDP_VB = 7,
- PERF_VFD_STALL_CYCLES_VFDP_Q = 8,
- PERF_VFD_DECODER_PACKER_STALL = 9,
- PERF_VFD_STARVE_CYCLES_UCHE = 10,
- PERF_VFD_RBUFFER_FULL = 11,
- PERF_VFD_ATTR_INFO_FIFO_FULL = 12,
- PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13,
- PERF_VFD_NUM_ATTRIBUTES = 14,
- PERF_VFD_INSTRUCTIONS = 15,
- PERF_VFD_UPPER_SHADER_FIBERS = 16,
- PERF_VFD_LOWER_SHADER_FIBERS = 17,
- PERF_VFD_MODE_0_FIBERS = 18,
- PERF_VFD_MODE_1_FIBERS = 19,
- PERF_VFD_MODE_2_FIBERS = 20,
- PERF_VFD_MODE_3_FIBERS = 21,
- PERF_VFD_MODE_4_FIBERS = 22,
- PERF_VFD_TOTAL_VERTICES = 23,
- PERF_VFD_NUM_ATTR_MISS = 24,
- PERF_VFD_1_BURST_REQ = 25,
- PERF_VFDP_STALL_CYCLES_VFD = 26,
- PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27,
- PERF_VFDP_STALL_CYCLES_VFD_PROG = 28,
- PERF_VFDP_STARVE_CYCLES_PC = 29,
- PERF_VFDP_VS_STAGE_32_WAVES = 30,
-};
-
-enum a5xx_hlsq_perfcounter_select {
- PERF_HLSQ_BUSY_CYCLES = 0,
- PERF_HLSQ_STALL_CYCLES_UCHE = 1,
- PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
- PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
- PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
- PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
- PERF_HLSQ_FS_STAGE_32_WAVES = 6,
- PERF_HLSQ_FS_STAGE_64_WAVES = 7,
- PERF_HLSQ_QUADS = 8,
- PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9,
- PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10,
- PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11,
- PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12,
- PERF_HLSQ_CS_INVOCATIONS = 13,
- PERF_HLSQ_COMPUTE_DRAWCALLS = 14,
-};
-
-enum a5xx_vpc_perfcounter_select {
- PERF_VPC_BUSY_CYCLES = 0,
- PERF_VPC_WORKING_CYCLES = 1,
- PERF_VPC_STALL_CYCLES_UCHE = 2,
- PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
- PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
- PERF_VPC_STALL_CYCLES_PC = 5,
- PERF_VPC_STALL_CYCLES_SP_LM = 6,
- PERF_VPC_POS_EXPORT_STALL_CYCLES = 7,
- PERF_VPC_STARVE_CYCLES_SP = 8,
- PERF_VPC_STARVE_CYCLES_LRZ = 9,
- PERF_VPC_PC_PRIMITIVES = 10,
- PERF_VPC_SP_COMPONENTS = 11,
- PERF_VPC_SP_LM_PRIMITIVES = 12,
- PERF_VPC_SP_LM_COMPONENTS = 13,
- PERF_VPC_SP_LM_DWORDS = 14,
- PERF_VPC_STREAMOUT_COMPONENTS = 15,
- PERF_VPC_GRANT_PHASES = 16,
-};
-
-enum a5xx_tse_perfcounter_select {
- PERF_TSE_BUSY_CYCLES = 0,
- PERF_TSE_CLIPPING_CYCLES = 1,
- PERF_TSE_STALL_CYCLES_RAS = 2,
- PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
- PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
- PERF_TSE_STARVE_CYCLES_PC = 5,
- PERF_TSE_INPUT_PRIM = 6,
- PERF_TSE_INPUT_NULL_PRIM = 7,
- PERF_TSE_TRIVAL_REJ_PRIM = 8,
- PERF_TSE_CLIPPED_PRIM = 9,
- PERF_TSE_ZERO_AREA_PRIM = 10,
- PERF_TSE_FACENESS_CULLED_PRIM = 11,
- PERF_TSE_ZERO_PIXEL_PRIM = 12,
- PERF_TSE_OUTPUT_NULL_PRIM = 13,
- PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
- PERF_TSE_CINVOCATION = 15,
- PERF_TSE_CPRIMITIVES = 16,
- PERF_TSE_2D_INPUT_PRIM = 17,
- PERF_TSE_2D_ALIVE_CLCLES = 18,
-};
-
-enum a5xx_ras_perfcounter_select {
- PERF_RAS_BUSY_CYCLES = 0,
- PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
- PERF_RAS_STALL_CYCLES_LRZ = 2,
- PERF_RAS_STARVE_CYCLES_TSE = 3,
- PERF_RAS_SUPER_TILES = 4,
- PERF_RAS_8X4_TILES = 5,
- PERF_RAS_MASKGEN_ACTIVE = 6,
- PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
- PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
- PERF_RAS_PRIM_KILLED_INVISILBE = 9,
-};
-
-enum a5xx_lrz_perfcounter_select {
- PERF_LRZ_BUSY_CYCLES = 0,
- PERF_LRZ_STARVE_CYCLES_RAS = 1,
- PERF_LRZ_STALL_CYCLES_RB = 2,
- PERF_LRZ_STALL_CYCLES_VSC = 3,
- PERF_LRZ_STALL_CYCLES_VPC = 4,
- PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
- PERF_LRZ_STALL_CYCLES_UCHE = 6,
- PERF_LRZ_LRZ_READ = 7,
- PERF_LRZ_LRZ_WRITE = 8,
- PERF_LRZ_READ_LATENCY = 9,
- PERF_LRZ_MERGE_CACHE_UPDATING = 10,
- PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
- PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
- PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
- PERF_LRZ_FULL_8X8_TILES = 14,
- PERF_LRZ_PARTIAL_8X8_TILES = 15,
- PERF_LRZ_TILE_KILLED = 16,
- PERF_LRZ_TOTAL_PIXEL = 17,
- PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
-};
-
-enum a5xx_uche_perfcounter_select {
- PERF_UCHE_BUSY_CYCLES = 0,
- PERF_UCHE_STALL_CYCLES_VBIF = 1,
- PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
- PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
- PERF_UCHE_VBIF_READ_BEATS_TP = 4,
- PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
- PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
- PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
- PERF_UCHE_VBIF_READ_BEATS_SP = 8,
- PERF_UCHE_READ_REQUESTS_TP = 9,
- PERF_UCHE_READ_REQUESTS_VFD = 10,
- PERF_UCHE_READ_REQUESTS_HLSQ = 11,
- PERF_UCHE_READ_REQUESTS_LRZ = 12,
- PERF_UCHE_READ_REQUESTS_SP = 13,
- PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
- PERF_UCHE_WRITE_REQUESTS_SP = 15,
- PERF_UCHE_WRITE_REQUESTS_VPC = 16,
- PERF_UCHE_WRITE_REQUESTS_VSC = 17,
- PERF_UCHE_EVICTS = 18,
- PERF_UCHE_BANK_REQ0 = 19,
- PERF_UCHE_BANK_REQ1 = 20,
- PERF_UCHE_BANK_REQ2 = 21,
- PERF_UCHE_BANK_REQ3 = 22,
- PERF_UCHE_BANK_REQ4 = 23,
- PERF_UCHE_BANK_REQ5 = 24,
- PERF_UCHE_BANK_REQ6 = 25,
- PERF_UCHE_BANK_REQ7 = 26,
- PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
- PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
- PERF_UCHE_GMEM_READ_BEATS = 29,
- PERF_UCHE_FLAG_COUNT = 30,
-};
-
-enum a5xx_tp_perfcounter_select {
- PERF_TP_BUSY_CYCLES = 0,
- PERF_TP_STALL_CYCLES_UCHE = 1,
- PERF_TP_LATENCY_CYCLES = 2,
- PERF_TP_LATENCY_TRANS = 3,
- PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
- PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
- PERF_TP_L1_CACHELINE_REQUESTS = 6,
- PERF_TP_L1_CACHELINE_MISSES = 7,
- PERF_TP_SP_TP_TRANS = 8,
- PERF_TP_TP_SP_TRANS = 9,
- PERF_TP_OUTPUT_PIXELS = 10,
- PERF_TP_FILTER_WORKLOAD_16BIT = 11,
- PERF_TP_FILTER_WORKLOAD_32BIT = 12,
- PERF_TP_QUADS_RECEIVED = 13,
- PERF_TP_QUADS_OFFSET = 14,
- PERF_TP_QUADS_SHADOW = 15,
- PERF_TP_QUADS_ARRAY = 16,
- PERF_TP_QUADS_GRADIENT = 17,
- PERF_TP_QUADS_1D = 18,
- PERF_TP_QUADS_2D = 19,
- PERF_TP_QUADS_BUFFER = 20,
- PERF_TP_QUADS_3D = 21,
- PERF_TP_QUADS_CUBE = 22,
- PERF_TP_STATE_CACHE_REQUESTS = 23,
- PERF_TP_STATE_CACHE_MISSES = 24,
- PERF_TP_DIVERGENT_QUADS_RECEIVED = 25,
- PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26,
- PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27,
- PERF_TP_PRT_NON_RESIDENT_EVENTS = 28,
- PERF_TP_OUTPUT_PIXELS_POINT = 29,
- PERF_TP_OUTPUT_PIXELS_BILINEAR = 30,
- PERF_TP_OUTPUT_PIXELS_MIP = 31,
- PERF_TP_OUTPUT_PIXELS_ANISO = 32,
- PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33,
- PERF_TP_FLAG_CACHE_REQUESTS = 34,
- PERF_TP_FLAG_CACHE_MISSES = 35,
- PERF_TP_L1_5_L2_REQUESTS = 36,
- PERF_TP_2D_OUTPUT_PIXELS = 37,
- PERF_TP_2D_OUTPUT_PIXELS_POINT = 38,
- PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39,
- PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40,
- PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41,
-};
-
-enum a5xx_sp_perfcounter_select {
- PERF_SP_BUSY_CYCLES = 0,
- PERF_SP_ALU_WORKING_CYCLES = 1,
- PERF_SP_EFU_WORKING_CYCLES = 2,
- PERF_SP_STALL_CYCLES_VPC = 3,
- PERF_SP_STALL_CYCLES_TP = 4,
- PERF_SP_STALL_CYCLES_UCHE = 5,
- PERF_SP_STALL_CYCLES_RB = 6,
- PERF_SP_SCHEDULER_NON_WORKING = 7,
- PERF_SP_WAVE_CONTEXTS = 8,
- PERF_SP_WAVE_CONTEXT_CYCLES = 9,
- PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
- PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
- PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
- PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
- PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
- PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
- PERF_SP_WAVE_CTRL_CYCLES = 16,
- PERF_SP_WAVE_LOAD_CYCLES = 17,
- PERF_SP_WAVE_EMIT_CYCLES = 18,
- PERF_SP_WAVE_NOP_CYCLES = 19,
- PERF_SP_WAVE_WAIT_CYCLES = 20,
- PERF_SP_WAVE_FETCH_CYCLES = 21,
- PERF_SP_WAVE_IDLE_CYCLES = 22,
- PERF_SP_WAVE_END_CYCLES = 23,
- PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
- PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
- PERF_SP_WAVE_JOIN_CYCLES = 26,
- PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
- PERF_SP_LM_STORE_INSTRUCTIONS = 28,
- PERF_SP_LM_ATOMICS = 29,
- PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
- PERF_SP_GM_STORE_INSTRUCTIONS = 31,
- PERF_SP_GM_ATOMICS = 32,
- PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
- PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34,
- PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35,
- PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36,
- PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37,
- PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38,
- PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39,
- PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40,
- PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41,
- PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42,
- PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43,
- PERF_SP_VS_INSTRUCTIONS = 44,
- PERF_SP_FS_INSTRUCTIONS = 45,
- PERF_SP_ADDR_LOCK_COUNT = 46,
- PERF_SP_UCHE_READ_TRANS = 47,
- PERF_SP_UCHE_WRITE_TRANS = 48,
- PERF_SP_EXPORT_VPC_TRANS = 49,
- PERF_SP_EXPORT_RB_TRANS = 50,
- PERF_SP_PIXELS_KILLED = 51,
- PERF_SP_ICL1_REQUESTS = 52,
- PERF_SP_ICL1_MISSES = 53,
- PERF_SP_ICL0_REQUESTS = 54,
- PERF_SP_ICL0_MISSES = 55,
- PERF_SP_HS_INSTRUCTIONS = 56,
- PERF_SP_DS_INSTRUCTIONS = 57,
- PERF_SP_GS_INSTRUCTIONS = 58,
- PERF_SP_CS_INSTRUCTIONS = 59,
- PERF_SP_GPR_READ = 60,
- PERF_SP_GPR_WRITE = 61,
- PERF_SP_LM_CH0_REQUESTS = 62,
- PERF_SP_LM_CH1_REQUESTS = 63,
- PERF_SP_LM_BANK_CONFLICTS = 64,
-};
-
-enum a5xx_rb_perfcounter_select {
- PERF_RB_BUSY_CYCLES = 0,
- PERF_RB_STALL_CYCLES_CCU = 1,
- PERF_RB_STALL_CYCLES_HLSQ = 2,
- PERF_RB_STALL_CYCLES_FIFO0_FULL = 3,
- PERF_RB_STALL_CYCLES_FIFO1_FULL = 4,
- PERF_RB_STALL_CYCLES_FIFO2_FULL = 5,
- PERF_RB_STARVE_CYCLES_SP = 6,
- PERF_RB_STARVE_CYCLES_LRZ_TILE = 7,
- PERF_RB_STARVE_CYCLES_CCU = 8,
- PERF_RB_STARVE_CYCLES_Z_PLANE = 9,
- PERF_RB_STARVE_CYCLES_BARY_PLANE = 10,
- PERF_RB_Z_WORKLOAD = 11,
- PERF_RB_HLSQ_ACTIVE = 12,
- PERF_RB_Z_READ = 13,
- PERF_RB_Z_WRITE = 14,
- PERF_RB_C_READ = 15,
- PERF_RB_C_WRITE = 16,
- PERF_RB_TOTAL_PASS = 17,
- PERF_RB_Z_PASS = 18,
- PERF_RB_Z_FAIL = 19,
- PERF_RB_S_FAIL = 20,
- PERF_RB_BLENDED_FXP_COMPONENTS = 21,
- PERF_RB_BLENDED_FP16_COMPONENTS = 22,
- RB_RESERVED = 23,
- PERF_RB_2D_ALIVE_CYCLES = 24,
- PERF_RB_2D_STALL_CYCLES_A2D = 25,
- PERF_RB_2D_STARVE_CYCLES_SRC = 26,
- PERF_RB_2D_STARVE_CYCLES_SP = 27,
- PERF_RB_2D_STARVE_CYCLES_DST = 28,
- PERF_RB_2D_VALID_PIXELS = 29,
-};
-
-enum a5xx_rb_samples_perfcounter_select {
- TOTAL_SAMPLES = 0,
- ZPASS_SAMPLES = 1,
- ZFAIL_SAMPLES = 2,
- SFAIL_SAMPLES = 3,
-};
-
-enum a5xx_vsc_perfcounter_select {
- PERF_VSC_BUSY_CYCLES = 0,
- PERF_VSC_WORKING_CYCLES = 1,
- PERF_VSC_STALL_CYCLES_UCHE = 2,
- PERF_VSC_EOT_NUM = 3,
-};
-
-enum a5xx_ccu_perfcounter_select {
- PERF_CCU_BUSY_CYCLES = 0,
- PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
- PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
- PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
- PERF_CCU_DEPTH_BLOCKS = 4,
- PERF_CCU_COLOR_BLOCKS = 5,
- PERF_CCU_DEPTH_BLOCK_HIT = 6,
- PERF_CCU_COLOR_BLOCK_HIT = 7,
- PERF_CCU_PARTIAL_BLOCK_READ = 8,
- PERF_CCU_GMEM_READ = 9,
- PERF_CCU_GMEM_WRITE = 10,
- PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
- PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
- PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
- PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
- PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
- PERF_CCU_COLOR_READ_FLAG0_COUNT = 16,
- PERF_CCU_COLOR_READ_FLAG1_COUNT = 17,
- PERF_CCU_COLOR_READ_FLAG2_COUNT = 18,
- PERF_CCU_COLOR_READ_FLAG3_COUNT = 19,
- PERF_CCU_COLOR_READ_FLAG4_COUNT = 20,
- PERF_CCU_2D_BUSY_CYCLES = 21,
- PERF_CCU_2D_RD_REQ = 22,
- PERF_CCU_2D_WR_REQ = 23,
- PERF_CCU_2D_REORDER_STARVE_CYCLES = 24,
- PERF_CCU_2D_PIXELS = 25,
-};
-
-enum a5xx_cmp_perfcounter_select {
- PERF_CMPDECMP_STALL_CYCLES_VBIF = 0,
- PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
- PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
- PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
- PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
- PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
- PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
- PERF_CMPDECMP_VBIF_READ_DATA = 7,
- PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
- PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
- PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
- PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15,
- PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16,
- PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17,
- PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21,
- PERF_CMPDECMP_2D_RD_DATA = 22,
- PERF_CMPDECMP_2D_WR_DATA = 23,
-};
-
-enum a5xx_vbif_perfcounter_select {
- AXI_READ_REQUESTS_ID_0 = 0,
- AXI_READ_REQUESTS_ID_1 = 1,
- AXI_READ_REQUESTS_ID_2 = 2,
- AXI_READ_REQUESTS_ID_3 = 3,
- AXI_READ_REQUESTS_ID_4 = 4,
- AXI_READ_REQUESTS_ID_5 = 5,
- AXI_READ_REQUESTS_ID_6 = 6,
- AXI_READ_REQUESTS_ID_7 = 7,
- AXI_READ_REQUESTS_ID_8 = 8,
- AXI_READ_REQUESTS_ID_9 = 9,
- AXI_READ_REQUESTS_ID_10 = 10,
- AXI_READ_REQUESTS_ID_11 = 11,
- AXI_READ_REQUESTS_ID_12 = 12,
- AXI_READ_REQUESTS_ID_13 = 13,
- AXI_READ_REQUESTS_ID_14 = 14,
- AXI_READ_REQUESTS_ID_15 = 15,
- AXI0_READ_REQUESTS_TOTAL = 16,
- AXI1_READ_REQUESTS_TOTAL = 17,
- AXI2_READ_REQUESTS_TOTAL = 18,
- AXI3_READ_REQUESTS_TOTAL = 19,
- AXI_READ_REQUESTS_TOTAL = 20,
- AXI_WRITE_REQUESTS_ID_0 = 21,
- AXI_WRITE_REQUESTS_ID_1 = 22,
- AXI_WRITE_REQUESTS_ID_2 = 23,
- AXI_WRITE_REQUESTS_ID_3 = 24,
- AXI_WRITE_REQUESTS_ID_4 = 25,
- AXI_WRITE_REQUESTS_ID_5 = 26,
- AXI_WRITE_REQUESTS_ID_6 = 27,
- AXI_WRITE_REQUESTS_ID_7 = 28,
- AXI_WRITE_REQUESTS_ID_8 = 29,
- AXI_WRITE_REQUESTS_ID_9 = 30,
- AXI_WRITE_REQUESTS_ID_10 = 31,
- AXI_WRITE_REQUESTS_ID_11 = 32,
- AXI_WRITE_REQUESTS_ID_12 = 33,
- AXI_WRITE_REQUESTS_ID_13 = 34,
- AXI_WRITE_REQUESTS_ID_14 = 35,
- AXI_WRITE_REQUESTS_ID_15 = 36,
- AXI0_WRITE_REQUESTS_TOTAL = 37,
- AXI1_WRITE_REQUESTS_TOTAL = 38,
- AXI2_WRITE_REQUESTS_TOTAL = 39,
- AXI3_WRITE_REQUESTS_TOTAL = 40,
- AXI_WRITE_REQUESTS_TOTAL = 41,
- AXI_TOTAL_REQUESTS = 42,
- AXI_READ_DATA_BEATS_ID_0 = 43,
- AXI_READ_DATA_BEATS_ID_1 = 44,
- AXI_READ_DATA_BEATS_ID_2 = 45,
- AXI_READ_DATA_BEATS_ID_3 = 46,
- AXI_READ_DATA_BEATS_ID_4 = 47,
- AXI_READ_DATA_BEATS_ID_5 = 48,
- AXI_READ_DATA_BEATS_ID_6 = 49,
- AXI_READ_DATA_BEATS_ID_7 = 50,
- AXI_READ_DATA_BEATS_ID_8 = 51,
- AXI_READ_DATA_BEATS_ID_9 = 52,
- AXI_READ_DATA_BEATS_ID_10 = 53,
- AXI_READ_DATA_BEATS_ID_11 = 54,
- AXI_READ_DATA_BEATS_ID_12 = 55,
- AXI_READ_DATA_BEATS_ID_13 = 56,
- AXI_READ_DATA_BEATS_ID_14 = 57,
- AXI_READ_DATA_BEATS_ID_15 = 58,
- AXI0_READ_DATA_BEATS_TOTAL = 59,
- AXI1_READ_DATA_BEATS_TOTAL = 60,
- AXI2_READ_DATA_BEATS_TOTAL = 61,
- AXI3_READ_DATA_BEATS_TOTAL = 62,
- AXI_READ_DATA_BEATS_TOTAL = 63,
- AXI_WRITE_DATA_BEATS_ID_0 = 64,
- AXI_WRITE_DATA_BEATS_ID_1 = 65,
- AXI_WRITE_DATA_BEATS_ID_2 = 66,
- AXI_WRITE_DATA_BEATS_ID_3 = 67,
- AXI_WRITE_DATA_BEATS_ID_4 = 68,
- AXI_WRITE_DATA_BEATS_ID_5 = 69,
- AXI_WRITE_DATA_BEATS_ID_6 = 70,
- AXI_WRITE_DATA_BEATS_ID_7 = 71,
- AXI_WRITE_DATA_BEATS_ID_8 = 72,
- AXI_WRITE_DATA_BEATS_ID_9 = 73,
- AXI_WRITE_DATA_BEATS_ID_10 = 74,
- AXI_WRITE_DATA_BEATS_ID_11 = 75,
- AXI_WRITE_DATA_BEATS_ID_12 = 76,
- AXI_WRITE_DATA_BEATS_ID_13 = 77,
- AXI_WRITE_DATA_BEATS_ID_14 = 78,
- AXI_WRITE_DATA_BEATS_ID_15 = 79,
- AXI0_WRITE_DATA_BEATS_TOTAL = 80,
- AXI1_WRITE_DATA_BEATS_TOTAL = 81,
- AXI2_WRITE_DATA_BEATS_TOTAL = 82,
- AXI3_WRITE_DATA_BEATS_TOTAL = 83,
- AXI_WRITE_DATA_BEATS_TOTAL = 84,
- AXI_DATA_BEATS_TOTAL = 85,
-};
-
-enum a5xx_tex_filter {
- A5XX_TEX_NEAREST = 0,
- A5XX_TEX_LINEAR = 1,
- A5XX_TEX_ANISO = 2,
-};
-
-enum a5xx_tex_clamp {
- A5XX_TEX_REPEAT = 0,
- A5XX_TEX_CLAMP_TO_EDGE = 1,
- A5XX_TEX_MIRROR_REPEAT = 2,
- A5XX_TEX_CLAMP_TO_BORDER = 3,
- A5XX_TEX_MIRROR_CLAMP = 4,
-};
-
-enum a5xx_tex_aniso {
- A5XX_TEX_ANISO_1 = 0,
- A5XX_TEX_ANISO_2 = 1,
- A5XX_TEX_ANISO_4 = 2,
- A5XX_TEX_ANISO_8 = 3,
- A5XX_TEX_ANISO_16 = 4,
-};
-
-enum a5xx_tex_swiz {
- A5XX_TEX_X = 0,
- A5XX_TEX_Y = 1,
- A5XX_TEX_Z = 2,
- A5XX_TEX_W = 3,
- A5XX_TEX_ZERO = 4,
- A5XX_TEX_ONE = 5,
-};
-
-enum a5xx_tex_type {
- A5XX_TEX_1D = 0,
- A5XX_TEX_2D = 1,
- A5XX_TEX_CUBE = 2,
- A5XX_TEX_3D = 3,
- A5XX_TEX_BUFFER = 4,
-};
-
-#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
-#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
-#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
-#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
-#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
-#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
-#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
-#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
-#define A5XX_INT0_CP_SW 0x00000100
-#define A5XX_INT0_CP_HW_ERROR 0x00000200
-#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
-#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
-#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
-#define A5XX_INT0_CP_IB2 0x00002000
-#define A5XX_INT0_CP_IB1 0x00004000
-#define A5XX_INT0_CP_RB 0x00008000
-#define A5XX_INT0_CP_UNUSED_1 0x00010000
-#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
-#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
-#define A5XX_INT0_UNKNOWN_1 0x00080000
-#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
-#define A5XX_INT0_UNUSED_2 0x00200000
-#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
-#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
-#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
-#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
-#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
-#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
-#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
-#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
-#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
-#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
-
-#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
-#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
-#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
-#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
-#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
-#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
-
-#define REG_A5XX_CP_RB_BASE 0x00000800
-
-#define REG_A5XX_CP_RB_BASE_HI 0x00000801
-
-#define REG_A5XX_CP_RB_CNTL 0x00000802
-
-#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
-
-#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
-
-#define REG_A5XX_CP_RB_RPTR 0x00000806
-
-#define REG_A5XX_CP_RB_WPTR 0x00000807
-
-#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
-
-#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
-
-#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
-
-#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
-
-#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
-
-#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
-
-#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
-
-#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
-
-#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
-
-#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
-
-#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
-
-#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
-
-#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
-
-#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
-
-#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
-
-#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
-
-#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
-
-#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
-
-#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
-
-#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
-
-#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
-
-#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
-
-#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
-
-#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
-
-#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
-
-#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
-
-#define REG_A5XX_CP_CNTL 0x00000831
-
-#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
-
-#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
-
-#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
-
-#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
-
-#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
-
-#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
-
-#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
-
-#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
-
-#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
-
-#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
-
-#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
-
-#define REG_A5XX_CP_HW_FAULT 0x00000b1a
-
-#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
-
-#define REG_A5XX_CP_IB1_BASE 0x00000b1f
-
-#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
-
-#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
-
-#define REG_A5XX_CP_IB2_BASE 0x00000b22
-
-#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
-
-#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
-
-#define REG_A5XX_CP_SCRATCH(i0) (0x00000b78 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
-
-#define REG_A5XX_CP_PROTECT(i0) (0x00000880 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
-#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
-#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
-static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
-{
- return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
-}
-#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
-#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
-static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
-{
- return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
-}
-#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
-#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
-
-#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
-
-#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
-
-#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
-
-#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
-
-#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
-
-#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
-
-#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
-
-#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
-
-#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
-
-#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
-
-#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
-#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
-#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
-#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
-#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
-#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
-#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
-#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
-#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
-#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
-#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
-#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
-#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
-#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
-#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
-#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
-#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
-#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
-#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
-#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
-#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
-#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
-#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
-#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
-#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
-#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
-#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
-#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
-#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
-#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
-
-#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
-
-#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
-
-#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
-
-#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
-
-#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
-
-#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
-
-#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
-
-#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
-
-#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
-
-#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
-
-#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
-
-#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
-
-#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
-
-#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
-
-#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
-
-#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
-
-#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
-
-#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
-
-#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
-
-#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
-
-#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
-
-#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
-
-#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
-
-#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
-
-#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
-
-#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
-
-#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
-
-#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
-
-#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
-
-#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
-
-#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
-
-#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
-
-#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
-
-#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
-
-#define REG_A5XX_RBBM_AHB_CMD 0x00000096
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
-
-#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
-
-#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
-
-#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
-
-#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
-
-#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
-
-#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
-
-#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
-
-#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
-
-#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
-
-#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
-
-#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
-
-#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
-
-#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
-
-#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
-
-#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
-
-#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
-
-#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
-
-#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
-
-#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
-
-#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
-
-#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
-
-#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
-
-#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
-
-#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
-
-#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
-
-#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
-
-#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
-
-#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
-
-#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
-
-#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
-
-#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
-
-#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
-
-#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
-
-#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
-
-#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
-
-#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
-
-#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
-
-#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
-
-#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
-
-#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
-
-#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
-
-#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
-
-#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
-
-#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
-
-#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
-
-#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
-
-#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
-
-#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
-
-#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
-
-#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
-
-#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
-
-#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
-
-#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
-
-#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
-
-#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
-
-#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
-
-#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
-
-#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
-
-#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
-
-#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
-
-#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
-
-#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
-
-#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
-
-#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
-
-#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
-
-#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
-
-#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
-
-#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
-
-#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
-
-#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
-
-#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
-
-#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
-
-#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
-
-#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
-
-#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
-
-#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
-
-#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
-
-#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
-
-#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
-
-#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
-
-#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
-
-#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
-
-#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
-
-#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
-
-#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
-
-#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
-
-#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
-
-#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
-
-#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
-
-#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
-
-#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
-
-#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
-
-#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
-
-#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
-
-#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
-
-#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
-
-#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
-
-#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
-
-#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
-
-#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
-
-#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
-
-#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
-
-#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
-
-#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
-
-#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
-
-#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
-
-#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
-
-#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
-
-#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
-
-#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
-
-#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
-
-#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
-
-#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
-
-#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
-
-#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
-
-#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
-
-#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
-
-#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
-
-#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
-
-#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
-
-#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
-
-#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
-
-#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
-
-#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
-
-#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
-
-#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
-
-#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
-
-#define REG_A5XX_RBBM_STATUS 0x000004f5
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
-#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
-#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
-#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
-#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
-#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
-#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
-#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
-#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
-#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
-#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
-#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
-#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
-#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
-#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
-#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
-#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
-#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
-#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
-#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
-#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
-#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
-#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
-#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
-#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
-#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
-#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
-#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
-#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
-#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
-
-#define REG_A5XX_RBBM_STATUS3 0x00000530
-#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
-
-#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
-
-#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
-
-#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
-
-#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
-
-#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
-
-#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
-
-#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
-
-#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
-
-#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
-
-#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
-
-#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
-
-#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
-
-#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
-
-#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
-
-#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
-
-#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
-
-#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
-
-#define REG_A5XX_VSC_BIN_SIZE 0x00000bc2
-#define A5XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
-#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
-static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
-
-#define REG_A5XX_VSC_SIZE_ADDRESS_HI 0x00000bc4
-
-#define REG_A5XX_UNKNOWN_0BC5 0x00000bc5
-
-#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
-
-#define REG_A5XX_VSC_PIPE_CONFIG(i0) (0x00000bd0 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
-#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
-#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
-static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
-{
- return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
-}
-#define A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
-#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
-static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
-{
- return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK;
-}
-#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
-#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
-static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
-{
- return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK;
-}
-#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
-#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
-static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
-{
- return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
-}
-
-#define REG_A5XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000be0 + 0x2*(i0))
-
-static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
-
-static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
-
-#define REG_A5XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c00 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
-
-#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
-
-#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
-
-#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd
-#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff
-#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0
-static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val)
-{
- return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK;
-}
-#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000
-#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16
-static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
-{
- return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
-
-#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
-
-#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
-
-#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
-
-#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
-
-#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
-
-#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
-
-#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
-
-#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
-
-#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
-
-#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
-
-#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
-
-#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
-
-#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
-
-#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
-
-#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
-
-#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
-
-#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
-
-#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
-
-#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
-
-#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
-
-#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
-
-#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
-
-#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
-
-#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
-
-#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
-
-#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
-
-#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
-
-#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
-
-#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
-
-#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
-
-#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
-
-#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
-#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
-
-#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
-
-#define REG_A5XX_PC_MODE_CNTL 0x00000d02
-
-#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
-
-#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
-
-#define REG_A5XX_PC_START_INDEX 0x00000d06
-
-#define REG_A5XX_PC_MAX_INDEX 0x00000d07
-
-#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
-
-#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
-
-#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
-
-#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
-
-#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
-
-#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04
-
-#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
-
-#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
-
-#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
-
-#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
-
-#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
-
-#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
-
-#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
-
-#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
-
-#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
-
-#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
-#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400
-
-#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
-
-#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
-#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001
-
-#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
-
-#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
-
-#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
-
-#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
-
-#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
-
-#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
-
-#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
-
-#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
-
-#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
-
-#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
-
-#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
-
-#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
-
-#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
-
-#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
-
-#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
-
-#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
-
-#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
-
-#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
-
-#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
-
-#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
-
-#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
-
-#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
-
-#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
-
-#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
-
-#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
-
-#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
-
-#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
-
-#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
-
-#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
-
-#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
-
-#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
-
-#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
-
-#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
-
-#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
-
-#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
-
-#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
-
-#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
-
-#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
-
-#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
-
-#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
-
-#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
-
-#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
-
-#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
-
-#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
-
-#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
-
-#define REG_A5XX_VBIF_VERSION 0x00003000
-
-#define REG_A5XX_VBIF_CLKON 0x00003001
-
-#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
-
-#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
-
-#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
-
-#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
-
-#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
-
-#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
-
-#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
-
-#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
-
-#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
-
-#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
-
-#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
-
-#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
-
-#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
-
-#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
-
-#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0
-
-#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1
-
-#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2
-
-#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
-
-#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
-
-#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
-
-#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
-
-#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
-
-#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
-
-#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
-
-#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
-
-#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
-
-#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
-
-#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
-
-#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
-
-#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
-
-#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
-
-#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
-
-#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
-
-#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
-
-#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
-
-#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
-
-#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
-
-#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
-
-#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
-
-#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
-
-#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
-
-#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
-
-#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
-
-#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
-
-#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
-
-#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
-
-#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
-
-#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
-
-#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
-
-#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
-
-#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
-
-#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
-
-#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
-
-#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
-
-#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
-
-#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
-
-#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
-
-#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
-
-#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
-
-#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
-
-#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
-
-#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
-
-#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
-
-#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
-
-#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
-
-#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
-
-#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
-
-#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
-
-#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
-
-#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
-
-#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
-
-#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
-
-#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
-
-#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
-
-#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
-
-#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
-
-#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
-
-#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
-
-#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
-
-#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
-
-#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
-
-#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
-
-#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
-
-#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
-
-#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
-
-#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
-
-#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
-
-#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
-
-#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
-
-#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
-
-#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
-
-#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
-
-#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
-
-#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
-
-#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
-
-#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
-
-#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
-
-#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
-
-#define REG_A5XX_GPMU_GPMU_SP_CLOCK_CONTROL 0x0000a880
-
-#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
-
-#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
-
-#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
-
-#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
-#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
-
-#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
-#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
-
-#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
-
-#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
-
-#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
-
-#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
-
-#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
-
-#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
-
-#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
-
-#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
-
-#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
-
-#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
-
-#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
-
-#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
-
-#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
-
-#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
-
-#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
-
-#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
-
-#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
-
-#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
-
-#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
-
-#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
-
-#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
-
-#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
-
-#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
-
-#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
-
-#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
-
-#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
-
-#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
-
-#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
-
-#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
-
-#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
-
-#define REG_A5XX_GDPM_INT_EN 0x0000b80f
-
-#define REG_A5XX_GDPM_INT_MASK 0x0000b811
-
-#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
-
-#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
-
-#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
-
-#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
-
-#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
-
-#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
-
-#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
-
-#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
-#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
-
-#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001
-#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
-}
-#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
-#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
-static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
-{
- return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E004 0x0000e004
-
-#define REG_A5XX_GRAS_CNTL 0x0000e005
-#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
-#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
-#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
-#define A5XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
-#define A5XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
-#define A5XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
-#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
-#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6
-static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
-{
- return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
-#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
-#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
-{
- return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
-}
-#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
-#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
-static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
-{
- return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
-#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
-#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
-#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
-#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
-#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
-}
-
-#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
-#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
-#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
-static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
-{
- return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
-#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
-#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
-#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
-#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
-#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
-static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
-{
- return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
-}
-#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
-#define A5XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
-#define A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
-static inline uint32_t A5XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
-{
- return ((val) << A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A5XX_GRAS_SU_CNTL_LINE_MODE__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
-#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
-#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
-}
-#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
-#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
-static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
-#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
-#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
-{
- return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
-
-#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
-#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
-#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002
-
-#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
-#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
-#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
-{
- return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
-#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
-#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
-{
- return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
-#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
-#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
-{
- return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
-#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
-#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
-{
- return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
-}
-
-#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
-
-#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
-#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001
-#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
-
-#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
-
-#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
-#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
-#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
-
-#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
-}
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
-}
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
-}
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
-}
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
-}
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
-}
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
-#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
-#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
-#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
-#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004
-
-#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
-
-#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
-
-#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
-#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff
-#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
-}
-
-#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
-
-#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
-
-#define REG_A5XX_RB_CNTL 0x0000e140
-#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
-#define A5XX_RB_CNTL_WIDTH__SHIFT 0
-static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
-}
-#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
-#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
-static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
-}
-#define A5XX_RB_CNTL_BYPASS 0x00020000
-
-#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
-#define A5XX_RB_RENDER_CNTL_BINNING_PASS 0x00000001
-#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
-#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE 0x00000080
-#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
-#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
-static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
-}
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
-static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
-}
-
-#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
-#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-
-#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
-#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
-#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
-#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
-static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
-}
-
-#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
-#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
-#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
-#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
-
-#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
-#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
-#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
-static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
-{
- return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
-}
-#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
-
-#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
-#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
-#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
-#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
-#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
-#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
-#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
-#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
-#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
-}
-#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
-#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
-static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
-{
- return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
-}
-
-#define REG_A5XX_RB_MRT(i0) (0x0000e150 + 0x7*(i0))
-
-static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
-#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
-#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
-#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
-#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
-#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
-static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
-{
- return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
-}
-#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
-#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
-static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
-}
-
-static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
-}
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
-}
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
-#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
-}
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
-}
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
-}
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
-#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
-}
-
-static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
-#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
-}
-#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
-#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
-static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
-}
-#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
-#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
-static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
-{
- return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
-}
-#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
-#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
-static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
-}
-#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
-
-static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
-#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
-#define A5XX_RB_MRT_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
-}
-
-static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
-#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
-}
-
-static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
-
-#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
-#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
-#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
-}
-#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
-#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
-static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
-}
-#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
-#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
-static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
-#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
-#define A5XX_RB_BLEND_RED_F32__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
-{
- return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
-#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
-#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
-}
-#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
-#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
-static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
-}
-#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
-#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
-static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
-#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
-#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
-{
- return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
-#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
-#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
-}
-#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
-#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
-static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
-}
-#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
-#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
-static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
-#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
-#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
-{
- return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
-#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
-#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
-}
-#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
-#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
-static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
-}
-#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
-#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
-static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
-{
- return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
-#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
-#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
-{
- return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
-}
-
-#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
-#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
-#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
-static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
-{
- return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
-}
-#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
-#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
-#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
-static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
-}
-
-#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
-#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
-#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
-static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
-}
-#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
-#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
-#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
-#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
-static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
-{
- return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
-}
-
-#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
-#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
-#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002
-
-#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
-#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
-#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
-#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
-#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
-static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
-{
- return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
-}
-#define A5XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
-
-#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
-#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
-#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
-{
- return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
-}
-
-#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
-
-#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
-
-#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
-#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
-#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
-#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
-#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
-#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
-#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
-#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
-#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
-#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
-#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
-#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
-#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
-#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
-#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
-}
-#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
-#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
-static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
-}
-
-#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
-#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
-
-#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
-
-#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
-
-#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
-#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
-#define A5XX_RB_STENCIL_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
-#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
-#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
-#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
-static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
-}
-#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
-#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
-static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
-}
-#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
-#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
-#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
-#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
-static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
-}
-#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
-#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
-static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
-}
-#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
-#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
-static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
-{
- return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
-}
-
-#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
-#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
-}
-#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1
-#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
-
-#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
-#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f
-#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
-static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
-{
- return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
-}
-
-#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
-#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
-#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
-static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
-{
- return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
-}
-#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
-#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
-static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
-{
- return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
-}
-
-#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
-#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
-#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
-#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
-static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
-{
- return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
-}
-#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
-#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
-static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
-{
- return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
-}
-
-#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
-#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
-
-#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
-
-#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
-
-#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
-#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
-#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
-#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
-
-#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
-
-#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
-
-#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
-
-#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
-#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
-#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
-#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
-#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
-static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
-{
- return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
-}
-
-#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
-
-#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
-
-#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
-
-#define REG_A5XX_RB_MRT_FLAG_BUFFER(i0) (0x0000e243 + 0x4*(i0))
-
-static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
-
-static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
-
-static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
-#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
-#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
-}
-
-static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
-#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
-
-#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
-
-#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
-#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
-#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
-#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
-#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
-
-#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268
-
-#define REG_A5XX_VPC_CNTL_0 0x0000e280
-#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
-#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
-}
-#define A5XX_VPC_CNTL_0_VARYING 0x00000800
-
-#define REG_A5XX_VPC_VARYING_INTERP(i0) (0x0000e282 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
-
-#define REG_A5XX_VPC_VARYING_PS_REPL(i0) (0x0000e28a + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
-
-#define REG_A5XX_UNKNOWN_E292 0x0000e292
-
-#define REG_A5XX_UNKNOWN_E293 0x0000e293
-
-#define REG_A5XX_VPC_VAR(i0) (0x0000e294 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
-
-#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
-
-#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a
-#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK;
-}
-#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
-}
-#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A5XX_VPC_PACK 0x0000e29d
-#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
-#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
-static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
-{
- return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
-}
-#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00
-#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8
-static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val)
-{
- return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK;
-}
-
-#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
-
-#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1
-#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
-#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
-#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
-#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
-#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
-
-#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
-#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
-
-#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3
-#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000
-
-#define REG_A5XX_VPC_SO_PROG 0x0000e2a4
-#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
-#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0
-static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
-{
- return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK;
-}
-#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
-#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
-static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
-}
-#define A5XX_VPC_SO_PROG_A_EN 0x00000800
-#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
-#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12
-static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
-{
- return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK;
-}
-#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
-#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
-static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
-}
-#define A5XX_VPC_SO_PROG_B_EN 0x00800000
-
-#define REG_A5XX_VPC_SO(i0) (0x0000e2a7 + 0x7*(i0))
-
-static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; }
-
-static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; }
-
-#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
-#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
-#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
-#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
-#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
-
-#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
-#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
-
-#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
-#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
-#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
-static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
-}
-#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
-#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
-static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
-}
-#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
-
-#define REG_A5XX_PC_CLIP_CNTL 0x0000e389
-#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK;
-}
-
-#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
-
-#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
-
-#define REG_A5XX_PC_GS_PARAM 0x0000e38e
-#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
-#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
-static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
-{
- return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
-}
-#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
-#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
-static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
-{
- return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
-}
-#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
-}
-
-#define REG_A5XX_PC_HS_PARAM 0x0000e38f
-#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
-#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
-static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
-{
- return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
-}
-#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
-#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
-static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
-{
- return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
-}
-#define A5XX_PC_HS_PARAM_CW 0x00800000
-#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
-
-#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
-
-#define REG_A5XX_VFD_CONTROL_0 0x0000e400
-#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
-#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
-static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
-}
-
-#define REG_A5XX_VFD_CONTROL_1 0x0000e401
-#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
-#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
-static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
-}
-#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
-#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
-static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
-}
-#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
-#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
-static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
-}
-
-#define REG_A5XX_VFD_CONTROL_2 0x0000e402
-#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
-#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
-static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
-}
-
-#define REG_A5XX_VFD_CONTROL_3 0x0000e403
-#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
-#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
-static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
-}
-#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
-#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
-static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
-}
-#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
-#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
-static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
-{
- return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
-}
-
-#define REG_A5XX_VFD_CONTROL_4 0x0000e404
-
-#define REG_A5XX_VFD_CONTROL_5 0x0000e405
-
-#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
-
-#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
-
-#define REG_A5XX_VFD_FETCH(i0) (0x0000e40a + 0x4*(i0))
-
-static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
-
-static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
-
-static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
-
-static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
-
-#define REG_A5XX_VFD_DECODE(i0) (0x0000e48a + 0x2*(i0))
-
-static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
-#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
-#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
-static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
-{
- return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
-}
-#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
-static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
-{
- return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
-}
-#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
-#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
-static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
-}
-#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
-#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
-
-static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
-
-#define REG_A5XX_VFD_DEST_CNTL(i0) (0x0000e4ca + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
-#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
-#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
-static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
-{
- return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
-}
-#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
-#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
-static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
-{
- return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
-}
-
-#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
-
-#define REG_A5XX_SP_SP_CNTL 0x0000e580
-
-#define REG_A5XX_SP_VS_CONFIG 0x0000e584
-#define A5XX_SP_VS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_FS_CONFIG 0x0000e585
-#define A5XX_SP_FS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_HS_CONFIG 0x0000e586
-#define A5XX_SP_HS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_DS_CONFIG 0x0000e587
-#define A5XX_SP_DS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_GS_CONFIG 0x0000e588
-#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_CS_CONFIG 0x0000e589
-#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001
-#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
-
-#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
-
-#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
-#define A5XX_SP_VS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
-#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
-#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
-static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
-{
- return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
-}
-
-#define REG_A5XX_SP_VS_OUT(i0) (0x0000e593 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
-#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
-#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
-}
-#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
-#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
-static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
-#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
-}
-#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
-#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
-static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A5XX_SP_VS_VPC_DST(i0) (0x0000e5a3 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
-
-#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
-
-#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
-
-#define REG_A5XX_SP_VS_PVT_MEM_PARAM 0x0000e5ae
-#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_VS_PVT_MEM_ADDR 0x0000e5af
-
-#define REG_A5XX_SP_VS_PVT_MEM_SIZE 0x0000e5b1
-#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
-#define A5XX_SP_FS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
-
-#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
-
-#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
-
-#define REG_A5XX_SP_FS_PVT_MEM_PARAM 0x0000e5c5
-#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_FS_PVT_MEM_ADDR 0x0000e5c6
-
-#define REG_A5XX_SP_FS_PVT_MEM_SIZE 0x0000e5c8
-#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
-#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
-#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
-static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
-{
- return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
-}
-#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
-#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
-
-#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
-#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
-#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
-static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
-}
-#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
-#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
-static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
-}
-#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
-#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
-static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
-}
-
-#define REG_A5XX_SP_FS_OUTPUT(i0) (0x0000e5cb + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
-#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
-#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
-static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
-{
- return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
-}
-#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
-
-#define REG_A5XX_SP_FS_MRT(i0) (0x0000e5d3 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
-#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
-}
-#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
-#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
-
-#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
-
-#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
-#define A5XX_SP_CS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
-
-#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
-
-#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
-
-#define REG_A5XX_SP_CS_PVT_MEM_PARAM 0x0000e5f5
-#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_CS_PVT_MEM_ADDR 0x0000e5f6
-
-#define REG_A5XX_SP_CS_PVT_MEM_SIZE 0x0000e5f8
-#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
-#define A5XX_SP_HS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E602 0x0000e602
-
-#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603
-
-#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
-
-#define REG_A5XX_SP_HS_PVT_MEM_PARAM 0x0000e605
-#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_HS_PVT_MEM_ADDR 0x0000e606
-
-#define REG_A5XX_SP_HS_PVT_MEM_SIZE 0x0000e608
-#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
-#define A5XX_SP_DS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
-
-#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
-
-#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
-
-#define REG_A5XX_SP_DS_PVT_MEM_PARAM 0x0000e62e
-#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_DS_PVT_MEM_ADDR 0x0000e62f
-
-#define REG_A5XX_SP_DS_PVT_MEM_SIZE 0x0000e631
-#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
-#define A5XX_SP_GS_CTRL_REG0_BUFFER 0x00000004
-#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
-#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
-static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
-#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
-static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
-#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
-static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
-#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
-#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
-#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
-static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-
-#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
-
-#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c
-
-#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d
-
-#define REG_A5XX_SP_GS_PVT_MEM_PARAM 0x0000e65e
-#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
-#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
-static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
-}
-#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A5XX_SP_GS_PVT_MEM_ADDR 0x0000e65f
-
-#define REG_A5XX_SP_GS_PVT_MEM_SIZE 0x0000e661
-#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-
-#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
-#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-
-#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
-#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706
-
-#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707
-
-#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
-
-#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701
-
-#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702
-
-#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703
-
-#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
-
-#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
-
-#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724
-
-#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725
-
-#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726
-
-#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727
-
-#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728
-
-#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729
-
-#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
-
-#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
-
-#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c
-
-#define REG_A5XX_TPL1_HS_TEX_CONST_HI 0x0000e72d
-
-#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e
-
-#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f
-
-#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730
-
-#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731
-
-#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
-
-#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751
-
-#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
-
-#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
-
-#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c
-
-#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d
-
-#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
-
-#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
-
-#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760
-
-#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761
-
-#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
-
-#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
-#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001
-#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
-}
-#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004
-#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2
-static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK;
-}
-
-#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
-#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
-#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
-}
-
-#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
-#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
-#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
-}
-#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
-#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
-static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
-}
-#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
-#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
-static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
-}
-#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
-#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
-static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
-}
-
-#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
-}
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
-}
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
-}
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
-#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
-}
-
-#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
-#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
-#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
-}
-#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
-#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
-static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
-}
-#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
-#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
-static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
-}
-#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
-#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
-static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
-}
-
-#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
-
-#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b
-#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c
-#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d
-#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e
-#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f
-#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
-#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001
-#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
-#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
-static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
-}
-#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
-#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
-static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK;
-}
-
-#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
-#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
-#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
-#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
-#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
-#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
-#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001
-#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
-#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
-static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
-
-#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
-
-#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
-#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
-#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
-}
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
-}
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
-}
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
-#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
-#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
-#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
-#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
-#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
-#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
-#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
-#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
-#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
-}
-#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
-#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
-static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK;
-}
-#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
-#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
-static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK;
-}
-#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
-#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
-static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
-{
- return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
-}
-
-#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
-
-#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
-
-#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
-
-#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
-
-#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
-
-#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
-
-#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
-
-#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
-
-#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
-
-#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
-
-#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
-
-#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
-
-#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
-
-#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
-
-#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
-
-#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
-
-#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
-
-#define REG_A5XX_HLSQ_CS_CONSTLEN 0x0000e7dc
-
-#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
-
-#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
-
-#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
-
-#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
-
-#define REG_A5XX_RB_2D_SRC_SOLID_DW2 0x00002103
-
-#define REG_A5XX_RB_2D_SRC_SOLID_DW3 0x00002104
-
-#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
-#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
-}
-#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
-#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
-}
-#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
-}
-#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
-#define A5XX_RB_2D_SRC_INFO_SRGB 0x00002000
-
-#define REG_A5XX_RB_2D_SRC_LO 0x00002108
-
-#define REG_A5XX_RB_2D_SRC_HI 0x00002109
-
-#define REG_A5XX_RB_2D_SRC_SIZE 0x0000210a
-#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK 0x0000ffff
-#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
-}
-#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000
-#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16
-static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_2D_DST_INFO 0x00002110
-#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
-}
-#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
-#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
-}
-#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
-}
-#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
-#define A5XX_RB_2D_DST_INFO_SRGB 0x00002000
-
-#define REG_A5XX_RB_2D_DST_LO 0x00002111
-
-#define REG_A5XX_RB_2D_DST_HI 0x00002112
-
-#define REG_A5XX_RB_2D_DST_SIZE 0x00002113
-#define A5XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
-#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
-}
-#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000
-#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16
-static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
-
-#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
-
-#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH 0x00002142
-#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK 0xffffffff
-#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
-}
-
-#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
-
-#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
-
-#define REG_A5XX_RB_2D_DST_FLAGS_PITCH 0x00002145
-#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK 0xffffffff
-#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
-static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
-}
-
-#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
-
-#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
-#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
-}
-#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
-#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
-}
-#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
-}
-#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
-#define A5XX_GRAS_2D_SRC_INFO_SRGB 0x00002000
-
-#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
-#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
-{
- return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
-}
-#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
-#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
-}
-#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
-}
-#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
-#define A5XX_GRAS_2D_DST_INFO_SRGB 0x00002000
-
-#define REG_A5XX_UNKNOWN_2184 0x00002184
-
-#define REG_A5XX_TEX_SAMP_0 0x00000000
-#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
-#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
-#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
-static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
-{
- return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
-}
-#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
-#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
-static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
-{
- return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
-}
-#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
-#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
-static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
-{
- return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
-}
-#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
-#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
-static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
-{
- return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
-}
-#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
-#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
-static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
-{
- return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
-}
-#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
-#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
-static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
-{
- return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
-}
-#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
-#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
-static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
-{
- return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
-}
-
-#define REG_A5XX_TEX_SAMP_1 0x00000001
-#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
-#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
-static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
-}
-#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
-#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
-#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
-#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
-#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
-static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
-}
-#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
-#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
-static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
-}
-
-#define REG_A5XX_TEX_SAMP_2 0x00000002
-#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
-#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
-static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
-{
- return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
-}
-
-#define REG_A5XX_TEX_SAMP_3 0x00000003
-
-#define REG_A5XX_TEX_CONST_0 0x00000000
-#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
-#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
-{
- return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
-}
-#define A5XX_TEX_CONST_0_SRGB 0x00000004
-#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
-#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
-static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
-{
- return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
-}
-#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
-#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
-static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
-{
- return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
-}
-#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
-#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
-static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
-{
- return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
-}
-#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
-#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
-static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
-{
- return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
-}
-#define A5XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
-#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT 16
-static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
-}
-#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
-#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
-static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
-}
-#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
-#define A5XX_TEX_CONST_0_FMT__SHIFT 22
-static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
-{
- return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
-}
-#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
-#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
-static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
-}
-
-#define REG_A5XX_TEX_CONST_1 0x00000001
-#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
-#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
-}
-#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
-#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
-static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
-}
-
-#define REG_A5XX_TEX_CONST_2 0x00000002
-#define A5XX_TEX_CONST_2_BUFFER 0x00000010
-#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
-#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
-}
-#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
-#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
-static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
-}
-#define A5XX_TEX_CONST_2_TYPE__MASK 0xe0000000
-#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
-static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
-{
- return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
-}
-
-#define REG_A5XX_TEX_CONST_3 0x00000003
-#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
-#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
-}
-#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
-#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
-static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
-}
-#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000
-#define A5XX_TEX_CONST_3_FLAG 0x10000000
-
-#define REG_A5XX_TEX_CONST_4 0x00000004
-#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
-#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
-static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
-}
-
-#define REG_A5XX_TEX_CONST_5 0x00000005
-#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
-#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
-}
-#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
-#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
-static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
-{
- return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
-}
-
-#define REG_A5XX_TEX_CONST_6 0x00000006
-
-#define REG_A5XX_TEX_CONST_7 0x00000007
-
-#define REG_A5XX_TEX_CONST_8 0x00000008
-
-#define REG_A5XX_TEX_CONST_9 0x00000009
-
-#define REG_A5XX_TEX_CONST_10 0x0000000a
-
-#define REG_A5XX_TEX_CONST_11 0x0000000b
-
-#define REG_A5XX_SSBO_0_0 0x00000000
-#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
-#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
-static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
-}
-
-#define REG_A5XX_SSBO_0_1 0x00000001
-#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
-#define A5XX_SSBO_0_1_PITCH__SHIFT 0
-static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
-{
- return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
-}
-
-#define REG_A5XX_SSBO_0_2 0x00000002
-#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
-#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
-static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
-}
-
-#define REG_A5XX_SSBO_0_3 0x00000003
-#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
-#define A5XX_SSBO_0_3_CPP__SHIFT 0
-static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
-{
- return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
-}
-
-#define REG_A5XX_SSBO_1_0 0x00000000
-#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
-#define A5XX_SSBO_1_0_FMT__SHIFT 8
-static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
-{
- return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
-}
-#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
-#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
-static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
-{
- return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
-}
-
-#define REG_A5XX_SSBO_1_1 0x00000001
-#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
-#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
-static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
-{
- return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
-}
-#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
-#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
-static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
-{
- return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
-}
-
-#define REG_A5XX_SSBO_2_0 0x00000000
-#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
-#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
-static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
-{
- return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
-}
-
-#define REG_A5XX_SSBO_2_1 0x00000001
-#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
-#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
-{
- return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
-}
-
-#define REG_A5XX_UBO_0 0x00000000
-#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff
-#define A5XX_UBO_0_BASE_LO__SHIFT 0
-static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
-{
- return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
-}
-
-#define REG_A5XX_UBO_1 0x00000001
-#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff
-#define A5XX_UBO_1_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
-{
- return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A5XX_XML */
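A note on the field helpers in the generated header deleted above: every register field is described by a __MASK/__SHIFT pair plus an inline packer that masks the caller's value into position, and fields with hardware alignment requirements additionally assert that the low bits are clear and store the value pre-shifted (for example, 64-byte-aligned pitches are stored as pitch >> 6). A minimal usage sketch follows, assuming the deleted a5xx.xml.h were still available on the include path; the wrapper function name and the example dimensions are hypothetical and not part of the header or of this diff.

#include <stdint.h>
#include "a5xx.xml.h"	/* the generated header removed by this diff */

/* Hypothetical convenience wrapper, not part of the generated header:
 * builds the RB_2D_DST_SIZE dword from byte pitches.  Both arguments must
 * be multiples of 64 bytes; the generated packers assert(!(val & 0x3f))
 * and store the values shifted right by 6 into their bitfields. */
static uint32_t pack_rb_2d_dst_size(uint32_t pitch_bytes, uint32_t array_pitch_bytes)
{
	return A5XX_RB_2D_DST_SIZE_PITCH(pitch_bytes) |
	       A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(array_pitch_bytes);
}

/* e.g. pack_rb_2d_dst_size(4096, 4096 * 1080) for a hypothetical
 * 1024x1080 4-byte-per-pixel destination layer (both values 64-aligned). */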
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
deleted file mode 100644
index 92e23bf2458d..000000000000
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ /dev/null
@@ -1,11858 +0,0 @@
-#ifndef A6XX_XML
-#define A6XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 243381 bytes, from Sat Feb 24 09:06:40 2024)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum a6xx_tile_mode {
- TILE6_LINEAR = 0,
- TILE6_2 = 2,
- TILE6_3 = 3,
-};
-
-enum a6xx_format {
- FMT6_A8_UNORM = 2,
- FMT6_8_UNORM = 3,
- FMT6_8_SNORM = 4,
- FMT6_8_UINT = 5,
- FMT6_8_SINT = 6,
- FMT6_4_4_4_4_UNORM = 8,
- FMT6_5_5_5_1_UNORM = 10,
- FMT6_1_5_5_5_UNORM = 12,
- FMT6_5_6_5_UNORM = 14,
- FMT6_8_8_UNORM = 15,
- FMT6_8_8_SNORM = 16,
- FMT6_8_8_UINT = 17,
- FMT6_8_8_SINT = 18,
- FMT6_L8_A8_UNORM = 19,
- FMT6_16_UNORM = 21,
- FMT6_16_SNORM = 22,
- FMT6_16_FLOAT = 23,
- FMT6_16_UINT = 24,
- FMT6_16_SINT = 25,
- FMT6_8_8_8_UNORM = 33,
- FMT6_8_8_8_SNORM = 34,
- FMT6_8_8_8_UINT = 35,
- FMT6_8_8_8_SINT = 36,
- FMT6_8_8_8_8_UNORM = 48,
- FMT6_8_8_8_X8_UNORM = 49,
- FMT6_8_8_8_8_SNORM = 50,
- FMT6_8_8_8_8_UINT = 51,
- FMT6_8_8_8_8_SINT = 52,
- FMT6_9_9_9_E5_FLOAT = 53,
- FMT6_10_10_10_2_UNORM = 54,
- FMT6_10_10_10_2_UNORM_DEST = 55,
- FMT6_10_10_10_2_SNORM = 57,
- FMT6_10_10_10_2_UINT = 58,
- FMT6_10_10_10_2_SINT = 59,
- FMT6_11_11_10_FLOAT = 66,
- FMT6_16_16_UNORM = 67,
- FMT6_16_16_SNORM = 68,
- FMT6_16_16_FLOAT = 69,
- FMT6_16_16_UINT = 70,
- FMT6_16_16_SINT = 71,
- FMT6_32_UNORM = 72,
- FMT6_32_SNORM = 73,
- FMT6_32_FLOAT = 74,
- FMT6_32_UINT = 75,
- FMT6_32_SINT = 76,
- FMT6_32_FIXED = 77,
- FMT6_16_16_16_UNORM = 88,
- FMT6_16_16_16_SNORM = 89,
- FMT6_16_16_16_FLOAT = 90,
- FMT6_16_16_16_UINT = 91,
- FMT6_16_16_16_SINT = 92,
- FMT6_16_16_16_16_UNORM = 96,
- FMT6_16_16_16_16_SNORM = 97,
- FMT6_16_16_16_16_FLOAT = 98,
- FMT6_16_16_16_16_UINT = 99,
- FMT6_16_16_16_16_SINT = 100,
- FMT6_32_32_UNORM = 101,
- FMT6_32_32_SNORM = 102,
- FMT6_32_32_FLOAT = 103,
- FMT6_32_32_UINT = 104,
- FMT6_32_32_SINT = 105,
- FMT6_32_32_FIXED = 106,
- FMT6_32_32_32_UNORM = 112,
- FMT6_32_32_32_SNORM = 113,
- FMT6_32_32_32_UINT = 114,
- FMT6_32_32_32_SINT = 115,
- FMT6_32_32_32_FLOAT = 116,
- FMT6_32_32_32_FIXED = 117,
- FMT6_32_32_32_32_UNORM = 128,
- FMT6_32_32_32_32_SNORM = 129,
- FMT6_32_32_32_32_FLOAT = 130,
- FMT6_32_32_32_32_UINT = 131,
- FMT6_32_32_32_32_SINT = 132,
- FMT6_32_32_32_32_FIXED = 133,
- FMT6_G8R8B8R8_422_UNORM = 140,
- FMT6_R8G8R8B8_422_UNORM = 141,
- FMT6_R8_G8B8_2PLANE_420_UNORM = 142,
- FMT6_NV21 = 143,
- FMT6_R8_G8_B8_3PLANE_420_UNORM = 144,
- FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145,
- FMT6_NV12_Y = 148,
- FMT6_NV12_UV = 149,
- FMT6_NV12_VU = 150,
- FMT6_NV12_4R = 151,
- FMT6_NV12_4R_Y = 152,
- FMT6_NV12_4R_UV = 153,
- FMT6_P010 = 154,
- FMT6_P010_Y = 155,
- FMT6_P010_UV = 156,
- FMT6_TP10 = 157,
- FMT6_TP10_Y = 158,
- FMT6_TP10_UV = 159,
- FMT6_Z24_UNORM_S8_UINT = 160,
- FMT6_ETC2_RG11_UNORM = 171,
- FMT6_ETC2_RG11_SNORM = 172,
- FMT6_ETC2_R11_UNORM = 173,
- FMT6_ETC2_R11_SNORM = 174,
- FMT6_ETC1 = 175,
- FMT6_ETC2_RGB8 = 176,
- FMT6_ETC2_RGBA8 = 177,
- FMT6_ETC2_RGB8A1 = 178,
- FMT6_DXT1 = 179,
- FMT6_DXT3 = 180,
- FMT6_DXT5 = 181,
- FMT6_RGTC1_UNORM = 183,
- FMT6_RGTC1_SNORM = 184,
- FMT6_RGTC2_UNORM = 187,
- FMT6_RGTC2_SNORM = 188,
- FMT6_BPTC_UFLOAT = 190,
- FMT6_BPTC_FLOAT = 191,
- FMT6_BPTC = 192,
- FMT6_ASTC_4x4 = 193,
- FMT6_ASTC_5x4 = 194,
- FMT6_ASTC_5x5 = 195,
- FMT6_ASTC_6x5 = 196,
- FMT6_ASTC_6x6 = 197,
- FMT6_ASTC_8x5 = 198,
- FMT6_ASTC_8x6 = 199,
- FMT6_ASTC_8x8 = 200,
- FMT6_ASTC_10x5 = 201,
- FMT6_ASTC_10x6 = 202,
- FMT6_ASTC_10x8 = 203,
- FMT6_ASTC_10x10 = 204,
- FMT6_ASTC_12x10 = 205,
- FMT6_ASTC_12x12 = 206,
- FMT6_Z24_UINT_S8_UINT = 234,
- FMT6_NONE = 255,
-};
-
-enum a6xx_polygon_mode {
- POLYMODE6_POINTS = 1,
- POLYMODE6_LINES = 2,
- POLYMODE6_TRIANGLES = 3,
-};
-
-enum a6xx_depth_format {
- DEPTH6_NONE = 0,
- DEPTH6_16 = 1,
- DEPTH6_24_8 = 2,
- DEPTH6_32 = 4,
-};
-
-enum a6xx_shader_id {
- A6XX_TP0_TMO_DATA = 9,
- A6XX_TP0_SMO_DATA = 10,
- A6XX_TP0_MIPMAP_BASE_DATA = 11,
- A6XX_TP1_TMO_DATA = 25,
- A6XX_TP1_SMO_DATA = 26,
- A6XX_TP1_MIPMAP_BASE_DATA = 27,
- A6XX_SP_INST_DATA = 41,
- A6XX_SP_LB_0_DATA = 42,
- A6XX_SP_LB_1_DATA = 43,
- A6XX_SP_LB_2_DATA = 44,
- A6XX_SP_LB_3_DATA = 45,
- A6XX_SP_LB_4_DATA = 46,
- A6XX_SP_LB_5_DATA = 47,
- A6XX_SP_CB_BINDLESS_DATA = 48,
- A6XX_SP_CB_LEGACY_DATA = 49,
- A6XX_SP_UAV_DATA = 50,
- A6XX_SP_INST_TAG = 51,
- A6XX_SP_CB_BINDLESS_TAG = 52,
- A6XX_SP_TMO_UMO_TAG = 53,
- A6XX_SP_SMO_TAG = 54,
- A6XX_SP_STATE_DATA = 55,
- A6XX_HLSQ_CHUNK_CVS_RAM = 73,
- A6XX_HLSQ_CHUNK_CPS_RAM = 74,
- A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
- A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
- A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
- A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
- A6XX_HLSQ_CVS_MISC_RAM = 80,
- A6XX_HLSQ_CPS_MISC_RAM = 81,
- A6XX_HLSQ_INST_RAM = 82,
- A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
- A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
- A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
- A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
- A6XX_HLSQ_INST_RAM_TAG = 87,
- A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
- A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
- A6XX_HLSQ_PWR_REST_RAM = 90,
- A6XX_HLSQ_PWR_REST_TAG = 91,
- A6XX_HLSQ_DATAPATH_META = 96,
- A6XX_HLSQ_FRONTEND_META = 97,
- A6XX_HLSQ_INDIRECT_META = 98,
- A6XX_HLSQ_BACKEND_META = 99,
- A6XX_SP_LB_6_DATA = 112,
- A6XX_SP_LB_7_DATA = 113,
- A6XX_HLSQ_INST_RAM_1 = 115,
-};
-
-enum a7xx_statetype_id {
- A7XX_TP0_NCTX_REG = 0,
- A7XX_TP0_CTX0_3D_CVS_REG = 1,
- A7XX_TP0_CTX0_3D_CPS_REG = 2,
- A7XX_TP0_CTX1_3D_CVS_REG = 3,
- A7XX_TP0_CTX1_3D_CPS_REG = 4,
- A7XX_TP0_CTX2_3D_CPS_REG = 5,
- A7XX_TP0_CTX3_3D_CPS_REG = 6,
- A7XX_TP0_TMO_DATA = 9,
- A7XX_TP0_SMO_DATA = 10,
- A7XX_TP0_MIPMAP_BASE_DATA = 11,
- A7XX_SP_NCTX_REG = 32,
- A7XX_SP_CTX0_3D_CVS_REG = 33,
- A7XX_SP_CTX0_3D_CPS_REG = 34,
- A7XX_SP_CTX1_3D_CVS_REG = 35,
- A7XX_SP_CTX1_3D_CPS_REG = 36,
- A7XX_SP_CTX2_3D_CPS_REG = 37,
- A7XX_SP_CTX3_3D_CPS_REG = 38,
- A7XX_SP_INST_DATA = 39,
- A7XX_SP_INST_DATA_1 = 40,
- A7XX_SP_LB_0_DATA = 41,
- A7XX_SP_LB_1_DATA = 42,
- A7XX_SP_LB_2_DATA = 43,
- A7XX_SP_LB_3_DATA = 44,
- A7XX_SP_LB_4_DATA = 45,
- A7XX_SP_LB_5_DATA = 46,
- A7XX_SP_LB_6_DATA = 47,
- A7XX_SP_LB_7_DATA = 48,
- A7XX_SP_CB_RAM = 49,
- A7XX_SP_LB_13_DATA = 50,
- A7XX_SP_LB_14_DATA = 51,
- A7XX_SP_INST_TAG = 52,
- A7XX_SP_INST_DATA_2 = 53,
- A7XX_SP_TMO_TAG = 54,
- A7XX_SP_SMO_TAG = 55,
- A7XX_SP_STATE_DATA = 56,
- A7XX_SP_HWAVE_RAM = 57,
- A7XX_SP_L0_INST_BUF = 58,
- A7XX_SP_LB_8_DATA = 59,
- A7XX_SP_LB_9_DATA = 60,
- A7XX_SP_LB_10_DATA = 61,
- A7XX_SP_LB_11_DATA = 62,
- A7XX_SP_LB_12_DATA = 63,
- A7XX_HLSQ_DATAPATH_DSTR_META = 64,
- A7XX_HLSQ_L2STC_TAG_RAM = 67,
- A7XX_HLSQ_L2STC_INFO_CMD = 68,
- A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG = 69,
- A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG = 70,
- A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM = 71,
- A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM = 72,
- A7XX_HLSQ_CHUNK_CVS_RAM = 73,
- A7XX_HLSQ_CHUNK_CPS_RAM = 74,
- A7XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
- A7XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
- A7XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
- A7XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
- A7XX_HLSQ_CVS_MISC_RAM = 79,
- A7XX_HLSQ_CPS_MISC_RAM = 80,
- A7XX_HLSQ_CPS_MISC_RAM_1 = 81,
- A7XX_HLSQ_INST_RAM = 82,
- A7XX_HLSQ_GFX_CVS_CONST_RAM = 83,
- A7XX_HLSQ_GFX_CPS_CONST_RAM = 84,
- A7XX_HLSQ_CVS_MISC_RAM_TAG = 85,
- A7XX_HLSQ_CPS_MISC_RAM_TAG = 86,
- A7XX_HLSQ_INST_RAM_TAG = 87,
- A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
- A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
- A7XX_HLSQ_GFX_LOCAL_MISC_RAM = 90,
- A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG = 91,
- A7XX_HLSQ_INST_RAM_1 = 92,
- A7XX_HLSQ_STPROC_META = 93,
- A7XX_HLSQ_BV_BE_META = 94,
- A7XX_HLSQ_INST_RAM_2 = 95,
- A7XX_HLSQ_DATAPATH_META = 96,
- A7XX_HLSQ_FRONTEND_META = 97,
- A7XX_HLSQ_INDIRECT_META = 98,
- A7XX_HLSQ_BACKEND_META = 99,
-};
-
-enum a6xx_debugbus_id {
- A6XX_DBGBUS_CP = 1,
- A6XX_DBGBUS_RBBM = 2,
- A6XX_DBGBUS_VBIF = 3,
- A6XX_DBGBUS_HLSQ = 4,
- A6XX_DBGBUS_UCHE = 5,
- A6XX_DBGBUS_DPM = 6,
- A6XX_DBGBUS_TESS = 7,
- A6XX_DBGBUS_PC = 8,
- A6XX_DBGBUS_VFDP = 9,
- A6XX_DBGBUS_VPC = 10,
- A6XX_DBGBUS_TSE = 11,
- A6XX_DBGBUS_RAS = 12,
- A6XX_DBGBUS_VSC = 13,
- A6XX_DBGBUS_COM = 14,
- A6XX_DBGBUS_LRZ = 16,
- A6XX_DBGBUS_A2D = 17,
- A6XX_DBGBUS_CCUFCHE = 18,
- A6XX_DBGBUS_GMU_CX = 19,
- A6XX_DBGBUS_RBP = 20,
- A6XX_DBGBUS_DCS = 21,
- A6XX_DBGBUS_DBGC = 22,
- A6XX_DBGBUS_CX = 23,
- A6XX_DBGBUS_GMU_GX = 24,
- A6XX_DBGBUS_TPFCHE = 25,
- A6XX_DBGBUS_GBIF_GX = 26,
- A6XX_DBGBUS_GPC = 29,
- A6XX_DBGBUS_LARC = 30,
- A6XX_DBGBUS_HLSQ_SPTP = 31,
- A6XX_DBGBUS_RB_0 = 32,
- A6XX_DBGBUS_RB_1 = 33,
- A6XX_DBGBUS_RB_2 = 34,
- A6XX_DBGBUS_UCHE_WRAPPER = 36,
- A6XX_DBGBUS_CCU_0 = 40,
- A6XX_DBGBUS_CCU_1 = 41,
- A6XX_DBGBUS_CCU_2 = 42,
- A6XX_DBGBUS_VFD_0 = 56,
- A6XX_DBGBUS_VFD_1 = 57,
- A6XX_DBGBUS_VFD_2 = 58,
- A6XX_DBGBUS_VFD_3 = 59,
- A6XX_DBGBUS_VFD_4 = 60,
- A6XX_DBGBUS_VFD_5 = 61,
- A6XX_DBGBUS_SP_0 = 64,
- A6XX_DBGBUS_SP_1 = 65,
- A6XX_DBGBUS_SP_2 = 66,
- A6XX_DBGBUS_TPL1_0 = 72,
- A6XX_DBGBUS_TPL1_1 = 73,
- A6XX_DBGBUS_TPL1_2 = 74,
- A6XX_DBGBUS_TPL1_3 = 75,
- A6XX_DBGBUS_TPL1_4 = 76,
- A6XX_DBGBUS_TPL1_5 = 77,
- A6XX_DBGBUS_SPTP_0 = 88,
- A6XX_DBGBUS_SPTP_1 = 89,
- A6XX_DBGBUS_SPTP_2 = 90,
- A6XX_DBGBUS_SPTP_3 = 91,
- A6XX_DBGBUS_SPTP_4 = 92,
- A6XX_DBGBUS_SPTP_5 = 93,
-};
-
-enum a7xx_state_location {
- A7XX_HLSQ_STATE = 0,
- A7XX_HLSQ_DP = 1,
- A7XX_SP_TOP = 2,
- A7XX_USPTP = 3,
-};
-
-enum a7xx_pipe {
- A7XX_PIPE_NONE = 0,
- A7XX_PIPE_BR = 1,
- A7XX_PIPE_BV = 2,
- A7XX_PIPE_LPAC = 3,
-};
-
-enum a7xx_cluster {
- A7XX_CLUSTER_NONE = 0,
- A7XX_CLUSTER_FE = 1,
- A7XX_CLUSTER_SP_VS = 2,
- A7XX_CLUSTER_PC_VS = 3,
- A7XX_CLUSTER_GRAS = 4,
- A7XX_CLUSTER_SP_PS = 5,
- A7XX_CLUSTER_VPC_PS = 6,
- A7XX_CLUSTER_PS = 7,
-};
-
-enum a7xx_debugbus_id {
- A7XX_DBGBUS_CP_0_0 = 1,
- A7XX_DBGBUS_CP_0_1 = 2,
- A7XX_DBGBUS_RBBM = 3,
- A7XX_DBGBUS_GBIF_GX = 5,
- A7XX_DBGBUS_GBIF_CX = 6,
- A7XX_DBGBUS_HLSQ = 7,
- A7XX_DBGBUS_UCHE_0 = 9,
- A7XX_DBGBUS_UCHE_1 = 10,
- A7XX_DBGBUS_TESS_BR = 13,
- A7XX_DBGBUS_TESS_BV = 14,
- A7XX_DBGBUS_PC_BR = 17,
- A7XX_DBGBUS_PC_BV = 18,
- A7XX_DBGBUS_VFDP_BR = 21,
- A7XX_DBGBUS_VFDP_BV = 22,
- A7XX_DBGBUS_VPC_BR = 25,
- A7XX_DBGBUS_VPC_BV = 26,
- A7XX_DBGBUS_TSE_BR = 29,
- A7XX_DBGBUS_TSE_BV = 30,
- A7XX_DBGBUS_RAS_BR = 33,
- A7XX_DBGBUS_RAS_BV = 34,
- A7XX_DBGBUS_VSC = 37,
- A7XX_DBGBUS_COM_0 = 39,
- A7XX_DBGBUS_LRZ_BR = 43,
- A7XX_DBGBUS_LRZ_BV = 44,
- A7XX_DBGBUS_UFC_0 = 47,
- A7XX_DBGBUS_UFC_1 = 48,
- A7XX_DBGBUS_GMU_GX = 55,
- A7XX_DBGBUS_DBGC = 59,
- A7XX_DBGBUS_CX = 60,
- A7XX_DBGBUS_GMU_CX = 61,
- A7XX_DBGBUS_GPC_BR = 62,
- A7XX_DBGBUS_GPC_BV = 63,
- A7XX_DBGBUS_LARC = 66,
- A7XX_DBGBUS_HLSQ_SPTP = 68,
- A7XX_DBGBUS_RB_0 = 70,
- A7XX_DBGBUS_RB_1 = 71,
- A7XX_DBGBUS_RB_2 = 72,
- A7XX_DBGBUS_RB_3 = 73,
- A7XX_DBGBUS_RB_4 = 74,
- A7XX_DBGBUS_RB_5 = 75,
- A7XX_DBGBUS_UCHE_WRAPPER = 102,
- A7XX_DBGBUS_CCU_0 = 106,
- A7XX_DBGBUS_CCU_1 = 107,
- A7XX_DBGBUS_CCU_2 = 108,
- A7XX_DBGBUS_CCU_3 = 109,
- A7XX_DBGBUS_CCU_4 = 110,
- A7XX_DBGBUS_CCU_5 = 111,
- A7XX_DBGBUS_VFD_BR_0 = 138,
- A7XX_DBGBUS_VFD_BR_1 = 139,
- A7XX_DBGBUS_VFD_BR_2 = 140,
- A7XX_DBGBUS_VFD_BR_3 = 141,
- A7XX_DBGBUS_VFD_BR_4 = 142,
- A7XX_DBGBUS_VFD_BR_5 = 143,
- A7XX_DBGBUS_VFD_BR_6 = 144,
- A7XX_DBGBUS_VFD_BR_7 = 145,
- A7XX_DBGBUS_VFD_BV_0 = 202,
- A7XX_DBGBUS_VFD_BV_1 = 203,
- A7XX_DBGBUS_VFD_BV_2 = 204,
- A7XX_DBGBUS_VFD_BV_3 = 205,
- A7XX_DBGBUS_USP_0 = 234,
- A7XX_DBGBUS_USP_1 = 235,
- A7XX_DBGBUS_USP_2 = 236,
- A7XX_DBGBUS_USP_3 = 237,
- A7XX_DBGBUS_USP_4 = 238,
- A7XX_DBGBUS_USP_5 = 239,
- A7XX_DBGBUS_TP_0 = 266,
- A7XX_DBGBUS_TP_1 = 267,
- A7XX_DBGBUS_TP_2 = 268,
- A7XX_DBGBUS_TP_3 = 269,
- A7XX_DBGBUS_TP_4 = 270,
- A7XX_DBGBUS_TP_5 = 271,
- A7XX_DBGBUS_TP_6 = 272,
- A7XX_DBGBUS_TP_7 = 273,
- A7XX_DBGBUS_TP_8 = 274,
- A7XX_DBGBUS_TP_9 = 275,
- A7XX_DBGBUS_TP_10 = 276,
- A7XX_DBGBUS_TP_11 = 277,
- A7XX_DBGBUS_USPTP_0 = 330,
- A7XX_DBGBUS_USPTP_1 = 331,
- A7XX_DBGBUS_USPTP_2 = 332,
- A7XX_DBGBUS_USPTP_3 = 333,
- A7XX_DBGBUS_USPTP_4 = 334,
- A7XX_DBGBUS_USPTP_5 = 335,
- A7XX_DBGBUS_USPTP_6 = 336,
- A7XX_DBGBUS_USPTP_7 = 337,
- A7XX_DBGBUS_USPTP_8 = 338,
- A7XX_DBGBUS_USPTP_9 = 339,
- A7XX_DBGBUS_USPTP_10 = 340,
- A7XX_DBGBUS_USPTP_11 = 341,
- A7XX_DBGBUS_CCHE_0 = 396,
- A7XX_DBGBUS_CCHE_1 = 397,
- A7XX_DBGBUS_CCHE_2 = 398,
- A7XX_DBGBUS_VPC_DSTR_0 = 408,
- A7XX_DBGBUS_VPC_DSTR_1 = 409,
- A7XX_DBGBUS_VPC_DSTR_2 = 410,
- A7XX_DBGBUS_HLSQ_DP_STR_0 = 411,
- A7XX_DBGBUS_HLSQ_DP_STR_1 = 412,
- A7XX_DBGBUS_HLSQ_DP_STR_2 = 413,
- A7XX_DBGBUS_HLSQ_DP_STR_3 = 414,
- A7XX_DBGBUS_HLSQ_DP_STR_4 = 415,
- A7XX_DBGBUS_HLSQ_DP_STR_5 = 416,
- A7XX_DBGBUS_UFC_DSTR_0 = 443,
- A7XX_DBGBUS_UFC_DSTR_1 = 444,
- A7XX_DBGBUS_UFC_DSTR_2 = 445,
- A7XX_DBGBUS_CGC_SUBCORE = 446,
- A7XX_DBGBUS_CGC_CORE = 447,
-};
-
-enum a6xx_cp_perfcounter_select {
- PERF_CP_ALWAYS_COUNT = 0,
- PERF_CP_BUSY_GFX_CORE_IDLE = 1,
- PERF_CP_BUSY_CYCLES = 2,
- PERF_CP_NUM_PREEMPTIONS = 3,
- PERF_CP_PREEMPTION_REACTION_DELAY = 4,
- PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
- PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
- PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
- PERF_CP_PREDICATED_DRAWS_KILLED = 8,
- PERF_CP_MODE_SWITCH = 9,
- PERF_CP_ZPASS_DONE = 10,
- PERF_CP_CONTEXT_DONE = 11,
- PERF_CP_CACHE_FLUSH = 12,
- PERF_CP_LONG_PREEMPTIONS = 13,
- PERF_CP_SQE_I_CACHE_STARVE = 14,
- PERF_CP_SQE_IDLE = 15,
- PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
- PERF_CP_SQE_PM4_STARVE_SDS = 17,
- PERF_CP_SQE_MRB_STARVE = 18,
- PERF_CP_SQE_RRB_STARVE = 19,
- PERF_CP_SQE_VSD_STARVE = 20,
- PERF_CP_VSD_DECODE_STARVE = 21,
- PERF_CP_SQE_PIPE_OUT_STALL = 22,
- PERF_CP_SQE_SYNC_STALL = 23,
- PERF_CP_SQE_PM4_WFI_STALL = 24,
- PERF_CP_SQE_SYS_WFI_STALL = 25,
- PERF_CP_SQE_T4_EXEC = 26,
- PERF_CP_SQE_LOAD_STATE_EXEC = 27,
- PERF_CP_SQE_SAVE_SDS_STATE = 28,
- PERF_CP_SQE_DRAW_EXEC = 29,
- PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
- PERF_CP_SQE_EXEC_PROFILED = 31,
- PERF_CP_MEMORY_POOL_EMPTY = 32,
- PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
- PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
- PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
- PERF_CP_AHB_STALL_SQE_GMU = 36,
- PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
- PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
- PERF_CP_CLUSTER0_EMPTY = 39,
- PERF_CP_CLUSTER1_EMPTY = 40,
- PERF_CP_CLUSTER2_EMPTY = 41,
- PERF_CP_CLUSTER3_EMPTY = 42,
- PERF_CP_CLUSTER4_EMPTY = 43,
- PERF_CP_CLUSTER5_EMPTY = 44,
- PERF_CP_PM4_DATA = 45,
- PERF_CP_PM4_HEADERS = 46,
- PERF_CP_VBIF_READ_BEATS = 47,
- PERF_CP_VBIF_WRITE_BEATS = 48,
- PERF_CP_SQE_INSTR_COUNTER = 49,
-};
-
-enum a6xx_rbbm_perfcounter_select {
- PERF_RBBM_ALWAYS_COUNT = 0,
- PERF_RBBM_ALWAYS_ON = 1,
- PERF_RBBM_TSE_BUSY = 2,
- PERF_RBBM_RAS_BUSY = 3,
- PERF_RBBM_PC_DCALL_BUSY = 4,
- PERF_RBBM_PC_VSD_BUSY = 5,
- PERF_RBBM_STATUS_MASKED = 6,
- PERF_RBBM_COM_BUSY = 7,
- PERF_RBBM_DCOM_BUSY = 8,
- PERF_RBBM_VBIF_BUSY = 9,
- PERF_RBBM_VSC_BUSY = 10,
- PERF_RBBM_TESS_BUSY = 11,
- PERF_RBBM_UCHE_BUSY = 12,
- PERF_RBBM_HLSQ_BUSY = 13,
-};
-
-enum a6xx_pc_perfcounter_select {
- PERF_PC_BUSY_CYCLES = 0,
- PERF_PC_WORKING_CYCLES = 1,
- PERF_PC_STALL_CYCLES_VFD = 2,
- PERF_PC_STALL_CYCLES_TSE = 3,
- PERF_PC_STALL_CYCLES_VPC = 4,
- PERF_PC_STALL_CYCLES_UCHE = 5,
- PERF_PC_STALL_CYCLES_TESS = 6,
- PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
- PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
- PERF_PC_PASS1_TF_STALL_CYCLES = 9,
- PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
- PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
- PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
- PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
- PERF_PC_STARVE_CYCLES_DI = 14,
- PERF_PC_VIS_STREAMS_LOADED = 15,
- PERF_PC_INSTANCES = 16,
- PERF_PC_VPC_PRIMITIVES = 17,
- PERF_PC_DEAD_PRIM = 18,
- PERF_PC_LIVE_PRIM = 19,
- PERF_PC_VERTEX_HITS = 20,
- PERF_PC_IA_VERTICES = 21,
- PERF_PC_IA_PRIMITIVES = 22,
- PERF_PC_GS_PRIMITIVES = 23,
- PERF_PC_HS_INVOCATIONS = 24,
- PERF_PC_DS_INVOCATIONS = 25,
- PERF_PC_VS_INVOCATIONS = 26,
- PERF_PC_GS_INVOCATIONS = 27,
- PERF_PC_DS_PRIMITIVES = 28,
- PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
- PERF_PC_3D_DRAWCALLS = 30,
- PERF_PC_2D_DRAWCALLS = 31,
- PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
- PERF_TESS_BUSY_CYCLES = 33,
- PERF_TESS_WORKING_CYCLES = 34,
- PERF_TESS_STALL_CYCLES_PC = 35,
- PERF_TESS_STARVE_CYCLES_PC = 36,
- PERF_PC_TSE_TRANSACTION = 37,
- PERF_PC_TSE_VERTEX = 38,
- PERF_PC_TESS_PC_UV_TRANS = 39,
- PERF_PC_TESS_PC_UV_PATCHES = 40,
- PERF_PC_TESS_FACTOR_TRANS = 41,
-};
-
-enum a6xx_vfd_perfcounter_select {
- PERF_VFD_BUSY_CYCLES = 0,
- PERF_VFD_STALL_CYCLES_UCHE = 1,
- PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
- PERF_VFD_STALL_CYCLES_SP_INFO = 3,
- PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
- PERF_VFD_STARVE_CYCLES_UCHE = 5,
- PERF_VFD_RBUFFER_FULL = 6,
- PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
- PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
- PERF_VFD_NUM_ATTRIBUTES = 9,
- PERF_VFD_UPPER_SHADER_FIBERS = 10,
- PERF_VFD_LOWER_SHADER_FIBERS = 11,
- PERF_VFD_MODE_0_FIBERS = 12,
- PERF_VFD_MODE_1_FIBERS = 13,
- PERF_VFD_MODE_2_FIBERS = 14,
- PERF_VFD_MODE_3_FIBERS = 15,
- PERF_VFD_MODE_4_FIBERS = 16,
- PERF_VFD_TOTAL_VERTICES = 17,
- PERF_VFDP_STALL_CYCLES_VFD = 18,
- PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
- PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
- PERF_VFDP_STARVE_CYCLES_PC = 21,
- PERF_VFDP_VS_STAGE_WAVES = 22,
-};
-
-enum a6xx_hlsq_perfcounter_select {
- PERF_HLSQ_BUSY_CYCLES = 0,
- PERF_HLSQ_STALL_CYCLES_UCHE = 1,
- PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
- PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
- PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
- PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
- PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
- PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
- PERF_HLSQ_QUADS = 8,
- PERF_HLSQ_CS_INVOCATIONS = 9,
- PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
- PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
- PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
- PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
- PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
- PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
- PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
- PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
- PERF_HLSQ_STALL_CYCLES_VPC = 18,
- PERF_HLSQ_PIXELS = 19,
- PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
-};
-
-enum a6xx_vpc_perfcounter_select {
- PERF_VPC_BUSY_CYCLES = 0,
- PERF_VPC_WORKING_CYCLES = 1,
- PERF_VPC_STALL_CYCLES_UCHE = 2,
- PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
- PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
- PERF_VPC_STALL_CYCLES_PC = 5,
- PERF_VPC_STALL_CYCLES_SP_LM = 6,
- PERF_VPC_STARVE_CYCLES_SP = 7,
- PERF_VPC_STARVE_CYCLES_LRZ = 8,
- PERF_VPC_PC_PRIMITIVES = 9,
- PERF_VPC_SP_COMPONENTS = 10,
- PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
- PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
- PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
- PERF_VPC_LM_TRANSACTION = 14,
- PERF_VPC_STREAMOUT_TRANSACTION = 15,
- PERF_VPC_VS_BUSY_CYCLES = 16,
- PERF_VPC_PS_BUSY_CYCLES = 17,
- PERF_VPC_VS_WORKING_CYCLES = 18,
- PERF_VPC_PS_WORKING_CYCLES = 19,
- PERF_VPC_STARVE_CYCLES_RB = 20,
- PERF_VPC_NUM_VPCRAM_READ_POS = 21,
- PERF_VPC_WIT_FULL_CYCLES = 22,
- PERF_VPC_VPCRAM_FULL_CYCLES = 23,
- PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
- PERF_VPC_NUM_VPCRAM_WRITE = 25,
- PERF_VPC_NUM_VPCRAM_READ_SO = 26,
- PERF_VPC_NUM_ATTR_REQ_LM = 27,
-};
-
-enum a6xx_tse_perfcounter_select {
- PERF_TSE_BUSY_CYCLES = 0,
- PERF_TSE_CLIPPING_CYCLES = 1,
- PERF_TSE_STALL_CYCLES_RAS = 2,
- PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
- PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
- PERF_TSE_STARVE_CYCLES_PC = 5,
- PERF_TSE_INPUT_PRIM = 6,
- PERF_TSE_INPUT_NULL_PRIM = 7,
- PERF_TSE_TRIVAL_REJ_PRIM = 8,
- PERF_TSE_CLIPPED_PRIM = 9,
- PERF_TSE_ZERO_AREA_PRIM = 10,
- PERF_TSE_FACENESS_CULLED_PRIM = 11,
- PERF_TSE_ZERO_PIXEL_PRIM = 12,
- PERF_TSE_OUTPUT_NULL_PRIM = 13,
- PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
- PERF_TSE_CINVOCATION = 15,
- PERF_TSE_CPRIMITIVES = 16,
- PERF_TSE_2D_INPUT_PRIM = 17,
- PERF_TSE_2D_ALIVE_CYCLES = 18,
- PERF_TSE_CLIP_PLANES = 19,
-};
-
-enum a6xx_ras_perfcounter_select {
- PERF_RAS_BUSY_CYCLES = 0,
- PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
- PERF_RAS_STALL_CYCLES_LRZ = 2,
- PERF_RAS_STARVE_CYCLES_TSE = 3,
- PERF_RAS_SUPER_TILES = 4,
- PERF_RAS_8X4_TILES = 5,
- PERF_RAS_MASKGEN_ACTIVE = 6,
- PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
- PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
- PERF_RAS_PRIM_KILLED_INVISILBE = 9,
- PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
- PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
- PERF_RAS_BLOCKS = 12,
-};
-
-enum a6xx_uche_perfcounter_select {
- PERF_UCHE_BUSY_CYCLES = 0,
- PERF_UCHE_STALL_CYCLES_ARBITER = 1,
- PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
- PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
- PERF_UCHE_VBIF_READ_BEATS_TP = 4,
- PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
- PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
- PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
- PERF_UCHE_VBIF_READ_BEATS_SP = 8,
- PERF_UCHE_READ_REQUESTS_TP = 9,
- PERF_UCHE_READ_REQUESTS_VFD = 10,
- PERF_UCHE_READ_REQUESTS_HLSQ = 11,
- PERF_UCHE_READ_REQUESTS_LRZ = 12,
- PERF_UCHE_READ_REQUESTS_SP = 13,
- PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
- PERF_UCHE_WRITE_REQUESTS_SP = 15,
- PERF_UCHE_WRITE_REQUESTS_VPC = 16,
- PERF_UCHE_WRITE_REQUESTS_VSC = 17,
- PERF_UCHE_EVICTS = 18,
- PERF_UCHE_BANK_REQ0 = 19,
- PERF_UCHE_BANK_REQ1 = 20,
- PERF_UCHE_BANK_REQ2 = 21,
- PERF_UCHE_BANK_REQ3 = 22,
- PERF_UCHE_BANK_REQ4 = 23,
- PERF_UCHE_BANK_REQ5 = 24,
- PERF_UCHE_BANK_REQ6 = 25,
- PERF_UCHE_BANK_REQ7 = 26,
- PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
- PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
- PERF_UCHE_GMEM_READ_BEATS = 29,
- PERF_UCHE_TPH_REF_FULL = 30,
- PERF_UCHE_TPH_VICTIM_FULL = 31,
- PERF_UCHE_TPH_EXT_FULL = 32,
- PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
- PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
- PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
- PERF_UCHE_VBIF_READ_BEATS_PC = 36,
- PERF_UCHE_READ_REQUESTS_PC = 37,
- PERF_UCHE_RAM_READ_REQ = 38,
- PERF_UCHE_RAM_WRITE_REQ = 39,
-};
-
-enum a6xx_tp_perfcounter_select {
- PERF_TP_BUSY_CYCLES = 0,
- PERF_TP_STALL_CYCLES_UCHE = 1,
- PERF_TP_LATENCY_CYCLES = 2,
- PERF_TP_LATENCY_TRANS = 3,
- PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
- PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
- PERF_TP_L1_CACHELINE_REQUESTS = 6,
- PERF_TP_L1_CACHELINE_MISSES = 7,
- PERF_TP_SP_TP_TRANS = 8,
- PERF_TP_TP_SP_TRANS = 9,
- PERF_TP_OUTPUT_PIXELS = 10,
- PERF_TP_FILTER_WORKLOAD_16BIT = 11,
- PERF_TP_FILTER_WORKLOAD_32BIT = 12,
- PERF_TP_QUADS_RECEIVED = 13,
- PERF_TP_QUADS_OFFSET = 14,
- PERF_TP_QUADS_SHADOW = 15,
- PERF_TP_QUADS_ARRAY = 16,
- PERF_TP_QUADS_GRADIENT = 17,
- PERF_TP_QUADS_1D = 18,
- PERF_TP_QUADS_2D = 19,
- PERF_TP_QUADS_BUFFER = 20,
- PERF_TP_QUADS_3D = 21,
- PERF_TP_QUADS_CUBE = 22,
- PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
- PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
- PERF_TP_OUTPUT_PIXELS_POINT = 25,
- PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
- PERF_TP_OUTPUT_PIXELS_MIP = 27,
- PERF_TP_OUTPUT_PIXELS_ANISO = 28,
- PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
- PERF_TP_FLAG_CACHE_REQUESTS = 30,
- PERF_TP_FLAG_CACHE_MISSES = 31,
- PERF_TP_L1_5_L2_REQUESTS = 32,
- PERF_TP_2D_OUTPUT_PIXELS = 33,
- PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
- PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
- PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
- PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
- PERF_TP_TPA2TPC_TRANS = 38,
- PERF_TP_L1_MISSES_ASTC_1TILE = 39,
- PERF_TP_L1_MISSES_ASTC_2TILE = 40,
- PERF_TP_L1_MISSES_ASTC_4TILE = 41,
- PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
- PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
- PERF_TP_L1_BANK_CONFLICT = 44,
- PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
- PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
- PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
- PERF_TP_FRONTEND_WORKING_CYCLES = 48,
- PERF_TP_L1_TAG_WORKING_CYCLES = 49,
- PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
- PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
- PERF_TP_BACKEND_WORKING_CYCLES = 52,
- PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
- PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
- PERF_TP_STARVE_CYCLES_SP = 55,
- PERF_TP_STARVE_CYCLES_UCHE = 56,
-};
-
-enum a6xx_sp_perfcounter_select {
- PERF_SP_BUSY_CYCLES = 0,
- PERF_SP_ALU_WORKING_CYCLES = 1,
- PERF_SP_EFU_WORKING_CYCLES = 2,
- PERF_SP_STALL_CYCLES_VPC = 3,
- PERF_SP_STALL_CYCLES_TP = 4,
- PERF_SP_STALL_CYCLES_UCHE = 5,
- PERF_SP_STALL_CYCLES_RB = 6,
- PERF_SP_NON_EXECUTION_CYCLES = 7,
- PERF_SP_WAVE_CONTEXTS = 8,
- PERF_SP_WAVE_CONTEXT_CYCLES = 9,
- PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
- PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
- PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
- PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
- PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
- PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
- PERF_SP_WAVE_CTRL_CYCLES = 16,
- PERF_SP_WAVE_LOAD_CYCLES = 17,
- PERF_SP_WAVE_EMIT_CYCLES = 18,
- PERF_SP_WAVE_NOP_CYCLES = 19,
- PERF_SP_WAVE_WAIT_CYCLES = 20,
- PERF_SP_WAVE_FETCH_CYCLES = 21,
- PERF_SP_WAVE_IDLE_CYCLES = 22,
- PERF_SP_WAVE_END_CYCLES = 23,
- PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
- PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
- PERF_SP_WAVE_JOIN_CYCLES = 26,
- PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
- PERF_SP_LM_STORE_INSTRUCTIONS = 28,
- PERF_SP_LM_ATOMICS = 29,
- PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
- PERF_SP_GM_STORE_INSTRUCTIONS = 31,
- PERF_SP_GM_ATOMICS = 32,
- PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
- PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
- PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
- PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
- PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
- PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
- PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
- PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
- PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
- PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
- PERF_SP_VS_INSTRUCTIONS = 43,
- PERF_SP_FS_INSTRUCTIONS = 44,
- PERF_SP_ADDR_LOCK_COUNT = 45,
- PERF_SP_UCHE_READ_TRANS = 46,
- PERF_SP_UCHE_WRITE_TRANS = 47,
- PERF_SP_EXPORT_VPC_TRANS = 48,
- PERF_SP_EXPORT_RB_TRANS = 49,
- PERF_SP_PIXELS_KILLED = 50,
- PERF_SP_ICL1_REQUESTS = 51,
- PERF_SP_ICL1_MISSES = 52,
- PERF_SP_HS_INSTRUCTIONS = 53,
- PERF_SP_DS_INSTRUCTIONS = 54,
- PERF_SP_GS_INSTRUCTIONS = 55,
- PERF_SP_CS_INSTRUCTIONS = 56,
- PERF_SP_GPR_READ = 57,
- PERF_SP_GPR_WRITE = 58,
- PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
- PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
- PERF_SP_LM_BANK_CONFLICTS = 61,
- PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
- PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
- PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
- PERF_SP_LM_WORKING_CYCLES = 65,
- PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
- PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
- PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
- PERF_SP_STARVE_CYCLES_HLSQ = 69,
- PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
- PERF_SP_WORKING_EU = 71,
- PERF_SP_ANY_EU_WORKING = 72,
- PERF_SP_WORKING_EU_FS_STAGE = 73,
- PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
- PERF_SP_WORKING_EU_VS_STAGE = 75,
- PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
- PERF_SP_WORKING_EU_CS_STAGE = 77,
- PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
- PERF_SP_GPR_READ_PREFETCH = 79,
- PERF_SP_GPR_READ_CONFLICT = 80,
- PERF_SP_GPR_WRITE_CONFLICT = 81,
- PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
- PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
- PERF_SP_EXECUTABLE_WAVES = 84,
-};
-
-enum a6xx_rb_perfcounter_select {
- PERF_RB_BUSY_CYCLES = 0,
- PERF_RB_STALL_CYCLES_HLSQ = 1,
- PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
- PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
- PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
- PERF_RB_STARVE_CYCLES_SP = 5,
- PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
- PERF_RB_STARVE_CYCLES_CCU = 7,
- PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
- PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
- PERF_RB_Z_WORKLOAD = 10,
- PERF_RB_HLSQ_ACTIVE = 11,
- PERF_RB_Z_READ = 12,
- PERF_RB_Z_WRITE = 13,
- PERF_RB_C_READ = 14,
- PERF_RB_C_WRITE = 15,
- PERF_RB_TOTAL_PASS = 16,
- PERF_RB_Z_PASS = 17,
- PERF_RB_Z_FAIL = 18,
- PERF_RB_S_FAIL = 19,
- PERF_RB_BLENDED_FXP_COMPONENTS = 20,
- PERF_RB_BLENDED_FP16_COMPONENTS = 21,
- PERF_RB_PS_INVOCATIONS = 22,
- PERF_RB_2D_ALIVE_CYCLES = 23,
- PERF_RB_2D_STALL_CYCLES_A2D = 24,
- PERF_RB_2D_STARVE_CYCLES_SRC = 25,
- PERF_RB_2D_STARVE_CYCLES_SP = 26,
- PERF_RB_2D_STARVE_CYCLES_DST = 27,
- PERF_RB_2D_VALID_PIXELS = 28,
- PERF_RB_3D_PIXELS = 29,
- PERF_RB_BLENDER_WORKING_CYCLES = 30,
- PERF_RB_ZPROC_WORKING_CYCLES = 31,
- PERF_RB_CPROC_WORKING_CYCLES = 32,
- PERF_RB_SAMPLER_WORKING_CYCLES = 33,
- PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
- PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
- PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
- PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
- PERF_RB_STALL_CYCLES_VPC = 38,
- PERF_RB_2D_INPUT_TRANS = 39,
- PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
- PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
- PERF_RB_BLENDED_FP32_COMPONENTS = 42,
- PERF_RB_COLOR_PIX_TILES = 43,
- PERF_RB_STALL_CYCLES_CCU = 44,
- PERF_RB_EARLY_Z_ARB3_GRANT = 45,
- PERF_RB_LATE_Z_ARB3_GRANT = 46,
- PERF_RB_EARLY_Z_SKIP_GRANT = 47,
-};
-
-enum a6xx_vsc_perfcounter_select {
- PERF_VSC_BUSY_CYCLES = 0,
- PERF_VSC_WORKING_CYCLES = 1,
- PERF_VSC_STALL_CYCLES_UCHE = 2,
- PERF_VSC_EOT_NUM = 3,
- PERF_VSC_INPUT_TILES = 4,
-};
-
-enum a6xx_ccu_perfcounter_select {
- PERF_CCU_BUSY_CYCLES = 0,
- PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
- PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
- PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
- PERF_CCU_DEPTH_BLOCKS = 4,
- PERF_CCU_COLOR_BLOCKS = 5,
- PERF_CCU_DEPTH_BLOCK_HIT = 6,
- PERF_CCU_COLOR_BLOCK_HIT = 7,
- PERF_CCU_PARTIAL_BLOCK_READ = 8,
- PERF_CCU_GMEM_READ = 9,
- PERF_CCU_GMEM_WRITE = 10,
- PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
- PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
- PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
- PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
- PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
- PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
- PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
- PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
- PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
- PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
- PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
- PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
- PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
- PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
- PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
- PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
- PERF_CCU_2D_RD_REQ = 27,
- PERF_CCU_2D_WR_REQ = 28,
-};
-
-enum a6xx_lrz_perfcounter_select {
- PERF_LRZ_BUSY_CYCLES = 0,
- PERF_LRZ_STARVE_CYCLES_RAS = 1,
- PERF_LRZ_STALL_CYCLES_RB = 2,
- PERF_LRZ_STALL_CYCLES_VSC = 3,
- PERF_LRZ_STALL_CYCLES_VPC = 4,
- PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
- PERF_LRZ_STALL_CYCLES_UCHE = 6,
- PERF_LRZ_LRZ_READ = 7,
- PERF_LRZ_LRZ_WRITE = 8,
- PERF_LRZ_READ_LATENCY = 9,
- PERF_LRZ_MERGE_CACHE_UPDATING = 10,
- PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
- PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
- PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
- PERF_LRZ_FULL_8X8_TILES = 14,
- PERF_LRZ_PARTIAL_8X8_TILES = 15,
- PERF_LRZ_TILE_KILLED = 16,
- PERF_LRZ_TOTAL_PIXEL = 17,
- PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
- PERF_LRZ_FULLY_COVERED_TILES = 19,
- PERF_LRZ_PARTIAL_COVERED_TILES = 20,
- PERF_LRZ_FEEDBACK_ACCEPT = 21,
- PERF_LRZ_FEEDBACK_DISCARD = 22,
- PERF_LRZ_FEEDBACK_STALL = 23,
- PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
- PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
- PERF_LRZ_STALL_CYCLES_VC = 26,
- PERF_LRZ_RAS_MASK_TRANS = 27,
-};
-
-enum a6xx_cmp_perfcounter_select {
- PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
- PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
- PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
- PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
- PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
- PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
- PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
- PERF_CMPDECMP_VBIF_READ_DATA = 7,
- PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
- PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
- PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
- PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
- PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
- PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
- PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
- PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
- PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
- PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
- PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
- PERF_CMPDECMP_2D_RD_DATA = 28,
- PERF_CMPDECMP_2D_WR_DATA = 29,
- PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
- PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
- PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
- PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
- PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
- PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
- PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
- PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
- PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
- PERF_CMPDECMP_2D_PIXELS = 39,
-};
-
-enum a6xx_2d_ifmt {
- R2D_UNORM8 = 16,
- R2D_INT32 = 7,
- R2D_INT16 = 6,
- R2D_INT8 = 5,
- R2D_FLOAT32 = 4,
- R2D_FLOAT16 = 3,
- R2D_UNORM8_SRGB = 1,
- R2D_RAW = 0,
-};
-
-enum a6xx_ztest_mode {
- A6XX_EARLY_Z = 0,
- A6XX_LATE_Z = 1,
- A6XX_EARLY_LRZ_LATE_Z = 2,
- A6XX_INVALID_ZTEST = 3,
-};
-
-enum a6xx_tess_spacing {
- TESS_EQUAL = 0,
- TESS_FRACTIONAL_ODD = 2,
- TESS_FRACTIONAL_EVEN = 3,
-};
-
-enum a6xx_tess_output {
- TESS_POINTS = 0,
- TESS_LINES = 1,
- TESS_CW_TRIS = 2,
- TESS_CCW_TRIS = 3,
-};
-
-enum a6xx_sequenced_thread_dist {
- DIST_SCREEN_COORD = 0,
- DIST_ALL_TO_RB0 = 1,
-};
-
-enum a6xx_single_prim_mode {
- NO_FLUSH = 0,
- FLUSH_PER_OVERLAP_AND_OVERWRITE = 1,
- FLUSH_PER_OVERLAP = 3,
-};
-
-enum a6xx_raster_mode {
- TYPE_TILED = 0,
- TYPE_WRITER = 1,
-};
-
-enum a6xx_raster_direction {
- LR_TB = 0,
- RL_TB = 1,
- LR_BT = 2,
- RB_BT = 3,
-};
-
-enum a6xx_render_mode {
- RENDERING_PASS = 0,
- BINNING_PASS = 1,
-};
-
-enum a6xx_buffers_location {
- BUFFERS_IN_GMEM = 0,
- BUFFERS_IN_SYSMEM = 3,
-};
-
-enum a6xx_lrz_dir_status {
- LRZ_DIR_LE = 1,
- LRZ_DIR_GE = 2,
- LRZ_DIR_INVALID = 3,
-};
-
-enum a6xx_fragcoord_sample_mode {
- FRAGCOORD_CENTER = 0,
- FRAGCOORD_SAMPLE = 3,
-};
-
-enum a6xx_rotation {
- ROTATE_0 = 0,
- ROTATE_90 = 1,
- ROTATE_180 = 2,
- ROTATE_270 = 3,
- ROTATE_HFLIP = 4,
- ROTATE_VFLIP = 5,
-};
-
-enum a6xx_ccu_cache_size {
- CCU_CACHE_SIZE_FULL = 0,
- CCU_CACHE_SIZE_HALF = 1,
- CCU_CACHE_SIZE_QUARTER = 2,
- CCU_CACHE_SIZE_EIGHTH = 3,
-};
-
-enum a6xx_varying_interp_mode {
- INTERP_SMOOTH = 0,
- INTERP_FLAT = 1,
- INTERP_ZERO = 2,
- INTERP_ONE = 3,
-};
-
-enum a6xx_varying_ps_repl_mode {
- PS_REPL_NONE = 0,
- PS_REPL_S = 1,
- PS_REPL_T = 2,
- PS_REPL_ONE_MINUS_T = 3,
-};
-
-enum a6xx_threadsize {
- THREAD64 = 0,
- THREAD128 = 1,
-};
-
-enum a6xx_bindless_descriptor_size {
- BINDLESS_DESCRIPTOR_16B = 1,
- BINDLESS_DESCRIPTOR_64B = 3,
-};
-
-enum a6xx_isam_mode {
- ISAMMODE_CL = 1,
- ISAMMODE_GL = 2,
-};
-
-enum a7xx_cs_yalign {
- CS_YALIGN_1 = 8,
- CS_YALIGN_2 = 4,
- CS_YALIGN_4 = 2,
- CS_YALIGN_8 = 1,
-};
-
-enum a6xx_tex_filter {
- A6XX_TEX_NEAREST = 0,
- A6XX_TEX_LINEAR = 1,
- A6XX_TEX_ANISO = 2,
- A6XX_TEX_CUBIC = 3,
-};
-
-enum a6xx_tex_clamp {
- A6XX_TEX_REPEAT = 0,
- A6XX_TEX_CLAMP_TO_EDGE = 1,
- A6XX_TEX_MIRROR_REPEAT = 2,
- A6XX_TEX_CLAMP_TO_BORDER = 3,
- A6XX_TEX_MIRROR_CLAMP = 4,
-};
-
-enum a6xx_tex_aniso {
- A6XX_TEX_ANISO_1 = 0,
- A6XX_TEX_ANISO_2 = 1,
- A6XX_TEX_ANISO_4 = 2,
- A6XX_TEX_ANISO_8 = 3,
- A6XX_TEX_ANISO_16 = 4,
-};
-
-enum a6xx_reduction_mode {
- A6XX_REDUCTION_MODE_AVERAGE = 0,
- A6XX_REDUCTION_MODE_MIN = 1,
- A6XX_REDUCTION_MODE_MAX = 2,
-};
-
-enum a6xx_tex_swiz {
- A6XX_TEX_X = 0,
- A6XX_TEX_Y = 1,
- A6XX_TEX_Z = 2,
- A6XX_TEX_W = 3,
- A6XX_TEX_ZERO = 4,
- A6XX_TEX_ONE = 5,
-};
-
-enum a6xx_tex_type {
- A6XX_TEX_1D = 0,
- A6XX_TEX_2D = 1,
- A6XX_TEX_CUBE = 2,
- A6XX_TEX_3D = 3,
- A6XX_TEX_BUFFER = 4,
-};
-
-#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
-#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
-#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_0 0x00000010
-#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_1 0x00000020
-#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
-#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
-#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
-#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
-#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
-#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
-#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
-#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
-#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
-#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
-#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT 0x00008000
-#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPTLPAC 0x00010000
-#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
-#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
-#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
-#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS_LPAC 0x00200000
-#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
-#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
-#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
-#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
-#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
-#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
-#define A6XX_RBBM_INT_0_MASK_TSBWRITEERROR 0x10000000
-#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
-#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
-
-#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
-#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
-#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
-#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
-#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
-#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
-#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
-#define A6XX_CP_INT_CP_OPCODE_ERROR_LPAC 0x00000100
-#define A6XX_CP_INT_CP_UCODE_ERROR_LPAC 0x00000200
-#define A6XX_CP_INT_CP_HW_FAULT_ERROR_LPAC 0x00000400
-#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_LPAC 0x00000800
-#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_LPAC 0x00001000
-#define A6XX_CP_INT_CP_OPCODE_ERROR_BV 0x00002000
-#define A6XX_CP_INT_CP_UCODE_ERROR_BV 0x00004000
-#define A6XX_CP_INT_CP_HW_FAULT_ERROR_BV 0x00008000
-#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV 0x00010000
-#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV 0x00020000
-
-#define REG_A6XX_CP_RB_BASE 0x00000800
-
-#define REG_A6XX_CP_RB_CNTL 0x00000802
-
-#define REG_A6XX_CP_RB_RPTR_ADDR 0x00000804
-
-#define REG_A6XX_CP_RB_RPTR 0x00000806
-
-#define REG_A6XX_CP_RB_WPTR 0x00000807
-
-#define REG_A6XX_CP_SQE_CNTL 0x00000808
-
-#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812
-#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001
-
-#define REG_A6XX_CP_HW_FAULT 0x00000821
-
-#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
-#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
-
-#define REG_A6XX_CP_STATUS_1 0x00000825
-
-#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830
-
-#define REG_A6XX_CP_MISC_CNTL 0x00000840
-
-#define REG_A6XX_CP_APRIV_CNTL 0x00000844
-#define A6XX_CP_APRIV_CNTL_CDWRITE 0x00000040
-#define A6XX_CP_APRIV_CNTL_CDREAD 0x00000020
-#define A6XX_CP_APRIV_CNTL_RBRPWB 0x00000008
-#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004
-#define A6XX_CP_APRIV_CNTL_RBFETCH 0x00000002
-#define A6XX_CP_APRIV_CNTL_ICACHE 0x00000001
-
-#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0
-
-#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
-#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK 0x000000ff
-#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_MRB_START(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK;
-}
-#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK 0x0000ff00
-#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT 8
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_VSD_START(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK;
-}
-#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
-#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
-}
-#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
-#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
-#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff
-#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
-}
-#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
-#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
-}
-
-#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
-
-#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
-
-#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
-
-#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
-
-#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
-#define A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE 0x00000008
-#define A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN 0x00000002
-#define A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN 0x00000001
-
-#define REG_A6XX_CP_SCRATCH(i0) (0x00000883 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
-
-#define REG_A6XX_CP_PROTECT(i0) (0x00000850 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
-#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
-#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
-static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
-{
- return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
-}
-#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
-#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
-static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
-{
- return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
-}
-#define A6XX_CP_PROTECT_REG_READ 0x80000000
-
-#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
-
-#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO 0x000008a1
-
-#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR 0x000008a3
-
-#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR 0x000008a5
-
-#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR 0x000008a7
-
-#define REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x000008ab
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL(i0) (0x000008d0 + 0x1*(i0))
-
-#define REG_A7XX_CP_BV_PERFCTR_CP_SEL(i0) (0x000008e0 + 0x1*(i0))
-
-#define REG_A6XX_CP_CRASH_SCRIPT_BASE 0x00000900
-
-#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
-
-#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
-
-#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
-
-#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
-
-#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
-
-#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
-
-#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
-
-#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
-
-#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
-
-#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
-
-#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
-
-#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
-
-#define REG_A6XX_CP_IB1_BASE 0x00000928
-
-#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
-
-#define REG_A6XX_CP_IB2_BASE 0x0000092b
-
-#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
-
-#define REG_A6XX_CP_SDS_BASE 0x0000092e
-
-#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
-
-#define REG_A6XX_CP_MRB_BASE 0x00000931
-
-#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933
-
-#define REG_A6XX_CP_VSD_BASE 0x00000934
-
-#define REG_A6XX_CP_ROQ_RB_STAT 0x00000939
-#define A6XX_CP_ROQ_RB_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_RB_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_RB_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_RB_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_IB1_STAT 0x0000093a
-#define A6XX_CP_ROQ_IB1_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_IB1_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_IB1_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_IB1_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_IB2_STAT 0x0000093b
-#define A6XX_CP_ROQ_IB2_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_IB2_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_IB2_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_IB2_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_SDS_STAT 0x0000093c
-#define A6XX_CP_ROQ_SDS_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_SDS_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_SDS_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_SDS_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_MRB_STAT 0x0000093d
-#define A6XX_CP_ROQ_MRB_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_MRB_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_MRB_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_MRB_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_VSD_STAT 0x0000093e
-#define A6XX_CP_ROQ_VSD_STAT_RPTR__MASK 0x000003ff
-#define A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT 0
-static inline uint32_t A6XX_CP_ROQ_VSD_STAT_RPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_RPTR__MASK;
-}
-#define A6XX_CP_ROQ_VSD_STAT_WPTR__MASK 0x03ff0000
-#define A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_VSD_STAT_WPTR(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_WPTR__MASK;
-}
-
-#define REG_A6XX_CP_IB1_DWORDS 0x00000943
-
-#define REG_A6XX_CP_IB2_DWORDS 0x00000944
-
-#define REG_A6XX_CP_SDS_DWORDS 0x00000945
-
-#define REG_A6XX_CP_MRB_DWORDS 0x00000946
-
-#define REG_A6XX_CP_VSD_DWORDS 0x00000947
-
-#define REG_A6XX_CP_ROQ_AVAIL_RB 0x00000948
-#define A6XX_CP_ROQ_AVAIL_RB_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_RB_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_RB_REM__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_AVAIL_IB1 0x00000949
-#define A6XX_CP_ROQ_AVAIL_IB1_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_IB1_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB1_REM__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_AVAIL_IB2 0x0000094a
-#define A6XX_CP_ROQ_AVAIL_IB2_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_IB2_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB2_REM__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_AVAIL_SDS 0x0000094b
-#define A6XX_CP_ROQ_AVAIL_SDS_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_SDS_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_SDS_REM__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_AVAIL_MRB 0x0000094c
-#define A6XX_CP_ROQ_AVAIL_MRB_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_MRB_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_MRB_REM__MASK;
-}
-
-#define REG_A6XX_CP_ROQ_AVAIL_VSD 0x0000094d
-#define A6XX_CP_ROQ_AVAIL_VSD_REM__MASK 0xffff0000
-#define A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT 16
-static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val)
-{
- return ((val) << A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_VSD_REM__MASK;
-}
-
-#define REG_A6XX_CP_ALWAYS_ON_COUNTER 0x00000980
-
-#define REG_A6XX_CP_AHB_CNTL 0x0000098d
-
-#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
-
-#define REG_A7XX_CP_APERTURE_CNTL_HOST 0x00000a00
-#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK 0x00003000
-#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT 12
-static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_PIPE(enum a7xx_pipe val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK;
-}
-#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK 0x00000700
-#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT 8
-static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_CLUSTER(enum a7xx_cluster val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK;
-}
-#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK 0x00000030
-#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT 4
-static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_CONTEXT(uint32_t val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK;
-}
-
-#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
-
-#define REG_A7XX_CP_APERTURE_CNTL_CD 0x00000a03
-#define A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK 0x00003000
-#define A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT 12
-static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_PIPE(enum a7xx_pipe val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK;
-}
-#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK 0x00000700
-#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT 8
-static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CLUSTER(enum a7xx_cluster val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK;
-}
-#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK 0x00000030
-#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT 4
-static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CONTEXT(uint32_t val)
-{
- return ((val) << A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK;
-}
-
-#define REG_A7XX_CP_BV_PROTECT_STATUS 0x00000a61
-
-#define REG_A7XX_CP_BV_HW_FAULT 0x00000a64
-
-#define REG_A7XX_CP_BV_DRAW_STATE_ADDR 0x00000a81
-
-#define REG_A7XX_CP_BV_DRAW_STATE_DATA 0x00000a82
-
-#define REG_A7XX_CP_BV_ROQ_DBG_ADDR 0x00000a83
-
-#define REG_A7XX_CP_BV_ROQ_DBG_DATA 0x00000a84
-
-#define REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR 0x00000a85
-
-#define REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA 0x00000a86
-
-#define REG_A7XX_CP_BV_SQE_STAT_ADDR 0x00000a87
-
-#define REG_A7XX_CP_BV_SQE_STAT_DATA 0x00000a88
-
-#define REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR 0x00000a96
-
-#define REG_A7XX_CP_BV_MEM_POOL_DBG_DATA 0x00000a97
-
-#define REG_A7XX_CP_BV_RB_RPTR_ADDR 0x00000a98
-
-#define REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR 0x00000a9a
-
-#define REG_A7XX_CP_RESOURCE_TBL_DBG_DATA 0x00000a9b
-
-#define REG_A7XX_CP_BV_APRIV_CNTL 0x00000ad0
-
-#define REG_A7XX_CP_BV_CHICKEN_DBG 0x00000ada
-
-#define REG_A7XX_CP_LPAC_DRAW_STATE_ADDR 0x00000b0a
-
-#define REG_A7XX_CP_LPAC_DRAW_STATE_DATA 0x00000b0b
-
-#define REG_A7XX_CP_LPAC_ROQ_DBG_ADDR 0x00000b0c
-
-#define REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR 0x00000b27
-
-#define REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA 0x00000b28
-
-#define REG_A7XX_CP_SQE_AC_STAT_ADDR 0x00000b29
-
-#define REG_A7XX_CP_SQE_AC_STAT_DATA 0x00000b2a
-
-#define REG_A7XX_CP_LPAC_APRIV_CNTL 0x00000b31
-
-#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34
-
-#define REG_A7XX_CP_LPAC_ROQ_DBG_DATA 0x00000b35
-
-#define REG_A7XX_CP_LPAC_FIFO_DBG_DATA 0x00000b36
-
-#define REG_A7XX_CP_LPAC_FIFO_DBG_ADDR 0x00000b40
-
-#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82
-
-#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
-
-#define REG_A6XX_RBBM_GPR0_CNTL 0x00000018
-
-#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
-#define REG_A6XX_RBBM_STATUS 0x00000210
-#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
-#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
-#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
-#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
-#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
-#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
-#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
-#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
-#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
-#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
-#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
-#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
-#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
-#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
-#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
-#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
-#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
-#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
-#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
-#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
-#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
-#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
-#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
-#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
-
-#define REG_A6XX_RBBM_STATUS1 0x00000211
-
-#define REG_A6XX_RBBM_STATUS2 0x00000212
-
-#define REG_A6XX_RBBM_STATUS3 0x00000213
-#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
-
-#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
-
-#define REG_A7XX_RBBM_CLOCK_MODE_CP 0x00000260
-
-#define REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ 0x00000284
-
-#define REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS 0x00000285
-
-#define REG_A7XX_RBBM_CLOCK_MODE2_GRAS 0x00000286
-
-#define REG_A7XX_RBBM_CLOCK_MODE_BV_VFD 0x00000287
-
-#define REG_A7XX_RBBM_CLOCK_MODE_BV_GPC 0x00000288
-
-#define REG_A6XX_RBBM_PERFCTR_CP(i0) (0x00000400 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM(i0) (0x0000041c + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_PC(i0) (0x00000424 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_VFD(i0) (0x00000434 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ(i0) (0x00000444 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_VPC(i0) (0x00000450 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_CCU(i0) (0x0000045c + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_TSE(i0) (0x00000466 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_RAS(i0) (0x0000046e + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE(i0) (0x00000476 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_TP(i0) (0x0000048e + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_SP(i0) (0x000004a6 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_RB(i0) (0x000004d6 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_VSC(i0) (0x000004e6 + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ(i0) (0x000004ea + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_CMP(i0) (0x000004f2 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_CP(i0) (0x00000300 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_RBBM(i0) (0x0000031c + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_PC(i0) (0x00000324 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_VFD(i0) (0x00000334 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_HLSQ(i0) (0x00000344 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_VPC(i0) (0x00000350 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_CCU(i0) (0x0000035c + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_TSE(i0) (0x00000366 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_RAS(i0) (0x0000036e + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_UCHE(i0) (0x00000376 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_TP(i0) (0x0000038e + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_SP(i0) (0x000003a6 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_RB(i0) (0x000003d6 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_VSC(i0) (0x000003e6 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_LRZ(i0) (0x000003ea + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_CMP(i0) (0x000003f2 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_UFC(i0) (0x000003fa + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR2_HLSQ(i0) (0x00000410 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR2_CP(i0) (0x0000041c + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR2_SP(i0) (0x0000042a + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR2_TP(i0) (0x00000442 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR2_UFC(i0) (0x0000044e + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_PC(i0) (0x00000460 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_VFD(i0) (0x00000470 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_VPC(i0) (0x00000480 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_TSE(i0) (0x0000048c + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_RAS(i0) (0x00000494 + 0x2*(i0))
-
-#define REG_A7XX_RBBM_PERFCTR_BV_LRZ(i0) (0x0000049c + 0x2*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
-
-#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL(i0) (0x00000507 + 0x1*(i0))
-
-#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
-
-#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x0000050e
-
-#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x0000050f
-
-#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
-
-#define REG_A7XX_RBBM_NC_MODE_CNTL 0x00000534
-
-#define REG_A7XX_RBBM_SNAPSHOT_STATUS 0x00000535
-
-#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540
-
-#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541
-
-#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542
-
-#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543
-
-#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544
-
-#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545
-
-#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546
-
-#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547
-
-#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548
-
-#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549
-
-#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a
-
-#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b
-
-#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c
-
-#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d
-
-#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e
-
-#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f
-
-#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550
-
-#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551
-
-#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552
-
-#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553
-
-#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554
-
-#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555
-
-#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
-
-#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE 0x0000f800
-
-#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
-
-#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
-
-#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
-
-#define REG_A7XX_RBBM_SECVID_TSB_STATUS 0x0000fc00
-
-#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
-
-#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
-
-#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
-
-#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
-
-#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
-#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
-
-#define REG_A7XX_RBBM_GBIF_HALT 0x00000016
-
-#define REG_A7XX_RBBM_GBIF_HALT_ACK 0x00000017
-
-#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
-
-#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
-#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
-#define REG_A7XX_RBBM_INT_2_MASK 0x0000003a
-
-#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
-
-#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
-
-#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
-
-#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
-
-#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
-
-#define REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL 0x000000ad
-
-#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
-
-#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
-
-#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
-
-#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
-
-#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
-
-#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
-
-#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
-
-#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
-
-#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
-
-#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
-
-#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
-
-#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
-
-#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
-
-#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
-
-#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
-
-#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
-
-#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
-
-#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
-
-#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
-
-#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
-
-#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
-
-#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
-
-#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
-
-#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
-
-#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
-
-#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
-
-#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
-
-#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
-
-#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
-
-#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
-
-#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
-
-#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
-
-#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
-
-#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
-
-#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
-
-#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
-
-#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
-
-#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
-
-#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
-
-#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
-
-#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
-
-#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
-
-#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
-
-#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
-
-#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
-
-#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
-
-#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
-
-#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
-
-#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
-
-#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
-
-#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
-
-#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
-
-#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
-
-#define REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD 0x0000011e
-
-#define REG_A7XX_RBBM_CGC_P2S_TRIG_CMD 0x0000011f
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
-
-#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
-
-#define REG_A7XX_RBBM_CGC_P2S_STATUS 0x00000122
-#define A7XX_RBBM_CGC_P2S_STATUS_TXDONE 0x00000001
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_FCHE 0x00000123
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_FCHE 0x00000124
-
-#define REG_A6XX_RBBM_CLOCK_HYST_FCHE 0x00000125
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_MHUB 0x00000126
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_MHUB 0x00000127
-
-#define REG_A6XX_RBBM_CLOCK_HYST_MHUB 0x00000128
-
-#define REG_A6XX_RBBM_CLOCK_DELAY_GLC 0x00000129
-
-#define REG_A6XX_RBBM_CLOCK_HYST_GLC 0x0000012a
-
-#define REG_A6XX_RBBM_CLOCK_CNTL_GLC 0x0000012b
-
-#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f
-
-#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
-#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
-#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
-#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
-}
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
-#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
-}
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
-#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
-#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
-}
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
-}
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
-}
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
-#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
-static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
-{
- return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
-}
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
-
-#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
-
-#define REG_A6XX_VSC_PERFCTR_VSC_SEL(i0) (0x00000cd8 + 0x1*(i0))
-
-#define REG_A7XX_VSC_UNKNOWN_0CD8 0x00000cd8
-#define A7XX_VSC_UNKNOWN_0CD8_BINNING 0x00000001
-
-#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
-
-#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
-
-#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
-
-#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
-
-#define REG_A6XX_UCHE_WRITE_RANGE_MAX 0x00000e05
-
-#define REG_A6XX_UCHE_WRITE_THRU_BASE 0x00000e07
-
-#define REG_A6XX_UCHE_TRAP_BASE 0x00000e09
-
-#define REG_A6XX_UCHE_GMEM_RANGE_MIN 0x00000e0b
-
-#define REG_A6XX_UCHE_GMEM_RANGE_MAX 0x00000e0d
-
-#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
-
-#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
-
-#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
-#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
-#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
-static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
-{
- return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
-}
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL(i0) (0x00000e1c + 0x1*(i0))
-
-#define REG_A6XX_UCHE_GBIF_GX_CONFIG 0x00000e3a
-
-#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c
-
-#define REG_A6XX_VBIF_VERSION 0x00003000
-
-#define REG_A6XX_VBIF_CLKON 0x00003001
-#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
-
-#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
-
-#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
-
-#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
-
-#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
-
-#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
-
-#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
-#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
-#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
-static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
-{
- return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
-}
-
-#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
-
-#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
-#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
-#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
-static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
-{
- return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
-}
-
-#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
-
-#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
-
-#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
-
-#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
-
-#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
-
-#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
-
-#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
-
-#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
-
-#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
-
-#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
-
-#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
-
-#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
-
-#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
-
-#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
-
-#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01
-
-#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
-
-#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
-
-#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
-
-#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
-
-#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
-
-#define REG_A6XX_GBIF_HALT 0x00003c45
-
-#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
-
-#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
-
-#define REG_A6XX_GBIF_PERF_PWR_CNT_CLR 0x00003cc1
-
-#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
-
-#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
-
-#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
-
-#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
-
-#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
-
-#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
-
-#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
-
-#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
-
-#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
-
-#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
-
-#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
-
-#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
-
-#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
-
-#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
-
-#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
-
-#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
-
-#define REG_A6XX_VSC_DBG_ECO_CNTL 0x00000c00
-
-#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
-#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
-
-#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
-#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
-#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
-static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
-{
- return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
-}
-#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
-#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
-static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
-{
- return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
-}
-
-#define REG_A6XX_VSC_PIPE_CONFIG(i0) (0x00000c10 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
-#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
-#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
-static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
-{
- return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
-}
-#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
-#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
-static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
-{
- return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
-}
-#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
-#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
-static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
-{
- return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
-}
-#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
-#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
-static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
-{
- return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
-}
-
-#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
-
-#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
-
-#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
-
-#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
-
-#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
-
-#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
-
-#define REG_A6XX_VSC_STATE(i0) (0x00000c38 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
-
-#define REG_A6XX_VSC_PRIM_STRM_SIZE(i0) (0x00000c58 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
-
-#define REG_A6XX_VSC_DRAW_STRM_SIZE(i0) (0x00000c78 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
-
-#define REG_A7XX_UCHE_UNKNOWN_0E10 0x00000e10
-
-#define REG_A7XX_UCHE_UNKNOWN_0E11 0x00000e11
-
-#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
-
-#define REG_A6XX_GRAS_CL_CNTL 0x00008000
-#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001
-#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002
-#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004
-#define A6XX_GRAS_CL_CNTL_Z_CLAMP_ENABLE 0x00000020
-#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
-#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080
-#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100
-#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200
-
-#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001
-#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
-#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
-static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
-}
-
-#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002
-#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
-#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8
-static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
-}
-
-#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003
-#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
-#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8
-static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
-}
-
-#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004
-
-#define REG_A6XX_GRAS_CNTL 0x00008005
-#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
-#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
-#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
-#define A6XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
-#define A6XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
-#define A6XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
-#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
-#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6
-static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
-}
-#define A6XX_GRAS_CNTL_UNK10 0x00000400
-#define A6XX_GRAS_CNTL_UNK11 0x00000800
-
-#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
-{
- return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
-}
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
-static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
-{
- return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
-}
-
-#define REG_A7XX_GRAS_UNKNOWN_8007 0x00008007
-
-#define REG_A7XX_GRAS_UNKNOWN_8008 0x00008008
-
-#define REG_A7XX_GRAS_UNKNOWN_8009 0x00008009
-
-#define REG_A7XX_GRAS_UNKNOWN_800A 0x0000800a
-
-#define REG_A7XX_GRAS_UNKNOWN_800B 0x0000800b
-
-#define REG_A7XX_GRAS_UNKNOWN_800C 0x0000800c
-
-#define REG_A6XX_GRAS_CL_VPORT(i0) (0x00008010 + 0x6*(i0))
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
-#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
-}
-
-#define REG_A6XX_GRAS_CL_Z_CLAMP(i0) (0x00008070 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
-#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
-#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
-#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff
-#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
-{
- return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_CNTL 0x00008090
-#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
-#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
-#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
-#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
-#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
-static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
-{
- return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
-}
-#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
-#define A6XX_GRAS_SU_CNTL_UNK12 0x00001000
-#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
-#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
-static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
-{
- return ((val) << A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A6XX_GRAS_SU_CNTL_LINE_MODE__MASK;
-}
-#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000
-#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
-static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
-}
-#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00020000
-#define A6XX_GRAS_SU_CNTL_RENDERTARGETINDEXINCR 0x00040000
-#define A6XX_GRAS_SU_CNTL_VIEWPORTINDEXINCR 0x00080000
-#define A6XX_GRAS_SU_CNTL_UNK20__MASK 0x00700000
-#define A6XX_GRAS_SU_CNTL_UNK20__SHIFT 20
-static inline uint32_t A6XX_GRAS_SU_CNTL_UNK20(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_CNTL_UNK20__SHIFT) & A6XX_GRAS_SU_CNTL_UNK20__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
-#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
-#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
-}
-#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
-#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
-static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
-{
- return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
-#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff
-#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
-#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
-#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
-{
- return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
-#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
-#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
-{
- return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
-#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
-#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
-{
- return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
-#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
-#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
-{
- return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
-#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
-#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
-{
- return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
-}
-#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3 0x00000008
-
-#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK 0x00000006
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT 1
-static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK;
-}
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN 0x00000008
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK 0x00000030
-#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT 4
-static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL 0x0000809a
-#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 0x00000001
-#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN 0x00000002
-
-#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b
-#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001
-#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002
-
-#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c
-#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 0x00000001
-#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002
-
-#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d
-#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001
-#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002
-
-#define REG_A6XX_GRAS_SC_CNTL 0x000080a0
-#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000007
-#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK 0x00000018
-#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT 3
-static inline uint32_t A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE(enum a6xx_single_prim_mode val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK 0x00000020
-#define A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT 5
-static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK 0x000000c0
-#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT 6
-static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK 0x00000100
-#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT 8
-static inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx_sequenced_thread_dist val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_UNK9 0x00000200
-#define A6XX_GRAS_SC_CNTL_ROTATION__MASK 0x00000c00
-#define A6XX_GRAS_SC_CNTL_ROTATION__SHIFT 10
-static inline uint32_t A6XX_GRAS_SC_CNTL_ROTATION(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_CNTL_ROTATION__SHIFT) & A6XX_GRAS_SC_CNTL_ROTATION__MASK;
-}
-#define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000
-
-#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
-#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f
-#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
-#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
-#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
-{
- return ((val) << A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
-#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
-#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
-{
- return ((val) << A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
-#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_UNK27 0x08000000
-
-#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
-#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2 0x00000004
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3 0x00000008
-
-#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
-#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4
-#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001
-#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
-
-#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
-}
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A7XX_GRAS_UNKNOWN_80A7 0x000080a7
-
-#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
-
-#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR(i0) (0x000080b0 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
-}
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; }
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
-}
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(i0) (0x000080d0 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK;
-}
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK;
-}
-
-static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; }
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK;
-}
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
-}
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
-}
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A7XX_GRAS_UNKNOWN_80F4 0x000080f4
-
-#define REG_A7XX_GRAS_UNKNOWN_80F5 0x000080f5
-
-#define REG_A7XX_GRAS_UNKNOWN_80F6 0x000080f6
-
-#define REG_A7XX_GRAS_UNKNOWN_80F8 0x000080f8
-
-#define REG_A7XX_GRAS_UNKNOWN_80F9 0x000080f9
-
-#define REG_A7XX_GRAS_UNKNOWN_80FA 0x000080fa
-
-#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
-#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
-#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
-#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
-#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008
-#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010
-#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020
-#define A6XX_GRAS_LRZ_CNTL_DIR__MASK 0x000000c0
-#define A6XX_GRAS_LRZ_CNTL_DIR__SHIFT 6
-static inline uint32_t A6XX_GRAS_LRZ_CNTL_DIR(enum a6xx_lrz_dir_status val)
-{
- return ((val) << A6XX_GRAS_LRZ_CNTL_DIR__SHIFT) & A6XX_GRAS_LRZ_CNTL_DIR__MASK;
-}
-#define A6XX_GRAS_LRZ_CNTL_DIR_WRITE 0x00000100
-#define A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR 0x00000200
-#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK 0x00003800
-#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT 11
-static inline uint32_t A6XX_GRAS_LRZ_CNTL_Z_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT) & A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK;
-}
-
-#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101
-#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001
-#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK 0x00000006
-#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT 1
-static inline uint32_t A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
-{
- return ((val) << A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK;
-}
-
-#define REG_A6XX_GRAS_LRZ_MRT_BUF_INFO_0 0x00008102
-#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK 0x000000ff
-#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT) & A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK;
-}
-
-#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
-
-#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
-}
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10
-static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
-
-#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109
-#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
-
-#define REG_A6XX_GRAS_LRZ_DEPTH_VIEW 0x0000810a
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK 0x000007ff
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT 0
-static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER(uint32_t val)
-{
- return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK;
-}
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK 0x07ff0000
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT 16
-static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT(uint32_t val)
-{
- return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK;
-}
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK 0xf0000000
-#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT 28
-static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL(uint32_t val)
-{
- return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK;
-}
-
-#define REG_A7XX_GRAS_UNKNOWN_810B 0x0000810b
-
-#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
-
-#define REG_A7XX_GRAS_LRZ_CLEAR_DEPTH_F32 0x00008111
-#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK 0xffffffff
-#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT 0
-static inline uint32_t A7XX_GRAS_LRZ_CLEAR_DEPTH_F32(float val)
-{
- return ((fui(val)) << A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT) & A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK;
-}
-
-#define REG_A7XX_GRAS_UNKNOWN_8113 0x00008113
-
-#define REG_A7XX_GRAS_UNKNOWN_8120 0x00008120
-
-#define REG_A7XX_GRAS_UNKNOWN_8121 0x00008121
-
-#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
-#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
-#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
-#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK 0x00000070
-#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT 4
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK4(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
-#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
-#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
-#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000
-#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000
-#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000
-#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
-#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
-#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
-{
- return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK;
-}
-#define A6XX_GRAS_2D_BLIT_CNTL_UNK30 0x40000000
-
-#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
-#define A6XX_GRAS_2D_SRC_TL_X__MASK 0x01ffff00
-#define A6XX_GRAS_2D_SRC_TL_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_X(int32_t val)
-{
- return ((val) << A6XX_GRAS_2D_SRC_TL_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
-#define A6XX_GRAS_2D_SRC_BR_X__MASK 0x01ffff00
-#define A6XX_GRAS_2D_SRC_BR_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_X(int32_t val)
-{
- return ((val) << A6XX_GRAS_2D_SRC_BR_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
-#define A6XX_GRAS_2D_SRC_TL_Y__MASK 0x01ffff00
-#define A6XX_GRAS_2D_SRC_TL_Y__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y(int32_t val)
-{
- return ((val) << A6XX_GRAS_2D_SRC_TL_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
-#define A6XX_GRAS_2D_SRC_BR_Y__MASK 0x01ffff00
-#define A6XX_GRAS_2D_SRC_BR_Y__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y(int32_t val)
-{
- return ((val) << A6XX_GRAS_2D_SRC_BR_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
-#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff
-#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
-}
-#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000
-#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
-#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff
-#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
-}
-#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000
-#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407
-
-#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408
-
-#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409
-
-#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a
-#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff
-#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK;
-}
-#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000
-#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b
-#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff
-#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK;
-}
-#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000
-#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
-{
- return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK;
-}
-
-#define REG_A6XX_GRAS_DBG_ECO_CNTL 0x00008600
-#define A6XX_GRAS_DBG_ECO_CNTL_UNK7 0x00000080
-#define A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS 0x00000800
-
-#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
-
-#define REG_A7XX_GRAS_NC_MODE_CNTL 0x00008602
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL(i0) (0x00008610 + 0x1*(i0))
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL(i0) (0x00008614 + 0x1*(i0))
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL(i0) (0x00008618 + 0x1*(i0))
-
-#define REG_A6XX_RB_BIN_CONTROL 0x00008800
-#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
-#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
-static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
-}
-#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
-#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
-static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
-}
-#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
-#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
-static inline uint32_t A6XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
-{
- return ((val) << A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK;
-}
-#define A6XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
-#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
-#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
-static inline uint32_t A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
-{
- return ((val) << A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK;
-}
-#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
-#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
-static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
-}
-
-#define REG_A7XX_RB_BIN_CONTROL 0x00008800
-#define A7XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
-#define A7XX_RB_BIN_CONTROL_BINW__SHIFT 0
-static inline uint32_t A7XX_RB_BIN_CONTROL_BINW(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A7XX_RB_BIN_CONTROL_BINW__SHIFT) & A7XX_RB_BIN_CONTROL_BINW__MASK;
-}
-#define A7XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
-#define A7XX_RB_BIN_CONTROL_BINH__SHIFT 8
-static inline uint32_t A7XX_RB_BIN_CONTROL_BINH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A7XX_RB_BIN_CONTROL_BINH__SHIFT) & A7XX_RB_BIN_CONTROL_BINH__MASK;
-}
-#define A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
-#define A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
-static inline uint32_t A7XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
-{
- return ((val) << A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK;
-}
-#define A7XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
-#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
-#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
-static inline uint32_t A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
-{
- return ((val) << A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
-}
-
-#define REG_A6XX_RB_RENDER_CNTL 0x00008801
-#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038
-#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3
-static inline uint32_t A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK;
-}
-#define A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040
-#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
-#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00000700
-#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8
-static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK;
-}
-#define A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100
-#define A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8
-static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
-{
- return ((val) << A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK;
-}
-#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600
-#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9
-static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
-{
- return ((val) << A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK;
-}
-#define A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800
-#define A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000
-#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
-#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
-#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
-static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
-}
-
-#define REG_A7XX_RB_RENDER_CNTL 0x00008801
-#define A7XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040
-#define A7XX_RB_RENDER_CNTL_BINNING 0x00000080
-#define A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100
-#define A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8
-static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
-{
- return ((val) << A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK;
-}
-#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600
-#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9
-static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
-{
- return ((val) << A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK;
-}
-#define A7XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800
-#define A7XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000
-
-#define REG_A7XX_GRAS_SU_RENDER_CNTL 0x00008116
-#define A7XX_GRAS_SU_RENDER_CNTL_BINNING 0x00000080
-
-#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
-#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_RB_RAS_MSAA_CNTL_UNK2 0x00000004
-#define A6XX_RB_RAS_MSAA_CNTL_UNK3 0x00000008
-
-#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
-#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804
-#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001
-#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
-
-#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
-}
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
-#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
-#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
-#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
-#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
-#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
-#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
-#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
-#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
-static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
-}
-#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
-
-#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
-#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
-#define A6XX_RB_RENDER_CONTROL1_POSTDEPTHCOVERAGE 0x00000002
-#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004
-#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
-#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030
-#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT 4
-static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
-{
- return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK;
-}
-#define A6XX_RB_RENDER_CONTROL1_CENTERRHW 0x00000040
-#define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080
-#define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100
-
-#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
-#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
-#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
-#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004
-#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008
-
-#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
-#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
-#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
-static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
-{
- return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
-}
-
-#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
-#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
-#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
-#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
-#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
-#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
-#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
-#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
-#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
-}
-#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
-#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
-static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
-{
- return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
-}
-
-#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
-}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
-static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
-{
- return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
-}
-
-#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
-#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
-
-#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810
-#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
-
-#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
-
-#define REG_A7XX_RB_UNKNOWN_8812 0x00008812
-
-#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
-
-#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
-
-#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
-
-#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
-
-#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
-
-#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
-
-#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
-
-#define REG_A6XX_RB_MRT(i0) (0x00008820 + 0x8*(i0))
-
-static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
-#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
-#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
-#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
-#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
-#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
-static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
-{
- return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
-}
-#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
-#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
-static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
-}
-
-static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
-}
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
-}
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
-#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
-}
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
-}
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
-}
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
-#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
-static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
-{
- return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
-}
-
-static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
-#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
-}
-#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
-#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
-}
-#define A6XX_RB_MRT_BUF_INFO_UNK10 0x00000400
-#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
-#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
-}
-
-static inline uint32_t REG_A7XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
-#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
-}
-#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
-#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
-static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
-}
-#define A7XX_RB_MRT_BUF_INFO_UNK10 0x00000400
-#define A7XX_RB_MRT_BUF_INFO_LOSSLESSCOMPEN 0x00000800
-#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
-#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
-static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
-}
-
-static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
-#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
-#define A6XX_RB_MRT_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
-}
-
-static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
-#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
-#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
-}
-
-static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
-
-static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
-
-#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
-#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
-#define A6XX_RB_BLEND_RED_F32__SHIFT 0
-static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
-{
- return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
-}
-
-#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
-#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
-#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
-static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
-{
- return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
-}
-
-#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
-#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
-#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
-static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
-{
- return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
-}
-
-#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
-#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
-#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
-static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
-{
- return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
-}
-
-#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
-#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
-#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
-static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
-{
- return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
-}
-#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
-#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
-#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
-static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
-}
-
-#define REG_A6XX_RB_BLEND_CNTL 0x00008865
-#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
-#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
-static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
-{
- return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
-}
-#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
-#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
-#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
-#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800
-#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
-#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
-static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
-}
-
-#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
-#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
-#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
-{
- return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK;
-}
-
-#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
-#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
-#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
-#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
-#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
-static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
-{
- return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
-}
-#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020
-#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
-#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080
-
-#define REG_A6XX_GRAS_SU_DEPTH_CNTL 0x00008114
-#define A6XX_GRAS_SU_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
-
-#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
-#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
-#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
-{
- return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
-}
-#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
-#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
-{
- return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
-}
-
-#define REG_A7XX_RB_DEPTH_BUFFER_INFO 0x00008872
-#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
-#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
-static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
-{
- return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
-}
-#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
-#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
-static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
-{
- return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
-}
-#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK 0x00000060
-#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT 5
-static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE(enum a6xx_tile_mode val)
-{
- return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK;
-}
-#define A7XX_RB_DEPTH_BUFFER_INFO_LOSSLESSCOMPEN 0x00000080
-
-#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
-#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff
-#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
-#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff
-#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
-
-#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
-
-#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878
-#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff
-#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0
-static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val)
-{
- return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK;
-}
-
-#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879
-#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff
-#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0
-static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val)
-{
- return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK;
-}
-
-#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
-#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
-#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
-#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
-#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
-#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
-#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
-#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
-#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
-#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
-#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
-#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
-}
-#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
-#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
-static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
-{
- return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
-}
-
-#define REG_A6XX_GRAS_SU_STENCIL_CNTL 0x00008115
-#define A6XX_GRAS_SU_STENCIL_CNTL_STENCIL_ENABLE 0x00000001
-
-#define REG_A6XX_RB_STENCIL_INFO 0x00008881
-#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
-#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002
-
-#define REG_A7XX_RB_STENCIL_INFO 0x00008881
-#define A7XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
-#define A7XX_RB_STENCIL_INFO_UNK1 0x00000002
-#define A7XX_RB_STENCIL_INFO_TILEMODE__MASK 0x0000000c
-#define A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT 2
-static inline uint32_t A7XX_RB_STENCIL_INFO_TILEMODE(enum a6xx_tile_mode val)
-{
- return ((val) << A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT) & A7XX_RB_STENCIL_INFO_TILEMODE__MASK;
-}
-
-#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
-#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff
-#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
-#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff
-#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
-
-#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
-
-#define REG_A6XX_RB_STENCILREF 0x00008887
-#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
-#define A6XX_RB_STENCILREF_REF__SHIFT 0
-static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
-}
-#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
-#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
-static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
-}
-
-#define REG_A6XX_RB_STENCILMASK 0x00008888
-#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
-#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
-static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
-}
-#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
-#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
-static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
-}
-
-#define REG_A6XX_RB_STENCILWRMASK 0x00008889
-#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
-#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
-static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
-}
-#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
-#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
-static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
-}
-
-#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
-#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff
-#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000
-#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
-#define A6XX_RB_SAMPLE_COUNT_CONTROL_DISABLE 0x00000001
-#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
-
-#define REG_A6XX_RB_LRZ_CNTL 0x00008898
-#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
-
-#define REG_A7XX_RB_UNKNOWN_8899 0x00008899
-
-#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0
-#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff
-#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0
-static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val)
-{
- return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK;
-}
-
-#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1
-#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff
-#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0
-static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val)
-{
- return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK;
-}
-
-#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
-#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff
-#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0
-static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val)
-{
- return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK;
-}
-#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000
-#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16
-static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val)
-{
- return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
-#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff
-#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
-}
-#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000
-#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
-static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
-#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff
-#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
-}
-#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000
-#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
-static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
-}
-
-#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
-#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f
-#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
-}
-#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00
-#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
-}
-
-#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
-#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff
-#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
-}
-#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000
-#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_GMEM_MSAA_CNTL 0x000088d5
-#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK 0x00000018
-#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT 3
-static inline uint32_t A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
-
-#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
-#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
-#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
-}
-#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
-#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
-#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
-}
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
-}
-#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
-#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
-}
-#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000
-
-#define REG_A6XX_RB_BLIT_DST 0x000088d8
-
-#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
-#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
-#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
-#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff
-#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc
-
-#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
-#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
-#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
-}
-#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800
-#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11
-static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x7f));
- return (((val >> 7)) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
-
-#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
-
-#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
-
-#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
-
-#define REG_A6XX_RB_BLIT_INFO 0x000088e3
-#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
-#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
-#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004
-#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
-#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
-#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
-static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
-}
-#define A6XX_RB_BLIT_INFO_LAST__MASK 0x00000300
-#define A6XX_RB_BLIT_INFO_LAST__SHIFT 8
-static inline uint32_t A6XX_RB_BLIT_INFO_LAST(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_INFO_LAST__SHIFT) & A6XX_RB_BLIT_INFO_LAST__MASK;
-}
-#define A6XX_RB_BLIT_INFO_BUFFER_ID__MASK 0x0000f000
-#define A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT 12
-static inline uint32_t A6XX_RB_BLIT_INFO_BUFFER_ID(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT) & A6XX_RB_BLIT_INFO_BUFFER_ID__MASK;
-}
-
-#define REG_A7XX_RB_UNKNOWN_88E4 0x000088e4
-#define A7XX_RB_UNKNOWN_88E4_UNK0 0x00000001
-
-#define REG_A7XX_RB_CCU_CNTL2 0x000088e5
-#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK 0x00000001
-#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT 0
-static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI(uint32_t val)
-{
- return ((val) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK;
-}
-#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK 0x00000004
-#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT 2
-static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI(uint32_t val)
-{
- return ((val) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK;
-}
-#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK 0x00000c00
-#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT 10
-static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val)
-{
- return ((val) << A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK;
-}
-#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK 0x001ff000
-#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT 12
-static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK;
-}
-#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK 0x00600000
-#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT 21
-static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val)
-{
- return ((val) << A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK;
-}
-#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK 0xff800000
-#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT 23
-static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK;
-}
-
-#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
-
-#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
-
-#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3
-#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
-#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
-}
-#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800
-#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
-static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x7f));
- return (((val >> 7)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
-
-#define REG_A7XX_RB_UNKNOWN_88F5 0x000088f5
-
-#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
-
-#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
-}
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8
-static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
-{
- return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK;
-}
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800
-#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
-static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x7f));
- return (((val >> 7)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_MRT_FLAG_BUFFER(i0) (0x00008903 + 0x3*(i0))
-
-static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
-
-static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
-}
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
-static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0x7f));
- return (((val >> 7)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
-
-#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00
-
-#define REG_A6XX_RB_UNKNOWN_8A10 0x00008a10
-
-#define REG_A6XX_RB_UNKNOWN_8A20 0x00008a20
-
-#define REG_A6XX_RB_UNKNOWN_8A30 0x00008a30
-
-#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
-#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
-#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
-#define A6XX_RB_2D_BLIT_CNTL_UNK4__MASK 0x00000070
-#define A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT 4
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK4(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK4__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
-#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
-#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
-#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000
-#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000
-#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000
-#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
-#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
-#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
-{
- return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK;
-}
-#define A6XX_RB_2D_BLIT_CNTL_UNK30 0x40000000
-
-#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01
-
-#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
-#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
-}
-#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
-#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
-}
-#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
-}
-#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
-#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000
-#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000
-#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14
-static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
-}
-#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000
-#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000
-#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000
-#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000
-#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000
-#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000
-#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000
-#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000
-#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23
-static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK;
-}
-#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000
-
-#define REG_A6XX_RB_2D_DST 0x00008c18
-
-#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a
-#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff
-#define A6XX_RB_2D_DST_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b
-
-#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d
-#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff
-#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e
-
-#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
-
-#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22
-#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff
-#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23
-
-#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25
-#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff
-#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
-}
-
-#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
-
-#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
-
-#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
-
-#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
-
-#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
-
-#define REG_A6XX_RB_DBG_ECO_CNTL 0x00008e04
-
-#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
-
-#define REG_A7XX_RB_UNKNOWN_8E06 0x00008e06
-
-#define REG_A6XX_RB_CCU_CNTL 0x00008e07
-#define A6XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 0x00000001
-#define A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004
-#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK 0x00000080
-#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT 7
-static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI(uint32_t val)
-{
- return ((val) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK;
-}
-#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK 0x00000200
-#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT 9
-static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI(uint32_t val)
-{
- return ((val) << A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK;
-}
-#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK 0x00000c00
-#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT 10
-static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val)
-{
- return ((val) << A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK;
-}
-#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000
-#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12
-static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK;
-}
-#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK 0x00600000
-#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT 21
-static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val)
-{
- return ((val) << A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK;
-}
-#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000
-#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23
-static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK;
-}
-
-#define REG_A7XX_RB_CCU_CNTL 0x00008e07
-#define A7XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 0x00000001
-#define A7XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004
-
-#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
-#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001
-#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
-#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
-static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
-{
- return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK;
-}
-#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
-#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010
-#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400
-#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10
-static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
-{
- return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK;
-}
-#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800
-#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000
-#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12
-static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
-{
- return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
-}
-
-#define REG_A7XX_RB_UNKNOWN_8E09 0x00008e09
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL(i0) (0x00008e10 + 0x1*(i0))
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL(i0) (0x00008e18 + 0x1*(i0))
-
-#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL(i0) (0x00008e2c + 0x1*(i0))
-
-#define REG_A7XX_RB_PERFCTR_UFC_SEL(i0) (0x00008e30 + 0x1*(i0))
-
-#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
-
-#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
-
-#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
-
-#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51
-
-#define REG_A7XX_RB_UNKNOWN_8E79 0x00008e79
-
-#define REG_A6XX_VPC_GS_PARAM 0x00009100
-#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff
-#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_PARAM_LINELENGTHLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT) & A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK;
-}
-
-#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_VS_CLIP_CNTL_V2 0x00009311
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_GS_CLIP_CNTL_V2 0x00009312
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_DS_CLIP_CNTL_V2 0x00009313
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK;
-}
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
-}
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
-#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
-static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
-}
-
-#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104
-#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK;
-}
-#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
-}
-#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105
-#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK;
-}
-#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
-}
-#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106
-#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK;
-}
-#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
-}
-#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_VS_LAYER_CNTL_V2 0x00009314
-#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK;
-}
-#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK;
-}
-#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_GS_LAYER_CNTL_V2 0x00009315
-#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK;
-}
-#define A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK;
-}
-#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_DS_LAYER_CNTL_V2 0x00009316
-#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
-#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK;
-}
-#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
-#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK;
-}
-#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
-#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
-}
-
-#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
-#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001
-#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004
-
-#define REG_A6XX_VPC_POLYGON_MODE 0x00009108
-#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003
-#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0
-static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
-{
- return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
-}
-
-#define REG_A7XX_VPC_PRIMITIVE_CNTL_0 0x00009109
-#define A7XX_VPC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
-#define A7XX_VPC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
-#define A7XX_VPC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004
-#define A7XX_VPC_PRIMITIVE_CNTL_0_UNK3 0x00000008
-
-#define REG_A7XX_VPC_PRIMITIVE_CNTL_5 0x0000910a
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
-static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
-{
- return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
-}
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
-static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
-{
- return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
-}
-#define A7XX_VPC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
-#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
-static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
-{
- return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
-}
-#define A7XX_VPC_PRIMITIVE_CNTL_5_UNK18 0x00040000
-
-#define REG_A7XX_VPC_MULTIVIEW_MASK 0x0000910b
-
-#define REG_A7XX_VPC_MULTIVIEW_CNTL 0x0000910c
-#define A7XX_VPC_MULTIVIEW_CNTL_ENABLE 0x00000001
-#define A7XX_VPC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
-#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
-#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
-static inline uint32_t A7XX_VPC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
-{
- return ((val) << A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK;
-}
-
-#define REG_A6XX_VPC_VARYING_INTERP(i0) (0x00009200 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
-
-#define REG_A6XX_VPC_VARYING_PS_REPL(i0) (0x00009208 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
-
-#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
-
-#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
-
-#define REG_A6XX_VPC_VAR(i0) (0x00009212 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
-
-#define REG_A6XX_VPC_SO_CNTL 0x00009216
-#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff
-#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK;
-}
-#define A6XX_VPC_SO_CNTL_RESET 0x00010000
-
-#define REG_A6XX_VPC_SO_PROG 0x00009217
-#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
-#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
-}
-#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
-#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
-static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
-}
-#define A6XX_VPC_SO_PROG_A_EN 0x00000800
-#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
-#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
-static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
-}
-#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
-#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
-static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
-}
-#define A6XX_VPC_SO_PROG_B_EN 0x00800000
-
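The VPC_SO_PROG word above carries two halves, A and B, each with a buffer select, a 4-byte-aligned offset (the inline helpers assert the alignment and store the offset in dword units) and an enable bit. A minimal sketch, assuming the A6XX_VPC_SO_PROG_* helpers above are in scope; the buffer indices and the 16-byte offset are made-up example values:

	/* Illustrative only: pack one streamout program word (an A entry and a B entry). */
	static uint32_t pack_so_prog_pair(void)
	{
		return A6XX_VPC_SO_PROG_A_BUF(0) |
		       A6XX_VPC_SO_PROG_A_OFF(0)  | A6XX_VPC_SO_PROG_A_EN |
		       A6XX_VPC_SO_PROG_B_BUF(1) |
		       A6XX_VPC_SO_PROG_B_OFF(16) | A6XX_VPC_SO_PROG_B_EN;
	}
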
-#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
-
-#define REG_A6XX_VPC_SO(i0) (0x0000921a + 0x7*(i0))
-
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_STRIDE(uint32_t i0) { return 0x0000921d + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
-
-#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
-#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
-
-#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
-
-#define REG_A6XX_VPC_VS_PACK 0x00009301
-#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00
-#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK;
-}
-#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000
-#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
-}
-#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000
-#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24
-static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val)
-{
- return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK;
-}
-
-#define REG_A6XX_VPC_GS_PACK 0x00009302
-#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00
-#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK;
-}
-#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000
-#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
-}
-#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000
-#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24
-static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val)
-{
- return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK;
-}
-
-#define REG_A6XX_VPC_DS_PACK 0x00009303
-#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00
-#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK;
-}
-#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000
-#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
-}
-#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000
-#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24
-static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val)
-{
- return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK;
-}
-
-#define REG_A6XX_VPC_CNTL_0 0x00009304
-#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
-#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
-static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
-{
- return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
-}
-#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00
-#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8
-static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
-}
-#define A6XX_VPC_CNTL_0_VARYING 0x00010000
-#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000
-#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24
-static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val)
-{
- return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK;
-}
-
-#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305
-#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007
-#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK;
-}
-#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038
-#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3
-static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK;
-}
-#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0
-#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6
-static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK;
-}
-#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00
-#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9
-static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK;
-}
-#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
-#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
-static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
-}
-
-#define REG_A6XX_VPC_SO_DISABLE 0x00009306
-#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001
-
-#define REG_A7XX_VPC_POLYGON_MODE2 0x00009307
-#define A7XX_VPC_POLYGON_MODE2_MODE__MASK 0x00000003
-#define A7XX_VPC_POLYGON_MODE2_MODE__SHIFT 0
-static inline uint32_t A7XX_VPC_POLYGON_MODE2_MODE(enum a6xx_polygon_mode val)
-{
- return ((val) << A7XX_VPC_POLYGON_MODE2_MODE__SHIFT) & A7XX_VPC_POLYGON_MODE2_MODE__MASK;
-}
-
-#define REG_A7XX_VPC_ATTR_BUF_SIZE_GMEM 0x00009308
-#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff
-#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0
-static inline uint32_t A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val)
-{
- return ((val) << A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK;
-}
-
-#define REG_A7XX_VPC_ATTR_BUF_BASE_GMEM 0x00009309
-#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK 0xffffffff
-#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT 0
-static inline uint32_t A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM(uint32_t val)
-{
- return ((val) << A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK;
-}
-
-#define REG_A7XX_PC_ATTR_BUF_SIZE_GMEM 0x00009b09
-#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff
-#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0
-static inline uint32_t A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val)
-{
- return ((val) << A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK;
-}
-
-#define REG_A6XX_VPC_DBG_ECO_CNTL 0x00009600
-
-#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
-
-#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
-
-#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL(i0) (0x00009604 + 0x1*(i0))
-
-#define REG_A7XX_VPC_PERFCTR_VPC_SEL(i0) (0x0000960b + 0x1*(i0))
-
-#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
-
-#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801
-#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff
-#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0
-static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val)
-{
- return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK;
-}
-#define A6XX_PC_HS_INPUT_SIZE_UNK13 0x00002000
-
-#define REG_A6XX_PC_TESS_CNTL 0x00009802
-#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003
-#define A6XX_PC_TESS_CNTL_SPACING__SHIFT 0
-static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val)
-{
- return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK;
-}
-#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c
-#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2
-static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
-{
- return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK;
-}
-
-#define REG_A6XX_PC_RESTART_INDEX 0x00009803
-
-#define REG_A6XX_PC_MODE_CNTL 0x00009804
-
-#define REG_A6XX_PC_POWER_CNTL 0x00009805
-
-#define REG_A6XX_PC_PS_CNTL 0x00009806
-#define A6XX_PC_PS_CNTL_PRIMITIVEIDEN 0x00000001
-
-#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808
-#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
-#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
-static inline uint32_t A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
-}
-
-#define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a
-#define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
-
-#define REG_A6XX_PC_DRAW_CMD 0x00009840
-#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff
-#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK;
-}
-
-#define REG_A6XX_PC_DISPATCH_CMD 0x00009841
-#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
-#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK;
-}
-
-#define REG_A6XX_PC_EVENT_CMD 0x00009842
-#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000
-#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16
-static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK;
-}
-#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f
-#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0
-static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
-{
- return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
-}
-
-#define REG_A6XX_PC_MARKER 0x00009880
-
-#define REG_A6XX_PC_POLYGON_MODE 0x00009981
-#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
-#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0
-static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
-{
- return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
-}
-
-#define REG_A7XX_PC_POLYGON_MODE 0x00009809
-#define A7XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
-#define A7XX_PC_POLYGON_MODE_MODE__SHIFT 0
-static inline uint32_t A7XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
-{
- return ((val) << A7XX_PC_POLYGON_MODE_MODE__SHIFT) & A7XX_PC_POLYGON_MODE_MODE__MASK;
-}
-
-#define REG_A6XX_PC_RASTER_CNTL 0x00009980
-#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
-#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0
-static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val)
-{
- return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK;
-}
-#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004
-
-#define REG_A7XX_PC_RASTER_CNTL 0x00009107
-#define A7XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
-#define A7XX_PC_RASTER_CNTL_STREAM__SHIFT 0
-static inline uint32_t A7XX_PC_RASTER_CNTL_STREAM(uint32_t val)
-{
- return ((val) << A7XX_PC_RASTER_CNTL_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_STREAM__MASK;
-}
-#define A7XX_PC_RASTER_CNTL_DISCARD 0x00000004
-
-#define REG_A7XX_PC_RASTER_CNTL_V2 0x00009317
-#define A7XX_PC_RASTER_CNTL_V2_STREAM__MASK 0x00000003
-#define A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT 0
-static inline uint32_t A7XX_PC_RASTER_CNTL_V2_STREAM(uint32_t val)
-{
- return ((val) << A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_V2_STREAM__MASK;
-}
-#define A7XX_PC_RASTER_CNTL_V2_DISCARD 0x00000004
-
-#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
-#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
-#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
-#define A6XX_PC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004
-#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008
-
-#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01
-#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100
-#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200
-#define A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400
-#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800
-#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
-#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16
-static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_PC_VS_OUT_CNTL_SHADINGRATE 0x01000000
-
-#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02
-#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100
-#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200
-#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400
-#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800
-#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
-#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16
-static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_PC_GS_OUT_CNTL_SHADINGRATE 0x01000000
-
-#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03
-#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_PC_HS_OUT_CNTL_PSIZE 0x00000100
-#define A6XX_PC_HS_OUT_CNTL_LAYER 0x00000200
-#define A6XX_PC_HS_OUT_CNTL_VIEW 0x00000400
-#define A6XX_PC_HS_OUT_CNTL_PRIMITIVE_ID 0x00000800
-#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
-#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT 16
-static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_PC_HS_OUT_CNTL_SHADINGRATE 0x01000000
-
-#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04
-#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK;
-}
-#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100
-#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200
-#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400
-#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800
-#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
-#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16
-static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
-{
- return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
-}
-#define A6XX_PC_DS_OUT_CNTL_SHADINGRATE 0x01000000
-
-#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
-{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
-}
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
-{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
-}
-#define A6XX_PC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
-#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
-{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
-}
-#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18 0x00040000
-
-#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06
-#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff
-#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
-{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
-}
-
-#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07
-#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001
-#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
-#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
-#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
-static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
-{
- return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK;
-}
-
-#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08
-
-#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00
-#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f
-#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0
-static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
-{
- return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK;
-}
-#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
-#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8
-static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK;
-}
-
-#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
-
-#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
-
-#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04
-
-#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06
-
-#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07
-
-#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
-
-#define REG_A7XX_PC_TESSFACTOR_ADDR 0x00009810
-
-#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b
-#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
-#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
-static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK;
-}
-#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
-#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK;
-}
-#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300
-#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8
-static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK;
-}
-#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00
-#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10
-static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK;
-}
-#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000
-#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12
-static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK;
-}
-#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000
-#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000
-
-#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c
-
-#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d
-
-#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11
-#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff
-#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0
-static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val)
-{
- return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK;
-}
-#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000
-#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16
-static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
-{
- return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
-}
-#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000
-#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22
-static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
-{
- return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
-}
-
-#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12
-
-#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14
-
-#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c
-#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001
-
-#define REG_A7XX_PC_UNKNOWN_9E24 0x00009e24
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL(i0) (0x00009e34 + 0x1*(i0))
-
-#define REG_A7XX_PC_PERFCTR_PC_SEL(i0) (0x00009e42 + 0x1*(i0))
-
-#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
-
-#define REG_A6XX_VFD_CONTROL_0 0x0000a000
-#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f
-#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK;
-}
-#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00
-#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_1 0x0000a001
-#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
-}
-#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
-#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
-}
-#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
-#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
-static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
-}
-#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000
-#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24
-static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_2 0x0000a002
-#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK;
-}
-#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00
-#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_3 0x0000a003
-#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPRIMID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK;
-}
-#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK 0x0000ff00
-#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK;
-}
-#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
-#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
-}
-#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
-#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_4 0x0000a004
-#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_5 0x0000a005
-#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
-}
-#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00
-#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val)
-{
- return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK;
-}
-
-#define REG_A6XX_VFD_CONTROL_6 0x0000a006
-#define A6XX_VFD_CONTROL_6_PRIMID4PSEN 0x00000001
-
-#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
-#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007
-#define A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT 0
-static inline uint32_t A6XX_VFD_MODE_CNTL_RENDER_MODE(enum a6xx_render_mode val)
-{
- return ((val) << A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT) & A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK;
-}
-
-#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008
-#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001
-#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
-#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
-#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2
-static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val)
-{
- return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK;
-}
-
-#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009
-#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001
-#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002
-
-#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
-
-#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
-
-#define REG_A6XX_VFD_FETCH(i0) (0x0000a010 + 0x4*(i0))
-
-static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
-
-static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
-
-static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
-
-#define REG_A6XX_VFD_DECODE(i0) (0x0000a090 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
-#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
-#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
-static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
-{
- return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
-}
-#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0
-#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5
-static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val)
-{
- return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK;
-}
-#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
-#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
-static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
-}
-#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
-#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
-static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
-}
-#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
-#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
-
-static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
-
-#define REG_A6XX_VFD_DEST_CNTL(i0) (0x0000a0d0 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
-#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
-#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
-static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
-{
- return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
-}
-#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
-#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
-static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
-{
- return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
-}
-
-#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8
-
-#define REG_A7XX_VFD_UNKNOWN_A600 0x0000a600
-
-#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0))
-
-#define REG_A7XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0))
-
-#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
-#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
-#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000
-
-#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
-
-#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802
-#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
-#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0
-static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
-}
-#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
-#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
-static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
-}
-
-#define REG_A6XX_SP_VS_OUT(i0) (0x0000a803 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
-#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
-#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
-}
-#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
-#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
-static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
-#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
-}
-#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
-#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
-static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
-}
-
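Each SP_VS_OUT_REG slot above packs two entries, A and B, each an 8-bit register id plus a 4-bit component mask, with REG_A6XX_SP_VS_OUT_REG(i) giving the i-th slot's address. A minimal sketch, assuming the A6XX_SP_VS_OUT_REG_* helpers above are in scope; the register ids 0 and 4 and the full 0xf component masks are hypothetical example values:

	/* Illustrative only: pack one VS output slot describing two outputs. */
	static uint32_t pack_sp_vs_out_reg(void)
	{
		return A6XX_SP_VS_OUT_REG_A_REGID(0) | A6XX_SP_VS_OUT_REG_A_COMPMASK(0xf) |
		       A6XX_SP_VS_OUT_REG_B_REGID(4) | A6XX_SP_VS_OUT_REG_B_COMPMASK(0xf);
	}
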
-#define REG_A6XX_SP_VS_VPC_DST(i0) (0x0000a813 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
-
-#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
-
-#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
-#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
-
-#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
-#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
-
-#define REG_A6XX_SP_VS_CONFIG 0x0000a823
-#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
-
-#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825
-#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A7XX_SP_VS_VGPR_CONFIG 0x0000a82d
-
-#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
-#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000
-
-#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
-
-#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832
-
-#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
-
-#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
-
-#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
-#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
-
-#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
-#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
-
-#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
-#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
-
-#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d
-#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A7XX_SP_HS_VGPR_CONFIG 0x0000a82f
-
-#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
-#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000
-
-#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
-
-#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
-#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
-#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0
-static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
-}
-#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
-#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
-static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
-}
-
-#define REG_A6XX_SP_DS_OUT(i0) (0x0000a843 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
-#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
-#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
-}
-#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00
-#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8
-static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000
-#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
-}
-#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000
-#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24
-static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A6XX_SP_DS_VPC_DST(i0) (0x0000a853 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
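/*
 * Illustrative sketch, assuming typical usage; the regid/compmask/loc
 * parameters are hypothetical.  The SP_*_OUT and SP_*_VPC_DST arrays pack
 * two shader outputs (fields A/B) and four byte-sized VPC output
 * locations per 32-bit register, so consecutive outputs share a word.
 */
static inline uint32_t example_sp_ds_out_pair(uint32_t regid_a, uint32_t mask_a,
					      uint32_t regid_b, uint32_t mask_b)
{
	return A6XX_SP_DS_OUT_REG_A_REGID(regid_a) |
	       A6XX_SP_DS_OUT_REG_A_COMPMASK(mask_a) |
	       A6XX_SP_DS_OUT_REG_B_REGID(regid_b) |
	       A6XX_SP_DS_OUT_REG_B_COMPMASK(mask_b);
}

static inline uint32_t example_sp_ds_vpc_dst_quad(uint32_t loc0, uint32_t loc1,
						  uint32_t loc2, uint32_t loc3)
{
	return A6XX_SP_DS_VPC_DST_REG_OUTLOC0(loc0) |
	       A6XX_SP_DS_VPC_DST_REG_OUTLOC1(loc1) |
	       A6XX_SP_DS_VPC_DST_REG_OUTLOC2(loc2) |
	       A6XX_SP_DS_VPC_DST_REG_OUTLOC3(loc3);
}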
-#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
-
-#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
-
-#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
-#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
-
-#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
-#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
-
-#define REG_A6XX_SP_DS_CONFIG 0x0000a863
-#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
-
-#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865
-#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A7XX_SP_DS_VGPR_CONFIG 0x0000a868
-
-#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
-#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000
-
-#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
-
-#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872
-
-#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873
-#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
-#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0
-static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
-}
-#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
-#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
-static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
-}
-
-#define REG_A6XX_SP_GS_OUT(i0) (0x0000a874 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
-#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
-#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
-static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
-}
-#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00
-#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8
-static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
-}
-#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000
-#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
-static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
-}
-#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000
-#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24
-static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
-}
-
-#define REG_A6XX_SP_GS_VPC_DST(i0) (0x0000a884 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
-static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
-}
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
-static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
-}
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
-static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
-}
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
-#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
-static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
-}
-
-#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
-
-#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
-
-#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
-#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
-
-#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
-#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
-
-#define REG_A6XX_SP_GS_CONFIG 0x0000a894
-#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
-
-#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896
-#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A7XX_SP_GS_VGPR_CONFIG 0x0000a899
-
-#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
-
-#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
-
-#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
-
-#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
-
-#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
-
-#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
-
-#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
-
-#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
-
-#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
-#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
-#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_FS_CTRL_REG0_LODPIXMASK 0x00800000
-#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
-#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
-#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000
-#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000
-#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
-
-#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
-
-#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
-
-#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
-
-#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
-#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
-
-#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
-#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
-#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
-#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
-static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
-{
- return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
-}
-#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
-#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
-#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
-
-#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
-#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
-
-#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
-}
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
-#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
-static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
-}
-
-#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
-#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
-#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
-#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
-static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
-}
-#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000
-#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16
-static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
-}
-#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000
-#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24
-static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
-}
-
-#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
-#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
-#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
-static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
-}
-
-#define REG_A6XX_SP_FS_OUTPUT(i0) (0x0000a98e + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
-#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
-#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
-static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
-}
-#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
-
-#define REG_A6XX_SP_FS_MRT(i0) (0x0000a996 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
-#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
-#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
-}
-#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
-#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400
-
-#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
-#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
-#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CNTL_IJ_WRITE_DISABLE 0x00000008
-#define A6XX_SP_FS_PREFETCH_CNTL_ENDOFQUAD 0x00000010
-#define A6XX_SP_FS_PREFETCH_CNTL_WRITE_COLOR_TO_OUTPUT 0x00000020
-#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK 0x00007fc0
-#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT 6
-static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK 0x01ff0000
-#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT 16
-static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK;
-}
-
-#define REG_A6XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
-#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
-#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780
-#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800
-#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000
-#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000
-#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
-}
-#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000
-#define A6XX_SP_FS_PREFETCH_CMD_UNK27 0x08000000
-#define A6XX_SP_FS_PREFETCH_CMD_BINDLESS 0x10000000
-#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xe0000000
-#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 29
-static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val)
-{
- return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
-}
-
-#define REG_A7XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0))
-
-static inline uint32_t REG_A7XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
-#define A7XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
-#define A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SRC__MASK;
-}
-#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000380
-#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
-}
-#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x00001c00
-#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 10
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
-}
-#define A7XX_SP_FS_PREFETCH_CMD_DST__MASK 0x0007e000
-#define A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT 13
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_DST__MASK;
-}
-#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x00780000
-#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 19
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
-}
-#define A7XX_SP_FS_PREFETCH_CMD_HALF 0x00800000
-#define A7XX_SP_FS_PREFETCH_CMD_BINDLESS 0x02000000
-#define A7XX_SP_FS_PREFETCH_CMD_CMD__MASK 0x3c000000
-#define A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 26
-static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val)
-{
- return ((val) << A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_CMD__MASK;
-}
-
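/*
 * Illustrative sketch, assuming typical usage; all parameters are
 * hypothetical.  The a6xx and a7xx blocks above describe the same
 * registers at 0xa99f, but the a7xx variant narrows SAMP_ID/TEX_ID and
 * repacks the remaining fields at different bit offsets, so a driver
 * must use the helpers that match the GPU generation.
 */
static inline uint32_t example_a6xx_fs_prefetch(uint32_t src, uint32_t samp_id,
						uint32_t tex_id, uint32_t dst,
						uint32_t wrmask,
						enum a6xx_tex_prefetch_cmd cmd)
{
	return A6XX_SP_FS_PREFETCH_CMD_SRC(src) |
	       A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(samp_id) |
	       A6XX_SP_FS_PREFETCH_CMD_TEX_ID(tex_id) |
	       A6XX_SP_FS_PREFETCH_CMD_DST(dst) |
	       A6XX_SP_FS_PREFETCH_CMD_WRMASK(wrmask) |
	       A6XX_SP_FS_PREFETCH_CMD_CMD(cmd);
}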
-#define REG_A6XX_SP_FS_BINDLESS_PREFETCH(i0) (0x0000a9a3 + 0x1*(i0))
-
-static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
-static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
-}
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
-static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
-}
-
-#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
-
-#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
-
-#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9
-#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
-#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
-#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000
-#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
-#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
-#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000
-#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
-
-#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
-#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
-#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0
-static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK;
-}
-#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020
-#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040
-
-#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2
-
-#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
-
-#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
-
-#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
-#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
-#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
-static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
-{
- assert(!(val & 0x1ff));
- return (((val >> 9)) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
-}
-#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
-#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
-static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
-}
-
-#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
-
-#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
-#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
-#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
-static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
-}
-#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
-
-#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
-
-#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
-#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
-
-#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd
-#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
-#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
-static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
-{
- assert(!(val & 0x7ff));
- return (((val >> 11)) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
-}
-
-#define REG_A7XX_SP_CS_UNKNOWN_A9BE 0x0000a9be
-
-#define REG_A7XX_SP_CS_VGPR_CONFIG 0x0000a9c5
-
-#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2
-#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
-#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0
-static inline uint32_t A6XX_SP_CS_CNTL_0_WGIDCONSTID(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK;
-}
-#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
-#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
-static inline uint32_t A6XX_SP_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK;
-}
-#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
-#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
-static inline uint32_t A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK;
-}
-#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
-#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT 24
-static inline uint32_t A6XX_SP_CS_CNTL_0_LOCALIDREGID(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK;
-}
-
-#define REG_A6XX_SP_CS_CNTL_1 0x0000a9c3
-#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
-#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
-static inline uint32_t A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
-}
-#define A6XX_SP_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
-#define A6XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000200
-#define A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 9
-static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_SP_CS_CNTL_1_THREADSIZE__MASK;
-}
-#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
-
-#define REG_A7XX_SP_CS_CNTL_1 0x0000a9c3
-#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
-#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
-static inline uint32_t A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
-{
- return ((val) << A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
-}
-#define A7XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000100
-#define A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 8
-static inline uint32_t A7XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_SP_CS_CNTL_1_THREADSIZE__MASK;
-}
-#define A7XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000200
-#define A7XX_SP_CS_CNTL_1_UNK15 0x00008000
-
-#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
-
-#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
-
-#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
-
-#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
-
-#define REG_A6XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
-#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A7XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0))
-
-static inline uint32_t REG_A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
-#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A6XX_SP_CS_IBO 0x0000a9f2
-
-#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
-
-#define REG_A7XX_SP_FS_VGPR_CONFIG 0x0000aa01
-
-#define REG_A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL 0x0000aa02
-#define A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL_ENABLED 0x00000001
-
-#define REG_A7XX_SP_PS_ALIASED_COMPONENTS 0x0000aa03
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK 0x0000000f
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT 0
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT0(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK 0x000000f0
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT 4
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT1(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK 0x00000f00
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT 8
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT2(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK 0x0000f000
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT 12
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT3(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK 0x000f0000
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT 16
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT4(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK 0x00f00000
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT 20
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT5(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK 0x0f000000
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT 24
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT6(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK;
-}
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK 0xf0000000
-#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT 28
-static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT7(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK;
-}
-
-#define REG_A6XX_SP_UNKNOWN_AAF2 0x0000aaf2
-
-#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
-#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
-#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006
-#define A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT 1
-static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val)
-{
- return ((val) << A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT) & A6XX_SP_MODE_CONTROL_ISAMMODE__MASK;
-}
-#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
-
-#define REG_A7XX_SP_UNKNOWN_AB01 0x0000ab01
-
-#define REG_A7XX_SP_UNKNOWN_AB02 0x0000ab02
-
-#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
-#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
-#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
-#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004
-#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008
-#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
-#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
-#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
-static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
-}
-#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000
-#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
-static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
-}
-#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000
-#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
-static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
-}
-
-#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
-
-#define REG_A6XX_SP_BINDLESS_BASE(i0) (0x0000ab10 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
-#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A7XX_SP_BINDLESS_BASE(i0) (0x0000ab0a + 0x2*(i0))
-
-static inline uint32_t REG_A7XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab0a + 0x2*i0; }
-#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A6XX_SP_IBO 0x0000ab1a
-
-#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
-
-#define REG_A7XX_SP_UNKNOWN_AB22 0x0000ab22
-
-#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
-#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
-#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
-#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004
-#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
-#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
-static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
-}
-#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800
-#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
-#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
-static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
-{
- return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
-}
-
-#define REG_A7XX_SP_2D_DST_FORMAT 0x0000a9bf
-#define A7XX_SP_2D_DST_FORMAT_NORM 0x00000001
-#define A7XX_SP_2D_DST_FORMAT_SINT 0x00000002
-#define A7XX_SP_2D_DST_FORMAT_UINT 0x00000004
-#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
-#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
-static inline uint32_t A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
-}
-#define A7XX_SP_2D_DST_FORMAT_SRGB 0x00000800
-#define A7XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
-#define A7XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
-static inline uint32_t A7XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
-{
- return ((val) << A7XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A7XX_SP_2D_DST_FORMAT_MASK__MASK;
-}
-
-#define REG_A6XX_SP_DBG_ECO_CNTL 0x0000ae00
-
-#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
-
-#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
-
-#define REG_A6XX_SP_CHICKEN_BITS 0x0000ae03
-
-#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
-#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
-
-#define REG_A7XX_SP_UNKNOWN_AE06 0x0000ae06
-
-#define REG_A7XX_SP_UNKNOWN_AE08 0x0000ae08
-
-#define REG_A7XX_SP_UNKNOWN_AE09 0x0000ae09
-
-#define REG_A7XX_SP_UNKNOWN_AE0A 0x0000ae0a
-
-#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
-#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
-#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
-#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004
-#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008
-#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
-#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae10 + 0x1*(i0))
-
-#define REG_A7XX_SP_PERFCTR_HLSQ_SEL(i0) (0x0000ae60 + 0x1*(i0))
-
-#define REG_A7XX_SP_UNKNOWN_AE6A 0x0000ae6a
-
-#define REG_A7XX_SP_UNKNOWN_AE6B 0x0000ae6b
-
-#define REG_A7XX_SP_UNKNOWN_AE6C 0x0000ae6c
-
-#define REG_A7XX_SP_READ_SEL 0x0000ae6d
-#define A7XX_SP_READ_SEL_LOCATION__MASK 0x000c0000
-#define A7XX_SP_READ_SEL_LOCATION__SHIFT 18
-static inline uint32_t A7XX_SP_READ_SEL_LOCATION(enum a7xx_state_location val)
-{
- return ((val) << A7XX_SP_READ_SEL_LOCATION__SHIFT) & A7XX_SP_READ_SEL_LOCATION__MASK;
-}
-#define A7XX_SP_READ_SEL_PIPE__MASK 0x00030000
-#define A7XX_SP_READ_SEL_PIPE__SHIFT 16
-static inline uint32_t A7XX_SP_READ_SEL_PIPE(enum a7xx_pipe val)
-{
- return ((val) << A7XX_SP_READ_SEL_PIPE__SHIFT) & A7XX_SP_READ_SEL_PIPE__MASK;
-}
-#define A7XX_SP_READ_SEL_STATETYPE__MASK 0x0000ff00
-#define A7XX_SP_READ_SEL_STATETYPE__SHIFT 8
-static inline uint32_t A7XX_SP_READ_SEL_STATETYPE(enum a7xx_statetype_id val)
-{
- return ((val) << A7XX_SP_READ_SEL_STATETYPE__SHIFT) & A7XX_SP_READ_SEL_STATETYPE__MASK;
-}
-#define A7XX_SP_READ_SEL_USPTP__MASK 0x000000f0
-#define A7XX_SP_READ_SEL_USPTP__SHIFT 4
-static inline uint32_t A7XX_SP_READ_SEL_USPTP(uint32_t val)
-{
- return ((val) << A7XX_SP_READ_SEL_USPTP__SHIFT) & A7XX_SP_READ_SEL_USPTP__MASK;
-}
-#define A7XX_SP_READ_SEL_SPTP__MASK 0x0000000f
-#define A7XX_SP_READ_SEL_SPTP__SHIFT 0
-static inline uint32_t A7XX_SP_READ_SEL_SPTP(uint32_t val)
-{
- return ((val) << A7XX_SP_READ_SEL_SPTP__SHIFT) & A7XX_SP_READ_SEL_SPTP__MASK;
-}
-
-#define REG_A7XX_SP_DBG_CNTL 0x0000ae71
-
-#define REG_A7XX_SP_UNKNOWN_AE73 0x0000ae73
-
-#define REG_A7XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae80 + 0x1*(i0))
-
-#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
-
-#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
-
-#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
-
-#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
-
-#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190
-
-#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191
-
-#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
-#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c
-#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2
-static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK;
-}
-
-#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
-#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
-#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
-static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-
-#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
-
-#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
-#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
-#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
-
-#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
-}
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
-#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
-static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
-{
- return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
-}
-
-#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
-#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff
-#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000
-#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_SP_TP_MODE_CNTL 0x0000b309
-#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK 0x00000003
-#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT 0
-static inline uint32_t A6XX_SP_TP_MODE_CNTL_ISAMMODE(enum a6xx_isam_mode val)
-{
- return ((val) << A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT) & A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK;
-}
-#define A6XX_SP_TP_MODE_CNTL_UNK3__MASK 0x000000fc
-#define A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT 2
-static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK;
-}
-
-#define REG_A7XX_SP_UNKNOWN_B310 0x0000b310
-
-#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
-#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
-#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
-#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
-#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
-#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
-#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
-#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
-
-#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
-#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
-#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
-#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
-static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
-
-#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
-#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
-#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
-}
-#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
-#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
-static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
-}
-
-#define REG_A7XX_SP_PS_2D_SRC_INFO 0x0000b2c0
-#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
-#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
-#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
-static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
-#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
-static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
-#define A7XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
-#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
-#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
-static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
-#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
-#define A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
-static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
-
-#define REG_A7XX_SP_PS_2D_SRC_SIZE 0x0000b2c1
-#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
-#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
-#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
-static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A7XX_SP_PS_2D_SRC 0x0000b2c2
-
-#define REG_A7XX_SP_PS_2D_SRC_PITCH 0x0000b2c4
-#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
-#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
-}
-#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
-#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
-static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
-}
-
-#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
-
-#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
-#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
-#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
-}
-
-#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
-
-#define REG_A7XX_SP_PS_2D_SRC_PLANE1 0x0000b2c5
-
-#define REG_A7XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b2c7
-#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
-#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
-}
-
-#define REG_A7XX_SP_PS_2D_SRC_PLANE2 0x0000b2c8
-
-#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
-
-#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
-}
-
-#define REG_A7XX_SP_PS_2D_SRC_FLAGS 0x0000b2ca
-
-#define REG_A7XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b2cc
-#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
-#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
-}
-
-#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
-
-#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce
-
-#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf
-
-#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0
-
-#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
-#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
-#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
-#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A7XX_SP_PS_UNKNOWN_B4CD 0x0000b2cd
-
-#define REG_A7XX_SP_PS_UNKNOWN_B4CE 0x0000b2ce
-
-#define REG_A7XX_SP_PS_UNKNOWN_B4CF 0x0000b2cf
-
-#define REG_A7XX_SP_PS_UNKNOWN_B4D0 0x0000b2d0
-
-#define REG_A7XX_SP_PS_2D_WINDOW_OFFSET 0x0000b2d1
-#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK 0x00003fff
-#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK;
-}
-#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK 0x3fff0000
-#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A7XX_SP_PS_UNKNOWN_B2D2 0x0000b2d2
-
-#define REG_A7XX_SP_WINDOW_OFFSET 0x0000ab21
-#define A7XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
-#define A7XX_SP_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A7XX_SP_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A7XX_SP_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_WINDOW_OFFSET_X__MASK;
-}
-#define A7XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
-#define A7XX_SP_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A7XX_SP_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A7XX_SP_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600
-
-#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
-
-#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602
-
-#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
-#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001
-#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
-#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
-static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
-{
- return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK;
-}
-#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
-#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010
-#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4
-static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
-{
- return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK;
-}
-#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0
-#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6
-static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
-{
- return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK;
-}
-
-#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
-
-#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
-
-#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
-
-#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
-
-#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
-
-#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL(i0) (0x0000b610 + 0x1*(i0))
-
-#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
-#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
-#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
-#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
-#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_VS_CNTL 0x0000a827
-#define A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_VS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_HS_CNTL 0x0000a83f
-#define A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_HS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_DS_CNTL 0x0000a867
-#define A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_DS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_GS_CNTL 0x0000a898
-#define A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_GS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_FS_UNKNOWN_A9AA 0x0000a9aa
-#define A7XX_HLSQ_FS_UNKNOWN_A9AA_CONSTS_LOAD_DISABLE 0x00000001
-
-#define REG_A7XX_HLSQ_UNKNOWN_A9AC 0x0000a9ac
-
-#define REG_A7XX_HLSQ_UNKNOWN_A9AD 0x0000a9ad
-
-#define REG_A7XX_HLSQ_UNKNOWN_A9AE 0x0000a9ae
-#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK 0x000000ff
-#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT 0
-static inline uint32_t A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT) & A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK;
-}
-#define A7XX_HLSQ_UNKNOWN_A9AE_UNK8 0x00000100
-#define A7XX_HLSQ_UNKNOWN_A9AE_UNK9 0x00000200
-
-#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
-
-#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
-
-#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
-
-#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980
-#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
-#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
-static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
-}
-#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
-#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
-#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
-static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK;
-}
-
-#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981
-
-#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
-#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007
-#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
-}
-
-#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
-#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
-#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
-#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
-static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
-}
-#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
-#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
-static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
-}
-
-#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
-}
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
-}
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
-#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
-}
-
-#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
-#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
-}
-#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
-#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
-}
-#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
-#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
-static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
-#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
-static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
-}
-
-#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
-#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
-#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
-#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_FS_CNTL_0 0x0000a9c6
-#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
-#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
-static inline uint32_t A7XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
-}
-#define A7XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
-#define A7XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
-#define A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
-static inline uint32_t A7XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A7XX_HLSQ_FS_CNTL_0_UNK2__MASK;
-}
-
-#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7
-#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007
-#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
-}
-
-#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8
-#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
-#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
-}
-
-#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
-}
-
-#define REG_A7XX_HLSQ_CONTROL_4_REG 0x0000a9ca
-#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
-}
-#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
-}
-#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
-#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
-static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
-#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
-static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
-}
-
-#define REG_A7XX_HLSQ_CONTROL_5_REG 0x0000a9cb
-#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_CNTL 0x0000a9cd
-#define A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_CS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
-#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
-#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
-}
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
-}
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
-}
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
-#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
-#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
-#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
-#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
-#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
-#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
-#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
-#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
-#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
-#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
-#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
-#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
-#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998
-#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
-#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
-#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
-#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
-static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
-}
-#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
-
-#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
-
-#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
-
-#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_0 0x0000a9d4
-#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
-#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
-}
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
-}
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
-}
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
-#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_1 0x0000a9d5
-#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_2 0x0000a9d6
-#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_3 0x0000a9d7
-#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_4 0x0000a9d8
-#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_5 0x0000a9d9
-#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_NDRANGE_6 0x0000a9da
-#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
-#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_X 0x0000a9dc
-
-#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000a9dd
-
-#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000a9de
-
-#define REG_A7XX_HLSQ_CS_CNTL_1 0x0000a9db
-#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
-#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
-}
-#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
-#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
-static inline uint32_t A7XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
-}
-#define A7XX_HLSQ_CS_CNTL_1_UNK11 0x00000800
-#define A7XX_HLSQ_CS_CNTL_1_UNK22 0x00400000
-#define A7XX_HLSQ_CS_CNTL_1_UNK26 0x04000000
-#define A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK 0x78000000
-#define A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT 27
-static inline uint32_t A7XX_HLSQ_CS_CNTL_1_YALIGN(enum a7xx_cs_yalign val)
-{
- return ((val) << A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT) & A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK;
-}
-
-#define REG_A7XX_HLSQ_CS_LOCAL_SIZE 0x0000a9df
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK 0x00000ffc
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT 2
-static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK;
-}
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK 0x003ff000
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT 12
-static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK;
-}
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK 0xffc00000
-#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK;
-}
-
-#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
-
-#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
-
-#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
-
-#define REG_A6XX_HLSQ_CS_BINDLESS_BASE(i0) (0x0000b9c0 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
-#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0
-#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f
-#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT) & A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK;
-}
-#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK5 0x00000020
-#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6 0x00000040
-
-#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00
-#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff
-#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK;
-}
-
-#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01
-#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
-#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK;
-}
-
-#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02
-#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000
-#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16
-static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK;
-}
-#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f
-#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0
-static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val)
-{
- return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK;
-}
-
-#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08
-#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
-#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
-#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
-#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
-#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
-#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
-#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
-#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
-#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000
-#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100
-#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00
-#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
-static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
-}
-#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000
-#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14
-static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
-}
-
-#define REG_A7XX_HLSQ_INVALIDATE_CMD 0x0000ab1f
-#define A7XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
-#define A7XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
-#define A7XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
-#define A7XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
-#define A7XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
-#define A7XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
-#define A7XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
-#define A7XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
-#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x0001fe00
-#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
-static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
-}
-#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x01fe0000
-#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 17
-static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
-}
-
-#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
-#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
-}
-#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100
-#define A6XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_FS_CNTL 0x0000ab03
-#define A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A7XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
-}
-#define A7XX_HLSQ_FS_CNTL_ENABLED 0x00000100
-#define A7XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-
-#define REG_A7XX_HLSQ_SHARED_CONSTS_IMM(i0) (0x0000ab40 + 0x1*(i0))
-
-#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11
-#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001
-
-#define REG_A6XX_HLSQ_BINDLESS_BASE(i0) (0x0000bb20 + 0x2*(i0))
-
-static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
-#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
-#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
-static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
-{
- return ((val) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
-}
-#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
-#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
-}
-
-#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80
-#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
-#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8
-static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK;
-}
-#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f
-#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0
-static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
-{
- return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK;
-}
-
-#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
-
-#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
-
-#define REG_A6XX_HLSQ_DBG_ECO_CNTL 0x0000be04
-
-#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
-
-#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(i0) (0x0000be10 + 0x1*(i0))
-
-#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
-
-#define REG_A7XX_SP_AHB_READ_APERTURE 0x0000c000
-
-#define REG_A7XX_SP_UNKNOWN_0CE2 0x00000ce2
-
-#define REG_A7XX_SP_UNKNOWN_0CE4 0x00000ce4
-
-#define REG_A7XX_SP_UNKNOWN_0CE6 0x00000ce6
-
-#define REG_A6XX_CP_EVENT_START 0x0000d600
-#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
-#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK;
-}
-
-#define REG_A6XX_CP_EVENT_END 0x0000d601
-#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff
-#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK;
-}
-
-#define REG_A6XX_CP_2D_EVENT_START 0x0000d700
-#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff
-#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK;
-}
-
-#define REG_A6XX_CP_2D_EVENT_END 0x0000d701
-#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff
-#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0
-static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val)
-{
- return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK;
-}
-
-#define REG_A6XX_TEX_SAMP_0 0x00000000
-#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
-#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
-#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
-static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
-{
- return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
-}
-#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
-#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
-static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
-{
- return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
-}
-#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
-#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
-static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
-{
- return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
-}
-#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
-#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
-static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
-{
- return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
-}
-#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
-#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
-static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
-{
- return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
-}
-#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
-#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
-static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
-{
- return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
-}
-#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
-#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
-static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
-{
- return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
-}
-
-#define REG_A6XX_TEX_SAMP_1 0x00000001
-#define A6XX_TEX_SAMP_1_CLAMPENABLE 0x00000001
-#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
-#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
-static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
-{
- return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
-}
-#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
-#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
-#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
-#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
-#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
-static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
-}
-#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
-#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
-static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
-}
-
-#define REG_A6XX_TEX_SAMP_2 0x00000002
-#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003
-#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0
-static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val)
-{
- return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
-}
-#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020
-#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80
-#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7
-static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val)
-{
- return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK;
-}
-
-#define REG_A6XX_TEX_SAMP_3 0x00000003
-
-#define REG_A6XX_TEX_CONST_0 0x00000000
-#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
-#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
-{
- return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
-}
-#define A6XX_TEX_CONST_0_SRGB 0x00000004
-#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
-#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
-static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
-{
- return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
-}
-#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
-#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
-static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
-{
- return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
-}
-#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
-#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
-static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
-{
- return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
-}
-#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
-#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
-static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
-{
- return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
-}
-#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
-#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
-static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
-}
-#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000
-#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000
-#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
-#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
-static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
-{
- return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
-}
-#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
-#define A6XX_TEX_CONST_0_FMT__SHIFT 22
-static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val)
-{
- return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
-}
-#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
-#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
-static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
-{
- return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_1 0x00000001
-#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
-#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
-}
-#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
-#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
-static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_2 0x00000002
-#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK 0x0000fff0
-#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT 4
-static inline uint32_t A6XX_TEX_CONST_2_STRUCTSIZETEXELS(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT) & A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK;
-}
-#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK 0x003f0000
-#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT 16
-static inline uint32_t A6XX_TEX_CONST_2_STARTOFFSETTEXELS(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT) & A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK;
-}
-#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
-#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK;
-}
-#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
-#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
-static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
-}
-#define A6XX_TEX_CONST_2_TYPE__MASK 0xe0000000
-#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
-static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
-{
- return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_3 0x00000003
-#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x007fffff
-#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
-}
-#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
-#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
-static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
-{
- assert(!(val & 0xfff));
- return (((val >> 12)) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
-}
-#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000
-#define A6XX_TEX_CONST_3_FLAG 0x10000000
-
-#define REG_A6XX_TEX_CONST_4 0x00000004
-#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
-#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
-static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_5 0x00000005
-#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
-#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
-}
-#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
-#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
-static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_6 0x00000006
-#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK 0x00000fff
-#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_6_MIN_LOD_CLAMP(float val)
-{
- return ((((uint32_t)(val * 256.0))) << A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT) & A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK;
-}
-#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00
-#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8
-static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_7 0x00000007
-#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
-#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
-static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
-{
- assert(!(val & 0x1f));
- return (((val >> 5)) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_8 0x00000008
-#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
-#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_9 0x00000009
-#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff
-#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
-{
- assert(!(val & 0xf));
- return (((val >> 4)) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_10 0x0000000a
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
-{
- assert(!(val & 0x3f));
- return (((val >> 6)) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
-}
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8
-static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK;
-}
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000
-#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12
-static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val)
-{
- return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK;
-}
-
-#define REG_A6XX_TEX_CONST_11 0x0000000b
-
-#define REG_A6XX_TEX_CONST_12 0x0000000c
-
-#define REG_A6XX_TEX_CONST_13 0x0000000d
-
-#define REG_A6XX_TEX_CONST_14 0x0000000e
-
-#define REG_A6XX_TEX_CONST_15 0x0000000f
-
-#define REG_A6XX_UBO_0 0x00000000
-#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff
-#define A6XX_UBO_0_BASE_LO__SHIFT 0
-static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val)
-{
- return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK;
-}
-
-#define REG_A6XX_UBO_1 0x00000001
-#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff
-#define A6XX_UBO_1_BASE_HI__SHIFT 0
-static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val)
-{
- return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK;
-}
-#define A6XX_UBO_1_SIZE__MASK 0xfffe0000
-#define A6XX_UBO_1_SIZE__SHIFT 17
-static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val)
-{
- return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK;
-}
-
-#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
-
-#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
-
-#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
-
-#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
-
-#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
-
-#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
-
-#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
-
-#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
-
-#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
-
-#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039
-
-#ifdef __cplusplus
-template<chip CHIP> constexpr inline uint16_t CMD_REGS[] = {};
-template<chip CHIP> constexpr inline uint16_t RP_BLIT_REGS[] = {};
-template<> constexpr inline uint16_t CMD_REGS<A6XX>[] = {
- 0xc03,
- 0xc04,
- 0xc30,
- 0xc31,
- 0xc32,
- 0xc33,
- 0xc34,
- 0xc35,
- 0xc36,
- 0xc37,
- 0xe12,
- 0xe17,
- 0xe19,
- 0x8099,
- 0x80af,
- 0x810a,
- 0x8110,
- 0x8600,
- 0x880e,
- 0x8811,
- 0x8818,
- 0x8819,
- 0x881a,
- 0x881b,
- 0x881c,
- 0x881d,
- 0x881e,
- 0x8864,
- 0x8891,
- 0x88f0,
- 0x8927,
- 0x8928,
- 0x8e01,
- 0x8e04,
- 0x8e07,
- 0x9210,
- 0x9211,
- 0x9218,
- 0x9219,
- 0x921a,
- 0x921b,
- 0x921c,
- 0x921d,
- 0x921e,
- 0x921f,
- 0x9220,
- 0x9221,
- 0x9222,
- 0x9223,
- 0x9224,
- 0x9225,
- 0x9226,
- 0x9227,
- 0x9228,
- 0x9229,
- 0x922a,
- 0x922b,
- 0x922c,
- 0x922d,
- 0x922e,
- 0x922f,
- 0x9230,
- 0x9231,
- 0x9232,
- 0x9233,
- 0x9234,
- 0x9235,
- 0x9236,
- 0x9300,
- 0x9600,
- 0x9601,
- 0x9602,
- 0x9e08,
- 0x9e09,
- 0x9e72,
- 0xa007,
- 0xa009,
- 0xa8a0,
- 0xa8a1,
- 0xa8a2,
- 0xa8a3,
- 0xa8a4,
- 0xa8a5,
- 0xa8a6,
- 0xa8a7,
- 0xa8a8,
- 0xa8a9,
- 0xa8aa,
- 0xa8ab,
- 0xa8ac,
- 0xa8ad,
- 0xa8ae,
- 0xa8af,
- 0xa9a8,
- 0xa9b0,
- 0xa9b1,
- 0xa9b2,
- 0xa9b3,
- 0xa9b4,
- 0xa9b5,
- 0xa9b6,
- 0xa9b7,
- 0xa9b8,
- 0xa9b9,
- 0xa9ba,
- 0xa9bb,
- 0xa9bc,
- 0xa9bd,
- 0xa9c2,
- 0xa9c3,
- 0xa9e2,
- 0xa9e3,
- 0xa9e6,
- 0xa9e7,
- 0xa9e8,
- 0xa9e9,
- 0xa9ea,
- 0xa9eb,
- 0xa9ec,
- 0xa9ed,
- 0xa9ee,
- 0xa9ef,
- 0xa9f0,
- 0xa9f1,
- 0xaaf2,
- 0xab1a,
- 0xab1b,
- 0xab20,
- 0xae00,
- 0xae03,
- 0xae04,
- 0xae0f,
- 0xb180,
- 0xb181,
- 0xb182,
- 0xb183,
- 0xb302,
- 0xb303,
- 0xb309,
- 0xb600,
- 0xb602,
- 0xb605,
- 0xb987,
- 0xb9d0,
- 0xbb08,
- 0xbb11,
- 0xbb20,
- 0xbb21,
- 0xbb22,
- 0xbb23,
- 0xbb24,
- 0xbb25,
- 0xbb26,
- 0xbb27,
- 0xbb28,
- 0xbb29,
- 0xbe00,
- 0xbe01,
- 0xbe04,
-};
-template<> constexpr inline uint16_t CMD_REGS<A7XX>[] = {
- 0xc03,
- 0xc04,
- 0xc30,
- 0xc31,
- 0xc32,
- 0xc33,
- 0xc34,
- 0xc35,
- 0xc36,
- 0xc37,
- 0xce2,
- 0xce3,
- 0xce4,
- 0xce5,
- 0xce6,
- 0xce7,
- 0xe10,
- 0xe11,
- 0xe12,
- 0xe17,
- 0xe19,
- 0x8008,
- 0x8009,
- 0x800a,
- 0x800b,
- 0x800c,
- 0x8099,
- 0x80a7,
- 0x80af,
- 0x80f4,
- 0x80f5,
- 0x80f5,
- 0x80f6,
- 0x80f6,
- 0x80f7,
- 0x80f8,
- 0x80f9,
- 0x80f9,
- 0x80fa,
- 0x80fa,
- 0x80fb,
- 0x810a,
- 0x810b,
- 0x8110,
- 0x8120,
- 0x8121,
- 0x8600,
- 0x880e,
- 0x8811,
- 0x8818,
- 0x8819,
- 0x881a,
- 0x881b,
- 0x881c,
- 0x881d,
- 0x881e,
- 0x8864,
- 0x8891,
- 0x8899,
- 0x88e5,
- 0x88f0,
- 0x8927,
- 0x8928,
- 0x8e01,
- 0x8e04,
- 0x8e06,
- 0x8e07,
- 0x8e09,
- 0x8e79,
- 0x9218,
- 0x9219,
- 0x921a,
- 0x921b,
- 0x921c,
- 0x921d,
- 0x921e,
- 0x921f,
- 0x9220,
- 0x9221,
- 0x9222,
- 0x9223,
- 0x9224,
- 0x9225,
- 0x9226,
- 0x9227,
- 0x9228,
- 0x9229,
- 0x922a,
- 0x922b,
- 0x922c,
- 0x922d,
- 0x922e,
- 0x922f,
- 0x9230,
- 0x9231,
- 0x9232,
- 0x9233,
- 0x9234,
- 0x9235,
- 0x9236,
- 0x9300,
- 0x9600,
- 0x9601,
- 0x9602,
- 0x9810,
- 0x9811,
- 0x9e24,
- 0x9e72,
- 0xa007,
- 0xa009,
- 0xa600,
- 0xa82d,
- 0xa82f,
- 0xa868,
- 0xa899,
- 0xa8a0,
- 0xa8a1,
- 0xa8a2,
- 0xa8a3,
- 0xa8a4,
- 0xa8a5,
- 0xa8a6,
- 0xa8a7,
- 0xa8a8,
- 0xa8a9,
- 0xa8aa,
- 0xa8ab,
- 0xa8ac,
- 0xa8ad,
- 0xa8ae,
- 0xa8af,
- 0xa9a8,
- 0xa9ac,
- 0xa9ad,
- 0xa9b0,
- 0xa9b1,
- 0xa9b2,
- 0xa9b3,
- 0xa9b4,
- 0xa9b5,
- 0xa9b6,
- 0xa9b7,
- 0xa9b8,
- 0xa9b9,
- 0xa9ba,
- 0xa9bb,
- 0xa9bc,
- 0xa9bd,
- 0xa9be,
- 0xa9c2,
- 0xa9c3,
- 0xa9c5,
- 0xa9cd,
- 0xa9df,
- 0xa9e2,
- 0xa9e3,
- 0xa9e6,
- 0xa9e7,
- 0xa9e8,
- 0xa9e9,
- 0xa9ea,
- 0xa9eb,
- 0xa9ec,
- 0xa9ed,
- 0xa9ee,
- 0xa9ef,
- 0xa9f0,
- 0xa9f1,
- 0xa9f2,
- 0xa9f3,
- 0xa9f4,
- 0xa9f5,
- 0xa9f6,
- 0xa9f7,
- 0xaa01,
- 0xaa02,
- 0xaa03,
- 0xaaf2,
- 0xab01,
- 0xab02,
- 0xab1a,
- 0xab1b,
- 0xab1f,
- 0xab20,
- 0xab22,
- 0xae00,
- 0xae03,
- 0xae04,
- 0xae06,
- 0xae08,
- 0xae09,
- 0xae0a,
- 0xae0f,
- 0xae6a,
- 0xae6b,
- 0xae6c,
- 0xae73,
- 0xb180,
- 0xb181,
- 0xb182,
- 0xb183,
- 0xb302,
- 0xb303,
- 0xb309,
- 0xb310,
- 0xb600,
- 0xb602,
- 0xb608,
- 0xb609,
- 0xb60a,
- 0xb60b,
- 0xb60c,
-};
-template<> constexpr inline uint16_t RP_BLIT_REGS<A6XX>[] = {
- 0xc02,
- 0xc06,
- 0xc10,
- 0xc11,
- 0xc12,
- 0xc13,
- 0xc14,
- 0xc15,
- 0xc16,
- 0xc17,
- 0xc18,
- 0xc19,
- 0xc1a,
- 0xc1b,
- 0xc1c,
- 0xc1d,
- 0xc1e,
- 0xc1f,
- 0xc20,
- 0xc21,
- 0xc22,
- 0xc23,
- 0xc24,
- 0xc25,
- 0xc26,
- 0xc27,
- 0xc28,
- 0xc29,
- 0xc2a,
- 0xc2b,
- 0xc2c,
- 0xc2d,
- 0xc2e,
- 0xc2f,
- 0xc38,
- 0xc39,
- 0xc3a,
- 0xc3b,
- 0xc3c,
- 0xc3d,
- 0xc3e,
- 0xc3f,
- 0xc40,
- 0xc41,
- 0xc42,
- 0xc43,
- 0xc44,
- 0xc45,
- 0xc46,
- 0xc47,
- 0xc48,
- 0xc49,
- 0xc4a,
- 0xc4b,
- 0xc4c,
- 0xc4d,
- 0xc4e,
- 0xc4f,
- 0xc50,
- 0xc51,
- 0xc52,
- 0xc53,
- 0xc54,
- 0xc55,
- 0xc56,
- 0xc57,
- 0xc58,
- 0xc59,
- 0xc5a,
- 0xc5b,
- 0xc5c,
- 0xc5d,
- 0xc5e,
- 0xc5f,
- 0xc60,
- 0xc61,
- 0xc62,
- 0xc63,
- 0xc64,
- 0xc65,
- 0xc66,
- 0xc67,
- 0xc68,
- 0xc69,
- 0xc6a,
- 0xc6b,
- 0xc6c,
- 0xc6d,
- 0xc6e,
- 0xc6f,
- 0xc70,
- 0xc71,
- 0xc72,
- 0xc73,
- 0xc74,
- 0xc75,
- 0xc76,
- 0xc77,
- 0xc78,
- 0xc79,
- 0xc7a,
- 0xc7b,
- 0xc7c,
- 0xc7d,
- 0xc7e,
- 0xc7f,
- 0xc80,
- 0xc81,
- 0xc82,
- 0xc83,
- 0xc84,
- 0xc85,
- 0xc86,
- 0xc87,
- 0xc88,
- 0xc89,
- 0xc8a,
- 0xc8b,
- 0xc8c,
- 0xc8d,
- 0xc8e,
- 0xc8f,
- 0xc90,
- 0xc91,
- 0xc92,
- 0xc93,
- 0xc94,
- 0xc95,
- 0xc96,
- 0xc97,
- 0x8000,
- 0x8001,
- 0x8002,
- 0x8003,
- 0x8004,
- 0x8005,
- 0x8006,
- 0x8010,
- 0x8011,
- 0x8012,
- 0x8013,
- 0x8014,
- 0x8015,
- 0x8016,
- 0x8017,
- 0x8018,
- 0x8019,
- 0x801a,
- 0x801b,
- 0x801c,
- 0x801d,
- 0x801e,
- 0x801f,
- 0x8020,
- 0x8021,
- 0x8022,
- 0x8023,
- 0x8024,
- 0x8025,
- 0x8026,
- 0x8027,
- 0x8028,
- 0x8029,
- 0x802a,
- 0x802b,
- 0x802c,
- 0x802d,
- 0x802e,
- 0x802f,
- 0x8030,
- 0x8031,
- 0x8032,
- 0x8033,
- 0x8034,
- 0x8035,
- 0x8036,
- 0x8037,
- 0x8038,
- 0x8039,
- 0x803a,
- 0x803b,
- 0x803c,
- 0x803d,
- 0x803e,
- 0x803f,
- 0x8040,
- 0x8041,
- 0x8042,
- 0x8043,
- 0x8044,
- 0x8045,
- 0x8046,
- 0x8047,
- 0x8048,
- 0x8049,
- 0x804a,
- 0x804b,
- 0x804c,
- 0x804d,
- 0x804e,
- 0x804f,
- 0x8050,
- 0x8051,
- 0x8052,
- 0x8053,
- 0x8054,
- 0x8055,
- 0x8056,
- 0x8057,
- 0x8058,
- 0x8059,
- 0x805a,
- 0x805b,
- 0x805c,
- 0x805d,
- 0x805e,
- 0x805f,
- 0x8060,
- 0x8061,
- 0x8062,
- 0x8063,
- 0x8064,
- 0x8065,
- 0x8066,
- 0x8067,
- 0x8068,
- 0x8069,
- 0x806a,
- 0x806b,
- 0x806c,
- 0x806d,
- 0x806e,
- 0x806f,
- 0x8070,
- 0x8071,
- 0x8072,
- 0x8073,
- 0x8074,
- 0x8075,
- 0x8076,
- 0x8077,
- 0x8078,
- 0x8079,
- 0x807a,
- 0x807b,
- 0x807c,
- 0x807d,
- 0x807e,
- 0x807f,
- 0x8080,
- 0x8081,
- 0x8082,
- 0x8083,
- 0x8084,
- 0x8085,
- 0x8086,
- 0x8087,
- 0x8088,
- 0x8089,
- 0x808a,
- 0x808b,
- 0x808c,
- 0x808d,
- 0x808e,
- 0x808f,
- 0x8090,
- 0x8091,
- 0x8092,
- 0x8094,
- 0x8095,
- 0x8096,
- 0x8097,
- 0x8098,
- 0x809b,
- 0x809c,
- 0x809d,
- 0x80a0,
- 0x80a1,
- 0x80a2,
- 0x80a3,
- 0x80a4,
- 0x80a5,
- 0x80a6,
- 0x80b0,
- 0x80b1,
- 0x80b2,
- 0x80b3,
- 0x80b4,
- 0x80b5,
- 0x80b6,
- 0x80b7,
- 0x80b8,
- 0x80b9,
- 0x80ba,
- 0x80bb,
- 0x80bc,
- 0x80bd,
- 0x80be,
- 0x80bf,
- 0x80c0,
- 0x80c1,
- 0x80c2,
- 0x80c3,
- 0x80c4,
- 0x80c5,
- 0x80c6,
- 0x80c7,
- 0x80c8,
- 0x80c9,
- 0x80ca,
- 0x80cb,
- 0x80cc,
- 0x80cd,
- 0x80ce,
- 0x80cf,
- 0x80d0,
- 0x80d1,
- 0x80d2,
- 0x80d3,
- 0x80d4,
- 0x80d5,
- 0x80d6,
- 0x80d7,
- 0x80d8,
- 0x80d9,
- 0x80da,
- 0x80db,
- 0x80dc,
- 0x80dd,
- 0x80de,
- 0x80df,
- 0x80e0,
- 0x80e1,
- 0x80e2,
- 0x80e3,
- 0x80e4,
- 0x80e5,
- 0x80e6,
- 0x80e7,
- 0x80e8,
- 0x80e9,
- 0x80ea,
- 0x80eb,
- 0x80ec,
- 0x80ed,
- 0x80ee,
- 0x80ef,
- 0x80f0,
- 0x80f1,
- 0x8100,
- 0x8101,
- 0x8102,
- 0x8103,
- 0x8104,
- 0x8105,
- 0x8106,
- 0x8107,
- 0x8109,
- 0x8114,
- 0x8115,
- 0x8400,
- 0x8401,
- 0x8402,
- 0x8403,
- 0x8404,
- 0x8405,
- 0x8406,
- 0x840a,
- 0x840b,
- 0x8800,
- 0x8801,
- 0x8802,
- 0x8803,
- 0x8804,
- 0x8805,
- 0x8806,
- 0x8809,
- 0x880a,
- 0x880b,
- 0x880c,
- 0x880d,
- 0x880f,
- 0x8810,
- 0x8820,
- 0x8821,
- 0x8822,
- 0x8823,
- 0x8824,
- 0x8825,
- 0x8826,
- 0x8827,
- 0x8828,
- 0x8829,
- 0x882a,
- 0x882b,
- 0x882c,
- 0x882d,
- 0x882e,
- 0x882f,
- 0x8830,
- 0x8831,
- 0x8832,
- 0x8833,
- 0x8834,
- 0x8835,
- 0x8836,
- 0x8837,
- 0x8838,
- 0x8839,
- 0x883a,
- 0x883b,
- 0x883c,
- 0x883d,
- 0x883e,
- 0x883f,
- 0x8840,
- 0x8841,
- 0x8842,
- 0x8843,
- 0x8844,
- 0x8845,
- 0x8846,
- 0x8847,
- 0x8848,
- 0x8849,
- 0x884a,
- 0x884b,
- 0x884c,
- 0x884d,
- 0x884e,
- 0x884f,
- 0x8850,
- 0x8851,
- 0x8852,
- 0x8853,
- 0x8854,
- 0x8855,
- 0x8856,
- 0x8857,
- 0x8858,
- 0x8859,
- 0x885a,
- 0x885b,
- 0x885c,
- 0x885d,
- 0x885e,
- 0x885f,
- 0x8860,
- 0x8861,
- 0x8862,
- 0x8863,
- 0x8865,
- 0x8870,
- 0x8871,
- 0x8872,
- 0x8873,
- 0x8874,
- 0x8875,
- 0x8876,
- 0x8877,
- 0x8878,
- 0x8879,
- 0x8880,
- 0x8881,
- 0x8882,
- 0x8883,
- 0x8884,
- 0x8885,
- 0x8886,
- 0x8887,
- 0x8888,
- 0x8889,
- 0x8890,
- 0x8898,
- 0x88c0,
- 0x88c1,
- 0x88d0,
- 0x88d1,
- 0x88d2,
- 0x88d3,
- 0x88d4,
- 0x88d5,
- 0x88d6,
- 0x88d7,
- 0x88d8,
- 0x88d9,
- 0x88da,
- 0x88db,
- 0x88dc,
- 0x88dd,
- 0x88de,
- 0x88df,
- 0x88e0,
- 0x88e1,
- 0x88e2,
- 0x88e3,
- 0x8900,
- 0x8901,
- 0x8902,
- 0x8903,
- 0x8904,
- 0x8905,
- 0x8906,
- 0x8907,
- 0x8908,
- 0x8909,
- 0x890a,
- 0x890b,
- 0x890c,
- 0x890d,
- 0x890e,
- 0x890f,
- 0x8910,
- 0x8911,
- 0x8912,
- 0x8913,
- 0x8914,
- 0x8915,
- 0x8916,
- 0x8917,
- 0x8918,
- 0x8919,
- 0x891a,
- 0x8a00,
- 0x8a10,
- 0x8a20,
- 0x8a30,
- 0x8c00,
- 0x8c01,
- 0x8c17,
- 0x8c18,
- 0x8c19,
- 0x8c1a,
- 0x8c1b,
- 0x8c1c,
- 0x8c1d,
- 0x8c1e,
- 0x8c1f,
- 0x8c20,
- 0x8c21,
- 0x8c22,
- 0x8c23,
- 0x8c24,
- 0x8c25,
- 0x8c2c,
- 0x8c2d,
- 0x8c2e,
- 0x8c2f,
- 0x9100,
- 0x9101,
- 0x9102,
- 0x9103,
- 0x9104,
- 0x9105,
- 0x9106,
- 0x9107,
- 0x9108,
- 0x9200,
- 0x9201,
- 0x9202,
- 0x9203,
- 0x9204,
- 0x9205,
- 0x9206,
- 0x9207,
- 0x9208,
- 0x9209,
- 0x920a,
- 0x920b,
- 0x920c,
- 0x920d,
- 0x920e,
- 0x920f,
- 0x9212,
- 0x9213,
- 0x9214,
- 0x9215,
- 0x9216,
- 0x9217,
- 0x9301,
- 0x9302,
- 0x9303,
- 0x9304,
- 0x9305,
- 0x9306,
- 0x9311,
- 0x9312,
- 0x9313,
- 0x9314,
- 0x9315,
- 0x9316,
- 0x9800,
- 0x9801,
- 0x9802,
- 0x9803,
- 0x9804,
- 0x9805,
- 0x9806,
- 0x9808,
- 0x9980,
- 0x9981,
- 0x9b00,
- 0x9b01,
- 0x9b02,
- 0x9b03,
- 0x9b04,
- 0x9b05,
- 0x9b06,
- 0x9b07,
- 0x9b08,
- 0xa000,
- 0xa001,
- 0xa002,
- 0xa003,
- 0xa004,
- 0xa005,
- 0xa006,
- 0xa008,
- 0xa00e,
- 0xa00f,
- 0xa010,
- 0xa011,
- 0xa012,
- 0xa013,
- 0xa014,
- 0xa015,
- 0xa016,
- 0xa017,
- 0xa018,
- 0xa019,
- 0xa01a,
- 0xa01b,
- 0xa01c,
- 0xa01d,
- 0xa01e,
- 0xa01f,
- 0xa020,
- 0xa021,
- 0xa022,
- 0xa023,
- 0xa024,
- 0xa025,
- 0xa026,
- 0xa027,
- 0xa028,
- 0xa029,
- 0xa02a,
- 0xa02b,
- 0xa02c,
- 0xa02d,
- 0xa02e,
- 0xa02f,
- 0xa030,
- 0xa031,
- 0xa032,
- 0xa033,
- 0xa034,
- 0xa035,
- 0xa036,
- 0xa037,
- 0xa038,
- 0xa039,
- 0xa03a,
- 0xa03b,
- 0xa03c,
- 0xa03d,
- 0xa03e,
- 0xa03f,
- 0xa040,
- 0xa041,
- 0xa042,
- 0xa043,
- 0xa044,
- 0xa045,
- 0xa046,
- 0xa047,
- 0xa048,
- 0xa049,
- 0xa04a,
- 0xa04b,
- 0xa04c,
- 0xa04d,
- 0xa04e,
- 0xa04f,
- 0xa050,
- 0xa051,
- 0xa052,
- 0xa053,
- 0xa054,
- 0xa055,
- 0xa056,
- 0xa057,
- 0xa058,
- 0xa059,
- 0xa05a,
- 0xa05b,
- 0xa05c,
- 0xa05d,
- 0xa05e,
- 0xa05f,
- 0xa060,
- 0xa061,
- 0xa062,
- 0xa063,
- 0xa064,
- 0xa065,
- 0xa066,
- 0xa067,
- 0xa068,
- 0xa069,
- 0xa06a,
- 0xa06b,
- 0xa06c,
- 0xa06d,
- 0xa06e,
- 0xa06f,
- 0xa070,
- 0xa071,
- 0xa072,
- 0xa073,
- 0xa074,
- 0xa075,
- 0xa076,
- 0xa077,
- 0xa078,
- 0xa079,
- 0xa07a,
- 0xa07b,
- 0xa07c,
- 0xa07d,
- 0xa07e,
- 0xa07f,
- 0xa080,
- 0xa081,
- 0xa082,
- 0xa083,
- 0xa084,
- 0xa085,
- 0xa086,
- 0xa087,
- 0xa088,
- 0xa089,
- 0xa08a,
- 0xa08b,
- 0xa08c,
- 0xa08d,
- 0xa08e,
- 0xa08f,
- 0xa090,
- 0xa091,
- 0xa092,
- 0xa093,
- 0xa094,
- 0xa095,
- 0xa096,
- 0xa097,
- 0xa098,
- 0xa099,
- 0xa09a,
- 0xa09b,
- 0xa09c,
- 0xa09d,
- 0xa09e,
- 0xa09f,
- 0xa0a0,
- 0xa0a1,
- 0xa0a2,
- 0xa0a3,
- 0xa0a4,
- 0xa0a5,
- 0xa0a6,
- 0xa0a7,
- 0xa0a8,
- 0xa0a9,
- 0xa0aa,
- 0xa0ab,
- 0xa0ac,
- 0xa0ad,
- 0xa0ae,
- 0xa0af,
- 0xa0b0,
- 0xa0b1,
- 0xa0b2,
- 0xa0b3,
- 0xa0b4,
- 0xa0b5,
- 0xa0b6,
- 0xa0b7,
- 0xa0b8,
- 0xa0b9,
- 0xa0ba,
- 0xa0bb,
- 0xa0bc,
- 0xa0bd,
- 0xa0be,
- 0xa0bf,
- 0xa0c0,
- 0xa0c1,
- 0xa0c2,
- 0xa0c3,
- 0xa0c4,
- 0xa0c5,
- 0xa0c6,
- 0xa0c7,
- 0xa0c8,
- 0xa0c9,
- 0xa0ca,
- 0xa0cb,
- 0xa0cc,
- 0xa0cd,
- 0xa0ce,
- 0xa0cf,
- 0xa0d0,
- 0xa0d1,
- 0xa0d2,
- 0xa0d3,
- 0xa0d4,
- 0xa0d5,
- 0xa0d6,
- 0xa0d7,
- 0xa0d8,
- 0xa0d9,
- 0xa0da,
- 0xa0db,
- 0xa0dc,
- 0xa0dd,
- 0xa0de,
- 0xa0df,
- 0xa0e0,
- 0xa0e1,
- 0xa0e2,
- 0xa0e3,
- 0xa0e4,
- 0xa0e5,
- 0xa0e6,
- 0xa0e7,
- 0xa0e8,
- 0xa0e9,
- 0xa0ea,
- 0xa0eb,
- 0xa0ec,
- 0xa0ed,
- 0xa0ee,
- 0xa0ef,
- 0xa0f8,
- 0xa800,
- 0xa802,
- 0xa803,
- 0xa804,
- 0xa805,
- 0xa806,
- 0xa807,
- 0xa808,
- 0xa809,
- 0xa80a,
- 0xa80b,
- 0xa80c,
- 0xa80d,
- 0xa80e,
- 0xa80f,
- 0xa810,
- 0xa811,
- 0xa812,
- 0xa813,
- 0xa814,
- 0xa815,
- 0xa816,
- 0xa817,
- 0xa818,
- 0xa819,
- 0xa81a,
- 0xa81b,
- 0xa81c,
- 0xa81d,
- 0xa81e,
- 0xa81f,
- 0xa820,
- 0xa821,
- 0xa822,
- 0xa823,
- 0xa824,
- 0xa825,
- 0xa830,
- 0xa831,
- 0xa832,
- 0xa833,
- 0xa834,
- 0xa835,
- 0xa836,
- 0xa837,
- 0xa838,
- 0xa839,
- 0xa83a,
- 0xa83b,
- 0xa83c,
- 0xa83d,
- 0xa840,
- 0xa842,
- 0xa843,
- 0xa844,
- 0xa845,
- 0xa846,
- 0xa847,
- 0xa848,
- 0xa849,
- 0xa84a,
- 0xa84b,
- 0xa84c,
- 0xa84d,
- 0xa84e,
- 0xa84f,
- 0xa850,
- 0xa851,
- 0xa852,
- 0xa853,
- 0xa854,
- 0xa855,
- 0xa856,
- 0xa857,
- 0xa858,
- 0xa859,
- 0xa85a,
- 0xa85b,
- 0xa85c,
- 0xa85d,
- 0xa85e,
- 0xa85f,
- 0xa860,
- 0xa861,
- 0xa862,
- 0xa863,
- 0xa864,
- 0xa865,
- 0xa870,
- 0xa871,
- 0xa872,
- 0xa873,
- 0xa874,
- 0xa875,
- 0xa876,
- 0xa877,
- 0xa878,
- 0xa879,
- 0xa87a,
- 0xa87b,
- 0xa87c,
- 0xa87d,
- 0xa87e,
- 0xa87f,
- 0xa880,
- 0xa881,
- 0xa882,
- 0xa883,
- 0xa884,
- 0xa885,
- 0xa886,
- 0xa887,
- 0xa888,
- 0xa889,
- 0xa88a,
- 0xa88b,
- 0xa88c,
- 0xa88d,
- 0xa88e,
- 0xa88f,
- 0xa890,
- 0xa891,
- 0xa892,
- 0xa893,
- 0xa894,
- 0xa895,
- 0xa896,
- 0xa980,
- 0xa982,
- 0xa983,
- 0xa984,
- 0xa985,
- 0xa986,
- 0xa987,
- 0xa988,
- 0xa989,
- 0xa98a,
- 0xa98b,
- 0xa98c,
- 0xa98d,
- 0xa98e,
- 0xa98f,
- 0xa990,
- 0xa991,
- 0xa992,
- 0xa993,
- 0xa994,
- 0xa995,
- 0xa996,
- 0xa997,
- 0xa998,
- 0xa999,
- 0xa99a,
- 0xa99b,
- 0xa99c,
- 0xa99d,
- 0xa99e,
- 0xa99f,
- 0xa9a0,
- 0xa9a1,
- 0xa9a2,
- 0xa9a3,
- 0xa9a4,
- 0xa9a5,
- 0xa9a6,
- 0xa9a7,
- 0xa9a9,
- 0xa9e0,
- 0xa9e1,
- 0xa9e4,
- 0xa9e5,
- 0xab00,
- 0xab04,
- 0xab05,
- 0xab10,
- 0xab11,
- 0xab12,
- 0xab13,
- 0xab14,
- 0xab15,
- 0xab16,
- 0xab17,
- 0xab18,
- 0xab19,
- 0xacc0,
- 0xb300,
- 0xb301,
- 0xb304,
- 0xb305,
- 0xb306,
- 0xb307,
- 0xb4c0,
- 0xb4c1,
- 0xb4c2,
- 0xb4c3,
- 0xb4c4,
- 0xb4ca,
- 0xb4cb,
- 0xb4cc,
- 0xb4d1,
- 0xb800,
- 0xb801,
- 0xb802,
- 0xb803,
- 0xb980,
- 0xb982,
- 0xb983,
- 0xb984,
- 0xb985,
- 0xb986,
- 0xb990,
- 0xb991,
- 0xb992,
- 0xb993,
- 0xb994,
- 0xb995,
- 0xb996,
- 0xb997,
- 0xb998,
- 0xb999,
- 0xb99a,
- 0xb99b,
- 0xb9c0,
- 0xb9c1,
- 0xb9c2,
- 0xb9c3,
- 0xb9c4,
- 0xb9c5,
- 0xb9c6,
- 0xb9c7,
- 0xb9c8,
- 0xb9c9,
- 0xbb10,
-};
-template<> constexpr inline uint16_t RP_BLIT_REGS<A7XX>[] = {
- 0xc02,
- 0xc06,
- 0xc10,
- 0xc11,
- 0xc12,
- 0xc13,
- 0xc14,
- 0xc15,
- 0xc16,
- 0xc17,
- 0xc18,
- 0xc19,
- 0xc1a,
- 0xc1b,
- 0xc1c,
- 0xc1d,
- 0xc1e,
- 0xc1f,
- 0xc20,
- 0xc21,
- 0xc22,
- 0xc23,
- 0xc24,
- 0xc25,
- 0xc26,
- 0xc27,
- 0xc28,
- 0xc29,
- 0xc2a,
- 0xc2b,
- 0xc2c,
- 0xc2d,
- 0xc2e,
- 0xc2f,
- 0xc38,
- 0xc39,
- 0xc3a,
- 0xc3b,
- 0xc3c,
- 0xc3d,
- 0xc3e,
- 0xc3f,
- 0xc40,
- 0xc41,
- 0xc42,
- 0xc43,
- 0xc44,
- 0xc45,
- 0xc46,
- 0xc47,
- 0xc48,
- 0xc49,
- 0xc4a,
- 0xc4b,
- 0xc4c,
- 0xc4d,
- 0xc4e,
- 0xc4f,
- 0xc50,
- 0xc51,
- 0xc52,
- 0xc53,
- 0xc54,
- 0xc55,
- 0xc56,
- 0xc57,
- 0x8000,
- 0x8001,
- 0x8002,
- 0x8003,
- 0x8004,
- 0x8005,
- 0x8006,
- 0x8007,
- 0x8010,
- 0x8011,
- 0x8012,
- 0x8013,
- 0x8014,
- 0x8015,
- 0x8016,
- 0x8017,
- 0x8018,
- 0x8019,
- 0x801a,
- 0x801b,
- 0x801c,
- 0x801d,
- 0x801e,
- 0x801f,
- 0x8020,
- 0x8021,
- 0x8022,
- 0x8023,
- 0x8024,
- 0x8025,
- 0x8026,
- 0x8027,
- 0x8028,
- 0x8029,
- 0x802a,
- 0x802b,
- 0x802c,
- 0x802d,
- 0x802e,
- 0x802f,
- 0x8030,
- 0x8031,
- 0x8032,
- 0x8033,
- 0x8034,
- 0x8035,
- 0x8036,
- 0x8037,
- 0x8038,
- 0x8039,
- 0x803a,
- 0x803b,
- 0x803c,
- 0x803d,
- 0x803e,
- 0x803f,
- 0x8040,
- 0x8041,
- 0x8042,
- 0x8043,
- 0x8044,
- 0x8045,
- 0x8046,
- 0x8047,
- 0x8048,
- 0x8049,
- 0x804a,
- 0x804b,
- 0x804c,
- 0x804d,
- 0x804e,
- 0x804f,
- 0x8050,
- 0x8051,
- 0x8052,
- 0x8053,
- 0x8054,
- 0x8055,
- 0x8056,
- 0x8057,
- 0x8058,
- 0x8059,
- 0x805a,
- 0x805b,
- 0x805c,
- 0x805d,
- 0x805e,
- 0x805f,
- 0x8060,
- 0x8061,
- 0x8062,
- 0x8063,
- 0x8064,
- 0x8065,
- 0x8066,
- 0x8067,
- 0x8068,
- 0x8069,
- 0x806a,
- 0x806b,
- 0x806c,
- 0x806d,
- 0x806e,
- 0x806f,
- 0x8070,
- 0x8071,
- 0x8072,
- 0x8073,
- 0x8074,
- 0x8075,
- 0x8076,
- 0x8077,
- 0x8078,
- 0x8079,
- 0x807a,
- 0x807b,
- 0x807c,
- 0x807d,
- 0x807e,
- 0x807f,
- 0x8080,
- 0x8081,
- 0x8082,
- 0x8083,
- 0x8084,
- 0x8085,
- 0x8086,
- 0x8087,
- 0x8088,
- 0x8089,
- 0x808a,
- 0x808b,
- 0x808c,
- 0x808d,
- 0x808e,
- 0x808f,
- 0x8090,
- 0x8091,
- 0x8092,
- 0x8094,
- 0x8095,
- 0x8096,
- 0x8097,
- 0x8098,
- 0x809b,
- 0x809c,
- 0x809d,
- 0x80a0,
- 0x80a1,
- 0x80a2,
- 0x80a3,
- 0x80a4,
- 0x80a5,
- 0x80a6,
- 0x80b0,
- 0x80b1,
- 0x80b2,
- 0x80b3,
- 0x80b4,
- 0x80b5,
- 0x80b6,
- 0x80b7,
- 0x80b8,
- 0x80b9,
- 0x80ba,
- 0x80bb,
- 0x80bc,
- 0x80bd,
- 0x80be,
- 0x80bf,
- 0x80c0,
- 0x80c1,
- 0x80c2,
- 0x80c3,
- 0x80c4,
- 0x80c5,
- 0x80c6,
- 0x80c7,
- 0x80c8,
- 0x80c9,
- 0x80ca,
- 0x80cb,
- 0x80cc,
- 0x80cd,
- 0x80ce,
- 0x80cf,
- 0x80d0,
- 0x80d1,
- 0x80d2,
- 0x80d3,
- 0x80d4,
- 0x80d5,
- 0x80d6,
- 0x80d7,
- 0x80d8,
- 0x80d9,
- 0x80da,
- 0x80db,
- 0x80dc,
- 0x80dd,
- 0x80de,
- 0x80df,
- 0x80e0,
- 0x80e1,
- 0x80e2,
- 0x80e3,
- 0x80e4,
- 0x80e5,
- 0x80e6,
- 0x80e7,
- 0x80e8,
- 0x80e9,
- 0x80ea,
- 0x80eb,
- 0x80ec,
- 0x80ed,
- 0x80ee,
- 0x80ef,
- 0x80f0,
- 0x80f1,
- 0x8100,
- 0x8101,
- 0x8102,
- 0x8103,
- 0x8104,
- 0x8105,
- 0x8106,
- 0x8107,
- 0x8109,
- 0x8113,
- 0x8114,
- 0x8115,
- 0x8116,
- 0x8400,
- 0x8401,
- 0x8402,
- 0x8403,
- 0x8404,
- 0x8405,
- 0x8406,
- 0x840a,
- 0x840b,
- 0x8800,
- 0x8801,
- 0x8802,
- 0x8803,
- 0x8804,
- 0x8805,
- 0x8806,
- 0x8809,
- 0x880a,
- 0x880b,
- 0x880c,
- 0x880d,
- 0x880f,
- 0x8810,
- 0x8812,
- 0x8820,
- 0x8821,
- 0x8822,
- 0x8823,
- 0x8824,
- 0x8825,
- 0x8826,
- 0x8827,
- 0x8828,
- 0x8829,
- 0x882a,
- 0x882b,
- 0x882c,
- 0x882d,
- 0x882e,
- 0x882f,
- 0x8830,
- 0x8831,
- 0x8832,
- 0x8833,
- 0x8834,
- 0x8835,
- 0x8836,
- 0x8837,
- 0x8838,
- 0x8839,
- 0x883a,
- 0x883b,
- 0x883c,
- 0x883d,
- 0x883e,
- 0x883f,
- 0x8840,
- 0x8841,
- 0x8842,
- 0x8843,
- 0x8844,
- 0x8845,
- 0x8846,
- 0x8847,
- 0x8848,
- 0x8849,
- 0x884a,
- 0x884b,
- 0x884c,
- 0x884d,
- 0x884e,
- 0x884f,
- 0x8850,
- 0x8851,
- 0x8852,
- 0x8853,
- 0x8854,
- 0x8855,
- 0x8856,
- 0x8857,
- 0x8858,
- 0x8859,
- 0x885a,
- 0x885b,
- 0x885c,
- 0x885d,
- 0x885e,
- 0x885f,
- 0x8860,
- 0x8861,
- 0x8862,
- 0x8863,
- 0x8865,
- 0x8870,
- 0x8871,
- 0x8872,
- 0x8873,
- 0x8874,
- 0x8875,
- 0x8876,
- 0x8877,
- 0x8878,
- 0x8879,
- 0x8880,
- 0x8881,
- 0x8882,
- 0x8883,
- 0x8884,
- 0x8885,
- 0x8886,
- 0x8887,
- 0x8888,
- 0x8889,
- 0x8890,
- 0x8898,
- 0x88c0,
- 0x88c1,
- 0x88d0,
- 0x88d1,
- 0x88d2,
- 0x88d3,
- 0x88d4,
- 0x88d5,
- 0x88d6,
- 0x88d7,
- 0x88d8,
- 0x88d9,
- 0x88da,
- 0x88db,
- 0x88dc,
- 0x88dd,
- 0x88de,
- 0x88df,
- 0x88e0,
- 0x88e1,
- 0x88e2,
- 0x88e3,
- 0x8900,
- 0x8901,
- 0x8902,
- 0x8903,
- 0x8904,
- 0x8905,
- 0x8906,
- 0x8907,
- 0x8908,
- 0x8909,
- 0x890a,
- 0x890b,
- 0x890c,
- 0x890d,
- 0x890e,
- 0x890f,
- 0x8910,
- 0x8911,
- 0x8912,
- 0x8913,
- 0x8914,
- 0x8915,
- 0x8916,
- 0x8917,
- 0x8918,
- 0x8919,
- 0x891a,
- 0x8c00,
- 0x8c01,
- 0x8c17,
- 0x8c18,
- 0x8c19,
- 0x8c1a,
- 0x8c1b,
- 0x8c1c,
- 0x8c1d,
- 0x8c1e,
- 0x8c1f,
- 0x8c20,
- 0x8c21,
- 0x8c22,
- 0x8c23,
- 0x8c24,
- 0x8c25,
- 0x8c2c,
- 0x8c2d,
- 0x8c2e,
- 0x8c2f,
- 0x9101,
- 0x9102,
- 0x9103,
- 0x9104,
- 0x9105,
- 0x9106,
- 0x9107,
- 0x9108,
- 0x9109,
- 0x910a,
- 0x910b,
- 0x910c,
- 0x9200,
- 0x9201,
- 0x9202,
- 0x9203,
- 0x9204,
- 0x9205,
- 0x9206,
- 0x9207,
- 0x9208,
- 0x9209,
- 0x920a,
- 0x920b,
- 0x920c,
- 0x920d,
- 0x920e,
- 0x920f,
- 0x9212,
- 0x9213,
- 0x9214,
- 0x9215,
- 0x9216,
- 0x9217,
- 0x9301,
- 0x9302,
- 0x9303,
- 0x9304,
- 0x9305,
- 0x9306,
- 0x9307,
- 0x9308,
- 0x9309,
- 0x9311,
- 0x9312,
- 0x9313,
- 0x9314,
- 0x9315,
- 0x9316,
- 0x9317,
- 0x9800,
- 0x9801,
- 0x9802,
- 0x9803,
- 0x9804,
- 0x9805,
- 0x9806,
- 0x9808,
- 0x9809,
- 0x9b00,
- 0x9b01,
- 0x9b02,
- 0x9b03,
- 0x9b04,
- 0x9b05,
- 0x9b07,
- 0x9b08,
- 0x9b09,
- 0xa000,
- 0xa001,
- 0xa002,
- 0xa003,
- 0xa004,
- 0xa005,
- 0xa006,
- 0xa008,
- 0xa00e,
- 0xa00f,
- 0xa010,
- 0xa011,
- 0xa012,
- 0xa013,
- 0xa014,
- 0xa015,
- 0xa016,
- 0xa017,
- 0xa018,
- 0xa019,
- 0xa01a,
- 0xa01b,
- 0xa01c,
- 0xa01d,
- 0xa01e,
- 0xa01f,
- 0xa020,
- 0xa021,
- 0xa022,
- 0xa023,
- 0xa024,
- 0xa025,
- 0xa026,
- 0xa027,
- 0xa028,
- 0xa029,
- 0xa02a,
- 0xa02b,
- 0xa02c,
- 0xa02d,
- 0xa02e,
- 0xa02f,
- 0xa030,
- 0xa031,
- 0xa032,
- 0xa033,
- 0xa034,
- 0xa035,
- 0xa036,
- 0xa037,
- 0xa038,
- 0xa039,
- 0xa03a,
- 0xa03b,
- 0xa03c,
- 0xa03d,
- 0xa03e,
- 0xa03f,
- 0xa040,
- 0xa041,
- 0xa042,
- 0xa043,
- 0xa044,
- 0xa045,
- 0xa046,
- 0xa047,
- 0xa048,
- 0xa049,
- 0xa04a,
- 0xa04b,
- 0xa04c,
- 0xa04d,
- 0xa04e,
- 0xa04f,
- 0xa050,
- 0xa051,
- 0xa052,
- 0xa053,
- 0xa054,
- 0xa055,
- 0xa056,
- 0xa057,
- 0xa058,
- 0xa059,
- 0xa05a,
- 0xa05b,
- 0xa05c,
- 0xa05d,
- 0xa05e,
- 0xa05f,
- 0xa060,
- 0xa061,
- 0xa062,
- 0xa063,
- 0xa064,
- 0xa065,
- 0xa066,
- 0xa067,
- 0xa068,
- 0xa069,
- 0xa06a,
- 0xa06b,
- 0xa06c,
- 0xa06d,
- 0xa06e,
- 0xa06f,
- 0xa070,
- 0xa071,
- 0xa072,
- 0xa073,
- 0xa074,
- 0xa075,
- 0xa076,
- 0xa077,
- 0xa078,
- 0xa079,
- 0xa07a,
- 0xa07b,
- 0xa07c,
- 0xa07d,
- 0xa07e,
- 0xa07f,
- 0xa080,
- 0xa081,
- 0xa082,
- 0xa083,
- 0xa084,
- 0xa085,
- 0xa086,
- 0xa087,
- 0xa088,
- 0xa089,
- 0xa08a,
- 0xa08b,
- 0xa08c,
- 0xa08d,
- 0xa08e,
- 0xa08f,
- 0xa090,
- 0xa091,
- 0xa092,
- 0xa093,
- 0xa094,
- 0xa095,
- 0xa096,
- 0xa097,
- 0xa098,
- 0xa099,
- 0xa09a,
- 0xa09b,
- 0xa09c,
- 0xa09d,
- 0xa09e,
- 0xa09f,
- 0xa0a0,
- 0xa0a1,
- 0xa0a2,
- 0xa0a3,
- 0xa0a4,
- 0xa0a5,
- 0xa0a6,
- 0xa0a7,
- 0xa0a8,
- 0xa0a9,
- 0xa0aa,
- 0xa0ab,
- 0xa0ac,
- 0xa0ad,
- 0xa0ae,
- 0xa0af,
- 0xa0b0,
- 0xa0b1,
- 0xa0b2,
- 0xa0b3,
- 0xa0b4,
- 0xa0b5,
- 0xa0b6,
- 0xa0b7,
- 0xa0b8,
- 0xa0b9,
- 0xa0ba,
- 0xa0bb,
- 0xa0bc,
- 0xa0bd,
- 0xa0be,
- 0xa0bf,
- 0xa0c0,
- 0xa0c1,
- 0xa0c2,
- 0xa0c3,
- 0xa0c4,
- 0xa0c5,
- 0xa0c6,
- 0xa0c7,
- 0xa0c8,
- 0xa0c9,
- 0xa0ca,
- 0xa0cb,
- 0xa0cc,
- 0xa0cd,
- 0xa0ce,
- 0xa0cf,
- 0xa0d0,
- 0xa0d1,
- 0xa0d2,
- 0xa0d3,
- 0xa0d4,
- 0xa0d5,
- 0xa0d6,
- 0xa0d7,
- 0xa0d8,
- 0xa0d9,
- 0xa0da,
- 0xa0db,
- 0xa0dc,
- 0xa0dd,
- 0xa0de,
- 0xa0df,
- 0xa0e0,
- 0xa0e1,
- 0xa0e2,
- 0xa0e3,
- 0xa0e4,
- 0xa0e5,
- 0xa0e6,
- 0xa0e7,
- 0xa0e8,
- 0xa0e9,
- 0xa0ea,
- 0xa0eb,
- 0xa0ec,
- 0xa0ed,
- 0xa0ee,
- 0xa0ef,
- 0xa0f8,
- 0xa800,
- 0xa802,
- 0xa803,
- 0xa804,
- 0xa805,
- 0xa806,
- 0xa807,
- 0xa808,
- 0xa809,
- 0xa80a,
- 0xa80b,
- 0xa80c,
- 0xa80d,
- 0xa80e,
- 0xa80f,
- 0xa810,
- 0xa811,
- 0xa812,
- 0xa813,
- 0xa814,
- 0xa815,
- 0xa816,
- 0xa817,
- 0xa818,
- 0xa819,
- 0xa81a,
- 0xa81b,
- 0xa81c,
- 0xa81d,
- 0xa81e,
- 0xa81f,
- 0xa820,
- 0xa821,
- 0xa822,
- 0xa823,
- 0xa824,
- 0xa825,
- 0xa827,
- 0xa830,
- 0xa831,
- 0xa832,
- 0xa833,
- 0xa834,
- 0xa835,
- 0xa836,
- 0xa837,
- 0xa838,
- 0xa839,
- 0xa83a,
- 0xa83b,
- 0xa83c,
- 0xa83d,
- 0xa83f,
- 0xa840,
- 0xa842,
- 0xa843,
- 0xa844,
- 0xa845,
- 0xa846,
- 0xa847,
- 0xa848,
- 0xa849,
- 0xa84a,
- 0xa84b,
- 0xa84c,
- 0xa84d,
- 0xa84e,
- 0xa84f,
- 0xa850,
- 0xa851,
- 0xa852,
- 0xa853,
- 0xa854,
- 0xa855,
- 0xa856,
- 0xa857,
- 0xa858,
- 0xa859,
- 0xa85a,
- 0xa85b,
- 0xa85c,
- 0xa85d,
- 0xa85e,
- 0xa85f,
- 0xa860,
- 0xa861,
- 0xa862,
- 0xa863,
- 0xa864,
- 0xa865,
- 0xa867,
- 0xa870,
- 0xa871,
- 0xa872,
- 0xa873,
- 0xa874,
- 0xa875,
- 0xa876,
- 0xa877,
- 0xa878,
- 0xa879,
- 0xa87a,
- 0xa87b,
- 0xa87c,
- 0xa87d,
- 0xa87e,
- 0xa87f,
- 0xa880,
- 0xa881,
- 0xa882,
- 0xa883,
- 0xa884,
- 0xa885,
- 0xa886,
- 0xa887,
- 0xa888,
- 0xa889,
- 0xa88a,
- 0xa88b,
- 0xa88c,
- 0xa88d,
- 0xa88e,
- 0xa88f,
- 0xa890,
- 0xa891,
- 0xa892,
- 0xa893,
- 0xa894,
- 0xa895,
- 0xa896,
- 0xa898,
- 0xa980,
- 0xa982,
- 0xa983,
- 0xa984,
- 0xa985,
- 0xa986,
- 0xa987,
- 0xa988,
- 0xa989,
- 0xa98a,
- 0xa98b,
- 0xa98c,
- 0xa98d,
- 0xa98e,
- 0xa98f,
- 0xa990,
- 0xa991,
- 0xa992,
- 0xa993,
- 0xa994,
- 0xa995,
- 0xa996,
- 0xa997,
- 0xa998,
- 0xa999,
- 0xa99a,
- 0xa99b,
- 0xa99c,
- 0xa99d,
- 0xa99e,
- 0xa99f,
- 0xa9a0,
- 0xa9a1,
- 0xa9a2,
- 0xa9a3,
- 0xa9a4,
- 0xa9a5,
- 0xa9a6,
- 0xa9a7,
- 0xa9a9,
- 0xa9aa,
- 0xa9ae,
- 0xa9bf,
- 0xa9c6,
- 0xa9c7,
- 0xa9c8,
- 0xa9c9,
- 0xa9ca,
- 0xa9cb,
- 0xa9d4,
- 0xa9d5,
- 0xa9d6,
- 0xa9d7,
- 0xa9d8,
- 0xa9d9,
- 0xa9da,
- 0xa9db,
- 0xa9dc,
- 0xa9dd,
- 0xa9de,
- 0xa9e0,
- 0xa9e1,
- 0xa9e4,
- 0xa9e5,
- 0xab00,
- 0xab03,
- 0xab04,
- 0xab05,
- 0xab0a,
- 0xab0b,
- 0xab0c,
- 0xab0d,
- 0xab0e,
- 0xab0f,
- 0xab10,
- 0xab11,
- 0xab12,
- 0xab13,
- 0xab14,
- 0xab15,
- 0xab16,
- 0xab17,
- 0xab18,
- 0xab19,
- 0xab21,
- 0xb2c0,
- 0xb2c2,
- 0xb2c3,
- 0xb2ca,
- 0xb2cb,
- 0xb2cc,
- 0xb2d2,
- 0xb300,
- 0xb301,
- 0xb304,
- 0xb305,
- 0xb306,
- 0xb307,
-};
-#endif
-
-#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 8bea8ef26f77..0e3dfd4c2bc8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -507,7 +507,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 
 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
 {
-	msm_writel(value, ptr + (offset << 2));
+	writel(value, ptr + (offset << 2));
 }
 
 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 592b296aab22..94b6c5cab6f4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -103,12 +103,12 @@ struct a6xx_gmu {
 
 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
 {
-	return msm_readl(gmu->mmio + (offset << 2));
+	return readl(gmu->mmio + (offset << 2));
 }
 
 static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
 {
-	msm_writel(value, gmu->mmio + (offset << 2));
+	writel(value, gmu->mmio + (offset << 2));
 }
 
 static inline void
@@ -131,8 +131,8 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
 {
 	u64 val;
 
-	val = (u64) msm_readl(gmu->mmio + (lo << 2));
-	val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+	val = (u64) readl(gmu->mmio + (lo << 2));
+	val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32);
 
 	return val;
 }
@@ -143,12 +143,12 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
 
 static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
 {
-	return msm_readl(gmu->rscc + (offset << 2));
+	return readl(gmu->rscc + (offset << 2));
 }
 
 static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
 {
-	msm_writel(value, gmu->rscc + (offset << 2));
+	writel(value, gmu->rscc + (offset << 2));
 }
 
 #define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
deleted file mode 100644
index 9d7f93929367..000000000000
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ /dev/null
@@ -1,422 +0,0 @@
-#ifndef A6XX_GMU_XML
-#define A6XX_GMU_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11820 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark <robdclark@gmail.com> Rob Clark
-- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
-
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
-#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
-#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
-#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
-
-#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
-#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
-#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
-#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
-static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
-{
- return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
-}
-#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
-#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
-static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
-{
- return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
-}
-
-#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
-
-#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
-
-#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
-
-#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
-
-#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
-
-#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
-
-#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
-
-#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
-
-#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
-
-#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
-
-#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
-
-#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
-
-#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
-
-#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
-
-#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
-
-#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
-
-#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
-
-#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
-
-#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
-
-#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
-
-#define REG_A6XX_GMU_CM3_CFG 0x0000502d
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
-
-#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
-
-#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
-static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
-{
- return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
-}
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
-#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
-static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
-{
- return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
-}
-
-#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
-
-#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
-
-#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
-
-#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
-#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
-#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
-#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
-static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
-{
- return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
-}
-
-#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
-#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
-#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
-#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
-#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
-#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
-#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
-#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
-#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
-#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
-#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
-
-#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
-
-#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
-
-#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
-
-#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1
-
-#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
-
-#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
-
-#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
-
-#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
-
-#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
-
-#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
-
-#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
-
-#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
-
-#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
-
-#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
-
-#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
-
-#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
-
-#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
-
-#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
-
-#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
-
-#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
-
-#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
-
-#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
-#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
-#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
-
-#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
-
-#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
-
-#define REG_A6XX_GMU_GENERAL_0 0x000051c5
-
-#define REG_A6XX_GMU_GENERAL_1 0x000051c6
-
-#define REG_A6XX_GMU_GENERAL_6 0x000051cb
-
-#define REG_A6XX_GMU_GENERAL_7 0x000051cc
-
-#define REG_A7XX_GMU_GENERAL_8 0x000051cd
-
-#define REG_A7XX_GMU_GENERAL_9 0x000051ce
-
-#define REG_A7XX_GMU_GENERAL_10 0x000051cf
-
-#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
-
-#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
-
-#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
-
-#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
-
-#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
-
-#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
-
-#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
-
-#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
-
-#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
-
-#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
-
-#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
-
-#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
-
-#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
-#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
-
-#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
-
-#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
-
-#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
-
-#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
-
-#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
-#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
-
-#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
-
-#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
-
-#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
-
-#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
-
-#define REG_A6XX_GMU_AHB_FENCE_STATUS_CLR 0x00009314
-
-#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
-
-#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
-
-#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
-
-#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
-
-#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
-
-#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
-
-#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
-
-#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
-
-#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001
-
-#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
-
-#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
-
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
-
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
-
-#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
-
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
-
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
-
-#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
-
-#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
-
-#define REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740 0x00000154
-
-#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
-
-#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
-
-#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
-
-#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
-
-#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A6XX_GMU_XML */
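[Editor's note] The generated header removed above pairs each REG_ offset with __MASK/__SHIFT constants and an inline packer per bitfield. As a minimal sketch of how those helpers compose into a register write (not part of this patch; gmu_write() is assumed to be the driver's GMU MMIO accessor and the field values are arbitrary examples):

/* Illustrative only: OR flag bits together with the generated bitfield
 * packers, then write the composed value to the register offset. */
static void example_program_ifpc(struct a6xx_gmu *gmu)
{
	u32 val;

	val = A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
	      A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(1) |
	      A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(0x50);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, val);
}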
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0674aca0f8a3..973872ad0474 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -284,7 +284,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_start));
@@ -330,7 +330,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x00e); /* IB1LIST end */
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_end));
@@ -1255,8 +1255,9 @@ static const u32 a730_protect[] = {
A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
- /* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
- A6XX_PROTECT_RDONLY(0x008de, 0x0154),
+ /* 0x008d0-0x008dd and 0x008e0-0x008e6 are unprotected on purpose for tools like perfetto */
+ A6XX_PROTECT_NORDWR(0x008de, 0x0001),
+ A6XX_PROTECT_RDONLY(0x008e7, 0x014b),
A6XX_PROTECT_NORDWR(0x00900, 0x004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
@@ -1291,8 +1292,7 @@ static const u32 a730_protect[] = {
A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
- /* CP_PROTECT_REG[44, 46] are left untouched! */
- 0,
+ /* CP_PROTECT_REG[45, 46] are left untouched! */
0,
0,
A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
@@ -1377,6 +1377,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a618(gpu))
gpu->ubwc_config.highest_bank_bit = 14;
+ if (adreno_is_a619(gpu))
+ /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
+ gpu->ubwc_config.highest_bank_bit = 13;
+
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
@@ -3058,7 +3062,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
+ a6xx_llc_slices_destroy(a6xx_gpu);
+ kfree(a6xx_gpu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 34822b080759..8917032b7515 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -69,12 +69,12 @@ static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u3
static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
{
- return msm_readl(a6xx_gpu->llc_mmio + (reg << 2));
+ return readl(a6xx_gpu->llc_mmio + (reg << 2));
}
static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
- msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
+ writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}
#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 1f5245fc2cdc..0a7717a4fc2f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -13,15 +13,18 @@
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wunused-const-variable"
#include "adreno_gen7_0_0_snapshot.h"
#include "adreno_gen7_2_0_snapshot.h"
+#include "adreno_gen7_9_0_snapshot.h"
#pragma GCC diagnostic pop
struct a6xx_gpu_state_obj {
const void *handle;
u32 *data;
+ u32 count; /* optional, used when the count is potentially read from hw */
};
struct a6xx_gpu_state {
@@ -192,10 +195,10 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
}
#define cxdbg_write(ptr, offset, val) \
- msm_writel((val), (ptr) + ((offset) << 2))
+ writel((val), (ptr) + ((offset) << 2))
#define cxdbg_read(ptr, offset) \
- msm_readl((ptr) + ((offset) << 2))
+ readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
@@ -384,21 +387,29 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int debugbus_blocks_count, total_debugbus_blocks;
- const u32 *debugbus_blocks;
+ int debugbus_blocks_count, gbif_debugbus_blocks_count, total_debugbus_blocks;
+ const u32 *debugbus_blocks, *gbif_debugbus_blocks;
int i;
if (adreno_is_a730(adreno_gpu)) {
debugbus_blocks = gen7_0_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
+ gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
+ } else if (adreno_is_a740_family(adreno_gpu)) {
debugbus_blocks = gen7_2_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
+ gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
+ gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
+ } else {
+ BUG_ON(!adreno_is_a750(adreno_gpu));
+ debugbus_blocks = gen7_9_0_debugbus_blocks;
+ debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
+ gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
+ gbif_debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_gbif_debugbus_blocks);
}
- total_debugbus_blocks = debugbus_blocks_count +
- ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
+ total_debugbus_blocks = debugbus_blocks_count + gbif_debugbus_blocks_count;
a6xx_state->debugbus = state_kcalloc(a6xx_state, total_debugbus_blocks,
sizeof(*a6xx_state->debugbus));
@@ -410,9 +421,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
&a6xx_state->debugbus[i]);
}
- for (i = 0; i < ARRAY_SIZE(a7xx_gbif_debugbus_blocks); i++) {
+ for (i = 0; i < gbif_debugbus_blocks_count; i++) {
a6xx_get_debugbus_block(gpu,
- a6xx_state, &a7xx_gbif_debugbus_blocks[i],
+ a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
&a6xx_state->debugbus[i + debugbus_blocks_count]);
}
}
@@ -813,10 +824,13 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
if (adreno_is_a730(adreno_gpu)) {
clusters = gen7_0_0_clusters;
clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_is_a740_family(adreno_gpu)) {
clusters = gen7_2_0_clusters;
clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
+ } else {
+ BUG_ON(!adreno_is_a750(adreno_gpu));
+ clusters = gen7_9_0_clusters;
+ clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
}
a6xx_state->clusters = state_kcalloc(a6xx_state,
@@ -852,7 +866,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
(block->type << 8) | i);
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
- block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ block->size, out);
out += block->size * sizeof(u32);
}
@@ -948,10 +962,13 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
if (adreno_is_a730(adreno_gpu)) {
shader_blocks = gen7_0_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_is_a740_family(adreno_gpu)) {
shader_blocks = gen7_2_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
+ } else {
+ BUG_ON(!adreno_is_a750(adreno_gpu));
+ shader_blocks = gen7_9_0_shader_blocks;
+ num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
}
a6xx_state->shaders = state_kcalloc(a6xx_state,
@@ -1337,10 +1354,13 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
if (adreno_is_a730(adreno_gpu)) {
reglist = gen7_0_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_is_a740_family(adreno_gpu)) {
reglist = gen7_2_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
+ } else {
+ BUG_ON(!adreno_is_a750(adreno_gpu));
+ reglist = gen7_9_0_reg_list;
+ pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
}
count = A7XX_PRE_CRASHDUMPER_SIZE + A7XX_POST_CRASHDUMPER_SIZE;
@@ -1388,7 +1408,8 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
+ adreno_is_a750(adreno_gpu)));
regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu,
@@ -1417,16 +1438,18 @@ static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu)
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
- struct a6xx_indexed_registers *indexed,
+ const struct a6xx_indexed_registers *indexed,
struct a6xx_gpu_state_obj *obj)
{
+ u32 count = indexed->count;
int i;
obj->handle = (const void *) indexed;
if (indexed->count_fn)
- indexed->count = indexed->count_fn(gpu);
+ count = indexed->count_fn(gpu);
- obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ obj->data = state_kcalloc(a6xx_state, count, sizeof(u32));
+ obj->count = count;
if (!obj->data)
return;
@@ -1434,7 +1457,7 @@ static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
gpu_write(gpu, indexed->addr, 0);
/* Read the data - each read increments the internal address by 1 */
- for (i = 0; i < indexed->count; i++)
+ for (i = 0; i < count; i++)
obj->data[i] = gpu_read(gpu, indexed->data);
}
@@ -1491,10 +1514,18 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_indexed_registers *indexed_regs;
int i, indexed_count, mempool_count;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
- indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
+ if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
+ indexed_regs = a7xx_indexed_reglist;
+ indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
+ } else {
+ BUG_ON(!adreno_is_a750(adreno_gpu));
+ indexed_regs = gen7_9_0_cp_indexed_reg_list;
+ indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
+ }
+
mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed);
a6xx_state->indexed_regs = state_kcalloc(a6xx_state,
@@ -1507,7 +1538,7 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
/* First read the common regs */
for (i = 0; i < indexed_count; i++)
- a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_indexed_reglist[i],
+ a6xx_get_indexed_regs(gpu, a6xx_state, &indexed_regs[i],
&a6xx_state->indexed_regs[i]);
gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, 0, BIT(2));
@@ -1862,9 +1893,9 @@ static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
return;
print_name(p, " - regs-name: ", indexed->name);
- drm_printf(p, " dwords: %d\n", indexed->count);
+ drm_printf(p, " dwords: %d\n", obj->count);
- print_ascii85(p, indexed->count << 2, obj->data);
+ print_ascii85(p, obj->count << 2, obj->data);
}
static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 5ddd32063bcc..dd4c28a8d923 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -397,7 +397,7 @@ struct a6xx_indexed_registers {
u32 (*count_fn)(struct msm_gpu *gpu);
};
-static struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
+static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
@@ -408,7 +408,7 @@ static struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
-static struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
+static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
@@ -433,12 +433,12 @@ static struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
-static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
-static struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
+static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
{ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
{ "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
@@ -517,9 +517,9 @@ static const struct a6xx_debugbus_block a650_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_SPTP_5, 0x100),
};
-static const struct a6xx_debugbus_block a7xx_gbif_debugbus_blocks[] = {
- DEBUGBUS(A7XX_DBGBUS_GBIF_CX, 0x100),
- DEBUGBUS(A7XX_DBGBUS_GBIF_GX, 0x100),
+static const u32 a7xx_gbif_debugbus_blocks[] = {
+ A7XX_DBGBUS_GBIF_CX,
+ A7XX_DBGBUS_GBIF_GX,
};
static const struct a6xx_debugbus_block a7xx_cx_debugbus_blocks[] = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
deleted file mode 100644
index fbc27930e550..000000000000
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ /dev/null
@@ -1,539 +0,0 @@
-#ifndef ADRENO_COMMON_XML
-#define ADRENO_COMMON_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum chip {
- A2XX = 2,
- A3XX = 3,
- A4XX = 4,
- A5XX = 5,
- A6XX = 6,
- A7XX = 7,
-};
-
-enum adreno_pa_su_sc_draw {
- PC_DRAW_POINTS = 0,
- PC_DRAW_LINES = 1,
- PC_DRAW_TRIANGLES = 2,
-};
-
-enum adreno_compare_func {
- FUNC_NEVER = 0,
- FUNC_LESS = 1,
- FUNC_EQUAL = 2,
- FUNC_LEQUAL = 3,
- FUNC_GREATER = 4,
- FUNC_NOTEQUAL = 5,
- FUNC_GEQUAL = 6,
- FUNC_ALWAYS = 7,
-};
-
-enum adreno_stencil_op {
- STENCIL_KEEP = 0,
- STENCIL_ZERO = 1,
- STENCIL_REPLACE = 2,
- STENCIL_INCR_CLAMP = 3,
- STENCIL_DECR_CLAMP = 4,
- STENCIL_INVERT = 5,
- STENCIL_INCR_WRAP = 6,
- STENCIL_DECR_WRAP = 7,
-};
-
-enum adreno_rb_blend_factor {
- FACTOR_ZERO = 0,
- FACTOR_ONE = 1,
- FACTOR_SRC_COLOR = 4,
- FACTOR_ONE_MINUS_SRC_COLOR = 5,
- FACTOR_SRC_ALPHA = 6,
- FACTOR_ONE_MINUS_SRC_ALPHA = 7,
- FACTOR_DST_COLOR = 8,
- FACTOR_ONE_MINUS_DST_COLOR = 9,
- FACTOR_DST_ALPHA = 10,
- FACTOR_ONE_MINUS_DST_ALPHA = 11,
- FACTOR_CONSTANT_COLOR = 12,
- FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
- FACTOR_CONSTANT_ALPHA = 14,
- FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
- FACTOR_SRC_ALPHA_SATURATE = 16,
- FACTOR_SRC1_COLOR = 20,
- FACTOR_ONE_MINUS_SRC1_COLOR = 21,
- FACTOR_SRC1_ALPHA = 22,
- FACTOR_ONE_MINUS_SRC1_ALPHA = 23,
-};
-
-enum adreno_rb_surface_endian {
- ENDIAN_NONE = 0,
- ENDIAN_8IN16 = 1,
- ENDIAN_8IN32 = 2,
- ENDIAN_16IN32 = 3,
- ENDIAN_8IN64 = 4,
- ENDIAN_8IN128 = 5,
-};
-
-enum adreno_rb_dither_mode {
- DITHER_DISABLE = 0,
- DITHER_ALWAYS = 1,
- DITHER_IF_ALPHA_OFF = 2,
-};
-
-enum adreno_rb_depth_format {
- DEPTHX_16 = 0,
- DEPTHX_24_8 = 1,
- DEPTHX_32 = 2,
-};
-
-enum adreno_rb_copy_control_mode {
- RB_COPY_RESOLVE = 1,
- RB_COPY_CLEAR = 2,
- RB_COPY_DEPTH_STENCIL = 5,
-};
-
-enum a3xx_rop_code {
- ROP_CLEAR = 0,
- ROP_NOR = 1,
- ROP_AND_INVERTED = 2,
- ROP_COPY_INVERTED = 3,
- ROP_AND_REVERSE = 4,
- ROP_INVERT = 5,
- ROP_XOR = 6,
- ROP_NAND = 7,
- ROP_AND = 8,
- ROP_EQUIV = 9,
- ROP_NOOP = 10,
- ROP_OR_INVERTED = 11,
- ROP_COPY = 12,
- ROP_OR_REVERSE = 13,
- ROP_OR = 14,
- ROP_SET = 15,
-};
-
-enum a3xx_render_mode {
- RB_RENDERING_PASS = 0,
- RB_TILING_PASS = 1,
- RB_RESOLVE_PASS = 2,
- RB_COMPUTE_PASS = 3,
-};
-
-enum a3xx_msaa_samples {
- MSAA_ONE = 0,
- MSAA_TWO = 1,
- MSAA_FOUR = 2,
- MSAA_EIGHT = 3,
-};
-
-enum a3xx_threadmode {
- MULTI = 0,
- SINGLE = 1,
-};
-
-enum a3xx_instrbuffermode {
- CACHE = 0,
- BUFFER = 1,
-};
-
-enum a3xx_threadsize {
- TWO_QUADS = 0,
- FOUR_QUADS = 1,
-};
-
-enum a3xx_color_swap {
- WZYX = 0,
- WXYZ = 1,
- ZYXW = 2,
- XYZW = 3,
-};
-
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
-enum a4xx_tess_spacing {
- EQUAL_SPACING = 0,
- ODD_SPACING = 2,
- EVEN_SPACING = 3,
-};
-
-enum a5xx_address_mode {
- ADDR_32B = 0,
- ADDR_64B = 1,
-};
-
-enum a5xx_line_mode {
- BRESENHAM = 0,
- RECTANGULAR = 1,
-};
-
-enum a6xx_tex_prefetch_cmd {
- TEX_PREFETCH_UNK0 = 0,
- TEX_PREFETCH_SAM = 1,
- TEX_PREFETCH_GATHER4R = 2,
- TEX_PREFETCH_GATHER4G = 3,
- TEX_PREFETCH_GATHER4B = 4,
- TEX_PREFETCH_GATHER4A = 5,
- TEX_PREFETCH_UNK6 = 6,
- TEX_PREFETCH_UNK7 = 7,
-};
-
-#define REG_AXXX_CP_RB_BASE 0x000001c0
-
-#define REG_AXXX_CP_RB_CNTL 0x000001c1
-#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
-#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
-static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
-{
- return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
-}
-#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
-#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
-static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
-{
- return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
-}
-#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
-#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
-static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
-{
- return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
-}
-#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
-#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
-#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
-
-#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
-#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
-#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
-static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
-{
- return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
-}
-#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
-#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
-static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
-}
-
-#define REG_AXXX_CP_RB_RPTR 0x000001c4
-
-#define REG_AXXX_CP_RB_WPTR 0x000001c5
-
-#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
-
-#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
-
-#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
-
-#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
-static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
-{
- return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
-}
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
-static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
-{
- return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
-}
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
-#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
-static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
-{
- return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
-}
-
-#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
-#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
-#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
-static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
-{
- return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
-}
-#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
-#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
-static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
-{
- return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
-}
-
-#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
-#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
-#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
-static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
-}
-#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
-#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
-static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
-}
-#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
-#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
-static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
-}
-
-#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
-#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
-#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
-static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
-{
- return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
-}
-
-#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
-#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
-#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
-static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
-{
- return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
-}
-
-#define REG_AXXX_SCRATCH_UMSK 0x000001dc
-#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
-#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
-static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
-{
- return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
-}
-#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
-#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
-static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
-{
- return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
-}
-
-#define REG_AXXX_SCRATCH_ADDR 0x000001dd
-
-#define REG_AXXX_CP_ME_RDADDR 0x000001ea
-
-#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
-
-#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
-
-#define REG_AXXX_CP_INT_CNTL 0x000001f2
-#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
-#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
-#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
-#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
-#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
-#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
-#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
-#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
-#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
-
-#define REG_AXXX_CP_INT_STATUS 0x000001f3
-
-#define REG_AXXX_CP_INT_ACK 0x000001f4
-
-#define REG_AXXX_CP_ME_CNTL 0x000001f6
-#define AXXX_CP_ME_CNTL_BUSY 0x20000000
-#define AXXX_CP_ME_CNTL_HALT 0x10000000
-
-#define REG_AXXX_CP_ME_STATUS 0x000001f7
-
-#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
-
-#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
-
-#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
-
-#define REG_AXXX_CP_DEBUG 0x000001fc
-#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
-#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
-#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
-#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
-#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
-#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
-#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
-#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
-
-#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
-#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
-#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
-static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
-}
-#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
-#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
-static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
-}
-
-#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
-#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
-#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
-static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
-}
-#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
-#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
-static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
-}
-
-#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
-#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
-#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
-static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
-}
-#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
-#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
-static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
-{
- return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
-}
-
-#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
-
-#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
-
-#define REG_AXXX_CP_ST_BASE 0x0000044d
-
-#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
-
-#define REG_AXXX_CP_MEQ_STAT 0x0000044f
-
-#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
-
-#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
-
-#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
-
-#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
-
-#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
-
-#define REG_AXXX_CP_IB1_BASE 0x00000458
-
-#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
-
-#define REG_AXXX_CP_IB2_BASE 0x0000045a
-
-#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
-
-#define REG_AXXX_CP_STAT 0x0000047f
-#define AXXX_CP_STAT_CP_BUSY 0x80000000
-#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY 0x40000000
-#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY 0x20000000
-#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY 0x10000000
-#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY 0x08000000
-#define AXXX_CP_STAT_ME_BUSY 0x04000000
-#define AXXX_CP_STAT_MIU_WR_C_BUSY 0x02000000
-#define AXXX_CP_STAT_CP_3D_BUSY 0x00800000
-#define AXXX_CP_STAT_CP_NRT_BUSY 0x00400000
-#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY 0x00200000
-#define AXXX_CP_STAT_RCIU_ME_BUSY 0x00100000
-#define AXXX_CP_STAT_RCIU_PFP_BUSY 0x00080000
-#define AXXX_CP_STAT_MEQ_RING_BUSY 0x00040000
-#define AXXX_CP_STAT_PFP_BUSY 0x00020000
-#define AXXX_CP_STAT_ST_QUEUE_BUSY 0x00010000
-#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY 0x00002000
-#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY 0x00001000
-#define AXXX_CP_STAT_RING_QUEUE_BUSY 0x00000800
-#define AXXX_CP_STAT_CSF_BUSY 0x00000400
-#define AXXX_CP_STAT_CSF_ST_BUSY 0x00000200
-#define AXXX_CP_STAT_EVENT_BUSY 0x00000100
-#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY 0x00000080
-#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY 0x00000040
-#define AXXX_CP_STAT_CSF_RING_BUSY 0x00000020
-#define AXXX_CP_STAT_RCIU_BUSY 0x00000010
-#define AXXX_CP_STAT_RBIU_BUSY 0x00000008
-#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY 0x00000004
-#define AXXX_CP_STAT_MIU_RD_REQ_BUSY 0x00000002
-#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
-
-#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
-
-#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
-
-#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
-
-#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
-
-#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
-
-#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
-
-#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
-
-#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
-
-#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
-
-#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
-
-#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
-
-#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
-
-#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
-
-#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
-
-#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
-
-#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
-
-#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
-
-#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
-
-#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
-
-#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
-
-#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
-
-#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
-
-#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
-
-#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
-
-#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
-
-#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
-
-#ifdef __cplusplus
-#endif
-
-#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
new file mode 100644
index 000000000000..260d66eccfec
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -0,0 +1,1446 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __ADRENO_GEN7_9_0_SNAPSHOT_H
+#define __ADRENO_GEN7_9_0_SNAPSHOT_H
+
+#include "a6xx_gpu_state.h"
+
+static const u32 gen7_9_0_debugbus_blocks[] = {
+ A7XX_DBGBUS_CP_0_0,
+ A7XX_DBGBUS_CP_0_1,
+ A7XX_DBGBUS_RBBM,
+ A7XX_DBGBUS_HLSQ,
+ A7XX_DBGBUS_UCHE_0,
+ A7XX_DBGBUS_UCHE_1,
+ A7XX_DBGBUS_TESS_BR,
+ A7XX_DBGBUS_TESS_BV,
+ A7XX_DBGBUS_PC_BR,
+ A7XX_DBGBUS_PC_BV,
+ A7XX_DBGBUS_VFDP_BR,
+ A7XX_DBGBUS_VFDP_BV,
+ A7XX_DBGBUS_VPC_BR,
+ A7XX_DBGBUS_VPC_BV,
+ A7XX_DBGBUS_TSE_BR,
+ A7XX_DBGBUS_TSE_BV,
+ A7XX_DBGBUS_RAS_BR,
+ A7XX_DBGBUS_RAS_BV,
+ A7XX_DBGBUS_VSC,
+ A7XX_DBGBUS_COM_0,
+ A7XX_DBGBUS_LRZ_BR,
+ A7XX_DBGBUS_LRZ_BV,
+ A7XX_DBGBUS_UFC_0,
+ A7XX_DBGBUS_UFC_1,
+ A7XX_DBGBUS_GMU_GX,
+ A7XX_DBGBUS_DBGC,
+ A7XX_DBGBUS_GPC_BR,
+ A7XX_DBGBUS_GPC_BV,
+ A7XX_DBGBUS_LARC,
+ A7XX_DBGBUS_HLSQ_SPTP,
+ A7XX_DBGBUS_RB_0,
+ A7XX_DBGBUS_RB_1,
+ A7XX_DBGBUS_RB_2,
+ A7XX_DBGBUS_RB_3,
+ A7XX_DBGBUS_RB_4,
+ A7XX_DBGBUS_RB_5,
+ A7XX_DBGBUS_UCHE_WRAPPER,
+ A7XX_DBGBUS_CCU_0,
+ A7XX_DBGBUS_CCU_1,
+ A7XX_DBGBUS_CCU_2,
+ A7XX_DBGBUS_CCU_3,
+ A7XX_DBGBUS_CCU_4,
+ A7XX_DBGBUS_CCU_5,
+ A7XX_DBGBUS_VFD_BR_0,
+ A7XX_DBGBUS_VFD_BR_1,
+ A7XX_DBGBUS_VFD_BR_2,
+ A7XX_DBGBUS_VFD_BV_0,
+ A7XX_DBGBUS_VFD_BV_1,
+ A7XX_DBGBUS_VFD_BV_2,
+ A7XX_DBGBUS_USP_0,
+ A7XX_DBGBUS_USP_1,
+ A7XX_DBGBUS_USP_2,
+ A7XX_DBGBUS_USP_3,
+ A7XX_DBGBUS_USP_4,
+ A7XX_DBGBUS_USP_5,
+ A7XX_DBGBUS_TP_0,
+ A7XX_DBGBUS_TP_1,
+ A7XX_DBGBUS_TP_2,
+ A7XX_DBGBUS_TP_3,
+ A7XX_DBGBUS_TP_4,
+ A7XX_DBGBUS_TP_5,
+ A7XX_DBGBUS_TP_6,
+ A7XX_DBGBUS_TP_7,
+ A7XX_DBGBUS_TP_8,
+ A7XX_DBGBUS_TP_9,
+ A7XX_DBGBUS_TP_10,
+ A7XX_DBGBUS_TP_11,
+ A7XX_DBGBUS_USPTP_0,
+ A7XX_DBGBUS_USPTP_1,
+ A7XX_DBGBUS_USPTP_2,
+ A7XX_DBGBUS_USPTP_3,
+ A7XX_DBGBUS_USPTP_4,
+ A7XX_DBGBUS_USPTP_5,
+ A7XX_DBGBUS_USPTP_6,
+ A7XX_DBGBUS_USPTP_7,
+ A7XX_DBGBUS_USPTP_8,
+ A7XX_DBGBUS_USPTP_9,
+ A7XX_DBGBUS_USPTP_10,
+ A7XX_DBGBUS_USPTP_11,
+ A7XX_DBGBUS_CCHE_0,
+ A7XX_DBGBUS_CCHE_1,
+ A7XX_DBGBUS_CCHE_2,
+ A7XX_DBGBUS_VPC_DSTR_0,
+ A7XX_DBGBUS_VPC_DSTR_1,
+ A7XX_DBGBUS_VPC_DSTR_2,
+ A7XX_DBGBUS_HLSQ_DP_STR_0,
+ A7XX_DBGBUS_HLSQ_DP_STR_1,
+ A7XX_DBGBUS_HLSQ_DP_STR_2,
+ A7XX_DBGBUS_HLSQ_DP_STR_3,
+ A7XX_DBGBUS_HLSQ_DP_STR_4,
+ A7XX_DBGBUS_HLSQ_DP_STR_5,
+ A7XX_DBGBUS_UFC_DSTR_0,
+ A7XX_DBGBUS_UFC_DSTR_1,
+ A7XX_DBGBUS_UFC_DSTR_2,
+ A7XX_DBGBUS_CGC_SUBCORE,
+ A7XX_DBGBUS_CGC_CORE,
+};
+
+static const u32 gen7_9_0_gbif_debugbus_blocks[] = {
+ A7XX_DBGBUS_GBIF_GX,
+};
+
+static const u32 gen7_9_0_cx_debugbus_blocks[] = {
+ A7XX_DBGBUS_CX,
+ A7XX_DBGBUS_GMU_CX,
+ A7XX_DBGBUS_GBIF_CX,
+};
+
+static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
+ { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_CB_RAM, 0x0390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_TAG, 0x00C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_TMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_SMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_STATE_DATA, 0x0040, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+};
+
+/*
+ * Block : ['PRE_CRASHDUMPER', 'GBIF']
+ * pairs : 2 (Regs:5), 5 (Regs:38)
+ */
+static const u32 gen7_9_0_pre_crashdumper_gpu_registers[] = {
+ 0x00210, 0x00213, 0x00536, 0x00536, 0x03c00, 0x03c0b, 0x03c40, 0x03c42,
+ 0x03c45, 0x03c47, 0x03c49, 0x03c4a, 0x03cc0, 0x03cd1,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_pre_crashdumper_gpu_registers), 8));
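[Editor's note] Each gen7_9_0_*_registers table added below is a flat list of inclusive [start, end] dword-offset pairs terminated by UINT_MAX, UINT_MAX; the static_assert only checks that the array size is a multiple of 8 bytes, i.e. that it holds whole pairs. A sketch of how such a list can be walked to reproduce the "pairs : N (Regs:M)" totals quoted in the comment blocks (the helper name and pr_debug() report are illustrative assumptions, not code from this series):

/* Illustrative only: count range pairs and covered registers in a
 * UINT_MAX-terminated list of inclusive [start, end] offsets. */
static void count_range_pairs(const u32 *ranges)
{
	unsigned int pairs = 0, regs = 0;

	for (; ranges[0] != UINT_MAX; ranges += 2) {
		pairs++;
		regs += ranges[1] - ranges[0] + 1;	/* inclusive range */
	}

	pr_debug("pairs: %u (Regs:%u)\n", pairs, regs);
}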
+
+/*
+ * Block : ['BROADCAST', 'CP', 'GRAS', 'GXCLKCTL']
+ * Block : ['PC', 'RBBM', 'RDVM', 'UCHE']
+ * Block : ['VFD', 'VPC', 'VSC']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 196 (Regs:1778)
+ */
+static const u32 gen7_9_0_gpu_registers[] = {
+ 0x00000, 0x00000, 0x00002, 0x00002, 0x00011, 0x00012, 0x00016, 0x0001b,
+ 0x0001f, 0x00032, 0x00038, 0x0003c, 0x00044, 0x00044, 0x00047, 0x00047,
+ 0x00049, 0x0004a, 0x0004c, 0x0004c, 0x00056, 0x00056, 0x00073, 0x0007d,
+ 0x00090, 0x000a8, 0x000ad, 0x000ad, 0x00117, 0x00117, 0x00120, 0x00122,
+ 0x00130, 0x0013f, 0x00142, 0x0015f, 0x00162, 0x00164, 0x00166, 0x00171,
+ 0x00173, 0x00174, 0x00176, 0x0017b, 0x0017e, 0x00180, 0x00183, 0x00192,
+ 0x00195, 0x00196, 0x00199, 0x0019a, 0x0019d, 0x001a2, 0x001aa, 0x001ae,
+ 0x001b9, 0x001b9, 0x001bb, 0x001bb, 0x001be, 0x001be, 0x001c1, 0x001c2,
+ 0x001c5, 0x001c5, 0x001c7, 0x001c7, 0x001c9, 0x001c9, 0x001cb, 0x001ce,
+ 0x001d1, 0x001df, 0x001e1, 0x001e3, 0x001e5, 0x001e5, 0x001e7, 0x001e9,
+ 0x00200, 0x0020d, 0x00215, 0x00253, 0x00260, 0x00260, 0x00264, 0x00270,
+ 0x00272, 0x00274, 0x00281, 0x00281, 0x00283, 0x00283, 0x00289, 0x0028d,
+ 0x00290, 0x002a2, 0x002c0, 0x002c1, 0x00300, 0x00401, 0x00410, 0x00451,
+ 0x00460, 0x004a3, 0x004c0, 0x004d1, 0x00500, 0x00500, 0x00507, 0x0050b,
+ 0x0050f, 0x0050f, 0x00511, 0x00511, 0x00533, 0x00535, 0x00540, 0x0055b,
+ 0x00564, 0x00567, 0x00574, 0x00577, 0x00584, 0x0059b, 0x005fb, 0x005ff,
+ 0x00800, 0x00808, 0x00810, 0x00813, 0x00820, 0x00821, 0x00823, 0x00827,
+ 0x00830, 0x00834, 0x0083f, 0x00841, 0x00843, 0x00847, 0x0084f, 0x00886,
+ 0x008a0, 0x008ab, 0x008c0, 0x008c0, 0x008c4, 0x008c4, 0x008c6, 0x008c6,
+ 0x008d0, 0x008dd, 0x008e0, 0x008e6, 0x008f0, 0x008f3, 0x00900, 0x00903,
+ 0x00908, 0x00911, 0x00928, 0x0093e, 0x00942, 0x0094d, 0x00980, 0x00984,
+ 0x0098d, 0x0098f, 0x009b0, 0x009b4, 0x009c2, 0x009c9, 0x009ce, 0x009d7,
+ 0x009e0, 0x009e7, 0x00a00, 0x00a00, 0x00a02, 0x00a03, 0x00a10, 0x00a4f,
+ 0x00a61, 0x00a9f, 0x00ad0, 0x00adb, 0x00b00, 0x00b31, 0x00b35, 0x00b3c,
+ 0x00b40, 0x00b40, 0x00b70, 0x00b73, 0x00b78, 0x00b79, 0x00b7c, 0x00b7d,
+ 0x00b80, 0x00b81, 0x00b84, 0x00b85, 0x00b88, 0x00b89, 0x00b8c, 0x00b8d,
+ 0x00b90, 0x00b93, 0x00b98, 0x00b99, 0x00b9c, 0x00b9d, 0x00ba0, 0x00ba1,
+ 0x00ba4, 0x00ba5, 0x00ba8, 0x00ba9, 0x00bac, 0x00bad, 0x00bb0, 0x00bb1,
+ 0x00bb4, 0x00bb5, 0x00bb8, 0x00bb9, 0x00bbc, 0x00bbd, 0x00bc0, 0x00bc1,
+ 0x00c00, 0x00c00, 0x00c02, 0x00c04, 0x00c06, 0x00c06, 0x00c10, 0x00cd9,
+ 0x00ce0, 0x00d0c, 0x00df0, 0x00df4, 0x00e01, 0x00e02, 0x00e07, 0x00e0e,
+ 0x00e10, 0x00e13, 0x00e17, 0x00e19, 0x00e1c, 0x00e2b, 0x00e30, 0x00e32,
+ 0x00e3a, 0x00e3d, 0x00e50, 0x00e5b, 0x02840, 0x0287f, 0x0ec00, 0x0ec01,
+ 0x0ec05, 0x0ec05, 0x0ec07, 0x0ec07, 0x0ec0a, 0x0ec0a, 0x0ec12, 0x0ec12,
+ 0x0ec26, 0x0ec28, 0x0ec2b, 0x0ec2d, 0x0ec2f, 0x0ec2f, 0x0ec40, 0x0ec41,
+ 0x0ec45, 0x0ec45, 0x0ec47, 0x0ec47, 0x0ec4a, 0x0ec4a, 0x0ec52, 0x0ec52,
+ 0x0ec66, 0x0ec68, 0x0ec6b, 0x0ec6d, 0x0ec6f, 0x0ec6f, 0x0ec80, 0x0ec81,
+ 0x0ec85, 0x0ec85, 0x0ec87, 0x0ec87, 0x0ec8a, 0x0ec8a, 0x0ec92, 0x0ec92,
+ 0x0eca6, 0x0eca8, 0x0ecab, 0x0ecad, 0x0ecaf, 0x0ecaf, 0x0ecc0, 0x0ecc1,
+ 0x0ecc5, 0x0ecc5, 0x0ecc7, 0x0ecc7, 0x0ecca, 0x0ecca, 0x0ecd2, 0x0ecd2,
+ 0x0ece6, 0x0ece8, 0x0eceb, 0x0eced, 0x0ecef, 0x0ecef, 0x0ed00, 0x0ed01,
+ 0x0ed05, 0x0ed05, 0x0ed07, 0x0ed07, 0x0ed0a, 0x0ed0a, 0x0ed12, 0x0ed12,
+ 0x0ed26, 0x0ed28, 0x0ed2b, 0x0ed2d, 0x0ed2f, 0x0ed2f, 0x0ed40, 0x0ed41,
+ 0x0ed45, 0x0ed45, 0x0ed47, 0x0ed47, 0x0ed4a, 0x0ed4a, 0x0ed52, 0x0ed52,
+ 0x0ed66, 0x0ed68, 0x0ed6b, 0x0ed6d, 0x0ed6f, 0x0ed6f, 0x0ed80, 0x0ed81,
+ 0x0ed85, 0x0ed85, 0x0ed87, 0x0ed87, 0x0ed8a, 0x0ed8a, 0x0ed92, 0x0ed92,
+ 0x0eda6, 0x0eda8, 0x0edab, 0x0edad, 0x0edaf, 0x0edaf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gpu_registers), 8));
+
+static const u32 gen7_9_0_gxclkctl_registers[] = {
+ 0x18800, 0x18800, 0x18808, 0x1880b, 0x18820, 0x18822, 0x18830, 0x18830,
+ 0x18834, 0x1883b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gxclkctl_registers), 8));
+
+/*
+ * Block : ['GMUAO', 'GMUCX', 'GMUCX_RAM']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 134 (Regs:429)
+ */
+static const u32 gen7_9_0_gmu_registers[] = {
+ 0x10001, 0x10001, 0x10003, 0x10003, 0x10401, 0x10401, 0x10403, 0x10403,
+ 0x10801, 0x10801, 0x10803, 0x10803, 0x10c01, 0x10c01, 0x10c03, 0x10c03,
+ 0x11001, 0x11001, 0x11003, 0x11003, 0x11401, 0x11401, 0x11403, 0x11403,
+ 0x11801, 0x11801, 0x11803, 0x11803, 0x11c01, 0x11c01, 0x11c03, 0x11c03,
+ 0x1f400, 0x1f40b, 0x1f40f, 0x1f411, 0x1f500, 0x1f500, 0x1f507, 0x1f507,
+ 0x1f509, 0x1f50b, 0x1f700, 0x1f701, 0x1f704, 0x1f706, 0x1f708, 0x1f709,
+ 0x1f70c, 0x1f70d, 0x1f710, 0x1f711, 0x1f713, 0x1f716, 0x1f718, 0x1f71d,
+ 0x1f720, 0x1f724, 0x1f729, 0x1f729, 0x1f730, 0x1f747, 0x1f750, 0x1f756,
+ 0x1f758, 0x1f759, 0x1f75c, 0x1f75c, 0x1f760, 0x1f761, 0x1f764, 0x1f76b,
+ 0x1f770, 0x1f775, 0x1f780, 0x1f785, 0x1f790, 0x1f798, 0x1f7a0, 0x1f7a8,
+ 0x1f7b0, 0x1f7b3, 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c,
+ 0x1f80f, 0x1f80f, 0x1f811, 0x1f811, 0x1f813, 0x1f817, 0x1f819, 0x1f81c,
+ 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f860, 0x1f860,
+ 0x1f862, 0x1f866, 0x1f868, 0x1f869, 0x1f870, 0x1f879, 0x1f87f, 0x1f881,
+ 0x1f890, 0x1f896, 0x1f8a0, 0x1f8a2, 0x1f8a4, 0x1f8af, 0x1f8b8, 0x1f8b9,
+ 0x1f8c0, 0x1f8c1, 0x1f8c3, 0x1f8c4, 0x1f8d0, 0x1f8d0, 0x1f8ec, 0x1f8ec,
+ 0x1f8f0, 0x1f8f1, 0x1f910, 0x1f917, 0x1f920, 0x1f921, 0x1f924, 0x1f925,
+ 0x1f928, 0x1f929, 0x1f92c, 0x1f92d, 0x1f942, 0x1f944, 0x1f948, 0x1f94a,
+ 0x1f94f, 0x1f951, 0x1f954, 0x1f955, 0x1f95d, 0x1f95d, 0x1f962, 0x1f96b,
+ 0x1f970, 0x1f971, 0x1f973, 0x1f977, 0x1f97c, 0x1f97c, 0x1f980, 0x1f981,
+ 0x1f984, 0x1f986, 0x1f992, 0x1f993, 0x1f996, 0x1f99e, 0x1f9c5, 0x1f9d4,
+ 0x1f9f0, 0x1f9f1, 0x1f9f8, 0x1f9fa, 0x1f9fc, 0x1f9fc, 0x1fa00, 0x1fa03,
+ 0x20000, 0x20013, 0x20018, 0x2001a, 0x20020, 0x20021, 0x20024, 0x20025,
+ 0x2002a, 0x2002c, 0x20030, 0x20031, 0x20034, 0x20036, 0x23801, 0x23801,
+ 0x23803, 0x23803, 0x23805, 0x23805, 0x23807, 0x23807, 0x23809, 0x23809,
+ 0x2380b, 0x2380b, 0x2380d, 0x2380d, 0x2380f, 0x2380f, 0x23811, 0x23811,
+ 0x23813, 0x23813, 0x23815, 0x23815, 0x23817, 0x23817, 0x23819, 0x23819,
+ 0x2381b, 0x2381b, 0x2381d, 0x2381d, 0x2381f, 0x23820, 0x23822, 0x23822,
+ 0x23824, 0x23824, 0x23826, 0x23826, 0x23828, 0x23828, 0x2382a, 0x2382a,
+ 0x2382c, 0x2382c, 0x2382e, 0x2382e, 0x23830, 0x23830, 0x23832, 0x23832,
+ 0x23834, 0x23834, 0x23836, 0x23836, 0x23838, 0x23838, 0x2383a, 0x2383a,
+ 0x2383c, 0x2383c, 0x2383e, 0x2383e, 0x23840, 0x23847, 0x23b00, 0x23b01,
+ 0x23b03, 0x23b03, 0x23b05, 0x23b0e, 0x23b10, 0x23b13, 0x23b15, 0x23b16,
+ 0x23b28, 0x23b28, 0x23b30, 0x23b30,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmu_registers), 8));
+
+/*
+ * Block : ['GMUGX']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 44 (Regs:454)
+ */
+static const u32 gen7_9_0_gmugx_registers[] = {
+ 0x1a400, 0x1a41f, 0x1a440, 0x1a45f, 0x1a480, 0x1a49f, 0x1a4c0, 0x1a4df,
+ 0x1a500, 0x1a51f, 0x1a540, 0x1a55f, 0x1a580, 0x1a59f, 0x1a600, 0x1a61f,
+ 0x1a640, 0x1a65f, 0x1a780, 0x1a781, 0x1a783, 0x1a785, 0x1a787, 0x1a789,
+ 0x1a78b, 0x1a78d, 0x1a78f, 0x1a791, 0x1a793, 0x1a795, 0x1a797, 0x1a799,
+ 0x1a79b, 0x1a79d, 0x1a79f, 0x1a7a1, 0x1a7a3, 0x1a7a3, 0x1a7a8, 0x1a7b9,
+ 0x1a7c0, 0x1a7c1, 0x1a7c4, 0x1a7c5, 0x1a7c8, 0x1a7c9, 0x1a7cc, 0x1a7cd,
+ 0x1a7d0, 0x1a7d1, 0x1a7d4, 0x1a7d5, 0x1a7d8, 0x1a7d9, 0x1a7dc, 0x1a7dd,
+ 0x1a7e0, 0x1a7e1, 0x1a7fc, 0x1a7fd, 0x1a800, 0x1a808, 0x1a816, 0x1a816,
+ 0x1a81e, 0x1a81e, 0x1a826, 0x1a826, 0x1a82e, 0x1a82e, 0x1a836, 0x1a836,
+ 0x1a83e, 0x1a83e, 0x1a846, 0x1a846, 0x1a84e, 0x1a84e, 0x1a856, 0x1a856,
+ 0x1a883, 0x1a884, 0x1a890, 0x1a8b3, 0x1a900, 0x1a92b, 0x1a940, 0x1a940,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmugx_registers), 8));
+
+/*
+ * Block : ['CX_MISC']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 7 (Regs:56)
+ */
+static const u32 gen7_9_0_cx_misc_registers[] = {
+ 0x27800, 0x27800, 0x27810, 0x27814, 0x27820, 0x27824, 0x27828, 0x2782a,
+ 0x27832, 0x27857, 0x27880, 0x27881, 0x27c00, 0x27c01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_misc_registers), 8));
+
+/*
+ * Block : ['DBGC']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 19 (Regs:155)
+ */
+static const u32 gen7_9_0_dbgc_registers[] = {
+ 0x00600, 0x0061c, 0x0061e, 0x00634, 0x00640, 0x00643, 0x0064e, 0x00652,
+ 0x00654, 0x0065e, 0x00699, 0x00699, 0x0069b, 0x0069e, 0x006c2, 0x006e4,
+ 0x006e6, 0x006e6, 0x006e9, 0x006e9, 0x006eb, 0x006eb, 0x006f1, 0x006f4,
+ 0x00700, 0x00707, 0x00718, 0x00718, 0x00720, 0x00729, 0x00740, 0x0074a,
+ 0x00758, 0x00758, 0x00760, 0x00762,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_dbgc_registers), 8));
+
+/*
+ * Block : ['CX_DBGC']
+ * Pipeline: A7XX_PIPE_NONE
+ * pairs : 7 (Regs:75)
+ */
+static const u32 gen7_9_0_cx_dbgc_registers[] = {
+ 0x18400, 0x1841c, 0x1841e, 0x18434, 0x18440, 0x18443, 0x1844e, 0x18452,
+ 0x18454, 0x1845e, 0x18520, 0x18520, 0x18580, 0x18581,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_dbgc_registers), 8));
+
+/*
+ * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
+ * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
+ * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * pairs : 29 (Regs:573)
+ */
+static const u32 gen7_9_0_non_context_pipe_br_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08602, 0x08610, 0x0861b, 0x08620, 0x08620,
+ 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640, 0x09600, 0x09603,
+ 0x0960a, 0x09616, 0x09624, 0x0963a, 0x09640, 0x09640, 0x09e00, 0x09e00,
+ 0x09e02, 0x09e07, 0x09e0a, 0x09e16, 0x09e18, 0x09e1a, 0x09e1c, 0x09e1c,
+ 0x09e20, 0x09e25, 0x09e30, 0x09e31, 0x09e40, 0x09e51, 0x09e64, 0x09e6c,
+ 0x09e70, 0x09e72, 0x09e78, 0x09e79, 0x09e80, 0x09fff, 0x0a600, 0x0a600,
+ 0x0a603, 0x0a603, 0x0a610, 0x0a61f, 0x0a630, 0x0a631, 0x0a638, 0x0a63c,
+ 0x0a640, 0x0a65f,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_br_registers), 8));
+
+/*
+ * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
+ * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
+ * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_NONE
+ * pairs : 29 (Regs:573)
+ */
+static const u32 gen7_9_0_non_context_pipe_bv_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08602, 0x08610, 0x0861b, 0x08620, 0x08620,
+ 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640, 0x09600, 0x09603,
+ 0x0960a, 0x09616, 0x09624, 0x0963a, 0x09640, 0x09640, 0x09e00, 0x09e00,
+ 0x09e02, 0x09e07, 0x09e0a, 0x09e16, 0x09e18, 0x09e1a, 0x09e1c, 0x09e1c,
+ 0x09e20, 0x09e25, 0x09e30, 0x09e31, 0x09e40, 0x09e51, 0x09e64, 0x09e6c,
+ 0x09e70, 0x09e72, 0x09e78, 0x09e79, 0x09e80, 0x09fff, 0x0a600, 0x0a600,
+ 0x0a603, 0x0a603, 0x0a610, 0x0a61f, 0x0a630, 0x0a631, 0x0a638, 0x0a63c,
+ 0x0a640, 0x0a65f,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_bv_registers), 8));
+
+/*
+ * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
+ * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
+ * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_NONE
+ * pairs : 2 (Regs:7)
+ */
+static const u32 gen7_9_0_non_context_pipe_lpac_registers[] = {
+ 0x00887, 0x0088c, 0x00f80, 0x00f80,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_lpac_registers), 8));
+
+/*
+ * Block : ['RB']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * pairs : 5 (Regs:37)
+ */
+static const u32 gen7_9_0_non_context_rb_pipe_br_rac_registers[] = {
+ 0x08e10, 0x08e1c, 0x08e20, 0x08e25, 0x08e51, 0x08e5a, 0x08e6a, 0x08e6d,
+ 0x08ea0, 0x08ea3,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rac_registers), 8));
+
+/*
+ * Block : ['RB']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * pairs : 15 (Regs:66)
+ */
+static const u32 gen7_9_0_non_context_rb_pipe_br_rbp_registers[] = {
+ 0x08e01, 0x08e01, 0x08e04, 0x08e04, 0x08e06, 0x08e09, 0x08e0c, 0x08e0c,
+ 0x08e28, 0x08e28, 0x08e2c, 0x08e35, 0x08e3b, 0x08e40, 0x08e50, 0x08e50,
+ 0x08e5b, 0x08e5d, 0x08e5f, 0x08e5f, 0x08e61, 0x08e61, 0x08e63, 0x08e66,
+ 0x08e68, 0x08e69, 0x08e70, 0x08e7d, 0x08e80, 0x08e8f,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rbp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 4 (Regs:28)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers[] = {
+ 0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae75, 0x0aec0, 0x0aec5,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_SP_TOP
+ * pairs : 10 (Regs:61)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_br_sp_top_registers[] = {
+ 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae0a, 0x0ae0c, 0x0ae0c,
+ 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f,
+ 0x0ae50, 0x0ae52, 0x0ae80, 0x0aea3,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_USPTP
+ * pairs : 12 (Regs:62)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_br_usptp_registers[] = {
+ 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae0a, 0x0ae0c, 0x0ae0c,
+ 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35,
+ 0x0ae3a, 0x0ae3b, 0x0ae3e, 0x0ae3f, 0x0ae50, 0x0ae52, 0x0ae80, 0x0aea3,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_usptp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_HLSQ_DP_STR
+ * pairs : 2 (Regs:5)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers[] = {
+ 0x0ae6b, 0x0ae6c, 0x0ae73, 0x0ae75,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 1 (Regs:5)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers[] = {
+ 0x0af88, 0x0af8c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_SP_TOP
+ * pairs : 1 (Regs:6)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers[] = {
+ 0x0af80, 0x0af85,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_USPTP
+ * pairs : 2 (Regs:9)
+ */
+static const u32 gen7_9_0_non_context_sp_pipe_lpac_usptp_registers[] = {
+ 0x0af80, 0x0af85, 0x0af90, 0x0af92,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_NONE
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_USPTP
+ * pairs : 5 (Regs:29)
+ */
+static const u32 gen7_9_0_non_context_tpl1_pipe_none_usptp_registers[] = {
+ 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c, 0x0b610, 0x0b621,
+ 0x0b630, 0x0b633,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_none_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_USPTP
+ * pairs : 1 (Regs:1)
+ */
+static const u32 gen7_9_0_non_context_tpl1_pipe_br_usptp_registers[] = {
+ 0x0b600, 0x0b600,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_br_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_NONE
+ * Location: A7XX_USPTP
+ * pairs : 1 (Regs:1)
+ */
+static const u32 gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers[] = {
+ 0x0b780, 0x0b780,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers), 8));
+
+/*
+ * Block : ['GRAS']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_GRAS
+ * pairs : 14 (Regs:293)
+ */
+static const u32 gen7_9_0_gras_pipe_br_cluster_gras_registers[] = {
+ 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08116, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_br_cluster_gras_registers), 8));
+
+/*
+ * Block : ['GRAS']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_GRAS
+ * pairs : 14 (Regs:293)
+ */
+static const u32 gen7_9_0_gras_pipe_bv_cluster_gras_registers[] = {
+ 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08116, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_bv_cluster_gras_registers), 8));
+
+/*
+ * Block : ['PC']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 6 (Regs:31)
+ */
+static const u32 gen7_9_0_pc_pipe_br_cluster_fe_registers[] = {
+ 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
+ 0x09970, 0x09972, 0x09b00, 0x09b0c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_br_cluster_fe_registers), 8));
+
+/*
+ * Block : ['PC']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 6 (Regs:31)
+ */
+static const u32 gen7_9_0_pc_pipe_bv_cluster_fe_registers[] = {
+ 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
+ 0x09970, 0x09972, 0x09b00, 0x09b0c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_bv_cluster_fe_registers), 8));
+
+/*
+ * Block : ['VFD']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 2 (Regs:236)
+ */
+static const u32 gen7_9_0_vfd_pipe_br_cluster_fe_registers[] = {
+ 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_br_cluster_fe_registers), 8));
+
+/*
+ * Block : ['VFD']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 2 (Regs:236)
+ */
+static const u32 gen7_9_0_vfd_pipe_bv_cluster_fe_registers[] = {
+ 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_bv_cluster_fe_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 2 (Regs:18)
+ */
+static const u32 gen7_9_0_vpc_pipe_br_cluster_fe_registers[] = {
+ 0x09300, 0x0930a, 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_fe_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_PC_VS
+ * pairs : 3 (Regs:30)
+ */
+static const u32 gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers[] = {
+ 0x09101, 0x0910c, 0x09300, 0x0930a, 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_VPC_PS
+ * pairs : 5 (Regs:76)
+ */
+static const u32 gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers[] = {
+ 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x0923c, 0x09300, 0x0930a,
+ 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_FE
+ * pairs : 2 (Regs:18)
+ */
+static const u32 gen7_9_0_vpc_pipe_bv_cluster_fe_registers[] = {
+ 0x09300, 0x0930a, 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_fe_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_PC_VS
+ * pairs : 3 (Regs:30)
+ */
+static const u32 gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers[] = {
+ 0x09101, 0x0910c, 0x09300, 0x0930a, 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers), 8));
+
+/*
+ * Block : ['VPC']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_VPC_PS
+ * pairs : 5 (Regs:76)
+ */
+static const u32 gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers[] = {
+ 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x0923c, 0x09300, 0x0930a,
+ 0x09311, 0x09317,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers), 8));
+
+/*
+ * Block : ['RB']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_PS
+ * pairs : 39 (Regs:133)
+ */
+static const u32 gen7_9_0_rb_pipe_br_cluster_ps_rac_registers[] = {
+ 0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811,
+ 0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829,
+ 0x0882b, 0x0882e, 0x08831, 0x08831, 0x08833, 0x08836, 0x08839, 0x08839,
+ 0x0883b, 0x0883e, 0x08841, 0x08841, 0x08843, 0x08846, 0x08849, 0x08849,
+ 0x0884b, 0x0884e, 0x08851, 0x08851, 0x08853, 0x08856, 0x08859, 0x08859,
+ 0x0885b, 0x0885e, 0x08860, 0x08864, 0x08870, 0x08870, 0x08873, 0x08876,
+ 0x08878, 0x08879, 0x08882, 0x08885, 0x08887, 0x08889, 0x08891, 0x08891,
+ 0x08898, 0x08899, 0x088c0, 0x088c1, 0x088e5, 0x088e5, 0x088f4, 0x088f5,
+ 0x08a00, 0x08a05, 0x08a10, 0x08a15, 0x08a20, 0x08a25, 0x08a30, 0x08a35,
+ 0x08c00, 0x08c01, 0x08c18, 0x08c1f, 0x08c26, 0x08c34,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rac_registers), 8));
+
+/*
+ * Block : ['RB']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_PS
+ * pairs : 34 (Regs:100)
+ */
+static const u32 gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers[] = {
+ 0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812,
+ 0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a,
+ 0x0882f, 0x08830, 0x08832, 0x08832, 0x08837, 0x08838, 0x0883a, 0x0883a,
+ 0x0883f, 0x08840, 0x08842, 0x08842, 0x08847, 0x08848, 0x0884a, 0x0884a,
+ 0x0884f, 0x08850, 0x08852, 0x08852, 0x08857, 0x08858, 0x0885a, 0x0885a,
+ 0x0885f, 0x0885f, 0x08865, 0x08865, 0x08871, 0x08872, 0x08877, 0x08877,
+ 0x08880, 0x08881, 0x08886, 0x08886, 0x08890, 0x08890, 0x088d0, 0x088e4,
+ 0x088e8, 0x088ea, 0x088f0, 0x088f0, 0x08900, 0x0891a, 0x08927, 0x08928,
+ 0x08c17, 0x08c17, 0x08c20, 0x08c25,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 29 (Regs:215)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers[] = {
+ 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872,
+ 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898,
+ 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0a974, 0x0a977,
+ 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_SP_TOP
+ * pairs : 22 (Regs:73)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840,
+ 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871,
+ 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af,
+ 0x0a974, 0x0a977, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_USPTP
+ * pairs : 16 (Regs:269)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861,
+ 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899,
+ 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 21 (Regs:334)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers[] = {
+ 0x0a980, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7, 0x0a9aa, 0x0a9aa,
+ 0x0a9ae, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
+ 0x0a9c4, 0x0a9c4, 0x0a9c6, 0x0a9c6, 0x0a9cd, 0x0a9cd, 0x0a9e0, 0x0a9fc,
+ 0x0aa00, 0x0aa00, 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0aaf2, 0x0aaf3,
+ 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_HLSQ_DP
+ * pairs : 3 (Regs:19)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers[] = {
+ 0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_SP_TOP
+ * pairs : 18 (Regs:77)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers[] = {
+ 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8,
+ 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5,
+ 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5, 0x0a9e0, 0x0a9f9, 0x0aa00, 0x0aa03,
+ 0x0aaf2, 0x0aaf3, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_USPTP
+ * pairs : 17 (Regs:333)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers[] = {
+ 0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae,
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3,
+ 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa01, 0x0aa03,
+ 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22,
+ 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_HLSQ_DP_STR
+ * pairs : 1 (Regs:6)
+ */
+static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers[] = {
+ 0x0a9c6, 0x0a9cb,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 28 (Regs:213)
+ */
+static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers[] = {
+ 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872,
+ 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898,
+ 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0a974, 0x0a977,
+ 0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_SP_TOP
+ * pairs : 21 (Regs:71)
+ */
+static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840,
+ 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871,
+ 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af,
+ 0x0a974, 0x0a977, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b,
+ 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_USPTP
+ * pairs : 16 (Regs:266)
+ */
+static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861,
+ 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899,
+ 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_HLSQ_STATE
+ * pairs : 14 (Regs:299)
+ */
+static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers[] = {
+ 0x0a9b0, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
+ 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc,
+ 0x0aa00, 0x0aa00, 0x0aa31, 0x0aa35, 0x0aa40, 0x0aabf, 0x0aaf3, 0x0aaf3,
+ 0x0ab00, 0x0ab01, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_HLSQ_DP
+ * pairs : 2 (Regs:13)
+ */
+static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers[] = {
+ 0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_SP_TOP
+ * pairs : 9 (Regs:34)
+ */
+static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers[] = {
+ 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5,
+ 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0aaf3, 0x0aaf3,
+ 0x0ab00, 0x0ab00,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers), 8));
+
+/*
+ * Block : ['SP']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_USPTP
+ * pairs : 11 (Regs:279)
+ */
+static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers[] = {
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3,
+ 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31,
+ 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab01, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_USPTP
+ * pairs : 3 (Regs:10)
+ */
+static const u32 gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers[] = {
+ 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_BR
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_USPTP
+ * pairs : 6 (Regs:42)
+ */
+static const u32 gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers[] = {
+ 0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307,
+ 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_BV
+ * Cluster : A7XX_CLUSTER_SP_VS
+ * Location: A7XX_USPTP
+ * pairs : 3 (Regs:10)
+ */
+static const u32 gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers[] = {
+ 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers), 8));
+
+/*
+ * Block : ['TPL1']
+ * Pipeline: A7XX_PIPE_LPAC
+ * Cluster : A7XX_CLUSTER_SP_PS
+ * Location: A7XX_USPTP
+ * pairs : 5 (Regs:7)
+ */
+static const u32 gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers[] = {
+ 0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309,
+ 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers), 8));
+
+static const struct gen7_sel_reg gen7_9_0_rb_rac_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0,
+};
+
+static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0x9,
+};
+
+static struct gen7_cluster_registers gen7_9_0_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_9_0_non_context_pipe_br_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ gen7_9_0_non_context_pipe_bv_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ gen7_9_0_non_context_pipe_lpac_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_9_0_non_context_rb_pipe_br_rac_registers, &gen7_9_0_rb_rac_sel, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_9_0_non_context_rb_pipe_br_rbp_registers, &gen7_9_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_gras_pipe_br_cluster_gras_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_gras_pipe_br_cluster_gras_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_pc_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_pc_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
+};
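
Annotation (not part of the patch): each entry in gen7_9_0_clusters[] binds a cluster id, a pipe, and a context state to one of the pair lists above. Context-switched clusters appear twice (STATE_FORCE_CTXT_0 and STATE_FORCE_CTXT_1) so both hardware contexts get captured, and the RB rows additionally carry a pointer to one of the gen7_sel_reg descriptors so the RAC or RBP sub-block is routed before its ranges are read. A sketch of that select-then-dump step, using placeholder accessors and made-up select offsets rather than the driver's actual crashdumper path:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct sel_reg {			/* mirrors gen7_sel_reg above */
	uint32_t host_reg, cd_reg, val;
};

struct cluster {			/* simplified cluster descriptor */
	const uint32_t *pairs;		/* UINT_MAX-terminated pair list */
	const struct sel_reg *sel;	/* optional sub-block select */
};

/* Placeholder register accessors, standing in for the real MMIO path. */
static void reg_write(uint32_t off, uint32_t val)
{
	printf("  write 0x%05x <- 0x%x\n", (unsigned)off, (unsigned)val);
}

static uint32_t reg_read(uint32_t off)
{
	(void)off;
	return 0;
}

static void dump_cluster(const struct cluster *c)
{
	const uint32_t *p;
	uint32_t off;

	if (c->sel) {			/* route the RB sub-block first */
		reg_write(c->sel->host_reg, c->sel->val);
		reg_write(c->sel->cd_reg, c->sel->val);
	}

	for (p = c->pairs; p[0] != UINT_MAX; p += 2)
		for (off = p[0]; off <= p[1]; off++)
			(void)reg_read(off);
}

int main(void)
{
	static const uint32_t pairs[] = { 0x08e10, 0x08e1c, UINT_MAX, UINT_MAX };
	static const struct sel_reg rac = { 0xf800, 0xf801, 0x0 };	/* made-up offsets */
	struct cluster c = { pairs, &rac };

	dump_cluster(&c);
	return 0;
}
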
+
+static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_9_0_non_context_sp_pipe_br_sp_top_registers, 0xae00},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_non_context_sp_pipe_br_usptp_registers, 0xae00},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers, 0xae00},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers, 0xaf80},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers, 0xaf80},
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_9_0_non_context_sp_pipe_lpac_usptp_registers, 0xaf80},
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ gen7_9_0_non_context_tpl1_pipe_none_usptp_registers, 0xb600},
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_non_context_tpl1_pipe_br_usptp_registers, 0xb600},
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers, 0xb780},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP_STR,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP_STR,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP_STR,
+ gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
+};
+
+static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
+ { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
+ { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
+ REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
+ { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
+ { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
+ { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR,
+ REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100},
+ { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
+ { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
+ REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
+ { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
+ { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
+ { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
+ { "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
+ REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},
+ { "CP_AQE_ROQ_1", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_1,
+ REG_A7XX_CP_AQE_ROQ_DBG_DATA_1, 0x00100},
+ { "CP_AQE_UCODE_DBG_0", REG_A7XX_CP_AQE_UCODE_DBG_ADDR_0,
+ REG_A7XX_CP_AQE_UCODE_DBG_DATA_0, 0x08000},
+ { "CP_AQE_UCODE_DBG_1", REG_A7XX_CP_AQE_UCODE_DBG_ADDR_1,
+ REG_A7XX_CP_AQE_UCODE_DBG_DATA_1, 0x08000},
+ { "CP_AQE_STAT_0", REG_A7XX_CP_AQE_STAT_ADDR_0,
+ REG_A7XX_CP_AQE_STAT_DATA_0, 0x00040},
+ { "CP_AQE_STAT_1", REG_A7XX_CP_AQE_STAT_ADDR_1,
+ REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
+};
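
Annotation (not part of the patch): each entry in the indexed-register list names an ADDR/DATA register pair plus a dword count. The usual pattern for such indexed windows is to write a starting index to the ADDR register and then read the DATA register count times while the hardware advances the index. A sketch of that pattern with placeholder accessors and dummy offsets, not the driver's actual snapshot code:

#include <stdint.h>
#include <stdio.h>

struct indexed_reg {		/* simplified view of one table entry */
	const char *name;
	uint32_t addr;		/* index register */
	uint32_t data;		/* data window register */
	uint32_t count;		/* dwords to capture */
};

/* Placeholder accessors. */
static void reg_write(uint32_t off, uint32_t val) { (void)off; (void)val; }
static uint32_t reg_read(uint32_t off) { (void)off; return 0; }

static void dump_indexed(const struct indexed_reg *r, uint32_t *out)
{
	uint32_t i;

	reg_write(r->addr, 0);			/* start at index 0 */
	for (i = 0; i < r->count; i++)
		out[i] = reg_read(r->data);	/* window auto-increments */
}

int main(void)
{
	static uint32_t buf[0x40];
	struct indexed_reg sqe_stat = { "CP_SQE_STAT", 0x101, 0x102, 0x40 };	/* dummy offsets */

	dump_indexed(&sqe_stat, buf);
	printf("captured %u dwords for %s\n", (unsigned)sqe_stat.count, sqe_stat.name);
	return 0;
}
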
+
+static struct gen7_reg_list gen7_9_0_reg_list[] = {
+ { gen7_9_0_gpu_registers, NULL},
+ { gen7_9_0_cx_misc_registers, NULL},
+ { gen7_9_0_cx_dbgc_registers, NULL},
+ { gen7_9_0_dbgc_registers, NULL},
+ { NULL, NULL},
+};
+
+static const u32 gen7_9_0_cpr_registers[] = {
+ 0x26800, 0x26805, 0x26808, 0x2680d, 0x26814, 0x26815, 0x2681c, 0x2681c,
+ 0x26820, 0x26839, 0x26840, 0x26841, 0x26848, 0x26849, 0x26850, 0x26851,
+ 0x26880, 0x268a1, 0x26980, 0x269b0, 0x269c0, 0x269c8, 0x269e0, 0x269ee,
+ 0x269fb, 0x269ff, 0x26a02, 0x26a07, 0x26a09, 0x26a0b, 0x26a10, 0x26b0f,
+ 0x27440, 0x27441, 0x27444, 0x27444, 0x27480, 0x274a2, 0x274ac, 0x274c4,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_cpr_registers), 8));
+
+static const u32 gen7_9_0_dpm_registers[] = {
+ 0x1aa00, 0x1aa06, 0x1aa09, 0x1aa0a, 0x1aa0c, 0x1aa0d, 0x1aa0f, 0x1aa12,
+ 0x1aa14, 0x1aa47, 0x1aa50, 0x1aa51,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_dpm_registers), 8));
+
+static const u32 gen7_9_0_dpm_leakage_registers[] = {
+ 0x21c00, 0x21c00, 0x21c08, 0x21c09, 0x21c0e, 0x21c0f, 0x21c4f, 0x21c50,
+ 0x21c52, 0x21c52, 0x21c54, 0x21c56, 0x21c58, 0x21c5a, 0x21c5c, 0x21c60,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_dpm_leakage_registers), 8));
+
+static const u32 gen7_9_0_gfx_gpu_acd_registers[] = {
+ 0x18c00, 0x18c16, 0x18c20, 0x18c2d, 0x18c30, 0x18c31, 0x18c35, 0x18c35,
+ 0x18c37, 0x18c37, 0x18c3a, 0x18c3a, 0x18c42, 0x18c42, 0x18c56, 0x18c58,
+ 0x18c5b, 0x18c5d, 0x18c5f, 0x18c62,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gfx_gpu_acd_registers), 8));
+
+static const u32 gen7_9_0_gpucc_registers[] = {
+ 0x24000, 0x2400f, 0x24400, 0x2440f, 0x24800, 0x24805, 0x24c00, 0x24cff,
+ 0x25400, 0x25404, 0x25800, 0x25804, 0x25c00, 0x25c04, 0x26000, 0x26004,
+ 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430, 0x26432, 0x26434,
+ 0x26441, 0x2644b, 0x2644d, 0x26463, 0x26466, 0x26468, 0x26478, 0x2647a,
+ 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a6, 0x264c5, 0x264c7,
+ 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc, 0x2650b, 0x2650b,
+ 0x2651c, 0x2651e, 0x26540, 0x2654e, 0x26554, 0x26573, 0x26576, 0x2657a,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_gpucc_registers), 8));
+
+static const u32 gen7_9_0_isense_registers[] = {
+ 0x22c3a, 0x22c3c, 0x22c41, 0x22c41, 0x22c46, 0x22c47, 0x22c4c, 0x22c4c,
+ 0x22c51, 0x22c51, 0x22c56, 0x22c56, 0x22c5b, 0x22c5b, 0x22c60, 0x22c60,
+ 0x22c65, 0x22c65, 0x22c6a, 0x22c70, 0x22c75, 0x22c75, 0x22c7a, 0x22c7a,
+ 0x22c7f, 0x22c7f, 0x22c84, 0x22c85, 0x22c8a, 0x22c8a, 0x22c8f, 0x22c8f,
+ 0x23000, 0x23009, 0x2300e, 0x2300e, 0x23013, 0x23013, 0x23018, 0x23018,
+ 0x2301d, 0x2301d, 0x23022, 0x23022, 0x23027, 0x23032, 0x23037, 0x23037,
+ 0x2303c, 0x2303c, 0x23041, 0x23041, 0x23046, 0x23046, 0x2304b, 0x2304b,
+ 0x23050, 0x23050, 0x23055, 0x23055, 0x2305a, 0x2305a, 0x2305f, 0x2305f,
+ 0x23064, 0x23064, 0x23069, 0x2306a, 0x2306f, 0x2306f, 0x23074, 0x23075,
+ 0x2307a, 0x2307e, 0x23083, 0x23083, 0x23088, 0x23088, 0x2308d, 0x2308d,
+ 0x23092, 0x23092, 0x230e2, 0x230e2,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_isense_registers), 8));
+
+static const u32 gen7_9_0_rscc_registers[] = {
+ 0x14000, 0x14036, 0x14040, 0x14047, 0x14080, 0x14084, 0x14089, 0x1408c,
+ 0x14091, 0x14094, 0x14099, 0x1409c, 0x140a1, 0x140a4, 0x140a9, 0x140ac,
+ 0x14100, 0x14104, 0x14114, 0x14119, 0x14124, 0x14132, 0x14154, 0x1416b,
+ 0x14340, 0x14342, 0x14344, 0x1437c, 0x143f0, 0x143f8, 0x143fa, 0x143fe,
+ 0x14400, 0x14404, 0x14406, 0x1440a, 0x1440c, 0x14410, 0x14412, 0x14416,
+ 0x14418, 0x1441c, 0x1441e, 0x14422, 0x14424, 0x14424, 0x14498, 0x144a0,
+ 0x144a2, 0x144a6, 0x144a8, 0x144ac, 0x144ae, 0x144b2, 0x144b4, 0x144b8,
+ 0x144ba, 0x144be, 0x144c0, 0x144c4, 0x144c6, 0x144ca, 0x144cc, 0x144cc,
+ 0x14540, 0x14548, 0x1454a, 0x1454e, 0x14550, 0x14554, 0x14556, 0x1455a,
+ 0x1455c, 0x14560, 0x14562, 0x14566, 0x14568, 0x1456c, 0x1456e, 0x14572,
+ 0x14574, 0x14574, 0x145e8, 0x145f0, 0x145f2, 0x145f6, 0x145f8, 0x145fc,
+ 0x145fe, 0x14602, 0x14604, 0x14608, 0x1460a, 0x1460e, 0x14610, 0x14614,
+ 0x14616, 0x1461a, 0x1461c, 0x1461c, 0x14690, 0x14698, 0x1469a, 0x1469e,
+ 0x146a0, 0x146a4, 0x146a6, 0x146aa, 0x146ac, 0x146b0, 0x146b2, 0x146b6,
+ 0x146b8, 0x146bc, 0x146be, 0x146c2, 0x146c4, 0x146c4, 0x14738, 0x14740,
+ 0x14742, 0x14746, 0x14748, 0x1474c, 0x1474e, 0x14752, 0x14754, 0x14758,
+ 0x1475a, 0x1475e, 0x14760, 0x14764, 0x14766, 0x1476a, 0x1476c, 0x1476c,
+ 0x147e0, 0x147e8, 0x147ea, 0x147ee, 0x147f0, 0x147f4, 0x147f6, 0x147fa,
+ 0x147fc, 0x14800, 0x14802, 0x14806, 0x14808, 0x1480c, 0x1480e, 0x14812,
+ 0x14814, 0x14814, 0x14888, 0x14890, 0x14892, 0x14896, 0x14898, 0x1489c,
+ 0x1489e, 0x148a2, 0x148a4, 0x148a8, 0x148aa, 0x148ae, 0x148b0, 0x148b4,
+ 0x148b6, 0x148ba, 0x148bc, 0x148bc, 0x14930, 0x14938, 0x1493a, 0x1493e,
+ 0x14940, 0x14944, 0x14946, 0x1494a, 0x1494c, 0x14950, 0x14952, 0x14956,
+ 0x14958, 0x1495c, 0x1495e, 0x14962, 0x14964, 0x14964,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_rscc_registers), 8));
+
+static const u32 *gen7_9_0_external_core_regs[] = {
+ gen7_9_0_gpucc_registers,
+ gen7_9_0_gxclkctl_registers,
+ gen7_9_0_cpr_registers,
+ gen7_9_0_dpm_registers,
+ gen7_9_0_dpm_leakage_registers,
+ gen7_9_0_gfx_gpu_acd_registers,
+};
+#endif /*_ADRENO_GEN7_9_0_SNAPSHOT_H */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
deleted file mode 100644
index 7067376e25e1..000000000000
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ /dev/null
@@ -1,2803 +0,0 @@
-#ifndef ADRENO_PM4_XML
-#define ADRENO_PM4_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-*/
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#define assert(x) BUG_ON(!(x))
-#else
-#include <assert.h>
-#endif
-
-#ifdef __cplusplus
-#define __struct_cast(X)
-#else
-#define __struct_cast(X) (struct X)
-#endif
-
-enum vgt_event_type {
- VS_DEALLOC = 0,
- PS_DEALLOC = 1,
- VS_DONE_TS = 2,
- PS_DONE_TS = 3,
- CACHE_FLUSH_TS = 4,
- CONTEXT_DONE = 5,
- CACHE_FLUSH = 6,
- VIZQUERY_START = 7,
- HLSQ_FLUSH = 7,
- VIZQUERY_END = 8,
- SC_WAIT_WC = 9,
- WRITE_PRIMITIVE_COUNTS = 9,
- START_PRIMITIVE_CTRS = 11,
- STOP_PRIMITIVE_CTRS = 12,
- RST_PIX_CNT = 13,
- RST_VTX_CNT = 14,
- TILE_FLUSH = 15,
- STAT_EVENT = 16,
- CACHE_FLUSH_AND_INV_TS_EVENT = 20,
- ZPASS_DONE = 21,
- CACHE_FLUSH_AND_INV_EVENT = 22,
- RB_DONE_TS = 22,
- PERFCOUNTER_START = 23,
- PERFCOUNTER_STOP = 24,
- VS_FETCH_DONE = 27,
- FACENESS_FLUSH = 28,
- WT_DONE_TS = 8,
- START_FRAGMENT_CTRS = 13,
- STOP_FRAGMENT_CTRS = 14,
- START_COMPUTE_CTRS = 15,
- STOP_COMPUTE_CTRS = 16,
- FLUSH_SO_0 = 17,
- FLUSH_SO_1 = 18,
- FLUSH_SO_2 = 19,
- FLUSH_SO_3 = 20,
- PC_CCU_INVALIDATE_DEPTH = 24,
- PC_CCU_INVALIDATE_COLOR = 25,
- PC_CCU_RESOLVE_TS = 26,
- PC_CCU_FLUSH_DEPTH_TS = 28,
- PC_CCU_FLUSH_COLOR_TS = 29,
- BLIT = 30,
- LRZ_CLEAR = 37,
- LRZ_FLUSH = 38,
- BLIT_OP_FILL_2D = 39,
- BLIT_OP_COPY_2D = 40,
- UNK_40 = 40,
- BLIT_OP_SCALE_2D = 42,
- CONTEXT_DONE_2D = 43,
- UNK_2C = 44,
- UNK_2D = 45,
- CACHE_INVALIDATE = 49,
- LABEL = 63,
- DUMMY_EVENT = 1,
- CCU_INVALIDATE_DEPTH = 24,
- CCU_INVALIDATE_COLOR = 25,
- CCU_RESOLVE_CLEAN = 26,
- CCU_FLUSH_DEPTH = 28,
- CCU_FLUSH_COLOR = 29,
- CCU_RESOLVE = 30,
- CCU_END_RESOLVE_GROUP = 31,
- CCU_CLEAN_DEPTH = 32,
- CCU_CLEAN_COLOR = 33,
- CACHE_RESET = 48,
- CACHE_CLEAN = 49,
- CACHE_FLUSH7 = 50,
- CACHE_INVALIDATE7 = 51,
-};
-
-enum pc_di_primtype {
- DI_PT_NONE = 0,
- DI_PT_POINTLIST_PSIZE = 1,
- DI_PT_LINELIST = 2,
- DI_PT_LINESTRIP = 3,
- DI_PT_TRILIST = 4,
- DI_PT_TRIFAN = 5,
- DI_PT_TRISTRIP = 6,
- DI_PT_LINELOOP = 7,
- DI_PT_RECTLIST = 8,
- DI_PT_POINTLIST = 9,
- DI_PT_LINE_ADJ = 10,
- DI_PT_LINESTRIP_ADJ = 11,
- DI_PT_TRI_ADJ = 12,
- DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES0 = 31,
- DI_PT_PATCHES1 = 32,
- DI_PT_PATCHES2 = 33,
- DI_PT_PATCHES3 = 34,
- DI_PT_PATCHES4 = 35,
- DI_PT_PATCHES5 = 36,
- DI_PT_PATCHES6 = 37,
- DI_PT_PATCHES7 = 38,
- DI_PT_PATCHES8 = 39,
- DI_PT_PATCHES9 = 40,
- DI_PT_PATCHES10 = 41,
- DI_PT_PATCHES11 = 42,
- DI_PT_PATCHES12 = 43,
- DI_PT_PATCHES13 = 44,
- DI_PT_PATCHES14 = 45,
- DI_PT_PATCHES15 = 46,
- DI_PT_PATCHES16 = 47,
- DI_PT_PATCHES17 = 48,
- DI_PT_PATCHES18 = 49,
- DI_PT_PATCHES19 = 50,
- DI_PT_PATCHES20 = 51,
- DI_PT_PATCHES21 = 52,
- DI_PT_PATCHES22 = 53,
- DI_PT_PATCHES23 = 54,
- DI_PT_PATCHES24 = 55,
- DI_PT_PATCHES25 = 56,
- DI_PT_PATCHES26 = 57,
- DI_PT_PATCHES27 = 58,
- DI_PT_PATCHES28 = 59,
- DI_PT_PATCHES29 = 60,
- DI_PT_PATCHES30 = 61,
- DI_PT_PATCHES31 = 62,
-};
-
-enum pc_di_src_sel {
- DI_SRC_SEL_DMA = 0,
- DI_SRC_SEL_IMMEDIATE = 1,
- DI_SRC_SEL_AUTO_INDEX = 2,
- DI_SRC_SEL_AUTO_XFB = 3,
-};
-
-enum pc_di_face_cull_sel {
- DI_FACE_CULL_NONE = 0,
- DI_FACE_CULL_FETCH = 1,
- DI_FACE_BACKFACE_CULL = 2,
- DI_FACE_FRONTFACE_CULL = 3,
-};
-
-enum pc_di_index_size {
- INDEX_SIZE_IGN = 0,
- INDEX_SIZE_16_BIT = 0,
- INDEX_SIZE_32_BIT = 1,
- INDEX_SIZE_8_BIT = 2,
- INDEX_SIZE_INVALID = 0,
-};
-
-enum pc_di_vis_cull_mode {
- IGNORE_VISIBILITY = 0,
- USE_VISIBILITY = 1,
-};
-
-enum adreno_pm4_packet_type {
- CP_TYPE0_PKT = 0x00000000,
- CP_TYPE1_PKT = 0x40000000,
- CP_TYPE2_PKT = 0x80000000,
- CP_TYPE3_PKT = 0xc0000000,
- CP_TYPE4_PKT = 0x40000000,
- CP_TYPE7_PKT = 0x70000000,
-};
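
Annotation (not part of the patch): the adreno_pm4_packet_type values are the type field carried in the top bits of a PM4 packet header; on a5xx and newer the command stream uses type-4 (register write) and type-7 (opcode) packets, whose header also encodes the payload dword count, the opcode or register offset, and per-field odd-parity bits. A sketch of building a type-7 header under that layout as I understand it (bits 0-14 count, bit 15 count parity, bits 16-22 opcode, bit 23 opcode parity); treat it as illustrative and verify against the generated headers before relying on it:

#include <stdint.h>
#include <stdio.h>

/* Parity bit chosen so the field plus this bit has an odd number of 1s. */
static uint32_t odd_parity(uint32_t v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return (~v) & 1;
}

static uint32_t pkt7_header(uint32_t opcode, uint32_t count)
{
	return 0x70000000u /* CP_TYPE7_PKT */ |
	       (count & 0x3fff) | (odd_parity(count) << 15) |
	       ((opcode & 0x7f) << 16) | (odd_parity(opcode) << 23);
}

int main(void)
{
	/* e.g. CP_NOP (opcode 16) followed by one payload dword */
	printf("0x%08x\n", (unsigned)pkt7_header(16, 1));
	return 0;
}
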
-
-enum adreno_pm4_type3_packets {
- CP_ME_INIT = 72,
- CP_NOP = 16,
- CP_PREEMPT_ENABLE = 28,
- CP_PREEMPT_TOKEN = 30,
- CP_INDIRECT_BUFFER = 63,
- CP_INDIRECT_BUFFER_CHAIN = 87,
- CP_INDIRECT_BUFFER_PFD = 55,
- CP_WAIT_FOR_IDLE = 38,
- CP_WAIT_REG_MEM = 60,
- CP_WAIT_REG_EQ = 82,
- CP_WAIT_REG_GTE = 83,
- CP_WAIT_UNTIL_READ = 92,
- CP_WAIT_IB_PFD_COMPLETE = 93,
- CP_REG_RMW = 33,
- CP_SET_BIN_DATA = 47,
- CP_SET_BIN_DATA5 = 47,
- CP_REG_TO_MEM = 62,
- CP_MEM_WRITE = 61,
- CP_MEM_WRITE_CNTR = 79,
- CP_COND_EXEC = 68,
- CP_COND_WRITE = 69,
- CP_COND_WRITE5 = 69,
- CP_EVENT_WRITE = 70,
- CP_EVENT_WRITE7 = 70,
- CP_EVENT_WRITE_SHD = 88,
- CP_EVENT_WRITE_CFL = 89,
- CP_EVENT_WRITE_ZPD = 91,
- CP_RUN_OPENCL = 49,
- CP_DRAW_INDX = 34,
- CP_DRAW_INDX_2 = 54,
- CP_DRAW_INDX_BIN = 52,
- CP_DRAW_INDX_2_BIN = 53,
- CP_VIZ_QUERY = 35,
- CP_SET_STATE = 37,
- CP_SET_CONSTANT = 45,
- CP_IM_LOAD = 39,
- CP_IM_LOAD_IMMEDIATE = 43,
- CP_LOAD_CONSTANT_CONTEXT = 46,
- CP_INVALIDATE_STATE = 59,
- CP_SET_SHADER_BASES = 74,
- CP_SET_BIN_MASK = 80,
- CP_SET_BIN_SELECT = 81,
- CP_CONTEXT_UPDATE = 94,
- CP_INTERRUPT = 64,
- CP_IM_STORE = 44,
- CP_SET_DRAW_INIT_FLAGS = 75,
- CP_SET_PROTECTED_MODE = 95,
- CP_BOOTSTRAP_UCODE = 111,
- CP_LOAD_STATE = 48,
- CP_LOAD_STATE4 = 48,
- CP_COND_INDIRECT_BUFFER_PFE = 58,
- CP_COND_INDIRECT_BUFFER_PFD = 50,
- CP_INDIRECT_BUFFER_PFE = 63,
- CP_SET_BIN = 76,
- CP_TEST_TWO_MEMS = 113,
- CP_REG_WR_NO_CTXT = 120,
- CP_RECORD_PFP_TIMESTAMP = 17,
- CP_SET_SECURE_MODE = 102,
- CP_WAIT_FOR_ME = 19,
- CP_SET_DRAW_STATE = 67,
- CP_DRAW_INDX_OFFSET = 56,
- CP_DRAW_INDIRECT = 40,
- CP_DRAW_INDX_INDIRECT = 41,
- CP_DRAW_INDIRECT_MULTI = 42,
- CP_DRAW_AUTO = 36,
- CP_DRAW_PRED_ENABLE_GLOBAL = 25,
- CP_DRAW_PRED_ENABLE_LOCAL = 26,
- CP_DRAW_PRED_SET = 78,
- CP_WIDE_REG_WRITE = 116,
- CP_SCRATCH_TO_REG = 77,
- CP_REG_TO_SCRATCH = 74,
- CP_WAIT_MEM_WRITES = 18,
- CP_COND_REG_EXEC = 71,
- CP_MEM_TO_REG = 66,
- CP_EXEC_CS_INDIRECT = 65,
- CP_EXEC_CS = 51,
- CP_PERFCOUNTER_ACTION = 80,
- CP_SMMU_TABLE_UPDATE = 83,
- CP_SET_MARKER = 101,
- CP_SET_PSEUDO_REG = 86,
- CP_CONTEXT_REG_BUNCH = 92,
- CP_YIELD_ENABLE = 28,
- CP_SKIP_IB2_ENABLE_GLOBAL = 29,
- CP_SKIP_IB2_ENABLE_LOCAL = 35,
- CP_SET_SUBDRAW_SIZE = 53,
- CP_WHERE_AM_I = 98,
- CP_SET_VISIBILITY_OVERRIDE = 100,
- CP_PREEMPT_ENABLE_GLOBAL = 105,
- CP_PREEMPT_ENABLE_LOCAL = 106,
- CP_CONTEXT_SWITCH_YIELD = 107,
- CP_SET_RENDER_MODE = 108,
- CP_COMPUTE_CHECKPOINT = 110,
- CP_MEM_TO_MEM = 115,
- CP_BLIT = 44,
- CP_REG_TEST = 57,
- CP_SET_MODE = 99,
- CP_LOAD_STATE6_GEOM = 50,
- CP_LOAD_STATE6_FRAG = 52,
- CP_LOAD_STATE6 = 54,
- IN_IB_PREFETCH_END = 23,
- IN_SUBBLK_PREFETCH = 31,
- IN_INSTR_PREFETCH = 32,
- IN_INSTR_MATCH = 71,
- IN_CONST_PREFETCH = 73,
- IN_INCR_UPDT_STATE = 85,
- IN_INCR_UPDT_CONST = 86,
- IN_INCR_UPDT_INSTR = 87,
- PKT4 = 4,
- IN_IB_END = 10,
- IN_GMU_INTERRUPT = 11,
- IN_PREEMPT = 15,
- CP_SCRATCH_WRITE = 76,
- CP_REG_TO_MEM_OFFSET_MEM = 116,
- CP_REG_TO_MEM_OFFSET_REG = 114,
- CP_WAIT_MEM_GTE = 20,
- CP_WAIT_TWO_REGS = 112,
- CP_MEMCPY = 117,
- CP_SET_BIN_DATA5_OFFSET = 46,
- CP_SET_UNK_BIN_DATA = 45,
- CP_CONTEXT_SWITCH = 84,
- CP_SET_CTXSWITCH_IB = 85,
- CP_REG_WRITE = 109,
- CP_START_BIN = 80,
- CP_END_BIN = 81,
- CP_PREEMPT_DISABLE = 108,
- CP_WAIT_TIMESTAMP = 20,
- CP_GLOBAL_TIMESTAMP = 21,
- CP_LOCAL_TIMESTAMP = 22,
- CP_THREAD_CONTROL = 23,
- CP_RESOURCE_LIST = 24,
- CP_BV_BR_COUNT_OPS = 27,
- CP_MODIFY_TIMESTAMP = 28,
- CP_CONTEXT_REG_BUNCH2 = 93,
- CP_MEM_TO_SCRATCH_MEM = 73,
- CP_FIXED_STRIDE_DRAW_TABLE = 127,
- CP_RESET_CONTEXT_STATE = 31,
-};
-
-enum adreno_state_block {
- SB_VERT_TEX = 0,
- SB_VERT_MIPADDR = 1,
- SB_FRAG_TEX = 2,
- SB_FRAG_MIPADDR = 3,
- SB_VERT_SHADER = 4,
- SB_GEOM_SHADER = 5,
- SB_FRAG_SHADER = 6,
- SB_COMPUTE_SHADER = 7,
-};
-
-enum adreno_state_type {
- ST_SHADER = 0,
- ST_CONSTANTS = 1,
-};
-
-enum adreno_state_src {
- SS_DIRECT = 0,
- SS_INVALID_ALL_IC = 2,
- SS_INVALID_PART_IC = 3,
- SS_INDIRECT = 4,
- SS_INDIRECT_TCM = 5,
- SS_INDIRECT_STM = 6,
-};
-
-enum a4xx_state_block {
- SB4_VS_TEX = 0,
- SB4_HS_TEX = 1,
- SB4_DS_TEX = 2,
- SB4_GS_TEX = 3,
- SB4_FS_TEX = 4,
- SB4_CS_TEX = 5,
- SB4_VS_SHADER = 8,
- SB4_HS_SHADER = 9,
- SB4_DS_SHADER = 10,
- SB4_GS_SHADER = 11,
- SB4_FS_SHADER = 12,
- SB4_CS_SHADER = 13,
- SB4_SSBO = 14,
- SB4_CS_SSBO = 15,
-};
-
-enum a4xx_state_type {
- ST4_SHADER = 0,
- ST4_CONSTANTS = 1,
- ST4_UBO = 2,
-};
-
-enum a4xx_state_src {
- SS4_DIRECT = 0,
- SS4_INDIRECT = 2,
-};
-
-enum a6xx_state_block {
- SB6_VS_TEX = 0,
- SB6_HS_TEX = 1,
- SB6_DS_TEX = 2,
- SB6_GS_TEX = 3,
- SB6_FS_TEX = 4,
- SB6_CS_TEX = 5,
- SB6_VS_SHADER = 8,
- SB6_HS_SHADER = 9,
- SB6_DS_SHADER = 10,
- SB6_GS_SHADER = 11,
- SB6_FS_SHADER = 12,
- SB6_CS_SHADER = 13,
- SB6_IBO = 14,
- SB6_CS_IBO = 15,
-};
-
-enum a6xx_state_type {
- ST6_SHADER = 0,
- ST6_CONSTANTS = 1,
- ST6_UBO = 2,
- ST6_IBO = 3,
-};
-
-enum a6xx_state_src {
- SS6_DIRECT = 0,
- SS6_BINDLESS = 1,
- SS6_INDIRECT = 2,
- SS6_UBO = 3,
-};
-
-enum a4xx_index_size {
- INDEX4_SIZE_8_BIT = 0,
- INDEX4_SIZE_16_BIT = 1,
- INDEX4_SIZE_32_BIT = 2,
-};
-
-enum a6xx_patch_type {
- TESS_QUADS = 0,
- TESS_TRIANGLES = 1,
- TESS_ISOLINES = 2,
-};
-
-enum a6xx_draw_indirect_opcode {
- INDIRECT_OP_NORMAL = 2,
- INDIRECT_OP_INDEXED = 4,
- INDIRECT_OP_INDIRECT_COUNT = 6,
- INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7,
-};
-
-enum cp_draw_pred_src {
- PRED_SRC_MEM = 5,
-};
-
-enum cp_draw_pred_test {
- NE_0_PASS = 0,
- EQ_0_PASS = 1,
-};
-
-enum cp_cond_function {
- WRITE_ALWAYS = 0,
- WRITE_LT = 1,
- WRITE_LE = 2,
- WRITE_EQ = 3,
- WRITE_NE = 4,
- WRITE_GE = 5,
- WRITE_GT = 6,
-};
-
-enum poll_memory_type {
- POLL_REGISTER = 0,
- POLL_MEMORY = 1,
- POLL_SCRATCH = 2,
- POLL_ON_CHIP = 3,
-};
-
-enum render_mode_cmd {
- BYPASS = 1,
- BINNING = 2,
- GMEM = 3,
- BLIT2D = 5,
- BLIT2DSCALE = 7,
- END2D = 8,
-};
-
-enum event_write_src {
- EV_WRITE_USER_32B = 0,
- EV_WRITE_USER_64B = 1,
- EV_WRITE_TIMESTAMP_SUM = 2,
- EV_WRITE_ALWAYSON = 3,
- EV_WRITE_REGS_CONTENT = 4,
-};
-
-enum event_write_dst {
- EV_DST_RAM = 0,
- EV_DST_ONCHIP = 1,
-};
-
-enum cp_blit_cmd {
- BLIT_OP_FILL = 0,
- BLIT_OP_COPY = 1,
- BLIT_OP_SCALE = 3,
-};
-
-enum a6xx_marker {
- RM6_BYPASS = 1,
- RM6_BINNING = 2,
- RM6_GMEM = 4,
- RM6_ENDVIS = 5,
- RM6_RESOLVE = 6,
- RM6_YIELD = 7,
- RM6_COMPUTE = 8,
- RM6_BLIT2DSCALE = 12,
- RM6_IB1LIST_START = 13,
- RM6_IB1LIST_END = 14,
- RM6_IFPC_ENABLE = 256,
- RM6_IFPC_DISABLE = 257,
-};
-
-enum pseudo_reg {
- SMMU_INFO = 0,
- NON_SECURE_SAVE_ADDR = 1,
- SECURE_SAVE_ADDR = 2,
- NON_PRIV_SAVE_ADDR = 3,
- COUNTER = 4,
- DRAW_STRM_ADDRESS = 8,
- DRAW_STRM_SIZE_ADDRESS = 9,
- PRIM_STRM_ADDRESS = 10,
- UNK_STRM_ADDRESS = 11,
- UNK_STRM_SIZE_ADDRESS = 12,
- BINDLESS_BASE_0_ADDR = 16,
- BINDLESS_BASE_1_ADDR = 17,
- BINDLESS_BASE_2_ADDR = 18,
- BINDLESS_BASE_3_ADDR = 19,
- BINDLESS_BASE_4_ADDR = 20,
- BINDLESS_BASE_5_ADDR = 21,
- BINDLESS_BASE_6_ADDR = 22,
-};
-
-enum source_type {
- SOURCE_REG = 0,
- SOURCE_SCRATCH_MEM = 1,
-};
-
-enum compare_mode {
- PRED_TEST = 1,
- REG_COMPARE = 2,
- RENDER_MODE = 3,
- REG_COMPARE_IMM = 4,
- THREAD_MODE = 5,
-};
-
-enum ctxswitch_ib {
- RESTORE_IB = 0,
- YIELD_RESTORE_IB = 1,
- SAVE_IB = 2,
- RB_SAVE_IB = 3,
-};
-
-enum reg_tracker {
- TRACK_CNTL_REG = 1,
- TRACK_RENDER_CNTL = 2,
- UNK_EVENT_WRITE = 4,
- TRACK_LRZ = 8,
-};
-
-enum ts_wait_value_src {
- TS_WAIT_GE_32B = 0,
- TS_WAIT_GE_64B = 1,
- TS_WAIT_GE_TIMESTAMP_SUM = 2,
-};
-
-enum ts_wait_type {
- TS_WAIT_RAM = 0,
- TS_WAIT_ONCHIP = 1,
-};
-
-enum pipe_count_op {
- PIPE_CLEAR_BV_BR = 1,
- PIPE_SET_BR_OFFSET = 2,
- PIPE_BR_WAIT_FOR_BV = 3,
- PIPE_BV_WAIT_FOR_BR = 4,
-};
-
-enum timestamp_op {
- MODIFY_TIMESTAMP_CLEAR = 0,
- MODIFY_TIMESTAMP_ADD_GLOBAL = 1,
- MODIFY_TIMESTAMP_ADD_LOCAL = 2,
-};
-
-enum cp_thread {
- CP_SET_THREAD_BR = 1,
- CP_SET_THREAD_BV = 2,
- CP_SET_THREAD_BOTH = 3,
-};
-
-#define REG_CP_LOAD_STATE_0 0x00000000
-#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
-#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
-static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
-}
-#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
-#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
-static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
-{
- return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
-}
-#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
-#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
-static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
-{
- return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
-}
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
-#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
-static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
-}
-
-#define REG_CP_LOAD_STATE_1 0x00000001
-#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
-#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
-static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
-{
- return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
-}
-#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
-#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
-static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
-}
-
-#define REG_CP_LOAD_STATE4_0 0x00000000
-#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
-#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
-static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK;
-}
-#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000
-#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16
-static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val)
-{
- return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK;
-}
-#define CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000
-#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18
-static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val)
-{
- return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK;
-}
-#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000
-#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22
-static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK;
-}
-
-#define REG_CP_LOAD_STATE4_1 0x00000001
-#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003
-#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0
-static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val)
-{
- return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK;
-}
-#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc
-#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2
-static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
-}
-
-#define REG_CP_LOAD_STATE4_2 0x00000002
-#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
-#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0
-static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
-}
-
-#define REG_CP_LOAD_STATE6_0 0x00000000
-#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
-#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
-static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
-}
-#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000
-#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
-static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
-{
- return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
-}
-#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
-#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
-static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
-{
- return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
-}
-#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
-#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
-static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
-{
- return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
-}
-#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
-#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
-static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
-}
-
-#define REG_CP_LOAD_STATE6_1 0x00000001
-#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
-#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
-static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
-{
- assert(!(val & 0x3));
- return (((val >> 2)) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
-}
-
-#define REG_CP_LOAD_STATE6_2 0x00000002
-#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
-#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
-static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
-}
-
-#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001
-
-#define REG_CP_DRAW_INDX_0 0x00000000
-#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
-#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
-}
-
-#define REG_CP_DRAW_INDX_1 0x00000001
-#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
-#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
-}
-#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
-#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
-static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
-}
-#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
-#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
-static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
-}
-#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
-#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
-static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
-{
- return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
-}
-#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
-#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
-#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
-#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
-static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_2 0x00000002
-#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
-#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_3 0x00000003
-#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
-}
-
-#define REG_CP_DRAW_INDX_4 0x00000004
-#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
-}
-
-#define REG_CP_DRAW_INDX_2_0 0x00000000
-#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
-#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
-}
-
-#define REG_CP_DRAW_INDX_2_1 0x00000001
-#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
-#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
-}
-#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
-#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
-static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
-}
-#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
-#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
-static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
-}
-#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
-#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
-static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
-{
- return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
-}
-#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
-#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
-#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
-#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
-static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_2_2 0x00000002
-#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
-#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
-#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
-#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
-#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
-#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
-#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000
-#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK;
-}
-#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000
-#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000
-
-#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
-#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
-#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
-#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_OFFSET_4 0x00000004
-#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_OFFSET_5 0x00000005
-#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
-
-#define REG_A5XX_CP_DRAW_INDX_OFFSET_6 0x00000006
-#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
-}
-
-#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
-#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint64_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
-}
-
-#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
-#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
-{
- return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
-}
-
-#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
-#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
-#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
-}
-#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
-#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
-}
-#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
-#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
-}
-#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
-#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
-}
-#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
-#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK;
-}
-#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000
-#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000
-
-#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
-#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
-#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
-{
- return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001
-#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
-#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001
-
-#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
-}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
-}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
-}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
-}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK;
-}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000
-
-#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
-#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
-#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
-}
-
-#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
-#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
-#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
-}
-
-#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
-#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
-#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
-{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
-#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
-#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
-#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
-#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
-#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
-#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
-static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
-{
- return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
-}
-
-#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 0x00000004
-
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000
-#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000
-
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001
-#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f
-#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK;
-}
-#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00
-#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
-}
-
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002
-
-#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
-
-#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
-
-#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003
-
-#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005
-
-#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
-
-#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000008
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000005
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000007
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000008
-
-#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x0000000a
-
-#define REG_CP_DRAW_AUTO_0 0x00000000
-#define CP_DRAW_AUTO_0_PRIM_TYPE__MASK 0x0000003f
-#define CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT 0
-static inline uint32_t CP_DRAW_AUTO_0_PRIM_TYPE(enum pc_di_primtype val)
-{
- return ((val) << CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT) & CP_DRAW_AUTO_0_PRIM_TYPE__MASK;
-}
-#define CP_DRAW_AUTO_0_SOURCE_SELECT__MASK 0x000000c0
-#define CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT 6
-static inline uint32_t CP_DRAW_AUTO_0_SOURCE_SELECT(enum pc_di_src_sel val)
-{
- return ((val) << CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT) & CP_DRAW_AUTO_0_SOURCE_SELECT__MASK;
-}
-#define CP_DRAW_AUTO_0_VIS_CULL__MASK 0x00000300
-#define CP_DRAW_AUTO_0_VIS_CULL__SHIFT 8
-static inline uint32_t CP_DRAW_AUTO_0_VIS_CULL(enum pc_di_vis_cull_mode val)
-{
- return ((val) << CP_DRAW_AUTO_0_VIS_CULL__SHIFT) & CP_DRAW_AUTO_0_VIS_CULL__MASK;
-}
-#define CP_DRAW_AUTO_0_INDEX_SIZE__MASK 0x00000c00
-#define CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT 10
-static inline uint32_t CP_DRAW_AUTO_0_INDEX_SIZE(enum a4xx_index_size val)
-{
- return ((val) << CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT) & CP_DRAW_AUTO_0_INDEX_SIZE__MASK;
-}
-#define CP_DRAW_AUTO_0_PATCH_TYPE__MASK 0x00003000
-#define CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT 12
-static inline uint32_t CP_DRAW_AUTO_0_PATCH_TYPE(enum a6xx_patch_type val)
-{
- return ((val) << CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT) & CP_DRAW_AUTO_0_PATCH_TYPE__MASK;
-}
-#define CP_DRAW_AUTO_0_GS_ENABLE 0x00010000
-#define CP_DRAW_AUTO_0_TESS_ENABLE 0x00020000
-
-#define REG_CP_DRAW_AUTO_1 0x00000001
-#define CP_DRAW_AUTO_1_NUM_INSTANCES__MASK 0xffffffff
-#define CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT 0
-static inline uint32_t CP_DRAW_AUTO_1_NUM_INSTANCES(uint32_t val)
-{
- return ((val) << CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT) & CP_DRAW_AUTO_1_NUM_INSTANCES__MASK;
-}
-
-#define REG_CP_DRAW_AUTO_NUM_VERTICES_BASE 0x00000002
-
-#define REG_CP_DRAW_AUTO_4 0x00000004
-#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK 0xffffffff
-#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT 0
-static inline uint32_t CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET(uint32_t val)
-{
- return ((val) << CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT) & CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK;
-}
-
-#define REG_CP_DRAW_AUTO_5 0x00000005
-#define CP_DRAW_AUTO_5_STRIDE__MASK 0xffffffff
-#define CP_DRAW_AUTO_5_STRIDE__SHIFT 0
-static inline uint32_t CP_DRAW_AUTO_5_STRIDE(uint32_t val)
-{
- return ((val) << CP_DRAW_AUTO_5_STRIDE__SHIFT) & CP_DRAW_AUTO_5_STRIDE__MASK;
-}
-
-#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000
-#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001
-
-#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000
-#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001
-
-#define REG_CP_DRAW_PRED_SET_0 0x00000000
-#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0
-#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4
-static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val)
-{
- return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK;
-}
-#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100
-#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8
-static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val)
-{
- return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK;
-}
-
-#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001
-
-#define REG_CP_SET_DRAW_STATE_(i0) (0x00000000 + 0x3*(i0))
-
-static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
-#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
-#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
-{
- return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
-}
-#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
-#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
-#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
-#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
-#define CP_SET_DRAW_STATE__0_BINNING 0x00100000
-#define CP_SET_DRAW_STATE__0_GMEM 0x00200000
-#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000
-#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
-#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
-static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
-{
- return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
-}
-
-static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
-#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
-#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
-}
-
-static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
-#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
-#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
-}
-
-#define REG_CP_SET_BIN_0 0x00000000
-
-#define REG_CP_SET_BIN_1 0x00000001
-#define CP_SET_BIN_1_X1__MASK 0x0000ffff
-#define CP_SET_BIN_1_X1__SHIFT 0
-static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
-{
- return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
-}
-#define CP_SET_BIN_1_Y1__MASK 0xffff0000
-#define CP_SET_BIN_1_Y1__SHIFT 16
-static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
-{
- return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
-}
-
-#define REG_CP_SET_BIN_2 0x00000002
-#define CP_SET_BIN_2_X2__MASK 0x0000ffff
-#define CP_SET_BIN_2_X2__SHIFT 0
-static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
-{
- return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
-}
-#define CP_SET_BIN_2_Y2__MASK 0xffff0000
-#define CP_SET_BIN_2_Y2__SHIFT 16
-static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
-{
- return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA_0 0x00000000
-#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
-#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA_1 0x00000001
-#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
-#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_0 0x00000000
-#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000
-#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16
-static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK;
-}
-#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000
-#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22
-static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_1 0x00000001
-#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_2 0x00000002
-#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_3 0x00000003
-#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_4 0x00000004
-#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_5 0x00000005
-#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_6 0x00000006
-#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_7 0x00000007
-
-#define REG_CP_SET_BIN_DATA5_9 0x00000009
-
-#define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000
-#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000
-#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16
-static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK;
-}
-#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000
-#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22
-static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001
-#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002
-#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK;
-}
-
-#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003
-#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val)
-{
- return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK;
-}
-
-#define REG_CP_REG_RMW_0 0x00000000
-#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff
-#define CP_REG_RMW_0_DST_REG__SHIFT 0
-static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val)
-{
- return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK;
-}
-#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000
-#define CP_REG_RMW_0_ROTATE__SHIFT 24
-static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val)
-{
- return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK;
-}
-#define CP_REG_RMW_0_SRC1_ADD 0x20000000
-#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000
-#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000
-
-#define REG_CP_REG_RMW_1 0x00000001
-#define CP_REG_RMW_1_SRC0__MASK 0xffffffff
-#define CP_REG_RMW_1_SRC0__SHIFT 0
-static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val)
-{
- return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK;
-}
-
-#define REG_CP_REG_RMW_2 0x00000002
-#define CP_REG_RMW_2_SRC1__MASK 0xffffffff
-#define CP_REG_RMW_2_SRC1__SHIFT 0
-static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val)
-{
- return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_0 0x00000000
-#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff
-#define CP_REG_TO_MEM_0_REG__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
-}
-#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000
-#define CP_REG_TO_MEM_0_CNT__SHIFT 18
-static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
-}
-#define CP_REG_TO_MEM_0_64B 0x40000000
-#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
-
-#define REG_CP_REG_TO_MEM_1 0x00000001
-#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
-#define CP_REG_TO_MEM_1_DEST__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_2 0x00000002
-#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
-#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000
-#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff
-#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK;
-}
-#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000
-#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18
-static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK;
-}
-#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000
-#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 0x80000000
-
-#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001
-#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002
-#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003
-#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff
-#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK;
-}
-#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000
-
-#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000
-#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff
-#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK;
-}
-#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000
-#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK;
-}
-#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000
-#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000
-
-#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001
-#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002
-#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003
-#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK;
-}
-
-#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004
-#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff
-#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0
-static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val)
-{
- return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK;
-}
-
-#define REG_CP_MEM_TO_REG_0 0x00000000
-#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff
-#define CP_MEM_TO_REG_0_REG__SHIFT 0
-static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
-{
- return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
-}
-#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
-#define CP_MEM_TO_REG_0_CNT__SHIFT 19
-static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
-{
- return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
-}
-#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000
-#define CP_MEM_TO_REG_0_UNK31 0x80000000
-
-#define REG_CP_MEM_TO_REG_1 0x00000001
-#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
-#define CP_MEM_TO_REG_1_SRC__SHIFT 0
-static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
-{
- return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
-}
-
-#define REG_CP_MEM_TO_REG_2 0x00000002
-#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
-#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
-static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
-{
- return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
-}
-
-#define REG_CP_MEM_TO_MEM_0 0x00000000
-#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
-#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
-#define CP_MEM_TO_MEM_0_NEG_C 0x00000004
-#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000
-#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000
-#define CP_MEM_TO_MEM_0_UNK31 0x80000000
-
-#define REG_CP_MEMCPY_0 0x00000000
-#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff
-#define CP_MEMCPY_0_DWORDS__SHIFT 0
-static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val)
-{
- return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK;
-}
-
-#define REG_CP_MEMCPY_1 0x00000001
-#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff
-#define CP_MEMCPY_1_SRC_LO__SHIFT 0
-static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val)
-{
- return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK;
-}
-
-#define REG_CP_MEMCPY_2 0x00000002
-#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff
-#define CP_MEMCPY_2_SRC_HI__SHIFT 0
-static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val)
-{
- return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK;
-}
-
-#define REG_CP_MEMCPY_3 0x00000003
-#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff
-#define CP_MEMCPY_3_DST_LO__SHIFT 0
-static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val)
-{
- return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK;
-}
-
-#define REG_CP_MEMCPY_4 0x00000004
-#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff
-#define CP_MEMCPY_4_DST_HI__SHIFT 0
-static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val)
-{
- return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK;
-}
-
-#define REG_CP_REG_TO_SCRATCH_0 0x00000000
-#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff
-#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0
-static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val)
-{
- return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK;
-}
-#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000
-#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20
-static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val)
-{
- return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK;
-}
-#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000
-#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24
-static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val)
-{
- return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK;
-}
-
-#define REG_CP_SCRATCH_TO_REG_0 0x00000000
-#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff
-#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0
-static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val)
-{
- return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK;
-}
-#define CP_SCRATCH_TO_REG_0_UNK18 0x00040000
-#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000
-#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20
-static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val)
-{
- return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK;
-}
-#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000
-#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24
-static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val)
-{
- return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK;
-}
-
-#define REG_CP_SCRATCH_WRITE_0 0x00000000
-#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000
-#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20
-static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val)
-{
- return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK;
-}
-
-#define REG_CP_MEM_WRITE_0 0x00000000
-#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff
-#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0
-static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK;
-}
-
-#define REG_CP_MEM_WRITE_1 0x00000001
-#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff
-#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0
-static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK;
-}
-
-#define REG_CP_COND_WRITE_0 0x00000000
-#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007
-#define CP_COND_WRITE_0_FUNCTION__SHIFT 0
-static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val)
-{
- return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK;
-}
-#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010
-#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100
-
-#define REG_CP_COND_WRITE_1 0x00000001
-#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff
-#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0
-static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val)
-{
- return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK;
-}
-
-#define REG_CP_COND_WRITE_2 0x00000002
-#define CP_COND_WRITE_2_REF__MASK 0xffffffff
-#define CP_COND_WRITE_2_REF__SHIFT 0
-static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val)
-{
- return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK;
-}
-
-#define REG_CP_COND_WRITE_3 0x00000003
-#define CP_COND_WRITE_3_MASK__MASK 0xffffffff
-#define CP_COND_WRITE_3_MASK__SHIFT 0
-static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val)
-{
- return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK;
-}
-
-#define REG_CP_COND_WRITE_4 0x00000004
-#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff
-#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0
-static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val)
-{
- return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK;
-}
-
-#define REG_CP_COND_WRITE_5 0x00000005
-#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff
-#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0
-static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val)
-{
- return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK;
-}
-
-#define REG_CP_COND_WRITE5_0 0x00000000
-#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007
-#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
-{
- return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
-}
-#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008
-#define CP_COND_WRITE5_0_POLL__MASK 0x00000030
-#define CP_COND_WRITE5_0_POLL__SHIFT 4
-static inline uint32_t CP_COND_WRITE5_0_POLL(enum poll_memory_type val)
-{
- return ((val) << CP_COND_WRITE5_0_POLL__SHIFT) & CP_COND_WRITE5_0_POLL__MASK;
-}
-#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
-
-#define REG_CP_COND_WRITE5_1 0x00000001
-#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff
-#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK;
-}
-
-#define REG_CP_COND_WRITE5_2 0x00000002
-#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff
-#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK;
-}
-
-#define REG_CP_COND_WRITE5_3 0x00000003
-#define CP_COND_WRITE5_3_REF__MASK 0xffffffff
-#define CP_COND_WRITE5_3_REF__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK;
-}
-
-#define REG_CP_COND_WRITE5_4 0x00000004
-#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff
-#define CP_COND_WRITE5_4_MASK__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK;
-}
-
-#define REG_CP_COND_WRITE5_5 0x00000005
-#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff
-#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK;
-}
-
-#define REG_CP_COND_WRITE5_6 0x00000006
-#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff
-#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK;
-}
-
-#define REG_CP_COND_WRITE5_7 0x00000007
-#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff
-#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0
-static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
-{
- return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
-}
-
-#define REG_CP_WAIT_MEM_GTE_0 0x00000000
-#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff
-#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0
-static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val)
-{
- return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK;
-}
-
-#define REG_CP_WAIT_MEM_GTE_1 0x00000001
-#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff
-#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0
-static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK;
-}
-
-#define REG_CP_WAIT_MEM_GTE_2 0x00000002
-#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff
-#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0
-static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK;
-}
-
-#define REG_CP_WAIT_MEM_GTE_3 0x00000003
-#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff
-#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0
-static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val)
-{
- return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK;
-}
-
-#define REG_CP_WAIT_REG_MEM_0 0x00000000
-#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007
-#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
-{
- return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
-}
-#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008
-#define CP_WAIT_REG_MEM_0_POLL__MASK 0x00000030
-#define CP_WAIT_REG_MEM_0_POLL__SHIFT 4
-static inline uint32_t CP_WAIT_REG_MEM_0_POLL(enum poll_memory_type val)
-{
- return ((val) << CP_WAIT_REG_MEM_0_POLL__SHIFT) & CP_WAIT_REG_MEM_0_POLL__MASK;
-}
-#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100
-
-#define REG_CP_WAIT_REG_MEM_1 0x00000001
-#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff
-#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK;
-}
-
-#define REG_CP_WAIT_REG_MEM_2 0x00000002
-#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff
-#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK;
-}
-
-#define REG_CP_WAIT_REG_MEM_3 0x00000003
-#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff
-#define CP_WAIT_REG_MEM_3_REF__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val)
-{
- return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK;
-}
-
-#define REG_CP_WAIT_REG_MEM_4 0x00000004
-#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff
-#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val)
-{
- return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK;
-}
-
-#define REG_CP_WAIT_REG_MEM_5 0x00000005
-#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff
-#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0
-static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val)
-{
- return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK;
-}
-
-#define REG_CP_WAIT_TWO_REGS_0 0x00000000
-#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff
-#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0
-static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val)
-{
- return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK;
-}
-
-#define REG_CP_WAIT_TWO_REGS_1 0x00000001
-#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff
-#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0
-static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val)
-{
- return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK;
-}
-
-#define REG_CP_WAIT_TWO_REGS_2 0x00000002
-#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff
-#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0
-static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val)
-{
- return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK;
-}
-
-#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
-
-#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
-#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
-#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
-static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
-{
- return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
-}
-
-#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
-#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
-#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
-static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
-{
- return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
-}
-
-#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
-#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
-#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
-static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
-{
- return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_0 0x00000000
-#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
-#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
-{
- return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_1 0x00000001
-#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
-#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
-{
- return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_2 0x00000002
-#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
-#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
-{
- return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_3 0x00000003
-#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008
-#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
-
-#define REG_CP_SET_RENDER_MODE_4 0x00000004
-
-#define REG_CP_SET_RENDER_MODE_5 0x00000005
-#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
-#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
-{
- return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_6 0x00000006
-#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
-#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
-{
- return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
-}
-
-#define REG_CP_SET_RENDER_MODE_7 0x00000007
-#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
-#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
-static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
-{
- return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000
-#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001
-#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
-
-#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
-#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006
-#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
-}
-
-#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
-
-#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
-
-#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
-#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
-#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
-static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
-{
- return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
-}
-
-#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
-#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
-#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
-static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
-{
- return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
-}
-
-#define REG_CP_EVENT_WRITE_0 0x00000000
-#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
-#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
-static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
-{
- return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
-}
-#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000
-#define CP_EVENT_WRITE_0_IRQ 0x80000000
-
-#define REG_CP_EVENT_WRITE_1 0x00000001
-#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
-#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
-static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
-{
- return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
-}
-
-#define REG_CP_EVENT_WRITE_2 0x00000002
-#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
-#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
-static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
-{
- return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
-}
-
-#define REG_CP_EVENT_WRITE_3 0x00000003
-
-#define REG_CP_EVENT_WRITE7_0 0x00000000
-#define CP_EVENT_WRITE7_0_EVENT__MASK 0x000000ff
-#define CP_EVENT_WRITE7_0_EVENT__SHIFT 0
-static inline uint32_t CP_EVENT_WRITE7_0_EVENT(enum vgt_event_type val)
-{
- return ((val) << CP_EVENT_WRITE7_0_EVENT__SHIFT) & CP_EVENT_WRITE7_0_EVENT__MASK;
-}
-#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT 0x00001000
-#define CP_EVENT_WRITE7_0_SAMPLE_COUNT_END_OFFSET 0x00002000
-#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT_DIFF 0x00004000
-#define CP_EVENT_WRITE7_0_INC_BV_COUNT 0x00010000
-#define CP_EVENT_WRITE7_0_INC_BR_COUNT 0x00020000
-#define CP_EVENT_WRITE7_0_CLEAR_RENDER_RESOURCE 0x00040000
-#define CP_EVENT_WRITE7_0_CLEAR_LRZ_RESOURCE 0x00080000
-#define CP_EVENT_WRITE7_0_WRITE_SRC__MASK 0x00700000
-#define CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT 20
-static inline uint32_t CP_EVENT_WRITE7_0_WRITE_SRC(enum event_write_src val)
-{
- return ((val) << CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT) & CP_EVENT_WRITE7_0_WRITE_SRC__MASK;
-}
-#define CP_EVENT_WRITE7_0_WRITE_DST__MASK 0x01000000
-#define CP_EVENT_WRITE7_0_WRITE_DST__SHIFT 24
-static inline uint32_t CP_EVENT_WRITE7_0_WRITE_DST(enum event_write_dst val)
-{
- return ((val) << CP_EVENT_WRITE7_0_WRITE_DST__SHIFT) & CP_EVENT_WRITE7_0_WRITE_DST__MASK;
-}
-#define CP_EVENT_WRITE7_0_WRITE_ENABLED 0x08000000
-
-#define REG_EV_DST_RAM_CP_EVENT_WRITE7_1 0x00000001
-#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK 0xffffffff
-#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT 0
-static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO(uint32_t val)
-{
- return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK;
-}
-
-#define REG_EV_DST_RAM_CP_EVENT_WRITE7_2 0x00000002
-#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK 0xffffffff
-#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT 0
-static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI(uint32_t val)
-{
- return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK;
-}
-
-#define REG_EV_DST_RAM_CP_EVENT_WRITE7_3 0x00000003
-#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff
-#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0
-static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val)
-{
- return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK;
-}
-
-#define REG_EV_DST_RAM_CP_EVENT_WRITE7_4 0x00000004
-#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff
-#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0
-static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val)
-{
- return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK;
-}
-
-#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_1 0x00000001
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK 0xffffffff
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT 0
-static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0(uint32_t val)
-{
- return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK;
-}
-
-#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_3 0x00000003
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0
-static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val)
-{
- return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK;
-}
-
-#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_4 0x00000004
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff
-#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0
-static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val)
-{
- return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK;
-}
-
-#define REG_CP_BLIT_0 0x00000000
-#define CP_BLIT_0_OP__MASK 0x0000000f
-#define CP_BLIT_0_OP__SHIFT 0
-static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
-{
- return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
-}
-
-#define REG_CP_BLIT_1 0x00000001
-#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
-#define CP_BLIT_1_SRC_X1__SHIFT 0
-static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
-{
- return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
-}
-#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
-#define CP_BLIT_1_SRC_Y1__SHIFT 16
-static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
-{
- return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
-}
-
-#define REG_CP_BLIT_2 0x00000002
-#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
-#define CP_BLIT_2_SRC_X2__SHIFT 0
-static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
-{
- return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
-}
-#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
-#define CP_BLIT_2_SRC_Y2__SHIFT 16
-static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
-{
- return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
-}
-
-#define REG_CP_BLIT_3 0x00000003
-#define CP_BLIT_3_DST_X1__MASK 0x00003fff
-#define CP_BLIT_3_DST_X1__SHIFT 0
-static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
-{
- return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
-}
-#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
-#define CP_BLIT_3_DST_Y1__SHIFT 16
-static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
-{
- return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
-}
-
-#define REG_CP_BLIT_4 0x00000004
-#define CP_BLIT_4_DST_X2__MASK 0x00003fff
-#define CP_BLIT_4_DST_X2__SHIFT 0
-static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
-{
- return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
-}
-#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
-#define CP_BLIT_4_DST_Y2__SHIFT 16
-static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
-{
- return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
-}
-
-#define REG_CP_EXEC_CS_0 0x00000000
-
-#define REG_CP_EXEC_CS_1 0x00000001
-#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff
-#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0
-static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val)
-{
- return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK;
-}
-
-#define REG_CP_EXEC_CS_2 0x00000002
-#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff
-#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0
-static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val)
-{
- return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK;
-}
-
-#define REG_CP_EXEC_CS_3 0x00000003
-#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff
-#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0
-static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
-{
- return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
-}
-
-#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
-
-#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
-#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
-#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
-static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
-{
- return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
-}
-
-#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
-static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
-}
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
-static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
-}
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
-#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
-}
-
-#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
-#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
-#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
-static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
-{
- return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
-}
-
-#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
-#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
-#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
-static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
-{
- return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
-}
-
-#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
-static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
-{
- return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
-}
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
-static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
-{
- return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
-}
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
-#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
-static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
-{
- return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
-}
-
-#define REG_A6XX_CP_SET_MARKER_0 0x00000000
-#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff
-#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0
-static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_marker val)
-{
- return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK;
-}
-#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
-#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0
-static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val)
-{
- return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
-}
-
-#define REG_A6XX_CP_SET_PSEUDO_REG_(i0) (0x00000000 + 0x3*(i0))
-
-static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
-#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x000007ff
-#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
-static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
-{
- return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
-}
-
-static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
-#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
-#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
-static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
-{
- return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK;
-}
-
-static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
-#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
-#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
-static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
-{
- return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK;
-}
-
-#define REG_A6XX_CP_REG_TEST_0 0x00000000
-#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff
-#define A6XX_CP_REG_TEST_0_REG__SHIFT 0
-static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
-{
- return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
-}
-#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK 0x0003ffff
-#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT 0
-static inline uint32_t A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET(uint32_t val)
-{
- return ((val) << A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT) & A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK;
-}
-#define A6XX_CP_REG_TEST_0_SOURCE__MASK 0x00040000
-#define A6XX_CP_REG_TEST_0_SOURCE__SHIFT 18
-static inline uint32_t A6XX_CP_REG_TEST_0_SOURCE(enum source_type val)
-{
- return ((val) << A6XX_CP_REG_TEST_0_SOURCE__SHIFT) & A6XX_CP_REG_TEST_0_SOURCE__MASK;
-}
-#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
-#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20
-static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
-{
- return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK;
-}
-#define A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME 0x02000000
-#define A6XX_CP_REG_TEST_0_PRED_BIT__MASK 0x7c000000
-#define A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT 26
-static inline uint32_t A6XX_CP_REG_TEST_0_PRED_BIT(uint32_t val)
-{
- return ((val) << A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT) & A6XX_CP_REG_TEST_0_PRED_BIT__MASK;
-}
-#define A6XX_CP_REG_TEST_0_PRED_UPDATE 0x80000000
-
-#define REG_A6XX_CP_REG_TEST_PRED_MASK 0x00000001
-
-#define REG_A6XX_CP_REG_TEST_PRED_VAL 0x00000002
-
-#define REG_CP_COND_REG_EXEC_0 0x00000000
-#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff
-#define CP_COND_REG_EXEC_0_REG0__SHIFT 0
-static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
-{
- return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
-}
-#define CP_COND_REG_EXEC_0_PRED_BIT__MASK 0x007c0000
-#define CP_COND_REG_EXEC_0_PRED_BIT__SHIFT 18
-static inline uint32_t CP_COND_REG_EXEC_0_PRED_BIT(uint32_t val)
-{
- return ((val) << CP_COND_REG_EXEC_0_PRED_BIT__SHIFT) & CP_COND_REG_EXEC_0_PRED_BIT__MASK;
-}
-#define CP_COND_REG_EXEC_0_SKIP_WAIT_FOR_ME 0x00800000
-#define CP_COND_REG_EXEC_0_ONCHIP_MEM 0x01000000
-#define CP_COND_REG_EXEC_0_BINNING 0x02000000
-#define CP_COND_REG_EXEC_0_GMEM 0x04000000
-#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
-#define CP_COND_REG_EXEC_0_BV 0x02000000
-#define CP_COND_REG_EXEC_0_BR 0x04000000
-#define CP_COND_REG_EXEC_0_LPAC 0x08000000
-#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000
-#define CP_COND_REG_EXEC_0_MODE__SHIFT 28
-static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
-{
- return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
-}
-
-#define REG_PRED_TEST_CP_COND_REG_EXEC_1 0x00000001
-#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
-#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
-static inline uint32_t PRED_TEST_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
-{
- return ((val) << PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK;
-}
-
-#define REG_REG_COMPARE_CP_COND_REG_EXEC_1 0x00000001
-#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK 0x0003ffff
-#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT 0
-static inline uint32_t REG_COMPARE_CP_COND_REG_EXEC_1_REG1(uint32_t val)
-{
- return ((val) << REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT) & REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK;
-}
-#define REG_COMPARE_CP_COND_REG_EXEC_1_ONCHIP_MEM 0x01000000
-
-#define REG_RENDER_MODE_CP_COND_REG_EXEC_1 0x00000001
-#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
-#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
-static inline uint32_t RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
-{
- return ((val) << RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK;
-}
-
-#define REG_REG_COMPARE_IMM_CP_COND_REG_EXEC_1 0x00000001
-#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK 0xffffffff
-#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT 0
-static inline uint32_t REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM(uint32_t val)
-{
- return ((val) << REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT) & REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK;
-}
-
-#define REG_THREAD_MODE_CP_COND_REG_EXEC_1 0x00000001
-#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
-#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
-static inline uint32_t THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
-{
- return ((val) << THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK;
-}
-
-#define REG_CP_COND_REG_EXEC_2 0x00000002
-#define CP_COND_REG_EXEC_2_DWORDS__MASK 0x00ffffff
-#define CP_COND_REG_EXEC_2_DWORDS__SHIFT 0
-static inline uint32_t CP_COND_REG_EXEC_2_DWORDS(uint32_t val)
-{
- return ((val) << CP_COND_REG_EXEC_2_DWORDS__SHIFT) & CP_COND_REG_EXEC_2_DWORDS__MASK;
-}
-
-#define REG_CP_COND_EXEC_0 0x00000000
-#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff
-#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0
-static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK;
-}
-
-#define REG_CP_COND_EXEC_1 0x00000001
-#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff
-#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0
-static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK;
-}
-
-#define REG_CP_COND_EXEC_2 0x00000002
-#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff
-#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0
-static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK;
-}
-
-#define REG_CP_COND_EXEC_3 0x00000003
-#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff
-#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0
-static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK;
-}
-
-#define REG_CP_COND_EXEC_4 0x00000004
-#define CP_COND_EXEC_4_REF__MASK 0xffffffff
-#define CP_COND_EXEC_4_REF__SHIFT 0
-static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK;
-}
-
-#define REG_CP_COND_EXEC_5 0x00000005
-#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff
-#define CP_COND_EXEC_5_DWORDS__SHIFT 0
-static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val)
-{
- return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK;
-}
-
-#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000
-#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff
-#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0
-static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val)
-{
- return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK;
-}
-
-#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001
-#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff
-#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0
-static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val)
-{
- return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK;
-}
-
-#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002
-#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff
-#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0
-static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val)
-{
- return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK;
-}
-#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000
-#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20
-static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
-{
- return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK;
-}
-
-#define REG_CP_REG_WRITE_0 0x00000000
-#define CP_REG_WRITE_0_TRACKER__MASK 0x0000000f
-#define CP_REG_WRITE_0_TRACKER__SHIFT 0
-static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
-{
- return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
-}
-
-#define REG_CP_REG_WRITE_1 0x00000001
-
-#define REG_CP_REG_WRITE_2 0x00000002
-
-#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000
-#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff
-#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0
-static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val)
-{
- return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK;
-}
-
-#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001
-#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff
-#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0
-static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val)
-{
- return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK;
-}
-#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000
-#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16
-static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val)
-{
- return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK;
-}
-
-#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002
-#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff
-#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0
-static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val)
-{
- return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK;
-}
-
-#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003
-#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK 0xffffffff
-#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT 0
-static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
-{
- return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
-}
-
-#define REG_CP_START_BIN_BIN_COUNT 0x00000000
-
-#define REG_CP_START_BIN_PREFIX_ADDR 0x00000001
-
-#define REG_CP_START_BIN_PREFIX_DWORDS 0x00000003
-
-#define REG_CP_START_BIN_BODY_DWORDS 0x00000004
-
-#define REG_CP_WAIT_TIMESTAMP_0 0x00000000
-#define CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK 0x00000003
-#define CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT 0
-static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC(enum ts_wait_value_src val)
-{
- return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK;
-}
-#define CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK 0x00000010
-#define CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT 4
-static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_DST(enum ts_wait_type val)
-{
- return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK;
-}
-
-#define REG_TS_WAIT_RAM_CP_WAIT_TIMESTAMP_ADDR 0x00000001
-
-#define REG_TS_WAIT_ONCHIP_CP_WAIT_TIMESTAMP_ONCHIP_ADDR_0 0x00000001
-
-#define REG_CP_WAIT_TIMESTAMP_SRC_0 0x00000003
-
-#define REG_CP_WAIT_TIMESTAMP_SRC_1 0x00000004
-
-#define REG_CP_BV_BR_COUNT_OPS_0 0x00000000
-#define CP_BV_BR_COUNT_OPS_0_OP__MASK 0x0000000f
-#define CP_BV_BR_COUNT_OPS_0_OP__SHIFT 0
-static inline uint32_t CP_BV_BR_COUNT_OPS_0_OP(enum pipe_count_op val)
-{
- return ((val) << CP_BV_BR_COUNT_OPS_0_OP__SHIFT) & CP_BV_BR_COUNT_OPS_0_OP__MASK;
-}
-
-#define REG_CP_BV_BR_COUNT_OPS_1 0x00000001
-#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK 0x0000ffff
-#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT 0
-static inline uint32_t CP_BV_BR_COUNT_OPS_1_BR_OFFSET(uint32_t val)
-{
- return ((val) << CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT) & CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK;
-}
-
-#define REG_CP_MODIFY_TIMESTAMP_0 0x00000000
-#define CP_MODIFY_TIMESTAMP_0_ADD__MASK 0x000000ff
-#define CP_MODIFY_TIMESTAMP_0_ADD__SHIFT 0
-static inline uint32_t CP_MODIFY_TIMESTAMP_0_ADD(uint32_t val)
-{
- return ((val) << CP_MODIFY_TIMESTAMP_0_ADD__SHIFT) & CP_MODIFY_TIMESTAMP_0_ADD__MASK;
-}
-#define CP_MODIFY_TIMESTAMP_0_OP__MASK 0xf0000000
-#define CP_MODIFY_TIMESTAMP_0_OP__SHIFT 28
-static inline uint32_t CP_MODIFY_TIMESTAMP_0_OP(enum timestamp_op val)
-{
- return ((val) << CP_MODIFY_TIMESTAMP_0_OP__SHIFT) & CP_MODIFY_TIMESTAMP_0_OP__MASK;
-}
-
-#define REG_CP_MEM_TO_SCRATCH_MEM_0 0x00000000
-#define CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK 0x0000003f
-#define CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT 0
-static inline uint32_t CP_MEM_TO_SCRATCH_MEM_0_CNT(uint32_t val)
-{
- return ((val) << CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT) & CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK;
-}
-
-#define REG_CP_MEM_TO_SCRATCH_MEM_1 0x00000001
-#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK 0x0000003f
-#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT 0
-static inline uint32_t CP_MEM_TO_SCRATCH_MEM_1_OFFSET(uint32_t val)
-{
- return ((val) << CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT) & CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK;
-}
-
-#define REG_CP_MEM_TO_SCRATCH_MEM_2 0x00000002
-#define CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK 0xffffffff
-#define CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT 0
-static inline uint32_t CP_MEM_TO_SCRATCH_MEM_2_SRC(uint32_t val)
-{
- return ((val) << CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT) & CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK;
-}
-
-#define REG_CP_MEM_TO_SCRATCH_MEM_3 0x00000003
-#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK 0xffffffff
-#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT 0
-static inline uint32_t CP_MEM_TO_SCRATCH_MEM_3_SRC_HI(uint32_t val)
-{
- return ((val) << CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT) & CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK;
-}
-
-#define REG_CP_THREAD_CONTROL_0 0x00000000
-#define CP_THREAD_CONTROL_0_THREAD__MASK 0x00000003
-#define CP_THREAD_CONTROL_0_THREAD__SHIFT 0
-static inline uint32_t CP_THREAD_CONTROL_0_THREAD(enum cp_thread val)
-{
- return ((val) << CP_THREAD_CONTROL_0_THREAD__SHIFT) & CP_THREAD_CONTROL_0_THREAD__MASK;
-}
-#define CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE 0x08000000
-#define CP_THREAD_CONTROL_0_SYNC_THREADS 0x80000000
-
-#define REG_CP_FIXED_STRIDE_DRAW_TABLE_IB_BASE 0x00000000
-
-#define REG_CP_FIXED_STRIDE_DRAW_TABLE_2 0x00000002
-#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK 0x00000fff
-#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT 0
-static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE(uint32_t val)
-{
- return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK;
-}
-#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK 0xfff00000
-#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT 20
-static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE(uint32_t val)
-{
- return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK;
-}
-
-#define REG_CP_FIXED_STRIDE_DRAW_TABLE_3 0x00000003
-#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK 0xffffffff
-#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT 0
-static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT(uint32_t val)
-{
- return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK;
-}
-
-#define REG_CP_RESET_CONTEXT_STATE_0 0x00000000
-#define CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS 0x00000001
-#define CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE 0x00000002
-#define CP_RESET_CONTEXT_STATE_0_CLEAR_GLOBAL_LOCAL_TS 0x00000004
-
-#ifdef __cplusplus
-#endif
-
-#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 9a9f7092c526..a3e60ac70689 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -324,6 +324,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
},
};
+/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -358,8 +359,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
.features = INTF_SC7280_MASK,
- .type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_1,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
@@ -368,7 +369,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.base = 0x38000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_2,
+ .controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
@@ -381,6 +382,33 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3A000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index ef871239adb2..68fae048a9a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, entry,
+ debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0600, entry,
+ debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, entry,
+ debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, entry,
+ debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, entry,
+ debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 88c2e51ab166..9f2164782844 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -320,7 +320,7 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate, struct dpu_format *format)
+ struct dpu_plane_state *pstate, const struct msm_format *format)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op;
@@ -363,7 +363,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
fg_alpha, bg_alpha, blend_op);
DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
- &format->base.pixel_format, format->alpha_enable, blend_op);
+ &format->pixel_format, format->alpha_enable, blend_op);
}
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
@@ -395,7 +395,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct dpu_crtc_mixer *mixer,
u32 num_mixers,
enum dpu_stage stage,
- struct dpu_format *format,
+ const struct msm_format *format,
uint64_t modifier,
struct dpu_sw_pipe *pipe,
unsigned int stage_idx,
@@ -412,7 +412,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, to_dpu_plane_state(state), stage_idx,
- format->base.pixel_format,
+ format->pixel_format,
modifier);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
@@ -440,7 +440,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct drm_plane_state *state;
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_plane_state *pstate = NULL;
- struct dpu_format *format;
+ const struct msm_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
uint32_t lm_idx;
@@ -459,7 +459,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_dpu_plane_state(state);
fb = state->fb;
- format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ format = msm_framebuffer_format(pstate->base.fb);
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9a14d2232e4a..119f3ea50a7c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -675,7 +675,7 @@ static int dpu_encoder_virt_atomic_check(
if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
fb = conn_state->writeback_job->fb;
- if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
+ if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
topology.needs_cdm = true;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
@@ -2184,7 +2184,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
- const struct dpu_format *dpu_fmt,
+ const struct msm_format *dpu_fmt,
u32 output_type)
{
struct dpu_hw_cdm *hw_cdm;
@@ -2202,9 +2202,9 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
if (!hw_cdm)
return;
- if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
- DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
- dpu_fmt->base.pixel_format);
+ if (!MSM_FORMAT_IS_YUV(dpu_fmt)) {
+ DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent),
+ &dpu_fmt->pixel_format);
if (hw_cdm->ops.bind_pingpong_blk)
hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
@@ -2217,25 +2217,25 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
cdm_cfg->output_fmt = dpu_fmt;
cdm_cfg->output_type = output_type;
- cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
+ cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ?
CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
/* enable 10 bit logic */
switch (cdm_cfg->output_fmt->chroma_sample) {
- case DPU_CHROMA_RGB:
+ case CHROMA_FULL:
cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
break;
- case DPU_CHROMA_H2V1:
+ case CHROMA_H2V1:
cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
break;
- case DPU_CHROMA_420:
+ case CHROMA_420:
cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
break;
- case DPU_CHROMA_H1V2:
+ case CHROMA_H1V2:
default:
DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
DRMID(phys_enc->parent));
@@ -2244,9 +2244,9 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
break;
}
- DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n",
DRMID(phys_enc->parent), cdm_cfg->output_width,
- cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format,
cdm_cfg->output_type, cdm_cfg->output_bit_depth,
cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 98d1b64a43e8..002e89cc1705 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -393,7 +393,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
* @output_type: HDMI/WB
*/
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
- const struct dpu_format *dpu_fmt,
+ const struct msm_format *dpu_fmt,
u32 output_type);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index fc1d5736d7fc..489be1c0c704 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -448,9 +448,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- return;
-
ctl = phys_enc->hw_ctl;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index d9e7dbf0499c..ef69c2f408c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -235,7 +235,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
{
struct drm_display_mode mode;
struct dpu_hw_intf_timing_params timing_params = { 0 };
- const struct dpu_format *fmt = NULL;
+ const struct msm_format *fmt = NULL;
u32 fmt_fourcc;
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
@@ -274,7 +274,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
- fmt = dpu_get_dpu_format(fmt_fourcc);
+ fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
if (phys_enc->hw_cdm)
@@ -409,12 +409,12 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
- const struct dpu_format *fmt;
+ const struct msm_format *fmt;
u32 fmt_fourcc;
ctl = phys_enc->hw_ctl;
fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
- fmt = dpu_get_dpu_format(fmt_fourcc);
+ fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
DPU_DEBUG_VIDENC(phys_enc, "\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 1924a2b28e53..d3ea91c1d7d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -322,11 +322,11 @@ static void dpu_encoder_phys_wb_setup(
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct drm_writeback_job *wb_job;
const struct msm_format *format;
- const struct dpu_format *dpu_fmt;
+ const struct msm_format *dpu_fmt;
wb_job = wb_enc->wb_job;
format = msm_framebuffer_format(wb_enc->wb_job->fb);
- dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier);
+ dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier);
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
@@ -576,11 +576,11 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
format = msm_framebuffer_format(job->fb);
- wb_cfg->dest.format = dpu_get_dpu_format_ext(
- format->pixel_format, job->fb->modifier);
+ wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base,
+ format->pixel_format, job->fb->modifier);
if (!wb_cfg->dest.format) {
/* this error should be detected during atomic_check */
- DPU_ERROR("failed to get format %x\n", format->pixel_format);
+ DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format);
return;
}
@@ -594,7 +594,7 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
wb_cfg->dest.height = job->fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
- if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
+ if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) &&
(wb_cfg->dest.format->element[0] == C1_B_Cb))
swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index e366ab134249..6b1e9a617da3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -11,179 +11,12 @@
#include "dpu_kms.h"
#include "dpu_formats.h"
-#define DPU_UBWC_META_MACRO_W_H 16
-#define DPU_UBWC_META_BLOCK_SIZE 256
#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
-#define DPU_TILE_HEIGHT_DEFAULT 1
-#define DPU_TILE_HEIGHT_TILED 4
-#define DPU_TILE_HEIGHT_UBWC 4
-#define DPU_TILE_HEIGHT_NV12 8
-
#define DPU_MAX_IMG_WIDTH 0x3FFF
#define DPU_MAX_IMG_HEIGHT 0x3FFF
/*
- * DPU supported format packing, bpp, and other format
- * information.
- * DPU currently only supports interleaved RGB formats
- * UBWC support for a pixel format is indicated by the flag,
- * there is additional meta data plane for such formats
- */
-
-#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
-bp, flg, fm, np) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_INTERLEAVED, \
- .alpha_enable = alpha, \
- .element = { (e0), (e1), (e2), (e3) }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = DPU_CHROMA_RGB, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = uc, \
- .bpp = bp, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = DPU_TILE_HEIGHT_DEFAULT \
-}
-
-#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
-alpha, bp, flg, fm, np, th) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_INTERLEAVED, \
- .alpha_enable = alpha, \
- .element = { (e0), (e1), (e2), (e3) }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = DPU_CHROMA_RGB, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = uc, \
- .bpp = bp, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = th \
-}
-
-
-#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
-alpha, chroma, count, bp, flg, fm, np) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_INTERLEAVED, \
- .alpha_enable = alpha, \
- .element = { (e0), (e1), (e2), (e3)}, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = count, \
- .bpp = bp, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = DPU_TILE_HEIGHT_DEFAULT \
-}
-
-#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
- .alpha_enable = false, \
- .element = { (e0), (e1), 0, 0 }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = 2, \
- .bpp = 2, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = DPU_TILE_HEIGHT_DEFAULT \
-}
-
-#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
-flg, fm, np, th) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
- .alpha_enable = false, \
- .element = { (e0), (e1), 0, 0 }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = 2, \
- .bpp = 2, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = th \
-}
-
-#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
- .alpha_enable = false, \
- .element = { (e0), (e1), 0, 0 }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 1, \
- .unpack_tight = 0, \
- .unpack_count = 2, \
- .bpp = 2, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = DPU_TILE_HEIGHT_DEFAULT \
-}
-
-#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
-flg, fm, np, th) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
- .alpha_enable = false, \
- .element = { (e0), (e1), 0, 0 }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 1, \
- .unpack_tight = 0, \
- .unpack_count = 2, \
- .bpp = 2, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = th \
-}
-
-
-#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
-flg, fm, np) \
-{ \
- .base.pixel_format = DRM_FORMAT_ ## fmt, \
- .fetch_planes = DPU_PLANE_PLANAR, \
- .alpha_enable = alpha, \
- .element = { (e0), (e1), (e2), 0 }, \
- .bits = { g, b, r, a }, \
- .chroma_sample = chroma, \
- .unpack_align_msb = 0, \
- .unpack_tight = 1, \
- .unpack_count = 1, \
- .bpp = bp, \
- .fetch_mode = fm, \
- .flag = {(flg)}, \
- .num_planes = np, \
- .tile_height = DPU_TILE_HEIGHT_DEFAULT \
-}
-
-/*
* struct dpu_media_color_map - maps drm format to media format
* @format: DRM base pixel format
* @color: Media API color related to DRM format
@@ -193,380 +26,11 @@ struct dpu_media_color_map {
uint32_t color;
};
-static const struct dpu_format dpu_format_map[] = {
- INTERLEAVED_RGB_FMT(ARGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- true, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ABGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XBGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBA8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- true, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRA8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- true, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRX8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- false, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XRGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- false, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBX8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- false, 4, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGB888,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
- false, 3, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGR888,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
- false, 3, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGB565,
- 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGR565,
- 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ARGB1555,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ABGR1555,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBA5551,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRA5551,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XRGB1555,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XBGR1555,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBX5551,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRX5551,
- COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ARGB4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ABGR4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBA4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRA4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- true, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XRGB4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XBGR4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBX4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRX4444,
- COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- false, 2, 0,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRA1010102,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBA1010102,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ABGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(ARGB2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XRGB2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- false, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(BGRX1010102,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- false, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(XBGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- INTERLEAVED_RGB_FMT(RGBX1010102,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- false, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_LINEAR, 1),
-
- PSEUDO_YUV_FMT(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- PSEUDO_YUV_FMT(NV21,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C1_B_Cb,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- PSEUDO_YUV_FMT(NV16,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- PSEUDO_YUV_FMT(NV61,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C1_B_Cb,
- DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- PSEUDO_YUV_FMT_LOOSE(P010,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- INTERLEAVED_YUV_FMT(VYUY,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
- false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- INTERLEAVED_YUV_FMT(UYVY,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
- false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- INTERLEAVED_YUV_FMT(YUYV,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
- false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- INTERLEAVED_YUV_FMT(YVYU,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
- false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 2),
-
- PLANAR_YUV_FMT(YUV420,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C1_B_Cb, C0_G_Y,
- false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 3),
-
- PLANAR_YUV_FMT(YVU420,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr, C0_G_Y,
- false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_LINEAR, 3),
-};
-
-/*
- * UBWC formats table:
- * This table holds the UBWC formats supported.
- * If a compression ratio needs to be used for this or any other format,
- * the data will be passed by user-space.
- */
-static const struct dpu_format dpu_format_map_ubwc[] = {
- INTERLEAVED_RGB_FMT_TILED(BGR565,
- 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
- false, 2, DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(ABGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- /* ARGB8888 and ABGR8888 purposely have the same color
- * ordering. The hardware only supports ABGR8888 UBWC
- * natively.
- */
- INTERLEAVED_RGB_FMT_TILED(ARGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(XBGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(XRGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- INTERLEAVED_RGB_FMT_TILED(XRGB2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- /* XRGB2101010 and ARGB2101010 purposely have the same color
- * ordering. The hardware only supports ARGB2101010 UBWC
- * natively.
- */
- INTERLEAVED_RGB_FMT_TILED(ARGB2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
-
- PSEUDO_YUV_FMT_TILED(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
- DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
-
- PSEUDO_YUV_FMT_TILED(P010,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_DX |
- DPU_FORMAT_FLAG_YUV |
- DPU_FORMAT_FLAG_COMPRESSED,
- DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_UBWC),
-};
-
/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
* Note: Not using the drm_format_*_subsampling since we have formats
*/
static void _dpu_get_v_h_subsample_rate(
- enum dpu_chroma_samp_type chroma_sample,
+ enum mdp_chroma_samp_type chroma_sample,
uint32_t *v_sample,
uint32_t *h_sample)
{
@@ -574,15 +38,15 @@ static void _dpu_get_v_h_subsample_rate(
return;
switch (chroma_sample) {
- case DPU_CHROMA_H2V1:
+ case CHROMA_H2V1:
*v_sample = 1;
*h_sample = 2;
break;
- case DPU_CHROMA_H1V2:
+ case CHROMA_H1V2:
*v_sample = 2;
*h_sample = 1;
break;
- case DPU_CHROMA_420:
+ case CHROMA_420:
*v_sample = 2;
*h_sample = 2;
break;
@@ -593,7 +57,7 @@ static void _dpu_get_v_h_subsample_rate(
}
}
-static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
@@ -609,10 +73,10 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
int color_fmt = -1;
int i;
- if (fmt->base.pixel_format == DRM_FORMAT_NV12 ||
- fmt->base.pixel_format == DRM_FORMAT_P010) {
- if (DPU_FORMAT_IS_DX(fmt)) {
- if (fmt->unpack_tight)
+ if (fmt->pixel_format == DRM_FORMAT_NV12 ||
+ fmt->pixel_format == DRM_FORMAT_P010) {
+ if (MSM_FORMAT_IS_DX(fmt)) {
+ if (fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT)
color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
else
color_fmt = COLOR_FMT_P010_UBWC;
@@ -622,7 +86,7 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
}
for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
- if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ if (fmt->pixel_format == dpu_media_ubwc_map[i].format) {
color_fmt = dpu_media_ubwc_map[i].color;
break;
}
@@ -630,14 +94,14 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
}
static int _dpu_format_get_plane_sizes_ubwc(
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
const uint32_t width,
const uint32_t height,
struct dpu_hw_fmt_layout *layout)
{
int i;
int color;
- bool meta = DPU_FORMAT_IS_UBWC(fmt);
+ bool meta = MSM_FORMAT_IS_UBWC(fmt);
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
layout->format = fmt;
@@ -647,12 +111,12 @@ static int _dpu_format_get_plane_sizes_ubwc(
color = _dpu_format_get_media_color_ubwc(fmt);
if (color < 0) {
- DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
- (char *)&fmt->base.pixel_format);
+ DRM_ERROR("UBWC format not supported for fmt: %p4cc\n",
+ &fmt->pixel_format);
return -EINVAL;
}
- if (DPU_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(layout->format)) {
uint32_t y_sclines, uv_sclines;
uint32_t y_meta_scanlines = 0;
uint32_t uv_meta_scanlines = 0;
@@ -709,7 +173,7 @@ done:
}
static int _dpu_format_get_plane_sizes_linear(
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
const uint32_t width,
const uint32_t height,
struct dpu_hw_fmt_layout *layout,
@@ -724,7 +188,7 @@ static int _dpu_format_get_plane_sizes_linear(
layout->num_planes = fmt->num_planes;
/* Due to memset above, only need to set planes of interest */
- if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) {
layout->num_planes = 1;
layout->plane_size[0] = width * height * layout->format->bpp;
layout->plane_pitch[0] = width * layout->format->bpp;
@@ -742,8 +206,8 @@ static int _dpu_format_get_plane_sizes_linear(
return -EINVAL;
}
- if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
- (DPU_FORMAT_IS_DX(fmt)))
+ if ((fmt->pixel_format == DRM_FORMAT_NV12) &&
+ (MSM_FORMAT_IS_DX(fmt)))
bpp = 2;
layout->plane_pitch[0] = width * bpp;
layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
@@ -751,7 +215,7 @@ static int _dpu_format_get_plane_sizes_linear(
layout->plane_size[1] = layout->plane_pitch[1] *
(height / v_subsample);
- if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) {
layout->num_planes = 2;
layout->plane_size[1] *= 2;
layout->plane_pitch[1] *= 2;
@@ -781,7 +245,7 @@ static int _dpu_format_get_plane_sizes_linear(
}
static int dpu_format_get_plane_sizes(
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
const uint32_t w,
const uint32_t h,
struct dpu_hw_fmt_layout *layout,
@@ -797,7 +261,7 @@ static int dpu_format_get_plane_sizes(
return -ERANGE;
}
- if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt))
return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
@@ -823,10 +287,10 @@ static int _dpu_format_populate_addrs_ubwc(
return -EFAULT;
}
- meta = DPU_FORMAT_IS_UBWC(layout->format);
+ meta = MSM_FORMAT_IS_UBWC(layout->format);
/* Per-format logic for verifying active planes */
- if (DPU_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(layout->format)) {
/************************************************/
/* UBWC ** */
/* buffer ** DPU PLANE */
@@ -942,7 +406,7 @@ int dpu_format_populate_layout(
return -ERANGE;
}
- layout->format = to_dpu_format(msm_framebuffer_format(fb));
+ layout->format = msm_framebuffer_format(fb);
/* Populate the plane sizes etc via get_format */
ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
@@ -951,8 +415,8 @@ int dpu_format_populate_layout(
return ret;
/* Populate the addresses given the fb */
- if (DPU_FORMAT_IS_UBWC(layout->format) ||
- DPU_FORMAT_IS_TILE(layout->format))
+ if (MSM_FORMAT_IS_UBWC(layout->format) ||
+ MSM_FORMAT_IS_TILE(layout->format))
ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
else
ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
@@ -962,23 +426,21 @@ int dpu_format_populate_layout(
int dpu_format_check_modified_format(
const struct msm_kms *kms,
- const struct msm_format *msm_fmt,
+ const struct msm_format *fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos)
{
const struct drm_format_info *info;
- const struct dpu_format *fmt;
struct dpu_hw_fmt_layout layout;
uint32_t bos_total_size = 0;
int ret, i;
- if (!msm_fmt || !cmd || !bos) {
+ if (!fmt || !cmd || !bos) {
DRM_ERROR("invalid arguments\n");
return -EINVAL;
}
- fmt = to_dpu_format(msm_fmt);
- info = drm_format_info(fmt->base.pixel_format);
+ info = drm_format_info(fmt->pixel_format);
if (!info)
return -EINVAL;
@@ -1004,65 +466,3 @@ int dpu_format_check_modified_format(
return 0;
}
-
-const struct dpu_format *dpu_get_dpu_format_ext(
- const uint32_t format,
- const uint64_t modifier)
-{
- uint32_t i = 0;
- const struct dpu_format *fmt = NULL;
- const struct dpu_format *map = NULL;
- ssize_t map_size = 0;
-
- /*
- * Currently only support exactly zero or one modifier.
- * All planes use the same modifier.
- */
- DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
-
- switch (modifier) {
- case 0:
- map = dpu_format_map;
- map_size = ARRAY_SIZE(dpu_format_map);
- break;
- case DRM_FORMAT_MOD_QCOM_COMPRESSED:
- map = dpu_format_map_ubwc;
- map_size = ARRAY_SIZE(dpu_format_map_ubwc);
- DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
- (char *)&format);
- break;
- default:
- DPU_ERROR("unsupported format modifier %llX\n", modifier);
- return NULL;
- }
-
- for (i = 0; i < map_size; i++) {
- if (format == map[i].base.pixel_format) {
- fmt = &map[i];
- break;
- }
- }
-
- if (fmt == NULL)
- DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
- (char *)&format, modifier);
- else
- DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
- (char *)&format, modifier,
- DPU_FORMAT_IS_UBWC(fmt),
- DPU_FORMAT_IS_YUV(fmt));
-
- return fmt;
-}
-
-const struct msm_format *dpu_get_msm_format(
- struct msm_kms *kms,
- const uint32_t format,
- const uint64_t modifiers)
-{
- const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
- modifiers);
- if (fmt)
- return &fmt->base;
- return NULL;
-}
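
The linear plane-size path kept earlier in this file derives the chroma pitch and height by dividing by the horizontal/vertical subsample factors, then doubles the second plane for pseudo-planar layouts. Below is a minimal standalone sketch of that arithmetic for NV12 (CHROMA_420, one byte per component) with an assumed 1920x1080 buffer; the helper and the numbers are illustrative, not driver code.

/* Hypothetical sketch of the NV12 (linear) size math that
 * _dpu_format_get_plane_sizes_linear() performs; example values only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t width = 1920, height = 1080;
	uint32_t bpp = 1, h_subsample = 2, v_subsample = 2;

	uint32_t pitch0 = width * bpp;			/* luma pitch        */
	uint32_t pitch1 = pitch0 / h_subsample;		/* chroma pitch      */
	uint32_t size0  = pitch0 * height;		/* luma plane        */
	uint32_t size1  = pitch1 * (height / v_subsample);

	/* pseudo-planar: Cb and Cr share one interleaved plane */
	size1  *= 2;
	pitch1 *= 2;

	printf("plane0 %u bytes, plane1 %u bytes, total %u\n",
	       size0, size1, size0 + size1);
	return 0;
}

With these inputs the luma plane is 2073600 bytes and the interleaved CbCr plane 1036800 bytes, i.e. the familiar 1.5 bytes per pixel for NV12.
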
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index 84b8b3289f18..210d0ed5f0af 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -10,17 +10,6 @@
#include "dpu_hw_mdss.h"
/**
- * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
- * @format: DRM FourCC Code
- * @modifiers: format modifier array from client, one per plane
- */
-const struct dpu_format *dpu_get_dpu_format_ext(
- const uint32_t format,
- const uint64_t modifier);
-
-#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
-
-/**
* dpu_find_format - validate if the pixel format is supported
* @format: dpu format
* @supported_formats: supported formats by dpu HW
@@ -43,22 +32,10 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
}
/**
- * dpu_get_msm_format - get an dpu_format by its msm_format base
- * callback function registers with the msm_kms layer
- * @kms: kms driver
- * @format: DRM FourCC Code
- * @modifiers: data layout modifier
- */
-const struct msm_format *dpu_get_msm_format(
- struct msm_kms *kms,
- const uint32_t format,
- const uint64_t modifiers);
-
-/**
* dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format
* @kms: kms driver
- * @msm_fmt: pointer to the msm_fmt base pointer of an dpu_format
+ * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format
* @cmd: fb_cmd2 structure user request
* @bos: gem buffer object list
*
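
dpu_formats.h keeps the inline dpu_find_format() helper that validates a fourcc against a pipe's supported-format list (dpu_plane.c calls it with the inline-rotation format list). Its body is not visible in this hunk; the following is only a plausible minimal sketch, assuming it is a plain linear scan.

/* Hedged sketch of a fourcc lookup; not copied from dpu_formats.h. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

bool example_find_format(uint32_t format,
			 const uint32_t *supported_formats,
			 size_t num_formats)
{
	size_t i;

	for (i = 0; i < num_formats; i++)
		if (format == supported_formats[i])
			return true;

	return false;
}
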
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index 9016b3ade6bc..55d2768a6d4d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -170,7 +170,7 @@ static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *
static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct dpu_format *fmt;
+ const struct msm_format *fmt;
u32 opmode = 0;
u32 csc = 0;
@@ -179,14 +179,14 @@ static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm)
fmt = cdm->output_fmt;
- if (!DPU_FORMAT_IS_YUV(fmt))
+ if (!MSM_FORMAT_IS_YUV(fmt))
return -EINVAL;
dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true);
dpu_hw_cdm_setup_cdwn(ctx, cdm);
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample == DPU_CHROMA_H1V2)
+ if (fmt->chroma_sample == CHROMA_H1V2)
return -EINVAL; /*unsupported format */
opmode = CDM_HDMI_PACK_OP_MODE_EN;
opmode |= (fmt->chroma_sample << 1);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
index 348424df87c6..ec71c9886d75 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -19,7 +19,7 @@ struct dpu_hw_cdm;
* @output_bit_depth: output bit-depth of CDM block
* @h_cdwn_type: downsample type used for horizontal pixels
* @v_cdwn_type: downsample type used for vertical pixels
- * @output_fmt: handle to dpu_format of CDM block
+ * @output_fmt: handle to msm_format of CDM block
* @csc_cfg: handle to CSC matrix programmed for CDM block
* @output_type: interface to which CDM is paired (HDMI/WB)
* @pp_id: ping-pong block to which CDM is bound to
@@ -30,7 +30,7 @@ struct dpu_hw_cdm_cfg {
u32 output_bit_depth;
u32 h_cdwn_type;
u32 v_cdwn_type;
- const struct dpu_format *output_fmt;
+ const struct msm_format *output_fmt;
const struct dpu_csc_cfg *csc_cfg;
u32 output_type;
int pp_id;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index a06f69d0b257..2e50049f2f85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -545,6 +545,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 dsc_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
@@ -560,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -567,17 +569,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->dsc)
+ dsc_active |= cfg->dsc;
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
- if (cfg->dsc)
- DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
-
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}
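
The dpu_hw_ctl.c hunk above changes CTL_DSC_ACTIVE from a blind write to the same read-modify-write pattern already used for CTL_INTF_ACTIVE and CTL_WB_ACTIVE, so DSC blocks enabled by another path are not silently cleared. A generic sketch of that pattern follows; the accessors stand in for DPU_REG_READ/DPU_REG_WRITE and are not the DPU register map.

#include <stdint.h>

/* Placeholder MMIO accessors standing in for DPU_REG_READ/DPU_REG_WRITE. */
static uint32_t reg_read(volatile uint32_t *reg)              { return *reg; }
static void     reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* OR the new active bits into a shared "ACTIVE" register instead of
 * overwriting whatever another consumer already enabled. */
void set_active_bits(volatile uint32_t *active_reg, uint32_t new_bits)
{
	uint32_t val = reg_read(active_reg);

	val |= new_bits;
	reg_write(active_reg, val);
}
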
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 946dd0135dff..b85881aab047 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -223,9 +223,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int
VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
- if (!irq_entry->cb)
+ if (!irq_entry->cb) {
DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return;
+ }
atomic_inc(&irq_entry->count);
@@ -525,14 +527,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
- DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
- DPU_ERROR("invalid IRQ=[%d, %d]\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
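
The interrupt hunk turns the "no registered cb" message into a real early return, so the handler no longer bumps the per-IRQ count or calls a NULL callback; the same file also swaps the two DPU_ERROR() strings in dpu_core_irq_register_callback() so each check reports the condition it actually tests. A minimal sketch of the guard-clause shape (types and names are illustrative):

#include <stdio.h>

struct irq_entry_example {
	void (*cb)(void *arg);
	void *arg;
	int count;
};

void handle_irq_example(struct irq_entry_example *e)
{
	if (!e->cb) {
		fprintf(stderr, "no registered cb\n");
		return;		/* previously fell through to the call below */
	}

	e->count++;
	e->cb(e->arg);
}
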
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 965692ef7892..225c1c7768ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -96,11 +96,11 @@
#define INTF_CFG2_DCE_DATA_COMPRESS BIT(12)
-static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct dpu_format *fmt)
+ const struct msm_format *fmt)
{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
@@ -118,7 +118,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
- if (ctx->cap->type == INTF_DP)
+ if (intf->cap->type == INTF_DP)
dp_intf = true;
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
@@ -194,16 +194,16 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
(p->vsync_polarity << 1) | /* VSYNC Polarity */
(p->hsync_polarity << 0); /* HSYNC Polarity */
- if (!DPU_FORMAT_IS_YUV(fmt))
- panel_format = (fmt->bits[C0_G_Y] |
- (fmt->bits[C1_B_Cb] << 2) |
- (fmt->bits[C2_R_Cr] << 4) |
+ if (!MSM_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bpc_g_y |
+ (fmt->bpc_b_cb << 2) |
+ (fmt->bpc_r_cr << 4) |
(0x21 << 8));
else
/* Interface treats all the pixel data in RGB888 format */
- panel_format = (COLOR_8BIT |
- (COLOR_8BIT << 2) |
- (COLOR_8BIT << 4) |
+ panel_format = (BPC8 |
+ (BPC8 << 2) |
+ (BPC8 << 4) |
(0x21 << 8));
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
@@ -223,7 +223,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
- if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ if (intf->cap->features & BIT(DPU_DATA_HCTL_EN)) {
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
@@ -518,10 +518,10 @@ static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf,
}
-static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
+static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf,
struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg)
{
- u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2);
+ u32 intf_cfg2 = DPU_REG_READ(&intf->hw, INTF_CONFIG2);
if (cmd_mode_cfg->data_compress)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
@@ -529,7 +529,7 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
if (cmd_mode_cfg->wide_bus_en)
intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
- DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2);
}
struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
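
For reference, the INTF_PANEL_FORMAT value built above packs the per-component bit-depth codes into 2-bit fields plus a fixed 0x21 in the upper bits. Assuming BPC8 keeps the value 3 that the removed COLOR_8BIT enum used, an RGB888 panel resolves as in this illustrative computation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bpc8 = 3;	/* assumed: same encoding as the old COLOR_8BIT */
	uint32_t panel_format = bpc8 | (bpc8 << 2) | (bpc8 << 4) | (0x21 << 8);

	printf("INTF_PANEL_FORMAT = 0x%x\n", panel_format);	/* 0x213f */
	return 0;
}
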
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 6f4c87244f94..f9015c67a574 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -81,7 +81,7 @@ struct dpu_hw_intf_cmd_mode_cfg {
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct dpu_format *fmt);
+ const struct msm_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 5df545904057..66759623fc42 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -10,6 +10,8 @@
#include "msm_drv.h"
+#include "disp/mdp_format.h"
+
#define DPU_DBG_NAME "dpu"
#define DPU_NONE 0
@@ -35,28 +37,6 @@
#define DPU_MAX_DE_CURVES 3
#endif
-enum dpu_format_flags {
- DPU_FORMAT_FLAG_YUV_BIT,
- DPU_FORMAT_FLAG_DX_BIT,
- DPU_FORMAT_FLAG_COMPRESSED_BIT,
- DPU_FORMAT_FLAG_BIT_MAX,
-};
-
-#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
-#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
-#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
-#define DPU_FORMAT_IS_YUV(X) \
- (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
-#define DPU_FORMAT_IS_DX(X) \
- (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
-#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
-#define DPU_FORMAT_IS_TILE(X) \
- (((X)->fetch_mode == DPU_FETCH_UBWC) && \
- !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
-#define DPU_FORMAT_IS_UBWC(X) \
- (((X)->fetch_mode == DPU_FETCH_UBWC) && \
- test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
-
#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
@@ -291,67 +271,6 @@ enum dpu_vbif {
};
/**
- * DPU HW,Component order color map
- */
-enum {
- C0_G_Y = 0,
- C1_B_Cb = 1,
- C2_R_Cr = 2,
- C3_ALPHA = 3
-};
-
-/**
- * enum dpu_plane_type - defines how the color component pixel packing
- * @DPU_PLANE_INTERLEAVED : Color components in single plane
- * @DPU_PLANE_PLANAR : Color component in separate planes
- * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
- */
-enum dpu_plane_type {
- DPU_PLANE_INTERLEAVED,
- DPU_PLANE_PLANAR,
- DPU_PLANE_PSEUDO_PLANAR,
-};
-
-/**
- * enum dpu_chroma_samp_type - chroma sub-samplng type
- * @DPU_CHROMA_RGB : No chroma subsampling
- * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
- * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
- * @DPU_CHROMA_420 : 420 subsampling
- */
-enum dpu_chroma_samp_type {
- DPU_CHROMA_RGB,
- DPU_CHROMA_H2V1,
- DPU_CHROMA_H1V2,
- DPU_CHROMA_420
-};
-
-/**
- * dpu_fetch_type - Defines How DPU HW fetches data
- * @DPU_FETCH_LINEAR : fetch is line by line
- * @DPU_FETCH_TILE : fetches data in Z order from a tile
- * @DPU_FETCH_UBWC : fetch and decompress data
- */
-enum dpu_fetch_type {
- DPU_FETCH_LINEAR,
- DPU_FETCH_TILE,
- DPU_FETCH_UBWC
-};
-
-/**
- * Value of enum chosen to fit the number of bits
- * expected by the HW programming.
- */
-enum {
- COLOR_ALPHA_1BIT = 0,
- COLOR_ALPHA_4BIT = 1,
- COLOR_4BIT = 0,
- COLOR_5BIT = 1, /* No 5-bit Alpha */
- COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
- COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
-};
-
-/**
* enum dpu_3d_blend_mode
* Desribes how the 3d data is blended
* @BLEND_3D_NONE : 3d blending not enabled
@@ -370,43 +289,6 @@ enum dpu_3d_blend_mode {
BLEND_3D_MAX
};
-/** struct dpu_format - defines the format configuration which
- * allows DPU HW to correctly fetch and decode the format
- * @base: base msm_format structure containing fourcc code
- * @fetch_planes: how the color components are packed in pixel format
- * @element: element color ordering
- * @bits: element bit widths
- * @chroma_sample: chroma sub-samplng type
- * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
- * @unpack_tight: 0 for loose, 1 for tight
- * @unpack_count: 0 = 1 component, 1 = 2 component
- * @bpp: bytes per pixel
- * @alpha_enable: whether the format has an alpha channel
- * @num_planes: number of planes (including meta data planes)
- * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
- * @flag: usage bit flags
- * @tile_width: format tile width
- * @tile_height: format tile height
- */
-struct dpu_format {
- struct msm_format base;
- enum dpu_plane_type fetch_planes;
- u8 element[DPU_MAX_PLANES];
- u8 bits[DPU_MAX_PLANES];
- enum dpu_chroma_samp_type chroma_sample;
- u8 unpack_align_msb;
- u8 unpack_tight;
- u8 unpack_count;
- u8 bpp;
- u8 alpha_enable;
- u8 num_planes;
- enum dpu_fetch_type fetch_mode;
- DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
- u16 tile_width;
- u16 tile_height;
-};
-#define to_dpu_format(x) container_of(x, struct dpu_format, base)
-
/**
* struct dpu_hw_fmt_layout - format information of the source pixel data
* @format: pixel format parameters
@@ -419,7 +301,7 @@ struct dpu_format {
* @plane_pitch: pitch of each plane
*/
struct dpu_hw_fmt_layout {
- const struct dpu_format *format;
+ const struct msm_format *format;
uint32_t num_planes;
uint32_t width;
uint32_t height;
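
dpu_hw_mdss.h loses its private DPU_FORMAT_FLAG_* bitmap and the DPU_FORMAT_IS_*() test macros; the rest of the series tests MSM_FORMAT_FLAG_* bits in the msm_format flags word instead (see the UNPACK_TIGHT/UNPACK_ALIGN_MSB tests in dpu_hw_sspp.c and dpu_hw_wb.c). A hedged sketch of the difference between the two styles, with made-up flag values (the real definitions live in disp/mdp_format.h, which is not shown here):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative flag bits only. */
#define EXAMPLE_FLAG_YUV		(1u << 0)
#define EXAMPLE_FLAG_DX			(1u << 1)
#define EXAMPLE_FLAG_COMPRESSED		(1u << 2)

struct example_format {
	uint32_t flags;
};

/* Old style: test_bit() on a DECLARE_BITMAP() member of struct dpu_format.
 * New style (sketched): a plain bitwise AND on a flags word. */
static inline bool example_is_yuv(const struct example_format *fmt)
{
	return fmt->flags & EXAMPLE_FLAG_YUV;
}
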
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 0bf8a83e8df3..2c720f1fc1b2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/debugfs.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
@@ -206,7 +208,7 @@ static void _sspp_setup_csc10_opmode(struct dpu_hw_sspp *ctx,
* Setup source pixel format, flip,
*/
static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt, u32 flags)
+ const struct msm_format *fmt, u32 flags)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
struct dpu_hw_blk_reg_map *c;
@@ -241,20 +243,20 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
chroma_samp = fmt->chroma_sample;
if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
- if (chroma_samp == DPU_CHROMA_H2V1)
- chroma_samp = DPU_CHROMA_H1V2;
- else if (chroma_samp == DPU_CHROMA_H1V2)
- chroma_samp = DPU_CHROMA_H2V1;
+ if (chroma_samp == CHROMA_H2V1)
+ chroma_samp = CHROMA_H1V2;
+ else if (chroma_samp == CHROMA_H1V2)
+ chroma_samp = CHROMA_H2V1;
}
- src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
- (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
- (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+ src_format = (chroma_samp << 23) | (fmt->fetch_type << 19) |
+ (fmt->bpc_a << 6) | (fmt->bpc_r_cr << 4) |
+ (fmt->bpc_b_cb << 2) | (fmt->bpc_g_y << 0);
if (flags & DPU_SSPP_ROT_90)
src_format |= BIT(11); /* ROT90 */
- if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ if (fmt->alpha_enable && fmt->fetch_type == MDP_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
if (flags & DPU_SSPP_SOLID_FILL)
@@ -263,12 +265,12 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
src_format |= ((fmt->unpack_count - 1) << 12) |
- (fmt->unpack_tight << 17) |
- (fmt->unpack_align_msb << 18) |
+ ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT ? 1 : 0) << 17) |
+ ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB ? 1 : 0) << 18) |
((fmt->bpp - 1) << 9);
- if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
- if (DPU_FORMAT_IS_UBWC(fmt))
+ if (fmt->fetch_mode != MDP_FETCH_LINEAR) {
+ if (MSM_FORMAT_IS_UBWC(fmt))
opmode |= MDSS_MDP_OP_BWC_EN;
src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
@@ -295,7 +297,7 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
break;
case UBWC_4_0:
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
- DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+ MSM_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
break;
}
}
@@ -303,20 +305,20 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
opmode |= MDSS_MDP_OP_PE_OVERRIDE;
/* if this is YUV pixel format, enable CSC */
- if (DPU_FORMAT_IS_YUV(fmt))
+ if (MSM_FORMAT_IS_YUV(fmt))
src_format |= BIT(15);
- if (DPU_FORMAT_IS_DX(fmt))
+ if (MSM_FORMAT_IS_DX(fmt))
src_format |= BIT(14);
/* update scaler opmode, if appropriate */
if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
- DPU_FORMAT_IS_YUV(fmt));
+ MSM_FORMAT_IS_YUV(fmt));
else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
_sspp_setup_csc10_opmode(ctx,
VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
- DPU_FORMAT_IS_YUV(fmt));
+ MSM_FORMAT_IS_YUV(fmt));
DPU_REG_WRITE(c, format_off, src_format);
DPU_REG_WRITE(c, unpack_pat_off, unpack);
@@ -385,7 +387,7 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
- const struct dpu_format *format)
+ const struct msm_format *format)
{
if (!ctx || !scaler3_cfg)
return;
@@ -556,7 +558,7 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
bool enable)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
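
The SSPP format programming keeps the same SRC_FORMAT layout and only sources the fields from msm_format: chroma subsampling at bit 23, fetch type at bit 19, per-component bit-depth codes in bits 6/4/2/0, with the unpack pattern built separately from element[3..0]. A rough worked example of the SRC_FORMAT word for an interleaved 8-bit RGBA-style format, assuming the bit-depth code 3 used by the removed COLOR_8BIT enum:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bpc = 3;		/* assumed 8-bit component code      */
	uint32_t chroma_samp = 0;	/* RGB, no subsampling               */
	uint32_t fetch_type  = 0;	/* interleaved                       */
	uint32_t bpp = 4, unpack_count = 4;

	uint32_t src_format = (chroma_samp << 23) | (fetch_type << 19) |
			      (bpc << 6) | (bpc << 4) | (bpc << 2) | bpc;

	src_format |= 1u << 8;			/* SRCC3_EN: alpha present   */
	src_format |= (unpack_count - 1) << 12;
	src_format |= 1u << 17;			/* unpack tight              */
	src_format |= (bpp - 1) << 9;

	printf("SRC_FORMAT = 0x%x\n", src_format);	/* 0x237ff */
	return 0;
}
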
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index b7dc52312c39..4a910b808687 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -183,7 +183,7 @@ struct dpu_hw_sspp_ops {
* @flags: Extra flags for format config
*/
void (*setup_format)(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt, u32 flags);
+ const struct msm_format *fmt, u32 flags);
/**
* setup_rects - setup pipe ROI rectangles
@@ -279,7 +279,7 @@ struct dpu_hw_sspp_ops {
*/
void (*setup_scaler)(struct dpu_hw_sspp *ctx,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
- const struct dpu_format *format);
+ const struct msm_format *format);
/**
* setup_cdp - setup client driven prefetch
@@ -288,7 +288,7 @@ struct dpu_hw_sspp_ops {
* @enable: whether the CDP should be enabled for this pipe
*/
void (*setup_cdp)(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
bool enable);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index dd475827314e..486be346d40d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -282,7 +282,7 @@ static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
u32 scaler_offset, u32 scaler_version,
- const struct dpu_format *format)
+ const struct msm_format *format)
{
u32 op_mode = 0;
u32 phase_init, preload, src_y_rgb, src_uv, dst;
@@ -293,7 +293,7 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
op_mode |= BIT(0);
op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
- if (format && DPU_FORMAT_IS_YUV(format)) {
+ if (format && MSM_FORMAT_IS_YUV(format)) {
op_mode |= BIT(12);
op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
}
@@ -367,7 +367,7 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
end:
- if (format && !DPU_FORMAT_IS_DX(format))
+ if (format && !MSM_FORMAT_IS_DX(format))
op_mode |= BIT(14);
if (format && format->alpha_enable) {
@@ -522,16 +522,16 @@ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
#define CDP_PRELOAD_AHEAD_64 BIT(3)
void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
- const struct dpu_format *fmt, bool enable)
+ const struct msm_format *fmt, bool enable)
{
u32 cdp_cntl = CDP_PRELOAD_AHEAD_64;
if (enable)
cdp_cntl |= CDP_ENABLE;
- if (DPU_FORMAT_IS_UBWC(fmt))
+ if (MSM_FORMAT_IS_UBWC(fmt))
cdp_cntl |= CDP_UBWC_META_ENABLE;
- if (DPU_FORMAT_IS_UBWC(fmt) ||
- DPU_FORMAT_IS_TILE(fmt))
+ if (MSM_FORMAT_IS_UBWC(fmt) ||
+ MSM_FORMAT_IS_TILE(fmt))
cdp_cntl |= CDP_TILE_AMORTIZE_ENABLE;
DPU_REG_WRITE(c, offset, cdp_cntl);
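
dpu_setup_cdp() above derives the client-driven prefetch control word from the format: preload-ahead is always set, UBWC formats additionally enable metadata prefetch, and both UBWC and uncompressed tile formats get amortized tile fetch. A small decision sketch; apart from CDP_PRELOAD_AHEAD_64 (BIT(3), shown in the hunk) the bit values below are placeholders:

#include <stdbool.h>
#include <stdint.h>

#define EX_CDP_ENABLE			(1u << 0)	/* placeholder bit   */
#define EX_CDP_UBWC_META_ENABLE		(1u << 1)	/* placeholder bit   */
#define EX_CDP_TILE_AMORTIZE_ENABLE	(1u << 2)	/* placeholder bit   */
#define EX_CDP_PRELOAD_AHEAD_64		(1u << 3)	/* matches the hunk  */

uint32_t example_cdp_cntl(bool enable, bool is_ubwc, bool is_tile)
{
	uint32_t cdp_cntl = EX_CDP_PRELOAD_AHEAD_64;

	if (enable)
		cdp_cntl |= EX_CDP_ENABLE;
	if (is_ubwc)
		cdp_cntl |= EX_CDP_UBWC_META_ENABLE;
	if (is_ubwc || is_tile)
		cdp_cntl |= EX_CDP_TILE_AMORTIZE_ENABLE;

	return cdp_cntl;
}
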
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 64ded69fa903..67b08e99335d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -344,14 +344,14 @@ void *dpu_hw_util_get_dir(void);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
u32 scaler_offset, u32 scaler_version,
- const struct dpu_format *format);
+ const struct msm_format *format);
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
- const struct dpu_format *fmt, bool enable);
+ const struct msm_format *fmt, bool enable);
u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
u32 total_fl);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index e75995f7fcea..93ff01c889b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -67,7 +67,7 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct dpu_format *fmt = data->dest.format;
+ const struct msm_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
@@ -76,20 +76,20 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
chroma_samp = fmt->chroma_sample;
dst_format = (chroma_samp << 23) |
- (fmt->fetch_planes << 19) |
- (fmt->bits[C3_ALPHA] << 6) |
- (fmt->bits[C2_R_Cr] << 4) |
- (fmt->bits[C1_B_Cb] << 2) |
- (fmt->bits[C0_G_Y] << 0);
+ (fmt->fetch_type << 19) |
+ (fmt->bpc_a << 6) |
+ (fmt->bpc_r_cr << 4) |
+ (fmt->bpc_b_cb << 2) |
+ (fmt->bpc_g_y << 0);
- if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+ if (fmt->bpc_a || fmt->alpha_enable) {
dst_format |= BIT(8); /* DSTC3_EN */
if (!fmt->alpha_enable ||
!(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
dst_format |= BIT(14); /* DST_ALPHA_X */
}
- if (DPU_FORMAT_IS_YUV(fmt))
+ if (MSM_FORMAT_IS_YUV(fmt))
dst_format |= BIT(15);
pattern = (fmt->element[3] << 24) |
@@ -97,8 +97,8 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
(fmt->element[1] << 8) |
(fmt->element[0] << 0);
- dst_format |= (fmt->unpack_align_msb << 18) |
- (fmt->unpack_tight << 17) |
+ dst_format |= ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB ? 1 : 0) << 18) |
+ ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT ? 1 : 0) << 17) |
((fmt->unpack_count - 1) << 12) |
((fmt->bpp - 1) << 9);
@@ -149,7 +149,7 @@ static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
}
static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
bool enable)
{
if (!ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index e671796ea379..37497473e16c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -46,7 +46,7 @@ struct dpu_hw_wb_ops {
struct dpu_hw_qos_cfg *cfg);
void (*setup_cdp)(struct dpu_hw_wb *ctx,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
bool enable);
bool (*setup_clk_force_ctrl)(struct dpu_hw_wb *ctx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a1f5d7c4ab91..1955848b1b78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -348,9 +348,18 @@ static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
kfree(dpu_state);
}
+static void dpu_kms_global_print_state(struct drm_printer *p,
+ const struct drm_private_state *state)
+{
+ const struct dpu_global_state *global_state = to_dpu_global_state(state);
+
+ dpu_rm_print_state(p, global_state);
+}
+
static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
.atomic_duplicate_state = dpu_kms_global_duplicate_state,
.atomic_destroy_state = dpu_kms_global_destroy_state,
+ .atomic_print_state = dpu_kms_global_print_state,
};
static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
@@ -364,6 +373,9 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
&state->base,
&dpu_kms_global_state_funcs);
+
+ state->rm = &dpu_kms->rm;
+
return 0;
}
@@ -970,7 +982,6 @@ static const struct msm_kms_funcs kms_funcs = {
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
.check_modified_format = dpu_format_check_modified_format,
- .get_format = dpu_get_msm_format,
.destroy = dpu_kms_destroy,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index b5db3fc76ca6..e2adc937ea63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -130,6 +130,8 @@ struct vsync_info {
struct dpu_global_state {
struct drm_private_state base;
+ struct dpu_rm *rm;
+
uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ff975ad51145..1c3a2657450c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -113,7 +113,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* Prefill BW Equation: line src bytes * line_time
*/
static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
const struct drm_display_mode *mode,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
@@ -195,7 +195,7 @@ static u64 _dpu_plane_calc_clk(const struct drm_display_mode *mode,
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
enum dpu_qos_lut_usage lut_usage,
- const struct dpu_format *fmt, u32 src_width)
+ const struct msm_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu;
u32 fixed_buff_size;
@@ -214,8 +214,8 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
/* FIXME: in multirect case account for the src_width of all the planes */
- if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
- if (fmt->chroma_sample == DPU_CHROMA_420) {
+ if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == CHROMA_420) {
/* NV12 */
total_fl = (fixed_buff_size / 2) /
((src_width + 32) * fmt->bpp);
@@ -234,9 +234,9 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
}
}
- DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc w:%u fl:%u\n",
pipe->sspp->idx - SSPP_VIG0,
- (char *)&fmt->base.pixel_format,
+ &fmt->pixel_format,
src_width, total_fl);
return total_fl;
@@ -251,7 +251,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
*/
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
+ const struct msm_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_hw_qos_cfg cfg;
@@ -260,7 +260,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
- if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ if (fmt && MSM_FORMAT_IS_LINEAR(fmt))
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
else
lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
@@ -284,26 +284,26 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
pdpu->is_rt_pipe);
trace_dpu_perf_set_qos_luts(pipe->sspp->idx - SSPP_VIG0,
- (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->pixel_format : 0,
pdpu->is_rt_pipe, total_fl, cfg.creq_lut, lut_usage);
- DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc rt:%d fl:%u lut:0x%llx\n",
pdpu->pipe - SSPP_VIG0,
- fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? &fmt->pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, cfg.creq_lut);
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
- (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
cfg.danger_lut,
cfg.safe_lut);
- DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
- pdpu->pipe - SSPP_VIG0,
- fmt ? (char *)&fmt->base.pixel_format : NULL,
- fmt ? fmt->fetch_mode : -1,
- cfg.danger_lut,
- cfg.safe_lut);
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc mode:%d luts[0x%x, 0x%x]\n",
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? &fmt->pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ cfg.danger_lut,
+ cfg.safe_lut);
pipe->sspp->ops.setup_qos_lut(pipe->sspp, &cfg);
}
@@ -425,7 +425,7 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v,
unsigned int rotation)
{
@@ -477,7 +477,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
}
- if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ if (!(MSM_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
&& (src_w == dst_w))
return;
@@ -510,11 +510,11 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
}
static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt)
+ const struct msm_format *fmt)
{
const struct dpu_csc_cfg *csc_ptr;
- if (!DPU_FORMAT_IS_YUV(fmt))
+ if (!MSM_FORMAT_IS_YUV(fmt))
return NULL;
if (BIT(DPU_SSPP_CSC_10BIT) & pipe->sspp->cap->features)
@@ -526,12 +526,12 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
}
static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
- const struct dpu_format *fmt, bool color_fill,
+ const struct msm_format *fmt, bool color_fill,
struct dpu_sw_pipe_cfg *pipe_cfg,
unsigned int rotation)
{
struct dpu_hw_sspp *pipe_hw = pipe->sspp;
- const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+ const struct drm_format_info *info = drm_format_info(fmt->pixel_format);
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
@@ -577,7 +577,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
struct dpu_sw_pipe *pipe,
struct drm_rect *dst_rect,
u32 fill_color,
- const struct dpu_format *fmt)
+ const struct msm_format *fmt)
{
struct dpu_sw_pipe_cfg pipe_cfg;
@@ -615,8 +615,9 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
uint32_t color, uint32_t alpha)
{
- const struct dpu_format *fmt;
+ const struct msm_format *fmt;
const struct drm_plane *plane = &pdpu->base;
+ struct msm_drm_private *priv = plane->dev->dev_private;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24);
@@ -626,7 +627,7 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
* select fill format to match user property expectation,
* h/w only supports RGB variants
*/
- fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+ fmt = mdp_get_format(priv->kms, DRM_FORMAT_ABGR8888, 0);
/* should not happen ever */
if (!fmt)
return;
@@ -704,7 +705,7 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
const struct dpu_sspp_sub_blks *sblk,
- struct drm_rect src, const struct dpu_format *fmt)
+ struct drm_rect src, const struct msm_format *fmt)
{
size_t num_formats;
const u32 *supported_formats;
@@ -723,8 +724,8 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
supported_formats = sblk->rotation_cfg->rot_format_list;
num_formats = sblk->rotation_cfg->rot_num_formats;
- if (!DPU_FORMAT_IS_UBWC(fmt) ||
- !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+ if (!MSM_FORMAT_IS_UBWC(fmt) ||
+ !dpu_find_format(fmt->pixel_format, supported_formats, num_formats))
return -EINVAL;
return 0;
@@ -733,15 +734,15 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
const struct drm_display_mode *mode)
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
- min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+ min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
- if (DPU_FORMAT_IS_YUV(fmt) &&
+ if (MSM_FORMAT_IS_YUV(fmt) &&
(!pipe->sspp->cap->sblk->scaler_blk.len ||
!pipe->sspp->cap->sblk->csc_blk.len)) {
DPU_DEBUG_PLANE(pdpu,
@@ -758,7 +759,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
}
/* valid yuv image */
- if (DPU_FORMAT_IS_YUV(fmt) &&
+ if (MSM_FORMAT_IS_YUV(fmt) &&
(pipe_cfg->src_rect.x1 & 0x1 ||
pipe_cfg->src_rect.y1 & 0x1 ||
drm_rect_width(&pipe_cfg->src_rect) & 0x1 ||
@@ -798,7 +799,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
const struct drm_crtc_state *crtc_state = NULL;
- const struct dpu_format *fmt;
+ const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct drm_rect fb_rect = { 0 };
@@ -858,7 +859,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -E2BIG;
}
- fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
+ fmt = msm_framebuffer_format(new_plane_state->fb);
max_linewidth = pdpu->catalog->caps->max_linewidth;
@@ -870,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
- if (DPU_FORMAT_IS_UBWC(fmt) &&
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
@@ -887,7 +888,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
(!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
- DPU_FORMAT_IS_YUV(fmt)) {
+ MSM_FORMAT_IS_YUV(fmt)) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
@@ -945,8 +946,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
- const struct dpu_format *format =
- to_dpu_format(msm_framebuffer_format(pdpu->base.state->fb));
+ const struct msm_format *format =
+ msm_framebuffer_format(pdpu->base.state->fb);
const struct dpu_csc_cfg *csc_ptr;
if (!pipe->sspp || !pipe->sspp->ops.setup_csc)
@@ -1017,7 +1018,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error)
static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct dpu_format *fmt,
+ const struct msm_format *fmt,
int frame_rate,
struct dpu_hw_fmt_layout *layout)
{
@@ -1095,8 +1096,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
bool is_rt_pipe;
- const struct dpu_format *fmt =
- to_dpu_format(msm_framebuffer_format(fb));
+ const struct msm_format *fmt =
+ msm_framebuffer_format(fb);
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
@@ -1118,9 +1119,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pdpu->is_rt_pipe = is_rt_pipe;
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
- ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+ ", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
- (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+ &fmt->pixel_format, MSM_FORMAT_IS_UBWC(fmt));
dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
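
_dpu_plane_calc_fill_level() above sizes the SSPP fill level from the pipe's fixed buffer and the source width; for pseudo-planar 4:2:0 formats it uses half the buffer and pads the width by 32 pixels. A worked example for an NV12 source 1920 pixels wide, assuming an illustrative 32 KiB pipe buffer (the real value comes from the catalog):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fixed_buff_size = 32 * 1024;	/* assumed example value */
	uint32_t src_width = 1920, bpp = 1;	/* NV12, pseudo-planar 420 */

	uint32_t total_fl = (fixed_buff_size / 2) / ((src_width + 32) * bpp);

	printf("total_fl = %u\n", total_fl);	/* 8 with these inputs */
	return 0;
}
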
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index cb5ce3c62a22..44938ba7a2b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -758,3 +758,59 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
return num_blks;
}
+
+static void dpu_rm_print_state_helper(struct drm_printer *p,
+ struct dpu_hw_blk *blk,
+ uint32_t mapping)
+{
+ if (!blk)
+ drm_puts(p, "- ");
+ else if (!mapping)
+ drm_puts(p, "# ");
+ else
+ drm_printf(p, "%d ", mapping);
+}
+
+
+void dpu_rm_print_state(struct drm_printer *p,
+ const struct dpu_global_state *global_state)
+{
+ const struct dpu_rm *rm = global_state->rm;
+ int i;
+
+ drm_puts(p, "resource mapping:\n");
+ drm_puts(p, "\tpingpong=");
+ for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+ dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
+ global_state->pingpong_to_enc_id[i]);
+ drm_puts(p, "\n");
+
+ drm_puts(p, "\tmixer=");
+ for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+ dpu_rm_print_state_helper(p, rm->mixer_blks[i],
+ global_state->mixer_to_enc_id[i]);
+ drm_puts(p, "\n");
+
+ drm_puts(p, "\tctl=");
+ for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+ dpu_rm_print_state_helper(p, rm->ctl_blks[i],
+ global_state->ctl_to_enc_id[i]);
+ drm_puts(p, "\n");
+
+ drm_puts(p, "\tdspp=");
+ for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+ dpu_rm_print_state_helper(p, rm->dspp_blks[i],
+ global_state->dspp_to_enc_id[i]);
+ drm_puts(p, "\n");
+
+ drm_puts(p, "\tdsc=");
+ for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+ dpu_rm_print_state_helper(p, rm->dsc_blks[i],
+ global_state->dsc_to_enc_id[i]);
+ drm_puts(p, "\n");
+
+ drm_puts(p, "\tcdm=");
+ dpu_rm_print_state_helper(p, rm->cdm_blk,
+ global_state->cdm_to_enc_id);
+ drm_puts(p, "\n");
+}
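
dpu_rm_print_state_helper() encodes each hardware block as "-" when the catalog does not provide it, "#" when it exists but is unassigned, and the owning encoder id otherwise; the new atomic_print_state hook wired up in dpu_kms.c routes this table into the DRM atomic state dump. A tiny standalone demo of the same convention, with made-up block/encoder values:

#include <stdio.h>
#include <stdint.h>

static void print_mapping(int block_present, uint32_t enc_id)
{
	if (!block_present)
		printf("- ");
	else if (!enc_id)
		printf("# ");
	else
		printf("%u ", enc_id);
}

int main(void)
{
	/* e.g. pingpong blocks: PP0 owned by encoder 31, PP1 idle, PP2 absent */
	printf("\tpingpong=");
	print_mapping(1, 31);
	print_mapping(1, 0);
	print_mapping(0, 0);
	printf("\n");
	return 0;
}
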
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index e3f83ebc656b..e63db8ace6b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -90,6 +90,14 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
/**
+ * dpu_rm_print_state - output the RM private state
+ * @p: DRM printer
+ * @global_state: global state
+ */
+void dpu_rm_print_state(struct drm_printer *p,
+ const struct dpu_global_state *global_state);
+
+/**
* dpu_rm_get_intf - Return a struct dpu_hw_intf instance given it's index.
* @rm: DPU Resource Manager handle
* @intf_idx: INTF's index
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
deleted file mode 100644
index cc8fde450884..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ /dev/null
@@ -1,1181 +0,0 @@
-#ifndef MDP4_XML
-#define MDP4_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum mdp4_pipe {
- VG1 = 0,
- VG2 = 1,
- RGB1 = 2,
- RGB2 = 3,
- RGB3 = 4,
- VG3 = 5,
- VG4 = 6,
-};
-
-enum mdp4_mixer {
- MIXER0 = 0,
- MIXER1 = 1,
- MIXER2 = 2,
-};
-
-enum mdp4_intf {
- INTF_LCDC_DTV = 0,
- INTF_DSI_VIDEO = 1,
- INTF_DSI_CMD = 2,
- INTF_EBI2_TV = 3,
-};
-
-enum mdp4_cursor_format {
- CURSOR_ARGB = 1,
- CURSOR_XRGB = 2,
-};
-
-enum mdp4_frame_format {
- FRAME_LINEAR = 0,
- FRAME_TILE_ARGB_4X4 = 1,
- FRAME_TILE_YCBCR_420 = 2,
-};
-
-enum mdp4_scale_unit {
- SCALE_FIR = 0,
- SCALE_MN_PHASE = 1,
- SCALE_PIXEL_RPT = 2,
-};
-
-enum mdp4_dma {
- DMA_P = 0,
- DMA_S = 1,
- DMA_E = 2,
-};
-
-#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
-#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
-#define MDP4_IRQ_DMA_S_DONE 0x00000004
-#define MDP4_IRQ_DMA_E_DONE 0x00000008
-#define MDP4_IRQ_DMA_P_DONE 0x00000010
-#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
-#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
-#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
-#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
-#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
-#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
-#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
-#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
-#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
-#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
-#define REG_MDP4_VERSION 0x00000000
-#define MDP4_VERSION_MINOR__MASK 0x00ff0000
-#define MDP4_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
-{
- return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
-}
-#define MDP4_VERSION_MAJOR__MASK 0xff000000
-#define MDP4_VERSION_MAJOR__SHIFT 24
-static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
-{
- return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
-}
-
-#define REG_MDP4_OVLP0_KICK 0x00000004
-
-#define REG_MDP4_OVLP1_KICK 0x00000008
-
-#define REG_MDP4_OVLP2_KICK 0x000000d0
-
-#define REG_MDP4_DMA_P_KICK 0x0000000c
-
-#define REG_MDP4_DMA_S_KICK 0x00000010
-
-#define REG_MDP4_DMA_E_KICK 0x00000014
-
-#define REG_MDP4_DISP_STATUS 0x00000018
-
-#define REG_MDP4_DISP_INTF_SEL 0x00000038
-#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
-#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
-static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
-{
- return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
-}
-#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
-#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
-static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
-{
- return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
-}
-#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
-#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
-static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
-{
- return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
-}
-#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
-#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
-
-#define REG_MDP4_RESET_STATUS 0x0000003c
-
-#define REG_MDP4_READ_CNFG 0x0000004c
-
-#define REG_MDP4_INTR_ENABLE 0x00000050
-
-#define REG_MDP4_INTR_STATUS 0x00000054
-
-#define REG_MDP4_INTR_CLEAR 0x00000058
-
-#define REG_MDP4_EBI2_LCD0 0x00000060
-
-#define REG_MDP4_EBI2_LCD1 0x00000064
-
-#define REG_MDP4_PORTMAP_MODE 0x00000070
-
-#define REG_MDP4_CS_CONTROLLER0 0x000000c0
-
-#define REG_MDP4_CS_CONTROLLER1 0x000000c4
-
-#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
-
-#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
-
-#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
-
-#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
-
-#define REG_MDP4_VG2_CONST_COLOR 0x00031008
-
-#define REG_MDP4_OVERLAY_FLUSH 0x00018000
-#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
-#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
-#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
-#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
-#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
-#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
-
-static inline uint32_t __offset_OVLP(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00010000;
- case 1: return 0x00018000;
- case 2: return 0x00088000;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
-#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
-}
-#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
-
-static inline uint32_t __offset_STAGE(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00000104;
- case 1: return 0x00000124;
- case 2: return 0x00000144;
- case 3: return 0x00000160;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
-#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
-{
- return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
-}
-#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
-#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
-#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
-#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
-{
- return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
-}
-#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
-#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
-#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
-#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00001004;
- case 1: return 0x00001404;
- case 2: return 0x00001804;
- case 3: return 0x00001b84;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
-#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
-
-
-static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
-
-#define REG_MDP4_DMA_P_OP_MODE 0x00090070
-
-static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
-
-static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
-
-#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
-
-static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
-
-static inline uint32_t __offset_DMA(enum mdp4_dma idx)
-{
- switch (idx) {
- case DMA_P: return 0x00090000;
- case DMA_S: return 0x000a0000;
- case DMA_E: return 0x000b0000;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
-#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
-#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
-#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
-#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
-#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
-#define MDP4_DMA_CONFIG_PACK__SHIFT 8
-static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
-{
- return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
-}
-#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
-#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
-
-static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
-#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
-}
-#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
-#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
-}
-#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
-#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
-}
-#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
-#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
-#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
-static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
-{
- return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
-}
-#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
-#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
-static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
-{
- return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
-static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
-{
- return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
-}
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
-
-
-static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
-#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
-static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
-}
-#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
-#define MDP4_PIPE_SRC_XY_X__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
-#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
-#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
-#define MDP4_PIPE_DST_XY_Y__SHIFT 16
-static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
-{
- return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
-}
-#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
-#define MDP4_PIPE_DST_XY_X__SHIFT 0
-static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
-{
- return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
-#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
-}
-#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
-#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
-#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
-}
-#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
-#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
-#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
-#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
-#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
-#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
-#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
-#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
-#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000
-#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
-#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000
-#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000
-#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val)
-{
- return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
-#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
-#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
-#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
-#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
-{
- return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
-#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
-#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
-#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c
-#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2
-static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val)
-{
- return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK;
-}
-#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030
-#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4
-static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val)
-{
- return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK;
-}
-#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
-#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
-#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
-#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
-#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
-#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
-#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
-#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
-#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
-
-static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
-
-
-static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
-
-#define REG_MDP4_LCDC 0x000c0000
-
-#define REG_MDP4_LCDC_ENABLE 0x000c0000
-
-#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
-#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
-#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
-static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
-{
- return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
-#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
-static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
-{
- return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
-
-#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
-
-#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
-#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
-#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
-static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
-{
- return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
-#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
-static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
-{
- return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
-
-#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
-
-#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
-#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
-#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
-static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
-{
- return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
-#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
-static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
-{
- return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
-
-#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
-
-#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
-
-#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
-
-#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
-#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
-#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
-static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
- return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
-
-#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
-
-#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
-
-#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
-#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
-#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
-#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
-
-#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000
-#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004
-#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008
-#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040
-#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000
-#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000
-#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000
-
-static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
-
-static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK;
-}
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK;
-}
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK;
-}
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000
-#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK;
-}
-
-static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; }
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK;
-}
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK;
-}
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000
-#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16
-static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val)
-{
- return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK;
-}
-
-#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020
-
-#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024
-
-#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080
-
-#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108
-
-#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100
-#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010
-#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040
-#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080
-
-#define REG_MDP4_DTV 0x000d0000
-
-#define REG_MDP4_DTV_ENABLE 0x000d0000
-
-#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
-#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
-#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
-static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
-{
- return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
-#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
-static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
-{
- return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
-
-#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
-
-#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
-#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
-#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
-static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
-{
- return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
-#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
-static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
-{
- return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
-
-#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
-
-#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
-#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
-#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
-static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
-{
- return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
-#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
-static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
-{
- return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
-
-#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
-
-#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
-
-#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
-
-#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
-#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
-#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
-static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
- return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
-
-#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
-
-#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
-
-#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
-#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
-#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
-#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
-
-#define REG_MDP4_DSI 0x000e0000
-
-#define REG_MDP4_DSI_ENABLE 0x000e0000
-
-#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
-#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
-#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
-static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
-{
- return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
-#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
-static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
-{
- return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
-
-#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
-
-#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
-#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
-#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
-static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
-{
- return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
-#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
-static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
-{
- return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
-
-#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
-
-#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
-#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
-#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
-static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
-{
- return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
-#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
-static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
-{
- return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
-
-#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
-
-#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
-
-#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
-
-#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
-#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
-#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
-static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
- return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
-
-#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
-
-#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
-
-#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
-#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
-#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
-#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
-
-
-#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 75f93e346282..b8610aa806ea 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -182,8 +182,8 @@ static void blend_setup(struct drm_crtc *crtc)
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
- const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->state->fb));
+ const struct msm_format *format =
+ msm_framebuffer_format(plane->state->fb);
alpha[idx-1] = format->alpha_enable;
}
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4ba1cb74ad76..6e4e74f9d63d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -151,7 +151,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.flush_commit = mdp4_flush_commit,
.wait_flush = mdp4_wait_flush,
.complete_commit = mdp4_complete_commit,
- .get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.destroy = mdp4_destroy,
},
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 01179e764a29..94b1ba92785f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -44,12 +44,12 @@ struct mdp4_kms {
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
- msm_writel(data, mdp4_kms->mmio + reg);
+ writel(data, mdp4_kms->mmio + reg);
}
static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
{
- return msm_readl(mdp4_kms->mmio + reg);
+ return readl(mdp4_kms->mmio + reg);
}
static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index b689b618da78..3fefb2088008 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -20,12 +20,6 @@ struct mdp4_plane {
const char *name;
enum mdp4_pipe pipe;
-
- uint32_t caps;
- uint32_t nformats;
- uint32_t formats[32];
-
- bool enabled;
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
@@ -59,15 +53,6 @@ static struct mdp4_kms *get_kms(struct drm_plane *plane)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_plane_destroy(struct drm_plane *plane)
-{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
- drm_plane_cleanup(plane);
-
- kfree(mdp4_plane);
-}
-
/* helper to install properties which are common to planes and crtcs */
static void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
@@ -85,7 +70,6 @@ static int mdp4_plane_set_property(struct drm_plane *plane,
static const struct drm_plane_funcs mdp4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = mdp4_plane_destroy,
.set_property = mdp4_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -218,7 +202,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- const struct mdp_format *format;
+ const struct msm_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -241,7 +225,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- format = to_mdp_format(msm_framebuffer_format(fb));
+ format = msm_framebuffer_format(fb);
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
@@ -267,7 +251,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
- if (MDP_FORMAT_IS_YUV(format)) {
+ if (MSM_FORMAT_IS_YUV(format)) {
if (crtc_w > src_w)
sel_unit = SCALE_PIXEL_RPT;
else if (crtc_w <= (src_w / 4))
@@ -283,7 +267,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
- if (MDP_FORMAT_IS_YUV(format)) {
+ if (MSM_FORMAT_IS_YUV(format)) {
if (crtc_h > src_h)
sel_unit = SCALE_PIXEL_RPT;
@@ -316,24 +300,25 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
- MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
- MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
- MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r_cr) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g_y) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b_cb) |
COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
- MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->bpp - 1) |
MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) |
- COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+ COND(format->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT,
+ MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
- MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
- MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
- MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
- MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->element[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->element[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->element[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->element[3]));
- if (MDP_FORMAT_IS_YUV(format)) {
+ if (MSM_FORMAT_IS_YUV(format)) {
struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB);
op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR;
@@ -371,37 +356,81 @@ static const uint64_t supported_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
+static const uint32_t mdp4_rgb_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+static const uint32_t mdp4_rgb_yuv_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
{
struct drm_plane *plane = NULL;
struct mdp4_plane *mdp4_plane;
- int ret;
enum drm_plane_type type;
+ uint32_t pipe_caps;
+ const uint32_t *formats;
+ size_t nformats;
- mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
- if (!mdp4_plane) {
- ret = -ENOMEM;
- goto fail;
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+
+ pipe_caps = mdp4_pipe_caps(pipe_id);
+ if (pipe_supports_yuv(pipe_caps)) {
+ formats = mdp4_rgb_yuv_formats;
+ nformats = ARRAY_SIZE(mdp4_rgb_yuv_formats);
+ } else {
+ formats = mdp4_rgb_formats;
+ nformats = ARRAY_SIZE(mdp4_rgb_formats);
}
+ mdp4_plane = drmm_universal_plane_alloc(dev, struct mdp4_plane, base,
+ 0xff, &mdp4_plane_funcs,
+ formats, nformats,
+ supported_format_modifiers,
+ type, NULL);
+ if (IS_ERR(mdp4_plane))
+ return ERR_CAST(mdp4_plane);
+
plane = &mdp4_plane->base;
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];
- mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
-
- mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
- ARRAY_SIZE(mdp4_plane->formats),
- !pipe_supports_yuv(mdp4_plane->caps));
-
- type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats,
- supported_format_modifiers, type, NULL);
- if (ret)
- goto fail;
drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
@@ -410,10 +439,4 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
return plane;
-
-fail:
- if (plane)
- mdp4_plane_destroy(plane);
-
- return ERR_PTR(ret);
}
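
Side note (not part of the patch above): the last hunk replaces the manual kzalloc() + drm_universal_plane_init() + goto-fail unwind with drmm_universal_plane_alloc(), so the plane's lifetime becomes tied to the drm_device and the explicit destroy/kfree path can be dropped. A minimal sketch of that general pattern is shown below; "struct my_plane" and "my_plane_create" are hypothetical names used only for illustration, not identifiers from this driver.

/*
 * Illustrative sketch of a drmm-managed plane allocation, assuming a
 * hypothetical driver wrapper "struct my_plane" embedding struct drm_plane
 * as "base". drmm_universal_plane_alloc() allocates the wrapper, runs
 * drm_universal_plane_init() on the embedded plane, and registers a
 * drm_device-managed release, so no explicit error-unwind or destroy
 * callback is needed for the allocation itself.
 */
#include <drm/drm_plane.h>
#include <drm/drm_managed.h>

struct my_plane {
	struct drm_plane base;
	/* driver-private per-plane state would live here */
};

static struct drm_plane *my_plane_create(struct drm_device *dev,
					 const struct drm_plane_funcs *funcs,
					 const uint32_t *formats,
					 unsigned int nformats,
					 const uint64_t *modifiers,
					 enum drm_plane_type type)
{
	struct my_plane *p;

	p = drmm_universal_plane_alloc(dev, struct my_plane, base,
				       0xff /* possible_crtcs */, funcs,
				       formats, nformats, modifiers,
				       type, NULL);
	if (IS_ERR(p))
		return ERR_CAST(p);

	return &p->base;
}

The upstream hunk follows the same shape: it picks the format table from the pipe capabilities first, then lets the managed allocator do both the allocation and the universal-plane init, returning ERR_CAST() directly on failure instead of jumping to a fail label.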
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
deleted file mode 100644
index 270e11c904bd..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ /dev/null
@@ -1,1979 +0,0 @@
-#ifndef MDP5_XML
-#define MDP5_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum mdp5_intf_type {
- INTF_DISABLED = 0,
- INTF_DSI = 1,
- INTF_HDMI = 3,
- INTF_LCDC = 5,
- INTF_eDP = 9,
- INTF_VIRTUAL = 100,
- INTF_WB = 101,
-};
-
-enum mdp5_intfnum {
- NO_INTF = 0,
- INTF0 = 1,
- INTF1 = 2,
- INTF2 = 3,
- INTF3 = 4,
-};
-
-enum mdp5_pipe {
- SSPP_NONE = 0,
- SSPP_VIG0 = 1,
- SSPP_VIG1 = 2,
- SSPP_VIG2 = 3,
- SSPP_RGB0 = 4,
- SSPP_RGB1 = 5,
- SSPP_RGB2 = 6,
- SSPP_DMA0 = 7,
- SSPP_DMA1 = 8,
- SSPP_VIG3 = 9,
- SSPP_RGB3 = 10,
- SSPP_CURSOR0 = 11,
- SSPP_CURSOR1 = 12,
-};
-
-enum mdp5_format {
- DUMMY = 0,
-};
-
-enum mdp5_ctl_mode {
- MODE_NONE = 0,
- MODE_WB_0_BLOCK = 1,
- MODE_WB_1_BLOCK = 2,
- MODE_WB_0_LINE = 3,
- MODE_WB_1_LINE = 4,
- MODE_WB_2_LINE = 5,
-};
-
-enum mdp5_pack_3d {
- PACK_3D_FRAME_INT = 0,
- PACK_3D_H_ROW_INT = 1,
- PACK_3D_V_ROW_INT = 2,
- PACK_3D_COL_INT = 3,
-};
-
-enum mdp5_scale_filter {
- SCALE_FILTER_NEAREST = 0,
- SCALE_FILTER_BIL = 1,
- SCALE_FILTER_PCMN = 2,
- SCALE_FILTER_CA = 3,
-};
-
-enum mdp5_pipe_bwc {
- BWC_LOSSLESS = 0,
- BWC_Q_HIGH = 1,
- BWC_Q_MED = 2,
-};
-
-enum mdp5_cursor_format {
- CURSOR_FMT_ARGB8888 = 0,
- CURSOR_FMT_ARGB1555 = 2,
- CURSOR_FMT_ARGB4444 = 4,
-};
-
-enum mdp5_cursor_alpha {
- CURSOR_ALPHA_CONST = 0,
- CURSOR_ALPHA_PER_PIXEL = 2,
-};
-
-enum mdp5_igc_type {
- IGC_VIG = 0,
- IGC_RGB = 1,
- IGC_DMA = 2,
- IGC_DSPP = 3,
-};
-
-enum mdp5_data_format {
- DATA_FORMAT_RGB = 0,
- DATA_FORMAT_YUV = 1,
-};
-
-enum mdp5_block_size {
- BLOCK_SIZE_64 = 0,
- BLOCK_SIZE_128 = 1,
-};
-
-enum mdp5_rotate_mode {
- ROTATE_0 = 0,
- ROTATE_90 = 1,
-};
-
-enum mdp5_chroma_downsample_method {
- DS_MTHD_NO_PIXEL_DROP = 0,
- DS_MTHD_PIXEL_DROP = 1,
-};
-
-#define MDP5_IRQ_WB_0_DONE 0x00000001
-#define MDP5_IRQ_WB_1_DONE 0x00000002
-#define MDP5_IRQ_WB_2_DONE 0x00000010
-#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
-#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
-#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
-#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
-#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
-#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
-#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
-#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
-#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
-#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
-#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
-#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
-#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
-#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
-#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
-#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
-#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
-#define MDP5_IRQ_INTF0_VSYNC 0x02000000
-#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
-#define MDP5_IRQ_INTF1_VSYNC 0x08000000
-#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
-#define MDP5_IRQ_INTF2_VSYNC 0x20000000
-#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
-#define MDP5_IRQ_INTF3_VSYNC 0x80000000
-#define REG_MDSS_HW_VERSION 0x00000000
-#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDSS_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
-{
- return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
-}
-#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDSS_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
-{
- return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
-}
-#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDSS_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
-{
- return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
-}
-
-#define REG_MDSS_HW_INTR_STATUS 0x00000010
-#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
-#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
-#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
-#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
-#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-
-#define REG_MDP5_HW_VERSION 0x00000000
-#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDP5_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
-{
- return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
-}
-#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDP5_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
-{
- return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
-}
-#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
-{
- return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
-}
-
-#define REG_MDP5_DISP_INTF_SEL 0x00000004
-#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
-{
- return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
-}
-#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
-{
- return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
-}
-#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
-{
- return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
-}
-#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
-{
- return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
-}
-
-#define REG_MDP5_INTR_EN 0x00000010
-
-#define REG_MDP5_INTR_STATUS 0x00000014
-
-#define REG_MDP5_INTR_CLEAR 0x00000018
-
-#define REG_MDP5_HIST_INTR_EN 0x0000001c
-
-#define REG_MDP5_HIST_INTR_STATUS 0x00000020
-
-#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
-
-#define REG_MDP5_SPARE_0 0x00000028
-#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
-
-static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
-}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
-}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
-}
-
-static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
-
-static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
-}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
-}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
-{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
-}
-
-static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
-{
- switch (idx) {
- case IGC_VIG: return 0x00000200;
- case IGC_RGB: return 0x00000210;
- case IGC_DMA: return 0x00000220;
- case IGC_DSPP: return 0x00000300;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
-
-static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
-{
- return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
-}
-#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-
-#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
-
-#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
-#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
-#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
-#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
-
-#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
-#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
-#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
-#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
-
-static inline uint32_t __offset_CTL(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->ctl.base[0]);
- case 1: return (mdp5_cfg->ctl.base[1]);
- case 2: return (mdp5_cfg->ctl.base[2]);
- case 3: return (mdp5_cfg->ctl.base[3]);
- case 4: return (mdp5_cfg->ctl.base[4]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
-
-static inline uint32_t __offset_LAYER(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00000000;
- case 1: return 0x00000004;
- case 2: return 0x00000008;
- case 3: return 0x0000000c;
- case 4: return 0x00000010;
- case 5: return 0x00000024;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
-
-static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
-#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
-#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
-}
-#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
-#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
-}
-#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
-#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
-}
-#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
-#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
-}
-#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
-#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
-}
-#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
-#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
-}
-#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
-#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
-}
-#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
-#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
-}
-#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
-#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
-#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
-#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
-}
-#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
-#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
-{
- return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
-}
-
-static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
-#define MDP5_CTL_OP_MODE__MASK 0x0000000f
-#define MDP5_CTL_OP_MODE__SHIFT 0
-static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
-{
- return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
-}
-#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
-#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
-static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
-{
- return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
-}
-#define MDP5_CTL_OP_CMD_MODE 0x00020000
-#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
-#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
-#define MDP5_CTL_OP_PACK_3D__SHIFT 20
-static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
-{
- return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
-}
-
-static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
-#define MDP5_CTL_FLUSH_VIG0 0x00000001
-#define MDP5_CTL_FLUSH_VIG1 0x00000002
-#define MDP5_CTL_FLUSH_VIG2 0x00000004
-#define MDP5_CTL_FLUSH_RGB0 0x00000008
-#define MDP5_CTL_FLUSH_RGB1 0x00000010
-#define MDP5_CTL_FLUSH_RGB2 0x00000020
-#define MDP5_CTL_FLUSH_LM0 0x00000040
-#define MDP5_CTL_FLUSH_LM1 0x00000080
-#define MDP5_CTL_FLUSH_LM2 0x00000100
-#define MDP5_CTL_FLUSH_LM3 0x00000200
-#define MDP5_CTL_FLUSH_LM4 0x00000400
-#define MDP5_CTL_FLUSH_DMA0 0x00000800
-#define MDP5_CTL_FLUSH_DMA1 0x00001000
-#define MDP5_CTL_FLUSH_DSPP0 0x00002000
-#define MDP5_CTL_FLUSH_DSPP1 0x00004000
-#define MDP5_CTL_FLUSH_DSPP2 0x00008000
-#define MDP5_CTL_FLUSH_WB 0x00010000
-#define MDP5_CTL_FLUSH_CTL 0x00020000
-#define MDP5_CTL_FLUSH_VIG3 0x00040000
-#define MDP5_CTL_FLUSH_RGB3 0x00080000
-#define MDP5_CTL_FLUSH_LM5 0x00100000
-#define MDP5_CTL_FLUSH_DSPP3 0x00200000
-#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
-#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
-#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
-#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
-#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
-#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
-#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
-
-static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
-
-static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
-
-static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00000040;
- case 1: return 0x00000044;
- case 2: return 0x00000048;
- case 3: return 0x0000004c;
- case 4: return 0x00000050;
- case 5: return 0x00000054;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
-
-static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
-#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
-#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
-#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
-#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
-#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
-#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
-#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
-#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
-#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
-#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
-#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
-#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
-static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
-}
-#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
-#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
-static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
-{
- return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
-}
-
-static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
-{
- switch (idx) {
- case SSPP_NONE: return (INVALID_IDX(idx));
- case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
- case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
- case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
- case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
- case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
- case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
- case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
- case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
- case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
- case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
- case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
- case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); }
-#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000
-#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19
-static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val)
-{
- return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
-}
-#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000
-#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18
-static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val)
-{
- return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
-}
-#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000
-
-static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); }
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK;
-}
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); }
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK;
-}
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); }
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK;
-}
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); }
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK;
-}
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); }
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
-#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
-#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff
-#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK;
-}
-#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00
-#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8
-static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
-#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff
-#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK;
-}
-#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00
-#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8
-static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
-#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff
-#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
-#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff
-#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0
-static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val)
-{
- return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
-}
-#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
-}
-#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
-#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
-}
-#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
-#define MDP5_PIPE_SRC_XY_X__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
-#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
-}
-#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
-#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
-#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
-static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
-{
- return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
-}
-#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
-#define MDP5_PIPE_OUT_XY_X__SHIFT 0
-static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
-{
- return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
-#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
-}
-#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
-#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
-#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
-}
-#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
-#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
-#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
-#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
-#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
-#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
-#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
-#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
-#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
-#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
-#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
-#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
-}
-#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
-#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
-{
- return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
-#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
-static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
-}
-#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
-#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
-static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
-}
-#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
-#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
-static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
-}
-#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
-#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
-static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
-#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
-#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
-static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
-{
- return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
-}
-#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
-#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
-#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
-#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
-#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
-#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
-#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
-#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000
-
-static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
-#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
-#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
-static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
-}
-#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
-#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
-static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
-{
- return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
-}
-
-static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx)
-{
- switch (idx) {
- case COMP_0: return 0x00000100;
- case COMP_1_2: return 0x00000110;
- case COMP_3: return 0x00000120;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
-
-static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
-#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff
-#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00
-#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000
-#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000
-#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
-#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff
-#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00
-#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000
-#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000
-#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
-#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff
-#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK;
-}
-#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000
-#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16
-static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val)
-{
- return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
-}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
-}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
-}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
-}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
-}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
-{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
-}
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
-
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
-
-static inline uint32_t __offset_LM(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->lm.base[0]);
- case 1: return (mdp5_cfg->lm.base[1]);
- case 2: return (mdp5_cfg->lm.base[2]);
- case 3: return (mdp5_cfg->lm.base[3]);
- case 4: return (mdp5_cfg->lm.base[4]);
- case 5: return (mdp5_cfg->lm.base[5]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
-#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
-#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
-
-static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
-#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
-{
- return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
-}
-#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
-{
- return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-
-static inline uint32_t __offset_BLEND(uint32_t idx)
-{
- switch (idx) {
- case 0: return 0x00000020;
- case 1: return 0x00000050;
- case 2: return 0x00000080;
- case 3: return 0x000000b0;
- case 4: return 0x00000230;
- case 5: return 0x00000260;
- case 6: return 0x00000290;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
-#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
-#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
-static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
-{
- return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
-}
-#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
-#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
-#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
-#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
-#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
-#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
-static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
-{
- return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
-}
-#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
-#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
-#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
-#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
-#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK;
-}
-#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000
-#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16
-static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff
-#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK;
-}
-#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000
-#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16
-static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff
-#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK;
-}
-#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000
-#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16
-static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff
-#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007
-#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val)
-{
- return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff
-#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0
-static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK;
-}
-#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000
-#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16
-static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val)
-{
- return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK;
-}
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
-#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001
-#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006
-#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1
-static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val)
-{
- return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK;
-}
-#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
-
-static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
-
-static inline uint32_t __offset_DSPP(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->dspp.base[0]);
- case 1: return (mdp5_cfg->dspp.base[1]);
- case 2: return (mdp5_cfg->dspp.base[2]);
- case 3: return (mdp5_cfg->dspp.base[3]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
-#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
-#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
-#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
-static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
-{
- return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
-}
-#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
-#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
-#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
-#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
-#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
-#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
-#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
-#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
-
-static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
-
-static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
-
-static inline uint32_t __offset_PP(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->pp.base[0]);
- case 1: return (mdp5_cfg->pp.base[1]);
- case 2: return (mdp5_cfg->pp.base[2]);
- case 3: return (mdp5_cfg->pp.base[3]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
-#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
-#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
-static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
-}
-#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
-#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
-
-static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
-#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
-#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
-static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
-}
-#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
-#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
-static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
-}
-
-static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
-#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
-#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
-static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
-}
-#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
-#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
-static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
-{
- return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
-}
-
-static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
-#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
-#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
-static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
-{
- return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
-}
-#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
-#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
-static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
-{
- return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
-}
-
-static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
-
-static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
-
-static inline uint32_t __offset_WB(uint32_t idx)
-{
- switch (idx) {
-#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
- case 0: return (mdp5_cfg->wb.base[0]);
- case 1: return (mdp5_cfg->wb.base[1]);
- case 2: return (mdp5_cfg->wb.base[2]);
- case 3: return (mdp5_cfg->wb.base[3]);
- case 4: return (mdp5_cfg->wb.base[4]);
-#endif
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
-#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003
-#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0
-static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c
-#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2
-static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030
-#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4
-static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0
-#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6
-static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100
-#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600
-#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9
-static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK;
-}
-#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000
-#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12
-static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000
-#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000
-#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000
-#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000
-#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19
-static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000
-#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000
-#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23
-static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK;
-}
-#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000
-#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26
-static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK;
-}
-#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000
-#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30
-static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); }
-#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001
-#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006
-#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1
-static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010
-#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4
-static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020
-#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5
-static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040
-#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100
-#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200
-#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9
-static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400
-#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10
-static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12
-static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13
-static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK;
-}
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000
-#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14
-static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); }
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0
-static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK;
-}
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8
-static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK;
-}
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16
-static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK;
-}
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000
-#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24
-static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); }
-#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff
-#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0
-static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK;
-}
-#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000
-#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16
-static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); }
-#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff
-#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0
-static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK;
-}
-#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000
-#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16
-static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val)
-{
- return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); }
-#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff
-#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0
-static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val)
-{
- return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK;
-}
-#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000
-#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16
-static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val)
-{
- return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); }
-
-static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); }
-#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
-#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK;
-}
-#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
-#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); }
-#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
-#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK;
-}
-#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
-#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); }
-#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
-#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK;
-}
-#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
-#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); }
-#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
-#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK;
-}
-#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
-#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); }
-#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
-#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
-#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff
-#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK;
-}
-#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00
-#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8
-static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
-#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff
-#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK;
-}
-#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00
-#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8
-static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
-#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff
-#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK;
-}
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
-#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff
-#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0
-static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val)
-{
- return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK;
-}
-
-static inline uint32_t __offset_INTF(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->intf.base[0]);
- case 1: return (mdp5_cfg->intf.base[1]);
- case 2: return (mdp5_cfg->intf.base[2]);
- case 3: return (mdp5_cfg->intf.base[3]);
- case 4: return (mdp5_cfg->intf.base[4]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
-#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
-#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
-static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
-{
- return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
-}
-#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
-#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
-static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
-{
- return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
-}
-
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
-#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
-#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
-static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
-{
- return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
-}
-#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
-
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
-#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
-#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
-static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
-{
- return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
-}
-
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
-#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
-#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
-static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
-{
- return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
-}
-#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
-#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
-static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
-{
- return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
-}
-
-static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
-#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
-#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
-static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
-{
- return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
-}
-#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
-#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
-static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
-{
- return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
-}
-#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
-
-static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
-#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
-#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
-#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
-
-static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
-
-static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
-
-static inline uint32_t __offset_AD(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->ad.base[0]);
- case 1: return (mdp5_cfg->ad.base[1]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
-
-static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
-
-
-#endif /* MDP5_XML */
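Note on the generated helpers removed above (the same pattern still backs the REG_MDP5_*/MDP5_* names used by the driver hunks below): each REG_MDP5_*() inline resolves a block-relative register address from the per-SoC base tables in mdp5_cfg, and each field macro packs a value as ((val) << SHIFT) & MASK, so a register write is built by OR-ing field macros together and handing the result to mdp5_write(). A minimal usage sketch follows; the wrapper name and the count value are illustrative only, not taken from the driver:

/* Sketch only: program a PP block's vsync counter via the generated helpers. */
static void example_program_pp_vsync(struct mdp5_kms *mdp5_kms, int pp_id)
{
	/* Illustrative count; a real driver derives this from the panel timing. */
	mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id),
		   MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(0x3fff) |
		   MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN |
		   MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN);
}
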
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 26c5d8b4ab46..4b988e69fbfc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -69,6 +69,16 @@ struct mdp5_mdp_block {
uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */
};
+struct mdp5_wb_instance {
+ int id;
+ int lm;
+};
+
+struct mdp5_wb_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ struct mdp5_wb_instance instances[MAX_BASES];
+};
+
#define MDP5_INTF_NUM_MAX 5
struct mdp5_intf_block {
@@ -98,6 +108,7 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block pp;
struct mdp5_sub_block dsc;
struct mdp5_sub_block cdm;
+ struct mdp5_wb_block wb;
struct mdp5_intf_block intf;
struct mdp5_perf_block perf;
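Note: the new wb block in mdp5_cfg_hw supplies the wb.base[] table that the generated __offset_WB() helper above still stubs out ("#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */"). Once a config populates those bases, the helper can be enabled in the same shape as the other __offset_*() switches; the sketch below is simply that commented-out code re-enabled, shown for orientation rather than as part of this series:

static inline uint32_t __offset_WB(uint32_t idx)
{
	switch (idx) {
	case 0: return (mdp5_cfg->wb.base[0]);
	case 1: return (mdp5_cfg->wb.base[1]);
	case 2: return (mdp5_cfg->wb.base[2]);
	case 3: return (mdp5_cfg->wb.base[3]);
	case 4: return (mdp5_cfg->wb.base[4]);
	default: return INVALID_IDX(idx);
	}
}
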
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 4a3db2ea1689..0f653e62b4a0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -216,7 +216,7 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
- const struct mdp_format *format;
+ const struct msm_format *format;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
uint32_t lm = mixer->lm;
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
@@ -274,7 +274,7 @@ static void blend_setup(struct drm_crtc *crtc)
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
} else if (plane_cnt) {
- format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+ format = msm_framebuffer_format(pstates[STAGE_BASE]->base.fb);
if (format->alpha_enable)
bg_alpha_enabled = true;
@@ -285,8 +285,7 @@ static void blend_setup(struct drm_crtc *crtc)
if (!pstates[i])
continue;
- format = to_mdp_format(
- msm_framebuffer_format(pstates[i]->base.fb));
+ format = msm_framebuffer_format(pstates[i]->base.fb);
plane = pstates[i]->base.plane;
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index a874fd95cc20..374704cce656 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -224,7 +224,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.prepare_commit = mdp5_prepare_commit,
.wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
- .get_format = mdp_get_format,
.destroy = mdp5_kms_destroy,
},
.set_irqmask = mdp5_set_irqmask,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index fac9f05aa639..36b6842dfc9c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -171,13 +171,13 @@ struct mdp5_encoder {
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
WARN_ON(mdp5_kms->enable_count <= 0);
- msm_writel(data, mdp5_kms->mmio + reg);
+ writel(data, mdp5_kms->mmio + reg);
}
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
WARN_ON(mdp5_kms->enable_count <= 0);
- return msm_readl(mdp5_kms->mmio + reg);
+ return readl(mdp5_kms->mmio + reg);
}
static inline const char *stage2name(enum mdp_mixer_stage_id stage)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 0d5ff03cb091..62de248ed1b0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -17,9 +17,6 @@
struct mdp5_plane {
struct drm_plane base;
-
- uint32_t nformats;
- uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
@@ -38,15 +35,6 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->visible;
}
-static void mdp5_plane_destroy(struct drm_plane *plane)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- drm_plane_cleanup(plane);
-
- kfree(mdp5_plane);
-}
-
/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
@@ -138,7 +126,6 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = mdp5_plane_destroy,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -231,12 +218,12 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
if (plane_enabled(state)) {
unsigned int rotation;
- const struct mdp_format *format;
+ const struct msm_format *format;
struct mdp5_kms *mdp5_kms = get_kms(plane);
uint32_t blkcfg = 0;
- format = to_mdp_format(msm_framebuffer_format(state->fb));
- if (MDP_FORMAT_IS_YUV(format))
+ format = msm_framebuffer_format(state->fb);
+ if (MSM_FORMAT_IS_YUV(format))
caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
if (((state->src_w >> 16) != state->crtc_w) ||
@@ -271,8 +258,8 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
new_hwpipe = true;
if (mdp5_kms->smp) {
- const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(state->fb));
+ const struct msm_format *format =
+ msm_framebuffer_format(state->fb);
blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
state->src_w >> 16, false);
@@ -633,14 +620,14 @@ static int calc_scaley_steps(struct drm_plane *plane,
return 0;
}
-static uint32_t get_scale_config(const struct mdp_format *format,
+static uint32_t get_scale_config(const struct msm_format *format,
uint32_t src, uint32_t dst, bool horz)
{
- const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
- bool scaling = format->is_yuv ? true : (src != dst);
+ const struct drm_format_info *info = drm_format_info(format->pixel_format);
+ bool yuv = MSM_FORMAT_IS_YUV(format);
+ bool scaling = yuv ? true : (src != dst);
uint32_t sub;
uint32_t ya_filter, uv_filter;
- bool yuv = format->is_yuv;
if (!scaling)
return 0;
@@ -664,12 +651,12 @@ static uint32_t get_scale_config(const struct mdp_format *format,
COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}
-static void calc_pixel_ext(const struct mdp_format *format,
+static void calc_pixel_ext(const struct msm_format *format,
uint32_t src, uint32_t dst, uint32_t phase_step[2],
int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
bool horz)
{
- bool scaling = format->is_yuv ? true : (src != dst);
+ bool scaling = MSM_FORMAT_IS_YUV(format) ? true : (src != dst);
int i;
/*
@@ -687,11 +674,11 @@ static void calc_pixel_ext(const struct mdp_format *format,
}
static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
- const struct mdp_format *format,
+ const struct msm_format *format,
uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
- const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ const struct drm_format_info *info = drm_format_info(format->pixel_format);
uint32_t lr, tb, req;
int i;
@@ -699,7 +686,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
uint32_t roi_w = src_w;
uint32_t roi_h = src_h;
- if (format->is_yuv && i == COMP_1_2) {
+ if (MSM_FORMAT_IS_YUV(format) && i == COMP_1_2) {
roi_w /= info->hsub;
roi_h /= info->vsub;
}
@@ -773,8 +760,8 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
{
enum mdp5_pipe pipe = hwpipe->pipe;
bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
- const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(fb));
+ const struct msm_format *format =
+ msm_framebuffer_format(fb);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
@@ -798,21 +785,22 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
- MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
- MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
- MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r_cr) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g_y) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b_cb) |
COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
- MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->bpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
- COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ COND(format->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT,
+ MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
- MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
- MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
- MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
- MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->element[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->element[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->element[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->element[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
@@ -845,7 +833,7 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
}
if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
- if (MDP_FORMAT_IS_YUV(format))
+ if (MSM_FORMAT_IS_YUV(format))
csc_enable(mdp5_kms, pipe,
mdp_get_default_csc_cfg(CSC_YUV2RGB));
else
@@ -864,7 +852,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = hwpipe->pipe;
struct mdp5_hw_pipe *right_hwpipe;
- const struct mdp_format *format;
+ const struct msm_format *format;
uint32_t nplanes, config = 0;
struct phase_step step = { { 0 } };
struct pixel_ext pe = { { 0 } };
@@ -885,8 +873,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
- format = to_mdp_format(msm_framebuffer_format(fb));
- pix_format = format->base.pixel_format;
+ format = msm_framebuffer_format(fb);
+ pix_format = format->pixel_format;
src_x = src->x1;
src_y = src->y1;
@@ -1007,31 +995,48 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mask;
}
+static const uint32_t mdp5_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
- int ret;
- mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
- if (!mdp5_plane) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp5_plane = drmm_universal_plane_alloc(dev, struct mdp5_plane, base,
+ 0xff, &mdp5_plane_funcs,
+ mdp5_plane_formats, ARRAY_SIZE(mdp5_plane_formats),
+ NULL, type, NULL);
+ if (IS_ERR(mdp5_plane))
+ return ERR_CAST(mdp5_plane);
plane = &mdp5_plane->base;
- mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats), false);
-
- ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- NULL, type, NULL);
- if (ret)
- goto fail;
-
drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
@@ -1039,10 +1044,4 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
return plane;
-
-fail:
- if (plane)
- mdp5_plane_destroy(plane);
-
- return ERR_PTR(ret);
}
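
Note: the plane rework above replaces the manual kzalloc() + drm_universal_plane_init() + .destroy callback trio with drmm_universal_plane_alloc(), which allocates the wrapper struct and ties its cleanup to the drm_device's managed resources, so mdp5_plane_destroy() and the error-unwinding path go away; the per-plane formats[32] array filled by mdp_get_formats() likewise becomes the static mdp5_plane_formats[] table. The general shape of the managed pattern, reduced to a sketch (my_plane, my_plane_funcs and my_formats are placeholders, not driver symbols; needs <drm/drm_plane.h> and <drm/drm_managed.h>):

struct my_plane {
	struct drm_plane base;
};

static struct drm_plane *my_plane_create(struct drm_device *dev,
					 enum drm_plane_type type)
{
	struct my_plane *p;

	p = drmm_universal_plane_alloc(dev, struct my_plane, base,
				       0xff, &my_plane_funcs,
				       my_formats, ARRAY_SIZE(my_formats),
				       NULL, type, NULL);
	if (IS_ERR(p))
		return ERR_CAST(p);

	/* No .destroy hook and no error unwinding: the plane and its
	 * allocation are released automatically with the drm_device. */
	return &p->base;
}
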
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index b4bebb425d22..3a7f7edda96b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -114,10 +114,10 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* presumably happens during the dma from scanout buffer).
*/
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
- const struct mdp_format *format,
+ const struct msm_format *format,
u32 width, bool hdecim)
{
- const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ const struct drm_format_info *info = drm_format_info(format->pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index 21732ed485be..1be9832382d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -74,7 +74,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
struct mdp5_global_state *global_state);
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
- const struct mdp_format *format,
+ const struct msm_format *format,
u32 width, bool hdecim);
int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
deleted file mode 100644
index 4dd8d7db2862..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef MDP_COMMON_XML
-#define MDP_COMMON_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum mdp_chroma_samp_type {
- CHROMA_FULL = 0,
- CHROMA_H2V1 = 1,
- CHROMA_H1V2 = 2,
- CHROMA_420 = 3,
-};
-
-enum mdp_fetch_type {
- MDP_PLANE_INTERLEAVED = 0,
- MDP_PLANE_PLANAR = 1,
- MDP_PLANE_PSEUDO_PLANAR = 2,
-};
-
-enum mdp_mixer_stage_id {
- STAGE_UNUSED = 0,
- STAGE_BASE = 1,
- STAGE0 = 2,
- STAGE1 = 3,
- STAGE2 = 4,
- STAGE3 = 5,
- STAGE4 = 6,
- STAGE5 = 7,
- STAGE6 = 8,
- STAGE_MAX = 8,
-};
-
-enum mdp_alpha_type {
- FG_CONST = 0,
- BG_CONST = 1,
- FG_PIXEL = 2,
- BG_PIXEL = 3,
-};
-
-enum mdp_component_type {
- COMP_0 = 0,
- COMP_1_2 = 1,
- COMP_3 = 2,
- COMP_MAX = 3,
-};
-
-enum mdp_bpc {
- BPC1 = 0,
- BPC5 = 1,
- BPC6 = 2,
- BPC8 = 3,
-};
-
-enum mdp_bpc_alpha {
- BPC1A = 0,
- BPC4A = 1,
- BPC6A = 2,
- BPC8A = 3,
-};
-
-
-#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index 025595336f26..426782d50cb4 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -62,115 +62,573 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
},
};
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
- .base = { .pixel_format = DRM_FORMAT_ ## name }, \
- .bpc_a = BPC ## a ## A, \
- .bpc_r = BPC ## r, \
- .bpc_g = BPC ## g, \
- .bpc_b = BPC ## b, \
- .unpack = { e0, e1, e2, e3 }, \
- .alpha_enable = alpha, \
- .unpack_tight = tight, \
- .cpp = c, \
- .unpack_count = cnt, \
- .fetch_type = fp, \
- .chroma_sample = cs, \
- .is_yuv = yuv, \
+#define MDP_TILE_HEIGHT_DEFAULT 1
+#define MDP_TILE_HEIGHT_UBWC 4
+#define MDP_TILE_HEIGHT_NV12 8
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = CHROMA_FULL, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = MDP_TILE_HEIGHT_DEFAULT \
}
-#define BPC0A 0
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = CHROMA_FULL, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = th \
+}
-/*
- * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking
- * mdp_get_rgb_formats()'s implementation.
- */
-static const struct mdp_format formats[] = {
- /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
- FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
- FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = MDP_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = 0, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = MDP_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = 0, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = 0, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB | flg, \
+ .num_planes = np, \
+ .tile_height = MDP_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = 0, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB | flg, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_type = MDP_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bpc_g_y = g, \
+ .bpc_b_cb = b, \
+ .bpc_r_cr = r, \
+ .bpc_a = a, \
+ .chroma_sample = chroma, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \
+ .num_planes = np, \
+ .tile_height = MDP_TILE_HEIGHT_DEFAULT \
+}
+
+static const struct msm_format mdp_formats[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, BPC5, BPC6, BPC5,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, BPC5, BPC6, BPC5,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ BPC1A, BPC5, BPC5, BPC5,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ BPC1A, BPC5, BPC5, BPC5,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ BPC1A, BPC5, BPC5, BPC5,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ BPC1A, BPC5, BPC5, BPC5,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ BPC1A, BPC5, BPC5, BPC5,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ BPC1A, BPC5, BPC5, BPC5,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ BPC1A, BPC5, BPC5, BPC5,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ BPC1A, BPC5, BPC5, BPC5,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ BPC4A, BPC4, BPC4, BPC4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ BPC8A, BPC8, BPC8, BPC8,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, MSM_FORMAT_FLAG_DX,
+ MDP_FETCH_LINEAR, 1),
/* --- RGB formats above / YUV formats below this line --- */
/* 2 plane YUV */
- FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
- FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
- FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
- FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ PSEUDO_YUV_FMT(NV12,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr,
+ CHROMA_420, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, BPC8, BPC8, BPC8,
+ C2_R_Cr, C1_B_Cb,
+ CHROMA_420, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr,
+ CHROMA_H2V1, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, BPC8, BPC8, BPC8,
+ C2_R_Cr, C1_B_Cb,
+ CHROMA_H2V1, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT_LOOSE(P010,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr,
+ CHROMA_420, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
/* 1 plane YUV */
- FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
- FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
- FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
- FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, BPC8, BPC8, BPC8,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, BPC8, BPC8, BPC8,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 2),
+
/* 3 plane YUV */
- FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
- MDP_PLANE_PLANAR, CHROMA_420, true),
- FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
- MDP_PLANE_PLANAR, CHROMA_420, true),
+ PLANAR_YUV_FMT(YUV420,
+ 0, BPC8, BPC8, BPC8,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, CHROMA_420, 1, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, CHROMA_420, 1, MSM_FORMAT_FLAG_YUV,
+ MDP_FETCH_LINEAR, 3),
};
/*
- * Note:
- * @rgb_only must be set to true, when requesting
- * supported formats for RGB pipes.
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
*/
-uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
- bool rgb_only)
-{
- uint32_t i;
- for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp_format *f = &formats[i];
+static const struct msm_format mdp_formats_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, BPC5, BPC6, BPC5,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
- if (i == max_formats)
- break;
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
- if (rgb_only && MDP_FORMAT_IS_YUV(f))
- break;
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
- pixel_formats[i] = f->base.pixel_format;
- }
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
- return i;
-}
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
+
+ /* XRGB2101010 and ARGB2101010 purposely have the same color
+ * ordering. The hardware only supports ARGB2101010 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB2101010,
+ BPC8A, BPC8, BPC8, BPC8,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr,
+ CHROMA_420, MSM_FORMAT_FLAG_YUV |
+ MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 4, MDP_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(P010,
+ 0, BPC8, BPC8, BPC8,
+ C1_B_Cb, C2_R_Cr,
+ CHROMA_420, MSM_FORMAT_FLAG_DX |
+ MSM_FORMAT_FLAG_YUV |
+ MSM_FORMAT_FLAG_COMPRESSED,
+ MDP_FETCH_UBWC, 4, MDP_TILE_HEIGHT_UBWC),
+};
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
uint64_t modifier)
{
+ const struct msm_format *map = NULL;
+ ssize_t map_size;
int i;
- for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp_format *f = &formats[i];
- if (f->base.pixel_format == format)
- return &f->base;
+
+ switch (modifier) {
+ case 0:
+ map = mdp_formats;
+ map_size = ARRAY_SIZE(mdp_formats);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = mdp_formats_ubwc;
+ map_size = ARRAY_SIZE(mdp_formats_ubwc);
+ break;
+ default:
+ drm_err(kms->dev, "unsupported format modifier %llX\n", modifier);
+ return NULL;
}
+
+ for (i = 0; i < map_size; i++) {
+ const struct msm_format *f = &map[i];
+
+ if (f->pixel_format == format)
+ return f;
+ }
+
+ drm_err(kms->dev, "unsupported fmt: %p4cc modifier 0x%llX\n",
+ &format, modifier);
+
return NULL;
}
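
The reworked mdp_get_format() above first selects one of two static tables based on the framebuffer modifier and then scans the selected table for the requested fourcc. A standalone sketch of just that selection-and-scan logic follows; the struct, table entries and modifier constants are stand-ins, not the kernel's msm_format or DRM definitions.

/* Standalone sketch of the modifier-keyed format lookup; all names are stand-ins. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MOD_LINEAR     0ULL	/* stand-in for the default (no modifier) case */
#define MOD_COMPRESSED 1ULL	/* stand-in for DRM_FORMAT_MOD_QCOM_COMPRESSED */

struct sketch_format {
	uint32_t pixel_format;
	const char *name;
};

static const struct sketch_format linear_formats[] = {
	{ 1, "ARGB8888" },
	{ 2, "NV12" },
};

static const struct sketch_format ubwc_formats[] = {
	{ 3, "ABGR8888 (UBWC)" },
	{ 2, "NV12 (UBWC)" },
};

static const struct sketch_format *sketch_get_format(uint32_t fourcc, uint64_t modifier)
{
	const struct sketch_format *map;
	size_t i, map_size;

	switch (modifier) {
	case MOD_LINEAR:
		map = linear_formats;
		map_size = sizeof(linear_formats) / sizeof(linear_formats[0]);
		break;
	case MOD_COMPRESSED:
		map = ubwc_formats;
		map_size = sizeof(ubwc_formats) / sizeof(ubwc_formats[0]);
		break;
	default:
		return NULL;			/* unsupported modifier */
	}

	for (i = 0; i < map_size; i++)
		if (map[i].pixel_format == fourcc)
			return &map[i];

	return NULL;				/* fourcc not in the selected table */
}

int main(void)
{
	const struct sketch_format *f = sketch_get_format(2, MOD_COMPRESSED);

	printf("%s\n", f ? f->name : "not found");
	return 0;
}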
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.h b/drivers/gpu/drm/msm/disp/mdp_format.h
new file mode 100644
index 000000000000..a00d646ff4d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_format.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_FORMAT_H__
+#define __MSM_FORMAT_H__
+
+#include "mdp_common.xml.h"
+
+enum msm_format_flags {
+ MSM_FORMAT_FLAG_YUV_BIT,
+ MSM_FORMAT_FLAG_DX_BIT,
+ MSM_FORMAT_FLAG_COMPRESSED_BIT,
+ MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT,
+ MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT,
+};
+
+#define MSM_FORMAT_FLAG_YUV BIT(MSM_FORMAT_FLAG_YUV_BIT)
+#define MSM_FORMAT_FLAG_DX BIT(MSM_FORMAT_FLAG_DX_BIT)
+#define MSM_FORMAT_FLAG_COMPRESSED BIT(MSM_FORMAT_FLAG_COMPRESSED_BIT)
+#define MSM_FORMAT_FLAG_UNPACK_TIGHT BIT(MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT)
+#define MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB BIT(MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT)
+
+/**
+ * DPU HW,Component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * struct msm_format: defines the format configuration
+ * @pixel_format: format fourcc
+ * @element: element color ordering
+ * @fetch_type: how the color components are packed in pixel format
+ * @chroma_sample: chroma sub-sampling type
+ * @alpha_enable: whether the format has an alpha channel
+ * @unpack_count: number of the components to unpack
+ * @bpp: bytes per pixel
+ * @flags: usage bit flags
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @tile_height: format tile height
+ */
+struct msm_format {
+ uint32_t pixel_format;
+ enum mdp_bpc bpc_g_y, bpc_b_cb, bpc_r_cr;
+ enum mdp_bpc_alpha bpc_a;
+ u8 element[4];
+ enum mdp_fetch_type fetch_type;
+ enum mdp_chroma_samp_type chroma_sample;
+ bool alpha_enable;
+ u8 unpack_count;
+ u8 bpp;
+ unsigned long flags;
+ u8 num_planes;
+ enum mdp_fetch_mode fetch_mode;
+ u16 tile_height;
+};
+
+#define MSM_FORMAT_IS_YUV(X) ((X)->flags & MSM_FORMAT_FLAG_YUV)
+#define MSM_FORMAT_IS_DX(X) ((X)->flags & MSM_FORMAT_FLAG_DX)
+#define MSM_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == MDP_FETCH_LINEAR)
+#define MSM_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == MDP_FETCH_UBWC) && \
+ !((X)->flags & MSM_FORMAT_FLAG_COMPRESSED))
+#define MSM_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == MDP_FETCH_UBWC) && \
+ ((X)->flags & MSM_FORMAT_FLAG_COMPRESSED))
+
+#endif
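
The new mdp_format.h classifies a format by a fetch mode plus a flag bitmask: UBWC fetch without the COMPRESSED flag is plain macro-tile, UBWC fetch with it is compressed UBWC. A minimal userspace sketch of that classification, with stand-in enums and flag names rather than the generated mdp_common.xml.h values:

/* Minimal sketch of the flag/fetch-mode classification; enums and flags are stand-ins. */
#include <stdio.h>

#define BIT(n) (1UL << (n))

#define FLAG_YUV	BIT(0)
#define FLAG_COMPRESSED	BIT(2)

enum fetch_mode { FETCH_LINEAR, FETCH_UBWC };

struct fmt {
	const char *name;
	enum fetch_mode fetch_mode;
	unsigned long flags;
};

/* UBWC fetch without COMPRESSED is plain tile, with COMPRESSED it is UBWC. */
#define IS_LINEAR(f) ((f)->fetch_mode == FETCH_LINEAR)
#define IS_TILE(f)   ((f)->fetch_mode == FETCH_UBWC && !((f)->flags & FLAG_COMPRESSED))
#define IS_UBWC(f)   ((f)->fetch_mode == FETCH_UBWC &&  ((f)->flags & FLAG_COMPRESSED))

int main(void)
{
	struct fmt linear_nv12 = { "NV12",      FETCH_LINEAR, FLAG_YUV };
	struct fmt ubwc_nv12   = { "NV12 UBWC", FETCH_UBWC,   FLAG_YUV | FLAG_COMPRESSED };

	printf("%s: linear=%d tile=%d ubwc=%d\n", linear_nv12.name,
	       IS_LINEAR(&linear_nv12), IS_TILE(&linear_nv12), IS_UBWC(&linear_nv12));
	printf("%s: linear=%d tile=%d ubwc=%d\n", ubwc_nv12.name,
	       IS_LINEAR(&ubwc_nv12), IS_TILE(&ubwc_nv12), IS_UBWC(&ubwc_nv12));
	return 0;
}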
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index b0286d5d5130..068fbeac6edb 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include "mdp_format.h"
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp_common.xml.h"
@@ -77,23 +78,6 @@ void mdp_irq_update(struct mdp_kms *mdp_kms);
* pixel format helpers:
*/
-struct mdp_format {
- struct msm_format base;
- enum mdp_bpc bpc_r, bpc_g, bpc_b;
- enum mdp_bpc_alpha bpc_a;
- uint8_t unpack[4];
- bool alpha_enable, unpack_tight;
- uint8_t cpp, unpack_count;
- enum mdp_fetch_type fetch_type;
- enum mdp_chroma_samp_type chroma_sample;
- bool is_yuv;
-};
-#define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
-
-uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
-
/* MDP capabilities */
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 7634e4b74208..a599fc5d63c5 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -22,9 +22,7 @@ struct dp_audio_private {
struct platform_device *pdev;
struct drm_device *drm_dev;
struct dp_catalog *catalog;
- struct dp_panel *panel;
- bool engine_on;
u32 channels;
struct dp_audio dp_audio;
@@ -34,11 +32,7 @@ static u32 dp_audio_get_header(struct dp_catalog *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
- catalog->sdp_type = sdp;
- catalog->sdp_header = header;
- dp_catalog_audio_get_header(catalog);
-
- return catalog->audio_data;
+ return dp_catalog_audio_get_header(catalog, sdp, header);
}
static void dp_audio_set_header(struct dp_catalog *catalog,
@@ -46,10 +40,7 @@ static void dp_audio_set_header(struct dp_catalog *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
- catalog->sdp_type = sdp;
- catalog->sdp_header = header;
- catalog->audio_data = data;
- dp_catalog_audio_set_header(catalog);
+ dp_catalog_audio_set_header(catalog, sdp, header, data);
}
static void dp_audio_stream_sdp(struct dp_audio_private *audio)
@@ -319,8 +310,7 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio)
break;
}
- catalog->audio_data = select;
- dp_catalog_audio_config_acr(catalog);
+ dp_catalog_audio_config_acr(catalog, select);
}
static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
@@ -346,18 +336,14 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
break;
}
- catalog->audio_data = safe_to_exit_level;
- dp_catalog_audio_sfe_level(catalog);
+ dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
}
static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
{
struct dp_catalog *catalog = audio->catalog;
- catalog->audio_data = enable;
- dp_catalog_audio_enable(catalog);
-
- audio->engine_on = enable;
+ dp_catalog_audio_enable(catalog, enable);
}
static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
@@ -571,7 +557,6 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
}
audio->pdev = pdev;
- audio->panel = panel;
audio->catalog = catalog;
dp_audio = &audio->dp_audio;
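
The dp_audio.c hunks above all follow one pattern: instead of the caller stashing a value in a shared dp_catalog field and the catalog function reading it back, the value now travels as an explicit function argument. A hypothetical before/after sketch of that calling convention; the names and the printf stand in for the real register programming.

/* Hypothetical illustration of the refactor: pass values instead of stashing them. */
#include <stdio.h>

struct catalog { unsigned int scratch; };	/* old style kept a scratch field */

/* Old style: caller writes into the shared struct, callee reads it back. */
static void old_audio_enable(struct catalog *c)
{
	printf("enable=%u\n", c->scratch);
}

/* New style: the value is an explicit argument, visible at the call site. */
static void new_audio_enable(struct catalog *c, unsigned int enable)
{
	(void)c;
	printf("enable=%u\n", enable);
}

int main(void)
{
	struct catalog c = { 0 };

	c.scratch = 1;			/* implicit coupling between caller and callee */
	old_audio_enable(&c);

	new_audio_enable(&c, 1);	/* self-describing call site */
	return 0;
}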
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index adbd5a367395..da46a433bf74 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -38,6 +38,7 @@ struct dp_aux_private {
bool no_send_stop;
bool initted;
bool is_edp;
+ bool enable_xfers;
u32 offset;
u32 segment;
@@ -87,8 +88,7 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- aux->catalog->aux_data = reg;
- dp_catalog_aux_write_data(aux->catalog);
+ dp_catalog_aux_write_data(aux->catalog, reg);
}
dp_catalog_aux_clear_trans(aux->catalog, false);
@@ -106,8 +106,7 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- aux->catalog->aux_data = reg;
- dp_catalog_aux_write_trans(aux->catalog);
+ dp_catalog_aux_write_trans(aux->catalog, reg);
return len;
}
@@ -145,8 +144,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- aux->catalog->aux_data = data;
- dp_catalog_aux_write_data(aux->catalog);
+ dp_catalog_aux_write_data(aux->catalog, data);
dp = msg->buffer;
@@ -305,19 +303,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
/*
- * For eDP it's important to give a reasonably long wait here for HPD
- * to be asserted. This is because the panel driver may have _just_
- * turned on the panel and then tried to do an AUX transfer. The panel
- * driver has no way of knowing when the panel is ready, so it's up
- * to us to wait. For DP we never get into this situation so let's
- * avoid ever doing the extra long wait for DP.
+ * If we're using DP and an external display isn't connected then the
+ * transfer won't succeed. Return right away. If we don't do this we
+ * can end up with long timeouts if someone tries to access the DP AUX
+ * character device when no DP device is connected.
*/
- if (aux->is_edp) {
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
- if (ret) {
- DRM_DEBUG_DP("Panel not ready for aux transactions\n");
- goto exit;
- }
+ if (!aux->is_edp && !aux->enable_xfers) {
+ ret = -ENXIO;
+ goto exit;
}
dp_aux_update_offset_and_segment(aux, msg);
@@ -436,6 +429,14 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
return IRQ_HANDLED;
}
+void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux->enable_xfers = enabled;
+}
+
void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
@@ -513,7 +514,7 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
pm_runtime_get_sync(aux->dev);
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index f47d591c1f54..4f65e892a807 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -12,6 +12,7 @@
int dp_aux_register(struct drm_dp_aux *dp_aux);
void dp_aux_unregister(struct drm_dp_aux *dp_aux);
irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
void dp_aux_init(struct drm_dp_aux *dp_aux);
void dp_aux_deinit(struct drm_dp_aux *dp_aux);
void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
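
dp_aux_enable_xfers() lets the hot-plug handlers tell the AUX code whether a sink is present, so an external-DP transfer with nothing connected fails immediately with -ENXIO instead of waiting on HPD. A small sketch of that gate, using simplified stand-in types:

/* Sketch of the transfer gate; types and flow are simplified stand-ins. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct aux_state {
	bool is_edp;		/* built-in panel: always allowed to transfer */
	bool enable_xfers;	/* set on HPD plug, cleared on unplug */
};

static int try_transfer(struct aux_state *aux)
{
	/* External DP with nothing connected: fail fast instead of timing out. */
	if (!aux->is_edp && !aux->enable_xfers)
		return -ENXIO;

	return 0;	/* the real driver would run the AUX transaction here */
}

int main(void)
{
	struct aux_state dp = { .is_edp = false, .enable_xfers = false };

	printf("before plug: %d\n", try_transfer(&dp));	/* -ENXIO */
	dp.enable_xfers = true;				/* HPD plug handler */
	printf("after plug:  %d\n", try_transfer(&dp));	/* 0 */
	return 0;
}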
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 3e7c84cdef47..6e55cbf69674 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -81,7 +81,6 @@ struct dp_catalog_private {
struct dss_io_data io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct dp_catalog dp_catalog;
- u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
};
void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
@@ -170,21 +169,21 @@ u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
return dp_read_aux(catalog, REG_DP_AUX_DATA);
}
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
+ dp_write_aux(catalog, REG_DP_AUX_DATA, data);
return 0;
}
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
return 0;
}
@@ -263,17 +262,18 @@ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+ unsigned long wait_us)
{
u32 state;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- /* poll for hpd connected status every 2ms and timeout after 500ms */
+ /* poll for hpd connected status every 2ms and timeout after wait_us */
return readl_poll_timeout(catalog->io.aux.base +
REG_DP_DP_HPD_INT_STATUS,
state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
- 2000, 500000);
+ min(wait_us, 2000), wait_us);
}
static void dump_regs(void __iomem *base, int len)
@@ -469,7 +469,7 @@ void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog)
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
u32 rate, u32 stream_rate_khz,
- bool fixed_nvid, bool is_ycbcr_420)
+ bool is_ycbcr_420)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
@@ -881,19 +881,17 @@ u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
}
/* panel related catalog functions */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 dp_active)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 reg;
- dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
- dp_catalog->total);
- dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
- dp_catalog->sync_start);
- dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
- dp_catalog->width_blanking);
- dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+ dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
+ dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active);
reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
@@ -1162,34 +1160,28 @@ struct dp_catalog *dp_catalog_get(struct device *dev)
return &catalog->dp_catalog;
}
-void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
+u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- enum dp_catalog_audio_sdp_type sdp;
- enum dp_catalog_audio_header_type header;
-
- if (!dp_catalog)
- return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
sdp_map = catalog->audio_map;
- sdp = dp_catalog->sdp_type;
- header = dp_catalog->sdp_header;
- dp_catalog->audio_data = dp_read_link(catalog,
- sdp_map[sdp][header]);
+ return dp_read_link(catalog, sdp_map[sdp][header]);
}
-void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header,
+ u32 data)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- enum dp_catalog_audio_sdp_type sdp;
- enum dp_catalog_audio_header_type header;
- u32 data;
if (!dp_catalog)
return;
@@ -1198,17 +1190,14 @@ void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
struct dp_catalog_private, dp_catalog);
sdp_map = catalog->audio_map;
- sdp = dp_catalog->sdp_type;
- header = dp_catalog->sdp_header;
- data = dp_catalog->audio_data;
dp_write_link(catalog, sdp_map[sdp][header], data);
}
-void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
+void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select)
{
struct dp_catalog_private *catalog;
- u32 acr_ctrl, select;
+ u32 acr_ctrl;
if (!dp_catalog)
return;
@@ -1216,7 +1205,6 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
@@ -1225,10 +1213,9 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
-void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
+void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable)
{
struct dp_catalog_private *catalog;
- bool enable;
u32 audio_ctrl;
if (!dp_catalog)
@@ -1237,7 +1224,6 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- enable = !!dp_catalog->audio_data;
audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
if (enable)
@@ -1332,10 +1318,10 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
catalog->audio_map = sdp_map;
}
-void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
+void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level)
{
struct dp_catalog_private *catalog;
- u32 mainlink_levels, safe_to_exit_level;
+ u32 mainlink_levels;
if (!dp_catalog)
return;
@@ -1343,7 +1329,6 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- safe_to_exit_level = dp_catalog->audio_data;
mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 75ec290127c7..4679d50b8c73 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -28,26 +28,9 @@
#define DP_INTR_FRAME_END BIT(6)
#define DP_INTR_CRC_UPDATED BIT(9)
-#define DP_AUX_CFG_MAX_VALUE_CNT 3
-
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
-/* PHY AUX config registers */
-enum dp_phy_aux_config_type {
- PHY_AUX_CFG0,
- PHY_AUX_CFG1,
- PHY_AUX_CFG2,
- PHY_AUX_CFG3,
- PHY_AUX_CFG4,
- PHY_AUX_CFG5,
- PHY_AUX_CFG6,
- PHY_AUX_CFG7,
- PHY_AUX_CFG8,
- PHY_AUX_CFG9,
- PHY_AUX_CFG_MAX,
-};
-
enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
@@ -65,14 +48,6 @@ enum dp_catalog_audio_header_type {
};
struct dp_catalog {
- u32 aux_data;
- u32 total;
- u32 sync_start;
- u32 width_blanking;
- u32 dp_active;
- enum dp_catalog_audio_sdp_type sdp_type;
- enum dp_catalog_audio_header_type sdp_header;
- u32 audio_data;
bool wide_bus_en;
};
@@ -81,13 +56,14 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
/* AUX APIs */
u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data);
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data);
int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+ unsigned long wait_us);
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
/* DP Controller APIs */
@@ -99,7 +75,7 @@ void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool ena
void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
- u32 stream_rate_khz, bool fixed_nvid, bool is_ycbcr_420);
+ u32 stream_rate_khz, bool is_ycbcr_420);
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
@@ -124,7 +100,8 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
/* DP Panel APIs */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 dp_active);
void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp);
void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog);
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
@@ -135,12 +112,17 @@ void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
struct dp_catalog *dp_catalog_get(struct device *dev);
/* DP Audio APIs */
-void dp_catalog_audio_get_header(struct dp_catalog *catalog);
-void dp_catalog_audio_set_header(struct dp_catalog *catalog);
-void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
-void dp_catalog_audio_enable(struct dp_catalog *catalog);
+u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header);
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header,
+ u32 data);
+void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select);
+void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable);
void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
void dp_catalog_audio_init(struct dp_catalog *catalog);
-void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
+void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */
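
dp_catalog_aux_wait_for_hpd_connect_state() now takes its timeout from the caller and polls every min(wait_us, 2000) microseconds, so a budget below 2 ms still gets a poll interval that fits inside it. A userspace sketch of that polling contract, standing in for readl_poll_timeout(), which is not available outside the kernel:

/* Userspace sketch of the caller-supplied-timeout polling contract. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_elapsed_us;

static bool hpd_connected(void)
{
	return fake_elapsed_us >= 5000;	/* pretend the sink shows up after 5 ms */
}

static int wait_for_hpd(unsigned long wait_us)
{
	unsigned long interval = wait_us < 2000 ? wait_us : 2000;	/* min(wait_us, 2000) */

	for (fake_elapsed_us = 0; fake_elapsed_us <= wait_us;
	     fake_elapsed_us += interval) {
		if (hpd_connected())
			return 0;
		/* the real code sleeps 'interval' microseconds between register reads */
	}
	return -1;	/* timed out, like readl_poll_timeout() returning -ETIMEDOUT */
}

int main(void)
{
	printf("wait 500000us -> %d\n", wait_for_hpd(500000));	/* connects */
	printf("wait   1000us -> %d\n", wait_for_hpd(1000));	/* times out */
	return 0;
}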
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index c4dda1faef67..7bc8a9f0657a 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1052,14 +1052,14 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
if (ret)
return ret;
- if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+ if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. voltage swing level reached %d\n",
voltage_swing_level);
max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
}
- if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+ if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. pre-emphasis level reached %d\n",
pre_emphasis_level);
@@ -1150,7 +1150,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
}
if (ctrl->link->phy_params.v_level >=
- DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DP_TRAIN_LEVEL_MAX) {
DRM_ERROR_RATELIMITED("max v_level reached\n");
return -EAGAIN;
}
@@ -1566,21 +1566,6 @@ void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
phy, phy->init_count, phy->power_count);
}
-static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
-{
- const u8 *dpcd = ctrl->panel->dpcd;
-
- /*
- * For better interop experience, used a fixed NVID=0x8000
- * whenever connected to a VGA dongle downstream.
- */
- if (drm_dp_is_branch(dpcd))
- return (drm_dp_has_quirk(&ctrl->panel->desc,
- DP_DPCD_QUIRK_CONSTANT_N));
-
- return false;
-}
-
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
{
struct phy *phy = ctrl->phy;
@@ -2022,7 +2007,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
- pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl),
+ pixel_rate_orig,
ctrl->panel->dp_mode.out_fmt_is_yuv_420);
dp_ctrl_setup_tr_unit(ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index fa014cee7e21..ffcbd9a25748 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -12,7 +12,6 @@
#include "dp_catalog.h"
struct dp_ctrl {
- atomic_t aborted;
bool wide_bus_en;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index eca5a02f9003..b8611f6d2296 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -21,8 +21,6 @@ struct dp_debug_private {
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector *connector;
-
- struct dp_debug dp_debug;
};
static int dp_debug_show(struct seq_file *seq, void *p)
@@ -199,10 +197,24 @@ static const struct file_operations test_active_fops = {
.write = dp_test_active_write
};
-static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp)
+int dp_debug_init(struct device *dev, struct dp_panel *panel,
+ struct dp_link *link,
+ struct drm_connector *connector,
+ struct dentry *root, bool is_edp)
{
- struct dp_debug_private *debug = container_of(dp_debug,
- struct dp_debug_private, dp_debug);
+ struct dp_debug_private *debug;
+
+ if (!dev || !panel || !link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->link = link;
+ debug->panel = panel;
debugfs_create_file("dp_debug", 0444, root,
debug, &dp_debug_fops);
@@ -220,41 +232,6 @@ static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool i
root,
debug, &dp_test_type_fops);
}
-}
-struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
- struct drm_connector *connector,
- struct dentry *root, bool is_edp)
-{
- struct dp_debug_private *debug;
- struct dp_debug *dp_debug;
- int rc;
-
- if (!dev || !panel || !link) {
- DRM_ERROR("invalid input\n");
- rc = -EINVAL;
- goto error;
- }
-
- debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
- if (!debug) {
- rc = -ENOMEM;
- goto error;
- }
-
- debug->dp_debug.debug_en = false;
- debug->link = link;
- debug->panel = panel;
-
- dp_debug = &debug->dp_debug;
- dp_debug->vdisplay = 0;
- dp_debug->hdisplay = 0;
- dp_debug->vrefresh = 0;
-
- dp_debug_init(dp_debug, root, is_edp);
-
- return dp_debug;
- error:
- return ERR_PTR(rc);
+ return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 9b3b2e702f65..7e1aa892fc09 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -9,22 +9,6 @@
#include "dp_panel.h"
#include "dp_link.h"
-/**
- * struct dp_debug
- * @debug_en: specifies whether debug mode enabled
- * @vdisplay: used to filter out vdisplay value
- * @hdisplay: used to filter out hdisplay value
- * @vrefresh: used to filter out vrefresh value
- * @tpg_state: specifies whether tpg feature is enabled
- */
-struct dp_debug {
- bool debug_en;
- int aspect_ratio;
- int vdisplay;
- int hdisplay;
- int vrefresh;
-};
-
#if defined(CONFIG_DEBUG_FS)
/**
@@ -41,22 +25,22 @@ struct dp_debug {
* This function sets up the debug module and provides a way
* for debugfs input to be communicated with existing modules
*/
-struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
- struct drm_connector *connector,
- struct dentry *root,
- bool is_edp);
+int dp_debug_init(struct device *dev, struct dp_panel *panel,
+ struct dp_link *link,
+ struct drm_connector *connector,
+ struct dentry *root,
+ bool is_edp);
#else
static inline
-struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
- struct drm_connector *connector,
- struct dentry *root,
- bool is_edp)
+int dp_debug_init(struct device *dev, struct dp_panel *panel,
+ struct dp_link *link,
+ struct drm_connector *connector,
+ struct dentry *root,
+ bool is_edp)
{
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
#endif /* defined(CONFIG_DEBUG_FS) */
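
dp_debug_get(), which handed back a struct dp_debug pointer (or an ERR_PTR) that dp_display then had to keep, becomes dp_debug_init(), which returns a plain error code and leaves nothing for the caller to store. A hypothetical sketch of the resulting call site; the names are made up.

/* Hypothetical sketch of the int-returning init convention. */
#include <errno.h>
#include <stdio.h>

static int debug_init(int have_panel, int have_link)
{
	if (!have_panel || !have_link)
		return -EINVAL;		/* invalid input, as in dp_debug_init() */
	/* ... allocate private data and create debugfs files ... */
	return 0;
}

int main(void)
{
	int rc = debug_init(1, 1);

	if (rc)
		fprintf(stderr, "failed to initialize debug, rc = %d\n", rc);
	return rc ? 1 : 0;
}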
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index c4cb82af5c2f..672a7ba52eda 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -74,7 +74,6 @@ struct dp_event {
};
struct dp_display_private {
- char *name;
int irq;
unsigned int id;
@@ -82,18 +81,15 @@ struct dp_display_private {
/* state variables */
bool core_initialized;
bool phy_initialized;
- bool hpd_irq_on;
bool audio_supported;
struct drm_device *drm_dev;
- struct dentry *root;
struct dp_catalog *catalog;
struct drm_dp_aux *aux;
struct dp_link *link;
struct dp_panel *panel;
struct dp_ctrl *ctrl;
- struct dp_debug *debug;
struct dp_display_mode dp_mode;
struct msm_dp dp_display;
@@ -119,55 +115,49 @@ struct dp_display_private {
struct msm_dp_desc {
phys_addr_t io_start;
unsigned int id;
- unsigned int connector_type;
bool wide_bus_supported;
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
{}
};
static const struct msm_dp_desc sc7280_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{}
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 },
{}
};
static const struct msm_dp_desc sc8280xp_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
- { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
+ { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc8280xp_edp_descs[] = {
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
- { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
- { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
- {}
-};
-
-static const struct msm_dp_desc sm8350_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+static const struct msm_dp_desc sm8650_dp_descs[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 },
{}
};
-static const struct msm_dp_desc sm8650_dp_descs[] = {
- { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+static const struct msm_dp_desc x1e80100_dp_descs[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
{}
};
@@ -178,10 +168,11 @@ static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs },
{ .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs },
{ .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs },
- { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
+ { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs },
{ .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
+ { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
+ { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs },
{}
};
@@ -484,7 +475,7 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
}
}
-static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
{
int rc = 0;
@@ -541,7 +532,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
- rc = dp_display_handle_port_ststus_changed(dp);
+ rc = dp_display_handle_port_status_changed(dp);
else
rc = dp_display_handle_irq_hpd(dp);
}
@@ -555,6 +546,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
int ret;
struct platform_device *pdev = dp->dp_display.pdev;
+ dp_aux_enable_xfers(dp->aux, true);
+
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
@@ -588,6 +581,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
+ pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@@ -619,6 +613,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
u32 state;
struct platform_device *pdev = dp->dp_display.pdev;
+ dp_aux_enable_xfers(dp->aux, false);
+
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
@@ -645,6 +641,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -726,6 +723,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
if (IS_ERR(phy))
return PTR_ERR(phy);
+ rc = phy_set_mode_ext(phy, PHY_MODE_DP,
+ dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
+ if (rc) {
+ DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
+ dp->catalog = NULL;
+ goto error;
+ }
+
dp->catalog = dp_catalog_get(dev);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
@@ -801,7 +806,6 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
dp->panel->dp_mode.bpp = mode->bpp;
- dp->panel->dp_mode.capabilities = mode->capabilities;
dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
dp_panel_init_panel_info(dp->panel);
return 0;
@@ -1241,6 +1245,25 @@ static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
return dp_display_probe_tail(aux->dev);
}
+static int dp_display_get_connector_type(struct platform_device *pdev,
+ const struct msm_dp_desc *desc)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *aux_bus = of_get_child_by_name(node, "aux-bus");
+ struct device_node *panel = of_get_child_by_name(aux_bus, "panel");
+ int connector_type;
+
+ if (panel)
+ connector_type = DRM_MODE_CONNECTOR_eDP;
+ else
+ connector_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ of_node_put(panel);
+ of_node_put(aux_bus);
+
+ return connector_type;
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1261,9 +1284,8 @@ static int dp_display_probe(struct platform_device *pdev)
return -EINVAL;
dp->dp_display.pdev = pdev;
- dp->name = "drm_dp";
dp->id = desc->id;
- dp->dp_display.connector_type = desc->connector_type;
+ dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc);
dp->wide_bus_supported = desc->wide_bus_supported;
dp->dp_display.is_edp =
(dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
@@ -1431,14 +1453,9 @@ void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, boo
dp = container_of(dp_display, struct dp_display_private, dp_display);
dev = &dp->dp_display.pdev->dev;
- dp->debug = dp_debug_get(dev, dp->panel,
- dp->link, dp->dp_display.connector,
- root, is_edp);
- if (IS_ERR(dp->debug)) {
- rc = PTR_ERR(dp->debug);
+ rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp);
+ if (rc)
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
- dp->debug = NULL;
- }
}
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
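
dp_display_probe() now derives the connector type from the device tree: a panel node under aux-bus means the controller drives eDP, otherwise it is treated as DisplayPort, which is why the per-SoC .connector_type fields can be dropped. A simplified sketch of that decision; boolean flags stand in for the OF node lookups.

/* Simplified sketch of the DT-based connector-type decision; OF lookups are mocked. */
#include <stdbool.h>
#include <stdio.h>

enum sketch_connector { SKETCH_CONNECTOR_DP, SKETCH_CONNECTOR_EDP };

static enum sketch_connector get_connector_type(bool aux_bus_node, bool panel_node)
{
	/* A panel hanging off aux-bus implies an always-connected eDP panel. */
	if (aux_bus_node && panel_node)
		return SKETCH_CONNECTOR_EDP;
	return SKETCH_CONNECTOR_DP;
}

int main(void)
{
	printf("laptop panel:  %s\n",
	       get_connector_type(true, true) == SKETCH_CONNECTOR_EDP ? "eDP" : "DP");
	printf("external port: %s\n",
	       get_connector_type(false, false) == SKETCH_CONNECTOR_EDP ? "eDP" : "DP");
	return 0;
}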
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 234dada88687..ec7fa67e0569 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -16,7 +16,6 @@ struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
struct device *codec_dev;
- struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
bool link_ready;
@@ -28,8 +27,6 @@ struct msm_dp {
hdmi_codec_plugged_cb plugged_cb;
- bool wide_bus_en;
-
struct dp_audio *dp_audio;
bool psr_supported;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index a819a4ff76a9..1b9be5bd97f1 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -347,8 +347,6 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
}
}
- dp_display->bridge = bridge;
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 49dfac1fd1ef..d8967615d84d 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -36,7 +36,6 @@ struct dp_link_request {
struct dp_link_private {
u32 prev_sink_count;
- struct device *dev;
struct drm_device *drm_dev;
struct drm_dp_aux *aux;
struct dp_link dp_link;
@@ -804,8 +803,6 @@ int dp_link_psm_config(struct dp_link *dp_link,
if (ret)
DRM_ERROR("Failed to %s low power mode\n", enable ?
"enter" : "exit");
- else
- dp_link->psm_enabled = enable;
mutex_unlock(&link->psm_mutex);
return ret;
@@ -1109,6 +1106,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
+ u8 max_p_level;
int v_max = 0, p_max = 0;
struct dp_link_private *link;
@@ -1140,30 +1138,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
* Adjust the voltage swing and pre-emphasis level combination to within
* the allowable range.
*/
- if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+ if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested vSwingLevel=%d, change to %d\n",
dp_link->phy_params.v_level,
- DP_TRAIN_VOLTAGE_SWING_MAX);
- dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+ DP_TRAIN_LEVEL_MAX);
+ dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
}
- if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+ if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
- DP_TRAIN_PRE_EMPHASIS_MAX);
- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+ DP_TRAIN_LEVEL_MAX);
+ dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
}
- if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
- && (dp_link->phy_params.v_level ==
- DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+ max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
+ if (dp_link->phy_params.p_level > max_p_level) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
- DP_TRAIN_PRE_EMPHASIS_LVL_1);
- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+ max_p_level);
+ dp_link->phy_params.p_level = max_p_level;
}
drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
@@ -1226,7 +1223,6 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
if (!link)
return ERR_PTR(-ENOMEM);
- link->dev = dev;
link->aux = aux;
mutex_init(&link->psm_mutex);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 83da170bc56b..5846337bb56f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -19,19 +19,7 @@ struct dp_link_info {
unsigned long capabilities;
};
-enum dp_link_voltage_level {
- DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
- DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
- DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
- DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
-};
-
-enum dp_link_preemaphasis_level {
- DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
- DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
- DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
- DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
-};
+#define DP_TRAIN_LEVEL_MAX 3
struct dp_link_test_video {
u32 test_video_pattern;
@@ -74,7 +62,6 @@ struct dp_link_phy_params {
struct dp_link {
u32 sink_request;
u32 test_response;
- bool psm_enabled;
u8 sink_count;
struct dp_link_test_video test_video;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 8e7069453952..07db8f37cd06 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -353,6 +353,10 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
struct dp_catalog *catalog;
struct dp_panel_private *panel;
struct drm_display_mode *drm_mode;
+ u32 width_blanking;
+ u32 sync_start;
+ u32 dp_active;
+ u32 total;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
@@ -376,13 +380,13 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data <<= 16;
data |= total_hor;
- catalog->total = data;
+ total = data;
data = (drm_mode->vtotal - drm_mode->vsync_start);
data <<= 16;
data |= (drm_mode->htotal - drm_mode->hsync_start);
- catalog->sync_start = data;
+ sync_start = data;
data = drm_mode->vsync_end - drm_mode->vsync_start;
data <<= 16;
@@ -390,15 +394,15 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data |= drm_mode->hsync_end - drm_mode->hsync_start;
data |= (panel->dp_panel.dp_mode.h_active_low << 15);
- catalog->width_blanking = data;
+ width_blanking = data;
data = drm_mode->vdisplay;
data <<= 16;
data |= drm_mode->hdisplay;
- catalog->dp_active = data;
+ dp_active = data;
- dp_catalog_panel_timing_cfg(catalog);
+ dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active);
if (dp_panel->dp_mode.out_fmt_is_yuv_420)
dp_panel_setup_vsc_sdp_yuv_420(dp_panel);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index e843f5062d1f..4ea42fa936ae 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -15,7 +15,6 @@ struct edid;
struct dp_display_mode {
struct drm_display_mode drm_mode;
- u32 capabilities;
u32 bpp;
u32 h_active_low;
u32 v_active_low;
@@ -40,7 +39,6 @@ struct dp_panel {
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct dp_link_info link_info;
- struct drm_dp_desc desc;
struct edid *edid;
struct drm_connector *connector;
struct dp_display_mode dp_mode;
@@ -48,7 +46,6 @@ struct dp_panel {
bool video_test;
bool vsc_sdp_supported;
- u32 vic;
u32 max_dp_lanes;
u32 max_dp_link_rate;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 37c4c07005fe..efd7c23b662f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -120,6 +120,22 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ /*
+ * Next bridge doesn't exist for the secondary DSI host in a bonded
+ * pair.
+ */
+ if (!msm_dsi_is_bonded_dsi(msm_dsi) ||
+ msm_dsi_is_master_dsi(msm_dsi)) {
+ struct drm_bridge *ext_bridge;
+
+ ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+ msm_dsi->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(ext_bridge))
+ return PTR_ERR(ext_bridge);
+
+ msm_dsi->next_bridge = ext_bridge;
+ }
+
priv->dsi[msm_dsi->id] = msm_dsi;
return 0;
@@ -216,7 +232,6 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct drm_bridge *bridge;
int ret;
msm_dsi->dev = dev;
@@ -236,14 +251,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
}
- bridge = msm_dsi_manager_bridge_init(msm_dsi, encoder);
- if (IS_ERR(bridge)) {
- ret = PTR_ERR(bridge);
- DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
- return ret;
- }
-
- ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id, bridge);
+ ret = msm_dsi_manager_connector_init(msm_dsi, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2ad9a842c678..afc290408ba4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -38,6 +38,8 @@ struct msm_dsi {
struct mipi_dsi_host *host;
struct msm_dsi_phy *phy;
+ struct drm_bridge *next_bridge;
+
struct device *phy_dev;
bool phy_enabled;
@@ -45,9 +47,8 @@ struct msm_dsi {
};
/* dsi manager */
-struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi,
- struct drm_encoder *encoder);
-int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge);
+int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
+ struct drm_encoder *encoder);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
deleted file mode 100644
index 2a7d980e12c3..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ /dev/null
@@ -1,790 +0,0 @@
-#ifndef DSI_XML
-#define DSI_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum dsi_traffic_mode {
- NON_BURST_SYNCH_PULSE = 0,
- NON_BURST_SYNCH_EVENT = 1,
- BURST_MODE = 2,
-};
-
-enum dsi_vid_dst_format {
- VID_DST_FORMAT_RGB565 = 0,
- VID_DST_FORMAT_RGB666 = 1,
- VID_DST_FORMAT_RGB666_LOOSE = 2,
- VID_DST_FORMAT_RGB888 = 3,
-};
-
-enum dsi_rgb_swap {
- SWAP_RGB = 0,
- SWAP_RBG = 1,
- SWAP_BGR = 2,
- SWAP_BRG = 3,
- SWAP_GRB = 4,
- SWAP_GBR = 5,
-};
-
-enum dsi_cmd_trigger {
- TRIGGER_NONE = 0,
- TRIGGER_SEOF = 1,
- TRIGGER_TE = 2,
- TRIGGER_SW = 4,
- TRIGGER_SW_SEOF = 5,
- TRIGGER_SW_TE = 6,
-};
-
-enum dsi_cmd_dst_format {
- CMD_DST_FORMAT_RGB111 = 0,
- CMD_DST_FORMAT_RGB332 = 3,
- CMD_DST_FORMAT_RGB444 = 4,
- CMD_DST_FORMAT_RGB565 = 6,
- CMD_DST_FORMAT_RGB666 = 7,
- CMD_DST_FORMAT_RGB888 = 8,
-};
-
-enum dsi_lane_swap {
- LANE_SWAP_0123 = 0,
- LANE_SWAP_3012 = 1,
- LANE_SWAP_2301 = 2,
- LANE_SWAP_1230 = 3,
- LANE_SWAP_0321 = 4,
- LANE_SWAP_1032 = 5,
- LANE_SWAP_2103 = 6,
- LANE_SWAP_3210 = 7,
-};
-
-enum video_config_bpp {
- VIDEO_CONFIG_18BPP = 0,
- VIDEO_CONFIG_24BPP = 1,
-};
-
-enum video_pattern_sel {
- VID_PRBS = 0,
- VID_INCREMENTAL = 1,
- VID_FIXED = 2,
- VID_MDSS_GENERAL_PATTERN = 3,
-};
-
-enum cmd_mdp_stream0_pattern_sel {
- CMD_MDP_PRBS = 0,
- CMD_MDP_INCREMENTAL = 1,
- CMD_MDP_FIXED = 2,
- CMD_MDP_MDSS_GENERAL_PATTERN = 3,
-};
-
-enum cmd_dma_pattern_sel {
- CMD_DMA_PRBS = 0,
- CMD_DMA_INCREMENTAL = 1,
- CMD_DMA_FIXED = 2,
- CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3,
-};
-
-#define DSI_IRQ_CMD_DMA_DONE 0x00000001
-#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
-#define DSI_IRQ_CMD_MDP_DONE 0x00000100
-#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
-#define DSI_IRQ_VIDEO_DONE 0x00010000
-#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
-#define DSI_IRQ_BTA_DONE 0x00100000
-#define DSI_IRQ_MASK_BTA_DONE 0x00200000
-#define DSI_IRQ_ERROR 0x01000000
-#define DSI_IRQ_MASK_ERROR 0x02000000
-#define REG_DSI_6G_HW_VERSION 0x00000000
-#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000
-#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
-{
- return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
-}
-#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000
-#define DSI_6G_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
-{
- return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
-}
-#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff
-#define DSI_6G_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
-{
- return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
-}
-
-#define REG_DSI_CTRL 0x00000000
-#define DSI_CTRL_ENABLE 0x00000001
-#define DSI_CTRL_VID_MODE_EN 0x00000002
-#define DSI_CTRL_CMD_MODE_EN 0x00000004
-#define DSI_CTRL_LANE0 0x00000010
-#define DSI_CTRL_LANE1 0x00000020
-#define DSI_CTRL_LANE2 0x00000040
-#define DSI_CTRL_LANE3 0x00000080
-#define DSI_CTRL_CLK_EN 0x00000100
-#define DSI_CTRL_ECC_CHECK 0x00100000
-#define DSI_CTRL_CRC_CHECK 0x01000000
-
-#define REG_DSI_STATUS0 0x00000004
-#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001
-#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
-#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004
-#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
-#define DSI_STATUS0_DSI_BUSY 0x00000010
-#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
-
-#define REG_DSI_FIFO_STATUS 0x00000008
-#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW 0x00000001
-#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW 0x00000008
-#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
-#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH 0x00000100
-#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH 0x00000200
-#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW 0x00000400
-#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY 0x00001000
-#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL 0x00002000
-#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW 0x00004000
-#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY 0x00010000
-#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL 0x00020000
-#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW 0x00040000
-#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW 0x00080000
-#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY 0x00100000
-#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL 0x00200000
-#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW 0x00400000
-#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW 0x00800000
-#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY 0x01000000
-#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL 0x02000000
-#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW 0x04000000
-#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW 0x08000000
-#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY 0x10000000
-#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL 0x20000000
-#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW 0x40000000
-#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW 0x80000000
-
-#define REG_DSI_VID_CFG0 0x0000000c
-#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
-#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
-static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
-{
- return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
-}
-#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
-#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
-{
- return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
-}
-#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
-#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
-static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
-{
- return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
-}
-#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
-#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
-#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
-#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
-#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
-#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
-
-#define REG_DSI_VID_CFG1 0x0000001c
-#define DSI_VID_CFG1_R_SEL 0x00000001
-#define DSI_VID_CFG1_G_SEL 0x00000010
-#define DSI_VID_CFG1_B_SEL 0x00000100
-#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12
-static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
-{
- return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
-}
-
-#define REG_DSI_ACTIVE_H 0x00000020
-#define DSI_ACTIVE_H_START__MASK 0x00000fff
-#define DSI_ACTIVE_H_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
-}
-#define DSI_ACTIVE_H_END__MASK 0x0fff0000
-#define DSI_ACTIVE_H_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
-}
-
-#define REG_DSI_ACTIVE_V 0x00000024
-#define DSI_ACTIVE_V_START__MASK 0x00000fff
-#define DSI_ACTIVE_V_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
-}
-#define DSI_ACTIVE_V_END__MASK 0x0fff0000
-#define DSI_ACTIVE_V_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
-}
-
-#define REG_DSI_TOTAL 0x00000028
-#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
-#define DSI_TOTAL_H_TOTAL__SHIFT 0
-static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
-{
- return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
-}
-#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
-#define DSI_TOTAL_V_TOTAL__SHIFT 16
-static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
-{
- return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
-}
-
-#define REG_DSI_ACTIVE_HSYNC 0x0000002c
-#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
-#define DSI_ACTIVE_HSYNC_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
-}
-#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
-#define DSI_ACTIVE_HSYNC_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
-}
-
-#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030
-#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff
-#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
-}
-#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000
-#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
-}
-
-#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034
-#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff
-#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
-}
-#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000
-#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
-{
- return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
-}
-
-#define REG_DSI_CMD_DMA_CTRL 0x00000038
-#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000
-#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
-#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
-
-#define REG_DSI_CMD_CFG0 0x0000003c
-#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f
-#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0
-static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
-{
- return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
-}
-#define DSI_CMD_CFG0_R_SEL 0x00000010
-#define DSI_CMD_CFG0_G_SEL 0x00000100
-#define DSI_CMD_CFG0_B_SEL 0x00001000
-#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000
-#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20
-static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
-{
- return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
-}
-#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000
-#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16
-static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
-{
- return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
-}
-
-#define REG_DSI_CMD_CFG1 0x00000040
-#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff
-#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0
-static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
-{
- return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
-}
-#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00
-#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8
-static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
-{
- return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
-}
-#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000
-
-#define REG_DSI_DMA_BASE 0x00000044
-
-#define REG_DSI_DMA_LEN 0x00000048
-
-#define REG_DSI_CMD_MDP_STREAM0_CTRL 0x00000054
-#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK 0x0000003f
-#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK;
-}
-#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
-#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT 8
-static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK;
-}
-#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK 0xffff0000
-#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK;
-}
-
-#define REG_DSI_CMD_MDP_STREAM0_TOTAL 0x00000058
-#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK 0x00000fff
-#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK;
-}
-#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK 0x0fff0000
-#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK;
-}
-
-#define REG_DSI_CMD_MDP_STREAM1_CTRL 0x0000005c
-#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK 0x0000003f
-#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK;
-}
-#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
-#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT 8
-static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK;
-}
-#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK 0xffff0000
-#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK;
-}
-
-#define REG_DSI_CMD_MDP_STREAM1_TOTAL 0x00000060
-#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK 0x0000ffff
-#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK;
-}
-#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK 0xffff0000
-#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val)
-{
- return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK;
-}
-
-#define REG_DSI_ACK_ERR_STATUS 0x00000064
-
-static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
-
-static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
-
-#define REG_DSI_TRIG_CTRL 0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007
-#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
-static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
-{
- return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
-}
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070
-#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
-static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
-{
- return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
-}
-#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300
-#define DSI_TRIG_CTRL_STREAM__SHIFT 8
-static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
-{
- return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
-}
-#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000
-#define DSI_TRIG_CTRL_TE 0x80000000
-
-#define REG_DSI_TRIG_DMA 0x0000008c
-
-#define REG_DSI_DLN0_PHY_ERR 0x000000b0
-#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001
-#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010
-#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100
-#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
-#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
-
-#define REG_DSI_LP_TIMER_CTRL 0x000000b4
-#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK 0x0000ffff
-#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT 0
-static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val)
-{
- return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK;
-}
-#define DSI_LP_TIMER_CTRL_BTA_TO__MASK 0xffff0000
-#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT 16
-static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val)
-{
- return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK;
-}
-
-#define REG_DSI_HS_TIMER_CTRL 0x000000b8
-#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK 0x0000ffff
-#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT 0
-static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val)
-{
- return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK;
-}
-#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK 0x000f0000
-#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT 16
-static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val)
-{
- return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK;
-}
-#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN 0x10000000
-
-#define REG_DSI_TIMEOUT_STATUS 0x000000bc
-
-#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
-#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
-#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
-static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
-{
- return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
-}
-#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
-#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
-static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
-{
- return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
-}
-
-#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
-#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
-#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
-
-#define REG_DSI_LANE_STATUS 0x000000a4
-#define DSI_LANE_STATUS_DLN0_STOPSTATE 0x00000001
-#define DSI_LANE_STATUS_DLN1_STOPSTATE 0x00000002
-#define DSI_LANE_STATUS_DLN2_STOPSTATE 0x00000004
-#define DSI_LANE_STATUS_DLN3_STOPSTATE 0x00000008
-#define DSI_LANE_STATUS_CLKLN_STOPSTATE 0x00000010
-#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT 0x00000100
-#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT 0x00000200
-#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT 0x00000400
-#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT 0x00000800
-#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT 0x00001000
-#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
-
-#define REG_DSI_LANE_CTRL 0x000000a8
-#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000
-#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
-
-#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
-#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
-#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
-static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
-{
- return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
-}
-
-#define REG_DSI_ERR_INT_MASK0 0x00000108
-
-#define REG_DSI_INTR_CTRL 0x0000010c
-
-#define REG_DSI_RESET 0x00000114
-
-#define REG_DSI_CLK_CTRL 0x00000118
-#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001
-#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002
-#define DSI_CLK_CTRL_PCLK_ON 0x00000004
-#define DSI_CLK_CTRL_DSICLK_ON 0x00000008
-#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010
-#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020
-#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
-
-#define REG_DSI_CLK_STATUS 0x0000011c
-#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE 0x00000001
-#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE 0x00000002
-#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE 0x00000004
-#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE 0x00000008
-#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE 0x00000010
-#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE 0x00000020
-#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE 0x00000040
-#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE 0x00000080
-#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE 0x00000100
-#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE 0x00000200
-#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE 0x00000400
-#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE 0x00001000
-#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE 0x00002000
-#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE 0x00004000
-#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT 0x00008000
-#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
-
-#define REG_DSI_PHY_RESET 0x00000128
-#define DSI_PHY_RESET_RESET 0x00000001
-
-#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160
-
-#define REG_DSI_TPG_MAIN_CONTROL 0x00000198
-#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100
-
-#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0
-#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003
-#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0
-static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val)
-{
- return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK;
-}
-#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004
-
-#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158
-#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000
-#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16
-static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val)
-{
- return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK;
-}
-#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300
-#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8
-static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val)
-{
- return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK;
-}
-#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030
-#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4
-static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val)
-{
- return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK;
-}
-#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004
-#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002
-#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001
-
-#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168
-
-#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180
-#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001
-
-#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c
-#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080
-#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000
-#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000
-
-#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
-#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
-
-#define REG_DSI_CMD_MODE_MDP_CTRL2 0x000001b4
-#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK 0x0000000f
-#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT 0
-static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val)
-{
- return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK;
-}
-#define DSI_CMD_MODE_MDP_CTRL2_R_SEL 0x00000010
-#define DSI_CMD_MODE_MDP_CTRL2_G_SEL 0x00000020
-#define DSI_CMD_MODE_MDP_CTRL2_B_SEL 0x00000040
-#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP 0x00000080
-#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK 0x00000700
-#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT 8
-static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val)
-{
- return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK;
-}
-#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK 0x00007000
-#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT 12
-static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val)
-{
- return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
-}
-#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000
-#define DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN 0x00100000
-
-#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT 0
-static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val)
-{
- return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK;
-}
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT 8
-static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val)
-{
- return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK;
-}
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK 0xffff0000
-#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT 16
-static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val)
-{
- return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK;
-}
-
-#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
-#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
-#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
-static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
-{
- return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
-}
-#define DSI_RDBK_DATA_CTRL_CLR 0x00000001
-
-#define REG_DSI_VERSION 0x000001f0
-#define DSI_VERSION_MAJOR__MASK 0xff000000
-#define DSI_VERSION_MAJOR__SHIFT 24
-static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
-{
- return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
-}
-
-#define REG_DSI_CPHY_MODE_CTRL 0x000002d4
-
-#define REG_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x0000029c
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK 0xffff0000
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT 16
-static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(uint32_t val)
-{
- return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK;
-}
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK 0x00003f00
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT 8
-static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(uint32_t val)
-{
- return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK;
-}
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK 0x000000c0
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT 6
-static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(uint32_t val)
-{
- return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK;
-}
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK 0x00000030
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT 4
-static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(uint32_t val)
-{
- return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK;
-}
-#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EN 0x00000001
-
-#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x000002a4
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK 0x3f000000
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT 24
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK 0x00c00000
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT 22
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK 0x00300000
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT 20
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EN 0x00010000
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK 0x00003f00
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT 8
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK 0x000000c0
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT 6
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK 0x00000030
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT 4
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EN 0x00000001
-
-#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x000002a8
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK 0xffff0000
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT 16
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK;
-}
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK 0x0000ffff
-#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT 0
-static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(uint32_t val)
-{
- return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
-}
-
-
-#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9d86a6aca6f2..a50f4dda5941 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -55,7 +55,7 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
* scratch register which we never touch)
*/
- ver = msm_readl(base + REG_DSI_VERSION);
+ ver = readl(base + REG_DSI_VERSION);
if (ver) {
/* older dsi host, there is no register shift */
ver = FIELD(ver, DSI_VERSION_MAJOR);
@@ -73,12 +73,12 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
* registers are shifted down, read DSI_VERSION again with
* the shifted offset
*/
- ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+ ver = readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver == MSM_DSI_VER_MAJOR_6G) {
/* 6G version */
*major = ver;
- *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
+ *minor = readl(base + REG_DSI_6G_HW_VERSION);
return 0;
} else {
return -EINVAL;
@@ -186,11 +186,11 @@ struct msm_dsi_host {
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
- return msm_readl(msm_host->ctrl_base + reg);
+ return readl(msm_host->ctrl_base + reg);
}
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
- msm_writel(data, msm_host->ctrl_base + reg);
+ writel(data, msm_host->ctrl_base + reg);
}
static const struct msm_dsi_cfg_handler *dsi_get_config(
@@ -356,8 +356,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%lu",
- msm_host->mode->clock, msm_host->byte_clk_rate);
+ DBG("Set clk rates: pclk=%lu, byteclk=%lu",
+ msm_host->pixel_clk_rate, msm_host->byte_clk_rate);
ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
msm_host->byte_clk_rate);
@@ -430,9 +430,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
- msm_host->mode->clock, msm_host->byte_clk_rate,
- msm_host->esc_clk_rate, msm_host->src_clk_rate);
+ DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+ msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
+ msm_host->esc_clk_rate, msm_host->src_clk_rate);
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index af2a287cb3bd..5b3f3068fd92 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -423,7 +423,18 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
return msm_dsi_host_check_dsc(host, mode);
}
+static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+ return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ bridge, flags);
+}
+
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+ .attach = dsi_mgr_bridge_attach,
.pre_enable = dsi_mgr_bridge_pre_enable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
@@ -431,17 +442,19 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
};
/* initialize bridge */
-struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi,
- struct drm_encoder *encoder)
+int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
+ struct drm_encoder *encoder)
{
+ struct drm_device *dev = msm_dsi->dev;
struct drm_bridge *bridge;
struct dsi_bridge *dsi_bridge;
+ struct drm_connector *connector;
int ret;
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_bridge), GFP_KERNEL);
if (!dsi_bridge)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
dsi_bridge->id = msm_dsi->id;
@@ -450,60 +463,22 @@ struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi,
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
- return ERR_PTR(ret);
-
- return bridge;
-}
-
-int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_device *dev = msm_dsi->dev;
- struct drm_encoder *encoder;
- struct drm_bridge *ext_bridge;
- int ret;
+ return ret;
- ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
- msm_dsi->pdev->dev.of_node, 1, 0);
- if (IS_ERR(ext_bridge))
- return PTR_ERR(ext_bridge);
-
- encoder = int_bridge->encoder;
-
- /*
- * Try first to create the bridge without it creating its own
- * connector.. currently some bridges support this, and others
- * do not (and some support both modes)
- */
- ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret == -EINVAL) {
- /*
- * link the internal dsi bridge to the external bridge,
- * connector is created by the next bridge.
- */
- ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
- if (ret < 0)
- return ret;
- } else {
- struct drm_connector *connector;
-
- /* We are in charge of the connector, create one now. */
- connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- return PTR_ERR(connector);
- }
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return PTR_ERR(connector);
}
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
deleted file mode 100644
index a2ae8777e59e..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#ifndef DSI_PHY_10NM_XML
-#define DSI_PHY_10NM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
-
-#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
-
-#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
-
-#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
-
-#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
-
-#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
-
-#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
-
-#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
-
-#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
-
-static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
-
-#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
-
-#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
-
-#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
-
-#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
-
-#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
-
-#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
-
-#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
-
-#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
-
-#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
-
-#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
-
-#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
-
-#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
-
-#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
-
-#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
-
-#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
-
-#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
-
-#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
-
-#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
-
-#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
-
-#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
-
-#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
-
-
-#endif /* DSI_PHY_10NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
deleted file mode 100644
index 24e2fdc0cde1..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
+++ /dev/null
@@ -1,309 +0,0 @@
-#ifndef DSI_PHY_14NM_XML
-#define DSI_PHY_14NM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x0000000f
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
-}
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
-static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
-}
-
-#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
-#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
-
-#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
-#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
-
-#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
-
-#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
-
-#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
-
-#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
-#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
-
-#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
-#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
-#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
-#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
-static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
-
-#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
-
-#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
-
-#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
-
-#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
-
-#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
-
-#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
-
-#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
-
-#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
-
-#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
-
-#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
-
-#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
-
-#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
-
-#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
-
-#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
-
-#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
-
-#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
-
-#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
-
-#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
-
-#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
-
-#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
-
-#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
-
-#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
-
-#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
-
-#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
-
-#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
-
-#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
-
-#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
-
-
-#endif /* DSI_PHY_14NM_XML */
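Aside: every *_MASK/*_SHIFT pair and inline helper in the removed headers above follows the same pack-a-value-into-a-bitfield pattern. A minimal standalone sketch, using the 14nm CLK_CFG0 divider helpers as shown in the deleted header, of how two such fields compose one register value; the pack_clk_cfg0() wrapper and the stdint-only build are illustrative assumptions, not code from the removed driver:

#include <stdint.h>

/* Offsets and field helpers as defined in the removed dsi_phy_14nm.xml.h */
#define REG_DSI_14nm_PHY_CMN_CLK_CFG0                 0x00000010
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK  0x0000000f
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 0
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK  0x000000f0
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4

static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
{
	/* shift the value into place, then mask off anything outside bits 3:0 */
	return (val << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) &
	       DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
}

static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
{
	/* same pattern for bits 7:4 */
	return (val << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) &
	       DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
}

/* Hypothetical helper: build the CLK_CFG0 register image from two 4-bit dividers. */
static uint32_t pack_clk_cfg0(uint32_t lo_div, uint32_t hi_div)
{
	return DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(lo_div) |
	       DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(hi_div);
}

For example, pack_clk_cfg0(0x2, 0x3) yields 0x32, which a driver would then write to the register at offset REG_DSI_14nm_PHY_CMN_CLK_CFG0 via its MMIO accessor of choice.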
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
deleted file mode 100644
index 6352541f37e9..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
+++ /dev/null
@@ -1,237 +0,0 @@
-#ifndef DSI_PHY_20NM_XML
-#define DSI_PHY_20NM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
-
-#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
-#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
-
-#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
-
-#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
-
-#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
-#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
-
-#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
-
-
-#endif /* DSI_PHY_20NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
deleted file mode 100644
index 178bd4fd7893..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
+++ /dev/null
@@ -1,384 +0,0 @@
-#ifndef DSI_PHY_28NM_XML
-#define DSI_PHY_28NM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
-
-#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
-#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
-
-#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
-
-#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
-
-#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
-#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
-
-#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
-
-#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010
-#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002
-
-#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
-}
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
-}
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040
-#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff
-#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044
-#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff
-#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc
-
-#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0
-#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
-
-
-#endif /* DSI_PHY_28NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
deleted file mode 100644
index 5f900bb53519..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
+++ /dev/null
@@ -1,286 +0,0 @@
-#ifndef DSI_PHY_28NM_8960_XML
-#define DSI_PHY_28NM_8960_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c
-
-#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050
-#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000
-#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050
-
-#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080
-#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001
-
-
-#endif /* DSI_PHY_28NM_8960_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
deleted file mode 100644
index 584cbd0205ef..000000000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
+++ /dev/null
@@ -1,483 +0,0 @@
-#ifndef DSI_PHY_7NM_XML
-#define DSI_PHY_7NM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
-
-#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
-
-#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
-
-#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
-
-#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
-
-#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
-
-#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
-
-#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
-
-#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
-
-#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10 0x000001ac
-
-static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
-
-#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
-
-#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
-
-#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
-
-#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
-
-#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
-
-#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
-
-#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
-
-#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
-
-#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
-
-#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
-
-#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
-
-#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
-
-#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
-
-#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
-
-#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
-
-#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
-
-#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
-
-#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
-
-#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
-
-#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
-
-#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
-
-#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
-
-#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
-
-#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
-
-#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
-
-#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
-
-#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
-
-#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
-
-#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
-
-#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
-
-#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
-
-#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
-
-#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
-
-#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
-
-#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
-
-#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
-
-#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
-
-#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
-
-#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
-
-#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
-
-#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
-
-#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
-
-#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
-
-#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
-
-#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
-
-#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
-
-#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
-
-#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
-
-#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
-
-#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
-
-#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
-
-#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
-
-#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
-
-#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
-
-#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
-
-#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
-
-
-#endif /* DSI_PHY_7NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
deleted file mode 100644
index 7062f7164216..000000000000
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef MMSS_CC_XML
-#define MMSS_CC_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum mmss_cc_clk {
- CLK = 0,
- PCLK = 1,
-};
-
-#define REG_MMSS_CC_AHB 0x00000008
-
-static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
-{
- switch (idx) {
- case CLK: return 0x0000004c;
- case PCLK: return 0x00000130;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
-
-static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
-#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
-#define MMSS_CC_CLK_CC_MND_EN 0x00000020
-#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
-#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
-static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
-}
-#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
-#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
-static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
-}
-
-static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
-#define MMSS_CC_CLK_MD_D__SHIFT 0
-static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
-}
-#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
-#define MMSS_CC_CLK_MD_M__SHIFT 8
-static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
-}
-
-static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
-#define MMSS_CC_CLK_NS_SRC__SHIFT 0
-static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
-}
-#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
-#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
-static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
-}
-#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
-#define MMSS_CC_CLK_NS_VAL__SHIFT 24
-static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
-}
-
-#define REG_MMSS_CC_DSI2_PIXEL_CC 0x00000094
-
-#define REG_MMSS_CC_DSI2_PIXEL_NS 0x000000e4
-
-#define REG_MMSS_CC_DSI2_PIXEL_CC2 0x00000264
-
-
-#endif /* MMSS_CC_XML */
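
Editor's note, not part of the patch: the field helpers in these generated headers all follow the same shift-and-mask pattern seen in MMSS_CC_CLK_MD_M()/MMSS_CC_CLK_MD_D() above. A minimal, self-contained sketch of that composition, with arbitrary placeholder divider values:

#include <stdint.h>

/* Same shift/mask composition the MMSS_CC_CLK_MD_* helpers perform:
 * M occupies bits 15:8, D bits 7:0.  0x20 and 0x1f below are arbitrary
 * placeholder divider values, not taken from the driver. */
static inline uint32_t example_mnd_md(uint32_t m, uint32_t d)
{
	return ((m << 8) & 0x0000ff00) | ((d << 0) & 0x000000ff);
}
/* e.g. example_mnd_md(0x20, 0x1f) == 0x201f */
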
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index e4275d3ad581..5a5dc3faa971 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -12,10 +12,10 @@
#include "dsi.h"
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-#define dsi_phy_write_udelay(offset, data, delay_us) { msm_writel((data), (offset)); udelay(delay_us); }
-#define dsi_phy_write_ndelay(offset, data, delay_ns) { msm_writel((data), (offset)); ndelay(delay_ns); }
+#define dsi_phy_read(offset) readl((offset))
+#define dsi_phy_write(offset, data) writel((data), (offset))
+#define dsi_phy_write_udelay(offset, data, delay_us) { writel((data), (offset)); udelay(delay_us); }
+#define dsi_phy_write_ndelay(offset, data, delay_ns) { writel((data), (offset)); ndelay(delay_ns); }
struct msm_dsi_phy_ops {
int (*pll_init)(struct msm_dsi_phy *phy);
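
Editor's note, not part of the patch: the hunk above replaces the driver-local msm_readl()/msm_writel() wrappers in the dsi_phy accessor macros with the generic MMIO helpers readl()/writel(). A minimal sketch of what the reworked macros amount to in use; the register offsets and values are placeholders, and 'base' is a hypothetical __iomem pointer to the PHY register block.

#include <linux/io.h>     /* readl(), writel() */
#include <linux/delay.h>  /* udelay() */

static void example_phy_write(void __iomem *base)
{
	/* dsi_phy_write(addr, val) now expands to writel(val, addr) */
	writel(0x0, base + 0xb4);

	/* dsi_phy_write_udelay() additionally busy-waits after the store */
	writel(0x1, base + 0x1c);
	udelay(1);
}
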
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
deleted file mode 100644
index 344a1a1620cd..000000000000
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef SFPB_XML
-#define SFPB_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum sfpb_ahb_arb_master_port_en {
- SFPB_MASTER_PORT_ENABLE = 3,
- SFPB_MASTER_PORT_DISABLE = 0,
-};
-
-#define REG_SFPB_GPREG 0x00000058
-#define SFPB_GPREG_MASTER_PORT_EN__MASK 0x00001800
-#define SFPB_GPREG_MASTER_PORT_EN__SHIFT 11
-static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val)
-{
- return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK;
-}
-
-
-#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c8ebd75176bb..24abcb7254cc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -5,8 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index ec5786440391..4586baf36415 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -115,17 +115,17 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
- msm_writel(data, hdmi->mmio + reg);
+ writel(data, hdmi->mmio + reg);
}
static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
{
- return msm_readl(hdmi->mmio + reg);
+ return readl(hdmi->mmio + reg);
}
static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
{
- return msm_readl(hdmi->qfprom_mmio + reg);
+ return readl(hdmi->qfprom_mmio + reg);
}
/*
@@ -166,12 +166,12 @@ struct hdmi_phy {
static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
{
- msm_writel(data, phy->mmio + reg);
+ writel(data, phy->mmio + reg);
}
static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
{
- return msm_readl(phy->mmio + reg);
+ return readl(phy->mmio + reg);
}
int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
deleted file mode 100644
index 973b460486a5..000000000000
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ /dev/null
@@ -1,1399 +0,0 @@
-#ifndef HDMI_XML
-#define HDMI_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum hdmi_hdcp_key_state {
- HDCP_KEYS_STATE_NO_KEYS = 0,
- HDCP_KEYS_STATE_NOT_CHECKED = 1,
- HDCP_KEYS_STATE_CHECKING = 2,
- HDCP_KEYS_STATE_VALID = 3,
- HDCP_KEYS_STATE_AKSV_NOT_VALID = 4,
- HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5,
- HDCP_KEYS_STATE_PROD_AKSV = 6,
- HDCP_KEYS_STATE_RESERVED = 7,
-};
-
-enum hdmi_ddc_read_write {
- DDC_WRITE = 0,
- DDC_READ = 1,
-};
-
-enum hdmi_acr_cts {
- ACR_NONE = 0,
- ACR_32 = 1,
- ACR_44 = 2,
- ACR_48 = 3,
-};
-
-#define REG_HDMI_CTRL 0x00000000
-#define HDMI_CTRL_ENABLE 0x00000001
-#define HDMI_CTRL_HDMI 0x00000002
-#define HDMI_CTRL_ENCRYPTED 0x00000004
-
-#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
-#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
-
-#define REG_HDMI_ACR_PKT_CTRL 0x00000024
-#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
-#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
-#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
-#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
-static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
-{
- return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
-}
-#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
-#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
-#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
-static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
-{
- return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
-}
-#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
-
-#define REG_HDMI_VBI_PKT_CTRL 0x00000028
-#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
-#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
-#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
-#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
-#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
-#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
-
-#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
-#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
-#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
-#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
-#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
-#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
-#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
-
-#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
-#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK 0x0000003f
-#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT 0
-static inline uint32_t HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(uint32_t val)
-{
- return ((val) << HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
-}
-#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK 0x00003f00
-#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT 8
-static inline uint32_t HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE(uint32_t val)
-{
- return ((val) << HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
-}
-#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK 0x003f0000
-#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT 16
-static inline uint32_t HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE(uint32_t val)
-{
- return ((val) << HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK;
-}
-#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK 0x3f000000
-#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT 24
-static inline uint32_t HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE(uint32_t val)
-{
- return ((val) << HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK;
-}
-
-#define REG_HDMI_GEN_PKT_CTRL 0x00000034
-#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
-#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
-#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
-#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
-static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
-{
- return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
-}
-#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
-#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
-#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
-#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
-static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
-{
- return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
-}
-#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
-#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
-static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
-{
- return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
-}
-
-#define REG_HDMI_GC 0x00000040
-#define HDMI_GC_MUTE 0x00000001
-
-#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
-#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
-#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
-
-static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
-
-#define REG_HDMI_GENERIC0_HDR 0x00000084
-
-static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
-
-#define REG_HDMI_GENERIC1_HDR 0x000000a4
-
-static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
-
-static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
-
-static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
-#define HDMI_ACR_0_CTS__MASK 0xfffff000
-#define HDMI_ACR_0_CTS__SHIFT 12
-static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
-{
- return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
-}
-
-static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
-#define HDMI_ACR_1_N__MASK 0xffffffff
-#define HDMI_ACR_1_N__SHIFT 0
-static inline uint32_t HDMI_ACR_1_N(uint32_t val)
-{
- return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
-}
-
-#define REG_HDMI_AUDIO_INFO0 0x000000e4
-#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
-#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
-static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
-{
- return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
-}
-#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
-#define HDMI_AUDIO_INFO0_CC__SHIFT 8
-static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
-{
- return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
-}
-
-#define REG_HDMI_AUDIO_INFO1 0x000000e8
-#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
-#define HDMI_AUDIO_INFO1_CA__SHIFT 0
-static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
-{
- return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
-}
-#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
-#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
-static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
-{
- return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
-}
-#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
-
-#define REG_HDMI_HDCP_CTRL 0x00000110
-#define HDMI_HDCP_CTRL_ENABLE 0x00000001
-#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
-
-#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114
-#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004
-
-#define REG_HDMI_HDCP_INT_CTRL 0x00000118
-#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001
-#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002
-#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004
-#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010
-#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020
-#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040
-#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000
-#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000
-
-#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
-#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
-#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
-#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000
-#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000
-#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
-#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
-static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
-{
- return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
-}
-
-#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120
-#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001
-
-#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124
-#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001
-
-#define REG_HDMI_HDCP_DDC_STATUS 0x00000128
-#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010
-#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400
-#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000
-#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000
-#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000
-#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000
-#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000
-
-#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c
-
-#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c
-
-#define REG_HDMI_HDCP_RESET 0x00000130
-#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
-
-#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134
-
-#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138
-
-#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c
-
-#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140
-
-#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144
-
-#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148
-
-#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c
-
-#define REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150
-
-#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154
-
-#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158
-
-#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c
-
-#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160
-
-#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164
-
-#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168
-
-#define REG_HDMI_VENSPEC_INFO0 0x0000016c
-
-#define REG_HDMI_VENSPEC_INFO1 0x00000170
-
-#define REG_HDMI_VENSPEC_INFO2 0x00000174
-
-#define REG_HDMI_VENSPEC_INFO3 0x00000178
-
-#define REG_HDMI_VENSPEC_INFO4 0x0000017c
-
-#define REG_HDMI_VENSPEC_INFO5 0x00000180
-
-#define REG_HDMI_VENSPEC_INFO6 0x00000184
-
-#define REG_HDMI_AUDIO_CFG 0x000001d0
-#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
-#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
-#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
-static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
-{
- return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
-}
-
-#define REG_HDMI_USEC_REFTIMER 0x00000208
-
-#define REG_HDMI_DDC_CTRL 0x0000020c
-#define HDMI_DDC_CTRL_GO 0x00000001
-#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
-#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
-#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
-#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
-#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
-static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
-{
- return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
-}
-
-#define REG_HDMI_DDC_ARBITRATION 0x00000210
-#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
-
-#define REG_HDMI_DDC_INT_CTRL 0x00000214
-#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
-#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
-#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
-
-#define REG_HDMI_DDC_SW_STATUS 0x00000218
-#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
-#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
-#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
-#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
-
-#define REG_HDMI_DDC_HW_STATUS 0x0000021c
-#define HDMI_DDC_HW_STATUS_DONE 0x00000008
-
-#define REG_HDMI_DDC_SPEED 0x00000220
-#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
-#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
-static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
-{
- return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
-}
-#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
-#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
-static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
-{
- return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
-}
-
-#define REG_HDMI_DDC_SETUP 0x00000224
-#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
-#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
-static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
-{
- return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
-}
-
-static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
-
-static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
-#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
-#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
-static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
-{
- return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
-}
-#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
-#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
-#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
-#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
-#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
-static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
-{
- return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
-}
-
-#define REG_HDMI_DDC_DATA 0x00000238
-#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
-#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
-static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
-{
- return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
-}
-#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
-#define HDMI_DDC_DATA_DATA__SHIFT 8
-static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
-{
- return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
-}
-#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
-#define HDMI_DDC_DATA_INDEX__SHIFT 16
-static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
-{
- return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
-}
-#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
-
-#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c
-
-#define REG_HDMI_HDCP_SHA_STATUS 0x00000240
-#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001
-#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010
-
-#define REG_HDMI_HDCP_SHA_DATA 0x00000244
-#define HDMI_HDCP_SHA_DATA_DONE 0x00000001
-
-#define REG_HDMI_HPD_INT_STATUS 0x00000250
-#define HDMI_HPD_INT_STATUS_INT 0x00000001
-#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
-
-#define REG_HDMI_HPD_INT_CTRL 0x00000254
-#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
-#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
-#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
-#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
-#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
-#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
-
-#define REG_HDMI_HPD_CTRL 0x00000258
-#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
-#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
-static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
-{
- return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
-}
-#define HDMI_HPD_CTRL_ENABLE 0x10000000
-
-#define REG_HDMI_DDC_REF 0x0000027c
-#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
-#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
-#define HDMI_DDC_REF_REFTIMER__SHIFT 0
-static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
-{
- return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
-}
-
-#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284
-
-#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288
-
-#define REG_HDMI_CEC_CTRL 0x0000028c
-
-#define REG_HDMI_CEC_WR_DATA 0x00000290
-
-#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294
-
-#define REG_HDMI_CEC_STATUS 0x00000298
-
-#define REG_HDMI_CEC_INT 0x0000029c
-
-#define REG_HDMI_CEC_ADDR 0x000002a0
-
-#define REG_HDMI_CEC_TIME 0x000002a4
-
-#define REG_HDMI_CEC_REFTIMER 0x000002a8
-
-#define REG_HDMI_CEC_RD_DATA 0x000002ac
-
-#define REG_HDMI_CEC_RD_FILTER 0x000002b0
-
-#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
-#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
-#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
-static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
-{
- return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
-}
-#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
-#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
-static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
-{
- return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
-}
-
-#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
-#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
-#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
-static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
-{
- return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
-}
-#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
-#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
-static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
-{
- return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
-}
-
-#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
-#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
-#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
-static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
-{
- return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
-}
-#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
-#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
-static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
-{
- return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
-}
-
-#define REG_HDMI_TOTAL 0x000002c0
-#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
-#define HDMI_TOTAL_H_TOTAL__SHIFT 0
-static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
-{
- return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
-}
-#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
-#define HDMI_TOTAL_V_TOTAL__SHIFT 16
-static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
-{
- return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
-}
-
-#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
-#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
-#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
-static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
-{
- return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
-}
-
-#define REG_HDMI_FRAME_CTRL 0x000002c8
-#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
-#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
-#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
-#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
-
-#define REG_HDMI_AUD_INT 0x000002cc
-#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
-#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
-#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
-#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
-
-#define REG_HDMI_PHY_CTRL 0x000002d4
-#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
-#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
-#define HDMI_PHY_CTRL_SW_RESET 0x00000004
-#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
-
-#define REG_HDMI_CEC_WR_RANGE 0x000002dc
-
-#define REG_HDMI_CEC_RD_RANGE 0x000002e0
-
-#define REG_HDMI_VERSION 0x000002e4
-
-#define REG_HDMI_CEC_COMPL_CTL 0x00000360
-
-#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
-
-#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
-
-#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
-
-#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
-
-#define REG_HDMI_8x60_PHY_REG0 0x00000000
-#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
-#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
-static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
-{
- return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
-}
-
-#define REG_HDMI_8x60_PHY_REG1 0x00000004
-#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
-#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
-static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
-{
- return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
-}
-#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
-#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
-static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
-{
- return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
-}
-
-#define REG_HDMI_8x60_PHY_REG2 0x00000008
-#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
-#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
-#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
-#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
-#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
-#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
-#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
-#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
-
-#define REG_HDMI_8x60_PHY_REG3 0x0000000c
-#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
-
-#define REG_HDMI_8x60_PHY_REG4 0x00000010
-
-#define REG_HDMI_8x60_PHY_REG5 0x00000014
-
-#define REG_HDMI_8x60_PHY_REG6 0x00000018
-
-#define REG_HDMI_8x60_PHY_REG7 0x0000001c
-
-#define REG_HDMI_8x60_PHY_REG8 0x00000020
-
-#define REG_HDMI_8x60_PHY_REG9 0x00000024
-
-#define REG_HDMI_8x60_PHY_REG10 0x00000028
-
-#define REG_HDMI_8x60_PHY_REG11 0x0000002c
-
-#define REG_HDMI_8x60_PHY_REG12 0x00000030
-#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
-#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
-#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
-
-#define REG_HDMI_8960_PHY_REG0 0x00000000
-
-#define REG_HDMI_8960_PHY_REG1 0x00000004
-
-#define REG_HDMI_8960_PHY_REG2 0x00000008
-
-#define REG_HDMI_8960_PHY_REG3 0x0000000c
-
-#define REG_HDMI_8960_PHY_REG4 0x00000010
-
-#define REG_HDMI_8960_PHY_REG5 0x00000014
-
-#define REG_HDMI_8960_PHY_REG6 0x00000018
-
-#define REG_HDMI_8960_PHY_REG7 0x0000001c
-
-#define REG_HDMI_8960_PHY_REG8 0x00000020
-
-#define REG_HDMI_8960_PHY_REG9 0x00000024
-
-#define REG_HDMI_8960_PHY_REG10 0x00000028
-
-#define REG_HDMI_8960_PHY_REG11 0x0000002c
-
-#define REG_HDMI_8960_PHY_REG12 0x00000030
-#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
-#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
-
-#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034
-
-#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038
-
-#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c
-
-#define REG_HDMI_8960_PHY_REG13 0x00000040
-
-#define REG_HDMI_8960_PHY_REG14 0x00000044
-
-#define REG_HDMI_8960_PHY_REG15 0x00000048
-
-#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000
-
-#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004
-
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008
-
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c
-
-#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010
-
-#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014
-
-#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018
-#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
-#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
-
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c
-
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020
-
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024
-
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028
-
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c
-
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030
-
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034
-
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038
-
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c
-
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040
-
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044
-
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064
-
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068
-
-#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c
-
-#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070
-
-#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074
-
-#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078
-
-#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c
-
-#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080
-
-#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084
-
-#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088
-
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c
-
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090
-
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094
-
-#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098
-#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
-
-#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c
-
-#define REG_HDMI_8x74_ANA_CFG0 0x00000000
-
-#define REG_HDMI_8x74_ANA_CFG1 0x00000004
-
-#define REG_HDMI_8x74_ANA_CFG2 0x00000008
-
-#define REG_HDMI_8x74_ANA_CFG3 0x0000000c
-
-#define REG_HDMI_8x74_PD_CTRL0 0x00000010
-
-#define REG_HDMI_8x74_PD_CTRL1 0x00000014
-
-#define REG_HDMI_8x74_GLB_CFG 0x00000018
-
-#define REG_HDMI_8x74_DCC_CFG0 0x0000001c
-
-#define REG_HDMI_8x74_DCC_CFG1 0x00000020
-
-#define REG_HDMI_8x74_TXCAL_CFG0 0x00000024
-
-#define REG_HDMI_8x74_TXCAL_CFG1 0x00000028
-
-#define REG_HDMI_8x74_TXCAL_CFG2 0x0000002c
-
-#define REG_HDMI_8x74_TXCAL_CFG3 0x00000030
-
-#define REG_HDMI_8x74_BIST_CFG0 0x00000034
-
-#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
-
-#define REG_HDMI_8x74_BIST_PATN1 0x00000040
-
-#define REG_HDMI_8x74_BIST_PATN2 0x00000044
-
-#define REG_HDMI_8x74_BIST_PATN3 0x00000048
-
-#define REG_HDMI_8x74_STATUS 0x0000005c
-
-#define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-
-#define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_HDMI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_HDMI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_HDMI_28nm_PHY_PLL_VREG_CFG 0x00000010
-
-#define REG_HDMI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_HDMI_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_HDMI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_HDMI_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_HDMI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_HDMI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_HDMI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_HDMI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_HDMI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_HDMI_28nm_PHY_PLL_SDM_CFG0 0x00000038
-
-#define REG_HDMI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-
-#define REG_HDMI_28nm_PHY_PLL_SDM_CFG2 0x00000040
-
-#define REG_HDMI_28nm_PHY_PLL_SDM_CFG3 0x00000044
-
-#define REG_HDMI_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_HDMI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_HDMI_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_HDMI_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_HDMI_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_HDMI_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define HDMI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_HDMI_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_HDMI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-#define REG_HDMI_28nm_PHY_PLL_STATUS 0x000000c0
-
-#define REG_HDMI_8996_PHY_CFG 0x00000000
-
-#define REG_HDMI_8996_PHY_PD_CTL 0x00000004
-
-#define REG_HDMI_8996_PHY_MODE 0x00000008
-
-#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014
-
-#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018
-
-#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c
-
-#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 0x00000030
-
-#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c
-
-#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040
-
-#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044
-
-#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048
-
-#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c
-
-#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050
-
-#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054
-
-#define REG_HDMI_8996_PHY_CLOCK 0x00000058
-
-#define REG_HDMI_8996_PHY_MISC1 0x0000005c
-
-#define REG_HDMI_8996_PHY_MISC2 0x00000060
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068
-
-#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074
-
-#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078
-
-#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c
-
-#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080
-
-#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084
-
-#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088
-
-#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c
-
-#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090
-
-#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094
-
-#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098
-
-#define REG_HDMI_8996_PHY_STATUS 0x0000009c
-
-#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0
-
-#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4
-
-#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8
-
-#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac
-
-#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0
-
-#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4
-
-#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8
-
-#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc
-
-#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0
-
-#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4
-
-#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000
-
-#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004
-
-#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008
-
-#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024
-
-#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028
-
-#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c
-
-#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030
-
-#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034
-
-#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038
-
-#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c
-
-#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068
-
-#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c
-
-#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c
-
-#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070
-
-#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074
-
-#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078
-
-#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c
-
-#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c
-
-#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0
-
-#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4
-
-#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8
-
-#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8
-
-#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac
-
-#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0
-
-#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4
-
-#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8
-
-#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc
-
-#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0
-
-#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8
-
-#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc
-
-#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0
-
-#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4
-
-#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8
-
-#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8
-
-#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c
-
-#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144
-
-#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148
-
-#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c
-
-#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150
-
-#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154
-
-#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c
-
-#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160
-
-#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164
-
-#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168
-
-#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c
-
-#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170
-
-#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174
-
-#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178
-
-#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c
-
-#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180
-
-#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184
-
-#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188
-
-#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c
-
-#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198
-
-#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c
-
-#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0
-
-#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4
-
-#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8
-
-#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac
-
-#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8
-
-#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc
-
-#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0
-
-#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 0x000000c0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c
-
-#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110
-
-
-#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 4dd055416620..8c8d80b59573 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -86,18 +86,18 @@ static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll)
static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset,
u32 data)
{
- msm_writel(data, pll->mmio_qserdes_com + offset);
+ writel(data, pll->mmio_qserdes_com + offset);
}
static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset)
{
- return msm_readl(pll->mmio_qserdes_com + offset);
+ return readl(pll->mmio_qserdes_com + offset);
}
static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel,
int offset, int data)
{
- msm_writel(data, pll->mmio_qserdes_tx[channel] + offset);
+ writel(data, pll->mmio_qserdes_tx[channel] + offset);
}
static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index cb35a297afbd..83c8781fcc3f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -236,12 +236,12 @@ static const struct pll_rate freqtbl[] = {
static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data)
{
- msm_writel(data, pll->mmio + reg);
+ writel(data, pll->mmio + reg);
}
static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg)
{
- return msm_readl(pll->mmio + reg);
+ return readl(pll->mmio + reg);
}
static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
deleted file mode 100644
index 498801526695..000000000000
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef QFPROM_XML
-#define QFPROM_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
-#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
-#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
-
-
-#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 97790faffd23..9c33f4e3f822 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -17,8 +17,9 @@
#include "msm_drv.h"
#include "msm_debugfs.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
#include "msm_kms.h"
-#include "adreno/adreno_gpu.h"
/*
* MSM driver version:
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 65f213660452..912ebaa5df84 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -239,9 +239,7 @@ struct msm_drm_private {
bool disable_err_irq;
};
-struct msm_format {
- uint32_t pixel_format;
-};
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
struct msm_pending_timer;
@@ -488,15 +486,12 @@ void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
struct icc_path *msm_icc_get(struct device *dev, const char *name);
-#define msm_writel(data, addr) writel((data), (addr))
-#define msm_readl(addr) readl((addr))
-
static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
- u32 val = msm_readl(addr);
+ u32 val = readl(addr);
val &= ~mask;
- msm_writel(val | or, addr);
+ writel(val | or, addr);
}
/**
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e3f61c39df69..09268e416843 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@@ -176,16 +176,16 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
- drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
- mode_cmd, mode_cmd->width, mode_cmd->height,
- (char *)&mode_cmd->pixel_format);
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%p4cc)\n",
+ mode_cmd, mode_cmd->width, mode_cmd->height,
+ &mode_cmd->pixel_format);
n = info->num_planes;
- format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ format = mdp_get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
- DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
- (char *)&mode_cmd->pixel_format);
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %p4cc\n",
+ &mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
}
@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
- drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+ drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 175ee4ab8a6f..a5c6498a43f0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ static void put_pages(struct drm_gem_object *obj)
}
}
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
+static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -257,24 +257,24 @@ static void pin_obj_locked(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct page **p;
- msm_gem_lock(obj);
- p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ msm_gem_assert_locked(obj);
+
+ p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (!IS_ERR(p))
pin_obj_locked(obj);
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_unpin_pages(struct drm_gem_object *obj)
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
+
msm_gem_unpin_locked(obj);
- msm_gem_unlock(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
@@ -489,7 +489,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
msm_gem_assert_locked(obj);
- pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -703,7 +703,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (obj->import_attach)
return ERR_PTR(-ENODEV);
- pages = msm_gem_pin_pages_locked(obj, madv);
+ pages = msm_gem_get_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8d414b072c29..85f0257e83da 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -140,8 +140,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
-void msm_gem_unpin_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 0915f3b68752..ee267490c935 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -47,13 +47,23 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach)
- msm_gem_pin_pages(obj);
- return 0;
+ struct page **pages;
+ int ret = 0;
+
+ if (obj->import_attach)
+ return 0;
+
+ pages = msm_gem_pin_pages_locked(obj);
+ if (IS_ERR(pages))
+ ret = PTR_ERR(pages);
+
+ return ret;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach)
- msm_gem_unpin_pages(obj);
+ if (obj->import_attach)
+ return;
+
+ msm_gem_unpin_pages_locked(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 655002b21b0d..cd185b9636d2 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -11,7 +11,7 @@
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
-#include "adreno/adreno_gpu.h"
+//#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bfcb222e353..a0c1bd6d1d5b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -555,12 +555,12 @@ struct msm_gpu_state {
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
- msm_writel(data, gpu->mmio + (reg << 2));
+ writel(data, gpu->mmio + (reg << 2));
}
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
- return msm_readl(gpu->mmio + (reg << 2));
+ return readl(gpu->mmio + (reg << 2));
}
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
@@ -586,8 +586,8 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
* when the lo is read, so make sure to read the lo first to trigger
* that
*/
- val = (u64) msm_readl(gpu->mmio + (reg << 2));
- val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
+ val = (u64) readl(gpu->mmio + (reg << 2));
+ val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
return val;
}
@@ -595,8 +595,8 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
/* Why not a writeq here? Read the screed above */
- msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
- msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
+ writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 84c21ec2ceea..af6a6fcb1173 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 0641f6111b93..1e0c54de3716 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -92,10 +92,6 @@ struct msm_kms_funcs {
* Format handling:
*/
- /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
- const struct msm_format *(*get_format)(struct msm_kms *kms,
- const uint32_t format,
- const uint64_t modifiers);
/* do format checking on format modified through fb_cmd2 modifiers */
int (*check_modified_format)(const struct msm_kms *kms,
const struct msm_format *msm_fmt,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index eb72d3645c1d..88af4f490881 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -42,7 +42,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,10 +52,6 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
-void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
- dma_addr_t *tran_error);
-
-
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
diff --git a/drivers/gpu/drm/msm/registers/.gitignore b/drivers/gpu/drm/msm/registers/.gitignore
new file mode 100644
index 000000000000..848e0e3efbcb
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/.gitignore
@@ -0,0 +1,4 @@
+# ignore XML files present at Mesa but not used by the kernel
+adreno/adreno_control_regs.xml
+adreno/adreno_pipe_regs.xml
+adreno/ocmem.xml
diff --git a/drivers/gpu/drm/msm/registers/adreno/a2xx.xml b/drivers/gpu/drm/msm/registers/adreno/a2xx.xml
new file mode 100644
index 000000000000..22caddaa0db9
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a2xx.xml
@@ -0,0 +1,1865 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+
+<enum name="a2xx_rb_dither_type">
+ <value name="DITHER_PIXEL" value="0"/>
+ <value name="DITHER_SUBPIXEL" value="1"/>
+</enum>
+
+<enum name="a2xx_colorformatx">
+ <value name="COLORX_4_4_4_4" value="0"/>
+ <value name="COLORX_1_5_5_5" value="1"/>
+ <value name="COLORX_5_6_5" value="2"/>
+ <value name="COLORX_8" value="3"/>
+ <value name="COLORX_8_8" value="4"/>
+ <value name="COLORX_8_8_8_8" value="5"/>
+ <value name="COLORX_S8_8_8_8" value="6"/>
+ <value name="COLORX_16_FLOAT" value="7"/>
+ <value name="COLORX_16_16_FLOAT" value="8"/>
+ <value name="COLORX_16_16_16_16_FLOAT" value="9"/>
+ <value name="COLORX_32_FLOAT" value="10"/>
+ <value name="COLORX_32_32_FLOAT" value="11"/>
+ <value name="COLORX_32_32_32_32_FLOAT" value="12"/>
+ <value name="COLORX_2_3_3" value="13"/>
+ <value name="COLORX_8_8_8" value="14"/>
+</enum>
+
+<enum name="a2xx_sq_surfaceformat">
+ <value name="FMT_1_REVERSE" value="0"/>
+ <value name="FMT_1" value="1"/>
+ <value name="FMT_8" value="2"/>
+ <value name="FMT_1_5_5_5" value="3"/>
+ <value name="FMT_5_6_5" value="4"/>
+ <value name="FMT_6_5_5" value="5"/>
+ <value name="FMT_8_8_8_8" value="6"/>
+ <value name="FMT_2_10_10_10" value="7"/>
+ <value name="FMT_8_A" value="8"/>
+ <value name="FMT_8_B" value="9"/>
+ <value name="FMT_8_8" value="10"/>
+ <value name="FMT_Cr_Y1_Cb_Y0" value="11"/>
+ <value name="FMT_Y1_Cr_Y0_Cb" value="12"/>
+ <value name="FMT_5_5_5_1" value="13"/>
+ <value name="FMT_8_8_8_8_A" value="14"/>
+ <value name="FMT_4_4_4_4" value="15"/>
+ <value name="FMT_8_8_8" value="16"/>
+ <value name="FMT_DXT1" value="18"/>
+ <value name="FMT_DXT2_3" value="19"/>
+ <value name="FMT_DXT4_5" value="20"/>
+ <value name="FMT_10_10_10_2" value="21"/>
+ <value name="FMT_24_8" value="22"/>
+ <value name="FMT_16" value="24"/>
+ <value name="FMT_16_16" value="25"/>
+ <value name="FMT_16_16_16_16" value="26"/>
+ <value name="FMT_16_EXPAND" value="27"/>
+ <value name="FMT_16_16_EXPAND" value="28"/>
+ <value name="FMT_16_16_16_16_EXPAND" value="29"/>
+ <value name="FMT_16_FLOAT" value="30"/>
+ <value name="FMT_16_16_FLOAT" value="31"/>
+ <value name="FMT_16_16_16_16_FLOAT" value="32"/>
+ <value name="FMT_32" value="33"/>
+ <value name="FMT_32_32" value="34"/>
+ <value name="FMT_32_32_32_32" value="35"/>
+ <value name="FMT_32_FLOAT" value="36"/>
+ <value name="FMT_32_32_FLOAT" value="37"/>
+ <value name="FMT_32_32_32_32_FLOAT" value="38"/>
+ <value name="FMT_ATI_TC_RGB" value="39"/>
+ <value name="FMT_ATI_TC_RGBA" value="40"/>
+ <value name="FMT_ATI_TC_555_565_RGB" value="41"/>
+ <value name="FMT_ATI_TC_555_565_RGBA" value="42"/>
+ <value name="FMT_ATI_TC_RGBA_INTERP" value="43"/>
+ <value name="FMT_ATI_TC_555_565_RGBA_INTERP" value="44"/>
+ <value name="FMT_ETC1_RGBA_INTERP" value="46"/>
+ <value name="FMT_ETC1_RGB" value="47"/>
+ <value name="FMT_ETC1_RGBA" value="48"/>
+ <value name="FMT_DXN" value="49"/>
+ <value name="FMT_2_3_3" value="51"/>
+ <value name="FMT_2_10_10_10_AS_16_16_16_16" value="54"/>
+ <value name="FMT_10_10_10_2_AS_16_16_16_16" value="55"/>
+ <value name="FMT_32_32_32_FLOAT" value="57"/>
+ <value name="FMT_DXT3A" value="58"/>
+ <value name="FMT_DXT5A" value="59"/>
+ <value name="FMT_CTX1" value="60"/>
+</enum>
+
+<enum name="a2xx_sq_ps_vtx_mode">
+ <value name="POSITION_1_VECTOR" value="0"/>
+ <value name="POSITION_2_VECTORS_UNUSED" value="1"/>
+ <value name="POSITION_2_VECTORS_SPRITE" value="2"/>
+ <value name="POSITION_2_VECTORS_EDGE" value="3"/>
+ <value name="POSITION_2_VECTORS_KILL" value="4"/>
+ <value name="POSITION_2_VECTORS_SPRITE_KILL" value="5"/>
+ <value name="POSITION_2_VECTORS_EDGE_KILL" value="6"/>
+ <value name="MULTIPASS" value="7"/>
+</enum>
+
+<enum name="a2xx_sq_sample_cntl">
+ <value name="CENTROIDS_ONLY" value="0"/>
+ <value name="CENTERS_ONLY" value="1"/>
+ <value name="CENTROIDS_AND_CENTERS" value="2"/>
+</enum>
+
+<enum name="a2xx_dx_clip_space">
+ <value name="DXCLIP_OPENGL" value="0"/>
+ <value name="DXCLIP_DIRECTX" value="1"/>
+</enum>
+
+<enum name="a2xx_pa_su_sc_polymode">
+ <value name="POLY_DISABLED" value="0"/>
+ <value name="POLY_DUALMODE" value="1"/>
+</enum>
+
+<enum name="a2xx_rb_edram_mode">
+ <value name="EDRAM_NOP" value="0"/>
+ <value name="COLOR_DEPTH" value="4"/>
+ <value name="DEPTH_ONLY" value="5"/>
+ <value name="EDRAM_COPY" value="6"/>
+</enum>
+
+<enum name="a2xx_pa_sc_pattern_bit_order">
+ <value name="LITTLE" value="0"/>
+ <value name="BIG" value="1"/>
+</enum>
+
+<enum name="a2xx_pa_sc_auto_reset_cntl">
+ <value name="NEVER" value="0"/>
+ <value name="EACH_PRIMITIVE" value="1"/>
+ <value name="EACH_PACKET" value="2"/>
+</enum>
+
+<enum name="a2xx_pa_pixcenter">
+ <value name="PIXCENTER_D3D" value="0"/>
+ <value name="PIXCENTER_OGL" value="1"/>
+</enum>
+
+<enum name="a2xx_pa_roundmode">
+ <value name="TRUNCATE" value="0"/>
+ <value name="ROUND" value="1"/>
+ <value name="ROUNDTOEVEN" value="2"/>
+ <value name="ROUNDTOODD" value="3"/>
+</enum>
+
+<enum name="a2xx_pa_quantmode">
+ <value name="ONE_SIXTEENTH" value="0"/>
+ <value name="ONE_EIGTH" value="1"/>
+ <value name="ONE_QUARTER" value="2"/>
+ <value name="ONE_HALF" value="3"/>
+ <value name="ONE" value="4"/>
+</enum>
+
+<enum name="a2xx_rb_copy_sample_select">
+ <value name="SAMPLE_0" value="0"/>
+ <value name="SAMPLE_1" value="1"/>
+ <value name="SAMPLE_2" value="2"/>
+ <value name="SAMPLE_3" value="3"/>
+ <value name="SAMPLE_01" value="4"/>
+ <value name="SAMPLE_23" value="5"/>
+ <value name="SAMPLE_0123" value="6"/>
+</enum>
+
+<enum name="a2xx_rb_blend_opcode">
+ <value name="BLEND2_DST_PLUS_SRC" value="0"/>
+ <value name="BLEND2_SRC_MINUS_DST" value="1"/>
+ <value name="BLEND2_MIN_DST_SRC" value="2"/>
+ <value name="BLEND2_MAX_DST_SRC" value="3"/>
+ <value name="BLEND2_DST_MINUS_SRC" value="4"/>
+ <value name="BLEND2_DST_PLUS_SRC_BIAS" value="5"/>
+</enum>
+
+<enum name="a2xx_su_perfcnt_select">
+ <value value="0" name="PERF_PAPC_PASX_REQ"/>
+ <value value="2" name="PERF_PAPC_PASX_FIRST_VECTOR"/>
+ <value value="3" name="PERF_PAPC_PASX_SECOND_VECTOR"/>
+ <value value="4" name="PERF_PAPC_PASX_FIRST_DEAD"/>
+ <value value="5" name="PERF_PAPC_PASX_SECOND_DEAD"/>
+ <value value="6" name="PERF_PAPC_PASX_VTX_KILL_DISCARD"/>
+ <value value="7" name="PERF_PAPC_PASX_VTX_NAN_DISCARD"/>
+ <value value="8" name="PERF_PAPC_PA_INPUT_PRIM"/>
+ <value value="9" name="PERF_PAPC_PA_INPUT_NULL_PRIM"/>
+ <value value="10" name="PERF_PAPC_PA_INPUT_EVENT_FLAG"/>
+ <value value="11" name="PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT"/>
+ <value value="12" name="PERF_PAPC_PA_INPUT_END_OF_PACKET"/>
+ <value value="13" name="PERF_PAPC_CLPR_CULL_PRIM"/>
+ <value value="15" name="PERF_PAPC_CLPR_VV_CULL_PRIM"/>
+ <value value="17" name="PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM"/>
+ <value value="18" name="PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM"/>
+ <value value="19" name="PERF_PAPC_CLPR_CULL_TO_NULL_PRIM"/>
+ <value value="21" name="PERF_PAPC_CLPR_VV_CLIP_PRIM"/>
+ <value value="23" name="PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE"/>
+ <value value="24" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_1"/>
+ <value value="25" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_2"/>
+ <value value="26" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_3"/>
+ <value value="27" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_4"/>
+ <value value="28" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_5"/>
+ <value value="29" name="PERF_PAPC_CLPR_CLIP_PLANE_CNT_6"/>
+ <value value="30" name="PERF_PAPC_CLPR_CLIP_PLANE_NEAR"/>
+ <value value="31" name="PERF_PAPC_CLPR_CLIP_PLANE_FAR"/>
+ <value value="32" name="PERF_PAPC_CLPR_CLIP_PLANE_LEFT"/>
+ <value value="33" name="PERF_PAPC_CLPR_CLIP_PLANE_RIGHT"/>
+ <value value="34" name="PERF_PAPC_CLPR_CLIP_PLANE_TOP"/>
+ <value value="35" name="PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM"/>
+ <value value="36" name="PERF_PAPC_CLSM_NULL_PRIM"/>
+ <value value="37" name="PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM"/>
+ <value value="38" name="PERF_PAPC_CLSM_CLIP_PRIM"/>
+ <value value="39" name="PERF_PAPC_CLSM_CULL_TO_NULL_PRIM"/>
+ <value value="40" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_1"/>
+ <value value="41" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_2"/>
+ <value value="42" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_3"/>
+ <value value="43" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_4"/>
+ <value value="44" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_5"/>
+ <value value="45" name="PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7"/>
+ <value value="46" name="PERF_PAPC_CLSM_NON_TRIVIAL_CULL"/>
+ <value value="47" name="PERF_PAPC_SU_INPUT_PRIM"/>
+ <value value="48" name="PERF_PAPC_SU_INPUT_CLIP_PRIM"/>
+ <value value="49" name="PERF_PAPC_SU_INPUT_NULL_PRIM"/>
+ <value value="50" name="PERF_PAPC_SU_ZERO_AREA_CULL_PRIM"/>
+ <value value="51" name="PERF_PAPC_SU_BACK_FACE_CULL_PRIM"/>
+ <value value="52" name="PERF_PAPC_SU_FRONT_FACE_CULL_PRIM"/>
+ <value value="53" name="PERF_PAPC_SU_POLYMODE_FACE_CULL"/>
+ <value value="54" name="PERF_PAPC_SU_POLYMODE_BACK_CULL"/>
+ <value value="55" name="PERF_PAPC_SU_POLYMODE_FRONT_CULL"/>
+ <value value="56" name="PERF_PAPC_SU_POLYMODE_INVALID_FILL"/>
+ <value value="57" name="PERF_PAPC_SU_OUTPUT_PRIM"/>
+ <value value="58" name="PERF_PAPC_SU_OUTPUT_CLIP_PRIM"/>
+ <value value="59" name="PERF_PAPC_SU_OUTPUT_NULL_PRIM"/>
+ <value value="60" name="PERF_PAPC_SU_OUTPUT_EVENT_FLAG"/>
+ <value value="61" name="PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT"/>
+ <value value="62" name="PERF_PAPC_SU_OUTPUT_END_OF_PACKET"/>
+ <value value="63" name="PERF_PAPC_SU_OUTPUT_POLYMODE_FACE"/>
+ <value value="64" name="PERF_PAPC_SU_OUTPUT_POLYMODE_BACK"/>
+ <value value="65" name="PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT"/>
+ <value value="66" name="PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE"/>
+ <value value="67" name="PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK"/>
+ <value value="68" name="PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT"/>
+ <value value="69" name="PERF_PAPC_PASX_REQ_IDLE"/>
+ <value value="70" name="PERF_PAPC_PASX_REQ_BUSY"/>
+ <value value="71" name="PERF_PAPC_PASX_REQ_STALLED"/>
+ <value value="72" name="PERF_PAPC_PASX_REC_IDLE"/>
+ <value value="73" name="PERF_PAPC_PASX_REC_BUSY"/>
+ <value value="74" name="PERF_PAPC_PASX_REC_STARVED_SX"/>
+ <value value="75" name="PERF_PAPC_PASX_REC_STALLED"/>
+ <value value="76" name="PERF_PAPC_PASX_REC_STALLED_POS_MEM"/>
+ <value value="77" name="PERF_PAPC_PASX_REC_STALLED_CCGSM_IN"/>
+ <value value="78" name="PERF_PAPC_CCGSM_IDLE"/>
+ <value value="79" name="PERF_PAPC_CCGSM_BUSY"/>
+ <value value="80" name="PERF_PAPC_CCGSM_STALLED"/>
+ <value value="81" name="PERF_PAPC_CLPRIM_IDLE"/>
+ <value value="82" name="PERF_PAPC_CLPRIM_BUSY"/>
+ <value value="83" name="PERF_PAPC_CLPRIM_STALLED"/>
+ <value value="84" name="PERF_PAPC_CLPRIM_STARVED_CCGSM"/>
+ <value value="85" name="PERF_PAPC_CLIPSM_IDLE"/>
+ <value value="86" name="PERF_PAPC_CLIPSM_BUSY"/>
+ <value value="87" name="PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH"/>
+ <value value="88" name="PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ"/>
+ <value value="89" name="PERF_PAPC_CLIPSM_WAIT_CLIPGA"/>
+ <value value="90" name="PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP"/>
+ <value value="91" name="PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM"/>
+ <value value="92" name="PERF_PAPC_CLIPGA_IDLE"/>
+ <value value="93" name="PERF_PAPC_CLIPGA_BUSY"/>
+ <value value="94" name="PERF_PAPC_CLIPGA_STARVED_VTE_CLIP"/>
+ <value value="95" name="PERF_PAPC_CLIPGA_STALLED"/>
+ <value value="96" name="PERF_PAPC_CLIP_IDLE"/>
+ <value value="97" name="PERF_PAPC_CLIP_BUSY"/>
+ <value value="98" name="PERF_PAPC_SU_IDLE"/>
+ <value value="99" name="PERF_PAPC_SU_BUSY"/>
+ <value value="100" name="PERF_PAPC_SU_STARVED_CLIP"/>
+ <value value="101" name="PERF_PAPC_SU_STALLED_SC"/>
+ <value value="102" name="PERF_PAPC_SU_FACENESS_CULL"/>
+</enum>
+
+<enum name="a2xx_sc_perfcnt_select">
+ <value value="0" name="SC_SR_WINDOW_VALID"/>
+ <value value="1" name="SC_CW_WINDOW_VALID"/>
+ <value value="2" name="SC_QM_WINDOW_VALID"/>
+ <value value="3" name="SC_FW_WINDOW_VALID"/>
+ <value value="4" name="SC_EZ_WINDOW_VALID"/>
+ <value value="5" name="SC_IT_WINDOW_VALID"/>
+ <value value="6" name="SC_STARVED_BY_PA"/>
+ <value value="7" name="SC_STALLED_BY_RB_TILE"/>
+ <value value="8" name="SC_STALLED_BY_RB_SAMP"/>
+ <value value="9" name="SC_STARVED_BY_RB_EZ"/>
+ <value value="10" name="SC_STALLED_BY_SAMPLE_FF"/>
+ <value value="11" name="SC_STALLED_BY_SQ"/>
+ <value value="12" name="SC_STALLED_BY_SP"/>
+ <value value="13" name="SC_TOTAL_NO_PRIMS"/>
+ <value value="14" name="SC_NON_EMPTY_PRIMS"/>
+ <value value="15" name="SC_NO_TILES_PASSING_QM"/>
+ <value value="16" name="SC_NO_PIXELS_PRE_EZ"/>
+ <value value="17" name="SC_NO_PIXELS_POST_EZ"/>
+</enum>
+
+<enum name="a2xx_vgt_perfcount_select">
+ <value value="0" name="VGT_SQ_EVENT_WINDOW_ACTIVE"/>
+ <value value="1" name="VGT_SQ_SEND"/>
+ <value value="2" name="VGT_SQ_STALLED"/>
+ <value value="3" name="VGT_SQ_STARVED_BUSY"/>
+ <value value="4" name="VGT_SQ_STARVED_IDLE"/>
+ <value value="5" name="VGT_SQ_STATIC"/>
+ <value value="6" name="VGT_PA_EVENT_WINDOW_ACTIVE"/>
+ <value value="7" name="VGT_PA_CLIP_V_SEND"/>
+ <value value="8" name="VGT_PA_CLIP_V_STALLED"/>
+ <value value="9" name="VGT_PA_CLIP_V_STARVED_BUSY"/>
+ <value value="10" name="VGT_PA_CLIP_V_STARVED_IDLE"/>
+ <value value="11" name="VGT_PA_CLIP_V_STATIC"/>
+ <value value="12" name="VGT_PA_CLIP_P_SEND"/>
+ <value value="13" name="VGT_PA_CLIP_P_STALLED"/>
+ <value value="14" name="VGT_PA_CLIP_P_STARVED_BUSY"/>
+ <value value="15" name="VGT_PA_CLIP_P_STARVED_IDLE"/>
+ <value value="16" name="VGT_PA_CLIP_P_STATIC"/>
+ <value value="17" name="VGT_PA_CLIP_S_SEND"/>
+ <value value="18" name="VGT_PA_CLIP_S_STALLED"/>
+ <value value="19" name="VGT_PA_CLIP_S_STARVED_BUSY"/>
+ <value value="20" name="VGT_PA_CLIP_S_STARVED_IDLE"/>
+ <value value="21" name="VGT_PA_CLIP_S_STATIC"/>
+ <value value="22" name="RBIU_FIFOS_EVENT_WINDOW_ACTIVE"/>
+ <value value="23" name="RBIU_IMMED_DATA_FIFO_STARVED"/>
+ <value value="24" name="RBIU_IMMED_DATA_FIFO_STALLED"/>
+ <value value="25" name="RBIU_DMA_REQUEST_FIFO_STARVED"/>
+ <value value="26" name="RBIU_DMA_REQUEST_FIFO_STALLED"/>
+ <value value="27" name="RBIU_DRAW_INITIATOR_FIFO_STARVED"/>
+ <value value="28" name="RBIU_DRAW_INITIATOR_FIFO_STALLED"/>
+ <value value="29" name="BIN_PRIM_NEAR_CULL"/>
+ <value value="30" name="BIN_PRIM_ZERO_CULL"/>
+ <value value="31" name="BIN_PRIM_FAR_CULL"/>
+ <value value="32" name="BIN_PRIM_BIN_CULL"/>
+ <value value="33" name="BIN_PRIM_FACE_CULL"/>
+ <value value="34" name="SPARE34"/>
+ <value value="35" name="SPARE35"/>
+ <value value="36" name="SPARE36"/>
+ <value value="37" name="SPARE37"/>
+ <value value="38" name="SPARE38"/>
+ <value value="39" name="SPARE39"/>
+ <value value="40" name="TE_SU_IN_VALID"/>
+ <value value="41" name="TE_SU_IN_READ"/>
+ <value value="42" name="TE_SU_IN_PRIM"/>
+ <value value="43" name="TE_SU_IN_EOP"/>
+ <value value="44" name="TE_SU_IN_NULL_PRIM"/>
+ <value value="45" name="TE_WK_IN_VALID"/>
+ <value value="46" name="TE_WK_IN_READ"/>
+ <value value="47" name="TE_OUT_PRIM_VALID"/>
+ <value value="48" name="TE_OUT_PRIM_READ"/>
+</enum>
+
+<enum name="a2xx_tcr_perfcount_select">
+ <value value="0" name="DGMMPD_IPMUX0_STALL"/>
+ <value value="4" name="DGMMPD_IPMUX_ALL_STALL"/>
+ <value value="5" name="OPMUX0_L2_WRITES"/>
+</enum>
+
+<enum name="a2xx_tp_perfcount_select">
+ <value value="0" name="POINT_QUADS"/>
+ <value value="1" name="BILIN_QUADS"/>
+ <value value="2" name="ANISO_QUADS"/>
+ <value value="3" name="MIP_QUADS"/>
+ <value value="4" name="VOL_QUADS"/>
+ <value value="5" name="MIP_VOL_QUADS"/>
+ <value value="6" name="MIP_ANISO_QUADS"/>
+ <value value="7" name="VOL_ANISO_QUADS"/>
+ <value value="8" name="ANISO_2_1_QUADS"/>
+ <value value="9" name="ANISO_4_1_QUADS"/>
+ <value value="10" name="ANISO_6_1_QUADS"/>
+ <value value="11" name="ANISO_8_1_QUADS"/>
+ <value value="12" name="ANISO_10_1_QUADS"/>
+ <value value="13" name="ANISO_12_1_QUADS"/>
+ <value value="14" name="ANISO_14_1_QUADS"/>
+ <value value="15" name="ANISO_16_1_QUADS"/>
+ <value value="16" name="MIP_VOL_ANISO_QUADS"/>
+ <value value="17" name="ALIGN_2_QUADS"/>
+ <value value="18" name="ALIGN_4_QUADS"/>
+ <value value="19" name="PIX_0_QUAD"/>
+ <value value="20" name="PIX_1_QUAD"/>
+ <value value="21" name="PIX_2_QUAD"/>
+ <value value="22" name="PIX_3_QUAD"/>
+ <value value="23" name="PIX_4_QUAD"/>
+ <value value="24" name="TP_MIPMAP_LOD0"/>
+ <value value="25" name="TP_MIPMAP_LOD1"/>
+ <value value="26" name="TP_MIPMAP_LOD2"/>
+ <value value="27" name="TP_MIPMAP_LOD3"/>
+ <value value="28" name="TP_MIPMAP_LOD4"/>
+ <value value="29" name="TP_MIPMAP_LOD5"/>
+ <value value="30" name="TP_MIPMAP_LOD6"/>
+ <value value="31" name="TP_MIPMAP_LOD7"/>
+ <value value="32" name="TP_MIPMAP_LOD8"/>
+ <value value="33" name="TP_MIPMAP_LOD9"/>
+ <value value="34" name="TP_MIPMAP_LOD10"/>
+ <value value="35" name="TP_MIPMAP_LOD11"/>
+ <value value="36" name="TP_MIPMAP_LOD12"/>
+ <value value="37" name="TP_MIPMAP_LOD13"/>
+ <value value="38" name="TP_MIPMAP_LOD14"/>
+</enum>
+
+<enum name="a2xx_tcm_perfcount_select">
+ <value value="0" name="QUAD0_RD_LAT_FIFO_EMPTY"/>
+ <value value="3" name="QUAD0_RD_LAT_FIFO_4TH_FULL"/>
+ <value value="4" name="QUAD0_RD_LAT_FIFO_HALF_FULL"/>
+ <value value="5" name="QUAD0_RD_LAT_FIFO_FULL"/>
+ <value value="6" name="QUAD0_RD_LAT_FIFO_LT_4TH_FULL"/>
+ <value value="28" name="READ_STARVED_QUAD0"/>
+ <value value="32" name="READ_STARVED"/>
+ <value value="33" name="READ_STALLED_QUAD0"/>
+ <value value="37" name="READ_STALLED"/>
+ <value value="38" name="VALID_READ_QUAD0"/>
+ <value value="42" name="TC_TP_STARVED_QUAD0"/>
+ <value value="46" name="TC_TP_STARVED"/>
+</enum>
+
+<enum name="a2xx_tcf_perfcount_select">
+ <value value="0" name="VALID_CYCLES"/>
+ <value value="1" name="SINGLE_PHASES"/>
+ <value value="2" name="ANISO_PHASES"/>
+ <value value="3" name="MIP_PHASES"/>
+ <value value="4" name="VOL_PHASES"/>
+ <value value="5" name="MIP_VOL_PHASES"/>
+ <value value="6" name="MIP_ANISO_PHASES"/>
+ <value value="7" name="VOL_ANISO_PHASES"/>
+ <value value="8" name="ANISO_2_1_PHASES"/>
+ <value value="9" name="ANISO_4_1_PHASES"/>
+ <value value="10" name="ANISO_6_1_PHASES"/>
+ <value value="11" name="ANISO_8_1_PHASES"/>
+ <value value="12" name="ANISO_10_1_PHASES"/>
+ <value value="13" name="ANISO_12_1_PHASES"/>
+ <value value="14" name="ANISO_14_1_PHASES"/>
+ <value value="15" name="ANISO_16_1_PHASES"/>
+ <value value="16" name="MIP_VOL_ANISO_PHASES"/>
+ <value value="17" name="ALIGN_2_PHASES"/>
+ <value value="18" name="ALIGN_4_PHASES"/>
+ <value value="19" name="TPC_BUSY"/>
+ <value value="20" name="TPC_STALLED"/>
+ <value value="21" name="TPC_STARVED"/>
+ <value value="22" name="TPC_WORKING"/>
+ <value value="23" name="TPC_WALKER_BUSY"/>
+ <value value="24" name="TPC_WALKER_STALLED"/>
+ <value value="25" name="TPC_WALKER_WORKING"/>
+ <value value="26" name="TPC_ALIGNER_BUSY"/>
+ <value value="27" name="TPC_ALIGNER_STALLED"/>
+ <value value="28" name="TPC_ALIGNER_STALLED_BY_BLEND"/>
+ <value value="29" name="TPC_ALIGNER_STALLED_BY_CACHE"/>
+ <value value="30" name="TPC_ALIGNER_WORKING"/>
+ <value value="31" name="TPC_BLEND_BUSY"/>
+ <value value="32" name="TPC_BLEND_SYNC"/>
+ <value value="33" name="TPC_BLEND_STARVED"/>
+ <value value="34" name="TPC_BLEND_WORKING"/>
+ <value value="35" name="OPCODE_0x00"/>
+ <value value="36" name="OPCODE_0x01"/>
+ <value value="37" name="OPCODE_0x04"/>
+ <value value="38" name="OPCODE_0x10"/>
+ <value value="39" name="OPCODE_0x11"/>
+ <value value="40" name="OPCODE_0x12"/>
+ <value value="41" name="OPCODE_0x13"/>
+ <value value="42" name="OPCODE_0x18"/>
+ <value value="43" name="OPCODE_0x19"/>
+ <value value="44" name="OPCODE_0x1A"/>
+ <value value="45" name="OPCODE_OTHER"/>
+ <value value="56" name="IN_FIFO_0_EMPTY"/>
+ <value value="57" name="IN_FIFO_0_LT_HALF_FULL"/>
+ <value value="58" name="IN_FIFO_0_HALF_FULL"/>
+ <value value="59" name="IN_FIFO_0_FULL"/>
+ <value value="72" name="IN_FIFO_TPC_EMPTY"/>
+ <value value="73" name="IN_FIFO_TPC_LT_HALF_FULL"/>
+ <value value="74" name="IN_FIFO_TPC_HALF_FULL"/>
+ <value value="75" name="IN_FIFO_TPC_FULL"/>
+ <value value="76" name="TPC_TC_XFC"/>
+ <value value="77" name="TPC_TC_STATE"/>
+ <value value="78" name="TC_STALL"/>
+ <value value="79" name="QUAD0_TAPS"/>
+ <value value="83" name="QUADS"/>
+ <value value="84" name="TCA_SYNC_STALL"/>
+ <value value="85" name="TAG_STALL"/>
+ <value value="88" name="TCB_SYNC_STALL"/>
+ <value value="89" name="TCA_VALID"/>
+ <value value="90" name="PROBES_VALID"/>
+ <value value="91" name="MISS_STALL"/>
+ <value value="92" name="FETCH_FIFO_STALL"/>
+ <value value="93" name="TCO_STALL"/>
+ <value value="94" name="ANY_STALL"/>
+ <value value="95" name="TAG_MISSES"/>
+ <value value="96" name="TAG_HITS"/>
+ <value value="97" name="SUB_TAG_MISSES"/>
+ <value value="98" name="SET0_INVALIDATES"/>
+ <value value="99" name="SET1_INVALIDATES"/>
+ <value value="100" name="SET2_INVALIDATES"/>
+ <value value="101" name="SET3_INVALIDATES"/>
+ <value value="102" name="SET0_TAG_MISSES"/>
+ <value value="103" name="SET1_TAG_MISSES"/>
+ <value value="104" name="SET2_TAG_MISSES"/>
+ <value value="105" name="SET3_TAG_MISSES"/>
+ <value value="106" name="SET0_TAG_HITS"/>
+ <value value="107" name="SET1_TAG_HITS"/>
+ <value value="108" name="SET2_TAG_HITS"/>
+ <value value="109" name="SET3_TAG_HITS"/>
+ <value value="110" name="SET0_SUB_TAG_MISSES"/>
+ <value value="111" name="SET1_SUB_TAG_MISSES"/>
+ <value value="112" name="SET2_SUB_TAG_MISSES"/>
+ <value value="113" name="SET3_SUB_TAG_MISSES"/>
+ <value value="114" name="SET0_EVICT1"/>
+ <value value="115" name="SET0_EVICT2"/>
+ <value value="116" name="SET0_EVICT3"/>
+ <value value="117" name="SET0_EVICT4"/>
+ <value value="118" name="SET0_EVICT5"/>
+ <value value="119" name="SET0_EVICT6"/>
+ <value value="120" name="SET0_EVICT7"/>
+ <value value="121" name="SET0_EVICT8"/>
+ <value value="130" name="SET1_EVICT1"/>
+ <value value="131" name="SET1_EVICT2"/>
+ <value value="132" name="SET1_EVICT3"/>
+ <value value="133" name="SET1_EVICT4"/>
+ <value value="134" name="SET1_EVICT5"/>
+ <value value="135" name="SET1_EVICT6"/>
+ <value value="136" name="SET1_EVICT7"/>
+ <value value="137" name="SET1_EVICT8"/>
+ <value value="146" name="SET2_EVICT1"/>
+ <value value="147" name="SET2_EVICT2"/>
+ <value value="148" name="SET2_EVICT3"/>
+ <value value="149" name="SET2_EVICT4"/>
+ <value value="150" name="SET2_EVICT5"/>
+ <value value="151" name="SET2_EVICT6"/>
+ <value value="152" name="SET2_EVICT7"/>
+ <value value="153" name="SET2_EVICT8"/>
+ <value value="162" name="SET3_EVICT1"/>
+ <value value="163" name="SET3_EVICT2"/>
+ <value value="164" name="SET3_EVICT3"/>
+ <value value="165" name="SET3_EVICT4"/>
+ <value value="166" name="SET3_EVICT5"/>
+ <value value="167" name="SET3_EVICT6"/>
+ <value value="168" name="SET3_EVICT7"/>
+ <value value="169" name="SET3_EVICT8"/>
+ <value value="178" name="FF_EMPTY"/>
+ <value value="179" name="FF_LT_HALF_FULL"/>
+ <value value="180" name="FF_HALF_FULL"/>
+ <value value="181" name="FF_FULL"/>
+ <value value="182" name="FF_XFC"/>
+ <value value="183" name="FF_STALLED"/>
+ <value value="184" name="FG_MASKS"/>
+ <value value="185" name="FG_LEFT_MASKS"/>
+ <value value="186" name="FG_LEFT_MASK_STALLED"/>
+ <value value="187" name="FG_LEFT_NOT_DONE_STALL"/>
+ <value value="188" name="FG_LEFT_FG_STALL"/>
+ <value value="189" name="FG_LEFT_SECTORS"/>
+ <value value="195" name="FG0_REQUESTS"/>
+ <value value="196" name="FG0_STALLED"/>
+ <value value="199" name="MEM_REQ512"/>
+ <value value="200" name="MEM_REQ_SENT"/>
+ <value value="202" name="MEM_LOCAL_READ_REQ"/>
+ <value value="203" name="TC0_MH_STALLED"/>
+</enum>
+
+<enum name="a2xx_sq_perfcnt_select">
+ <value value="0" name="SQ_PIXEL_VECTORS_SUB"/>
+ <value value="1" name="SQ_VERTEX_VECTORS_SUB"/>
+ <value value="2" name="SQ_ALU0_ACTIVE_VTX_SIMD0"/>
+ <value value="3" name="SQ_ALU1_ACTIVE_VTX_SIMD0"/>
+ <value value="4" name="SQ_ALU0_ACTIVE_PIX_SIMD0"/>
+ <value value="5" name="SQ_ALU1_ACTIVE_PIX_SIMD0"/>
+ <value value="6" name="SQ_ALU0_ACTIVE_VTX_SIMD1"/>
+ <value value="7" name="SQ_ALU1_ACTIVE_VTX_SIMD1"/>
+ <value value="8" name="SQ_ALU0_ACTIVE_PIX_SIMD1"/>
+ <value value="9" name="SQ_ALU1_ACTIVE_PIX_SIMD1"/>
+ <value value="10" name="SQ_EXPORT_CYCLES"/>
+ <value value="11" name="SQ_ALU_CST_WRITTEN"/>
+ <value value="12" name="SQ_TEX_CST_WRITTEN"/>
+ <value value="13" name="SQ_ALU_CST_STALL"/>
+ <value value="14" name="SQ_ALU_TEX_STALL"/>
+ <value value="15" name="SQ_INST_WRITTEN"/>
+ <value value="16" name="SQ_BOOLEAN_WRITTEN"/>
+ <value value="17" name="SQ_LOOPS_WRITTEN"/>
+ <value value="18" name="SQ_PIXEL_SWAP_IN"/>
+ <value value="19" name="SQ_PIXEL_SWAP_OUT"/>
+ <value value="20" name="SQ_VERTEX_SWAP_IN"/>
+ <value value="21" name="SQ_VERTEX_SWAP_OUT"/>
+ <value value="22" name="SQ_ALU_VTX_INST_ISSUED"/>
+ <value value="23" name="SQ_TEX_VTX_INST_ISSUED"/>
+ <value value="24" name="SQ_VC_VTX_INST_ISSUED"/>
+ <value value="25" name="SQ_CF_VTX_INST_ISSUED"/>
+ <value value="26" name="SQ_ALU_PIX_INST_ISSUED"/>
+ <value value="27" name="SQ_TEX_PIX_INST_ISSUED"/>
+ <value value="28" name="SQ_VC_PIX_INST_ISSUED"/>
+ <value value="29" name="SQ_CF_PIX_INST_ISSUED"/>
+ <value value="30" name="SQ_ALU0_FIFO_EMPTY_SIMD0"/>
+ <value value="31" name="SQ_ALU1_FIFO_EMPTY_SIMD0"/>
+ <value value="32" name="SQ_ALU0_FIFO_EMPTY_SIMD1"/>
+ <value value="33" name="SQ_ALU1_FIFO_EMPTY_SIMD1"/>
+ <value value="34" name="SQ_ALU_NOPS"/>
+ <value value="35" name="SQ_PRED_SKIP"/>
+ <value value="36" name="SQ_SYNC_ALU_STALL_SIMD0_VTX"/>
+ <value value="37" name="SQ_SYNC_ALU_STALL_SIMD1_VTX"/>
+ <value value="38" name="SQ_SYNC_TEX_STALL_VTX"/>
+ <value value="39" name="SQ_SYNC_VC_STALL_VTX"/>
+ <value value="40" name="SQ_CONSTANTS_USED_SIMD0"/>
+ <value value="41" name="SQ_CONSTANTS_SENT_SP_SIMD0"/>
+ <value value="42" name="SQ_GPR_STALL_VTX"/>
+ <value value="43" name="SQ_GPR_STALL_PIX"/>
+ <value value="44" name="SQ_VTX_RS_STALL"/>
+ <value value="45" name="SQ_PIX_RS_STALL"/>
+ <value value="46" name="SQ_SX_PC_FULL"/>
+ <value value="47" name="SQ_SX_EXP_BUFF_FULL"/>
+ <value value="48" name="SQ_SX_POS_BUFF_FULL"/>
+ <value value="49" name="SQ_INTERP_QUADS"/>
+ <value value="50" name="SQ_INTERP_ACTIVE"/>
+ <value value="51" name="SQ_IN_PIXEL_STALL"/>
+ <value value="52" name="SQ_IN_VTX_STALL"/>
+ <value value="53" name="SQ_VTX_CNT"/>
+ <value value="54" name="SQ_VTX_VECTOR2"/>
+ <value value="55" name="SQ_VTX_VECTOR3"/>
+ <value value="56" name="SQ_VTX_VECTOR4"/>
+ <value value="57" name="SQ_PIXEL_VECTOR1"/>
+ <value value="58" name="SQ_PIXEL_VECTOR23"/>
+ <value value="59" name="SQ_PIXEL_VECTOR4"/>
+ <value value="60" name="SQ_CONSTANTS_USED_SIMD1"/>
+ <value value="61" name="SQ_CONSTANTS_SENT_SP_SIMD1"/>
+ <value value="62" name="SQ_SX_MEM_EXP_FULL"/>
+ <value value="63" name="SQ_ALU0_ACTIVE_VTX_SIMD2"/>
+ <value value="64" name="SQ_ALU1_ACTIVE_VTX_SIMD2"/>
+ <value value="65" name="SQ_ALU0_ACTIVE_PIX_SIMD2"/>
+ <value value="66" name="SQ_ALU1_ACTIVE_PIX_SIMD2"/>
+ <value value="67" name="SQ_ALU0_ACTIVE_VTX_SIMD3"/>
+ <value value="68" name="SQ_PERFCOUNT_VTX_QUAL_TP_DONE"/>
+ <value value="69" name="SQ_ALU0_ACTIVE_PIX_SIMD3"/>
+ <value value="70" name="SQ_PERFCOUNT_PIX_QUAL_TP_DONE"/>
+ <value value="71" name="SQ_ALU0_FIFO_EMPTY_SIMD2"/>
+ <value value="72" name="SQ_ALU1_FIFO_EMPTY_SIMD2"/>
+ <value value="73" name="SQ_ALU0_FIFO_EMPTY_SIMD3"/>
+ <value value="74" name="SQ_ALU1_FIFO_EMPTY_SIMD3"/>
+ <value value="75" name="SQ_SYNC_ALU_STALL_SIMD2_VTX"/>
+ <value value="76" name="SQ_PERFCOUNT_VTX_POP_THREAD"/>
+ <value value="77" name="SQ_SYNC_ALU_STALL_SIMD0_PIX"/>
+ <value value="78" name="SQ_SYNC_ALU_STALL_SIMD1_PIX"/>
+ <value value="79" name="SQ_SYNC_ALU_STALL_SIMD2_PIX"/>
+ <value value="80" name="SQ_PERFCOUNT_PIX_POP_THREAD"/>
+ <value value="81" name="SQ_SYNC_TEX_STALL_PIX"/>
+ <value value="82" name="SQ_SYNC_VC_STALL_PIX"/>
+ <value value="83" name="SQ_CONSTANTS_USED_SIMD2"/>
+ <value value="84" name="SQ_CONSTANTS_SENT_SP_SIMD2"/>
+ <value value="85" name="SQ_PERFCOUNT_VTX_DEALLOC_ACK"/>
+ <value value="86" name="SQ_PERFCOUNT_PIX_DEALLOC_ACK"/>
+ <value value="87" name="SQ_ALU0_FIFO_FULL_SIMD0"/>
+ <value value="88" name="SQ_ALU1_FIFO_FULL_SIMD0"/>
+ <value value="89" name="SQ_ALU0_FIFO_FULL_SIMD1"/>
+ <value value="90" name="SQ_ALU1_FIFO_FULL_SIMD1"/>
+ <value value="91" name="SQ_ALU0_FIFO_FULL_SIMD2"/>
+ <value value="92" name="SQ_ALU1_FIFO_FULL_SIMD2"/>
+ <value value="93" name="SQ_ALU0_FIFO_FULL_SIMD3"/>
+ <value value="94" name="SQ_ALU1_FIFO_FULL_SIMD3"/>
+ <value value="95" name="VC_PERF_STATIC"/>
+ <value value="96" name="VC_PERF_STALLED"/>
+ <value value="97" name="VC_PERF_STARVED"/>
+ <value value="98" name="VC_PERF_SEND"/>
+ <value value="99" name="VC_PERF_ACTUAL_STARVED"/>
+ <value value="100" name="PIXEL_THREAD_0_ACTIVE"/>
+ <value value="101" name="VERTEX_THREAD_0_ACTIVE"/>
+ <value value="102" name="PIXEL_THREAD_0_NUMBER"/>
+ <value value="103" name="VERTEX_THREAD_0_NUMBER"/>
+ <value value="104" name="VERTEX_EVENT_NUMBER"/>
+ <value value="105" name="PIXEL_EVENT_NUMBER"/>
+ <value value="106" name="PTRBUFF_EF_PUSH"/>
+ <value value="107" name="PTRBUFF_EF_POP_EVENT"/>
+ <value value="108" name="PTRBUFF_EF_POP_NEW_VTX"/>
+ <value value="109" name="PTRBUFF_EF_POP_DEALLOC"/>
+ <value value="110" name="PTRBUFF_EF_POP_PVECTOR"/>
+ <value value="111" name="PTRBUFF_EF_POP_PVECTOR_X"/>
+ <value value="112" name="PTRBUFF_EF_POP_PVECTOR_VNZ"/>
+ <value value="113" name="PTRBUFF_PB_DEALLOC"/>
+ <value value="114" name="PTRBUFF_PI_STATE_PPB_POP"/>
+ <value value="115" name="PTRBUFF_PI_RTR"/>
+ <value value="116" name="PTRBUFF_PI_READ_EN"/>
+ <value value="117" name="PTRBUFF_PI_BUFF_SWAP"/>
+ <value value="118" name="PTRBUFF_SQ_FREE_BUFF"/>
+ <value value="119" name="PTRBUFF_SQ_DEC"/>
+ <value value="120" name="PTRBUFF_SC_VALID_CNTL_EVENT"/>
+ <value value="121" name="PTRBUFF_SC_VALID_IJ_XFER"/>
+ <value value="122" name="PTRBUFF_SC_NEW_VECTOR_1_Q"/>
+ <value value="123" name="PTRBUFF_QUAL_NEW_VECTOR"/>
+ <value value="124" name="PTRBUFF_QUAL_EVENT"/>
+ <value value="125" name="PTRBUFF_END_BUFFER"/>
+ <value value="126" name="PTRBUFF_FILL_QUAD"/>
+ <value value="127" name="VERTS_WRITTEN_SPI"/>
+ <value value="128" name="TP_FETCH_INSTR_EXEC"/>
+ <value value="129" name="TP_FETCH_INSTR_REQ"/>
+ <value value="130" name="TP_DATA_RETURN"/>
+ <value value="131" name="SPI_WRITE_CYCLES_SP"/>
+ <value value="132" name="SPI_WRITES_SP"/>
+ <value value="133" name="SP_ALU_INSTR_EXEC"/>
+ <value value="134" name="SP_CONST_ADDR_TO_SQ"/>
+ <value value="135" name="SP_PRED_KILLS_TO_SQ"/>
+ <value value="136" name="SP_EXPORT_CYCLES_TO_SX"/>
+ <value value="137" name="SP_EXPORTS_TO_SX"/>
+ <value value="138" name="SQ_CYCLES_ELAPSED"/>
+ <value value="139" name="SQ_TCFS_OPT_ALLOC_EXEC"/>
+ <value value="140" name="SQ_TCFS_NO_OPT_ALLOC"/>
+ <value value="141" name="SQ_ALU0_NO_OPT_ALLOC"/>
+ <value value="142" name="SQ_ALU1_NO_OPT_ALLOC"/>
+ <value value="143" name="SQ_TCFS_ARB_XFC_CNT"/>
+ <value value="144" name="SQ_ALU0_ARB_XFC_CNT"/>
+ <value value="145" name="SQ_ALU1_ARB_XFC_CNT"/>
+ <value value="146" name="SQ_TCFS_CFS_UPDATE_CNT"/>
+ <value value="147" name="SQ_ALU0_CFS_UPDATE_CNT"/>
+ <value value="148" name="SQ_ALU1_CFS_UPDATE_CNT"/>
+ <value value="149" name="SQ_VTX_PUSH_THREAD_CNT"/>
+ <value value="150" name="SQ_VTX_POP_THREAD_CNT"/>
+ <value value="151" name="SQ_PIX_PUSH_THREAD_CNT"/>
+ <value value="152" name="SQ_PIX_POP_THREAD_CNT"/>
+ <value value="153" name="SQ_PIX_TOTAL"/>
+ <value value="154" name="SQ_PIX_KILLED"/>
+</enum>
+
+<enum name="a2xx_sx_perfcnt_select">
+ <value value="0" name="SX_EXPORT_VECTORS"/>
+ <value value="1" name="SX_DUMMY_QUADS"/>
+ <value value="2" name="SX_ALPHA_FAIL"/>
+ <value value="3" name="SX_RB_QUAD_BUSY"/>
+ <value value="4" name="SX_RB_COLOR_BUSY"/>
+ <value value="5" name="SX_RB_QUAD_STALL"/>
+ <value value="6" name="SX_RB_COLOR_STALL"/>
+</enum>
+
+<enum name="a2xx_rbbm_perfcount1_sel">
+ <value value="0" name="RBBM1_COUNT"/>
+ <value value="1" name="RBBM1_NRT_BUSY"/>
+ <value value="2" name="RBBM1_RB_BUSY"/>
+ <value value="3" name="RBBM1_SQ_CNTX0_BUSY"/>
+ <value value="4" name="RBBM1_SQ_CNTX17_BUSY"/>
+ <value value="5" name="RBBM1_VGT_BUSY"/>
+ <value value="6" name="RBBM1_VGT_NODMA_BUSY"/>
+ <value value="7" name="RBBM1_PA_BUSY"/>
+ <value value="8" name="RBBM1_SC_CNTX_BUSY"/>
+ <value value="9" name="RBBM1_TPC_BUSY"/>
+ <value value="10" name="RBBM1_TC_BUSY"/>
+ <value value="11" name="RBBM1_SX_BUSY"/>
+ <value value="12" name="RBBM1_CP_COHER_BUSY"/>
+ <value value="13" name="RBBM1_CP_NRT_BUSY"/>
+ <value value="14" name="RBBM1_GFX_IDLE_STALL"/>
+ <value value="15" name="RBBM1_INTERRUPT"/>
+</enum>
+
+<enum name="a2xx_cp_perfcount_sel">
+ <value value="0" name="ALWAYS_COUNT"/>
+ <value value="1" name="TRANS_FIFO_FULL"/>
+ <value value="2" name="TRANS_FIFO_AF"/>
+ <value value="3" name="RCIU_PFPTRANS_WAIT"/>
+ <value value="6" name="RCIU_NRTTRANS_WAIT"/>
+ <value value="8" name="CSF_NRT_READ_WAIT"/>
+ <value value="9" name="CSF_I1_FIFO_FULL"/>
+ <value value="10" name="CSF_I2_FIFO_FULL"/>
+ <value value="11" name="CSF_ST_FIFO_FULL"/>
+ <value value="13" name="CSF_RING_ROQ_FULL"/>
+ <value value="14" name="CSF_I1_ROQ_FULL"/>
+ <value value="15" name="CSF_I2_ROQ_FULL"/>
+ <value value="16" name="CSF_ST_ROQ_FULL"/>
+ <value value="18" name="MIU_TAG_MEM_FULL"/>
+ <value value="19" name="MIU_WRITECLEAN"/>
+ <value value="22" name="MIU_NRT_WRITE_STALLED"/>
+ <value value="23" name="MIU_NRT_READ_STALLED"/>
+ <value value="24" name="ME_WRITE_CONFIRM_FIFO_FULL"/>
+ <value value="25" name="ME_VS_DEALLOC_FIFO_FULL"/>
+ <value value="26" name="ME_PS_DEALLOC_FIFO_FULL"/>
+ <value value="27" name="ME_REGS_VS_EVENT_FIFO_FULL"/>
+ <value value="28" name="ME_REGS_PS_EVENT_FIFO_FULL"/>
+ <value value="29" name="ME_REGS_CF_EVENT_FIFO_FULL"/>
+ <value value="30" name="ME_MICRO_RB_STARVED"/>
+ <value value="31" name="ME_MICRO_I1_STARVED"/>
+ <value value="32" name="ME_MICRO_I2_STARVED"/>
+ <value value="33" name="ME_MICRO_ST_STARVED"/>
+ <value value="40" name="RCIU_RBBM_DWORD_SENT"/>
+ <value value="41" name="ME_BUSY_CLOCKS"/>
+ <value value="42" name="ME_WAIT_CONTEXT_AVAIL"/>
+ <value value="43" name="PFP_TYPE0_PACKET"/>
+ <value value="44" name="PFP_TYPE3_PACKET"/>
+ <value value="45" name="CSF_RB_WPTR_NEQ_RPTR"/>
+ <value value="46" name="CSF_I1_SIZE_NEQ_ZERO"/>
+ <value value="47" name="CSF_I2_SIZE_NEQ_ZERO"/>
+ <value value="48" name="CSF_RBI1I2_FETCHING"/>
+</enum>
+
+<enum name="a2xx_rb_perfcnt_select">
+ <value value="0" name="RBPERF_CNTX_BUSY"/>
+ <value value="1" name="RBPERF_CNTX_BUSY_MAX"/>
+ <value value="2" name="RBPERF_SX_QUAD_STARVED"/>
+ <value value="3" name="RBPERF_SX_QUAD_STARVED_MAX"/>
+ <value value="4" name="RBPERF_GA_GC_CH0_SYS_REQ"/>
+ <value value="5" name="RBPERF_GA_GC_CH0_SYS_REQ_MAX"/>
+ <value value="6" name="RBPERF_GA_GC_CH1_SYS_REQ"/>
+ <value value="7" name="RBPERF_GA_GC_CH1_SYS_REQ_MAX"/>
+ <value value="8" name="RBPERF_MH_STARVED"/>
+ <value value="9" name="RBPERF_MH_STARVED_MAX"/>
+ <value value="10" name="RBPERF_AZ_BC_COLOR_BUSY"/>
+ <value value="11" name="RBPERF_AZ_BC_COLOR_BUSY_MAX"/>
+ <value value="12" name="RBPERF_AZ_BC_Z_BUSY"/>
+ <value value="13" name="RBPERF_AZ_BC_Z_BUSY_MAX"/>
+ <value value="14" name="RBPERF_RB_SC_TILE_RTR_N"/>
+ <value value="15" name="RBPERF_RB_SC_TILE_RTR_N_MAX"/>
+ <value value="16" name="RBPERF_RB_SC_SAMP_RTR_N"/>
+ <value value="17" name="RBPERF_RB_SC_SAMP_RTR_N_MAX"/>
+ <value value="18" name="RBPERF_RB_SX_QUAD_RTR_N"/>
+ <value value="19" name="RBPERF_RB_SX_QUAD_RTR_N_MAX"/>
+ <value value="20" name="RBPERF_RB_SX_COLOR_RTR_N"/>
+ <value value="21" name="RBPERF_RB_SX_COLOR_RTR_N_MAX"/>
+ <value value="22" name="RBPERF_RB_SC_SAMP_LZ_BUSY"/>
+ <value value="23" name="RBPERF_RB_SC_SAMP_LZ_BUSY_MAX"/>
+ <value value="24" name="RBPERF_ZXP_STALL"/>
+ <value value="25" name="RBPERF_ZXP_STALL_MAX"/>
+ <value value="26" name="RBPERF_EVENT_PENDING"/>
+ <value value="27" name="RBPERF_EVENT_PENDING_MAX"/>
+ <value value="28" name="RBPERF_RB_MH_VALID"/>
+ <value value="29" name="RBPERF_RB_MH_VALID_MAX"/>
+ <value value="30" name="RBPERF_SX_RB_QUAD_SEND"/>
+ <value value="31" name="RBPERF_SX_RB_COLOR_SEND"/>
+ <value value="32" name="RBPERF_SC_RB_TILE_SEND"/>
+ <value value="33" name="RBPERF_SC_RB_SAMPLE_SEND"/>
+ <value value="34" name="RBPERF_SX_RB_MEM_EXPORT"/>
+ <value value="35" name="RBPERF_SX_RB_QUAD_EVENT"/>
+ <value value="36" name="RBPERF_SC_RB_TILE_EVENT_FILTERED"/>
+ <value value="37" name="RBPERF_SC_RB_TILE_EVENT_ALL"/>
+ <value value="38" name="RBPERF_RB_SC_EZ_SEND"/>
+ <value value="39" name="RBPERF_RB_SX_INDEX_SEND"/>
+ <value value="40" name="RBPERF_GMEM_INTFO_RD"/>
+ <value value="41" name="RBPERF_GMEM_INTF1_RD"/>
+ <value value="42" name="RBPERF_GMEM_INTFO_WR"/>
+ <value value="43" name="RBPERF_GMEM_INTF1_WR"/>
+ <value value="44" name="RBPERF_RB_CP_CONTEXT_DONE"/>
+ <value value="45" name="RBPERF_RB_CP_CACHE_FLUSH"/>
+ <value value="46" name="RBPERF_ZPASS_DONE"/>
+ <value value="47" name="RBPERF_ZCMD_VALID"/>
+ <value value="48" name="RBPERF_CCMD_VALID"/>
+ <value value="49" name="RBPERF_ACCUM_GRANT"/>
+ <value value="50" name="RBPERF_ACCUM_C0_GRANT"/>
+ <value value="51" name="RBPERF_ACCUM_C1_GRANT"/>
+ <value value="52" name="RBPERF_ACCUM_FULL_BE_WR"/>
+ <value value="53" name="RBPERF_ACCUM_REQUEST_NO_GRANT"/>
+ <value value="54" name="RBPERF_ACCUM_TIMEOUT_PULSE"/>
+ <value value="55" name="RBPERF_ACCUM_LIN_TIMEOUT_PULSE"/>
+ <value value="56" name="RBPERF_ACCUM_CAM_HIT_FLUSHING"/>
+</enum>
+
+<enum name="a2xx_mh_perfcnt_select">
+ <value value="0" name="CP_R0_REQUESTS"/>
+ <value value="1" name="CP_R1_REQUESTS"/>
+ <value value="2" name="CP_R2_REQUESTS"/>
+ <value value="3" name="CP_R3_REQUESTS"/>
+ <value value="4" name="CP_R4_REQUESTS"/>
+ <value value="5" name="CP_TOTAL_READ_REQUESTS"/>
+ <value value="6" name="CP_TOTAL_WRITE_REQUESTS"/>
+ <value value="7" name="CP_TOTAL_REQUESTS"/>
+ <value value="8" name="CP_DATA_BYTES_WRITTEN"/>
+ <value value="9" name="CP_WRITE_CLEAN_RESPONSES"/>
+ <value value="10" name="CP_R0_READ_BURSTS_RECEIVED"/>
+ <value value="11" name="CP_R1_READ_BURSTS_RECEIVED"/>
+ <value value="12" name="CP_R2_READ_BURSTS_RECEIVED"/>
+ <value value="13" name="CP_R3_READ_BURSTS_RECEIVED"/>
+ <value value="14" name="CP_R4_READ_BURSTS_RECEIVED"/>
+ <value value="15" name="CP_TOTAL_READ_BURSTS_RECEIVED"/>
+ <value value="16" name="CP_R0_DATA_BEATS_READ"/>
+ <value value="17" name="CP_R1_DATA_BEATS_READ"/>
+ <value value="18" name="CP_R2_DATA_BEATS_READ"/>
+ <value value="19" name="CP_R3_DATA_BEATS_READ"/>
+ <value value="20" name="CP_R4_DATA_BEATS_READ"/>
+ <value value="21" name="CP_TOTAL_DATA_BEATS_READ"/>
+ <value value="22" name="VGT_R0_REQUESTS"/>
+ <value value="23" name="VGT_R1_REQUESTS"/>
+ <value value="24" name="VGT_TOTAL_REQUESTS"/>
+ <value value="25" name="VGT_R0_READ_BURSTS_RECEIVED"/>
+ <value value="26" name="VGT_R1_READ_BURSTS_RECEIVED"/>
+ <value value="27" name="VGT_TOTAL_READ_BURSTS_RECEIVED"/>
+ <value value="28" name="VGT_R0_DATA_BEATS_READ"/>
+ <value value="29" name="VGT_R1_DATA_BEATS_READ"/>
+ <value value="30" name="VGT_TOTAL_DATA_BEATS_READ"/>
+ <value value="31" name="TC_TOTAL_REQUESTS"/>
+ <value value="32" name="TC_ROQ_REQUESTS"/>
+ <value value="33" name="TC_INFO_SENT"/>
+ <value value="34" name="TC_READ_BURSTS_RECEIVED"/>
+ <value value="35" name="TC_DATA_BEATS_READ"/>
+ <value value="36" name="TCD_BURSTS_READ"/>
+ <value value="37" name="RB_REQUESTS"/>
+ <value value="38" name="RB_DATA_BYTES_WRITTEN"/>
+ <value value="39" name="RB_WRITE_CLEAN_RESPONSES"/>
+ <value value="40" name="AXI_READ_REQUESTS_ID_0"/>
+ <value value="41" name="AXI_READ_REQUESTS_ID_1"/>
+ <value value="42" name="AXI_READ_REQUESTS_ID_2"/>
+ <value value="43" name="AXI_READ_REQUESTS_ID_3"/>
+ <value value="44" name="AXI_READ_REQUESTS_ID_4"/>
+ <value value="45" name="AXI_READ_REQUESTS_ID_5"/>
+ <value value="46" name="AXI_READ_REQUESTS_ID_6"/>
+ <value value="47" name="AXI_READ_REQUESTS_ID_7"/>
+ <value value="48" name="AXI_TOTAL_READ_REQUESTS"/>
+ <value value="49" name="AXI_WRITE_REQUESTS_ID_0"/>
+ <value value="50" name="AXI_WRITE_REQUESTS_ID_1"/>
+ <value value="51" name="AXI_WRITE_REQUESTS_ID_2"/>
+ <value value="52" name="AXI_WRITE_REQUESTS_ID_3"/>
+ <value value="53" name="AXI_WRITE_REQUESTS_ID_4"/>
+ <value value="54" name="AXI_WRITE_REQUESTS_ID_5"/>
+ <value value="55" name="AXI_WRITE_REQUESTS_ID_6"/>
+ <value value="56" name="AXI_WRITE_REQUESTS_ID_7"/>
+ <value value="57" name="AXI_TOTAL_WRITE_REQUESTS"/>
+ <value value="58" name="AXI_TOTAL_REQUESTS_ID_0"/>
+ <value value="59" name="AXI_TOTAL_REQUESTS_ID_1"/>
+ <value value="60" name="AXI_TOTAL_REQUESTS_ID_2"/>
+ <value value="61" name="AXI_TOTAL_REQUESTS_ID_3"/>
+ <value value="62" name="AXI_TOTAL_REQUESTS_ID_4"/>
+ <value value="63" name="AXI_TOTAL_REQUESTS_ID_5"/>
+ <value value="64" name="AXI_TOTAL_REQUESTS_ID_6"/>
+ <value value="65" name="AXI_TOTAL_REQUESTS_ID_7"/>
+ <value value="66" name="AXI_TOTAL_REQUESTS"/>
+ <value value="67" name="AXI_READ_CHANNEL_BURSTS_ID_0"/>
+ <value value="68" name="AXI_READ_CHANNEL_BURSTS_ID_1"/>
+ <value value="69" name="AXI_READ_CHANNEL_BURSTS_ID_2"/>
+ <value value="70" name="AXI_READ_CHANNEL_BURSTS_ID_3"/>
+ <value value="71" name="AXI_READ_CHANNEL_BURSTS_ID_4"/>
+ <value value="72" name="AXI_READ_CHANNEL_BURSTS_ID_5"/>
+ <value value="73" name="AXI_READ_CHANNEL_BURSTS_ID_6"/>
+ <value value="74" name="AXI_READ_CHANNEL_BURSTS_ID_7"/>
+ <value value="75" name="AXI_READ_CHANNEL_TOTAL_BURSTS"/>
+ <value value="76" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0"/>
+ <value value="77" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1"/>
+ <value value="78" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2"/>
+ <value value="79" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3"/>
+ <value value="80" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4"/>
+ <value value="81" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5"/>
+ <value value="82" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6"/>
+ <value value="83" name="AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7"/>
+ <value value="84" name="AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ"/>
+ <value value="85" name="AXI_WRITE_CHANNEL_BURSTS_ID_0"/>
+ <value value="86" name="AXI_WRITE_CHANNEL_BURSTS_ID_1"/>
+ <value value="87" name="AXI_WRITE_CHANNEL_BURSTS_ID_2"/>
+ <value value="88" name="AXI_WRITE_CHANNEL_BURSTS_ID_3"/>
+ <value value="89" name="AXI_WRITE_CHANNEL_BURSTS_ID_4"/>
+ <value value="90" name="AXI_WRITE_CHANNEL_BURSTS_ID_5"/>
+ <value value="91" name="AXI_WRITE_CHANNEL_BURSTS_ID_6"/>
+ <value value="92" name="AXI_WRITE_CHANNEL_BURSTS_ID_7"/>
+ <value value="93" name="AXI_WRITE_CHANNEL_TOTAL_BURSTS"/>
+ <value value="94" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0"/>
+ <value value="95" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1"/>
+ <value value="96" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2"/>
+ <value value="97" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3"/>
+ <value value="98" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4"/>
+ <value value="99" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5"/>
+ <value value="100" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6"/>
+ <value value="101" name="AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7"/>
+ <value value="102" name="AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN"/>
+ <value value="103" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0"/>
+ <value value="104" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1"/>
+ <value value="105" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2"/>
+ <value value="106" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3"/>
+ <value value="107" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4"/>
+ <value value="108" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5"/>
+ <value value="109" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6"/>
+ <value value="110" name="AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7"/>
+ <value value="111" name="AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES"/>
+ <value value="112" name="TOTAL_MMU_MISSES"/>
+ <value value="113" name="MMU_READ_MISSES"/>
+ <value value="114" name="MMU_WRITE_MISSES"/>
+ <value value="115" name="TOTAL_MMU_HITS"/>
+ <value value="116" name="MMU_READ_HITS"/>
+ <value value="117" name="MMU_WRITE_HITS"/>
+ <value value="118" name="SPLIT_MODE_TC_HITS"/>
+ <value value="119" name="SPLIT_MODE_TC_MISSES"/>
+ <value value="120" name="SPLIT_MODE_NON_TC_HITS"/>
+ <value value="121" name="SPLIT_MODE_NON_TC_MISSES"/>
+ <value value="122" name="STALL_AWAITING_TLB_MISS_FETCH"/>
+ <value value="123" name="MMU_TLB_MISS_READ_BURSTS_RECEIVED"/>
+ <value value="124" name="MMU_TLB_MISS_DATA_BEATS_READ"/>
+ <value value="125" name="CP_CYCLES_HELD_OFF"/>
+ <value value="126" name="VGT_CYCLES_HELD_OFF"/>
+ <value value="127" name="TC_CYCLES_HELD_OFF"/>
+ <value value="128" name="TC_ROQ_CYCLES_HELD_OFF"/>
+ <value value="129" name="TC_CYCLES_HELD_OFF_TCD_FULL"/>
+ <value value="130" name="RB_CYCLES_HELD_OFF"/>
+ <value value="131" name="TOTAL_CYCLES_ANY_CLNT_HELD_OFF"/>
+ <value value="132" name="TLB_MISS_CYCLES_HELD_OFF"/>
+ <value value="133" name="AXI_READ_REQUEST_HELD_OFF"/>
+ <value value="134" name="AXI_WRITE_REQUEST_HELD_OFF"/>
+ <value value="135" name="AXI_REQUEST_HELD_OFF"/>
+ <value value="136" name="AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT"/>
+ <value value="137" name="AXI_WRITE_DATA_HELD_OFF"/>
+ <value value="138" name="CP_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="139" name="VGT_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="140" name="TC_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="141" name="TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="142" name="RB_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="143" name="TOTAL_SAME_PAGE_BANK_REQUESTS"/>
+ <value value="144" name="CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT"/>
+ <value value="145" name="VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT"/>
+ <value value="146" name="TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT"/>
+ <value value="147" name="RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT"/>
+ <value value="148" name="TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT"/>
+ <value value="149" name="TOTAL_MH_READ_REQUESTS"/>
+ <value value="150" name="TOTAL_MH_WRITE_REQUESTS"/>
+ <value value="151" name="TOTAL_MH_REQUESTS"/>
+ <value value="152" name="MH_BUSY"/>
+ <value value="153" name="CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE"/>
+ <value value="154" name="VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE"/>
+ <value value="155" name="TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE"/>
+ <value value="156" name="RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE"/>
+ <value value="157" name="TC_ROQ_N_VALID_ENTRIES"/>
+ <value value="158" name="ARQ_N_ENTRIES"/>
+ <value value="159" name="WDB_N_ENTRIES"/>
+ <value value="160" name="MH_READ_LATENCY_OUTST_REQ_SUM"/>
+ <value value="161" name="MC_READ_LATENCY_OUTST_REQ_SUM"/>
+ <value value="162" name="MC_TOTAL_READ_REQUESTS"/>
+ <value value="163" name="ELAPSED_CYCLES_MH_GATED_CLK"/>
+ <value value="164" name="ELAPSED_CLK_CYCLES"/>
+ <value value="165" name="CP_W_16B_REQUESTS"/>
+ <value value="166" name="CP_W_32B_REQUESTS"/>
+ <value value="167" name="TC_16B_REQUESTS"/>
+ <value value="168" name="TC_32B_REQUESTS"/>
+ <value value="169" name="PA_REQUESTS"/>
+ <value value="170" name="PA_DATA_BYTES_WRITTEN"/>
+ <value value="171" name="PA_WRITE_CLEAN_RESPONSES"/>
+ <value value="172" name="PA_CYCLES_HELD_OFF"/>
+ <value value="173" name="AXI_READ_REQUEST_DATA_BEATS_ID_0"/>
+ <value value="174" name="AXI_READ_REQUEST_DATA_BEATS_ID_1"/>
+ <value value="175" name="AXI_READ_REQUEST_DATA_BEATS_ID_2"/>
+ <value value="176" name="AXI_READ_REQUEST_DATA_BEATS_ID_3"/>
+ <value value="177" name="AXI_READ_REQUEST_DATA_BEATS_ID_4"/>
+ <value value="178" name="AXI_READ_REQUEST_DATA_BEATS_ID_5"/>
+ <value value="179" name="AXI_READ_REQUEST_DATA_BEATS_ID_6"/>
+ <value value="180" name="AXI_READ_REQUEST_DATA_BEATS_ID_7"/>
+ <value value="181" name="AXI_TOTAL_READ_REQUEST_DATA_BEATS"/>
+</enum>
+
+<enum name="perf_mode_cnt">
+ <value name="PERF_STATE_RESET" value="0"/>
+ <value name="PERF_STATE_ENABLE" value="1"/>
+ <value name="PERF_STATE_FREEZE" value="2"/>
+</enum>
+
+<domain name="A2XX" width="32">
+
+ <bitset name="a2xx_vgt_current_bin_id_min_max" inline="yes">
+ <bitfield name="COLUMN" low="0" high="2" type="uint"/>
+ <bitfield name="ROW" low="3" high="5" type="uint"/>
+ <bitfield name="GUARD_BAND_MASK" low="6" high="8" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x0001" name="RBBM_PATCH_RELEASE"/>
+ <reg32 offset="0x003b" name="RBBM_CNTL"/>
+ <reg32 offset="0x003c" name="RBBM_SOFT_RESET"/>
+ <reg32 offset="0x00c0" name="CP_PFP_UCODE_ADDR"/>
+ <reg32 offset="0x00c1" name="CP_PFP_UCODE_DATA"/>
+
+ <enum name="adreno_mmu_clnt_beh">
+ <value name="BEH_NEVR" value="0"/>
+ <value name="BEH_TRAN_RNG" value="1"/>
+ <value name="BEH_TRAN_FLT" value="2"/>
+ </enum>
+
+ <!--
+  Note: these MMU registers appear to apply only to a2xx devices with
+  gpummu. MH_MMU_CONFIG shows up in places in a3xx firmware where it
+  doesn't make sense, so offset 0x40 is presumably a different register
+  on a3xx; these definitions are therefore kept in the A2XX domain:
+ -->
+ <reg32 offset="0x0040" name="MH_MMU_CONFIG">
+ <bitfield name="MMU_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="SPLIT_MODE_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="RB_W_CLNT_BEHAVIOR" low="4" high="5" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_W_CLNT_BEHAVIOR" low="6" high="7" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_R0_CLNT_BEHAVIOR" low="8" high="9" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_R1_CLNT_BEHAVIOR" low="10" high="11" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_R2_CLNT_BEHAVIOR" low="12" high="13" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_R3_CLNT_BEHAVIOR" low="14" high="15" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="CP_R4_CLNT_BEHAVIOR" low="16" high="17" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="VGT_R0_CLNT_BEHAVIOR" low="18" high="19" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="VGT_R1_CLNT_BEHAVIOR" low="20" high="21" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="TC_R_CLNT_BEHAVIOR" low="22" high="23" type="adreno_mmu_clnt_beh"/>
+ <bitfield name="PA_W_CLNT_BEHAVIOR" low="24" high="25" type="adreno_mmu_clnt_beh"/>
+ </reg32>
+ <reg32 offset="0x0041" name="MH_MMU_VA_RANGE">
+ <bitfield name="NUM_64KB_REGIONS" low="0" high="11" type="uint"/>
+ <bitfield name="VA_BASE" low="12" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0042" name="MH_MMU_PT_BASE"/>
+ <reg32 offset="0x0043" name="MH_MMU_PAGE_FAULT"/>
+ <reg32 offset="0x0044" name="MH_MMU_TRAN_ERROR"/>
+ <reg32 offset="0x0045" name="MH_MMU_INVALIDATE">
+ <bitfield name="INVALIDATE_ALL" pos="0" type="boolean"/>
+ <bitfield name="INVALIDATE_TC" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0046" name="MH_MMU_MPU_BASE"/>
+ <reg32 offset="0x0047" name="MH_MMU_MPU_END"/>
+
+ <reg32 offset="0x0394" name="NQWAIT_UNTIL"/>
+ <reg32 offset="0x0395" name="RBBM_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0396" name="RBBM_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0397" name="RBBM_PERFCOUNTER0_LO"/>
+ <reg32 offset="0x0398" name="RBBM_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0399" name="RBBM_PERFCOUNTER1_LO"/>
+ <reg32 offset="0x039a" name="RBBM_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x039b" name="RBBM_DEBUG"/>
+ <reg32 offset="0x039c" name="RBBM_PM_OVERRIDE1">
+ <bitfield name="RBBM_AHBCLK_PM_OVERRIDE" pos="0" type="boolean"/>
+ <bitfield name="SC_REG_SCLK_PM_OVERRIDE" pos="1" type="boolean"/>
+ <bitfield name="SC_SCLK_PM_OVERRIDE" pos="2" type="boolean"/>
+ <bitfield name="SP_TOP_SCLK_PM_OVERRIDE" pos="3" type="boolean"/>
+ <bitfield name="SP_V0_SCLK_PM_OVERRIDE" pos="4" type="boolean"/>
+ <bitfield name="SQ_REG_SCLK_PM_OVERRIDE" pos="5" type="boolean"/>
+ <bitfield name="SQ_REG_FIFOS_SCLK_PM_OVERRIDE" pos="6" type="boolean"/>
+ <bitfield name="SQ_CONST_MEM_SCLK_PM_OVERRIDE" pos="7" type="boolean"/>
+ <bitfield name="SQ_SQ_SCLK_PM_OVERRIDE" pos="8" type="boolean"/>
+ <bitfield name="SX_SCLK_PM_OVERRIDE" pos="9" type="boolean"/>
+ <bitfield name="SX_REG_SCLK_PM_OVERRIDE" pos="10" type="boolean"/>
+ <bitfield name="TCM_TCO_SCLK_PM_OVERRIDE" pos="11" type="boolean"/>
+ <bitfield name="TCM_TCM_SCLK_PM_OVERRIDE" pos="12" type="boolean"/>
+ <bitfield name="TCM_TCD_SCLK_PM_OVERRIDE" pos="13" type="boolean"/>
+ <bitfield name="TCM_REG_SCLK_PM_OVERRIDE" pos="14" type="boolean"/>
+ <bitfield name="TPC_TPC_SCLK_PM_OVERRIDE" pos="15" type="boolean"/>
+ <bitfield name="TPC_REG_SCLK_PM_OVERRIDE" pos="16" type="boolean"/>
+ <bitfield name="TCF_TCA_SCLK_PM_OVERRIDE" pos="17" type="boolean"/>
+ <bitfield name="TCF_TCB_SCLK_PM_OVERRIDE" pos="18" type="boolean"/>
+ <bitfield name="TCF_TCB_READ_SCLK_PM_OVERRIDE" pos="19" type="boolean"/>
+ <bitfield name="TP_TP_SCLK_PM_OVERRIDE" pos="20" type="boolean"/>
+ <bitfield name="TP_REG_SCLK_PM_OVERRIDE" pos="21" type="boolean"/>
+ <bitfield name="CP_G_SCLK_PM_OVERRIDE" pos="22" type="boolean"/>
+ <bitfield name="CP_REG_SCLK_PM_OVERRIDE" pos="23" type="boolean"/>
+ <bitfield name="CP_G_REG_SCLK_PM_OVERRIDE" pos="24" type="boolean"/>
+ <bitfield name="SPI_SCLK_PM_OVERRIDE" pos="25" type="boolean"/>
+ <bitfield name="RB_REG_SCLK_PM_OVERRIDE" pos="26" type="boolean"/>
+ <bitfield name="RB_SCLK_PM_OVERRIDE" pos="27" type="boolean"/>
+ <bitfield name="MH_MH_SCLK_PM_OVERRIDE" pos="28" type="boolean"/>
+ <bitfield name="MH_REG_SCLK_PM_OVERRIDE" pos="29" type="boolean"/>
+ <bitfield name="MH_MMU_SCLK_PM_OVERRIDE" pos="30" type="boolean"/>
+ <bitfield name="MH_TCROQ_SCLK_PM_OVERRIDE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x039d" name="RBBM_PM_OVERRIDE2">
+ <bitfield name="PA_REG_SCLK_PM_OVERRIDE" pos="0" type="boolean"/>
+ <bitfield name="PA_PA_SCLK_PM_OVERRIDE" pos="1" type="boolean"/>
+ <bitfield name="PA_AG_SCLK_PM_OVERRIDE" pos="2" type="boolean"/>
+ <bitfield name="VGT_REG_SCLK_PM_OVERRIDE" pos="3" type="boolean"/>
+ <bitfield name="VGT_FIFOS_SCLK_PM_OVERRIDE" pos="4" type="boolean"/>
+ <bitfield name="VGT_VGT_SCLK_PM_OVERRIDE" pos="5" type="boolean"/>
+ <bitfield name="DEBUG_PERF_SCLK_PM_OVERRIDE" pos="6" type="boolean"/>
+ <bitfield name="PERM_SCLK_PM_OVERRIDE" pos="7" type="boolean"/>
+ <bitfield name="GC_GA_GMEM0_PM_OVERRIDE" pos="8" type="boolean"/>
+ <bitfield name="GC_GA_GMEM1_PM_OVERRIDE" pos="9" type="boolean"/>
+ <bitfield name="GC_GA_GMEM2_PM_OVERRIDE" pos="10" type="boolean"/>
+ <bitfield name="GC_GA_GMEM3_PM_OVERRIDE" pos="11" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x03a0" name="RBBM_DEBUG_OUT"/>
+ <reg32 offset="0x03a1" name="RBBM_DEBUG_CNTL"/>
+ <reg32 offset="0x03b3" name="RBBM_READ_ERROR"/>
+ <reg32 offset="0x03b4" name="RBBM_INT_CNTL">
+ <bitfield name="RDERR_INT_MASK" pos="0" type="boolean"/>
+ <bitfield name="DISPLAY_UPDATE_INT_MASK" pos="1" type="boolean"/>
+ <bitfield name="GUI_IDLE_INT_MASK" pos="19" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x03b5" name="RBBM_INT_STATUS"/>
+ <reg32 offset="0x03b6" name="RBBM_INT_ACK"/>
+ <reg32 offset="0x03b7" name="MASTER_INT_SIGNAL">
+ <bitfield name="MH_INT_STAT" pos="5" type="boolean"/>
+ <bitfield name="SQ_INT_STAT" pos="26" type="boolean"/>
+ <bitfield name="CP_INT_STAT" pos="30" type="boolean"/>
+ <bitfield name="RBBM_INT_STAT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x03f9" name="RBBM_PERIPHID1"/>
+ <reg32 offset="0x03fa" name="RBBM_PERIPHID2"/>
+ <reg32 offset="0x0444" name="CP_PERFMON_CNTL">
+ <!-- The width is uncertain -->
+ <bitfield name="PERF_MODE_CNT" low="0" high="2" type="perf_mode_cnt"/>
+ </reg32>
+ <reg32 offset="0x0445" name="CP_PERFCOUNTER_SELECT"/>
+ <reg32 offset="0x0446" name="CP_PERFCOUNTER_LO"/>
+ <reg32 offset="0x0447" name="CP_PERFCOUNTER_HI"/>
+ <reg32 offset="0x05d0" name="RBBM_STATUS">
+ <bitfield name="CMDFIFO_AVAIL" low="0" high="4" type="uint"/>
+ <bitfield name="TC_BUSY" pos="5" type="boolean"/>
+ <bitfield name="HIRQ_PENDING" pos="8" type="boolean"/>
+ <bitfield name="CPRQ_PENDING" pos="9" type="boolean"/>
+ <bitfield name="CFRQ_PENDING" pos="10" type="boolean"/>
+ <bitfield name="PFRQ_PENDING" pos="11" type="boolean"/>
+ <bitfield name="VGT_BUSY_NO_DMA" pos="12" type="boolean"/>
+ <bitfield name="RBBM_WU_BUSY" pos="14" type="boolean"/>
+ <bitfield name="CP_NRT_BUSY" pos="16" type="boolean"/>
+ <bitfield name="MH_BUSY" pos="18" type="boolean"/>
+ <bitfield name="MH_COHERENCY_BUSY" pos="19" type="boolean"/>
+ <bitfield name="SX_BUSY" pos="21" type="boolean"/>
+ <bitfield name="TPC_BUSY" pos="22" type="boolean"/>
+ <bitfield name="SC_CNTX_BUSY" pos="24" type="boolean"/>
+ <bitfield name="PA_BUSY" pos="25" type="boolean"/>
+ <bitfield name="VGT_BUSY" pos="26" type="boolean"/>
+ <bitfield name="SQ_CNTX17_BUSY" pos="27" type="boolean"/>
+ <bitfield name="SQ_CNTX0_BUSY" pos="28" type="boolean"/>
+ <bitfield name="RB_CNTX_BUSY" pos="30" type="boolean"/>
+ <bitfield name="GUI_ACTIVE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0a40" name="MH_ARBITER_CONFIG">
+ <bitfield name="SAME_PAGE_LIMIT" low="0" high="5" type="uint"/>
+ <bitfield name="SAME_PAGE_GRANULARITY" pos="6" type="boolean"/>
+ <bitfield name="L1_ARB_ENABLE" pos="7" type="boolean"/>
+ <bitfield name="L1_ARB_HOLD_ENABLE" pos="8" type="boolean"/>
+ <bitfield name="L2_ARB_CONTROL" pos="9" type="boolean"/>
+ <bitfield name="PAGE_SIZE" low="10" high="12" type="uint"/>
+ <bitfield name="TC_REORDER_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="TC_ARB_HOLD_ENABLE" pos="14" type="boolean"/>
+ <bitfield name="IN_FLIGHT_LIMIT_ENABLE" pos="15" type="boolean"/>
+ <bitfield name="IN_FLIGHT_LIMIT" low="16" high="21" type="uint"/>
+ <bitfield name="CP_CLNT_ENABLE" pos="22" type="boolean"/>
+ <bitfield name="VGT_CLNT_ENABLE" pos="23" type="boolean"/>
+ <bitfield name="TC_CLNT_ENABLE" pos="24" type="boolean"/>
+ <bitfield name="RB_CLNT_ENABLE" pos="25" type="boolean"/>
+ <bitfield name="PA_CLNT_ENABLE" pos="26" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0a42" name="MH_INTERRUPT_MASK">
+ <bitfield name="AXI_READ_ERROR" pos="0" type="boolean"/>
+ <bitfield name="AXI_WRITE_ERROR" pos="1" type="boolean"/>
+ <bitfield name="MMU_PAGE_FAULT" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0a43" name="MH_INTERRUPT_STATUS"/>
+ <reg32 offset="0x0a44" name="MH_INTERRUPT_CLEAR"/>
+ <reg32 offset="0x0a54" name="MH_CLNT_INTF_CTRL_CONFIG1"/>
+ <reg32 offset="0x0a55" name="MH_CLNT_INTF_CTRL_CONFIG2"/>
+ <reg32 offset="0x0c01" name="A220_VSC_BIN_SIZE">
+ <bitfield name="WIDTH" low="0" high="4" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="5" high="9" shr="5" type="uint"/>
+ </reg32>
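+ <!--
+  Worked example (assuming the usual rnndb meaning of shr="5", i.e. the
+  field stores the value right-shifted by 5): a 64x32 pixel bin would be
+  programmed as WIDTH=2, HEIGHT=1, since 64 >> 5 = 2 and 32 >> 5 = 1.
+ -->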
+ <array offset="0x0c06" name="VSC_PIPE" stride="3" length="8">
+ <reg32 offset="0x0" name="CONFIG"/>
+ <reg32 offset="0x1" name="DATA_ADDRESS"/>
+ <reg32 offset="0x2" name="DATA_LENGTH"/>
+ </array>
+ <reg32 offset="0x0c38" name="PC_DEBUG_CNTL"/>
+ <reg32 offset="0x0c39" name="PC_DEBUG_DATA"/>
+ <reg32 offset="0x0c44" name="PA_SC_VIZ_QUERY_STATUS"/>
+ <reg32 offset="0x0c80" name="GRAS_DEBUG_CNTL"/>
+ <reg32 offset="0x0c80" name="PA_SU_DEBUG_CNTL"/>
+ <reg32 offset="0x0c81" name="GRAS_DEBUG_DATA"/>
+ <reg32 offset="0x0c81" name="PA_SU_DEBUG_DATA"/>
+ <reg32 offset="0x0c86" name="PA_SU_FACE_DATA">
+ <bitfield name="BASE_ADDR" low="5" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0d00" name="SQ_GPR_MANAGEMENT">
+ <bitfield name="REG_DYNAMIC" pos="0" type="boolean"/>
+ <bitfield name="REG_SIZE_PIX" low="4" high="11" type="uint"/>
+ <bitfield name="REG_SIZE_VTX" low="12" high="19" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0d01" name="SQ_FLOW_CONTROL"/>
+ <reg32 offset="0x0d02" name="SQ_INST_STORE_MANAGMENT">
+ <bitfield name="INST_BASE_PIX" low="0" high="11" type="uint"/>
+ <bitfield name="INST_BASE_VTX" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0d05" name="SQ_DEBUG_MISC"/>
+ <reg32 offset="0x0d34" name="SQ_INT_CNTL"/>
+ <reg32 offset="0x0d35" name="SQ_INT_STATUS"/>
+ <reg32 offset="0x0d36" name="SQ_INT_ACK"/>
+ <reg32 offset="0x0dae" name="SQ_DEBUG_INPUT_FSM"/>
+ <reg32 offset="0x0daf" name="SQ_DEBUG_CONST_MGR_FSM"/>
+ <reg32 offset="0x0db0" name="SQ_DEBUG_TP_FSM"/>
+ <reg32 offset="0x0db1" name="SQ_DEBUG_FSM_ALU_0"/>
+ <reg32 offset="0x0db2" name="SQ_DEBUG_FSM_ALU_1"/>
+ <reg32 offset="0x0db3" name="SQ_DEBUG_EXP_ALLOC"/>
+ <reg32 offset="0x0db4" name="SQ_DEBUG_PTR_BUFF"/>
+ <reg32 offset="0x0db5" name="SQ_DEBUG_GPR_VTX"/>
+ <reg32 offset="0x0db6" name="SQ_DEBUG_GPR_PIX"/>
+ <reg32 offset="0x0db7" name="SQ_DEBUG_TB_STATUS_SEL"/>
+ <reg32 offset="0x0db8" name="SQ_DEBUG_VTX_TB_0"/>
+ <reg32 offset="0x0db9" name="SQ_DEBUG_VTX_TB_1"/>
+ <reg32 offset="0x0dba" name="SQ_DEBUG_VTX_TB_STATUS_REG"/>
+ <reg32 offset="0x0dbb" name="SQ_DEBUG_VTX_TB_STATE_MEM"/>
+ <reg32 offset="0x0dbc" name="SQ_DEBUG_PIX_TB_0"/>
+ <reg32 offset="0x0dbd" name="SQ_DEBUG_PIX_TB_STATUS_REG_0"/>
+ <reg32 offset="0x0dbe" name="SQ_DEBUG_PIX_TB_STATUS_REG_1"/>
+ <reg32 offset="0x0dbf" name="SQ_DEBUG_PIX_TB_STATUS_REG_2"/>
+ <reg32 offset="0x0dc0" name="SQ_DEBUG_PIX_TB_STATUS_REG_3"/>
+ <reg32 offset="0x0dc1" name="SQ_DEBUG_PIX_TB_STATE_MEM"/>
+ <reg32 offset="0x0e00" name="TC_CNTL_STATUS">
+ <bitfield name="L2_INVALIDATE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0e1e" name="TP0_CHICKEN"/>
+ <reg32 offset="0x0f01" name="RB_BC_CONTROL">
+ <bitfield name="ACCUM_LINEAR_MODE_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ACCUM_TIMEOUT_SELECT" low="1" high="2" type="uint"/>
+ <bitfield name="DISABLE_EDRAM_CAM" pos="3" type="boolean"/>
+ <bitfield name="DISABLE_EZ_FAST_CONTEXT_SWITCH" pos="4" type="boolean"/>
+ <bitfield name="DISABLE_EZ_NULL_ZCMD_DROP" pos="5" type="boolean"/>
+ <bitfield name="DISABLE_LZ_NULL_ZCMD_DROP" pos="6" type="boolean"/>
+ <bitfield name="ENABLE_AZ_THROTTLE" pos="7" type="boolean"/>
+ <bitfield name="AZ_THROTTLE_COUNT" low="8" high="12" type="uint"/>
+ <bitfield name="ENABLE_CRC_UPDATE" pos="14" type="boolean"/>
+ <bitfield name="CRC_MODE" pos="15" type="boolean"/>
+ <bitfield name="DISABLE_SAMPLE_COUNTERS" pos="16" type="boolean"/>
+ <bitfield name="DISABLE_ACCUM" pos="17" type="boolean"/>
+ <bitfield name="ACCUM_ALLOC_MASK" low="18" high="21" type="uint"/>
+ <bitfield name="LINEAR_PERFORMANCE_ENABLE" pos="22" type="boolean"/>
+ <bitfield name="ACCUM_DATA_FIFO_LIMIT" low="23" high="26" type="uint"/>
+ <bitfield name="MEM_EXPORT_TIMEOUT_SELECT" low="27" high="28" type="uint"/>
+ <bitfield name="MEM_EXPORT_LINEAR_MODE_ENABLE" pos="29" type="boolean"/>
+ <bitfield name="CRC_SYSTEM" pos="30" type="boolean"/>
+ <bitfield name="RESERVED6" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0f02" name="RB_EDRAM_INFO"/>
+ <reg32 offset="0x0f26" name="RB_DEBUG_CNTL"/>
+ <reg32 offset="0x0f27" name="RB_DEBUG_DATA"/>
+ <reg32 offset="0x2000" name="RB_SURFACE_INFO">
+ <bitfield name="SURFACE_PITCH" low="0" high="13" type="uint"/>
+ <bitfield name="MSAA_SAMPLES" low="14" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2001" name="RB_COLOR_INFO">
+ <bitfield name="FORMAT" low="0" high="3" type="a2xx_colorformatx"/>
+ <bitfield name="ROUND_MODE" low="4" high="5" type="uint"/>
+ <bitfield name="LINEAR" pos="6" type="boolean"/>
+ <bitfield name="ENDIAN" low="7" high="8" type="uint"/>
+ <bitfield name="SWAP" low="9" high="10" type="uint"/>
+ <bitfield name="BASE" low="12" high="31" shr="12"/>
+ </reg32>
+ <reg32 offset="0x2002" name="RB_DEPTH_INFO">
+ <bitfield name="DEPTH_FORMAT" pos="0" type="adreno_rb_depth_format"/>
+ <bitfield name="DEPTH_BASE" low="12" high="31" type="uint" shr="12"/>
+ </reg32>
+ <reg32 offset="0x2005" name="A225_RB_COLOR_INFO3"/>
+ <reg32 offset="0x2006" name="COHER_DEST_BASE_0"/>
+ <reg32 offset="0x200e" name="PA_SC_SCREEN_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x200f" name="PA_SC_SCREEN_SCISSOR_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x2080" name="PA_SC_WINDOW_OFFSET">
+ <bitfield name="X" low="0" high="14" type="int"/>
+ <bitfield name="Y" low="16" high="30" type="int"/>
+ <bitfield name="DISABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2081" name="PA_SC_WINDOW_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x2082" name="PA_SC_WINDOW_SCISSOR_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x2010" name="UNKNOWN_2010"/>
+ <reg32 offset="0x2100" name="VGT_MAX_VTX_INDX"/>
+ <reg32 offset="0x2101" name="VGT_MIN_VTX_INDX"/>
+ <reg32 offset="0x2102" name="VGT_INDX_OFFSET"/>
+ <reg32 offset="0x2103" name="A225_PC_MULTI_PRIM_IB_RESET_INDX"/>
+ <reg32 offset="0x2104" name="RB_COLOR_MASK">
+ <bitfield name="WRITE_RED" pos="0" type="boolean"/>
+ <bitfield name="WRITE_GREEN" pos="1" type="boolean"/>
+ <bitfield name="WRITE_BLUE" pos="2" type="boolean"/>
+ <bitfield name="WRITE_ALPHA" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2105" name="RB_BLEND_RED"/>
+ <reg32 offset="0x2106" name="RB_BLEND_GREEN"/>
+ <reg32 offset="0x2107" name="RB_BLEND_BLUE"/>
+ <reg32 offset="0x2108" name="RB_BLEND_ALPHA"/>
+ <reg32 offset="0x2109" name="RB_FOG_COLOR">
+ <bitfield name="FOG_RED" low="0" high="7" type="uint"/>
+ <bitfield name="FOG_GREEN" low="8" high="15" type="uint"/>
+ <bitfield name="FOG_BLUE" low="16" high="23" type="uint"/>
+ </reg32>
+ <reg32 offset="0x210c" name="RB_STENCILREFMASK_BF" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0x210d" name="RB_STENCILREFMASK" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0x210e" name="RB_ALPHA_REF"/>
+ <reg32 offset="0x210f" name="PA_CL_VPORT_XSCALE" type="float"/>
+ <reg32 offset="0x2110" name="PA_CL_VPORT_XOFFSET" type="float"/>
+ <reg32 offset="0x2111" name="PA_CL_VPORT_YSCALE" type="float"/>
+ <reg32 offset="0x2112" name="PA_CL_VPORT_YOFFSET" type="float"/>
+ <reg32 offset="0x2113" name="PA_CL_VPORT_ZSCALE" type="float"/>
+ <reg32 offset="0x2114" name="PA_CL_VPORT_ZOFFSET" type="float"/>
+ <reg32 offset="0x2180" name="SQ_PROGRAM_CNTL">
+ <doc>
+ note: only 0x3f worth of valid register values for VS_REGS and
+ PS_REGS; the high bit is set to indicate '0 registers used' (see
+ the sketch in the comment after this register).
+ </doc>
+ <bitfield name="VS_REGS" low="0" high="7" type="uint"/>
+ <bitfield name="PS_REGS" low="8" high="15" type="uint"/>
+ <bitfield name="VS_RESOURCE" pos="16" type="boolean"/>
+ <bitfield name="PS_RESOURCE" pos="17" type="boolean"/>
+ <bitfield name="PARAM_GEN" pos="18" type="boolean"/>
+ <bitfield name="GEN_INDEX_PIX" pos="19" type="boolean"/>
+ <bitfield name="VS_EXPORT_COUNT" low="20" high="23" type="uint"/>
+ <bitfield name="VS_EXPORT_MODE" low="24" high="26" type="a2xx_sq_ps_vtx_mode"/>
+ <bitfield name="PS_EXPORT_MODE" low="27" high="30" type="uint"/>
+ <bitfield name="GEN_INDEX_VTX" pos="31" type="boolean"/>
+ </reg32>
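+ <!--
+  Hypothetical encoding sketch for the VS_REGS/PS_REGS fields above (an
+  illustration of the note in the <doc>, not code taken from any driver):
+
+    /* pack a GPR count into one 8-bit REGS field; values 0..0x3f are
+     * used directly and the high bit marks a shader that uses no
+     * registers at all (whether the hardware wants the raw count or
+     * count minus one is not specified here, so the raw count is
+     * assumed) */
+    static inline uint8_t sq_program_cntl_regs(unsigned num_gprs)
+    {
+        return num_gprs ? (num_gprs & 0x3f) : 0x80;
+    }
+ -->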
+ <reg32 offset="0x2181" name="SQ_CONTEXT_MISC">
+ <bitfield name="INST_PRED_OPTIMIZE" pos="0" type="boolean"/>
+ <bitfield name="SC_OUTPUT_SCREEN_XY" pos="1" type="boolean"/>
+ <bitfield name="SC_SAMPLE_CNTL" low="2" high="3" type="a2xx_sq_sample_cntl"/>
+ <bitfield name="PARAM_GEN_POS" low="8" high="15" type="uint"/>
+ <bitfield name="PERFCOUNTER_REF" pos="16" type="boolean"/>
+ <bitfield name="YEILD_OPTIMIZE" pos="17" type="boolean"/>
+ <bitfield name="TX_CACHE_SEL" pos="18" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2182" name="SQ_INTERPOLATOR_CNTL">
+ <bitfield name="PARAM_SHADE" low="0" high="15" type="uint"/>
+ <bitfield name="SAMPLING_PATTERN" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2183" name="SQ_WRAPPING_0">
+ <bitfield name="PARAM_WRAP_0" low="0" high="3" type="uint"/>
+ <bitfield name="PARAM_WRAP_1" low="4" high="7" type="uint"/>
+ <bitfield name="PARAM_WRAP_2" low="8" high="11" type="uint"/>
+ <bitfield name="PARAM_WRAP_3" low="12" high="15" type="uint"/>
+ <bitfield name="PARAM_WRAP_4" low="16" high="19" type="uint"/>
+ <bitfield name="PARAM_WRAP_5" low="20" high="23" type="uint"/>
+ <bitfield name="PARAM_WRAP_6" low="24" high="27" type="uint"/>
+ <bitfield name="PARAM_WRAP_7" low="28" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2184" name="SQ_WRAPPING_1">
+ <bitfield name="PARAM_WRAP_8" low="0" high="3" type="uint"/>
+ <bitfield name="PARAM_WRAP_9" low="4" high="7" type="uint"/>
+ <bitfield name="PARAM_WRAP_10" low="8" high="11" type="uint"/>
+ <bitfield name="PARAM_WRAP_11" low="12" high="15" type="uint"/>
+ <bitfield name="PARAM_WRAP_12" low="16" high="19" type="uint"/>
+ <bitfield name="PARAM_WRAP_13" low="20" high="23" type="uint"/>
+ <bitfield name="PARAM_WRAP_14" low="24" high="27" type="uint"/>
+ <bitfield name="PARAM_WRAP_15" low="28" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x21f6" name="SQ_PS_PROGRAM">
+ <bitfield name="BASE" low="0" high="11" type="uint"/>
+ <bitfield name="SIZE" low="12" high="23" type="uint"/>
+ </reg32>
+ <reg32 offset="0x21f7" name="SQ_VS_PROGRAM">
+ <bitfield name="BASE" low="0" high="11" type="uint"/>
+ <bitfield name="SIZE" low="12" high="23" type="uint"/>
+ </reg32>
+ <reg32 offset="0x21f9" name="VGT_EVENT_INITIATOR"/>
+ <reg32 offset="0x21fc" name="VGT_DRAW_INITIATOR" type="vgt_draw_initiator"/>
+ <reg32 offset="0x21fd" name="VGT_IMMED_DATA"/>
+ <reg32 offset="0x2200" name="RB_DEPTHCONTROL">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="Z_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="EARLY_Z_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="ZFUNC" low="4" high="6" type="adreno_compare_func"/>
+ <bitfield name="BACKFACE_ENABLE" pos="7" type="boolean"/>
+ <bitfield name="STENCILFUNC" low="8" high="10" type="adreno_compare_func"/>
+ <bitfield name="STENCILFAIL" low="11" high="13" type="adreno_stencil_op"/>
+ <bitfield name="STENCILZPASS" low="14" high="16" type="adreno_stencil_op"/>
+ <bitfield name="STENCILZFAIL" low="17" high="19" type="adreno_stencil_op"/>
+ <bitfield name="STENCILFUNC_BF" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="STENCILFAIL_BF" low="23" high="25" type="adreno_stencil_op"/>
+ <bitfield name="STENCILZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
+ <bitfield name="STENCILZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
+ </reg32>
+ <reg32 offset="0x2201" name="RB_BLEND_CONTROL">
+ <bitfield name="COLOR_SRCBLEND" low="0" high="4" type="adreno_rb_blend_factor"/>
+ <bitfield name="COLOR_COMB_FCN" low="5" high="7" type="a2xx_rb_blend_opcode"/>
+ <bitfield name="COLOR_DESTBLEND" low="8" high="12" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_SRCBLEND" low="16" high="20" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_COMB_FCN" low="21" high="23" type="a2xx_rb_blend_opcode"/>
+ <bitfield name="ALPHA_DESTBLEND" low="24" high="28" type="adreno_rb_blend_factor"/>
+ <bitfield name="BLEND_FORCE_ENABLE" pos="29" type="boolean"/>
+ <bitfield name="BLEND_FORCE" pos="30" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2202" name="RB_COLORCONTROL">
+ <bitfield name="ALPHA_FUNC" low="0" high="2" type="adreno_compare_func"/>
+ <bitfield name="ALPHA_TEST_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="ALPHA_TO_MASK_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="BLEND_DISABLE" pos="5" type="boolean"/>
+ <bitfield name="VOB_ENABLE" pos="6" type="boolean"/>
+ <bitfield name="VS_EXPORTS_FOG" pos="7" type="boolean"/>
+ <bitfield name="ROP_CODE" low="8" high="11" type="uint"/>
+ <bitfield name="DITHER_MODE" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_TYPE" low="14" high="15" type="a2xx_rb_dither_type"/>
+ <bitfield name="PIXEL_FOG" pos="16" type="boolean"/>
+ <bitfield name="ALPHA_TO_MASK_OFFSET0" low="24" high="25" type="uint"/>
+ <bitfield name="ALPHA_TO_MASK_OFFSET1" low="26" high="27" type="uint"/>
+ <bitfield name="ALPHA_TO_MASK_OFFSET2" low="28" high="29" type="uint"/>
+ <bitfield name="ALPHA_TO_MASK_OFFSET3" low="30" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2203" name="VGT_CURRENT_BIN_ID_MAX" type="a2xx_vgt_current_bin_id_min_max"/>
+ <reg32 offset="0x2204" name="PA_CL_CLIP_CNTL">
+ <bitfield name="CLIP_DISABLE" pos="16" type="boolean"/>
+ <bitfield name="BOUNDARY_EDGE_FLAG_ENA" pos="18" type="boolean"/>
+ <bitfield name="DX_CLIP_SPACE_DEF" pos="19" type="a2xx_dx_clip_space"/>
+ <bitfield name="DIS_CLIP_ERR_DETECT" pos="20" type="boolean"/>
+ <bitfield name="VTX_KILL_OR" pos="21" type="boolean"/>
+ <bitfield name="XY_NAN_RETAIN" pos="22" type="boolean"/>
+ <bitfield name="Z_NAN_RETAIN" pos="23" type="boolean"/>
+ <bitfield name="W_NAN_RETAIN" pos="24" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2205" name="PA_SU_SC_MODE_CNTL">
+ <bitfield name="CULL_FRONT" pos="0" type="boolean"/>
+ <bitfield name="CULL_BACK" pos="1" type="boolean"/>
+ <bitfield name="FACE" pos="2" type="boolean"/>
+ <bitfield name="POLYMODE" low="3" high="4" type="a2xx_pa_su_sc_polymode"/>
+ <bitfield name="FRONT_PTYPE" low="5" high="7" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="BACK_PTYPE" low="8" high="10" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLY_OFFSET_FRONT_ENABLE" pos="11" type="boolean"/>
+ <bitfield name="POLY_OFFSET_BACK_ENABLE" pos="12" type="boolean"/>
+ <bitfield name="POLY_OFFSET_PARA_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="MSAA_ENABLE" pos="15" type="boolean"/>
+ <bitfield name="VTX_WINDOW_OFFSET_ENABLE" pos="16" type="boolean"/>
+ <bitfield name="LINE_STIPPLE_ENABLE" pos="18" type="boolean"/>
+ <bitfield name="PROVOKING_VTX_LAST" pos="19" type="boolean"/>
+ <bitfield name="PERSP_CORR_DIS" pos="20" type="boolean"/>
+ <bitfield name="MULTI_PRIM_IB_ENA" pos="21" type="boolean"/>
+ <bitfield name="QUAD_ORDER_ENABLE" pos="23" type="boolean"/>
+ <bitfield name="WAIT_RB_IDLE_ALL_TRI" pos="25" type="boolean"/>
+ <bitfield name="WAIT_RB_IDLE_FIRST_TRI_NEW_STATE" pos="26" type="boolean"/>
+ <bitfield name="CLAMPED_FACENESS" pos="28" type="boolean"/>
+ <bitfield name="ZERO_AREA_FACENESS" pos="29" type="boolean"/>
+ <bitfield name="FACE_KILL_ENABLE" pos="30" type="boolean"/>
+ <bitfield name="FACE_WRITE_ENABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2206" name="PA_CL_VTE_CNTL">
+ <bitfield name="VPORT_X_SCALE_ENA" pos="0" type="boolean"/>
+ <bitfield name="VPORT_X_OFFSET_ENA" pos="1" type="boolean"/>
+ <bitfield name="VPORT_Y_SCALE_ENA" pos="2" type="boolean"/>
+ <bitfield name="VPORT_Y_OFFSET_ENA" pos="3" type="boolean"/>
+ <bitfield name="VPORT_Z_SCALE_ENA" pos="4" type="boolean"/>
+ <bitfield name="VPORT_Z_OFFSET_ENA" pos="5" type="boolean"/>
+ <bitfield name="VTX_XY_FMT" pos="8" type="boolean"/>
+ <bitfield name="VTX_Z_FMT" pos="9" type="boolean"/>
+ <bitfield name="VTX_W0_FMT" pos="10" type="boolean"/>
+ <bitfield name="PERFCOUNTER_REF" pos="11" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2207" name="VGT_CURRENT_BIN_ID_MIN" type="a2xx_vgt_current_bin_id_min_max"/>
+ <reg32 offset="0x2208" name="RB_MODECONTROL">
+ <bitfield name="EDRAM_MODE" low="0" high="2" type="a2xx_rb_edram_mode"/>
+ </reg32>
+ <reg32 offset="0x2209" name="A220_RB_LRZ_VSC_CONTROL"/>
+ <reg32 offset="0x220a" name="RB_SAMPLE_POS"/>
+ <reg32 offset="0x220b" name="CLEAR_COLOR">
+ <bitfield name="RED" low="0" high="7"/>
+ <bitfield name="GREEN" low="8" high="15"/>
+ <bitfield name="BLUE" low="16" high="23"/>
+ <bitfield name="ALPHA" low="24" high="31"/>
+ </reg32>
+ <reg32 offset="0x2210" name="A220_GRAS_CONTROL"/>
+ <reg32 offset="0x2280" name="PA_SU_POINT_SIZE">
+ <bitfield name="HEIGHT" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="WIDTH" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x2281" name="PA_SU_POINT_MINMAX">
+ <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x2282" name="PA_SU_LINE_CNTL">
+ <bitfield name="WIDTH" low="0" high="15" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x2283" name="PA_SC_LINE_STIPPLE">
+ <bitfield name="LINE_PATTERN" low="0" high="15" type="hex"/>
+ <bitfield name="REPEAT_COUNT" low="16" high="23" type="uint"/>
+ <bitfield name="PATTERN_BIT_ORDER" pos="28" type="a2xx_pa_sc_pattern_bit_order"/>
+ <bitfield name="AUTO_RESET_CNTL" low="29" high="30" type="a2xx_pa_sc_auto_reset_cntl"/>
+ </reg32>
+ <reg32 offset="0x2293" name="PA_SC_VIZ_QUERY">
+ <bitfield name="VIZ_QUERY_ENA" pos="0" type="boolean"/>
+ <bitfield name="VIZ_QUERY_ID" low="1" high="6" type="uint"/>
+ <bitfield name="KILL_PIX_POST_EARLY_Z" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2294" name="VGT_ENHANCE"/>
+ <reg32 offset="0x2300" name="PA_SC_LINE_CNTL">
+ <bitfield name="BRES_CNTL" low="0" high="15" type="uint"/>
+ <bitfield name="USE_BRES_CNTL" pos="8" type="boolean"/>
+ <bitfield name="EXPAND_LINE_WIDTH" pos="9" type="boolean"/>
+ <bitfield name="LAST_PIXEL" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2301" name="PA_SC_AA_CONFIG">
+ <bitfield name="MSAA_NUM_SAMPLES" low="0" high="2" type="uint"/>
+ <bitfield name="MAX_SAMPLE_DIST" low="13" high="16" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2302" name="PA_SU_VTX_CNTL">
+ <bitfield name="PIX_CENTER" pos="0" type="a2xx_pa_pixcenter"/>
+ <bitfield name="ROUND_MODE" low="1" high="2" type="a2xx_pa_roundmode"/>
+ <bitfield name="QUANT_MODE" low="7" high="9" type="a2xx_pa_quantmode"/>
+ </reg32>
+ <reg32 offset="0x2303" name="PA_CL_GB_VERT_CLIP_ADJ" type="float"/>
+ <reg32 offset="0x2304" name="PA_CL_GB_VERT_DISC_ADJ" type="float"/>
+ <reg32 offset="0x2305" name="PA_CL_GB_HORZ_CLIP_ADJ" type="float"/>
+ <reg32 offset="0x2306" name="PA_CL_GB_HORZ_DISC_ADJ" type="float"/>
+ <reg32 offset="0x2307" name="SQ_VS_CONST">
+ <bitfield name="BASE" low="0" high="8" type="uint"/>
+ <bitfield name="SIZE" low="12" high="20" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2308" name="SQ_PS_CONST">
+ <bitfield name="BASE" low="0" high="8" type="uint"/>
+ <bitfield name="SIZE" low="12" high="20" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2309" name="SQ_DEBUG_MISC_0"/>
+ <reg32 offset="0x230a" name="SQ_DEBUG_MISC_1"/>
+ <reg32 offset="0x2312" name="PA_SC_AA_MASK"/>
+ <reg32 offset="0x2316" name="VGT_VERTEX_REUSE_BLOCK_CNTL">
+ <bitfield name="VTX_REUSE_DEPTH" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2317" name="VGT_OUT_DEALLOC_CNTL">
+ <bitfield name="DEALLOC_DIST" low="0" high="1" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2318" name="RB_COPY_CONTROL">
+ <bitfield name="COPY_SAMPLE_SELECT" low="0" high="2" type="a2xx_rb_copy_sample_select"/>
+ <bitfield name="DEPTH_CLEAR_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="CLEAR_MASK" low="4" high="7" type="hex"/>
+ </reg32>
+ <reg32 offset="0x2319" name="RB_COPY_DEST_BASE"/>
+ <reg32 offset="0x231a" name="RB_COPY_DEST_PITCH" shr="5" type="uint"/>
+ <reg32 offset="0x231b" name="RB_COPY_DEST_INFO">
+ <bitfield name="DEST_ENDIAN" low="0" high="2" type="adreno_rb_surface_endian"/>
+ <bitfield name="LINEAR" pos="3" type="boolean"/>
+ <bitfield name="FORMAT" low="4" high="7" type="a2xx_colorformatx"/>
+ <bitfield name="SWAP" low="8" high="9" type="uint"/>
+ <bitfield name="DITHER_MODE" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_TYPE" low="12" high="13" type="a2xx_rb_dither_type"/>
+ <bitfield name="WRITE_RED" pos="14" type="boolean"/>
+ <bitfield name="WRITE_GREEN" pos="15" type="boolean"/>
+ <bitfield name="WRITE_BLUE" pos="16" type="boolean"/>
+ <bitfield name="WRITE_ALPHA" pos="17" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x231c" name="RB_COPY_DEST_OFFSET">
+ <bitfield name="X" low="0" high="12" type="uint"/>
+ <bitfield name="Y" low="13" high="25" type="uint"/>
+ </reg32>
+ <reg32 offset="0x231d" name="RB_DEPTH_CLEAR"/>
+ <reg32 offset="0x2324" name="RB_SAMPLE_COUNT_CTL"/>
+ <reg32 offset="0x2326" name="RB_COLOR_DEST_MASK"/>
+ <reg32 offset="0x2340" name="A225_GRAS_UCP0X"/>
+ <reg32 offset="0x2357" name="A225_GRAS_UCP5W"/>
+ <reg32 offset="0x2360" name="A225_GRAS_UCP_ENABLED"/>
+ <reg32 offset="0x2380" name="PA_SU_POLY_OFFSET_FRONT_SCALE"/>
+ <reg32 offset="0x2381" name="PA_SU_POLY_OFFSET_FRONT_OFFSET"/>
+ <reg32 offset="0x2382" name="PA_SU_POLY_OFFSET_BACK_SCALE"/>
+ <reg32 offset="0x2383" name="PA_SU_POLY_OFFSET_BACK_OFFSET"/>
+ <reg32 offset="0x4000" name="SQ_CONSTANT_0"/>
+ <reg32 offset="0x4800" name="SQ_FETCH_0"/>
+ <reg32 offset="0x4900" name="SQ_CF_BOOLEANS"/>
+ <reg32 offset="0x4908" name="SQ_CF_LOOP"/>
+ <reg32 offset="0xa29" name="COHER_SIZE_PM4"/>
+ <reg32 offset="0xa2a" name="COHER_BASE_PM4"/>
+ <reg32 offset="0xa2b" name="COHER_STATUS_PM4"/>
+
+ <reg32 offset="0x0c88" name="PA_SU_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0c89" name="PA_SU_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0c8a" name="PA_SU_PERFCOUNTER2_SELECT"/>
+ <reg32 offset="0x0c8b" name="PA_SU_PERFCOUNTER3_SELECT"/>
+ <reg32 offset="0x0c8c" name="PA_SU_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0c8d" name="PA_SU_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0c8e" name="PA_SU_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0c8f" name="PA_SU_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0c90" name="PA_SU_PERFCOUNTER2_LOW"/>
+ <reg32 offset="0x0c91" name="PA_SU_PERFCOUNTER2_HI"/>
+ <reg32 offset="0x0c92" name="PA_SU_PERFCOUNTER3_LOW"/>
+ <reg32 offset="0x0c93" name="PA_SU_PERFCOUNTER3_HI"/>
+ <reg32 offset="0x0c98" name="PA_SC_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0c99" name="PA_SC_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0c9a" name="PA_SC_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0c48" name="VGT_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0c49" name="VGT_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0c4a" name="VGT_PERFCOUNTER2_SELECT"/>
+ <reg32 offset="0x0c4b" name="VGT_PERFCOUNTER3_SELECT"/>
+ <reg32 offset="0x0c4c" name="VGT_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0c4e" name="VGT_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0c50" name="VGT_PERFCOUNTER2_LOW"/>
+ <reg32 offset="0x0c52" name="VGT_PERFCOUNTER3_LOW"/>
+ <reg32 offset="0x0c4d" name="VGT_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0c4f" name="VGT_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0c51" name="VGT_PERFCOUNTER2_HI"/>
+ <reg32 offset="0x0c53" name="VGT_PERFCOUNTER3_HI"/>
+ <reg32 offset="0x0e05" name="TCR_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0e08" name="TCR_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0e06" name="TCR_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0e09" name="TCR_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0e07" name="TCR_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0e0a" name="TCR_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0e1f" name="TP0_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0e20" name="TP0_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0e21" name="TP0_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0e22" name="TP0_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0e23" name="TP0_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0e24" name="TP0_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0e54" name="TCM_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0e57" name="TCM_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0e55" name="TCM_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0e58" name="TCM_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0e56" name="TCM_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0e59" name="TCM_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0e5a" name="TCF_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0e5d" name="TCF_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0e60" name="TCF_PERFCOUNTER2_SELECT"/>
+ <reg32 offset="0x0e63" name="TCF_PERFCOUNTER3_SELECT"/>
+ <reg32 offset="0x0e66" name="TCF_PERFCOUNTER4_SELECT"/>
+ <reg32 offset="0x0e69" name="TCF_PERFCOUNTER5_SELECT"/>
+ <reg32 offset="0x0e6c" name="TCF_PERFCOUNTER6_SELECT"/>
+ <reg32 offset="0x0e6f" name="TCF_PERFCOUNTER7_SELECT"/>
+ <reg32 offset="0x0e72" name="TCF_PERFCOUNTER8_SELECT"/>
+ <reg32 offset="0x0e75" name="TCF_PERFCOUNTER9_SELECT"/>
+ <reg32 offset="0x0e78" name="TCF_PERFCOUNTER10_SELECT"/>
+ <reg32 offset="0x0e7b" name="TCF_PERFCOUNTER11_SELECT"/>
+ <reg32 offset="0x0e5b" name="TCF_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0e5e" name="TCF_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0e61" name="TCF_PERFCOUNTER2_HI"/>
+ <reg32 offset="0x0e64" name="TCF_PERFCOUNTER3_HI"/>
+ <reg32 offset="0x0e67" name="TCF_PERFCOUNTER4_HI"/>
+ <reg32 offset="0x0e6a" name="TCF_PERFCOUNTER5_HI"/>
+ <reg32 offset="0x0e6d" name="TCF_PERFCOUNTER6_HI"/>
+ <reg32 offset="0x0e70" name="TCF_PERFCOUNTER7_HI"/>
+ <reg32 offset="0x0e73" name="TCF_PERFCOUNTER8_HI"/>
+ <reg32 offset="0x0e76" name="TCF_PERFCOUNTER9_HI"/>
+ <reg32 offset="0x0e79" name="TCF_PERFCOUNTER10_HI"/>
+ <reg32 offset="0x0e7c" name="TCF_PERFCOUNTER11_HI"/>
+ <reg32 offset="0x0e5c" name="TCF_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0e5f" name="TCF_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0e62" name="TCF_PERFCOUNTER2_LOW"/>
+ <reg32 offset="0x0e65" name="TCF_PERFCOUNTER3_LOW"/>
+ <reg32 offset="0x0e68" name="TCF_PERFCOUNTER4_LOW"/>
+ <reg32 offset="0x0e6b" name="TCF_PERFCOUNTER5_LOW"/>
+ <reg32 offset="0x0e6e" name="TCF_PERFCOUNTER6_LOW"/>
+ <reg32 offset="0x0e71" name="TCF_PERFCOUNTER7_LOW"/>
+ <reg32 offset="0x0e74" name="TCF_PERFCOUNTER8_LOW"/>
+ <reg32 offset="0x0e77" name="TCF_PERFCOUNTER9_LOW"/>
+ <reg32 offset="0x0e7a" name="TCF_PERFCOUNTER10_LOW"/>
+ <reg32 offset="0x0e7d" name="TCF_PERFCOUNTER11_LOW"/>
+ <reg32 offset="0x0dc8" name="SQ_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0dc9" name="SQ_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0dca" name="SQ_PERFCOUNTER2_SELECT"/>
+ <reg32 offset="0x0dcb" name="SQ_PERFCOUNTER3_SELECT"/>
+ <reg32 offset="0x0dcc" name="SQ_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0dcd" name="SQ_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0dce" name="SQ_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0dcf" name="SQ_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0dd0" name="SQ_PERFCOUNTER2_LOW"/>
+ <reg32 offset="0x0dd1" name="SQ_PERFCOUNTER2_HI"/>
+ <reg32 offset="0x0dd2" name="SQ_PERFCOUNTER3_LOW"/>
+ <reg32 offset="0x0dd3" name="SQ_PERFCOUNTER3_HI"/>
+ <reg32 offset="0x0dd4" name="SX_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0dd8" name="SX_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0dd9" name="SX_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0a46" name="MH_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0a4a" name="MH_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0a47" name="MH_PERFCOUNTER0_CONFIG"/>
+ <reg32 offset="0x0a4b" name="MH_PERFCOUNTER1_CONFIG"/>
+ <reg32 offset="0x0a48" name="MH_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0a4c" name="MH_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0a49" name="MH_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0a4d" name="MH_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0f04" name="RB_PERFCOUNTER0_SELECT"/>
+ <reg32 offset="0x0f05" name="RB_PERFCOUNTER1_SELECT"/>
+ <reg32 offset="0x0f06" name="RB_PERFCOUNTER2_SELECT"/>
+ <reg32 offset="0x0f07" name="RB_PERFCOUNTER3_SELECT"/>
+ <reg32 offset="0x0f08" name="RB_PERFCOUNTER0_LOW"/>
+ <reg32 offset="0x0f09" name="RB_PERFCOUNTER0_HI"/>
+ <reg32 offset="0x0f0a" name="RB_PERFCOUNTER1_LOW"/>
+ <reg32 offset="0x0f0b" name="RB_PERFCOUNTER1_HI"/>
+ <reg32 offset="0x0f0c" name="RB_PERFCOUNTER2_LOW"/>
+ <reg32 offset="0x0f0d" name="RB_PERFCOUNTER2_HI"/>
+ <reg32 offset="0x0f0e" name="RB_PERFCOUNTER3_LOW"/>
+ <reg32 offset="0x0f0f" name="RB_PERFCOUNTER3_HI"/>
+</domain>
+
+<domain name="A2XX_SQ_TEX" width="32">
+ <doc>Texture state dwords</doc>
+ <enum name="sq_tex_clamp">
+ <value name="SQ_TEX_WRAP" value="0"/>
+ <value name="SQ_TEX_MIRROR" value="1"/>
+ <value name="SQ_TEX_CLAMP_LAST_TEXEL" value="2"/>
+ <value name="SQ_TEX_MIRROR_ONCE_LAST_TEXEL" value="3"/>
+ <value name="SQ_TEX_CLAMP_HALF_BORDER" value="4"/>
+ <value name="SQ_TEX_MIRROR_ONCE_HALF_BORDER" value="5"/>
+ <value name="SQ_TEX_CLAMP_BORDER" value="6"/>
+ <value name="SQ_TEX_MIRROR_ONCE_BORDER" value="7"/>
+ </enum>
+ <enum name="sq_tex_swiz">
+ <value name="SQ_TEX_X" value="0"/>
+ <value name="SQ_TEX_Y" value="1"/>
+ <value name="SQ_TEX_Z" value="2"/>
+ <value name="SQ_TEX_W" value="3"/>
+ <value name="SQ_TEX_ZERO" value="4"/>
+ <value name="SQ_TEX_ONE" value="5"/>
+ </enum>
+ <enum name="sq_tex_filter">
+ <value name="SQ_TEX_FILTER_POINT" value="0"/>
+ <value name="SQ_TEX_FILTER_BILINEAR" value="1"/>
+ <value name="SQ_TEX_FILTER_BASEMAP" value="2"/>
+ <value name="SQ_TEX_FILTER_USE_FETCH_CONST" value="3"/>
+ </enum>
+ <enum name="sq_tex_aniso_filter">
+ <value name="SQ_TEX_ANISO_FILTER_DISABLED" value="0"/>
+ <value name="SQ_TEX_ANISO_FILTER_MAX_1_1" value="1"/>
+ <value name="SQ_TEX_ANISO_FILTER_MAX_2_1" value="2"/>
+ <value name="SQ_TEX_ANISO_FILTER_MAX_4_1" value="3"/>
+ <value name="SQ_TEX_ANISO_FILTER_MAX_8_1" value="4"/>
+ <value name="SQ_TEX_ANISO_FILTER_MAX_16_1" value="5"/>
+ <value name="SQ_TEX_ANISO_FILTER_USE_FETCH_CONST" value="7"/>
+ </enum>
+ <enum name="sq_tex_dimension">
+ <value name="SQ_TEX_DIMENSION_1D" value="0"/>
+ <value name="SQ_TEX_DIMENSION_2D" value="1"/>
+ <value name="SQ_TEX_DIMENSION_3D" value="2"/>
+ <value name="SQ_TEX_DIMENSION_CUBE" value="3"/>
+ </enum>
+ <enum name="sq_tex_border_color">
+ <value name="SQ_TEX_BORDER_COLOR_BLACK" value="0"/>
+ <value name="SQ_TEX_BORDER_COLOR_WHITE" value="1"/>
+ <value name="SQ_TEX_BORDER_COLOR_ACBYCR_BLACK" value="2"/>
+ <value name="SQ_TEX_BORDER_COLOR_ACBCRY_BLACK" value="3"/>
+ </enum>
+ <enum name="sq_tex_sign">
+ <value name="SQ_TEX_SIGN_UNSIGNED" value="0"/>
+ <value name="SQ_TEX_SIGN_SIGNED" value="1"/>
+ <!-- biased: 2*color-1 (range -1,1 when sampling) -->
+ <value name="SQ_TEX_SIGN_UNSIGNED_BIASED" value="2"/>
+ <!-- gamma: sRGB to linear - doesn't seem to work on adreno? -->
+ <value name="SQ_TEX_SIGN_GAMMA" value="3"/>
+ </enum>
+ <enum name="sq_tex_endian">
+ <value name="SQ_TEX_ENDIAN_NONE" value="0"/>
+ <value name="SQ_TEX_ENDIAN_8IN16" value="1"/>
+ <value name="SQ_TEX_ENDIAN_8IN32" value="2"/>
+ <value name="SQ_TEX_ENDIAN_16IN32" value="3"/>
+ </enum>
+ <enum name="sq_tex_clamp_policy">
+ <value name="SQ_TEX_CLAMP_POLICY_D3D" value="0"/>
+ <value name="SQ_TEX_CLAMP_POLICY_OGL" value="1"/>
+ </enum>
+ <enum name="sq_tex_num_format">
+ <value name="SQ_TEX_NUM_FORMAT_FRAC" value="0"/>
+ <value name="SQ_TEX_NUM_FORMAT_INT" value="1"/>
+ </enum>
+ <enum name="sq_tex_type">
+ <value name="SQ_TEX_TYPE_0" value="0"/>
+ <value name="SQ_TEX_TYPE_1" value="1"/>
+ <value name="SQ_TEX_TYPE_2" value="2"/>
+ <value name="SQ_TEX_TYPE_3" value="3"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TYPE" low="0" high="1" type="sq_tex_type"/>
+ <bitfield name="SIGN_X" low="2" high="3" type="sq_tex_sign"/>
+ <bitfield name="SIGN_Y" low="4" high="5" type="sq_tex_sign"/>
+ <bitfield name="SIGN_Z" low="6" high="7" type="sq_tex_sign"/>
+ <bitfield name="SIGN_W" low="8" high="9" type="sq_tex_sign"/>
+ <bitfield name="CLAMP_X" low="10" high="12" type="sq_tex_clamp"/>
+ <bitfield name="CLAMP_Y" low="13" high="15" type="sq_tex_clamp"/>
+ <bitfield name="CLAMP_Z" low="16" high="18" type="sq_tex_clamp"/>
+ <bitfield name="PITCH" low="22" high="30" shr="5" type="uint"/>
+ <bitfield name="TILED" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="FORMAT" low="0" high="5" type="a2xx_sq_surfaceformat"/>
+ <bitfield name="ENDIANNESS" low="6" high="7" type="sq_tex_endian"/>
+ <bitfield name="REQUEST_SIZE" low="8" high="9" type="uint"/>
+ <bitfield name="STACKED" pos="10" type="boolean"/>
+ <bitfield name="CLAMP_POLICY" pos="11" type="sq_tex_clamp_policy"/>
+ <bitfield name="BASE_ADDRESS" low="12" high="31" type="uint" shr="12"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="WIDTH" low="0" high="12" type="uint"/>
+ <bitfield name="HEIGHT" low="13" high="25" type="uint"/>
+ <bitfield name="DEPTH" low="26" high="31" type="uint"/>
+ <!-- 1d/3d have different bit configurations -->
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="NUM_FORMAT" pos="0" type="sq_tex_num_format"/>
+ <bitfield name="SWIZ_X" low="1" high="3" type="sq_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="4" high="6" type="sq_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="7" high="9" type="sq_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="10" high="12" type="sq_tex_swiz"/>
+ <bitfield name="EXP_ADJUST" low="13" high="18" type="int"/>
+ <bitfield name="XY_MAG_FILTER" low="19" high="20" type="sq_tex_filter"/>
+ <bitfield name="XY_MIN_FILTER" low="21" high="22" type="sq_tex_filter"/>
+ <bitfield name="MIP_FILTER" low="23" high="24" type="sq_tex_filter"/>
+ <bitfield name="ANISO_FILTER" low="25" high="27" type="sq_tex_aniso_filter"/>
+ <bitfield name="BORDER_SIZE" pos="31" type="uint"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="VOL_MAG_FILTER" pos="0" type="sq_tex_filter"/>
+ <bitfield name="VOL_MIN_FILTER" pos="1" type="sq_tex_filter"/>
+ <bitfield name="MIP_MIN_LEVEL" low="2" high="5" type="uint"/>
+ <bitfield name="MIP_MAX_LEVEL" low="6" high="9" type="uint"/>
+ <bitfield name="MAX_ANISO_WALK" pos="10" type="boolean"/>
+ <bitfield name="MIN_ANISO_WALK" pos="11" type="boolean"/>
+ <bitfield name="LOD_BIAS" low="12" high="21" type="fixed" radix="5"/>
+ <bitfield name="GRAD_EXP_ADJUST_H" low="22" high="26" type="uint"/>
+ <bitfield name="GRAD_EXP_ADJUST_V" low="27" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BORDER_COLOR" low="0" high="1" type="sq_tex_border_color"/>
+ <bitfield name="FORCE_BCW_MAX" pos="2" type="boolean"/>
+ <bitfield name="TRI_CLAMP" low="3" high="4" type="uint"/>
+ <bitfield name="ANISO_BIAS" low="5" high="8" type="fixed" radix="0"/> <!-- radix unknown -->
+ <bitfield name="DIMENSION" low="9" high="10" type="sq_tex_dimension"/>
+ <bitfield name="PACKED_MIPS" pos="11" type="boolean"/>
+ <bitfield name="MIP_ADDRESS" low="12" high="31" type="uint" shr="12"/>
+ </reg32>
+</domain>
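+
+<!--
+	Editor's note: a minimal sketch, assuming nothing beyond the bitfield
+	layout documented above, of how a driver could pack a couple of the
+	A2XX_SQ_TEX dwords from C.  The helper names here are hypothetical;
+	the real driver uses headers generated from this XML rather than hand
+	written shifts.
+
+	    #include <stdint.h>
+
+	    /* dword 0: PITCH sits in bits 22..30 and is stored shifted right
+	     * by 5 (shr="5"), TILED is bit 31 */
+	    static inline uint32_t a2xx_sq_tex0(uint32_t pitch, int tiled)
+	    {
+	        return (((pitch >> 5) & 0x1ff) << 22) | (((uint32_t)!!tiled) << 31);
+	    }
+
+	    /* dword 2: WIDTH in bits 0..12, HEIGHT in 13..25, DEPTH in 26..31 */
+	    static inline uint32_t a2xx_sq_tex2(uint32_t w, uint32_t h, uint32_t d)
+	    {
+	        return (w & 0x1fff) | ((h & 0x1fff) << 13) | ((d & 0x3f) << 26);
+	    }
+-->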
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a3xx.xml b/drivers/gpu/drm/msm/registers/adreno/a3xx.xml
new file mode 100644
index 000000000000..6717abc0a897
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a3xx.xml
@@ -0,0 +1,1751 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a3xx_tile_mode">
+ <value name="LINEAR" value="0"/>
+ <value name="TILE_4X4" value="1"/> <!-- "normal" case for textures -->
+ <value name="TILE_32X32" value="2"/> <!-- only used in GMEM -->
+ <value name="TILE_4X2" value="3"/> <!-- only used for CrCb -->
+</enum>
+
+<enum name="a3xx_state_block_id">
+ <value name="HLSQ_BLOCK_ID_TP_TEX" value="2"/>
+ <value name="HLSQ_BLOCK_ID_TP_MIPMAP" value="3"/>
+ <value name="HLSQ_BLOCK_ID_SP_VS" value="4"/>
+ <value name="HLSQ_BLOCK_ID_SP_FS" value="6"/>
+</enum>
+
+<enum name="a3xx_cache_opcode">
+ <value name="INVALIDATE" value="1"/>
+</enum>
+
+<enum name="a3xx_vtx_fmt">
+ <value name="VFMT_32_FLOAT" value="0x0"/>
+ <value name="VFMT_32_32_FLOAT" value="0x1"/>
+ <value name="VFMT_32_32_32_FLOAT" value="0x2"/>
+ <value name="VFMT_32_32_32_32_FLOAT" value="0x3"/>
+
+ <value name="VFMT_16_FLOAT" value="0x4"/>
+ <value name="VFMT_16_16_FLOAT" value="0x5"/>
+ <value name="VFMT_16_16_16_FLOAT" value="0x6"/>
+ <value name="VFMT_16_16_16_16_FLOAT" value="0x7"/>
+
+ <value name="VFMT_32_FIXED" value="0x8"/>
+ <value name="VFMT_32_32_FIXED" value="0x9"/>
+ <value name="VFMT_32_32_32_FIXED" value="0xa"/>
+ <value name="VFMT_32_32_32_32_FIXED" value="0xb"/>
+
+ <value name="VFMT_16_SINT" value="0x10"/>
+ <value name="VFMT_16_16_SINT" value="0x11"/>
+ <value name="VFMT_16_16_16_SINT" value="0x12"/>
+ <value name="VFMT_16_16_16_16_SINT" value="0x13"/>
+ <value name="VFMT_16_UINT" value="0x14"/>
+ <value name="VFMT_16_16_UINT" value="0x15"/>
+ <value name="VFMT_16_16_16_UINT" value="0x16"/>
+ <value name="VFMT_16_16_16_16_UINT" value="0x17"/>
+ <value name="VFMT_16_SNORM" value="0x18"/>
+ <value name="VFMT_16_16_SNORM" value="0x19"/>
+ <value name="VFMT_16_16_16_SNORM" value="0x1a"/>
+ <value name="VFMT_16_16_16_16_SNORM" value="0x1b"/>
+ <value name="VFMT_16_UNORM" value="0x1c"/>
+ <value name="VFMT_16_16_UNORM" value="0x1d"/>
+ <value name="VFMT_16_16_16_UNORM" value="0x1e"/>
+ <value name="VFMT_16_16_16_16_UNORM" value="0x1f"/>
+
+ <!-- seems to be no NORM variants for 32bit.. -->
+ <value name="VFMT_32_UINT" value="0x20"/>
+ <value name="VFMT_32_32_UINT" value="0x21"/>
+ <value name="VFMT_32_32_32_UINT" value="0x22"/>
+ <value name="VFMT_32_32_32_32_UINT" value="0x23"/>
+ <value name="VFMT_32_SINT" value="0x24"/>
+ <value name="VFMT_32_32_SINT" value="0x25"/>
+ <value name="VFMT_32_32_32_SINT" value="0x26"/>
+ <value name="VFMT_32_32_32_32_SINT" value="0x27"/>
+
+ <value name="VFMT_8_UINT" value="0x28"/>
+ <value name="VFMT_8_8_UINT" value="0x29"/>
+ <value name="VFMT_8_8_8_UINT" value="0x2a"/>
+ <value name="VFMT_8_8_8_8_UINT" value="0x2b"/>
+ <value name="VFMT_8_UNORM" value="0x2c"/>
+ <value name="VFMT_8_8_UNORM" value="0x2d"/>
+ <value name="VFMT_8_8_8_UNORM" value="0x2e"/>
+ <value name="VFMT_8_8_8_8_UNORM" value="0x2f"/>
+ <value name="VFMT_8_SINT" value="0x30"/>
+ <value name="VFMT_8_8_SINT" value="0x31"/>
+ <value name="VFMT_8_8_8_SINT" value="0x32"/>
+ <value name="VFMT_8_8_8_8_SINT" value="0x33"/>
+ <value name="VFMT_8_SNORM" value="0x34"/>
+ <value name="VFMT_8_8_SNORM" value="0x35"/>
+ <value name="VFMT_8_8_8_SNORM" value="0x36"/>
+ <value name="VFMT_8_8_8_8_SNORM" value="0x37"/>
+ <value name="VFMT_10_10_10_2_UINT" value="0x38"/>
+ <value name="VFMT_10_10_10_2_UNORM" value="0x39"/>
+ <value name="VFMT_10_10_10_2_SINT" value="0x3a"/>
+ <value name="VFMT_10_10_10_2_SNORM" value="0x3b"/>
+ <value name="VFMT_2_10_10_10_UINT" value="0x3c"/>
+ <value name="VFMT_2_10_10_10_UNORM" value="0x3d"/>
+ <value name="VFMT_2_10_10_10_SINT" value="0x3e"/>
+ <value name="VFMT_2_10_10_10_SNORM" value="0x3f"/>
+
+ <value name="VFMT_NONE" value="0xff"/>
+</enum>
+
+<enum name="a3xx_tex_fmt">
+ <value name="TFMT_5_6_5_UNORM" value="0x4"/>
+ <value name="TFMT_5_5_5_1_UNORM" value="0x5"/>
+ <value name="TFMT_4_4_4_4_UNORM" value="0x7"/>
+ <value name="TFMT_Z16_UNORM" value="0x9"/>
+ <value name="TFMT_X8Z24_UNORM" value="0xa"/>
+ <value name="TFMT_Z32_FLOAT" value="0xb"/>
+
+ <!--
+	The NV12 tiled/linear formats seem to require ganged sampler
+	slots (ie. sampler state N plus N+1) for the Y and UV planes.
+	They fetch YUV in a single sam instruction, but still require
+	colorspace conversion in the shader.
+ -->
+ <value name="TFMT_UV_64X32" value="0x10"/>
+ <value name="TFMT_VU_64X32" value="0x11"/>
+ <value name="TFMT_Y_64X32" value="0x12"/>
+ <value name="TFMT_NV12_64X32" value="0x13"/>
+ <value name="TFMT_UV_LINEAR" value="0x14"/>
+ <value name="TFMT_VU_LINEAR" value="0x15"/>
+ <value name="TFMT_Y_LINEAR" value="0x16"/>
+ <value name="TFMT_NV12_LINEAR" value="0x17"/>
+ <value name="TFMT_I420_Y" value="0x18"/>
+ <value name="TFMT_I420_U" value="0x1a"/>
+ <value name="TFMT_I420_V" value="0x1b"/>
+
+ <value name="TFMT_ATC_RGB" value="0x20"/>
+ <value name="TFMT_ATC_RGBA_EXPLICIT" value="0x21"/>
+ <value name="TFMT_ETC1" value="0x22"/>
+ <value name="TFMT_ATC_RGBA_INTERPOLATED" value="0x23"/>
+
+ <value name="TFMT_DXT1" value="0x24"/>
+ <value name="TFMT_DXT3" value="0x25"/>
+ <value name="TFMT_DXT5" value="0x26"/>
+
+ <value name="TFMT_2_10_10_10_UNORM" value="0x28"/>
+ <value name="TFMT_10_10_10_2_UNORM" value="0x29"/>
+ <value name="TFMT_9_9_9_E5_FLOAT" value="0x2a"/>
+ <value name="TFMT_11_11_10_FLOAT" value="0x2b"/>
+ <value name="TFMT_A8_UNORM" value="0x2c"/> <!-- GL_ALPHA -->
+ <value name="TFMT_L8_UNORM" value="0x2d"/>
+ <value name="TFMT_L8_A8_UNORM" value="0x2f"/> <!-- GL_LUMINANCE_ALPHA -->
+
+ <!--
+	NOTE: GL_ALPHA and GL_LUMINANCE_ALPHA aren't handled the same way as
+	float16, float32.. but they seem to use a non-standard swizzle too..
+	perhaps we can ditch that if the pattern of 0xn0, 0xn1, 0xn2, 0xn3
+	for 1, 2, 3, 4 components respectively holds..
+
+	The only formats filled in below are the ones that have been observed
+	in use by the blob or tested.. you can guess what the missing ones are..
+ -->
+
+ <value name="TFMT_8_UNORM" value="0x30"/> <!-- GL_LUMINANCE -->
+ <value name="TFMT_8_8_UNORM" value="0x31"/>
+ <value name="TFMT_8_8_8_UNORM" value="0x32"/>
+ <value name="TFMT_8_8_8_8_UNORM" value="0x33"/>
+
+ <value name="TFMT_8_SNORM" value="0x34"/>
+ <value name="TFMT_8_8_SNORM" value="0x35"/>
+ <value name="TFMT_8_8_8_SNORM" value="0x36"/>
+ <value name="TFMT_8_8_8_8_SNORM" value="0x37"/>
+
+ <value name="TFMT_8_UINT" value="0x38"/>
+ <value name="TFMT_8_8_UINT" value="0x39"/>
+ <value name="TFMT_8_8_8_UINT" value="0x3a"/>
+ <value name="TFMT_8_8_8_8_UINT" value="0x3b"/>
+
+ <value name="TFMT_8_SINT" value="0x3c"/>
+ <value name="TFMT_8_8_SINT" value="0x3d"/>
+ <value name="TFMT_8_8_8_SINT" value="0x3e"/>
+ <value name="TFMT_8_8_8_8_SINT" value="0x3f"/>
+
+ <value name="TFMT_16_FLOAT" value="0x40"/>
+ <value name="TFMT_16_16_FLOAT" value="0x41"/>
+ <!-- TFMT_FLOAT_16_16_16 -->
+ <value name="TFMT_16_16_16_16_FLOAT" value="0x43"/>
+
+ <value name="TFMT_16_UINT" value="0x44"/>
+ <value name="TFMT_16_16_UINT" value="0x45"/>
+ <value name="TFMT_16_16_16_16_UINT" value="0x47"/>
+
+ <value name="TFMT_16_SINT" value="0x48"/>
+ <value name="TFMT_16_16_SINT" value="0x49"/>
+ <value name="TFMT_16_16_16_16_SINT" value="0x4b"/>
+
+ <value name="TFMT_16_UNORM" value="0x4c"/>
+ <value name="TFMT_16_16_UNORM" value="0x4d"/>
+ <value name="TFMT_16_16_16_16_UNORM" value="0x4f"/>
+
+ <value name="TFMT_16_SNORM" value="0x50"/>
+ <value name="TFMT_16_16_SNORM" value="0x51"/>
+ <value name="TFMT_16_16_16_16_SNORM" value="0x53"/>
+
+ <value name="TFMT_32_FLOAT" value="0x54"/>
+ <value name="TFMT_32_32_FLOAT" value="0x55"/>
+ <!-- TFMT_32_32_32_FLOAT -->
+ <value name="TFMT_32_32_32_32_FLOAT" value="0x57"/>
+
+ <value name="TFMT_32_UINT" value="0x58"/>
+ <value name="TFMT_32_32_UINT" value="0x59"/>
+ <value name="TFMT_32_32_32_32_UINT" value="0x5b"/>
+
+ <value name="TFMT_32_SINT" value="0x5c"/>
+ <value name="TFMT_32_32_SINT" value="0x5d"/>
+ <value name="TFMT_32_32_32_32_SINT" value="0x5f"/>
+
+ <value name="TFMT_2_10_10_10_UINT" value="0x60"/>
+ <value name="TFMT_10_10_10_2_UINT" value="0x61"/>
+
+ <value name="TFMT_ETC2_RG11_SNORM" value="0x70"/>
+ <value name="TFMT_ETC2_RG11_UNORM" value="0x71"/>
+ <value name="TFMT_ETC2_R11_SNORM" value="0x72"/>
+ <value name="TFMT_ETC2_R11_UNORM" value="0x73"/>
+ <value name="TFMT_ETC2_RGBA8" value="0x74"/>
+ <value name="TFMT_ETC2_RGB8A1" value="0x75"/>
+ <value name="TFMT_ETC2_RGB8" value="0x76"/>
+
+ <value name="TFMT_NONE" value="0xff"/>
+</enum>
+
+<enum name="a3xx_color_fmt">
+ <value name="RB_R5G6B5_UNORM" value="0x00"/>
+ <value name="RB_R5G5B5A1_UNORM" value="0x01"/>
+ <value name="RB_R4G4B4A4_UNORM" value="0x03"/>
+ <value name="RB_R8G8B8_UNORM" value="0x04"/>
+ <value name="RB_R8G8B8A8_UNORM" value="0x08"/>
+ <value name="RB_R8G8B8A8_SNORM" value="0x09"/>
+ <value name="RB_R8G8B8A8_UINT" value="0x0a"/>
+ <value name="RB_R8G8B8A8_SINT" value="0x0b"/>
+ <value name="RB_R8G8_UNORM" value="0x0c"/>
+ <value name="RB_R8G8_SNORM" value="0x0d"/>
+ <value name="RB_R8G8_UINT" value="0x0e"/>
+ <value name="RB_R8G8_SINT" value="0x0f"/>
+ <value name="RB_R10G10B10A2_UNORM" value="0x10"/>
+ <value name="RB_A2R10G10B10_UNORM" value="0x11"/>
+ <value name="RB_R10G10B10A2_UINT" value="0x12"/>
+ <value name="RB_A2R10G10B10_UINT" value="0x13"/>
+
+ <value name="RB_A8_UNORM" value="0x14"/>
+ <value name="RB_R8_UNORM" value="0x15"/>
+
+ <value name="RB_R16_FLOAT" value="0x18"/>
+ <value name="RB_R16G16_FLOAT" value="0x19"/>
+ <value name="RB_R16G16B16A16_FLOAT" value="0x1b"/> <!-- GL_HALF_FLOAT_OES -->
+ <value name="RB_R11G11B10_FLOAT" value="0x1c"/>
+
+ <value name="RB_R16_SNORM" value="0x20"/>
+ <value name="RB_R16G16_SNORM" value="0x21"/>
+ <value name="RB_R16G16B16A16_SNORM" value="0x23"/>
+
+ <value name="RB_R16_UNORM" value="0x24"/>
+ <value name="RB_R16G16_UNORM" value="0x25"/>
+ <value name="RB_R16G16B16A16_UNORM" value="0x27"/>
+
+ <value name="RB_R16_SINT" value="0x28"/>
+ <value name="RB_R16G16_SINT" value="0x29"/>
+ <value name="RB_R16G16B16A16_SINT" value="0x2b"/>
+
+ <value name="RB_R16_UINT" value="0x2c"/>
+ <value name="RB_R16G16_UINT" value="0x2d"/>
+ <value name="RB_R16G16B16A16_UINT" value="0x2f"/>
+
+ <value name="RB_R32_FLOAT" value="0x30"/>
+ <value name="RB_R32G32_FLOAT" value="0x31"/>
+ <value name="RB_R32G32B32A32_FLOAT" value="0x33"/> <!-- GL_FLOAT -->
+
+ <value name="RB_R32_SINT" value="0x34"/>
+ <value name="RB_R32G32_SINT" value="0x35"/>
+ <value name="RB_R32G32B32A32_SINT" value="0x37"/>
+
+ <value name="RB_R32_UINT" value="0x38"/>
+ <value name="RB_R32G32_UINT" value="0x39"/>
+ <value name="RB_R32G32B32A32_UINT" value="0x3b"/>
+
+ <value name="RB_NONE" value="0xff"/>
+</enum>
+
+<enum name="a3xx_cp_perfcounter_select">
+ <value value="0x00" name="CP_ALWAYS_COUNT"/>
+ <value value="0x03" name="CP_AHB_PFPTRANS_WAIT"/>
+ <value value="0x06" name="CP_AHB_NRTTRANS_WAIT"/>
+ <value value="0x08" name="CP_CSF_NRT_READ_WAIT"/>
+ <value value="0x09" name="CP_CSF_I1_FIFO_FULL"/>
+ <value value="0x0a" name="CP_CSF_I2_FIFO_FULL"/>
+ <value value="0x0b" name="CP_CSF_ST_FIFO_FULL"/>
+ <value value="0x0c" name="CP_RESERVED_12"/>
+ <value value="0x0d" name="CP_CSF_RING_ROQ_FULL"/>
+ <value value="0x0e" name="CP_CSF_I1_ROQ_FULL"/>
+ <value value="0x0f" name="CP_CSF_I2_ROQ_FULL"/>
+ <value value="0x10" name="CP_CSF_ST_ROQ_FULL"/>
+ <value value="0x11" name="CP_RESERVED_17"/>
+ <value value="0x12" name="CP_MIU_TAG_MEM_FULL"/>
+ <value value="0x16" name="CP_MIU_NRT_WRITE_STALLED"/>
+ <value value="0x17" name="CP_MIU_NRT_READ_STALLED"/>
+ <value value="0x1a" name="CP_ME_REGS_RB_DONE_FIFO_FULL"/>
+ <value value="0x1b" name="CP_ME_REGS_VS_EVENT_FIFO_FULL"/>
+ <value value="0x1c" name="CP_ME_REGS_PS_EVENT_FIFO_FULL"/>
+ <value value="0x1d" name="CP_ME_REGS_CF_EVENT_FIFO_FULL"/>
+ <value value="0x1e" name="CP_ME_MICRO_RB_STARVED"/>
+ <value value="0x28" name="CP_AHB_RBBM_DWORD_SENT"/>
+ <value value="0x29" name="CP_ME_BUSY_CLOCKS"/>
+ <value value="0x2a" name="CP_ME_WAIT_CONTEXT_AVAIL"/>
+ <value value="0x2b" name="CP_PFP_TYPE0_PACKET"/>
+ <value value="0x2c" name="CP_PFP_TYPE3_PACKET"/>
+ <value value="0x2d" name="CP_CSF_RB_WPTR_NEQ_RPTR"/>
+ <value value="0x2e" name="CP_CSF_I1_SIZE_NEQ_ZERO"/>
+ <value value="0x2f" name="CP_CSF_I2_SIZE_NEQ_ZERO"/>
+ <value value="0x30" name="CP_CSF_RBI1I2_FETCHING"/>
+</enum>
+
+<enum name="a3xx_gras_tse_perfcounter_select">
+ <value value="0x00" name="GRAS_TSEPERF_INPUT_PRIM"/>
+ <value value="0x01" name="GRAS_TSEPERF_INPUT_NULL_PRIM"/>
+ <value value="0x02" name="GRAS_TSEPERF_TRIVAL_REJ_PRIM"/>
+ <value value="0x03" name="GRAS_TSEPERF_CLIPPED_PRIM"/>
+ <value value="0x04" name="GRAS_TSEPERF_NEW_PRIM"/>
+ <value value="0x05" name="GRAS_TSEPERF_ZERO_AREA_PRIM"/>
+ <value value="0x06" name="GRAS_TSEPERF_FACENESS_CULLED_PRIM"/>
+ <value value="0x07" name="GRAS_TSEPERF_ZERO_PIXEL_PRIM"/>
+ <value value="0x08" name="GRAS_TSEPERF_OUTPUT_NULL_PRIM"/>
+ <value value="0x09" name="GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM"/>
+ <value value="0x0a" name="GRAS_TSEPERF_PRE_CLIP_PRIM"/>
+ <value value="0x0b" name="GRAS_TSEPERF_POST_CLIP_PRIM"/>
+ <value value="0x0c" name="GRAS_TSEPERF_WORKING_CYCLES"/>
+ <value value="0x0d" name="GRAS_TSEPERF_PC_STARVE"/>
+ <value value="0x0e" name="GRAS_TSERASPERF_STALL"/>
+</enum>
+
+<enum name="a3xx_gras_ras_perfcounter_select">
+ <value value="0x00" name="GRAS_RASPERF_16X16_TILES"/>
+ <value value="0x01" name="GRAS_RASPERF_8X8_TILES"/>
+ <value value="0x02" name="GRAS_RASPERF_4X4_TILES"/>
+ <value value="0x03" name="GRAS_RASPERF_WORKING_CYCLES"/>
+ <value value="0x04" name="GRAS_RASPERF_STALL_CYCLES_BY_RB"/>
+ <value value="0x05" name="GRAS_RASPERF_STALL_CYCLES_BY_VSC"/>
+ <value value="0x06" name="GRAS_RASPERF_STARVE_CYCLES_BY_TSE"/>
+</enum>
+
+<enum name="a3xx_hlsq_perfcounter_select">
+ <value value="0x00" name="HLSQ_PERF_SP_VS_CONSTANT"/>
+ <value value="0x01" name="HLSQ_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="0x02" name="HLSQ_PERF_SP_FS_CONSTANT"/>
+ <value value="0x03" name="HLSQ_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="0x04" name="HLSQ_PERF_TP_STATE"/>
+ <value value="0x05" name="HLSQ_PERF_QUADS"/>
+ <value value="0x06" name="HLSQ_PERF_PIXELS"/>
+ <value value="0x07" name="HLSQ_PERF_VERTICES"/>
+ <value value="0x08" name="HLSQ_PERF_FS8_THREADS"/>
+ <value value="0x09" name="HLSQ_PERF_FS16_THREADS"/>
+ <value value="0x0a" name="HLSQ_PERF_FS32_THREADS"/>
+ <value value="0x0b" name="HLSQ_PERF_VS8_THREADS"/>
+ <value value="0x0c" name="HLSQ_PERF_VS16_THREADS"/>
+ <value value="0x0d" name="HLSQ_PERF_SP_VS_DATA_BYTES"/>
+ <value value="0x0e" name="HLSQ_PERF_SP_FS_DATA_BYTES"/>
+ <value value="0x0f" name="HLSQ_PERF_ACTIVE_CYCLES"/>
+ <value value="0x10" name="HLSQ_PERF_STALL_CYCLES_SP_STATE"/>
+ <value value="0x11" name="HLSQ_PERF_STALL_CYCLES_SP_VS"/>
+ <value value="0x12" name="HLSQ_PERF_STALL_CYCLES_SP_FS"/>
+ <value value="0x13" name="HLSQ_PERF_STALL_CYCLES_UCHE"/>
+ <value value="0x14" name="HLSQ_PERF_RBBM_LOAD_CYCLES"/>
+ <value value="0x15" name="HLSQ_PERF_DI_TO_VS_START_SP0"/>
+ <value value="0x16" name="HLSQ_PERF_DI_TO_FS_START_SP0"/>
+ <value value="0x17" name="HLSQ_PERF_VS_START_TO_DONE_SP0"/>
+ <value value="0x18" name="HLSQ_PERF_FS_START_TO_DONE_SP0"/>
+ <value value="0x19" name="HLSQ_PERF_SP_STATE_COPY_CYCLES_VS"/>
+ <value value="0x1a" name="HLSQ_PERF_SP_STATE_COPY_CYCLES_FS"/>
+ <value value="0x1b" name="HLSQ_PERF_UCHE_LATENCY_CYCLES"/>
+ <value value="0x1c" name="HLSQ_PERF_UCHE_LATENCY_COUNT"/>
+</enum>
+
+<enum name="a3xx_pc_perfcounter_select">
+ <value value="0x00" name="PC_PCPERF_VISIBILITY_STREAMS"/>
+ <value value="0x01" name="PC_PCPERF_TOTAL_INSTANCES"/>
+ <value value="0x02" name="PC_PCPERF_PRIMITIVES_PC_VPC"/>
+ <value value="0x03" name="PC_PCPERF_PRIMITIVES_KILLED_BY_VS"/>
+ <value value="0x04" name="PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS"/>
+ <value value="0x05" name="PC_PCPERF_DRAWCALLS_KILLED_BY_VS"/>
+ <value value="0x06" name="PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS"/>
+ <value value="0x07" name="PC_PCPERF_VERTICES_TO_VFD"/>
+ <value value="0x08" name="PC_PCPERF_REUSED_VERTICES"/>
+ <value value="0x09" name="PC_PCPERF_CYCLES_STALLED_BY_VFD"/>
+ <value value="0x0a" name="PC_PCPERF_CYCLES_STALLED_BY_TSE"/>
+ <value value="0x0b" name="PC_PCPERF_CYCLES_STALLED_BY_VBIF"/>
+ <value value="0x0c" name="PC_PCPERF_CYCLES_IS_WORKING"/>
+</enum>
+
+<enum name="a3xx_rb_perfcounter_select">
+ <value value="0x00" name="RB_RBPERF_ACTIVE_CYCLES_ANY"/>
+ <value value="0x01" name="RB_RBPERF_ACTIVE_CYCLES_ALL"/>
+ <value value="0x02" name="RB_RBPERF_STARVE_CYCLES_BY_SP"/>
+ <value value="0x03" name="RB_RBPERF_STARVE_CYCLES_BY_RAS"/>
+ <value value="0x04" name="RB_RBPERF_STARVE_CYCLES_BY_MARB"/>
+ <value value="0x05" name="RB_RBPERF_STALL_CYCLES_BY_MARB"/>
+ <value value="0x06" name="RB_RBPERF_STALL_CYCLES_BY_HLSQ"/>
+ <value value="0x07" name="RB_RBPERF_RB_MARB_DATA"/>
+ <value value="0x08" name="RB_RBPERF_SP_RB_QUAD"/>
+ <value value="0x09" name="RB_RBPERF_RAS_EARLY_Z_QUADS"/>
+ <value value="0x0a" name="RB_RBPERF_GMEM_CH0_READ"/>
+ <value value="0x0b" name="RB_RBPERF_GMEM_CH1_READ"/>
+ <value value="0x0c" name="RB_RBPERF_GMEM_CH0_WRITE"/>
+ <value value="0x0d" name="RB_RBPERF_GMEM_CH1_WRITE"/>
+ <value value="0x0e" name="RB_RBPERF_CP_CONTEXT_DONE"/>
+ <value value="0x0f" name="RB_RBPERF_CP_CACHE_FLUSH"/>
+ <value value="0x10" name="RB_RBPERF_CP_ZPASS_DONE"/>
+</enum>
+
+<enum name="a3xx_rbbm_perfcounter_select">
+ <value value="0" name="RBBM_ALAWYS_ON"/>
+ <value value="1" name="RBBM_VBIF_BUSY"/>
+ <value value="2" name="RBBM_TSE_BUSY"/>
+ <value value="3" name="RBBM_RAS_BUSY"/>
+ <value value="4" name="RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="RBBM_VFD_BUSY"/>
+ <value value="7" name="RBBM_VPC_BUSY"/>
+ <value value="8" name="RBBM_UCHE_BUSY"/>
+ <value value="9" name="RBBM_VSC_BUSY"/>
+ <value value="10" name="RBBM_HLSQ_BUSY"/>
+ <value value="11" name="RBBM_ANY_RB_BUSY"/>
+ <value value="12" name="RBBM_ANY_TEX_BUSY"/>
+ <value value="13" name="RBBM_ANY_USP_BUSY"/>
+ <value value="14" name="RBBM_ANY_MARB_BUSY"/>
+ <value value="15" name="RBBM_ANY_ARB_BUSY"/>
+ <value value="16" name="RBBM_AHB_STATUS_BUSY"/>
+ <value value="17" name="RBBM_AHB_STATUS_STALLED"/>
+ <value value="18" name="RBBM_AHB_STATUS_TXFR"/>
+ <value value="19" name="RBBM_AHB_STATUS_TXFR_SPLIT"/>
+ <value value="20" name="RBBM_AHB_STATUS_TXFR_ERROR"/>
+ <value value="21" name="RBBM_AHB_STATUS_LONG_STALL"/>
+ <value value="22" name="RBBM_RBBM_STATUS_MASKED"/>
+</enum>
+
+<enum name="a3xx_sp_perfcounter_select">
+ <value value="0x00" name="SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="0x01" name="SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="0x02" name="SP_LM_ATOMICS"/>
+ <value value="0x03" name="SP_UCHE_LOAD_INSTRUCTIONS"/>
+ <value value="0x04" name="SP_UCHE_STORE_INSTRUCTIONS"/>
+ <value value="0x05" name="SP_UCHE_ATOMICS"/>
+ <value value="0x06" name="SP_VS_TEX_INSTRUCTIONS"/>
+ <value value="0x07" name="SP_VS_CFLOW_INSTRUCTIONS"/>
+ <value value="0x08" name="SP_VS_EFU_INSTRUCTIONS"/>
+ <value value="0x09" name="SP_VS_FULL_ALU_INSTRUCTIONS"/>
+ <value value="0x0a" name="SP_VS_HALF_ALU_INSTRUCTIONS"/>
+ <value value="0x0b" name="SP_FS_TEX_INSTRUCTIONS"/>
+ <value value="0x0c" name="SP_FS_CFLOW_INSTRUCTIONS"/>
+ <value value="0x0d" name="SP_FS_EFU_INSTRUCTIONS"/>
+ <value value="0x0e" name="SP_FS_FULL_ALU_INSTRUCTIONS"/>
+ <value value="0x0f" name="SP_FS_HALF_ALU_INSTRUCTIONS"/>
+ <value value="0x10" name="SP_FS_BARY_INSTRUCTIONS"/>
+ <value value="0x11" name="SP_VS_INSTRUCTIONS"/>
+ <value value="0x12" name="SP_FS_INSTRUCTIONS"/>
+ <value value="0x13" name="SP_ADDR_LOCK_COUNT"/>
+ <value value="0x14" name="SP_UCHE_READ_TRANS"/>
+ <value value="0x15" name="SP_UCHE_WRITE_TRANS"/>
+ <value value="0x16" name="SP_EXPORT_VPC_TRANS"/>
+ <value value="0x17" name="SP_EXPORT_RB_TRANS"/>
+ <value value="0x18" name="SP_PIXELS_KILLED"/>
+ <value value="0x19" name="SP_ICL1_REQUESTS"/>
+ <value value="0x1a" name="SP_ICL1_MISSES"/>
+ <value value="0x1b" name="SP_ICL0_REQUESTS"/>
+ <value value="0x1c" name="SP_ICL0_MISSES"/>
+ <value value="0x1d" name="SP_ALU_ACTIVE_CYCLES"/>
+ <value value="0x1e" name="SP_EFU_ACTIVE_CYCLES"/>
+ <value value="0x1f" name="SP_STALL_CYCLES_BY_VPC"/>
+ <value value="0x20" name="SP_STALL_CYCLES_BY_TP"/>
+ <value value="0x21" name="SP_STALL_CYCLES_BY_UCHE"/>
+ <value value="0x22" name="SP_STALL_CYCLES_BY_RB"/>
+ <value value="0x23" name="SP_ACTIVE_CYCLES_ANY"/>
+ <value value="0x24" name="SP_ACTIVE_CYCLES_ALL"/>
+</enum>
+
+<enum name="a3xx_tp_perfcounter_select">
+ <value value="0x00" name="TPL1_TPPERF_L1_REQUESTS"/>
+ <value value="0x01" name="TPL1_TPPERF_TP0_L1_REQUESTS"/>
+ <value value="0x02" name="TPL1_TPPERF_TP0_L1_MISSES"/>
+ <value value="0x03" name="TPL1_TPPERF_TP1_L1_REQUESTS"/>
+ <value value="0x04" name="TPL1_TPPERF_TP1_L1_MISSES"/>
+ <value value="0x05" name="TPL1_TPPERF_TP2_L1_REQUESTS"/>
+ <value value="0x06" name="TPL1_TPPERF_TP2_L1_MISSES"/>
+ <value value="0x07" name="TPL1_TPPERF_TP3_L1_REQUESTS"/>
+ <value value="0x08" name="TPL1_TPPERF_TP3_L1_MISSES"/>
+ <value value="0x09" name="TPL1_TPPERF_OUTPUT_TEXELS_POINT"/>
+ <value value="0x0a" name="TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR"/>
+ <value value="0x0b" name="TPL1_TPPERF_OUTPUT_TEXELS_MIP"/>
+ <value value="0x0c" name="TPL1_TPPERF_OUTPUT_TEXELS_ANISO"/>
+ <value value="0x0d" name="TPL1_TPPERF_BILINEAR_OPS"/>
+ <value value="0x0e" name="TPL1_TPPERF_QUADSQUADS_OFFSET"/>
+ <value value="0x0f" name="TPL1_TPPERF_QUADQUADS_SHADOW"/>
+ <value value="0x10" name="TPL1_TPPERF_QUADS_ARRAY"/>
+ <value value="0x11" name="TPL1_TPPERF_QUADS_PROJECTION"/>
+ <value value="0x12" name="TPL1_TPPERF_QUADS_GRADIENT"/>
+ <value value="0x13" name="TPL1_TPPERF_QUADS_1D2D"/>
+ <value value="0x14" name="TPL1_TPPERF_QUADS_3DCUBE"/>
+ <value value="0x15" name="TPL1_TPPERF_ZERO_LOD"/>
+ <value value="0x16" name="TPL1_TPPERF_OUTPUT_TEXELS"/>
+ <value value="0x17" name="TPL1_TPPERF_ACTIVE_CYCLES_ANY"/>
+ <value value="0x18" name="TPL1_TPPERF_ACTIVE_CYCLES_ALL"/>
+ <value value="0x19" name="TPL1_TPPERF_STALL_CYCLES_BY_ARB"/>
+ <value value="0x1a" name="TPL1_TPPERF_LATENCY"/>
+ <value value="0x1b" name="TPL1_TPPERF_LATENCY_TRANS"/>
+</enum>
+
+<enum name="a3xx_vfd_perfcounter_select">
+ <value value="0" name="VFD_PERF_UCHE_BYTE_FETCHED"/>
+ <value value="1" name="VFD_PERF_UCHE_TRANS"/>
+ <value value="2" name="VFD_PERF_VPC_BYPASS_COMPONENTS"/>
+ <value value="3" name="VFD_PERF_FETCH_INSTRUCTIONS"/>
+ <value value="4" name="VFD_PERF_DECODE_INSTRUCTIONS"/>
+ <value value="5" name="VFD_PERF_ACTIVE_CYCLES"/>
+ <value value="6" name="VFD_PERF_STALL_CYCLES_UCHE"/>
+ <value value="7" name="VFD_PERF_STALL_CYCLES_HLSQ"/>
+ <value value="8" name="VFD_PERF_STALL_CYCLES_VPC_BYPASS"/>
+ <value value="9" name="VFD_PERF_STALL_CYCLES_VPC_ALLOC"/>
+</enum>
+
+<enum name="a3xx_vpc_perfcounter_select">
+ <value value="0" name="VPC_PERF_SP_LM_PRIMITIVES"/>
+ <value value="1" name="VPC_PERF_COMPONENTS_FROM_SP"/>
+ <value value="2" name="VPC_PERF_SP_LM_COMPONENTS"/>
+ <value value="3" name="VPC_PERF_ACTIVE_CYCLES"/>
+ <value value="4" name="VPC_PERF_STALL_CYCLES_LM"/>
+ <value value="5" name="VPC_PERF_STALL_CYCLES_RAS"/>
+</enum>
+
+<enum name="a3xx_uche_perfcounter_select">
+ <value value="0x00" name="UCHE_UCHEPERF_VBIF_READ_BEATS_TP"/>
+ <value value="0x01" name="UCHE_UCHEPERF_VBIF_READ_BEATS_VFD"/>
+ <value value="0x02" name="UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ"/>
+ <value value="0x03" name="UCHE_UCHEPERF_VBIF_READ_BEATS_MARB"/>
+ <value value="0x04" name="UCHE_UCHEPERF_VBIF_READ_BEATS_SP"/>
+ <value value="0x08" name="UCHE_UCHEPERF_READ_REQUESTS_TP"/>
+ <value value="0x09" name="UCHE_UCHEPERF_READ_REQUESTS_VFD"/>
+ <value value="0x0a" name="UCHE_UCHEPERF_READ_REQUESTS_HLSQ"/>
+ <value value="0x0b" name="UCHE_UCHEPERF_READ_REQUESTS_MARB"/>
+ <value value="0x0c" name="UCHE_UCHEPERF_READ_REQUESTS_SP"/>
+ <value value="0x0d" name="UCHE_UCHEPERF_WRITE_REQUESTS_MARB"/>
+ <value value="0x0e" name="UCHE_UCHEPERF_WRITE_REQUESTS_SP"/>
+ <value value="0x0f" name="UCHE_UCHEPERF_TAG_CHECK_FAILS"/>
+ <value value="0x10" name="UCHE_UCHEPERF_EVICTS"/>
+ <value value="0x11" name="UCHE_UCHEPERF_FLUSHES"/>
+ <value value="0x12" name="UCHE_UCHEPERF_VBIF_LATENCY_CYCLES"/>
+ <value value="0x13" name="UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES"/>
+ <value value="0x14" name="UCHE_UCHEPERF_ACTIVE_CYCLES"/>
+</enum>
+
+<enum name="a3xx_intp_mode">
+ <value name="SMOOTH" value="0"/>
+ <value name="FLAT" value="1"/>
+ <value name="ZERO" value="2"/>
+ <value name="ONE" value="3"/>
+</enum>
+
+<enum name="a3xx_repl_mode">
+ <value name="S" value="1"/>
+ <value name="T" value="2"/>
+ <value name="ONE_T" value="3"/>
+</enum>
+
+<domain name="A3XX" width="32">
+ <!-- RBBM registers -->
+ <reg32 offset="0x0000" name="RBBM_HW_VERSION"/>
+ <reg32 offset="0x0001" name="RBBM_HW_RELEASE"/>
+ <reg32 offset="0x0002" name="RBBM_HW_CONFIGURATION"/>
+ <reg32 offset="0x0010" name="RBBM_CLOCK_CTL"/>
+ <reg32 offset="0x0012" name="RBBM_SP_HYST_CNT"/>
+ <reg32 offset="0x0018" name="RBBM_SW_RESET_CMD"/>
+ <reg32 offset="0x0020" name="RBBM_AHB_CTL0"/>
+ <reg32 offset="0x0021" name="RBBM_AHB_CTL1"/>
+ <reg32 offset="0x0022" name="RBBM_AHB_CMD"/>
+ <reg32 offset="0x0027" name="RBBM_AHB_ERROR_STATUS"/>
+ <reg32 offset="0x002e" name="RBBM_GPR0_CTL"/>
+ <reg32 offset="0x0030" name="RBBM_STATUS">
+ <bitfield name="HI_BUSY" pos="0" type="boolean"/>
+ <bitfield name="CP_ME_BUSY" pos="1" type="boolean"/>
+ <bitfield name="CP_PFP_BUSY" pos="2" type="boolean"/>
+ <bitfield name="CP_NRT_BUSY" pos="14" type="boolean"/>
+ <bitfield name="VBIF_BUSY" pos="15" type="boolean"/>
+ <bitfield name="TSE_BUSY" pos="16" type="boolean"/>
+ <bitfield name="RAS_BUSY" pos="17" type="boolean"/>
+ <bitfield name="RB_BUSY" pos="18" type="boolean"/>
+ <bitfield name="PC_DCALL_BUSY" pos="19" type="boolean"/>
+ <bitfield name="PC_VSD_BUSY" pos="20" type="boolean"/>
+ <bitfield name="VFD_BUSY" pos="21" type="boolean"/>
+ <bitfield name="VPC_BUSY" pos="22" type="boolean"/>
+ <bitfield name="UCHE_BUSY" pos="23" type="boolean"/>
+ <bitfield name="SP_BUSY" pos="24" type="boolean"/>
+ <bitfield name="TPL1_BUSY" pos="25" type="boolean"/>
+ <bitfield name="MARB_BUSY" pos="26" type="boolean"/>
+ <bitfield name="VSC_BUSY" pos="27" type="boolean"/>
+ <bitfield name="ARB_BUSY" pos="28" type="boolean"/>
+ <bitfield name="HLSQ_BUSY" pos="29" type="boolean"/>
+ <bitfield name="GPU_BUSY_NOHC" pos="30" type="boolean"/>
+ <bitfield name="GPU_BUSY" pos="31" type="boolean"/>
+ </reg32>
+ <!-- used in fw CP_WAIT_FOR_IDLE, similar to NQWAIT_UNTIL on a2xx: -->
+ <reg32 offset="0x0040" name="RBBM_NQWAIT_UNTIL"/>
+ <reg32 offset="0x0033" name="RBBM_WAIT_IDLE_CLOCKS_CTL"/>
+ <reg32 offset="0x0050" name="RBBM_INTERFACE_HANG_INT_CTL"/>
+ <reg32 offset="0x0051" name="RBBM_INTERFACE_HANG_MASK_CTL0"/>
+ <reg32 offset="0x0054" name="RBBM_INTERFACE_HANG_MASK_CTL1"/>
+ <reg32 offset="0x0057" name="RBBM_INTERFACE_HANG_MASK_CTL2"/>
+ <reg32 offset="0x005a" name="RBBM_INTERFACE_HANG_MASK_CTL3"/>
+
+ <bitset name="A3XX_INT0">
+ <bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
+ <bitfield name="RBBM_AHB_ERROR" pos="1" type="boolean"/>
+ <bitfield name="RBBM_REG_TIMEOUT" pos="2" type="boolean"/>
+ <bitfield name="RBBM_ME_MS_TIMEOUT" pos="3" type="boolean"/>
+ <bitfield name="RBBM_PFP_MS_TIMEOUT" pos="4" type="boolean"/>
+ <bitfield name="RBBM_ATB_BUS_OVERFLOW" pos="5" type="boolean"/>
+ <bitfield name="VFD_ERROR" pos="6" type="boolean"/>
+ <bitfield name="CP_SW_INT" pos="7" type="boolean"/>
+ <bitfield name="CP_T0_PACKET_IN_IB" pos="8" type="boolean"/>
+ <bitfield name="CP_OPCODE_ERROR" pos="9" type="boolean"/>
+ <bitfield name="CP_RESERVED_BIT_ERROR" pos="10" type="boolean"/>
+ <bitfield name="CP_HW_FAULT" pos="11" type="boolean"/>
+ <bitfield name="CP_DMA" pos="12" type="boolean"/>
+ <bitfield name="CP_IB2_INT" pos="13" type="boolean"/>
+ <bitfield name="CP_IB1_INT" pos="14" type="boolean"/>
+ <bitfield name="CP_RB_INT" pos="15" type="boolean"/>
+ <bitfield name="CP_REG_PROTECT_FAULT" pos="16" type="boolean"/>
+ <bitfield name="CP_RB_DONE_TS" pos="17" type="boolean"/>
+ <bitfield name="CP_VS_DONE_TS" pos="18" type="boolean"/>
+ <bitfield name="CP_PS_DONE_TS" pos="19" type="boolean"/>
+ <bitfield name="CACHE_FLUSH_TS" pos="20" type="boolean"/>
+ <bitfield name="CP_AHB_ERROR_HALT" pos="21" type="boolean"/>
+ <bitfield name="MISC_HANG_DETECT" pos="24" type="boolean"/>
+ <bitfield name="UCHE_OOB_ACCESS" pos="25" type="boolean"/>
+ </bitset>
+
+
+ <!--
+ set in pm4 fw INVALID_JUMP_TABLE_ENTRY and CP_INTERRUPT (compare
+ to CP_INT_STATUS in a2xx firmware), so this seems to be the a3xx
+	way for fw to raise an irq:
+ -->
+ <reg32 offset="0x0060" name="RBBM_INT_SET_CMD" type="A3XX_INT0"/>
+ <reg32 offset="0x0061" name="RBBM_INT_CLEAR_CMD" type="A3XX_INT0"/>
+ <reg32 offset="0x0063" name="RBBM_INT_0_MASK" type="A3XX_INT0"/>
+ <reg32 offset="0x0064" name="RBBM_INT_0_STATUS" type="A3XX_INT0"/>
+ <reg32 offset="0x0080" name="RBBM_PERFCTR_CTL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0081" name="RBBM_PERFCTR_LOAD_CMD0"/>
+ <reg32 offset="0x0082" name="RBBM_PERFCTR_LOAD_CMD1"/>
+ <reg32 offset="0x0084" name="RBBM_PERFCTR_LOAD_VALUE_LO"/>
+ <reg32 offset="0x0085" name="RBBM_PERFCTR_LOAD_VALUE_HI"/>
+ <reg32 offset="0x0086" name="RBBM_PERFCOUNTER0_SELECT" type="a3xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x0087" name="RBBM_PERFCOUNTER1_SELECT" type="a3xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x0088" name="RBBM_GPU_BUSY_MASKED"/>
+ <reg32 offset="0x0090" name="RBBM_PERFCTR_CP_0_LO"/>
+ <reg32 offset="0x0091" name="RBBM_PERFCTR_CP_0_HI"/>
+ <reg32 offset="0x0092" name="RBBM_PERFCTR_RBBM_0_LO"/>
+ <reg32 offset="0x0093" name="RBBM_PERFCTR_RBBM_0_HI"/>
+ <reg32 offset="0x0094" name="RBBM_PERFCTR_RBBM_1_LO"/>
+ <reg32 offset="0x0095" name="RBBM_PERFCTR_RBBM_1_HI"/>
+ <reg32 offset="0x0096" name="RBBM_PERFCTR_PC_0_LO"/>
+ <reg32 offset="0x0097" name="RBBM_PERFCTR_PC_0_HI"/>
+ <reg32 offset="0x0098" name="RBBM_PERFCTR_PC_1_LO"/>
+ <reg32 offset="0x0099" name="RBBM_PERFCTR_PC_1_HI"/>
+ <reg32 offset="0x009a" name="RBBM_PERFCTR_PC_2_LO"/>
+ <reg32 offset="0x009b" name="RBBM_PERFCTR_PC_2_HI"/>
+ <reg32 offset="0x009c" name="RBBM_PERFCTR_PC_3_LO"/>
+ <reg32 offset="0x009d" name="RBBM_PERFCTR_PC_3_HI"/>
+ <reg32 offset="0x009e" name="RBBM_PERFCTR_VFD_0_LO"/>
+ <reg32 offset="0x009f" name="RBBM_PERFCTR_VFD_0_HI"/>
+ <reg32 offset="0x00a0" name="RBBM_PERFCTR_VFD_1_LO"/>
+ <reg32 offset="0x00a1" name="RBBM_PERFCTR_VFD_1_HI"/>
+ <reg32 offset="0x00a2" name="RBBM_PERFCTR_HLSQ_0_LO"/>
+ <reg32 offset="0x00a3" name="RBBM_PERFCTR_HLSQ_0_HI"/>
+ <reg32 offset="0x00a4" name="RBBM_PERFCTR_HLSQ_1_LO"/>
+ <reg32 offset="0x00a5" name="RBBM_PERFCTR_HLSQ_1_HI"/>
+ <reg32 offset="0x00a6" name="RBBM_PERFCTR_HLSQ_2_LO"/>
+ <reg32 offset="0x00a7" name="RBBM_PERFCTR_HLSQ_2_HI"/>
+ <reg32 offset="0x00a8" name="RBBM_PERFCTR_HLSQ_3_LO"/>
+ <reg32 offset="0x00a9" name="RBBM_PERFCTR_HLSQ_3_HI"/>
+ <reg32 offset="0x00aa" name="RBBM_PERFCTR_HLSQ_4_LO"/>
+ <reg32 offset="0x00ab" name="RBBM_PERFCTR_HLSQ_4_HI"/>
+ <reg32 offset="0x00ac" name="RBBM_PERFCTR_HLSQ_5_LO"/>
+ <reg32 offset="0x00ad" name="RBBM_PERFCTR_HLSQ_5_HI"/>
+ <reg32 offset="0x00ae" name="RBBM_PERFCTR_VPC_0_LO"/>
+ <reg32 offset="0x00af" name="RBBM_PERFCTR_VPC_0_HI"/>
+ <reg32 offset="0x00b0" name="RBBM_PERFCTR_VPC_1_LO"/>
+ <reg32 offset="0x00b1" name="RBBM_PERFCTR_VPC_1_HI"/>
+ <reg32 offset="0x00b2" name="RBBM_PERFCTR_TSE_0_LO"/>
+ <reg32 offset="0x00b3" name="RBBM_PERFCTR_TSE_0_HI"/>
+ <reg32 offset="0x00b4" name="RBBM_PERFCTR_TSE_1_LO"/>
+ <reg32 offset="0x00b5" name="RBBM_PERFCTR_TSE_1_HI"/>
+ <reg32 offset="0x00b6" name="RBBM_PERFCTR_RAS_0_LO"/>
+ <reg32 offset="0x00b7" name="RBBM_PERFCTR_RAS_0_HI"/>
+ <reg32 offset="0x00b8" name="RBBM_PERFCTR_RAS_1_LO"/>
+ <reg32 offset="0x00b9" name="RBBM_PERFCTR_RAS_1_HI"/>
+ <reg32 offset="0x00ba" name="RBBM_PERFCTR_UCHE_0_LO"/>
+ <reg32 offset="0x00bb" name="RBBM_PERFCTR_UCHE_0_HI"/>
+ <reg32 offset="0x00bc" name="RBBM_PERFCTR_UCHE_1_LO"/>
+ <reg32 offset="0x00bd" name="RBBM_PERFCTR_UCHE_1_HI"/>
+ <reg32 offset="0x00be" name="RBBM_PERFCTR_UCHE_2_LO"/>
+ <reg32 offset="0x00bf" name="RBBM_PERFCTR_UCHE_2_HI"/>
+ <reg32 offset="0x00c0" name="RBBM_PERFCTR_UCHE_3_LO"/>
+ <reg32 offset="0x00c1" name="RBBM_PERFCTR_UCHE_3_HI"/>
+ <reg32 offset="0x00c2" name="RBBM_PERFCTR_UCHE_4_LO"/>
+ <reg32 offset="0x00c3" name="RBBM_PERFCTR_UCHE_4_HI"/>
+ <reg32 offset="0x00c4" name="RBBM_PERFCTR_UCHE_5_LO"/>
+ <reg32 offset="0x00c5" name="RBBM_PERFCTR_UCHE_5_HI"/>
+ <reg32 offset="0x00c6" name="RBBM_PERFCTR_TP_0_LO"/>
+ <reg32 offset="0x00c7" name="RBBM_PERFCTR_TP_0_HI"/>
+ <reg32 offset="0x00c8" name="RBBM_PERFCTR_TP_1_LO"/>
+ <reg32 offset="0x00c9" name="RBBM_PERFCTR_TP_1_HI"/>
+ <reg32 offset="0x00ca" name="RBBM_PERFCTR_TP_2_LO"/>
+ <reg32 offset="0x00cb" name="RBBM_PERFCTR_TP_2_HI"/>
+ <reg32 offset="0x00cc" name="RBBM_PERFCTR_TP_3_LO"/>
+ <reg32 offset="0x00cd" name="RBBM_PERFCTR_TP_3_HI"/>
+ <reg32 offset="0x00ce" name="RBBM_PERFCTR_TP_4_LO"/>
+ <reg32 offset="0x00cf" name="RBBM_PERFCTR_TP_4_HI"/>
+ <reg32 offset="0x00d0" name="RBBM_PERFCTR_TP_5_LO"/>
+ <reg32 offset="0x00d1" name="RBBM_PERFCTR_TP_5_HI"/>
+ <reg32 offset="0x00d2" name="RBBM_PERFCTR_SP_0_LO"/>
+ <reg32 offset="0x00d3" name="RBBM_PERFCTR_SP_0_HI"/>
+ <reg32 offset="0x00d4" name="RBBM_PERFCTR_SP_1_LO"/>
+ <reg32 offset="0x00d5" name="RBBM_PERFCTR_SP_1_HI"/>
+ <reg32 offset="0x00d6" name="RBBM_PERFCTR_SP_2_LO"/>
+ <reg32 offset="0x00d7" name="RBBM_PERFCTR_SP_2_HI"/>
+ <reg32 offset="0x00d8" name="RBBM_PERFCTR_SP_3_LO"/>
+ <reg32 offset="0x00d9" name="RBBM_PERFCTR_SP_3_HI"/>
+ <reg32 offset="0x00da" name="RBBM_PERFCTR_SP_4_LO"/>
+ <reg32 offset="0x00db" name="RBBM_PERFCTR_SP_4_HI"/>
+ <reg32 offset="0x00dc" name="RBBM_PERFCTR_SP_5_LO"/>
+ <reg32 offset="0x00dd" name="RBBM_PERFCTR_SP_5_HI"/>
+ <reg32 offset="0x00de" name="RBBM_PERFCTR_SP_6_LO"/>
+ <reg32 offset="0x00df" name="RBBM_PERFCTR_SP_6_HI"/>
+ <reg32 offset="0x00e0" name="RBBM_PERFCTR_SP_7_LO"/>
+ <reg32 offset="0x00e1" name="RBBM_PERFCTR_SP_7_HI"/>
+ <reg32 offset="0x00e2" name="RBBM_PERFCTR_RB_0_LO"/>
+ <reg32 offset="0x00e3" name="RBBM_PERFCTR_RB_0_HI"/>
+ <reg32 offset="0x00e4" name="RBBM_PERFCTR_RB_1_LO"/>
+ <reg32 offset="0x00e5" name="RBBM_PERFCTR_RB_1_HI"/>
+ <reg32 offset="0x00ea" name="RBBM_PERFCTR_PWR_0_LO"/>
+ <reg32 offset="0x00eb" name="RBBM_PERFCTR_PWR_0_HI"/>
+ <reg32 offset="0x00ec" name="RBBM_PERFCTR_PWR_1_LO"/>
+ <reg32 offset="0x00ed" name="RBBM_PERFCTR_PWR_1_HI"/>
+ <reg32 offset="0x0100" name="RBBM_RBBM_CTL"/>
+ <reg32 offset="0x0111" name="RBBM_DEBUG_BUS_CTL"/>
+ <reg32 offset="0x0112" name="RBBM_DEBUG_BUS_DATA_STATUS"/>
+
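+	<!--
+	Editor's note: a hedged sketch of the select/read pattern implied by
+	the RBBM_PERFCOUNTERn_SELECT registers and the RBBM_PERFCTR_*_LO/HI
+	pairs above.  gpu_read()/gpu_write() stand in for the driver's MMIO
+	accessors, and the REG_A3XX_* constants are assumed to come from
+	headers generated from this file.
+
+	    /* counter 0 counts RBBM_VBIF_BUSY (value 1 in the enum above) */
+	    gpu_write(gpu, REG_A3XX_RBBM_PERFCOUNTER0_SELECT, RBBM_VBIF_BUSY);
+	    gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 1);   /* ENABLE */
+	    /* ... run the workload ... */
+	    lo = gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_LO);
+	    hi = gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_HI);
+	    busy = ((uint64_t)hi << 32) | lo;
+	-->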
+ <!-- CP registers -->
+ <reg32 offset="0x01c9" name="CP_PFP_UCODE_ADDR"/>
+ <reg32 offset="0x01ca" name="CP_PFP_UCODE_DATA"/>
+ <reg32 offset="0x01cc" name="CP_ROQ_ADDR"/>
+ <reg32 offset="0x01cd" name="CP_ROQ_DATA"/>
+ <reg32 offset="0x01d1" name="CP_MERCIU_ADDR"/>
+ <reg32 offset="0x01d2" name="CP_MERCIU_DATA"/>
+ <reg32 offset="0x01d3" name="CP_MERCIU_DATA2"/>
+	<!-- see a3xx_snapshot_cp_meq().. looks like the way to dump the queue between pfp and pm4 -->
+ <reg32 offset="0x01da" name="CP_MEQ_ADDR"/>
+ <reg32 offset="0x01db" name="CP_MEQ_DATA"/>
+ <reg32 offset="0x01f5" name="CP_WFI_PEND_CTR"/>
+ <reg32 offset="0x039d" name="RBBM_PM_OVERRIDE2"/>
+
+ <reg32 offset="0x0445" name="CP_PERFCOUNTER_SELECT" type="a3xx_cp_perfcounter_select"/>
+ <reg32 offset="0x045c" name="CP_HW_FAULT"/>
+ <reg32 offset="0x045e" name="CP_PROTECT_CTRL"/>
+ <reg32 offset="0x045f" name="CP_PROTECT_STATUS"/>
+ <array offset="0x0460" name="CP_PROTECT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <reg32 offset="0x054d" name="CP_AHB_FAULT"/>
+
+ <reg32 offset="0x0d00" name="SQ_GPR_MANAGEMENT"/>
+ <reg32 offset="0x0d02" name="SQ_INST_STORE_MANAGMENT"/>
+ <reg32 offset="0x0e1e" name="TP0_CHICKEN"/>
+
+	<!-- these are, I guess, either SP or HLSQ, since they relate to shader core setup: -->
+ <reg32 offset="0x0e22" name="SP_GLOBAL_MEM_SIZE" type="uint">
+ <doc>
+			The pair of MEM_SIZE/ADDR registers gets programmed
+			in sequence with the size/addr of each buffer.
+ </doc>
+ </reg32>
+ <reg32 offset="0x0e23" name="SP_GLOBAL_MEM_ADDR"/>
+
+ <!-- GRAS registers -->
+ <reg32 offset="0x2040" name="GRAS_CL_CLIP_CNTL">
+ <bitfield name="IJ_PERSP_CENTER" pos="12" type="boolean"/>
+ <bitfield name="IJ_NON_PERSP_CENTER" pos="13" type="boolean"/>
+ <bitfield name="IJ_PERSP_CENTROID" pos="14" type="boolean"/>
+ <bitfield name="IJ_NON_PERSP_CENTROID" pos="15" type="boolean"/>
+ <bitfield name="CLIP_DISABLE" pos="16" type="boolean"/>
+ <bitfield name="ZFAR_CLIP_DISABLE" pos="17" type="boolean"/>
+ <bitfield name="VP_CLIP_CODE_IGNORE" pos="19" type="boolean"/>
+ <bitfield name="VP_XFORM_DISABLE" pos="20" type="boolean"/>
+ <bitfield name="PERSP_DIVISION_DISABLE" pos="21" type="boolean"/>
+ <bitfield name="ZERO_GB_SCALE_Z" pos="22" type="boolean">
+ <doc>aka clip_halfz</doc>
+ </bitfield>
+ <!-- set when gl_FragCoord.z is enabled in frag shader: -->
+ <bitfield name="ZCOORD" pos="23" type="boolean"/>
+ <bitfield name="WCOORD" pos="24" type="boolean"/>
+		<!-- set when frag shader writes z (so early z test is disabled): -->
+ <bitfield name="ZCLIP_DISABLE" pos="25" type="boolean"/>
+ <bitfield name="NUM_USER_CLIP_PLANES" low="26" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2044" name="GRAS_CL_GB_CLIP_ADJ">
+ <bitfield name="HORZ" low="0" high="9" type="uint"/>
+ <bitfield name="VERT" low="10" high="19" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2048" name="GRAS_CL_VPORT_XOFFSET" type="float"/>
+ <reg32 offset="0x2049" name="GRAS_CL_VPORT_XSCALE" type="float"/>
+ <reg32 offset="0x204a" name="GRAS_CL_VPORT_YOFFSET" type="float"/>
+ <reg32 offset="0x204b" name="GRAS_CL_VPORT_YSCALE" type="float"/>
+ <reg32 offset="0x204c" name="GRAS_CL_VPORT_ZOFFSET" type="float"/>
+ <reg32 offset="0x204d" name="GRAS_CL_VPORT_ZSCALE" type="float"/>
+ <reg32 offset="0x2068" name="GRAS_SU_POINT_MINMAX">
+ <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x2069" name="GRAS_SU_POINT_SIZE" type="fixed" radix="4"/>
+ <reg32 offset="0x206c" name="GRAS_SU_POLY_OFFSET_SCALE">
+ <bitfield name="VAL" low="0" high="23" type="fixed" radix="20"/>
+ <doc>range of -8.0 to 8.0</doc>
+ </reg32>
+ <reg32 offset="0x206d" name="GRAS_SU_POLY_OFFSET_OFFSET" radix="6" type="fixed">
+ <doc>range of -512.0 to 512.0</doc>
+ </reg32>
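+	<!--
+	Editor's note: type="fixed" with radix="20" above means a signed fixed
+	point value with 20 fractional bits, which is why the 24 bit VAL field
+	covers the -8.0 to 8.0 range noted in the doc (2^23 / 2^20 = 8).  A
+	hedged conversion sketch, with hypothetical helper names:
+
+	    /* POLY_OFFSET_SCALE: 24-bit signed value, 20 fractional bits */
+	    static inline uint32_t a3xx_poly_offset_scale(float scale)
+	    {
+	        return ((uint32_t)(int32_t)(scale * (float)(1 << 20))) & 0xffffff;
+	    }
+
+	    /* POLY_OFFSET_OFFSET: radix="6", i.e. 6 fractional bits */
+	    static inline uint32_t a3xx_poly_offset_offset(float offset)
+	    {
+	        return (uint32_t)(int32_t)(offset * (float)(1 << 6));
+	    }
+	-->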
+ <reg32 offset="0x2070" name="GRAS_SU_MODE_CONTROL">
+ <bitfield name="CULL_FRONT" pos="0" type="boolean"/>
+ <bitfield name="CULL_BACK" pos="1" type="boolean"/>
+ <bitfield name="FRONT_CW" pos="2" type="boolean"/>
+ <bitfield name="LINEHALFWIDTH" low="3" high="10" radix="2" type="fixed"/>
+ <bitfield name="POLY_OFFSET" pos="11" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2072" name="GRAS_SC_CONTROL">
+ <!-- complete wild-ass-guess for sizes of these bitfields.. -->
+ <bitfield name="RENDER_MODE" low="4" high="7" type="a3xx_render_mode"/>
+ <bitfield name="MSAA_SAMPLES" low="8" high="11" type="a3xx_msaa_samples"/>
+ <bitfield name="RASTER_MODE" low="12" high="15"/>
+ </reg32>
+
+ <reg32 offset="0x2074" name="GRAS_SC_SCREEN_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x2075" name="GRAS_SC_SCREEN_SCISSOR_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x2079" name="GRAS_SC_WINDOW_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x207a" name="GRAS_SC_WINDOW_SCISSOR_BR" type="adreno_reg_xy"/>
+
+ <!-- RB registers -->
+ <reg32 offset="0x20c0" name="RB_MODE_CONTROL">
+ <!-- guess on the # of bits here.. -->
+ <bitfield name="GMEM_BYPASS" pos="7" type="boolean"/>
+ <doc>
+ RENDER_MODE is RB_RESOLVE_PASS for gmem->mem, otherwise RB_RENDER_PASS
+ </doc>
+ <bitfield name="RENDER_MODE" low="8" high="10" type="a3xx_render_mode"/>
+ <bitfield name="MRT" low="12" high="13" type="uint">
+ <doc>render targets - 1</doc>
+ </bitfield>
+ <bitfield name="MARB_CACHE_SPLIT_MODE" pos="15" type="boolean"/>
+ <bitfield name="PACKER_TIMER_ENABLE" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x20c1" name="RB_RENDER_CONTROL">
+ <bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="YUV_IN_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="COV_VALUE_INPUT_ENABLE" pos="2" type="boolean"/>
+ <!-- set when gl_FrontFacing is accessed in frag shader: -->
+ <bitfield name="FACENESS" pos="3" type="boolean"/>
+ <bitfield name="BIN_WIDTH" low="4" high="11" shr="5" type="uint"/>
+ <bitfield name="DISABLE_COLOR_PIPE" pos="12" type="boolean"/>
+ <!--
+ ENABLE_GMEM not set on mem2gmem.. so possibly it is actually
+ controlling blend or readback from GMEM??
+ -->
+ <bitfield name="ENABLE_GMEM" pos="13" type="boolean"/>
+ <bitfield name="COORD_MASK" low="14" high="17" type="hex"/>
+ <bitfield name="I_CLAMP_ENABLE" pos="19" type="boolean"/>
+ <bitfield name="COV_VALUE_OUTPUT_ENABLE" pos="20" type="boolean"/>
+ <bitfield name="ALPHA_TEST" pos="22" type="boolean"/>
+ <bitfield name="ALPHA_TEST_FUNC" low="24" high="26" type="adreno_compare_func"/>
+ <bitfield name="ALPHA_TO_COVERAGE" pos="30" type="boolean"/>
+ <bitfield name="ALPHA_TO_ONE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x20c2" name="RB_MSAA_CONTROL">
+ <bitfield name="DISABLE" pos="10" type="boolean"/>
+ <bitfield name="SAMPLES" low="12" high="15" type="a3xx_msaa_samples"/>
+ <bitfield name="SAMPLE_MASK" low="16" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="0x20c3" name="RB_ALPHA_REF">
+ <bitfield name="UINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <array offset="0x20c4" name="RB_MRT" stride="4" length="4">
+ <reg32 offset="0x0" name="CONTROL">
+ <bitfield name="READ_DEST_ENABLE" pos="3" type="boolean"/>
+ <!-- both these bits seem to get set when enabling GL_BLEND.. -->
+ <bitfield name="BLEND" pos="4" type="boolean"/>
+ <bitfield name="BLEND2" pos="5" type="boolean"/>
+ <bitfield name="ROP_CODE" low="8" high="11" type="a3xx_rop_code"/>
+ <bitfield name="DITHER_MODE" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="COMPONENT_ENABLE" low="24" high="27" type="hex"/>
+ </reg32>
+ <reg32 offset="0x1" name="BUF_INFO">
+ <bitfield name="COLOR_FORMAT" low="0" high="5" type="a3xx_color_fmt"/>
+ <bitfield name="COLOR_TILE_MODE" low="6" high="7" type="a3xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <bitfield name="COLOR_SRGB" pos="14" type="boolean"/>
+ <doc>
+				Pitch in GMEM (actually appears to be a pitch in bytes, so
+				really a stride), i.e. the pitch of the current tile.
+ </doc>
+ <bitfield name="COLOR_BUF_PITCH" low="17" high="31" shr="5" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2" name="BUF_BASE">
+ <doc>offset into GMEM (or system memory address in bypass mode)</doc>
+ <bitfield name="COLOR_BUF_BASE" low="4" high="31" shr="5" type="hex"/>
+ </reg32>
+ <reg32 offset="0x3" name="BLEND_CONTROL">
+ <bitfield name="RGB_SRC_FACTOR" low="0" high="4" type="adreno_rb_blend_factor"/>
+ <bitfield name="RGB_BLEND_OPCODE" low="5" high="7" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="RGB_DEST_FACTOR" low="8" high="12" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_SRC_FACTOR" low="16" high="20" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_BLEND_OPCODE" low="21" high="23" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="ALPHA_DEST_FACTOR" low="24" high="28" type="adreno_rb_blend_factor"/>
+ <bitfield name="CLAMP_ENABLE" pos="29" type="boolean"/>
+ </reg32>
+ </array>
+
+ <reg32 offset="0x20e4" name="RB_BLEND_RED">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20e5" name="RB_BLEND_GREEN">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20e6" name="RB_BLEND_BLUE">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20e7" name="RB_BLEND_ALPHA">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+
+ <reg32 offset="0x20e8" name="RB_CLEAR_COLOR_DW0"/>
+ <reg32 offset="0x20e9" name="RB_CLEAR_COLOR_DW1"/>
+ <reg32 offset="0x20ea" name="RB_CLEAR_COLOR_DW2"/>
+ <reg32 offset="0x20eb" name="RB_CLEAR_COLOR_DW3"/>
+ <reg32 offset="0x20ec" name="RB_COPY_CONTROL">
+ <!-- not sure # of bits -->
+ <bitfield name="MSAA_RESOLVE" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="DEPTHCLEAR" pos="3" type="boolean"/>
+ <bitfield name="MODE" low="4" high="6" type="adreno_rb_copy_control_mode"/>
+ <bitfield name="MSAA_SRGB_DOWNSAMPLE" pos="7" type="boolean"/>
+ <bitfield name="FASTCLEAR" low="8" high="11" type="hex"/>
+ <bitfield name="DEPTH32_RESOLVE" pos="12" type="boolean"/> <!-- enabled on a Z32F copy -->
+ <bitfield name="GMEM_BASE" low="14" high="31" shr="14" type="hex"/>
+ </reg32>
+ <reg32 offset="0x20ed" name="RB_COPY_DEST_BASE">
+ <bitfield name="BASE" low="4" high="31" shr="5" type="hex"/>
+ </reg32>
+ <reg32 offset="0x20ee" name="RB_COPY_DEST_PITCH">
+ <doc>appears to be the pitch in bytes, so really a stride (see the example below)</doc>
+ <!-- not actually sure about max pitch... -->
+ <bitfield name="PITCH" low="0" high="31" shr="5" type="uint"/>
+ </reg32>
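+ <!--
+ Example for PITCH above (hypothetical numbers, just illustrating
+ the shr="5" encoding): a 1024px wide RGBA8888 destination has a
+ pitch of 1024 * 4 = 4096 bytes, which would presumably be
+ programmed as PITCH = 4096 / 32 = 128.
+ -->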
+ <reg32 offset="0x20ef" name="RB_COPY_DEST_INFO">
+ <bitfield name="TILE" low="0" high="1" type="a3xx_tile_mode"/>
+ <bitfield name="FORMAT" low="2" high="7" type="a3xx_color_fmt"/>
+ <bitfield name="SWAP" low="8" high="9" type="a3xx_color_swap"/>
+ <bitfield name="DITHER_MODE" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="COMPONENT_ENABLE" low="14" high="17" type="hex"/>
+ <bitfield name="ENDIAN" low="18" high="20" type="adreno_rb_surface_endian"/>
+ </reg32>
+ <reg32 offset="0x2100" name="RB_DEPTH_CONTROL">
+ <!--
+ guessing that this matches a2xx with the stencil fields
+ moved out into RB_STENCIL_CONTROL?
+ -->
+ <bitfield name="FRAG_WRITES_Z" pos="0" type="boolean"/>
+ <bitfield name="Z_TEST_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="EARLY_Z_DISABLE" pos="3" type="boolean"/>
+ <bitfield name="ZFUNC" low="4" high="6" type="adreno_compare_func"/>
+ <bitfield name="Z_CLAMP_ENABLE" pos="7" type="boolean"/>
+ <doc>Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER</doc>
+ <bitfield name="Z_READ_ENABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2101" name="RB_DEPTH_CLEAR">
+ <doc>seems to be always set to 0x00000000</doc>
+ </reg32>
+ <reg32 offset="0x2102" name="RB_DEPTH_INFO">
+ <bitfield name="DEPTH_FORMAT" low="0" high="1" type="adreno_rb_depth_format"/>
+ <doc>
+ DEPTH_BASE is the offset in GMEM to the depth/stencil buffer,
+ ie. bin_w * bin_h / 1024 (possibly rounded up to a multiple of
+ something: 39 becomes 40, 78 becomes 80, 75 becomes 80.. so
+ maybe it needs to be a multiple of 8??).  See the example below.
+ </doc>
+ <bitfield name="DEPTH_BASE" low="11" high="31" shr="12" type="hex"/>
+ </reg32>
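+ <!--
+ Worked example for the DEPTH_BASE formula above (the bin size is
+ hypothetical): a 320x240 bin gives 320 * 240 / 1024 = 75, which
+ per the note above gets rounded up and programmed as 80.  Given
+ the shr="12" encoding, that would presumably correspond to a byte
+ offset of 80 * 4096 = 327680 into GMEM.
+ -->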
+ <reg32 offset="0x2103" name="RB_DEPTH_PITCH" shr="3" type="uint">
+ <doc>
+ Pitch of depth buffer or combined depth+stencil buffer
+ in z24s8 cases.
+ </doc>
+ </reg32>
+ <reg32 offset="0x2104" name="RB_STENCIL_CONTROL">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
+ <!--
+ set for stencil operations that require read from stencil
+ buffer, but not for example for stencil clear (which does
+ not require read).. so guessing this is analogous to
+ READ_DEST_ENABLE for color buffer..
+ -->
+ <bitfield name="STENCIL_READ" pos="2" type="boolean"/>
+ <bitfield name="FUNC" low="8" high="10" type="adreno_compare_func"/>
+ <bitfield name="FAIL" low="11" high="13" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS" low="14" high="16" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL" low="17" high="19" type="adreno_stencil_op"/>
+ <bitfield name="FUNC_BF" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="FAIL_BF" low="23" high="25" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
+ </reg32>
+ <reg32 offset="0x2105" name="RB_STENCIL_CLEAR">
+ <doc>seems to be always set to 0x00000000</doc>
+ </reg32>
+ <reg32 offset="0x2106" name="RB_STENCIL_INFO">
+ <doc>Base address for stencil when not using interleaved depth/stencil</doc>
+ <bitfield name="STENCIL_BASE" low="11" high="31" shr="12" type="hex"/>
+ </reg32>
+ <reg32 offset="0x2107" name="RB_STENCIL_PITCH" shr="3" type="uint">
+ <doc>pitch of stencil buffer when not using interleaved depth/stencil</doc>
+ </reg32>
+ <reg32 offset="0x2108" name="RB_STENCILREFMASK" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0x2109" name="RB_STENCILREFMASK_BF" type="adreno_rb_stencilrefmask"/>
+ <!-- VSC == visibility stream c?? -->
+ <reg32 offset="0x210c" name="RB_LRZ_VSC_CONTROL">
+ <doc>seems to be set to 0x00000002 during binning pass</doc>
+ <bitfield name="BINNING_ENABLE" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x210e" name="RB_WINDOW_OFFSET">
+ <doc>X/Y offset of current bin</doc>
+ <bitfield name="X" low="0" high="15" type="uint"/>
+ <bitfield name="Y" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2110" name="RB_SAMPLE_COUNT_CONTROL">
+ <bitfield name="RESET" pos="0" type="boolean"/>
+ <bitfield name="COPY" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2111" name="RB_SAMPLE_COUNT_ADDR"/>
+ <reg32 offset="0x2114" name="RB_Z_CLAMP_MIN"/>
+ <reg32 offset="0x2115" name="RB_Z_CLAMP_MAX"/>
+
+ <!-- PC registers -->
+ <reg32 offset="0x21e1" name="VGT_BIN_BASE">
+ <doc>
+ seems to be where firmware writes BIN_DATA_ADDR from
+ CP_SET_BIN_DATA packet.. probably should be called
+ PC_BIN_BASE (just using name from yamato for now)
+ </doc>
+ </reg32>
+ <reg32 offset="0x21e2" name="VGT_BIN_SIZE">
+ <doc>probably should be PC_BIN_SIZE</doc>
+ </reg32>
+ <reg32 offset="0x21e4" name="PC_VSTREAM_CONTROL">
+ <doc>SIZE is current pipe width * height (in tiles)</doc>
+ <bitfield name="SIZE" low="16" high="21" type="uint"/>
+ <doc>
+ N is some sort of slot # between 0 and (SIZE-1). When
+ multiple tiles use the same pipe, each tile gets a unique slot #.
+ </doc>
+ <bitfield name="N" low="22" high="26" type="uint"/>
+ </reg32>
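+ <!--
+ Example (hypothetical): a pipe covering 2x2 tiles would have
+ SIZE = 2 * 2 = 4, and the four tiles sharing that pipe would get
+ N = 0, 1, 2 and 3 respectively, if the slot guess above is right.
+ -->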
+ <reg32 offset="0x21ea" name="PC_VERTEX_REUSE_BLOCK_CNTL"/>
+ <reg32 offset="0x21ec" name="PC_PRIM_VTX_CNTL">
+ <doc>
+ STRIDE_IN_VPC: ALIGN(next_outloc - 8, 4) / 4
+ (but in cases where you'd expect 1, the blob driver uses
+ 2, so possibly 0 (no varyings) or a minimum of 2).  See the
+ example below.
+ </doc>
+ <bitfield name="STRIDE_IN_VPC" low="0" high="4" type="uint"/>
+ <bitfield name="POLYMODE_FRONT_PTYPE" low="5" high="7" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_BACK_PTYPE" low="8" high="10" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_ENABLE" pos="12" type="boolean"/>
+ <bitfield name="PRIMITIVE_RESTART" pos="20" type="boolean"/>
+ <bitfield name="PROVOKING_VTX_LAST" pos="25" type="boolean"/>
+ <!-- PSIZE bit set if gl_PointSize written: -->
+ <bitfield name="PSIZE" pos="26" type="boolean"/>
+ </reg32>
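+ <!--
+ Worked example for STRIDE_IN_VPC (hypothetical shader): outlocs
+ seem to start at 8 (see SP_VS_VPC_DST below), so with two vec4
+ varyings next_outloc = 16 and STRIDE_IN_VPC =
+ ALIGN(16 - 8, 4) / 4 = 2.  With a single vec4 varying the formula
+ gives 1, but per the note above the blob apparently still uses 2.
+ -->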
+ <reg32 offset="0x21ed" name="PC_RESTART_INDEX"/>
+
+ <!-- HLSQ registers -->
+ <bitset name="a3xx_hlsq_vs_fs_control_reg" inline="yes">
+ <bitfield name="CONSTLENGTH" low="0" high="9" type="uint"/>
+ <bitfield name="CONSTSTARTOFFSET" low="12" high="20" type="uint"/>
+ <bitfield name="INSTRLENGTH" low="24" high="31" type="uint"/>
+ </bitset>
+ <bitset name="a3xx_hlsq_const_vs_fs_presv_range_reg" inline="yes">
+ <!-- are these a3xx_regid?? -->
+ <bitfield name="STARTENTRY" low="0" high="8"/>
+ <bitfield name="ENDENTRY" low="16" high="24"/>
+ </bitset>
+
+ <reg32 offset="0x2200" name="HLSQ_CONTROL_0_REG">
+ <bitfield name="FSTHREADSIZE" low="4" high="5" type="a3xx_threadsize"/>
+ <bitfield name="FSSUPERTHREADENABLE" pos="6" type="boolean"/>
+ <bitfield name="COMPUTEMODE" pos="8" type="boolean"/>
+ <bitfield name="SPSHADERRESTART" pos="9" type="boolean"/>
+ <bitfield name="RESERVED2" pos="10" type="boolean"/>
+ <bitfield name="CYCLETIMEOUTLIMITVPC" low="12" high="23" type="uint"/>
+ <bitfield name="FSONLYTEX" pos="25" type="boolean"/>
+ <bitfield name="CHUNKDISABLE" pos="26" type="boolean"/>
+ <bitfield name="CONSTMODE" pos="27" type="uint"/>
+ <bitfield name="LAZYUPDATEDISABLE" pos="28" type="boolean"/>
+ <bitfield name="SPCONSTFULLUPDATE" pos="29" type="boolean"/>
+ <bitfield name="TPFULLUPDATE" pos="30" type="boolean"/>
+ <bitfield name="SINGLECONTEXT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2201" name="HLSQ_CONTROL_1_REG">
+ <bitfield name="VSTHREADSIZE" low="6" high="7" type="a3xx_threadsize"/>
+ <bitfield name="VSSUPERTHREADENABLE" pos="8" type="boolean"/>
+ <bitfield name="FRAGCOORDXYREGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="FRAGCOORDZWREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x2202" name="HLSQ_CONTROL_2_REG">
+ <bitfield name="FACENESSREGID" low="2" high="9" type="a3xx_regid"/>
+ <bitfield name="COVVALUEREGID" low="18" high="25" type="a3xx_regid"/>
+ <bitfield name="PRIMALLOCTHRESHOLD" low="26" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2203" name="HLSQ_CONTROL_3_REG">
+ <bitfield name="IJPERSPCENTERREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJNONPERSPCENTERREGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="IJPERSPCENTROIDREGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="IJNONPERSPCENTROIDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x2204" name="HLSQ_VS_CONTROL_REG" type="a3xx_hlsq_vs_fs_control_reg"/>
+ <reg32 offset="0x2205" name="HLSQ_FS_CONTROL_REG" type="a3xx_hlsq_vs_fs_control_reg"/>
+ <reg32 offset="0x2206" name="HLSQ_CONST_VSPRESV_RANGE_REG" type="a3xx_hlsq_const_vs_fs_presv_range_reg"/>
+ <reg32 offset="0x2207" name="HLSQ_CONST_FSPRESV_RANGE_REG" type="a3xx_hlsq_const_vs_fs_presv_range_reg"/>
+ <reg32 offset="0x220a" name="HLSQ_CL_NDRANGE_0_REG">
+ <bitfield name="WORKDIM" low="0" high="1" type="uint"/>
+ <bitfield name="LOCALSIZE0" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZE1" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZE2" low="22" high="31" type="uint"/>
+ </reg32>
+ <array offset="0x220b" name="HLSQ_CL_GLOBAL_WORK" stride="2" length="3">
+ <doc>indexed by dimension</doc>
+ <reg32 offset="0" name="SIZE" type="uint"/>
+ <reg32 offset="1" name="OFFSET" type="uint"/>
+ </array>
+ <reg32 offset="0x2211" name="HLSQ_CL_CONTROL_0_REG"/>
+ <reg32 offset="0x2212" name="HLSQ_CL_CONTROL_1_REG"/>
+ <reg32 offset="0x2214" name="HLSQ_CL_KERNEL_CONST_REG"/>
+ <array offset="0x2215" name="HLSQ_CL_KERNEL_GROUP" stride="1" length="3">
+ <doc>indexed by dimension, global_size / local_size</doc>
+ <reg32 offset="0" name="RATIO" type="uint"/>
+ </array>
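+ <!--
+ Example for RATIO (made-up sizes): a kernel with global size 64
+ and local size 16 in dimension 0 would presumably have
+ RATIO[0] = 64 / 16 = 4.
+ -->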
+ <reg32 offset="0x2216" name="HLSQ_CL_KERNEL_GROUP_Y_REG" type="uint"/>
+ <reg32 offset="0x2217" name="HLSQ_CL_KERNEL_GROUP_Z_REG" type="uint"/>
+ <reg32 offset="0x221a" name="HLSQ_CL_WG_OFFSET_REG"/>
+
+ <!-- VFD registers -->
+ <reg32 offset="0x2240" name="VFD_CONTROL_0">
+ <doc>
+ TOTALATTRTOVS is # of attributes to vertex shader, in register
+ slots (ie. vec4+vec3 -> 7)
+ </doc>
+ <bitfield name="TOTALATTRTOVS" low="0" high="17" type="uint"/>
+ <bitfield name="PACKETSIZE" low="18" high="21" type="uint"/>
+ <doc>STRMDECINSTRCNT is # of VFD_DECODE_INSTR registers valid</doc>
+ <bitfield name="STRMDECINSTRCNT" low="22" high="26" type="uint"/>
+ <doc>STRMFETCHINSTRCNT is # of VFD_FETCH_INSTR registers valid</doc>
+ <bitfield name="STRMFETCHINSTRCNT" low="27" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2241" name="VFD_CONTROL_1">
+ <doc>MAXSTORAGE could be # of attributes/vbo's</doc>
+ <bitfield name="MAXSTORAGE" low="0" high="3" type="uint"/>
+ <bitfield name="MAXTHRESHOLD" low="4" high="7" type="uint"/>
+ <bitfield name="MINTHRESHOLD" low="8" high="11" type="uint"/>
+ <bitfield name="REGID4VTX" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="REGID4INST" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x2242" name="VFD_INDEX_MIN" type="uint"/>
+ <reg32 offset="0x2243" name="VFD_INDEX_MAX" type="uint"/>
+ <reg32 offset="0x2244" name="VFD_INSTANCEID_OFFSET" type="uint"/>
+ <reg32 offset="0x2245" name="VFD_INDEX_OFFSET" type="uint"/>
+ <array offset="0x2246" name="VFD_FETCH" stride="2" length="16">
+ <reg32 offset="0x0" name="INSTR_0">
+ <bitfield name="FETCHSIZE" low="0" high="6" type="uint"/>
+ <bitfield name="BUFSTRIDE" low="7" high="15" type="uint"/>
+ <bitfield name="INSTANCED" pos="16" type="boolean"/>
+ <bitfield name="SWITCHNEXT" pos="17" type="boolean"/>
+ <bitfield name="INDEXCODE" low="18" high="23" type="uint"/>
+ <bitfield name="STEPRATE" low="24" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x1" name="INSTR_1"/>
+ </array>
+ <array offset="0x2266" name="VFD_DECODE" stride="1" length="16">
+ <reg32 offset="0x0" name="INSTR">
+ <bitfield name="WRITEMASK" low="0" high="3" type="hex"/>
+ <!-- not sure if this is a bit flag and another flag above it, or?? -->
+ <bitfield name="CONSTFILL" pos="4" type="boolean"/>
+ <bitfield name="FORMAT" low="6" high="11" type="a3xx_vtx_fmt"/>
+ <bitfield name="REGID" low="12" high="19" type="a3xx_regid"/>
+ <bitfield name="INT" pos="20" type="boolean"/>
+ <doc>SHIFTCNT appears to be size, ie. FLOAT_32_32_32 is 12, and BYTE_8 is 1</doc>
+ <bitfield name="SWAP" low="22" high="23" type="a3xx_color_swap"/>
+ <bitfield name="SHIFTCNT" low="24" high="28" type="uint"/>
+ <bitfield name="LASTCOMPVALID" pos="29" type="boolean"/>
+ <bitfield name="SWITCHNEXT" pos="30" type="boolean"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x227e" name="VFD_VS_THREADING_THRESHOLD">
+ <bitfield name="REGID_THRESHOLD" low="0" high="3" type="uint"/>
+ <!-- <bitfield name="RESERVED6" low="4" high="7" type="uint"/> -->
+ <bitfield name="REGID_VTXCNT" low="8" high="15" type="a3xx_regid"/>
+ </reg32>
+
+ <!-- VPC registers -->
+ <reg32 offset="0x2280" name="VPC_ATTR">
+ <bitfield name="TOTALATTR" low="0" high="8" type="uint"/>
+ <!-- PSIZE bit set if gl_PointSize written: -->
+ <bitfield name="PSIZE" pos="9" type="boolean"/>
+ <bitfield name="THRDASSIGN" low="12" high="27" type="uint"/>
+ <bitfield name="LMSIZE" low="28" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2281" name="VPC_PACK">
+ <!-- these always seem to be set to the same value as TOTALATTR -->
+ <bitfield name="NUMFPNONPOSVAR" low="8" high="15" type="uint"/>
+ <bitfield name="NUMNONPOSVSVAR" low="16" high="23" type="uint"/>
+ </reg32>
+ <!--
+ varying interpolation mode.  One field per scalar component
+ (varying slots are scalar, so things don't have to be
+ aligned to vec4).
+ 4 regs * 16 scalar components each => 64 scalars => 16 vec4
+ (see the example after the VPC_VARYING_INTERP array below)
+ -->
+ <array offset="0x2282" name="VPC_VARYING_INTERP" stride="1" length="4">
+ <reg32 offset="0x0" name="MODE">
+ <bitfield name="C0" low="0" high="1" type="a3xx_intp_mode"/>
+ <bitfield name="C1" low="2" high="3" type="a3xx_intp_mode"/>
+ <bitfield name="C2" low="4" high="5" type="a3xx_intp_mode"/>
+ <bitfield name="C3" low="6" high="7" type="a3xx_intp_mode"/>
+ <bitfield name="C4" low="8" high="9" type="a3xx_intp_mode"/>
+ <bitfield name="C5" low="10" high="11" type="a3xx_intp_mode"/>
+ <bitfield name="C6" low="12" high="13" type="a3xx_intp_mode"/>
+ <bitfield name="C7" low="14" high="15" type="a3xx_intp_mode"/>
+ <bitfield name="C8" low="16" high="17" type="a3xx_intp_mode"/>
+ <bitfield name="C9" low="18" high="19" type="a3xx_intp_mode"/>
+ <bitfield name="CA" low="20" high="21" type="a3xx_intp_mode"/>
+ <bitfield name="CB" low="22" high="23" type="a3xx_intp_mode"/>
+ <bitfield name="CC" low="24" high="25" type="a3xx_intp_mode"/>
+ <bitfield name="CD" low="26" high="27" type="a3xx_intp_mode"/>
+ <bitfield name="CE" low="28" high="29" type="a3xx_intp_mode"/>
+ <bitfield name="CF" low="30" high="31" type="a3xx_intp_mode"/>
+ </reg32>
+ </array>
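+ <!--
+ Example of the indexing described above: scalar component number
+ 18 would land in field C2 of the second MODE register (index 1),
+ since 18 / 16 = 1 with remainder 2; the C0..CF field names are
+ just hex component indices within a register.
+ -->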
+ <array offset="0x2286" name="VPC_VARYING_PS_REPL" stride="1" length="4">
+ <reg32 offset="0x0" name="MODE">
+ <bitfield name="C0" low="0" high="1" type="a3xx_repl_mode"/>
+ <bitfield name="C1" low="2" high="3" type="a3xx_repl_mode"/>
+ <bitfield name="C2" low="4" high="5" type="a3xx_repl_mode"/>
+ <bitfield name="C3" low="6" high="7" type="a3xx_repl_mode"/>
+ <bitfield name="C4" low="8" high="9" type="a3xx_repl_mode"/>
+ <bitfield name="C5" low="10" high="11" type="a3xx_repl_mode"/>
+ <bitfield name="C6" low="12" high="13" type="a3xx_repl_mode"/>
+ <bitfield name="C7" low="14" high="15" type="a3xx_repl_mode"/>
+ <bitfield name="C8" low="16" high="17" type="a3xx_repl_mode"/>
+ <bitfield name="C9" low="18" high="19" type="a3xx_repl_mode"/>
+ <bitfield name="CA" low="20" high="21" type="a3xx_repl_mode"/>
+ <bitfield name="CB" low="22" high="23" type="a3xx_repl_mode"/>
+ <bitfield name="CC" low="24" high="25" type="a3xx_repl_mode"/>
+ <bitfield name="CD" low="26" high="27" type="a3xx_repl_mode"/>
+ <bitfield name="CE" low="28" high="29" type="a3xx_repl_mode"/>
+ <bitfield name="CF" low="30" high="31" type="a3xx_repl_mode"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x228a" name="VPC_VARY_CYLWRAP_ENABLE_0"/>
+ <reg32 offset="0x228b" name="VPC_VARY_CYLWRAP_ENABLE_1"/>
+
+ <!-- SP registers -->
+ <bitset name="a3xx_vs_fs_length_reg" inline="yes">
+ <bitfield name="SHADERLENGTH" low="0" high="31" type="uint"/>
+ </bitset>
+
+ <bitset name="sp_vs_fs_obj_offset_reg" inline="yes">
+ <bitfield name="FIRSTEXECINSTROFFSET" low="0" high="15" type="uint"/>
+ <doc>
+ From register spec:
+ SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object
+ start offset in on chip RAM,
+ 128bit aligned
+ </doc>
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x22c0" name="SP_SP_CTRL_REG">
+ <!-- this bit is set during resolve pass: -->
+ <bitfield name="RESOLVE" pos="16" type="boolean"/>
+ <bitfield name="CONSTMODE" pos="18" type="uint"/>
+ <bitfield name="BINNING" pos="19" type="boolean"/>
+ <bitfield name="SLEEPMODE" low="20" high="21" type="uint"/>
+ <!-- L0MODE==1 when oxiliForceSpL0ModeBuffer=1 -->
+ <bitfield name="L0MODE" low="22" high="23" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22c4" name="SP_VS_CTRL_REG0">
+ <bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
+ <bitfield name="INSTRBUFFERMODE" pos="1" type="a3xx_instrbuffermode"/>
+ <!-- maybe CACHEINVALID is two bits?? -->
+ <bitfield name="CACHEINVALID" pos="2" type="boolean"/>
+ <bitfield name="ALUSCHMODE" pos="3" type="boolean"/>
+ <doc>
+ The full/half register footprint is in units of four components,
+ so if r0.x is used, that counts as all of r0.[xyzw] being used.
+ There are separate full/half register footprint values as the
+ full and half registers are independent (not overlapping).
+ Presumably the thread scheduler hardware allocates the full/half
+ register names from the actual physical register file and
+ handles the register renaming.
+ </doc>
+ <bitfield name="HALFREGFOOTPRINT" low="4" high="9" type="uint"/>
+ <bitfield name="FULLREGFOOTPRINT" low="10" high="15" type="uint"/>
+ <bitfield name="THREADSIZE" pos="20" type="a3xx_threadsize"/>
+ <bitfield name="SUPERTHREADMODE" pos="21" type="boolean"/>
+ <doc>
+ From regspec:
+ SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+ </doc>
+ <bitfield name="LENGTH" low="24" high="31" type="uint"/>
+ </reg32>
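+ <!--
+ Example for LENGTH (assuming 64-bit instructions, which is a
+ guess not stated here): with unit = 256 bits, a hypothetical
+ 64-instruction shader is 64 * 64 = 4096 bits, giving
+ LENGTH = 4096 / 256 = 16.
+ -->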
+ <reg32 offset="0x22c5" name="SP_VS_CTRL_REG1">
+ <bitfield name="CONSTLENGTH" low="0" high="9" type="uint"/>
+ <!--
+ not sure about full vs half const.. I can't get the blob to
+ generate anything with a mediump/lowp uniform.
+ -->
+ <bitfield name="CONSTFOOTPRINT" low="10" high="19" type="uint"/>
+ <bitfield name="INITIALOUTSTANDING" low="24" high="30" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22c6" name="SP_VS_PARAM_REG">
+ <bitfield name="POSREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="PSIZEREGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="POS2DMODE" pos="16" type="boolean"/>
+ <bitfield name="TOTALVSOUTVAR" low="20" high="24" type="uint"/>
+ </reg32>
+ <array offset="0x22c7" name="SP_VS_OUT" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="A_HALF" pos="8" type="boolean"/>
+ <bitfield name="A_COMPMASK" low="9" high="12" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="B_HALF" pos="24" type="boolean"/>
+ <bitfield name="B_COMPMASK" low="25" high="28" type="hex"/>
+ </reg32>
+ </array>
+ <array offset="0x22d0" name="SP_VS_VPC_DST" stride="1" length="4">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ These seem to be offsets for storage of the varyings.
+ They always seem to start from 8; possibly locs 0 and 4
+ are for gl_Position and gl_PointSize?
+ </doc>
+ <bitfield name="OUTLOC0" low="0" high="6" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="14" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="22" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="30" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x22d4" name="SP_VS_OBJ_OFFSET_REG" type="sp_vs_fs_obj_offset_reg"/>
+ <doc>
+ SP_VS_OBJ_START_REG contains pointer to the vertex shader program,
+ immediately followed by the binning shader program (although I
+ guess that is probably just re-using the same gpu buffer)
+ </doc>
+ <reg32 offset="0x22d5" name="SP_VS_OBJ_START_REG"/>
+ <reg32 offset="0x22d6" name="SP_VS_PVT_MEM_PARAM_REG">
+ <bitfield name="MEMSIZEPERITEM" low="0" high="7" shr="7">
+ <doc>The size of memory that ldp/stp can address, in 128-byte increments (see the example below).</doc>
+ </bitfield>
+ <bitfield name="HWSTACKOFFSET" low="8" high="23" type="uint"/>
+ <bitfield name="HWSTACKSIZEPERTHREAD" low="24" high="31" type="uint"/>
+ </reg32>
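+ <!--
+ Example for MEMSIZEPERITEM (made-up size): 2048 bytes of private
+ memory per item would presumably be programmed as 2048 / 128 = 16,
+ given the shr="7" / 128-byte-increment encoding above.
+ -->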
+ <reg32 offset="0x22d7" name="SP_VS_PVT_MEM_ADDR_REG">
+ <bitfield name="BURSTLEN" low="0" high="4"/>
+ <bitfield name="SHADERSTARTADDRESS" shr="5" low="5" high="31"/>
+ </reg32>
+ <reg32 offset="0x22d8" name="SP_VS_PVT_MEM_SIZE_REG"/>
+ <reg32 offset="0x22df" name="SP_VS_LENGTH_REG" type="a3xx_vs_fs_length_reg"/>
+ <reg32 offset="0x22e0" name="SP_FS_CTRL_REG0">
+ <bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
+ <bitfield name="INSTRBUFFERMODE" pos="1" type="a3xx_instrbuffermode"/>
+ <!-- maybe CACHEINVALID is two bits?? -->
+ <bitfield name="CACHEINVALID" pos="2" type="boolean"/>
+ <bitfield name="ALUSCHMODE" pos="3" type="boolean"/>
+ <doc>
+ The full/half register footprint is in units of four components,
+ so if r0.x is used, that counts as all of r0.[xyzw] being used.
+ There are separate full/half register footprint values as the
+ full and half registers are independent (not overlapping).
+ Presumably the thread scheduler hardware allocates the full/half
+ register names from the actual physical register file and
+ handles the register renaming.
+ </doc>
+ <bitfield name="HALFREGFOOTPRINT" low="4" high="9" type="uint"/>
+ <bitfield name="FULLREGFOOTPRINT" low="10" high="15" type="uint"/>
+ <bitfield name="FSBYPASSENABLE" pos="17" type="boolean"/>
+ <bitfield name="INOUTREGOVERLAP" pos="18" type="boolean"/>
+ <bitfield name="OUTORDERED" pos="19" type="boolean"/>
+ <bitfield name="THREADSIZE" pos="20" type="a3xx_threadsize"/>
+ <bitfield name="SUPERTHREADMODE" pos="21" type="boolean"/>
+ <bitfield name="PIXLODENABLE" pos="22" type="boolean"/>
+ <bitfield name="COMPUTEMODE" pos="23" type="boolean"/>
+ <doc>
+ From regspec:
+ SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+ </doc>
+ <bitfield name="LENGTH" low="24" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22e1" name="SP_FS_CTRL_REG1">
+ <bitfield name="CONSTLENGTH" low="0" high="9" type="uint"/>
+ <bitfield name="CONSTFOOTPRINT" low="10" high="19" type="uint"/>
+ <bitfield name="INITIALOUTSTANDING" low="20" high="23" type="uint"/>
+ <bitfield name="HALFPRECVAROFFSET" low="24" high="30" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22e2" name="SP_FS_OBJ_OFFSET_REG" type="sp_vs_fs_obj_offset_reg"/>
+ <doc>SP_FS_OBJ_START_REG contains pointer to fragment shader program</doc>
+ <reg32 offset="0x22e3" name="SP_FS_OBJ_START_REG"/>
+ <reg32 offset="0x22e4" name="SP_FS_PVT_MEM_PARAM_REG">
+ <bitfield name="MEMSIZEPERITEM" low="0" high="7" type="uint"/>
+ <bitfield name="HWSTACKOFFSET" low="8" high="23" type="uint"/>
+ <bitfield name="HWSTACKSIZEPERTHREAD" low="24" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22e5" name="SP_FS_PVT_MEM_ADDR_REG">
+ <bitfield name="BURSTLEN" low="0" high="4"/>
+ <bitfield name="SHADERSTARTADDRESS" shr="5" low="5" high="31"/>
+ </reg32>
+ <reg32 offset="0x22e6" name="SP_FS_PVT_MEM_SIZE_REG"/>
+ <reg32 offset="0x22e8" name="SP_FS_FLAT_SHAD_MODE_REG_0">
+ <doc>seems to be one bit per scalar, '1' for flat, '0' for smooth</doc>
+ </reg32>
+ <reg32 offset="0x22e9" name="SP_FS_FLAT_SHAD_MODE_REG_1">
+ <doc>seems to be one bit per scalar, '1' for flat, '0' for smooth</doc>
+ </reg32>
+ <reg32 offset="0x22ec" name="SP_FS_OUTPUT_REG">
+ <bitfield name="MRT" low="0" high="1" type="uint">
+ <doc>render targets - 1</doc>
+ </bitfield>
+ <bitfield name="DEPTH_ENABLE" pos="7" type="boolean"/>
+ <bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
+ </reg32>
+ <array offset="0x22f0" name="SP_FS_MRT" stride="1" length="4">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="HALF_PRECISION" pos="8" type="boolean"/>
+ <bitfield name="SINT" pos="10" type="boolean"/>
+ <bitfield name="UINT" pos="11" type="boolean"/>
+ </reg32>
+ </array>
+ <array offset="0x22f4" name="SP_FS_IMAGE_OUTPUT" stride="1" length="4">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="MRTFORMAT" low="0" high="5" type="a3xx_color_fmt"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x22ff" name="SP_FS_LENGTH_REG" type="a3xx_vs_fs_length_reg"/>
+
+ <reg32 offset="0x2301" name="PA_SC_AA_CONFIG"/>
+ <!-- TPL1 registers -->
+ <!-- assume VS/FS_TEX_OFFSET is same -->
+ <bitset name="a3xx_tpl1_tp_vs_fs_tex_offset" inline="yes">
+ <bitfield name="SAMPLEROFFSET" low="0" high="7" type="uint"/>
+ <bitfield name="MEMOBJOFFSET" low="8" high="15" type="uint"/>
+ <!-- not sure the size of this: -->
+ <bitfield name="BASETABLEPTR" low="16" high="31" type="uint"/>
+ </bitset>
+ <reg32 offset="0x2340" name="TPL1_TP_VS_TEX_OFFSET" type="a3xx_tpl1_tp_vs_fs_tex_offset"/>
+ <reg32 offset="0x2341" name="TPL1_TP_VS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x2342" name="TPL1_TP_FS_TEX_OFFSET" type="a3xx_tpl1_tp_vs_fs_tex_offset"/>
+ <reg32 offset="0x2343" name="TPL1_TP_FS_BORDER_COLOR_BASE_ADDR"/>
+
+ <!-- VBIF registers -->
+ <reg32 offset="0x3001" name="VBIF_CLKON"/>
+ <reg32 offset="0x300c" name="VBIF_FIXED_SORT_EN"/>
+ <reg32 offset="0x300d" name="VBIF_FIXED_SORT_SEL0"/>
+ <reg32 offset="0x300e" name="VBIF_FIXED_SORT_SEL1"/>
+ <reg32 offset="0x301c" name="VBIF_ABIT_SORT"/>
+ <reg32 offset="0x301d" name="VBIF_ABIT_SORT_CONF"/>
+ <reg32 offset="0x302a" name="VBIF_GATE_OFF_WRREQ_EN"/>
+ <reg32 offset="0x302c" name="VBIF_IN_RD_LIM_CONF0"/>
+ <reg32 offset="0x302d" name="VBIF_IN_RD_LIM_CONF1"/>
+ <reg32 offset="0x3030" name="VBIF_IN_WR_LIM_CONF0"/>
+ <reg32 offset="0x3031" name="VBIF_IN_WR_LIM_CONF1"/>
+ <reg32 offset="0x3034" name="VBIF_OUT_RD_LIM_CONF0"/>
+ <reg32 offset="0x3035" name="VBIF_OUT_WR_LIM_CONF0"/>
+ <reg32 offset="0x3036" name="VBIF_DDR_OUT_MAX_BURST"/>
+ <reg32 offset="0x303c" name="VBIF_ARB_CTL"/>
+ <reg32 offset="0x3049" name="VBIF_ROUND_ROBIN_QOS_ARB"/>
+ <reg32 offset="0x3058" name="VBIF_OUT_AXI_AMEMTYPE_CONF0"/>
+ <reg32 offset="0x305e" name="VBIF_OUT_AXI_AOOO_EN"/>
+ <reg32 offset="0x305f" name="VBIF_OUT_AXI_AOOO"/>
+
+ <bitset name="a3xx_vbif_perf_cnt" inline="yes">
+ <bitfield name="CNT0" pos="0" type="boolean"/>
+ <bitfield name="CNT1" pos="1" type="boolean"/>
+ <bitfield name="PWRCNT0" pos="2" type="boolean"/>
+ <bitfield name="PWRCNT1" pos="3" type="boolean"/>
+ <bitfield name="PWRCNT2" pos="4" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x3070" name="VBIF_PERF_CNT_EN" type="a3xx_vbif_perf_cnt"/>
+ <reg32 offset="0x3071" name="VBIF_PERF_CNT_CLR" type="a3xx_vbif_perf_cnt"/>
+ <reg32 offset="0x3072" name="VBIF_PERF_CNT_SEL"/>
+ <reg32 offset="0x3073" name="VBIF_PERF_CNT0_LO"/>
+ <reg32 offset="0x3074" name="VBIF_PERF_CNT0_HI"/>
+ <reg32 offset="0x3075" name="VBIF_PERF_CNT1_LO"/>
+ <reg32 offset="0x3076" name="VBIF_PERF_CNT1_HI"/>
+ <reg32 offset="0x3077" name="VBIF_PERF_PWR_CNT0_LO"/>
+ <reg32 offset="0x3078" name="VBIF_PERF_PWR_CNT0_HI"/>
+ <reg32 offset="0x3079" name="VBIF_PERF_PWR_CNT1_LO"/>
+ <reg32 offset="0x307a" name="VBIF_PERF_PWR_CNT1_HI"/>
+ <reg32 offset="0x307b" name="VBIF_PERF_PWR_CNT2_LO"/>
+ <reg32 offset="0x307c" name="VBIF_PERF_PWR_CNT2_HI"/>
+
+
+ <reg32 offset="0x0c01" name="VSC_BIN_SIZE">
+ <bitfield name="WIDTH" low="0" high="4" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="5" high="9" shr="5" type="uint"/>
+ </reg32>
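+ <!--
+ Example (hypothetical bin size): given the shr="5" encoding, a
+ 128x96 pixel bin would presumably be programmed as
+ WIDTH = 128 / 32 = 4 and HEIGHT = 96 / 32 = 3.
+ -->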
+
+ <reg32 offset="0x0c02" name="VSC_SIZE_ADDRESS"/>
+ <array offset="0x0c06" name="VSC_PIPE" stride="3" length="8">
+ <reg32 offset="0x0" name="CONFIG">
+ <doc>
+ Configures the mapping between VSC_PIPE buffer and
+ bin: X/Y specify the bin index in the horiz/vert
+ direction (0,0 is upper left, 0,1 is the leftmost bin
+ on the second row, and so on).  W/H specify the number
+ of bins assigned to this VSC_PIPE in the horiz/vert
+ dimension.  See the example after the array below.
+ </doc>
+ <bitfield name="X" low="0" high="9" type="uint"/>
+ <bitfield name="Y" low="10" high="19" type="uint"/>
+ <bitfield name="W" low="20" high="23" type="uint"/>
+ <bitfield name="H" low="24" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x1" name="DATA_ADDRESS"/>
+ <reg32 offset="0x2" name="DATA_LENGTH"/>
+ </array>
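+ <!--
+ Hypothetical example of the CONFIG encoding described above: with
+ the screen split into a 4x2 grid of bins and one bin per pipe,
+ pipe n might use X = n % 4, Y = n / 4, W = 1, H = 1.  How the
+ blob actually distributes bins across pipes is not implied here.
+ -->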
+ <reg32 offset="0x0c3c" name="VSC_BIN_CONTROL">
+ <doc>seems to be set to 0x00000001 during binning pass</doc>
+ <bitfield name="BINNING_ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0c3d" name="UNKNOWN_0C3D">
+ <doc>seems to be always set to 0x00000001</doc>
+ </reg32>
+ <reg32 offset="0x0c48" name="PC_PERFCOUNTER0_SELECT" type="a3xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0c49" name="PC_PERFCOUNTER1_SELECT" type="a3xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0c4a" name="PC_PERFCOUNTER2_SELECT" type="a3xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0c4b" name="PC_PERFCOUNTER3_SELECT" type="a3xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0c81" name="GRAS_TSE_DEBUG_ECO">
+ <doc>seems to be always set to 0x00000001</doc>
+ </reg32>
+
+ <reg32 offset="0x0c88" name="GRAS_PERFCOUNTER0_SELECT" type="a3xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c89" name="GRAS_PERFCOUNTER1_SELECT" type="a3xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c8a" name="GRAS_PERFCOUNTER2_SELECT" type="a3xx_gras_ras_perfcounter_select"/>
+ <reg32 offset="0x0c8b" name="GRAS_PERFCOUNTER3_SELECT" type="a3xx_gras_ras_perfcounter_select"/>
+ <array offset="0x0ca0" name="GRAS_CL_USER_PLANE" stride="4" length="6">
+ <reg32 offset="0x0" name="X"/>
+ <reg32 offset="0x1" name="Y"/>
+ <reg32 offset="0x2" name="Z"/>
+ <reg32 offset="0x3" name="W"/>
+ </array>
+ <reg32 offset="0x0cc0" name="RB_GMEM_BASE_ADDR"/>
+ <reg32 offset="0x0cc1" name="RB_DEBUG_ECO_CONTROLS_ADDR"/>
+ <reg32 offset="0x0cc6" name="RB_PERFCOUNTER0_SELECT" type="a3xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cc7" name="RB_PERFCOUNTER1_SELECT" type="a3xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0ce0" name="RB_FRAME_BUFFER_DIMENSION">
+ <bitfield name="WIDTH" low="0" high="13" type="uint"/>
+ <bitfield name="HEIGHT" low="14" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0e00" name="HLSQ_PERFCOUNTER0_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e01" name="HLSQ_PERFCOUNTER1_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e02" name="HLSQ_PERFCOUNTER2_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e03" name="HLSQ_PERFCOUNTER3_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e04" name="HLSQ_PERFCOUNTER4_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e05" name="HLSQ_PERFCOUNTER5_SELECT" type="a3xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e43" name="UNKNOWN_0E43">
+ <doc>seems to be always set to 0x00000001</doc>
+ </reg32>
+ <reg32 offset="0x0e44" name="VFD_PERFCOUNTER0_SELECT" type="a3xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e45" name="VFD_PERFCOUNTER1_SELECT" type="a3xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e61" name="VPC_VPC_DEBUG_RAM_SEL"/>
+ <reg32 offset="0x0e62" name="VPC_VPC_DEBUG_RAM_READ"/>
+ <reg32 offset="0x0e64" name="VPC_PERFCOUNTER0_SELECT" type="a3xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e65" name="VPC_PERFCOUNTER1_SELECT" type="a3xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e82" name="UCHE_CACHE_MODE_CONTROL_REG"/>
+ <reg32 offset="0x0e84" name="UCHE_PERFCOUNTER0_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e85" name="UCHE_PERFCOUNTER1_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e86" name="UCHE_PERFCOUNTER2_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e87" name="UCHE_PERFCOUNTER3_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e88" name="UCHE_PERFCOUNTER4_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e89" name="UCHE_PERFCOUNTER5_SELECT" type="a3xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea0" name="UCHE_CACHE_INVALIDATE0_REG">
+ <!-- might be shifted right by 5, assuming a 32-byte cache line size.. -->
+ <bitfield name="ADDR" low="0" high="27" type="hex"/>
+ </reg32>
+ <reg32 offset="0x0ea1" name="UCHE_CACHE_INVALIDATE1_REG">
+ <!-- might be shifted right by 5, assuming a 32-byte cache line size.. -->
+ <bitfield name="ADDR" low="0" high="27" type="hex"/>
+ <!-- I'd assume 2 bits, for FLUSH/INVALIDATE/CLEAN? -->
+ <bitfield name="OPCODE" low="28" high="29" type="a3xx_cache_opcode"/>
+ <bitfield name="ENTIRE_CACHE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0ea6" name="UNKNOWN_0EA6"/>
+ <reg32 offset="0x0ec4" name="SP_PERFCOUNTER0_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec5" name="SP_PERFCOUNTER1_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec6" name="SP_PERFCOUNTER2_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec7" name="SP_PERFCOUNTER3_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec8" name="SP_PERFCOUNTER4_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec9" name="SP_PERFCOUNTER5_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0eca" name="SP_PERFCOUNTER6_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ecb" name="SP_PERFCOUNTER7_SELECT" type="a3xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ee0" name="UNKNOWN_0EE0">
+ <doc>seems to be always set to 0x00000003</doc>
+ </reg32>
+ <reg32 offset="0x0f03" name="UNKNOWN_0F03">
+ <doc>seems to be always set to 0x00000001</doc>
+ </reg32>
+ <reg32 offset="0x0f04" name="TP_PERFCOUNTER0_SELECT" type="a3xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f05" name="TP_PERFCOUNTER1_SELECT" type="a3xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f06" name="TP_PERFCOUNTER2_SELECT" type="a3xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f07" name="TP_PERFCOUNTER3_SELECT" type="a3xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f08" name="TP_PERFCOUNTER4_SELECT" type="a3xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f09" name="TP_PERFCOUNTER5_SELECT" type="a3xx_tp_perfcounter_select"/>
+
+ <!-- this seems to be the register that CP_RUN_OPENCL writes: -->
+ <reg32 offset="0x21f0" name="VGT_CL_INITIATOR"/>
+
+ <!-- seems to be same as a2xx according to fwdump.. -->
+ <reg32 offset="0x21f9" name="VGT_EVENT_INITIATOR"/>
+ <reg32 offset="0x21fc" name="VGT_DRAW_INITIATOR" type="vgt_draw_initiator"/>
+ <reg32 offset="0x21fd" name="VGT_IMMED_DATA"/>
+</domain>
+
+<domain name="A3XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a3xx_tex_filter">
+ <value name="A3XX_TEX_NEAREST" value="0"/>
+ <value name="A3XX_TEX_LINEAR" value="1"/>
+ <value name="A3XX_TEX_ANISO" value="2"/>
+ </enum>
+ <enum name="a3xx_tex_clamp">
+ <value name="A3XX_TEX_REPEAT" value="0"/>
+ <value name="A3XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A3XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A3XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A3XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a3xx_tex_aniso">
+ <value name="A3XX_TEX_ANISO_1" value="0"/>
+ <value name="A3XX_TEX_ANISO_2" value="1"/>
+ <value name="A3XX_TEX_ANISO_4" value="2"/>
+ <value name="A3XX_TEX_ANISO_8" value="3"/>
+ <value name="A3XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="CLAMPENABLE" pos="0" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR" pos="1" type="boolean"/>
+ <bitfield name="XY_MAG" low="2" high="3" type="a3xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="4" high="5" type="a3xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="6" high="8" type="a3xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="9" high="11" type="a3xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="12" high="14" type="a3xx_tex_clamp"/>
+ <bitfield name="ANISO" low="15" high="17" type="a3xx_tex_aniso"/>
+ <bitfield name="COMPARE_FUNC" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="24" type="boolean"/>
+ <!-- UNNORM_COORDS == CLK_NORMALIZED_COORDS_FALSE -->
+ <bitfield name="UNNORM_COORDS" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="LOD_BIAS" low="0" high="10" type="fixed" radix="6"/>
+ <bitfield name="MAX_LOD" low="12" high="21" type="ufixed" radix="6"/>
+ <bitfield name="MIN_LOD" low="22" high="31" type="ufixed" radix="6"/>
+ </reg32>
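+ <!--
+ Example for the fixed-point LOD fields (values are made up): with
+ radix="6", ie. 6 fractional bits, a MAX_LOD of 5.0 would
+ presumably be encoded as 5.0 * 64 = 320, and a LOD_BIAS of 0.5
+ as 0.5 * 64 = 32.
+ -->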
+</domain>
+
+<domain name="A3XX_TEX_CONST" width="32">
+ <doc>Texture constant dwords</doc>
+ <enum name="a3xx_tex_swiz">
+ <!-- same as a2xx? -->
+ <value name="A3XX_TEX_X" value="0"/>
+ <value name="A3XX_TEX_Y" value="1"/>
+ <value name="A3XX_TEX_Z" value="2"/>
+ <value name="A3XX_TEX_W" value="3"/>
+ <value name="A3XX_TEX_ZERO" value="4"/>
+ <value name="A3XX_TEX_ONE" value="5"/>
+ </enum>
+ <enum name="a3xx_tex_type">
+ <value name="A3XX_TEX_1D" value="0"/>
+ <value name="A3XX_TEX_2D" value="1"/>
+ <value name="A3XX_TEX_CUBE" value="2"/>
+ <value name="A3XX_TEX_3D" value="3"/>
+ </enum>
+ <enum name="a3xx_tex_msaa">
+ <value name="A3XX_TPL1_MSAA1X" value="0"/>
+ <value name="A3XX_TPL1_MSAA2X" value="1"/>
+ <value name="A3XX_TPL1_MSAA4X" value="2"/>
+ <value name="A3XX_TPL1_MSAA8X" value="3"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a3xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a3xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a3xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a3xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a3xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <bitfield name="MSAATEX" low="20" high="21" type="a3xx_tex_msaa"/>
+ <bitfield name="FMT" low="22" high="28" type="a3xx_tex_fmt"/>
+ <bitfield name="NOCONVERT" pos="29" type="boolean"/>
+ <bitfield name="TYPE" low="30" high="31" type="a3xx_tex_type"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="HEIGHT" low="0" high="13" type="uint"/>
+ <bitfield name="WIDTH" low="14" high="27" type="uint"/>
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 16) -->
+ <bitfield name="PITCHALIGN" low="28" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <doc>INDX is index of texture address(es) in MIPMAP state block</doc>
+ <bitfield name="INDX" low="0" high="8" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="12" high="29" type="uint"/>
+ <doc>SWAP bit is set for BGRA instead of RGBA</doc>
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!--
+ Update: the two LAYERSZn seem not to be the same thing.
+ According to Ilia's experimentation the first one goes up
+ to at *least* bit 14..
+ -->
+ <bitfield name="LAYERSZ1" low="0" high="16" shr="12" type="uint"/>
+ <bitfield name="DEPTH" low="17" high="27" type="uint"/>
+ <bitfield name="LAYERSZ2" low="28" high="31" shr="12" type="uint"/>
+ </reg32>
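+ <!--
+ Example for LAYERSZ (hypothetical texture): given the shr="12"
+ encoding (4096-byte units), a 256x256 RGBA8888 array layer of
+ 256 * 256 * 4 = 262144 bytes would presumably be programmed as
+ LAYERSZ1 = 262144 / 4096 = 64.
+ -->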
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a4xx.xml b/drivers/gpu/drm/msm/registers/adreno/a4xx.xml
new file mode 100644
index 000000000000..69a9f9b02bc9
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a4xx.xml
@@ -0,0 +1,2409 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a4xx_color_fmt">
+ <value name="RB4_A8_UNORM" value="0x01"/>
+ <value name="RB4_R8_UNORM" value="0x02"/>
+ <value name="RB4_R8_SNORM" value="0x03"/>
+ <value name="RB4_R8_UINT" value="0x04"/>
+ <value name="RB4_R8_SINT" value="0x05"/>
+
+ <value name="RB4_R4G4B4A4_UNORM" value="0x08"/>
+ <value name="RB4_R5G5B5A1_UNORM" value="0x0a"/>
+ <value name="RB4_R5G6B5_UNORM" value="0x0e"/>
+ <value name="RB4_R8G8_UNORM" value="0x0f"/>
+ <value name="RB4_R8G8_SNORM" value="0x10"/>
+ <value name="RB4_R8G8_UINT" value="0x11"/>
+ <value name="RB4_R8G8_SINT" value="0x12"/>
+ <value name="RB4_R16_UNORM" value="0x13"/>
+ <value name="RB4_R16_SNORM" value="0x14"/>
+ <value name="RB4_R16_FLOAT" value="0x15"/>
+ <value name="RB4_R16_UINT" value="0x16"/>
+ <value name="RB4_R16_SINT" value="0x17"/>
+
+ <value name="RB4_R8G8B8_UNORM" value="0x19"/>
+
+ <value name="RB4_R8G8B8A8_UNORM" value="0x1a"/>
+ <value name="RB4_R8G8B8A8_SNORM" value="0x1c"/>
+ <value name="RB4_R8G8B8A8_UINT" value="0x1d"/>
+ <value name="RB4_R8G8B8A8_SINT" value="0x1e"/>
+ <value name="RB4_R10G10B10A2_UNORM" value="0x1f"/>
+ <value name="RB4_R10G10B10A2_UINT" value="0x22"/>
+ <value name="RB4_R11G11B10_FLOAT" value="0x27"/>
+ <value name="RB4_R16G16_UNORM" value="0x28"/>
+ <value name="RB4_R16G16_SNORM" value="0x29"/>
+ <value name="RB4_R16G16_FLOAT" value="0x2a"/>
+ <value name="RB4_R16G16_UINT" value="0x2b"/>
+ <value name="RB4_R16G16_SINT" value="0x2c"/>
+ <value name="RB4_R32_FLOAT" value="0x2d"/>
+ <value name="RB4_R32_UINT" value="0x2e"/>
+ <value name="RB4_R32_SINT" value="0x2f"/>
+
+ <value name="RB4_R16G16B16A16_UNORM" value="0x34"/>
+ <value name="RB4_R16G16B16A16_SNORM" value="0x35"/>
+ <value name="RB4_R16G16B16A16_FLOAT" value="0x36"/>
+ <value name="RB4_R16G16B16A16_UINT" value="0x37"/>
+ <value name="RB4_R16G16B16A16_SINT" value="0x38"/>
+ <value name="RB4_R32G32_FLOAT" value="0x39"/>
+ <value name="RB4_R32G32_UINT" value="0x3a"/>
+ <value name="RB4_R32G32_SINT" value="0x3b"/>
+
+ <value name="RB4_R32G32B32A32_FLOAT" value="0x3c"/>
+ <value name="RB4_R32G32B32A32_UINT" value="0x3d"/>
+ <value name="RB4_R32G32B32A32_SINT" value="0x3e"/>
+
+ <value name="RB4_NONE" value="0xff"/>
+</enum>
+
+<enum name="a4xx_tile_mode">
+ <value name="TILE4_LINEAR" value="0"/>
+ <value name="TILE4_2" value="2"/>
+ <value name="TILE4_3" value="3"/>
+</enum>
+
+<enum name="a4xx_vtx_fmt" prefix="chipset">
+ <!-- hmm, shifted by one compared to a3xx?! -->
+ <value name="VFMT4_32_FLOAT" value="0x1"/>
+ <value name="VFMT4_32_32_FLOAT" value="0x2"/>
+ <value name="VFMT4_32_32_32_FLOAT" value="0x3"/>
+ <value name="VFMT4_32_32_32_32_FLOAT" value="0x4"/>
+
+ <value name="VFMT4_16_FLOAT" value="0x5"/>
+ <value name="VFMT4_16_16_FLOAT" value="0x6"/>
+ <value name="VFMT4_16_16_16_FLOAT" value="0x7"/>
+ <value name="VFMT4_16_16_16_16_FLOAT" value="0x8"/>
+
+ <value name="VFMT4_32_FIXED" value="0x9"/>
+ <value name="VFMT4_32_32_FIXED" value="0xa"/>
+ <value name="VFMT4_32_32_32_FIXED" value="0xb"/>
+ <value name="VFMT4_32_32_32_32_FIXED" value="0xc"/>
+
+ <value name="VFMT4_11_11_10_FLOAT" value="0xd"/>
+
+ <!-- beyond here it does not appear to be shifted -->
+ <value name="VFMT4_16_SINT" value="0x10"/>
+ <value name="VFMT4_16_16_SINT" value="0x11"/>
+ <value name="VFMT4_16_16_16_SINT" value="0x12"/>
+ <value name="VFMT4_16_16_16_16_SINT" value="0x13"/>
+ <value name="VFMT4_16_UINT" value="0x14"/>
+ <value name="VFMT4_16_16_UINT" value="0x15"/>
+ <value name="VFMT4_16_16_16_UINT" value="0x16"/>
+ <value name="VFMT4_16_16_16_16_UINT" value="0x17"/>
+ <value name="VFMT4_16_SNORM" value="0x18"/>
+ <value name="VFMT4_16_16_SNORM" value="0x19"/>
+ <value name="VFMT4_16_16_16_SNORM" value="0x1a"/>
+ <value name="VFMT4_16_16_16_16_SNORM" value="0x1b"/>
+ <value name="VFMT4_16_UNORM" value="0x1c"/>
+ <value name="VFMT4_16_16_UNORM" value="0x1d"/>
+ <value name="VFMT4_16_16_16_UNORM" value="0x1e"/>
+ <value name="VFMT4_16_16_16_16_UNORM" value="0x1f"/>
+
+ <value name="VFMT4_32_UINT" value="0x20"/>
+ <value name="VFMT4_32_32_UINT" value="0x21"/>
+ <value name="VFMT4_32_32_32_UINT" value="0x22"/>
+ <value name="VFMT4_32_32_32_32_UINT" value="0x23"/>
+ <value name="VFMT4_32_SINT" value="0x24"/>
+ <value name="VFMT4_32_32_SINT" value="0x25"/>
+ <value name="VFMT4_32_32_32_SINT" value="0x26"/>
+ <value name="VFMT4_32_32_32_32_SINT" value="0x27"/>
+
+ <value name="VFMT4_8_UINT" value="0x28"/>
+ <value name="VFMT4_8_8_UINT" value="0x29"/>
+ <value name="VFMT4_8_8_8_UINT" value="0x2a"/>
+ <value name="VFMT4_8_8_8_8_UINT" value="0x2b"/>
+ <value name="VFMT4_8_UNORM" value="0x2c"/>
+ <value name="VFMT4_8_8_UNORM" value="0x2d"/>
+ <value name="VFMT4_8_8_8_UNORM" value="0x2e"/>
+ <value name="VFMT4_8_8_8_8_UNORM" value="0x2f"/>
+ <value name="VFMT4_8_SINT" value="0x30"/>
+ <value name="VFMT4_8_8_SINT" value="0x31"/>
+ <value name="VFMT4_8_8_8_SINT" value="0x32"/>
+ <value name="VFMT4_8_8_8_8_SINT" value="0x33"/>
+ <value name="VFMT4_8_SNORM" value="0x34"/>
+ <value name="VFMT4_8_8_SNORM" value="0x35"/>
+ <value name="VFMT4_8_8_8_SNORM" value="0x36"/>
+ <value name="VFMT4_8_8_8_8_SNORM" value="0x37"/>
+
+ <value name="VFMT4_10_10_10_2_UINT" value="0x38"/>
+ <value name="VFMT4_10_10_10_2_UNORM" value="0x39"/>
+ <value name="VFMT4_10_10_10_2_SINT" value="0x3a"/>
+ <value name="VFMT4_10_10_10_2_SNORM" value="0x3b"/>
+ <value name="VFMT4_2_10_10_10_UINT" value="0x3c"/>
+ <value name="VFMT4_2_10_10_10_UNORM" value="0x3d"/>
+ <value name="VFMT4_2_10_10_10_SINT" value="0x3e"/>
+ <value name="VFMT4_2_10_10_10_SNORM" value="0x3f"/>
+
+ <value name="VFMT4_NONE" value="0xff"/>
+</enum>
+
+<enum name="a4xx_tex_fmt">
+ <!-- 0x00 .. 0x02 -->
+
+ <!-- 8-bit formats -->
+ <value name="TFMT4_A8_UNORM" value="0x03"/>
+ <value name="TFMT4_8_UNORM" value="0x04"/>
+ <value name="TFMT4_8_SNORM" value="0x05"/>
+ <value name="TFMT4_8_UINT" value="0x06"/>
+ <value name="TFMT4_8_SINT" value="0x07"/>
+
+ <!-- 16-bit formats -->
+ <value name="TFMT4_4_4_4_4_UNORM" value="0x08"/>
+ <value name="TFMT4_5_5_5_1_UNORM" value="0x09"/>
+ <!-- 0x0a -->
+ <value name="TFMT4_5_6_5_UNORM" value="0x0b"/>
+
+ <!-- 0x0c -->
+
+ <value name="TFMT4_L8_A8_UNORM" value="0x0d"/>
+ <value name="TFMT4_8_8_UNORM" value="0x0e"/>
+ <value name="TFMT4_8_8_SNORM" value="0x0f"/>
+ <value name="TFMT4_8_8_UINT" value="0x10"/>
+ <value name="TFMT4_8_8_SINT" value="0x11"/>
+
+ <value name="TFMT4_16_UNORM" value="0x12"/>
+ <value name="TFMT4_16_SNORM" value="0x13"/>
+ <value name="TFMT4_16_FLOAT" value="0x14"/>
+ <value name="TFMT4_16_UINT" value="0x15"/>
+ <value name="TFMT4_16_SINT" value="0x16"/>
+
+ <!-- 0x17 .. 0x1b -->
+
+ <!-- 32-bit formats -->
+ <value name="TFMT4_8_8_8_8_UNORM" value="0x1c"/>
+ <value name="TFMT4_8_8_8_8_SNORM" value="0x1d"/>
+ <value name="TFMT4_8_8_8_8_UINT" value="0x1e"/>
+ <value name="TFMT4_8_8_8_8_SINT" value="0x1f"/>
+
+ <value name="TFMT4_9_9_9_E5_FLOAT" value="0x20"/>
+ <value name="TFMT4_10_10_10_2_UNORM" value="0x21"/>
+ <value name="TFMT4_10_10_10_2_UINT" value="0x22"/>
+ <!-- 0x23 .. 0x24 -->
+ <value name="TFMT4_11_11_10_FLOAT" value="0x25"/>
+
+ <value name="TFMT4_16_16_UNORM" value="0x26"/>
+ <value name="TFMT4_16_16_SNORM" value="0x27"/>
+ <value name="TFMT4_16_16_FLOAT" value="0x28"/>
+ <value name="TFMT4_16_16_UINT" value="0x29"/>
+ <value name="TFMT4_16_16_SINT" value="0x2a"/>
+
+ <value name="TFMT4_32_FLOAT" value="0x2b"/>
+ <value name="TFMT4_32_UINT" value="0x2c"/>
+ <value name="TFMT4_32_SINT" value="0x2d"/>
+
+ <!-- 0x2e .. 0x32 -->
+
+ <!-- 64-bit formats -->
+ <value name="TFMT4_16_16_16_16_UNORM" value="0x33"/>
+ <value name="TFMT4_16_16_16_16_SNORM" value="0x34"/>
+ <value name="TFMT4_16_16_16_16_FLOAT" value="0x35"/>
+ <value name="TFMT4_16_16_16_16_UINT" value="0x36"/>
+ <value name="TFMT4_16_16_16_16_SINT" value="0x37"/>
+
+ <value name="TFMT4_32_32_FLOAT" value="0x38"/>
+ <value name="TFMT4_32_32_UINT" value="0x39"/>
+ <value name="TFMT4_32_32_SINT" value="0x3a"/>
+
+ <!-- 96-bit formats -->
+ <value name="TFMT4_32_32_32_FLOAT" value="0x3b"/>
+ <value name="TFMT4_32_32_32_UINT" value="0x3c"/>
+ <value name="TFMT4_32_32_32_SINT" value="0x3d"/>
+
+ <!-- 0x3e -->
+
+ <!-- 128-bit formats -->
+ <value name="TFMT4_32_32_32_32_FLOAT" value="0x3f"/>
+ <value name="TFMT4_32_32_32_32_UINT" value="0x40"/>
+ <value name="TFMT4_32_32_32_32_SINT" value="0x41"/>
+
+ <!-- 0x42 .. 0x46 -->
+ <value name="TFMT4_X8Z24_UNORM" value="0x47"/>
+ <!-- 0x48 .. 0x55 -->
+
+ <!-- compressed formats -->
+ <value name="TFMT4_DXT1" value="0x56"/>
+ <value name="TFMT4_DXT3" value="0x57"/>
+ <value name="TFMT4_DXT5" value="0x58"/>
+ <!-- 0x59 -->
+ <value name="TFMT4_RGTC1_UNORM" value="0x5a"/>
+ <value name="TFMT4_RGTC1_SNORM" value="0x5b"/>
+ <!-- 0x5c .. 0x5d -->
+ <value name="TFMT4_RGTC2_UNORM" value="0x5e"/>
+ <value name="TFMT4_RGTC2_SNORM" value="0x5f"/>
+ <!-- 0x60 -->
+ <value name="TFMT4_BPTC_UFLOAT" value="0x61"/>
+ <value name="TFMT4_BPTC_FLOAT" value="0x62"/>
+ <value name="TFMT4_BPTC" value="0x63"/>
+ <value name="TFMT4_ATC_RGB" value="0x64"/>
+ <value name="TFMT4_ATC_RGBA_EXPLICIT" value="0x65"/>
+ <value name="TFMT4_ATC_RGBA_INTERPOLATED" value="0x66"/>
+ <value name="TFMT4_ETC2_RG11_UNORM" value="0x67"/>
+ <value name="TFMT4_ETC2_RG11_SNORM" value="0x68"/>
+ <value name="TFMT4_ETC2_R11_UNORM" value="0x69"/>
+ <value name="TFMT4_ETC2_R11_SNORM" value="0x6a"/>
+ <value name="TFMT4_ETC1" value="0x6b"/>
+ <value name="TFMT4_ETC2_RGB8" value="0x6c"/>
+ <value name="TFMT4_ETC2_RGBA8" value="0x6d"/>
+ <value name="TFMT4_ETC2_RGB8A1" value="0x6e"/>
+ <value name="TFMT4_ASTC_4x4" value="0x6f"/>
+ <value name="TFMT4_ASTC_5x4" value="0x70"/>
+ <value name="TFMT4_ASTC_5x5" value="0x71"/>
+ <value name="TFMT4_ASTC_6x5" value="0x72"/>
+ <value name="TFMT4_ASTC_6x6" value="0x73"/>
+ <value name="TFMT4_ASTC_8x5" value="0x74"/>
+ <value name="TFMT4_ASTC_8x6" value="0x75"/>
+ <value name="TFMT4_ASTC_8x8" value="0x76"/>
+ <value name="TFMT4_ASTC_10x5" value="0x77"/>
+ <value name="TFMT4_ASTC_10x6" value="0x78"/>
+ <value name="TFMT4_ASTC_10x8" value="0x79"/>
+ <value name="TFMT4_ASTC_10x10" value="0x7a"/>
+ <value name="TFMT4_ASTC_12x10" value="0x7b"/>
+ <value name="TFMT4_ASTC_12x12" value="0x7c"/>
+ <!-- 0x7d .. 0x7f -->
+
+ <value name="TFMT4_NONE" value="0xff"/>
+</enum>
+
+<enum name="a4xx_depth_format">
+ <value name="DEPTH4_NONE" value="0"/>
+ <value name="DEPTH4_16" value="1"/>
+ <value name="DEPTH4_24_8" value="2"/>
+ <value name="DEPTH4_32" value="3"/>
+</enum>
+
+<!--
+NOTE counters extracted from test-perf log with the following awful
+script:
+##################
+#!/bin/bash
+
+log=$1
+
+grep -F "counter
+countable
+group" $log | grep -v gl > shortlist.txt
+
+countable=""
+IFS=$'\n'; for line in $(cat shortlist.txt); do
+ # parse ######### group[$n]: $name
+ l=${line########### group}
+ if [ $l != $line ]; then
+ group=`echo $line | awk '{print $3}'`
+ echo "Group: $group"
+ continue
+ fi
+ # parse ######### counter[$n]: $name
+ l=${line########### counter}
+ if [ $l != $line ]; then
+ countable=`echo $line | awk '{print $3}'`
+ #echo " Countable: $countable"
+ continue
+ fi
+ # parse countable:
+ l=${line## countable:}
+ if [ $l != $line ]; then
+ val=`echo $line | awk '{print $2}'`
+ echo "<value value=\"$val\" name=\"$countable\"/>"
+ fi
+
+done
+##################
+ -->
+<enum name="a4xx_ccu_perfcounter_select">
+ <value value="0" name="CCU_BUSY_CYCLES"/>
+ <value value="2" name="CCU_RB_DEPTH_RETURN_STALL"/>
+ <value value="3" name="CCU_RB_COLOR_RETURN_STALL"/>
+ <value value="6" name="CCU_DEPTH_BLOCKS"/>
+ <value value="7" name="CCU_COLOR_BLOCKS"/>
+ <value value="8" name="CCU_DEPTH_BLOCK_HIT"/>
+ <value value="9" name="CCU_COLOR_BLOCK_HIT"/>
+ <value value="10" name="CCU_DEPTH_FLAG1_COUNT"/>
+ <value value="11" name="CCU_DEPTH_FLAG2_COUNT"/>
+ <value value="12" name="CCU_DEPTH_FLAG3_COUNT"/>
+ <value value="13" name="CCU_DEPTH_FLAG4_COUNT"/>
+ <value value="14" name="CCU_COLOR_FLAG1_COUNT"/>
+ <value value="15" name="CCU_COLOR_FLAG2_COUNT"/>
+ <value value="16" name="CCU_COLOR_FLAG3_COUNT"/>
+ <value value="17" name="CCU_COLOR_FLAG4_COUNT"/>
+ <value value="18" name="CCU_PARTIAL_BLOCK_READ"/>
+</enum>
+
+<!--
+NOTE other than CP_ALWAYS_COUNT (which is the only one we use so far),
+on a3xx the countable #'s from AMD_performance_monitor disagreed with
+the TRM.  All these #'s for a4xx come from AMD_performance_monitor, so
+perhaps they should be taken with a grain of salt.
+-->
+<enum name="a4xx_cp_perfcounter_select">
+ <!-- first ctr at least seems same as a3xx, so we can measure freq -->
+ <value value="0" name="CP_ALWAYS_COUNT"/>
+ <value value="1" name="CP_BUSY"/>
+ <value value="2" name="CP_PFP_IDLE"/>
+ <value value="3" name="CP_PFP_BUSY_WORKING"/>
+ <value value="4" name="CP_PFP_STALL_CYCLES_ANY"/>
+ <value value="5" name="CP_PFP_STARVE_CYCLES_ANY"/>
+ <value value="6" name="CP_PFP_STARVED_PER_LOAD_ADDR"/>
+ <value value="7" name="CP_PFP_STALLED_PER_STORE_ADDR"/>
+ <value value="8" name="CP_PFP_PC_PROFILE"/>
+ <value value="9" name="CP_PFP_MATCH_PM4_PKT_PROFILE"/>
+ <value value="10" name="CP_PFP_COND_INDIRECT_DISCARDED"/>
+ <value value="11" name="CP_LONG_RESUMPTIONS"/>
+ <value value="12" name="CP_RESUME_CYCLES"/>
+ <value value="13" name="CP_RESUME_TO_BOUNDARY_CYCLES"/>
+ <value value="14" name="CP_LONG_PREEMPTIONS"/>
+ <value value="15" name="CP_PREEMPT_CYCLES"/>
+ <value value="16" name="CP_PREEMPT_TO_BOUNDARY_CYCLES"/>
+ <value value="17" name="CP_ME_FIFO_EMPTY_PFP_IDLE"/>
+ <value value="18" name="CP_ME_FIFO_EMPTY_PFP_BUSY"/>
+ <value value="19" name="CP_ME_FIFO_NOT_EMPTY_NOT_FULL"/>
+ <value value="20" name="CP_ME_FIFO_FULL_ME_BUSY"/>
+ <value value="21" name="CP_ME_FIFO_FULL_ME_NON_WORKING"/>
+ <value value="22" name="CP_ME_WAITING_FOR_PACKETS"/>
+ <value value="23" name="CP_ME_BUSY_WORKING"/>
+ <value value="24" name="CP_ME_STARVE_CYCLES_ANY"/>
+ <value value="25" name="CP_ME_STARVE_CYCLES_PER_PROFILE"/>
+ <value value="26" name="CP_ME_STALL_CYCLES_PER_PROFILE"/>
+ <value value="27" name="CP_ME_PC_PROFILE"/>
+ <value value="28" name="CP_RCIU_FIFO_EMPTY"/>
+ <value value="29" name="CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL"/>
+ <value value="30" name="CP_RCIU_FIFO_FULL"/>
+ <value value="31" name="CP_RCIU_FIFO_FULL_NO_CONTEXT"/>
+ <value value="32" name="CP_RCIU_FIFO_FULL_AHB_MASTER"/>
+ <value value="33" name="CP_RCIU_FIFO_FULL_OTHER"/>
+ <value value="34" name="CP_AHB_IDLE"/>
+ <value value="35" name="CP_AHB_STALL_ON_GRANT_NO_SPLIT"/>
+ <value value="36" name="CP_AHB_STALL_ON_GRANT_SPLIT"/>
+ <value value="37" name="CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE"/>
+ <value value="38" name="CP_AHB_BUSY_WORKING"/>
+ <value value="39" name="CP_AHB_BUSY_STALL_ON_HRDY"/>
+ <value value="40" name="CP_AHB_BUSY_STALL_ON_HRDY_PROFILE"/>
+</enum>
+
+<enum name="a4xx_gras_ras_perfcounter_select">
+ <value value="0" name="RAS_SUPER_TILES"/>
+ <value value="1" name="RAS_8X8_TILES"/>
+ <value value="2" name="RAS_4X4_TILES"/>
+ <value value="3" name="RAS_BUSY_CYCLES"/>
+ <value value="4" name="RAS_STALL_CYCLES_BY_RB"/>
+ <value value="5" name="RAS_STALL_CYCLES_BY_VSC"/>
+ <value value="6" name="RAS_STARVE_CYCLES_BY_TSE"/>
+ <value value="7" name="RAS_SUPERTILE_CYCLES"/>
+ <value value="8" name="RAS_TILE_CYCLES"/>
+ <value value="9" name="RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="10" name="RAS_FULLY_COVERED_8X8_TILES"/>
+ <value value="11" name="RAS_4X4_PRIM"/>
+ <value value="12" name="RAS_8X4_4X8_PRIM"/>
+ <value value="13" name="RAS_8X8_PRIM"/>
+</enum>
+
+<enum name="a4xx_gras_tse_perfcounter_select">
+ <value value="0" name="TSE_INPUT_PRIM"/>
+ <value value="1" name="TSE_INPUT_NULL_PRIM"/>
+ <value value="2" name="TSE_TRIVAL_REJ_PRIM"/>
+ <value value="3" name="TSE_CLIPPED_PRIM"/>
+ <value value="4" name="TSE_NEW_PRIM"/>
+ <value value="5" name="TSE_ZERO_AREA_PRIM"/>
+ <value value="6" name="TSE_FACENESS_CULLED_PRIM"/>
+ <value value="7" name="TSE_ZERO_PIXEL_PRIM"/>
+ <value value="8" name="TSE_OUTPUT_NULL_PRIM"/>
+ <value value="9" name="TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="10" name="TSE_PRE_CLIP_PRIM"/>
+ <value value="11" name="TSE_POST_CLIP_PRIM"/>
+ <value value="12" name="TSE_BUSY_CYCLES"/>
+ <value value="13" name="TSE_PC_STARVE"/>
+ <value value="14" name="TSE_RAS_STALL"/>
+ <value value="15" name="TSE_STALL_BARYPLANE_FIFO_FULL"/>
+ <value value="16" name="TSE_STALL_ZPLANE_FIFO_FULL"/>
+</enum>
+
+<enum name="a4xx_hlsq_perfcounter_select">
+ <value value="0" name="HLSQ_SP_VS_STAGE_CONSTANT"/>
+ <value value="1" name="HLSQ_SP_VS_STAGE_INSTRUCTIONS"/>
+ <value value="2" name="HLSQ_SP_FS_STAGE_CONSTANT"/>
+ <value value="3" name="HLSQ_SP_FS_STAGE_INSTRUCTIONS"/>
+ <value value="4" name="HLSQ_TP_STATE"/>
+ <value value="5" name="HLSQ_QUADS"/>
+ <value value="6" name="HLSQ_PIXELS"/>
+ <value value="7" name="HLSQ_VERTICES"/>
+ <value value="13" name="HLSQ_SP_VS_STAGE_DATA_BYTES"/>
+ <value value="14" name="HLSQ_SP_FS_STAGE_DATA_BYTES"/>
+ <value value="15" name="HLSQ_BUSY_CYCLES"/>
+ <value value="16" name="HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="17" name="HLSQ_STALL_CYCLES_SP_VS_STAGE"/>
+ <value value="18" name="HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="19" name="HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="20" name="HLSQ_RBBM_LOAD_CYCLES"/>
+ <value value="21" name="HLSQ_DI_TO_VS_START_SP"/>
+ <value value="22" name="HLSQ_DI_TO_FS_START_SP"/>
+ <value value="23" name="HLSQ_VS_STAGE_START_TO_DONE_SP"/>
+ <value value="24" name="HLSQ_FS_STAGE_START_TO_DONE_SP"/>
+ <value value="25" name="HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE"/>
+ <value value="26" name="HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE"/>
+ <value value="27" name="HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="28" name="HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="29" name="HLSQ_STARVE_CYCLES_VFD"/>
+</enum>
+
+<enum name="a4xx_pc_perfcounter_select">
+ <value value="0" name="PC_VIS_STREAMS_LOADED"/>
+ <value value="2" name="PC_VPC_PRIMITIVES"/>
+ <value value="3" name="PC_DEAD_PRIM"/>
+ <value value="4" name="PC_LIVE_PRIM"/>
+ <value value="5" name="PC_DEAD_DRAWCALLS"/>
+ <value value="6" name="PC_LIVE_DRAWCALLS"/>
+ <value value="7" name="PC_VERTEX_MISSES"/>
+ <value value="9" name="PC_STALL_CYCLES_VFD"/>
+ <value value="10" name="PC_STALL_CYCLES_TSE"/>
+ <value value="11" name="PC_STALL_CYCLES_UCHE"/>
+ <value value="12" name="PC_WORKING_CYCLES"/>
+ <value value="13" name="PC_IA_VERTICES"/>
+ <value value="14" name="PC_GS_PRIMITIVES"/>
+ <value value="15" name="PC_HS_INVOCATIONS"/>
+ <value value="16" name="PC_DS_INVOCATIONS"/>
+ <value value="17" name="PC_DS_PRIMITIVES"/>
+ <value value="20" name="PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="21" name="PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="22" name="PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="23" name="PC_STALL_CYCLES_TESS"/>
+ <value value="24" name="PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="25" name="PC_MODE0_DRAWCALL"/>
+ <value value="26" name="PC_MODE1_DRAWCALL"/>
+ <value value="27" name="PC_MODE2_DRAWCALL"/>
+ <value value="28" name="PC_MODE3_DRAWCALL"/>
+ <value value="29" name="PC_MODE4_DRAWCALL"/>
+ <value value="30" name="PC_PREDICATED_DEAD_DRAWCALL"/>
+ <value value="31" name="PC_STALL_CYCLES_BY_TSE_ONLY"/>
+ <value value="32" name="PC_STALL_CYCLES_BY_VPC_ONLY"/>
+ <value value="33" name="PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="34" name="PC_BUSY_CYCLES"/>
+ <value value="35" name="PC_STARVE_CYCLES_DI"/>
+ <value value="36" name="PC_STALL_CYCLES_VPC"/>
+ <value value="37" name="TESS_WORKING_CYCLES"/>
+ <value value="38" name="TESS_NUM_CYCLES_SETUP_WORKING"/>
+ <value value="39" name="TESS_NUM_CYCLES_PTGEN_WORKING"/>
+ <value value="40" name="TESS_NUM_CYCLES_CONNGEN_WORKING"/>
+ <value value="41" name="TESS_BUSY_CYCLES"/>
+ <value value="42" name="TESS_STARVE_CYCLES_PC"/>
+ <value value="43" name="TESS_STALL_CYCLES_PC"/>
+</enum>
+
+<enum name="a4xx_pwr_perfcounter_select">
+ <!-- NOTE not actually used.. see RBBM_RBBM_CTL.RESET_PWR_CTR0/1 -->
+ <value value="0" name="PWR_CORE_CLOCK_CYCLES"/>
+ <value value="1" name="PWR_BUSY_CLOCK_CYCLES"/>
+</enum>
+
+<enum name="a4xx_rb_perfcounter_select">
+ <value value="0" name="RB_BUSY_CYCLES"/>
+ <value value="1" name="RB_BUSY_CYCLES_BINNING"/>
+ <value value="2" name="RB_BUSY_CYCLES_RENDERING"/>
+ <value value="3" name="RB_BUSY_CYCLES_RESOLVE"/>
+ <value value="4" name="RB_STARVE_CYCLES_BY_SP"/>
+ <value value="5" name="RB_STARVE_CYCLES_BY_RAS"/>
+ <value value="6" name="RB_STARVE_CYCLES_BY_MARB"/>
+ <value value="7" name="RB_STALL_CYCLES_BY_MARB"/>
+ <value value="8" name="RB_STALL_CYCLES_BY_HLSQ"/>
+ <value value="9" name="RB_RB_RB_MARB_DATA"/>
+ <value value="10" name="RB_SP_RB_QUAD"/>
+ <value value="11" name="RB_RAS_RB_Z_QUADS"/>
+ <value value="12" name="RB_GMEM_CH0_READ"/>
+ <value value="13" name="RB_GMEM_CH1_READ"/>
+ <value value="14" name="RB_GMEM_CH0_WRITE"/>
+ <value value="15" name="RB_GMEM_CH1_WRITE"/>
+ <value value="16" name="RB_CP_CONTEXT_DONE"/>
+ <value value="17" name="RB_CP_CACHE_FLUSH"/>
+ <value value="18" name="RB_CP_ZPASS_DONE"/>
+ <value value="19" name="RB_STALL_FIFO0_FULL"/>
+ <value value="20" name="RB_STALL_FIFO1_FULL"/>
+ <value value="21" name="RB_STALL_FIFO2_FULL"/>
+ <value value="22" name="RB_STALL_FIFO3_FULL"/>
+ <value value="23" name="RB_RB_HLSQ_TRANSACTIONS"/>
+ <value value="24" name="RB_Z_READ"/>
+ <value value="25" name="RB_Z_WRITE"/>
+ <value value="26" name="RB_C_READ"/>
+ <value value="27" name="RB_C_WRITE"/>
+ <value value="28" name="RB_C_READ_LATENCY"/>
+ <value value="29" name="RB_Z_READ_LATENCY"/>
+ <value value="30" name="RB_STALL_BY_UCHE"/>
+ <value value="31" name="RB_MARB_UCHE_TRANSACTIONS"/>
+ <value value="32" name="RB_CACHE_STALL_MISS"/>
+ <value value="33" name="RB_CACHE_STALL_FIFO_FULL"/>
+ <value value="34" name="RB_8BIT_BLENDER_UNITS_ACTIVE"/>
+ <value value="35" name="RB_16BIT_BLENDER_UNITS_ACTIVE"/>
+ <value value="36" name="RB_SAMPLER_UNITS_ACTIVE"/>
+ <value value="38" name="RB_TOTAL_PASS"/>
+ <value value="39" name="RB_Z_PASS"/>
+ <value value="40" name="RB_Z_FAIL"/>
+ <value value="41" name="RB_S_FAIL"/>
+ <value value="42" name="RB_POWER0"/>
+ <value value="43" name="RB_POWER1"/>
+ <value value="44" name="RB_POWER2"/>
+ <value value="45" name="RB_POWER3"/>
+ <value value="46" name="RB_POWER4"/>
+ <value value="47" name="RB_POWER5"/>
+ <value value="48" name="RB_POWER6"/>
+ <value value="49" name="RB_POWER7"/>
+</enum>
+
+<enum name="a4xx_rbbm_perfcounter_select">
+ <value value="0" name="RBBM_ALWAYS_ON"/>
+ <value value="1" name="RBBM_VBIF_BUSY"/>
+ <value value="2" name="RBBM_TSE_BUSY"/>
+ <value value="3" name="RBBM_RAS_BUSY"/>
+ <value value="4" name="RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="RBBM_VFD_BUSY"/>
+ <value value="7" name="RBBM_VPC_BUSY"/>
+ <value value="8" name="RBBM_UCHE_BUSY"/>
+ <value value="9" name="RBBM_VSC_BUSY"/>
+ <value value="10" name="RBBM_HLSQ_BUSY"/>
+ <value value="11" name="RBBM_ANY_RB_BUSY"/>
+ <value value="12" name="RBBM_ANY_TPL1_BUSY"/>
+ <value value="13" name="RBBM_ANY_SP_BUSY"/>
+ <value value="14" name="RBBM_ANY_MARB_BUSY"/>
+ <value value="15" name="RBBM_ANY_ARB_BUSY"/>
+ <value value="16" name="RBBM_AHB_STATUS_BUSY"/>
+ <value value="17" name="RBBM_AHB_STATUS_STALLED"/>
+ <value value="18" name="RBBM_AHB_STATUS_TXFR"/>
+ <value value="19" name="RBBM_AHB_STATUS_TXFR_SPLIT"/>
+ <value value="20" name="RBBM_AHB_STATUS_TXFR_ERROR"/>
+ <value value="21" name="RBBM_AHB_STATUS_LONG_STALL"/>
+ <value value="22" name="RBBM_STATUS_MASKED"/>
+ <value value="23" name="RBBM_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="24" name="RBBM_TESS_BUSY"/>
+ <value value="25" name="RBBM_COM_BUSY"/>
+ <value value="32" name="RBBM_DCOM_BUSY"/>
+ <value value="33" name="RBBM_ANY_CCU_BUSY"/>
+ <value value="34" name="RBBM_DPM_BUSY"/>
+</enum>
+
+<enum name="a4xx_sp_perfcounter_select">
+ <value value="0" name="SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="1" name="SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="2" name="SP_LM_ATOMICS"/>
+ <value value="3" name="SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="4" name="SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="5" name="SP_GM_ATOMICS"/>
+ <value value="6" name="SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="7" name="SP_VS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="8" name="SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="9" name="SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="10" name="SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="11" name="SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="12" name="SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="13" name="SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="14" name="SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="15" name="SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="17" name="SP_VS_INSTRUCTIONS"/>
+ <value value="18" name="SP_FS_INSTRUCTIONS"/>
+ <value value="19" name="SP_ADDR_LOCK_COUNT"/>
+ <value value="20" name="SP_UCHE_READ_TRANS"/>
+ <value value="21" name="SP_UCHE_WRITE_TRANS"/>
+ <value value="22" name="SP_EXPORT_VPC_TRANS"/>
+ <value value="23" name="SP_EXPORT_RB_TRANS"/>
+ <value value="24" name="SP_PIXELS_KILLED"/>
+ <value value="25" name="SP_ICL1_REQUESTS"/>
+ <value value="26" name="SP_ICL1_MISSES"/>
+ <value value="27" name="SP_ICL0_REQUESTS"/>
+ <value value="28" name="SP_ICL0_MISSES"/>
+ <value value="29" name="SP_ALU_WORKING_CYCLES"/>
+ <value value="30" name="SP_EFU_WORKING_CYCLES"/>
+ <value value="31" name="SP_STALL_CYCLES_BY_VPC"/>
+ <value value="32" name="SP_STALL_CYCLES_BY_TP"/>
+ <value value="33" name="SP_STALL_CYCLES_BY_UCHE"/>
+ <value value="34" name="SP_STALL_CYCLES_BY_RB"/>
+ <value value="35" name="SP_BUSY_CYCLES"/>
+ <value value="36" name="SP_HS_INSTRUCTIONS"/>
+ <value value="37" name="SP_DS_INSTRUCTIONS"/>
+ <value value="38" name="SP_GS_INSTRUCTIONS"/>
+ <value value="39" name="SP_CS_INSTRUCTIONS"/>
+ <value value="40" name="SP_SCHEDULER_NON_WORKING"/>
+ <value value="41" name="SP_WAVE_CONTEXTS"/>
+ <value value="42" name="SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="43" name="SP_POWER0"/>
+ <value value="44" name="SP_POWER1"/>
+ <value value="45" name="SP_POWER2"/>
+ <value value="46" name="SP_POWER3"/>
+ <value value="47" name="SP_POWER4"/>
+ <value value="48" name="SP_POWER5"/>
+ <value value="49" name="SP_POWER6"/>
+ <value value="50" name="SP_POWER7"/>
+ <value value="51" name="SP_POWER8"/>
+ <value value="52" name="SP_POWER9"/>
+ <value value="53" name="SP_POWER10"/>
+ <value value="54" name="SP_POWER11"/>
+ <value value="55" name="SP_POWER12"/>
+ <value value="56" name="SP_POWER13"/>
+ <value value="57" name="SP_POWER14"/>
+ <value value="58" name="SP_POWER15"/>
+</enum>
+
+<enum name="a4xx_tp_perfcounter_select">
+ <value value="0" name="TP_L1_REQUESTS"/>
+ <value value="1" name="TP_L1_MISSES"/>
+ <value value="8" name="TP_QUADS_OFFSET"/>
+ <value value="9" name="TP_QUAD_SHADOW"/>
+ <value value="10" name="TP_QUADS_ARRAY"/>
+ <value value="11" name="TP_QUADS_GRADIENT"/>
+ <value value="12" name="TP_QUADS_1D2D"/>
+ <value value="13" name="TP_QUADS_3DCUBE"/>
+ <value value="16" name="TP_BUSY_CYCLES"/>
+ <value value="17" name="TP_STALL_CYCLES_BY_ARB"/>
+ <value value="20" name="TP_STATE_CACHE_REQUESTS"/>
+ <value value="21" name="TP_STATE_CACHE_MISSES"/>
+ <value value="22" name="TP_POWER0"/>
+ <value value="23" name="TP_POWER1"/>
+ <value value="24" name="TP_POWER2"/>
+ <value value="25" name="TP_POWER3"/>
+ <value value="26" name="TP_POWER4"/>
+ <value value="27" name="TP_POWER5"/>
+ <value value="28" name="TP_POWER6"/>
+ <value value="29" name="TP_POWER7"/>
+</enum>
+
+<enum name="a4xx_uche_perfcounter_select">
+ <value value="0" name="UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="1" name="UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="2" name="UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="3" name="UCHE_VBIF_READ_BEATS_MARB"/>
+ <value value="4" name="UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="5" name="UCHE_READ_REQUESTS_TP"/>
+ <value value="6" name="UCHE_READ_REQUESTS_VFD"/>
+ <value value="7" name="UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="8" name="UCHE_READ_REQUESTS_MARB"/>
+ <value value="9" name="UCHE_READ_REQUESTS_SP"/>
+ <value value="10" name="UCHE_WRITE_REQUESTS_MARB"/>
+ <value value="11" name="UCHE_WRITE_REQUESTS_SP"/>
+ <value value="12" name="UCHE_TAG_CHECK_FAILS"/>
+ <value value="13" name="UCHE_EVICTS"/>
+ <value value="14" name="UCHE_FLUSHES"/>
+ <value value="15" name="UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="16" name="UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="17" name="UCHE_BUSY_CYCLES"/>
+ <value value="18" name="UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="19" name="UCHE_READ_REQUESTS_PC"/>
+ <value value="20" name="UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="21" name="UCHE_STALL_BY_VBIF"/>
+ <value value="22" name="UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="23" name="UCHE_POWER0"/>
+ <value value="24" name="UCHE_POWER1"/>
+ <value value="25" name="UCHE_POWER2"/>
+ <value value="26" name="UCHE_POWER3"/>
+ <value value="27" name="UCHE_POWER4"/>
+ <value value="28" name="UCHE_POWER5"/>
+ <value value="29" name="UCHE_POWER6"/>
+ <value value="30" name="UCHE_POWER7"/>
+</enum>
+
+<enum name="a4xx_vbif_perfcounter_select">
+ <value value="0" name="AXI_READ_REQUESTS_ID_0"/>
+ <value value="1" name="AXI_READ_REQUESTS_ID_1"/>
+ <value value="2" name="AXI_READ_REQUESTS_ID_2"/>
+ <value value="3" name="AXI_READ_REQUESTS_ID_3"/>
+ <value value="4" name="AXI_READ_REQUESTS_ID_4"/>
+ <value value="5" name="AXI_READ_REQUESTS_ID_5"/>
+ <value value="6" name="AXI_READ_REQUESTS_ID_6"/>
+ <value value="7" name="AXI_READ_REQUESTS_ID_7"/>
+ <value value="8" name="AXI_READ_REQUESTS_ID_8"/>
+ <value value="9" name="AXI_READ_REQUESTS_ID_9"/>
+ <value value="10" name="AXI_READ_REQUESTS_ID_10"/>
+ <value value="11" name="AXI_READ_REQUESTS_ID_11"/>
+ <value value="12" name="AXI_READ_REQUESTS_ID_12"/>
+ <value value="13" name="AXI_READ_REQUESTS_ID_13"/>
+ <value value="14" name="AXI_READ_REQUESTS_ID_14"/>
+ <value value="15" name="AXI_READ_REQUESTS_ID_15"/>
+ <value value="16" name="AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="17" name="AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="18" name="AXI2_READ_REQUESTS_TOTAL"/>
+ <value value="19" name="AXI3_READ_REQUESTS_TOTAL"/>
+ <value value="20" name="AXI_READ_REQUESTS_TOTAL"/>
+ <value value="21" name="AXI_WRITE_REQUESTS_ID_0"/>
+ <value value="22" name="AXI_WRITE_REQUESTS_ID_1"/>
+ <value value="23" name="AXI_WRITE_REQUESTS_ID_2"/>
+ <value value="24" name="AXI_WRITE_REQUESTS_ID_3"/>
+ <value value="25" name="AXI_WRITE_REQUESTS_ID_4"/>
+ <value value="26" name="AXI_WRITE_REQUESTS_ID_5"/>
+ <value value="27" name="AXI_WRITE_REQUESTS_ID_6"/>
+ <value value="28" name="AXI_WRITE_REQUESTS_ID_7"/>
+ <value value="29" name="AXI_WRITE_REQUESTS_ID_8"/>
+ <value value="30" name="AXI_WRITE_REQUESTS_ID_9"/>
+ <value value="31" name="AXI_WRITE_REQUESTS_ID_10"/>
+ <value value="32" name="AXI_WRITE_REQUESTS_ID_11"/>
+ <value value="33" name="AXI_WRITE_REQUESTS_ID_12"/>
+ <value value="34" name="AXI_WRITE_REQUESTS_ID_13"/>
+ <value value="35" name="AXI_WRITE_REQUESTS_ID_14"/>
+ <value value="36" name="AXI_WRITE_REQUESTS_ID_15"/>
+ <value value="37" name="AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="38" name="AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="39" name="AXI2_WRITE_REQUESTS_TOTAL"/>
+ <value value="40" name="AXI3_WRITE_REQUESTS_TOTAL"/>
+ <value value="41" name="AXI_WRITE_REQUESTS_TOTAL"/>
+ <value value="42" name="AXI_TOTAL_REQUESTS"/>
+ <value value="43" name="AXI_READ_DATA_BEATS_ID_0"/>
+ <value value="44" name="AXI_READ_DATA_BEATS_ID_1"/>
+ <value value="45" name="AXI_READ_DATA_BEATS_ID_2"/>
+ <value value="46" name="AXI_READ_DATA_BEATS_ID_3"/>
+ <value value="47" name="AXI_READ_DATA_BEATS_ID_4"/>
+ <value value="48" name="AXI_READ_DATA_BEATS_ID_5"/>
+ <value value="49" name="AXI_READ_DATA_BEATS_ID_6"/>
+ <value value="50" name="AXI_READ_DATA_BEATS_ID_7"/>
+ <value value="51" name="AXI_READ_DATA_BEATS_ID_8"/>
+ <value value="52" name="AXI_READ_DATA_BEATS_ID_9"/>
+ <value value="53" name="AXI_READ_DATA_BEATS_ID_10"/>
+ <value value="54" name="AXI_READ_DATA_BEATS_ID_11"/>
+ <value value="55" name="AXI_READ_DATA_BEATS_ID_12"/>
+ <value value="56" name="AXI_READ_DATA_BEATS_ID_13"/>
+ <value value="57" name="AXI_READ_DATA_BEATS_ID_14"/>
+ <value value="58" name="AXI_READ_DATA_BEATS_ID_15"/>
+ <value value="59" name="AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="60" name="AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="61" name="AXI2_READ_DATA_BEATS_TOTAL"/>
+ <value value="62" name="AXI3_READ_DATA_BEATS_TOTAL"/>
+ <value value="63" name="AXI_READ_DATA_BEATS_TOTAL"/>
+ <value value="64" name="AXI_WRITE_DATA_BEATS_ID_0"/>
+ <value value="65" name="AXI_WRITE_DATA_BEATS_ID_1"/>
+ <value value="66" name="AXI_WRITE_DATA_BEATS_ID_2"/>
+ <value value="67" name="AXI_WRITE_DATA_BEATS_ID_3"/>
+ <value value="68" name="AXI_WRITE_DATA_BEATS_ID_4"/>
+ <value value="69" name="AXI_WRITE_DATA_BEATS_ID_5"/>
+ <value value="70" name="AXI_WRITE_DATA_BEATS_ID_6"/>
+ <value value="71" name="AXI_WRITE_DATA_BEATS_ID_7"/>
+ <value value="72" name="AXI_WRITE_DATA_BEATS_ID_8"/>
+ <value value="73" name="AXI_WRITE_DATA_BEATS_ID_9"/>
+ <value value="74" name="AXI_WRITE_DATA_BEATS_ID_10"/>
+ <value value="75" name="AXI_WRITE_DATA_BEATS_ID_11"/>
+ <value value="76" name="AXI_WRITE_DATA_BEATS_ID_12"/>
+ <value value="77" name="AXI_WRITE_DATA_BEATS_ID_13"/>
+ <value value="78" name="AXI_WRITE_DATA_BEATS_ID_14"/>
+ <value value="79" name="AXI_WRITE_DATA_BEATS_ID_15"/>
+ <value value="80" name="AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="81" name="AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="82" name="AXI2_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="83" name="AXI3_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="84" name="AXI_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="85" name="AXI_DATA_BEATS_TOTAL"/>
+ <value value="86" name="CYCLES_HELD_OFF_ID_0"/>
+ <value value="87" name="CYCLES_HELD_OFF_ID_1"/>
+ <value value="88" name="CYCLES_HELD_OFF_ID_2"/>
+ <value value="89" name="CYCLES_HELD_OFF_ID_3"/>
+ <value value="90" name="CYCLES_HELD_OFF_ID_4"/>
+ <value value="91" name="CYCLES_HELD_OFF_ID_5"/>
+ <value value="92" name="CYCLES_HELD_OFF_ID_6"/>
+ <value value="93" name="CYCLES_HELD_OFF_ID_7"/>
+ <value value="94" name="CYCLES_HELD_OFF_ID_8"/>
+ <value value="95" name="CYCLES_HELD_OFF_ID_9"/>
+ <value value="96" name="CYCLES_HELD_OFF_ID_10"/>
+ <value value="97" name="CYCLES_HELD_OFF_ID_11"/>
+ <value value="98" name="CYCLES_HELD_OFF_ID_12"/>
+ <value value="99" name="CYCLES_HELD_OFF_ID_13"/>
+ <value value="100" name="CYCLES_HELD_OFF_ID_14"/>
+ <value value="101" name="CYCLES_HELD_OFF_ID_15"/>
+ <value value="102" name="AXI_READ_REQUEST_HELD_OFF"/>
+ <value value="103" name="AXI_WRITE_REQUEST_HELD_OFF"/>
+ <value value="104" name="AXI_REQUEST_HELD_OFF"/>
+ <value value="105" name="AXI_WRITE_DATA_HELD_OFF"/>
+ <value value="106" name="OCMEM_AXI_READ_REQUEST_HELD_OFF"/>
+ <value value="107" name="OCMEM_AXI_WRITE_REQUEST_HELD_OFF"/>
+ <value value="108" name="OCMEM_AXI_REQUEST_HELD_OFF"/>
+ <value value="109" name="OCMEM_AXI_WRITE_DATA_HELD_OFF"/>
+ <value value="110" name="ELAPSED_CYCLES_DDR"/>
+ <value value="111" name="ELAPSED_CYCLES_OCMEM"/>
+</enum>
+
+<enum name="a4xx_vfd_perfcounter_select">
+ <value value="0" name="VFD_UCHE_BYTE_FETCHED"/>
+ <value value="1" name="VFD_UCHE_TRANS"/>
+ <value value="3" name="VFD_FETCH_INSTRUCTIONS"/>
+ <value value="5" name="VFD_BUSY_CYCLES"/>
+ <value value="6" name="VFD_STALL_CYCLES_UCHE"/>
+ <value value="7" name="VFD_STALL_CYCLES_HLSQ"/>
+ <value value="8" name="VFD_STALL_CYCLES_VPC_BYPASS"/>
+ <value value="9" name="VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="13" name="VFD_MODE_0_FIBERS"/>
+ <value value="14" name="VFD_MODE_1_FIBERS"/>
+ <value value="15" name="VFD_MODE_2_FIBERS"/>
+ <value value="16" name="VFD_MODE_3_FIBERS"/>
+ <value value="17" name="VFD_MODE_4_FIBERS"/>
+ <value value="18" name="VFD_BFIFO_STALL"/>
+ <value value="19" name="VFD_NUM_VERTICES_TOTAL"/>
+ <value value="20" name="VFD_PACKER_FULL"/>
+ <value value="21" name="VFD_UCHE_REQUEST_FIFO_FULL"/>
+ <value value="22" name="VFD_STARVE_CYCLES_PC"/>
+ <value value="23" name="VFD_STARVE_CYCLES_UCHE"/>
+</enum>
+
+<enum name="a4xx_vpc_perfcounter_select">
+ <value value="2" name="VPC_SP_LM_COMPONENTS"/>
+ <value value="3" name="VPC_SP0_LM_BYTES"/>
+ <value value="4" name="VPC_SP1_LM_BYTES"/>
+ <value value="5" name="VPC_SP2_LM_BYTES"/>
+ <value value="6" name="VPC_SP3_LM_BYTES"/>
+ <value value="7" name="VPC_WORKING_CYCLES"/>
+ <value value="8" name="VPC_STALL_CYCLES_LM"/>
+ <value value="9" name="VPC_STARVE_CYCLES_RAS"/>
+ <value value="10" name="VPC_STREAMOUT_CYCLES"/>
+ <value value="12" name="VPC_UCHE_TRANSACTIONS"/>
+ <value value="13" name="VPC_STALL_CYCLES_UCHE"/>
+ <value value="14" name="VPC_BUSY_CYCLES"/>
+ <value value="15" name="VPC_STARVE_CYCLES_SP"/>
+</enum>
+
+<enum name="a4xx_vsc_perfcounter_select">
+ <value value="0" name="VSC_BUSY_CYCLES"/>
+ <value value="1" name="VSC_WORKING_CYCLES"/>
+ <value value="2" name="VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="VSC_STARVE_CYCLES_RAS"/>
+ <value value="4" name="VSC_EOT_NUM"/>
+</enum>
+
+<domain name="A4XX" width="32">
+ <!-- RB registers -->
+ <reg32 offset="0x0cc0" name="RB_GMEM_BASE_ADDR"/>
+ <reg32 offset="0x0cc7" name="RB_PERFCTR_RB_SEL_0" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cc8" name="RB_PERFCTR_RB_SEL_1" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cc9" name="RB_PERFCTR_RB_SEL_2" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cca" name="RB_PERFCTR_RB_SEL_3" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0ccb" name="RB_PERFCTR_RB_SEL_4" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0ccc" name="RB_PERFCTR_RB_SEL_5" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0ccd" name="RB_PERFCTR_RB_SEL_6" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cce" name="RB_PERFCTR_RB_SEL_7" type="a4xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0ccf" name="RB_PERFCTR_CCU_SEL_0" type="a4xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cd0" name="RB_PERFCTR_CCU_SEL_1" type="a4xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cd1" name="RB_PERFCTR_CCU_SEL_2" type="a4xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cd2" name="RB_PERFCTR_CCU_SEL_3" type="a4xx_ccu_perfcounter_select"/>
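+	<!--
+		The *_PERFCTR_*_SEL_n registers (like the RB/CCU ones above) each pick
+		one countable from the matching a4xx_*_perfcounter_select enum, and the
+		resulting 64-bit count is read back from the corresponding
+		RBBM_PERFCTR_*_n_LO/HI pair (a guess based on how earlier adreno
+		generations work).  A minimal, hypothetical C sketch of the idea, where
+		gpu_write32()/gpu_read32() are placeholder MMIO accessors taking dword
+		offsets (not any real driver API):
+
+		    #include <stdint.h>
+
+		    extern void gpu_write32(uint32_t dword_offset, uint32_t val);
+		    extern uint32_t gpu_read32(uint32_t dword_offset);
+
+		    static uint64_t sample_rb_busy_cycles(void)
+		    {
+		        /* route countable 0 (RB_BUSY_CYCLES) to RB counter 0 */
+		        gpu_write32(0x0cc7, 0);              /* RB_PERFCTR_RB_SEL_0 */
+		        /* (counters probably also need enabling via RBBM_PERFCTR_CTL, 0x0170) */
+		        /* ... let some work run ... */
+		        uint64_t lo = gpu_read32(0x013c);    /* RBBM_PERFCTR_RB_0_LO */
+		        uint64_t hi = gpu_read32(0x013d);    /* RBBM_PERFCTR_RB_0_HI */
+		        return (hi << 32) | lo;
+		    }
+	-->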
+ <reg32 offset="0x0ce0" name="RB_FRAME_BUFFER_DIMENSION">
+ <bitfield name="WIDTH" low="0" high="13" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="0x20cc" name="RB_CLEAR_COLOR_DW0"/>
+ <reg32 offset="0x20cd" name="RB_CLEAR_COLOR_DW1"/>
+ <reg32 offset="0x20ce" name="RB_CLEAR_COLOR_DW2"/>
+ <reg32 offset="0x20cf" name="RB_CLEAR_COLOR_DW3"/>
+ <reg32 offset="0x20a0" name="RB_MODE_CONTROL">
+ <!--
+			for non-bypass mode, these are the bin width/height.. although
+			possibly bigger bitfields to hold the entire width/height for
+			gmem-bypass??  Either way, the value appears to need to be a
+			multiple of 32 (see the worked example below)..
+ -->
+ <bitfield name="WIDTH" low="0" high="5" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="8" high="13" shr="5" type="uint"/>
+ <bitfield name="ENABLE_GMEM" pos="16" type="boolean"/>
+ </reg32>
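+	<!--
+		Worked example of the shr="5" encoding above (assuming the multiple-of-32
+		observation): a 480x320 bin would be programmed as WIDTH = 480 >> 5 = 15
+		and HEIGHT = 320 >> 5 = 10.
+	-->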
+ <reg32 offset="0x20a1" name="RB_RENDER_CONTROL">
+ <bitfield name="BINNING_PASS" pos="0" type="boolean"/>
+ <!-- nearly everything has bit3 set.. -->
+ <!-- bit5 set on resolve and tiling pass -->
+ <bitfield name="DISABLE_COLOR_PIPE" pos="5" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x20a2" name="RB_MSAA_CONTROL">
+ <bitfield name="DISABLE" pos="12" type="boolean"/>
+ <bitfield name="SAMPLES" low="13" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x20a3" name="RB_RENDER_CONTROL2">
+ <bitfield name="COORD_MASK" low="0" high="3" type="hex"/>
+ <bitfield name="SAMPLEMASK" pos="4" type="boolean"/>
+ <bitfield name="FACENESS" pos="5" type="boolean"/>
+ <bitfield name="SAMPLEID" pos="6" type="boolean"/>
+ <bitfield name="MSAA_SAMPLES" low="7" high="9" type="uint"/>
+ <bitfield name="SAMPLEID_HR" pos="11" type="boolean"/>
+ <bitfield name="IJ_PERSP_PIXEL" pos="12" type="boolean"/>
+ <!-- the 2 below are just educated guesses -->
+ <bitfield name="IJ_PERSP_CENTROID" pos="13" type="boolean"/>
+ <bitfield name="IJ_PERSP_SAMPLE" pos="14" type="boolean"/>
+ <!-- needs to be enabled to get nopersp values,
+ perhaps other cases too? -->
+ <bitfield name="SIZE" pos="15" type="boolean"/>
+ </reg32>
+ <array offset="0x20a4" name="RB_MRT" stride="5" length="8">
+ <reg32 offset="0x0" name="CONTROL">
+ <bitfield name="READ_DEST_ENABLE" pos="3" type="boolean"/>
+ <!-- both these bits seem to get set when enabling GL_BLEND.. -->
+ <bitfield name="BLEND" pos="4" type="boolean"/>
+ <bitfield name="BLEND2" pos="5" type="boolean"/>
+ <bitfield name="ROP_ENABLE" pos="6" type="boolean"/>
+ <bitfield name="ROP_CODE" low="8" high="11" type="a3xx_rop_code"/>
+ <bitfield name="COMPONENT_ENABLE" low="24" high="27" type="hex"/>
+ </reg32>
+ <reg32 offset="0x1" name="BUF_INFO">
+ <bitfield name="COLOR_FORMAT" low="0" high="5" type="a4xx_color_fmt"/>
+ <!--
+				 guesstimate of the position of COLOR_TILE_MODE.. this works out
+				 if the common value is 2, like on a3xx..
+ -->
+ <bitfield name="COLOR_TILE_MODE" low="6" high="7" type="a4xx_tile_mode"/>
+ <bitfield name="DITHER_MODE" low="9" high="10" type="adreno_rb_dither_mode"/>
+ <bitfield name="COLOR_SWAP" low="11" high="12" type="a3xx_color_swap"/>
+ <bitfield name="COLOR_SRGB" pos="13" type="boolean"/>
+ <!-- note: possibly some # of lsb's aren't there: -->
+ <doc>
+					Pitch (actually, it appears to be the pitch in bytes, so
+					really a stride) in GMEM, ie. the pitch of the current tile
+					(see the worked example after this array).
+ </doc>
+ <bitfield name="COLOR_BUF_PITCH" low="14" high="31" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2" name="BASE"/>
+ <reg32 offset="0x3" name="CONTROL3">
+ <!-- probably missing some lsb's.. and guessing upper size -->
+ <!-- pitch * cpp * msaa: -->
+ <bitfield name="STRIDE" low="3" high="25" type="uint"/>
+ </reg32>
+ <reg32 offset="0x4" name="BLEND_CONTROL">
+ <bitfield name="RGB_SRC_FACTOR" low="0" high="4" type="adreno_rb_blend_factor"/>
+ <bitfield name="RGB_BLEND_OPCODE" low="5" high="7" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="RGB_DEST_FACTOR" low="8" high="12" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_SRC_FACTOR" low="16" high="20" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_BLEND_OPCODE" low="21" high="23" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="ALPHA_DEST_FACTOR" low="24" high="28" type="adreno_rb_blend_factor"/>
+ </reg32>
+ </array>
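+	<!--
+		Worked example for the RB_MRT[n].BUF_INFO pitch encoding above: with a
+		4 byte per pixel format and a 64 pixel wide tile, the GMEM pitch would be
+		256 bytes, stored as COLOR_BUF_PITCH = 256 >> 4 = 16 (so, presumably,
+		pitches are restricted to multiples of 16 bytes).  Just a guess at the
+		encoding, based on the shr value.
+	-->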
+
+ <reg32 offset="0x20f0" name="RB_BLEND_RED">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20f1" name="RB_BLEND_RED_F32" type="float"/>
+
+ <reg32 offset="0x20f2" name="RB_BLEND_GREEN">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20f3" name="RB_BLEND_GREEN_F32" type="float"/>
+
+ <reg32 offset="0x20f4" name="RB_BLEND_BLUE">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20f5" name="RB_BLEND_BLUE_F32" type="float"/>
+
+ <reg32 offset="0x20f6" name="RB_BLEND_ALPHA">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0x20f7" name="RB_BLEND_ALPHA_F32" type="float"/>
+
+ <reg32 offset="0x20f8" name="RB_ALPHA_CONTROL">
+ <bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
+ <bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
+ <bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
+ </reg32>
+ <reg32 offset="0x20f9" name="RB_FS_OUTPUT">
+ <!-- per-mrt enable bit -->
+ <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
+ <!-- a guess? -->
+ <bitfield name="SAMPLE_MASK" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x20fa" name="RB_SAMPLE_COUNT_CONTROL">
+ <bitfield name="COPY" pos="1" type="boolean"/>
+ <bitfield name="ADDR" low="2" high="31" shr="2"/>
+ </reg32>
+ <!-- always 00000000 for binning pass, else 0000000f: -->
+ <reg32 offset="0x20fb" name="RB_RENDER_COMPONENTS">
+ <bitfield name="RT0" low="0" high="3"/>
+ <bitfield name="RT1" low="4" high="7"/>
+ <bitfield name="RT2" low="8" high="11"/>
+ <bitfield name="RT3" low="12" high="15"/>
+ <bitfield name="RT4" low="16" high="19"/>
+ <bitfield name="RT5" low="20" high="23"/>
+ <bitfield name="RT6" low="24" high="27"/>
+ <bitfield name="RT7" low="28" high="31"/>
+ </reg32>
+
+ <reg32 offset="0x20fc" name="RB_COPY_CONTROL">
+ <!-- not sure # of bits -->
+ <bitfield name="MSAA_RESOLVE" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MODE" low="4" high="6" type="adreno_rb_copy_control_mode"/>
+ <bitfield name="FASTCLEAR" low="8" high="11" type="hex"/>
+ <bitfield name="GMEM_BASE" low="14" high="31" shr="14" type="hex"/>
+ </reg32>
+ <reg32 offset="0x20fd" name="RB_COPY_DEST_BASE">
+ <bitfield name="BASE" low="5" high="31" shr="5" type="hex"/>
+ </reg32>
+ <reg32 offset="0x20fe" name="RB_COPY_DEST_PITCH">
+ <doc>actually, appears to be pitch in bytes, so really is a stride</doc>
+ <!-- not actually sure about max pitch... -->
+ <bitfield name="PITCH" low="0" high="31" shr="5" type="uint"/>
+ </reg32>
+ <reg32 offset="0x20ff" name="RB_COPY_DEST_INFO">
+ <bitfield name="FORMAT" low="2" high="7" type="a4xx_color_fmt"/>
+ <bitfield name="SWAP" low="8" high="9" type="a3xx_color_swap"/>
+ <bitfield name="DITHER_MODE" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="COMPONENT_ENABLE" low="14" high="17" type="hex"/>
+ <bitfield name="ENDIAN" low="18" high="20" type="adreno_rb_surface_endian"/>
+ <bitfield name="TILE" low="24" high="25" type="a4xx_tile_mode"/>
+ </reg32>
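+	<!--
+		Sketch of the resolve/copy destination encodings above, going purely by
+		the shr values: a destination at byte address A with a pitch of P bytes
+		would presumably be programmed as RB_COPY_DEST_BASE.BASE = A >> 5 and
+		RB_COPY_DEST_PITCH.PITCH = P >> 5 (ie. both 32-byte aligned), with
+		RB_COPY_CONTROL.GMEM_BASE = gmem_byte_offset >> 14.
+	-->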
+ <reg32 offset="0x2100" name="RB_FS_OUTPUT_REG">
+ <!-- bit0 set except for binning pass.. -->
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ <bitfield name="FRAG_WRITES_Z" pos="5" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2101" name="RB_DEPTH_CONTROL">
+ <!--
+ guessing that this matches a2xx with the stencil fields
+ moved out into RB_STENCIL_CONTROL?
+ -->
+ <bitfield name="FRAG_WRITES_Z" pos="0" type="boolean"/>
+ <bitfield name="Z_TEST_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="ZFUNC" low="4" high="6" type="adreno_compare_func"/>
+ <bitfield name="Z_CLAMP_ENABLE" pos="7" type="boolean"/>
+ <bitfield name="EARLY_Z_DISABLE" pos="16" type="boolean"/>
+ <bitfield name="FORCE_FRAGZ_TO_FS" pos="17" type="boolean"/>
+ <doc>Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER</doc>
+ <bitfield name="Z_READ_ENABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2102" name="RB_DEPTH_CLEAR"/>
+ <reg32 offset="0x2103" name="RB_DEPTH_INFO">
+ <bitfield name="DEPTH_FORMAT" low="0" high="1" type="a4xx_depth_format"/>
+ <doc>
+				DEPTH_BASE is the offset in GMEM to the depth/stencil buffer, ie.
+				bin_w * bin_h / 1024 (possibly rounded up to a multiple of
+				something?? ie. 39 becomes 40, 78 becomes 80, 75 becomes 80..
+				so maybe it needs to be a multiple of 8??)
+ </doc>
+ <bitfield name="DEPTH_BASE" low="12" high="31" shr="12" type="hex"/>
+ </reg32>
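+	<!--
+		Worked example for DEPTH_BASE above: a 624x64 bin gives
+		624 * 64 / 1024 = 39, which per the note gets bumped up to 40.  With the
+		shr of 12 that corresponds to a GMEM byte offset of 40 * 4096, ie.
+		roughly bin_w * bin_h * 4, which would make sense if a 4 byte per pixel
+		color buffer sits below the depth/stencil buffer in GMEM.. but that is
+		just a guess.
+	-->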
+ <reg32 offset="0x2104" name="RB_DEPTH_PITCH" shr="5" type="uint">
+ <doc>stride of depth/stencil buffer</doc>
+ </reg32>
+ <reg32 offset="0x2105" name="RB_DEPTH_PITCH2" shr="5" type="uint">
+ <doc>???</doc>
+ </reg32>
+ <reg32 offset="0x2106" name="RB_STENCIL_CONTROL">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
+ <!--
+			set for stencil operations that require a read from the stencil
+			buffer, but not, for example, for stencil clear (which does not
+			require a read).. so guessing this is analogous to
+			READ_DEST_ENABLE for the color buffer..
+ -->
+ <bitfield name="STENCIL_READ" pos="2" type="boolean"/>
+ <bitfield name="FUNC" low="8" high="10" type="adreno_compare_func"/>
+ <bitfield name="FAIL" low="11" high="13" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS" low="14" high="16" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL" low="17" high="19" type="adreno_stencil_op"/>
+ <bitfield name="FUNC_BF" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="FAIL_BF" low="23" high="25" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
+ </reg32>
+ <reg32 offset="0x2107" name="RB_STENCIL_CONTROL2">
+ <!--
+			This seems to be set by the blob if there is a stencil buffer
+			at all in GMEM, regardless of whether it is enabled for
+			a particular draw (ie. RB_STENCIL_CONTROL).  Not really
+			sure if that is required or just a quirk of the blob.
+ -->
+ <bitfield name="STENCIL_BUFFER" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2108" name="RB_STENCIL_INFO">
+ <bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
+ <doc>Base address for stencil when not using interleaved depth/stencil</doc>
+ <bitfield name="STENCIL_BASE" low="12" high="31" shr="12" type="hex"/>
+ </reg32>
+ <reg32 offset="0x2109" name="RB_STENCIL_PITCH" shr="5" type="uint">
+ <doc>pitch of stencil buffer when not using interleaved depth/stencil</doc>
+ </reg32>
+
+ <reg32 offset="0x210b" name="RB_STENCILREFMASK" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0x210c" name="RB_STENCILREFMASK_BF" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0x210d" name="RB_BIN_OFFSET" type="adreno_reg_xy"/>
+ <array offset="0x2120" name="RB_VPORT_Z_CLAMP" stride="2" length="16">
+ <reg32 offset="0x0" name="MIN"/>
+ <reg32 offset="0x1" name="MAX"/>
+ </array>
+
+ <!-- RBBM registers -->
+ <reg32 offset="0x0000" name="RBBM_HW_VERSION"/>
+ <reg32 offset="0x0002" name="RBBM_HW_CONFIGURATION"/>
+ <array offset="0x4" name="RBBM_CLOCK_CTL_TP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x8" name="RBBM_CLOCK_CTL2_TP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0xc" name="RBBM_CLOCK_HYST_TP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x10" name="RBBM_CLOCK_DELAY_TP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+	<reg32 offset="0x0014" name="RBBM_CLOCK_CTL_UCHE"/>
+ <reg32 offset="0x0015" name="RBBM_CLOCK_CTL2_UCHE"/>
+ <reg32 offset="0x0016" name="RBBM_CLOCK_CTL3_UCHE"/>
+ <reg32 offset="0x0017" name="RBBM_CLOCK_CTL4_UCHE"/>
+ <reg32 offset="0x0018" name="RBBM_CLOCK_HYST_UCHE"/>
+ <reg32 offset="0x0019" name="RBBM_CLOCK_DELAY_UCHE"/>
+ <reg32 offset="0x001a" name="RBBM_CLOCK_MODE_GPC"/>
+ <reg32 offset="0x001b" name="RBBM_CLOCK_DELAY_GPC"/>
+ <reg32 offset="0x001c" name="RBBM_CLOCK_HYST_GPC"/>
+ <reg32 offset="0x001d" name="RBBM_CLOCK_CTL_TSE_RAS_RBBM"/>
+ <reg32 offset="0x001e" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM"/>
+ <reg32 offset="0x001f" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0020" name="RBBM_CLOCK_CTL"/>
+ <reg32 offset="0x0021" name="RBBM_SP_HYST_CNT"/>
+ <reg32 offset="0x0022" name="RBBM_SW_RESET_CMD"/>
+ <reg32 offset="0x0023" name="RBBM_AHB_CTL0"/>
+ <reg32 offset="0x0024" name="RBBM_AHB_CTL1"/>
+ <reg32 offset="0x0025" name="RBBM_AHB_CMD"/>
+ <reg32 offset="0x0026" name="RBBM_RB_SUB_BLOCK_SEL_CTL"/>
+ <reg32 offset="0x0028" name="RBBM_RAM_ACC_63_32"/>
+ <reg32 offset="0x002b" name="RBBM_WAIT_IDLE_CLOCKS_CTL"/>
+ <reg32 offset="0x002f" name="RBBM_INTERFACE_HANG_INT_CTL"/>
+ <reg32 offset="0x0034" name="RBBM_INTERFACE_HANG_MASK_CTL4"/>
+ <reg32 offset="0x0036" name="RBBM_INT_CLEAR_CMD"/>
+ <reg32 offset="0x0037" name="RBBM_INT_0_MASK"/>
+ <reg32 offset="0x003e" name="RBBM_RBBM_CTL"/>
+ <reg32 offset="0x003f" name="RBBM_AHB_DEBUG_CTL"/>
+ <reg32 offset="0x0041" name="RBBM_VBIF_DEBUG_CTL"/>
+ <reg32 offset="0x0042" name="RBBM_CLOCK_CTL2"/>
+ <reg32 offset="0x0045" name="RBBM_BLOCK_SW_RESET_CMD"/>
+ <reg32 offset="0x0047" name="RBBM_RESET_CYCLES"/>
+ <reg32 offset="0x0049" name="RBBM_EXT_TRACE_BUS_CTL"/>
+ <reg32 offset="0x004a" name="RBBM_CFG_DEBBUS_SEL_A"/>
+ <reg32 offset="0x004b" name="RBBM_CFG_DEBBUS_SEL_B"/>
+ <reg32 offset="0x004c" name="RBBM_CFG_DEBBUS_SEL_C"/>
+ <reg32 offset="0x004d" name="RBBM_CFG_DEBBUS_SEL_D"/>
+ <reg32 offset="0x0098" name="RBBM_POWER_CNTL_IP">
+ <bitfield name="SW_COLLAPSE" pos="0" type="boolean"/>
+ <bitfield name="SP_TP_PWR_ON" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x009c" name="RBBM_PERFCTR_CP_0_LO"/>
+ <reg32 offset="0x009d" name="RBBM_PERFCTR_CP_0_HI"/>
+ <reg32 offset="0x009e" name="RBBM_PERFCTR_CP_1_LO"/>
+ <reg32 offset="0x009f" name="RBBM_PERFCTR_CP_1_HI"/>
+ <reg32 offset="0x00a0" name="RBBM_PERFCTR_CP_2_LO"/>
+ <reg32 offset="0x00a1" name="RBBM_PERFCTR_CP_2_HI"/>
+ <reg32 offset="0x00a2" name="RBBM_PERFCTR_CP_3_LO"/>
+ <reg32 offset="0x00a3" name="RBBM_PERFCTR_CP_3_HI"/>
+ <reg32 offset="0x00a4" name="RBBM_PERFCTR_CP_4_LO"/>
+ <reg32 offset="0x00a5" name="RBBM_PERFCTR_CP_4_HI"/>
+ <reg32 offset="0x00a6" name="RBBM_PERFCTR_CP_5_LO"/>
+ <reg32 offset="0x00a7" name="RBBM_PERFCTR_CP_5_HI"/>
+ <reg32 offset="0x00a8" name="RBBM_PERFCTR_CP_6_LO"/>
+ <reg32 offset="0x00a9" name="RBBM_PERFCTR_CP_6_HI"/>
+ <reg32 offset="0x00aa" name="RBBM_PERFCTR_CP_7_LO"/>
+ <reg32 offset="0x00ab" name="RBBM_PERFCTR_CP_7_HI"/>
+ <reg32 offset="0x00ac" name="RBBM_PERFCTR_RBBM_0_LO"/>
+ <reg32 offset="0x00ad" name="RBBM_PERFCTR_RBBM_0_HI"/>
+ <reg32 offset="0x00ae" name="RBBM_PERFCTR_RBBM_1_LO"/>
+ <reg32 offset="0x00af" name="RBBM_PERFCTR_RBBM_1_HI"/>
+ <reg32 offset="0x00b0" name="RBBM_PERFCTR_RBBM_2_LO"/>
+ <reg32 offset="0x00b1" name="RBBM_PERFCTR_RBBM_2_HI"/>
+ <reg32 offset="0x00b2" name="RBBM_PERFCTR_RBBM_3_LO"/>
+ <reg32 offset="0x00b3" name="RBBM_PERFCTR_RBBM_3_HI"/>
+ <reg32 offset="0x00b4" name="RBBM_PERFCTR_PC_0_LO"/>
+ <reg32 offset="0x00b5" name="RBBM_PERFCTR_PC_0_HI"/>
+ <reg32 offset="0x00b6" name="RBBM_PERFCTR_PC_1_LO"/>
+ <reg32 offset="0x00b7" name="RBBM_PERFCTR_PC_1_HI"/>
+ <reg32 offset="0x00b8" name="RBBM_PERFCTR_PC_2_LO"/>
+ <reg32 offset="0x00b9" name="RBBM_PERFCTR_PC_2_HI"/>
+ <reg32 offset="0x00ba" name="RBBM_PERFCTR_PC_3_LO"/>
+ <reg32 offset="0x00bb" name="RBBM_PERFCTR_PC_3_HI"/>
+ <reg32 offset="0x00bc" name="RBBM_PERFCTR_PC_4_LO"/>
+ <reg32 offset="0x00bd" name="RBBM_PERFCTR_PC_4_HI"/>
+ <reg32 offset="0x00be" name="RBBM_PERFCTR_PC_5_LO"/>
+ <reg32 offset="0x00bf" name="RBBM_PERFCTR_PC_5_HI"/>
+ <reg32 offset="0x00c0" name="RBBM_PERFCTR_PC_6_LO"/>
+ <reg32 offset="0x00c1" name="RBBM_PERFCTR_PC_6_HI"/>
+ <reg32 offset="0x00c2" name="RBBM_PERFCTR_PC_7_LO"/>
+ <reg32 offset="0x00c3" name="RBBM_PERFCTR_PC_7_HI"/>
+ <reg32 offset="0x00c4" name="RBBM_PERFCTR_VFD_0_LO"/>
+ <reg32 offset="0x00c5" name="RBBM_PERFCTR_VFD_0_HI"/>
+ <reg32 offset="0x00c6" name="RBBM_PERFCTR_VFD_1_LO"/>
+ <reg32 offset="0x00c7" name="RBBM_PERFCTR_VFD_1_HI"/>
+ <reg32 offset="0x00c8" name="RBBM_PERFCTR_VFD_2_LO"/>
+ <reg32 offset="0x00c9" name="RBBM_PERFCTR_VFD_2_HI"/>
+ <reg32 offset="0x00ca" name="RBBM_PERFCTR_VFD_3_LO"/>
+ <reg32 offset="0x00cb" name="RBBM_PERFCTR_VFD_3_HI"/>
+ <reg32 offset="0x00cc" name="RBBM_PERFCTR_VFD_4_LO"/>
+ <reg32 offset="0x00cd" name="RBBM_PERFCTR_VFD_4_HI"/>
+ <reg32 offset="0x00ce" name="RBBM_PERFCTR_VFD_5_LO"/>
+ <reg32 offset="0x00cf" name="RBBM_PERFCTR_VFD_5_HI"/>
+ <reg32 offset="0x00d0" name="RBBM_PERFCTR_VFD_6_LO"/>
+ <reg32 offset="0x00d1" name="RBBM_PERFCTR_VFD_6_HI"/>
+ <reg32 offset="0x00d2" name="RBBM_PERFCTR_VFD_7_LO"/>
+ <reg32 offset="0x00d3" name="RBBM_PERFCTR_VFD_7_HI"/>
+ <reg32 offset="0x00d4" name="RBBM_PERFCTR_HLSQ_0_LO"/>
+ <reg32 offset="0x00d5" name="RBBM_PERFCTR_HLSQ_0_HI"/>
+ <reg32 offset="0x00d6" name="RBBM_PERFCTR_HLSQ_1_LO"/>
+ <reg32 offset="0x00d7" name="RBBM_PERFCTR_HLSQ_1_HI"/>
+ <reg32 offset="0x00d8" name="RBBM_PERFCTR_HLSQ_2_LO"/>
+ <reg32 offset="0x00d9" name="RBBM_PERFCTR_HLSQ_2_HI"/>
+ <reg32 offset="0x00da" name="RBBM_PERFCTR_HLSQ_3_LO"/>
+ <reg32 offset="0x00db" name="RBBM_PERFCTR_HLSQ_3_HI"/>
+ <reg32 offset="0x00dc" name="RBBM_PERFCTR_HLSQ_4_LO"/>
+ <reg32 offset="0x00dd" name="RBBM_PERFCTR_HLSQ_4_HI"/>
+ <reg32 offset="0x00de" name="RBBM_PERFCTR_HLSQ_5_LO"/>
+ <reg32 offset="0x00df" name="RBBM_PERFCTR_HLSQ_5_HI"/>
+ <reg32 offset="0x00e0" name="RBBM_PERFCTR_HLSQ_6_LO"/>
+ <reg32 offset="0x00e1" name="RBBM_PERFCTR_HLSQ_6_HI"/>
+ <reg32 offset="0x00e2" name="RBBM_PERFCTR_HLSQ_7_LO"/>
+ <reg32 offset="0x00e3" name="RBBM_PERFCTR_HLSQ_7_HI"/>
+ <reg32 offset="0x00e4" name="RBBM_PERFCTR_VPC_0_LO"/>
+ <reg32 offset="0x00e5" name="RBBM_PERFCTR_VPC_0_HI"/>
+ <reg32 offset="0x00e6" name="RBBM_PERFCTR_VPC_1_LO"/>
+ <reg32 offset="0x00e7" name="RBBM_PERFCTR_VPC_1_HI"/>
+ <reg32 offset="0x00e8" name="RBBM_PERFCTR_VPC_2_LO"/>
+ <reg32 offset="0x00e9" name="RBBM_PERFCTR_VPC_2_HI"/>
+ <reg32 offset="0x00ea" name="RBBM_PERFCTR_VPC_3_LO"/>
+ <reg32 offset="0x00eb" name="RBBM_PERFCTR_VPC_3_HI"/>
+ <reg32 offset="0x00ec" name="RBBM_PERFCTR_CCU_0_LO"/>
+ <reg32 offset="0x00ed" name="RBBM_PERFCTR_CCU_0_HI"/>
+ <reg32 offset="0x00ee" name="RBBM_PERFCTR_CCU_1_LO"/>
+ <reg32 offset="0x00ef" name="RBBM_PERFCTR_CCU_1_HI"/>
+ <reg32 offset="0x00f0" name="RBBM_PERFCTR_CCU_2_LO"/>
+ <reg32 offset="0x00f1" name="RBBM_PERFCTR_CCU_2_HI"/>
+ <reg32 offset="0x00f2" name="RBBM_PERFCTR_CCU_3_LO"/>
+ <reg32 offset="0x00f3" name="RBBM_PERFCTR_CCU_3_HI"/>
+ <reg32 offset="0x00f4" name="RBBM_PERFCTR_TSE_0_LO"/>
+ <reg32 offset="0x00f5" name="RBBM_PERFCTR_TSE_0_HI"/>
+ <reg32 offset="0x00f6" name="RBBM_PERFCTR_TSE_1_LO"/>
+ <reg32 offset="0x00f7" name="RBBM_PERFCTR_TSE_1_HI"/>
+ <reg32 offset="0x00f8" name="RBBM_PERFCTR_TSE_2_LO"/>
+ <reg32 offset="0x00f9" name="RBBM_PERFCTR_TSE_2_HI"/>
+ <reg32 offset="0x00fa" name="RBBM_PERFCTR_TSE_3_LO"/>
+ <reg32 offset="0x00fb" name="RBBM_PERFCTR_TSE_3_HI"/>
+ <reg32 offset="0x00fc" name="RBBM_PERFCTR_RAS_0_LO"/>
+ <reg32 offset="0x00fd" name="RBBM_PERFCTR_RAS_0_HI"/>
+ <reg32 offset="0x00fe" name="RBBM_PERFCTR_RAS_1_LO"/>
+ <reg32 offset="0x00ff" name="RBBM_PERFCTR_RAS_1_HI"/>
+ <reg32 offset="0x0100" name="RBBM_PERFCTR_RAS_2_LO"/>
+ <reg32 offset="0x0101" name="RBBM_PERFCTR_RAS_2_HI"/>
+ <reg32 offset="0x0102" name="RBBM_PERFCTR_RAS_3_LO"/>
+ <reg32 offset="0x0103" name="RBBM_PERFCTR_RAS_3_HI"/>
+ <reg32 offset="0x0104" name="RBBM_PERFCTR_UCHE_0_LO"/>
+ <reg32 offset="0x0105" name="RBBM_PERFCTR_UCHE_0_HI"/>
+ <reg32 offset="0x0106" name="RBBM_PERFCTR_UCHE_1_LO"/>
+ <reg32 offset="0x0107" name="RBBM_PERFCTR_UCHE_1_HI"/>
+ <reg32 offset="0x0108" name="RBBM_PERFCTR_UCHE_2_LO"/>
+ <reg32 offset="0x0109" name="RBBM_PERFCTR_UCHE_2_HI"/>
+ <reg32 offset="0x010a" name="RBBM_PERFCTR_UCHE_3_LO"/>
+ <reg32 offset="0x010b" name="RBBM_PERFCTR_UCHE_3_HI"/>
+ <reg32 offset="0x010c" name="RBBM_PERFCTR_UCHE_4_LO"/>
+ <reg32 offset="0x010d" name="RBBM_PERFCTR_UCHE_4_HI"/>
+ <reg32 offset="0x010e" name="RBBM_PERFCTR_UCHE_5_LO"/>
+ <reg32 offset="0x010f" name="RBBM_PERFCTR_UCHE_5_HI"/>
+ <reg32 offset="0x0110" name="RBBM_PERFCTR_UCHE_6_LO"/>
+ <reg32 offset="0x0111" name="RBBM_PERFCTR_UCHE_6_HI"/>
+ <reg32 offset="0x0112" name="RBBM_PERFCTR_UCHE_7_LO"/>
+ <reg32 offset="0x0113" name="RBBM_PERFCTR_UCHE_7_HI"/>
+ <reg32 offset="0x0114" name="RBBM_PERFCTR_TP_0_LO"/>
+ <reg32 offset="0x0115" name="RBBM_PERFCTR_TP_0_HI"/>
+ <reg32 offset="0x0116" name="RBBM_PERFCTR_TP_1_LO"/>
+ <reg32 offset="0x0117" name="RBBM_PERFCTR_TP_1_HI"/>
+ <reg32 offset="0x0118" name="RBBM_PERFCTR_TP_2_LO"/>
+ <reg32 offset="0x0119" name="RBBM_PERFCTR_TP_2_HI"/>
+ <reg32 offset="0x011a" name="RBBM_PERFCTR_TP_3_LO"/>
+ <reg32 offset="0x011b" name="RBBM_PERFCTR_TP_3_HI"/>
+ <reg32 offset="0x011c" name="RBBM_PERFCTR_TP_4_LO"/>
+ <reg32 offset="0x011d" name="RBBM_PERFCTR_TP_4_HI"/>
+ <reg32 offset="0x011e" name="RBBM_PERFCTR_TP_5_LO"/>
+ <reg32 offset="0x011f" name="RBBM_PERFCTR_TP_5_HI"/>
+ <reg32 offset="0x0120" name="RBBM_PERFCTR_TP_6_LO"/>
+ <reg32 offset="0x0121" name="RBBM_PERFCTR_TP_6_HI"/>
+ <reg32 offset="0x0122" name="RBBM_PERFCTR_TP_7_LO"/>
+ <reg32 offset="0x0123" name="RBBM_PERFCTR_TP_7_HI"/>
+ <reg32 offset="0x0124" name="RBBM_PERFCTR_SP_0_LO"/>
+ <reg32 offset="0x0125" name="RBBM_PERFCTR_SP_0_HI"/>
+ <reg32 offset="0x0126" name="RBBM_PERFCTR_SP_1_LO"/>
+ <reg32 offset="0x0127" name="RBBM_PERFCTR_SP_1_HI"/>
+ <reg32 offset="0x0128" name="RBBM_PERFCTR_SP_2_LO"/>
+ <reg32 offset="0x0129" name="RBBM_PERFCTR_SP_2_HI"/>
+ <reg32 offset="0x012a" name="RBBM_PERFCTR_SP_3_LO"/>
+ <reg32 offset="0x012b" name="RBBM_PERFCTR_SP_3_HI"/>
+ <reg32 offset="0x012c" name="RBBM_PERFCTR_SP_4_LO"/>
+ <reg32 offset="0x012d" name="RBBM_PERFCTR_SP_4_HI"/>
+ <reg32 offset="0x012e" name="RBBM_PERFCTR_SP_5_LO"/>
+ <reg32 offset="0x012f" name="RBBM_PERFCTR_SP_5_HI"/>
+ <reg32 offset="0x0130" name="RBBM_PERFCTR_SP_6_LO"/>
+ <reg32 offset="0x0131" name="RBBM_PERFCTR_SP_6_HI"/>
+ <reg32 offset="0x0132" name="RBBM_PERFCTR_SP_7_LO"/>
+ <reg32 offset="0x0133" name="RBBM_PERFCTR_SP_7_HI"/>
+ <reg32 offset="0x0134" name="RBBM_PERFCTR_SP_8_LO"/>
+ <reg32 offset="0x0135" name="RBBM_PERFCTR_SP_8_HI"/>
+ <reg32 offset="0x0136" name="RBBM_PERFCTR_SP_9_LO"/>
+ <reg32 offset="0x0137" name="RBBM_PERFCTR_SP_9_HI"/>
+ <reg32 offset="0x0138" name="RBBM_PERFCTR_SP_10_LO"/>
+ <reg32 offset="0x0139" name="RBBM_PERFCTR_SP_10_HI"/>
+ <reg32 offset="0x013a" name="RBBM_PERFCTR_SP_11_LO"/>
+ <reg32 offset="0x013b" name="RBBM_PERFCTR_SP_11_HI"/>
+ <reg32 offset="0x013c" name="RBBM_PERFCTR_RB_0_LO"/>
+ <reg32 offset="0x013d" name="RBBM_PERFCTR_RB_0_HI"/>
+ <reg32 offset="0x013e" name="RBBM_PERFCTR_RB_1_LO"/>
+ <reg32 offset="0x013f" name="RBBM_PERFCTR_RB_1_HI"/>
+ <reg32 offset="0x0140" name="RBBM_PERFCTR_RB_2_LO"/>
+ <reg32 offset="0x0141" name="RBBM_PERFCTR_RB_2_HI"/>
+ <reg32 offset="0x0142" name="RBBM_PERFCTR_RB_3_LO"/>
+ <reg32 offset="0x0143" name="RBBM_PERFCTR_RB_3_HI"/>
+ <reg32 offset="0x0144" name="RBBM_PERFCTR_RB_4_LO"/>
+ <reg32 offset="0x0145" name="RBBM_PERFCTR_RB_4_HI"/>
+ <reg32 offset="0x0146" name="RBBM_PERFCTR_RB_5_LO"/>
+ <reg32 offset="0x0147" name="RBBM_PERFCTR_RB_5_HI"/>
+ <reg32 offset="0x0148" name="RBBM_PERFCTR_RB_6_LO"/>
+ <reg32 offset="0x0149" name="RBBM_PERFCTR_RB_6_HI"/>
+ <reg32 offset="0x014a" name="RBBM_PERFCTR_RB_7_LO"/>
+ <reg32 offset="0x014b" name="RBBM_PERFCTR_RB_7_HI"/>
+ <reg32 offset="0x014c" name="RBBM_PERFCTR_VSC_0_LO"/>
+ <reg32 offset="0x014d" name="RBBM_PERFCTR_VSC_0_HI"/>
+ <reg32 offset="0x014e" name="RBBM_PERFCTR_VSC_1_LO"/>
+ <reg32 offset="0x014f" name="RBBM_PERFCTR_VSC_1_HI"/>
+ <reg32 offset="0x0166" name="RBBM_PERFCTR_PWR_0_LO"/>
+ <reg32 offset="0x0167" name="RBBM_PERFCTR_PWR_0_HI"/>
+ <reg32 offset="0x0168" name="RBBM_PERFCTR_PWR_1_LO"/>
+ <reg32 offset="0x0169" name="RBBM_PERFCTR_PWR_1_HI"/>
+ <reg32 offset="0x016e" name="RBBM_ALWAYSON_COUNTER_LO"/>
+ <reg32 offset="0x016f" name="RBBM_ALWAYSON_COUNTER_HI"/>
+ <array offset="0x0068" name="RBBM_CLOCK_CTL_SP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x006c" name="RBBM_CLOCK_CTL2_SP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0070" name="RBBM_CLOCK_HYST_SP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0074" name="RBBM_CLOCK_DELAY_SP" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0078" name="RBBM_CLOCK_CTL_RB" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x007c" name="RBBM_CLOCK_CTL2_RB" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0082" name="RBBM_CLOCK_CTL_MARB_CCU" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0086" name="RBBM_CLOCK_HYST_RB_MARB_CCU" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <reg32 offset="0x0080" name="RBBM_CLOCK_HYST_COM_DCOM"/>
+ <reg32 offset="0x0081" name="RBBM_CLOCK_CTL_COM_DCOM"/>
+ <reg32 offset="0x008a" name="RBBM_CLOCK_CTL_HLSQ"/>
+ <reg32 offset="0x008b" name="RBBM_CLOCK_HYST_HLSQ"/>
+ <reg32 offset="0x008c" name="RBBM_CLOCK_DELAY_HLSQ"/>
+ <bitset name="A4XX_CGC_HLSQ">
+ <bitfield name="EARLY_CYC" low="20" high="22" type="uint"/>
+ </bitset>
+ <reg32 offset="0x008d" name="RBBM_CLOCK_DELAY_COM_DCOM"/>
+ <array offset="0x008e" name="RBBM_CLOCK_DELAY_RB_MARB_CCU_L1" stride="1" length="4">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <bitset name="A4XX_INT0">
+ <bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
+ <bitfield name="RBBM_AHB_ERROR" pos="1" type="boolean"/>
+ <bitfield name="RBBM_REG_TIMEOUT" pos="2" type="boolean"/>
+ <bitfield name="RBBM_ME_MS_TIMEOUT" pos="3" type="boolean"/>
+ <bitfield name="RBBM_PFP_MS_TIMEOUT" pos="4" type="boolean"/>
+ <bitfield name="RBBM_ATB_BUS_OVERFLOW" pos="5" type="boolean"/>
+ <bitfield name="VFD_ERROR" pos="6" type="boolean"/>
+ <bitfield name="CP_SW_INT" pos="7" type="boolean"/>
+ <bitfield name="CP_T0_PACKET_IN_IB" pos="8" type="boolean"/>
+ <bitfield name="CP_OPCODE_ERROR" pos="9" type="boolean"/>
+ <bitfield name="CP_RESERVED_BIT_ERROR" pos="10" type="boolean"/>
+ <bitfield name="CP_HW_FAULT" pos="11" type="boolean"/>
+ <bitfield name="CP_DMA" pos="12" type="boolean"/>
+ <bitfield name="CP_IB2_INT" pos="13" type="boolean"/>
+ <bitfield name="CP_IB1_INT" pos="14" type="boolean"/>
+ <bitfield name="CP_RB_INT" pos="15" type="boolean"/>
+ <bitfield name="CP_REG_PROTECT_FAULT" pos="16" type="boolean"/>
+ <bitfield name="CP_RB_DONE_TS" pos="17" type="boolean"/>
+ <bitfield name="CP_VS_DONE_TS" pos="18" type="boolean"/>
+ <bitfield name="CP_PS_DONE_TS" pos="19" type="boolean"/>
+ <bitfield name="CACHE_FLUSH_TS" pos="20" type="boolean"/>
+ <bitfield name="CP_AHB_ERROR_HALT" pos="21" type="boolean"/>
+ <bitfield name="MISC_HANG_DETECT" pos="24" type="boolean"/>
+ <bitfield name="UCHE_OOB_ACCESS" pos="25" type="boolean"/>
+ </bitset>
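+	<!--
+		Presumably this is the bit layout shared by RBBM_INT_0_MASK (0x0037),
+		RBBM_INT_0_STATUS (0x017d) and RBBM_INT_CLEAR_CMD (0x0036).  A
+		hypothetical sketch of building a mask from the bit positions above (not
+		necessarily how any real driver does it):
+
+		    uint32_t int0_mask = (1u << 1)  |   /* RBBM_AHB_ERROR */
+		                         (1u << 9)  |   /* CP_OPCODE_ERROR */
+		                         (1u << 11) |   /* CP_HW_FAULT */
+		                         (1u << 16);    /* CP_REG_PROTECT_FAULT */
+	-->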
+
+ <reg32 offset="0x0099" name="RBBM_SP_REGFILE_SLEEP_CNTL_0"/>
+ <reg32 offset="0x009a" name="RBBM_SP_REGFILE_SLEEP_CNTL_1"/>
+ <reg32 offset="0x0170" name="RBBM_PERFCTR_CTL"/>
+ <reg32 offset="0x0171" name="RBBM_PERFCTR_LOAD_CMD0"/>
+ <reg32 offset="0x0172" name="RBBM_PERFCTR_LOAD_CMD1"/>
+ <reg32 offset="0x0173" name="RBBM_PERFCTR_LOAD_CMD2"/>
+ <reg32 offset="0x0174" name="RBBM_PERFCTR_LOAD_VALUE_LO"/>
+ <reg32 offset="0x0175" name="RBBM_PERFCTR_LOAD_VALUE_HI"/>
+ <reg32 offset="0x0176" name="RBBM_PERFCTR_RBBM_SEL_0" type="a4xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x0177" name="RBBM_PERFCTR_RBBM_SEL_1" type="a4xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x0178" name="RBBM_PERFCTR_RBBM_SEL_2" type="a4xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x0179" name="RBBM_PERFCTR_RBBM_SEL_3" type="a4xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x017a" name="RBBM_GPU_BUSY_MASKED"/>
+ <reg32 offset="0x017d" name="RBBM_INT_0_STATUS"/>
+ <reg32 offset="0x0182" name="RBBM_CLOCK_STATUS"/>
+ <reg32 offset="0x0189" name="RBBM_AHB_STATUS"/>
+ <reg32 offset="0x018c" name="RBBM_AHB_ME_SPLIT_STATUS"/>
+ <reg32 offset="0x018d" name="RBBM_AHB_PFP_SPLIT_STATUS"/>
+ <reg32 offset="0x018f" name="RBBM_AHB_ERROR_STATUS"/>
+ <reg32 offset="0x0191" name="RBBM_STATUS">
+ <bitfield name="HI_BUSY" pos="0" type="boolean"/>
+ <bitfield name="CP_ME_BUSY" pos="1" type="boolean"/>
+ <bitfield name="CP_PFP_BUSY" pos="2" type="boolean"/>
+ <bitfield name="CP_NRT_BUSY" pos="14" type="boolean"/>
+ <bitfield name="VBIF_BUSY" pos="15" type="boolean"/>
+ <bitfield name="TSE_BUSY" pos="16" type="boolean"/>
+ <bitfield name="RAS_BUSY" pos="17" type="boolean"/>
+ <bitfield name="RB_BUSY" pos="18" type="boolean"/>
+ <bitfield name="PC_DCALL_BUSY" pos="19" type="boolean"/>
+ <bitfield name="PC_VSD_BUSY" pos="20" type="boolean"/>
+ <bitfield name="VFD_BUSY" pos="21" type="boolean"/>
+ <bitfield name="VPC_BUSY" pos="22" type="boolean"/>
+ <bitfield name="UCHE_BUSY" pos="23" type="boolean"/>
+ <bitfield name="SP_BUSY" pos="24" type="boolean"/>
+ <bitfield name="TPL1_BUSY" pos="25" type="boolean"/>
+ <bitfield name="MARB_BUSY" pos="26" type="boolean"/>
+ <bitfield name="VSC_BUSY" pos="27" type="boolean"/>
+ <bitfield name="ARB_BUSY" pos="28" type="boolean"/>
+ <bitfield name="HLSQ_BUSY" pos="29" type="boolean"/>
+ <bitfield name="GPU_BUSY_NOHC" pos="30" type="boolean"/>
+ <bitfield name="GPU_BUSY" pos="31" type="boolean"/>
+ </reg32>
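+	<!--
+		The busy bits above look like the usual per-block idle/busy status; a
+		hypothetical sketch of waiting for idle, using the same placeholder
+		gpu_read32() accessor as in the perfcounter example above:
+
+		    /* spin until RBBM_STATUS (0x0191) reports GPU_BUSY (bit 31) clear */
+		    while (gpu_read32(0x0191) & (1u << 31))
+		        ;
+	-->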
+ <reg32 offset="0x019f" name="RBBM_INTERFACE_RRDY_STATUS5"/>
+ <reg32 offset="0x01b0" name="RBBM_POWER_STATUS">
+ <bitfield name="SP_TP_PWR_ON" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01b8" name="RBBM_WAIT_IDLE_CLOCKS_CTL2"/>
+
+ <!-- CP registers -->
+ <reg32 offset="0x0228" name="CP_SCRATCH_UMASK"/>
+ <reg32 offset="0x0229" name="CP_SCRATCH_ADDR"/>
+ <reg32 offset="0x0200" name="CP_RB_BASE"/>
+ <reg32 offset="0x0201" name="CP_RB_CNTL"/>
+ <reg32 offset="0x0205" name="CP_RB_WPTR"/>
+ <reg32 offset="0x0203" name="CP_RB_RPTR_ADDR"/>
+ <reg32 offset="0x0204" name="CP_RB_RPTR"/>
+ <reg32 offset="0x0206" name="CP_IB1_BASE"/>
+ <reg32 offset="0x0207" name="CP_IB1_BUFSZ"/>
+ <reg32 offset="0x0208" name="CP_IB2_BASE"/>
+ <reg32 offset="0x0209" name="CP_IB2_BUFSZ"/>
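+	<!--
+		These presumably work like on earlier adreno generations: the kernel
+		allocates a ring buffer (CP_RB_BASE / CP_RB_CNTL), appends PM4 packets
+		and kicks the CP by writing the new write pointer (in dwords) to
+		CP_RB_WPTR, while the CP advances CP_RB_RPTR as it consumes packets
+		(and can mirror it to memory via CP_RB_RPTR_ADDR).  CP_IB1_BASE/BUFSZ
+		and CP_IB2_BASE/BUFSZ track the two levels of indirect buffers that
+		ring packets can branch into.  Just an educated guess from the register
+		names plus a3xx behaviour.
+	-->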
+ <reg32 offset="0x020c" name="CP_ME_NRT_ADDR"/>
+ <reg32 offset="0x020d" name="CP_ME_NRT_DATA"/>
+ <reg32 offset="0x0217" name="CP_ME_RB_DONE_DATA"/>
+ <reg32 offset="0x0219" name="CP_QUEUE_THRESH2"/>
+ <reg32 offset="0x021b" name="CP_MERCIU_SIZE"/>
+ <reg32 offset="0x021c" name="CP_ROQ_ADDR"/>
+ <reg32 offset="0x021d" name="CP_ROQ_DATA"/>
+ <reg32 offset="0x021e" name="CP_MEQ_ADDR"/>
+ <reg32 offset="0x021f" name="CP_MEQ_DATA"/>
+ <reg32 offset="0x0220" name="CP_MERCIU_ADDR"/>
+ <reg32 offset="0x0221" name="CP_MERCIU_DATA"/>
+ <reg32 offset="0x0222" name="CP_MERCIU_DATA2"/>
+ <reg32 offset="0x0223" name="CP_PFP_UCODE_ADDR"/>
+ <reg32 offset="0x0224" name="CP_PFP_UCODE_DATA"/>
+ <reg32 offset="0x0225" name="CP_ME_RAM_WADDR"/>
+ <reg32 offset="0x0226" name="CP_ME_RAM_RADDR"/>
+ <reg32 offset="0x0227" name="CP_ME_RAM_DATA"/>
+ <reg32 offset="0x022a" name="CP_PREEMPT"/>
+ <reg32 offset="0x022c" name="CP_CNTL"/>
+ <reg32 offset="0x022d" name="CP_ME_CNTL"/>
+ <reg32 offset="0x022e" name="CP_DEBUG"/>
+ <reg32 offset="0x0231" name="CP_DEBUG_ECO_CONTROL"/>
+ <reg32 offset="0x0232" name="CP_DRAW_STATE_ADDR"/>
+ <array offset="0x0240" name="CP_PROTECT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG" type="adreno_cp_protect"/>
+ </array>
+ <reg32 offset="0x0250" name="CP_PROTECT_CTRL"/>
+ <reg32 offset="0x04c0" name="CP_ST_BASE"/>
+ <reg32 offset="0x04ce" name="CP_STQ_AVAIL"/>
+ <reg32 offset="0x04d0" name="CP_MERCIU_STAT"/>
+ <reg32 offset="0x04d2" name="CP_WFI_PEND_CTR"/>
+ <reg32 offset="0x04d8" name="CP_HW_FAULT"/>
+ <reg32 offset="0x04da" name="CP_PROTECT_STATUS"/>
+ <reg32 offset="0x04dd" name="CP_EVENTS_IN_FLIGHT"/>
+ <reg32 offset="0x0500" name="CP_PERFCTR_CP_SEL_0" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0501" name="CP_PERFCTR_CP_SEL_1" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0502" name="CP_PERFCTR_CP_SEL_2" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0503" name="CP_PERFCTR_CP_SEL_3" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0504" name="CP_PERFCTR_CP_SEL_4" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0505" name="CP_PERFCTR_CP_SEL_5" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0506" name="CP_PERFCTR_CP_SEL_6" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0507" name="CP_PERFCTR_CP_SEL_7" type="a4xx_cp_perfcounter_select"/>
+ <reg32 offset="0x050b" name="CP_PERFCOMBINER_SELECT"/>
+ <array offset="0x0578" name="CP_SCRATCH" stride="1" length="23">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+
+
+ <!-- SP registers -->
+ <reg32 offset="0x0ec0" name="SP_VS_STATUS"/>
+ <reg32 offset="0x0ec3" name="SP_MODE_CONTROL"/>
+
+ <reg32 offset="0x0ec4" name="SP_PERFCTR_SP_SEL_0" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec5" name="SP_PERFCTR_SP_SEL_1" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec6" name="SP_PERFCTR_SP_SEL_2" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec7" name="SP_PERFCTR_SP_SEL_3" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec8" name="SP_PERFCTR_SP_SEL_4" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ec9" name="SP_PERFCTR_SP_SEL_5" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0eca" name="SP_PERFCTR_SP_SEL_6" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ecb" name="SP_PERFCTR_SP_SEL_7" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ecc" name="SP_PERFCTR_SP_SEL_8" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ecd" name="SP_PERFCTR_SP_SEL_9" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ece" name="SP_PERFCTR_SP_SEL_10" type="a4xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ecf" name="SP_PERFCTR_SP_SEL_11" type="a4xx_sp_perfcounter_select"/>
+
+ <reg32 offset="0x22c0" name="SP_SP_CTRL_REG">
+ <bitfield name="BINNING_PASS" pos="19" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x22c1" name="SP_INSTR_CACHE_CTRL">
+ <!-- set when VS in buffer mode: -->
+ <bitfield name="VS_BUFFER" pos="7" type="boolean"/>
+ <!-- set when FS in buffer mode: -->
+ <bitfield name="FS_BUFFER" pos="8" type="boolean"/>
+		<!-- set when both VS and FS are in buffer mode: -->
+ <bitfield name="INSTR_BUFFER" pos="10" type="boolean"/>
+ <!-- TODO other bits probably matter when other stages active? -->
+ </reg32>
+
+ <bitset name="a4xx_sp_vs_fs_ctrl_reg0" inline="yes">
+ <!--
+ NOTE that SP_{VS,FS}_CTRL_REG1 are different, but so far REG0
+ appears to be the same..
+ -->
+ <bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
+ <!-- VARYING bit only for FS.. think it controls emitting (ei) flag? -->
+ <bitfield name="VARYING" pos="1" type="boolean"/>
+ <!-- maybe CACHEINVALID is two bits?? -->
+ <bitfield name="CACHEINVALID" pos="2" type="boolean"/>
+ <doc>
+ The full/half register footprint is in units of four components,
+ so if r0.x is used, that counts as all of r0.[xyzw] as used.
+ There are separate full/half register footprint values as the
+ full and half registers are independent (not overlapping).
+ Presumably the thread scheduler hardware allocates the full/half
+ register names from the actual physical register file and
+ handles the register renaming.
+ </doc>
+ <bitfield name="HALFREGFOOTPRINT" low="4" high="9" type="uint"/>
+ <bitfield name="FULLREGFOOTPRINT" low="10" high="15" type="uint"/>
+ <!-- maybe INOUTREGOVERLAP is a bitflag? -->
+ <bitfield name="INOUTREGOVERLAP" low="18" high="19" type="uint"/>
+ <bitfield name="THREADSIZE" pos="20" type="a3xx_threadsize"/>
+ <bitfield name="SUPERTHREADMODE" pos="21" type="boolean"/>
+ <bitfield name="PIXLODENABLE" pos="22" type="boolean"/>
+ </bitset>
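+	<!--
+	 Illustrative example (an editor-added sketch, assuming the footprint is
+	 the highest register index used plus one): since the fields count whole
+	 vec4 registers, a shader whose highest full register touched is r3.y
+	 would presumably have FULLREGFOOTPRINT = 4, and one touching only hr0.x
+	 and hr1.z would have HALFREGFOOTPRINT = 2.
+	 -->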
+
+ <reg32 offset="0x22c4" name="SP_VS_CTRL_REG0" type="a4xx_sp_vs_fs_ctrl_reg0"/>
+ <reg32 offset="0x22c5" name="SP_VS_CTRL_REG1">
+ <bitfield name="CONSTLENGTH" low="0" high="7" type="uint"/>
+ <bitfield name="INITIALOUTSTANDING" low="24" high="30" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22c6" name="SP_VS_PARAM_REG">
+ <bitfield name="POSREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="PSIZEREGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="TOTALVSOUTVAR" low="20" high="31" type="uint"/>
+ </reg32>
+ <array offset="0x22c7" name="SP_VS_OUT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="8" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="9" high="12" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="24" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="25" high="28" type="hex"/>
+ </reg32>
+ </array>
+ <array offset="0x22d8" name="SP_VS_VPC_DST" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ These seem to be offsets for storage of the varyings.
+ Always seems to start from 8, possibly loc 0 and 4
+ are for gl_Position and gl_PointSize?
+ </doc>
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
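+	<!--
+	 Illustrative example (assuming loc 0 and 4 really are gl_Position and
+	 gl_PointSize, as guessed above, and that each vec4 varying advances the
+	 location by 4 components): with two vec4 varyings the first would
+	 typically get OUTLOC0 = 8 and the second OUTLOC1 = 12.
+	 -->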
+
+ <reg32 offset="0x22e0" name="SP_VS_OBJ_OFFSET_REG">
+ <!-- always 00000000: -->
+ <doc>
+ From register spec:
+ SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object
+ start offset in on chip RAM,
+ 128bit aligned
+ </doc>
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22e1" name="SP_VS_OBJ_START"/>
+ <reg32 offset="0x22e2" name="SP_VS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x22e3" name="SP_VS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x22e5" name="SP_VS_LENGTH_REG" type="uint"/>
+ <reg32 offset="0x22e8" name="SP_FS_CTRL_REG0" type="a4xx_sp_vs_fs_ctrl_reg0"/>
+ <reg32 offset="0x22e9" name="SP_FS_CTRL_REG1">
+ <bitfield name="CONSTLENGTH" low="0" high="7" type="uint"/>
+ <bitfield name="FACENESS" pos="19" type="boolean"/>
+ <bitfield name="VARYING" pos="20" type="boolean"/>
+ <bitfield name="FRAGCOORD" pos="21" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x22ea" name="SP_FS_OBJ_OFFSET_REG">
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x22eb" name="SP_FS_OBJ_START"/>
+ <reg32 offset="0x22ec" name="SP_FS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x22ed" name="SP_FS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x22ef" name="SP_FS_LENGTH_REG" type="uint"/>
+ <reg32 offset="0x22f0" name="SP_FS_OUTPUT_REG">
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ <bitfield name="DEPTH_ENABLE" pos="7" type="boolean"/>
+ <!-- TODO double check.. for now assume same as a3xx -->
+ <bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK_REGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <array offset="0x22f1" name="SP_FS_MRT" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="HALF_PRECISION" pos="8" type="boolean"/>
+ <bitfield name="COLOR_SINT" pos="10" type="boolean"/>
+ <bitfield name="COLOR_UINT" pos="11" type="boolean"/>
+ <bitfield name="MRTFORMAT" low="12" high="17" type="a4xx_color_fmt"/>
+ <bitfield name="COLOR_SRGB" pos="18" type="boolean"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x2300" name="SP_CS_CTRL_REG0" type="a4xx_sp_vs_fs_ctrl_reg0"/>
+ <reg32 offset="0x2301" name="SP_CS_OBJ_OFFSET_REG"/>
+ <reg32 offset="0x2302" name="SP_CS_OBJ_START"/>
+ <reg32 offset="0x2303" name="SP_CS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x2304" name="SP_CS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x2305" name="SP_CS_PVT_MEM_SIZE"/>
+ <reg32 offset="0x2306" name="SP_CS_LENGTH_REG" type="uint"/>
+ <reg32 offset="0x230d" name="SP_HS_OBJ_OFFSET_REG">
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x230e" name="SP_HS_OBJ_START"/>
+ <reg32 offset="0x230f" name="SP_HS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x2310" name="SP_HS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x2312" name="SP_HS_LENGTH_REG" type="uint"/>
+
+ <reg32 offset="0x231a" name="SP_DS_PARAM_REG">
+ <bitfield name="POSREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="TOTALGSOUTVAR" low="20" high="31" type="uint"/>
+ </reg32>
+ <array offset="0x231b" name="SP_DS_OUT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="8" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="9" high="12" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="24" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="25" high="28" type="hex"/>
+ </reg32>
+ </array>
+ <array offset="0x232c" name="SP_DS_VPC_DST" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ These seem to be offsets for storage of the varyings.
+ Always seems to start from 8, possibly loc 0 and 4
+ are for gl_Position and gl_PointSize?
+ </doc>
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x2334" name="SP_DS_OBJ_OFFSET_REG">
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2335" name="SP_DS_OBJ_START"/>
+ <reg32 offset="0x2336" name="SP_DS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x2337" name="SP_DS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x2339" name="SP_DS_LENGTH_REG" type="uint"/>
+
+ <reg32 offset="0x2341" name="SP_GS_PARAM_REG">
+ <bitfield name="POSREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="PRIMREGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="TOTALGSOUTVAR" low="20" high="31" type="uint"/>
+ </reg32>
+ <array offset="0x2342" name="SP_GS_OUT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="8" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="9" high="12" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="24" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="25" high="28" type="hex"/>
+ </reg32>
+ </array>
+ <array offset="0x2353" name="SP_GS_VPC_DST" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ These seem to be offsets for storage of the varyings.
+ Always seems to start from 8, possibly loc 0 and 4
+ are for gl_Position and gl_PointSize?
+ </doc>
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x235b" name="SP_GS_OBJ_OFFSET_REG">
+ <bitfield name="CONSTOBJECTOFFSET" low="16" high="24" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="25" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x235c" name="SP_GS_OBJ_START"/>
+ <reg32 offset="0x235d" name="SP_GS_PVT_MEM_PARAM"/>
+ <reg32 offset="0x235e" name="SP_GS_PVT_MEM_ADDR"/>
+ <reg32 offset="0x2360" name="SP_GS_LENGTH_REG" type="uint"/>
+
+ <!-- VPC registers -->
+ <reg32 offset="0x0e60" name="VPC_DEBUG_RAM_SEL"/>
+ <reg32 offset="0x0e61" name="VPC_DEBUG_RAM_READ"/>
+ <reg32 offset="0x0e64" name="VPC_DEBUG_ECO_CONTROL"/>
+ <reg32 offset="0x0e65" name="VPC_PERFCTR_VPC_SEL_0" type="a4xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e66" name="VPC_PERFCTR_VPC_SEL_1" type="a4xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e67" name="VPC_PERFCTR_VPC_SEL_2" type="a4xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e68" name="VPC_PERFCTR_VPC_SEL_3" type="a4xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x2140" name="VPC_ATTR">
+ <bitfield name="TOTALATTR" low="0" high="8" type="uint"/>
+ <!-- PSIZE bit set if gl_PointSize written: -->
+ <bitfield name="PSIZE" pos="9" type="boolean"/>
+ <bitfield name="THRDASSIGN" low="12" high="13" type="uint"/>
+ <bitfield name="ENABLE" pos="25" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2141" name="VPC_PACK">
+ <bitfield name="NUMBYPASSVAR" low="0" high="7" type="uint"/>
+ <bitfield name="NUMFPNONPOSVAR" low="8" high="15" type="uint"/>
+ <bitfield name="NUMNONPOSVSVAR" low="16" high="23" type="uint"/>
+ </reg32>
+ <array offset="0x2142" name="VPC_VARYING_INTERP" stride="1" length="8">
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+ <array offset="0x214a" name="VPC_VARYING_PS_REPL" stride="1" length="8">
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+
+ <reg32 offset="0x216e" name="VPC_SO_FLUSH_WADDR_3"/>
+
+ <!-- VSC registers -->
+ <reg32 offset="0x0c00" name="VSC_BIN_SIZE">
+ <bitfield name="WIDTH" low="0" high="4" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="5" high="9" shr="5" type="uint"/>
+ </reg32>
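+	<!--
+	 Illustrative encoding, based on the shr="5" fields above: WIDTH/HEIGHT
+	 are stored in units of 32 pixels, so a 32x32 bin would be programmed as
+	 WIDTH = 1, HEIGHT = 1, i.e. a register value of 0x00000021.
+	 -->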
+ <reg32 offset="0x0c01" name="VSC_SIZE_ADDRESS"/>
+ <reg32 offset="0x0c02" name="VSC_SIZE_ADDRESS2"/>
+ <reg32 offset="0x0c03" name="VSC_DEBUG_ECO_CONTROL"/>
+ <array offset="0x0c08" name="VSC_PIPE_CONFIG" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ Configures the mapping between VSC_PIPE buffer and
+ bin, X/Y specify the bin index in the horiz/vert
+ direction (0,0 is upper left, 0,1 is leftmost bin
+ on second row, and so on). W/H specify the number
+ of bins assigned to this VSC_PIPE in the horiz/vert
+ dimension.
+ </doc>
+ <bitfield name="X" low="0" high="9" type="uint"/>
+ <bitfield name="Y" low="10" high="19" type="uint"/>
+ <bitfield name="W" low="20" high="23" type="uint"/>
+ <bitfield name="H" low="24" high="27" type="uint"/>
+ </reg32>
+ </array>
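+	<!--
+	 Illustrative encoding of the mapping described above: a pipe covering a
+	 2x2 group of bins whose upper-left bin is (0,0) would be programmed as
+	 X = 0, Y = 0, W = 2, H = 2, i.e. a register value of 0x02200000.
+	 -->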
+ <array offset="0x0c10" name="VSC_PIPE_DATA_ADDRESS" stride="1" length="8">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0c18" name="VSC_PIPE_DATA_LENGTH" stride="1" length="8">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <reg32 offset="0x0c41" name="VSC_PIPE_PARTIAL_POSN_1"/>
+ <reg32 offset="0x0c50" name="VSC_PERFCTR_VSC_SEL_0" type="a4xx_vsc_perfcounter_select"/>
+ <reg32 offset="0x0c51" name="VSC_PERFCTR_VSC_SEL_1" type="a4xx_vsc_perfcounter_select"/>
+
+ <!-- VFD registers -->
+ <reg32 offset="0x0e40" name="VFD_DEBUG_CONTROL"/>
+ <reg32 offset="0x0e43" name="VFD_PERFCTR_VFD_SEL_0" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e44" name="VFD_PERFCTR_VFD_SEL_1" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e45" name="VFD_PERFCTR_VFD_SEL_2" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e46" name="VFD_PERFCTR_VFD_SEL_3" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e47" name="VFD_PERFCTR_VFD_SEL_4" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e48" name="VFD_PERFCTR_VFD_SEL_5" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e49" name="VFD_PERFCTR_VFD_SEL_6" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e4a" name="VFD_PERFCTR_VFD_SEL_7" type="a4xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x21d0" name="VGT_CL_INITIATOR"/>
+ <reg32 offset="0x21d9" name="VGT_EVENT_INITIATOR"/>
+ <reg32 offset="0x2200" name="VFD_CONTROL_0">
+ <doc>
+ TOTALATTRTOVS is # of attributes to vertex shader, in register
+ slots (ie. vec4+vec3 -> 7)
+ </doc>
+ <bitfield name="TOTALATTRTOVS" low="0" high="7" type="uint"/>
+ <doc>
+ BYPASSATTROVS seems to count varyings that are just directly
+ assigned from attributes (ie, "vFoo = aFoo;")
+ </doc>
+ <bitfield name="BYPASSATTROVS" low="9" high="16" type="uint"/>
+ <doc>STRMDECINSTRCNT is # of VFD_DECODE_INSTR registers valid</doc>
+ <bitfield name="STRMDECINSTRCNT" low="20" high="25" type="uint"/>
+ <doc>STRMFETCHINSTRCNT is # of VFD_FETCH_INSTR registers valid</doc>
+ <bitfield name="STRMFETCHINSTRCNT" low="26" high="31" type="uint"/>
+ </reg32>
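+	<!--
+	 Illustrative example (assuming one fetch and one decode instruction per
+	 attribute): a draw with a vec4 position and a vec3 normal would have
+	 TOTALATTRTOVS = 7, STRMFETCHINSTRCNT = 2 and STRMDECINSTRCNT = 2.
+	 -->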
+ <reg32 offset="0x2201" name="VFD_CONTROL_1">
+ <doc>MAXSTORAGE could be # of attributes/vbo's</doc>
+ <bitfield name="MAXSTORAGE" low="0" high="15" type="uint"/>
+ <bitfield name="REGID4VTX" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="REGID4INST" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x2202" name="VFD_CONTROL_2"/>
+ <reg32 offset="0x2203" name="VFD_CONTROL_3">
+ <bitfield name="REGID_VTXCNT" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x2204" name="VFD_CONTROL_4"/>
+ <reg32 offset="0x2208" name="VFD_INDEX_OFFSET"/>
+ <array offset="0x220a" name="VFD_FETCH" stride="4" length="32">
+ <reg32 offset="0x0" name="INSTR_0">
+ <bitfield name="FETCHSIZE" low="0" high="6" type="uint"/>
+ <bitfield name="BUFSTRIDE" low="7" high="16" type="uint"/>
+ <bitfield name="SWITCHNEXT" pos="19" type="boolean"/>
+ <bitfield name="INSTANCED" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x1" name="INSTR_1"/>
+ <reg32 offset="0x2" name="INSTR_2">
+ <bitfield name="SIZE" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="0x3" name="INSTR_3">
+ <!-- might well be bigger.. -->
+ <bitfield name="STEPRATE" low="0" high="8" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x228a" name="VFD_DECODE" stride="1" length="32">
+ <reg32 offset="0x0" name="INSTR">
+ <bitfield name="WRITEMASK" low="0" high="3" type="hex"/>
+ <!-- not sure if this is a bit flag and another flag above it, or?? -->
+ <bitfield name="CONSTFILL" pos="4" type="boolean"/>
+ <bitfield name="FORMAT" low="6" high="11" type="a4xx_vtx_fmt"/>
+ <bitfield name="REGID" low="12" high="19" type="a3xx_regid"/>
+ <bitfield name="INT" pos="20" type="boolean"/>
+ <doc>SHIFTCNT appears to be size, ie. FLOAT_32_32_32 is 12, and BYTE_8 is 1</doc>
+ <bitfield name="SWAP" low="22" high="23" type="a3xx_color_swap"/>
+ <bitfield name="SHIFTCNT" low="24" high="28" type="uint"/>
+ <bitfield name="LASTCOMPVALID" pos="29" type="boolean"/>
+ <bitfield name="SWITCHNEXT" pos="30" type="boolean"/>
+ </reg32>
+ </array>
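+	<!--
+	 Illustrative example, following the SHIFTCNT note above: a three
+	 component float attribute (FLOAT_32_32_32) written to all of .xyz would
+	 presumably use WRITEMASK = 0x7 and SHIFTCNT = 12 (its size in bytes).
+	 -->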
+
+ <!-- TPL1 registers -->
+ <reg32 offset="0x0f00" name="TPL1_DEBUG_ECO_CONTROL"/>
+ <!-- always 0000003a: -->
+ <reg32 offset="0x0f03" name="TPL1_TP_MODE_CONTROL"/>
+ <reg32 offset="0x0f04" name="TPL1_PERFCTR_TP_SEL_0" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f05" name="TPL1_PERFCTR_TP_SEL_1" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f06" name="TPL1_PERFCTR_TP_SEL_2" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f07" name="TPL1_PERFCTR_TP_SEL_3" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f08" name="TPL1_PERFCTR_TP_SEL_4" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f09" name="TPL1_PERFCTR_TP_SEL_5" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f0a" name="TPL1_PERFCTR_TP_SEL_6" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f0b" name="TPL1_PERFCTR_TP_SEL_7" type="a4xx_tp_perfcounter_select"/>
+ <reg32 offset="0x2380" name="TPL1_TP_TEX_OFFSET"/>
+ <reg32 offset="0x2381" name="TPL1_TP_TEX_COUNT">
+ <bitfield name="VS" low="0" high="7" type="uint"/>
+ <bitfield name="HS" low="8" high="15" type="uint"/>
+ <bitfield name="DS" low="16" high="23" type="uint"/>
+ <bitfield name="GS" low="24" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2384" name="TPL1_TP_VS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x2387" name="TPL1_TP_HS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x238a" name="TPL1_TP_DS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x238d" name="TPL1_TP_GS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x23a0" name="TPL1_TP_FS_TEX_COUNT">
+ <bitfield name="FS" low="0" high="7" type="uint"/>
+ <bitfield name="CS" low="8" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x23a1" name="TPL1_TP_FS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x23a4" name="TPL1_TP_CS_BORDER_COLOR_BASE_ADDR"/>
+ <reg32 offset="0x23a5" name="TPL1_TP_CS_SAMPLER_BASE_ADDR"/>
+ <reg32 offset="0x23a6" name="TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR"/>
+
+ <!-- GRAS registers -->
+ <reg32 offset="0x0c80" name="GRAS_TSE_STATUS"/>
+ <reg32 offset="0x0c81" name="GRAS_DEBUG_ECO_CONTROL"/>
+ <reg32 offset="0x0c88" name="GRAS_PERFCTR_TSE_SEL_0" type="a4xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c89" name="GRAS_PERFCTR_TSE_SEL_1" type="a4xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c8a" name="GRAS_PERFCTR_TSE_SEL_2" type="a4xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c8b" name="GRAS_PERFCTR_TSE_SEL_3" type="a4xx_gras_tse_perfcounter_select"/>
+ <reg32 offset="0x0c8c" name="GRAS_PERFCTR_RAS_SEL_0" type="a4xx_gras_ras_perfcounter_select"/>
+ <reg32 offset="0x0c8d" name="GRAS_PERFCTR_RAS_SEL_1" type="a4xx_gras_ras_perfcounter_select"/>
+ <reg32 offset="0x0c8e" name="GRAS_PERFCTR_RAS_SEL_2" type="a4xx_gras_ras_perfcounter_select"/>
+ <reg32 offset="0x0c8f" name="GRAS_PERFCTR_RAS_SEL_3" type="a4xx_gras_ras_perfcounter_select"/>
+ <reg32 offset="0x2000" name="GRAS_CL_CLIP_CNTL">
+ <bitfield name="CLIP_DISABLE" pos="15" type="boolean"/>
+ <bitfield name="ZNEAR_CLIP_DISABLE" pos="16" type="boolean"/>
+ <bitfield name="ZFAR_CLIP_DISABLE" pos="17" type="boolean"/>
+ <bitfield name="ZERO_GB_SCALE_Z" pos="22" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2003" name="GRAS_CNTL">
+ <bitfield name="IJ_PERSP" pos="0" type="boolean"/>
+ <bitfield name="IJ_LINEAR" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2004" name="GRAS_CL_GB_CLIP_ADJ">
+ <bitfield name="HORZ" low="0" high="9" type="uint"/>
+ <bitfield name="VERT" low="10" high="19" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2008" name="GRAS_CL_VPORT_XOFFSET_0" type="float"/>
+ <reg32 offset="0x2009" name="GRAS_CL_VPORT_XSCALE_0" type="float"/>
+ <reg32 offset="0x200a" name="GRAS_CL_VPORT_YOFFSET_0" type="float"/>
+ <reg32 offset="0x200b" name="GRAS_CL_VPORT_YSCALE_0" type="float"/>
+ <reg32 offset="0x200c" name="GRAS_CL_VPORT_ZOFFSET_0" type="float"/>
+ <reg32 offset="0x200d" name="GRAS_CL_VPORT_ZSCALE_0" type="float"/>
+ <reg32 offset="0x2070" name="GRAS_SU_POINT_MINMAX">
+ <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x2071" name="GRAS_SU_POINT_SIZE" type="fixed" radix="4"/>
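+	<!--
+	 Illustrative encoding: with radix="4" (four fractional bits) these are
+	 fixed point values, so e.g. a point size of 1.5 would be written as
+	 1.5 * 16 = 24 (0x18).
+	 -->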
+ <reg32 offset="0x2073" name="GRAS_ALPHA_CONTROL">
+ <bitfield name="ALPHA_TEST_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="FORCE_FRAGZ_TO_FS" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2074" name="GRAS_SU_POLY_OFFSET_SCALE" type="float"/>
+ <reg32 offset="0x2075" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float"/>
+ <reg32 offset="0x2076" name="GRAS_SU_POLY_OFFSET_CLAMP" type="float"/>
+ <reg32 offset="0x2077" name="GRAS_DEPTH_CONTROL">
+		<!-- guesstimating that this is GRAS based on addr -->
+ <bitfield name="FORMAT" low="0" high="1" type="a4xx_depth_format"/>
+ </reg32>
+ <reg32 offset="0x2078" name="GRAS_SU_MODE_CONTROL">
+ <bitfield name="CULL_FRONT" pos="0" type="boolean"/>
+ <bitfield name="CULL_BACK" pos="1" type="boolean"/>
+ <bitfield name="FRONT_CW" pos="2" type="boolean"/>
+ <bitfield name="LINEHALFWIDTH" low="3" high="10" radix="2" type="fixed"/>
+ <bitfield name="POLY_OFFSET" pos="11" type="boolean"/>
+ <bitfield name="MSAA_ENABLE" pos="13" type="boolean"/>
+ <!-- bit20 set whenever RENDER_MODE = RB_RENDERING_PASS -->
+ <bitfield name="RENDERING_PASS" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x207b" name="GRAS_SC_CONTROL">
+ <!-- complete wild-ass-guess for sizes of these bitfields.. -->
+ <bitfield name="RENDER_MODE" low="2" high="3" type="a3xx_render_mode"/>
+ <bitfield name="MSAA_SAMPLES" low="7" high="9" type="uint"/>
+ <bitfield name="MSAA_DISABLE" pos="11" type="boolean"/>
+ <bitfield name="RASTER_MODE" low="12" high="15"/>
+ </reg32>
+ <reg32 offset="0x207c" name="GRAS_SC_SCREEN_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x207d" name="GRAS_SC_SCREEN_SCISSOR_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x209c" name="GRAS_SC_WINDOW_SCISSOR_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x209d" name="GRAS_SC_WINDOW_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0x209e" name="GRAS_SC_EXTENT_WINDOW_BR" type="adreno_reg_xy"/>
+ <reg32 offset="0x209f" name="GRAS_SC_EXTENT_WINDOW_TL" type="adreno_reg_xy"/>
+
+ <!-- UCHE registers -->
+ <reg32 offset="0x0e80" name="UCHE_CACHE_MODE_CONTROL"/>
+ <reg32 offset="0x0e83" name="UCHE_TRAP_BASE_LO"/>
+ <reg32 offset="0x0e84" name="UCHE_TRAP_BASE_HI"/>
+ <reg32 offset="0x0e88" name="UCHE_CACHE_STATUS"/>
+ <reg32 offset="0x0e8a" name="UCHE_INVALIDATE0"/>
+ <reg32 offset="0x0e8b" name="UCHE_INVALIDATE1"/>
+ <reg32 offset="0x0e8c" name="UCHE_CACHE_WAYS_VFD"/>
+ <reg32 offset="0x0e8e" name="UCHE_PERFCTR_UCHE_SEL_0" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e8f" name="UCHE_PERFCTR_UCHE_SEL_1" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e90" name="UCHE_PERFCTR_UCHE_SEL_2" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e91" name="UCHE_PERFCTR_UCHE_SEL_3" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e92" name="UCHE_PERFCTR_UCHE_SEL_4" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e93" name="UCHE_PERFCTR_UCHE_SEL_5" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e94" name="UCHE_PERFCTR_UCHE_SEL_6" type="a4xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0e95" name="UCHE_PERFCTR_UCHE_SEL_7" type="a4xx_uche_perfcounter_select"/>
+
+ <!-- HLSQ registers -->
+ <reg32 offset="0x0e00" name="HLSQ_TIMEOUT_THRESHOLD"/>
+ <reg32 offset="0x0e04" name="HLSQ_DEBUG_ECO_CONTROL"/>
+ <!-- always 00000000: -->
+ <reg32 offset="0x0e05" name="HLSQ_MODE_CONTROL"/>
+ <reg32 offset="0x0e0e" name="HLSQ_PERF_PIPE_MASK"/>
+ <reg32 offset="0x0e06" name="HLSQ_PERFCTR_HLSQ_SEL_0" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e07" name="HLSQ_PERFCTR_HLSQ_SEL_1" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e08" name="HLSQ_PERFCTR_HLSQ_SEL_2" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e09" name="HLSQ_PERFCTR_HLSQ_SEL_3" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e0a" name="HLSQ_PERFCTR_HLSQ_SEL_4" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e0b" name="HLSQ_PERFCTR_HLSQ_SEL_5" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e0c" name="HLSQ_PERFCTR_HLSQ_SEL_6" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e0d" name="HLSQ_PERFCTR_HLSQ_SEL_7" type="a4xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x23c0" name="HLSQ_CONTROL_0_REG">
+ <!-- I guess same as a3xx, but so far only seen 08000050 -->
+ <bitfield name="FSTHREADSIZE" pos="4" type="a3xx_threadsize"/>
+ <bitfield name="FSSUPERTHREADENABLE" pos="6" type="boolean"/>
+ <bitfield name="SPSHADERRESTART" pos="9" type="boolean"/>
+ <bitfield name="RESERVED2" pos="10" type="boolean"/>
+ <bitfield name="CHUNKDISABLE" pos="26" type="boolean"/>
+ <bitfield name="CONSTMODE" pos="27" type="uint"/>
+ <bitfield name="LAZYUPDATEDISABLE" pos="28" type="boolean"/>
+ <bitfield name="SPCONSTFULLUPDATE" pos="29" type="boolean"/>
+ <bitfield name="TPFULLUPDATE" pos="30" type="boolean"/>
+ <bitfield name="SINGLECONTEXT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x23c1" name="HLSQ_CONTROL_1_REG">
+ <bitfield name="VSTHREADSIZE" pos="6" type="a3xx_threadsize"/>
+ <bitfield name="VSSUPERTHREADENABLE" pos="8" type="boolean"/>
+ <bitfield name="RESERVED1" pos="9" type="boolean"/>
+ <bitfield name="COORDREGID" low="16" high="23" type="a3xx_regid"/>
+ <!-- set if gl_FragCoord.[zw] used in frag shader: -->
+ <bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23c2" name="HLSQ_CONTROL_2_REG">
+ <bitfield name="PRIMALLOCTHRESHOLD" low="26" high="31" type="uint"/>
+ <bitfield name="FACEREGID" low="2" high="9" type="a3xx_regid"/>
+ <bitfield name="SAMPLEID_REGID" low="10" high="17" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK_REGID" low="18" high="25" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23c3" name="HLSQ_CONTROL_3_REG">
+ <!-- register loaded with position (bary.f) -->
+ <bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <!-- 0x23c4 3 regids, lowest one goes to 0 when *not* per-sample shading -->
+ <reg32 offset="0x23c4" name="HLSQ_CONTROL_4_REG">
+ <bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
+ </reg32>
+
+ <bitset name="a4xx_xs_control_reg" inline="yes">
+ <bitfield name="CONSTLENGTH" low="0" high="7" type="uint"/>
+ <bitfield name="CONSTOBJECTOFFSET" low="8" high="14" type="uint"/>
+ <bitfield name="SSBO_ENABLE" pos="15" type="boolean"/>
+ <bitfield name="ENABLED" pos="16" type="boolean"/>
+ <bitfield name="SHADEROBJOFFSET" low="17" high="23" type="uint"/>
+ <bitfield name="INSTRLENGTH" low="24" high="31" type="uint"/>
+ </bitset>
+ <reg32 offset="0x23c5" name="HLSQ_VS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23c6" name="HLSQ_FS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23c7" name="HLSQ_HS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23c8" name="HLSQ_DS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23c9" name="HLSQ_GS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23ca" name="HLSQ_CS_CONTROL_REG" type="a4xx_xs_control_reg"/>
+ <reg32 offset="0x23cd" name="HLSQ_CL_NDRANGE_0">
+ <bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
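+	<!--
+	 Illustrative, given the "minus one" encoding noted above: an OpenCL
+	 local work size of (64, 1, 1) would be programmed as LOCALSIZEX = 63,
+	 LOCALSIZEY = 0, LOCALSIZEZ = 0.
+	 -->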
+ <reg32 offset="0x23ce" name="HLSQ_CL_NDRANGE_1">
+ <bitfield name="SIZE_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x23cf" name="HLSQ_CL_NDRANGE_2"/>
+ <reg32 offset="0x23d0" name="HLSQ_CL_NDRANGE_3">
+ <bitfield name="SIZE_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x23d1" name="HLSQ_CL_NDRANGE_4"/>
+ <reg32 offset="0x23d2" name="HLSQ_CL_NDRANGE_5">
+ <bitfield name="SIZE_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x23d3" name="HLSQ_CL_NDRANGE_6"/>
+ <reg32 offset="0x23d4" name="HLSQ_CL_CONTROL_0">
+ <bitfield name="WGIDCONSTID" low="0" high="11" type="a3xx_regid"/>
+ <bitfield name="KERNELDIMCONSTID" low="12" high="23" type="a3xx_regid"/>
+ <bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23d5" name="HLSQ_CL_CONTROL_1">
+ <!-- GLOBALSIZECONSTID? "kernel size" -->
+ <bitfield name="UNK0CONSTID" low="0" high="11" type="a3xx_regid"/>
+ <bitfield name="WORKGROUPSIZECONSTID" low="12" high="23" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23d6" name="HLSQ_CL_KERNEL_CONST">
+ <!-- GLOBALOFFSETCONSTID -->
+ <bitfield name="UNK0CONSTID" low="0" high="11" type="a3xx_regid"/>
+ <bitfield name="NUMWGCONSTID" low="12" high="23" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23d7" name="HLSQ_CL_KERNEL_GROUP_X"/>
+ <reg32 offset="0x23d8" name="HLSQ_CL_KERNEL_GROUP_Y"/>
+ <reg32 offset="0x23d9" name="HLSQ_CL_KERNEL_GROUP_Z"/>
+ <reg32 offset="0x23da" name="HLSQ_CL_WG_OFFSET">
+ <!-- WGOFFSETCONSTID -->
+ <bitfield name="UNK0CONSTID" low="0" high="11" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0x23db" name="HLSQ_UPDATE_CONTROL"/>
+
+ <!-- PC registers -->
+ <reg32 offset="0x0d00" name="PC_BINNING_COMMAND">
+ <bitfield name="BINNING_ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0d08" name="PC_TESSFACTOR_ADDR"/>
+ <reg32 offset="0x0d0c" name="PC_DRAWCALL_SETUP_OVERRIDE"/>
+ <reg32 offset="0x0d10" name="PC_PERFCTR_PC_SEL_0" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d11" name="PC_PERFCTR_PC_SEL_1" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d12" name="PC_PERFCTR_PC_SEL_2" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d13" name="PC_PERFCTR_PC_SEL_3" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d14" name="PC_PERFCTR_PC_SEL_4" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d15" name="PC_PERFCTR_PC_SEL_5" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d16" name="PC_PERFCTR_PC_SEL_6" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d17" name="PC_PERFCTR_PC_SEL_7" type="a4xx_pc_perfcounter_select"/>
+ <reg32 offset="0x21c0" name="PC_BIN_BASE"/>
+ <reg32 offset="0x21c2" name="PC_VSTREAM_CONTROL">
+ <doc>SIZE is current pipe width * height (in tiles)</doc>
+ <bitfield name="SIZE" low="16" high="21" type="uint"/>
+ <doc>
+			N is some sort of slot # between 0..(SIZE-1). In case
+			multiple tiles use the same pipe, each tile gets a
+			unique slot # (see the example below).
+ </doc>
+ <bitfield name="N" low="22" high="26" type="uint"/>
+ </reg32>
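+	<!--
+	 Illustrative example of the SIZE/N fields described above: a pipe
+	 spanning 2x3 tiles would use SIZE = 6, and the third tile rendered with
+	 that pipe would get slot N = 2.
+	 -->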
+ <reg32 offset="0x21c4" name="PC_PRIM_VTX_CNTL">
+ <!-- bit0 set if there is >= 1 varying (actually used by FS) -->
+ <bitfield name="VAROUT" low="0" high="3" type="uint">
+ <doc>in groups of 4x vec4, blob only uses values
+ 0, 1, 2, 4, 6, 8</doc>
+ </bitfield>
+ <bitfield name="PRIMITIVE_RESTART" pos="20" type="boolean"/>
+ <bitfield name="PROVOKING_VTX_LAST" pos="25" type="boolean"/>
+ <!-- PSIZE bit set if gl_PointSize written: -->
+ <bitfield name="PSIZE" pos="26" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x21c5" name="PC_PRIM_VTX_CNTL2">
+ <bitfield name="POLYMODE_FRONT_PTYPE" low="0" high="2" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_BACK_PTYPE" low="3" high="5" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_ENABLE" pos="6" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x21c6" name="PC_RESTART_INDEX"/>
+ <reg32 offset="0x21e5" name="PC_GS_PARAM">
+ <bitfield name="MAX_VERTICES" low="0" high="9" type="uint"/><!-- +1, i.e. max is 1024 -->
+ <bitfield name="INVOCATIONS" low="11" high="15" type="uint"/><!-- +1, i.e. max is 32 -->
+ <bitfield name="PRIMTYPE" low="23" high="24" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="LAYER" pos="31" type="boolean"/>
+ </reg32>
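+	<!--
+	 Illustrative, given the "+1" encodings noted above: a geometry shader
+	 with max_vertices = 4 and 2 invocations would be programmed with
+	 MAX_VERTICES = 3 and INVOCATIONS = 1.
+	 -->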
+ <reg32 offset="0x21e7" name="PC_HS_PARAM">
+ <bitfield name="VERTICES_OUT" low="0" high="5" type="uint"/>
+ <bitfield name="SPACING" low="21" high="22" type="a4xx_tess_spacing"/>
+ <bitfield name="CW" pos="23" type="boolean"/>
+ <bitfield name="CONNECTED" pos="24" type="boolean"/>
+ </reg32>
+
+ <!-- VBIF registers -->
+ <reg32 offset="0x3000" name="VBIF_VERSION"/>
+ <reg32 offset="0x3001" name="VBIF_CLKON">
+ <bitfield name="FORCE_ON_TESTBUS" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x301c" name="VBIF_ABIT_SORT"/>
+ <reg32 offset="0x301d" name="VBIF_ABIT_SORT_CONF"/>
+ <reg32 offset="0x302a" name="VBIF_GATE_OFF_WRREQ_EN"/>
+ <reg32 offset="0x302c" name="VBIF_IN_RD_LIM_CONF0"/>
+ <reg32 offset="0x302d" name="VBIF_IN_RD_LIM_CONF1"/>
+ <reg32 offset="0x3030" name="VBIF_IN_WR_LIM_CONF0"/>
+ <reg32 offset="0x3031" name="VBIF_IN_WR_LIM_CONF1"/>
+ <reg32 offset="0x3049" name="VBIF_ROUND_ROBIN_QOS_ARB"/>
+ <reg32 offset="0x30c0" name="VBIF_PERF_CNT_EN0"/>
+ <reg32 offset="0x30c1" name="VBIF_PERF_CNT_EN1"/>
+ <reg32 offset="0x30c2" name="VBIF_PERF_CNT_EN2"/>
+ <reg32 offset="0x30c3" name="VBIF_PERF_CNT_EN3"/>
+ <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0" type="a4xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1" type="a4xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2" type="a4xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3" type="a4xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0"/>
+ <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1"/>
+ <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2"/>
+ <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3"/>
+ <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0"/>
+ <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1"/>
+ <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2"/>
+ <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3"/>
+ <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0"/>
+ <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1"/>
+ <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2"/>
+
+ <!--
+ Unknown registers:
+ (mostly related to DX11 features not used yet, I guess?)
+ -->
+
+ <!-- always 00000006: -->
+ <reg32 offset="0x0cc5" name="UNKNOWN_0CC5"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x0cc6" name="UNKNOWN_0CC6"/>
+
+ <!-- always 00000001: -->
+ <reg32 offset="0x0d01" name="UNKNOWN_0D01"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x0e42" name="UNKNOWN_0E42"/>
+
+ <!-- always 00040000: -->
+ <reg32 offset="0x0ec2" name="UNKNOWN_0EC2"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2001" name="UNKNOWN_2001"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x209b" name="UNKNOWN_209B"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x20ef" name="UNKNOWN_20EF"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2152" name="UNKNOWN_2152"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2153" name="UNKNOWN_2153"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2154" name="UNKNOWN_2154"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2155" name="UNKNOWN_2155"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2156" name="UNKNOWN_2156"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2157" name="UNKNOWN_2157"/>
+
+ <!-- always 0000000b: -->
+ <reg32 offset="0x21c3" name="UNKNOWN_21C3"/>
+
+ <!-- always 00000001: -->
+ <reg32 offset="0x21e6" name="UNKNOWN_21E6"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x2209" name="UNKNOWN_2209"/>
+
+ <!-- always 00000000: -->
+ <reg32 offset="0x22d7" name="UNKNOWN_22D7"/>
+
+ <!-- always 00fcfc00: -->
+ <reg32 offset="0x2352" name="UNKNOWN_2352"/>
+
+</domain>
+
+
+<domain name="A4XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a4xx_tex_filter">
+ <value name="A4XX_TEX_NEAREST" value="0"/>
+ <value name="A4XX_TEX_LINEAR" value="1"/>
+ <value name="A4XX_TEX_ANISO" value="2"/>
+ </enum>
+ <enum name="a4xx_tex_clamp">
+ <value name="A4XX_TEX_REPEAT" value="0"/>
+ <value name="A4XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A4XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A4XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A4XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a4xx_tex_aniso">
+ <value name="A4XX_TEX_ANISO_1" value="0"/>
+ <value name="A4XX_TEX_ANISO_2" value="1"/>
+ <value name="A4XX_TEX_ANISO_4" value="2"/>
+ <value name="A4XX_TEX_ANISO_8" value="3"/>
+ <value name="A4XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a4xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a4xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a4xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a4xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a4xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a4xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
+</domain>
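+<!--
+ Illustrative packing of the sampler dwords above (a sketch using only the
+ enums and fields shown): a GL LINEAR filter with REPEAT wrap would use
+ XY_MAG = XY_MIN = A4XX_TEX_LINEAR and WRAP_S/T/R = A4XX_TEX_REPEAT in dword 0,
+ and with the radix="8" LOD fields a MAX_LOD of 12.0 encodes as 12 * 256 = 3072.
+ -->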
+
+<domain name="A4XX_TEX_CONST" width="32">
+ <doc>Texture constant dwords</doc>
+ <enum name="a4xx_tex_swiz">
+ <!-- same as a2xx? -->
+ <value name="A4XX_TEX_X" value="0"/>
+ <value name="A4XX_TEX_Y" value="1"/>
+ <value name="A4XX_TEX_Z" value="2"/>
+ <value name="A4XX_TEX_W" value="3"/>
+ <value name="A4XX_TEX_ZERO" value="4"/>
+ <value name="A4XX_TEX_ONE" value="5"/>
+ </enum>
+ <enum name="a4xx_tex_type">
+ <value name="A4XX_TEX_1D" value="0"/>
+ <value name="A4XX_TEX_2D" value="1"/>
+ <value name="A4XX_TEX_CUBE" value="2"/>
+ <value name="A4XX_TEX_3D" value="3"/>
+ <value name="A4XX_TEX_BUFFER" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILED" pos="0" type="boolean"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a4xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a4xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a4xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a4xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <bitfield name="FMT" low="22" high="28" type="a4xx_tex_fmt"/>
+ <bitfield name="TYPE" low="29" high="31" type="a4xx_tex_type"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="HEIGHT" low="0" high="14" type="uint"/>
+ <bitfield name="WIDTH" low="15" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 32) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <bitfield name="BUFFER" pos="6" type="boolean"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="9" high="29" type="uint"/>
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="LAYERSZ" low="0" high="13" shr="12" type="uint"/>
+ <bitfield name="DEPTH" low="18" high="30" type="uint"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <!--
+ like a3xx we seem to have two LAYERSZ's.. although this one
+ seems too small to be useful, and when it overflows blob just
+ sets it to zero..
+ -->
+ <bitfield name="LAYERSZ" low="0" high="3" shr="12" type="uint"/>
+ <bitfield name="BASE" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5"/>
+ <reg32 offset="6" name="6"/>
+ <reg32 offset="7" name="7"/>
+</domain>
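+<!--
+ Notes on the constant dwords above (illustrative, inferred from the shr
+ attributes): BASE uses shr="5", so the texture base address is stored in
+ 32-byte units (i.e. presumably 32-byte aligned), and LAYERSZ uses shr="12",
+ i.e. the per-layer size is stored in 4 KiB units.
+ -->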
+
+<domain name="A4XX_SSBO_0" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="0" high="21" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ARRAY_PITCH" low="12" high="25" shr="12" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!-- bytes per pixel: -->
+ <bitfield name="CPP" low="0" high="5" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="A4XX_SSBO_1" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="CPP" low="0" high="4" type="uint"/>
+ <bitfield name="FMT" low="8" high="15" type="a4xx_color_fmt"/>
+ <bitfield name="WIDTH" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="HEIGHT" low="0" high="15" type="uint"/>
+ <bitfield name="DEPTH" low="16" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a5xx.xml b/drivers/gpu/drm/msm/registers/adreno/a5xx.xml
new file mode 100644
index 000000000000..bd8df5945166
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a5xx.xml
@@ -0,0 +1,3039 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a5xx_color_fmt">
+ <value value="0x02" name="RB5_A8_UNORM"/>
+ <value value="0x03" name="RB5_R8_UNORM"/>
+ <value value="0x04" name="RB5_R8_SNORM"/>
+ <value value="0x05" name="RB5_R8_UINT"/>
+ <value value="0x06" name="RB5_R8_SINT"/>
+ <value value="0x08" name="RB5_R4G4B4A4_UNORM"/>
+ <value value="0x0a" name="RB5_R5G5B5A1_UNORM"/>
+ <value value="0x0e" name="RB5_R5G6B5_UNORM"/>
+ <value value="0x0f" name="RB5_R8G8_UNORM"/>
+ <value value="0x10" name="RB5_R8G8_SNORM"/>
+ <value value="0x11" name="RB5_R8G8_UINT"/>
+ <value value="0x12" name="RB5_R8G8_SINT"/>
+ <value value="0x15" name="RB5_R16_UNORM"/>
+ <value value="0x16" name="RB5_R16_SNORM"/>
+ <value value="0x17" name="RB5_R16_FLOAT"/>
+ <value value="0x18" name="RB5_R16_UINT"/>
+ <value value="0x19" name="RB5_R16_SINT"/>
+ <value value="0x30" name="RB5_R8G8B8A8_UNORM"/>
+ <value value="0x31" name="RB5_R8G8B8_UNORM"/>
+ <value value="0x32" name="RB5_R8G8B8A8_SNORM"/>
+ <value value="0x33" name="RB5_R8G8B8A8_UINT"/>
+ <value value="0x34" name="RB5_R8G8B8A8_SINT"/>
+ <value value="0x37" name="RB5_R10G10B10A2_UNORM"/> <!-- GL_RGB10_A2 -->
+ <value value="0x3a" name="RB5_R10G10B10A2_UINT"/> <!-- GL_RGB10_A2UI -->
+ <value value="0x42" name="RB5_R11G11B10_FLOAT"/> <!-- GL_R11F_G11F_B10F -->
+ <value value="0x43" name="RB5_R16G16_UNORM"/>
+ <value value="0x44" name="RB5_R16G16_SNORM"/>
+ <value value="0x45" name="RB5_R16G16_FLOAT"/>
+ <value value="0x46" name="RB5_R16G16_UINT"/>
+ <value value="0x47" name="RB5_R16G16_SINT"/>
+ <value value="0x4a" name="RB5_R32_FLOAT"/>
+ <value value="0x4b" name="RB5_R32_UINT"/>
+ <value value="0x4c" name="RB5_R32_SINT"/>
+ <value value="0x60" name="RB5_R16G16B16A16_UNORM"/>
+ <value value="0x61" name="RB5_R16G16B16A16_SNORM"/>
+ <value value="0x62" name="RB5_R16G16B16A16_FLOAT"/>
+ <value value="0x63" name="RB5_R16G16B16A16_UINT"/>
+ <value value="0x64" name="RB5_R16G16B16A16_SINT"/>
+ <value value="0x67" name="RB5_R32G32_FLOAT"/>
+ <value value="0x68" name="RB5_R32G32_UINT"/>
+ <value value="0x69" name="RB5_R32G32_SINT"/>
+ <value value="0x82" name="RB5_R32G32B32A32_FLOAT"/>
+ <value value="0x83" name="RB5_R32G32B32A32_UINT"/>
+ <value value="0x84" name="RB5_R32G32B32A32_SINT"/>
+
+ <value value="0xff" name="RB5_NONE"/>
+</enum>
+
+<enum name="a5xx_tile_mode">
+ <value name="TILE5_LINEAR" value="0"/>
+ <value name="TILE5_2" value="2"/>
+ <value name="TILE5_3" value="3"/>
+</enum>
+
+<enum name="a5xx_vtx_fmt" prefix="chipset">
+ <value value="0x03" name="VFMT5_8_UNORM"/>
+ <value value="0x04" name="VFMT5_8_SNORM"/>
+ <value value="0x05" name="VFMT5_8_UINT"/>
+ <value value="0x06" name="VFMT5_8_SINT"/>
+
+ <value value="0x0f" name="VFMT5_8_8_UNORM"/>
+ <value value="0x10" name="VFMT5_8_8_SNORM"/>
+ <value value="0x11" name="VFMT5_8_8_UINT"/>
+ <value value="0x12" name="VFMT5_8_8_SINT"/>
+
+ <value value="0x15" name="VFMT5_16_UNORM"/>
+ <value value="0x16" name="VFMT5_16_SNORM"/>
+ <value value="0x17" name="VFMT5_16_FLOAT"/>
+ <value value="0x18" name="VFMT5_16_UINT"/>
+ <value value="0x19" name="VFMT5_16_SINT"/>
+
+ <value value="0x21" name="VFMT5_8_8_8_UNORM"/>
+ <value value="0x22" name="VFMT5_8_8_8_SNORM"/>
+ <value value="0x23" name="VFMT5_8_8_8_UINT"/>
+ <value value="0x24" name="VFMT5_8_8_8_SINT"/>
+
+ <value value="0x30" name="VFMT5_8_8_8_8_UNORM"/>
+ <value value="0x32" name="VFMT5_8_8_8_8_SNORM"/>
+ <value value="0x33" name="VFMT5_8_8_8_8_UINT"/>
+ <value value="0x34" name="VFMT5_8_8_8_8_SINT"/>
+
+ <value value="0x36" name="VFMT5_10_10_10_2_UNORM"/>
+ <value value="0x39" name="VFMT5_10_10_10_2_SNORM"/>
+ <value value="0x3a" name="VFMT5_10_10_10_2_UINT"/>
+ <value value="0x3b" name="VFMT5_10_10_10_2_SINT"/>
+
+ <value value="0x42" name="VFMT5_11_11_10_FLOAT"/>
+
+ <value value="0x43" name="VFMT5_16_16_UNORM"/>
+ <value value="0x44" name="VFMT5_16_16_SNORM"/>
+ <value value="0x45" name="VFMT5_16_16_FLOAT"/>
+ <value value="0x46" name="VFMT5_16_16_UINT"/>
+ <value value="0x47" name="VFMT5_16_16_SINT"/>
+
+ <value value="0x48" name="VFMT5_32_UNORM"/>
+ <value value="0x49" name="VFMT5_32_SNORM"/>
+ <value value="0x4a" name="VFMT5_32_FLOAT"/>
+ <value value="0x4b" name="VFMT5_32_UINT"/>
+ <value value="0x4c" name="VFMT5_32_SINT"/>
+ <value value="0x4d" name="VFMT5_32_FIXED"/>
+
+ <value value="0x58" name="VFMT5_16_16_16_UNORM"/>
+ <value value="0x59" name="VFMT5_16_16_16_SNORM"/>
+ <value value="0x5a" name="VFMT5_16_16_16_FLOAT"/>
+ <value value="0x5b" name="VFMT5_16_16_16_UINT"/>
+ <value value="0x5c" name="VFMT5_16_16_16_SINT"/>
+
+ <value value="0x60" name="VFMT5_16_16_16_16_UNORM"/>
+ <value value="0x61" name="VFMT5_16_16_16_16_SNORM"/>
+ <value value="0x62" name="VFMT5_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="VFMT5_16_16_16_16_UINT"/>
+ <value value="0x64" name="VFMT5_16_16_16_16_SINT"/>
+
+ <value value="0x65" name="VFMT5_32_32_UNORM"/>
+ <value value="0x66" name="VFMT5_32_32_SNORM"/>
+ <value value="0x67" name="VFMT5_32_32_FLOAT"/>
+ <value value="0x68" name="VFMT5_32_32_UINT"/>
+ <value value="0x69" name="VFMT5_32_32_SINT"/>
+ <value value="0x6a" name="VFMT5_32_32_FIXED"/>
+
+ <value value="0x70" name="VFMT5_32_32_32_UNORM"/>
+ <value value="0x71" name="VFMT5_32_32_32_SNORM"/>
+ <value value="0x72" name="VFMT5_32_32_32_UINT"/>
+ <value value="0x73" name="VFMT5_32_32_32_SINT"/>
+ <value value="0x74" name="VFMT5_32_32_32_FLOAT"/>
+ <value value="0x75" name="VFMT5_32_32_32_FIXED"/>
+
+ <value value="0x80" name="VFMT5_32_32_32_32_UNORM"/>
+ <value value="0x81" name="VFMT5_32_32_32_32_SNORM"/>
+ <value value="0x82" name="VFMT5_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="VFMT5_32_32_32_32_UINT"/>
+ <value value="0x84" name="VFMT5_32_32_32_32_SINT"/>
+ <value value="0x85" name="VFMT5_32_32_32_32_FIXED"/>
+
+ <value value="0xff" name="VFMT5_NONE"/>
+</enum>
+
+<enum name="a5xx_tex_fmt">
+ <value value="0x02" name="TFMT5_A8_UNORM"/>
+ <value value="0x03" name="TFMT5_8_UNORM"/>
+ <value value="0x04" name="TFMT5_8_SNORM"/>
+ <value value="0x05" name="TFMT5_8_UINT"/>
+ <value value="0x06" name="TFMT5_8_SINT"/>
+ <value value="0x08" name="TFMT5_4_4_4_4_UNORM"/>
+ <value value="0x0a" name="TFMT5_5_5_5_1_UNORM"/>
+ <value value="0x0e" name="TFMT5_5_6_5_UNORM"/>
+ <value value="0x0f" name="TFMT5_8_8_UNORM"/>
+ <value value="0x10" name="TFMT5_8_8_SNORM"/>
+ <value value="0x11" name="TFMT5_8_8_UINT"/>
+ <value value="0x12" name="TFMT5_8_8_SINT"/>
+ <value value="0x13" name="TFMT5_L8_A8_UNORM"/>
+ <value value="0x15" name="TFMT5_16_UNORM"/>
+ <value value="0x16" name="TFMT5_16_SNORM"/>
+ <value value="0x17" name="TFMT5_16_FLOAT"/>
+ <value value="0x18" name="TFMT5_16_UINT"/>
+ <value value="0x19" name="TFMT5_16_SINT"/>
+ <value value="0x30" name="TFMT5_8_8_8_8_UNORM"/>
+ <value value="0x31" name="TFMT5_8_8_8_UNORM"/>
+ <value value="0x32" name="TFMT5_8_8_8_8_SNORM"/>
+ <value value="0x33" name="TFMT5_8_8_8_8_UINT"/>
+ <value value="0x34" name="TFMT5_8_8_8_8_SINT"/>
+ <value value="0x35" name="TFMT5_9_9_9_E5_FLOAT"/>
+ <value value="0x36" name="TFMT5_10_10_10_2_UNORM"/>
+ <value value="0x3a" name="TFMT5_10_10_10_2_UINT"/>
+ <value value="0x42" name="TFMT5_11_11_10_FLOAT"/>
+ <value value="0x43" name="TFMT5_16_16_UNORM"/>
+ <value value="0x44" name="TFMT5_16_16_SNORM"/>
+ <value value="0x45" name="TFMT5_16_16_FLOAT"/>
+ <value value="0x46" name="TFMT5_16_16_UINT"/>
+ <value value="0x47" name="TFMT5_16_16_SINT"/>
+ <value value="0x4a" name="TFMT5_32_FLOAT"/>
+ <value value="0x4b" name="TFMT5_32_UINT"/>
+ <value value="0x4c" name="TFMT5_32_SINT"/>
+ <value value="0x60" name="TFMT5_16_16_16_16_UNORM"/>
+ <value value="0x61" name="TFMT5_16_16_16_16_SNORM"/>
+ <value value="0x62" name="TFMT5_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="TFMT5_16_16_16_16_UINT"/>
+ <value value="0x64" name="TFMT5_16_16_16_16_SINT"/>
+ <value value="0x67" name="TFMT5_32_32_FLOAT"/>
+ <value value="0x68" name="TFMT5_32_32_UINT"/>
+ <value value="0x69" name="TFMT5_32_32_SINT"/>
+ <value value="0x72" name="TFMT5_32_32_32_UINT"/>
+ <value value="0x73" name="TFMT5_32_32_32_SINT"/>
+ <value value="0x74" name="TFMT5_32_32_32_FLOAT"/>
+ <value value="0x82" name="TFMT5_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="TFMT5_32_32_32_32_UINT"/>
+ <value value="0x84" name="TFMT5_32_32_32_32_SINT"/>
+ <value value="0xa0" name="TFMT5_X8Z24_UNORM"/>
+
+ <value value="0xab" name="TFMT5_ETC2_RG11_UNORM"/>
+ <value value="0xac" name="TFMT5_ETC2_RG11_SNORM"/>
+ <value value="0xad" name="TFMT5_ETC2_R11_UNORM"/>
+ <value value="0xae" name="TFMT5_ETC2_R11_SNORM"/>
+ <value value="0xaf" name="TFMT5_ETC1"/>
+ <value value="0xb0" name="TFMT5_ETC2_RGB8"/>
+ <value value="0xb1" name="TFMT5_ETC2_RGBA8"/>
+ <value value="0xb2" name="TFMT5_ETC2_RGB8A1"/>
+ <value value="0xb3" name="TFMT5_DXT1"/>
+ <value value="0xb4" name="TFMT5_DXT3"/>
+ <value value="0xb5" name="TFMT5_DXT5"/>
+ <value value="0xb7" name="TFMT5_RGTC1_UNORM"/>
+ <value value="0xb8" name="TFMT5_RGTC1_SNORM"/>
+ <value value="0xbb" name="TFMT5_RGTC2_UNORM"/>
+ <value value="0xbc" name="TFMT5_RGTC2_SNORM"/>
+ <value value="0xbe" name="TFMT5_BPTC_UFLOAT"/>
+ <value value="0xbf" name="TFMT5_BPTC_FLOAT"/>
+ <value value="0xc0" name="TFMT5_BPTC"/>
+ <value value="0xc1" name="TFMT5_ASTC_4x4"/>
+ <value value="0xc2" name="TFMT5_ASTC_5x4"/>
+ <value value="0xc3" name="TFMT5_ASTC_5x5"/>
+ <value value="0xc4" name="TFMT5_ASTC_6x5"/>
+ <value value="0xc5" name="TFMT5_ASTC_6x6"/>
+ <value value="0xc6" name="TFMT5_ASTC_8x5"/>
+ <value value="0xc7" name="TFMT5_ASTC_8x6"/>
+ <value value="0xc8" name="TFMT5_ASTC_8x8"/>
+ <value value="0xc9" name="TFMT5_ASTC_10x5"/>
+ <value value="0xca" name="TFMT5_ASTC_10x6"/>
+ <value value="0xcb" name="TFMT5_ASTC_10x8"/>
+ <value value="0xcc" name="TFMT5_ASTC_10x10"/>
+ <value value="0xcd" name="TFMT5_ASTC_12x10"/>
+ <value value="0xce" name="TFMT5_ASTC_12x12"/>
+
+ <value value="0xff" name="TFMT5_NONE"/>
+</enum>
+
+<enum name="a5xx_depth_format">
+ <value name="DEPTH5_NONE" value="0"/>
+ <value name="DEPTH5_16" value="1"/>
+ <value name="DEPTH5_24_8" value="2"/>
+ <value name="DEPTH5_32" value="4"/>
+</enum>
+
+<enum name="a5xx_blit_buf">
+ <value value="0" name="BLIT_MRT0"/>
+ <value value="1" name="BLIT_MRT1"/>
+ <value value="2" name="BLIT_MRT2"/>
+ <value value="3" name="BLIT_MRT3"/>
+ <value value="4" name="BLIT_MRT4"/>
+ <value value="5" name="BLIT_MRT5"/>
+ <value value="6" name="BLIT_MRT6"/>
+ <value value="7" name="BLIT_MRT7"/>
+ <value value="8" name="BLIT_ZS"/> <!-- depth or combined depth+stencil -->
+ <value value="9" name="BLIT_S"/> <!-- separate stencil -->
+</enum>
+
+<!-- see comment in a4xx.xml about script to extract countables from test-perf output -->
+<enum name="a5xx_cp_perfcounter_select">
+ <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="PERF_CP_PFP_IDLE"/>
+ <value value="4" name="PERF_CP_PFP_BUSY_WORKING"/>
+ <value value="5" name="PERF_CP_PFP_STALL_CYCLES_ANY"/>
+ <value value="6" name="PERF_CP_PFP_STARVE_CYCLES_ANY"/>
+ <value value="7" name="PERF_CP_PFP_ICACHE_MISS"/>
+ <value value="8" name="PERF_CP_PFP_ICACHE_HIT"/>
+ <value value="9" name="PERF_CP_PFP_MATCH_PM4_PKT_PROFILE"/>
+ <value value="10" name="PERF_CP_ME_BUSY_WORKING"/>
+ <value value="11" name="PERF_CP_ME_IDLE"/>
+ <value value="12" name="PERF_CP_ME_STARVE_CYCLES_ANY"/>
+ <value value="13" name="PERF_CP_ME_FIFO_EMPTY_PFP_IDLE"/>
+ <value value="14" name="PERF_CP_ME_FIFO_EMPTY_PFP_BUSY"/>
+ <value value="15" name="PERF_CP_ME_FIFO_FULL_ME_BUSY"/>
+ <value value="16" name="PERF_CP_ME_FIFO_FULL_ME_NON_WORKING"/>
+ <value value="17" name="PERF_CP_ME_STALL_CYCLES_ANY"/>
+ <value value="18" name="PERF_CP_ME_ICACHE_MISS"/>
+ <value value="19" name="PERF_CP_ME_ICACHE_HIT"/>
+ <value value="20" name="PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="21" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="22" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="23" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="24" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="25" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="26" name="PERF_CP_MODE_SWITCH"/>
+ <value value="27" name="PERF_CP_ZPASS_DONE"/>
+ <value value="28" name="PERF_CP_CONTEXT_DONE"/>
+ <value value="29" name="PERF_CP_CACHE_FLUSH"/>
+ <value value="30" name="PERF_CP_LONG_PREEMPTIONS"/>
+</enum>
+
+<enum name="a5xx_rbbm_perfcounter_select">
+ <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a5xx_pc_perfcounter_select">
+ <value value="0" name="PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
+ <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
+ <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="16" name="PERF_PC_INSTANCES"/>
+ <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
+ <value value="18" name="PERF_PC_DEAD_PRIM"/>
+ <value value="19" name="PERF_PC_LIVE_PRIM"/>
+ <value value="20" name="PERF_PC_VERTEX_HITS"/>
+ <value value="21" name="PERF_PC_IA_VERTICES"/>
+ <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
+ <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
+ <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
+ <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
+ <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
+ <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
+ <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
+ <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
+ <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
+ <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
+ <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
+ <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
+ <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
+</enum>
+
+<enum name="a5xx_vfd_perfcounter_select">
+ <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="PERF_VFD_STALL_CYCLES_MISS_VB"/>
+ <value value="4" name="PERF_VFD_STALL_CYCLES_MISS_Q"/>
+ <value value="5" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="6" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="7" name="PERF_VFD_STALL_CYCLES_VFDP_VB"/>
+ <value value="8" name="PERF_VFD_STALL_CYCLES_VFDP_Q"/>
+ <value value="9" name="PERF_VFD_DECODER_PACKER_STALL"/>
+ <value value="10" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="11" name="PERF_VFD_RBUFFER_FULL"/>
+ <value value="12" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="13" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="14" name="PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="15" name="PERF_VFD_INSTRUCTIONS"/>
+ <value value="16" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="17" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="18" name="PERF_VFD_MODE_0_FIBERS"/>
+ <value value="19" name="PERF_VFD_MODE_1_FIBERS"/>
+ <value value="20" name="PERF_VFD_MODE_2_FIBERS"/>
+ <value value="21" name="PERF_VFD_MODE_3_FIBERS"/>
+ <value value="22" name="PERF_VFD_MODE_4_FIBERS"/>
+ <value value="23" name="PERF_VFD_TOTAL_VERTICES"/>
+ <value value="24" name="PERF_VFD_NUM_ATTR_MISS"/>
+ <value value="25" name="PERF_VFD_1_BURST_REQ"/>
+ <value value="26" name="PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="27" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="28" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="29" name="PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="30" name="PERF_VFDP_VS_STAGE_32_WAVES"/>
+</enum>
+
+<enum name="a5xx_hlsq_perfcounter_select">
+ <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="PERF_HLSQ_FS_STAGE_32_WAVES"/>
+ <value value="7" name="PERF_HLSQ_FS_STAGE_64_WAVES"/>
+ <value value="8" name="PERF_HLSQ_QUADS"/>
+ <value value="9" name="PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE"/>
+ <value value="10" name="PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE"/>
+ <value value="11" name="PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE"/>
+ <value value="12" name="PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE"/>
+ <value value="13" name="PERF_HLSQ_CS_INVOCATIONS"/>
+ <value value="14" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+</enum>
+
+<enum name="a5xx_vpc_perfcounter_select">
+ <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
+ <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="PERF_VPC_POS_EXPORT_STALL_CYCLES"/>
+ <value value="8" name="PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="9" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="10" name="PERF_VPC_PC_PRIMITIVES"/>
+ <value value="11" name="PERF_VPC_SP_COMPONENTS"/>
+ <value value="12" name="PERF_VPC_SP_LM_PRIMITIVES"/>
+ <value value="13" name="PERF_VPC_SP_LM_COMPONENTS"/>
+ <value value="14" name="PERF_VPC_SP_LM_DWORDS"/>
+ <value value="15" name="PERF_VPC_STREAMOUT_COMPONENTS"/>
+ <value value="16" name="PERF_VPC_GRANT_PHASES"/>
+</enum>
+
+<enum name="a5xx_tse_perfcounter_select">
+ <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="PERF_TSE_CINVOCATION"/>
+ <value value="16" name="PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="PERF_TSE_2D_ALIVE_CLCLES"/>
+</enum>
+
+<enum name="a5xx_ras_perfcounter_select">
+ <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="PERF_RAS_8X4_TILES"/>
+ <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
+</enum>
+
+<enum name="a5xx_lrz_perfcounter_select">
+ <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+</enum>
+
+<enum name="a5xx_uche_perfcounter_select">
+ <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_UCHE_STALL_CYCLES_VBIF"/>
+ <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="PERF_UCHE_EVICTS"/>
+ <value value="19" name="PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="PERF_UCHE_FLAG_COUNT"/>
+</enum>
+
+<enum name="a5xx_tp_perfcounter_select">
+ <value value="0" name="PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
+ <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
+ <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="PERF_TP_QUADS_1D"/>
+ <value value="19" name="PERF_TP_QUADS_2D"/>
+ <value value="20" name="PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="PERF_TP_QUADS_3D"/>
+ <value value="22" name="PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="PERF_TP_STATE_CACHE_REQUESTS"/>
+ <value value="24" name="PERF_TP_STATE_CACHE_MISSES"/>
+ <value value="25" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="26" name="PERF_TP_BINDLESS_STATE_CACHE_REQUESTS"/>
+ <value value="27" name="PERF_TP_BINDLESS_STATE_CACHE_MISSES"/>
+ <value value="28" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="29" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="30" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="31" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="32" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="33" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="34" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="35" name="PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="36" name="PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="37" name="PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="38" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="39" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="40" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="41" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+</enum>
+
+<enum name="a5xx_sp_perfcounter_select">
+ <value value="0" name="PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="PERF_SP_SCHEDULER_NON_WORKING"/>
+ <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="35" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="36" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="37" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="38" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="39" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="40" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="41" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="42" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="43" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="44" name="PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="45" name="PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="46" name="PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="47" name="PERF_SP_UCHE_READ_TRANS"/>
+ <value value="48" name="PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="49" name="PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="50" name="PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="51" name="PERF_SP_PIXELS_KILLED"/>
+ <value value="52" name="PERF_SP_ICL1_REQUESTS"/>
+ <value value="53" name="PERF_SP_ICL1_MISSES"/>
+ <value value="54" name="PERF_SP_ICL0_REQUESTS"/>
+ <value value="55" name="PERF_SP_ICL0_MISSES"/>
+ <value value="56" name="PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="57" name="PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="58" name="PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="59" name="PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="60" name="PERF_SP_GPR_READ"/>
+ <value value="61" name="PERF_SP_GPR_WRITE"/>
+ <value value="62" name="PERF_SP_LM_CH0_REQUESTS"/>
+ <value value="63" name="PERF_SP_LM_CH1_REQUESTS"/>
+ <value value="64" name="PERF_SP_LM_BANK_CONFLICTS"/>
+</enum>
+
+<enum name="a5xx_rb_perfcounter_select">
+ <value value="0" name="PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="2" name="PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="3" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="4" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="5" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="6" name="PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="7" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="8" name="PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="9" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="10" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="11" name="PERF_RB_Z_WORKLOAD"/>
+ <value value="12" name="PERF_RB_HLSQ_ACTIVE"/>
+ <value value="13" name="PERF_RB_Z_READ"/>
+ <value value="14" name="PERF_RB_Z_WRITE"/>
+ <value value="15" name="PERF_RB_C_READ"/>
+ <value value="16" name="PERF_RB_C_WRITE"/>
+ <value value="17" name="PERF_RB_TOTAL_PASS"/>
+ <value value="18" name="PERF_RB_Z_PASS"/>
+ <value value="19" name="PERF_RB_Z_FAIL"/>
+ <value value="20" name="PERF_RB_S_FAIL"/>
+ <value value="21" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="22" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="23" name="RB_RESERVED"/>
+ <value value="24" name="PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="25" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="27" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="28" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="29" name="PERF_RB_2D_VALID_PIXELS"/>
+</enum>
+
+<enum name="a5xx_rb_samples_perfcounter_select">
+ <value value="0" name="TOTAL_SAMPLES"/>
+ <value value="1" name="ZPASS_SAMPLES"/>
+ <value value="2" name="ZFAIL_SAMPLES"/>
+ <value value="3" name="SFAIL_SAMPLES"/>
+</enum>
+
+<enum name="a5xx_vsc_perfcounter_select">
+ <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VSC_EOT_NUM"/>
+</enum>
+
+<enum name="a5xx_ccu_perfcounter_select">
+ <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
+ <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
+ <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="9" name="PERF_CCU_GMEM_READ"/>
+ <value value="10" name="PERF_CCU_GMEM_WRITE"/>
+ <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="16" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
+ <value value="17" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
+ <value value="18" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
+ <value value="19" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
+ <value value="20" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
+ <value value="21" name="PERF_CCU_2D_BUSY_CYCLES"/>
+ <value value="22" name="PERF_CCU_2D_RD_REQ"/>
+ <value value="23" name="PERF_CCU_2D_WR_REQ"/>
+ <value value="24" name="PERF_CCU_2D_REORDER_STARVE_CYCLES"/>
+ <value value="25" name="PERF_CCU_2D_PIXELS"/>
+</enum>
+
+<enum name="a5xx_cmp_perfcounter_select">
+ <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_VBIF"/>
+ <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
+ <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
+ <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="15" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="16" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="17" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="19" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
+ <value value="20" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
+ <value value="21" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
+ <value value="22" name="PERF_CMPDECMP_2D_RD_DATA"/>
+ <value value="23" name="PERF_CMPDECMP_2D_WR_DATA"/>
+</enum>
+
+<enum name="a5xx_vbif_perfcounter_select">
+ <value value="0" name="AXI_READ_REQUESTS_ID_0"/>
+ <value value="1" name="AXI_READ_REQUESTS_ID_1"/>
+ <value value="2" name="AXI_READ_REQUESTS_ID_2"/>
+ <value value="3" name="AXI_READ_REQUESTS_ID_3"/>
+ <value value="4" name="AXI_READ_REQUESTS_ID_4"/>
+ <value value="5" name="AXI_READ_REQUESTS_ID_5"/>
+ <value value="6" name="AXI_READ_REQUESTS_ID_6"/>
+ <value value="7" name="AXI_READ_REQUESTS_ID_7"/>
+ <value value="8" name="AXI_READ_REQUESTS_ID_8"/>
+ <value value="9" name="AXI_READ_REQUESTS_ID_9"/>
+ <value value="10" name="AXI_READ_REQUESTS_ID_10"/>
+ <value value="11" name="AXI_READ_REQUESTS_ID_11"/>
+ <value value="12" name="AXI_READ_REQUESTS_ID_12"/>
+ <value value="13" name="AXI_READ_REQUESTS_ID_13"/>
+ <value value="14" name="AXI_READ_REQUESTS_ID_14"/>
+ <value value="15" name="AXI_READ_REQUESTS_ID_15"/>
+ <value value="16" name="AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="17" name="AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="18" name="AXI2_READ_REQUESTS_TOTAL"/>
+ <value value="19" name="AXI3_READ_REQUESTS_TOTAL"/>
+ <value value="20" name="AXI_READ_REQUESTS_TOTAL"/>
+ <value value="21" name="AXI_WRITE_REQUESTS_ID_0"/>
+ <value value="22" name="AXI_WRITE_REQUESTS_ID_1"/>
+ <value value="23" name="AXI_WRITE_REQUESTS_ID_2"/>
+ <value value="24" name="AXI_WRITE_REQUESTS_ID_3"/>
+ <value value="25" name="AXI_WRITE_REQUESTS_ID_4"/>
+ <value value="26" name="AXI_WRITE_REQUESTS_ID_5"/>
+ <value value="27" name="AXI_WRITE_REQUESTS_ID_6"/>
+ <value value="28" name="AXI_WRITE_REQUESTS_ID_7"/>
+ <value value="29" name="AXI_WRITE_REQUESTS_ID_8"/>
+ <value value="30" name="AXI_WRITE_REQUESTS_ID_9"/>
+ <value value="31" name="AXI_WRITE_REQUESTS_ID_10"/>
+ <value value="32" name="AXI_WRITE_REQUESTS_ID_11"/>
+ <value value="33" name="AXI_WRITE_REQUESTS_ID_12"/>
+ <value value="34" name="AXI_WRITE_REQUESTS_ID_13"/>
+ <value value="35" name="AXI_WRITE_REQUESTS_ID_14"/>
+ <value value="36" name="AXI_WRITE_REQUESTS_ID_15"/>
+ <value value="37" name="AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="38" name="AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="39" name="AXI2_WRITE_REQUESTS_TOTAL"/>
+ <value value="40" name="AXI3_WRITE_REQUESTS_TOTAL"/>
+ <value value="41" name="AXI_WRITE_REQUESTS_TOTAL"/>
+ <value value="42" name="AXI_TOTAL_REQUESTS"/>
+ <value value="43" name="AXI_READ_DATA_BEATS_ID_0"/>
+ <value value="44" name="AXI_READ_DATA_BEATS_ID_1"/>
+ <value value="45" name="AXI_READ_DATA_BEATS_ID_2"/>
+ <value value="46" name="AXI_READ_DATA_BEATS_ID_3"/>
+ <value value="47" name="AXI_READ_DATA_BEATS_ID_4"/>
+ <value value="48" name="AXI_READ_DATA_BEATS_ID_5"/>
+ <value value="49" name="AXI_READ_DATA_BEATS_ID_6"/>
+ <value value="50" name="AXI_READ_DATA_BEATS_ID_7"/>
+ <value value="51" name="AXI_READ_DATA_BEATS_ID_8"/>
+ <value value="52" name="AXI_READ_DATA_BEATS_ID_9"/>
+ <value value="53" name="AXI_READ_DATA_BEATS_ID_10"/>
+ <value value="54" name="AXI_READ_DATA_BEATS_ID_11"/>
+ <value value="55" name="AXI_READ_DATA_BEATS_ID_12"/>
+ <value value="56" name="AXI_READ_DATA_BEATS_ID_13"/>
+ <value value="57" name="AXI_READ_DATA_BEATS_ID_14"/>
+ <value value="58" name="AXI_READ_DATA_BEATS_ID_15"/>
+ <value value="59" name="AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="60" name="AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="61" name="AXI2_READ_DATA_BEATS_TOTAL"/>
+ <value value="62" name="AXI3_READ_DATA_BEATS_TOTAL"/>
+ <value value="63" name="AXI_READ_DATA_BEATS_TOTAL"/>
+ <value value="64" name="AXI_WRITE_DATA_BEATS_ID_0"/>
+ <value value="65" name="AXI_WRITE_DATA_BEATS_ID_1"/>
+ <value value="66" name="AXI_WRITE_DATA_BEATS_ID_2"/>
+ <value value="67" name="AXI_WRITE_DATA_BEATS_ID_3"/>
+ <value value="68" name="AXI_WRITE_DATA_BEATS_ID_4"/>
+ <value value="69" name="AXI_WRITE_DATA_BEATS_ID_5"/>
+ <value value="70" name="AXI_WRITE_DATA_BEATS_ID_6"/>
+ <value value="71" name="AXI_WRITE_DATA_BEATS_ID_7"/>
+ <value value="72" name="AXI_WRITE_DATA_BEATS_ID_8"/>
+ <value value="73" name="AXI_WRITE_DATA_BEATS_ID_9"/>
+ <value value="74" name="AXI_WRITE_DATA_BEATS_ID_10"/>
+ <value value="75" name="AXI_WRITE_DATA_BEATS_ID_11"/>
+ <value value="76" name="AXI_WRITE_DATA_BEATS_ID_12"/>
+ <value value="77" name="AXI_WRITE_DATA_BEATS_ID_13"/>
+ <value value="78" name="AXI_WRITE_DATA_BEATS_ID_14"/>
+ <value value="79" name="AXI_WRITE_DATA_BEATS_ID_15"/>
+ <value value="80" name="AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="81" name="AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="82" name="AXI2_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="83" name="AXI3_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="84" name="AXI_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="85" name="AXI_DATA_BEATS_TOTAL"/>
+</enum>
+
+<domain name="A5XX" width="32">
+ <bitset name="A5XX_INT0">
+ <bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
+ <bitfield name="RBBM_AHB_ERROR" pos="1" type="boolean"/>
+ <bitfield name="RBBM_TRANSFER_TIMEOUT" pos="2" type="boolean"/>
+ <bitfield name="RBBM_ME_MS_TIMEOUT" pos="3" type="boolean"/>
+ <bitfield name="RBBM_PFP_MS_TIMEOUT" pos="4" type="boolean"/>
+ <bitfield name="RBBM_ETS_MS_TIMEOUT" pos="5" type="boolean"/>
+ <bitfield name="RBBM_ATB_ASYNC_OVERFLOW" pos="6" type="boolean"/>
+ <bitfield name="RBBM_GPC_ERROR" pos="7" type="boolean"/>
+ <bitfield name="CP_SW" pos="8" type="boolean"/>
+ <bitfield name="CP_HW_ERROR" pos="9" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_DEPTH_TS" pos="10" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_COLOR_TS" pos="11" type="boolean"/>
+ <bitfield name="CP_CCU_RESOLVE_TS" pos="12" type="boolean"/>
+ <bitfield name="CP_IB2" pos="13" type="boolean"/>
+ <bitfield name="CP_IB1" pos="14" type="boolean"/>
+ <bitfield name="CP_RB" pos="15" type="boolean"/>
+ <bitfield name="CP_UNUSED_1" pos="16" type="boolean"/>
+ <bitfield name="CP_RB_DONE_TS" pos="17" type="boolean"/>
+ <bitfield name="CP_WT_DONE_TS" pos="18" type="boolean"/>
+ <bitfield name="UNKNOWN_1" pos="19" type="boolean"/>
+ <bitfield name="CP_CACHE_FLUSH_TS" pos="20" type="boolean"/>
+ <bitfield name="UNUSED_2" pos="21" type="boolean"/>
+ <bitfield name="RBBM_ATB_BUS_OVERFLOW" pos="22" type="boolean"/>
+ <bitfield name="MISC_HANG_DETECT" pos="23" type="boolean"/>
+ <bitfield name="UCHE_OOB_ACCESS" pos="24" type="boolean"/>
+ <bitfield name="UCHE_TRAP_INTR" pos="25" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_0" pos="26" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_1" pos="27" type="boolean"/>
+ <bitfield name="GPMU_VOLTAGE_DROOP" pos="28" type="boolean"/>
+ <bitfield name="GPMU_FIRMWARE" pos="29" type="boolean"/>
+ <bitfield name="ISDB_CPU_IRQ" pos="30" type="boolean"/>
+ <bitfield name="ISDB_UNDER_DEBUG" pos="31" type="boolean"/>
+ </bitset>
+
+ <!-- CP Interrupt bits -->
+ <bitset name="A5XX_CP_INT">
+ <bitfield name="CP_OPCODE_ERROR" pos="0" type="boolean"/>
+ <bitfield name="CP_RESERVED_BIT_ERROR" pos="1" type="boolean"/>
+ <bitfield name="CP_HW_FAULT_ERROR" pos="2" type="boolean"/>
+ <bitfield name="CP_DMA_ERROR" pos="3" type="boolean"/>
+ <bitfield name="CP_REGISTER_PROTECTION_ERROR" pos="4" type="boolean"/>
+ <bitfield name="CP_AHB_ERROR" pos="5" type="boolean"/>
+ </bitset>
+
+ <!-- CP registers -->
+ <reg32 offset="0x0800" name="CP_RB_BASE"/>
+ <reg32 offset="0x0801" name="CP_RB_BASE_HI"/>
+ <reg32 offset="0x0802" name="CP_RB_CNTL"/>
+ <reg32 offset="0x0804" name="CP_RB_RPTR_ADDR"/>
+ <reg32 offset="0x0805" name="CP_RB_RPTR_ADDR_HI"/>
+ <reg32 offset="0x0806" name="CP_RB_RPTR"/>
+ <reg32 offset="0x0807" name="CP_RB_WPTR"/>
+ <reg32 offset="0x0808" name="CP_PFP_STAT_ADDR"/>
+ <reg32 offset="0x0809" name="CP_PFP_STAT_DATA"/>
+ <reg32 offset="0x080b" name="CP_DRAW_STATE_ADDR"/>
+ <reg32 offset="0x080c" name="CP_DRAW_STATE_DATA"/>
+ <reg32 offset="0x080d" name="CP_ME_NRT_ADDR_LO"/>
+ <reg32 offset="0x080e" name="CP_ME_NRT_ADDR_HI"/>
+ <reg32 offset="0x0810" name="CP_ME_NRT_DATA"/>
+ <reg32 offset="0x0817" name="CP_CRASH_SCRIPT_BASE_LO"/>
+ <reg32 offset="0x0818" name="CP_CRASH_SCRIPT_BASE_HI"/>
+ <reg32 offset="0x0819" name="CP_CRASH_DUMP_CNTL"/>
+ <reg32 offset="0x081a" name="CP_ME_STAT_ADDR"/>
+ <reg32 offset="0x081f" name="CP_ROQ_THRESHOLDS_1"/>
+ <reg32 offset="0x0820" name="CP_ROQ_THRESHOLDS_2"/>
+ <reg32 offset="0x0821" name="CP_ROQ_DBG_ADDR"/>
+ <reg32 offset="0x0822" name="CP_ROQ_DBG_DATA"/>
+ <reg32 offset="0x0823" name="CP_MEQ_DBG_ADDR"/>
+ <reg32 offset="0x0824" name="CP_MEQ_DBG_DATA"/>
+ <reg32 offset="0x0825" name="CP_MEQ_THRESHOLDS"/>
+ <reg32 offset="0x0826" name="CP_MERCIU_SIZE"/>
+ <reg32 offset="0x0827" name="CP_MERCIU_DBG_ADDR"/>
+ <reg32 offset="0x0828" name="CP_MERCIU_DBG_DATA_1"/>
+ <reg32 offset="0x0829" name="CP_MERCIU_DBG_DATA_2"/>
+ <reg32 offset="0x082a" name="CP_PFP_UCODE_DBG_ADDR"/>
+ <reg32 offset="0x082b" name="CP_PFP_UCODE_DBG_DATA"/>
+ <reg32 offset="0x082f" name="CP_ME_UCODE_DBG_ADDR"/>
+ <reg32 offset="0x0830" name="CP_ME_UCODE_DBG_DATA"/>
+ <reg32 offset="0x0831" name="CP_CNTL"/>
+ <reg32 offset="0x0832" name="CP_PFP_ME_CNTL"/>
+ <reg32 offset="0x0833" name="CP_CHICKEN_DBG"/>
+ <reg32 offset="0x0835" name="CP_PFP_INSTR_BASE_LO"/>
+ <reg32 offset="0x0836" name="CP_PFP_INSTR_BASE_HI"/>
+ <reg32 offset="0x0838" name="CP_ME_INSTR_BASE_LO"/>
+ <reg32 offset="0x0839" name="CP_ME_INSTR_BASE_HI"/>
+ <reg32 offset="0x083b" name="CP_CONTEXT_SWITCH_CNTL"/>
+ <reg32 offset="0x083c" name="CP_CONTEXT_SWITCH_RESTORE_ADDR_LO"/>
+ <reg32 offset="0x083d" name="CP_CONTEXT_SWITCH_RESTORE_ADDR_HI"/>
+ <reg32 offset="0x083e" name="CP_CONTEXT_SWITCH_SAVE_ADDR_LO"/>
+ <reg32 offset="0x083f" name="CP_CONTEXT_SWITCH_SAVE_ADDR_HI"/>
+ <reg32 offset="0x0840" name="CP_CONTEXT_SWITCH_SMMU_INFO_LO"/>
+ <reg32 offset="0x0841" name="CP_CONTEXT_SWITCH_SMMU_INFO_HI"/>
+ <reg32 offset="0x0860" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0b14" name="CP_ME_STAT_DATA"/>
+ <reg32 offset="0x0b15" name="CP_WFI_PEND_CTR"/>
+ <reg32 offset="0x0b18" name="CP_INTERRUPT_STATUS"/>
+ <reg32 offset="0x0b1a" name="CP_HW_FAULT"/>
+ <reg32 offset="0x0b1c" name="CP_PROTECT_STATUS"/>
+ <reg32 offset="0x0b1f" name="CP_IB1_BASE"/>
+ <reg32 offset="0x0b20" name="CP_IB1_BASE_HI"/>
+ <reg32 offset="0x0b21" name="CP_IB1_BUFSZ"/>
+ <reg32 offset="0x0b22" name="CP_IB2_BASE"/>
+ <reg32 offset="0x0b23" name="CP_IB2_BASE_HI"/>
+ <reg32 offset="0x0b24" name="CP_IB2_BUFSZ"/>
+ <array offset="0x0b78" name="CP_SCRATCH" stride="1" length="8">
+ <reg32 offset="0x0" name="REG" type="uint"/>
+ </array>
+ <array offset="0x0880" name="CP_PROTECT" stride="1" length="32">
+ <reg32 offset="0x0" name="REG" type="adreno_cp_protect"/>
+ </array>
+ <reg32 offset="0x08a0" name="CP_PROTECT_CNTL"/>
+ <reg32 offset="0x0b1b" name="CP_AHB_FAULT"/>
+ <reg32 offset="0x0bb0" name="CP_PERFCTR_CP_SEL_0" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb1" name="CP_PERFCTR_CP_SEL_1" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb2" name="CP_PERFCTR_CP_SEL_2" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb3" name="CP_PERFCTR_CP_SEL_3" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb4" name="CP_PERFCTR_CP_SEL_4" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb5" name="CP_PERFCTR_CP_SEL_5" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb6" name="CP_PERFCTR_CP_SEL_6" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bb7" name="CP_PERFCTR_CP_SEL_7" type="a5xx_cp_perfcounter_select"/>
+ <reg32 offset="0x0bc1" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0bba" name="CP_POWERCTR_CP_SEL_0"/>
+ <reg32 offset="0x0bbb" name="CP_POWERCTR_CP_SEL_1"/>
+ <reg32 offset="0x0bbc" name="CP_POWERCTR_CP_SEL_2"/>
+ <reg32 offset="0x0bbd" name="CP_POWERCTR_CP_SEL_3"/>
+
+ <!-- RBBM registers -->
+ <reg32 offset="0x0004" name="RBBM_CFG_DBGBUS_SEL_A"/>
+ <reg32 offset="0x0005" name="RBBM_CFG_DBGBUS_SEL_B"/>
+ <reg32 offset="0x0006" name="RBBM_CFG_DBGBUS_SEL_C"/>
+ <reg32 offset="0x0007" name="RBBM_CFG_DBGBUS_SEL_D"/>
+<!--
+#define A5XX_RBBM_CFG_DBGBUS_SEL_PING_INDEX_SHIFT 0x0
+#define A5XX_RBBM_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT 0x8
+#define A5XX_RBBM_CFG_DBGBUS_SEL_PONG_INDEX_SHIFT 0x10
+#define A5XX_RBBM_CFG_DBGBUS_SEL_PONG_BLK_SEL_SHIFT 0x18
+ -->
+ <reg32 offset="0x0008" name="RBBM_CFG_DBGBUS_CNTLT"/>
+ <reg32 offset="0x0009" name="RBBM_CFG_DBGBUS_CNTLM"/>
+ <reg32 offset="0x0018" name="RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT"/>
+ <reg32 offset="0x000a" name="RBBM_CFG_DBGBUS_OPL"/>
+ <reg32 offset="0x000b" name="RBBM_CFG_DBGBUS_OPE"/>
+ <reg32 offset="0x000c" name="RBBM_CFG_DBGBUS_IVTL_0"/>
+ <reg32 offset="0x000d" name="RBBM_CFG_DBGBUS_IVTL_1"/>
+ <reg32 offset="0x000e" name="RBBM_CFG_DBGBUS_IVTL_2"/>
+ <reg32 offset="0x000f" name="RBBM_CFG_DBGBUS_IVTL_3"/>
+ <reg32 offset="0x0010" name="RBBM_CFG_DBGBUS_MASKL_0"/>
+ <reg32 offset="0x0011" name="RBBM_CFG_DBGBUS_MASKL_1"/>
+ <reg32 offset="0x0012" name="RBBM_CFG_DBGBUS_MASKL_2"/>
+ <reg32 offset="0x0013" name="RBBM_CFG_DBGBUS_MASKL_3"/>
+ <reg32 offset="0x0014" name="RBBM_CFG_DBGBUS_BYTEL_0"/>
+ <reg32 offset="0x0015" name="RBBM_CFG_DBGBUS_BYTEL_1"/>
+ <reg32 offset="0x0016" name="RBBM_CFG_DBGBUS_IVTE_0"/>
+ <reg32 offset="0x0017" name="RBBM_CFG_DBGBUS_IVTE_1"/>
+ <reg32 offset="0x0018" name="RBBM_CFG_DBGBUS_IVTE_2"/>
+ <reg32 offset="0x0019" name="RBBM_CFG_DBGBUS_IVTE_3"/>
+ <reg32 offset="0x001a" name="RBBM_CFG_DBGBUS_MASKE_0"/>
+ <reg32 offset="0x001b" name="RBBM_CFG_DBGBUS_MASKE_1"/>
+ <reg32 offset="0x001c" name="RBBM_CFG_DBGBUS_MASKE_2"/>
+ <reg32 offset="0x001d" name="RBBM_CFG_DBGBUS_MASKE_3"/>
+ <reg32 offset="0x001e" name="RBBM_CFG_DBGBUS_NIBBLEE"/>
+ <reg32 offset="0x001f" name="RBBM_CFG_DBGBUS_PTRC0"/>
+ <reg32 offset="0x0020" name="RBBM_CFG_DBGBUS_PTRC1"/>
+ <reg32 offset="0x0021" name="RBBM_CFG_DBGBUS_LOADREG"/>
+ <reg32 offset="0x0022" name="RBBM_CFG_DBGBUS_IDX"/>
+ <reg32 offset="0x0023" name="RBBM_CFG_DBGBUS_CLRC"/>
+ <reg32 offset="0x0024" name="RBBM_CFG_DBGBUS_LOADIVT"/>
+ <reg32 offset="0x002f" name="RBBM_INTERFACE_HANG_INT_CNTL"/>
+ <reg32 offset="0x0037" name="RBBM_INT_CLEAR_CMD"/>
+ <reg32 offset="0x0038" name="RBBM_INT_0_MASK">
+ <bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
+ <bitfield name="RBBM_AHB_ERROR" pos="1" type="boolean"/>
+ <bitfield name="RBBM_TRANSFER_TIMEOUT" pos="2" type="boolean"/>
+ <bitfield name="RBBM_ME_MS_TIMEOUT" pos="3" type="boolean"/>
+ <bitfield name="RBBM_PFP_MS_TIMEOUT" pos="4" type="boolean"/>
+ <bitfield name="RBBM_ETS_MS_TIMEOUT" pos="5" type="boolean"/>
+ <bitfield name="RBBM_ATB_ASYNC_OVERFLOW" pos="6" type="boolean"/>
+ <bitfield name="RBBM_GPC_ERROR" pos="7" type="boolean"/>
+ <bitfield name="CP_SW" pos="8" type="boolean"/>
+ <bitfield name="CP_HW_ERROR" pos="9" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_DEPTH_TS" pos="10" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_COLOR_TS" pos="11" type="boolean"/>
+ <bitfield name="CP_CCU_RESOLVE_TS" pos="12" type="boolean"/>
+ <bitfield name="CP_IB2" pos="13" type="boolean"/>
+ <bitfield name="CP_IB1" pos="14" type="boolean"/>
+ <bitfield name="CP_RB" pos="15" type="boolean"/>
+ <bitfield name="CP_RB_DONE_TS" pos="17" type="boolean"/>
+ <bitfield name="CP_WT_DONE_TS" pos="18" type="boolean"/>
+ <bitfield name="CP_CACHE_FLUSH_TS" pos="20" type="boolean"/>
+ <bitfield name="RBBM_ATB_BUS_OVERFLOW" pos="22" type="boolean"/>
+ <bitfield name="MISC_HANG_DETECT" pos="23" type="boolean"/>
+ <bitfield name="UCHE_OOB_ACCESS" pos="24" type="boolean"/>
+ <bitfield name="UCHE_TRAP_INTR" pos="25" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_0" pos="26" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_1" pos="27" type="boolean"/>
+ <bitfield name="GPMU_VOLTAGE_DROOP" pos="28" type="boolean"/>
+ <bitfield name="GPMU_FIRMWARE" pos="29" type="boolean"/>
+ <bitfield name="ISDB_CPU_IRQ" pos="30" type="boolean"/>
+ <bitfield name="ISDB_UNDER_DEBUG" pos="31" type="boolean"/>
+ </reg32>
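+ <!-- Note: these mask bits mirror the A5XX_INT0 layout above, minus the
+      unused/unknown positions (16, 19 and 21). -->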
+ <reg32 offset="0x003f" name="RBBM_AHB_DBG_CNTL"/>
+ <reg32 offset="0x0041" name="RBBM_EXT_VBIF_DBG_CNTL"/>
+ <reg32 offset="0x0043" name="RBBM_SW_RESET_CMD"/>
+ <reg32 offset="0x0045" name="RBBM_BLOCK_SW_RESET_CMD"/>
+ <reg32 offset="0x0046" name="RBBM_BLOCK_SW_RESET_CMD2"/>
+ <reg32 offset="0x0048" name="RBBM_DBG_LO_HI_GPIO"/>
+ <reg32 offset="0x0049" name="RBBM_EXT_TRACE_BUS_CNTL"/>
+ <reg32 offset="0x004a" name="RBBM_CLOCK_CNTL_TP0"/>
+ <reg32 offset="0x004b" name="RBBM_CLOCK_CNTL_TP1"/>
+ <reg32 offset="0x004c" name="RBBM_CLOCK_CNTL_TP2"/>
+ <reg32 offset="0x004d" name="RBBM_CLOCK_CNTL_TP3"/>
+ <reg32 offset="0x004e" name="RBBM_CLOCK_CNTL2_TP0"/>
+ <reg32 offset="0x004f" name="RBBM_CLOCK_CNTL2_TP1"/>
+ <reg32 offset="0x0050" name="RBBM_CLOCK_CNTL2_TP2"/>
+ <reg32 offset="0x0051" name="RBBM_CLOCK_CNTL2_TP3"/>
+ <reg32 offset="0x0052" name="RBBM_CLOCK_CNTL3_TP0"/>
+ <reg32 offset="0x0053" name="RBBM_CLOCK_CNTL3_TP1"/>
+ <reg32 offset="0x0054" name="RBBM_CLOCK_CNTL3_TP2"/>
+ <reg32 offset="0x0055" name="RBBM_CLOCK_CNTL3_TP3"/>
+ <reg32 offset="0x0059" name="RBBM_READ_AHB_THROUGH_DBG"/>
+ <reg32 offset="0x005a" name="RBBM_CLOCK_CNTL_UCHE"/>
+ <reg32 offset="0x005b" name="RBBM_CLOCK_CNTL2_UCHE"/>
+ <reg32 offset="0x005c" name="RBBM_CLOCK_CNTL3_UCHE"/>
+ <reg32 offset="0x005d" name="RBBM_CLOCK_CNTL4_UCHE"/>
+ <reg32 offset="0x005e" name="RBBM_CLOCK_HYST_UCHE"/>
+ <reg32 offset="0x005f" name="RBBM_CLOCK_DELAY_UCHE"/>
+ <reg32 offset="0x0060" name="RBBM_CLOCK_MODE_GPC"/>
+ <reg32 offset="0x0061" name="RBBM_CLOCK_DELAY_GPC"/>
+ <reg32 offset="0x0062" name="RBBM_CLOCK_HYST_GPC"/>
+ <reg32 offset="0x0063" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0064" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0065" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0066" name="RBBM_CLOCK_DELAY_HLSQ"/>
+ <reg32 offset="0x0067" name="RBBM_CLOCK_CNTL"/>
+ <reg32 offset="0x0068" name="RBBM_CLOCK_CNTL_SP0"/>
+ <reg32 offset="0x0069" name="RBBM_CLOCK_CNTL_SP1"/>
+ <reg32 offset="0x006a" name="RBBM_CLOCK_CNTL_SP2"/>
+ <reg32 offset="0x006b" name="RBBM_CLOCK_CNTL_SP3"/>
+ <reg32 offset="0x006c" name="RBBM_CLOCK_CNTL2_SP0"/>
+ <reg32 offset="0x006d" name="RBBM_CLOCK_CNTL2_SP1"/>
+ <reg32 offset="0x006e" name="RBBM_CLOCK_CNTL2_SP2"/>
+ <reg32 offset="0x006f" name="RBBM_CLOCK_CNTL2_SP3"/>
+ <reg32 offset="0x0070" name="RBBM_CLOCK_HYST_SP0"/>
+ <reg32 offset="0x0071" name="RBBM_CLOCK_HYST_SP1"/>
+ <reg32 offset="0x0072" name="RBBM_CLOCK_HYST_SP2"/>
+ <reg32 offset="0x0073" name="RBBM_CLOCK_HYST_SP3"/>
+ <reg32 offset="0x0074" name="RBBM_CLOCK_DELAY_SP0"/>
+ <reg32 offset="0x0075" name="RBBM_CLOCK_DELAY_SP1"/>
+ <reg32 offset="0x0076" name="RBBM_CLOCK_DELAY_SP2"/>
+ <reg32 offset="0x0077" name="RBBM_CLOCK_DELAY_SP3"/>
+ <reg32 offset="0x0078" name="RBBM_CLOCK_CNTL_RB0"/>
+ <reg32 offset="0x0079" name="RBBM_CLOCK_CNTL_RB1"/>
+ <reg32 offset="0x007a" name="RBBM_CLOCK_CNTL_RB2"/>
+ <reg32 offset="0x007b" name="RBBM_CLOCK_CNTL_RB3"/>
+ <reg32 offset="0x007c" name="RBBM_CLOCK_CNTL2_RB0"/>
+ <reg32 offset="0x007d" name="RBBM_CLOCK_CNTL2_RB1"/>
+ <reg32 offset="0x007e" name="RBBM_CLOCK_CNTL2_RB2"/>
+ <reg32 offset="0x007f" name="RBBM_CLOCK_CNTL2_RB3"/>
+ <reg32 offset="0x0080" name="RBBM_CLOCK_HYST_RAC"/>
+ <reg32 offset="0x0081" name="RBBM_CLOCK_DELAY_RAC"/>
+ <reg32 offset="0x0082" name="RBBM_CLOCK_CNTL_CCU0"/>
+ <reg32 offset="0x0083" name="RBBM_CLOCK_CNTL_CCU1"/>
+ <reg32 offset="0x0084" name="RBBM_CLOCK_CNTL_CCU2"/>
+ <reg32 offset="0x0085" name="RBBM_CLOCK_CNTL_CCU3"/>
+ <reg32 offset="0x0086" name="RBBM_CLOCK_HYST_RB_CCU0"/>
+ <reg32 offset="0x0087" name="RBBM_CLOCK_HYST_RB_CCU1"/>
+ <reg32 offset="0x0088" name="RBBM_CLOCK_HYST_RB_CCU2"/>
+ <reg32 offset="0x0089" name="RBBM_CLOCK_HYST_RB_CCU3"/>
+ <reg32 offset="0x008a" name="RBBM_CLOCK_CNTL_RAC"/>
+ <reg32 offset="0x008b" name="RBBM_CLOCK_CNTL2_RAC"/>
+ <reg32 offset="0x008c" name="RBBM_CLOCK_DELAY_RB_CCU_L1_0"/>
+ <reg32 offset="0x008d" name="RBBM_CLOCK_DELAY_RB_CCU_L1_1"/>
+ <reg32 offset="0x008e" name="RBBM_CLOCK_DELAY_RB_CCU_L1_2"/>
+ <reg32 offset="0x008f" name="RBBM_CLOCK_DELAY_RB_CCU_L1_3"/>
+ <reg32 offset="0x0090" name="RBBM_CLOCK_HYST_VFD"/>
+ <reg32 offset="0x0091" name="RBBM_CLOCK_MODE_VFD"/>
+ <reg32 offset="0x0092" name="RBBM_CLOCK_DELAY_VFD"/>
+ <reg32 offset="0x0093" name="RBBM_AHB_CNTL0"/>
+ <reg32 offset="0x0094" name="RBBM_AHB_CNTL1"/>
+ <reg32 offset="0x0095" name="RBBM_AHB_CNTL2"/>
+ <reg32 offset="0x0096" name="RBBM_AHB_CMD"/>
+ <reg32 offset="0x009c" name="RBBM_INTERFACE_HANG_MASK_CNTL11"/>
+ <reg32 offset="0x009d" name="RBBM_INTERFACE_HANG_MASK_CNTL12"/>
+ <reg32 offset="0x009e" name="RBBM_INTERFACE_HANG_MASK_CNTL13"/>
+ <reg32 offset="0x009f" name="RBBM_INTERFACE_HANG_MASK_CNTL14"/>
+ <reg32 offset="0x00a0" name="RBBM_INTERFACE_HANG_MASK_CNTL15"/>
+ <reg32 offset="0x00a1" name="RBBM_INTERFACE_HANG_MASK_CNTL16"/>
+ <reg32 offset="0x00a2" name="RBBM_INTERFACE_HANG_MASK_CNTL17"/>
+ <reg32 offset="0x00a3" name="RBBM_INTERFACE_HANG_MASK_CNTL18"/>
+ <reg32 offset="0x00a4" name="RBBM_CLOCK_DELAY_TP0"/>
+ <reg32 offset="0x00a5" name="RBBM_CLOCK_DELAY_TP1"/>
+ <reg32 offset="0x00a6" name="RBBM_CLOCK_DELAY_TP2"/>
+ <reg32 offset="0x00a7" name="RBBM_CLOCK_DELAY_TP3"/>
+ <reg32 offset="0x00a8" name="RBBM_CLOCK_DELAY2_TP0"/>
+ <reg32 offset="0x00a9" name="RBBM_CLOCK_DELAY2_TP1"/>
+ <reg32 offset="0x00aa" name="RBBM_CLOCK_DELAY2_TP2"/>
+ <reg32 offset="0x00ab" name="RBBM_CLOCK_DELAY2_TP3"/>
+ <reg32 offset="0x00ac" name="RBBM_CLOCK_DELAY3_TP0"/>
+ <reg32 offset="0x00ad" name="RBBM_CLOCK_DELAY3_TP1"/>
+ <reg32 offset="0x00ae" name="RBBM_CLOCK_DELAY3_TP2"/>
+ <reg32 offset="0x00af" name="RBBM_CLOCK_DELAY3_TP3"/>
+ <reg32 offset="0x00b0" name="RBBM_CLOCK_HYST_TP0"/>
+ <reg32 offset="0x00b1" name="RBBM_CLOCK_HYST_TP1"/>
+ <reg32 offset="0x00b2" name="RBBM_CLOCK_HYST_TP2"/>
+ <reg32 offset="0x00b3" name="RBBM_CLOCK_HYST_TP3"/>
+ <reg32 offset="0x00b4" name="RBBM_CLOCK_HYST2_TP0"/>
+ <reg32 offset="0x00b5" name="RBBM_CLOCK_HYST2_TP1"/>
+ <reg32 offset="0x00b6" name="RBBM_CLOCK_HYST2_TP2"/>
+ <reg32 offset="0x00b7" name="RBBM_CLOCK_HYST2_TP3"/>
+ <reg32 offset="0x00b8" name="RBBM_CLOCK_HYST3_TP0"/>
+ <reg32 offset="0x00b9" name="RBBM_CLOCK_HYST3_TP1"/>
+ <reg32 offset="0x00ba" name="RBBM_CLOCK_HYST3_TP2"/>
+ <reg32 offset="0x00bb" name="RBBM_CLOCK_HYST3_TP3"/>
+ <reg32 offset="0x00c8" name="RBBM_CLOCK_CNTL_GPMU"/>
+ <reg32 offset="0x00c9" name="RBBM_CLOCK_DELAY_GPMU"/>
+ <reg32 offset="0x00ca" name="RBBM_CLOCK_HYST_GPMU"/>
+ <reg32 offset="0x03a0" name="RBBM_PERFCTR_CP_0_LO"/>
+ <reg32 offset="0x03a1" name="RBBM_PERFCTR_CP_0_HI"/>
+ <reg32 offset="0x03a2" name="RBBM_PERFCTR_CP_1_LO"/>
+ <reg32 offset="0x03a3" name="RBBM_PERFCTR_CP_1_HI"/>
+ <reg32 offset="0x03a4" name="RBBM_PERFCTR_CP_2_LO"/>
+ <reg32 offset="0x03a5" name="RBBM_PERFCTR_CP_2_HI"/>
+ <reg32 offset="0x03a6" name="RBBM_PERFCTR_CP_3_LO"/>
+ <reg32 offset="0x03a7" name="RBBM_PERFCTR_CP_3_HI"/>
+ <reg32 offset="0x03a8" name="RBBM_PERFCTR_CP_4_LO"/>
+ <reg32 offset="0x03a9" name="RBBM_PERFCTR_CP_4_HI"/>
+ <reg32 offset="0x03aa" name="RBBM_PERFCTR_CP_5_LO"/>
+ <reg32 offset="0x03ab" name="RBBM_PERFCTR_CP_5_HI"/>
+ <reg32 offset="0x03ac" name="RBBM_PERFCTR_CP_6_LO"/>
+ <reg32 offset="0x03ad" name="RBBM_PERFCTR_CP_6_HI"/>
+ <reg32 offset="0x03ae" name="RBBM_PERFCTR_CP_7_LO"/>
+ <reg32 offset="0x03af" name="RBBM_PERFCTR_CP_7_HI"/>
+ <reg32 offset="0x03b0" name="RBBM_PERFCTR_RBBM_0_LO"/>
+ <reg32 offset="0x03b1" name="RBBM_PERFCTR_RBBM_0_HI"/>
+ <reg32 offset="0x03b2" name="RBBM_PERFCTR_RBBM_1_LO"/>
+ <reg32 offset="0x03b3" name="RBBM_PERFCTR_RBBM_1_HI"/>
+ <reg32 offset="0x03b4" name="RBBM_PERFCTR_RBBM_2_LO"/>
+ <reg32 offset="0x03b5" name="RBBM_PERFCTR_RBBM_2_HI"/>
+ <reg32 offset="0x03b6" name="RBBM_PERFCTR_RBBM_3_LO"/>
+ <reg32 offset="0x03b7" name="RBBM_PERFCTR_RBBM_3_HI"/>
+ <reg32 offset="0x03b8" name="RBBM_PERFCTR_PC_0_LO"/>
+ <reg32 offset="0x03b9" name="RBBM_PERFCTR_PC_0_HI"/>
+ <reg32 offset="0x03ba" name="RBBM_PERFCTR_PC_1_LO"/>
+ <reg32 offset="0x03bb" name="RBBM_PERFCTR_PC_1_HI"/>
+ <reg32 offset="0x03bc" name="RBBM_PERFCTR_PC_2_LO"/>
+ <reg32 offset="0x03bd" name="RBBM_PERFCTR_PC_2_HI"/>
+ <reg32 offset="0x03be" name="RBBM_PERFCTR_PC_3_LO"/>
+ <reg32 offset="0x03bf" name="RBBM_PERFCTR_PC_3_HI"/>
+ <reg32 offset="0x03c0" name="RBBM_PERFCTR_PC_4_LO"/>
+ <reg32 offset="0x03c1" name="RBBM_PERFCTR_PC_4_HI"/>
+ <reg32 offset="0x03c2" name="RBBM_PERFCTR_PC_5_LO"/>
+ <reg32 offset="0x03c3" name="RBBM_PERFCTR_PC_5_HI"/>
+ <reg32 offset="0x03c4" name="RBBM_PERFCTR_PC_6_LO"/>
+ <reg32 offset="0x03c5" name="RBBM_PERFCTR_PC_6_HI"/>
+ <reg32 offset="0x03c6" name="RBBM_PERFCTR_PC_7_LO"/>
+ <reg32 offset="0x03c7" name="RBBM_PERFCTR_PC_7_HI"/>
+ <reg32 offset="0x03c8" name="RBBM_PERFCTR_VFD_0_LO"/>
+ <reg32 offset="0x03c9" name="RBBM_PERFCTR_VFD_0_HI"/>
+ <reg32 offset="0x03ca" name="RBBM_PERFCTR_VFD_1_LO"/>
+ <reg32 offset="0x03cb" name="RBBM_PERFCTR_VFD_1_HI"/>
+ <reg32 offset="0x03cc" name="RBBM_PERFCTR_VFD_2_LO"/>
+ <reg32 offset="0x03cd" name="RBBM_PERFCTR_VFD_2_HI"/>
+ <reg32 offset="0x03ce" name="RBBM_PERFCTR_VFD_3_LO"/>
+ <reg32 offset="0x03cf" name="RBBM_PERFCTR_VFD_3_HI"/>
+ <reg32 offset="0x03d0" name="RBBM_PERFCTR_VFD_4_LO"/>
+ <reg32 offset="0x03d1" name="RBBM_PERFCTR_VFD_4_HI"/>
+ <reg32 offset="0x03d2" name="RBBM_PERFCTR_VFD_5_LO"/>
+ <reg32 offset="0x03d3" name="RBBM_PERFCTR_VFD_5_HI"/>
+ <reg32 offset="0x03d4" name="RBBM_PERFCTR_VFD_6_LO"/>
+ <reg32 offset="0x03d5" name="RBBM_PERFCTR_VFD_6_HI"/>
+ <reg32 offset="0x03d6" name="RBBM_PERFCTR_VFD_7_LO"/>
+ <reg32 offset="0x03d7" name="RBBM_PERFCTR_VFD_7_HI"/>
+ <reg32 offset="0x03d8" name="RBBM_PERFCTR_HLSQ_0_LO"/>
+ <reg32 offset="0x03d9" name="RBBM_PERFCTR_HLSQ_0_HI"/>
+ <reg32 offset="0x03da" name="RBBM_PERFCTR_HLSQ_1_LO"/>
+ <reg32 offset="0x03db" name="RBBM_PERFCTR_HLSQ_1_HI"/>
+ <reg32 offset="0x03dc" name="RBBM_PERFCTR_HLSQ_2_LO"/>
+ <reg32 offset="0x03dd" name="RBBM_PERFCTR_HLSQ_2_HI"/>
+ <reg32 offset="0x03de" name="RBBM_PERFCTR_HLSQ_3_LO"/>
+ <reg32 offset="0x03df" name="RBBM_PERFCTR_HLSQ_3_HI"/>
+ <reg32 offset="0x03e0" name="RBBM_PERFCTR_HLSQ_4_LO"/>
+ <reg32 offset="0x03e1" name="RBBM_PERFCTR_HLSQ_4_HI"/>
+ <reg32 offset="0x03e2" name="RBBM_PERFCTR_HLSQ_5_LO"/>
+ <reg32 offset="0x03e3" name="RBBM_PERFCTR_HLSQ_5_HI"/>
+ <reg32 offset="0x03e4" name="RBBM_PERFCTR_HLSQ_6_LO"/>
+ <reg32 offset="0x03e5" name="RBBM_PERFCTR_HLSQ_6_HI"/>
+ <reg32 offset="0x03e6" name="RBBM_PERFCTR_HLSQ_7_LO"/>
+ <reg32 offset="0x03e7" name="RBBM_PERFCTR_HLSQ_7_HI"/>
+ <reg32 offset="0x03e8" name="RBBM_PERFCTR_VPC_0_LO"/>
+ <reg32 offset="0x03e9" name="RBBM_PERFCTR_VPC_0_HI"/>
+ <reg32 offset="0x03ea" name="RBBM_PERFCTR_VPC_1_LO"/>
+ <reg32 offset="0x03eb" name="RBBM_PERFCTR_VPC_1_HI"/>
+ <reg32 offset="0x03ec" name="RBBM_PERFCTR_VPC_2_LO"/>
+ <reg32 offset="0x03ed" name="RBBM_PERFCTR_VPC_2_HI"/>
+ <reg32 offset="0x03ee" name="RBBM_PERFCTR_VPC_3_LO"/>
+ <reg32 offset="0x03ef" name="RBBM_PERFCTR_VPC_3_HI"/>
+ <reg32 offset="0x03f0" name="RBBM_PERFCTR_CCU_0_LO"/>
+ <reg32 offset="0x03f1" name="RBBM_PERFCTR_CCU_0_HI"/>
+ <reg32 offset="0x03f2" name="RBBM_PERFCTR_CCU_1_LO"/>
+ <reg32 offset="0x03f3" name="RBBM_PERFCTR_CCU_1_HI"/>
+ <reg32 offset="0x03f4" name="RBBM_PERFCTR_CCU_2_LO"/>
+ <reg32 offset="0x03f5" name="RBBM_PERFCTR_CCU_2_HI"/>
+ <reg32 offset="0x03f6" name="RBBM_PERFCTR_CCU_3_LO"/>
+ <reg32 offset="0x03f7" name="RBBM_PERFCTR_CCU_3_HI"/>
+ <reg32 offset="0x03f8" name="RBBM_PERFCTR_TSE_0_LO"/>
+ <reg32 offset="0x03f9" name="RBBM_PERFCTR_TSE_0_HI"/>
+ <reg32 offset="0x03fa" name="RBBM_PERFCTR_TSE_1_LO"/>
+ <reg32 offset="0x03fb" name="RBBM_PERFCTR_TSE_1_HI"/>
+ <reg32 offset="0x03fc" name="RBBM_PERFCTR_TSE_2_LO"/>
+ <reg32 offset="0x03fd" name="RBBM_PERFCTR_TSE_2_HI"/>
+ <reg32 offset="0x03fe" name="RBBM_PERFCTR_TSE_3_LO"/>
+ <reg32 offset="0x03ff" name="RBBM_PERFCTR_TSE_3_HI"/>
+ <reg32 offset="0x0400" name="RBBM_PERFCTR_RAS_0_LO"/>
+ <reg32 offset="0x0401" name="RBBM_PERFCTR_RAS_0_HI"/>
+ <reg32 offset="0x0402" name="RBBM_PERFCTR_RAS_1_LO"/>
+ <reg32 offset="0x0403" name="RBBM_PERFCTR_RAS_1_HI"/>
+ <reg32 offset="0x0404" name="RBBM_PERFCTR_RAS_2_LO"/>
+ <reg32 offset="0x0405" name="RBBM_PERFCTR_RAS_2_HI"/>
+ <reg32 offset="0x0406" name="RBBM_PERFCTR_RAS_3_LO"/>
+ <reg32 offset="0x0407" name="RBBM_PERFCTR_RAS_3_HI"/>
+ <reg32 offset="0x0408" name="RBBM_PERFCTR_UCHE_0_LO"/>
+ <reg32 offset="0x0409" name="RBBM_PERFCTR_UCHE_0_HI"/>
+ <reg32 offset="0x040a" name="RBBM_PERFCTR_UCHE_1_LO"/>
+ <reg32 offset="0x040b" name="RBBM_PERFCTR_UCHE_1_HI"/>
+ <reg32 offset="0x040c" name="RBBM_PERFCTR_UCHE_2_LO"/>
+ <reg32 offset="0x040d" name="RBBM_PERFCTR_UCHE_2_HI"/>
+ <reg32 offset="0x040e" name="RBBM_PERFCTR_UCHE_3_LO"/>
+ <reg32 offset="0x040f" name="RBBM_PERFCTR_UCHE_3_HI"/>
+ <reg32 offset="0x0410" name="RBBM_PERFCTR_UCHE_4_LO"/>
+ <reg32 offset="0x0411" name="RBBM_PERFCTR_UCHE_4_HI"/>
+ <reg32 offset="0x0412" name="RBBM_PERFCTR_UCHE_5_LO"/>
+ <reg32 offset="0x0413" name="RBBM_PERFCTR_UCHE_5_HI"/>
+ <reg32 offset="0x0414" name="RBBM_PERFCTR_UCHE_6_LO"/>
+ <reg32 offset="0x0415" name="RBBM_PERFCTR_UCHE_6_HI"/>
+ <reg32 offset="0x0416" name="RBBM_PERFCTR_UCHE_7_LO"/>
+ <reg32 offset="0x0417" name="RBBM_PERFCTR_UCHE_7_HI"/>
+ <reg32 offset="0x0418" name="RBBM_PERFCTR_TP_0_LO"/>
+ <reg32 offset="0x0419" name="RBBM_PERFCTR_TP_0_HI"/>
+ <reg32 offset="0x041a" name="RBBM_PERFCTR_TP_1_LO"/>
+ <reg32 offset="0x041b" name="RBBM_PERFCTR_TP_1_HI"/>
+ <reg32 offset="0x041c" name="RBBM_PERFCTR_TP_2_LO"/>
+ <reg32 offset="0x041d" name="RBBM_PERFCTR_TP_2_HI"/>
+ <reg32 offset="0x041e" name="RBBM_PERFCTR_TP_3_LO"/>
+ <reg32 offset="0x041f" name="RBBM_PERFCTR_TP_3_HI"/>
+ <reg32 offset="0x0420" name="RBBM_PERFCTR_TP_4_LO"/>
+ <reg32 offset="0x0421" name="RBBM_PERFCTR_TP_4_HI"/>
+ <reg32 offset="0x0422" name="RBBM_PERFCTR_TP_5_LO"/>
+ <reg32 offset="0x0423" name="RBBM_PERFCTR_TP_5_HI"/>
+ <reg32 offset="0x0424" name="RBBM_PERFCTR_TP_6_LO"/>
+ <reg32 offset="0x0425" name="RBBM_PERFCTR_TP_6_HI"/>
+ <reg32 offset="0x0426" name="RBBM_PERFCTR_TP_7_LO"/>
+ <reg32 offset="0x0427" name="RBBM_PERFCTR_TP_7_HI"/>
+ <reg32 offset="0x0428" name="RBBM_PERFCTR_SP_0_LO"/>
+ <reg32 offset="0x0429" name="RBBM_PERFCTR_SP_0_HI"/>
+ <reg32 offset="0x042a" name="RBBM_PERFCTR_SP_1_LO"/>
+ <reg32 offset="0x042b" name="RBBM_PERFCTR_SP_1_HI"/>
+ <reg32 offset="0x042c" name="RBBM_PERFCTR_SP_2_LO"/>
+ <reg32 offset="0x042d" name="RBBM_PERFCTR_SP_2_HI"/>
+ <reg32 offset="0x042e" name="RBBM_PERFCTR_SP_3_LO"/>
+ <reg32 offset="0x042f" name="RBBM_PERFCTR_SP_3_HI"/>
+ <reg32 offset="0x0430" name="RBBM_PERFCTR_SP_4_LO"/>
+ <reg32 offset="0x0431" name="RBBM_PERFCTR_SP_4_HI"/>
+ <reg32 offset="0x0432" name="RBBM_PERFCTR_SP_5_LO"/>
+ <reg32 offset="0x0433" name="RBBM_PERFCTR_SP_5_HI"/>
+ <reg32 offset="0x0434" name="RBBM_PERFCTR_SP_6_LO"/>
+ <reg32 offset="0x0435" name="RBBM_PERFCTR_SP_6_HI"/>
+ <reg32 offset="0x0436" name="RBBM_PERFCTR_SP_7_LO"/>
+ <reg32 offset="0x0437" name="RBBM_PERFCTR_SP_7_HI"/>
+ <reg32 offset="0x0438" name="RBBM_PERFCTR_SP_8_LO"/>
+ <reg32 offset="0x0439" name="RBBM_PERFCTR_SP_8_HI"/>
+ <reg32 offset="0x043a" name="RBBM_PERFCTR_SP_9_LO"/>
+ <reg32 offset="0x043b" name="RBBM_PERFCTR_SP_9_HI"/>
+ <reg32 offset="0x043c" name="RBBM_PERFCTR_SP_10_LO"/>
+ <reg32 offset="0x043d" name="RBBM_PERFCTR_SP_10_HI"/>
+ <reg32 offset="0x043e" name="RBBM_PERFCTR_SP_11_LO"/>
+ <reg32 offset="0x043f" name="RBBM_PERFCTR_SP_11_HI"/>
+ <reg32 offset="0x0440" name="RBBM_PERFCTR_RB_0_LO"/>
+ <reg32 offset="0x0441" name="RBBM_PERFCTR_RB_0_HI"/>
+ <reg32 offset="0x0442" name="RBBM_PERFCTR_RB_1_LO"/>
+ <reg32 offset="0x0443" name="RBBM_PERFCTR_RB_1_HI"/>
+ <reg32 offset="0x0444" name="RBBM_PERFCTR_RB_2_LO"/>
+ <reg32 offset="0x0445" name="RBBM_PERFCTR_RB_2_HI"/>
+ <reg32 offset="0x0446" name="RBBM_PERFCTR_RB_3_LO"/>
+ <reg32 offset="0x0447" name="RBBM_PERFCTR_RB_3_HI"/>
+ <reg32 offset="0x0448" name="RBBM_PERFCTR_RB_4_LO"/>
+ <reg32 offset="0x0449" name="RBBM_PERFCTR_RB_4_HI"/>
+ <reg32 offset="0x044a" name="RBBM_PERFCTR_RB_5_LO"/>
+ <reg32 offset="0x044b" name="RBBM_PERFCTR_RB_5_HI"/>
+ <reg32 offset="0x044c" name="RBBM_PERFCTR_RB_6_LO"/>
+ <reg32 offset="0x044d" name="RBBM_PERFCTR_RB_6_HI"/>
+ <reg32 offset="0x044e" name="RBBM_PERFCTR_RB_7_LO"/>
+ <reg32 offset="0x044f" name="RBBM_PERFCTR_RB_7_HI"/>
+ <reg32 offset="0x0450" name="RBBM_PERFCTR_VSC_0_LO"/>
+ <reg32 offset="0x0451" name="RBBM_PERFCTR_VSC_0_HI"/>
+ <reg32 offset="0x0452" name="RBBM_PERFCTR_VSC_1_LO"/>
+ <reg32 offset="0x0453" name="RBBM_PERFCTR_VSC_1_HI"/>
+ <reg32 offset="0x0454" name="RBBM_PERFCTR_LRZ_0_LO"/>
+ <reg32 offset="0x0455" name="RBBM_PERFCTR_LRZ_0_HI"/>
+ <reg32 offset="0x0456" name="RBBM_PERFCTR_LRZ_1_LO"/>
+ <reg32 offset="0x0457" name="RBBM_PERFCTR_LRZ_1_HI"/>
+ <reg32 offset="0x0458" name="RBBM_PERFCTR_LRZ_2_LO"/>
+ <reg32 offset="0x0459" name="RBBM_PERFCTR_LRZ_2_HI"/>
+ <reg32 offset="0x045a" name="RBBM_PERFCTR_LRZ_3_LO"/>
+ <reg32 offset="0x045b" name="RBBM_PERFCTR_LRZ_3_HI"/>
+ <reg32 offset="0x045c" name="RBBM_PERFCTR_CMP_0_LO"/>
+ <reg32 offset="0x045d" name="RBBM_PERFCTR_CMP_0_HI"/>
+ <reg32 offset="0x045e" name="RBBM_PERFCTR_CMP_1_LO"/>
+ <reg32 offset="0x045f" name="RBBM_PERFCTR_CMP_1_HI"/>
+ <reg32 offset="0x0460" name="RBBM_PERFCTR_CMP_2_LO"/>
+ <reg32 offset="0x0461" name="RBBM_PERFCTR_CMP_2_HI"/>
+ <reg32 offset="0x0462" name="RBBM_PERFCTR_CMP_3_LO"/>
+ <reg32 offset="0x0463" name="RBBM_PERFCTR_CMP_3_HI"/>
+ <reg32 offset="0x046b" name="RBBM_PERFCTR_RBBM_SEL_0" type="a5xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x046c" name="RBBM_PERFCTR_RBBM_SEL_1" type="a5xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x046d" name="RBBM_PERFCTR_RBBM_SEL_2" type="a5xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x046e" name="RBBM_PERFCTR_RBBM_SEL_3" type="a5xx_rbbm_perfcounter_select"/>
+ <reg32 offset="0x04d2" name="RBBM_ALWAYSON_COUNTER_LO"/>
+ <reg32 offset="0x04d3" name="RBBM_ALWAYSON_COUNTER_HI"/>
+ <reg32 offset="0x04f5" name="RBBM_STATUS">
+ <bitfield high="31" low="31" name="GPU_BUSY_IGN_AHB" />
+ <bitfield high="30" low="30" name="GPU_BUSY_IGN_AHB_CP" />
+ <bitfield high="29" low="29" name="HLSQ_BUSY" />
+ <bitfield high="28" low="28" name="VSC_BUSY" />
+ <bitfield high="27" low="27" name="TPL1_BUSY" />
+ <bitfield high="26" low="26" name="SP_BUSY" />
+ <bitfield high="25" low="25" name="UCHE_BUSY" />
+ <bitfield high="24" low="24" name="VPC_BUSY" />
+ <bitfield high="23" low="23" name="VFDP_BUSY" />
+ <bitfield high="22" low="22" name="VFD_BUSY" />
+ <bitfield high="21" low="21" name="TESS_BUSY" />
+ <bitfield high="20" low="20" name="PC_VSD_BUSY" />
+ <bitfield high="19" low="19" name="PC_DCALL_BUSY" />
+ <bitfield high="18" low="18" name="GPMU_SLAVE_BUSY" />
+ <bitfield high="17" low="17" name="DCOM_BUSY" />
+ <bitfield high="16" low="16" name="COM_BUSY" />
+ <bitfield high="15" low="15" name="LRZ_BUZY" />
+ <bitfield high="14" low="14" name="A2D_DSP_BUSY" />
+ <bitfield high="13" low="13" name="CCUFCHE_BUSY" />
+ <bitfield high="12" low="12" name="RB_BUSY" />
+ <bitfield high="11" low="11" name="RAS_BUSY" />
+ <bitfield high="10" low="10" name="TSE_BUSY" />
+ <bitfield high="9" low="9" name="VBIF_BUSY" />
+ <bitfield high="8" low="8" name="GPU_BUSY_IGN_AHB_HYST" />
+ <bitfield high="7" low="7" name="CP_BUSY_IGN_HYST" />
+ <bitfield high="6" low="6" name="CP_BUSY" />
+ <bitfield high="5" low="5" name="GPMU_MASTER_BUSY" />
+ <bitfield high="4" low="4" name="CP_CRASH_BUSY" />
+ <bitfield high="3" low="3" name="CP_ETS_BUSY" />
+ <bitfield high="2" low="2" name="CP_PFP_BUSY" />
+ <bitfield high="1" low="1" name="CP_ME_BUSY" />
+ <bitfield high="0" low="0" name="HI_BUSY" />
+ </reg32>
+ <reg32 offset="0x0530" name="RBBM_STATUS3">
+ <bitfield pos="24" name="SMMU_STALLED_ON_FAULT" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x04e1" name="RBBM_INT_0_STATUS"/>
+ <reg32 offset="0x04f0" name="RBBM_AHB_ME_SPLIT_STATUS"/>
+ <reg32 offset="0x04f1" name="RBBM_AHB_PFP_SPLIT_STATUS"/>
+ <reg32 offset="0x04f3" name="RBBM_AHB_ETS_SPLIT_STATUS"/>
+ <reg32 offset="0x04f4" name="RBBM_AHB_ERROR_STATUS"/>
+ <reg32 offset="0x0464" name="RBBM_PERFCTR_CNTL"/>
+ <reg32 offset="0x0465" name="RBBM_PERFCTR_LOAD_CMD0"/>
+ <reg32 offset="0x0466" name="RBBM_PERFCTR_LOAD_CMD1"/>
+ <reg32 offset="0x0467" name="RBBM_PERFCTR_LOAD_CMD2"/>
+ <reg32 offset="0x0468" name="RBBM_PERFCTR_LOAD_CMD3"/>
+ <reg32 offset="0x0469" name="RBBM_PERFCTR_LOAD_VALUE_LO"/>
+ <reg32 offset="0x046a" name="RBBM_PERFCTR_LOAD_VALUE_HI"/>
+ <reg32 offset="0x046f" name="RBBM_PERFCTR_GPU_BUSY_MASKED"/>
+ <reg32 offset="0x04ed" name="RBBM_AHB_ERROR"/>
+ <reg32 offset="0x0504" name="RBBM_CFG_DBGBUS_EVENT_LOGIC"/>
+ <reg32 offset="0x0505" name="RBBM_CFG_DBGBUS_OVER"/>
+ <reg32 offset="0x0506" name="RBBM_CFG_DBGBUS_COUNT0"/>
+ <reg32 offset="0x0507" name="RBBM_CFG_DBGBUS_COUNT1"/>
+ <reg32 offset="0x0508" name="RBBM_CFG_DBGBUS_COUNT2"/>
+ <reg32 offset="0x0509" name="RBBM_CFG_DBGBUS_COUNT3"/>
+ <reg32 offset="0x050a" name="RBBM_CFG_DBGBUS_COUNT4"/>
+ <reg32 offset="0x050b" name="RBBM_CFG_DBGBUS_COUNT5"/>
+ <reg32 offset="0x050c" name="RBBM_CFG_DBGBUS_TRACE_ADDR"/>
+ <reg32 offset="0x050d" name="RBBM_CFG_DBGBUS_TRACE_BUF0"/>
+ <reg32 offset="0x050e" name="RBBM_CFG_DBGBUS_TRACE_BUF1"/>
+ <reg32 offset="0x050f" name="RBBM_CFG_DBGBUS_TRACE_BUF2"/>
+ <reg32 offset="0x0510" name="RBBM_CFG_DBGBUS_TRACE_BUF3"/>
+ <reg32 offset="0x0511" name="RBBM_CFG_DBGBUS_TRACE_BUF4"/>
+ <reg32 offset="0x0512" name="RBBM_CFG_DBGBUS_MISR0"/>
+ <reg32 offset="0x0513" name="RBBM_CFG_DBGBUS_MISR1"/>
+ <reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
+ <reg32 offset="0xf000" name="RBBM_SECVID_TRUST_CONFIG"/>
+ <reg32 offset="0xf400" name="RBBM_SECVID_TRUST_CNTL"/>
+ <reg32 offset="0xf800" name="RBBM_SECVID_TSB_TRUSTED_BASE_LO"/>
+ <reg32 offset="0xf801" name="RBBM_SECVID_TSB_TRUSTED_BASE_HI"/>
+ <reg32 offset="0xf802" name="RBBM_SECVID_TSB_TRUSTED_SIZE"/>
+ <reg32 offset="0xf803" name="RBBM_SECVID_TSB_CNTL"/>
+ <reg32 offset="0xf804" name="RBBM_SECVID_TSB_COMP_STATUS_LO"/>
+ <reg32 offset="0xf805" name="RBBM_SECVID_TSB_COMP_STATUS_HI"/>
+ <reg32 offset="0xf806" name="RBBM_SECVID_TSB_UCHE_STATUS_LO"/>
+ <reg32 offset="0xf807" name="RBBM_SECVID_TSB_UCHE_STATUS_HI"/>
+ <reg32 offset="0xf810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+
+ <!-- VSC registers -->
+ <reg32 offset="0x0bc2" name="VSC_BIN_SIZE">
+ <bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="9" high="16" shr="5" type="uint"/>
+ <!-- b17 maybe BYPASS like RB_CNTL, but reg not written for bypass -->
+ </reg32>
+ <reg32 offset="0x0bc3" name="VSC_SIZE_ADDRESS_LO"/>
+ <reg32 offset="0x0bc4" name="VSC_SIZE_ADDRESS_HI"/>
+ <reg32 offset="0x0bc5" name="UNKNOWN_0BC5"/> <!-- always 00000000? -->
+ <reg32 offset="0x0bc6" name="UNKNOWN_0BC6"/> <!-- always 00000000? -->
+ <array offset="0x0bd0" name="VSC_PIPE_CONFIG" stride="1" length="16">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ Configures the mapping between VSC_PIPE buffer and
+ bin, X/Y specify the bin index in the horiz/vert
+ direction (0,0 is upper left, 0,1 is leftmost bin
+ on second row, and so on). W/H specify the number
+ of bins assigned to this VSC_PIPE in the horiz/vert
+ dimension.
+ </doc>
+ <bitfield name="X" low="0" high="9" type="uint"/>
+ <bitfield name="Y" low="10" high="19" type="uint"/>
+ <bitfield name="W" low="20" high="23" type="uint"/>
+ <bitfield name="H" low="24" high="27" type="uint"/>
+ </reg32>
+ </array>
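+
+<!--
+Not from the original file: a minimal C sketch of how the VSC_PIPE_CONFIG
+mapping described in the doc above could be packed, following the X/Y/W/H
+bitfield layout declared here.  The helper name is hypothetical.
+
+#include <stdint.h>
+
+/* Pack one VSC_PIPE_CONFIG[n].REG word: (x, y) is the bin index of the
+ * pipe's upper-left bin, (w, h) the number of bins it covers. */
+static uint32_t pack_vsc_pipe_config(uint32_t x, uint32_t y,
+                                     uint32_t w, uint32_t h)
+{
+    return (x & 0x3ff) |          /* X: bits 0..9   */
+           ((y & 0x3ff) << 10) |  /* Y: bits 10..19 */
+           ((w & 0xf) << 20) |    /* W: bits 20..23 */
+           ((h & 0xf) << 24);     /* H: bits 24..27 */
+}
+ -->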
+ <array offset="0x0be0" name="VSC_PIPE_DATA_ADDRESS" stride="2" length="16">
+ <reg32 offset="0x0" name="LO"/>
+ <reg32 offset="0x1" name="HI"/>
+ </array>
+ <array offset="0x0c00" name="VSC_PIPE_DATA_LENGTH" stride="1" length="16">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <reg32 offset="0x0c60" name="VSC_PERFCTR_VSC_SEL_0" type="a5xx_vsc_perfcounter_select"/>
+ <reg32 offset="0x0c61" name="VSC_PERFCTR_VSC_SEL_1" type="a5xx_vsc_perfcounter_select"/>
+
+ <!-- used for some blits?? -->
+ <reg32 offset="0x0cdd" name="VSC_RESOLVE_CNTL" type="adreno_reg_xy"/>
+
+ <!-- GRAS registers -->
+ <reg32 offset="0x0c81" name="GRAS_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0c90" name="GRAS_PERFCTR_TSE_SEL_0" type="a5xx_tse_perfcounter_select"/>
+ <reg32 offset="0x0c91" name="GRAS_PERFCTR_TSE_SEL_1" type="a5xx_tse_perfcounter_select"/>
+ <reg32 offset="0x0c92" name="GRAS_PERFCTR_TSE_SEL_2" type="a5xx_tse_perfcounter_select"/>
+ <reg32 offset="0x0c93" name="GRAS_PERFCTR_TSE_SEL_3" type="a5xx_tse_perfcounter_select"/>
+ <reg32 offset="0x0c94" name="GRAS_PERFCTR_RAS_SEL_0" type="a5xx_ras_perfcounter_select"/>
+ <reg32 offset="0x0c95" name="GRAS_PERFCTR_RAS_SEL_1" type="a5xx_ras_perfcounter_select"/>
+ <reg32 offset="0x0c96" name="GRAS_PERFCTR_RAS_SEL_2" type="a5xx_ras_perfcounter_select"/>
+ <reg32 offset="0x0c97" name="GRAS_PERFCTR_RAS_SEL_3" type="a5xx_ras_perfcounter_select"/>
+ <reg32 offset="0x0c98" name="GRAS_PERFCTR_LRZ_SEL_0" type="a5xx_lrz_perfcounter_select"/>
+ <reg32 offset="0x0c99" name="GRAS_PERFCTR_LRZ_SEL_1" type="a5xx_lrz_perfcounter_select"/>
+ <reg32 offset="0x0c9a" name="GRAS_PERFCTR_LRZ_SEL_2" type="a5xx_lrz_perfcounter_select"/>
+ <reg32 offset="0x0c9b" name="GRAS_PERFCTR_LRZ_SEL_3" type="a5xx_lrz_perfcounter_select"/>
+
+ <reg32 offset="0x0cc4" name="RB_DBG_ECO_CNTL"/> <!-- always 00100000? -->
+ <reg32 offset="0x0cc5" name="RB_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0cc6" name="RB_MODE_CNTL"/> <!-- always 00000044? -->
+ <reg32 offset="0x0cc7" name="RB_CCU_CNTL"/> <!-- always b0056080 or 10000000? -->
+ <reg32 offset="0x0cd0" name="RB_PERFCTR_RB_SEL_0" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd1" name="RB_PERFCTR_RB_SEL_1" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd2" name="RB_PERFCTR_RB_SEL_2" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd3" name="RB_PERFCTR_RB_SEL_3" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd4" name="RB_PERFCTR_RB_SEL_4" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd5" name="RB_PERFCTR_RB_SEL_5" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd6" name="RB_PERFCTR_RB_SEL_6" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd7" name="RB_PERFCTR_RB_SEL_7" type="a5xx_rb_perfcounter_select"/>
+ <reg32 offset="0x0cd8" name="RB_PERFCTR_CCU_SEL_0" type="a5xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cd9" name="RB_PERFCTR_CCU_SEL_1" type="a5xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cda" name="RB_PERFCTR_CCU_SEL_2" type="a5xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0cdb" name="RB_PERFCTR_CCU_SEL_3" type="a5xx_ccu_perfcounter_select"/>
+ <reg32 offset="0x0ce0" name="RB_POWERCTR_RB_SEL_0"/>
+ <reg32 offset="0x0ce1" name="RB_POWERCTR_RB_SEL_1"/>
+ <reg32 offset="0x0ce2" name="RB_POWERCTR_RB_SEL_2"/>
+ <reg32 offset="0x0ce3" name="RB_POWERCTR_RB_SEL_3"/>
+ <reg32 offset="0x0ce4" name="RB_POWERCTR_CCU_SEL_0"/>
+ <reg32 offset="0x0ce5" name="RB_POWERCTR_CCU_SEL_1"/>
+ <reg32 offset="0x0cec" name="RB_PERFCTR_CMP_SEL_0" type="a5xx_cmp_perfcounter_select"/>
+ <reg32 offset="0x0ced" name="RB_PERFCTR_CMP_SEL_1" type="a5xx_cmp_perfcounter_select"/>
+ <reg32 offset="0x0cee" name="RB_PERFCTR_CMP_SEL_2" type="a5xx_cmp_perfcounter_select"/>
+ <reg32 offset="0x0cef" name="RB_PERFCTR_CMP_SEL_3" type="a5xx_cmp_perfcounter_select"/>
+
+ <reg32 offset="0x0d00" name="PC_DBG_ECO_CNTL">
+ <bitfield name="TWOPASSUSEWFI" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0d01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0d02" name="PC_MODE_CNTL"/> <!-- always 0000001f? -->
+ <reg32 offset="0x0d04" name="PC_INDEX_BUF_LO"/>
+ <reg32 offset="0x0d05" name="PC_INDEX_BUF_HI"/>
+ <reg32 offset="0x0d06" name="PC_START_INDEX"/>
+ <reg32 offset="0x0d07" name="PC_MAX_INDEX"/>
+ <reg32 offset="0x0d08" name="PC_TESSFACTOR_ADDR_LO"/>
+ <reg32 offset="0x0d09" name="PC_TESSFACTOR_ADDR_HI"/>
+ <reg32 offset="0x0d10" name="PC_PERFCTR_PC_SEL_0" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d11" name="PC_PERFCTR_PC_SEL_1" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d12" name="PC_PERFCTR_PC_SEL_2" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d13" name="PC_PERFCTR_PC_SEL_3" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d14" name="PC_PERFCTR_PC_SEL_4" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d15" name="PC_PERFCTR_PC_SEL_5" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d16" name="PC_PERFCTR_PC_SEL_6" type="a5xx_pc_perfcounter_select"/>
+ <reg32 offset="0x0d17" name="PC_PERFCTR_PC_SEL_7" type="a5xx_pc_perfcounter_select"/>
+
+ <reg32 offset="0x0e00" name="HLSQ_TIMEOUT_THRESHOLD_0"/>
+ <reg32 offset="0x0e01" name="HLSQ_TIMEOUT_THRESHOLD_1"/>
+ <reg32 offset="0x0e04" name="HLSQ_DBG_ECO_CNTL"/>
+ <reg32 offset="0x0e05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0e06" name="HLSQ_MODE_CNTL"/> <!-- always 00000001? -->
+ <reg32 offset="0x0e10" name="HLSQ_PERFCTR_HLSQ_SEL_0" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e11" name="HLSQ_PERFCTR_HLSQ_SEL_1" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e12" name="HLSQ_PERFCTR_HLSQ_SEL_2" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e13" name="HLSQ_PERFCTR_HLSQ_SEL_3" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e14" name="HLSQ_PERFCTR_HLSQ_SEL_4" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e15" name="HLSQ_PERFCTR_HLSQ_SEL_5" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e16" name="HLSQ_PERFCTR_HLSQ_SEL_6" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0e17" name="HLSQ_PERFCTR_HLSQ_SEL_7" type="a5xx_hlsq_perfcounter_select"/>
+ <reg32 offset="0x0f08" name="HLSQ_SPTP_RDSEL"/>
+ <reg32 offset="0xbc00" name="HLSQ_DBG_READ_SEL"/>
+ <reg32 offset="0xa000" name="HLSQ_DBG_AHB_READ_APERTURE"/>
+
+ <reg32 offset="0x0e41" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0e42" name="VFD_MODE_CNTL"/> <!-- always 00000000? -->
+ <reg32 offset="0x0e50" name="VFD_PERFCTR_VFD_SEL_0" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e51" name="VFD_PERFCTR_VFD_SEL_1" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e52" name="VFD_PERFCTR_VFD_SEL_2" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e53" name="VFD_PERFCTR_VFD_SEL_3" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e54" name="VFD_PERFCTR_VFD_SEL_4" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e55" name="VFD_PERFCTR_VFD_SEL_5" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e56" name="VFD_PERFCTR_VFD_SEL_6" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e57" name="VFD_PERFCTR_VFD_SEL_7" type="a5xx_vfd_perfcounter_select"/>
+ <reg32 offset="0x0e60" name="VPC_DBG_ECO_CNTL">
+ <bitfield name="ALLFLATOPTDIS" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0e61" name="VPC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0e62" name="VPC_MODE_CNTL">
+ <bitfield name="BINNING_PASS" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0e64" name="VPC_PERFCTR_VPC_SEL_0" type="a5xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e65" name="VPC_PERFCTR_VPC_SEL_1" type="a5xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e66" name="VPC_PERFCTR_VPC_SEL_2" type="a5xx_vpc_perfcounter_select"/>
+ <reg32 offset="0x0e67" name="VPC_PERFCTR_VPC_SEL_3" type="a5xx_vpc_perfcounter_select"/>
+
+ <reg32 offset="0x0e80" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0e81" name="UCHE_MODE_CNTL"/>
+ <reg32 offset="0x0e82" name="UCHE_SVM_CNTL"/>
+ <reg32 offset="0x0e87" name="UCHE_WRITE_THRU_BASE_LO"/>
+ <reg32 offset="0x0e88" name="UCHE_WRITE_THRU_BASE_HI"/>
+ <reg32 offset="0x0e89" name="UCHE_TRAP_BASE_LO"/>
+ <reg32 offset="0x0e8a" name="UCHE_TRAP_BASE_HI"/>
+ <reg32 offset="0x0e8b" name="UCHE_GMEM_RANGE_MIN_LO"/>
+ <reg32 offset="0x0e8c" name="UCHE_GMEM_RANGE_MIN_HI"/>
+ <reg32 offset="0x0e8d" name="UCHE_GMEM_RANGE_MAX_LO"/>
+ <reg32 offset="0x0e8e" name="UCHE_GMEM_RANGE_MAX_HI"/>
+ <reg32 offset="0x0e8f" name="UCHE_DBG_ECO_CNTL_2"/>
+ <reg32 offset="0x0e90" name="UCHE_DBG_ECO_CNTL"/>
+ <reg32 offset="0x0e91" name="UCHE_CACHE_INVALIDATE_MIN_LO"/>
+ <reg32 offset="0x0e92" name="UCHE_CACHE_INVALIDATE_MIN_HI"/>
+ <reg32 offset="0x0e93" name="UCHE_CACHE_INVALIDATE_MAX_LO"/>
+ <reg32 offset="0x0e94" name="UCHE_CACHE_INVALIDATE_MAX_HI"/>
+ <reg32 offset="0x0e95" name="UCHE_CACHE_INVALIDATE"/>
+ <reg32 offset="0x0e96" name="UCHE_CACHE_WAYS"/>
+ <reg32 offset="0x0ea0" name="UCHE_PERFCTR_UCHE_SEL_0" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea1" name="UCHE_PERFCTR_UCHE_SEL_1" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea2" name="UCHE_PERFCTR_UCHE_SEL_2" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea3" name="UCHE_PERFCTR_UCHE_SEL_3" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea4" name="UCHE_PERFCTR_UCHE_SEL_4" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea5" name="UCHE_PERFCTR_UCHE_SEL_5" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea6" name="UCHE_PERFCTR_UCHE_SEL_6" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea7" name="UCHE_PERFCTR_UCHE_SEL_7" type="a5xx_uche_perfcounter_select"/>
+ <reg32 offset="0x0ea8" name="UCHE_POWERCTR_UCHE_SEL_0"/>
+ <reg32 offset="0x0ea9" name="UCHE_POWERCTR_UCHE_SEL_1"/>
+ <reg32 offset="0x0eaa" name="UCHE_POWERCTR_UCHE_SEL_2"/>
+ <reg32 offset="0x0eab" name="UCHE_POWERCTR_UCHE_SEL_3"/>
+ <reg32 offset="0x0eb1" name="UCHE_TRAP_LOG_LO"/>
+ <reg32 offset="0x0eb2" name="UCHE_TRAP_LOG_HI"/>
+
+ <reg32 offset="0x0ec0" name="SP_DBG_ECO_CNTL"/>
+ <reg32 offset="0x0ec1" name="SP_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0ec2" name="SP_MODE_CNTL"/> <!-- always 0000001e? -->
+ <reg32 offset="0x0ed0" name="SP_PERFCTR_SP_SEL_0" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed1" name="SP_PERFCTR_SP_SEL_1" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed2" name="SP_PERFCTR_SP_SEL_2" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed3" name="SP_PERFCTR_SP_SEL_3" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed4" name="SP_PERFCTR_SP_SEL_4" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed5" name="SP_PERFCTR_SP_SEL_5" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed6" name="SP_PERFCTR_SP_SEL_6" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed7" name="SP_PERFCTR_SP_SEL_7" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed8" name="SP_PERFCTR_SP_SEL_8" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0ed9" name="SP_PERFCTR_SP_SEL_9" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0eda" name="SP_PERFCTR_SP_SEL_10" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0edb" name="SP_PERFCTR_SP_SEL_11" type="a5xx_sp_perfcounter_select"/>
+ <reg32 offset="0x0edc" name="SP_POWERCTR_SP_SEL_0"/>
+ <reg32 offset="0x0edd" name="SP_POWERCTR_SP_SEL_1"/>
+ <reg32 offset="0x0ede" name="SP_POWERCTR_SP_SEL_2"/>
+ <reg32 offset="0x0edf" name="SP_POWERCTR_SP_SEL_3"/>
+
+ <reg32 offset="0x0f01" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0f02" name="TPL1_MODE_CNTL"/> <!-- always 00000544? -->
+ <reg32 offset="0x0f10" name="TPL1_PERFCTR_TP_SEL_0" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f11" name="TPL1_PERFCTR_TP_SEL_1" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f12" name="TPL1_PERFCTR_TP_SEL_2" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f13" name="TPL1_PERFCTR_TP_SEL_3" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f14" name="TPL1_PERFCTR_TP_SEL_4" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f15" name="TPL1_PERFCTR_TP_SEL_5" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f16" name="TPL1_PERFCTR_TP_SEL_6" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f17" name="TPL1_PERFCTR_TP_SEL_7" type="a5xx_tp_perfcounter_select"/>
+ <reg32 offset="0x0f18" name="TPL1_POWERCTR_TP_SEL_0"/>
+ <reg32 offset="0x0f19" name="TPL1_POWERCTR_TP_SEL_1"/>
+ <reg32 offset="0x0f1a" name="TPL1_POWERCTR_TP_SEL_2"/>
+ <reg32 offset="0x0f1b" name="TPL1_POWERCTR_TP_SEL_3"/>
+
+ <reg32 offset="0x3000" name="VBIF_VERSION"/>
+ <reg32 offset="0x3001" name="VBIF_CLKON"/>
+<!--
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK 0x1
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT 0x1
+ -->
+ <reg32 offset="0x3028" name="VBIF_ABIT_SORT"/>
+ <reg32 offset="0x3029" name="VBIF_ABIT_SORT_CONF"/>
+ <reg32 offset="0x3049" name="VBIF_ROUND_ROBIN_QOS_ARB"/>
+ <reg32 offset="0x302a" name="VBIF_GATE_OFF_WRREQ_EN"/>
+ <reg32 offset="0x302c" name="VBIF_IN_RD_LIM_CONF0"/>
+ <reg32 offset="0x302d" name="VBIF_IN_RD_LIM_CONF1"/>
+ <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0"/>
+<!--
+#define A5XX_VBIF_XIN_HALT_CTRL0_MASK 0xF
+#define A510_VBIF_XIN_HALT_CTRL0_MASK 0x7
+ -->
+ <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1"/>
+ <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL"/>
+<!--
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK 0x1
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT 0x0
+ -->
+ <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0"/>
+ <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1"/>
+<!--
+#define A5XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK 0xF
+#define A5XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT 0x0
+ -->
+ <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0"/>
+ <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1"/>
+<!--
+#define A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK 0xF
+#define A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT 0x0
+ -->
+ <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT"/>
+ <reg32 offset="0x30c0" name="VBIF_PERF_CNT_EN0"/>
+ <reg32 offset="0x30c1" name="VBIF_PERF_CNT_EN1"/>
+ <reg32 offset="0x30c2" name="VBIF_PERF_CNT_EN2"/>
+ <reg32 offset="0x30c3" name="VBIF_PERF_CNT_EN3"/>
+ <reg32 offset="0x30c8" name="VBIF_PERF_CNT_CLR0"/>
+ <reg32 offset="0x30c9" name="VBIF_PERF_CNT_CLR1"/>
+ <reg32 offset="0x30ca" name="VBIF_PERF_CNT_CLR2"/>
+ <reg32 offset="0x30cb" name="VBIF_PERF_CNT_CLR3"/>
+ <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0" type="a5xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1" type="a5xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2" type="a5xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3" type="a5xx_vbif_perfcounter_select"/>
+ <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0"/>
+ <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1"/>
+ <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2"/>
+ <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3"/>
+ <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0"/>
+ <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1"/>
+ <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2"/>
+ <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3"/>
+ <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0"/>
+ <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1"/>
+ <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2"/>
+ <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0"/>
+ <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1"/>
+ <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2"/>
+ <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0"/>
+ <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1"/>
+ <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2"/>
+
+ <reg32 offset="0x8800" name="GPMU_INST_RAM_BASE"/>
+ <reg32 offset="0x9800" name="GPMU_DATA_RAM_BASE"/>
+
+<!--
+/* COUNTABLE FOR SP PERFCOUNTER */
+#define A5XX_SP_ALU_ACTIVE_CYCLES 0x1
+#define A5XX_SP0_ICL1_MISSES 0x35
+#define A5XX_SP_FS_CFLOW_INSTRUCTIONS 0x27
+
+/* COUNTABLE FOR TSE PERFCOUNTER */
+#define A5XX_TSE_INPUT_PRIM_NUM 0x6
+ -->
+ <reg32 offset="0xa840" name="SP_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa841" name="SP_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa842" name="SP_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa843" name="SP_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa844" name="SP_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa845" name="SP_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa846" name="SP_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa847" name="SP_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa848" name="TP_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa849" name="TP_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa84a" name="TP_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa84b" name="TP_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa84c" name="TP_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa84d" name="TP_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa84e" name="TP_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa84f" name="TP_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa850" name="RB_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa851" name="RB_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa852" name="RB_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa853" name="RB_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa854" name="RB_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa855" name="RB_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa856" name="RB_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa857" name="RB_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa858" name="CCU_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa859" name="CCU_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa85a" name="CCU_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa85b" name="CCU_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa85c" name="UCHE_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa85d" name="UCHE_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa85e" name="UCHE_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa85f" name="UCHE_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa860" name="UCHE_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa861" name="UCHE_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa862" name="UCHE_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa863" name="UCHE_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa864" name="CP_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa865" name="CP_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa866" name="CP_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa867" name="CP_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa868" name="CP_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa869" name="CP_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa86a" name="CP_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa86b" name="CP_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa86c" name="GPMU_POWER_COUNTER_0_LO"/>
+ <reg32 offset="0xa86d" name="GPMU_POWER_COUNTER_0_HI"/>
+ <reg32 offset="0xa86e" name="GPMU_POWER_COUNTER_1_LO"/>
+ <reg32 offset="0xa86f" name="GPMU_POWER_COUNTER_1_HI"/>
+ <reg32 offset="0xa870" name="GPMU_POWER_COUNTER_2_LO"/>
+ <reg32 offset="0xa871" name="GPMU_POWER_COUNTER_2_HI"/>
+ <reg32 offset="0xa872" name="GPMU_POWER_COUNTER_3_LO"/>
+ <reg32 offset="0xa873" name="GPMU_POWER_COUNTER_3_HI"/>
+ <reg32 offset="0xa874" name="GPMU_POWER_COUNTER_4_LO"/>
+ <reg32 offset="0xa875" name="GPMU_POWER_COUNTER_4_HI"/>
+ <reg32 offset="0xa876" name="GPMU_POWER_COUNTER_5_LO"/>
+ <reg32 offset="0xa877" name="GPMU_POWER_COUNTER_5_HI"/>
+ <reg32 offset="0xa878" name="GPMU_POWER_COUNTER_ENABLE"/>
+ <reg32 offset="0xa879" name="GPMU_ALWAYS_ON_COUNTER_LO"/>
+ <reg32 offset="0xa87a" name="GPMU_ALWAYS_ON_COUNTER_HI"/>
+ <reg32 offset="0xa87b" name="GPMU_ALWAYS_ON_COUNTER_RESET"/>
+ <reg32 offset="0xa87c" name="GPMU_POWER_COUNTER_SELECT_0"/>
+ <reg32 offset="0xa87d" name="GPMU_POWER_COUNTER_SELECT_1"/>
+
+ <reg32 offset="0xa880" name="GPMU_GPMU_SP_CLOCK_CONTROL"/>
+ <reg32 offset="0xa881" name="GPMU_SP_POWER_CNTL"/>
+ <reg32 offset="0xa886" name="GPMU_RBCCU_CLOCK_CNTL"/>
+ <reg32 offset="0xa887" name="GPMU_RBCCU_POWER_CNTL"/>
+ <reg32 offset="0xa88b" name="GPMU_SP_PWR_CLK_STATUS">
+ <bitfield name="PWR_ON" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa88d" name="GPMU_RBCCU_PWR_CLK_STATUS">
+ <bitfield name="PWR_ON" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa891" name="GPMU_PWR_COL_STAGGER_DELAY"/>
+ <reg32 offset="0xa892" name="GPMU_PWR_COL_INTER_FRAME_CTRL"/>
+ <reg32 offset="0xa893" name="GPMU_PWR_COL_INTER_FRAME_HYST"/>
+ <reg32 offset="0xa894" name="GPMU_PWR_COL_BINNING_CTRL"/>
+ <reg32 offset="0xa8a3" name="GPMU_CLOCK_THROTTLE_CTRL"/>
+ <reg32 offset="0xa8a8" name="GPMU_THROTTLE_UNMASK_FORCE_CTRL"/>
+ <reg32 offset="0xa8c1" name="GPMU_WFI_CONFIG"/>
+ <reg32 offset="0xa8d6" name="GPMU_RBBM_INTR_INFO"/>
+ <reg32 offset="0xa8d8" name="GPMU_CM3_SYSRESET"/>
+ <reg32 offset="0xa8e0" name="GPMU_GENERAL_0"/>
+ <reg32 offset="0xa8e1" name="GPMU_GENERAL_1"/>
+ <reg32 offset="0xac00" name="GPMU_TEMP_SENSOR_ID"/>
+ <reg32 offset="0xac01" name="GPMU_TEMP_SENSOR_CONFIG"/>
+ <reg32 offset="0xac02" name="GPMU_TEMP_VAL"/>
+ <reg32 offset="0xac03" name="GPMU_DELTA_TEMP_THRESHOLD"/>
+ <reg32 offset="0xac05" name="GPMU_TEMP_THRESHOLD_INTR_STATUS"/>
+ <reg32 offset="0xac06" name="GPMU_TEMP_THRESHOLD_INTR_EN_MASK"/>
+ <reg32 offset="0xac40" name="GPMU_LEAKAGE_TEMP_COEFF_0_1"/>
+ <reg32 offset="0xac41" name="GPMU_LEAKAGE_TEMP_COEFF_2_3"/>
+ <reg32 offset="0xac42" name="GPMU_LEAKAGE_VTG_COEFF_0_1"/>
+ <reg32 offset="0xac43" name="GPMU_LEAKAGE_VTG_COEFF_2_3"/>
+ <reg32 offset="0xac46" name="GPMU_BASE_LEAKAGE"/>
+ <reg32 offset="0xac60" name="GPMU_GPMU_VOLTAGE"/>
+ <reg32 offset="0xac61" name="GPMU_GPMU_VOLTAGE_INTR_STATUS"/>
+ <reg32 offset="0xac62" name="GPMU_GPMU_VOLTAGE_INTR_EN_MASK"/>
+ <reg32 offset="0xac80" name="GPMU_GPMU_PWR_THRESHOLD"/>
+ <reg32 offset="0xacc4" name="GPMU_GPMU_LLM_GLM_SLEEP_CTRL"/>
+ <reg32 offset="0xacc5" name="GPMU_GPMU_LLM_GLM_SLEEP_STATUS"/>
+ <reg32 offset="0xb80c" name="GDPM_CONFIG1"/>
+ <reg32 offset="0xb80d" name="GDPM_CONFIG2"/>
+ <reg32 offset="0xb80f" name="GDPM_INT_EN"/>
+ <reg32 offset="0xb811" name="GDPM_INT_MASK"/>
+ <reg32 offset="0xb9a0" name="GPMU_BEC_ENABLE"/>
+ <reg32 offset="0xc41a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
+ <reg32 offset="0xc41d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/>
+ <reg32 offset="0xc41f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/>
+ <reg32 offset="0xc421" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/>
+ <reg32 offset="0xc520" name="GPU_CS_ENABLE_REG"/>
+ <reg32 offset="0xc557" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/>
+
+
+ <reg32 offset="0xe000" name="GRAS_CL_CNTL">
+ <bitfield name="ZERO_GB_SCALE_Z" pos="6" type="boolean"/>
+ </reg32>
+ <bitset name="a5xx_gras_xs_cl_cntl" inline="yes">
+ <bitfield name="CLIP_MASK" low="0" high="7"/>
+ <bitfield name="CULL_MASK" low="8" high="15"/>
+ </bitset>
+ <reg32 offset="0xe001" name="GRAS_VS_CL_CNTL" type="a5xx_gras_xs_cl_cntl"/>
+ <reg32 offset="0xe004" name="UNKNOWN_E004"/> <!-- always 00000000? -->
+ <reg32 offset="0xe005" name="GRAS_CNTL">
+ <!-- see also RB_RENDER_CONTROL0 -->
+ <bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
+ <bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
+ <bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
+ <bitfield name="IJ_LINEAR_PIXEL" pos="3" type="boolean"/>
+ <bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/>
+ <bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
+ </reg32>
+ <reg32 offset="0xe006" name="GRAS_CL_GUARDBAND_CLIP_ADJ">
+ <bitfield name="HORZ" low="0" high="9" type="uint"/>
+ <bitfield name="VERT" low="10" high="19" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe010" name="GRAS_CL_VPORT_XOFFSET_0" type="float"/>
+ <reg32 offset="0xe011" name="GRAS_CL_VPORT_XSCALE_0" type="float"/>
+ <reg32 offset="0xe012" name="GRAS_CL_VPORT_YOFFSET_0" type="float"/>
+ <reg32 offset="0xe013" name="GRAS_CL_VPORT_YSCALE_0" type="float"/>
+ <reg32 offset="0xe014" name="GRAS_CL_VPORT_ZOFFSET_0" type="float"/>
+ <reg32 offset="0xe015" name="GRAS_CL_VPORT_ZSCALE_0" type="float"/>
+ <reg32 offset="0xe090" name="GRAS_SU_CNTL">
+ <bitfield name="CULL_FRONT" pos="0" type="boolean"/>
+ <bitfield name="CULL_BACK" pos="1" type="boolean"/>
+ <bitfield name="FRONT_CW" pos="2" type="boolean"/>
+ <bitfield name="LINEHALFWIDTH" low="3" high="10" radix="2" type="fixed"/>
+ <bitfield name="POLY_OFFSET" pos="11" type="boolean"/>
+ <bitfield name="LINE_MODE" pos="13" type="a5xx_line_mode"/>
+ <!-- probably LINEHALFWIDTH is the same as a4xx.. -->
+ </reg32>
+ <reg32 offset="0xe091" name="GRAS_SU_POINT_MINMAX">
+ <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0xe092" name="GRAS_SU_POINT_SIZE" type="fixed" radix="4"/>
+ <reg32 offset="0xe093" name="GRAS_SU_LAYERED"/>
+ <reg32 offset="0xe094" name="GRAS_SU_DEPTH_PLANE_CNTL">
+ <bitfield name="FRAG_WRITES_Z" pos="0" type="boolean"/>
+ <bitfield name="UNK1" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float"/>
+ <reg32 offset="0xe096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float"/>
+ <reg32 offset="0xe097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float"/>
+ <!-- duplicates RB_DEPTH_INFO0: -->
+ <reg32 offset="0xe098" name="GRAS_SU_DEPTH_BUFFER_INFO">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a5xx_depth_format"/>
+ </reg32>
+ <reg32 offset="0xe099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL"/> <!-- always 00000000? -->
+ <!--
+ guessing about window/screen/extent, I think they can in the end be
+ used interchangeably?
+ -->
+ <reg32 offset="0xe0a0" name="GRAS_SC_CNTL">
+ <bitfield name="BINNING_PASS" pos="0" type="boolean"/>
+ <bitfield name="SAMPLES_PASSED" pos="15" type="boolean"/>
+ </reg32>
+ <!-- note, 0x4 for binning pass when frag writes z?? -->
+ <reg32 offset="0xe0a1" name="GRAS_SC_BIN_CNTL"/> <!-- always 00000000? -->
+ <reg32 offset="0xe0a2" name="GRAS_SC_RAS_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ </reg32>
+ <reg32 offset="0xe0a3" name="GRAS_SC_DEST_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe0a4" name="GRAS_SC_SCREEN_SCISSOR_CNTL"/> <!-- always 00000000? -->
+ <reg32 offset="0xe0aa" name="GRAS_SC_SCREEN_SCISSOR_TL_0" type="adreno_reg_xy"/>
+ <reg32 offset="0xe0ab" name="GRAS_SC_SCREEN_SCISSOR_BR_0" type="adreno_reg_xy"/>
+ <reg32 offset="0xe0ca" name="GRAS_SC_VIEWPORT_SCISSOR_TL_0" type="adreno_reg_xy"/>
+ <reg32 offset="0xe0cb" name="GRAS_SC_VIEWPORT_SCISSOR_BR_0" type="adreno_reg_xy"/>
+ <reg32 offset="0xe0ea" name="GRAS_SC_WINDOW_SCISSOR_TL" type="adreno_reg_xy"/>
+ <reg32 offset="0xe0eb" name="GRAS_SC_WINDOW_SCISSOR_BR" type="adreno_reg_xy"/>
+
+ <doc>
+ LRZ: (Low Resolution Z ??)
+ ----
+
+ I think it serves two functions: early discard of primitives in the
+ binning pass without needing a full resolution depth buffer, and also
+ as a depth-prepass, used during the GMEM draws to discard primitives
+ that would not be visible due to later draws.
+
+ The LRZ buffer always seems to be z16 format, regardless of actual
+ depth buffer format.
+
+ Note that LRZ write should be disabled when blend/stencil/etc is enabled,
+ since the occluded primitive can still contribute to final color value
+ of a fragment.
+
+ Only enabled for GL_LESS/GL_LEQUAL/GL_GREATER/GL_GEQUAL?
+ </doc>
+ <reg32 offset="0xe100" name="GRAS_LRZ_CNTL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <doc>LRZ write also disabled for blend/etc.</doc>
+ <bitfield name="LRZ_WRITE" pos="1" type="boolean"/>
+ <doc>update MAX instead of MIN value, ie. GL_GREATER/GL_GEQUAL</doc>
+ <bitfield name="GREATER" pos="2" type="boolean"/>
+ <!--
+ b3 set sometimes, when depth buffer isn't cleared.. maybe it
+ invalidates the LRZ buffer? (Or just the covered positions?)
+ -->
+ </reg32>
+ <reg32 offset="0xe101" name="GRAS_LRZ_BUFFER_BASE_LO"/>
+ <reg32 offset="0xe102" name="GRAS_LRZ_BUFFER_BASE_HI"/>
+ <!--
+ lrz pitch is depth pitch (in pixels) / 8 (aligned to 32)..
+ -->
+ <doc>
+ Pitch is depth width (in pixels) / 8 (aligned to 32). Height
+ is also divided by 8 (ie. covers 8x8 pixels)
+ </doc>
+ <reg32 offset="0xe103" name="GRAS_LRZ_BUFFER_PITCH" shr="5" type="uint"/>
+ <reg32 offset="0xe104" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO"/>
+ <reg32 offset="0xe105" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI"/>
+
+ <reg32 offset="0xe140" name="RB_CNTL">
+ <bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="9" high="16" shr="5" type="uint"/>
+ <bitfield name="BYPASS" pos="17" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe141" name="RB_RENDER_CNTL">
+<!--
+bit 3 set for normal draws
+bit 7 for RECTLIST (clear) when z32s8 (used for clear of depth32? not set
+ for z32 with no stencil, but maybe in that case separate z/s not used?
+ see mrt-fbo-* zs=2)
+ -->
+ <bitfield name="BINNING_PASS" pos="0" type="boolean"/>
+ <bitfield name="SAMPLES_PASSED" pos="6" type="boolean"/>
+ <bitfield name="DISABLE_COLOR_PIPE" pos="7" type="boolean"/>
+ <!-- why everything twice?? maybe read vs write? -->
+ <!-- UBWC flag buffer enabled for depth/stencil: -->
+ <bitfield name="FLAG_DEPTH" pos="14" type="boolean"/>
+ <bitfield name="FLAG_DEPTH2" pos="15" type="boolean"/>
+ <!-- bitmask of MRTs using UBWC flag buffer: -->
+ <bitfield name="FLAG_MRTS" low="16" high="23"/>
+ <bitfield name="FLAG_MRTS2" low="24" high="31"/>
+ </reg32>
+ <reg32 offset="0xe142" name="RB_RAS_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ </reg32>
+ <reg32 offset="0xe143" name="RB_DEST_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+ <!--
+ note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
+ name comes from kernel and is probably right)
+ -->
+ <reg32 offset="0xe144" name="RB_RENDER_CONTROL0">
+ <!-- see also GRAS_CNTL -->
+ <bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
+ <bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
+ <bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
+ <bitfield name="IJ_LINEAR_PIXEL" pos="3" type="boolean"/>
+ <bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/>
+ <bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
+ </reg32>
+ <reg32 offset="0xe145" name="RB_RENDER_CONTROL1">
+ <bitfield name="SAMPLEMASK" pos="0" type="boolean"/>
+ <bitfield name="FACENESS" pos="1" type="boolean"/>
+ <bitfield name="SAMPLEID" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe146" name="RB_FS_OUTPUT_CNTL">
+ <!-- bit0 set except for binning pass.. -->
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ <bitfield name="FRAG_WRITES_Z" pos="5" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe147" name="RB_RENDER_COMPONENTS">
+ <bitfield name="RT0" low="0" high="3"/>
+ <bitfield name="RT1" low="4" high="7"/>
+ <bitfield name="RT2" low="8" high="11"/>
+ <bitfield name="RT3" low="12" high="15"/>
+ <bitfield name="RT4" low="16" high="19"/>
+ <bitfield name="RT5" low="20" high="23"/>
+ <bitfield name="RT6" low="24" high="27"/>
+ <bitfield name="RT7" low="28" high="31"/>
+ </reg32>
+ <array offset="0xe150" name="RB_MRT" stride="7" length="8">
+ <reg32 offset="0x0" name="CONTROL">
+ <bitfield name="BLEND" pos="0" type="boolean"/>
+ <bitfield name="BLEND2" pos="1" type="boolean"/>
+ <bitfield name="ROP_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="ROP_CODE" low="3" high="6" type="a3xx_rop_code"/>
+ <bitfield name="COMPONENT_ENABLE" low="7" high="10" type="hex"/>
+ </reg32>
+ <reg32 offset="0x1" name="BLEND_CONTROL">
+ <bitfield name="RGB_SRC_FACTOR" low="0" high="4" type="adreno_rb_blend_factor"/>
+ <bitfield name="RGB_BLEND_OPCODE" low="5" high="7" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="RGB_DEST_FACTOR" low="8" high="12" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_SRC_FACTOR" low="16" high="20" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_BLEND_OPCODE" low="21" high="23" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="ALPHA_DEST_FACTOR" low="24" high="28" type="adreno_rb_blend_factor"/>
+ </reg32>
+ <reg32 offset="0x2" name="BUF_INFO">
+ <!--
+ not sure if there is a separate COLOR_SWAP field like on a3xx/a4xx,
+ or if it is inherent in the format. Will have to play with bits
+ once we get things working and see what happens. If it is a diff
+ field, it doesn't seem to have the same encoding as a3xx/a4xx.
+ -->
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a5xx_color_fmt"/>
+ <bitfield name="COLOR_TILE_MODE" low="8" high="9" type="a5xx_tile_mode"/>
+ <bitfield name="DITHER_MODE" low="11" high="12" type="adreno_rb_dither_mode"/>
+ <bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ <bitfield name="COLOR_SRGB" pos="15" type="boolean"/>
+ </reg32>
+ <!--
+ at least in gmem, things seem to be aligned to pitch of 64..
+ maybe an artifact of tiled format used in gmem?
+ -->
+ <reg32 offset="0x3" name="PITCH" shr="6" type="uint"/>
+ <reg32 offset="0x4" name="ARRAY_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0x5" name="BASE_LO"/>
+ <reg32 offset="0x6" name="BASE_HI"/>
+ </array>
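+
+<!--
+Not from the original file: a note on the shr="6" convention used by the
+PITCH/ARRAY_PITCH fields above (and by several other pitch registers in
+this file): the field stores the value shifted right by 6, so the pitch
+must be a multiple of 64, which matches the alignment comment inside
+RB_MRT.  The helper name is hypothetical, and the pitch being in bytes is
+an assumption.
+
+#include <stdint.h>
+
+static uint32_t rb_mrt_pitch_field(uint32_t pitch_bytes)
+{
+    /* caller is assumed to have aligned pitch_bytes to 64 already */
+    return pitch_bytes >> 6;
+}
+ -->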
+ <reg32 offset="0xe1a0" name="RB_BLEND_RED">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0xe1a1" name="RB_BLEND_RED_F32" type="float"/>
+ <reg32 offset="0xe1a2" name="RB_BLEND_GREEN">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0xe1a3" name="RB_BLEND_GREEN_F32" type="float"/>
+ <reg32 offset="0xe1a4" name="RB_BLEND_BLUE">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0xe1a5" name="RB_BLEND_BLUE_F32" type="float"/>
+ <reg32 offset="0xe1a6" name="RB_BLEND_ALPHA">
+ <bitfield name="UINT" low="0" high="7" type="hex"/>
+ <bitfield name="SINT" low="8" high="15" type="hex"/>
+ <bitfield name="FLOAT" low="16" high="31" type="float"/>
+ </reg32>
+ <reg32 offset="0xe1a7" name="RB_BLEND_ALPHA_F32" type="float"/>
+ <reg32 offset="0xe1a8" name="RB_ALPHA_CONTROL">
+ <bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
+ <bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
+ <bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
+ </reg32>
+ <reg32 offset="0xe1a9" name="RB_BLEND_CNTL">
+ <!-- per-mrt enable bit -->
+ <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
+ <bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
+ <!-- a guess? -->
+ <bitfield name="SAMPLE_MASK" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0xe1b0" name="RB_DEPTH_PLANE_CNTL">
+ <bitfield name="FRAG_WRITES_Z" pos="0" type="boolean"/>
+ <bitfield name="UNK1" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe1b1" name="RB_DEPTH_CNTL">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="ZFUNC" low="2" high="4" type="adreno_compare_func"/>
+ <doc>Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER</doc>
+ <bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe1b2" name="RB_DEPTH_BUFFER_INFO">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a5xx_depth_format"/>
+ </reg32>
+ <reg32 offset="0xe1b3" name="RB_DEPTH_BUFFER_BASE_LO"/>
+ <reg32 offset="0xe1b4" name="RB_DEPTH_BUFFER_BASE_HI"/>
+ <reg32 offset="0xe1b5" name="RB_DEPTH_BUFFER_PITCH" shr="6" type="uint">
+ <doc>stride of depth/stencil buffer</doc>
+ </reg32>
+ <reg32 offset="0xe1b6" name="RB_DEPTH_BUFFER_ARRAY_PITCH" shr="6" type="uint">
+ <doc>size of layer</doc>
+ </reg32>
+ <reg32 offset="0xe1c0" name="RB_STENCIL_CONTROL">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
+ <!--
+ set for stencil operations that require read from stencil
+ buffer, but not for example for stencil clear (which does
+ not require read).. so guessing this is analogous to
+ READ_DEST_ENABLE for color buffer..
+ -->
+ <bitfield name="STENCIL_READ" pos="2" type="boolean"/>
+ <bitfield name="FUNC" low="8" high="10" type="adreno_compare_func"/>
+ <bitfield name="FAIL" low="11" high="13" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS" low="14" high="16" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL" low="17" high="19" type="adreno_stencil_op"/>
+ <bitfield name="FUNC_BF" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="FAIL_BF" low="23" high="25" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
+ </reg32>
+ <reg32 offset="0xe1c1" name="RB_STENCIL_INFO">
+ <bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe1c2" name="RB_STENCIL_BASE_LO"/>
+ <reg32 offset="0xe1c3" name="RB_STENCIL_BASE_HI"/>
+ <reg32 offset="0xe1c4" name="RB_STENCIL_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0xe1c5" name="RB_STENCIL_ARRAY_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0xe1c6" name="RB_STENCILREFMASK" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0xe1c7" name="RB_STENCILREFMASK_BF" type="adreno_rb_stencilrefmask"/>
+ <reg32 offset="0xe1d0" name="RB_WINDOW_OFFSET" type="adreno_reg_xy"/>
+ <reg32 offset="0xe1d1" name="RB_SAMPLE_COUNT_CONTROL">
+ <bitfield name="COPY" pos="1" type="boolean"/>
+ </reg32>
+
+ <doc>
+ Blits:
+ ------
+
+ Blits are triggered by CP_EVENT_WRITE:BLIT, compared to previous
+ generations where they shared most of the GL pipeline and were
+ triggered by CP_DRAW_INDX*.
+
+ For gmem->mem the blob uses RB_BLIT_CNTL.BUF to specify the src of
+ the blit (ie. MRTn, ZS, etc) and RB_BLIT_DST_LO/HI for the destination
+ gpuaddr. The gmem offset is taken from RB_MRT[n].BASE_LO/HI.
+
+ For mem->gmem the blob uses just MRT0 or ZS and RB_BLIT_DST_LO/HI
+ for the GMEM offset, and the gpuaddr from RB_MRT[0].BASE_LO/HI
+ (I suppose this is just to avoid trashing RB_MRT[1..7]??)
+ </doc>
+ <reg32 offset="0xe210" name="RB_BLIT_CNTL">
+ <bitfield name="BUF" low="0" high="3" type="a5xx_blit_buf"/>
+ </reg32>
+ <reg32 offset="0xe211" name="RB_RESOLVE_CNTL_1" type="adreno_reg_xy"/>
+ <reg32 offset="0xe212" name="RB_RESOLVE_CNTL_2" type="adreno_reg_xy"/>
+ <reg32 offset="0xe213" name="RB_RESOLVE_CNTL_3">
+ <!-- if b0 set, output is in TILE5_3 format -->
+ <bitfield name="TILED" pos="0" type="boolean"/>
+ <!--
+ 0xe213:
+ 0x0 mem->gmem
+ 0xf gmem->mem with flag buffer (color)
+ 0x4 gmem->mem without flag buffer (color)
+ 0x7 BYPASS mode flag buffer result (ie. on readpix)
+ also for gmem->mem preserving tiling
+ -->
+ </reg32>
+ <reg32 offset="0xe214" name="RB_BLIT_DST_LO"/>
+ <reg32 offset="0xe215" name="RB_BLIT_DST_HI"/>
+ <reg32 offset="0xe216" name="RB_BLIT_DST_PITCH" shr="6" type="uint"/>
+ <!-- array-pitch is size of layer -->
+ <reg32 offset="0xe217" name="RB_BLIT_DST_ARRAY_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0xe218" name="RB_CLEAR_COLOR_DW0"/>
+ <reg32 offset="0xe219" name="RB_CLEAR_COLOR_DW1"/>
+ <reg32 offset="0xe21a" name="RB_CLEAR_COLOR_DW2"/>
+ <reg32 offset="0xe21b" name="RB_CLEAR_COLOR_DW3"/>
+ <reg32 offset="0xe21c" name="RB_CLEAR_CNTL">
+ <bitfield name="FAST_CLEAR" pos="1" type="boolean"/>
+ <bitfield name="MSAA_RESOLVE" pos="2" type="boolean"/>
+ <doc>
+ For MASK, if RB_BLIT_CNTL.BUF=BLIT_ZS:
+ 1 - depth
+ 2 - stencil
+ 3 - depth+stencil
+ if RB_BLIT_CNTL.BUF=BLIT_MRTn
+ then probably a component mask, I always see 0xf
+ </doc>
+ <bitfield name="MASK" low="4" high="7"/>
+ </reg32>
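+
+<!--
+Not from the original file: pseudo-code for the gmem->mem path described
+in the Blits doc above.  emit_reg32(), emit_event_write(), the register
+name macros and the BLIT_MRT0 value are all hypothetical stand-ins, not
+the real driver API; this only illustrates which register carries what.
+
+static void blit_gmem_to_mem(struct ring *ring, int n, uint64_t dst_iova,
+                             uint32_t dst_pitch, uint32_t gmem_offset)
+{
+    /* gmem offset of the source comes from RB_MRT[n].BASE_LO/HI: */
+    emit_reg32(ring, REG_RB_MRT_BASE_LO(n), gmem_offset);
+    emit_reg32(ring, REG_RB_MRT_BASE_HI(n), 0);
+    /* destination gpuaddr and pitch go in the RB_BLIT_DST registers: */
+    emit_reg32(ring, REG_RB_BLIT_DST_LO, (uint32_t)dst_iova);
+    emit_reg32(ring, REG_RB_BLIT_DST_HI, (uint32_t)(dst_iova >> 32));
+    emit_reg32(ring, REG_RB_BLIT_DST_PITCH, dst_pitch >> 6);   /* shr="6" */
+    /* RB_BLIT_CNTL.BUF selects the source (MRTn, ZS, ...): */
+    emit_reg32(ring, REG_RB_BLIT_CNTL, BLIT_MRT0 + n);
+    /* the blit itself is kicked by CP_EVENT_WRITE:BLIT: */
+    emit_event_write(ring, BLIT);
+}
+ -->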
+
+ <doc>
+ Buffer Metadata (flag buffers):
+ -------------------------------
+
+ The blob seems to stick some metadata at the front of the buffer,
+ for both z/s and MRT. I think this is the same as the UBWC (bandwidth
+ compression) metadata that mdp 1.7 and later supports. See
+ 1d3fae5698ce5358caab87a15383b690941697e8 in the downstream kernel.
+ UBWC seems to stand for "universal bandwidth compression".
+
+ Before glReadPixels() it does a pair of BYPASS blits (at least
+ if metadata is used) presumably to resolve metadata.
+
+ NOTES: see: getUBwcBlockSize(), getUBwcMetaBufferSize() at
+ https://android.googlesource.com/platform/hardware/qcom/display/+/android-6.0.1_r40/msm8994/libgralloc/alloc_controller.cpp
+ (note that bpp is in bytes, not bits, so really cpp)
+
+ Example Layout 2d w/ mipmap levels:
+
+ 100x2000, ifmt=GL_RG, fmt=GL_RG16F, type=GL_FLOAT, meta=64x512@0x8000 (7x500)
+ base=c072e000, offset=16384, size=1703936
+
+ color flags
+ 0 c073a000 c0732000 - level 0 flags is address
+ 1 c0838000 c0834000 programmed in texture state
+ 2 c0879000 c0877000
+ 3 c089a000 c0899000
+ 4 c08ab000 c08aa000
+ 5 c08b4000 c08b3000
+ 6 c08b9000 c08b8000
+ 7 c08bc000 c08bb000
+ 8 c08be000 c08bd000
+ 9 c08c0000 c08bf000
+ 10 c08c2000 c08c1000
+
+ ARRAY_PITCH is the combined size of all the levels plus flags,
+ so 0xc08c3000 - 0xc0732000 = 0x00191000 (1642496); each level
+ takes up a minimum of 2 pages (since color and flags parts are
+ each page aligned).
+
+ { TILE_MODE = TILE5_3 | SWIZ_X = A5XX_TEX_X | SWIZ_Y = A5XX_TEX_Y | SWIZ_Z = A5XX_TEX_ZERO | SWIZ_W = A5XX_TEX_ONE | MIPLVLS = 0 | FMT = TFMT5_16_16_FLOAT | SWAP = WZYX }
+ { WIDTH = 100 | HEIGHT = 2000 }
+ { FETCHSIZE = TFETCH5_4_BYTE | PITCH = 512 | TYPE = A5XX_TEX_2D }
+ { ARRAY_PITCH = 1642496 | 0x18800000 } - NOTE c2dc always has 0x18800000 but
+ { BASE_LO = 0xc0732000 } this varies for blob gles driver..
+ { BASE_HI = 0 | DEPTH = 1 } not sure what it is
+
+
+ </doc>
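+
+<!--
+Not from the original file: a small arithmetic sketch of the layout note
+above, i.e. that the color and flags parts of each level are individually
+page aligned (hence at least two pages per level) and that ARRAY_PITCH is
+the sum over all levels.  The names and the 4 KiB page size are assumptions.
+
+#include <stdint.h>
+
+static uint32_t align_page(uint32_t sz)
+{
+    return (sz + 4095) & ~4095u;
+}
+
+static uint32_t ubwc_array_pitch(const uint32_t *color_sz,
+                                 const uint32_t *flags_sz, int nr_levels)
+{
+    uint32_t total = 0;
+    for (int i = 0; i < nr_levels; i++)
+        total += align_page(color_sz[i]) + align_page(flags_sz[i]);
+    return total;
+}
+ -->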
+ <reg32 offset="0xe240" name="RB_DEPTH_FLAG_BUFFER_BASE_LO"/>
+ <reg32 offset="0xe241" name="RB_DEPTH_FLAG_BUFFER_BASE_HI"/>
+ <reg32 offset="0xe242" name="RB_DEPTH_FLAG_BUFFER_PITCH">
+ </reg32>
+ <array offset="0xe243" name="RB_MRT_FLAG_BUFFER" stride="4" length="8">
+ <reg32 offset="0" name="ADDR_LO"/>
+ <reg32 offset="1" name="ADDR_HI"/>
+ <reg32 offset="2" name="PITCH" shr="6" type="uint"/>
+ <!-- array-pitch is size of layer -->
+ <reg32 offset="3" name="ARRAY_PITCH" shr="6" type="uint"/>
+ </array>
+ <reg32 offset="0xe263" name="RB_BLIT_FLAG_DST_LO"/>
+ <reg32 offset="0xe264" name="RB_BLIT_FLAG_DST_HI"/>
+ <reg32 offset="0xe265" name="RB_BLIT_FLAG_DST_PITCH" shr="6" type="uint"/>
+ <!-- array-pitch is size of layer -->
+ <reg32 offset="0xe266" name="RB_BLIT_FLAG_DST_ARRAY_PITCH" shr="6" type="uint"/>
+
+ <reg32 offset="0xe267" name="RB_SAMPLE_COUNT_ADDR_LO"/>
+ <reg32 offset="0xe268" name="RB_SAMPLE_COUNT_ADDR_HI"/>
+
+ <reg32 offset="0xe280" name="VPC_CNTL_0">
+ <doc>
+ num of varyings plus four for gl_Position (plus one if gl_PointSize)
+ plus # of transform-feedback (streamout) varyings if using the
+ hw streamout (rather than stg instructions in shader)
+ </doc>
+ <bitfield name="STRIDE_IN_VPC" low="0" high="6" type="uint"/>
+ <bitfield name="VARYING" pos="11" type="boolean"/>
+ </reg32>
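+
+<!--
+Not from the original file: the STRIDE_IN_VPC count spelled out as code,
+following the doc above literally.  The names are hypothetical, and
+whether "varyings" means whole varyings or components is not something
+this file states.
+
+#include <stdbool.h>
+#include <stdint.h>
+
+static uint32_t vpc_stride_in_vpc(uint32_t num_varyings, bool writes_psize,
+                                  bool hw_streamout, uint32_t num_so_only_varyings)
+{
+    uint32_t stride = num_varyings + 4;     /* plus four for gl_Position */
+    if (writes_psize)
+        stride += 1;                        /* plus one if gl_PointSize */
+    if (hw_streamout)
+        stride += num_so_only_varyings;     /* streamout-only varyings */
+    return stride;
+}
+ -->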
+ <array offset="0xe282" name="VPC_VARYING_INTERP" stride="1" length="8">
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+ <array offset="0xe28a" name="VPC_VARYING_PS_REPL" stride="1" length="8">
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+ <reg32 offset="0xe292" name="UNKNOWN_E292"/>
+ <reg32 offset="0xe293" name="UNKNOWN_E293"/>
+ <array offset="0xe294" name="VPC_VAR" stride="1" length="4">
+ <!-- one bit per varying component: -->
+ <reg32 offset="0" name="DISABLE"/>
+ </array>
+ <reg32 offset="0xe298" name="VPC_GS_SIV_CNTL"/>
+ <reg32 offset="0xe29a" name="VPC_CLIP_CNTL">
+ <bitfield name="CLIP_MASK" low="0" high="7" type="uint"/>
+ <!-- there can be up to 8 total clip/cull distance outputs,
+ but apparently VPC can only deal with vec4, so when there are
+ more than 4 outputs a second location needs to be programmed
+ -->
+ <bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
+ <bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0xe29d" name="VPC_PACK">
+ <bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
+ <!--
+ This seems to be the OUTLOC for the psize output. It could possibly
+ be the max-OUTLOC position, but it is only set when VS writes psize
+ (and blob always puts psize at highest OUTLOC)
+ -->
+ <bitfield name="PSIZELOC" low="8" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe2a0" name="VPC_FS_PRIMITIVEID_CNTL"/>
+
+ <doc>
+ Stream-Out:
+ -----------
+
+ The VPC_SO[0..3] registers set up details about the streamout buffers,
+ and the number of components to write to each.
+
+ VPC_SO_PROG provides the mapping between output varyings and the SO
+ buffers. It is written multiple times (via a CP_CONTEXT_REG_BUNCH
+ packet, not sure if that matters); each write can handle up to two
+ components of stream-out output. Order matches up to OUTLOC,
+ including padding. So, if outputting first 3 varyings:
+
+ SP_VS_OUT[0].REG: { A_REGID = r0.w | A_COMPMASK = 0xf | B_REGID = r0.x | B_COMPMASK = 0x7 }
+ SP_VS_OUT[0x1].REG: { A_REGID = r1.w | A_COMPMASK = 0x3 | B_REGID = r2.y | B_COMPMASK = 0xf }
+ SP_VS_VPC_DST[0].REG: { OUTLOC0 = 0 | OUTLOC1 = 4 | OUTLOC2 = 8 | OUTLOC3 = 12 }
+
+ Then:
+
+ VPC_SO_PROG: { A_BUF = 0 | A_OFF = 0 | A_EN | A_BUF = 0 | B_OFF = 4 | B_EN }
+ VPC_SO_PROG: { A_BUF = 0 | A_OFF = 8 | A_EN | A_BUF = 0 | B_OFF = 12 | B_EN }
+ VPC_SO_PROG: { A_BUF = 2 | A_OFF = 0 | A_EN | A_BUF = 2 | B_OFF = 4 | B_EN }
+ VPC_SO_PROG: { A_BUF = 2 | A_OFF = 8 | A_EN | A_BUF = 0 | B_OFF = 0 }
+ VPC_SO_PROG: { A_BUF = 1 | A_OFF = 0 | A_EN | A_BUF = 1 | B_OFF = 4 | B_EN }
+
+ Note that varying order is OUTLOC0, OUTLOC2, OUTLOC1, and note
+ the padding between OUTLOC1 and OUTLOC2.
+
+ The BUF bitfield indicates which of the four streamout buffers
+ to write into at the specified offset.
+
+ VPC_SO[n].FLUSH_BASE_LO/HI is used for the hw to write back the next
+ offset, which gets loaded back into VPC_SO[n].BUFFER_OFFSET via a
+ CP_MEM_TO_REG. Probably can be ignored until we have GS/etc, at
+ which point we can't calculate the offset on the CPU.
+ </doc>
+ <reg32 offset="0xe2a1" name="VPC_SO_BUF_CNTL">
+ <bitfield name="BUF0" pos="0" type="boolean"/>
+ <bitfield name="BUF1" pos="3" type="boolean"/>
+ <bitfield name="BUF2" pos="6" type="boolean"/>
+ <bitfield name="BUF3" pos="9" type="boolean"/>
+ <bitfield name="ENABLE" pos="15" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe2a2" name="VPC_SO_OVERRIDE">
+ <bitfield name="SO_DISABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe2a3" name="VPC_SO_CNTL">
+ <!-- always 0x10000 when SO enabled.. -->
+ <bitfield name="ENABLE" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe2a4" name="VPC_SO_PROG">
+ <bitfield name="A_BUF" low="0" high="1" type="uint"/>
+ <bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
+ <bitfield name="A_EN" pos="11" type="boolean"/>
+ <bitfield name="B_BUF" low="12" high="13" type="uint"/>
+ <bitfield name="B_OFF" low="14" high="22" shr="2" type="uint"/>
+ <bitfield name="B_EN" pos="23" type="boolean"/>
+ </reg32>
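+
+<!--
+Not from the original file: a packing sketch for one VPC_SO_PROG write,
+following the bitfield layout above.  Offsets are passed in bytes and must
+be multiples of 4, since the OFF fields carry shr="2".  The helper name is
+hypothetical.
+
+#include <stdbool.h>
+#include <stdint.h>
+
+static uint32_t pack_vpc_so_prog(uint32_t a_buf, uint32_t a_off, bool a_en,
+                                 uint32_t b_buf, uint32_t b_off, bool b_en)
+{
+    return (a_buf & 0x3) |                     /* A_BUF: bits 0..1   */
+           (((a_off >> 2) & 0x1ff) << 2) |     /* A_OFF: bits 2..10  */
+           ((uint32_t)a_en << 11) |            /* A_EN:  bit 11      */
+           ((b_buf & 0x3) << 12) |             /* B_BUF: bits 12..13 */
+           (((b_off >> 2) & 0x1ff) << 14) |    /* B_OFF: bits 14..22 */
+           ((uint32_t)b_en << 23);             /* B_EN:  bit 23      */
+}
+ -->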
+ <array offset="0xe2a7" name="VPC_SO" stride="7" length="4">
+ <reg32 offset="0" name="BUFFER_BASE_LO"/>
+ <reg32 offset="1" name="BUFFER_BASE_HI"/>
+ <reg32 offset="2" name="BUFFER_SIZE"/>
+ <reg32 offset="3" name="NCOMP"/> <!-- component count -->
+ <reg32 offset="4" name="BUFFER_OFFSET"/>
+ <reg32 offset="5" name="FLUSH_BASE_LO"/>
+ <reg32 offset="6" name="FLUSH_BASE_HI"/>
+ </array>
+
+ <reg32 offset="0xe384" name="PC_PRIMITIVE_CNTL">
+ <!-- # of varyings plus four for gl_Position (plus one if gl_PointSize) -->
+ <bitfield name="STRIDE_IN_VPC" low="0" high="6" type="uint"/>
+ <bitfield name="PRIMITIVE_RESTART" pos="8" type="boolean"/>
+ <bitfield name="COUNT_PRIMITIVES" pos="9" type="boolean"/><!-- enabled when gl_PrimitiveIDIn is used -->
+ <bitfield name="PROVOKING_VTX_LAST" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe385" name="PC_PRIM_VTX_CNTL">
+ <bitfield name="PSIZE" pos="11" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe388" name="PC_RASTER_CNTL">
+ <bitfield name="POLYMODE_FRONT_PTYPE" low="0" high="2" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_BACK_PTYPE" low="3" high="5" type="adreno_pa_su_sc_draw"/>
+ <bitfield name="POLYMODE_ENABLE" pos="6" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe389" name="PC_CLIP_CNTL">
+ <bitfield name="CLIP_MASK" low="0" high="7"/>
+ </reg32>
+ <reg32 offset="0xe38c" name="PC_RESTART_INDEX"/>
+ <reg32 offset="0xe38d" name="PC_GS_LAYERED"/>
+ <reg32 offset="0xe38e" name="PC_GS_PARAM">
+ <bitfield name="MAX_VERTICES" low="0" high="9" type="uint"/><!-- vertices - 1 -->
+ <bitfield name="INVOCATIONS" low="11" high="15" type="uint"/><!-- invoc - 1 -->
+ <bitfield name="PRIMTYPE" low="23" high="24" type="adreno_pa_su_sc_draw"/>
+ </reg32>
+ <reg32 offset="0xe38f" name="PC_HS_PARAM">
+ <bitfield name="VERTICES_OUT" low="0" high="5" type="uint"/>
+ <bitfield name="SPACING" low="21" high="22" type="a4xx_tess_spacing"/>
+ <bitfield name="CW" pos="23" type="boolean"/>
+ <bitfield name="CONNECTED" pos="24" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe3b0" name="PC_POWER_CNTL"/>
+
+ <reg32 offset="0xe400" name="VFD_CONTROL_0">
+ <bitfield name="VTXCNT" low="0" high="5" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe401" name="VFD_CONTROL_1">
+ <bitfield name="REGID4VTX" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="REGID4INST" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="REGID4PRIMID" low="16" high="23" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xe402" name="VFD_CONTROL_2">
+ <bitfield name="REGID_PATCHID" low="0" high="7" type="a3xx_regid"/><!-- same as VFD_CONTROL_3.REGID_PATCHID? -->
+ </reg32>
+ <reg32 offset="0xe403" name="VFD_CONTROL_3">
+ <bitfield name="REGID_PATCHID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xe404" name="VFD_CONTROL_4">
+ </reg32>
+ <reg32 offset="0xe405" name="VFD_CONTROL_5">
+ <!-- b0 set if gl_PrimitiveID used in fs ?? -->
+ </reg32>
+ <reg32 offset="0xe408" name="VFD_INDEX_OFFSET"/>
+ <reg32 offset="0xe409" name="VFD_INSTANCE_START_OFFSET"/>
+ <array offset="0xe40a" name="VFD_FETCH" stride="4" length="32">
+ <reg32 offset="0x0" name="BASE_LO"/>
+ <reg32 offset="0x1" name="BASE_HI"/>
+ <reg32 offset="0x2" name="SIZE" type="uint"/>
+ <reg32 offset="0x3" name="STRIDE" type="uint"/>
+ </array>
+ <array offset="0xe48a" name="VFD_DECODE" stride="2" length="32">
+ <reg32 offset="0x0" name="INSTR">
+ <!-- IDX appears to index into VFD_FETCH[] -->
+ <bitfield name="IDX" low="0" high="4" type="uint"/>
+ <bitfield name="INSTANCED" pos="17" type="boolean"/>
+ <bitfield name="FORMAT" low="20" high="27" type="a5xx_vtx_fmt"/>
+ <bitfield name="SWAP" low="28" high="29" type="a3xx_color_swap"/>
+ <bitfield name="UNK30" pos="30" type="boolean"/>
+ <bitfield name="FLOAT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x1" name="STEP_RATE"/> <!-- ??? -->
+ </array>
+ <array offset="0xe4ca" name="VFD_DEST_CNTL" stride="1" length="32">
+ <reg32 offset="0x0" name="INSTR">
+ <bitfield name="WRITEMASK" low="0" high="3" type="hex"/>
+ <bitfield name="REGID" low="4" high="11" type="a3xx_regid"/>
+ </reg32>
+ </array>
+ <reg32 offset="0xe4f0" name="VFD_POWER_CNTL"/>
+
+ <!-- 0x0 for compute, 0x10 for 3d? -->
+ <reg32 offset="0xe580" name="SP_SP_CNTL"/>
+
+ <bitset name="a5xx_xs_config" inline="yes">
+ <bitfield name="ENABLED" pos="0" type="boolean"/>
+ <bitfield name="CONSTOBJECTOFFSET" low="1" high="7" type="uint"/>
+ <bitfield name="SHADEROBJOFFSET" low="8" high="14" type="uint"/>
+ </bitset>
+ <bitset name="a5xx_xs_cntl" inline="yes">
+ <bitfield name="SSBO_ENABLE" pos="0" type="boolean"/>
+ <!--
+ no idea high bit.. could be this is amount of on-chip memory used
+ rather than total size?
+ -->
+ <bitfield name="INSTRLEN" low="1" high="31" type="uint"/>
+ </bitset>
+ <bitset name="a5xx_sp_xs_ctrl_reg0" inline="yes">
+ <!-- bit1 almost always set -->
+ <!-- set for "buffer mode" (ie. shader small enough to fit internally) -->
+ <bitfield name="BUFFER" pos="2" type="boolean"/>
+ <!-- 24 or more (full size) GPRS and blob uses TWO_QUADS instead of FOUR_QUADS -->
+ <bitfield name="THREADSIZE" pos="3" type="a3xx_threadsize"/>
+ <bitfield name="HALFREGFOOTPRINT" low="4" high="9" type="uint"/>
+ <bitfield name="FULLREGFOOTPRINT" low="10" high="15" type="uint"/>
+ <bitfield name="VARYING" pos="16" type="boolean"/>
+ <bitfield name="PIXLODENABLE" pos="20" type="boolean"/>
+ <!-- seems to be nesting level for flow control:.. -->
+ <bitfield name="BRANCHSTACK" low="25" high="31" type="uint"/>
+ </bitset>
+ <!-- assuming things appear in same relative order as a4xx: -->
+ <!-- duplicated exactly w/ corresponding HLSQ_ regs starting at 0xe78b.. -->
+ <reg32 offset="0xe584" name="SP_VS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe585" name="SP_FS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe586" name="SP_HS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe587" name="SP_DS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe588" name="SP_GS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe589" name="SP_CS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe58a" name="SP_VS_CONFIG_MAX_CONST"/>
+ <reg32 offset="0xe58b" name="SP_FS_CONFIG_MAX_CONST"/>
+ <reg32 offset="0xe590" name="SP_VS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe592" name="SP_PRIMITIVE_CNTL">
+ <!-- # of VS outputs including pos/psize -->
+ <bitfield name="VSOUT" low="0" high="4" type="uint"/>
+ </reg32>
+ <array offset="0xe593" name="SP_VS_OUT" stride="1" length="16">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
+ </reg32>
+ </array>
+ <!--
+	Starting with a5xx, position/psize outputs from the shader end up in the
+	SP_VS_OUT map, with the highest OUTLOCn position.  (Generally they are
+	also the last entries, except when gl_PointCoord is used: the blob inserts
+	an extra varying after them, but with a lower OUTLOC position.)  If
+	present, psize is last, preceded by position.
+ -->
+ <array offset="0xe5a3" name="SP_VS_VPC_DST" stride="1" length="8">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0xe5ab" name="UNKNOWN_E5AB"/>
+ <reg32 offset="0xe5ac" name="SP_VS_OBJ_START_LO"/>
+ <reg32 offset="0xe5ad" name="SP_VS_OBJ_START_HI"/>
+
+ <bitset name="a5xx_sp_xs_pvt_mem_param" inline="yes">
+ <bitfield name="MEMSIZEPERITEM" low="0" high="7" shr="9">
+ <doc>The size of memory that ldp/stp can address.</doc>
+ </bitfield>
+ <bitfield name="HWSTACKOFFSET" low="8" high="23" shr="11" type="uint"/>
+ <bitfield name="HWSTACKSIZEPERTHREAD" low="24" high="31">
+ <doc>Guessing that this is the same as a3xx/a6xx.</doc>
+ </bitfield>
+ </bitset>
+
+ <bitset name="a5xx_sp_xs_pvt_mem_size" inline="yes">
+ <bitfield name="TOTALPVTMEMSIZE" low="0" high="17" shr="12"/>
+ </bitset>
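+	<!--
+	A rough sketch (not taken from the blob) of how these PVT_MEM fields
+	might be packed, assuming the usual meaning of shr="N" (the stored field
+	is the byte value right-shifted by N, so values presumably need to be
+	aligned accordingly).  per_fiber_size, hw_stack_offset and total_size
+	are made-up names for illustration:
+
+	  uint32_t param = ((per_fiber_size >> 9) & 0xff) |           /* MEMSIZEPERITEM, 512B units */
+	                   (((hw_stack_offset >> 11) & 0xffff) << 8); /* HWSTACKOFFSET, 2KB units */
+	  uint32_t size  = (total_size >> 12) & 0x3ffff;              /* TOTALPVTMEMSIZE, 4KB units */
+	-->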
+
+ <reg32 offset="0xe5ae" name="SP_VS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe5af" name="SP_VS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe5b1" name="SP_VS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+ <reg32 offset="0xe5c0" name="SP_FS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe5c2" name="UNKNOWN_E5C2"/>
+ <reg32 offset="0xe5c3" name="SP_FS_OBJ_START_LO"/>
+ <reg32 offset="0xe5c4" name="SP_FS_OBJ_START_HI"/>
+ <reg32 offset="0xe5c5" name="SP_FS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe5c6" name="SP_FS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe5c8" name="SP_FS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+ <reg32 offset="0xe5c9" name="SP_BLEND_CNTL">
+		<!-- per-MRT enable bits (one bit per render target) -->
+ <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="UNK8" pos="8" type="boolean"/>
+ <bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xe5ca" name="SP_FS_OUTPUT_CNTL">
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ <bitfield name="DEPTH_REGID" low="5" high="12" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK_REGID" low="13" high="20" type="a3xx_regid"/>
+ </reg32>
+ <array offset="0xe5cb" name="SP_FS_OUTPUT" stride="1" length="8">
+ <doc>per MRT</doc>
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="HALF_PRECISION" pos="8" type="boolean"/>
+ </reg32>
+ </array>
+ <array offset="0xe5d3" name="SP_FS_MRT" stride="1" length="8">
+ <reg32 offset="0" name="REG">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a5xx_color_fmt"/>
+ <bitfield name="COLOR_SINT" pos="8" type="boolean"/>
+ <bitfield name="COLOR_UINT" pos="9" type="boolean"/>
+ <bitfield name="COLOR_SRGB" pos="10" type="boolean"/>
+ </reg32>
+ </array>
+ <!--
+	e5db/e5dc seem to be related to some optimization that samples from a
+	texture using the varying value directly before the shader thread
+	starts?  I guess that could optimize common simple frag shaders..
+ -->
+ <reg32 offset="0xe5db" name="UNKNOWN_E5DB"/>
+ <reg32 offset="0xe5f0" name="SP_CS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe5f2" name="UNKNOWN_E5F2"/>
+ <reg32 offset="0xe5f3" name="SP_CS_OBJ_START_LO"/>
+ <reg32 offset="0xe5f4" name="SP_CS_OBJ_START_HI"/>
+ <reg32 offset="0xe5f5" name="SP_CS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe5f6" name="SP_CS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe5f8" name="SP_CS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+
+ <!-- e5f9 something compute related.. seems to change when HLSQ_CS_CNTL_1 changes -->
+
+ <reg32 offset="0xe600" name="SP_HS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe602" name="UNKNOWN_E602"/>
+ <reg32 offset="0xe603" name="SP_HS_OBJ_START_LO"/>
+ <reg32 offset="0xe604" name="SP_HS_OBJ_START_HI"/>
+ <reg32 offset="0xe605" name="SP_HS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe606" name="SP_HS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe608" name="SP_HS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+ <reg32 offset="0xe610" name="SP_DS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe62b" name="UNKNOWN_E62B"/>
+ <reg32 offset="0xe62c" name="SP_DS_OBJ_START_LO"/>
+ <reg32 offset="0xe62d" name="SP_DS_OBJ_START_HI"/>
+ <reg32 offset="0xe62e" name="SP_DS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe62f" name="SP_DS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe631" name="SP_DS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+ <reg32 offset="0xe640" name="SP_GS_CTRL_REG0" type="a5xx_sp_xs_ctrl_reg0"/>
+ <reg32 offset="0xe65b" name="UNKNOWN_E65B"/>
+ <reg32 offset="0xe65c" name="SP_GS_OBJ_START_LO"/>
+ <reg32 offset="0xe65d" name="SP_GS_OBJ_START_HI"/>
+ <reg32 offset="0xe65e" name="SP_GS_PVT_MEM_PARAM" type="a5xx_sp_xs_pvt_mem_param"/>
+ <reg64 offset="0xe65f" name="SP_GS_PVT_MEM_ADDR" type="waddress" align="32"/>
+ <reg32 offset="0xe661" name="SP_GS_PVT_MEM_SIZE" type="a5xx_sp_xs_pvt_mem_size"/>
+
+ <reg32 offset="0xe704" name="TPL1_TP_RAS_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ </reg32>
+ <reg32 offset="0xe705" name="TPL1_TP_DEST_MSAA_CNTL">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+ <!-- either blob is doing it wrong, or this is not per-stage anymore: -->
+ <reg32 offset="0xe706" name="TPL1_TP_BORDER_COLOR_BASE_ADDR_LO"/>
+ <reg32 offset="0xe707" name="TPL1_TP_BORDER_COLOR_BASE_ADDR_HI"/>
+
+ <!--
+ so these have the same info that is normally in the CP_LOAD_STATE
+ packets.. not sure if they are normally written by pm4/me or if the
+ CP_LOAD_STATE mechanism is deprecated?
+ -->
+ <reg32 offset="0xe700" name="TPL1_VS_TEX_COUNT" type="uint"/>
+ <reg32 offset="0xe701" name="TPL1_HS_TEX_COUNT" type="uint"/>
+ <reg32 offset="0xe702" name="TPL1_DS_TEX_COUNT" type="uint"/>
+ <reg32 offset="0xe703" name="TPL1_GS_TEX_COUNT" type="uint"/>
+
+ <reg32 offset="0xe722" name="TPL1_VS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe723" name="TPL1_VS_TEX_SAMP_HI"/>
+ <reg32 offset="0xe724" name="TPL1_HS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe725" name="TPL1_HS_TEX_SAMP_HI"/>
+ <reg32 offset="0xe726" name="TPL1_DS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe727" name="TPL1_DS_TEX_SAMP_HI"/>
+ <reg32 offset="0xe728" name="TPL1_GS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe729" name="TPL1_GS_TEX_SAMP_HI"/>
+
+ <reg32 offset="0xe72a" name="TPL1_VS_TEX_CONST_LO"/>
+ <reg32 offset="0xe72b" name="TPL1_VS_TEX_CONST_HI"/>
+ <reg32 offset="0xe72c" name="TPL1_HS_TEX_CONST_LO"/>
+ <reg32 offset="0xe72d" name="TPL1_HS_TEX_CONST_HI"/>
+ <reg32 offset="0xe72e" name="TPL1_DS_TEX_CONST_LO"/>
+ <reg32 offset="0xe72f" name="TPL1_DS_TEX_CONST_HI"/>
+ <reg32 offset="0xe730" name="TPL1_GS_TEX_CONST_LO"/>
+ <reg32 offset="0xe731" name="TPL1_GS_TEX_CONST_HI"/>
+
+ <reg32 offset="0xe750" name="TPL1_FS_TEX_COUNT" type="uint"/>
+ <reg32 offset="0xe751" name="TPL1_CS_TEX_COUNT" type="uint"/>
+
+ <reg32 offset="0xe75a" name="TPL1_FS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe75b" name="TPL1_FS_TEX_SAMP_HI"/>
+ <reg32 offset="0xe75c" name="TPL1_CS_TEX_SAMP_LO"/>
+ <reg32 offset="0xe75d" name="TPL1_CS_TEX_SAMP_HI"/>
+ <reg32 offset="0xe75e" name="TPL1_FS_TEX_CONST_LO"/>
+ <reg32 offset="0xe75f" name="TPL1_FS_TEX_CONST_HI"/>
+ <reg32 offset="0xe760" name="TPL1_CS_TEX_CONST_LO"/>
+ <reg32 offset="0xe761" name="TPL1_CS_TEX_CONST_HI"/>
+
+ <reg32 offset="0xe764" name="TPL1_TP_FS_ROTATION_CNTL"/>
+
+ <reg32 offset="0xe784" name="HLSQ_CONTROL_0_REG">
+		<!-- with 24 or more (full size) GPRs the blob uses TWO_QUADS instead of FOUR_QUADS -->
+ <bitfield name="FSTHREADSIZE" pos="0" type="a3xx_threadsize"/>
+ <bitfield name="CSTHREADSIZE" pos="2" type="a3xx_threadsize"/>
+ </reg32>
+ <reg32 offset="0xe785" name="HLSQ_CONTROL_1_REG">
+		<!-- I guess.. not set exactly the same as a4xx, but similar: -->
+ <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="5" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe786" name="HLSQ_CONTROL_2_REG">
+ <bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- SAMPLEID is loaded into a half-precision register: -->
+ <bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xe787" name="HLSQ_CONTROL_3_REG">
+ <!-- register loaded with position (bary.f) -->
+ <bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xe788" name="HLSQ_CONTROL_4_REG">
+ <bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="XYCOORDREGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <!--
+ 0x020fffff for normal draws, 0x1f00000 for compute.. maybe what state
+ is enabled? We could probably try disabling different bits and see
+ what breaks to figure out which is what:
+ -->
+ <reg32 offset="0xe78a" name="HLSQ_UPDATE_CNTL"/>
+ <reg32 offset="0xe78b" name="HLSQ_VS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe78c" name="HLSQ_FS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe78d" name="HLSQ_HS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe78e" name="HLSQ_DS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe78f" name="HLSQ_GS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe790" name="HLSQ_CS_CONFIG" type="a5xx_xs_config"/>
+ <reg32 offset="0xe791" name="HLSQ_VS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe792" name="HLSQ_FS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe793" name="HLSQ_HS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe794" name="HLSQ_DS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe795" name="HLSQ_GS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe796" name="HLSQ_CS_CNTL" type="a5xx_xs_cntl"/>
+ <reg32 offset="0xe7b9" name="HLSQ_CS_KERNEL_GROUP_X"/>
+ <reg32 offset="0xe7ba" name="HLSQ_CS_KERNEL_GROUP_Y"/>
+ <reg32 offset="0xe7bb" name="HLSQ_CS_KERNEL_GROUP_Z"/>
+ <reg32 offset="0xe7b0" name="HLSQ_CS_NDRANGE_0">
+ <bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
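+	<!--
+	A minimal sketch (illustration only) of how NDRANGE_0 appears to pack,
+	per the bitfields above; kernel_dim and local_size_* are hypothetical
+	names, and the local sizes are stored minus one:
+
+	  uint32_t ndrange0 = (kernel_dim & 0x3) |
+	                      (((local_size_x - 1) & 0x3ff) << 2) |
+	                      (((local_size_y - 1) & 0x3ff) << 12) |
+	                      (((local_size_z - 1) & 0x3ff) << 22);
+	-->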
+ <reg32 offset="0xe7b1" name="HLSQ_CS_NDRANGE_1">
+ <bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b2" name="HLSQ_CS_NDRANGE_2">
+ <bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b3" name="HLSQ_CS_NDRANGE_3">
+ <bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b4" name="HLSQ_CS_NDRANGE_4">
+ <bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b5" name="HLSQ_CS_NDRANGE_5">
+ <bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b6" name="HLSQ_CS_NDRANGE_6">
+ <bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xe7b7" name="HLSQ_CS_CNTL_0">
+ <bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
+ <!-- possibly one of these is KERNELDIMCONSTID? -->
+ <!--
+			UNK0 appears to be NUMWGCONSTID.. but only works in certain
+			cases?  The blob doesn't appear to use it, and instead emits
+			these via const (uniform), which requires some shenanigans
+			for indirect draws when the offset is not aligned strongly
+			enough to use as EXT_SRC_ADDR in CP_LOAD_STATE.
+ -->
+ <bitfield name="UNK0" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="UNK1" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xe7b8" name="HLSQ_CS_CNTL_1"/>
+ <reg32 offset="0xe7c0" name="UNKNOWN_E7C0"/>
+ <reg32 offset="0xe7c3" name="HLSQ_VS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7c4" name="HLSQ_VS_INSTRLEN" type="uint"/>
+ <reg32 offset="0xe7c5" name="UNKNOWN_E7C5"/>
+ <reg32 offset="0xe7c8" name="HLSQ_HS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7c9" name="HLSQ_HS_INSTRLEN" type="uint"/>
+ <reg32 offset="0xe7ca" name="UNKNOWN_E7CA"/>
+ <reg32 offset="0xe7cd" name="HLSQ_DS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7ce" name="HLSQ_DS_INSTRLEN" type="uint"/>
+ <reg32 offset="0xe7cf" name="UNKNOWN_E7CF"/>
+ <reg32 offset="0xe7d2" name="HLSQ_GS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7d3" name="HLSQ_GS_INSTRLEN" type="uint"/>
+ <reg32 offset="0xe7d4" name="UNKNOWN_E7D4"/>
+ <reg32 offset="0xe7d7" name="HLSQ_FS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7d8" name="HLSQ_FS_INSTRLEN" type="uint"/>
+ <reg32 offset="0xe7d9" name="UNKNOWN_E7D9"/>
+ <reg32 offset="0xe7dc" name="HLSQ_CS_CONSTLEN" type="uint"/>
+ <reg32 offset="0xe7dd" name="HLSQ_CS_INSTRLEN" type="uint"/>
+
+ <!--
+ Separate blit/2d or dma engine? Seems to get used sometimes for
+ texture uploads, where a4xx blob would use normal draws. Used
+ in render-mode 0x5..
+
+	Note: this seems mostly to be used for small blits; large blits seem
+ to use the CP_EVENT_WRITE:BLIT style of doing things. See
+ cubemap-0003 (40x40) vs cubemap-0004 (256x256).
+
+ see cube-0000, cubemap-(1..3 but not 4+), quad-textured-10..17
+
+ Other nearby registers are probably color formats, etc. The
+ blit coords are in CP packet. Play more w/ glTexSubImage2D()
+ to work it out.
+
+ Separate this into a different domain?? Would that help to
+ restrict which registers we dump based on mode?
+
+ regs 0x2000 to 0x2004 (plus all-zero regs 0x2005-0x2009) look
+ like 2nd source for blending? Used in mipmap generation.. but
+ maybe layout is a bit different. (Possibly used for reading
+ src via sampler, to enable scaling??) 0x2040 also used in this
+ case.
+ -->
+ <reg32 offset="0x2100" name="RB_2D_BLIT_CNTL"/> <!-- same as 0x2180 -->
+ <reg32 offset="0x2101" name="RB_2D_SRC_SOLID_DW0"/>
+ <reg32 offset="0x2102" name="RB_2D_SRC_SOLID_DW1"/>
+ <reg32 offset="0x2103" name="RB_2D_SRC_SOLID_DW2"/>
+ <reg32 offset="0x2104" name="RB_2D_SRC_SOLID_DW3"/>
+
+ <bitset name="a5xx_2d_surf_info" inline="yes">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a5xx_color_fmt"/>
+ <bitfield name="TILE_MODE" low="8" high="9" type="a5xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <!-- b12 seems to be set when UBWC "FLAGS" buffer enabled -->
+ <bitfield name="FLAGS" pos="12" type="boolean"/>
+ <bitfield name="SRGB" pos="13" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x2107" name="RB_2D_SRC_INFO" type="a5xx_2d_surf_info"/>
+ <reg32 offset="0x2108" name="RB_2D_SRC_LO"/>
+ <reg32 offset="0x2109" name="RB_2D_SRC_HI"/>
+ <reg32 offset="0x210a" name="RB_2D_SRC_SIZE">
+ <bitfield name="PITCH" low="0" high="15" shr="6" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="16" high="31" shr="6" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x2110" name="RB_2D_DST_INFO" type="a5xx_2d_surf_info"/>
+ <reg32 offset="0x2111" name="RB_2D_DST_LO"/>
+ <reg32 offset="0x2112" name="RB_2D_DST_HI"/>
+ <reg32 offset="0x2113" name="RB_2D_DST_SIZE">
+ <bitfield name="PITCH" low="0" high="15" shr="6" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="16" high="31" shr="6" type="uint"/>
+ </reg32>
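+	<!--
+	Sketch only: with shr="6", PITCH/ARRAY_PITCH appear to be stored in
+	64-byte units, so the byte pitch presumably needs to be 64B aligned.
+	pitch_bytes/array_pitch_bytes are made-up names:
+
+	  uint32_t dst_size = ((pitch_bytes >> 6) & 0xffff) |
+	                      (((array_pitch_bytes >> 6) & 0xffff) << 16);
+	-->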
+ <reg32 offset="0x2140" name="RB_2D_SRC_FLAGS_LO"/>
+ <reg32 offset="0x2141" name="RB_2D_SRC_FLAGS_HI"/>
+ <reg32 offset="0x2142" name="RB_2D_SRC_FLAGS_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0x2143" name="RB_2D_DST_FLAGS_LO"/>
+ <reg32 offset="0x2144" name="RB_2D_DST_FLAGS_HI"/>
+ <reg32 offset="0x2145" name="RB_2D_DST_FLAGS_PITCH" shr="6" type="uint"/>
+ <reg32 offset="0x2180" name="GRAS_2D_BLIT_CNTL"/> <!-- same as 0x2100 -->
+ <!-- looks same as 0x2107: -->
+ <reg32 offset="0x2181" name="GRAS_2D_SRC_INFO" type="a5xx_2d_surf_info"/>
+ <!-- looks same as 0x2110: -->
+ <reg32 offset="0x2182" name="GRAS_2D_DST_INFO" type="a5xx_2d_surf_info"/>
+	<!--
+	0x2100 and 0x2180 look like the same thing (RB and GRAS versions)..
+	0x86000000 for copy, 0x00000000 for fill?
+
+	0x2184: 0x9 for copy, 0x1 for blit (maybe a bitmask of enabled src/dst???)
+	-->
+ <reg32 offset="0x2184" name="UNKNOWN_2184"/>
+</domain>
+
+<domain name="A5XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a5xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A5XX_TEX_NEAREST" value="0"/>
+ <value name="A5XX_TEX_LINEAR" value="1"/>
+ <value name="A5XX_TEX_ANISO" value="2"/>
+ </enum>
+ <enum name="a5xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A5XX_TEX_REPEAT" value="0"/>
+ <value name="A5XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A5XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A5XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A5XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a5xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A5XX_TEX_ANISO_1" value="0"/>
+ <value name="A5XX_TEX_ANISO_2" value="1"/>
+ <value name="A5XX_TEX_ANISO_4" value="2"/>
+ <value name="A5XX_TEX_ANISO_8" value="3"/>
+ <value name="A5XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a5xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a5xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a5xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a5xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a5xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a5xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
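+	<!--
+	Illustration only, assuming radix="8" means 8 fractional bits (so
+	MAX_LOD/MIN_LOD are 12-bit 4.8 fixed-point values):
+
+	  /* e.g. max_lod = 5.25 -> 5.25 * 256 = 1344 = 0x540 */
+	  uint32_t dw1 = ((uint32_t)(max_lod * 256.0f) & 0xfff) << 8 |
+	                 ((uint32_t)(min_lod * 256.0f) & 0xfff) << 20;
+	-->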
+ <reg32 offset="2" name="2">
+ <!--
+ offset into border-color buffer? Blob always uses 0x80 for FS state
+ if both VS and FS have border-color.
+			Seems like when both VS and FS have bcolor, one starts 0x300 after
+			the other.. and 0x80 in TEX_SAMP.2 .. the blob doesn't seem to be
+			able to cope w/ multiple different border-color states per
+			texture.. Looks something like:
+0000: 3f000000 00000000 00000000 3f800000 00008000 ffff0000 00004000 7fff0000
+0020: 00003800 3c000000 80100010 0000f008 ff000080 7f000040 c0000200 00800000
+0040: 00003800 3c000000 00000000 00000000 00000000 00000000 00000000 00000000
+*
+0300: 3f800000 3f800000 3f800000 3f800000 ffffffff ffffffff 7fff7fff 7fff7fff
+0320: 3c003c00 3c003c00 ffffffff 0000ffff ffffffff 7f7f7f7f ffffffff 00ffffff
+0340: 3c003c00 3c003c00 00000000 00000000 00000000 00000000 00000000 00000000
+
+ -->
+ <bitfield name="BCOLOR_OFFSET" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
+
+<domain name="A5XX_TEX_CONST" width="32">
+ <doc>Texture constant dwords</doc>
+ <enum name="a5xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A5XX_TEX_X" value="0"/>
+ <value name="A5XX_TEX_Y" value="1"/>
+ <value name="A5XX_TEX_Z" value="2"/>
+ <value name="A5XX_TEX_W" value="3"/>
+ <value name="A5XX_TEX_ZERO" value="4"/>
+ <value name="A5XX_TEX_ONE" value="5"/>
+ </enum>
+ <enum name="a5xx_tex_type"> <!-- same as a4xx? -->
+ <value name="A5XX_TEX_1D" value="0"/>
+ <value name="A5XX_TEX_2D" value="1"/>
+ <value name="A5XX_TEX_CUBE" value="2"/>
+ <value name="A5XX_TEX_3D" value="3"/>
+ <value name="A5XX_TEX_BUFFER" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a5xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a5xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a5xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a5xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a5xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
+ <bitfield name="FMT" low="22" high="29" type="a5xx_tex_fmt"/>
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!--
+			b4 and b31 are set for the buffer/ssbo case, in which case the low
+			15 bits of the size are encoded in WIDTH, and the high 15 bits in
+			HEIGHT
+
+			b31 is probably the 'BUFFER' bit.. it is the one that changes the
+			behavior of the texture in dEQP-GLES31.functional.texture.texture_buffer.render.as_fragment_texture.buffer_size_131071
+ -->
+ <bitfield name="BUFFER" pos="4" type="boolean"/>
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="7" high="28" type="uint"/>
+ <bitfield name="TYPE" low="29" high="31" type="a5xx_tex_type"/>
+ </reg32>
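+	<!--
+	Worked example (an interpretation, not from the blob): in the
+	buffer/ssbo case the size from the note above would be split as
+
+	  uint32_t width  = size & 0x7fff;          /* low 15 bits -> WIDTH */
+	  uint32_t height = (size >> 15) & 0x7fff;  /* high 15 bits -> HEIGHT */
+
+	and PITCHALIGN = log2(pitchalign / 64) means a 64-byte minimum mip
+	pitch encodes as 0, 128 bytes as 1, 256 bytes as 2, etc., while PITCH
+	itself is the byte stride of level 0.
+	-->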
+ <reg32 offset="3" name="3">
+ <!--
+			ARRAY_PITCH is basically LAYERSZ for the first mipmap level, and
+			for 3d textures (laid out mipmap level first) MIN_LAYERSZ is the
+			layer size at the point where it stops being reduced when moving
+			to higher (smaller) mipmap levels
+ -->
+ <bitfield name="ARRAY_PITCH" low="0" high="13" shr="12" type="uint"/>
+ <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
+ <!--
+ by default levels with w < 16 are linear
+ TILE_ALL makes all levels have tiling
+ seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
+ -->
+ <bitfield name="TILE_ALL" pos="27" type="boolean"/>
+ <bitfield name="FLAG" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="DEPTH" low="17" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="6" name="6"/>
+ <reg32 offset="7" name="7"/>
+ <reg32 offset="8" name="8"/>
+ <reg32 offset="9" name="9"/>
+ <reg32 offset="10" name="10"/>
+ <reg32 offset="11" name="11"/>
+</domain>
+
+<!--
+Note that the "SSBO" state blocks are actually used for both images and SSBOs;
+the naming is just because I r/e'd SSBOs first.  I should probably come up
+with a better name.
+-->
+<domain name="A5XX_SSBO_0" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <!-- no BASE_HI here? Maybe this is only used for 32b mode? -->
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="0" high="21" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ARRAY_PITCH" low="12" high="25" shr="12" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!-- bytes per pixel: -->
+ <bitfield name="CPP" low="0" high="5" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="A5XX_SSBO_1" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="FMT" low="8" high="15" type="a5xx_tex_fmt"/>
+ <bitfield name="WIDTH" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="HEIGHT" low="0" high="15" type="uint"/>
+ <bitfield name="DEPTH" low="16" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="A5XX_SSBO_2" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="A5XX_UBO" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <!-- size probably in high bits -->
+ </reg32>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
new file mode 100644
index 000000000000..2dfe6913ab4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -0,0 +1,5011 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<!--
+Each register that is actually being used by the driver should have "usage"
+defined; currently there are the following usages:
+- "cmd" - the register is used outside of renderpasses and blits,
+  roughly corresponding to registers used in ib1 for Freedreno
+- "rp_blit" - the register is used inside a renderpass or blit
+  (ib2 for Freedreno)
+
+It is expected that a register with "cmd" usage may be written only at the
+start of the command buffer (ib1), while "rp_blit" usage indicates that the
+register is either overwritten by a renderpass/blit (ib2) or not used if not
+overwritten by a particular renderpass/blit.
+-->
+
+<!-- these might be same as a5xx -->
+<enum name="a6xx_tile_mode">
+ <value name="TILE6_LINEAR" value="0"/>
+ <value name="TILE6_2" value="2"/>
+ <value name="TILE6_3" value="3"/>
+</enum>
+
+<enum name="a6xx_format">
+ <value value="0x02" name="FMT6_A8_UNORM"/>
+ <value value="0x03" name="FMT6_8_UNORM"/>
+ <value value="0x04" name="FMT6_8_SNORM"/>
+ <value value="0x05" name="FMT6_8_UINT"/>
+ <value value="0x06" name="FMT6_8_SINT"/>
+
+ <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
+ <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
+ <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
+ <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
+
+ <value value="0x0f" name="FMT6_8_8_UNORM"/>
+ <value value="0x10" name="FMT6_8_8_SNORM"/>
+ <value value="0x11" name="FMT6_8_8_UINT"/>
+ <value value="0x12" name="FMT6_8_8_SINT"/>
+ <value value="0x13" name="FMT6_L8_A8_UNORM"/>
+
+ <value value="0x15" name="FMT6_16_UNORM"/>
+ <value value="0x16" name="FMT6_16_SNORM"/>
+ <value value="0x17" name="FMT6_16_FLOAT"/>
+ <value value="0x18" name="FMT6_16_UINT"/>
+ <value value="0x19" name="FMT6_16_SINT"/>
+
+ <value value="0x21" name="FMT6_8_8_8_UNORM"/>
+ <value value="0x22" name="FMT6_8_8_8_SNORM"/>
+ <value value="0x23" name="FMT6_8_8_8_UINT"/>
+ <value value="0x24" name="FMT6_8_8_8_SINT"/>
+
+ <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
+ <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
+ <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
+ <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
+ <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
+
+ <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
+
+ <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
+ <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
+ <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
+ <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
+ <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
+
+ <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
+
+ <value value="0x43" name="FMT6_16_16_UNORM"/>
+ <value value="0x44" name="FMT6_16_16_SNORM"/>
+ <value value="0x45" name="FMT6_16_16_FLOAT"/>
+ <value value="0x46" name="FMT6_16_16_UINT"/>
+ <value value="0x47" name="FMT6_16_16_SINT"/>
+
+ <value value="0x48" name="FMT6_32_UNORM"/>
+ <value value="0x49" name="FMT6_32_SNORM"/>
+ <value value="0x4a" name="FMT6_32_FLOAT"/>
+ <value value="0x4b" name="FMT6_32_UINT"/>
+ <value value="0x4c" name="FMT6_32_SINT"/>
+ <value value="0x4d" name="FMT6_32_FIXED"/>
+
+ <value value="0x58" name="FMT6_16_16_16_UNORM"/>
+ <value value="0x59" name="FMT6_16_16_16_SNORM"/>
+ <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
+ <value value="0x5b" name="FMT6_16_16_16_UINT"/>
+ <value value="0x5c" name="FMT6_16_16_16_SINT"/>
+
+ <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
+ <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
+ <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
+ <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
+
+ <value value="0x65" name="FMT6_32_32_UNORM"/>
+ <value value="0x66" name="FMT6_32_32_SNORM"/>
+ <value value="0x67" name="FMT6_32_32_FLOAT"/>
+ <value value="0x68" name="FMT6_32_32_UINT"/>
+ <value value="0x69" name="FMT6_32_32_SINT"/>
+ <value value="0x6a" name="FMT6_32_32_FIXED"/>
+
+ <value value="0x70" name="FMT6_32_32_32_UNORM"/>
+ <value value="0x71" name="FMT6_32_32_32_SNORM"/>
+ <value value="0x72" name="FMT6_32_32_32_UINT"/>
+ <value value="0x73" name="FMT6_32_32_32_SINT"/>
+ <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
+ <value value="0x75" name="FMT6_32_32_32_FIXED"/>
+
+ <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
+ <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
+ <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
+ <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
+ <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
+
+ <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
+ <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
+ <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
+ <value value="0x8f" name="FMT6_NV21"/>
+ <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
+
+ <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
+
+	<!-- Note: tiling/UBWC for these may be different from the equivalent formats.
+	     For example, FMT6_NV12_Y is not compatible with FMT6_8_UNORM.
+	-->
+ <value value="0x94" name="FMT6_NV12_Y"/>
+ <value value="0x95" name="FMT6_NV12_UV"/>
+ <value value="0x96" name="FMT6_NV12_VU"/>
+ <value value="0x97" name="FMT6_NV12_4R"/>
+ <value value="0x98" name="FMT6_NV12_4R_Y"/>
+ <value value="0x99" name="FMT6_NV12_4R_UV"/>
+ <value value="0x9a" name="FMT6_P010"/>
+ <value value="0x9b" name="FMT6_P010_Y"/>
+ <value value="0x9c" name="FMT6_P010_UV"/>
+ <value value="0x9d" name="FMT6_TP10"/>
+ <value value="0x9e" name="FMT6_TP10_Y"/>
+ <value value="0x9f" name="FMT6_TP10_UV"/>
+
+ <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
+
+ <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
+ <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
+ <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
+ <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
+ <value value="0xaf" name="FMT6_ETC1"/>
+ <value value="0xb0" name="FMT6_ETC2_RGB8"/>
+ <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
+ <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
+ <value value="0xb3" name="FMT6_DXT1"/>
+ <value value="0xb4" name="FMT6_DXT3"/>
+ <value value="0xb5" name="FMT6_DXT5"/>
+ <value value="0xb7" name="FMT6_RGTC1_UNORM"/>
+ <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
+ <value value="0xbb" name="FMT6_RGTC2_UNORM"/>
+ <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
+ <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
+ <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
+ <value value="0xc0" name="FMT6_BPTC"/>
+ <value value="0xc1" name="FMT6_ASTC_4x4"/>
+ <value value="0xc2" name="FMT6_ASTC_5x4"/>
+ <value value="0xc3" name="FMT6_ASTC_5x5"/>
+ <value value="0xc4" name="FMT6_ASTC_6x5"/>
+ <value value="0xc5" name="FMT6_ASTC_6x6"/>
+ <value value="0xc6" name="FMT6_ASTC_8x5"/>
+ <value value="0xc7" name="FMT6_ASTC_8x6"/>
+ <value value="0xc8" name="FMT6_ASTC_8x8"/>
+ <value value="0xc9" name="FMT6_ASTC_10x5"/>
+ <value value="0xca" name="FMT6_ASTC_10x6"/>
+ <value value="0xcb" name="FMT6_ASTC_10x8"/>
+ <value value="0xcc" name="FMT6_ASTC_10x10"/>
+ <value value="0xcd" name="FMT6_ASTC_12x10"/>
+ <value value="0xce" name="FMT6_ASTC_12x12"/>
+
+ <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
+ <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
+
+ <!-- Not a hw enum, used internally in driver -->
+ <value value="0xff" name="FMT6_NONE"/>
+
+</enum>
+
+<!-- probably same as a5xx -->
+<enum name="a6xx_polygon_mode">
+ <value name="POLYMODE6_POINTS" value="1"/>
+ <value name="POLYMODE6_LINES" value="2"/>
+ <value name="POLYMODE6_TRIANGLES" value="3"/>
+</enum>
+
+<enum name="a6xx_depth_format">
+ <value name="DEPTH6_NONE" value="0"/>
+ <value name="DEPTH6_16" value="1"/>
+ <value name="DEPTH6_24_8" value="2"/>
+ <value name="DEPTH6_32" value="4"/>
+</enum>
+
+<bitset name="a6x_cp_protect" inline="yes">
+ <bitfield name="BASE_ADDR" low="0" high="17"/>
+ <bitfield name="MASK_LEN" low="18" high="30"/>
+ <bitfield name="READ" pos="31" type="boolean"/>
+</bitset>
+
+<enum name="a6xx_shader_id">
+ <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
+ <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
+ <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
+ <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
+ <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
+ <value value="0x29" name="A6XX_SP_INST_DATA"/>
+ <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
+ <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
+ <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
+ <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
+ <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
+ <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
+ <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
+ <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
+ <value value="0x32" name="A6XX_SP_UAV_DATA"/>
+ <value value="0x33" name="A6XX_SP_INST_TAG"/>
+ <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
+ <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
+ <value value="0x36" name="A6XX_SP_SMO_TAG"/>
+ <value value="0x37" name="A6XX_SP_STATE_DATA"/>
+ <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
+ <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
+ <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
+ <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
+ <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
+ <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
+ <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
+ <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
+ <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
+ <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
+ <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
+</enum>
+
+<enum name="a7xx_statetype_id">
+ <value value="0" name="A7XX_TP0_NCTX_REG"/>
+ <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
+ <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
+ <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
+ <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
+ <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
+ <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
+ <value value="9" name="A7XX_TP0_TMO_DATA"/>
+ <value value="10" name="A7XX_TP0_SMO_DATA"/>
+ <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="32" name="A7XX_SP_NCTX_REG"/>
+ <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
+ <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
+ <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
+ <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
+ <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
+ <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
+ <value value="39" name="A7XX_SP_INST_DATA"/>
+ <value value="40" name="A7XX_SP_INST_DATA_1"/>
+ <value value="41" name="A7XX_SP_LB_0_DATA"/>
+ <value value="42" name="A7XX_SP_LB_1_DATA"/>
+ <value value="43" name="A7XX_SP_LB_2_DATA"/>
+ <value value="44" name="A7XX_SP_LB_3_DATA"/>
+ <value value="45" name="A7XX_SP_LB_4_DATA"/>
+ <value value="46" name="A7XX_SP_LB_5_DATA"/>
+ <value value="47" name="A7XX_SP_LB_6_DATA"/>
+ <value value="48" name="A7XX_SP_LB_7_DATA"/>
+ <value value="49" name="A7XX_SP_CB_RAM"/>
+ <value value="50" name="A7XX_SP_LB_13_DATA"/>
+ <value value="51" name="A7XX_SP_LB_14_DATA"/>
+ <value value="52" name="A7XX_SP_INST_TAG"/>
+ <value value="53" name="A7XX_SP_INST_DATA_2"/>
+ <value value="54" name="A7XX_SP_TMO_TAG"/>
+ <value value="55" name="A7XX_SP_SMO_TAG"/>
+ <value value="56" name="A7XX_SP_STATE_DATA"/>
+ <value value="57" name="A7XX_SP_HWAVE_RAM"/>
+ <value value="58" name="A7XX_SP_L0_INST_BUF"/>
+ <value value="59" name="A7XX_SP_LB_8_DATA"/>
+ <value value="60" name="A7XX_SP_LB_9_DATA"/>
+ <value value="61" name="A7XX_SP_LB_10_DATA"/>
+ <value value="62" name="A7XX_SP_LB_11_DATA"/>
+ <value value="63" name="A7XX_SP_LB_12_DATA"/>
+ <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
+ <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
+ <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
+ <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
+ <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
+ <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
+ <value value="82" name="A7XX_HLSQ_INST_RAM"/>
+ <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
+ <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
+ <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
+ <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
+ <value value="93" name="A7XX_HLSQ_STPROC_META"/>
+ <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
+ <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
+ <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
+ <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
+ <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
+ <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
+</enum>
+
+<enum name="a6xx_debugbus_id">
+ <value value="0x1" name="A6XX_DBGBUS_CP"/>
+ <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
+ <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
+ <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
+ <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
+ <value value="0x6" name="A6XX_DBGBUS_DPM"/>
+ <value value="0x7" name="A6XX_DBGBUS_TESS"/>
+ <value value="0x8" name="A6XX_DBGBUS_PC"/>
+ <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
+ <value value="0xa" name="A6XX_DBGBUS_VPC"/>
+ <value value="0xb" name="A6XX_DBGBUS_TSE"/>
+ <value value="0xc" name="A6XX_DBGBUS_RAS"/>
+ <value value="0xd" name="A6XX_DBGBUS_VSC"/>
+ <value value="0xe" name="A6XX_DBGBUS_COM"/>
+ <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
+ <value value="0x11" name="A6XX_DBGBUS_A2D"/>
+ <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
+ <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
+ <value value="0x14" name="A6XX_DBGBUS_RBP"/>
+ <value value="0x15" name="A6XX_DBGBUS_DCS"/>
+ <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
+ <value value="0x17" name="A6XX_DBGBUS_CX"/>
+ <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
+ <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
+ <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
+ <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
+ <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
+ <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
+ <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
+ <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
+ <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
+ <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
+ <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
+ <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
+ <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
+ <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
+ <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
+ <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
+ <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
+ <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
+ <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
+ <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
+ <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
+ <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
+ <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
+ <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
+ <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
+ <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
+ <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
+ <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
+ <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
+ <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
+ <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
+ <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
+</enum>
+
+<enum name="a7xx_state_location">
+ <value value="0" name="A7XX_HLSQ_STATE"/>
+ <value value="1" name="A7XX_HLSQ_DP"/>
+ <value value="2" name="A7XX_SP_TOP"/>
+ <value value="3" name="A7XX_USPTP"/>
+ <value value="4" name="A7XX_HLSQ_DP_STR"/>
+</enum>
+
+<enum name="a7xx_pipe">
+ <value value="0" name="A7XX_PIPE_NONE"/>
+ <value value="1" name="A7XX_PIPE_BR"/>
+ <value value="2" name="A7XX_PIPE_BV"/>
+ <value value="3" name="A7XX_PIPE_LPAC"/>
+</enum>
+
+<enum name="a7xx_cluster">
+ <value value="0" name="A7XX_CLUSTER_NONE"/>
+ <value value="1" name="A7XX_CLUSTER_FE"/>
+ <value value="2" name="A7XX_CLUSTER_SP_VS"/>
+ <value value="3" name="A7XX_CLUSTER_PC_VS"/>
+ <value value="4" name="A7XX_CLUSTER_GRAS"/>
+ <value value="5" name="A7XX_CLUSTER_SP_PS"/>
+ <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
+ <value value="7" name="A7XX_CLUSTER_PS"/>
+</enum>
+
+<enum name="a7xx_debugbus_id">
+ <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
+ <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
+ <value value="3" name="A7XX_DBGBUS_RBBM"/>
+ <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
+ <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
+ <value value="7" name="A7XX_DBGBUS_HLSQ"/>
+ <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
+ <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
+ <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
+ <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
+ <value value="17" name="A7XX_DBGBUS_PC_BR"/>
+ <value value="18" name="A7XX_DBGBUS_PC_BV"/>
+ <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
+ <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
+ <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
+ <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
+ <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
+ <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
+ <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
+ <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
+ <value value="37" name="A7XX_DBGBUS_VSC"/>
+ <value value="39" name="A7XX_DBGBUS_COM_0"/>
+ <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
+ <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
+ <value value="47" name="A7XX_DBGBUS_UFC_0"/>
+ <value value="48" name="A7XX_DBGBUS_UFC_1"/>
+ <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
+ <value value="59" name="A7XX_DBGBUS_DBGC"/>
+ <value value="60" name="A7XX_DBGBUS_CX"/>
+ <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
+ <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
+ <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
+ <value value="66" name="A7XX_DBGBUS_LARC"/>
+ <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="70" name="A7XX_DBGBUS_RB_0"/>
+ <value value="71" name="A7XX_DBGBUS_RB_1"/>
+ <value value="72" name="A7XX_DBGBUS_RB_2"/>
+ <value value="73" name="A7XX_DBGBUS_RB_3"/>
+ <value value="74" name="A7XX_DBGBUS_RB_4"/>
+ <value value="75" name="A7XX_DBGBUS_RB_5"/>
+ <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="106" name="A7XX_DBGBUS_CCU_0"/>
+ <value value="107" name="A7XX_DBGBUS_CCU_1"/>
+ <value value="108" name="A7XX_DBGBUS_CCU_2"/>
+ <value value="109" name="A7XX_DBGBUS_CCU_3"/>
+ <value value="110" name="A7XX_DBGBUS_CCU_4"/>
+ <value value="111" name="A7XX_DBGBUS_CCU_5"/>
+ <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
+ <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
+ <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
+ <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
+ <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
+ <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
+ <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
+ <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
+ <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
+ <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
+ <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
+ <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
+ <value value="234" name="A7XX_DBGBUS_USP_0"/>
+ <value value="235" name="A7XX_DBGBUS_USP_1"/>
+ <value value="236" name="A7XX_DBGBUS_USP_2"/>
+ <value value="237" name="A7XX_DBGBUS_USP_3"/>
+ <value value="238" name="A7XX_DBGBUS_USP_4"/>
+ <value value="239" name="A7XX_DBGBUS_USP_5"/>
+ <value value="266" name="A7XX_DBGBUS_TP_0"/>
+ <value value="267" name="A7XX_DBGBUS_TP_1"/>
+ <value value="268" name="A7XX_DBGBUS_TP_2"/>
+ <value value="269" name="A7XX_DBGBUS_TP_3"/>
+ <value value="270" name="A7XX_DBGBUS_TP_4"/>
+ <value value="271" name="A7XX_DBGBUS_TP_5"/>
+ <value value="272" name="A7XX_DBGBUS_TP_6"/>
+ <value value="273" name="A7XX_DBGBUS_TP_7"/>
+ <value value="274" name="A7XX_DBGBUS_TP_8"/>
+ <value value="275" name="A7XX_DBGBUS_TP_9"/>
+ <value value="276" name="A7XX_DBGBUS_TP_10"/>
+ <value value="277" name="A7XX_DBGBUS_TP_11"/>
+ <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
+ <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
+ <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
+ <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
+ <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
+ <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
+ <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
+ <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
+ <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
+ <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
+ <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
+ <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
+ <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
+ <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
+ <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
+ <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
+ <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
+ <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
+ <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
+ <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
+ <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
+ <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
+ <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
+ <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
+ <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
+ <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
+ <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
+ <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
+ <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
+</enum>
+
+<enum name="a6xx_cp_perfcounter_select">
+ <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="PERF_CP_SQE_IDLE"/>
+ <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="PERF_CP_PM4_DATA"/>
+ <value value="46" name="PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
+</enum>
+
+<enum name="a6xx_rbbm_perfcounter_select">
+ <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a6xx_pc_perfcounter_select">
+ <value value="0" name="PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
+ <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
+ <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="16" name="PERF_PC_INSTANCES"/>
+ <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
+ <value value="18" name="PERF_PC_DEAD_PRIM"/>
+ <value value="19" name="PERF_PC_LIVE_PRIM"/>
+ <value value="20" name="PERF_PC_VERTEX_HITS"/>
+ <value value="21" name="PERF_PC_IA_VERTICES"/>
+ <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
+ <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
+ <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
+ <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
+ <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
+ <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
+ <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
+ <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
+ <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
+ <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
+ <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
+ <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
+ <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
+ <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
+ <value value="38" name="PERF_PC_TSE_VERTEX"/>
+ <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
+</enum>
+
+<enum name="a6xx_vfd_perfcounter_select">
+ <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
+</enum>
+
+<enum name="a6xx_hlsq_perfcounter_select">
+ <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
+ <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
+ <value value="8" name="PERF_HLSQ_QUADS"/>
+ <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
+ <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="PERF_HLSQ_PIXELS"/>
+ <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+</enum>
+
+<enum name="a6xx_vpc_perfcounter_select">
+ <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
+ <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
+</enum>
+
+<enum name="a6xx_tse_perfcounter_select">
+ <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="PERF_TSE_CINVOCATION"/>
+ <value value="16" name="PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a6xx_ras_perfcounter_select">
+ <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="PERF_RAS_8X4_TILES"/>
+ <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="PERF_RAS_BLOCKS"/>
+</enum>
+
+<enum name="a6xx_uche_perfcounter_select">
+ <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="PERF_UCHE_EVICTS"/>
+ <value value="19" name="PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
+</enum>
+
+<enum name="a6xx_tp_perfcounter_select">
+ <value value="0" name="PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
+ <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
+ <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="PERF_TP_QUADS_1D"/>
+ <value value="19" name="PERF_TP_QUADS_2D"/>
+ <value value="20" name="PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="PERF_TP_QUADS_3D"/>
+ <value value="22" name="PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
+ <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
+</enum>
+
+<enum name="a6xx_sp_perfcounter_select">
+ <value value="0" name="PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="PERF_SP_GPR_READ"/>
+ <value value="58" name="PERF_SP_GPR_WRITE"/>
+ <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="PERF_SP_WORKING_EU"/>
+ <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
+</enum>
+
+<enum name="a6xx_rb_perfcounter_select">
+ <value value="0" name="PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="PERF_RB_Z_READ"/>
+ <value value="13" name="PERF_RB_Z_WRITE"/>
+ <value value="14" name="PERF_RB_C_READ"/>
+ <value value="15" name="PERF_RB_C_WRITE"/>
+ <value value="16" name="PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="PERF_RB_Z_PASS"/>
+ <value value="18" name="PERF_RB_Z_FAIL"/>
+ <value value="19" name="PERF_RB_S_FAIL"/>
+ <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="PERF_RB_3D_PIXELS"/>
+ <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
+</enum>
+
+<enum name="a6xx_vsc_perfcounter_select">
+ <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VSC_EOT_NUM"/>
+ <value value="4" name="PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a6xx_ccu_perfcounter_select">
+ <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
+ <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
+ <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="9" name="PERF_CCU_GMEM_READ"/>
+ <value value="10" name="PERF_CCU_GMEM_WRITE"/>
+ <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
+ <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
+ <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
+ <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
+ <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
+ <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
+ <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
+ <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
+ <value value="27" name="PERF_CCU_2D_RD_REQ"/>
+ <value value="28" name="PERF_CCU_2D_WR_REQ"/>
+</enum>
+
+<enum name="a6xx_lrz_perfcounter_select">
+ <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
+ <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
+ <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
+ <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
+</enum>
+
+<enum name="a6xx_cmp_perfcounter_select">
+ <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
+ <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
+ <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
+ <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
+ <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
+ <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
+ <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
+ <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
+ <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
+ <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
+ <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
+</enum>
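+
+<!--
+The enums above are the per-block performance counter select values.  A
+minimal sketch (not the actual kernel or mesa code) of how such a counter
+is typically used: program the select value into the block's *_PERFCTR_*_SEL
+register and read the accumulated count back from the matching
+RBBM_PERFCTR_* pair defined further below (stride 2, presumably the LO/HI
+halves of a 64-bit counter).  The gpu_write()/gpu_read() helpers and the
+CCU select register offset are assumed placeholders, not taken from this
+file; enabling via RBBM_PERFCTR_CNTL and any load/clear sequencing is
+omitted:
+
+    #include <stdint.h>
+
+    void gpu_write(uint32_t reg, uint32_t value);   /* hypothetical MMIO helpers */
+    uint32_t gpu_read(uint32_t reg);
+
+    /* Select PERF_CCU_DEPTH_BLOCKS (4) on CCU counter 0 and read it back. */
+    static uint64_t sample_ccu_depth_blocks(void)
+    {
+        const uint32_t ccu_sel_0 = 0x0b60;          /* placeholder: CCU sel reg, not in this file */
+        const uint32_t rbbm_perfctr_ccu_0 = 0x045c; /* RBBM_PERFCTR_CCU[0] (A6XX) */
+
+        gpu_write(ccu_sel_0, 4 /* PERF_CCU_DEPTH_BLOCKS */);
+        uint32_t lo = gpu_read(rbbm_perfctr_ccu_0);
+        uint32_t hi = gpu_read(rbbm_perfctr_ccu_0 + 1);
+        return ((uint64_t)hi << 32) | lo;
+    }
+-->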
+
+<!--
+Used in a6xx_2d_blit_cntl.  The value mostly seems to correlate to the
+component type/size, so it presumably relates to the internal format used
+for blending.  The one exception is that 16b unorm and 32b float use the
+same value... maybe 16b unorm is uncommon enough that it was just easier
+to upconvert to 32b float internally.
+
+ 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
+16b unorm: 4
+
+32b int: 7
+16b int: 6
+ 8b int: 5
+
+32b float: 4
+16b float: 3
+ -->
+<enum name="a6xx_2d_ifmt">
+ <value value="0x10" name="R2D_UNORM8"/>
+ <value value="0x7" name="R2D_INT32"/>
+ <value value="0x6" name="R2D_INT16"/>
+ <value value="0x5" name="R2D_INT8"/>
+ <value value="0x4" name="R2D_FLOAT32"/>
+ <value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x1" name="R2D_UNORM8_SRGB"/>
+ <value value="0x0" name="R2D_RAW"/>
+</enum>
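+
+<!--
+A minimal sketch of the mapping described in the note above, choosing an
+a6xx_2d_ifmt value from a (hypothetical) component type/size description.
+The boolean/bits parameters are illustrative placeholders rather than a
+real driver structure, and the enum names are assumed to come from the
+headers generated from this file:
+
+    #include <stdbool.h>
+
+    enum a6xx_2d_ifmt pick_2d_ifmt(bool is_int, bool is_float, bool is_srgb,
+                                   unsigned bits)
+    {
+        if (is_int)
+            return bits == 32 ? R2D_INT32 :
+                   bits == 16 ? R2D_INT16 : R2D_INT8;
+        if (is_float)
+            return bits == 32 ? R2D_FLOAT32 : R2D_FLOAT16;
+        if (is_srgb)
+            return R2D_UNORM8_SRGB;
+        /* per the note above, 16b unorm shares the 32b float encoding */
+        if (bits == 16)
+            return R2D_FLOAT32;
+        return bits == 8 ? R2D_UNORM8 : R2D_RAW;
+    }
+-->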
+
+<enum name="a6xx_ztest_mode">
+ <doc>Allow early z-test and early-lrz (if applicable)</doc>
+ <value value="0x0" name="A6XX_EARLY_Z"/>
+ <doc>Disable early z-test and early-lrz test (if applicable)</doc>
+ <value value="0x1" name="A6XX_LATE_Z"/>
+ <doc>
+ A special mode that allows the early-lrz test but disables
+ the early-z test.  This might sound a bit odd, since the
+ lrz test happens before the z test, but as long as a couple
+ of conditions are maintained it allows using the lrz test in
+ cases where the fragment shader has kill/discard:
+
+ 1) Disable lrz-write in cases where it is uncertain during
+    the binning pass whether a fragment will pass, i.e. if the
+    frag shader has kill, writes z, or alpha/stencil test is
+    enabled.  (For correctness, lrz-write must be disabled
+    when blend is enabled.)  This is analogous to how a
+    z-prepass works.
+
+ 2) Disable lrz-write and test if a depth-test direction
+    reversal is detected.  Due to condition (1), the contents
+    of the lrz buffer are a conservative estimate of the
+    depth buffer during the draw pass, meaning that geometry
+    we know for certain will not be visible will fail the
+    lrz test, while geometry that may be visible (or
+    contributes to blend) will pass it.
+
+ This allows us to keep the early-lrz test in cases where the
+ frag shader does not write z (i.e. we know the z value before
+ the FS runs) and has no side effects (image/ssbo writes, etc),
+ but does have kill/discard.  This turns out to be a common
+ enough case that it is worth keeping the early-lrz test
+ against the conservative lrz buffer to discard fragments
+ that we know will definitely not be visible.
+ </doc>
+ <value value="0x2" name="A6XX_EARLY_LRZ_LATE_Z"/>
+ <doc>Not a real hw value, used internally by mesa</doc>
+ <value value="0x3" name="A6XX_INVALID_ZTEST"/>
+</enum>
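+
+<!--
+A minimal sketch (with made-up input flags, not mesa's actual state) of how
+the mode described above might be chosen: early-z when the fragment shader
+is "safe", EARLY_LRZ_LATE_Z when its only complication is kill/discard, and
+late-z otherwise:
+
+    #include <stdbool.h>
+
+    enum a6xx_ztest_mode pick_ztest_mode(bool fs_writes_z, bool fs_has_kill,
+                                         bool fs_has_side_effects)
+    {
+        if (fs_writes_z || fs_has_side_effects)
+            return A6XX_LATE_Z;
+        if (fs_has_kill)
+            return A6XX_EARLY_LRZ_LATE_Z;
+        return A6XX_EARLY_Z;
+    }
+-->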
+
+<enum name="a6xx_tess_spacing">
+ <value value="0x0" name="TESS_EQUAL"/>
+ <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
+ <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
+</enum>
+<enum name="a6xx_tess_output">
+ <value value="0x0" name="TESS_POINTS"/>
+ <value value="0x1" name="TESS_LINES"/>
+ <value value="0x2" name="TESS_CW_TRIS"/>
+ <value value="0x3" name="TESS_CCW_TRIS"/>
+</enum>
+
+<domain name="A6XX" width="32" prefix="variant" varset="chip">
+ <bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
+ <bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
+ <bitfield name="CP_AHB_ERROR" pos="1" type="boolean"/>
+ <bitfield name="CP_IPC_INTR_0" pos="4" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_IPC_INTR_1" pos="5" type="boolean" variants="A7XX-"/>
+ <bitfield name="RBBM_ATB_ASYNCFIFO_OVERFLOW" pos="6" type="boolean"/>
+ <bitfield name="RBBM_GPC_ERROR" pos="7" type="boolean"/>
+ <bitfield name="CP_SW" pos="8" type="boolean"/>
+ <bitfield name="CP_HW_ERROR" pos="9" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_DEPTH_TS" pos="10" type="boolean"/>
+ <bitfield name="CP_CCU_FLUSH_COLOR_TS" pos="11" type="boolean"/>
+ <bitfield name="CP_CCU_RESOLVE_TS" pos="12" type="boolean"/>
+ <bitfield name="CP_IB2" pos="13" type="boolean"/>
+ <bitfield name="CP_IB1" pos="14" type="boolean"/>
+ <bitfield name="CP_RB" pos="15" type="boolean" variants="A6XX"/>
+ <!-- Apparently the same bit as CP_RB above, but with a different name on A7XX: -->
+ <bitfield name="PM4CPINTERRUPT" pos="15" type="boolean" variants="A7XX-"/>
+ <bitfield name="PM4CPINTERRUPTLPAC" pos="16" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_RB_DONE_TS" pos="17" type="boolean"/>
+ <bitfield name="CP_WT_DONE_TS" pos="18" type="boolean"/>
+ <bitfield name="CP_CACHE_FLUSH_TS" pos="20" type="boolean"/>
+ <bitfield name="CP_CACHE_FLUSH_TS_LPAC" pos="21" type="boolean" variants="A7XX-"/>
+ <bitfield name="RBBM_ATB_BUS_OVERFLOW" pos="22" type="boolean"/>
+ <bitfield name="RBBM_HANG_DETECT" pos="23" type="boolean"/>
+ <bitfield name="UCHE_OOB_ACCESS" pos="24" type="boolean"/>
+ <bitfield name="UCHE_TRAP_INTR" pos="25" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_0" pos="26" type="boolean"/>
+ <bitfield name="DEBBUS_INTR_1" pos="27" type="boolean"/>
+ <bitfield name="TSBWRITEERROR" pos="28" type="boolean" variants="A7XX-"/>
+ <bitfield name="SWFUSEVIOLATION" pos="29" type="boolean" variants="A7XX-"/>
+ <bitfield name="ISDB_CPU_IRQ" pos="30" type="boolean"/>
+ <bitfield name="ISDB_UNDER_DEBUG" pos="31" type="boolean"/>
+ </bitset>
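+
+ <!--
+ A minimal sketch of building an interrupt mask from the bit positions
+ above, using raw shifts (the per-bit defines generated from this file are
+ assumed but not shown here); the particular set of bits enabled is purely
+ illustrative:
+
+     #include <stdint.h>
+
+     static uint32_t a6xx_int_0_mask(void)
+     {
+         return (1u << 1)  |   /* CP_AHB_ERROR */
+                (1u << 7)  |   /* RBBM_GPC_ERROR */
+                (1u << 9)  |   /* CP_HW_ERROR */
+                (1u << 14) |   /* CP_IB1 */
+                (1u << 20) |   /* CP_CACHE_FLUSH_TS */
+                (1u << 23) |   /* RBBM_HANG_DETECT */
+                (1u << 24);    /* UCHE_OOB_ACCESS */
+     }
+ -->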
+
+ <!--
+ Note the _LPAC bits probably *actually* first appeared in a660, but the
+ _BV bits are new in a7xx
+ -->
+ <bitset name="A6XX_CP_INT" varset="chip">
+ <bitfield name="CP_OPCODE_ERROR" pos="0" type="boolean"/>
+ <bitfield name="CP_UCODE_ERROR" pos="1" type="boolean"/>
+ <bitfield name="CP_HW_FAULT_ERROR" pos="2" type="boolean"/>
+ <bitfield name="CP_REGISTER_PROTECTION_ERROR" pos="4" type="boolean"/>
+ <bitfield name="CP_AHB_ERROR" pos="5" type="boolean"/>
+ <bitfield name="CP_VSD_PARITY_ERROR" pos="6" type="boolean"/>
+ <bitfield name="CP_ILLEGAL_INSTR_ERROR" pos="7" type="boolean"/>
+ <bitfield name="CP_OPCODE_ERROR_LPAC" pos="8" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_UCODE_ERROR_LPAC" pos="9" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_HW_FAULT_ERROR_LPAC" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_REGISTER_PROTECTION_ERROR_LPAC" pos="11" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_ILLEGAL_INSTR_ERROR_LPAC" pos="12" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_OPCODE_ERROR_BV" pos="13" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_UCODE_ERROR_BV" pos="14" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_HW_FAULT_ERROR_BV" pos="15" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_REGISTER_PROTECTION_ERROR_BV" pos="16" type="boolean" variants="A7XX-"/>
+ <bitfield name="CP_ILLEGAL_INSTR_ERROR_BV" pos="17" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <reg64 offset="0x0800" name="CP_RB_BASE"/>
+ <reg32 offset="0x0802" name="CP_RB_CNTL"/>
+ <reg64 offset="0x0804" name="CP_RB_RPTR_ADDR"/>
+ <reg32 offset="0x0806" name="CP_RB_RPTR"/>
+ <reg32 offset="0x0807" name="CP_RB_WPTR"/>
+ <reg32 offset="0x0808" name="CP_SQE_CNTL"/>
+ <reg32 offset="0x0812" name="CP_CP2GMU_STATUS">
+ <bitfield name="IFPC" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0821" name="CP_HW_FAULT"/>
+ <reg32 offset="0x0823" name="CP_INTERRUPT_STATUS" type="A6XX_CP_INT"/>
+ <reg32 offset="0x0824" name="CP_PROTECT_STATUS"/>
+ <reg32 offset="0x0825" name="CP_STATUS_1"/>
+ <reg64 offset="0x0830" name="CP_SQE_INSTR_BASE"/>
+ <reg32 offset="0x0840" name="CP_MISC_CNTL"/>
+ <reg32 offset="0x0844" name="CP_APRIV_CNTL">
+ <!-- Crashdumper writes -->
+ <bitfield pos="6" name="CDWRITE" type="boolean"/>
+ <!-- Crashdumper reads -->
+ <bitfield pos="5" name="CDREAD" type="boolean"/>
+
+ <!-- 4 is unknown -->
+
+ <!-- RPTR shadow writes -->
+ <bitfield pos="3" name="RBRPWB" type="boolean"/>
+ <!-- Memory accesses from PM4 packets in the ringbuffer -->
+ <bitfield pos="2" name="RBPRIVLEVEL" type="boolean"/>
+ <!-- Ringbuffer reads -->
+ <bitfield pos="1" name="RBFETCH" type="boolean"/>
+ <!-- Instruction cache fetches -->
+ <bitfield pos="0" name="ICACHE" type="boolean"/>
+ </reg32>
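+
+ <!--
+ A minimal sketch of composing CP_APRIV_CNTL from the bits documented
+ above; which accesses to mark is purely illustrative:
+
+     #include <stdint.h>
+
+     static uint32_t cp_apriv_cntl(void)
+     {
+         return (1u << 0) |   /* ICACHE: instruction cache fetches */
+                (1u << 1) |   /* RBFETCH: ringbuffer reads */
+                (1u << 3) |   /* RBRPWB: RPTR shadow writes */
+                (1u << 5) |   /* CDREAD: crashdumper reads */
+                (1u << 6);    /* CDWRITE: crashdumper writes */
+     }
+ -->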
+ <!-- Preemptions taking longer than this threshold increment PERF_CP_LONG_PREEMPTIONS: -->
+ <reg32 offset="0x08C0" name="CP_PREEMPT_THRESHOLD"/>
+ <!-- all the threshold values seem to be in units of quad-dwords: -->
+ <reg32 offset="0x08C1" name="CP_ROQ_THRESHOLDS_1">
+ <doc>
+ b0..7  identifies where MRB data starts (and RB data ends)
+ b8..15 identifies where VSD data starts (and MRB data ends)
+ b16..23 identifies where IB1 data starts (and VSD data ends)
+ b24..31 identifies where IB2 data starts (and IB1 data ends)
+ </doc>
+ <bitfield name="MRB_START" low="0" high="7" shr="2"/>
+ <bitfield name="VSD_START" low="8" high="15" shr="2"/>
+ <bitfield name="IB1_START" low="16" high="23" shr="2"/>
+ <bitfield name="IB2_START" low="24" high="31" shr="2"/>
+ </reg32>
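+
+ <!--
+ A minimal sketch of packing CP_ROQ_THRESHOLDS_1 per the doc above.  The
+ shr="2" on each field means the programmed value is the dword offset
+ divided by 4, i.e. in quad-dword units:
+
+     #include <stdint.h>
+
+     static uint32_t cp_roq_thresholds_1(uint32_t mrb_start_dwords,
+                                         uint32_t vsd_start_dwords,
+                                         uint32_t ib1_start_dwords,
+                                         uint32_t ib2_start_dwords)
+     {
+         return ((mrb_start_dwords >> 2) & 0xff) |
+                ((vsd_start_dwords >> 2) & 0xff) << 8 |
+                ((ib1_start_dwords >> 2) & 0xff) << 16 |
+                ((ib2_start_dwords >> 2) & 0xff) << 24;
+     }
+ -->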
+ <reg32 offset="0x08C2" name="CP_ROQ_THRESHOLDS_2">
+ <doc>
+ low bits identify where CP_SET_DRAW_STATE stateobj
+ processing starts (and IB2 data ends). I'm guessing
+ b8 is part of this since (from downstream kgsl):
+
+ /* ROQ sizes are twice as big on a640/a680 than on a630 */
+ if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) {
+ kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
+ } ...
+ </doc>
+ <bitfield name="SDS_START" low="0" high="8" shr="2"/>
+ <!-- total ROQ size: -->
+ <bitfield name="ROQ_SIZE" low="16" high="31" shr="2"/>
+ </reg32>
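+
+ <!--
+ As a worked example, decoding the a640/a680 values quoted above against
+ the field layouts of the two registers (remembering the quad-dword units
+ implied by shr="2"):
+
+     CP_ROQ_THRESHOLDS_1 = 0x8040362C:
+         MRB_START = 0x2c * 4 = 0x0b0 dwords
+         VSD_START = 0x36 * 4 = 0x0d8 dwords
+         IB1_START = 0x40 * 4 = 0x100 dwords
+         IB2_START = 0x80 * 4 = 0x200 dwords
+     CP_ROQ_THRESHOLDS_2 = 0x02000140:
+         SDS_START = 0x140 * 4 = 0x500 dwords
+         ROQ_SIZE  = 0x200 * 4 = 0x800 dwords
+ -->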
+ <reg32 offset="0x08C3" name="CP_MEM_POOL_SIZE"/>
+ <reg32 offset="0x0841" name="CP_CHICKEN_DBG"/>
+ <reg32 offset="0x0842" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0843" name="CP_DBG_ECO_CNTL"/>
+ <reg32 offset="0x084F" name="CP_PROTECT_CNTL">
+ <bitfield pos="3" name="LAST_SPAN_INF_RANGE" type="boolean"/>
+ <bitfield pos="1" name="ACCESS_FAULT_ON_VIOL_EN" type="boolean"/>
+ <bitfield pos="0" name="ACCESS_PROT_EN" type="boolean"/>
+ </reg32>
+
+ <array offset="0x0883" name="CP_SCRATCH" stride="1" length="8">
+ <reg32 offset="0x0" name="REG" type="uint"/>
+ </array>
+ <array offset="0x0850" name="CP_PROTECT" stride="1" length="32">
+ <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
+ </array>
+
+ <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/>
+ <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/>
+ <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/>
+ <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/>
+ <reg64 offset="0x08A7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR"/>
+ <reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/>
+ <array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/>
+ <array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/>
+ <reg64 offset="0x0900" name="CP_CRASH_SCRIPT_BASE"/>
+ <reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/>
+ <reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/>
+ <reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/>
+ <reg32 offset="0x0909" name="CP_SQE_STAT_DATA"/>
+ <reg32 offset="0x090A" name="CP_DRAW_STATE_ADDR"/>
+ <reg32 offset="0x090B" name="CP_DRAW_STATE_DATA"/>
+ <reg32 offset="0x090C" name="CP_ROQ_DBG_ADDR"/>
+ <reg32 offset="0x090D" name="CP_ROQ_DBG_DATA"/>
+ <reg32 offset="0x090E" name="CP_MEM_POOL_DBG_ADDR"/>
+ <reg32 offset="0x090F" name="CP_MEM_POOL_DBG_DATA"/>
+ <reg32 offset="0x0910" name="CP_SQE_UCODE_DBG_ADDR"/>
+ <reg32 offset="0x0911" name="CP_SQE_UCODE_DBG_DATA"/>
+ <reg64 offset="0x0928" name="CP_IB1_BASE"/>
+ <reg32 offset="0x092A" name="CP_IB1_REM_SIZE"/>
+ <reg64 offset="0x092B" name="CP_IB2_BASE"/>
+ <reg32 offset="0x092D" name="CP_IB2_REM_SIZE"/>
+ <!-- SDS == CP_SET_DRAW_STATE: -->
+ <reg64 offset="0x092e" name="CP_SDS_BASE"/>
+ <reg32 offset="0x0930" name="CP_SDS_REM_SIZE"/>
+ <!-- MRB == MEM_READ_ADDR/$addr in SQE firmware -->
+ <reg64 offset="0x0931" name="CP_MRB_BASE"/>
+ <reg32 offset="0x0933" name="CP_MRB_REM_SIZE"/>
+ <!--
+ VSD == Visibility Stream Decode
+ This is used by CP to read the draw stream and skip empty draws
+ -->
+ <reg64 offset="0x0934" name="CP_VSD_BASE"/>
+
+ <bitset name="a6xx_roq_stat" inline="yes">
+ <bitfield name="RPTR" low="0" high="9"/>
+ <bitfield name="WPTR" low="16" high="25"/>
+ </bitset>
+ <reg32 offset="0x0939" name="CP_ROQ_RB_STAT" type="a6xx_roq_stat"/>
+ <reg32 offset="0x093a" name="CP_ROQ_IB1_STAT" type="a6xx_roq_stat"/>
+ <reg32 offset="0x093b" name="CP_ROQ_IB2_STAT" type="a6xx_roq_stat"/>
+ <reg32 offset="0x093c" name="CP_ROQ_SDS_STAT" type="a6xx_roq_stat"/>
+ <reg32 offset="0x093d" name="CP_ROQ_MRB_STAT" type="a6xx_roq_stat"/>
+ <reg32 offset="0x093e" name="CP_ROQ_VSD_STAT" type="a6xx_roq_stat"/>
+
+ <reg32 offset="0x0943" name="CP_IB1_DWORDS"/>
+ <reg32 offset="0x0944" name="CP_IB2_DWORDS"/>
+ <reg32 offset="0x0945" name="CP_SDS_DWORDS"/>
+ <reg32 offset="0x0946" name="CP_MRB_DWORDS"/>
+ <reg32 offset="0x0947" name="CP_VSD_DWORDS"/>
+
+ <reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB">
+ <doc>number of remaining dwords incl current dword being consumed?</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x0949" name="CP_ROQ_AVAIL_IB1">
+ <doc>number of remaining dwords incl current dword being consumed?</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x094a" name="CP_ROQ_AVAIL_IB2">
+ <doc>number of remaining dwords incl current dword being consumed?</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x094b" name="CP_ROQ_AVAIL_SDS">
+ <doc>number of remaining dwords incl current dword being consumed?</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x094c" name="CP_ROQ_AVAIL_MRB">
+ <doc>number of dwords that have already been read but haven't been consumed by $addr</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="0x094d" name="CP_ROQ_AVAIL_VSD">
+ <doc>number of remaining dwords incl current dword being consumed?</doc>
+ <bitfield name="REM" low="16" high="31"/>
+ </reg32>
+
+ <bitset name="a7xx_aperture_cntl" inline="yes">
+ <bitfield name="PIPE" low="12" high="13" type="a7xx_pipe"/>
+ <bitfield name="CLUSTER" low="8" high="10" type="a7xx_cluster"/>
+ <bitfield name="CONTEXT" low="4" high="5"/>
+ </bitset>
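+
+ <!--
+ A minimal sketch of packing an a7xx_aperture_cntl value from raw pipe,
+ cluster and context indices (the a7xx_pipe/a7xx_cluster enums are defined
+ elsewhere in this file, so plain integers are used here):
+
+     #include <stdint.h>
+
+     static uint32_t a7xx_aperture(uint32_t pipe, uint32_t cluster,
+                                   uint32_t context)
+     {
+         return ((pipe & 0x3) << 12) |
+                ((cluster & 0x7) << 8) |
+                ((context & 0x3) << 4);
+     }
+ -->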
+ <reg64 offset="0x0980" name="CP_ALWAYS_ON_COUNTER"/>
+ <reg32 offset="0x098D" name="CP_AHB_CNTL"/>
+ <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
+ <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/>
+ <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
+ <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/>
+
+ <reg32 offset="0x0a61" name="CP_BV_PROTECT_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x0a64" name="CP_BV_HW_FAULT" variants="A7XX-"/>
+ <reg32 offset="0x0a81" name="CP_BV_DRAW_STATE_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a82" name="CP_BV_DRAW_STATE_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a83" name="CP_BV_ROQ_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a84" name="CP_BV_ROQ_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a85" name="CP_BV_SQE_UCODE_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a86" name="CP_BV_SQE_UCODE_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a87" name="CP_BV_SQE_STAT_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a88" name="CP_BV_SQE_STAT_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a96" name="CP_BV_MEM_POOL_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/>
+ <reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/>
+
+ <reg32 offset="0x0a9a" name="CP_RESOURCE_TBL_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a9b" name="CP_RESOURCE_TBL_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/>
+
+ <reg32 offset="0x0b0a" name="CP_LPAC_DRAW_STATE_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0b0b" name="CP_LPAC_DRAW_STATE_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0b0c" name="CP_LPAC_ROQ_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0b27" name="CP_SQE_AC_UCODE_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0b28" name="CP_SQE_AC_UCODE_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0b29" name="CP_SQE_AC_STAT_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0b2a" name="CP_SQE_AC_STAT_DATA" variants="A7XX-"/>
+
+ <reg32 offset="0x0b31" name="CP_LPAC_APRIV_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0B34" name="CP_LPAC_PROG_FIFO_SIZE"/>
+ <reg32 offset="0x0b35" name="CP_LPAC_ROQ_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0b36" name="CP_LPAC_FIFO_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0b40" name="CP_LPAC_FIFO_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0b81" name="CP_LPAC_SQE_CNTL"/>
+ <reg64 offset="0x0b82" name="CP_LPAC_SQE_INSTR_BASE"/>
+
+ <reg64 offset="0x0b70" name="CP_AQE_INSTR_BASE_0" variants="A7XX-"/>
+ <reg64 offset="0x0b72" name="CP_AQE_INSTR_BASE_1" variants="A7XX-"/>
+ <reg32 offset="0x0b78" name="CP_AQE_APRIV_CNTL" variants="A7XX-"/>
+
+ <reg32 offset="0x0ba8" name="CP_AQE_ROQ_DBG_ADDR_0" variants="A7XX-"/>
+ <reg32 offset="0x0ba9" name="CP_AQE_ROQ_DBG_ADDR_1" variants="A7XX-"/>
+ <reg32 offset="0x0bac" name="CP_AQE_ROQ_DBG_DATA_0" variants="A7XX-"/>
+ <reg32 offset="0x0bad" name="CP_AQE_ROQ_DBG_DATA_1" variants="A7XX-"/>
+ <reg32 offset="0x0bb0" name="CP_AQE_UCODE_DBG_ADDR_0" variants="A7XX-"/>
+ <reg32 offset="0x0bb1" name="CP_AQE_UCODE_DBG_ADDR_1" variants="A7XX-"/>
+ <reg32 offset="0x0bb4" name="CP_AQE_UCODE_DBG_DATA_0" variants="A7XX-"/>
+ <reg32 offset="0x0bb5" name="CP_AQE_UCODE_DBG_DATA_1" variants="A7XX-"/>
+ <reg32 offset="0x0bb8" name="CP_AQE_STAT_ADDR_0" variants="A7XX-"/>
+ <reg32 offset="0x0bb9" name="CP_AQE_STAT_ADDR_1" variants="A7XX-"/>
+ <reg32 offset="0x0bbc" name="CP_AQE_STAT_DATA_0" variants="A7XX-"/>
+ <reg32 offset="0x0bbd" name="CP_AQE_STAT_DATA_1" variants="A7XX-"/>
+
+ <reg32 offset="0x0C01" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0018" name="RBBM_GPR0_CNTL"/>
+ <reg32 offset="0x0201" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK"/>
+ <reg32 offset="0x0210" name="RBBM_STATUS">
+ <bitfield pos="23" name="GPU_BUSY_IGN_AHB" type="boolean"/>
+ <bitfield pos="22" name="GPU_BUSY_IGN_AHB_CP" type="boolean"/>
+ <bitfield pos="21" name="HLSQ_BUSY" type="boolean"/>
+ <bitfield pos="20" name="VSC_BUSY" type="boolean"/>
+ <bitfield pos="19" name="TPL1_BUSY" type="boolean"/>
+ <bitfield pos="18" name="SP_BUSY" type="boolean"/>
+ <bitfield pos="17" name="UCHE_BUSY" type="boolean"/>
+ <bitfield pos="16" name="VPC_BUSY" type="boolean"/>
+ <bitfield pos="15" name="VFD_BUSY" type="boolean"/>
+ <bitfield pos="14" name="TESS_BUSY" type="boolean"/>
+ <bitfield pos="13" name="PC_VSD_BUSY" type="boolean"/>
+ <bitfield pos="12" name="PC_DCALL_BUSY" type="boolean"/>
+ <bitfield pos="11" name="COM_DCOM_BUSY" type="boolean"/>
+ <bitfield pos="10" name="LRZ_BUSY" type="boolean"/>
+ <bitfield pos="9" name="A2D_BUSY" type="boolean"/>
+ <bitfield pos="8" name="CCU_BUSY" type="boolean"/>
+ <bitfield pos="7" name="RB_BUSY" type="boolean"/>
+ <bitfield pos="6" name="RAS_BUSY" type="boolean"/>
+ <bitfield pos="5" name="TSE_BUSY" type="boolean"/>
+ <bitfield pos="4" name="VBIF_BUSY" type="boolean"/>
+ <bitfield pos="3" name="GFX_DBGC_BUSY" type="boolean"/>
+ <bitfield pos="2" name="CP_BUSY" type="boolean"/>
+ <bitfield pos="1" name="CP_AHB_BUSY_CP_MASTER" type="boolean"/>
+ <bitfield pos="0" name="CP_AHB_BUSY_CX_MASTER" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0211" name="RBBM_STATUS1"/>
+ <reg32 offset="0x0212" name="RBBM_STATUS2"/>
+ <reg32 offset="0x0213" name="RBBM_STATUS3">
+ <bitfield pos="24" name="SMMU_STALLED_ON_FAULT" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0215" name="RBBM_VBIF_GX_RESET_STATUS"/>
+
+ <reg32 offset="0x0260" name="RBBM_CLOCK_MODE_CP" variants="A7XX-"/>
+ <reg32 offset="0x0284" name="RBBM_CLOCK_MODE_BV_LRZ" variants="A7XX-"/>
+ <reg32 offset="0x0285" name="RBBM_CLOCK_MODE_BV_GRAS" variants="A7XX-"/>
+ <reg32 offset="0x0286" name="RBBM_CLOCK_MODE2_GRAS" variants="A7XX-"/>
+ <reg32 offset="0x0287" name="RBBM_CLOCK_MODE_BV_VFD" variants="A7XX-"/>
+ <reg32 offset="0x0288" name="RBBM_CLOCK_MODE_BV_GPC" variants="A7XX-"/>
+
+ <reg32 offset="0x02c0" name="RBBM_SW_FUSE_INT_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x02c1" name="RBBM_SW_FUSE_INT_MASK" variants="A7XX-"/>
+
+ <array offset="0x0400" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A6XX"/>
+ <array offset="0x041c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A6XX"/>
+ <array offset="0x0424" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A6XX"/>
+ <array offset="0x0434" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A6XX"/>
+ <array offset="0x0444" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A6XX"/>
+ <array offset="0x0450" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A6XX"/>
+ <array offset="0x045c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A6XX"/>
+ <array offset="0x0466" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A6XX"/>
+ <array offset="0x046e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A6XX"/>
+ <array offset="0x0476" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A6XX"/>
+ <array offset="0x048e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A6XX"/>
+ <array offset="0x04a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A6XX"/>
+ <array offset="0x04d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A6XX"/>
+ <array offset="0x04e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A6XX"/>
+ <array offset="0x04ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A6XX"/>
+ <array offset="0x04f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A6XX"/>
+
+ <array offset="0x0300" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A7XX-"/>
+ <array offset="0x031c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x0324" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A7XX-"/>
+ <array offset="0x0334" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A7XX-"/>
+ <array offset="0x0344" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A7XX-"/>
+ <array offset="0x0350" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A7XX-"/>
+ <array offset="0x035c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A7XX-"/>
+ <array offset="0x0366" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x036e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x0376" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A7XX-"/>
+ <array offset="0x038e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A7XX-"/>
+ <array offset="0x03a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A7XX-"/>
+ <array offset="0x03d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A7XX-"/>
+ <array offset="0x03e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A7XX-"/>
+ <array offset="0x03ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x03f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x03fa" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x0410" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A7XX-"/>
+ <array offset="0x041c" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A7XX-"/>
+ <array offset="0x042a" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A7XX-"/>
+ <array offset="0x0442" name="RBBM_PERFCTR2_TP" stride="2" length="6" variants="A7XX-"/>
+ <array offset="0x044e" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A7XX-"/>
+ <array offset="0x0460" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A7XX-"/>
+ <array offset="0x0470" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A7XX-"/>
+ <array offset="0x0480" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A7XX-"/>
+ <array offset="0x048c" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x0494" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A7XX-"/>
+ <array offset="0x049c" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A7XX-"/>
+
+ <reg32 offset="0x0500" name="RBBM_PERFCTR_CNTL"/>
+ <reg32 offset="0x0501" name="RBBM_PERFCTR_LOAD_CMD0"/>
+ <reg32 offset="0x0502" name="RBBM_PERFCTR_LOAD_CMD1"/>
+ <reg32 offset="0x0503" name="RBBM_PERFCTR_LOAD_CMD2"/>
+ <reg32 offset="0x0504" name="RBBM_PERFCTR_LOAD_CMD3"/>
+ <reg32 offset="0x0505" name="RBBM_PERFCTR_LOAD_VALUE_LO"/>
+ <reg32 offset="0x0506" name="RBBM_PERFCTR_LOAD_VALUE_HI"/>
+ <array offset="0x0507" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4"/>
+ <reg32 offset="0x050B" name="RBBM_PERFCTR_GPU_BUSY_MASKED"/>
+ <reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/>
+ <reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/>
+ <reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
+ <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/>
+
+ <!--
+ These registers aren't tied to perf counters.  They count
+ various geometry stats, for example the number of vertices
+ in, the number of primitives assembled, etc.
+ -->
+
+ <reg32 offset="0x0540" name="RBBM_PRIMCTR_0_LO"/> <!-- vs vertices in -->
+ <reg32 offset="0x0541" name="RBBM_PRIMCTR_0_HI"/>
+ <reg32 offset="0x0542" name="RBBM_PRIMCTR_1_LO"/> <!-- vs primitives out -->
+ <reg32 offset="0x0543" name="RBBM_PRIMCTR_1_HI"/>
+ <reg32 offset="0x0544" name="RBBM_PRIMCTR_2_LO"/> <!-- hs vertices in -->
+ <reg32 offset="0x0545" name="RBBM_PRIMCTR_2_HI"/>
+ <reg32 offset="0x0546" name="RBBM_PRIMCTR_3_LO"/> <!-- hs patches out -->
+ <reg32 offset="0x0547" name="RBBM_PRIMCTR_3_HI"/>
+ <reg32 offset="0x0548" name="RBBM_PRIMCTR_4_LO"/> <!-- dss vertices in -->
+ <reg32 offset="0x0549" name="RBBM_PRIMCTR_4_HI"/>
+ <reg32 offset="0x054a" name="RBBM_PRIMCTR_5_LO"/> <!-- ds primitives out -->
+ <reg32 offset="0x054b" name="RBBM_PRIMCTR_5_HI"/>
+ <reg32 offset="0x054c" name="RBBM_PRIMCTR_6_LO"/> <!-- gs primitives in -->
+ <reg32 offset="0x054d" name="RBBM_PRIMCTR_6_HI"/>
+ <reg32 offset="0x054e" name="RBBM_PRIMCTR_7_LO"/> <!-- gs primitives out -->
+ <reg32 offset="0x054f" name="RBBM_PRIMCTR_7_HI"/>
+ <reg32 offset="0x0550" name="RBBM_PRIMCTR_8_LO"/> <!-- gs primitives out -->
+ <reg32 offset="0x0551" name="RBBM_PRIMCTR_8_HI"/>
+ <reg32 offset="0x0552" name="RBBM_PRIMCTR_9_LO"/> <!-- raster primitives in -->
+ <reg32 offset="0x0553" name="RBBM_PRIMCTR_9_HI"/>
+ <reg32 offset="0x0554" name="RBBM_PRIMCTR_10_LO"/>
+ <reg32 offset="0x0555" name="RBBM_PRIMCTR_10_HI"/>
+
+ <reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/>
+ <reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/>
+ <reg32 offset="0xF802" name="RBBM_SECVID_TSB_TRUSTED_SIZE"/>
+ <reg32 offset="0xF803" name="RBBM_SECVID_TSB_CNTL"/>
+ <reg32 offset="0xF810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg64 offset="0xfc00" name="RBBM_SECVID_TSB_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x00010" name="RBBM_VBIF_CLIENT_QOS_CNTL"/>
+ <reg32 offset="0x00011" name="RBBM_GBIF_CLIENT_QOS_CNTL"/>
+ <reg32 offset="0x00016" name="RBBM_GBIF_HALT"/>
+ <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK"/>
+ <reg32 offset="0x0001c" name="RBBM_WAIT_FOR_GPU_IDLE_CMD">
+ <bitfield pos="0" name="WAIT_GPU_IDLE" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00016" name="RBBM_GBIF_HALT" variants="A7XX-"/>
+ <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK" variants="A7XX-"/>
+ <reg32 offset="0x0001f" name="RBBM_INTERFACE_HANG_INT_CNTL"/>
+ <reg32 offset="0x00037" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK"/>
+ <reg32 offset="0x00038" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK"/>
+ <reg32 offset="0x0003a" name="RBBM_INT_2_MASK" variants="A7XX-"/>
+ <reg32 offset="0x00042" name="RBBM_SP_HYST_CNT"/>
+ <reg32 offset="0x00043" name="RBBM_SW_RESET_CMD"/>
+ <reg32 offset="0x00044" name="RBBM_RAC_THRESHOLD_CNT"/>
+ <reg32 offset="0x00045" name="RBBM_BLOCK_SW_RESET_CMD"/>
+ <reg32 offset="0x00046" name="RBBM_BLOCK_SW_RESET_CMD2"/>
+ <reg32 offset="0x000ad" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A7XX-"/>
+ <reg32 offset="0x000ae" name="RBBM_CLOCK_CNTL"/>
+ <reg32 offset="0x000b0" name="RBBM_CLOCK_CNTL_SP0"/>
+ <reg32 offset="0x000b1" name="RBBM_CLOCK_CNTL_SP1"/>
+ <reg32 offset="0x000b2" name="RBBM_CLOCK_CNTL_SP2"/>
+ <reg32 offset="0x000b3" name="RBBM_CLOCK_CNTL_SP3"/>
+ <reg32 offset="0x000b4" name="RBBM_CLOCK_CNTL2_SP0"/>
+ <reg32 offset="0x000b5" name="RBBM_CLOCK_CNTL2_SP1"/>
+ <reg32 offset="0x000b6" name="RBBM_CLOCK_CNTL2_SP2"/>
+ <reg32 offset="0x000b7" name="RBBM_CLOCK_CNTL2_SP3"/>
+ <reg32 offset="0x000b8" name="RBBM_CLOCK_DELAY_SP0"/>
+ <reg32 offset="0x000b9" name="RBBM_CLOCK_DELAY_SP1"/>
+ <reg32 offset="0x000ba" name="RBBM_CLOCK_DELAY_SP2"/>
+ <reg32 offset="0x000bb" name="RBBM_CLOCK_DELAY_SP3"/>
+ <reg32 offset="0x000bc" name="RBBM_CLOCK_HYST_SP0"/>
+ <reg32 offset="0x000bd" name="RBBM_CLOCK_HYST_SP1"/>
+ <reg32 offset="0x000be" name="RBBM_CLOCK_HYST_SP2"/>
+ <reg32 offset="0x000bf" name="RBBM_CLOCK_HYST_SP3"/>
+ <reg32 offset="0x000c0" name="RBBM_CLOCK_CNTL_TP0"/>
+ <reg32 offset="0x000c1" name="RBBM_CLOCK_CNTL_TP1"/>
+ <reg32 offset="0x000c2" name="RBBM_CLOCK_CNTL_TP2"/>
+ <reg32 offset="0x000c3" name="RBBM_CLOCK_CNTL_TP3"/>
+ <reg32 offset="0x000c4" name="RBBM_CLOCK_CNTL2_TP0"/>
+ <reg32 offset="0x000c5" name="RBBM_CLOCK_CNTL2_TP1"/>
+ <reg32 offset="0x000c6" name="RBBM_CLOCK_CNTL2_TP2"/>
+ <reg32 offset="0x000c7" name="RBBM_CLOCK_CNTL2_TP3"/>
+ <reg32 offset="0x000c8" name="RBBM_CLOCK_CNTL3_TP0"/>
+ <reg32 offset="0x000c9" name="RBBM_CLOCK_CNTL3_TP1"/>
+ <reg32 offset="0x000ca" name="RBBM_CLOCK_CNTL3_TP2"/>
+ <reg32 offset="0x000cb" name="RBBM_CLOCK_CNTL3_TP3"/>
+ <reg32 offset="0x000cc" name="RBBM_CLOCK_CNTL4_TP0"/>
+ <reg32 offset="0x000cd" name="RBBM_CLOCK_CNTL4_TP1"/>
+ <reg32 offset="0x000ce" name="RBBM_CLOCK_CNTL4_TP2"/>
+ <reg32 offset="0x000cf" name="RBBM_CLOCK_CNTL4_TP3"/>
+ <reg32 offset="0x000d0" name="RBBM_CLOCK_DELAY_TP0"/>
+ <reg32 offset="0x000d1" name="RBBM_CLOCK_DELAY_TP1"/>
+ <reg32 offset="0x000d2" name="RBBM_CLOCK_DELAY_TP2"/>
+ <reg32 offset="0x000d3" name="RBBM_CLOCK_DELAY_TP3"/>
+ <reg32 offset="0x000d4" name="RBBM_CLOCK_DELAY2_TP0"/>
+ <reg32 offset="0x000d5" name="RBBM_CLOCK_DELAY2_TP1"/>
+ <reg32 offset="0x000d6" name="RBBM_CLOCK_DELAY2_TP2"/>
+ <reg32 offset="0x000d7" name="RBBM_CLOCK_DELAY2_TP3"/>
+ <reg32 offset="0x000d8" name="RBBM_CLOCK_DELAY3_TP0"/>
+ <reg32 offset="0x000d9" name="RBBM_CLOCK_DELAY3_TP1"/>
+ <reg32 offset="0x000da" name="RBBM_CLOCK_DELAY3_TP2"/>
+ <reg32 offset="0x000db" name="RBBM_CLOCK_DELAY3_TP3"/>
+ <reg32 offset="0x000dc" name="RBBM_CLOCK_DELAY4_TP0"/>
+ <reg32 offset="0x000dd" name="RBBM_CLOCK_DELAY4_TP1"/>
+ <reg32 offset="0x000de" name="RBBM_CLOCK_DELAY4_TP2"/>
+ <reg32 offset="0x000df" name="RBBM_CLOCK_DELAY4_TP3"/>
+ <reg32 offset="0x000e0" name="RBBM_CLOCK_HYST_TP0"/>
+ <reg32 offset="0x000e1" name="RBBM_CLOCK_HYST_TP1"/>
+ <reg32 offset="0x000e2" name="RBBM_CLOCK_HYST_TP2"/>
+ <reg32 offset="0x000e3" name="RBBM_CLOCK_HYST_TP3"/>
+ <reg32 offset="0x000e4" name="RBBM_CLOCK_HYST2_TP0"/>
+ <reg32 offset="0x000e5" name="RBBM_CLOCK_HYST2_TP1"/>
+ <reg32 offset="0x000e6" name="RBBM_CLOCK_HYST2_TP2"/>
+ <reg32 offset="0x000e7" name="RBBM_CLOCK_HYST2_TP3"/>
+ <reg32 offset="0x000e8" name="RBBM_CLOCK_HYST3_TP0"/>
+ <reg32 offset="0x000e9" name="RBBM_CLOCK_HYST3_TP1"/>
+ <reg32 offset="0x000ea" name="RBBM_CLOCK_HYST3_TP2"/>
+ <reg32 offset="0x000eb" name="RBBM_CLOCK_HYST3_TP3"/>
+ <reg32 offset="0x000ec" name="RBBM_CLOCK_HYST4_TP0"/>
+ <reg32 offset="0x000ed" name="RBBM_CLOCK_HYST4_TP1"/>
+ <reg32 offset="0x000ee" name="RBBM_CLOCK_HYST4_TP2"/>
+ <reg32 offset="0x000ef" name="RBBM_CLOCK_HYST4_TP3"/>
+ <reg32 offset="0x000f0" name="RBBM_CLOCK_CNTL_RB0"/>
+ <reg32 offset="0x000f1" name="RBBM_CLOCK_CNTL_RB1"/>
+ <reg32 offset="0x000f2" name="RBBM_CLOCK_CNTL_RB2"/>
+ <reg32 offset="0x000f3" name="RBBM_CLOCK_CNTL_RB3"/>
+ <reg32 offset="0x000f4" name="RBBM_CLOCK_CNTL2_RB0"/>
+ <reg32 offset="0x000f5" name="RBBM_CLOCK_CNTL2_RB1"/>
+ <reg32 offset="0x000f6" name="RBBM_CLOCK_CNTL2_RB2"/>
+ <reg32 offset="0x000f7" name="RBBM_CLOCK_CNTL2_RB3"/>
+ <reg32 offset="0x000f8" name="RBBM_CLOCK_CNTL_CCU0"/>
+ <reg32 offset="0x000f9" name="RBBM_CLOCK_CNTL_CCU1"/>
+ <reg32 offset="0x000fa" name="RBBM_CLOCK_CNTL_CCU2"/>
+ <reg32 offset="0x000fb" name="RBBM_CLOCK_CNTL_CCU3"/>
+ <reg32 offset="0x00100" name="RBBM_CLOCK_HYST_RB_CCU0"/>
+ <reg32 offset="0x00101" name="RBBM_CLOCK_HYST_RB_CCU1"/>
+ <reg32 offset="0x00102" name="RBBM_CLOCK_HYST_RB_CCU2"/>
+ <reg32 offset="0x00103" name="RBBM_CLOCK_HYST_RB_CCU3"/>
+ <reg32 offset="0x00104" name="RBBM_CLOCK_CNTL_RAC"/>
+ <reg32 offset="0x00105" name="RBBM_CLOCK_CNTL2_RAC"/>
+ <reg32 offset="0x00106" name="RBBM_CLOCK_DELAY_RAC"/>
+ <reg32 offset="0x00107" name="RBBM_CLOCK_HYST_RAC"/>
+ <reg32 offset="0x00108" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM"/>
+ <reg32 offset="0x00109" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0010a" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM"/>
+ <reg32 offset="0x0010b" name="RBBM_CLOCK_CNTL_UCHE"/>
+ <reg32 offset="0x0010c" name="RBBM_CLOCK_CNTL2_UCHE"/>
+ <reg32 offset="0x0010d" name="RBBM_CLOCK_CNTL3_UCHE"/>
+ <reg32 offset="0x0010e" name="RBBM_CLOCK_CNTL4_UCHE"/>
+ <reg32 offset="0x0010f" name="RBBM_CLOCK_DELAY_UCHE"/>
+ <reg32 offset="0x00110" name="RBBM_CLOCK_HYST_UCHE"/>
+ <reg32 offset="0x00111" name="RBBM_CLOCK_MODE_VFD"/>
+ <reg32 offset="0x00112" name="RBBM_CLOCK_DELAY_VFD"/>
+ <reg32 offset="0x00113" name="RBBM_CLOCK_HYST_VFD"/>
+ <reg32 offset="0x00114" name="RBBM_CLOCK_MODE_GPC"/>
+ <reg32 offset="0x00115" name="RBBM_CLOCK_DELAY_GPC"/>
+ <reg32 offset="0x00116" name="RBBM_CLOCK_HYST_GPC"/>
+ <reg32 offset="0x00117" name="RBBM_CLOCK_DELAY_HLSQ_2"/>
+ <reg32 offset="0x00118" name="RBBM_CLOCK_CNTL_GMU_GX"/>
+ <reg32 offset="0x00119" name="RBBM_CLOCK_DELAY_GMU_GX"/>
+ <reg32 offset="0x0011a" name="RBBM_CLOCK_HYST_GMU_GX"/>
+ <reg32 offset="0x0011b" name="RBBM_CLOCK_MODE_HLSQ"/>
+ <reg32 offset="0x0011c" name="RBBM_CLOCK_DELAY_HLSQ"/>
+ <reg32 offset="0x0011d" name="RBBM_CLOCK_HYST_HLSQ"/>
+ <reg32 offset="0x0011e" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A7XX-"/>
+ <reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/>
+ <reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/>
+ <reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/>
+ <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE"/>
+ <reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-">
+ <bitfield name="TXDONE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00123" name="RBBM_CLOCK_CNTL_FCHE"/>
+ <reg32 offset="0x00124" name="RBBM_CLOCK_DELAY_FCHE"/>
+ <reg32 offset="0x00125" name="RBBM_CLOCK_HYST_FCHE"/>
+ <reg32 offset="0x00126" name="RBBM_CLOCK_CNTL_MHUB"/>
+ <reg32 offset="0x00127" name="RBBM_CLOCK_DELAY_MHUB"/>
+ <reg32 offset="0x00128" name="RBBM_CLOCK_HYST_MHUB"/>
+ <reg32 offset="0x00129" name="RBBM_CLOCK_DELAY_GLC"/>
+ <reg32 offset="0x0012a" name="RBBM_CLOCK_HYST_GLC"/>
+ <reg32 offset="0x0012b" name="RBBM_CLOCK_CNTL_GLC"/>
+ <reg32 offset="0x0012f" name="RBBM_CLOCK_HYST2_VFD" variants="A7XX-"/>
+ <reg32 offset="0x005ff" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL"/>
+
+ <reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
+ <reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
+ <reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="15" low="8" name="PING_BLK_SEL"/>
+ </reg32>
+ <reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
+ <bitfield high="5" low="0" name="TRACEEN"/>
+ <bitfield high="14" low="12" name="GRANU"/>
+ <bitfield high="31" low="28" name="SEGT"/>
+ </reg32>
+ <reg32 offset="0x0605" name="DBGC_CFG_DBGBUS_CNTLM">
+ <bitfield high="27" low="24" name="ENABLE"/>
+ </reg32>
+ <reg32 offset="0x0608" name="DBGC_CFG_DBGBUS_IVTL_0"/>
+ <reg32 offset="0x0609" name="DBGC_CFG_DBGBUS_IVTL_1"/>
+ <reg32 offset="0x060a" name="DBGC_CFG_DBGBUS_IVTL_2"/>
+ <reg32 offset="0x060b" name="DBGC_CFG_DBGBUS_IVTL_3"/>
+ <reg32 offset="0x060c" name="DBGC_CFG_DBGBUS_MASKL_0"/>
+ <reg32 offset="0x060d" name="DBGC_CFG_DBGBUS_MASKL_1"/>
+ <reg32 offset="0x060e" name="DBGC_CFG_DBGBUS_MASKL_2"/>
+ <reg32 offset="0x060f" name="DBGC_CFG_DBGBUS_MASKL_3"/>
+ <reg32 offset="0x0610" name="DBGC_CFG_DBGBUS_BYTEL_0">
+ <bitfield high="3" low="0" name="BYTEL0"/>
+ <bitfield high="7" low="4" name="BYTEL1"/>
+ <bitfield high="11" low="8" name="BYTEL2"/>
+ <bitfield high="15" low="12" name="BYTEL3"/>
+ <bitfield high="19" low="16" name="BYTEL4"/>
+ <bitfield high="23" low="20" name="BYTEL5"/>
+ <bitfield high="27" low="24" name="BYTEL6"/>
+ <bitfield high="31" low="28" name="BYTEL7"/>
+ </reg32>
+ <reg32 offset="0x0611" name="DBGC_CFG_DBGBUS_BYTEL_1">
+ <bitfield high="3" low="0" name="BYTEL8"/>
+ <bitfield high="7" low="4" name="BYTEL9"/>
+ <bitfield high="11" low="8" name="BYTEL10"/>
+ <bitfield high="15" low="12" name="BYTEL11"/>
+ <bitfield high="19" low="16" name="BYTEL12"/>
+ <bitfield high="23" low="20" name="BYTEL13"/>
+ <bitfield high="27" low="24" name="BYTEL14"/>
+ <bitfield high="31" low="28" name="BYTEL15"/>
+ </reg32>
+ <reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/>
+ <reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/>
+ <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/>
+ <reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX">
+ <doc>
+ Set to true when binning; it isn't changed afterwards
+ </doc>
+ <bitfield name="BINNING" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xC800" name="HLSQ_DBG_AHB_READ_APERTURE"/>
+ <reg32 offset="0xD000" name="HLSQ_DBG_READ_SEL"/>
+ <reg32 offset="0x0E00" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0E01" name="UCHE_MODE_CNTL"/>
+ <reg64 offset="0x0E05" name="UCHE_WRITE_RANGE_MAX"/>
+ <reg64 offset="0x0E07" name="UCHE_WRITE_THRU_BASE"/>
+ <reg64 offset="0x0E09" name="UCHE_TRAP_BASE"/>
+ <reg64 offset="0x0E0B" name="UCHE_GMEM_RANGE_MIN"/>
+ <reg64 offset="0x0E0D" name="UCHE_GMEM_RANGE_MAX"/>
+ <reg32 offset="0x0E17" name="UCHE_CACHE_WAYS" usage="cmd"/>
+ <reg32 offset="0x0E18" name="UCHE_FILTER_CNTL"/>
+ <reg32 offset="0x0E19" name="UCHE_CLIENT_PF" usage="cmd">
+ <bitfield high="7" low="0" name="PERFSEL"/>
+ </reg32>
+ <array offset="0x0E1C" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="12"/>
+ <reg32 offset="0x0e3a" name="UCHE_GBIF_GX_CONFIG"/>
+ <reg32 offset="0x0e3c" name="UCHE_CMDQ_CONFIG"/>
+
+ <reg32 offset="0x3000" name="VBIF_VERSION"/>
+ <reg32 offset="0x3001" name="VBIF_CLKON">
+ <bitfield pos="1" name="FORCE_ON_TESTBUS" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x302A" name="VBIF_GATE_OFF_WRREQ_EN"/>
+ <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0"/>
+ <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1"/>
+ <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL"/>
+ <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0"/>
+ <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1">
+ <bitfield low="0" high="3" name="DATA_SEL"/>
+ </reg32>
+ <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0"/>
+ <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1">
+ <bitfield low="0" high="8" name="DATA_SEL"/>
+ </reg32>
+ <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT"/>
+ <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0"/>
+ <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1"/>
+ <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2"/>
+ <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3"/>
+ <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0"/>
+ <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1"/>
+ <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2"/>
+ <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3"/>
+ <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0"/>
+ <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1"/>
+ <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2"/>
+ <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3"/>
+ <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0"/>
+ <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1"/>
+ <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2"/>
+ <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0"/>
+ <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1"/>
+ <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2"/>
+ <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0"/>
+ <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1"/>
+ <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2"/>
+
+ <reg32 offset="0x3c01" name="GBIF_SCACHE_CNTL0"/>
+ <reg32 offset="0x3c02" name="GBIF_SCACHE_CNTL1"/>
+ <reg32 offset="0x3c03" name="GBIF_QSB_SIDE0"/>
+ <reg32 offset="0x3c04" name="GBIF_QSB_SIDE1"/>
+ <reg32 offset="0x3c05" name="GBIF_QSB_SIDE2"/>
+ <reg32 offset="0x3c06" name="GBIF_QSB_SIDE3"/>
+ <reg32 offset="0x3c45" name="GBIF_HALT"/>
+ <reg32 offset="0x3c46" name="GBIF_HALT_ACK"/>
+ <reg32 offset="0x3cc0" name="GBIF_PERF_PWR_CNT_EN"/>
+ <reg32 offset="0x3cc1" name="GBIF_PERF_PWR_CNT_CLR"/>
+ <reg32 offset="0x3cc2" name="GBIF_PERF_CNT_SEL"/>
+ <reg32 offset="0x3cc3" name="GBIF_PERF_PWR_CNT_SEL"/>
+ <reg32 offset="0x3cc4" name="GBIF_PERF_CNT_LOW0"/>
+ <reg32 offset="0x3cc5" name="GBIF_PERF_CNT_LOW1"/>
+ <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LOW2"/>
+ <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_LOW3"/>
+ <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_HIGH0"/>
+ <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HIGH1"/>
+ <reg32 offset="0x3cca" name="GBIF_PERF_CNT_HIGH2"/>
+ <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HIGH3"/>
+ <reg32 offset="0x3ccc" name="GBIF_PWR_CNT_LOW0"/>
+ <reg32 offset="0x3ccd" name="GBIF_PWR_CNT_LOW1"/>
+ <reg32 offset="0x3cce" name="GBIF_PWR_CNT_LOW2"/>
+ <reg32 offset="0x3ccf" name="GBIF_PWR_CNT_HIGH0"/>
+ <reg32 offset="0x3cd0" name="GBIF_PWR_CNT_HIGH1"/>
+ <reg32 offset="0x3cd1" name="GBIF_PWR_CNT_HIGH2"/>
+
+ <reg32 offset="0x0c00" name="VSC_DBG_ECO_CNTL"/>
+ <reg32 offset="0x0c02" name="VSC_BIN_SIZE" usage="rp_blit">
+ <bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
+ <bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/>
+ </reg32>
+ <reg64 offset="0x0c03" name="VSC_DRAW_STRM_SIZE_ADDRESS" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c06" name="VSC_BIN_COUNT" usage="rp_blit">
+ <bitfield name="NX" low="1" high="10" type="uint"/>
+ <bitfield name="NY" low="11" high="20" type="uint"/>
+ </reg32>
+ <array offset="0x0c10" name="VSC_PIPE_CONFIG" stride="1" length="32" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <doc>
+ Configures the mapping between VSC_PIPE buffer and
+ bin, X/Y specify the bin index in the horiz/vert
+ direction (0,0 is upper left, 0,1 is leftmost bin
+ on second row, and so on). W/H specify the number
+ of bins assigned to this VSC_PIPE in the horiz/vert
+ dimension.
+ </doc>
+ <bitfield name="X" low="0" high="9" type="uint"/>
+ <bitfield name="Y" low="10" high="19" type="uint"/>
+ <bitfield name="W" low="20" high="25" type="uint"/>
+ <bitfield name="H" low="26" high="31" type="uint"/>
+ </reg32>
+ </array>
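+ <!--
+ A minimal illustrative sketch (not from the driver; the helper name and
+ arguments are made up for this note) of packing one VSC_PIPE_CONFIG[n].REG
+ value from the X/Y/W/H fields described above: X[9:0], Y[19:10], W[25:20],
+ H[31:26].
+
+   #include <stdint.h>
+
+   /* x,y: bin index of the pipe's upper-left bin; w,h: bins covered */
+   static inline uint32_t pack_vsc_pipe_config(uint32_t x, uint32_t y,
+                                               uint32_t w, uint32_t h)
+   {
+       return (x & 0x3ff) | ((y & 0x3ff) << 10) |
+              ((w & 0x3f) << 20) | ((h & 0x3f) << 26);
+   }
+ -->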
+ <!--
+ HW binning primitive & draw streams, which enable draws and primitives
+ within a draw to be skipped in the main tile pass. See:
+ https://github.com/freedreno/freedreno/wiki/Visibility-Stream-Format
+
+ Compared to a5xx and earlier, we just program the address of the first
+ stream and hw adds (pipe_num * VSC_*_STRM_PITCH)
+
+ LIMIT is set to PITCH - 64, to make room for a bit of overflow
+ -->
+ <reg64 offset="0x0c30" name="VSC_PRIM_STRM_ADDRESS" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c32" name="VSC_PRIM_STRM_PITCH" usage="cmd"/>
+ <reg32 offset="0x0c33" name="VSC_PRIM_STRM_LIMIT" usage="cmd"/>
+ <reg64 offset="0x0c34" name="VSC_DRAW_STRM_ADDRESS" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c36" name="VSC_DRAW_STRM_PITCH" usage="cmd"/>
+ <reg32 offset="0x0c37" name="VSC_DRAW_STRM_LIMIT" usage="cmd"/>
+
+ <array offset="0x0c38" name="VSC_STATE" stride="1" length="32" usage="rp_blit">
+ <doc>
+ Seems to be a bitmap of which tiles mapped to the VSC
+ pipe contain geometry.
+
+ I suppose we can connect a maximum of 32 tiles to a
+ single VSC pipe.
+ </doc>
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+
+ <array offset="0x0c58" name="VSC_PRIM_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <doc>
+ Holds the size of the data written to the corresponding
+ VSC_PRIM_STRM buffer.
+ </doc>
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+
+ <array offset="0x0c78" name="VSC_DRAW_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <doc>
+ Holds the size of the data written to the corresponding VSC pipe, ie.
+ the same thing that is written out to VSC_DRAW_STRM_SIZE_ADDRESS_LO/HI
+ </doc>
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+
+ <reg32 offset="0x0d08" name="VSC_UNKNOWN_0D08" variants="A7XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x0E10" name="UCHE_UNKNOWN_0E10" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x0E11" name="UCHE_UNKNOWN_0E11" variants="A7XX-" usage="cmd"/>
+ <!-- always 0x03200000 ? -->
+ <reg32 offset="0x0e12" name="UCHE_UNKNOWN_0E12" usage="cmd"/>
+
+ <!-- adreno_reg_xy has 15 bits per coordinate, but a6xx registers only have 14 -->
+ <bitset name="a6xx_reg_xy" inline="yes">
+ <bitfield name="X" low="0" high="13" type="uint"/>
+ <bitfield name="Y" low="16" high="29" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x8000" name="GRAS_CL_CNTL" usage="rp_blit">
+ <bitfield name="CLIP_DISABLE" pos="0" type="boolean"/>
+ <bitfield name="ZNEAR_CLIP_DISABLE" pos="1" type="boolean"/>
+ <bitfield name="ZFAR_CLIP_DISABLE" pos="2" type="boolean"/>
+ <bitfield name="Z_CLAMP_ENABLE" pos="5" type="boolean"/>
+ <!-- controls near z clip behavior (set for vulkan) -->
+ <bitfield name="ZERO_GB_SCALE_Z" pos="6" type="boolean"/>
+ <!-- guess based on a3xx and the meaning of bits 8 and 9;
+ if the guess is right then this is related to point sprite clipping -->
+ <bitfield name="VP_CLIP_CODE_IGNORE" pos="7" type="boolean"/>
+ <bitfield name="VP_XFORM_DISABLE" pos="8" type="boolean"/>
+ <bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
+ </reg32>
+
+ <bitset name="a6xx_gras_xs_cl_cntl" inline="yes">
+ <bitfield name="CLIP_MASK" low="0" high="7"/>
+ <bitfield name="CULL_MASK" low="8" high="15"/>
+ </bitset>
+ <reg32 offset="0x8001" name="GRAS_VS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8002" name="GRAS_DS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8003" name="GRAS_GS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8004" name="GRAS_MAX_LAYER_INDEX" low="0" high="10" type="uint" usage="rp_blit"/>
+
+ <reg32 offset="0x8005" name="GRAS_CNTL" usage="rp_blit">
+ <!-- see also RB_RENDER_CONTROL0 -->
+ <bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
+ <bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
+ <bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
+ <bitfield name="IJ_LINEAR_PIXEL" pos="3" type="boolean"/>
+ <bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/>
+ <bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
+ <bitfield name="UNK10" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="UNK11" pos="11" type="boolean" variants="A7XX-"/>
+ </reg32>
+ <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" usage="rp_blit">
+ <bitfield name="HORZ" low="0" high="8" type="uint"/>
+ <bitfield name="VERT" low="10" high="18" type="uint"/>
+ </reg32>
+
+ <!-- Something connected to depth-stencil attachment size -->
+ <reg32 offset="0x8007" name="GRAS_UNKNOWN_8007" variants="A7XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x8008" name="GRAS_UNKNOWN_8008" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0x8009" name="GRAS_UNKNOWN_8009" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x800a" name="GRAS_UNKNOWN_800A" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x800b" name="GRAS_UNKNOWN_800B" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x800c" name="GRAS_UNKNOWN_800C" variants="A7XX-" usage="cmd"/>
+
+ <!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
+
+ <!-- 0x8006-0x800f invalid -->
+ <array offset="0x8010" name="GRAS_CL_VPORT" stride="6" length="16" usage="rp_blit">
+ <reg32 offset="0" name="XOFFSET" type="float"/>
+ <reg32 offset="1" name="XSCALE" type="float"/>
+ <reg32 offset="2" name="YOFFSET" type="float"/>
+ <reg32 offset="3" name="YSCALE" type="float"/>
+ <reg32 offset="4" name="ZOFFSET" type="float"/>
+ <reg32 offset="5" name="ZSCALE" type="float"/>
+ </array>
+ <array offset="0x8070" name="GRAS_CL_Z_CLAMP" stride="2" length="16" usage="rp_blit">
+ <reg32 offset="0" name="MIN" type="float"/>
+ <reg32 offset="1" name="MAX" type="float"/>
+ </array>
+
+ <reg32 offset="0x8090" name="GRAS_SU_CNTL" usage="rp_blit">
+ <bitfield name="CULL_FRONT" pos="0" type="boolean"/>
+ <bitfield name="CULL_BACK" pos="1" type="boolean"/>
+ <bitfield name="FRONT_CW" pos="2" type="boolean"/>
+ <bitfield name="LINEHALFWIDTH" low="3" high="10" radix="2" type="fixed"/>
+ <bitfield name="POLY_OFFSET" pos="11" type="boolean"/>
+ <bitfield name="UNK12" pos="12"/>
+ <bitfield name="LINE_MODE" pos="13" type="a5xx_line_mode"/>
+ <bitfield name="UNK15" low="15" high="16"/>
+ <!--
+ On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
+ the ability to add the view index to either the RT array
+ index or the viewport index, and it seems that
+ MULTIVIEW_ENABLE doesn't do anything, instead we need to
+ set at least one of RENDERTARGETINDEXINCR or
+ VIEWPORTINDEXINCR to enable multiview. The blob still
+ sets MULTIVIEW_ENABLE regardless.
+ TODO: what about gen2 (a640)?
+ -->
+ <bitfield name="MULTIVIEW_ENABLE" pos="17" type="boolean"/>
+ <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean"/>
+ <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean"/>
+ <bitfield name="UNK20" low="20" high="22"/>
+ </reg32>
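+ <!--
+ A minimal sketch (names are made up for this note) of the gen3+ multiview
+ setup described in the comment above: at least one of the INCR bits must be
+ set, and MULTIVIEW_ENABLE is set as well to follow what the blob does.
+ Bit positions are taken from the definition above.
+
+   #include <stdbool.h>
+   #include <stdint.h>
+
+   static inline uint32_t gras_su_cntl_multiview(bool to_rt_array_index)
+   {
+       uint32_t v = 1u << 17;                 /* MULTIVIEW_ENABLE */
+       v |= to_rt_array_index ? (1u << 18)    /* RENDERTARGETINDEXINCR */
+                              : (1u << 19);   /* VIEWPORTINDEXINCR */
+       return v;
+   }
+ -->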
+ <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" usage="rp_blit">
+ <bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
+ <bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
+ </reg32>
+ <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" usage="rp_blit"/>
+ <!-- 0x8093 invalid -->
+ <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" usage="rp_blit">
+ <bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
+ </reg32>
+ <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" usage="rp_blit"/>
+ <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
+ <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
+
+ <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
+ <bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
+ <bitfield name="SHIFTAMOUNT" low="1" high="2"/>
+ <bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
+ <bitfield name="UNK4" low="4" high="5"/>
+ </reg32>
+ <reg32 offset="0x809a" name="GRAS_SU_PATH_RENDERING_CNTL">
+ <bitfield name="UNK0" pos="0" type="boolean"/>
+ <bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
+ </reg32>
+
+ <bitset name="a6xx_gras_layer_cntl" inline="yes">
+ <bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
+ <bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
+ </bitset>
+ <reg32 offset="0x809b" name="GRAS_VS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_GS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_DS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+ <!-- 0x809e/0x809f invalid -->
+
+ <enum name="a6xx_sequenced_thread_dist">
+ <value value="0x0" name="DIST_SCREEN_COORD"/>
+ <value value="0x1" name="DIST_ALL_TO_RB0"/>
+ </enum>
+
+ <enum name="a6xx_single_prim_mode">
+ <value value="0x0" name="NO_FLUSH"/>
+ <doc>
+ In addition to FLUSH_PER_OVERLAP, guarantee that UCHE
+ and CCU don't get out of sync when fetching the previous
+ value for the current pixel. With NO_FLUSH, there's the
+ possibility that the flags for the current pixel are
+ flushed before the data or vice-versa, leading to
+ texture fetches via UCHE returning out-of-sync values.
+ This mode should eliminate that. It's used in bypass
+ mode for coherent blending
+ (GL_KHR_blend_equation_advanced_coherent) as well as
+ non-coherent blending.
+ </doc>
+ <value value="0x1" name="FLUSH_PER_OVERLAP_AND_OVERWRITE"/>
+ <doc>
+ Invalidate UCHE and wait for any pending work to finish
+ if there was possibly an overlapping primitive prior to
+ the current one. This is similar to a combination of
+ GRAS_SC_CONTROL::INJECT_L2_INVALIDATE_EVENT and
+ WAIT_RB_IDLE_ALL_TRI on a3xx. It's used in GMEM mode for
+ coherent blending
+ (GL_KHR_blend_equation_advanced_coherent).
+ </doc>
+ <value value="0x3" name="FLUSH_PER_OVERLAP"/>
+ </enum>
+
+ <!-- this probably has the same meaning as a3xx GRAS_SC_CONTROL::RASTER_MODE -->
+ <enum name="a6xx_raster_mode">
+ <value value="0x0" name="TYPE_TILED"/>
+ <value value="0x1" name="TYPE_WRITER"/>
+ </enum>
+
+ <!-- I'm guessing this is the same as a3xx -->
+ <enum name="a6xx_raster_direction">
+ <value value="0x0" name="LR_TB"/>
+ <value value="0x1" name="RL_TB"/>
+ <value value="0x2" name="LR_BT"/>
+ <value value="0x3" name="RB_BT"/>
+ </enum>
+
+ <reg32 offset="0x80a0" name="GRAS_SC_CNTL" usage="rp_blit">
+ <bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2"/>
+ <bitfield name="SINGLE_PRIM_MODE" low="3" high="4" type="a6xx_single_prim_mode"/>
+ <bitfield name="RASTER_MODE" pos="5" type="a6xx_raster_mode"/>
+ <bitfield name="RASTER_DIRECTION" low="6" high="7" type="a6xx_raster_direction"/>
+ <bitfield name="SEQUENCED_THREAD_DISTRIBUTION" pos="8" type="a6xx_sequenced_thread_dist"/>
+ <!-- CCUSINGLECACHELINESIZE is ignored unless bit 9 is set -->
+ <bitfield name="UNK9" pos="9" type="boolean"/>
+ <bitfield name="ROTATION" low="10" high="11" type="uint"/>
+ <bitfield name="EARLYVIZOUTEN" pos="12" type="boolean"/>
+ </reg32>
+
+ <enum name="a6xx_render_mode">
+ <value value="0x0" name="RENDERING_PASS"/>
+ <value value="0x1" name="BINNING_PASS"/>
+ </enum>
+
+ <enum name="a6xx_buffers_location">
+ <value value="0" name="BUFFERS_IN_GMEM"/>
+ <value value="3" name="BUFFERS_IN_SYSMEM"/>
+ </enum>
+
+ <reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
+ <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
+ <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
+ <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
+ <bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location" variants="A6XX"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="UNK27" pos="27"/>
+ </reg32>
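+ <!--
+ Illustrative encoding of the bin size fields above (helper name made up for
+ this note): BINW is stored right shifted by 5 (32-pixel units) and BINH
+ right shifted by 4 (16-pixel units), so e.g. a 256x128 bin is encoded as
+ BINW=8, BINH=8.
+
+   #include <stdint.h>
+
+   static inline uint32_t gras_bin_control_size(uint32_t bin_w, uint32_t bin_h)
+   {
+       return ((bin_w >> 5) & 0x3f) | (((bin_h >> 4) & 0x7f) << 8);
+   }
+ -->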
+
+ <reg32 offset="0x80a2" name="GRAS_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="UNK2" pos="2"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
+ <reg32 offset="0x80a3" name="GRAS_DEST_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+
+ <bitset name="a6xx_sample_config" inline="yes">
+ <bitfield name="UNK0" pos="0"/>
+ <bitfield name="LOCATION_ENABLE" pos="1" type="boolean"/>
+ </bitset>
+
+ <bitset name="a6xx_sample_locations" inline="yes">
+ <bitfield name="SAMPLE_0_X" low="0" high="3" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_0_Y" low="4" high="7" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_1_X" low="8" high="11" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_1_Y" low="12" high="15" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_2_X" low="16" high="19" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_2_Y" low="20" high="23" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_3_X" low="24" high="27" radix="4" type="fixed"/>
+ <bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
+ </bitset>
+
+ <reg32 offset="0x80a4" name="GRAS_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+
+ <reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x80a7-0x80ae invalid -->
+ <reg32 offset="0x80af" name="GRAS_UNKNOWN_80AF" pos="0" usage="cmd"/>
+
+ <bitset name="a6xx_scissor_xy" inline="yes">
+ <bitfield name="X" low="0" high="15" type="uint"/>
+ <bitfield name="Y" low="16" high="31" type="uint"/>
+ </bitset>
+ <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" usage="rp_blit">
+ <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
+ <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
+ </array>
+ <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" usage="rp_blit">
+ <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
+ <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
+ </array>
+
+ <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+
+ <!-- 0x80f4 - 0x80fa are used for VK_KHR_fragment_shading_rate -->
+ <reg64 offset="0x80f4" name="GRAS_UNKNOWN_80F4" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x80f5" name="GRAS_UNKNOWN_80F5" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x80f6" name="GRAS_UNKNOWN_80F6" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x80f8" name="GRAS_UNKNOWN_80F8" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x80f9" name="GRAS_UNKNOWN_80F9" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x80fa" name="GRAS_UNKNOWN_80FA" variants="A7XX-" usage="cmd"/>
+
+ <enum name="a6xx_lrz_dir_status">
+ <value value="0x1" name="LRZ_DIR_LE"/>
+ <value value="0x2" name="LRZ_DIR_GE"/>
+ <value value="0x3" name="LRZ_DIR_INVALID"/>
+ </enum>
+
+ <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" usage="rp_blit">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <doc>LRZ write also disabled for blend/etc.</doc>
+ <bitfield name="LRZ_WRITE" pos="1" type="boolean"/>
+ <doc>update MAX instead of MIN value, ie. GL_GREATER/GL_GEQUAL</doc>
+ <bitfield name="GREATER" pos="2" type="boolean"/>
+ <doc>
+ Clears the LRZ block being touched to:
+ - 0.0 if GREATER
+ - 1.0 if LESS
+ </doc>
+ <bitfield name="FC_ENABLE" pos="3" type="boolean"/>
+ <!-- set when depth-test + depth-write enabled -->
+ <bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
+ <bitfield name="DIR" low="6" high="7" type="a6xx_lrz_dir_status"/>
+ <doc>
+ If DISABLE_ON_WRONG_DIR is enabled - writes the new LRZ direction into
+ the buffer; in case of a mismatched direction, writes 0 (disables LRZ).
+ </doc>
+ <bitfield name="DIR_WRITE" pos="8" type="boolean"/>
+ <doc>
+ Disables LRZ based on the previous direction and the current one.
+ If DIR_WRITE is not enabled - there is no write to the direction buffer.
+ </doc>
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean"/>
+ <bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
+ </reg32>
+
+ <enum name="a6xx_fragcoord_sample_mode">
+ <value value="0" name="FRAGCOORD_CENTER"/>
+ <value value="3" name="FRAGCOORD_SAMPLE"/>
+ </enum>
+
+ <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" low="0" high="2" usage="rp_blit">
+ <bitfield name="SAMPLEID" pos="0" type="boolean"/>
+ <bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
+ </reg32>
+
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUF_INFO_0" usage="rp_blit">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ </reg32>
+ <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
+ <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
+ <!-- TODO: fix the shr fields -->
+ <bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="10" high="28" shr="4" type="uint"/>
+ </reg32>
+
+ <!--
+ The LRZ "fast clear" buffer is initialized to zero's by blob, and
+ read/written when GRAS_LRZ_CNTL.FC_ENABLE (b3) is set. It appears
+ to store 1b/block. It appears that '0' means block has original
+ depth clear value, and '1' means that the corresponding block in
+ LRZ has been modified. Ignoring alignment/padding, the size is
+ given by the formula:
+
+ // calculate LRZ size from depth size:
+ if (nr_samples == 4) {
+ width *= 2;
+ height *= 2;
+ } else if (nr_samples == 2) {
+ height *= 2;
+ }
+
+ lrz_width = div_round_up(width, 8);
+ lrz_height = div_round_up(height, 8);
+
+ // calculate # of blocks:
+ nblocksx = div_round_up(lrz_width, 16);
+ nblocksy = div_round_up(lrz_height, 4);
+
+ // fast-clear buffer is 1bit/block:
+ fc_sz = div_round_up(nblocksx * nblocksy, 8);
+
+ In practice the blob seems to switch off FC_ENABLE once the size
+ increases beyond 1 page. Not sure if that is an actual limit or
+ not.
+ -->
+ <reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
+ <!-- 0x8108 invalid -->
+ <reg32 offset="0x8109" name="GRAS_SAMPLE_CNTL" usage="rp_blit">
+ <bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
+ </reg32>
+ <!--
+ The LRZ buffer represents a single array layer + mip level, and there is
+ a single buffer per depth image. Thus, to reuse LRZ between renderpasses,
+ it is necessary to track the depth view used in the previous renderpass,
+ which is what GRAS_LRZ_DEPTH_VIEW is for.
+ GRAS_LRZ_CNTL checks whether the current value of GRAS_LRZ_DEPTH_VIEW is
+ equal to the value stored in the LRZ buffer; if not, LRZ is disabled.
+ -->
+ <reg32 offset="0x810a" name="GRAS_LRZ_DEPTH_VIEW" usage="cmd">
+ <bitfield name="BASE_LAYER" low="0" high="10" type="uint"/>
+ <bitfield name="LAYER_COUNT" low="16" high="26" type="uint"/>
+ <bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
+ </reg32>
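+ <!--
+ Illustrative packing of GRAS_LRZ_DEPTH_VIEW from the depth view's
+ subresource range (helper name made up for this note), following the
+ bitfields above: BASE_LAYER[10:0], LAYER_COUNT[26:16], BASE_MIP_LEVEL[31:28].
+
+   #include <stdint.h>
+
+   static inline uint32_t lrz_depth_view(uint32_t base_layer,
+                                         uint32_t layer_count,
+                                         uint32_t base_mip)
+   {
+       return (base_layer & 0x7ff) | ((layer_count & 0x7ff) << 16) |
+              ((base_mip & 0xf) << 28);
+   }
+ -->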
+
+ <reg32 offset="0x810b" name="GRAS_UNKNOWN_810B" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x810c-0x810f invalid -->
+
+ <reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
+
+ <!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
+ <reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
+
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit"/>
+
+ <!-- Always written together and always equal 09510840 00000a62 -->
+ <reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8121" name="GRAS_UNKNOWN_8121" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x8112-0x83ff invalid -->
+
+ <enum name="a6xx_rotation">
+ <value value="0x0" name="ROTATE_0"/>
+ <value value="0x1" name="ROTATE_90"/>
+ <value value="0x2" name="ROTATE_180"/>
+ <value value="0x3" name="ROTATE_270"/>
+ <value value="0x4" name="ROTATE_HFLIP"/>
+ <value value="0x5" name="ROTATE_VFLIP"/>
+ </enum>
+
+ <bitset name="a6xx_2d_blit_cntl" inline="yes">
+ <bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
+ <bitfield name="OVERWRITEEN" pos="3" type="boolean"/>
+ <bitfield name="UNK4" low="4" high="6"/>
+ <bitfield name="SOLID_COLOR" pos="7" type="boolean"/>
+ <bitfield name="COLOR_FORMAT" low="8" high="15" type="a6xx_format"/>
+ <bitfield name="SCISSOR" pos="16" type="boolean"/>
+ <bitfield name="UNK17" low="17" high="18"/>
+ <!-- required when blitting D24S8/D24X8 -->
+ <bitfield name="D24S8" pos="19" type="boolean"/>
+ <!-- some sort of channel mask, disabled channels are set to zero ? -->
+ <bitfield name="MASK" low="20" high="23"/>
+ <bitfield name="IFMT" low="24" high="28" type="a6xx_2d_ifmt"/>
+ <bitfield name="RASTER_MODE" pos="29" type="a6xx_raster_mode"/>
+ <bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <reg32 offset="0x8400" name="GRAS_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
+ <!-- note: the low 8 bits for src coords are valid, probably fixed point.
+ It would be a bit weird though, since we subtract 1 from BR coords.
+ Apparently signed; the gallium driver uses negative coords and it works?
+ -->
+ <reg32 offset="0x8401" name="GRAS_2D_SRC_TL_X" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_2D_SRC_BR_X" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_2D_SRC_TL_Y" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_2D_SRC_BR_Y" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_2D_DST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_2D_DST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
+ <reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
+ <reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
+ <reg32 offset="0x840a" name="GRAS_2D_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_2D_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
+ <!-- 0x840c-0x85ff invalid -->
+
+ <!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
+ <reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="cmd">
+ <bitfield name="UNK7" pos="7" type="boolean"/>
+ <bitfield name="LRZCACHELOCKDIS" pos="11" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8601" name="GRAS_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
+ <reg32 offset="0x8602" name="GRAS_NC_MODE_CNTL" variants="A7XX-"/>
+ <array offset="0x8610" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="4"/>
+ <array offset="0x8614" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4"/>
+ <array offset="0x8618" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="4"/>
+
+ <!-- note 0x8620-0x87ff are not all invalid
+ (in particular, 0x8631/0x8632 have 0x3fff3fff mask and would be xy coords)
+ -->
+
+ <!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
+ <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A6XX" usage="rp_blit">
+ <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
+ <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
+ <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
+ <bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ </reg32>
+
+ <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
+ <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
+ <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ </reg32>
+
+ <reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
+ <bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
+ <bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
+ <!-- set during binning pass: -->
+ <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="UNK8" low="8" high="10"/>
+ <bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
+ <bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
+ <bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
+ <bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
+ <!-- bit seems to be set whenever depth buffer enabled: -->
+ <bitfield name="FLAG_DEPTH" pos="14" type="boolean"/>
+ <!-- bitmask of MRTs using UBWC flag buffer: -->
+ <bitfield name="FLAG_MRTS" low="16" high="23"/>
+ </reg32>
+ <reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
+ <!-- set during binning pass: -->
+ <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
+ <bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
+ <bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
+ <bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="BINNING" pos="7" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="UNK2" pos="2"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
+ <reg32 offset="0x8803" name="RB_DEST_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x8804" name="RB_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
+ <reg32 offset="0x8805" name="RB_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x8806" name="RB_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <!-- 0x8807-0x8808 invalid -->
+ <!--
+ note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
+ name comes from kernel and is probably right)
+ -->
+ <reg32 offset="0x8809" name="RB_RENDER_CONTROL0" usage="rp_blit">
+ <!-- see also GRAS_CNTL -->
+ <bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
+ <bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
+ <bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
+ <bitfield name="IJ_LINEAR_PIXEL" pos="3" type="boolean"/>
+ <bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/>
+ <bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
+ <bitfield name="UNK10" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x880a" name="RB_RENDER_CONTROL1" usage="rp_blit">
+ <!-- enable bits for various FS sysvalue regs: -->
+ <bitfield name="SAMPLEMASK" pos="0" type="boolean"/>
+ <bitfield name="POSTDEPTHCOVERAGE" pos="1" type="boolean"/>
+ <bitfield name="FACENESS" pos="2" type="boolean"/>
+ <bitfield name="SAMPLEID" pos="3" type="boolean"/>
+ <bitfield name="FRAGCOORDSAMPLEMODE" low="4" high="5" type="a6xx_fragcoord_sample_mode"/>
+ <bitfield name="CENTERRHW" pos="6" type="boolean"/>
+ <bitfield name="LINELENGTHEN" pos="7" type="boolean"/>
+ <bitfield name="FOVEATION" pos="8" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x880b" name="RB_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="FRAG_WRITES_Z" pos="1" type="boolean"/>
+ <bitfield name="FRAG_WRITES_SAMPMASK" pos="2" type="boolean"/>
+ <bitfield name="FRAG_WRITES_STENCILREF" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x880c" name="RB_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ </reg32>
+ <reg32 offset="0x880d" name="RB_RENDER_COMPONENTS" usage="rp_blit">
+ <bitfield name="RT0" low="0" high="3"/>
+ <bitfield name="RT1" low="4" high="7"/>
+ <bitfield name="RT2" low="8" high="11"/>
+ <bitfield name="RT3" low="12" high="15"/>
+ <bitfield name="RT4" low="16" high="19"/>
+ <bitfield name="RT5" low="20" high="23"/>
+ <bitfield name="RT6" low="24" high="27"/>
+ <bitfield name="RT7" low="28" high="31"/>
+ </reg32>
+ <reg32 offset="0x880e" name="RB_DITHER_CNTL" usage="cmd">
+ <bitfield name="DITHER_MODE_MRT0" low="0" high="1" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT1" low="2" high="3" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT2" low="4" high="5" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT3" low="6" high="7" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT4" low="8" high="9" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT5" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT6" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT7" low="14" high="15" type="adreno_rb_dither_mode"/>
+ </reg32>
+ <reg32 offset="0x880f" name="RB_SRGB_CNTL" usage="rp_blit">
+ <!-- Same as SP_SRGB_CNTL -->
+ <bitfield name="SRGB_MRT0" pos="0" type="boolean"/>
+ <bitfield name="SRGB_MRT1" pos="1" type="boolean"/>
+ <bitfield name="SRGB_MRT2" pos="2" type="boolean"/>
+ <bitfield name="SRGB_MRT3" pos="3" type="boolean"/>
+ <bitfield name="SRGB_MRT4" pos="4" type="boolean"/>
+ <bitfield name="SRGB_MRT5" pos="5" type="boolean"/>
+ <bitfield name="SRGB_MRT6" pos="6" type="boolean"/>
+ <bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x8810" name="RB_SAMPLE_CNTL" usage="rp_blit">
+ <bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/>
+ <reg32 offset="0x8812" name="RB_UNKNOWN_8812" variants="A7XX-" usage="rp_blit"/>
+ <!-- 0x8813-0x8817 invalid -->
+ <!-- always 0x0 ? -->
+ <reg32 offset="0x8818" name="RB_UNKNOWN_8818" low="0" high="6" usage="cmd"/>
+ <!-- 0x8819-0x881e all 32 bits -->
+ <reg32 offset="0x8819" name="RB_UNKNOWN_8819" usage="cmd"/>
+ <reg32 offset="0x881a" name="RB_UNKNOWN_881A" usage="cmd"/>
+ <reg32 offset="0x881b" name="RB_UNKNOWN_881B" usage="cmd"/>
+ <reg32 offset="0x881c" name="RB_UNKNOWN_881C" usage="cmd"/>
+ <reg32 offset="0x881d" name="RB_UNKNOWN_881D" usage="cmd"/>
+ <reg32 offset="0x881e" name="RB_UNKNOWN_881E" usage="cmd"/>
+ <!-- 0x881f invalid -->
+ <array offset="0x8820" name="RB_MRT" stride="8" length="8" usage="rp_blit">
+ <reg32 offset="0x0" name="CONTROL">
+ <bitfield name="BLEND" pos="0" type="boolean"/>
+ <bitfield name="BLEND2" pos="1" type="boolean"/>
+ <bitfield name="ROP_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="ROP_CODE" low="3" high="6" type="a3xx_rop_code"/>
+ <bitfield name="COMPONENT_ENABLE" low="7" high="10" type="hex"/>
+ </reg32>
+ <reg32 offset="0x1" name="BLEND_CONTROL">
+ <bitfield name="RGB_SRC_FACTOR" low="0" high="4" type="adreno_rb_blend_factor"/>
+ <bitfield name="RGB_BLEND_OPCODE" low="5" high="7" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="RGB_DEST_FACTOR" low="8" high="12" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_SRC_FACTOR" low="16" high="20" type="adreno_rb_blend_factor"/>
+ <bitfield name="ALPHA_BLEND_OPCODE" low="21" high="23" type="a3xx_rb_blend_opcode"/>
+ <bitfield name="ALPHA_DEST_FACTOR" low="24" high="28" type="adreno_rb_blend_factor"/>
+ </reg32>
+ <reg32 offset="0x2" name="BUF_INFO" variants="A6XX">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="COLOR_TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="UNK10" pos="10"/>
+ <bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="0x2" name="BUF_INFO" variants="A7XX-">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="COLOR_TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="UNK10" pos="10"/>
+ <bitfield name="LOSSLESSCOMPEN" pos="11" type="boolean"/>
+ <bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ </reg32>
+ <!--
+ at least in gmem, things seem to be aligned to pitch of 64..
+ maybe an artifact of tiled format used in gmem?
+ -->
+ <reg32 offset="0x3" name="PITCH" shr="6" high="15" type="uint"/>
+ <reg32 offset="0x4" name="ARRAY_PITCH" shr="6" high="28" type="uint"/>
+ <!--
+ Compared to a5xx and before, we configure both a GMEM base and
+ external base. Not sure if this is to facilitate GMEM save/
+ restore for context switch, or just to simplify state setup to
+ not have to care about GMEM vs BYPASS mode.
+ -->
+ <!-- maybe something in low bits since alignment of 1 doesn't make sense? -->
+ <reg64 offset="0x5" name="BASE" type="waddress" align="1"/>
+
+ <reg32 offset="0x7" name="BASE_GMEM" low="12" high="31" shr="12"/>
+ </array>
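+ <!--
+ Illustrative only (helper names made up for this note): fields marked
+ shr="6" above hold the byte value right shifted by 6, i.e. in 64-byte
+ units, which matches the pitch-of-64 alignment note.
+
+   #include <stdint.h>
+
+   static inline uint32_t rb_mrt_pitch(uint32_t pitch_bytes)
+   {
+       return (pitch_bytes >> 6) & 0xffff;        /* PITCH, 64-byte units */
+   }
+
+   static inline uint32_t rb_mrt_array_pitch(uint32_t layer_size_bytes)
+   {
+       return (layer_size_bytes >> 6) & 0x1fffffff; /* ARRAY_PITCH, 64-byte units */
+   }
+ -->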
+
+ <reg32 offset="0x8860" name="RB_BLEND_RED_F32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8861" name="RB_BLEND_GREEN_F32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8862" name="RB_BLEND_BLUE_F32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8863" name="RB_BLEND_ALPHA_F32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8864" name="RB_ALPHA_CONTROL" usage="cmd">
+ <bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
+ <bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
+ <bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
+ </reg32>
+ <reg32 offset="0x8865" name="RB_BLEND_CNTL" usage="rp_blit">
+ <!-- per-mrt enable bit -->
+ <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
+ <bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
+ <bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
+ <bitfield name="ALPHA_TO_ONE" pos="11" type="boolean"/>
+ <bitfield name="SAMPLE_MASK" low="16" high="31"/>
+ </reg32>
+ <!-- 0x8866-0x886f invalid -->
+ <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" usage="rp_blit">
+ <bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
+ </reg32>
+
+ <reg32 offset="0x8871" name="RB_DEPTH_CNTL" usage="rp_blit">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="ZFUNC" low="2" high="4" type="adreno_compare_func"/>
+ <bitfield name="Z_CLAMP_ENABLE" pos="5" type="boolean"/>
+ <doc>
+ The Z_READ_ENABLE bit is set for a zfunc other than GL_ALWAYS or GL_NEVER;
+ it is also set when Z_BOUNDS_ENABLE is set.
+ </doc>
+ <bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/>
+ <bitfield name="Z_BOUNDS_ENABLE" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" usage="rp_blit">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <!-- duplicates GRAS_SU_DEPTH_BUFFER_INFO: -->
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" low="3" high="4"/>
+ </reg32>
+ <!-- first 4 bits duplicates GRAS_SU_DEPTH_BUFFER_INFO -->
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" low="3" high="4"/>
+ <bitfield name="TILEMODE" low="5" high="6" type="a6xx_tile_mode"/>
+ <bitfield name="LOSSLESSCOMPEN" pos="7" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x8873" name="RB_DEPTH_BUFFER_PITCH" low="0" high="13" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8874" name="RB_DEPTH_BUFFER_ARRAY_PITCH" low="0" high="27" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8875" name="RB_DEPTH_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8877" name="RB_DEPTH_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+
+ <reg32 offset="0x8878" name="RB_Z_BOUNDS_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8879" name="RB_Z_BOUNDS_MAX" type="float" usage="rp_blit"/>
+ <!-- 0x887a-0x887f invalid -->
+ <reg32 offset="0x8880" name="RB_STENCIL_CONTROL" usage="rp_blit">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
+ <!--
+ set for stencil operations that require a read from the stencil
+ buffer, but not, for example, for stencil clear (which does
+ not require a read).. so guessing this is analogous to
+ READ_DEST_ENABLE for the color buffer..
+ -->
+ <bitfield name="STENCIL_READ" pos="2" type="boolean"/>
+ <bitfield name="FUNC" low="8" high="10" type="adreno_compare_func"/>
+ <bitfield name="FAIL" low="11" high="13" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS" low="14" high="16" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL" low="17" high="19" type="adreno_stencil_op"/>
+ <bitfield name="FUNC_BF" low="20" high="22" type="adreno_compare_func"/>
+ <bitfield name="FAIL_BF" low="23" high="25" type="adreno_stencil_op"/>
+ <bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
+ <bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
+ </reg32>
+ <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A6XX" usage="rp_blit">
+ <bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
+ <bitfield name="UNK1" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
+ <bitfield name="UNK1" pos="1" type="boolean"/>
+ <bitfield name="TILEMODE" low="2" high="3" type="a6xx_tile_mode"/>
+ </reg32>
+ <reg32 offset="0x8882" name="RB_STENCIL_BUFFER_PITCH" low="0" high="11" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8883" name="RB_STENCIL_BUFFER_ARRAY_PITCH" low="0" high="23" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8884" name="RB_STENCIL_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8886" name="RB_STENCIL_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8887" name="RB_STENCILREF" usage="rp_blit">
+ <bitfield name="REF" low="0" high="7"/>
+ <bitfield name="BFREF" low="8" high="15"/>
+ </reg32>
+ <reg32 offset="0x8888" name="RB_STENCILMASK" usage="rp_blit">
+ <bitfield name="MASK" low="0" high="7"/>
+ <bitfield name="BFMASK" low="8" high="15"/>
+ </reg32>
+ <reg32 offset="0x8889" name="RB_STENCILWRMASK" usage="rp_blit">
+ <bitfield name="WRMASK" low="0" high="7"/>
+ <bitfield name="BFWRMASK" low="8" high="15"/>
+ </reg32>
+ <!-- 0x888a-0x888f invalid -->
+ <reg32 offset="0x8890" name="RB_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8891" name="RB_SAMPLE_COUNT_CONTROL" usage="cmd">
+ <bitfield name="DISABLE" pos="0" type="boolean"/>
+ <bitfield name="COPY" pos="1" type="boolean"/>
+ </reg32>
+ <!-- 0x8892-0x8897 invalid -->
+ <reg32 offset="0x8898" name="RB_LRZ_CNTL" usage="rp_blit">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
+ <!-- 0x8899-0x88bf invalid -->
+ <!-- clamps depth value for depth test/write -->
+ <reg32 offset="0x88c0" name="RB_Z_CLAMP_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c1" name="RB_Z_CLAMP_MAX" type="float" usage="rp_blit"/>
+ <!-- 0x88c2-0x88cf invalid-->
+ <reg32 offset="0x88d0" name="RB_UNKNOWN_88D0" usage="rp_blit">
+ <bitfield name="UNK0" low="0" high="12"/>
+ <bitfield name="UNK16" low="16" high="26"/>
+ </reg32>
+ <reg32 offset="0x88d1" name="RB_BLIT_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d2" name="RB_BLIT_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <!-- weird to duplicate other regs from same block?? -->
+ <reg32 offset="0x88d3" name="RB_BIN_CONTROL2" usage="rp_blit">
+ <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
+ <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="0x88d4" name="RB_WINDOW_OFFSET2" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d5" name="RB_BLIT_GMEM_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
+ </reg32>
+ <reg32 offset="0x88d6" name="RB_BLIT_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <!-- s/DST_FORMAT/DST_INFO/ probably: -->
+ <reg32 offset="0x88d7" name="RB_BLIT_DST_INFO" usage="rp_blit">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="FLAGS" pos="2" type="boolean"/>
+ <bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
+ <bitfield name="COLOR_SWAP" low="5" high="6" type="a3xx_color_swap"/>
+ <bitfield name="COLOR_FORMAT" low="7" high="14" type="a6xx_format"/>
+ <bitfield name="UNK15" pos="15" type="boolean"/>
+ </reg32>
+ <reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <!-- array-pitch is size of layer -->
+ <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88dc" name="RB_BLIT_FLAG_DST" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88de" name="RB_BLIT_FLAG_DST_PITCH" usage="rp_blit">
+ <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x88df" name="RB_BLIT_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x88e0" name="RB_BLIT_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x88e1" name="RB_BLIT_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x88e2" name="RB_BLIT_CLEAR_COLOR_DW3" usage="rp_blit"/>
+
+ <!-- seems somewhat similar to what we called RB_CLEAR_CNTL on a5xx: -->
+ <reg32 offset="0x88e3" name="RB_BLIT_INFO" usage="rp_blit">
+ <bitfield name="UNK0" pos="0" type="boolean"/> <!-- s8 stencil restore/clear? But also color restore? -->
+ <bitfield name="GMEM" pos="1" type="boolean"/> <!-- set for restore and clear to gmem? -->
+ <bitfield name="SAMPLE_0" pos="2" type="boolean"/> <!-- takes sample 0 instead of averaging -->
+ <bitfield name="DEPTH" pos="3" type="boolean"/> <!-- z16/z32/z24s8/x24x8 clear or resolve? -->
+ <doc>
+ For clearing depth/stencil:
+ 1 - depth
+ 2 - stencil
+ 3 - depth+stencil
+ For clearing the color buffer:
+ probably a component mask; 0xf is always observed
+ </doc>
+ <bitfield name="CLEAR_MASK" low="4" high="7"/>
+ <!-- set when this is the last resolve on a650+ -->
+ <bitfield name="LAST" low="8" high="9"/>
+ <!--
+ a618 GLES: color render target number being resolved for RM6_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
+ a618 VK: 0x8 for depth RM6_RESOLVE, 0x9 for separate stencil, 0 otherwise.
+
+ We believe this is related to concurrent resolves
+ -->
+ <bitfield name="BUFFER_ID" low="12" high="15"/>
+ </reg32>
+ <reg32 offset="0x88e4" name="RB_UNKNOWN_88E4" variants="A7XX-" usage="rp_blit">
+ <!-- Value conditioned based on predicate, changed before blits -->
+ <bitfield name="UNK0" pos="0" type="boolean"/>
+ </reg32>
+
+ <enum name="a6xx_ccu_cache_size">
+ <value value="0x0" name="CCU_CACHE_SIZE_FULL"/>
+ <value value="0x1" name="CCU_CACHE_SIZE_HALF"/>
+ <value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/>
+ <value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/>
+ </enum>
+ <reg32 offset="0x88e5" name="RB_CCU_CNTL2" variants="A7XX-" usage="cmd">
+ <bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/>
+ <bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/>
+ <bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
+ <!-- GMEM offset of CCU depth cache -->
+ <bitfield name="DEPTH_OFFSET" low="12" high="20" shr="12" type="hex"/>
+ <bitfield name="COLOR_CACHE_SIZE" low="21" high="22" type="a6xx_ccu_cache_size"/>
+ <!-- GMEM offset of CCU color cache
+ for GMEM rendering, we set it to GMEM size minus the minimum
+ CCU color cache size. CCU color cache will be needed in some
+ resolve cases, and in those cases we need to reserve the end
+ of GMEM for color cache.
+ -->
+ <bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
+ </reg32>
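+ <!--
+ A sketch of the color-cache placement described in the comment above
+ (helper name made up for this note; gmem_size and the minimum color cache
+ size are caller-supplied assumptions): the cache sits at the end of GMEM,
+ and the COLOR_OFFSET field holds the offset in 4 KiB units at bits [31:23].
+
+   #include <stdint.h>
+
+   static inline uint32_t ccu_cntl2_color_offset(uint32_t gmem_size,
+                                                 uint32_t min_color_cache_size)
+   {
+       uint32_t off = (gmem_size - min_color_cache_size) >> 12; /* 4 KiB units */
+       return (off & 0x1ff) << 23;   /* COLOR_OFFSET field contribution */
+   }
+ -->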
+ <!-- 0x88e6-0x88ef invalid -->
+ <!-- always 0x0 ? -->
+ <reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" usage="cmd"/>
+ <!-- could be for separate stencil? (or may not be a flag buffer at all) -->
+ <reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64"/>
+ <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH">
+ <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x88f4" name="RB_UNKNOWN_88F4" low="0" high="2"/>
+ <!-- Connected to VK_EXT_fragment_density_map? -->
+ <reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
+ <!-- 0x88f6-0x88ff invalid -->
+ <reg64 offset="0x8900" name="RB_DEPTH_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8902" name="RB_DEPTH_FLAG_BUFFER_PITCH" usage="rp_blit">
+ <bitfield name="PITCH" low="0" high="6" shr="6" type="uint"/>
+ <!-- TODO: actually part of array pitch -->
+ <bitfield name="UNK8" low="8" high="10"/>
+ <bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
+ </reg32>
+ <array offset="0x8903" name="RB_MRT_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
+ <reg64 offset="0" name="ADDR" type="waddress" align="64"/>
+ <reg32 offset="2" name="PITCH">
+ <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
+ </reg32>
+ </array>
+ <!-- 0x891b-0x8926 invalid -->
+ <doc>
+ RB_SAMPLE_COUNT_ADDR register is used up to (and including) a730. After that
+ the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT.
+ </doc>
+ <reg64 offset="0x8927" name="RB_SAMPLE_COUNT_ADDR" type="waddress" align="16" usage="cmd"/>
+ <!-- 0x8929-0x89ff invalid -->
+
+ <!-- TODO: there are some registers in the 0x8a00-0x8bff range -->
+
+ <!--
+ These show up in a6xx gen3+, but so far we haven't found an example of
+ the blob writing a non-zero value:
+ -->
+ <reg32 offset="0x8a00" name="RB_UNKNOWN_8A00" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x8a10" name="RB_UNKNOWN_8A10" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/>
+
+ <reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
+
+ <bitset name="a6xx_2d_surf_info" inline="yes">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <bitfield name="FLAGS" pos="12" type="boolean"/>
+ <bitfield name="SRGB" pos="13" type="boolean"/>
+ <!-- the rest is only for src -->
+ <bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
+ <bitfield name="FILTER" pos="16" type="boolean"/>
+ <bitfield name="UNK17" pos="17" type="boolean"/>
+ <bitfield name="SAMPLES_AVERAGE" pos="18" type="boolean"/>
+ <bitfield name="UNK19" pos="19" type="boolean"/>
+ <bitfield name="UNK20" pos="20" type="boolean"/>
+ <bitfield name="UNK21" pos="21" type="boolean"/>
+ <bitfield name="UNK22" pos="22" type="boolean"/>
+ <bitfield name="UNK23" low="23" high="26"/>
+ <bitfield name="UNK28" pos="28" type="boolean"/>
+ </bitset>
+
+ <!-- 0x8c02-0x8c16 invalid -->
+ <!-- TODO: RB_2D_DST_INFO has 17 valid bits (doesn't match a6xx_2d_surf_info) -->
+ <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_surf_info" usage="rp_blit"/>
+ <reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <!-- this is a guess but seems likely (for NV12/IYUV): -->
+ <reg64 offset="0x8c1b" name="RB_2D_DST_PLANE1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1d" name="RB_2D_DST_PLANE_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c1e" name="RB_2D_DST_PLANE2" type="waddress" align="64" usage="rp_blit"/>
+
+ <reg64 offset="0x8c20" name="RB_2D_DST_FLAGS" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_2D_DST_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <!-- this is a guess but seems likely (for NV12 with UBWC): -->
+ <reg64 offset="0x8c23" name="RB_2D_DST_FLAGS_PLANE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c25" name="RB_2D_DST_FLAGS_PLANE_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+
+ <!-- TODO: 0x8c26-0x8c33 are all full 32-bit registers -->
+ <!-- unlike a5xx, these are per channel values rather than packed -->
+ <reg32 offset="0x8c2c" name="RB_2D_SRC_SOLID_C0" usage="rp_blit"/>
+ <reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
+ <reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
+ <reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
+ <!-- 0x8c34-0x8dff invalid -->
+
+ <!-- always 0x1 ? either doesn't exist for a650 or write-only: -->
+ <reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/>
+ <!-- 0x8e00-0x8e03 invalid -->
+ <reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff -->
+ <reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
+ <!-- 0x02080000 in GMEM, zero otherwise? -->
+ <reg32 offset="0x8e06" name="RB_UNKNOWN_8E06" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A6XX">
+ <bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
+ <!-- concurrent resolves are apparently a 2-bit enum on a650+ -->
+ <bitfield name="CONCURRENT_RESOLVE" pos="2" type="boolean"/>
+ <bitfield name="DEPTH_OFFSET_HI" pos="7" type="hex"/>
+ <bitfield name="COLOR_OFFSET_HI" pos="9" type="hex"/>
+ <bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
+ <!-- GMEM offset of CCU depth cache -->
+ <bitfield name="DEPTH_OFFSET" low="12" high="20" shr="12" type="hex"/>
+ <bitfield name="COLOR_CACHE_SIZE" low="21" high="22" type="a6xx_ccu_cache_size"/>
+ <!-- GMEM offset of CCU color cache
+ for GMEM rendering, we set it to GMEM size minus the minimum
+ CCU color cache size. CCU color cache will be needed in some
+ resolve cases, and in those cases we need to reserve the end
+ of GMEM for color cache.
+ -->
+ <bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
+ <!--TODO: valid mask 0xfffffc1f -->
+ </reg32>
+ <reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A7XX-">
+ <bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
+ <bitfield name="CONCURRENT_RESOLVE" pos="2" type="boolean"/>
+ <!-- rest of the bits were moved to RB_CCU_CNTL2 -->
+ </reg32>
+ <reg32 offset="0x8e08" name="RB_NC_MODE_CNTL">
+ <bitfield name="MODE" pos="0" type="boolean"/>
+ <bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
+ <bitfield name="MIN_ACCESS_LENGTH" pos="3" type="boolean"/> <!-- true=64b false=32b -->
+ <bitfield name="AMSBC" pos="4" type="boolean"/>
+ <bitfield name="UPPER_BIT" pos="10" type="uint"/>
+ <bitfield name="RGB565_PREDICATOR" pos="11" type="boolean"/>
+ <bitfield name="UNK12" low="12" high="13"/>
+ </reg32>
+ <reg32 offset="0x8e09" name="RB_UNKNOWN_8E09" variants="A7XX-" usage="cmd"/>
+ <!-- 0x8e09-0x8e0f invalid -->
+ <array offset="0x8e10" name="RB_PERFCTR_RB_SEL" stride="1" length="8"/>
+ <array offset="0x8e18" name="RB_PERFCTR_CCU_SEL" stride="1" length="5"/>
+ <!-- 0x8e1d-0x8e1f invalid -->
+ <!-- 0x8e20-0x8e25 more perfcntr sel? -->
+ <!-- 0x8e26-0x8e27 invalid -->
+ <reg32 offset="0x8e28" name="RB_CMP_DBG_ECO_CNTL"/>
+ <!-- 0x8e29-0x8e2b invalid -->
+ <array offset="0x8e2c" name="RB_PERFCTR_CMP_SEL" stride="1" length="4"/>
+ <array offset="0x8e30" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A7XX-"/>
+ <reg32 offset="0x8e3b" name="RB_RB_SUB_BLOCK_SEL_CNTL_HOST"/>
+ <reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/>
+ <!-- 0x8e3e-0x8e4f invalid -->
+ <!-- GMEM save/restore for preemption: -->
+ <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE" pos="0" type="boolean"/>
+ <!-- address for GMEM save/restore? -->
+ <reg32 offset="0x8e51" name="RB_UNKNOWN_8E51" type="waddress" align="1"/>
+ <!-- 0x8e53-0x8e7f invalid -->
+ <reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/>
+ <!-- 0x8e80-0x8e83 are valid -->
+ <!-- 0x8e84-0x90ff invalid -->
+
+ <!-- 0x9000-0x90ff invalid -->
+
+ <reg32 offset="0x9100" name="VPC_GS_PARAM" variants="A6XX" usage="rp_blit">
+ <bitfield name="LINELENGTHLOC" low="0" high="7" type="uint"/>
+ </reg32>
+
+ <bitset name="a6xx_vpc_xs_clip_cntl" inline="yes">
+ <bitfield name="CLIP_MASK" low="0" high="7" type="uint"/>
+ <!-- there can be up to 8 total clip/cull distance outputs,
+ but apparently VPC can only deal with vec4, so when there are
+ more than 4 outputs a second location needs to be programmed
+ -->
+ <bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
+ <bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
+ </bitset>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_xs_layer_cntl" inline="yes">
+ <bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
+ <bitfield name="VIEWLOC" low="8" high="15" type="uint"/>
+ <bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
+ </bitset>
+
+ <reg32 offset="0x9104" name="VPC_VS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+
+ <reg32 offset="0x9314" name="VPC_VS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+
+ <reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
+ <!-- this mirrors PC_RASTER_CNTL::DISCARD, although it seems it's unused -->
+ <bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
+ <bitfield name="UNK2" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x9108" name="VPC_POLYGON_MODE" usage="rp_blit">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </reg32>
+
+ <bitset name="a6xx_primitive_cntl_0" inline="yes">
+ <bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
+ <bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
+ <bitfield name="D3D_VERTEX_ORDERING" pos="2" type="boolean">
+ <doc>
+ Swaps TESS_CW_TRIS/TESS_CCW_TRIS, and also makes
+ triangle fans and triangle strips use the D3D
+ order instead of the OpenGL order.
+ </doc>
+ </bitfield>
+ <bitfield name="UNK3" pos="3" type="boolean"/>
+ </bitset>
+
+ <bitset name="a6xx_primitive_cntl_5" inline="yes">
+ <doc>
+ geometry shader
+ </doc>
+ <!-- TODO: first 16 bits are valid so something is wrong or missing here -->
+ <bitfield name="GS_VERTICES_OUT" low="0" high="7" type="uint"/>
+ <bitfield name="GS_INVOCATIONS" low="10" high="14" type="uint"/>
+ <bitfield name="LINELENGTHEN" pos="15" type="boolean"/>
+ <bitfield name="GS_OUTPUT" low="16" high="17" type="a6xx_tess_output"/>
+ <bitfield name="UNK18" pos="18"/>
+ </bitset>
+
+ <bitset name="a6xx_multiview_cntl" inline="yes">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="DISABLEMULTIPOS" pos="1" type="boolean">
+ <doc>
+ Multi-position output lets the last geometry
+ stage shader write multiple copies of
+ gl_Position. If disabled then the VS is run once
+ for each view, and ViewID is passed as a
+ register to the VS.
+ </doc>
+ </bitfield>
+ <bitfield name="VIEWS" low="2" high="6" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x9109" name="VPC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_MULTIVIEW_MASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" variants="A7XX-" usage="rp_blit"/>
+
+ <enum name="a6xx_varying_interp_mode">
+ <value value="0" name="INTERP_SMOOTH"/>
+ <value value="1" name="INTERP_FLAT"/>
+ <value value="2" name="INTERP_ZERO"/>
+ <value value="3" name="INTERP_ONE"/>
+ </enum>
+
+ <enum name="a6xx_varying_ps_repl_mode">
+ <value value="0" name="PS_REPL_NONE"/>
+ <value value="1" name="PS_REPL_S"/>
+ <value value="2" name="PS_REPL_T"/>
+ <value value="3" name="PS_REPL_ONE_MINUS_T"/>
+ </enum>
+
+ <!-- 0x9109-0x91ff invalid -->
+ <array offset="0x9200" name="VPC_VARYING_INTERP" stride="1" length="8" usage="rp_blit">
+ <doc>Packed array of a6xx_varying_interp_mode</doc>
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+ <array offset="0x9208" name="VPC_VARYING_PS_REPL" stride="1" length="8" usage="rp_blit">
+ <doc>Packed array of a6xx_varying_ps_repl_mode</doc>
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+
+ <!-- always 0x0 -->
+ <reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
+
+ <array offset="0x9212" name="VPC_VAR" stride="1" length="4" usage="rp_blit">
+ <!-- one bit per varying component: -->
+ <reg32 offset="0" name="DISABLE"/>
+ </array>
+
+ <reg32 offset="0x9216" name="VPC_SO_CNTL" usage="rp_blit">
+ <!--
+ Choose which DWORD to write to. There is an array of
+ (4 * 64) DWORDs, dumped in the devcoredump at
+ HLSQ_INST_RAM dword 0x400. Each DWORD corresponds to a
+ (VPC location, stream) pair like so:
+
+ location 0, stream 0
+ location 2, stream 0
+ ...
+ location 126, stream 0
+ location 0, stream 1
+ location 2, stream 1
+ ...
+ location 126, stream 1
+ location 0, stream 2
+ ...
+
+ When EmitStreamVertex(N) happens, the HW goes to DWORD
+ 64 * N and then "executes" the next 64 DWORDs.
+
+ This field is auto-incremented when VPC_SO_PROG is
+ written to.
+ -->
+ <bitfield name="ADDR" low="0" high="7" type="hex"/>
+ <!-- clear all A_EN and B_EN bits for all DWORDs -->
+ <bitfield name="RESET" pos="16" type="boolean"/>
+ </reg32>
+ <!-- special register, write multiple times to load SO program (not readable) -->
+ <reg32 offset="0x9217" name="VPC_SO_PROG" usage="rp_blit">
+ <bitfield name="A_BUF" low="0" high="1" type="uint"/>
+ <bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
+ <bitfield name="A_EN" pos="11" type="boolean"/>
+ <bitfield name="B_BUF" low="12" high="13" type="uint"/>
+ <bitfield name="B_OFF" low="14" high="22" shr="2" type="uint"/>
+ <bitfield name="B_EN" pos="23" type="boolean"/>
+ </reg32>
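+ <!--
+ Sketch (an assumption, not from the source) of how the SO program might be
+ loaded given the auto-increment behaviour described above; write_reg() and
+ the REG_/VPC_SO_* macros are hypothetical helpers.
+
+ void load_so_program(const uint32_t *prog, unsigned count)
+ {
+     /* start at DWORD 0 and clear all A_EN/B_EN bits */
+     write_reg(REG_VPC_SO_CNTL, VPC_SO_CNTL_RESET | VPC_SO_CNTL_ADDR(0));
+     /* each write lands at the current ADDR, which then auto-increments */
+     for (unsigned i = 0; i < count; i++)
+         write_reg(REG_VPC_SO_PROG, prog[i]);
+ }
+ -->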
+
+ <reg64 offset="0x9218" name="VPC_SO_STREAM_COUNTS" type="waddress" align="32" usage="cmd"/>
+
+ <array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
+ <reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
+ <reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/>
+ <reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/>
+ <reg32 offset="4" name="BUFFER_OFFSET" low="2" high="31" shr="2"/>
+ <reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
+ </array>
+
+ <reg32 offset="0x9236" name="VPC_POINT_COORD_INVERT" usage="cmd">
+ <bitfield name="INVERT" pos="0" type="boolean"/>
+ </reg32>
+ <!-- 0x9237-0x92ff invalid -->
+ <!-- always 0x0 ? -->
+ <reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
+
+ <bitset name="a6xx_vpc_xs_pack" inline="yes">
+ <doc>
+ Number of varyings, plus four for gl_Position (plus one if gl_PointSize),
+ plus the number of transform-feedback (streamout) varyings when using the
+ hw streamout (rather than stg instructions in the shader).
+ </doc>
+ <bitfield name="STRIDE_IN_VPC" low="0" high="7" type="uint"/>
+ <bitfield name="POSITIONLOC" low="8" high="15" type="uint"/>
+ <bitfield name="PSIZELOC" low="16" high="23" type="uint"/>
+ <bitfield name="EXTRAPOS" low="24" high="27" type="uint">
+ <doc>
+ The number of extra copies of POSITION, i.e.
+ number of views minus one when multi-position
+ output is enabled, otherwise 0.
+ </doc>
+ </bitfield>
+ </bitset>
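+ <!--
+ A minimal sketch of the stride rule in the doc above (assumption: no
+ additional hardware-specific padding).
+
+ uint32_t stride_in_vpc(uint32_t num_varyings, bool writes_psize,
+                        bool hw_streamout, uint32_t num_streamout_varyings)
+ {
+     uint32_t stride = num_varyings + 4;        /* +4 for gl_Position */
+     if (writes_psize)
+         stride += 1;                           /* +1 for gl_PointSize */
+     if (hw_streamout)
+         stride += num_streamout_varyings;      /* hw streamout varyings */
+     return stride;
+ }
+ -->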
+ <reg32 offset="0x9301" name="VPC_VS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+
+ <reg32 offset="0x9304" name="VPC_CNTL_0" usage="rp_blit">
+ <bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
+ <!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
+ <bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
+ <bitfield name="VARYING" pos="16" type="boolean"/>
+ <bitfield name="VIEWIDLOC" low="24" high="31" type="uint">
+ <doc>
+ This VPC location will be overwritten with
+ ViewID when multiview is enabled. It's used when
+ fragment shaders read ViewID. It's only
+ strictly required for multi-position output,
+ where the same VS invocation is used for all the
+ views at once, but it can be used when multi-pos
+ output is disabled too, to avoid having to pass
+ ViewID through the VS.
+ </doc>
+ </bitfield>
+ </reg32>
+
+ <reg32 offset="0x9305" name="VPC_SO_STREAM_CNTL" usage="rp_blit">
+ <!--
+ It's offset by 1, and 0 means "disabled"
+ -->
+ <bitfield name="BUF0_STREAM" low="0" high="2" type="uint"/>
+ <bitfield name="BUF1_STREAM" low="3" high="5" type="uint"/>
+ <bitfield name="BUF2_STREAM" low="6" high="8" type="uint"/>
+ <bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
+ <bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
+ </reg32>
+ <reg32 offset="0x9306" name="VPC_SO_DISABLE" usage="rp_blit">
+ <bitfield name="DISABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x9307" name="VPC_POLYGON_MODE2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </reg32>
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <bitfield name="SIZE_GMEM" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_BASE_GMEM" variants="A7XX-" usage="rp_blit">
+ <bitfield name="BASE_GMEM" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <bitfield name="SIZE_GMEM" low="0" high="31"/>
+ </reg32>
+
+ <!-- 0x9307-0x95ff invalid -->
+
+ <!-- TODO: 0x9600-0x97ff range -->
+ <reg32 offset="0x9600" name="VPC_DBG_ECO_CNTL" usage="cmd"/> <!-- always 0x0 ? TODO: 0x1fbf37ff valid mask -->
+ <reg32 offset="0x9601" name="VPC_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" usage="cmd"/>
+ <reg32 offset="0x9602" name="VPC_UNKNOWN_9602" pos="0" usage="cmd"/> <!-- always 0x0 ? -->
+ <reg32 offset="0x9603" name="VPC_UNKNOWN_9603" low="0" high="26"/>
+ <array offset="0x9604" name="VPC_PERFCTR_VPC_SEL" stride="1" length="6" variants="A6XX"/>
+ <array offset="0x960b" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A7XX-"/>
+ <!-- 0x960a-0x9623 invalid -->
+ <!-- TODO: regs from 0x9624-0x963a -->
+ <!-- 0x963b-0x97ff invalid -->
+
+ <reg32 offset="0x9800" name="PC_TESS_NUM_VERTEX" low="0" high="5" type="uint" usage="rp_blit"/>
+
+ <!-- always 0x0 ? -->
+ <reg32 offset="0x9801" name="PC_HS_INPUT_SIZE" usage="rp_blit">
+ <bitfield name="SIZE" low="0" high="10" type="uint"/>
+ <bitfield name="UNK13" pos="13"/>
+ </reg32>
+
+ <reg32 offset="0x9802" name="PC_TESS_CNTL" usage="rp_blit">
+ <bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
+ <bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
+ </reg32>
+
+ <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" usage="rp_blit"/>
+
+ <reg32 offset="0x9805" name="PC_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
+
+ <reg32 offset="0x9806" name="PC_PS_CNTL" usage="rp_blit">
+ <bitfield name="PRIMITIVEIDEN" pos="0" type="boolean"/>
+ </reg32>
+
+ <!-- New in a6xx gen3+ -->
+ <reg32 offset="0x9808" name="PC_SO_STREAM_CNTL" usage="rp_blit">
+ <bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
+ </reg32>
+
+ <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL">
+ <bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
+ </reg32>
+ <!-- 0x980b-0x983f invalid -->
+
+ <!-- 0x9840 - 0x9842 are not readable -->
+ <reg32 offset="0x9840" name="PC_DRAW_CMD">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0x9841" name="PC_DISPATCH_CMD">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0x9842" name="PC_EVENT_CMD">
+ <!-- I think only the low bit is actually used? -->
+ <bitfield name="STATE_ID" low="16" high="23"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
+ <!--
+ 0x9880 is written in a lot of places by SQE, and the same value gets written
+ to control reg 0x12a. It is set by CP_SET_MARKER, so let's name it after
+ that.
+ -->
+ <reg32 offset="0x9880" name="PC_MARKER"/>
+
+ <!-- 0x9843-0x997f invalid -->
+
+ <reg32 offset="0x9981" name="PC_POLYGON_MODE" variants="A6XX" usage="rp_blit">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </reg32>
+ <reg32 offset="0x9809" name="PC_POLYGON_MODE" variants="A7XX-" usage="rp_blit">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </reg32>
+
+ <reg32 offset="0x9980" name="PC_RASTER_CNTL" variants="A6XX" usage="rp_blit">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </reg32>
+ <!-- VPC_RASTER_CNTL -->
+ <reg32 offset="0x9107" name="PC_RASTER_CNTL" variants="A7XX-" usage="rp_blit">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x9317" name="PC_RASTER_CNTL_V2" variants="A7XX-" usage="rp_blit">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </reg32>
+
+ <!-- Both are a750+.
+ Probably needed to correctly overlap execution of several draws.
+ -->
+ <reg32 offset="0x9885" name="PC_TESS_PARAM_SIZE" variants="A7XX-" usage="cmd"/>
+ <!-- The blob adds a bit more space ({0x10, 0x20, 0x30, 0x40} bytes), but the meaning of
+ this additional space is not known.
+ -->
+ <reg32 offset="0x9886" name="PC_TESS_FACTOR_SIZE" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x9982-0x9aff invalid -->
+
+ <reg32 offset="0x9b00" name="PC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" usage="rp_blit"/>
+
+ <bitset name="a6xx_xs_out_cntl" inline="yes">
+ <doc>
+ Number of varyings, plus four for gl_Position (plus one if gl_PointSize),
+ plus the number of transform-feedback (streamout) varyings when using the
+ hw streamout (rather than stg instructions in the shader).
+ </doc>
+ <bitfield name="STRIDE_IN_VPC" low="0" high="7" type="uint"/>
+ <bitfield name="PSIZE" pos="8" type="boolean"/>
+ <bitfield name="LAYER" pos="9" type="boolean"/>
+ <bitfield name="VIEW" pos="10" type="boolean"/>
+ <!-- note: PC_VS_OUT_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
+ <bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
+ <bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <reg32 offset="0x9b01" name="PC_VS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
+ <reg32 offset="0x9b03" name="PC_HS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+
+ <reg32 offset="0x9b05" name="PC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" usage="rp_blit"/>
+
+ <reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
+ <doc>
+ size in vec4s of per-primitive storage for gs. TODO: not actually in VPC
+ </doc>
+ <bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x9b07" name="PC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
+ <!-- mask of enabled views, doesn't exist on A630 -->
+ <reg32 offset="0x9b08" name="PC_MULTIVIEW_MASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <!-- 0x9b09-0x9bff invalid -->
+ <reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
+ <!-- special register (but note first 8 bits can be written/read) -->
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ <bitfield name="STATE_ID" low="8" high="15"/>
+ </reg32>
+ <!-- 0x9c01-0x9dff invalid -->
+ <!-- TODO: 0x9e00-0xa000 range incomplete -->
+ <reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
+ <reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg64 offset="0x9e04" name="PC_DRAW_INDX_BASE"/>
+ <reg32 offset="0x9e06" name="PC_DRAW_FIRST_INDX" type="uint"/>
+ <reg32 offset="0x9e07" name="PC_DRAW_MAX_INDICES" type="uint"/>
+ <reg64 offset="0x9e08" name="PC_TESSFACTOR_ADDR" variants="A6XX" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESSFACTOR_ADDR" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+
+ <reg32 offset="0x9e0b" name="PC_DRAW_INITIATOR" type="vgt_draw_initiator_a4xx">
+ <doc>
+ Possibly not really "initiating" the draw but the layout is similar
+ to VGT_DRAW_INITIATOR on older gens
+ </doc>
+ </reg32>
+ <reg32 offset="0x9e0c" name="PC_DRAW_NUM_INSTANCES" type="uint"/>
+ <reg32 offset="0x9e0d" name="PC_DRAW_NUM_INDICES" type="uint"/>
+
+ <!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
+ <reg32 offset="0x9e11" name="PC_VSTREAM_CONTROL">
+ <bitfield name="UNK0" low="0" high="15"/>
+ <bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
+ <bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ </reg32>
+ <reg64 offset="0x9e12" name="PC_BIN_PRIM_STRM" type="waddress" align="32"/>
+ <reg64 offset="0x9e14" name="PC_BIN_DRAW_STRM" type="waddress" align="32"/>
+
+ <reg32 offset="0x9e1c" name="PC_VISIBILITY_OVERRIDE">
+ <doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
+ <bitfield name="OVERRIDE" pos="0" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="cmd"/>
+
+ <array offset="0x9e34" name="PC_PERFCTR_PC_SEL" stride="1" length="8" variants="A6XX"/>
+ <array offset="0x9e42" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A7XX-"/>
+
+ <!-- always 0x0 -->
+ <reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/>
+
+ <reg32 offset="0xa000" name="VFD_CONTROL_0" usage="rp_blit">
+ <bitfield name="FETCH_CNT" low="0" high="5" type="uint"/>
+ <bitfield name="DECODE_CNT" low="8" high="13" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa001" name="VFD_CONTROL_1" usage="rp_blit">
+ <bitfield name="REGID4VTX" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="REGID4INST" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="REGID4PRIMID" low="16" high="23" type="a3xx_regid"/>
+ <!-- only used for VS in non-multi-position-output case -->
+ <bitfield name="REGID4VIEWID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa002" name="VFD_CONTROL_2" usage="rp_blit">
+ <bitfield name="REGID_HSRELPATCHID" low="0" high="7" type="a3xx_regid">
+ <doc>
+ This is the ID of the current patch within the
+ subdraw, used to calculate the offset of the
+ patch within the HS->DS buffers. When a draw is
+ split into multiple subdraws then this differs
+ from gl_PrimitiveID on the second, third, etc.
+ subdraws.
+ </doc>
+ </bitfield>
+ <bitfield name="REGID_INVOCATIONID" low="8" high="15" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa003" name="VFD_CONTROL_3" usage="rp_blit">
+ <bitfield name="REGID_DSPRIMID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="REGID_DSRELPATCHID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa004" name="VFD_CONTROL_4" usage="rp_blit">
+ <bitfield name="UNK0" low="0" high="7" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa005" name="VFD_CONTROL_5" usage="rp_blit">
+ <bitfield name="REGID_GSHEADER" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="UNK8" low="8" high="15" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa006" name="VFD_CONTROL_6" usage="rp_blit">
+ <!--
+ True if gl_PrimitiveID is read via the FS
+ -->
+ <bitfield name="PRIMID4PSEN" pos="0" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xa007" name="VFD_MODE_CNTL" usage="cmd">
+ <bitfield name="RENDER_MODE" low="0" high="2" type="a6xx_render_mode"/>
+ </reg32>
+
+ <reg32 offset="0xa008" name="VFD_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
+ <reg32 offset="0xa009" name="VFD_ADD_OFFSET" usage="cmd">
+ <!-- add VFD_INDEX_OFFSET to REGID4VTX -->
+ <bitfield name="VERTEX" pos="0" type="boolean"/>
+ <!-- add VFD_INSTANCE_START_OFFSET to REGID4INST -->
+ <bitfield name="INSTANCE" pos="1" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xa00e" name="VFD_INDEX_OFFSET" usage="rp_blit"/>
+ <reg32 offset="0xa00f" name="VFD_INSTANCE_START_OFFSET" usage="rp_blit"/>
+ <array offset="0xa010" name="VFD_FETCH" stride="4" length="32" usage="rp_blit">
+ <reg64 offset="0x0" name="BASE" type="address" align="1"/>
+ <reg32 offset="0x2" name="SIZE" type="uint"/>
+ <reg32 offset="0x3" name="STRIDE" low="0" high="11" type="uint"/>
+ </array>
+ <array offset="0xa090" name="VFD_DECODE" stride="2" length="32" usage="rp_blit">
+ <reg32 offset="0x0" name="INSTR">
+ <!-- IDX and byte OFFSET into VFD_FETCH -->
+ <bitfield name="IDX" low="0" high="4" type="uint"/>
+ <bitfield name="OFFSET" low="5" high="16"/>
+ <bitfield name="INSTANCED" pos="17" type="boolean"/>
+ <bitfield name="FORMAT" low="20" high="27" type="a6xx_format"/>
+ <bitfield name="SWAP" low="28" high="29" type="a3xx_color_swap"/>
+ <bitfield name="UNK30" pos="30" type="boolean"/>
+ <bitfield name="FLOAT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x1" name="STEP_RATE" type="uint"/>
+ </array>
+ <array offset="0xa0d0" name="VFD_DEST_CNTL" stride="1" length="32" usage="rp_blit">
+ <reg32 offset="0x0" name="INSTR">
+ <bitfield name="WRITEMASK" low="0" high="3" type="hex"/>
+ <bitfield name="REGID" low="4" high="11" type="a3xx_regid"/>
+ </reg32>
+ </array>
+
+ <reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
+
+ <reg32 offset="0xa600" name="VFD_UNKNOWN_A600" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/>
+ <array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="16" variants="A7XX-"/>
+
+ <!--
+ Note: this seems to always be paired with another bit in another
+ block.
+ -->
+ <enum name="a6xx_threadsize">
+ <value value="0" name="THREAD64"/>
+ <value value="1" name="THREAD128"/>
+ </enum>
+
+ <bitset name="a6xx_sp_xs_ctrl_reg0" inline="yes">
+ <!-- if set to SINGLE, only use 1 concurrent wave on each SP -->
+ <bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
+ <!--
+ When b31 is set we just see FULLREGFOOTPRINT set. The pattern of
+ used registers is a bit odd too:
+ - used (half): 0-15 68-179 (cnt=128, max=179)
+ - used (full): 0-33 50-69 71 73 75 77 79 81 83 85 87 89-105 107 109 111 113 115 117 119 121 123 125 127>
+ whereas we usually see a (mostly) contiguous range of regs used. But if
+ the full and half ranges are merged (i.e. rN counts as hr(N*2) and hr(N*2+1)),
+ then:
+ - used (merged): 0-191 (cnt=192, max=191)
+ So it seems that if b31 is set, the half precision registers overlap
+ the full precision registers. (Which seems like a pretty sensible
+ feature; it's not clear when you *wouldn't* want to use it, since it
+ gives register allocation more flexibility.)
+ -->
+ <bitfield name="HALFREGFOOTPRINT" low="1" high="6" type="uint"/>
+ <bitfield name="FULLREGFOOTPRINT" low="7" high="12" type="uint"/>
+ <!-- could it be a low bit of branchstack? -->
+ <bitfield name="UNK13" pos="13" type="boolean"/>
+ <!-- seems to be the nesting level for flow control... -->
+ <bitfield name="BRANCHSTACK" low="14" high="19" type="uint"/>
+ </bitset>
+
+ <bitset name="a6xx_sp_xs_config" inline="yes">
+ <!--
+ Each of these are set if the given resource type is used
+ with the Vulkan/bindless binding model.
+ -->
+ <bitfield name="BINDLESS_TEX" pos="0" type="boolean"/>
+ <bitfield name="BINDLESS_SAMP" pos="1" type="boolean"/>
+ <bitfield name="BINDLESS_IBO" pos="2" type="boolean"/>
+ <bitfield name="BINDLESS_UBO" pos="3" type="boolean"/>
+
+ <bitfield name="ENABLED" pos="8" type="boolean"/>
+ <!--
+ number of textures and samplers. These might be swapped; with GL we
+ always see the same value for both.
+ -->
+ <bitfield name="NTEX" low="9" high="16" type="uint"/>
+ <bitfield name="NSAMP" low="17" high="21" type="uint"/>
+ <bitfield name="NIBO" low="22" high="28" type="uint"/>
+ </bitset>
+
+ <bitset name="a6xx_sp_xs_prim_cntl" inline="yes">
+ <!-- # of VS outputs including pos/psize -->
+ <bitfield name="OUT" low="0" high="5" type="uint"/>
+ <!-- FLAGS_REGID only for GS -->
+ <bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
+ </bitset>
+
+ <reg32 offset="0xa800" name="SP_VS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <!--
+ This field actually controls all geometry stages. TCS, TES, and
+ GS must have the same mergedregs setting as VS.
+ -->
+ <bitfield name="MERGEDREGS" pos="20" type="boolean"/>
+ <!--
+ Creates a separate preamble-only thread?
+
+ Early preamble has the following limitations:
+ - Only shared, a1, and const regs can be used
+ (accessing other regs results in a GPU fault);
+ - No cat5/cat6, only the stc/ldc variants work;
+ - Values written to shared regs are not accessible by the rest
+ of the shader;
+ - Instructions before shps are also considered to be a part of
+ the early preamble.
+
+ Note: for all shaders from D3D11 games, the blob produced preambles
+ compatible with early preamble mode.
+ -->
+ <bitfield name="EARLYPREAMBLE" pos="21" type="boolean"/>
+ </reg32>
+ <!-- bitmask of true/false conditions for VS brac.N instructions,
+ bit N corresponds to brac.N -->
+ <reg32 offset="0xa801" name="SP_VS_BRANCH_COND" type="hex"/>
+ <!-- # of VS outputs including pos/psize -->
+ <reg32 offset="0xa802" name="SP_VS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
+ <array offset="0xa803" name="SP_VS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
+ </reg32>
+ </array>
+ <!--
+ Starting with a5xx, position/psize outputs from the shader end up in the
+ SP_VS_OUT map, at the highest OUTLOCn positions. (Generally they are
+ the last entries too, except when gl_PointCoord is used: the blob inserts
+ an extra varying after them, but with a lower OUTLOC position.) If
+ present, psize is last, preceded by position.
+ -->
+ <array offset="0xa813" name="SP_VS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+
+ <bitset name="a6xx_sp_xs_pvt_mem_param" inline="yes">
+ <bitfield name="MEMSIZEPERITEM" low="0" high="7" shr="9">
+ <doc>The size of memory that ldp/stp can address.</doc>
+ </bitfield>
+ <bitfield name="HWSTACKSIZEPERTHREAD" low="24" high="31">
+ <doc>
+ Seems to be the same as a3xx. The maximum stack
+ size in units of 4 calls, so a call depth of 7
+ would result in a value of 2.
+ TODO: What's the actual size per call, i.e. the
+ size of the PC? a3xx docs say it's 16 bits
+ there, but the length register now takes 28 bits
+ so it's probably been bumped to 32 bits.
+ </doc>
+ </bitfield>
+ </bitset>
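+ <!--
+ Encode helpers sketching the shr/unit rules above (assumption: sizes are
+ rounded up to the field granularity).
+
+ uint32_t memsizeperitem_field(uint32_t bytes_per_item)
+ {
+     return (bytes_per_item + 511) >> 9;   /* field stores size in 512-byte units */
+ }
+
+ uint32_t hwstacksizeperthread_field(uint32_t max_call_depth)
+ {
+     return (max_call_depth + 3) / 4;      /* units of 4 calls: depth 7 gives 2 */
+ }
+ -->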
+
+ <bitset name="a6xx_sp_xs_pvt_mem_size" inline="yes">
+ <bitfield name="TOTALPVTMEMSIZE" low="0" high="17" shr="12"/>
+ <bitfield name="PERWAVEMEMLAYOUT" pos="31" type="boolean">
+ <doc>
+ There are four indices used to compute the
+ private memory location for an access:
+
+ - stp/ldp offset
+ - fiber id
+ - wavefront id (a swizzled version of what "getwid" returns)
+ - SP ID (the same as what "getspid" returns)
+
+ The stride for the SP ID is always set by
+ TOTALPVTMEMSIZE. In the per-wave layout, the
+ indices are used in this order:
+
+ - offset % 4 (offset within dword)
+ - fiber id
+ - offset / 4
+ - wavefront id
+ - SP ID
+
+ and the stride for the wavefront ID is
+ MEMSIZEPERITEM, multiplied by 128 (fibers per
+ wavefront). In the per-fiber layout, the indices
+ are used in this order:
+
+ - offset
+ - fiber id % 4
+ - wavefront id
+ - fiber id / 4
+ - SP ID
+
+ and the stride for the fiber id/wavefront id
+ combo is MEMSIZEPERITEM.
+
+ Note: Accesses of more than 1 dword do not work
+ with per-fiber layout. The blob will fall back
+ to per-wave instead.
+ </doc>
+ </bitfield>
+ </bitset>
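+ <!--
+ Sketch of just the two top-level strides named above, for the per-wave
+ layout (the lower-level interleaving is omitted). All names are
+ illustrative; total_pvt_mem_size and mem_size_per_item are the decoded
+ byte values of TOTALPVTMEMSIZE and MEMSIZEPERITEM.
+
+ uint64_t pvt_mem_wave_base(uint64_t base, uint32_t sp_id, uint32_t wave_id,
+                            uint32_t total_pvt_mem_size,
+                            uint32_t mem_size_per_item)
+ {
+     return base
+          + (uint64_t)sp_id * total_pvt_mem_size          /* SP ID stride */
+          + (uint64_t)wave_id * mem_size_per_item * 128;  /* 128 fibers per wave */
+ }
+ -->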
+
+ <bitset name="a6xx_sp_xs_pvt_mem_hw_stack_offset" inline="yes">
+ <doc>
+ This seems to be the equivalent of HWSTACKOFFSET in
+ a3xx. The ldp/stp offset formula above isn't affected by
+ HWSTACKSIZEPERTHREAD at all, so the HW return address
+ stack seems to be after all the normal per-SP private
+ memory.
+ </doc>
+ <bitfield name="OFFSET" low="0" high="18" shr="11"/>
+ </bitset>
+
+ <reg32 offset="0xa81b" name="SP_VS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa81c" name="SP_VS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa821" name="SP_VS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa822" name="SP_VS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xa824" name="SP_VS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa825" name="SP_VS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82d" name="SP_VS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xa830" name="SP_HS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <!-- There is no mergedregs bit, that comes from the VS. -->
+ <bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
+ </reg32>
+ <!--
+ Total size of local storage in dwords divided by the wave size.
+ The maximum value is 64. With the wave size being always 64 for HS,
+ the maximum size of local storage should be:
+ 64 (wavesize) * 64 (SP_HS_WAVE_INPUT_SIZE) * 4 = 16k
+ -->
+ <reg32 offset="0xa831" name="SP_HS_WAVE_INPUT_SIZE" low="0" high="7" type="uint" usage="rp_blit"/>
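+ <!--
+ Sketch of the encode rule above (assumption: the size is rounded up to a
+ multiple of the wave size).
+
+ uint32_t hs_wave_input_size(uint32_t local_storage_dwords)
+ {
+     return (local_storage_dwords + 63) / 64;  /* dwords / wave size (64 for HS) */
+ }
+
+ /* maximum: 64 * 64 dwords * 4 bytes = 16k of local storage */
+ -->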
+ <reg32 offset="0xa832" name="SP_HS_BRANCH_COND" type="hex" usage="rp_blit"/>
+
+ <!-- TODO: exact same layout as 0xa81b-0xa825 -->
+ <reg32 offset="0xa833" name="SP_HS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa834" name="SP_HS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa836" name="SP_HS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa837" name="SP_HS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa839" name="SP_HS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa83a" name="SP_HS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xa83c" name="SP_HS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82f" name="SP_HS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xa840" name="SP_DS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <!-- There is no mergedregs bit, that comes from the VS. -->
+ <bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa841" name="SP_DS_BRANCH_COND" type="hex"/>
+
+ <!-- TODO: exact same layout as 0xa802-0xa81a -->
+ <reg32 offset="0xa842" name="SP_DS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
+ <array offset="0xa843" name="SP_DS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
+ </reg32>
+ </array>
+ <array offset="0xa853" name="SP_DS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+
+ <!-- TODO: exact same layout as 0xa81b-0xa825 -->
+ <reg32 offset="0xa85b" name="SP_DS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa85c" name="SP_DS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa85e" name="SP_DS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa861" name="SP_DS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa862" name="SP_DS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xa864" name="SP_DS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa865" name="SP_DS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa868" name="SP_DS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xa870" name="SP_GS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <!-- There is no mergedregs bit, that comes from the VS. -->
+ <bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa871" name="SP_GS_PRIM_SIZE" low="0" high="7" type="uint" usage="rp_blit">
+ <doc>
+ Normally the size of the output of the last stage in
+ dwords. It should be programmed as follows:
+
+ size less than 63 - size
+ size of 63 (?) or 64 - 63
+ size greater than 64 - 64
+
+ What to program when the size is 61-63 is a guess, but
+ both the blob and ir3 align the size to 4 dwords so it
+ doesn't matter in practice.
+ </doc>
+ </reg32>
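+ <!--
+ The programming rule above, written out (the 63/64 case follows the doc's
+ guess):
+
+ uint32_t gs_prim_size_field(uint32_t size_dwords)
+ {
+     if (size_dwords < 63)
+         return size_dwords;
+     if (size_dwords <= 64)
+         return 63;
+     return 64;
+ }
+ -->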
+ <reg32 offset="0xa872" name="SP_GS_BRANCH_COND" type="hex" usage="rp_blit"/>
+
+ <!-- TODO: exact same layout as 0xa802-0xa81a -->
+ <reg32 offset="0xa873" name="SP_GS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
+ <array offset="0xa874" name="SP_GS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
+ <bitfield name="B_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
+ </reg32>
+ </array>
+
+ <array offset="0xa884" name="SP_GS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
+ <bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
+ <bitfield name="OUTLOC2" low="16" high="23" type="uint"/>
+ <bitfield name="OUTLOC3" low="24" high="31" type="uint"/>
+ </reg32>
+ </array>
+
+ <!-- TODO: exact same layout as 0xa81b-0xa825 -->
+ <reg32 offset="0xa88c" name="SP_GS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa88d" name="SP_GS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa88f" name="SP_GS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa890" name="SP_GS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa892" name="SP_GS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa893" name="SP_GS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xa895" name="SP_GS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa896" name="SP_GS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa899" name="SP_GS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+
+ <reg64 offset="0xa8a0" name="SP_VS_TEX_SAMP" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a2" name="SP_HS_TEX_SAMP" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a4" name="SP_DS_TEX_SAMP" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a6" name="SP_GS_TEX_SAMP" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a8" name="SP_VS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8aa" name="SP_HS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ac" name="SP_DS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ae" name="SP_GS_TEX_CONST" type="address" align="64" usage="cmd"/>
+
+ <!-- TODO: 4 unknown bool registers 0xa8c0-0xa8c3 -->
+
+ <reg32 offset="0xa980" name="SP_FS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
+ <bitfield name="UNK21" pos="21" type="boolean"/>
+ <bitfield name="VARYING" pos="22" type="boolean"/>
+ <bitfield name="LODPIXMASK" pos="23" type="boolean">
+ <doc>
+ Enable ALL helper invocations in a quad. Necessary for
+ fine derivatives and quad subgroup ops.
+ </doc>
+ </bitfield>
+ <!-- note: vk blob uses bit24 -->
+ <bitfield name="UNK24" pos="24" type="boolean"/>
+ <bitfield name="UNK25" pos="25" type="boolean"/>
+ <bitfield name="PIXLODENABLE" pos="26" type="boolean">
+ <doc>
+ Enable helper invocations. Enables 3 out of 4 fragments,
+ because the coarse derivatives only use half of the quad
+ and so one pixel's value is always unused.
+ </doc>
+ </bitfield>
+ <bitfield name="UNK27" pos="27" type="boolean"/>
+ <bitfield name="EARLYPREAMBLE" pos="28" type="boolean"/>
+ <bitfield name="MERGEDREGS" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa981" name="SP_FS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa982" name="SP_FS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa983" name="SP_FS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa985" name="SP_FS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa986" name="SP_FS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa988" name="SP_FS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+
+ <reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit">
+ <!-- per-mrt enable bit -->
+ <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="UNK8" pos="8" type="boolean"/>
+ <bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
+ <bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa98a" name="SP_SRGB_CNTL" usage="rp_blit">
+ <!-- Same as RB_SRGB_CNTL -->
+ <bitfield name="SRGB_MRT0" pos="0" type="boolean"/>
+ <bitfield name="SRGB_MRT1" pos="1" type="boolean"/>
+ <bitfield name="SRGB_MRT2" pos="2" type="boolean"/>
+ <bitfield name="SRGB_MRT3" pos="3" type="boolean"/>
+ <bitfield name="SRGB_MRT4" pos="4" type="boolean"/>
+ <bitfield name="SRGB_MRT5" pos="5" type="boolean"/>
+ <bitfield name="SRGB_MRT6" pos="6" type="boolean"/>
+ <bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xa98b" name="SP_FS_RENDER_COMPONENTS" usage="rp_blit">
+ <bitfield name="RT0" low="0" high="3"/>
+ <bitfield name="RT1" low="4" high="7"/>
+ <bitfield name="RT2" low="8" high="11"/>
+ <bitfield name="RT3" low="12" high="15"/>
+ <bitfield name="RT4" low="16" high="19"/>
+ <bitfield name="RT5" low="20" high="23"/>
+ <bitfield name="RT6" low="24" high="27"/>
+ <bitfield name="RT7" low="28" high="31"/>
+ </reg32>
+ <reg32 offset="0xa98c" name="SP_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="SAMPMASK_REGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="STENCILREF_REGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa98d" name="SP_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <bitfield name="MRT" low="0" high="3" type="uint"/>
+ </reg32>
+
+ <array offset="0xa98e" name="SP_FS_OUTPUT" stride="1" length="8" usage="rp_blit">
+ <doc>per MRT</doc>
+ <reg32 offset="0x0" name="REG">
+ <bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="HALF_PRECISION" pos="8" type="boolean"/>
+ </reg32>
+ </array>
+
+ <array offset="0xa996" name="SP_FS_MRT" stride="1" length="8" usage="rp_blit">
+ <reg32 offset="0" name="REG">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="COLOR_SINT" pos="8" type="boolean"/>
+ <bitfield name="COLOR_UINT" pos="9" type="boolean"/>
+ <bitfield name="UNK10" pos="10" type="boolean"/>
+ </reg32>
+ </array>
+
+ <reg32 offset="0xa99e" name="SP_FS_PREFETCH_CNTL" usage="rp_blit">
+ <bitfield name="COUNT" low="0" high="2" type="uint"/>
+ <bitfield name="IJ_WRITE_DISABLE" pos="3" type="boolean"/>
+ <doc>
+ Similar to "(eq)" flag but disables helper invocations
+ after the texture prefetch.
+ </doc>
+ <bitfield name="ENDOFQUAD" pos="4" type="boolean" />
+ <doc>
+ Bypass writing to regs and overwrite output with color from
+ CONSTSLOTID const regs.
+ </doc>
+ <bitfield name="WRITE_COLOR_TO_OUTPUT" pos="5" type="boolean"/>
+ <bitfield name="CONSTSLOTID" low="6" high="14" type="uint"/>
+ <!-- Blob never uses it -->
+ <bitfield name="CONSTSLOTID4COORD" low="16" high="24" type="uint" variants="A7XX-"/>
+ </reg32>
+ <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0" name="CMD" variants="A6XX">
+ <bitfield name="SRC" low="0" high="6" type="uint"/>
+ <bitfield name="SAMP_ID" low="7" high="10" type="uint"/>
+ <bitfield name="TEX_ID" low="11" high="15" type="uint"/>
+ <bitfield name="DST" low="16" high="21" type="a3xx_regid"/>
+ <bitfield name="WRMASK" low="22" high="25" type="hex"/>
+ <bitfield name="HALF" pos="26" type="boolean"/>
+ <doc>Results in color being zero</doc>
+ <bitfield name="UNK27" pos="27" type="boolean"/>
+ <bitfield name="BINDLESS" pos="28" type="boolean"/>
+ <bitfield name="CMD" low="29" high="31" type="a6xx_tex_prefetch_cmd"/>
+ </reg32>
+ </array>
+ <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0" name="CMD" variants="A7XX-">
+ <bitfield name="SRC" low="0" high="6" type="uint"/>
+ <bitfield name="SAMP_ID" low="7" high="9" type="uint"/>
+ <bitfield name="TEX_ID" low="10" high="12" type="uint"/>
+ <bitfield name="DST" low="13" high="18" type="a3xx_regid"/>
+ <bitfield name="WRMASK" low="19" high="22" type="hex"/>
+ <bitfield name="HALF" pos="23" type="boolean"/>
+ <bitfield name="BINDLESS" pos="25" type="boolean"/>
+ <bitfield name="CMD" low="26" high="29" type="a6xx_tex_prefetch_cmd"/>
+ </reg32>
+ </array>
+ <array offset="0xa9a3" name="SP_FS_BINDLESS_PREFETCH" stride="1" length="4" usage="rp_blit">
+ <reg32 offset="0" name="CMD">
+ <bitfield name="SAMP_ID" low="0" high="15" type="uint"/>
+ <bitfield name="TEX_ID" low="16" high="31" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0xa9a7" name="SP_FS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? -->
+ <reg32 offset="0xa9a9" name="SP_FS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+
+ <!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS -->
+
+
+
+
+ <reg32 offset="0xa9b0" name="SP_CS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="cmd">
+ <bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
+ <!-- seems to make SP use fewer concurrent threads when possible? -->
+ <bitfield name="UNK21" pos="21" type="boolean"/>
+ <!-- has a small impact on performance, not clear what it does -->
+ <bitfield name="UNK22" pos="22" type="boolean"/>
+ <bitfield name="EARLYPREAMBLE" pos="23" type="boolean"/>
+ <bitfield name="MERGEDREGS" pos="31" type="boolean"/>
+ </reg32>
+
+ <!-- set for compute shaders -->
+ <reg32 offset="0xa9b1" name="SP_CS_UNKNOWN_A9B1" usage="cmd">
+ <bitfield name="SHARED_SIZE" low="0" high="4" type="uint">
+ <doc>
+ If 0 - all 32k of shared storage is enabled, otherwise
+ (SHARED_SIZE + 1) * 1k is enabled.
+ The ldl/stl offset seems to be rewritten to 0 when it is beyond
+ this limit. This is different from ldlw/stlw, which wraps at
+ 64k (and has 36k of storage on A640 - reads between 36k-64k
+ always return 0)
+ </doc>
+ </bitfield>
+ <bitfield name="UNK5" pos="5" type="boolean"/>
+ <!-- always 1 ? -->
+ <bitfield name="UNK6" pos="6" type="boolean"/>
+ </reg32>
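+ <!--
+ Decode of SHARED_SIZE as described above (bytes of enabled shared storage):
+
+ uint32_t cs_shared_size_bytes(uint32_t shared_size)
+ {
+     return shared_size ? (shared_size + 1) * 1024 : 32 * 1024;
+ }
+ -->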
+ <reg32 offset="0xa9b2" name="SP_CS_BRANCH_COND" type="hex" usage="cmd"/>
+ <reg32 offset="0xa9b3" name="SP_CS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="cmd"/>
+ <reg64 offset="0xa9b4" name="SP_CS_OBJ_START" type="address" align="32" usage="cmd"/>
+ <reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_ADDR" align="32" usage="cmd"/>
+ <reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
+ <reg32 offset="0xa9ba" name="SP_CS_TEX_COUNT" low="0" high="7" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
+ <reg32 offset="0xa9bc" name="SP_CS_INSTRLEN" low="0" high="27" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="cmd"/>
+ <reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c5" name="SP_CS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+
+ <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_0 -->
+ <reg32 offset="0xa9c2" name="SP_CS_CNTL_0" usage="cmd">
+ <bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="WGSIZECONSTID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_1 -->
+ <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A6XX" usage="cmd">
+ <!-- gl_LocalInvocationIndex -->
+ <bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- a650 has 6 "SP cores" (but 3 "SP"). This makes it use only
+ one of those 6 "SP cores" -->
+ <bitfield name="SINGLE_SP_CORE" pos="8" type="boolean"/>
+ <!-- Must match SP_CS_CTRL -->
+ <bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
+ <!-- 1 thread per wave (ignored if bit9 set) -->
+ <bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A7XX-" usage="cmd">
+ <!-- gl_LocalInvocationIndex -->
+ <bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- Must match SP_CS_CTRL -->
+ <bitfield name="THREADSIZE" pos="8" type="a6xx_threadsize"/>
+ <!-- 1 thread per wave (would hang if THREAD128 is also set) -->
+ <bitfield name="THREADSIZE_SCALAR" pos="9" type="boolean"/>
+
+ <!-- Affects getone. If enabled, getone is sometimes executed one(?) fewer
+ time than there are subgroups.
+ -->
+ <bitfield name="UNK15" pos="15" type="boolean"/>
+ </reg32>
+
+ <!-- TODO: two 64kb aligned addresses at a9d0/a9d2 -->
+
+ <reg64 offset="0xa9e0" name="SP_FS_TEX_SAMP" type="address" align="16" usage="rp_blit"/>
+ <reg64 offset="0xa9e2" name="SP_CS_TEX_SAMP" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa9e4" name="SP_FS_TEX_CONST" type="address" align="64" usage="rp_blit"/>
+ <reg64 offset="0xa9e6" name="SP_CS_TEX_CONST" type="address" align="64" usage="cmd"/>
+
+ <enum name="a6xx_bindless_descriptor_size">
+ <doc>
+ This can alternatively be interpreted as a pitch shift, i.e. the
+ descriptor size is 2 &lt;&lt; N dwords.
+ </doc>
+ <value value="1" name="BINDLESS_DESCRIPTOR_16B"/>
+ <value value="3" name="BINDLESS_DESCRIPTOR_64B"/>
+ </enum>
+
+ <array offset="0xa9e8" name="SP_CS_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="cmd">
+ <reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
+ <array offset="0xa9e8" name="SP_CS_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="cmd">
+ <reg64 offset="0" name="DESCRIPTOR" variants="A7XX-">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
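+ <!--
+      Illustrative sketch (the helper name and alignment handling are assumptions,
+      not from this file): packing one SP_CS_BINDLESS_BASE DESCRIPTOR value from a
+      descriptor-set iova plus an a6xx_bindless_descriptor_size. Per the enum doc
+      above, the size can be read as "2 << N dwords", so BINDLESS_DESCRIPTOR_64B
+      (N = 3) is 2 << 3 = 16 dwords = 64 bytes.
+
+          #include <stdint.h>
+
+          static inline uint64_t
+          pack_bindless_base(uint64_t iova, unsigned desc_size /* enum value */)
+          {
+             /* ADDR occupies bits [2:63] and is stored shifted right by 2,
+              * so the low two bits of the register hold DESC_SIZE */
+             return (iova & ~(uint64_t)0x3) | (desc_size & 0x3);
+          }
+ -->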
+
+ <!--
+ IBO state for compute shader:
+ -->
+ <reg64 offset="0xa9f2" name="SP_CS_IBO" type="address" align="16"/>
+ <reg32 offset="0xaa00" name="SP_CS_IBO_COUNT" low="0" high="6" type="uint"/>
+
+ <!-- Correlated with avgs/uvgs usage in FS -->
+ <reg32 offset="0xaa01" name="SP_FS_VGPR_CONFIG" type="uint" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xaa02" name="SP_PS_ALIASED_COMPONENTS_CONTROL" variants="A7XX-" usage="cmd">
+ <bitfield name="ENABLED" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0xaa03" name="SP_PS_ALIASED_COMPONENTS" variants="A7XX-" usage="cmd">
+ <doc>
+ Specify for which components the output color should be read
+ from alias, e.g. for:
+
+ alias.1.b32.0 r3.x, c8.x
+ alias.1.b32.0 r2.x, c4.x
+ alias.1.b32.0 r1.x, c4.x
+ alias.1.b32.0 r0.x, c0.x
+
+ the SP_PS_ALIASED_COMPONENTS would be 0x00001111
+ </doc>
+
+ <bitfield name="RT0" low="0" high="3"/>
+ <bitfield name="RT1" low="4" high="7"/>
+ <bitfield name="RT2" low="8" high="11"/>
+ <bitfield name="RT3" low="12" high="15"/>
+ <bitfield name="RT4" low="16" high="19"/>
+ <bitfield name="RT5" low="20" high="23"/>
+ <bitfield name="RT6" low="24" high="27"/>
+ <bitfield name="RT7" low="28" high="31"/>
+ </reg32>
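+ <!--
+      Illustrative sketch (the helper name and per-RT mask array are assumptions):
+      building SP_PS_ALIASED_COMPONENTS from a 4-bit component mask per render
+      target, using the RT0..RT7 nibble layout above. For the alias example in the
+      doc (only .x of RT0..RT3 read from the alias) each mask is 0x1 and the
+      result is 0x00001111.
+
+          #include <stdint.h>
+
+          static inline uint32_t
+          build_aliased_components(const uint8_t rt_mask[8])
+          {
+             uint32_t val = 0;
+             for (unsigned rt = 0; rt < 8; rt++)
+                val |= (uint32_t)(rt_mask[rt] & 0xf) << (rt * 4);
+             return val;
+          }
+ -->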
+
+ <reg32 offset="0xaaf2" name="SP_UNKNOWN_AAF2" type="uint" usage="cmd"/>
+
+ <!--
+ This enum is probably similar in purpose to SNORMMODE on a3xx,
+ minus the snorm stuff, i.e. it controls what happens with an
+ out-of-bounds isam/isamm. GL and Vulkan robustness require us to
+ return 0 on an out-of-bounds textureFetch().
+ -->
+ <enum name="a6xx_isam_mode">
+ <value value="0x1" name="ISAMMODE_CL"/>
+ <value value="0x2" name="ISAMMODE_GL"/>
+ </enum>
+
+ <reg32 offset="0xab00" name="SP_MODE_CONTROL" usage="rp_blit">
+ <!--
+ When set, half register loads from the constant file will
+ load a 32-bit value (so hc0.y loads the same value as c0.y)
+ and implicitly convert it to 16b (f2f16, or u2u16, based on
+ operand type). When unset, half register loads from the
+ constant file will load 16 bits from the packed constant
+ file (so hc0.y loads the top 16 bits of the value of c0.x)
+ -->
+ <bitfield name="CONSTANT_DEMOTION_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ISAMMODE" low="1" high="2" type="a6xx_isam_mode"/>
+ <bitfield name="SHARED_CONSTS_ENABLE" pos="3" type="boolean"/> <!-- see HLSQ_SHARED_CONSTS -->
+ </reg32>
+
+ <reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xab04" name="SP_FS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xab05" name="SP_FS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+
+ <array offset="0xab10" name="SP_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
+ <reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
+ <array offset="0xab0a" name="SP_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
+ <reg64 offset="0" name="DESCRIPTOR" variants="A7XX-">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
+
+ <!--
+ Combined IBO state for 3d pipe, used for Image and SSBO write/atomic
+ instructions VS/HS/DS/GS/FS. See SP_CS_IBO_* for compute shaders.
+ -->
+ <reg64 offset="0xab1a" name="SP_IBO" type="address" align="16" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_IBO_COUNT" low="0" high="6" type="uint" usage="cmd"/>
+
+ <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
+
+ <bitset name="a6xx_sp_2d_dst_format" inline="yes">
+ <bitfield name="NORM" pos="0" type="boolean"/>
+ <bitfield name="SINT" pos="1" type="boolean"/>
+ <bitfield name="UINT" pos="2" type="boolean"/>
+ <!-- looks like HW only cares about the base type of this format,
+ which matches the ifmt? -->
+ <bitfield name="COLOR_FORMAT" low="3" high="10" type="a6xx_format"/>
+ <!-- set when ifmt is R2D_UNORM8_SRGB -->
+ <bitfield name="SRGB" pos="11" type="boolean"/>
+ <!-- some sort of channel mask, not sure what it is for -->
+ <bitfield name="MASK" low="12" high="15"/>
+ </bitset>
+
+ <reg32 offset="0xacc0" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xa9bf" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A7XX-" usage="rp_blit"/>
+
+ <reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/>
+ <reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
+ <reg32 offset="0xae02" name="SP_NC_MODE_CNTL">
+ <!-- TODO: valid bits 0x3c3f, see kernel -->
+ </reg32>
+ <reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/>
+ <reg32 offset="0xae04" name="SP_FLOAT_CNTL" usage="cmd">
+ <bitfield name="F16_NO_INF" pos="3" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae08" name="SP_UNKNOWN_AE08" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae09" name="SP_UNKNOWN_AE09" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae0a" name="SP_UNKNOWN_AE0A" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xae0f" name="SP_PERFCTR_ENABLE" usage="cmd">
+ <!-- some perfcntrs are affected by a per-stage enable bit
+ (PERF_SP_ALU_WORKING_CYCLES for example)
+ TODO: verify position of HS/DS/GS bits -->
+ <bitfield name="VS" pos="0" type="boolean"/>
+ <bitfield name="HS" pos="1" type="boolean"/>
+ <bitfield name="DS" pos="2" type="boolean"/>
+ <bitfield name="GS" pos="3" type="boolean"/>
+ <bitfield name="FS" pos="4" type="boolean"/>
+ <bitfield name="CS" pos="5" type="boolean"/>
+ </reg32>
+ <array offset="0xae10" name="SP_PERFCTR_SP_SEL" stride="1" length="24"/>
+ <array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/>
+ <reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6c" name="SP_UNKNOWN_AE6C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
+ <bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
+ <bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
+ <bitfield name="STATETYPE" low="8" high="15" type="a7xx_statetype_id"/>
+ <bitfield name="USPTP" low="4" high="7"/>
+ <bitfield name="SPTP" low="0" high="3"/>
+ </reg32>
+ <reg32 offset="0xae71" name="SP_DBG_CNTL" variants="A7XX-"/>
+ <reg32 offset="0xae73" name="SP_UNKNOWN_AE73" variants="A7XX-" usage="cmd"/>
+ <array offset="0xae80" name="SP_PERFCTR_SP_SEL" stride="1" length="36" variants="A7XX-"/>
+ <!-- TODO: there are 4 more percntr select registers (0xae28-0xae2b) -->
+ <!-- TODO: there are a few unknown registers in the 0xae30-0xae52 range -->
+ <reg32 offset="0xbe22" name="SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/>
+
+ <!--
+ The downstream kernel calls the debug cluster of registers
+ "a6xx_sp_ps_tp_cluster" but this actually specifies the border
+ color base for compute shaders.
+ -->
+ <reg64 offset="0xb180" name="SP_PS_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
+ <reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/>
+ <reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/>
+
+ <reg32 offset="0xb190" name="SP_UNKNOWN_B190"/>
+ <reg32 offset="0xb191" name="SP_UNKNOWN_B191"/>
+
+ <!-- it could be that all the registers below here are actually TPL1? -->
+
+ <reg32 offset="0xb300" name="SP_TP_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="UNK2" low="2" high="3"/>
+ </reg32>
+ <reg32 offset="0xb301" name="SP_TP_DEST_MSAA_CNTL" usage="rp_blit">
+ <bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
+ <bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
+ </reg32>
+
+ <!-- looks to work in the same way as a5xx: -->
+ <reg64 offset="0xb302" name="SP_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
+ <reg32 offset="0xb304" name="SP_TP_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
+ <reg32 offset="0xb305" name="SP_TP_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0xb306" name="SP_TP_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0xb307" name="SP_TP_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0xb309" name="SP_TP_MODE_CNTL" usage="cmd">
+ <bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/>
+ <bitfield name="UNK3" low="2" high="7"/>
+ </reg32>
+ <reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/>
+
+ <!--
+ Equivalent to the corresponding RB_2D_SRC_* regs on a5xx, which were
+ either badly named or whose functionality moved in a6xx. The downstream
+ kernel calls this "a6xx_sp_ps_tp_2d_cluster".
+ -->
+ <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ </reg32>
+ <reg64 offset="0xb4c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c4" name="SP_PS_2D_SRC_PITCH" variants="A6XX" usage="rp_blit">
+ <bitfield name="UNK0" low="0" high="8"/>
+ <bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ </reg32>
+ <reg64 offset="0xb2c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c4" name="SP_PS_2D_SRC_PITCH" variants="A7XX">
+ <bitfield name="UNK0" low="0" high="8"/>
+ <bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
+ </reg32>
+
+ <!-- planes for NV12, etc. (TODO: not tested) -->
+ <reg64 offset="0xb4c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A6XX"/>
+ <reg32 offset="0xb4c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
+ <reg64 offset="0xb4c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A6XX"/>
+
+ <reg64 offset="0xb2c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A7XX-"/>
+ <reg32 offset="0xb2c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
+ <reg64 offset="0xb2c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A7XX-"/>
+
+ <reg64 offset="0xb4ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
+
+ <reg64 offset="0xb2ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
+
+ <reg32 offset="0xb4cd" name="SP_PS_UNKNOWN_B4CD" low="6" high="31" variants="A6XX"/>
+ <reg32 offset="0xb4ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A6XX"/>
+ <reg32 offset="0xb4cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A6XX"/>
+ <reg32 offset="0xb4d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A6XX"/>
+ <reg32 offset="0xb4d1" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A6XX" usage="rp_blit"/>
+
+ <reg32 offset="0xb2cd" name="SP_PS_UNKNOWN_B4CD" low="6" high="31" variants="A7XX"/>
+ <reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
+ <reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
+ <reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
+ <reg32 offset="0xb2d1" name="SP_PS_2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d2" name="SP_PS_UNKNOWN_B2D2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
+
+ <!-- always 0x100000 or 0x1000000? -->
+ <reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
+ <reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd"/>
+ <reg32 offset="0xb604" name="TPL1_NC_MODE_CNTL">
+ <bitfield name="MODE" pos="0" type="boolean"/>
+ <bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
+ <bitfield name="MIN_ACCESS_LENGTH" pos="3" type="boolean"/> <!-- true=64b false=32b -->
+ <bitfield name="UPPER_BIT" pos="4" type="uint"/>
+ <bitfield name="UNK6" low="6" high="7"/>
+ </reg32>
+ <reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="cmd"/> <!-- always 0x0 or 0x44 ? -->
+
+ <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A6XX"/>
+ <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A6XX"/>
+ <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A6XX"/>
+ <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A6XX"/>
+ <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A6XX"/>
+
+ <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
+
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12"/>
+
+ <!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
+
+ <bitset name="a6xx_hlsq_xs_cntl" inline="yes">
+ <bitfield name="CONSTLEN" low="0" high="7" shr="2" type="uint"/>
+ <bitfield name="ENABLED" pos="8" type="boolean"/>
+ <bitfield name="READ_IMM_SHARED_CONSTS" pos="9" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <reg32 offset="0xb800" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb801" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb802" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb803" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+
+ <reg32 offset="0xa827" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa83f" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa867" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa898" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+
+ <reg32 offset="0xa9aa" name="HLSQ_FS_UNKNOWN_A9AA" variants="A7XX-" usage="rp_blit">
+ <!-- Tentatively named, appears to disable consts being loaded via CP_LOAD_STATE6_FRAG -->
+ <bitfield name="CONSTS_LOAD_DISABLE" pos="0" type="boolean"/>
+ </reg32>
+
+ <!-- Always 0 -->
+ <reg32 offset="0xa9ac" name="HLSQ_UNKNOWN_A9AC" variants="A7XX-" usage="cmd"/>
+
+ <!-- Used in VK_KHR_fragment_shading_rate -->
+ <reg32 offset="0xa9ad" name="HLSQ_UNKNOWN_A9AD" variants="A7XX-" usage="cmd"/>
+
+ <reg32 offset="0xa9ae" name="HLSQ_UNKNOWN_A9AE" variants="A7XX-" usage="rp_blit">
+ <bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/>
+ <!-- UNK8 is set on a730/a740 -->
+ <bitfield name="UNK8" pos="8" type="boolean"/>
+ <!-- UNK9 is set on a750 -->
+ <bitfield name="UNK9" pos="9" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xb820" name="HLSQ_LOAD_STATE_GEOM_CMD"/>
+ <reg64 offset="0xb821" name="HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR" align="16" type="address"/>
+ <reg32 offset="0xb823" name="HLSQ_LOAD_STATE_GEOM_DATA"/>
+
+
+ <bitset name="a6xx_hlsq_fs_cntl_0" inline="yes">
+ <!-- must match SP_FS_CTRL -->
+ <bitfield name="THREADSIZE" pos="0" type="a6xx_threadsize"/>
+ <bitfield name="VARYINGS" pos="1" type="boolean"/>
+ <bitfield name="UNK2" low="2" high="11"/>
+ </bitset>
+ <bitset name="a6xx_hlsq_control_3_reg" inline="yes">
+ <!-- register loaded with position (bary.f) -->
+ <bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
+ </bitset>
+ <bitset name="a6xx_hlsq_control_4_reg" inline="yes">
+ <bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="XYCOORDREGID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
+ </bitset>
+ <bitset name="a6xx_hlsq_control_5_reg" inline="yes">
+ <bitfield name="LINELENGTHREGID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="FOVEATIONQUALITYREGID" low="8" high="15" type="a3xx_regid"/>
+ </bitset>
+
+ <reg32 offset="0xb980" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb981" name="HLSQ_UNKNOWN_B981" pos="0" type="boolean" variants="A6XX"/> <!-- never used by blob -->
+ <reg32 offset="0xb982" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A6XX" usage="rp_blit">
+ <!-- Sets the maximum number of primitives allowed in one FS wave minus one, similarly to the
+ A3xx field, except that it's not necessary to set it to anything but the maximum, since
+ the hardware will simply emit smaller waves when it runs out of space. -->
+ <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb983" name="HLSQ_CONTROL_2_REG" variants="A6XX" usage="rp_blit">
+ <bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- SAMPLEID is loaded into a half-precision register: -->
+ <bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xb984" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb985" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb986" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb987" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xa9c6" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9c7" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9c8" name="HLSQ_CONTROL_2_REG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- SAMPLEID is loaded into a half-precision register: -->
+ <bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xa9c9" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9ca" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cb" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cd" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="cmd"/>
+
+ <!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
+ <reg32 offset="0xb990" name="HLSQ_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
+ <bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb991" name="HLSQ_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb992" name="HLSQ_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb993" name="HLSQ_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb994" name="HLSQ_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb995" name="HLSQ_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb996" name="HLSQ_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
+ <bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xb997" name="HLSQ_CS_CNTL_0" variants="A6XX" usage="rp_blit">
+ <!-- These are all vec3; the first three need to be high regs.
+      WGSIZECONSTID is the local size (from HLSQ_CS_NDRANGE_0).
+      WGOFFSETCONSTID is WGIDCONSTID * WGSIZECONSTID.
+ -->
+ <bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
+ <bitfield name="WGSIZECONSTID" low="8" high="15" type="a3xx_regid"/>
+ <bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
+ <bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
+ </reg32>
+ <reg32 offset="0xb998" name="HLSQ_CS_CNTL_1" variants="A6XX" usage="rp_blit">
+ <!-- gl_LocalInvocationIndex -->
+ <bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- a650 has 6 "SP cores" (but 3 "SP"). This makes it use only
+      one of those 6 "SP cores". -->
+ <bitfield name="SINGLE_SP_CORE" pos="8" type="boolean"/>
+ <!-- Must match SP_CS_CTRL -->
+ <bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
+ <!-- 1 thread per wave (ignored if bit9 set) -->
+ <bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
+ </reg32>
+ <!--note: vulkan blob doesn't use these -->
+ <reg32 offset="0xb999" name="HLSQ_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99a" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99b" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
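+ <!--
+      Illustrative sketch of filling HLSQ_CS_NDRANGE_0..6 for a plain dispatch,
+      using the bitfield positions above. Assumptions (not stated in this file):
+      KERNELDIM is set to 3, GLOBALSIZE_* is the total grid size in invocations
+      (local size times workgroup count), and GLOBALOFF_* is left at 0.
+
+          #include <stdint.h>
+
+          static inline void
+          fill_cs_ndrange(uint32_t regs[7], const uint32_t local[3], const uint32_t groups[3])
+          {
+             regs[0] = 3 |                          /* KERNELDIM */
+                       ((local[0] - 1) << 2) |      /* LOCALSIZEX, value minus one */
+                       ((local[1] - 1) << 12) |     /* LOCALSIZEY */
+                       ((local[2] - 1) << 22);      /* LOCALSIZEZ */
+             regs[1] = local[0] * groups[0];        /* GLOBALSIZE_X */
+             regs[2] = 0;                           /* GLOBALOFF_X */
+             regs[3] = local[1] * groups[1];        /* GLOBALSIZE_Y */
+             regs[4] = 0;                           /* GLOBALOFF_Y */
+             regs[5] = local[2] * groups[2];        /* GLOBALSIZE_Z */
+             regs[6] = 0;                           /* GLOBALOFF_Z */
+          }
+ -->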
+
+ <!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
+ <reg32 offset="0xa9d4" name="HLSQ_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
+ <bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9d5" name="HLSQ_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9d6" name="HLSQ_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9d7" name="HLSQ_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9d8" name="HLSQ_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9d9" name="HLSQ_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0xa9da" name="HLSQ_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
+ <bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
+ </reg32>
+ <!--note: vulkan blob doesn't use these -->
+ <reg32 offset="0xa9dc" name="HLSQ_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dd" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9de" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
+
+ <enum name="a7xx_cs_yalign">
+ <value name="CS_YALIGN_1" value="8"/>
+ <value name="CS_YALIGN_2" value="4"/>
+ <value name="CS_YALIGN_4" value="2"/>
+ <value name="CS_YALIGN_8" value="1"/>
+ </enum>
+
+ <reg32 offset="0xa9db" name="HLSQ_CS_CNTL_1" variants="A7XX-" usage="rp_blit">
+ <!-- gl_LocalInvocationIndex -->
+ <bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
+ <!-- Must match SP_CS_CTRL -->
+ <bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
+ <bitfield name="UNK11" pos="11" type="boolean"/>
+ <bitfield name="UNK22" pos="22" type="boolean"/>
+ <bitfield name="UNK26" pos="26" type="boolean"/>
+ <bitfield name="YALIGN" low="27" high="30" type="a7xx_cs_yalign"/>
+ </reg32>
+
+ <reg32 offset="0xa9df" name="HLSQ_CS_LOCAL_SIZE" variants="A7XX-" usage="cmd">
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0xb9a0" name="HLSQ_LOAD_STATE_FRAG_CMD"/>
+ <reg64 offset="0xb9a1" name="HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR" align="16" type="address"/>
+ <reg32 offset="0xb9a3" name="HLSQ_LOAD_STATE_FRAG_DATA"/>
+
+ <!-- mirror of SP_CS_BINDLESS_BASE -->
+ <array offset="0xb9c0" name="HLSQ_CS_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
+ <reg64 offset="0" name="DESCRIPTOR">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
+
+ <!-- new in a6xx gen4, mirror of SP_CS_UNKNOWN_A9B1? -->
+ <reg32 offset="0xb9d0" name="HLSQ_CS_UNKNOWN_B9D0" variants="A6XX" usage="cmd">
+ <bitfield name="SHARED_SIZE" low="0" high="4" type="uint"/>
+ <bitfield name="UNK5" pos="5" type="boolean"/>
+ <!-- always 1 ? -->
+ <bitfield name="UNK6" pos="6" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD">
+ <!-- I think only the low bit is actually used? -->
+ <bitfield name="STATE_ID" low="16" high="23"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
+ <reg32 offset="0xbb08" name="HLSQ_INVALIDATE_CMD" variants="A6XX" usage="cmd">
+ <doc>
+ This register clears pending loads queued up by
+ CP_LOAD_STATE6. Each bit resets a particular kind (or kinds) of
+ CP_LOAD_STATE6.
+ </doc>
+
+ <!-- per-stage state: shader, non-bindless UBO, textures, and samplers -->
+ <bitfield name="VS_STATE" pos="0" type="boolean"/>
+ <bitfield name="HS_STATE" pos="1" type="boolean"/>
+ <bitfield name="DS_STATE" pos="2" type="boolean"/>
+ <bitfield name="GS_STATE" pos="3" type="boolean"/>
+ <bitfield name="FS_STATE" pos="4" type="boolean"/>
+ <bitfield name="CS_STATE" pos="5" type="boolean"/>
+
+ <bitfield name="CS_IBO" pos="6" type="boolean"/>
+ <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+
+ <!-- Note: these only do something when HLSQ_SHARED_CONSTS is set to 1 -->
+ <bitfield name="CS_SHARED_CONST" pos="19" type="boolean"/>
+ <bitfield name="GFX_SHARED_CONST" pos="8" type="boolean"/>
+
+ <!-- SS6_BINDLESS: one bit per bindless base -->
+ <bitfield name="CS_BINDLESS" low="9" high="13" type="hex"/>
+ <bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
+ </reg32>
+
+ <reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
+ <doc>
+ This register clears pending loads queued up by
+ CP_LOAD_STATE6. Each bit resets a particular kind (or kinds) of
+ CP_LOAD_STATE6.
+ </doc>
+
+ <!-- per-stage state: shader, non-bindless UBO, textures, and samplers -->
+ <bitfield name="VS_STATE" pos="0" type="boolean"/>
+ <bitfield name="HS_STATE" pos="1" type="boolean"/>
+ <bitfield name="DS_STATE" pos="2" type="boolean"/>
+ <bitfield name="GS_STATE" pos="3" type="boolean"/>
+ <bitfield name="FS_STATE" pos="4" type="boolean"/>
+ <bitfield name="CS_STATE" pos="5" type="boolean"/>
+
+ <bitfield name="CS_IBO" pos="6" type="boolean"/>
+ <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+
+ <!-- SS6_BINDLESS: one bit per bindless base -->
+ <bitfield name="CS_BINDLESS" low="9" high="16" type="hex"/>
+ <bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/>
+ </reg32>
+
+ <reg32 offset="0xbb10" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xab03" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+
+ <array offset="0xab40" name="HLSQ_SHARED_CONSTS_IMM" stride="1" length="64" variants="A7XX-"/>
+
+ <reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
+ <doc>
+ Shared constants are intended to be used for Vulkan push
+ constants. When enabled, 8 vec4's are reserved in the FS
+ const pool and 16 in the geometry const pool, although
+ only 8 are actually used (why?), and they are mapped to
+ c504-c511 in each stage. Both VS and FS shared consts
+ are written using ST6_CONSTANTS/SB6_IBO, so that both
+ the geometry and FS shared consts can be written at once
+ by using CP_LOAD_STATE6 rather than
+ CP_LOAD_STATE6_FRAG/CP_LOAD_STATE6_GEOM. In addition,
+ DST_OFF and NUM_UNIT are in units of dwords instead of
+ vec4's.
+
+ There is also a separate shared constant pool for CS,
+ which is loaded through CP_LOAD_STATE6_FRAG with
+ ST6_UBO/ST6_IBO. However, the only real difference for CS
+ is the dword units.
+ </doc>
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+
+ <!-- mirror of SP_BINDLESS_BASE -->
+ <array offset="0xbb20" name="HLSQ_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="cmd">
+ <reg64 offset="0" name="DESCRIPTOR">
+ <bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
+ <bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
+ </reg64>
+ </array>
+
+ <reg32 offset="0xbd80" name="HLSQ_2D_EVENT_CMD">
+ <bitfield name="STATE_ID" low="8" high="15"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
+ <reg32 offset="0xbe00" name="HLSQ_UNKNOWN_BE00" variants="A6XX" usage="cmd"/> <!-- all bits valid except bit 29 -->
+ <reg32 offset="0xbe01" name="HLSQ_UNKNOWN_BE01" low="4" high="6" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xbe04" name="HLSQ_DBG_ECO_CNTL" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xbe05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0xbe08" name="HLSQ_UNKNOWN_BE08" low="0" high="15"/>
+ <array offset="0xbe10" name="HLSQ_PERFCTR_HLSQ_SEL" stride="1" length="6"/>
+
+ <!-- TODO: some valid registers between 0xbe20 and 0xbe33 -->
+ <reg32 offset="0xbe22" name="HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/>
+
+ <reg32 offset="0xc000" name="SP_AHB_READ_APERTURE" variants="A7XX-"/>
+
+ <!-- Don't know if these are SP, always 0 -->
+ <reg64 offset="0x0ce2" name="SP_UNKNOWN_0CE2" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x0ce4" name="SP_UNKNOWN_0CE4" variants="A7XX-" usage="cmd"/>
+ <reg64 offset="0x0ce6" name="SP_UNKNOWN_0CE6" variants="A7XX-" usage="cmd"/>
+
+ <!--
+ These special registers signal the beginning/end of an event
+ sequence. The sequence used internally for an event looks like:
+ - write EVENT_CMD pipe register
+ - write CP_EVENT_START
+ - write HLSQ_EVENT_CMD with event or HLSQ_DRAW_CMD
+ - write PC_EVENT_CMD with event or PC_DRAW_CMD
+ - write HLSQ_EVENT_CMD(CONTEXT_DONE)
+ - write PC_EVENT_CMD(CONTEXT_DONE)
+ - write CP_EVENT_END
+ Writing to CP_EVENT_END seems to actually trigger the context roll
+ -->
+ <reg32 offset="0xd600" name="CP_EVENT_START">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+ <reg32 offset="0xd601" name="CP_EVENT_END">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+ <reg32 offset="0xd700" name="CP_2D_EVENT_START">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+ <reg32 offset="0xd701" name="CP_2D_EVENT_END">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+</domain>
+
+<!-- Seems basically the same as a5xx, maybe move to common.xml.. -->
+<domain name="A6XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+ </enum>
+ <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="CLAMPENABLE" pos="0" type="boolean">
+ <doc>
+ Clamp the result to [0, 1] if the format is unorm, or to
+ [-1, 1] if the format is snorm, *after*
+ filtering. Has no effect for other formats.
+ </doc>
+ </bitfield>
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
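+ <!--
+      Illustrative sketch (the rounding and helper name are assumptions):
+      MIN_LOD/MAX_LOD are 12-bit ufixed values with radix 8, i.e. 8 fractional
+      bits, so a LOD clamp is encoded as lod * 256; e.g. 5.0 encodes as 0x500.
+
+          #include <stdint.h>
+
+          static inline uint32_t lod_to_ufixed8(float lod)
+          {
+             return (uint32_t)(lod * 256.0f + 0.5f) & 0xfff;
+          }
+ -->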
+ <reg32 offset="2" name="2">
+ <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
+ <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
+ <bitfield name="BCOLOR" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
+
+<domain name="A6XX_TEX_CONST" width="32">
+ <doc>Texture constant dwords</doc>
+ <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+ </enum>
+ <enum name="a6xx_tex_type"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_1D" value="0"/>
+ <value name="A6XX_TEX_2D" value="1"/>
+ <value name="A6XX_TEX_CUBE" value="2"/>
+ <value name="A6XX_TEX_3D" value="3"/>
+ <value name="A6XX_TEX_BUFFER" value="4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <!-- overlaps with MIPLVLS -->
+ <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
+ <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
+ <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
+ <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
+ <!--
+ Why is the swap needed in addition to SWIZ_*? The swap
+ is performed before border color replacement, while the
+ swizzle is applied after it.
+ -->
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!--
+ These fields overlap PITCH, and are used instead of
+ PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
+ -->
+ <doc> probably for D3D structured UAVs, normally set to 1 </doc>
+ <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
+ <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
+
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="7" high="28" type="uint"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!--
+ ARRAY_PITCH is basically LAYERSZ for the first mipmap level. For
+ 3d textures (laid out mipmap level first), MIN_LAYERSZ is the
+ layer size at the point where it stops being reduced when moving
+ to higher (smaller) mipmap levels.
+ -->
+ <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
+ <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
+ <!--
+ By default, levels with w < 16 are linear.
+ TILE_ALL makes all levels tiled. It seems to be required when using
+ UBWC, since all levels have UBWC (can this possibly be disabled?).
+ -->
+ <bitfield name="TILE_ALL" pos="27" type="boolean"/>
+ <bitfield name="FLAG" pos="28" type="boolean"/>
+ </reg32>
+ <!-- For 2- or 3-plane formats, BASE is the flag buffer address (if
+      enabled); the address of the non-flag base buffer is determined
+      automatically, and must follow the flag buffer.
+ -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="DEPTH" low="17" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <!-- overlaps with PLANE_PITCH -->
+ <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
+ <!-- pitch for plane 2 / plane 3 -->
+ <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
+ </reg32>
+ <!-- 7/8 is plane 2 address for planar formats -->
+ <reg32 offset="7" name="7">
+ <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="8" name="8">
+ <bitfield name="FLAG_HI" low="0" high="16"/>
+ </reg32>
+ <!-- 9/10 is plane 3 address for planar formats -->
+ <reg32 offset="9" name="9">
+ <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="10" name="10">
+ <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
+ <!-- log2 size of the first level, required for mipmapping -->
+ <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
+ <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="11" name="11"/>
+ <reg32 offset="12" name="12"/>
+ <reg32 offset="13" name="13"/>
+ <reg32 offset="14" name="14"/>
+ <reg32 offset="15" name="15"/>
+</domain>
+
+<domain name="A6XX_UBO" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
+ </reg32>
+</domain>
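+<!--
+     Illustrative sketch (the helper name and rounding are assumptions): packing
+     an A6XX_UBO descriptor from a buffer iova and a size in bytes, with SIZE in
+     vec4 (16-byte) units as noted above.
+
+         #include <stdint.h>
+
+         static inline void
+         pack_ubo_descriptor(uint32_t dwords[2], uint64_t iova, uint32_t size_bytes)
+         {
+            uint32_t size_vec4 = (size_bytes + 15) / 16;
+            dwords[0] = (uint32_t)iova;                        /* BASE_LO */
+            dwords[1] = ((uint32_t)(iova >> 32) & 0x1ffff) |   /* BASE_HI, bits 0..16 */
+                        ((size_vec4 & 0x7fff) << 17);          /* SIZE, in vec4 units */
+         }
+-->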
+
+<domain name="A6XX_PDC" width="32">
+ <reg32 offset="0x1140" name="GPU_ENABLE_PDC"/>
+ <reg32 offset="0x1148" name="GPU_SEQ_START_ADDR"/>
+ <reg32 offset="0x1540" name="GPU_TCS0_CONTROL"/>
+ <reg32 offset="0x1541" name="GPU_TCS0_CMD_ENABLE_BANK"/>
+ <reg32 offset="0x1542" name="GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK"/>
+ <reg32 offset="0x1543" name="GPU_TCS0_CMD0_MSGID"/>
+ <reg32 offset="0x1544" name="GPU_TCS0_CMD0_ADDR"/>
+ <reg32 offset="0x1545" name="GPU_TCS0_CMD0_DATA"/>
+ <reg32 offset="0x1572" name="GPU_TCS1_CONTROL"/>
+ <reg32 offset="0x1573" name="GPU_TCS1_CMD_ENABLE_BANK"/>
+ <reg32 offset="0x1574" name="GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK"/>
+ <reg32 offset="0x1575" name="GPU_TCS1_CMD0_MSGID"/>
+ <reg32 offset="0x1576" name="GPU_TCS1_CMD0_ADDR"/>
+ <reg32 offset="0x1577" name="GPU_TCS1_CMD0_DATA"/>
+ <reg32 offset="0x15A4" name="GPU_TCS2_CONTROL"/>
+ <reg32 offset="0x15A5" name="GPU_TCS2_CMD_ENABLE_BANK"/>
+ <reg32 offset="0x15A6" name="GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK"/>
+ <reg32 offset="0x15A7" name="GPU_TCS2_CMD0_MSGID"/>
+ <reg32 offset="0x15A8" name="GPU_TCS2_CMD0_ADDR"/>
+ <reg32 offset="0x15A9" name="GPU_TCS2_CMD0_DATA"/>
+ <reg32 offset="0x15D6" name="GPU_TCS3_CONTROL"/>
+ <reg32 offset="0x15D7" name="GPU_TCS3_CMD_ENABLE_BANK"/>
+ <reg32 offset="0x15D8" name="GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK"/>
+ <reg32 offset="0x15D9" name="GPU_TCS3_CMD0_MSGID"/>
+ <reg32 offset="0x15DA" name="GPU_TCS3_CMD0_ADDR"/>
+ <reg32 offset="0x15DB" name="GPU_TCS3_CMD0_DATA"/>
+</domain>
+
+<domain name="A6XX_PDC_GPU_SEQ" width="32">
+ <reg32 offset="0x0" name="MEM_0"/>
+</domain>
+
+<domain name="A6XX_CX_DBGC" width="32">
+ <reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="15" low="8" name="PING_BLK_SEL"/>
+ </reg32>
+ <reg32 offset="0x0001" name="CFG_DBGBUS_SEL_B"/>
+ <reg32 offset="0x0002" name="CFG_DBGBUS_SEL_C"/>
+ <reg32 offset="0x0003" name="CFG_DBGBUS_SEL_D"/>
+ <reg32 offset="0x0004" name="CFG_DBGBUS_CNTLT">
+ <bitfield high="5" low="0" name="TRACEEN"/>
+ <bitfield high="14" low="12" name="GRANU"/>
+ <bitfield high="31" low="28" name="SEGT"/>
+ </reg32>
+ <reg32 offset="0x0005" name="CFG_DBGBUS_CNTLM">
+ <bitfield high="27" low="24" name="ENABLE"/>
+ </reg32>
+ <reg32 offset="0x0008" name="CFG_DBGBUS_IVTL_0"/>
+ <reg32 offset="0x0009" name="CFG_DBGBUS_IVTL_1"/>
+ <reg32 offset="0x000a" name="CFG_DBGBUS_IVTL_2"/>
+ <reg32 offset="0x000b" name="CFG_DBGBUS_IVTL_3"/>
+ <reg32 offset="0x000c" name="CFG_DBGBUS_MASKL_0"/>
+ <reg32 offset="0x000d" name="CFG_DBGBUS_MASKL_1"/>
+ <reg32 offset="0x000e" name="CFG_DBGBUS_MASKL_2"/>
+ <reg32 offset="0x000f" name="CFG_DBGBUS_MASKL_3"/>
+ <reg32 offset="0x0010" name="CFG_DBGBUS_BYTEL_0">
+ <bitfield high="3" low="0" name="BYTEL0"/>
+ <bitfield high="7" low="4" name="BYTEL1"/>
+ <bitfield high="11" low="8" name="BYTEL2"/>
+ <bitfield high="15" low="12" name="BYTEL3"/>
+ <bitfield high="19" low="16" name="BYTEL4"/>
+ <bitfield high="23" low="20" name="BYTEL5"/>
+ <bitfield high="27" low="24" name="BYTEL6"/>
+ <bitfield high="31" low="28" name="BYTEL7"/>
+ </reg32>
+ <reg32 offset="0x0011" name="CFG_DBGBUS_BYTEL_1">
+ <bitfield high="3" low="0" name="BYTEL8"/>
+ <bitfield high="7" low="4" name="BYTEL9"/>
+ <bitfield high="11" low="8" name="BYTEL10"/>
+ <bitfield high="15" low="12" name="BYTEL11"/>
+ <bitfield high="19" low="16" name="BYTEL12"/>
+ <bitfield high="23" low="20" name="BYTEL13"/>
+ <bitfield high="27" low="24" name="BYTEL14"/>
+ <bitfield high="31" low="28" name="BYTEL15"/>
+ </reg32>
+
+ <reg32 offset="0x002f" name="CFG_DBGBUS_TRACE_BUF1"/>
+ <reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
+</domain>
+
+<domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
+ <reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
+ <reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>
+ <reg32 offset="0x0039" name="CX_MISC_TCM_RET_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0400" name="CX_MISC_SW_FUSE_VALUE" variants="A7XX-">
+ <bitfield pos="0" name="FASTBLEND" type="boolean"/>
+ <bitfield pos="1" name="LPAC" type="boolean"/>
+ <bitfield pos="2" name="RAYTRACING" type="boolean"/>
+ </reg32>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
new file mode 100644
index 000000000000..6531749d30f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -0,0 +1,228 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+
+<domain name="A6XX" width="32" prefix="variant" varset="chip">
+
+ <bitset name="A6XX_GMU_GPU_IDLE_STATUS">
+ <bitfield name="BUSY_IGN_AHB" pos="23"/>
+ <bitfield name="CX_GX_CPU_BUSY_IGN_AHB" pos="30"/>
+ </bitset>
+
+ <bitset name="A6XX_GMU_OOB">
+ <bitfield name="BOOT_SLUMBER_SET_MASK" pos="22"/>
+ <bitfield name="BOOT_SLUMBER_CHECK_MASK" pos="30"/>
+ <bitfield name="BOOT_SLUMBER_CLEAR_MASK" pos="30"/>
+ <bitfield name="DCVS_SET_MASK" pos="23"/>
+ <bitfield name="DCVS_CHECK_MASK" pos="31"/>
+ <bitfield name="DCVS_CLEAR_MASK" pos="31"/>
+ <bitfield name="GPU_SET_MASK" pos="18"/>
+ <bitfield name="GPU_CHECK_MASK" pos="26"/>
+ <bitfield name="GPU_CLEAR_MASK" pos="26"/>
+ <bitfield name="PERFCNTR_SET_MASK" pos="17"/>
+ <bitfield name="PERFCNTR_CHECK_MASK" pos="25"/>
+ <bitfield name="PERFCNTR_CLEAR_MASK" pos="25"/>
+ </bitset>
+
+ <bitset name="A6XX_HFI_IRQ">
+ <bitfield name="MSGQ_MASK" pos="0" />
+ <bitfield name="DSGQ_MASK" pos="1"/>
+ <bitfield name="BLOCKED_MSG_MASK" pos="2"/>
+ <bitfield name="CM3_FAULT_MASK" pos="23"/>
+ <bitfield name="GMU_ERR_MASK" low="16" high="22"/>
+ <bitfield name="OOB_MASK" low="24" high="31"/>
+ </bitset>
+
+ <bitset name="A6XX_HFI_H2F">
+ <bitfield name="IRQ_MASK_BIT" pos="0" />
+ </bitset>
+
+ <reg32 offset="0x80" name="GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL"/>
+ <reg32 offset="0x81" name="GMU_GX_SPTPRAC_POWER_CONTROL"/>
+ <reg32 offset="0xc00" name="GMU_CM3_ITCM_START"/>
+ <reg32 offset="0x1c00" name="GMU_CM3_DTCM_START"/>
+ <reg32 offset="0x23f0" name="GMU_NMI_CONTROL_STATUS"/>
+ <reg32 offset="0x23f8" name="GMU_BOOT_SLUMBER_OPTION"/>
+ <reg32 offset="0x23f9" name="GMU_GX_VOTE_IDX"/>
+ <reg32 offset="0x23fa" name="GMU_MX_VOTE_IDX"/>
+ <reg32 offset="0x23fc" name="GMU_DCVS_ACK_OPTION"/>
+ <reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
+ <reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
+ <reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
+ <reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
+ <reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
+ <reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>
+ <reg32 offset="0x5000" name="GMU_CM3_SYSRESET"/>
+ <reg32 offset="0x5001" name="GMU_CM3_BOOT_CONFIG"/>
+ <reg32 offset="0x501a" name="GMU_CM3_FW_BUSY"/>
+ <reg32 offset="0x501c" name="GMU_CM3_FW_INIT_RESULT"/>
+ <reg32 offset="0x502d" name="GMU_CM3_CFG"/>
+ <reg32 offset="0x5040" name="GMU_CX_GMU_POWER_COUNTER_ENABLE"/>
+ <reg32 offset="0x5041" name="GMU_CX_GMU_POWER_COUNTER_SELECT_0"/>
+ <reg32 offset="0x5042" name="GMU_CX_GMU_POWER_COUNTER_SELECT_1"/>
+ <reg32 offset="0x5044" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L"/>
+ <reg32 offset="0x5045" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H"/>
+ <reg32 offset="0x5046" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L"/>
+ <reg32 offset="0x5047" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H"/>
+ <reg32 offset="0x5048" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L"/>
+ <reg32 offset="0x5049" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H"/>
+ <reg32 offset="0x504a" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L"/>
+ <reg32 offset="0x504b" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H"/>
+ <reg32 offset="0x504c" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L"/>
+ <reg32 offset="0x504d" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H"/>
+ <reg32 offset="0x504e" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L"/>
+ <reg32 offset="0x504f" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H"/>
+ <reg32 offset="0x50c0" name="GMU_PWR_COL_INTER_FRAME_CTRL">
+ <bitfield name="IFPC_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="HM_POWER_COLLAPSE_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="SPTPRAC_POWER_CONTROL_ENABLE" pos="2" type="boolean"/>
+ <bitfield name="NUM_PASS_SKIPS" low="10" high="13"/>
+ <bitfield name="MIN_PASS_LENGTH" low="14" high="31"/>
+ </reg32>
+ <reg32 offset="0x50c1" name="GMU_PWR_COL_INTER_FRAME_HYST"/>
+ <reg32 offset="0x50c2" name="GMU_PWR_COL_SPTPRAC_HYST"/>
+ <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS">
+ <bitfield name="SPTPRAC_GDSC_POWERING_OFF" pos="0" type="boolean"/>
+ <bitfield name="SPTPRAC_GDSC_POWERING_ON" pos="1" type="boolean"/>
+ <bitfield name="SPTPRAC_GDSC_POWER_OFF" pos="2" type="boolean"/>
+ <bitfield name="SPTPRAC_GDSC_POWER_ON" pos="3" type="boolean"/>
+ <bitfield name="SP_CLOCK_OFF" pos="4" type="boolean"/>
+ <bitfield name="GMU_UP_POWER_STATE" pos="5" type="boolean"/>
+ <bitfield name="GX_HM_GDSC_POWER_OFF" pos="6" type="boolean"/>
+ <bitfield name="GX_HM_CLK_OFF" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x50e4" name="GMU_GPU_NAP_CTRL">
+ <bitfield name="HW_NAP_ENABLE" pos="0"/>
+ <bitfield name="SID" low="4" high="8"/>
+ </reg32>
+ <reg32 offset="0x50e8" name="GMU_RPMH_CTRL">
+ <bitfield name="RPMH_INTERFACE_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="LLC_VOTE_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="DDR_VOTE_ENABLE" pos="8" type="boolean"/>
+ <bitfield name="MX_VOTE_ENABLE" pos="9" type="boolean"/>
+ <bitfield name="CX_VOTE_ENABLE" pos="10" type="boolean"/>
+ <bitfield name="GFX_VOTE_ENABLE" pos="11" type="boolean"/>
+ <bitfield name="DDR_MIN_VOTE_ENABLE" pos="12" type="boolean"/>
+ <bitfield name="MX_MIN_VOTE_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="CX_MIN_VOTE_ENABLE" pos="14" type="boolean"/>
+ <bitfield name="GFX_MIN_VOTE_ENABLE" pos="15" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x50e9" name="GMU_RPMH_HYST_CTRL"/>
+ <reg32 offset="0x50ec" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE"/>
+ <reg32 offset="0x50f0" name="GPU_GMU_CX_GMU_CX_FAL_INTF"/>
+ <reg32 offset="0x50f1" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF"/>
+ <reg32 offset="0x5100" name="GPU_GMU_CX_GMU_PWR_COL_CP_MSG"/>
+ <reg32 offset="0x5101" name="GPU_GMU_CX_GMU_PWR_COL_CP_RESP"/>
+ <reg32 offset="0x51f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/>
+ <reg32 offset="0x5157" name="GMU_LLM_GLM_SLEEP_CTRL"/>
+ <reg32 offset="0x5158" name="GMU_LLM_GLM_SLEEP_STATUS"/>
+ <reg32 offset="0x5088" name="GMU_ALWAYS_ON_COUNTER_L"/>
+ <reg32 offset="0x5089" name="GMU_ALWAYS_ON_COUNTER_H"/>
+ <reg32 offset="0x50c3" name="GMU_GMU_PWR_COL_KEEPALIVE"/>
+ <reg32 offset="0x5180" name="GMU_HFI_CTRL_STATUS"/>
+ <reg32 offset="0x5181" name="GMU_HFI_VERSION_INFO"/>
+ <reg32 offset="0x5182" name="GMU_HFI_SFR_ADDR"/>
+ <reg32 offset="0x5183" name="GMU_HFI_MMAP_ADDR"/>
+ <reg32 offset="0x5184" name="GMU_HFI_QTBL_INFO"/>
+ <reg32 offset="0x5185" name="GMU_HFI_QTBL_ADDR"/>
+ <reg32 offset="0x5186" name="GMU_HFI_CTRL_INIT"/>
+ <reg32 offset="0x5190" name="GMU_GMU2HOST_INTR_SET"/>
+ <reg32 offset="0x5191" name="GMU_GMU2HOST_INTR_CLR"/>
+ <reg32 offset="0x5192" name="GMU_GMU2HOST_INTR_INFO">
+ <bitfield name="MSGQ" pos="0" type="boolean"/>
+ <bitfield name="CM3_FAULT" pos="23" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x5193" name="GMU_GMU2HOST_INTR_MASK"/>
+ <reg32 offset="0x5194" name="GMU_HOST2GMU_INTR_SET"/>
+ <reg32 offset="0x5195" name="GMU_HOST2GMU_INTR_CLR"/>
+ <reg32 offset="0x5196" name="GMU_HOST2GMU_INTR_RAW_INFO"/>
+ <reg32 offset="0x5197" name="GMU_HOST2GMU_INTR_EN_0"/>
+ <reg32 offset="0x5198" name="GMU_HOST2GMU_INTR_EN_1"/>
+ <reg32 offset="0x5199" name="GMU_HOST2GMU_INTR_EN_2"/>
+ <reg32 offset="0x519a" name="GMU_HOST2GMU_INTR_EN_3"/>
+ <reg32 offset="0x519b" name="GMU_HOST2GMU_INTR_INFO_0"/>
+ <reg32 offset="0x519c" name="GMU_HOST2GMU_INTR_INFO_1"/>
+ <reg32 offset="0x519d" name="GMU_HOST2GMU_INTR_INFO_2"/>
+ <reg32 offset="0x519e" name="GMU_HOST2GMU_INTR_INFO_3"/>
+ <reg32 offset="0x51c5" name="GMU_GENERAL_0"/>
+ <reg32 offset="0x51c6" name="GMU_GENERAL_1"/>
+ <reg32 offset="0x51cb" name="GMU_GENERAL_6"/>
+ <reg32 offset="0x51cc" name="GMU_GENERAL_7"/>
+ <reg32 offset="0x51cd" name="GMU_GENERAL_8" variants="A7XX"/>
+ <reg32 offset="0x51ce" name="GMU_GENERAL_9" variants="A7XX"/>
+ <reg32 offset="0x51cf" name="GMU_GENERAL_10" variants="A7XX"/>
+ <reg32 offset="0x515d" name="GMU_ISENSE_CTRL"/>
+ <reg32 offset="0x8920" name="GPU_CS_ENABLE_REG"/>
+ <reg32 offset="0x515d" name="GPU_GMU_CX_GMU_ISENSE_CTRL"/>
+ <reg32 offset="0x8578" name="GPU_CS_AMP_CALIBRATION_CONTROL3"/>
+ <reg32 offset="0x8558" name="GPU_CS_AMP_CALIBRATION_CONTROL2"/>
+ <reg32 offset="0x8580" name="GPU_CS_A_SENSOR_CTRL_0"/>
+ <reg32 offset="0x27ada" name="GPU_CS_A_SENSOR_CTRL_2"/>
+ <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
+ <reg32 offset="0x8957" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/>
+ <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
+ <reg32 offset="0x881d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/>
+ <reg32 offset="0x881f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/>
+ <reg32 offset="0x8821" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/>
+ <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/>
+ <reg32 offset="0x896d" name="GPU_CS_AMP_PERIOD_CTRL"/>
+ <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/>
+ <reg32 offset="0x514d" name="GPU_GMU_CX_GMU_PWR_THRESHOLD"/>
+ <reg32 offset="0x9303" name="GMU_AO_INTERRUPT_EN"/>
+ <reg32 offset="0x9304" name="GMU_AO_HOST_INTERRUPT_CLR"/>
+ <reg32 offset="0x9305" name="GMU_AO_HOST_INTERRUPT_STATUS">
+ <bitfield name="WDOG_BITE" pos="0" type="boolean"/>
+ <bitfield name="RSCC_COMP" pos="1" type="boolean"/>
+ <bitfield name="VDROOP" pos="2" type="boolean"/>
+ <bitfield name="FENCE_ERR" pos="3" type="boolean"/>
+ <bitfield name="DBD_WAKEUP" pos="4" type="boolean"/>
+ <bitfield name="HOST_AHB_BUS_ERROR" pos="5" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x9306" name="GMU_AO_HOST_INTERRUPT_MASK"/>
+ <reg32 offset="0x9309" name="GPU_GMU_AO_GMU_CGC_MODE_CNTL"/>
+ <reg32 offset="0x930a" name="GPU_GMU_AO_GMU_CGC_DELAY_CNTL"/>
+ <reg32 offset="0x930b" name="GPU_GMU_AO_GMU_CGC_HYST_CNTL"/>
+ <reg32 offset="0x930c" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS">
+ <bitfield name="GPUBUSYIGNAHB" pos="23" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x930d" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS2"/>
+ <reg32 offset="0x930e" name="GPU_GMU_AO_GPU_CX_BUSY_MASK"/>
+ <reg32 offset="0x9310" name="GMU_AO_AHB_FENCE_CTRL"/>
+ <reg32 offset="0x9313" name="GMU_AHB_FENCE_STATUS"/>
+ <reg32 offset="0x9314" name="GMU_AHB_FENCE_STATUS_CLR"/>
+ <reg32 offset="0x9315" name="GMU_RBBM_INT_UNMASKED_STATUS"/>
+ <reg32 offset="0x9316" name="GMU_AO_SPARE_CNTL"/>
+ <reg32 offset="0x9307" name="GMU_RSCC_CONTROL_REQ"/>
+ <reg32 offset="0x9308" name="GMU_RSCC_CONTROL_ACK"/>
+ <reg32 offset="0x9311" name="GMU_AHB_FENCE_RANGE_0"/>
+ <reg32 offset="0x9312" name="GMU_AHB_FENCE_RANGE_1"/>
+ <reg32 offset="0x9c03" name="GPU_CC_GX_GDSCR"/>
+ <reg32 offset="0x9d42" name="GPU_CC_GX_DOMAIN_MISC"/>
+ <reg32 offset="0xc001" name="GPU_CPR_FSM_CTL"/>
+
+ <!-- starts at offset 0x8c00 on most gpus -->
+ <reg32 offset="0x0004" name="GPU_RSCC_RSC_STATUS0_DRV0"/>
+ <reg32 offset="0x0008" name="RSCC_PDC_SEQ_START_ADDR"/>
+ <reg32 offset="0x0009" name="RSCC_PDC_MATCH_VALUE_LO"/>
+ <reg32 offset="0x000a" name="RSCC_PDC_MATCH_VALUE_HI"/>
+ <reg32 offset="0x000b" name="RSCC_PDC_SLAVE_ID_DRV0"/>
+ <reg32 offset="0x000d" name="RSCC_HIDDEN_TCS_CMD0_ADDR"/>
+ <reg32 offset="0x000e" name="RSCC_HIDDEN_TCS_CMD0_DATA"/>
+ <reg32 offset="0x0082" name="RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0"/>
+ <reg32 offset="0x0083" name="RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0"/>
+ <reg32 offset="0x0089" name="RSCC_TIMESTAMP_UNIT1_EN_DRV0"/>
+ <reg32 offset="0x008c" name="RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0"/>
+ <reg32 offset="0x0100" name="RSCC_OVERRIDE_START_ADDR"/>
+ <reg32 offset="0x0101" name="RSCC_SEQ_BUSY_DRV0"/>
+ <reg32 offset="0x0154" name="RSCC_SEQ_MEM_0_DRV0_A740" variants="A7XX"/>
+ <reg32 offset="0x0180" name="RSCC_SEQ_MEM_0_DRV0"/>
+ <reg32 offset="0x0346" name="RSCC_TCS0_DRV0_STATUS"/>
+ <reg32 offset="0x03ee" name="RSCC_TCS1_DRV0_STATUS"/>
+ <reg32 offset="0x0496" name="RSCC_TCS2_DRV0_STATUS"/>
+ <reg32 offset="0x053e" name="RSCC_TCS3_DRV0_STATUS"/>
+</domain>
+
+</database>
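(Aside: the GMU interrupt registers defined above, e.g. GMU_GMU2HOST_INTR_INFO with MSGQ at bit 0 and CM3_FAULT at bit 23, are consumed from driver C code as plain bit masks. Below is a minimal hand-written sketch of that usage; gmu_read()/gmu_write() are placeholder MMIO accessors, not the driver's real helpers or the generated header.)

/*
 * Illustrative sketch only, not the generated header or driver code:
 * masks derived from the GMU_GMU2HOST_INTR_INFO bitfields above
 * (MSGQ at bit 0, CM3_FAULT at bit 23). gmu_read()/gmu_write() stand in
 * for whatever MMIO accessors the driver actually uses.
 */
#include <stdbool.h>
#include <stdint.h>

#define GMU_GMU2HOST_INTR_CLR          0x5191
#define GMU_GMU2HOST_INTR_INFO         0x5192
#define GMU2HOST_INTR_INFO_MSGQ        (1u << 0)
#define GMU2HOST_INTR_INFO_CM3_FAULT   (1u << 23)

uint32_t gmu_read(uint32_t offset);             /* placeholder MMIO read  */
void gmu_write(uint32_t offset, uint32_t val);  /* placeholder MMIO write */

/* Ack pending GMU->host interrupts; report whether the CM3 faulted. */
static bool gmu_handle_host_irq(void)
{
	uint32_t status = gmu_read(GMU_GMU2HOST_INTR_INFO);

	gmu_write(GMU_GMU2HOST_INTR_CLR, status);

	if (status & GMU2HOST_INTR_INFO_MSGQ) {
		/* an HFI message-queue interrupt would be serviced here */
	}

	return (status & GMU2HOST_INTR_INFO_CM3_FAULT) != 0;
}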
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml
new file mode 100644
index 000000000000..218ec8bb966e
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml
@@ -0,0 +1,400 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<enum name="chip" bare="yes">
+ <value name="A2XX" value="2"/>
+ <value name="A3XX" value="3"/>
+ <value name="A4XX" value="4"/>
+ <value name="A5XX" value="5"/>
+ <value name="A6XX" value="6"/>
+ <value name="A7XX" value="7"/>
+</enum>
+
+<enum name="adreno_pa_su_sc_draw">
+ <value name="PC_DRAW_POINTS" value="0"/>
+ <value name="PC_DRAW_LINES" value="1"/>
+ <value name="PC_DRAW_TRIANGLES" value="2"/>
+</enum>
+
+<enum name="adreno_compare_func">
+ <value name="FUNC_NEVER" value="0"/>
+ <value name="FUNC_LESS" value="1"/>
+ <value name="FUNC_EQUAL" value="2"/>
+ <value name="FUNC_LEQUAL" value="3"/>
+ <value name="FUNC_GREATER" value="4"/>
+ <value name="FUNC_NOTEQUAL" value="5"/>
+ <value name="FUNC_GEQUAL" value="6"/>
+ <value name="FUNC_ALWAYS" value="7"/>
+</enum>
+
+<enum name="adreno_stencil_op">
+ <value name="STENCIL_KEEP" value="0"/>
+ <value name="STENCIL_ZERO" value="1"/>
+ <value name="STENCIL_REPLACE" value="2"/>
+ <value name="STENCIL_INCR_CLAMP" value="3"/>
+ <value name="STENCIL_DECR_CLAMP" value="4"/>
+ <value name="STENCIL_INVERT" value="5"/>
+ <value name="STENCIL_INCR_WRAP" value="6"/>
+ <value name="STENCIL_DECR_WRAP" value="7"/>
+</enum>
+
+<enum name="adreno_rb_blend_factor">
+ <value name="FACTOR_ZERO" value="0"/>
+ <value name="FACTOR_ONE" value="1"/>
+ <value name="FACTOR_SRC_COLOR" value="4"/>
+ <value name="FACTOR_ONE_MINUS_SRC_COLOR" value="5"/>
+ <value name="FACTOR_SRC_ALPHA" value="6"/>
+ <value name="FACTOR_ONE_MINUS_SRC_ALPHA" value="7"/>
+ <value name="FACTOR_DST_COLOR" value="8"/>
+ <value name="FACTOR_ONE_MINUS_DST_COLOR" value="9"/>
+ <value name="FACTOR_DST_ALPHA" value="10"/>
+ <value name="FACTOR_ONE_MINUS_DST_ALPHA" value="11"/>
+ <value name="FACTOR_CONSTANT_COLOR" value="12"/>
+ <value name="FACTOR_ONE_MINUS_CONSTANT_COLOR" value="13"/>
+ <value name="FACTOR_CONSTANT_ALPHA" value="14"/>
+ <value name="FACTOR_ONE_MINUS_CONSTANT_ALPHA" value="15"/>
+ <value name="FACTOR_SRC_ALPHA_SATURATE" value="16"/>
+ <value name="FACTOR_SRC1_COLOR" value="20"/>
+ <value name="FACTOR_ONE_MINUS_SRC1_COLOR" value="21"/>
+ <value name="FACTOR_SRC1_ALPHA" value="22"/>
+ <value name="FACTOR_ONE_MINUS_SRC1_ALPHA" value="23"/>
+</enum>
+
+<bitset name="adreno_rb_stencilrefmask" inline="yes">
+ <bitfield name="STENCILREF" low="0" high="7" type="hex"/>
+ <bitfield name="STENCILMASK" low="8" high="15" type="hex"/>
+ <bitfield name="STENCILWRITEMASK" low="16" high="23" type="hex"/>
+</bitset>
+
+<enum name="adreno_rb_surface_endian">
+ <value name="ENDIAN_NONE" value="0"/>
+ <value name="ENDIAN_8IN16" value="1"/>
+ <value name="ENDIAN_8IN32" value="2"/>
+ <value name="ENDIAN_16IN32" value="3"/>
+ <value name="ENDIAN_8IN64" value="4"/>
+ <value name="ENDIAN_8IN128" value="5"/>
+</enum>
+
+<enum name="adreno_rb_dither_mode">
+ <value name="DITHER_DISABLE" value="0"/>
+ <value name="DITHER_ALWAYS" value="1"/>
+ <value name="DITHER_IF_ALPHA_OFF" value="2"/>
+</enum>
+
+<enum name="adreno_rb_depth_format">
+ <value name="DEPTHX_16" value="0"/>
+ <value name="DEPTHX_24_8" value="1"/>
+ <value name="DEPTHX_32" value="2"/>
+</enum>
+
+<enum name="adreno_rb_copy_control_mode">
+ <value name="RB_COPY_RESOLVE" value="1"/>
+ <value name="RB_COPY_CLEAR" value="2"/>
+ <value name="RB_COPY_DEPTH_STENCIL" value="5"/> <!-- not sure if this is part of MODE or another bitfield?? -->
+</enum>
+
+<bitset name="adreno_reg_xy" inline="yes">
+ <bitfield name="WINDOW_OFFSET_DISABLE" pos="31" type="boolean"/>
+ <bitfield name="X" low="0" high="14" type="uint"/>
+ <bitfield name="Y" low="16" high="30" type="uint"/>
+</bitset>
+
+<bitset name="adreno_cp_protect" inline="yes">
+ <bitfield name="BASE_ADDR" low="0" high="16"/>
+ <bitfield name="MASK_LEN" low="24" high="28"/>
+ <bitfield name="TRAP_WRITE" pos="29"/>
+ <bitfield name="TRAP_READ" pos="30"/>
+</bitset>
+
+<domain name="AXXX" width="32">
+ <brief>Registers in common between a2xx and a3xx</brief>
+
+ <reg32 offset="0x01c0" name="CP_RB_BASE"/>
+ <reg32 offset="0x01c1" name="CP_RB_CNTL">
+ <bitfield name="BUFSZ" low="0" high="5"/>
+ <bitfield name="BLKSZ" low="8" high="13"/>
+ <bitfield name="BUF_SWAP" low="16" high="17"/>
+ <bitfield name="POLL_EN" pos="20" type="boolean"/>
+ <bitfield name="NO_UPDATE" pos="27" type="boolean"/>
+ <bitfield name="RPTR_WR_EN" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01c3" name="CP_RB_RPTR_ADDR">
+ <bitfield name="SWAP" low="0" high="1" type="uint"/>
+ <bitfield name="ADDR" low="2" high="31" shr="2"/>
+ </reg32>
+ <reg32 offset="0x01c4" name="CP_RB_RPTR" type="uint"/>
+ <reg32 offset="0x01c5" name="CP_RB_WPTR" type="uint"/>
+ <reg32 offset="0x01c6" name="CP_RB_WPTR_DELAY"/>
+ <reg32 offset="0x01c7" name="CP_RB_RPTR_WR"/>
+ <reg32 offset="0x01c8" name="CP_RB_WPTR_BASE"/>
+ <reg32 offset="0x01d5" name="CP_QUEUE_THRESHOLDS">
+ <bitfield name="CSQ_IB1_START" low="0" high="3" type="uint"/>
+ <bitfield name="CSQ_IB2_START" low="8" high="11" type="uint"/>
+ <bitfield name="CSQ_ST_START" low="16" high="19" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01d6" name="CP_MEQ_THRESHOLDS">
+ <bitfield name="MEQ_END" low="16" high="20" type="uint"/>
+ <bitfield name="ROQ_END" low="24" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01d7" name="CP_CSQ_AVAIL">
+ <bitfield name="RING" low="0" high="6" type="uint"/>
+ <bitfield name="IB1" low="8" high="14" type="uint"/>
+ <bitfield name="IB2" low="16" high="22" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01d8" name="CP_STQ_AVAIL">
+ <bitfield name="ST" low="0" high="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01d9" name="CP_MEQ_AVAIL">
+ <bitfield name="MEQ" low="0" high="4" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01dc" name="SCRATCH_UMSK">
+ <bitfield name="UMSK" low="0" high="7" type="uint"/>
+ <bitfield name="SWAP" low="16" high="17" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01dd" name="SCRATCH_ADDR"/>
+ <reg32 offset="0x01ea" name="CP_ME_RDADDR"/>
+
+ <reg32 offset="0x01ec" name="CP_STATE_DEBUG_INDEX"/>
+ <reg32 offset="0x01ed" name="CP_STATE_DEBUG_DATA"/>
+ <reg32 offset="0x01f2" name="CP_INT_CNTL">
+ <bitfield name="SW_INT_MASK" pos="19" type="boolean"/>
+ <bitfield name="T0_PACKET_IN_IB_MASK" pos="23" type="boolean"/>
+ <bitfield name="OPCODE_ERROR_MASK" pos="24" type="boolean"/>
+ <bitfield name="PROTECTED_MODE_ERROR_MASK" pos="25" type="boolean"/>
+ <bitfield name="RESERVED_BIT_ERROR_MASK" pos="26" type="boolean"/>
+ <bitfield name="IB_ERROR_MASK" pos="27" type="boolean"/>
+ <bitfield name="IB2_INT_MASK" pos="29" type="boolean"/>
+ <bitfield name="IB1_INT_MASK" pos="30" type="boolean"/>
+ <bitfield name="RB_INT_MASK" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01f3" name="CP_INT_STATUS"/>
+ <reg32 offset="0x01f4" name="CP_INT_ACK"/>
+ <reg32 offset="0x01f6" name="CP_ME_CNTL">
+ <bitfield name="BUSY" pos="29" type="boolean"/>
+ <bitfield name="HALT" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01f7" name="CP_ME_STATUS"/>
+ <reg32 offset="0x01f8" name="CP_ME_RAM_WADDR"/>
+ <reg32 offset="0x01f9" name="CP_ME_RAM_RADDR"/>
+ <reg32 offset="0x01fa" name="CP_ME_RAM_DATA"/>
+ <reg32 offset="0x01fc" name="CP_DEBUG">
+ <bitfield name="PREDICATE_DISABLE" pos="23" type="boolean"/>
+ <bitfield name="PROG_END_PTR_ENABLE" pos="24" type="boolean"/>
+ <bitfield name="MIU_128BIT_WRITE_ENABLE" pos="25" type="boolean"/>
+ <bitfield name="PREFETCH_PASS_NOPS" pos="26" type="boolean"/>
+ <bitfield name="DYNAMIC_CLK_DISABLE" pos="27" type="boolean"/>
+ <bitfield name="PREFETCH_MATCH_DISABLE" pos="28" type="boolean"/>
+ <bitfield name="SIMPLE_ME_FLOW_CONTROL" pos="30" type="boolean"/>
+ <bitfield name="MIU_WRITE_PACK_DISABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01fd" name="CP_CSQ_RB_STAT">
+ <bitfield name="RPTR" low="0" high="6" type="uint"/>
+ <bitfield name="WPTR" low="16" high="22" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01fe" name="CP_CSQ_IB1_STAT">
+ <bitfield name="RPTR" low="0" high="6" type="uint"/>
+ <bitfield name="WPTR" low="16" high="22" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01ff" name="CP_CSQ_IB2_STAT">
+ <bitfield name="RPTR" low="0" high="6" type="uint"/>
+ <bitfield name="WPTR" low="16" high="22" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x0440" name="CP_NON_PREFETCH_CNTRS"/>
+ <reg32 offset="0x0443" name="CP_STQ_ST_STAT"/>
+ <reg32 offset="0x044d" name="CP_ST_BASE"/>
+ <reg32 offset="0x044e" name="CP_ST_BUFSZ"/>
+ <reg32 offset="0x044f" name="CP_MEQ_STAT"/>
+ <reg32 offset="0x0452" name="CP_MIU_TAG_STAT"/>
+ <reg32 offset="0x0454" name="CP_BIN_MASK_LO"/>
+ <reg32 offset="0x0455" name="CP_BIN_MASK_HI"/>
+ <reg32 offset="0x0456" name="CP_BIN_SELECT_LO"/>
+ <reg32 offset="0x0457" name="CP_BIN_SELECT_HI"/>
+ <reg32 offset="0x0458" name="CP_IB1_BASE"/>
+ <reg32 offset="0x0459" name="CP_IB1_BUFSZ"/>
+ <reg32 offset="0x045a" name="CP_IB2_BASE"/>
+ <reg32 offset="0x045b" name="CP_IB2_BUFSZ"/>
+ <reg32 offset="0x047f" name="CP_STAT">
+ <bitfield pos="31" name="CP_BUSY"/>
+ <bitfield pos="30" name="VS_EVENT_FIFO_BUSY"/>
+ <bitfield pos="29" name="PS_EVENT_FIFO_BUSY"/>
+ <bitfield pos="28" name="CF_EVENT_FIFO_BUSY"/>
+ <bitfield pos="27" name="RB_EVENT_FIFO_BUSY"/>
+ <bitfield pos="26" name="ME_BUSY"/>
+ <bitfield pos="25" name="MIU_WR_C_BUSY"/>
+ <bitfield pos="23" name="CP_3D_BUSY"/>
+ <bitfield pos="22" name="CP_NRT_BUSY"/>
+ <bitfield pos="21" name="RBIU_SCRATCH_BUSY"/>
+ <bitfield pos="20" name="RCIU_ME_BUSY"/>
+ <bitfield pos="19" name="RCIU_PFP_BUSY"/>
+ <bitfield pos="18" name="MEQ_RING_BUSY"/>
+ <bitfield pos="17" name="PFP_BUSY"/>
+ <bitfield pos="16" name="ST_QUEUE_BUSY"/>
+ <bitfield pos="13" name="INDIRECT2_QUEUE_BUSY"/>
+ <bitfield pos="12" name="INDIRECTS_QUEUE_BUSY"/>
+ <bitfield pos="11" name="RING_QUEUE_BUSY"/>
+ <bitfield pos="10" name="CSF_BUSY"/>
+ <bitfield pos="9" name="CSF_ST_BUSY"/>
+ <bitfield pos="8" name="EVENT_BUSY"/>
+ <bitfield pos="7" name="CSF_INDIRECT2_BUSY"/>
+ <bitfield pos="6" name="CSF_INDIRECTS_BUSY"/>
+ <bitfield pos="5" name="CSF_RING_BUSY"/>
+ <bitfield pos="4" name="RCIU_BUSY"/>
+ <bitfield pos="3" name="RBIU_BUSY"/>
+ <bitfield pos="2" name="MIU_RD_RETURN_BUSY"/>
+ <bitfield pos="1" name="MIU_RD_REQ_BUSY"/>
+ <bitfield pos="0" name="MIU_WR_BUSY"/>
+ </reg32>
+ <reg32 offset="0x0578" name="CP_SCRATCH_REG0" type="uint"/>
+ <reg32 offset="0x0579" name="CP_SCRATCH_REG1" type="uint"/>
+ <reg32 offset="0x057a" name="CP_SCRATCH_REG2" type="uint"/>
+ <reg32 offset="0x057b" name="CP_SCRATCH_REG3" type="uint"/>
+ <reg32 offset="0x057c" name="CP_SCRATCH_REG4" type="uint"/>
+ <reg32 offset="0x057d" name="CP_SCRATCH_REG5" type="uint"/>
+ <reg32 offset="0x057e" name="CP_SCRATCH_REG6" type="uint"/>
+ <reg32 offset="0x057f" name="CP_SCRATCH_REG7" type="uint"/>
+
+ <reg32 offset="0x0600" name="CP_ME_VS_EVENT_SRC"/>
+ <reg32 offset="0x0601" name="CP_ME_VS_EVENT_ADDR"/>
+ <reg32 offset="0x0602" name="CP_ME_VS_EVENT_DATA"/>
+ <reg32 offset="0x0603" name="CP_ME_VS_EVENT_ADDR_SWM"/>
+ <reg32 offset="0x0604" name="CP_ME_VS_EVENT_DATA_SWM"/>
+ <reg32 offset="0x0605" name="CP_ME_PS_EVENT_SRC"/>
+ <reg32 offset="0x0606" name="CP_ME_PS_EVENT_ADDR"/>
+ <reg32 offset="0x0607" name="CP_ME_PS_EVENT_DATA"/>
+ <reg32 offset="0x0608" name="CP_ME_PS_EVENT_ADDR_SWM"/>
+ <reg32 offset="0x0609" name="CP_ME_PS_EVENT_DATA_SWM"/>
+ <reg32 offset="0x060a" name="CP_ME_CF_EVENT_SRC"/>
+ <reg32 offset="0x060b" name="CP_ME_CF_EVENT_ADDR"/>
+ <reg32 offset="0x060c" name="CP_ME_CF_EVENT_DATA" type="uint"/>
+ <reg32 offset="0x060d" name="CP_ME_NRT_ADDR"/>
+ <reg32 offset="0x060e" name="CP_ME_NRT_DATA"/>
+ <reg32 offset="0x0612" name="CP_ME_VS_FETCH_DONE_SRC"/>
+ <reg32 offset="0x0613" name="CP_ME_VS_FETCH_DONE_ADDR"/>
+ <reg32 offset="0x0614" name="CP_ME_VS_FETCH_DONE_DATA"/>
+
+</domain>
+
+<!--
+ Common between A3xx and A4xx:
+ -->
+
+<enum name="a3xx_rop_code">
+ <value name="ROP_CLEAR" value="0"/>
+ <value name="ROP_NOR" value="1"/>
+ <value name="ROP_AND_INVERTED" value="2"/>
+ <value name="ROP_COPY_INVERTED" value="3"/>
+ <value name="ROP_AND_REVERSE" value="4"/>
+ <value name="ROP_INVERT" value="5"/>
+ <value name="ROP_XOR" value="6"/>
+ <value name="ROP_NAND" value="7"/>
+ <value name="ROP_AND" value="8"/>
+ <value name="ROP_EQUIV" value="9"/>
+ <value name="ROP_NOOP" value="10"/>
+ <value name="ROP_OR_INVERTED" value="11"/>
+ <value name="ROP_COPY" value="12"/>
+ <value name="ROP_OR_REVERSE" value="13"/>
+ <value name="ROP_OR" value="14"/>
+ <value name="ROP_SET" value="15"/>
+</enum>
+
+<enum name="a3xx_render_mode">
+ <value name="RB_RENDERING_PASS" value="0"/>
+ <value name="RB_TILING_PASS" value="1"/>
+ <value name="RB_RESOLVE_PASS" value="2"/>
+ <value name="RB_COMPUTE_PASS" value="3"/>
+</enum>
+
+<enum name="a3xx_msaa_samples">
+ <value name="MSAA_ONE" value="0"/>
+ <value name="MSAA_TWO" value="1"/>
+ <value name="MSAA_FOUR" value="2"/>
+ <value name="MSAA_EIGHT" value="3"/>
+</enum>
+
+<enum name="a3xx_threadmode">
+ <value value="0" name="MULTI"/>
+ <value value="1" name="SINGLE"/>
+</enum>
+
+<enum name="a3xx_instrbuffermode">
+ <!--
+ When the shader size goes above ~128 or so, the blob switches to '0'
+ and doesn't emit the shader in the cmdstream. When either is '0' it
+ doesn't get emitted via CP_LOAD_STATE. When only one is
+ '0', the other gets size 256 minus the other's size. So I think that:
+ BUFFER => execute out of state memory
+ CACHE => use available state memory as local cache
+ NOTE that when in CACHE mode, also set the CACHEINVALID flag!
+
+ TODO check if that 256 size is the same for all a3xx
+ -->
+ <value value="0" name="CACHE"/>
+ <value value="1" name="BUFFER"/>
+</enum>
+
+<enum name="a3xx_threadsize">
+ <value value="0" name="TWO_QUADS"/>
+ <value value="1" name="FOUR_QUADS"/>
+</enum>
+
+<enum name="a3xx_color_swap">
+ <value name="WZYX" value="0"/>
+ <value name="WXYZ" value="1"/>
+ <value name="ZYXW" value="2"/>
+ <value name="XYZW" value="3"/>
+</enum>
+
+<enum name="a3xx_rb_blend_opcode">
+ <value name="BLEND_DST_PLUS_SRC" value="0"/>
+ <value name="BLEND_SRC_MINUS_DST" value="1"/>
+ <value name="BLEND_DST_MINUS_SRC" value="2"/>
+ <value name="BLEND_MIN_DST_SRC" value="3"/>
+ <value name="BLEND_MAX_DST_SRC" value="4"/>
+</enum>
+
+<enum name="a4xx_tess_spacing">
+ <value name="EQUAL_SPACING" value="0"/>
+ <value name="ODD_SPACING" value="2"/>
+ <value name="EVEN_SPACING" value="3"/>
+</enum>
+
+<doc>Address mode for a5xx+</doc>
+<enum name="a5xx_address_mode">
+ <value name="ADDR_32B" value="0"/>
+ <value name="ADDR_64B" value="1"/>
+</enum>
+
+<doc>
+ Line mode for a5xx+
+ Note that Bresenham lines are only supported with MSAA disabled.
+</doc>
+<enum name="a5xx_line_mode">
+ <value value="0x0" name="BRESENHAM"/>
+ <value value="0x1" name="RECTANGULAR"/>
+</enum>
+
+<doc>
+ The blob (v615) seems to only use SAM, and I wasn't able to coerce
+ it to produce any other command.
+ Probably valid for a4xx+ but not enabled or tested on anything
+ but a6xx.
+</doc>
+<enum name="a6xx_tex_prefetch_cmd">
+ <doc> Produces garbage </doc>
+ <value value="0x0" name="TEX_PREFETCH_UNK0"/>
+ <value value="0x1" name="TEX_PREFETCH_SAM"/>
+ <value value="0x2" name="TEX_PREFETCH_GATHER4R"/>
+ <value value="0x3" name="TEX_PREFETCH_GATHER4G"/>
+ <value value="0x4" name="TEX_PREFETCH_GATHER4B"/>
+ <value value="0x5" name="TEX_PREFETCH_GATHER4A"/>
+ <doc> Causes reads from an invalid address </doc>
+ <value value="0x6" name="TEX_PREFETCH_UNK6"/>
+ <doc> Results in color being zero </doc>
+ <value value="0x7" name="TEX_PREFETCH_UNK7"/>
+</enum>
+
+</database>
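(Aside: inline bitsets such as adreno_rb_stencilrefmask above (STENCILREF in bits 0-7, STENCILMASK in bits 8-15, STENCILWRITEMASK in bits 16-23) describe how fields pack into a 32-bit register value. A minimal sketch of that packing follows; the helper name is invented for illustration, since real code relies on headers generated from these XML files.)

/*
 * Illustrative packing of the adreno_rb_stencilrefmask bitset described
 * above: STENCILREF in bits 0-7, STENCILMASK in bits 8-15,
 * STENCILWRITEMASK in bits 16-23. The helper name is made up for the
 * example; real code uses helpers generated from the XML.
 */
#include <stdint.h>

static inline uint32_t pack_rb_stencilrefmask(uint8_t ref, uint8_t mask,
					      uint8_t writemask)
{
	return ((uint32_t)ref << 0) |
	       ((uint32_t)mask << 8) |
	       ((uint32_t)writemask << 16);
}

/* e.g. ref=0x42, mask=0xff, writemask=0xff packs to 0x00ffff42 */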
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
new file mode 100644
index 000000000000..cab01af55d22
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -0,0 +1,2268 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+
+<enum name="vgt_event_type" varset="chip">
+ <value name="VS_DEALLOC" value="0"/>
+ <value name="PS_DEALLOC" value="1" variants="A2XX-A6XX"/>
+ <value name="VS_DONE_TS" value="2"/>
+ <value name="PS_DONE_TS" value="3"/>
+ <doc>
+ Flushes dirty data from UCHE, and also writes a GPU timestamp to
+ the address if one is provided.
+ </doc>
+ <value name="CACHE_FLUSH_TS" value="4"/>
+ <value name="CONTEXT_DONE" value="5"/>
+ <value name="CACHE_FLUSH" value="6" variants="A2XX-A4XX"/>
+ <value name="VIZQUERY_START" value="7" variants="A2XX"/>
+ <value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/>
+ <value name="VIZQUERY_END" value="8" variants="A2XX"/>
+ <value name="SC_WAIT_WC" value="9" variants="A2XX"/>
+ <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX"/>
+ <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX"/>
+ <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX"/>
+ <!-- Not sure whether these 4 events have the same meaning as on A5XX+ -->
+ <value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/>
+ <value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/>
+ <value name="TILE_FLUSH" value="15" variants="A2XX-A4XX"/>
+ <value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/>
+ <value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/>
+ <doc>
+ If A6XX_RB_SAMPLE_COUNT_CONTROL.copy is true, writes OQ Z passed
+ sample counts to RB_SAMPLE_COUNT_ADDR. This writes to main
+ memory, skipping UCHE.
+ </doc>
+ <value name="ZPASS_DONE" value="21"/>
+ <value name="CACHE_FLUSH_AND_INV_EVENT" value="22" variants="A2XX"/>
+
+ <doc>
+ Writes the GPU timestamp to the address that follows, once RB
+ access and flushes are complete.
+ </doc>
+ <value name="RB_DONE_TS" value="22" variants="A3XX-"/>
+
+ <value name="PERFCOUNTER_START" value="23" variants="A2XX-A4XX"/>
+ <value name="PERFCOUNTER_STOP" value="24" variants="A2XX-A4XX"/>
+ <value name="VS_FETCH_DONE" value="27"/>
+ <value name="FACENESS_FLUSH" value="28" variants="A2XX-A4XX"/>
+
+ <!-- a5xx events -->
+ <value name="WT_DONE_TS" value="8" variants="A5XX-"/>
+ <value name="START_FRAGMENT_CTRS" value="13" variants="A5XX-"/>
+ <value name="STOP_FRAGMENT_CTRS" value="14" variants="A5XX-"/>
+ <value name="START_COMPUTE_CTRS" value="15" variants="A5XX-"/>
+ <value name="STOP_COMPUTE_CTRS" value="16" variants="A5XX-"/>
+ <value name="FLUSH_SO_0" value="17" variants="A5XX-"/>
+ <value name="FLUSH_SO_1" value="18" variants="A5XX-"/>
+ <value name="FLUSH_SO_2" value="19" variants="A5XX-"/>
+ <value name="FLUSH_SO_3" value="20" variants="A5XX-"/>
+
+ <doc>
+ Invalidates depth attachment data from the CCU. We assume this
+ happens in the last stage.
+ </doc>
+ <value name="PC_CCU_INVALIDATE_DEPTH" value="24" variants="A5XX-"/>
+
+ <doc>
+ Invalidates color attachment data from the CCU. We assume this
+ happens in the last stage.
+ </doc>
+ <value name="PC_CCU_INVALIDATE_COLOR" value="25" variants="A5XX-"/>
+
+ <doc>
+ Flushes the small cache used by CP_EVENT_WRITE::BLIT (which,
+ along with its registers, would be better named RESOLVE).
+ </doc>
+ <value name="PC_CCU_RESOLVE_TS" value="26" variants="A6XX"/>
+
+ <doc>
+ Flushes depth attachment data from the CCU. We assume this
+ happens in the last stage.
+ </doc>
+ <value name="PC_CCU_FLUSH_DEPTH_TS" value="28" variants="A5XX-"/>
+
+ <doc>
+ Flushes color attachment data from the CCU. We assume this
+ happens in the last stage.
+ </doc>
+ <value name="PC_CCU_FLUSH_COLOR_TS" value="29" variants="A5XX-"/>
+
+ <doc>
+ 2D blit to resolve GMEM to system memory (skipping CCU) at the
+ end of a render pass. Compare to CP_BLIT's BLIT_OP_SCALE for
+ more general blitting.
+ </doc>
+ <value name="BLIT" value="30" variants="A5XX-"/>
+
+ <doc>
+ Clears based on the GRAS_LRZ_CNTL configuration; it can clear the
+ fast-clear buffer or the LRZ direction.
+ The LRZ direction is stored at lrz_fc_offset + 0x200 as 1 byte, which
+ could be expressed by the enum:
+ CUR_DIR_DISABLED = 0x0
+ CUR_DIR_GE = 0x1
+ CUR_DIR_LE = 0x2
+ CUR_DIR_UNSET = 0x3
+ Clear of direction means setting the direction to CUR_DIR_UNSET.
+ </doc>
+ <value name="LRZ_CLEAR" value="37" variants="A5XX-"/>
+
+ <value name="LRZ_FLUSH" value="38" variants="A5XX-"/>
+ <value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
+ <value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
+ <value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
+ <value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
+ <value name="UNK_2C" value="44" variants="A5XX-"/>
+ <value name="UNK_2D" value="45" variants="A5XX-"/>
+
+ <!-- a6xx events -->
+ <doc>
+ Invalidates UCHE.
+ </doc>
+ <value name="CACHE_INVALIDATE" value="49" variants="A6XX"/>
+
+ <value name="LABEL" value="63" variants="A6XX-"/>
+
+ <!-- note, some of these are the same as a6xx, just named differently -->
+
+ <doc> Doesn't seem to do anything </doc>
+ <value name="DUMMY_EVENT" value="1" variants="A7XX"/>
+ <value name="CCU_INVALIDATE_DEPTH" value="24" variants="A7XX"/>
+ <value name="CCU_INVALIDATE_COLOR" value="25" variants="A7XX"/>
+ <value name="CCU_RESOLVE_CLEAN" value="26" variants="A7XX"/>
+ <value name="CCU_FLUSH_DEPTH" value="28" variants="A7XX"/>
+ <value name="CCU_FLUSH_COLOR" value="29" variants="A7XX"/>
+ <value name="CCU_RESOLVE" value="30" variants="A7XX"/>
+ <value name="CCU_END_RESOLVE_GROUP" value="31" variants="A7XX"/>
+ <value name="CCU_CLEAN_DEPTH" value="32" variants="A7XX"/>
+ <value name="CCU_CLEAN_COLOR" value="33" variants="A7XX"/>
+ <value name="CACHE_RESET" value="48" variants="A7XX"/>
+ <value name="CACHE_CLEAN" value="49" variants="A7XX"/>
+ <!-- TODO: deal with name conflicts with other gens -->
+ <value name="CACHE_FLUSH7" value="50" variants="A7XX"/>
+ <value name="CACHE_INVALIDATE7" value="51" variants="A7XX"/>
+</enum>
+
+<enum name="pc_di_primtype">
+ <value name="DI_PT_NONE" value="0"/>
+ <!-- POINTLIST_PSIZE is used on a3xx/a4xx when gl_PointSize is written: -->
+ <value name="DI_PT_POINTLIST_PSIZE" value="1"/>
+ <value name="DI_PT_LINELIST" value="2"/>
+ <value name="DI_PT_LINESTRIP" value="3"/>
+ <value name="DI_PT_TRILIST" value="4"/>
+ <value name="DI_PT_TRIFAN" value="5"/>
+ <value name="DI_PT_TRISTRIP" value="6"/>
+ <value name="DI_PT_LINELOOP" value="7"/> <!-- a22x, a3xx -->
+ <value name="DI_PT_RECTLIST" value="8"/>
+ <value name="DI_PT_POINTLIST" value="9"/>
+ <value name="DI_PT_LINE_ADJ" value="0xa"/>
+ <value name="DI_PT_LINESTRIP_ADJ" value="0xb"/>
+ <value name="DI_PT_TRI_ADJ" value="0xc"/>
+ <value name="DI_PT_TRISTRIP_ADJ" value="0xd"/>
+
+ <value name="DI_PT_PATCHES0" value="0x1f"/>
+ <value name="DI_PT_PATCHES1" value="0x20"/>
+ <value name="DI_PT_PATCHES2" value="0x21"/>
+ <value name="DI_PT_PATCHES3" value="0x22"/>
+ <value name="DI_PT_PATCHES4" value="0x23"/>
+ <value name="DI_PT_PATCHES5" value="0x24"/>
+ <value name="DI_PT_PATCHES6" value="0x25"/>
+ <value name="DI_PT_PATCHES7" value="0x26"/>
+ <value name="DI_PT_PATCHES8" value="0x27"/>
+ <value name="DI_PT_PATCHES9" value="0x28"/>
+ <value name="DI_PT_PATCHES10" value="0x29"/>
+ <value name="DI_PT_PATCHES11" value="0x2a"/>
+ <value name="DI_PT_PATCHES12" value="0x2b"/>
+ <value name="DI_PT_PATCHES13" value="0x2c"/>
+ <value name="DI_PT_PATCHES14" value="0x2d"/>
+ <value name="DI_PT_PATCHES15" value="0x2e"/>
+ <value name="DI_PT_PATCHES16" value="0x2f"/>
+ <value name="DI_PT_PATCHES17" value="0x30"/>
+ <value name="DI_PT_PATCHES18" value="0x31"/>
+ <value name="DI_PT_PATCHES19" value="0x32"/>
+ <value name="DI_PT_PATCHES20" value="0x33"/>
+ <value name="DI_PT_PATCHES21" value="0x34"/>
+ <value name="DI_PT_PATCHES22" value="0x35"/>
+ <value name="DI_PT_PATCHES23" value="0x36"/>
+ <value name="DI_PT_PATCHES24" value="0x37"/>
+ <value name="DI_PT_PATCHES25" value="0x38"/>
+ <value name="DI_PT_PATCHES26" value="0x39"/>
+ <value name="DI_PT_PATCHES27" value="0x3a"/>
+ <value name="DI_PT_PATCHES28" value="0x3b"/>
+ <value name="DI_PT_PATCHES29" value="0x3c"/>
+ <value name="DI_PT_PATCHES30" value="0x3d"/>
+ <value name="DI_PT_PATCHES31" value="0x3e"/>
+</enum>
+
+<enum name="pc_di_src_sel">
+ <value name="DI_SRC_SEL_DMA" value="0"/>
+ <value name="DI_SRC_SEL_IMMEDIATE" value="1"/>
+ <value name="DI_SRC_SEL_AUTO_INDEX" value="2"/>
+ <value name="DI_SRC_SEL_AUTO_XFB" value="3"/>
+</enum>
+
+<enum name="pc_di_face_cull_sel">
+ <value name="DI_FACE_CULL_NONE" value="0"/>
+ <value name="DI_FACE_CULL_FETCH" value="1"/>
+ <value name="DI_FACE_BACKFACE_CULL" value="2"/>
+ <value name="DI_FACE_FRONTFACE_CULL" value="3"/>
+</enum>
+
+<enum name="pc_di_index_size">
+ <value name="INDEX_SIZE_IGN" value="0"/>
+ <value name="INDEX_SIZE_16_BIT" value="0"/>
+ <value name="INDEX_SIZE_32_BIT" value="1"/>
+ <value name="INDEX_SIZE_8_BIT" value="2"/>
+ <value name="INDEX_SIZE_INVALID"/>
+</enum>
+
+<enum name="pc_di_vis_cull_mode">
+ <value name="IGNORE_VISIBILITY" value="0"/>
+ <value name="USE_VISIBILITY" value="1"/>
+</enum>
+
+<enum name="adreno_pm4_packet_type">
+ <value name="CP_TYPE0_PKT" value="0x00000000"/>
+ <value name="CP_TYPE1_PKT" value="0x40000000"/>
+ <value name="CP_TYPE2_PKT" value="0x80000000"/>
+ <value name="CP_TYPE3_PKT" value="0xc0000000"/>
+ <value name="CP_TYPE4_PKT" value="0x40000000"/>
+ <value name="CP_TYPE7_PKT" value="0x70000000"/>
+</enum>
+
+<!--
+ Note that in some cases, the same packet id is recycled on a later
+ generation, so the variants attribute is used to distinguish. They
+ may not be completely accurate; we would probably have to analyze
+ the pfp and me/pm4 firmware to verify the packet is actually
+ handled on a particular generation. But it is at least enough to
+ disambiguate the packet-id's that were re-used for different
+ packets starting with a5xx.
+ -->
+<enum name="adreno_pm4_type3_packets" varset="chip">
+ <doc>initialize CP's micro-engine</doc>
+ <value name="CP_ME_INIT" value="0x48"/>
+ <doc>skip N 32-bit words to get to the next packet</doc>
+ <value name="CP_NOP" value="0x10"/>
+ <doc>
+ indirect buffer dispatch. prefetch parser uses this packet
+ type to determine whether to pre-fetch the IB
+ </doc>
+ <value name="CP_PREEMPT_ENABLE" value="0x1c" variants="A5XX"/>
+ <value name="CP_PREEMPT_TOKEN" value="0x1e" variants="A5XX"/>
+ <value name="CP_INDIRECT_BUFFER" value="0x3f"/>
+ <doc>
+ Takes the same arguments as CP_INDIRECT_BUFFER, but jumps to
+ another buffer at the same level. Must be at the end of an IB, and
+ doesn't work with draw state IBs.
+ </doc>
+ <value name="CP_INDIRECT_BUFFER_CHAIN" value="0x57" variants="A5XX-"/>
+ <doc>indirect buffer dispatch. same as IB, but init is pipelined</doc>
+ <value name="CP_INDIRECT_BUFFER_PFD" value="0x37"/>
+ <doc>
+ Waits for the IDLE state of the engine before further drawing.
+ This is pipelined, so the CP may continue.
+ </doc>
+ <value name="CP_WAIT_FOR_IDLE" value="0x26"/>
+ <doc>wait until a register or memory location is a specific value</doc>
+ <value name="CP_WAIT_REG_MEM" value="0x3c"/>
+ <doc>wait until a register location is equal to a specific value</doc>
+ <value name="CP_WAIT_REG_EQ" value="0x52"/>
+ <doc>wait until a register location is >= a specific value</doc>
+ <value name="CP_WAIT_REG_GTE" value="0x53" variants="A2XX-A4XX"/>
+ <doc>wait until a read completes</doc>
+ <value name="CP_WAIT_UNTIL_READ" value="0x5c" variants="A2XX-A4XX"/>
+ <doc>wait until all base/size writes from an IB_PFD packet have completed</doc>
+ <!--
+ NOTE: CP_WAIT_IB_PFD_COMPLETE unimplemented at least since a5xx fw, and
+ recycled for something new on a7xx
+ -->
+ <value name="CP_WAIT_IB_PFD_COMPLETE" value="0x5d" varset="chip" variants="A2XX-A4XX"/>
+ <doc>register read/modify/write</doc>
+ <value name="CP_REG_RMW" value="0x21"/>
+ <doc>Set binning configuration registers</doc>
+ <value name="CP_SET_BIN_DATA" value="0x2f" variants="A2XX-A4XX"/>
+ <value name="CP_SET_BIN_DATA5" value="0x2f" variants="A5XX-"/>
+ <doc>reads register in chip and writes to memory</doc>
+ <value name="CP_REG_TO_MEM" value="0x3e"/>
+ <doc>write N 32-bit words to memory</doc>
+ <value name="CP_MEM_WRITE" value="0x3d"/>
+ <doc>write CP_PROG_COUNTER value to memory</doc>
+ <value name="CP_MEM_WRITE_CNTR" value="0x4f"/>
+ <doc>conditional execution of a sequence of packets</doc>
+ <value name="CP_COND_EXEC" value="0x44"/>
+ <doc>conditional write to memory or register</doc>
+ <value name="CP_COND_WRITE" value="0x45" variants="A2XX-A4XX"/>
+ <value name="CP_COND_WRITE5" value="0x45" variants="A5XX-"/>
+ <doc>generate an event that creates a write to memory when completed</doc>
+ <value name="CP_EVENT_WRITE" value="0x46" variants="A2XX-A6XX"/>
+ <value name="CP_EVENT_WRITE7" value="0x46" variants="A7XX-"/>
+ <doc>generate a VS|PS_done event</doc>
+ <value name="CP_EVENT_WRITE_SHD" value="0x58"/>
+ <doc>generate a cache flush done event</doc>
+ <value name="CP_EVENT_WRITE_CFL" value="0x59"/>
+ <doc>generate a z_pass done event</doc>
+ <value name="CP_EVENT_WRITE_ZPD" value="0x5b"/>
+ <doc>
+ not sure of the real name, but this seems to be what is used for
+ opencl, instead of CP_DRAW_INDX..
+ </doc>
+ <value name="CP_RUN_OPENCL" value="0x31"/>
+ <doc>initiate fetch of index buffer and draw</doc>
+ <value name="CP_DRAW_INDX" value="0x22"/>
+ <doc>draw using supplied indices in packet</doc>
+ <value name="CP_DRAW_INDX_2" value="0x36" variants="A2XX-A4XX"/> <!-- this is something different on a6xx and unused on a5xx -->
+ <doc>initiate fetch of index buffer and binIDs and draw</doc>
+ <value name="CP_DRAW_INDX_BIN" value="0x34" variants="A2XX-A4XX"/>
+ <doc>initiate fetch of bin IDs and draw using supplied indices</doc>
+ <value name="CP_DRAW_INDX_2_BIN" value="0x35" variants="A2XX-A4XX"/>
+ <doc>begin/end initiator for viz query extent processing</doc>
+ <value name="CP_VIZ_QUERY" value="0x23" variants="A2XX-A4XX"/>
+ <doc>fetch state sub-blocks and initiate shader code DMAs</doc>
+ <value name="CP_SET_STATE" value="0x25"/>
+ <doc>load constant into chip and to memory</doc>
+ <value name="CP_SET_CONSTANT" value="0x2d" variants="A2XX"/>
+ <doc>load sequencer instruction memory (pointer-based)</doc>
+ <value name="CP_IM_LOAD" value="0x27"/>
+ <doc>load sequencer instruction memory (code embedded in packet)</doc>
+ <value name="CP_IM_LOAD_IMMEDIATE" value="0x2b"/>
+ <doc>load constants from a location in memory</doc>
+ <value name="CP_LOAD_CONSTANT_CONTEXT" value="0x2e" variants="A2XX"/>
+ <doc>selective invalidation of state pointers</doc>
+ <value name="CP_INVALIDATE_STATE" value="0x3b"/>
+ <doc>dynamically changes shader instruction memory partition</doc>
+ <value name="CP_SET_SHADER_BASES" value="0x4a" variants="A2XX-A4XX"/>
+ <doc>sets the 64-bit BIN_MASK register in the PFP</doc>
+ <value name="CP_SET_BIN_MASK" value="0x50" variants="A2XX-A4XX"/>
+ <doc>sets the 64-bit BIN_SELECT register in the PFP</doc>
+ <value name="CP_SET_BIN_SELECT" value="0x51" variants="A2XX-A4XX"/>
+ <doc>updates the current context, if needed</doc>
+ <value name="CP_CONTEXT_UPDATE" value="0x5e"/>
+ <doc>generate interrupt from the command stream</doc>
+ <value name="CP_INTERRUPT" value="0x40"/>
+ <doc>copy sequencer instruction memory to system memory</doc>
+ <value name="CP_IM_STORE" value="0x2c" variants="A2XX"/>
+
+ <!-- For a20x -->
+<!-- TODO handle variants..
+ <doc>
+ Program an offset that will be added to the BIN_BASE value of
+ the 3D_DRAW_INDX_BIN packet
+ </doc>
+ <value name="CP_SET_BIN_BASE_OFFSET" value="0x4b"/>
+ -->
+
+ <!-- for a22x -->
+ <doc>
+ sets draw initiator flags register in PFP, gets bitwise-ORed into
+ every draw initiator
+ </doc>
+ <value name="CP_SET_DRAW_INIT_FLAGS" value="0x4b"/>
+ <doc>sets the register protection mode</doc>
+ <value name="CP_SET_PROTECTED_MODE" value="0x5f"/>
+
+ <value name="CP_BOOTSTRAP_UCODE" value="0x6f"/>
+
+ <!-- for a3xx -->
+ <doc>load high level sequencer command</doc>
+ <value name="CP_LOAD_STATE" value="0x30" variants="A3XX"/>
+ <value name="CP_LOAD_STATE4" value="0x30" variants="A4XX-A5XX"/>
+ <doc>Conditionally load an IB based on a flag, prefetch enabled</doc>
+ <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a"/>
+ <doc>Conditionally load an IB based on a flag, prefetch disabled</doc>
+ <value name="CP_COND_INDIRECT_BUFFER_PFD" value="0x32" variants="A3XX"/>
+ <doc>Load a buffer with pre-fetch enabled</doc>
+ <value name="CP_INDIRECT_BUFFER_PFE" value="0x3f" variants="A5XX"/>
+ <doc>Set bin (?)</doc>
+ <value name="CP_SET_BIN" value="0x4c" variants="A2XX"/>
+
+ <doc>test 2 memory locations to dword values specified</doc>
+ <value name="CP_TEST_TWO_MEMS" value="0x71"/>
+
+ <doc>Write register, ignoring context state for context sensitive registers</doc>
+ <value name="CP_REG_WR_NO_CTXT" value="0x78"/>
+
+ <doc>Record the real-time when this packet is processed by PFP</doc>
+ <value name="CP_RECORD_PFP_TIMESTAMP" value="0x11"/>
+
+ <!-- Used to switch GPU between secure and non-secure modes -->
+ <value name="CP_SET_SECURE_MODE" value="0x66"/>
+
+ <doc>PFP waits until the FIFO between the PFP and the ME is empty</doc>
+ <value name="CP_WAIT_FOR_ME" value="0x13"/>
+
+ <!-- for a4xx -->
+ <doc>
+ Used a bit like CP_SET_CONSTANT on a2xx, but can write multiple
+ groups of registers. Looks like it can be used to create state
+ objects in GPU memory, and on state change only emit a pointer
+ (via CP_SET_DRAW_STATE), which should be nice for reducing CPU
+ overhead:
+
+ (A4x) save PM4 stream pointers to execute upon a visible draw
+ </doc>
+ <value name="CP_SET_DRAW_STATE" value="0x43" variants="A4XX-"/>
+ <value name="CP_DRAW_INDX_OFFSET" value="0x38"/>
+ <value name="CP_DRAW_INDIRECT" value="0x28" variants="A4XX-"/>
+ <value name="CP_DRAW_INDX_INDIRECT" value="0x29" variants="A4XX-"/>
+ <value name="CP_DRAW_INDIRECT_MULTI" value="0x2a" variants="A6XX-"/>
+ <value name="CP_DRAW_AUTO" value="0x24"/>
+
+ <doc>
+ Enable or disable predication globally. Also resets the
+ predicate to "passing" and the local bit to enabled when
+ enabling global predication.
+ </doc>
+ <value name="CP_DRAW_PRED_ENABLE_GLOBAL" value="0x19"/>
+
+ <doc>
+ Enable or disable predication locally. Unlike globally enabling
+ predication, this packet doesn't touch any other state.
+ Predication only happens when enabled globally and locally and a
+ predicate has been set. This should be used for internal draws
+ which aren't supposed to use the predication state:
+
+ CP_DRAW_PRED_ENABLE_LOCAL(0)
+ ... do draw...
+ CP_DRAW_PRED_ENABLE_LOCAL(1)
+ </doc>
+ <value name="CP_DRAW_PRED_ENABLE_LOCAL" value="0x1a"/>
+
+ <doc>
+ Latch a draw predicate into the internal register.
+ </doc>
+ <value name="CP_DRAW_PRED_SET" value="0x4e"/>
+
+ <doc>
+ for A4xx
+ Write to register with address that does not fit into type-0 pkt
+ </doc>
+ <value name="CP_WIDE_REG_WRITE" value="0x74" variants="A4XX"/>
+
+ <doc>copy from ME scratch RAM to a register</doc>
+ <value name="CP_SCRATCH_TO_REG" value="0x4d"/>
+
+ <doc>Copy from REG to ME scratch RAM</doc>
+ <value name="CP_REG_TO_SCRATCH" value="0x4a"/>
+
+ <doc>Wait for memory writes to complete</doc>
+ <value name="CP_WAIT_MEM_WRITES" value="0x12"/>
+
+ <doc>Conditional execution based on register comparison</doc>
+ <value name="CP_COND_REG_EXEC" value="0x47"/>
+
+ <doc>Memory to REG copy</doc>
+ <value name="CP_MEM_TO_REG" value="0x42"/>
+
+ <value name="CP_EXEC_CS_INDIRECT" value="0x41" variants="A4XX-"/>
+ <value name="CP_EXEC_CS" value="0x33"/>
+
+ <doc>
+ for a5xx
+ </doc>
+ <value name="CP_PERFCOUNTER_ACTION" value="0x50" variants="A5XX"/>
+ <!-- switches SMMU pagetable, used on a5xx+ only -->
+ <value name="CP_SMMU_TABLE_UPDATE" value="0x53" variants="A5XX-"/>
+ <!-- for a6xx -->
+ <doc>Tells CP the current mode of GPU operation</doc>
+ <value name="CP_SET_MARKER" value="0x65" variants="A6XX-"/>
+ <doc>Instruct CP to set a few internal CP registers</doc>
+ <value name="CP_SET_PSEUDO_REG" value="0x56" variants="A6XX-"/>
+ <!--
+ pairs of regid and value.. seems to be used to program some TF
+ related regs:
+ -->
+ <value name="CP_CONTEXT_REG_BUNCH" value="0x5c" variants="A5XX-"/>
+ <!-- A5XX Enable yield in RB only -->
+ <value name="CP_YIELD_ENABLE" value="0x1c" variants="A5XX"/>
+ <doc>
+ Enables IB2 skipping. If both GLOBAL and LOCAL are 1 and
+ nothing is left in the visibility stream, then
+ CP_INDIRECT_BUFFER will be skipped, and draws will return early
+ from their IB.
+ </doc>
+ <value name="CP_SKIP_IB2_ENABLE_GLOBAL" value="0x1d" variants="A5XX-"/>
+ <value name="CP_SKIP_IB2_ENABLE_LOCAL" value="0x23" variants="A5XX-"/>
+ <value name="CP_SET_SUBDRAW_SIZE" value="0x35" variants="A5XX-"/>
+ <value name="CP_WHERE_AM_I" value="0x62" variants="A5XX-"/>
+ <value name="CP_SET_VISIBILITY_OVERRIDE" value="0x64" variants="A5XX-"/>
+ <!-- Enable/Disable/Defer A5x global preemption model -->
+ <value name="CP_PREEMPT_ENABLE_GLOBAL" value="0x69" variants="A5XX"/>
+ <!-- Enable/Disable A5x local preemption model -->
+ <value name="CP_PREEMPT_ENABLE_LOCAL" value="0x6a" variants="A5XX"/>
+ <!-- Yield token on a5xx similar to CP_PREEMPT on a4xx -->
+ <value name="CP_CONTEXT_SWITCH_YIELD" value="0x6b" variants="A5XX-"/>
+ <!-- Inform CP about current render mode (needed for a5xx preemption) -->
+ <value name="CP_SET_RENDER_MODE" value="0x6c" variants="A5XX"/>
+ <value name="CP_COMPUTE_CHECKPOINT" value="0x6e" variants="A5XX"/>
+ <!-- check if this works on earlier.. -->
+ <value name="CP_MEM_TO_MEM" value="0x73" variants="A5XX-"/>
+
+ <doc>
+ General purpose 2D blit engine for image transfers and mipmap
+ generation. Reads through UCHE, writes through the CCU cache in
+ the PS stage.
+ </doc>
+ <value name="CP_BLIT" value="0x2c" variants="A5XX-"/>
+
+ <!-- Test specified bit in specified register and set predicate -->
+ <value name="CP_REG_TEST" value="0x39" variants="A5XX-"/>
+
+ <!--
+ Seems to set the mode flags which control which CP_SET_DRAW_STATE
+ packets are executed, based on their ENABLE_MASK values
+
+ CP_SET_MODE w/ payload of 0x1 seems to cause CP_SET_DRAW_STATE
+ packets w/ ENABLE_MASK & 0x6 to execute immediately
+ -->
+ <value name="CP_SET_MODE" value="0x63" variants="A6XX-"/>
+
+ <!--
+ Seems like there are now separate blocks of state for VS vs FS/CS
+ (probably this amounts to geometry vs fragments, so that the geometry
+ stage of the pipeline for the next draw can start while the fragment
+ stage of the current draw is still running). The format of the
+ packet payload is the same; the only difference is the offsets of the
+ regs that the firmware code handling the packet writes to.
+
+ Note that for CL, starting with a6xx, the preferred # of local
+ threads is no longer the same as the max, implying that the shader
+ core can now run warps from unrelated shaders (ie.
+ CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE vs
+ CL_KERNEL_WORK_GROUP_SIZE)
+ -->
+ <value name="CP_LOAD_STATE6_GEOM" value="0x32" variants="A6XX-"/>
+ <value name="CP_LOAD_STATE6_FRAG" value="0x34" variants="A6XX-"/>
+ <!--
+ Note: For IBO state (Image/SSBOs) which have shared state across
+ shader stages, for 3d pipeline CP_LOAD_STATE6 is used. But for
+ compute shaders, CP_LOAD_STATE6_FRAG is used. Possibly they are
+ interchangeable.
+ -->
+ <value name="CP_LOAD_STATE6" value="0x36" variants="A6XX-"/>
+
+ <!-- internal packets: -->
+ <value name="IN_IB_PREFETCH_END" value="0x17" variants="A2XX"/>
+ <value name="IN_SUBBLK_PREFETCH" value="0x1f" variants="A2XX"/>
+ <value name="IN_INSTR_PREFETCH" value="0x20" variants="A2XX"/>
+ <value name="IN_INSTR_MATCH" value="0x47" variants="A2XX"/>
+ <value name="IN_CONST_PREFETCH" value="0x49" variants="A2XX"/>
+ <value name="IN_INCR_UPDT_STATE" value="0x55" variants="A2XX"/>
+ <value name="IN_INCR_UPDT_CONST" value="0x56" variants="A2XX"/>
+ <value name="IN_INCR_UPDT_INSTR" value="0x57" variants="A2XX"/>
+
+ <!-- internal jumptable entries on a6xx+, possibly a5xx: -->
+
+ <!-- jmptable entry used to handle type4 packet on a5xx+: -->
+ <value name="PKT4" value="0x04" variants="A5XX-"/>
+ <!-- called when ROQ is empty, "returns" from an IB or merged sequence of IBs -->
+ <value name="IN_IB_END" value="0x0a" variants="A6XX-"/>
+ <!-- handles IFPC save/restore -->
+ <value name="IN_GMU_INTERRUPT" value="0x0b" variants="A6XX-"/>
+ <!-- preemption/context-switch routine -->
+ <value name="IN_PREEMPT" value="0x0f" variants="A6XX-"/>
+
+ <!-- TODO do these exist on A5xx? -->
+ <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX"/>
+ <value name="CP_REG_TO_MEM_OFFSET_MEM" value="0x74" variants="A6XX-"/>
+ <value name="CP_REG_TO_MEM_OFFSET_REG" value="0x72" variants="A6XX-"/>
+ <value name="CP_WAIT_MEM_GTE" value="0x14" variants="A6XX"/>
+ <value name="CP_WAIT_TWO_REGS" value="0x70" variants="A6XX"/>
+ <value name="CP_MEMCPY" value="0x75" variants="A6XX-"/>
+ <value name="CP_SET_BIN_DATA5_OFFSET" value="0x2e" variants="A6XX-"/>
+ <!-- A750+, set in place of CP_SET_BIN_DATA5_OFFSET but has different values -->
+ <value name="CP_SET_UNK_BIN_DATA" value="0x2d" variants="A7XX-"/>
+ <doc>
+ Write CP_CONTEXT_SWITCH_*_INFO from CP to the following dwords,
+ and forcibly switch to the indicated context.
+ </doc>
+ <value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/>
+ <!-- Note, kgsl calls this CP_SET_AMBLE: -->
+ <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/>
+
+ <!--
+ Seems to always have the payload:
+ 00000002 00008801 00004010
+ or:
+ 00000002 00008801 00004090
+ or:
+ 00000002 00008801 00000010
+ 00000002 00008801 00010010
+ 00000002 00008801 00d64010
+ ...
+ Note set for compute shaders..
+ Is 0x8801 a register offset?
+ This appears to be a special sort of register write packet
+ more or less, but the firmware has some special handling..
+ Seems like it intercepts/modifies certain register offsets,
+ but others are treated like a normal PKT4 reg write. I
+ guess there are some registers where the fw controls certain
+ bits.
+ -->
+ <value name="CP_REG_WRITE" value="0x6d" variants="A6XX"/>
+
+ <doc>
+ These first appear in a650_sqe.bin. They can in theory be used
+ to loop any sequence of IB1 commands, but in practice they are
+ used to loop over bins. There is a fixed-size per-iteration
+ prefix, used to set per-bin state, and then the following IB1
+ commands are executed until CP_END_BIN. Those commands are always the
+ same for each iteration and usually contain a list of
+ CP_INDIRECT_BUFFER calls to IB2 commands which set up state and
+ execute restore/draw/save commands. This replaces the previous
+ technique of just repeating the CP_INDIRECT_BUFFER calls and
+ "unrolling" the loop.
+ </doc>
+ <value name="CP_START_BIN" value="0x50" variants="A6XX-"/>
+ <value name="CP_END_BIN" value="0x51" variants="A6XX-"/>
+
+ <doc> Make next dword 1 to disable preemption, 0 to re-enable it. </doc>
+ <value name="CP_PREEMPT_DISABLE" value="0x6c" variants="A6XX"/>
+
+ <value name="CP_WAIT_TIMESTAMP" value="0x14" variants="A7XX-"/>
+ <value name="CP_GLOBAL_TIMESTAMP" value="0x15" variants="A7XX-"/> <!-- payload 1 dword -->
+ <value name="CP_LOCAL_TIMESTAMP" value="0x16" variants="A7XX-"/> <!-- payload 1 dword, follows 0x15 -->
+ <value name="CP_THREAD_CONTROL" value="0x17" variants="A7XX-"/>
+ <!-- payload 4 dwords, last two could be render target addr (one pkt per MRT), possibly used for GMEM save/restore?-->
+ <value name="CP_RESOURCE_LIST" value="0x18" variants="A7XX-"/>
+ <doc> Can clear BV/BR counters, or wait until one catches up to another </doc>
+ <value name="CP_BV_BR_COUNT_OPS" value="0x1b" variants="A7XX-"/>
+ <doc> Clears, adds to local, or adds to global timestamp </doc>
+ <value name="CP_MODIFY_TIMESTAMP" value="0x1c" variants="A7XX-"/>
+ <!-- similar to CP_CONTEXT_REG_BUNCH, but discards first two dwords?? -->
+ <value name="CP_CONTEXT_REG_BUNCH2" value="0x5d" variants="A7XX-"/>
+ <doc>
+ Write to a scratch memory that is read by CP_REG_TEST with
+ SOURCE_SCRATCH_MEM set. It's not the same scratch as the scratch registers;
+ however, it uses the same memory space.
+ </doc>
+ <value name="CP_MEM_TO_SCRATCH_MEM" value="0x49" variants="A7XX-"/>
+
+ <doc>
+ Executes an array of fixed-size command buffers where each
+ buffer is assumed to have one draw call, skipping buffers with
+ non-visible draw calls.
+ </doc>
+ <value name="CP_FIXED_STRIDE_DRAW_TABLE" value="0x7f" variants="A7XX-"/>
+
+ <doc>Reset various on-chip state used for synchronization</doc>
+ <value name="CP_RESET_CONTEXT_STATE" value="0x1f" variants="A7XX-"/>
+</enum>
+
+
+<domain name="CP_LOAD_STATE" width="32">
+ <doc>Load state, a3xx (and later?)</doc>
+ <enum name="adreno_state_block">
+ <value name="SB_VERT_TEX" value="0"/>
+ <value name="SB_VERT_MIPADDR" value="1"/>
+ <value name="SB_FRAG_TEX" value="2"/>
+ <value name="SB_FRAG_MIPADDR" value="3"/>
+ <value name="SB_VERT_SHADER" value="4"/>
+ <value name="SB_GEOM_SHADER" value="5"/>
+ <value name="SB_FRAG_SHADER" value="6"/>
+ <value name="SB_COMPUTE_SHADER" value="7"/>
+ </enum>
+ <enum name="adreno_state_type">
+ <value name="ST_SHADER" value="0"/>
+ <value name="ST_CONSTANTS" value="1"/>
+ </enum>
+ <enum name="adreno_state_src">
+ <value name="SS_DIRECT" value="0">
+ <doc>inline with the CP_LOAD_STATE packet</doc>
+ </value>
+ <value name="SS_INVALID_ALL_IC" value="2"/>
+ <value name="SS_INVALID_PART_IC" value="3"/>
+ <value name="SS_INDIRECT" value="4">
+ <doc>in buffer pointed to by EXT_SRC_ADDR</doc>
+ </value>
+ <value name="SS_INDIRECT_TCM" value="5"/>
+ <value name="SS_INDIRECT_STM" value="6"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DST_OFF" low="0" high="15" type="uint"/>
+ <bitfield name="STATE_SRC" low="16" high="18" type="adreno_state_src"/>
+ <bitfield name="STATE_BLOCK" low="19" high="21" type="adreno_state_block"/>
+ <bitfield name="NUM_UNIT" low="22" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="STATE_TYPE" low="0" high="1" type="adreno_state_type"/>
+ <bitfield name="EXT_SRC_ADDR" low="2" high="31" shr="2"/>
+ </reg32>
+</domain>
+
+<domain name="CP_LOAD_STATE4" width="32" varset="chip">
+ <doc>Load state, a4xx+</doc>
+ <enum name="a4xx_state_block">
+ <!--
+ unknown: 0x7 and 0xf <- seen in compute shader
+
+ STATE_BLOCK = 0x6, STATE_TYPE = 0x2 possibly used for preemption?
+ Seen in some GL shaders. Payload is NUM_UNIT dwords, and it contains
+ the gpuaddr of the following shader constants block. DST_OFF seems
+ to specify which shader stage:
+
+ 16 -> vert
+ 36 -> tcs
+ 56 -> tes
+ 76 -> geom
+ 96 -> frag
+
+ Example:
+
+opcode: CP_LOAD_STATE4 (30) (12 dwords)
+ { DST_OFF = 16 | STATE_SRC = SS4_DIRECT | STATE_BLOCK = 0x6 | NUM_UNIT = 4 }
+ { STATE_TYPE = 0x2 | EXT_SRC_ADDR = 0 }
+ { EXT_SRC_ADDR_HI = 0 }
+ 0000: c0264100 00000000 00000000 00000000
+ 0000: 70b0000b 01180010 00000002 00000000 c0264100 00000000 00000000 00000000
+
+opcode: CP_LOAD_STATE4 (30) (4 dwords)
+ { DST_OFF = 16 | STATE_SRC = SS4_INDIRECT | STATE_BLOCK = SB4_VS_SHADER | NUM_UNIT = 4 }
+ { STATE_TYPE = ST4_CONSTANTS | EXT_SRC_ADDR = 0xc0264100 }
+ { EXT_SRC_ADDR_HI = 0 }
+ 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
+ 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
+ 0000: 00000040 0000000c 00000000 00000000 00000000 00000000 00000000 00000000
+
+ STATE_BLOCK = 0x6, STATE_TYPE = 0x1, seen in compute shader. NUM_UNITS * 2 dwords.
+
+ -->
+ <value name="SB4_VS_TEX" value="0x0"/>
+ <value name="SB4_HS_TEX" value="0x1"/> <!-- aka. TCS -->
+ <value name="SB4_DS_TEX" value="0x2"/> <!-- aka. TES -->
+ <value name="SB4_GS_TEX" value="0x3"/>
+ <value name="SB4_FS_TEX" value="0x4"/>
+ <value name="SB4_CS_TEX" value="0x5"/>
+ <value name="SB4_VS_SHADER" value="0x8"/>
+ <value name="SB4_HS_SHADER" value="0x9"/>
+ <value name="SB4_DS_SHADER" value="0xa"/>
+ <value name="SB4_GS_SHADER" value="0xb"/>
+ <value name="SB4_FS_SHADER" value="0xc"/>
+ <value name="SB4_CS_SHADER" value="0xd"/>
+ <!--
+ for SSBO, STATE_TYPE=0 appears to be addresses (four dwords each),
+ STATE_TYPE=1 sizes, STATE_TYPE=2 addresses again (two dwords each)
+
+ Compute has its own dedicated SSBO state, it seems, but the rest
+ of the stages share state
+ -->
+ <value name="SB4_SSBO" value="0xe"/>
+ <value name="SB4_CS_SSBO" value="0xf"/>
+ </enum>
+ <enum name="a4xx_state_type">
+ <value name="ST4_SHADER" value="0"/>
+ <value name="ST4_CONSTANTS" value="1"/>
+ <value name="ST4_UBO" value="2"/>
+ </enum>
+ <enum name="a4xx_state_src">
+ <value name="SS4_DIRECT" value="0"/>
+ <value name="SS4_INDIRECT" value="2"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DST_OFF" low="0" high="13" type="uint"/>
+ <bitfield name="STATE_SRC" low="16" high="17" type="a4xx_state_src"/>
+ <bitfield name="STATE_BLOCK" low="18" high="21" type="a4xx_state_block"/>
+ <bitfield name="NUM_UNIT" low="22" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="STATE_TYPE" low="0" high="1" type="a4xx_state_type"/>
+ <bitfield name="EXT_SRC_ADDR" low="2" high="31" shr="2"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="EXT_SRC_ADDR_HI" low="0" high="31" shr="0"/>
+ </reg32>
+</domain>
+
+<!-- looks basically same CP_LOAD_STATE4 -->
+<domain name="CP_LOAD_STATE6" width="32" varset="chip">
+ <doc>Load state, a6xx+</doc>
+ <enum name="a6xx_state_block">
+ <value name="SB6_VS_TEX" value="0x0"/>
+ <value name="SB6_HS_TEX" value="0x1"/> <!-- aka. TCS -->
+ <value name="SB6_DS_TEX" value="0x2"/> <!-- aka. TES -->
+ <value name="SB6_GS_TEX" value="0x3"/>
+ <value name="SB6_FS_TEX" value="0x4"/>
+ <value name="SB6_CS_TEX" value="0x5"/>
+ <value name="SB6_VS_SHADER" value="0x8"/>
+ <value name="SB6_HS_SHADER" value="0x9"/>
+ <value name="SB6_DS_SHADER" value="0xa"/>
+ <value name="SB6_GS_SHADER" value="0xb"/>
+ <value name="SB6_FS_SHADER" value="0xc"/>
+ <value name="SB6_CS_SHADER" value="0xd"/>
+ <value name="SB6_IBO" value="0xe"/>
+ <value name="SB6_CS_IBO" value="0xf"/>
+ </enum>
+ <enum name="a6xx_state_type">
+ <value name="ST6_SHADER" value="0"/>
+ <value name="ST6_CONSTANTS" value="1"/>
+ <value name="ST6_UBO" value="2"/>
+ <value name="ST6_IBO" value="3"/>
+ </enum>
+ <enum name="a6xx_state_src">
+ <value name="SS6_DIRECT" value="0"/>
+ <value name="SS6_BINDLESS" value="1"/> <!-- TODO does this exist on a4xx/a5xx? -->
+ <value name="SS6_INDIRECT" value="2"/>
+ <doc>
+ SS6_UBO is used by the a6xx vulkan blob with tessellation constants;
+ in this case, EXT_SRC_ADDR is (ubo_id shl 16 | offset)
+ to load constants from a UBO loaded with DST_OFF = 14 and offset 0,
+ EXT_SRC_ADDR = 0xe0000
+ (offset is a guess, should be in bytes given that maxUniformBufferRange=64k)
+ </doc>
+ <value name="SS6_UBO" value="3"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DST_OFF" low="0" high="13" type="uint"/>
+ <bitfield name="STATE_TYPE" low="14" high="15" type="a6xx_state_type"/>
+ <bitfield name="STATE_SRC" low="16" high="17" type="a6xx_state_src"/>
+ <bitfield name="STATE_BLOCK" low="18" high="21" type="a6xx_state_block"/>
+ <bitfield name="NUM_UNIT" low="22" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="EXT_SRC_ADDR" low="2" high="31" shr="2"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="EXT_SRC_ADDR_HI" low="0" high="31" shr="0"/>
+ </reg32>
+ <reg64 offset="1" name="EXT_SRC_ADDR" type="address"/>
+</domain>
+
+<bitset name="vgt_draw_initiator" inline="yes">
+ <bitfield name="PRIM_TYPE" low="0" high="5" type="pc_di_primtype"/>
+ <bitfield name="SOURCE_SELECT" low="6" high="7" type="pc_di_src_sel"/>
+ <bitfield name="VIS_CULL" low="9" high="10" type="pc_di_vis_cull_mode"/>
+ <bitfield name="INDEX_SIZE" pos="11" type="pc_di_index_size"/>
+ <bitfield name="NOT_EOP" pos="12" type="boolean"/>
+ <bitfield name="SMALL_INDEX" pos="13" type="boolean"/>
+ <bitfield name="PRE_DRAW_INITIATOR_ENABLE" pos="14" type="boolean"/>
+ <bitfield name="NUM_INSTANCES" low="24" high="31" type="uint"/>
+</bitset>
+
+<!-- changed on a4xx: -->
+<enum name="a4xx_index_size">
+ <value name="INDEX4_SIZE_8_BIT" value="0"/>
+ <value name="INDEX4_SIZE_16_BIT" value="1"/>
+ <value name="INDEX4_SIZE_32_BIT" value="2"/>
+</enum>
+
+<enum name="a6xx_patch_type">
+ <value name="TESS_QUADS" value="0"/>
+ <value name="TESS_TRIANGLES" value="1"/>
+ <value name="TESS_ISOLINES" value="2"/>
+</enum>
+
+<bitset name="vgt_draw_initiator_a4xx" inline="yes">
+ <!-- When the 0x20 bit is set, it's the number of patch vertices - 1 -->
+ <bitfield name="PRIM_TYPE" low="0" high="5" type="pc_di_primtype"/>
+ <bitfield name="SOURCE_SELECT" low="6" high="7" type="pc_di_src_sel"/>
+ <bitfield name="VIS_CULL" low="8" high="9" type="pc_di_vis_cull_mode"/>
+ <bitfield name="INDEX_SIZE" low="10" high="11" type="a4xx_index_size"/>
+ <bitfield name="PATCH_TYPE" low="12" high="13" type="a6xx_patch_type"/>
+ <bitfield name="GS_ENABLE" pos="16" type="boolean"/>
+ <bitfield name="TESS_ENABLE" pos="17" type="boolean"/>
+</bitset>
+
+<domain name="CP_DRAW_INDX" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="VIZ_QUERY" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1" type="vgt_draw_initiator"/>
+ <reg32 offset="2" name="2">
+ <bitfield name="NUM_INDICES" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="INDX_BASE" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="INDX_SIZE" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DRAW_INDX_2" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="VIZ_QUERY" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1" type="vgt_draw_initiator"/>
+ <reg32 offset="2" name="2">
+ <bitfield name="NUM_INDICES" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- followed by NUM_INDICES indices.. -->
+</domain>
+
+<domain name="CP_DRAW_INDX_OFFSET" width="32">
+ <reg32 offset="0" name="0" type="vgt_draw_initiator_a4xx"/>
+ <reg32 offset="1" name="1">
+ <bitfield name="NUM_INSTANCES" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="NUM_INDICES" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="FIRST_INDX" low="0" high="31"/>
+ </reg32>
+
+ <stripe varset="chip" variants="A5XX-">
+ <reg32 offset="4" name="4">
+ <bitfield name="INDX_BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="INDX_BASE_HI" low="0" high="31"/>
+ </reg32>
+ <reg64 offset="4" name="INDX_BASE" type="address"/>
+ <reg32 offset="6" name="6">
+ <!-- max # of elements in index buffer -->
+ <bitfield name="MAX_INDICES" low="0" high="31"/>
+ </reg32>
+ </stripe>
+
+ <reg32 offset="4" name="4">
+ <bitfield name="INDX_BASE" low="0" high="31" type="address"/>
+ </reg32>
+
+ <reg32 offset="5" name="5">
+ <bitfield name="INDX_SIZE" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DRAW_INDIRECT" width="32" varset="chip" prefix="chip" variants="A4XX-">
+ <reg32 offset="0" name="0" type="vgt_draw_initiator_a4xx"/>
+ <stripe varset="chip" variants="A4XX">
+ <reg32 offset="1" name="1">
+ <bitfield name="INDIRECT" low="0" high="31"/>
+ </reg32>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg32 offset="1" name="1">
+ <bitfield name="INDIRECT_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="INDIRECT_HI" low="0" high="31"/>
+ </reg32>
+ <reg64 offset="1" name="INDIRECT" type="address"/>
+ </stripe>
+</domain>
+
+<domain name="CP_DRAW_INDX_INDIRECT" width="32" varset="chip" prefix="chip" variants="A4XX-">
+ <reg32 offset="0" name="0" type="vgt_draw_initiator_a4xx"/>
+ <stripe varset="chip" variants="A4XX">
+ <reg32 offset="1" name="1">
+ <bitfield name="INDX_BASE" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!-- max # of bytes in index buffer -->
+ <bitfield name="INDX_SIZE" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="INDIRECT" low="0" high="31"/>
+ </reg32>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg32 offset="1" name="1">
+ <bitfield name="INDX_BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="INDX_BASE_HI" low="0" high="31"/>
+ </reg32>
+ <reg64 offset="1" name="INDX_BASE" type="address"/>
+ <reg32 offset="3" name="3">
+ <!-- max # of elements in index buffer -->
+ <bitfield name="MAX_INDICES" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="INDIRECT_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="INDIRECT_HI" low="0" high="31"/>
+ </reg32>
+ <reg64 offset="4" name="INDIRECT" type="address"/>
+ </stripe>
+</domain>
+
+<domain name="CP_DRAW_INDIRECT_MULTI" width="32" varset="chip" prefix="chip" variants="A6XX-">
+ <enum name="a6xx_draw_indirect_opcode">
+ <value name="INDIRECT_OP_NORMAL" value="0x2"/>
+ <value name="INDIRECT_OP_INDEXED" value="0x4"/>
+ <value name="INDIRECT_OP_INDIRECT_COUNT" value="0x6"/>
+ <value name="INDIRECT_OP_INDIRECT_COUNT_INDEXED" value="0x7"/>
+ </enum>
+ <reg32 offset="0" name="0" type="vgt_draw_initiator_a4xx"/>
+ <reg32 offset="1" name="1">
+ <bitfield name="OPCODE" low="0" high="3" type="a6xx_draw_indirect_opcode" addvariant="yes"/>
+ <doc>
+			DST_OFF is the same as in CP_LOAD_STATE6 - the vec4 VS const at this
+			offset will be updated for each draw to {draw_id, first_vertex,
+			first_instance, 0}; a value of 0 disables it.
+ </doc>
+ <bitfield name="DST_OFF" low="8" high="21" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="DRAW_COUNT" type="uint"/>
+ <stripe varset="a6xx_draw_indirect_opcode" variants="INDIRECT_OP_NORMAL">
+ <reg64 offset="3" name="INDIRECT" type="address"/>
+ <reg32 offset="5" name="STRIDE" type="uint"/>
+ </stripe>
+ <stripe varset="a6xx_draw_indirect_opcode" variants="INDIRECT_OP_INDEXED" prefix="INDEXED">
+ <reg64 offset="3" name="INDEX" type="address"/>
+ <reg32 offset="5" name="MAX_INDICES" type="uint"/>
+ <reg64 offset="6" name="INDIRECT" type="address"/>
+ <reg32 offset="8" name="STRIDE" type="uint"/>
+ </stripe>
+ <stripe varset="a6xx_draw_indirect_opcode" variants="INDIRECT_OP_INDIRECT_COUNT" prefix="INDIRECT">
+ <reg64 offset="3" name="INDIRECT" type="address"/>
+ <reg64 offset="5" name="INDIRECT_COUNT" type="address"/>
+ <reg32 offset="7" name="STRIDE" type="uint"/>
+ </stripe>
+ <stripe varset="a6xx_draw_indirect_opcode" variants="INDIRECT_OP_INDIRECT_COUNT_INDEXED" prefix="INDIRECT_INDEXED">
+ <reg64 offset="3" name="INDEX" type="address"/>
+ <reg32 offset="5" name="MAX_INDICES" type="uint"/>
+ <reg64 offset="6" name="INDIRECT" type="address"/>
+ <reg64 offset="8" name="INDIRECT_COUNT" type="address"/>
+ <reg32 offset="10" name="STRIDE" type="uint"/>
+ </stripe>
+</domain>
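+
+<!--
+ Rough sketch of the DST_OFF behaviour documented above (an illustration,
+ not something stated by the firmware docs): for each of the DRAW_COUNT
+ draws, the CP presumably does something like
+
+   if (DST_OFF != 0)
+     vs_const[DST_OFF] = (vec4){ draw_id, first_vertex, first_instance, 0 };
+   // ... then execute the draw described by the draw_id'th indirect
+   // record, i.e. the one at INDIRECT + draw_id * STRIDE
+
+ before moving on to the next record.
+-->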
+
+<domain name="CP_DRAW_AUTO" width="32">
+ <reg32 offset="0" name="0" type="vgt_draw_initiator_a4xx"/>
+ <reg32 offset="1" name="1">
+ <bitfield name="NUM_INSTANCES" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg64 offset="2" name="NUM_VERTICES_BASE" type="address"/>
+ <reg32 offset="4" name="4">
+ <bitfield name="NUM_VERTICES_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="STRIDE" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DRAW_PRED_ENABLE_GLOBAL" width="32" varset="chip">
+ <reg32 offset="0" name="0">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DRAW_PRED_ENABLE_LOCAL" width="32" varset="chip">
+ <reg32 offset="0" name="0">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DRAW_PRED_SET" width="32" varset="chip">
+ <enum name="cp_draw_pred_src">
+ <!--
+ Sources 1-4 seem to be about combining reading
+ SO/primitive queries and setting the predicate, which is
+ a DX11-specific optimization (since in DX11 you can only
+ predicate on the result of queries).
+ -->
+ <value name="PRED_SRC_MEM" value="5">
+ <doc>
+ Read a 64-bit value at the given address and
+ test if it equals/doesn't equal 0.
+ </doc>
+ </value>
+ </enum>
+ <enum name="cp_draw_pred_test">
+ <value name="NE_0_PASS" value="0"/>
+ <value name="EQ_0_PASS" value="1"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="SRC" low="4" high="7" type="cp_draw_pred_src"/>
+ <bitfield name="TEST" pos="8" type="cp_draw_pred_test"/>
+ </reg32>
+ <reg64 offset="1" name="MEM_ADDR" type="address"/>
+</domain>
+
+<domain name="CP_SET_DRAW_STATE" width="32" varset="chip" variants="A4XX-">
+ <array offset="0" stride="3" length="100">
+ <reg32 offset="0" name="0">
+ <bitfield name="COUNT" low="0" high="15" type="uint"/>
+ <bitfield name="DIRTY" pos="16" type="boolean"/>
+ <bitfield name="DISABLE" pos="17" type="boolean"/>
+ <bitfield name="DISABLE_ALL_GROUPS" pos="18" type="boolean"/>
+ <bitfield name="LOAD_IMMED" pos="19" type="boolean"/>
+ <bitfield name="BINNING" pos="20" varset="chip" variants="A6XX-" type="boolean"/>
+ <bitfield name="GMEM" pos="21" varset="chip" variants="A6XX-" type="boolean"/>
+ <bitfield name="SYSMEM" pos="22" varset="chip" variants="A6XX-" type="boolean"/>
+ <bitfield name="GROUP_ID" low="24" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ </array>
+</domain>
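+
+<!--
+ Illustrative example (an assumption, not taken from the original comments):
+ a single 3-dword group such as
+
+   { COUNT=4 | GROUP_ID=2, ADDR_LO, ADDR_HI }
+
+ presumably registers a 4-dword state-group IB at ADDR under group id 2,
+ while a group with DISABLE set would instead drop the previously set
+ state for that group id.
+-->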
+
+<domain name="CP_SET_BIN" width="32">
+ <doc>value at offset 0 always seems to be 0x00000000..</doc>
+ <reg32 offset="0" name="0"/>
+ <reg32 offset="1" name="1">
+ <bitfield name="X1" low="0" high="15" type="uint"/>
+ <bitfield name="Y1" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="X2" low="0" high="15" type="uint"/>
+ <bitfield name="Y2" low="16" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SET_BIN_DATA" width="32">
+ <reg32 offset="0" name="0">
+ <!-- corresponds to VSC_PIPE[n].DATA_ADDR -->
+ <bitfield name="BIN_DATA_ADDR" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+		<!-- seems to correspond to VSC_SIZE_ADDRESS -->
+ <bitfield name="BIN_SIZE_ADDRESS" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SET_BIN_DATA5" width="32">
+ <reg32 offset="0" name="0">
+ <!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
+ <bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
+ <!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
+ <bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="7" name="7"/>
+ <reg64 offset="9" name="9"/>
+</domain>
+
+<domain name="CP_SET_BIN_DATA5_OFFSET" width="32">
+ <doc>
+		Like CP_SET_BIN_DATA5, but sets the pointers as offsets from the
+		pointers stored in VSC_PIPE_{DATA,DATA2,SIZE}_ADDRESS. Useful
+		for Vulkan, where these values aren't known when the command
+		stream is recorded.
+ </doc>
+ <reg32 offset="0" name="0">
+ <!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
+ <bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
+ <!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
+ <bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_REG_RMW" width="32">
+ <doc>
+ Modifies DST_REG using two sources that can either be registers
+ or immediates. If SRC1_ADD is set, then do the following:
+
+ $dst = (($dst &amp; $src0) rot $rotate) + $src1
+
+ Otherwise:
+
+ $dst = (($dst &amp; $src0) rot $rotate) | $src1
+
+ Here "rot" means rotate left.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="DST_REG" low="0" high="17" type="hex"/>
+ <bitfield name="ROTATE" low="24" high="28" type="uint"/>
+ <bitfield name="SRC1_ADD" pos="29" type="boolean"/>
+ <bitfield name="SRC1_IS_REG" pos="30" type="boolean"/>
+ <bitfield name="SRC0_IS_REG" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="SRC0" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="SRC1" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
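+
+<!--
+ Worked example of the formula in the doc above (numbers are illustrative
+ only): with SRC0_IS_REG=0, SRC1_IS_REG=0, SRC1_ADD=1, ROTATE=4,
+ SRC0=0x0000ffff, SRC1=0x00000010 and $dst currently 0x00001234:
+
+   $dst = ((0x00001234 & 0x0000ffff) rot 4) + 0x00000010 = 0x00012350
+-->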
+
+<domain name="CP_REG_TO_MEM" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <!-- number of registers/dwords copied is max(CNT, 1). -->
+ <bitfield name="CNT" low="18" high="29" type="uint"/>
+ <bitfield name="64B" pos="30" type="boolean"/>
+ <bitfield name="ACCUMULATE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="DEST" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="DEST_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_REG_TO_MEM_OFFSET_REG" width="32">
+ <doc>
+		Like CP_REG_TO_MEM, but the memory address to write to can be
+		offset by the value of one or two registers or scratch
+		registers.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <!-- number of registers/dwords copied is max(CNT, 1). -->
+ <bitfield name="CNT" low="18" high="29" type="uint"/>
+ <bitfield name="64B" pos="30" type="boolean"/>
+ <bitfield name="ACCUMULATE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="DEST" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="DEST_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="OFFSET0" low="0" high="17" type="hex"/>
+ <bitfield name="OFFSET0_SCRATCH" pos="19" type="boolean"/>
+ </reg32>
+ <!-- followed by an optional identical OFFSET1 dword -->
+</domain>
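+
+<!--
+ Presumed addressing, as a sketch (inferred from the doc above, not verified
+ against firmware): the value of the register selected by OFFSET0 (or the
+ scratch register, if OFFSET0_SCRATCH is set), plus the optional OFFSET1 if
+ the extra dword is present, is added to DEST to form the final write
+ address:
+
+   addr = DEST + reg[OFFSET0] (+ reg[OFFSET1])
+-->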
+
+<domain name="CP_REG_TO_MEM_OFFSET_MEM" width="32">
+ <doc>
+		Like CP_REG_TO_MEM, but the memory address to write to can be
+		offset by a DWORD read from memory.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <!-- number of registers/dwords copied is max(CNT, 1). -->
+ <bitfield name="CNT" low="18" high="29" type="uint"/>
+ <bitfield name="64B" pos="30" type="boolean"/>
+ <bitfield name="ACCUMULATE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="DEST" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="DEST_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="OFFSET_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="OFFSET_HI" low="0" high="31" type="hex"/>
+ </reg32>
+</domain>
+
+<domain name="CP_MEM_TO_REG" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <!-- number of registers/dwords copied is max(CNT, 1). -->
+ <bitfield name="CNT" low="19" high="29" type="uint"/>
+ <!-- shift each DWORD left by 2 while copying -->
+ <bitfield name="SHIFT_BY_2" pos="30" type="boolean"/>
+ <!-- does the same thing as CP_MEM_TO_MEM::UNK31 -->
+ <bitfield name="UNK31" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="SRC" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
+ <bitfield name="SRC_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_MEM_TO_MEM" width="32">
+ <reg32 offset="0" name="0">
+ <!--
+ not sure how many src operands we have, but the low
+ bits negate the n'th src argument.
+ -->
+ <bitfield name="NEG_A" pos="0" type="boolean"/>
+ <bitfield name="NEG_B" pos="1" type="boolean"/>
+ <bitfield name="NEG_C" pos="2" type="boolean"/>
+
+ <!-- if set treat src/dst as 64bit values -->
+ <bitfield name="DOUBLE" pos="29" type="boolean"/>
+ <!-- execute CP_WAIT_FOR_MEM_WRITES beforehand -->
+ <bitfield name="WAIT_FOR_MEM_WRITES" pos="30" type="boolean"/>
+ <!-- some other kind of wait -->
+ <bitfield name="UNK31" pos="31" type="boolean"/>
+ </reg32>
+	<!--
+		followed by a sequence of addresses.. the first is the
+		destination and the rest are N src addresses which are
+		summed (after being negated if the NEG_x bit is set),
+		which allows doing things like 'result += end - start'
+		(which turns out to be useful for queries and accumulating
+		results across multiple tiles)
+	-->
+</domain>
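+
+<!--
+ Illustrative example of the summing described above (a sketch; the exact
+ number of sources isn't known): with addresses DST, SRC_A, SRC_B following
+ the header dword and NEG_B set, the result would be
+
+   *DST = *SRC_A - *SRC_B
+
+ and the 'result += end - start' pattern presumably just passes the result
+ buffer itself as one of the non-negated sources.
+-->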
+
+<domain name="CP_MEMCPY" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="DWORDS" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="SRC_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="SRC_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="DST_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="DST_HI" low="0" high="31" type="hex"/>
+ </reg32>
+</domain>
+
+<domain name="CP_REG_TO_SCRATCH" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <bitfield name="SCRATCH" low="20" high="22" type="uint"/>
+ <!-- number of registers/dwords copied is CNT + 1. -->
+ <bitfield name="CNT" low="24" high="26" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SCRATCH_TO_REG" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="REG" low="0" high="17" type="hex"/>
+ <!-- note: CP_MEM_TO_REG always sets this when writing to the register -->
+ <bitfield name="UNK18" pos="18" type="boolean"/>
+ <bitfield name="SCRATCH" low="20" high="22" type="uint"/>
+ <!-- number of registers/dwords copied is CNT + 1. -->
+ <bitfield name="CNT" low="24" high="26" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SCRATCH_WRITE" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="SCRATCH" low="20" high="22" type="uint"/>
+ </reg32>
+ <!-- followed by one or more DWORDs to write to scratch registers -->
+</domain>
+
+<domain name="CP_MEM_WRITE" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="ADDR_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_HI" low="0" high="31"/>
+ </reg32>
+ <!-- followed by the DWORDs to write -->
+</domain>
+
+<enum name="cp_cond_function">
+ <value value="0" name="WRITE_ALWAYS"/>
+ <value value="1" name="WRITE_LT"/>
+ <value value="2" name="WRITE_LE"/>
+ <value value="3" name="WRITE_EQ"/>
+ <value value="4" name="WRITE_NE"/>
+ <value value="5" name="WRITE_GE"/>
+ <value value="6" name="WRITE_GT"/>
+</enum>
+
+<domain name="CP_COND_WRITE" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="FUNCTION" low="0" high="2" type="cp_cond_function"/>
+ <bitfield name="POLL_MEMORY" pos="4" type="boolean"/>
+ <bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="POLL_ADDR" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="REF" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="MASK" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="WRITE_ADDR" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="WRITE_DATA" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<enum name="poll_memory_type">
+ <value value="0" name="POLL_REGISTER"/>
+ <value value="1" name="POLL_MEMORY"/>
+ <value value="2" name="POLL_SCRATCH"/>
+ <value value="3" name="POLL_ON_CHIP" varset="chip" variants="A7XX-"/>
+</enum>
+
+<domain name="CP_COND_WRITE5" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="FUNCTION" low="0" high="2" type="cp_cond_function"/>
+ <bitfield name="SIGNED_COMPARE" pos="3" type="boolean"/>
+ <!-- POLL_REGISTER polls a register at POLL_ADDR_LO. -->
+ <bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
+ <bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="REF" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="MASK" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="WRITE_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="WRITE_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="WRITE_DATA" low="0" high="31"/>
+ </reg32>
+</domain>
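+
+<!--
+ Presumed semantics, as a sketch (it shares its comparison with
+ CP_WAIT_REG_MEM below, but this is an inference rather than a verified
+ description):
+
+   val = poll(POLL_ADDR);              // register/memory/scratch per POLL
+   if (FUNCTION(val & MASK, REF))
+     *WRITE_ADDR = WRITE_DATA;         // to memory when WRITE_MEMORY is set
+-->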
+
+<domain name="CP_WAIT_MEM_GTE" width="32">
+ <doc>
+ Wait until a memory value is greater than or equal to the
+ reference, using signed comparison.
+ </doc>
+ <reg32 offset="0" name="0">
+ <!-- Reserved for flags, presumably? Unused in FW -->
+ <bitfield name="RESERVED" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="REF" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_WAIT_REG_MEM" width="32">
+ <doc>
+ This uses the same internal comparison as CP_COND_WRITE,
+ but waits until the comparison is true instead. It busy-loops in
+ the CP for the given number of cycles before trying again.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="FUNCTION" low="0" high="2" type="cp_cond_function"/>
+ <bitfield name="SIGNED_COMPARE" pos="3" type="boolean"/>
+ <bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
+ <bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="REF" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="MASK" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="DELAY_LOOP_CYCLES" low="0" high="31"/>
+ </reg32>
+</domain>
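+
+<!--
+ The wait described in the doc above, as rough pseudocode (the exact loop
+ shape is an assumption):
+
+   for (;;) {
+     val = poll(POLL_ADDR);            // register or memory, per POLL
+     if (FUNCTION(val & MASK, REF))
+       break;
+     spin(DELAY_LOOP_CYCLES);
+   }
+-->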
+
+<domain name="CP_WAIT_TWO_REGS" width="32">
+ <doc>
+		Waits until REG0 is not 0 or REG1 does not equal REF
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="REG0" low="0" high="17" type="hex"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="REG1" low="0" high="17" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="REF" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_DISPATCH_COMPUTE" width="32">
+ <reg32 offset="0" name="0"/>
+ <reg32 offset="1" name="1">
+ <bitfield name="X" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="Y" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="Z" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SET_RENDER_MODE" width="32">
+ <enum name="render_mode_cmd">
+ <value value="1" name="BYPASS"/>
+ <value value="2" name="BINNING"/>
+ <value value="3" name="GMEM"/>
+ <value value="5" name="BLIT2D"/>
+		<!-- placeholder name.. used when CP_BLIT packets use BLIT_OP_SCALE?? -->
+ <value value="7" name="BLIT2DSCALE"/>
+		<!-- 8 is set before going back to BYPASS when exiting 2D -->
+ <value value="8" name="END2D"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="MODE" low="0" high="8" type="render_mode_cmd"/>
+		<!--
+			normally 0x1/0x3, but sometimes 0x5/0x8 is seen with unknown
+			registers in the 0x21xx range.. possibly (at least some) a5xx
+			variants have a 2d core?
+		-->
+ </reg32>
+	<!-- I think the first buffer is for the GPU to save context in case of a ctx switch? -->
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR_0_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!--
+ set when in GMEM.. maybe indicates GMEM contents need to be
+ preserved on ctx switch?
+ -->
+ <bitfield name="VSC_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="GMEM_ENABLE" pos="4" type="boolean"/>
+ </reg32>
+ <reg32 offset="4" name="4"/>
+ <!-- second buffer looks like some cmdstream.. length in dwords: -->
+ <reg32 offset="5" name="5">
+ <bitfield name="ADDR_1_LEN" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="ADDR_1_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="ADDR_1_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<!-- this looks fairly similar to CP_SET_RENDER_MODE minus first dword -->
+<domain name="CP_COMPUTE_CHECKPOINT" width="32">
+	<!-- I think the first buffer is for the GPU to save context in case of a ctx switch? -->
+ <reg32 offset="0" name="0">
+ <bitfield name="ADDR_0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_0_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ </reg32>
+ <reg32 offset="3" name="3"/>
+ <!-- second buffer looks like some cmdstream.. length in dwords: -->
+ <reg32 offset="4" name="4">
+ <bitfield name="ADDR_1_LEN" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="ADDR_1_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="ADDR_1_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="7" name="7"/>
+</domain>
+
+<domain name="CP_PERFCOUNTER_ACTION" width="32">
+ <reg32 offset="0" name="0">
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR_0_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain varset="chip" name="CP_EVENT_WRITE" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="EVENT" low="0" high="7" type="vgt_event_type"/>
+ <!-- when set, write back timestamp instead of value from packet: -->
+ <bitfield name="TIMESTAMP" pos="30" type="boolean"/>
+ <bitfield name="IRQ" pos="31" type="boolean"/>
+ </reg32>
+ <!--
+ TODO what is gpuaddr for, seems to be all 0's.. maybe needed for
+ context switch?
+ -->
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR_0_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!-- ??? -->
+ </reg32>
+</domain>
+
+<domain varset="chip" name="CP_EVENT_WRITE7" width="32">
+ <enum name="event_write_src">
+ <!-- Write payload[0] -->
+ <value value="0" name="EV_WRITE_USER_32B"/>
+ <!-- Write payload[0] payload[1] -->
+ <value value="1" name="EV_WRITE_USER_64B"/>
+ <!-- Write (TIMESTAMP_GLOBAL + TIMESTAMP_LOCAL) -->
+ <value value="2" name="EV_WRITE_TIMESTAMP_SUM"/>
+ <value value="3" name="EV_WRITE_ALWAYSON"/>
+ <!-- Write payload[1] regs starting at payload[0] offset -->
+ <value value="4" name="EV_WRITE_REGS_CONTENT"/>
+ </enum>
+
+ <enum name="event_write_dst">
+ <value value="0" name="EV_DST_RAM"/>
+ <value value="1" name="EV_DST_ONCHIP"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="EVENT" low="0" high="7" type="vgt_event_type"/>
+ <bitfield name="WRITE_SAMPLE_COUNT" pos="12" type="boolean"/>
+ <!-- Write sample count at (iova + 16) -->
+ <bitfield name="SAMPLE_COUNT_END_OFFSET" pos="13" type="boolean"/>
+ <!-- *(iova + 8) = *(iova + 16) - *iova -->
+ <bitfield name="WRITE_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
+
+ <!-- Next 4 flags are valid to set only when concurrent binning is enabled -->
+ <!-- Increment 16b BV counter. Valid only in BV pipe -->
+ <bitfield name="INC_BV_COUNT" pos="16" type="boolean"/>
+ <!-- Increment 16b BR counter. Valid only in BR pipe -->
+ <bitfield name="INC_BR_COUNT" pos="17" type="boolean"/>
+ <bitfield name="CLEAR_RENDER_RESOURCE" pos="18" type="boolean"/>
+ <bitfield name="CLEAR_LRZ_RESOURCE" pos="19" type="boolean"/>
+
+ <bitfield name="WRITE_SRC" low="20" high="22" type="event_write_src"/>
+ <bitfield name="WRITE_DST" pos="24" type="event_write_dst" addvariant="yes"/>
+ <!-- Writes into WRITE_DST from WRITE_SRC. RB_DONE_TS requires WRITE_ENABLED. -->
+ <bitfield name="WRITE_ENABLED" pos="27" type="boolean"/>
+ </reg32>
+
+ <stripe varset="event_write_dst" variants="EV_DST_RAM">
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR_0_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="PAYLOAD_0" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="PAYLOAD_1" low="0" high="31"/>
+ </reg32>
+ </stripe>
+
+ <stripe varset="event_write_dst" variants="EV_DST_ONCHIP">
+ <reg32 offset="1" name="1">
+ <bitfield name="ONCHIP_ADDR_0" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="PAYLOAD_0" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="PAYLOAD_1" low="0" high="31"/>
+ </reg32>
+ </stripe>
+</domain>
+
+<domain name="CP_BLIT" width="32">
+ <enum name="cp_blit_cmd">
+ <value value="0" name="BLIT_OP_FILL"/>
+ <value value="1" name="BLIT_OP_COPY"/>
+ <value value="3" name="BLIT_OP_SCALE"/> <!-- used for mipmap generation -->
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="OP" low="0" high="3" type="cp_blit_cmd"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="SRC_X1" low="0" high="13" type="uint"/>
+ <bitfield name="SRC_Y1" low="16" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="SRC_X2" low="0" high="13" type="uint"/>
+ <bitfield name="SRC_Y2" low="16" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="DST_X1" low="0" high="13" type="uint"/>
+ <bitfield name="DST_Y1" low="16" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="DST_X2" low="0" high="13" type="uint"/>
+ <bitfield name="DST_Y2" low="16" high="29" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_EXEC_CS" width="32">
+ <reg32 offset="0" name="0">
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="NGROUPS_X" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="NGROUPS_Y" low="0" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="NGROUPS_Z" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_EXEC_CS_INDIRECT" width="32" varset="chip" prefix="chip" variants="A4XX-">
+ <reg32 offset="0" name="0">
+ </reg32>
+ <stripe varset="chip" variants="A4XX">
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!-- localsize is value minus one: -->
+ <bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
+ <bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
+ <bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
+ </reg32>
+ </stripe>
+</domain>
+
+<domain name="CP_SET_MARKER" width="32" varset="chip" prefix="chip" variants="A6XX-">
+	<doc>Tells the CP the current operation mode; this indicates the save and restore procedure</doc>
+ <enum name="a6xx_marker">
+ <value value="1" name="RM6_BYPASS"/>
+ <value value="2" name="RM6_BINNING"/>
+ <value value="4" name="RM6_GMEM"/>
+ <value value="5" name="RM6_ENDVIS"/>
+ <value value="6" name="RM6_RESOLVE"/>
+ <value value="7" name="RM6_YIELD"/>
+ <value value="8" name="RM6_COMPUTE"/>
+ <value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
+
+ <!--
+ These values come from a6xx_set_marker() in the
+ downstream kernel, and they can only be set by the kernel
+ -->
+ <value value="0xd" name="RM6_IB1LIST_START"/>
+ <value value="0xe" name="RM6_IB1LIST_END"/>
+ <!-- IFPC - inter-frame power collapse -->
+ <value value="0x100" name="RM6_IFPC_ENABLE"/>
+ <value value="0x101" name="RM6_IFPC_DISABLE"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <!--
+ NOTE: blob driver and some versions of freedreno/turnip set
+ b4, which is unused (at least by current sqe fw), but interferes
+ with parsing if we extend the size of the bitfield to include
+ b8 (only sent by kernel mode driver). Really, the way the
+ parsing works in the firmware, only b0-b3 are considered, but
+ if b8 is set, the low bits are interpreted differently. To
+ model this, without getting confused by spurious b4, this is
+ described as two overlapping bitfields:
+ -->
+ <bitfield name="MODE" low="0" high="8" type="a6xx_marker"/>
+ <bitfield name="MARKER" low="0" high="3" type="a6xx_marker"/>
+ </reg32>
+</domain>
+
+<domain name="CP_SET_PSEUDO_REG" width="32" varset="chip" prefix="chip" variants="A6XX-">
+	<doc>Sets internal CP registers used to indicate context save data addresses</doc>
+ <enum name="pseudo_reg">
+ <value value="0" name="SMMU_INFO"/>
+ <value value="1" name="NON_SECURE_SAVE_ADDR"/>
+ <value value="2" name="SECURE_SAVE_ADDR"/>
+ <value value="3" name="NON_PRIV_SAVE_ADDR"/>
+ <value value="4" name="COUNTER"/>
+
+ <!--
+ On a6xx the registers are set directly and CP_SET_BIN_DATA5_OFFSET reads them,
+ but that doesn't work with concurrent binning because BR will be reading from
+ a different set of streams than BV is writing, so on a7xx we have these
+ pseudo-regs instead, which do the right thing.
+
+ The corresponding VSC registers exist, and they're written by BV when it
+ encounters CP_SET_PSEUDO_REG. When BR later encounters the same CP_SET_PSEUDO_REG
+ it will only write some private scratch registers which are read by
+ CP_SET_BIN_DATA5_OFFSET.
+
+ If concurrent binning is disabled then BR also does binning so it will also
+ write the "real" registers in BR.
+ -->
+ <value value="8" name="DRAW_STRM_ADDRESS"/>
+ <value value="9" name="DRAW_STRM_SIZE_ADDRESS"/>
+ <value value="10" name="PRIM_STRM_ADDRESS"/>
+ <value value="11" name="UNK_STRM_ADDRESS"/>
+ <value value="12" name="UNK_STRM_SIZE_ADDRESS"/>
+
+ <value value="16" name="BINDLESS_BASE_0_ADDR"/>
+ <value value="17" name="BINDLESS_BASE_1_ADDR"/>
+ <value value="18" name="BINDLESS_BASE_2_ADDR"/>
+ <value value="19" name="BINDLESS_BASE_3_ADDR"/>
+ <value value="20" name="BINDLESS_BASE_4_ADDR"/>
+ <value value="21" name="BINDLESS_BASE_5_ADDR"/>
+ <value value="22" name="BINDLESS_BASE_6_ADDR"/>
+ </enum>
+ <array offset="0" stride="3" length="100">
+ <reg32 offset="0" name="0">
+ <bitfield name="PSEUDO_REG" low="0" high="10" type="pseudo_reg"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="HI" low="0" high="31"/>
+ </reg32>
+ </array>
+</domain>
+
+<domain name="CP_REG_TEST" width="32" varset="chip" prefix="chip" variants="A6XX-">
+ <doc>
+ Tests bit in specified register and sets predicate for CP_COND_REG_EXEC.
+ So:
+
+ opcode: CP_REG_TEST (39) (2 dwords)
+ { REG = 0xc10 | BIT = 0 }
+ 0000: 70b90001 00000c10
+ opcode: CP_COND_REG_EXEC (47) (3 dwords)
+ 0000: 70c70002 10000000 00000004
+ opcode: CP_INDIRECT_BUFFER (3f) (4 dwords)
+
+ Will execute the CP_INDIRECT_BUFFER only if b0 in the register at
+ offset 0x0c10 is 1
+ </doc>
+ <enum name="source_type">
+ <value value="0" name="SOURCE_REG"/>
+ <!-- Don't confuse with scratch registers, this is a separate memory
+ written into by CP_MEM_TO_SCRATCH_MEM. -->
+ <value value="1" name="SOURCE_SCRATCH_MEM" varset="chip" variants="A7XX-"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <!-- the register to test -->
+ <bitfield name="REG" low="0" high="17" varset="source_type" variants="SOURCE_REG"/>
+ <bitfield name="SCRATCH_MEM_OFFSET" low="0" high="17" varset="source_type" variants="SOURCE_SCRATCH_MEM"/>
+ <bitfield name="SOURCE" pos="18" type="source_type" addvariant="yes"/>
+ <!-- the bit to test -->
+ <bitfield name="BIT" low="20" high="24" type="uint"/>
+ <!-- skip implied CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="25" type="boolean"/>
+ <!-- the predicate bit to set (new in gen3+) -->
+ <bitfield name="PRED_BIT" low="26" high="30" type="uint"/>
+ <!-- update the predicate reg directly (new in gen3+) -->
+ <bitfield name="PRED_UPDATE" pos="31" type="boolean"/>
+ </reg32>
+
+ <!--
+ In PRED_UPDATE mode, the predicate reg is updated directly using two
+ more dwords, ignoring other bits:
+
+ PRED_REG = (PRED_REG & ~PRED_MASK) | (PRED_VAL & PRED_MASK);
+ -->
+ <reg32 offset="1" name="PRED_MASK" type="hex"/>
+ <reg32 offset="2" name="PRED_VAL" type="hex"/>
+</domain>
+
+<!-- I *think* this existed at least as far back as a4xx -->
+<domain name="CP_COND_REG_EXEC" width="32">
+ <enum name="compare_mode">
+ <!-- use the predicate bit set by CP_REG_TEST -->
+ <value value="1" name="PRED_TEST"/>
+ <!-- compare two registers directly for equality -->
+ <value value="2" name="REG_COMPARE"/>
+ <!-- test if certain render modes are set via CP_SET_MARKER -->
+ <value value="3" name="RENDER_MODE" varset="chip" variants="A6XX-"/>
+ <!-- compare REG0 for equality with immediate -->
+ <value value="4" name="REG_COMPARE_IMM" varset="chip" variants="A7XX-"/>
+ <!-- test which of BR/BV are enabled -->
+ <value value="5" name="THREAD_MODE" varset="chip" variants="A7XX-"/>
+ </enum>
+ <reg32 offset="0" name="0" varset="compare_mode">
+ <bitfield name="REG0" low="0" high="17" variants="REG_COMPARE" type="hex"/>
+
+ <!-- the predicate bit to test (new in gen3+) -->
+ <bitfield name="PRED_BIT" low="18" high="22" variants="PRED_TEST" type="uint"/>
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="23" varset="chip" variants="A7XX-" type="boolean"/>
+		<!-- With REG_COMPARE, read REG0 from ONCHIP memory instead of a register -->
+ <bitfield name="ONCHIP_MEM" pos="24" varset="chip" variants="A7XX-" type="boolean"/>
+
+ <!--
+ Note: these bits have the same meaning, and use the same
+ internal mechanism as the bits in CP_SET_DRAW_STATE.
+ When RENDER_MODE is selected, they're used as
+ a bitmask of which modes pass the test.
+ -->
+
+ <!-- RM6_BINNING -->
+ <bitfield name="BINNING" pos="25" variants="RENDER_MODE" type="boolean"/>
+ <!-- all others -->
+ <bitfield name="GMEM" pos="26" variants="RENDER_MODE" type="boolean"/>
+ <!-- RM6_BYPASS -->
+ <bitfield name="SYSMEM" pos="27" variants="RENDER_MODE" type="boolean"/>
+
+ <bitfield name="BV" pos="25" variants="THREAD_MODE" type="boolean"/>
+ <bitfield name="BR" pos="26" variants="THREAD_MODE" type="boolean"/>
+ <bitfield name="LPAC" pos="27" variants="THREAD_MODE" type="boolean"/>
+
+ <bitfield name="MODE" low="28" high="31" type="compare_mode" addvariant="yes"/>
+ </reg32>
+
+ <stripe varset="compare_mode" variants="PRED_TEST">
+ <reg32 offset="1" name="1">
+ <bitfield name="DWORDS" low="0" high="23" type="uint"/>
+ </reg32>
+ </stripe>
+
+ <stripe varset="compare_mode" variants="REG_COMPARE">
+ <reg32 offset="1" name="1">
+ <bitfield name="REG1" low="0" high="17" type="hex"/>
+			<!-- Read REG1 from ONCHIP memory instead of a register -->
+ <bitfield name="ONCHIP_MEM" pos="24" varset="chip" variants="A7XX-" type="boolean"/>
+ </reg32>
+ </stripe>
+
+ <stripe varset="compare_mode" variants="RENDER_MODE">
+ <reg32 offset="1" name="1">
+ <bitfield name="DWORDS" low="0" high="23" type="uint"/>
+ </reg32>
+ </stripe>
+
+ <stripe varset="compare_mode" variants="REG_COMPARE_IMM">
+ <reg32 offset="1" name="1">
+ <bitfield name="IMM" low="0" high="31"/>
+ </reg32>
+ </stripe>
+
+ <stripe varset="compare_mode" variants="THREAD_MODE">
+ <reg32 offset="1" name="1">
+ <bitfield name="DWORDS" low="0" high="23" type="uint"/>
+ </reg32>
+ </stripe>
+
+ <reg32 offset="2" name="2">
+ <bitfield name="DWORDS" low="0" high="23" type="uint"/>
+ </reg32>
+</domain>
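+
+<!--
+ Sketch of the control flow (inferred from the CP_REG_TEST doc above, not a
+ verified description of the firmware): if the selected test fails, the CP
+ presumably skips the next DWORDS dwords of the command stream; if it
+ passes, they are executed normally. With MODE=PRED_TEST the test is the
+ predicate bit previously set by CP_REG_TEST.
+-->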
+
+<domain name="CP_COND_EXEC" width="32">
+ <doc>
+ Executes the following DWORDs of commands if the dword at ADDR0
+ is not equal to 0 and the dword at ADDR1 is less than REF
+ (signed comparison).
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="ADDR0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR0_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="ADDR1_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="ADDR1_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="REF" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="DWORDS" low="0" high="31" type="uint"/>
+ </reg32>
+</domain>
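+
+<!--
+ The doc above as pseudocode (illustrative only):
+
+   if (*(uint32_t *)ADDR0 != 0 && *(int32_t *)ADDR1 < (int32_t)REF)
+     execute the next DWORDS dwords;
+   else
+     skip them;
+-->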
+
+<domain name="CP_SET_CTXSWITCH_IB" width="32">
+ <doc>
+		Used by the userspace driver to set various IB's which are
+		executed during context save/restore for handling state that
+		isn't restored by the context switch routine itself.
+ </doc>
+ <enum name="ctxswitch_ib">
+ <value name="RESTORE_IB" value="0">
+ <doc>Executed unconditionally when switching back to the context.</doc>
+ </value>
+ <value name="YIELD_RESTORE_IB" value="1">
+ <doc>
+				Executed when switching back after switching away during
+				execution of a CP_SET_MARKER packet with RM6_YIELD as the
+				payload *and* the normal save routine was bypassed for a
+				shorter one. I think this is connected to the
+				"skipsaverestore" bit set by the kernel when preempting.
+ </doc>
+ </value>
+ <value name="SAVE_IB" value="2">
+ <doc>
+ Executed when switching away from the context,
+ except for context switches initiated via
+ CP_YIELD.
+ </doc>
+ </value>
+ <value name="RB_SAVE_IB" value="3">
+ <doc>
+ This can only be set by the RB (i.e. the kernel)
+ and executes with protected mode off, but
+ is otherwise similar to SAVE_IB.
+
+ Note, kgsl calls this CP_KMD_AMBLE_TYPE
+ </doc>
+ </value>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="ADDR_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="ADDR_HI" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="DWORDS" low="0" high="19" type="uint"/>
+ <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/>
+ </reg32>
+</domain>
+
+<domain name="CP_REG_WRITE" width="32">
+ <enum name="reg_tracker">
+ <doc>
+ Keep shadow copies of these registers and only set them
+ when drawing, avoiding redundant writes:
+ - VPC_CNTL_0
+ - HLSQ_CONTROL_1_REG
+ - HLSQ_UNKNOWN_B980
+ </doc>
+ <value name="TRACK_CNTL_REG" value="0x1"/>
+ <doc>
+ Track RB_RENDER_CNTL, and insert a WFI in the following
+ situation:
+ - There is a write that disables binning
+ - There was a draw with binning left enabled, but in
+ BYPASS mode
+ Presumably this is a hang workaround?
+ </doc>
+ <value name="TRACK_RENDER_CNTL" value="0x2"/>
+ <doc>
+ Do a mysterious CP_EVENT_WRITE 0x3f when the low bit of
+ the data to write is 0. Used by the Vulkan blob with
+ PC_MULTIVIEW_CNTL, but this isn't predicated on particular
+ register(s) like the others.
+ </doc>
+ <value name="UNK_EVENT_WRITE" value="0x4"/>
+ <doc>
+ Tracks GRAS_LRZ_CNTL::GREATER, GRAS_LRZ_CNTL::DIR, and
+ GRAS_LRZ_DEPTH_VIEW with previous values, and if one of
+ the following is true:
+ - GRAS_LRZ_CNTL::GREATER has changed
+ - GRAS_LRZ_CNTL::DIR has changed, the old value is not
+ CUR_DIR_GE, and the new value is not CUR_DIR_DISABLED
+ - GRAS_LRZ_DEPTH_VIEW has changed
+ then it does a LRZ_FLUSH with GRAS_LRZ_CNTL::ENABLE
+ forced to 1.
+ Only exists in a650_sqe.fw.
+ </doc>
+ <value name="TRACK_LRZ" value="0x8"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TRACKER" low="0" high="3" type="reg_tracker"/>
+ </reg32>
+ <reg32 offset="1" name="1"/>
+ <reg32 offset="2" name="2"/>
+</domain>
+
+<domain name="CP_SMMU_TABLE_UPDATE" width="32">
+ <doc>
+ Note that the SMMU's definition of TTBRn can take different forms
+ depending on the pgtable format. But a5xx+ only uses aarch64
+ format.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="TTBR0_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="TTBR0_HI" low="0" high="15"/>
+ <bitfield name="ASID" low="16" high="31"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <doc>Unused, does not apply to aarch64 pgtable format</doc>
+ <bitfield name="CONTEXTIDR" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="CONTEXTBANK" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_START_BIN" width="32">
+ <reg32 offset="0" name="BIN_COUNT" type="uint"/>
+ <reg64 offset="1" name="PREFIX_ADDR" type="address"/>
+ <reg32 offset="3" name="PREFIX_DWORDS">
+ <doc>
+ Size of prefix for each bin. For each bin index i, the
+ prefix commands at PREFIX_ADDR + i * PREFIX_DWORDS are
+ executed in an IB2 before the IB1 commands following
+ this packet.
+ </doc>
+ </reg32>
+ <reg32 offset="4" name="BODY_DWORDS">
+ <doc>Number of dwords after this packet until CP_END_BIN</doc>
+ </reg32>
+</domain>
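+
+<!--
+ Per-bin execution described above, as a sketch (assuming byte addressing,
+ so the i'th prefix starts PREFIX_DWORDS * 4 bytes after the previous one):
+
+   for (i = 0; i < BIN_COUNT; i++) {
+     execute, as an IB2, the PREFIX_DWORDS dwords at
+         PREFIX_ADDR + i * PREFIX_DWORDS * 4;
+     execute the BODY_DWORDS dwords of IB1 commands following this packet;
+   }
+-->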
+
+<domain name="CP_WAIT_TIMESTAMP" width="32">
+ <enum name="ts_wait_value_src">
+ <!-- Wait for value at memory address to be >= SRC_0 (signed comparison) -->
+ <value value="0" name="TS_WAIT_GE_32B"/>
+ <!-- Wait for value at memory address to be >= SRC_0 (unsigned) -->
+ <value value="1" name="TS_WAIT_GE_64B"/>
+		<!-- Wait for value at memory address to be >= (TIMESTAMP_GLOBAL + TIMESTAMP_LOCAL) -->
+ <value value="2" name="TS_WAIT_GE_TIMESTAMP_SUM"/>
+ </enum>
+
+ <enum name="ts_wait_type">
+ <value value="0" name="TS_WAIT_RAM"/>
+ <value value="1" name="TS_WAIT_ONCHIP"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="WAIT_VALUE_SRC" low="0" high="1" type="ts_wait_value_src"/>
+ <bitfield name="WAIT_DST" pos="4" type="ts_wait_type" addvariant="yes"/>
+ </reg32>
+
+ <stripe varset="ts_wait_type" variants="TS_WAIT_RAM">
+ <reg64 offset="1" name="ADDR" type="address"/>
+ </stripe>
+
+ <stripe varset="ts_wait_type" variants="TS_WAIT_ONCHIP">
+ <reg32 offset="1" name="ONCHIP_ADDR_0" low="0" high="31"/>
+ </stripe>
+
+ <reg32 offset="3" name="SRC_0"/>
+ <reg32 offset="4" name="SRC_1"/>
+</domain>
+
+<domain name="CP_BV_BR_COUNT_OPS" width="32">
+ <enum name="pipe_count_op">
+ <value name="PIPE_CLEAR_BV_BR" value="0x1"/>
+ <value name="PIPE_SET_BR_OFFSET" value="0x2"/>
+		<!-- Wait until BV_counter > BR_counter -->
+ <value name="PIPE_BR_WAIT_FOR_BV" value="0x3"/>
+ <!-- Wait until (BR_counter + BR_OFFSET) > BV_counter -->
+ <value name="PIPE_BV_WAIT_FOR_BR" value="0x4"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="OP" low="0" high="3" type="pipe_count_op"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BR_OFFSET" low="0" high="15" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="CP_MODIFY_TIMESTAMP" width="32">
+ <enum name="timestamp_op">
+ <value name="MODIFY_TIMESTAMP_CLEAR" value="0"/>
+ <value name="MODIFY_TIMESTAMP_ADD_GLOBAL" value="1"/>
+ <value name="MODIFY_TIMESTAMP_ADD_LOCAL" value="2"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="ADD" low="0" high="7" type="uint"/>
+ <bitfield name="OP" low="28" high="31" type="timestamp_op"/>
+ </reg32>
+</domain>
+
+<domain name="CP_MEM_TO_SCRATCH_MEM" width="32">
+ <doc>
+		Best guess is that it is a faster way to fetch all the VSC_STATE
+		registers and keep them in a local scratch memory, instead of
+		fetching them every time when skipping IBs.
+ </doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="CNT" low="0" high="5" type="uint"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+		<doc>Scratch memory size is 48 dwords</doc>
+ <bitfield name="OFFSET" low="0" high="5" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="SRC" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="SRC_HI" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_THREAD_CONTROL" width="32">
+ <enum name="cp_thread">
+ <value name="CP_SET_THREAD_BR" value="1"/> <!-- Render -->
+ <value name="CP_SET_THREAD_BV" value="2"/> <!-- Visibility -->
+ <value name="CP_SET_THREAD_BOTH" value="3"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield low="0" high="1" name="THREAD" type="cp_thread"/>
+ <bitfield pos="27" name="CONCURRENT_BIN_DISABLE" type="boolean"/>
+ <bitfield pos="31" name="SYNC_THREADS" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="CP_FIXED_STRIDE_DRAW_TABLE" width="32">
+ <reg64 offset="0" name="IB_BASE"/>
+ <reg32 offset="2" name="2">
+ <!-- STRIDE * COUNT -->
+ <bitfield name="IB_SIZE" low="0" high="11"/>
+ <bitfield name="STRIDE" low="20" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="COUNT" low="0" high="31"/>
+ </reg32>
+</domain>
+
+<domain name="CP_RESET_CONTEXT_STATE" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
+ <bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
+ <bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
+ </reg32>
+</domain>
+
+</database>
+
diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml
new file mode 100644
index 000000000000..501ffc585a9f
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi.xml
@@ -0,0 +1,390 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI" width="32">
+ <enum name="dsi_traffic_mode">
+ <value name="NON_BURST_SYNCH_PULSE" value="0"/>
+ <value name="NON_BURST_SYNCH_EVENT" value="1"/>
+ <value name="BURST_MODE" value="2"/>
+ </enum>
+ <enum name="dsi_vid_dst_format">
+ <value name="VID_DST_FORMAT_RGB565" value="0"/>
+ <value name="VID_DST_FORMAT_RGB666" value="1"/>
+ <value name="VID_DST_FORMAT_RGB666_LOOSE" value="2"/>
+ <value name="VID_DST_FORMAT_RGB888" value="3"/>
+ </enum>
+ <enum name="dsi_rgb_swap">
+ <value name="SWAP_RGB" value="0"/>
+ <value name="SWAP_RBG" value="1"/>
+ <value name="SWAP_BGR" value="2"/>
+ <value name="SWAP_BRG" value="3"/>
+ <value name="SWAP_GRB" value="4"/>
+ <value name="SWAP_GBR" value="5"/>
+ </enum>
+ <enum name="dsi_cmd_trigger">
+ <value name="TRIGGER_NONE" value="0"/>
+ <value name="TRIGGER_SEOF" value="1"/>
+ <value name="TRIGGER_TE" value="2"/>
+ <value name="TRIGGER_SW" value="4"/>
+ <value name="TRIGGER_SW_SEOF" value="5"/>
+ <value name="TRIGGER_SW_TE" value="6"/>
+ </enum>
+ <enum name="dsi_cmd_dst_format">
+ <value name="CMD_DST_FORMAT_RGB111" value="0"/>
+ <value name="CMD_DST_FORMAT_RGB332" value="3"/>
+ <value name="CMD_DST_FORMAT_RGB444" value="4"/>
+ <value name="CMD_DST_FORMAT_RGB565" value="6"/>
+ <value name="CMD_DST_FORMAT_RGB666" value="7"/>
+ <value name="CMD_DST_FORMAT_RGB888" value="8"/>
+ </enum>
+ <enum name="dsi_lane_swap">
+ <value name="LANE_SWAP_0123" value="0"/>
+ <value name="LANE_SWAP_3012" value="1"/>
+ <value name="LANE_SWAP_2301" value="2"/>
+ <value name="LANE_SWAP_1230" value="3"/>
+ <value name="LANE_SWAP_0321" value="4"/>
+ <value name="LANE_SWAP_1032" value="5"/>
+ <value name="LANE_SWAP_2103" value="6"/>
+ <value name="LANE_SWAP_3210" value="7"/>
+ </enum>
+ <enum name="video_config_bpp">
+ <value name="VIDEO_CONFIG_18BPP" value="0"/>
+ <value name="VIDEO_CONFIG_24BPP" value="1"/>
+ </enum>
+ <enum name="video_pattern_sel">
+ <value name="VID_PRBS" value="0"/>
+ <value name="VID_INCREMENTAL" value="1"/>
+ <value name="VID_FIXED" value="2"/>
+ <value name="VID_MDSS_GENERAL_PATTERN" value="3"/>
+ </enum>
+ <enum name="cmd_mdp_stream0_pattern_sel">
+ <value name="CMD_MDP_PRBS" value="0"/>
+ <value name="CMD_MDP_INCREMENTAL" value="1"/>
+ <value name="CMD_MDP_FIXED" value="2"/>
+ <value name="CMD_MDP_MDSS_GENERAL_PATTERN" value="3"/>
+ </enum>
+ <enum name="cmd_dma_pattern_sel">
+ <value name="CMD_DMA_PRBS" value="0"/>
+ <value name="CMD_DMA_INCREMENTAL" value="1"/>
+ <value name="CMD_DMA_FIXED" value="2"/>
+ <value name="CMD_DMA_CUSTOM_PATTERN_DMA_FIFO" value="3"/>
+ </enum>
+ <bitset name="DSI_IRQ">
+ <bitfield name="CMD_DMA_DONE" pos="0" type="boolean"/>
+ <bitfield name="MASK_CMD_DMA_DONE" pos="1" type="boolean"/>
+ <bitfield name="CMD_MDP_DONE" pos="8" type="boolean"/>
+ <bitfield name="MASK_CMD_MDP_DONE" pos="9" type="boolean"/>
+ <bitfield name="VIDEO_DONE" pos="16" type="boolean"/>
+ <bitfield name="MASK_VIDEO_DONE" pos="17" type="boolean"/>
+ <bitfield name="BTA_DONE" pos="20" type="boolean"/>
+ <bitfield name="MASK_BTA_DONE" pos="21" type="boolean"/>
+ <bitfield name="ERROR" pos="24" type="boolean"/>
+ <bitfield name="MASK_ERROR" pos="25" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x00000" name="6G_HW_VERSION">
+ <bitfield name="MAJOR" low="28" high="31" type="uint"/>
+ <bitfield name="MINOR" low="16" high="27" type="uint"/>
+ <bitfield name="STEP" low="0" high="15" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00000" name="CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="VID_MODE_EN" pos="1" type="boolean"/>
+ <bitfield name="CMD_MODE_EN" pos="2" type="boolean"/>
+ <bitfield name="LANE0" pos="4" type="boolean"/>
+ <bitfield name="LANE1" pos="5" type="boolean"/>
+ <bitfield name="LANE2" pos="6" type="boolean"/>
+ <bitfield name="LANE3" pos="7" type="boolean"/>
+ <bitfield name="CLK_EN" pos="8" type="boolean"/>
+ <bitfield name="ECC_CHECK" pos="20" type="boolean"/>
+ <bitfield name="CRC_CHECK" pos="24" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00004" name="STATUS0">
+ <bitfield name="CMD_MODE_ENGINE_BUSY" pos="0" type="boolean"/>
+ <bitfield name="CMD_MODE_DMA_BUSY" pos="1" type="boolean"/>
+ <bitfield name="CMD_MODE_MDP_BUSY" pos="2" type="boolean"/>
+ <bitfield name="VIDEO_MODE_ENGINE_BUSY" pos="3" type="boolean"/>
+ <bitfield name="DSI_BUSY" pos="4" type="boolean"/> <!-- see mipi_dsi_cmd_bta_sw_trigger() -->
+ <bitfield name="INTERLEAVE_OP_CONTENTION" pos="31" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00008" name="FIFO_STATUS">
+ <bitfield name="VIDEO_MDP_FIFO_OVERFLOW" pos="0" type="boolean"/>
+ <bitfield name="VIDEO_MDP_FIFO_UNDERFLOW" pos="3" type="boolean"/>
+ <bitfield name="CMD_MDP_FIFO_UNDERFLOW" pos="7" type="boolean"/>
+ <bitfield name="CMD_DMA_FIFO_RD_WATERMARK_REACH" pos="8" type="boolean"/>
+ <bitfield name="CMD_DMA_FIFO_WR_WATERMARK_REACH" pos="9" type="boolean"/>
+ <bitfield name="CMD_DMA_FIFO_UNDERFLOW" pos="10" type="boolean"/>
+ <bitfield name="DLN0_LP_FIFO_EMPTY" pos="12" type="boolean"/>
+ <bitfield name="DLN0_LP_FIFO_FULL" pos="13" type="boolean"/>
+ <bitfield name="DLN0_LP_FIFO_OVERFLOW" pos="14" type="boolean"/>
+ <bitfield name="DLN0_HS_FIFO_EMPTY" pos="16" type="boolean"/>
+ <bitfield name="DLN0_HS_FIFO_FULL" pos="17" type="boolean"/>
+ <bitfield name="DLN0_HS_FIFO_OVERFLOW" pos="18" type="boolean"/>
+ <bitfield name="DLN0_HS_FIFO_UNDERFLOW" pos="19" type="boolean"/>
+ <bitfield name="DLN1_HS_FIFO_EMPTY" pos="20" type="boolean"/>
+ <bitfield name="DLN1_HS_FIFO_FULL" pos="21" type="boolean"/>
+ <bitfield name="DLN1_HS_FIFO_OVERFLOW" pos="22" type="boolean"/>
+ <bitfield name="DLN1_HS_FIFO_UNDERFLOW" pos="23" type="boolean"/>
+ <bitfield name="DLN2_HS_FIFO_EMPTY" pos="24" type="boolean"/>
+ <bitfield name="DLN2_HS_FIFO_FULL" pos="25" type="boolean"/>
+ <bitfield name="DLN2_HS_FIFO_OVERFLOW" pos="26" type="boolean"/>
+ <bitfield name="DLN2_HS_FIFO_UNDERFLOW" pos="27" type="boolean"/>
+ <bitfield name="DLN3_HS_FIFO_EMPTY" pos="28" type="boolean"/>
+ <bitfield name="DLN3_HS_FIFO_FULL" pos="29" type="boolean"/>
+ <bitfield name="DLN3_HS_FIFO_OVERFLOW" pos="30" type="boolean"/>
+ <bitfield name="DLN3_HS_FIFO_UNDERFLOW" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0000c" name="VID_CFG0">
+ <bitfield name="VIRT_CHANNEL" low="0" high="1" type="uint"/> <!-- always zero? -->
+ <bitfield name="DST_FORMAT" low="4" high="5" type="dsi_vid_dst_format"/>
+ <bitfield name="TRAFFIC_MODE" low="8" high="9" type="dsi_traffic_mode"/>
+ <bitfield name="BLLP_POWER_STOP" pos="12" type="boolean"/>
+ <bitfield name="EOF_BLLP_POWER_STOP" pos="15" type="boolean"/>
+ <bitfield name="HSA_POWER_STOP" pos="16" type="boolean"/>
+ <bitfield name="HBP_POWER_STOP" pos="20" type="boolean"/>
+ <bitfield name="HFP_POWER_STOP" pos="24" type="boolean"/>
+ <bitfield name="DATABUS_WIDEN" pos="25" type="boolean"/>
+ <bitfield name="PULSE_MODE_HSA_HE" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0001c" name="VID_CFG1">
+ <bitfield name="R_SEL" pos="0" type="boolean"/>
+ <bitfield name="G_SEL" pos="4" type="boolean"/>
+ <bitfield name="B_SEL" pos="8" type="boolean"/>
+ <bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
+ </reg32>
+ <reg32 offset="0x00020" name="ACTIVE_H">
+ <bitfield name="START" low="0" high="11" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00024" name="ACTIVE_V">
+ <bitfield name="START" low="0" high="11" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00028" name="TOTAL">
+ <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0002c" name="ACTIVE_HSYNC">
+ <bitfield name="START" low="0" high="11" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
+ <bitfield name="START" low="0" high="11" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
+ <bitfield name="START" low="0" high="11" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00038" name="CMD_DMA_CTRL">
+ <bitfield name="BROADCAST_EN" pos="31" type="boolean"/>
+ <bitfield name="FROM_FRAME_BUFFER" pos="28" type="boolean"/>
+ <bitfield name="LOW_POWER" pos="26" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0003c" name="CMD_CFG0">
+ <bitfield name="DST_FORMAT" low="0" high="3" type="dsi_cmd_dst_format"/>
+ <bitfield name="R_SEL" pos="4" type="boolean"/>
+ <bitfield name="G_SEL" pos="8" type="boolean"/>
+ <bitfield name="B_SEL" pos="12" type="boolean"/>
+ <bitfield name="INTERLEAVE_MAX" low="20" high="23" type="uint"/>
+ <bitfield name="RGB_SWAP" low="16" high="18" type="dsi_rgb_swap"/>
+ </reg32>
+ <reg32 offset="0x00040" name="CMD_CFG1">
+ <bitfield name="WR_MEM_START" low="0" high="7" type="uint"/>
+ <bitfield name="WR_MEM_CONTINUE" low="8" high="15" type="uint"/>
+ <bitfield name="INSERT_DCS_COMMAND" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00044" name="DMA_BASE"/>
+ <reg32 offset="0x00048" name="DMA_LEN"/>
+ <reg32 offset="0x00054" name="CMD_MDP_STREAM0_CTRL">
+ <bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
+ <bitfield name="VIRTUAL_CHANNEL" low="8" high="9" type="uint"/>
+ <bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
+ <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
+ <bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
+ <bitfield name="VIRTUAL_CHANNEL" low="8" high="9" type="uint"/>
+ <bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00060" name="CMD_MDP_STREAM1_TOTAL">
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00064" name="ACK_ERR_STATUS"/>
+ <array offset="0x00068" name="RDBK" length="4" stride="4">
+ <reg32 offset="0x0" name="DATA"/>
+ </array>
+ <reg32 offset="0x00080" name="TRIG_CTRL">
+ <bitfield name="DMA_TRIGGER" low="0" high="2" type="dsi_cmd_trigger"/>
+ <bitfield name="MDP_TRIGGER" low="4" high="6" type="dsi_cmd_trigger"/>
+ <bitfield name="STREAM" low="8" high="9" type="uint"/>
+ <bitfield name="BLOCK_DMA_WITHIN_FRAME" pos="12" type="boolean"/>
+ <bitfield name="TE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0008c" name="TRIG_DMA"/>
+ <reg32 offset="0x000b0" name="DLN0_PHY_ERR">
+ <bitfield name="DLN0_ERR_ESC" pos="0" type="boolean"/>
+ <bitfield name="DLN0_ERR_SYNC_ESC" pos="4" type="boolean"/>
+ <bitfield name="DLN0_ERR_CONTROL" pos="8" type="boolean"/>
+ <bitfield name="DLN0_ERR_CONTENTION_LP0" pos="12" type="boolean"/>
+ <bitfield name="DLN0_ERR_CONTENTION_LP1" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000b4" name="LP_TIMER_CTRL">
+ <bitfield name="LP_RX_TO" low="0" high="15" type="uint"/>
+ <bitfield name="BTA_TO" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x000b8" name="HS_TIMER_CTRL">
+ <bitfield name="HS_TX_TO" low="0" high="15" type="uint"/>
+ <bitfield name="TIMER_RESOLUTION" low="16" high="19" type="uint"/>
+ <bitfield name="HS_TX_TO_STOP_EN" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000bc" name="TIMEOUT_STATUS"/>
+ <reg32 offset="0x000c0" name="CLKOUT_TIMING_CTRL">
+ <bitfield name="T_CLK_PRE" low="0" high="5" type="uint"/>
+ <bitfield name="T_CLK_POST" low="8" high="13" type="uint"/>
+ </reg32>
+ <reg32 offset="0x000c8" name="EOT_PACKET_CTRL">
+ <bitfield name="TX_EOT_APPEND" pos="0" type="boolean"/>
+ <bitfield name="RX_EOT_IGNORE" pos="4" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000a4" name="LANE_STATUS">
+ <bitfield name="DLN0_STOPSTATE" pos="0" type="boolean"/>
+ <bitfield name="DLN1_STOPSTATE" pos="1" type="boolean"/>
+ <bitfield name="DLN2_STOPSTATE" pos="2" type="boolean"/>
+ <bitfield name="DLN3_STOPSTATE" pos="3" type="boolean"/>
+ <bitfield name="CLKLN_STOPSTATE" pos="4" type="boolean"/>
+ <bitfield name="DLN0_ULPS_ACTIVE_NOT" pos="8" type="boolean"/>
+ <bitfield name="DLN1_ULPS_ACTIVE_NOT" pos="9" type="boolean"/>
+ <bitfield name="DLN2_ULPS_ACTIVE_NOT" pos="10" type="boolean"/>
+ <bitfield name="DLN3_ULPS_ACTIVE_NOT" pos="11" type="boolean"/>
+ <bitfield name="CLKLN_ULPS_ACTIVE_NOT" pos="12" type="boolean"/>
+ <bitfield name="DLN0_DIRECTION" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000a8" name="LANE_CTRL">
+ <bitfield name="HS_REQ_SEL_PHY" pos="24" type="boolean"/>
+ <bitfield name="CLKLN_HS_FORCE_REQUEST" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000ac" name="LANE_SWAP_CTRL">
+ <bitfield name="DLN_SWAP_SEL" low="0" high="2" type="dsi_lane_swap"/>
+ </reg32>
+ <reg32 offset="0x00108" name="ERR_INT_MASK0"/>
+ <reg32 offset="0x0010c" name="INTR_CTRL" type="DSI_IRQ"/>
+ <reg32 offset="0x00114" name="RESET"/>
+ <reg32 offset="0x00118" name="CLK_CTRL">
+ <bitfield name="AHBS_HCLK_ON" pos="0" type="boolean"/>
+ <bitfield name="AHBM_SCLK_ON" pos="1" type="boolean"/>
+ <bitfield name="PCLK_ON" pos="2" type="boolean"/>
+ <bitfield name="DSICLK_ON" pos="3" type="boolean"/>
+ <bitfield name="BYTECLK_ON" pos="4" type="boolean"/>
+ <bitfield name="ESCCLK_ON" pos="5" type="boolean"/>
+ <bitfield name="FORCE_ON_DYN_AHBM_HCLK" pos="9" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0011c" name="CLK_STATUS">
+ <bitfield name="DSI_AON_AHBM_HCLK_ACTIVE" pos="0" type="boolean"/>
+ <bitfield name="DSI_DYN_AHBM_HCLK_ACTIVE" pos="1" type="boolean"/>
+ <bitfield name="DSI_AON_AHBS_HCLK_ACTIVE" pos="2" type="boolean"/>
+ <bitfield name="DSI_DYN_AHBS_HCLK_ACTIVE" pos="3" type="boolean"/>
+ <bitfield name="DSI_AON_DSICLK_ACTIVE" pos="4" type="boolean"/>
+ <bitfield name="DSI_DYN_DSICLK_ACTIVE" pos="5" type="boolean"/>
+ <bitfield name="DSI_AON_BYTECLK_ACTIVE" pos="6" type="boolean"/>
+ <bitfield name="DSI_DYN_BYTECLK_ACTIVE" pos="7" type="boolean"/>
+ <bitfield name="DSI_AON_ESCCLK_ACTIVE" pos="8" type="boolean"/>
+ <bitfield name="DSI_AON_PCLK_ACTIVE" pos="9" type="boolean"/>
+ <bitfield name="DSI_DYN_PCLK_ACTIVE" pos="10" type="boolean"/>
+ <bitfield name="DSI_DYN_CMD_PCLK_ACTIVE" pos="12" type="boolean"/>
+ <bitfield name="DSI_CMD_PCLK_ACTIVE" pos="13" type="boolean"/>
+ <bitfield name="DSI_VID_PCLK_ACTIVE" pos="14" type="boolean"/>
+ <bitfield name="DSI_CAM_BIST_PCLK_ACT" pos="15" type="boolean"/>
+ <bitfield name="PLL_UNLOCKED" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00128" name="PHY_RESET">
+ <bitfield name="RESET" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00160" name="TEST_PATTERN_GEN_VIDEO_INIT_VAL"/>
+ <reg32 offset="0x00198" name="TPG_MAIN_CONTROL">
+ <bitfield name="CHECKERED_RECTANGLE_PATTERN" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001a0" name="TPG_VIDEO_CONFIG">
+ <bitfield name="BPP" low="0" high="1" type="video_config_bpp"/>
+ <bitfield name="RGB" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00158" name="TEST_PATTERN_GEN_CTRL">
+ <bitfield name="CMD_DMA_PATTERN_SEL" low="16" high="17" type="cmd_dma_pattern_sel"/>
+ <bitfield name="CMD_MDP_STREAM0_PATTERN_SEL" low="8" high="9" type="cmd_mdp_stream0_pattern_sel"/>
+ <bitfield name="VIDEO_PATTERN_SEL" low="4" high="5" type="video_pattern_sel"/>
+ <bitfield name="TPG_DMA_FIFO_MODE" pos="2" type="boolean"/>
+ <bitfield name="CMD_DMA_TPG_EN" pos="1" type="boolean"/>
+ <bitfield name="EN" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00168" name="TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0"/>
+ <reg32 offset="0x00180" name="TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER">
+ <bitfield name="SW_TRIGGER" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0019c" name="TPG_MAIN_CONTROL2">
+ <bitfield name="CMD_MDP0_CHECKERED_RECTANGLE_PATTERN" pos="7" type="boolean"/>
+ <bitfield name="CMD_MDP1_CHECKERED_RECTANGLE_PATTERN" pos="16" type="boolean"/>
+ <bitfield name="CMD_MDP2_CHECKERED_RECTANGLE_PATTERN" pos="25" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0017c" name="T_CLK_PRE_EXTEND">
+ <bitfield name="INC_BY_2_BYTECLK" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001b4" name="CMD_MODE_MDP_CTRL2">
+ <bitfield name="DST_FORMAT2" low="0" high="3" type="dsi_cmd_dst_format"/>
+ <bitfield name="R_SEL" pos="4" type="boolean"/>
+ <bitfield name="G_SEL" pos="5" type="boolean"/>
+ <bitfield name="B_SEL" pos="6" type="boolean"/>
+ <bitfield name="BYTE_MSB_LSB_FLIP" pos="7" type="boolean"/>
+ <bitfield name="RGB_SWAP" low="8" high="10" type="dsi_rgb_swap"/>
+ <bitfield name="INPUT_RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
+ <bitfield name="BURST_MODE" pos="16" type="boolean"/>
+ <bitfield name="DATABUS_WIDEN" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001b8" name="CMD_MODE_MDP_STREAM2_CTRL">
+ <bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
+ <bitfield name="VIRTUAL_CHANNEL" low="8" high="9" type="uint"/>
+ <bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x001d0" name="RDBK_DATA_CTRL">
+ <bitfield name="COUNT" low="16" high="23" type="uint"/>
+ <bitfield name="CLR" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001f0" name="VERSION">
+ <bitfield name="MAJOR" low="24" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002d4" name="CPHY_MODE_CTRL"/>
+ <reg32 offset="0x0029c" name="VIDEO_COMPRESSION_MODE_CTRL">
+ <bitfield name="WC" low="16" high="31" type="uint"/>
+ <bitfield name="DATATYPE" low="8" high="13" type="uint"/>
+ <bitfield name="PKT_PER_LINE" low="6" high="7" type="uint"/>
+ <bitfield name="EOL_BYTE_NUM" low="4" high="5" type="uint"/>
+ <bitfield name="EN" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002a4" name="COMMAND_COMPRESSION_MODE_CTRL">
+ <bitfield name="STREAM1_DATATYPE" low="24" high="29" type="uint"/>
+ <bitfield name="STREAM1_PKT_PER_LINE" low="22" high="23" type="uint"/>
+ <bitfield name="STREAM1_EOL_BYTE_NUM" low="20" high="21" type="uint"/>
+ <bitfield name="STREAM1_EN" pos="16" type="boolean"/>
+ <bitfield name="STREAM0_DATATYPE" low="8" high="13" type="uint"/>
+ <bitfield name="STREAM0_PKT_PER_LINE" low="6" high="7" type="uint"/>
+ <bitfield name="STREAM0_EOL_BYTE_NUM" low="4" high="5" type="uint"/>
+ <bitfield name="STREAM0_EN" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002a8" name="COMMAND_COMPRESSION_MODE_CTRL2">
+ <bitfield name="STREAM1_SLICE_WIDTH" low="16" high="31" type="uint"/>
+ <bitfield name="STREAM0_SLICE_WIDTH" low="0" high="15" type="uint"/>
+ </reg32>
+
+</domain>
+
+</database>
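
The low/high attributes in the bitfield definitions above are what driver code ultimately uses to pack values into each register (the msm build appears to generate C shift/mask helpers from these XML files). Below is a minimal, self-contained sketch of that packing for the TOTAL register defined above (H_TOTAL in bits 0..11, V_TOTAL in bits 16..27); the macro names and timing values are chosen here for illustration and are not taken from the generated headers.

/*
 * Standalone sketch of the field packing implied by the <bitfield>
 * low/high attributes of DSI TOTAL above. Illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

/* H_TOTAL occupies bits 0..11, V_TOTAL bits 16..27 (12-bit fields) */
#define DSI_TOTAL_H_TOTAL(v)  (((uint32_t)(v) & 0xfff) << 0)
#define DSI_TOTAL_V_TOTAL(v)  (((uint32_t)(v) & 0xfff) << 16)

int main(void)
{
    /* hypothetical timing values, purely for illustration */
    uint32_t h_total = 2200, v_total = 1125;
    uint32_t val = DSI_TOTAL_H_TOTAL(h_total) | DSI_TOTAL_V_TOTAL(v_total);

    printf("TOTAL register value: 0x%08x\n", val);
    return 0;
}
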
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml
new file mode 100644
index 000000000000..874c3db3e126
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_10nm_PHY_CMN" width="32">
+ <reg32 offset="0x00000" name="REVISION_ID0"/>
+ <reg32 offset="0x00004" name="REVISION_ID1"/>
+ <reg32 offset="0x00008" name="REVISION_ID2"/>
+ <reg32 offset="0x0000c" name="REVISION_ID3"/>
+ <reg32 offset="0x00010" name="CLK_CFG0"/>
+ <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00018" name="GLBL_CTRL"/>
+ <reg32 offset="0x0001c" name="RBUF_CTRL"/>
+ <reg32 offset="0x00020" name="VREG_CTRL"/>
+ <reg32 offset="0x00024" name="CTRL_0"/>
+ <reg32 offset="0x00028" name="CTRL_1"/>
+ <reg32 offset="0x0002c" name="CTRL_2"/>
+ <reg32 offset="0x00030" name="LANE_CFG0"/>
+ <reg32 offset="0x00034" name="LANE_CFG1"/>
+ <reg32 offset="0x00038" name="PLL_CNTRL"/>
+ <reg32 offset="0x00098" name="LANE_CTRL0"/>
+ <reg32 offset="0x0009c" name="LANE_CTRL1"/>
+ <reg32 offset="0x000a0" name="LANE_CTRL2"/>
+ <reg32 offset="0x000a4" name="LANE_CTRL3"/>
+ <reg32 offset="0x000a8" name="LANE_CTRL4"/>
+ <reg32 offset="0x000ac" name="TIMING_CTRL_0"/>
+ <reg32 offset="0x000b0" name="TIMING_CTRL_1"/>
+ <reg32 offset="0x000b4" name="TIMING_CTRL_2"/>
+ <reg32 offset="0x000b8" name="TIMING_CTRL_3"/>
+ <reg32 offset="0x000bc" name="TIMING_CTRL_4"/>
+ <reg32 offset="0x000c0" name="TIMING_CTRL_5"/>
+ <reg32 offset="0x000c4" name="TIMING_CTRL_6"/>
+ <reg32 offset="0x000c8" name="TIMING_CTRL_7"/>
+ <reg32 offset="0x000cc" name="TIMING_CTRL_8"/>
+ <reg32 offset="0x000d0" name="TIMING_CTRL_9"/>
+ <reg32 offset="0x000d4" name="TIMING_CTRL_10"/>
+ <reg32 offset="0x000d8" name="TIMING_CTRL_11"/>
+ <reg32 offset="0x000ec" name="PHY_STATUS"/>
+ <reg32 offset="0x000f4" name="LANE_STATUS0"/>
+ <reg32 offset="0x000f8" name="LANE_STATUS1"/>
+</domain>
+
+<domain name="DSI_10nm_PHY" width="32">
+ <array offset="0x00000" name="LN" length="5" stride="0x80">
+ <reg32 offset="0x00" name="CFG0"/>
+ <reg32 offset="0x04" name="CFG1"/>
+ <reg32 offset="0x08" name="CFG2"/>
+ <reg32 offset="0x0c" name="CFG3"/>
+ <reg32 offset="0x10" name="TEST_DATAPATH"/>
+ <reg32 offset="0x14" name="PIN_SWAP"/>
+ <reg32 offset="0x18" name="HSTX_STR_CTRL"/>
+ <reg32 offset="0x1c" name="OFFSET_TOP_CTRL"/>
+ <reg32 offset="0x20" name="OFFSET_BOT_CTRL"/>
+ <reg32 offset="0x24" name="LPTX_STR_CTRL"/>
+ <reg32 offset="0x28" name="LPRX_CTRL"/>
+ <reg32 offset="0x2c" name="TX_DCTRL"/>
+ </array>
+</domain>
+
+<domain name="DSI_10nm_PHY_PLL" width="32">
+ <reg32 offset="0x0000" name="ANALOG_CONTROLS_ONE"/>
+ <reg32 offset="0x0004" name="ANALOG_CONTROLS_TWO"/>
+ <reg32 offset="0x0010" name="ANALOG_CONTROLS_THREE"/>
+ <reg32 offset="0x001c" name="DSM_DIVIDER"/>
+ <reg32 offset="0x0020" name="FEEDBACK_DIVIDER"/>
+ <reg32 offset="0x0024" name="SYSTEM_MUXES"/>
+ <reg32 offset="0x002c" name="CMODE"/>
+ <reg32 offset="0x0030" name="CALIBRATION_SETTINGS"/>
+ <reg32 offset="0x0054" name="BAND_SEL_CAL_SETTINGS_THREE"/>
+ <reg32 offset="0x0064" name="FREQ_DETECT_SETTINGS_ONE"/>
+ <reg32 offset="0x007c" name="PFILT"/>
+ <reg32 offset="0x0080" name="IFILT"/>
+ <reg32 offset="0x0094" name="OUTDIV"/>
+ <reg32 offset="0x00a4" name="CORE_OVERRIDE"/>
+ <reg32 offset="0x00a8" name="CORE_INPUT_OVERRIDE"/>
+ <reg32 offset="0x00b4" name="PLL_DIGITAL_TIMERS_TWO"/>
+ <reg32 offset="0x00cc" name="DECIMAL_DIV_START_1"/>
+ <reg32 offset="0x00d0" name="FRAC_DIV_START_LOW_1"/>
+ <reg32 offset="0x00d4" name="FRAC_DIV_START_MID_1"/>
+ <reg32 offset="0x00d8" name="FRAC_DIV_START_HIGH_1"/>
+ <reg32 offset="0x010c" name="SSC_STEPSIZE_LOW_1"/>
+ <reg32 offset="0x0110" name="SSC_STEPSIZE_HIGH_1"/>
+ <reg32 offset="0x0114" name="SSC_DIV_PER_LOW_1"/>
+ <reg32 offset="0x0118" name="SSC_DIV_PER_HIGH_1"/>
+ <reg32 offset="0x011c" name="SSC_DIV_ADJPER_LOW_1"/>
+ <reg32 offset="0x0120" name="SSC_DIV_ADJPER_HIGH_1"/>
+ <reg32 offset="0x013c" name="SSC_CONTROL"/>
+ <reg32 offset="0x0140" name="PLL_OUTDIV_RATE"/>
+ <reg32 offset="0x0144" name="PLL_LOCKDET_RATE_1"/>
+ <reg32 offset="0x014c" name="PLL_PROP_GAIN_RATE_1"/>
+ <reg32 offset="0x0154" name="PLL_BAND_SET_RATE_1"/>
+ <reg32 offset="0x015c" name="PLL_INT_GAIN_IFILT_BAND_1"/>
+ <reg32 offset="0x0164" name="PLL_FL_INT_GAIN_PFILT_BAND_1"/>
+ <reg32 offset="0x0180" name="PLL_LOCK_OVERRIDE"/>
+ <reg32 offset="0x0184" name="PLL_LOCK_DELAY"/>
+ <reg32 offset="0x018c" name="CLOCK_INVERTERS"/>
+ <reg32 offset="0x01a0" name="COMMON_STATUS_ONE"/>
+</domain>
+
+</database>
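
The LN <array> above (length 5, stride 0x80) describes five identical per-lane register banks, so the address of a lane register is the array base plus the lane index times the stride plus the register's offset within the bank. A small standalone sketch of that arithmetic follows; the constant and function names are invented here for illustration, not taken from the generated headers.

/*
 * Sketch of how the LN array above (length 5, stride 0x80) maps to
 * per-lane register offsets. Illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

#define DSI_10nm_PHY_LN_BASE      0x00000000u
#define DSI_10nm_PHY_LN_STRIDE    0x80u
#define DSI_10nm_PHY_LN_CFG0      0x00u   /* offsets within one lane bank */
#define DSI_10nm_PHY_LN_TX_DCTRL  0x2cu

static uint32_t ln_reg(unsigned int lane, uint32_t reg)
{
    /* array semantics: element base = array offset + index * stride */
    return DSI_10nm_PHY_LN_BASE + lane * DSI_10nm_PHY_LN_STRIDE + reg;
}

int main(void)
{
    for (unsigned int lane = 0; lane < 5; lane++)
        printf("lane %u: CFG0 @ 0x%03x, TX_DCTRL @ 0x%03x\n",
               lane, ln_reg(lane, DSI_10nm_PHY_LN_CFG0),
               ln_reg(lane, DSI_10nm_PHY_LN_TX_DCTRL));
    return 0;
}
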
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml
new file mode 100644
index 000000000000..314b74489d49
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_14nm_PHY_CMN" width="32">
+ <reg32 offset="0x00000" name="REVISION_ID0"/>
+ <reg32 offset="0x00004" name="REVISION_ID1"/>
+ <reg32 offset="0x00008" name="REVISION_ID2"/>
+ <reg32 offset="0x0000c" name="REVISION_ID3"/>
+ <reg32 offset="0x00010" name="CLK_CFG0">
+ <bitfield name="DIV_CTRL_3_0" low="4" high="7" type="uint"/>
+ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00014" name="CLK_CFG1">
+ <bitfield name="DSICLK_SEL" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00018" name="GLBL_TEST_CTRL">
+ <bitfield name="BITCLK_HS_SEL" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0001C" name="CTRL_0"/>
+ <reg32 offset="0x00020" name="CTRL_1">
+ </reg32>
+ <reg32 offset="0x00024" name="HW_TRIGGER"/>
+ <reg32 offset="0x00028" name="SW_CFG0"/>
+ <reg32 offset="0x0002C" name="SW_CFG1"/>
+ <reg32 offset="0x00030" name="SW_CFG2"/>
+ <reg32 offset="0x00034" name="HW_CFG0"/>
+ <reg32 offset="0x00038" name="HW_CFG1"/>
+ <reg32 offset="0x0003C" name="HW_CFG2"/>
+ <reg32 offset="0x00040" name="HW_CFG3"/>
+ <reg32 offset="0x00044" name="HW_CFG4"/>
+ <reg32 offset="0x00048" name="PLL_CNTRL">
+ <bitfield name="PLL_START" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0004C" name="LDO_CNTRL">
+ <bitfield name="VREG_CTRL" low="0" high="5" type="uint"/>
+ </reg32>
+</domain>
+
+<domain name="DSI_14nm_PHY" width="32">
+ <array offset="0x00000" name="LN" length="5" stride="0x80">
+ <reg32 offset="0x00" name="CFG0">
+ <bitfield name="PREPARE_DLY" low="6" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x04" name="CFG1">
+ <bitfield name="HALFBYTECLK_EN" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x08" name="CFG2"/>
+ <reg32 offset="0x0c" name="CFG3"/>
+ <reg32 offset="0x10" name="TEST_DATAPATH"/>
+ <reg32 offset="0x14" name="TEST_STR"/>
+ <reg32 offset="0x18" name="TIMING_CTRL_4">
+ <bitfield name="HS_EXIT" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x1c" name="TIMING_CTRL_5">
+ <bitfield name="HS_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x20" name="TIMING_CTRL_6">
+ <bitfield name="HS_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x24" name="TIMING_CTRL_7">
+ <bitfield name="HS_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x28" name="TIMING_CTRL_8">
+ <bitfield name="HS_RQST" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x2c" name="TIMING_CTRL_9">
+ <bitfield name="TA_GO" low="0" high="2" type="uint"/>
+ <bitfield name="TA_SURE" low="4" high="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x30" name="TIMING_CTRL_10">
+ <bitfield name="TA_GET" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x34" name="TIMING_CTRL_11">
+ <bitfield name="TRIG3_CMD" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x38" name="STRENGTH_CTRL_0"/>
+ <reg32 offset="0x3c" name="STRENGTH_CTRL_1"/>
+ <reg32 offset="0x64" name="VREG_CNTRL"/>
+ </array>
+</domain>
+
+<domain name="DSI_14nm_PHY_PLL" width="32">
+ <reg32 offset="0x000" name="IE_TRIM"/>
+ <reg32 offset="0x004" name="IP_TRIM"/>
+ <reg32 offset="0x010" name="IPTAT_TRIM"/>
+ <reg32 offset="0x01c" name="CLKBUFLR_EN"/>
+ <reg32 offset="0x028" name="SYSCLK_EN_RESET"/>
+ <reg32 offset="0x02c" name="RESETSM_CNTRL"/>
+ <reg32 offset="0x030" name="RESETSM_CNTRL2"/>
+ <reg32 offset="0x034" name="RESETSM_CNTRL3"/>
+ <reg32 offset="0x038" name="RESETSM_CNTRL4"/>
+ <reg32 offset="0x03c" name="RESETSM_CNTRL5"/>
+ <reg32 offset="0x040" name="KVCO_DIV_REF1"/>
+ <reg32 offset="0x044" name="KVCO_DIV_REF2"/>
+ <reg32 offset="0x048" name="KVCO_COUNT1"/>
+ <reg32 offset="0x04c" name="KVCO_COUNT2"/>
+ <reg32 offset="0x05c" name="VREF_CFG1"/>
+ <reg32 offset="0x058" name="KVCO_CODE"/>
+ <reg32 offset="0x06c" name="VCO_DIV_REF1"/>
+ <reg32 offset="0x070" name="VCO_DIV_REF2"/>
+ <reg32 offset="0x074" name="VCO_COUNT1"/>
+ <reg32 offset="0x078" name="VCO_COUNT2"/>
+ <reg32 offset="0x07c" name="PLLLOCK_CMP1"/>
+ <reg32 offset="0x080" name="PLLLOCK_CMP2"/>
+ <reg32 offset="0x084" name="PLLLOCK_CMP3"/>
+ <reg32 offset="0x088" name="PLLLOCK_CMP_EN"/>
+ <reg32 offset="0x08c" name="PLL_VCO_TUNE"/>
+ <reg32 offset="0x090" name="DEC_START"/>
+ <reg32 offset="0x094" name="SSC_EN_CENTER"/>
+ <reg32 offset="0x098" name="SSC_ADJ_PER1"/>
+ <reg32 offset="0x09c" name="SSC_ADJ_PER2"/>
+ <reg32 offset="0x0a0" name="SSC_PER1"/>
+ <reg32 offset="0x0a4" name="SSC_PER2"/>
+ <reg32 offset="0x0a8" name="SSC_STEP_SIZE1"/>
+ <reg32 offset="0x0ac" name="SSC_STEP_SIZE2"/>
+ <reg32 offset="0x0b4" name="DIV_FRAC_START1"/>
+ <reg32 offset="0x0b8" name="DIV_FRAC_START2"/>
+ <reg32 offset="0x0bc" name="DIV_FRAC_START3"/>
+ <reg32 offset="0x0c0" name="TXCLK_EN"/>
+ <reg32 offset="0x0c4" name="PLL_CRCTRL"/>
+ <reg32 offset="0x0cc" name="RESET_SM_READY_STATUS"/>
+ <reg32 offset="0x0e8" name="PLL_MISC1"/>
+ <reg32 offset="0x0f0" name="CP_SET_CUR"/>
+ <reg32 offset="0x0f4" name="PLL_ICPMSET"/>
+ <reg32 offset="0x0f8" name="PLL_ICPCSET"/>
+ <reg32 offset="0x0fc" name="PLL_ICP_SET"/>
+ <reg32 offset="0x100" name="PLL_LPF1"/>
+ <reg32 offset="0x104" name="PLL_LPF2_POSTDIV"/>
+ <reg32 offset="0x108" name="PLL_BANDGAP"/>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml
new file mode 100644
index 000000000000..99e9deb361b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_20nm_PHY" width="32">
+ <array offset="0x00000" name="LN" length="4" stride="0x40">
+ <reg32 offset="0x00" name="CFG_0"/>
+ <reg32 offset="0x04" name="CFG_1"/>
+ <reg32 offset="0x08" name="CFG_2"/>
+ <reg32 offset="0x0c" name="CFG_3"/>
+ <reg32 offset="0x10" name="CFG_4"/>
+ <reg32 offset="0x14" name="TEST_DATAPATH"/>
+ <reg32 offset="0x18" name="DEBUG_SEL"/>
+ <reg32 offset="0x1c" name="TEST_STR_0"/>
+ <reg32 offset="0x20" name="TEST_STR_1"/>
+ </array>
+
+ <reg32 offset="0x00100" name="LNCK_CFG_0"/>
+ <reg32 offset="0x00104" name="LNCK_CFG_1"/>
+ <reg32 offset="0x00108" name="LNCK_CFG_2"/>
+ <reg32 offset="0x0010c" name="LNCK_CFG_3"/>
+ <reg32 offset="0x00110" name="LNCK_CFG_4"/>
+ <reg32 offset="0x00114" name="LNCK_TEST_DATAPATH"/>
+ <reg32 offset="0x00118" name="LNCK_DEBUG_SEL"/>
+ <reg32 offset="0x0011c" name="LNCK_TEST_STR0"/>
+ <reg32 offset="0x00120" name="LNCK_TEST_STR1"/>
+
+ <reg32 offset="0x00140" name="TIMING_CTRL_0">
+ <bitfield name="CLK_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00144" name="TIMING_CTRL_1">
+ <bitfield name="CLK_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00148" name="TIMING_CTRL_2">
+ <bitfield name="CLK_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0014c" name="TIMING_CTRL_3">
+ <bitfield name="CLK_ZERO_8" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00150" name="TIMING_CTRL_4">
+ <bitfield name="HS_EXIT" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00154" name="TIMING_CTRL_5">
+ <bitfield name="HS_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00158" name="TIMING_CTRL_6">
+ <bitfield name="HS_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0015c" name="TIMING_CTRL_7">
+ <bitfield name="HS_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00160" name="TIMING_CTRL_8">
+ <bitfield name="HS_RQST" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00164" name="TIMING_CTRL_9">
+ <bitfield name="TA_GO" low="0" high="2" type="uint"/>
+ <bitfield name="TA_SURE" low="4" high="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00168" name="TIMING_CTRL_10">
+ <bitfield name="TA_GET" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0016c" name="TIMING_CTRL_11">
+ <bitfield name="TRIG3_CMD" low="0" high="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00170" name="CTRL_0"/>
+ <reg32 offset="0x00174" name="CTRL_1"/>
+ <reg32 offset="0x00178" name="CTRL_2"/>
+ <reg32 offset="0x0017c" name="CTRL_3"/>
+ <reg32 offset="0x00180" name="CTRL_4"/>
+
+ <reg32 offset="0x00184" name="STRENGTH_0"/>
+ <reg32 offset="0x00188" name="STRENGTH_1"/>
+
+ <reg32 offset="0x001b4" name="BIST_CTRL_0"/>
+ <reg32 offset="0x001b8" name="BIST_CTRL_1"/>
+ <reg32 offset="0x001bc" name="BIST_CTRL_2"/>
+ <reg32 offset="0x001c0" name="BIST_CTRL_3"/>
+ <reg32 offset="0x001c4" name="BIST_CTRL_4"/>
+ <reg32 offset="0x001c8" name="BIST_CTRL_5"/>
+
+ <reg32 offset="0x001d4" name="GLBL_TEST_CTRL">
+ <bitfield name="BITCLK_HS_SEL" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001dc" name="LDO_CNTRL"/>
+</domain>
+
+<domain name="DSI_20nm_PHY_REGULATOR" width="32">
+ <reg32 offset="0x00000" name="CTRL_0"/>
+ <reg32 offset="0x00004" name="CTRL_1"/>
+ <reg32 offset="0x00008" name="CTRL_2"/>
+ <reg32 offset="0x0000c" name="CTRL_3"/>
+ <reg32 offset="0x00010" name="CTRL_4"/>
+ <reg32 offset="0x00014" name="CTRL_5"/>
+ <reg32 offset="0x00018" name="CAL_PWR_CFG"/>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml
new file mode 100644
index 000000000000..81d5b96f18c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_28nm_PHY" width="32">
+ <array offset="0x00000" name="LN" length="4" stride="0x40">
+ <reg32 offset="0x00" name="CFG_0"/>
+ <reg32 offset="0x04" name="CFG_1"/>
+ <reg32 offset="0x08" name="CFG_2"/>
+ <reg32 offset="0x0c" name="CFG_3"/>
+ <reg32 offset="0x10" name="CFG_4"/>
+ <reg32 offset="0x14" name="TEST_DATAPATH"/>
+ <reg32 offset="0x18" name="DEBUG_SEL"/>
+ <reg32 offset="0x1c" name="TEST_STR_0"/>
+ <reg32 offset="0x20" name="TEST_STR_1"/>
+ </array>
+
+ <reg32 offset="0x00100" name="LNCK_CFG_0"/>
+ <reg32 offset="0x00104" name="LNCK_CFG_1"/>
+ <reg32 offset="0x00108" name="LNCK_CFG_2"/>
+ <reg32 offset="0x0010c" name="LNCK_CFG_3"/>
+ <reg32 offset="0x00110" name="LNCK_CFG_4"/>
+ <reg32 offset="0x00114" name="LNCK_TEST_DATAPATH"/>
+ <reg32 offset="0x00118" name="LNCK_DEBUG_SEL"/>
+ <reg32 offset="0x0011c" name="LNCK_TEST_STR0"/>
+ <reg32 offset="0x00120" name="LNCK_TEST_STR1"/>
+
+ <reg32 offset="0x00140" name="TIMING_CTRL_0">
+ <bitfield name="CLK_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00144" name="TIMING_CTRL_1">
+ <bitfield name="CLK_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00148" name="TIMING_CTRL_2">
+ <bitfield name="CLK_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0014c" name="TIMING_CTRL_3">
+ <bitfield name="CLK_ZERO_8" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00150" name="TIMING_CTRL_4">
+ <bitfield name="HS_EXIT" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00154" name="TIMING_CTRL_5">
+ <bitfield name="HS_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00158" name="TIMING_CTRL_6">
+ <bitfield name="HS_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0015c" name="TIMING_CTRL_7">
+ <bitfield name="HS_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00160" name="TIMING_CTRL_8">
+ <bitfield name="HS_RQST" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00164" name="TIMING_CTRL_9">
+ <bitfield name="TA_GO" low="0" high="2" type="uint"/>
+ <bitfield name="TA_SURE" low="4" high="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00168" name="TIMING_CTRL_10">
+ <bitfield name="TA_GET" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0016c" name="TIMING_CTRL_11">
+ <bitfield name="TRIG3_CMD" low="0" high="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00170" name="CTRL_0"/>
+ <reg32 offset="0x00174" name="CTRL_1"/>
+ <reg32 offset="0x00178" name="CTRL_2"/>
+ <reg32 offset="0x0017c" name="CTRL_3"/>
+ <reg32 offset="0x00180" name="CTRL_4"/>
+
+ <reg32 offset="0x00184" name="STRENGTH_0"/>
+ <reg32 offset="0x00188" name="STRENGTH_1"/>
+
+ <reg32 offset="0x001b4" name="BIST_CTRL_0"/>
+ <reg32 offset="0x001b8" name="BIST_CTRL_1"/>
+ <reg32 offset="0x001bc" name="BIST_CTRL_2"/>
+ <reg32 offset="0x001c0" name="BIST_CTRL_3"/>
+ <reg32 offset="0x001c4" name="BIST_CTRL_4"/>
+ <reg32 offset="0x001c8" name="BIST_CTRL_5"/>
+
+ <reg32 offset="0x001d4" name="GLBL_TEST_CTRL">
+ <bitfield name="BITCLK_HS_SEL" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x001dc" name="LDO_CNTRL"/>
+</domain>
+
+<domain name="DSI_28nm_PHY_REGULATOR" width="32">
+ <reg32 offset="0x00000" name="CTRL_0"/>
+ <reg32 offset="0x00004" name="CTRL_1"/>
+ <reg32 offset="0x00008" name="CTRL_2"/>
+ <reg32 offset="0x0000c" name="CTRL_3"/>
+ <reg32 offset="0x00010" name="CTRL_4"/>
+ <reg32 offset="0x00014" name="CTRL_5"/>
+ <reg32 offset="0x00018" name="CAL_PWR_CFG"/>
+</domain>
+
+<domain name="DSI_28nm_PHY_PLL" width="32">
+ <reg32 offset="0x00000" name="REFCLK_CFG">
+ <bitfield name="DBLR" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00004" name="POSTDIV1_CFG"/>
+ <reg32 offset="0x00008" name="CHGPUMP_CFG"/>
+ <reg32 offset="0x0000C" name="VCOLPF_CFG"/>
+ <reg32 offset="0x00010" name="VREG_CFG">
+ <bitfield name="POSTDIV1_BYPASS_B" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00014" name="PWRGEN_CFG"/>
+ <reg32 offset="0x00018" name="DMUX_CFG"/>
+ <reg32 offset="0x0001C" name="AMUX_CFG"/>
+ <reg32 offset="0x00020" name="GLB_CFG">
+ <bitfield name="PLL_PWRDN_B" pos="0" type="boolean"/>
+ <bitfield name="PLL_LDO_PWRDN_B" pos="1" type="boolean"/>
+ <bitfield name="PLL_PWRGEN_PWRDN_B" pos="2" type="boolean"/>
+ <bitfield name="PLL_ENABLE" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00024" name="POSTDIV2_CFG"/>
+ <reg32 offset="0x00028" name="POSTDIV3_CFG"/>
+ <reg32 offset="0x0002C" name="LPFR_CFG"/>
+ <reg32 offset="0x00030" name="LPFC1_CFG"/>
+ <reg32 offset="0x00034" name="LPFC2_CFG"/>
+ <reg32 offset="0x00038" name="SDM_CFG0">
+ <bitfield name="BYP_DIV" low="0" high="5" type="uint"/>
+ <bitfield name="BYP" pos="6" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0003C" name="SDM_CFG1">
+ <bitfield name="DC_OFFSET" low="0" high="5" type="uint"/>
+ <bitfield name="DITHER_EN" pos="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00040" name="SDM_CFG2">
+ <bitfield name="FREQ_SEED_7_0" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00044" name="SDM_CFG3">
+ <bitfield name="FREQ_SEED_15_8" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00048" name="SDM_CFG4"/>
+ <reg32 offset="0x0004C" name="SSC_CFG0"/>
+ <reg32 offset="0x00050" name="SSC_CFG1"/>
+ <reg32 offset="0x00054" name="SSC_CFG2"/>
+ <reg32 offset="0x00058" name="SSC_CFG3"/>
+ <reg32 offset="0x0005C" name="LKDET_CFG0"/>
+ <reg32 offset="0x00060" name="LKDET_CFG1"/>
+ <reg32 offset="0x00064" name="LKDET_CFG2"/>
+ <reg32 offset="0x00068" name="TEST_CFG">
+ <bitfield name="PLL_SW_RESET" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0006C" name="CAL_CFG0"/>
+ <reg32 offset="0x00070" name="CAL_CFG1"/>
+ <reg32 offset="0x00074" name="CAL_CFG2"/>
+ <reg32 offset="0x00078" name="CAL_CFG3"/>
+ <reg32 offset="0x0007C" name="CAL_CFG4"/>
+ <reg32 offset="0x00080" name="CAL_CFG5"/>
+ <reg32 offset="0x00084" name="CAL_CFG6"/>
+ <reg32 offset="0x00088" name="CAL_CFG7"/>
+ <reg32 offset="0x0008C" name="CAL_CFG8"/>
+ <reg32 offset="0x00090" name="CAL_CFG9"/>
+ <reg32 offset="0x00094" name="CAL_CFG10"/>
+ <reg32 offset="0x00098" name="CAL_CFG11"/>
+ <reg32 offset="0x0009C" name="EFUSE_CFG"/>
+ <reg32 offset="0x000A0" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x000A4" name="CTRL_42"/>
+ <reg32 offset="0x000A8" name="CTRL_43"/>
+ <reg32 offset="0x000AC" name="CTRL_44"/>
+ <reg32 offset="0x000B0" name="CTRL_45"/>
+ <reg32 offset="0x000B4" name="CTRL_46"/>
+ <reg32 offset="0x000B8" name="CTRL_47"/>
+ <reg32 offset="0x000BC" name="CTRL_48"/>
+ <reg32 offset="0x000C0" name="STATUS">
+ <bitfield name="PLL_RDY" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x000C4" name="DEBUG_BUS0"/>
+ <reg32 offset="0x000C8" name="DEBUG_BUS1"/>
+ <reg32 offset="0x000CC" name="DEBUG_BUS2"/>
+ <reg32 offset="0x000D0" name="DEBUG_BUS3"/>
+ <reg32 offset="0x000D4" name="CTRL_54"/>
+</domain>
+
+</database>
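
In the 28nm PLL block above, SDM_CFG2 carries FREQ_SEED_7_0 and SDM_CFG3 carries FREQ_SEED_15_8, which presumably form one 16-bit fractional seed that has to be split across the two byte-wide registers before being written. A standalone sketch of that split; the seed value is hypothetical and the variable names are chosen here for illustration.

/*
 * Sketch of splitting a 16-bit FREQ_SEED across SDM_CFG2 (bits 7..0)
 * and SDM_CFG3 (bits 15..8), as the field names above suggest.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t freq_seed = 0x31c0;                 /* hypothetical value */
    uint8_t sdm_cfg2 = freq_seed & 0xff;         /* FREQ_SEED_7_0  */
    uint8_t sdm_cfg3 = (freq_seed >> 8) & 0xff;  /* FREQ_SEED_15_8 */

    printf("SDM_CFG2=0x%02x SDM_CFG3=0x%02x\n", sdm_cfg2, sdm_cfg3);
    return 0;
}
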
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml
new file mode 100644
index 000000000000..4c4de4dda640
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_28nm_8960_PHY" width="32">
+
+ <array offset="0x00000" name="LN" length="4" stride="0x40">
+ <reg32 offset="0x00" name="CFG_0"/>
+ <reg32 offset="0x04" name="CFG_1"/>
+ <reg32 offset="0x08" name="CFG_2"/>
+ <reg32 offset="0x0c" name="TEST_DATAPATH"/>
+ <reg32 offset="0x14" name="TEST_STR_0"/>
+ <reg32 offset="0x18" name="TEST_STR_1"/>
+ </array>
+
+ <reg32 offset="0x00100" name="LNCK_CFG_0"/>
+ <reg32 offset="0x00104" name="LNCK_CFG_1"/>
+ <reg32 offset="0x00108" name="LNCK_CFG_2"/>
+
+ <reg32 offset="0x0010c" name="LNCK_TEST_DATAPATH"/>
+ <reg32 offset="0x00114" name="LNCK_TEST_STR0"/>
+ <reg32 offset="0x00118" name="LNCK_TEST_STR1"/>
+
+ <reg32 offset="0x00140" name="TIMING_CTRL_0">
+ <bitfield name="CLK_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00144" name="TIMING_CTRL_1">
+ <bitfield name="CLK_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00148" name="TIMING_CTRL_2">
+ <bitfield name="CLK_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x0014c" name="TIMING_CTRL_3"/>
+
+ <reg32 offset="0x00150" name="TIMING_CTRL_4">
+ <bitfield name="HS_EXIT" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00154" name="TIMING_CTRL_5">
+ <bitfield name="HS_ZERO" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00158" name="TIMING_CTRL_6">
+ <bitfield name="HS_PREPARE" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0015c" name="TIMING_CTRL_7">
+ <bitfield name="HS_TRAIL" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00160" name="TIMING_CTRL_8">
+ <bitfield name="HS_RQST" low="0" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00164" name="TIMING_CTRL_9">
+ <bitfield name="TA_GO" low="0" high="2" type="uint"/>
+ <bitfield name="TA_SURE" low="4" high="6" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00168" name="TIMING_CTRL_10">
+ <bitfield name="TA_GET" low="0" high="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0016c" name="TIMING_CTRL_11">
+ <bitfield name="TRIG3_CMD" low="0" high="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00170" name="CTRL_0"/>
+ <reg32 offset="0x00174" name="CTRL_1"/>
+ <reg32 offset="0x00178" name="CTRL_2"/>
+ <reg32 offset="0x0017c" name="CTRL_3"/>
+
+ <reg32 offset="0x00180" name="STRENGTH_0"/>
+ <reg32 offset="0x00184" name="STRENGTH_1"/>
+ <reg32 offset="0x00188" name="STRENGTH_2"/>
+
+ <reg32 offset="0x0018c" name="BIST_CTRL_0"/>
+ <reg32 offset="0x00190" name="BIST_CTRL_1"/>
+ <reg32 offset="0x00194" name="BIST_CTRL_2"/>
+ <reg32 offset="0x00198" name="BIST_CTRL_3"/>
+ <reg32 offset="0x0019c" name="BIST_CTRL_4"/>
+
+ <reg32 offset="0x001b0" name="LDO_CTRL"/>
+</domain>
+
+<domain name="DSI_28nm_8960_PHY_MISC" width="32">
+ <reg32 offset="0x00000" name="REGULATOR_CTRL_0"/>
+ <reg32 offset="0x00004" name="REGULATOR_CTRL_1"/>
+ <reg32 offset="0x00008" name="REGULATOR_CTRL_2"/>
+ <reg32 offset="0x0000c" name="REGULATOR_CTRL_3"/>
+ <reg32 offset="0x00010" name="REGULATOR_CTRL_4"/>
+ <reg32 offset="0x00014" name="REGULATOR_CTRL_5"/>
+ <reg32 offset="0x00018" name="REGULATOR_CAL_PWR_CFG"/>
+ <reg32 offset="0x00028" name="CAL_HW_TRIGGER"/>
+ <reg32 offset="0x0002c" name="CAL_SW_CFG_0"/>
+ <reg32 offset="0x00030" name="CAL_SW_CFG_1"/>
+ <reg32 offset="0x00034" name="CAL_SW_CFG_2"/>
+ <reg32 offset="0x00038" name="CAL_HW_CFG_0"/>
+ <reg32 offset="0x0003c" name="CAL_HW_CFG_1"/>
+ <reg32 offset="0x00040" name="CAL_HW_CFG_2"/>
+ <reg32 offset="0x00044" name="CAL_HW_CFG_3"/>
+ <reg32 offset="0x00048" name="CAL_HW_CFG_4"/>
+ <reg32 offset="0x00050" name="CAL_STATUS">
+ <bitfield name="CAL_BUSY" pos="4" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="DSI_28nm_8960_PHY_PLL" width="32">
+ <reg32 offset="0x00000" name="CTRL_0">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00004" name="CTRL_1"/>
+ <reg32 offset="0x00008" name="CTRL_2"/>
+ <reg32 offset="0x0000c" name="CTRL_3"/>
+ <reg32 offset="0x00010" name="CTRL_4"/>
+ <reg32 offset="0x00014" name="CTRL_5"/>
+ <reg32 offset="0x00018" name="CTRL_6"/>
+ <reg32 offset="0x0001c" name="CTRL_7"/>
+ <reg32 offset="0x00020" name="CTRL_8"/>
+ <reg32 offset="0x00024" name="CTRL_9"/>
+ <reg32 offset="0x00028" name="CTRL_10"/>
+ <reg32 offset="0x0002c" name="CTRL_11"/>
+ <reg32 offset="0x00030" name="CTRL_12"/>
+ <reg32 offset="0x00034" name="CTRL_13"/>
+ <reg32 offset="0x00038" name="CTRL_14"/>
+ <reg32 offset="0x0003c" name="CTRL_15"/>
+ <reg32 offset="0x00040" name="CTRL_16"/>
+ <reg32 offset="0x00044" name="CTRL_17"/>
+ <reg32 offset="0x00048" name="CTRL_18"/>
+ <reg32 offset="0x0004c" name="CTRL_19"/>
+ <reg32 offset="0x00050" name="CTRL_20"/>
+
+ <reg32 offset="0x00080" name="RDY">
+ <bitfield name="PLL_RDY" pos="0" type="boolean"/>
+ </reg32>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
new file mode 100644
index 000000000000..d54b72f92449
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -0,0 +1,230 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="DSI_7nm_PHY_CMN" width="32">
+ <reg32 offset="0x00000" name="REVISION_ID0"/>
+ <reg32 offset="0x00004" name="REVISION_ID1"/>
+ <reg32 offset="0x00008" name="REVISION_ID2"/>
+ <reg32 offset="0x0000c" name="REVISION_ID3"/>
+ <reg32 offset="0x00010" name="CLK_CFG0"/>
+ <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00018" name="GLBL_CTRL"/>
+ <reg32 offset="0x0001c" name="RBUF_CTRL"/>
+ <reg32 offset="0x00020" name="VREG_CTRL_0"/>
+ <reg32 offset="0x00024" name="CTRL_0"/>
+ <reg32 offset="0x00028" name="CTRL_1"/>
+ <reg32 offset="0x0002c" name="CTRL_2"/>
+ <reg32 offset="0x00030" name="CTRL_3"/>
+ <reg32 offset="0x00034" name="LANE_CFG0"/>
+ <reg32 offset="0x00038" name="LANE_CFG1"/>
+ <reg32 offset="0x0003c" name="PLL_CNTRL"/>
+ <reg32 offset="0x00040" name="DPHY_SOT"/>
+ <reg32 offset="0x000a0" name="LANE_CTRL0"/>
+ <reg32 offset="0x000a4" name="LANE_CTRL1"/>
+ <reg32 offset="0x000a8" name="LANE_CTRL2"/>
+ <reg32 offset="0x000ac" name="LANE_CTRL3"/>
+ <reg32 offset="0x000b0" name="LANE_CTRL4"/>
+ <reg32 offset="0x000b4" name="TIMING_CTRL_0"/>
+ <reg32 offset="0x000b8" name="TIMING_CTRL_1"/>
+ <reg32 offset="0x000bc" name="TIMING_CTRL_2"/>
+ <reg32 offset="0x000c0" name="TIMING_CTRL_3"/>
+ <reg32 offset="0x000c4" name="TIMING_CTRL_4"/>
+ <reg32 offset="0x000c8" name="TIMING_CTRL_5"/>
+ <reg32 offset="0x000cc" name="TIMING_CTRL_6"/>
+ <reg32 offset="0x000d0" name="TIMING_CTRL_7"/>
+ <reg32 offset="0x000d4" name="TIMING_CTRL_8"/>
+ <reg32 offset="0x000d8" name="TIMING_CTRL_9"/>
+ <reg32 offset="0x000dc" name="TIMING_CTRL_10"/>
+ <reg32 offset="0x000e0" name="TIMING_CTRL_11"/>
+ <reg32 offset="0x000e4" name="TIMING_CTRL_12"/>
+ <reg32 offset="0x000e8" name="TIMING_CTRL_13"/>
+ <reg32 offset="0x000ec" name="GLBL_HSTX_STR_CTRL_0"/>
+ <reg32 offset="0x000f0" name="GLBL_HSTX_STR_CTRL_1"/>
+ <reg32 offset="0x000f4" name="GLBL_RESCODE_OFFSET_TOP_CTRL"/>
+ <reg32 offset="0x000f8" name="GLBL_RESCODE_OFFSET_BOT_CTRL"/>
+ <reg32 offset="0x000fc" name="GLBL_RESCODE_OFFSET_MID_CTRL"/>
+ <reg32 offset="0x00100" name="GLBL_LPTX_STR_CTRL"/>
+ <reg32 offset="0x00104" name="GLBL_PEMPH_CTRL_0"/>
+ <reg32 offset="0x00108" name="GLBL_PEMPH_CTRL_1"/>
+ <reg32 offset="0x0010c" name="GLBL_STR_SWI_CAL_SEL_CTRL"/>
+ <reg32 offset="0x00110" name="VREG_CTRL_1"/>
+ <reg32 offset="0x00114" name="CTRL_4"/>
+ <reg32 offset="0x00128" name="GLBL_DIGTOP_SPARE4"/>
+ <reg32 offset="0x00140" name="PHY_STATUS"/>
+ <reg32 offset="0x00148" name="LANE_STATUS0"/>
+ <reg32 offset="0x0014c" name="LANE_STATUS1"/>
+ <reg32 offset="0x001ac" name="GLBL_DIGTOP_SPARE10"/>
+</domain>
+
+<domain name="DSI_7nm_PHY" width="32">
+ <array offset="0x00000" name="LN" length="5" stride="0x80">
+ <reg32 offset="0x00" name="CFG0"/>
+ <reg32 offset="0x04" name="CFG1"/>
+ <reg32 offset="0x08" name="CFG2"/>
+ <reg32 offset="0x0c" name="TEST_DATAPATH"/>
+ <reg32 offset="0x10" name="PIN_SWAP"/>
+ <reg32 offset="0x14" name="LPRX_CTRL"/>
+ <reg32 offset="0x18" name="TX_DCTRL"/>
+ </array>
+</domain>
+
+<domain name="DSI_7nm_PHY_PLL" width="32">
+ <reg32 offset="0x0000" name="ANALOG_CONTROLS_ONE"/>
+ <reg32 offset="0x0004" name="ANALOG_CONTROLS_TWO"/>
+ <reg32 offset="0x0008" name="INT_LOOP_SETTINGS"/>
+ <reg32 offset="0x000c" name="INT_LOOP_SETTINGS_TWO"/>
+ <reg32 offset="0x0010" name="ANALOG_CONTROLS_THREE"/>
+ <reg32 offset="0x0014" name="ANALOG_CONTROLS_FOUR"/>
+ <reg32 offset="0x0018" name="ANALOG_CONTROLS_FIVE"/>
+ <reg32 offset="0x001c" name="INT_LOOP_CONTROLS"/>
+ <reg32 offset="0x0020" name="DSM_DIVIDER"/>
+ <reg32 offset="0x0024" name="FEEDBACK_DIVIDER"/>
+ <reg32 offset="0x0028" name="SYSTEM_MUXES"/>
+ <reg32 offset="0x002c" name="FREQ_UPDATE_CONTROL_OVERRIDES"/>
+ <reg32 offset="0x0030" name="CMODE"/>
+ <reg32 offset="0x0034" name="PSM_CTRL"/>
+ <reg32 offset="0x0038" name="RSM_CTRL"/>
+ <reg32 offset="0x003c" name="VCO_TUNE_MAP"/>
+ <reg32 offset="0x0040" name="PLL_CNTRL"/>
+ <reg32 offset="0x0044" name="CALIBRATION_SETTINGS"/>
+ <reg32 offset="0x0048" name="BAND_SEL_CAL_TIMER_LOW"/>
+ <reg32 offset="0x004c" name="BAND_SEL_CAL_TIMER_HIGH"/>
+ <reg32 offset="0x0050" name="BAND_SEL_CAL_SETTINGS"/>
+ <reg32 offset="0x0054" name="BAND_SEL_MIN"/>
+ <reg32 offset="0x0058" name="BAND_SEL_MAX"/>
+ <reg32 offset="0x005c" name="BAND_SEL_PFILT"/>
+ <reg32 offset="0x0060" name="BAND_SEL_IFILT"/>
+ <reg32 offset="0x0064" name="BAND_SEL_CAL_SETTINGS_TWO"/>
+ <reg32 offset="0x0068" name="BAND_SEL_CAL_SETTINGS_THREE"/>
+ <reg32 offset="0x006c" name="BAND_SEL_CAL_SETTINGS_FOUR"/>
+ <reg32 offset="0x0070" name="BAND_SEL_ICODE_HIGH"/>
+ <reg32 offset="0x0074" name="BAND_SEL_ICODE_LOW"/>
+ <reg32 offset="0x0078" name="FREQ_DETECT_SETTINGS_ONE"/>
+ <reg32 offset="0x007c" name="FREQ_DETECT_THRESH"/>
+ <reg32 offset="0x0080" name="FREQ_DET_REFCLK_HIGH"/>
+ <reg32 offset="0x0084" name="FREQ_DET_REFCLK_LOW"/>
+ <reg32 offset="0x0088" name="FREQ_DET_PLLCLK_HIGH"/>
+ <reg32 offset="0x008c" name="FREQ_DET_PLLCLK_LOW"/>
+ <reg32 offset="0x0090" name="PFILT"/>
+ <reg32 offset="0x0094" name="IFILT"/>
+ <reg32 offset="0x0098" name="PLL_GAIN"/>
+ <reg32 offset="0x009c" name="ICODE_LOW"/>
+ <reg32 offset="0x00a0" name="ICODE_HIGH"/>
+ <reg32 offset="0x00a4" name="LOCKDET"/>
+ <reg32 offset="0x00a8" name="OUTDIV"/>
+ <reg32 offset="0x00ac" name="FASTLOCK_CONTROL"/>
+ <reg32 offset="0x00b0" name="PASS_OUT_OVERRIDE_ONE"/>
+ <reg32 offset="0x00b4" name="PASS_OUT_OVERRIDE_TWO"/>
+ <reg32 offset="0x00b8" name="CORE_OVERRIDE"/>
+ <reg32 offset="0x00bc" name="CORE_INPUT_OVERRIDE"/>
+ <reg32 offset="0x00c0" name="RATE_CHANGE"/>
+ <reg32 offset="0x00c4" name="PLL_DIGITAL_TIMERS"/>
+ <reg32 offset="0x00c8" name="PLL_DIGITAL_TIMERS_TWO"/>
+ <reg32 offset="0x00cc" name="DECIMAL_DIV_START"/>
+ <reg32 offset="0x00d0" name="FRAC_DIV_START_LOW"/>
+ <reg32 offset="0x00d4" name="FRAC_DIV_START_MID"/>
+ <reg32 offset="0x00d8" name="FRAC_DIV_START_HIGH"/>
+ <reg32 offset="0x00dc" name="DEC_FRAC_MUXES"/>
+ <reg32 offset="0x00e0" name="DECIMAL_DIV_START_1"/>
+ <reg32 offset="0x00e4" name="FRAC_DIV_START_LOW_1"/>
+ <reg32 offset="0x00e8" name="FRAC_DIV_START_MID_1"/>
+ <reg32 offset="0x00ec" name="FRAC_DIV_START_HIGH_1"/>
+ <reg32 offset="0x00f0" name="DECIMAL_DIV_START_2"/>
+ <reg32 offset="0x00f4" name="FRAC_DIV_START_LOW_2"/>
+ <reg32 offset="0x00f8" name="FRAC_DIV_START_MID_2"/>
+ <reg32 offset="0x00fc" name="FRAC_DIV_START_HIGH_2"/>
+ <reg32 offset="0x0100" name="MASH_CONTROL"/>
+ <reg32 offset="0x0104" name="SSC_STEPSIZE_LOW"/>
+ <reg32 offset="0x0108" name="SSC_STEPSIZE_HIGH"/>
+ <reg32 offset="0x010c" name="SSC_DIV_PER_LOW"/>
+ <reg32 offset="0x0110" name="SSC_DIV_PER_HIGH"/>
+ <reg32 offset="0x0114" name="SSC_ADJPER_LOW"/>
+ <reg32 offset="0x0118" name="SSC_ADJPER_HIGH"/>
+ <reg32 offset="0x011c" name="SSC_MUX_CONTROL"/>
+ <reg32 offset="0x0120" name="SSC_STEPSIZE_LOW_1"/>
+ <reg32 offset="0x0124" name="SSC_STEPSIZE_HIGH_1"/>
+ <reg32 offset="0x0128" name="SSC_DIV_PER_LOW_1"/>
+ <reg32 offset="0x012c" name="SSC_DIV_PER_HIGH_1"/>
+ <reg32 offset="0x0130" name="SSC_ADJPER_LOW_1"/>
+ <reg32 offset="0x0134" name="SSC_ADJPER_HIGH_1"/>
+ <reg32 offset="0x0138" name="SSC_STEPSIZE_LOW_2"/>
+ <reg32 offset="0x013c" name="SSC_STEPSIZE_HIGH_2"/>
+ <reg32 offset="0x0140" name="SSC_DIV_PER_LOW_2"/>
+ <reg32 offset="0x0144" name="SSC_DIV_PER_HIGH_2"/>
+ <reg32 offset="0x0148" name="SSC_ADJPER_LOW_2"/>
+ <reg32 offset="0x014c" name="SSC_ADJPER_HIGH_2"/>
+ <reg32 offset="0x0150" name="SSC_CONTROL"/>
+ <reg32 offset="0x0154" name="PLL_OUTDIV_RATE"/>
+ <reg32 offset="0x0158" name="PLL_LOCKDET_RATE_1"/>
+ <reg32 offset="0x015c" name="PLL_LOCKDET_RATE_2"/>
+ <reg32 offset="0x0160" name="PLL_PROP_GAIN_RATE_1"/>
+ <reg32 offset="0x0164" name="PLL_PROP_GAIN_RATE_2"/>
+ <reg32 offset="0x0168" name="PLL_BAND_SEL_RATE_1"/>
+ <reg32 offset="0x016c" name="PLL_BAND_SEL_RATE_2"/>
+ <reg32 offset="0x0170" name="PLL_INT_GAIN_IFILT_BAND_1"/>
+ <reg32 offset="0x0174" name="PLL_INT_GAIN_IFILT_BAND_2"/>
+ <reg32 offset="0x0178" name="PLL_FL_INT_GAIN_PFILT_BAND_1"/>
+ <reg32 offset="0x017c" name="PLL_FL_INT_GAIN_PFILT_BAND_2"/>
+ <reg32 offset="0x0180" name="PLL_FASTLOCK_EN_BAND"/>
+ <reg32 offset="0x0184" name="FREQ_TUNE_ACCUM_INIT_MID"/>
+ <reg32 offset="0x0188" name="FREQ_TUNE_ACCUM_INIT_HIGH"/>
+ <reg32 offset="0x018c" name="FREQ_TUNE_ACCUM_INIT_MUX"/>
+ <reg32 offset="0x0190" name="PLL_LOCK_OVERRIDE"/>
+ <reg32 offset="0x0194" name="PLL_LOCK_DELAY"/>
+ <reg32 offset="0x0198" name="PLL_LOCK_MIN_DELAY"/>
+ <reg32 offset="0x019c" name="CLOCK_INVERTERS"/>
+ <reg32 offset="0x01a0" name="SPARE_AND_JPC_OVERRIDES"/>
+ <reg32 offset="0x01a4" name="BIAS_CONTROL_1"/>
+ <reg32 offset="0x01a8" name="BIAS_CONTROL_2"/>
+ <reg32 offset="0x01ac" name="ALOG_OBSV_BUS_CTRL_1"/>
+ <reg32 offset="0x01b0" name="COMMON_STATUS_ONE"/>
+ <reg32 offset="0x01b4" name="COMMON_STATUS_TWO"/>
+ <reg32 offset="0x01b8" name="BAND_SEL_CAL"/>
+ <reg32 offset="0x01bc" name="ICODE_ACCUM_STATUS_LOW"/>
+ <reg32 offset="0x01c0" name="ICODE_ACCUM_STATUS_HIGH"/>
+ <reg32 offset="0x01c4" name="FD_OUT_LOW"/>
+ <reg32 offset="0x01c8" name="FD_OUT_HIGH"/>
+ <reg32 offset="0x01cc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ <reg32 offset="0x01d0" name="PLL_MISC_CONFIG"/>
+ <reg32 offset="0x01d4" name="FLL_CONFIG"/>
+ <reg32 offset="0x01d8" name="FLL_FREQ_ACQ_TIME"/>
+ <reg32 offset="0x01dc" name="FLL_CODE0"/>
+ <reg32 offset="0x01e0" name="FLL_CODE1"/>
+ <reg32 offset="0x01e4" name="FLL_GAIN0"/>
+ <reg32 offset="0x01e8" name="FLL_GAIN1"/>
+ <reg32 offset="0x01ec" name="SW_RESET"/>
+ <reg32 offset="0x01f0" name="FAST_PWRUP"/>
+ <reg32 offset="0x01f4" name="LOCKTIME0"/>
+ <reg32 offset="0x01f8" name="LOCKTIME1"/>
+ <reg32 offset="0x01fc" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x0200" name="DEBUG_BUS0"/>
+ <reg32 offset="0x0204" name="DEBUG_BUS1"/>
+ <reg32 offset="0x0208" name="DEBUG_BUS2"/>
+ <reg32 offset="0x020c" name="DEBUG_BUS3"/>
+ <reg32 offset="0x0210" name="ANALOG_FLL_CONTROL_OVERRIDES"/>
+ <reg32 offset="0x0214" name="VCO_CONFIG"/>
+ <reg32 offset="0x0218" name="VCO_CAL_CODE1_MODE0_STATUS"/>
+ <reg32 offset="0x021c" name="VCO_CAL_CODE1_MODE1_STATUS"/>
+ <reg32 offset="0x0220" name="RESET_SM_STATUS"/>
+ <reg32 offset="0x0224" name="TDC_OFFSET"/>
+ <reg32 offset="0x0228" name="PS3_PWRDOWN_CONTROLS"/>
+ <reg32 offset="0x022c" name="PS4_PWRDOWN_CONTROLS"/>
+ <reg32 offset="0x0230" name="PLL_RST_CONTROLS"/>
+ <reg32 offset="0x0234" name="GEAR_BAND_SELECT_CONTROLS"/>
+ <reg32 offset="0x0238" name="PSM_CLK_CONTROLS"/>
+ <reg32 offset="0x023c" name="SYSTEM_MUXES_2"/>
+ <reg32 offset="0x0240" name="VCO_CONFIG_1"/>
+ <reg32 offset="0x0244" name="VCO_CONFIG_2"/>
+ <reg32 offset="0x0248" name="CLOCK_INVERTERS_1"/>
+ <reg32 offset="0x024c" name="CLOCK_INVERTERS_2"/>
+ <reg32 offset="0x0250" name="CMODE_1"/>
+ <reg32 offset="0x0254" name="CMODE_2"/>
+ <reg32 offset="0x0258" name="ANALOG_CONTROLS_FIVE_1"/>
+ <reg32 offset="0x025c" name="ANALOG_CONTROLS_FIVE_2"/>
+ <reg32 offset="0x0260" name="PERF_OPTIMIZE"/>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/edp.xml b/drivers/gpu/drm/msm/registers/display/edp.xml
new file mode 100644
index 000000000000..354f90eb6de5
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/edp.xml
@@ -0,0 +1,239 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="EDP" width="32">
+ <enum name="edp_color_depth">
+ <value name="EDP_6BIT" value="0"/>
+ <value name="EDP_8BIT" value="1"/>
+ <value name="EDP_10BIT" value="2"/>
+ <value name="EDP_12BIT" value="3"/>
+ <value name="EDP_16BIT" value="4"/>
+ </enum>
+
+ <enum name="edp_component_format">
+ <value name="EDP_RGB" value="0"/>
+ <value name="EDP_YUV422" value="1"/>
+ <value name="EDP_YUV444" value="2"/>
+ </enum>
+
+ <reg32 offset="0x0004" name="MAINLINK_CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="RESET" pos="1" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0008" name="STATE_CTRL">
+ <bitfield name="TRAIN_PATTERN_1" pos="0" type="boolean"/>
+ <bitfield name="TRAIN_PATTERN_2" pos="1" type="boolean"/>
+ <bitfield name="TRAIN_PATTERN_3" pos="2" type="boolean"/>
+ <bitfield name="SYMBOL_ERR_RATE_MEAS" pos="3" type="boolean"/>
+ <bitfield name="PRBS7" pos="4" type="boolean"/>
+ <bitfield name="CUSTOM_80_BIT_PATTERN" pos="5" type="boolean"/>
+ <bitfield name="SEND_VIDEO" pos="6" type="boolean"/>
+ <bitfield name="PUSH_IDLE" pos="7" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x000c" name="CONFIGURATION_CTRL">
+ <!-- next two may be swapped? -->
+ <bitfield name="SYNC_CLK" pos="0" type="boolean"/>
+ <bitfield name="STATIC_MVID" pos="1" type="boolean"/>
+ <bitfield name="PROGRESSIVE" pos="2" type="boolean"/>
+ <!-- # of lanes minus one: -->
+ <bitfield name="LANES" low="4" high="5" type="uint"/>
+ <bitfield name="ENHANCED_FRAMING" pos="6" type="boolean"/>
+ <!--
+ NOTE: only 6bit and 8bit valid
+ -->
+ <bitfield name="COLOR" pos="8" type="edp_color_depth"/>
+ </reg32>
+
+ <reg32 offset="0x0014" name="SOFTWARE_MVID" type="uint"/>
+ <reg32 offset="0x0018" name="SOFTWARE_NVID" type="uint"/>
+
+ <reg32 offset="0x001c" name="TOTAL_HOR_VER">
+ <bitfield name="HORIZ" low="0" high="15" type="uint"/>
+ <bitfield name="VERT" low="16" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x0020" name="START_HOR_VER_FROM_SYNC">
+ <bitfield name="HORIZ" low="0" high="15" type="uint"/>
+ <bitfield name="VERT" low="16" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x0024" name="HSYNC_VSYNC_WIDTH_POLARITY">
+ <bitfield name="HORIZ" low="0" high="14" type="uint"/>
+ <bitfield name="NHSYNC" pos="15" type="boolean"/>
+ <bitfield name="VERT" low="16" high="30" type="uint"/>
+ <bitfield name="NVSYNC" pos="31" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0028" name="ACTIVE_HOR_VER">
+ <bitfield name="HORIZ" low="0" high="15" type="uint"/>
+ <bitfield name="VERT" low="16" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x002c" name="MISC1_MISC0">
+ <!-- MISC0 from DisplayPort v1.2 spec: -->
+ <bitfield name="MISC0" low="0" high="7"/>
+ <!-- aliased MISC0 bitfields: -->
+ <bitfield name="SYNC" pos="0" type="boolean"/>
+ <bitfield name="COMPONENT_FORMAT" low="1" high="2" type="edp_component_format"/>
+ <!-- CEA (vs VESA) color range: -->
+ <bitfield name="CEA" pos="3" type="boolean"/>
+ <!-- YCbCr Colorimetry ITU-R BT709-5 (vs ITU-R BT601-5): -->
+ <bitfield name="BT709_5" pos="4" type="boolean"/>
+ <bitfield name="COLOR" low="5" high="7" type="edp_color_depth"/>
+
+ <!-- MISC1 from DisplayPort v1.2 spec: -->
+ <bitfield name="MISC1" low="8" high="15"/>
+ <!-- aliased MISC1 bitfields: -->
+ <bitfield name="INTERLACED_ODD" pos="8" type="boolean"/>
+ <bitfield name="STEREO" low="9" high="10" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x0074" name="PHY_CTRL">
+ <bitfield name="SW_RESET_PLL" pos="0" type="boolean"/>
+ <bitfield name="SW_RESET" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0084" name="MAINLINK_READY">
+ <bitfield name="TRAIN_PATTERN_1_READY" pos="3" type="boolean"/>
+ <bitfield name="TRAIN_PATTERN_2_READY" pos="4" type="boolean"/>
+ <bitfield name="TRAIN_PATTERN_3_READY" pos="5" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0300" name="AUX_CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="RESET" pos="1" type="boolean"/>
+ </reg32>
+
+ <!-- interrupt registers come in sets of 3 bits, status/ack/en -->
+ <reg32 offset="0x0308" name="INTERRUPT_REG_1">
+ <bitfield name="HPD" pos="0" type="boolean"/>
+ <bitfield name="HPD_ACK" pos="1" type="boolean"/>
+ <bitfield name="HPD_EN" pos="2" type="boolean"/>
+ <bitfield name="AUX_I2C_DONE" pos="3" type="boolean"/>
+ <bitfield name="AUX_I2C_DONE_ACK" pos="4" type="boolean"/>
+ <bitfield name="AUX_I2C_DONE_EN" pos="5" type="boolean"/>
+ <bitfield name="WRONG_ADDR" pos="6" type="boolean"/>
+ <bitfield name="WRONG_ADDR_ACK" pos="7" type="boolean"/>
+ <bitfield name="WRONG_ADDR_EN" pos="8" type="boolean"/>
+ <bitfield name="TIMEOUT" pos="9" type="boolean"/>
+ <bitfield name="TIMEOUT_ACK" pos="10" type="boolean"/>
+ <bitfield name="TIMEOUT_EN" pos="11" type="boolean"/>
+ <bitfield name="NACK_DEFER" pos="12" type="boolean"/>
+ <bitfield name="NACK_DEFER_ACK" pos="13" type="boolean"/>
+ <bitfield name="NACK_DEFER_EN" pos="14" type="boolean"/>
+ <bitfield name="WRONG_DATA_CNT" pos="15" type="boolean"/>
+ <bitfield name="WRONG_DATA_CNT_ACK" pos="16" type="boolean"/>
+ <bitfield name="WRONG_DATA_CNT_EN" pos="17" type="boolean"/>
+ <bitfield name="I2C_NACK" pos="18" type="boolean"/>
+ <bitfield name="I2C_NACK_ACK" pos="19" type="boolean"/>
+ <bitfield name="I2C_NACK_EN" pos="20" type="boolean"/>
+ <bitfield name="I2C_DEFER" pos="21" type="boolean"/>
+ <bitfield name="I2C_DEFER_ACK" pos="22" type="boolean"/>
+ <bitfield name="I2C_DEFER_EN" pos="23" type="boolean"/>
+ <bitfield name="PLL_UNLOCK" pos="24" type="boolean"/>
+ <bitfield name="PLL_UNLOCK_ACK" pos="25" type="boolean"/>
+ <bitfield name="PLL_UNLOCK_EN" pos="26" type="boolean"/>
+ <bitfield name="AUX_ERROR" pos="27" type="boolean"/>
+ <bitfield name="AUX_ERROR_ACK" pos="28" type="boolean"/>
+ <bitfield name="AUX_ERROR_EN" pos="29" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x030c" name="INTERRUPT_REG_2">
+ <bitfield name="READY_FOR_VIDEO" pos="0" type="boolean"/>
+ <bitfield name="READY_FOR_VIDEO_ACK" pos="1" type="boolean"/>
+ <bitfield name="READY_FOR_VIDEO_EN" pos="2" type="boolean"/>
+ <bitfield name="IDLE_PATTERNs_SENT" pos="3" type="boolean"/>
+ <bitfield name="IDLE_PATTERNs_SENT_ACK" pos="4" type="boolean"/>
+ <bitfield name="IDLE_PATTERNs_SENT_EN" pos="5" type="boolean"/>
+ <bitfield name="FRAME_END" pos="9" type="boolean"/>
+ <bitfield name="FRAME_END_ACK" pos="7" type="boolean"/>
+ <bitfield name="FRAME_END_EN" pos="8" type="boolean"/>
+ <bitfield name="CRC_UPDATED" pos="9" type="boolean"/>
+ <bitfield name="CRC_UPDATED_ACK" pos="10" type="boolean"/>
+ <bitfield name="CRC_UPDATED_EN" pos="11" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0310" name="INTERRUPT_TRANS_NUM"/>
+ <reg32 offset="0x0314" name="AUX_DATA">
+ <bitfield name="READ" pos="0" type="boolean"/>
+ <bitfield name="DATA" low="8" high="15"/>
+ <bitfield name="INDEX" low="16" high="23"/>
+ <bitfield name="INDEX_WRITE" pos="31" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0318" name="AUX_TRANS_CTRL">
+ <bitfield name="I2C" pos="8" type="boolean"/>
+ <bitfield name="GO" pos="9" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0324" name="AUX_STATUS"/>
+</domain>
+
+<domain name="EDP_PHY" width="32">
+ <array offset="0x0400" name="LN" length="4" stride="0x40">
+ <reg32 offset="0x04" name="PD_CTL"/>
+ </array>
+ <reg32 offset="0x0510" name="GLB_VM_CFG0"/>
+ <reg32 offset="0x0514" name="GLB_VM_CFG1"/>
+ <reg32 offset="0x0518" name="GLB_MISC9"/>
+ <reg32 offset="0x0528" name="GLB_CFG"/>
+ <reg32 offset="0x052c" name="GLB_PD_CTL"/>
+ <reg32 offset="0x0598" name="GLB_PHY_STATUS"/>
+</domain>
+
+<domain name="EDP_28nm_PHY_PLL" width="32">
+ <reg32 offset="0x00000" name="REFCLK_CFG"/>
+ <reg32 offset="0x00004" name="POSTDIV1_CFG"/>
+ <reg32 offset="0x00008" name="CHGPUMP_CFG"/>
+ <reg32 offset="0x0000C" name="VCOLPF_CFG"/>
+ <reg32 offset="0x00010" name="VREG_CFG"/>
+ <reg32 offset="0x00014" name="PWRGEN_CFG"/>
+ <reg32 offset="0x00018" name="DMUX_CFG"/>
+ <reg32 offset="0x0001C" name="AMUX_CFG"/>
+ <reg32 offset="0x00020" name="GLB_CFG">
+ <bitfield name="PLL_PWRDN_B" pos="0" type="boolean"/>
+ <bitfield name="PLL_LDO_PWRDN_B" pos="1" type="boolean"/>
+ <bitfield name="PLL_PWRGEN_PWRDN_B" pos="2" type="boolean"/>
+ <bitfield name="PLL_ENABLE" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00024" name="POSTDIV2_CFG"/>
+ <reg32 offset="0x00028" name="POSTDIV3_CFG"/>
+ <reg32 offset="0x0002C" name="LPFR_CFG"/>
+ <reg32 offset="0x00030" name="LPFC1_CFG"/>
+ <reg32 offset="0x00034" name="LPFC2_CFG"/>
+ <reg32 offset="0x00038" name="SDM_CFG0"/>
+ <reg32 offset="0x0003C" name="SDM_CFG1"/>
+ <reg32 offset="0x00040" name="SDM_CFG2"/>
+ <reg32 offset="0x00044" name="SDM_CFG3"/>
+ <reg32 offset="0x00048" name="SDM_CFG4"/>
+ <reg32 offset="0x0004C" name="SSC_CFG0"/>
+ <reg32 offset="0x00050" name="SSC_CFG1"/>
+ <reg32 offset="0x00054" name="SSC_CFG2"/>
+ <reg32 offset="0x00058" name="SSC_CFG3"/>
+ <reg32 offset="0x0005C" name="LKDET_CFG0"/>
+ <reg32 offset="0x00060" name="LKDET_CFG1"/>
+ <reg32 offset="0x00064" name="LKDET_CFG2"/>
+ <reg32 offset="0x00068" name="TEST_CFG">
+ <bitfield name="PLL_SW_RESET" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0006C" name="CAL_CFG0"/>
+ <reg32 offset="0x00070" name="CAL_CFG1"/>
+ <reg32 offset="0x00074" name="CAL_CFG2"/>
+ <reg32 offset="0x00078" name="CAL_CFG3"/>
+ <reg32 offset="0x0007C" name="CAL_CFG4"/>
+ <reg32 offset="0x00080" name="CAL_CFG5"/>
+ <reg32 offset="0x00084" name="CAL_CFG6"/>
+ <reg32 offset="0x00088" name="CAL_CFG7"/>
+ <reg32 offset="0x0008C" name="CAL_CFG8"/>
+ <reg32 offset="0x00090" name="CAL_CFG9"/>
+ <reg32 offset="0x00094" name="CAL_CFG10"/>
+ <reg32 offset="0x00098" name="CAL_CFG11"/>
+ <reg32 offset="0x0009C" name="EFUSE_CFG"/>
+ <reg32 offset="0x000A0" name="DEBUG_BUS_SEL"/>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
new file mode 100644
index 000000000000..6c81581016c7
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -0,0 +1,1015 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<!--
+ NOTE: also see mdss_hdmi_util.h.. newer devices using MDSS appear
+ to have the same HDMI block (or maybe a newer version?) but for
+ some reason duplicate the code under drivers/video/msm/mdss
+ -->
+
+<domain name="HDMI" width="32">
+ <enum name="hdmi_hdcp_key_state">
+ <value name="HDCP_KEYS_STATE_NO_KEYS" value="0"/>
+ <value name="HDCP_KEYS_STATE_NOT_CHECKED" value="1"/>
+ <value name="HDCP_KEYS_STATE_CHECKING" value="2"/>
+ <value name="HDCP_KEYS_STATE_VALID" value="3"/>
+ <value name="HDCP_KEYS_STATE_AKSV_NOT_VALID" value="4"/>
+ <value name="HDCP_KEYS_STATE_CHKSUM_MISMATCH" value="5"/>
+ <value name="HDCP_KEYS_STATE_PROD_AKSV" value="6"/>
+ <value name="HDCP_KEYS_STATE_RESERVED" value="7"/>
+ </enum>
+ <enum name="hdmi_ddc_read_write">
+ <value name="DDC_WRITE" value="0"/>
+ <value name="DDC_READ" value="1"/>
+ </enum>
+ <enum name="hdmi_acr_cts">
+ <value name="ACR_NONE" value="0"/>
+ <value name="ACR_32" value="1"/>
+ <value name="ACR_44" value="2"/>
+ <value name="ACR_48" value="3"/>
+ </enum>
+
+ <enum name="hdmi_cec_tx_status">
+ <value name="CEC_TX_OK" value="0"/>
+ <value name="CEC_TX_NACK" value="1"/>
+ <value name="CEC_TX_ARB_LOSS" value="2"/>
+ <value name="CEC_TX_MAX_RETRIES" value="3"/>
+ </enum>
+
+ <reg32 offset="0x00000" name="CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="HDMI" pos="1" type="boolean"/>
+ <bitfield name="ENCRYPTED" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00020" name="AUDIO_PKT_CTRL1">
+ <bitfield name="AUDIO_SAMPLE_SEND" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00024" name="ACR_PKT_CTRL">
+ <!--
+ Guessing on the order of bitfields from these comments:
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pck_ctrl_reg |= 0x80000100;
+ /* N_MULTIPLE(multiplier) */
+ acr_pck_ctrl_reg |= (multiplier & 7) << 16;
+ /* SEND | CONT */
+ acr_pck_ctrl_reg |= 0x00000003;
+ -->
+ <bitfield name="CONT" pos="0" type="boolean"/>
+ <bitfield name="SEND" pos="1" type="boolean"/>
+ <bitfield name="SELECT" low="4" high="5" type="hdmi_acr_cts"/>
+ <bitfield name="SOURCE" pos="8" type="boolean"/>
+ <bitfield name="N_MULTIPLIER" low="16" high="18" type="uint"/>
+ <bitfield name="AUDIO_PRIORITY" pos="31" type="boolean"/>
+ </reg32>
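+ <!--
+ Cross-checking the guessed layout against the quoted legacy writes:
+ 0x80000100 is AUDIO_PRIORITY (bit 31) plus SOURCE (bit 8), the
+ (multiplier & 7) << 16 lands in N_MULTIPLIER, and 0x00000003 is
+ SEND | CONT.  A small worked example using the positions above
+ (hdmi_write() is a hypothetical placeholder):
+
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+
+     #define HDMI_ACR_PKT_CTRL 0x00024
+
+     static void hdmi_acr_setup(unsigned multiplier, unsigned select)
+     {
+         uint32_t v = 0;
+
+         v |= 1u << 0;                   /* CONT: resend every frame */
+         v |= 1u << 1;                   /* SEND */
+         v |= (select & 0x3) << 4;       /* SELECT (hdmi_acr_cts) */
+         v |= 1u << 8;                   /* SOURCE */
+         v |= (multiplier & 0x7) << 16;  /* N_MULTIPLIER */
+         v |= 1u << 31;                  /* AUDIO_PRIORITY */
+
+         hdmi_write(HDMI_ACR_PKT_CTRL, v);
+     }
+ -->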
+ <reg32 offset="0x0028" name="VBI_PKT_CTRL">
+ <!--
+ Guessing on the order of bits from:
+ /* GC packet enable (every frame) */
+ /* HDMI_VBI_PKT_CTRL[0x0028] */
+ hdmi_msm_rmw32or(0x0028, 3 << 4);
+ /* HDMI_VBI_PKT_CTRL[0x0028] */
+ /* ISRC Send + Continuous */
+ hdmi_msm_rmw32or(0x0028, 3 << 8);
+ /* HDMI_VBI_PKT_CTRL[0x0028] */
+ /* ACP send, s/w source */
+ hdmi_msm_rmw32or(0x0028, 3 << 12);
+ -->
+ <bitfield name="GC_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="GC_EVERY_FRAME" pos="5" type="boolean"/>
+ <bitfield name="ISRC_SEND" pos="8" type="boolean"/>
+ <bitfield name="ISRC_CONTINUOUS" pos="9" type="boolean"/>
+ <bitfield name="ACP_SEND" pos="12" type="boolean"/>
+ <bitfield name="ACP_SRC_SW" pos="13" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0002c" name="INFOFRAME_CTRL0">
+ <!--
+ Guessing on the order of these flags, from this comment:
+ /* Set these flags */
+ /* AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT
+ | AUDIO_INFO_SEND */
+ audio_info_ctrl_reg |= 0x000000F0;
+ /* 0x3 for AVI InfFrame enable (every frame) */
+ HDMI_OUTP(0x002C, HDMI_INP(0x002C) | 0x00000003L);
+ -->
+ <bitfield name="AVI_SEND" pos="0" type="boolean"/>
+ <bitfield name="AVI_CONT" pos="1" type="boolean"/> <!-- every frame -->
+ <bitfield name="AUDIO_INFO_SEND" pos="4" type="boolean"/>
+ <bitfield name="AUDIO_INFO_CONT" pos="5" type="boolean"/> <!-- every frame -->
+ <bitfield name="AUDIO_INFO_SOURCE" pos="6" type="boolean"/>
+ <bitfield name="AUDIO_INFO_UPDATE" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00030" name="INFOFRAME_CTRL1">
+ <bitfield name="AVI_INFO_LINE" low="0" high="5" type="uint"/>
+ <bitfield name="AUDIO_INFO_LINE" low="8" high="13" type="uint"/>
+ <bitfield name="MPEG_INFO_LINE" low="16" high="21" type="uint"/>
+ <bitfield name="VENSPEC_INFO_LINE" low="24" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00034" name="GEN_PKT_CTRL">
+ <!--
+ 0x0034 GEN_PKT_CTRL
+ GENERIC0_SEND 0 0 = Disable Generic0 Packet Transmission
+ 1 = Enable Generic0 Packet Transmission
+ GENERIC0_CONT 1 0 = Send Generic0 Packet on next frame only
+ 1 = Send Generic0 Packet on every frame
+ GENERIC0_UPDATE 2 NUM
+ GENERIC1_SEND 4 0 = Disable Generic1 Packet Transmission
+ 1 = Enable Generic1 Packet Transmission
+ GENERIC1_CONT 5 0 = Send Generic1 Packet on next frame only
+ 1 = Send Generic1 Packet on every frame
+ GENERIC0_LINE 21:16 NUM
+ GENERIC1_LINE 29:24 NUM
+
+ GENERIC0_LINE | GENERIC0_UPDATE | GENERIC0_CONT | GENERIC0_SEND
+ Setup HDMI TX generic packet control
+ Enable this packet to transmit every frame
+ Enable this packet to transmit every frame
+ Enable HDMI TX engine to transmit Generic packet 0
+ HDMI_OUTP(0x0034, (1 << 16) | (1 << 2) | BIT(1) | BIT(0));
+ -->
+ <bitfield name="GENERIC0_SEND" pos="0" type="boolean"/>
+ <bitfield name="GENERIC0_CONT" pos="1" type="boolean"/>
+ <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? -->
+ <bitfield name="GENERIC1_SEND" pos="4" type="boolean"/>
+ <bitfield name="GENERIC1_CONT" pos="5" type="boolean"/>
+ <bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/>
+ <bitfield name="GENERIC1_LINE" low="24" high="29" type="uint"/>
+ </reg32>
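+ <!--
+ The quoted HDMI_OUTP(0x0034, (1 << 16) | (1 << 2) | BIT(1) | BIT(0))
+ decodes with the fields above as GENERIC0_LINE = 1, GENERIC0_UPDATE = 1,
+ GENERIC0_CONT and GENERIC0_SEND.  The same value, spelled out
+ (hdmi_write() is a hypothetical placeholder):
+
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+
+     #define HDMI_GEN_PKT_CTRL 0x00034
+
+     static void hdmi_generic0_enable(unsigned line)
+     {
+         uint32_t v = 0;
+
+         v |= 1u << 0;              /* GENERIC0_SEND */
+         v |= 1u << 1;              /* GENERIC0_CONT: every frame */
+         v |= 1u << 2;              /* GENERIC0_UPDATE */
+         v |= (line & 0x3f) << 16;  /* GENERIC0_LINE */
+
+         hdmi_write(HDMI_GEN_PKT_CTRL, v);  /* line=1 reproduces the quote */
+     }
+ -->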
+ <reg32 offset="0x00040" name="GC">
+ <bitfield name="MUTE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00044" name="AUDIO_PKT_CTRL2">
+ <bitfield name="OVERRIDE" pos="0" type="boolean"/>
+ <bitfield name="LAYOUT" pos="1" type="boolean"/> <!-- 1 for >2 channels -->
+ </reg32>
+
+ <!--
+ AVI_INFO appears to be the infoframe in a slightly weird order..
+ starts with PB0 (checksum), and ends with version..
+ -->
+ <reg32 offset="0x0006c" name="AVI_INFO" stride="4" length="4"/>
+
+ <reg32 offset="0x00084" name="GENERIC0_HDR"/>
+ <reg32 offset="0x00088" name="GENERIC0" stride="4" length="7"/>
+
+ <reg32 offset="0x000a4" name="GENERIC1_HDR"/>
+ <reg32 offset="0x000a8" name="GENERIC1" stride="4" length="7"/>
+
+ <!--
+ TODO add a way to show symbolic offsets into array: hdmi_acr_cts-1
+ -->
+ <array offset="0x00c4" name="ACR" length="3" stride="8" index="hdmi_acr_cts">
+ <reg32 offset="0" name="0">
+ <bitfield name="CTS" low="12" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="4" name="1">
+ <!-- not sure of the actual # of bits.. -->
+ <bitfield name="N" low="0" high="31" type="uint"/>
+ </reg32>
+ </array>
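+ <!--
+ For reference, audio clock regeneration works out as
+ 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs).
+ Worked example with the HDMI spec's recommended N (not values taken
+ from this file): 48 kHz audio, N = 6144, f_TMDS = 148.5 MHz gives
+ CTS = 148500000 * 6144 / (128 * 48000) = 148500, which would go into
+ ACR[ACR_48].0 with N = 6144 in ACR[ACR_48].1.
+
+     #include <stdint.h>
+
+     /* CTS = f_TMDS * N / (128 * fs) */
+     static uint32_t hdmi_acr_cts_value(uint64_t tmds_hz, uint32_t n, uint32_t fs_hz)
+     {
+         return (uint32_t)(tmds_hz * n / (128ull * fs_hz));
+     }
+ -->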
+
+ <reg32 offset="0x000e4" name="AUDIO_INFO0">
+ <bitfield name="CHECKSUM" low="0" high="7"/>
+ <bitfield name="CC" low="8" high="10" type="uint"/> <!-- channel count -->
+ </reg32>
+ <reg32 offset="0x000e8" name="AUDIO_INFO1">
+ <bitfield name="CA" low="0" high="7"/> <!-- Channel Allocation -->
+ <bitfield name="LSV" low="11" high="14"/> <!-- Level Shift -->
+ <bitfield name="DM_INH" pos="15" type="boolean"/> <!-- down-mix inhibit flag -->
+ </reg32>
+ <reg32 offset="0x00110" name="HDCP_CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ENCRYPTION_ENABLE" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00114" name="HDCP_DEBUG_CTRL">
+ <bitfield name="RNG_CIPHER" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00118" name="HDCP_INT_CTRL">
+ <bitfield name="AUTH_SUCCESS_INT" pos="0" type="boolean"/>
+ <bitfield name="AUTH_SUCCESS_ACK" pos="1" type="boolean"/>
+ <bitfield name="AUTH_SUCCESS_MASK" pos="2" type="boolean"/>
+ <bitfield name="AUTH_FAIL_INT" pos="4" type="boolean"/>
+ <bitfield name="AUTH_FAIL_ACK" pos="5" type="boolean"/>
+ <bitfield name="AUTH_FAIL_MASK" pos="6" type="boolean"/>
+ <bitfield name="AUTH_FAIL_INFO_ACK" pos="7" type="boolean"/>
+ <bitfield name="AUTH_XFER_REQ_INT" pos="8" type="boolean"/>
+ <bitfield name="AUTH_XFER_REQ_ACK" pos="9" type="boolean"/>
+ <bitfield name="AUTH_XFER_REQ_MASK" pos="10" type="boolean"/>
+ <bitfield name="AUTH_XFER_DONE_INT" pos="12" type="boolean"/>
+ <bitfield name="AUTH_XFER_DONE_ACK" pos="13" type="boolean"/>
+ <bitfield name="AUTH_XFER_DONE_MASK" pos="14" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0011c" name="HDCP_LINK0_STATUS">
+ <bitfield name="AN_0_READY" pos="8" type="boolean"/>
+ <bitfield name="AN_1_READY" pos="9" type="boolean"/>
+ <bitfield name="RI_MATCHES" pos="12" type="boolean"/>
+ <bitfield name="V_MATCHES" pos="20" type="boolean"/>
+ <bitfield name="KEY_STATE" low="28" high="30" type="hdmi_hdcp_key_state"/>
+ </reg32>
+ <reg32 offset="0x00120" name="HDCP_DDC_CTRL_0">
+ <bitfield name="DISABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00124" name="HDCP_DDC_CTRL_1">
+ <bitfield name="FAILED_ACK" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00128" name="HDCP_DDC_STATUS">
+ <bitfield name="XFER_REQ" pos="4" type="boolean"/>
+ <bitfield name="XFER_DONE" pos="10" type="boolean"/>
+ <bitfield name="ABORTED" pos="12" type="boolean"/>
+ <bitfield name="TIMEOUT" pos="13" type="boolean"/>
+ <bitfield name="NACK0" pos="14" type="boolean"/>
+ <bitfield name="NACK1" pos="15" type="boolean"/>
+ <bitfield name="FAILED" pos="16" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x0012c" name="HDCP_ENTROPY_CTRL0"/>
+ <reg32 offset="0x0025c" name="HDCP_ENTROPY_CTRL1"/>
+
+ <reg32 offset="0x00130" name="HDCP_RESET">
+ <bitfield name="LINK0_DEAUTHENTICATE" pos="0" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00134" name="HDCP_RCVPORT_DATA0"/>
+ <reg32 offset="0x00138" name="HDCP_RCVPORT_DATA1"/>
+ <reg32 offset="0x0013C" name="HDCP_RCVPORT_DATA2_0"/>
+ <reg32 offset="0x00140" name="HDCP_RCVPORT_DATA2_1"/>
+ <reg32 offset="0x00144" name="HDCP_RCVPORT_DATA3"/>
+ <reg32 offset="0x00148" name="HDCP_RCVPORT_DATA4"/>
+ <reg32 offset="0x0014c" name="HDCP_RCVPORT_DATA5"/>
+ <reg32 offset="0x00150" name="HDCP_RCVPORT_DATA6"/>
+ <reg32 offset="0x00154" name="HDCP_RCVPORT_DATA7"/>
+ <reg32 offset="0x00158" name="HDCP_RCVPORT_DATA8"/>
+ <reg32 offset="0x0015c" name="HDCP_RCVPORT_DATA9"/>
+ <reg32 offset="0x00160" name="HDCP_RCVPORT_DATA10"/>
+ <reg32 offset="0x00164" name="HDCP_RCVPORT_DATA11"/>
+ <reg32 offset="0x00168" name="HDCP_RCVPORT_DATA12"/>
+
+ <reg32 offset="0x0016c" name="VENSPEC_INFO0"/>
+ <reg32 offset="0x00170" name="VENSPEC_INFO1"/>
+ <reg32 offset="0x00174" name="VENSPEC_INFO2"/>
+ <reg32 offset="0x00178" name="VENSPEC_INFO3"/>
+ <reg32 offset="0x0017c" name="VENSPEC_INFO4"/>
+ <reg32 offset="0x00180" name="VENSPEC_INFO5"/>
+ <reg32 offset="0x00184" name="VENSPEC_INFO6"/>
+
+ <reg32 offset="0x001d0" name="AUDIO_CFG">
+ <bitfield name="ENGINE_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="FIFO_WATERMARK" low="4" high="7" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00208" name="USEC_REFTIMER"/>
+ <reg32 offset="0x0020c" name="DDC_CTRL">
+ <!--
+ 0x020C HDMI_DDC_CTRL
+ [21:20] TRANSACTION_CNT
+ Number of transactions to be done in current transfer.
+ * 0x0: transaction0 only
+ * 0x1: transaction0, transaction1
+ * 0x2: transaction0, transaction1, transaction2
+ * 0x3: transaction0, transaction1, transaction2, transaction3
+ [3] SW_STATUS_RESET
+ Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
+ ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
+ STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
+ [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
+ data) at start of transfer. This sequence is sent after GO is
+ written to 1, before the first transaction only.
+ [1] SOFT_RESET Write 1 to reset DDC controller
+ [0] GO WRITE ONLY. Write 1 to start DDC transfer.
+ -->
+ <bitfield name="GO" pos="0" type="boolean"/>
+ <bitfield name="SOFT_RESET" pos="1" type="boolean"/>
+ <bitfield name="SEND_RESET" pos="2" type="boolean"/>
+ <bitfield name="SW_STATUS_RESET" pos="3" type="boolean"/>
+ <bitfield name="TRANSACTION_CNT" low="20" high="21" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00210" name="DDC_ARBITRATION">
+ <bitfield name="HW_ARBITRATION" pos="4" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00214" name="DDC_INT_CTRL">
+ <!--
+ HDMI_DDC_INT_CTRL[0x0214]
+ [2] SW_DONE_MK Mask bit for SW_DONE_INT. Set to 1 to enable
+ interrupt.
+ [1] SW_DONE_ACK WRITE ONLY. Acknowledge bit for SW_DONE_INT.
+ Write 1 to clear interrupt.
+ [0] SW_DONE_INT READ ONLY. SW_DONE interrupt status */
+ -->
+ <bitfield name="SW_DONE_INT" pos="0" type="boolean"/>
+ <bitfield name="SW_DONE_ACK" pos="1" type="boolean"/>
+ <bitfield name="SW_DONE_MASK" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00218" name="DDC_SW_STATUS">
+ <bitfield name="NACK0" pos="12" type="boolean"/>
+ <bitfield name="NACK1" pos="13" type="boolean"/>
+ <bitfield name="NACK2" pos="14" type="boolean"/>
+ <bitfield name="NACK3" pos="15" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0021c" name="DDC_HW_STATUS">
+ <bitfield name="DONE" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00220" name="DDC_SPEED">
+ <!--
+ 0x0220 HDMI_DDC_SPEED
+ [31:16] PRESCALE prescale = (m * xtal_frequency) /
+ (desired_i2c_speed), where m is multiply
+ factor, default: m = 1
+ [1:0] THRESHOLD Select threshold to use to determine whether value
+ sampled on SDA is a 1 or 0. Specified in terms of the ratio
+ between the number of sampled ones and the total number of times
+ SDA is sampled.
+ * 0x0: >0
+ * 0x1: 1/4 of total samples
+ * 0x2: 1/2 of total samples
+ * 0x3: 3/4 of total samples */
+ -->
+ <bitfield name="THRESHOLD" low="0" high="1" type="uint"/>
+ <bitfield name="PRESCALE" low="16" high="31" type="uint"/>
+ </reg32>
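+ <!--
+ Plugging numbers into the quoted prescale formula: with m = 1, a
+ 19.2 MHz crystal (an assumption, but a typical XO rate on these SoCs)
+ and the standard 100 kHz DDC/I2C rate, PRESCALE = 19200000 / 100000 = 192.
+
+     #include <stdint.h>
+
+     /* prescale = (m * xtal_hz) / i2c_hz, e.g. (1 * 19200000) / 100000 = 192 */
+     static uint16_t hdmi_ddc_prescale(uint32_t xtal_hz, uint32_t i2c_hz, uint32_t m)
+     {
+         return (uint16_t)((uint64_t)m * xtal_hz / i2c_hz);
+     }
+ -->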
+ <reg32 offset="0x00224" name="DDC_SETUP">
+ <!--
+ * 0x0224 HDMI_DDC_SETUP
+ * Setting 31:24 bits : Time units to wait before timeout
+ * when clock is being stalled by external sink device
+ -->
+ <bitfield name="TIMEOUT" low="24" high="31" type="uint"/>
+ </reg32>
+ <!-- Guessing length is 4, as elsewhere there are references to trans0 thru trans3 -->
+ <array offset="0x00228" name="I2C_TRANSACTION" length="4" stride="4">
+ <reg32 offset="0" name="REG">
+ <!--
+ 0x0228 HDMI_DDC_TRANS0
+ [23:16] CNT0 Byte count for first transaction (excluding the first
+ byte, which is usually the address).
+ [13] STOP0 Determines whether a stop bit will be sent after the first
+ transaction
+ * 0: NO STOP
+ * 1: STOP
+ [12] START0 Determines whether a start bit will be sent before the
+ first transaction
+ * 0: NO START
+ * 1: START
+ [8] STOP_ON_NACK0 Determines whether the current transfer will stop
+ if a NACK is received during the first transaction (current
+ transaction always stops).
+ * 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+ * 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+ [0] RW0 Read/write indicator for first transaction - set to 0 for
+ write, 1 for read. This bit only controls HDMI_DDC behaviour -
+ the R/W bit in the transaction is programmed into the DDC buffer
+ as the LSB of the address byte.
+ * 0: WRITE
+ * 1: READ
+ -->
+ <bitfield name="RW" pos="0" type="hdmi_ddc_read_write"/>
+ <bitfield name="STOP_ON_NACK" pos="8" type="boolean"/>
+ <bitfield name="START" pos="12" type="boolean"/>
+ <bitfield name="STOP" pos="13" type="boolean"/>
+ <bitfield name="CNT" low="16" high="23" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x00238" name="DDC_DATA">
+ <!--
+ 0x0238 HDMI_DDC_DATA
+ [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
+ 1 while writing HDMI_DDC_DATA.
+ [23:16] INDEX Use to set index into DDC buffer for next read or
+ current write, or to read index of current read or next write.
+ Writable only when INDEX_WRITE=1.
+ [15:8] DATA Use to fill or read the DDC buffer
+ [0] DATA_RW Select whether buffer access will be a read or write.
+ For writes, address auto-increments on write to HDMI_DDC_DATA.
+ For reads, address autoincrements on reads to HDMI_DDC_DATA.
+ * 0: Write
+ * 1: Read
+ -->
+ <bitfield name="DATA_RW" pos="0" type="hdmi_ddc_read_write"/>
+ <bitfield name="DATA" low="8" high="15" type="uint"/>
+ <bitfield name="INDEX" low="16" high="23" type="uint"/>
+ <bitfield name="INDEX_WRITE" pos="31" type="boolean"/>
+ </reg32>
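+ <!--
+ Putting DDC_CTRL, I2C_TRANSACTION and DDC_DATA together, an EDID-style
+ read could be staged roughly as below.  This is only a sketch of the
+ sequence the comments above imply, not the driver's actual code;
+ hdmi_write() is a hypothetical placeholder and status polling is left out.
+
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+
+     #define HDMI_DDC_CTRL     0x020c
+     #define HDMI_DDC_TRANS(n) (0x0228 + 4 * (n))
+     #define HDMI_DDC_DATA     0x0238
+
+     /* stage: write <offset> to addr 0xa0, repeated start, read 'len' bytes */
+     static void hdmi_ddc_kick_read(uint8_t offset, uint8_t len)
+     {
+         /* buffer: slave addr (write), register offset, slave addr (read) */
+         hdmi_write(HDMI_DDC_DATA, (1u << 31) | (0xa0u << 8)); /* INDEX_WRITE */
+         hdmi_write(HDMI_DDC_DATA, (uint32_t)offset << 8);
+         hdmi_write(HDMI_DDC_DATA, 0xa1u << 8);
+
+         /* trans0: write phase, START, STOP_ON_NACK, 1 byte after the address */
+         hdmi_write(HDMI_DDC_TRANS(0), (1u << 8) | (1u << 12) | (1u << 16));
+         /* trans1: read phase, RW=READ, repeated START, STOP, CNT=len */
+         hdmi_write(HDMI_DDC_TRANS(1),
+                    1u | (1u << 12) | (1u << 13) | ((uint32_t)len << 16));
+
+         /* TRANSACTION_CNT=1 (trans0+trans1), SEND_RESET, GO */
+         hdmi_write(HDMI_DDC_CTRL, (1u << 20) | (1u << 2) | (1u << 0));
+
+         /* after DDC_SW_STATUS/DDC_HW_STATUS report completion, the received
+            bytes come back out through DDC_DATA with DATA_RW=READ and
+            INDEX/INDEX_WRITE pointing at where they landed in the buffer */
+     }
+ -->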
+
+ <reg32 offset="0x0023c" name="HDCP_SHA_CTRL"/>
+ <reg32 offset="0x00240" name="HDCP_SHA_STATUS">
+ <bitfield name="BLOCK_DONE" pos="0" type="boolean"/>
+ <bitfield name="COMP_DONE" pos="4" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00244" name="HDCP_SHA_DATA">
+ <bitfield name="DONE" pos="0" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00250" name="HPD_INT_STATUS">
+ <bitfield name="INT" pos="0" type="boolean"/> <!-- an irq has occurred -->
+ <bitfield name="CABLE_DETECTED" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00254" name="HPD_INT_CTRL">
+ <!-- (this useful comment was removed in df6b645.. git archaeology is fun)
+ HPD_INT_CTRL[0x0254]
+ 31:10 Reserved
+ 9 RCV_PLUGIN_DET_MASK receiver plug in interrupt mask.
+ When programmed to 1,
+ RCV_PLUGIN_DET_INT will toggle
+ the interrupt line
+ 8:6 Reserved
+ 5 RX_INT_EN Panel RX interrupt enable
+ 0: Disable
+ 1: Enable
+ 4 RX_INT_ACK WRITE ONLY. Panel RX interrupt
+ ack
+ 3 Reserved
+ 2 INT_EN Panel interrupt control
+ 0: Disable
+ 1: Enable
+ 1 INT_POLARITY Panel interrupt polarity
+ 0: generate interrupt on disconnect
+ 1: generate interrupt on connect
+ 0 INT_ACK WRITE ONLY. Panel interrupt ack
+ -->
+ <bitfield name="INT_ACK" pos="0" type="boolean"/>
+ <bitfield name="INT_CONNECT" pos="1" type="boolean"/>
+ <bitfield name="INT_EN" pos="2" type="boolean"/>
+ <bitfield name="RX_INT_ACK" pos="4" type="boolean"/>
+ <bitfield name="RX_INT_EN" pos="5" type="boolean"/>
+ <bitfield name="RCV_PLUGIN_DET_MASK" pos="9" type="boolean"/>
+ </reg32>
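+ <!--
+ The ack/enable split above suggests the usual handling pattern: on an
+ HPD interrupt, read HPD_INT_STATUS, write INT_ACK back, and re-arm
+ INT_EN with INT_CONNECT picked from the current CABLE_DETECTED state so
+ the opposite edge fires next.  A sketch under those assumptions
+ (hdmi_read()/hdmi_write() are hypothetical placeholders):
+
+     #include <stdbool.h>
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+     extern uint32_t hdmi_read(uint32_t offset);               /* placeholder */
+
+     #define HDMI_HPD_INT_STATUS 0x0250
+     #define HDMI_HPD_INT_CTRL   0x0254
+
+     static bool hdmi_hpd_irq(void)
+     {
+         uint32_t status = hdmi_read(HDMI_HPD_INT_STATUS);
+         bool connected = status & (1u << 1);        /* CABLE_DETECTED */
+
+         if (!(status & (1u << 0)))                  /* INT: nothing pending */
+             return connected;
+
+         /* ack, keep INT_EN, flip INT_CONNECT to catch the opposite edge */
+         hdmi_write(HDMI_HPD_INT_CTRL,
+                    (1u << 0) |                      /* INT_ACK */
+                    (1u << 2) |                      /* INT_EN */
+                    (connected ? 0u : (1u << 1)));   /* INT_CONNECT */
+
+         return connected;
+     }
+ -->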
+ <reg32 offset="0x00258" name="HPD_CTRL">
+ <bitfield name="TIMEOUT" low="0" high="12" type="uint"/>
+ <bitfield name="ENABLE" pos="28" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0027c" name="DDC_REF">
+ <!--
+ 0x027C HDMI_DDC_REF
+ [16] REFTIMER_ENABLE Enable the timer
+ * 0: Disable
+ * 1: Enable
+ [15:0] REFTIMER Value to set the register in order to generate
+ DDC strobe. This register counts on HDCP application clock
+
+ /* Enable reference timer
+ * 27 micro-seconds */
+ HDMI_OUTP_ND(0x027C, (1 << 16) | (27 << 0));
+ -->
+ <bitfield name="REFTIMER_ENABLE" pos="16" type="boolean"/>
+ <bitfield name="REFTIMER" low="0" high="15" type="uint"/>
+ </reg32>
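+ <!--
+ The quoted HDMI_OUTP_ND(0x027C, (1 << 16) | (27 << 0)) is just
+ REFTIMER_ENABLE plus REFTIMER = 27 in symbolic form
+ (hdmi_write() is a hypothetical placeholder):
+
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+
+     #define HDMI_DDC_REF 0x027c
+
+     static void hdmi_ddc_ref_enable(void)
+     {
+         /* REFTIMER_ENABLE | REFTIMER(27), the quoted "27 micro-seconds" */
+         hdmi_write(HDMI_DDC_REF, (1u << 16) | 27u);
+     }
+ -->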
+
+ <reg32 offset="0x00284" name="HDCP_SW_UPPER_AKSV"/>
+ <reg32 offset="0x00288" name="HDCP_SW_LOWER_AKSV"/>
+
+ <reg32 offset="0x0028c" name="CEC_CTRL">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="SEND_TRIGGER" pos="1" type="boolean"/>
+ <bitfield name="FRAME_SIZE" low="4" high="8" type="uint"/>
+ <bitfield name="LINE_OE" pos="9" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00290" name="CEC_WR_DATA">
+ <bitfield name="BROADCAST" pos="0" type="boolean"/>
+ <bitfield name="DATA" low="8" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00294" name="CEC_RETRANSMIT">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="COUNT" low="1" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00298" name="CEC_STATUS">
+ <bitfield name="BUSY" pos="0" type="boolean"/>
+ <bitfield name="TX_FRAME_DONE" pos="3" type="boolean"/>
+ <bitfield name="TX_STATUS" low="4" high="7" type="hdmi_cec_tx_status"/>
+ </reg32>
+ <reg32 offset="0x0029c" name="CEC_INT">
+ <bitfield name="TX_DONE" pos="0" type="boolean"/>
+ <bitfield name="TX_DONE_MASK" pos="1" type="boolean"/>
+ <bitfield name="TX_ERROR" pos="2" type="boolean"/>
+ <bitfield name="TX_ERROR_MASK" pos="3" type="boolean"/>
+ <bitfield name="MONITOR" pos="4" type="boolean"/>
+ <bitfield name="MONITOR_MASK" pos="5" type="boolean"/>
+ <bitfield name="RX_DONE" pos="6" type="boolean"/>
+ <bitfield name="RX_DONE_MASK" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002a0" name="CEC_ADDR"/>
+ <reg32 offset="0x002a4" name="CEC_TIME">
+ <bitfield name="ENABLE" pos="0" type="boolean"/>
+ <bitfield name="SIGNAL_FREE_TIME" low="7" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002a8" name="CEC_REFTIMER">
+ <bitfield name="REFTIMER" low="0" high="15" type="uint"/>
+ <bitfield name="ENABLE" pos="16" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002ac" name="CEC_RD_DATA">
+ <bitfield name="DATA" low="0" high="7" type="uint"/>
+ <bitfield name="SIZE" low="8" high="12" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002b0" name="CEC_RD_FILTER"/>
+
+ <reg32 offset="0x002b4" name="ACTIVE_HSYNC">
+ <bitfield name="START" low="0" high="12" type="uint"/>
+ <bitfield name="END" low="16" high="27" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002b8" name="ACTIVE_VSYNC">
+ <bitfield name="START" low="0" high="12" type="uint"/>
+ <bitfield name="END" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002bc" name="VSYNC_ACTIVE_F2">
+ <!-- interlaced, frame 2 -->
+ <bitfield name="START" low="0" high="12" type="uint"/>
+ <bitfield name="END" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002c0" name="TOTAL">
+ <bitfield name="H_TOTAL" low="0" high="12" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002c4" name="VSYNC_TOTAL_F2">
+ <!-- interlaced, frame 2 -->
+ <bitfield name="V_TOTAL" low="0" high="12" type="uint"/>
+ </reg32>
+ <reg32 offset="0x002c8" name="FRAME_CTRL">
+ <bitfield name="RGB_MUX_SEL_BGR" pos="12" type="boolean"/>
+ <bitfield name="VSYNC_LOW" pos="28" type="boolean"/>
+ <bitfield name="HSYNC_LOW" pos="29" type="boolean"/>
+ <bitfield name="INTERLACED_EN" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002cc" name="AUD_INT">
+ <!--
+ HDMI_AUD_INT[0x02CC]
+ [3] AUD_SAM_DROP_MASK [R/W]
+ [2] AUD_SAM_DROP_ACK [W], AUD_SAM_DROP_INT [R]
+ [1] AUD_FIFO_URUN_MASK [R/W]
+ [0] AUD_FIFO_URUN_ACK [W], AUD_FIFO_URUN_INT [R]
+ -->
+ <bitfield name="AUD_FIFO_URUN_INT" pos="0" type="boolean"/> <!-- write to ack irq -->
+ <bitfield name="AUD_FIFO_URAN_MASK" pos="1" type="boolean"/> <!-- r/w, enables irq -->
+ <bitfield name="AUD_SAM_DROP_INT" pos="2" type="boolean"/> <!-- write to ack irq -->
+ <bitfield name="AUD_SAM_DROP_MASK" pos="3" type="boolean"/> <!-- r/w, enables irq -->
+ </reg32>
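+ <!--
+ Per the quoted comment the even bits read as status and ack when
+ written, while the odd bits are plain read/write masks, so a handler
+ can simply write back the value it read.  A sketch
+ (hdmi_read()/hdmi_write() are hypothetical placeholders):
+
+     #include <stdint.h>
+
+     extern void hdmi_write(uint32_t offset, uint32_t value);  /* placeholder */
+     extern uint32_t hdmi_read(uint32_t offset);               /* placeholder */
+
+     #define HDMI_AUD_INT 0x02cc
+
+     static unsigned underruns, sample_drops;
+
+     static void hdmi_audio_irq(void)
+     {
+         uint32_t status = hdmi_read(HDMI_AUD_INT);
+
+         if (status & (1u << 0))          /* AUD_FIFO_URUN_INT */
+             underruns++;
+         if (status & (1u << 2))          /* AUD_SAM_DROP_INT */
+             sample_drops++;
+
+         /* writing it back acks the INT bits that fired, keeps the MASK bits */
+         hdmi_write(HDMI_AUD_INT, status);
+     }
+ -->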
+ <reg32 offset="0x002d4" name="PHY_CTRL">
+ <!--
+ in hdmi_phy_reset() it appears to toggle SW_RESET/
+ SW_RESET_PLL based on the value of the corresponding _LOW bit,
+ so I'm guessing the _LOW bits are polarity bits
+ -->
+ <bitfield name="SW_RESET_PLL" pos="0" type="boolean"/>
+ <bitfield name="SW_RESET_PLL_LOW" pos="1" type="boolean"/>
+ <bitfield name="SW_RESET" pos="2" type="boolean"/>
+ <bitfield name="SW_RESET_LOW" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x002dc" name="CEC_WR_RANGE"/>
+ <reg32 offset="0x002e0" name="CEC_RD_RANGE"/>
+ <reg32 offset="0x002e4" name="VERSION"/>
+ <reg32 offset="0x00360" name="CEC_COMPL_CTL"/>
+ <reg32 offset="0x00364" name="CEC_RD_START_RANGE"/>
+ <reg32 offset="0x00368" name="CEC_RD_TOTAL_RANGE"/>
+ <reg32 offset="0x0036c" name="CEC_RD_ERR_RESP_LO"/>
+ <reg32 offset="0x00370" name="CEC_WR_CHECK_CONFIG"/>
+
+</domain>
+
+<domain name="HDMI_8x60" width="32">
+ <reg32 offset="0x00000" name="PHY_REG0">
+ <bitfield name="DESER_DEL_CTRL" low="2" high="4" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00004" name="PHY_REG1">
+ <bitfield name="DTEST_MUX_SEL" low="4" high="7" type="uint"/>
+ <bitfield name="OUTVOL_SWING_CTRL" low="0" high="3" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00008" name="PHY_REG2">
+ <bitfield name="PD_DESER" pos="0" type="boolean"/>
+ <bitfield name="PD_DRIVE_1" pos="1" type="boolean"/>
+ <bitfield name="PD_DRIVE_2" pos="2" type="boolean"/>
+ <bitfield name="PD_DRIVE_3" pos="3" type="boolean"/>
+ <bitfield name="PD_DRIVE_4" pos="4" type="boolean"/>
+ <bitfield name="PD_PLL" pos="5" type="boolean"/>
+ <bitfield name="PD_PWRGEN" pos="6" type="boolean"/>
+ <bitfield name="RCV_SENSE_EN" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0000c" name="PHY_REG3">
+ <bitfield name="PLL_ENABLE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00010" name="PHY_REG4"/>
+ <reg32 offset="0x00014" name="PHY_REG5"/>
+ <reg32 offset="0x00018" name="PHY_REG6"/>
+ <reg32 offset="0x0001c" name="PHY_REG7"/>
+ <reg32 offset="0x00020" name="PHY_REG8"/>
+ <reg32 offset="0x00024" name="PHY_REG9"/>
+ <reg32 offset="0x00028" name="PHY_REG10"/>
+ <reg32 offset="0x0002c" name="PHY_REG11"/>
+ <reg32 offset="0x00030" name="PHY_REG12">
+ <bitfield name="RETIMING_EN" pos="0" type="boolean"/>
+ <bitfield name="PLL_LOCK_DETECT_EN" pos="1" type="boolean"/>
+ <bitfield name="FORCE_LOCK" pos="4" type="boolean"/>
+ </reg32>
+</domain>
+
+<domain name="HDMI_8960" width="32">
+ <!--
+ some of the bitfields may be the same as 8x60.. but there are no helpful comments
+ in msm_dss_io_8960.c
+ -->
+ <reg32 offset="0x00000" name="PHY_REG0"/>
+ <reg32 offset="0x00004" name="PHY_REG1"/>
+ <reg32 offset="0x00008" name="PHY_REG2"/>
+ <reg32 offset="0x0000c" name="PHY_REG3"/>
+ <reg32 offset="0x00010" name="PHY_REG4"/>
+ <reg32 offset="0x00014" name="PHY_REG5"/>
+ <reg32 offset="0x00018" name="PHY_REG6"/>
+ <reg32 offset="0x0001c" name="PHY_REG7"/>
+ <reg32 offset="0x00020" name="PHY_REG8"/>
+ <reg32 offset="0x00024" name="PHY_REG9"/>
+ <reg32 offset="0x00028" name="PHY_REG10"/>
+ <reg32 offset="0x0002c" name="PHY_REG11"/>
+ <reg32 offset="0x00030" name="PHY_REG12">
+ <bitfield name="SW_RESET" pos="5" type="boolean"/>
+ <bitfield name="PWRDN_B" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00034" name="PHY_REG_BIST_CFG"/>
+ <reg32 offset="0x00038" name="PHY_DEBUG_BUS_SEL"/>
+ <reg32 offset="0x0003c" name="PHY_REG_MISC0"/>
+ <reg32 offset="0x00040" name="PHY_REG13"/>
+ <reg32 offset="0x00044" name="PHY_REG14"/>
+ <reg32 offset="0x00048" name="PHY_REG15"/>
+</domain>
+
+<domain name="HDMI_8960_PHY_PLL" width="32">
+ <reg32 offset="0x00000" name="REFCLK_CFG"/>
+ <reg32 offset="0x00004" name="CHRG_PUMP_CFG"/>
+ <reg32 offset="0x00008" name="LOOP_FLT_CFG0"/>
+ <reg32 offset="0x0000c" name="LOOP_FLT_CFG1"/>
+ <reg32 offset="0x00010" name="IDAC_ADJ_CFG"/>
+ <reg32 offset="0x00014" name="I_VI_KVCO_CFG"/>
+ <reg32 offset="0x00018" name="PWRDN_B">
+ <bitfield name="PD_PLL" pos="1" type="boolean"/>
+ <bitfield name="PLL_PWRDN_B" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0001c" name="SDM_CFG0"/>
+ <reg32 offset="0x00020" name="SDM_CFG1"/>
+ <reg32 offset="0x00024" name="SDM_CFG2"/>
+ <reg32 offset="0x00028" name="SDM_CFG3"/>
+ <reg32 offset="0x0002c" name="SDM_CFG4"/>
+ <reg32 offset="0x00030" name="SSC_CFG0"/>
+ <reg32 offset="0x00034" name="SSC_CFG1"/>
+ <reg32 offset="0x00038" name="SSC_CFG2"/>
+ <reg32 offset="0x0003c" name="SSC_CFG3"/>
+ <reg32 offset="0x00040" name="LOCKDET_CFG0"/>
+ <reg32 offset="0x00044" name="LOCKDET_CFG1"/>
+ <reg32 offset="0x00048" name="LOCKDET_CFG2"/>
+ <reg32 offset="0x0004c" name="VCOCAL_CFG0"/>
+ <reg32 offset="0x00050" name="VCOCAL_CFG1"/>
+ <reg32 offset="0x00054" name="VCOCAL_CFG2"/>
+ <reg32 offset="0x00058" name="VCOCAL_CFG3"/>
+ <reg32 offset="0x0005c" name="VCOCAL_CFG4"/>
+ <reg32 offset="0x00060" name="VCOCAL_CFG5"/>
+ <reg32 offset="0x00064" name="VCOCAL_CFG6"/>
+ <reg32 offset="0x00068" name="VCOCAL_CFG7"/>
+ <reg32 offset="0x0006c" name="DEBUG_SEL"/>
+ <reg32 offset="0x00070" name="MISC0"/>
+ <reg32 offset="0x00074" name="MISC1"/>
+ <reg32 offset="0x00078" name="MISC2"/>
+ <reg32 offset="0x0007c" name="MISC3"/>
+ <reg32 offset="0x00080" name="MISC4"/>
+ <reg32 offset="0x00084" name="MISC5"/>
+ <reg32 offset="0x00088" name="MISC6"/>
+ <reg32 offset="0x0008c" name="DEBUG_BUS0"/>
+ <reg32 offset="0x00090" name="DEBUG_BUS1"/>
+ <reg32 offset="0x00094" name="DEBUG_BUS2"/>
+ <reg32 offset="0x00098" name="STATUS0">
+ <bitfield name="PLL_LOCK" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0009c" name="STATUS1"/>
+</domain>
+
+<domain name="HDMI_8x74" width="32">
+ <!--
+ seems that all mdp5+ devices have the same PHY registers?
+ -->
+ <reg32 offset="0x00000" name="ANA_CFG0"/>
+ <reg32 offset="0x00004" name="ANA_CFG1"/>
+ <reg32 offset="0x00008" name="ANA_CFG2"/>
+ <reg32 offset="0x0000c" name="ANA_CFG3"/>
+ <reg32 offset="0x00010" name="PD_CTRL0"/>
+ <reg32 offset="0x00014" name="PD_CTRL1"/>
+ <reg32 offset="0x00018" name="GLB_CFG"/>
+ <reg32 offset="0x0001c" name="DCC_CFG0"/>
+ <reg32 offset="0x00020" name="DCC_CFG1"/>
+ <reg32 offset="0x00024" name="TXCAL_CFG0"/>
+ <reg32 offset="0x00028" name="TXCAL_CFG1"/>
+ <reg32 offset="0x0002c" name="TXCAL_CFG2"/>
+ <reg32 offset="0x00030" name="TXCAL_CFG3"/>
+ <reg32 offset="0x00034" name="BIST_CFG0"/>
+ <reg32 offset="0x0003c" name="BIST_PATN0"/>
+ <reg32 offset="0x00040" name="BIST_PATN1"/>
+ <reg32 offset="0x00044" name="BIST_PATN2"/>
+ <reg32 offset="0x00048" name="BIST_PATN3"/>
+ <reg32 offset="0x0005c" name="STATUS"/>
+</domain>
+
+<domain name="HDMI_28nm_PHY_PLL" width="32">
+ <reg32 offset="0x00000" name="REFCLK_CFG"/>
+ <reg32 offset="0x00004" name="POSTDIV1_CFG"/>
+ <reg32 offset="0x00008" name="CHGPUMP_CFG"/>
+ <reg32 offset="0x0000C" name="VCOLPF_CFG"/>
+ <reg32 offset="0x00010" name="VREG_CFG"/>
+ <reg32 offset="0x00014" name="PWRGEN_CFG"/>
+ <reg32 offset="0x00018" name="DMUX_CFG"/>
+ <reg32 offset="0x0001C" name="AMUX_CFG"/>
+ <reg32 offset="0x00020" name="GLB_CFG">
+ <bitfield name="PLL_PWRDN_B" pos="0" type="boolean"/>
+ <bitfield name="PLL_LDO_PWRDN_B" pos="1" type="boolean"/>
+ <bitfield name="PLL_PWRGEN_PWRDN_B" pos="2" type="boolean"/>
+ <bitfield name="PLL_ENABLE" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x00024" name="POSTDIV2_CFG"/>
+ <reg32 offset="0x00028" name="POSTDIV3_CFG"/>
+ <reg32 offset="0x0002C" name="LPFR_CFG"/>
+ <reg32 offset="0x00030" name="LPFC1_CFG"/>
+ <reg32 offset="0x00034" name="LPFC2_CFG"/>
+ <reg32 offset="0x00038" name="SDM_CFG0"/>
+ <reg32 offset="0x0003C" name="SDM_CFG1"/>
+ <reg32 offset="0x00040" name="SDM_CFG2"/>
+ <reg32 offset="0x00044" name="SDM_CFG3"/>
+ <reg32 offset="0x00048" name="SDM_CFG4"/>
+ <reg32 offset="0x0004C" name="SSC_CFG0"/>
+ <reg32 offset="0x00050" name="SSC_CFG1"/>
+ <reg32 offset="0x00054" name="SSC_CFG2"/>
+ <reg32 offset="0x00058" name="SSC_CFG3"/>
+ <reg32 offset="0x0005C" name="LKDET_CFG0"/>
+ <reg32 offset="0x00060" name="LKDET_CFG1"/>
+ <reg32 offset="0x00064" name="LKDET_CFG2"/>
+ <reg32 offset="0x00068" name="TEST_CFG">
+ <bitfield name="PLL_SW_RESET" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0006C" name="CAL_CFG0"/>
+ <reg32 offset="0x00070" name="CAL_CFG1"/>
+ <reg32 offset="0x00074" name="CAL_CFG2"/>
+ <reg32 offset="0x00078" name="CAL_CFG3"/>
+ <reg32 offset="0x0007C" name="CAL_CFG4"/>
+ <reg32 offset="0x00080" name="CAL_CFG5"/>
+ <reg32 offset="0x00084" name="CAL_CFG6"/>
+ <reg32 offset="0x00088" name="CAL_CFG7"/>
+ <reg32 offset="0x0008C" name="CAL_CFG8"/>
+ <reg32 offset="0x00090" name="CAL_CFG9"/>
+ <reg32 offset="0x00094" name="CAL_CFG10"/>
+ <reg32 offset="0x00098" name="CAL_CFG11"/>
+ <reg32 offset="0x0009C" name="EFUSE_CFG"/>
+ <reg32 offset="0x000A0" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x000C0" name="STATUS"/>
+</domain>
+
+<domain name="HDMI_8996_PHY" width="32">
+ <reg32 offset="0x00000" name="CFG"/>
+ <reg32 offset="0x00004" name="PD_CTL"/>
+ <reg32 offset="0x00008" name="MODE"/>
+ <reg32 offset="0x0000C" name="MISR_CLEAR"/>
+ <reg32 offset="0x00010" name="TX0_TX1_BIST_CFG0"/>
+ <reg32 offset="0x00014" name="TX0_TX1_BIST_CFG1"/>
+ <reg32 offset="0x00018" name="TX0_TX1_PRBS_SEED_BYTE0"/>
+ <reg32 offset="0x0001C" name="TX0_TX1_PRBS_SEED_BYTE1"/>
+ <reg32 offset="0x00020" name="TX0_TX1_BIST_PATTERN0"/>
+ <reg32 offset="0x00024" name="TX0_TX1_BIST_PATTERN1"/>
+ <reg32 offset="0x00028" name="TX2_TX3_BIST_CFG0"/>
+ <reg32 offset="0x0002C" name="TX2_TX3_BIST_CFG1"/>
+ <reg32 offset="0x00030" name="TX2_TX3_PRBS_SEED_BYTE0"/>
+ <reg32 offset="0x00034" name="TX2_TX3_PRBS_SEED_BYTE1"/>
+ <reg32 offset="0x00038" name="TX2_TX3_BIST_PATTERN0"/>
+ <reg32 offset="0x0003C" name="TX2_TX3_BIST_PATTERN1"/>
+ <reg32 offset="0x00040" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x00044" name="TXCAL_CFG0"/>
+ <reg32 offset="0x00048" name="TXCAL_CFG1"/>
+ <reg32 offset="0x0004C" name="TX0_TX1_LANE_CTL"/>
+ <reg32 offset="0x00050" name="TX2_TX3_LANE_CTL"/>
+ <reg32 offset="0x00054" name="LANE_BIST_CONFIG"/>
+ <reg32 offset="0x00058" name="CLOCK"/>
+ <reg32 offset="0x0005C" name="MISC1"/>
+ <reg32 offset="0x00060" name="MISC2"/>
+ <reg32 offset="0x00064" name="TX0_TX1_BIST_STATUS0"/>
+ <reg32 offset="0x00068" name="TX0_TX1_BIST_STATUS1"/>
+ <reg32 offset="0x0006C" name="TX0_TX1_BIST_STATUS2"/>
+ <reg32 offset="0x00070" name="TX2_TX3_BIST_STATUS0"/>
+ <reg32 offset="0x00074" name="TX2_TX3_BIST_STATUS1"/>
+ <reg32 offset="0x00078" name="TX2_TX3_BIST_STATUS2"/>
+ <reg32 offset="0x0007C" name="PRE_MISR_STATUS0"/>
+ <reg32 offset="0x00080" name="PRE_MISR_STATUS1"/>
+ <reg32 offset="0x00084" name="PRE_MISR_STATUS2"/>
+ <reg32 offset="0x00088" name="PRE_MISR_STATUS3"/>
+ <reg32 offset="0x0008C" name="POST_MISR_STATUS0"/>
+ <reg32 offset="0x00090" name="POST_MISR_STATUS1"/>
+ <reg32 offset="0x00094" name="POST_MISR_STATUS2"/>
+ <reg32 offset="0x00098" name="POST_MISR_STATUS3"/>
+ <reg32 offset="0x0009C" name="STATUS"/>
+ <reg32 offset="0x000A0" name="MISC3_STATUS"/>
+ <reg32 offset="0x000A4" name="MISC4_STATUS"/>
+ <reg32 offset="0x000A8" name="DEBUG_BUS0"/>
+ <reg32 offset="0x000AC" name="DEBUG_BUS1"/>
+ <reg32 offset="0x000B0" name="DEBUG_BUS2"/>
+ <reg32 offset="0x000B4" name="DEBUG_BUS3"/>
+ <reg32 offset="0x000B8" name="PHY_REVISION_ID0"/>
+ <reg32 offset="0x000BC" name="PHY_REVISION_ID1"/>
+ <reg32 offset="0x000C0" name="PHY_REVISION_ID2"/>
+ <reg32 offset="0x000C4" name="PHY_REVISION_ID3"/>
+</domain>
+
+<domain name="HDMI_PHY_QSERDES_COM" width="32">
+ <reg32 offset="0x00000" name="ATB_SEL1"/>
+ <reg32 offset="0x00004" name="ATB_SEL2"/>
+ <reg32 offset="0x00008" name="FREQ_UPDATE"/>
+ <reg32 offset="0x0000C" name="BG_TIMER"/>
+ <reg32 offset="0x00010" name="SSC_EN_CENTER"/>
+ <reg32 offset="0x00014" name="SSC_ADJ_PER1"/>
+ <reg32 offset="0x00018" name="SSC_ADJ_PER2"/>
+ <reg32 offset="0x0001C" name="SSC_PER1"/>
+ <reg32 offset="0x00020" name="SSC_PER2"/>
+ <reg32 offset="0x00024" name="SSC_STEP_SIZE1"/>
+ <reg32 offset="0x00028" name="SSC_STEP_SIZE2"/>
+ <reg32 offset="0x0002C" name="POST_DIV"/>
+ <reg32 offset="0x00030" name="POST_DIV_MUX"/>
+ <reg32 offset="0x00034" name="BIAS_EN_CLKBUFLR_EN"/>
+ <reg32 offset="0x00038" name="CLK_ENABLE1"/>
+ <reg32 offset="0x0003C" name="SYS_CLK_CTRL"/>
+ <reg32 offset="0x00040" name="SYSCLK_BUF_ENABLE"/>
+ <reg32 offset="0x00044" name="PLL_EN"/>
+ <reg32 offset="0x00048" name="PLL_IVCO"/>
+ <reg32 offset="0x0004C" name="LOCK_CMP1_MODE0"/>
+ <reg32 offset="0x00050" name="LOCK_CMP2_MODE0"/>
+ <reg32 offset="0x00054" name="LOCK_CMP3_MODE0"/>
+ <reg32 offset="0x00058" name="LOCK_CMP1_MODE1"/>
+ <reg32 offset="0x0005C" name="LOCK_CMP2_MODE1"/>
+ <reg32 offset="0x00060" name="LOCK_CMP3_MODE1"/>
+ <reg32 offset="0x00064" name="LOCK_CMP1_MODE2"/>
+ <reg32 offset="0x00064" name="CMN_RSVD0"/>
+ <reg32 offset="0x00068" name="LOCK_CMP2_MODE2"/>
+ <reg32 offset="0x00068" name="EP_CLOCK_DETECT_CTRL"/>
+ <reg32 offset="0x0006C" name="LOCK_CMP3_MODE2"/>
+ <reg32 offset="0x0006C" name="SYSCLK_DET_COMP_STATUS"/>
+ <reg32 offset="0x00070" name="BG_TRIM"/>
+ <reg32 offset="0x00074" name="CLK_EP_DIV"/>
+ <reg32 offset="0x00078" name="CP_CTRL_MODE0"/>
+ <reg32 offset="0x0007C" name="CP_CTRL_MODE1"/>
+ <reg32 offset="0x00080" name="CP_CTRL_MODE2"/>
+ <reg32 offset="0x00080" name="CMN_RSVD1"/>
+ <reg32 offset="0x00084" name="PLL_RCTRL_MODE0"/>
+ <reg32 offset="0x00088" name="PLL_RCTRL_MODE1"/>
+ <reg32 offset="0x0008C" name="PLL_RCTRL_MODE2"/>
+ <reg32 offset="0x0008C" name="CMN_RSVD2"/>
+ <reg32 offset="0x00090" name="PLL_CCTRL_MODE0"/>
+ <reg32 offset="0x00094" name="PLL_CCTRL_MODE1"/>
+ <reg32 offset="0x00098" name="PLL_CCTRL_MODE2"/>
+ <reg32 offset="0x00098" name="CMN_RSVD3"/>
+ <reg32 offset="0x0009C" name="PLL_CNTRL"/>
+ <reg32 offset="0x000A0" name="PHASE_SEL_CTRL"/>
+ <reg32 offset="0x000A4" name="PHASE_SEL_DC"/>
+ <reg32 offset="0x000A8" name="CORE_CLK_IN_SYNC_SEL"/>
+ <reg32 offset="0x000A8" name="BIAS_EN_CTRL_BY_PSM"/>
+ <reg32 offset="0x000AC" name="SYSCLK_EN_SEL"/>
+ <reg32 offset="0x000B0" name="CML_SYSCLK_SEL"/>
+ <reg32 offset="0x000B4" name="RESETSM_CNTRL"/>
+ <reg32 offset="0x000B8" name="RESETSM_CNTRL2"/>
+ <reg32 offset="0x000BC" name="RESTRIM_CTRL"/>
+ <reg32 offset="0x000C0" name="RESTRIM_CTRL2"/>
+ <reg32 offset="0x000C4" name="RESCODE_DIV_NUM"/>
+ <reg32 offset="0x000C8" name="LOCK_CMP_EN"/>
+ <reg32 offset="0x000CC" name="LOCK_CMP_CFG"/>
+ <reg32 offset="0x000D0" name="DEC_START_MODE0"/>
+ <reg32 offset="0x000D4" name="DEC_START_MODE1"/>
+ <reg32 offset="0x000D8" name="DEC_START_MODE2"/>
+ <reg32 offset="0x000D8" name="VCOCAL_DEADMAN_CTRL"/>
+ <reg32 offset="0x000DC" name="DIV_FRAC_START1_MODE0"/>
+ <reg32 offset="0x000E0" name="DIV_FRAC_START2_MODE0"/>
+ <reg32 offset="0x000E4" name="DIV_FRAC_START3_MODE0"/>
+ <reg32 offset="0x000E8" name="DIV_FRAC_START1_MODE1"/>
+ <reg32 offset="0x000EC" name="DIV_FRAC_START2_MODE1"/>
+ <reg32 offset="0x000F0" name="DIV_FRAC_START3_MODE1"/>
+ <reg32 offset="0x000F4" name="DIV_FRAC_START1_MODE2"/>
+ <reg32 offset="0x000F4" name="VCO_TUNE_MINVAL1"/>
+ <reg32 offset="0x000F8" name="DIV_FRAC_START2_MODE2"/>
+ <reg32 offset="0x000F8" name="VCO_TUNE_MINVAL2"/>
+ <reg32 offset="0x000FC" name="DIV_FRAC_START3_MODE2"/>
+ <reg32 offset="0x000FC" name="CMN_RSVD4"/>
+ <reg32 offset="0x00100" name="INTEGLOOP_INITVAL"/>
+ <reg32 offset="0x00104" name="INTEGLOOP_EN"/>
+ <reg32 offset="0x00108" name="INTEGLOOP_GAIN0_MODE0"/>
+ <reg32 offset="0x0010C" name="INTEGLOOP_GAIN1_MODE0"/>
+ <reg32 offset="0x00110" name="INTEGLOOP_GAIN0_MODE1"/>
+ <reg32 offset="0x00114" name="INTEGLOOP_GAIN1_MODE1"/>
+ <reg32 offset="0x00118" name="INTEGLOOP_GAIN0_MODE2"/>
+ <reg32 offset="0x00118" name="VCO_TUNE_MAXVAL1"/>
+ <reg32 offset="0x0011C" name="INTEGLOOP_GAIN1_MODE2"/>
+ <reg32 offset="0x0011C" name="VCO_TUNE_MAXVAL2"/>
+ <reg32 offset="0x00120" name="RES_TRIM_CONTROL2"/>
+ <reg32 offset="0x00124" name="VCO_TUNE_CTRL"/>
+ <reg32 offset="0x00128" name="VCO_TUNE_MAP"/>
+ <reg32 offset="0x0012C" name="VCO_TUNE1_MODE0"/>
+ <reg32 offset="0x00130" name="VCO_TUNE2_MODE0"/>
+ <reg32 offset="0x00134" name="VCO_TUNE1_MODE1"/>
+ <reg32 offset="0x00138" name="VCO_TUNE2_MODE1"/>
+ <reg32 offset="0x0013C" name="VCO_TUNE1_MODE2"/>
+ <reg32 offset="0x0013C" name="VCO_TUNE_INITVAL1"/>
+ <reg32 offset="0x00140" name="VCO_TUNE2_MODE2"/>
+ <reg32 offset="0x00140" name="VCO_TUNE_INITVAL2"/>
+ <reg32 offset="0x00144" name="VCO_TUNE_TIMER1"/>
+ <reg32 offset="0x00148" name="VCO_TUNE_TIMER2"/>
+ <reg32 offset="0x0014C" name="SAR"/>
+ <reg32 offset="0x00150" name="SAR_CLK"/>
+ <reg32 offset="0x00154" name="SAR_CODE_OUT_STATUS"/>
+ <reg32 offset="0x00158" name="SAR_CODE_READY_STATUS"/>
+ <reg32 offset="0x0015C" name="CMN_STATUS"/>
+ <reg32 offset="0x00160" name="RESET_SM_STATUS"/>
+ <reg32 offset="0x00164" name="RESTRIM_CODE_STATUS"/>
+ <reg32 offset="0x00168" name="PLLCAL_CODE1_STATUS"/>
+ <reg32 offset="0x0016C" name="PLLCAL_CODE2_STATUS"/>
+ <reg32 offset="0x00170" name="BG_CTRL"/>
+ <reg32 offset="0x00174" name="CLK_SELECT"/>
+ <reg32 offset="0x00178" name="HSCLK_SEL"/>
+ <reg32 offset="0x0017C" name="INTEGLOOP_BINCODE_STATUS"/>
+ <reg32 offset="0x00180" name="PLL_ANALOG"/>
+ <reg32 offset="0x00184" name="CORECLK_DIV"/>
+ <reg32 offset="0x00188" name="SW_RESET"/>
+ <reg32 offset="0x0018C" name="CORE_CLK_EN"/>
+ <reg32 offset="0x00190" name="C_READY_STATUS"/>
+ <reg32 offset="0x00194" name="CMN_CONFIG"/>
+ <reg32 offset="0x00198" name="CMN_RATE_OVERRIDE"/>
+ <reg32 offset="0x0019C" name="SVS_MODE_CLK_SEL"/>
+ <reg32 offset="0x001A0" name="DEBUG_BUS0"/>
+ <reg32 offset="0x001A4" name="DEBUG_BUS1"/>
+ <reg32 offset="0x001A8" name="DEBUG_BUS2"/>
+ <reg32 offset="0x001AC" name="DEBUG_BUS3"/>
+ <reg32 offset="0x001B0" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x001B4" name="CMN_MISC1"/>
+ <reg32 offset="0x001B8" name="CMN_MISC2"/>
+ <reg32 offset="0x001BC" name="CORECLK_DIV_MODE1"/>
+ <reg32 offset="0x001C0" name="CORECLK_DIV_MODE2"/>
+ <reg32 offset="0x001C4" name="CMN_RSVD5"/>
+</domain>
+
+
+<domain name="HDMI_PHY_QSERDES_TX_LX" width="32">
+ <reg32 offset="0x00000" name="BIST_MODE_LANENO"/>
+ <reg32 offset="0x00004" name="BIST_INVERT"/>
+ <reg32 offset="0x00008" name="CLKBUF_ENABLE"/>
+ <reg32 offset="0x0000C" name="CMN_CONTROL_ONE"/>
+ <reg32 offset="0x00010" name="CMN_CONTROL_TWO"/>
+ <reg32 offset="0x00014" name="CMN_CONTROL_THREE"/>
+ <reg32 offset="0x00018" name="TX_EMP_POST1_LVL"/>
+ <reg32 offset="0x0001C" name="TX_POST2_EMPH"/>
+ <reg32 offset="0x00020" name="TX_BOOST_LVL_UP_DN"/>
+ <reg32 offset="0x00024" name="HP_PD_ENABLES"/>
+ <reg32 offset="0x00028" name="TX_IDLE_LVL_LARGE_AMP"/>
+ <reg32 offset="0x0002C" name="TX_DRV_LVL"/>
+ <reg32 offset="0x00030" name="TX_DRV_LVL_OFFSET"/>
+ <reg32 offset="0x00034" name="RESET_TSYNC_EN"/>
+ <reg32 offset="0x00038" name="PRE_STALL_LDO_BOOST_EN"/>
+ <reg32 offset="0x0003C" name="TX_BAND"/>
+ <reg32 offset="0x00040" name="SLEW_CNTL"/>
+ <reg32 offset="0x00044" name="INTERFACE_SELECT"/>
+ <reg32 offset="0x00048" name="LPB_EN"/>
+ <reg32 offset="0x0004C" name="RES_CODE_LANE_TX"/>
+ <reg32 offset="0x00050" name="RES_CODE_LANE_RX"/>
+ <reg32 offset="0x00054" name="RES_CODE_LANE_OFFSET"/>
+ <reg32 offset="0x00058" name="PERL_LENGTH1"/>
+ <reg32 offset="0x0005C" name="PERL_LENGTH2"/>
+ <reg32 offset="0x00060" name="SERDES_BYP_EN_OUT"/>
+ <reg32 offset="0x00064" name="DEBUG_BUS_SEL"/>
+ <reg32 offset="0x00068" name="HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN"/>
+ <reg32 offset="0x0006C" name="TX_POL_INV"/>
+ <reg32 offset="0x00070" name="PARRATE_REC_DETECT_IDLE_EN"/>
+ <reg32 offset="0x00074" name="BIST_PATTERN1"/>
+ <reg32 offset="0x00078" name="BIST_PATTERN2"/>
+ <reg32 offset="0x0007C" name="BIST_PATTERN3"/>
+ <reg32 offset="0x00080" name="BIST_PATTERN4"/>
+ <reg32 offset="0x00084" name="BIST_PATTERN5"/>
+ <reg32 offset="0x00088" name="BIST_PATTERN6"/>
+ <reg32 offset="0x0008C" name="BIST_PATTERN7"/>
+ <reg32 offset="0x00090" name="BIST_PATTERN8"/>
+ <reg32 offset="0x00094" name="LANE_MODE"/>
+ <reg32 offset="0x00098" name="IDAC_CAL_LANE_MODE"/>
+ <reg32 offset="0x0009C" name="IDAC_CAL_LANE_MODE_CONFIGURATION"/>
+ <reg32 offset="0x000A0" name="ATB_SEL1"/>
+ <reg32 offset="0x000A4" name="ATB_SEL2"/>
+ <reg32 offset="0x000A8" name="RCV_DETECT_LVL"/>
+ <reg32 offset="0x000AC" name="RCV_DETECT_LVL_2"/>
+ <reg32 offset="0x000B0" name="PRBS_SEED1"/>
+ <reg32 offset="0x000B4" name="PRBS_SEED2"/>
+ <reg32 offset="0x000B8" name="PRBS_SEED3"/>
+ <reg32 offset="0x000BC" name="PRBS_SEED4"/>
+ <reg32 offset="0x000C0" name="RESET_GEN"/>
+ <reg32 offset="0x000C4" name="RESET_GEN_MUXES"/>
+ <reg32 offset="0x000C8" name="TRAN_DRVR_EMP_EN"/>
+ <reg32 offset="0x000CC" name="TX_INTERFACE_MODE"/>
+ <reg32 offset="0x000D0" name="PWM_CTRL"/>
+ <reg32 offset="0x000D4" name="PWM_ENCODED_OR_DATA"/>
+ <reg32 offset="0x000D8" name="PWM_GEAR_1_DIVIDER_BAND2"/>
+ <reg32 offset="0x000DC" name="PWM_GEAR_2_DIVIDER_BAND2"/>
+ <reg32 offset="0x000E0" name="PWM_GEAR_3_DIVIDER_BAND2"/>
+ <reg32 offset="0x000E4" name="PWM_GEAR_4_DIVIDER_BAND2"/>
+ <reg32 offset="0x000E8" name="PWM_GEAR_1_DIVIDER_BAND0_1"/>
+ <reg32 offset="0x000EC" name="PWM_GEAR_2_DIVIDER_BAND0_1"/>
+ <reg32 offset="0x000F0" name="PWM_GEAR_3_DIVIDER_BAND0_1"/>
+ <reg32 offset="0x000F4" name="PWM_GEAR_4_DIVIDER_BAND0_1"/>
+ <reg32 offset="0x000F8" name="VMODE_CTRL1"/>
+ <reg32 offset="0x000FC" name="VMODE_CTRL2"/>
+ <reg32 offset="0x00100" name="TX_ALOG_INTF_OBSV_CNTL"/>
+ <reg32 offset="0x00104" name="BIST_STATUS"/>
+ <reg32 offset="0x00108" name="BIST_ERROR_COUNT1"/>
+ <reg32 offset="0x0010C" name="BIST_ERROR_COUNT2"/>
+ <reg32 offset="0x00110" name="TX_ALOG_INTF_OBSV"/>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/mdp4.xml b/drivers/gpu/drm/msm/registers/display/mdp4.xml
new file mode 100644
index 000000000000..6abb4a3c04da
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp4.xml
@@ -0,0 +1,504 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="display/mdp_common.xml"/>
+
+<domain name="MDP4" width="32">
+ <enum name="mdp4_pipe">
+ <brief>pipe names, index into PIPE[]</brief>
+ <value name="VG1" value="0"/>
+ <value name="VG2" value="1"/>
+ <value name="RGB1" value="2"/>
+ <value name="RGB2" value="3"/>
+ <value name="RGB3" value="4"/>
+ <value name="VG3" value="5"/>
+ <value name="VG4" value="6"/>
+ </enum>
+
+ <enum name="mdp4_mixer">
+ <value name="MIXER0" value="0"/>
+ <value name="MIXER1" value="1"/>
+ <value name="MIXER2" value="2"/>
+ </enum>
+
+ <enum name="mdp4_intf">
+ <!--
+ The enums for interface selection are a bit confusing:
+ enum {
+ LCDC_RGB_INTF, /* 0 */
+ DTV_INTF = LCDC_RGB_INTF, /* 0 */
+ MDDI_LCDC_INTF, /* 1 */
+ MDDI_INTF, /* 2 */
+ EBI2_INTF, /* 3 */
+ TV_INTF = EBI2_INTF, /* 3 */
+ DSI_VIDEO_INTF,
+ DSI_CMD_INTF
+ };
+ there is some overlap, and not all the values end up getting
+ written to hw (mdp4_display_intf_sel() remaps the last two
+ values to MDDI_LCDC_INTF/MDDI_INTF with extra bits set).. so
+ taking some liberties in guessing the actual meanings/names:
+ -->
+ <value name="INTF_LCDC_DTV" value="0"/> <!-- LCDC RGB or DTV (external) -->
+ <value name="INTF_DSI_VIDEO" value="1"/>
+ <value name="INTF_DSI_CMD" value="2"/>
+ <value name="INTF_EBI2_TV" value="3"/> <!-- EBI2 or TV (external) -->
+ </enum>
+ <enum name="mdp4_cursor_format">
+ <value name="CURSOR_ARGB" value="1"/>
+ <value name="CURSOR_XRGB" value="2"/>
+ </enum>
+ <enum name="mdp4_frame_format">
+ <value name="FRAME_LINEAR" value="0"/>
+ <value name="FRAME_TILE_ARGB_4X4" value="1"/>
+ <value name="FRAME_TILE_YCBCR_420" value="2"/>
+ </enum>
+ <enum name="mdp4_scale_unit">
+ <value name="SCALE_FIR" value="0"/>
+ <value name="SCALE_MN_PHASE" value="1"/>
+ <value name="SCALE_PIXEL_RPT" value="2"/>
+ </enum>
+
+ <bitset name="mdp4_layermixer_in_cfg" inline="yes">
+ <brief>appears to map pipe to mixer stage</brief>
+ <bitfield name="PIPE0" low="0" high="2" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE0_MIXER1" pos="3" type="boolean"/>
+ <bitfield name="PIPE1" low="4" high="6" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE1_MIXER1" pos="7" type="boolean"/>
+ <bitfield name="PIPE2" low="8" high="10" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE2_MIXER1" pos="11" type="boolean"/>
+ <bitfield name="PIPE3" low="12" high="14" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE3_MIXER1" pos="15" type="boolean"/>
+ <bitfield name="PIPE4" low="16" high="18" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE4_MIXER1" pos="19" type="boolean"/>
+ <bitfield name="PIPE5" low="20" high="22" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE5_MIXER1" pos="23" type="boolean"/>
+ <bitfield name="PIPE6" low="24" high="26" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE6_MIXER1" pos="27" type="boolean"/>
+ <bitfield name="PIPE7" low="28" high="30" type="mdp_mixer_stage_id"/>
+ <bitfield name="PIPE7_MIXER1" pos="31" type="boolean"/>
+ </bitset>
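+ <!--
+ Each pipe gets a 3-bit mixer stage plus a MIXER1 select bit, 4 bits per
+ pipe, so pipe n's slice starts at bit 4*n.  A small helper under that
+ assumption (pipe/stage values are the ones from the enums in this file):
+
+     #include <stdint.h>
+
+     /* one pipe's slice of LAYERMIXER_IN_CFG */
+     static uint32_t mdp4_mixer_in_cfg(unsigned pipe, unsigned stage, int mixer1)
+     {
+         uint32_t v = (stage & 0x7u) << (4 * pipe);
+
+         if (mixer1)
+             v |= 1u << (4 * pipe + 3);   /* PIPEn_MIXER1 */
+         return v;
+     }
+ -->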
+
+ <bitset name="MDP4_IRQ">
+ <bitfield name="OVERLAY0_DONE" pos="0" type="boolean"/>
+ <bitfield name="OVERLAY1_DONE" pos="1" type="boolean"/>
+ <bitfield name="DMA_S_DONE" pos="2" type="boolean"/>
+ <bitfield name="DMA_E_DONE" pos="3" type="boolean"/>
+ <bitfield name="DMA_P_DONE" pos="4" type="boolean"/>
+ <bitfield name="VG1_HISTOGRAM" pos="5" type="boolean"/>
+ <bitfield name="VG2_HISTOGRAM" pos="6" type="boolean"/>
+ <bitfield name="PRIMARY_VSYNC" pos="7" type="boolean"/>
+ <bitfield name="PRIMARY_INTF_UDERRUN" pos="8" type="boolean"/>
+ <bitfield name="EXTERNAL_VSYNC" pos="9" type="boolean"/>
+ <bitfield name="EXTERNAL_INTF_UDERRUN" pos="10" type="boolean"/>
+ <bitfield name="PRIMARY_RDPTR" pos="11" type="boolean"/> <!-- read pointer -->
+ <bitfield name="DMA_P_HISTOGRAM" pos="17" type="boolean"/>
+ <bitfield name="DMA_S_HISTOGRAM" pos="26" type="boolean"/>
+ <bitfield name="OVERLAY2_DONE" pos="30" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x00000" name="VERSION">
+ <!--
+ from mdp_probe() we can see the minor rev starts at bit 16.. assume
+ major is above that.. not sure about the rest of the bits, but they
+ don't really seem to matter
+ -->
+ <bitfield name="MINOR" low="16" high="23" type="uint"/>
+ <bitfield name="MAJOR" low="24" high="31" type="uint"/>
+ </reg32>
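+ <!--
+ Decoding VERSION per the field guesses above
+ (a trivial sketch, not driver code):
+
+     #include <stdint.h>
+
+     static void mdp4_version(uint32_t version, unsigned *major, unsigned *minor)
+     {
+         *minor = (version >> 16) & 0xff;
+         *major = (version >> 24) & 0xff;
+     }
+ -->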
+ <reg32 offset="0x00004" name="OVLP0_KICK"/>
+ <reg32 offset="0x00008" name="OVLP1_KICK"/>
+ <reg32 offset="0x000d0" name="OVLP2_KICK"/>
+ <reg32 offset="0x0000c" name="DMA_P_KICK"/>
+ <reg32 offset="0x00010" name="DMA_S_KICK"/>
+ <reg32 offset="0x00014" name="DMA_E_KICK"/>
+ <reg32 offset="0x00018" name="DISP_STATUS"/>
+
+ <reg32 offset="0x00038" name="DISP_INTF_SEL">
+ <bitfield name="PRIM" low="0" high="1" type="mdp4_intf"/>
+ <bitfield name="SEC" low="2" high="3" type="mdp4_intf"/>
+ <bitfield name="EXT" low="4" high="5" type="mdp4_intf"/>
+ <bitfield name="DSI_VIDEO" pos="6" type="boolean"/>
+ <bitfield name="DSI_CMD" pos="7" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0003c" name="RESET_STATUS"/> <!-- only mdp4 >v2.1 -->
+ <reg32 offset="0x0004c" name="READ_CNFG"/> <!-- something about # of pending requests.. -->
+ <reg32 offset="0x00050" name="INTR_ENABLE" type="MDP4_IRQ"/>
+ <reg32 offset="0x00054" name="INTR_STATUS" type="MDP4_IRQ"/>
+ <reg32 offset="0x00058" name="INTR_CLEAR" type="MDP4_IRQ"/>
+ <reg32 offset="0x00060" name="EBI2_LCD0"/>
+ <reg32 offset="0x00064" name="EBI2_LCD1"/>
+ <reg32 offset="0x00070" name="PORTMAP_MODE"/>
+
+ <!-- mdp chip-select controller: -->
+ <reg32 offset="0x000c0" name="CS_CONTROLLER0"/>
+ <reg32 offset="0x000c4" name="CS_CONTROLLER1"/>
+
+ <reg32 offset="0x100f0" name="LAYERMIXER2_IN_CFG" type="mdp4_layermixer_in_cfg"/>
+ <reg32 offset="0x100fc" name="LAYERMIXER_IN_CFG_UPDATE_METHOD"/>
+ <reg32 offset="0x10100" name="LAYERMIXER_IN_CFG" type="mdp4_layermixer_in_cfg"/>
+
+ <reg32 offset="0x30050" name="VG2_SRC_FORMAT"/>
+ <reg32 offset="0x31008" name="VG2_CONST_COLOR"/>
+
+ <reg32 offset="0x18000" name="OVERLAY_FLUSH">
+ <bitfield name="OVLP0" pos="0" type="boolean"/>
+ <bitfield name="OVLP1" pos="1" type="boolean"/>
+ <bitfield name="VG1" pos="2" type="boolean"/>
+ <bitfield name="VG2" pos="3" type="boolean"/>
+ <bitfield name="RGB1" pos="4" type="boolean"/>
+ <bitfield name="RGB2" pos="5" type="boolean"/>
+ </reg32>
+
+ <array offsets="0x10000,0x18000,0x88000" name="OVLP" length="3" stride="0x8000">
+ <reg32 offset="0x0004" name="CFG"/>
+ <reg32 offset="0x0008" name="SIZE" type="reg_wh"/>
+ <reg32 offset="0x000c" name="BASE"/>
+ <reg32 offset="0x0010" name="STRIDE" type="uint"/>
+ <reg32 offset="0x0014" name="OPMODE"/>
+
+ <array offsets="0x0104,0x0124,0x0144,0x0160" name="STAGE" length="4" stride="0x1c">
+ <reg32 offset="0x00" name="OP">
+ <bitfield name="FG_ALPHA" low="0" high="1" type="mdp_alpha_type"/>
+ <bitfield name="FG_INV_ALPHA" pos="2" type="boolean"/>
+ <bitfield name="FG_MOD_ALPHA" pos="3" type="boolean"/>
+ <bitfield name="BG_ALPHA" low="4" high="5" type="mdp_alpha_type"/>
+ <bitfield name="BG_INV_ALPHA" pos="6" type="boolean"/>
+ <bitfield name="BG_MOD_ALPHA" pos="7" type="boolean"/>
+ <bitfield name="FG_TRANSP" pos="8" type="boolean"/>
+ <bitfield name="BG_TRANSP" pos="9" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x04" name="FG_ALPHA"/>
+ <reg32 offset="0x08" name="BG_ALPHA"/>
+ <reg32 offset="0x0c" name="TRANSP_LOW0"/>
+ <reg32 offset="0x10" name="TRANSP_LOW1"/>
+ <reg32 offset="0x14" name="TRANSP_HIGH0"/>
+ <reg32 offset="0x18" name="TRANSP_HIGH1"/>
+ </array>
+
+ <array offsets="0x1004,0x1404,0x1804,0x1b84" name="STAGE_CO3" length="4" stride="4">
+ <reg32 offset="0" name="SEL">
+ <bitfield name="FG_ALPHA" pos="0" type="boolean"/> <!-- otherwise bg alpha -->
+ </reg32>
+ </array>
+
+ <reg32 offset="0x0180" name="TRANSP_LOW0"/>
+ <reg32 offset="0x0184" name="TRANSP_LOW1"/>
+ <reg32 offset="0x0188" name="TRANSP_HIGH0"/>
+ <reg32 offset="0x018c" name="TRANSP_HIGH1"/>
+
+ <reg32 offset="0x0200" name="CSC_CONFIG"/>
+
+ <array offset="0x2000" name="CSC" length="1" stride="0x700">
+ <array offset="0x400" name="MV" length="9" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x500" name="PRE_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x580" name="POST_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x600" name="PRE_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x680" name="POST_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ </array>
+ </array>
+
+ <enum name="mdp4_dma">
+ <value name="DMA_P" value="0"/>
+ <value name="DMA_S" value="1"/>
+ <value name="DMA_E" value="2"/>
+ </enum>
+ <reg32 offset="0x90070" name="DMA_P_OP_MODE"/>
+ <array offset="0x94800" name="LUTN" length="2" stride="0x400">
+ <array offset="0" name="LUT" length="0x100" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ </array>
+ <reg32 offset="0xa0028" name="DMA_S_OP_MODE"/>
+ <!-- I guess if DMA_S has an OP_MODE, it must have a LUT too.. -->
+ <reg32 offset="0xb0070" name="DMA_E_QUANT" length="3" stride="4"/>
+ <array offsets="0x90000,0xa0000,0xb0000" name="DMA" length="3" stride="0x10000" index="mdp4_dma">
+ <reg32 offset="0x0000" name="CONFIG">
+ <bitfield name="G_BPC" low="0" high="1" type="mdp_bpc"/>
+ <bitfield name="B_BPC" low="2" high="3" type="mdp_bpc"/>
+ <bitfield name="R_BPC" low="4" high="5" type="mdp_bpc"/>
+ <bitfield name="PACK_ALIGN_MSB" pos="7" type="boolean"/>
+ <bitfield name="PACK" low="8" high="15"/>
+ <!-- bit 24 is DITHER_EN on DMA_P, DEFLKR_EN on DMA_E -->
+ <bitfield name="DEFLKR_EN" pos="24" type="boolean"/>
+ <bitfield name="DITHER_EN" pos="24" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0004" name="SRC_SIZE" type="reg_wh"/>
+ <reg32 offset="0x0008" name="SRC_BASE"/>
+ <reg32 offset="0x000c" name="SRC_STRIDE" type="uint"/>
+ <reg32 offset="0x0010" name="DST_SIZE" type="reg_wh"/>
+
+ <reg32 offset="0x0044" name="CURSOR_SIZE">
+ <!-- seems the limit is 64x64: -->
+ <bitfield name="WIDTH" low="0" high="6" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="22" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0048" name="CURSOR_BASE"/>
+ <reg32 offset="0x004c" name="CURSOR_POS">
+ <bitfield name="X" low="0" high="15" type="uint"/>
+ <bitfield name="Y" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0060" name="CURSOR_BLEND_CONFIG">
+ <bitfield name="CURSOR_EN" pos="0" type="boolean"/>
+ <bitfield name="FORMAT" low="1" high="2" type="mdp4_cursor_format"/>
+ <bitfield name="TRANSP_EN" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0064" name="CURSOR_BLEND_PARAM"/>
+ <reg32 offset="0x0068" name="BLEND_TRANS_LOW"/>
+ <reg32 offset="0x006c" name="BLEND_TRANS_HIGH"/>
+
+ <reg32 offset="0x1004" name="FETCH_CONFIG"/>
+ <array offset="0x3000" name="CSC" length="1" stride="0x700">
+ <array offset="0x400" name="MV" length="9" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x500" name="PRE_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x580" name="POST_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x600" name="PRE_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x680" name="POST_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ </array>
+ </array>
+
+ <!--
+ TODO: length should be 7, but that would collide with OVLP2;
+ this register map is a bit strange.
+ -->
+ <array offset="0x20000" name="PIPE" length="6" stride="0x10000" index="mdp4_pipe">
+ <reg32 offset="0x0000" name="SRC_SIZE" type="reg_wh"/>
+ <reg32 offset="0x0004" name="SRC_XY" type="reg_xy"/>
+ <reg32 offset="0x0008" name="DST_SIZE" type="reg_wh"/>
+ <reg32 offset="0x000c" name="DST_XY" type="reg_xy"/>
+ <reg32 offset="0x0010" name="SRCP0_BASE"/>
+ <reg32 offset="0x0014" name="SRCP1_BASE"/>
+ <reg32 offset="0x0018" name="SRCP2_BASE"/>
+ <reg32 offset="0x001c" name="SRCP3_BASE"/>
+ <reg32 offset="0x0040" name="SRC_STRIDE_A">
+ <bitfield name="P0" low="0" high="15" type="uint"/>
+ <bitfield name="P1" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0044" name="SRC_STRIDE_B">
+ <bitfield name="P2" low="0" high="15" type="uint"/>
+ <bitfield name="P3" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0048" name="SSTILE_FRAME_SIZE" type="reg_wh"/>
+ <reg32 offset="0x0050" name="SRC_FORMAT">
+ <bitfield name="G_BPC" low="0" high="1" type="mdp_bpc"/>
+ <bitfield name="B_BPC" low="2" high="3" type="mdp_bpc"/>
+ <bitfield name="R_BPC" low="4" high="5" type="mdp_bpc"/>
+ <bitfield name="A_BPC" low="6" high="7" type="mdp_bpc_alpha"/>
+ <bitfield name="ALPHA_ENABLE" pos="8" type="boolean"/>
+ <bitfield name="CPP" low="9" high="10" type="uint">
+ <brief>8bit characters per pixel minus 1</brief>
+ </bitfield>
+ <bitfield name="ROTATED_90" pos="12" type="boolean"/>
+ <bitfield name="UNPACK_COUNT" low="13" high="14" type="uint"/>
+ <bitfield name="UNPACK_TIGHT" pos="17" type="boolean"/>
+ <bitfield name="UNPACK_ALIGN_MSB" pos="18" type="boolean"/>
+ <bitfield name="FETCH_PLANES" low="19" high="20" type="uint"/>
+ <bitfield name="SOLID_FILL" pos="22" type="boolean"/>
+ <bitfield name="CHROMA_SAMP" low="26" high="27" type="mdp_chroma_samp_type"/>
+ <bitfield name="FRAME_FORMAT" low="29" high="30" type="mdp4_frame_format"/>
+ </reg32>
+ <reg32 offset="0x0054" name="SRC_UNPACK" type="mdp_unpack_pattern"/>
+ <reg32 offset="0x0058" name="OP_MODE">
+ <bitfield name="SCALEX_EN" pos="0" type="boolean"/>
+ <bitfield name="SCALEY_EN" pos="1" type="boolean"/>
+ <bitfield name="SCALEX_UNIT_SEL" low="2" high="3" type="mdp4_scale_unit"/>
+ <bitfield name="SCALEY_UNIT_SEL" low="4" high="5" type="mdp4_scale_unit"/>
+ <bitfield name="SRC_YCBCR" pos="9" type="boolean"/>
+ <bitfield name="DST_YCBCR" pos="10" type="boolean"/>
+ <bitfield name="CSC_EN" pos="11" type="boolean"/>
+ <bitfield name="FLIP_LR" pos="13" type="boolean"/>
+ <bitfield name="FLIP_UD" pos="14" type="boolean"/>
+ <bitfield name="DITHER_EN" pos="15" type="boolean"/>
+ <bitfield name="IGC_LUT_EN" pos="16" type="boolean"/>
+ <bitfield name="DEINT_EN" pos="18" type="boolean"/>
+ <bitfield name="DEINT_ODD_REF" pos="19" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x005c" name="PHASEX_STEP"/>
+ <reg32 offset="0x0060" name="PHASEY_STEP"/>
+ <reg32 offset="0x1004" name="FETCH_CONFIG"/>
+ <reg32 offset="0x1008" name="SOLID_COLOR"/>
+
+ <array offset="0x4000" name="CSC" length="1" stride="0x700">
+ <array offset="0x400" name="MV" length="9" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x500" name="PRE_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x580" name="POST_BV" length="3" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x600" name="PRE_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ <array offset="0x680" name="POST_LV" length="6" stride="4">
+ <reg32 offset="0" name="VAL"/>
+ </array>
+ </array>
+ </array>
+
+ <!--
+ ENCODERS
+ LCDC and DSI seem the same, DTV is just slightly different..
+ -->
+
+ <bitset name="mdp4_ctrl_polarity" inline="yes">
+ <!-- not entirely sure if these bits mean hi or low.. -->
+ <bitfield name="HSYNC_LOW" pos="0" type="boolean"/>
+ <bitfield name="VSYNC_LOW" pos="1" type="boolean"/>
+ <bitfield name="DATA_EN_LOW" pos="2" type="boolean"/>
+ </bitset>
+
+ <bitset name="mdp4_active_hctl" inline="yes">
+ <bitfield name="START" low="0" high="14" type="uint"/>
+ <bitfield name="END" low="16" high="30" type="uint"/>
+ <bitfield name="ACTIVE_START_X" pos="31" type="boolean"/>
+ </bitset>
+
+ <bitset name="mdp4_display_hctl" inline="yes">
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
+ </bitset>
+
+ <bitset name="mdp4_hsync_ctrl" inline="yes">
+ <bitfield name="PULSEW" low="0" high="15" type="uint"/>
+ <bitfield name="PERIOD" low="16" high="31" type="uint"/>
+ </bitset>
+
+ <bitset name="mdp4_underflow_clr" inline="yes">
+ <bitfield name="COLOR" low="0" high="23"/>
+ <bitfield name="ENABLE_RECOVERY" pos="31" type="boolean"/>
+ </bitset>
+
+ <!-- offset is 0xe0000 on !mdp4.. -->
+ <array offset="0xc0000" name="LCDC" length="1" stride="0x1000">
+ <reg32 offset="0x0000" name="ENABLE"/>
+ <reg32 offset="0x0004" name="HSYNC_CTRL" type="mdp4_hsync_ctrl"/>
+ <reg32 offset="0x0008" name="VSYNC_PERIOD" type="uint"/>
+ <reg32 offset="0x000c" name="VSYNC_LEN" type="uint"/>
+ <reg32 offset="0x0010" name="DISPLAY_HCTRL" type="mdp4_display_hctl"/>
+ <reg32 offset="0x0014" name="DISPLAY_VSTART" type="uint"/>
+ <reg32 offset="0x0018" name="DISPLAY_VEND" type="uint"/>
+ <reg32 offset="0x001c" name="ACTIVE_HCTL" type="mdp4_active_hctl"/>
+ <reg32 offset="0x0020" name="ACTIVE_VSTART" type="uint"/>
+ <reg32 offset="0x0024" name="ACTIVE_VEND" type="uint"/>
+ <reg32 offset="0x0028" name="BORDER_CLR"/>
+ <reg32 offset="0x002c" name="UNDERFLOW_CLR" type="mdp4_underflow_clr"/>
+ <reg32 offset="0x0030" name="HSYNC_SKEW"/>
+ <reg32 offset="0x0034" name="TEST_CNTL"/>
+ <reg32 offset="0x0038" name="CTRL_POLARITY" type="mdp4_ctrl_polarity"/>
+ </array>
+
+ <reg32 offset="0xc2000" name="LCDC_LVDS_INTF_CTL">
+ <bitfield name="MODE_SEL" pos="2" type="boolean"/>
+ <bitfield name="RGB_OUT" pos="3" type="boolean"/>
+ <bitfield name="CH_SWAP" pos="4" type="boolean"/>
+ <bitfield name="CH1_RES_BIT" pos="5" type="boolean"/>
+ <bitfield name="CH2_RES_BIT" pos="6" type="boolean"/>
+ <bitfield name="ENABLE" pos="7" type="boolean"/>
+ <bitfield name="CH1_DATA_LANE0_EN" pos="8" type="boolean"/>
+ <bitfield name="CH1_DATA_LANE1_EN" pos="9" type="boolean"/>
+ <bitfield name="CH1_DATA_LANE2_EN" pos="10" type="boolean"/>
+ <bitfield name="CH1_DATA_LANE3_EN" pos="11" type="boolean"/>
+ <bitfield name="CH2_DATA_LANE0_EN" pos="12" type="boolean"/>
+ <bitfield name="CH2_DATA_LANE1_EN" pos="13" type="boolean"/>
+ <bitfield name="CH2_DATA_LANE2_EN" pos="14" type="boolean"/>
+ <bitfield name="CH2_DATA_LANE3_EN" pos="15" type="boolean"/>
+ <bitfield name="CH1_CLK_LANE_EN" pos="16" type="boolean"/>
+ <bitfield name="CH2_CLK_LANE_EN" pos="17" type="boolean"/>
+ </reg32>
+
+ <array offset="0xc2014" name="LCDC_LVDS_MUX_CTL" length="4" stride="0x8">
+ <reg32 offset="0x0" name="3_TO_0">
+ <bitfield name="BIT0" low="0" high="7"/>
+ <bitfield name="BIT1" low="8" high="15"/>
+ <bitfield name="BIT2" low="16" high="23"/>
+ <bitfield name="BIT3" low="24" high="31"/>
+ </reg32>
+ <reg32 offset="0x4" name="6_TO_4">
+ <bitfield name="BIT4" low="0" high="7"/>
+ <bitfield name="BIT5" low="8" high="15"/>
+ <bitfield name="BIT6" low="16" high="23"/>
+ </reg32>
+ </array>
+
+ <reg32 offset="0xc2034" name="LCDC_LVDS_PHY_RESET"/>
+
+ <reg32 offset="0xc3000" name="LVDS_PHY_PLL_CTRL_0"/>
+ <reg32 offset="0xc3004" name="LVDS_PHY_PLL_CTRL_1"/>
+ <reg32 offset="0xc3008" name="LVDS_PHY_PLL_CTRL_2"/>
+ <reg32 offset="0xc300c" name="LVDS_PHY_PLL_CTRL_3"/>
+ <reg32 offset="0xc3014" name="LVDS_PHY_PLL_CTRL_5"/>
+ <reg32 offset="0xc3018" name="LVDS_PHY_PLL_CTRL_6"/>
+ <reg32 offset="0xc301c" name="LVDS_PHY_PLL_CTRL_7"/>
+ <reg32 offset="0xc3020" name="LVDS_PHY_PLL_CTRL_8"/>
+ <reg32 offset="0xc3024" name="LVDS_PHY_PLL_CTRL_9"/>
+ <reg32 offset="0xc3080" name="LVDS_PHY_PLL_LOCKED"/>
+ <reg32 offset="0xc3108" name="LVDS_PHY_CFG2"/>
+
+ <reg32 offset="0xc3100" name="LVDS_PHY_CFG0">
+ <bitfield name="SERIALIZATION_ENBLE" pos="4" type="boolean"/>
+ <bitfield name="CHANNEL0" pos="6" type="boolean"/>
+ <bitfield name="CHANNEL1" pos="7" type="boolean"/>
+ </reg32>
+
+ <array offset="0xd0000" name="DTV" length="1" stride="0x1000">
+ <reg32 offset="0x0000" name="ENABLE"/>
+ <reg32 offset="0x0004" name="HSYNC_CTRL" type="mdp4_hsync_ctrl"/>
+ <reg32 offset="0x0008" name="VSYNC_PERIOD" type="uint"/>
+ <reg32 offset="0x000c" name="VSYNC_LEN" type="uint"/>
+ <reg32 offset="0x0018" name="DISPLAY_HCTRL" type="mdp4_display_hctl"/>
+ <reg32 offset="0x001c" name="DISPLAY_VSTART" type="uint"/>
+ <reg32 offset="0x0020" name="DISPLAY_VEND" type="uint"/>
+ <reg32 offset="0x002c" name="ACTIVE_HCTL" type="mdp4_active_hctl"/>
+ <reg32 offset="0x0030" name="ACTIVE_VSTART" type="uint"/>
+ <reg32 offset="0x0038" name="ACTIVE_VEND" type="uint"/>
+ <reg32 offset="0x0040" name="BORDER_CLR"/>
+ <reg32 offset="0x0044" name="UNDERFLOW_CLR" type="mdp4_underflow_clr"/>
+ <reg32 offset="0x0048" name="HSYNC_SKEW"/>
+ <reg32 offset="0x004c" name="TEST_CNTL"/>
+ <reg32 offset="0x0050" name="CTRL_POLARITY" type="mdp4_ctrl_polarity"/>
+ </array>
+
+ <array offset="0xe0000" name="DSI" length="1" stride="0x1000">
+ <reg32 offset="0x0000" name="ENABLE"/>
+ <reg32 offset="0x0004" name="HSYNC_CTRL" type="mdp4_hsync_ctrl"/>
+ <reg32 offset="0x0008" name="VSYNC_PERIOD" type="uint"/>
+ <reg32 offset="0x000c" name="VSYNC_LEN" type="uint"/>
+ <reg32 offset="0x0010" name="DISPLAY_HCTRL" type="mdp4_display_hctl"/>
+ <reg32 offset="0x0014" name="DISPLAY_VSTART" type="uint"/>
+ <reg32 offset="0x0018" name="DISPLAY_VEND" type="uint"/>
+ <reg32 offset="0x001c" name="ACTIVE_HCTL" type="mdp4_active_hctl"/>
+ <reg32 offset="0x0020" name="ACTIVE_VSTART" type="uint"/>
+ <reg32 offset="0x0024" name="ACTIVE_VEND" type="uint"/>
+ <reg32 offset="0x0028" name="BORDER_CLR"/>
+ <reg32 offset="0x002c" name="UNDERFLOW_CLR" type="mdp4_underflow_clr"/>
+ <reg32 offset="0x0030" name="HSYNC_SKEW"/>
+ <reg32 offset="0x0034" name="TEST_CNTL"/>
+ <reg32 offset="0x0038" name="CTRL_POLARITY" type="mdp4_ctrl_polarity"/>
+ </array>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml
new file mode 100644
index 000000000000..92f3263af170
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml
@@ -0,0 +1,806 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="display/mdp_common.xml"/>
+
+<!-- where does this belong? -->
+<domain name="VBIF" width="32">
+</domain>
+
+<domain name="MDSS" width="32">
+ <reg32 offset="0x00000" name="HW_VERSION">
+ <bitfield name="STEP" low="0" high="15" type="uint"/>
+ <bitfield name="MINOR" low="16" high="27" type="uint"/>
+ <bitfield name="MAJOR" low="28" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00010" name="HW_INTR_STATUS">
+ <bitfield name="INTR_MDP" pos="0" type="boolean"/>
+ <bitfield name="INTR_DSI0" pos="4" type="boolean"/>
+ <bitfield name="INTR_DSI1" pos="5" type="boolean"/>
+ <bitfield name="INTR_HDMI" pos="8" type="boolean"/>
+ <bitfield name="INTR_EDP" pos="12" type="boolean"/>
+ </reg32>
+</domain>
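The MDSS HW_VERSION register above packs the IP revision into a single word (MAJOR in bits 31:28, MINOR in bits 27:16, STEP in bits 15:0). As a minimal sketch of how such a value decodes, with an illustrative helper name and a made-up sample value rather than anything from this patch:

def mdss_hw_version(val):
    # Bit layout per the HW_VERSION bitfields above:
    # STEP = bits 15:0, MINOR = bits 27:16, MAJOR = bits 31:28
    step = val & 0xffff
    minor = (val >> 16) & 0xfff
    major = (val >> 28) & 0xf
    return (major, minor, step)

# Example with a made-up value: mdss_hw_version(0x10030000) -> (1, 3, 0)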
+
+<domain name="MDP5" width="32">
+
+ <enum name="mdp5_intf_type">
+ <value name="INTF_DISABLED" value="0x0"/>
+ <value name="INTF_DSI" value="0x1"/>
+ <value name="INTF_HDMI" value="0x3"/>
+ <value name="INTF_LCDC" value="0x5"/>
+ <value name="INTF_eDP" value="0x9"/>
+ <value name="INTF_VIRTUAL" value="0x64"/>
+ <!-- non-display interfaces are listed below: -->
+ <value name="INTF_WB" value="0x65"/>
+ </enum>
+
+ <enum name="mdp5_intfnum">
+ <value name="NO_INTF" value="0"/>
+ <value name="INTF0" value="1"/>
+ <value name="INTF1" value="2"/>
+ <value name="INTF2" value="3"/>
+ <value name="INTF3" value="4"/>
+ </enum>
+
+ <enum name="mdp5_pipe">
+ <value name="SSPP_NONE" value="0"/>
+ <value name="SSPP_VIG0" value="1"/>
+ <value name="SSPP_VIG1" value="2"/>
+ <value name="SSPP_VIG2" value="3"/>
+ <value name="SSPP_RGB0" value="4"/>
+ <value name="SSPP_RGB1" value="5"/>
+ <value name="SSPP_RGB2" value="6"/>
+ <value name="SSPP_DMA0" value="7"/>
+ <value name="SSPP_DMA1" value="8"/>
+ <value name="SSPP_VIG3" value="9"/>
+ <value name="SSPP_RGB3" value="10"/>
+ <value name="SSPP_CURSOR0" value="11"/>
+ <value name="SSPP_CURSOR1" value="12"/>
+ </enum>
+
+ <enum name="mdp5_format">
+ <!-- TODO -->
+ <value name="DUMMY" value="0"/>
+ </enum>
+
+ <enum name="mdp5_ctl_mode">
+ <value name="MODE_NONE" value="0"/>
+ <value name="MODE_WB_0_BLOCK" value="1"/>
+ <value name="MODE_WB_1_BLOCK" value="2"/>
+ <value name="MODE_WB_0_LINE" value="3"/>
+ <value name="MODE_WB_1_LINE" value="4"/>
+ <value name="MODE_WB_2_LINE" value="5"/>
+ </enum>
+
+ <enum name="mdp5_pack_3d">
+ <value name="PACK_3D_FRAME_INT" value="0"/>
+ <value name="PACK_3D_H_ROW_INT" value="1"/>
+ <value name="PACK_3D_V_ROW_INT" value="2"/>
+ <value name="PACK_3D_COL_INT" value="3"/>
+ </enum>
+
+ <enum name="mdp5_scale_filter">
+ <value name="SCALE_FILTER_NEAREST" value="0"/>
+ <value name="SCALE_FILTER_BIL" value="1"/>
+ <value name="SCALE_FILTER_PCMN" value="2"/>
+ <value name="SCALE_FILTER_CA" value="3"/>
+ </enum>
+
+ <enum name="mdp5_pipe_bwc">
+ <value name="BWC_LOSSLESS" value="0"/>
+ <value name="BWC_Q_HIGH" value="1"/>
+ <value name="BWC_Q_MED" value="2"/>
+ </enum>
+
+ <enum name="mdp5_cursor_format">
+ <value name="CURSOR_FMT_ARGB8888" value="0"/>
+ <value name="CURSOR_FMT_ARGB1555" value="2"/>
+ <value name="CURSOR_FMT_ARGB4444" value="4"/>
+ </enum>
+
+ <enum name="mdp5_cursor_alpha">
+ <value name="CURSOR_ALPHA_CONST" value="0"/>
+ <value name="CURSOR_ALPHA_PER_PIXEL" value="2"/>
+ </enum>
+
+ <bitset name="MDP5_IRQ">
+ <bitfield name="WB_0_DONE" pos="0" type="boolean"/>
+ <bitfield name="WB_1_DONE" pos="1" type="boolean"/>
+ <bitfield name="WB_2_DONE" pos="4" type="boolean"/>
+ <bitfield name="PING_PONG_0_DONE" pos="8" type="boolean"/>
+ <bitfield name="PING_PONG_1_DONE" pos="9" type="boolean"/>
+ <bitfield name="PING_PONG_2_DONE" pos="10" type="boolean"/>
+ <bitfield name="PING_PONG_3_DONE" pos="11" type="boolean"/>
+ <bitfield name="PING_PONG_0_RD_PTR" pos="12" type="boolean"/>
+ <bitfield name="PING_PONG_1_RD_PTR" pos="13" type="boolean"/>
+ <bitfield name="PING_PONG_2_RD_PTR" pos="14" type="boolean"/>
+ <bitfield name="PING_PONG_3_RD_PTR" pos="15" type="boolean"/>
+ <bitfield name="PING_PONG_0_WR_PTR" pos="16" type="boolean"/>
+ <bitfield name="PING_PONG_1_WR_PTR" pos="17" type="boolean"/>
+ <bitfield name="PING_PONG_2_WR_PTR" pos="18" type="boolean"/>
+ <bitfield name="PING_PONG_3_WR_PTR" pos="19" type="boolean"/>
+ <bitfield name="PING_PONG_0_AUTO_REF" pos="20" type="boolean"/>
+ <bitfield name="PING_PONG_1_AUTO_REF" pos="21" type="boolean"/>
+ <bitfield name="PING_PONG_2_AUTO_REF" pos="22" type="boolean"/>
+ <bitfield name="PING_PONG_3_AUTO_REF" pos="23" type="boolean"/>
+ <bitfield name="INTF0_UNDER_RUN" pos="24" type="boolean"/>
+ <bitfield name="INTF0_VSYNC" pos="25" type="boolean"/>
+ <bitfield name="INTF1_UNDER_RUN" pos="26" type="boolean"/>
+ <bitfield name="INTF1_VSYNC" pos="27" type="boolean"/>
+ <bitfield name="INTF2_UNDER_RUN" pos="28" type="boolean"/>
+ <bitfield name="INTF2_VSYNC" pos="29" type="boolean"/>
+ <bitfield name="INTF3_UNDER_RUN" pos="30" type="boolean"/>
+ <bitfield name="INTF3_VSYNC" pos="31" type="boolean"/>
+ </bitset>
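The MDP5_IRQ bitset above is reused by the INTR_EN, INTR_STATUS and INTR_CLEAR registers defined further down. A small, purely illustrative sketch of testing a raw status word against two of these bits:

MDP5_IRQ_PING_PONG_0_DONE = 1 << 8    # PING_PONG_0_DONE, pos=8
MDP5_IRQ_INTF1_VSYNC = 1 << 27        # INTF1_VSYNC, pos=27

def irq_pending(status, bit):
    # status is a raw INTR_STATUS read; True if the given IRQ bit is set
    return bool(status & bit)

# irq_pending(0x08000100, MDP5_IRQ_INTF1_VSYNC)       -> True
# irq_pending(0x08000100, MDP5_IRQ_PING_PONG_0_DONE)  -> True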
+
+ <bitset name="mdp5_smp_alloc" inline="yes">
+ <!-- Use "mdp5_cfg->mdp.smp.clients[enum mdp5_pipe]" instead -->
+ <bitfield name="CLIENT0" low="0" high="7" type="uint"/>
+ <bitfield name="CLIENT1" low="8" high="15" type="uint"/>
+ <bitfield name="CLIENT2" low="16" high="23" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x00000" name="HW_VERSION">
+ <bitfield name="STEP" low="0" high="15" type="uint"/>
+ <bitfield name="MINOR" low="16" high="27" type="uint"/>
+ <bitfield name="MAJOR" low="28" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00004" name="DISP_INTF_SEL">
+ <bitfield name="INTF0" low="0" high="7" type="mdp5_intf_type"/>
+ <bitfield name="INTF1" low="8" high="15" type="mdp5_intf_type"/>
+ <bitfield name="INTF2" low="16" high="23" type="mdp5_intf_type"/>
+ <bitfield name="INTF3" low="24" high="31" type="mdp5_intf_type"/>
+ </reg32>
+ <reg32 offset="0x00010" name="INTR_EN" type="MDP5_IRQ"/>
+ <reg32 offset="0x00014" name="INTR_STATUS" type="MDP5_IRQ"/>
+ <reg32 offset="0x00018" name="INTR_CLEAR" type="MDP5_IRQ"/>
+ <reg32 offset="0x0001C" name="HIST_INTR_EN"/>
+ <reg32 offset="0x00020" name="HIST_INTR_STATUS"/>
+ <reg32 offset="0x00024" name="HIST_INTR_CLEAR"/>
+ <reg32 offset="0x00028" name="SPARE_0">
+ <bitfield name="SPLIT_DPL_SINGLE_FLUSH_EN" pos="0"/>
+ </reg32>
+
+ <array offset="0x00080" name="SMP_ALLOC_W" length="8" stride="4">
+ <reg32 offset="0" name="REG" type="mdp5_smp_alloc"/>
+ </array>
+ <array offset="0x00130" name="SMP_ALLOC_R" length="8" stride="4">
+ <reg32 offset="0" name="REG" type="mdp5_smp_alloc"/>
+ </array>
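Each SMP_ALLOC_W/SMP_ALLOC_R word above holds three 8-bit client IDs (CLIENT0..CLIENT2), so a shared-memory-pool block is assigned by writing a client ID into one byte of one word. A hedged sketch of that indexing, assuming blocks map to the CLIENTn fields in order (names are illustrative):

def smp_alloc_slot(block):
    # Which SMP_ALLOC register and which CLIENTn byte a given SMP block uses,
    # assuming three 8-bit client fields per 32-bit word as defined above.
    reg_idx = block // 3     # index into the SMP_ALLOC_W/R array
    shift = (block % 3) * 8  # CLIENT0 -> 0, CLIENT1 -> 8, CLIENT2 -> 16
    return reg_idx, shift

# Example: SMP block 7 lands in word 2, CLIENT1 field: smp_alloc_slot(7) -> (2, 8)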
+
+ <enum name="mdp5_igc_type">
+ <value name="IGC_VIG" value="0"/> <!-- 0x200 -->
+ <value name="IGC_RGB" value="1"/> <!-- 0x210 -->
+ <value name="IGC_DMA" value="2"/> <!-- 0x220 -->
+ <value name="IGC_DSPP" value="3"/> <!-- 0x300 -->
+ </enum>
+ <array offsets="0x00200,0x00210,0x00220,0x00300" name="IGC" length="3" stride="0x10" index="mdp5_igc_type">
+ <array offset="0x00" name="LUT" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VAL" low="0" high="11"/>
+ <bitfield name="INDEX_UPDATE" pos="25" type="boolean"/>
+ <!--
+ not sure about these:
+ /* INDEX_UPDATE */
+ data = (1 << 25) | (((~(1 << blk_idx)) & 0x7) << 28);
+ MDSS_MDP_REG_WRITE(offset, (cfg->c0_c1_data[0] & 0xFFF) | data);
+ -->
+ <bitfield name="DISABLE_PIPE_0" pos="28" type="boolean"/>
+ <bitfield name="DISABLE_PIPE_1" pos="29" type="boolean"/>
+ <bitfield name="DISABLE_PIPE_2" pos="30" type="boolean"/>
+ </reg32>
+ </array>
+ </array>
+ <reg32 offset="0x002f4" name="SPLIT_DPL_EN"/>
+ <reg32 offset="0x002f8" name="SPLIT_DPL_UPPER">
+ <bitfield name="SMART_PANEL" pos="1" type="boolean"/>
+ <bitfield name="SMART_PANEL_FREE_RUN" pos="2" type="boolean"/>
+ <bitfield name="INTF1_SW_TRG_MUX" pos="4" type="boolean"/>
+ <bitfield name="INTF2_SW_TRG_MUX" pos="8" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x003f0" name="SPLIT_DPL_LOWER">
+ <bitfield name="SMART_PANEL" pos="1" type="boolean"/>
+ <bitfield name="SMART_PANEL_FREE_RUN" pos="2" type="boolean"/>
+ <bitfield name="INTF1_TG_SYNC" pos="4" type="boolean"/>
+ <bitfield name="INTF2_TG_SYNC" pos="8" type="boolean"/>
+ </reg32>
+
+<!-- check length/index.. -->
+ <array doffsets="mdp5_cfg->ctl.base[0],mdp5_cfg->ctl.base[1],mdp5_cfg->ctl.base[2],mdp5_cfg->ctl.base[3],mdp5_cfg->ctl.base[4]" name="CTL" length="5" stride="0x400">
+ <array offsets="0x000,0x004,0x008,0x00C,0x010,0x024" name="LAYER" length="6" stride="4">
+ <!--
+ NOTE: for backwards compat (from when there were fewer stages),
+ this register has the low three bits of mdp_mixer_stage_id, with
+ the high bit coming from LAYER_EXT
+ -->
+ <reg32 offset="0" name="REG">
+ <bitfield name="VIG0" low="0" high="2" type="uint"/>
+ <bitfield name="VIG1" low="3" high="5" type="uint"/>
+ <bitfield name="VIG2" low="6" high="8" type="uint"/>
+ <bitfield name="RGB0" low="9" high="11" type="uint"/>
+ <bitfield name="RGB1" low="12" high="14" type="uint"/>
+ <bitfield name="RGB2" low="15" high="17" type="uint"/>
+ <bitfield name="DMA0" low="18" high="20" type="uint"/>
+ <bitfield name="DMA1" low="21" high="23" type="uint"/>
+ <bitfield name="BORDER_COLOR" pos="24" type="boolean"/>
+ <bitfield name="CURSOR_OUT" pos="25" type="boolean"/>
+ <bitfield name="VIG3" low="26" high="28" type="uint"/>
+ <bitfield name="RGB3" low="29" high="31" type="uint"/>
+ </reg32>
+ </array>
+ <reg32 offset="0x014" name="OP">
+ <bitfield name="MODE" low="0" high="3" type="mdp5_ctl_mode"/>
+ <bitfield name="INTF_NUM" low="4" high="6" type="mdp5_intfnum"/>
+ <bitfield name="CMD_MODE" pos="17" type="boolean"/>
+ <bitfield name="PACK_3D_ENABLE" pos="19" type="boolean"/>
+ <bitfield name="PACK_3D" low="20" high="21" type="mdp5_pack_3d"/>
+ </reg32>
+ <reg32 offset="0x018" name="FLUSH">
+ <bitfield name="VIG0" pos="0" type="boolean"/>
+ <bitfield name="VIG1" pos="1" type="boolean"/>
+ <bitfield name="VIG2" pos="2" type="boolean"/>
+ <bitfield name="RGB0" pos="3" type="boolean"/>
+ <bitfield name="RGB1" pos="4" type="boolean"/>
+ <bitfield name="RGB2" pos="5" type="boolean"/>
+ <bitfield name="LM0" pos="6" type="boolean"/>
+ <bitfield name="LM1" pos="7" type="boolean"/>
+ <bitfield name="LM2" pos="8" type="boolean"/>
+ <bitfield name="LM3" pos="9" type="boolean"/>
+ <bitfield name="LM4" pos="10" type="boolean"/>
+ <bitfield name="DMA0" pos="11" type="boolean"/>
+ <bitfield name="DMA1" pos="12" type="boolean"/>
+ <bitfield name="DSPP0" pos="13" type="boolean"/>
+ <bitfield name="DSPP1" pos="14" type="boolean"/>
+ <bitfield name="DSPP2" pos="15" type="boolean"/>
+ <bitfield name="WB" pos="16" type="boolean"/>
+ <bitfield name="CTL" pos="17" type="boolean"/>
+ <bitfield name="VIG3" pos="18" type="boolean"/>
+ <bitfield name="RGB3" pos="19" type="boolean"/>
+ <bitfield name="LM5" pos="20" type="boolean"/>
+ <bitfield name="DSPP3" pos="21" type="boolean"/>
+ <bitfield name="CURSOR_0" pos="22" type="boolean"/>
+ <bitfield name="CURSOR_1" pos="23" type="boolean"/>
+ <bitfield name="CHROMADOWN_0" pos="26" type="boolean"/>
+ <bitfield name="TIMING_3" pos="28" type="boolean"/>
+ <bitfield name="TIMING_2" pos="29" type="boolean"/>
+ <bitfield name="TIMING_1" pos="30" type="boolean"/>
+ <bitfield name="TIMING_0" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x01C" name="START"/>
+ <reg32 offset="0x020" name="PACK_3D"/>
+ <array offsets="0x040,0x044,0x048,0x04C,0x050,0x054" name="LAYER_EXT" length="6" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VIG0_BIT3" pos="0" type="boolean"/>
+ <bitfield name="VIG1_BIT3" pos="2" type="boolean"/>
+ <bitfield name="VIG2_BIT3" pos="4" type="boolean"/>
+ <bitfield name="VIG3_BIT3" pos="6" type="boolean"/>
+ <bitfield name="RGB0_BIT3" pos="8" type="boolean"/>
+ <bitfield name="RGB1_BIT3" pos="10" type="boolean"/>
+ <bitfield name="RGB2_BIT3" pos="12" type="boolean"/>
+ <bitfield name="RGB3_BIT3" pos="14" type="boolean"/>
+ <bitfield name="DMA0_BIT3" pos="16" type="boolean"/>
+ <bitfield name="DMA1_BIT3" pos="18" type="boolean"/>
+ <bitfield name="CURSOR0" low="20" high="23" type="mdp_mixer_stage_id"/>
+ <bitfield name="CURSOR1" low="26" high="29" type="mdp_mixer_stage_id"/>
+ </reg32>
+ </array>
+ </array>
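As the note at the top of the LAYER array explains, a pipe's mixer stage is split across two registers: the low three bits of mdp_mixer_stage_id go into the pipe's field in CTL[n].LAYER[m], and the fourth bit goes into the pipe's *_BIT3 bit (or the CURSORn field) in CTL[n].LAYER_EXT[m]. A minimal sketch of that split, using illustrative names:

def split_stage(stage):
    # stage is an mdp_mixer_stage_id value (STAGE_UNUSED=0 .. STAGE6=8)
    low3 = stage & 0x7         # written to the pipe's field in LAYER
    bit3 = (stage >> 3) & 0x1  # written to the pipe's *_BIT3 bit in LAYER_EXT
    return low3, bit3

# Example: STAGE6 (value 8) -> (0, 1); STAGE2 (value 4) -> (4, 0)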
+
+ <enum name="mdp5_data_format">
+ <value name="DATA_FORMAT_RGB" value="0"/>
+ <value name="DATA_FORMAT_YUV" value="1"/>
+ </enum>
+
+ <array doffsets="INVALID_IDX(idx),mdp5_cfg->pipe_vig.base[0],mdp5_cfg->pipe_vig.base[1],mdp5_cfg->pipe_vig.base[2],mdp5_cfg->pipe_rgb.base[0],mdp5_cfg->pipe_rgb.base[1],mdp5_cfg->pipe_rgb.base[2],mdp5_cfg->pipe_dma.base[0],mdp5_cfg->pipe_dma.base[1],mdp5_cfg->pipe_vig.base[3],mdp5_cfg->pipe_rgb.base[3],mdp5_cfg->pipe_cursor.base[0],mdp5_cfg->pipe_cursor.base[1]" name="PIPE" length="10" stride="0x400" index="mdp5_pipe">
+ <reg32 offset="0x200" name="OP_MODE">
+ <bitfield name="CSC_DST_DATA_FORMAT" pos="19" type="mdp5_data_format"/>
+ <bitfield name="CSC_SRC_DATA_FORMAT" pos="18" type="mdp5_data_format"/>
+ <bitfield name="CSC_1_EN" pos="17" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x2C4" name="HIST_CTL_BASE"/>
+ <reg32 offset="0x2F0" name="HIST_LUT_BASE"/>
+ <reg32 offset="0x300" name="HIST_LUT_SWAP"/>
+ <reg32 offset="0x320" name="CSC_1_MATRIX_COEFF_0">
+ <bitfield name="COEFF_11" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_12" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x324" name="CSC_1_MATRIX_COEFF_1">
+ <bitfield name="COEFF_13" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_21" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x328" name="CSC_1_MATRIX_COEFF_2">
+ <bitfield name="COEFF_22" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_23" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x32c" name="CSC_1_MATRIX_COEFF_3">
+ <bitfield name="COEFF_31" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_32" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x330" name="CSC_1_MATRIX_COEFF_4">
+ <bitfield name="COEFF_33" low="0" high="12" type="uint"/>
+ </reg32>
+ <array offset="0x334" name="CSC_1_PRE_CLAMP" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="HIGH" low="0" high="7" type="uint"/>
+ <bitfield name="LOW" low="8" high="15" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x340" name="CSC_1_POST_CLAMP" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="HIGH" low="0" high="7" type="uint"/>
+ <bitfield name="LOW" low="8" high="15" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x34c" name="CSC_1_PRE_BIAS" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VALUE" low="0" high="8" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x358" name="CSC_1_POST_BIAS" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VALUE" low="0" high="8" type="uint"/>
+ </reg32>
+ </array>
+ <!-- SSPP: -->
+ <reg32 offset="0x000" name="SRC_SIZE" type="reg_wh"/>
+ <reg32 offset="0x004" name="SRC_IMG_SIZE" type="reg_wh"/>
+ <reg32 offset="0x008" name="SRC_XY" type="reg_xy"/>
+ <reg32 offset="0x00C" name="OUT_SIZE" type="reg_wh"/>
+ <reg32 offset="0x010" name="OUT_XY" type="reg_xy"/>
+ <reg32 offset="0x014" name="SRC0_ADDR"/>
+ <reg32 offset="0x018" name="SRC1_ADDR"/>
+ <reg32 offset="0x01C" name="SRC2_ADDR"/>
+ <reg32 offset="0x020" name="SRC3_ADDR"/>
+ <reg32 offset="0x024" name="SRC_STRIDE_A">
+ <bitfield name="P0" low="0" high="15" type="uint"/>
+ <bitfield name="P1" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x028" name="SRC_STRIDE_B">
+ <bitfield name="P2" low="0" high="15" type="uint"/>
+ <bitfield name="P3" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x02C" name="STILE_FRAME_SIZE"/>
+ <reg32 offset="0x030" name="SRC_FORMAT">
+ <bitfield name="G_BPC" low="0" high="1" type="mdp_bpc"/>
+ <bitfield name="B_BPC" low="2" high="3" type="mdp_bpc"/>
+ <bitfield name="R_BPC" low="4" high="5" type="mdp_bpc"/>
+ <bitfield name="A_BPC" low="6" high="7" type="mdp_bpc_alpha"/>
+ <bitfield name="ALPHA_ENABLE" pos="8" type="boolean"/>
+ <bitfield name="CPP" low="9" high="10" type="uint">
+ <brief>8bit characters per pixel minus 1</brief>
+ </bitfield>
+ <bitfield name="ROT90" pos="11" type="boolean"/>
+ <bitfield name="UNPACK_COUNT" low="12" high="13" type="uint"/>
+ <bitfield name="UNPACK_TIGHT" pos="17" type="boolean"/>
+ <bitfield name="UNPACK_ALIGN_MSB" pos="18" type="boolean"/>
+ <bitfield name="FETCH_TYPE" low="19" high="20" type="mdp_fetch_type"/>
+ <bitfield name="CHROMA_SAMP" low="23" high="24" type="mdp_chroma_samp_type"/>
+ </reg32>
+ <reg32 offset="0x034" name="SRC_UNPACK" type="mdp_unpack_pattern"/>
+ <reg32 offset="0x038" name="SRC_OP_MODE">
+ <bitfield name="BWC_EN" pos="0" type="boolean"/>
+ <bitfield name="BWC" low="1" high="2" type="mdp5_pipe_bwc"/>
+ <bitfield name="FLIP_LR" pos="13" type="boolean"/>
+ <bitfield name="FLIP_UD" pos="14" type="boolean"/>
+ <bitfield name="IGC_EN" pos="16" type="boolean"/>
+ <bitfield name="IGC_ROM_0" pos="17" type="boolean"/>
+ <bitfield name="IGC_ROM_1" pos="18" type="boolean"/>
+ <bitfield name="DEINTERLACE" pos="22" type="boolean"/>
+ <bitfield name="DEINTERLACE_ODD" pos="23" type="boolean"/>
+ <bitfield name="SW_PIX_EXT_OVERRIDE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x03c" name="SRC_CONSTANT_COLOR"/>
+ <reg32 offset="0x048" name="FETCH_CONFIG"/>
+ <reg32 offset="0x04c" name="VC1_RANGE"/>
+ <reg32 offset="0x050" name="REQPRIO_FIFO_WM_0"/>
+ <reg32 offset="0x054" name="REQPRIO_FIFO_WM_1"/>
+ <reg32 offset="0x058" name="REQPRIO_FIFO_WM_2"/>
+ <reg32 offset="0x070" name="SRC_ADDR_SW_STATUS"/>
+ <reg32 offset="0x0a4" name="CURRENT_SRC0_ADDR"/>
+ <reg32 offset="0x0a8" name="CURRENT_SRC1_ADDR"/>
+ <reg32 offset="0x0ac" name="CURRENT_SRC2_ADDR"/>
+ <reg32 offset="0x0b0" name="CURRENT_SRC3_ADDR"/>
+ <reg32 offset="0x0b4" name="DECIMATION">
+ <bitfield name="VERT" low="0" high="7" type="uint"/>
+ <bitfield name="HORZ" low="8" high="15" type="uint"/>
+ </reg32>
+ <array offsets="0x100,0x110,0x120" name="SW_PIX_EXT" length="3" stride="0x10" index="mdp_component_type">
+ <!--
+ Notes:
+ o These values only take effect if SW_PIX_EXT_OVERRIDE is set in the SRC_OP_MODE register
+ o For signed values (int): + indicates overfetch, - indicates line drop
+ -->
+ <reg32 offset="0x00" name="LR">
+ <bitfield name="LEFT_RPT" low="0" high="7" type="uint"/>
+ <bitfield name="LEFT_OVF" low="8" high="15" type="int"/>
+ <bitfield name="RIGHT_RPT" low="16" high="23" type="uint"/>
+ <bitfield name="RIGHT_OVF" low="24" high="31" type="int"/>
+ </reg32>
+ <reg32 offset="0x04" name="TB">
+ <bitfield name="TOP_RPT" low="0" high="7" type="uint"/>
+ <bitfield name="TOP_OVF" low="8" high="15" type="int"/>
+ <bitfield name="BOTTOM_RPT" low="16" high="23" type="uint"/>
+ <bitfield name="BOTTOM_OVF" low="24" high="31" type="int"/>
+ </reg32>
+ <reg32 offset="0x08" name="REQ_PIXELS">
+ <bitfield name="LEFT_RIGHT" low="0" high="15" type="uint"/>
+ <bitfield name="TOP_BOTTOM" low="16" high="31" type="uint"/>
+ </reg32>
+ </array>
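Per the note above these fields, the SW_PIX_EXT values only apply when SW_PIX_EXT_OVERRIDE is set in SRC_OP_MODE, and the overfetch fields are signed (positive = overfetch, negative = line drop). A sketch of packing the LR word, assuming the signed fields are plain 8-bit two's complement as the int type suggests (helper name is illustrative):

def sw_pix_ext_lr(left_rpt, left_ovf, right_rpt, right_ovf):
    # LEFT_RPT bits 7:0, LEFT_OVF bits 15:8 (signed),
    # RIGHT_RPT bits 23:16, RIGHT_OVF bits 31:24 (signed)
    def s8(v):
        return v & 0xff  # truncate to 8-bit two's complement
    return ((left_rpt & 0xff) | (s8(left_ovf) << 8) |
            ((right_rpt & 0xff) << 16) | (s8(right_ovf) << 24))

# Example: one pixel of left overfetch, one line dropped on the right:
# hex(sw_pix_ext_lr(0, 1, 0, -1)) -> '0xff000100'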
+ <reg32 offset="0x204" name="SCALE_CONFIG">
+ <bitfield name="SCALEX_EN" pos="0" type="boolean"/>
+ <bitfield name="SCALEY_EN" pos="1" type="boolean"/>
+ <bitfield name="SCALEX_FILTER_COMP_0" low="8" high="9" type="mdp5_scale_filter"/>
+ <bitfield name="SCALEY_FILTER_COMP_0" low="10" high="11" type="mdp5_scale_filter"/>
+ <bitfield name="SCALEX_FILTER_COMP_1_2" low="12" high="13" type="mdp5_scale_filter"/>
+ <bitfield name="SCALEY_FILTER_COMP_1_2" low="14" high="15" type="mdp5_scale_filter"/>
+ <bitfield name="SCALEX_FILTER_COMP_3" low="16" high="17" type="mdp5_scale_filter"/>
+ <bitfield name="SCALEY_FILTER_COMP_3" low="18" high="19" type="mdp5_scale_filter"/>
+ </reg32>
+ <reg32 offset="0x210" name="SCALE_PHASE_STEP_X"/>
+ <reg32 offset="0x214" name="SCALE_PHASE_STEP_Y"/>
+ <reg32 offset="0x218" name="SCALE_CR_PHASE_STEP_X"/>
+ <reg32 offset="0x21c" name="SCALE_CR_PHASE_STEP_Y"/>
+ <reg32 offset="0x220" name="SCALE_INIT_PHASE_X"/>
+ <reg32 offset="0x224" name="SCALE_INIT_PHASE_Y"/>
+ </array>
+
+ <array doffsets="mdp5_cfg->lm.base[0],mdp5_cfg->lm.base[1],mdp5_cfg->lm.base[2],mdp5_cfg->lm.base[3],mdp5_cfg->lm.base[4],mdp5_cfg->lm.base[5]" name="LM" length="6" stride="0x400">
+ <reg32 offset="0x000" name="BLEND_COLOR_OUT">
+ <bitfield name="STAGE0_FG_ALPHA" pos="1" type="boolean"/>
+ <bitfield name="STAGE1_FG_ALPHA" pos="2" type="boolean"/>
+ <bitfield name="STAGE2_FG_ALPHA" pos="3" type="boolean"/>
+ <bitfield name="STAGE3_FG_ALPHA" pos="4" type="boolean"/>
+ <bitfield name="STAGE4_FG_ALPHA" pos="5" type="boolean"/>
+ <bitfield name="STAGE5_FG_ALPHA" pos="6" type="boolean"/>
+ <bitfield name="STAGE6_FG_ALPHA" pos="7" type="boolean"/>
+ <bitfield name="SPLIT_LEFT_RIGHT" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x004" name="OUT_SIZE" type="reg_wh"/>
+ <reg32 offset="0x008" name="BORDER_COLOR_0"/>
+ <reg32 offset="0x010" name="BORDER_COLOR_1"/>
+ <array offsets="0x020,0x050,0x080,0x0B0,0x230,0x260,0x290" name="BLEND" length="7" stride="0x30">
+ <reg32 offset="0x00" name="OP_MODE">
+ <bitfield name="FG_ALPHA" low="0" high="1" type="mdp_alpha_type"/>
+ <bitfield name="FG_INV_ALPHA" pos="2" type="boolean"/>
+ <bitfield name="FG_MOD_ALPHA" pos="3" type="boolean"/>
+ <bitfield name="FG_INV_MOD_ALPHA" pos="4" type="boolean"/>
+ <bitfield name="FG_TRANSP_EN" pos="5" type="boolean"/>
+ <bitfield name="BG_ALPHA" low="8" high="9" type="mdp_alpha_type"/>
+ <bitfield name="BG_INV_ALPHA" pos="10" type="boolean"/>
+ <bitfield name="BG_MOD_ALPHA" pos="11" type="boolean"/>
+ <bitfield name="BG_INV_MOD_ALPHA" pos="12" type="boolean"/>
+ <bitfield name="BG_TRANSP_EN" pos="13" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x04" name="FG_ALPHA"/>
+ <reg32 offset="0x08" name="BG_ALPHA"/>
+ <reg32 offset="0x0c" name="FG_TRANSP_LOW0"/>
+ <reg32 offset="0x10" name="FG_TRANSP_LOW1"/>
+ <reg32 offset="0x14" name="FG_TRANSP_HIGH0"/>
+ <reg32 offset="0x18" name="FG_TRANSP_HIGH1"/>
+ <reg32 offset="0x1c" name="BG_TRANSP_LOW0"/>
+ <reg32 offset="0x20" name="BG_TRANSP_LOW1"/>
+ <reg32 offset="0x24" name="BG_TRANSP_HIGH0"/>
+ <reg32 offset="0x28" name="BG_TRANSP_HIGH1"/>
+ </array>
+ <reg32 offset="0x0e0" name="CURSOR_IMG_SIZE">
+ <bitfield name="SRC_W" low="0" high="15" type="uint"/>
+ <bitfield name="SRC_H" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0e4" name="CURSOR_SIZE">
+ <bitfield name="ROI_W" low="0" high="15" type="uint"/>
+ <bitfield name="ROI_H" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0e8" name="CURSOR_XY">
+ <bitfield name="SRC_X" low="0" high="15" type="uint"/>
+ <bitfield name="SRC_Y" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0dc" name="CURSOR_STRIDE">
+ <bitfield name="STRIDE" low="0" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0ec" name="CURSOR_FORMAT">
+ <bitfield name="FORMAT" low="0" high="2" type="mdp5_cursor_format"/>
+ </reg32>
+ <reg32 offset="0x0f0" name="CURSOR_BASE_ADDR"/>
+ <reg32 offset="0x0f4" name="CURSOR_START_XY">
+ <bitfield name="X_START" low="0" high="15" type="uint"/>
+ <bitfield name="Y_START" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x0f8" name="CURSOR_BLEND_CONFIG">
+ <bitfield name="BLEND_EN" pos="0" type="boolean"/>
+ <bitfield name="BLEND_ALPHA_SEL" low="1" high="2" type="mdp5_cursor_alpha"/>
+ <bitfield name="BLEND_TRANSP_EN" pos="3" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0fc" name="CURSOR_BLEND_PARAM"/>
+ <reg32 offset="0x100" name="CURSOR_BLEND_TRANSP_LOW0"/>
+ <reg32 offset="0x104" name="CURSOR_BLEND_TRANSP_LOW1"/>
+ <reg32 offset="0x108" name="CURSOR_BLEND_TRANSP_HIGH0"/>
+ <reg32 offset="0x10c" name="CURSOR_BLEND_TRANSP_HIGH1"/>
+ <reg32 offset="0x110" name="GC_LUT_BASE"/>
+ </array>
+
+ <array doffsets="mdp5_cfg->dspp.base[0],mdp5_cfg->dspp.base[1],mdp5_cfg->dspp.base[2],mdp5_cfg->dspp.base[3]" name="DSPP" length="4" stride="0x400">
+ <reg32 offset="0x000" name="OP_MODE">
+ <bitfield name="IGC_LUT_EN" pos="0" type="boolean"/>
+ <bitfield name="IGC_TBL_IDX" low="1" high="3" type="uint"/>
+ <bitfield name="PCC_EN" pos="4" type="boolean"/>
+ <bitfield name="DITHER_EN" pos="8" type="boolean"/>
+ <bitfield name="HIST_EN" pos="16" type="boolean"/>
+ <bitfield name="AUTO_CLEAR" pos="17" type="boolean"/>
+ <bitfield name="HIST_LUT_EN" pos="19" type="boolean"/>
+ <bitfield name="PA_EN" pos="20" type="boolean"/>
+ <bitfield name="GAMUT_EN" pos="23" type="boolean"/>
+ <bitfield name="GAMUT_ORDER" pos="24" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x030" name="PCC_BASE"/>
+ <reg32 offset="0x150" name="DITHER_DEPTH"/>
+ <reg32 offset="0x210" name="HIST_CTL_BASE"/>
+ <reg32 offset="0x230" name="HIST_LUT_BASE"/>
+ <reg32 offset="0x234" name="HIST_LUT_SWAP"/>
+ <reg32 offset="0x238" name="PA_BASE"/>
+ <reg32 offset="0x2dc" name="GAMUT_BASE"/>
+ <reg32 offset="0x2b0" name="GC_BASE"/>
+ </array>
+
+ <array doffsets="mdp5_cfg->pp.base[0],mdp5_cfg->pp.base[1],mdp5_cfg->pp.base[2],mdp5_cfg->pp.base[3]" name="PP" length="4" stride="0x100">
+ <reg32 offset="0x000" name="TEAR_CHECK_EN"/>
+ <reg32 offset="0x004" name="SYNC_CONFIG_VSYNC">
+ <bitfield name="COUNT" low="0" high="18" type="uint"/>
+ <bitfield name="COUNTER_EN" pos="19" type="boolean"/>
+ <bitfield name="IN_EN" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x008" name="SYNC_CONFIG_HEIGHT"/>
+ <reg32 offset="0x00c" name="SYNC_WRCOUNT">
+ <bitfield name="LINE_COUNT" low="0" high="15" type="uint"/>
+ <bitfield name="FRAME_COUNT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x010" name="VSYNC_INIT_VAL"/>
+ <reg32 offset="0x014" name="INT_COUNT_VAL">
+ <bitfield name="LINE_COUNT" low="0" high="15" type="uint"/>
+ <bitfield name="FRAME_COUNT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x018" name="SYNC_THRESH">
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="CONTINUE" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x01c" name="START_POS"/>
+ <reg32 offset="0x020" name="RD_PTR_IRQ"/>
+ <reg32 offset="0x024" name="WR_PTR_IRQ"/>
+ <reg32 offset="0x028" name="OUT_LINE_COUNT"/>
+ <reg32 offset="0x02c" name="PP_LINE_COUNT"/>
+ <reg32 offset="0x030" name="AUTOREFRESH_CONFIG"/>
+ <reg32 offset="0x034" name="FBC_MODE"/>
+ <reg32 offset="0x038" name="FBC_BUDGET_CTL"/>
+ <reg32 offset="0x03c" name="FBC_LOSSY_MODE"/>
+ </array>
+
+ <enum name="mdp5_block_size">
+ <value name="BLOCK_SIZE_64" value="0"/>
+ <value name="BLOCK_SIZE_128" value="1"/>
+ </enum>
+
+ <enum name="mdp5_rotate_mode">
+ <value name="ROTATE_0" value="0"/>
+ <value name="ROTATE_90" value="1"/>
+ </enum>
+
+ <enum name="mdp5_chroma_downsample_method">
+ <value name="DS_MTHD_NO_PIXEL_DROP" value="0"/>
+ <value name="DS_MTHD_PIXEL_DROP" value="1"/>
+ </enum>
+
+ <array doffsets="mdp5_cfg->wb.base[0],mdp5_cfg->wb.base[1],mdp5_cfg->wb.base[2],mdp5_cfg->wb.base[3],mdp5_cfg->wb.base[4]" name="WB" length="5" stride="0x400">
+ <reg32 offset="0x000" name="DST_FORMAT">
+ <bitfield name="DSTC0_OUT" low="0" high="1" type="uint"/>
+ <bitfield name="DSTC1_OUT" low="2" high="3" type="uint"/>
+ <bitfield name="DSTC2_OUT" low="4" high="5" type="uint"/>
+ <bitfield name="DSTC3_OUT" low="6" high="7" type="uint"/>
+ <bitfield name="DSTC3_EN" pos="8" type="boolean"/>
+ <bitfield name="DST_BPP" low="9" high="10" type="uint"/>
+ <bitfield name="PACK_COUNT" low="12" high="13" type="uint"/>
+ <bitfield name="DST_ALPHA_X" pos="14" type="boolean"/>
+ <bitfield name="PACK_TIGHT" pos="17" type="boolean"/>
+ <bitfield name="PACK_ALIGN_MSB" pos="18" type="boolean"/>
+ <bitfield name="WRITE_PLANES" low="19" high="20" type="uint"/>
+ <bitfield name="DST_DITHER_EN" pos="22" type="boolean"/>
+ <bitfield name="DST_CHROMA_SAMP" low="23" high="25" type="uint"/>
+ <bitfield name="DST_CHROMA_SITE" low="26" high="29" type="uint"/>
+ <bitfield name="FRAME_FORMAT" low="30" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x004" name="DST_OP_MODE">
+ <bitfield name="BWC_ENC_EN" pos="0" type="boolean"/>
+ <bitfield name="BWC_ENC_OP" low="1" high="2" type="uint"/>
+ <bitfield name="BLOCK_SIZE" low="4" high="4" type="uint"/>
+ <bitfield name="ROT_MODE" low="5" high="5" type="uint"/>
+ <bitfield name="ROT_EN" pos="6" type="boolean"/>
+ <bitfield name="CSC_EN" pos="8" type="boolean"/>
+ <bitfield name="CSC_SRC_DATA_FORMAT" low="9" high="9" type="uint"/>
+ <bitfield name="CSC_DST_DATA_FORMAT" low="10" high="10" type="uint"/>
+ <bitfield name="CHROMA_DWN_SAMPLE_EN" pos="11" type="boolean"/>
+ <bitfield name="CHROMA_DWN_SAMPLE_FORMAT" low="12" high="12" type="uint"/>
+ <bitfield name="CHROMA_DWN_SAMPLE_H_MTHD" low="13" high="13" type="uint"/>
+ <bitfield name="CHROMA_DWN_SAMPLE_V_MTHD" low="14" high="14" type="uint"/>
+ </reg32>
+ <reg32 offset="0x008" name="DST_PACK_PATTERN">
+ <bitfield name="ELEMENT0" low="0" high="1" type="uint"/>
+ <bitfield name="ELEMENT1" low="8" high="9" type="uint"/>
+ <bitfield name="ELEMENT2" low="16" high="17" type="uint"/>
+ <bitfield name="ELEMENT3" low="24" high="25" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00c" name="DST0_ADDR"/>
+ <reg32 offset="0x010" name="DST1_ADDR"/>
+ <reg32 offset="0x014" name="DST2_ADDR"/>
+ <reg32 offset="0x018" name="DST3_ADDR"/>
+ <reg32 offset="0x01c" name="DST_YSTRIDE0">
+ <bitfield name="DST0_YSTRIDE" low="0" high="15" type="uint"/>
+ <bitfield name="DST1_YSTRIDE" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x020" name="DST_YSTRIDE1">
+ <bitfield name="DST2_YSTRIDE" low="0" high="15" type="uint"/>
+ <bitfield name="DST3_YSTRIDE" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x024" name="DST_DITHER_BITDEPTH"/>
+ <reg32 offset="0x030" name="DITHER_MATRIX_ROW0"/>
+ <reg32 offset="0x034" name="DITHER_MATRIX_ROW1"/>
+ <reg32 offset="0x038" name="DITHER_MATRIX_ROW2"/>
+ <reg32 offset="0x03c" name="DITHER_MATRIX_ROW3"/>
+ <reg32 offset="0x048" name="DST_WRITE_CONFIG"/>
+ <reg32 offset="0x050" name="ROTATION_DNSCALER"/>
+ <reg32 offset="0x060" name="N16_INIT_PHASE_X_0_3"/>
+ <reg32 offset="0x064" name="N16_INIT_PHASE_X_1_2"/>
+ <reg32 offset="0x068" name="N16_INIT_PHASE_Y_0_3"/>
+ <reg32 offset="0x06c" name="N16_INIT_PHASE_Y_1_2"/>
+ <reg32 offset="0x074" name="OUT_SIZE">
+ <bitfield name="DST_W" low="0" high="15" type="uint"/>
+ <bitfield name="DST_H" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x078" name="ALPHA_X_VALUE"/>
+ <reg32 offset="0x260" name="CSC_MATRIX_COEFF_0">
+ <bitfield name="COEFF_11" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_12" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x264" name="CSC_MATRIX_COEFF_1">
+ <bitfield name="COEFF_13" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_21" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x268" name="CSC_MATRIX_COEFF_2">
+ <bitfield name="COEFF_22" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_23" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x26c" name="CSC_MATRIX_COEFF_3">
+ <bitfield name="COEFF_31" low="0" high="12" type="uint"/>
+ <bitfield name="COEFF_32" low="16" high="28" type="uint"/>
+ </reg32>
+ <reg32 offset="0x270" name="CSC_MATRIX_COEFF_4">
+ <bitfield name="COEFF_33" low="0" high="12" type="uint"/>
+ </reg32>
+ <array offset="0x274" name="CSC_COMP_PRECLAMP" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="HIGH" low="0" high="7" type="uint"/>
+ <bitfield name="LOW" low="8" high="15" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x280" name="CSC_COMP_POSTCLAMP" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="HIGH" low="0" high="7" type="uint"/>
+ <bitfield name="LOW" low="8" high="15" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x28c" name="CSC_COMP_PREBIAS" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VALUE" low="0" high="8" type="uint"/>
+ </reg32>
+ </array>
+ <array offset="0x298" name="CSC_COMP_POSTBIAS" length="3" stride="4">
+ <reg32 offset="0" name="REG">
+ <bitfield name="VALUE" low="0" high="8" type="uint"/>
+ </reg32>
+ </array>
+ </array>
+
+ <array doffsets="mdp5_cfg->intf.base[0],mdp5_cfg->intf.base[1],mdp5_cfg->intf.base[2],mdp5_cfg->intf.base[3],mdp5_cfg->intf.base[4]" name="INTF" length="5" stride="0x200">
+ <reg32 offset="0x000" name="TIMING_ENGINE_EN"/>
+ <reg32 offset="0x004" name="CONFIG"/>
+ <reg32 offset="0x008" name="HSYNC_CTL">
+ <bitfield name="PULSEW" low="0" high="15" type="uint"/>
+ <bitfield name="PERIOD" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00c" name="VSYNC_PERIOD_F0" type="uint"/>
+ <reg32 offset="0x010" name="VSYNC_PERIOD_F1" type="uint"/>
+ <reg32 offset="0x014" name="VSYNC_LEN_F0" type="uint"/>
+ <reg32 offset="0x018" name="VSYNC_LEN_F1" type="uint"/>
+ <reg32 offset="0x01c" name="DISPLAY_VSTART_F0" type="uint"/>
+ <reg32 offset="0x020" name="DISPLAY_VSTART_F1" type="uint"/>
+ <reg32 offset="0x024" name="DISPLAY_VEND_F0" type="uint"/>
+ <reg32 offset="0x028" name="DISPLAY_VEND_F1" type="uint"/>
+ <reg32 offset="0x02c" name="ACTIVE_VSTART_F0">
+ <bitfield name="VAL" low="0" high="30" type="uint"/>
+ <bitfield name="ACTIVE_V_ENABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x030" name="ACTIVE_VSTART_F1">
+ <bitfield name="VAL" low="0" high="30" type="uint"/>
+ </reg32>
+ <reg32 offset="0x034" name="ACTIVE_VEND_F0" type="uint"/>
+ <reg32 offset="0x038" name="ACTIVE_VEND_F1" type="uint"/>
+ <reg32 offset="0x03c" name="DISPLAY_HCTL">
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="0x040" name="ACTIVE_HCTL">
+ <bitfield name="START" low="0" high="14" type="uint"/>
+ <bitfield name="END" low="16" high="30" type="uint"/>
+ <bitfield name="ACTIVE_H_ENABLE" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x044" name="BORDER_COLOR"/>
+ <reg32 offset="0x048" name="UNDERFLOW_COLOR"/>
+ <reg32 offset="0x04c" name="HSYNC_SKEW"/>
+ <reg32 offset="0x050" name="POLARITY_CTL">
+ <bitfield name="HSYNC_LOW" pos="0" type="boolean"/>
+ <bitfield name="VSYNC_LOW" pos="1" type="boolean"/>
+ <bitfield name="DATA_EN_LOW" pos="2" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x054" name="TEST_CTL"/>
+ <reg32 offset="0x058" name="TP_COLOR0"/>
+ <reg32 offset="0x05c" name="TP_COLOR1"/>
+ <reg32 offset="0x084" name="DSI_CMD_MODE_TRIGGER_EN"/>
+ <reg32 offset="0x090" name="PANEL_FORMAT" type="mdp5_format"/>
+ <reg32 offset="0x0a8" name="FRAME_LINE_COUNT_EN"/>
+ <reg32 offset="0x0ac" name="FRAME_COUNT"/>
+ <reg32 offset="0x0b0" name="LINE_COUNT"/>
+ <reg32 offset="0x0f0" name="DEFLICKER_CONFIG"/>
+ <reg32 offset="0x0f4" name="DEFLICKER_STRNG_COEFF"/>
+ <reg32 offset="0x0f8" name="DEFLICKER_WEAK_COEFF"/>
+ <reg32 offset="0x100" name="TPG_ENABLE"/>
+ <reg32 offset="0x104" name="TPG_MAIN_CONTROL"/>
+ <reg32 offset="0x108" name="TPG_VIDEO_CONFIG"/>
+ <reg32 offset="0x10c" name="TPG_COMPONENT_LIMITS"/>
+ <reg32 offset="0x110" name="TPG_RECTANGLE"/>
+ <reg32 offset="0x114" name="TPG_INITIAL_VALUE"/>
+ <reg32 offset="0x118" name="TPG_BLK_WHITE_PATTERN_FRAME"/>
+ <reg32 offset="0x11c" name="TPG_RGB_MAPPING"/>
+ </array>
+
+ <array doffsets="mdp5_cfg->ad.base[0],mdp5_cfg->ad.base[1]" name="AD" length="2" stride="0x200">
+ <reg32 offset="0x000" name="BYPASS"/>
+ <reg32 offset="0x004" name="CTRL_0"/>
+ <reg32 offset="0x008" name="CTRL_1"/>
+ <reg32 offset="0x00c" name="FRAME_SIZE"/>
+ <reg32 offset="0x010" name="CON_CTRL_0"/>
+ <reg32 offset="0x014" name="CON_CTRL_1"/>
+ <reg32 offset="0x018" name="STR_MAN"/>
+ <reg32 offset="0x01c" name="VAR"/>
+ <reg32 offset="0x020" name="DITH"/>
+ <reg32 offset="0x024" name="DITH_CTRL"/>
+ <reg32 offset="0x028" name="AMP_LIM"/>
+ <reg32 offset="0x02c" name="SLOPE"/>
+ <reg32 offset="0x030" name="BW_LVL"/>
+ <reg32 offset="0x034" name="LOGO_POS"/>
+ <reg32 offset="0x038" name="LUT_FI"/>
+ <reg32 offset="0x07c" name="LUT_CC"/>
+ <reg32 offset="0x0c8" name="STR_LIM"/>
+ <reg32 offset="0x0cc" name="CALIB_AB"/>
+ <reg32 offset="0x0d0" name="CALIB_CD"/>
+ <reg32 offset="0x0d4" name="MODE_SEL"/>
+ <reg32 offset="0x0d8" name="TFILT_CTRL"/>
+ <reg32 offset="0x0dc" name="BL_MINMAX"/>
+ <reg32 offset="0x0e0" name="BL"/>
+ <reg32 offset="0x0e8" name="BL_MAX"/>
+ <reg32 offset="0x0ec" name="AL"/>
+ <reg32 offset="0x0f0" name="AL_MIN"/>
+ <reg32 offset="0x0f4" name="AL_FILT"/>
+ <reg32 offset="0x0f8" name="CFG_BUF"/>
+ <reg32 offset="0x100" name="LUT_AL"/>
+ <reg32 offset="0x144" name="TARG_STR"/>
+ <reg32 offset="0x148" name="START_CALC"/>
+ <reg32 offset="0x14c" name="STR_OUT"/>
+ <reg32 offset="0x154" name="BL_OUT"/>
+ <reg32 offset="0x158" name="CALC_DONE"/>
+ </array>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/mdp_common.xml b/drivers/gpu/drm/msm/registers/display/mdp_common.xml
new file mode 100644
index 000000000000..f1b6345c1323
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp_common.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+
+<!-- random bits that seem the same between mdp4 and mdp5 (i.e. not much) -->
+
+<enum name="mdp_chroma_samp_type">
+ <value name="CHROMA_FULL" value="0"/>
+ <value name="CHROMA_H2V1" value="1"/>
+ <value name="CHROMA_H1V2" value="2"/>
+ <value name="CHROMA_420" value="3"/>
+</enum>
+
+<enum name="mdp_fetch_type">
+ <value name="MDP_PLANE_INTERLEAVED" value="0"/>
+ <value name="MDP_PLANE_PLANAR" value="1"/>
+ <value name="MDP_PLANE_PSEUDO_PLANAR" value="2"/>
+</enum>
+
+<enum name="mdp_mixer_stage_id">
+ <value name="STAGE_UNUSED" value="0"/>
+ <value name="STAGE_BASE" value="1"/>
+ <value name="STAGE0" value="2"/> <!-- zorder 0 -->
+ <value name="STAGE1" value="3"/> <!-- zorder 1 -->
+ <value name="STAGE2" value="4"/> <!-- zorder 2 -->
+ <value name="STAGE3" value="5"/> <!-- zorder 3 -->
+ <value name="STAGE4" value="6"/> <!-- zorder 4 -->
+ <value name="STAGE5" value="7"/> <!-- zorder 5 -->
+ <value name="STAGE6" value="8"/> <!-- zorder 6 -->
+ <value name="STAGE_MAX" value="8"/> <!-- maximum zorder -->
+</enum>
+
+<enum name="mdp_alpha_type">
+ <value name="FG_CONST" value="0"/>
+ <value name="BG_CONST" value="1"/>
+ <value name="FG_PIXEL" value="2"/>
+ <value name="BG_PIXEL" value="3"/>
+</enum>
+
+<enum name="mdp_component_type">
+ <value name="COMP_0" value="0"/> <!-- Y component -->
+ <value name="COMP_1_2" value="1"/> <!-- Cb/Cr comp. -->
+ <value name="COMP_3" value="2"/> <!-- Trans comp. -->
+ <value name="COMP_MAX" value="3"/>
+</enum>
+
+<enum name="mdp_bpc">
+ <brief>bits per component (non-alpha channel)</brief>
+ <value name="BPC4" value="0"/> <!-- 4 bits -->
+ <value name="BPC5" value="1"/> <!-- 5 bits -->
+ <value name="BPC6" value="2"/> <!-- 6 bits -->
+ <value name="BPC8" value="3"/> <!-- 8 bits -->
+</enum>
+
+<enum name="mdp_bpc_alpha">
+ <brief>bits per component (alpha channel)</brief>
+ <value name="BPC1A" value="0"/> <!-- 1 bit -->
+ <value name="BPC4A" value="1"/> <!-- 4 bits -->
+ <value name="BPC6A" value="2"/> <!-- 6 bits -->
+ <value name="BPC8A" value="3"/> <!-- 8 bits -->
+</enum>
+
+<enum name="mdp_fetch_mode">
+ <value name="MDP_FETCH_LINEAR" value="0"/>
+ <value name="MDP_FETCH_TILE" value="1"/>
+ <value name="MDP_FETCH_UBWC" value="2"/>
+</enum>
+
+<bitset name="reg_wh" inline="yes">
+ <bitfield name="HEIGHT" low="16" high="31" type="uint"/>
+ <bitfield name="WIDTH" low="0" high="15" type="uint"/>
+</bitset>
+
+<bitset name="reg_xy" inline="yes">
+ <bitfield name="Y" low="16" high="31" type="uint"/>
+ <bitfield name="X" low="0" high="15" type="uint"/>
+</bitset>
+
+<bitset name="mdp_unpack_pattern" inline="yes">
+ <bitfield name="ELEM0" low="0" high="7"/>
+ <bitfield name="ELEM1" low="8" high="15"/>
+ <bitfield name="ELEM2" low="16" high="23"/>
+ <bitfield name="ELEM3" low="24" high="31"/>
+</bitset>
+
+</database>
+
diff --git a/drivers/gpu/drm/msm/registers/display/msm.xml b/drivers/gpu/drm/msm/registers/display/msm.xml
new file mode 100644
index 000000000000..429c35b73bad
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/msm.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://nouveau.freedesktop.org/ rules-ng.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<doc>
+ Register definitions for the display related hw blocks on
+ msm/snapdragon
+</doc>
+
+<!--
+<enum name="chipset">
+ <value name="MDP40"/>
+ <value name="MDP50"/>
+</enum>
+-->
+
+<import file="mdp4.xml"/>
+<import file="mdp5.xml"/>
+<import file="dsi.xml"/>
+<import file="dsi_phy_28nm_8960.xml"/>
+<import file="dsi_phy_28nm.xml"/>
+<import file="dsi_phy_20nm.xml"/>
+<import file="dsi_phy_14nm.xml"/>
+<import file="dsi_phy_10nm.xml"/>
+<import file="dsi_phy_7nm.xml"/>
+<import file="sfpb.xml"/>
+<import file="hdmi.xml"/>
+<import file="edp.xml"/>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/display/sfpb.xml b/drivers/gpu/drm/msm/registers/display/sfpb.xml
new file mode 100644
index 000000000000..de1cf43c131f
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/sfpb.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="SFPB" width="32">
+ <enum name="sfpb_ahb_arb_master_port_en">
+ <value name="SFPB_MASTER_PORT_ENABLE" value="3"/>
+ <value name="SFPB_MASTER_PORT_DISABLE" value="0"/>
+ </enum>
+ <reg32 offset="0x0058" name="GPREG">
+ <bitfield name="MASTER_PORT_EN" low="11" high="12" type="sfpb_ahb_arb_master_port_en"/>
+ </reg32>
+</domain>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/freedreno_copyright.xml b/drivers/gpu/drm/msm/registers/freedreno_copyright.xml
new file mode 100644
index 000000000000..854efdd2e5fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/freedreno_copyright.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+
+<copyright year="2013">
+
+<author name="Rob Clark" email="robdclark@gmail.com"><nick name="robclark"/>
+Initial Author.
+</author>
+
+<author name="Ilia Mirkin" email="imirkin@alum.mit.edu"><nick name="imirkin"/>
+many a3xx/a4xx contributions
+</author>
+
+<license>
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+</license>
+
+</copyright>
+</database>
+
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
new file mode 100644
index 000000000000..fc3bfdc991d2
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -0,0 +1,970 @@
+#!/usr/bin/python3
+#
+# Copyright © 2019-2024 Google, Inc.
+#
+# SPDX-License-Identifier: MIT
+
+import xml.parsers.expat
+import sys
+import os
+import collections
+import argparse
+import time
+import datetime
+
+class Error(Exception):
+ def __init__(self, message):
+ self.message = message
+
+class Enum(object):
+ def __init__(self, name):
+ self.name = name
+ self.values = []
+
+ def has_name(self, name):
+ for (n, value) in self.values:
+ if n == name:
+ return True
+ return False
+
+ def names(self):
+ return [n for (n, value) in self.values]
+
+ def dump(self):
+ use_hex = False
+ for (name, value) in self.values:
+ if value > 0x1000:
+ use_hex = True
+
+ print("enum %s {" % self.name)
+ for (name, value) in self.values:
+ if use_hex:
+ print("\t%s = 0x%08x," % (name, value))
+ else:
+ print("\t%s = %d," % (name, value))
+ print("};\n")
+
+ def dump_pack_struct(self):
+ pass
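For reference, the Enum class above just accumulates (name, value) pairs parsed from the XML and prints them as a C enum. A quick illustration of its output, feeding it the values of the mdp4_dma enum from mdp4.xml:

e = Enum("mdp4_dma")
e.values = [("DMA_P", 0), ("DMA_S", 1), ("DMA_E", 2)]
e.dump()
# Prints (tab-indented):
# enum mdp4_dma {
#         DMA_P = 0,
#         DMA_S = 1,
#         DMA_E = 2,
# };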
+
+class Field(object):
+ def __init__(self, name, low, high, shr, type, parser):
+ self.name = name
+ self.low = low
+ self.high = high
+ self.shr = shr
+ self.type = type
+
+ builtin_types = [ None, "a3xx_regid", "boolean", "uint", "hex", "int", "fixed", "ufixed", "float", "address", "waddress" ]
+
+ maxpos = parser.current_bitsize - 1
+
+ if low < 0 or low > maxpos:
+ raise parser.error("low attribute out of range: %d" % low)
+ if high < 0 or high > maxpos:
+ raise parser.error("high attribute out of range: %d" % high)
+ if high < low:
+ raise parser.error("low is greater than high: low=%d, high=%d" % (low, high))
+ if self.type == "boolean" and not low == high:
+ raise parser.error("booleans should be 1 bit fields")
+ elif self.type == "float" and not (high - low == 31 or high - low == 15):
+ raise parser.error("floats should be 16 or 32 bit fields")
+ elif not self.type in builtin_types and not self.type in parser.enums:
+ raise parser.error("unknown type '%s'" % self.type)
+
+ def ctype(self, var_name):
+ if self.type == None:
+ type = "uint32_t"
+ val = var_name
+ elif self.type == "boolean":
+ type = "bool"
+ val = var_name
+ elif self.type == "uint" or self.type == "hex" or self.type == "a3xx_regid":
+ type = "uint32_t"
+ val = var_name
+ elif self.type == "int":
+ type = "int32_t"
+ val = var_name
+ elif self.type == "fixed":
+ type = "float"
+ val = "((int32_t)(%s * %d.0))" % (var_name, 1 << self.radix)
+ elif self.type == "ufixed":
+ type = "float"
+ val = "((uint32_t)(%s * %d.0))" % (var_name, 1 << self.radix)
+ elif self.type == "float" and self.high - self.low == 31:
+ type = "float"
+ val = "fui(%s)" % var_name
+ elif self.type == "float" and self.high - self.low == 15:
+ type = "float"
+ val = "_mesa_float_to_half(%s)" % var_name
+ elif self.type in [ "address", "waddress" ]:
+ type = "uint64_t"
+ val = var_name
+ else:
+ type = "enum %s" % self.type
+ val = var_name
+
+ if self.shr > 0:
+ val = "(%s >> %d)" % (val, self.shr)
+
+ return (type, val)
+
+def tab_to(name, value):
+ tab_count = (68 - (len(name) & ~7)) // 8
+ if tab_count <= 0:
+ tab_count = 1
+ print(name + ('\t' * tab_count) + value)
+
+def mask(low, high):
+ return ((0xffffffffffffffff >> (64 - (high + 1 - low))) << low)
+
+def field_name(reg, f):
+ if f.name:
+ name = f.name.lower()
+ else:
+ # We hit this path when a reg is defined with no bitset fields, ie.
+ # <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
+ name = reg.name.lower()
+
+ if (name in [ "double", "float", "int" ]) or not (name[0].isalpha()):
+ name = "_" + name
+
+ return name
+
+# indices - array of (ctype, stride, __offsets_NAME)
+def indices_varlist(indices):
+ return ", ".join(["i%d" % i for i in range(len(indices))])
+
+def indices_prototype(indices):
+ return ", ".join(["%s i%d" % (ctype, idx)
+ for (idx, (ctype, stride, offset)) in enumerate(indices)])
+
+def indices_strides(indices):
+ return " + ".join(["0x%x*i%d" % (stride, idx)
+ if stride else
+ "%s(i%d)" % (offset, idx)
+ for (idx, (ctype, stride, offset)) in enumerate(indices)])
+
+class Bitset(object):
+ def __init__(self, name, template):
+ self.name = name
+ self.inline = False
+ if template:
+ self.fields = template.fields[:]
+ else:
+ self.fields = []
+
+ # Get address field if there is one in the bitset, else return None:
+ def get_address_field(self):
+ for f in self.fields:
+ if f.type in [ "address", "waddress" ]:
+ return f
+ return None
+
+ def dump_regpair_builder(self, reg):
+ print("#ifndef NDEBUG")
+ known_mask = 0
+ for f in self.fields:
+ known_mask |= mask(f.low, f.high)
+ if f.type in [ "boolean", "address", "waddress" ]:
+ continue
+ type, val = f.ctype("fields.%s" % field_name(reg, f))
+ print(" assert((%-40s & 0x%08x) == 0);" % (val, 0xffffffff ^ mask(0 , f.high - f.low)))
+ print(" assert((%-40s & 0x%08x) == 0);" % ("fields.unknown", known_mask))
+ print("#endif\n")
+
+ print(" return (struct fd_reg_pair) {")
+ if reg.array:
+ print(" .reg = REG_%s(__i)," % reg.full_name)
+ else:
+ print(" .reg = REG_%s," % reg.full_name)
+
+ print(" .value =")
+ for f in self.fields:
+ if f.type in [ "address", "waddress" ]:
+ continue
+ else:
+ type, val = f.ctype("fields.%s" % field_name(reg, f))
+ print(" (%-40s << %2d) |" % (val, f.low))
+ value_name = "dword"
+ if reg.bit_size == 64:
+ value_name = "qword"
+ print(" fields.unknown | fields.%s," % (value_name,))
+
+ address = self.get_address_field()
+ if address:
+ print(" .bo = fields.bo,")
+ print(" .is_address = true,")
+ if f.type == "waddress":
+ print(" .bo_write = true,")
+ print(" .bo_offset = fields.bo_offset,")
+ print(" .bo_shift = %d," % address.shr)
+ print(" .bo_low = %d," % address.low)
+
+ print(" };")
+
+ def dump_pack_struct(self, reg=None):
+ if not reg:
+ return
+
+ prefix = reg.full_name
+
+ print("struct %s {" % prefix)
+ for f in self.fields:
+ if f.type in [ "address", "waddress" ]:
+ tab_to(" __bo_type", "bo;")
+ tab_to(" uint32_t", "bo_offset;")
+ continue
+ name = field_name(reg, f)
+
+ type, val = f.ctype("var")
+
+ tab_to(" %s" % type, "%s;" % name)
+ if reg.bit_size == 64:
+ tab_to(" uint64_t", "unknown;")
+ tab_to(" uint64_t", "qword;")
+ else:
+ tab_to(" uint32_t", "unknown;")
+ tab_to(" uint32_t", "dword;")
+ print("};\n")
+
+ if reg.array:
+ print("static inline struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
+ (prefix, prefix))
+ else:
+ print("static inline struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
+ (prefix, prefix))
+
+ self.dump_regpair_builder(reg)
+
+ print("\n}\n")
+
+ if self.get_address_field():
+ skip = ", { .reg = 0 }"
+ else:
+ skip = ""
+
+ if reg.array:
+ print("#define %s(__i, ...) pack_%s(__i, __struct_cast(%s) { __VA_ARGS__ })%s\n" %
+ (prefix, prefix, prefix, skip))
+ else:
+ print("#define %s(...) pack_%s(__struct_cast(%s) { __VA_ARGS__ })%s\n" %
+ (prefix, prefix, prefix, skip))
+
+
+ def dump(self, prefix=None):
+ if prefix == None:
+ prefix = self.name
+ for f in self.fields:
+ if f.name:
+ name = prefix + "_" + f.name
+ else:
+ name = prefix
+
+ if not f.name and f.low == 0 and f.shr == 0 and not f.type in ["float", "fixed", "ufixed"]:
+ pass
+ elif f.type == "boolean" or (f.type == None and f.low == f.high):
+ tab_to("#define %s" % name, "0x%08x" % (1 << f.low))
+ else:
+ tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high))
+ tab_to("#define %s__SHIFT" % name, "%d" % f.low)
+ type, val = f.ctype("val")
+
+ print("static inline uint32_t %s(%s val)\n{" % (name, type))
+ if f.shr > 0:
+ print("\tassert(!(val & 0x%x));" % mask(0, f.shr - 1))
+ print("\treturn ((%s) << %s__SHIFT) & %s__MASK;\n}" % (val, name, name))
+ print()
+
+class Array(object):
+ def __init__(self, attrs, domain, variant, parent, index_type):
+ if "name" in attrs:
+ self.local_name = attrs["name"]
+ else:
+ self.local_name = ""
+ self.domain = domain
+ self.variant = variant
+ self.parent = parent
+ if self.parent:
+ self.name = self.parent.name + "_" + self.local_name
+ else:
+ self.name = self.local_name
+ if "offsets" in attrs:
+ self.offsets = map(lambda i: "0x%08x" % int(i, 0), attrs["offsets"].split(","))
+ self.fixed_offsets = True
+ elif "doffsets" in attrs:
+ self.offsets = map(lambda s: "(%s)" % s , attrs["doffsets"].split(","))
+ self.fixed_offsets = True
+ else:
+ self.offset = int(attrs["offset"], 0)
+ self.stride = int(attrs["stride"], 0)
+ self.fixed_offsets = False
+ if "index" in attrs:
+ self.index_type = index_type
+ else:
+ self.index_type = None
+ self.length = int(attrs["length"], 0)
+ if "usage" in attrs:
+ self.usages = attrs["usage"].split(',')
+ else:
+ self.usages = None
+
+ def index_ctype(self):
+ if not self.index_type:
+ return "uint32_t"
+ else:
+ return "enum %s" % self.index_type.name
+
+ # Generate array of (ctype, stride, __offsets_NAME)
+ def indices(self):
+ if self.parent:
+ indices = self.parent.indices()
+ else:
+ indices = []
+ if self.length != 1:
+ if self.fixed_offsets:
+ indices.append((self.index_ctype(), None, "__offset_%s" % self.local_name))
+ else:
+ indices.append((self.index_ctype(), self.stride, None))
+ return indices
+
+ def total_offset(self):
+ offset = 0
+ if not self.fixed_offsets:
+ offset += self.offset
+ if self.parent:
+ offset += self.parent.total_offset()
+ return offset
+
+ def dump(self):
+ proto = indices_varlist(self.indices())
+ strides = indices_strides(self.indices())
+ array_offset = self.total_offset()
+ if self.fixed_offsets:
+ print("static inline uint32_t __offset_%s(%s idx)" % (self.local_name, self.index_ctype()))
+ print("{\n\tswitch (idx) {")
+ if self.index_type:
+ for val, offset in zip(self.index_type.names(), self.offsets):
+ print("\t\tcase %s: return %s;" % (val, offset))
+ else:
+ for idx, offset in enumerate(self.offsets):
+ print("\t\tcase %d: return %s;" % (idx, offset))
+ print("\t\tdefault: return INVALID_IDX(idx);")
+ print("\t}\n}")
+ if proto == '':
+ tab_to("#define REG_%s_%s" % (self.domain, self.name), "0x%08x\n" % array_offset)
+ else:
+ tab_to("#define REG_%s_%s(%s)" % (self.domain, self.name, proto), "(0x%08x + %s )\n" % (array_offset, strides))
+
+ def dump_pack_struct(self):
+ pass
+
+ def dump_regpair_builder(self):
+ pass
+
+class Reg(object):
+ def __init__(self, attrs, domain, array, bit_size):
+ self.name = attrs["name"]
+ self.domain = domain
+ self.array = array
+ self.offset = int(attrs["offset"], 0)
+ self.type = None
+ self.bit_size = bit_size
+ if array:
+ self.name = array.name + "_" + self.name
+ self.full_name = self.domain + "_" + self.name
+ if "stride" in attrs:
+ self.stride = int(attrs["stride"], 0)
+ self.length = int(attrs["length"], 0)
+ else:
+ self.stride = None
+ self.length = None
+
+ # Generate array of (ctype, stride, __offsets_NAME)
+ def indices(self):
+ if self.array:
+ indices = self.array.indices()
+ else:
+ indices = []
+ if self.stride:
+ indices.append(("uint32_t", self.stride, None))
+ return indices
+
+ def total_offset(self):
+ if self.array:
+ return self.array.total_offset() + self.offset
+ else:
+ return self.offset
+
+ def dump(self):
+ proto = indices_prototype(self.indices())
+ strides = indices_strides(self.indices())
+ offset = self.total_offset()
+ if proto == '':
+ tab_to("#define REG_%s" % self.full_name, "0x%08x" % offset)
+ else:
+ print("static inline uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (self.full_name, proto, offset, strides))
+
+ if self.bitset.inline:
+ self.bitset.dump(self.full_name)
+
+ def dump_pack_struct(self):
+ if self.bitset.inline:
+ self.bitset.dump_pack_struct(self)
+
+ def dump_regpair_builder(self):
+ if self.bitset.inline:
+ self.bitset.dump_regpair_builder(self)
+
+ def dump_py(self):
+ print("\tREG_%s = 0x%08x" % (self.full_name, self.offset))
+
+
+class Parser(object):
+ def __init__(self):
+ self.current_array = None
+ self.current_domain = None
+ self.current_prefix = None
+ self.current_prefix_type = None
+ self.current_stripe = None
+ self.current_bitset = None
+ self.current_bitsize = 32
+ # The varset attribute on the domain specifies the enum which
+ # specifies all possible hw variants:
+ self.current_varset = None
+ # Regs that have multiple variants.. we only generate the C++
+ # template-based struct-packers for these
+ self.variant_regs = {}
+ # Information in which contexts regs are used, to be used in
+ # debug options
+ self.usage_regs = collections.defaultdict(list)
+ self.bitsets = {}
+ self.enums = {}
+ self.variants = set()
+ self.file = []
+ self.xml_files = []
+ self.copyright_year = None
+ self.authors = []
+ self.license = None
+
+ def error(self, message):
+ parser, filename = self.stack[-1]
+ return Error("%s:%d:%d: %s" % (filename, parser.CurrentLineNumber, parser.CurrentColumnNumber, message))
+
+ def prefix(self, variant=None):
+ if self.current_prefix_type == "variant" and variant:
+ return variant
+ elif self.current_stripe:
+ return self.current_stripe + "_" + self.current_domain
+ elif self.current_prefix:
+ return self.current_prefix + "_" + self.current_domain
+ else:
+ return self.current_domain
+
+ def parse_field(self, name, attrs):
+ try:
+ if "pos" in attrs:
+ high = low = int(attrs["pos"], 0)
+ elif "high" in attrs and "low" in attrs:
+ high = int(attrs["high"], 0)
+ low = int(attrs["low"], 0)
+ else:
+ low = 0
+ high = self.current_bitsize - 1
+
+ if "type" in attrs:
+ type = attrs["type"]
+ else:
+ type = None
+
+ if "shr" in attrs:
+ shr = int(attrs["shr"], 0)
+ else:
+ shr = 0
+
+ b = Field(name, low, high, shr, type, self)
+
+ if type == "fixed" or type == "ufixed":
+ b.radix = int(attrs["radix"], 0)
+
+ self.current_bitset.fields.append(b)
+ except ValueError as e:
+ raise self.error(e)
+
+ def parse_varset(self, attrs):
+ # Inherit the varset from the enclosing domain if not overridden:
+ varset = self.current_varset
+ if "varset" in attrs:
+ varset = self.enums[attrs["varset"]]
+ return varset
+
+ def parse_variants(self, attrs):
+ if not "variants" in attrs:
+ return None
+ variant = attrs["variants"].split(",")[0]
+ if "-" in variant:
+ variant = variant[:variant.index("-")]
+
+ varset = self.parse_varset(attrs)
+
+ assert varset.has_name(variant)
+
+ return variant
+
+ def add_all_variants(self, reg, attrs, parent_variant):
+ # TODO this should really handle *all* variants, including dealing
+ # with open ended ranges (ie. "A2XX,A4XX-") (we have the varset
+ # enum now to make that possible)
+ variant = self.parse_variants(attrs)
+ if not variant:
+ variant = parent_variant
+
+ if reg.name not in self.variant_regs:
+ self.variant_regs[reg.name] = {}
+ else:
+ # All variants must be same size:
+ v = next(iter(self.variant_regs[reg.name]))
+ assert self.variant_regs[reg.name][v].bit_size == reg.bit_size
+
+ self.variant_regs[reg.name][variant] = reg
+
+ def add_all_usages(self, reg, usages):
+ if not usages:
+ return
+
+ for usage in usages:
+ self.usage_regs[usage].append(reg)
+
+ self.variants.add(reg.domain)
+
+ def do_validate(self, schemafile):
+ if self.validate == False:
+ return
+
+ try:
+ from lxml import etree
+
+ parser, filename = self.stack[-1]
+ dirname = os.path.dirname(filename)
+
+ # we expect this to look like <namespace url> schema.xsd.. I think
+ # technically it is supposed to be just a URL, but that doesn't
+ # quite match up to what we do.. Just skip over everything up to
+ # and including the first whitespace character:
+ schemafile = schemafile[schemafile.rindex(" ")+1:]
+
+ # this is a bit cheesy, but the xml file to validate could be
+ # in a child directory, i.e. we don't really know where the schema
+ # file is, the way the rnn C code does. So if it doesn't exist
+ # just look one level up
+ if not os.path.exists(dirname + "/" + schemafile):
+ schemafile = "../" + schemafile
+
+ if not os.path.exists(dirname + "/" + schemafile):
+ raise self.error("Cannot find schema for: " + filename)
+
+ xmlschema_doc = etree.parse(dirname + "/" + schemafile)
+ xmlschema = etree.XMLSchema(xmlschema_doc)
+
+ xml_doc = etree.parse(filename)
+ if not xmlschema.validate(xml_doc):
+ error_str = str(xmlschema.error_log.filter_from_errors()[0])
+ raise self.error("Schema validation failed for: " + filename + "\n" + error_str)
+ except ImportError as e:
+ if self.validate:
+ raise e
+
+ print("lxml not found, skipping validation", file=sys.stderr)
+
+ def do_parse(self, filename):
+ filepath = os.path.abspath(filename)
+ if filepath in self.xml_files:
+ return
+ self.xml_files.append(filepath)
+ file = open(filename, "rb")
+ parser = xml.parsers.expat.ParserCreate()
+ self.stack.append((parser, filename))
+ parser.StartElementHandler = self.start_element
+ parser.EndElementHandler = self.end_element
+ parser.CharacterDataHandler = self.character_data
+ parser.buffer_text = True
+ parser.ParseFile(file)
+ self.stack.pop()
+ file.close()
+
+ def parse(self, rnn_path, filename, validate):
+ self.path = rnn_path
+ self.stack = []
+ self.validate = validate
+ self.do_parse(filename)
+
+ def parse_reg(self, attrs, bit_size):
+ self.current_bitsize = bit_size
+ if "type" in attrs and attrs["type"] in self.bitsets:
+ bitset = self.bitsets[attrs["type"]]
+ if bitset.inline:
+ self.current_bitset = Bitset(attrs["name"], bitset)
+ self.current_bitset.inline = True
+ else:
+ self.current_bitset = bitset
+ else:
+ self.current_bitset = Bitset(attrs["name"], None)
+ self.current_bitset.inline = True
+ if "type" in attrs:
+ self.parse_field(None, attrs)
+
+ variant = self.parse_variants(attrs)
+ if not variant and self.current_array:
+ variant = self.current_array.variant
+
+ self.current_reg = Reg(attrs, self.prefix(variant), self.current_array, bit_size)
+ self.current_reg.bitset = self.current_bitset
+
+ if len(self.stack) == 1:
+ self.file.append(self.current_reg)
+
+ if variant is not None:
+ self.add_all_variants(self.current_reg, attrs, variant)
+
+ usages = None
+ if "usage" in attrs:
+ usages = attrs["usage"].split(',')
+ elif self.current_array:
+ usages = self.current_array.usages
+
+ self.add_all_usages(self.current_reg, usages)
+
+ def start_element(self, name, attrs):
+ self.cdata = ""
+ if name == "import":
+ filename = attrs["file"]
+ self.do_parse(os.path.join(self.path, filename))
+ elif name == "domain":
+ self.current_domain = attrs["name"]
+ if "prefix" in attrs:
+ self.current_prefix = self.parse_variants(attrs)
+ self.current_prefix_type = attrs["prefix"]
+ else:
+ self.current_prefix = None
+ self.current_prefix_type = None
+ if "varset" in attrs:
+ self.current_varset = self.enums[attrs["varset"]]
+ elif name == "stripe":
+ self.current_stripe = self.parse_variants(attrs)
+ elif name == "enum":
+ self.current_enum_value = 0
+ self.current_enum = Enum(attrs["name"])
+ self.enums[attrs["name"]] = self.current_enum
+ if len(self.stack) == 1:
+ self.file.append(self.current_enum)
+ elif name == "value":
+ if "value" in attrs:
+ value = int(attrs["value"], 0)
+ else:
+ value = self.current_enum_value
+ self.current_enum.values.append((attrs["name"], value))
+ elif name == "reg32":
+ self.parse_reg(attrs, 32)
+ elif name == "reg64":
+ self.parse_reg(attrs, 64)
+ elif name == "array":
+ self.current_bitsize = 32
+ variant = self.parse_variants(attrs)
+ index_type = self.enums[attrs["index"]] if "index" in attrs else None
+ self.current_array = Array(attrs, self.prefix(variant), variant, self.current_array, index_type)
+ if len(self.stack) == 1:
+ self.file.append(self.current_array)
+ elif name == "bitset":
+ self.current_bitset = Bitset(attrs["name"], None)
+ if "inline" in attrs and attrs["inline"] == "yes":
+ self.current_bitset.inline = True
+ self.bitsets[self.current_bitset.name] = self.current_bitset
+ if len(self.stack) == 1 and not self.current_bitset.inline:
+ self.file.append(self.current_bitset)
+ elif name == "bitfield" and self.current_bitset:
+ self.parse_field(attrs["name"], attrs)
+ elif name == "database":
+ self.do_validate(attrs["xsi:schemaLocation"])
+ elif name == "copyright":
+ self.copyright_year = attrs["year"]
+ elif name == "author":
+ self.authors.append(attrs["name"] + " <" + attrs["email"] + "> " + attrs["name"])
+
+ def end_element(self, name):
+ if name == "domain":
+ self.current_domain = None
+ self.current_prefix = None
+ self.current_prefix_type = None
+ elif name == "stripe":
+ self.current_stripe = None
+ elif name == "bitset":
+ self.current_bitset = None
+ elif name == "reg32":
+ self.current_reg = None
+ elif name == "array":
+ self.current_array = self.current_array.parent
+ elif name == "enum":
+ self.current_enum = None
+ elif name == "license":
+ self.license = self.cdata
+
+ def character_data(self, data):
+ self.cdata += data
+
+ def dump_reg_usages(self):
+ d = collections.defaultdict(list)
+ for usage, regs in self.usage_regs.items():
+ for reg in regs:
+ variants = self.variant_regs.get(reg.name)
+ if variants:
+ for variant, vreg in variants.items():
+ if reg == vreg:
+ d[(usage, variant)].append(reg)
+ else:
+ for variant in self.variants:
+ d[(usage, variant)].append(reg)
+
+ print("#ifdef __cplusplus")
+
+ for usage, regs in self.usage_regs.items():
+ print("template<chip CHIP> constexpr inline uint16_t %s_REGS[] = {};" % (usage.upper()))
+
+ for (usage, variant), regs in d.items():
+ offsets = []
+
+ for reg in regs:
+ if reg.array:
+ for i in range(reg.array.length):
+ offsets.append(reg.array.offset + reg.offset + i * reg.array.stride)
+ if reg.bit_size == 64:
+ offsets.append(offsets[-1] + 1)
+ else:
+ offsets.append(reg.offset)
+ if reg.bit_size == 64:
+ offsets.append(offsets[-1] + 1)
+
+ offsets.sort()
+
+ print("template<> constexpr inline uint16_t %s_REGS<%s>[] = {" % (usage.upper(), variant))
+ for offset in offsets:
+ print("\t%s," % hex(offset))
+ print("};")
+
+ print("#endif")
+
+ def dump(self):
+ enums = []
+ bitsets = []
+ regs = []
+ for e in self.file:
+ if isinstance(e, Enum):
+ enums.append(e)
+ elif isinstance(e, Bitset):
+ bitsets.append(e)
+ else:
+ regs.append(e)
+
+ for e in enums + bitsets + regs:
+ e.dump()
+
+ self.dump_reg_usages()
+
+
+ def dump_regs_py(self):
+ regs = []
+ for e in self.file:
+ if isinstance(e, Reg):
+ regs.append(e)
+
+ for e in regs:
+ e.dump_py()
+
+
+ def dump_reg_variants(self, regname, variants):
+ # Don't bother for things that only have a single variant:
+ if len(variants) == 1:
+ return
+ print("#ifdef __cplusplus")
+ print("struct __%s {" % regname)
+ # TODO be more clever.. we should probably figure out which
+ # fields have the same type in all variants (in which they
+ # appear) and stuff everything else in a variant specific
+ # sub-structure.
+ seen_fields = []
+ bit_size = 32
+ array = False
+ address = None
+ for variant in variants.keys():
+ print(" /* %s fields: */" % variant)
+ reg = variants[variant]
+ bit_size = reg.bit_size
+ array = reg.array
+ for f in reg.bitset.fields:
+ fld_name = field_name(reg, f)
+ if fld_name in seen_fields:
+ continue
+ seen_fields.append(fld_name)
+ name = fld_name.lower()
+ if f.type in [ "address", "waddress" ]:
+ if address:
+ continue
+ address = f
+ tab_to(" __bo_type", "bo;")
+ tab_to(" uint32_t", "bo_offset;")
+ continue
+ type, val = f.ctype("var")
+ tab_to(" %s" %type, "%s;" %name)
+ print(" /* fallback fields: */")
+ if bit_size == 64:
+ tab_to(" uint64_t", "unknown;")
+ tab_to(" uint64_t", "qword;")
+ else:
+ tab_to(" uint32_t", "unknown;")
+ tab_to(" uint32_t", "dword;")
+ print("};")
+ # TODO don't hardcode the varset enum name
+ varenum = "chip"
+ print("template <%s %s>" % (varenum, varenum.upper()))
+ print("static inline struct fd_reg_pair")
+ xtra = ""
+ xtravar = ""
+ if array:
+ xtra = "int __i, "
+ xtravar = "__i, "
+ print("__%s(%sstruct __%s fields) {" % (regname, xtra, regname))
+ for variant in variants.keys():
+ print(" if (%s == %s) {" % (varenum.upper(), variant))
+ reg = variants[variant]
+ reg.dump_regpair_builder()
+ print(" } else")
+ print(" assert(!\"invalid variant\");")
+ print("}")
+
+ if bit_size == 64:
+ skip = ", { .reg = 0 }"
+ else:
+ skip = ""
+
+ print("#define %s(VARIANT, %s...) __%s<VARIANT>(%s{__VA_ARGS__})%s" % (regname, xtravar, regname, xtravar, skip))
+ print("#endif /* __cplusplus */")
+
+ def dump_structs(self):
+ for e in self.file:
+ e.dump_pack_struct()
+
+ for regname in self.variant_regs:
+ self.dump_reg_variants(regname, self.variant_regs[regname])
+
+
+def dump_c(args, guard, func):
+ p = Parser()
+
+ try:
+ p.parse(args.rnn, args.xml, args.validate)
+ except Error as e:
+ print(e, file=sys.stderr)
+ exit(1)
+
+ print("#ifndef %s\n#define %s\n" % (guard, guard))
+
+ print("""/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+""")
+ maxlen = 0
+ for filepath in p.xml_files:
+ maxlen = max(maxlen, len(filepath))
+ for filepath in p.xml_files:
+ pad = " " * (maxlen - len(filepath))
+ filesize = str(os.path.getsize(filepath))
+ filesize = " " * (7 - len(filesize)) + filesize
+ filetime = time.ctime(os.path.getmtime(filepath))
+ print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")")
+ if p.copyright_year:
+ current_year = str(datetime.date.today().year)
+ print()
+ print("Copyright (C) %s-%s by the following authors:" % (p.copyright_year, current_year))
+ for author in p.authors:
+ print("- " + author)
+ if p.license:
+ print(p.license)
+ print("*/")
+
+ print()
+ print("#ifdef __KERNEL__")
+ print("#include <linux/bug.h>")
+ print("#define assert(x) BUG_ON(!(x))")
+ print("#else")
+ print("#include <assert.h>")
+ print("#endif")
+ print()
+
+ print("#ifdef __cplusplus")
+ print("#define __struct_cast(X)")
+ print("#else")
+ print("#define __struct_cast(X) (struct X)")
+ print("#endif")
+ print()
+
+ func(p)
+
+ print("\n#endif /* %s */" % guard)
+
+
+def dump_c_defines(args):
+ guard = str.replace(os.path.basename(args.xml), '.', '_').upper()
+ dump_c(args, guard, lambda p: p.dump())
+
+
+def dump_c_pack_structs(args):
+ guard = str.replace(os.path.basename(args.xml), '.', '_').upper() + '_STRUCTS'
+ dump_c(args, guard, lambda p: p.dump_structs())
+
+
+def dump_py_defines(args):
+ p = Parser()
+
+ try:
+ p.parse(args.rnn, args.xml, args.validate)
+ except Error as e:
+ print(e, file=sys.stderr)
+ exit(1)
+
+ file_name = os.path.splitext(os.path.basename(args.xml))[0]
+
+ print("from enum import IntEnum")
+ print("class %sRegs(IntEnum):" % file_name.upper())
+
+ os.path.basename(args.xml)
+
+ p.dump_regs_py()
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--rnn', type=str, required=True)
+ parser.add_argument('--xml', type=str, required=True)
+ parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+
+ subparsers = parser.add_subparsers()
+ subparsers.required = True
+
+ parser_c_defines = subparsers.add_parser('c-defines')
+ parser_c_defines.set_defaults(func=dump_c_defines)
+
+ parser_c_pack_structs = subparsers.add_parser('c-pack-structs')
+ parser_c_pack_structs.set_defaults(func=dump_c_pack_structs)
+
+ parser_py_defines = subparsers.add_parser('py-defines')
+ parser_py_defines.set_defaults(func=dump_py_defines)
+
+ args = parser.parse_args()
+ args.func(args)
+
+
+if __name__ == '__main__':
+ main()
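The script above is driven by the --rnn and --xml options plus the c-defines, c-pack-structs and py-defines subcommands registered in main(). A minimal sketch of driving it from Python follows; only the option and subcommand names come from the code above, while the input database name and output handling are placeholders.

    import subprocess
    import sys

    RNN_DIR = "drivers/gpu/drm/msm/registers"   # directory added by this patch
    XML = RNN_DIR + "/adreno/a6xx.xml"          # hypothetical input database

    # Emit the C defines header on stdout, with schema validation enabled.
    result = subprocess.run(
        [sys.executable, RNN_DIR + "/gen_header.py",
         "--rnn", RNN_DIR, "--xml", XML, "--validate", "c-defines"],
        capture_output=True, text=True, check=True)

    with open("a6xx.xml.h", "w") as out:        # output path is arbitrary here
        out.write(result.stdout)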
diff --git a/drivers/gpu/drm/msm/registers/rules-fd.xsd b/drivers/gpu/drm/msm/registers/rules-fd.xsd
new file mode 100644
index 000000000000..2eedb099a4eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/rules-fd.xsd
@@ -0,0 +1,404 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<schema xmlns="http://www.w3.org/2001/XMLSchema"
+ targetNamespace="http://nouveau.freedesktop.org/"
+ xmlns:rng="http://nouveau.freedesktop.org/"
+ elementFormDefault="qualified">
+
+ <annotation>
+ <documentation>
+ An updated version of the old rules.xml file from the
+ RivaTV project. Specifications by Pekka Paalanen,
+ preliminary attempt by KoalaBR,
+ first working version by Jakob Bornecrantz.
+ For specifications, see the file rules-ng-format.txt
+ in Nouveau CVS module 'rules-ng'.
+ </documentation>
+ <documentation>Version 0.1</documentation>
+ </annotation>
+
+
+ <!-- Elements -->
+
+ <element name="database" type="rng:databaseType" />
+ <element name="import" type="rng:importType" />
+ <element name="copyright" type="rng:copyrightType" />
+ <element name="domain" type="rng:domainType" />
+ <element name="array" type="rng:arrayType" />
+ <element name="stripe" type="rng:stripeType" />
+ <element name="reg64" type="rng:registerType" />
+ <element name="reg32" type="rng:registerType" />
+ <element name="bitset" type="rng:bitsetType" />
+ <element name="bitfield" type="rng:bitfieldType" />
+ <element name="enum" type="rng:enumType" />
+ <element name="value" type="rng:valueType" />
+
+ <!-- Copyright elements -->
+ <element name="author" type="rng:authorType" />
+ <element name="nick" type="rng:nickType" />
+ <element name="license" type="rng:docType" />
+
+ <!-- Documentation elements -->
+
+ <!-- FIXME: allowed only one per parent element -->
+ <element name="brief" type="rng:briefType" />
+
+ <element name="doc" type="rng:docType" />
+ <element name="b" type="rng:textformatType" />
+ <element name="i" type="rng:textformatType" />
+ <element name="u" type="rng:textformatType" />
+ <element name="code" type="rng:textcodeType" />
+ <element name="ul" type="rng:listType" />
+ <element name="ol" type="rng:listType" />
+ <element name="li" type="rng:listitemType" />
+
+ <!-- Copyright element types -->
+
+ <complexType name="authorType" mixed="true">
+ <annotation>
+ <documentation>
+ register database author
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <element ref="rng:nick" />
+ </choice>
+ <attribute name="name" type="string" use="required" />
+ <attribute name="email" type="string" use="required" />
+ </complexType>
+
+ <complexType name="nickType">
+ <annotation>
+ <documentation>nickType</documentation>
+ </annotation>
+ <attribute name="name" type="string" use="required" />
+ </complexType>
+
+ <!-- Database element types -->
+
+ <complexType name="databaseType">
+ <annotation>
+ <documentation>databaseType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ </choice>
+ </complexType>
+
+ <complexType name="importType">
+ <annotation>
+ <documentation>importType</documentation>
+ </annotation>
+ <attribute name="file" type="string" use="required" />
+ </complexType>
+
+ <complexType name="copyrightType">
+ <annotation>
+ <documentation>copyrightType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ <element ref="rng:author" />
+ <element ref="rng:license" />
+ </choice>
+ <attribute name="year" type="nonNegativeInteger" use="optional" />
+ </complexType>
+
+ <complexType name="domainType">
+ <annotation>
+ <documentation>domainType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ <group ref="rng:regarrayGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="prefix" type="NMTOKENS" use="optional" />
+ <attribute name="width" type="rng:DomainWidth" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ </complexType>
+
+ <complexType name="arrayType">
+ <annotation>
+ <documentation>arrayType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ <group ref="rng:regarrayGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="optional" />
+ <attribute name="offset" type="rng:HexOrNumber" use="optional" />
+ <attribute name="offsets" type="string" use="optional"/>
+ <attribute name="doffsets" type="string" use="optional"/>
+ <attribute name="index" type="NMTOKENS" use="optional"/>
+ <attribute name="stride" type="rng:HexOrNumber" use="required" />
+ <attribute name="length" type="rng:HexOrNumber" use="required" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ <attribute name="usage" type="string" use="optional" />
+ </complexType>
+
+ <complexType name="stripeType">
+ <annotation>
+ <documentation>stripeType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ <group ref="rng:regarrayGroup" minOccurs="0" />
+ </choice>
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ <attribute name="prefix" type="NMTOKENS" use="optional" />
+ </complexType>
+
+ <complexType name="registerType">
+ <annotation>
+ <documentation>
+ registerType used by reg32, reg64
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ <element ref="rng:value" />
+ <element ref="rng:bitfield" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="offset" type="rng:HexOrNumber" use="required" />
+ <attribute name="type" type="NMTOKENS" use="optional" />
+ <attribute name="shr" type="nonNegativeInteger" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ <attribute name="stride" type="rng:HexOrNumber" use="optional" />
+ <attribute name="length" type="rng:HexOrNumber" use="optional" />
+ <attribute name="high" type="nonNegativeInteger" use="optional" />
+ <attribute name="low" type="nonNegativeInteger" use="optional" />
+ <attribute name="pos" type="nonNegativeInteger" use="optional" />
+ <attribute name="align" type="nonNegativeInteger" use="optional" />
+ <attribute name="radix" type="nonNegativeInteger" use="optional" />
+ <attribute name="usage" type="string" use="optional" />
+ </complexType>
+
+ <complexType name="bitsetType">
+ <annotation>
+ <documentation>bitsetType</documentation>
+ </annotation>
+ <choice maxOccurs="unbounded">
+ <element ref="rng:bitfield" />
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="inline" type="rng:Boolean" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ </complexType>
+
+ <complexType name="bitfieldType">
+ <annotation>
+ <documentation>bitfieldType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <element ref="rng:value" maxOccurs="unbounded" />
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="high" type="nonNegativeInteger" use="optional" />
+ <attribute name="low" type="nonNegativeInteger" use="optional" />
+ <attribute name="pos" type="nonNegativeInteger" use="optional" />
+ <attribute name="radix" type="nonNegativeInteger" use="optional" />
+ <attribute name="type" type="NMTOKENS" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ <attribute name="addvariant" type="rng:Boolean" use="optional" />
+ <attribute name="shr" type="nonNegativeInteger" use="optional" />
+ </complexType>
+
+ <complexType name="enumType">
+ <annotation>
+ <documentation>enumType</documentation>
+ </annotation>
+ <choice maxOccurs="unbounded">
+ <element ref="rng:value" />
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="bare" type="rng:Boolean" use="optional" />
+ <attribute name="prefix" type="NMTOKENS" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ </complexType>
+
+ <complexType name="valueType">
+ <annotation>
+ <documentation>valueType</documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:docGroup" />
+ <group ref="rng:topGroup" />
+ </choice>
+ <attribute name="name" type="NMTOKEN" use="required" />
+ <attribute name="value" type="string" use="optional" />
+ <attribute name="varset" type="NMTOKEN" use="optional" />
+ <attribute name="variants" type="string" use="optional" />
+ </complexType>
+
+ <!-- Documentation element types -->
+
+ <complexType name="briefType">
+ <annotation>
+ <documentation>
+ brief documentation, no markup
+ </documentation>
+ </annotation>
+ <simpleContent>
+ <extension base="string" />
+ </simpleContent>
+ </complexType>
+
+ <complexType name="docType" mixed="true">
+ <annotation>
+ <documentation>
+ root element of documentation sub-tree
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:textformatGroup" />
+ <group ref="rng:listGroup" />
+ <element ref="rng:code" />
+ </choice>
+ </complexType>
+
+ <complexType name="textformatType" mixed="true">
+ <annotation>
+ <documentation>
+ for bold, underline, italics
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:textformatGroup" />
+ </choice>
+ </complexType>
+
+ <complexType name="textcodeType">
+ <simpleContent>
+ <extension base="string">
+ <attribute name="title" type="string" />
+ </extension>
+ </simpleContent>
+ </complexType>
+
+ <complexType name="listType">
+ <annotation>
+ <documentation>
+ definition of a list, ordered or unordered
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <element ref="rng:li" />
+ </choice>
+ </complexType>
+
+ <complexType name="listitemType" mixed="true">
+ <annotation>
+ <documentation>
+ items of a list
+ </documentation>
+ </annotation>
+ <choice minOccurs="0" maxOccurs="unbounded">
+ <group ref="rng:textformatGroup" />
+ <group ref="rng:listGroup" />
+ <element ref="rng:code" />
+ </choice>
+ </complexType>
+
+
+
+ <!-- Attribute value types -->
+
+ <simpleType name="Hexadecimal">
+ <restriction base="string">
+ <pattern value="0x[0-9a-f]+" />
+ <pattern value="0x[0-9A-F]+" />
+ <pattern value="[0-9]" />
+ </restriction>
+ </simpleType>
+
+ <simpleType name="HexOrNumber">
+ <annotation>
+ <documentation>HexOrNumber</documentation>
+ </annotation>
+ <union memberTypes="rng:Hexadecimal nonNegativeInteger" />
+ </simpleType>
+
+ <simpleType name="Boolean">
+ <restriction base="string">
+ <enumeration value="true" />
+ <enumeration value="1" />
+ <enumeration value="yes" />
+ <enumeration value="false" />
+ <enumeration value="0" />
+ <enumeration value="no" />
+ </restriction>
+ </simpleType>
+
+ <simpleType name="DomainWidth">
+ <annotation>
+ <documentation>DomainWidth</documentation>
+ </annotation>
+ <restriction base="string">
+ <enumeration value="32" />
+ </restriction>
+ </simpleType>
+
+
+
+ <!-- Element groups -->
+
+ <group name="topGroup">
+ <choice>
+ <element ref="rng:copyright" />
+ <element ref="rng:domain" />
+ <element ref="rng:enum" />
+ <element ref="rng:bitset" />
+ <element ref="rng:import" />
+ </choice>
+ </group>
+
+ <group name="regarrayGroup">
+ <choice>
+ <element ref="rng:reg64" />
+ <element ref="rng:reg32" />
+ <element ref="rng:array" />
+ <element ref="rng:stripe" />
+ </choice>
+ </group>
+
+ <group name="docGroup">
+ <choice>
+ <element ref="rng:brief" />
+ <element ref="rng:doc" />
+ </choice>
+ </group>
+
+ <group name="textformatGroup">
+ <choice>
+ <element ref="rng:b" />
+ <element ref="rng:i" />
+ <element ref="rng:u" />
+ </choice>
+ </group>
+
+ <group name="listGroup">
+ <choice>
+ <element ref="rng:ul" />
+ <element ref="rng:ol" />
+ </choice>
+ </group>
+
+</schema>
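gen_header.py's do_validate() uses lxml to check each input file against this schema when validation is enabled. A standalone sketch of the same check, with placeholder file paths and only API calls that do_validate() itself uses:

    from lxml import etree

    SCHEMA = "drivers/gpu/drm/msm/registers/rules-fd.xsd"
    XML = "drivers/gpu/drm/msm/registers/msm.xml"   # hypothetical database to check

    # Build the schema object and validate the register database, as
    # Parser.do_validate() does for every parsed file.
    xmlschema = etree.XMLSchema(etree.parse(SCHEMA))
    doc = etree.parse(XML)
    if not xmlschema.validate(doc):
        # report the first violation, mirroring the generator's error path
        raise SystemExit(str(xmlschema.error_log.filter_from_errors()[0]))
    print("schema validation passed")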
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index ea10bf81582e..0f895b8a99d6 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
if (ret)
return ret;
+ if (pm_runtime_suspended(dev))
+ return 0;
+
return lcdif_rpm_suspend(dev);
}
@@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- lcdif_rpm_resume(dev);
+ if (!pm_runtime_suspended(dev))
+ lcdif_rpm_resume(dev);
return drm_mode_config_helper_resume(drm);
}
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index cf6b3a80c0c8..c32c01827c1d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,10 +1,8 @@
-NOUVEAU_PATH ?= $(srctree)
-
# SPDX-License-Identifier: MIT
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)
+ccflags-y += -I $(src)/include
+ccflags-y += -I $(src)/include/nvkm
+ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 9c942fbd836d..5936b6b3b15d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: MIT
+#include <linux/debugfs.h>
#include <linux/string.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 6f5d376d8fcc..a11d16a16c3b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -15,7 +15,9 @@ struct nvkm_gsp_mem {
};
struct nvkm_gsp_radix3 {
- struct nvkm_gsp_mem mem[3];
+ struct nvkm_gsp_mem lvl0;
+ struct nvkm_gsp_mem lvl1;
+ struct sg_table lvl2;
};
int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 80f74ee0fc78..f465fe93b1f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,11 +312,21 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
- case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
- case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
- case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
- case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
- case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
+ case NOUVEAU_FIFO_ENGINE_GR:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
+ break;
+ case NOUVEAU_FIFO_ENGINE_VP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
+ break;
+ case NOUVEAU_FIFO_ENGINE_PPP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
+ break;
+ case NOUVEAU_FIFO_ENGINE_BSP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
+ break;
+ case NOUVEAU_FIFO_ENGINE_CE:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
+ break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 11c8c4a80079..661b901d8ecc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -50,18 +50,6 @@ struct drm_nouveau_grobj_alloc {
int class;
};
-struct drm_nouveau_notifierobj_alloc {
- uint32_t channel;
- uint32_t handle;
- uint32_t size;
- uint32_t offset;
-};
-
-struct drm_nouveau_gpuobj_free {
- int channel;
- uint32_t handle;
-};
-
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf607e..79cfab53f80e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db8cbf615112..1e2d28fd10dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -467,17 +467,14 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
set_placement_range(nvbo, domain);
}
-int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
bool force = false, evict = false;
- int ret;
+ int ret = 0;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
@@ -540,20 +537,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
out:
if (force && ret)
nvbo->contig = false;
- ttm_bo_unreserve(bo);
return ret;
}
-int
-nouveau_bo_unpin(struct nouveau_bo *nvbo)
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
@@ -568,8 +560,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
break;
}
}
+}
+
+int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ ret = nouveau_bo_pin_locked(nvbo, domain, contig);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ nouveau_bo_unpin_locked(nvbo);
ttm_bo_unreserve(bo);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index e9dfab6a8156..4e891752c255 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -85,6 +85,8 @@ int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig);
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f28f9a857458..aed5d5b51b43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -83,7 +83,7 @@ static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct nvif_head *head = &nouveau_crtc(crtc)->head;
struct nvif_head_scanoutpos_v0 args;
int retry = 20;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e75..6fb65b01d778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kfree(dma_addrs);
+ kvfree(dma_addrs);
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7de7707ec6a8..bcda0105160f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -181,7 +181,7 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
if (nouveau_mst) {
mstm = outp->dp.mstm;
if (mstm)
- mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);
+ mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST;
}
if (nouveau_dp_has_sink_count(connector, outp)) {
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
- /* If we've already read the DPCD on an eDP device, we don't need to
- * reread it as it won't change
+ /* eDP ports don't support hotplugging - so there's no point in probing an eDP port more than
+ * once.
+ */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- dpcd[DP_DPCD_REV] != 0)
- return NOUVEAU_DP_SST;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->status == connector_status_connected)
+ return NOUVEAU_DP_SST;
+ else if (connector->status == connector_status_disconnected)
+ return NOUVEAU_DP_NONE;
+ }
+
+ // Ensure that the aux bus is enabled for probing
+ drm_dp_dpcd_set_powered(&nv_connector->aux, true);
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
@@ -293,6 +299,13 @@ out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
+ /* GSP doesn't like it when we try to do aux transactions on a port it considers disconnected,
+ * and since we don't really have a use case for that anyway - just disable the aux bus here
+ * if we've decided the connector is disconnected
+ */
+ if (ret == NOUVEAU_DP_NONE)
+ drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1b2ff0c40fc1..b58ab595faf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -89,18 +89,18 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
+ ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin_locked(nvbo);
}
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 0a0a11dc9ec0..ee02cd833c5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -812,15 +812,15 @@ op_remap(struct drm_gpuva_op_remap *r,
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
- u64 range = uvma->va.va.range;
+ u64 end = uvma->va.va.addr + uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
- range = r->next->va.addr - addr;
+ end = r->next->va.addr;
- op_unmap_range(u, addr, range);
+ op_unmap_range(u, addr, end - addr);
}
static int
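The op_remap() change above is interval arithmetic: the hole to unmap is the original VA span shrunk by whatever the prev/next remap pieces keep mapped, and tracking the end rather than the stale range keeps the calculation right when both pieces exist. A small Python model with made-up addresses:

    def op_unmap_span(va_addr, va_range, prev=None, next_piece=None):
        # prev / next_piece are (addr, range) tuples for the pieces that stay
        # mapped; the hole [addr, end) shrinks from both sides, as in op_remap().
        addr = va_addr
        end = va_addr + va_range
        if prev:
            addr = prev[0] + prev[1]
        if next_piece:
            end = next_piece[0]
        return addr, end - addr

    # Map [0x1000, 0x9000), keep [0x1000, 0x2000) and [0x8000, 0x9000):
    assert op_unmap_span(0x1000, 0x8000,
                         prev=(0x1000, 0x1000),
                         next_piece=(0x8000, 0x1000)) == (0x2000, 0x6000)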
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 6a0a4d3b8902..027867c2a8c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret) {
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return PTR_ERR(ctrl);
+ return ret;
}
memcpy(data, ctrl->data, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 986e8d547c94..060c74a80eb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -420,7 +420,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
return ret;
} else {
ret = nvkm_memory_map(gr->attrib_cb, 0, chan->vmm, chan->attrib_cb,
- &args, sizeof(args));;
+ &args, sizeof(args));
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b57101..cb05f7f48a98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 7bcbc4895ec2..271bfa038f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gsp.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
@@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
- if (r021c00 & 0x00000001)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- if (r021c00 & 0x00000004)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ /* gsp only wants to enable/disable display */
+ if (!nvkm_gsp_rm(device->gsp)) {
+ if (r021c00 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
+ if (r021c00 & 0x00000004)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ }
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 11b4c9c274a1..666eb93b1742 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,6 +41,7 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
+ rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 9994cbd6f1c4..abe41f7a3404 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
- strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+ strings = (char *)rpc + str_offset;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
int name_len = strlen(r535_registry_entries[i].name) + 1;
@@ -1624,7 +1624,7 @@ r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
meta->magic = GSP_FW_WPR_META_MAGIC;
meta->revision = GSP_FW_WPR_META_REVISION;
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
@@ -1919,8 +1919,9 @@ nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
- for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
- nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+ nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
}
/**
@@ -1960,36 +1961,60 @@ static int
nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
{
- u64 addr;
+ struct sg_dma_page_iter sg_dma_iter;
+ struct scatterlist *sg;
+ size_t bufsize;
+ u64 *pte;
+ int ret, i, page_idx = 0;
- for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
- u64 *ptes;
- size_t bufsize;
- int ret, idx;
+ ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
+ if (ret)
+ return ret;
- bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
- ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
- if (ret)
- return ret;
+ ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
+ if (ret)
+ goto lvl1_fail;
- ptes = rx3->mem[i].data;
- if (i == 2) {
- struct scatterlist *sgl;
+ // Allocate level 2
+ bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
+ if (ret)
+ goto lvl2_fail;
- for_each_sgtable_dma_sg(sgt, sgl, idx) {
- for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
- *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
- }
- } else {
- for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
- *ptes++ = addr + GSP_PAGE_SIZE * j;
+ // Write the bus address of level 1 to level 0
+ pte = rx3->lvl0.data;
+ *pte = rx3->lvl1.addr;
+
+ // Write the bus address of each page in level 2 to level 1
+ pte = rx3->lvl1.data;
+ for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
+ *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+
+ // Finally, write the bus address of each page in sgt to level 2
+ for_each_sgtable_sg(&rx3->lvl2, sg, i) {
+ void *sgl_end;
+
+ pte = sg_virt(sg);
+ sgl_end = (void *)pte + sg->length;
+
+ for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
+ *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+ page_idx++;
+
+ // Go to the next scatterlist for level 2 if we've reached the end
+ if ((void *)pte >= sgl_end)
+ break;
}
+ }
- size = rx3->mem[i].size;
- addr = rx3->mem[i].addr;
+ if (ret) {
+lvl2_fail:
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+lvl1_fail:
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
}
- return 0;
+ return ret;
}
int
@@ -2021,7 +2046,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr = gsp->sr.meta.data;
sr->magic = GSP_FW_SR_META_MAGIC;
sr->revision = GSP_FW_SR_META_REVISION;
- sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+ sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
mbox0 = lower_32_bits(gsp->sr.meta.addr);
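The rework of nvkm_gsp_radix3_sg() above builds a three-level radix table: level 0 holds the DMA address of level 1, level 1 holds the DMA addresses of the level-2 pages (now a scatter-gather allocation rather than one contiguous buffer), and level 2 holds the DMA addresses of the payload pages. Below is a stand-alone user-space sketch of that layout using ordinary pointers in place of DMA addresses; struct radix3_sketch and radix3_build() are illustrative names, not the kernel API.

#include <stdint.h>
#include <stdlib.h>

#define PTES_PER_PAGE 512	/* 4 KiB page / 8-byte entry, like GSP_PAGE_SIZE */

struct radix3_sketch {
	uint64_t *lvl0;		/* single page; entry 0 points at lvl1 */
	uint64_t *lvl1;		/* single page; one entry per lvl2 page */
	uint64_t **lvl2;	/* lvl2 pages; entries point at payload pages */
	size_t nr_lvl2_pages;
};

static int radix3_build(struct radix3_sketch *r, const uint64_t *payload_pages,
			size_t nr_payload_pages)
{
	size_t i;

	r->nr_lvl2_pages = (nr_payload_pages + PTES_PER_PAGE - 1) / PTES_PER_PAGE;
	/* lvl1 is a single page here, as in the kernel code, so the
	 * payload it can describe is bounded accordingly. */
	if (r->nr_lvl2_pages > PTES_PER_PAGE)
		return -1;

	r->lvl0 = calloc(PTES_PER_PAGE, sizeof(uint64_t));
	r->lvl1 = calloc(PTES_PER_PAGE, sizeof(uint64_t));
	r->lvl2 = calloc(r->nr_lvl2_pages, sizeof(*r->lvl2));
	if (!r->lvl0 || !r->lvl1 || !r->lvl2)
		return -1;

	r->lvl0[0] = (uintptr_t)r->lvl1;		/* level 0 -> level 1 */

	for (i = 0; i < r->nr_lvl2_pages; i++) {
		r->lvl2[i] = calloc(PTES_PER_PAGE, sizeof(uint64_t));
		if (!r->lvl2[i])
			return -1;
		r->lvl1[i] = (uintptr_t)r->lvl2[i];	/* level 1 -> level 2 */
	}

	for (i = 0; i < nr_payload_pages; i++)		/* level 2 -> payload */
		r->lvl2[i / PTES_PER_PAGE][i % PTES_PER_PAGE] = payload_pages[i];

	return 0;
}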
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342d87..dd5b5a17ece0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier matches the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
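The instmem/nv50.c hunks above pair an smp_wmb() before refcount_set() with an smp_rmb() after the refcount_inc_not_zero() fast path, so a reader that observes a non-zero refcount also observes the fully initialised mapping. The following self-contained sketch shows the same publish/consume ordering with C11 acquire/release atomics (a slightly stronger but portable stand-in for the rmb/wmb pairing); struct mapping, publisher() and consumer() are illustrative names only.

#include <stdatomic.h>
#include <stddef.h>

struct mapping {
	void *ptrs;			/* stands in for iobj->map / memory.ptrs */
	atomic_int published;		/* stands in for iobj->maps */
};

static void publisher(struct mapping *m, void *map)
{
	m->ptrs = map;
	/* Release store: the data written above is visible before the flag,
	 * mirroring smp_wmb() before refcount_set(). */
	atomic_store_explicit(&m->published, 1, memory_order_release);
}

static void *consumer(struct mapping *m)
{
	/* Acquire load pairs with the release store, mirroring smp_rmb()
	 * after a successful refcount_inc_not_zero(). */
	if (atomic_load_explicit(&m->published, memory_order_acquire))
		return m->ptrs;
	return NULL;	/* not mapped yet; caller takes the locked slow path */
}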
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index b715301ec79f..6c49270cb290 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,7 +4,7 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
default n
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9753c1e1f994..1aca3060333e 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -1212,7 +1212,6 @@ struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove_new = omap_dmm_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
.pm = &omap_dmm_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1d414b33fee3..449d521c78fe 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
#include <drm/drm_blend.h>
#include <drm/drm_modeset_helper.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 6b08b137af1a..523be34682ca 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
omap_gem_roll(bo, fbi->var.yoffset * npages);
}
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
+
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
@@ -78,11 +82,9 @@ fallback:
static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct drm_fb_helper *helper = info->par;
- struct drm_framebuffer *fb = helper->fb;
- struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
+ return fb_deferred_io_mmap(info, vma);
}
static void omap_fbdev_fb_destroy(struct fb_info *info)
@@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
DBG();
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(helper);
omap_gem_unpin(bo);
@@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
kfree(fbdev);
}
+/*
+ * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
+ * because we use write-combine.
+ */
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
- __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
.fb_ioctl = drm_fb_helper_ioctl,
.fb_mmap = omap_fbdev_fb_mmap,
.fb_destroy = omap_fbdev_fb_destroy,
@@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = bo->size;
+ /* deferred I/O */
+ helper->fbdefio.delay = HZ / 20;
+ helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ fbi->fbdefio = &helper->fbdefio;
+ ret = fb_deferred_io_init(fbi);
+ if (ret)
+ goto fail;
+
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
@@ -238,8 +254,20 @@ fail:
return ret;
}
+static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
+ .fb_dirty = omap_fbdev_dirty,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 3421e8389222..9ea0c64c26b5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -9,6 +9,7 @@
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
+#include <linux/vmalloc.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d037b3b8b999..982324ef5a41 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -177,7 +177,7 @@ config DRM_PANEL_ILITEK_IL9322
config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
- depends on OF && SPI
+ depends on SPI
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
@@ -335,6 +335,17 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_LG_SW43408
+ tristate "LG SW43408 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the LG sw43408 panel.
+ The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
+ pixel. It provides a MIPI DSI interface to the host and has a
+ built-in LED backlight.
+
config DRM_PANEL_MAGNACHIP_D53E6EA8966
tristate "Magnachip D53E6EA8966 DSI panel"
depends on OF && SPI
@@ -542,6 +553,18 @@ config DRM_PANEL_RAYDIUM_RM692E5
Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone.
+config DRM_PANEL_RAYDIUM_RM69380
+ tristate "Raydium RM69380-based DSI panel"
+ depends on OF && GPIOLIB
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM69380-based
+ display panels.
+
+ This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
+ in combination with an EDO OLED panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
@@ -564,7 +587,7 @@ config DRM_PANEL_SAMSUNG_ATNA33XC20
depends on PM
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
help
DRM panel driver for the Samsung ATNA33XC20 panel. This panel can't
be handled by the DRM_PANEL_SIMPLE driver because its power
@@ -586,6 +609,15 @@ config DRM_PANEL_SAMSUNG_LD9040
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E3FA7
+ tristate "Samsung S6E3FA7 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Samsung S6E3FA7
+ 1920x2220 panel.
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
@@ -796,7 +828,7 @@ config DRM_PANEL_EDP
select VIDEOMODE_HELPERS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f156d7fa0bcc..f0203f6e02f4 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index d58f90bc48fb..6db277efcbb7 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -210,15 +210,12 @@ struct panel_desc {
* struct edp_panel_entry - Maps panel ID to delay / panel name.
*/
struct edp_panel_entry {
- /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
- u32 panel_id;
+ /** @ident: edid identity used for panel matching. */
+ const struct drm_edid_ident ident;
/** @delay: The power sequencing delays needed for this panel. */
const struct panel_delay *delay;
- /** @name: Name of this panel (for printing to logs). */
- const char *name;
-
/** @override_edid_mode: Override the mode obtained by edid. */
const struct drm_display_mode *override_edid_mode;
};
@@ -245,7 +242,7 @@ struct panel_edp {
const struct edp_panel_entry *detected_panel;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct drm_display_mode override_mode;
@@ -620,13 +617,16 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (p->ddc) {
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, p->ddc);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, p->ddc);
+
+ drm_edid_connector_update(connector, p->drm_edid);
+
/*
* If both edid and hard-coded modes exists, skip edid modes to
* avoid multiple preferred modes.
*/
- if (p->edid && !has_hard_coded_modes) {
+ if (p->drm_edid && !has_hard_coded_modes) {
if (has_override_edid_mode) {
/*
* override_edid_mode is specified. Use
@@ -635,7 +635,7 @@ static int panel_edp_get_modes(struct drm_panel *panel,
num += panel_edp_override_edid_mode(p, connector,
p->detected_panel->override_edid_mode);
} else {
- num += drm_add_edid_modes(connector, p->edid);
+ num += drm_edid_connector_add_modes(connector);
}
}
@@ -691,7 +691,7 @@ static int detected_panel_show(struct seq_file *s, void *data)
else if (!p->detected_panel)
seq_puts(s, "HARDCODED\n");
else
- seq_printf(s, "%s\n", p->detected_panel->name);
+ seq_printf(s, "%s\n", p->detected_panel->ident.name);
return 0;
}
@@ -761,11 +761,31 @@ static void panel_edp_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
-static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid);
+
+static void panel_edp_set_conservative_timings(struct panel_edp *panel, struct panel_desc *desc)
+{
+ /*
+ * It's highly likely that the panel will work if we use very
+ * conservative timings, so let's do that.
+ *
+ * Nearly all panels have an "unprepare" delay of 500 ms though
+ * there are a few with 1000. Let's stick 2000 in just to be
+ * super conservative.
+ *
+ * An "enable" delay of 80 ms seems the most common, but we'll
+ * throw in 200 ms to be safe.
+ */
+ desc->delay.unprepare = 2000;
+ desc->delay.enable = 200;
+
+ panel->detected_panel = ERR_PTR(-EINVAL);
+}
static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
{
struct panel_desc *desc;
+ const struct drm_edid *base_block;
u32 panel_id;
char vend[4];
u16 product_id;
@@ -791,19 +811,26 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
/* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
+ dev_err(dev,
+ "Couldn't power on panel to ID it; using conservative timings: %d\n",
+ ret);
+ panel_edp_set_conservative_timings(panel, desc);
goto exit;
}
- panel_id = drm_edid_get_panel_id(panel->ddc);
- if (!panel_id) {
- dev_err(dev, "Couldn't identify panel via EDID\n");
- ret = -EIO;
+ base_block = drm_edid_read_base_block(panel->ddc);
+ if (base_block) {
+ panel_id = drm_edid_get_panel_id(base_block);
+ } else {
+ dev_err(dev, "Couldn't read EDID for ID; using conservative timings\n");
+ panel_edp_set_conservative_timings(panel, desc);
goto exit;
}
drm_edid_decode_panel_id(panel_id, vend, &product_id);
- panel->detected_panel = find_edp_panel(panel_id);
+ panel->detected_panel = find_edp_panel(panel_id, base_block);
+
+ drm_edid_free(base_block);
/*
* We're using non-optimized timings and want it really obvious that
@@ -814,40 +841,20 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
dev_warn(dev,
"Unknown panel %s %#06x, using conservative timings\n",
vend, product_id);
-
- /*
- * It's highly likely that the panel will work if we use very
- * conservative timings, so let's do that. We already know that
- * the HPD-related delays must have worked since we got this
- * far, so we really just need the "unprepare" / "enable"
- * delays. We don't need "prepare_to_enable" since that
- * overlaps the "enable" delay anyway.
- *
- * Nearly all panels have a "unprepare" delay of 500 ms though
- * there are a few with 1000. Let's stick 2000 in just to be
- * super conservative.
- *
- * An "enable" delay of 80 ms seems the most common, but we'll
- * throw in 200 ms to be safe.
- */
- desc->delay.unprepare = 2000;
- desc->delay.enable = 200;
-
- panel->detected_panel = ERR_PTR(-EINVAL);
+ panel_edp_set_conservative_timings(panel, desc);
} else {
dev_info(dev, "Detected %s %s (%#06x)\n",
- vend, panel->detected_panel->name, product_id);
+ vend, panel->detected_panel->ident.name, product_id);
/* Update the delay; everything else comes from EDID */
desc->delay = *panel->detected_panel->delay;
}
- ret = 0;
exit:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
- return ret;
+ return 0;
}
static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
@@ -940,8 +947,14 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+
+ /*
+ * Warn if we get an error, but don't consider it fatal. Having
+ * a panel where we can't control the backlight is better than
+ * no panel.
+ */
if (err)
- goto err_finished_pm_runtime;
+ dev_warn(dev, "failed to register dp aux backlight: %d\n", err);
}
drm_panel_add(&panel->base);
@@ -971,8 +984,8 @@ static void panel_edp_remove(struct device *dev)
if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
put_device(&panel->ddc->dev);
- kfree(panel->edid);
- panel->edid = NULL;
+ drm_edid_free(panel->drm_edid);
+ panel->drm_edid = NULL;
}
static void panel_edp_shutdown(struct device *dev)
@@ -1005,6 +1018,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
+static const struct drm_display_mode auo_b116xa3_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1865,6 +1891,13 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e50_p2e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_200_500_e80 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1919,17 +1952,21 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
- .name = _name, \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ }, \
.delay = _delay \
}
#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
{ \
- .name = _name, \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ }, \
.delay = _delay, \
.override_edid_mode = _mode \
}
@@ -1953,7 +1990,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
@@ -1961,6 +2000,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"),
@@ -2010,6 +2050,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -2025,6 +2066,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -2034,7 +2076,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
+
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
@@ -2076,15 +2120,25 @@ static const struct edp_panel_entry edp_panels[] = {
{ /* sentinel */ }
};
-static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid)
{
const struct edp_panel_entry *panel;
if (!panel_id)
return NULL;
- for (panel = edp_panels; panel->panel_id; panel++)
- if (panel->panel_id == panel_id)
+ /*
+ * Match with identity first. This allows handling the case where
+ * vendors incorrectly reused the same panel ID for multiple panels that
+ * need different settings. If there's no match, try again with panel
+ * ID, which should be unique.
+ */
+ for (panel = edp_panels; panel->ident.panel_id; panel++)
+ if (drm_edid_match(edid, &panel->ident))
+ return panel;
+
+ for (panel = edp_panels; panel->ident.panel_id; panel++)
+ if (panel->ident.panel_id == panel_id)
return panel;
return NULL;
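The find_edp_panel() change above performs the lookup in two passes: first by full EDID identity, which disambiguates vendors that reused one panel ID for panels needing different settings, then by panel ID alone. A generic sketch of that pattern follows; struct entry, ident_matches() and find_entry() are illustrative stand-ins for edp_panel_entry and drm_edid_match(), not the driver's actual types.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct entry {
	uint32_t panel_id;	/* 0 terminates the table, like the sentinel above */
	const char *name;
};

static bool ident_matches(const struct entry *e, uint32_t id, const char *name)
{
	return e->panel_id == id && name && !strcmp(e->name, name);
}

static const struct entry *find_entry(const struct entry *table,
				      uint32_t id, const char *name)
{
	const struct entry *e;

	/* Pass 1: full identity (ID plus name), most specific match first. */
	for (e = table; e->panel_id; e++)
		if (ident_matches(e, id, name))
			return e;

	/* Pass 2: ID only, for panels without a more specific entry. */
	for (e = table; e->panel_id; e++)
		if (e->panel_id == id)
			return e;

	return NULL;	/* unknown panel: caller falls back to conservative timings */
}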
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 3574681891e8..b933380b7eb7 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -22,8 +22,9 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
ili9341_dpi_init(ili);
- return ret;
+ return 0;
}
static int ili9341_dpi_enable(struct drm_panel *panel)
@@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
* Every new incarnation of this display must have a unique
* data entry for the system in this driver.
*/
- ili->conf = of_device_get_match_data(dev);
+ ili->conf = device_get_match_data(dev);
if (!ili->conf) {
dev_err(dev, "missing device configuration\n");
return -ENODEV;
@@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi)
reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset))
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
- dev_err(dev, "Failed to get gpio 'dc'\n");
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
if (!strcmp(id->name, "sf-tc240t-9370-t"))
return ili9341_dpi_probe(spi, dc, reset);
else if (!strcmp(id->name, "yx240qv29"))
return ili9341_dbi_probe(spi, dc, reset);
- return -1;
+ return -ENODEV;
}
static void ili9341_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 2ffe5f68a890..084c37fa7348 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -455,6 +455,202 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
};
+static const struct ili9881c_instr kd050hdfia020_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x72),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x09),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x05),
+ ILI9881C_COMMAND_INSTR(0x20, 0x05),
+ ILI9881C_COMMAND_INSTR(0x21, 0x02),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x02),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x15),
+ ILI9881C_COMMAND_INSTR(0x62, 0x14),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x66, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x67, 0x06),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x01),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x14),
+ ILI9881C_COMMAND_INSTR(0x78, 0x15),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x07),
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(0x4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x15),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_SWITCH_PAGE_INSTR(0x1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x90),
+ ILI9881C_COMMAND_INSTR(0x55, 0xA2),
+ ILI9881C_COMMAND_INSTR(0x50, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x60, 0x22),
+ ILI9881C_COMMAND_INSTR(0x61, 0x00),
+ ILI9881C_COMMAND_INSTR(0x62, 0x19),
+ ILI9881C_COMMAND_INSTR(0x63, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1E),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1F),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x8B),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x27),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x78),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4C),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x21),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1E),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1F),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x8B),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x27),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x78),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4C),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x21),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x7),
+};
+
static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -1080,10 +1276,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
msleep(5);
/* And reset it */
- gpiod_set_value(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->reset, 1);
msleep(20);
- gpiod_set_value(ctx->reset, 0);
+ gpiod_set_value_cansleep(ctx->reset, 0);
msleep(20);
for (i = 0; i < ctx->desc->init_length; i++) {
@@ -1138,7 +1334,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
regulator_disable(ctx->power);
- gpiod_set_value(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->reset, 1);
return 0;
}
@@ -1177,6 +1373,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.height_mm = 217,
};
+static const struct drm_display_mode kd050hdfia020_default_mode = {
+ .clock = 62000,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 10,
+ .hsync_end = 720 + 10 + 20,
+ .htotal = 720 + 10 + 20 + 30,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 10,
+ .vsync_end = 1280 + 10 + 10,
+ .vtotal = 1280 + 10 + 10 + 20,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
static const struct drm_display_mode tl050hdv35_default_mode = {
.clock = 59400,
@@ -1345,6 +1558,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};
+static const struct ili9881c_desc kd050hdfia020_desc = {
+ .init = kd050hdfia020_init,
+ .init_length = ARRAY_SIZE(kd050hdfia020_init),
+ .mode = &kd050hdfia020_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+};
+
static const struct ili9881c_desc tl050hdv35_desc = {
.init = tl050hdv35_init,
.init_length = ARRAY_SIZE(tl050hdv35_init),
@@ -1372,6 +1593,7 @@ static const struct ili9881c_desc am8001280g_desc = {
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
+ { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 3e0a8e0d58a0..483dc88d16d8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -247,6 +247,7 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index b942a0162274..c54be0cc3f08 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -25,6 +25,7 @@ struct khadas_ts050_panel {
struct regulator *supply;
struct gpio_desc *reset_gpio;
struct gpio_desc *enable_gpio;
+ struct khadas_ts050_panel_data *panel_data;
bool prepared;
bool enabled;
@@ -32,544 +33,601 @@ struct khadas_ts050_panel {
struct khadas_ts050_panel_cmd {
u8 cmd;
- u8 data;
+ u8 data[55];
+ u8 size;
+};
+
+struct khadas_ts050_panel_data {
+ struct khadas_ts050_panel_cmd *init_code;
+ int len;
+};
+
+static const struct khadas_ts050_panel_cmd ts050v2_init_code[] = {
+ {0xB9, {0xFF, 0x83, 0x99}, 0x03},
+ {0xBA, {0x63, 0x23, 0x68, 0xCF}, 0x04},
+ {0xD2, {0x55}, 0x01},
+ {0xB1, {0x02, 0x04, 0x70, 0x90, 0x01, 0x32, 0x33,
+ 0x11, 0x11, 0x4D, 0x57, 0x56, 0x73, 0x02, 0x02}, 0x0f},
+ {0xB2, {0x00, 0x80, 0x80, 0xAE, 0x0A, 0x0E, 0x75, 0x11, 0x00, 0x00, 0x00}, 0x0b},
+ {0xB4, {0x00, 0xFF, 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02,
+ 0x00, 0x24, 0x02, 0x04, 0x0A, 0x21, 0x03, 0x00, 0x00, 0x08, 0xA6, 0x88,
+ 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x24,
+ 0x02, 0x04, 0x0A, 0x00, 0x00, 0x08, 0xA6, 0x00, 0x08, 0x11}, 0x2e},
+ {0xD3, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+ 0x18, 0x32, 0x10, 0x09, 0x00, 0x09, 0x32,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0x00, 0x02, 0x02, 0x03, 0x00, 0x00, 0x00, 0x0A,
+ 0x40}, 0x21},
+ {0xD5, {0x18, 0x18, 0x18, 0x18, 0x21, 0x20, 0x18, 0x18, 0x19, 0x19, 0x19,
+ 0x19, 0x18, 0x18, 0x18, 0x18, 0x03, 0x02, 0x01, 0x00, 0x2F, 0x2F,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20},
+ {0xD6, {0x18, 0x18, 0x18, 0x18, 0x20, 0x21, 0x19, 0x19, 0x18, 0x18, 0x19,
+ 0x19, 0x18, 0x18, 0x18, 0x18, 0x00, 0x01, 0x02, 0x03, 0x2F, 0x2F,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20},
+ {0xD8, {0x0A, 0xBE, 0xFA, 0xA0, 0x0A, 0xBE, 0xFA, 0xA0}, 0x08},
+ {0xBD, {0x01}, 0x01},
+ {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08},
+ {0xBD, {0x02}, 0x01},
+ {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08},
+ {0xBD, {0x00}, 0x01},
+ {0xE0, {0x01, 0x35, 0x41, 0x3B, 0x79, 0x81, 0x8C, 0x85, 0x8E,
+ 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1, 0xB3, 0xB7, 0xC5, 0xBD, 0xC5,
+ 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66, 0x73, 0x01, 0x35, 0x41, 0x3B,
+ 0x79, 0x81, 0x8C, 0x85, 0x8E, 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1,
+ 0xB3, 0xB7, 0xB5, 0xBD, 0xC5, 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66,
+ 0x73}, 0x36},
+ {0xB6, {0x97, 0x97}, 0x02},
+ {0xCC, {0xC8}, 0x02},
+ {0xBF, {0x40, 0x41, 0x50, 0x19}, 0x04},
+ {0xC6, {0xFF, 0xF9}, 0x02},
+ {0xC0, {0x25, 0x5A}, 0x02},
};
/* Only the CMD1 User Command set is documented */
-static const struct khadas_ts050_panel_cmd init_code[] = {
+static const struct khadas_ts050_panel_cmd ts050_init_code[] = {
/* Select Unknown CMD Page (Undocumented) */
- {0xff, 0xee},
+ {0xff, {0xee}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x1f, 0x45},
- {0x24, 0x4f},
- {0x38, 0xc8},
- {0x39, 0x27},
- {0x1e, 0x77},
- {0x1d, 0x0f},
- {0x7e, 0x71},
- {0x7c, 0x03},
- {0xff, 0x00},
- {0xfb, 0x01},
- {0x35, 0x01},
+ {0xfb, {0x01}, 0x01},
+ {0x1f, {0x45}, 0x01},
+ {0x24, {0x4f}, 0x01},
+ {0x38, {0xc8}, 0x01},
+ {0x39, {0x27}, 0x01},
+ {0x1e, {0x77}, 0x01},
+ {0x1d, {0x0f}, 0x01},
+ {0x7e, {0x71}, 0x01},
+ {0x7c, {0x03}, 0x01},
+ {0xff, {0x00}, 0x01},
+ {0xfb, {0x01}, 0x01},
+ {0x35, {0x01}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x01},
- {0x01, 0x55},
- {0x02, 0x40},
- {0x05, 0x40},
- {0x06, 0x4a},
- {0x07, 0x24},
- {0x08, 0x0c},
- {0x0b, 0x7d},
- {0x0c, 0x7d},
- {0x0e, 0xb0},
- {0x0f, 0xae},
- {0x11, 0x10},
- {0x12, 0x10},
- {0x13, 0x03},
- {0x14, 0x4a},
- {0x15, 0x12},
- {0x16, 0x12},
- {0x18, 0x00},
- {0x19, 0x77},
- {0x1a, 0x55},
- {0x1b, 0x13},
- {0x1c, 0x00},
- {0x1d, 0x00},
- {0x1e, 0x13},
- {0x1f, 0x00},
- {0x23, 0x00},
- {0x24, 0x00},
- {0x25, 0x00},
- {0x26, 0x00},
- {0x27, 0x00},
- {0x28, 0x00},
- {0x35, 0x00},
- {0x66, 0x00},
- {0x58, 0x82},
- {0x59, 0x02},
- {0x5a, 0x02},
- {0x5b, 0x02},
- {0x5c, 0x82},
- {0x5d, 0x82},
- {0x5e, 0x02},
- {0x5f, 0x02},
- {0x72, 0x31},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x01}, 0x01},
+ {0x01, {0x55}, 0x01},
+ {0x02, {0x40}, 0x01},
+ {0x05, {0x40}, 0x01},
+ {0x06, {0x4a}, 0x01},
+ {0x07, {0x24}, 0x01},
+ {0x08, {0x0c}, 0x01},
+ {0x0b, {0x7d}, 0x01},
+ {0x0c, {0x7d}, 0x01},
+ {0x0e, {0xb0}, 0x01},
+ {0x0f, {0xae}, 0x01},
+ {0x11, {0x10}, 0x01},
+ {0x12, {0x10}, 0x01},
+ {0x13, {0x03}, 0x01},
+ {0x14, {0x4a}, 0x01},
+ {0x15, {0x12}, 0x01},
+ {0x16, {0x12}, 0x01},
+ {0x18, {0x00}, 0x01},
+ {0x19, {0x77}, 0x01},
+ {0x1a, {0x55}, 0x01},
+ {0x1b, {0x13}, 0x01},
+ {0x1c, {0x00}, 0x01},
+ {0x1d, {0x00}, 0x01},
+ {0x1e, {0x13}, 0x01},
+ {0x1f, {0x00}, 0x01},
+ {0x23, {0x00}, 0x01},
+ {0x24, {0x00}, 0x01},
+ {0x25, {0x00}, 0x01},
+ {0x26, {0x00}, 0x01},
+ {0x27, {0x00}, 0x01},
+ {0x28, {0x00}, 0x01},
+ {0x35, {0x00}, 0x01},
+ {0x66, {0x00}, 0x01},
+ {0x58, {0x82}, 0x01},
+ {0x59, {0x02}, 0x01},
+ {0x5a, {0x02}, 0x01},
+ {0x5b, {0x02}, 0x01},
+ {0x5c, {0x82}, 0x01},
+ {0x5d, {0x82}, 0x01},
+ {0x5e, {0x02}, 0x01},
+ {0x5f, {0x02}, 0x01},
+ {0x72, {0x31}, 0x01},
/* Select CMD2 Page4 (Undocumented) */
- {0xff, 0x05},
+ {0xff, {0x05}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x01},
- {0x01, 0x0b},
- {0x02, 0x0c},
- {0x03, 0x09},
- {0x04, 0x0a},
- {0x05, 0x00},
- {0x06, 0x0f},
- {0x07, 0x10},
- {0x08, 0x00},
- {0x09, 0x00},
- {0x0a, 0x00},
- {0x0b, 0x00},
- {0x0c, 0x00},
- {0x0d, 0x13},
- {0x0e, 0x15},
- {0x0f, 0x17},
- {0x10, 0x01},
- {0x11, 0x0b},
- {0x12, 0x0c},
- {0x13, 0x09},
- {0x14, 0x0a},
- {0x15, 0x00},
- {0x16, 0x0f},
- {0x17, 0x10},
- {0x18, 0x00},
- {0x19, 0x00},
- {0x1a, 0x00},
- {0x1b, 0x00},
- {0x1c, 0x00},
- {0x1d, 0x13},
- {0x1e, 0x15},
- {0x1f, 0x17},
- {0x20, 0x00},
- {0x21, 0x03},
- {0x22, 0x01},
- {0x23, 0x40},
- {0x24, 0x40},
- {0x25, 0xed},
- {0x29, 0x58},
- {0x2a, 0x12},
- {0x2b, 0x01},
- {0x4b, 0x06},
- {0x4c, 0x11},
- {0x4d, 0x20},
- {0x4e, 0x02},
- {0x4f, 0x02},
- {0x50, 0x20},
- {0x51, 0x61},
- {0x52, 0x01},
- {0x53, 0x63},
- {0x54, 0x77},
- {0x55, 0xed},
- {0x5b, 0x00},
- {0x5c, 0x00},
- {0x5d, 0x00},
- {0x5e, 0x00},
- {0x5f, 0x15},
- {0x60, 0x75},
- {0x61, 0x00},
- {0x62, 0x00},
- {0x63, 0x00},
- {0x64, 0x00},
- {0x65, 0x00},
- {0x66, 0x00},
- {0x67, 0x00},
- {0x68, 0x04},
- {0x69, 0x00},
- {0x6a, 0x00},
- {0x6c, 0x40},
- {0x75, 0x01},
- {0x76, 0x01},
- {0x7a, 0x80},
- {0x7b, 0xa3},
- {0x7c, 0xd8},
- {0x7d, 0x60},
- {0x7f, 0x15},
- {0x80, 0x81},
- {0x83, 0x05},
- {0x93, 0x08},
- {0x94, 0x10},
- {0x8a, 0x00},
- {0x9b, 0x0f},
- {0xea, 0xff},
- {0xec, 0x00},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x01}, 0x01},
+ {0x01, {0x0b}, 0x01},
+ {0x02, {0x0c}, 0x01},
+ {0x03, {0x09}, 0x01},
+ {0x04, {0x0a}, 0x01},
+ {0x05, {0x00}, 0x01},
+ {0x06, {0x0f}, 0x01},
+ {0x07, {0x10}, 0x01},
+ {0x08, {0x00}, 0x01},
+ {0x09, {0x00}, 0x01},
+ {0x0a, {0x00}, 0x01},
+ {0x0b, {0x00}, 0x01},
+ {0x0c, {0x00}, 0x01},
+ {0x0d, {0x13}, 0x01},
+ {0x0e, {0x15}, 0x01},
+ {0x0f, {0x17}, 0x01},
+ {0x10, {0x01}, 0x01},
+ {0x11, {0x0b}, 0x01},
+ {0x12, {0x0c}, 0x01},
+ {0x13, {0x09}, 0x01},
+ {0x14, {0x0a}, 0x01},
+ {0x15, {0x00}, 0x01},
+ {0x16, {0x0f}, 0x01},
+ {0x17, {0x10}, 0x01},
+ {0x18, {0x00}, 0x01},
+ {0x19, {0x00}, 0x01},
+ {0x1a, {0x00}, 0x01},
+ {0x1b, {0x00}, 0x01},
+ {0x1c, {0x00}, 0x01},
+ {0x1d, {0x13}, 0x01},
+ {0x1e, {0x15}, 0x01},
+ {0x1f, {0x17}, 0x01},
+ {0x20, {0x00}, 0x01},
+ {0x21, {0x03}, 0x01},
+ {0x22, {0x01}, 0x01},
+ {0x23, {0x40}, 0x01},
+ {0x24, {0x40}, 0x01},
+ {0x25, {0xed}, 0x01},
+ {0x29, {0x58}, 0x01},
+ {0x2a, {0x12}, 0x01},
+ {0x2b, {0x01}, 0x01},
+ {0x4b, {0x06}, 0x01},
+ {0x4c, {0x11}, 0x01},
+ {0x4d, {0x20}, 0x01},
+ {0x4e, {0x02}, 0x01},
+ {0x4f, {0x02}, 0x01},
+ {0x50, {0x20}, 0x01},
+ {0x51, {0x61}, 0x01},
+ {0x52, {0x01}, 0x01},
+ {0x53, {0x63}, 0x01},
+ {0x54, {0x77}, 0x01},
+ {0x55, {0xed}, 0x01},
+ {0x5b, {0x00}, 0x01},
+ {0x5c, {0x00}, 0x01},
+ {0x5d, {0x00}, 0x01},
+ {0x5e, {0x00}, 0x01},
+ {0x5f, {0x15}, 0x01},
+ {0x60, {0x75}, 0x01},
+ {0x61, {0x00}, 0x01},
+ {0x62, {0x00}, 0x01},
+ {0x63, {0x00}, 0x01},
+ {0x64, {0x00}, 0x01},
+ {0x65, {0x00}, 0x01},
+ {0x66, {0x00}, 0x01},
+ {0x67, {0x00}, 0x01},
+ {0x68, {0x04}, 0x01},
+ {0x69, {0x00}, 0x01},
+ {0x6a, {0x00}, 0x01},
+ {0x6c, {0x40}, 0x01},
+ {0x75, {0x01}, 0x01},
+ {0x76, {0x01}, 0x01},
+ {0x7a, {0x80}, 0x01},
+ {0x7b, {0xa3}, 0x01},
+ {0x7c, {0xd8}, 0x01},
+ {0x7d, {0x60}, 0x01},
+ {0x7f, {0x15}, 0x01},
+ {0x80, {0x81}, 0x01},
+ {0x83, {0x05}, 0x01},
+ {0x93, {0x08}, 0x01},
+ {0x94, {0x10}, 0x01},
+ {0x8a, {0x00}, 0x01},
+ {0x9b, {0x0f}, 0x01},
+ {0xea, {0xff}, 0x01},
+ {0xec, {0x00}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x75, 0x00},
- {0x76, 0xdf},
- {0x77, 0x00},
- {0x78, 0xe4},
- {0x79, 0x00},
- {0x7a, 0xed},
- {0x7b, 0x00},
- {0x7c, 0xf6},
- {0x7d, 0x00},
- {0x7e, 0xff},
- {0x7f, 0x01},
- {0x80, 0x07},
- {0x81, 0x01},
- {0x82, 0x10},
- {0x83, 0x01},
- {0x84, 0x18},
- {0x85, 0x01},
- {0x86, 0x20},
- {0x87, 0x01},
- {0x88, 0x3d},
- {0x89, 0x01},
- {0x8a, 0x56},
- {0x8b, 0x01},
- {0x8c, 0x84},
- {0x8d, 0x01},
- {0x8e, 0xab},
- {0x8f, 0x01},
- {0x90, 0xec},
- {0x91, 0x02},
- {0x92, 0x22},
- {0x93, 0x02},
- {0x94, 0x23},
- {0x95, 0x02},
- {0x96, 0x55},
- {0x97, 0x02},
- {0x98, 0x8b},
- {0x99, 0x02},
- {0x9a, 0xaf},
- {0x9b, 0x02},
- {0x9c, 0xdf},
- {0x9d, 0x03},
- {0x9e, 0x01},
- {0x9f, 0x03},
- {0xa0, 0x2c},
- {0xa2, 0x03},
- {0xa3, 0x39},
- {0xa4, 0x03},
- {0xa5, 0x47},
- {0xa6, 0x03},
- {0xa7, 0x56},
- {0xa9, 0x03},
- {0xaa, 0x66},
- {0xab, 0x03},
- {0xac, 0x76},
- {0xad, 0x03},
- {0xae, 0x85},
- {0xaf, 0x03},
- {0xb0, 0x90},
- {0xb1, 0x03},
- {0xb2, 0xcb},
- {0xb3, 0x00},
- {0xb4, 0xdf},
- {0xb5, 0x00},
- {0xb6, 0xe4},
- {0xb7, 0x00},
- {0xb8, 0xed},
- {0xb9, 0x00},
- {0xba, 0xf6},
- {0xbb, 0x00},
- {0xbc, 0xff},
- {0xbd, 0x01},
- {0xbe, 0x07},
- {0xbf, 0x01},
- {0xc0, 0x10},
- {0xc1, 0x01},
- {0xc2, 0x18},
- {0xc3, 0x01},
- {0xc4, 0x20},
- {0xc5, 0x01},
- {0xc6, 0x3d},
- {0xc7, 0x01},
- {0xc8, 0x56},
- {0xc9, 0x01},
- {0xca, 0x84},
- {0xcb, 0x01},
- {0xcc, 0xab},
- {0xcd, 0x01},
- {0xce, 0xec},
- {0xcf, 0x02},
- {0xd0, 0x22},
- {0xd1, 0x02},
- {0xd2, 0x23},
- {0xd3, 0x02},
- {0xd4, 0x55},
- {0xd5, 0x02},
- {0xd6, 0x8b},
- {0xd7, 0x02},
- {0xd8, 0xaf},
- {0xd9, 0x02},
- {0xda, 0xdf},
- {0xdb, 0x03},
- {0xdc, 0x01},
- {0xdd, 0x03},
- {0xde, 0x2c},
- {0xdf, 0x03},
- {0xe0, 0x39},
- {0xe1, 0x03},
- {0xe2, 0x47},
- {0xe3, 0x03},
- {0xe4, 0x56},
- {0xe5, 0x03},
- {0xe6, 0x66},
- {0xe7, 0x03},
- {0xe8, 0x76},
- {0xe9, 0x03},
- {0xea, 0x85},
- {0xeb, 0x03},
- {0xec, 0x90},
- {0xed, 0x03},
- {0xee, 0xcb},
- {0xef, 0x00},
- {0xf0, 0xbb},
- {0xf1, 0x00},
- {0xf2, 0xc0},
- {0xf3, 0x00},
- {0xf4, 0xcc},
- {0xf5, 0x00},
- {0xf6, 0xd6},
- {0xf7, 0x00},
- {0xf8, 0xe1},
- {0xf9, 0x00},
- {0xfa, 0xea},
+ {0xfb, {0x01}, 0x01},
+ {0x75, {0x00}, 0x01},
+ {0x76, {0xdf}, 0x01},
+ {0x77, {0x00}, 0x01},
+ {0x78, {0xe4}, 0x01},
+ {0x79, {0x00}, 0x01},
+ {0x7a, {0xed}, 0x01},
+ {0x7b, {0x00}, 0x01},
+ {0x7c, {0xf6}, 0x01},
+ {0x7d, {0x00}, 0x01},
+ {0x7e, {0xff}, 0x01},
+ {0x7f, {0x01}, 0x01},
+ {0x80, {0x07}, 0x01},
+ {0x81, {0x01}, 0x01},
+ {0x82, {0x10}, 0x01},
+ {0x83, {0x01}, 0x01},
+ {0x84, {0x18}, 0x01},
+ {0x85, {0x01}, 0x01},
+ {0x86, {0x20}, 0x01},
+ {0x87, {0x01}, 0x01},
+ {0x88, {0x3d}, 0x01},
+ {0x89, {0x01}, 0x01},
+ {0x8a, {0x56}, 0x01},
+ {0x8b, {0x01}, 0x01},
+ {0x8c, {0x84}, 0x01},
+ {0x8d, {0x01}, 0x01},
+ {0x8e, {0xab}, 0x01},
+ {0x8f, {0x01}, 0x01},
+ {0x90, {0xec}, 0x01},
+ {0x91, {0x02}, 0x01},
+ {0x92, {0x22}, 0x01},
+ {0x93, {0x02}, 0x01},
+ {0x94, {0x23}, 0x01},
+ {0x95, {0x02}, 0x01},
+ {0x96, {0x55}, 0x01},
+ {0x97, {0x02}, 0x01},
+ {0x98, {0x8b}, 0x01},
+ {0x99, {0x02}, 0x01},
+ {0x9a, {0xaf}, 0x01},
+ {0x9b, {0x02}, 0x01},
+ {0x9c, {0xdf}, 0x01},
+ {0x9d, {0x03}, 0x01},
+ {0x9e, {0x01}, 0x01},
+ {0x9f, {0x03}, 0x01},
+ {0xa0, {0x2c}, 0x01},
+ {0xa2, {0x03}, 0x01},
+ {0xa3, {0x39}, 0x01},
+ {0xa4, {0x03}, 0x01},
+ {0xa5, {0x47}, 0x01},
+ {0xa6, {0x03}, 0x01},
+ {0xa7, {0x56}, 0x01},
+ {0xa9, {0x03}, 0x01},
+ {0xaa, {0x66}, 0x01},
+ {0xab, {0x03}, 0x01},
+ {0xac, {0x76}, 0x01},
+ {0xad, {0x03}, 0x01},
+ {0xae, {0x85}, 0x01},
+ {0xaf, {0x03}, 0x01},
+ {0xb0, {0x90}, 0x01},
+ {0xb1, {0x03}, 0x01},
+ {0xb2, {0xcb}, 0x01},
+ {0xb3, {0x00}, 0x01},
+ {0xb4, {0xdf}, 0x01},
+ {0xb5, {0x00}, 0x01},
+ {0xb6, {0xe4}, 0x01},
+ {0xb7, {0x00}, 0x01},
+ {0xb8, {0xed}, 0x01},
+ {0xb9, {0x00}, 0x01},
+ {0xba, {0xf6}, 0x01},
+ {0xbb, {0x00}, 0x01},
+ {0xbc, {0xff}, 0x01},
+ {0xbd, {0x01}, 0x01},
+ {0xbe, {0x07}, 0x01},
+ {0xbf, {0x01}, 0x01},
+ {0xc0, {0x10}, 0x01},
+ {0xc1, {0x01}, 0x01},
+ {0xc2, {0x18}, 0x01},
+ {0xc3, {0x01}, 0x01},
+ {0xc4, {0x20}, 0x01},
+ {0xc5, {0x01}, 0x01},
+ {0xc6, {0x3d}, 0x01},
+ {0xc7, {0x01}, 0x01},
+ {0xc8, {0x56}, 0x01},
+ {0xc9, {0x01}, 0x01},
+ {0xca, {0x84}, 0x01},
+ {0xcb, {0x01}, 0x01},
+ {0xcc, {0xab}, 0x01},
+ {0xcd, {0x01}, 0x01},
+ {0xce, {0xec}, 0x01},
+ {0xcf, {0x02}, 0x01},
+ {0xd0, {0x22}, 0x01},
+ {0xd1, {0x02}, 0x01},
+ {0xd2, {0x23}, 0x01},
+ {0xd3, {0x02}, 0x01},
+ {0xd4, {0x55}, 0x01},
+ {0xd5, {0x02}, 0x01},
+ {0xd6, {0x8b}, 0x01},
+ {0xd7, {0x02}, 0x01},
+ {0xd8, {0xaf}, 0x01},
+ {0xd9, {0x02}, 0x01},
+ {0xda, {0xdf}, 0x01},
+ {0xdb, {0x03}, 0x01},
+ {0xdc, {0x01}, 0x01},
+ {0xdd, {0x03}, 0x01},
+ {0xde, {0x2c}, 0x01},
+ {0xdf, {0x03}, 0x01},
+ {0xe0, {0x39}, 0x01},
+ {0xe1, {0x03}, 0x01},
+ {0xe2, {0x47}, 0x01},
+ {0xe3, {0x03}, 0x01},
+ {0xe4, {0x56}, 0x01},
+ {0xe5, {0x03}, 0x01},
+ {0xe6, {0x66}, 0x01},
+ {0xe7, {0x03}, 0x01},
+ {0xe8, {0x76}, 0x01},
+ {0xe9, {0x03}, 0x01},
+ {0xea, {0x85}, 0x01},
+ {0xeb, {0x03}, 0x01},
+ {0xec, {0x90}, 0x01},
+ {0xed, {0x03}, 0x01},
+ {0xee, {0xcb}, 0x01},
+ {0xef, {0x00}, 0x01},
+ {0xf0, {0xbb}, 0x01},
+ {0xf1, {0x00}, 0x01},
+ {0xf2, {0xc0}, 0x01},
+ {0xf3, {0x00}, 0x01},
+ {0xf4, {0xcc}, 0x01},
+ {0xf5, {0x00}, 0x01},
+ {0xf6, {0xd6}, 0x01},
+ {0xf7, {0x00}, 0x01},
+ {0xf8, {0xe1}, 0x01},
+ {0xf9, {0x00}, 0x01},
+ {0xfa, {0xea}, 0x01},
/* Select CMD2 Page2 (Undocumented) */
- {0xff, 0x02},
+ {0xff, {0x02}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x00},
- {0x01, 0xf4},
- {0x02, 0x00},
- {0x03, 0xef},
- {0x04, 0x01},
- {0x05, 0x07},
- {0x06, 0x01},
- {0x07, 0x28},
- {0x08, 0x01},
- {0x09, 0x44},
- {0x0a, 0x01},
- {0x0b, 0x76},
- {0x0c, 0x01},
- {0x0d, 0xa0},
- {0x0e, 0x01},
- {0x0f, 0xe7},
- {0x10, 0x02},
- {0x11, 0x1f},
- {0x12, 0x02},
- {0x13, 0x22},
- {0x14, 0x02},
- {0x15, 0x54},
- {0x16, 0x02},
- {0x17, 0x8b},
- {0x18, 0x02},
- {0x19, 0xaf},
- {0x1a, 0x02},
- {0x1b, 0xe0},
- {0x1c, 0x03},
- {0x1d, 0x01},
- {0x1e, 0x03},
- {0x1f, 0x2d},
- {0x20, 0x03},
- {0x21, 0x39},
- {0x22, 0x03},
- {0x23, 0x47},
- {0x24, 0x03},
- {0x25, 0x57},
- {0x26, 0x03},
- {0x27, 0x65},
- {0x28, 0x03},
- {0x29, 0x77},
- {0x2a, 0x03},
- {0x2b, 0x85},
- {0x2d, 0x03},
- {0x2f, 0x8f},
- {0x30, 0x03},
- {0x31, 0xcb},
- {0x32, 0x00},
- {0x33, 0xbb},
- {0x34, 0x00},
- {0x35, 0xc0},
- {0x36, 0x00},
- {0x37, 0xcc},
- {0x38, 0x00},
- {0x39, 0xd6},
- {0x3a, 0x00},
- {0x3b, 0xe1},
- {0x3d, 0x00},
- {0x3f, 0xea},
- {0x40, 0x00},
- {0x41, 0xf4},
- {0x42, 0x00},
- {0x43, 0xfe},
- {0x44, 0x01},
- {0x45, 0x07},
- {0x46, 0x01},
- {0x47, 0x28},
- {0x48, 0x01},
- {0x49, 0x44},
- {0x4a, 0x01},
- {0x4b, 0x76},
- {0x4c, 0x01},
- {0x4d, 0xa0},
- {0x4e, 0x01},
- {0x4f, 0xe7},
- {0x50, 0x02},
- {0x51, 0x1f},
- {0x52, 0x02},
- {0x53, 0x22},
- {0x54, 0x02},
- {0x55, 0x54},
- {0x56, 0x02},
- {0x58, 0x8b},
- {0x59, 0x02},
- {0x5a, 0xaf},
- {0x5b, 0x02},
- {0x5c, 0xe0},
- {0x5d, 0x03},
- {0x5e, 0x01},
- {0x5f, 0x03},
- {0x60, 0x2d},
- {0x61, 0x03},
- {0x62, 0x39},
- {0x63, 0x03},
- {0x64, 0x47},
- {0x65, 0x03},
- {0x66, 0x57},
- {0x67, 0x03},
- {0x68, 0x65},
- {0x69, 0x03},
- {0x6a, 0x77},
- {0x6b, 0x03},
- {0x6c, 0x85},
- {0x6d, 0x03},
- {0x6e, 0x8f},
- {0x6f, 0x03},
- {0x70, 0xcb},
- {0x71, 0x00},
- {0x72, 0x00},
- {0x73, 0x00},
- {0x74, 0x21},
- {0x75, 0x00},
- {0x76, 0x4c},
- {0x77, 0x00},
- {0x78, 0x6b},
- {0x79, 0x00},
- {0x7a, 0x85},
- {0x7b, 0x00},
- {0x7c, 0x9a},
- {0x7d, 0x00},
- {0x7e, 0xad},
- {0x7f, 0x00},
- {0x80, 0xbe},
- {0x81, 0x00},
- {0x82, 0xcd},
- {0x83, 0x01},
- {0x84, 0x01},
- {0x85, 0x01},
- {0x86, 0x29},
- {0x87, 0x01},
- {0x88, 0x68},
- {0x89, 0x01},
- {0x8a, 0x98},
- {0x8b, 0x01},
- {0x8c, 0xe5},
- {0x8d, 0x02},
- {0x8e, 0x1e},
- {0x8f, 0x02},
- {0x90, 0x30},
- {0x91, 0x02},
- {0x92, 0x52},
- {0x93, 0x02},
- {0x94, 0x88},
- {0x95, 0x02},
- {0x96, 0xaa},
- {0x97, 0x02},
- {0x98, 0xd7},
- {0x99, 0x02},
- {0x9a, 0xf7},
- {0x9b, 0x03},
- {0x9c, 0x21},
- {0x9d, 0x03},
- {0x9e, 0x2e},
- {0x9f, 0x03},
- {0xa0, 0x3d},
- {0xa2, 0x03},
- {0xa3, 0x4c},
- {0xa4, 0x03},
- {0xa5, 0x5e},
- {0xa6, 0x03},
- {0xa7, 0x71},
- {0xa9, 0x03},
- {0xaa, 0x86},
- {0xab, 0x03},
- {0xac, 0x94},
- {0xad, 0x03},
- {0xae, 0xfa},
- {0xaf, 0x00},
- {0xb0, 0x00},
- {0xb1, 0x00},
- {0xb2, 0x21},
- {0xb3, 0x00},
- {0xb4, 0x4c},
- {0xb5, 0x00},
- {0xb6, 0x6b},
- {0xb7, 0x00},
- {0xb8, 0x85},
- {0xb9, 0x00},
- {0xba, 0x9a},
- {0xbb, 0x00},
- {0xbc, 0xad},
- {0xbd, 0x00},
- {0xbe, 0xbe},
- {0xbf, 0x00},
- {0xc0, 0xcd},
- {0xc1, 0x01},
- {0xc2, 0x01},
- {0xc3, 0x01},
- {0xc4, 0x29},
- {0xc5, 0x01},
- {0xc6, 0x68},
- {0xc7, 0x01},
- {0xc8, 0x98},
- {0xc9, 0x01},
- {0xca, 0xe5},
- {0xcb, 0x02},
- {0xcc, 0x1e},
- {0xcd, 0x02},
- {0xce, 0x20},
- {0xcf, 0x02},
- {0xd0, 0x52},
- {0xd1, 0x02},
- {0xd2, 0x88},
- {0xd3, 0x02},
- {0xd4, 0xaa},
- {0xd5, 0x02},
- {0xd6, 0xd7},
- {0xd7, 0x02},
- {0xd8, 0xf7},
- {0xd9, 0x03},
- {0xda, 0x21},
- {0xdb, 0x03},
- {0xdc, 0x2e},
- {0xdd, 0x03},
- {0xde, 0x3d},
- {0xdf, 0x03},
- {0xe0, 0x4c},
- {0xe1, 0x03},
- {0xe2, 0x5e},
- {0xe3, 0x03},
- {0xe4, 0x71},
- {0xe5, 0x03},
- {0xe6, 0x86},
- {0xe7, 0x03},
- {0xe8, 0x94},
- {0xe9, 0x03},
- {0xea, 0xfa},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x00}, 0x01},
+ {0x01, {0xf4}, 0x01},
+ {0x02, {0x00}, 0x01},
+ {0x03, {0xef}, 0x01},
+ {0x04, {0x01}, 0x01},
+ {0x05, {0x07}, 0x01},
+ {0x06, {0x01}, 0x01},
+ {0x07, {0x28}, 0x01},
+ {0x08, {0x01}, 0x01},
+ {0x09, {0x44}, 0x01},
+ {0x0a, {0x01}, 0x01},
+ {0x0b, {0x76}, 0x01},
+ {0x0c, {0x01}, 0x01},
+ {0x0d, {0xa0}, 0x01},
+ {0x0e, {0x01}, 0x01},
+ {0x0f, {0xe7}, 0x01},
+ {0x10, {0x02}, 0x01},
+ {0x11, {0x1f}, 0x01},
+ {0x12, {0x02}, 0x01},
+ {0x13, {0x22}, 0x01},
+ {0x14, {0x02}, 0x01},
+ {0x15, {0x54}, 0x01},
+ {0x16, {0x02}, 0x01},
+ {0x17, {0x8b}, 0x01},
+ {0x18, {0x02}, 0x01},
+ {0x19, {0xaf}, 0x01},
+ {0x1a, {0x02}, 0x01},
+ {0x1b, {0xe0}, 0x01},
+ {0x1c, {0x03}, 0x01},
+ {0x1d, {0x01}, 0x01},
+ {0x1e, {0x03}, 0x01},
+ {0x1f, {0x2d}, 0x01},
+ {0x20, {0x03}, 0x01},
+ {0x21, {0x39}, 0x01},
+ {0x22, {0x03}, 0x01},
+ {0x23, {0x47}, 0x01},
+ {0x24, {0x03}, 0x01},
+ {0x25, {0x57}, 0x01},
+ {0x26, {0x03}, 0x01},
+ {0x27, {0x65}, 0x01},
+ {0x28, {0x03}, 0x01},
+ {0x29, {0x77}, 0x01},
+ {0x2a, {0x03}, 0x01},
+ {0x2b, {0x85}, 0x01},
+ {0x2d, {0x03}, 0x01},
+ {0x2f, {0x8f}, 0x01},
+ {0x30, {0x03}, 0x01},
+ {0x31, {0xcb}, 0x01},
+ {0x32, {0x00}, 0x01},
+ {0x33, {0xbb}, 0x01},
+ {0x34, {0x00}, 0x01},
+ {0x35, {0xc0}, 0x01},
+ {0x36, {0x00}, 0x01},
+ {0x37, {0xcc}, 0x01},
+ {0x38, {0x00}, 0x01},
+ {0x39, {0xd6}, 0x01},
+ {0x3a, {0x00}, 0x01},
+ {0x3b, {0xe1}, 0x01},
+ {0x3d, {0x00}, 0x01},
+ {0x3f, {0xea}, 0x01},
+ {0x40, {0x00}, 0x01},
+ {0x41, {0xf4}, 0x01},
+ {0x42, {0x00}, 0x01},
+ {0x43, {0xfe}, 0x01},
+ {0x44, {0x01}, 0x01},
+ {0x45, {0x07}, 0x01},
+ {0x46, {0x01}, 0x01},
+ {0x47, {0x28}, 0x01},
+ {0x48, {0x01}, 0x01},
+ {0x49, {0x44}, 0x01},
+ {0x4a, {0x01}, 0x01},
+ {0x4b, {0x76}, 0x01},
+ {0x4c, {0x01}, 0x01},
+ {0x4d, {0xa0}, 0x01},
+ {0x4e, {0x01}, 0x01},
+ {0x4f, {0xe7}, 0x01},
+ {0x50, {0x02}, 0x01},
+ {0x51, {0x1f}, 0x01},
+ {0x52, {0x02}, 0x01},
+ {0x53, {0x22}, 0x01},
+ {0x54, {0x02}, 0x01},
+ {0x55, {0x54}, 0x01},
+ {0x56, {0x02}, 0x01},
+ {0x58, {0x8b}, 0x01},
+ {0x59, {0x02}, 0x01},
+ {0x5a, {0xaf}, 0x01},
+ {0x5b, {0x02}, 0x01},
+ {0x5c, {0xe0}, 0x01},
+ {0x5d, {0x03}, 0x01},
+ {0x5e, {0x01}, 0x01},
+ {0x5f, {0x03}, 0x01},
+ {0x60, {0x2d}, 0x01},
+ {0x61, {0x03}, 0x01},
+ {0x62, {0x39}, 0x01},
+ {0x63, {0x03}, 0x01},
+ {0x64, {0x47}, 0x01},
+ {0x65, {0x03}, 0x01},
+ {0x66, {0x57}, 0x01},
+ {0x67, {0x03}, 0x01},
+ {0x68, {0x65}, 0x01},
+ {0x69, {0x03}, 0x01},
+ {0x6a, {0x77}, 0x01},
+ {0x6b, {0x03}, 0x01},
+ {0x6c, {0x85}, 0x01},
+ {0x6d, {0x03}, 0x01},
+ {0x6e, {0x8f}, 0x01},
+ {0x6f, {0x03}, 0x01},
+ {0x70, {0xcb}, 0x01},
+ {0x71, {0x00}, 0x01},
+ {0x72, {0x00}, 0x01},
+ {0x73, {0x00}, 0x01},
+ {0x74, {0x21}, 0x01},
+ {0x75, {0x00}, 0x01},
+ {0x76, {0x4c}, 0x01},
+ {0x77, {0x00}, 0x01},
+ {0x78, {0x6b}, 0x01},
+ {0x79, {0x00}, 0x01},
+ {0x7a, {0x85}, 0x01},
+ {0x7b, {0x00}, 0x01},
+ {0x7c, {0x9a}, 0x01},
+ {0x7d, {0x00}, 0x01},
+ {0x7e, {0xad}, 0x01},
+ {0x7f, {0x00}, 0x01},
+ {0x80, {0xbe}, 0x01},
+ {0x81, {0x00}, 0x01},
+ {0x82, {0xcd}, 0x01},
+ {0x83, {0x01}, 0x01},
+ {0x84, {0x01}, 0x01},
+ {0x85, {0x01}, 0x01},
+ {0x86, {0x29}, 0x01},
+ {0x87, {0x01}, 0x01},
+ {0x88, {0x68}, 0x01},
+ {0x89, {0x01}, 0x01},
+ {0x8a, {0x98}, 0x01},
+ {0x8b, {0x01}, 0x01},
+ {0x8c, {0xe5}, 0x01},
+ {0x8d, {0x02}, 0x01},
+ {0x8e, {0x1e}, 0x01},
+ {0x8f, {0x02}, 0x01},
+ {0x90, {0x30}, 0x01},
+ {0x91, {0x02}, 0x01},
+ {0x92, {0x52}, 0x01},
+ {0x93, {0x02}, 0x01},
+ {0x94, {0x88}, 0x01},
+ {0x95, {0x02}, 0x01},
+ {0x96, {0xaa}, 0x01},
+ {0x97, {0x02}, 0x01},
+ {0x98, {0xd7}, 0x01},
+ {0x99, {0x02}, 0x01},
+ {0x9a, {0xf7}, 0x01},
+ {0x9b, {0x03}, 0x01},
+ {0x9c, {0x21}, 0x01},
+ {0x9d, {0x03}, 0x01},
+ {0x9e, {0x2e}, 0x01},
+ {0x9f, {0x03}, 0x01},
+ {0xa0, {0x3d}, 0x01},
+ {0xa2, {0x03}, 0x01},
+ {0xa3, {0x4c}, 0x01},
+ {0xa4, {0x03}, 0x01},
+ {0xa5, {0x5e}, 0x01},
+ {0xa6, {0x03}, 0x01},
+ {0xa7, {0x71}, 0x01},
+ {0xa9, {0x03}, 0x01},
+ {0xaa, {0x86}, 0x01},
+ {0xab, {0x03}, 0x01},
+ {0xac, {0x94}, 0x01},
+ {0xad, {0x03}, 0x01},
+ {0xae, {0xfa}, 0x01},
+ {0xaf, {0x00}, 0x01},
+ {0xb0, {0x00}, 0x01},
+ {0xb1, {0x00}, 0x01},
+ {0xb2, {0x21}, 0x01},
+ {0xb3, {0x00}, 0x01},
+ {0xb4, {0x4c}, 0x01},
+ {0xb5, {0x00}, 0x01},
+ {0xb6, {0x6b}, 0x01},
+ {0xb7, {0x00}, 0x01},
+ {0xb8, {0x85}, 0x01},
+ {0xb9, {0x00}, 0x01},
+ {0xba, {0x9a}, 0x01},
+ {0xbb, {0x00}, 0x01},
+ {0xbc, {0xad}, 0x01},
+ {0xbd, {0x00}, 0x01},
+ {0xbe, {0xbe}, 0x01},
+ {0xbf, {0x00}, 0x01},
+ {0xc0, {0xcd}, 0x01},
+ {0xc1, {0x01}, 0x01},
+ {0xc2, {0x01}, 0x01},
+ {0xc3, {0x01}, 0x01},
+ {0xc4, {0x29}, 0x01},
+ {0xc5, {0x01}, 0x01},
+ {0xc6, {0x68}, 0x01},
+ {0xc7, {0x01}, 0x01},
+ {0xc8, {0x98}, 0x01},
+ {0xc9, {0x01}, 0x01},
+ {0xca, {0xe5}, 0x01},
+ {0xcb, {0x02}, 0x01},
+ {0xcc, {0x1e}, 0x01},
+ {0xcd, {0x02}, 0x01},
+ {0xce, {0x20}, 0x01},
+ {0xcf, {0x02}, 0x01},
+ {0xd0, {0x52}, 0x01},
+ {0xd1, {0x02}, 0x01},
+ {0xd2, {0x88}, 0x01},
+ {0xd3, {0x02}, 0x01},
+ {0xd4, {0xaa}, 0x01},
+ {0xd5, {0x02}, 0x01},
+ {0xd6, {0xd7}, 0x01},
+ {0xd7, {0x02}, 0x01},
+ {0xd8, {0xf7}, 0x01},
+ {0xd9, {0x03}, 0x01},
+ {0xda, {0x21}, 0x01},
+ {0xdb, {0x03}, 0x01},
+ {0xdc, {0x2e}, 0x01},
+ {0xdd, {0x03}, 0x01},
+ {0xde, {0x3d}, 0x01},
+ {0xdf, {0x03}, 0x01},
+ {0xe0, {0x4c}, 0x01},
+ {0xe1, {0x03}, 0x01},
+ {0xe2, {0x5e}, 0x01},
+ {0xe3, {0x03}, 0x01},
+ {0xe4, {0x71}, 0x01},
+ {0xe5, {0x03}, 0x01},
+ {0xe6, {0x86}, 0x01},
+ {0xe7, {0x03}, 0x01},
+ {0xe8, {0x94}, 0x01},
+ {0xe9, {0x03}, 0x01},
+ {0xea, {0xfa}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD2 Page1 (Undocumented) */
- {0xff, 0x02},
+ {0xff, {0x02}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD2 Page3 (Undocumented) */
- {0xff, 0x04},
+ {0xff, {0x04}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD1 */
- {0xff, 0x00},
- {0xd3, 0x22}, /* RGBMIPICTRL: VSYNC back porch = 34 */
- {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+ {0xff, {0x00}, 0x01},
+ {0xd3, {0x22}, 0x01}, /* RGBMIPICTRL: VSYNC back porch = 34 */
+ {0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
+
+struct khadas_ts050_panel_data ts050_panel_data = {
+ .init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code,
+ .len = ARRAY_SIZE(ts050_init_code)
+};
+
+struct khadas_ts050_panel_data ts050v2_panel_data = {
+ .init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code,
+ .len = ARRAY_SIZE(ts050v2_init_code)
};
static inline
@@ -613,10 +671,11 @@ static int khadas_ts050_panel_prepare(struct drm_panel *panel)
msleep(100);
- for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ for (i = 0; i < khadas_ts050->panel_data->len; i++) {
err = mipi_dsi_dcs_write(khadas_ts050->link,
- init_code[i].cmd,
- &init_code[i].data, 1);
+ khadas_ts050->panel_data->init_code[i].cmd,
+ &khadas_ts050->panel_data->init_code[i].data,
+ khadas_ts050->panel_data->init_code[i].size);
if (err < 0) {
dev_err(panel->dev, "failed write cmds: %d\n", err);
goto poweroff;
@@ -762,7 +821,8 @@ static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
};
static const struct of_device_id khadas_ts050_of_match[] = {
- { .compatible = "khadas,ts050", },
+ { .compatible = "khadas,ts050", .data = &ts050_panel_data, },
+ { .compatible = "khadas,ts050v2", .data = &ts050v2_panel_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
@@ -806,6 +866,13 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
struct khadas_ts050_panel *khadas_ts050;
int err;
+ const void *data = of_device_get_match_data(&dsi->dev);
+
+ if (!data) {
+ dev_err(&dsi->dev, "No matching data\n");
+ return -ENODEV;
+ }
+
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -816,6 +883,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
if (!khadas_ts050)
return -ENOMEM;
+ khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data;
mipi_dsi_set_drvdata(dsi, khadas_ts050);
khadas_ts050->link = dsi;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 9d87cc1a357e..1a26205701b5 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x29);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
@@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
new file mode 100644
index 000000000000..115f4702d59f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2024 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ * Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+
+#define NUM_SUPPLIES 2
+
+struct sw43408_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator_bulk_data supplies[NUM_SUPPLIES];
+
+ struct gpio_desc *reset_gpio;
+
+ struct drm_dsc_config dsc;
+};
+
+static inline struct sw43408_panel *to_panel_info(struct drm_panel *panel)
+{
+ return container_of(panel, struct sw43408_panel, base);
+}
+
+static int sw43408_unprepare(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->link);
+ if (ret < 0)
+ dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->link);
+ if (ret < 0)
+ dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);
+
+ msleep(100);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int sw43408_program(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ struct drm_dsc_picture_parameter_set pps;
+
+ mipi_dsi_dcs_write_seq(ctx->link, MIPI_DCS_SET_GAMMA_CURVE, 0x02);
+
+ mipi_dsi_dcs_set_tear_on(ctx->link, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq(ctx->link, 0x53, 0x0c, 0x30);
+ mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x00, 0x70, 0xdf, 0x00, 0x70, 0xdf);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xf7, 0x01, 0x49, 0x0c);
+
+ mipi_dsi_dcs_exit_sleep_mode(ctx->link);
+
+ msleep(135);
+
+ /* COMPRESSION_MODE moved after setting the PPS */
+
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb0, 0xac);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xe5,
+ 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x0e, 0x10);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb5,
+ 0x75, 0x60, 0x2d, 0x5d, 0x80, 0x00, 0x0a, 0x0b,
+ 0x00, 0x05, 0x0b, 0x00, 0x80, 0x0d, 0x0e, 0x40,
+ 0x00, 0x0c, 0x00, 0x16, 0x00, 0xb8, 0x00, 0x80,
+ 0x0d, 0x0e, 0x40, 0x00, 0x0c, 0x00, 0x16, 0x00,
+ 0xb8, 0x00, 0x81, 0x00, 0x03, 0x03, 0x03, 0x01,
+ 0x01);
+ msleep(85);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xcd,
+ 0x00, 0x00, 0x00, 0x19, 0x19, 0x19, 0x19, 0x19,
+ 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19,
+ 0x16, 0x16);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xcb, 0x80, 0x5c, 0x07, 0x03, 0x28);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xc0, 0x02, 0x02, 0x0f);
+ mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x04, 0x61, 0xdb, 0x04, 0x70, 0xdb);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb0, 0xca);
+
+ mipi_dsi_dcs_set_display_on(ctx->link);
+
+ msleep(50);
+
+ ctx->link->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ drm_dsc_pps_payload_pack(&pps, ctx->link->dsc);
+ mipi_dsi_picture_parameter_set(ctx->link, &pps);
+
+ ctx->link->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ /*
+ * This panel uses PPS selectors with offset:
+ * PPS 1 if pps_identifier is 0
+ * PPS 2 if pps_identifier is 1
+ */
+ mipi_dsi_compression_mode_ext(ctx->link, true,
+ MIPI_DSI_COMPRESSION_DSC, 1);
+
+ return 0;
+}
+
+static int sw43408_prepare(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(9000, 10000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(9000, 10000);
+
+ ret = sw43408_program(panel);
+ if (ret)
+ goto poweroff;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+}
+
+static const struct drm_display_mode sw43408_mode = {
+ .clock = (1080 + 20 + 32 + 20) * (2160 + 20 + 4 + 20) * 60 / 1000,
+
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 32,
+ .htotal = 1080 + 20 + 32 + 20,
+
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 20,
+ .vsync_end = 2160 + 20 + 4,
+ .vtotal = 2160 + 20 + 4 + 20,
+
+ .width_mm = 62,
+ .height_mm = 124,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int sw43408_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &sw43408_mode);
+}
+
+static int sw43408_backlight_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+
+ return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+}
+
+static const struct backlight_ops sw43408_backlight_ops = {
+ .update_status = sw43408_backlight_update_status,
+};
+
+static int sw43408_backlight_init(struct sw43408_panel *ctx)
+{
+ struct device *dev = &ctx->link->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_PLATFORM,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ ctx->base.backlight = devm_backlight_device_register(dev, dev_name(dev), dev,
+ ctx->link,
+ &sw43408_backlight_ops,
+ &props);
+
+ if (IS_ERR(ctx->base.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->base.backlight),
+ "Failed to create backlight\n");
+
+ return 0;
+}
+
+static const struct drm_panel_funcs sw43408_funcs = {
+ .unprepare = sw43408_unprepare,
+ .prepare = sw43408_prepare,
+ .get_modes = sw43408_get_modes,
+};
+
+static const struct of_device_id sw43408_of_match[] = {
+ { .compatible = "lg,sw43408", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sw43408_of_match);
+
+static int sw43408_add(struct sw43408_panel *ctx)
+{
+ struct device *dev = &ctx->link->dev;
+ int ret;
+
+ ctx->supplies[0].supply = "vddi"; /* 1.88 V */
+ ctx->supplies[0].init_load_uA = 62000;
+ ctx->supplies[1].supply = "vpnl"; /* 3.0 V */
+ ctx->supplies[1].init_load_uA = 857000;
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ return dev_err_probe(dev, ret, "cannot get reset gpio\n");
+ }
+
+ ret = sw43408_backlight_init(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->base.prepare_prev_first = true;
+
+ drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ drm_panel_add(&ctx->base);
+ return ret;
+}
+
+static int sw43408_probe(struct mipi_dsi_device *dsi)
+{
+ struct sw43408_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ dsi->mode_flags = MIPI_DSI_MODE_LPM;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ctx->link = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ret = sw43408_add(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* The panel works only in the DSC mode. Set DSC params. */
+ ctx->dsc.dsc_version_major = 0x1;
+ ctx->dsc.dsc_version_minor = 0x1;
+
+ /* slice_count * slice_width == width */
+ ctx->dsc.slice_height = 16;
+ ctx->dsc.slice_width = 540;
+ ctx->dsc.slice_count = 2;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4;
+ ctx->dsc.block_pred_enable = true;
+
+ dsi->dsc = &ctx->dsc;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static void sw43408_remove(struct mipi_dsi_device *dsi)
+{
+ struct sw43408_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = sw43408_unprepare(&ctx->base);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->base);
+}
+
+static struct mipi_dsi_driver sw43408_driver = {
+ .driver = {
+ .name = "panel-lg-sw43408",
+ .of_match_table = sw43408_of_match,
+ },
+ .probe = sw43408_probe,
+ .remove = sw43408_remove,
+};
+module_mipi_dsi_driver(sw43408_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_DESCRIPTION("LG SW436408 MIPI-DSI LED panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 648ce9201426..028fdac293f7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -556,10 +556,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
}
dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
of_node_put(dsi_r);
- if (!dsi_r_host) {
- dev_err(dev, "Cannot get secondary DSI host\n");
- return -EPROBE_DEFER;
- }
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
if (!nt->dsi[1]) {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 33fb3d715e54..3886372415c2 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -605,21 +605,16 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
struct device *dev = &pinfo->link->dev;
int i, ret;
- for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
pinfo->supplies[i].supply = nt36672a_regulator_names[i];
+ pinfo->supplies[i].init_load_uA = nt36672a_regulator_enable_loads[i];
+ }
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
pinfo->supplies);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
- ret = regulator_set_load(pinfo->supplies[i].consumer,
- nt36672a_regulator_enable_loads[i]);
- if (ret)
- return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
- }
-
pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pinfo->reset_gpio))
return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index cb7406d74466..20b7bfe4aa12 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -25,12 +25,6 @@ static const unsigned long regulator_enable_loads[] = {
100000,
};
-static const unsigned long regulator_disable_loads[] = {
- 80,
- 100,
- 100,
-};
-
struct panel_desc {
const struct drm_display_mode *display_mode;
u32 width_mm;
@@ -349,17 +343,7 @@ static int nt36672e_1080x2408_60hz_init(struct mipi_dsi_device *dsi)
static int nt36672e_power_on(struct nt36672e_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
- ret = regulator_set_load(ctx->supplies[i].consumer,
- regulator_enable_loads[i]);
- if (ret) {
- dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
- ctx->supplies[i].supply, ret);
- return ret;
- }
- }
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
@@ -385,20 +369,9 @@ static int nt36672e_power_off(struct nt36672e_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
int ret = 0;
- int i;
gpiod_set_value(ctx->reset_gpio, 0);
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
- ret = regulator_set_load(ctx->supplies[i].consumer,
- regulator_disable_loads[i]);
- if (ret) {
- dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
- ctx->supplies[i].supply, ret);
- return ret;
- }
- }
-
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret)
dev_err(&dsi->dev, "regulator bulk disable failed: %d\n", ret);
@@ -567,8 +540,10 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
return -ENODEV;
}
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
ctx->supplies[i].supply = regulator_names[i];
+ ctx->supplies[i].init_load_uA = regulator_enable_loads[i];
+ }
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -614,8 +589,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
new file mode 100644
index 000000000000..4dca6802faef
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2024 David Wronek <david@mainlining.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct rm69380_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+};
+
+static inline
+struct rm69380_panel *to_rm69380_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rm69380_panel, panel);
+}
+
+static void rm69380_reset(struct rm69380_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(15000, 16000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int rm69380_on(struct rm69380_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi[0];
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (ctx->dsi[1])
+ ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd4);
+ mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq(dsi, 0x48, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x26);
+ mipi_dsi_dcs_write_seq(dsi, 0x75, 0x3f);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1a);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28);
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x08);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(36);
+
+ return 0;
+}
+
+static int rm69380_off(struct rm69380_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi[0];
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (ctx->dsi[1])
+ ctx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int rm69380_prepare(struct drm_panel *panel)
+{
+ struct rm69380_panel *ctx = to_rm69380_panel(panel);
+ struct device *dev = &ctx->dsi[0]->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ rm69380_reset(ctx);
+
+ ret = rm69380_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rm69380_unprepare(struct drm_panel *panel)
+{
+ struct rm69380_panel *ctx = to_rm69380_panel(panel);
+ struct device *dev = &ctx->dsi[0]->dev;
+ int ret;
+
+ ret = rm69380_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode rm69380_mode = {
+ .clock = (2560 + 32 + 12 + 38) * (1600 + 20 + 4 + 8) * 90 / 1000,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 32,
+ .hsync_end = 2560 + 32 + 12,
+ .htotal = 2560 + 32 + 12 + 38,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 20,
+ .vsync_end = 1600 + 20 + 4,
+ .vtotal = 1600 + 20 + 4 + 8,
+ .width_mm = 248,
+ .height_mm = 155,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int rm69380_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &rm69380_mode);
+}
+
+static const struct drm_panel_funcs rm69380_panel_funcs = {
+ .prepare = rm69380_prepare,
+ .unprepare = rm69380_unprepare,
+ .get_modes = rm69380_get_modes,
+};
+
+static int rm69380_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int rm69380_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops rm69380_bl_ops = {
+ .update_status = rm69380_bl_update_status,
+ .get_brightness = rm69380_bl_get_brightness,
+};
+
+static struct backlight_device *
+rm69380_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 511,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &rm69380_bl_ops, &props);
+}
+
+static int rm69380_probe(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_host *dsi_sec_host;
+ struct rm69380_panel *ctx;
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_sec;
+ int ret, i;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "avdd";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ dsi_sec = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+
+ if (dsi_sec) {
+ const struct mipi_dsi_device_info info = { "RM69380 DSI1", 0,
+ dsi_sec };
+
+ dsi_sec_host = of_find_mipi_dsi_host_by_node(dsi_sec);
+ of_node_put(dsi_sec);
+ if (!dsi_sec_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ ctx->dsi[1] =
+ devm_mipi_dsi_device_register_full(dev, dsi_sec_host, &info);
+ if (IS_ERR(ctx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(ctx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+
+ mipi_dsi_set_drvdata(ctx->dsi[1], ctx);
+ }
+
+ ctx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = rm69380_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ if (!ctx->dsi[i])
+ continue;
+
+ dev_dbg(&ctx->dsi[i]->dev, "Binding DSI %d\n", i);
+
+ ctx->dsi[i]->lanes = 4;
+ ctx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+ ctx->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = devm_mipi_dsi_attach(dev, ctx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret,
+ "Failed to attach to DSI%d\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void rm69380_remove(struct mipi_dsi_device *dsi)
+{
+ struct rm69380_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id rm69380_of_match[] = {
+ { .compatible = "lenovo,j716f-edo-rm69380" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rm69380_of_match);
+
+static struct mipi_dsi_driver rm69380_panel_driver = {
+ .probe = rm69380_probe,
+ .remove = rm69380_remove,
+ .driver = {
+ .name = "panel-raydium-rm69380",
+ .of_match_table = rm69380_of_match,
+ },
+};
+module_mipi_dsi_driver(rm69380_panel_driver);
+
+MODULE_AUTHOR("David Wronek <david@mainlining.org");
+MODULE_DESCRIPTION("DRM driver for Raydium RM69380-equipped DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 76c2a8f6718c..a9f0d214a900 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -36,7 +36,7 @@ struct atana33xc20_panel {
struct gpio_desc *el_on3_gpio;
struct drm_dp_aux *aux;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
ktime_t powered_off_time;
ktime_t powered_on_time;
@@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
if (hpd_asserted < 0)
ret = hpd_asserted;
- if (ret)
+ if (ret) {
dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
-
- return ret;
- }
-
- if (p->aux->wait_hpd_asserted) {
+ goto error;
+ }
+ } else if (p->aux->wait_hpd_asserted) {
ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
- if (ret)
+ if (ret) {
dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
-
- return ret;
+ goto error;
+ }
}
/*
@@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
* right times.
*/
return 0;
+
+error:
+ drm_dp_dpcd_set_powered(p->aux, false);
+ regulator_disable(p->supply);
+
+ return ret;
}
static int atana33xc20_disable(struct drm_panel *panel)
@@ -249,9 +253,12 @@ static int atana33xc20_get_modes(struct drm_panel *panel,
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, &aux_ep->aux->ddc);
- num = drm_add_edid_modes(connector, p->edid);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, &aux_ep->aux->ddc);
+
+ drm_edid_connector_update(connector, p->drm_edid);
+
+ num = drm_edid_connector_add_modes(connector);
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
@@ -324,9 +331,14 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+
+ /*
+ * Warn if we get an error, but don't consider it fatal. Having
+ * a panel where we can't control the backlight is better than
+ * no panel.
+ */
if (ret)
- return dev_err_probe(dev, ret,
- "failed to register dp aux backlight\n");
+ dev_warn(dev, "failed to register dp aux backlight: %d\n", ret);
drm_panel_add(&panel->base);
@@ -342,7 +354,7 @@ static void atana33xc20_remove(struct dp_aux_ep_device *aux_ep)
drm_panel_disable(&panel->base);
drm_panel_unprepare(&panel->base);
- kfree(panel->edid);
+ drm_edid_free(panel->drm_edid);
}
static void atana33xc20_shutdown(struct dp_aux_ep_device *aux_ep)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
new file mode 100644
index 000000000000..10bc8fb5f1f9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Samsung S6E3FA7 panel.
+ *
+ * Copyright (c) 2022-2024, The Linux Foundation. All rights reserved.
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e3fa7_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct s6e3fa7_panel *to_s6e3fa7_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e3fa7_panel, panel);
+}
+
+static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4,
+ 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0,
+ 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e3fa7_panel_prepare(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ s6e3fa7_panel_reset(ctx);
+
+ ret = s6e3fa7_panel_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e3fa7_panel_unprepare(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ return 0;
+}
+
+static int s6e3fa7_panel_disable(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static const struct drm_display_mode s6e3fa7_panel_mode = {
+ .clock = (1080 + 32 + 32 + 78) * (2220 + 32 + 4 + 78) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 32,
+ .htotal = 1080 + 32 + 32 + 78,
+ .vdisplay = 2220,
+ .vsync_start = 2220 + 32,
+ .vsync_end = 2220 + 32 + 4,
+ .vtotal = 2220 + 32 + 4 + 78,
+ .width_mm = 62,
+ .height_mm = 127,
+};
+
+static int s6e3fa7_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e3fa7_panel_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e3fa7_panel_funcs = {
+ .prepare = s6e3fa7_panel_prepare,
+ .unprepare = s6e3fa7_panel_unprepare,
+ .disable = s6e3fa7_panel_disable,
+ .get_modes = s6e3fa7_panel_get_modes,
+};
+
+static int s6e3fa7_panel_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int s6e3fa7_panel_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ return brightness;
+}
+
+static const struct backlight_ops s6e3fa7_panel_bl_ops = {
+ .update_status = s6e3fa7_panel_bl_update_status,
+ .get_brightness = s6e3fa7_panel_bl_get_brightness,
+};
+
+static struct backlight_device *
+s6e3fa7_panel_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 1023,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &s6e3fa7_panel_bl_ops, &props);
+}
+
+static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e3fa7_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s6e3fa7_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e3fa7_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e3fa7_panel_of_match[] = {
+ { .compatible = "samsung,s6e3fa7-ams559nk06" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e3fa7_panel_of_match);
+
+static struct mipi_dsi_driver s6e3fa7_panel_driver = {
+ .probe = s6e3fa7_panel_probe,
+ .remove = s6e3fa7_panel_remove,
+ .driver = {
+ .name = "panel-samsung-s6e3fa7",
+ .of_match_table = s6e3fa7_panel_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3fa7_panel_driver);
+
+MODULE_AUTHOR("Richard Acayan <mailingradian@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Samsung S6E3FA7 command mode DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 20e3df1c59d4..dcb6d0b6ced0 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -151,7 +151,7 @@ struct panel_simple {
struct gpio_desc *enable_gpio;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct drm_display_mode override_mode;
@@ -309,8 +309,8 @@ static int panel_simple_suspend(struct device *dev)
regulator_disable(p->supply);
p->unprepared_time = ktime_get_boottime();
- kfree(p->edid);
- p->edid = NULL;
+ drm_edid_free(p->drm_edid);
+ p->drm_edid = NULL;
return 0;
}
@@ -399,11 +399,12 @@ static int panel_simple_get_modes(struct drm_panel *panel,
if (p->ddc) {
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, p->ddc);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, p->ddc);
- if (p->edid)
- num += drm_add_edid_modes(connector, p->edid);
+ drm_edid_connector_update(connector, p->drm_edid);
+
+ num += drm_edid_connector_add_modes(connector);
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
@@ -1457,6 +1458,32 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing cct_cmt430b19n00_timing = {
+ .pixelclock = { 8000000, 9000000, 12000000 },
+ .hactive = { 480, 480, 480 },
+ .hfront_porch = { 2, 8, 75 },
+ .hback_porch = { 3, 43, 43 },
+ .hsync_len = { 2, 4, 75 },
+ .vactive = { 272, 272, 272 },
+ .vfront_porch = { 2, 8, 37 },
+ .vback_porch = { 2, 12, 12 },
+ .vsync_len = { 2, 4, 37 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW
+};
+
+static const struct panel_desc cct_cmt430b19n00 = {
+ .timings = &cct_cmt430b19n00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 53,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2565,22 +2592,22 @@ static const struct panel_desc innolux_g121i1_l01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode innolux_g121x1_l03_mode = {
- .clock = 65000,
- .hdisplay = 1024,
- .hsync_start = 1024 + 0,
- .hsync_end = 1024 + 1,
- .htotal = 1024 + 0 + 1 + 320,
- .vdisplay = 768,
- .vsync_start = 768 + 38,
- .vsync_end = 768 + 38 + 1,
- .vtotal = 768 + 38 + 1 + 0,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+static const struct display_timing innolux_g121x1_l03_timings = {
+ .pixelclock = { 57500000, 64900000, 74400000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 90, 140, 190 },
+ .hback_porch = { 90, 140, 190 },
+ .hsync_len = { 36, 40, 60 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 2, 15, 30 },
+ .vback_porch = { 2, 15, 30 },
+ .vsync_len = { 2, 8, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct panel_desc innolux_g121x1_l03 = {
- .modes = &innolux_g121x1_l03_mode,
- .num_modes = 1,
+ .timings = &innolux_g121x1_l03_timings,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 246,
@@ -2591,6 +2618,27 @@ static const struct panel_desc innolux_g121x1_l03 = {
.unprepare = 200,
.disable = 400,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc innolux_g121xce_l01 = {
+ .timings = &innolux_g121x1_l03_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 246,
+ .height = 185,
+ },
+ .delay = {
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 400,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g156hce_l01_timings = {
@@ -3465,6 +3513,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
+ .clock = 66500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 12,
+ .hsync_end = 1280 + 12 + 20,
+ .htotal = 1280 + 12 + 20 + 56,
+ .vdisplay = 800,
+ .vsync_start = 800 + 1,
+ .vsync_end = 800 + 1 + 3,
+ .vtotal = 800 + 1 + 3 + 20,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t006_zhc01 = {
+ .modes = &powertip_ph128800t006_zhc01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
.clock = 24750,
.hdisplay = 800,
@@ -4403,6 +4477,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
+ .compatible = "cct,cmt430b19n00",
+ .data = &cct_cmt430b19n00,
+ }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -4538,6 +4615,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
+ .compatible = "innolux,g121xce-l01",
+ .data = &innolux_g121xce_l01,
+ }, {
.compatible = "innolux,g156hce-l01",
.data = &innolux_g156hce_l01,
}, {
@@ -4640,6 +4720,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t006-zhc01",
+ .data = &powertip_ph128800t006_zhc01,
+ }, {
.compatible = "powertip,ph800480t013-idf02",
.data = &powertip_ph800480t013_idf02,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index a3e142f156d5..7d8302cca091 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -612,6 +612,92 @@ static const struct st7703_panel_desc rgb10max3_panel_desc = {
.init_sequence = rgb10max3_panel_init_sequence,
};
+static int gameforcechi_init_sequence(struct st7703 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /*
+ * Init sequence was supplied by the panel vendor. Panel will not
+ * respond to commands until it is brought out of sleep mode first.
+ */
+
+ mipi_dsi_dcs_exit_sleep_mode(dsi);
+ msleep(250);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x31, 0x81, 0x05, 0xf9,
+ 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00,
+ 0x00, 0x02, 0x4f, 0xd1, 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x0c, 0x10, 0x0a,
+ 0x50, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50,
+ 0x00, 0x00, 0x08, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0x00, 0x13, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x07, 0x07, 0x0b, 0x0b,
+ 0x03, 0x0b, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00,
+ 0xc0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x53, 0x00, 0x1e,
+ 0x1e, 0x77, 0xe1, 0xcc, 0xdd, 0x67, 0x77, 0x33,
+ 0x33);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x10, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x6c, 0x7c);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0x08, 0x00, 0x0e, 0x00,
+ 0x00, 0xb0, 0xb1, 0x11, 0x31, 0x23, 0x28, 0x10,
+ 0xb0, 0xb1, 0x27, 0x08, 0x00, 0x04, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00, 0x00,
+ 0x88, 0x88, 0xba, 0x60, 0x24, 0x08, 0x88, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xba, 0x71, 0x35,
+ 0x18, 0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x97, 0x0a, 0x82, 0x02,
+ 0x13, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x88, 0xba, 0x17, 0x53, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0x88, 0x81, 0x88, 0xba, 0x06, 0x42,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x23, 0x10,
+ 0x00, 0x02, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x07, 0x0b,
+ 0x27, 0x2d, 0x3f, 0x3b, 0x37, 0x05, 0x0a, 0x0b,
+ 0x0f, 0x11, 0x0f, 0x12, 0x12, 0x18, 0x00, 0x07,
+ 0x0b, 0x27, 0x2d, 0x3f, 0x3b, 0x37, 0x05, 0xa0,
+ 0x0b, 0x0f, 0x11, 0x0f, 0x12, 0x12, 0x18);
+
+ return 0;
+}
+
+static const struct drm_display_mode gameforcechi_mode = {
+ .hdisplay = 640,
+ .hsync_start = 640 + 40,
+ .hsync_end = 640 + 40 + 2,
+ .htotal = 640 + 40 + 2 + 80,
+ .vdisplay = 480,
+ .vsync_start = 480 + 17,
+ .vsync_end = 480 + 17 + 5,
+ .vtotal = 480 + 17 + 5 + 13,
+ .clock = 23546,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 71,
+ .height_mm = 53,
+};
+
+static const struct st7703_panel_desc gameforcechi_desc = {
+ .mode = &gameforcechi_mode,
+ .lanes = 2,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = gameforcechi_init_sequence,
+};
+
static int st7703_enable(struct drm_panel *panel)
{
struct st7703 *ctx = panel_to_st7703(panel);
@@ -887,6 +973,7 @@ static void st7703_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id st7703_of_match[] = {
{ .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc },
+ { .compatible = "gameforce,chi-panel", .data = &gameforcechi_desc },
{ .compatible = "powkiddy,rgb10max3-panel", .data = &rgb10max3_panel_desc },
{ .compatible = "powkiddy,rgb30-panel", .data = &rgb30panel_desc },
{ .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc },
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index b73448cf349d..d447db912a61 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -550,10 +550,8 @@ static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
of_node_put(dsi1);
- if (!dsi1_host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!dsi1_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
/* register the second DSI device */
dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695283..272490b9565b 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -197,7 +197,9 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->dsi = dsi;
ctx->supplies[0].supply = "vdda";
+ ctx->supplies[0].init_load_uA = 32000;
ctx->supplies[1].supply = "vdd3p3";
+ ctx->supplies[1].init_load_uA = 13200;
ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -227,22 +229,8 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
goto err_dsi_attach;
}
- ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
- if (ret) {
- dev_err(dev, "regulator set load failed for vdda supply ret = %d\n", ret);
- goto err_set_load;
- }
-
- ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
- if (ret) {
- dev_err(dev, "regulator set load failed for vdd3p3 supply ret = %d\n", ret);
- goto err_set_load;
- }
-
return 0;
-err_set_load:
- mipi_dsi_detach(dsi);
err_dsi_attach:
drm_panel_remove(&ctx->panel);
return ret;
@@ -253,8 +241,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index 2c01c1e7523e..7da2b3f02ed9 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -12,6 +12,4 @@ panfrost-y := \
panfrost_perfcnt.o \
panfrost_dump.o
-panfrost-$(CONFIG_DEBUG_FS) += panfrost_debugfs.o
-
obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.c b/drivers/gpu/drm/panfrost/panfrost_debugfs.c
deleted file mode 100644
index 72d4286a6bf7..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2023 Collabora ltd. */
-/* Copyright 2023 Amazon.com, Inc. or its affiliates. */
-
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_file.h>
-#include <drm/panfrost_drm.h>
-
-#include "panfrost_device.h"
-#include "panfrost_gpu.h"
-#include "panfrost_debugfs.h"
-
-void panfrost_debugfs_init(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev->dev));
-
- debugfs_create_atomic_t("profile", 0600, minor->debugfs_root, &pfdev->profile_mode);
-}
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.h b/drivers/gpu/drm/panfrost/panfrost_debugfs.h
deleted file mode 100644
index c5af5f35877f..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2023 Collabora ltd.
- * Copyright 2023 Amazon.com, Inc. or its affiliates.
- */
-
-#ifndef PANFROST_DEBUGFS_H
-#define PANFROST_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-void panfrost_debugfs_init(struct drm_minor *minor);
-#endif
-
-#endif /* PANFROST_DEBUGFS_H */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 62f7e3527385..cffcb0ac7c11 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -130,7 +130,7 @@ struct panfrost_device {
struct list_head scheduled_jobs;
struct panfrost_perfcnt *perfcnt;
- atomic_t profile_mode;
+ bool profile_mode;
struct mutex sched_lock;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a926d71e8131..ef9f6c0716d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -20,7 +20,6 @@
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
-#include "panfrost_debugfs.h"
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
@@ -551,10 +550,12 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
- drm_printf(p, "drm-engine-%s:\t%llu ns\n",
- engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
- drm_printf(p, "drm-cycles-%s:\t%llu\n",
- engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ if (pfdev->profile_mode) {
+ drm_printf(p, "drm-engine-%s:\t%llu ns\n",
+ engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
+ drm_printf(p, "drm-cycles-%s:\t%llu\n",
+ engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ }
drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
engine_names[i], pfdev->pfdevfreq.fast_rate);
drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
@@ -600,10 +601,6 @@ static const struct drm_driver panfrost_drm_driver = {
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
-
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = panfrost_debugfs_init,
-#endif
};
static int panfrost_probe(struct platform_device *pdev)
@@ -692,6 +689,40 @@ static void panfrost_remove(struct platform_device *pdev)
drm_dev_put(ddev);
}
+static ssize_t profiling_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", pfdev->profile_mode);
+}
+
+static ssize_t profiling_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ bool value;
+ int err;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ pfdev->profile_mode = value;
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(profiling);
+
+static struct attribute *panfrost_attrs[] = {
+ &dev_attr_profiling.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(panfrost);
+
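
As a side note, the new "profiling" knob is a plain device attribute, so userspace simply writes a boolean to it. A minimal, illustrative sketch follows; the sysfs path in the usage comment is an assumption (it depends on the platform/DT device name) and is not defined by this patch.

/* Illustrative only: toggle Panfrost job profiling from userspace by
 * writing a boolean to the new sysfs attribute. The device path is an
 * assumption and depends on the platform/DT device name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int panfrost_set_profiling(const char *attr_path, int enable)
{
	int fd = open(attr_path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		perror("write");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

/* e.g. panfrost_set_profiling("/sys/bus/platform/devices/ff9a0000.gpu/profiling", 1); */
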
/*
* The OPP core wants the supply names to be NULL terminated, but we need the
* correct num_supplies value for regulator core. Hence, we NULL terminate here
@@ -789,6 +820,7 @@ static struct platform_driver panfrost_driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
+ .dev_groups = panfrost_groups,
},
};
module_platform_driver(panfrost_driver);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 9063ce254642..fd8e44992184 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -441,19 +441,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
- val, !val, 0, 1000);
+ val, !val, 0, 2000);
if (ret)
dev_err(pfdev->dev, "l2 power transition timeout");
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 0c2dbf6ef2a5..a61ef0af9a4e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -243,7 +243,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
subslot = panfrost_enqueue_job(pfdev, js, job);
/* Don't queue the job if a reset is in progress */
if (!atomic_read(&pfdev->reset.pending)) {
- if (atomic_read(&pfdev->profile_mode)) {
+ if (pfdev->profile_mode) {
panfrost_cycle_counter_get(pfdev);
job->is_profiled = true;
job->start_time = ktime_get();
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f38385fe76bb..b91019cd5acb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ /* This can happen if the last fault only partially filled this
+ * section of the pages array before failing. In that case,
+ * skip the pages that are already filled.
+ */
+ if (pages[i])
+ continue;
+
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
- goto err_pages;
+ goto err_unlock;
}
}
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
- goto err_pages;
+ goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
-err_pages:
- drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
diff --git a/drivers/gpu/drm/panthor/Kconfig b/drivers/gpu/drm/panthor/Kconfig
new file mode 100644
index 000000000000..55b40ad07f3b
--- /dev/null
+++ b/drivers/gpu/drm/panthor/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+config DRM_PANTHOR
+ tristate "Panthor (DRM support for ARM Mali CSF-based GPUs)"
+ depends on DRM
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ depends on MMU
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DRM_EXEC
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_GPUVM
+ select DRM_SCHED
+ select IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_SUPPORT
+ select PM_DEVFREQ
+ help
+ DRM driver for ARM Mali CSF-based GPUs.
+
+ This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
+
+ Note that the Mali-G68 and Mali-G78, while based on the Valhall
+ architecture, are supported by the panfrost driver instead, as they
+ are not CSF GPUs.
diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile
new file mode 100644
index 000000000000..15294719b09c
--- /dev/null
+++ b/drivers/gpu/drm/panthor/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+panthor-y := \
+ panthor_devfreq.o \
+ panthor_device.o \
+ panthor_drv.o \
+ panthor_fw.o \
+ panthor_gem.o \
+ panthor_gpu.o \
+ panthor_heap.o \
+ panthor_mmu.o \
+ panthor_sched.o
+
+obj-$(CONFIG_DRM_PANTHOR) += panthor.o
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
new file mode 100644
index 000000000000..c6d3c327cc24
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_managed.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+
+/**
+ * struct panthor_devfreq - Device frequency management
+ */
+struct panthor_devfreq {
+ /** @devfreq: devfreq device. */
+ struct devfreq *devfreq;
+
+ /** @gov_data: Governor data. */
+ struct devfreq_simple_ondemand_data gov_data;
+
+ /** @busy_time: Busy time. */
+ ktime_t busy_time;
+
+ /** @idle_time: Idle time. */
+ ktime_t idle_time;
+
+ /** @time_last_update: Last update time. */
+ ktime_t time_last_update;
+
+ /** @last_busy_state: True if the GPU was busy last time we updated the state. */
+ bool last_busy_state;
+
+ /**
+ * @lock: Lock used to protect busy_time, idle_time, time_last_update and
+ * last_busy_state.
+ *
+ * These fields can be accessed concurrently by panthor_devfreq_get_dev_status()
+ * and panthor_devfreq_record_{busy,idle}().
+ */
+ spinlock_t lock;
+};
+
+static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = pdevfreq->time_last_update;
+
+ if (pdevfreq->last_busy_state)
+ pdevfreq->busy_time += ktime_sub(now, last);
+ else
+ pdevfreq->idle_time += ktime_sub(now, last);
+
+ pdevfreq->time_last_update = now;
+}
+
+static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
+ return dev_pm_opp_set_rate(dev, *freq);
+}
+
+static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
+{
+ pdevfreq->busy_time = 0;
+ pdevfreq->idle_time = 0;
+ pdevfreq->time_last_update = ktime_get();
+}
+
+static int panthor_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ status->current_frequency = clk_get_rate(ptdev->clks.core);
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
+ pdevfreq->idle_time));
+
+ status->busy_time = ktime_to_ns(pdevfreq->busy_time);
+
+ panthor_devfreq_reset(pdevfreq);
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+
+ drm_dbg(&ptdev->base, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile panthor_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50, /* ~3 frames */
+ .target = panthor_devfreq_target,
+ .get_dev_status = panthor_devfreq_get_dev_status,
+};
+
+int panthor_devfreq_init(struct panthor_device *ptdev)
+{
+ /* There are actually two regulators (mali and sram), but the OPP core only
+ * supports one.
+ *
+ * We assume the sram regulator is coupled with the mali one and let
+ * the coupling logic deal with voltage updates.
+ */
+ static const char * const reg_names[] = { "mali", NULL };
+ struct thermal_cooling_device *cooling;
+ struct device *dev = ptdev->base.dev;
+ struct panthor_devfreq *pdevfreq;
+ struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ int ret;
+
+ pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
+ if (!pdevfreq)
+ return -ENOMEM;
+
+ ptdev->devfreq = pdevfreq;
+
+ ret = devm_pm_opp_set_regulators(dev, reg_names);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
+
+ return ret;
+ }
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&pdevfreq->lock);
+
+ panthor_devfreq_reset(pdevfreq);
+
+ cur_freq = clk_get_rate(ptdev->clks.core);
+
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panthor_devfreq_profile.initial_freq = cur_freq;
+
+ /* Regulator coupling only takes care of synchronizing/balancing voltage
+ * updates, but the coupled regulator needs to be enabled manually.
+ *
+ * We use devm_regulator_get_enable_optional() and keep the sram supply
+ * enabled until the device is removed, just like we do for the mali
+ * supply, which is enabled when dev_pm_opp_set_opp(dev, opp) is called,
+ * and disabled when the opp_table is torn down, using the devm action.
+ *
+ * If we really care about disabling regulators on suspend, we should:
+ * - use devm_regulator_get_optional() here
+ * - call dev_pm_opp_set_opp(dev, NULL) before leaving this function
+ * (this disables the regulator passed to the OPP layer)
+ * - call dev_pm_opp_set_opp(dev, NULL) and
+ * regulator_disable(ptdev->regulators.sram) in
+ * panthor_devfreq_suspend()
+ * - call dev_pm_opp_set_opp(dev, default_opp) and
+ * regulator_enable(ptdev->regulators.sram) in
+ * panthor_devfreq_resume()
+ *
+ * But without knowing whether it's beneficial (in terms of power
+ * consumption), or how much it slows down the suspend/resume steps,
+ * let's just keep the regulators enabled for the device lifetime
+ * (see the sketch after this function for what the alternative could
+ * look like).
+ */
+ ret = devm_regulator_get_enable_optional(dev, "sram");
+ if (ret && ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "Couldn't retrieve/enable sram supply\n");
+ return ret;
+ }
+
+ /*
+ * Set the recommended OPP. This will enable and configure the regulator,
+ * if any, and will avoid a switch off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
+ dev_pm_opp_put(opp);
+
+ /*
+ * Setup default thresholds for the simple_ondemand governor.
+ * The values are chosen based on experiments.
+ */
+ pdevfreq->gov_data.upthreshold = 45;
+ pdevfreq->gov_data.downdifferential = 5;
+
+ pdevfreq->devfreq = devm_devfreq_add_device(dev, &panthor_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &pdevfreq->gov_data);
+ if (IS_ERR(pdevfreq->devfreq)) {
+ DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(pdevfreq->devfreq);
+ pdevfreq->devfreq = NULL;
+ return ret;
+ }
+
+ cooling = devfreq_cooling_em_register(pdevfreq->devfreq, NULL);
+ if (IS_ERR(cooling))
+ DRM_DEV_INFO(dev, "Failed to register cooling device\n");
+
+ return 0;
+}
+
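
Purely as an illustration of the alternative approach described in the comment inside panthor_devfreq_init() above (disabling the supplies across suspend/resume), the sketch below shows roughly what it could look like. It is not part of this patch: ptdev->regulators.sram and default_opp are hypothetical names taken from that comment.

/* Hypothetical sketch of the alternative described in the comment above.
 * 'regulators.sram' and 'default_opp' do not exist in this patch; they are
 * only placeholders for what such an implementation would need.
 */
static int panthor_devfreq_suspend_supplies(struct panthor_device *ptdev)
{
	int ret;

	/* Drop the OPP vote; this disables the regulator handed to the OPP core. */
	ret = dev_pm_opp_set_opp(ptdev->base.dev, NULL);
	if (ret)
		return ret;

	return regulator_disable(ptdev->regulators.sram);
}

static int panthor_devfreq_resume_supplies(struct panthor_device *ptdev,
					   struct dev_pm_opp *default_opp)
{
	int ret;

	ret = regulator_enable(ptdev->regulators.sram);
	if (ret)
		return ret;

	return dev_pm_opp_set_opp(ptdev->base.dev, default_opp);
}
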
+int panthor_devfreq_resume(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+
+ if (!pdevfreq->devfreq)
+ return 0;
+
+ panthor_devfreq_reset(pdevfreq);
+
+ return devfreq_resume_device(pdevfreq->devfreq);
+}
+
+int panthor_devfreq_suspend(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+
+ if (!pdevfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(pdevfreq->devfreq);
+}
+
+void panthor_devfreq_record_busy(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ if (!pdevfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+ pdevfreq->last_busy_state = true;
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+}
+
+void panthor_devfreq_record_idle(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ if (!pdevfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+ pdevfreq->last_busy_state = false;
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
new file mode 100644
index 000000000000..83a5c9522493
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANTHOR_DEVFREQ_H__
+#define __PANTHOR_DEVFREQ_H__
+
+struct devfreq;
+struct thermal_cooling_device;
+
+struct panthor_device;
+struct panthor_devfreq;
+
+int panthor_devfreq_init(struct panthor_device *ptdev);
+
+int panthor_devfreq_resume(struct panthor_device *ptdev);
+int panthor_devfreq_suspend(struct panthor_device *ptdev);
+
+void panthor_devfreq_record_busy(struct panthor_device *ptdev);
+void panthor_devfreq_record_idle(struct panthor_device *ptdev);
+
+#endif /* __PANTHOR_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
new file mode 100644
index 000000000000..75276cbeba20
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gpu.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+static int panthor_clk_init(struct panthor_device *ptdev)
+{
+ ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
+ if (IS_ERR(ptdev->clks.core))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.core),
+ "get 'core' clock failed");
+
+ ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
+ if (IS_ERR(ptdev->clks.stacks))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.stacks),
+ "get 'stacks' clock failed");
+
+ ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
+ if (IS_ERR(ptdev->clks.coregroup))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.coregroup),
+ "get 'coregroup' clock failed");
+
+ drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
+ return 0;
+}
+
+void panthor_device_unplug(struct panthor_device *ptdev)
+{
+ /* This function can be called from two different paths: the reset work
+ * and the platform device remove callback. drm_dev_unplug() doesn't
+ * deal with concurrent callers, so we have to protect drm_dev_unplug()
+ * calls with our own lock, and bail out if the device is already
+ * unplugged.
+ */
+ mutex_lock(&ptdev->unplug.lock);
+ if (drm_dev_is_unplugged(&ptdev->base)) {
+ /* Someone beat us to it; release the lock and wait for the unplug
+ * operation to be reported as done.
+ */
+ mutex_unlock(&ptdev->unplug.lock);
+ wait_for_completion(&ptdev->unplug.done);
+ return;
+ }
+
+ /* Call drm_dev_unplug() so any access to HW blocks happening after
+ * that point gets rejected.
+ */
+ drm_dev_unplug(&ptdev->base);
+
+ /* We do the rest of the unplug with the unplug lock released,
+ * future callers will wait on ptdev->unplug.done anyway.
+ */
+ mutex_unlock(&ptdev->unplug.lock);
+
+ drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
+
+ /* Now, try to cleanly shutdown the GPU before the device resources
+ * get reclaimed.
+ */
+ panthor_sched_unplug(ptdev);
+ panthor_fw_unplug(ptdev);
+ panthor_mmu_unplug(ptdev);
+ panthor_gpu_unplug(ptdev);
+
+ pm_runtime_dont_use_autosuspend(ptdev->base.dev);
+ pm_runtime_put_sync_suspend(ptdev->base.dev);
+
+ /* If PM is disabled, we need to call the suspend handler manually. */
+ if (!IS_ENABLED(CONFIG_PM))
+ panthor_device_suspend(ptdev->base.dev);
+
+ /* Report the unplug operation as done to unblock concurrent
+ * panthor_device_unplug() callers.
+ */
+ complete_all(&ptdev->unplug.done);
+}
+
+static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+
+ cancel_work_sync(&ptdev->reset.work);
+ destroy_workqueue(ptdev->reset.wq);
+}
+
+static void panthor_device_reset_work(struct work_struct *work)
+{
+ struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
+ int ret = 0, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
+ /*
+ * No need for a reset as the device has been (or will be)
+ * powered down
+ */
+ atomic_set(&ptdev->reset.pending, 0);
+ return;
+ }
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return;
+
+ panthor_sched_pre_reset(ptdev);
+ panthor_fw_pre_reset(ptdev, true);
+ panthor_mmu_pre_reset(ptdev);
+ panthor_gpu_soft_reset(ptdev);
+ panthor_gpu_l2_power_on(ptdev);
+ panthor_mmu_post_reset(ptdev);
+ ret = panthor_fw_post_reset(ptdev);
+ if (ret)
+ goto out_dev_exit;
+
+ atomic_set(&ptdev->reset.pending, 0);
+ panthor_sched_post_reset(ptdev);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+
+ if (ret) {
+ panthor_device_unplug(ptdev);
+ drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
+ }
+}
+
+static bool panthor_device_is_initialized(struct panthor_device *ptdev)
+{
+ return !!ptdev->scheduler;
+}
+
+static void panthor_device_free_page(struct drm_device *ddev, void *data)
+{
+ __free_page(data);
+}
+
+int panthor_device_init(struct panthor_device *ptdev)
+{
+ u32 *dummy_page_virt;
+ struct resource *res;
+ struct page *p;
+ int ret;
+
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ init_completion(&ptdev->unplug.done);
+ ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
+ if (ret)
+ return ret;
+
+ ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
+ if (ret)
+ return ret;
+
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ ptdev->pm.dummy_latest_flush = p;
+ dummy_page_virt = page_address(p);
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
+ ptdev->pm.dummy_latest_flush);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the dummy page holding the latest flush to 1. This will cause the
+ * flush to be avoided, as we know it isn't necessary if the submission
+ * happens while the dummy page is mapped. Zero cannot be used because
+ * that means 'always flush'.
+ */
+ *dummy_page_virt = 1;
+
+ INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
+ ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
+ if (!ptdev->reset.wq)
+ return -ENOMEM;
+
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
+ if (ret)
+ return ret;
+
+ ret = panthor_clk_init(ptdev);
+ if (ret)
+ return ret;
+
+ ret = panthor_devfreq_init(ptdev);
+ if (ret)
+ return ret;
+
+ ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
+ 0, &res);
+ if (IS_ERR(ptdev->iomem))
+ return PTR_ERR(ptdev->iomem);
+
+ ptdev->phys_addr = res->start;
+
+ ret = devm_pm_runtime_enable(ptdev->base.dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (ret)
+ return ret;
+
+ /* If PM is disabled, we need to call panthor_device_resume() manually. */
+ if (!IS_ENABLED(CONFIG_PM)) {
+ ret = panthor_device_resume(ptdev->base.dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = panthor_gpu_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = panthor_mmu_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
+ ret = panthor_fw_init(ptdev);
+ if (ret)
+ goto err_unplug_mmu;
+
+ ret = panthor_sched_init(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ /* ~3 frames */
+ pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
+ pm_runtime_use_autosuspend(ptdev->base.dev);
+
+ ret = drm_dev_register(&ptdev->base, 0);
+ if (ret)
+ goto err_disable_autosuspend;
+
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+ return 0;
+
+err_disable_autosuspend:
+ pm_runtime_dont_use_autosuspend(ptdev->base.dev);
+ panthor_sched_unplug(ptdev);
+
+err_unplug_fw:
+ panthor_fw_unplug(ptdev);
+
+err_unplug_mmu:
+ panthor_mmu_unplug(ptdev);
+
+err_unplug_gpu:
+ panthor_gpu_unplug(ptdev);
+
+err_rpm_put:
+ pm_runtime_put_sync_suspend(ptdev->base.dev);
+ return ret;
+}
+
+#define PANTHOR_EXCEPTION(id) \
+ [DRM_PANTHOR_EXCEPTION_ ## id] = { \
+ .name = #id, \
+ }
+
+struct panthor_exception_info {
+ const char *name;
+};
+
+static const struct panthor_exception_info panthor_exception_infos[] = {
+ PANTHOR_EXCEPTION(OK),
+ PANTHOR_EXCEPTION(TERMINATED),
+ PANTHOR_EXCEPTION(KABOOM),
+ PANTHOR_EXCEPTION(EUREKA),
+ PANTHOR_EXCEPTION(ACTIVE),
+ PANTHOR_EXCEPTION(CS_RES_TERM),
+ PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
+ PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
+ PANTHOR_EXCEPTION(CS_BUS_FAULT),
+ PANTHOR_EXCEPTION(CS_INSTR_INVALID),
+ PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
+ PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
+ PANTHOR_EXCEPTION(INSTR_INVALID_PC),
+ PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
+ PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
+ PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
+ PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
+ PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
+ PANTHOR_EXCEPTION(IMPRECISE_FAULT),
+ PANTHOR_EXCEPTION(OOM),
+ PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
+ PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
+ PANTHOR_EXCEPTION(GPU_BUS_FAULT),
+ PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
+ PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
+ PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
+ PANTHOR_EXCEPTION(PERM_FAULT_0),
+ PANTHOR_EXCEPTION(PERM_FAULT_1),
+ PANTHOR_EXCEPTION(PERM_FAULT_2),
+ PANTHOR_EXCEPTION(PERM_FAULT_3),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_1),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_2),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_3),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
+};
+
+const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
+{
+ if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
+ !panthor_exception_infos[exception_code].name)
+ return "Unknown exception type";
+
+ return panthor_exception_infos[exception_code].name;
+}
+
+static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct panthor_device *ptdev = vma->vm_private_data;
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pfn;
+ pgprot_t pgprot;
+ vm_fault_t ret;
+ bool active;
+ int cookie;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&ptdev->pm.mmio_lock);
+ active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;
+
+ switch (offset) {
+ case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
+ if (active)
+ pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
+ else
+ pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
+ break;
+
+ default:
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ pgprot = vma->vm_page_prot;
+ if (active)
+ pgprot = pgprot_noncached(pgprot);
+
+ ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
+
+out_unlock:
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static const struct vm_operations_struct panthor_mmio_vm_ops = {
+ .fault = panthor_mmio_vm_fault,
+};
+
+int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
+{
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+
+ switch (offset) {
+ case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
+ (vma->vm_flags & (VM_WRITE | VM_EXEC)))
+ return -EINVAL;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Defer actual mapping to the fault handler. */
+ vma->vm_private_data = ptdev;
+ vma->vm_ops = &panthor_mmio_vm_ops;
+ vm_flags_set(vma,
+ VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
+ VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
+ return 0;
+}
+
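
For context, userspace is expected to mmap() this region through the DRM file descriptor; the fault handler above then backs it with either the real LATEST_FLUSH page or the dummy page depending on the PM state. A rough, illustrative userspace sketch, assuming the offset macro from the panthor uAPI header:

/* Illustrative userspace sketch: map the read-only flush-ID page exposed by
 * panthor. DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET is expected to come from the
 * panthor uAPI header; error handling is intentionally minimal.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <drm/panthor_drm.h>

static volatile uint32_t *map_latest_flush_id(int drm_fd, size_t page_size)
{
	void *map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, drm_fd,
			 DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);

	return map == MAP_FAILED ? NULL : (volatile uint32_t *)map;
}
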
+int panthor_device_resume(struct device *dev)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ int ret, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
+ return -EINVAL;
+
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);
+
+ ret = clk_prepare_enable(ptdev->clks.core);
+ if (ret)
+ goto err_set_suspended;
+
+ ret = clk_prepare_enable(ptdev->clks.stacks);
+ if (ret)
+ goto err_disable_core_clk;
+
+ ret = clk_prepare_enable(ptdev->clks.coregroup);
+ if (ret)
+ goto err_disable_stacks_clk;
+
+ ret = panthor_devfreq_resume(ptdev);
+ if (ret)
+ goto err_disable_coregroup_clk;
+
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+ ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+ if (!ret) {
+ panthor_sched_resume(ptdev);
+ } else {
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ }
+
+ drm_dev_exit(cookie);
+
+ if (ret)
+ goto err_suspend_devfreq;
+ }
+
+ if (atomic_read(&ptdev->reset.pending))
+ queue_work(ptdev->reset.wq, &ptdev->reset.work);
+
+ /* Clear all IOMEM mappings pointing to this device after we've
+ * resumed. This way the fake mappings pointing to the dummy pages
+ * are removed and the real iomem mapping will be restored on next
+ * access.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ return 0;
+
+err_suspend_devfreq:
+ panthor_devfreq_suspend(ptdev);
+
+err_disable_coregroup_clk:
+ clk_disable_unprepare(ptdev->clks.coregroup);
+
+err_disable_stacks_clk:
+ clk_disable_unprepare(ptdev->clks.stacks);
+
+err_disable_core_clk:
+ clk_disable_unprepare(ptdev->clks.core);
+
+err_set_suspended:
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ return ret;
+}
+
+int panthor_device_suspend(struct device *dev)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ int ret, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
+ return -EINVAL;
+
+ /* Clear all IOMEM mappings pointing to this device before we
+ * shutdown the power-domain and clocks. Failing to do that results
+ * in external aborts when the process accesses the iomem region.
+ * We change the state and call unmap_mapping_range() with the
+ * mmio_lock held to make sure the vm_fault handler won't set up
+ * invalid mappings.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ cancel_work_sync(&ptdev->reset.work);
+
+ /* We prepare everything as if we were resetting the GPU.
+ * The end of the reset will happen in the resume path though.
+ */
+ panthor_sched_suspend(ptdev);
+ panthor_fw_suspend(ptdev);
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ drm_dev_exit(cookie);
+ }
+
+ ret = panthor_devfreq_suspend(ptdev);
+ if (ret) {
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+ drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+ panthor_sched_resume(ptdev);
+ drm_dev_exit(cookie);
+ }
+
+ goto err_set_active;
+ }
+
+ clk_disable_unprepare(ptdev->clks.coregroup);
+ clk_disable_unprepare(ptdev->clks.stacks);
+ clk_disable_unprepare(ptdev->clks.core);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ return 0;
+
+err_set_active:
+ /* If something failed and we have to revert back to an
+ * active state, we also need to clear the MMIO userspace
+ * mappings, so any dummy pages that were mapped while we
+ * were trying to suspend get invalidated.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
new file mode 100644
index 000000000000..2fdd671b38fd
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_DEVICE_H__
+#define __PANTHOR_DEVICE_H__
+
+#include <linux/atomic.h>
+#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+struct panthor_csf;
+struct panthor_csf_ctx;
+struct panthor_device;
+struct panthor_gpu;
+struct panthor_group_pool;
+struct panthor_heap_pool;
+struct panthor_job;
+struct panthor_mmu;
+struct panthor_fw;
+struct panthor_perfcnt;
+struct panthor_vm;
+struct panthor_vm_pool;
+
+/**
+ * enum panthor_device_pm_state - PM state
+ */
+enum panthor_device_pm_state {
+ /** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
+ PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,
+
+ /** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
+ PANTHOR_DEVICE_PM_STATE_RESUMING,
+
+ /** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
+ PANTHOR_DEVICE_PM_STATE_ACTIVE,
+
+ /** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
+ PANTHOR_DEVICE_PM_STATE_SUSPENDING,
+};
+
+/**
+ * struct panthor_irq - IRQ data
+ *
+ * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
+ */
+struct panthor_irq {
+ /** @ptdev: Panthor device */
+ struct panthor_device *ptdev;
+
+ /** @irq: IRQ number. */
+ int irq;
+
+ /** @mask: Current mask being applied to xxx_INT_MASK. */
+ u32 mask;
+
+ /** @suspended: Set to true when the IRQ is suspended. */
+ atomic_t suspended;
+};
+
+/**
+ * struct panthor_device - Panthor device
+ */
+struct panthor_device {
+ /** @base: Base drm_device. */
+ struct drm_device base;
+
+ /** @phys_addr: Physical address of the iomem region. */
+ phys_addr_t phys_addr;
+
+ /** @iomem: CPU mapping of the IOMEM region. */
+ void __iomem *iomem;
+
+ /** @clks: GPU clocks. */
+ struct {
+ /** @core: Core clock. */
+ struct clk *core;
+
+ /** @stacks: Stacks clock. This clock is optional. */
+ struct clk *stacks;
+
+ /** @coregroup: Core group clock. This clock is optional. */
+ struct clk *coregroup;
+ } clks;
+
+ /** @coherent: True if the CPU/GPU are memory coherent. */
+ bool coherent;
+
+ /** @gpu_info: GPU information. */
+ struct drm_panthor_gpu_info gpu_info;
+
+ /** @csif_info: Command stream interface information. */
+ struct drm_panthor_csif_info csif_info;
+
+ /** @gpu: GPU management data. */
+ struct panthor_gpu *gpu;
+
+ /** @fw: FW management data. */
+ struct panthor_fw *fw;
+
+ /** @mmu: MMU management data. */
+ struct panthor_mmu *mmu;
+
+ /** @scheduler: Scheduler management data. */
+ struct panthor_scheduler *scheduler;
+
+ /** @devfreq: Device frequency scaling management data. */
+ struct panthor_devfreq *devfreq;
+
+ /** @unplug: Device unplug related fields. */
+ struct {
+ /** @lock: Lock used to serialize unplug operations. */
+ struct mutex lock;
+
+ /**
+ * @done: Completion object signaled when the unplug
+ * operation is done.
+ */
+ struct completion done;
+ } unplug;
+
+ /** @reset: Reset related fields. */
+ struct {
+ /** @wq: Ordered workqueue used to schedule reset operations. */
+ struct workqueue_struct *wq;
+
+ /** @work: Reset work. */
+ struct work_struct work;
+
+ /** @pending: Set to true if a reset is pending. */
+ atomic_t pending;
+ } reset;
+
+ /** @pm: Power management related data. */
+ struct {
+ /** @state: Power state. */
+ atomic_t state;
+
+ /**
+ * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
+ *
+ * This is needed to ensure we map the dummy IO pages when
+ * the device is being suspended, and the real IO pages when
+ * the device is being resumed. We can't rely on the atomicity of the
+ * PM state alone to deal with this race.
+ */
+ struct mutex mmio_lock;
+
+ /**
+ * @dummy_latest_flush: Dummy LATEST_FLUSH page.
+ *
+ * Used to replace the real LATEST_FLUSH page when the GPU
+ * is suspended.
+ */
+ struct page *dummy_latest_flush;
+ } pm;
+};
+
+/**
+ * struct panthor_file - Panthor file
+ */
+struct panthor_file {
+ /** @ptdev: Device attached to this file. */
+ struct panthor_device *ptdev;
+
+ /** @vms: VM pool attached to this file. */
+ struct panthor_vm_pool *vms;
+
+ /** @groups: Scheduling group pool attached to this file. */
+ struct panthor_group_pool *groups;
+};
+
+int panthor_device_init(struct panthor_device *ptdev);
+void panthor_device_unplug(struct panthor_device *ptdev);
+
+/**
+ * panthor_device_schedule_reset() - Schedules a reset operation
+ */
+static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
+{
+ if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
+ atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
+ queue_work(ptdev->reset.wq, &ptdev->reset.work);
+}
+
+/**
+ * panthor_device_reset_is_pending() - Checks if a reset is pending.
+ *
+ * Return: true if a reset is pending, false otherwise.
+ */
+static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
+{
+ return atomic_read(&ptdev->reset.pending) != 0;
+}
+
+int panthor_device_mmap_io(struct panthor_device *ptdev,
+ struct vm_area_struct *vma);
+
+int panthor_device_resume(struct device *dev);
+int panthor_device_suspend(struct device *dev);
+
+enum drm_panthor_exception_type {
+ DRM_PANTHOR_EXCEPTION_OK = 0x00,
+ DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
+ DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
+ DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
+ DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
+ DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
+ DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
+ DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
+ DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
+ DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
+ DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
+ DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
+ DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
+ DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
+ DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
+ DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
+ DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
+ DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
+ DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
+ DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
+ DRM_PANTHOR_EXCEPTION_OOM = 0x60,
+ DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
+ DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
+ DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
+ DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
+ DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
+ DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
+};
+
+/**
+ * panthor_exception_is_fault() - Checks if an exception is a fault.
+ *
+ * Return: true if the exception is a fault, false otherwise.
+ */
+static inline bool
+panthor_exception_is_fault(u32 exception_code)
+{
+ return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
+}
+
+const char *panthor_exception_name(struct panthor_device *ptdev,
+ u32 exception_code);
+
+/**
+ * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
+ * registration function.
+ *
+ * The boiler-plate to gracefully deal with shared interrupts is
+ * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
+ * just after the actual handler. The handler prototype is:
+ *
+ * void (*handler)(struct panthor_device *, u32 status);
+ */
+#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler) \
+static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) \
+{ \
+ struct panthor_irq *pirq = data; \
+ struct panthor_device *ptdev = pirq->ptdev; \
+ \
+ if (atomic_read(&pirq->suspended)) \
+ return IRQ_NONE; \
+ if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \
+ return IRQ_NONE; \
+ \
+ gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0); \
+ return IRQ_WAKE_THREAD; \
+} \
+ \
+static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data) \
+{ \
+ struct panthor_irq *pirq = data; \
+ struct panthor_device *ptdev = pirq->ptdev; \
+ irqreturn_t ret = IRQ_NONE; \
+ \
+ while (true) { \
+ u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \
+ \
+ if (!status) \
+ break; \
+ \
+ gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
+ \
+ __handler(ptdev, status); \
+ ret = IRQ_HANDLED; \
+ } \
+ \
+ if (!atomic_read(&pirq->suspended)) \
+ gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
+ \
+ return ret; \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \
+{ \
+ pirq->mask = 0; \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
+ synchronize_irq(pirq->irq); \
+ atomic_set(&pirq->suspended, true); \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \
+{ \
+ atomic_set(&pirq->suspended, false); \
+ pirq->mask = mask; \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \
+} \
+ \
+static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
+ struct panthor_irq *pirq, \
+ int irq, u32 mask) \
+{ \
+ pirq->ptdev = ptdev; \
+ pirq->irq = irq; \
+ panthor_ ## __name ## _irq_resume(pirq, mask); \
+ \
+ return devm_request_threaded_irq(ptdev->base.dev, irq, \
+ panthor_ ## __name ## _irq_raw_handler, \
+ panthor_ ## __name ## _irq_threaded_handler, \
+ IRQF_SHARED, KBUILD_MODNAME "-" # __name, \
+ pirq); \
+}
+
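
An illustrative use of the macro, as hinted by the comment above: define the threaded handler, then invoke PANTHOR_IRQ_HANDLER() right after it. The handler name and the GPU_ register prefix below are examples, not necessarily what the panthor source files use.

/* Illustrative only: assumes a GPU interrupt whose registers use the GPU_
 * prefix (GPU_INT_STAT, GPU_INT_RAWSTAT, GPU_INT_CLEAR, GPU_INT_MASK). The
 * handler name is an example, not necessarily what panthor_gpu.c uses.
 */
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	/* Process the acknowledged interrupt bits carried in 'status'. */
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);

/* At init time, with 'pirq', 'irq' and 'mask' obtained elsewhere:
 *
 *	ret = panthor_request_gpu_irq(ptdev, pirq, irq, mask);
 */
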
+extern struct workqueue_struct *panthor_cleanup_wq;
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
new file mode 100644
index 000000000000..b8a84f26b3ef
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -0,0 +1,1488 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+/**
+ * DOC: user <-> kernel object copy helpers.
+ */
+
+/**
+ * panthor_set_uobj() - Copy kernel object to user object.
+ * @usr_ptr: User pointer.
+ * @usr_size: Size of the user object.
+ * @min_size: Minimum size for this object.
+ * @kern_size: Size of the kernel object.
+ * @in: Address of the kernel object to copy.
+ *
+ * Helper automating kernel -> user object copies.
+ *
+ * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
+{
+ /* User size shouldn't be smaller than the minimal object size. */
+ if (usr_size < min_size)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
+ return -EFAULT;
+
+ /* When the kernel object is smaller than the user object, we fill the gap with
+ * zeros.
+ */
+ if (usr_size > kern_size &&
+ clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
+ * @in: The object array to copy.
+ * @min_stride: Minimum array stride.
+ * @obj_size: Kernel object size.
+ *
+ * Helper automating user -> kernel object copies.
+ *
+ * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
+ *
+ * Return: newly allocated object array or an ERR_PTR on error.
+ */
+static void *
+panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
+ u32 obj_size)
+{
+ int ret = 0;
+ void *out_alloc;
+
+ /* User stride must be at least the minimum object size, otherwise it might
+ * lack useful information.
+ */
+ if (in->stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ if (!in->count)
+ return NULL;
+
+ out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
+ if (!out_alloc)
+ return ERR_PTR(-ENOMEM);
+
+ if (obj_size == in->stride) {
+ /* Fast path when user/kernel have the same uAPI header version. */
+ if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
+ (unsigned long)obj_size * in->count))
+ ret = -EFAULT;
+ } else {
+ void __user *in_ptr = u64_to_user_ptr(in->array);
+ void *out_ptr = out_alloc;
+
+ /* If the sizes differ, we need to copy elements one by one. */
+ for (u32 i = 0; i < in->count; i++) {
+ ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
+ if (ret)
+ break;
+
+ out_ptr += obj_size;
+ in_ptr += in->stride;
+ }
+ }
+
+ if (ret) {
+ kvfree(out_alloc);
+ return ERR_PTR(ret);
+ }
+
+ return out_alloc;
+}
+
+/**
+ * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
+ * @_typename: Object type.
+ * @_last_mandatory_field: Last mandatory field.
+ *
+ * Get the minimum user object size based on the last mandatory field name,
+ * a.k.a. the name of the last field of the structure at the time this
+ * structure was added to the uAPI.
+ *
+ * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
+ */
+#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+ (offsetof(_typename, _last_mandatory_field) + \
+ sizeof(((_typename *)NULL)->_last_mandatory_field))
+
+/**
+ * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
+ * evolution.
+ * @_typename: Object type.
+ * @_last_mandatory_field: Last mandatory field.
+ *
+ * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
+ */
+#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
+ _typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+
+/**
+ * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
+ * @_obj_name: Object to get the minimum size of.
+ *
+ * Don't use this macro directly, it's automatically called by
+ * PANTHOR_UOBJ_{SET,GET_ARRAY}().
+ */
+#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
+ _Generic(_obj_name, \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
+
+/**
+ * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
+ * @_dest_usr_ptr: User pointer to copy to.
+ * @_usr_size: Size of the user object.
+ * @_src_obj: Kernel object to copy (not a pointer).
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+ panthor_set_uobj(_dest_usr_ptr, _usr_size, \
+ PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
+ sizeof(_src_obj), &(_src_obj))
+
+/**
+ * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
+ * object array.
+ * @_dest_array: Local variable that will hold the newly allocated kernel
+ * object array.
+ * @_uobj_array: The drm_panthor_obj_array object describing the user object
+ * array.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
+ ({ \
+ typeof(_dest_array) _tmp; \
+ _tmp = panthor_get_uobj_array(_uobj_array, \
+ PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
+ sizeof((_dest_array)[0])); \
+ if (!IS_ERR(_tmp)) \
+ _dest_array = _tmp; \
+ PTR_ERR_OR_ZERO(_tmp); \
+ })
+
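
To make the intent of these helpers concrete, here is a hedged sketch of how they might be used from ioctl code. struct drm_panthor_dev_query and its fields are taken from the panthor uAPI as an example, and the function names are placeholders; the real handlers in this driver may differ.

/* Hedged sketch only; the real ioctl handlers may differ. */
static int example_dev_query(struct panthor_device *ptdev,
			     struct drm_panthor_dev_query *args)
{
	/* Kernel -> user copy, zero-filling any extra room in a larger,
	 * newer user-side struct.
	 */
	return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
}

static int example_get_sync_ops(struct drm_panthor_sync_op **out,
				const struct drm_panthor_obj_array *syncs)
{
	/* User -> kernel copy of an object array whose stride may not match
	 * the kernel's struct size (older or newer uAPI header).
	 */
	return PANTHOR_UOBJ_GET_ARRAY(*out, syncs);
}
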
+/**
+ * struct panthor_sync_signal - Represent a synchronization object point to attach
+ * our job fence to.
+ *
+ * This structure is here to keep track of fences that are currently bound to
+ * a specific syncobj point.
+ *
+ * At the beginning of a job submission, the fence
+ * is retrieved from the syncobj itself, and can be NULL if no fence was attached
+ * to this point.
+ *
+ * At the end, it points to the fence of the last job that had a
+ * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
+ *
+ * With jobs being submitted in batches, the fence might change several times during
+ * the process, allowing one job to wait on a job that's part of the same submission
+ * but appears earlier in the drm_panthor_group_submit::queue_submits array.
+ */
+struct panthor_sync_signal {
+ /** @node: list_head to track signal ops within a submit operation */
+ struct list_head node;
+
+ /** @handle: The syncobj handle. */
+ u32 handle;
+
+ /**
+ * @point: The syncobj point.
+ *
+ * Zero for regular syncobjs, and non-zero for timeline syncobjs.
+ */
+ u64 point;
+
+ /**
+ * @syncobj: The sync object pointed by @handle.
+ */
+ struct drm_syncobj *syncobj;
+
+ /**
+ * @chain: Chain object used to link the new fence to an existing
+ * timeline syncobj.
+ *
+ * NULL for regular syncobj, non-NULL for timeline syncobjs.
+ */
+ struct dma_fence_chain *chain;
+
+ /**
+ * @fence: The fence to assign to the syncobj or syncobj-point.
+ */
+ struct dma_fence *fence;
+};
+
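
A small illustrative example of the batching behaviour described above: two jobs submitted in the same batch, where the second waits on the point the first signals. The timeline_handle parameter is assumed to be a syncobj created beforehand by the caller.

/* Illustrative only: fill the sync-op arrays for two jobs of one batch. */
static void example_fill_sync_ops(u32 timeline_handle,
				  struct drm_panthor_sync_op *job0_sync,
				  struct drm_panthor_sync_op *job1_sync)
{
	/* Job 0 signals point 1 on the timeline syncobj. */
	*job0_sync = (struct drm_panthor_sync_op){
		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ |
			 DRM_PANTHOR_SYNC_OP_SIGNAL,
		.handle = timeline_handle,
		.timeline_value = 1,
	};

	/* Job 1, submitted in the same batch, waits on that same point. The
	 * wait resolves to job 0's finished fence through the submit
	 * context's signal list, even though nothing has been pushed to the
	 * syncobj yet.
	 */
	*job1_sync = (struct drm_panthor_sync_op){
		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ |
			 DRM_PANTHOR_SYNC_OP_WAIT,
		.handle = timeline_handle,
		.timeline_value = 1,
	};
}
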
+/**
+ * struct panthor_job_ctx - Job context
+ */
+struct panthor_job_ctx {
+ /** @job: The job that is about to be submitted to drm_sched. */
+ struct drm_sched_job *job;
+
+ /** @syncops: Array of sync operations. */
+ struct drm_panthor_sync_op *syncops;
+
+ /** @syncop_count: Number of sync operations. */
+ u32 syncop_count;
+};
+
+/**
+ * struct panthor_submit_ctx - Submission context
+ *
+ * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
+ * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
+ * initialization and cleanup steps.
+ */
+struct panthor_submit_ctx {
+ /** @file: DRM file this submission happens on. */
+ struct drm_file *file;
+
+ /**
+ * @signals: List of struct panthor_sync_signal.
+ *
+ * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
+ * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
+ * matching the syncobj+point exists before calling
+ * drm_syncobj_find_fence(). This allows us to describe dependencies
+ * existing between jobs that are part of the same batch.
+ */
+ struct list_head signals;
+
+ /** @jobs: Array of jobs. */
+ struct panthor_job_ctx *jobs;
+
+ /** @job_count: Number of entries in the @jobs array. */
+ u32 job_count;
+
+ /** @exec: drm_exec context used to acquire and prepare resv objects. */
+ struct drm_exec exec;
+};
+
+#define PANTHOR_SYNC_OP_FLAGS_MASK \
+ (DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)
+
+static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
+{
+ return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
+}
+
+static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
+{
+ /* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
+ return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
+}
+
+/**
+ * panthor_check_sync_op() - Check drm_panthor_sync_op fields
+ * @sync_op: The sync operation to check.
+ *
+ * Return: 0 on success, -EINVAL otherwise.
+ */
+static int
+panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
+{
+ u8 handle_type;
+
+ if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
+ return -EINVAL;
+
+ handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
+ if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
+ handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
+ return -EINVAL;
+
+ if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
+ sync_op->timeline_value != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
+ * @sig_sync: Signal object to free.
+ */
+static void
+panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
+{
+ if (!sig_sync)
+ return;
+
+ drm_syncobj_put(sig_sync->syncobj);
+ dma_fence_chain_free(sig_sync->chain);
+ dma_fence_put(sig_sync->fence);
+ kfree(sig_sync);
+}
+
+/**
+ * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
+ * @ctx: Context to add the signal operation to.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: 0 on success, otherwise negative error value.
+ */
+static int
+panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+ struct dma_fence *cur_fence;
+ int ret;
+
+ sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
+ if (!sig_sync)
+ return -ENOMEM;
+
+ sig_sync->handle = handle;
+ sig_sync->point = point;
+
+ if (point > 0) {
+ sig_sync->chain = dma_fence_chain_alloc();
+ if (!sig_sync->chain) {
+ ret = -ENOMEM;
+ goto err_free_sig_sync;
+ }
+ }
+
+ sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
+ if (!sig_sync->syncobj) {
+ ret = -EINVAL;
+ goto err_free_sig_sync;
+ }
+
+ /* Retrieve the current fence attached to that point. It's
+ * perfectly fine to get a NULL fence here, it just means there's
+ * no fence attached to that point yet.
+ */
+ if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
+ sig_sync->fence = cur_fence;
+
+ list_add_tail(&sig_sync->node, &ctx->signals);
+
+ return 0;
+
+err_free_sig_sync:
+ panthor_sync_signal_free(sig_sync);
+ return ret;
+}
+
+/**
+ * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
+ * submit context.
+ * @ctx: Context to search the signal operation in.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: A valid panthor_sync_signal object if found, NULL otherwise.
+ */
+static struct panthor_sync_signal *
+panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ list_for_each_entry(sig_sync, &ctx->signals, node) {
+ if (handle == sig_sync->handle && point == sig_sync->point)
+ return sig_sync;
+ }
+
+ return NULL;
+}
+
+/**
+ * panthor_submit_ctx_add_job() - Add a job to a submit context
+ * @ctx: Context to search the signal operation in.
+ * @idx: Index of the job in the context.
+ * @job: Job to add.
+ * @syncs: Sync operations provided by userspace.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
+ struct drm_sched_job *job,
+ const struct drm_panthor_obj_array *syncs)
+{
+ int ret;
+
+ ctx->jobs[idx].job = job;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
+ if (ret)
+ return ret;
+
+ ctx->jobs[idx].syncop_count = syncs->count;
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found.
+ * @ctx: Context to search the signal operation in.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
+ if (sig_sync)
+ return 0;
+
+ return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
+}
+
+/**
+ * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
+ * on the signal operations specified by a job.
+ * @ctx: Context to search the signal operation in.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
+ struct panthor_device,
+ base);
+ struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct dma_fence *old_fence;
+ struct panthor_sync_signal *sig_sync;
+
+ if (!sync_op_is_signal(&sync_ops[i]))
+ continue;
+
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (drm_WARN_ON(&ptdev->base, !sig_sync))
+ return -EINVAL;
+
+ old_fence = sig_sync->fence;
+ sig_sync->fence = dma_fence_get(done_fence);
+ dma_fence_put(old_fence);
+
+ if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
+ * and add them to the context.
+ * @ctx: Context to search the signal operation in.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ int ret;
+
+ if (!sync_op_is_signal(&sync_ops[i]))
+ continue;
+
+ ret = panthor_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
+ ret = panthor_submit_ctx_get_sync_signal(ctx,
+ sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
+ * the currently assigned fence to the associated syncobj.
+ * @ctx: Context to push fences on.
+ *
+ * This is the last step of a submission procedure, and is done once we know the submission
+ * is effective and job fences are guaranteed to be signaled in finite time.
+ */
+static void
+panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ list_for_each_entry(sig_sync, &ctx->signals, node) {
+ if (sig_sync->chain) {
+ drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
+ sig_sync->fence, sig_sync->point);
+ sig_sync->chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
+ }
+ }
+}
+
+/**
+ * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
+ * job dependencies.
+ * @ctx: Submit context.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
+ struct panthor_device,
+ base);
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ struct drm_sched_job *job = ctx->jobs[job_idx].job;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+ int ret = 0;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct panthor_sync_signal *sig_sync;
+ struct dma_fence *fence;
+
+ if (!sync_op_is_wait(&sync_ops[i]))
+ continue;
+
+ ret = panthor_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
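+		/* If this syncobj point is also signaled by a job in this submission,
+		 * wait on the fence currently tracked for it. Otherwise, look the
+		 * fence up in the syncobj itself.
+		 */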
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (sig_sync) {
+ if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
+ return -EINVAL;
+
+ fence = dma_fence_get(sig_sync->fence);
+ } else {
+ ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
+ sync_ops[i].timeline_value,
+ 0, &fence);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_sched_job_add_dependency(job, fence);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
+ * and add them to the submit context.
+ * @ctx: Submit context.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ int ret;
+
+ ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_add_deps_and_arm_jobs() - Add jobs dependencies and arm jobs
+ * @ctx: Submit context.
+ *
+ * Must be called after the resv preparation has been taken care of.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ int ret;
+
+ ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
+ if (ret)
+ return ret;
+
+ drm_sched_job_arm(ctx->jobs[i].job);
+
+ ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
+ * @ctx: Submit context.
+ * @upd_resvs: Callback used to update reservation objects that were previously
+ * prepared.
+ */
+static void
+panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
+ void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ upd_resvs(&ctx->exec, ctx->jobs[i].job);
+ drm_sched_entity_push_job(ctx->jobs[i].job);
+
+ /* Job is owned by the scheduler now. */
+ ctx->jobs[i].job = NULL;
+ }
+
+ panthor_submit_ctx_push_fences(ctx);
+}
+
+/**
+ * panthor_submit_ctx_init() - Initializes a submission context
+ * @ctx: Submit context to initialize.
+ * @file: drm_file this submission happens on.
+ * @job_count: Number of jobs that will be submitted.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
+ struct drm_file *file, u32 job_count)
+{
+ ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ctx->jobs)
+ return -ENOMEM;
+
+ ctx->file = file;
+ ctx->job_count = job_count;
+ INIT_LIST_HEAD(&ctx->signals);
+ drm_exec_init(&ctx->exec,
+ DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
+ 0);
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_cleanup() - Cleanup a submission context
+ * @ctx: Submit context to cleanup.
+ * @job_put: Job put callback.
+ */
+static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
+ void (*job_put)(struct drm_sched_job *))
+{
+ struct panthor_sync_signal *sig_sync, *tmp;
+ unsigned long i;
+
+ drm_exec_fini(&ctx->exec);
+
+ list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
+ panthor_sync_signal_free(sig_sync);
+
+ for (i = 0; i < ctx->job_count; i++) {
+ job_put(ctx->jobs[i].job);
+ kvfree(ctx->jobs[i].syncops);
+ }
+
+ kvfree(ctx->jobs);
+}
+
+static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct drm_panthor_dev_query *args = data;
+
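+	/* A NULL pointer is a size query: report the object size so userspace
+	 * can allocate a properly sized buffer.
+	 */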
+ if (!args->pointer) {
+ switch (args->type) {
+ case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
+ args->size = sizeof(ptdev->gpu_info);
+ return 0;
+
+ case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
+ args->size = sizeof(ptdev->csif_info);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (args->type) {
+ case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
+
+ case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PANTHOR_VM_CREATE_FLAGS 0
+
+static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
+ if (ret >= 0) {
+ args->id = ret;
+ ret = 0;
+ }
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
+}
+
+#define PANTHOR_BO_FLAGS DRM_PANTHOR_BO_NO_MMAP
+
+static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_bo_create *args = data;
+ struct panthor_vm *vm = NULL;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || args->pad ||
+ (args->flags & ~PANTHOR_BO_FLAGS)) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ if (args->exclusive_vm_id) {
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
+ if (!vm) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+ }
+
+ ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
+ args->flags, &args->handle);
+
+ panthor_vm_put(vm);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_mmap_offset *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+out:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_submit *args = data;
+ struct drm_panthor_queue_submit *jobs_args;
+ struct panthor_submit_ctx ctx;
+ int ret = 0, cookie;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
+ if (ret)
+ goto out_dev_exit;
+
+ ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
+ if (ret)
+ goto out_free_jobs_args;
+
+ /* Create jobs and attach sync operations */
+ for (u32 i = 0; i < args->queue_submits.count; i++) {
+ const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
+ struct drm_sched_job *job;
+
+ job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ /*
+	 * Collect the signal operations of all jobs, so that each job can pick
+	 * its dependencies from this set and update the fence it must signal
+	 * once it is submitted.
+ */
+ ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /*
+	 * We acquire/prepare the resvs on all jobs before proceeding with the
+ * dependency registration.
+ *
+ * This is solving two problems:
+ * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
+ * protected by a lock to make sure no concurrent access to the same
+	 *    entity gets interleaved, which would mess up the fence seqno
+	 *    ordering. Luckily, one of the resvs being acquired is the VM resv,
+ * and a scheduling entity is only bound to a single VM. As soon as
+ * we acquire the VM resv, we should be safe.
+ * 2. Jobs might depend on fences that were issued by previous jobs in
+ * the same batch, so we can't add dependencies on all jobs before
+ * arming previous jobs and registering the fence to the signal
+ * array, otherwise we might miss dependencies, or point to an
+ * outdated fence.
+ */
+ if (args->queue_submits.count > 0) {
+ /* All jobs target the same group, so they also point to the same VM. */
+ struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);
+
+ drm_exec_until_all_locked(&ctx.exec) {
+ ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
+ args->queue_submits.count);
+ }
+
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ /*
+ * Now that resvs are locked/prepared, we can iterate over each job to
+ * add the dependencies, arm the job fence, register the job fence to
+ * the signal array.
+ */
+ ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Nothing can fail after that point, so we can make our job fences
+ * visible to the outside world. Push jobs and set the job fences to
+ * the resv slots we reserved. This also pushes the fences to the
+ * syncobjs that are part of the signal array.
+ */
+ panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);
+
+out_cleanup_submit_ctx:
+ panthor_submit_ctx_cleanup(&ctx, panthor_job_put);
+
+out_free_jobs_args:
+ kvfree(jobs_args);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ return panthor_group_destroy(pfile, args->group_handle);
+}
+
+static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_create *args = data;
+ struct drm_panthor_queue_create *queue_args;
+ int ret;
+
+ if (!args->queues.count)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
+ if (ret)
+ return ret;
+
+ ret = panthor_group_create(pfile, args, queue_args);
+ if (ret >= 0) {
+ args->group_handle = ret;
+ ret = 0;
+ }
+
+ kvfree(queue_args);
+ return ret;
+}
+
+static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_get_state *args = data;
+
+ return panthor_group_get_state(pfile, args);
+}
+
+static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_tiler_heap_create *args = data;
+ struct panthor_heap_pool *pool;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ pool = panthor_vm_get_heap_pool(vm, true);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto out_put_vm;
+ }
+
+ ret = panthor_heap_create(pool,
+ args->initial_chunk_count,
+ args->chunk_size,
+ args->max_chunks,
+ args->target_in_flight,
+ &args->tiler_heap_ctx_gpu_va,
+ &args->first_heap_chunk_gpu_va);
+ if (ret < 0)
+ goto out_put_heap_pool;
+
+ /* Heap pools are per-VM. We combine the VM and HEAP id to make
+ * a unique heap handle.
+ */
+ args->handle = (args->vm_id << 16) | ret;
+ ret = 0;
+
+out_put_heap_pool:
+ panthor_heap_pool_put(pool);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_tiler_heap_destroy *args = data;
+ struct panthor_heap_pool *pool;
+ struct panthor_vm *vm;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
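+	/* The heap handle packs the VM id in the upper 16 bits and the heap id
+	 * in the lower 16 bits (see panthor_ioctl_tiler_heap_create()).
+	 */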
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
+ if (!vm)
+ return -EINVAL;
+
+ pool = panthor_vm_get_heap_pool(vm, false);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto out_put_vm;
+ }
+
+ ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
+ panthor_heap_pool_put(pool);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
+ struct drm_panthor_vm_bind *args,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_bind_op *jobs_args;
+ struct panthor_submit_ctx ctx;
+ struct panthor_vm *vm;
+ int ret = 0;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
+ if (ret)
+ goto out_put_vm;
+
+ ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
+ if (ret)
+ goto out_free_jobs_args;
+
+ for (u32 i = 0; i < args->ops.count; i++) {
+ struct drm_panthor_vm_bind_op *op = &jobs_args[i];
+ struct drm_sched_job *job;
+
+ job = panthor_vm_bind_job_create(file, vm, op);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Prepare reservation objects for each VM_BIND job. */
+ drm_exec_until_all_locked(&ctx.exec) {
+ for (u32 i = 0; i < ctx.job_count; i++) {
+ ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
+ drm_exec_retry_on_contention(&ctx.exec);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+ }
+
+ ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Nothing can fail after that point. */
+ panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);
+
+out_cleanup_submit_ctx:
+ panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);
+
+out_free_jobs_args:
+ kvfree(jobs_args);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
+ struct drm_panthor_vm_bind *args,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_bind_op *jobs_args;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
+ if (ret)
+ goto out_put_vm;
+
+ for (u32 i = 0; i < args->ops.count; i++) {
+ ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
+ if (ret) {
+ /* Update ops.count so the user knows where things failed. */
+ args->ops.count = i;
+ break;
+ }
+ }
+
+ kvfree(jobs_args);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+#define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC
+
+static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_vm_bind *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
+ ret = panthor_ioctl_vm_bind_async(ddev, args, file);
+ else
+ ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_get_state *args = data;
+ struct panthor_vm *vm;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ if (panthor_vm_is_unusable(vm))
+ args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
+ else
+ args->state = DRM_PANTHOR_VM_STATE_USABLE;
+
+ panthor_vm_put(vm);
+ return 0;
+}
+
+static int
+panthor_open(struct drm_device *ddev, struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_file *pfile;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+ if (!pfile) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ pfile->ptdev = ptdev;
+
+ ret = panthor_vm_pool_create(pfile);
+ if (ret)
+ goto err_free_file;
+
+ ret = panthor_group_pool_create(pfile);
+ if (ret)
+ goto err_destroy_vm_pool;
+
+ file->driver_priv = pfile;
+ return 0;
+
+err_destroy_vm_pool:
+ panthor_vm_pool_destroy(pfile);
+
+err_free_file:
+ kfree(pfile);
+
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+panthor_postclose(struct drm_device *ddev, struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+
+ panthor_group_pool_destroy(pfile);
+ panthor_vm_pool_destroy(pfile);
+
+ kfree(pfile);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
+#define PANTHOR_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)
+
+ PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+};
+
+static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file = filp->private_data;
+ struct panthor_file *pfile = file->driver_priv;
+ struct panthor_device *ptdev = pfile->ptdev;
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ int ret, cookie;
+
+ if (!drm_dev_enter(file->minor->dev, &cookie))
+ return -ENODEV;
+
+#ifdef CONFIG_ARM64
+ /*
+	 * 32-bit userspace is limited by the 32-bit representation of mmap2's
+	 * pgoffset field, so the MMIO offset it passes is arch-specific.
+	 * Convert a 32-bit user MMIO offset into the 64-bit one the kernel
+	 * driver uses internally.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT) &&
+ offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
+ DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ }
+#endif
+
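+	/* Offsets in the user MMIO range map device I/O regions; anything
+	 * below is a regular GEM object mapping.
+	 */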
+ if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
+ ret = panthor_device_mmap_io(ptdev, vma);
+ else
+ ret = drm_gem_mmap(filp, vma);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static const struct file_operations panthor_drm_driver_fops = {
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = panthor_mmap,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void panthor_debugfs_init(struct drm_minor *minor)
+{
+ panthor_mmu_debugfs_init(minor);
+}
+#endif
+
+/*
+ * PanCSF driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver panthor_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
+ .open = panthor_open,
+ .postclose = panthor_postclose,
+ .ioctls = panthor_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
+ .fops = &panthor_drm_driver_fops,
+ .name = "panthor",
+ .desc = "Panthor DRM driver",
+ .date = "20230801",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = panthor_gem_create_object,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panthor_debugfs_init,
+#endif
+};
+
+static int panthor_probe(struct platform_device *pdev)
+{
+ struct panthor_device *ptdev;
+
+ ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
+ struct panthor_device, base);
+ if (IS_ERR(ptdev))
+		return PTR_ERR(ptdev);
+
+ platform_set_drvdata(pdev, ptdev);
+
+ return panthor_device_init(ptdev);
+}
+
+static void panthor_remove(struct platform_device *pdev)
+{
+ struct panthor_device *ptdev = platform_get_drvdata(pdev);
+
+ panthor_device_unplug(ptdev);
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-mali" },
+ { .compatible = "arm,mali-valhall-csf" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
+ panthor_device_suspend,
+ panthor_device_resume,
+ NULL);
+
+static struct platform_driver panthor_driver = {
+ .probe = panthor_probe,
+ .remove_new = panthor_remove,
+ .driver = {
+ .name = "panthor",
+ .pm = pm_ptr(&panthor_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+/*
+ * Workqueue used to queue deferred cleanup work.
+ *
+ * We create a dedicated workqueue so we can drain on unplug and
+ * make sure all resources are freed before the module is unloaded.
+ */
+struct workqueue_struct *panthor_cleanup_wq;
+
+static int __init panthor_init(void)
+{
+ int ret;
+
+ ret = panthor_mmu_pt_cache_init();
+ if (ret)
+ return ret;
+
+ panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
+ if (!panthor_cleanup_wq) {
+		pr_err("panthor: Failed to allocate the cleanup workqueue\n");
+ ret = -ENOMEM;
+ goto err_mmu_pt_cache_fini;
+ }
+
+ ret = platform_driver_register(&panthor_driver);
+ if (ret)
+ goto err_destroy_cleanup_wq;
+
+ return 0;
+
+err_destroy_cleanup_wq:
+ destroy_workqueue(panthor_cleanup_wq);
+
+err_mmu_pt_cache_fini:
+ panthor_mmu_pt_cache_fini();
+ return ret;
+}
+module_init(panthor_init);
+
+static void __exit panthor_exit(void)
+{
+ platform_driver_unregister(&panthor_driver);
+ destroy_workqueue(panthor_cleanup_wq);
+ panthor_mmu_pt_cache_fini();
+}
+module_exit(panthor_exit);
+
+MODULE_AUTHOR("Panthor Project Developers");
+MODULE_DESCRIPTION("Panthor DRM Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
new file mode 100644
index 000000000000..fedf9627453f
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+#include <asm/arch_timer.h>
+#endif
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+#define CSF_FW_NAME "mali_csffw.bin"
+
+#define PING_INTERVAL_MS 12000
+#define PROGRESS_TIMEOUT_CYCLES (5ull * 500 * 1024 * 1024)
+#define PROGRESS_TIMEOUT_SCALE_SHIFT 10
+#define IDLE_HYSTERESIS_US 800
+#define PWROFF_HYSTERESIS_US 10000
+
+/**
+ * struct panthor_fw_binary_hdr - Firmware binary header.
+ */
+struct panthor_fw_binary_hdr {
+ /** @magic: Magic value to check binary validity. */
+ u32 magic;
+#define CSF_FW_BINARY_HEADER_MAGIC 0xc3f13a6e
+
+ /** @minor: Minor FW version. */
+ u8 minor;
+
+ /** @major: Major FW version. */
+ u8 major;
+#define CSF_FW_BINARY_HEADER_MAJOR_MAX 0
+
+ /** @padding1: MBZ. */
+ u16 padding1;
+
+ /** @version_hash: FW version hash. */
+ u32 version_hash;
+
+ /** @padding2: MBZ. */
+ u32 padding2;
+
+ /** @size: FW binary size. */
+ u32 size;
+};
+
+/**
+ * enum panthor_fw_binary_entry_type - Firmware binary entry type
+ */
+enum panthor_fw_binary_entry_type {
+ /** @CSF_FW_BINARY_ENTRY_TYPE_IFACE: Host <-> FW interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_IFACE = 0,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_CONFIG: FW config. */
+ CSF_FW_BINARY_ENTRY_TYPE_CONFIG = 1,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST: Unit-tests. */
+ CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST = 2,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER: Trace buffer interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER = 3,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4,
+};
+
+#define CSF_FW_BINARY_ENTRY_TYPE(ehdr) ((ehdr) & 0xff)
+#define CSF_FW_BINARY_ENTRY_SIZE(ehdr) (((ehdr) >> 8) & 0xff)
+#define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
+#define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
+ (CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+
+/**
+ * struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
+ */
+struct panthor_fw_binary_section_entry_hdr {
+ /** @flags: Section flags. */
+ u32 flags;
+
+ /** @va: MCU virtual range to map this binary section to. */
+ struct {
+ /** @start: Start address. */
+ u32 start;
+
+ /** @end: End address. */
+ u32 end;
+ } va;
+
+ /** @data: Data to initialize the FW section with. */
+ struct {
+ /** @start: Start offset in the FW binary. */
+ u32 start;
+
+ /** @end: End offset in the FW binary. */
+ u32 end;
+ } data;
+};
+
+/**
+ * struct panthor_fw_binary_iter - Firmware binary iterator
+ *
+ * Used to parse a firmware binary.
+ */
+struct panthor_fw_binary_iter {
+ /** @data: FW binary data. */
+ const void *data;
+
+ /** @size: FW binary size. */
+ size_t size;
+
+ /** @offset: Iterator offset. */
+ size_t offset;
+};
+
+/**
+ * struct panthor_fw_section - FW section
+ */
+struct panthor_fw_section {
+ /** @node: Used to keep track of FW sections. */
+ struct list_head node;
+
+ /** @flags: Section flags, as encoded in the FW binary. */
+ u32 flags;
+
+ /** @mem: Section memory. */
+ struct panthor_kernel_bo *mem;
+
+ /**
+ * @name: Name of the section, as specified in the binary.
+ *
+ * Can be NULL.
+ */
+ const char *name;
+
+ /**
+ * @data: Initial data copied to the FW memory.
+ *
+ * We keep data around so we can reload sections after a reset.
+ */
+ struct {
+		/** @buf: Buffer used to store the init data. */
+ const void *buf;
+
+ /** @size: Size of @buf in bytes. */
+ size_t size;
+ } data;
+};
+
+#define CSF_MCU_SHARED_REGION_START 0x04000000ULL
+#define CSF_MCU_SHARED_REGION_SIZE 0x04000000ULL
+
+#define MIN_CS_PER_CSG 8
+#define MIN_CSGS 3
+#define MAX_CSG_PRIO 0xf
+
+#define CSF_IFACE_VERSION(major, minor, patch) \
+ (((major) << 24) | ((minor) << 16) | (patch))
+#define CSF_IFACE_VERSION_MAJOR(v) ((v) >> 24)
+#define CSF_IFACE_VERSION_MINOR(v) (((v) >> 16) & 0xff)
+#define CSF_IFACE_VERSION_PATCH(v) ((v) & 0xffff)
+
+#define CSF_GROUP_CONTROL_OFFSET 0x1000
+#define CSF_STREAM_CONTROL_OFFSET 0x40
+#define CSF_UNPRESERVED_REG_COUNT 4
+
+/**
+ * struct panthor_fw_iface - FW interfaces
+ */
+struct panthor_fw_iface {
+ /** @global: Global interface. */
+ struct panthor_fw_global_iface global;
+
+ /** @groups: Group slot interfaces. */
+ struct panthor_fw_csg_iface groups[MAX_CSGS];
+
+ /** @streams: Command stream slot interfaces. */
+ struct panthor_fw_cs_iface streams[MAX_CSGS][MAX_CS_PER_CSG];
+};
+
+/**
+ * struct panthor_fw - Firmware management
+ */
+struct panthor_fw {
+ /** @vm: MCU VM. */
+ struct panthor_vm *vm;
+
+ /** @sections: List of FW sections. */
+ struct list_head sections;
+
+ /** @shared_section: The section containing the FW interfaces. */
+ struct panthor_fw_section *shared_section;
+
+ /** @iface: FW interfaces. */
+ struct panthor_fw_iface iface;
+
+ /** @watchdog: Collection of fields relating to the FW watchdog. */
+ struct {
+ /** @ping_work: Delayed work used to ping the FW. */
+ struct delayed_work ping_work;
+ } watchdog;
+
+ /**
+ * @req_waitqueue: FW request waitqueue.
+ *
+	 * Every time a request is sent to a command stream group or the global
+	 * interface, the caller will first busy wait for the request to be
+	 * acknowledged, and then fall back to a sleeping wait.
+ *
+ * This wait queue is here to support the sleeping wait flavor.
+ */
+ wait_queue_head_t req_waitqueue;
+
+	/** @booted: True if the FW is booted. */
+ bool booted;
+
+ /**
+ * @fast_reset: True if the post_reset logic can proceed with a fast reset.
+ *
+ * A fast reset is just a reset where the driver doesn't reload the FW sections.
+ *
+ * Any time the firmware is properly suspended, a fast reset can take place.
+ * On the other hand, if the halt operation failed, the driver will reload
+ * all sections to make sure we start from a fresh state.
+ */
+ bool fast_reset;
+
+ /** @irq: Job irq data. */
+ struct panthor_irq irq;
+};
+
+struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev)
+{
+ return ptdev->fw->vm;
+}
+
+/**
+ * panthor_fw_get_glb_iface() - Get the global interface
+ * @ptdev: Device.
+ *
+ * Return: The global interface.
+ */
+struct panthor_fw_global_iface *
+panthor_fw_get_glb_iface(struct panthor_device *ptdev)
+{
+ return &ptdev->fw->iface.global;
+}
+
+/**
+ * panthor_fw_get_csg_iface() - Get a command stream group slot interface
+ * @ptdev: Device.
+ * @csg_slot: Index of the command stream group slot.
+ *
+ * Return: The command stream group slot interface.
+ */
+struct panthor_fw_csg_iface *
+panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot)
+{
+ if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS))
+ return NULL;
+
+ return &ptdev->fw->iface.groups[csg_slot];
+}
+
+/**
+ * panthor_fw_get_cs_iface() - Get a command stream slot interface
+ * @ptdev: Device.
+ * @csg_slot: Index of the command stream group slot.
+ * @cs_slot: Index of the command stream slot.
+ *
+ * Return: The command stream slot interface.
+ */
+struct panthor_fw_cs_iface *
+panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
+{
+ if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS || cs_slot >= MAX_CS_PER_CSG))
+ return NULL;
+
+ return &ptdev->fw->iface.streams[csg_slot][cs_slot];
+}
+
+/**
+ * panthor_fw_conv_timeout() - Convert a timeout into a cycle-count
+ * @ptdev: Device.
+ * @timeout_us: Timeout expressed in micro-seconds.
+ *
+ * The FW has two timer sources: the GPU counter or arch-timer. We need
+ * to express timeouts in term of number of cycles and specify which
+ * timer source should be used.
+ *
+ * Return: A value suitable for timeout fields in the global interface.
+ */
+static u32 panthor_fw_conv_timeout(struct panthor_device *ptdev, u32 timeout_us)
+{
+ bool use_cycle_counter = false;
+ u32 timer_rate = 0;
+ u64 mod_cycles;
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+ timer_rate = arch_timer_get_cntfrq();
+#endif
+
+ if (!timer_rate) {
+ use_cycle_counter = true;
+ timer_rate = clk_get_rate(ptdev->clks.core);
+ }
+
+ if (drm_WARN_ON(&ptdev->base, !timer_rate)) {
+ /* We couldn't get a valid clock rate, let's just pick the
+ * maximum value so the FW still handles the core
+ * power on/off requests.
+ */
+ return GLB_TIMER_VAL(~0) |
+ GLB_TIMER_SOURCE_GPU_COUNTER;
+ }
+
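+	/* The timeout fields are expressed in units of 1024 timer cycles,
+	 * hence the extra << 10 in the divisor.
+	 */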
+ mod_cycles = DIV_ROUND_UP_ULL((u64)timeout_us * timer_rate,
+ 1000000ull << 10);
+ if (drm_WARN_ON(&ptdev->base, mod_cycles > GLB_TIMER_VAL(~0)))
+ mod_cycles = GLB_TIMER_VAL(~0);
+
+ return GLB_TIMER_VAL(mod_cycles) |
+ (use_cycle_counter ? GLB_TIMER_SOURCE_GPU_COUNTER : 0);
+}
+
+static int panthor_fw_binary_iter_read(struct panthor_device *ptdev,
+ struct panthor_fw_binary_iter *iter,
+ void *out, size_t size)
+{
+ size_t new_offset = iter->offset + size;
+
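+	/* Reject reads going past the end of the image as well as offset overflows. */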
+ if (new_offset > iter->size || new_offset < iter->offset) {
+ drm_err(&ptdev->base, "Firmware too small\n");
+ return -EINVAL;
+ }
+
+ memcpy(out, iter->data + iter->offset, size);
+ iter->offset = new_offset;
+ return 0;
+}
+
+static int panthor_fw_binary_sub_iter_init(struct panthor_device *ptdev,
+ struct panthor_fw_binary_iter *iter,
+ struct panthor_fw_binary_iter *sub_iter,
+ size_t size)
+{
+ size_t new_offset = iter->offset + size;
+
+ if (new_offset > iter->size || new_offset < iter->offset) {
+ drm_err(&ptdev->base, "Firmware entry too long\n");
+ return -EINVAL;
+ }
+
+ sub_iter->offset = 0;
+ sub_iter->data = iter->data + iter->offset;
+ sub_iter->size = size;
+ iter->offset = new_offset;
+ return 0;
+}
+
+static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
+ struct panthor_fw_section *section)
+{
+ bool was_mapped = !!section->mem->kmap;
+ int ret;
+
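+	/* Nothing to do for sections with no initial data, unless they must be
+	 * zero-initialized.
+	 */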
+ if (!section->data.size &&
+ !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+ return;
+
+ ret = panthor_kernel_bo_vmap(section->mem);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ return;
+
+ memcpy(section->mem->kmap, section->data.buf, section->data.size);
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+ memset(section->mem->kmap + section->data.size, 0,
+ panthor_kernel_bo_size(section->mem) - section->data.size);
+ }
+
+ if (!was_mapped)
+ panthor_kernel_bo_vunmap(section->mem);
+}
+
+/**
+ * panthor_fw_alloc_queue_iface_mem() - Allocate the ring-buffer interfaces.
+ * @ptdev: Device.
+ * @input: Pointer holding the input interface on success.
+ * Should be ignored on failure.
+ * @output: Pointer holding the output interface on success.
+ * Should be ignored on failure.
+ * @input_fw_va: Pointer holding the input interface FW VA on success.
+ * Should be ignored on failure.
+ * @output_fw_va: Pointer holding the output interface FW VA on success.
+ * Should be ignored on failure.
+ *
+ * Allocates panthor_fw_ringbuf_{input,output}_iface interfaces. The input
+ * interface is at offset 0, and the output interface at offset 4096.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
+ struct panthor_fw_ringbuf_input_iface **input,
+ const struct panthor_fw_ringbuf_output_iface **output,
+ u32 *input_fw_va, u32 *output_fw_va)
+{
+ struct panthor_kernel_bo *mem;
+ int ret;
+
+ mem = panthor_kernel_bo_create(ptdev, ptdev->fw->vm, SZ_8K,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(mem))
+ return mem;
+
+ ret = panthor_kernel_bo_vmap(mem);
+ if (ret) {
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), mem);
+ return ERR_PTR(ret);
+ }
+
+ memset(mem->kmap, 0, panthor_kernel_bo_size(mem));
+ *input = mem->kmap;
+ *output = mem->kmap + SZ_4K;
+ *input_fw_va = panthor_kernel_bo_gpuva(mem);
+ *output_fw_va = *input_fw_va + SZ_4K;
+
+ return mem;
+}
+
+/**
+ * panthor_fw_alloc_suspend_buf_mem() - Allocate a suspend buffer for a command stream group.
+ * @ptdev: Device.
+ * @size: Size of the suspend buffer.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
+{
+ return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+}
+
+static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
+ const struct firmware *fw,
+ struct panthor_fw_binary_iter *iter,
+ u32 ehdr)
+{
+ struct panthor_fw_binary_section_entry_hdr hdr;
+ struct panthor_fw_section *section;
+ u32 section_size;
+ u32 name_len;
+ int ret;
+
+ ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
+ if (ret)
+ return ret;
+
+ if (hdr.data.end < hdr.data.start) {
+ drm_err(&ptdev->base, "Firmware corrupted, data.end < data.start (0x%x < 0x%x)\n",
+ hdr.data.end, hdr.data.start);
+ return -EINVAL;
+ }
+
+ if (hdr.va.end < hdr.va.start) {
+ drm_err(&ptdev->base, "Firmware corrupted, hdr.va.end < hdr.va.start (0x%x < 0x%x)\n",
+ hdr.va.end, hdr.va.start);
+ return -EINVAL;
+ }
+
+ if (hdr.data.end > fw->size) {
+ drm_err(&ptdev->base, "Firmware corrupted, file truncated? data_end=0x%x > fw size=0x%zx\n",
+ hdr.data.end, fw->size);
+ return -EINVAL;
+ }
+
+ if ((hdr.va.start & ~PAGE_MASK) != 0 ||
+ (hdr.va.end & ~PAGE_MASK) != 0) {
+ drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
+ hdr.va.start, hdr.va.end);
+ return -EINVAL;
+ }
+
+ if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+ drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
+ hdr.flags);
+ return -EINVAL;
+ }
+
+ if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+		drm_warn(&ptdev->base,
+			 "Firmware protected mode entry not supported, ignoring");
+ return 0;
+ }
+
+ if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
+ !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+ drm_err(&ptdev->base,
+ "Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
+ return -EINVAL;
+ }
+
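+	/* Whatever remains in the entry after the fixed header is the
+	 * (optional) section name.
+	 */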
+ name_len = iter->size - iter->offset;
+
+ section = drmm_kzalloc(&ptdev->base, sizeof(*section), GFP_KERNEL);
+ if (!section)
+ return -ENOMEM;
+
+ list_add_tail(&section->node, &ptdev->fw->sections);
+ section->flags = hdr.flags;
+ section->data.size = hdr.data.end - hdr.data.start;
+
+ if (section->data.size > 0) {
+ void *data = drmm_kmalloc(&ptdev->base, section->data.size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data, fw->data + hdr.data.start, section->data.size);
+ section->data.buf = data;
+ }
+
+ if (name_len > 0) {
+ char *name = drmm_kmalloc(&ptdev->base, name_len + 1, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ memcpy(name, iter->data + iter->offset, name_len);
+ name[name_len] = '\0';
+ section->name = name;
+ }
+
+ section_size = hdr.va.end - hdr.va.start;
+ if (section_size) {
+ u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+ struct panthor_gem_object *bo;
+ u32 vm_map_flags = 0;
+ struct sg_table *sgt;
+ u64 va = hdr.va.start;
+
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
+
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
+
+ /* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+ * non-cacheable for now. We might want to introduce a new
+ * IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
+ * memory and is currently not used by our driver) for
+ * AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
+ * of IO-coherent systems.
+ */
+ if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
+
+ section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
+ section_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ vm_map_flags, va);
+ if (IS_ERR(section->mem))
+ return PTR_ERR(section->mem);
+
+ if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
+ return -EINVAL;
+
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+ ret = panthor_kernel_bo_vmap(section->mem);
+ if (ret)
+ return ret;
+ }
+
+ panthor_fw_init_section_mem(ptdev, section);
+
+ bo = to_panthor_bo(section->mem->obj);
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
+ }
+
+ if (hdr.va.start == CSF_MCU_SHARED_REGION_START)
+ ptdev->fw->shared_section = section;
+
+ return 0;
+}
+
+static void
+panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
+{
+ struct panthor_fw_section *section;
+
+ list_for_each_entry(section, &ptdev->fw->sections, node) {
+ struct sg_table *sgt;
+
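+		/* On a partial reload, only writable sections may have been
+		 * modified by the MCU; read-only sections still hold their
+		 * initial content.
+		 */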
+ if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ continue;
+
+ panthor_fw_init_section_mem(ptdev, section);
+ sgt = drm_gem_shmem_get_pages_sgt(&to_panthor_bo(section->mem->obj)->base);
+ if (!drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(sgt)))
+ dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
+ }
+}
+
+static int panthor_fw_load_entry(struct panthor_device *ptdev,
+ const struct firmware *fw,
+ struct panthor_fw_binary_iter *iter)
+{
+ struct panthor_fw_binary_iter eiter;
+ u32 ehdr;
+ int ret;
+
+ ret = panthor_fw_binary_iter_read(ptdev, iter, &ehdr, sizeof(ehdr));
+ if (ret)
+ return ret;
+
+ if ((iter->offset % sizeof(u32)) ||
+ (CSF_FW_BINARY_ENTRY_SIZE(ehdr) % sizeof(u32))) {
+ drm_err(&ptdev->base, "Firmware entry isn't 32 bit aligned, offset=0x%x size=0x%x\n",
+ (u32)(iter->offset - sizeof(u32)), CSF_FW_BINARY_ENTRY_SIZE(ehdr));
+ return -EINVAL;
+ }
+
+ if (panthor_fw_binary_sub_iter_init(ptdev, iter, &eiter,
+ CSF_FW_BINARY_ENTRY_SIZE(ehdr) - sizeof(ehdr)))
+ return -EINVAL;
+
+ switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) {
+ case CSF_FW_BINARY_ENTRY_TYPE_IFACE:
+ return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr);
+
+ /* FIXME: handle those entry types? */
+ case CSF_FW_BINARY_ENTRY_TYPE_CONFIG:
+ case CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST:
+ case CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER:
+ case CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA:
+ return 0;
+ default:
+ break;
+ }
+
+ if (ehdr & CSF_FW_BINARY_ENTRY_OPTIONAL)
+ return 0;
+
+ drm_err(&ptdev->base,
+ "Unsupported non-optional entry type %u in firmware\n",
+ CSF_FW_BINARY_ENTRY_TYPE(ehdr));
+ return -EINVAL;
+}
+
+static int panthor_fw_load(struct panthor_device *ptdev)
+{
+ const struct firmware *fw = NULL;
+ struct panthor_fw_binary_iter iter = {};
+ struct panthor_fw_binary_hdr hdr;
+ char fw_path[128];
+ int ret;
+
+ snprintf(fw_path, sizeof(fw_path), "arm/mali/arch%d.%d/%s",
+ (u32)GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id),
+ (u32)GPU_ARCH_MINOR(ptdev->gpu_info.gpu_id),
+ CSF_FW_NAME);
+
+ ret = request_firmware(&fw, fw_path, ptdev->base.dev);
+ if (ret) {
+ drm_err(&ptdev->base, "Failed to load firmware image '%s'\n",
+ CSF_FW_NAME);
+ return ret;
+ }
+
+ iter.data = fw->data;
+ iter.size = fw->size;
+ ret = panthor_fw_binary_iter_read(ptdev, &iter, &hdr, sizeof(hdr));
+ if (ret)
+ goto out;
+
+ if (hdr.magic != CSF_FW_BINARY_HEADER_MAGIC) {
+ ret = -EINVAL;
+ drm_err(&ptdev->base, "Invalid firmware magic\n");
+ goto out;
+ }
+
+ if (hdr.major != CSF_FW_BINARY_HEADER_MAJOR_MAX) {
+ ret = -EINVAL;
+ drm_err(&ptdev->base, "Unsupported firmware binary header version %d.%d (expected %d.x)\n",
+ hdr.major, hdr.minor, CSF_FW_BINARY_HEADER_MAJOR_MAX);
+ goto out;
+ }
+
+ if (hdr.size > iter.size) {
+		drm_err(&ptdev->base, "Firmware image is truncated\n");
+		ret = -EINVAL;
+		goto out;
+ }
+
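+	/* Restrict the iterator to the size advertised by the binary header. */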
+ iter.size = hdr.size;
+
+ while (iter.offset < hdr.size) {
+ ret = panthor_fw_load_entry(ptdev, fw, &iter);
+ if (ret)
+ goto out;
+ }
+
+ if (!ptdev->fw->shared_section) {
+ drm_err(&ptdev->base, "Shared interface region not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+/**
+ * iface_fw_to_cpu_addr() - Turn an MCU address into a CPU address
+ * @ptdev: Device.
+ * @mcu_va: MCU address.
+ *
+ * Return: NULL if the address is not part of the shared section, non-NULL otherwise.
+ */
+static void *iface_fw_to_cpu_addr(struct panthor_device *ptdev, u32 mcu_va)
+{
+ u64 shared_mem_start = panthor_kernel_bo_gpuva(ptdev->fw->shared_section->mem);
+ u64 shared_mem_end = shared_mem_start +
+ panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ if (mcu_va < shared_mem_start || mcu_va >= shared_mem_end)
+ return NULL;
+
+ return ptdev->fw->shared_section->mem->kmap + (mcu_va - shared_mem_start);
+}
+
+static int panthor_init_cs_iface(struct panthor_device *ptdev,
+ unsigned int csg_idx, unsigned int cs_idx)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_idx);
+ struct panthor_fw_cs_iface *cs_iface = &ptdev->fw->iface.streams[csg_idx][cs_idx];
+ u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ u32 iface_offset = CSF_GROUP_CONTROL_OFFSET +
+ (csg_idx * glb_iface->control->group_stride) +
+ CSF_STREAM_CONTROL_OFFSET +
+ (cs_idx * csg_iface->control->stream_stride);
+ struct panthor_fw_cs_iface *first_cs_iface =
+ panthor_fw_get_cs_iface(ptdev, 0, 0);
+
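+	/* Make sure the interface descriptor fits in the shared section before
+	 * dereferencing it.
+	 */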
+ if (iface_offset + sizeof(*cs_iface) >= shared_section_sz)
+ return -EINVAL;
+
+ spin_lock_init(&cs_iface->lock);
+ cs_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
+ cs_iface->input = iface_fw_to_cpu_addr(ptdev, cs_iface->control->input_va);
+ cs_iface->output = iface_fw_to_cpu_addr(ptdev, cs_iface->control->output_va);
+
+ if (!cs_iface->input || !cs_iface->output) {
+ drm_err(&ptdev->base, "Invalid stream control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (cs_iface != first_cs_iface) {
+ if (cs_iface->control->features != first_cs_iface->control->features) {
+ drm_err(&ptdev->base, "Expecting identical CS slots");
+ return -EINVAL;
+ }
+ } else {
+ u32 reg_count = CS_FEATURES_WORK_REGS(cs_iface->control->features);
+
+ ptdev->csif_info.cs_reg_count = reg_count;
+ ptdev->csif_info.unpreserved_cs_reg_count = CSF_UNPRESERVED_REG_COUNT;
+ }
+
+ return 0;
+}
+
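+/* All CSG slots are expected to expose identical capabilities; compare the
+ * fields the driver relies on.
+ */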
+static bool compare_csg(const struct panthor_fw_csg_control_iface *a,
+ const struct panthor_fw_csg_control_iface *b)
+{
+ if (a->features != b->features)
+ return false;
+ if (a->suspend_size != b->suspend_size)
+ return false;
+ if (a->protm_suspend_size != b->protm_suspend_size)
+ return false;
+ if (a->stream_num != b->stream_num)
+ return false;
+ return true;
+}
+
+static int panthor_init_csg_iface(struct panthor_device *ptdev,
+ unsigned int csg_idx)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = &ptdev->fw->iface.groups[csg_idx];
+ u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ u32 iface_offset = CSF_GROUP_CONTROL_OFFSET + (csg_idx * glb_iface->control->group_stride);
+ unsigned int i;
+
+ if (iface_offset + sizeof(*csg_iface) >= shared_section_sz)
+ return -EINVAL;
+
+ spin_lock_init(&csg_iface->lock);
+ csg_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
+ csg_iface->input = iface_fw_to_cpu_addr(ptdev, csg_iface->control->input_va);
+ csg_iface->output = iface_fw_to_cpu_addr(ptdev, csg_iface->control->output_va);
+
+ if (csg_iface->control->stream_num < MIN_CS_PER_CSG ||
+ csg_iface->control->stream_num > MAX_CS_PER_CSG)
+ return -EINVAL;
+
+ if (!csg_iface->input || !csg_iface->output) {
+ drm_err(&ptdev->base, "Invalid group control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (csg_idx > 0) {
+ struct panthor_fw_csg_iface *first_csg_iface =
+ panthor_fw_get_csg_iface(ptdev, 0);
+
+ if (!compare_csg(first_csg_iface->control, csg_iface->control)) {
+ drm_err(&ptdev->base, "Expecting identical CSG slots");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < csg_iface->control->stream_num; i++) {
+ int ret = panthor_init_cs_iface(ptdev, csg_idx, i);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static u32 panthor_get_instr_features(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
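+	/* instr_features is only present from CSF interface version 1.1 onwards. */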
+ if (glb_iface->control->version < CSF_IFACE_VERSION(1, 1, 0))
+ return 0;
+
+ return glb_iface->control->instr_features;
+}
+
+static int panthor_fw_init_ifaces(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = &ptdev->fw->iface.global;
+ unsigned int i;
+
+ if (!ptdev->fw->shared_section->mem->kmap)
+ return -EINVAL;
+
+ spin_lock_init(&glb_iface->lock);
+ glb_iface->control = ptdev->fw->shared_section->mem->kmap;
+
+ if (!glb_iface->control->version) {
+ drm_err(&ptdev->base, "Firmware version is 0. Firmware may have failed to boot");
+ return -EINVAL;
+ }
+
+ glb_iface->input = iface_fw_to_cpu_addr(ptdev, glb_iface->control->input_va);
+ glb_iface->output = iface_fw_to_cpu_addr(ptdev, glb_iface->control->output_va);
+ if (!glb_iface->input || !glb_iface->output) {
+ drm_err(&ptdev->base, "Invalid global control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (glb_iface->control->group_num > MAX_CSGS ||
+ glb_iface->control->group_num < MIN_CSGS) {
+ drm_err(&ptdev->base, "Invalid number of control groups");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < glb_iface->control->group_num; i++) {
+ int ret = panthor_init_csg_iface(ptdev, i);
+
+ if (ret)
+ return ret;
+ }
+
+ drm_info(&ptdev->base, "CSF FW v%d.%d.%d, Features %#x Instrumentation features %#x",
+ CSF_IFACE_VERSION_MAJOR(glb_iface->control->version),
+ CSF_IFACE_VERSION_MINOR(glb_iface->control->version),
+ CSF_IFACE_VERSION_PATCH(glb_iface->control->version),
+ glb_iface->control->features,
+ panthor_get_instr_features(ptdev));
+ return 0;
+}
+
+static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ /* Enable all cores. */
+ glb_iface->input->core_en_mask = ptdev->gpu_info.shader_present;
+
+ /* Setup timers. */
+ glb_iface->input->poweroff_timer = panthor_fw_conv_timeout(ptdev, PWROFF_HYSTERESIS_US);
+ glb_iface->input->progress_timer = PROGRESS_TIMEOUT_CYCLES >> PROGRESS_TIMEOUT_SCALE_SHIFT;
+ glb_iface->input->idle_timer = panthor_fw_conv_timeout(ptdev, IDLE_HYSTERESIS_US);
+
+ /* Enable interrupts we care about. */
+ glb_iface->input->ack_irq_mask = GLB_CFG_ALLOC_EN |
+ GLB_PING |
+ GLB_CFG_PROGRESS_TIMER |
+ GLB_CFG_POWEROFF_TIMER |
+ GLB_IDLE_EN |
+ GLB_IDLE;
+
+ panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
+ panthor_fw_toggle_reqs(glb_iface, req, ack,
+ GLB_CFG_ALLOC_EN |
+ GLB_CFG_POWEROFF_TIMER |
+ GLB_CFG_PROGRESS_TIMER);
+
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+
+ /* Kick the watchdog. */
+ mod_delayed_work(ptdev->reset.wq, &ptdev->fw->watchdog.ping_work,
+ msecs_to_jiffies(PING_INTERVAL_MS));
+}
+
+static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
+ ptdev->fw->booted = true;
+
+ wake_up_all(&ptdev->fw->req_waitqueue);
+
+	/* Until the FW is flagged as booted, this IRQ only signals the boot; don't report FW events to the scheduler. */
+ if (!ptdev->fw->booted)
+ return;
+
+ panthor_sched_report_fw_events(ptdev, status);
+}
+PANTHOR_IRQ_HANDLER(job, JOB, panthor_job_irq_handler);
+
+static int panthor_fw_start(struct panthor_device *ptdev)
+{
+ bool timedout = false;
+
+ ptdev->fw->booted = false;
+ panthor_job_irq_resume(&ptdev->fw->irq, ~0);
+ gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_AUTO);
+
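+	/* Booting raises JOB_INT_GLOBAL_IF; on timeout, re-check the raw
+	 * interrupt status in case the wakeup was missed.
+	 */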
+ if (!wait_event_timeout(ptdev->fw->req_waitqueue,
+ ptdev->fw->booted,
+ msecs_to_jiffies(1000))) {
+ if (!ptdev->fw->booted &&
+ !(gpu_read(ptdev, JOB_INT_STAT) & JOB_INT_GLOBAL_IF))
+ timedout = true;
+ }
+
+ if (timedout) {
+ static const char * const status_str[] = {
+ [MCU_STATUS_DISABLED] = "disabled",
+ [MCU_STATUS_ENABLED] = "enabled",
+ [MCU_STATUS_HALT] = "halt",
+ [MCU_STATUS_FATAL] = "fatal",
+ };
+ u32 status = gpu_read(ptdev, MCU_STATUS);
+
+ drm_err(&ptdev->base, "Failed to boot MCU (status=%s)",
+ status < ARRAY_SIZE(status_str) ? status_str[status] : "unknown");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void panthor_fw_stop(struct panthor_device *ptdev)
+{
+ u32 status;
+
+ gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
+ if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
+ drm_err(&ptdev->base, "Failed to stop MCU");
+}
+
+/**
+ * panthor_fw_pre_reset() - Call before a reset.
+ * @ptdev: Device.
+ * @on_hang: true if the reset was triggered on a GPU hang.
+ *
+ * If the reset is not triggered on a hang, we try to gracefully halt the
+ * MCU, so we can do a fast-reset when panthor_fw_post_reset() is called.
+ */
+void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
+{
+ /* Make sure we won't be woken up by a ping. */
+ cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
+
+ ptdev->fw->fast_reset = false;
+
+ if (!on_hang) {
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 status;
+
+ panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+ if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10, 100000) &&
+ glb_iface->output->halt_status == PANTHOR_FW_HALT_OK) {
+ ptdev->fw->fast_reset = true;
+ } else {
+ drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
+ }
+
+ /* The FW detects 0 -> 1 transitions. Make sure we reset
+ * the HALT bit before the FW is rebooted.
+ */
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
+ }
+
+ panthor_job_irq_suspend(&ptdev->fw->irq);
+}
+
+/**
+ * panthor_fw_post_reset() - Call after a reset.
+ * @ptdev: Device.
+ *
+ * Start the FW. If this is not a fast reset, all FW sections are reloaded to
+ * make sure we can recover from a memory corruption.
+ */
+int panthor_fw_post_reset(struct panthor_device *ptdev)
+{
+ int ret;
+
+ /* Make the MCU VM active. */
+ ret = panthor_vm_active(ptdev->fw->vm);
+ if (ret)
+ return ret;
+
+ /* If this is a fast reset, try to start the MCU without reloading
+ * the FW sections. If it fails, go for a full reset.
+ */
+ if (ptdev->fw->fast_reset) {
+ ret = panthor_fw_start(ptdev);
+ if (!ret)
+ goto out;
+
+ /* Forcibly reset the MCU and force a slow reset, so we get a
+ * fresh boot on the next panthor_fw_start() call.
+ */
+ panthor_fw_stop(ptdev);
+ ptdev->fw->fast_reset = false;
+ drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+ }
+
+ /* Reload all sections, including RO ones. We're not supposed
+ * to end up here anyway, let's just assume the overhead of
+ * reloading everything is acceptable.
+ */
+ panthor_reload_fw_sections(ptdev, true);
+
+ ret = panthor_fw_start(ptdev);
+ if (ret) {
+ drm_err(&ptdev->base, "FW slow reset failed");
+ return ret;
+ }
+
+out:
+ /* We must re-initialize the global interface even on fast-reset. */
+ panthor_fw_init_global_iface(ptdev);
+ return 0;
+}
+
+/**
+ * panthor_fw_unplug() - Called when the device is unplugged.
+ * @ptdev: Device.
+ *
+ * This function must make sure all pending operations are flushed before
+ * it releases device resources, thus preventing any interaction with
+ * the HW.
+ *
+ * Any FW-related work still running after this function returns must use
+ * drm_dev_{enter,exit}() and skip any HW access when drm_dev_enter()
+ * returns false.
+ */
+void panthor_fw_unplug(struct panthor_device *ptdev)
+{
+ struct panthor_fw_section *section;
+
+ cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
+
+	/* Make sure the IRQ handler cannot be called after that point. */
+ if (ptdev->fw->irq.irq)
+ panthor_job_irq_suspend(&ptdev->fw->irq);
+
+ panthor_fw_stop(ptdev);
+
+ list_for_each_entry(section, &ptdev->fw->sections, node)
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
+
+ /* We intentionally don't call panthor_vm_idle() and let
+ * panthor_mmu_unplug() release the AS we acquired with
+ * panthor_vm_active() so we don't have to track the VM active/idle
+ * state to keep the active_refcnt balanced.
+ */
+ panthor_vm_put(ptdev->fw->vm);
+
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+}
+
+/**
+ * panthor_fw_wait_acks() - Wait for requests to be acknowledged by the FW.
+ * @req_ptr: Pointer to the req register.
+ * @ack_ptr: Pointer to the ack register.
+ * @wq: Wait queue to use for the sleeping wait.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+static int panthor_fw_wait_acks(const u32 *req_ptr, const u32 *ack_ptr,
+ wait_queue_head_t *wq,
+ u32 req_mask, u32 *acked,
+ u32 timeout_ms)
+{
+ u32 ack, req = READ_ONCE(*req_ptr) & req_mask;
+ int ret;
+
+ /* Busy wait for a few µsecs before falling back to a sleeping wait. */
+ *acked = req_mask;
+ ret = read_poll_timeout_atomic(READ_ONCE, ack,
+ (ack & req_mask) == req,
+ 0, 10, 0,
+ *ack_ptr);
+ if (!ret)
+ return 0;
+
+ if (wait_event_timeout(*wq, (READ_ONCE(*ack_ptr) & req_mask) == req,
+ msecs_to_jiffies(timeout_ms)))
+ return 0;
+
+ /* Check one last time, in case we were not woken up for some reason. */
+ ack = READ_ONCE(*ack_ptr);
+ if ((ack & req_mask) == req)
+ return 0;
+
+ *acked = ~(req ^ ack) & req_mask;
+ return -ETIMEDOUT;
+}
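+
+/*
+ * Illustrative sketch of the *acked computation above, assuming req_mask = 0b11:
+ * if req = 0b10 and ack = 0b00, then req ^ ack = 0b10 and
+ * ~(req ^ ack) & req_mask = 0b01, i.e. the request tracked on bit 0 is
+ * considered acknowledged while the one on bit 1 is not.
+ */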
+
+/**
+ * panthor_fw_glb_wait_acks() - Wait for global requests to be acknowledged.
+ * @ptdev: Device.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+int panthor_fw_glb_wait_acks(struct panthor_device *ptdev,
+ u32 req_mask, u32 *acked,
+ u32 timeout_ms)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ /* GLB_HALT doesn't get acked through the FW interface. */
+ if (drm_WARN_ON(&ptdev->base, req_mask & (~GLB_REQ_MASK | GLB_HALT)))
+ return -EINVAL;
+
+ return panthor_fw_wait_acks(&glb_iface->input->req,
+ &glb_iface->output->ack,
+ &ptdev->fw->req_waitqueue,
+ req_mask, acked, timeout_ms);
+}
+
+/**
+ * panthor_fw_csg_wait_acks() - Wait for command stream group requests to be acknowledged.
+ * @ptdev: Device.
+ * @csg_slot: CSG slot ID.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_slot,
+ u32 req_mask, u32 *acked, u32 timeout_ms)
+{
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_slot);
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, req_mask & ~CSG_REQ_MASK))
+ return -EINVAL;
+
+ ret = panthor_fw_wait_acks(&csg_iface->input->req,
+ &csg_iface->output->ack,
+ &ptdev->fw->req_waitqueue,
+ req_mask, acked, timeout_ms);
+
+ /*
+ * Check that all bits in the state field were updated; if there is any
+ * mismatch, clear all bits in the state field. This allows callers to do
+ * (acked & CSG_STATE_MASK) and get the right value.
+ */
+ if ((*acked & CSG_STATE_MASK) != CSG_STATE_MASK)
+ *acked &= ~CSG_STATE_MASK;
+
+ return ret;
+}
+
+/**
+ * panthor_fw_ring_csg_doorbells() - Ring command stream group doorbells.
+ * @ptdev: Device.
+ * @csg_mask: Bitmask encoding the command stream group doorbells to ring.
+ *
+ * This function toggles bits in doorbell_req and rings the global
+ * doorbell. It doesn't require a user doorbell to be attached to
+ * the group.
+ */
+void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_mask)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ panthor_fw_toggle_reqs(glb_iface, doorbell_req, doorbell_ack, csg_mask);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+}
+
+static void panthor_fw_ping_work(struct work_struct *work)
+{
+ struct panthor_fw *fw = container_of(work, struct panthor_fw, watchdog.ping_work.work);
+ struct panthor_device *ptdev = fw->irq.ptdev;
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 acked;
+ int ret;
+
+ if (panthor_device_reset_is_pending(ptdev))
+ return;
+
+ panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_PING);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+
+ ret = panthor_fw_glb_wait_acks(ptdev, GLB_PING, &acked, 100);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ drm_err(&ptdev->base, "FW ping timeout, scheduling a reset");
+ } else {
+ mod_delayed_work(ptdev->reset.wq, &fw->watchdog.ping_work,
+ msecs_to_jiffies(PING_INTERVAL_MS));
+ }
+}
+
+/**
+ * panthor_fw_init() - Initialize FW related data.
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_fw_init(struct panthor_device *ptdev)
+{
+ struct panthor_fw *fw;
+ int ret, irq;
+
+ fw = drmm_kzalloc(&ptdev->base, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ ptdev->fw = fw;
+ init_waitqueue_head(&fw->req_waitqueue);
+ INIT_LIST_HEAD(&fw->sections);
+ INIT_DELAYED_WORK(&fw->watchdog.ping_work, panthor_fw_ping_work);
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "job");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = panthor_request_job_irq(ptdev, &fw->irq, irq, 0);
+ if (ret) {
+ drm_err(&ptdev->base, "failed to request job irq");
+ return ret;
+ }
+
+ ret = panthor_gpu_l2_power_on(ptdev);
+ if (ret)
+ return ret;
+
+ fw->vm = panthor_vm_create(ptdev, true,
+ 0, SZ_4G,
+ CSF_MCU_SHARED_REGION_START,
+ CSF_MCU_SHARED_REGION_SIZE);
+ if (IS_ERR(fw->vm)) {
+ ret = PTR_ERR(fw->vm);
+ fw->vm = NULL;
+ goto err_unplug_fw;
+ }
+
+ ret = panthor_fw_load(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_vm_active(fw->vm);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_fw_start(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_fw_init_ifaces(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ panthor_fw_init_global_iface(ptdev);
+ return 0;
+
+err_unplug_fw:
+ panthor_fw_unplug(ptdev);
+ return ret;
+}
+
+MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
new file mode 100644
index 000000000000..22448abde992
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_fw.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_MCU_H__
+#define __PANTHOR_MCU_H__
+
+#include <linux/types.h>
+
+struct panthor_device;
+struct panthor_kernel_bo;
+
+#define MAX_CSGS 31
+#define MAX_CS_PER_CSG 32
+
+struct panthor_fw_ringbuf_input_iface {
+ u64 insert;
+ u64 extract;
+};
+
+struct panthor_fw_ringbuf_output_iface {
+ u64 extract;
+ u32 active;
+};
+
+struct panthor_fw_cs_control_iface {
+#define CS_FEATURES_WORK_REGS(x) (((x) & GENMASK(7, 0)) + 1)
+#define CS_FEATURES_SCOREBOARDS(x) (((x) & GENMASK(15, 8)) >> 8)
+#define CS_FEATURES_COMPUTE BIT(16)
+#define CS_FEATURES_FRAGMENT BIT(17)
+#define CS_FEATURES_TILER BIT(18)
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+};
+
+struct panthor_fw_cs_input_iface {
+#define CS_STATE_MASK GENMASK(2, 0)
+#define CS_STATE_STOP 0
+#define CS_STATE_START 1
+#define CS_EXTRACT_EVENT BIT(4)
+#define CS_IDLE_SYNC_WAIT BIT(8)
+#define CS_IDLE_PROTM_PENDING BIT(9)
+#define CS_IDLE_EMPTY BIT(10)
+#define CS_IDLE_RESOURCE_REQ BIT(11)
+#define CS_TILER_OOM BIT(26)
+#define CS_PROTM_PENDING BIT(27)
+#define CS_FATAL BIT(30)
+#define CS_FAULT BIT(31)
+#define CS_REQ_MASK (CS_STATE_MASK | \
+ CS_EXTRACT_EVENT | \
+ CS_IDLE_SYNC_WAIT | \
+ CS_IDLE_PROTM_PENDING | \
+ CS_IDLE_EMPTY | \
+ CS_IDLE_RESOURCE_REQ)
+#define CS_EVT_MASK (CS_TILER_OOM | \
+ CS_PROTM_PENDING | \
+ CS_FATAL | \
+ CS_FAULT)
+ u32 req;
+
+#define CS_CONFIG_PRIORITY(x) ((x) & GENMASK(3, 0))
+#define CS_CONFIG_DOORBELL(x) (((x) << 8) & GENMASK(15, 8))
+ u32 config;
+ u32 reserved1;
+ u32 ack_irq_mask;
+ u64 ringbuf_base;
+ u32 ringbuf_size;
+ u32 reserved2;
+ u64 heap_start;
+ u64 heap_end;
+ u64 ringbuf_input;
+ u64 ringbuf_output;
+ u32 instr_config;
+ u32 instrbuf_size;
+ u64 instrbuf_base;
+ u64 instrbuf_offset_ptr;
+};
+
+struct panthor_fw_cs_output_iface {
+ u32 ack;
+ u32 reserved1[15];
+ u64 status_cmd_ptr;
+
+#define CS_STATUS_WAIT_SB_MASK GENMASK(15, 0)
+#define CS_STATUS_WAIT_SB_SRC_MASK GENMASK(19, 16)
+#define CS_STATUS_WAIT_SB_SRC_NONE (0 << 16)
+#define CS_STATUS_WAIT_SB_SRC_WAIT (8 << 16)
+#define CS_STATUS_WAIT_SYNC_COND_LE (0 << 24)
+#define CS_STATUS_WAIT_SYNC_COND_GT (1 << 24)
+#define CS_STATUS_WAIT_SYNC_COND_MASK GENMASK(27, 24)
+#define CS_STATUS_WAIT_PROGRESS BIT(28)
+#define CS_STATUS_WAIT_PROTM BIT(29)
+#define CS_STATUS_WAIT_SYNC_64B BIT(30)
+#define CS_STATUS_WAIT_SYNC BIT(31)
+ u32 status_wait;
+ u32 status_req_resource;
+ u64 status_wait_sync_ptr;
+ u32 status_wait_sync_value;
+ u32 status_scoreboards;
+
+#define CS_STATUS_BLOCKED_REASON_UNBLOCKED 0
+#define CS_STATUS_BLOCKED_REASON_SB_WAIT 1
+#define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2
+#define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3
+#define CS_STATUS_BLOCKED_REASON_DEFERRED 5
+#define CS_STATUS_BLOCKED_REASON_RES 6
+#define CS_STATUS_BLOCKED_REASON_FLUSH 7
+#define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0)
+ u32 status_blocked_reason;
+ u32 status_wait_sync_value_hi;
+ u32 reserved2[6];
+
+#define CS_EXCEPTION_TYPE(x) ((x) & GENMASK(7, 0))
+#define CS_EXCEPTION_DATA(x) (((x) >> 8) & GENMASK(23, 0))
+ u32 fault;
+ u32 fatal;
+ u64 fault_info;
+ u64 fatal_info;
+ u32 reserved3[10];
+ u32 heap_vt_start;
+ u32 heap_vt_end;
+ u32 reserved4;
+ u32 heap_frag_end;
+ u64 heap_address;
+};
+
+struct panthor_fw_csg_control_iface {
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+ u32 suspend_size;
+ u32 protm_suspend_size;
+ u32 stream_num;
+ u32 stream_stride;
+};
+
+struct panthor_fw_csg_input_iface {
+#define CSG_STATE_MASK GENMASK(2, 0)
+#define CSG_STATE_TERMINATE 0
+#define CSG_STATE_START 1
+#define CSG_STATE_SUSPEND 2
+#define CSG_STATE_RESUME 3
+#define CSG_ENDPOINT_CONFIG BIT(4)
+#define CSG_STATUS_UPDATE BIT(5)
+#define CSG_SYNC_UPDATE BIT(28)
+#define CSG_IDLE BIT(29)
+#define CSG_DOORBELL BIT(30)
+#define CSG_PROGRESS_TIMER_EVENT BIT(31)
+#define CSG_REQ_MASK (CSG_STATE_MASK | \
+ CSG_ENDPOINT_CONFIG | \
+ CSG_STATUS_UPDATE)
+#define CSG_EVT_MASK (CSG_SYNC_UPDATE | \
+ CSG_IDLE | \
+ CSG_PROGRESS_TIMER_EVENT)
+ u32 req;
+ u32 ack_irq_mask;
+
+ u32 doorbell_req;
+ u32 cs_irq_ack;
+ u32 reserved1[4];
+ u64 allow_compute;
+ u64 allow_fragment;
+ u32 allow_other;
+
+#define CSG_EP_REQ_COMPUTE(x) ((x) & GENMASK(7, 0))
+#define CSG_EP_REQ_FRAGMENT(x) (((x) << 8) & GENMASK(15, 8))
+#define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16))
+#define CSG_EP_REQ_EXCL_COMPUTE BIT(20)
+#define CSG_EP_REQ_EXCL_FRAGMENT BIT(21)
+#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28))
+#define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28)
+ u32 endpoint_req;
+ u32 reserved2[2];
+ u64 suspend_buf;
+ u64 protm_suspend_buf;
+ u32 config;
+ u32 iter_trace_config;
+};
+
+struct panthor_fw_csg_output_iface {
+ u32 ack;
+ u32 reserved1;
+ u32 doorbell_ack;
+ u32 cs_irq_req;
+ u32 status_endpoint_current;
+ u32 status_endpoint_req;
+
+#define CSG_STATUS_STATE_IS_IDLE BIT(0)
+ u32 status_state;
+ u32 resource_dep;
+};
+
+struct panthor_fw_global_control_iface {
+ u32 version;
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+ u32 group_num;
+ u32 group_stride;
+ u32 perfcnt_size;
+ u32 instr_features;
+};
+
+struct panthor_fw_global_input_iface {
+#define GLB_HALT BIT(0)
+#define GLB_CFG_PROGRESS_TIMER BIT(1)
+#define GLB_CFG_ALLOC_EN BIT(2)
+#define GLB_CFG_POWEROFF_TIMER BIT(3)
+#define GLB_PROTM_ENTER BIT(4)
+#define GLB_PERFCNT_EN BIT(5)
+#define GLB_PERFCNT_SAMPLE BIT(6)
+#define GLB_COUNTER_EN BIT(7)
+#define GLB_PING BIT(8)
+#define GLB_FWCFG_UPDATE BIT(9)
+#define GLB_IDLE_EN BIT(10)
+#define GLB_SLEEP BIT(12)
+#define GLB_INACTIVE_COMPUTE BIT(20)
+#define GLB_INACTIVE_FRAGMENT BIT(21)
+#define GLB_INACTIVE_TILER BIT(22)
+#define GLB_PROTM_EXIT BIT(23)
+#define GLB_PERFCNT_THRESHOLD BIT(24)
+#define GLB_PERFCNT_OVERFLOW BIT(25)
+#define GLB_IDLE BIT(26)
+#define GLB_DBG_CSF BIT(30)
+#define GLB_DBG_HOST BIT(31)
+#define GLB_REQ_MASK GENMASK(10, 0)
+#define GLB_EVT_MASK GENMASK(26, 20)
+ u32 req;
+ u32 ack_irq_mask;
+ u32 doorbell_req;
+ u32 reserved1;
+ u32 progress_timer;
+
+#define GLB_TIMER_VAL(x) ((x) & GENMASK(30, 0))
+#define GLB_TIMER_SOURCE_GPU_COUNTER BIT(31)
+ u32 poweroff_timer;
+ u64 core_en_mask;
+ u32 reserved2;
+ u32 perfcnt_as;
+ u64 perfcnt_base;
+ u32 perfcnt_extract;
+ u32 reserved3[3];
+ u32 perfcnt_config;
+ u32 perfcnt_csg_select;
+ u32 perfcnt_fw_enable;
+ u32 perfcnt_csg_enable;
+ u32 perfcnt_csf_enable;
+ u32 perfcnt_shader_enable;
+ u32 perfcnt_tiler_enable;
+ u32 perfcnt_mmu_l2_enable;
+ u32 reserved4[8];
+ u32 idle_timer;
+};
+
+enum panthor_fw_halt_status {
+ PANTHOR_FW_HALT_OK = 0,
+ PANTHOR_FW_HALT_ON_PANIC = 0x4e,
+ PANTHOR_FW_HALT_ON_WATCHDOG_EXPIRATION = 0x4f,
+};
+
+struct panthor_fw_global_output_iface {
+ u32 ack;
+ u32 reserved1;
+ u32 doorbell_ack;
+ u32 reserved2;
+ u32 halt_status;
+ u32 perfcnt_status;
+ u32 perfcnt_insert;
+};
+
+/**
+ * struct panthor_fw_cs_iface - Firmware command stream slot interface
+ */
+struct panthor_fw_cs_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_cs_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Command stream slot control interface.
+ *
+ * Used to expose command stream slot properties.
+ *
+ * This interface is read-only.
+ */
+ struct panthor_fw_cs_control_iface *control;
+
+ /**
+ * @input: Command stream slot input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_cs_input_iface *input;
+
+ /**
+ * @output: Command stream slot output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_cs_output_iface *output;
+};
+
+/**
+ * struct panthor_fw_csg_iface - Firmware command stream group slot interface
+ */
+struct panthor_fw_csg_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_csg_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Command stream group slot control interface.
+ *
+ * Used to expose command stream group slot properties.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_csg_control_iface *control;
+
+ /**
+ * @input: Command stream group slot input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_csg_input_iface *input;
+
+ /**
+ * @output: Command stream group slot output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_csg_output_iface *output;
+};
+
+/**
+ * struct panthor_fw_global_iface - Firmware global interface
+ */
+struct panthor_fw_global_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_global_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler/FW management logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Global control interface.
+ *
+ * Used to expose global FW properties.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_global_control_iface *control;
+
+ /**
+ * @input: Global input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_global_input_iface *input;
+
+ /**
+ * @output: Global output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_global_output_iface *output;
+};
+
+/**
+ * panthor_fw_toggle_reqs() - Toggle acknowledge bits to send an event to the FW
+ * @__iface: The interface to operate on.
+ * @__in_reg: Name of the register to update in the input section of the interface.
+ * @__out_reg: Name of the register to take as a reference in the output section of the
+ * interface.
+ * @__mask: Mask to apply to the update.
+ *
+ * The Host -> FW event/message passing was designed to be lockless, with each side of
+ * the channel having its own writable section. Events are signaled as a difference
+ * between the host and FW side in the req/ack registers (when a bit differs, there's
+ * an event pending; when they are the same, nothing needs attention).
+ *
+ * This helper allows one to update the req register based on the current value of the
+ * ack register managed by the FW. Toggling a specific bit will flag an event. In order
+ * for events to be re-evaluated, the interface doorbell needs to be rung.
+ *
+ * Concurrent accesses to the same req register are covered.
+ *
+ * Anything requiring atomic updates to multiple registers requires a dedicated lock.
+ */
+#define panthor_fw_toggle_reqs(__iface, __in_reg, __out_reg, __mask) \
+ do { \
+ u32 __cur_val, __new_val, __out_val; \
+ spin_lock(&(__iface)->lock); \
+ __cur_val = READ_ONCE((__iface)->input->__in_reg); \
+ __out_val = READ_ONCE((__iface)->output->__out_reg); \
+ __new_val = ((__out_val ^ (__mask)) & (__mask)) | (__cur_val & ~(__mask)); \
+ WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
+ spin_unlock(&(__iface)->lock); \
+ } while (0)
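+
+/*
+ * Illustrative usage sketch (mirrors the ping path in panthor_fw.c): to raise
+ * a GLB_PING event, toggle the bit relative to the FW-owned ack register and
+ * then ring the global doorbell so the FW re-evaluates its req/ack state:
+ *
+ *	panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_PING);
+ *	gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+ */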
+
+/**
+ * panthor_fw_update_reqs() - Update bits to reflect a configuration change
+ * @__iface: The interface to operate on.
+ * @__in_reg: Name of the register to update in the input section of the interface.
+ * @__val: Value to set.
+ * @__mask: Mask to apply to the update.
+ *
+ * Some configuration values are passed through req registers that are also used
+ * to send events to the FW. Since those req registers can be updated from the
+ * interrupt handler, a dedicated helper is needed to safely update the
+ * configuration part as well.
+ *
+ * Concurrent accesses to the same req register are covered.
+ *
+ * Anything requiring atomic updates to multiple registers requires a dedicated lock.
+ */
+#define panthor_fw_update_reqs(__iface, __in_reg, __val, __mask) \
+ do { \
+ u32 __cur_val, __new_val; \
+ spin_lock(&(__iface)->lock); \
+ __cur_val = READ_ONCE((__iface)->input->__in_reg); \
+ __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \
+ WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
+ spin_unlock(&(__iface)->lock); \
+ } while (0)
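+
+/*
+ * Illustrative usage sketch (mirrors the pre-reset path in panthor_fw.c):
+ * request a FW halt, then clear the HALT bit again once the FW has stopped,
+ * since the FW only reacts to 0 -> 1 transitions of that bit:
+ *
+ *	panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
+ *	...
+ *	panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
+ */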
+
+struct panthor_fw_global_iface *
+panthor_fw_get_glb_iface(struct panthor_device *ptdev);
+
+struct panthor_fw_csg_iface *
+panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);
+
+struct panthor_fw_cs_iface *
+panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);
+
+int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
+ u32 *acked, u32 timeout_ms);
+
+int panthor_fw_glb_wait_acks(struct panthor_device *ptdev, u32 req_mask, u32 *acked,
+ u32 timeout_ms);
+
+void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_slot);
+
+struct panthor_kernel_bo *
+panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
+ struct panthor_fw_ringbuf_input_iface **input,
+ const struct panthor_fw_ringbuf_output_iface **output,
+ u32 *input_fw_va, u32 *output_fw_va);
+struct panthor_kernel_bo *
+panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size);
+
+struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev);
+
+void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang);
+int panthor_fw_post_reset(struct panthor_device *ptdev);
+
+static inline void panthor_fw_suspend(struct panthor_device *ptdev)
+{
+ panthor_fw_pre_reset(ptdev, false);
+}
+
+static inline int panthor_fw_resume(struct panthor_device *ptdev)
+{
+ return panthor_fw_post_reset(ptdev);
+}
+
+int panthor_fw_init(struct panthor_device *ptdev);
+void panthor_fw_unplug(struct panthor_device *ptdev);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
new file mode 100644
index 000000000000..d6483266d0c2
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_mmu.h"
+
+static void panthor_gem_free_object(struct drm_gem_object *obj)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+
+ drm_gem_free_mmap_offset(&bo->base.base);
+ mutex_destroy(&bo->gpuva_list_lock);
+ drm_gem_shmem_free(&bo->base);
+ drm_gem_object_put(vm_root_gem);
+}
+
+/**
+ * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
+ * @vm: The VM this BO was mapped to.
+ * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
+ * is skipped.
+ */
+void panthor_kernel_bo_destroy(struct panthor_vm *vm,
+ struct panthor_kernel_bo *bo)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ panthor_kernel_bo_vunmap(bo);
+
+ if (drm_WARN_ON(bo->obj->dev,
+ to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
+ goto out_free_bo;
+
+ ret = panthor_vm_unmap_range(vm, bo->va_node.start,
+ panthor_kernel_bo_size(bo));
+ if (ret)
+ goto out_free_bo;
+
+ panthor_vm_free_va(vm, &bo->va_node);
+ drm_gem_object_put(bo->obj);
+
+out_free_bo:
+ kfree(bo);
+}
+
+/**
+ * panthor_kernel_bo_create() - Create and map a GEM object to a VM
+ * @ptdev: Device.
+ * @vm: VM to map the GEM to. If NULL, the kernel object is not GPU mapped.
+ * @size: Size of the buffer object.
+ * @bo_flags: Combination of drm_panthor_bo_flags flags.
+ * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
+ * that are related to map operations).
+ * @gpu_va: GPU address assigned when mapping to the VM.
+ * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
+ * automatically allocated.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
+ size_t size, u32 bo_flags, u32 vm_map_flags,
+ u64 gpu_va)
+{
+ struct drm_gem_shmem_object *obj;
+ struct panthor_kernel_bo *kbo;
+ struct panthor_gem_object *bo;
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, !vm))
+ return ERR_PTR(-EINVAL);
+
+ kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
+ if (!kbo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = drm_gem_shmem_create(&ptdev->base, size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free_bo;
+ }
+
+ bo = to_panthor_bo(&obj->base);
+ size = obj->base.size;
+ kbo->obj = &obj->base;
+ bo->flags = bo_flags;
+
+ ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
+ if (ret)
+ goto err_put_obj;
+
+ ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
+ if (ret)
+ goto err_free_va;
+
+ bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
+ drm_gem_object_get(bo->exclusive_vm_root_gem);
+ bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
+ return kbo;
+
+err_free_va:
+ panthor_vm_free_va(vm, &kbo->va_node);
+
+err_put_obj:
+ drm_gem_object_put(&obj->base);
+
+err_free_bo:
+ kfree(kbo);
+ return ERR_PTR(ret);
+}
+
+static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+
+ /* Don't allow mmap on objects that have the NO_MMAP flag set. */
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
+ return -EINVAL;
+
+ return drm_gem_shmem_object_mmap(obj, vma);
+}
+
+static struct dma_buf *
+panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ /* We can't export GEMs that have an exclusive VM. */
+ if (to_panthor_bo(obj)->exclusive_vm_root_gem)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
+static const struct drm_gem_object_funcs panthor_gem_funcs = {
+ .free = panthor_gem_free_object,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = panthor_gem_mmap,
+ .export = panthor_gem_prime_export,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * panthor_gem_create_object - Implementation of driver->gem_create_object.
+ * @ddev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &panthor_gem_funcs;
+ obj->base.map_wc = !ptdev->coherent;
+ mutex_init(&obj->gpuva_list_lock);
+ drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+
+ return &obj->base.base;
+}
+
+/**
+ * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
+ * @size: Size of the GEM object to allocate.
+ * @flags: Combination of drm_panthor_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int
+panthor_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ struct panthor_vm *exclusive_vm,
+ u64 *size, u32 flags, u32 *handle)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct panthor_gem_object *bo;
+
+ shmem = drm_gem_shmem_create(ddev, *size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ bo = to_panthor_bo(&shmem->base);
+ bo->flags = flags;
+
+ if (exclusive_vm) {
+ bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
+ drm_gem_object_get(bo->exclusive_vm_root_gem);
+ bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
+ }
+
+ /*
+ * Allocate an ID for the object in the GEM handle table; the handle
+ * returned to userspace refers to this ID.
+ */
+ ret = drm_gem_handle_create(file, &shmem->base, handle);
+ if (!ret)
+ *size = bo->base.base.size;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&shmem->base);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
new file mode 100644
index 000000000000..3bccba394d00
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_GEM_H__
+#define __PANTHOR_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+#include <linux/iosys-map.h>
+#include <linux/rwsem.h>
+
+struct panthor_vm;
+
+/**
+ * struct panthor_gem_object - Driver specific GEM object.
+ */
+struct panthor_gem_object {
+ /** @base: Inherit from drm_gem_shmem_object. */
+ struct drm_gem_shmem_object base;
+
+ /**
+ * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
+ * is attached to.
+ *
+ * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
+ * different VM will fail.
+ *
+ * All FW memory objects have this field set to the root GEM of the MCU
+ * VM.
+ */
+ struct drm_gem_object *exclusive_vm_root_gem;
+
+ /**
+ * @gpuva_list_lock: Custom GPUVA lock.
+ *
+ * Used to protect insertion of drm_gpuva elements to the
+ * drm_gem_object.gpuva.list list.
+ *
+ * We can't use the GEM resv for that, because drm_gpuva_link() is
+ * called in a dma-signaling path, where we're not allowed to take
+ * resv locks.
+ */
+ struct mutex gpuva_list_lock;
+
+ /** @flags: Combination of drm_panthor_bo_flags flags. */
+ u32 flags;
+};
+
+/**
+ * struct panthor_kernel_bo - Kernel buffer object.
+ *
+ * These objects are only manipulated by the kernel driver and not
+ * directly exposed to userspace. The GPU address of a kernel
+ * BO might be passed to userspace though.
+ */
+struct panthor_kernel_bo {
+ /**
+ * @obj: The GEM object backing this kernel buffer object.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @va_node: VA space allocated to this GEM.
+ */
+ struct drm_mm_node va_node;
+
+ /**
+ * @kmap: Kernel CPU mapping of @obj.
+ */
+ void *kmap;
+};
+
+static inline
+struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
+}
+
+struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);
+
+struct drm_gem_object *
+panthor_gem_prime_import_sg_table(struct drm_device *ddev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+int
+panthor_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ struct panthor_vm *exclusive_vm,
+ u64 *size, u32 flags, uint32_t *handle);
+
+static inline u64
+panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
+{
+ return bo->va_node.start;
+}
+
+static inline size_t
+panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
+{
+ return bo->obj->size;
+}
+
+static inline int
+panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
+{
+ struct iosys_map map;
+ int ret;
+
+ if (bo->kmap)
+ return 0;
+
+ ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ if (ret)
+ return ret;
+
+ bo->kmap = map.vaddr;
+ return 0;
+}
+
+static inline void
+panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
+{
+ if (bo->kmap) {
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
+
+ drm_gem_vunmap_unlocked(bo->obj, &map);
+ bo->kmap = NULL;
+ }
+}
+
+struct panthor_kernel_bo *
+panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
+ size_t size, u32 bo_flags, u32 vm_map_flags,
+ u64 gpu_va);
+
+void panthor_kernel_bo_destroy(struct panthor_vm *vm,
+ struct panthor_kernel_bo *bo);
+
+#endif /* __PANTHOR_GEM_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
new file mode 100644
index 000000000000..5251d8764e7d
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_device.h"
+#include "panthor_gpu.h"
+#include "panthor_regs.h"
+
+/**
+ * struct panthor_gpu - GPU block management data.
+ */
+struct panthor_gpu {
+ /** @irq: GPU irq. */
+ struct panthor_irq irq;
+
+ /** @reqs_lock: Lock protecting access to pending_reqs. */
+ spinlock_t reqs_lock;
+
+ /** @pending_reqs: Pending GPU requests. */
+ u32 pending_reqs;
+
+ /** @reqs_acked: GPU request wait queue. */
+ wait_queue_head_t reqs_acked;
+};
+
+/**
+ * struct panthor_model - GPU model description
+ */
+struct panthor_model {
+ /** @name: Model name. */
+ const char *name;
+
+ /** @arch_major: Major version number of architecture. */
+ u8 arch_major;
+
+ /** @product_major: Major version number of product. */
+ u8 product_major;
+};
+
+/**
+ * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
+ * by a combination of the major architecture version and the major product
+ * version.
+ * @_name: Name for the GPU model.
+ * @_arch_major: Architecture major.
+ * @_product_major: Product major.
+ */
+#define GPU_MODEL(_name, _arch_major, _product_major) \
+{\
+ .name = __stringify(_name), \
+ .arch_major = _arch_major, \
+ .product_major = _product_major, \
+}
+
+static const struct panthor_model gpu_models[] = {
+ GPU_MODEL(g610, 10, 7),
+ {},
+};
+
+#define GPU_INTERRUPTS_MASK \
+ (GPU_IRQ_FAULT | \
+ GPU_IRQ_PROTM_FAULT | \
+ GPU_IRQ_RESET_COMPLETED | \
+ GPU_IRQ_CLEAN_CACHES_COMPLETED)
+
+static void panthor_gpu_init_info(struct panthor_device *ptdev)
+{
+ const struct panthor_model *model;
+ u32 arch_major, product_major;
+ u32 major, minor, status;
+ unsigned int i;
+
+ ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
+ ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
+ ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
+
+ ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
+
+ ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
+ ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
+
+ ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
+ ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
+
+ ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
+ ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+
+ arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
+ product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
+ major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
+ minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
+ status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
+
+ for (model = gpu_models; model->name; model++) {
+ if (model->arch_major == arch_major &&
+ model->product_major == product_major)
+ break;
+ }
+
+ drm_info(&ptdev->base,
+ "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
+ major, minor, status);
+
+ drm_info(&ptdev->base,
+ "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
+ ptdev->gpu_info.l2_features,
+ ptdev->gpu_info.tiler_features,
+ ptdev->gpu_info.mem_features,
+ ptdev->gpu_info.mmu_features,
+ ptdev->gpu_info.as_present);
+
+ drm_info(&ptdev->base,
+ "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
+ ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
+ ptdev->gpu_info.tiler_present);
+}
+
+static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ if (status & GPU_IRQ_FAULT) {
+ u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
+ u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
+ gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+
+ drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
+ address);
+ }
+ if (status & GPU_IRQ_PROTM_FAULT)
+ drm_warn(&ptdev->base, "GPU Fault in protected mode\n");
+
+ spin_lock(&ptdev->gpu->reqs_lock);
+ if (status & ptdev->gpu->pending_reqs) {
+ ptdev->gpu->pending_reqs &= ~status;
+ wake_up_all(&ptdev->gpu->reqs_acked);
+ }
+ spin_unlock(&ptdev->gpu->reqs_lock);
+}
+PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
+
+/**
+ * panthor_gpu_unplug() - Called when the GPU is unplugged.
+ * @ptdev: Device to unplug.
+ */
+void panthor_gpu_unplug(struct panthor_device *ptdev)
+{
+ unsigned long flags;
+
+ /* Make sure the IRQ handler is not running after that point. */
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+
+ /* Wake-up all waiters. */
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ ptdev->gpu->pending_reqs = 0;
+ wake_up_all(&ptdev->gpu->reqs_acked);
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+}
+
+/**
+ * panthor_gpu_init() - Initialize the GPU block
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_init(struct panthor_device *ptdev)
+{
+ struct panthor_gpu *gpu;
+ u32 pa_bits;
+ int ret, irq;
+
+ gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ spin_lock_init(&gpu->reqs_lock);
+ init_waitqueue_head(&gpu->reqs_acked);
+ ptdev->gpu = gpu;
+ panthor_gpu_init_info(ptdev);
+
+ dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
+ pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
+ ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
+ if (irq < 0)
+ return irq;
+
+ ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
+ * @ptdev: Device.
+ * @blk_name: Block name.
+ * @pwroff_reg: Power-off register for this block.
+ * @pwrtrans_reg: Power transition register for this block.
+ * @mask: Sub-elements to power-off.
+ * @timeout_us: Timeout in microseconds.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_block_power_off(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwroff_reg, u32 pwrtrans_reg,
+ u64 mask, u32 timeout_us)
+{
+ u32 val, i;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ if (mask & GENMASK(31, 0))
+ gpu_write(ptdev, pwroff_reg, mask);
+
+ if (mask >> 32)
+ gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
+ * @ptdev: Device.
+ * @blk_name: Block name.
+ * @pwron_reg: Power-on register for this block.
+ * @pwrtrans_reg: Power transition register for this block.
+ * @rdy_reg: Power transition ready register.
+ * @mask: Sub-elements to power-on.
+ * @timeout_us: Timeout in microseconds.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_block_power_on(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwron_reg, u32 pwrtrans_reg,
+ u32 rdy_reg, u64 mask, u32 timeout_us)
+{
+ u32 val, i;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ if (mask & GENMASK(31, 0))
+ gpu_write(ptdev, pwron_reg, mask);
+
+ if (mask >> 32)
+ gpu_write(ptdev, pwron_reg + 4, mask >> 32);
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
+ val, (mask32 & val) == mask32,
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_l2_power_on() - Power-on the L2-cache
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
+{
+ if (ptdev->gpu_info.l2_present != 1) {
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
+ */
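+ /*
+ * Illustrative example: with l2_present = 0b101 (two L2s),
+ * ~(l2_present - 1) = ~0b100 and (l2_present - 2) = 0b011, so
+ * core_mask = 0b011, i.e. only the shader cores below the second
+ * L2 bit (the first core group) are kept.
+ */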
+ u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
+ (ptdev->gpu_info.l2_present - 2);
+ drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(ptdev->gpu_info.shader_present));
+ }
+
+ return panthor_gpu_power_on(ptdev, L2, 1, 20000);
+}
+
+/**
+ * panthor_gpu_flush_caches() - Flush caches
+ * @ptdev: Device.
+ * @l2: L2 flush type.
+ * @lsc: LSC flush type.
+ * @other: Other flush type.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_flush_caches(struct panthor_device *ptdev,
+ u32 l2, u32 lsc, u32 other)
+{
+ bool timedout = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if (!drm_WARN_ON(&ptdev->base,
+ ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
+ ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
+ gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
+ }
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+
+ if (!wait_event_timeout(ptdev->gpu->reqs_acked,
+ !(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
+ msecs_to_jiffies(100))) {
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
+ !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
+ timedout = true;
+ else
+ ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+ }
+
+ if (timedout) {
+ drm_err(&ptdev->base, "Flush caches timeout");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_soft_reset() - Issue a soft-reset
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_soft_reset(struct panthor_device *ptdev)
+{
+ bool timedout = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if (!drm_WARN_ON(&ptdev->base,
+ ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
+ ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
+ gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
+ }
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+
+ if (!wait_event_timeout(ptdev->gpu->reqs_acked,
+ !(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
+ msecs_to_jiffies(100))) {
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
+ !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
+ timedout = true;
+ else
+ ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+ }
+
+ if (timedout) {
+ drm_err(&ptdev->base, "Soft reset timeout");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_suspend() - Suspend the GPU block.
+ * @ptdev: Device.
+ *
+ * Suspend the GPU irq. This should be called last in the suspend procedure,
+ * after all other blocks have been suspended.
+ */
+void panthor_gpu_suspend(struct panthor_device *ptdev)
+{
+ /*
+ * It may be preferable to simply power down the L2, but for now just
+ * soft-reset, which will leave the L2 powered down.
+ */
+ panthor_gpu_soft_reset(ptdev);
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+}
+
+/**
+ * panthor_gpu_resume() - Resume the GPU block.
+ * @ptdev: Device.
+ *
+ * Resume the IRQ handler and power-on the L2-cache.
+ * The FW takes care of powering the other blocks.
+ */
+void panthor_gpu_resume(struct panthor_device *ptdev)
+{
+ panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
+ panthor_gpu_l2_power_on(ptdev);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
new file mode 100644
index 000000000000..bba7555dd3c6
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANTHOR_GPU_H__
+#define __PANTHOR_GPU_H__
+
+struct panthor_device;
+
+int panthor_gpu_init(struct panthor_device *ptdev);
+void panthor_gpu_unplug(struct panthor_device *ptdev);
+void panthor_gpu_suspend(struct panthor_device *ptdev);
+void panthor_gpu_resume(struct panthor_device *ptdev);
+
+int panthor_gpu_block_power_on(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwron_reg, u32 pwrtrans_reg,
+ u32 rdy_reg, u64 mask, u32 timeout_us);
+int panthor_gpu_block_power_off(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwroff_reg, u32 pwrtrans_reg,
+ u64 mask, u32 timeout_us);
+
+/**
+ * panthor_gpu_power_on() - Power on the GPU block.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
+ panthor_gpu_block_power_on(ptdev, #type, \
+ type ## _PWRON_LO, \
+ type ## _PWRTRANS_LO, \
+ type ## _READY_LO, \
+ mask, timeout_us)
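+
+/*
+ * Illustrative expansion sketch: panthor_gpu_power_on(ptdev, L2, 1, 20000)
+ * (as used by panthor_gpu_l2_power_on()) expands to
+ * panthor_gpu_block_power_on(ptdev, "L2", L2_PWRON_LO, L2_PWRTRANS_LO,
+ * L2_READY_LO, 1, 20000), with the register names built by token pasting.
+ * The L2_* register offsets are assumed to come from panthor_regs.h.
+ */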
+
+/**
+ * panthor_gpu_power_off() - Power off the GPU block.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
+ panthor_gpu_block_power_off(ptdev, #type, \
+ type ## _PWROFF_LO, \
+ type ## _PWRTRANS_LO, \
+ mask, timeout_us)
+
+int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
+int panthor_gpu_flush_caches(struct panthor_device *ptdev,
+ u32 l2, u32 lsc, u32 other);
+int panthor_gpu_soft_reset(struct panthor_device *ptdev);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
new file mode 100644
index 000000000000..143fa35f2e74
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/iosys-map.h>
+#include <linux/rwsem.h>
+
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+
+/*
+ * The GPU heap context is an opaque structure used by the GPU to track the
+ * heap allocations. The driver should only touch it to initialize it (zero all
+ * fields). Because the CPU and GPU can both access this structure, it is
+ * required to be GPU cache-line aligned.
+ */
+#define HEAP_CONTEXT_SIZE 32
+
+/**
+ * struct panthor_heap_chunk_header - Heap chunk header
+ */
+struct panthor_heap_chunk_header {
+ /**
+ * @next: Next heap chunk in the list.
+ *
+ * This is a GPU VA.
+ */
+ u64 next;
+
+ /** @unknown: MBZ. */
+ u32 unknown[14];
+};
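+
+/*
+ * Note on chunk linking (derived from the chunk allocation/growth code below):
+ * the @next field packs the 4k-aligned GPU VA of the next chunk in its upper
+ * bits and the chunk size, expressed in 4k units (chunk_size >> 12), in its
+ * low 12 bits, e.g.:
+ *
+ *	hdr->next = (next_chunk_gpuva & GENMASK_ULL(63, 12)) | (chunk_size >> 12);
+ */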
+
+/**
+ * struct panthor_heap_chunk - Structure used to keep track of allocated heap chunks.
+ */
+struct panthor_heap_chunk {
+ /** @node: Used to insert the heap chunk in panthor_heap::chunks. */
+ struct list_head node;
+
+ /** @bo: Buffer object backing the heap chunk. */
+ struct panthor_kernel_bo *bo;
+};
+
+/**
+ * struct panthor_heap - Structure used to manage tiler heap contexts.
+ */
+struct panthor_heap {
+ /** @chunks: List containing all heap chunks allocated so far. */
+ struct list_head chunks;
+
+ /** @lock: Lock protecting insertion in the chunks list. */
+ struct mutex lock;
+
+ /** @chunk_size: Size of each chunk. */
+ u32 chunk_size;
+
+ /** @max_chunks: Maximum number of chunks. */
+ u32 max_chunks;
+
+ /**
+ * @target_in_flight: Number of in-flight render passes after which
+ * we'd let the FW wait for fragment jobs to finish instead of allocating new chunks.
+ */
+ u32 target_in_flight;
+
+ /** @chunk_count: Number of heap chunks currently allocated. */
+ u32 chunk_count;
+};
+
+#define MAX_HEAPS_PER_POOL 128
+
+/**
+ * struct panthor_heap_pool - Pool of heap contexts
+ *
+ * The pool is attached to a panthor_file and can't be shared across processes.
+ */
+struct panthor_heap_pool {
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @vm: VM this pool is bound to. */
+ struct panthor_vm *vm;
+
+ /** @lock: Lock protecting access to @xa. */
+ struct rw_semaphore lock;
+
+ /** @xa: Array storing panthor_heap objects. */
+ struct xarray xa;
+
+ /** @gpu_contexts: Buffer object containing the GPU heap contexts. */
+ struct panthor_kernel_bo *gpu_contexts;
+};
+
+static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
+{
+ u32 l2_features = ptdev->gpu_info.l2_features;
+ u32 gpu_cache_line_size = GPU_L2_FEATURES_LINE_SIZE(l2_features);
+
+ return ALIGN(HEAP_CONTEXT_SIZE, gpu_cache_line_size);
+}
+
+static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
+{
+ return panthor_heap_ctx_stride(pool->ptdev) * id;
+}
+
+static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
+{
+ return pool->gpu_contexts->kmap +
+ panthor_get_heap_ctx_offset(pool, id);
+}
+
+static void panthor_free_heap_chunk(struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ struct panthor_heap_chunk *chunk)
+{
+ mutex_lock(&heap->lock);
+ list_del(&chunk->node);
+ heap->chunk_count--;
+ mutex_unlock(&heap->lock);
+
+ panthor_kernel_bo_destroy(vm, chunk->bo);
+ kfree(chunk);
+}
+
+static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
+ struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ bool initial_chunk)
+{
+ struct panthor_heap_chunk *chunk;
+ struct panthor_heap_chunk_header *hdr;
+ int ret;
+
+ chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(chunk->bo)) {
+ ret = PTR_ERR(chunk->bo);
+ goto err_free_chunk;
+ }
+
+ ret = panthor_kernel_bo_vmap(chunk->bo);
+ if (ret)
+ goto err_destroy_bo;
+
+ hdr = chunk->bo->kmap;
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (initial_chunk && !list_empty(&heap->chunks)) {
+ struct panthor_heap_chunk *prev_chunk;
+ u64 prev_gpuva;
+
+ prev_chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+
+ prev_gpuva = panthor_kernel_bo_gpuva(prev_chunk->bo);
+ hdr->next = (prev_gpuva & GENMASK_ULL(63, 12)) |
+ (heap->chunk_size >> 12);
+ }
+
+ panthor_kernel_bo_vunmap(chunk->bo);
+
+ mutex_lock(&heap->lock);
+ list_add(&chunk->node, &heap->chunks);
+ heap->chunk_count++;
+ mutex_unlock(&heap->lock);
+
+ return 0;
+
+err_destroy_bo:
+ panthor_kernel_bo_destroy(vm, chunk->bo);
+
+err_free_chunk:
+ kfree(chunk);
+
+ return ret;
+}
+
+static void panthor_free_heap_chunks(struct panthor_vm *vm,
+ struct panthor_heap *heap)
+{
+ struct panthor_heap_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
+ panthor_free_heap_chunk(vm, heap, chunk);
+}
+
+static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
+ struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ u32 chunk_count)
+{
+ int ret;
+ u32 i;
+
+ for (i = 0; i < chunk_count; i++) {
+ ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
+{
+ struct panthor_heap *heap;
+
+ heap = xa_erase(&pool->xa, handle);
+ if (!heap)
+ return -EINVAL;
+
+ panthor_free_heap_chunks(pool->vm, heap);
+ mutex_destroy(&heap->lock);
+ kfree(heap);
+ return 0;
+}
+
+/**
+ * panthor_heap_destroy() - Destroy a heap context
+ * @pool: Pool this context belongs to.
+ * @handle: Handle returned by panthor_heap_create().
+ */
+int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
+{
+ int ret;
+
+ down_write(&pool->lock);
+ ret = panthor_heap_destroy_locked(pool, handle);
+ up_write(&pool->lock);
+
+ return ret;
+}
+
+/**
+ * panthor_heap_create() - Create a heap context
+ * @pool: Pool to instantiate the heap context from.
+ * @initial_chunk_count: Number of chunks allocated at initialization time.
+ * Must be at least 1.
+ * @chunk_size: The size of each chunk. Must be a power of two between 256k
+ * and 2M.
+ * @max_chunks: Maximum number of chunks that can be allocated.
+ * @target_in_flight: Maximum number of in-flight render passes.
+ * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
+ * context.
+ * @first_chunk_gpu_va: Pointer holding the GPU address of the first chunk
+ * assigned to the heap context.
+ *
+ * Return: a positive handle on success, a negative error otherwise.
+ */
+int panthor_heap_create(struct panthor_heap_pool *pool,
+ u32 initial_chunk_count,
+ u32 chunk_size,
+ u32 max_chunks,
+ u32 target_in_flight,
+ u64 *heap_ctx_gpu_va,
+ u64 *first_chunk_gpu_va)
+{
+ struct panthor_heap *heap;
+ struct panthor_heap_chunk *first_chunk;
+ struct panthor_vm *vm;
+ int ret = 0;
+ u32 id;
+
+ if (initial_chunk_count == 0)
+ return -EINVAL;
+
+ if (hweight32(chunk_size) != 1 ||
+ chunk_size < SZ_256K || chunk_size > SZ_2M)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ vm = panthor_vm_get(pool->vm);
+ up_read(&pool->lock);
+
+ /* The pool has been destroyed, we can't create a new heap. */
+ if (!vm)
+ return -EINVAL;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap) {
+ ret = -ENOMEM;
+ goto err_put_vm;
+ }
+
+ mutex_init(&heap->lock);
+ INIT_LIST_HEAD(&heap->chunks);
+ heap->chunk_size = chunk_size;
+ heap->max_chunks = max_chunks;
+ heap->target_in_flight = target_in_flight;
+
+ ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
+ initial_chunk_count);
+ if (ret)
+ goto err_free_heap;
+
+ first_chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+ *first_chunk_gpu_va = panthor_kernel_bo_gpuva(first_chunk->bo);
+
+ down_write(&pool->lock);
+ /* The pool has been destroyed, we can't create a new heap. */
+ if (!pool->vm) {
+ ret = -EINVAL;
+ } else {
+ ret = xa_alloc(&pool->xa, &id, heap, XA_LIMIT(1, MAX_HEAPS_PER_POOL), GFP_KERNEL);
+ if (!ret) {
+ void *gpu_ctx = panthor_get_heap_ctx(pool, id);
+
+ memset(gpu_ctx, 0, panthor_heap_ctx_stride(pool->ptdev));
+ *heap_ctx_gpu_va = panthor_kernel_bo_gpuva(pool->gpu_contexts) +
+ panthor_get_heap_ctx_offset(pool, id);
+ }
+ }
+ up_write(&pool->lock);
+
+ if (ret)
+ goto err_free_heap;
+
+ panthor_vm_put(vm);
+ return id;
+
+err_free_heap:
+ panthor_free_heap_chunks(pool->vm, heap);
+ mutex_destroy(&heap->lock);
+ kfree(heap);
+
+err_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+/**
+ * panthor_heap_return_chunk() - Return an unused heap chunk
+ * @pool: The pool this heap belongs to.
+ * @heap_gpu_va: The GPU address of the heap context.
+ * @chunk_gpu_va: The chunk VA to return.
+ *
+ * This function is used when a chunk allocated with panthor_heap_grow()
+ * couldn't be linked to the heap context through the FW interface because
+ * the group requesting the allocation was scheduled out in the meantime.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u64 chunk_gpu_va)
+{
+ u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
+ u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
+ struct panthor_heap_chunk *chunk, *tmp, *removed = NULL;
+ struct panthor_heap *heap;
+ int ret;
+
+ if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ heap = xa_load(&pool->xa, heap_id);
+ if (!heap) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ chunk_gpu_va &= GENMASK_ULL(63, 12);
+
+ mutex_lock(&heap->lock);
+ list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {
+ if (panthor_kernel_bo_gpuva(chunk->bo) == chunk_gpu_va) {
+ removed = chunk;
+ list_del(&chunk->node);
+ heap->chunk_count--;
+ break;
+ }
+ }
+ mutex_unlock(&heap->lock);
+
+ if (removed) {
+ panthor_kernel_bo_destroy(pool->vm, chunk->bo);
+ kfree(chunk);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+out_unlock:
+ up_read(&pool->lock);
+ return ret;
+}
+
+/**
+ * panthor_heap_grow() - Make a heap context grow.
+ * @pool: The pool this heap belongs to.
+ * @heap_gpu_va: The GPU address of the heap context.
+ * @renderpasses_in_flight: Number of render passes currently in-flight.
+ * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
+ * @new_chunk_gpu_va: Pointer used to return the chunk VA.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_heap_grow(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u32 renderpasses_in_flight,
+ u32 pending_frag_count,
+ u64 *new_chunk_gpu_va)
+{
+ u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
+ u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
+ struct panthor_heap_chunk *chunk;
+ struct panthor_heap *heap;
+ int ret;
+
+ if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ heap = xa_load(&pool->xa, heap_id);
+ if (!heap) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* If we reached the target in-flight render passes, or if we
+ * reached the maximum number of chunks, let the FW figure out another way to
+ * find some memory (wait for render passes to finish, or call the exception
+ * handler provided by the userspace driver, if any).
+ */
+ if (renderpasses_in_flight > heap->target_in_flight ||
+ (pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ } else if (heap->chunk_count >= heap->max_chunks) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* FIXME: panthor_alloc_heap_chunk() triggers a kernel BO creation,
+ * which goes through the blocking allocation path. Ultimately, we
+ * want a non-blocking allocation, so we can immediately report to the
+ * FW when the system is running out of memory. In that case, the FW
+ * can call a user-provided exception handler, which might try to free
+ * some tiler memory by issuing an intermediate fragment job. If the
+ * exception handler can't do anything, it will flag the queue as
+ * faulty so the job that triggered this tiler chunk allocation and all
+ * further jobs in this queue fail immediately instead of having to
+ * wait for the job timeout.
+ */
+ ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+ if (ret)
+ goto out_unlock;
+
+ chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+ *new_chunk_gpu_va = (panthor_kernel_bo_gpuva(chunk->bo) & GENMASK_ULL(63, 12)) |
+ (heap->chunk_size >> 12);
+ ret = 0;
+
+out_unlock:
+ up_read(&pool->lock);
+ return ret;
+}
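+
+/* Illustrative only: the value stored in *new_chunk_gpu_va above packs the
+ * 4k-aligned chunk GPU address in the upper bits and chunk_size >> 12 in the
+ * lower 12 bits. For instance, a 2MiB chunk mapped at GPU VA 0x800000000
+ * yields (0x800000000 & GENMASK_ULL(63, 12)) | (SZ_2M >> 12) = 0x800000200.
+ */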
+
+static void panthor_heap_pool_release(struct kref *refcount)
+{
+ struct panthor_heap_pool *pool =
+ container_of(refcount, struct panthor_heap_pool, refcount);
+
+ xa_destroy(&pool->xa);
+ kfree(pool);
+}
+
+/**
+ * panthor_heap_pool_put() - Release a heap pool reference
+ * @pool: Pool to release the reference on. Can be NULL.
+ */
+void panthor_heap_pool_put(struct panthor_heap_pool *pool)
+{
+ if (pool)
+ kref_put(&pool->refcount, panthor_heap_pool_release);
+}
+
+/**
+ * panthor_heap_pool_get() - Get a heap pool reference
+ * @pool: Pool to get the reference on. Can be NULL.
+ *
+ * Return: @pool.
+ */
+struct panthor_heap_pool *
+panthor_heap_pool_get(struct panthor_heap_pool *pool)
+{
+ if (pool)
+ kref_get(&pool->refcount);
+
+ return pool;
+}
+
+/**
+ * panthor_heap_pool_create() - Create a heap pool
+ * @ptdev: Device.
+ * @vm: The VM this heap pool will be attached to.
+ *
+ * Heap pools might contain up to 128 heap contexts, and are per-VM.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_heap_pool *
+panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
+{
+ size_t bosize = ALIGN(MAX_HEAPS_PER_POOL *
+ panthor_heap_ctx_stride(ptdev),
+ 4096);
+ struct panthor_heap_pool *pool;
+ int ret = 0;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* We want a weak ref here: the heap pool belongs to the VM, so we're
+ * sure that, as long as the heap pool exists, the VM exists too.
+ */
+ pool->vm = vm;
+ pool->ptdev = ptdev;
+ init_rwsem(&pool->lock);
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
+ kref_init(&pool->refcount);
+
+ pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(pool->gpu_contexts)) {
+ ret = PTR_ERR(pool->gpu_contexts);
+ goto err_destroy_pool;
+ }
+
+ ret = panthor_kernel_bo_vmap(pool->gpu_contexts);
+ if (ret)
+ goto err_destroy_pool;
+
+ return pool;
+
+err_destroy_pool:
+ panthor_heap_pool_destroy(pool);
+ return ERR_PTR(ret);
+}
+
+/**
+ * panthor_heap_pool_destroy() - Destroy a heap pool.
+ * @pool: Pool to destroy.
+ *
+ * This function destroys all heap contexts and their resources, thus
+ * preventing any use of the heap contexts or the chunks attached to them
+ * after that point.
+ *
+ * If the GPU still has access to some heap contexts, a fault should be
+ * triggered, which should flag the command stream groups using these
+ * contexts as faulty.
+ *
+ * The heap pool object is only released when all references to this pool
+ * are released.
+ */
+void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
+{
+ struct panthor_heap *heap;
+ unsigned long i;
+
+ if (!pool)
+ return;
+
+ down_write(&pool->lock);
+ xa_for_each(&pool->xa, i, heap)
+ drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
+
+ if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+ panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);
+
+ /* Reflects the fact that the pool has been destroyed. */
+ pool->vm = NULL;
+ up_write(&pool->lock);
+
+ panthor_heap_pool_put(pool);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
new file mode 100644
index 000000000000..25a5f2bba445
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_heap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_HEAP_H__
+#define __PANTHOR_HEAP_H__
+
+#include <linux/types.h>
+
+struct panthor_device;
+struct panthor_heap_pool;
+struct panthor_vm;
+
+int panthor_heap_create(struct panthor_heap_pool *pool,
+ u32 initial_chunk_count,
+ u32 chunk_size,
+ u32 max_chunks,
+ u32 target_in_flight,
+ u64 *heap_ctx_gpu_va,
+ u64 *first_chunk_gpu_va);
+int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle);
+
+struct panthor_heap_pool *
+panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm);
+void panthor_heap_pool_destroy(struct panthor_heap_pool *pool);
+
+struct panthor_heap_pool *
+panthor_heap_pool_get(struct panthor_heap_pool *pool);
+void panthor_heap_pool_put(struct panthor_heap_pool *pool);
+
+int panthor_heap_grow(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u32 renderpasses_in_flight,
+ u32 pending_frag_count,
+ u64 *new_chunk_gpu_va);
+int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u64 chunk_gpu_va);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
new file mode 100644
index 000000000000..fa0a002b1016
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -0,0 +1,2774 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/kmemleak.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
+#include <linux/sizes.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+#define MAX_AS_SLOTS 32
+
+struct panthor_vm;
+
+/**
+ * struct panthor_as_slot - Address space slot
+ */
+struct panthor_as_slot {
+ /** @vm: VM bound to this slot. NULL if no VM is bound. */
+ struct panthor_vm *vm;
+};
+
+/**
+ * struct panthor_mmu - MMU related data
+ */
+struct panthor_mmu {
+ /** @irq: The MMU irq. */
+ struct panthor_irq irq;
+
+ /** @as: Address space related fields.
+ *
+ * The GPU has a limited number of address space (AS) slots, forcing
+ * us to re-assign them on-demand.
+ */
+ struct {
+ /** @slots_lock: Lock protecting access to all other AS fields. */
+ struct mutex slots_lock;
+
+ /** @alloc_mask: Bitmask encoding the allocated slots. */
+ unsigned long alloc_mask;
+
+ /** @faulty_mask: Bitmask encoding the faulty slots. */
+ unsigned long faulty_mask;
+
+ /** @slots: VMs currently bound to the AS slots. */
+ struct panthor_as_slot slots[MAX_AS_SLOTS];
+
+ /**
+ * @lru_list: List of least recently used VMs.
+ *
+ * We use this list to pick a VM to evict when all slots are
+ * used.
+ *
+ * There should be no more active VMs than there are AS slots,
+ * so this LRU is just here to keep VMs bound until there's
+ * a need to release a slot, thus avoiding unnecessary TLB/cache
+ * flushes.
+ */
+ struct list_head lru_list;
+ } as;
+
+ /** @vm: VMs management fields */
+ struct {
+ /** @lock: Lock protecting access to list. */
+ struct mutex lock;
+
+ /** @list: List containing all VMs. */
+ struct list_head list;
+
+ /** @reset_in_progress: True if a reset is in progress. */
+ bool reset_in_progress;
+
+ /** @wq: Workqueue used for the VM_BIND queues. */
+ struct workqueue_struct *wq;
+ } vm;
+};
+
+/**
+ * struct panthor_vm_pool - VM pool object
+ */
+struct panthor_vm_pool {
+ /** @xa: Array used for VM handle tracking. */
+ struct xarray xa;
+};
+
+/**
+ * struct panthor_vma - GPU mapping object
+ *
+ * This is used to track GEM mappings in GPU space.
+ */
+struct panthor_vma {
+ /** @base: Inherits from drm_gpuva. */
+ struct drm_gpuva base;
+
+ /** @node: Used to implement deferred release of VMAs. */
+ struct list_head node;
+
+ /**
+ * @flags: Combination of drm_panthor_vm_bind_op_flags.
+ *
+ * Only map related flags are accepted.
+ */
+ u32 flags;
+};
+
+/**
+ * struct panthor_vm_op_ctx - VM operation context
+ *
+ * With VM operations potentially taking place in a dma-signaling path, we
+ * need to make sure everything that might require resource allocation is
+ * pre-allocated upfront. This is what this operation context is for.
+ *
+ * We also collect resources that have been freed, so we can release them
+ * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
+ * request.
+ */
+struct panthor_vm_op_ctx {
+ /** @rsvd_page_tables: Pages reserved for the MMU page table update. */
+ struct {
+ /** @count: Number of pages reserved. */
+ u32 count;
+
+ /** @ptr: Points to the first unused page in the @pages table. */
+ u32 ptr;
+
+ /**
+ * @pages: Array of pages that can be used for an MMU page table update.
+ *
+ * After a VM operation, there might be free pages left in this array.
+ * They should be returned to the pt_cache as part of the op_ctx cleanup.
+ */
+ void **pages;
+ } rsvd_page_tables;
+
+ /**
+ * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
+ *
+ * Partial unmap requests or map requests overlapping existing mappings will
+ * trigger a remap call, which needs to register up to three panthor_vma objects
+ * (one for the new mapping, and two for the previous and next mappings).
+ */
+ struct panthor_vma *preallocated_vmas[3];
+
+ /** @flags: Combination of drm_panthor_vm_bind_op_flags. */
+ u32 flags;
+
+ /** @va: Virtual range targeted by the VM operation. */
+ struct {
+ /** @addr: Start address. */
+ u64 addr;
+
+ /** @range: Range size. */
+ u64 range;
+ } va;
+
+ /**
+ * @returned_vmas: List of panthor_vma objects returned after a VM operation.
+ *
+ * For unmap operations, this will contain all VMAs that were covered by the
+ * specified VA range.
+ *
+ * For map operations, this will contain all VMAs that previously mapped to
+ * the specified VA range.
+ *
+ * Those VMAs, and the resources they point to will be released as part of
+ * the op_ctx cleanup operation.
+ */
+ struct list_head returned_vmas;
+
+ /** @map: Fields specific to a map operation. */
+ struct {
+ /** @vm_bo: Buffer object to map. */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /** @bo_offset: Offset in the buffer object. */
+ u64 bo_offset;
+
+ /**
+ * @sgt: sg-table pointing to pages backing the GEM object.
+ *
+ * This is gathered at job creation time, such that we don't have
+ * to allocate in ::run_job().
+ */
+ struct sg_table *sgt;
+
+ /**
+ * @new_vma: The new VMA object that will be inserted to the VA tree.
+ */
+ struct panthor_vma *new_vma;
+ } map;
+};
+
+/**
+ * struct panthor_vm - VM object
+ *
+ * A VM is an object representing a GPU (or MCU) virtual address space.
+ * It embeds the MMU page table for this address space, a tree containing
+ * all the virtual mappings of GEM objects, and other things needed to manage
+ * the VM.
+ *
+ * Except for the MCU VM, which is managed by the kernel, all other VMs are
+ * created by userspace and mostly managed by userspace, using the
+ * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
+ *
+ * A portion of the virtual address space is reserved for kernel objects,
+ * like heap chunks, and userspace gets to decide how much of the virtual
+ * address space is left to the kernel (half of the virtual address space
+ * by default).
+ */
+struct panthor_vm {
+ /**
+ * @base: Inherit from drm_gpuvm.
+ *
+ * We delegate all the VA management to the common drm_gpuvm framework
+ * and only implement hooks to update the MMU page table.
+ */
+ struct drm_gpuvm base;
+
+ /**
+ * @sched: Scheduler used for asynchronous VM_BIND requests.
+ *
+ * We use a 1:1 scheduler here.
+ */
+ struct drm_gpu_scheduler sched;
+
+ /**
+ * @entity: Scheduling entity representing the VM_BIND queue.
+ *
+ * There's currently one bind queue per VM. It doesn't make sense to
+ * allow more, given that VM operations are serialized anyway.
+ */
+ struct drm_sched_entity entity;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @memattr: Value to program to the AS_MEMATTR register. */
+ u64 memattr;
+
+ /** @pgtbl_ops: Page table operations. */
+ struct io_pgtable_ops *pgtbl_ops;
+
+ /** @root_page_table: Stores the root page table pointer. */
+ void *root_page_table;
+
+ /**
+ * @op_lock: Lock used to serialize operations on a VM.
+ *
+ * The serialization of jobs queued to the VM_BIND queue is already
+ * taken care of by drm_sched, but we need to serialize synchronous
+ * and asynchronous VM_BIND requests. This is what this lock is for.
+ */
+ struct mutex op_lock;
+
+ /**
+ * @op_ctx: The context attached to the currently executing VM operation.
+ *
+ * NULL when no operation is in progress.
+ */
+ struct panthor_vm_op_ctx *op_ctx;
+
+ /**
+ * @mm: Memory management object representing the auto-VA/kernel-VA.
+ *
+ * Used to auto-allocate VA space for kernel-managed objects (tiler
+ * heaps, ...).
+ *
+ * For the MCU VM, this is managing the VA range that's used to map
+ * all shared interfaces.
+ *
+ * For user VMs, the range is specified by userspace, and must not
+ * exceed half of the addressable VA space.
+ */
+ struct drm_mm mm;
+
+ /** @mm_lock: Lock protecting the @mm field. */
+ struct mutex mm_lock;
+
+ /** @kernel_auto_va: Automatic VA-range for kernel BOs. */
+ struct {
+ /** @start: Start of the automatic VA-range for kernel BOs. */
+ u64 start;
+
+ /** @end: End of the automatic VA-range for kernel BOs. */
+ u64 end;
+ } kernel_auto_va;
+
+ /** @as: Address space related fields. */
+ struct {
+ /**
+ * @id: ID of the address space this VM is bound to.
+ *
+ * A value of -1 means the VM is inactive/not bound.
+ */
+ int id;
+
+ /** @active_cnt: Number of active users of this VM. */
+ refcount_t active_cnt;
+
+ /**
+ * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
+ *
+ * Active VMs should not be inserted in the LRU list.
+ */
+ struct list_head lru_node;
+ } as;
+
+ /**
+ * @heaps: Tiler heap related fields.
+ */
+ struct {
+ /**
+ * @pool: The heap pool attached to this VM.
+ *
+ * Will stay NULL until someone creates a heap context on this VM.
+ */
+ struct panthor_heap_pool *pool;
+
+ /** @lock: Lock used to protect access to @pool. */
+ struct mutex lock;
+ } heaps;
+
+ /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
+ struct list_head node;
+
+ /** @for_mcu: True if this is the MCU VM. */
+ bool for_mcu;
+
+ /**
+ * @destroyed: True if the VM was destroyed.
+ *
+ * No further bind requests should be queued to a destroyed VM.
+ */
+ bool destroyed;
+
+ /**
+ * @unusable: True if the VM has turned unusable because something
+ * bad happened during an asynchronous request.
+ *
+ * We don't try to recover from such failures, because this implies
+ * informing userspace about the specific operation that failed, and
+ * hoping the userspace driver can replay things from there. This all
+ * sounds very complicated for little gain.
+ *
+ * Instead, we should just flag the VM as unusable, and fail any
+ * further requests targeting this VM.
+ *
+ * We also provide a way to query a VM state, so userspace can destroy
+ * it and create a new one.
+ *
+ * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
+ * situation, where the logical device needs to be re-created.
+ */
+ bool unusable;
+
+ /**
+ * @unhandled_fault: Unhandled fault happened.
+ *
+ * This should be reported to the scheduler, and the queue/group be
+ * flagged as faulty as a result.
+ */
+ bool unhandled_fault;
+};
+
+/**
+ * struct panthor_vm_bind_job - VM bind job
+ */
+struct panthor_vm_bind_job {
+ /** @base: Inherit from drm_sched_job. */
+ struct drm_sched_job base;
+
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
+ struct work_struct cleanup_op_ctx_work;
+
+ /** @vm: VM targeted by the VM operation. */
+ struct panthor_vm *vm;
+
+ /** @ctx: Operation context. */
+ struct panthor_vm_op_ctx ctx;
+};
+
+/**
+ * @pt_cache: Cache used to allocate MMU page tables.
+ *
+ * The pre-allocation pattern forces us to over-allocate to plan for
+ * the worst case scenario, and return the pages we didn't use.
+ *
+ * Having a kmem_cache allows us to speed up allocations.
+ */
+static struct kmem_cache *pt_cache;
+
+/**
+ * alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+ struct panthor_vm *vm = cookie;
+ void *page;
+
+ /* Allocation of the root page table happening during init. */
+ if (unlikely(!vm->root_page_table)) {
+ struct page *p;
+
+ drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
+ p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
+ gfp | __GFP_ZERO, get_order(size));
+ page = p ? page_address(p) : NULL;
+ vm->root_page_table = page;
+ return page;
+ }
+
+ /* We're not supposed to have anything bigger than 4k here, because we picked a
+ * 4k granule size at init time.
+ */
+ if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
+ return NULL;
+
+ /* We must have some op_ctx attached to the VM and it must have at least one
+ * free page.
+ */
+ if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
+ drm_WARN_ON(&vm->ptdev->base,
+ vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
+ return NULL;
+
+ page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
+ memset(page, 0, SZ_4K);
+
+ /* Page table entries don't use virtual addresses, which trips up
+ * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+ * are mixed with other fields, and I fear kmemleak won't detect that
+ * either.
+ *
+ * Let's just ignore memory passed to the page-table driver for now.
+ */
+ kmemleak_ignore(page);
+ return page;
+}
+
+/**
+ * free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void free_pt(void *cookie, void *data, size_t size)
+{
+ struct panthor_vm *vm = cookie;
+
+ if (unlikely(vm->root_page_table == data)) {
+ free_pages((unsigned long)data, get_order(size));
+ vm->root_page_table = NULL;
+ return;
+ }
+
+ if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
+ return;
+
+ /* Return the page to the pt_cache. */
+ kmem_cache_free(pt_cache, data);
+}
+
+static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
+{
+ int ret;
+ u32 val;
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending.
+ */
+ ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
+ val, !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
+
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
+ }
+
+ return ret;
+}
+
+static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(ptdev, as_nr);
+ if (!status)
+ gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
+
+ return status;
+}
+
+static void lock_region(struct panthor_device *ptdev, u32 as_nr,
+ u64 region_start, u64 size)
+{
+ u8 region_width;
+ u64 region;
+ u64 region_end = region_start + size;
+
+ if (!size)
+ return;
+
+ /*
+ * The locked region is a naturally aligned power of 2 block encoded as
+ * its log2 minus 1.
+ * Calculate the desired start/end and look for the highest bit which
+ * differs. The smallest naturally aligned block must include this bit
+ * change, the desired region starts with this bit (and subsequent bits)
+ * zeroed and ends with the bit (and subsequent bits) set to one.
+ */
+ region_width = max(fls64(region_start ^ (region_end - 1)),
+ const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+ /*
+ * Mask off the low bits of region_start (which would be ignored by
+ * the hardware anyway)
+ */
+ region_start &= GENMASK_ULL(63, region_width);
+
+ region = region_width | region_start;
+
+ /* Lock the region that needs to be updated */
+ gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
+ gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
+}
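+
+/* Illustrative only: with region_start = 0x10000000 and size = 0x2000000,
+ * region_start ^ (region_end - 1) = 0x01ffffff, so fls64() returns 25 and
+ * region_width = 24 (assuming AS_LOCK_REGION_MIN_SIZE is not larger than
+ * 32M), i.e. a naturally aligned 2^25 = 32M block. region_start is already
+ * 32M-aligned, so the encoded lock covers exactly [0x10000000, 0x12000000).
+ */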
+
+static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
+ u64 iova, u64 size, u32 op)
+{
+ lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+
+ if (as_nr < 0)
+ return 0;
+
+ if (op != AS_COMMAND_UNLOCK)
+ lock_region(ptdev, as_nr, iova, size);
+
+ /* Run the MMU operation */
+ write_cmd(ptdev, as_nr, op);
+
+ /* Wait for the flush to complete */
+ return wait_ready(ptdev, as_nr);
+}
+
+static int mmu_hw_do_operation(struct panthor_vm *vm,
+ u64 iova, u64 size, u32 op)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ int ret;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ return ret;
+}
+
+static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
+ u64 transtab, u64 transcfg, u64 memattr)
+{
+ int ret;
+
+ ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
+ if (ret)
+ return ret;
+
+ gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
+ gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
+
+ gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
+ gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+
+ gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
+ return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
+{
+ int ret;
+
+ ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
+ if (ret)
+ return ret;
+
+ gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
+ gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
+
+ gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
+ gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
+
+ gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+
+ return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
+{
+ /* Bits 16 to 31 mean REQ_COMPLETE. */
+ return value & GENMASK(15, 0);
+}
+
+static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
+{
+ return BIT(as);
+}
+
+/**
+ * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
+ * @vm: VM to check.
+ *
+ * Return: true if the VM has unhandled faults, false otherwise.
+ */
+bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
+{
+ return vm->unhandled_fault;
+}
+
+/**
+ * panthor_vm_is_unusable() - Check if the VM is still usable
+ * @vm: VM to check.
+ *
+ * Return: true if the VM is unusable, false otherwise.
+ */
+bool panthor_vm_is_unusable(struct panthor_vm *vm)
+{
+ return vm->unusable;
+}
+
+static void panthor_vm_release_as_locked(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+
+ lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+
+ if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
+ return;
+
+ ptdev->mmu->as.slots[vm->as.id].vm = NULL;
+ clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
+ refcount_set(&vm->as.active_cnt, 0);
+ list_del_init(&vm->as.lru_node);
+ vm->as.id = -1;
+}
+
+/**
+ * panthor_vm_active() - Flag a VM as active
+ * @vm: VM to flag as active.
+ *
+ * Assigns an address space to a VM so it can be used by the GPU/MCU.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_active(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
+ int ret = 0, as, cookie;
+ u64 transtab, transcfg;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return -ENODEV;
+
+ if (refcount_inc_not_zero(&vm->as.active_cnt))
+ goto out_dev_exit;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ if (refcount_inc_not_zero(&vm->as.active_cnt))
+ goto out_unlock;
+
+ as = vm->as.id;
+ if (as >= 0) {
+ /* Unhandled pagefault on this AS, the MMU was disabled. We need to
+ * re-enable the MMU after clearing+unmasking the AS interrupts.
+ */
+ if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
+ goto out_enable_as;
+
+ goto out_make_active;
+ }
+
+ /* Check for a free AS */
+ if (vm->for_mcu) {
+ drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
+ as = 0;
+ } else {
+ as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
+ }
+
+ if (!(BIT(as) & ptdev->gpu_info.as_present)) {
+ struct panthor_vm *lru_vm;
+
+ lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
+ struct panthor_vm,
+ as.lru_node);
+ if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
+ as = lru_vm->as.id;
+ panthor_vm_release_as_locked(lru_vm);
+ }
+
+ /* Assign the free or reclaimed AS to the FD */
+ vm->as.id = as;
+ set_bit(as, &ptdev->mmu->as.alloc_mask);
+ ptdev->mmu->as.slots[as].vm = vm;
+
+out_enable_as:
+ transtab = cfg->arm_lpae_s1_cfg.ttbr;
+ transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - va_bits);
+ if (ptdev->coherent)
+ transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ /* If the VM is re-activated, we clear the fault. */
+ vm->unhandled_fault = false;
+
+ /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
+ * before enabling the AS.
+ */
+ if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
+ gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
+ ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
+ gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
+ }
+
+ ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
+
+out_make_active:
+ if (!ret) {
+ refcount_set(&vm->as.active_cnt, 1);
+ list_del_init(&vm->as.lru_node);
+ }
+
+out_unlock:
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+/**
+ * panthor_vm_idle() - Flag a VM as idle
+ * @vm: VM to flag as idle.
+ *
+ * When we know the GPU is done with the VM (no more jobs to process),
+ * we can relinquish the AS slot attached to this VM, if any.
+ *
+ * We don't release the slot immediately, but instead place the VM in
+ * the LRU list, so it can be evicted if another VM needs an AS slot.
+ * This way, VMs stay attached to the AS they were given until we run
+ * out of free slots, limiting the number of MMU operations (TLB flush
+ * and other AS updates).
+ */
+void panthor_vm_idle(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+
+ if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
+ return;
+
+ if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
+ list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
+
+ refcount_set(&vm->as.active_cnt, 0);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+}
+
+static void panthor_vm_stop(struct panthor_vm *vm)
+{
+ drm_sched_stop(&vm->sched, NULL);
+}
+
+static void panthor_vm_start(struct panthor_vm *vm)
+{
+ drm_sched_start(&vm->sched, true);
+}
+
+/**
+ * panthor_vm_as() - Get the AS slot attached to a VM
+ * @vm: VM to get the AS slot of.
+ *
+ * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
+ */
+int panthor_vm_as(struct panthor_vm *vm)
+{
+ return vm->as.id;
+}
+
+static size_t get_pgsize(u64 addr, size_t size, size_t *count)
+{
+ /*
+ * io-pgtable only operates on multiple pages within a single table
+ * entry, so we need to split at boundaries of the table size, i.e.
+ * the next block size up. The distance from address A to the next
+ * boundary of block size B is logically B - A % B, but in unsigned
+ * two's complement where B is a power of two we get the equivalence
+ * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
+ */
+ size_t blk_offset = -addr % SZ_2M;
+
+ if (blk_offset || size < SZ_2M) {
+ *count = min_not_zero(blk_offset, size) / SZ_4K;
+ return SZ_4K;
+ }
+ blk_offset = -addr % SZ_1G ?: SZ_1G;
+ *count = min(blk_offset, size) / SZ_2M;
+ return SZ_2M;
+}
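+
+/* Illustrative only: mapping 4M at iova 0x201000 typically takes three calls:
+ * get_pgsize(0x201000, 0x400000) returns SZ_4K with *count = 0x1ff (4k pages
+ * up to the next 2M boundary), get_pgsize(0x400000, 0x201000) returns SZ_2M
+ * with *count = 1, and get_pgsize(0x600000, 0x1000) returns SZ_4K with
+ * *count = 1.
+ */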
+
+static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ int ret = 0, cookie;
+
+ if (vm->as.id < 0)
+ return 0;
+
+ /* If the device is unplugged, we just silently skip the flush. */
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return 0;
+
+ /* Flush the PTs only if we're already awake */
+ if (pm_runtime_active(ptdev->base.dev))
+ ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ struct io_pgtable_ops *ops = vm->pgtbl_ops;
+ u64 offset = 0;
+
+ drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
+
+ while (offset < size) {
+ size_t unmapped_sz = 0, pgcount;
+ size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
+
+ unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
+
+ if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
+ drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
+ iova + offset + unmapped_sz,
+ iova + offset + pgsize * pgcount,
+ iova, iova + size);
+ panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
+ return -EINVAL;
+ }
+ offset += unmapped_sz;
+ }
+
+ return panthor_vm_flush_range(vm, iova, size);
+}
+
+static int
+panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
+ struct sg_table *sgt, u64 offset, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct io_pgtable_ops *ops = vm->pgtbl_ops;
+ u64 start_iova = iova;
+ int ret;
+
+ if (!size)
+ return 0;
+
+ for_each_sgtable_dma_sg(sgt, sgl, count) {
+ dma_addr_t paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+ if (len <= offset) {
+ offset -= len;
+ continue;
+ }
+
+ paddr += offset;
+ len -= offset;
+ len = min_t(size_t, len, size);
+ size -= len;
+
+ drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
+ vm->as.id, iova, &paddr, len);
+
+ while (len) {
+ size_t pgcount, mapped = 0;
+ size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
+
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ GFP_KERNEL, &mapped);
+ iova += mapped;
+ paddr += mapped;
+ len -= mapped;
+
+ if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
+ ret = -ENOMEM;
+
+ if (ret) {
+ /* If something failed, unmap what we've already mapped before
+ * returning. The unmap call is not supposed to fail.
+ */
+ drm_WARN_ON(&ptdev->base,
+ panthor_vm_unmap_pages(vm, start_iova,
+ iova - start_iova));
+ return ret;
+ }
+ }
+
+ if (!size)
+ break;
+ }
+
+ return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
+}
+
+static int flags_to_prot(u32 flags)
+{
+ int prot = 0;
+
+ if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
+ prot |= IOMMU_NOEXEC;
+
+ if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
+ prot |= IOMMU_CACHE;
+
+ if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
+ prot |= IOMMU_READ;
+ else
+ prot |= IOMMU_READ | IOMMU_WRITE;
+
+ return prot;
+}
+
+/**
+ * panthor_vm_alloc_va() - Allocate a region in the auto-va space
+ * @vm: VM to allocate a region on.
+ * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
+ * wants the VA to be automatically allocated from the auto-VA range.
+ * @size: size of the VA range.
+ * @va_node: drm_mm_node to initialize. Must be zero-initialized.
+ *
+ * Some GPU objects, like heap chunks, are fully managed by the kernel and
+ * need to be mapped to the userspace VM, in the region reserved for kernel
+ * objects.
+ *
+ * This function takes care of allocating a region in the kernel auto-VA space.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int
+panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
+ struct drm_mm_node *va_node)
+{
+ int ret;
+
+ if (!size || (size & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ return -EINVAL;
+
+ mutex_lock(&vm->mm_lock);
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
+ va_node->start = va;
+ va_node->size = size;
+ ret = drm_mm_reserve_node(&vm->mm, va_node);
+ } else {
+ ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
+ size >= SZ_2M ? SZ_2M : SZ_4K,
+ 0, vm->kernel_auto_va.start,
+ vm->kernel_auto_va.end,
+ DRM_MM_INSERT_BEST);
+ }
+ mutex_unlock(&vm->mm_lock);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
+ * @vm: VM to free the region on.
+ * @va_node: Memory node representing the region to free.
+ */
+void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
+{
+ mutex_lock(&vm->mm_lock);
+ drm_mm_remove_node(va_node);
+ mutex_unlock(&vm->mm_lock);
+}
+
+static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
+ struct drm_gpuvm *vm = vm_bo->vm;
+ bool unpin;
+
+ /* We must retain the GEM before calling drm_gpuvm_bo_put(),
+ * otherwise the mutex might be destroyed while we hold it.
+ * Same goes for the VM, since we take the VM resv lock.
+ */
+ drm_gem_object_get(&bo->base.base);
+ drm_gpuvm_get(vm);
+
+ /* We take the resv lock to protect against concurrent accesses to the
+ * gpuvm evicted/extobj lists that are modified in
+ * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
+ * releases the last vm_bo reference.
+ * We take the BO GPUVA list lock to protect the vm_bo removal from the
+ * GEM vm_bo list.
+ */
+ dma_resv_lock(drm_gpuvm_resv(vm), NULL);
+ mutex_lock(&bo->gpuva_list_lock);
+ unpin = drm_gpuvm_bo_put(vm_bo);
+ mutex_unlock(&bo->gpuva_list_lock);
+ dma_resv_unlock(drm_gpuvm_resv(vm));
+
+ /* If the vm_bo object was destroyed, release the pin reference that
+ * was held by this object.
+ */
+ if (unpin && !bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ drm_gpuvm_put(vm);
+ drm_gem_object_put(&bo->base.base);
+}
+
+static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm)
+{
+ struct panthor_vma *vma, *tmp_vma;
+
+ u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
+ op_ctx->rsvd_page_tables.ptr;
+
+ if (remaining_pt_count) {
+ kmem_cache_free_bulk(pt_cache, remaining_pt_count,
+ op_ctx->rsvd_page_tables.pages +
+ op_ctx->rsvd_page_tables.ptr);
+ }
+
+ kfree(op_ctx->rsvd_page_tables.pages);
+
+ if (op_ctx->map.vm_bo)
+ panthor_vm_bo_put(op_ctx->map.vm_bo);
+
+ for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
+ kfree(op_ctx->preallocated_vmas[i]);
+
+ list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
+ list_del(&vma->node);
+ panthor_vm_bo_put(vma->base.vm_bo);
+ kfree(vma);
+ }
+}
+
+static struct panthor_vma *
+panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
+{
+ for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
+ struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
+
+ if (vma) {
+ op_ctx->preallocated_vmas[i] = NULL;
+ return vma;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
+{
+ u32 vma_count;
+
+ switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ /* One VMA for the new mapping, and two more VMAs for the remap case
+ * which might contain both a prev and next VA.
+ */
+ vma_count = 3;
+ break;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ /* Partial unmaps might trigger a remap with either a prev or a next VA,
+ * but not both.
+ */
+ vma_count = 1;
+ break;
+
+ default:
+ return 0;
+ }
+
+ for (u32 i = 0; i < vma_count; i++) {
+ struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+ if (!vma)
+ return -ENOMEM;
+
+ op_ctx->preallocated_vmas[i] = vma;
+ }
+
+ return 0;
+}
+
+#define PANTHOR_VM_BIND_OP_MAP_FLAGS \
+ (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
+ DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+
+static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm,
+ struct panthor_gem_object *bo,
+ u64 offset,
+ u64 size, u64 va,
+ u32 flags)
+{
+ struct drm_gpuvm_bo *preallocated_vm_bo;
+ struct sg_table *sgt = NULL;
+ u64 pt_count;
+ int ret;
+
+ if (!bo)
+ return -EINVAL;
+
+ if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
+ (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
+ return -EINVAL;
+
+ /* Make sure the VA and size are aligned and in-bounds. */
+ if (size > bo->base.base.size || offset > bo->base.base.size - size)
+ return -EINVAL;
+
+ /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
+ if (bo->exclusive_vm_root_gem &&
+ bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
+ return -EINVAL;
+
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->flags = flags;
+ op_ctx->va.range = size;
+ op_ctx->va.addr = va;
+
+ ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
+ if (ret)
+ goto err_cleanup;
+
+ if (!bo->base.base.import_attach) {
+ /* Pre-reserve the BO pages, so the map operation doesn't have to
+ * allocate.
+ */
+ ret = drm_gem_shmem_pin(&bo->base);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt)) {
+ if (!bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ ret = PTR_ERR(sgt);
+ goto err_cleanup;
+ }
+
+ op_ctx->map.sgt = sgt;
+
+ preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
+ if (!preallocated_vm_bo) {
+ if (!bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ mutex_lock(&bo->gpuva_list_lock);
+ op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
+ mutex_unlock(&bo->gpuva_list_lock);
+
+ /* If a vm_bo for this <VM,BO> combination exists, it already
+ * retains a pin ref, and we can release the one we took earlier.
+ *
+ * If our pre-allocated vm_bo is picked, it now retains the pin ref,
+ * which will be released in panthor_vm_bo_put().
+ */
+ if (preallocated_vm_bo != op_ctx->map.vm_bo &&
+ !bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ op_ctx->map.bo_offset = offset;
+
+ /* L1, L2 and L3 page tables.
+ * We could optimize L3 allocation by iterating over the sgt and merging
+ * 2M contiguous blocks, but it's simpler to over-provision and return
+ * the pages if they're not used.
+ */
+ pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
+ ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
+ ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
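+ /* Illustrative only: for va = 4G and size = 8M, the formula above gives
+ * 1 (512G granule) + 1 (1G granule) + 4 (2M granule) = 6 reserved page
+ * tables, some of which may be returned unused at cleanup time.
+ */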
+
+ op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
+ sizeof(*op_ctx->rsvd_page_tables.pages),
+ GFP_KERNEL);
+ if (!op_ctx->rsvd_page_tables.pages) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
+ op_ctx->rsvd_page_tables.pages);
+ op_ctx->rsvd_page_tables.count = ret;
+ if (ret != pt_count) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ /* Insert BO into the extobj list last, when we know nothing can fail. */
+ dma_resv_lock(panthor_vm_resv(vm), NULL);
+ drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
+ dma_resv_unlock(panthor_vm_resv(vm));
+
+ return 0;
+
+err_cleanup:
+ panthor_vm_cleanup_op_ctx(op_ctx, vm);
+ return ret;
+}
+
+static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm,
+ u64 va, u64 size)
+{
+ u32 pt_count = 0;
+ int ret;
+
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->va.range = size;
+ op_ctx->va.addr = va;
+ op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
+
+ /* Pre-allocate L3 page tables to account for the split-2M-block
+ * situation on unmap.
+ */
+ if (va != ALIGN(va, SZ_2M))
+ pt_count++;
+
+ if (va + size != ALIGN(va + size, SZ_2M) &&
+ ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
+ pt_count++;
+
+ ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
+ if (ret)
+ goto err_cleanup;
+
+ if (pt_count) {
+ op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
+ sizeof(*op_ctx->rsvd_page_tables.pages),
+ GFP_KERNEL);
+ if (!op_ctx->rsvd_page_tables.pages) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
+ op_ctx->rsvd_page_tables.pages);
+ if (ret != pt_count) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+ op_ctx->rsvd_page_tables.count = pt_count;
+ }
+
+ return 0;
+
+err_cleanup:
+ panthor_vm_cleanup_op_ctx(op_ctx, vm);
+ return ret;
+}
+
+static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm)
+{
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
+}
+
+/**
+ * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
+ * @vm: VM to look into.
+ * @va: Virtual address to search for.
+ * @bo_offset: Offset of the GEM object mapped at this virtual address.
+ * Only valid on success.
+ *
+ * The object returned by this function might no longer be mapped when the
+ * function returns. It's the caller's responsibility to ensure there's no
+ * concurrent map/unmap operations making the returned value invalid, or
+ * make sure it doesn't matter if the object is no longer mapped.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_gem_object *
+panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
+{
+ struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
+ struct drm_gpuva *gpuva;
+ struct panthor_vma *vma;
+
+ /* Take the VM lock to prevent concurrent map/unmap operations. */
+ mutex_lock(&vm->op_lock);
+ gpuva = drm_gpuva_find_first(&vm->base, va, 1);
+ vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
+ if (vma && vma->base.gem.obj) {
+ drm_gem_object_get(vma->base.gem.obj);
+ bo = to_panthor_bo(vma->base.gem.obj);
+ *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
+ }
+ mutex_unlock(&vm->op_lock);
+
+ return bo;
+}
+
+#define PANTHOR_VM_MIN_KERNEL_VA_SIZE SZ_256M
+
+static u64
+panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
+ u64 full_va_range)
+{
+ u64 user_va_range;
+
+ /* Make sure we have a minimum amount of VA space for kernel objects. */
+ if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
+ return 0;
+
+ if (args->user_va_range) {
+ /* Use the user provided value if != 0. */
+ user_va_range = args->user_va_range;
+ } else if (TASK_SIZE_OF(current) < full_va_range) {
+ /* If the task VM size is smaller than the GPU VA range, pick this
+ * as our default user VA range, so userspace can CPU/GPU map buffers
+ * at the same address.
+ */
+ user_va_range = TASK_SIZE_OF(current);
+ } else {
+ /* If the GPU VA range is smaller than the task VM size, we
+ * just have to live with the fact we won't be able to map
+ * all buffers at the same GPU/CPU address.
+ *
+ * If the GPU VA range is bigger than 4G (more than 32-bit of
+ * VA), we split the range in two, and assign half of it to
+ * the user and the other half to the kernel; if it's not, we
+ * keep the kernel VA space as small as possible.
+ */
+ user_va_range = full_va_range > SZ_4G ?
+ full_va_range / 2 :
+ full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
+ }
+
+ if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
+ user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
+
+ return user_va_range;
+}
+
+#define PANTHOR_VM_CREATE_FLAGS 0
+
+static int
+panthor_vm_create_check_args(const struct panthor_device *ptdev,
+ const struct drm_panthor_vm_create *args,
+ u64 *kernel_va_start, u64 *kernel_va_range)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ u64 full_va_range = 1ull << va_bits;
+ u64 user_va_range;
+
+ if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
+ return -EINVAL;
+
+ user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
+ if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
+ return -EINVAL;
+
+ /* Pick a kernel VA range that's a power of two, to have a clear split. */
+ *kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
+ *kernel_va_start = full_va_range - *kernel_va_range;
+ return 0;
+}
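+
+/* Illustrative only: on a GPU exposing 48 VA bits, with args->user_va_range = 0
+ * and a task VM size of at least 1ull << 48, the 256T range is split in two:
+ * user_va_range = 1ull << 47, kernel_va_range = 1ull << 47 and
+ * kernel_va_start = 1ull << 47.
+ */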
+
+/*
+ * Only 32 VMs per open file. If that becomes a limiting factor, we can
+ * increase this number.
+ */
+#define PANTHOR_MAX_VMS_PER_FILE 32
+
+/**
+ * panthor_vm_pool_create_vm() - Create a VM
+ * @ptdev: The panthor device.
+ * @pool: The VM pool to create this VM on.
+ * @args: The VM creation arguments.
+ *
+ * Return: a positive VM ID on success, a negative error code otherwise.
+ */
+int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
+ struct panthor_vm_pool *pool,
+ struct drm_panthor_vm_create *args)
+{
+ u64 kernel_va_start, kernel_va_range;
+ struct panthor_vm *vm;
+ int ret;
+ u32 id;
+
+ ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
+ if (ret)
+ return ret;
+
+ vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
+ kernel_va_start, kernel_va_range);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ ret = xa_alloc(&pool->xa, &id, vm,
+ XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
+
+ if (ret) {
+ panthor_vm_put(vm);
+ return ret;
+ }
+
+ args->user_va_range = kernel_va_start;
+ return id;
+}
+
+static void panthor_vm_destroy(struct panthor_vm *vm)
+{
+ if (!vm)
+ return;
+
+ vm->destroyed = true;
+
+ mutex_lock(&vm->heaps.lock);
+ panthor_heap_pool_destroy(vm->heaps.pool);
+ vm->heaps.pool = NULL;
+ mutex_unlock(&vm->heaps.lock);
+
+ drm_WARN_ON(&vm->ptdev->base,
+ panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
+ panthor_vm_put(vm);
+}
+
+/**
+ * panthor_vm_pool_destroy_vm() - Destroy a VM.
+ * @pool: VM pool.
+ * @handle: VM handle.
+ *
+ * This function doesn't free the VM object or its resources; it just kills
+ * all mappings, and makes sure nothing can be mapped after that point.
+ *
+ * If there were any active jobs at the time this function was called, these
+ * jobs should experience page faults and be killed as a result.
+ *
+ * The VM resources are freed when the last reference on the VM object is
+ * dropped.
+ */
+int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
+{
+ struct panthor_vm *vm;
+
+ vm = xa_erase(&pool->xa, handle);
+
+ panthor_vm_destroy(vm);
+
+ return vm ? 0 : -EINVAL;
+}
+
+/**
+ * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
+ * @pool: VM pool to check.
+ * @handle: Handle of the VM to retrieve.
+ *
+ * Return: A valid pointer if the VM exists, NULL otherwise.
+ */
+struct panthor_vm *
+panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
+{
+ struct panthor_vm *vm;
+
+ vm = panthor_vm_get(xa_load(&pool->xa, handle));
+
+ return vm;
+}
+
+/**
+ * panthor_vm_pool_destroy() - Destroy a VM pool.
+ * @pfile: File.
+ *
+ * Destroy all VMs in the pool, and release the pool resources.
+ *
+ * Note that VMs can outlive the pool they were created from if other
+ * objects hold a reference to these VMs.
+ */
+void panthor_vm_pool_destroy(struct panthor_file *pfile)
+{
+ struct panthor_vm *vm;
+ unsigned long i;
+
+ if (!pfile->vms)
+ return;
+
+ xa_for_each(&pfile->vms->xa, i, vm)
+ panthor_vm_destroy(vm);
+
+ xa_destroy(&pfile->vms->xa);
+ kfree(pfile->vms);
+}
+
+/**
+ * panthor_vm_pool_create() - Create a VM pool
+ * @pfile: File.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_pool_create(struct panthor_file *pfile)
+{
+ pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
+ if (!pfile->vms)
+ return -ENOMEM;
+
+ xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
+ return 0;
+}
+
+/* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
+static void mmu_tlb_flush_all(void *cookie)
+{
+}
+
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
+ .tlb_flush_all = mmu_tlb_flush_all,
+ .tlb_flush_walk = mmu_tlb_flush_walk,
+};
+
+static const char *access_type_name(struct panthor_device *ptdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ return "ATOMIC";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ drm_WARN_ON(&ptdev->base, 1);
+ return NULL;
+ }
+}
+
+static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ bool has_unhandled_faults = false;
+
+ status = panthor_mmu_fault_mask(ptdev, status);
+ while (status) {
+ u32 as = ffs(status | (status >> 16)) - 1;
+ u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
+ u32 new_int_mask;
+ u64 addr;
+ u32 fault_status;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+
+ fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
+ addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
+ addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+
+ /* decode the fault status */
+ exception_type = fault_status & 0xFF;
+ access_type = (fault_status >> 8) & 0x3;
+ source_id = (fault_status >> 16);
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ ptdev->mmu->as.faulty_mask |= mask;
+ new_int_mask =
+ panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
+
+ /* terminal fault, print info about the fault */
+ drm_err(&ptdev->base,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ as, addr,
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panthor_exception_name(ptdev, exception_type),
+ access_type, access_type_name(ptdev, fault_status),
+ source_id);
+
+ /* Ignore MMU interrupts on this AS until it's been
+ * re-enabled.
+ */
+ ptdev->mmu->irq.mask = new_int_mask;
+ gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
+
+ if (ptdev->mmu->as.slots[as].vm)
+ ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
+
+ /* Disable the MMU to kill jobs on this AS. */
+ panthor_mmu_as_disable(ptdev, as);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ status &= ~mask;
+ has_unhandled_faults = true;
+ }
+
+ if (has_unhandled_faults)
+ panthor_sched_report_mmu_fault(ptdev);
+}
+PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
+
+/**
+ * panthor_mmu_suspend() - Suspend the MMU logic
+ * @ptdev: Device.
+ *
+ * All we do here is de-assign the AS slots on all active VMs, so things
+ * get flushed to the main memory, and no further accesses to these VMs are
+ * possible.
+ *
+ * We also suspend the MMU IRQ.
+ */
+void panthor_mmu_suspend(struct panthor_device *ptdev)
+{
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm) {
+ drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
+ panthor_vm_release_as_locked(vm);
+ }
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+}
+
+/**
+ * panthor_mmu_resume() - Resume the MMU logic
+ * @ptdev: Device.
+ *
+ * Resume the IRQ.
+ *
+ * We don't re-enable previously active VMs. We assume other parts of the
+ * driver will call panthor_vm_active() on the VMs they intend to use.
+ */
+void panthor_mmu_resume(struct panthor_device *ptdev)
+{
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ ptdev->mmu->as.alloc_mask = 0;
+ ptdev->mmu->as.faulty_mask = 0;
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+}
+
+/**
+ * panthor_mmu_pre_reset() - Prepare for a reset
+ * @ptdev: Device.
+ *
+ * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
+ * don't get asked to do a VM operation while the GPU is down.
+ *
+ * We don't cleanly shut down the AS slots here, because the reset might
+ * come from an AS_ACTIVE_BIT stuck situation.
+ */
+void panthor_mmu_pre_reset(struct panthor_device *ptdev)
+{
+ struct panthor_vm *vm;
+
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ ptdev->mmu->vm.reset_in_progress = true;
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
+ panthor_vm_stop(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+}
+
+/**
+ * panthor_mmu_post_reset() - Restore things after a reset
+ * @ptdev: Device.
+ *
+ * Put the MMU logic back in action after a reset. That implies resuming the
+ * IRQ and re-enabling the VM_BIND queues.
+ */
+void panthor_mmu_post_reset(struct panthor_device *ptdev)
+{
+ struct panthor_vm *vm;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ /* Now that the reset is effective, we can assume that none of the
+ * AS slots are set up, and clear the faulty flags too.
+ */
+ ptdev->mmu->as.alloc_mask = 0;
+ ptdev->mmu->as.faulty_mask = 0;
+
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm)
+ panthor_vm_release_as_locked(vm);
+ }
+
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+
+ /* Restart the VM_BIND queues. */
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
+ panthor_vm_start(vm);
+ }
+ ptdev->mmu->vm.reset_in_progress = false;
+ mutex_unlock(&ptdev->mmu->vm.lock);
+}
+
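+/*
+ * drm_gpuvm_ops::vm_free() hook, called when the last reference on the
+ * GPUVM is dropped. Tears down the heap pool, the VM_BIND scheduler
+ * entity/scheduler, the AS slot (if any) and the io-pgtable, then frees
+ * the VM object.
+ */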
+static void panthor_vm_free(struct drm_gpuvm *gpuvm)
+{
+ struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
+ struct panthor_device *ptdev = vm->ptdev;
+
+ mutex_lock(&vm->heaps.lock);
+ if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
+ panthor_heap_pool_destroy(vm->heaps.pool);
+ mutex_unlock(&vm->heaps.lock);
+ mutex_destroy(&vm->heaps.lock);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_del(&vm->node);
+ /* Restore the scheduler state so we can call drm_sched_entity_destroy()
+ * and drm_sched_fini(). If we get here, that means we have no job left
+ * and no new jobs can be queued, so we can start the scheduler without
+ * risking interfering with the reset.
+ */
+ if (ptdev->mmu->vm.reset_in_progress)
+ panthor_vm_start(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ drm_sched_entity_destroy(&vm->entity);
+ drm_sched_fini(&vm->sched);
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ if (vm->as.id >= 0) {
+ int cookie;
+
+ if (drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_mmu_as_disable(ptdev, vm->as.id);
+ drm_dev_exit(cookie);
+ }
+
+ ptdev->mmu->as.slots[vm->as.id].vm = NULL;
+ clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
+ list_del(&vm->as.lru_node);
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ free_io_pgtable_ops(vm->pgtbl_ops);
+
+ drm_mm_takedown(&vm->mm);
+ kfree(vm);
+}
+
+/**
+ * panthor_vm_put() - Release a reference on a VM
+ * @vm: VM to release the reference on. Can be NULL.
+ */
+void panthor_vm_put(struct panthor_vm *vm)
+{
+ drm_gpuvm_put(vm ? &vm->base : NULL);
+}
+
+/**
+ * panthor_vm_get() - Get a VM reference
+ * @vm: VM to get the reference on. Can be NULL.
+ *
+ * Return: @vm value.
+ */
+struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
+{
+ if (vm)
+ drm_gpuvm_get(&vm->base);
+
+ return vm;
+}
+
+/**
+ * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
+ * @vm: VM to query the heap pool on.
+ * @create: True if the heap pool should be created when it doesn't exist.
+ *
+ * Heap pools are per-VM. This function allows one to retrieve the heap pool
+ * attached to a VM.
+ *
+ * If no heap pool exists yet, and @create is true, we create one.
+ *
+ * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
+{
+ struct panthor_heap_pool *pool;
+
+ mutex_lock(&vm->heaps.lock);
+ if (!vm->heaps.pool && create) {
+ if (vm->destroyed)
+ pool = ERR_PTR(-EINVAL);
+ else
+ pool = panthor_heap_pool_create(vm->ptdev, vm);
+
+ if (!IS_ERR(pool))
+ vm->heaps.pool = panthor_heap_pool_get(pool);
+ } else {
+ pool = panthor_heap_pool_get(vm->heaps.pool);
+ if (!pool)
+ pool = ERR_PTR(-ENOENT);
+ }
+ mutex_unlock(&vm->heaps.lock);
+
+ return pool;
+}
+
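+/*
+ * Translate the MAIR value programmed by the io-pgtable code into the GPU
+ * AS_MEMATTR format. Illustrative examples: a MAIR attribute of 0xff
+ * (inner/outer write-back, read/write allocate) ends up as
+ * INNER_OUTER_WB + SH_CPU_INNER with explicit allocation hints, while 0x44
+ * (inner/outer non-cacheable) or a device-memory attribute falls back to
+ * INNER_OUTER_NC.
+ */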
+static u64 mair_to_memattr(u64 mair)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, the inner and outer caching policies
+ * both have to be write-back. If either of them is write-through
+ * or non-cacheable, we just choose non-cacheable. Device memory is
+ * also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ /* Use SH_CPU_INNER mode so SH_IS, which is used when
+ * IOMMU_CACHE is set, actually maps to the standard
+ * definition of inner-shareable and not Mali's
+ * internal-shareable mode.
+ */
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_SH_CPU_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
+
+static void panthor_vma_link(struct panthor_vm *vm,
+ struct panthor_vma *vma,
+ struct drm_gpuvm_bo *vm_bo)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
+
+ mutex_lock(&bo->gpuva_list_lock);
+ drm_gpuva_link(&vma->base, vm_bo);
+ drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
+ mutex_unlock(&bo->gpuva_list_lock);
+}
+
+static void panthor_vma_unlink(struct panthor_vm *vm,
+ struct panthor_vma *vma)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
+ struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
+
+ mutex_lock(&bo->gpuva_list_lock);
+ drm_gpuva_unlink(&vma->base);
+ mutex_unlock(&bo->gpuva_list_lock);
+
+ /* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
+ * when entering this function, so we can implement deferred VMA
+ * destruction. Re-assign it here.
+ */
+ vma->base.vm_bo = vm_bo;
+ list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
+}
+
+static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
+{
+ INIT_LIST_HEAD(&vma->node);
+ vma->flags = flags;
+}
+
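+/* Subset of the VM_BIND flags that is carried over to the resulting VMA. */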
+#define PANTHOR_VM_MAP_FLAGS \
+ (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
+
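+/*
+ * Split/merge step callbacks: drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap(),
+ * called from panthor_vm_exec_op(), invoke these hooks to apply each
+ * map/remap/unmap step to the io-pgtable and the GPUVA tree.
+ */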
+static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
+{
+ struct panthor_vm *vm = priv;
+ struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
+ struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ int ret;
+
+ if (!vma)
+ return -EINVAL;
+
+ panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
+
+ ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
+ op_ctx->map.sgt, op->map.gem.offset,
+ op->map.va.range);
+ if (ret)
+ return ret;
+
+ /* The ref is owned by the mapping now. Clear the obj field so we don't
+ * release the pinning/obj ref behind GPUVA's back.
+ */
+ drm_gpuva_map(&vm->base, &vma->base, &op->map);
+ panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
+ op_ctx->map.vm_bo = NULL;
+ return 0;
+}
+
+static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
+ void *priv)
+{
+ struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
+ struct panthor_vm *vm = priv;
+ struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
+ struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
+ u64 unmap_start, unmap_range;
+ int ret;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
+ ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
+ if (ret)
+ return ret;
+
+ if (op->remap.prev) {
+ prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ panthor_vma_init(prev_vma, unmap_vma->flags);
+ }
+
+ if (op->remap.next) {
+ next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ panthor_vma_init(next_vma, unmap_vma->flags);
+ }
+
+ drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
+ next_vma ? &next_vma->base : NULL,
+ &op->remap);
+
+ if (prev_vma) {
+ /* panthor_vma_link() transfers the vm_bo ownership to
+ * the VMA object. Since the vm_bo we're passing is still
+ * owned by the old mapping which will be released when this
+ * mapping is destroyed, we need to grab a ref here.
+ */
+ panthor_vma_link(vm, prev_vma,
+ drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ }
+
+ if (next_vma) {
+ panthor_vma_link(vm, next_vma,
+ drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ }
+
+ panthor_vma_unlink(vm, unmap_vma);
+ return 0;
+}
+
+static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
+ void *priv)
+{
+ struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
+ struct panthor_vm *vm = priv;
+ int ret;
+
+ ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
+ unmap_vma->base.va.range);
+ if (drm_WARN_ON(&vm->ptdev->base, ret))
+ return ret;
+
+ drm_gpuva_unmap(&op->unmap);
+ panthor_vma_unlink(vm, unmap_vma);
+ return 0;
+}
+
+static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
+ .vm_free = panthor_vm_free,
+ .sm_step_map = panthor_gpuva_sm_step_map,
+ .sm_step_remap = panthor_gpuva_sm_step_remap,
+ .sm_step_unmap = panthor_gpuva_sm_step_unmap,
+};
+
+/**
+ * panthor_vm_resv() - Get the dma_resv object attached to a VM.
+ * @vm: VM to get the dma_resv of.
+ *
+ * Return: A dma_resv object.
+ */
+struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
+{
+ return drm_gpuvm_resv(&vm->base);
+}
+
+struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
+{
+ if (!vm)
+ return NULL;
+
+ return vm->base.r_obj;
+}
+
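+/*
+ * Execute a single VM_BIND operation (map/unmap/sync-only) on @vm. When
+ * @flag_vm_unusable_on_failure is set, any failure marks the VM unusable,
+ * which is what the asynchronous VM_BIND path requests.
+ */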
+static int
+panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
+ bool flag_vm_unusable_on_failure)
+{
+ u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
+ int ret;
+
+ if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
+ return 0;
+
+ mutex_lock(&vm->op_lock);
+ vm->op_ctx = op;
+ switch (op_type) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ if (vm->unusable) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
+ op->map.vm_bo->obj, op->map.bo_offset);
+ break;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret && flag_vm_unusable_on_failure)
+ vm->unusable = true;
+
+ vm->op_ctx = NULL;
+ mutex_unlock(&vm->op_lock);
+
+ return ret;
+}
+
+static struct dma_fence *
+panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+ bool cookie;
+ int ret;
+
+ /* Not only do we report an error, which is propagated to the
+ * drm_sched finished fence, but we also flag the VM as unusable, because
+ * a failure in an async VM_BIND results in an inconsistent state: the VM
+ * needs to be destroyed and recreated.
+ */
+ cookie = dma_fence_begin_signalling();
+ ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
+ dma_fence_end_signalling(cookie);
+
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+static void panthor_vm_bind_job_release(struct kref *kref)
+{
+ struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+
+ panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
+ panthor_vm_put(job->vm);
+ kfree(job);
+}
+
+/**
+ * panthor_vm_bind_job_put() - Release a VM_BIND job reference
+ * @sched_job: Job to release the reference on.
+ */
+void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ if (sched_job)
+ kref_put(&job->refcount, panthor_vm_bind_job_release);
+}
+
+static void
+panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Do the heavy cleanups asynchronously, so we're out of the
+ * dma-signaling path and can acquire dma-resv locks safely.
+ */
+ queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
+}
+
+static enum drm_gpu_sched_stat
+panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
+{
+ WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
+ .run_job = panthor_vm_bind_run_job,
+ .free_job = panthor_vm_bind_free_job,
+ .timedout_job = panthor_vm_bind_timedout_job,
+};
+
+/**
+ * panthor_vm_create() - Create a VM
+ * @ptdev: Device.
+ * @for_mcu: True if this is the FW MCU VM.
+ * @kernel_va_start: Start of the range reserved for kernel BO mapping.
+ * @kernel_va_size: Size of the range reserved for kernel BO mapping.
+ * @auto_kernel_va_start: Start of the auto-VA kernel range.
+ * @auto_kernel_va_size: Size of the auto-VA kernel range.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_vm *
+panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
+ u64 kernel_va_start, u64 kernel_va_size,
+ u64 auto_kernel_va_start, u64 auto_kernel_va_size)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
+ u64 full_va_range = 1ull << va_bits;
+ struct drm_gem_object *dummy_gem;
+ struct drm_gpu_scheduler *sched;
+ struct io_pgtable_cfg pgtbl_cfg;
+ u64 mair, min_va, va_range;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return ERR_PTR(-ENOMEM);
+
+ /* We allocate a dummy GEM for the VM. */
+ dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
+ if (!dummy_gem) {
+ ret = -ENOMEM;
+ goto err_free_vm;
+ }
+
+ mutex_init(&vm->heaps.lock);
+ vm->for_mcu = for_mcu;
+ vm->ptdev = ptdev;
+ mutex_init(&vm->op_lock);
+
+ if (for_mcu) {
+ /* The CSF MCU is a Cortex-M7 and can only address 4G. */
+ min_va = 0;
+ va_range = SZ_4G;
+ } else {
+ min_va = 0;
+ va_range = full_va_range;
+ }
+
+ mutex_init(&vm->mm_lock);
+ drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
+ vm->kernel_auto_va.start = auto_kernel_va_start;
+ vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
+
+ INIT_LIST_HEAD(&vm->node);
+ INIT_LIST_HEAD(&vm->as.lru_node);
+ vm->as.id = -1;
+ refcount_set(&vm->as.active_cnt, 0);
+
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = va_bits,
+ .oas = pa_bits,
+ .coherent_walk = ptdev->coherent,
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = ptdev->base.dev,
+ .alloc = alloc_pt,
+ .free = free_pt,
+ };
+
+ vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
+ if (!vm->pgtbl_ops) {
+ ret = -EINVAL;
+ goto err_mm_takedown;
+ }
+
+ /* Bind operations are synchronous for now, no timeout needed. */
+ ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
+ 1, 1, 0,
+ MAX_SCHEDULE_TIMEOUT, NULL, NULL,
+ "panthor-vm-bind", ptdev->base.dev);
+ if (ret)
+ goto err_free_io_pgtable;
+
+ sched = &vm->sched;
+ ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
+ if (ret)
+ goto err_sched_fini;
+
+ mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
+ vm->memattr = mair_to_memattr(mair);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_add_tail(&vm->node, &ptdev->mmu->vm.list);
+
+ /* If a reset is in progress, stop the scheduler. */
+ if (ptdev->mmu->vm.reset_in_progress)
+ panthor_vm_stop(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ /* We intentionally leave the reserved range at zero, because we want kernel
+ * VMAs to be handled the same way user VMAs are.
+ */
+ drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
+ DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
+ min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ drm_gem_object_put(dummy_gem);
+ return vm;
+
+err_sched_fini:
+ drm_sched_fini(&vm->sched);
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(vm->pgtbl_ops);
+
+err_mm_takedown:
+ drm_mm_takedown(&vm->mm);
+ drm_gem_object_put(dummy_gem);
+
+err_free_vm:
+ kfree(vm);
+ return ERR_PTR(ret);
+}
+
+static int
+panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op,
+ struct panthor_vm_op_ctx *op_ctx)
+{
+ struct drm_gem_object *gem;
+ int ret;
+
+ /* Aligned on page size. */
+ if ((op->va | op->size) & ~PAGE_MASK)
+ return -EINVAL;
+
+ switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ gem = drm_gem_object_lookup(file, op->bo_handle);
+ ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
+ gem ? to_panthor_bo(gem) : NULL,
+ op->bo_offset,
+ op->size,
+ op->va,
+ op->flags);
+ drm_gem_object_put(gem);
+ return ret;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+ return -EINVAL;
+
+ if (op->bo_handle || op->bo_offset)
+ return -EINVAL;
+
+ return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
+ if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+ return -EINVAL;
+
+ if (op->bo_handle || op->bo_offset)
+ return -EINVAL;
+
+ if (op->va || op->size)
+ return -EINVAL;
+
+ if (!op->syncs.count)
+ return -EINVAL;
+
+ panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
+
+ panthor_vm_bind_job_put(&job->base);
+}
+
+/**
+ * panthor_vm_bind_job_create() - Create a VM_BIND job
+ * @file: File.
+ * @vm: VM targeted by the VM_BIND job.
+ * @op: VM operation data.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct drm_sched_job *
+panthor_vm_bind_job_create(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op)
+{
+ struct panthor_vm_bind_job *job;
+ int ret;
+
+ if (!vm)
+ return ERR_PTR(-EINVAL);
+
+ if (vm->destroyed || vm->unusable)
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
+ if (ret) {
+ kfree(job);
+ return ERR_PTR(ret);
+ }
+
+ INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
+ kref_init(&job->refcount);
+ job->vm = panthor_vm_get(vm);
+
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ if (ret)
+ goto err_put_job;
+
+ return &job->base;
+
+err_put_job:
+ panthor_vm_bind_job_put(&job->base);
+ return ERR_PTR(ret);
+}
+
+/**
+ * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
+ * @exec: The locking/preparation context.
+ * @sched_job: The job to prepare resvs on.
+ *
+ * Locks and prepares the VM resv.
+ *
+ * If this is a map operation, locks and prepares the GEM resv.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
+ struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+ int ret;
+
+ /* Acquire the VM lock and reserve a slot for this VM bind job. */
+ ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
+ if (ret)
+ return ret;
+
+ if (job->ctx.map.vm_bo) {
+ /* Lock/prepare the GEM being mapped. */
+ ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
+ * @exec: drm_exec context.
+ * @sched_job: Job to update the resvs on.
+ */
+void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
+ struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ /* Explicit sync => we just register our job finished fence as bookkeep. */
+ drm_gpuvm_resv_add_fence(&job->vm->base, exec,
+ &sched_job->s_fence->finished,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+}
+
+void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
+}
+
+/**
+ * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
+ * @file: File.
+ * @vm: VM targeted by the VM operation.
+ * @op: Data describing the VM operation.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_bind_exec_sync_op(struct drm_file *file,
+ struct panthor_vm *vm,
+ struct drm_panthor_vm_bind_op *op)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ /* No sync objects allowed on synchronous operations. */
+ if (op->syncs.count)
+ return -EINVAL;
+
+ if (!op->size)
+ return 0;
+
+ ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_map_bo_range() - Map a GEM object range to a VM
+ * @vm: VM to map the GEM to.
+ * @bo: GEM object to map.
+ * @offset: Offset in the GEM object.
+ * @size: Size to map.
+ * @va: Virtual address to map the object to.
+ * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
+ * Only map-related flags are valid.
+ *
+ * Internal use only. For userspace requests, use
+ * panthor_vm_bind_exec_sync_op() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
+ u64 offset, u64 size, u64 va, u32 flags)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_unmap_range() - Unmap a portion of the VA space
+ * @vm: VM to unmap the region from.
+ * @va: Virtual address to unmap. Must be 4k aligned.
+ * @size: Size of the region to unmap. Must be 4k aligned.
+ *
+ * Internal use only. For userspace requests, use
+ * panthor_vm_bind_exec_sync_op() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
+ * @exec: Locking/preparation context.
+ * @vm: VM targeted by the GPU job.
+ * @slot_count: Number of slots to reserve.
+ *
+ * GPU jobs assume all BOs bound to the VM at the time the job is submitted
+ * are available when the job is executed. In order to guarantee that, we
+ * need to reserve a slot on all BOs mapped to a VM and update this slot with
+ * the job fence after its submission.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
+ u32 slot_count)
+{
+ int ret;
+
+ /* Acquire the VM lock and reserve a slot for this GPU job. */
+ ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
+ if (ret)
+ return ret;
+
+ return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
+}
+
+/**
+ * panthor_mmu_unplug() - Unplug the MMU logic
+ * @ptdev: Device.
+ *
+ * The MMU registers must not be accessed after this function is called.
+ * We suspend the IRQ and disable all VMs to guarantee that.
+ */
+void panthor_mmu_unplug(struct panthor_device *ptdev)
+{
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm) {
+ drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
+ panthor_vm_release_as_locked(vm);
+ }
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+}
+
+static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
+{
+ destroy_workqueue(res);
+}
+
+/**
+ * panthor_mmu_init() - Initialize the MMU logic.
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_mmu_init(struct panthor_device *ptdev)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ struct panthor_mmu *mmu;
+ int ret, irq;
+
+ mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mmu->as.lru_list);
+
+ ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&mmu->vm.list);
+ ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
+ if (ret)
+ return ret;
+
+ ptdev->mmu = mmu;
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
+ panthor_mmu_fault_mask(ptdev, ~0));
+ if (ret)
+ return ret;
+
+ mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
+ if (!mmu->vm.wq)
+ return -ENOMEM;
+
+ /* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
+ * which passes iova as an unsigned long. Patch the mmu_features to reflect this
+ * limitation.
+ */
+ if (sizeof(unsigned long) * 8 < va_bits) {
+ ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
+ ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
+ }
+
+ return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
+{
+ int ret;
+
+ mutex_lock(&vm->op_lock);
+ ret = drm_debugfs_gpuva_info(m, &vm->base);
+ mutex_unlock(&vm->op_lock);
+
+ return ret;
+}
+
+static int show_each_vm(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
+ struct panthor_vm *vm;
+ int ret = 0;
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
+ ret = show(vm, m);
+ if (ret < 0)
+ break;
+
+ seq_puts(m, "\n");
+ }
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ return ret;
+}
+
+static struct drm_info_list panthor_mmu_debugfs_list[] = {
+ DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
+};
+
+/**
+ * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
+ * @minor: Minor.
+ */
+void panthor_mmu_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_mmu_debugfs_list,
+ ARRAY_SIZE(panthor_mmu_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * panthor_mmu_pt_cache_init() - Initialize the page table cache.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_mmu_pt_cache_init(void)
+{
+ pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
+ if (!pt_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
+ */
+void panthor_mmu_pt_cache_fini(void)
+{
+ kmem_cache_destroy(pt_cache);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
new file mode 100644
index 000000000000..f3c1ed19f973
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_MMU_H__
+#define __PANTHOR_MMU_H__
+
+#include <linux/dma-resv.h>
+
+struct drm_exec;
+struct drm_sched_job;
+struct panthor_gem_object;
+struct panthor_heap_pool;
+struct panthor_vm;
+struct panthor_vma;
+struct panthor_mmu;
+
+int panthor_mmu_init(struct panthor_device *ptdev);
+void panthor_mmu_unplug(struct panthor_device *ptdev);
+void panthor_mmu_pre_reset(struct panthor_device *ptdev);
+void panthor_mmu_post_reset(struct panthor_device *ptdev);
+void panthor_mmu_suspend(struct panthor_device *ptdev);
+void panthor_mmu_resume(struct panthor_device *ptdev);
+
+int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
+ u64 offset, u64 size, u64 va, u32 flags);
+int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size);
+struct panthor_gem_object *
+panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
+
+int panthor_vm_active(struct panthor_vm *vm);
+void panthor_vm_idle(struct panthor_vm *vm);
+int panthor_vm_as(struct panthor_vm *vm);
+
+struct panthor_heap_pool *
+panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+
+struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
+void panthor_vm_put(struct panthor_vm *vm);
+struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
+ u64 kernel_va_start, u64 kernel_va_size,
+ u64 kernel_auto_va_start,
+ u64 kernel_auto_va_size);
+
+int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec,
+ struct panthor_vm *vm,
+ u32 slot_count);
+int panthor_vm_add_bos_resvs_deps_to_job(struct panthor_vm *vm,
+ struct drm_sched_job *job);
+void panthor_vm_add_job_fence_to_bos_resvs(struct panthor_vm *vm,
+ struct drm_sched_job *job);
+
+struct dma_resv *panthor_vm_resv(struct panthor_vm *vm);
+struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm);
+
+void panthor_vm_pool_destroy(struct panthor_file *pfile);
+int panthor_vm_pool_create(struct panthor_file *pfile);
+int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
+ struct panthor_vm_pool *pool,
+ struct drm_panthor_vm_create *args);
+int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle);
+struct panthor_vm *panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle);
+
+bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm);
+bool panthor_vm_is_unusable(struct panthor_vm *vm);
+
+/*
+ * PANTHOR_VM_KERNEL_AUTO_VA: Use this magic address when you want the GEM
+ * logic to auto-allocate the virtual address in the reserved kernel VA range.
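+ *
+ * Typically passed as the @va argument of panthor_vm_alloc_va() when the
+ * caller doesn't care about the exact kernel VA.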
+ */
+#define PANTHOR_VM_KERNEL_AUTO_VA ~0ull
+
+int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
+ struct drm_mm_node *va_node);
+void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node);
+
+int panthor_vm_bind_exec_sync_op(struct drm_file *file,
+ struct panthor_vm *vm,
+ struct drm_panthor_vm_bind_op *op);
+
+struct drm_sched_job *
+panthor_vm_bind_job_create(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op);
+void panthor_vm_bind_job_put(struct drm_sched_job *job);
+int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
+ struct drm_sched_job *job);
+void panthor_vm_bind_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
+
+void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+int panthor_mmu_pt_cache_init(void);
+void panthor_mmu_pt_cache_fini(void);
+
+#ifdef CONFIG_DEBUG_FS
+void panthor_mmu_debugfs_init(struct drm_minor *minor);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
new file mode 100644
index 000000000000..b7b3b3add166
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+/*
+ * Register definitions based on mali_kbase_gpu_regmap.h and
+ * mali_kbase_gpu_regmap_csf.h
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ */
+#ifndef __PANTHOR_REGS_H__
+#define __PANTHOR_REGS_H__
+
+#define GPU_ID 0x0
+#define GPU_ARCH_MAJOR(x) ((x) >> 28)
+#define GPU_ARCH_MINOR(x) (((x) & GENMASK(27, 24)) >> 24)
+#define GPU_ARCH_REV(x) (((x) & GENMASK(23, 20)) >> 20)
+#define GPU_PROD_MAJOR(x) (((x) & GENMASK(19, 16)) >> 16)
+#define GPU_VER_MAJOR(x) (((x) & GENMASK(15, 12)) >> 12)
+#define GPU_VER_MINOR(x) (((x) & GENMASK(11, 4)) >> 4)
+#define GPU_VER_STATUS(x) ((x) & GENMASK(3, 0))
+
+#define GPU_L2_FEATURES 0x4
+#define GPU_L2_FEATURES_LINE_SIZE(x) (1 << ((x) & GENMASK(7, 0)))
+
+#define GPU_CORE_FEATURES 0x8
+
+#define GPU_TILER_FEATURES 0xC
+#define GPU_MEM_FEATURES 0x10
+#define GROUPS_L2_COHERENT BIT(0)
+
+#define GPU_MMU_FEATURES 0x14
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
+#define GPU_AS_PRESENT 0x18
+#define GPU_CSF_ID 0x1C
+
+#define GPU_INT_RAWSTAT 0x20
+#define GPU_INT_CLEAR 0x24
+#define GPU_INT_MASK 0x28
+#define GPU_INT_STAT 0x2c
+#define GPU_IRQ_FAULT BIT(0)
+#define GPU_IRQ_PROTM_FAULT BIT(1)
+#define GPU_IRQ_RESET_COMPLETED BIT(8)
+#define GPU_IRQ_POWER_CHANGED BIT(9)
+#define GPU_IRQ_POWER_CHANGED_ALL BIT(10)
+#define GPU_IRQ_CLEAN_CACHES_COMPLETED BIT(17)
+#define GPU_IRQ_DOORBELL_MIRROR BIT(18)
+#define GPU_IRQ_MCU_STATUS_CHANGED BIT(19)
+#define GPU_CMD 0x30
+#define GPU_CMD_DEF(type, payload) ((type) | ((payload) << 8))
+#define GPU_SOFT_RESET GPU_CMD_DEF(1, 1)
+#define GPU_HARD_RESET GPU_CMD_DEF(1, 2)
+#define CACHE_CLEAN BIT(0)
+#define CACHE_INV BIT(1)
+#define GPU_FLUSH_CACHES(l2, lsc, oth) \
+ GPU_CMD_DEF(4, ((l2) << 0) | ((lsc) << 4) | ((oth) << 8))
+
+#define GPU_STATUS 0x34
+#define GPU_STATUS_ACTIVE BIT(0)
+#define GPU_STATUS_PWR_ACTIVE BIT(1)
+#define GPU_STATUS_PAGE_FAULT BIT(4)
+#define GPU_STATUS_PROTM_ACTIVE BIT(7)
+#define GPU_STATUS_DBG_ENABLED BIT(8)
+
+#define GPU_FAULT_STATUS 0x3C
+#define GPU_FAULT_ADDR_LO 0x40
+#define GPU_FAULT_ADDR_HI 0x44
+
+#define GPU_PWR_KEY 0x50
+#define GPU_PWR_KEY_UNLOCK 0x2968A819
+#define GPU_PWR_OVERRIDE0 0x54
+#define GPU_PWR_OVERRIDE1 0x58
+
+#define GPU_TIMESTAMP_OFFSET_LO 0x88
+#define GPU_TIMESTAMP_OFFSET_HI 0x8C
+#define GPU_CYCLE_COUNT_LO 0x90
+#define GPU_CYCLE_COUNT_HI 0x94
+#define GPU_TIMESTAMP_LO 0x98
+#define GPU_TIMESTAMP_HI 0x9C
+
+#define GPU_THREAD_MAX_THREADS 0xA0
+#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
+#define GPU_THREAD_MAX_BARRIER_SIZE 0xA8
+#define GPU_THREAD_FEATURES 0xAC
+
+#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
+
+#define GPU_SHADER_PRESENT_LO 0x100
+#define GPU_SHADER_PRESENT_HI 0x104
+#define GPU_TILER_PRESENT_LO 0x110
+#define GPU_TILER_PRESENT_HI 0x114
+#define GPU_L2_PRESENT_LO 0x120
+#define GPU_L2_PRESENT_HI 0x124
+
+#define SHADER_READY_LO 0x140
+#define SHADER_READY_HI 0x144
+#define TILER_READY_LO 0x150
+#define TILER_READY_HI 0x154
+#define L2_READY_LO 0x160
+#define L2_READY_HI 0x164
+
+#define SHADER_PWRON_LO 0x180
+#define SHADER_PWRON_HI 0x184
+#define TILER_PWRON_LO 0x190
+#define TILER_PWRON_HI 0x194
+#define L2_PWRON_LO 0x1A0
+#define L2_PWRON_HI 0x1A4
+
+#define SHADER_PWROFF_LO 0x1C0
+#define SHADER_PWROFF_HI 0x1C4
+#define TILER_PWROFF_LO 0x1D0
+#define TILER_PWROFF_HI 0x1D4
+#define L2_PWROFF_LO 0x1E0
+#define L2_PWROFF_HI 0x1E4
+
+#define SHADER_PWRTRANS_LO 0x200
+#define SHADER_PWRTRANS_HI 0x204
+#define TILER_PWRTRANS_LO 0x210
+#define TILER_PWRTRANS_HI 0x214
+#define L2_PWRTRANS_LO 0x220
+#define L2_PWRTRANS_HI 0x224
+
+#define SHADER_PWRACTIVE_LO 0x240
+#define SHADER_PWRACTIVE_HI 0x244
+#define TILER_PWRACTIVE_LO 0x250
+#define TILER_PWRACTIVE_HI 0x254
+#define L2_PWRACTIVE_LO 0x260
+#define L2_PWRACTIVE_HI 0x264
+
+#define GPU_REVID 0x280
+
+#define GPU_COHERENCY_FEATURES 0x300
+#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
+
+#define GPU_COHERENCY_PROTOCOL 0x304
+#define GPU_COHERENCY_ACE 0
+#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_NONE 31
+
+#define MCU_CONTROL 0x700
+#define MCU_CONTROL_ENABLE 1
+#define MCU_CONTROL_AUTO 2
+#define MCU_CONTROL_DISABLE 0
+
+#define MCU_STATUS 0x704
+#define MCU_STATUS_DISABLED 0
+#define MCU_STATUS_ENABLED 1
+#define MCU_STATUS_HALT 2
+#define MCU_STATUS_FATAL 3
+
+/* Job Control regs */
+#define JOB_INT_RAWSTAT 0x1000
+#define JOB_INT_CLEAR 0x1004
+#define JOB_INT_MASK 0x1008
+#define JOB_INT_STAT 0x100c
+#define JOB_INT_GLOBAL_IF BIT(31)
+#define JOB_INT_CSG_IF(x) BIT(x)
+
+/* MMU regs */
+#define MMU_INT_RAWSTAT 0x2000
+#define MMU_INT_CLEAR 0x2004
+#define MMU_INT_MASK 0x2008
+#define MMU_INT_STAT 0x200c
+
+/* AS_COMMAND register commands */
+
+#define MMU_BASE 0x2400
+#define MMU_AS_SHIFT 6
+#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
+
+#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
+#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
+#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
+#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
+#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
+#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
+#define AS_COMMAND_NOP 0
+#define AS_COMMAND_UPDATE 1
+#define AS_COMMAND_LOCK 2
+#define AS_COMMAND_UNLOCK 3
+#define AS_COMMAND_FLUSH_PT 4
+#define AS_COMMAND_FLUSH_MEM 5
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+#define AS_FAULTSTATUS(as) (MMU_AS(as) + 0x1C)
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
+#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_STATUS(as) (MMU_AS(as) + 0x28)
+#define AS_STATUS_AS_ACTIVE BIT(0)
+#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
+#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
+#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
+#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+
+#define CSF_GPU_LATEST_FLUSH_ID 0x10000
+
+#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
+#define CSF_GLB_DOORBELL_ID 0
+
+#define gpu_write(dev, reg, data) \
+ writel(data, (dev)->iomem + (reg))
+
+#define gpu_read(dev, reg) \
+ readl((dev)->iomem + (reg))
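+
+/*
+ * 64-bit values are exposed as _LO/_HI register pairs; callers compose them
+ * manually, e.g. lo = gpu_read(dev, AS_FAULTADDRESS_LO(as)) combined with
+ * (u64)gpu_read(dev, AS_FAULTADDRESS_HI(as)) << 32.
+ */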
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
new file mode 100644
index 000000000000..7f16a4a14e9a
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -0,0 +1,3528 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-resv.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/iosys-map.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+/**
+ * DOC: Scheduler
+ *
+ * Mali CSF hardware adopts a firmware-assisted scheduling model, where
+ * the firmware takes care of scheduling aspects, to some extent.
+ *
+ * The scheduling happens at the scheduling group level; each group
+ * contains 1 to N queues (N is FW/hardware dependent, and exposed
+ * through the firmware interface). Each queue is assigned a command
+ * stream ring buffer, which serves as a way to get jobs submitted to
+ * the GPU, among other things.
+ *
+ * The firmware can schedule a maximum of M groups (M is FW/hardware
+ * dependent, and exposed through the firmware interface). Past
+ * this maximum number of groups, the kernel must take care of
+ * rotating the groups passed to the firmware so every group gets
+ * a chance to have its queues scheduled for execution.
+ *
+ * The current implementation only supports kernel-mode queues.
+ * In other words, userspace doesn't have access to the ring-buffer.
+ * Instead, userspace passes indirect command stream buffers that are
+ * called from the queue ring-buffer by the kernel using a pre-defined
+ * sequence of command stream instructions to ensure the userspace driver
+ * always gets consistent results (cache maintenance,
+ * synchronization, ...).
+ *
+ * We rely on the drm_gpu_scheduler framework to deal with job
+ * dependencies and submission. As any other driver dealing with a
+ * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
+ * entity has its own job scheduler. When a job is ready to be executed
+ * (all its dependencies are met), it is pushed to the appropriate
+ * queue ring-buffer, and the group is scheduled for execution if it
+ * wasn't already active.
+ *
+ * Kernel-side group scheduling is timeslice-based. When there are fewer
+ * groups than slots, the periodic tick is disabled and we just let
+ * the FW schedule the active groups. When there are more groups than
+ * slots, we give each group a chance to execute for a given amount
+ * of time, and then re-evaluate and pick new groups
+ * to schedule. The group selection algorithm is based on
+ * priority+round-robin.
+ *
+ * Even though user-mode queues are out of scope right now, the
+ * current design takes them into account by avoiding any guess about the
+ * group/queue state that would be based on information we wouldn't have
+ * if userspace were in charge of the ring-buffer. That's also one of the
+ * reasons we don't do 'cooperative' scheduling (encoding FW group slot
+ * reservation as dma_fence that would be returned from the
+ * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
+ * a queue of waiters, ordered by job submission order). This approach
+ * would work for kernel-mode queues, but would make user-mode queues a
+ * lot more complicated to retrofit.
+ */
+
+#define JOB_TIMEOUT_MS 5000
+
+#define MIN_CS_PER_CSG 8
+
+#define MIN_CSGS 3
+#define MAX_CSG_PRIO 0xf
+
+struct panthor_group;
+
+/**
+ * struct panthor_csg_slot - Command stream group slot
+ *
+ * This represents a FW slot for a scheduling group.
+ */
+struct panthor_csg_slot {
+ /** @group: Scheduling group bound to this slot. */
+ struct panthor_group *group;
+
+ /** @priority: Group priority. */
+ u8 priority;
+
+ /**
+ * @idle: True if the group bound to this slot is idle.
+ *
+ * A group is idle when it has nothing waiting for execution on
+ * any of its queues, or when its queues are blocked waiting for
+ * something to happen (synchronization object).
+ */
+ bool idle;
+};
+
+/**
+ * enum panthor_csg_priority - Group priority
+ */
+enum panthor_csg_priority {
+ /** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
+ PANTHOR_CSG_PRIORITY_LOW = 0,
+
+ /** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
+ PANTHOR_CSG_PRIORITY_MEDIUM,
+
+ /** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
+ PANTHOR_CSG_PRIORITY_HIGH,
+
+ /**
+ * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
+ *
+ * Real-time priority allows one to preempt scheduling of other
+ * non-real-time groups. When such a group becomes executable,
+ * it will evict the group with the lowest non-rt priority if
+ * there's no free group slot available.
+ *
+ * Currently not exposed to userspace.
+ */
+ PANTHOR_CSG_PRIORITY_RT,
+
+ /** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
+ PANTHOR_CSG_PRIORITY_COUNT,
+};
+
+/**
+ * struct panthor_scheduler - Object used to manage the scheduler
+ */
+struct panthor_scheduler {
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /**
+ * @wq: Workqueue used by our internal scheduler logic and
+ * drm_gpu_scheduler.
+ *
+ * Used for the scheduler tick, group updates or other kinds of FW
+ * event processing that can't be handled in the threaded interrupt
+ * path. Also passed to the drm_gpu_scheduler instances embedded
+ * in panthor_queue.
+ */
+ struct workqueue_struct *wq;
+
+ /**
+ * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
+ *
+ * We have a queue dedicated to heap chunk allocation works to avoid
+ * blocking the rest of the scheduler if the allocation tries to
+ * reclaim memory.
+ */
+ struct workqueue_struct *heap_alloc_wq;
+
+ /** @tick_work: Work executed on a scheduling tick. */
+ struct delayed_work tick_work;
+
+ /**
+ * @sync_upd_work: Work used to process synchronization object updates.
+ *
+ * We use this work to unblock queues/groups that were waiting on a
+ * synchronization object.
+ */
+ struct work_struct sync_upd_work;
+
+ /**
+ * @fw_events_work: Work used to process FW events outside the interrupt path.
+ *
+ * Even if the interrupt is threaded, we need any event processing
+ * that requires taking the panthor_scheduler::lock to be processed
+ * outside the interrupt path so we don't block the tick logic when
+ * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
+ * event processing requires taking this lock, we just delegate all
+ * FW event processing to the scheduler workqueue.
+ */
+ struct work_struct fw_events_work;
+
+ /**
+ * @fw_events: Bitmask encoding pending FW events.
+ */
+ atomic_t fw_events;
+
+ /**
+ * @resched_target: When the next tick should occur.
+ *
+ * Expressed in jiffies.
+ */
+ u64 resched_target;
+
+ /**
+ * @last_tick: When the last tick occurred.
+ *
+ * Expressed in jiffies.
+ */
+ u64 last_tick;
+
+ /** @tick_period: Tick period in jiffies. */
+ u64 tick_period;
+
+ /**
+ * @lock: Lock protecting access to all the scheduler fields.
+ *
+ * Should be taken in the tick work, the irq handler, and anywhere the @groups
+ * fields are touched.
+ */
+ struct mutex lock;
+
+ /** @groups: Various lists used to classify groups. */
+ struct {
+ /**
+ * @runnable: Runnable group lists.
+ *
+ * When a group has queues that want to execute something,
+ * its panthor_group::run_node should be inserted here.
+ *
+ * One list per-priority.
+ */
+ struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
+
+ /**
+ * @idle: Idle group lists.
+ *
+ * When all queues of a group are idle (either because they
+ * have nothing to execute, or because they are blocked), the
+ * panthor_group::run_node field should be inserted here.
+ *
+ * One list per-priority.
+ */
+ struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
+
+ /**
+ * @waiting: List of groups whose queues are blocked on a
+ * synchronization object.
+ *
+ * Insert panthor_group::wait_node here when a group is waiting
+ * for synchronization objects to be signaled.
+ *
+ * This list is evaluated in the @sync_upd_work work.
+ */
+ struct list_head waiting;
+ } groups;
+
+ /**
+ * @csg_slots: FW command stream group slots.
+ */
+ struct panthor_csg_slot csg_slots[MAX_CSGS];
+
+ /** @csg_slot_count: Number of command stream group slots exposed by the FW. */
+ u32 csg_slot_count;
+
+ /** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
+ u32 cs_slot_count;
+
+ /** @as_slot_count: Number of address space slots supported by the MMU. */
+ u32 as_slot_count;
+
+ /** @used_csg_slot_count: Number of command stream group slots currently used. */
+ u32 used_csg_slot_count;
+
+ /** @sb_slot_count: Number of scoreboard slots. */
+ u32 sb_slot_count;
+
+ /**
+ * @might_have_idle_groups: True if an active group might have become idle.
+ *
+ * This will force a tick, so other runnable groups can be scheduled if one
+ * or more active groups became idle.
+ */
+ bool might_have_idle_groups;
+
+ /** @pm: Power management related fields. */
+ struct {
+ /** @has_ref: True if the scheduler owns a runtime PM reference. */
+ bool has_ref;
+ } pm;
+
+ /** @reset: Reset related fields. */
+ struct {
+ /** @lock: Lock protecting the other reset fields. */
+ struct mutex lock;
+
+ /**
+ * @in_progress: True if a reset is in progress.
+ *
+ * Set to true in panthor_sched_pre_reset() and back to false in
+ * panthor_sched_post_reset().
+ */
+ atomic_t in_progress;
+
+ /**
+ * @stopped_groups: List containing all groups that were stopped
+ * before a reset.
+ *
+ * Insert panthor_group::run_node in the pre_reset path.
+ */
+ struct list_head stopped_groups;
+ } reset;
+};
+
+/**
+ * struct panthor_syncobj_32b - 32-bit FW synchronization object
+ */
+struct panthor_syncobj_32b {
+ /** @seqno: Sequence number. */
+ u32 seqno;
+
+ /**
+ * @status: Status.
+ *
+ * Not zero on failure.
+ */
+ u32 status;
+};
+
+/**
+ * struct panthor_syncobj_64b - 64-bit FW synchronization object
+ */
+struct panthor_syncobj_64b {
+ /** @seqno: Sequence number. */
+ u64 seqno;
+
+ /**
+ * @status: Status.
+ *
+ * Not zero on failure.
+ */
+ u32 status;
+
+ /** @pad: MBZ. */
+ u32 pad;
+};
+
+/**
+ * struct panthor_queue - Execution queue
+ */
+struct panthor_queue {
+ /** @scheduler: DRM scheduler used for this queue. */
+ struct drm_gpu_scheduler scheduler;
+
+ /** @entity: DRM scheduling entity used for this queue. */
+ struct drm_sched_entity entity;
+
+ /**
+ * @remaining_time: Time remaining before the job timeout expires.
+ *
+ * The job timeout is suspended when the queue is not scheduled by the
+ * FW. Every time we suspend the timer, we need to save the remaining
+ * time so we can restore it later on.
+ */
+ unsigned long remaining_time;
+
+ /** @timeout_suspended: True if the job timeout was suspended. */
+ bool timeout_suspended;
+
+ /**
+ * @doorbell_id: Doorbell assigned to this queue.
+ *
+ * Right now, all groups share the same doorbell, and the doorbell ID
+ * is assigned to group_slot + 1 when the group is assigned a slot. But
+ * we might decide to provide fine-grained doorbell assignment at some
+ * point, so we don't have to wake up all queues in a group every time
+ * one of them is updated.
+ */
+ u8 doorbell_id;
+
+ /**
+ * @priority: Priority of the queue inside the group.
+ *
+ * Must be less than 16 (Only 4 bits available).
+ */
+ u8 priority;
+#define CSF_MAX_QUEUE_PRIO GENMASK(3, 0)
+
+ /** @ringbuf: Command stream ring-buffer. */
+ struct panthor_kernel_bo *ringbuf;
+
+ /** @iface: Firmware interface. */
+ struct {
+ /** @mem: FW memory allocated for this interface. */
+ struct panthor_kernel_bo *mem;
+
+ /** @input: Input interface. */
+ struct panthor_fw_ringbuf_input_iface *input;
+
+ /** @output: Output interface. */
+ const struct panthor_fw_ringbuf_output_iface *output;
+
+ /** @input_fw_va: FW virtual address of the input interface buffer. */
+ u32 input_fw_va;
+
+ /** @output_fw_va: FW virtual address of the output interface buffer. */
+ u32 output_fw_va;
+ } iface;
+
+ /**
+ * @syncwait: Stores information about the synchronization object this
+ * queue is waiting on.
+ */
+ struct {
+ /** @gpu_va: GPU address of the synchronization object. */
+ u64 gpu_va;
+
+ /** @ref: Reference value to compare against. */
+ u64 ref;
+
+ /** @gt: True if this is a greater-than test. */
+ bool gt;
+
+ /** @sync64: True if this is a 64-bit sync object. */
+ bool sync64;
+
+ /** @obj: Buffer object holding the synchronization object. */
+ struct drm_gem_object *obj;
+
+ /** @offset: Offset of the synchronization object inside @obj. */
+ u64 offset;
+
+ /**
+ * @kmap: Kernel mapping of the buffer object holding the
+ * synchronization object.
+ */
+ void *kmap;
+ } syncwait;
+
+ /** @fence_ctx: Fence context fields. */
+ struct {
+ /** @lock: Used to protect access to all fences allocated by this context. */
+ spinlock_t lock;
+
+ /**
+ * @id: Fence context ID.
+ *
+ * Allocated with dma_fence_context_alloc().
+ */
+ u64 id;
+
+ /** @seqno: Sequence number of the last initialized fence. */
+ atomic64_t seqno;
+
+ /**
+ * @in_flight_jobs: List containing all in-flight jobs.
+ *
+ * Used to keep track and signal panthor_job::done_fence when the
+ * synchronization object attached to the queue is signaled.
+ */
+ struct list_head in_flight_jobs;
+ } fence_ctx;
+};
+
+/**
+ * enum panthor_group_state - Scheduling group state.
+ */
+enum panthor_group_state {
+ /** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
+ PANTHOR_CS_GROUP_CREATED,
+
+ /** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
+ PANTHOR_CS_GROUP_ACTIVE,
+
+ /**
+ * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
+ * inactive/suspended right now.
+ */
+ PANTHOR_CS_GROUP_SUSPENDED,
+
+ /**
+ * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
+ *
+ * Can no longer be scheduled. The only allowed action is a destruction.
+ */
+ PANTHOR_CS_GROUP_TERMINATED,
+
+ /**
+ * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
+ *
+ * The FW returned an inconsistent state. The group is flagged unusable
+ * and can no longer be scheduled. The only allowed action is a
+ * destruction.
+ *
+ * When that happens, we also schedule a FW reset, to start from a fresh
+ * state.
+ */
+ PANTHOR_CS_GROUP_UNKNOWN_STATE,
+};
+
+/**
+ * struct panthor_group - Scheduling group object
+ */
+struct panthor_group {
+ /** @refcount: Reference count */
+ struct kref refcount;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @vm: VM bound to the group. */
+ struct panthor_vm *vm;
+
+ /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
+ u64 compute_core_mask;
+
+ /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
+ u64 fragment_core_mask;
+
+ /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
+ u64 tiler_core_mask;
+
+ /** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
+ u8 max_compute_cores;
+
+ /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
+ u8 max_fragment_cores;
+
+ /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
+ u8 max_tiler_cores;
+
+ /** @priority: Group priority (check panthor_csg_priority). */
+ u8 priority;
+
+ /** @blocked_queues: Bitmask reflecting the blocked queues. */
+ u32 blocked_queues;
+
+ /** @idle_queues: Bitmask reflecting the idle queues. */
+ u32 idle_queues;
+
+ /** @fatal_lock: Lock used to protect access to fatal fields. */
+ spinlock_t fatal_lock;
+
+ /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
+ u32 fatal_queues;
+
+ /** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
+ atomic_t tiler_oom;
+
+ /** @queue_count: Number of queues in this group. */
+ u32 queue_count;
+
+ /** @queues: Queues owned by this group. */
+ struct panthor_queue *queues[MAX_CS_PER_CSG];
+
+ /**
+ * @csg_id: ID of the FW group slot.
+ *
+ * -1 when the group is not scheduled/active.
+ */
+ int csg_id;
+
+ /**
+ * @destroyed: True when the group has been destroyed.
+ *
+ * If a group is destroyed it becomes useless: no further jobs can be submitted
+ * to its queues. We simply wait for all references to be dropped so we can
+ * release the group object.
+ */
+ bool destroyed;
+
+ /**
+ * @timedout: True when a timeout occurred on any of the queues owned by
+ * this group.
+ *
+ * Timeouts can be reported by drm_sched or by the FW. In any case, any
+ * timeout situation is unrecoverable, and the group becomes useless.
+ * We simply wait for all references to be dropped so we can release the
+ * group object.
+ */
+ bool timedout;
+
+ /**
+ * @syncobjs: Pool of per-queue synchronization objects.
+ *
+ * One sync object per queue. The position of the sync object is
+ * determined by the queue index.
+ */
+ struct panthor_kernel_bo *syncobjs;
+
+ /** @state: Group state. */
+ enum panthor_group_state state;
+
+ /**
+ * @suspend_buf: Suspend buffer.
+ *
+ * Stores the state of the group and its queues when a group is suspended.
+ * Used at resume time to restore the group in its previous state.
+ *
+ * The size of the suspend buffer is exposed through the FW interface.
+ */
+ struct panthor_kernel_bo *suspend_buf;
+
+ /**
+ * @protm_suspend_buf: Protection mode suspend buffer.
+ *
+ * Stores the state of the group and its queues when a group that's in
+ * protection mode is suspended.
+ *
+ * Used at resume time to restore the group in its previous state.
+ *
+ * The size of the protection mode suspend buffer is exposed through the
+ * FW interface.
+ */
+ struct panthor_kernel_bo *protm_suspend_buf;
+
+ /** @sync_upd_work: Work used to check/signal job fences. */
+ struct work_struct sync_upd_work;
+
+ /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
+ struct work_struct tiler_oom_work;
+
+ /** @term_work: Work used to finish the group termination procedure. */
+ struct work_struct term_work;
+
+ /**
+ * @release_work: Work used to release group resources.
+ *
+ * We need to postpone the group release to avoid a deadlock when
+ * the last ref is released in the tick work.
+ */
+ struct work_struct release_work;
+
+ /**
+ * @run_node: Node used to insert the group in the
+ * panthor_scheduler::groups::{runnable,idle} and
+ * panthor_scheduler::reset::stopped_groups lists.
+ */
+ struct list_head run_node;
+
+ /**
+ * @wait_node: Node used to insert the group in the
+ * panthor_scheduler::groups::waiting list.
+ */
+ struct list_head wait_node;
+};
+
+/**
+ * group_queue_work() - Queue a group work item
+ * @group: Group to queue the work for.
+ * @wname: Work name.
+ *
+ * Grabs a ref and queues a work item to the scheduler workqueue. If
+ * the work was already queued, we release the reference we grabbed.
+ *
+ * Work callbacks must release the reference we grabbed here.
+ */
+#define group_queue_work(group, wname) \
+ do { \
+ group_get(group); \
+ if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
+ group_put(group); \
+ } while (0)
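Editor's note: a brief usage sketch (not part of the patch) of how this macro is meant to be invoked; the wname argument is the work field name minus its _work suffix, matching the calls made further down in this file:

	/* Queues group->term_work on ptdev->scheduler->wq; group_term_work()
	 * drops the reference taken here via group_put().
	 */
	group_queue_work(group, term);

	/* Same pattern for the sync-object update handler (group->sync_upd_work). */
	group_queue_work(group, sync_upd);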
+
+/**
+ * sched_queue_work() - Queue a scheduler work.
+ * @sched: Scheduler object.
+ * @wname: Work name.
+ *
+ * Conditionally queues a scheduler work if no reset is pending/in-progress.
+ */
+#define sched_queue_work(sched, wname) \
+ do { \
+ if (!atomic_read(&(sched)->reset.in_progress) && \
+ !panthor_device_reset_is_pending((sched)->ptdev)) \
+ queue_work((sched)->wq, &(sched)->wname ## _work); \
+ } while (0)
+
+/**
+ * sched_queue_delayed_work() - Queue a scheduler delayed work.
+ * @sched: Scheduler object.
+ * @wname: Work name.
+ * @delay: Work delay in jiffies.
+ *
+ * Conditionally queues a scheduler delayed work if no reset is
+ * pending/in-progress.
+ */
+#define sched_queue_delayed_work(sched, wname, delay) \
+ do { \
+ if (!atomic_read(&sched->reset.in_progress) && \
+ !panthor_device_reset_is_pending((sched)->ptdev)) \
+ mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
+ } while (0)
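Editor's note: an illustrative sketch (not from the patch itself) showing how the two scheduler-work helpers above are used later in this file; both silently skip queuing while a reset is pending so that reset handling alone drives the state machine:

	/* Kick the synchronization-object update handler (sched->sync_upd_work). */
	sched_queue_work(ptdev->scheduler, sync_upd);

	/* Re-run the scheduling tick right away (sched->tick_work, zero delay). */
	sched_queue_delayed_work(sched, tick, 0);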
+
+/*
+ * We currently set the maximum number of groups per file to an arbitrarily
+ * low value, but this can be raised if we ever need more.
+ */
+#define MAX_GROUPS_PER_POOL 128
+
+/**
+ * struct panthor_group_pool - Group pool
+ *
+ * Each file gets assigned a group pool.
+ */
+struct panthor_group_pool {
+ /** @xa: Xarray used to manage group handles. */
+ struct xarray xa;
+};
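Editor's note: a minimal sketch of how such an xarray-backed handle pool is typically populated; the group-creation path is outside this hunk, so the pool/group variables and the ID range below are assumptions, and the xarray is assumed to have been initialized with xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1):

	struct panthor_group_pool *pool = pfile_pool;	/* hypothetical per-file pool */
	u32 gid;
	int ret;

	/* Pick a free handle in [1, MAX_GROUPS_PER_POOL] and store the group under it. */
	ret = xa_alloc(&pool->xa, &gid, group,
		       XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);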
+
+/**
+ * struct panthor_job - Used to manage a GPU job
+ */
+struct panthor_job {
+ /** @base: Inherit from drm_sched_job. */
+ struct drm_sched_job base;
+
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @group: Group of the queue this job will be pushed to. */
+ struct panthor_group *group;
+
+ /** @queue_idx: Index of the queue inside @group. */
+ u32 queue_idx;
+
+ /** @call_info: Information about the userspace command stream call. */
+ struct {
+ /** @start: GPU address of the userspace command stream. */
+ u64 start;
+
+ /** @size: Size of the userspace command stream. */
+ u32 size;
+
+ /**
+ * @latest_flush: Flush ID at the time the userspace command
+ * stream was built.
+ *
+ * Needed for the flush reduction mechanism.
+ */
+ u32 latest_flush;
+ } call_info;
+
+ /** @ringbuf: Position of this job in the ring buffer. */
+ struct {
+ /** @start: Start offset. */
+ u64 start;
+
+ /** @end: End offset. */
+ u64 end;
+ } ringbuf;
+
+ /**
+ * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
+ * list.
+ */
+ struct list_head node;
+
+ /** @done_fence: Fence signaled when the job is finished or cancelled. */
+ struct dma_fence *done_fence;
+};
+
+static void
+panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
+{
+ if (queue->syncwait.kmap) {
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
+
+ drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ queue->syncwait.kmap = NULL;
+ }
+
+ drm_gem_object_put(queue->syncwait.obj);
+ queue->syncwait.obj = NULL;
+}
+
+static void *
+panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_gem_object *bo;
+ struct iosys_map map;
+ int ret;
+
+ if (queue->syncwait.kmap)
+ return queue->syncwait.kmap + queue->syncwait.offset;
+
+ bo = panthor_vm_get_bo_for_va(group->vm,
+ queue->syncwait.gpu_va,
+ &queue->syncwait.offset);
+ if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
+ goto err_put_syncwait_obj;
+
+ queue->syncwait.obj = &bo->base.base;
+ ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ goto err_put_syncwait_obj;
+
+ queue->syncwait.kmap = map.vaddr;
+ if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
+ goto err_put_syncwait_obj;
+
+ return queue->syncwait.kmap + queue->syncwait.offset;
+
+err_put_syncwait_obj:
+ panthor_queue_put_syncwait_obj(queue);
+ return NULL;
+}
+
+static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
+{
+ if (IS_ERR_OR_NULL(queue))
+ return;
+
+ if (queue->entity.fence_context)
+ drm_sched_entity_destroy(&queue->entity);
+
+ if (queue->scheduler.ops)
+ drm_sched_fini(&queue->scheduler);
+
+ panthor_queue_put_syncwait_obj(queue);
+
+ panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
+ panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
+
+ kfree(queue);
+}
+
+static void group_release_work(struct work_struct *work)
+{
+ struct panthor_group *group = container_of(work,
+ struct panthor_group,
+ release_work);
+ struct panthor_device *ptdev = group->ptdev;
+ u32 i;
+
+ for (i = 0; i < group->queue_count; i++)
+ group_free_queue(group, group->queues[i]);
+
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
+ panthor_kernel_bo_destroy(group->vm, group->syncobjs);
+
+ panthor_vm_put(group->vm);
+ kfree(group);
+}
+
+static void group_release(struct kref *kref)
+{
+ struct panthor_group *group = container_of(kref,
+ struct panthor_group,
+ refcount);
+ struct panthor_device *ptdev = group->ptdev;
+
+ drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
+ drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
+ drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
+
+ queue_work(panthor_cleanup_wq, &group->release_work);
+}
+
+static void group_put(struct panthor_group *group)
+{
+ if (group)
+ kref_put(&group->refcount, group_release);
+}
+
+static struct panthor_group *
+group_get(struct panthor_group *group)
+{
+ if (group)
+ kref_get(&group->refcount);
+
+ return group;
+}
+
+/**
+ * group_bind_locked() - Bind a group to a group slot
+ * @group: Group.
+ * @csg_id: Slot.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+group_bind_locked(struct panthor_group *group, u32 csg_id)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_csg_slot *csg_slot;
+ int ret;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
+ ptdev->scheduler->csg_slots[csg_id].group))
+ return -EINVAL;
+
+ ret = panthor_vm_active(group->vm);
+ if (ret)
+ return ret;
+
+ csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ group_get(group);
+ group->csg_id = csg_id;
+
+ /* Dummy doorbell allocation: doorbell is assigned to the group and
+ * all queues use the same doorbell.
+ *
+ * TODO: Implement LRU-based doorbell assignment, so the most often
+ * updated queues get their own doorbell, thus avoiding useless checks
+ * on queues belonging to the same group that are rarely updated.
+ */
+ for (u32 i = 0; i < group->queue_count; i++)
+ group->queues[i]->doorbell_id = csg_id + 1;
+
+ csg_slot->group = group;
+
+ return 0;
+}
+
+/**
+ * group_unbind_locked() - Unbind a group from a slot.
+ * @group: Group to unbind.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+group_unbind_locked(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_csg_slot *slot;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
+ return -EINVAL;
+
+ if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
+ return -EINVAL;
+
+ slot = &ptdev->scheduler->csg_slots[group->csg_id];
+ panthor_vm_idle(group->vm);
+ group->csg_id = -1;
+
+ /* Tiler OOM events will be re-issued next time the group is scheduled. */
+ atomic_set(&group->tiler_oom, 0);
+ cancel_work(&group->tiler_oom_work);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ group->queues[i]->doorbell_id = -1;
+
+ slot->group = NULL;
+
+ group_put(group);
+ return 0;
+}
+
+/**
+ * cs_slot_prog_locked() - Program a queue slot
+ * @ptdev: Device.
+ * @csg_id: Group slot ID.
+ * @cs_id: Queue slot ID.
+ *
+ * Program a queue slot with the queue information so things can start being
+ * executed on this queue.
+ *
+ * The group slot must have a group bound to it already (group_bind_locked()).
+ */
+static void
+cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ queue->iface.input->extract = queue->iface.output->extract;
+ drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
+
+ cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
+ cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+ cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
+ cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
+ cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
+ CS_CONFIG_DOORBELL(queue->doorbell_id);
+ cs_iface->input->ack_irq_mask = ~0;
+ panthor_fw_update_reqs(cs_iface, req,
+ CS_IDLE_SYNC_WAIT |
+ CS_IDLE_EMPTY |
+ CS_STATE_START |
+ CS_EXTRACT_EVENT,
+ CS_IDLE_SYNC_WAIT |
+ CS_IDLE_EMPTY |
+ CS_STATE_MASK |
+ CS_EXTRACT_EVENT);
+ if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
+ drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
+ queue->timeout_suspended = false;
+ }
+}
+
+/**
+ * cs_slot_reset_locked() - Reset a queue slot
+ * @ptdev: Device.
+ * @csg_id: Group slot.
+ * @cs_id: Queue slot.
+ *
+ * Change the queue slot state to STOP and suspend the queue timeout if
+ * the queue is not blocked.
+ *
+ * The group slot must have a group bound to it (group_bind_locked()).
+ */
+static int
+cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
+ struct panthor_queue *queue = group->queues[cs_id];
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ panthor_fw_update_reqs(cs_iface, req,
+ CS_STATE_STOP,
+ CS_STATE_MASK);
+
+ /* If the queue is blocked, we want to keep the timeout running, so
+ * we can detect unbounded waits and kill the group when that happens.
+ */
+ if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
+ queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
+ queue->timeout_suspended = true;
+ WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
+ }
+
+ return 0;
+}
+
+/**
+ * csg_slot_sync_priority_locked() - Synchronize the group slot priority
+ * @ptdev: Device.
+ * @csg_id: Group slot ID.
+ *
+ * Group slot priority updates happen asynchronously. When a
+ * %CSG_ENDPOINT_CONFIG request is acknowledged, we know the update is
+ * effective and can reflect it in our panthor_csg_slot object.
+ */
+static void
+csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
+}
+
+/**
+ * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
+ * @ptdev: Device.
+ * @csg_id: Group slot.
+ * @cs_id: Queue slot.
+ *
+ * Queue state is updated on group suspend or STATUS_UPDATE event.
+ */
+static void
+cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
+ struct panthor_queue *queue = group->queues[cs_id];
+ struct panthor_fw_cs_iface *cs_iface =
+ panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
+
+ u32 status_wait_cond;
+
+ switch (cs_iface->output->status_blocked_reason) {
+ case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
+ if (queue->iface.input->insert == queue->iface.output->extract &&
+ cs_iface->output->status_scoreboards == 0)
+ group->idle_queues |= BIT(cs_id);
+ break;
+
+ case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
+ if (list_empty(&group->wait_node)) {
+ list_move_tail(&group->wait_node,
+ &group->ptdev->scheduler->groups.waiting);
+ }
+ group->blocked_queues |= BIT(cs_id);
+ queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
+ queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
+ status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
+ queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
+ if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
+ u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
+
+ queue->syncwait.sync64 = true;
+ queue->syncwait.ref |= sync_val_hi << 32;
+ } else {
+ queue->syncwait.sync64 = false;
+ }
+ break;
+
+ default:
+ /* Other reasons are not blocking. Consider the queue as runnable
+ * in those cases.
+ */
+ break;
+ }
+}
+
+static void
+csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ u32 i;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ group->idle_queues = 0;
+ group->blocked_queues = 0;
+
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
+ }
+}
+
+static void
+csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_group *group;
+ enum panthor_group_state new_state, old_state;
+ u32 csg_state;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ group = csg_slot->group;
+
+ if (!group)
+ return;
+
+ old_state = group->state;
+ csg_state = csg_iface->output->ack & CSG_STATE_MASK;
+ switch (csg_state) {
+ case CSG_STATE_START:
+ case CSG_STATE_RESUME:
+ new_state = PANTHOR_CS_GROUP_ACTIVE;
+ break;
+ case CSG_STATE_TERMINATE:
+ new_state = PANTHOR_CS_GROUP_TERMINATED;
+ break;
+ case CSG_STATE_SUSPEND:
+ new_state = PANTHOR_CS_GROUP_SUSPENDED;
+ break;
+ default:
+ /* The unknown state might be caused by a FW state corruption,
+ * which means the group metadata can't be trusted anymore, and
+ * the SUSPEND operation might propagate the corruption to the
+ * suspend buffers. Flag the group state as unknown to make
+ * sure it's unusable after that point.
+ */
+ drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
+ csg_id, csg_state);
+ new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
+ break;
+ }
+
+ if (old_state == new_state)
+ return;
+
+ /* The unknown state might be caused by a FW issue, reset the FW to
+ * take a fresh start.
+ */
+ if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
+ panthor_device_schedule_reset(ptdev);
+
+ if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
+ csg_slot_sync_queues_state_locked(ptdev, csg_id);
+
+ if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
+ u32 i;
+
+ /* Reset the queue slots so we start from a clean
+ * state when starting/resuming a new group on this
+ * CSG slot. No wait is needed here, and no doorbell ring
+ * either, since the CS slot will only be re-used
+ * on the next CSG start operation.
+ */
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_reset_locked(ptdev, csg_id, i);
+ }
+ }
+
+ group->state = new_state;
+}
+
+static int
+csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
+{
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_csg_slot *csg_slot;
+ struct panthor_group *group;
+ u32 queue_mask = 0, i;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (priority > MAX_CSG_PRIO)
+ return -EINVAL;
+
+ if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
+ return -EINVAL;
+
+ csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ group = csg_slot->group;
+ if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
+ return 0;
+
+ csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
+
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i]) {
+ cs_slot_prog_locked(ptdev, csg_id, i);
+ queue_mask |= BIT(i);
+ }
+ }
+
+ csg_iface->input->allow_compute = group->compute_core_mask;
+ csg_iface->input->allow_fragment = group->fragment_core_mask;
+ csg_iface->input->allow_other = group->tiler_core_mask;
+ csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
+ CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
+ CSG_EP_REQ_TILER(group->max_tiler_cores) |
+ CSG_EP_REQ_PRIORITY(priority);
+ csg_iface->input->config = panthor_vm_as(group->vm);
+
+ if (group->suspend_buf)
+ csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
+ else
+ csg_iface->input->suspend_buf = 0;
+
+ if (group->protm_suspend_buf) {
+ csg_iface->input->protm_suspend_buf =
+ panthor_kernel_bo_gpuva(group->protm_suspend_buf);
+ } else {
+ csg_iface->input->protm_suspend_buf = 0;
+ }
+
+ csg_iface->input->ack_irq_mask = ~0;
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
+ return 0;
+}
+
+static void
+cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 fatal;
+ u64 info;
+
+ lockdep_assert_held(&sched->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ fatal = cs_iface->output->fatal;
+ info = cs_iface->output->fatal_info;
+
+ if (group)
+ group->fatal_queues |= BIT(cs_id);
+
+ sched_queue_delayed_work(sched, tick, 0);
+ drm_warn(&ptdev->base,
+ "CSG slot %d CS slot: %d\n"
+ "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
+ "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
+ "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
+ csg_id, cs_id,
+ (unsigned int)CS_EXCEPTION_TYPE(fatal),
+ panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
+ (unsigned int)CS_EXCEPTION_DATA(fatal),
+ info);
+}
+
+static void
+cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_queue *queue = group && cs_id < group->queue_count ?
+ group->queues[cs_id] : NULL;
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 fault;
+ u64 info;
+
+ lockdep_assert_held(&sched->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ fault = cs_iface->output->fault;
+ info = cs_iface->output->fault_info;
+
+ if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ u64 cs_extract = queue->iface.output->extract;
+ struct panthor_job *job;
+
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
+ if (cs_extract >= job->ringbuf.end)
+ continue;
+
+ if (cs_extract < job->ringbuf.start)
+ break;
+
+ dma_fence_set_error(job->done_fence, -EINVAL);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+ }
+
+ drm_warn(&ptdev->base,
+ "CSG slot %d CS slot: %d\n"
+ "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
+ "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
+ "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
+ csg_id, cs_id,
+ (unsigned int)CS_EXCEPTION_TYPE(fault),
+ panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
+ (unsigned int)CS_EXCEPTION_DATA(fault),
+ info);
+}
+
+static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 renderpasses_in_flight, pending_frag_count;
+ struct panthor_heap_pool *heaps = NULL;
+ u64 heap_address, new_chunk_va = 0;
+ u32 vt_start, vt_end, frag_end;
+ int ret, csg_id;
+
+ mutex_lock(&sched->lock);
+ csg_id = group->csg_id;
+ if (csg_id >= 0) {
+ struct panthor_fw_cs_iface *cs_iface;
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ heaps = panthor_vm_get_heap_pool(group->vm, false);
+ heap_address = cs_iface->output->heap_address;
+ vt_start = cs_iface->output->heap_vt_start;
+ vt_end = cs_iface->output->heap_vt_end;
+ frag_end = cs_iface->output->heap_frag_end;
+ renderpasses_in_flight = vt_start - frag_end;
+ pending_frag_count = vt_end - frag_end;
+ }
+ mutex_unlock(&sched->lock);
+
+ /* The group got scheduled out, we stop here. We will get a new tiler OOM event
+ * when it's scheduled again.
+ */
+ if (unlikely(csg_id < 0))
+ return 0;
+
+ if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
+ ret = -EINVAL;
+ } else {
+ /* We do the allocation without holding the scheduler lock to avoid
+ * blocking the scheduling.
+ */
+ ret = panthor_heap_grow(heaps, heap_address,
+ renderpasses_in_flight,
+ pending_frag_count, &new_chunk_va);
+ }
+
+ if (ret && ret != -EBUSY) {
+ drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
+ group->fatal_queues |= BIT(cs_id);
+ sched_queue_delayed_work(sched, tick, 0);
+ goto out_put_heap_pool;
+ }
+
+ mutex_lock(&sched->lock);
+ csg_id = group->csg_id;
+ if (csg_id >= 0) {
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_fw_cs_iface *cs_iface;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+
+ cs_iface->input->heap_start = new_chunk_va;
+ cs_iface->input->heap_end = new_chunk_va;
+ panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
+ panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
+ }
+ mutex_unlock(&sched->lock);
+
+ /* We allocated a chunk, but couldn't link it to the heap
+ * context because the group was scheduled out while we were
+ * allocating memory. We need to return this chunk to the heap.
+ */
+ if (unlikely(csg_id < 0 && new_chunk_va))
+ panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
+
+ ret = 0;
+
+out_put_heap_pool:
+ panthor_heap_pool_put(heaps);
+ return ret;
+}
+
+static void group_tiler_oom_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, tiler_oom_work);
+ u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
+
+ while (tiler_oom) {
+ u32 cs_id = ffs(tiler_oom) - 1;
+
+ group_process_tiler_oom(group, cs_id);
+ tiler_oom &= ~BIT(cs_id);
+ }
+
+ group_put(group);
+}
+
+static void
+cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&sched->lock);
+
+ if (drm_WARN_ON(&ptdev->base, !group))
+ return;
+
+ atomic_or(BIT(cs_id), &group->tiler_oom);
+
+ /* We don't use group_queue_work() here because we want to queue the
+ * work item to the heap_alloc_wq.
+ */
+ group_get(group);
+ if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
+ group_put(group);
+}
+
+static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 req, ack, events;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ req = cs_iface->input->req;
+ ack = cs_iface->output->ack;
+ events = (req ^ ack) & CS_EVT_MASK;
+
+ if (events & CS_FATAL)
+ cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
+
+ if (events & CS_FAULT)
+ cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
+
+ if (events & CS_TILER_OOM)
+ cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
+
+ /* We don't acknowledge the TILER_OOM event since its handling is
+ * deferred to a separate work item.
+ */
+ panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
+
+ return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
+}
+
+static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
+}
+
+static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ lockdep_assert_held(&sched->lock);
+
+ sched->might_have_idle_groups = true;
+
+ /* Schedule a tick so we can evict idle groups and schedule non-idle
+ * ones. This will also update runtime PM and devfreq busy/idle states,
+ * so the device can lower its frequency or get suspended.
+ */
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
+ u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (group)
+ group_queue_work(group, sync_upd);
+
+ sched_queue_work(ptdev->scheduler, sync_upd);
+}
+
+static void
+csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&sched->lock);
+
+ drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
+
+ if (!drm_WARN_ON(&ptdev->base, !group))
+ group->timedout = true;
+
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 ring_cs_db_mask = 0;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
+ return;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ req = READ_ONCE(csg_iface->input->req);
+ ack = READ_ONCE(csg_iface->output->ack);
+ cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
+ cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
+ csg_events = (req ^ ack) & CSG_EVT_MASK;
+
+ /* There may not be any pending CSG/CS interrupts to process */
+ if (req == ack && cs_irq_req == cs_irq_ack)
+ return;
+
+ /* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
+ * examining the CS_ACK & CS_REQ bits. This would ensure that Host
+ * doesn't miss an interrupt for the CS in the race scenario where
+ * whilst Host is servicing an interrupt for the CS, firmware sends
+ * another interrupt for that CS.
+ */
+ csg_iface->input->cs_irq_ack = cs_irq_req;
+
+ panthor_fw_update_reqs(csg_iface, req, ack,
+ CSG_SYNC_UPDATE |
+ CSG_IDLE |
+ CSG_PROGRESS_TIMER_EVENT);
+
+ if (csg_events & CSG_IDLE)
+ csg_slot_process_idle_event_locked(ptdev, csg_id);
+
+ if (csg_events & CSG_PROGRESS_TIMER_EVENT)
+ csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
+
+ cs_irqs = cs_irq_req ^ cs_irq_ack;
+ while (cs_irqs) {
+ u32 cs_id = ffs(cs_irqs) - 1;
+
+ if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
+ ring_cs_db_mask |= BIT(cs_id);
+
+ cs_irqs &= ~BIT(cs_id);
+ }
+
+ if (csg_events & CSG_SYNC_UPDATE)
+ csg_slot_sync_update_locked(ptdev, csg_id);
+
+ if (ring_cs_db_mask)
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
+
+ panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
+}
+
+static void sched_process_idle_event_locked(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ /* Acknowledge the idle event and schedule a tick. */
+ panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
+ sched_queue_delayed_work(ptdev->scheduler, tick, 0);
+}
+
+/**
+ * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
+ * @ptdev: Device.
+ */
+static void sched_process_global_irq_locked(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 req, ack, evts;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ req = READ_ONCE(glb_iface->input->req);
+ ack = READ_ONCE(glb_iface->output->ack);
+ evts = (req ^ ack) & GLB_EVT_MASK;
+
+ if (evts & GLB_IDLE)
+ sched_process_idle_event_locked(ptdev);
+}
+
+static void process_fw_events_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
+ fw_events_work);
+ u32 events = atomic_xchg(&sched->fw_events, 0);
+ struct panthor_device *ptdev = sched->ptdev;
+
+ mutex_lock(&sched->lock);
+
+ if (events & JOB_INT_GLOBAL_IF) {
+ sched_process_global_irq_locked(ptdev);
+ events &= ~JOB_INT_GLOBAL_IF;
+ }
+
+ while (events) {
+ u32 csg_id = ffs(events) - 1;
+
+ sched_process_csg_irq_locked(ptdev, csg_id);
+ events &= ~BIT(csg_id);
+ }
+
+ mutex_unlock(&sched->lock);
+}
+
+/**
+ * panthor_sched_report_fw_events() - Report FW events to the scheduler.
+ * @ptdev: Device.
+ * @events: Mask of FW events to process.
+ */
+void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
+{
+ if (!ptdev->scheduler)
+ return;
+
+ atomic_or(events, &ptdev->scheduler->fw_events);
+ sched_queue_work(ptdev->scheduler, fw_events);
+}
+
+static const char *fence_get_driver_name(struct dma_fence *fence)
+{
+ return "panthor";
+}
+
+static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "queue-fence";
+}
+
+static const struct dma_fence_ops panthor_queue_fence_ops = {
+ .get_driver_name = fence_get_driver_name,
+ .get_timeline_name = queue_fence_get_timeline_name,
+};
+
+struct panthor_csg_slots_upd_ctx {
+ u32 update_mask;
+ u32 timedout_mask;
+ struct {
+ u32 value;
+ u32 mask;
+ } requests[MAX_CSGS];
+};
+
+static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
+ struct panthor_csg_slots_upd_ctx *ctx,
+ u32 csg_id, u32 value, u32 mask)
+{
+ if (drm_WARN_ON(&ptdev->base, !mask) ||
+ drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
+ return;
+
+ ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
+ ctx->requests[csg_id].mask |= mask;
+ ctx->update_mask |= BIT(csg_id);
+}
+
+static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
+ struct panthor_csg_slots_upd_ctx *ctx)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 update_slots = ctx->update_mask;
+
+ lockdep_assert_held(&sched->lock);
+
+ if (!ctx->update_mask)
+ return 0;
+
+ while (update_slots) {
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 csg_id = ffs(update_slots) - 1;
+
+ update_slots &= ~BIT(csg_id);
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ panthor_fw_update_reqs(csg_iface, req,
+ ctx->requests[csg_id].value,
+ ctx->requests[csg_id].mask);
+ }
+
+ panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
+
+ update_slots = ctx->update_mask;
+ while (update_slots) {
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 csg_id = ffs(update_slots) - 1;
+ u32 req_mask = ctx->requests[csg_id].mask, acked;
+ int ret;
+
+ update_slots &= ~BIT(csg_id);
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+
+ ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
+
+ if (acked & CSG_ENDPOINT_CONFIG)
+ csg_slot_sync_priority_locked(ptdev, csg_id);
+
+ if (acked & CSG_STATE_MASK)
+ csg_slot_sync_state_locked(ptdev, csg_id);
+
+ if (acked & CSG_STATUS_UPDATE) {
+ csg_slot_sync_queues_state_locked(ptdev, csg_id);
+ csg_slot_sync_idle_state_locked(ptdev, csg_id);
+ }
+
+ if (ret && acked != req_mask &&
+ ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
+ drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
+ ctx->timedout_mask |= BIT(csg_id);
+ }
+ }
+
+ if (ctx->timedout_mask)
+ return -ETIMEDOUT;
+
+ return 0;
+}
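Editor's note: a condensed sketch (editor's illustration, variables assumed declared and the scheduler lock assumed held) of the batching pattern that tick_ctx_init(), tick_ctx_apply() and panthor_sched_suspend() follow with these helpers:

	struct panthor_csg_slots_upd_ctx upd_ctx;

	csgs_upd_ctx_init(&upd_ctx);
	/* Request a suspend of slot csg_id; only the CSG_STATE field is touched. */
	csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
				CSG_STATE_SUSPEND, CSG_STATE_MASK);
	/* Rings the CSG doorbells and waits for the FW to ack each request. */
	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);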
+
+struct panthor_sched_tick_ctx {
+ struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
+ struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
+ u32 idle_group_count;
+ u32 group_count;
+ enum panthor_csg_priority min_priority;
+ struct panthor_vm *vms[MAX_CS_PER_CSG];
+ u32 as_count;
+ bool immediate_tick;
+ u32 csg_upd_failed_mask;
+};
+
+static bool
+tick_ctx_is_full(const struct panthor_scheduler *sched,
+ const struct panthor_sched_tick_ctx *ctx)
+{
+ return ctx->group_count == sched->csg_slot_count;
+}
+
+static bool
+group_is_idle(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ u32 inactive_queues;
+
+ if (group->csg_id >= 0)
+ return ptdev->scheduler->csg_slots[group->csg_id].idle;
+
+ inactive_queues = group->idle_queues | group->blocked_queues;
+ return hweight32(inactive_queues) == group->queue_count;
+}
+
+static bool
+group_can_run(struct panthor_group *group)
+{
+ return group->state != PANTHOR_CS_GROUP_TERMINATED &&
+ group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
+ !group->destroyed && group->fatal_queues == 0 &&
+ !group->timedout;
+}
+
+static void
+tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ struct list_head *queue,
+ bool skip_idle_groups,
+ bool owned_by_tick_ctx)
+{
+ struct panthor_group *group, *tmp;
+
+ if (tick_ctx_is_full(sched, ctx))
+ return;
+
+ list_for_each_entry_safe(group, tmp, queue, run_node) {
+ u32 i;
+
+ if (!group_can_run(group))
+ continue;
+
+ if (skip_idle_groups && group_is_idle(group))
+ continue;
+
+ for (i = 0; i < ctx->as_count; i++) {
+ if (ctx->vms[i] == group->vm)
+ break;
+ }
+
+ if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
+ continue;
+
+ if (!owned_by_tick_ctx)
+ group_get(group);
+
+ list_move_tail(&group->run_node, &ctx->groups[group->priority]);
+ ctx->group_count++;
+ if (group_is_idle(group))
+ ctx->idle_group_count++;
+
+ if (i == ctx->as_count)
+ ctx->vms[ctx->as_count++] = group->vm;
+
+ if (ctx->min_priority > group->priority)
+ ctx->min_priority = group->priority;
+
+ if (tick_ctx_is_full(sched, ctx))
+ return;
+ }
+}
+
+static void
+tick_ctx_insert_old_group(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ struct panthor_group *group,
+ bool full_tick)
+{
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
+ struct panthor_group *other_group;
+
+ if (!full_tick) {
+ list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
+ return;
+ }
+
+ /* Rotate to make sure groups with lower CSG slot
+ * priorities have a chance to get a higher CSG slot
+ * priority next time they get picked. This priority
+ * has an impact on resource request ordering, so it's
+ * important to make sure we don't let one group starve
+ * all other groups with the same group priority.
+ */
+ list_for_each_entry(other_group,
+ &ctx->old_groups[csg_slot->group->priority],
+ run_node) {
+ struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
+
+ if (other_csg_slot->priority > csg_slot->priority) {
+ list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
+ return;
+ }
+ }
+
+ list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
+}
+
+static void
+tick_ctx_init(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ bool full_tick)
+{
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ int ret;
+ u32 i;
+
+ memset(ctx, 0, sizeof(*ctx));
+ csgs_upd_ctx_init(&upd_ctx);
+
+ ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ INIT_LIST_HEAD(&ctx->groups[i]);
+ INIT_LIST_HEAD(&ctx->old_groups[i]);
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_fw_csg_iface *csg_iface;
+
+ if (!group)
+ continue;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, i);
+ group_get(group);
+
+ /* If there were unhandled faults on the VM, force processing of
+ * CSG IRQs, so we can flag the faulty queues.
+ */
+ if (panthor_vm_has_unhandled_faults(group->vm)) {
+ sched_process_csg_irq_locked(ptdev, i);
+
+ /* No fatal fault reported, flag all queues as faulty. */
+ if (!group->fatal_queues)
+ group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
+ }
+
+ tick_ctx_insert_old_group(sched, ctx, group, full_tick);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
+ csg_iface->output->ack ^ CSG_STATUS_UPDATE,
+ CSG_STATUS_UPDATE);
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ }
+}
+
+#define NUM_INSTRS_PER_SLOT 16
+
+static void
+group_term_post_processing(struct panthor_group *group)
+{
+ struct panthor_job *job, *tmp;
+ LIST_HEAD(faulty_jobs);
+ bool cookie;
+ u32 i = 0;
+
+ if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
+ return;
+
+ cookie = dma_fence_begin_signalling();
+ for (i = 0; i < group->queue_count; i++) {
+ struct panthor_queue *queue = group->queues[i];
+ struct panthor_syncobj_64b *syncobj;
+ int err;
+
+ if (group->fatal_queues & BIT(i))
+ err = -EINVAL;
+ else if (group->timedout)
+ err = -ETIMEDOUT;
+ else
+ err = -ECANCELED;
+
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ list_move_tail(&job->node, &faulty_jobs);
+ dma_fence_set_error(job->done_fence, err);
+ dma_fence_signal_locked(job->done_fence);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+
+ /* Manually update the syncobj seqno to unblock waiters. */
+ syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
+ syncobj->status = ~0;
+ syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
+ sched_queue_work(group->ptdev->scheduler, sync_upd);
+ }
+ dma_fence_end_signalling(cookie);
+
+ list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
+ list_del_init(&job->node);
+ panthor_job_put(&job->base);
+ }
+}
+
+static void group_term_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, term_work);
+
+ group_term_post_processing(group);
+ group_put(group);
+}
+
+static void
+tick_ctx_cleanup(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx)
+{
+ struct panthor_group *group, *tmp;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
+ list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
+ /* If everything went fine, we should only have groups
+ * to be terminated in the old_groups lists.
+ */
+ drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+ group_can_run(group));
+
+ if (!group_can_run(group)) {
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ } else if (group->csg_id >= 0) {
+ list_del_init(&group->run_node);
+ } else {
+ list_move(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ }
+ group_put(group);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ /* If everything went fine, the groups to schedule lists should
+ * be empty.
+ */
+ drm_WARN_ON(&sched->ptdev->base,
+ !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
+
+ list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
+ if (group->csg_id >= 0) {
+ list_del_init(&group->run_node);
+ } else {
+ list_move(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ }
+ group_put(group);
+ }
+ }
+}
+
+static void
+tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
+{
+ struct panthor_group *group, *tmp;
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_csg_slot *csg_slot;
+ int prio, new_csg_prio = MAX_CSG_PRIO, i;
+ u32 free_csg_slots = 0;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ int ret;
+
+ csgs_upd_ctx_init(&upd_ctx);
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ /* Suspend or terminate evicted groups. */
+ list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
+ bool term = !group_can_run(group);
+ int csg_id = group->csg_id;
+
+ if (drm_WARN_ON(&ptdev->base, csg_id < 0))
+ continue;
+
+ csg_slot = &sched->csg_slots[csg_id];
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
+ CSG_STATE_MASK);
+ }
+
+ /* Update priorities on already running groups. */
+ list_for_each_entry(group, &ctx->groups[prio], run_node) {
+ struct panthor_fw_csg_iface *csg_iface;
+ int csg_id = group->csg_id;
+
+ if (csg_id < 0) {
+ new_csg_prio--;
+ continue;
+ }
+
+ csg_slot = &sched->csg_slots[csg_id];
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ if (csg_slot->priority == new_csg_prio) {
+ new_csg_prio--;
+ continue;
+ }
+
+ panthor_fw_update_reqs(csg_iface, endpoint_req,
+ CSG_EP_REQ_PRIORITY(new_csg_prio),
+ CSG_EP_REQ_PRIORITY_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+ new_csg_prio--;
+ }
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ return;
+ }
+
+ /* Unbind evicted groups. */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
+ /* This group is gone. Process interrupts to clear
+ * any pending interrupts before we start the new
+ * group.
+ */
+ if (group->csg_id >= 0)
+ sched_process_csg_irq_locked(ptdev, group->csg_id);
+
+ group_unbind_locked(group);
+ }
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ if (!sched->csg_slots[i].group)
+ free_csg_slots |= BIT(i);
+ }
+
+ csgs_upd_ctx_init(&upd_ctx);
+ new_csg_prio = MAX_CSG_PRIO;
+
+ /* Start new groups. */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry(group, &ctx->groups[prio], run_node) {
+ int csg_id = group->csg_id;
+ struct panthor_fw_csg_iface *csg_iface;
+
+ if (csg_id >= 0) {
+ new_csg_prio--;
+ continue;
+ }
+
+ csg_id = ffs(free_csg_slots) - 1;
+ if (drm_WARN_ON(&ptdev->base, csg_id < 0))
+ break;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ csg_slot = &sched->csg_slots[csg_id];
+ group_bind_locked(group, csg_id);
+ csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ group->state == PANTHOR_CS_GROUP_SUSPENDED ?
+ CSG_STATE_RESUME : CSG_STATE_START,
+ CSG_STATE_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+ free_csg_slots &= ~BIT(csg_id);
+ }
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ return;
+ }
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
+ list_del_init(&group->run_node);
+
+ /* If the group has been destroyed while we were
+ * scheduling, ask for an immediate tick to
+ * re-evaluate as soon as possible and get rid of
+ * this dangling group.
+ */
+ if (group->destroyed)
+ ctx->immediate_tick = true;
+ group_put(group);
+ }
+
+ /* Return evicted groups to the idle or run queues. Groups
+ * that can no longer be run (because they've been destroyed
+ * or experienced an unrecoverable error) will be scheduled
+ * for destruction in tick_ctx_cleanup().
+ */
+ list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
+ if (!group_can_run(group))
+ continue;
+
+ if (group_is_idle(group))
+ list_move_tail(&group->run_node, &sched->groups.idle[prio]);
+ else
+ list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
+ group_put(group);
+ }
+ }
+
+ sched->used_csg_slot_count = ctx->group_count;
+ sched->might_have_idle_groups = ctx->idle_group_count > 0;
+}
+
+static u64
+tick_ctx_update_resched_target(struct panthor_scheduler *sched,
+ const struct panthor_sched_tick_ctx *ctx)
+{
+ /* We had space left, no need to reschedule until some external event happens. */
+ if (!tick_ctx_is_full(sched, ctx))
+ goto no_tick;
+
+ /* If idle groups were scheduled, no need to wake up until some external
+ * event happens (group unblocked, new job submitted, ...).
+ */
+ if (ctx->idle_group_count)
+ goto no_tick;
+
+ if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
+ goto no_tick;
+
+ /* If there are groups of the same priority waiting, we need to
+ * keep the scheduler ticking, otherwise, we'll just wait for
+ * new groups with higher priority to be queued.
+ */
+ if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
+ u64 resched_target = sched->last_tick + sched->tick_period;
+
+ if (time_before64(sched->resched_target, sched->last_tick) ||
+ time_before64(resched_target, sched->resched_target))
+ sched->resched_target = resched_target;
+
+ return sched->resched_target - sched->last_tick;
+ }
+
+no_tick:
+ sched->resched_target = U64_MAX;
+ return U64_MAX;
+}
+
+static void tick_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
+ tick_work.work);
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_sched_tick_ctx ctx;
+ u64 remaining_jiffies = 0, resched_delay;
+ u64 now = get_jiffies_64();
+ int prio, ret, cookie;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ goto out_dev_exit;
+
+ if (time_before64(now, sched->resched_target))
+ remaining_jiffies = sched->resched_target - now;
+
+ mutex_lock(&sched->lock);
+ if (panthor_device_reset_is_pending(sched->ptdev))
+ goto out_unlock;
+
+ tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
+ if (ctx.csg_upd_failed_mask)
+ goto out_cleanup_ctx;
+
+ if (remaining_jiffies) {
+ /* Scheduling forced in the middle of a tick. Only RT groups
+ * can preempt non-RT ones. Currently running RT groups can't be
+ * preempted.
+ */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
+ true, true);
+ if (prio == PANTHOR_CSG_PRIORITY_RT) {
+ tick_ctx_pick_groups_from_list(sched, &ctx,
+ &sched->groups.runnable[prio],
+ true, false);
+ }
+ }
+ }
+
+ /* First pick non-idle groups */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
+ true, false);
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+ }
+
+ /* If we have free CSG slots left, pick idle groups */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ /* Check the old_groups list first to avoid reprogramming the slots */
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
+ tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
+ false, false);
+ }
+
+ tick_ctx_apply(sched, &ctx);
+ if (ctx.csg_upd_failed_mask)
+ goto out_cleanup_ctx;
+
+ if (ctx.idle_group_count == ctx.group_count) {
+ panthor_devfreq_record_idle(sched->ptdev);
+ if (sched->pm.has_ref) {
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+ sched->pm.has_ref = false;
+ }
+ } else {
+ panthor_devfreq_record_busy(sched->ptdev);
+ if (!sched->pm.has_ref) {
+ pm_runtime_get(ptdev->base.dev);
+ sched->pm.has_ref = true;
+ }
+ }
+
+ sched->last_tick = now;
+ resched_delay = tick_ctx_update_resched_target(sched, &ctx);
+ if (ctx.immediate_tick)
+ resched_delay = 0;
+
+ if (resched_delay != U64_MAX)
+ sched_queue_delayed_work(sched, tick, resched_delay);
+
+out_cleanup_ctx:
+ tick_ctx_cleanup(sched, &ctx);
+
+out_unlock:
+ mutex_unlock(&sched->lock);
+ pm_runtime_mark_last_busy(ptdev->base.dev);
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+}
+
+static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
+{
+ struct panthor_queue *queue = group->queues[queue_idx];
+ union {
+ struct panthor_syncobj_64b sync64;
+ struct panthor_syncobj_32b sync32;
+ } *syncobj;
+ bool result;
+ u64 value;
+
+ syncobj = panthor_queue_get_syncwait_obj(group, queue);
+ if (!syncobj)
+ return -EINVAL;
+
+ value = queue->syncwait.sync64 ?
+ syncobj->sync64.seqno :
+ syncobj->sync32.seqno;
+
+ if (queue->syncwait.gt)
+ result = value > queue->syncwait.ref;
+ else
+ result = value <= queue->syncwait.ref;
+
+ if (result)
+ panthor_queue_put_syncwait_obj(queue);
+
+ return result;
+}
+
+static void sync_upd_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work,
+ struct panthor_scheduler,
+ sync_upd_work);
+ struct panthor_group *group, *tmp;
+ bool immediate_tick = false;
+
+ mutex_lock(&sched->lock);
+ list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
+ u32 tested_queues = group->blocked_queues;
+ u32 unblocked_queues = 0;
+
+ while (tested_queues) {
+ u32 cs_id = ffs(tested_queues) - 1;
+ int ret;
+
+ ret = panthor_queue_eval_syncwait(group, cs_id);
+ drm_WARN_ON(&group->ptdev->base, ret < 0);
+ if (ret)
+ unblocked_queues |= BIT(cs_id);
+
+ tested_queues &= ~BIT(cs_id);
+ }
+
+ if (unblocked_queues) {
+ group->blocked_queues &= ~unblocked_queues;
+
+ if (group->csg_id < 0) {
+ list_move(&group->run_node,
+ &sched->groups.runnable[group->priority]);
+ if (group->priority == PANTHOR_CSG_PRIORITY_RT)
+ immediate_tick = true;
+ }
+ }
+
+ if (!group->blocked_queues)
+ list_del_init(&group->wait_node);
+ }
+ mutex_unlock(&sched->lock);
+
+ if (immediate_tick)
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct list_head *queue = &sched->groups.runnable[group->priority];
+ u64 delay_jiffies = 0;
+ bool was_idle;
+ u64 now;
+
+ if (!group_can_run(group))
+ return;
+
+ /* All updated queues are blocked, no need to wake up the scheduler. */
+ if ((queue_mask & group->blocked_queues) == queue_mask)
+ return;
+
+ was_idle = group_is_idle(group);
+ group->idle_queues &= ~queue_mask;
+
+ /* Don't mess with the lists if we're in the middle of a reset. */
+ if (atomic_read(&sched->reset.in_progress))
+ return;
+
+ if (was_idle && !group_is_idle(group))
+ list_move_tail(&group->run_node, queue);
+
+ /* RT groups are preemptive. */
+ if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
+ sched_queue_delayed_work(sched, tick, 0);
+ return;
+ }
+
+ /* Some groups might be idle, force an immediate tick to
+ * re-evaluate.
+ */
+ if (sched->might_have_idle_groups) {
+ sched_queue_delayed_work(sched, tick, 0);
+ return;
+ }
+
+ /* Scheduler is ticking, nothing to do. */
+ if (sched->resched_target != U64_MAX) {
+ /* If there are free slots, force an immediate tick. */
+ if (sched->used_csg_slot_count < sched->csg_slot_count)
+ sched_queue_delayed_work(sched, tick, 0);
+
+ return;
+ }
+
+ /* Scheduler tick was off, recalculate the resched_target based on the
+ * last tick event, and queue the scheduler work.
+ */
+ now = get_jiffies_64();
+ sched->resched_target = sched->last_tick + sched->tick_period;
+ if (sched->used_csg_slot_count == sched->csg_slot_count &&
+ time_before64(now, sched->resched_target))
+ delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
+
+ sched_queue_delayed_work(sched, tick, delay_jiffies);
+}
+
+static void queue_stop(struct panthor_queue *queue,
+ struct panthor_job *bad_job)
+{
+ drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
+}
+
+static void queue_start(struct panthor_queue *queue)
+{
+ struct panthor_job *job;
+
+ /* Re-assign the parent fences. */
+ list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+
+ drm_sched_start(&queue->scheduler, true);
+}
+
+static void panthor_group_stop(struct panthor_group *group)
+{
+ struct panthor_scheduler *sched = group->ptdev->scheduler;
+
+ lockdep_assert_held(&sched->reset.lock);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ queue_stop(group->queues[i], NULL);
+
+ group_get(group);
+ list_move_tail(&group->run_node, &sched->reset.stopped_groups);
+}
+
+static void panthor_group_start(struct panthor_group *group)
+{
+ struct panthor_scheduler *sched = group->ptdev->scheduler;
+
+ lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ queue_start(group->queues[i]);
+
+ if (group_can_run(group)) {
+ list_move_tail(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ } else {
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+
+ group_put(group);
+}
+
+static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+/**
+ * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
+ * @ptdev: Device.
+ */
+void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
+{
+ /* Force a tick to immediately kill faulty groups. */
+ if (ptdev->scheduler)
+ panthor_sched_immediate_tick(ptdev);
+}
+
+void panthor_sched_resume(struct panthor_device *ptdev)
+{
+ /* Force a tick to re-evaluate after a resume. */
+ panthor_sched_immediate_tick(ptdev);
+}
+
+void panthor_sched_suspend(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ struct panthor_group *group;
+ u32 suspended_slots;
+ u32 i;
+
+ mutex_lock(&sched->lock);
+ csgs_upd_ctx_init(&upd_ctx);
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+
+ if (csg_slot->group) {
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
+ group_can_run(csg_slot->group) ?
+ CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
+ CSG_STATE_MASK);
+ }
+ }
+
+ suspended_slots = upd_ctx.update_mask;
+
+ csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ suspended_slots &= ~upd_ctx.timedout_mask;
+
+ if (upd_ctx.timedout_mask) {
+ u32 slot_mask = upd_ctx.timedout_mask;
+
+ drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
+ csgs_upd_ctx_init(&upd_ctx);
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ CSG_STATE_TERMINATE,
+ CSG_STATE_MASK);
+ slot_mask &= ~BIT(csg_id);
+ }
+
+ csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+
+ slot_mask = upd_ctx.timedout_mask;
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ /* The terminate command timed out, but the soft-reset will
+ * automatically terminate all active groups anyway, so force
+ * the group state to terminated here.
+ */
+ if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
+ csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ slot_mask &= ~BIT(csg_id);
+ }
+ }
+
+ /* Flush L2 and LSC caches to make sure suspend state is up-to-date.
+ * If the flush fails, flag all queues for termination.
+ */
+ if (suspended_slots) {
+ bool flush_caches_failed = false;
+ u32 slot_mask = suspended_slots;
+
+ if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
+ flush_caches_failed = true;
+
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ if (flush_caches_failed)
+ csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ else
+ csg_slot_sync_update_locked(ptdev, csg_id);
+
+ slot_mask &= ~BIT(csg_id);
+ }
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+
+ group = csg_slot->group;
+ if (!group)
+ continue;
+
+ group_get(group);
+
+ if (group->csg_id >= 0)
+ sched_process_csg_irq_locked(ptdev, group->csg_id);
+
+ group_unbind_locked(group);
+
+ drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
+
+ if (group_can_run(group)) {
+ list_add(&group->run_node,
+ &sched->groups.idle[group->priority]);
+ } else {
+ /* We don't bother stopping the scheduler if the group is
+ * faulty, the group termination work will finish the job.
+ */
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+ group_put(group);
+ }
+ mutex_unlock(&sched->lock);
+}
+
+void panthor_sched_pre_reset(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group, *group_tmp;
+ u32 i;
+
+ mutex_lock(&sched->reset.lock);
+ atomic_set(&sched->reset.in_progress, true);
+
+ /* Cancel all scheduler works. Once this is done, these works can't be
+ * scheduled again until the reset operation is complete.
+ */
+ cancel_work_sync(&sched->sync_upd_work);
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ panthor_sched_suspend(ptdev);
+
+ /* Stop all groups that might still accept jobs, so we don't get passed
+ * new jobs while we're resetting.
+ */
+ for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
+ /* All groups should be in the idle lists. */
+ drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
+ list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
+ panthor_group_stop(group);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
+ list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
+ panthor_group_stop(group);
+ }
+
+ mutex_unlock(&sched->reset.lock);
+}
+
+void panthor_sched_post_reset(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group, *group_tmp;
+
+ mutex_lock(&sched->reset.lock);
+
+ list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+ panthor_group_start(group);
+
+ /* We're done resetting the GPU, clear the reset.in_progress bit so we can
+ * kick the scheduler.
+ */
+ atomic_set(&sched->reset.in_progress, false);
+ mutex_unlock(&sched->reset.lock);
+
+ sched_queue_delayed_work(sched, tick, 0);
+
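+ /* Kick the sync update work to process sync object updates that may
+ * have been missed while the scheduler works were cancelled.
+ */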
+ sched_queue_work(sched, sync_upd);
+}
+
+static void group_sync_upd_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, sync_upd_work);
+ struct panthor_job *job, *job_tmp;
+ LIST_HEAD(done_jobs);
+ u32 queue_idx;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
+ for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
+ struct panthor_queue *queue = group->queues[queue_idx];
+ struct panthor_syncobj_64b *syncobj;
+
+ if (!queue)
+ continue;
+
+ syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
+
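+ /* A job is considered done once the syncobj seqno, bumped by the
+ * SYNC_ADD64 at the end of its call sequence, has caught up with the
+ * done_fence seqno.
+ */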
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ if (!job->call_info.size)
+ continue;
+
+ if (syncobj->seqno < job->done_fence->seqno)
+ break;
+
+ list_move_tail(&job->node, &done_jobs);
+ dma_fence_signal_locked(job->done_fence);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+ }
+ dma_fence_end_signalling(cookie);
+
+ list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
+ list_del_init(&job->node);
+ panthor_job_put(&job->base);
+ }
+
+ group_put(group);
+}
+
+static struct dma_fence *
+queue_run_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+ u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
+ u64 addr_reg = ptdev->csif_info.cs_reg_count -
+ ptdev->csif_info.unpreserved_cs_reg_count;
+ u64 val_reg = addr_reg + 2;
+ u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
+ job->queue_idx * sizeof(struct panthor_syncobj_64b);
+ u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
+ struct dma_fence *done_fence;
+ int ret;
+
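+ /* Each job is turned into a fixed-size instruction sequence: flush the
+ * GPU caches, CALL the user command stream, then SYNC_ADD64 the
+ * per-queue syncobj so group_sync_upd_work() can signal the done fence.
+ */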
+ u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
+ /* MOV32 rX+2, cs.latest_flush */
+ (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
+
+ /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
+ (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
+
+ /* MOV48 rX:rX+1, cs.start */
+ (1ull << 56) | (addr_reg << 48) | job->call_info.start,
+
+ /* MOV32 rX+2, cs.size */
+ (2ull << 56) | (val_reg << 48) | job->call_info.size,
+
+ /* WAIT(0) => waits for FLUSH_CACHE2 instruction */
+ (3ull << 56) | (1 << 16),
+
+ /* CALL rX:rX+1, rX+2 */
+ (32ull << 56) | (addr_reg << 40) | (val_reg << 32),
+
+ /* MOV48 rX:rX+1, sync_addr */
+ (1ull << 56) | (addr_reg << 48) | sync_addr,
+
+ /* MOV48 rX+2, #1 */
+ (1ull << 56) | (val_reg << 48) | 1,
+
+ /* WAIT(all) */
+ (3ull << 56) | (waitall_mask << 16),
+
+ /* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
+ (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
+
+ /* ERROR_BARRIER, so we can recover from faults at job
+ * boundaries.
+ */
+ (47ull << 56),
+ };
+
+ /* Need to be cacheline aligned to please the prefetcher. */
+ static_assert(sizeof(call_instrs) % 64 == 0,
+ "call_instrs is not aligned on a cacheline");
+
+ /* Stream size is zero, nothing to do => return a NULL fence and let
+ * drm_sched signal the parent.
+ */
+ if (!job->call_info.size)
+ return NULL;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ return ERR_PTR(ret);
+
+ mutex_lock(&sched->lock);
+ if (!group_can_run(group)) {
+ done_fence = ERR_PTR(-ECANCELED);
+ goto out_unlock;
+ }
+
+ dma_fence_init(job->done_fence,
+ &panthor_queue_fence_ops,
+ &queue->fence_ctx.lock,
+ queue->fence_ctx.id,
+ atomic64_inc_return(&queue->fence_ctx.seqno));
+
+ memcpy(queue->ringbuf->kmap + ringbuf_insert,
+ call_instrs, sizeof(call_instrs));
+
+ panthor_job_get(&job->base);
+ spin_lock(&queue->fence_ctx.lock);
+ list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
+ spin_unlock(&queue->fence_ctx.lock);
+
+ job->ringbuf.start = queue->iface.input->insert;
+ job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
+
+ /* Make sure the ring buffer is updated before the INSERT
+ * register.
+ */
+ wmb();
+
+ queue->iface.input->extract = queue->iface.output->extract;
+ queue->iface.input->insert = job->ringbuf.end;
+
+ if (group->csg_id < 0) {
+ /* If the queue is blocked, we want to keep the timeout running, so we
+ * can detect unbounded waits and kill the group when that happens.
+ * Otherwise, we suspend the timeout so the time we spend waiting for
+ * a CSG slot is not counted.
+ */
+ if (!(group->blocked_queues & BIT(job->queue_idx)) &&
+ !queue->timeout_suspended) {
+ queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
+ queue->timeout_suspended = true;
+ }
+
+ group_schedule_locked(group, BIT(job->queue_idx));
+ } else {
+ gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
+ if (!sched->pm.has_ref &&
+ !(group->blocked_queues & BIT(job->queue_idx))) {
+ pm_runtime_get(ptdev->base.dev);
+ sched->pm.has_ref = true;
+ }
+ }
+
+ done_fence = dma_fence_get(job->done_fence);
+
+out_unlock:
+ mutex_unlock(&sched->lock);
+ pm_runtime_mark_last_busy(ptdev->base.dev);
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+
+ return done_fence;
+}
+
+static enum drm_gpu_sched_stat
+queue_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ struct panthor_group *group = job->group;
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+
+ drm_warn(&ptdev->base, "job timeout\n");
+
+ drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
+
+ queue_stop(queue, job);
+
+ mutex_lock(&sched->lock);
+ group->timedout = true;
+ if (group->csg_id >= 0) {
+ sched_queue_delayed_work(ptdev->scheduler, tick, 0);
+ } else {
+ /* Remove from the run queues, so the scheduler can't
+ * pick the group on the next tick.
+ */
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+
+ group_queue_work(group, term);
+ }
+ mutex_unlock(&sched->lock);
+
+ queue_start(queue);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void queue_free_job(struct drm_sched_job *sched_job)
+{
+ drm_sched_job_cleanup(sched_job);
+ panthor_job_put(sched_job);
+}
+
+static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
+ .run_job = queue_run_job,
+ .timedout_job = queue_timedout_job,
+ .free_job = queue_free_job,
+};
+
+static struct panthor_queue *
+group_create_queue(struct panthor_group *group,
+ const struct drm_panthor_queue_create *args)
+{
+ struct drm_gpu_scheduler *drm_sched;
+ struct panthor_queue *queue;
+ int ret;
+
+ if (args->pad[0] || args->pad[1] || args->pad[2])
+ return ERR_PTR(-EINVAL);
+
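+ /* The ring buffer size must be a power of two so the insert offset can
+ * simply be masked with (ringbuf_size - 1) at submission time.
+ */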
+ if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
+ !is_power_of_2(args->ringbuf_size))
+ return ERR_PTR(-EINVAL);
+
+ if (args->priority > CSF_MAX_QUEUE_PRIO)
+ return ERR_PTR(-EINVAL);
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return ERR_PTR(-ENOMEM);
+
+ queue->fence_ctx.id = dma_fence_context_alloc(1);
+ spin_lock_init(&queue->fence_ctx.lock);
+ INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
+
+ queue->priority = args->priority;
+
+ queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
+ args->ringbuf_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(queue->ringbuf)) {
+ ret = PTR_ERR(queue->ringbuf);
+ goto err_free_queue;
+ }
+
+ ret = panthor_kernel_bo_vmap(queue->ringbuf);
+ if (ret)
+ goto err_free_queue;
+
+ queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
+ &queue->iface.input,
+ &queue->iface.output,
+ &queue->iface.input_fw_va,
+ &queue->iface.output_fw_va);
+ if (IS_ERR(queue->iface.mem)) {
+ ret = PTR_ERR(queue->iface.mem);
+ goto err_free_queue;
+ }
+
+ ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
+ group->ptdev->scheduler->wq, 1,
+ args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
+ 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
+ group->ptdev->reset.wq,
+ NULL, "panthor-queue", group->ptdev->base.dev);
+ if (ret)
+ goto err_free_queue;
+
+ drm_sched = &queue->scheduler;
+ ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
+
+ return queue;
+
+err_free_queue:
+ group_free_queue(group, queue);
+ return ERR_PTR(ret);
+}
+
+#define MAX_GROUPS_PER_POOL 128
+
+int panthor_group_create(struct panthor_file *pfile,
+ const struct drm_panthor_group_create *group_args,
+ const struct drm_panthor_queue_create *queue_args)
+{
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
+ struct panthor_group *group = NULL;
+ u32 gid, i, suspend_size;
+ int ret;
+
+ if (group_args->pad)
+ return -EINVAL;
+
+ if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH)
+ return -EINVAL;
+
+ if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
+ (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
+ (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
+ return -EINVAL;
+
+ if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
+ hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
+ hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
+ return -EINVAL;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ spin_lock_init(&group->fatal_lock);
+ kref_init(&group->refcount);
+ group->state = PANTHOR_CS_GROUP_CREATED;
+ group->csg_id = -1;
+
+ group->ptdev = ptdev;
+ group->max_compute_cores = group_args->max_compute_cores;
+ group->compute_core_mask = group_args->compute_core_mask;
+ group->max_fragment_cores = group_args->max_fragment_cores;
+ group->fragment_core_mask = group_args->fragment_core_mask;
+ group->max_tiler_cores = group_args->max_tiler_cores;
+ group->tiler_core_mask = group_args->tiler_core_mask;
+ group->priority = group_args->priority;
+
+ INIT_LIST_HEAD(&group->wait_node);
+ INIT_LIST_HEAD(&group->run_node);
+ INIT_WORK(&group->term_work, group_term_work);
+ INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
+ INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
+ INIT_WORK(&group->release_work, group_release_work);
+
+ group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
+ if (!group->vm) {
+ ret = -EINVAL;
+ goto err_put_group;
+ }
+
+ suspend_size = csg_iface->control->suspend_size;
+ group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
+ if (IS_ERR(group->suspend_buf)) {
+ ret = PTR_ERR(group->suspend_buf);
+ group->suspend_buf = NULL;
+ goto err_put_group;
+ }
+
+ suspend_size = csg_iface->control->protm_suspend_size;
+ group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
+ if (IS_ERR(group->protm_suspend_buf)) {
+ ret = PTR_ERR(group->protm_suspend_buf);
+ group->protm_suspend_buf = NULL;
+ goto err_put_group;
+ }
+
+ group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
+ group_args->queues.count *
+ sizeof(struct panthor_syncobj_64b),
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(group->syncobjs)) {
+ ret = PTR_ERR(group->syncobjs);
+ goto err_put_group;
+ }
+
+ ret = panthor_kernel_bo_vmap(group->syncobjs);
+ if (ret)
+ goto err_put_group;
+
+ memset(group->syncobjs->kmap, 0,
+ group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+
+ for (i = 0; i < group_args->queues.count; i++) {
+ group->queues[i] = group_create_queue(group, &queue_args[i]);
+ if (IS_ERR(group->queues[i])) {
+ ret = PTR_ERR(group->queues[i]);
+ group->queues[i] = NULL;
+ goto err_put_group;
+ }
+
+ group->queue_count++;
+ }
+
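+ /* Mark all queues idle at group creation time. */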
+ group->idle_queues = GENMASK(group->queue_count - 1, 0);
+
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
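+ /* If a reset is in progress, park the group in the stopped list;
+ * panthor_group_start() will move it to the idle list once the reset
+ * is done.
+ */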
+ mutex_lock(&sched->reset.lock);
+ if (atomic_read(&sched->reset.in_progress)) {
+ panthor_group_stop(group);
+ } else {
+ mutex_lock(&sched->lock);
+ list_add_tail(&group->run_node,
+ &sched->groups.idle[group->priority]);
+ mutex_unlock(&sched->lock);
+ }
+ mutex_unlock(&sched->reset.lock);
+
+ return gid;
+
+err_put_group:
+ group_put(group);
+ return ret;
+}
+
+int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group;
+
+ group = xa_erase(&gpool->xa, group_handle);
+ if (!group)
+ return -EINVAL;
+
+ for (u32 i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ drm_sched_entity_destroy(&group->queues[i]->entity);
+ }
+
+ mutex_lock(&sched->reset.lock);
+ mutex_lock(&sched->lock);
+ group->destroyed = true;
+ if (group->csg_id >= 0) {
+ sched_queue_delayed_work(sched, tick, 0);
+ } else if (!atomic_read(&sched->reset.in_progress)) {
+ /* Remove from the run queues, so the scheduler can't
+ * pick the group on the next tick.
+ */
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+ mutex_unlock(&sched->lock);
+ mutex_unlock(&sched->reset.lock);
+
+ group_put(group);
+ return 0;
+}
+
+int panthor_group_get_state(struct panthor_file *pfile,
+ struct drm_panthor_group_get_state *get_state)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group;
+
+ if (get_state->pad)
+ return -EINVAL;
+
+ group = group_get(xa_load(&gpool->xa, get_state->group_handle));
+ if (!group)
+ return -EINVAL;
+
+ memset(get_state, 0, sizeof(*get_state));
+
+ mutex_lock(&sched->lock);
+ if (group->timedout)
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
+ if (group->fatal_queues) {
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
+ get_state->fatal_queues = group->fatal_queues;
+ }
+ mutex_unlock(&sched->lock);
+
+ group_put(group);
+ return 0;
+}
+
+int panthor_group_pool_create(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool;
+
+ gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
+ if (!gpool)
+ return -ENOMEM;
+
+ xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
+ pfile->groups = gpool;
+ return 0;
+}
+
+void panthor_group_pool_destroy(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_for_each(&gpool->xa, i, group)
+ panthor_group_destroy(pfile, i);
+
+ xa_destroy(&gpool->xa);
+ kfree(gpool);
+ pfile->groups = NULL;
+}
+
+static void job_release(struct kref *ref)
+{
+ struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
+
+ drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+
+ if (job->done_fence && job->done_fence->ops)
+ dma_fence_put(job->done_fence);
+ else
+ dma_fence_free(job->done_fence);
+
+ group_put(job->group);
+
+ kfree(job);
+}
+
+struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
+{
+ if (sched_job) {
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ kref_get(&job->refcount);
+ }
+
+ return sched_job;
+}
+
+void panthor_job_put(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ if (sched_job)
+ kref_put(&job->refcount, job_release);
+}
+
+struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ return job->group->vm;
+}
+
+struct drm_sched_job *
+panthor_job_create(struct panthor_file *pfile,
+ u16 group_handle,
+ const struct drm_panthor_queue_submit *qsubmit)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_job *job;
+ int ret;
+
+ if (qsubmit->pad)
+ return ERR_PTR(-EINVAL);
+
+ /* If stream_addr is zero, stream_size must be zero too, and vice versa. */
+ if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
+ return ERR_PTR(-EINVAL);
+
+ /* Make sure the address is aligned on a 64-byte (cacheline) boundary and
+ * the size is a multiple of 8 bytes (the instruction size).
+ */
+ if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
+ return ERR_PTR(-EINVAL);
+
+ /* Bits 30:24 must be zero. */
+ if (qsubmit->latest_flush & GENMASK(30, 24))
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&job->refcount);
+ job->queue_idx = qsubmit->queue_index;
+ job->call_info.size = qsubmit->stream_size;
+ job->call_info.start = qsubmit->stream_addr;
+ job->call_info.latest_flush = qsubmit->latest_flush;
+ INIT_LIST_HEAD(&job->node);
+
+ job->group = group_get(xa_load(&gpool->xa, group_handle));
+ if (!job->group) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
+ if (job->queue_idx >= job->group->queue_count ||
+ !job->group->queues[job->queue_idx]) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
+ job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
+ if (!job->done_fence) {
+ ret = -ENOMEM;
+ goto err_put_job;
+ }
+
+ ret = drm_sched_job_init(&job->base,
+ &job->group->queues[job->queue_idx]->entity,
+ 1, job->group);
+ if (ret)
+ goto err_put_job;
+
+ return &job->base;
+
+err_put_job:
+ panthor_job_put(&job->base);
+ return ERR_PTR(ret);
+}
+
+void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ /* Still not sure why we want USAGE_WRITE for external objects, since I
+ * was assuming this would be handled through explicit syncs being imported
+ * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
+ * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
+ */
+ panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+}
+
+void panthor_sched_unplug(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ mutex_lock(&sched->lock);
+ if (sched->pm.has_ref) {
+ pm_runtime_put(ptdev->base.dev);
+ sched->pm.has_ref = false;
+ }
+ mutex_unlock(&sched->lock);
+}
+
+static void panthor_sched_fini(struct drm_device *ddev, void *res)
+{
+ struct panthor_scheduler *sched = res;
+ int prio;
+
+ if (!sched || !sched->csg_slot_count)
+ return;
+
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+
+ if (sched->heap_alloc_wq)
+ destroy_workqueue(sched->heap_alloc_wq);
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
+ }
+
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
+}
+
+int panthor_sched_init(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
+ struct panthor_scheduler *sched;
+ u32 gpu_as_count, num_groups;
+ int prio, ret;
+
+ sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
+ if (!sched)
+ return -ENOMEM;
+
+ /* The highest bit in JOB_INT_* is reserved for global IRQs. That
+ * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
+ */
+ num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
+
+ /* The FW-side scheduler might deadlock if two groups with the same
+ * priority try to access a set of resources that overlaps, with part
+ * of the resources being allocated to one group and the other part to
+ * the other group, both groups waiting for the remaining resources to
+ * be allocated. To avoid that, it is recommended to assign each CSG a
+ * different priority. In theory we could allow several groups to have
+ * the same CSG priority if they don't request the same resources, but
+ * that makes the scheduling logic more complicated, so let's clamp
+ * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
+ */
+ num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
+
+ /* We need at least one AS for the MCU and one for the GPU contexts. */
+ gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
+ if (!gpu_as_count) {
+ drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
+ gpu_as_count + 1);
+ return -EINVAL;
+ }
+
+ sched->ptdev = ptdev;
+ sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
+ sched->csg_slot_count = num_groups;
+ sched->cs_slot_count = csg_iface->control->stream_num;
+ sched->as_slot_count = gpu_as_count;
+ ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
+ ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
+ ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
+
+ sched->last_tick = 0;
+ sched->resched_target = U64_MAX;
+ sched->tick_period = msecs_to_jiffies(10);
+ INIT_DELAYED_WORK(&sched->tick_work, tick_work);
+ INIT_WORK(&sched->sync_upd_work, sync_upd_work);
+ INIT_WORK(&sched->fw_events_work, process_fw_events_work);
+
+ ret = drmm_mutex_init(&ptdev->base, &sched->lock);
+ if (ret)
+ return ret;
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ INIT_LIST_HEAD(&sched->groups.runnable[prio]);
+ INIT_LIST_HEAD(&sched->groups.idle[prio]);
+ }
+ INIT_LIST_HEAD(&sched->groups.waiting);
+
+ ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&sched->reset.stopped_groups);
+
+ /* sched->heap_alloc_wq will be used for heap chunk allocation on
+ * tiler OOM events, which means we can't use the same workqueue as
+ * the scheduler, because work items queued by the scheduler are in
+ * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
+ * work around this limitation.
+ *
+ * FIXME: Ultimately, what we need is a failable/non-blocking GEM
+ * allocation path that we can call when a heap OOM is reported. The
+ * FW is smart enough to fall back on other methods if the kernel can't
+ * allocate memory, and fail the tiling job if none of these
+ * countermeasures worked.
+ *
+ * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
+ * system is running out of memory.
+ */
+ sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
+ sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!sched->wq || !sched->heap_alloc_wq) {
+ panthor_sched_fini(&ptdev->base, sched);
+ drm_err(&ptdev->base, "Failed to allocate the workqueues");
+ return -ENOMEM;
+ }
+
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
+ if (ret)
+ return ret;
+
+ ptdev->scheduler = sched;
+ return 0;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
new file mode 100644
index 000000000000..66438b1f331f
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_SCHED_H__
+#define __PANTHOR_SCHED_H__
+
+struct drm_exec;
+struct dma_fence;
+struct drm_file;
+struct drm_gem_object;
+struct drm_sched_job;
+struct drm_panthor_group_create;
+struct drm_panthor_queue_create;
+struct drm_panthor_group_get_state;
+struct drm_panthor_queue_submit;
+struct panthor_device;
+struct panthor_file;
+struct panthor_group_pool;
+struct panthor_job;
+
+int panthor_group_create(struct panthor_file *pfile,
+ const struct drm_panthor_group_create *group_args,
+ const struct drm_panthor_queue_create *queue_args);
+int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
+int panthor_group_get_state(struct panthor_file *pfile,
+ struct drm_panthor_group_get_state *get_state);
+
+struct drm_sched_job *
+panthor_job_create(struct panthor_file *pfile,
+ u16 group_handle,
+ const struct drm_panthor_queue_submit *qsubmit);
+struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
+struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
+void panthor_job_put(struct drm_sched_job *job);
+void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
+
+int panthor_group_pool_create(struct panthor_file *pfile);
+void panthor_group_pool_destroy(struct panthor_file *pfile);
+
+int panthor_sched_init(struct panthor_device *ptdev);
+void panthor_sched_unplug(struct panthor_device *ptdev);
+void panthor_sched_pre_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_suspend(struct panthor_device *ptdev);
+void panthor_sched_resume(struct panthor_device *ptdev);
+
+void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
+void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 281edab518cd..d6ea01f3797b 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -421,7 +421,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
{
uint32_t handle;
int idr_ret;
- int count = 0;
again:
idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
@@ -433,7 +432,6 @@ again:
handle = idr_ret;
if (handle >= qdev->rom->n_surfaces) {
- count++;
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, handle);
spin_unlock(&qdev->surf_id_idr_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index dd0f834d881c..506ae1f5e099 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -145,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct qxl_release *release;
struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret, num_relocs;
+ int i, ret;
int unwritten;
switch (cmd->type) {
@@ -200,7 +200,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
}
/* fill out reloc info structs */
- num_relocs = 0;
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
@@ -230,7 +229,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
reloc_info[i].dst_bo = cmd_bo;
reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
}
- num_relocs++;
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1e46b0a6e478..5893e27a7ae5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -29,9 +29,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-static int __qxl_bo_pin(struct qxl_bo *bo);
-static void __qxl_bo_unpin(struct qxl_bo *bo);
-
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
@@ -167,13 +164,9 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
goto out;
}
- r = __qxl_bo_pin(bo);
- if (r)
- return r;
-
r = ttm_bo_vmap(&bo->tbo, &bo->map);
if (r) {
- __qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
return r;
}
bo->map_count = 1;
@@ -246,7 +239,6 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
return;
bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map);
- __qxl_bo_unpin(bo);
}
int qxl_bo_vunmap(struct qxl_bo *bo)
@@ -290,12 +282,14 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo)
+int qxl_bo_pin_locked(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->tbo.base.dev;
int r;
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->tbo.pin_count) {
ttm_bo_pin(&bo->tbo);
return 0;
@@ -309,14 +303,16 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
return r;
}
-static void __qxl_bo_unpin(struct qxl_bo *bo)
+void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
+ dma_resv_assert_held(bo->tbo.base.resv);
+
ttm_bo_unpin(&bo->tbo);
}
/*
* Reserve the BO before pinning the object. If the BO was reserved
- * beforehand, use the internal version directly __qxl_bo_pin.
+ * beforehand, use the internal version directly qxl_bo_pin_locked.
*
*/
int qxl_bo_pin(struct qxl_bo *bo)
@@ -327,14 +323,14 @@ int qxl_bo_pin(struct qxl_bo *bo)
if (r)
return r;
- r = __qxl_bo_pin(bo);
+ r = qxl_bo_pin_locked(bo);
qxl_bo_unreserve(bo);
return r;
}
/*
* Reserve the BO before pinning the object. If the BO was reserved
- * beforehand, use the internal version directly __qxl_bo_unpin.
+ * beforehand, use the internal version directly qxl_bo_unpin_locked.
*
*/
int qxl_bo_unpin(struct qxl_bo *bo)
@@ -345,7 +341,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
if (r)
return r;
- __qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
qxl_bo_unreserve(bo);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 53392cb90eec..1cf5bc759101 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -67,6 +67,8 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin_locked(struct qxl_bo *bo);
+extern void qxl_bo_unpin_locked(struct qxl_bo *bo);
extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 9169c26357d3..19bf551a7b31 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -32,14 +32,14 @@ int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- return qxl_bo_pin(bo);
+ return qxl_bo_pin_locked(bo);
}
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
}
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 94947229888b..b7f22597ee95 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[1];
+ ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 86b8b770af19..0b1e19345f43 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 25201b9a5aae..1620f534f55f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index eae8a6389f5e..a979662eaa73 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index b5e97d95a19f..087d41e370fd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -26,11 +26,12 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3e5ff17e3caf..0999c8eaae94 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -132,7 +132,6 @@ extern int radeon_cik_support;
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
/* internal ring indices */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bb1f0a3371ab..10793a433bf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index efd18c8d84c8..5f1d24d3120c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
- radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
if (radeon_crtc == NULL)
return;
@@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
-#if 0
- radeon_crtc->mode_set.crtc = &radeon_crtc->base;
- radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
- radeon_crtc->mode_set.num_connectors = 0;
-#endif
-
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9ebe4a0b9a6c..4fb780d96f32 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -30,6 +30,7 @@
*/
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3fec3acdaf28..2ef201a072f1 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/iosys-map.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index fb9ecf5dbe2b..63d914f3414d 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -27,6 +27,8 @@
* Christian König
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_file.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4482c8c5f5ce..2d9d9f46f243 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -21,6 +21,7 @@
* Alex Deucher <alexdeucher@gmail.com>
*/
+#include <linux/debugfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b3cfc99f4d7e..a77881f035e7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -73,32 +73,21 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
- ret = radeon_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
bo->prime_shared_count++;
- radeon_bo_unreserve(bo);
return ret;
}
void radeon_gem_prime_unpin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int ret = 0;
-
- ret = radeon_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return;
radeon_bo_unpin(bo);
if (bo->prime_shared_count)
bo->prime_shared_count--;
- radeon_bo_unreserve(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 38048593bb4a..8d1d458286a8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -27,6 +27,8 @@
* Christian König
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_device.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2078b0000e22..5c65b6dfb99a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -30,6 +30,7 @@
* Dave Airlie
*/
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d7f552d441ab..d4d1501e6576 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 79709d26d983..bbc6ccabf788 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 0ae6331d6430..8643ff2eec46 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -66,9 +66,6 @@ void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc)
void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc)
{
struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
- struct rzg2l_du_crtc_state *state;
-
- state = to_rzg2l_crtc_state(crtc->crtc.state);
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a855c45ae7f3..bd7aa891b839 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -262,20 +262,21 @@ static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
- struct edid *edid;
int ret = 0;
mutex_lock(&dp->lock);
- edid = dp->edid;
- if (edid) {
+
+ if (dp->drm_edid) {
+ /* FIXME: get rid of drm_edid_raw() */
+ const struct edid *edid = drm_edid_raw(dp->drm_edid);
+
DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
- dp->sink_has_audio = drm_detect_monitor_audio(edid);
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
}
+
+ ret = drm_edid_connector_add_modes(connector);
+
mutex_unlock(&dp->lock);
return ret;
@@ -380,9 +381,13 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
return ret;
}
- kfree(dp->edid);
- dp->edid = drm_do_get_edid(&dp->connector,
- cdn_dp_get_edid_block, dp);
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = drm_edid_read_custom(&dp->connector,
+ cdn_dp_get_edid_block, dp);
+ drm_edid_connector_update(&dp->connector, dp->drm_edid);
+
+ dp->sink_has_audio = dp->connector.display_info.has_audio;
+
return 0;
}
@@ -488,8 +493,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
dp->max_lanes = 0;
dp->max_rate = 0;
if (!dp->connected) {
- kfree(dp->edid);
- dp->edid = NULL;
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = NULL;
}
return 0;
@@ -1131,8 +1136,8 @@ static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
pm_runtime_disable(dev);
if (dp->fw_loaded)
release_firmware(dp->fw);
- kfree(dp->edid);
- dp->edid = NULL;
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = NULL;
}
static const struct component_ops cdn_dp_component_ops = {
@@ -1259,7 +1264,6 @@ struct platform_driver cdn_dp_driver = {
.shutdown = cdn_dp_shutdown,
.driver = {
.name = "cdn-dp",
- .owner = THIS_MODULE,
.of_match_table = cdn_dp_dt_ids,
.pm = &cdn_dp_pm_ops,
},
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 5b2fed1f5f55..8e6e95d269da 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -70,7 +70,7 @@ struct cdn_dp_device {
struct drm_display_mode mode;
struct platform_device *audio_pdev;
struct work_struct event_work;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct mutex lock;
bool connected;
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 1d2261643743..3df2cfcf9998 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -606,18 +606,16 @@ inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = 0;
if (!hdmi->ddc)
return 0;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 95cd1b49eda8..784de990da1b 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -466,18 +466,16 @@ rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = 0;
if (!hdmi->ddc)
return 0;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index fdd768bbd487..62ebbdb16253 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -706,6 +706,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
const struct drm_format_info *info;
u16 hor_scl_mode, ver_scl_mode;
u16 hscl_filter_mode, vscl_filter_mode;
+ uint16_t cbcr_src_w = src_w;
+ uint16_t cbcr_src_h = src_h;
u8 gt2 = 0;
u8 gt4 = 0;
u32 val;
@@ -763,27 +765,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
if (info->is_yuv) {
- src_w /= info->hsub;
- src_h /= info->vsub;
+ cbcr_src_w /= info->hsub;
+ cbcr_src_h /= info->vsub;
gt4 = 0;
gt2 = 0;
- if (src_h >= (4 * dst_h)) {
+ if (cbcr_src_h >= (4 * dst_h)) {
gt4 = 1;
- src_h >>= 2;
- } else if (src_h >= (2 * dst_h)) {
+ cbcr_src_h >>= 2;
+ } else if (cbcr_src_h >= (2 * dst_h)) {
gt2 = 1;
- src_h >>= 1;
+ cbcr_src_h >>= 1;
}
- hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
- ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+ hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+ ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
- val = vop2_scale_factor(src_w, dst_w);
+ val = vop2_scale_factor(cbcr_src_w, dst_w);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
- val = vop2_scale_factor(src_h, dst_h);
+ val = vop2_scale_factor(cbcr_src_h, dst_h);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 77b76cff1adb..9a01aa450741 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -17,7 +17,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 48170694ac6b..18efb3fe1c00 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -17,9 +17,7 @@
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3c4f5a392b06..58c8161289fe 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ /*
+ * It's perfectly valid to initialize an entity without having a valid
+ * scheduler attached. It's just not valid to use the scheduler before it
+ * is initialized itself.
+ */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
- if (!sched_list[0]->sched_rq) {
- /* Warn drivers not to do this and to fix their DRM
- * calling order.
+ if (num_sched_list && !sched_list[0]->sched_rq) {
+ /* Every entry covered by num_sched_list should be non-NULL,
+ * so warn drivers not to do this and to fix their DRM
+ * calling order.
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 4bab93c4fefd..1799c12babf5 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -5,6 +5,7 @@
*/
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 2d1880c61b50..245b34adca5a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -214,20 +214,24 @@ sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_i2c ?: hdmi->i2c);
+
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+
+ if (!drm_edid)
return 0;
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
connector->display_info.is_hdmi ? "an HDMI" : "a DVI");
- drm_connector_update_edid_property(connector, edid);
- cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 84e7e6bc3a0c..782f51d3044a 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -8,7 +8,7 @@ config DRM_TEGRA
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index e48863a44556..b3be68b03610 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -23,9 +23,11 @@ static inline u64 get_size(int order, u64 chunk_size)
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
- u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
DRM_RND_STATE(prng, random_seed);
unsigned int i, count, *order;
+ struct drm_buddy_block *block;
+ unsigned long flags;
struct drm_buddy mm;
LIST_HEAD(allocated);
@@ -103,7 +105,7 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_size, bias_size);
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
/* single page with internal round_up */
KUNIT_ASSERT_FALSE_MSG(test,
@@ -113,7 +115,7 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, ps, bias_size);
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
/* random size within */
size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
@@ -153,14 +155,14 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
* unallocated, and ideally not always on the bias
* boundaries.
*/
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
} else {
list_splice_tail(&tmp, &allocated);
}
}
kfree(order);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
/*
@@ -220,7 +222,181 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
"buddy_alloc passed with bias(%x-%x), size=%u\n",
bias_start, bias_end, ps);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
+ drm_buddy_fini(&mm);
+
+ /*
+ * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
+ * zero. This will validate the bias range allocation in scenarios like system boot
+ * when no cleared blocks are available and exercise the fallback path too. The resulting
+ * blocks should always be dirty.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ flags),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ list_for_each_entry(block, &allocated, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+ drm_buddy_free_list(&mm, &allocated, 0);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_clear(struct kunit *test)
+{
+ unsigned long n_pages, total, i = 0;
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned long ps = SZ_4K;
+ struct drm_buddy_block *block;
+ const int max_order = 12;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+ unsigned int order;
+ u32 mm_size, size;
+ LIST_HEAD(dirty);
+ LIST_HEAD(clean);
+
+ mm_size = SZ_4K << max_order;
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ /*
+ * Idea is to allocate and free some random portion of the address space,
+ * returning those pages as non-dirty and randomly alternate between
+ * requesting dirty and non-dirty pages (not going over the limit
+ * we freed as non-dirty), putting that into two separate lists.
+ * Loop over both lists at the end checking that the dirty list
+ * is indeed all dirty pages and vice versa. Free it all again,
+ * keeping the dirty/clear status.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 5 * ps, ps, &allocated,
+ DRM_BUDDY_TOPDOWN_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 5 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+
+ n_pages = 10;
+ do {
+ unsigned long flags;
+ struct list_head *list;
+ int slot = i % 2;
+
+ if (slot == 0) {
+ list = &dirty;
+ flags = 0;
+ } else {
+ list = &clean;
+ flags = DRM_BUDDY_CLEAR_ALLOCATION;
+ }
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list,
+ flags),
+ "buddy_alloc hit an error size=%lu\n", ps);
+ } while (++i < n_pages);
+
+ list_for_each_entry(block, &clean, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
+
+ list_for_each_entry(block, &dirty, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+
+ /*
+ * Trying to go over the clear limit for some allocation.
+ * The allocation should never fail with reasonable page-size.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 10 * ps, ps, &clean,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 10 * ps);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty, 0);
+ drm_buddy_fini(&mm);
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ /*
+ * Create a new mm. Intentionally fragment the address space by creating
+ * two alternating lists. Free both lists, one as dirty the other as clean.
+ * Try to allocate double the previous size with matching min_page_size. The
+ * allocation should never fail as it calls the force_merge. Also check that
+ * the page is always dirty after force_merge. Free the page as dirty, then
+ * repeat the whole thing, increment the order until we hit the max_order.
+ */
+
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 2;
+
+ if (slot == 0)
+ list = &dirty;
+ else
+ list = &clean;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%lu\n", ps);
+ } while (++i < n_pages);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty, 0);
+
+ order = 1;
+ do {
+ size = SZ_4K << order;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ size, size, &allocated,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", size);
+ total = 0;
+ list_for_each_entry(block, &allocated, link) {
+ if (size != mm_size)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+ total += drm_buddy_block_size(&mm, block);
+ }
+ KUNIT_EXPECT_EQ(test, total, size);
+
+ drm_buddy_free_list(&mm, &allocated, 0);
+ } while (++order <= max_order);
+
+ drm_buddy_fini(&mm);
+
+ /*
+ * Create a new mm with a non power-of-two size. Allocate a random size, free as
+ * cleared and then call fini. This will ensure the multi-root force merge during
+ * fini.
+ */
+ mm_size = 12 * SZ_4K;
+ size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ size, ps, &allocated,
+ DRM_BUDDY_TOPDOWN_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", size);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
drm_buddy_fini(&mm);
}
@@ -269,7 +445,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%lu\n", 3 * ps);
- drm_buddy_free_list(&mm, &middle);
+ drm_buddy_free_list(&mm, &middle, 0);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -279,7 +455,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%lu\n", 2 * ps);
- drm_buddy_free_list(&mm, &right);
+ drm_buddy_free_list(&mm, &right, 0);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -294,7 +470,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc hit an error size=%lu\n", 2 * ps);
- drm_buddy_free_list(&mm, &left);
+ drm_buddy_free_list(&mm, &left, 0);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -306,7 +482,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
}
@@ -375,7 +551,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
top, max_order);
}
- drm_buddy_free_list(&mm, &holes);
+ drm_buddy_free_list(&mm, &holes, 0);
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
@@ -387,7 +563,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
list_splice_tail(&holes, &blocks);
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -482,7 +658,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -528,7 +704,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -563,7 +739,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_size(&mm, block),
BIT_ULL(mm.max_order) * PAGE_SIZE);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
}
@@ -584,6 +760,7 @@ static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_clear),
KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
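
As a rough illustration of the dirty/clear tracking exercised by the new drm_test_buddy_alloc_clear case, the allocator usage reduces to the pattern below. This is a sketch with arbitrary sizes, built only from the drm_buddy calls that appear in the hunks above:

#include <linux/list.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static int example_buddy_clear_usage(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_4K << 4, SZ_4K);
	if (err)
		return err;

	/* Freshly allocated blocks start out dirty. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_4K << 4, 4 * SZ_4K, SZ_4K,
				     &blocks, 0);
	if (err)
		goto out;

	/* Returning blocks with DRM_BUDDY_CLEARED marks them as cleared... */
	drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);

	/* ...so a later DRM_BUDDY_CLEAR_ALLOCATION request can be satisfied
	 * from the cleared pool instead of handing out dirty memory. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_4K << 4, 4 * SZ_4K, SZ_4K,
				     &blocks, DRM_BUDDY_CLEAR_ALLOCATION);
	if (!err)
		drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);
out:
	drm_buddy_fini(&mm);
	return err;
}
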
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index a0e494c806a9..f371518f8697 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -135,8 +135,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
dev_dbg(dev, "no panel/bridge for port %d\n", i);
continue;
} else if (ret) {
- dev_dbg(dev, "port %d probe returned %d\n", i, ret);
- return ret;
+ return dev_err_probe(dev, ret, "port %d probe failed\n", i);
}
if (panel) {
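
The tidss change above is the usual dev_err_probe() conversion: the helper logs the failure (or records the reason when the error is -EPROBE_DEFER, keeping the log quiet on deferred probes) and returns the error code unchanged, so a print-and-return pair collapses into one statement. A hedged sketch of the idiom, with a made-up helper context:

#include <linux/device.h>
#include <linux/errno.h>

static int example_probe_step(struct device *dev, int ret)
{
	/* ret is assumed to come from a helper that may return
	 * -EPROBE_DEFER or a real error code. */
	if (ret)
		return dev_err_probe(dev, ret, "port probe failed\n");

	return 0;
}
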
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 9aefd010acde..68093d6b6b16 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -6,7 +6,6 @@
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <video/display_timing.h>
@@ -308,7 +307,6 @@ static int panel_probe(struct platform_device *pdev)
struct backlight_device *backlight;
struct panel_module *panel_mod;
struct tilcdc_module *mod;
- struct pinctrl *pinctrl;
int ret;
/* bail out early if no DT data: */
@@ -342,10 +340,6 @@ static int panel_probe(struct platform_device *pdev)
tilcdc_module_init(mod, "panel", &panel_module_ops);
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "pins are not configured\n");
-
panel_mod->timings = of_get_display_timings(node);
if (!panel_mod->timings) {
dev_err(&pdev->dev, "could not get panel timings\n");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 7ce1c4617675..1d8fa07572c5 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
@@ -671,11 +672,26 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan
drm_dev_exit(idx);
}
+static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
+
+ sb->width = sdev->mode.hdisplay;
+ sb->height = sdev->mode.vdisplay;
+ sb->format = sdev->format;
+ sb->pitch[0] = sdev->pitch;
+ sb->map[0] = sdev->screen_base;
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = simpledrm_primary_plane_helper_atomic_check,
.atomic_update = simpledrm_primary_plane_helper_atomic_update,
.atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
+ .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 96a724e8f3ff..6396dece0db1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -402,7 +402,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_put);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
@@ -469,7 +468,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
if (ret != -EMULTIHOP)
break;
- ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
} while (!ret);
if (ret) {
@@ -698,7 +697,6 @@ EXPORT_SYMBOL(ttm_bo_unpin);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
- struct ttm_resource *mem,
bool no_wait_gpu)
{
struct dma_fence *fence;
@@ -724,64 +722,36 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
-/*
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
- struct ww_acquire_ctx *ticket;
- int ret;
-
- man = ttm_manager_type(bdev, place->mem_type);
- ticket = dma_resv_locking_ctx(bo->base.resv);
- do {
- ret = ttm_resource_alloc(bo, place, mem);
- if (likely(!ret))
- break;
- if (unlikely(ret != -ENOSPC))
- return ret;
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret != 0))
- return ret;
- } while (1);
-
- return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
-}
-
/**
- * ttm_bo_mem_space
+ * ttm_bo_alloc_resource - Allocate backing store for a BO
*
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
+ * @bo: Pointer to a struct ttm_buffer_object for which we want a resource
+ * @placement: Proposed new placement for the buffer object
* @ctx: if and how to sleep, lock buffers and alloc memory
+ * @force_space: If we should evict buffers to force space
+ * @res: The resulting struct ttm_resource.
*
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @placement, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
+ * Allocates a resource for the buffer object pointed to by @bo, using the
+ * placement flags in @placement, potentially evicting other buffer objects when
+ * @force_space is true.
+ * This function may sleep while waiting for resources to become available.
* Returns:
- * -EBUSY: No space available (only if no_wait == 1).
+ * -EBUSY: No space available (only if no_wait == true).
* -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
+static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx,
+ bool force_space,
+ struct ttm_resource **res)
{
struct ttm_device *bdev = bo->bdev;
- bool type_found = false;
+ struct ww_acquire_ctx *ticket;
int i, ret;
+ ticket = dma_resv_locking_ctx(bo->base.resv);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -790,98 +760,73 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- if (place->flags & TTM_PL_FLAG_FALLBACK)
- continue;
-
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
- type_found = true;
- ret = ttm_resource_alloc(bo, place, mem);
- if (ret == -ENOSPC)
+ if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
+
+ do {
+ ret = ttm_resource_alloc(bo, place, res);
+ if (unlikely(ret && ret != -ENOSPC))
+ return ret;
+ if (likely(!ret) || !force_space)
+ break;
+
+ ret = ttm_mem_evict_first(bdev, man, place, ctx,
+ ticket);
+ if (unlikely(ret == -EBUSY))
+ break;
+ if (unlikely(ret))
+ return ret;
+ } while (1);
+ if (ret)
continue;
- if (unlikely(ret))
- goto error;
- ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
+ ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
- ttm_resource_free(bo, mem);
+ ttm_resource_free(bo, res);
if (ret == -EBUSY)
continue;
- goto error;
+ return ret;
}
return 0;
}
- for (i = 0; i < placement->num_placement; ++i) {
- const struct ttm_place *place = &placement->placement[i];
- struct ttm_resource_manager *man;
-
- if (place->flags & TTM_PL_FLAG_DESIRED)
- continue;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- continue;
-
- type_found = true;
- ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
- if (likely(!ret))
- return 0;
-
- if (ret && ret != -EBUSY)
- goto error;
- }
-
- ret = -ENOSPC;
- if (!type_found) {
- pr_err(TTM_PFX "No compatible memory type found\n");
- ret = -EINVAL;
- }
-
-error:
- return ret;
+ return -ENOSPC;
}
-EXPORT_SYMBOL(ttm_bo_mem_space);
-static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx)
+/*
+ * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
+ *
+ * @bo: Pointer to a struct ttm_buffer_object for which we want a resource
+ * @placement: Proposed new placement for the buffer object
+ * @res: The resulting struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Tries both idle allocation and forced eviction of buffers. See
+ * ttm_bo_alloc_resource for details.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **res,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_resource *mem;
- struct ttm_place hop;
+ bool force_space = false;
int ret;
- dma_resv_assert_held(bo->base.resv);
+ do {
+ ret = ttm_bo_alloc_resource(bo, placement, ctx,
+ force_space, res);
+ force_space = !force_space;
+ } while (ret == -ENOSPC && force_space);
- /*
- * Determine where to move the buffer.
- *
- * If driver determines move is going to need
- * an extra step then it will return -EMULTIHOP
- * and the buffer will be moved to the temporary
- * stop and the driver will be called to make
- * the second hop.
- */
- ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
- if (ret)
- return ret;
-bounce:
- ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
- if (ret == -EMULTIHOP) {
- ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
- if (ret)
- goto out;
- /* try and move to final place now. */
- goto bounce;
- }
-out:
- if (ret)
- ttm_resource_free(bo, &mem);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_mem_space);
/**
* ttm_bo_validate
@@ -902,6 +847,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *res;
+ struct ttm_place hop;
+ bool force_space;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -912,20 +860,53 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
- /* Check whether we need to move buffer. */
- if (bo->resource && ttm_resource_compatible(bo->resource, placement))
- return 0;
+ force_space = false;
+ do {
+ /* Check whether we need to move buffer. */
+ if (bo->resource &&
+ ttm_resource_compatible(bo->resource, placement,
+ force_space))
+ return 0;
- /* Moving of pinned BOs is forbidden */
- if (bo->pin_count)
- return -EINVAL;
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
+ /*
+ * Determine where to move the buffer.
+ *
+ * If driver determines move is going to need
+ * an extra step then it will return -EMULTIHOP
+ * and the buffer will be moved to the temporary
+ * stop and the driver will be called to make
+ * the second hop.
+ */
+ ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
+ &res);
+ force_space = !force_space;
+ if (ret == -ENOSPC)
+ continue;
+ if (ret)
+ return ret;
+
+bounce:
+ ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
+ if (ret == -EMULTIHOP) {
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ /* try and move to final place now. */
+ if (!ret)
+ goto bounce;
+ }
+ if (ret) {
+ ttm_resource_free(bo, &res);
+ return ret;
+ }
+
+ } while (ret && force_space);
- ret = ttm_bo_move_buffer(bo, placement, ctx);
/* For backward compatibility with userspace */
if (ret == -ENOSPC)
return -ENOMEM;
- if (ret)
- return ret;
/*
* We might need to add a TTM.
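
To illustrate how the two-pass logic in ttm_bo_alloc_resource()/ttm_bo_mem_space() interacts with placement flags, a hypothetical driver placement could be declared as below: the DESIRED entry is only considered in the first, non-evicting pass, while the FALLBACK entry is only considered once force_space allows eviction. This is a sketch, not taken from the patch:

#include <linux/kernel.h>

#include <drm/ttm/ttm_placement.h>

/* Hypothetical two-entry placement: prefer VRAM while space is free,
 * fall back to GTT only when ttm_bo_alloc_resource() may evict. */
static const struct ttm_place example_places[] = {
	{
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_DESIRED,	/* skipped in the eviction pass */
	},
	{
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_FLAG_FALLBACK,	/* skipped in the idle pass */
	},
};

static const struct ttm_placement example_placement = {
	.num_placement = ARRAY_SIZE(example_places),
	.placement = example_places,
};
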
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 76027960054f..434cf0258000 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -27,6 +27,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
+#include <linux/debugfs.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 112438d965ff..6e1fd6985ffc 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index fb14f7716cf8..4a66b851b67d 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,8 +22,9 @@
* Authors: Christian König
*/
-#include <linux/iosys-map.h>
+#include <linux/debugfs.h>
#include <linux/io-mapping.h>
+#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_bo.h>
@@ -105,6 +106,7 @@ static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
pos->first = res;
pos->last = res;
} else {
+ WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
ttm_lru_bulk_move_pos_tail(pos, res);
}
}
@@ -295,11 +297,13 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
*
* @res: the resource to check
* @placement: the placement to check against
+ * @evicting: true if the caller is doing evictions
*
* Returns true if the placement is compatible.
*/
bool ttm_resource_compatible(struct ttm_resource *res,
- struct ttm_placement *placement)
+ struct ttm_placement *placement,
+ bool evicting)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -315,14 +319,20 @@ bool ttm_resource_compatible(struct ttm_resource *res,
if (res->mem_type != place->mem_type)
continue;
+ if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
+ !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
+ continue;
+
man = ttm_manager_type(bdev, res->mem_type);
if (man->func->compatible &&
!man->func->compatible(man, res, place, bo->base.size))
continue;
- if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
+ return true;
}
return false;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 578a7c37f00b..7b00ddf0ce49 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,10 +32,11 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/cc_platform.h>
-#include <linux/sched.h>
-#include <linux/shmem_fs.h>
+#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
@@ -92,7 +93,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
*/
if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
page_flags |= TTM_TT_FLAG_DECRYPTED;
- drm_info(ddev, "TT memory decryption enabled.");
+ drm_info_once(ddev, "TT memory decryption enabled.");
}
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a07ede668cc1..a165cbcdd27b 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -21,6 +21,7 @@
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
+#include <linux/vmalloc.h>
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3debf37e7d9b..28b7ddce7747 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -115,14 +115,13 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- v3d_priv->enabled_ns[i] = 0;
- v3d_priv->start_ns[i] = 0;
- v3d_priv->jobs_sent[i] = 0;
-
sched = &v3d->queue[i].sched;
drm_sched_entity_init(&v3d_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
+
+ memset(&v3d_priv->stats[i], 0, sizeof(v3d_priv->stats[i]));
+ seqcount_init(&v3d_priv->stats[i].lock);
}
v3d_perfmon_open_file(v3d_priv);
@@ -144,6 +143,20 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
+void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
+ u64 *active_runtime, u64 *jobs_completed)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqcount_begin(&stats->lock);
+ *active_runtime = stats->enabled_ns;
+ if (stats->start_ns)
+ *active_runtime += timestamp - stats->start_ns;
+ *jobs_completed = stats->jobs_completed;
+ } while (read_seqcount_retry(&stats->lock, seq));
+}
+
static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct v3d_file_priv *file_priv = file->driver_priv;
@@ -151,20 +164,22 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
enum v3d_queue queue;
for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+ struct v3d_stats *stats = &file_priv->stats[queue];
+ u64 active_runtime, jobs_completed;
+
+ v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed);
+
/* Note that, in case of a GPU reset, the time spent during an
* attempt of executing the job is not computed in the runtime.
*/
drm_printf(p, "drm-engine-%s: \t%llu ns\n",
- v3d_queue_to_string(queue),
- file_priv->start_ns[queue] ? file_priv->enabled_ns[queue]
- + timestamp - file_priv->start_ns[queue]
- : file_priv->enabled_ns[queue]);
+ v3d_queue_to_string(queue), active_runtime);
/* Note that we only count jobs that completed. Therefore, jobs
* that were resubmitted due to a GPU reset are not computed.
*/
drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
- v3d_queue_to_string(queue), file_priv->jobs_sent[queue]);
+ v3d_queue_to_string(queue), jobs_completed);
}
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 1950c723dde1..a2c516fe6d79 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -36,15 +36,27 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue)
return "UNKNOWN";
}
+struct v3d_stats {
+ u64 start_ns;
+ u64 enabled_ns;
+ u64 jobs_completed;
+
+ /*
+ * This seqcount is used to protect the access to the GPU stats
+ * variables. It must be used because, while we are reading the stats,
+ * IRQs can happen and the stats can be updated.
+ */
+ seqcount_t lock;
+};
+
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
u64 emit_seqno;
- u64 start_ns;
- u64 enabled_ns;
- u64 jobs_sent;
+ /* Stores the GPU stats for this queue in the global context. */
+ struct v3d_stats stats;
};
/* Performance monitor object. The perform lifetime is controlled by userspace
@@ -188,11 +200,8 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
- u64 start_ns[V3D_MAX_QUEUES];
-
- u64 enabled_ns[V3D_MAX_QUEUES];
-
- u64 jobs_sent[V3D_MAX_QUEUES];
+ /* Stores the GPU stats for a specific queue for this fd. */
+ struct v3d_stats stats[V3D_MAX_QUEUES];
};
struct v3d_bo {
@@ -508,6 +517,10 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);
+/* v3d_drv.c */
+void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
+ u64 *active_runtime, u64 *jobs_completed);
+
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
@@ -543,6 +556,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
+void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index afc565078c78..da8faf3b9011 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -247,10 +247,11 @@ v3d_gem_init(struct drm_device *dev)
int ret, i;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- v3d->queue[i].fence_context = dma_fence_context_alloc(1);
- v3d->queue[i].start_ns = 0;
- v3d->queue[i].enabled_ns = 0;
- v3d->queue[i].jobs_sent = 0;
+ struct v3d_queue_state *queue = &v3d->queue[i];
+
+ queue->fence_context = dma_fence_context_alloc(1);
+ memset(&queue->stats, 0, sizeof(queue->stats));
+ seqcount_init(&queue->stats.lock);
}
spin_lock_init(&v3d->mm_lock);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 2e04f6cb661e..d469bda52c1a 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -102,19 +102,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->bin_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_BIN];
-
- file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
- file->jobs_sent[V3D_BIN]++;
- v3d->queue[V3D_BIN].jobs_sent++;
-
- file->start_ns[V3D_BIN] = 0;
- v3d->queue[V3D_BIN].start_ns = 0;
-
- file->enabled_ns[V3D_BIN] += runtime;
- v3d->queue[V3D_BIN].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -123,19 +112,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->render_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
-
- file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
- file->jobs_sent[V3D_RENDER]++;
- v3d->queue[V3D_RENDER].jobs_sent++;
-
- file->start_ns[V3D_RENDER] = 0;
- v3d->queue[V3D_RENDER].start_ns = 0;
-
- file->enabled_ns[V3D_RENDER] += runtime;
- v3d->queue[V3D_RENDER].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -144,19 +122,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
struct v3d_fence *fence =
to_v3d_fence(v3d->csd_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_CSD];
-
- file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
- file->jobs_sent[V3D_CSD]++;
- v3d->queue[V3D_CSD].jobs_sent++;
-
- file->start_ns[V3D_CSD] = 0;
- v3d->queue[V3D_CSD].start_ns = 0;
-
- file->enabled_ns[V3D_CSD] += runtime;
- v3d->queue[V3D_CSD].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -192,19 +159,8 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
to_v3d_fence(v3d->tfu_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_TFU];
-
- file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
- file->jobs_sent[V3D_TFU]++;
- v3d->queue[V3D_TFU].jobs_sent++;
-
- file->start_ns[V3D_TFU] = 0;
- v3d->queue[V3D_TFU].start_ns = 0;
-
- file->enabled_ns[V3D_TFU] += runtime;
- v3d->queue[V3D_TFU].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 54015ad765c7..7cd8c335cd9b 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -105,11 +105,51 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
+static void
+v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+{
+ struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_stats *local_stats = &file->stats[queue];
+ u64 now = local_clock();
+
+ write_seqcount_begin(&local_stats->lock);
+ local_stats->start_ns = now;
+ write_seqcount_end(&local_stats->lock);
+
+ write_seqcount_begin(&global_stats->lock);
+ global_stats->start_ns = now;
+ write_seqcount_end(&global_stats->lock);
+}
+
+static void
+v3d_stats_update(struct v3d_stats *stats, u64 now)
+{
+ write_seqcount_begin(&stats->lock);
+ stats->enabled_ns += now - stats->start_ns;
+ stats->jobs_completed++;
+ stats->start_ns = 0;
+ write_seqcount_end(&stats->lock);
+}
+
+void
+v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+{
+ struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_stats *local_stats = &file->stats[queue];
+ u64 now = local_clock();
+
+ v3d_stats_update(local_stats, now);
+ v3d_stats_update(global_stats, now);
+}
+
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
@@ -141,9 +181,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
job->start, job->end);
- file->start_ns[V3D_BIN] = local_clock();
- v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN];
-
+ v3d_job_start_stats(&job->base, V3D_BIN);
v3d_switch_perfmon(v3d, &job->base);
/* Set the current and end address of the control list.
@@ -168,7 +206,6 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -196,9 +233,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
- file->start_ns[V3D_RENDER] = local_clock();
- v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER];
-
+ v3d_job_start_stats(&job->base, V3D_RENDER);
v3d_switch_perfmon(v3d, &job->base);
/* XXX: Set the QCFG */
@@ -217,7 +252,6 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -232,8 +266,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
- file->start_ns[V3D_TFU] = local_clock();
- v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU];
+ v3d_job_start_stats(&job->base, V3D_TFU);
V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
@@ -260,7 +293,6 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
int i, csd_cfg0_reg, csd_cfg_reg_count;
@@ -279,9 +311,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
- file->start_ns[V3D_CSD] = local_clock();
- v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD];
-
+ v3d_job_start_stats(&job->base, V3D_CSD);
v3d_switch_perfmon(v3d, &job->base);
csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
@@ -530,8 +560,6 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_cpu_job *job = to_cpu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
- u64 runtime;
v3d->cpu_job = job;
@@ -540,25 +568,13 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
return NULL;
}
- file->start_ns[V3D_CPU] = local_clock();
- v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
-
+ v3d_job_start_stats(&job->base, V3D_CPU);
trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
cpu_job_function[job->job_type](job);
trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
-
- runtime = local_clock() - file->start_ns[V3D_CPU];
-
- file->enabled_ns[V3D_CPU] += runtime;
- v3d->queue[V3D_CPU].enabled_ns += runtime;
-
- file->jobs_sent[V3D_CPU]++;
- v3d->queue[V3D_CPU].jobs_sent++;
-
- file->start_ns[V3D_CPU] = 0;
- v3d->queue[V3D_CPU].start_ns = 0;
+ v3d_job_update_stats(&job->base, V3D_CPU);
return NULL;
}
@@ -568,24 +584,12 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
- u64 runtime;
- file->start_ns[V3D_CACHE_CLEAN] = local_clock();
- v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN];
+ v3d_job_start_stats(job, V3D_CACHE_CLEAN);
v3d_clean_caches(v3d);
- runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN];
-
- file->enabled_ns[V3D_CACHE_CLEAN] += runtime;
- v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime;
-
- file->jobs_sent[V3D_CACHE_CLEAN]++;
- v3d->queue[V3D_CACHE_CLEAN].jobs_sent++;
-
- file->start_ns[V3D_CACHE_CLEAN] = 0;
- v3d->queue[V3D_CACHE_CLEAN].start_ns = 0;
+ v3d_job_update_stats(job, V3D_CACHE_CLEAN);
return NULL;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c
index d106845ba890..d610e355964f 100644
--- a/drivers/gpu/drm/v3d/v3d_sysfs.c
+++ b/drivers/gpu/drm/v3d/v3d_sysfs.c
@@ -15,16 +15,15 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
struct v3d_dev *v3d = to_v3d_dev(drm);
enum v3d_queue queue;
u64 timestamp = local_clock();
- u64 active_runtime;
ssize_t len = 0;
len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n");
for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
- if (v3d->queue[queue].start_ns)
- active_runtime = timestamp - v3d->queue[queue].start_ns;
- else
- active_runtime = 0;
+ struct v3d_stats *stats = &v3d->queue[queue].stats;
+ u64 active_runtime, jobs_completed;
+
+ v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed);
/* Each line will display the queue name, timestamp, the number
* of jobs sent to that queue and the runtime, as can be seen here:
@@ -38,9 +37,7 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n",
v3d_queue_to_string(queue),
- timestamp,
- v3d->queue[queue].jobs_sent,
- v3d->queue[queue].enabled_ns + active_runtime);
+ timestamp, jobs_completed, active_runtime);
}
return len;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index ab61e96e7e14..08e29fa82563 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -5,6 +5,7 @@
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d8751ea20303..d30f8e8e8967 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -412,15 +412,14 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
/*
- * NOTE: This function should really be called with
- * vc4_hdmi->mutex held, but doing so results in reentrancy
- * issues since cec_s_phys_addr_from_edid might call
- * .adap_enable, which leads to that funtion being called with
- * our mutex held.
+ * NOTE: This function should really be called with vc4_hdmi->mutex
+ * held, but doing so results in reentrancy issues since
+ * cec_s_phys_addr() might call .adap_enable, which leads to that
+ * function being called with our mutex held.
*
* A similar situation occurs with vc4_hdmi_reset_link() that
* will call into our KMS hooks if the scrambling was enabled.
@@ -435,12 +434,16 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
return;
}
- edid = drm_get_edid(connector, vc4_hdmi->ddc);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(vc4_hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+
+ if (!drm_edid)
return;
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- kfree(edid);
+ drm_edid_free(drm_edid);
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
@@ -492,28 +495,29 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
+ const struct drm_edid *drm_edid;
int ret = 0;
- struct edid *edid;
/*
- * NOTE: This function should really take vc4_hdmi->mutex, but
- * doing so results in reentrancy issues since
- * cec_s_phys_addr_from_edid might call .adap_enable, which
- * leads to that funtion being called with our mutex held.
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in reentrancy issues since cec_s_phys_addr() might call
+ * .adap_enable, which leads to that function being called with our mutex
+ * held.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
* the lock for now.
*/
- edid = drm_get_edid(connector, vc4_hdmi->ddc);
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(vc4_hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+ if (!drm_edid)
return 0;
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
@@ -2740,6 +2744,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
index = 1;
addr = of_get_address(dev->of_node, index, NULL, NULL);
+ if (!addr)
+ return -EINVAL;
vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
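
The CEC side of the vc4 (and sun4i) conversion relies on drm_edid_connector_update() filling in display_info.source_physical_address, which is then handed straight to the CEC core; calling it with a NULL EDID resets display_info so the adapter is told an invalid address on disconnect. A small sketch of that pattern, with the adapter pointer standing in for whatever the driver registered:

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <media/cec.h>

static void example_update_cec(struct drm_connector *connector,
			       struct cec_adapter *adap,
			       const struct drm_edid *drm_edid)
{
	/* Also valid with drm_edid == NULL: display_info is reset and the
	 * CEC core sees the no-longer-valid physical address, i.e.
	 * "unplugged". */
	drm_edid_connector_update(connector, drm_edid);
	cec_s_phys_addr(adap,
			connector->display_info.source_physical_address,
			false);
}
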
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 61e500b8c9da..40b4d084e3ce 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -61,9 +61,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
drm_calc_timestamping_constants(crtc, &crtc->mode);
@@ -88,10 +86,9 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
bool in_vblank_irq)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
*vblank_time = ktime_get();
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index e94479d9cd5b..46a4ab688a7f 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o
+ vmwgfx_gem.o vmwgfx_vkms.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 6806c05e57f6..3353e97687d1 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -87,14 +87,11 @@ struct ttm_object_file {
*
* @object_lock: lock that protects idr.
*
- * @object_count: Per device object count.
- *
* This is the per-device data structure needed for ttm object management.
*/
struct ttm_object_device {
spinlock_t object_lock;
- atomic_t object_count;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
struct idr idr;
@@ -431,7 +428,6 @@ ttm_object_device_init(const struct dma_buf_ops *ops)
return NULL;
spin_lock_init(&tdev->object_lock);
- atomic_set(&tdev->object_count, 0);
/*
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index ae2de914eb89..2731f6ded1c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -54,6 +54,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"
+#include <linux/vmalloc.h>
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b..717d624e9a05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ if (src_pages)
+ kvfree(src_pages);
+ if (dst_pages)
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4..00144632c600 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
+ buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -377,7 +378,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +396,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af..f349642e6190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 195ff8792e5a..dd4ca6a9c690 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
index 829df395c2ed..6e6beff9e262 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include <linux/vmalloc.h>
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d3e308fdfd5b..8f1730aeacc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
+#include "vmwgfx_vkms.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
@@ -53,6 +54,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
+#include <linux/vmalloc.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -666,11 +668,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want dma mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb, which can
+ * run out of available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
@@ -910,6 +913,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"Please switch to a supported graphics device to avoid problems.");
}
+ vmw_vkms_init(dev_priv);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
drm_info(&dev_priv->drm,
@@ -1195,6 +1200,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_svga_disable(dev_priv);
+ vmw_vkms_cleanup(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
@@ -1444,12 +1450,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
root, "vram_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
- root, "gmr_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
- root, "mob_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
- root, "system_mob_ttm");
+ if (vmw->has_gmr)
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+ root, "gmr_ttm");
+ if (vmw->has_mob) {
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+ root, "mob_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+ root, "system_mob_ttm");
+ }
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1624,6 +1633,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df6..4ecaea0026fc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -117,25 +117,8 @@ struct vmwgfx_hash_item {
unsigned long key;
};
-
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
- struct ttm_validate_buffer base;
- struct vmwgfx_hash_item hash;
- bool validate_as_mob;
-};
-
struct vmw_res_func;
-
/**
* struct vmw-resource - base class for hardware resources
*
@@ -445,15 +428,6 @@ struct vmw_sw_context{
struct vmw_legacy_display;
struct vmw_overlay;
-struct vmw_vga_topology_state {
- uint32_t width;
- uint32_t height;
- uint32_t primary;
- uint32_t pos_x;
- uint32_t pos_y;
-};
-
-
/*
* struct vmw_otable - Guest Memory OBject table metadata
*
@@ -501,7 +475,6 @@ struct vmw_private {
struct drm_device drm;
struct ttm_device bdev;
- struct drm_vma_offset_manager vma_manager;
u32 pci_id;
resource_size_t io_start;
resource_size_t vram_start;
@@ -642,6 +615,9 @@ struct vmw_private {
uint32 *devcaps;
+ bool vkms_enabled;
+ struct workqueue_struct *crc_workq;
+
/*
* mksGuestStat instance-descriptor and pid arrays
*/
@@ -836,6 +812,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
+int vmw_resource_clean(struct vmw_resource *res);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
@@ -1130,6 +1107,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index cc3086e649eb..2e52d73eba48 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,6 +35,7 @@
#include <linux/sync_file.h>
#include <linux/hashtable.h>
+#include <linux/vmalloc.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 2a0cda324703..5efc6a766f64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
}
event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->event.base.length = sizeof(*event);
+ event->event.base.length = sizeof(event->event);
event->event.user_data = user_data;
ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111..07185c108218 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -30,6 +30,8 @@
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
+#include <linux/debugfs.h>
+
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
@@ -48,33 +50,20 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
{
}
-static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
- struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_bo *vbo = to_vmw_bo(obj);
- int ret;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- vmw_bo_pin_reserved(vbo, do_pin);
-
- ttm_bo_unreserve(bo);
-
-err:
- return ret;
-}
+ vmw_bo_pin_reserved(vbo, true);
-static int vmw_gem_object_pin(struct drm_gem_object *obj)
-{
- return vmw_gem_pin_private(obj, true);
+ return 0;
}
static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
- vmw_gem_pin_private(obj, false);
+ struct vmw_bo *vbo = to_vmw_bo(obj);
+
+ vmw_bo_pin_reserved(vbo, false);
}
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
@@ -149,6 +138,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a1da5678c731..835d1eed8dd9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -31,6 +31,7 @@
#include <drm/vmwgfx_drm.h>
#include <linux/pci.h>
+#include <linux/vmalloc.h>
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed4..13b2820cae51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,6 +27,7 @@
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -37,9 +38,16 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>
+void vmw_du_init(struct vmw_display_unit *du)
+{
+ vmw_vkms_crtc_init(&du->crtc);
+}
+
void vmw_du_cleanup(struct vmw_display_unit *du)
{
struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
+
+ vmw_vkms_crtc_cleanup(&du->crtc);
drm_plane_cleanup(&du->primary);
if (vmw_cmd_supported(dev_priv))
drm_plane_cleanup(&du->cursor.base);
@@ -775,7 +783,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_y = du->hotspot_y + new_state->hotspot_y;
du->cursor_surface = vps->surf;
- du->cursor_bo = vps->bo;
if (!vps->surf && !vps->bo) {
vmw_cursor_update_position(dev_priv, false, 0, 0);
@@ -858,15 +865,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
-
- if (!ret && new_fb) {
- struct drm_crtc *crtc = new_state->crtc;
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-
- vmw_connector_state_to_vcs(du->connector.state);
- }
-
-
return ret;
}
@@ -933,6 +931,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +939,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+	 * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
@@ -965,15 +968,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ vmw_vkms_crtc_atomic_begin(crtc, state);
}
-
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-
/**
* vmw_du_crtc_duplicate_state - duplicate crtc state
* @crtc: DRM crtc
@@ -1361,7 +1358,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
@@ -1529,7 +1525,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
- vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -2040,6 +2035,29 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
"hotplug_mode_update", 0, 1);
}
+static void
+vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct vmw_private *vmw = vmw_priv(old_state->dev);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_tail(old_state);
+
+ if (vmw->vkms_enabled) {
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ (void)old_crtc_state;
+ flush_work(&du->vkms.crc_generator_work);
+ }
+ }
+}
+
+static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
+ .atomic_commit_tail = vmw_atomic_commit_tail,
+};
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -2059,6 +2077,7 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
+ dev->mode_config.helper_private = &vmw_mode_config_helpers;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e8..bf24f2f0dcfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -217,21 +217,11 @@ struct vmw_kms_dirty {
struct vmw_framebuffer {
struct drm_framebuffer base;
bool bo;
- uint32_t user_handle;
-};
-
-/*
- * Clip rectangle
- */
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
};
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_bo *buffer;
- struct list_head head;
bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
};
@@ -243,10 +233,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
@@ -359,7 +349,6 @@ struct vmw_display_unit {
struct vmw_cursor_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_bo *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -387,11 +376,25 @@ struct vmw_display_unit {
bool is_implicit;
int set_gui_x;
int set_gui_y;
-};
-struct vmw_validation_ctx {
- struct vmw_resource *res;
- struct vmw_bo *buf;
+ struct {
+ struct work_struct crc_generator_work;
+ struct hrtimer timer;
+ ktime_t period_ns;
+
+ /* protects concurrent access to the vblank handler */
+ atomic_t atomic_lock;
+ /* protected by @atomic_lock */
+ bool crc_enabled;
+ struct vmw_surface *surface;
+
+ /* protects concurrent access to the crc worker */
+ spinlock_t crc_state_lock;
+ /* protected by @crc_state_lock */
+ bool crc_pending;
+ u64 frame_start;
+ u64 frame_end;
+ } vkms;
};
#define vmw_crtc_to_du(x) \
@@ -403,6 +406,7 @@ struct vmw_validation_ctx {
/*
* Shared display unit functions - vmwgfx_kms.c
*/
+void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
@@ -489,8 +493,6 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state);
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state);
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
void vmw_du_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index c4db4aecca6c..5befc2719a49 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,7 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -241,33 +242,6 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
}
-/**
- * vmw_ldu_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed. Here's
- * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active
- * but since for LDU the display plane is closely tied to the
- * CRTC, it makes more sense to do those at plane update time.
- */
-static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
- * vmw_ldu_crtc_atomic_disable - Turns off CRTC
- *
- * @crtc: CRTC to be turned off
- * @state: Unused
- */
-static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
@@ -276,6 +250,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
@@ -418,9 +395,9 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_ldu_crtc_atomic_enable,
- .atomic_disable = vmw_ldu_crtc_atomic_disable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
+ .atomic_disable = vmw_vkms_crtc_atomic_disable,
};
@@ -541,6 +518,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
dev_priv->implicit_placement_property,
1);
+ vmw_du_init(&ldu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c..c99cad444991 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ca300c7427d2..848dba09981b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1064,6 +1064,22 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
end << PAGE_SHIFT);
}
+int vmw_resource_clean(struct vmw_resource *res)
+{
+ int ret = 0;
+
+ if (res->res_dirty) {
+ if (!res->func->clean)
+ return -EINVAL;
+
+ ret = res->func->clean(res);
+ if (ret)
+ return ret;
+ res->res_dirty = false;
+ }
+ return ret;
+}
+
/**
* vmw_resources_clean - Clean resources intersecting a mob range
* @vbo: The mob buffer object
@@ -1080,6 +1096,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
unsigned long res_start = start << PAGE_SHIFT;
unsigned long res_end = end << PAGE_SHIFT;
unsigned long last_cleaned = 0;
+ int ret;
/*
* Find the resource with lowest backup_offset that intersects the
@@ -1106,18 +1123,9 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
* intersecting the range.
*/
while (found) {
- if (found->res_dirty) {
- int ret;
-
- if (!found->func->clean)
- return -EINVAL;
-
- ret = found->func->clean(found);
- if (ret)
- return ret;
-
- found->res_dirty = false;
- }
+ ret = vmw_resource_clean(found);
+ if (ret)
+ return ret;
last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 30c3ad27b662..df0039a8ef29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -27,11 +27,13 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
@@ -89,7 +91,6 @@ struct vmw_kms_sou_define_gmrfb {
struct vmw_screen_object_unit {
struct vmw_display_unit base;
- unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_bo *buffer; /**< Backing store buffer */
bool defined;
@@ -240,7 +241,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
int x, y;
sou->buffer = vps->bo;
- sou->buffer_size = vps->bo_size;
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
@@ -255,7 +255,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
} else {
sou->buffer = NULL;
- sou->buffer_size = 0;
}
}
@@ -271,19 +270,6 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
}
/**
- * vmw_sou_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed.
- */
-static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
* vmw_sou_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
@@ -305,6 +291,9 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+
if (sou->defined) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
if (ret)
@@ -320,6 +309,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
/*
@@ -797,8 +789,8 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_sou_crtc_atomic_enable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_sou_crtc_atomic_disable,
};
@@ -908,6 +900,9 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&sou->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 3c8414a13dba..2041c4d48daa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -27,12 +27,14 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -407,16 +409,6 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
crtc->x, crtc->y);
}
-
-static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -424,7 +416,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
struct vmw_screen_target_display_unit *stdu;
int ret;
-
if (!crtc) {
DRM_ERROR("CRTC is NULL\n");
return;
@@ -433,6 +424,9 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
if (ret)
@@ -770,7 +764,6 @@ out_unref:
return ret;
}
-
/*
* Screen Target CRTC dispatch table
*/
@@ -782,6 +775,12 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
+ .get_crc_sources = vmw_vkms_get_crc_sources,
+ .set_crc_source = vmw_vkms_set_crc_source,
+ .verify_crc_source = vmw_vkms_verify_crc_source,
};
@@ -1413,6 +1412,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
vmw_fence_obj_unreference(&fence);
}
+static void
+vmw_stdu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_set_crc_surface(crtc, stdu->display_srf);
+ vmw_vkms_crtc_atomic_flush(crtc, state);
+}
static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
@@ -1453,12 +1463,11 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = {
};
static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
- .prepare = vmw_stdu_crtc_helper_prepare,
.mode_set_nofb = vmw_stdu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_stdu_crtc_atomic_enable,
+ .atomic_flush = vmw_stdu_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_stdu_crtc_atomic_disable,
};
@@ -1575,6 +1584,9 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&stdu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb..621d98b376bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
deleted file mode 100644
index 90097d04b45f..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "vmwgfx_drv.h"
-
-static int vmw_bo_vm_lookup(struct ttm_device *bdev,
- struct drm_file *filp,
- unsigned long offset,
- unsigned long pages,
- struct ttm_buffer_object **p_bo)
-{
- struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
- struct drm_device *drm = &dev_priv->drm;
- struct drm_vma_offset_node *node;
- int ret;
-
- *p_bo = NULL;
-
- drm_vma_offset_lock_lookup(bdev->vma_manager);
-
- node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
- if (likely(node)) {
- *p_bo = container_of(node, struct ttm_buffer_object,
- base.vma_node);
- *p_bo = ttm_bo_get_unless_zero(*p_bo);
- }
-
- drm_vma_offset_unlock_lookup(bdev->vma_manager);
-
- if (!*p_bo) {
- drm_err(drm, "Could not find buffer object to map\n");
- return -EINVAL;
- }
-
- if (!drm_vma_node_is_allowed(node, filp)) {
- ret = -EACCES;
- goto out_no_access;
- }
-
- return 0;
-out_no_access:
- ttm_bo_put(*p_bo);
- return ret;
-}
-
-int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- static const struct vm_operations_struct vmw_vm_ops = {
- .pfn_mkwrite = vmw_bo_vm_mkwrite,
- .page_mkwrite = vmw_bo_vm_mkwrite,
- .fault = vmw_bo_vm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- };
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo;
- int ret;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
- return -EINVAL;
-
- ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_mmap_obj(vma, bo);
- if (unlikely(ret != 0))
- goto out_unref;
-
- vma->vm_ops = &vmw_vm_ops;
-
- /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
- if (!is_cow_mapping(vma->vm_flags))
- vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
-
- ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
-
- return 0;
-
-out_unref:
- ttm_bo_put(bo);
- return ret;
-}
-
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index aaacbdcbd742..e7625b3f71e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -32,9 +32,6 @@
#include <linux/slab.h>
-
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page;
-
- if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
- ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
- }
-
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (ctx->vm)
- ctx->vm_size_left -= PAGE_SIZE;
-
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
- if (ctx->vm && ctx->total_mem) {
- ctx->total_mem = 0;
- ctx->vm_size_left = 0;
- }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 240ee0c4ebfd..353d837907d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -52,10 +52,6 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
- * @vm: A pointer to the memory reservation interface or NULL if no
- * memory reservation is needed.
- * @vm_size_left: Amount of reserved memory that so far has not been allocated.
- * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct vmw_sw_context *sw_context;
@@ -68,9 +64,6 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
- struct vmw_validation_mem *vm;
- size_t vm_size_left;
- size_t total_mem;
};
struct vmw_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
new file mode 100644
index 000000000000..7e93a45948f7
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_vkms.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
+
+#include "vmw_surface_cache.h"
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+
+#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable"
+
+static int
+vmw_surface_sync(struct vmw_private *vmw,
+ struct vmw_surface *surf)
+{
+ int ret;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+
+ vmw_resource_clean(&surf->res);
+
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
+ goto done;
+ }
+
+ ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
+ ttm_bo_unreserve(&bo->tbo);
+ goto done;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&bo->tbo);
+done:
+ return ret;
+}
+
+static int
+compute_crc(struct drm_crtc *crtc,
+ struct vmw_surface *surf,
+ u32 *crc)
+{
+ u8 *mapped_surface;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(surf->metadata.format);
+ u32 row_pitch_bytes;
+ SVGA3dSize blocks;
+ u32 y;
+
+ *crc = 0;
+
+ vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
+ row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
+ WARN_ON(!bo);
+ mapped_surface = vmw_bo_map_and_cache(bo);
+
+ for (y = 0; y < blocks.height; y++) {
+ *crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
+ mapped_surface += row_pitch_bytes;
+ }
+
+ vmw_bo_unmap(bo);
+
+ return 0;
+}
+
+static void
+crc_generate_worker(struct work_struct *work)
+{
+ struct vmw_display_unit *du =
+ container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ bool crc_pending;
+ u64 frame_start, frame_end;
+ u32 crc32 = 0;
+	struct vmw_surface *surf = NULL;
+ int ret;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ crc_pending = du->vkms.crc_pending;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+	 * the crc; nothing to do.
+ */
+ if (!crc_pending)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ surf = du->vkms.surface;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ if (vmw_surface_sync(vmw, surf)) {
+ drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
+ return;
+ }
+
+ ret = compute_crc(crtc, surf, &crc32);
+ if (ret)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ frame_start = du->vkms.frame_start;
+ frame_end = du->vkms.frame_end;
+ crc_pending = du->vkms.crc_pending;
+ du->vkms.frame_start = 0;
+ du->vkms.frame_end = 0;
+ du->vkms.crc_pending = false;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+	 * The worker can fall behind the vblank hrtimer, so make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+}
+
+static enum hrtimer_restart
+vmw_vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct vmw_surface *surf = NULL;
+ u64 ret_overrun;
+ bool locked, ret;
+
+ ret_overrun = hrtimer_forward_now(&du->vkms.timer,
+ du->vkms.period_ns);
+ if (ret_overrun != 1)
+ drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
+ ret_overrun - 1);
+
+ locked = vmw_vkms_vblank_trylock(crtc);
+ ret = drm_crtc_handle_vblank(crtc);
+ WARN_ON(!ret);
+ if (!locked)
+ return HRTIMER_RESTART;
+ surf = du->vkms.surface;
+ vmw_vkms_unlock(crtc);
+
+ if (du->vkms.crc_enabled && surf) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ spin_lock(&du->vkms.crc_state_lock);
+ if (!du->vkms.crc_pending)
+ du->vkms.frame_start = frame;
+ else
+ drm_dbg_driver(crtc->dev,
+ "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ du->vkms.frame_start, frame);
+ du->vkms.frame_end = frame;
+ du->vkms.crc_pending = true;
+ spin_unlock(&du->vkms.crc_state_lock);
+
+ ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
+ if (!ret)
+ drm_dbg_driver(crtc->dev, "Composer worker already queued\n");
+ }
+
+ return HRTIMER_RESTART;
+}
+
+void
+vmw_vkms_init(struct vmw_private *vmw)
+{
+ char buffer[64];
+ const size_t max_buf_len = sizeof(buffer) - 1;
+ size_t buf_len = max_buf_len;
+ int ret;
+
+ vmw->vkms_enabled = false;
+
+ ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
+ if (ret || buf_len > max_buf_len)
+ return;
+ buffer[buf_len] = '\0';
+
+ ret = kstrtobool(buffer, &vmw->vkms_enabled);
+ if (!ret && vmw->vkms_enabled) {
+ ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
+ vmw->vkms_enabled = (ret == 0);
+ }
+
+ vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
+ if (!vmw->crc_workq) {
+ drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.");
+ vmw->vkms_enabled = false;
+ }
+ if (vmw->vkms_enabled)
+ drm_info(&vmw->drm, "VKMS enabled\n");
+}
+
+void
+vmw_vkms_cleanup(struct vmw_private *vmw)
+{
+ destroy_workqueue(vmw->crc_workq);
+}
+
+bool
+vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = crtc->index;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
+ *vblank_time = READ_ONCE(du->vkms.timer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+ * the vblank core expects. Therefore we need to always correct the
+	 * timestamp by one frame.
+ */
+ *vblank_time -= du->vkms.period_ns;
+
+ return true;
+}
+
+int
+vmw_vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!vmw->vkms_enabled)
+ return -EINVAL;
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+void
+vmw_vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ hrtimer_cancel(&du->vkms.timer);
+ du->vkms.surface = NULL;
+ du->vkms.period_ns = ktime_set(0, 0);
+}
+
+enum vmw_vkms_lock_state {
+ VMW_VKMS_LOCK_UNLOCKED = 0,
+ VMW_VKMS_LOCK_MODESET = 1,
+ VMW_VKMS_LOCK_VBLANK = 2
+};
+
+void
+vmw_vkms_crtc_init(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+ spin_lock_init(&du->vkms.crc_state_lock);
+
+ INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
+ du->vkms.surface = NULL;
+}
+
+void
+vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ WARN_ON(work_pending(&du->vkms.crc_generator_work));
+ hrtimer_cancel(&du->vkms.timer);
+}
+
+void
+vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_modeset_lock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ unsigned long flags;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+
+ vmw_vkms_unlock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_on(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+}
+
+static bool
+is_crc_supported(struct drm_crtc *crtc)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (vmw->active_display_unit != vmw_du_screen_target)
+ return false;
+
+ return true;
+}
+
+static const char * const pipe_crc_sources[] = {"auto"};
+
+static int
+crc_parse_source(const char *src_name,
+ bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+const char *const *
+vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = 0;
+ if (!is_crc_supported(crtc))
+ return NULL;
+
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int
+vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ if (crc_parse_source(src_name, &enabled) < 0) {
+ drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int
+vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ bool enabled, prev_enabled, locked;
+ int ret;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ ret = crc_parse_source(src_name, &enabled);
+
+ if (enabled)
+ drm_crtc_vblank_get(crtc);
+
+ locked = vmw_vkms_modeset_lock_relaxed(crtc);
+ prev_enabled = du->vkms.crc_enabled;
+ du->vkms.crc_enabled = enabled;
+ if (locked)
+ vmw_vkms_unlock(crtc);
+
+ if (prev_enabled)
+ drm_crtc_vblank_put(crtc);
+
+ return ret;
+}
+
+void
+vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled) {
+ WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
+ du->vkms.surface = surf;
+ }
+}
+
+/**
+ * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
+ * @du: The vmw_display_unit from which to grab the vblank timings
+ *
+ * Returns the maximum wait time used to acquire the vkms lock. By
+ * default this is the duration of a single frame, or 1/60th of a
+ * second if vblank has not been initialized for the display unit.
+ */
+static inline u64
+vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
+{
+ s64 nsecs = ktime_to_ns(du->vkms.period_ns);
+
+ return (nsecs > 0) ? nsecs : 16666666;
+}
+
+/**
+ * vmw_vkms_modeset_lock - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * This function prevents the VKMS timers/callbacks from being called
+ * while a modeset operation is in progress. We don't want the callbacks,
+ * e.g. the vblank simulator, to access incomplete state, so we need to
+ * make sure they execute only once the modeset has finished.
+ *
+ * Normally this would have been done with a spinlock, but locking the
+ * entire atomic modeset with vmwgfx is impossible because kms prepare
+ * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
+ * guard various bits of state). That means we need to synchronize the
+ * atomic context (the vblank handler) with the non-atomic entirety
+ * of kms - so use an atomic_t to track which part of vkms has access
+ * to the basic vkms state.
+ */
+void
+vmw_vkms_modeset_lock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
+ total_delay, ret, atomic_read(&du->vkms.atomic_lock));
+ }
+}
+
+/**
+ * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * Much like vmw_vkms_modeset_lock, except that it returns immediately
+ * when the crtc is currently in a modeset.
+ *
+ * Returns true if vkms was actually locked for modeset, false otherwise.
+ */
+bool
+vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED ||
+ ret == VMW_VKMS_LOCK_MODESET ||
+ total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
+ return false;
+ }
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+/**
+ * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
+ * @crtc: The crtc to lock for vkms
+ *
+ * Tries to lock vkms for vblank; returns immediately either way.
+ *
+ * Returns true if vkms was locked for vblank, false otherwise.
+ */
+bool
+vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ u32 ret;
+
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_VBLANK);
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+void
+vmw_vkms_unlock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ /* Release flag; mark it as unlocked. */
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+}
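
A minimal usage sketch of the tri-state lock above, assuming only the vmw_vkms_* helpers defined in this file; the example_* functions are hypothetical, the real callers being the atomic_begin/atomic_flush hooks and the vblank hrtimer in this patch:

	/* Modeset side: may sleep, so it waits for the vblank handler to let go. */
	static void example_modeset_path(struct drm_crtc *crtc)
	{
		vmw_vkms_modeset_lock(crtc);
		/* ... update du->vkms.surface and related vkms state ... */
		vmw_vkms_unlock(crtc);
	}

	/* Vblank side: atomic context, so it gives up rather than spinning. */
	static void example_vblank_path(struct drm_crtc *crtc)
	{
		if (!vmw_vkms_vblank_trylock(crtc))
			return;	/* a modeset owns the state; sample it next vblank */
		/* ... read du->vkms.surface ... */
		vmw_vkms_unlock(crtc);
	}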
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
new file mode 100644
index 000000000000..69ddd33a8444
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_VKMS_H_
+#define VMWGFX_VKMS_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+struct vmw_private;
+struct vmw_surface;
+
+void vmw_vkms_init(struct vmw_private *vmw);
+void vmw_vkms_cleanup(struct vmw_private *vmw);
+
+void vmw_vkms_modeset_lock(struct drm_crtc *crtc);
+bool vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc);
+bool vmw_vkms_vblank_trylock(struct drm_crtc *crtc);
+void vmw_vkms_unlock(struct drm_crtc *crtc);
+
+bool vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+int vmw_vkms_enable_vblank(struct drm_crtc *crtc);
+void vmw_vkms_disable_vblank(struct drm_crtc *crtc);
+
+void vmw_vkms_crtc_init(struct drm_crtc *crtc);
+void vmw_vkms_crtc_cleanup(struct drm_crtc *crtc);
+void vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+
+const char *const *vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+int vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+int vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name);
+void vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf);
+
+#endif
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 1a556d087e63..63f1e2d1649f 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -26,6 +26,7 @@ config DRM_XE
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
select ACPI_BUTTON if ACPI
+ select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
select IOSF_MBI
@@ -41,6 +42,7 @@ config DRM_XE
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
+ select HMM_MIRROR
help
Experimental driver for Intel Xe series GPUs
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5a428ca00f10..b165bbf52aef 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -32,7 +32,7 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
-subdir-ccflags-y += -I$(obj) -I$(srctree)/$(src)
+subdir-ccflags-y += -I$(obj) -I$(src)
# generated sources
hostprogs := xe_gen_wa_oob
@@ -43,12 +43,13 @@ quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
- $(srctree)/$(src)/xe_wa_oob.rules
+ $(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
uses_generated_oob := \
$(obj)/xe_gsc.o \
$(obj)/xe_guc.o \
+ $(obj)/xe_guc_ads.o \
$(obj)/xe_migrate.o \
$(obj)/xe_ring_ops.o \
$(obj)/xe_vm.o \
@@ -97,6 +98,8 @@ xe-y += xe_bb.o \
xe_guc_db_mgr.o \
xe_guc_debugfs.o \
xe_guc_hwconfig.o \
+ xe_guc_id_mgr.o \
+ xe_guc_klv_helpers.o \
xe_guc_log.o \
xe_guc_pc.o \
xe_guc_submit.o \
@@ -145,6 +148,8 @@ xe-y += xe_bb.o \
xe_wa.o \
xe_wopcm.o
+xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
@@ -155,9 +160,14 @@ xe-y += \
xe_sriov.o
xe-$(CONFIG_PCI_IOV) += \
+ xe_gt_sriov_pf.o \
+ xe_gt_sriov_pf_config.o \
+ xe_gt_sriov_pf_control.o \
+ xe_gt_sriov_pf_policy.o \
xe_lmtt.o \
xe_lmtt_2l.o \
- xe_lmtt_ml.o
+ xe_lmtt_ml.o \
+ xe_sriov_pf.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -166,15 +176,12 @@ endif
# i915 Display compat #defines and #includes
subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
- -I$(srctree)/$(src)/display/ext \
- -I$(srctree)/$(src)/compat-i915-headers \
+ -I$(src)/display/ext \
+ -I$(src)/compat-i915-headers \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
-CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init)
-
# Rule to build SOC code shared with i915
$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
$(call cmd,force_checksrc)
@@ -257,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
i915-display/intel_hdcp.o \
+ i915-display/intel_hdcp_gsc_message.o \
i915-display/intel_hdmi.o \
i915-display/intel_hotplug.o \
i915-display/intel_hotplug_irq.o \
@@ -278,6 +286,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_vdsc.o \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
+ i915-display/intel_dmc_wl.o \
i915-display/intel_wm.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
@@ -310,7 +319,7 @@ ifneq ($(CONFIG_DRM_XE_DISPLAY),y)
endif
always-$(CONFIG_DRM_XE_WERROR) += \
- $(patsubst %.h,%.hdrtest, $(shell cd $(srctree)/$(src) && find * -name '*.h' $(hdrtest_find_args)))
+ $(patsubst %.h,%.hdrtest, $(shell cd $(src) && find * -name '*.h' $(hdrtest_find_args)))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) -DHDRTEST $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index 5496a5890847..c1ad09b36453 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _GUC_ACTIONS_PF_ABI_H
-#define _GUC_ACTIONS_PF_ABI_H
+#ifndef _ABI_GUC_ACTIONS_SRIOV_ABI_H
+#define _ABI_GUC_ACTIONS_SRIOV_ABI_H
#include "guc_communication_ctb_abi.h"
@@ -171,4 +171,200 @@
#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+/**
+ * DOC: GUC2PF_VF_STATE_NOTIFY
+ *
+ * The GUC2PF_VF_STATE_NOTIFY message is used by the GuC to notify the PF
+ * about a change of the VF state.
+ *
+ * This G2H message is sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_GUC2PF_VF_STATE_NOTIFY` = 0x5106 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | DATA2 = **EVENT** - notification event: |
+ * | | | |
+ * | | | - _`GUC_PF_NOTIFY_VF_ENABLE` = 1 (only if VFID = 0) |
+ * | | | - _`GUC_PF_NOTIFY_VF_FLR` = 1 |
+ * | | | - _`GUC_PF_NOTIFY_VF_FLR_DONE` = 2 |
+ * | | | - _`GUC_PF_NOTIFY_VF_PAUSE_DONE` = 3 |
+ * | | | - _`GUC_PF_NOTIFY_VF_FIXUP_DONE` = 4 |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_GUC2PF_VF_STATE_NOTIFY 0x5106u
+
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC_PF_NOTIFY_VF_ENABLE 1u
+#define GUC_PF_NOTIFY_VF_FLR 1u
+#define GUC_PF_NOTIFY_VF_FLR_DONE 2u
+#define GUC_PF_NOTIFY_VF_PAUSE_DONE 3u
+#define GUC_PF_NOTIFY_VF_FIXUP_DONE 4u
+
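As a hedged illustration (not part of this patch), a PF-side handler could decode this G2H event with the masks defined above and FIELD_GET() from <linux/bitfield.h>; the function name, length check, and error codes below are assumptions about the surrounding driver code, not the actual xe implementation.

static int pf_handle_vf_state_notify(const u32 *msg, u32 len)
{
	u32 vfid, event;

	if (len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN)
		return -EPROTO;

	vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
	event = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);

	/* value 1 means ENABLE when VFID is 0, FLR otherwise (see table) */
	if (event == GUC_PF_NOTIFY_VF_ENABLE && !vfid)
		return 0;	/* VFs were just enabled or disabled on the PF */

	switch (event) {
	case GUC_PF_NOTIFY_VF_FLR:
	case GUC_PF_NOTIFY_VF_FLR_DONE:
	case GUC_PF_NOTIFY_VF_PAUSE_DONE:
	case GUC_PF_NOTIFY_VF_FIXUP_DONE:
		/* dispatch to the per-VF control state machine (not shown) */
		return 0;
	default:
		return -EPROTO;
	}
}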
+/**
+ * DOC: PF2GUC_UPDATE_VGT_POLICY
+ *
+ * This message is used by the PF to set `GuC VGT Policy KLVs`_.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY` = 0x5502 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that |
+ * | | | represents the start of `GuC VGT Policy KLVs`_ list. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **CFG_ADDR_HI** - upper 32 bits of above offset. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **CFG_SIZE** - size (in dwords) of the config buffer |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNT** - number of KLVs successfully applied |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY 0x5502u
+
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_1_CFG_ADDR_LO GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_2_CFG_ADDR_HI GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_3_CFG_SIZE GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0
+
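As a minimal sketch under stated assumptions, the 4-dword request described above could be assembled as follows. FIELD_PREP() comes from <linux/bitfield.h>, lower_32_bits()/upper_32_bits() from <linux/kernel.h>, and the GUC_HXG_* names are the generic GuC message ABI definitions; the helper name, and the idea that the caller hands the array to its blocking H2G transport, are illustrative only.

static u32 pf_prepare_update_vgt_policy(u32 *msg, u64 cfg_ggtt_addr, u32 cfg_size)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY);
	msg[1] = lower_32_bits(cfg_ggtt_addr);	/* CFG_ADDR_LO */
	msg[2] = upper_32_bits(cfg_ggtt_addr);	/* CFG_ADDR_HI */
	msg[3] = cfg_size;			/* CFG_SIZE, in dwords */

	return PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_LEN;	/* dwords filled */
}

On success the GuC replies with RESPONSE_SUCCESS whose DATA0 carries COUNT, which the caller can compare against the number of KLVs it submitted.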
+/**
+ * DOC: PF2GUC_UPDATE_VF_CFG
+ *
+ * The `PF2GUC_UPDATE_VF_CFG`_ message is used by the PF to provision a single VF in the GuC.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VF_CFG` = 0x5503 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - identifier of the VF that the KLV |
+ * | | | configurations are being applied to |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that represents |
+ * | | | the start of a list of virtualization related KLV configs |
+ * | | | that are to be applied to the VF. |
+ * | | | If this parameter is zero, the list is not parsed. |
+ * | | | If the full configs address and CFG_SIZE are both zero,      |
+ * | | | the associated VF config shall be reset to its default state.|
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **CFG_ADDR_HI** - upper 32 bits of configs address. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 4 | 31:0 | **CFG_SIZE** - size (in dwords) of the config buffer |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNT** - number of KLVs successfully applied |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_UPDATE_VF_CFG 0x5503u
+
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 4u)
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_2_CFG_ADDR_LO GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_3_CFG_ADDR_HI GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_4_CFG_SIZE GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0
+
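To make the reset-to-defaults semantics above concrete, here is a hedged sketch that fills the request for "reset VF configuration": both halves of the config address and CFG_SIZE are zero, so the GuC drops the VF back to its default state. The helper name is an assumption; the dword layout follows the table exactly, with FIELD_PREP() from <linux/bitfield.h>.

static u32 pf_prepare_reset_vf_cfg(u32 *msg, u32 vfid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_PF2GUC_UPDATE_VF_CFG);
	msg[1] = vfid;	/* VFID */
	msg[2] = 0;	/* CFG_ADDR_LO = 0 */
	msg[3] = 0;	/* CFG_ADDR_HI = 0 */
	msg[4] = 0;	/* CFG_SIZE = 0: reset VF config to defaults */

	return PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_LEN;	/* 5 dwords */
}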
+/**
+ * DOC: PF2GUC_VF_CONTROL
+ *
+ * The PF2GUC_VF_CONTROL message is used by the PF to trigger a VF state change
+ * maintained by the GuC.
+ *
+ * This H2G message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_VF_CONTROL_CMD` = 0x5506 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | DATA2 = **COMMAND** - control command: |
+ * | | | |
+ * | | | - _`GUC_PF_TRIGGER_VF_PAUSE` = 1 |
+ * | | | - _`GUC_PF_TRIGGER_VF_RESUME` = 2 |
+ * | | | - _`GUC_PF_TRIGGER_VF_STOP` = 3 |
+ * | | | - _`GUC_PF_TRIGGER_VF_FLR_START` = 4 |
+ * | | | - _`GUC_PF_TRIGGER_VF_FLR_FINISH` = 5 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_VF_CONTROL 0x5506u
+
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC_PF_TRIGGER_VF_PAUSE 1u
+#define GUC_PF_TRIGGER_VF_RESUME 2u
+#define GUC_PF_TRIGGER_VF_STOP 3u
+#define GUC_PF_TRIGGER_VF_FLR_START 4u
+#define GUC_PF_TRIGGER_VF_FLR_FINISH 5u
+
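A similarly hedged sketch for the control path: pausing one VF is just the two-payload request from the table with COMMAND set to GUC_PF_TRIGGER_VF_PAUSE, and the success response carries no data. The helper name is illustrative, not an actual xe function.

static u32 pf_prepare_vf_pause(u32 *msg, u32 vfid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_PF2GUC_VF_CONTROL);
	msg[1] = vfid;				/* VFID */
	msg[2] = GUC_PF_TRIGGER_VF_PAUSE;	/* COMMAND */

	return PF2GUC_VF_CONTROL_REQUEST_MSG_LEN;	/* 3 dwords */
}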
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0400bc0fccdc..511cf974d585 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -319,4 +319,14 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+/*
+ * Workaround keys:
+ */
+enum xe_guc_klv_ids {
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
+};
+
#endif
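A brief, hedged sketch of how one of these workaround keys might be turned into an actual KLV entry, assuming the generic GUC_KLV_0_KEY/GUC_KLV_0_LEN masks defined earlier in this header and FIELD_PREP() from <linux/bitfield.h>; treating workaround KLVs as carrying no value dwords is an assumption, not something this hunk states.

static u32 guc_wa_klv_header(enum xe_guc_klv_ids id)
{
	/* key in bits 31:16, value length (0 dwords) in bits 15:0 */
	return FIELD_PREP(GUC_KLV_0_KEY, id) |
	       FIELD_PREP(GUC_KLV_0_LEN, 0);
}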
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 420eba0e4be0..cd4632276141 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -19,14 +19,12 @@
#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_step.h"
-#include "i915_gem.h"
#include "i915_gem_stolen.h"
#include "i915_gpu_error.h"
#include "i915_reg_defs.h"
#include "i915_utils.h"
#include "intel_gt_types.h"
#include "intel_step.h"
-#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "intel_runtime_pm.h"
#include <linux/pm_runtime.h>
@@ -41,12 +39,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
return dev_get_drvdata(kdev);
}
-
-#define INTEL_JASPERLAKE 0
-#define INTEL_ELKHARTLAKE 0
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define INTEL_DEVID(dev_priv) ((dev_priv)->info.devid)
#define IS_I830(dev_priv) (dev_priv && 0)
#define IS_I845G(dev_priv) (dev_priv && 0)
#define IS_I85X(dev_priv) (dev_priv && 0)
@@ -84,12 +78,12 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
-#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
+#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
+ IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
-#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
+#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
@@ -97,19 +91,12 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define INTEL_DISPLAY_ENABLED(xe) (HAS_DISPLAY((xe)) && !intel_opregion_headless_sku((xe)))
-
-#define IS_GRAPHICS_VER(xe, first, last) \
- ((xe)->info.graphics_verx100 >= first * 100 && \
- (xe)->info.graphics_verx100 <= (last*100 + 99))
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_LLC(xe) (!IS_DGFX((xe)))
#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270
/* Workarounds not handled yet */
#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step <= last; })
-#define IS_GRAPHICS_STEP(xe, first, last) ({u8 __step = (xe)->info.step.graphics; first <= __step && __step <= last; })
#define IS_LP(xe) (0)
#define IS_GEN9_LP(xe) (0)
@@ -126,27 +113,6 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_KABYLAKE_ULT(xe) (xe && 0)
#define IS_SKYLAKE_ULT(xe) (xe && 0)
-#define IS_DG1_GRAPHICS_STEP(xe, first, last) (IS_DG1(xe) && IS_GRAPHICS_STEP(xe, first, last))
-#define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \
- ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \
- IS_GRAPHICS_STEP(xe, first, last))
-#define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last))
-
-/* XXX: No basedie stepping support yet */
-#define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))
-
-#define IS_TIGERLAKE_DISPLAY_STEP(xe, first, last) (IS_TIGERLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ROCKETLAKE_DISPLAY_STEP(xe, first, last) (IS_ROCKETLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_DG1_DISPLAY_STEP(xe, first, last) (IS_DG1(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_DG2_DISPLAY_STEP(xe, first, last) (IS_DG2(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ADLP_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_P(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ADLS_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_S(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_JSL_EHL_DISPLAY_STEP(xe, first, last) (IS_JSL_EHL(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_MTL_DISPLAY_STEP(xe, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-
-/* FIXME: Add subplatform here */
-#define IS_MTL_GRAPHICS_STEP(xe, sub, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-
#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
@@ -154,30 +120,31 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
#define to_intel_bo(x) gem_to_xe_bo((x))
-#define mkwrite_device_info(xe) (INTEL_INFO(xe))
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#define intel_has_gpu_reset(a) (a && 0)
-
#include "intel_wakeref.h"
static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- if (xe_pm_runtime_get(xe) < 0) {
- xe_pm_runtime_put(xe);
- return 0;
- }
- return 1;
+ return xe_pm_runtime_resume_and_get(xe);
}
static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_get_if_active(xe);
+ return xe_pm_runtime_get_if_in_use(xe);
+}
+
+static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ xe_pm_runtime_get_noresume(xe);
+ return true;
}
static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
@@ -217,7 +184,6 @@ struct i915_sched_attr {
#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
-#define HPD_STORM_DEFAULT_THRESHOLD 50
#ifdef CONFIG_ARM64
/*
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
deleted file mode 100644
index 12c671fd5235..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../i915/i915_fixed.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
deleted file mode 100644
index 06b723a479c5..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __I915_GEM_H__
-#define __I915_GEM_H__
-#define GEM_BUG_ON
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
index bd233007c1b7..cb6c7598824b 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
@@ -17,10 +17,15 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
{
struct xe_bo *bo;
int err;
- u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+ u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
- if (align)
+ if (start < SZ_4K)
+ start = SZ_4K;
+
+ if (align) {
size = ALIGN(size, align);
+ start = ALIGN(start, align);
+ }
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
index 80b024d435dc..4931c7198f13 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
@@ -9,36 +9,10 @@
#include <linux/types.h>
struct drm_i915_private;
-struct i915_ggtt;
-static inline void intel_vgpu_detect(struct drm_i915_private *i915)
-{
-}
static inline bool intel_vgpu_active(struct drm_i915_private *i915)
{
return false;
}
-static inline void intel_vgpu_register(struct drm_i915_private *i915)
-{
-}
-static inline bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline int intel_vgt_balloon(struct i915_ggtt *ggtt)
-{
- return 0;
-}
-static inline void intel_vgt_deballoon(struct i915_ggtt *ggtt)
-{
-}
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
deleted file mode 100644
index 009745328992..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _INTEL_UC_FW_H_
-#define _INTEL_UC_FW_H_
-
-#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index cd26ddc0f69e..ef79793caa72 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -25,15 +25,15 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
-static inline u32 intel_uncore_read8(struct intel_uncore *uncore,
- i915_reg_t i915_reg)
+static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
}
-static inline u32 intel_uncore_read16(struct intel_uncore *uncore,
+static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5..e18521acc516 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,7 +11,7 @@
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
{
- if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+ if (bo->flags & XE_BO_FLAG_PINNED) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
xe_bo_unpin(bo);
@@ -31,23 +31,27 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
- return ret;
+ goto err;
- if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+ if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
/*
- * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+ * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
* mode when the object is VM_BINDed, so we can only set
* coherency with display when unbound.
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
- bo->flags |= XE_BO_SCANOUT_BIT;
+ bo->flags |= XE_BO_FLAG_SCANOUT;
}
ttm_bo_unreserve(&bo->ttm);
+ return 0;
+err:
+ xe_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 51ae3561fd0d..9e4bcfdbc7e5 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
if (!IS_DGFX(dev_priv)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
NULL, size,
- ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_PINNED_BIT);
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_PINNED);
if (!IS_ERR(obj))
drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
- ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
- XE_BO_CREATE_PINNED_BIT);
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+ XE_BO_FLAG_PINNED);
}
if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
- if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+ if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+ if (obj->flags & XE_BO_FLAG_STOLEN)
info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
else
info->fix.smem_start =
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3..0de0566e5b39 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -51,14 +51,6 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
return intel_display_driver_probe_defer(pdev);
}
-static void xe_display_last_close(struct drm_device *dev)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- if (xe->info.enable_display)
- intel_fbdev_restore_mode(to_xe_device(dev));
-}
-
/**
* xe_display_driver_set_hooks - Add driver flags and hooks for display
* @driver: DRM device driver
@@ -73,7 +65,6 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
return;
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
- driver->lastclose = xe_display_last_close;
}
static void unset_display_features(struct xe_device *xe)
@@ -101,25 +92,14 @@ static void display_destroy(struct drm_device *dev, void *dummy)
*/
int xe_display_create(struct xe_device *xe)
{
- int err;
-
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
- drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
- drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
- drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
- err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
@@ -223,9 +203,7 @@ void xe_display_fini(struct xe_device *xe)
if (!xe->info.enable_display)
return;
- /* poll work can call into fbdev, hence clean that up afterwards */
intel_hpd_poll_fini(xe);
- intel_fbdev_fini(xe);
intel_hdcp_component_fini(xe);
intel_audio_deinit(xe);
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 27c2fb1c002a..44c9fd2143cc 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(obj)) {
kfree(vma);
return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 722c84a56607..3e1ae37c4c8b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -10,6 +10,7 @@
#include "intel_fb_pin.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_pm.h"
#include <drm/ttm/ttm_bo.h>
@@ -30,7 +31,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
iosys_map_wr(map, *dpt_ofs, u64, pte);
*dpt_ofs += 8;
@@ -62,7 +63,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
for (column = 0; column < width; column++) {
iosys_map_wr(map, *dpt_ofs, u64,
pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]));
+ xe->pat.idx[XE_CACHE_NONE]));
*dpt_ofs += 8;
src_idx++;
@@ -99,18 +100,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
if (IS_DGFX(xe))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM0_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
else
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -119,7 +123,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
}
@@ -165,7 +169,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
*ggtt_ofs += XE_PAGE_SIZE;
@@ -190,7 +194,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -211,7 +215,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
for (x = 0; x < size; x += XE_PAGE_SIZE) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
}
@@ -238,11 +242,10 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
rot_info->plane[i].dst_stride);
}
- xe_ggtt_invalidate(ggtt);
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return ret;
}
@@ -260,7 +263,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
- !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+ !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
struct xe_tile *tile = xe_device_get_root_tile(xe);
/*
@@ -321,7 +324,7 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
xe_bo_unpin_map_no_vm(vma->dpt);
else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
vma->bo->ggtt_node.start != vma->node.start)
- xe_ggtt_remove_node(ggtt, &vma->node);
+ xe_ggtt_remove_node(ggtt, &vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
ttm_bo_unpin(&vma->bo->ttm);
@@ -353,7 +356,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
/* We reject creating !SCANOUT fb's, so this is weird.. */
- drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
if (IS_ERR(vma))
@@ -381,4 +384,4 @@ struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
void intel_dpt_destroy(struct i915_address_space *vm)
{
return;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 0f11a39333e2..d46f87a039f2 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -3,32 +3,250 @@
* Copyright 2023, Intel Corporation.
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+#include <drm/i915_hdcp_interface.h>
+#include <linux/delay.h>
+
+#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gsc_proxy.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_pm.h"
+#include "xe_uc_fw.h"
+
+#define HECI_MEADDRESS_HDCP 18
+
+struct intel_hdcp_gsc_message {
+ struct xe_bo *hdcp_bo;
+ u64 hdcp_cmd_in;
+ u64 hdcp_cmd_out;
+};
-bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
+
+bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
{
- return true;
+ return DISPLAY_VER(xe) >= 14;
}
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915)
+bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
- return false;
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_gt *gt = tile->media_gt;
+ bool ret = true;
+
+ if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
+ return false;
+
+ xe_pm_runtime_get(xe);
+ if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+ drm_dbg_kms(&xe->drm,
+ "failed to get forcewake to check proxy status\n");
+ ret = false;
+ goto out;
+ }
+
+ if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
+ ret = false;
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+out:
+ xe_pm_runtime_put(xe);
+ return ret;
}
-int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+/* This function allocates memory for the command that we will send to the GSC CS */
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_message *hdcp_message)
{
- drm_info(&i915->drm, "HDCP support not yet implemented\n");
- return -ENODEV;
+ struct xe_bo *bo = NULL;
+ u64 cmd_in, cmd_out;
+ int ret = 0;
+
+ /* allocate a two-page object for the HDCP command memory and store it */
+ bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
+
+ if (IS_ERR(bo)) {
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ ret = PTR_ERR(bo);
+ goto out;
+ }
+
+ cmd_in = xe_bo_ggtt_addr(bo);
+ cmd_out = cmd_in + PAGE_SIZE;
+ xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+
+ hdcp_message->hdcp_bo = bo;
+ hdcp_message->hdcp_cmd_in = cmd_in;
+ hdcp_message->hdcp_cmd_out = cmd_out;
+out:
+ return ret;
}
-void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
{
+ struct intel_hdcp_gsc_message *hdcp_message;
+ int ret;
+
+ hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+ if (!hdcp_message)
+ return -ENOMEM;
+
+ /*
+ * NOTE: No need to lock the comp mutex here as it is already
+ * going to be taken before this function is called
+ */
+ ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+ if (ret) {
+ drm_err(&xe->drm, "Could not initialize hdcp_message\n");
+ kfree(hdcp_message);
+ return ret;
+ }
+
+ xe->display.hdcp.hdcp_message = hdcp_message;
+ return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct xe_device *xe)
+{
+ struct i915_hdcp_arbiter *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_lock(&xe->display.hdcp.hdcp_mutex);
+ xe->display.hdcp.arbiter = data;
+ xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
+ xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(xe);
+ if (ret)
+ kfree(data);
+
+ mutex_unlock(&xe->display.hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct xe_device *xe)
+{
+ struct intel_hdcp_gsc_message *hdcp_message =
+ xe->display.hdcp.hdcp_message;
+
+ if (!hdcp_message)
+ return;
+
+ xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
+ kfree(hdcp_message);
+}
+
+static int xe_gsc_send_sync(struct xe_device *xe,
+ struct intel_hdcp_gsc_message *hdcp_message,
+ u32 msg_size_in, u32 msg_size_out,
+ u32 addr_out_off)
+{
+ struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+ int ret;
+
+ ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
+ hdcp_message->hdcp_cmd_out, msg_size_out);
+ if (ret) {
+ drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+ return ret;
+ }
+
+ if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
+ return -EAGAIN;
+
+ ret = xe_gsc_read_out_header(xe, map, addr_out_off,
+ sizeof(struct hdcp_cmd_header), NULL);
+
+ return ret;
+}
+
+ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len)
{
- return -ENODEV;
+ const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
+ struct intel_hdcp_gsc_message *hdcp_message;
+ u64 host_session_id;
+ u32 msg_size_in, msg_size_out;
+ u32 addr_out_off, addr_in_wr_off = 0;
+ int ret, tries = 0;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
+ msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
+ hdcp_message = xe->display.hdcp.hdcp_message;
+ addr_out_off = PAGE_SIZE;
+
+ host_session_id = xe_gsc_create_host_session_id();
+ xe_pm_runtime_get_noresume(xe);
+ addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off, HECI_MEADDRESS_HDCP,
+ host_session_id, msg_in_len);
+ xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ msg_in, msg_in_len);
+ /*
+ * Keep resending the request while the GSC reports the pending bit set.
+ * There is no need to add a message handle: we reuse the same address,
+ * so the header location is unchanged and already contains the handle.
+ * Send the message up to 20 times, 50 ms apart.
+ */
+ do {
+ ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ addr_out_off);
+
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(50);
+
+ } while (++tries < 20);
+
+ if (ret)
+ goto out;
+
+ xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ addr_out_off + HDCP_GSC_HEADER_SIZE,
+ msg_out_len);
+
+out:
+ xe_pm_runtime_put(xe);
+ return ret;
}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 866d1dd6eeb4..9693c56d386b 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -6,6 +6,7 @@
/* for ioread64 */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "regs/xe_gtt_defs.h"
#include "xe_ggtt.h"
#include "i915_drv.h"
@@ -62,7 +63,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+ flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
@@ -79,7 +80,7 @@ initial_plane_bo(struct xe_device *xe,
}
phys_base = pte & ~(page_size - 1);
- flags |= XE_BO_CREATE_VRAM0_BIT;
+ flags |= XE_BO_FLAG_VRAM0;
/*
* We don't currently expect this to ever be placed in the
@@ -101,7 +102,7 @@ initial_plane_bo(struct xe_device *xe,
if (!stolen)
return NULL;
phys_base = base;
- flags |= XE_BO_CREATE_STOLEN_BIT;
+ flags |= XE_BO_FLAG_STOLEN;
/*
* If the FB is too big, just don't use it since fbdev is not very
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h
new file mode 100644
index 000000000000..dca62af5a5d5
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GFX_STATE_COMMANDS_H_
+#define _XE_GFX_STATE_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+#define GFX_STATE_OPCODE REG_GENMASK(28, 26)
+
+#define GFX_STATE_CMD(opcode) \
+ (XE_INSTR_GFX_STATE | REG_FIELD_PREP(GFX_STATE_OPCODE, opcode))
+
+#define STATE_WRITE_INLINE GFX_STATE_CMD(0x0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 8e6dd061f2ae..31d28a67ef6a 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -47,6 +47,8 @@
#define GPGPU_CSR_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x4)
#define STATE_COMPUTE_MODE GFXPIPE_COMMON_CMD(0x1, 0x5)
#define CMD_3DSTATE_BTD GFXPIPE_COMMON_CMD(0x1, 0x6)
+#define STATE_SYSTEM_MEM_FENCE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x9)
+#define STATE_CONTEXT_DATA_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0xB)
#define CMD_3DSTATE_VF_STATISTICS GFXPIPE_SINGLE_DW_CMD(0x0, 0xB)
@@ -71,6 +73,7 @@
#define CMD_3DSTATE_WM GFXPIPE_3D_CMD(0x0, 0x14)
#define CMD_3DSTATE_CONSTANT_VS GFXPIPE_3D_CMD(0x0, 0x15)
#define CMD_3DSTATE_CONSTANT_GS GFXPIPE_3D_CMD(0x0, 0x16)
+#define CMD_3DSTATE_CONSTANT_PS GFXPIPE_3D_CMD(0x0, 0x17)
#define CMD_3DSTATE_SAMPLE_MASK GFXPIPE_3D_CMD(0x0, 0x18)
#define CMD_3DSTATE_CONSTANT_HS GFXPIPE_3D_CMD(0x0, 0x19)
#define CMD_3DSTATE_CONSTANT_DS GFXPIPE_3D_CMD(0x0, 0x1A)
diff --git a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
index 04179b2a48e1..fd2ce7ace510 100644
--- a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
+++ b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
@@ -17,6 +17,7 @@
#define XE_INSTR_MI REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x0)
#define XE_INSTR_GSC REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x2)
#define XE_INSTR_GFXPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
+#define XE_INSTR_GFX_STATE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x4)
/*
* Most (but not all) instructions have a "length" field in the instruction
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 0b1266c88a6a..af71b87d8030 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -104,9 +104,6 @@
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
-#define FF_SLICE_CS_CHICKEN2(base) XE_REG((base) + 0xe4, XE_REG_OPTION_MASKED)
-#define PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
-
#define CS_DEBUG_MODE1(base) XE_REG((base) + 0xec, XE_REG_OPTION_MASKED)
#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define REPLAY_MODE_GRANULARITY REG_BIT(0)
@@ -125,7 +122,7 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 9886ec9cb08e..e2a925be137c 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -38,4 +38,11 @@
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
+#define GSCI_TIMER_STATUS XE_REG(0x11ca28)
+#define GSCI_TIMER_STATUS_VALUE REG_GENMASK(1, 0)
+#define GSCI_TIMER_STATUS_RESET_IN_PROGRESS 0
+#define GSCI_TIMER_STATUS_TIMER_EXPIRED 1
+#define GSCI_TIMER_STATUS_RESET_COMPLETE 2
+#define GSCI_TIMER_STATUS_OUT_OF_RESET 3
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 15ac2d284d48..94445810ccc9 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -69,10 +69,14 @@
#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
+#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
#define WM_CHICKEN3 XE_REG_MCR(0x5588, XE_REG_OPTION_MASKED)
#define HIZ_PLANE_COMPRESSION_DIS REG_BIT(10)
+#define CHICKEN_RASTER_1 XE_REG_MCR(0x6204, XE_REG_OPTION_MASKED)
+#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+
#define CHICKEN_RASTER_2 XE_REG_MCR(0x6208, XE_REG_OPTION_MASKED)
#define TBIMR_FAST_CLIP REG_BIT(5)
@@ -97,7 +101,8 @@
#define CACHE_MODE_1 XE_REG(0x7004, XE_REG_OPTION_MASKED)
#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
-#define COMMON_SLICE_CHICKEN1 XE_REG(0x7010)
+#define COMMON_SLICE_CHICKEN1 XE_REG(0x7010, XE_REG_OPTION_MASKED)
+#define DISABLE_BOTTOM_CLIP_RECTANGLE_TEST REG_BIT(14)
#define HIZ_CHICKEN XE_REG(0x7018, XE_REG_OPTION_MASKED)
#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
@@ -141,6 +146,10 @@
#define XE2_FLAT_CCS_BASE_RANGE_LOWER XE_REG_MCR(0x8800)
#define XE2_FLAT_CCS_ENABLE REG_BIT(0)
+#define XE2_FLAT_CCS_BASE_LOWER_ADDR_MASK REG_GENMASK(31, 6)
+
+#define XE2_FLAT_CCS_BASE_RANGE_UPPER XE_REG_MCR(0x8804)
+#define XE2_FLAT_CCS_BASE_UPPER_ADDR_MASK REG_GENMASK(7, 0)
#define GSCPSMI_BASE XE_REG(0x880c)
@@ -156,7 +165,10 @@
#define MIRROR_FUSE3 XE_REG(0x9118)
#define XE2_NODE_ENABLE_MASK REG_GENMASK(31, 16)
#define L3BANK_PAIR_COUNT 4
+#define XEHPC_GT_L3_MODE_MASK REG_GENMASK(7, 4)
+#define XE2_GT_L3_MODE_MASK REG_GENMASK(7, 4)
#define L3BANK_MASK REG_GENMASK(3, 0)
+#define XELP_GT_L3_MODE_MASK REG_GENMASK(7, 0)
/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
#define MAX_MSLICES 4
#define MEML3_EN_MASK REG_GENMASK(3, 0)
@@ -271,6 +283,10 @@
#define FORCEWAKE_GT XE_REG(0xa188)
#define PG_ENABLE XE_REG(0xa210)
+#define VD2_MFXVDENC_POWERGATE_ENABLE REG_BIT(8)
+#define VD2_HCP_POWERGATE_ENABLE REG_BIT(7)
+#define VD0_MFXVDENC_POWERGATE_ENABLE REG_BIT(4)
+#define VD0_HCP_POWERGATE_ENABLE REG_BIT(3)
#define CTC_MODE XE_REG(0xa26c)
#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
@@ -349,6 +365,7 @@
#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED)
+#define XE2_EUPEND_CHK_FLUSH_DIS REG_BIT(14)
#define DIS_FIX_EOT1_FLUSH REG_BIT(9)
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
@@ -364,17 +381,22 @@
#define DISABLE_EARLY_READ REG_BIT(14)
#define ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define DISABLE_TDL_SVHS_GATING REG_BIT(1)
#define DISABLE_DOP_GATING REG_BIT(0)
#define RT_CTRL XE_REG_MCR(0xe530)
#define DIS_NULL_QUERY REG_BIT(10)
+#define EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK XE_REG_MCR(0xe534)
+#define EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT REG_BIT(31)
+
#define XEHP_HDC_CHICKEN0 XE_REG_MCR(0xe5f0, XE_REG_OPTION_MASKED)
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
+#define WR_REQ_CHAINING_DIS REG_BIT(26)
#define TGM_WRITE_EOM_FORCE REG_BIT(17)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define SEQUENTIAL_ACCESS_UPGRADE_DISABLE REG_BIT(13)
@@ -439,7 +461,13 @@
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
-#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4))
+/*
+ * Note: Interrupt registers 1900xx are VF-accessible only until version 12.50.
+ * On newer platforms, VFs use memory-based interrupts instead.
+ * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
+ */
+
+#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
#define INTR_GSC REG_BIT(31)
#define INTR_GUC REG_BIT(25)
#define INTR_MGUC REG_BIT(24)
@@ -450,16 +478,16 @@
#define INTR_VECS(x) REG_BIT(31 - (x))
#define INTR_VCS(x) REG_BIT(x)
-#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030)
-#define VCS_VECS_INTR_ENABLE XE_REG(0x190034)
-#define GUC_SG_INTR_ENABLE XE_REG(0x190038)
+#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
+#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
+#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
#define ENGINE1_MASK REG_GENMASK(31, 16)
#define ENGINE0_MASK REG_GENMASK(15, 0)
-#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c)
-#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044)
-#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048)
+#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
+#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
-#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4))
+#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
#define INTR_DATA_VALID REG_BIT(31)
#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
@@ -468,16 +496,16 @@
#define OTHER_GSC_HECI2_INSTANCE 3
#define OTHER_GSC_INSTANCE 6
-#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
-#define RCS0_RSVD_INTR_MASK XE_REG(0x190090)
-#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0)
-#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
-#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
-#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
+#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
+#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
+#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
+#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
-#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
-#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
-#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
+#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
+#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
@@ -486,6 +514,7 @@
#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
+#define GSC_ER_COMPLETE REG_BIT(5)
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_gtt_defs.h b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
new file mode 100644
index 000000000000..4389e5a76f89
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GTT_DEFS_H_
+#define _XE_GTT_DEFS_H_
+
+#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
+#define XELPG_GGTT_PTE_PAT1 BIT_ULL(53)
+
+#define GGTT_PTE_VFID GENMASK_ULL(11, 2)
+
+#define GUC_GGTT_TOP 0xFEE00000
+
+#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
+#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
+#define XE_PPGTT_PDE_PDPE_PAT2 BIT_ULL(12)
+#define XE_PPGTT_PTE_PAT2 BIT_ULL(7)
+#define XE_PPGTT_PTE_PAT1 BIT_ULL(4)
+#define XE_PPGTT_PTE_PAT0 BIT_ULL(3)
+
+#define XE_PDE_PS_2M BIT_ULL(7)
+#define XE_PDPE_PS_1G BIT_ULL(7)
+#define XE_PDE_IPS_64K BIT_ULL(11)
+
+#define XE_GGTT_PTE_DM BIT_ULL(1)
+#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
+#define XE_PPGTT_PTE_DM BIT_ULL(11)
+#define XE_PDE_64K BIT_ULL(6)
+#define XE_PTE_PS64 BIT_ULL(8)
+#define XE_PTE_NULL BIT_ULL(9)
+
+#define XE_PAGE_PRESENT BIT_ULL(0)
+#define XE_PAGE_RW BIT_ULL(1)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
index 92320bbc9d3d..11682e675e0f 100644
--- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
@@ -100,16 +100,23 @@
#define GT_PM_CONFIG XE_REG(0x13816c)
#define GT_DOORBELL_ENABLE REG_BIT(0)
-#define GUC_HOST_INTERRUPT XE_REG(0x1901f0)
+#define GUC_HOST_INTERRUPT XE_REG(0x1901f0, XE_REG_OPTION_VF)
-#define VF_SW_FLAG(n) XE_REG(0x190240 + (n) * 4)
+#define VF_SW_FLAG(n) XE_REG(0x190240 + (n) * 4, XE_REG_OPTION_VF)
#define VF_SW_FLAG_COUNT 4
-#define MED_GUC_HOST_INTERRUPT XE_REG(0x190304)
+#define MED_GUC_HOST_INTERRUPT XE_REG(0x190304, XE_REG_OPTION_VF)
-#define MED_VF_SW_FLAG(n) XE_REG(0x190310 + (n) * 4)
+#define MED_VF_SW_FLAG(n) XE_REG(0x190310 + (n) * 4, XE_REG_OPTION_VF)
#define MED_VF_SW_FLAG_COUNT 4
+#define GUC_TLB_INV_CR XE_REG(0xcee8)
+#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC0 XE_REG(0xcf7c)
+#define PVC_GUC_TLB_INV_DESC0_VALID REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC1 XE_REG(0xcf80)
+#define PVC_GUC_TLB_INV_DESC1_INVALIDATE REG_BIT(6)
+
/* GuC Interrupt Vector */
#define GUC_INTR_GUC2HOST REG_BIT(15)
#define GUC_INTR_EXEC_ERROR REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index c50e7650c09a..23f7dc5bbe99 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -6,6 +6,8 @@
#ifndef _XE_REG_DEFS_H_
#define _XE_REG_DEFS_H_
+#include <linux/build_bug.h>
+
#include "compat-i915-headers/i915_reg_defs.h"
/**
@@ -36,6 +38,10 @@ struct xe_reg {
*/
u32 mcr:1;
/**
+ * @vf: register is accessible from the Virtual Function.
+ */
+ u32 vf:1;
+ /**
* @ext: access MMIO extension space for current register.
*/
u32 ext:1;
@@ -44,6 +50,7 @@ struct xe_reg {
u32 raw;
};
};
+static_assert(sizeof(struct xe_reg) == sizeof(u32));
/**
* struct xe_reg_mcr - MCR register definition
@@ -76,6 +83,13 @@ struct xe_reg_mcr {
#define XE_REG_OPTION_MASKED .masked = 1
/**
+ * XE_REG_OPTION_VF - Register is "VF" accessible.
+ *
+ * To be used with XE_REG() and XE_REG_INITIALIZER().
+ */
+#define XE_REG_OPTION_VF .vf = 1
+
+/**
* XE_REG_INITIALIZER - Initializer for xe_reg_t.
* @r_: Register offset
* @...: Additional options like access mode. See struct xe_reg for available
@@ -117,4 +131,9 @@ struct xe_reg_mcr {
.__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
})
+static inline bool xe_reg_is_valid(struct xe_reg r)
+{
+ return r.addr;
+}
+
#endif
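For illustration only, a register declared with the new VF option and checked with the helper above might look like this; MY_VF_SCRATCH and its offset are made up for the example and do not correspond to real hardware.

#define MY_VF_SCRATCH	XE_REG(0x190000, XE_REG_OPTION_VF)

static inline bool my_vf_scratch_usable(void)
{
	/* .vf rides in the same packed u32, so the option costs no space */
	return xe_reg_is_valid(MY_VF_SCRATCH) && MY_VF_SCRATCH.vf;
}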
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 2c214bb9b671..722fb6dbb72e 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -57,7 +57,7 @@
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
-#define GFX_MSTR_IRQ XE_REG(0x190010)
+#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
#define DISPLAY_IRQ REG_BIT(16)
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
index 58a4e0fad1e1..617ddb84b7fa 100644
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
@@ -14,4 +14,7 @@
#define LMEM_EN REG_BIT(31)
#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
+#define VF_CAP REG_BIT(0)
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 9d1d88af8b2f..8cf2367449d8 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# "live" kunit tests
-obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
+xe_live_test-y = xe_live_test_mod.o \
xe_bo_test.o \
xe_dma_buf_test.o \
xe_migrate_test.o \
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3436fd9cf2b2..9f3c02826464 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
int ret;
/* TODO: Sanity check */
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -163,7 +163,7 @@ static int ccs_test_run_device(struct xe_device *xe)
return 0;
}
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id) {
/* For igfx run only for primary tile */
@@ -172,7 +172,7 @@ static int ccs_test_run_device(struct xe_device *xe)
ccs_test_run_tile(xe, tile, test);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
struct xe_bo *bo, *external;
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
struct xe_gt *__gt;
int err, i, id;
@@ -335,12 +335,12 @@ static int evict_test_run_device(struct xe_device *xe)
return 0;
}
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id)
evict_test_run_tile(xe, tile, test);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
index f408f17f2164..a324cde77db8 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo_test.c
@@ -19,8 +19,3 @@ static struct kunit_suite xe_bo_test_suite = {
};
kunit_test_suite(xe_bo_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_bo kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 9f6d571d7fa9..e7f9b531c465 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -12,6 +12,7 @@
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool p2p_enabled(struct dma_buf_test_params *params)
{
@@ -36,14 +37,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
xe_bo_assert_held(imported);
mem_type = XE_PL_VRAM0;
- if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
/* No VRAM allowed */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !p2p_enabled(params))
/* No P2P */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ (params->mem_mask & XE_BO_FLAG_SYSTEM))
/* Pin migrated to TT */
mem_type = XE_PL_TT;
@@ -93,7 +94,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* possible, saving a migration step as the transfer is likely
* just as fast from system memory.
*/
- if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+ if (params->mem_mask & XE_BO_FLAG_SYSTEM)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
else
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,17 +116,17 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* No VRAM on this device? */
if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
- (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ (params->mem_mask & XE_BO_FLAG_VRAM0))
return;
size = PAGE_SIZE;
- if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+ if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
+ ttm_bo_type_device, params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -148,7 +149,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
*/
if (params->force_different_devices &&
!p2p_enabled(params) &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
@@ -161,7 +162,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
params->force_different_devices &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM))
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Otherwise only expect interrupts or success. */
else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +181,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(import));
} else if (!params->force_different_devices ||
p2p_enabled(params) ||
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
/* Shouldn't fail if we can reuse same bo, use p2p or use system */
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
@@ -203,52 +204,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
* gem object.
*/
static const struct dma_buf_test_params test_params[] = {
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM},
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.force_different_devices = true},
{}
@@ -259,6 +260,7 @@ static int dma_buf_run_device(struct xe_device *xe)
const struct dma_buf_test_params *params;
struct kunit *test = xe_cur_kunit();
+ xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
struct dma_buf_test_params p = *params;
@@ -266,6 +268,7 @@ static int dma_buf_run_device(struct xe_device *xe)
test->priv = &p;
xe_test_dmabuf_import_same_driver(xe);
}
+ xe_pm_runtime_put(xe);
/* A non-zero return would halt iteration over driver devices */
return 0;
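
The hunk above takes a single runtime-PM reference before the dma-buf test loop and drops it once every parameter combination has run, the same xe_pm_runtime_get()/xe_pm_runtime_put() bracketing applied throughout this series. A minimal standalone C sketch of that discipline, using a plain counter as a stand-in for the real wake reference (the xe helpers are only named here, not reimplemented):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for a device's runtime-PM wake reference count. */
struct fake_dev {
	int wake_ref;
};

static void fake_runtime_get(struct fake_dev *d)
{
	d->wake_ref++;			/* device must stay awake from here on */
}

static void fake_runtime_put(struct fake_dev *d)
{
	assert(d->wake_ref > 0);	/* every put must pair with a get */
	d->wake_ref--;
}

static void run_one_case(struct fake_dev *d, int i)
{
	assert(d->wake_ref > 0);	/* touching HW while asleep is a bug */
	printf("case %d runs with wake_ref=%d\n", i, d->wake_ref);
}

int main(void)
{
	struct fake_dev dev = { .wake_ref = 0 };
	int i;

	fake_runtime_get(&dev);		/* one get brackets the whole loop */
	for (i = 0; i < 3; i++)
		run_one_case(&dev, i);
	fake_runtime_put(&dev);		/* matching put after the last case */

	assert(dev.wake_ref == 0);
	return 0;
}

The point of the bracket is that every path out of the loop still reaches the put, and no hardware-touching work runs while the count is zero.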
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
index 9f5a9cda8c0f..99cdb718b6c6 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
@@ -18,8 +18,3 @@ static struct kunit_suite xe_dma_buf_test_suite = {
};
kunit_test_suite(xe_dma_buf_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_dma_buf kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
new file mode 100644
index 000000000000..ee30a1939eb0
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+
+static int guc_id_mgr_test_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm;
+
+ xe_kunit_helper_xe_device_test_init(test);
+ idm = &xe_device_get_gt(test->priv, 0)->uc.guc.submission_state.idm;
+
+ mutex_init(idm_mutex(idm));
+ test->priv = idm;
+ return 0;
+}
+
+static void bad_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, xe_guc_id_mgr_init(idm, 0));
+ KUNIT_EXPECT_EQ(test, -ERANGE, xe_guc_id_mgr_init(idm, GUC_ID_MAX + 1));
+}
+
+static void no_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ mutex_lock(idm_mutex(idm));
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve_locked(idm, 0));
+ mutex_unlock(idm_mutex(idm));
+
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve(idm, 1, 1));
+}
+
+static void init_fini(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+ KUNIT_EXPECT_NOT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, GUC_ID_MAX);
+ __fini_idm(NULL, idm);
+ KUNIT_EXPECT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, 0);
+}
+
+static void check_used(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm->used, n);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, idm->used, n + 1);
+ }
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->used);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_quota(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total - 1; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total - n), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, idm->total - n, 1), -EDQUOT);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 1), 0);
+ }
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->total);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_all(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++)
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ for (n = 0; n < idm->total; n++)
+ idm_release_chunk_locked(idm, n, 1);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static struct kunit_case guc_id_mgr_test_cases[] = {
+ KUNIT_CASE(bad_init),
+ KUNIT_CASE(no_init),
+ KUNIT_CASE(init_fini),
+ KUNIT_CASE(check_used),
+ KUNIT_CASE(check_quota),
+ KUNIT_CASE_SLOW(check_all),
+ {}
+};
+
+static struct kunit_suite guc_id_mgr_suite = {
+ .name = "guc_idm",
+ .test_cases = guc_id_mgr_test_cases,
+
+ .init = guc_id_mgr_test_init,
+ .exit = NULL,
+};
+
+kunit_test_suites(&guc_id_mgr_suite);
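
The new KUnit suite above exercises the reserve/release bookkeeping of the GuC submission ID manager through its internal helpers. As a rough userspace sketch of the kind of bitmap-backed chunk allocator those tests target (a simplified illustration, not the xe_guc_id_mgr implementation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IDM_TOTAL 32u

/* Minimal bitmap-backed ID allocator: a set bit means the ID is in use. */
struct idm {
	uint32_t bitmap;
	unsigned int used;
};

static uint32_t chunk_mask(unsigned int start, unsigned int count)
{
	assert(count > 0 && count < 32 && start + count <= IDM_TOTAL);
	return ((1u << count) - 1u) << start;
}

/* Reserve @count consecutive IDs; return the first ID or -1 if none fit. */
static int idm_reserve(struct idm *idm, unsigned int count)
{
	unsigned int start;

	for (start = 0; start + count <= IDM_TOTAL; start++) {
		uint32_t mask = chunk_mask(start, count);

		if (!(idm->bitmap & mask)) {
			idm->bitmap |= mask;
			idm->used += count;
			return (int)start;
		}
	}
	return -1;
}

static void idm_release(struct idm *idm, unsigned int start, unsigned int count)
{
	uint32_t mask = chunk_mask(start, count);

	assert((idm->bitmap & mask) == mask);	/* must have been reserved */
	idm->bitmap &= ~mask;
	idm->used -= count;
}

int main(void)
{
	struct idm idm = { 0 };
	int a = idm_reserve(&idm, 4);
	int b = idm_reserve(&idm, 8);

	printf("a=%d b=%d used=%u\n", a, b, idm.used);
	idm_release(&idm, (unsigned int)a, 4);
	idm_release(&idm, (unsigned int)b, 8);
	assert(idm.used == 0);
	return 0;
}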
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
new file mode 100644
index 000000000000..eb1ea99a5a8b
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe live kunit tests");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index c347e2c29f81..977d5f4e4490 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -10,6 +10,7 @@
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
const char *str, struct kunit *test)
@@ -112,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -190,7 +191,7 @@ out_unlock:
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -202,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
return;
if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
- region = XE_BO_CREATE_VRAM1_BIT;
+ region = XE_BO_FLAG_VRAM1;
else
- region = XE_BO_CREATE_VRAM0_BIT;
+ region = XE_BO_FLAG_VRAM0;
test_copy(m, bo, test, region);
}
@@ -280,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -289,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -300,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -423,17 +424,19 @@ static int migrate_test_run_device(struct xe_device *xe)
struct xe_tile *tile;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, true);
- xe_device_mem_access_get(xe);
xe_migrate_sanity_test(m, test);
- xe_device_mem_access_put(xe);
xe_vm_unlock(m->q->vm);
}
+ xe_pm_runtime_put(xe);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
index cf0c173b945f..eb0d8963419c 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
@@ -18,8 +18,3 @@ static struct kunit_suite xe_migrate_test_suite = {
};
kunit_test_suite(xe_migrate_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_migrate kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index df5c36b70ab4..1b8617075b37 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -10,10 +10,11 @@
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
-#include "xe_pci.h"
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mocs.h"
-#include "xe_device.h"
+#include "xe_pci.h"
+#include "xe_pm.h"
struct live_mocs {
struct xe_mocs_info table;
@@ -28,6 +29,8 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
+ kunit_info(test, "gt %d", gt->info.id);
+ kunit_info(test, "gt type %d", gt->info.type);
kunit_info(test, "table size %d", arg->table.size);
kunit_info(test, "table uc_index %d", arg->table.uc_index);
kunit_info(test, "table n_entries %d", arg->table.n_entries);
@@ -38,69 +41,72 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
+ struct kunit *test = xe_cur_kunit();
+ u32 l3cc, l3cc_expected;
unsigned int i;
- u32 l3cc;
u32 reg_val;
u32 ret;
- struct kunit *test = xe_cur_kunit();
-
- xe_device_mem_access_get(gt_to_xe(gt));
ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "L3CC entries:%d\n", info->n_entries);
- for (i = 0;
- i < (info->n_entries + 1) / 2 ?
- (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
- get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
- reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
- else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_LNCFCMOCS(i).addr, reg_val, l3cc);
- if (reg_val != l3cc)
- KUNIT_FAIL(test, "l3cc reg 0x%x has incorrect val.\n",
- XELP_LNCFCMOCS(i).addr);
+
+ for (i = 0; i < info->n_entries; i++) {
+ if (!(i & 1)) {
+ if (regs_are_mcr(gt))
+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1));
+ else
+ reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1));
+
+ mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
+ } else {
+ /* Just re-use value read on previous iteration */
+ reg_val >>= 16;
+ }
+
+ l3cc_expected = get_entry_l3cc(info, i);
+ l3cc = reg_val & 0xffff;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, l3cc_expected, l3cc);
+
+ KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
+ "l3cc idx=%u has incorrect val.\n", i);
}
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
}
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct xe_device *xe = gt_to_xe(gt);
-
+ struct kunit *test = xe_cur_kunit();
+ u32 mocs, mocs_expected;
unsigned int i;
- u32 mocs;
u32 reg_val;
u32 ret;
- struct kunit *test = xe_cur_kunit();
+ KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
+ "Unused entries index should have been defined\n");
- xe_device_mem_access_get(gt_to_xe(gt));
ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "Global MOCS entries:%d\n", info->n_entries);
- drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
- "Unused entries index should have been defined\n");
- for (i = 0;
- i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+
+ for (i = 0; i < info->n_entries; i++) {
+ if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_GLOBAL_MOCS(i).addr, reg_val, mocs);
- if (reg_val != mocs)
- KUNIT_FAIL(test, "mocs reg 0x%x has incorrect val.\n",
- XELP_GLOBAL_MOCS(i).addr);
+
+ mocs_expected = get_entry_control(info, i);
+ mocs = reg_val;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, mocs_expected, mocs);
+
+ KUNIT_EXPECT_EQ_MSG(test, mocs_expected, mocs,
+ "mocs reg 0x%x has incorrect val.\n", i);
}
+
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
}
static int mocs_kernel_test_run_device(struct xe_device *xe)
@@ -113,6 +119,8 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
unsigned int flags;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
if (flags & HAS_GLOBAL_MOCS)
@@ -120,6 +128,9 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
@@ -139,6 +150,8 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
int id;
struct kunit *test = xe_cur_kunit();
+ xe_pm_runtime_get(xe);
+
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
kunit_info(test, "mocs_reset_test before reset\n");
@@ -156,6 +169,9 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
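
The reworked read_l3cc_table() above relies on each 32-bit LNCFCMOCS register packing two 16-bit L3CC entries: an even table index triggers a fresh read of register i >> 1 and checks the low half, while an odd index reuses the upper half of the previous read. A self-contained sketch of that even/odd split, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Pretend register file: one 32-bit word holds two 16-bit L3CC entries. */
static const uint32_t regs[] = { 0x00300010, 0x00700050 };

int main(void)
{
	const unsigned int n_entries = 4;
	uint32_t reg_val = 0;
	unsigned int i;

	for (i = 0; i < n_entries; i++) {
		uint16_t entry;

		if (!(i & 1))
			reg_val = regs[i >> 1];	/* even index: fresh read */
		else
			reg_val >>= 16;		/* odd index: reuse upper half */

		entry = (uint16_t)(reg_val & 0xffff);
		printf("entry[%u] = 0x%04x\n", i, entry);
	}
	return 0;
}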
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ee40f31e1e12..6315886b659e 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -19,8 +19,3 @@ static struct kunit_suite xe_mocs_test_suite = {
};
kunit_test_suite(xe_mocs_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_mocs kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 44570d888355..9d0c715142b9 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -71,6 +71,7 @@ static const struct platform_test_case cases[] = {
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
};
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 7c124475c428..541361caff3b 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -86,7 +86,8 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
};
xe_gt_assert(q->gt, second_idx <= bb->len);
- xe_gt_assert(q->gt, q->vm->flags & XE_VM_FLAG_MIGRATION);
+ xe_gt_assert(q->gt, xe_sched_job_is_migration(q));
+ xe_gt_assert(q->gt, q->width == 1);
return __xe_bb_create_job(q, bb, addr);
}
@@ -96,7 +97,8 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
{
u64 addr = xe_sa_bo_gpu_addr(bb->bo);
- xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
+ xe_gt_assert(q->gt, !xe_sched_job_is_migration(q));
+ xe_gt_assert(q->gt, q->width == 1);
return __xe_bb_create_job(q, bb, &addr);
}
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 6603a0ea79c5..bc1f794e3e61 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -22,6 +22,7 @@
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
+#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
@@ -111,7 +112,7 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
static bool xe_bo_is_user(struct xe_bo *bo)
{
- return bo->flags & XE_BO_CREATE_USER_BIT;
+ return bo->flags & XE_BO_FLAG_USER;
}
static struct xe_migrate *
@@ -137,16 +138,13 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+ if (bo_flags & XE_BO_FLAG_SYSTEM) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_TT,
};
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = XE_PL_TT;
}
}
@@ -167,12 +165,12 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
* For eviction / restore on suspend / resume objects
* pinned in VRAM must be contiguous
*/
- if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT))
+ if (bo_flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
- if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+ if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
place.fpfn = 0;
place.lpfn = io_size >> PAGE_SHIFT;
} else {
@@ -181,37 +179,27 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
}
places[*c] = place;
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = mem_type;
}
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo->props.preferred_gt == XE_GT1) {
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- } else {
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- }
+ if (bo_flags & XE_BO_FLAG_VRAM0)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+ if (bo_flags & XE_BO_FLAG_VRAM1)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+ if (bo_flags & XE_BO_FLAG_STOLEN) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT) ?
+ .flags = bo_flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -223,17 +211,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
{
u32 c = 0;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
-
- /* The order of placements should indicate preferred location */
-
- if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
- try_add_system(xe, bo, bo_flags, &c);
- try_add_vram(xe, bo, bo_flags, &c);
- } else {
- try_add_vram(xe, bo, bo_flags, &c);
- try_add_system(xe, bo, bo_flags, &c);
- }
+ try_add_vram(xe, bo, bo_flags, &c);
+ try_add_system(xe, bo, bo_flags, &c);
try_add_stolen(xe, bo, bo_flags, &c);
if (!c)
@@ -361,7 +340,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
break;
}
- WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+ WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
/*
* Display scanout is always non-coherent with the CPU cache.
@@ -369,8 +348,8 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
* For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
* require a CPU:WC mapping.
*/
- if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
- (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+ if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+ (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
caching = ttm_write_combined;
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
@@ -737,7 +716,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
@@ -761,7 +740,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
ret = -EINVAL;
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
@@ -779,7 +758,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem, handle_system_ccs);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
if (!move_lacks_source) {
@@ -804,7 +783,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
dma_fence_put(fence);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
out:
return ret;
@@ -816,7 +795,6 @@ out:
* @bo: The buffer object to move.
*
* On successful completion, the object memory will be moved to system memory.
- * This function blocks until the object has been fully moved.
*
* This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -873,9 +851,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (ret)
goto err_res_free;
- dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
- false, MAX_SCHEDULE_TIMEOUT);
-
return 0;
err_res_free:
@@ -888,7 +863,6 @@ err_res_free:
* @bo: The buffer object to move.
*
* On successful completion, the object memory will be moved back to VRAM.
- * This function blocks until the object has been fully moved.
*
* This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -930,9 +904,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (ret)
goto err_res_free;
- dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
- false, MAX_SCHEDULE_TIMEOUT);
-
return 0;
err_res_free:
@@ -1126,25 +1097,18 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static bool should_migrate_to_system(struct xe_bo *bo)
-{
- struct xe_device *xe = xe_bo_device(bo);
-
- return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
-}
-
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
- bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
+ bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
vm_fault_t ret;
- int idx, r = 0;
+ int idx;
if (needs_rpm)
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
ret = ttm_bo_vm_reserve(tbo, vmf);
if (ret)
@@ -1153,17 +1117,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
if (drm_dev_enter(ddev, &idx)) {
trace_xe_bo_cpu_fault(bo);
- if (should_migrate_to_system(bo)) {
- r = xe_bo_migrate(bo, XE_PL_TT);
- if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
- ret = VM_FAULT_NOPAGE;
- else if (r)
- ret = VM_FAULT_SIGBUS;
- }
- if (!ret)
- ret = ttm_bo_vm_fault_reserved(vmf,
- vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
@@ -1184,7 +1139,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
dma_resv_unlock(tbo->base.resv);
out:
if (needs_rpm)
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -1261,18 +1216,19 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
- if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
- !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
- xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
+ if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+ !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
+ ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
+ (flags & XE_BO_NEEDS_64K))) {
aligned_size = ALIGN(size, SZ_64K);
if (type != ttm_bo_type_device)
size = ALIGN(size, SZ_64K);
- flags |= XE_BO_INTERNAL_64K;
+ flags |= XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_64K >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
- flags &= ~XE_BO_INTERNAL_64K;
+ flags &= ~XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_4K >> PAGE_SHIFT;
}
@@ -1291,9 +1247,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
- bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
- bo->props.preferred_gt = XE_BO_PROPS_INVALID;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
@@ -1304,11 +1257,11 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
if (resv) {
- ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+ ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
ctx.resv = resv;
}
- if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+ if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
if (WARN_ON(err)) {
xe_ttm_bo_destroy(&bo->ttm);
@@ -1318,7 +1271,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
/* Defer populating type_sg bos */
placement = (type == ttm_bo_type_sg ||
- bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+ bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
&bo->placement;
err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
placement, alignment,
@@ -1373,21 +1326,21 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
{
struct ttm_place *place = bo->placements;
- if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
+ if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
place->flags = TTM_PL_FLAG_CONTIGUOUS;
place->fpfn = start >> PAGE_SHIFT;
place->lpfn = end >> PAGE_SHIFT;
- switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
- case XE_BO_CREATE_VRAM0_BIT:
+ switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
+ case XE_BO_FLAG_VRAM0:
place->mem_type = XE_PL_VRAM0;
break;
- case XE_BO_CREATE_VRAM1_BIT:
+ case XE_BO_FLAG_VRAM1:
place->mem_type = XE_PL_VRAM1;
break;
- case XE_BO_CREATE_STOLEN_BIT:
+ case XE_BO_FLAG_STOLEN:
place->mem_type = XE_PL_STOLEN;
break;
@@ -1421,7 +1374,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (IS_ERR(bo))
return bo;
- flags |= XE_BO_FIXED_PLACEMENT_BIT;
+ flags |= XE_BO_FLAG_FIXED_PLACEMENT;
err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
if (err) {
xe_bo_free(bo);
@@ -1431,7 +1384,7 @@ __xe_bo_create_locked(struct xe_device *xe,
bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
vm && !xe_vm_in_fault_mode(vm) &&
- flags & XE_BO_CREATE_USER_BIT ?
+ flags & XE_BO_FLAG_USER ?
&vm->lru_bulk_move : NULL, size,
cpu_caching, type, flags);
if (IS_ERR(bo))
@@ -1448,13 +1401,13 @@ __xe_bo_create_locked(struct xe_device *xe,
xe_vm_get(vm);
bo->vm = vm;
- if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
- if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
tile = xe_device_get_root_tile(xe);
xe_assert(xe, tile);
- if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
start + bo->size, U64_MAX);
} else {
@@ -1497,7 +1450,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, type,
- flags | XE_BO_CREATE_USER_BIT);
+ flags | XE_BO_FLAG_USER);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1526,12 +1479,12 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
u64 start = offset == ~0ull ? 0 : offset;
u64 end = offset == ~0ull ? offset : start + size;
- if (flags & XE_BO_CREATE_STOLEN_BIT &&
+ if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
- flags |= XE_BO_CREATE_GGTT_BIT;
+ flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_NEEDS_CPU_ACCESS);
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return bo;
@@ -1628,13 +1581,15 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
struct xe_bo *bo;
+ u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
+
+ dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
- bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
+ (*src)->size, dst_flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1709,8 +1664,8 @@ int xe_bo_pin(struct xe_bo *bo)
xe_assert(xe, !xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */
- xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT));
+ xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT));
/*
* No reason we can't support pinning imported dma-bufs we just don't
@@ -1731,7 +1686,7 @@ int xe_bo_pin(struct xe_bo *bo)
* during suspend / resume (force restore to same physical address).
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_INTERNAL_TEST)) {
+ bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1799,7 +1754,7 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_INTERNAL_TEST)) {
+ bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1902,7 +1857,7 @@ int xe_bo_vmap(struct xe_bo *bo)
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+ if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -1984,29 +1939,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
bo_flags = 0;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
- bo_flags |= XE_BO_DEFER_BACKING;
+ bo_flags |= XE_BO_FLAG_DEFER_BACKING;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
- bo_flags |= XE_BO_SCANOUT_BIT;
+ bo_flags |= XE_BO_FLAG_SCANOUT;
- bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+ bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
- if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+ if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
return -EINVAL;
- bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+ bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
}
if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
return -EINVAL;
@@ -2247,6 +2202,9 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
struct xe_device *xe = xe_bo_device(bo);
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
+ return false;
+
if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
return false;
@@ -2255,7 +2213,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
* can't be used since there's no CCS storage associated with
* non-VRAM addresses.
*/
- if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+ if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
return true;
@@ -2324,9 +2282,9 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
bo = xe_bo_create_user(xe, NULL, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device,
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index c59ad15961ce..a885b14bf595 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -13,48 +13,34 @@
#include "xe_vm_types.h"
#include "xe_vm.h"
-/**
- * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
- * @vm: The vm
- */
-#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
-
-
-
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
-#define XE_BO_CREATE_USER_BIT BIT(0)
+#define XE_BO_FLAG_USER BIT(0)
/* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT BIT(3)
-#define XE_BO_CREATE_VRAM_MASK (XE_BO_CREATE_VRAM0_BIT | \
- XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM BIT(1)
+#define XE_BO_FLAG_VRAM0 BIT(2)
+#define XE_BO_FLAG_VRAM1 BIT(3)
+#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
-#define XE_BO_CREATE_STOLEN_BIT BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
- (IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
- XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
-#define XE_BO_CREATE_PINNED_BIT BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT BIT(8)
-#define XE_BO_DEFER_BACKING BIT(9)
-#define XE_BO_SCANOUT_BIT BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
-#define XE_BO_PAGETABLE BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
-#define XE_BO_NEEDS_UC BIT(14)
+#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
+ XE_BO_FLAG_VRAM0 << (tile)->id : \
+ XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+#define XE_BO_FLAG_PINNED BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING BIT(9)
+#define XE_BO_FLAG_SCANOUT BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
+#define XE_BO_FLAG_PAGETABLE BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_FLAG_NEEDS_UC BIT(14)
+#define XE_BO_NEEDS_64K BIT(15)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
/* this one is triggered internally only */
-#define XE_BO_INTERNAL_TEST BIT(30)
-#define XE_BO_INTERNAL_64K BIT(31)
-
-#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
-#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
-#define XE_PPGTT_PDE_PDPE_PAT2 BIT_ULL(12)
-#define XE_PPGTT_PTE_PAT2 BIT_ULL(7)
-#define XE_PPGTT_PTE_PAT1 BIT_ULL(4)
-#define XE_PPGTT_PTE_PAT0 BIT_ULL(3)
+#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K BIT(31)
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
@@ -68,20 +54,6 @@
#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)
-#define XE_PDE_PS_2M BIT_ULL(7)
-#define XE_PDPE_PS_1G BIT_ULL(7)
-#define XE_PDE_IPS_64K BIT_ULL(11)
-
-#define XE_GGTT_PTE_DM BIT_ULL(1)
-#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
-#define XE_PPGTT_PTE_DM BIT_ULL(11)
-#define XE_PDE_64K BIT_ULL(6)
-#define XE_PTE_PS64 BIT_ULL(8)
-#define XE_PTE_NULL BIT_ULL(9)
-
-#define XE_PAGE_PRESENT BIT_ULL(0)
-#define XE_PAGE_RW BIT_ULL(1)
-
#define XE_PL_SYSTEM TTM_PL_SYSTEM
#define XE_PL_TT TTM_PL_TT
#define XE_PL_VRAM0 TTM_PL_VRAM
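
The renamed placement flags above keep the VRAM bits contiguous so a single XE_BO_FLAG_VRAM_MASK covers them and XE_BO_FLAG_VRAM_IF_DGFX() can pick the bit for a tile by shifting XE_BO_FLAG_VRAM0 by the tile id. A small standalone sketch of that convention, using hypothetical stand-in names:

#include <assert.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))

/* Stand-in placement flags; the two VRAM bits must stay contiguous. */
#define FLAG_SYSTEM		BIT(1)
#define FLAG_VRAM0		BIT(2)
#define FLAG_VRAM1		BIT(3)
#define FLAG_VRAM_MASK		(FLAG_VRAM0 | FLAG_VRAM1)

/* Pick the VRAM bit for a given tile by shifting the first VRAM bit. */
static unsigned int vram_flag_for_tile(unsigned int tile_id)
{
	assert(tile_id < 2);
	return FLAG_VRAM0 << tile_id;
}

int main(void)
{
	unsigned int flags = vram_flag_for_tile(1) | FLAG_SYSTEM;

	if (flags & FLAG_VRAM_MASK)
		printf("bo may be placed in VRAM (flags=0x%x)\n", flags);
	return 0;
}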
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 7a264a9ca06e..541b49007d73 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
return ret;
}
- if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+ if (bo->flags & XE_BO_FLAG_GGTT) {
struct xe_tile *tile = bo->tile;
mutex_lock(&tile->mem.ggtt->lock);
@@ -220,7 +220,7 @@ int xe_bo_restore_user(struct xe_device *xe)
list_splice_tail(&still_in_list, &xe->pinned.external_vram);
spin_unlock(&xe->pinned.lock);
- /* Wait for validate to complete */
+ /* Wait for restore to complete */
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 14ef13b7b421..86422e113d39 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -56,25 +56,6 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
- /** @props: BO user controlled properties */
- struct {
- /** @preferred_mem: preferred memory class for this BO */
- s16 preferred_mem_class;
- /** @prefered_gt: preferred GT for this BO */
- s16 preferred_gt;
- /** @preferred_mem_type: preferred memory type */
- s32 preferred_mem_type;
- /**
- * @cpu_atomic: the CPU expects to do atomics operations to
- * this BO
- */
- bool cpu_atomic;
- /**
- * @device_atomic: the device expects to do atomics operations
- * to this BO
- */
- bool device_atomic;
- } props;
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @created: Whether the bo has passed initial creation */
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 01db5b27bec5..0b7aebaae843 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -5,6 +5,7 @@
#include "xe_debugfs.h"
+#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -12,6 +13,8 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt_debugfs.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
#include "xe_step.h"
#ifdef CONFIG_DRM_XE_DEBUG
@@ -37,6 +40,8 @@ static int info(struct seq_file *m, void *data)
struct xe_gt *gt;
u8 id;
+ xe_pm_runtime_get(xe);
+
drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
@@ -63,11 +68,22 @@ static int info(struct seq_file *m, void *data)
gt->info.engine_mask);
}
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
+static int sriov_info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_sriov_print_info(xe, &p);
return 0;
}
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
+ { .name = "sriov_info", .show = sriov_info, },
};
static int forcewake_open(struct inode *inode, struct file *file)
@@ -76,8 +92,7 @@ static int forcewake_open(struct inode *inode, struct file *file)
struct xe_gt *gt;
u8 id;
- xe_device_mem_access_get(xe);
-
+ xe_pm_runtime_get(xe);
for_each_gt(gt, xe, id)
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
@@ -92,8 +107,7 @@ static int forcewake_release(struct inode *inode, struct file *file)
for_each_gt(gt, xe, id)
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -127,7 +141,7 @@ void xe_debugfs_register(struct xe_device *xe)
if (man) {
char name[16];
- sprintf(name, "vram%d_mm", mem_type - XE_PL_VRAM0);
+ snprintf(name, sizeof(name), "vram%d_mm", mem_type - XE_PL_VRAM0);
ttm_resource_manager_create_debugfs(man, root, name);
}
}
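
The debugfs hunk above swaps sprintf() for snprintf() so the generated "vramN_mm" node name can never overflow its 16-byte buffer. A short standalone illustration of the bounded variant (the index value here is made up):

#include <stdio.h>

int main(void)
{
	char name[16];
	int mem_type = 3;	/* hypothetical placement index */
	int n;

	/* snprintf truncates instead of overflowing if the result would not fit. */
	n = snprintf(name, sizeof(name), "vram%d_mm", mem_type - 2);
	if (n < 0 || (size_t)n >= sizeof(name))
		fprintf(stderr, "name truncated or encoding error (n=%d)\n", n);
	else
		printf("debugfs node name: %s\n", name);
	return 0;
}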
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 68d3d623a05b..3d7980232be1 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -9,10 +9,13 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
+#include <drm/drm_managed.h>
+
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
@@ -64,9 +67,11 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
- xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
- if (ss->vm)
- xe_vm_snapshot_capture_delayed(ss->vm);
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
}
@@ -74,17 +79,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
- struct xe_device *xe = coredump_to_xe(coredump);
- struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+ struct xe_device *xe;
+ struct xe_devcoredump_snapshot *ss;
struct drm_printer p;
struct drm_print_iterator iter;
struct timespec64 ts;
int i;
- /* Our device is gone already... */
- if (!data || !coredump_to_xe(coredump))
+ if (!coredump)
return -ENODEV;
+ xe = coredump_to_xe(coredump);
+ ss = &coredump->snapshot;
+
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
@@ -117,10 +124,8 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
&p);
- if (coredump->snapshot.vm) {
- drm_printf(&p, "\n**** VM state ****\n");
- xe_vm_snapshot_print(coredump->snapshot.vm, &p);
- }
+ drm_printf(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(coredump->snapshot.vm, &p);
return count - iter.remain;
}
@@ -180,10 +185,12 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
}
}
- xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job);
+ coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
@@ -196,8 +203,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
- if (ss->vm)
- queue_work(system_unbound_wq, &ss->work);
+ queue_work(system_unbound_wq, &ss->work);
xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
dma_fence_end_signalling(cookie);
@@ -231,5 +237,14 @@ void xe_devcoredump(struct xe_sched_job *job)
dev_coredumpm(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free);
}
-#endif
+static void xe_driver_devcoredump_fini(struct drm_device *drm, void *arg)
+{
+ dev_coredump_put(drm->dev);
+}
+
+int xe_devcoredump_init(struct xe_device *xe)
+{
+ return drmm_add_action_or_reset(&xe->drm, xe_driver_devcoredump_fini, xe);
+}
+#endif
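
xe_devcoredump_init() above hands cleanup over to the DRM managed-resource machinery: drmm_add_action_or_reset() arranges for dev_coredump_put() to run when the drm_device is torn down, instead of requiring an explicit call on every exit path. A loose userspace analogy of registering a teardown hook at init time, using the standard atexit() call rather than any kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for state that a registered teardown action would release. */
static int coredump_registered;

static void release_coredump(void)
{
	if (coredump_registered) {
		coredump_registered = 0;
		puts("coredump state released at teardown");
	}
}

static int fake_devcoredump_init(void)
{
	coredump_registered = 1;
	/* Register the release hook; undo and fail init if that fails. */
	if (atexit(release_coredump) != 0) {
		coredump_registered = 0;
		return -1;
	}
	return 0;
}

int main(void)
{
	if (fake_devcoredump_init() != 0)
		return 1;
	puts("init done; teardown hook armed");
	return 0;
}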
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index df8671f0b5eb..e2fa65ce0932 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -11,10 +11,16 @@ struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
void xe_devcoredump(struct xe_sched_job *job);
+int xe_devcoredump_init(struct xe_device *xe);
#else
static inline void xe_devcoredump(struct xe_sched_job *job)
{
}
+
+static inline int xe_devcoredump_init(struct xe_device *xe)
+{
+ return 0;
+}
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index ca85e81fdb44..5ef9b50a20d0 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -9,6 +9,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
@@ -20,6 +21,7 @@
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
+#include "xe_devcoredump.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -45,12 +47,6 @@
#include "xe_vm.h"
#include "xe_wait_user_fence.h"
-#ifdef CONFIG_LOCKDEP
-struct lockdep_map xe_device_mem_access_lockdep_map = {
- .name = "xe_device_mem_access_lockdep_map"
-};
-#endif
-
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -136,15 +132,48 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
};
+static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct xe_device *xe = to_xe_device(file_priv->minor->dev);
+ long ret;
+
+ ret = xe_pm_runtime_get_ioctl(xe);
+ if (ret >= 0)
+ ret = drm_ioctl(file, cmd, arg);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct xe_device *xe = to_xe_device(file_priv->minor->dev);
+ long ret;
+
+ ret = xe_pm_runtime_get_ioctl(xe);
+ if (ret >= 0)
+ ret = drm_compat_ioctl(file, cmd, arg);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+#else
+/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
+#define xe_drm_compat_ioctl NULL
+#endif
+
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = xe_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
+ .compat_ioctl = xe_drm_compat_ioctl,
.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
@@ -193,6 +222,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ if (xe->preempt_fence_wq)
+ destroy_workqueue(xe->preempt_fence_wq);
+
if (xe->ordered_wq)
destroy_workqueue(xe->ordered_wq);
@@ -258,9 +290,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
INIT_LIST_HEAD(&xe->pinned.external_vram);
INIT_LIST_HEAD(&xe->pinned.evicted);
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
- if (!xe->ordered_wq || !xe->unordered_wq) {
+ if (!xe->ordered_wq || !xe->unordered_wq ||
+ !xe->preempt_fence_wq) {
+ /*
+ * Cleanup is done in xe_device_destroy via the
+ * drmm_add_action_or_reset registered above.
+ */
drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
err = -ENOMEM;
goto err;
@@ -380,8 +418,70 @@ mask_err:
return err;
}
-/*
- * Initialize MMIO resources that don't require any knowledge about tile count.
+static bool verify_lmem_ready(struct xe_gt *gt)
+{
+ u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+
+ return !!val;
+}
+
+static int wait_for_lmem_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ unsigned long timeout, start;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ if (verify_lmem_ready(gt))
+ return 0;
+
+ drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
+
+ start = jiffies;
+ timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+
+ do {
+ if (signal_pending(current))
+ return -EINTR;
+
+ /*
+ * The boot firmware initializes local memory and
+ * assesses its health. If memory training fails,
+ * the punit will have been instructed to keep the GT powered
+ * down; we won't be able to communicate with it.
+ *
+ * If the status check is done before punit updates the register,
+ * it can lead to the system being unusable.
+ * Use a timeout and defer the probe to prevent this.
+ */
+ if (time_after(jiffies, timeout)) {
+ drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
+ return -EPROBE_DEFER;
+ }
+
+ msleep(20);
+
+ } while (!verify_lmem_ready(gt));
+
+ drm_dbg(&xe->drm, "lmem ready after %ums",
+ jiffies_to_msecs(jiffies - start));
+
+ return 0;
+}
+
+/**
+ * xe_device_probe_early() - Device early probe
+ * @xe: xe device instance
+ *
+ * Initialize MMIO resources that don't require any
+ * knowledge about tile count. Also initialize pcode and
+ * check vram initialization on root tile.
+ *
+ * Return: 0 on success, error code on failure
*/
int xe_device_probe_early(struct xe_device *xe)
{
@@ -391,7 +491,13 @@ int xe_device_probe_early(struct xe_device *xe)
if (err)
return err;
- err = xe_mmio_root_tile_init(xe);
+ xe_sriov_probe_early(xe);
+
+ err = xe_pcode_probe_early(xe);
+ if (err)
+ return err;
+
+ err = wait_for_lmem_ready(xe);
if (err)
return err;
@@ -469,15 +575,15 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
if (err)
return err;
- for_each_gt(gt, xe, id) {
- err = xe_pcode_probe(gt);
- if (err)
- return err;
- }
+ for_each_gt(gt, xe, id)
+ xe_pcode_init(gt);
err = xe_display_init_noirq(xe);
if (err)
@@ -544,11 +650,7 @@ int xe_device_probe(struct xe_device *xe)
xe_hwmon_register(xe);
- err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
err_fini_display:
xe_display_driver_remove(xe);
@@ -612,87 +714,20 @@ u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
-bool xe_device_mem_access_ongoing(struct xe_device *xe)
-{
- if (xe_pm_read_callback_task(xe) != NULL)
- return true;
-
- return atomic_read(&xe->mem_access.ref);
-}
-
+/**
+ * xe_device_assert_mem_access - Inspect the current runtime_pm state.
+ * @xe: xe device instance
+ *
+ * To be used before any kind of memory access. It will splat a debug warning
+ * if the device is currently sleeping, but it does not guarantee in any way
+ * that the device will remain awake. Xe runtime-PM get and put calls should
+ * bracket the memory access at its outer bound, while this check is intended
+ * for inner usage, to warn if the worst case has just happened.
+ */
void xe_device_assert_mem_access(struct xe_device *xe)
{
- XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
-}
-
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
-{
- bool active;
-
- if (xe_pm_read_callback_task(xe) == current)
- return true;
-
- active = xe_pm_runtime_get_if_active(xe);
- if (active) {
- int ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
- }
-
- return active;
-}
-
-void xe_device_mem_access_get(struct xe_device *xe)
-{
- int ref;
-
- /*
- * This looks racy, but should be fine since the pm_callback_task only
- * transitions from NULL -> current (and back to NULL again), during the
- * runtime_resume() or runtime_suspend() callbacks, for which there can
- * only be a single one running for our device. We only need to prevent
- * recursively calling the runtime_get or runtime_put from those
- * callbacks, as well as preventing triggering any access_ongoing
- * asserts.
- */
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- /*
- * Since the resume here is synchronous it can be quite easy to deadlock
- * if we are not careful. Also in practice it might be quite timing
- * sensitive to ever see the 0 -> 1 transition with the callers locks
- * held, so deadlocks might exist but are hard for lockdep to ever see.
- * With this in mind, help lockdep learn about the potentially scary
- * stuff that can happen inside the runtime_resume callback by acquiring
- * a dummy lock (it doesn't protect anything and gets compiled out on
- * non-debug builds). Lockdep then only needs to see the
- * mem_access_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> mem_access_lockdep_map.
- * For example if the (callers_locks) are ever grabbed in the
- * runtime_resume callback, lockdep should give us a nice splat.
- */
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
- lock_map_release(&xe_device_mem_access_lockdep_map);
-
- xe_pm_runtime_get(xe);
- ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
-
-}
-
-void xe_device_mem_access_put(struct xe_device *xe)
-{
- int ref;
-
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- ref = atomic_dec_return(&xe->mem_access.ref);
- xe_pm_runtime_put(xe);
-
- xe_assert(xe, ref >= 0);
+ xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
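With the mem_access reference counting removed, callers are expected to take and drop the runtime PM reference at the outer bounds of a memory-access path and keep only the assert on the inner paths. A minimal sketch of that usage, assuming a hypothetical do_mmio_work() helper (not a real driver function):

/*
 * Sketch only: the outer path owns the runtime PM reference, the inner
 * helper merely asserts that someone upstream already took it.
 */
#include "xe_device.h"
#include "xe_pm.h"

static void do_mmio_work(struct xe_device *xe)
{
	xe_device_assert_mem_access(xe);	/* inner: check, never wake */
	/* ... register access ... */
}

static void outer_path(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);			/* wakes the device if needed */
	do_mmio_work(xe);
	xe_pm_runtime_put(xe);
}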
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 14be34d9f543..36d4434ebccc 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -16,10 +16,6 @@ struct xe_file;
#include "xe_force_wake.h"
#include "xe_macros.h"
-#ifdef CONFIG_LOCKDEP
-extern struct lockdep_map xe_device_mem_access_lockdep_map;
-#endif
-
static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
return container_of(dev, struct xe_device, drm);
@@ -58,7 +54,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
+ if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
gt_id = 0;
return gt_id ? tile->media_gt : tile->primary_gt;
@@ -79,7 +75,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
if (MEDIA_VER(xe) >= 13) {
gt = xe_tile_get_gt(root_tile, gt_id);
} else {
- if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
+ if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
gt_id = 0;
gt = xe->tiles[gt_id].primary_gt;
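Both bound checks above move from '>' to '>=' because an index equal to the array length is already out of range. A tiny standalone illustration, with ARRAY_LEN standing in for XE_MAX_TILES_PER_DEVICE:

#include <stdio.h>

#define ARRAY_LEN 2	/* stand-in for XE_MAX_TILES_PER_DEVICE */

int main(void)
{
	int tiles[ARRAY_LEN] = { 10, 20 };
	int id = ARRAY_LEN;	/* one past the end */

	/*
	 * 'id > ARRAY_LEN' would let this index slip through;
	 * 'id >= ARRAY_LEN' catches it before tiles[id] is touched.
	 */
	if (id >= ARRAY_LEN)
		id = 0;
	printf("%d\n", tiles[id]);
	return 0;
}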
@@ -137,12 +133,7 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
return &gt->mmio.fw;
}
-void xe_device_mem_access_get(struct xe_device *xe);
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
-void xe_device_mem_access_put(struct xe_device *xe);
-
void xe_device_assert_mem_access(struct xe_device *xe);
-bool xe_device_mem_access_ongoing(struct xe_device *xe);
static inline bool xe_device_in_fault_mode(struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 99113a5a2b84..21677b8cd977 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -35,7 +35,9 @@ vram_d3cold_threshold_show(struct device *dev,
if (!xe)
return -EINVAL;
+ xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -58,7 +60,9 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
+ xe_pm_runtime_get(xe);
ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
+ xe_pm_runtime_put(xe);
return ret ?: count;
}
@@ -72,18 +76,14 @@ static void xe_device_sysfs_fini(struct drm_device *drm, void *arg)
sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
}
-void xe_device_sysfs_init(struct xe_device *xe)
+int xe_device_sysfs_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
int ret;
ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret) {
- drm_warn(&xe->drm, "Failed to create sysfs file\n");
- return;
- }
-
- ret = drmm_add_action_or_reset(&xe->drm, xe_device_sysfs_fini, xe);
if (ret)
- drm_warn(&xe->drm, "Failed to add sysfs fini drm action\n");
+ return ret;
+
+ return drmm_add_action_or_reset(&xe->drm, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.h b/drivers/gpu/drm/xe/xe_device_sysfs.h
index 38b240684bee..f9e83d8bd2c7 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.h
@@ -8,6 +8,6 @@
struct xe_device;
-void xe_device_sysfs_init(struct xe_device *xe);
+int xe_device_sysfs_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9785eef2e5a4..2e62450d86e1 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -321,6 +321,10 @@ struct xe_device {
struct {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
+
+ /** @sriov.pf: PF specific data */
+ struct xe_device_pf pf;
+
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
} sriov;
@@ -363,6 +367,9 @@ struct xe_device {
/** @ufence_wq: user fence wait queue */
wait_queue_head_t ufence_wq;
+ /** @preempt_fence_wq: used to serialize preempt fences */
+ struct workqueue_struct *preempt_fence_wq;
+
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
@@ -377,9 +384,6 @@ struct xe_device {
* triggering additional actions when they occur.
*/
struct {
- /** @mem_access.ref: ref count of memory accesses */
- atomic_t ref;
-
/**
* @mem_access.vram_userfault: Encapsulate vram_userfault
* related stuff
@@ -497,20 +501,9 @@ struct xe_device {
/* For pcode */
struct mutex sb_lock;
- /* Should be in struct intel_display */
- u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
- u8 snps_phy_failed_calibration;
- struct drm_atomic_state *modeset_restore_state;
- struct list_head global_obj_list;
-
- union {
- /* only to allow build, not used functionally */
- u32 irq_mask;
- u32 de_irq_mask[I915_MAX_PIPES];
- };
- u32 pipestat_irq_mask[I915_MAX_PIPES];
+ /* only to allow build, not used functionally */
+ u32 irq_mask;
- bool display_irqs_enabled;
u32 enabled_irq_mask;
struct intel_uncore {
@@ -522,11 +515,7 @@ struct xe_device {
unsigned int hpll_freq;
unsigned int czclk_freq;
unsigned int fsb_freq, mem_freq, is_ddr3;
- u8 vblank_enabled;
};
- struct {
- const char *dmc_firmware_path;
- } params;
void *pxp;
#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index da2627ed6ae7..68f309f5e981 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -16,6 +16,7 @@
#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
@@ -33,7 +34,7 @@ static int xe_dma_buf_attach(struct dma_buf *dmabuf,
if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
return -EOPNOTSUPP;
- xe_device_mem_access_get(to_xe_device(obj->dev));
+ xe_pm_runtime_get(to_xe_device(obj->dev));
return 0;
}
@@ -42,7 +43,7 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf,
{
struct drm_gem_object *obj = attach->dmabuf->priv;
- xe_device_mem_access_put(to_xe_device(obj->dev));
+ xe_pm_runtime_put(to_xe_device(obj->dev));
}
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
@@ -216,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
dma_resv_lock(resv, NULL);
bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
0, /* Will require 1way or 2way for vm_bind */
- ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto error;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 87c10bd7958b..08f0b7c95901 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -78,7 +78,7 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
- list_add_tail_rcu(&bo->client_link, &client->bos_list);
+ list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
@@ -96,7 +96,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo)
struct xe_drm_client *client = bo->client;
spin_lock(&client->bos_lock);
- list_del_rcu(&bo->client_link);
+ list_del(&bo->client_link);
spin_unlock(&client->bos_lock);
xe_drm_client_put(client);
@@ -154,8 +154,8 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
/* Internal objects. */
spin_lock(&client->bos_lock);
- list_for_each_entry_rcu(bo, &client->bos_list, client_link) {
- if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ list_for_each_entry(bo, &client->bos_list, client_link) {
+ if (!kref_get_unless_zero(&bo->ttm.base.refcount))
continue;
bo_meminfo(bo, stats);
xe_bo_put(bo);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 826c8b389672..97eeb973e897 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -94,48 +94,16 @@
* Unlock all
*/
+/*
+ * Add validation and rebinding to the drm_exec locking loop, since both can
+ * trigger eviction which may require sleeping dma_resv locks.
+ */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
- struct drm_gem_object *obj;
- unsigned long index;
- int num_fences;
- int ret;
-
- ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
- if (ret)
- return ret;
-
- /*
- * 1 fence slot for the final submit, and 1 more for every per-tile for
- * GPU bind and 1 extra for CPU bind. Note that there are potentially
- * many vma per object/dma-resv, however the fence slot will just be
- * re-used, since they are largely the same timeline and the seqno
- * should be in order. In the case of CPU bind there is dummy fence used
- * for all CPU binds, so no need to have a per-tile slot for that.
- */
- num_fences = 1 + 1 + vm->xe->info.tile_count;
- /*
- * We don't know upfront exactly how many fence slots we will need at
- * the start of the exec, since the TTM bo_validate above can consume
- * numerous fence slots. Also due to how the dma_resv_reserve_fences()
- * works it only ensures that at least that many fence slots are
- * available i.e if there are already 10 slots available and we reserve
- * two more, it can just noop without reserving anything. With this it
- * is quite possible that TTM steals some of the fence slots and then
- * when it comes time to do the vma binding and final exec stage we are
- * lacking enough fence slots, leading to some nasty BUG_ON() when
- * adding the fences. Hence just add our own fences here, after the
- * validate stage.
- */
- drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) {
- ret = dma_resv_reserve_fences(obj->resv, num_fences);
- if (ret)
- return ret;
- }
-
- return 0;
+ /* The fence slot added here is intended for the exec sched job. */
+ return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -152,7 +120,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs = 0, num_ufence = 0;
struct xe_sched_job *job;
- struct dma_fence *rebind_fence;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
ktime_t end = 0;
@@ -249,7 +216,7 @@ retry:
goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(q, vm, fence);
dma_fence_put(fence);
}
@@ -290,39 +257,7 @@ retry:
goto err_exec;
}
- /*
- * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
- * VM mode only.
- */
- rebind_fence = xe_vm_rebind(vm, false);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
- goto err_put_job;
- }
-
- /*
- * We store the rebind_fence in the VM so subsequent execs don't get
- * scheduled before the rebinds of userptrs / evicted BOs is complete.
- */
- if (rebind_fence) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = rebind_fence;
- }
- if (vm->rebind_fence) {
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &vm->rebind_fence->flags)) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = NULL;
- } else {
- dma_fence_get(vm->rebind_fence);
- err = drm_sched_job_add_dependency(&job->drm,
- vm->rebind_fence);
- if (err)
- goto err_put_job;
- }
- }
-
- /* Wait behind munmap style rebinds */
+ /* Wait behind rebinds */
if (!xe_vm_in_lr_mode(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
@@ -359,9 +294,10 @@ retry:
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], job,
- &job->drm.s_fence->finished);
+ for (i = 0; i < num_syncs; i++) {
+ xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+ xe_sched_job_init_user_fence(job, &syncs[i]);
+ }
if (xe_exec_queue_is_lr(q))
q->ring_ops->emit_job(job);
@@ -385,10 +321,7 @@ err_put_job:
err_exec:
drm_exec_fini(exec);
err_unlock_list:
- if (write_locked)
- up_write(&vm->lock);
- else
- up_read(&vm->lock);
+ up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
goto retry;
err_syncs:
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 11e150f4c0c1..395de93579fa 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -31,7 +31,14 @@ enum xe_exec_queue_sched_prop {
};
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
- u64 extensions, int ext_number, bool create);
+ u64 extensions, int ext_number);
+
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+ if (q->vm)
+ xe_vm_put(q->vm);
+ kfree(q);
+}
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
struct xe_vm *vm,
@@ -74,21 +81,21 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (vm)
+ q->vm = xe_vm_get(vm);
+
if (extensions) {
/*
* may set q->usm, must come before xe_lrc_init(),
* may overwrite q->sched_props, must come before q->ops->init()
*/
- err = exec_queue_user_extensions(xe, q, extensions, 0, true);
+ err = exec_queue_user_extensions(xe, q, extensions, 0);
if (err) {
- kfree(q);
+ __xe_exec_queue_free(q);
return ERR_PTR(err);
}
}
- if (vm)
- q->vm = xe_vm_get(vm);
-
if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
@@ -97,13 +104,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
return q;
}
-static void __xe_exec_queue_free(struct xe_exec_queue *q)
-{
- if (q->vm)
- xe_vm_put(q->vm);
- kfree(q);
-}
-
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
@@ -128,7 +128,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
* already grabbed the rpm ref outside any sensitive locks.
*/
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
- drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
+ xe_pm_runtime_get_noresume(xe);
return 0;
@@ -217,7 +217,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
for (i = 0; i < q->width; ++i)
xe_lrc_finish(q->lrc + i);
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
- xe_device_mem_access_put(gt_to_xe(q->gt));
+ xe_pm_runtime_put(gt_to_xe(q->gt));
__xe_exec_queue_free(q);
}
@@ -225,22 +225,22 @@ void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
switch (q->class) {
case XE_ENGINE_CLASS_RENDER:
- sprintf(q->name, "rcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "rcs%d", instance);
break;
case XE_ENGINE_CLASS_VIDEO_DECODE:
- sprintf(q->name, "vcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "vcs%d", instance);
break;
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
- sprintf(q->name, "vecs%d", instance);
+ snprintf(q->name, sizeof(q->name), "vecs%d", instance);
break;
case XE_ENGINE_CLASS_COPY:
- sprintf(q->name, "bcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "bcs%d", instance);
break;
case XE_ENGINE_CLASS_COMPUTE:
- sprintf(q->name, "ccs%d", instance);
+ snprintf(q->name, sizeof(q->name), "ccs%d", instance);
break;
case XE_ENGINE_CLASS_OTHER:
- sprintf(q->name, "gsccs%d", instance);
+ snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
break;
default:
XE_WARN_ON(q->class);
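The sprintf() to snprintf() conversion in this hunk is purely about bounding the write to q->name and guaranteeing NUL termination. A trivial standalone example of the behavior (the 8-byte buffer is arbitrary):

#include <stdio.h>

int main(void)
{
	char name[8];

	/*
	 * snprintf() never writes past sizeof(name) and always NUL
	 * terminates, even when the formatted string is longer.
	 */
	snprintf(name, sizeof(name), "gsccs%d", 12345);
	printf("%s\n", name);	/* prints the truncated "gsccs12" */
	return 0;
}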
@@ -268,7 +268,7 @@ xe_exec_queue_device_get_max_priority(struct xe_device *xe)
}
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
+ u64 value)
{
if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
return -EINVAL;
@@ -276,9 +276,6 @@ static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
return -EPERM;
- if (!create)
- return q->ops->set_priority(q, value);
-
q->sched_props.priority = value;
return 0;
}
@@ -336,7 +333,7 @@ xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
}
static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
+ u64 value)
{
u32 min = 0, max = 0;
@@ -347,16 +344,13 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- if (!create)
- return q->ops->set_timeslice(q, value);
-
q->sched_props.timeslice_us = value;
return 0;
}
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 value, bool create);
+ u64 value);
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
@@ -365,8 +359,7 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
static int exec_queue_user_ext_set_property(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 extension,
- bool create)
+ u64 extension)
{
u64 __user *address = u64_to_user_ptr(extension);
struct drm_xe_ext_set_property ext;
@@ -388,21 +381,20 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (!exec_queue_set_property_funcs[idx])
return -EINVAL;
- return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
+ return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}
typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 extension,
- bool create);
+ u64 extension);
-static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
+static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};
#define MAX_USER_EXTENSIONS 16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
- u64 extensions, int ext_number, bool create)
+ u64 extensions, int ext_number)
{
u64 __user *address = u64_to_user_ptr(extensions);
struct drm_xe_user_extension ext;
@@ -423,13 +415,13 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
idx = array_index_nospec(ext.name,
ARRAY_SIZE(exec_queue_user_extension_funcs));
- err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
+ err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
if (XE_IOCTL_DBG(xe, err))
return err;
if (ext.next_extension)
return exec_queue_user_extensions(xe, q, ext.next_extension,
- ++ext_number, create);
+ ++ext_number);
return 0;
}
@@ -448,7 +440,7 @@ find_hw_engine(struct xe_device *xe,
{
u32 idx;
- if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
if (eci.gt_id >= xe->info.gt_count)
@@ -597,7 +589,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
/* The migration vm doesn't hold rpm ref */
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
@@ -606,7 +598,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
args->width, hwe, flags,
args->extensions);
- xe_device_mem_access_put(xe); /* now held by engine */
+ xe_pm_runtime_put(xe); /* now held by engine */
xe_vm_put(migrate_vm);
if (IS_ERR(new)) {
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 62b3d9d1d7cd..ee78d497d838 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -76,14 +76,12 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
/* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
-/* queue keeps running pending jobs after destroy ioctl */
-#define EXEC_QUEUE_FLAG_PERSISTENT BIT(3)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
-#define EXEC_QUEUE_FLAG_VM BIT(4)
+#define EXEC_QUEUE_FLAG_VM BIT(3)
/* child of VM queue for multi-tile VM jobs */
-#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(4)
/* kernel exec_queue only, set priority to highest level */
-#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(5)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -148,6 +146,11 @@ struct xe_exec_queue {
const struct xe_ring_ops *ring_ops;
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
struct drm_sched_entity *entity;
+ /**
+ * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
+ * Protected by @vm's resv. Unused if @vm == NULL.
+ */
+ u64 tlb_flush_seqno;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc lrc[];
};
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index ab96edb058d6..0d541f55b4fc 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,12 +5,14 @@
#include "xe_ggtt.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include "regs/xe_gt_regs.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
@@ -19,16 +21,10 @@
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
-#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wopcm.h"
-#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
-#define XELPG_GGTT_PTE_PAT1 BIT_ULL(53)
-
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP 0xFEE00000
-
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index)
{
@@ -200,20 +196,20 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
}
+static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
+
static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
struct drm_mm_node *hole;
u64 start, end;
/* Display may have allocated inside ggtt, so be careful with clearing here */
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
xe_ggtt_clear(ggtt, start, end - start);
xe_ggtt_invalidate(ggtt);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
}
int xe_ggtt_init(struct xe_ggtt *ggtt)
@@ -227,11 +223,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_CREATE_PINNED_BIT;
+ flags = XE_BO_FLAG_PINNED;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
- flags |= XE_BO_CREATE_SYSTEM_BIT;
+ flags |= XE_BO_FLAG_SYSTEM;
else
- flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+ flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
if (IS_ERR(ggtt->scratch)) {
@@ -249,51 +245,19 @@ err:
return err;
}
-#define GUC_TLB_INV_CR XE_REG(0xcee8)
-#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
-#define PVC_GUC_TLB_INV_DESC0 XE_REG(0xcf7c)
-#define PVC_GUC_TLB_INV_DESC0_VALID REG_BIT(0)
-#define PVC_GUC_TLB_INV_DESC1 XE_REG(0xcf80)
-#define PVC_GUC_TLB_INV_DESC1_INVALIDATE REG_BIT(6)
-
static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
+ int err;
+
if (!gt)
return;
- /*
- * Invalidation can happen when there's no in-flight work keeping the
- * GT awake. We need to explicitly grab forcewake to ensure the GT
- * and GuC are accessible.
- */
- xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-
- /* TODO: vfunc for GuC vs. non-GuC */
-
- if (gt->uc.guc.submission_state.enabled) {
- int seqno;
-
- seqno = xe_gt_tlb_invalidation_guc(gt);
- xe_gt_assert(gt, seqno > 0);
- if (seqno > 0)
- xe_gt_tlb_invalidation_wait(gt, seqno);
- } else if (xe_device_uc_enabled(gt_to_xe(gt))) {
- struct xe_device *xe = gt_to_xe(gt);
-
- if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
- PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
- PVC_GUC_TLB_INV_DESC0_VALID);
- } else
- xe_mmio_write32(gt, GUC_TLB_INV_CR,
- GUC_TLB_INV_CR_INVALIDATE);
- }
-
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ err = xe_gt_tlb_invalidation_ggtt(gt);
+ if (err)
+ drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}
-void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
+static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
@@ -410,7 +374,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
@@ -419,8 +383,6 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
xe_ggtt_set_pte(ggtt, start + offset, pte);
}
-
- xe_ggtt_invalidate(ggtt);
}
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
@@ -442,14 +404,17 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (err)
return err;
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err)
xe_ggtt_map_bo(ggtt, bo);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+
+ if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
+ xe_ggtt_invalidate(ggtt);
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return err;
}
@@ -465,19 +430,21 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ bool invalidate)
{
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
- mutex_lock(&ggtt->lock);
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ mutex_lock(&ggtt->lock);
xe_ggtt_clear(ggtt, node->start, node->size);
drm_mm_remove_node(node);
node->size = 0;
+ mutex_unlock(&ggtt->lock);
- xe_ggtt_invalidate(ggtt);
+ if (invalidate)
+ xe_ggtt_invalidate(ggtt);
- mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
}
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
@@ -488,8 +455,53 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
/* This BO is not currently in the GGTT */
xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
- xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
+ xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
+ bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+}
+
+#ifdef CONFIG_PCI_IOV
+static u64 xe_encode_vfid_pte(u16 vfid)
+{
+ return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
+}
+
+static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+{
+ u64 start = node->start;
+ u64 size = node->size;
+ u64 end = start + size - 1;
+ u64 pte = xe_encode_vfid_pte(vfid);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (!drm_mm_node_allocated(node))
+ return;
+
+ while (start < end) {
+ xe_ggtt_set_pte(ggtt, start, pte);
+ start += XE_PAGE_SIZE;
+ }
+
+ xe_ggtt_invalidate(ggtt);
+}
+
+/**
+ * xe_ggtt_assign - assign a GGTT region to the VF
+ * @ggtt: the &xe_ggtt where the node belongs
+ * @node: the &drm_mm_node to update
+ * @vfid: the VF identifier
+ *
+ * This function is used by the PF driver to assign a GGTT region to the VF.
+ * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
+ * on some platforms VFs can't modify that either.
+ */
+void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+{
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_assign_locked(ggtt, node, vfid);
+ mutex_unlock(&ggtt->lock);
}
+#endif
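Per the kernel-doc above, the VF PTE packs the VFID into bits 11:2 and sets PRESENT bit 0. A standalone sketch of that packing; the mask and shift values below are assumptions mirroring the documented layout, not the driver's GGTT_PTE_VFID/XE_PAGE_PRESENT definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout only: PRESENT in bit 0, VFID in bits 11:2. */
#define PTE_PRESENT	(1ull << 0)
#define PTE_VFID_SHIFT	2
#define PTE_VFID_MASK	(0x3ffull << PTE_VFID_SHIFT)	/* bits 11:2 */

static uint64_t encode_vfid_pte(uint16_t vfid)
{
	return (((uint64_t)vfid << PTE_VFID_SHIFT) & PTE_VFID_MASK) | PTE_PRESENT;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)encode_vfid_pte(5));	/* 0x15 */
	return 0;
}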
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 42705e1338e1..4a41a1762358 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -11,7 +11,6 @@
struct drm_printer;
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte);
-void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
@@ -24,7 +23,8 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
struct drm_mm_node *node,
u32 size, u32 align, u32 mm_flags);
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node);
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ bool invalidate);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
@@ -33,4 +33,8 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+#ifdef CONFIG_PCI_IOV
+void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index a61994292c43..60202b903687 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -17,15 +17,18 @@
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
+#include "regs/xe_gt_regs.h"
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
@@ -127,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -250,9 +253,30 @@ static int gsc_upload(struct xe_gsc *gsc)
static int gsc_upload_and_init(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
int ret;
+ if (XE_WA(gt, 14018094691)) {
+ ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+
+ /*
+ * If the forcewake fails we want to keep going, because the worst-case
+ * outcome of failing to apply the WA is that PXP won't work,
+ * which is not fatal. We still throw a warning so the issue is
+ * seen if it happens.
+ */
+ xe_gt_WARN_ON(tile->primary_gt, ret);
+
+ xe_gt_mcr_multicast_write(tile->primary_gt,
+ EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
+ EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
+ }
+
ret = gsc_upload(gsc);
+
+ if (XE_WA(gt, 14018094691))
+ xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+
if (ret)
return ret;
@@ -272,6 +296,44 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
return 0;
}
+static int gsc_er_complete(struct xe_gt *gt)
+{
+ u32 er_status;
+
+ if (!gsc_fw_is_loaded(gt))
+ return 0;
+
+ /*
+ * Starting on Xe2, the GSCCS engine reset is a 2-step process. When the
+ * driver or the GuC hit the GDRST register, the CS is immediately reset
+ * and a success is reported, but the GSC shim keeps resetting in the
+ * background. While the shim reset is ongoing, the CS is able to accept
+ * new context submission, but any commands that require the shim will
+ * be stalled until the reset is completed. This means that we can keep
+ * submitting to the GSCCS as long as we make sure that the preemption
+ * timeout is big enough to cover any delay introduced by the reset.
+ * When the shim reset completes, a specific CS interrupt is triggered,
+ * in response to which we need to check the GSCI_TIMER_STATUS register
+ * to see if the reset was successful or not.
+ * Note that the GSCI_TIMER_STATUS register is not power save/restored,
+ * so it gets reset on MC6 entry. However, a reset failure stops MC6,
+ * so in that scenario we're always guaranteed to find the correct
+ * value.
+ */
+ er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
+
+ if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
+ /*
+ * XXX: we should trigger an FLR here, but we don't have support
+ * for that yet.
+ */
+ xe_gt_err(gt, "GSC ER timed out!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void gsc_work(struct work_struct *work)
{
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
@@ -285,8 +347,14 @@ static void gsc_work(struct work_struct *work)
gsc->work_actions = 0;
spin_unlock_irq(&gsc->lock);
- xe_device_mem_access_get(xe);
- xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ xe_pm_runtime_get(xe);
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));
+
+ if (actions & GSC_ACTION_ER_COMPLETE) {
+ ret = gsc_er_complete(gt);
+ if (ret)
+ goto out;
+ }
if (actions & GSC_ACTION_FW_LOAD) {
ret = gsc_upload_and_init(gsc);
@@ -299,8 +367,26 @@ static void gsc_work(struct work_struct *work)
if (actions & GSC_ACTION_SW_PROXY)
xe_gsc_proxy_request_handler(gsc);
+out:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
+}
+
+void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ if (unlikely(!intr_vec))
+ return;
+
+ if (intr_vec & GSC_ER_COMPLETE) {
+ spin_lock(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
+ spin_unlock(&gsc->lock);
+
+ queue_work(gsc->wq, &gsc->work);
+ }
}
int xe_gsc_init(struct xe_gsc *gsc)
@@ -382,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index c6fb32e3fd79..dd16e9b8b894 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -9,12 +9,14 @@
#include "xe_gsc_types.h"
struct xe_gt;
+struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_remove(struct xe_gsc *gsc);
+void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 309ef80e3b95..1b908d238bd1 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -66,7 +66,7 @@ static inline struct xe_device *kdev_to_xe(struct device *kdev)
return dev_get_drvdata(kdev);
}
-static bool gsc_proxy_init_done(struct xe_gsc *gsc)
+bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
@@ -403,7 +403,6 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
void *csme;
- int err;
csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
if (!csme)
@@ -411,8 +410,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
kfree(csme);
return PTR_ERR(bo);
@@ -424,11 +423,7 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
gsc->proxy.to_csme = csme;
gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
- err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
}
/**
@@ -528,7 +523,7 @@ int xe_gsc_proxy_start(struct xe_gsc *gsc)
if (err)
return err;
- if (!gsc_proxy_init_done(gsc)) {
+ if (!xe_gsc_proxy_init_done(gsc)) {
xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index 908f9441f093..c511ade6b863 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -11,6 +11,7 @@
struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
+bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
void xe_gsc_proxy_remove(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
index 348994b271be..d34d03248843 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.c
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -41,6 +41,21 @@ gsc_to_gt(struct xe_gsc *gsc)
}
/**
+ * xe_gsc_create_host_session_id - Creates a random 64-bit host_session id with
+ * bits 56-63 masked.
+ *
+ * Return: a random host_session_id which can be used to send messages to the GSC CS
+ */
+u64 xe_gsc_create_host_session_id(void)
+{
+ u64 host_session_id;
+
+ get_random_bytes(&host_session_id, sizeof(u64));
+ host_session_id &= ~HOST_SESSION_CLIENT_MASK;
+ return host_session_id;
+}
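xe_gsc_create_host_session_id() draws 64 random bits and clears bits 56-63 so the client portion of the id stays free. A rough userspace equivalent of that masking, where CLIENT_MASK is assumed to mirror HOST_SESSION_CLIENT_MASK:

#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>

/* Assumed to mirror HOST_SESSION_CLIENT_MASK: the top byte (bits 56-63). */
#define CLIENT_MASK	0xff00000000000000ull

static uint64_t create_host_session_id(void)
{
	uint64_t id = 0;

	if (getrandom(&id, sizeof(id), 0) != sizeof(id))
		return 0;		/* keep the sketch simple on error */
	return id & ~CLIENT_MASK;	/* keep bits 0-55, clear bits 56-63 */
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)create_host_session_id());
	return 0;
}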
+
+/**
* xe_gsc_emit_header - write the MTL GSC header in memory
* @xe: the Xe device
* @map: the iosys map to write to
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h
index 1939855031a6..1416b5745a4c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.h
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.h
@@ -28,4 +28,5 @@ int xe_gsc_read_out_header(struct xe_device *xe,
int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
u64 addr_out, u32 size_out);
+u64 xe_gsc_create_host_session_id(void);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 138d8cc0f19c..5926de20214c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -47,6 +47,7 @@ struct xe_gsc {
u32 work_actions;
#define GSC_ACTION_FW_LOAD BIT(0)
#define GSC_ACTION_SW_PROXY BIT(1)
+#define GSC_ACTION_ER_COMPLETE BIT(2)
/** @proxy: sub-structure containing the SW proxy-related variables */
struct {
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index a0afe1ba6dd5..491d0413de15 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -29,6 +29,7 @@
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
@@ -43,6 +44,7 @@
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
+#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
@@ -310,6 +312,12 @@ int xe_gt_init_early(struct xe_gt *gt)
{
int err;
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_pf_init_early(gt);
+ if (err)
+ return err;
+ }
+
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
return err;
@@ -346,7 +354,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
{
int err, i;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_hw_fence_irq;
@@ -359,7 +366,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
}
- xe_gt_idle_sysfs_init(&gt->gtidle);
+ err = xe_gt_idle_sysfs_init(&gt->gtidle);
+ if (err)
+ goto err_force_wake;
/* Enable per hw engine IRQs */
xe_irq_enable_hwe(gt);
@@ -373,12 +382,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
- drm_warn(&gt_to_xe(gt)->drm,
- "failed to register engines sysfs directory, err: %d\n",
- err);
+ goto err_force_wake;
/* Initialize CCS mode sysfs after early initialization of HW engines */
- xe_gt_ccs_mode_sysfs_init(gt);
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist
@@ -388,7 +397,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
XE_WARN_ON(err);
- xe_device_mem_access_put(gt_to_xe(gt));
return 0;
@@ -398,7 +406,6 @@ err_force_wake:
err_hw_fence_irq:
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
- xe_device_mem_access_put(gt_to_xe(gt));
return err;
}
@@ -407,7 +414,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
{
int err, i;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_hw_fence_irq;
@@ -473,7 +479,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
XE_WARN_ON(err);
- xe_device_mem_access_put(gt_to_xe(gt));
return 0;
@@ -482,7 +487,6 @@ err_force_wake:
err_hw_fence_irq:
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
- xe_device_mem_access_put(gt_to_xe(gt));
return err;
}
@@ -495,7 +499,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
{
int err;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto out;
@@ -518,8 +521,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
out_fw:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
- xe_device_mem_access_put(gt_to_xe(gt));
-
return err;
}
@@ -545,13 +546,17 @@ int xe_gt_init(struct xe_gt *gt)
xe_mocs_init_early(gt);
- xe_gt_sysfs_init(gt);
+ err = xe_gt_sysfs_init(gt);
+ if (err)
+ return err;
err = gt_fw_domain_init(gt);
if (err)
return err;
- xe_gt_freq_init(gt);
+ err = xe_gt_freq_init(gt);
+ if (err)
+ return err;
xe_force_wake_init_engines(gt, gt_to_fw(gt));
@@ -559,11 +564,7 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
}
static int do_gt_reset(struct xe_gt *gt)
@@ -643,9 +644,9 @@ static int gt_reset(struct xe_gt *gt)
goto err_fail;
}
+ xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_sanitize(gt);
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -669,8 +670,8 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(gt_to_xe(gt));
XE_WARN_ON(err);
+ xe_pm_runtime_put(gt_to_xe(gt));
xe_gt_info(gt, "reset done\n");
@@ -680,7 +681,7 @@ err_out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
XE_WARN_ON(xe_uc_start(&gt->uc));
- xe_device_mem_access_put(gt_to_xe(gt));
+ xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
@@ -710,22 +711,20 @@ void xe_gt_reset_async(struct xe_gt *gt)
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
- xe_device_mem_access_get(gt_to_xe(gt));
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
xe_uc_stop_prepare(&gt->uc);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
}
int xe_gt_suspend(struct xe_gt *gt)
{
int err;
+ xe_gt_dbg(gt, "suspending\n");
xe_gt_sanitize(gt);
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -735,15 +734,13 @@ int xe_gt_suspend(struct xe_gt *gt)
goto err_force_wake;
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
- xe_gt_info(gt, "suspended\n");
+ xe_gt_dbg(gt, "suspended\n");
return 0;
err_force_wake:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
- xe_device_mem_access_put(gt_to_xe(gt));
xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
return err;
@@ -753,7 +750,7 @@ int xe_gt_resume(struct xe_gt *gt)
{
int err;
- xe_device_mem_access_get(gt_to_xe(gt));
+ xe_gt_dbg(gt, "resuming\n");
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -763,15 +760,13 @@ int xe_gt_resume(struct xe_gt *gt)
goto err_force_wake;
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
- xe_gt_info(gt, "resumed\n");
+ xe_gt_dbg(gt, "resumed\n");
return 0;
err_force_wake:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
- xe_device_mem_access_put(gt_to_xe(gt));
xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 529fc286cd06..396aeb5b9924 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
- return;
+ return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
- if (err) {
- drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
- return;
- }
+ if (err)
+ return err;
- err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
- if (err) {
- sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- }
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
index f39975aaaab0..f8779852cf0d 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 937054e31d72..c7bca20f6b65 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -78,8 +78,3 @@ int xe_gt_clock_init(struct xe_gt *gt)
gt->info.reference_clock = freq;
return 0;
}
-
-u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count)
-{
- return DIV_ROUND_CLOSEST_ULL(count * NSEC_PER_SEC, gt->info.reference_clock);
-}
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.h b/drivers/gpu/drm/xe/xe_gt_clock.h
index aa162722f859..44fa0371b973 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.h
+++ b/drivers/gpu/drm/xe/xe_gt_clock.h
@@ -11,5 +11,5 @@
struct xe_gt;
int xe_gt_clock_init(struct xe_gt *gt);
-u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index c4b67cf09f8f..8cf0b2625efc 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -5,6 +5,8 @@
#include "xe_gt_debugfs.h"
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
@@ -18,193 +20,246 @@
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_pat.h"
+#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
-static struct xe_gt *node_to_gt(struct drm_info_node *node)
+/**
+ * xe_gt_debugfs_simple_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_gt specific.
+ *
+ * It is assumed that those debugfs files will be created in a directory entry
+ * whose struct dentry d_inode->i_private points to the &xe_gt.
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_gt *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_gt *gt, struct drm_printer *p)
+ * {
+ * drm_printf(p, "GT%u\n", gt->info.id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { .name = "foo", .show = xe_gt_debugfs_simple_show, .data = foo },
+ * };
+ *
+ * dir = debugfs_create_dir("gt", parent);
+ * dir->d_inode->i_private = gt;
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
{
- return node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_gt *gt = parent->d_inode->i_private;
+ int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;
+
+ if (WARN_ON(!print))
+ return -EINVAL;
+
+ return print(gt, &p);
}
-static int hw_engines(struct seq_file *m, void *data)
+static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
struct xe_device *xe = gt_to_xe(gt);
- struct drm_printer p = drm_seq_file_printer(m);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
int err;
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err) {
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return err;
}
for_each_hw_engine(hwe, gt, id)
- xe_hw_engine_print(hwe, &p);
+ xe_hw_engine_print(hwe, p);
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
if (err)
return err;
return 0;
}
-static int force_reset(struct seq_file *m, void *data)
+static int force_reset(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
-
+ xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_reset_async(gt);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int sa_info(struct seq_file *m, void *data)
+static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_tile *tile = gt_to_tile(node_to_gt(m->private));
- struct drm_printer p = drm_seq_file_printer(m);
+ struct xe_tile *tile = gt_to_tile(gt);
- drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, &p,
+ xe_pm_runtime_get(gt_to_xe(gt));
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
tile->mem.kernel_bb_pool->gpu_addr);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int topology(struct seq_file *m, void *data)
+static int topology(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_gt_topology_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_gt_topology_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int steering(struct seq_file *m, void *data)
+static int steering(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_gt_mcr_steering_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_gt_mcr_steering_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int ggtt(struct seq_file *m, void *data)
+static int ggtt(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
+
+ xe_pm_runtime_get(gt_to_xe(gt));
+ ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
- return xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, &p);
+ return ret;
}
-static int register_save_restore(struct seq_file *m, void *data)
+static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- xe_reg_sr_dump(&gt->reg_sr, &p);
- drm_printf(&p, "\n");
+ xe_pm_runtime_get(gt_to_xe(gt));
- drm_printf(&p, "Engine\n");
+ xe_reg_sr_dump(&gt->reg_sr, p);
+ drm_printf(p, "\n");
+
+ drm_printf(p, "Engine\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_sr_dump(&hwe->reg_sr, &p);
- drm_printf(&p, "\n");
+ xe_reg_sr_dump(&hwe->reg_sr, p);
+ drm_printf(p, "\n");
- drm_printf(&p, "LRC\n");
+ drm_printf(p, "LRC\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_sr_dump(&hwe->reg_lrc, &p);
- drm_printf(&p, "\n");
+ xe_reg_sr_dump(&hwe->reg_lrc, p);
+ drm_printf(p, "\n");
- drm_printf(&p, "Whitelist\n");
+ drm_printf(p, "Whitelist\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_whitelist_dump(&hwe->reg_whitelist, &p);
+ xe_reg_whitelist_dump(&hwe->reg_whitelist, p);
+
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int workarounds(struct seq_file *m, void *data)
+static int workarounds(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_wa_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_wa_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int pat(struct seq_file *m, void *data)
+static int pat(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pat_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_pat_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int rcs_default_lrc(struct seq_file *m, void *data)
+static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_RENDER);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_RENDER);
return 0;
}
-static int ccs_default_lrc(struct seq_file *m, void *data)
+static int ccs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COMPUTE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COMPUTE);
return 0;
}
-static int bcs_default_lrc(struct seq_file *m, void *data)
+static int bcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COPY);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COPY);
return 0;
}
-static int vcs_default_lrc(struct seq_file *m, void *data)
+static int vcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_DECODE);
return 0;
}
-static int vecs_default_lrc(struct seq_file *m, void *data)
+static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_ENHANCE);
return 0;
}
static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", hw_engines, 0},
- {"force_reset", force_reset, 0},
- {"sa_info", sa_info, 0},
- {"topology", topology, 0},
- {"steering", steering, 0},
- {"ggtt", ggtt, 0},
- {"register-save-restore", register_save_restore, 0},
- {"workarounds", workarounds, 0},
- {"pat", pat, 0},
- {"default_lrc_rcs", rcs_default_lrc},
- {"default_lrc_ccs", ccs_default_lrc},
- {"default_lrc_bcs", bcs_default_lrc},
- {"default_lrc_vcs", vcs_default_lrc},
- {"default_lrc_vecs", vecs_default_lrc},
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
+ {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
+ {"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+ {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
+ {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
+ {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
+ {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
+ {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
+ {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
+ {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
};
void xe_gt_debugfs_register(struct xe_gt *gt)
@@ -212,13 +267,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
struct dentry *root;
- struct drm_info_list *local;
char name[8];
- int i;
xe_gt_assert(gt, minor->debugfs_root);
- sprintf(name, "gt%d", gt->info.id);
+ snprintf(name, sizeof(name), "gt%d", gt->info.id);
root = debugfs_create_dir(name, minor->debugfs_root);
if (IS_ERR(root)) {
drm_warn(&xe->drm, "Create GT directory failed");
@@ -226,22 +279,13 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
}
/*
- * Allocate local copy as we need to pass in the GT to the debugfs
- * entry and drm_debugfs_create_files just references the drm_info_list
- * passed in (e.g. can't define this on the stack).
+ * Store the xe_gt pointer as private data of the gt/ directory node
+ * so that other GT-specific attributes under that directory can refer
+ * to it by looking at their parent node's private data.
*/
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
-
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
-
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = gt;
+ root->d_inode->i_private = gt;
- drm_debugfs_create_files(local,
+ drm_debugfs_create_files(debugfs_list,
ARRAY_SIZE(debugfs_list),
root, minor);
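
Every entry above is now routed through xe_gt_debugfs_simple_show(), whose body is declared in the header below but not shown in this hunk. A minimal sketch of such a bridge, assuming m->private is the drm_info_node created by drm_debugfs_create_files() and that the parent directory inode carries the xe_gt pointer stored above (the exact field walk is an assumption, not the verbatim driver code):

/*
 * Sketch only (assumed shape): recover the xe_gt from the gt/ directory
 * inode and forward to the printer-based callback stashed in .data.
 */
int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct drm_info_node *node = m->private;
	struct dentry *parent = node->dent->d_parent;
	struct xe_gt *gt = parent->d_inode->i_private;
	int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;

	return print(gt, &p);
}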
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h b/drivers/gpu/drm/xe/xe_gt_debugfs.h
index 5a329f118a57..05a6cc93c78c 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h
@@ -6,8 +6,10 @@
#ifndef _XE_GT_DEBUGFS_H_
#define _XE_GT_DEBUGFS_H_
+struct seq_file;
struct xe_gt;
void xe_gt_debugfs_register(struct xe_gt *gt);
+int xe_gt_debugfs_simple_show(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index e5b0f4ecdbe8..855de40e40ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -15,6 +15,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_guc_pc.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Frequency Management
@@ -49,12 +50,23 @@ dev_to_pc(struct device *dev)
return &kobj_to_gt(dev->kobj.parent)->uc.guc.pc;
}
+static struct xe_device *
+dev_to_xe(struct device *dev)
+{
+ return gt_to_xe(kobj_to_gt(dev->kobj.parent));
+}
+
static ssize_t act_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_act_freq(pc));
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_act_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(act_freq);
@@ -65,7 +77,9 @@ static ssize_t cur_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_cur_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -77,8 +91,13 @@ static ssize_t rp0_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rp0_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rp0_freq);
@@ -86,8 +105,13 @@ static ssize_t rpe_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpe_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpe_freq(pc));
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rpe_freq);
@@ -107,7 +131,9 @@ static ssize_t min_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_min_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -125,7 +151,9 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_min_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -140,7 +168,9 @@ static ssize_t max_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_max_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -158,7 +188,9 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_max_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -190,33 +222,28 @@ static void freq_fini(struct drm_device *drm, void *arg)
* @gt: Xe GT object
*
* It needs to be initialized after GT Sysfs and GuC PC components are ready.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
-void xe_gt_freq_init(struct xe_gt *gt)
+int xe_gt_freq_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (xe->info.skip_guc_pc)
- return;
+ return 0;
gt->freq = kobject_create_and_add("freq0", gt->sysfs);
- if (!gt->freq) {
- drm_warn(&xe->drm, "failed to add freq0 directory to %s\n",
- kobject_name(gt->sysfs));
- return;
- }
+ if (!gt->freq)
+ return -ENOMEM;
err = drmm_add_action_or_reset(&xe->drm, freq_fini, gt->freq);
- if (err) {
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return;
- }
+ if (err)
+ return err;
err = sysfs_create_files(gt->freq, freq_attrs);
if (err)
- drm_warn(&xe->drm, "failed to add freq attrs to %s, err: %d\n",
- kobject_name(gt->freq), err);
+ return err;
- xe_gt_throttle_sysfs_init(gt);
+ return xe_gt_throttle_sysfs_init(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.h b/drivers/gpu/drm/xe/xe_gt_freq.h
index f3fe3c90491a..b7fddbe7b9b6 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.h
+++ b/drivers/gpu/drm/xe/xe_gt_freq.h
@@ -8,6 +8,6 @@
struct xe_gt;
-void xe_gt_freq_init(struct xe_gt *gt);
+int xe_gt_freq_init(struct xe_gt *gt);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9fcae65b6469..8fc0f3f6ecc5 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -12,6 +12,7 @@
#include "xe_guc_pc.h"
#include "regs/xe_gt_regs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Idle
@@ -40,6 +41,15 @@ static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
return &gtidle_to_gt(gtidle)->uc.guc.pc;
}
+static struct xe_device *
+pc_to_xe(struct xe_guc_pc *pc)
+{
+ struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
+ struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
+
+ return gt_to_xe(gt);
+}
+
static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
{
switch (state) {
@@ -86,8 +96,14 @@ static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ ssize_t ret;
+
+ xe_pm_runtime_get(pc_to_xe(pc));
+ ret = sysfs_emit(buff, "%s\n", gtidle->name);
+ xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%s\n", gtidle->name);
+ return ret;
}
static DEVICE_ATTR_RO(name);
@@ -98,7 +114,9 @@ static ssize_t idle_status_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
+ xe_pm_runtime_get(pc_to_xe(pc));
state = gtidle->idle_status(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
@@ -111,7 +129,10 @@ static ssize_t idle_residency_ms_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
+ xe_pm_runtime_get(pc_to_xe(pc));
residency = gtidle->idle_residency(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
+
return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
}
static DEVICE_ATTR_RO(idle_residency_ms);
@@ -131,7 +152,7 @@ static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
-void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
+int xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
{
struct xe_gt *gt = gtidle_to_gt(gtidle);
struct xe_device *xe = gt_to_xe(gt);
@@ -139,16 +160,14 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
int err;
kobj = kobject_create_and_add("gtidle", gt->sysfs);
- if (!kobj) {
- drm_warn(&xe->drm, "%s failed, err: %d\n", __func__, -ENOMEM);
- return;
- }
+ if (!kobj)
+ return -ENOMEM;
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc", gt->info.id);
+ snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc", gt->info.id);
+ snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
@@ -159,14 +178,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
err = sysfs_create_files(kobj, gt_idle_attrs);
if (err) {
kobject_put(kobj);
- drm_warn(&xe->drm, "failed to register gtidle sysfs, err: %d\n", err);
- return;
+ return err;
}
- err = drmm_add_action_or_reset(&xe->drm, gt_idle_sysfs_fini, kobj);
- if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, gt_idle_sysfs_fini, kobj);
}
void xe_gt_idle_enable_c6(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 69280fd16b03..75bd99659b1b 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -10,7 +10,7 @@
struct xe_gt;
-void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle);
+int xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle);
void xe_gt_idle_enable_c6(struct xe_gt *gt);
void xe_gt_idle_disable_c6(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index a7ab9ba645f9..577bd7043740 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -6,6 +6,7 @@
#include "xe_gt_mcr.h"
#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
@@ -294,14 +295,40 @@ static void init_steering_mslice(struct xe_gt *gt)
gt->steering[LNCF].instance_target = 0; /* unused */
}
-static void init_steering_dss(struct xe_gt *gt)
+static unsigned int dss_per_group(struct xe_gt *gt)
{
- unsigned int dss = min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
- xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0));
- unsigned int dss_per_grp = gt_to_xe(gt)->info.platform == XE_PVC ? 8 : 4;
+ if (gt_to_xe(gt)->info.platform == XE_PVC)
+ return 8;
+ else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ return 4;
+ else
+ return 6;
+}
- gt->steering[DSS].group_target = dss / dss_per_grp;
- gt->steering[DSS].instance_target = dss % dss_per_grp;
+/**
+ * xe_gt_mcr_get_dss_steering - Get the group/instance steering for a DSS
+ * @gt: GT structure
+ * @dss: DSS ID to obtain steering for
+ * @group: pointer to storage for steering group ID
+ * @instance: pointer to storage for steering instance ID
+ */
+void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
+{
+ int dss_per_grp = dss_per_group(gt);
+
+ xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
+
+ *group = dss / dss_per_grp;
+ *instance = dss % dss_per_grp;
+}
+
+static void init_steering_dss(struct xe_gt *gt)
+{
+ xe_gt_mcr_get_dss_steering(gt,
+ min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
+ xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0)),
+ &gt->steering[DSS].group_target,
+ &gt->steering[DSS].instance_target);
}
static void init_steering_oaddrm(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 27ca1bc880a0..a7f4ab1aa584 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -7,6 +7,7 @@
#define _XE_GT_MCR_H_
#include "regs/xe_reg_defs.h"
+#include "xe_gt_topology.h"
struct drm_printer;
struct xe_gt;
@@ -25,5 +26,18 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+
+/*
+ * Loop over each DSS and determine the group and instance IDs that
+ * should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_dss_steering(dss, gt, group, instance) \
+ for_each_dss((dss), (gt)) \
+ for_each_if((xe_gt_mcr_get_dss_steering((gt), (dss), &(group), &(instance)), true))
#endif /* _XE_GT_MCR_H_ */
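
The steering split is a plain div/mod by dss_per_group(): with 4 DSS per group, for instance, DSS 10 maps to group 2, instance 2. A small, hypothetical usage sketch of the new iterator (the helper name and the logging are illustrative only):

/* Illustrative only: dump the steering target of every fused-on DSS. */
static void dump_dss_steering(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int dss;
	u16 group, instance;

	for_each_dss_steering(dss, gt, group, instance)
		drm_printf(p, "DSS %u -> group %u, instance %u\n",
			   dss, group, instance);
}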
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 241c294270d9..fa9e9853c53b 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -100,10 +100,9 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- unsigned int num_shared = 2; /* slots for bind + move */
int err;
- err = xe_vm_prepare_vma(exec, vma, num_shared);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
new file mode 100644
index 000000000000..791dcdd767e2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_gt_sriov_pf.h"
+#include "xe_gt_sriov_pf_helpers.h"
+
+/*
+ * VF's metadata is maintained in the flexible array where:
+ * - entry [0] contains metadata for the PF (only if applicable),
+ * - entries [1..n] contain metadata for VF1..VFn::
+ *
+ * <--------------------------- 1 + total_vfs ----------->
+ * +-------+-------+-------+-----------------------+-------+
+ * | 0 | 1 | 2 | | n |
+ * +-------+-------+-------+-----------------------+-------+
+ * | PF | VF1 | VF2 | ... ... | VFn |
+ * +-------+-------+-------+-----------------------+-------+
+ */
+static int pf_alloc_metadata(struct xe_gt *gt)
+{
+ unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);
+
+ gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
+ sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
+ if (!gt->sriov.pf.vfs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Early initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_alloc_metadata(gt);
+ if (err)
+ return err;
+
+ return 0;
+}
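
Given that layout, per-function metadata is reached by plain indexing, with entry [0] reserved for the PF itself. A hypothetical accessor (the element type name is assumed from the gt->sriov.pf.vfs declaration and is not part of this patch):

/* Hypothetical: vfid 0 selects the PF entry, 1..totalvfs select the VFs. */
static struct xe_gt_sriov_metadata *pf_pick_vf(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));

	return &gt->sriov.pf.vfs[vfid];
}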
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
new file mode 100644
index 000000000000..05142ffc4319
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_H_
+#define _XE_GT_SRIOV_PF_H_
+
+struct xe_gt;
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+#else
+static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
new file mode 100644
index 000000000000..79116ad58620
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -0,0 +1,1977 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <linux/string_choices.h>
+#include <linux/wordpart.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_klvs_abi.h"
+
+#include "regs/xe_guc_regs.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_db_mgr.h"
+#include "xe_guc_fwif.h"
+#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
+#include "xe_guc_submit.h"
+#include "xe_lmtt.h"
+#include "xe_map.h"
+#include "xe_sriov.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_wopcm.h"
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
+ u64 addr, u32 size)
+{
+ u32 request[] = {
+ GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
+ vfid,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ size,
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
+/*
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
+
+ return ret <= 0 ? ret : -EPROTO;
+}
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+{
+ const u32 bytes = num_dwords * sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(bytes, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
+
+ ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
+
+ xe_bo_unpin_map_no_vm(bo);
+
+ return ret;
+}
+
+/*
+ * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
+ * negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ int ret;
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+
+ if (ret != num_klvs) {
+ int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ struct drm_printer p = xe_gt_info_printer(gt);
+ char name[8];
+
+ xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs), ERR_PTR(err));
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ }
+
+ return 0;
+}
+
+static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
+{
+ u32 klv[] = {
+ FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
+ value,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
+}
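
/*
 * Editor's sketch (not part of this patch): several 32-bit KLVs can be
 * batched into one GuC message by concatenating header/value pairs; each
 * header dword is FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, n)
 * followed by n value dwords, so two u32 KLVs give num_klvs = 2 and
 * ARRAY_SIZE(klvs) = 4.  The helper below is purely illustrative.
 */
static int pf_push_vf_cfg_sched(struct xe_gt *gt, unsigned int vfid,
				u32 exec_quantum, u32 preempt_timeout)
{
	u32 klvs[] = {
		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY) |
		FIELD_PREP(GUC_KLV_0_LEN, 1),
		exec_quantum,
		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY) |
		FIELD_PREP(GUC_KLV_0_LEN, 1),
		preempt_timeout,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}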
+
+static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
+{
+ u32 klv[] = {
+ FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
+ lower_32_bits(value),
+ upper_32_bits(value),
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
+}
+
+static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
+ lower_32_bits(start),
+ upper_32_bits(start),
+ PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
+ lower_32_bits(size),
+ upper_32_bits(size),
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
+ begin,
+ PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
+ num,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
+ begin,
+ PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
+ num,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum);
+}
+
+static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, preempt_timeout);
+}
+
+static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
+}
+
+static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return &gt->sriov.pf.vfs[vfid].config;
+}
+
+/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
+{
+ u32 n = 0;
+
+ if (drm_mm_node_allocated(&config->ggtt_region)) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(config->ggtt_region.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region.start);
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(config->ggtt_region.size);
+ cfg[n++] = upper_32_bits(config->ggtt_region.size);
+ }
+
+ return n;
+}
+
+/* Return: number of configuration dwords written */
+static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
+{
+ u32 n = 0;
+
+ n += encode_config_ggtt(cfg, config);
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
+ cfg[n++] = config->begin_ctx;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
+ cfg[n++] = config->num_ctxs;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
+ cfg[n++] = config->begin_db;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
+ cfg[n++] = config->num_dbs;
+
+ if (config->lmem_obj) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
+ cfg[n++] = lower_32_bits(config->lmem_obj->size);
+ cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ }
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
+ cfg[n++] = config->exec_quantum;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
+ cfg[n++] = config->preempt_timeout;
+
+ return n;
+}
+
+static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+ u32 num_dwords;
+ int num_klvs;
+ u32 *cfg;
+ int err;
+
+ cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ num_dwords = encode_config(cfg, config);
+ xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+
+ if (xe_gt_is_media_type(gt)) {
+ struct xe_gt *primary = gt->tile->primary_gt;
+ struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
+
+ /* media-GT will never include a GGTT config */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
+
+ /* the GGTT config must be taken from the primary-GT instead */
+ num_dwords += encode_config_ggtt(cfg + num_dwords, other);
+ }
+ xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+
+ num_klvs = xe_guc_klv_count(cfg, num_dwords);
+ err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+
+ kfree(cfg);
+ return err;
+}
+
+static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
+}
+
+static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
+{
+ /* XXX: preliminary */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
+ pf_get_ggtt_alignment(gt) : SZ_64M;
+}
+
+static u64 pf_get_spare_ggtt(struct xe_gt *gt)
+{
+ u64 spare;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.ggtt_size;
+ spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
+{
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (size && size < pf_get_min_spare_ggtt(gt))
+ return -EINVAL;
+
+ size = round_up(size, pf_get_ggtt_alignment(gt));
+ gt->sriov.pf.spare.ggtt_size = size;
+
+ return 0;
+}
+
+static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
+{
+ int err, err2 = 0;
+
+ err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
+
+ if (tile->media_gt && !err)
+ err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
+
+ return err ?: err2;
+}
+
+static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ if (drm_mm_node_allocated(node)) {
+ /*
+ * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
+ * is redundant, as PTE will be implicitly re-assigned to PF by
+ * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
+ */
+ xe_ggtt_remove_node(ggtt, node, false);
+ }
+}
+
+static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
+}
+
+static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ size = round_up(size, alignment);
+
+ if (drm_mm_node_allocated(node)) {
+ err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
+ if (unlikely(err))
+ return err;
+
+ pf_release_ggtt(tile, node);
+ }
+ xe_gt_assert(gt, !drm_mm_node_allocated(node));
+
+ if (!size)
+ return 0;
+
+ err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
+ if (unlikely(err))
+ return err;
+
+ xe_ggtt_assign(ggtt, node, vfid);
+ xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
+ vfid, node->start, node->start + node->size - 1);
+
+ err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
+ if (unlikely(err))
+ return err;
+
+ return 0;
+}
+
+static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct drm_mm_node *node = &config->ggtt_region;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ return drm_mm_node_allocated(node) ? node->size : 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: size of the VF's assigned (or PF's spare) GGTT address space.
+ */
+u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
+{
+ u64 size;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
+ else
+ size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return size;
+}
+
+static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
+ u64 actual, const char *what, int err)
+{
+ char size[10];
+ char name[8];
+
+ xe_sriov_function_name(vfid, name, sizeof(name));
+
+ if (unlikely(err)) {
+ string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
+ name, value, size, what, ERR_PTR(err));
+ string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
+ name, actual, size, what);
+ return err;
+ }
+
+ /* the actual value may have changed during provisioning */
+ string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
+ name, actual, size, what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: the VF identifier
+ * @size: requested GGTT size
+ *
+ * If &vfid represents PF, then function will change PF's spare GGTT config.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ int err;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_ggtt(gt, vfid, size);
+ else
+ err = pf_set_spare_ggtt(gt, size);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u64_done(gt, vfid, size,
+ xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
+ vfid ? "GGTT" : "spare GGTT", err);
+}
+
+static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
+ u64 value, u64 (*get)(struct xe_gt*, unsigned int),
+ const char *what, unsigned int last, int err)
+{
+ char size[10];
+
+ xe_gt_assert(gt, first);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, first <= last);
+
+ if (num_vfs == 1)
+ return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
+ first, first + num_vfs - 1, what);
+ if (last > first)
+ pf_config_bulk_set_u64_done(gt, first, last - first, value,
+ get, what, last, 0);
+ return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
+ }
+
+ /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
+ value = get(gt, first);
+ string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
+ first, first + num_vfs - 1, value, size, what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @size: requested GGTT size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u64 size)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_ggtt(gt, n, size);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
+ xe_gt_sriov_pf_config_get_ggtt,
+ "GGTT", n, err);
+}
+
+/* Return: size of the largest continuous GGTT region */
+static u64 pf_get_max_ggtt(struct xe_gt *gt)
+{
+ struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 spare = pf_get_spare_ggtt(gt);
+ u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 hole_start, hole_end, hole_size;
+ u64 max_hole = 0;
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
+ hole_start, hole_size / SZ_1K);
+ spare -= min3(spare, hole_size, max_hole);
+ max_hole = max(max_hole, hole_size);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
+ max_hole / SZ_1K, spare / SZ_1K);
+ return max_hole > spare ? max_hole - spare : 0;
+}
+
+static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
+{
+ u64 available = pf_get_max_ggtt(gt);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 fair;
+
+ /*
+ * To simplify the logic we only look at the single largest GGTT region,
+ * as that will always be the best fit for the 1 VF case, and will most
+ * likely also nicely cover other cases where VFs are provisioned on a
+ * fresh and idle PF driver, without any stale GGTT allocations spread
+ * across the middle of the full GGTT range.
+ */
+
+ fair = div_u64(available, num_vfs);
+ fair = ALIGN_DOWN(fair, alignment);
+ xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
+ available / SZ_1K, num_vfs, fair / SZ_1K);
+ return fair;
+}
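
/*
 * Editor's note (worked example, not part of this patch): if the largest
 * usable GGTT hole minus the PF spare comes out at 4032 MiB and 7 VFs are
 * being provisioned, the fair share is ALIGN_DOWN(4032 MiB / 7, alignment)
 * = 576 MiB per VF (already aligned, so the 4 KiB/64 KiB alignment does
 * not trim it further).
 */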
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u64 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_ggtt(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
+}
+
+static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
+{
+ /* XXX: preliminary */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
+ hweight64(gt->info.engine_mask) : SZ_256;
+}
+
+static u32 pf_get_spare_ctxs(struct xe_gt *gt)
+{
+ u32 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.num_ctxs;
+ spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (spare > GUC_ID_MAX)
+ return -EINVAL;
+
+ if (spare && spare < pf_get_min_spare_ctxs(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.num_ctxs = spare;
+
+ return 0;
+}
+
+/* Return: start ID or negative error code on failure */
+static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+ unsigned int spare = pf_get_spare_ctxs(gt);
+
+ return xe_guc_id_mgr_reserve(idm, num, spare);
+}
+
+static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+
+ if (num)
+ xe_guc_id_mgr_release(idm, start, num);
+}
+
+static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
+ config->begin_ctx = 0;
+ config->num_ctxs = 0;
+}
+
+static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int ret;
+
+ xe_gt_assert(gt, vfid);
+
+ if (num_ctxs > GUC_ID_MAX)
+ return -EINVAL;
+
+ if (config->num_ctxs) {
+ ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
+ if (unlikely(ret))
+ return ret;
+
+ pf_release_config_ctxs(gt, config);
+ }
+
+ if (!num_ctxs)
+ return 0;
+
+ ret = pf_reserve_ctxs(gt, num_ctxs);
+ if (unlikely(ret < 0))
+ return ret;
+
+ config->begin_ctx = ret;
+ config->num_ctxs = num_ctxs;
+
+ ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
+ if (unlikely(ret)) {
+ pf_release_config_ctxs(gt, config);
+ return ret;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
+ vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
+ return 0;
+}
+
+static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->num_ctxs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ * If &vfid represents a PF then number of PF's spare GuC context IDs is returned.
+ *
+ * Return: VF's quota (or PF's spare).
+ */
+u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 num_ctxs;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
+ else
+ num_ctxs = pf_get_spare_ctxs(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return num_ctxs;
+}
+
+static const char *no_unit(u32 unused)
+{
+ return "";
+}
+
+static const char *spare_unit(u32 unused)
+{
+ return " spare";
+}
+
+static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
+ const char *what, const char *(*unit)(u32), int err)
+{
+ char name[8];
+
+ xe_sriov_function_name(vfid, name, sizeof(name));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
+ name, value, unit(value), what, ERR_PTR(err));
+ xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
+ name, actual, unit(actual), what);
+ return err;
+ }
+
+ /* the actual value may have changed during provisioning */
+ xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
+ name, actual, unit(actual), what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @num_ctxs: requested number of GuC context IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
+ else
+ err = pf_set_spare_ctxs(gt, num_ctxs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, num_ctxs,
+ xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
+ "GuC context IDs", vfid ? no_unit : spare_unit, err);
+}
+
+static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
+ u32 value, u32 (*get)(struct xe_gt*, unsigned int),
+ const char *what, const char *(*unit)(u32),
+ unsigned int last, int err)
+{
+ xe_gt_assert(gt, first);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, first <= last);
+
+ if (num_vfs == 1)
+ return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
+ first, first + num_vfs - 1, what);
+ if (last > first)
+ pf_config_bulk_set_u32_done(gt, first, last - first, value,
+ get, what, unit, last, 0);
+ return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
+ }
+
+ /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
+ value = get(gt, first);
+ xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
+ first, first + num_vfs - 1, value, unit(value), what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier
+ * @num_vfs: number of VFs to provision
+ * @num_ctxs: requested number of GuC context IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u32 num_ctxs)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_ctxs(gt, n, num_ctxs);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
+ xe_gt_sriov_pf_config_get_ctxs,
+ "GuC context IDs", no_unit, n, err);
+}
+
+static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+ u32 spare = pf_get_spare_ctxs(gt);
+ u32 fair = (idm->total - spare) / num_vfs;
+ int ret;
+
+ for (; fair; --fair) {
+ ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
+ if (ret < 0)
+ continue;
+ xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
+ break;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
+ return fair;
+}
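
/*
 * Editor's note (worked example, not part of this patch): the loop above
 * probes downward from the arithmetic share until a reservation of
 * fair * num_vfs IDs (keeping the spare untouched) succeeds, and then
 * releases it again.  E.g. assuming idm->total = 65535, spare = 256 and
 * 7 VFs, the first attempt is (65535 - 256) / 7 = 9325 IDs per VF; if
 * that reservation fails, 9324, 9323, ... are tried, and 0 is returned
 * when nothing fits.
 */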
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u32 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_ctxs(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
+}
+
+static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
+{
+ /* XXX: preliminary, we don't use doorbells yet! */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
+}
+
+static u32 pf_get_spare_dbs(struct xe_gt *gt)
+{
+ u32 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.num_dbs;
+ spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (spare > GUC_NUM_DOORBELLS)
+ return -EINVAL;
+
+ if (spare && spare < pf_get_min_spare_dbs(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.num_dbs = spare;
+ return 0;
+}
+
+/* Return: start ID or negative error code on failure */
+static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+ unsigned int spare = pf_get_spare_dbs(gt);
+
+ return xe_guc_db_mgr_reserve_range(dbm, num, spare);
+}
+
+static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+
+ if (num)
+ xe_guc_db_mgr_release_range(dbm, start, num);
+}
+
+static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ pf_release_dbs(gt, config->begin_db, config->num_dbs);
+ config->begin_db = 0;
+ config->num_dbs = 0;
+}
+
+static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int ret;
+
+ xe_gt_assert(gt, vfid);
+
+ if (num_dbs > GUC_NUM_DOORBELLS)
+ return -EINVAL;
+
+ if (config->num_dbs) {
+ ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
+ if (unlikely(ret))
+ return ret;
+
+ pf_release_config_dbs(gt, config);
+ }
+
+ if (!num_dbs)
+ return 0;
+
+ ret = pf_reserve_dbs(gt, num_dbs);
+ if (unlikely(ret < 0))
+ return ret;
+
+ config->begin_db = ret;
+ config->num_dbs = num_dbs;
+
+ ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
+ if (unlikely(ret)) {
+ pf_release_config_dbs(gt, config);
+ return ret;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
+ vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
+ return 0;
+}
+
+static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->num_dbs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ * If &vfid represents a PF then number of PF's spare GuC doorbell IDs is returned.
+ *
+ * Return: VF's quota (or PF's spare).
+ */
+u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 num_dbs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ num_dbs = pf_get_vf_config_dbs(gt, vfid);
+ else
+ num_dbs = pf_get_spare_dbs(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return num_dbs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @num_dbs: requested number of GuC doorbell IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_dbs(gt, vfid, num_dbs);
+ else
+ err = pf_set_spare_dbs(gt, num_dbs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, num_dbs,
+ xe_gt_sriov_pf_config_get_dbs(gt, vfid),
+ "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @num_dbs: requested number of GuC doorbell IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u32 num_dbs)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_dbs(gt, n, num_dbs);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
+ xe_gt_sriov_pf_config_get_dbs,
+ "GuC doorbell IDs", no_unit, n, err);
+}
+
+static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+ u32 spare = pf_get_spare_dbs(gt);
+ u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
+ int ret;
+
+ for (; fair; --fair) {
+ ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
+ if (ret < 0)
+ continue;
+ xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
+ break;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
+ return fair;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u32 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_dbs(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
+}
+
+static u64 pf_get_lmem_alignment(struct xe_gt *gt)
+{
+ /* this might be platform dependent */
+ return SZ_2M;
+}
+
+static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
+{
+ /* this might be platform dependent */
+ return SZ_128M; /* XXX: preliminary */
+}
+
+static u64 pf_get_spare_lmem(struct xe_gt *gt)
+{
+ u64 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.lmem_size;
+ spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (size && size < pf_get_min_spare_lmem(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.lmem_size = size;
+ return 0;
+}
+
+static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_bo *bo;
+
+ bo = config->lmem_obj;
+ return bo ? bo->size : 0;
+}
+
+static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile;
+ unsigned int tid;
+ int err;
+
+ for_each_tile(tile, xe, tid) {
+ if (tile->primary_gt == gt) {
+ err = pf_push_vf_cfg_lmem(gt, vfid, size);
+ } else {
+ u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
+
+ if (!lmem)
+ continue;
+ err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
+ }
+ if (unlikely(err))
+ return err;
+ }
+ return 0;
+}
+
+static void pf_force_lmtt_invalidate(struct xe_device *xe)
+{
+ /* TODO */
+}
+
+static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_drop_pages(lmtt, vfid);
+ }
+}
+
+static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config;
+ struct xe_tile *tile;
+ struct xe_lmtt *lmtt;
+ struct xe_bo *bo;
+ struct xe_gt *gt;
+ u64 total, offset;
+ unsigned int gtid;
+ unsigned int tid;
+ int err;
+
+ total = 0;
+ for_each_tile(tile, xe, tid)
+ total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+
+ xe_lmtt_drop_pages(lmtt, vfid);
+ if (!total)
+ continue;
+
+ err = xe_lmtt_prepare_pages(lmtt, vfid, total);
+ if (err)
+ goto fail;
+
+ offset = 0;
+ for_each_gt(gt, xe, gtid) {
+ if (xe_gt_is_media_type(gt))
+ continue;
+
+ config = pf_pick_vf_config(gt, vfid);
+ bo = config->lmem_obj;
+ if (!bo)
+ continue;
+
+ err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
+ if (err)
+ goto fail;
+ offset += bo->size;
+ }
+ }
+
+ pf_force_lmtt_invalidate(xe);
+ return 0;
+
+fail:
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_drop_pages(lmtt, vfid);
+ }
+ return err;
+}
+
+static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (config->lmem_obj) {
+ xe_bo_unpin_map_no_vm(config->lmem_obj);
+ config->lmem_obj = NULL;
+ }
+}
+
+static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ size = round_up(size, pf_get_lmem_alignment(gt));
+
+ if (config->lmem_obj) {
+ err = pf_distribute_config_lmem(gt, vfid, 0);
+ if (unlikely(err))
+ return err;
+
+ pf_reset_vf_lmtt(xe, vfid);
+ pf_release_vf_config_lmem(gt, config);
+ }
+ xe_gt_assert(gt, !config->lmem_obj);
+
+ if (!size)
+ return 0;
+
+ xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ config->lmem_obj = bo;
+
+ err = pf_update_vf_lmtt(xe, vfid);
+ if (unlikely(err))
+ goto release;
+
+ err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ if (unlikely(err))
+ goto reset_lmtt;
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
+ vfid, bo->size, bo->size / SZ_1M);
+ return 0;
+
+reset_lmtt:
+ pf_reset_vf_lmtt(xe, vfid);
+release:
+ pf_release_vf_config_lmem(gt, config);
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's spare) LMEM quota.
+ */
+u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
+{
+ u64 size;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ size = pf_get_vf_config_lmem(gt, vfid);
+ else
+ size = pf_get_spare_lmem(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return size;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: the VF identifier
+ * @size: requested LMEM size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_lmem(gt, vfid, size);
+ else
+ err = pf_set_spare_lmem(gt, size);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u64_done(gt, vfid, size,
+ xe_gt_sriov_pf_config_get_lmem(gt, vfid),
+ vfid ? "LMEM" : "spare LMEM", err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @size: requested LMEM size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u64 size)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_lmem(gt, n, size);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
+ xe_gt_sriov_pf_config_get_lmem,
+ "LMEM", n, err);
+}
+
+static u64 pf_query_free_lmem(struct xe_gt *gt)
+{
+ struct xe_tile *tile = gt->tile;
+
+ return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
+}
+
+static u64 pf_query_max_lmem(struct xe_gt *gt)
+{
+ u64 alignment = pf_get_lmem_alignment(gt);
+ u64 spare = pf_get_spare_lmem(gt);
+ u64 free = pf_query_free_lmem(gt);
+ u64 avail;
+
+ /* XXX: need to account for 2MB blocks only */
+ avail = free > spare ? free - spare : 0;
+ avail = round_down(avail, alignment);
+
+ return avail;
+}
+
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
+#else
+#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
+#endif
+
+static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
+{
+ u64 available = pf_query_max_lmem(gt);
+ u64 alignment = pf_get_lmem_alignment(gt);
+ u64 fair;
+
+ fair = div_u64(available, num_vfs);
+ fair = ALIGN_DOWN(fair, alignment);
+#ifdef MAX_FAIR_LMEM
+ fair = min_t(u64, MAX_FAIR_LMEM, fair);
+#endif
+ xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
+ available / SZ_1M, num_vfs, fair / SZ_1M);
+ return fair;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u64 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_lmem(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ int result = 0;
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ if (!xe_gt_is_media_type(gt)) {
+ err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
+ result = result ?: err;
+ err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
+ result = result ?: err;
+ }
+ err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
+ result = result ?: err;
+ err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
+ result = result ?: err;
+
+ return result;
+}
+
+static const char *exec_quantum_unit(u32 exec_quantum)
+{
+ return exec_quantum ? "ms" : "(infinity)";
+}
+
+static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_exec_quantum(gt, vfid, exec_quantum);
+ if (unlikely(err))
+ return err;
+
+ config->exec_quantum = exec_quantum;
+ return 0;
+}
+
+static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->exec_quantum;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, exec_quantum,
+ xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
+ "execution quantum", exec_quantum_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) execution quantum in milliseconds.
+ */
+u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 exec_quantum;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ exec_quantum = pf_get_exec_quantum(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return exec_quantum;
+}
+
+static const char *preempt_timeout_unit(u32 preempt_timeout)
+{
+ return preempt_timeout ? "us" : "(infinity)";
+}
+
+static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_preempt_timeout(gt, vfid, preempt_timeout);
+ if (unlikely(err))
+ return err;
+
+ config->preempt_timeout = preempt_timeout;
+
+ return 0;
+}
+
+static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->preempt_timeout;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, preempt_timeout,
+ xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
+ "preemption timeout", preempt_timeout_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) preemption timeout in microseconds.
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 preempt_timeout;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ preempt_timeout = pf_get_preempt_timeout(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return preempt_timeout;
+}
+
+static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ config->exec_quantum = 0;
+ config->preempt_timeout = 0;
+}
+
+static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ if (!xe_gt_is_media_type(gt)) {
+ pf_release_vf_config_ggtt(gt, config);
+ pf_release_vf_config_lmem(gt, config);
+ }
+ pf_release_config_ctxs(gt, config);
+ pf_release_config_dbs(gt, config);
+ pf_reset_config_sched(gt, config);
+}
+
+/**
+ * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @force: force configuration release
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err || force)
+ pf_release_vf_config(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
+ vfid, ERR_PTR(err),
+ force ? " but all resources were released anyway!" : "");
+ }
+
+ return force ? 0 : err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @refresh: if true, reset the VF's configuration in GuC before pushing it again
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (refresh)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
+ refresh ? "refresh" : "push", vfid, ERR_PTR(err));
+ }
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GGTT configuration data for all VFs.
+ * VFs without provisioned GGTT are ignored.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+ char buf[10];
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!drm_mm_node_allocated(&config->ggtt_region))
+ continue;
+
+ string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
+ n, config->ggtt_region.start,
+ config->ggtt_region.start + config->ggtt_region.size - 1, buf);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_ctxs - Print GuC context ID configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GuC context ID allocations across all VFs.
+ * VFs without GuC context IDs are skipped.
+ *
+ * This function can only be called on PF.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->num_ctxs)
+ continue;
+
+ drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
+ n,
+ config->begin_ctx,
+ config->begin_ctx + config->num_ctxs - 1,
+ config->num_ctxs);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GuC doorbell ID allocations across all VFs.
+ * VFs without GuC doorbell IDs are skipped.
+ *
+ * This function can only be called on PF.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->num_dbs)
+ continue;
+
+ drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
+ n,
+ config->begin_db,
+ config->begin_db + config->num_dbs - 1,
+ config->num_dbs);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GGTT ranges that are available for provisioning.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 hole_start, hole_end, hole_size;
+ u64 spare, avail, total = 0;
+ char buf[10];
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = pf_get_spare_ggtt(gt);
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ total += hole_size;
+
+ string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
+ hole_start, hole_end - 1, buf);
+ }
+
+ mutex_unlock(&ggtt->lock);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
+
+ string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
+
+ avail = total > spare ? total - spare : 0;
+
+ string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
+
+ return 0;
+}
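The fair-share estimate in pf_estimate_fair_lmem() above is plain arithmetic: divide the available LMEM by the number of VFs, round down to the 2M provisioning alignment, and clamp to the MAX_FAIR_LMEM ceiling. Below is a standalone sketch of that calculation; the 8 GiB of free VRAM and 7 VFs are made-up inputs for illustration, not values taken from the driver.

/* Standalone sketch of the fair LMEM estimate; inputs are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define SZ_2M (2ULL << 20)
#define SZ_2G (2ULL << 30)

static uint64_t align_down(uint64_t value, uint64_t alignment)
{
	return value - (value % alignment);	/* alignment is a power of two */
}

static uint64_t estimate_fair_lmem(uint64_t available, unsigned int num_vfs,
				   uint64_t alignment, uint64_t cap)
{
	uint64_t fair = available / num_vfs;	/* div_u64() in the driver */

	fair = align_down(fair, alignment);	/* ALIGN_DOWN() */
	if (fair > cap)				/* min_t(u64, MAX_FAIR_LMEM, ...) */
		fair = cap;
	return fair;
}

int main(void)
{
	uint64_t available = 8ULL << 30;	/* assumed: 8 GiB free VRAM */
	unsigned int num_vfs = 7;		/* assumed VF count */
	uint64_t fair = estimate_fair_lmem(available, num_vfs, SZ_2M, SZ_2G);

	printf("fair LMEM per VF: %llu MiB\n", (unsigned long long)(fair >> 20));
	return 0;
}

With these inputs each VF gets 1170 MiB; with fewer VFs or more free VRAM the 2 GiB MAX_FAIR_LMEM cap noted above becomes the binding constraint.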
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
new file mode 100644
index 000000000000..5e6b36f00b5b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONFIG_H_
+#define _XE_GT_SRIOV_PF_CONFIG_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_gt;
+
+u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size);
+int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt,
+ unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt,
+ unsigned int vfid, unsigned int num_vfs, u64 size);
+
+u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs);
+int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u32 num_ctxs);
+
+u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs);
+int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u32 num_dbs);
+
+u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size);
+int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u64 size);
+
+u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum);
+
+u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout);
+
+int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
+int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
+
+int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p);
+
+int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p);
+
+#endif
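Taken together, the declarations above cover a typical PF provisioning lifecycle: split resources fairly across the enabled VFs, optionally refresh a VF's configuration in the GuC later, and finally release everything. The sketch below is a hypothetical caller, not code from the driver; only the xe_gt_sriov_pf_config_*() calls are the real API.

/* Hypothetical caller sketch; only the xe_gt_sriov_pf_config_*() calls exist. */
#include "xe_gt_sriov_pf_config.h"

static int pf_provision_example(struct xe_gt *gt, unsigned int num_vfs)
{
	/* Split GGTT, LMEM, context IDs and doorbells evenly over VF1..VFn. */
	return xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
}

static int pf_refresh_example(struct xe_gt *gt, unsigned int vfid)
{
	/* Reset the VF's config in GuC, then push the full KLV set again. */
	return xe_gt_sriov_pf_config_push(gt, vfid, true);
}

static void pf_teardown_example(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int vfid;

	/* Force the release so resources are freed even if the reset fails. */
	for (vfid = 1; vfid <= num_vfs; vfid++)
		xe_gt_sriov_pf_config_release(gt, vfid, true);
}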
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
new file mode 100644
index 000000000000..d3745c355957
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
+#define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
+
+#include <drm/drm_mm.h>
+
+struct xe_bo;
+
+/**
+ * struct xe_gt_sriov_config - GT level per-VF configuration data.
+ *
+ * Used by the PF driver to maintain per-VF provisioning data.
+ */
+struct xe_gt_sriov_config {
+ /** @ggtt_region: GGTT region assigned to the VF. */
+ struct drm_mm_node ggtt_region;
+ /** @lmem_obj: LMEM allocation for use by the VF. */
+ struct xe_bo *lmem_obj;
+ /** @num_ctxs: number of GuC context IDs. */
+ u16 num_ctxs;
+ /** @begin_ctx: start index of GuC context ID range. */
+ u16 begin_ctx;
+ /** @num_dbs: number of GuC doorbell IDs. */
+ u16 num_dbs;
+ /** @begin_db: start index of GuC doorbell ID range. */
+ u16 begin_db;
+ /** @exec_quantum: execution-quantum in milliseconds. */
+ u32 exec_quantum;
+ /** @preempt_timeout: preemption timeout in microseconds. */
+ u32 preempt_timeout;
+};
+
+/**
+ * struct xe_gt_sriov_spare_config - GT-level PF spare configuration data.
+ *
+ * Used by the PF driver to maintain its own reserved (spare) provisioning
+ * data that is not suitable for tracking in struct xe_gt_sriov_config.
+ */
+struct xe_gt_sriov_spare_config {
+ /** @ggtt_size: GGTT size. */
+ u64 ggtt_size;
+ /** @lmem_size: LMEM size. */
+ u64 lmem_size;
+ /** @num_ctxs: number of GuC submission contexts. */
+ u16 num_ctxs;
+ /** @num_dbs: number of GuC doorbells. */
+ u16 num_dbs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
new file mode 100644
index 000000000000..40b8f881fe04
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include "abi/guc_actions_sriov_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_ct.h"
+#include "xe_sriov.h"
+
+static const char *control_cmd_to_string(u32 cmd)
+{
+ switch (cmd) {
+ case GUC_PF_TRIGGER_VF_PAUSE:
+ return "PAUSE";
+ case GUC_PF_TRIGGER_VF_RESUME:
+ return "RESUME";
+ case GUC_PF_TRIGGER_VF_STOP:
+ return "STOP";
+ case GUC_PF_TRIGGER_VF_FLR_START:
+ return "FLR_START";
+ case GUC_PF_TRIGGER_VF_FLR_FINISH:
+ return "FLR_FINISH";
+ default:
+ return "<unknown>";
+ }
+}
+
+static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
+{
+ u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL),
+ FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
+ FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd),
+ };
+ int ret;
+
+ /* XXX those two commands are now sent from the G2H handler */
+ if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
+ return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
+
+ ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid != PFID);
+
+ err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
+ vfid, control_cmd_to_string(cmd), ERR_PTR(err));
+ return err;
+}
+
+static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
+}
+
+static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
+}
+
+static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_STOP);
+}
+
+static int pf_send_vf_flr_start(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_START);
+}
+
+static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH);
+}
+
+/**
+ * xe_gt_sriov_pf_control_pause_vf - Pause a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_pause(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_resume_vf - Resume a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_resume(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_stop_vf - Stop a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_stop(gt, vfid);
+}
+
+/**
+ * DOC: The VF FLR Flow with GuC
+ *
+ *          PF                       GUC              PCI
+ * ========================================================
+ *           |                        |                |
+ * (1)       |                       [ ] <----- FLR ---|
+ *           |                       [ ]       :
+ * (2)      [ ] <------ NOTIFY FLR --[ ]
+ *          [ ]                       |
+ * (3)      [ ]                       |
+ *          [ ]                       |
+ *          [ ]-- START FLR --------->[ ]
+ *           |                       [ ]
+ * (4)       |                       [ ]
+ *           |                       [ ]
+ *          [ ] <------- FLR DONE ---[ ]
+ *          [ ]                       |
+ * (5)      [ ]                       |
+ *          [ ]                       |
+ *          [ ]-- FINISH FLR -------->[ ]
+ *           |                        |
+ *
+ * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * Step 2a: on some platforms G2H is only received from root GuC
+ * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ */
+
+static bool needs_dispatch_flr(struct xe_device *xe)
+{
+ return xe->info.platform == XE_PVC;
+}
+
+static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_gt *gtit;
+ unsigned int gtid;
+
+ xe_gt_sriov_info(gt, "VF%u FLR\n", vfid);
+
+ if (needs_dispatch_flr(xe)) {
+ for_each_gt(gtit, xe, gtid)
+ pf_send_vf_flr_start(gtit, vfid);
+ } else {
+ pf_send_vf_flr_start(gt, vfid);
+ }
+}
+
+static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
+{
+ pf_send_vf_flr_finish(gt, vfid);
+}
+
+static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
+{
+ switch (eventid) {
+ case GUC_PF_NOTIFY_VF_FLR:
+ pf_handle_vf_flr(gt, vfid);
+ break;
+ case GUC_PF_NOTIFY_VF_FLR_DONE:
+ pf_handle_vf_flr_done(gt, vfid);
+ break;
+ case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+ break;
+ case GUC_PF_NOTIFY_VF_FIXUP_DONE:
+ break;
+ default:
+ return -ENOPKG;
+ }
+ return 0;
+}
+
+static int pf_handle_pf_event(struct xe_gt *gt, u32 eventid)
+{
+ switch (eventid) {
+ case GUC_PF_NOTIFY_VF_ENABLE:
+ xe_gt_sriov_dbg_verbose(gt, "VFs %s/%s\n",
+ str_enabled_disabled(true),
+ str_enabled_disabled(false));
+ break;
+ default:
+ return -ENOPKG;
+ }
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_process_guc2pf - Handle VF state notification from GuC.
+ * @gt: the &xe_gt
+ * @msg: the G2H message
+ * @len: the length of the G2H message
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+ u32 vfid;
+ u32 eventid;
+
+ xe_gt_assert(gt, len);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);
+
+ if (unlikely(!xe_device_is_sriov_pf(gt_to_xe(gt))))
+ return -EPROTO;
+
+ if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0])))
+ return -EPFNOSUPPORT;
+
+ if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN))
+ return -EPROTO;
+
+ vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
+ eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);
+
+ return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
new file mode 100644
index 000000000000..850a3e37661f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_H_
+#define _XE_GT_SRIOV_PF_CONTROL_H_
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
+#else
+static inline int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+ return -EPROTO;
+}
+#endif
+
+#endif
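The control interface above is deliberately thin: each entry point maps one-to-one onto a PF2GUC_VF_CONTROL command. The sketch below shows how a PF-side caller might combine them; the pf_quiesce_vf_example() wrapper is invented for illustration and does not exist in the driver.

/* Hypothetical wrapper; the xe_gt_sriov_pf_control_*() calls are the real API. */
#include "xe_gt_sriov_pf_control.h"

static int pf_quiesce_vf_example(struct xe_gt *gt, unsigned int vfid, bool kill)
{
	int err;

	/* Ask GuC to pause scheduling of this VF on the GT. */
	err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
	if (err)
		return err;

	if (!kill)
		return xe_gt_sriov_pf_control_resume_vf(gt, vfid);

	/* Stop is expected to be terminal for the VF until it is reset (FLR). */
	return xe_gt_sriov_pf_control_stop_vf(gt, vfid);
}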
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
new file mode 100644
index 000000000000..0bf12d89ceb2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_HELPERS_H_
+#define _XE_GT_SRIOV_PF_HELPERS_H_
+
+#include "xe_gt_types.h"
+#include "xe_sriov_pf_helpers.h"
+
+/**
+ * xe_gt_sriov_pf_assert_vfid() - warn if &vfid is not a supported VF number when debugging.
+ * @gt: the PF &xe_gt to assert on
+ * @vfid: the VF number to assert
+ *
+ * Assert that &gt belongs to the Physical Function (PF) device and provided &vfid
+ * is within a range of supported VF numbers (up to maximum number of VFs that
+ * driver can support, including VF0 that represents the PF itself).
+ *
+ * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ */
+#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
+
+static inline int xe_gt_sriov_pf_get_totalvfs(struct xe_gt *gt)
+{
+ return xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+}
+
+static inline struct mutex *xe_gt_sriov_pf_master_mutex(struct xe_gt *gt)
+{
+ return xe_sriov_pf_master_mutex(gt_to_xe(gt));
+}
+
+#endif
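Everything in this series that touches PF provisioning or policy state is serialized by a single PF-level mutex, reached per GT through xe_gt_sriov_pf_master_mutex(). A minimal sketch of that convention, assuming a placeholder pf_provision_something() helper:

/* Sketch of the locking convention; pf_provision_something() is a placeholder. */
#include <linux/mutex.h>

#include "xe_gt_sriov_pf_helpers.h"

static int pf_provision_something(struct xe_gt *gt, unsigned int vfid)
{
	/* The real helpers assert the lock like this before touching state. */
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

static int pf_set_something_example(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_something(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}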
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
new file mode 100644
index 000000000000..fae5be5a2a11
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include "abi/guc_actions_sriov_abi.h"
+
+#include "xe_bo.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_klv_helpers.h"
+#include "xe_pm.h"
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
+{
+ u32 request[] = {
+ GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ size,
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
+{
+ const u32 bytes = num_dwords * sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(bytes, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
+
+ ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);
+
+ xe_bo_unpin_map_no_vm(bo);
+
+ return ret;
+}
+
+/*
+ * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
+ * negative error code on failure.
+ */
+static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ int ret;
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ ret = pf_send_policy_klvs(gt, klvs, num_dwords);
+
+ if (ret != num_klvs) {
+ int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
+ num_klvs, str_plural(num_klvs), ERR_PTR(err));
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ return err;
+ }
+
+ return 0;
+}
+
+static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
+{
+ u32 klv[] = {
+ PREP_GUC_KLV(key, 1),
+ value,
+ };
+
+ return pf_push_policy_klvs(gt, 1, klv, ARRAY_SIZE(klv));
+}
+
+static int pf_update_policy_bool(struct xe_gt *gt, u16 key, bool *policy, bool value)
+{
+ int err;
+
+ err = pf_push_policy_u32(gt, key, value);
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to '%s' (%pe)\n",
+ key, xe_guc_klv_key_to_string(key),
+ str_enabled_disabled(value), ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to '%s'\n",
+ key, xe_guc_klv_key_to_string(key),
+ str_enabled_disabled(value));
+
+ *policy = value;
+ return 0;
+}
+
+static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 value)
+{
+ int err;
+
+ err = pf_push_policy_u32(gt, key, value);
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to '%s' (%pe)\n",
+ key, xe_guc_klv_key_to_string(key),
+ str_enabled_disabled(value), ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to %u\n",
+ key, xe_guc_klv_key_to_string(key), value);
+
+ *policy = value;
+ return 0;
+}
+
+static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+}
+
+static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_sched_if_idle(gt, gt->sriov.pf.policy.guc.sched_if_idle);
+}
+
+static void pf_sanitize_sched_if_idle(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.sched_if_idle = false;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_sched_if_idle - Control the 'sched_if_idle' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @enable: the value of the 'sched_if_idle' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_if_idle(gt, enable);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_sched_if_idle - Retrieve value of 'sched_if_idle' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'sched_if_idle' policy.
+ */
+bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt)
+{
+ bool enable;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ enable = gt->sriov.pf.policy.guc.sched_if_idle;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return enable;
+}
+
+static int pf_provision_reset_engine(struct xe_gt *gt, bool enable)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY,
+ &gt->sriov.pf.policy.guc.reset_engine, enable);
+}
+
+static int pf_reprovision_reset_engine(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_reset_engine(gt, gt->sriov.pf.policy.guc.reset_engine);
+}
+
+static void pf_sanitize_reset_engine(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.reset_engine = false;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_reset_engine - Control the 'reset_engine' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @enable: the value of the 'reset_engine' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_reset_engine(gt, enable);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_reset_engine - Retrieve value of 'reset_engine' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'reset_engine' policy.
+ */
+bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt)
+{
+ bool enable;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ enable = gt->sriov.pf.policy.guc.reset_engine;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return enable;
+}
+
+static int pf_provision_sample_period(struct xe_gt *gt, u32 value)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_u32(gt, GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY,
+ &gt->sriov.pf.policy.guc.sample_period, value);
+}
+
+static int pf_reprovision_sample_period(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_sample_period(gt, gt->sriov.pf.policy.guc.sample_period);
+}
+
+static void pf_sanitize_sample_period(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.sample_period = 0;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_sample_period - Control the 'sample_period' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @value: the value of the 'sample_period' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sample_period(gt, value);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_sample_period - Retrieve value of 'sample_period' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'sample_period' policy.
+ */
+u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt)
+{
+ u32 value;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ value = gt->sriov.pf.policy.guc.sample_period;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return value;
+}
+
+static void pf_sanitize_guc_policies(struct xe_gt *gt)
+{
+ pf_sanitize_sched_if_idle(gt);
+ pf_sanitize_reset_engine(gt);
+ pf_sanitize_sample_period(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_policy_sanitize - Reset policy settings.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt)
+{
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_sanitize_guc_policies(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_policy_reprovision - Reprovision (and optionally reset) policy settings.
+ * @gt: the &xe_gt
+ * @reset: if true, reprovision using default values instead of the latest ones
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
+{
+ int err = 0;
+
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (reset)
+ pf_sanitize_guc_policies(gt);
+ err |= pf_reprovision_sched_if_idle(gt);
+ err |= pf_reprovision_reset_engine(gt);
+ err |= pf_reprovision_sample_period(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return err ? -ENXIO : 0;
+}
+
+static void print_guc_policies(struct drm_printer *p, struct xe_gt_sriov_guc_policies *policy)
+{
+ drm_printf(p, "%s:\t%s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY),
+ str_enabled_disabled(policy->sched_if_idle));
+ drm_printf(p, "%s:\t%s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY),
+ str_enabled_disabled(policy->reset_engine));
+ drm_printf(p, "%s:\t%u %s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY),
+ policy->sample_period, policy->sample_period ? "ms" : "(disabled)");
+}
+
+/**
+ * xe_gt_sriov_pf_policy_print - Dump actual policy values.
+ * @gt: the &xe_gt where to read the policy from
+ * @p: the &drm_printer
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ print_guc_policies(p, &gt->sriov.pf.policy.guc);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return 0;
+}
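All three GuC policies added above follow the same shape: a locked setter pushes a KLV and caches the value, the getter returns the cached value under the lock, and reprovision replays the cache (optionally after resetting it to defaults). A hedged usage sketch; pf_apply_policy_defaults_example() is an invented name:

/* Hypothetical caller; the xe_gt_sriov_pf_policy_*() calls are the real API. */
#include "xe_gt_sriov_pf_policy.h"

static int pf_apply_policy_defaults_example(struct xe_gt *gt)
{
	int err;

	/* Disable the 'sched_if_idle' strict scheduling policy. */
	err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, false);
	if (err)
		return err;

	/* Disable engine reset on VF switch. */
	err = xe_gt_sriov_pf_policy_set_reset_engine(gt, false);
	if (err)
		return err;

	/* Disable adverse event sampling (period of 0). */
	return xe_gt_sriov_pf_policy_set_sample_period(gt, 0);
}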
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h
new file mode 100644
index 000000000000..2a5dc33dc6d7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_POLICY_H_
+#define _XE_GT_SRIOV_PF_POLICY_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_gt;
+
+int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable);
+bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable);
+bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value);
+u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt);
+
+void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset);
+int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h
new file mode 100644
index 000000000000..4de532af135e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_POLICY_TYPES_H_
+#define _XE_GT_SRIOV_PF_POLICY_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_gt_sriov_guc_policies - GuC SR-IOV policies.
+ * @sched_if_idle: controls strict scheduling policy.
+ * @reset_engine: controls engines reset on VF switch policy.
+ * @sample_period: adverse events sampling period (in milliseconds).
+ */
+struct xe_gt_sriov_guc_policies {
+ bool sched_if_idle;
+ bool reset_engine;
+ u32 sample_period;
+};
+
+/**
+ * struct xe_gt_sriov_pf_policy - PF policy data.
+ * @guc: GuC scheduling policies.
+ */
+struct xe_gt_sriov_pf_policy {
+ struct xe_gt_sriov_guc_policies guc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
new file mode 100644
index 000000000000..faf9ee8266ce
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_TYPES_H_
+#define _XE_GT_SRIOV_PF_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_gt_sriov_pf_config_types.h"
+#include "xe_gt_sriov_pf_policy_types.h"
+
+/**
+ * struct xe_gt_sriov_metadata - GT level per-VF metadata.
+ */
+struct xe_gt_sriov_metadata {
+ /** @config: per-VF provisioning data. */
+ struct xe_gt_sriov_config config;
+};
+
+/**
+ * struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @policy: policy data.
+ * @spare: PF-only provisioning configuration.
+ * @vfs: metadata for all VFs.
+ */
+struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_policy policy;
+ struct xe_gt_sriov_spare_config spare;
+ struct xe_gt_sriov_metadata *vfs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c
index c69d2e8a0fe1..1e5971072bc8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c
@@ -29,7 +29,7 @@ static void gt_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(gt->sysfs);
}
-void xe_gt_sysfs_init(struct xe_gt *gt)
+int xe_gt_sysfs_init(struct xe_gt *gt)
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
@@ -38,24 +38,18 @@ void xe_gt_sysfs_init(struct xe_gt *gt)
kg = kzalloc(sizeof(*kg), GFP_KERNEL);
if (!kg)
- return;
+ return -ENOMEM;
kobject_init(&kg->base, &xe_gt_sysfs_kobj_type);
kg->gt = gt;
err = kobject_add(&kg->base, tile->sysfs, "gt%d", gt->info.id);
if (err) {
- drm_warn(&xe->drm, "failed to add GT sysfs directory, err: %d\n", err);
kobject_put(&kg->base);
- return;
+ return err;
}
gt->sysfs = &kg->base;
- err = drmm_add_action_or_reset(&xe->drm, gt_sysfs_fini, gt);
- if (err) {
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return;
- }
+ return drmm_add_action_or_reset(&xe->drm, gt_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.h b/drivers/gpu/drm/xe/xe_gt_sysfs.h
index e3ec278ca0be..ecbfcc5c7d42 100644
--- a/drivers/gpu/drm/xe/xe_gt_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.h
@@ -8,7 +8,7 @@
#include "xe_gt_sysfs_types.h"
-void xe_gt_sysfs_init(struct xe_gt *gt);
+int xe_gt_sysfs_init(struct xe_gt *gt);
static inline struct xe_gt *
kobj_to_gt(struct kobject *kobj)
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
index 63d640591a52..fbe21a8599ca 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
@@ -11,6 +11,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Throttle
@@ -38,10 +39,12 @@ static u32 read_perf_limit_reasons(struct xe_gt *gt)
{
u32 reg;
+ xe_pm_runtime_get(gt_to_xe(gt));
if (xe_gt_is_media_type(gt))
reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
else
reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+ xe_pm_runtime_put(gt_to_xe(gt));
return reg;
}
@@ -233,19 +236,14 @@ static void gt_throttle_sysfs_fini(struct drm_device *drm, void *arg)
sysfs_remove_group(gt->freq, &throttle_group_attrs);
}
-void xe_gt_throttle_sysfs_init(struct xe_gt *gt)
+int xe_gt_throttle_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
err = sysfs_create_group(gt->freq, &throttle_group_attrs);
- if (err) {
- drm_warn(&xe->drm, "failed to register throttle sysfs, err: %d\n", err);
- return;
- }
-
- err = drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
index 3ecfd4beffe1..6c61e6f228a8 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
@@ -10,7 +10,7 @@
struct xe_gt;
-void xe_gt_throttle_sysfs_init(struct xe_gt *gt);
+int xe_gt_throttle_sysfs_init(struct xe_gt *gt);
#endif /* _XE_GT_THROTTLE_SYSFS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index f03e077f81a0..93df2d7969b3 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -11,7 +11,9 @@
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_mmio.h"
#include "xe_trace.h"
+#include "regs/xe_guc_regs.h"
#define TLB_TIMEOUT (HZ / 4)
@@ -61,7 +63,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
spin_lock_init(&gt->tlb_invalidation.pending_lock);
spin_lock_init(&gt->tlb_invalidation.lock);
- gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
xe_gt_tlb_fence_timeout);
@@ -210,7 +211,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
* Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
* negative error code on error.
*/
-int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
{
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
@@ -223,6 +224,45 @@ int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
}
/**
+ * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
+ * @gt: graphics tile
+ *
+ * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
+ * synchronous.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
+ gt->uc.guc.submission_state.enabled) {
+ int seqno;
+
+ seqno = xe_gt_tlb_invalidation_guc(gt);
+ if (seqno <= 0)
+ return seqno;
+
+ xe_gt_tlb_invalidation_wait(gt, seqno);
+ } else if (xe_device_uc_enabled(xe)) {
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+
+ return 0;
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index b333c1709397..fbb743d80d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -16,7 +16,7 @@ struct xe_vma;
int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_guc(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
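The new xe_gt_tlb_invalidation_ggtt() hides both invalidation paths (GuC-backed with a seqno wait, or direct MMIO writes when GuC submission is not usable) behind one synchronous call. A hedged sketch of the expected call pattern after a GGTT update; the update_ggtt_example() wrapper is invented:

/* Hypothetical wrapper; xe_gt_tlb_invalidation_ggtt() is the real entry point. */
#include "xe_device.h"
#include "xe_gt_tlb_invalidation.h"

static int update_ggtt_example(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err;

	/* ... modify GGTT entries here ... */

	for_each_gt(gt, xe, id) {
		err = xe_gt_tlb_invalidation_ggtt(gt);
		if (err)
			return err;
	}
	return 0;
}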
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 5dc62fe1be49..3733e7a6860d 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -8,12 +8,10 @@
#include <linux/bitmap.h>
#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_mmio.h"
-#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
-#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
-
static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
{
@@ -62,6 +60,114 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
}
+/**
+ * gen_l3_mask_from_pattern - Replicate a bit pattern according to a mask
+ *
+ * It is used to compute the L3 bank masks in a generic format on
+ * various platforms where the internal representation of L3 node
+ * and masks from registers are different.
+ *
+ * @xe: device
+ * @dst: destination
+ * @pattern: pattern to replicate
+ * @patternbits: size of the pattern, in bits
+ * @mask: mask describing where to replicate the pattern
+ *
+ * Example 1:
+ * ----------
+ * @pattern =      0b1111
+ *                   └┬─┘
+ * @patternbits =       4 (bits)
+ * @mask =         0b0101
+ *                   ││││
+ *                   │││└────────────────── 0b1111 (=1×0b1111)
+ *                   ││└──────────── 0b0000   │    (=0×0b1111)
+ *                   │└────── 0b1111   │      │    (=1×0b1111)
+ *                   └ 0b0000   │      │      │    (=0×0b1111)
+ *                        │     │      │      │
+ * @dst =          0b0000 0b1111 0b0000 0b1111
+ *
+ * Example 2:
+ * ----------
+ * @pattern =      0b11111111
+ *                   └┬─────┘
+ * @patternbits =       8 (bits)
+ * @mask =         0b10
+ *                   ││
+ *                   ││
+ *                   ││
+ *                   │└────────── 0b00000000 (=0×0b11111111)
+ *                   └ 0b11111111      │     (=1×0b11111111)
+ *                           │         │
+ * @dst =          0b11111111 0b00000000
+ */
+static void
+gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst,
+ xe_l3_bank_mask_t pattern, int patternbits,
+ unsigned long mask)
+{
+ unsigned long bit;
+
+ xe_assert(xe, fls(mask) <= patternbits);
+ for_each_set_bit(bit, &mask, 32) {
+ xe_l3_bank_mask_t shifted_pattern = {};
+
+ bitmap_shift_left(shifted_pattern, pattern, bit * patternbits,
+ XE_MAX_L3_BANK_MASK_BITS);
+ bitmap_or(dst, dst, shifted_pattern, XE_MAX_L3_BANK_MASK_BITS);
+ }
+}
+
+static void
+load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+
+ if (GRAPHICS_VER(xe) >= 20) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
+ u32 bank_val = REG_FIELD_GET(XE2_GT_L3_MODE_MASK, fuse3);
+
+ bitmap_from_arr32(per_node, &bank_val, 32);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 4,
+ meml3_en);
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
+ xe_l3_bank_mask_t per_node = {};
+ xe_l3_bank_mask_t per_mask_bit = {};
+ u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+ u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4);
+ u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
+
+ bitmap_set_value8(per_mask_bit, 0x3, 0);
+ gen_l3_mask_from_pattern(xe, per_node, per_mask_bit, 2, bank_val);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 4,
+ meml3_en);
+ } else if (xe->info.platform == XE_PVC) {
+ xe_l3_bank_mask_t per_node = {};
+ xe_l3_bank_mask_t per_mask_bit = {};
+ u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+ u32 bank_val = REG_FIELD_GET(XEHPC_GT_L3_MODE_MASK, fuse3);
+
+ bitmap_set_value8(per_mask_bit, 0xf, 0);
+ gen_l3_mask_from_pattern(xe, per_node, per_mask_bit, 4,
+ bank_val);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 16,
+ meml3_en);
+ } else if (xe->info.platform == XE_DG2) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 mask = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+
+ bitmap_set_value8(per_node, 0xff, 0);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 8, mask);
+ } else {
+ /* 1:1 register bit to mask bit (inverted register bits) */
+ u32 mask = REG_FIELD_GET(XELP_GT_L3_MODE_MASK, ~fuse3);
+
+ bitmap_from_arr32(l3_bank_mask, &mask, 32);
+ }
+}
+
static void
get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
{
@@ -106,6 +212,7 @@ xe_gt_topology_init(struct xe_gt *gt)
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
XE2_GT_COMPUTE_DSS_2);
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
@@ -123,6 +230,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
gt->fuse_topo.eu_mask_per_dss);
+ drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
+ gt->fuse_topo.l3_bank_mask);
}
/*
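The gen_l3_mask_from_pattern() helper above amounts to "stamp the pattern once per set mask bit, shifted by bit * patternbits". The standalone sketch below restates that idea on a plain 64-bit word, which is enough to reproduce Example 1 from the kernel-doc (the real helper operates on a bitmap, so it is not limited to 64 bits):

/* Standalone restatement of the pattern replication from Example 1 above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t replicate_pattern(uint64_t pattern, int patternbits,
				  uint64_t mask)
{
	uint64_t dst = 0;
	int bit;

	/* Stop before the shift would leave the 64-bit word (sketch only). */
	for (bit = 0; (bit + 1) * patternbits <= 64; bit++)
		if (mask & (1ULL << bit))
			dst |= pattern << (bit * patternbits);
	return dst;
}

int main(void)
{
	/* Example 1: pattern 0b1111, 4 bits wide, mask 0b0101. */
	uint64_t dst = replicate_pattern(0xf, 4, 0x5);

	printf("0x%llx\n", (unsigned long long)dst);	/* prints 0xf0f */
	return 0;
}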
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index d1b54fb52ea6..b3e357777a6e 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -8,6 +8,17 @@
#include "xe_gt_types.h"
+/*
+ * Loop over each DSS whose bit is set in either the geometry or compute mask
+ * @dss: iterated DSS bit from the DSS mask
+ * @gt: GT structure
+ */
+#define for_each_dss(dss, gt) \
+ for_each_or_bit((dss), \
+ (gt)->fuse_topo.g_dss_mask, \
+ (gt)->fuse_topo.c_dss_mask, \
+ XE_MAX_DSS_FUSE_BITS)
+
struct drm_printer;
void xe_gt_topology_init(struct xe_gt *gt);
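The new for_each_dss() iterator walks the union of the geometry and compute DSS fuse masks, so callers no longer need to OR the two bitmaps themselves. A minimal hedged usage sketch; count_enabled_dss_example() is an invented helper:

/* Hypothetical helper showing the for_each_dss() iterator. */
#include "xe_gt_topology.h"

static unsigned int count_enabled_dss_example(struct xe_gt *gt)
{
	unsigned int dss, count = 0;

	/* Visits every DSS set in either the geometry or the compute mask. */
	for_each_dss(dss, gt)
		count++;

	return count;
}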
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 70c615dd1498..cfdc761ff7f4 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -8,6 +8,7 @@
#include "xe_force_wake_types.h"
#include "xe_gt_idle_types.h"
+#include "xe_gt_sriov_pf_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_reg_sr_types.h"
@@ -24,11 +25,15 @@ enum xe_gt_type {
XE_GT_TYPE_MEDIA,
};
-#define XE_MAX_DSS_FUSE_REGS 3
-#define XE_MAX_EU_FUSE_REGS 1
+#define XE_MAX_DSS_FUSE_REGS 3
+#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
+#define XE_MAX_EU_FUSE_REGS 1
+#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
+#define XE_MAX_L3_BANK_MASK_BITS 64
-typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(32 * XE_MAX_DSS_FUSE_REGS)];
-typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(32 * XE_MAX_EU_FUSE_REGS)];
+typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(XE_MAX_DSS_FUSE_BITS)];
+typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(XE_MAX_EU_FUSE_BITS)];
+typedef unsigned long xe_l3_bank_mask_t[BITS_TO_LONGS(XE_MAX_L3_BANK_MASK_BITS)];
struct xe_mmio_range {
u32 start;
@@ -138,6 +143,12 @@ struct xe_gt {
u32 adj_offset;
} mmio;
+ /** @sriov: virtualization data related to GT */
+ union {
+ /** @sriov.pf: PF data. Valid only if driver is running as PF */
+ struct xe_gt_sriov_pf pf;
+ } sriov;
+
/**
* @reg_sr: table with registers to be restored on GT init/resume/reset
*/
@@ -177,13 +188,6 @@ struct xe_gt {
* xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
- /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
- u64 fence_context;
- /**
- * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
- * tlb_invalidation.lock
- */
- u32 fence_seqno;
/** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
@@ -332,6 +336,9 @@ struct xe_gt {
/** @fuse_topo.eu_mask_per_dss: EU mask per DSS*/
xe_eu_mask_t eu_mask_per_dss;
+
+ /** @fuse_topo.l3_bank_mask: L3 bank mask */
+ xe_l3_bank_mask_t l3_bank_mask;
} fuse_topo;
/** @steering: register steering for individual HW units */
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 0d2a2dd13f11..240e7a4bbff1 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -12,11 +12,13 @@
#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
@@ -33,14 +35,13 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP 0xFEE00000
static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
u32 addr = xe_bo_ggtt_addr(bo);
+ /* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
xe_assert(xe, addr < GUC_GGTT_TOP);
xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
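The assertions above encode a simple range check; a self-contained userspace sketch, using the GUC_GGTT_TOP value from the relocated comment and a made-up WOPCM size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUC_GGTT_TOP	0xFEE00000u	/* from the comment removed above */

/* A GuC-visible GGTT object must sit entirely in [wopcm_size, GUC_GGTT_TOP). */
static bool guc_addr_ok(uint32_t addr, uint32_t size, uint32_t wopcm_size)
{
	return addr >= wopcm_size &&
	       addr < GUC_GGTT_TOP &&
	       size <= GUC_GGTT_TOP - addr;
}

int main(void)
{
	printf("%d\n", guc_addr_ok(0x01000000, 0x10000, 0x00200000)); /* 1 */
	printf("%d\n", guc_addr_ok(0xFEF00000, 0x1000, 0x00200000));  /* 0 */
	return 0;
}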
@@ -133,15 +134,10 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
-#define GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
-
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_uc_fw *uc_fw = &guc->fw;
- struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
-
u32 flags = 0;
if (XE_WA(gt, 22012773006))
@@ -164,20 +160,15 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
+ if (XE_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
if (XE_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170)) {
- if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- else
- drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n",
- version->major, version->minor, version->patch);
- }
+ if (XE_WA(gt, 14018913170))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
@@ -189,15 +180,23 @@ static u32 guc_ctl_devid(struct xe_guc *guc)
return (((u32)xe->info.devid) << 16) | xe->info.revid;
}
-static void guc_init_params(struct xe_guc *guc)
+static void guc_print_params(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u32 *params = guc->params;
int i;
BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+}
+
+static void guc_init_params(struct xe_guc *guc)
+{
+ u32 *params = guc->params;
+
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = 0;
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
@@ -205,18 +204,12 @@ static void guc_init_params(struct xe_guc *guc)
params[GUC_CTL_WA] = 0;
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+ guc_print_params(guc);
}
static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
u32 *params = guc->params;
- int i;
-
- BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
- BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
@@ -225,8 +218,7 @@ static void guc_init_params_post_hwconfig(struct xe_guc *guc)
params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+ guc_print_params(guc);
}
/*
@@ -250,10 +242,11 @@ static void guc_write_params(struct xe_guc *guc)
static void guc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
+ struct xe_gt *gt = guc_to_gt(guc);
- xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
- xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
}
/**
@@ -330,7 +323,7 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
+ ret = drmm_add_action_or_reset(&xe->drm, guc_fini, guc);
if (ret)
goto out;
@@ -343,7 +336,7 @@ int xe_guc_init(struct xe_guc *guc)
return 0;
out:
- drm_err(&xe->drm, "GuC init failed with %d", ret);
+ xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
return ret;
}
@@ -380,7 +373,6 @@ int xe_guc_post_load_init(struct xe_guc *guc)
int xe_guc_reset(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
u32 guc_status, gdrst;
int ret;
@@ -391,16 +383,14 @@ int xe_guc_reset(struct xe_guc *guc)
ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
if (ret) {
- drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%8x\n",
- gdrst);
+ xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
goto err_out;
}
guc_status = xe_mmio_read32(gt, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
- drm_err(&xe->drm,
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
+ xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
+ guc_status);
ret = -EIO;
goto err_out;
}
@@ -463,7 +453,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
static int guc_wait_ucode(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u32 status;
int ret;
@@ -484,35 +474,32 @@ static int guc_wait_ucode(struct xe_guc *guc)
* 200ms. Even at slowest clock, this should be sufficient. And
* in the working case, a larger timeout makes no difference.
*/
- ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
+ ret = xe_mmio_wait32(gt, GUC_STATUS, GS_UKERNEL_MASK,
FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
200000, &status, false);
if (ret) {
- struct drm_device *drm = &xe->drm;
-
- drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status),
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+ xe_gt_info(gt, "GuC load failed: status = 0x%08X\n", status);
+ xe_gt_info(gt, "GuC status: Reset = %u, BootROM = %#X, UKernel = %#X, MIA = %#X, Auth = %#X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status),
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_info(drm, "GuC firmware signature verification failed\n");
+ xe_gt_info(gt, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
XE_GUC_LOAD_STATUS_EXCEPTION) {
- drm_info(drm, "GuC firmware exception. EIP: %#x\n",
- xe_mmio_read32(guc_to_gt(guc),
- SOFT_SCRATCH(13)));
+ xe_gt_info(gt, "GuC firmware exception. EIP: %#x\n",
+ xe_mmio_read32(gt, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
} else {
- drm_dbg(&xe->drm, "GuC successfully loaded");
+ xe_gt_dbg(gt, "GuC successfully loaded\n");
}
return ret;
@@ -604,6 +591,9 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
u32 msg;
+ if (IS_SRIOV_VF(guc_to_xe(guc)))
+ return;
+
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
@@ -612,12 +602,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
- drm_err(&guc_to_xe(guc)->drm,
- "Received early GuC crash dump notification!\n");
+ xe_gt_err(gt, "Received early GuC crash dump notification!\n");
if (msg & XE_GUC_RECV_MSG_EXCEPTION)
- drm_err(&guc_to_xe(guc)->drm,
- "Received early GuC exception notification!\n");
+ xe_gt_err(gt, "Received early GuC exception notification!\n");
}
static void guc_enable_irq(struct xe_guc *guc)
@@ -668,15 +656,15 @@ int xe_guc_enable_communication(struct xe_guc *guc)
int xe_guc_suspend(struct xe_guc *guc)
{
- int ret;
+ struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
XE_GUC_ACTION_CLIENT_SOFT_RESET,
};
+ int ret;
ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
if (ret) {
- drm_err(&guc_to_xe(guc)->drm,
- "GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
+ xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -751,8 +739,8 @@ retry:
50000, &reply, false);
if (ret) {
timeout:
- drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
- request[0], reply);
+ xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
+ request[0], reply);
return ret;
}
@@ -790,8 +778,8 @@ timeout:
GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
- drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
- request[0], reason);
+ xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
+ request[0], reason);
goto retry;
}
@@ -800,16 +788,16 @@ timeout:
u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
- drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
- request[0], error, hint);
+ xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
+ request[0], error, hint);
return -ENXIO;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
- drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
- request[0], header);
+ xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
+ request[0], header);
return -EPROTO;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 6ad4c1a90a78..7f5a523795c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -7,6 +7,8 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
@@ -19,6 +21,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_wa.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -80,6 +83,10 @@ ads_to_map(struct xe_guc_ads *ads)
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -100,7 +107,7 @@ struct __guc_ads_blob {
struct guc_engine_usage engine_usage;
struct guc_um_init_params um_init_params;
/* From here on, location is dynamic! Refer to above diagram. */
- struct guc_mmio_reg regset[0];
+ struct guc_mmio_reg regset[];
} __packed;
#define ads_blob_read(ads_, field_) \
@@ -131,6 +138,11 @@ static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
return PAGE_ALIGN(ads->golden_lrc_size);
}
+static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
+{
+ return PAGE_ALIGN(ads->ads_waklv_size);
+}
+
static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
/* FIXME: Allocate a proper capture list */
@@ -167,12 +179,22 @@ static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
return PAGE_ALIGN(offset);
}
+static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
+{
+ u32 offset;
+
+ offset = guc_ads_golden_lrc_offset(ads) +
+ guc_ads_golden_lrc_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
size_t offset;
- offset = guc_ads_golden_lrc_offset(ads) +
- guc_ads_golden_lrc_size(ads);
+ offset = guc_ads_waklv_offset(ads) +
+ guc_ads_waklv_size(ads);
return PAGE_ALIGN(offset);
}
@@ -260,6 +282,110 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
return total_size;
}
+static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
+ enum xe_guc_klv_ids klv_id,
+ u32 value,
+ u32 *offset, u32 *remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
+ FIELD_PREP(GUC_KLV_0_LEN, 1),
+ value,
+ /* 1 dword data */
+ };
+
+ size = sizeof(klv_entry);
+
+ if (*remain < size) {
+ drm_warn(&ads_to_xe(ads)->drm,
+ "w/a klv buffer too small to add klv id %d\n", klv_id);
+ } else {
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
+ klv_entry, size);
+ *offset += size;
+ *remain -= size;
+ }
+}
+
+static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
+ enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
+{
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+ u32 size;
+
+ size = sizeof(klv_entry);
+
+ if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
+ "w/a klv buffer too small to add klv id %d\n", klv_id))
+ return;
+
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
+ klv_entry, size);
+ *offset += size;
+ *remain -= size;
+}
+
+static void guc_waklv_init(struct xe_guc_ads *ads)
+{
+ struct xe_gt *gt = ads_to_gt(ads);
+ u64 addr_ggtt;
+ u32 offset, remain, size;
+
+ offset = guc_ads_waklv_offset(ads);
+ remain = guc_ads_waklv_size(ads);
+
+ if (XE_WA(gt, 14019882105))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
+ &offset, &remain);
+ if (XE_WA(gt, 18024947630))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
+ &offset, &remain);
+ if (XE_WA(gt, 16022287689))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
+ &offset, &remain);
+
+ /*
+ * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
+ * the default value for this register is determined to be 0xC40. This could change in the
+ * future, so GuC depends on KMD to send it the correct value.
+ */
+ if (XE_WA(gt, 13011645652))
+ guc_waklv_enable_one_word(ads,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
+ 0xC40,
+ &offset, &remain);
+
+ size = guc_ads_waklv_size(ads) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
+ ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
+ ads_blob_write(ads, ads.wa_klv_size, size);
+}
+
+static int calculate_waklv_size(struct xe_guc_ads *ads)
+{
+ /*
+ * A single page is both the minimum possible size and
+ * is large enough for all current platforms.
+ */
+ return SZ_4K;
+}
+
#define MAX_GOLDEN_LRC_SIZE (SZ_4K * 64)
int xe_guc_ads_init(struct xe_guc_ads *ads)
@@ -271,10 +397,12 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
ads->regset_size = calculate_regset_size(gt);
+ ads->ads_waklv_size = calculate_waklv_size(ads);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -597,6 +725,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_mapping_table_init(gt, &info_map);
guc_capture_list_init(ads);
guc_doorbell_init(ads);
+ guc_waklv_init(ads);
if (xe->info.has_usm) {
guc_um_init_params(ads);
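To make the w/a KLV layout above concrete, a small userspace sketch that packs the 16:16 key/length header followed by one dword of payload (the 0xC40 default mentioned in the RC6 comment); the key value used here is purely hypothetical:

#include <stdint.h>
#include <stdio.h>

/* bits 31:16 = key, bits 15:0 = length in dwords (per the 16:16 comment) */
static uint32_t klv_header(uint16_t key, uint16_t len_dw)
{
	return ((uint32_t)key << 16) | len_dw;
}

int main(void)
{
	uint32_t entry[2];

	entry[0] = klv_header(0x9999 /* hypothetical w/a KLV id */, 1);
	entry[1] = 0xC40;		/* one dword of payload */

	printf("header=%#010x value=%#x\n", entry[0], entry[1]);
	return 0;
}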
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 4afe44bece4b..2de5decfe0fd 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -20,6 +20,8 @@ struct xe_guc_ads {
size_t golden_lrc_size;
/** @regset_size: size of register set passed to GuC for save/restore */
u32 regset_size;
+ /** @ads_waklv_size: total waklv size supported by platform */
+ u32 ads_waklv_size;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 355edd4d758a..0151d29b3c58 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -21,6 +21,7 @@
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
@@ -120,6 +121,7 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+ destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
}
@@ -143,20 +145,28 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
struct xe_bo *bo;
int err;
- xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
+ xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
+
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+ if (!ct->g2h_wq)
+ return -ENOMEM;
- drmm_mutex_init(&xe->drm, &ct->lock);
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
+ err = drmm_mutex_init(&xe->drm, &ct->lock);
+ if (err)
+ return err;
+
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -166,7 +176,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
if (err)
return err;
- xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
@@ -313,9 +323,10 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
int err;
- xe_assert(xe, !xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -336,12 +347,12 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
smp_mb();
wake_up_all(&ct->wq);
- drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
+ xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
return 0;
err_out:
- drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
+ xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
return err;
}
@@ -422,7 +433,7 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
- xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
@@ -435,8 +446,8 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
lockdep_assert_held(&ct->fast_lock);
- xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
- ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+ xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
ct->ctbs.g2h.info.space += g2h_len;
--ct->g2h_outstanding;
@@ -455,6 +466,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 ct_fence_value, bool want_response)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *h2g = &ct->ctbs.h2g;
u32 cmd[H2G_CT_HEADERS];
u32 tail = h2g->info.tail;
@@ -465,8 +477,8 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
- xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
- xe_assert(xe, tail <= h2g->info.size);
+ xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
+ xe_gt_assert(gt, tail <= h2g->info.size);
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
@@ -515,7 +527,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
/* Update descriptor */
desc_write(xe, h2g, tail, h2g->info.tail);
- trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
+ trace_xe_guc_ctb_h2g(gt->info.id, *(action - 1), full_len,
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
@@ -544,15 +556,15 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
u16 seqno;
int ret;
- xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
- xe_assert(xe, !g2h_len || !g2h_fence);
- xe_assert(xe, !num_g2h || !g2h_fence);
- xe_assert(xe, !g2h_len || num_g2h);
- xe_assert(xe, g2h_len || !num_g2h);
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ xe_gt_assert(gt, !num_g2h || !g2h_fence);
+ xe_gt_assert(gt, !g2h_len || num_g2h);
+ xe_gt_assert(gt, g2h_len || !num_g2h);
lockdep_assert_held(&ct->lock);
if (unlikely(ct->ctbs.h2g.info.broken)) {
@@ -570,7 +582,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- xe_assert(xe, xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
if (g2h_fence) {
g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
@@ -628,12 +640,12 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct drm_device *drm = &ct_to_xe(ct)->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
int ret;
- xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
lockdep_assert_held(&ct->lock);
xe_device_assert_mem_access(ct_to_xe(ct));
@@ -691,7 +703,7 @@ try_again:
return ret;
broken:
- drm_err(drm, "No forward process on H2G, reset required");
+ xe_gt_err(gt, "No forward process on H2G, reset required\n");
xe_guc_ct_print(ct, &p, true);
ct->ctbs.h2g.info.broken = true;
@@ -703,7 +715,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
{
int ret;
- xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+ xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
mutex_lock(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
@@ -771,7 +783,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer, bool no_fail)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct g2h_fence g2h_fence;
int ret = 0;
@@ -813,20 +825,20 @@ retry_same_fence:
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
if (!ret) {
- drm_err(&xe->drm, "Timed out wait for G2H, fence %u, action %04x",
- g2h_fence.seqno, action[0]);
+ xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
+ g2h_fence.seqno, action[0]);
xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
return -ETIME;
}
if (g2h_fence.retry) {
- drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
- action[0], g2h_fence.reason);
+ xe_gt_warn(gt, "H2G retry, action 0x%04x, reason %u",
+ action[0], g2h_fence.reason);
goto retry;
}
if (g2h_fence.fail) {
- drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
- action[0], g2h_fence.error, g2h_fence.hint);
+ xe_gt_err(gt, "H2G send failed, action 0x%04x, error %d, hint %u",
+ action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
}
@@ -895,7 +907,6 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = gt_to_xe(gt);
u32 *hxg = msg_to_hxg(msg);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
@@ -933,7 +944,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
- xe_assert(xe, fence == g2h_fence->seqno);
+ xe_gt_assert(gt, fence == g2h_fence->seqno);
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
g2h_fence->fail = true;
@@ -961,7 +972,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u32 *hxg = msg_to_hxg(msg);
u32 origin, type;
int ret;
@@ -970,9 +981,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
- drm_err(&xe->drm,
- "G2H channel broken on read, origin=%d, reset required\n",
- origin);
+ xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
+ origin);
ct->ctbs.g2h.info.broken = true;
return -EPROTO;
@@ -989,9 +999,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = parse_g2h_response(ct, msg, len);
break;
default:
- drm_err(&xe->drm,
- "G2H channel broken on read, type=%d, reset required\n",
- type);
+ xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
+ type);
ct->ctbs.g2h.info.broken = true;
ret = -EOPNOTSUPP;
@@ -1002,8 +1011,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 *hxg = msg_to_hxg(msg);
u32 action, adj_len;
@@ -1054,18 +1063,21 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
- ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
- ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
+ break;
+ case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
+ ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
break;
default:
- drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
+ xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
if (ret)
- drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
- action, ret);
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
return 0;
}
@@ -1073,13 +1085,14 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
u32 tail, head, len;
s32 avail;
u32 action;
u32 *hxg;
- xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1091,7 +1104,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
if (g2h->info.broken)
return -EPIPE;
- xe_assert(xe, xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
@@ -1107,9 +1120,8 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
sizeof(u32));
len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
if (len > avail) {
- drm_err(&xe->drm,
- "G2H channel broken on read, avail=%d, len=%d, reset required\n",
- avail, len);
+ xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+ avail, len);
g2h->info.broken = true;
return -EPROTO;
@@ -1162,7 +1174,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct xe_guc *guc = ct_to_guc(ct);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 *hxg = msg_to_hxg(msg);
@@ -1181,12 +1193,12 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
default:
- drm_warn(&xe->drm, "NOT_POSSIBLE");
+ xe_gt_warn(gt, "NOT_POSSIBLE");
}
if (ret)
- drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
- action, ret);
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
}
/**
@@ -1203,7 +1215,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
bool ongoing;
int len;
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1216,7 +1228,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
spin_unlock(&ct->fast_lock);
if (ongoing)
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
}
/* Returns less than zero on error, 0 on done, 1 on more available */
@@ -1247,6 +1259,7 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
static void g2h_worker_func(struct work_struct *w)
{
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
+ struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
@@ -1273,7 +1286,7 @@ static void g2h_worker_func(struct work_struct *w)
* responses, if the worker here is blocked on those callbacks
* completing, creating a deadlock.
*/
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1283,8 +1296,7 @@ static void g2h_worker_func(struct work_struct *w)
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
- struct drm_device *drm = &ct_to_xe(ct)->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
+ struct drm_printer p = xe_gt_info_printer(gt);
xe_guc_ct_print(ct, &p, false);
kick_reset(ct);
@@ -1292,7 +1304,7 @@ static void g2h_worker_func(struct work_struct *w)
} while (ret == 1);
if (ongoing)
- xe_device_mem_access_put(ct_to_xe(ct));
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
@@ -1394,7 +1406,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
return NULL;
}
- if (xe_guc_ct_enabled(ct)) {
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
snapshot->ct_enabled = true;
snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 5083e099064f..105bb8e99a8d 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -34,7 +34,7 @@ static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
return;
wake_up_all(&ct->wq);
- queue_work(system_unbound_wq, &ct->g2h_worker);
+ queue_work(ct->g2h_wq, &ct->g2h_worker);
xe_guc_ct_fast_path(ct);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index d29144c9f20b..fede4c6e93cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -120,6 +120,8 @@ struct xe_guc_ct {
wait_queue_head_t wq;
/** @g2h_fence_wq: wait queue used for G2H fencing */
wait_queue_head_t g2h_fence_wq;
+ /** @g2h_wq: used to process G2H */
+ struct workqueue_struct *g2h_wq;
/** @msg: Message buffer */
u32 msg[GUC_CTB_MSG_MAX_LEN];
/** @fast_msg: Message buffer */
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index ffd7d53bcc42..d3822cbea273 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -14,6 +14,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
#include "xe_macros.h"
+#include "xe_pm.h"
static struct xe_guc *node_to_guc(struct drm_info_node *node)
{
@@ -26,9 +27,9 @@ static int guc_info(struct seq_file *m, void *data)
struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_guc_print_info(guc, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -39,9 +40,9 @@ static int guc_log(struct seq_file *m, void *data)
struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_guc_log_print(&guc->log, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index c281fdbfd2d6..19ee71aeaf17 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -14,6 +14,8 @@
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
+#define GUC_ID_MAX 65535
+
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
@@ -207,7 +209,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index ea49f3885c10..d9b570a154a2 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -14,7 +14,7 @@
#include "xe_guc.h"
#include "xe_map.h"
-static int send_get_hwconfig(struct xe_guc *guc, u32 ggtt_addr, u32 size)
+static int send_get_hwconfig(struct xe_guc *guc, u64 ggtt_addr, u32 size)
{
u32 action[] = {
XE_GUC_ACTION_GET_HWCONFIG,
@@ -78,8 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
return -EINVAL;
bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
guc->hwconfig.bo = bo;
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
new file mode 100644
index 000000000000..0fb7c6b78c31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_id_mgr.h"
+#include "xe_guc_types.h"
+
+static struct xe_guc *idm_to_guc(struct xe_guc_id_mgr *idm)
+{
+ return container_of(idm, struct xe_guc, submission_state.idm);
+}
+
+static struct xe_gt *idm_to_gt(struct xe_guc_id_mgr *idm)
+{
+ return guc_to_gt(idm_to_guc(idm));
+}
+
+static struct xe_device *idm_to_xe(struct xe_guc_id_mgr *idm)
+{
+ return gt_to_xe(idm_to_gt(idm));
+}
+
+#define idm_assert(idm, cond) xe_gt_assert(idm_to_gt(idm), cond)
+#define idm_mutex(idm) (&idm_to_guc(idm)->submission_state.lock)
+
+static void idm_print_locked(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent);
+
+static void __fini_idm(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_id_mgr *idm = arg;
+
+ mutex_lock(idm_mutex(idm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int weight = bitmap_weight(idm->bitmap, idm->total);
+
+ if (weight) {
+ struct drm_printer p = xe_gt_info_printer(idm_to_gt(idm));
+
+ xe_gt_err(idm_to_gt(idm), "GUC ID manager unclean (%u/%u)\n",
+ weight, idm->total);
+ idm_print_locked(idm, &p, 1);
+ }
+ }
+
+ bitmap_free(idm->bitmap);
+ idm->bitmap = NULL;
+ idm->total = 0;
+ idm->used = 0;
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+/**
+ * xe_guc_id_mgr_init() - Initialize GuC context ID Manager.
+ * @idm: the &xe_guc_id_mgr to initialize
+ * @limit: number of IDs to manage
+ *
+ * The bare-metal or PF driver can pass ~0 as &limit to indicate that all
+ * context IDs supported by the GuC firmware are available for use.
+ *
+ * Only VF drivers will have to provide an explicit number of context IDs
+ * that they can use.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
+{
+ int ret;
+
+ idm_assert(idm, !idm->bitmap);
+ idm_assert(idm, !idm->total);
+ idm_assert(idm, !idm->used);
+
+ if (limit == ~0)
+ limit = GUC_ID_MAX;
+ else if (limit > GUC_ID_MAX)
+ return -ERANGE;
+ else if (!limit)
+ return -EINVAL;
+
+ idm->bitmap = bitmap_zalloc(limit, GFP_KERNEL);
+ if (!idm->bitmap)
+ return -ENOMEM;
+ idm->total = limit;
+
+ ret = drmm_add_action_or_reset(&idm_to_xe(idm)->drm, __fini_idm, idm);
+ if (ret)
+ return ret;
+
+ xe_gt_info(idm_to_gt(idm), "using %u GUC ID(s)\n", idm->total);
+ return 0;
+}
+
+static unsigned int find_last_zero_area(unsigned long *bitmap,
+ unsigned int total,
+ unsigned int count)
+{
+ unsigned int found = total;
+ unsigned int rs, re, range;
+
+ for_each_clear_bitrange(rs, re, bitmap, total) {
+ range = re - rs;
+ if (range < count)
+ continue;
+ found = rs + (range - count);
+ }
+ return found;
+}
+
+static int idm_reserve_chunk_locked(struct xe_guc_id_mgr *idm,
+ unsigned int count, unsigned int retain)
+{
+ int id;
+
+ idm_assert(idm, count);
+ lockdep_assert_held(idm_mutex(idm));
+
+ if (!idm->total)
+ return -ENODATA;
+
+ if (retain) {
+ /*
+ * For ID reservations (used on the PF for VFs) we want to make
+ * sure there will be at least 'retain' IDs still available for the PF.
+ */
+ if (idm->used + count + retain > idm->total)
+ return -EDQUOT;
+ /*
+ * ... and we want to reserve the highest IDs, close to the end of the range.
+ */
+ id = find_last_zero_area(idm->bitmap, idm->total, count);
+ } else {
+ /*
+ * For regular ID reservations (used by the submission code)
+ * we start searching from the low end of the ID range.
+ */
+ id = bitmap_find_next_zero_area(idm->bitmap, idm->total, 0, count, 0);
+ }
+ if (id >= idm->total)
+ return -ENOSPC;
+
+ bitmap_set(idm->bitmap, id, count);
+ idm->used += count;
+
+ return id;
+}
+
+static void idm_release_chunk_locked(struct xe_guc_id_mgr *idm,
+ unsigned int start, unsigned int count)
+{
+ idm_assert(idm, count);
+ idm_assert(idm, count <= idm->used);
+ idm_assert(idm, start < idm->total);
+ idm_assert(idm, start + count - 1 < idm->total);
+ lockdep_assert_held(idm_mutex(idm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int n;
+
+ for (n = 0; n < count; n++)
+ idm_assert(idm, test_bit(start + n, idm->bitmap));
+ }
+ bitmap_clear(idm->bitmap, start, count);
+ idm->used -= count;
+}
+
+/**
+ * xe_guc_id_mgr_reserve_locked() - Reserve one or more GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @count: number of IDs to allocate (can't be 0)
+ *
+ * This function is intended for use by the GuC submission code,
+ * where the submission lock is already held.
+ *
+ * Return: ID of allocated GuC context or a negative error code on failure.
+ */
+int xe_guc_id_mgr_reserve_locked(struct xe_guc_id_mgr *idm, unsigned int count)
+{
+ return idm_reserve_chunk_locked(idm, count, 0);
+}
+
+/**
+ * xe_guc_id_mgr_release_locked() - Release one or more GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @id: the GuC context ID to release
+ * @count: number of IDs to release (can't be 0)
+ *
+ * This function is intended for use by the GuC submission code,
+ * where the submission lock is already held.
+ */
+void xe_guc_id_mgr_release_locked(struct xe_guc_id_mgr *idm, unsigned int id,
+ unsigned int count)
+{
+ return idm_release_chunk_locked(idm, id, count);
+}
+
+/**
+ * xe_guc_id_mgr_reserve() - Reserve a range of GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @count: number of GuC context IDs to reserve (can't be 0)
+ * @retain: number of GuC context IDs to keep available (can't be 0)
+ *
+ * This function is intended for use by the PF driver, which expects that
+ * the reserved range of IDs will be contiguous and that there will be at least
+ * &retain IDs still available for the PF after this reservation.
+ *
+ * Return: starting ID of the allocated GuC context ID range or
+ * a negative error code on failure.
+ */
+int xe_guc_id_mgr_reserve(struct xe_guc_id_mgr *idm,
+ unsigned int count, unsigned int retain)
+{
+ int ret;
+
+ idm_assert(idm, count);
+ idm_assert(idm, retain);
+
+ mutex_lock(idm_mutex(idm));
+ ret = idm_reserve_chunk_locked(idm, count, retain);
+ mutex_unlock(idm_mutex(idm));
+
+ return ret;
+}
+
+/**
+ * xe_guc_id_mgr_release() - Release a range of GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @start: the starting ID of GuC context range to release
+ * @count: number of GuC context IDs to release
+ */
+void xe_guc_id_mgr_release(struct xe_guc_id_mgr *idm,
+ unsigned int start, unsigned int count)
+{
+ mutex_lock(idm_mutex(idm));
+ idm_release_chunk_locked(idm, start, count);
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void idm_print_locked(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent)
+{
+ unsigned int rs, re;
+
+ lockdep_assert_held(idm_mutex(idm));
+
+ drm_printf_indent(p, indent, "total %u\n", idm->total);
+ if (!idm->bitmap)
+ return;
+
+ drm_printf_indent(p, indent, "used %u\n", idm->used);
+ for_each_set_bitrange(rs, re, idm->bitmap, idm->total)
+ drm_printf_indent(p, indent, "range %u..%u (%u)\n", rs, re - 1, re - rs);
+}
+
+/**
+ * xe_guc_id_mgr_print() - Print status of GuC ID Manager.
+ * @idm: the &xe_guc_id_mgr to print
+ * @p: the &drm_printer to print to
+ * @indent: tab indentation level
+ */
+void xe_guc_id_mgr_print(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent)
+{
+ mutex_lock(idm_mutex(idm));
+ idm_print_locked(idm, p, indent);
+ mutex_unlock(idm_mutex(idm));
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_id_mgr_test.c"
+#endif
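A self-contained userspace sketch of the "reserve the highest free IDs" search that idm_reserve_chunk_locked() performs for PF-side reservations; like find_last_zero_area() above, it keeps the last window of 'count' clear bits it encounters:

#include <stdint.h>
#include <stdio.h>

/* Return the start of the last run of 'count' clear bits, or -1. */
static int find_last_zero_run(uint64_t used, unsigned int total,
			      unsigned int count)
{
	unsigned int run = 0;
	int found = -1;

	for (unsigned int bit = 0; bit < total; bit++) {
		if (used & (1ull << bit)) {
			run = 0;
			continue;
		}
		if (++run >= count)
			found = (int)(bit - count + 1); /* keep the last hit */
	}
	return found;
}

int main(void)
{
	/* IDs 0..3 and 10..15 already used out of 16; 4..9 are free */
	uint64_t used = 0xfc0f;

	/* reserving 3 IDs "from the top" picks 7..9 */
	printf("%d\n", find_last_zero_run(used, 16, 3));
	return 0;
}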
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.h b/drivers/gpu/drm/xe/xe_guc_id_mgr.h
new file mode 100644
index 000000000000..368f8c80e4c7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ID_MGR_H_
+#define _XE_GUC_ID_MGR_H_
+
+struct drm_printer;
+struct xe_guc_id_mgr;
+
+int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int count);
+
+int xe_guc_id_mgr_reserve_locked(struct xe_guc_id_mgr *idm, unsigned int count);
+void xe_guc_id_mgr_release_locked(struct xe_guc_id_mgr *idm, unsigned int id, unsigned int count);
+
+int xe_guc_id_mgr_reserve(struct xe_guc_id_mgr *idm, unsigned int count, unsigned int retain);
+void xe_guc_id_mgr_release(struct xe_guc_id_mgr *idm, unsigned int start, unsigned int count);
+
+void xe_guc_id_mgr_print(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
new file mode 100644
index 000000000000..ceca949932a0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <drm/drm_print.h>
+
+#include "abi/guc_klvs_abi.h"
+#include "xe_guc_klv_helpers.h"
+
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
+/**
+ * xe_guc_klv_key_to_string - Convert KLV key into friendly name.
+ * @key: the `GuC KLV`_ key
+ *
+ * Return: name of the KLV key.
+ */
+const char *xe_guc_klv_key_to_string(u16 key)
+{
+ switch (key) {
+ /* VGT POLICY keys */
+ case GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY:
+ return "sched_if_idle";
+ case GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY:
+ return "sample_period";
+ case GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY:
+ return "reset_engine";
+ /* VF CFG keys */
+ case GUC_KLV_VF_CFG_GGTT_START_KEY:
+ return "ggtt_start";
+ case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
+ return "ggtt_size";
+ case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
+ return "lmem_size";
+ case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
+ return "num_contexts";
+ case GUC_KLV_VF_CFG_TILE_MASK_KEY:
+ return "tile_mask";
+ case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
+ return "num_doorbells";
+ case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
+ return "exec_quantum";
+ case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
+ return "preempt_timeout";
+ case GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY:
+ return "begin_db_id";
+ case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
+ return "begin_ctx_id";
+ default:
+ return "(unknown)";
+ }
+}
+
+/**
+ * xe_guc_klv_print - Print content of the buffer with `GuC KLV`_.
+ * @klvs: the buffer with KLVs
+ * @num_dwords: number of dwords (u32) available in the buffer
+ * @p: the &drm_printer
+ *
+ * The buffer may contain more than one KLV.
+ */
+void xe_guc_klv_print(const u32 *klvs, u32 num_dwords, struct drm_printer *p)
+{
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (num_dwords < len) {
+ drm_printf(p, "{ key %#06x : truncated %zu of %zu bytes %*ph } # %s\n",
+ key, num_dwords * sizeof(u32), len * sizeof(u32),
+ (int)(num_dwords * sizeof(u32)), klvs,
+ xe_guc_klv_key_to_string(key));
+ return;
+ }
+
+ switch (len) {
+ case 0:
+ drm_printf(p, "{ key %#06x : no value } # %s\n",
+ key, xe_guc_klv_key_to_string(key));
+ break;
+ case 1:
+ drm_printf(p, "{ key %#06x : 32b value %u } # %s\n",
+ key, klvs[0], xe_guc_klv_key_to_string(key));
+ break;
+ case 2:
+ drm_printf(p, "{ key %#06x : 64b value %#llx } # %s\n",
+ key, make_u64(klvs[1], klvs[0]),
+ xe_guc_klv_key_to_string(key));
+ break;
+ default:
+ drm_printf(p, "{ key %#06x : %zu bytes %*ph } # %s\n",
+ key, len * sizeof(u32), (int)(len * sizeof(u32)),
+ klvs, xe_guc_klv_key_to_string(key));
+ break;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ /* we don't expect any leftovers, fix if KLV header is ever changed */
+ BUILD_BUG_ON(GUC_KLV_LEN_MIN > 1);
+}
+
+/**
+ * xe_guc_klv_count - Count KLVs present in the buffer.
+ * @klvs: the buffer with KLVs
+ * @num_dwords: number of dwords (u32) in the buffer
+ *
+ * Return: number of recognized KLVs or
+ * a negative error code if KLV buffer is truncated.
+ */
+int xe_guc_klv_count(const u32 *klvs, u32 num_dwords)
+{
+ int num_klvs = 0;
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ if (num_dwords < len + GUC_KLV_LEN_MIN)
+ break;
+
+ klvs += GUC_KLV_LEN_MIN + len;
+ num_dwords -= GUC_KLV_LEN_MIN + len;
+ num_klvs++;
+ }
+
+ return num_dwords ? -ENODATA : num_klvs;
+}
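For reference, a userspace walk over a KLV stream that loosely mirrors xe_guc_klv_count() above, assuming the same header convention (key in the upper 16 bits, length in dwords in the lower 16); it returns -1 instead of -ENODATA on a truncated buffer:

#include <stdint.h>
#include <stdio.h>

#define KLV_LEN(hdr)	((hdr) & 0xffffu)

/* Count KLVs; return -1 if the buffer ends mid-entry. */
static int count_klvs(const uint32_t *klvs, uint32_t num_dwords)
{
	int num_klvs = 0;

	while (num_dwords >= 1) {
		uint32_t len = KLV_LEN(klvs[0]);

		if (num_dwords < len + 1)
			return -1;

		klvs += 1 + len;
		num_dwords -= 1 + len;
		num_klvs++;
	}
	return num_klvs;
}

int main(void)
{
	/* two entries: a zero-length KLV and a one-dword KLV */
	const uint32_t stream[] = { 0x00010000, 0x00020001, 0x1234 };

	printf("%d\n", count_klvs(stream, 3)); /* prints 2 */
	return 0;
}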
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.h b/drivers/gpu/drm/xe/xe_guc_klv_helpers.h
new file mode 100644
index 000000000000..b835e0ebe6db
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_KLV_HELPERS_H_
+#define _XE_GUC_KLV_HELPERS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+const char *xe_guc_klv_key_to_string(u16 key);
+
+void xe_guc_klv_print(const u32 *klvs, u32 num_dwords, struct drm_printer *p);
+int xe_guc_klv_count(const u32 *klvs, u32 num_dwords);
+
+/**
+ * PREP_GUC_KLV - Prepare KLV header value based on provided key and len.
+ * @key: KLV key
+ * @len: KLV length
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV(key, len) \
+ (FIELD_PREP(GUC_KLV_0_KEY, (key)) | \
+ FIELD_PREP(GUC_KLV_0_LEN, (len)))
+
+/**
+ * PREP_GUC_KLV_CONST - Prepare KLV header value based on const key and len.
+ * @key: const KLV key
+ * @len: const KLV length
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV_CONST(key, len) \
+ (FIELD_PREP_CONST(GUC_KLV_0_KEY, (key)) | \
+ FIELD_PREP_CONST(GUC_KLV_0_LEN, (len)))
+
+/**
+ * PREP_GUC_KLV_TAG - Prepare KLV header value based on unique KLV definition tag.
+ * @TAG: unique tag of the KLV definition
+ *
+ * Combine separate KEY and LEN definitions of the KLV identified by the TAG.
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV_TAG(TAG) \
+ PREP_GUC_KLV_CONST(GUC_KLV_##TAG##_KEY, GUC_KLV_##TAG##_LEN)
+
+#endif
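A hypothetical, non-compilable usage fragment for PREP_GUC_KLV_TAG(); it assumes matching GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY/_LEN constants exist in abi/guc_klvs_abi.h, as the VF CFG key list earlier suggests:

/*
 * Sketch only (not part of the patch): build a tiny VF config KLV stream.
 * PREP_GUC_KLV_TAG() expands the tag into its _KEY/_LEN pair and packs the
 * 16:16 header; the one-dword value follows.
 */
u32 cfg[] = {
	PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),	/* header: key << 16 | len */
	16,					/* value: 16 GuC context IDs */
};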
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 45135c3520e5..a37ee3419428 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -84,8 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
struct xe_bo *bo;
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 2839d685631b..509649d0e65e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -145,25 +145,6 @@ static int pc_action_reset(struct xe_guc_pc *pc)
return ret;
}
-static int pc_action_shutdown(struct xe_guc_pc *pc)
-{
- struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
- int ret;
- u32 action[] = {
- GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
- xe_bo_ggtt_addr(pc->bo),
- 0,
- };
-
- ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
- drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe",
- ERR_PTR(ret));
-
- return ret;
-}
-
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
@@ -381,8 +362,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
struct xe_device *xe = gt_to_xe(gt);
u32 freq;
- xe_device_mem_access_get(gt_to_xe(gt));
-
/* When in RC6, actual frequency reported will be 0. */
if (GRAPHICS_VERx100(xe) >= 1270) {
freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
@@ -394,8 +373,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
freq = decode_freq(freq);
- xe_device_mem_access_put(gt_to_xe(gt));
-
return freq;
}
@@ -412,14 +389,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(gt_to_xe(gt));
/*
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
*freq = xe_mmio_read32(gt, RPNSWREQ);
@@ -427,9 +403,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
*freq = decode_freq(*freq);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(gt_to_xe(gt));
- return ret;
+ return 0;
}
/**
@@ -451,12 +425,7 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
*/
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
- struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
-
- xe_device_mem_access_get(xe);
pc_update_rp_values(pc);
- xe_device_mem_access_put(xe);
return pc->rpe_freq;
}
@@ -485,7 +454,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -511,7 +479,6 @@ fw:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -528,7 +495,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -544,8 +510,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
-
return ret;
}
@@ -561,7 +525,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -577,7 +540,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -594,7 +556,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -610,7 +571,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -623,8 +583,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg, gt_c_state;
- xe_device_mem_access_get(gt_to_xe(gt));
-
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
@@ -633,8 +591,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
}
- xe_device_mem_access_put(gt_to_xe(gt));
-
switch (gt_c_state) {
case GT_C6:
return GT_IDLE_C6;
@@ -654,9 +610,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, GT_GFX_RC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -670,9 +624,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u64 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -743,24 +695,28 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
ret = pc_action_query_task_state(pc);
if (ret)
- return ret;
+ goto out;
/*
* GuC defaults to some RPmax that is not actually achievable without
* overclocking. Let's adjust it to the Hardware RP0, which is the
* regular maximum
*/
- if (pc_get_max_freq(pc) > pc->rp0_freq)
- pc_set_max_freq(pc, pc->rp0_freq);
+ if (pc_get_max_freq(pc) > pc->rp0_freq) {
+ ret = pc_set_max_freq(pc, pc->rp0_freq);
+ if (ret)
+ goto out;
+ }
/*
* Same thing happens for Server platforms where min is listed as
* RPMax
*/
if (pc_get_min_freq(pc) > pc->rp0_freq)
- pc_set_min_freq(pc, pc->rp0_freq);
+ ret = pc_set_min_freq(pc, pc->rp0_freq);
- return 0;
+out:
+ return ret;
}
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
@@ -801,23 +757,19 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (xe->info.skip_guc_pc)
return 0;
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
if (ret)
- goto out;
+ return ret;
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
xe_gt_idle_disable_c6(gt);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
@@ -870,11 +822,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out_fail_force_wake;
+ return ret;
if (xe->info.skip_guc_pc) {
if (xe->info.platform != XE_PVC)
@@ -914,8 +864,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out_fail_force_wake:
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -926,32 +874,17 @@ out_fail_force_wake:
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
- int ret;
-
- xe_device_mem_access_get(pc_to_xe(pc));
if (xe->info.skip_guc_pc) {
xe_gt_idle_disable_c6(pc_to_gt(pc));
- ret = 0;
- goto out;
+ return 0;
}
mutex_lock(&pc->freq_lock);
pc->freq_ready = false;
mutex_unlock(&pc->freq_lock);
- ret = pc_action_shutdown(pc);
- if (ret)
- goto out;
-
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
- drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
- ret = -EIO;
- }
-
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
/**
@@ -965,13 +898,11 @@ static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
- xe_device_mem_access_get(xe);
xe_gt_idle_disable_c6(pc_to_gt(pc));
- xe_device_mem_access_put(xe);
return;
}
- xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
@@ -998,16 +929,13 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return err;
bo = xe_managed_bo_create_pin_map(xe, tile, size,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
pc->bo = bo;
- err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
}
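Several hunks above replace the explicit "err = drmm_add_action_or_reset(...); if (err) return err; return 0;" sequence with a direct "return drmm_add_action_or_reset(...)". A minimal userspace sketch of that managed-release pattern follows; the names (managed_add_action, demo_init, free_buffer) are invented for illustration and are not Xe or DRM API. The "or_reset" behaviour is assumed to mean "run the action immediately if registration fails".

#include <stdio.h>
#include <stdlib.h>

typedef void (*release_fn)(void *arg);

struct managed_action { release_fn fn; void *arg; };

static struct managed_action actions[8];
static int nr_actions;

/* Register a release action; on failure, run it right away ("or_reset"). */
static int managed_add_action(release_fn fn, void *arg)
{
	if (nr_actions >= 8) {
		fn(arg);
		return -1;
	}
	actions[nr_actions++] = (struct managed_action){ fn, arg };
	return 0;
}

static void free_buffer(void *arg) { free(arg); }

static int demo_init(void)
{
	void *buf = malloc(64);

	if (!buf)
		return -1;

	/* Tail return: no separate "if (err) return err; return 0;" needed. */
	return managed_add_action(free_buffer, buf);
}

int main(void)
{
	int err = demo_init();

	printf("demo_init: %d\n", err);
	/* Teardown: run registered actions in reverse order. */
	while (nr_actions) {
		nr_actions--;
		actions[nr_actions].fn(actions[nr_actions].arg);
	}
	return err ? 1 : 0;
}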
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ff77bc8da1b2..c7d38469fb46 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -27,6 +27,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_id_mgr.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -236,17 +237,9 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
struct xe_guc *guc = arg;
xa_destroy(&guc->submission_state.exec_queue_lookup);
- ida_destroy(&guc->submission_state.guc_ids);
- bitmap_free(guc->submission_state.guc_ids_bitmap);
free_submit_wq(guc);
- mutex_destroy(&guc->submission_state.lock);
}
-#define GUC_ID_MAX 65535
-#define GUC_ID_NUMBER_MLRC 4096
-#define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
-#define GUC_ID_START_MLRC GUC_ID_NUMBER_SLRC
-
static const struct xe_exec_queue_ops guc_exec_queue_ops;
static void primelockdep(struct xe_guc *guc)
@@ -269,33 +262,28 @@ int xe_guc_submit_init(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int err;
- guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
+ err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
+ if (err)
+ return err;
+
+ err = xe_guc_id_mgr_init(&guc->submission_state.idm, ~0);
+ if (err)
+ return err;
err = alloc_submit_wq(guc);
- if (err) {
- bitmap_free(guc->submission_state.guc_ids_bitmap);
+ if (err)
return err;
- }
gt->exec_queue_ops = &guc_exec_queue_ops;
- mutex_init(&guc->submission_state.lock);
xa_init(&guc->submission_state.exec_queue_lookup);
- ida_init(&guc->submission_state.guc_ids);
spin_lock_init(&guc->submission_state.suspend.lock);
guc->submission_state.suspend.context = dma_fence_context_alloc(1);
primelockdep(guc);
- err = drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
@@ -307,12 +295,8 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
for (i = 0; i < xa_count; ++i)
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
- if (xe_exec_queue_is_parallel(q))
- bitmap_release_region(guc->submission_state.guc_ids_bitmap,
- q->guc->id - GUC_ID_START_MLRC,
- order_base_2(q->width));
- else
- ida_free(&guc->submission_state.guc_ids, q->guc->id);
+ xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
+ q->guc->id, q->width);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -330,21 +314,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
*/
lockdep_assert_held(&guc->submission_state.lock);
- if (xe_exec_queue_is_parallel(q)) {
- void *bitmap = guc->submission_state.guc_ids_bitmap;
-
- ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
- order_base_2(q->width));
- } else {
- ret = ida_alloc_max(&guc->submission_state.guc_ids,
- GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
- }
+ ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
+ q->width);
if (ret < 0)
return ret;
q->guc->id = ret;
- if (xe_exec_queue_is_parallel(q))
- q->guc->id += GUC_ID_START_MLRC;
for (i = 0; i < q->width; ++i) {
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
@@ -533,7 +508,7 @@ static void register_engine(struct xe_exec_queue *q)
info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
if (xe_exec_queue_is_parallel(q)) {
- u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
+ u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
struct iosys_map map = xe_lrc_parallel_map(lrc);
info.wq_desc_lo = lower_32_bits(ggtt_addr +
@@ -833,7 +808,9 @@ static void simple_error_capture(struct xe_exec_queue *q)
}
}
- xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ if (xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL))
+ xe_gt_info(guc_to_gt(guc),
+ "failed to get forcewake for error capture");
xe_guc_ct_print(&guc->ct, &p, true);
guc_exec_queue_print(q, &p);
for_each_hw_engine(hwe, guc_to_gt(guc), id) {
@@ -929,20 +906,26 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int err = -ETIME;
int i = 0;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
- xe_sched_job_seqno(job), q->guc->id, q->flags);
- xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
- "Kernel-submitted job timed out\n");
- xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
- "VM job timed out on non-killed execqueue\n");
+ /*
+ * TDR has fired before free job worker. Common if exec queue
+ * immediately closed after last fence signaled.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
+ guc_exec_queue_free_job(drm_job);
- simple_error_capture(q);
- xe_devcoredump(job);
- } else {
- drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
- xe_sched_job_seqno(job), q->guc->id, q->flags);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
+
+ drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), q->guc->id, q->flags);
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
+ "Kernel-submitted job timed out\n");
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
+ "VM job timed out on non-killed execqueue\n");
+
+ simple_error_capture(q);
+ xe_devcoredump(job);
+
trace_xe_sched_job_timedout(job);
/* Kill the run_job entry point */
@@ -1220,7 +1203,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
init_waitqueue_head(&ge->suspend_wait);
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
- q->sched_props.job_timeout_ms;
+ msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
@@ -1568,28 +1551,8 @@ static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
}
-int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q)
{
- struct xe_device *xe = guc_to_xe(guc);
- struct xe_exec_queue *q;
- u32 guc_id = msg[0];
-
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
- return -EPROTO;
- }
-
- q = g2h_exec_queue_lookup(guc, guc_id);
- if (unlikely(!q))
- return -EPROTO;
-
- if (unlikely(!exec_queue_pending_enable(q) &&
- !exec_queue_pending_disable(q))) {
- drm_err(&xe->drm, "Unexpected engine state 0x%04x",
- atomic_read(&q->guc->state));
- return -EPROTO;
- }
-
trace_xe_exec_queue_scheduling_done(q);
if (exec_queue_pending_enable(q)) {
@@ -1609,17 +1572,15 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
deregister_exec_queue(guc, q);
}
}
-
- return 0;
}
-int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
u32 guc_id = msg[0];
- if (unlikely(len < 1)) {
+ if (unlikely(len < 2)) {
drm_err(&xe->drm, "Invalid length %u", len);
return -EPROTO;
}
@@ -1628,13 +1589,20 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
if (unlikely(!q))
return -EPROTO;
- if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
- exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
+ if (unlikely(!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q))) {
drm_err(&xe->drm, "Unexpected engine state 0x%04x",
atomic_read(&q->guc->state));
return -EPROTO;
}
+ handle_sched_done(guc, q);
+
+ return 0;
+}
+
+static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
+{
trace_xe_exec_queue_deregister_done(q);
clear_exec_queue_registered(q);
@@ -1643,6 +1611,31 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
xe_exec_queue_put(q);
else
__guc_exec_queue_fini(guc, q);
+}
+
+int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 1)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
+ exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
+ drm_err(&xe->drm, "Unexpected engine state 0x%04x",
+ atomic_read(&q->guc->state));
+ return -EPROTO;
+ }
+
+ handle_deregister_done(guc, q);
return 0;
}
@@ -1782,7 +1775,7 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
/**
* xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @job: faulty Xe scheduled job.
+ * @q: faulty exec queue
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -1791,9 +1784,8 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
* caller, using `xe_guc_exec_queue_snapshot_free`.
*/
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
{
- struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc_submit_exec_queue_snapshot *snapshot;
int i;
@@ -1814,21 +1806,14 @@ xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
snapshot->sched_props.preempt_timeout_us =
q->sched_props.preempt_timeout_us;
- snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
+ snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
GFP_ATOMIC);
if (snapshot->lrc) {
for (i = 0; i < q->width; ++i) {
struct xe_lrc *lrc = q->lrc + i;
- snapshot->lrc[i].context_desc =
- lower_32_bits(xe_lrc_ggtt_addr(lrc));
- snapshot->lrc[i].head = xe_lrc_ring_head(lrc);
- snapshot->lrc[i].tail.internal = lrc->ring.tail;
- snapshot->lrc[i].tail.memory =
- xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
- snapshot->lrc[i].start_seqno = xe_lrc_start_seqno(lrc);
- snapshot->lrc[i].seqno = xe_lrc_seqno(lrc);
+ snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
}
}
@@ -1867,6 +1852,24 @@ xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
}
/**
+ * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a snapshot of the GuC Engine.
+ * @snapshot: Previously captured snapshot of the job.
+ *
+ * This captures some data that requires taking locks, so it cannot be done in the signaling path.
+ */
+void
+xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
+{
+ int i;
+
+ if (!snapshot || !snapshot->lrc)
+ return;
+
+ for (i = 0; i < snapshot->width; ++i)
+ xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
+}
+
+/**
* xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
* @snapshot: GuC Submit Engine snapshot object.
* @p: drm_printer where it will be printed out.
@@ -1894,18 +1897,9 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
drm_printf(p, "\tPreempt timeout: %u (us)\n",
snapshot->sched_props.preempt_timeout_us);
- for (i = 0; snapshot->lrc && i < snapshot->width; ++i) {
- drm_printf(p, "\tHW Context Desc: 0x%08x\n",
- snapshot->lrc[i].context_desc);
- drm_printf(p, "\tLRC Head: (memory) %u\n",
- snapshot->lrc[i].head);
- drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
- snapshot->lrc[i].tail.internal,
- snapshot->lrc[i].tail.memory);
- drm_printf(p, "\tStart seqno: (memory) %d\n",
- snapshot->lrc[i].start_seqno);
- drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
- }
+ for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
+ xe_lrc_snapshot_print(snapshot->lrc[i], p);
+
drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
@@ -1930,10 +1924,16 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
*/
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
+ int i;
+
if (!snapshot)
return;
- kfree(snapshot->lrc);
+ if (snapshot->lrc) {
+ for (i = 0; i < snapshot->width; i++)
+ xe_lrc_snapshot_free(snapshot->lrc[i]);
+ kfree(snapshot->lrc);
+ }
kfree(snapshot->pending_list);
kfree(snapshot);
}
@@ -1941,28 +1941,10 @@ void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *s
static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
struct xe_guc_submit_exec_queue_snapshot *snapshot;
- struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job;
- bool found = false;
-
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
- if (job->q == q) {
- xe_sched_job_get(job);
- found = true;
- break;
- }
- }
- spin_unlock(&sched->base.job_list_lock);
- if (!found)
- return;
-
- snapshot = xe_guc_exec_queue_snapshot_capture(job);
+ snapshot = xe_guc_exec_queue_snapshot_capture(q);
xe_guc_exec_queue_snapshot_print(snapshot, p);
xe_guc_exec_queue_snapshot_free(snapshot);
-
- xe_sched_job_put(job);
}
/**
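The conversion above drops the split single-LRC IDA plus multi-LRC bitmap in favour of one xe_guc_id_mgr that hands out q->width contiguous GuC IDs. The internals of xe_guc_id_mgr are not shown in this patch, so the standalone sketch below only illustrates the contiguous-range reserve/release idea that alloc_guc_id() and __release_guc_id() now rely on; it is not the real implementation.

#include <stdio.h>

#define TOTAL_IDS 64

static unsigned char used[TOTAL_IDS];	/* one byte per ID, kept simple */

/* Find and reserve 'width' consecutive free IDs; return the first ID or -1. */
static int id_reserve(unsigned int width)
{
	unsigned int start, i;

	for (start = 0; start + width <= TOTAL_IDS; start++) {
		for (i = 0; i < width; i++)
			if (used[start + i])
				break;
		if (i == width) {
			for (i = 0; i < width; i++)
				used[start + i] = 1;
			return (int)start;
		}
	}
	return -1;
}

static void id_release(unsigned int start, unsigned int width)
{
	unsigned int i;

	for (i = 0; i < width; i++)
		used[start + i] = 0;
}

int main(void)
{
	int a = id_reserve(1);	/* single-LRC queue: width 1 */
	int b = id_reserve(4);	/* parallel queue: width 4, contiguous */

	printf("a=%d b=%d\n", a, b);
	id_release(b, 4);
	id_release(a, 1);
	return 0;
}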
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 723dc2bd8df9..fad0421ead36 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
struct drm_printer;
+struct xe_exec_queue;
struct xe_guc;
-struct xe_sched_job;
int xe_guc_submit_init(struct xe_guc *guc);
@@ -27,7 +27,9 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job);
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
+void
+xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
index 72fc0f42b0a5..dc7456c34583 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -61,17 +61,6 @@ struct guc_submit_parallel_scratch {
u32 wq[WQ_SIZE / sizeof(u32)];
};
-struct lrc_snapshot {
- u32 context_desc;
- u32 head;
- struct {
- u32 internal;
- u32 memory;
- } tail;
- u32 start_seqno;
- u32 seqno;
-};
-
struct pending_list_snapshot {
u32 seqno;
bool fence;
@@ -109,7 +98,7 @@ struct xe_guc_submit_exec_queue_snapshot {
} sched_props;
/** @lrc: LRC Snapshot */
- struct lrc_snapshot *lrc;
+ struct xe_lrc_snapshot **lrc;
/** @schedule_state: Schedule State at the moment of Crash */
u32 schedule_state;
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index edcd1a950bd3..82bd93f7867d 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -32,6 +32,21 @@ struct xe_guc_db_mgr {
};
/**
+ * struct xe_guc_id_mgr - GuC context ID Manager.
+ *
+ * Note: the GuC context ID Manager relies on &xe_guc::submission_state.lock
+ * to protect its members.
+ */
+struct xe_guc_id_mgr {
+ /** @bitmap: bitmap to track allocated IDs */
+ unsigned long *bitmap;
+ /** @total: total number of IDs being managed */
+ unsigned int total;
+ /** @used: number of IDs currently in use */
+ unsigned int used;
+};
+
+/**
* struct xe_guc - Graphic micro controller
*/
struct xe_guc {
@@ -49,12 +64,10 @@ struct xe_guc {
struct xe_guc_db_mgr dbm;
/** @submission_state: GuC submission state */
struct {
+ /** @submission_state.idm: GuC context ID Manager */
+ struct xe_guc_id_mgr idm;
/** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
- /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
- struct ida guc_ids;
- /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
- unsigned long *guc_ids_bitmap;
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
/** @submission_state.lock: protects submission state */
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
new file mode 100644
index 000000000000..2c32dc46f7d4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/mmu_notifier.h>
+#include <linux/dma-mapping.h>
+#include <linux/memremap.h>
+#include <linux/swap.h>
+#include <linux/hmm.h>
+#include <linux/mm.h>
+#include "xe_hmm.h"
+#include "xe_vm.h"
+#include "xe_bo.h"
+
+static u64 xe_npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/*
+ * xe_mark_range_accessed() - mark a range as accessed, so the core mm
+ * has this information for memory eviction or write-back to
+ * disk
+ *
+ * @range: the range to mark
+ * @write: if the range was written to, also mark the pages in it
+ * as dirty
+ */
+static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+{
+ struct page *page;
+ u64 i, npages;
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages; i++) {
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (write)
+ set_page_dirty_lock(page);
+
+ mark_page_accessed(page);
+ }
+}
+
+/*
+ * xe_build_sg() - build a scatter gather table for all the physical pages/pfns
+ * in a hmm_range. dma-map the pages if necessary. The dma-addresses are saved
+ * in the sg table and will be used to program the GPU page table later.
+ *
+ * @xe: the xe device that will access the dma-addresses in the sg table
+ * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
+ * has the pfn numbers of the pages that back this hmm address range.
+ * @st: pointer to the sg table.
+ * @write: whether we write to this range. This decides the dma map direction
+ * for system pages: if we write, we map bi-directional; otherwise
+ * DMA_TO_DEVICE.
+ *
+ * All contiguous pfns are collapsed into one entry in the scatter gather
+ * table, for the purpose of efficiently programming the GPU page table.
+ *
+ * The dma_address in the sg table will later be used by the GPU to
+ * access memory. So if the memory is system memory, we need to
+ * do a dma-mapping so it can be accessed by the GPU/DMA.
+ *
+ * FIXME: This function currently only supports pages in system
+ * memory. If the memory is GPU local memory (of the GPU that
+ * is going to access it), we need the gpu dpa (device physical
+ * address), and there is no need for dma-mapping. This is TBD.
+ *
+ * FIXME: dma-mapping for a peer gpu device to access a remote gpu's
+ * memory. Add this when p2p is supported.
+ *
+ * This function allocates the storage of the sg table. It is the
+ * caller's responsibility to free it by calling sg_free_table().
+ *
+ * Returns 0 if successful; -ENOMEM if it fails to allocate memory
+ */
+static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+ struct sg_table *st, bool write)
+{
+ struct device *dev = xe->drm.dev;
+ struct page **pages;
+ u64 i, npages;
+ int ret;
+
+ npages = xe_npages_in_range(range->start, range->end);
+ pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
+ xe_assert(xe, !is_device_private_page(pages[i]));
+ }
+
+ ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
+ xe_sg_segment_size(dev), GFP_KERNEL);
+ if (ret)
+ goto free_pages;
+
+ ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+ if (ret) {
+ sg_free_table(st);
+ st = NULL;
+ }
+
+free_pages:
+ kvfree(pages);
+ return ret;
+}
+
+/*
+ * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+ *
+ * @uvma: the userptr vma which holds the scatter gather table
+ *
+ * xe_hmm_userptr_populate_range() allocates the storage of the
+ * userptr sg table. This is a helper function to free that
+ * sg table and dma-unmap the addresses in it.
+ */
+void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ struct device *dev = xe->drm.dev;
+
+ xe_assert(xe, userptr->sg);
+ dma_unmap_sgtable(dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+}
+
+/**
+ * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
+ * address range
+ *
+ * @uvma: userptr vma which has information of the range to populate.
+ * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
+ *
+ * This function populates the physical pages of a virtual
+ * address range. The populated physical pages are saved in
+ * the userptr's sg table. It is similar to get_user_pages() but calls
+ * hmm_range_fault() instead.
+ *
+ * This function also reads the mmu notifier sequence number
+ * (mmu_interval_read_begin), for the purpose of a later
+ * comparison (through mmu_interval_read_retry).
+ *
+ * This must be called with the mmap read or write lock held.
+ *
+ * This function allocates the storage of the userptr sg table.
+ * It is the caller's responsibility to free it by calling sg_free_table().
+ *
+ * returns: 0 for success; negative error code on failure
+ */
+int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ bool is_mm_mmap_locked)
+{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ struct xe_userptr *userptr;
+ struct xe_vma *vma = &uvma->vma;
+ u64 userptr_start = xe_vma_userptr(vma);
+ u64 userptr_end = userptr_start + xe_vma_size(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct hmm_range hmm_range;
+ bool write = !xe_vma_read_only(vma);
+ unsigned long notifier_seq;
+ u64 npages;
+ int ret;
+
+ userptr = &uvma->userptr;
+
+ if (is_mm_mmap_locked)
+ mmap_assert_locked(userptr->notifier.mm);
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+ if (notifier_seq == userptr->notifier_seq)
+ return 0;
+
+ if (userptr->sg)
+ xe_hmm_userptr_free_sg(uvma);
+
+ npages = xe_npages_in_range(userptr_start, userptr_end);
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns))
+ return -ENOMEM;
+
+ if (write)
+ flags |= HMM_PFN_REQ_WRITE;
+
+ if (!mmget_not_zero(userptr->notifier.mm)) {
+ ret = -EFAULT;
+ goto free_pfns;
+ }
+
+ hmm_range.default_flags = flags;
+ hmm_range.hmm_pfns = pfns;
+ hmm_range.notifier = &userptr->notifier;
+ hmm_range.start = userptr_start;
+ hmm_range.end = userptr_end;
+ hmm_range.dev_private_owner = vm->xe;
+
+ while (true) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+
+ if (!is_mm_mmap_locked)
+ mmap_read_lock(userptr->notifier.mm);
+
+ ret = hmm_range_fault(&hmm_range);
+
+ if (!is_mm_mmap_locked)
+ mmap_read_unlock(userptr->notifier.mm);
+
+ if (ret == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ continue;
+ }
+ break;
+ }
+
+ mmput(userptr->notifier.mm);
+
+ if (ret)
+ goto free_pfns;
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ if (ret)
+ goto free_pfns;
+
+ xe_mark_range_accessed(&hmm_range, write);
+ userptr->sg = &userptr->sgt;
+ userptr->notifier_seq = hmm_range.notifier_seq;
+
+free_pfns:
+ kvfree(pfns);
+ return ret;
+}
+
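The xe_build_sg() comment above notes that contiguous pfns collapse into a single scatter-gather entry; in the real code that collapsing is performed by sg_alloc_table_from_pages_segment(). The standalone sketch below only illustrates how a pfn array would collapse into segments, assuming a 4 KiB page size; it is not part of the patch.

#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Count how many contiguous runs (i.e. sg segments) the pfns form. */
static unsigned int count_segments(const unsigned long long *pfns,
				   unsigned int npages)
{
	unsigned int i, segs = npages ? 1 : 0;

	for (i = 1; i < npages; i++)
		if (pfns[i] != pfns[i - 1] + 1)
			segs++;
	return segs;
}

int main(void)
{
	/* Two contiguous runs: {100, 101, 102} and {200, 201}. */
	unsigned long long pfns[] = { 100, 101, 102, 200, 201 };
	unsigned int segs = count_segments(pfns, 5u);

	printf("%u pages -> %u segment(s), %llu bytes in the largest run\n",
	       5u, segs, 3 * PAGE_SIZE);
	return 0;
}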
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
new file mode 100644
index 000000000000..909dc2bdcd97
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+struct xe_userptr_vma;
+
+int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index b545f850087c..39a484a57585 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -53,26 +53,19 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
- int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
PXP43_HUC_AUTH_INOUT_SIZE * 2,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
huc->gsc_pkt = bo;
- err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
- if (err) {
- free_gsc_pkt(&xe->drm, huc);
- return err;
- }
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)
diff --git a/drivers/gpu/drm/xe/xe_huc_debugfs.c b/drivers/gpu/drm/xe/xe_huc_debugfs.c
index 18585a7eeb9d..3a888a40188b 100644
--- a/drivers/gpu/drm/xe/xe_huc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_huc_debugfs.c
@@ -12,6 +12,7 @@
#include "xe_gt.h"
#include "xe_huc.h"
#include "xe_macros.h"
+#include "xe_pm.h"
static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
@@ -36,9 +37,9 @@ static int huc_info(struct seq_file *m, void *data)
struct xe_device *xe = huc_to_xe(huc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_huc_print_info(huc, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index b5e83ea172f3..455f375c1cbd 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -14,8 +14,10 @@
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
+#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -463,6 +465,32 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
+
+ /*
+ * The GSC engine can accept submissions while the GSC shim is
+ * being reset, during which time the submission is stalled. In
+ * the worst case, the shim reset can take up to the maximum GSC
+ * command execution time (250ms), so the request start can be
+ * delayed by that much; the request itself can take that long
+ * without being preemptible, which means worst case it can
+ * theoretically take up to 500ms for a preemption to go through
+ * on the GSC engine. Adding to that an extra 100ms as a safety
+ * margin, we get a minimum recommended timeout of 600ms.
+ * The preempt_timeout value can't be tuned for OTHER_CLASS
+ * because the class is reserved for kernel usage, so we just
+ * need to make sure that the starting value is above that
+ * threshold; since our default value (640ms) is greater than
+ * 600ms, the only way we can go below is via a kconfig setting.
+ * If that happens, log it in dmesg and update the value.
+ */
+ if (hwe->class == XE_ENGINE_CLASS_OTHER) {
+ const u32 min_preempt_timeout = 600 * 1000;
+ if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
+ hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
+ xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
+ }
+ }
+
/* Record default props */
hwe->eclass->defaults = hwe->eclass->sched_props;
}
@@ -490,8 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(hwe->hwsp)) {
err = PTR_ERR(hwe->hwsp);
goto err_name;
@@ -509,18 +538,19 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
}
}
- if (xe_device_uc_enabled(xe))
+ if (xe_device_uc_enabled(xe)) {
+ /* GSCCS has a special interrupt for reset */
+ if (hwe->class == XE_ENGINE_CLASS_OTHER)
+ hwe->irq_handler = xe_gsc_hwe_irq_handler;
+
xe_hw_engine_enable_ring(hwe);
+ }
/* We reserve the highest BCS instance for USM */
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
err_kernel_lrc:
xe_lrc_finish(&hwe->kernel_lrc);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index 2345fb42fa39..844ec68cbbb8 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -7,8 +7,10 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
+#include "xe_pm.h"
#define MAX_ENGINE_CLASS_NAME_LEN 16
static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
@@ -70,7 +72,7 @@ static ssize_t job_timeout_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_max);
}
static const struct kobj_attribute job_timeout_max_attr =
@@ -106,7 +108,7 @@ static ssize_t job_timeout_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_min);
}
static const struct kobj_attribute job_timeout_min_attr =
@@ -139,7 +141,7 @@ static ssize_t job_timeout_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_ms);
}
static const struct kobj_attribute job_timeout_attr =
@@ -150,7 +152,7 @@ static ssize_t job_timeout_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_ms);
}
static const struct kobj_attribute job_timeout_def =
@@ -161,7 +163,7 @@ static ssize_t job_timeout_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_min);
}
static const struct kobj_attribute job_timeout_min_def =
@@ -172,7 +174,7 @@ static ssize_t job_timeout_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_max);
}
static const struct kobj_attribute job_timeout_max_def =
@@ -231,7 +233,7 @@ static ssize_t timeslice_duration_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_max);
}
static const struct kobj_attribute timeslice_duration_max_attr =
@@ -269,7 +271,7 @@ static ssize_t timeslice_duration_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_min);
}
static const struct kobj_attribute timeslice_duration_min_attr =
@@ -281,7 +283,7 @@ static ssize_t timeslice_duration_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_us);
}
static const struct kobj_attribute timeslice_duration_attr =
@@ -293,7 +295,7 @@ static ssize_t timeslice_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_us);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_us);
}
static const struct kobj_attribute timeslice_duration_def =
@@ -304,7 +306,7 @@ static ssize_t timeslice_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_min);
}
static const struct kobj_attribute timeslice_duration_min_def =
@@ -315,7 +317,7 @@ static ssize_t timeslice_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_max);
}
static const struct kobj_attribute timeslice_duration_max_def =
@@ -348,7 +350,7 @@ static ssize_t preempt_timeout_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
}
static const struct kobj_attribute preempt_timeout_attr =
@@ -360,7 +362,7 @@ static ssize_t preempt_timeout_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_us);
}
static const struct kobj_attribute preempt_timeout_def =
@@ -372,7 +374,7 @@ static ssize_t preempt_timeout_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_min);
}
static const struct kobj_attribute preempt_timeout_min_def =
@@ -384,7 +386,7 @@ static ssize_t preempt_timeout_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_max);
}
static const struct kobj_attribute preempt_timeout_max_def =
@@ -420,7 +422,7 @@ static ssize_t preempt_timeout_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
}
static const struct kobj_attribute preempt_timeout_max_attr =
@@ -457,7 +459,7 @@ static ssize_t preempt_timeout_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
}
static const struct kobj_attribute preempt_timeout_min_attr =
@@ -498,8 +500,8 @@ static void kobj_xe_hw_engine_class_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
- static struct kobj_eclass *
-kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name)
+static struct kobj_eclass *
+kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, const char *name)
{
struct kobj_eclass *keclass;
int err = 0;
@@ -513,13 +515,13 @@ kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name
kobject_put(&keclass->base);
return NULL;
}
+ keclass->xe = xe;
err = drmm_add_action_or_reset(&xe->drm, kobj_xe_hw_engine_class_fini,
&keclass->base);
if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return NULL;
+
return keclass;
}
@@ -550,13 +552,8 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (err)
goto err_object;
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_defaults_fini,
- kobj);
- if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return err;
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_class_defaults_fini, kobj);
+
err_object:
kobject_put(kobj);
return err;
@@ -567,9 +564,51 @@ static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
.release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &kobj_sysfs_ops,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
};
static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
@@ -579,6 +618,24 @@ static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
+static const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
+{
+ switch (class) {
+ case XE_ENGINE_CLASS_RENDER:
+ return "rcs";
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ return "vcs";
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ return "vecs";
+ case XE_ENGINE_CLASS_COPY:
+ return "bcs";
+ case XE_ENGINE_CLASS_COMPUTE:
+ return "ccs";
+ default:
+ return NULL;
+ }
+}
+
/**
* xe_hw_engine_class_sysfs_init - Init HW engine classes on GT.
* @gt: Xe GT.
@@ -608,7 +665,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
goto err_object;
for_each_hw_engine(hwe, gt, id) {
- char name[MAX_ENGINE_CLASS_NAME_LEN];
+ const char *name;
struct kobj_eclass *keclass;
if (hwe->class == XE_ENGINE_CLASS_OTHER ||
@@ -619,24 +676,8 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
continue;
class_mask |= 1 << hwe->class;
-
- switch (hwe->class) {
- case XE_ENGINE_CLASS_RENDER:
- strcpy(name, "rcs");
- break;
- case XE_ENGINE_CLASS_VIDEO_DECODE:
- strcpy(name, "vcs");
- break;
- case XE_ENGINE_CLASS_VIDEO_ENHANCE:
- strcpy(name, "vecs");
- break;
- case XE_ENGINE_CLASS_COPY:
- strcpy(name, "bcs");
- break;
- case XE_ENGINE_CLASS_COMPUTE:
- strcpy(name, "ccs");
- break;
- default:
+ name = xe_hw_engine_class_to_str(hwe->class);
+ if (!name) {
err = -EINVAL;
goto err_object;
}
@@ -649,26 +690,16 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
keclass->eclass = hwe->eclass;
err = xe_add_hw_engine_class_defaults(xe, &keclass->base);
- if (err) {
- drm_warn(&xe->drm,
- "Add .defaults to engines failed!, err: %d\n",
- err);
+ if (err)
goto err_object;
- }
err = sysfs_create_files(&keclass->base, files);
if (err)
goto err_object;
}
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_sysfs_fini,
- kobj);
- if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_class_sysfs_fini, kobj);
- return err;
err_object:
kobject_put(kobj);
return err;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
index ec5ba673b314..28a0d7c909c0 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
@@ -26,6 +26,8 @@ struct kobj_eclass {
struct kobject base;
/** @eclass: A pointer to the hw engine class interface */
struct xe_hw_engine_class_intf *eclass;
+ /** @xe: A pointer to the xe device */
+ struct xe_device *xe;
};
static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kobj)
@@ -33,4 +35,9 @@ static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kob
return container_of(kobj, struct kobj_eclass, base)->eclass;
}
+static inline struct xe_device *kobj_to_xe(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_eclass, base)->xe;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index a5de3e7b0bd6..f872ef103127 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -130,7 +130,7 @@ void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
ctx->irq = irq;
ctx->dma_fence_ctx = dma_fence_context_alloc(1);
ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
- sprintf(ctx->name, "%s", name);
+ snprintf(ctx->name, sizeof(ctx->name), "%s", name);
}
void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b82233a41606..453e601ddd5e 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -18,6 +18,7 @@
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
+#include "xe_pm.h"
enum xe_hwmon_reg {
REG_PKG_RAPL_LIMIT,
@@ -33,6 +34,12 @@ enum xe_hwmon_reg_operation {
REG_READ64,
};
+enum xe_hwmon_channel {
+ CHANNEL_CARD,
+ CHANNEL_PKG,
+ CHANNEL_MAX,
+};
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -68,61 +75,61 @@ struct xe_hwmon {
int scl_shift_energy;
/** @scl_shift_time: pkg time unit */
int scl_shift_time;
- /** @ei: Energy info for energy1_input */
- struct xe_hwmon_energy_info ei;
+ /** @ei: Energy info for energyN_input */
+ struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};
-static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
+static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+ int channel)
{
struct xe_device *xe = gt_to_xe(hwmon->gt);
- struct xe_reg reg = XE_REG(0);
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_RAPL_LIMIT;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PACKAGE_RAPL_LIMIT;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_RAPL_LIMIT;
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PACKAGE_POWER_SKU;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+ return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
+ return PCU_CR_PACKAGE_POWER_SKU_UNIT;
break;
case REG_GT_PERF_STATUS:
- if (xe->info.platform == XE_DG2)
- reg = GT_PERF_STATUS;
+ if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
+ return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_ENERGY_STATUS;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PLATFORM_ENERGY_STATUS;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_ENERGY_STATUS;
break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
}
- return reg.raw;
+ return XE_REG(0);
}
static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
enum xe_hwmon_reg_operation operation, u64 *value,
- u32 clr, u32 set)
+ u32 clr, u32 set, int channel)
{
struct xe_reg reg;
- reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
+ reg = xe_hwmon_get_reg(hwmon, hwmon_reg, channel);
- if (!reg.raw)
+ if (!xe_reg_is_valid(reg))
return;
switch (operation) {
@@ -150,13 +157,13 @@ static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
mutex_lock(&hwmon->hwmon_lock);
- xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0, channel);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@@ -166,7 +173,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0, channel);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@@ -178,7 +185,7 @@ unlock:
mutex_unlock(&hwmon->hwmon_lock);
}
-static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
int ret = 0;
u64 reg_val;
@@ -188,9 +195,9 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
- PKG_PWR_LIM_1_EN, 0);
+ PKG_PWR_LIM_1_EN, 0, channel);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
- PKG_PWR_LIM_1_EN, 0);
+ PKG_PWR_LIM_1_EN, 0, channel);
if (reg_val & PKG_PWR_LIM_1_EN) {
ret = -EOPNOTSUPP;
@@ -203,17 +210,17 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
- PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val, channel);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
}
-static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val;
- xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0, channel);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -236,16 +243,16 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
* the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
* a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
* hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
- * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
+ * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
*/
static void
-xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
+xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
- struct xe_hwmon_energy_info *ei = &hwmon->ei;
+ struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
- &reg_val, 0, 0);
+ &reg_val, 0, 0, channel);
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
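The energyN_input comment above packs its overflow argument into one sentence; the small standalone program below reproduces the arithmetic, assuming 57 usable bits of accumulated hardware units, an energy unit of 1/2^14 J (scl_shift_energy == 14) and a constant 1000 W draw, and lands on roughly the 278-year figure quoted there.

#include <stdio.h>

int main(void)
{
	const double units = (double)(1ULL << 57);	/* usable accumulator range */
	const double joules_per_unit = 1.0 / (1 << 14);
	const double watts = 1000.0;
	const double seconds = units * joules_per_unit / watts;

	printf("overflow after about %.0f years\n",
	       seconds / (3600.0 * 24 * 365));
	return 0;
}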
@@ -259,23 +266,24 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
}
static ssize_t
-xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
+ int sensor_index = to_sensor_dev_attr(attr)->index;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
- REG_READ32, &r, 0, 0);
+ REG_READ32, &r, 0, 0, sensor_index);
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -290,7 +298,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
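The change in this hunk adds only a (u64) cast before the shift, which is easy to miss. The standalone snippet below demonstrates the truncation it prevents: without the cast the (4 | x) value is shifted as a 32-bit quantity and the high bits are lost before the result is widened. The y value of 30 is an arbitrary illustration, not a value taken from the register fields.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int x = 1, x_w = 2, y = 30;

	uint64_t narrow = ((1u << x_w) | x) << y;		/* shifted as 32-bit */
	uint64_t wide   = (uint64_t)((1u << x_w) | x) << y;	/* shifted as 64-bit */

	/* narrow wraps to 0x40000000; wide keeps the full 0x140000000. */
	printf("narrow=0x%llx wide=0x%llx\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}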
@@ -299,14 +307,15 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
}
static ssize_t
-xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
int ret;
+ int sensor_index = to_sensor_dev_attr(attr)->index;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -325,12 +334,12 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
/*
* val must be < max in hwmon interface units. The steps below are
- * explained in xe_hwmon_power1_max_interval_show()
+ * explained in xe_hwmon_power_max_interval_show()
*/
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
@@ -354,26 +363,31 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
- PKG_PWR_LIM_1_TIME, rxy);
+ PKG_PWR_LIM_1_TIME, rxy, sensor_index);
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return count;
}
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
- xe_hwmon_power1_max_interval_show,
- xe_hwmon_power1_max_interval_store, 0);
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, CHANNEL_CARD);
+
+static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, CHANNEL_PKG);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_max_interval.dev_attr.attr,
NULL
};
@@ -384,12 +398,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
- if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
- ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
+ ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -405,10 +418,11 @@ static const struct attribute_group *hwmon_groups[] = {
};
static const struct hwmon_channel_info * const hwmon_info[] = {
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
- HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
- HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
NULL
};
@@ -431,7 +445,8 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
uval);
}
-static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
+ long *value, u32 scale_factor)
{
int ret;
u32 uval;
@@ -449,7 +464,8 @@ unlock:
return ret;
}
-static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
+ long value, u32 scale_factor)
{
int ret;
u32 uval;
@@ -463,117 +479,131 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u3
return ret;
}
-static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val;
xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
- REG_READ32, &reg_val, 0, 0);
+ REG_READ32, &reg_val, 0, 0, channel);
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
static umode_t
-xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
+xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel)) ? 0664 : 0;
case hwmon_power_rated_max:
- return xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU) ? 0444 : 0;
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
+ channel)) ? 0444 : 0;
case hwmon_power_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
- !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ if (channel == CHANNEL_PKG)
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ break;
+ case hwmon_power_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
+ channel)) ? 0444 : 0;
default:
return 0;
}
+ return 0;
}
static int
-xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
+xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
- xe_hwmon_power_max_read(hwmon, val);
+ xe_hwmon_power_max_read(hwmon, channel, val);
return 0;
case hwmon_power_rated_max:
- xe_hwmon_power_rated_max_read(hwmon, val);
+ xe_hwmon_power_rated_max_read(hwmon, channel, val);
return 0;
case hwmon_power_crit:
- return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
+ return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
}
static int
-xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
+xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_write(hwmon, val);
+ return xe_hwmon_power_max_write(hwmon, channel, val);
case hwmon_power_crit:
- return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
+ return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
}
static umode_t
-xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
switch (attr) {
case hwmon_curr_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
- (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ case hwmon_curr_label:
+ if (channel == CHANNEL_PKG)
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ break;
default:
return 0;
}
+ return 0;
}
static int
-xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_curr_crit:
- return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
+ return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
}
static int
-xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
+xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_curr_crit:
- return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
+ return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
}
static umode_t
-xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
switch (attr) {
case hwmon_in_input:
- return xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS) ? 0444 : 0;
+ case hwmon_in_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
+ channel)) ? 0444 : 0;
default:
return 0;
}
}
static int
-xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_in_input:
- xe_hwmon_get_voltage(hwmon, val);
+ xe_hwmon_get_voltage(hwmon, channel, val);
return 0;
default:
return -EOPNOTSUPP;
@@ -581,22 +611,24 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
}
static umode_t
-xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
switch (attr) {
case hwmon_energy_input:
- return xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS) ? 0444 : 0;
+ case hwmon_energy_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
default:
return 0;
}
}
static int
-xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_energy_input:
- xe_hwmon_energy_get(hwmon, val);
+ xe_hwmon_energy_get(hwmon, channel, val);
return 0;
default:
return -EOPNOTSUPP;
@@ -610,27 +642,27 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_is_visible(hwmon, attr);
+ ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
break;
case hwmon_in:
- ret = xe_hwmon_in_is_visible(hwmon, attr);
+ ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
break;
case hwmon_energy:
- ret = xe_hwmon_energy_is_visible(hwmon, attr);
+ ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
default:
ret = 0;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -642,27 +674,27 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_read(hwmon, attr, channel, val);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_read(hwmon, attr, val);
+ ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
break;
case hwmon_in:
- ret = xe_hwmon_in_read(hwmon, attr, val);
+ ret = xe_hwmon_in_read(hwmon, attr, channel, val);
break;
case hwmon_energy:
- ret = xe_hwmon_energy_read(hwmon, attr, val);
+ ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -674,29 +706,49 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_write(hwmon, attr, channel, val);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_write(hwmon, attr, val);
+ ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
+static int xe_hwmon_read_label(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_power:
+ case hwmon_energy:
+ case hwmon_curr:
+ case hwmon_in:
+ if (channel == CHANNEL_CARD)
+ *str = "card";
+ else if (channel == CHANNEL_PKG)
+ *str = "pkg";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct hwmon_ops hwmon_ops = {
.is_visible = xe_hwmon_is_visible,
.read = xe_hwmon_read,
.write = xe_hwmon_write,
+ .read_string = xe_hwmon_read_label,
};
static const struct hwmon_chip_info hwmon_chip_info = {
@@ -710,14 +762,15 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
+ int channel;
/*
* The contents of register PKG_POWER_SKU_UNIT do not change,
* so read it once and store the shift values.
*/
- if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+ if (xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0))) {
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
- REG_READ32, &val_sku_unit, 0, 0);
+ REG_READ32, &val_sku_unit, 0, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@@ -727,8 +780,9 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
* Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
* first value of the energy register read
*/
- if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, 0))
- xe_hwmon_energy_get(hwmon, &energy);
+ for (channel = 0; channel < CHANNEL_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
+ xe_hwmon_energy_get(hwmon, channel, &energy);
}
static void xe_hwmon_mutex_destroy(void *arg)
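With the second channel added above, each sensor appears twice in sysfs (the *1_* files for the card channel and the *2_* files for the package channel), and the new .read_string hook supplies the matching *_label files ("card"/"pkg"). The fragment below is a minimal sketch of how the index given to SENSOR_DEVICE_ATTR() reaches the callback as the channel selector; the attribute and function names are illustrative, not the driver's:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/sysfs.h>

/* The last SENSOR_DEVICE_ATTR() argument becomes ->index and is read back
 * inside the callback to pick the channel. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	return sysfs_emit(buf, "channel %d\n", channel);
}

static SENSOR_DEVICE_ATTR_RO(example1, example, 0);	/* e.g. CHANNEL_CARD */
static SENSOR_DEVICE_ATTR_RO(example2, example, 1);	/* e.g. CHANNEL_PKG */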
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 2f5d179e0d00..996806353171 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -187,7 +187,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
* GSCCS interrupts, but it has its own mask register.
*/
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
- gsc_mask = irqs;
+ gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
} else if (HAS_HECI_GSCFI(xe)) {
gsc_mask = GSC_IRQ_INTF(1);
@@ -326,7 +326,6 @@ static void gt_irq_handler(struct xe_tile *tile,
xe_heci_gsc_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
- continue;
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 0d7c5514e092..418661a88918 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -35,7 +35,7 @@
static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
- return xe->info.platform == XE_PVC;
+ return GRAPHICS_VERx100(xe) >= 1260;
}
static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 7ad853b0788a..615bbc372ac6 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -5,8 +5,11 @@
#include "xe_lrc.h"
+#include <linux/ascii85.h>
+
#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
+#include "instructions/xe_gfx_state_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
#include "regs/xe_lrc_layout.h"
@@ -23,13 +26,28 @@
#include "xe_sriov.h"
#include "xe_vm.h"
-#define LRC_VALID (1 << 0)
-#define LRC_PRIVILEGE (1 << 8)
-#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_VALID BIT_ULL(0)
+#define LRC_PRIVILEGE BIT_ULL(8)
+#define LRC_ADDRESSING_MODE GENMASK_ULL(4, 3)
#define LRC_LEGACY_64B_CONTEXT 3
-#define ENGINE_CLASS_SHIFT 61
-#define ENGINE_INSTANCE_SHIFT 48
+#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
+#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+
+struct xe_lrc_snapshot {
+ struct xe_bo *lrc_bo;
+ void *lrc_snapshot;
+ unsigned long lrc_size, lrc_offset;
+
+ u32 context_desc;
+ u32 head;
+ struct {
+ u32 internal;
+ u32 memory;
+ } tail;
+ u32 start_seqno;
+ u32 seqno;
+};
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
@@ -97,7 +115,6 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END 0
{
const u32 base = hwe->mmio_base;
@@ -168,7 +185,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 dg2_xcs_offsets[] = {
@@ -202,7 +219,7 @@ static const u8 dg2_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 gen12_rcs_offsets[] = {
@@ -298,7 +315,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END
+ 0
};
static const u8 xehp_rcs_offsets[] = {
@@ -339,7 +356,7 @@ static const u8 xehp_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 dg2_rcs_offsets[] = {
@@ -382,7 +399,7 @@ static const u8 dg2_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 mtl_rcs_offsets[] = {
@@ -425,7 +442,7 @@ static const u8 mtl_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
#define XE2_CTX_COMMON \
@@ -471,7 +488,7 @@ static const u8 xe2_rcs_offsets[] = {
LRI(1, 0), /* [0x47] */
REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */
- END
+ 0
};
static const u8 xe2_bcs_offsets[] = {
@@ -482,16 +499,15 @@ static const u8 xe2_bcs_offsets[] = {
REG16(0x200), /* [0x42] BCS_SWCTRL */
REG16(0x204), /* [0x44] BLIT_CCTL */
- END
+ 0
};
static const u8 xe2_xcs_offsets[] = {
XE2_CTX_COMMON,
- END
+ 0
};
-#undef END
#undef REG16
#undef REG
#undef LRI
@@ -527,9 +543,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
/* TODO: Timestamp */
}
@@ -637,7 +652,7 @@ static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \
iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
return map; \
} \
-static inline u32 __xe_lrc_##elem##_ggtt_addr(struct xe_lrc *lrc) \
+static inline u32 __maybe_unused __xe_lrc_##elem##_ggtt_addr(struct xe_lrc *lrc) \
{ \
return xe_bo_ggtt_addr(lrc->bo) + __xe_lrc_##elem##_offset(lrc); \
} \
@@ -727,8 +742,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
ring_size + xe_lrc_size(xe, hwe->class),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
@@ -779,7 +795,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
- lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
+ lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
/* TODO: Priority */
/* While this appears to have something about privileged batches or
@@ -789,8 +805,8 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
lrc->desc |= LRC_PRIVILEGE;
if (GRAPHICS_VERx100(xe) < 1250) {
- lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
- lrc->desc |= (u64)hwe->class << ENGINE_CLASS_SHIFT;
+ lrc->desc |= FIELD_PREP(LRC_ENGINE_INSTANCE, hwe->instance);
+ lrc->desc |= FIELD_PREP(LRC_ENGINE_CLASS, hwe->class);
}
arb_enable = MI_ARB_ON_OFF | MI_ARB_ENABLE;
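Replacing the open-coded shifts with GENMASK_ULL()/FIELD_PREP() keeps a field's position and width in a single definition, and FIELD_PREP() also checks at compile time that constant values fit the field. A short sketch of the idiom with a hypothetical descriptor layout (not the actual LRC bits):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 64-bit descriptor layout, for illustration only. */
#define EXAMPLE_DESC_VALID		BIT_ULL(0)
#define EXAMPLE_DESC_ADDR_MODE		GENMASK_ULL(4, 3)
#define EXAMPLE_DESC_ENGINE_INSTANCE	GENMASK_ULL(53, 48)
#define EXAMPLE_DESC_ENGINE_CLASS	GENMASK_ULL(63, 61)

static u64 example_make_desc(u8 addr_mode, u8 instance, u8 engine_class)
{
	return EXAMPLE_DESC_VALID |
	       FIELD_PREP(EXAMPLE_DESC_ADDR_MODE, addr_mode) |
	       FIELD_PREP(EXAMPLE_DESC_ENGINE_INSTANCE, instance) |
	       FIELD_PREP(EXAMPLE_DESC_ENGINE_CLASS, engine_class);
}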
@@ -1037,6 +1053,8 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH(GPGPU_CSR_BASE_ADDRESS);
MATCH(STATE_COMPUTE_MODE);
MATCH3D(3DSTATE_BTD);
+ MATCH(STATE_SYSTEM_MEM_FENCE_ADDRESS);
+ MATCH(STATE_CONTEXT_DATA_BASE_ADDRESS);
MATCH3D(3DSTATE_VF_STATISTICS);
@@ -1061,6 +1079,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_WM);
MATCH3D(3DSTATE_CONSTANT_VS);
MATCH3D(3DSTATE_CONSTANT_GS);
+ MATCH3D(3DSTATE_CONSTANT_PS);
MATCH3D(3DSTATE_SAMPLE_MASK);
MATCH3D(3DSTATE_CONSTANT_HS);
MATCH3D(3DSTATE_CONSTANT_DS);
@@ -1153,6 +1172,31 @@ static int dump_gfxpipe_command(struct drm_printer *p,
}
}
+static int dump_gfx_state_command(struct drm_printer *p,
+ struct xe_gt *gt,
+ u32 *dw,
+ int remaining_dw)
+{
+ u32 numdw = instr_dw(*dw);
+ u32 opcode = REG_FIELD_GET(GFX_STATE_OPCODE, *dw);
+
+ /*
+ * Make sure we haven't mis-parsed a number of dwords that exceeds the
+ * remaining size of the LRC.
+ */
+ if (xe_gt_WARN_ON(gt, numdw > remaining_dw))
+ numdw = remaining_dw;
+
+ switch (*dw & (XE_INSTR_GFX_STATE | GFX_STATE_OPCODE)) {
+ MATCH(STATE_WRITE_INLINE);
+
+ default:
+ drm_printf(p, "[%#010x] unknown GFX_STATE command (opcode=%#x), likely %d dwords\n",
+ *dw, opcode, numdw);
+ return numdw;
+ }
+}
+
void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
enum xe_engine_class hwe_class)
@@ -1177,6 +1221,8 @@ void xe_lrc_dump_default(struct drm_printer *p,
num_dw = dump_mi_command(p, gt, dw, remaining_dw);
} else if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_GFXPIPE) {
num_dw = dump_gfxpipe_command(p, gt, dw, remaining_dw);
+ } else if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_GFX_STATE) {
+ num_dw = dump_gfx_state_command(p, gt, dw, remaining_dw);
} else {
num_dw = min(instr_dw(*dw), remaining_dw);
drm_printf(p, "[%#10x] Unknown instruction of type %#x, likely %d dwords\n",
@@ -1300,3 +1346,101 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
bb->len += num_dw;
}
}
+
+struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
+{
+ struct xe_lrc_snapshot *snapshot = kmalloc(sizeof(*snapshot), GFP_NOWAIT);
+
+ if (!snapshot)
+ return NULL;
+
+ snapshot->context_desc = lower_32_bits(xe_lrc_ggtt_addr(lrc));
+ snapshot->head = xe_lrc_ring_head(lrc);
+ snapshot->tail.internal = lrc->ring.tail;
+ snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
+ snapshot->start_seqno = xe_lrc_start_seqno(lrc);
+ snapshot->seqno = xe_lrc_seqno(lrc);
+ snapshot->lrc_bo = xe_bo_get(lrc->bo);
+ snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
+ snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
+ snapshot->lrc_snapshot = NULL;
+ return snapshot;
+}
+
+void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
+{
+ struct xe_bo *bo;
+ struct iosys_map src;
+
+ if (!snapshot)
+ return;
+
+ bo = snapshot->lrc_bo;
+ snapshot->lrc_bo = NULL;
+
+ snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
+ if (!snapshot->lrc_snapshot)
+ goto put_bo;
+
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+ if (!ttm_bo_vmap(&bo->ttm, &src)) {
+ xe_map_memcpy_from(xe_bo_device(bo),
+ snapshot->lrc_snapshot, &src, snapshot->lrc_offset,
+ snapshot->lrc_size);
+ ttm_bo_vunmap(&bo->ttm, &src);
+ } else {
+ kvfree(snapshot->lrc_snapshot);
+ snapshot->lrc_snapshot = NULL;
+ }
+ dma_resv_unlock(bo->ttm.base.resv);
+put_bo:
+ xe_bo_put(bo);
+}
+
+void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
+{
+ unsigned long i;
+
+ if (!snapshot)
+ return;
+
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
+ drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
+ snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
+ drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
+
+ if (!snapshot->lrc_snapshot)
+ return;
+
+ drm_printf(p, "\t[HWSP].length: 0x%x\n", LRC_PPHWSP_SIZE);
+ drm_puts(p, "\t[HWSP].data: ");
+ for (i = 0; i < LRC_PPHWSP_SIZE; i += sizeof(u32)) {
+ u32 *val = snapshot->lrc_snapshot + i;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+
+ drm_printf(p, "\n\t[HWCTX].length: 0x%lx\n", snapshot->lrc_size - LRC_PPHWSP_SIZE);
+ drm_puts(p, "\t[HWCTX].data: ");
+ for (; i < snapshot->lrc_size; i += sizeof(u32)) {
+ u32 *val = snapshot->lrc_snapshot + i;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+ drm_puts(p, "\n");
+}
+
+void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ kvfree(snapshot->lrc_snapshot);
+ if (snapshot->lrc_bo)
+ xe_bo_put(snapshot->lrc_bo);
+ kfree(snapshot);
+}
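The snapshot printer above emits the per-process HWSP and the context image as a stream of ascii85-encoded 32-bit words, which keeps the binary dump reasonably compact in text output. A stripped-down sketch of that encoding loop (the helper name is made up and only mirrors the pattern used above):

#include <linux/ascii85.h>
#include <drm/drm_print.h>

/* Encode count 32-bit words as ascii85, one chunk per word. */
static void example_dump_words(struct drm_printer *p, const u32 *buf,
			       size_t count)
{
	char out[ASCII85_BUFSZ];
	size_t i;

	for (i = 0; i < count; i++)
		drm_puts(p, ascii85_encode(buf[i], out));
	drm_puts(p, "\n");
}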
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 28b1d3f404d4..d32fa31faa2c 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -55,4 +55,9 @@ void xe_lrc_dump_default(struct drm_printer *p,
void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc);
+void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
+void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p);
+void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 24f20ed66fd1..b716df0dfb4e 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -43,4 +43,6 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
};
+struct xe_lrc_snapshot;
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 76e95535d7f6..95b6e9d7b7db 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -127,10 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
/* XXX: convert to managed bo */
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT |
- XE_BO_NEEDS_UC |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ee1bb938c493..9f6e9b7f11c8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -16,6 +16,7 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
+#include "regs/xe_gtt_defs.h"
#include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -155,8 +156,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -227,7 +228,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
@@ -235,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -291,7 +292,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -490,7 +491,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
- u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */
@@ -984,7 +985,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
- int pass = 0;
if (!clear_vram)
xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
@@ -1005,8 +1005,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_L0 = xe_migrate_res_sizes(m, &src_it);
- drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
-
/* Calculate final sizes and batch size.. */
batch_size = 2 +
pte_update_size(m, clear_vram, src, &src_it,
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 7ba2477452d7..334637511e75 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -163,6 +163,42 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
return 0;
}
+static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 offset;
+ u32 reg;
+
+ if (GRAPHICS_VER(xe) >= 20) {
+ u64 ccs_size = tile_size / 512;
+ u64 offset_hi, offset_lo;
+ u32 nodes, num_enabled;
+
+ reg = xe_mmio_read32(gt, MIRROR_FUSE3);
+ nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg);
+ num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */
+
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
+ offset_lo = REG_FIELD_GET(XE2_FLAT_CCS_BASE_LOWER_ADDR_MASK, reg);
+
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_UPPER);
+ offset_hi = REG_FIELD_GET(XE2_FLAT_CCS_BASE_UPPER_ADDR_MASK, reg);
+
+ offset = offset_hi << 32; /* HW view bits 39:32 */
+ offset |= offset_lo << 6; /* HW view bits 31:6 */
+ offset *= num_enabled; /* convert to SW view */
+
+ /* We don't expect any holes */
+ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
+ "Hole between CCS and GSM.\n");
+ } else {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K;
+ }
+
+ return offset;
+}
+
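On Xe2 the flat CCS base is reassembled from two registers: the upper register provides address bits 39:32, the lower one bits 31:6, and the combined value is then multiplied by the number of enabled L3 nodes to convert the per-node hardware view into the software view. A user-space sketch of that arithmetic with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register fields, for illustration only. */
	uint64_t offset_hi = 0x01;	/* HW view bits 39:32 */
	uint64_t offset_lo = 0x20000;	/* HW view bits 31:6  */
	uint32_t num_enabled = 4;	/* enabled L3 nodes   */

	uint64_t offset = (offset_hi << 32) | (offset_lo << 6);

	offset *= num_enabled;		/* convert HW view to SW view */

	printf("flat CCS offset = %#llx\n", (unsigned long long)offset);
	return 0;
}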
/**
* xe_mmio_tile_vram_size() - Collect vram size and offset information
* @tile: tile to get info for
@@ -207,8 +243,7 @@ static int xe_mmio_tile_vram_size(struct xe_tile *tile, u64 *vram_size,
/* minus device usage */
if (xe->info.has_flat_ccs) {
- reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
- offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
+ offset = get_flat_ccs_offset(gt, *tile_size);
} else {
offset = xe_mmio_read64_2x32(gt, GSMBASE);
}
@@ -360,32 +395,9 @@ static void mmio_fini(struct drm_device *drm, void *arg)
iounmap(xe->mem.vram.mapping);
}
-static int xe_verify_lmem_ready(struct xe_device *xe)
-{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
- if (!IS_DGFX(xe))
- return 0;
-
- if (IS_SRIOV_VF(xe))
- return 0;
-
- /*
- * The boot firmware initializes local memory and assesses its health.
- * If memory training fails, the punit will have been instructed to
- * keep the GT powered down; we won't be able to communicate with it
- * and we should not continue with driver initialization.
- */
- if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
- drm_err(&xe->drm, "VRAM not initialized by firmware\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
int xe_mmio_init(struct xe_device *xe)
{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
const int mmio_bar = 0;
@@ -401,23 +413,83 @@ int xe_mmio_init(struct xe_device *xe)
return -EIO;
}
+ /* Setup first tile; other tiles (if present) will be setup later. */
+ root_tile->mmio.size = SZ_16M;
+ root_tile->mmio.regs = xe->mmio.regs;
+
return drmm_add_action_or_reset(&xe->drm, mmio_fini, xe);
}
-int xe_mmio_root_tile_init(struct xe_device *xe)
+u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
- struct xe_tile *root_tile = xe_device_get_root_tile(xe);
- int err;
+ struct xe_tile *tile = gt_to_tile(gt);
- /* Setup first tile; other tiles (if present) will be setup later. */
- root_tile->mmio.size = SZ_16M;
- root_tile->mmio.regs = xe->mmio.regs;
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
- err = xe_verify_lmem_ready(xe);
- if (err)
- return err;
+ return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
- return 0;
+u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+{
+ u32 old, reg_val;
+
+ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & ~clr) | set;
+ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+}
+
+int xe_mmio_write32_and_verify(struct xe_gt *gt,
+ struct xe_reg reg, u32 val, u32 mask, u32 eval)
+{
+ u32 reg_val;
+
+ xe_mmio_write32(gt, reg, val);
+ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+}
+
+bool xe_mmio_in_range(const struct xe_gt *gt,
+ const struct xe_mmio_range *range,
+ struct xe_reg reg)
+{
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return range && reg.addr >= range->start && reg.addr <= range->end;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 98de5c13c89b..a3cd7b3036c7 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -21,83 +21,15 @@ struct xe_device;
#define LMEM_BAR 2
int xe_mmio_init(struct xe_device *xe);
-int xe_mmio_root_tile_init(struct xe_device *xe);
void xe_mmio_probe_tiles(struct xe_device *xe);
-static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline void xe_mmio_write32(struct xe_gt *gt,
- struct xe_reg reg, u32 val)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
- u32 set)
-{
- u32 old, reg_val;
-
- old = xe_mmio_read32(gt, reg);
- reg_val = (old & ~clr) | set;
- xe_mmio_write32(gt, reg, reg_val);
-
- return old;
-}
-
-static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
- struct xe_reg reg, u32 val,
- u32 mask, u32 eval)
-{
- u32 reg_val;
-
- xe_mmio_write32(gt, reg, val);
- reg_val = xe_mmio_read32(gt, reg);
-
- return (reg_val & mask) != eval ? -EINVAL : 0;
-}
-
-static inline bool xe_mmio_in_range(const struct xe_gt *gt,
- const struct xe_mmio_range *range,
- struct xe_reg reg)
-{
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return range && reg.addr >= range->start && reg.addr <= range->end;
-}
+u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
+void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
+u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
+u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
+int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
+bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
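Un-inlining the accessors leaves only prototypes in this header; callers are unchanged. As a reminder of the read-modify-write semantics declared above (clear the clr bits, set the set bits, return the value read before the write), a small sketch assuming the driver's XE_REG() register initializer; the register offset and bits are made up:

#include <linux/bits.h>

#include "regs/xe_reg_defs.h"
#include "xe_mmio.h"

/* Clear bit 0 and set bit 1 of a (made-up) register, preserving the rest;
 * the old register value is returned by xe_mmio_rmw32(). */
static u32 example_toggle_bits(struct xe_gt *gt)
{
	return xe_mmio_rmw32(gt, XE_REG(0x1234), BIT(0), BIT(1));
}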
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 609d997b3e9b..1e92f8ee07ba 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -17,10 +17,10 @@
#include "xe_step_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
-#define mocs_dbg drm_dbg
+#define mocs_dbg xe_gt_dbg
#else
__printf(2, 3)
-static inline void mocs_dbg(const struct drm_device *dev,
+static inline void mocs_dbg(const struct xe_gt *gt,
const char *format, ...)
{ /* noop */ }
#endif
@@ -72,7 +72,7 @@ struct xe_mocs_info {
/* Helper defines */
#define XELP_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES 3
-#define MTL_NUM_MOCS_ENTRIES 16
+#define MTL_NUM_MOCS_ENTRIES 16
#define XE2_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
@@ -375,6 +375,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
switch (xe->info.platform) {
case XE_LUNARLAKE:
+ case XE_BATTLEMAGE:
info->size = ARRAY_SIZE(xe2_mocs_table);
info->table = xe2_mocs_table;
info->n_entries = XE2_NUM_MOCS_ENTRIES;
@@ -401,7 +402,11 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
info->size = ARRAY_SIZE(dg2_mocs_desc);
info->table = dg2_mocs_desc;
info->uc_index = 1;
- info->n_entries = XELP_NUM_MOCS_ENTRIES;
+ /*
+ * The last entry is RO on hardware; don't bother verifying what was
+ * written to it when checking later
+ */
+ info->n_entries = XELP_NUM_MOCS_ENTRIES - 1;
info->unused_entries_index = 3;
break;
case XE_DG1:
@@ -462,24 +467,34 @@ static u32 get_entry_control(const struct xe_mocs_info *info,
return info->table[info->unused_entries_index].control_value;
}
-static void __init_mocs_table(struct xe_gt *gt,
- const struct xe_mocs_info *info)
+static bool regs_are_mcr(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ if (xe_gt_is_media_type(gt))
+ return MEDIA_VER(xe) >= 20;
+ else
+ return GRAPHICS_VERx100(xe) >= 1250;
+}
+
+static void __init_mocs_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
unsigned int i;
u32 mocs;
- mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
- drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
- "Unused entries index should have been defined\n");
- for (i = 0;
- i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
- i++) {
- mocs_dbg(&gt_to_xe(gt)->drm, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
+ xe_gt_WARN_ONCE(gt, !info->unused_entries_index,
+ "Unused entries index should have been defined\n");
+
+ mocs_dbg(gt, "mocs entries: %d\n", info->n_entries);
+
+ for (i = 0; i < info->n_entries; i++) {
+ mocs = get_entry_control(info, i);
+
+ mocs_dbg(gt, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
XELP_GLOBAL_MOCS(i).addr, mocs);
- if (GRAPHICS_VERx100(gt_to_xe(gt)) > 1250)
+ if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
else
xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
@@ -510,16 +525,16 @@ static void init_l3cc_table(struct xe_gt *gt,
unsigned int i;
u32 l3cc;
- mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
- for (i = 0;
- i < (info->n_entries + 1) / 2 ?
- (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
- get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
- i++) {
- mocs_dbg(&gt_to_xe(gt)->drm, "LNCFCMOCS[%d] 0x%x 0x%x\n", i, XELP_LNCFCMOCS(i).addr,
- l3cc);
+ mocs_dbg(gt, "l3cc entries: %d\n", info->n_entries);
+
+ for (i = 0; i < (info->n_entries + 1) / 2; i++) {
+ l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
+ get_entry_l3cc(info, 2 * i + 1));
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ mocs_dbg(gt, "LNCFCMOCS[%d] 0x%x 0x%x\n", i,
+ XELP_LNCFCMOCS(i).addr, l3cc);
+
+ if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
else
xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
@@ -552,7 +567,10 @@ void xe_mocs_init(struct xe_gt *gt)
* performed by the GuC.
*/
flags = get_mocs_settings(gt_to_xe(gt), &table);
- mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);
+ mocs_dbg(gt, "flag:0x%x\n", flags);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt, &table);
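Each LNCFCMOCS register holds two table entries, which is why the init_l3cc_table() loop above walks (n_entries + 1) / 2 registers and combines entry 2 * i with entry 2 * i + 1. A tiny sketch of that pairing, assuming the combine helper simply packs the two 16-bit values into the low and high halves of the register:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Pack two 16-bit L3CC values into one 32-bit register image
 * (illustrative; mirrors the pairing used by the loop above). */
static u32 example_l3cc_pair(u16 low, u16 high)
{
	return FIELD_PREP(GENMASK(15, 0), low) |
	       FIELD_PREP(GENMASK(31, 16), high);
}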
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 110b69864656..ceb8345cbca6 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -48,6 +48,13 @@ module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
"Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+#ifdef CONFIG_PCI_IOV
+module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
+MODULE_PARM_DESC(max_vfs,
+ "Limit number of Virtual Functions (VFs) that could be managed. "
+ "(0 = no VFs [default]; N = allow up to N VFs)");
+#endif
+
struct init_funcs {
int (*init)(void);
void (*exit)(void);
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 88ef0e8b2bfd..b369984f08ec 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -18,6 +18,9 @@ struct xe_modparam {
char *huc_firmware_path;
char *gsc_firmware_path;
char *force_probe;
+#ifdef CONFIG_PCI_IOV
+ unsigned int max_vfs;
+#endif
};
extern struct xe_modparam xe_modparam;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index e148934d554b..d5b516f115ad 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -142,6 +142,7 @@ static const struct xe_pat_table_entry xe2_pat_table[] = {
/* Special PAT values programmed outside the main table */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
+static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
@@ -174,7 +175,6 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -192,7 +192,6 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -205,7 +204,6 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -225,7 +223,6 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -238,7 +235,6 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -256,7 +252,6 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -269,7 +264,6 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -292,7 +286,6 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
/*
@@ -310,6 +303,9 @@ static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
{
program_pat_mcr(gt, table, n_entries);
xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
+
+ if (IS_DGFX(gt_to_xe(gt)))
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
@@ -317,6 +313,9 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
{
program_pat(gt, table, n_entries);
xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
+
+ if (IS_DGFX(gt_to_xe(gt)))
+ xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -325,7 +324,6 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
int i, err;
u32 pat;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -370,7 +368,6 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -438,6 +435,10 @@ void xe_pat_init_early(struct xe_device *xe)
/* VFs can't program nor dump PAT settings */
if (IS_SRIOV_VF(xe))
xe->pat.ops = NULL;
+
+ xe_assert(xe, !xe->pat.ops || xe->pat.ops->dump);
+ xe_assert(xe, !xe->pat.ops || xe->pat.ops->program_graphics);
+ xe_assert(xe, !xe->pat.ops || MEDIA_VER(xe) < 13 || xe->pat.ops->program_media);
}
void xe_pat_init(struct xe_gt *gt)
@@ -457,7 +458,7 @@ void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- if (!xe->pat.ops->dump)
+ if (!xe->pat.ops)
return;
xe->pat.ops->dump(gt, p);
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 557f2d88a8c1..f326dbb1cecd 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -174,7 +174,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG",
+ .name = "Xe2_LPG / Xe2_HPG",
XE2_GFX_FEATURES,
};
@@ -185,8 +185,8 @@ static const struct xe_media_desc media_xem = {
.rel = 0,
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0),
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xehpm = {
@@ -195,21 +195,23 @@ static const struct xe_media_desc media_xehpm = {
.rel = 55,
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_VECS1),
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xelpmp = {
.name = "Xe_LPM+",
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_GSCCS0)
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
+ BIT(XE_HW_ENGINE_GSCCS0)
};
static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM",
+ .name = "Xe2_LPM / Xe2_HPM",
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
};
static const struct xe_device_desc tgl_desc = {
@@ -333,6 +335,13 @@ static const struct xe_device_desc mtl_desc = {
static const struct xe_device_desc lnl_desc = {
PLATFORM(XE_LUNARLAKE),
+ .has_display = true,
+ .require_force_probe = true,
+};
+
+static const struct xe_device_desc bmg_desc __maybe_unused = {
+ DGFX_FEATURES,
+ PLATFORM(XE_BATTLEMAGE),
.require_force_probe = true,
};
@@ -343,12 +352,15 @@ __diag_pop();
static const struct gmdid_map graphics_ip_map[] = {
{ 1270, &graphics_xelpg },
{ 1271, &graphics_xelpg },
+ { 1274, &graphics_xelpg }, /* Xe_LPG+ */
+ { 2001, &graphics_xe2 },
{ 2004, &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
{ 1300, &media_xelpmp },
+ { 1301, &media_xe2 },
{ 2000, &media_xe2 },
};
@@ -737,8 +749,6 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- xe_sriov_probe_early(xe, desc->has_sriov);
-
err = xe_device_probe_early(xe);
if (err)
return err;
@@ -774,18 +784,26 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
str_yes_no(xe_device_has_sriov(xe)),
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
- xe_pm_init_early(xe);
+ err = xe_pm_init_early(xe);
+ if (err)
+ return err;
err = xe_device_probe(xe);
if (err)
return err;
- xe_pm_init(xe);
+ err = xe_pm_init(xe);
+ if (err)
+ goto err_driver_cleanup;
drm_dbg(&xe->drm, "d3cold: capable=%s\n",
str_yes_no(xe->d3cold.capable));
return 0;
+
+err_driver_cleanup:
+ xe_pci_remove(pdev);
+ return err;
}
static void xe_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index b324dc2a5deb..c010ef16fbf5 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- lockdep_assert_held(&gt->pcode.lock);
-
err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
@@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
- unsigned int timeout_ms, bool return_data,
- bool atomic)
+static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
{
int err;
if (gt_to_xe(gt)->info.skip_pcode)
return 0;
- lockdep_assert_held(&gt->pcode.lock);
-
if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
@@ -74,7 +71,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
- timeout_ms * 1000, NULL, atomic);
+ timeout_ms * USEC_PER_MSEC, NULL, atomic);
if (err)
return err;
@@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
return pcode_mailbox_status(gt);
}
+static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
+{
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ lockdep_assert_held(&gt->pcode.lock);
+
+ return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+}
+
int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
{
int err;
@@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
return err;
}
-static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status, bool atomic, int timeout_us)
+static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
for (slept = 0; slept < timeout_us; slept += wait) {
- *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
- atomic);
+ if (locked)
+ *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
+ else
+ *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
@@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- false, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@@ -177,8 +190,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
"PCODE timeout, retrying with preemption disabled\n");
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ true, timeout_base_ms * 1000, true);
preempt_enable();
out:
@@ -238,59 +251,71 @@ unlock:
}
/**
- * xe_pcode_init - Ensure PCODE is initialized
- * @gt: gt instance
+ * xe_pcode_ready - Ensure PCODE is initialized
+ * @xe: xe instance
+ * @locked: true if lock held, false otherwise
*
- * This function ensures that PCODE is properly initialized. To be called during
- * probe and resume paths.
+ * The PCODE init mailbox is polled only on the root GT of the root tile,
+ * since the root tile reports initialization as complete only after all
+ * tiles have finished their initialization.
+ * Called without locks during early probe, and with locks held in the
+ * resume path.
*
- * It returns 0 on success, and -error number on failure.
+ * Returns 0 on success, and a negative error code on failure.
*/
-int xe_pcode_init(struct xe_gt *gt)
+int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (xe->info.skip_pcode)
return 0;
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!IS_DGFX(xe))
return 0;
- mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
- DGFX_INIT_STATUS_COMPLETE,
- DGFX_INIT_STATUS_COMPLETE,
- &status, false, timeout_us);
- mutex_unlock(&gt->pcode.lock);
+ if (locked)
+ mutex_lock(&gt->pcode.lock);
+
+ ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ DGFX_INIT_STATUS_COMPLETE,
+ DGFX_INIT_STATUS_COMPLETE,
+ &status, false, timeout_us, locked);
+
+ if (locked)
+ mutex_unlock(&gt->pcode.lock);
if (ret)
- drm_err(&gt_to_xe(gt)->drm,
+ drm_err(&xe->drm,
"PCODE initialization timedout after: 3 min\n");
return ret;
}
/**
- * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
+ * xe_pcode_init - initialize components of PCODE
* @gt: gt instance
*
- * This function initializes the xe_pcode component, and when needed, it ensures
- * that PCODE has properly performed its initialization and it is really ready
- * to go. To be called once only during probe.
- *
- * It returns 0 on success, and -error number on failure.
+ * This function initializes the xe_pcode component.
+ * To be called once only during probe.
*/
-int xe_pcode_probe(struct xe_gt *gt)
+void xe_pcode_init(struct xe_gt *gt)
{
drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+}
- if (gt_to_xe(gt)->info.skip_pcode)
- return 0;
-
- if (!IS_DGFX(gt_to_xe(gt)))
- return 0;
-
- return xe_pcode_init(gt);
+/**
+ * xe_pcode_probe_early - Check PCODE initialization status
+ * @xe: xe instance
+ *
+ * This function checks the initialization status of PCODE.
+ * To be called only once, during early probe, without locks.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+int xe_pcode_probe_early(struct xe_device *xe)
+{
+ return xe_pcode_ready(xe, false);
}
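The split into __pcode_mailbox_rw() plus a locked flag lets the same polling loop run both before the pcode mutex is usable (early probe) and with it taken (resume). A sketch of the two intended call patterns, assuming only the prototypes introduced here; the wrapper names are illustrative:

#include "xe_pcode.h"

/* Early probe: no locks are available yet. */
static int example_probe_early(struct xe_device *xe)
{
	return xe_pcode_ready(xe, false);
}

/* Resume: with locked=true, xe_pcode_ready() takes the pcode mutex itself. */
static int example_resume_check(struct xe_device *xe)
{
	return xe_pcode_ready(xe, true);
}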
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 08cb1d047cba..3f54c6d2a57d 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -8,9 +8,11 @@
#include <linux/types.h>
struct xe_gt;
+struct xe_device;
-int xe_pcode_probe(struct xe_gt *gt);
-int xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_gt *gt);
+int xe_pcode_probe_early(struct xe_device *xe);
+int xe_pcode_ready(struct xe_device *xe, bool locked);
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
u32 max_gt_freq);
int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index 553f53dbd093..79b7042c4534 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -22,6 +22,7 @@ enum xe_platform {
XE_PVC,
XE_METEORLAKE,
XE_LUNARLAKE,
+ XE_BATTLEMAGE,
};
enum xe_subplatform {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 53b3b0b019ac..37fbeda12d3b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -25,23 +25,55 @@
/**
* DOC: Xe Power Management
*
- * Xe PM shall be guided by the simplicity.
- * Use the simplest hook options whenever possible.
- * Let's not reinvent the runtime_pm references and hooks.
- * Shall have a clear separation of display and gt underneath this component.
+ * Xe PM implements the main routines for both system level suspend states and
+ * for the opportunistic runtime suspend states.
*
- * What's next:
+ * System Level Suspend (S-States) - In general this is OS-initiated suspend
+ * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
+ * RAM) or S4 (suspend to disk). `xe_pm_suspend` and `xe_pm_resume` are the
+ * entry points for suspending to and resuming from these states.
*
- * For now s2idle and s3 are only working in integrated devices. The next step
- * is to iterate through all VRAM's BO backing them up into the system memory
- * before allowing the system suspend.
+ * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
+ * power state D3, controlled by the PCI subsystem and ACPI with help from the
+ * runtime_pm infrastructure.
+ * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep
+ * memory alive and allow a quicker, low-latency resume, or D3cold, where Vcc
+ * power is off for better power savings.
+ * Vcc for the PCI hierarchy can only be controlled at the PCI root port
+ * level, while the device driver can be behind multiple bridges/switches and
+ * paired with other devices. For this reason, the PCI subsystem cannot perform
+ * the transition towards D3cold on its own; the lowest runtime PM state it
+ * can reach is D3hot. If all the paired devices under the same root port are
+ * in D3hot, ACPI then assists and runs its own methods (_PR3 and _OFF) to
+ * perform the transition from D3hot to D3cold. Xe may disallow this
+ * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
+ * suspend, based on runtime conditions such as VRAM usage, for instance to
+ * keep resume quick and low latency.
*
- * Also runtime_pm needs to be here from the beginning.
+ * Runtime PM - This infrastructure provided by the Linux kernel allows the
+ * device drivers to indicate when they can be runtime suspended, so the device
+ * can be put into D3 (if supported), or allow deeper package sleep states
+ * (PC-states) and/or other low level power states. The Xe PM component provides
+ * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
+ * subsystem will call before the transition to/from runtime suspend.
*
- * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
- * and no wait boost. Frequency optimizations should come on a next stage.
+ * Also, Xe PM provides get and put functions that the Xe driver will use to
+ * indicate activity. In order to avoid locking complications with the memory
+ * management, whenever possible, these get and put functions need to be called
+ * from the higher/outer levels.
+ * The main cases that need to be protected from the outer levels are: IOCTL,
+ * sysfs, debugfs, dma-buf sharing, GPU execution.
+ *
+ * This component is not responsible for GT idleness (RC6) nor GT frequency
+ * management (RPS).
*/
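The outer-level bracketing described above looks roughly like the sketch below; the ioctl body and its helper are hypothetical and only show where the get/put pair belongs relative to the work that touches the hardware:

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "xe_device.h"
#include "xe_pm.h"

static int example_do_hw_work(struct xe_device *xe, void *data)
{
	return 0;	/* stand-in for real hardware access */
}

static int example_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	int ret;

	xe_pm_runtime_get(xe);			/* wake the device first */
	ret = example_do_hw_work(xe, data);	/* hypothetical helper */
	xe_pm_runtime_put(xe);			/* allow runtime suspend again */

	return ret;
}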
+#ifdef CONFIG_LOCKDEP
+struct lockdep_map xe_pm_runtime_lockdep_map = {
+ .name = "xe_pm_runtime_lockdep_map"
+};
+#endif
+
/**
* xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
* @xe: xe device instance
@@ -54,13 +86,15 @@ int xe_pm_suspend(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Suspending device\n");
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- return err;
+ goto err;
xe_display_pm_suspend(xe);
@@ -68,7 +102,7 @@ int xe_pm_suspend(struct xe_device *xe)
err = xe_gt_suspend(gt);
if (err) {
xe_display_pm_resume(xe);
- return err;
+ goto err;
}
}
@@ -76,7 +110,11 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ drm_dbg(&xe->drm, "Device suspended\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+ return err;
}
/**
@@ -92,14 +130,14 @@ int xe_pm_resume(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Resuming device\n");
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- return err;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ return err;
xe_display_pm_resume_early(xe);
@@ -109,7 +147,7 @@ int xe_pm_resume(struct xe_device *xe)
*/
err = xe_bo_restore_kernel(xe);
if (err)
- return err;
+ goto err;
xe_irq_resume(xe);
@@ -120,9 +158,13 @@ int xe_pm_resume(struct xe_device *xe)
err = xe_bo_restore_user(xe);
if (err)
- return err;
+ goto err;
+ drm_dbg(&xe->drm, "Device resumed\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+ return err;
}
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
@@ -172,30 +214,60 @@ static void xe_pm_runtime_init(struct xe_device *xe)
pm_runtime_put(dev);
}
-void xe_pm_init_early(struct xe_device *xe)
+int xe_pm_init_early(struct xe_device *xe)
{
+ int err;
+
INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
- drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+
+ err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+ if (err)
+ return err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
+ if (err)
+ return err;
+
+ return 0;
}
-void xe_pm_init(struct xe_device *xe)
+/**
+ * xe_pm_init - Initialize Xe Power Management
+ * @xe: xe device instance
+ *
+ * This component is responsible for System and Device sleep states.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_pm_init(struct xe_device *xe)
{
+ int err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
- return;
-
- drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
+ return 0;
xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
if (xe->d3cold.capable) {
- xe_device_sysfs_init(xe);
- xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ return err;
+
+ err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ if (err)
+ return err;
}
xe_pm_runtime_init(xe);
+
+ return 0;
}
+/**
+ * xe_pm_runtime_fini - Finalize Runtime PM
+ * @xe: xe device instance
+ */
void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -225,6 +297,28 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
return READ_ONCE(xe->pm_callback_task);
}
+/**
+ * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
+ * @xe: xe device instance
+ *
+ * This does not provide any guarantee that the device is going to remain
+ * suspended, as it might be racing with the runtime state transitions.
+ * It should only be used as a best-effort assertion, for instance to check
+ * that we are not in the sleep state while trying to access some memory.
+ *
+ * Returns true if PCI device is suspended, false otherwise.
+ */
+bool xe_pm_runtime_suspended(struct xe_device *xe)
+{
+ return pm_runtime_suspended(xe->drm.dev);
+}
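A minimal sketch of the best-effort assertion use mentioned above; the wrapper name is hypothetical, only xe_pm_runtime_suspended() comes from this patch.

```c
/* Hypothetical debug check before touching device memory */
static void example_assert_awake(struct xe_device *xe)
{
	/* Best effort only: the runtime state can change right after the check */
	drm_WARN_ON(&xe->drm, xe_pm_runtime_suspended(xe));
}
```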
+
+/**
+ * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
+ * @xe: xe device instance
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
struct xe_bo *bo, *on;
@@ -232,18 +326,15 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
u8 id;
int err = 0;
- if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
- return -EBUSY;
-
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
/*
- * The actual xe_device_mem_access_put() is always async underneath, so
+ * The actual xe_pm_runtime_put() is always async underneath, so
* exactly where that is called should makes no difference to us. However
* we still need to be very careful with the locks that this callback
* acquires and the locks that are acquired and held by any callers of
- * xe_device_mem_access_get(). We already have the matching annotation
+ * xe_runtime_pm_get(). We already have the matching annotation
* on that side, but we also need it here. For example lockdep should be
* able to tell us if the following scenario is in theory possible:
*
@@ -251,15 +342,15 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
* lock(A) |
* | xe_pm_runtime_suspend()
* | lock(A)
- * xe_device_mem_access_get() |
+ * xe_pm_runtime_get() |
*
* This will clearly deadlock since rpm core needs to wait for
* xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
* on CPU0 which prevents CPU1 making forward progress. With the
- * annotation here and in xe_device_mem_access_get() lockdep will see
+ * annotation here and in xe_pm_runtime_get() lockdep will see
* the potential lock inversion and give us a nice splat.
*/
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -285,11 +376,17 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_irq_suspend(xe);
out:
- lock_map_release(&xe_device_mem_access_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
xe_pm_write_callback_task(xe, NULL);
return err;
}
+/**
+ * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
+ * @xe: xe device instance
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_runtime_resume(struct xe_device *xe)
{
struct xe_gt *gt;
@@ -299,7 +396,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
/*
* It can be possible that xe has allowed d3cold but other pcie devices
@@ -310,11 +407,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
if (xe->d3cold.allowed && xe->d3cold.power_lost) {
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- goto out;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ goto out;
/*
* This only restores pinned memory which is the memory
@@ -336,27 +431,147 @@ int xe_pm_runtime_resume(struct xe_device *xe)
goto out;
}
out:
- lock_map_release(&xe_device_mem_access_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
xe_pm_write_callback_task(xe, NULL);
return err;
}
-int xe_pm_runtime_get(struct xe_device *xe)
+/*
+ * For places where resume is synchronous it can be quite easy to deadlock
+ * if we are not careful. Also in practice it might be quite timing
+ * sensitive to ever see the 0 -> 1 transition with the caller's locks
+ * held, so deadlocks might exist but are hard for lockdep to ever see.
+ * With this in mind, help lockdep learn about the potentially scary
+ * stuff that can happen inside the runtime_resume callback by acquiring
+ * a dummy lock (it doesn't protect anything and gets compiled out on
+ * non-debug builds). Lockdep then only needs to see the
+ * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * For example if the (callers_locks) are ever grabbed in the
+ * runtime_resume callback, lockdep should give us a nice splat.
+ */
+static void pm_runtime_lockdep_prime(void)
+{
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
+}
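To make the comment above concrete, here is a hypothetical caller pattern that the priming lets lockdep reason about: if `some_lock` were ever also acquired inside the runtime_resume callback, lockdep could now report the inversion. The function and lock names are illustrative only.

```c
/* Hypothetical caller: a lock held across a synchronous resume */
static void example_locked_caller(struct xe_device *xe, struct mutex *some_lock)
{
	mutex_lock(some_lock);
	xe_pm_runtime_get(xe);	/* primes xe_pm_runtime_lockdep_map before resuming */
	/* ... work that needs both the lock and an awake device ... */
	xe_pm_runtime_put(xe);
	mutex_unlock(some_lock);
}
```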
+
+/**
+ * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
+ * @xe: xe device instance
+ */
+void xe_pm_runtime_get(struct xe_device *xe)
{
- return pm_runtime_get_sync(xe->drm.dev);
+ pm_runtime_get_noresume(xe->drm.dev);
+
+ if (xe_pm_read_callback_task(xe) == current)
+ return;
+
+ pm_runtime_lockdep_prime();
+ pm_runtime_resume(xe->drm.dev);
}
-int xe_pm_runtime_put(struct xe_device *xe)
+/**
+ * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
+ * @xe: xe device instance
+ */
+void xe_pm_runtime_put(struct xe_device *xe)
{
- pm_runtime_mark_last_busy(xe->drm.dev);
- return pm_runtime_put(xe->drm.dev);
+ if (xe_pm_read_callback_task(xe) == current) {
+ pm_runtime_put_noidle(xe->drm.dev);
+ } else {
+ pm_runtime_mark_last_busy(xe->drm.dev);
+ pm_runtime_put(xe->drm.dev);
+ }
}
+/**
+ * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
+ * @xe: xe device instance
+ *
+ * Returns: Any number greater than or equal to 0 for success, negative error
+ * code otherwise.
+ */
+int xe_pm_runtime_get_ioctl(struct xe_device *xe)
+{
+ if (WARN_ON(xe_pm_read_callback_task(xe) == current))
+ return -ELOOP;
+
+ pm_runtime_lockdep_prime();
+ return pm_runtime_get_sync(xe->drm.dev);
+}
+
+/**
+ * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
+ * @xe: xe device instance
+ *
+ * Returns: Any number greater than or equal to 0 for success, negative error
+ * code otherwise.
+ */
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
return pm_runtime_get_if_active(xe->drm.dev);
}
+/**
+ * xe_pm_runtime_get_if_in_use - Get a runtime_pm reference and resume if needed
+ * @xe: xe device instance
+ *
+ * Returns: True if device is awake and the reference was taken, false otherwise.
+ */
+bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
+{
+ if (xe_pm_read_callback_task(xe) == current) {
+ /* The device is awake, grab the ref and move on */
+ pm_runtime_get_noresume(xe->drm.dev);
+ return true;
+ }
+
+ return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
+}
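A hedged sketch of the opportunistic pattern this helper enables; the wrapper name is made up.

```c
/* Hypothetical opportunistic access: only touch HW when already awake */
static void example_opportunistic_access(struct xe_device *xe)
{
	if (!xe_pm_runtime_get_if_in_use(xe))
		return;	/* device idle or suspended: skip the access */

	/* ... read counters/registers without forcing a resume ... */

	xe_pm_runtime_put(xe);
}
```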
+
+/**
+ * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
+ * @xe: xe device instance
+ *
+ * This function should be used in inner places where it is surely already
+ * protected by outer-bound callers of `xe_pm_runtime_get`.
+ * It will warn if not protected.
+ * The reference should always be put back after this function, since it
+ * bumps the usage counter in either case.
+ */
+void xe_pm_runtime_get_noresume(struct xe_device *xe)
+{
+ bool ref;
+
+ ref = xe_pm_runtime_get_if_in_use(xe);
+
+ if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
+ pm_runtime_get_noresume(xe->drm.dev);
+}
+
+/**
+ * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
+ * @xe: xe device instance
+ *
+ * Returns: True if device is awake and the reference was taken, false otherwise.
+ */
+bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
+{
+ if (xe_pm_read_callback_task(xe) == current) {
+ /* The device is awake, grab the ref and move on */
+ pm_runtime_get_noresume(xe->drm.dev);
+ return true;
+ }
+
+ pm_runtime_lockdep_prime();
+ return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
+}
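For contrast with the opportunistic variant above, a hypothetical sysfs/debugfs-style read that must wake the device could look like the sketch below; the helper shape, the -EIO fallback and the output string are assumptions, not part of the patch.

```c
/* Hypothetical helper for a read path that requires an awake device */
static ssize_t example_read_status(struct xe_device *xe, char *buf)
{
	ssize_t len;

	if (!xe_pm_runtime_resume_and_get(xe))
		return -EIO;	/* assumed error code, not from the patch */

	len = sysfs_emit(buf, "awake\n");

	xe_pm_runtime_put(xe);
	return len;
}
```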
+
+/**
+ * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
+ * @xe: xe device instance
+ */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -371,6 +586,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
}
}
+/**
+ * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
+ * @xe: xe device instance
+ * @threshold: VRAM size in MiB for the D3cold threshold
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
struct ttm_resource_manager *man;
@@ -395,6 +617,13 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
return 0;
}
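As a usage illustration, a caller could tune the D3cold policy as in the sketch below; the 512 MiB value and the wrapper name are hypothetical.

```c
/* Hypothetical: only allow D3cold while less than 512 MiB of VRAM is in use */
static int example_relax_d3cold(struct xe_device *xe)
{
	return xe_pm_set_vram_threshold(xe, 512);
}
```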
+/**
+ * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
+ * @xe: xe device instance
+ *
+ * To be called from the runtime_pm idle callback.
+ * Checks all the D3Cold conditions ahead of runtime suspend.
+ */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
struct ttm_resource_manager *man;
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 64a97c6726a7..18b0613fe57b 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -20,14 +20,19 @@ struct xe_device;
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);
-void xe_pm_init_early(struct xe_device *xe);
-void xe_pm_init(struct xe_device *xe);
+int xe_pm_init_early(struct xe_device *xe);
+int xe_pm_init(struct xe_device *xe);
void xe_pm_runtime_fini(struct xe_device *xe);
+bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
-int xe_pm_runtime_get(struct xe_device *xe);
-int xe_pm_runtime_put(struct xe_device *xe);
+void xe_pm_runtime_get(struct xe_device *xe);
+int xe_pm_runtime_get_ioctl(struct xe_device *xe);
+void xe_pm_runtime_put(struct xe_device *xe);
int xe_pm_runtime_get_if_active(struct xe_device *xe);
+bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);
+void xe_pm_runtime_get_noresume(struct xe_device *xe);
+bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7bce2a332603..7d50c6e89d8e 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -49,7 +49,7 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
struct xe_exec_queue *q = pfence->q;
pfence->error = q->ops->suspend(q);
- queue_work(system_unbound_wq, &pfence->preempt_work);
+ queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
return true;
}
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 7f54bc3e389d..5b7930f46cf3 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -5,6 +5,7 @@
#include "xe_pt.h"
+#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
@@ -108,11 +109,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
- XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_NO_RESV_EVICT |
- XE_BO_PAGETABLE);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NO_RESV_EVICT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -618,7 +619,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
int ret;
- if (vma && (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
+ if ((vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
(is_devmem || !IS_DGFX(xe)))
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
@@ -1135,8 +1136,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
spin_lock_irq(&gt->tlb_invalidation.lock);
dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
&gt->tlb_invalidation.lock,
- gt->tlb_invalidation.fence_context,
- ++gt->tlb_invalidation.fence_seqno);
+ dma_fence_context_alloc(1), 1);
spin_unlock_irq(&gt->tlb_invalidation.lock);
INIT_LIST_HEAD(&ifence->base.link);
@@ -1236,6 +1236,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
if (err)
goto err;
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ goto err;
+
xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
@@ -1254,11 +1261,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
* non-faulting LR, in particular on user-space batch buffer chaining,
* it needs to be done here.
*/
- if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
- (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
+ if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
+ } else if (rebind && !xe_vm_in_lr_mode(vm)) {
+ /* Also bump if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -1297,7 +1306,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
}
/* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
+ dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
last_munmap_rebind ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
@@ -1576,6 +1585,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
struct dma_fence *fence = NULL;
struct invalidation_fence *ifence;
struct xe_range_fence *rfence;
+ int err;
LLIST_HEAD(deferred);
@@ -1593,6 +1603,12 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
num_entries);
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ return ERR_PTR(err);
+
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 92bb06c0586e..df407d73e5f5 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -12,6 +12,7 @@
#include <drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
@@ -132,7 +133,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id > XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= XE_MAX_GT_PER_TILE)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
@@ -147,8 +148,8 @@ query_engine_cycles(struct xe_device *xe,
if (!hwe)
return -EINVAL;
- xe_device_mem_access_get(xe);
- xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL))
+ return -EIO;
__read_timestamps(gt,
RING_TIMESTAMP(hwe->mmio_base),
@@ -159,7 +160,6 @@ query_engine_cycles(struct xe_device *xe,
cpu_clock);
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(xe);
resp.width = 36;
/* Only write to the output fields of user query */
@@ -403,6 +403,13 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
BIT(gt_to_tile(gt)->id) << 1;
gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
gt_list->gt_list[id].near_mem_regions;
+
+ gt_list->gt_list[id].ip_ver_major =
+ REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
+ gt_list->gt_list[id].ip_ver_minor =
+ REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
+ gt_list->gt_list[id].ip_ver_rev =
+ REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
}
if (copy_to_user(query_ptr, gt_list, size)) {
@@ -433,9 +440,7 @@ static int query_hwconfig(struct xe_device *xe,
if (!hwconfig)
return -ENOMEM;
- xe_device_mem_access_get(xe);
xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
- xe_device_mem_access_put(xe);
if (copy_to_user(query_ptr, hwconfig, size)) {
kfree(hwconfig);
@@ -544,14 +549,44 @@ query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
break;
}
+ case XE_QUERY_UC_TYPE_HUC: {
+ struct xe_gt *media_gt = NULL;
+ struct xe_huc *huc;
+
+ if (MEDIA_VER(xe) >= 13) {
+ struct xe_tile *tile;
+ u8 gt_id;
+
+ for_each_tile(tile, xe, gt_id) {
+ if (tile->media_gt) {
+ media_gt = tile->media_gt;
+ break;
+ }
+ }
+ } else {
+ media_gt = xe->tiles[0].primary_gt;
+ }
+
+ if (!media_gt)
+ break;
+
+ huc = &media_gt->uc.huc;
+ if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
+ version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
+ break;
+ }
default:
return -EINVAL;
}
- resp.branch_ver = 0;
- resp.major_ver = version->major;
- resp.minor_ver = version->minor;
- resp.patch_ver = version->patch;
+ if (version) {
+ resp.branch_ver = 0;
+ resp.major_ver = version->major;
+ resp.minor_ver = version->minor;
+ resp.patch_ver = version->patch;
+ } else {
+ return -ENODEV;
+ }
if (copy_to_user(query_ptr, &resp, size))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index c4edffcd4a32..d42b3f33bd7a 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -17,6 +17,7 @@
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_sched_job.h"
+#include "xe_sriov.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -219,10 +220,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
- struct xe_vm *vm = job->q->vm;
struct xe_gt *gt = job->q->gt;
- if (vm && vm->batch_invalidate_tlb) {
+ if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
@@ -270,7 +270,6 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
- struct xe_vm *vm = job->q->vm;
dw[i++] = preparser_disable(true);
@@ -282,13 +281,13 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
}
- if (vm && vm->batch_invalidate_tlb)
+ if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
dw[i++] = preparser_disable(false);
- if (!vm || !vm->batch_invalidate_tlb)
+ if (!job->ring_ops_flush_tlb)
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, dw, i);
@@ -317,7 +316,6 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
- struct xe_vm *vm = job->q->vm;
u32 mask_flags = 0;
dw[i++] = preparser_disable(true);
@@ -327,7 +325,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
- i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);
+ i = emit_pipe_invalidate(mask_flags, job->ring_ops_flush_tlb, dw, i);
/* hsdes: 1809175790 */
if (has_aux_ccs(xe))
@@ -370,10 +368,12 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
- /* XXX: Do we need this? Leaving for now. */
- dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(0, dw, i);
- dw[i++] = preparser_disable(false);
+ if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
+ /* XXX: Do we need this? Leaving for now. */
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(0, dw, i);
+ dw[i++] = preparser_disable(false);
+ }
i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 2c4632259edd..8941522b7705 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -48,8 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
sa_manager->bo = NULL;
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 8151ddafb940..cd8a2fba5438 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,6 +5,7 @@
#include "xe_sched_job.h"
+#include <drm/xe_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
@@ -15,6 +16,8 @@
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
+#include "xe_pm.h"
+#include "xe_sync_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
@@ -157,7 +160,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
/* All other jobs require a VM to be open which has a ref */
if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_get(job_to_xe(job));
+ xe_pm_runtime_get_noresume(job_to_xe(job));
xe_device_assert_mem_access(job_to_xe(job));
trace_xe_sched_job_create(job);
@@ -190,7 +193,7 @@ void xe_sched_job_destroy(struct kref *ref)
container_of(ref, struct xe_sched_job, refcount);
if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_put(job_to_xe(job));
+ xe_pm_runtime_put(job_to_xe(job));
xe_exec_queue_put(job->q);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
@@ -250,6 +253,16 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
void xe_sched_job_arm(struct xe_sched_job *job)
{
+ struct xe_exec_queue *q = job->q;
+ struct xe_vm *vm = q->vm;
+
+ if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
+ (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
+ xe_vm_assert_held(vm);
+ q->tlb_flush_seqno = vm->tlb_flush_seqno;
+ job->ring_ops_flush_tlb = true;
+ }
+
drm_sched_job_arm(&job->drm);
}
@@ -278,6 +291,22 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
return drm_sched_job_add_dependency(&job->drm, fence);
}
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence needs an init
+ * @sync: sync to be used to init the user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync)
+{
+ if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+ return;
+
+ job->user_fence.used = true;
+ job->user_fence.addr = sync->addr;
+ job->user_fence.value = sync->timeline_value;
+}
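A small hedged sketch of how a submission path might wire user fences into a job with the new helper; the loop shape and names are assumptions, and the helper itself ignores non-user-fence syncs.

```c
/* Hypothetical: propagate any user-fence syncs from a submission into the job */
static void example_attach_user_fences(struct xe_sched_job *job,
				       struct xe_sync_entry *syncs,
				       unsigned int num_syncs)
{
	unsigned int i;

	for (i = 0; i < num_syncs; i++)
		xe_sched_job_init_user_fence(job, &syncs[i]);
}
```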
+
struct xe_sched_job_snapshot *
xe_sched_job_snapshot_capture(struct xe_sched_job *job)
{
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index f1a660648cf0..c75018f4660d 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -10,6 +10,7 @@
struct drm_printer;
struct xe_vm;
+struct xe_sync_entry;
#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX
@@ -58,6 +59,8 @@ void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);
int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync);
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index b1d83da50a53..5e12724219fd 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -39,6 +39,8 @@ struct xe_sched_job {
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
u32 migrate_flush_flags;
+ /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
+ bool ring_ops_flush_tlb;
/** @batch_addr: batch buffer address of job */
u64 batch_addr[];
};
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index f295d91886b1..1c3fa84b6adb 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,8 +5,13 @@
#include <drm/drm_managed.h>
+#include "regs/xe_sriov_regs.h"
+
#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -28,10 +33,16 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
}
}
+static bool test_is_vf(struct xe_device *xe)
+{
+ u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+
+ return value & VF_CAP;
+}
+
/**
* xe_sriov_probe_early - Probe a SR-IOV mode.
* @xe: the &xe_device to probe mode on
- * @has_sriov: flag indicating hardware support for SR-IOV
*
* This function should be called only once and as soon as possible during
* driver probe to detect whether we are running a SR-IOV Physical Function
@@ -40,12 +51,17 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
* SR-IOV PF mode detection is based on PCI @dev_is_pf() function.
* SR-IOV VF mode detection is based on dedicated MMIO register read.
*/
-void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov)
+void xe_sriov_probe_early(struct xe_device *xe)
{
enum xe_sriov_mode mode = XE_SRIOV_MODE_NONE;
+ bool has_sriov = xe->info.has_sriov;
- /* TODO: replace with proper mode detection */
- xe_assert(xe, !has_sriov);
+ if (has_sriov) {
+ if (test_is_vf(xe))
+ mode = XE_SRIOV_MODE_VF;
+ else if (xe_sriov_pf_readiness(xe))
+ mode = XE_SRIOV_MODE_PF;
+ }
xe_assert(xe, !xe->sriov.__mode);
xe->sriov.__mode = mode;
@@ -78,6 +94,13 @@ int xe_sriov_init(struct xe_device *xe)
if (!IS_SRIOV(xe))
return 0;
+ if (IS_SRIOV_PF(xe)) {
+ int err = xe_sriov_pf_init_early(xe);
+
+ if (err)
+ return err;
+ }
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
@@ -85,3 +108,34 @@ int xe_sriov_init(struct xe_device *xe)
return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
}
+
+/**
+ * xe_sriov_print_info - Print basic SR-IOV information.
+ * @xe: the &xe_device to print info from
+ * @p: the &drm_printer
+ *
+ * Print SR-IOV related information into provided DRM printer.
+ */
+void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p)
+{
+ drm_printf(p, "supported: %s\n", str_yes_no(xe_device_has_sriov(xe)));
+ drm_printf(p, "enabled: %s\n", str_yes_no(IS_SRIOV(xe)));
+ drm_printf(p, "mode: %s\n", xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+}
+
+/**
+ * xe_sriov_function_name() - Get SR-IOV Function name.
+ * @n: the Function number (identifier) to get name of
+ * @buf: the buffer to format to
+ * @size: size of the buffer (shall be at least 5 bytes)
+ *
+ * Return: formatted function name ("PF" or "VF%u").
+ */
+const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
+{
+ if (n)
+ snprintf(buf, size, "VF%u", n);
+ else
+ strscpy(buf, "PF", size);
+ return buf;
+}
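A hypothetical logging call showing the intended buffer handling for this helper; the surrounding function is made up.

```c
/* Hypothetical: log which SR-IOV function is being configured */
static void example_log_function(struct xe_device *xe, unsigned int vfid)
{
	char name[8];	/* large enough for "PF" or "VF###" */

	drm_info(&xe->drm, "configuring %s\n",
		 xe_sriov_function_name(vfid, name, sizeof(name)));
}
```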
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 1545552162c9..486bb21c3256 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -10,9 +10,13 @@
#include "xe_device_types.h"
#include "xe_sriov_types.h"
+struct drm_printer;
+
const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode);
+const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
-void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov);
+void xe_sriov_probe_early(struct xe_device *xe);
+void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
new file mode 100644
index 000000000000..0f721ae17b26
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_module.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_printk.h"
+
+static unsigned int wanted_max_vfs(struct xe_device *xe)
+{
+ return xe_modparam.max_vfs;
+}
+
+static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
+{
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+
+ err = pci_sriov_set_totalvfs(pdev, limit);
+ if (err)
+ xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
+ limit, ERR_PTR(err));
+ return err;
+}
+
+static bool pf_continue_as_native(struct xe_device *xe, const char *why)
+{
+ xe_sriov_dbg(xe, "%s, continuing as native\n", why);
+ pf_reduce_totalvfs(xe, 0);
+ return false;
+}
+
+/**
+ * xe_sriov_pf_readiness - Check if PF functionality can be enabled.
+ * @xe: the &xe_device to check
+ *
+ * This function is called as part of the SR-IOV probe to validate if all
+ * PF prerequisites are satisfied and we can continue with enabling PF mode.
+ *
+ * Return: true if the PF mode can be turned on.
+ */
+bool xe_sriov_pf_readiness(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs);
+
+ xe_assert(xe, totalvfs <= U16_MAX);
+
+ if (!dev_is_pf(dev))
+ return false;
+
+ if (!xe_device_uc_enabled(xe))
+ return pf_continue_as_native(xe, "GuC submission disabled");
+
+ if (!newlimit)
+ return pf_continue_as_native(xe, "all VFs disabled");
+
+ pf_reduce_totalvfs(xe, newlimit);
+
+ xe->sriov.pf.device_total_vfs = totalvfs;
+ xe->sriov.pf.driver_max_vfs = newlimit;
+
+ return true;
+}
+
+/**
+ * xe_sriov_pf_init_early - Initialize SR-IOV PF specific data.
+ * @xe: the &xe_device to initialize
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_init_early(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+}
+
+/**
+ * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
+ * @xe: the &xe_device to print info from
+ * @p: the &drm_printer
+ *
+ * Print SR-IOV PF related information into provided DRM printer.
+ */
+void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
+ drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
+ drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
new file mode 100644
index 000000000000..d1220e70e1c0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_H_
+#define _XE_SRIOV_PF_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+
+#ifdef CONFIG_PCI_IOV
+bool xe_sriov_pf_readiness(struct xe_device *xe);
+int xe_sriov_pf_init_early(struct xe_device *xe);
+void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
+#else
+static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
+{
+ return false;
+}
+
+static inline int xe_sriov_pf_init_early(struct xe_device *xe)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
new file mode 100644
index 000000000000..7d156ba82479
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_HELPERS_H_
+#define _XE_SRIOV_PF_HELPERS_H_
+
+#include "xe_assert.h"
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_types.h"
+
+/**
+ * xe_sriov_pf_assert_vfid() - warn if &vfid is not a supported VF number when debugging.
+ * @xe: the PF &xe_device to assert on
+ * @vfid: the VF number to assert
+ *
+ * Assert that &xe represents the Physical Function (PF) device and the provided
+ * &vfid is within the range of supported VF numbers (up to the maximum number of
+ * VFs that the driver can support, including VF0 that represents the PF itself).
+ *
+ * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ */
+#define xe_sriov_pf_assert_vfid(xe, vfid) \
+ xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
+
+/**
+ * xe_sriov_pf_get_totalvfs() - Get maximum number of VFs that driver can support.
+ * @xe: the &xe_device to query (shall be PF)
+ *
+ * Return: Maximum number of VFs that this PF driver supports.
+ */
+static inline int xe_sriov_pf_get_totalvfs(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ return xe->sriov.pf.driver_max_vfs;
+}
+
+static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ return &xe->sriov.pf.master_lock;
+}
+
+#endif
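To show how the helpers above are meant to compose, here is a hypothetical PF-only routine combining the VF-number assert and the master lock; only the xe_sriov_pf_* helpers come from this patch.

```c
/* Hypothetical PF-only helper following the assert + master_lock pattern */
static void example_update_vf_config(struct xe_device *xe, unsigned int vfid)
{
	xe_sriov_pf_assert_vfid(xe, vfid);

	mutex_lock(xe_sriov_pf_master_mutex(xe));
	/* ... modify the per-VF configuration protected by master_lock ... */
	mutex_unlock(xe_sriov_pf_master_mutex(xe));
}
```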
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index 1a138108d139..c7b7ad4af5c8 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -7,6 +7,8 @@
#define _XE_SRIOV_TYPES_H_
#include <linux/build_bug.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
/**
* VFID - Virtual Function Identifier
@@ -37,4 +39,21 @@ enum xe_sriov_mode {
};
static_assert(XE_SRIOV_MODE_NONE);
+/**
+ * struct xe_device_pf - Xe PF related data
+ *
+ * The data in this structure is valid only if the driver is running in the
+ * @XE_SRIOV_MODE_PF mode.
+ */
+struct xe_device_pf {
+ /** @device_total_vfs: Maximum number of VFs supported by the device. */
+ u16 device_total_vfs;
+
+ /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
+ u16 driver_max_vfs;
+
+ /** @master_lock: protects all VFs configurations across GTs */
+ struct mutex master_lock;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 02c9577fe418..65f1f1628235 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -224,8 +224,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
return 0;
}
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
- struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
return;
@@ -254,10 +253,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
user_fence_put(sync->ufence);
dma_fence_put(fence);
}
- } else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
- job->user_fence.used = true;
- job->user_fence.addr = sync->addr;
- job->user_fence.value = sync->timeline_value;
}
}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 0fd0d51208e6..3e03396af2c6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -26,7 +26,6 @@ int xe_sync_entry_wait(struct xe_sync_entry *sync);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
- struct xe_sched_job *job,
struct dma_fence *fence);
void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
struct dma_fence *
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0650b2fa75ef..15ea0a942f67 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -160,24 +160,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
{
int err;
- xe_device_mem_access_get(tile_to_xe(tile));
-
err = tile_ttm_mgr_init(tile);
if (err)
- goto err_mem_access;
+ return err;
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
- if (IS_ERR(tile->mem.kernel_bb_pool)) {
- err = PTR_ERR(tile->mem.kernel_bb_pool);
- goto err_mem_access;
- }
+ if (IS_ERR(tile->mem.kernel_bb_pool))
+ return PTR_ERR(tile->mem.kernel_bb_pool);
+
xe_wa_apply_tile_workarounds(tile);
- xe_tile_sysfs_init(tile);
+ err = xe_tile_sysfs_init(tile);
-err_mem_access:
- xe_device_mem_access_put(tile_to_xe(tile));
- return err;
+ return 0;
}
void xe_tile_migrate_wait(struct xe_tile *tile)
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index 0662968d7bcb..64661403afcd 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include <drm/drm_managed.h>
+#include "xe_pm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_vram_freq.h"
@@ -28,7 +29,7 @@ static void tile_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(tile->sysfs);
}
-void xe_tile_sysfs_init(struct xe_tile *tile)
+int xe_tile_sysfs_init(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
struct device *dev = xe->drm.dev;
@@ -37,7 +38,7 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
kt = kzalloc(sizeof(*kt), GFP_KERNEL);
if (!kt)
- return;
+ return -ENOMEM;
kobject_init(&kt->base, &xe_tile_sysfs_kobj_type);
kt->tile = tile;
@@ -45,16 +46,14 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
if (err) {
kobject_put(&kt->base);
- drm_warn(&xe->drm, "failed to register TILE sysfs directory, err: %d\n", err);
- return;
+ return err;
}
tile->sysfs = &kt->base;
- xe_vram_freq_sysfs_init(tile);
-
- err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
+ err = xe_vram_freq_sysfs_init(tile);
if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
}
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.h b/drivers/gpu/drm/xe/xe_tile_sysfs.h
index e4f065039eba..54a2ba8ba533 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.h
@@ -8,7 +8,7 @@
#include "xe_tile_sysfs_types.h"
-void xe_tile_sysfs_init(struct xe_tile *tile);
+int xe_tile_sysfs_init(struct xe_tile *tile);
static inline struct xe_tile *
kobj_to_tile(struct kobject *kobj)
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 846f14507d5f..2d56cfc09e42 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -258,7 +258,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__field(u32, guc_state)
__field(u32, flags)
__field(int, error)
- __field(u64, fence)
+ __field(struct dma_fence *, fence)
__field(u64, batch_addr)
),
@@ -269,11 +269,11 @@ DECLARE_EVENT_CLASS(xe_sched_job,
atomic_read(&job->q->guc->state);
__entry->flags = job->q->flags;
__entry->error = job->fence->error;
- __entry->fence = (unsigned long)job->fence;
+ __entry->fence = job->fence;
__entry->batch_addr = (u64)job->batch_addr[0];
),
- TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("fence=%p, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__entry->fence, __entry->seqno, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index 3107d2a12426..f77367329760 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -204,9 +204,14 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
{
struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size, io_size, pgsize;
+ u64 stolen_size, io_size;
int err;
+ if (!mgr) {
+ drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
+ return;
+ }
+
if (IS_SRIOV_VF(xe))
stolen_size = 0;
else if (IS_DGFX(xe))
@@ -221,10 +226,6 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
return;
}
- pgsize = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
- if (pgsize < PAGE_SIZE)
- pgsize = PAGE_SIZE;
-
/*
* We don't try to attempt partial visible support for stolen vram,
* since stolen is always at the end of vram, and the BAR size is pretty
@@ -235,7 +236,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
io_size = stolen_size;
err = __xe_ttm_vram_mgr_init(xe, &mgr->base, XE_PL_STOLEN, stolen_size,
- io_size, pgsize);
+ io_size, PAGE_SIZE);
if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
return;
@@ -298,7 +299,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
XE_WARN_ON(IS_DGFX(xe));
/* XXX: Require BO to be mapped to GGTT? */
- if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
return -EIO;
/* GGTT is always contiguously mapped */
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index 3e1fa0c832ca..9844a8edbfe1 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -73,7 +73,10 @@ static void xe_ttm_sys_mgr_del(struct ttm_resource_manager *man,
static void xe_ttm_sys_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
-
+ /*
+ * This function is called by debugfs entry and would require
+ * pm_runtime_{get,put} wrappers around any operation.
+ */
}
static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 115ec745e502..fe3779fdba2c 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -91,7 +91,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
min_page_size = mgr->default_page_size;
if (tbo->page_alignment)
- min_page_size = tbo->page_alignment << PAGE_SHIFT;
+ min_page_size = (u64)tbo->page_alignment << PAGE_SHIFT;
if (WARN_ON(min_page_size < mm->chunk_size)) {
err = -EINVAL;
@@ -196,7 +196,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
return 0;
error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -214,7 +214,7 @@ static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man,
struct drm_buddy *mm = &mgr->mm;
mutex_lock(&mgr->lock);
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mgr->visible_avail += vres->used_visible_size;
mutex_unlock(&mgr->lock);
@@ -478,3 +478,15 @@ void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
*used_visible = mgr->visible_size - mgr->visible_avail;
mutex_unlock(&mgr->lock);
}
+
+u64 xe_ttm_vram_get_avail(struct ttm_resource_manager *man)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ u64 avail;
+
+ mutex_lock(&mgr->lock);
+ avail = mgr->mm.avail;
+ mutex_unlock(&mgr->lock);
+
+ return avail;
+}
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
index d184e19a9230..cc76050e376d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -25,6 +25,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
struct sg_table *sgt);
+u64 xe_ttm_vram_get_avail(struct ttm_resource_manager *man);
u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man);
void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
u64 *used, u64 *used_visible);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 5c83c75bc497..d4e6fa918942 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -28,7 +28,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
/* Xe2 */
{ XE_RTP_NAME("Tuning: L3 cache"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
@@ -38,11 +38,11 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
{}
@@ -50,7 +50,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
static const struct xe_rtp_entry_sr engine_tunings[] = {
{ XE_RTP_NAME("Tuning: Set Indirect State Override"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1271),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1274),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
@@ -88,7 +88,7 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
/* Xe_LPG */
{ XE_RTP_NAME("Tuning: L3 cache"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 7033f8c1b431..4feb35c95a1c 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -32,11 +32,8 @@ uc_to_xe(struct xe_uc *uc)
/* Should be called once at driver load only */
int xe_uc_init(struct xe_uc *uc)
{
- struct xe_device *xe = uc_to_xe(uc);
int ret;
- xe_device_mem_access_get(xe);
-
/*
* We call the GuC/HuC/GSC init functions even if GuC submission is off
* to correctly move our tracking of the FW state to "disabled".
@@ -65,16 +62,8 @@ int xe_uc_init(struct xe_uc *uc)
goto err;
ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
- if (ret)
- goto err;
-
- xe_device_mem_access_put(xe);
-
- return 0;
err:
- xe_device_mem_access_put(xe);
-
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
index 0a39ec5a6e99..78eb8db73791 100644
--- a/drivers/gpu/drm/xe/xe_uc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index a9d25b3fa67c..186f81640cef 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -17,6 +17,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_sriov.h"
#include "xe_uc_fw.h"
/*
@@ -296,36 +297,28 @@ static void uc_fw_fini(struct drm_device *drm, void *arg)
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
}
-static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
{
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
struct xe_uc_fw_version *compatibility = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
- xe_gt_assert(gt, release->major >= 70);
-
- if (release->major > 70 || release->minor >= 6) {
- /* v70.6.0 adds CSS header support */
- compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->submission_version);
- compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->submission_version);
- compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
- css->submission_version);
- } else if (release->minor >= 3) {
- /* v70.3.0 introduced v1.1.0 */
- compatibility->major = 1;
- compatibility->minor = 1;
- compatibility->patch = 0;
- } else {
- /* v70.0.0 introduced v1.0.0 */
- compatibility->major = 1;
- compatibility->minor = 0;
- compatibility->patch = 0;
+
+ /* We don't support GuC releases older than 70.19 */
+ if (release->major < 70 || (release->major == 70 && release->minor < 19)) {
+ xe_gt_err(gt, "Unsupported GuC v%u.%u! v70.19 or newer is required\n",
+ release->major, release->minor);
+ return -EINVAL;
}
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version);
+
uc_fw->private_data_size = css->private_data_size;
+
+ return 0;
}
int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw)
@@ -424,7 +417,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
- guc_read_css_info(uc_fw, css);
+ return guc_read_css_info(uc_fw, css);
return 0;
}
@@ -658,7 +651,17 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
xe_assert(xe, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
+
+ if (IS_SRIOV_VF(xe)) {
+ /* VF will support only firmwares that driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+ return 0;
+ }
+
uc_fw_override(uc_fw);
+
xe_uc_fw_change_status(uc_fw, uc_fw->path ?
XE_UC_FIRMWARE_SELECTED :
XE_UC_FIRMWARE_NOT_SUPPORTED);
@@ -771,7 +774,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
return 0;
err = uc_fw_copy(uc_fw, fw->data, fw->size,
- XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
uc_fw_release(fw);
@@ -787,7 +791,8 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
- u32 src_offset, dma_ctrl;
+ u64 src_offset;
+ u32 dma_ctrl;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h
index 85c20795d1f8..35078038797e 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw.h
@@ -59,6 +59,8 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
return "TRANSFERRED";
case XE_UC_FIRMWARE_RUNNING:
return "RUNNING";
+ case XE_UC_FIRMWARE_PRELOADED:
+ return "PRELOADED";
}
return "<invalid>";
}
@@ -85,6 +87,7 @@ static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
case XE_UC_FIRMWARE_LOADABLE:
case XE_UC_FIRMWARE_TRANSFERRED:
case XE_UC_FIRMWARE_RUNNING:
+ case XE_UC_FIRMWARE_PRELOADED:
return 0;
}
return -EINVAL;
@@ -134,7 +137,8 @@ static inline bool xe_uc_fw_is_available(struct xe_uc_fw *uc_fw)
static inline bool xe_uc_fw_is_loadable(struct xe_uc_fw *uc_fw)
{
- return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_LOADABLE;
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_LOADABLE &&
+ __xe_uc_fw_status(uc_fw) != XE_UC_FIRMWARE_PRELOADED;
}
static inline bool xe_uc_fw_is_loaded(struct xe_uc_fw *uc_fw)
@@ -144,7 +148,7 @@ static inline bool xe_uc_fw_is_loaded(struct xe_uc_fw *uc_fw)
static inline bool xe_uc_fw_is_running(struct xe_uc_fw *uc_fw)
{
- return __xe_uc_fw_status(uc_fw) == XE_UC_FIRMWARE_RUNNING;
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_RUNNING;
}
static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index bc800b696866..0d8caa0e7354 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -50,7 +50,8 @@ enum xe_uc_fw_status {
XE_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
XE_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
XE_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
- XE_UC_FIRMWARE_RUNNING /* init/auth done */
+ XE_UC_FIRMWARE_RUNNING, /* init/auth done */
+ XE_UC_FIRMWARE_PRELOADED, /* preloaded by the PF driver */
};
enum xe_uc_fw_type {
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f88faef4142b..4aa3943e6f29 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -21,12 +21,12 @@
#include <generated/xe_wa_oob.h>
+#include "regs/xe_gtt_defs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
-#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
@@ -38,6 +38,7 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_wa.h"
+#include "xe_hmm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -65,113 +66,14 @@ int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
- const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
- struct page **pages;
- bool in_kthread = !current->mm;
- unsigned long notifier_seq;
- int pinned, ret, i;
- bool read_only = xe_vma_read_only(vma);
lockdep_assert_held(&vm->lock);
xe_assert(xe, xe_vma_is_userptr(vma));
-retry:
- if (vma->gpuva.flags & XE_VMA_DESTROYED)
- return 0;
-
- notifier_seq = mmu_interval_read_begin(&userptr->notifier);
- if (notifier_seq == userptr->notifier_seq)
- return 0;
-
- pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- if (userptr->sg) {
- dma_unmap_sgtable(xe->drm.dev,
- userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- }
-
- pinned = ret = 0;
- if (in_kthread) {
- if (!mmget_not_zero(userptr->notifier.mm)) {
- ret = -EFAULT;
- goto mm_closed;
- }
- kthread_use_mm(userptr->notifier.mm);
- }
-
- while (pinned < num_pages) {
- ret = get_user_pages_fast(xe_vma_userptr(vma) +
- pinned * PAGE_SIZE,
- num_pages - pinned,
- read_only ? 0 : FOLL_WRITE,
- &pages[pinned]);
- if (ret < 0)
- break;
-
- pinned += ret;
- ret = 0;
- }
-
- if (in_kthread) {
- kthread_unuse_mm(userptr->notifier.mm);
- mmput(userptr->notifier.mm);
- }
-mm_closed:
- if (ret)
- goto out;
-
- ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
- pinned, 0,
- (u64)pinned << PAGE_SHIFT,
- xe_sg_segment_size(xe->drm.dev),
- GFP_KERNEL);
- if (ret) {
- userptr->sg = NULL;
- goto out;
- }
- userptr->sg = &userptr->sgt;
-
- ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- goto out;
- }
-
- for (i = 0; i < pinned; ++i) {
- if (!read_only) {
- lock_page(pages[i]);
- set_page_dirty(pages[i]);
- unlock_page(pages[i]);
- }
-
- mark_page_accessed(pages[i]);
- }
-out:
- release_pages(pages, pinned);
- kvfree(pages);
-
- if (!(ret < 0)) {
- userptr->notifier_seq = notifier_seq;
- if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
- goto retry;
- }
-
- return ret < 0 ? ret : 0;
+ return xe_hmm_userptr_populate_range(uvma, false);
}
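The removed get_user_pages_fast()/sg-table path is replaced by a call into the new xe_hmm helper, whose implementation is not part of this hunk. As background, such helpers are typically built around the canonical hmm_range_fault() retry loop from Documentation/mm/hmm.rst; the sketch below shows that generic pattern only, with illustrative names, and is not the actual xe_hmm_userptr_populate_range() implementation.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int example_hmm_populate(struct mmu_interval_notifier *notifier,
				unsigned long start, unsigned long end,
				unsigned long *pfns, bool write)
{
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT |
				 (write ? HMM_PFN_REQ_WRITE : 0),
	};
	int ret;

	while (true) {
		/* Snapshot the notifier sequence before faulting. */
		range.notifier_seq = mmu_interval_read_begin(notifier);

		mmap_read_lock(notifier->mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(notifier->mm);

		if (ret) {
			if (ret == -EBUSY)
				continue;	/* invalidated, fault again */
			return ret;
		}

		/* Re-check that no invalidation raced the fault. */
		if (!mmu_interval_read_retry(notifier, range.notifier_seq))
			break;
	}

	/* pfns[] now describes the backing pages; build the sg table next. */
	return 0;
}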
static bool preempt_fences_waiting(struct xe_vm *vm)
@@ -482,17 +384,53 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
return 0;
}
+/**
+ * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
+ * @vm: The vm for which we are rebinding.
+ * @exec: The struct drm_exec with the locked GEM objects.
+ * @num_fences: The number of fences to reserve for the operation, not
+ * including rebinds and validations.
+ *
+ * Validates all evicted gem objects and rebinds their vmas. Note that
+ * rebindings may cause evictions and hence the validation-rebind
+ * sequence is rerun until there are no more objects to validate.
+ *
+ * Return: 0 on success, negative error code on error. In particular,
+ * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
+ * the drm_exec transaction needs to be restarted.
+ */
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ do {
+ ret = drm_gpuvm_validate(&vm->gpuvm, exec);
+ if (ret)
+ return ret;
+
+ ret = xe_vm_rebind(vm, false);
+ if (ret)
+ return ret;
+ } while (!list_empty(&vm->gpuvm.evict.list));
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
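The in-tree user is the preempt-rebind path below. As a standalone illustration (not part of the patch), a caller drives this helper from a drm_exec locking loop so that an -EDEADLK raised by an eviction restarts the whole transaction:

/* Illustrative caller-side sketch only; the function name is hypothetical. */
static int example_validate_all(struct xe_vm *vm, unsigned int num_fences)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		/* May evict and return -EDEADLK; the loop then restarts. */
		err = xe_vm_validate_rebind(vm, &exec, num_fences);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	drm_exec_fini(&exec);

	return err;
}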
+
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
bool *done)
{
int err;
- /*
- * 1 fence for each preempt fence plus a fence for each tile from a
- * possible rebind
- */
- err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
- vm->xe->info.tile_count);
+ err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -507,7 +445,7 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
return 0;
}
- err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
+ err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -515,14 +453,19 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
if (err)
return err;
- return drm_gpuvm_validate(&vm->gpuvm, exec);
+ /*
+ * Add validation and rebinding to the locking loop since both can
+ * cause evictions which may require blocking dma_resv locks.
+ * The fence reservation here is intended for the new preempt fences
+ * we attach at the end of the rebind work.
+ */
+ return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
}
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
struct drm_exec exec;
- struct dma_fence *rebind_fence;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
ktime_t end = 0;
@@ -568,18 +511,11 @@ retry:
if (err)
goto out_unlock;
- rebind_fence = xe_vm_rebind(vm, true);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
+ err = xe_vm_rebind(vm, true);
+ if (err)
goto out_unlock;
- }
-
- if (rebind_fence) {
- dma_fence_wait(rebind_fence, false);
- dma_fence_put(rebind_fence);
- }
- /* Wait on munmap style VM unbinds */
+ /* Wait on rebinds and munmap style VM unbinds */
wait = dma_resv_wait_timeout(xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
@@ -648,6 +584,10 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
if (!mmu_notifier_range_blockable(range))
return false;
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
down_write(&vm->userptr.notifier_lock);
mmu_interval_set_seq(mni, cur_seq);
@@ -773,14 +713,14 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
struct xe_vma *vma, *next;
lockdep_assert_held(&vm->lock);
if (xe_vm_in_lr_mode(vm) && !rebind_worker)
- return NULL;
+ return 0;
xe_vm_assert_held(vm);
list_for_each_entry_safe(vma, next, &vm->rebind_list,
@@ -788,17 +728,17 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
xe_assert(vm->xe, vma->tile_present);
list_del_init(&vma->combined_links.rebind);
- dma_fence_put(fence);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
else
trace_xe_vma_rebind_exec(vma);
fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
if (IS_ERR(fence))
- return fence;
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
}
- return fence;
+ return 0;
}
static void xe_vma_free(struct xe_vma *vma)
@@ -917,8 +857,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
static void xe_vma_destroy_late(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- bool read_only = xe_vma_read_only(vma);
if (vma->ufence) {
xe_sync_ufence_put(vma->ufence);
@@ -926,16 +864,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
}
if (xe_vma_is_userptr(vma)) {
- struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+ struct xe_userptr *userptr = &uvma->userptr;
- if (userptr->sg) {
- dma_unmap_sgtable(xe->drm.dev,
- userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- }
+ if (userptr->sg)
+ xe_hmm_userptr_free_sg(uvma);
/*
* Since userptr pages are not pinned, we can't remove
@@ -1004,35 +937,26 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
}
/**
- * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * xe_vm_lock_vma() - drm_exec utility to lock a vma
* @exec: The drm_exec object we're currently locking for.
* @vma: The vma for which we want to lock the vm resv and any attached
* object's resv.
- * @num_shared: The number of dma-fence slots to pre-allocate in the
- * objects' reservation objects.
*
* Return: 0 on success, negative error code on error. In particular
* may return -EDEADLK on WW transaction contention and -EINTR if
* an interruptible wait is terminated by a signal.
*/
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared)
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_bo *bo = xe_vma_bo(vma);
int err;
XE_WARN_ON(!vm);
- if (num_shared)
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- else
- err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
- if (!err && bo && !bo->vm) {
- if (num_shared)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
- else
- err = drm_exec_lock_obj(exec, &bo->ttm.base);
- }
+
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm)
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
return err;
}
@@ -1044,7 +968,7 @@ static void xe_vma_destroy_unlocked(struct xe_vma *vma)
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
- err = xe_vm_prepare_vma(&exec, vma, 0);
+ err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
@@ -1249,8 +1173,6 @@ static const struct xe_pt_ops xelp_pt_ops = {
.pde_encode_bo = xelp_pde_encode_bo,
};
-static void vm_destroy_work_func(struct work_struct *w);
-
/**
* xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
* given tile and vm.
@@ -1330,8 +1252,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
- INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
-
INIT_LIST_HEAD(&vm->preempt.exec_queues);
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
@@ -1341,7 +1261,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->pt_ops = &xelp_pt_ops;
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1386,9 +1306,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
- if (flags & XE_VM_FLAG_LR_MODE) {
+ if (vm->flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
- vm->flags |= XE_VM_FLAG_LR_MODE;
vm->batch_invalidate_tlb = false;
}
@@ -1452,7 +1371,7 @@ err_no_resv:
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ERR_PTR(err);
}
@@ -1552,6 +1471,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1560,47 +1489,31 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_put(vm);
}
-static void vm_destroy_work_func(struct work_struct *w)
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
{
- struct xe_vm *vm =
- container_of(w, struct xe_vm, destroy_work);
+ struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
- mutex_destroy(&vm->snap_mutex);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
- xe_device_mem_access_put(xe);
+ mutex_destroy(&vm->snap_mutex);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
+ xe_pm_runtime_put(xe);
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
- dma_fence_put(vm->rebind_fence);
kfree(vm);
}
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
-{
- struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
-
- /* To destroy the VM we need to be able to sleep */
- queue_work(system_unbound_wq, &vm->destroy_work);
-}
-
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{
struct xe_vm *vm;
@@ -1697,7 +1610,7 @@ next:
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
return fence;
@@ -1771,7 +1684,7 @@ next:
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
+ xe_sync_entry_signal(&syncs[i],
cf ? &cf->base : fence);
}
@@ -1832,7 +1745,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
}
@@ -2033,7 +1946,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
int err;
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
if (!xe_vma_has_no_bo(vma)) {
err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
@@ -2053,7 +1966,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct dma_fence *fence =
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
dma_fence_put(fence);
}
}
@@ -2185,6 +2098,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.immediate =
+ flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+ op->map.read_only =
+ flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
@@ -2379,6 +2296,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ flags |= op->map.read_only ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
@@ -2512,7 +2431,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
lockdep_assert_held_write(&vm->lock);
- err = xe_vm_prepare_vma(exec, vma, 1);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
@@ -2523,7 +2442,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- !xe_vm_in_fault_mode(vm),
+ op->map.immediate || !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2798,7 +2717,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+#define SUPPORTED_FLAGS \
+ (DRM_XE_VM_BIND_FLAG_READONLY | \
+ DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+ DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -2931,7 +2853,7 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
return PTR_ERR(fence);
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
fence);
@@ -3042,7 +2964,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto put_obj;
}
- if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+ if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
@@ -3234,6 +3156,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "INVALIDATE: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
@@ -3352,8 +3278,10 @@ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
if (num_snaps)
snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
- if (!snap)
+ if (!snap) {
+ snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
goto out_unlock;
+ }
snap->num_snaps = num_snaps;
i = 0;
@@ -3393,6 +3321,9 @@ out_unlock:
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
{
+ if (IS_ERR_OR_NULL(snap))
+ return;
+
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
struct iosys_map src;
@@ -3447,13 +3378,21 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
{
unsigned long i, j;
- for (i = 0; i < snap->num_snaps; i++) {
- if (IS_ERR(snap->snap[i].data))
- goto uncaptured;
+ if (IS_ERR_OR_NULL(snap)) {
+ drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
+ return;
+ }
+ for (i = 0; i < snap->num_snaps; i++) {
drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
- drm_printf(p, "[%llx].data: ",
- snap->snap[i].ofs);
+
+ if (IS_ERR(snap->snap[i].data)) {
+ drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
+ PTR_ERR(snap->snap[i].data));
+ continue;
+ }
+
+ drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
u32 *val = snap->snap[i].data + j;
@@ -3463,12 +3402,6 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
- continue;
-
-uncaptured:
- drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
- snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
- PTR_ERR(snap->snap[i].data));
}
}
@@ -3476,7 +3409,7 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
{
unsigned long i;
- if (!snap)
+ if (IS_ERR_OR_NULL(snap))
return;
for (i = 0; i < snap->num_snaps; i++) {
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 6df1f1c7f85d..306cd0934a19 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -207,7 +207,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -242,8 +242,10 @@ bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared);
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
+
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
/**
* xe_vm_resv() - Return's the vm's reservation object
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ae5fb565f6bf..72a100671e5d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -177,16 +177,6 @@ struct xe_vm {
*/
struct list_head rebind_list;
- /** @rebind_fence: rebind fence from execbuf */
- struct dma_fence *rebind_fence;
-
- /**
- * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
- * from an irq context can be last put and the destroy needs to be able
- * to sleep.
- */
- struct work_struct destroy_work;
-
/**
* @rftree: range fence tree to track updates to page table structure.
* Used to implement conflict tracking between independent bind engines.
@@ -264,6 +254,11 @@ struct xe_vm {
bool capture_once;
} error_capture;
+ /**
+ * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
+ * protected by the vm resv.
+ */
+ u64 tlb_flush_seqno;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -274,6 +269,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ /** @immediate: Immediate bind */
+ bool immediate;
+ /** @read_only: Read only */
+ bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @dumpable: whether BO is dumped on GPU hang */
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index c5f6b5a5d117..3e21ddc6e60c 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -100,31 +100,27 @@ static void vram_freq_sysfs_fini(struct drm_device *drm, void *arg)
* @tile: Xe Tile object
*
* It needs to be initialized after the main tile component is ready
+ *
+ * Returns: 0 on success, negative error code on error.
*/
-void xe_vram_freq_sysfs_init(struct xe_tile *tile)
+int xe_vram_freq_sysfs_init(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
struct kobject *kobj;
int err;
if (xe->info.platform != XE_PVC)
- return;
+ return 0;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj) {
- drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
- return;
- }
+ if (!kobj)
+ return -ENOMEM;
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
kobject_put(kobj);
- drm_warn(&xe->drm, "failed to register vram freq sysfs, err: %d\n", err);
- return;
+ return err;
}
- err = drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
- if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
}
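With the void-to-int conversion, reporting moves to the caller's discretion instead of the helper warning internally. A minimal sketch of the expected call-site pattern (the caller name here is hypothetical, not part of the patch):

static int example_tile_sysfs_complete(struct xe_tile *tile)
{
	int err;

	/* Propagate or report the error at the call site. */
	err = xe_vram_freq_sysfs_init(tile);
	if (err)
		drm_warn(&tile_to_xe(tile)->drm,
			 "failed to register vram freq sysfs, err: %d\n", err);

	return err;
}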
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.h b/drivers/gpu/drm/xe/xe_vram_freq.h
index cbe8c12fbd64..bf726bc5881f 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.h
+++ b/drivers/gpu/drm/xe/xe_vram_freq.h
@@ -8,6 +8,6 @@
struct xe_tile;
-void xe_vram_freq_sysfs_init(struct xe_tile *tile);
+int xe_vram_freq_sysfs_init(struct xe_tile *tile);
#endif /* _XE_VRAM_FREQ_H_ */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index a0264eedd443..dd214d95e4b6 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -173,11 +173,11 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(CLR(MISCCPCTL, DOP_CLOCK_GATE_RENDER_ENABLE))
},
{ XE_RTP_NAME("14018575942"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(COMP_MOD_CTRL, FORCE_MISS_FTLB))
},
{ XE_RTP_NAME("22016670082"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(SQCNT1, ENFORCE_RAR))
},
@@ -228,6 +228,28 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPM */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(1301),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14020316580"),
+ XE_RTP_RULES(MEDIA_VERSION(1301)),
+ XE_RTP_ACTIONS(CLR(PG_ENABLE,
+ VD0_HCP_POWERGATE_ENABLE |
+ VD0_MFXVDENC_POWERGATE_ENABLE |
+ VD2_HCP_POWERGATE_ENABLE |
+ VD2_MFXVDENC_POWERGATE_ENABLE)),
+ },
+ { XE_RTP_NAME("14019449301"),
+ XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+
{}
};
@@ -328,12 +350,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
},
- { XE_RTP_NAME("16015675438"),
- XE_RTP_RULES(PLATFORM(DG2),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
- PERF_FIX_BALANCING_CFE_DISABLE))
- },
{ XE_RTP_NAME("18028616096"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
@@ -383,10 +399,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
},
- { XE_RTP_NAME("16015675438"),
- XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
- PERF_FIX_BALANCING_CFE_DISABLE))
+ { XE_RTP_NAME("18020744125"),
+ XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute),
+ ENGINE_CLASS(COMPUTE)),
+ XE_RTP_ACTIONS(SET(RING_HWSTAM(RENDER_RING_BASE), ~0))
},
{ XE_RTP_NAME("14014999345"),
XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COMPUTE),
@@ -397,7 +413,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe_LPG */
{ XE_RTP_NAME("14017856879"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, DIS_FIX_EOT1_FLUSH))
},
@@ -407,6 +423,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
XE_RTP_NOCHECK))
},
+ { XE_RTP_NAME("14020495402"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_TDL_SVHS_GATING))
+ },
/* Xe2_LPG */
@@ -424,8 +445,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN5, DISABLE_SAMPLE_G_PERFORMANCE))
},
- { XE_RTP_NAME("16021540221"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ { XE_RTP_NAME("14020338487"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
+ },
+ { XE_RTP_NAME("18034896535, 16021540221"), /* 16021540221: GRAPHICS_STEP(A0, B0) */
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
},
@@ -460,6 +485,65 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
},
+
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16018712365"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
+ },
+ { XE_RTP_NAME("16018737384"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
+ },
+ { XE_RTP_NAME("14019988906"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
+ },
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
+ { XE_RTP_NAME("14020338487"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
+ },
+ { XE_RTP_NAME("18032247524"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
+ },
+ { XE_RTP_NAME("14018471104"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
+ },
+ /*
+ * Although this workaround isn't required for the RCS, disabling these
+ * reports has no impact on our driver or the GuC, so we go ahead and
+ * apply this to all engines for simplicity.
+ */
+ { XE_RTP_NAME("16021639441"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ { XE_RTP_NAME("14019811474"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
+ },
+
+ /* Xe2_HPM */
+
+ { XE_RTP_NAME("16021639441"),
+ XE_RTP_RULES(MEDIA_VERSION(1301)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+
{}
};
@@ -537,7 +621,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
/* Xe_LPG */
{ XE_RTP_NAME("18019271663"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
},
{ XE_RTP_NAME("14019877138"),
@@ -580,6 +664,24 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
+
+ /* Xe2_HPG */
+ { XE_RTP_NAME("15010599737"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
+ },
+ { XE_RTP_NAME("14019386621"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
+ },
+ { XE_RTP_NAME("14020756599"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index b138cbd51bdb..12fe88796a49 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -4,9 +4,6 @@
22011391025 PLATFORM(DG2)
22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
-16015675438 PLATFORM(PVC)
- SUBPLATFORM(DG2, G10)
- SUBPLATFORM(DG2, G12)
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
@@ -22,3 +19,11 @@
GRAPHICS_VERSION_RANGE(1270, 1274)
MEDIA_VERSION(1300)
PLATFORM(DG2)
+14018094691 GRAPHICS_VERSION(2004)
+14019882105 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+18024947630 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+ MEDIA_VERSION(2000)
+16022287689 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+13011645652 GRAPHICS_VERSION(2004)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 3ad2b4cfd1f0..63112ed975c4 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 8a39b3accce5..13157da0089e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -18,6 +18,7 @@
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -65,14 +66,26 @@
#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
/**
+ * enum zynqmp_dpsub_layer_mode - Layer mode
+ * @ZYNQMP_DPSUB_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DPSUB_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_dpsub_layer_mode {
+ ZYNQMP_DPSUB_LAYER_NONLIVE,
+ ZYNQMP_DPSUB_LAYER_LIVE,
+};
+
+/**
* struct zynqmp_disp_format - Display subsystem format information
* @drm_fmt: DRM format (4CC)
+ * @bus_fmt: Media bus format
* @buf_fmt: AV buffer format
* @swap: Flag to swap R & B for RGB formats, and U & V for YUV formats
* @sf: Scaling factors for color components
*/
struct zynqmp_disp_format {
u32 drm_fmt;
+ u32 bus_fmt;
u32 buf_fmt;
bool swap;
const u32 *sf;
@@ -172,6 +185,12 @@ static const u32 scaling_factors_565[] = {
ZYNQMP_DISP_AV_BUF_5BIT_SF,
};
+static const u32 scaling_factors_666[] = {
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+};
+
static const u32 scaling_factors_888[] = {
ZYNQMP_DISP_AV_BUF_8BIT_SF,
ZYNQMP_DISP_AV_BUF_8BIT_SF,
@@ -354,6 +373,41 @@ static const struct zynqmp_disp_format avbuf_gfx_fmts[] = {
},
};
+/* List of live video layer formats */
+static const struct zynqmp_disp_format avbuf_live_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .sf = scaling_factors_666,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .bus_fmt = MEDIA_BUS_FMT_RGB888_1X24,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_P210,
+ .bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .sf = scaling_factors_101010,
+ },
+};
+
static u32 zynqmp_disp_avbuf_read(struct zynqmp_disp *disp, int reg)
{
return readl(disp->avbuf.base + reg);
@@ -382,19 +436,29 @@ static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp *disp,
const struct zynqmp_disp_format *fmt)
{
unsigned int i;
- u32 val;
-
- val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
- val &= zynqmp_disp_layer_is_video(layer)
- ? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
- : ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
- val |= fmt->buf_fmt;
- zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_FMT, val);
+ u32 val, reg;
+
+ layer->disp_fmt = fmt;
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE) {
+ reg = ZYNQMP_DISP_AV_BUF_FMT;
+ val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
+ val &= zynqmp_disp_layer_is_video(layer)
+ ? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
+ : ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+ val |= fmt->buf_fmt;
+ zynqmp_disp_avbuf_write(disp, reg, val);
+ } else {
+ reg = zynqmp_disp_layer_is_video(layer)
+ ? ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG
+ : ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+ val = fmt->buf_fmt;
+ zynqmp_disp_avbuf_write(disp, reg, val);
+ }
for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++) {
- unsigned int reg = zynqmp_disp_layer_is_video(layer)
- ? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
- : ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
+ reg = zynqmp_disp_layer_is_video(layer)
+ ? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
+ : ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
zynqmp_disp_avbuf_write(disp, reg, fmt->sf[i]);
}
@@ -873,10 +937,40 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_layer_find_live_format - Find format information for given
+ * media bus format
+ * @layer: The layer
+ * @media_bus_format: Media bus format to search for
+ *
+ * Search display subsystem format information corresponding to the given media
+ * bus format @media_bus_format for the @layer, and return a pointer to the
+ * format descriptor.
+ *
+ * Return: A pointer to the format descriptor if found, NULL otherwise
+ */
+static const struct zynqmp_disp_format *
+zynqmp_disp_layer_find_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < layer->info->num_formats; i++)
+ if (layer->info->formats[i].bus_fmt == media_bus_format)
+ return &layer->info->formats[i];
+
+ return NULL;
+}
+
+/**
* zynqmp_disp_layer_drm_formats - Return the DRM formats supported by the layer
* @layer: The layer
* @num_formats: Pointer to the returned number of formats
*
+ * NOTE: This function doesn't make sense for live video layers and will
+ * always return an empty list in such cases. zynqmp_disp_live_layer_formats()
+ * should be used to query a list of media bus formats supported by the live
+ * video input layer.
+ *
* Return: A newly allocated u32 array that stores all the DRM formats
* supported by the layer. The number of formats in the array is returned
* through the num_formats argument.
@@ -887,10 +981,17 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int i;
u32 *formats;
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE)) {
+ *num_formats = 0;
+ return NULL;
+ }
+
formats = kcalloc(layer->info->num_formats, sizeof(*formats),
GFP_KERNEL);
- if (!formats)
+ if (!formats) {
+ *num_formats = 0;
return NULL;
+ }
for (i = 0; i < layer->info->num_formats; ++i)
formats[i] = layer->info->formats[i].drm_fmt;
@@ -900,17 +1001,51 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_live_layer_formats - Return the media bus formats supported by
+ * the live video layer
+ * @layer: The layer
+ * @num_formats: Pointer to the returned number of formats
+ *
+ * NOTE: This function should be used only for live video input layers.
+ *
+ * Return: A newly allocated u32 array of media bus formats supported by the
+ * layer. The number of formats in the array is returned through the
+ * @num_formats argument.
+ */
+u32 *zynqmp_disp_live_layer_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats)
+{
+ unsigned int i;
+ u32 *formats;
+
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_LIVE)) {
+ *num_formats = 0;
+ return NULL;
+ }
+
+ formats = kcalloc(layer->info->num_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats) {
+ *num_formats = 0;
+ return NULL;
+ }
+
+ for (i = 0; i < layer->info->num_formats; ++i)
+ formats[i] = layer->info->formats[i].bus_fmt;
+
+ *num_formats = layer->info->num_formats;
+ return formats;
+}
+
+/**
* zynqmp_disp_layer_enable - Enable a layer
* @layer: The layer
- * @mode: Operating mode of layer
*
* Enable the @layer in the audio/video buffer manager and the blender. DMA
* channels are started separately by zynqmp_disp_layer_update().
*/
-void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
- enum zynqmp_dpsub_layer_mode mode)
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer)
{
- layer->mode = mode;
zynqmp_disp_avbuf_enable_video(layer->disp, layer);
zynqmp_disp_blend_layer_enable(layer->disp, layer);
}
@@ -926,7 +1061,7 @@ void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
{
unsigned int i;
- if (layer->disp->dpsub->dma_enabled) {
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE) {
for (i = 0; i < layer->drm_fmt->num_planes; i++)
dmaengine_terminate_sync(layer->dmas[i].chan);
}
@@ -940,6 +1075,9 @@ void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
* @layer: The layer
* @info: The format info
*
+ * NOTE: Use zynqmp_disp_layer_set_live_format() to set media bus format for
+ * live video layers.
+ *
* Set the format for @layer to @info. The layer must be disabled.
*/
void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
@@ -947,14 +1085,16 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
{
unsigned int i;
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE))
+ return;
+
layer->disp_fmt = zynqmp_disp_layer_find_format(layer, info->format);
+ if (WARN_ON(!layer->disp_fmt))
+ return;
layer->drm_fmt = info;
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
- if (!layer->disp->dpsub->dma_enabled)
- return;
-
/*
* Set pconfig for each DMA channel to indicate they're part of a
* video group.
@@ -975,6 +1115,32 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_layer_set_live_format - Set the live video layer format
+ * @layer: The layer
+ * @info: The format info
+ *
+ * NOTE: This function should not be used to set the format for non-live
+ * video layers. Use zynqmp_disp_layer_set_format() instead.
+ *
+ * Set the display format for the live @layer. The layer must be disabled.
+ */
+void zynqmp_disp_layer_set_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format)
+{
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_LIVE))
+ return;
+
+ layer->disp_fmt = zynqmp_disp_layer_find_live_format(layer,
+ media_bus_format);
+ if (WARN_ON(!layer->disp_fmt))
+ return;
+
+ zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
+
+ layer->drm_fmt = drm_format_info(layer->disp_fmt->drm_fmt);
+}
+
+/**
* zynqmp_disp_layer_update - Update the layer framebuffer
* @layer: The layer
* @state: The plane state
@@ -990,7 +1156,7 @@ int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
const struct drm_format_info *info = layer->drm_fmt;
unsigned int i;
- if (!layer->disp->dpsub->dma_enabled)
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_LIVE)
return 0;
for (i = 0; i < info->num_planes; i++) {
@@ -1040,9 +1206,6 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
{
unsigned int i;
- if (!layer->info || !disp->dpsub->dma_enabled)
- return;
-
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
@@ -1083,9 +1246,6 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
unsigned int i;
int ret;
- if (!disp->dpsub->dma_enabled)
- return 0;
-
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
char dma_channel_name[16];
@@ -1124,6 +1284,11 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
.num_channels = 1,
},
};
+ static const struct zynqmp_disp_layer_info live_layer_info = {
+ .formats = avbuf_live_fmts,
+ .num_formats = ARRAY_SIZE(avbuf_live_fmts),
+ .num_channels = 0,
+ };
unsigned int i;
int ret;
@@ -1133,7 +1298,17 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
layer->id = i;
layer->disp = disp;
- layer->info = &layer_info[i];
+ /*
+ * For now assume dpsub works in either live or non-live mode for both layers.
+ * Hybrid mode is not supported yet.
+ */
+ if (disp->dpsub->dma_enabled) {
+ layer->mode = ZYNQMP_DPSUB_LAYER_NONLIVE;
+ layer->info = &layer_info[i];
+ } else {
+ layer->mode = ZYNQMP_DPSUB_LAYER_LIVE;
+ layer->info = &live_layer_info;
+ }
ret = zynqmp_disp_layer_request_dma(disp, layer);
if (ret)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
index 123cffac08be..fa545533c9d1 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -42,16 +42,6 @@ enum zynqmp_dpsub_layer_id {
ZYNQMP_DPSUB_LAYER_GFX,
};
-/**
- * enum zynqmp_dpsub_layer_mode - Layer mode
- * @ZYNQMP_DPSUB_LAYER_NONLIVE: non-live (memory) mode
- * @ZYNQMP_DPSUB_LAYER_LIVE: live (stream) mode
- */
-enum zynqmp_dpsub_layer_mode {
- ZYNQMP_DPSUB_LAYER_NONLIVE,
- ZYNQMP_DPSUB_LAYER_LIVE,
-};
-
void zynqmp_disp_enable(struct zynqmp_disp *disp);
void zynqmp_disp_disable(struct zynqmp_disp *disp);
int zynqmp_disp_setup_clock(struct zynqmp_disp *disp,
@@ -62,11 +52,14 @@ void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int *num_formats);
-void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
- enum zynqmp_dpsub_layer_mode mode);
+u32 *zynqmp_disp_live_layer_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats);
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer);
void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer);
void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
const struct drm_format_info *info);
+void zynqmp_disp_layer_set_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format);
int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
index f92a006d5070..fa3935384834 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
@@ -165,10 +165,10 @@
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 0x2
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12 0x3
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK GENMASK(2, 0)
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB 0x0
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 0x1
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 0x2
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB (0x0 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 (0x1 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 (0x2 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY (0x3 << 4)
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK GENMASK(5, 4)
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST BIT(8)
#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY 0x400
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 1846c4971fd8..8c2d24809014 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -1276,28 +1277,45 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
* DISP Configuration
*/
+/**
+ * zynqmp_dp_disp_connected_live_layer - Return the first connected live layer
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: The first connected live display layer or NULL if none of the live
+ * layers are connected.
+ */
+static struct zynqmp_disp_layer *
+zynqmp_dp_disp_connected_live_layer(struct zynqmp_dp *dp)
+{
+ if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
+ return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
+ else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
+ return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
+ else
+ return NULL;
+}
+
static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
struct drm_bridge_state *old_bridge_state)
{
- enum zynqmp_dpsub_layer_id layer_id;
struct zynqmp_disp_layer *layer;
- const struct drm_format_info *info;
+ struct drm_bridge_state *bridge_state;
+ u32 bus_fmt;
- if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
- layer_id = ZYNQMP_DPSUB_LAYER_VID;
- else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
- layer_id = ZYNQMP_DPSUB_LAYER_GFX;
- else
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (!layer)
return;
- layer = dp->dpsub->layers[layer_id];
+ bridge_state = drm_atomic_get_new_bridge_state(old_bridge_state->base.state,
+ old_bridge_state->bridge);
+ if (WARN_ON(!bridge_state))
+ return;
- /* TODO: Make the format configurable. */
- info = drm_format_info(DRM_FORMAT_YUV422);
- zynqmp_disp_layer_set_format(layer, info);
- zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_LIVE);
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ zynqmp_disp_layer_set_live_format(layer, bus_fmt);
+ zynqmp_disp_layer_enable(layer);
- if (layer_id == ZYNQMP_DPSUB_LAYER_GFX)
+ if (layer == dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX])
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, true, 255);
else
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, false, 0);
@@ -1310,11 +1328,8 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
{
struct zynqmp_disp_layer *layer;
- if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
- layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
- else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
- layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
- else
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (!layer)
return;
zynqmp_disp_disable(dp->dpsub->disp);
@@ -1568,6 +1583,35 @@ static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *brid
return drm_edid_read_ddc(connector, &dp->aux.ddc);
}
+static u32 *zynqmp_dp_bridge_default_bus_fmts(unsigned int *num_input_fmts)
+{
+ u32 *formats = kzalloc(sizeof(*formats), GFP_KERNEL);
+
+ if (formats)
+ *formats = MEDIA_BUS_FMT_FIXED;
+ *num_input_fmts = !!formats;
+
+ return formats;
+}
+
+static u32 *
+zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ struct zynqmp_disp_layer *layer;
+
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (layer)
+ return zynqmp_disp_live_layer_formats(layer, num_input_fmts);
+ else
+ return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts);
+}
+
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.attach = zynqmp_dp_bridge_attach,
.detach = zynqmp_dp_bridge_detach,
@@ -1580,6 +1624,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.atomic_check = zynqmp_dp_bridge_atomic_check,
.detect = zynqmp_dp_bridge_detect,
.edid_read = zynqmp_dp_bridge_edid_read,
+ .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts,
};
/* -----------------------------------------------------------------------------
@@ -1714,6 +1759,10 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
goto err_free;
}
+ ret = zynqmp_dp_reset(dp, true);
+ if (ret < 0)
+ goto err_free;
+
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
goto err_free;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 88eb33acd5f0..face8d6b2a6f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -256,12 +256,12 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret)
goto err_dp;
+ drm_bridge_add(dpsub->bridge);
+
if (dpsub->dma_enabled) {
ret = zynqmp_dpsub_drm_init(dpsub);
if (ret)
goto err_disp;
- } else {
- drm_bridge_add(dpsub->bridge);
}
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
@@ -288,9 +288,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
- else
- drm_bridge_remove(dpsub->bridge);
+ drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
zynqmp_dp_remove(dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index db3bb4afbfc4..43bf416b33d5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -122,7 +122,7 @@ static void zynqmp_dpsub_plane_atomic_update(struct drm_plane *plane,
/* Enable or re-enable the plane if the format has changed. */
if (format_changed)
- zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_NONLIVE);
+ zynqmp_disp_layer_enable(layer);
}
static const struct drm_plane_helper_funcs zynqmp_dpsub_plane_helper_funcs = {
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 783975d1384f..7c52757a89db 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
return 0;
}
-static int host1x_dma_configure(struct device *dev)
-{
- return of_dma_configure(dev, dev->of_node, true);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
- .dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 89983d7d73ca..3a0aaa68ac8d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -216,6 +216,30 @@ static const struct host1x_info host1x07_info = {
*/
static const struct host1x_sid_entry tegra234_sid_table[] = {
{
+ /* SE2 MMIO */
+ .base = 0x1658,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE4 MMIO */
+ .base = 0x1660,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE2 channel */
+ .base = 0x1738,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE4 channel */
+ .base = 0x1740,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
/* VIC channel */
.base = 0x17b8,
.offset = 0x30,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4c682c650704..08446c89eff6 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1236,6 +1236,22 @@ config HID_WIIMOTE
To compile this driver as a module, choose M here: the
module will be called hid-wiimote.
+config HID_WINWING
+ tristate "WinWing Orion2 throttle support"
+ depends on USB_HID
+ depends on NEW_LEDS
+ depends on LEDS_CLASS
+ help
+ Support for WinWing Orion2 throttle base with the following grips:
+
+ * TGRIP-16EX
+ * TGRIP-18
+
+ This driver enables all buttons and switches on the throttle base.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-winwing.
+
config HID_XINMO
tristate "Xin-Mo non-fully compliant devices"
help
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 082a728eac60..ce71b53ea6c5 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -150,6 +150,7 @@ wacom-objs := wacom_wac.o wacom_sys.o
obj-$(CONFIG_HID_WACOM) += wacom.o
obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
+obj-$(CONFIG_HID_WINWING) += hid-winwing.o
obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
obj-$(CONFIG_HID_SENSOR_CUSTOM_SENSOR) += hid-sensor-custom.o
diff --git a/drivers/hid/amd-sfh-hid/Makefile b/drivers/hid/amd-sfh-hid/Makefile
index 0222170ab7ad..106514b54d16 100644
--- a/drivers/hid/amd-sfh-hid/Makefile
+++ b/drivers/hid/amd-sfh-hid/Makefile
@@ -13,4 +13,4 @@ amd_sfh-objs += sfh1_1/amd_sfh_init.o
amd_sfh-objs += sfh1_1/amd_sfh_interface.o
amd_sfh-objs += sfh1_1/amd_sfh_desc.o
-ccflags-y += -I $(srctree)/$(src)/
+ccflags-y += -I $(src)/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 9e97c26c4482..0c28ca349bcd 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -333,14 +333,11 @@ static const struct dmi_system_id dmi_nodevs[] = {
static void sfh1_1_init_work(struct work_struct *work)
{
struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
- struct pci_dev *pdev = mp2->pdev;
int rc;
rc = mp2->sfh1_1_ops->init(mp2);
- if (rc) {
- dev_err(&pdev->dev, "sfh1_1_init failed err %d\n", rc);
+ if (rc)
return;
- }
amd_sfh_clear_intr(mp2);
mp2->init_done = 1;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 5b24d5f63701..621793d92464 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -202,7 +202,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
}
if (!cl_data->is_any_sensor_enabled) {
- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
+ dev_warn(dev, "No sensor registered, sensors not enabled is %d\n",
cl_data->is_any_sensor_enabled);
rc = -EOPNOTSUPP;
goto cleanup;
@@ -227,6 +227,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
struct amd_mp2_sensor_info info;
int i, status;
+ if (!cl_data->is_any_sensor_enabled) {
+ amd_sfh_clear_intr(mp2);
+ return;
+ }
+
for (i = 0; i < cl_data->num_hid_devices; i++) {
if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
info.sensor_idx = cl_data->sensor_idx[i];
@@ -252,6 +257,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
struct amdtp_cl_data *cl_data = mp2->cl_data;
int i, status;
+ if (!cl_data->is_any_sensor_enabled) {
+ amd_sfh_clear_intr(mp2);
+ return;
+ }
+
for (i = 0; i < cl_data->num_hid_devices; i++) {
if (cl_data->sensor_idx[i] != HPD_IDX &&
cl_data->sensor_sts[i] == SENSOR_ENABLED) {
@@ -320,7 +330,7 @@ int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
if (binfo.sbase.fw_info.fw_ver == 0 || binfo.sbase.s_list.sl.sensors == 0) {
- dev_dbg(dev, "failed to get sensors\n");
+ dev_dbg(dev, "No sensor registered\n");
return -EOPNOTSUPP;
}
dev_dbg(dev, "firmware version 0x%x\n", binfo.sbase.fw_info.fw_ver);
@@ -337,7 +347,8 @@ int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
rc = amd_sfh1_1_hid_client_init(mp2);
if (rc) {
sfh_deinit_emp2();
- dev_err(dev, "amd_sfh1_1_hid_client_init failed\n");
+ if ((rc != -ENODEV) && (rc != -EOPNOTSUPP))
+ dev_err(dev, "amd_sfh1_1_hid_client_init failed\n");
return rc;
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 2de2668a0277..4676f060da26 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -97,7 +97,7 @@ static int amd_sfh_hpd_info(u8 *user_present)
if (!emp2 || !emp2->dev_en.is_hpd_present)
return -ENODEV;
- hpdstatus.val = readl(emp2->mmio + AMD_C2P_MSG(4));
+ hpdstatus.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 4));
*user_present = hpdstatus.shpd.presence;
return 0;
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index e630caf644e8..10289f44d0cc 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -143,48 +143,6 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
-/* Disables missing prototype warnings */
-__bpf_kfunc_start_defs();
-
-/**
- * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
- *
- * @ctx: The HID-BPF context
- * @offset: The offset within the memory
- * @rdwr_buf_size: the const size of the buffer
- *
- * @returns %NULL on error, an %__u8 memory pointer on success
- */
-__bpf_kfunc __u8 *
-hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
-{
- struct hid_bpf_ctx_kern *ctx_kern;
-
- if (!ctx)
- return NULL;
-
- ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
-
- if (rdwr_buf_size + offset > ctx->allocated_size)
- return NULL;
-
- return ctx_kern->data + offset;
-}
-__bpf_kfunc_end_defs();
-
-/*
- * The following set contains all functions we agree BPF programs
- * can use.
- */
-BTF_KFUNCS_START(hid_bpf_kfunc_ids)
-BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
-BTF_KFUNCS_END(hid_bpf_kfunc_ids)
-
-static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
- .owner = THIS_MODULE,
- .set = &hid_bpf_kfunc_ids,
-};
-
static int device_match_id(struct device *dev, const void *id)
{
struct hid_device *hdev = to_hid_device(dev);
@@ -282,6 +240,31 @@ static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct b
__bpf_kfunc_start_defs();
/**
+ * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
+ *
+ * @ctx: The HID-BPF context
+ * @offset: The offset within the memory
+ * @rdwr_buf_size: the const size of the buffer
+ *
+ * @returns %NULL on error, an %__u8 memory pointer on success
+ */
+__bpf_kfunc __u8 *
+hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
+{
+ struct hid_bpf_ctx_kern *ctx_kern;
+
+ if (!ctx)
+ return NULL;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+
+ if (rdwr_buf_size + offset > ctx->allocated_size)
+ return NULL;
+
+ return ctx_kern->data + offset;
+}
+
+/**
* hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
*
* @hid_id: the system unique identifier of the HID device
@@ -393,6 +376,46 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
put_device(&hid->dev);
}
+static int
+__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
+ enum hid_report_type rtype)
+{
+ struct hid_report_enum *report_enum;
+ struct hid_report *report;
+ struct hid_device *hdev;
+ u32 report_len;
+
+ /* check arguments */
+ if (!ctx || !hid_bpf_ops || !buf)
+ return -EINVAL;
+
+ switch (rtype) {
+ case HID_INPUT_REPORT:
+ case HID_OUTPUT_REPORT:
+ case HID_FEATURE_REPORT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*buf__sz < 1)
+ return -EINVAL;
+
+ hdev = (struct hid_device *)ctx->hid; /* discard const */
+
+ report_enum = hdev->report_enum + rtype;
+ report = hid_bpf_ops->hid_get_report(report_enum, buf);
+ if (!report)
+ return -EINVAL;
+
+ report_len = hid_report_len(report);
+
+ if (*buf__sz > report_len)
+ *buf__sz = report_len;
+
+ return 0;
+}
+
/**
* hid_bpf_hw_request - Communicate with a HID device
*
@@ -409,24 +432,14 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
struct hid_device *hdev;
- struct hid_report *report;
- struct hid_report_enum *report_enum;
+ size_t size = buf__sz;
u8 *dma_data;
- u32 report_len;
int ret;
/* check arguments */
- if (!ctx || !hid_bpf_ops || !buf)
- return -EINVAL;
-
- switch (rtype) {
- case HID_INPUT_REPORT:
- case HID_OUTPUT_REPORT:
- case HID_FEATURE_REPORT:
- break;
- default:
- return -EINVAL;
- }
+ ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
+ if (ret)
+ return ret;
switch (reqtype) {
case HID_REQ_GET_REPORT:
@@ -440,29 +453,16 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
return -EINVAL;
}
- if (buf__sz < 1)
- return -EINVAL;
-
hdev = (struct hid_device *)ctx->hid; /* discard const */
- report_enum = hdev->report_enum + rtype;
- report = hid_bpf_ops->hid_get_report(report_enum, buf);
- if (!report)
- return -EINVAL;
-
- report_len = hid_report_len(report);
-
- if (buf__sz > report_len)
- buf__sz = report_len;
-
- dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
+ dma_data = kmemdup(buf, size, GFP_KERNEL);
if (!dma_data)
return -ENOMEM;
ret = hid_bpf_ops->hid_hw_raw_request(hdev,
dma_data[0],
dma_data,
- buf__sz,
+ size,
rtype,
reqtype);
@@ -472,8 +472,90 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
kfree(dma_data);
return ret;
}
+
+/**
+ * hid_bpf_hw_output_report - Send an output report to a HID device
+ *
+ * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
+ * @buf: a %PTR_TO_MEM buffer
+ * @buf__sz: the size of the data to transfer
+ *
+ * Returns the number of bytes transferred on success, a negative error code otherwise.
+ */
+__bpf_kfunc int
+hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
+{
+ struct hid_device *hdev;
+ size_t size = buf__sz;
+ u8 *dma_data;
+ int ret;
+
+ /* check arguments */
+ ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
+ if (ret)
+ return ret;
+
+ hdev = (struct hid_device *)ctx->hid; /* discard const */
+
+ dma_data = kmemdup(buf, size, GFP_KERNEL);
+ if (!dma_data)
+ return -ENOMEM;
+
+ ret = hid_bpf_ops->hid_hw_output_report(hdev,
+ dma_data,
+ size);
+
+ kfree(dma_data);
+ return ret;
+}
+
+/**
+ * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
+ *
+ * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
+ * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
+ * @buf: a %PTR_TO_MEM buffer
+ * @buf__sz: the size of the data to transfer
+ *
+ * Returns %0 on success, a negative error code otherwise.
+ */
+__bpf_kfunc int
+hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
+ const size_t buf__sz)
+{
+ struct hid_device *hdev;
+ size_t size = buf__sz;
+ int ret;
+
+ /* check arguments */
+ ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
+ if (ret)
+ return ret;
+
+ hdev = (struct hid_device *)ctx->hid; /* discard const */
+
+ return hid_bpf_ops->hid_input_report(hdev, type, buf, size, 0);
+}
__bpf_kfunc_end_defs();
+/*
+ * The following set contains all functions we agree BPF programs
+ * can use.
+ */
+BTF_KFUNCS_START(hid_bpf_kfunc_ids)
+BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
+BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
+BTF_KFUNCS_END(hid_bpf_kfunc_ids)
+
+static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &hid_bpf_kfunc_ids,
+};
+
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
@@ -492,6 +574,8 @@ BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
+BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
+BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
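For readers following along: the two kfuncs added above, hid_bpf_hw_output_report() and hid_bpf_input_report(), are also registered in the syscall kfunc set, so a SEC("syscall") HID-BPF program can drive a device directly. A minimal sketch of such a caller, assuming the kfunc declarations from hid_bpf_helpers.h and a purely hypothetical one-byte output payload with report ID 0x02:

```
/* Sketch only, not part of this patch: report ID and payload are
 * hypothetical, and the kfunc declarations are assumed to come from
 * hid_bpf_helpers.h as in the programs added below.
 */
SEC("syscall")
int set_led(struct hid_bpf_probe_args *ctx)
{
	__u8 buf[2] = { 0x02, 0x01 };	/* hypothetical report ID 0x02, LED on */
	struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);

	if (!hctx) {
		ctx->retval = -ENODEV;
		return 0;
	}

	/* hid_bpf_hw_output_report() clamps the size to the report length
	 * and copies the buffer into DMA-able memory before sending it
	 */
	ctx->retval = hid_bpf_hw_output_report(hctx, buf, sizeof(buf));

	hid_bpf_release_context(hctx);
	return 0;
}
```

hid_bpf_input_report() is used the same way, except that it injects the buffer back into the kernel's input path instead of sending it out to the device.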
diff --git a/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c b/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c
new file mode 100644
index 000000000000..dc26a7677d36
--- /dev/null
+++ b/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_BETOP_2185PC 0x11C0
+#define PID_RAPTOR_MACH_2 0x5606
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_BETOP_2185PC, PID_RAPTOR_MACH_2),
+);
+
+/*
+ * For reference, this is the fixed report descriptor
+ *
+ * static const __u8 fixed_rdesc[] = {
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x04, // Usage (Joystick) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 6
+ * 0x85, 0x01, // Report ID (1) 8
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 10
+ * 0x09, 0x30, // Usage (X) 12
+ * 0x75, 0x10, // Report Size (16) 14
+ * 0x95, 0x01, // Report Count (1) 16
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 20
+ * 0x46, 0xff, 0x07, // Physical Maximum (2047) 23
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 28
+ * 0x09, 0x31, // Usage (Y) 30
+ * 0x75, 0x10, // Report Size (16) 32
+ * 0x95, 0x01, // Report Count (1) 34
+ * 0x15, 0x00, // Logical Minimum (0) 36
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 38
+ * 0x46, 0xff, 0x07, // Physical Maximum (2047) 41
+ * 0x81, 0x02, // Input (Data,Var,Abs) 44
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 46
+ * 0x09, 0x33, // Usage (Rx) 48
+ * 0x75, 0x10, // Report Size (16) 50
+ * 0x95, 0x01, // Report Count (1) 52
+ * 0x15, 0x00, // Logical Minimum (0) 54
+ * 0x26, 0xff, 0x03, // Logical Maximum (1023) 56
+ * 0x46, 0xff, 0x03, // Physical Maximum (1023) 59
+ * 0x81, 0x02, // Input (Data,Var,Abs) 62
+ * 0x05, 0x00, // Usage Page (Undefined) 64
+ * 0x09, 0x00, // Usage (Undefined) 66
+ * 0x75, 0x10, // Report Size (16) 68
+ * 0x95, 0x01, // Report Count (1) 70
+ * 0x15, 0x00, // Logical Minimum (0) 72
+ * 0x26, 0xff, 0x03, // Logical Maximum (1023) 74
+ * 0x46, 0xff, 0x03, // Physical Maximum (1023) 77
+ * 0x81, 0x02, // Input (Data,Var,Abs) 80
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 82
+ * 0x09, 0x32, // Usage (Z) 84
+ * 0x75, 0x10, // Report Size (16) 86
+ * 0x95, 0x01, // Report Count (1) 88
+ * 0x15, 0x00, // Logical Minimum (0) 90
+ * 0x26, 0xff, 0x03, // Logical Maximum (1023) 92
+ * 0x46, 0xff, 0x03, // Physical Maximum (1023) 95
+ * 0x81, 0x02, // Input (Data,Var,Abs) 98
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 100
+ * 0x09, 0x35, // Usage (Rz) 102
+ * 0x75, 0x10, // Report Size (16) 104
+ * 0x95, 0x01, // Report Count (1) 106
+ * 0x15, 0x00, // Logical Minimum (0) 108
+ * 0x26, 0xff, 0x03, // Logical Maximum (1023) 110
+ * 0x46, 0xff, 0x03, // Physical Maximum (1023) 113
+ * 0x81, 0x02, // Input (Data,Var,Abs) 116
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 118
+ * 0x09, 0x34, // Usage (Ry) 120
+ * 0x75, 0x10, // Report Size (16) 122
+ * 0x95, 0x01, // Report Count (1) 124
+ * 0x15, 0x00, // Logical Minimum (0) 126
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 128
+ * 0x46, 0xff, 0x07, // Physical Maximum (2047) 131
+ * 0x81, 0x02, // Input (Data,Var,Abs) 134
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 136
+ * 0x09, 0x36, // Usage (Slider) 138
+ * 0x75, 0x10, // Report Size (16) 140
+ * 0x95, 0x01, // Report Count (1) 142
+ * 0x15, 0x00, // Logical Minimum (0) 144
+ * 0x26, 0xff, 0x03, // Logical Maximum (1023) 146
+ * 0x46, 0xff, 0x03, // Physical Maximum (1023) 149
+ * 0x81, 0x02, // Input (Data,Var,Abs) 152
+ * 0x05, 0x09, // Usage Page (Button) 154
+ * 0x19, 0x01, // Usage Minimum (1) 156
+ * 0x2a, 0x1d, 0x00, // Usage Maximum (29) 158
+ * 0x15, 0x00, // Logical Minimum (0) 161
+ * 0x25, 0x01, // Logical Maximum (1) 163
+ * 0x75, 0x01, // Report Size (1) 165
+ * 0x96, 0x80, 0x00, // Report Count (128) 167
+ * 0x81, 0x02, // Input (Data,Var,Abs) 170
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 172
+ * 0x09, 0x39, // Usage (Hat switch) 174
+ * 0x26, 0x07, 0x00, // Logical Maximum (7) 176 // changed (was 239)
+ * 0x46, 0x68, 0x01, // Physical Maximum (360) 179
+ * 0x65, 0x14, // Unit (EnglishRotation: deg) 182
+ * 0x75, 0x10, // Report Size (16) 184
+ * 0x95, 0x01, // Report Count (1) 186
+ * 0x81, 0x42, // Input (Data,Var,Abs,Null) 188
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 190
+ * 0x09, 0x00, // Usage (Undefined) 192
+ * 0x75, 0x08, // Report Size (8) 194
+ * 0x95, 0x1d, // Report Count (29) 196
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 198
+ * 0x15, 0x00, // Logical Minimum (0) 200
+ * 0x26, 0xef, 0x00, // Logical Maximum (239) 202
+ * 0x85, 0x58, // Report ID (88) 205
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 207
+ * 0x46, 0xff, 0x00, // Physical Maximum (255) 210
+ * 0x75, 0x08, // Report Size (8) 213
+ * 0x95, 0x3f, // Report Count (63) 215
+ * 0x09, 0x00, // Usage (Undefined) 217
+ * 0x91, 0x02, // Output (Data,Var,Abs) 219
+ * 0x85, 0x59, // Report ID (89) 221
+ * 0x75, 0x08, // Report Size (8) 223
+ * 0x95, 0x80, // Report Count (128) 225
+ * 0x09, 0x00, // Usage (Undefined) 227
+ * 0xb1, 0x02, // Feature (Data,Var,Abs) 229
+ * 0xc0, // End Collection 231
+ * };
+ */
+
+/*
+ * We need to amend the report descriptor for the following:
+ * - the joystick sends its hat_switch data between 0 and 239 but
+ * the kernel expects the logical max to fit into a signed 8-bit
+ * integer. We thus divide it by 30 to match what other joysticks are
+ * doing.
+ */
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc_raptor_mach_2, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ data[177] = 0x07;
+
+ return 0;
+}
+
+/*
+ * The hat_switch value at offsets 33 and 34 (16 bits) needs
+ * to be reduced to a single signed 8-bit integer. So we
+ * divide it by 30.
+ * Byte 34 is always null, so it is ignored.
+ */
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(raptor_mach_2_fix_hat_switch, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 64 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 0x01) /* not the joystick report ID */
+ return 0;
+
+ data[33] /= 30;
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != 232;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ /* ensure the kernel isn't fixed already */
+ if (ctx->rdesc[177] != 0xef) /* Logical Max of 239 */
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
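
A standalone sanity check of the divide-by-30 mapping used above (illustration only, not part of the fix): every raw hat value the device can send in the 0..239 range lands on one of the 8 directions once divided by 30, which matches the Logical Maximum (7) patched into the descriptor.

```
#include <assert.h>

int main(void)
{
	/* raw hat values 0..239 divided by 30 all fall in 0..7 */
	for (int raw = 0; raw <= 239; raw++)
		assert(raw / 30 >= 0 && raw / 30 <= 7);
	return 0;
}
```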
diff --git a/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c b/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c
new file mode 100644
index 000000000000..3d14bbb6f276
--- /dev/null
+++ b/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HP 0x03F0
+#define PID_ELITE_PRESENTER 0x464A
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_GENERIC, VID_HP, PID_ELITE_PRESENTER)
+);
+
+/*
+ * Already fixed as of commit 0db117359e47 ("HID: add quirk for 03f0:464a
+ * HP Elite Presenter Mouse") in the kernel, but this is a slightly better
+ * fix.
+ *
+ * The HP Elite Presenter Mouse HID report descriptor shows
+ * two mice (Report ID 0x1 and 0x2), one keypad (Report ID 0x5),
+ * and two Consumer Controls (Report IDs 0x6 and 0x3).
+ * Prior to these fixes it registered one mouse, one keypad
+ * and one Consumer Control, and it was usable only as a
+ * digital laser pointer (one of the two mice).
+ * We replace the second mouse collection with a pointer collection,
+ * allowing the device to be used both as a mouse and a digital laser
+ * pointer.
+ */
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* replace application mouse by application pointer on the second collection */
+ if (data[79] == 0x02)
+ data[79] = 0x01;
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != 264;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
new file mode 100644
index 000000000000..ff759f2276f9
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_KAMVAS_PRO_19 0x006B
+#define NAME_KAMVAS_PRO_19 "HUION Huion Tablet_GT1902"
+
+#define TEST_PREFIX "uhid test "
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, VID_HUION, PID_KAMVAS_PRO_19),
+);
+
+bool prev_was_out_of_range;
+bool in_eraser_mode;
+
+/*
+ * We need to amend the report descriptor for the following:
+ * - the second button is reported through Secondary Tip Switch instead of Secondary Barrel Switch
+ * - the third button is reported through Invert, and we need some room to report it.
+ *
+ */
+static const __u8 fixed_rdesc[] = {
+ 0x05, 0x0d, // Usage Page (Digitizers) 0
+ 0x09, 0x02, // Usage (Pen) 2
+ 0xa1, 0x01, // Collection (Application) 4
+ 0x85, 0x0a, // Report ID (10) 6
+ 0x09, 0x20, // Usage (Stylus) 8
+ 0xa1, 0x01, // Collection (Application) 10
+ 0x09, 0x42, // Usage (Tip Switch) 12
+ 0x09, 0x44, // Usage (Barrel Switch) 14
+ 0x09, 0x5a, // Usage (Secondary Barrel Switch) 16 /* changed from Secondary Tip Switch */
+ 0x09, 0x3c, // Usage (Invert) 18
+ 0x09, 0x45, // Usage (Eraser) 20
+ 0x15, 0x00, // Logical Minimum (0) 22
+ 0x25, 0x01, // Logical Maximum (1) 24
+ 0x75, 0x01, // Report Size (1) 26
+ 0x95, 0x05, // Report Count (5) 28 /* changed (was 5) */
+ 0x81, 0x02, // Input (Data,Var,Abs) 30
+ 0x05, 0x09, // Usage Page (Button) /* inserted */
+ 0x09, 0x4a, // Usage (0x4a) /* inserted to be translated as input usage 0x149: BTN_STYLUS3 */
+ 0x95, 0x01, // Report Count (1) /* inserted */
+ 0x81, 0x02, // Input (Data,Var,Abs) /* inserted */
+ 0x05, 0x0d, // Usage Page (Digitizers) /* inserted */
+ 0x09, 0x32, // Usage (In Range) 32
+ 0x75, 0x01, // Report Size (1) 34
+ 0x95, 0x01, // Report Count (1) 36
+ 0x81, 0x02, // Input (Data,Var,Abs) 38
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ 0x09, 0x30, // Usage (X) 44
+ 0x09, 0x31, // Usage (Y) 46
+ 0x55, 0x0d, // Unit Exponent (-3) 48
+ 0x65, 0x33, // Unit (EnglishLinear: in³) 50
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 52
+ 0x35, 0x00, // Physical Minimum (0) 55
+ 0x46, 0x00, 0x08, // Physical Maximum (2048) 57
+ 0x75, 0x10, // Report Size (16) 60
+ 0x95, 0x02, // Report Count (2) 62
+ 0x81, 0x02, // Input (Data,Var,Abs) 64
+ 0x05, 0x0d, // Usage Page (Digitizers) 66
+ 0x09, 0x30, // Usage (Tip Pressure) 68
+ 0x26, 0xff, 0x3f, // Logical Maximum (16383) 70
+ 0x75, 0x10, // Report Size (16) 73
+ 0x95, 0x01, // Report Count (1) 75
+ 0x81, 0x02, // Input (Data,Var,Abs) 77
+ 0x09, 0x3d, // Usage (X Tilt) 79
+ 0x09, 0x3e, // Usage (Y Tilt) 81
+ 0x15, 0xa6, // Logical Minimum (-90) 83
+ 0x25, 0x5a, // Logical Maximum (90) 85
+ 0x75, 0x08, // Report Size (8) 87
+ 0x95, 0x02, // Report Count (2) 89
+ 0x81, 0x02, // Input (Data,Var,Abs) 91
+ 0xc0, // End Collection 93
+ 0xc0, // End Collection 94
+ 0x05, 0x0d, // Usage Page (Digitizers) 95
+ 0x09, 0x04, // Usage (Touch Screen) 97
+ 0xa1, 0x01, // Collection (Application) 99
+ 0x85, 0x04, // Report ID (4) 101
+ 0x09, 0x22, // Usage (Finger) 103
+ 0xa1, 0x02, // Collection (Logical) 105
+ 0x05, 0x0d, // Usage Page (Digitizers) 107
+ 0x95, 0x01, // Report Count (1) 109
+ 0x75, 0x06, // Report Size (6) 111
+ 0x09, 0x51, // Usage (Contact Id) 113
+ 0x15, 0x00, // Logical Minimum (0) 115
+ 0x25, 0x3f, // Logical Maximum (63) 117
+ 0x81, 0x02, // Input (Data,Var,Abs) 119
+ 0x09, 0x42, // Usage (Tip Switch) 121
+ 0x25, 0x01, // Logical Maximum (1) 123
+ 0x75, 0x01, // Report Size (1) 125
+ 0x95, 0x01, // Report Count (1) 127
+ 0x81, 0x02, // Input (Data,Var,Abs) 129
+ 0x75, 0x01, // Report Size (1) 131
+ 0x95, 0x01, // Report Count (1) 133
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 135
+ 0x05, 0x01, // Usage Page (Generic Desktop) 137
+ 0x75, 0x10, // Report Size (16) 139
+ 0x55, 0x0e, // Unit Exponent (-2) 141
+ 0x65, 0x11, // Unit (SILinear: cm) 143
+ 0x09, 0x30, // Usage (X) 145
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 147
+ 0x35, 0x00, // Physical Minimum (0) 150
+ 0x46, 0x15, 0x0c, // Physical Maximum (3093) 152
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 155
+ 0x09, 0x31, // Usage (Y) 157
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 159
+ 0x46, 0xcb, 0x06, // Physical Maximum (1739) 162
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 165
+ 0x05, 0x0d, // Usage Page (Digitizers) 167
+ 0x09, 0x30, // Usage (Tip Pressure) 169
+ 0x26, 0xff, 0x1f, // Logical Maximum (8191) 171
+ 0x75, 0x10, // Report Size (16) 174
+ 0x95, 0x01, // Report Count (1) 176
+ 0x81, 0x02, // Input (Data,Var,Abs) 178
+ 0xc0, // End Collection 180
+ 0x05, 0x0d, // Usage Page (Digitizers) 181
+ 0x09, 0x22, // Usage (Finger) 183
+ 0xa1, 0x02, // Collection (Logical) 185
+ 0x05, 0x0d, // Usage Page (Digitizers) 187
+ 0x95, 0x01, // Report Count (1) 189
+ 0x75, 0x06, // Report Size (6) 191
+ 0x09, 0x51, // Usage (Contact Id) 193
+ 0x15, 0x00, // Logical Minimum (0) 195
+ 0x25, 0x3f, // Logical Maximum (63) 197
+ 0x81, 0x02, // Input (Data,Var,Abs) 199
+ 0x09, 0x42, // Usage (Tip Switch) 201
+ 0x25, 0x01, // Logical Maximum (1) 203
+ 0x75, 0x01, // Report Size (1) 205
+ 0x95, 0x01, // Report Count (1) 207
+ 0x81, 0x02, // Input (Data,Var,Abs) 209
+ 0x75, 0x01, // Report Size (1) 211
+ 0x95, 0x01, // Report Count (1) 213
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 215
+ 0x05, 0x01, // Usage Page (Generic Desktop) 217
+ 0x75, 0x10, // Report Size (16) 219
+ 0x55, 0x0e, // Unit Exponent (-2) 221
+ 0x65, 0x11, // Unit (SILinear: cm) 223
+ 0x09, 0x30, // Usage (X) 225
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 227
+ 0x35, 0x00, // Physical Minimum (0) 230
+ 0x46, 0x15, 0x0c, // Physical Maximum (3093) 232
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 235
+ 0x09, 0x31, // Usage (Y) 237
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 239
+ 0x46, 0xcb, 0x06, // Physical Maximum (1739) 242
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 245
+ 0x05, 0x0d, // Usage Page (Digitizers) 247
+ 0x09, 0x30, // Usage (Tip Pressure) 249
+ 0x26, 0xff, 0x1f, // Logical Maximum (8191) 251
+ 0x75, 0x10, // Report Size (16) 254
+ 0x95, 0x01, // Report Count (1) 256
+ 0x81, 0x02, // Input (Data,Var,Abs) 258
+ 0xc0, // End Collection 260
+ 0x05, 0x0d, // Usage Page (Digitizers) 261
+ 0x09, 0x56, // Usage (Scan Time) 263
+ 0x55, 0x00, // Unit Exponent (0) 265
+ 0x65, 0x00, // Unit (None) 267
+ 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647) 269
+ 0x95, 0x01, // Report Count (1) 274
+ 0x75, 0x20, // Report Size (32) 276
+ 0x81, 0x02, // Input (Data,Var,Abs) 278
+ 0x09, 0x54, // Usage (Contact Count) 280
+ 0x25, 0x7f, // Logical Maximum (127) 282
+ 0x95, 0x01, // Report Count (1) 284
+ 0x75, 0x08, // Report Size (8) 286
+ 0x81, 0x02, // Input (Data,Var,Abs) 288
+ 0x75, 0x08, // Report Size (8) 290
+ 0x95, 0x08, // Report Count (8) 292
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 294
+ 0x85, 0x05, // Report ID (5) 296
+ 0x09, 0x55, // Usage (Contact Max) 298
+ 0x25, 0x0a, // Logical Maximum (10) 300
+ 0x75, 0x08, // Report Size (8) 302
+ 0x95, 0x01, // Report Count (1) 304
+ 0xb1, 0x02, // Feature (Data,Var,Abs) 306
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 308
+ 0x09, 0xc5, // Usage (Vendor Usage 0xc5) 311
+ 0x85, 0x06, // Report ID (6) 313
+ 0x15, 0x00, // Logical Minimum (0) 315
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 317
+ 0x75, 0x08, // Report Size (8) 320
+ 0x96, 0x00, 0x01, // Report Count (256) 322
+ 0xb1, 0x02, // Feature (Data,Var,Abs) 325
+ 0xc0, // End Collection 327
+};
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+
+ return sizeof(fixed_rdesc);
+}
+
+/*
+ * This tablet reports the 3rd button through Invert, but this conflicts
+ * with the normal eraser mode.
+ * Fortunately, before entering eraser mode (so Invert = 1),
+ * the tablet always sends an out-of-proximity event.
+ * So we can detect that single event and:
+ * - if there was none but the Invert bit was toggled: this is the
+ * third button
+ * - if there was this out-of-proximity event, we are entering
+ * eraser mode, and we stay in it until the next out-of-proximity event.
+ */
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(kamvas_pro_19_fix_3rd_button, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 0x0a) /* not the pen report ID */
+ return 0;
+
+ /* stylus is out of range */
+ if (!(data[1] & 0x40)) {
+ prev_was_out_of_range = true;
+ in_eraser_mode = false;
+ return 0;
+ }
+
+ /* going into eraser mode (Invert = 1) only happens after an
+ * out of range event
+ */
+ if (prev_was_out_of_range && (data[1] & 0x18))
+ in_eraser_mode = true;
+
+ /* eraser mode works fine */
+ if (in_eraser_mode)
+ return 0;
+
+ /* copy the Invert bit reported for the 3rd button in bit 7 */
+ if (data[1] & 0x08)
+ data[1] |= 0x20;
+
+ /* clear Invert bit now that it was copied */
+ data[1] &= 0xf7;
+
+ prev_was_out_of_range = false;
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != 328;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ /* ensure the kernel isn't fixed already */
+ if (ctx->rdesc[17] != 0x43) /* Secondary Tip Switch */
+ ctx->retval = -EINVAL;
+
+ struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);
+
+ if (!hctx) {
+ ctx->retval = -EINVAL;
+ return 0;
+ }
+
+ const char *name = hctx->hid->name;
+
+ /* strip out TEST_PREFIX */
+ if (!__builtin_memcmp(name, TEST_PREFIX, sizeof(TEST_PREFIX) - 1))
+ name += sizeof(TEST_PREFIX) - 1;
+
+ if (__builtin_memcmp(name, NAME_KAMVAS_PRO_19, sizeof(NAME_KAMVAS_PRO_19)))
+ ctx->retval = -EINVAL;
+
+ hid_bpf_release_context(hctx);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c b/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c
new file mode 100644
index 000000000000..225cbefdbf0e
--- /dev/null
+++ b/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_IOGEAR 0x258A /* VID is shared with SinoWealth and Glorious and prob others */
+#define PID_MOMENTUM 0x0027
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_IOGEAR, PID_MOMENTUM)
+);
+
+/*
+ * The IOGear Kaliber Gaming MMOmentum Pro mouse has multiple buttons (12)
+ * but only 5 are accessible out of the box because the report descriptor
+ * marks the other buttons as constants.
+ * We just fix the report descriptor to enable those missing 7 buttons.
+ */
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ const u8 offsets[] = {84, 112, 140};
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* if not Keyboard */
+ if (data[3] != 0x06)
+ return 0;
+
+ for (int idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
+ u8 offset = offsets[idx];
+
+ /* if Input (Cnst,Var,Abs) , make it Input (Data,Var,Abs) */
+ if (data[offset] == 0x81 && data[offset + 1] == 0x03)
+ data[offset + 1] = 0x02;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /* only bind to the keyboard interface */
+ ctx->retval = ctx->rdesc_size != 213;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Makefile b/drivers/hid/bpf/progs/Makefile
new file mode 100644
index 000000000000..63ed7e02adf1
--- /dev/null
+++ b/drivers/hid/bpf/progs/Makefile
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0
+OUTPUT := .output
+abs_out := $(abspath $(OUTPUT))
+
+CLANG ?= clang
+LLC ?= llc
+LLVM_STRIP ?= llvm-strip
+
+TOOLS_PATH := $(abspath ../../../../tools)
+BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool
+BPFTOOL_OUTPUT := $(abs_out)/bpftool
+DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+
+LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf
+LIBBPF_OUTPUT := $(abs_out)/libbpf
+LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
+BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a
+
+INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi
+CFLAGS := -g -Wall
+
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
+
+ifeq ($(V),1)
+Q =
+msg =
+else
+Q = @
+msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+MAKEFLAGS += --no-print-directory
+submake_extras := feature_display=0
+endif
+
+.DELETE_ON_ERROR:
+
+.PHONY: all clean
+
+SOURCES = $(wildcard *.bpf.c)
+TARGETS = $(SOURCES:.bpf.c=.bpf.o)
+
+all: $(TARGETS)
+
+clean:
+ $(call msg,CLEAN)
+ $(Q)rm -rf $(OUTPUT) $(TARGETS)
+
+%.bpf.o: %.bpf.c vmlinux.h $(BPFOBJ) | $(OUTPUT)
+ $(call msg,BPF,$@)
+ $(Q)$(CLANG) -g -O2 --target=bpf $(INCLUDES) \
+ -c $(filter %.c,$^) -o $@ && \
+ $(LLVM_STRIP) -g $@
+
+vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ifeq ($(VMLINUX_H),)
+ $(call msg,GEN,,$@)
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+ $(call msg,CP,,$@)
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
+ $(call msg,MKDIR,$@)
+ $(Q)mkdir -p $@
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
+ OUTPUT=$(abspath $(dir $@))/ prefix= \
+ DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers
+
+ifeq ($(CROSS_COMPILE),)
+$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
+ OUTPUT=$(BPFTOOL_OUTPUT)/ \
+ LIBBPF_BOOTSTRAP_OUTPUT=$(LIBBPF_OUTPUT)/ \
+ LIBBPF_BOOTSTRAP_DESTDIR=$(LIBBPF_DESTDIR)/ bootstrap
+else
+$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
+ OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap
+endif
diff --git a/drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c b/drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c
new file mode 100644
index 000000000000..c04abecab8ee
--- /dev/null
+++ b/drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_MICROSOFT 0x045e
+#define PID_XBOX_ELITE_2 0x0b22
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_GENERIC, VID_MICROSOFT, PID_XBOX_ELITE_2)
+);
+
+/*
+ * When using the XBox Wireless Controller Elite 2 over Bluetooth,
+ * the device exports the paddles on the back of the device as a single
+ * bitfield value of usage "Assign Selection".
+ *
+ * The kernel doesn't process those usages properly and reports KEY_UNKNOWN
+ * for them.
+ *
+ * SDL doesn't know how to interpret that KEY_UNKNOWN and thus ignores the paddles.
+ *
+ * Given that over USB the kernel uses BTN_TRIGGER_HAPPY[5-8], we
+ * can tweak the report descriptor to make the kernel interpret it properly:
+ * - we need an application collection of gamepad (so we have to close the current
+ * Consumer Control one)
+ * - we need to change the usage to be buttons from 0x15 to 0x18
+ */
+
+#define OFFSET_ASSIGN_SELECTION 211
+#define ORIGINAL_RDESC_SIZE 464
+
+const __u8 rdesc_assign_selection[] = {
+ 0x0a, 0x99, 0x00, // Usage (Media Select Security) 211
+ 0x15, 0x00, // Logical Minimum (0) 214
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 216
+ 0x95, 0x01, // Report Count (1) 219
+ 0x75, 0x04, // Report Size (4) 221
+ 0x81, 0x02, // Input (Data,Var,Abs) 223
+ 0x15, 0x00, // Logical Minimum (0) 225
+ 0x25, 0x00, // Logical Maximum (0) 227
+ 0x95, 0x01, // Report Count (1) 229
+ 0x75, 0x04, // Report Size (4) 231
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 233
+ 0x0a, 0x81, 0x00, // Usage (Assign Selection) 235
+ 0x15, 0x00, // Logical Minimum (0) 238
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 240
+ 0x95, 0x01, // Report Count (1) 243
+ 0x75, 0x04, // Report Size (4) 245
+ 0x81, 0x02, // Input (Data,Var,Abs) 247
+};
+
+/*
+ * we replace the above report descriptor extract
+ * with the one below.
+ * To make things equal in size, we take out a larger
+ * portion than just the "Assign Selection" range, because
+ * we need to insert a new application collection to force
+ * the kernel to use BTN_TRIGGER_HAPPY[4-7].
+ */
+const __u8 fixed_rdesc_assign_selection[] = {
+ 0x0a, 0x99, 0x00, // Usage (Media Select Security) 211
+ 0x15, 0x00, // Logical Minimum (0) 214
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 216
+ 0x95, 0x01, // Report Count (1) 219
+ 0x75, 0x04, // Report Size (4) 221
+ 0x81, 0x02, // Input (Data,Var,Abs) 223
+ /* 0x15, 0x00, */ // Logical Minimum (0) ignored
+ 0x25, 0x01, // Logical Maximum (1) 225
+ 0x95, 0x04, // Report Count (4) 227
+ 0x75, 0x01, // Report Size (1) 229
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 231
+ 0xc0, // End Collection 233
+ 0x05, 0x01, // Usage Page (Generic Desktop) 234
+ 0x0a, 0x05, 0x00, // Usage (Game Pad) 236
+ 0xa1, 0x01, // Collection (Application) 239
+ 0x05, 0x09, // Usage Page (Button) 241
+ 0x19, 0x15, // Usage Minimum (21) 243
+ 0x29, 0x18, // Usage Maximum (24) 245
+ /* 0x15, 0x00, */ // Logical Minimum (0) ignored
+ /* 0x25, 0x01, */ // Logical Maximum (1) ignored
+ /* 0x95, 0x01, */ // Report Count (1) ignored
+ /* 0x75, 0x04, */ // Report Size (4) ignored
+ 0x81, 0x02, // Input (Data,Var,Abs) 247
+};
+
+_Static_assert(sizeof(rdesc_assign_selection) == sizeof(fixed_rdesc_assign_selection),
+ "Rdesc and fixed rdesc of different size");
+_Static_assert(sizeof(rdesc_assign_selection) + OFFSET_ASSIGN_SELECTION < ORIGINAL_RDESC_SIZE,
+ "Rdesc at given offset is too big");
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Check that the device is compatible */
+ if (__builtin_memcmp(data + OFFSET_ASSIGN_SELECTION,
+ rdesc_assign_selection,
+ sizeof(rdesc_assign_selection)))
+ return 0;
+
+ __builtin_memcpy(data + OFFSET_ASSIGN_SELECTION,
+ fixed_rdesc_assign_selection,
+ sizeof(fixed_rdesc_assign_selection));
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /* only bind to the interface with the expected report descriptor */
+ ctx->retval = ctx->rdesc_size != ORIGINAL_RDESC_SIZE;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ if (__builtin_memcmp(ctx->rdesc + OFFSET_ASSIGN_SELECTION,
+ rdesc_assign_selection,
+ sizeof(rdesc_assign_selection)))
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/README b/drivers/hid/bpf/progs/README
new file mode 100644
index 000000000000..20b0928f385b
--- /dev/null
+++ b/drivers/hid/bpf/progs/README
@@ -0,0 +1,102 @@
+# HID-BPF programs
+
+This directory contains various fixes for devices. They add new features or
+fix some behaviors without being strictly mandatory. It is better to load them
+when you have such a device, but they should not be required for a device
+to work during the boot stage.
+
+The .bpf.c files provided here are not automatically compiled in the kernel.
+They should be loaded in the kernel by `udev-hid-bpf`:
+
+https://gitlab.freedesktop.org/libevdev/udev-hid-bpf
+
+The main reasons for these fixes to be here are to have a central place to
+"upstream" them, and to be able to test them through the HID
+selftests.
+
+Once a .bpf.c file is accepted here, it is duplicated in `udev-hid-bpf`
+in the `src/bpf/stable` directory, and distributions are encouraged to
+only ship those bpf objects. So adding a file here should eventually
+land in distributions when they update `udev-hid-bpf`.
+
+## Compilation
+
+Just run `make`
+
+## Installation
+
+### Automated way
+
+Just run `sudo udev-hid-bpf install ./my-awesome-fix.bpf.o`
+
+### Manual way
+
+- copy the `.bpf.o` you want in `/etc/udev-hid-bpf/`
+- create a new udev rule to automatically load it
+
+The following should do the trick (assuming udev-hid-bpf is available in
+/usr/bin):
+
+```
+$> cp xppen-ArtistPro16Gen2.bpf.o /etc/udev-hid-bpf/
+$> udev-hid-bpf inspect xppen-ArtistPro16Gen2.bpf.o
+[
+ {
+ "name": "xppen-ArtistPro16Gen2.bpf.o",
+ "devices": [
+ {
+ "bus": "0x0003",
+ "group": "0x0001",
+ "vid": "0x28BD",
+ "pid": "0x095A"
+ },
+ {
+ "bus": "0x0003",
+ "group": "0x0001",
+ "vid": "0x28BD",
+ "pid": "0x095B"
+ }
+ ],
+...
+$> cat <<EOF > /etc/udev/rules.d/99-load-hid-bpf-xppen-ArtistPro16Gen2.rules
+ACTION!="add|remove", GOTO="hid_bpf_end"
+SUBSYSTEM!="hid", GOTO="hid_bpf_end"
+
+# xppen-ArtistPro16Gen2.bpf.o
+ACTION=="add",ENV{MODALIAS}=="hid:b0003g0001v000028BDp0000095A", RUN{program}+="/usr/local/bin/udev-hid-bpf add $sys$devpath /etc/udev-hid-bpf/xppen-ArtistPro16Gen2.bpf.o"
+ACTION=="remove",ENV{MODALIAS}=="hid:b0003g0001v000028BDp0000095A", RUN{program}+="/usr/local/bin/udev-hid-bpf remove $sys$devpath "
+# xppen-ArtistPro16Gen2.bpf.o
+ACTION=="add",ENV{MODALIAS}=="hid:b0003g0001v000028BDp0000095B", RUN{program}+="/usr/local/bin/udev-hid-bpf add $sys$devpath /etc/udev-hid-bpf/xppen-ArtistPro16Gen2.bpf.o"
+ACTION=="remove",ENV{MODALIAS}=="hid:b0003g0001v000028BDp0000095B", RUN{program}+="/usr/local/bin/udev-hid-bpf remove $sys$devpath "
+
+LABEL="hid_bpf_end"
+EOF
+$> udevadm control --reload
+```
+
+Then unplug and replug the device.
+
+## Checks
+
+### udev rule
+
+You can check that the udev rule is correctly working by issuing
+
+```
+$> udevadm test /sys/bus/hid/devices/0003:28BD:095B*
+...
+run: '/usr/local/bin/udev-hid-bpf add /sys/devices/virtual/misc/uhid/0003:28BD:095B.0E57 /etc/udev-hid-bpf/xppen-ArtistPro16Gen2.bpf.o'
+```
+
+### program loaded
+
+You can check that the program has been properly loaded with `bpftool`
+
+```
+$> bpftool prog
+...
+247: tracing name xppen_16_fix_eraser tag 18d389353ed2ef07 gpl
+ loaded_at 2024-03-28T16:02:28+0100 uid 0
+ xlated 120B jited 77B memlock 4096B
+ btf_id 487
+```
diff --git a/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c b/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c
new file mode 100644
index 000000000000..dc05aa48faa7
--- /dev/null
+++ b/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_WACOM 0x056a
+#define ART_PEN_ID 0x0804
+#define PID_INTUOS_PRO_2_M 0x0357
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_WACOM, PID_INTUOS_PRO_2_M)
+);
+
+/*
+ * This filter is here for the Art Pen stylus only:
+ * - when used on some Wacom devices (see the list of attached PIDs), this pen
+ * reports a new pressure value only every other event.
+ * - to solve that, given that we know that the next event will be the same as
+ * the current one, we can emulate a smoother pressure reporting by reporting
+ * the mean of the previous value and the current one.
+ *
+ * We are effectively delaying the pressure by one event every other event, but
+ * that's less of an annoyance compared to the chunkiness of the reported data.
+ *
+ * For example, let's assume the following set of events:
+ * <Tip switch 0> <X 0> <Y 0> <Pressure 0 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 1> <Y 1> <Pressure 100 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 2> <Y 2> <Pressure 100 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 3> <Y 3> <Pressure 200 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 4> <Y 4> <Pressure 200 > <Tooltype 0x0804>
+ * <Tip switch 0> <X 5> <Y 5> <Pressure 0 > <Tooltype 0x0804>
+ *
+ * The filter will report:
+ * <Tip switch 0> <X 0> <Y 0> <Pressure 0 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 1> <Y 1> <Pressure * 50*> <Tooltype 0x0804>
+ * <Tip switch 1> <X 2> <Y 2> <Pressure 100 > <Tooltype 0x0804>
+ * <Tip switch 1> <X 3> <Y 3> <Pressure *150*> <Tooltype 0x0804>
+ * <Tip switch 1> <X 4> <Y 4> <Pressure 200 > <Tooltype 0x0804>
+ * <Tip switch 0> <X 5> <Y 5> <Pressure 0 > <Tooltype 0x0804>
+ *
+ */
+
+struct wacom_params {
+ __u16 pid;
+ __u16 rdesc_len;
+ __u8 report_id;
+ __u8 report_len;
+ struct {
+ __u8 tip_switch;
+ __u8 pressure;
+ __u8 tool_type;
+ } offsets;
+};
+
+/*
+ * Multiple devices can support the same stylus, so
+ * we need to know which device has which offsets
+ */
+static const struct wacom_params devices[] = {
+ {
+ .pid = PID_INTUOS_PRO_2_M,
+ .rdesc_len = 949,
+ .report_id = 16,
+ .report_len = 27,
+ .offsets = {
+ .tip_switch = 1,
+ .pressure = 8,
+ .tool_type = 25,
+ },
+ },
+};
+
+static struct wacom_params params = { 0 };
+
+/* HID-BPF reports a 64-byte chunk anyway, so this ensures
+ * the verifier knows we are addressing the memory correctly
+ */
+#define PEN_REPORT_LEN 64
+
+/* only odd frames are modified */
+static bool odd;
+
+static __u16 prev_pressure;
+
+static inline void *get_bits(__u8 *data, unsigned int byte_offset)
+{
+ return data + byte_offset;
+}
+
+static inline __u16 *get_u16(__u8 *data, unsigned int offset)
+{
+ return (__u16 *)get_bits(data, offset);
+}
+
+static inline __u8 *get_u8(__u8 *data, unsigned int offset)
+{
+ return (__u8 *)get_bits(data, offset);
+}
+
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(artpen_pressure_interpolate, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PEN_REPORT_LEN /* size */);
+ __u16 *pressure, *tool_type;
+ __u8 *tip_switch;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != params.report_id ||
+ params.offsets.tip_switch >= PEN_REPORT_LEN ||
+ params.offsets.pressure >= PEN_REPORT_LEN - 1 ||
+ params.offsets.tool_type >= PEN_REPORT_LEN - 1)
+ return 0; /* invalid report or parameters */
+
+ tool_type = get_u16(data, params.offsets.tool_type);
+ if (*tool_type != ART_PEN_ID)
+ return 0;
+
+ tip_switch = get_u8(data, params.offsets.tip_switch);
+ if ((*tip_switch & 0x01) == 0) {
+ prev_pressure = 0;
+ odd = true;
+ return 0;
+ }
+
+ pressure = get_u16(data, params.offsets.pressure);
+
+ if (odd)
+ *pressure = (*pressure + prev_pressure) / 2;
+
+ prev_pressure = *pressure;
+ odd = !odd;
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ struct hid_bpf_ctx *hid_ctx;
+ __u16 pid;
+ int i;
+
+ /* get a struct hid_device to access the actual pid of the device */
+ hid_ctx = hid_bpf_allocate_context(ctx->hid);
+ if (!hid_ctx) {
+ ctx->retval = -ENODEV;
+ return -1; /* EPERM check */
+ }
+ pid = hid_ctx->hid->product;
+
+ ctx->retval = -EINVAL;
+
+ /* Match the given device with the list of known devices */
+ for (i = 0; i < ARRAY_SIZE(devices); i++) {
+ const struct wacom_params *device = &devices[i];
+
+ if (device->pid == pid && device->rdesc_len == ctx->rdesc_size) {
+ params = *device;
+ ctx->retval = 0;
+ }
+ }
+
+ hid_bpf_release_context(hid_ctx);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
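
As a cross-check of the interpolation described in the comment above, here is a small standalone sketch (not part of the patch) that replays the example sequence and reproduces the expected filter output 0, 50, 100, 150, 200, 0:

```
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	const unsigned short raw[] = { 0, 100, 100, 200, 200, 0 };
	const bool tip[]           = { false, true, true, true, true, false };
	unsigned short prev = 0;
	bool odd = true;

	for (int i = 0; i < 6; i++) {
		unsigned short out = raw[i];

		if (!tip[i]) {		/* tip released: reset the filter state */
			prev = 0;
			odd = true;
		} else {
			if (odd)	/* odd frames report the mean of prev and current */
				out = (out + prev) / 2;
			prev = out;
			odd = !odd;
		}
		printf("%u ", out);	/* prints: 0 50 100 150 200 0 */
	}
	printf("\n");
	return 0;
}
```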
diff --git a/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c b/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c
new file mode 100644
index 000000000000..e1be6a12bb75
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and prob others */
+#define PID_ARTIST_24 0x093A
+#define PID_ARTIST_24_PRO 0x092D
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_24),
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_24_PRO)
+);
+
+/*
+ * We need to amend the report descriptor for the following:
+ * - the device reports Eraser instead of using Secondary Barrel Switch
+ * - the pen doesn't have a rubber tail, so basically we are removing any
+ * eraser/invert bits
+ */
+static const __u8 fixed_rdesc[] = {
+ 0x05, 0x0d, // Usage Page (Digitizers) 0
+ 0x09, 0x02, // Usage (Pen) 2
+ 0xa1, 0x01, // Collection (Application) 4
+ 0x85, 0x07, // Report ID (7) 6
+ 0x09, 0x20, // Usage (Stylus) 8
+ 0xa1, 0x00, // Collection (Physical) 10
+ 0x09, 0x42, // Usage (Tip Switch) 12
+ 0x09, 0x44, // Usage (Barrel Switch) 14
+ 0x09, 0x5a, // Usage (Secondary Barrel Switch) 16 /* changed from 0x45 (Eraser) to 0x5a (Secondary Barrel Switch) */
+ 0x15, 0x00, // Logical Minimum (0) 18
+ 0x25, 0x01, // Logical Maximum (1) 20
+ 0x75, 0x01, // Report Size (1) 22
+ 0x95, 0x03, // Report Count (3) 24
+ 0x81, 0x02, // Input (Data,Var,Abs) 26
+ 0x95, 0x02, // Report Count (2) 28
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 30
+ 0x09, 0x32, // Usage (In Range) 32
+ 0x95, 0x01, // Report Count (1) 34
+ 0x81, 0x02, // Input (Data,Var,Abs) 36
+ 0x95, 0x02, // Report Count (2) 38
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ 0x75, 0x10, // Report Size (16) 42
+ 0x95, 0x01, // Report Count (1) 44
+ 0x35, 0x00, // Physical Minimum (0) 46
+ 0xa4, // Push 48
+ 0x05, 0x01, // Usage Page (Generic Desktop) 49
+ 0x09, 0x30, // Usage (X) 51
+ 0x65, 0x13, // Unit (EnglishLinear: in) 53
+ 0x55, 0x0d, // Unit Exponent (-3) 55
+ 0x46, 0xf0, 0x50, // Physical Maximum (20720) 57
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 60
+ 0x81, 0x02, // Input (Data,Var,Abs) 63
+ 0x09, 0x31, // Usage (Y) 65
+ 0x46, 0x91, 0x2d, // Physical Maximum (11665) 67
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 70
+ 0x81, 0x02, // Input (Data,Var,Abs) 73
+ 0xb4, // Pop 75
+ 0x09, 0x30, // Usage (Tip Pressure) 76
+ 0x45, 0x00, // Physical Maximum (0) 78
+ 0x26, 0xff, 0x1f, // Logical Maximum (8191) 80
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 83
+ 0x09, 0x3d, // Usage (X Tilt) 85
+ 0x15, 0x81, // Logical Minimum (-127) 87
+ 0x25, 0x7f, // Logical Maximum (127) 89
+ 0x75, 0x08, // Report Size (8) 91
+ 0x95, 0x01, // Report Count (1) 93
+ 0x81, 0x02, // Input (Data,Var,Abs) 95
+ 0x09, 0x3e, // Usage (Y Tilt) 97
+ 0x15, 0x81, // Logical Minimum (-127) 99
+ 0x25, 0x7f, // Logical Maximum (127) 101
+ 0x81, 0x02, // Input (Data,Var,Abs) 103
+ 0xc0, // End Collection 105
+ 0xc0, // End Collection 106
+};
+
+#define BIT(n) (1UL << n)
+
+#define TIP_SWITCH BIT(0)
+#define BARREL_SWITCH BIT(1)
+#define ERASER BIT(2)
+/* padding BIT(3) */
+/* padding BIT(4) */
+#define IN_RANGE BIT(5)
+/* padding BIT(6) */
+/* padding BIT(7) */
+
+#define U16(index) (data[index] | (data[index + 1] << 8))
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc_xppen_artist24, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+
+ return sizeof(fixed_rdesc);
+}
+
+static __u8 prev_state = 0;
+
+/*
+ * There are a few cases where the device is sending wrong event
+ * sequences, all related to the second button (the pen doesn't
+ * have an eraser switch on the tail end):
+ *
+ * whenever the second button gets pressed or released, an
+ * out-of-proximity event is generated and then the firmware
+ * compensates for the missing state (and the firmware uses
+ * eraser for that button):
+ *
+ * - if the pen is in range, an extra out-of-range is sent
+ * when the second button is pressed/released:
+ * // Pen is in range
+ * E: InRange
+ *
+ * // Second button is pressed
+ * E:
+ * E: Eraser InRange
+ *
+ * // Second button is released
+ * E:
+ * E: InRange
+ *
+ * This case is ignored by this filter: it's "valid"
+ * and userspace knows how to deal with it; there are just
+ * a few out-of-prox events generated, but the user doesn't
+ * see them.
+ *
+ * - if the pen is in contact, 2 extra events are added when
+ * the second button is pressed/released: an out of range
+ * and an in range:
+ *
+ * // Pen is in contact
+ * E: TipSwitch InRange
+ *
+ * // Second button is pressed
+ * E: <- false release, needs to be filtered out
+ * E: Eraser InRange <- false release, needs to be filtered out
+ * E: TipSwitch Eraser InRange
+ *
+ * // Second button is released
+ * E: <- false release, needs to be filtered out
+ * E: InRange <- false release, needs to be filtered out
+ * E: TipSwitch InRange
+ *
+ */
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(xppen_24_fix_eraser, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+ __u8 current_state, changed_state;
+ bool prev_tip;
+ __u16 tilt;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ current_state = data[1];
+
+ /* if the state is identical to previously, early return */
+ if (current_state == prev_state)
+ return 0;
+
+ prev_tip = !!(prev_state & TIP_SWITCH);
+
+ /*
+ * Illegal transition: the pen is in range with the tip pressed, and
+ * it goes out of proximity.
+ *
+ * Ideally we should hold the event, start a timer and deliver it
+ * only if the timer ends, but we are not capable of that now.
+ *
+ * In practice it doesn't matter, because when we are in such a case,
+ * it means we are detecting a false release.
+ */
+ if ((current_state & IN_RANGE) == 0) {
+ if (prev_tip)
+ return HID_IGNORE_EVENT;
+ return 0;
+ }
+
+ /*
+ * XOR to only set the bits that have changed between
+ * previous and current state
+ */
+ changed_state = prev_state ^ current_state;
+
+ /* Store the new state for future processing */
+ prev_state = current_state;
+
+ /*
+ * We get both a tipswitch and eraser change in the same HID report:
+ * this is not an authorized transition and is unlikely to happen
+ * in real life.
+ * This is likely to be added by the firmware to emulate the
+ * eraser mode so we can skip the event.
+ */
+ if ((changed_state & (TIP_SWITCH | ERASER)) == (TIP_SWITCH | ERASER)) /* we get both a tipswitch and eraser change at the same time */
+ return HID_IGNORE_EVENT;
+
+ return 0;
+}
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /*
+ * The device exports 3 interfaces.
+ */
+ ctx->retval = ctx->rdesc_size != 107;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ /* ensure the kernel isn't fixed already */
+ if (ctx->rdesc[17] != 0x45) /* Eraser */
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
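
The filter above relies on one observation: a legitimate pen transition never toggles Tip Switch and Eraser in the same report, so XORing the previous and current state exposes the firmware's fake eraser transition. A tiny standalone illustration (not part of the patch), using the bit layout defined in the file:

```
#include <stdio.h>

int main(void)
{
	unsigned char prev = 0x21;	/* TIP_SWITCH + IN_RANGE: pen in contact */
	unsigned char curr = 0x24;	/* ERASER + IN_RANGE: firmware's fake transition */
	unsigned char changed = prev ^ curr;

	/* 0x05: TIP_SWITCH and ERASER toggled together -> HID_IGNORE_EVENT */
	printf("changed bits: 0x%02x\n", changed);
	return 0;
}
```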
diff --git a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
new file mode 100644
index 000000000000..65ef10036126
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Benjamin Tissoires
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and prob others */
+#define PID_ARTIST_PRO14_GEN2 0x095A
+#define PID_ARTIST_PRO16_GEN2 0x095B
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO14_GEN2),
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO16_GEN2)
+);
+
+/*
+ * We need to amend the report descriptor for the following:
+ * - the device reports Eraser instead of using Secondary Barrel Switch
+ * - when the eraser button is pressed and the stylus is touching the tablet,
+ * the device sends Tip Switch instead of sending Eraser
+ *
+ * This descriptor uses physical dimensions of the 16" device.
+ */
+static const __u8 fixed_rdesc[] = {
+ 0x05, 0x0d, // Usage Page (Digitizers) 0
+ 0x09, 0x02, // Usage (Pen) 2
+ 0xa1, 0x01, // Collection (Application) 4
+ 0x85, 0x07, // Report ID (7) 6
+ 0x09, 0x20, // Usage (Stylus) 8
+ 0xa1, 0x00, // Collection (Physical) 10
+ 0x09, 0x42, // Usage (Tip Switch) 12
+ 0x09, 0x44, // Usage (Barrel Switch) 14
+ 0x09, 0x5a, // Usage (Secondary Barrel Switch) 16 /* changed from 0x45 (Eraser) to 0x5a (Secondary Barrel Switch) */
+ 0x09, 0x3c, // Usage (Invert) 18
+ 0x09, 0x45, // Usage (Eraser) 16 /* created over a padding bit at offset 29-33 */
+ 0x15, 0x00, // Logical Minimum (0) 20
+ 0x25, 0x01, // Logical Maximum (1) 22
+ 0x75, 0x01, // Report Size (1) 24
+ 0x95, 0x05, // Report Count (5) 26 /* changed from 4 to 5 */
+ 0x81, 0x02, // Input (Data,Var,Abs) 28
+ 0x09, 0x32, // Usage (In Range) 34
+ 0x15, 0x00, // Logical Minimum (0) 36
+ 0x25, 0x01, // Logical Maximum (1) 38
+ 0x95, 0x01, // Report Count (1) 40
+ 0x81, 0x02, // Input (Data,Var,Abs) 42
+ 0x95, 0x02, // Report Count (2) 44
+ 0x81, 0x03, // Input (Cnst,Var,Abs) 46
+ 0x75, 0x10, // Report Size (16) 48
+ 0x95, 0x01, // Report Count (1) 50
+ 0x35, 0x00, // Physical Minimum (0) 52
+ 0xa4, // Push 54
+ 0x05, 0x01, // Usage Page (Generic Desktop) 55
+ 0x09, 0x30, // Usage (X) 57
+ 0x65, 0x13, // Unit (EnglishLinear: in) 59
+ 0x55, 0x0d, // Unit Exponent (-3) 61
+ 0x46, 0xff, 0x34, // Physical Maximum (13567) 63
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 66
+ 0x81, 0x02, // Input (Data,Var,Abs) 69
+ 0x09, 0x31, // Usage (Y) 71
+ 0x46, 0x20, 0x21, // Physical Maximum (8480) 73
+ 0x26, 0xff, 0x7f, // Logical Maximum (32767) 76
+ 0x81, 0x02, // Input (Data,Var,Abs) 79
+ 0xb4, // Pop 81
+ 0x09, 0x30, // Usage (Tip Pressure) 82
+ 0x45, 0x00, // Physical Maximum (0) 84
+ 0x26, 0xff, 0x3f, // Logical Maximum (16383) 86
+ 0x81, 0x42, // Input (Data,Var,Abs,Null) 89
+ 0x09, 0x3d, // Usage (X Tilt) 91
+ 0x15, 0x81, // Logical Minimum (-127) 93
+ 0x25, 0x7f, // Logical Maximum (127) 95
+ 0x75, 0x08, // Report Size (8) 97
+ 0x95, 0x01, // Report Count (1) 99
+ 0x81, 0x02, // Input (Data,Var,Abs) 101
+ 0x09, 0x3e, // Usage (Y Tilt) 103
+ 0x15, 0x81, // Logical Minimum (-127) 105
+ 0x25, 0x7f, // Logical Maximum (127) 107
+ 0x81, 0x02, // Input (Data,Var,Abs) 109
+ 0xc0, // End Collection 111
+ 0xc0, // End Collection 112
+};
+
+SEC("fmod_ret/hid_bpf_rdesc_fixup")
+int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+
+ /* Fix the Physical Maximum values for the different sizes of the device.
+ * The 14" screen device measures 11.874" x 7.421".
+ */
+ if (hctx->hid->product == PID_ARTIST_PRO14_GEN2) {
+ data[63] = 0x2e;
+ data[62] = 0x62;
+ data[73] = 0x1c;
+ data[72] = 0xfd;
+ }
+
+ return sizeof(fixed_rdesc);
+}
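The two byte pokes above rewrite the little-endian data bytes of the X and Y Physical Maximum items inside fixed_rdesc; with Unit (EnglishLinear: in) and Unit Exponent (-3), the values are thousandths of an inch. A minimal sketch of that encoding, using a hypothetical helper name:

/* Sketch only: 11.874" -> 11874 -> 0x2e62, 7.421" -> 7421 -> 0x1cfd.
 * The 16" values already present in fixed_rdesc decode the same way:
 * 0x34ff = 13567 -> 13.567", 0x2120 = 8480 -> 8.480".
 */
static void patch_physical_max(__u8 *data, unsigned int off, __u16 thousandths)
{
	data[off]     = thousandths & 0xff;	/* low byte  */
	data[off + 1] = thousandths >> 8;	/* high byte */
}

/* equivalent to the explicit assignments above:
 *	patch_physical_max(data, 62, 11874);
 *	patch_physical_max(data, 72, 7421);
 */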
+
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(xppen_16_fix_eraser, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if ((data[1] & 0x29) != 0x29) /* tip switch=1 invert=1 inrange=1 */
+ return 0;
+
+ /* xor bits 0,3 and 4: convert Tip Switch + Invert into Eraser only */
+ data[1] ^= 0x19;
+
+ return 0;
+}
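To make the bit twiddling above concrete: with the layout of the fixed descriptor (tip = bit 0, invert = bit 3, eraser = bit 4, in-range = bit 5), the firmware's "eraser pressed while touching" state arrives as 0x29 and must leave as 0x30. A standalone userspace sanity check (not part of the BPF object):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t buttons = 0x29;			/* tip(0) + invert(3) + in-range(5) */

	if ((buttons & 0x29) == 0x29)		/* same condition as above */
		buttons ^= 0x19;		/* flip bits 0, 3 and 4 */

	assert(buttons == 0x30);		/* eraser(4) + in-range(5) */
	return 0;
}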
+
+/*
+ * Static coordinate offset tables based on positive-only angles.
+ * Separate horizontal and vertical tables are needed because the logical
+ * coordinates are scaled differently along each axis.
+ *
+ * A table can be generated with Python like this:
+ * >>> import math
+ * >>> full_scale = 11.874 # the display width (or height) in inches
+ * >>> tip_height = 0.055677699 # distance of the pen coil center from the screen, in inches (empirical)
+ * >>> h = tip_height * (32767 / full_scale) # height of the coil in logical coordinates
+ * >>> [round(h*math.sin(math.radians(d))) for d in range(0, 128)]
+ * [0, 3, 5, ...]
+ */
+
+/* 14" screen, 11.874" x 7.421" */
+static const __u16 angle_offsets_horizontal_14[128] = {
+ 0, 3, 5, 8, 11, 13, 16, 19, 21, 24, 27, 29, 32, 35, 37, 40, 42, 45, 47, 50, 53,
+ 55, 58, 60, 62, 65, 67, 70, 72, 74, 77, 79, 81, 84, 86, 88, 90, 92, 95, 97, 99,
+ 101, 103, 105, 107, 109, 111, 112, 114, 116, 118, 119, 121, 123, 124, 126, 127,
+ 129, 130, 132, 133, 134, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 148, 149, 150, 150, 151, 151, 152, 152, 153, 153, 153, 153, 153, 154,
+ 154, 154, 154, 154, 153, 153, 153, 153, 153, 152, 152, 151, 151, 150, 150, 149,
+ 148, 148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, 134, 133,
+ 132, 130, 129, 127, 126, 124, 123
+};
+static const __u16 angle_offsets_vertical_14[128] = {
+ 0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 59, 64, 68, 72, 76, 80, 84,
+ 88, 92, 96, 100, 104, 108, 112, 115, 119, 123, 127, 130, 134, 137, 141, 145, 148,
+ 151, 155, 158, 161, 165, 168, 171, 174, 177, 180, 183, 186, 188, 191, 194, 196,
+ 199, 201, 204, 206, 208, 211, 213, 215, 217, 219, 221, 223, 225, 226, 228, 230,
+ 231, 232, 234, 235, 236, 237, 239, 240, 240, 241, 242, 243, 243, 244, 244, 245,
+ 245, 246, 246, 246, 246, 246, 246, 246, 245, 245, 244, 244, 243, 243, 242, 241,
+ 240, 240, 239, 237, 236, 235, 234, 232, 231, 230, 228, 226, 225, 223, 221, 219,
+ 217, 215, 213, 211, 208, 206, 204, 201, 199, 196
+};
+
+/* 16" screen, 13.567" x 8.480" */
+static const __u16 angle_offsets_horizontal_16[128] = {
+ 0, 2, 5, 7, 9, 12, 14, 16, 19, 21, 23, 26, 28, 30, 33, 35, 37, 39, 42, 44, 46, 48,
+ 50, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 86, 88, 90,
+ 92, 93, 95, 97, 98, 100, 101, 103, 105, 106, 107, 109, 110, 111, 113, 114, 115,
+ 116, 118, 119, 120, 121, 122, 123, 124, 125, 126, 126, 127, 128, 129, 129, 130,
+ 130, 131, 132, 132, 132, 133, 133, 133, 134, 134, 134, 134, 134, 134, 134, 134,
+ 134, 134, 134, 134, 134, 133, 133, 133, 132, 132, 132, 131, 130, 130, 129, 129,
+ 128, 127, 126, 126, 125, 124, 123, 122, 121, 120, 119, 118, 116, 115, 114, 113,
+ 111, 110, 109, 107
+};
+static const __u16 angle_offsets_vertical_16[128] = {
+ 0, 4, 8, 11, 15, 19, 22, 26, 30, 34, 37, 41, 45, 48, 52, 56, 59, 63, 66, 70, 74,
+ 77, 81, 84, 88, 91, 94, 98, 101, 104, 108, 111, 114, 117, 120, 123, 126, 129, 132,
+ 135, 138, 141, 144, 147, 149, 152, 155, 157, 160, 162, 165, 167, 170, 172, 174,
+ 176, 178, 180, 182, 184, 186, 188, 190, 192, 193, 195, 197, 198, 199, 201, 202,
+ 203, 205, 206, 207, 208, 209, 210, 210, 211, 212, 212, 213, 214, 214, 214, 215,
+ 215, 215, 215, 215, 215, 215, 215, 215, 214, 214, 214, 213, 212, 212, 211, 210,
+ 210, 209, 208, 207, 206, 205, 203, 202, 201, 199, 198, 197, 195, 193, 192, 190,
+ 188, 186, 184, 182, 180, 178, 176, 174, 172
+};
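The tables can also be regenerated offline; a C sketch equivalent to the Python one-liner above (a userspace helper under the stated assumptions, not part of the BPF program; the constants shown are the 14" horizontal case):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double pi = 3.14159265358979323846;
	const double full_scale = 11.874;	/* display width in inches */
	const double tip_height = 0.055677699;	/* coil center distance from screen, inches */
	const double h = tip_height * (32767.0 / full_scale);

	for (int d = 0; d < 128; d++)
		printf("%d, ", (int)lround(h * sin(d * pi / 180.0)));
	printf("\n");
	return 0;
}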
+
+static void compensate_coordinates_by_tilt(__u8 *data, const __u8 idx,
+ const __s8 tilt, const __u16 (*compensation_table)[128])
+{
+ __u16 coords = data[idx+1];
+
+ coords <<= 8;
+ coords += data[idx];
+
+ __u8 direction = tilt > 0 ? 0 : 1; /* Positive tilt means we need to subtract the compensation (vs. negative angle where we need to add) */
+ __u8 angle = tilt > 0 ? tilt : -tilt;
+
+ if (angle > 127)
+ return;
+
+ __u16 compensation = (*compensation_table)[angle];
+
+ if (direction == 0) {
+ coords = (coords > compensation) ? coords - compensation : 0;
+ } else {
+ const __u16 logical_maximum = 32767;
+ __u16 max = logical_maximum - compensation;
+
+ coords = (coords < max) ? coords + compensation : logical_maximum;
+ }
+
+ data[idx] = coords & 0xff;
+ data[idx+1] = coords >> 8;
+}
+
+SEC("fmod_ret/hid_bpf_device_event")
+int BPF_PROG(xppen_16_fix_angle_offset, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /*
+ * Compensate X and Y offset caused by tilt.
+ *
+ * The magnetic center moves when the pen is tilted, because the coil
+ * is not touching the screen.
+ *
+ * a (tilt angle)
+ * | /... h (coil distance from tip)
+ * | /
+ * |/______
+ * |x (position offset)
+ *
+ * x = sin a * h
+ *
+ * Subtract the offset from the coordinates. Use the precomputed table!
+ *
+ * bytes 0 - report id
+ * 1 - buttons
+ * 2-3 - X coords (logical)
+ * 4-5 - Y coords
+ * 6-7 - pressure (ignore)
+ * 8 - tilt X
+ * 9 - tilt Y
+ */
+
+ __s8 tilt_x = (__s8) data[8];
+ __s8 tilt_y = (__s8) data[9];
+
+ if (hctx->hid->product == PID_ARTIST_PRO14_GEN2) {
+ compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_14);
+ compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_14);
+ } else if (hctx->hid->product == PID_ARTIST_PRO16_GEN2) {
+ compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_16);
+ compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_16);
+ }
+
+ return 0;
+}
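As a numeric spot check of the comment and tables above: for the 14" width, h = 0.055677699 * (32767 / 11.874) ≈ 153.6 logical units, so a +30° tilt gives round(153.6 * sin 30°) = 77, which is exactly angle_offsets_horizontal_14[30]; compensate_coordinates_by_tilt() would then turn X = 16000 into 16000 - 77 = 15923. A standalone sketch of that check:

#include <assert.h>
#include <math.h>

int main(void)
{
	const double h = 0.055677699 * (32767.0 / 11.874);	/* ~153.6 logical units */

	assert((int)lround(h * 0.5) == 77);	/* sin(30 degrees) = 0.5 */
	assert(16000 - 77 == 15923);		/* positive tilt subtracts the offset */
	return 0;
}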
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /*
+ * The device exports 3 interfaces; match the pen interface by its 113-byte report descriptor.
+ */
+ ctx->retval = ctx->rdesc_size != 113;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ /* ensure the kernel isn't fixed already */
+ if (ctx->rdesc[17] != 0x45) /* Eraser */
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/hid_bpf.h b/drivers/hid/bpf/progs/hid_bpf.h
new file mode 100644
index 000000000000..7ee371cac2e1
--- /dev/null
+++ b/drivers/hid/bpf/progs/hid_bpf.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Benjamin Tissoires
+ */
+
+#ifndef ____HID_BPF__H
+#define ____HID_BPF__H
+
+struct hid_bpf_probe_args {
+ unsigned int hid;
+ unsigned int rdesc_size;
+ unsigned char rdesc[4096];
+ int retval;
+};
+
+#endif /* ____HID_BPF__H */
diff --git a/drivers/hid/bpf/progs/hid_bpf_helpers.h b/drivers/hid/bpf/progs/hid_bpf_helpers.h
new file mode 100644
index 000000000000..8f226f6e886b
--- /dev/null
+++ b/drivers/hid/bpf/progs/hid_bpf_helpers.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Benjamin Tissoires
+ */
+
+#ifndef __HID_BPF_HELPERS_H
+#define __HID_BPF_HELPERS_H
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/errno.h>
+
+extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
+ unsigned int offset,
+ const size_t __sz) __ksym;
+extern struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id) __ksym;
+extern void hid_bpf_release_context(struct hid_bpf_ctx *ctx) __ksym;
+extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
+ __u8 *data,
+ size_t buf__sz,
+ enum hid_report_type type,
+ enum hid_class_request reqtype) __ksym;
+
+#define HID_MAX_DESCRIPTOR_SIZE 4096
+#define HID_IGNORE_EVENT -1
+
+/* extracted from <linux/input.h> */
+#define BUS_ANY 0x00
+#define BUS_PCI 0x01
+#define BUS_ISAPNP 0x02
+#define BUS_USB 0x03
+#define BUS_HIL 0x04
+#define BUS_BLUETOOTH 0x05
+#define BUS_VIRTUAL 0x06
+#define BUS_ISA 0x10
+#define BUS_I8042 0x11
+#define BUS_XTKBD 0x12
+#define BUS_RS232 0x13
+#define BUS_GAMEPORT 0x14
+#define BUS_PARPORT 0x15
+#define BUS_AMIGA 0x16
+#define BUS_ADB 0x17
+#define BUS_I2C 0x18
+#define BUS_HOST 0x19
+#define BUS_GSC 0x1A
+#define BUS_ATARI 0x1B
+#define BUS_SPI 0x1C
+#define BUS_RMI 0x1D
+#define BUS_CEC 0x1E
+#define BUS_INTEL_ISHTP 0x1F
+#define BUS_AMD_SFH 0x20
+
+/* extracted from <linux/hid.h> */
+#define HID_GROUP_ANY 0x0000
+#define HID_GROUP_GENERIC 0x0001
+#define HID_GROUP_MULTITOUCH 0x0002
+#define HID_GROUP_SENSOR_HUB 0x0003
+#define HID_GROUP_MULTITOUCH_WIN_8 0x0004
+#define HID_GROUP_RMI 0x0100
+#define HID_GROUP_WACOM 0x0101
+#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+#define HID_GROUP_STEAM 0x0103
+#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
+#define HID_GROUP_VIVALDI 0x0105
+
+/* include/linux/mod_devicetable.h defines these as (~0), but that gives us negative-size arrays */
+#define HID_VID_ANY 0x0000
+#define HID_PID_ANY 0x0000
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+/* Helper macro to convert (foo, __LINE__) into foo134 so we can use __LINE__ for
+ * field/variable names
+ */
+#define COMBINE1(X, Y) X ## Y
+#define COMBINE(X, Y) COMBINE1(X, Y)
+
+/* Macro magic:
+ * __uint(foo, 1234) creates an int (*foo)[1234]
+ *
+ * We use that macro to declare an anonymous struct with several
+ * fields, each of which is the declaration of a pointer to an array whose
+ * size encodes the bus/group/vid/pid. (Because it's a pointer to such an
+ * array, actual storage would be sizeof(pointer) rather than sizeof(array).
+ * Not that we ever instantiate it anyway.)
+ *
+ * This is only used for BTF introspection: we can later check "what size
+ * is the bus array" in the introspection data and thus extract the bus ID
+ * again.
+ *
+ * And we use the __LINE__ to give each of our structs a unique name so the
+ * BPF program writer doesn't have to.
+ *
+ * $ bpftool btf dump file target/bpf/HP_Elite_Presenter.bpf.o
+ * shows the inspection data, start by searching for .hid_bpf_config
+ * and working backwards from that (each entry references the type_id of the
+ * content).
+ */
+
+#define HID_DEVICE(b, g, ven, prod) \
+ struct { \
+ __uint(name, 0); \
+ __uint(bus, (b)); \
+ __uint(group, (g)); \
+ __uint(vid, (ven)); \
+ __uint(pid, (prod)); \
+ } COMBINE(_entry, __LINE__)
+
+/* Macro magic below is to make HID_BPF_CONFIG() look like a function call that
+ * we can pass multiple HID_DEVICE() invocations in.
+ *
+ * For up to 16 arguments, HID_BPF_CONFIG(one, two) resolves to
+ *
+ * union {
+ * HID_DEVICE(...);
+ * HID_DEVICE(...);
+ * } _device_ids SEC(".hid_bpf_config")
+ *
+ */
+
+/* Returns the number of macro arguments: this expands
+ * NARGS(a, b, c) to NTH_ARG(a, b, c, 15, 14, 13, ..., 2, 1, 0).
+ * NTH_ARG always returns the 16th argument, which for three arguments is 3.
+ *
+ * If we want more than 16 values _COUNTDOWN and _NTH_ARG both need to be
+ * updated.
+ */
+#define _NARGS(...) _NARGS1(__VA_ARGS__, _COUNTDOWN)
+#define _NARGS1(...) _NTH_ARG(__VA_ARGS__)
+
+/* Add to this if we need more than 16 args */
+#define _COUNTDOWN \
+ 15, 14, 13, 12, 11, 10, 9, 8, \
+ 7, 6, 5, 4, 3, 2, 1, 0
+
+/* Return the 16th argument passed in. See _NARGS above for usage. Note this is
+ * 1-indexed.
+ */
+#define _NTH_ARG( \
+ _1, _2, _3, _4, _5, _6, _7, _8, \
+ _9, _10, _11, _12, _13, _14, _15,\
+ N, ...) N
+
+/* Turns EXPAND(_ARG, a, b, c) into _ARG3(a, b, c) */
+#define _EXPAND(func, ...) COMBINE(func, _NARGS(__VA_ARGS__)) (__VA_ARGS__)
+
+/* And now define all the ARG macros for each number of args we want to accept */
+#define _ARG1(_1) _1;
+#define _ARG2(_1, _2) _1; _2;
+#define _ARG3(_1, _2, _3) _1; _2; _3;
+#define _ARG4(_1, _2, _3, _4) _1; _2; _3; _4;
+#define _ARG5(_1, _2, _3, _4, _5) _1; _2; _3; _4; _5;
+#define _ARG6(_1, _2, _3, _4, _5, _6) _1; _2; _3; _4; _5; _6;
+#define _ARG7(_1, _2, _3, _4, _5, _6, _7) _1; _2; _3; _4; _5; _6; _7;
+#define _ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _1; _2; _3; _4; _5; _6; _7; _8;
+#define _ARG9(_1, _2, _3, _4, _5, _6, _7, _8, _9) _1; _2; _3; _4; _5; _6; _7; _8; _9;
+#define _ARG10(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a;
+#define _ARG11(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a; _b;
+#define _ARG12(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a; _b; _c;
+#define _ARG13(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, _d) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a; _b; _c; _d;
+#define _ARG14(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, _d, _e) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a; _b; _c; _d; _e;
+#define _ARG15(_1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, _d, _e, _f) _1; _2; _3; _4; _5; _6; _7; _8; _9; _a; _b; _c; _d; _e; _f;
+
+
+#define HID_BPF_CONFIG(...) union { \
+ _EXPAND(_ARG, __VA_ARGS__) \
+} _device_ids SEC(".hid_bpf_config")
+
+#endif /* __HID_BPF_HELPERS_H */
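To illustrate the macro machinery in this header, a two-entry HID_BPF_CONFIG() such as the XP-Pen one above expands to roughly the following (the _entryNNN field names come from __LINE__, so the numbers shown here are only placeholders):

union {
	struct {
		int (*name)[0];
		int (*bus)[BUS_USB];
		int (*group)[HID_GROUP_GENERIC];
		int (*vid)[VID_UGEE];
		int (*pid)[PID_ARTIST_PRO14_GEN2];
	} _entry6;
	struct {
		int (*name)[0];
		int (*bus)[BUS_USB];
		int (*group)[HID_GROUP_GENERIC];
		int (*vid)[VID_UGEE];
		int (*pid)[PID_ARTIST_PRO16_GEN2];
	} _entry7;
} _device_ids SEC(".hid_bpf_config");

Loading tools only read the array sizes back out of the BTF for this union, so nothing beyond the pointer fields ever needs storage.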
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 78cdfb8b9a7a..02de2bf4f790 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_MEDION_E1239T)
return asus_e1239t_event(drvdata, data, size);
- if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
+ /*
+ * Skip these report IDs: the device emits a continuous stream associated
+ * with the AURA mode it is in, which looks like an 'echo'.
+ */
+ if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
+ return -1;
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
/*
- * Skip these report ID, the device emits a continuous stream associated
- * with the AURA mode it is in which looks like an 'echo'.
+ * G713 and G733 send these codes on some keypresses; depending on
+ * the key pressed, they can trigger a shutdown event if not caught.
*/
- if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
- report->id == FEATURE_KBD_LED_REPORT_ID2) {
+ if (data[0] == 0x02 && data[1] == 0x30) {
return -1;
- /* Additional report filtering */
- } else if (report->id == FEATURE_KBD_REPORT_ID) {
- /*
- * G14 and G15 send these codes on some keypresses with no
- * discernable reason for doing so. We'll filter them out to avoid
- * unmapped warning messages later.
- */
- if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
- data[1] == 0x8a || data[1] == 0x9e) {
- return -1;
- }
}
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- /*
- * G713 and G733 send these codes on some keypresses, depending on
- * the key pressed it can trigger a shutdown event if not caught.
- */
- if(data[0] == 0x02 && data[1] == 0x30) {
- return -1;
- }
- }
-
}
if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
@@ -402,9 +386,9 @@ static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t bu
return ret;
}
-static int asus_kbd_init(struct hid_device *hdev)
+static int asus_kbd_init(struct hid_device *hdev, u8 report_id)
{
- const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ const u8 buf[] = { report_id, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
int ret;
@@ -416,9 +400,10 @@ static int asus_kbd_init(struct hid_device *hdev)
}
static int asus_kbd_get_functions(struct hid_device *hdev,
- unsigned char *kbd_func)
+ unsigned char *kbd_func,
+ u8 report_id)
{
- const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ const u8 buf[] = { report_id, 0x05, 0x20, 0x31, 0x00, 0x08 };
u8 *readbuf;
int ret;
@@ -447,51 +432,6 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
-static int rog_nkey_led_init(struct hid_device *hdev)
-{
- const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
- u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
- 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
- u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
- 0x05, 0x20, 0x31, 0x00, 0x08 };
- int ret;
-
- hid_info(hdev, "Asus initialise N-KEY Device");
- /* The first message is an init start */
- ret = asus_kbd_set_report(hdev, buf_init_start, sizeof(buf_init_start));
- if (ret < 0) {
- hid_warn(hdev, "Asus failed to send init start command: %d\n", ret);
- return ret;
- }
- /* Followed by a string */
- ret = asus_kbd_set_report(hdev, buf_init2, sizeof(buf_init2));
- if (ret < 0) {
- hid_warn(hdev, "Asus failed to send init command 1.0: %d\n", ret);
- return ret;
- }
- /* Followed by a string */
- ret = asus_kbd_set_report(hdev, buf_init3, sizeof(buf_init3));
- if (ret < 0) {
- hid_warn(hdev, "Asus failed to send init command 1.1: %d\n", ret);
- return ret;
- }
-
- /* begin second report ID with same data */
- buf_init2[0] = FEATURE_KBD_LED_REPORT_ID2;
- buf_init3[0] = FEATURE_KBD_LED_REPORT_ID2;
-
- ret = asus_kbd_set_report(hdev, buf_init2, sizeof(buf_init2));
- if (ret < 0) {
- hid_warn(hdev, "Asus failed to send init command 2.0: %d\n", ret);
- return ret;
- }
- ret = asus_kbd_set_report(hdev, buf_init3, sizeof(buf_init3));
- if (ret < 0)
- hid_warn(hdev, "Asus failed to send init command 2.1: %d\n", ret);
-
- return ret;
-}
-
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
@@ -574,17 +514,27 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
int ret;
if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- ret = rog_nkey_led_init(hdev);
+ /* Initialize keyboard */
+ ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
+ if (ret < 0)
+ return ret;
+
+ /* The LED endpoint is initialised via two HID report IDs */
+ ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID1);
+ if (ret < 0)
+ return ret;
+
+ ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
if (ret < 0)
return ret;
} else {
/* Initialize keyboard */
- ret = asus_kbd_init(hdev);
+ ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
if (ret < 0)
return ret;
/* Get keyboard functions */
- ret = asus_kbd_get_functions(hdev, &kbd_func);
+ ret = asus_kbd_get_functions(hdev, &kbd_func, FEATURE_KBD_REPORT_ID);
if (ret < 0)
return ret;
@@ -897,7 +847,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xb3: asus_map_key_clear(KEY_PROG3); break; /* Fn+Left next aura */
case 0x6a: asus_map_key_clear(KEY_F13); break; /* Screenpad toggle */
case 0x4b: asus_map_key_clear(KEY_F14); break; /* Arrows/Pg-Up/Dn toggle */
-
+ case 0xa5: asus_map_key_clear(KEY_F15); break; /* ROG Ally left back */
+ case 0xa6: asus_map_key_clear(KEY_F16); break; /* ROG Ally QAM button */
+ case 0xa7: asus_map_key_clear(KEY_F17); break; /* ROG Ally ROG long-press */
+ case 0xa8: asus_map_key_clear(KEY_F18); break; /* ROG Ally ROG long-press-release */
default:
/* ASUS lazily declares 256 usages, ignore the rest,
@@ -1250,6 +1203,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[205] = 0x01;
}
+ /* match many more n-key devices */
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ for (int i = 0; i < *rsize + 1; i++) {
+ /* the Report Count item always starts 14 bytes after the 0x5a Report ID item */
+ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
+ rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
+ rdesc[i + 15] = 0x01;
+ break;
+ }
+ }
+ }
+
return rdesc;
}
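For context, the loop above matches standard HID short items; the shape of the fragment it patches is sketched below (illustrative only; the 12 in-between bytes are device-specific, hence the fixed 14-byte offset):

static const __u8 nkey_fragment_example[] = {
	0x85, 0x5a,	/* Report ID (0x5a)  -> rdesc[i], rdesc[i + 1]       */
	/* ... 12 device-specific bytes ... */
	0x95, 0x05,	/* Report Count (5)  -> rdesc[i + 14], rdesc[i + 15] */
};
/* the fixup rewrites the final 0x05 to 0x01, i.e. Report Count (1) */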
@@ -1277,6 +1243,12 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
QUIRK_ROG_CLAYMORE_II_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
@@ -1319,4 +1291,4 @@ static struct hid_driver asus_driver = {
};
module_hid_driver(asus_driver);
-MODULE_LICENSE("GPL");
\ No newline at end of file
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index de7a477d6665..b1fa0378e8f4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2974,6 +2974,8 @@ EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
static struct hid_bpf_ops hid_ops = {
.hid_get_report = hid_get_report,
.hid_hw_raw_request = hid_hw_raw_request,
+ .hid_hw_output_report = hid_hw_output_report,
+ .hid_input_report = hid_input_report,
.owner = THIS_MODULE,
.bus_type = &hid_bus_type,
};
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 8c895c820b67..702f50e9841d 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -298,7 +298,7 @@ static ssize_t k90_show_macro_mode(struct device *dev,
goto out;
}
- ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+ ret = sysfs_emit(buf, "%s\n", macro_mode);
out:
kfree(data);
@@ -367,7 +367,7 @@ static ssize_t k90_show_current_profile(struct device *dev,
goto out;
}
- ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+ ret = sysfs_emit(buf, "%d\n", current_profile);
out:
kfree(data);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7dd83ec74f8a..87a961cae775 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -37,437 +37,2808 @@ struct hid_usage_entry {
};
static const struct hid_usage_entry hid_usage_table[] = {
- { 0, 0, "Undefined" },
- { 1, 0, "GenericDesktop" },
- {0, 0x01, "Pointer"},
- {0, 0x02, "Mouse"},
- {0, 0x04, "Joystick"},
- {0, 0x05, "GamePad"},
- {0, 0x06, "Keyboard"},
- {0, 0x07, "Keypad"},
- {0, 0x08, "MultiAxis"},
- {0, 0x30, "X"},
- {0, 0x31, "Y"},
- {0, 0x32, "Z"},
- {0, 0x33, "Rx"},
- {0, 0x34, "Ry"},
- {0, 0x35, "Rz"},
- {0, 0x36, "Slider"},
- {0, 0x37, "Dial"},
- {0, 0x38, "Wheel"},
- {0, 0x39, "HatSwitch"},
- {0, 0x3a, "CountedBuffer"},
- {0, 0x3b, "ByteCount"},
- {0, 0x3c, "MotionWakeup"},
- {0, 0x3d, "Start"},
- {0, 0x3e, "Select"},
- {0, 0x40, "Vx"},
- {0, 0x41, "Vy"},
- {0, 0x42, "Vz"},
- {0, 0x43, "Vbrx"},
- {0, 0x44, "Vbry"},
- {0, 0x45, "Vbrz"},
- {0, 0x46, "Vno"},
- {0, 0x80, "SystemControl"},
- {0, 0x81, "SystemPowerDown"},
- {0, 0x82, "SystemSleep"},
- {0, 0x83, "SystemWakeUp"},
- {0, 0x84, "SystemContextMenu"},
- {0, 0x85, "SystemMainMenu"},
- {0, 0x86, "SystemAppMenu"},
- {0, 0x87, "SystemMenuHelp"},
- {0, 0x88, "SystemMenuExit"},
- {0, 0x89, "SystemMenuSelect"},
- {0, 0x8a, "SystemMenuRight"},
- {0, 0x8b, "SystemMenuLeft"},
- {0, 0x8c, "SystemMenuUp"},
- {0, 0x8d, "SystemMenuDown"},
- {0, 0x90, "D-PadUp"},
- {0, 0x91, "D-PadDown"},
- {0, 0x92, "D-PadRight"},
- {0, 0x93, "D-PadLeft"},
- { 2, 0, "Simulation" },
- {0, 0xb0, "Aileron"},
- {0, 0xb1, "AileronTrim"},
- {0, 0xb2, "Anti-Torque"},
- {0, 0xb3, "Autopilot"},
- {0, 0xb4, "Chaff"},
- {0, 0xb5, "Collective"},
- {0, 0xb6, "DiveBrake"},
- {0, 0xb7, "ElectronicCountermeasures"},
- {0, 0xb8, "Elevator"},
- {0, 0xb9, "ElevatorTrim"},
- {0, 0xba, "Rudder"},
- {0, 0xbb, "Throttle"},
- {0, 0xbc, "FlightCommunications"},
- {0, 0xbd, "FlareRelease"},
- {0, 0xbe, "LandingGear"},
- {0, 0xbf, "ToeBrake"},
- { 6, 0, "GenericDeviceControls" },
- {0, 0x20, "BatteryStrength" },
- {0, 0x21, "WirelessChannel" },
- {0, 0x22, "WirelessID" },
- {0, 0x23, "DiscoverWirelessControl" },
- {0, 0x24, "SecurityCodeCharacterEntered" },
- {0, 0x25, "SecurityCodeCharactedErased" },
- {0, 0x26, "SecurityCodeCleared" },
- { 7, 0, "Keyboard" },
- { 8, 0, "LED" },
- {0, 0x01, "NumLock"},
- {0, 0x02, "CapsLock"},
- {0, 0x03, "ScrollLock"},
- {0, 0x04, "Compose"},
- {0, 0x05, "Kana"},
- {0, 0x4b, "GenericIndicator"},
- { 9, 0, "Button" },
- { 10, 0, "Ordinal" },
- { 12, 0, "Consumer" },
- {0, 0x003, "ProgrammableButtons"},
- {0, 0x238, "HorizontalWheel"},
- { 13, 0, "Digitizers" },
- {0, 0x01, "Digitizer"},
- {0, 0x02, "Pen"},
- {0, 0x03, "LightPen"},
- {0, 0x04, "TouchScreen"},
- {0, 0x05, "TouchPad"},
- {0, 0x0e, "DeviceConfiguration"},
- {0, 0x20, "Stylus"},
- {0, 0x21, "Puck"},
- {0, 0x22, "Finger"},
- {0, 0x23, "DeviceSettings"},
- {0, 0x30, "TipPressure"},
- {0, 0x31, "BarrelPressure"},
- {0, 0x32, "InRange"},
- {0, 0x33, "Touch"},
- {0, 0x34, "UnTouch"},
- {0, 0x35, "Tap"},
- {0, 0x38, "Transducer Index"},
- {0, 0x39, "TabletFunctionKey"},
- {0, 0x3a, "ProgramChangeKey"},
- {0, 0x3B, "Battery Strength"},
- {0, 0x3c, "Invert"},
- {0, 0x42, "TipSwitch"},
- {0, 0x43, "SecondaryTipSwitch"},
- {0, 0x44, "BarrelSwitch"},
- {0, 0x45, "Eraser"},
- {0, 0x46, "TabletPick"},
- {0, 0x47, "Confidence"},
- {0, 0x48, "Width"},
- {0, 0x49, "Height"},
- {0, 0x51, "ContactID"},
- {0, 0x52, "InputMode"},
- {0, 0x53, "DeviceIndex"},
- {0, 0x54, "ContactCount"},
- {0, 0x55, "ContactMaximumNumber"},
- {0, 0x59, "ButtonType"},
- {0, 0x5A, "SecondaryBarrelSwitch"},
- {0, 0x5B, "TransducerSerialNumber"},
- {0, 0x5C, "Preferred Color"},
- {0, 0x5D, "Preferred Color is Locked"},
- {0, 0x5E, "Preferred Line Width"},
- {0, 0x5F, "Preferred Line Width is Locked"},
- {0, 0x6e, "TransducerSerialNumber2"},
- {0, 0x70, "Preferred Line Style"},
- {0, 0x71, "Preferred Line Style is Locked"},
- {0, 0x72, "Ink"},
- {0, 0x73, "Pencil"},
- {0, 0x74, "Highlighter"},
- {0, 0x75, "Chisel Marker"},
- {0, 0x76, "Brush"},
- {0, 0x77, "No Preference"},
- {0, 0x80, "Digitizer Diagnostic"},
- {0, 0x81, "Digitizer Error"},
- {0, 0x82, "Err Normal Status"},
- {0, 0x83, "Err Transducers Exceeded"},
- {0, 0x84, "Err Full Trans Features Unavailable"},
- {0, 0x85, "Err Charge Low"},
- {0, 0x90, "Transducer Software Info"},
- {0, 0x91, "Transducer Vendor Id"},
- {0, 0x92, "Transducer Product Id"},
- {0, 0x93, "Device Supported Protocols"},
- {0, 0x94, "Transducer Supported Protocols"},
- {0, 0x95, "No Protocol"},
- {0, 0x96, "Wacom AES Protocol"},
- {0, 0x97, "USI Protocol"},
- {0, 0x98, "Microsoft Pen Protocol"},
- {0, 0xA0, "Supported Report Rates"},
- {0, 0xA1, "Report Rate"},
- {0, 0xA2, "Transducer Connected"},
- {0, 0xA3, "Switch Disabled"},
- {0, 0xA4, "Switch Unimplemented"},
- {0, 0xA5, "Transducer Switches"},
- { 15, 0, "PhysicalInterfaceDevice" },
- {0, 0x00, "Undefined"},
- {0, 0x01, "Physical_Interface_Device"},
- {0, 0x20, "Normal"},
- {0, 0x21, "Set_Effect_Report"},
- {0, 0x22, "Effect_Block_Index"},
- {0, 0x23, "Parameter_Block_Offset"},
- {0, 0x24, "ROM_Flag"},
- {0, 0x25, "Effect_Type"},
- {0, 0x26, "ET_Constant_Force"},
- {0, 0x27, "ET_Ramp"},
- {0, 0x28, "ET_Custom_Force_Data"},
- {0, 0x30, "ET_Square"},
- {0, 0x31, "ET_Sine"},
- {0, 0x32, "ET_Triangle"},
- {0, 0x33, "ET_Sawtooth_Up"},
- {0, 0x34, "ET_Sawtooth_Down"},
- {0, 0x40, "ET_Spring"},
- {0, 0x41, "ET_Damper"},
- {0, 0x42, "ET_Inertia"},
- {0, 0x43, "ET_Friction"},
- {0, 0x50, "Duration"},
- {0, 0x51, "Sample_Period"},
- {0, 0x52, "Gain"},
- {0, 0x53, "Trigger_Button"},
- {0, 0x54, "Trigger_Repeat_Interval"},
- {0, 0x55, "Axes_Enable"},
- {0, 0x56, "Direction_Enable"},
- {0, 0x57, "Direction"},
- {0, 0x58, "Type_Specific_Block_Offset"},
- {0, 0x59, "Block_Type"},
- {0, 0x5A, "Set_Envelope_Report"},
- {0, 0x5B, "Attack_Level"},
- {0, 0x5C, "Attack_Time"},
- {0, 0x5D, "Fade_Level"},
- {0, 0x5E, "Fade_Time"},
- {0, 0x5F, "Set_Condition_Report"},
- {0, 0x60, "CP_Offset"},
- {0, 0x61, "Positive_Coefficient"},
- {0, 0x62, "Negative_Coefficient"},
- {0, 0x63, "Positive_Saturation"},
- {0, 0x64, "Negative_Saturation"},
- {0, 0x65, "Dead_Band"},
- {0, 0x66, "Download_Force_Sample"},
- {0, 0x67, "Isoch_Custom_Force_Enable"},
- {0, 0x68, "Custom_Force_Data_Report"},
- {0, 0x69, "Custom_Force_Data"},
- {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
- {0, 0x6B, "Set_Custom_Force_Report"},
- {0, 0x6C, "Custom_Force_Data_Offset"},
- {0, 0x6D, "Sample_Count"},
- {0, 0x6E, "Set_Periodic_Report"},
- {0, 0x6F, "Offset"},
- {0, 0x70, "Magnitude"},
- {0, 0x71, "Phase"},
- {0, 0x72, "Period"},
- {0, 0x73, "Set_Constant_Force_Report"},
- {0, 0x74, "Set_Ramp_Force_Report"},
- {0, 0x75, "Ramp_Start"},
- {0, 0x76, "Ramp_End"},
- {0, 0x77, "Effect_Operation_Report"},
- {0, 0x78, "Effect_Operation"},
- {0, 0x79, "Op_Effect_Start"},
- {0, 0x7A, "Op_Effect_Start_Solo"},
- {0, 0x7B, "Op_Effect_Stop"},
- {0, 0x7C, "Loop_Count"},
- {0, 0x7D, "Device_Gain_Report"},
- {0, 0x7E, "Device_Gain"},
- {0, 0x7F, "PID_Pool_Report"},
- {0, 0x80, "RAM_Pool_Size"},
- {0, 0x81, "ROM_Pool_Size"},
- {0, 0x82, "ROM_Effect_Block_Count"},
- {0, 0x83, "Simultaneous_Effects_Max"},
- {0, 0x84, "Pool_Alignment"},
- {0, 0x85, "PID_Pool_Move_Report"},
- {0, 0x86, "Move_Source"},
- {0, 0x87, "Move_Destination"},
- {0, 0x88, "Move_Length"},
- {0, 0x89, "PID_Block_Load_Report"},
- {0, 0x8B, "Block_Load_Status"},
- {0, 0x8C, "Block_Load_Success"},
- {0, 0x8D, "Block_Load_Full"},
- {0, 0x8E, "Block_Load_Error"},
- {0, 0x8F, "Block_Handle"},
- {0, 0x90, "PID_Block_Free_Report"},
- {0, 0x91, "Type_Specific_Block_Handle"},
- {0, 0x92, "PID_State_Report"},
- {0, 0x94, "Effect_Playing"},
- {0, 0x95, "PID_Device_Control_Report"},
- {0, 0x96, "PID_Device_Control"},
- {0, 0x97, "DC_Enable_Actuators"},
- {0, 0x98, "DC_Disable_Actuators"},
- {0, 0x99, "DC_Stop_All_Effects"},
- {0, 0x9A, "DC_Device_Reset"},
- {0, 0x9B, "DC_Device_Pause"},
- {0, 0x9C, "DC_Device_Continue"},
- {0, 0x9F, "Device_Paused"},
- {0, 0xA0, "Actuators_Enabled"},
- {0, 0xA4, "Safety_Switch"},
- {0, 0xA5, "Actuator_Override_Switch"},
- {0, 0xA6, "Actuator_Power"},
- {0, 0xA7, "Start_Delay"},
- {0, 0xA8, "Parameter_Block_Size"},
- {0, 0xA9, "Device_Managed_Pool"},
- {0, 0xAA, "Shared_Parameter_Blocks"},
- {0, 0xAB, "Create_New_Effect_Report"},
- {0, 0xAC, "RAM_Pool_Available"},
- { 0x20, 0, "Sensor" },
- { 0x20, 0x01, "Sensor" },
- { 0x20, 0x10, "Biometric" },
- { 0x20, 0x11, "BiometricHumanPresence" },
- { 0x20, 0x12, "BiometricHumanProximity" },
- { 0x20, 0x13, "BiometricHumanTouch" },
- { 0x20, 0x20, "Electrical" },
- { 0x20, 0x21, "ElectricalCapacitance" },
- { 0x20, 0x22, "ElectricalCurrent" },
- { 0x20, 0x23, "ElectricalPower" },
- { 0x20, 0x24, "ElectricalInductance" },
- { 0x20, 0x25, "ElectricalResistance" },
- { 0x20, 0x26, "ElectricalVoltage" },
- { 0x20, 0x27, "ElectricalPoteniometer" },
- { 0x20, 0x28, "ElectricalFrequency" },
- { 0x20, 0x29, "ElectricalPeriod" },
- { 0x20, 0x30, "Environmental" },
- { 0x20, 0x31, "EnvironmentalAtmosphericPressure" },
- { 0x20, 0x32, "EnvironmentalHumidity" },
- { 0x20, 0x33, "EnvironmentalTemperature" },
- { 0x20, 0x34, "EnvironmentalWindDirection" },
- { 0x20, 0x35, "EnvironmentalWindSpeed" },
- { 0x20, 0x40, "Light" },
- { 0x20, 0x41, "LightAmbientLight" },
- { 0x20, 0x42, "LightConsumerInfrared" },
- { 0x20, 0x50, "Location" },
- { 0x20, 0x51, "LocationBroadcast" },
- { 0x20, 0x52, "LocationDeadReckoning" },
- { 0x20, 0x53, "LocationGPS" },
- { 0x20, 0x54, "LocationLookup" },
- { 0x20, 0x55, "LocationOther" },
- { 0x20, 0x56, "LocationStatic" },
- { 0x20, 0x57, "LocationTriangulation" },
- { 0x20, 0x60, "Mechanical" },
- { 0x20, 0x61, "MechanicalBooleanSwitch" },
- { 0x20, 0x62, "MechanicalBooleanSwitchArray" },
- { 0x20, 0x63, "MechanicalMultivalueSwitch" },
- { 0x20, 0x64, "MechanicalForce" },
- { 0x20, 0x65, "MechanicalPressure" },
- { 0x20, 0x66, "MechanicalStrain" },
- { 0x20, 0x67, "MechanicalWeight" },
- { 0x20, 0x68, "MechanicalHapticVibrator" },
- { 0x20, 0x69, "MechanicalHallEffectSwitch" },
- { 0x20, 0x70, "Motion" },
- { 0x20, 0x71, "MotionAccelerometer1D" },
- { 0x20, 0x72, "MotionAccelerometer2D" },
- { 0x20, 0x73, "MotionAccelerometer3D" },
- { 0x20, 0x74, "MotionGyrometer1D" },
- { 0x20, 0x75, "MotionGyrometer2D" },
- { 0x20, 0x76, "MotionGyrometer3D" },
- { 0x20, 0x77, "MotionMotionDetector" },
- { 0x20, 0x78, "MotionSpeedometer" },
- { 0x20, 0x79, "MotionAccelerometer" },
- { 0x20, 0x7A, "MotionGyrometer" },
- { 0x20, 0x80, "Orientation" },
- { 0x20, 0x81, "OrientationCompass1D" },
- { 0x20, 0x82, "OrientationCompass2D" },
- { 0x20, 0x83, "OrientationCompass3D" },
- { 0x20, 0x84, "OrientationInclinometer1D" },
- { 0x20, 0x85, "OrientationInclinometer2D" },
- { 0x20, 0x86, "OrientationInclinometer3D" },
- { 0x20, 0x87, "OrientationDistance1D" },
- { 0x20, 0x88, "OrientationDistance2D" },
- { 0x20, 0x89, "OrientationDistance3D" },
- { 0x20, 0x8A, "OrientationDeviceOrientation" },
- { 0x20, 0x8B, "OrientationCompass" },
- { 0x20, 0x8C, "OrientationInclinometer" },
- { 0x20, 0x8D, "OrientationDistance" },
- { 0x20, 0x90, "Scanner" },
- { 0x20, 0x91, "ScannerBarcode" },
- { 0x20, 0x91, "ScannerRFID" },
- { 0x20, 0x91, "ScannerNFC" },
- { 0x20, 0xA0, "Time" },
- { 0x20, 0xA1, "TimeAlarmTimer" },
- { 0x20, 0xA2, "TimeRealTimeClock" },
- { 0x20, 0xE0, "Other" },
- { 0x20, 0xE1, "OtherCustom" },
- { 0x20, 0xE2, "OtherGeneric" },
- { 0x20, 0xE3, "OtherGenericEnumerator" },
- { 0x84, 0, "Power Device" },
- { 0x84, 0x02, "PresentStatus" },
- { 0x84, 0x03, "ChangeStatus" },
- { 0x84, 0x04, "UPS" },
- { 0x84, 0x05, "PowerSupply" },
- { 0x84, 0x10, "BatterySystem" },
- { 0x84, 0x11, "BatterySystemID" },
- { 0x84, 0x12, "Battery" },
- { 0x84, 0x13, "BatteryID" },
- { 0x84, 0x14, "Charger" },
- { 0x84, 0x15, "ChargerID" },
- { 0x84, 0x16, "PowerConverter" },
- { 0x84, 0x17, "PowerConverterID" },
- { 0x84, 0x18, "OutletSystem" },
- { 0x84, 0x19, "OutletSystemID" },
- { 0x84, 0x1a, "Input" },
- { 0x84, 0x1b, "InputID" },
- { 0x84, 0x1c, "Output" },
- { 0x84, 0x1d, "OutputID" },
- { 0x84, 0x1e, "Flow" },
- { 0x84, 0x1f, "FlowID" },
- { 0x84, 0x20, "Outlet" },
- { 0x84, 0x21, "OutletID" },
- { 0x84, 0x22, "Gang" },
- { 0x84, 0x24, "PowerSummary" },
- { 0x84, 0x25, "PowerSummaryID" },
- { 0x84, 0x30, "Voltage" },
- { 0x84, 0x31, "Current" },
- { 0x84, 0x32, "Frequency" },
- { 0x84, 0x33, "ApparentPower" },
- { 0x84, 0x35, "PercentLoad" },
- { 0x84, 0x40, "ConfigVoltage" },
- { 0x84, 0x41, "ConfigCurrent" },
- { 0x84, 0x43, "ConfigApparentPower" },
- { 0x84, 0x53, "LowVoltageTransfer" },
- { 0x84, 0x54, "HighVoltageTransfer" },
- { 0x84, 0x56, "DelayBeforeStartup" },
- { 0x84, 0x57, "DelayBeforeShutdown" },
- { 0x84, 0x58, "Test" },
- { 0x84, 0x5a, "AudibleAlarmControl" },
- { 0x84, 0x60, "Present" },
- { 0x84, 0x61, "Good" },
- { 0x84, 0x62, "InternalFailure" },
- { 0x84, 0x65, "Overload" },
- { 0x84, 0x66, "OverCharged" },
- { 0x84, 0x67, "OverTemperature" },
- { 0x84, 0x68, "ShutdownRequested" },
- { 0x84, 0x69, "ShutdownImminent" },
- { 0x84, 0x6b, "SwitchOn/Off" },
- { 0x84, 0x6c, "Switchable" },
- { 0x84, 0x6d, "Used" },
- { 0x84, 0x6e, "Boost" },
- { 0x84, 0x73, "CommunicationLost" },
- { 0x84, 0xfd, "iManufacturer" },
- { 0x84, 0xfe, "iProduct" },
- { 0x84, 0xff, "iSerialNumber" },
- { 0x85, 0, "Battery System" },
- { 0x85, 0x01, "SMBBatteryMode" },
- { 0x85, 0x02, "SMBBatteryStatus" },
- { 0x85, 0x03, "SMBAlarmWarning" },
- { 0x85, 0x04, "SMBChargerMode" },
- { 0x85, 0x05, "SMBChargerStatus" },
- { 0x85, 0x06, "SMBChargerSpecInfo" },
- { 0x85, 0x07, "SMBSelectorState" },
- { 0x85, 0x08, "SMBSelectorPresets" },
- { 0x85, 0x09, "SMBSelectorInfo" },
- { 0x85, 0x29, "RemainingCapacityLimit" },
- { 0x85, 0x2c, "CapacityMode" },
- { 0x85, 0x42, "BelowRemainingCapacityLimit" },
- { 0x85, 0x44, "Charging" },
- { 0x85, 0x45, "Discharging" },
- { 0x85, 0x4b, "NeedReplacement" },
- { 0x85, 0x65, "AbsoluteStateOfCharge" },
- { 0x85, 0x66, "RemainingCapacity" },
- { 0x85, 0x68, "RunTimeToEmpty" },
- { 0x85, 0x6a, "AverageTimeToFull" },
- { 0x85, 0x83, "DesignCapacity" },
- { 0x85, 0x85, "ManufacturerDate" },
- { 0x85, 0x89, "iDeviceChemistry" },
- { 0x85, 0x8b, "Rechargeable" },
- { 0x85, 0x8f, "iOEMInformation" },
- { 0x85, 0x8d, "CapacityGranularity1" },
- { 0x85, 0xd0, "ACPresent" },
- /* pages 0xff00 to 0xffff are vendor-specific */
- { 0xffff, 0, "Vendor-specific-FF" },
- { 0, 0, NULL }
+ { 0x00, 0, "Undefined" },
+ { 0x01, 0, "GenericDesktop" },
+ { 0x01, 0x0001, "Pointer" },
+ { 0x01, 0x0002, "Mouse" },
+ { 0x01, 0x0004, "Joystick" },
+ { 0x01, 0x0005, "Gamepad" },
+ { 0x01, 0x0006, "Keyboard" },
+ { 0x01, 0x0007, "Keypad" },
+ { 0x01, 0x0008, "MultiaxisController" },
+ { 0x01, 0x0009, "TabletPCSystemControls" },
+ { 0x01, 0x000a, "WaterCoolingDevice" },
+ { 0x01, 0x000b, "ComputerChassisDevice" },
+ { 0x01, 0x000c, "WirelessRadioControls" },
+ { 0x01, 0x000d, "PortableDeviceControl" },
+ { 0x01, 0x000e, "SystemMultiAxisController" },
+ { 0x01, 0x000f, "SpatialController" },
+ { 0x01, 0x0010, "AssistiveControl" },
+ { 0x01, 0x0011, "DeviceDock" },
+ { 0x01, 0x0012, "DockableDevice" },
+ { 0x01, 0x0013, "CallStateManagementControl" },
+ { 0x01, 0x0030, "X" },
+ { 0x01, 0x0031, "Y" },
+ { 0x01, 0x0032, "Z" },
+ { 0x01, 0x0033, "Rx" },
+ { 0x01, 0x0034, "Ry" },
+ { 0x01, 0x0035, "Rz" },
+ { 0x01, 0x0036, "Slider" },
+ { 0x01, 0x0037, "Dial" },
+ { 0x01, 0x0038, "Wheel" },
+ { 0x01, 0x0039, "HatSwitch" },
+ { 0x01, 0x003a, "CountedBuffer" },
+ { 0x01, 0x003b, "ByteCount" },
+ { 0x01, 0x003c, "MotionWakeup" },
+ { 0x01, 0x003d, "Start" },
+ { 0x01, 0x003e, "Select" },
+ { 0x01, 0x0040, "Vx" },
+ { 0x01, 0x0041, "Vy" },
+ { 0x01, 0x0042, "Vz" },
+ { 0x01, 0x0043, "Vbrx" },
+ { 0x01, 0x0044, "Vbry" },
+ { 0x01, 0x0045, "Vbrz" },
+ { 0x01, 0x0046, "Vno" },
+ { 0x01, 0x0047, "FeatureNotification" },
+ { 0x01, 0x0048, "ResolutionMultiplier" },
+ { 0x01, 0x0049, "Qx" },
+ { 0x01, 0x004a, "Qy" },
+ { 0x01, 0x004b, "Qz" },
+ { 0x01, 0x004c, "Qw" },
+ { 0x01, 0x0080, "SystemControl" },
+ { 0x01, 0x0081, "SystemPowerDown" },
+ { 0x01, 0x0082, "SystemSleep" },
+ { 0x01, 0x0083, "SystemWakeUp" },
+ { 0x01, 0x0084, "SystemContextMenu" },
+ { 0x01, 0x0085, "SystemMainMenu" },
+ { 0x01, 0x0086, "SystemAppMenu" },
+ { 0x01, 0x0087, "SystemMenuHelp" },
+ { 0x01, 0x0088, "SystemMenuExit" },
+ { 0x01, 0x0089, "SystemMenuSelect" },
+ { 0x01, 0x008a, "SystemMenuRight" },
+ { 0x01, 0x008b, "SystemMenuLeft" },
+ { 0x01, 0x008c, "SystemMenuUp" },
+ { 0x01, 0x008d, "SystemMenuDown" },
+ { 0x01, 0x008e, "SystemColdRestart" },
+ { 0x01, 0x008f, "SystemWarmRestart" },
+ { 0x01, 0x0090, "DpadUp" },
+ { 0x01, 0x0091, "DpadDown" },
+ { 0x01, 0x0092, "DpadRight" },
+ { 0x01, 0x0093, "DpadLeft" },
+ { 0x01, 0x0094, "IndexTrigger" },
+ { 0x01, 0x0095, "PalmTrigger" },
+ { 0x01, 0x0096, "Thumbstick" },
+ { 0x01, 0x0097, "SystemFunctionShift" },
+ { 0x01, 0x0098, "SystemFunctionShiftLock" },
+ { 0x01, 0x0099, "SystemFunctionShiftLockIndicator" },
+ { 0x01, 0x009a, "SystemDismissNotification" },
+ { 0x01, 0x009b, "SystemDoNotDisturb" },
+ { 0x01, 0x00a0, "SystemDock" },
+ { 0x01, 0x00a1, "SystemUndock" },
+ { 0x01, 0x00a2, "SystemSetup" },
+ { 0x01, 0x00a3, "SystemBreak" },
+ { 0x01, 0x00a4, "SystemDebuggerBreak" },
+ { 0x01, 0x00a5, "ApplicationBreak" },
+ { 0x01, 0x00a6, "ApplicationDebuggerBreak" },
+ { 0x01, 0x00a7, "SystemSpeakerMute" },
+ { 0x01, 0x00a8, "SystemHibernate" },
+ { 0x01, 0x00a9, "SystemMicrophoneMute" },
+ { 0x01, 0x00b0, "SystemDisplayInvert" },
+ { 0x01, 0x00b1, "SystemDisplayInternal" },
+ { 0x01, 0x00b2, "SystemDisplayExternal" },
+ { 0x01, 0x00b3, "SystemDisplayBoth" },
+ { 0x01, 0x00b4, "SystemDisplayDual" },
+ { 0x01, 0x00b5, "SystemDisplayToggleIntExtMode" },
+ { 0x01, 0x00b6, "SystemDisplaySwapPrimarySecondary" },
+ { 0x01, 0x00b7, "SystemDisplayToggleLCDAutoscale" },
+ { 0x01, 0x00c0, "SensorZone" },
+ { 0x01, 0x00c1, "RPM" },
+ { 0x01, 0x00c2, "CoolantLevel" },
+ { 0x01, 0x00c3, "CoolantCriticalLevel" },
+ { 0x01, 0x00c4, "CoolantPump" },
+ { 0x01, 0x00c5, "ChassisEnclosure" },
+ { 0x01, 0x00c6, "WirelessRadioButton" },
+ { 0x01, 0x00c7, "WirelessRadioLED" },
+ { 0x01, 0x00c8, "WirelessRadioSliderSwitch" },
+ { 0x01, 0x00c9, "SystemDisplayRotationLockButton" },
+ { 0x01, 0x00ca, "SystemDisplayRotationLockSliderSwitch" },
+ { 0x01, 0x00cb, "ControlEnable" },
+ { 0x01, 0x00d0, "DockableDeviceUniqueID" },
+ { 0x01, 0x00d1, "DockableDeviceVendorID" },
+ { 0x01, 0x00d2, "DockableDevicePrimaryUsagePage" },
+ { 0x01, 0x00d3, "DockableDevicePrimaryUsageID" },
+ { 0x01, 0x00d4, "DockableDeviceDockingState" },
+ { 0x01, 0x00d5, "DockableDeviceDisplayOcclusion" },
+ { 0x01, 0x00d6, "DockableDeviceObjectType" },
+ { 0x01, 0x00e0, "CallActiveLED" },
+ { 0x01, 0x00e1, "CallMuteToggle" },
+ { 0x01, 0x00e2, "CallMuteLED" },
+ { 0x02, 0, "SimulationControls" },
+ { 0x02, 0x0001, "FlightSimulationDevice" },
+ { 0x02, 0x0002, "AutomobileSimulationDevice" },
+ { 0x02, 0x0003, "TankSimulationDevice" },
+ { 0x02, 0x0004, "SpaceshipSimulationDevice" },
+ { 0x02, 0x0005, "SubmarineSimulationDevice" },
+ { 0x02, 0x0006, "SailingSimulationDevice" },
+ { 0x02, 0x0007, "MotorcycleSimulationDevice" },
+ { 0x02, 0x0008, "SportsSimulationDevice" },
+ { 0x02, 0x0009, "AirplaneSimulationDevice" },
+ { 0x02, 0x000a, "HelicopterSimulationDevice" },
+ { 0x02, 0x000b, "MagicCarpetSimulationDevice" },
+ { 0x02, 0x000c, "BicycleSimulationDevice" },
+ { 0x02, 0x0020, "FlightControlStick" },
+ { 0x02, 0x0021, "FlightStick" },
+ { 0x02, 0x0022, "CyclicControl" },
+ { 0x02, 0x0023, "CyclicTrim" },
+ { 0x02, 0x0024, "FlightYoke" },
+ { 0x02, 0x0025, "TrackControl" },
+ { 0x02, 0x00b0, "Aileron" },
+ { 0x02, 0x00b1, "AileronTrim" },
+ { 0x02, 0x00b2, "AntiTorqueControl" },
+ { 0x02, 0x00b3, "AutopilotEnable" },
+ { 0x02, 0x00b4, "ChaffRelease" },
+ { 0x02, 0x00b5, "CollectiveControl" },
+ { 0x02, 0x00b6, "DiveBrake" },
+ { 0x02, 0x00b7, "ElectronicCountermeasures" },
+ { 0x02, 0x00b8, "Elevator" },
+ { 0x02, 0x00b9, "ElevatorTrim" },
+ { 0x02, 0x00ba, "Rudder" },
+ { 0x02, 0x00bb, "Throttle" },
+ { 0x02, 0x00bc, "FlightCommunications" },
+ { 0x02, 0x00bd, "FlareRelease" },
+ { 0x02, 0x00be, "LandingGear" },
+ { 0x02, 0x00bf, "ToeBrake" },
+ { 0x02, 0x00c0, "Trigger" },
+ { 0x02, 0x00c1, "WeaponsArm" },
+ { 0x02, 0x00c2, "WeaponsSelect" },
+ { 0x02, 0x00c3, "WingFlaps" },
+ { 0x02, 0x00c4, "Accelerator" },
+ { 0x02, 0x00c5, "Brake" },
+ { 0x02, 0x00c6, "Clutch" },
+ { 0x02, 0x00c7, "Shifter" },
+ { 0x02, 0x00c8, "Steering" },
+ { 0x02, 0x00c9, "TurretDirection" },
+ { 0x02, 0x00ca, "BarrelElevation" },
+ { 0x02, 0x00cb, "DivePlane" },
+ { 0x02, 0x00cc, "Ballast" },
+ { 0x02, 0x00cd, "BicycleCrank" },
+ { 0x02, 0x00ce, "HandleBars" },
+ { 0x02, 0x00cf, "FrontBrake" },
+ { 0x02, 0x00d0, "RearBrake" },
+ { 0x03, 0, "VRControls" },
+ { 0x03, 0x0001, "Belt" },
+ { 0x03, 0x0002, "BodySuit" },
+ { 0x03, 0x0003, "Flexor" },
+ { 0x03, 0x0004, "Glove" },
+ { 0x03, 0x0005, "HeadTracker" },
+ { 0x03, 0x0006, "HeadMountedDisplay" },
+ { 0x03, 0x0007, "HandTracker" },
+ { 0x03, 0x0008, "Oculometer" },
+ { 0x03, 0x0009, "Vest" },
+ { 0x03, 0x000a, "AnimatronicDevice" },
+ { 0x03, 0x0020, "StereoEnable" },
+ { 0x03, 0x0021, "DisplayEnable" },
+ { 0x04, 0, "SportControls" },
+ { 0x04, 0x0001, "BaseballBat" },
+ { 0x04, 0x0002, "GolfClub" },
+ { 0x04, 0x0003, "RowingMachine" },
+ { 0x04, 0x0004, "Treadmill" },
+ { 0x04, 0x0030, "Oar" },
+ { 0x04, 0x0031, "Slope" },
+ { 0x04, 0x0032, "Rate" },
+ { 0x04, 0x0033, "StickSpeed" },
+ { 0x04, 0x0034, "StickFaceAngle" },
+ { 0x04, 0x0035, "StickHeelToe" },
+ { 0x04, 0x0036, "StickFollowThrough" },
+ { 0x04, 0x0037, "StickTempo" },
+ { 0x04, 0x0038, "StickType" },
+ { 0x04, 0x0039, "StickHeight" },
+ { 0x04, 0x0050, "Putter" },
+ { 0x04, 0x0051, "1Iron" },
+ { 0x04, 0x0052, "2Iron" },
+ { 0x04, 0x0053, "3Iron" },
+ { 0x04, 0x0054, "4Iron" },
+ { 0x04, 0x0055, "5Iron" },
+ { 0x04, 0x0056, "6Iron" },
+ { 0x04, 0x0057, "7Iron" },
+ { 0x04, 0x0058, "8Iron" },
+ { 0x04, 0x0059, "9Iron" },
+ { 0x04, 0x005a, "10Iron" },
+ { 0x04, 0x005b, "11Iron" },
+ { 0x04, 0x005c, "SandWedge" },
+ { 0x04, 0x005d, "LoftWedge" },
+ { 0x04, 0x005e, "PowerWedge" },
+ { 0x04, 0x005f, "1Wood" },
+ { 0x04, 0x0060, "3Wood" },
+ { 0x04, 0x0061, "5Wood" },
+ { 0x04, 0x0062, "7Wood" },
+ { 0x04, 0x0063, "9Wood" },
+ { 0x05, 0, "GameControls" },
+ { 0x05, 0x0001, "3DGameController" },
+ { 0x05, 0x0002, "PinballDevice" },
+ { 0x05, 0x0003, "GunDevice" },
+ { 0x05, 0x0020, "PointofView" },
+ { 0x05, 0x0021, "TurnRightLeft" },
+ { 0x05, 0x0022, "PitchForwardBackward" },
+ { 0x05, 0x0023, "RollRightLeft" },
+ { 0x05, 0x0024, "MoveRightLeft" },
+ { 0x05, 0x0025, "MoveForwardBackward" },
+ { 0x05, 0x0026, "MoveUpDown" },
+ { 0x05, 0x0027, "LeanRightLeft" },
+ { 0x05, 0x0028, "LeanForwardBackward" },
+ { 0x05, 0x0029, "HeightofPOV" },
+ { 0x05, 0x002a, "Flipper" },
+ { 0x05, 0x002b, "SecondaryFlipper" },
+ { 0x05, 0x002c, "Bump" },
+ { 0x05, 0x002d, "NewGame" },
+ { 0x05, 0x002e, "ShootBall" },
+ { 0x05, 0x002f, "Player" },
+ { 0x05, 0x0030, "GunBolt" },
+ { 0x05, 0x0031, "GunClip" },
+ { 0x05, 0x0032, "GunSelector" },
+ { 0x05, 0x0033, "GunSingleShot" },
+ { 0x05, 0x0034, "GunBurst" },
+ { 0x05, 0x0035, "GunAutomatic" },
+ { 0x05, 0x0036, "GunSafety" },
+ { 0x05, 0x0037, "GamepadFireJump" },
+ { 0x05, 0x0039, "GamepadTrigger" },
+ { 0x05, 0x003a, "FormfittingGamepad" },
+ { 0x06, 0, "GenericDeviceControls" },
+ { 0x06, 0x0001, "BackgroundNonuserControls" },
+ { 0x06, 0x0020, "BatteryStrength" },
+ { 0x06, 0x0021, "WirelessChannel" },
+ { 0x06, 0x0022, "WirelessID" },
+ { 0x06, 0x0023, "DiscoverWirelessControl" },
+ { 0x06, 0x0024, "SecurityCodeCharacterEntered" },
+ { 0x06, 0x0025, "SecurityCodeCharacterErased" },
+ { 0x06, 0x0026, "SecurityCodeCleared" },
+ { 0x06, 0x0027, "SequenceID" },
+ { 0x06, 0x0028, "SequenceIDReset" },
+ { 0x06, 0x0029, "RFSignalStrength" },
+ { 0x06, 0x002a, "SoftwareVersion" },
+ { 0x06, 0x002b, "ProtocolVersion" },
+ { 0x06, 0x002c, "HardwareVersion" },
+ { 0x06, 0x002d, "Major" },
+ { 0x06, 0x002e, "Minor" },
+ { 0x06, 0x002f, "Revision" },
+ { 0x06, 0x0030, "Handedness" },
+ { 0x06, 0x0031, "EitherHand" },
+ { 0x06, 0x0032, "LeftHand" },
+ { 0x06, 0x0033, "RightHand" },
+ { 0x06, 0x0034, "BothHands" },
+ { 0x06, 0x0040, "GripPoseOffset" },
+ { 0x06, 0x0041, "PointerPoseOffset" },
+ { 0x07, 0, "KeyboardKeypad" },
+ { 0x07, 0x0001, "ErrorRollOver" },
+ { 0x07, 0x0002, "POSTFail" },
+ { 0x07, 0x0003, "ErrorUndefined" },
+ { 0x07, 0x0004, "KeyboardA" },
+ { 0x07, 0x0005, "KeyboardB" },
+ { 0x07, 0x0006, "KeyboardC" },
+ { 0x07, 0x0007, "KeyboardD" },
+ { 0x07, 0x0008, "KeyboardE" },
+ { 0x07, 0x0009, "KeyboardF" },
+ { 0x07, 0x000a, "KeyboardG" },
+ { 0x07, 0x000b, "KeyboardH" },
+ { 0x07, 0x000c, "KeyboardI" },
+ { 0x07, 0x000d, "KeyboardJ" },
+ { 0x07, 0x000e, "KeyboardK" },
+ { 0x07, 0x000f, "KeyboardL" },
+ { 0x07, 0x0010, "KeyboardM" },
+ { 0x07, 0x0011, "KeyboardN" },
+ { 0x07, 0x0012, "KeyboardO" },
+ { 0x07, 0x0013, "KeyboardP" },
+ { 0x07, 0x0014, "KeyboardQ" },
+ { 0x07, 0x0015, "KeyboardR" },
+ { 0x07, 0x0016, "KeyboardS" },
+ { 0x07, 0x0017, "KeyboardT" },
+ { 0x07, 0x0018, "KeyboardU" },
+ { 0x07, 0x0019, "KeyboardV" },
+ { 0x07, 0x001a, "KeyboardW" },
+ { 0x07, 0x001b, "KeyboardX" },
+ { 0x07, 0x001c, "KeyboardY" },
+ { 0x07, 0x001d, "KeyboardZ" },
+ { 0x07, 0x001e, "Keyboard1andBang" },
+ { 0x07, 0x001f, "Keyboard2andAt" },
+ { 0x07, 0x0020, "Keyboard3andHash" },
+ { 0x07, 0x0021, "Keyboard4andDollar" },
+ { 0x07, 0x0022, "Keyboard5andPercent" },
+ { 0x07, 0x0023, "Keyboard6andCaret" },
+ { 0x07, 0x0024, "Keyboard7andAmpersand" },
+ { 0x07, 0x0025, "Keyboard8andStar" },
+ { 0x07, 0x0026, "Keyboard9andLeftBracket" },
+ { 0x07, 0x0027, "Keyboard0andRightBracket" },
+ { 0x07, 0x0028, "KeyboardReturnEnter" },
+ { 0x07, 0x0029, "KeyboardEscape" },
+ { 0x07, 0x002a, "KeyboardDelete" },
+ { 0x07, 0x002b, "KeyboardTab" },
+ { 0x07, 0x002c, "KeyboardSpacebar" },
+ { 0x07, 0x002d, "KeyboardDashandUnderscore" },
+ { 0x07, 0x002e, "KeyboardEqualsandPlus" },
+ { 0x07, 0x002f, "KeyboardLeftBrace" },
+ { 0x07, 0x0030, "KeyboardRightBrace" },
+ { 0x07, 0x0031, "KeyboardBackslashandPipe" },
+ { 0x07, 0x0032, "KeyboardNonUSHashandTilde" },
+ { 0x07, 0x0033, "KeyboardSemiColonandColon" },
+ { 0x07, 0x0034, "KeyboardLeftAposandDouble" },
+ { 0x07, 0x0035, "KeyboardGraveAccentandTilde" },
+ { 0x07, 0x0036, "KeyboardCommaandLessThan" },
+ { 0x07, 0x0037, "KeyboardPeriodandGreaterThan" },
+ { 0x07, 0x0038, "KeyboardForwardSlashandQuestionMark" },
+ { 0x07, 0x0039, "KeyboardCapsLock" },
+ { 0x07, 0x003a, "KeyboardF1" },
+ { 0x07, 0x003b, "KeyboardF2" },
+ { 0x07, 0x003c, "KeyboardF3" },
+ { 0x07, 0x003d, "KeyboardF4" },
+ { 0x07, 0x003e, "KeyboardF5" },
+ { 0x07, 0x003f, "KeyboardF6" },
+ { 0x07, 0x0040, "KeyboardF7" },
+ { 0x07, 0x0041, "KeyboardF8" },
+ { 0x07, 0x0042, "KeyboardF9" },
+ { 0x07, 0x0043, "KeyboardF10" },
+ { 0x07, 0x0044, "KeyboardF11" },
+ { 0x07, 0x0045, "KeyboardF12" },
+ { 0x07, 0x0046, "KeyboardPrintScreen" },
+ { 0x07, 0x0047, "KeyboardScrollLock" },
+ { 0x07, 0x0048, "KeyboardPause" },
+ { 0x07, 0x0049, "KeyboardInsert" },
+ { 0x07, 0x004a, "KeyboardHome" },
+ { 0x07, 0x004b, "KeyboardPageUp" },
+ { 0x07, 0x004c, "KeyboardDeleteForward" },
+ { 0x07, 0x004d, "KeyboardEnd" },
+ { 0x07, 0x004e, "KeyboardPageDown" },
+ { 0x07, 0x004f, "KeyboardRightArrow" },
+ { 0x07, 0x0050, "KeyboardLeftArrow" },
+ { 0x07, 0x0051, "KeyboardDownArrow" },
+ { 0x07, 0x0052, "KeyboardUpArrow" },
+ { 0x07, 0x0053, "KeypadNumLockandClear" },
+ { 0x07, 0x0054, "KeypadForwardSlash" },
+ { 0x07, 0x0055, "KeypadStar" },
+ { 0x07, 0x0056, "KeypadDash" },
+ { 0x07, 0x0057, "KeypadPlus" },
+ { 0x07, 0x0058, "KeypadENTER" },
+ { 0x07, 0x0059, "Keypad1andEnd" },
+ { 0x07, 0x005a, "Keypad2andDownArrow" },
+ { 0x07, 0x005b, "Keypad3andPageDn" },
+ { 0x07, 0x005c, "Keypad4andLeftArrow" },
+ { 0x07, 0x005d, "Keypad5" },
+ { 0x07, 0x005e, "Keypad6andRightArrow" },
+ { 0x07, 0x005f, "Keypad7andHome" },
+ { 0x07, 0x0060, "Keypad8andUpArrow" },
+ { 0x07, 0x0061, "Keypad9andPageUp" },
+ { 0x07, 0x0062, "Keypad0andInsert" },
+ { 0x07, 0x0063, "KeypadPeriodandDelete" },
+ { 0x07, 0x0064, "KeyboardNonUSBackslashandPipe" },
+ { 0x07, 0x0065, "KeyboardApplication" },
+ { 0x07, 0x0066, "KeyboardPower" },
+ { 0x07, 0x0067, "KeypadEquals" },
+ { 0x07, 0x0068, "KeyboardF13" },
+ { 0x07, 0x0069, "KeyboardF14" },
+ { 0x07, 0x006a, "KeyboardF15" },
+ { 0x07, 0x006b, "KeyboardF16" },
+ { 0x07, 0x006c, "KeyboardF17" },
+ { 0x07, 0x006d, "KeyboardF18" },
+ { 0x07, 0x006e, "KeyboardF19" },
+ { 0x07, 0x006f, "KeyboardF20" },
+ { 0x07, 0x0070, "KeyboardF21" },
+ { 0x07, 0x0071, "KeyboardF22" },
+ { 0x07, 0x0072, "KeyboardF23" },
+ { 0x07, 0x0073, "KeyboardF24" },
+ { 0x07, 0x0074, "KeyboardExecute" },
+ { 0x07, 0x0075, "KeyboardHelp" },
+ { 0x07, 0x0076, "KeyboardMenu" },
+ { 0x07, 0x0077, "KeyboardSelect" },
+ { 0x07, 0x0078, "KeyboardStop" },
+ { 0x07, 0x0079, "KeyboardAgain" },
+ { 0x07, 0x007a, "KeyboardUndo" },
+ { 0x07, 0x007b, "KeyboardCut" },
+ { 0x07, 0x007c, "KeyboardCopy" },
+ { 0x07, 0x007d, "KeyboardPaste" },
+ { 0x07, 0x007e, "KeyboardFind" },
+ { 0x07, 0x007f, "KeyboardMute" },
+ { 0x07, 0x0080, "KeyboardVolumeUp" },
+ { 0x07, 0x0081, "KeyboardVolumeDown" },
+ { 0x07, 0x0082, "KeyboardLockingCapsLock" },
+ { 0x07, 0x0083, "KeyboardLockingNumLock" },
+ { 0x07, 0x0084, "KeyboardLockingScrollLock" },
+ { 0x07, 0x0085, "KeypadComma" },
+ { 0x07, 0x0086, "KeypadEqualSign" },
+ { 0x07, 0x0087, "KeyboardInternational1" },
+ { 0x07, 0x0088, "KeyboardInternational2" },
+ { 0x07, 0x0089, "KeyboardInternational3" },
+ { 0x07, 0x008a, "KeyboardInternational4" },
+ { 0x07, 0x008b, "KeyboardInternational5" },
+ { 0x07, 0x008c, "KeyboardInternational6" },
+ { 0x07, 0x008d, "KeyboardInternational7" },
+ { 0x07, 0x008e, "KeyboardInternational8" },
+ { 0x07, 0x008f, "KeyboardInternational9" },
+ { 0x07, 0x0090, "KeyboardLANG1" },
+ { 0x07, 0x0091, "KeyboardLANG2" },
+ { 0x07, 0x0092, "KeyboardLANG3" },
+ { 0x07, 0x0093, "KeyboardLANG4" },
+ { 0x07, 0x0094, "KeyboardLANG5" },
+ { 0x07, 0x0095, "KeyboardLANG6" },
+ { 0x07, 0x0096, "KeyboardLANG7" },
+ { 0x07, 0x0097, "KeyboardLANG8" },
+ { 0x07, 0x0098, "KeyboardLANG9" },
+ { 0x07, 0x0099, "KeyboardAlternateErase" },
+ { 0x07, 0x009a, "KeyboardSysReqAttention" },
+ { 0x07, 0x009b, "KeyboardCancel" },
+ { 0x07, 0x009c, "KeyboardClear" },
+ { 0x07, 0x009d, "KeyboardPrior" },
+ { 0x07, 0x009e, "KeyboardReturn" },
+ { 0x07, 0x009f, "KeyboardSeparator" },
+ { 0x07, 0x00a0, "KeyboardOut" },
+ { 0x07, 0x00a1, "KeyboardOper" },
+ { 0x07, 0x00a2, "KeyboardClearAgain" },
+ { 0x07, 0x00a3, "KeyboardCrSelProps" },
+ { 0x07, 0x00a4, "KeyboardExSel" },
+ { 0x07, 0x00b0, "KeypadDouble0" },
+ { 0x07, 0x00b1, "KeypadTriple0" },
+ { 0x07, 0x00b2, "ThousandsSeparator" },
+ { 0x07, 0x00b3, "DecimalSeparator" },
+ { 0x07, 0x00b4, "CurrencyUnit" },
+ { 0x07, 0x00b5, "CurrencySubunit" },
+ { 0x07, 0x00b6, "KeypadLeftBracket" },
+ { 0x07, 0x00b7, "KeypadRightBracket" },
+ { 0x07, 0x00b8, "KeypadLeftBrace" },
+ { 0x07, 0x00b9, "KeypadRightBrace" },
+ { 0x07, 0x00ba, "KeypadTab" },
+ { 0x07, 0x00bb, "KeypadBackspace" },
+ { 0x07, 0x00bc, "KeypadA" },
+ { 0x07, 0x00bd, "KeypadB" },
+ { 0x07, 0x00be, "KeypadC" },
+ { 0x07, 0x00bf, "KeypadD" },
+ { 0x07, 0x00c0, "KeypadE" },
+ { 0x07, 0x00c1, "KeypadF" },
+ { 0x07, 0x00c2, "KeypadXOR" },
+ { 0x07, 0x00c3, "KeypadCaret" },
+ { 0x07, 0x00c4, "KeypadPercentage" },
+ { 0x07, 0x00c5, "KeypadLess" },
+ { 0x07, 0x00c6, "KeypadGreater" },
+ { 0x07, 0x00c7, "KeypadAmpersand" },
+ { 0x07, 0x00c8, "KeypadDoubleAmpersand" },
+ { 0x07, 0x00c9, "KeypadBar" },
+ { 0x07, 0x00ca, "KeypadDoubleBar" },
+ { 0x07, 0x00cb, "KeypadColon" },
+ { 0x07, 0x00cc, "KeypadHash" },
+ { 0x07, 0x00cd, "KeypadSpace" },
+ { 0x07, 0x00ce, "KeypadAt" },
+ { 0x07, 0x00cf, "KeypadBang" },
+ { 0x07, 0x00d0, "KeypadMemoryStore" },
+ { 0x07, 0x00d1, "KeypadMemoryRecall" },
+ { 0x07, 0x00d2, "KeypadMemoryClear" },
+ { 0x07, 0x00d3, "KeypadMemoryAdd" },
+ { 0x07, 0x00d4, "KeypadMemorySubtract" },
+ { 0x07, 0x00d5, "KeypadMemoryMultiply" },
+ { 0x07, 0x00d6, "KeypadMemoryDivide" },
+ { 0x07, 0x00d7, "KeypadPlusMinus" },
+ { 0x07, 0x00d8, "KeypadClear" },
+ { 0x07, 0x00d9, "KeypadClearEntry" },
+ { 0x07, 0x00da, "KeypadBinary" },
+ { 0x07, 0x00db, "KeypadOctal" },
+ { 0x07, 0x00dc, "KeypadDecimal" },
+ { 0x07, 0x00dd, "KeypadHexadecimal" },
+ { 0x07, 0x00e0, "KeyboardLeftControl" },
+ { 0x07, 0x00e1, "KeyboardLeftShift" },
+ { 0x07, 0x00e2, "KeyboardLeftAlt" },
+ { 0x07, 0x00e3, "KeyboardLeftGUI" },
+ { 0x07, 0x00e4, "KeyboardRightControl" },
+ { 0x07, 0x00e5, "KeyboardRightShift" },
+ { 0x07, 0x00e6, "KeyboardRightAlt" },
+ { 0x07, 0x00e7, "KeyboardRightGUI" },
+ { 0x08, 0, "LED" },
+ { 0x08, 0x0001, "NumLock" },
+ { 0x08, 0x0002, "CapsLock" },
+ { 0x08, 0x0003, "ScrollLock" },
+ { 0x08, 0x0004, "Compose" },
+ { 0x08, 0x0005, "Kana" },
+ { 0x08, 0x0006, "Power" },
+ { 0x08, 0x0007, "Shift" },
+ { 0x08, 0x0008, "DoNotDisturb" },
+ { 0x08, 0x0009, "Mute" },
+ { 0x08, 0x000a, "ToneEnable" },
+ { 0x08, 0x000b, "HighCutFilter" },
+ { 0x08, 0x000c, "LowCutFilter" },
+ { 0x08, 0x000d, "EqualizerEnable" },
+ { 0x08, 0x000e, "SoundFieldOn" },
+ { 0x08, 0x000f, "SurroundOn" },
+ { 0x08, 0x0010, "Repeat" },
+ { 0x08, 0x0011, "Stereo" },
+ { 0x08, 0x0012, "SamplingRateDetect" },
+ { 0x08, 0x0013, "Spinning" },
+ { 0x08, 0x0014, "CAV" },
+ { 0x08, 0x0015, "CLV" },
+ { 0x08, 0x0016, "RecordingFormatDetect" },
+ { 0x08, 0x0017, "OffHook" },
+ { 0x08, 0x0018, "Ring" },
+ { 0x08, 0x0019, "MessageWaiting" },
+ { 0x08, 0x001a, "DataMode" },
+ { 0x08, 0x001b, "BatteryOperation" },
+ { 0x08, 0x001c, "BatteryOK" },
+ { 0x08, 0x001d, "BatteryLow" },
+ { 0x08, 0x001e, "Speaker" },
+ { 0x08, 0x001f, "Headset" },
+ { 0x08, 0x0020, "Hold" },
+ { 0x08, 0x0021, "Microphone" },
+ { 0x08, 0x0022, "Coverage" },
+ { 0x08, 0x0023, "NightMode" },
+ { 0x08, 0x0024, "SendCalls" },
+ { 0x08, 0x0025, "CallPickup" },
+ { 0x08, 0x0026, "Conference" },
+ { 0x08, 0x0027, "Standby" },
+ { 0x08, 0x0028, "CameraOn" },
+ { 0x08, 0x0029, "CameraOff" },
+ { 0x08, 0x002a, "OnLine" },
+ { 0x08, 0x002b, "OffLine" },
+ { 0x08, 0x002c, "Busy" },
+ { 0x08, 0x002d, "Ready" },
+ { 0x08, 0x002e, "PaperOut" },
+ { 0x08, 0x002f, "PaperJam" },
+ { 0x08, 0x0030, "Remote" },
+ { 0x08, 0x0031, "Forward" },
+ { 0x08, 0x0032, "Reverse" },
+ { 0x08, 0x0033, "Stop" },
+ { 0x08, 0x0034, "Rewind" },
+ { 0x08, 0x0035, "FastForward" },
+ { 0x08, 0x0036, "Play" },
+ { 0x08, 0x0037, "Pause" },
+ { 0x08, 0x0038, "Record" },
+ { 0x08, 0x0039, "Error" },
+ { 0x08, 0x003a, "UsageSelectedIndicator" },
+ { 0x08, 0x003b, "UsageInUseIndicator" },
+ { 0x08, 0x003c, "UsageMultiModeIndicator" },
+ { 0x08, 0x003d, "IndicatorOn" },
+ { 0x08, 0x003e, "IndicatorFlash" },
+ { 0x08, 0x003f, "IndicatorSlowBlink" },
+ { 0x08, 0x0040, "IndicatorFastBlink" },
+ { 0x08, 0x0041, "IndicatorOff" },
+ { 0x08, 0x0042, "FlashOnTime" },
+ { 0x08, 0x0043, "SlowBlinkOnTime" },
+ { 0x08, 0x0044, "SlowBlinkOffTime" },
+ { 0x08, 0x0045, "FastBlinkOnTime" },
+ { 0x08, 0x0046, "FastBlinkOffTime" },
+ { 0x08, 0x0047, "UsageIndicatorColor" },
+ { 0x08, 0x0048, "IndicatorRed" },
+ { 0x08, 0x0049, "IndicatorGreen" },
+ { 0x08, 0x004a, "IndicatorAmber" },
+ { 0x08, 0x004b, "GenericIndicator" },
+ { 0x08, 0x004c, "SystemSuspend" },
+ { 0x08, 0x004d, "ExternalPowerConnected" },
+ { 0x08, 0x004e, "IndicatorBlue" },
+ { 0x08, 0x004f, "IndicatorOrange" },
+ { 0x08, 0x0050, "GoodStatus" },
+ { 0x08, 0x0051, "WarningStatus" },
+ { 0x08, 0x0052, "RGBLED" },
+ { 0x08, 0x0053, "RedLEDChannel" },
+ { 0x08, 0x0054, "BlueLEDChannel" },
+ { 0x08, 0x0055, "GreenLEDChannel" },
+ { 0x08, 0x0056, "LEDIntensity" },
+ { 0x08, 0x0057, "SystemMicrophoneMute" },
+ { 0x08, 0x0060, "PlayerIndicator" },
+ { 0x08, 0x0061, "Player1" },
+ { 0x08, 0x0062, "Player2" },
+ { 0x08, 0x0063, "Player3" },
+ { 0x08, 0x0064, "Player4" },
+ { 0x08, 0x0065, "Player5" },
+ { 0x08, 0x0066, "Player6" },
+ { 0x08, 0x0067, "Player7" },
+ { 0x08, 0x0068, "Player8" },
+ { 0x09, 0, "Button" },
+ { 0x0a, 0, "Ordinal" },
+ { 0x0b, 0, "TelephonyDevice" },
+ { 0x0b, 0x0001, "Phone" },
+ { 0x0b, 0x0002, "AnsweringMachine" },
+ { 0x0b, 0x0003, "MessageControls" },
+ { 0x0b, 0x0004, "Handset" },
+ { 0x0b, 0x0005, "Headset" },
+ { 0x0b, 0x0006, "TelephonyKeyPad" },
+ { 0x0b, 0x0007, "ProgrammableButton" },
+ { 0x0b, 0x0020, "HookSwitch" },
+ { 0x0b, 0x0021, "Flash" },
+ { 0x0b, 0x0022, "Feature" },
+ { 0x0b, 0x0023, "Hold" },
+ { 0x0b, 0x0024, "Redial" },
+ { 0x0b, 0x0025, "Transfer" },
+ { 0x0b, 0x0026, "Drop" },
+ { 0x0b, 0x0027, "Park" },
+ { 0x0b, 0x0028, "ForwardCalls" },
+ { 0x0b, 0x0029, "AlternateFunction" },
+ { 0x0b, 0x002a, "Line" },
+ { 0x0b, 0x002b, "SpeakerPhone" },
+ { 0x0b, 0x002c, "Conference" },
+ { 0x0b, 0x002d, "RingEnable" },
+ { 0x0b, 0x002e, "RingSelect" },
+ { 0x0b, 0x002f, "PhoneMute" },
+ { 0x0b, 0x0030, "CallerID" },
+ { 0x0b, 0x0031, "Send" },
+ { 0x0b, 0x0050, "SpeedDial" },
+ { 0x0b, 0x0051, "StoreNumber" },
+ { 0x0b, 0x0052, "RecallNumber" },
+ { 0x0b, 0x0053, "PhoneDirectory" },
+ { 0x0b, 0x0070, "VoiceMail" },
+ { 0x0b, 0x0071, "ScreenCalls" },
+ { 0x0b, 0x0072, "DoNotDisturb" },
+ { 0x0b, 0x0073, "Message" },
+ { 0x0b, 0x0074, "AnswerOnOff" },
+ { 0x0b, 0x0090, "InsideDialTone" },
+ { 0x0b, 0x0091, "OutsideDialTone" },
+ { 0x0b, 0x0092, "InsideRingTone" },
+ { 0x0b, 0x0093, "OutsideRingTone" },
+ { 0x0b, 0x0094, "PriorityRingTone" },
+ { 0x0b, 0x0095, "InsideRingback" },
+ { 0x0b, 0x0096, "PriorityRingback" },
+ { 0x0b, 0x0097, "LineBusyTone" },
+ { 0x0b, 0x0098, "ReorderTone" },
+ { 0x0b, 0x0099, "CallWaitingTone" },
+ { 0x0b, 0x009a, "ConfirmationTone1" },
+ { 0x0b, 0x009b, "ConfirmationTone2" },
+ { 0x0b, 0x009c, "TonesOff" },
+ { 0x0b, 0x009d, "OutsideRingback" },
+ { 0x0b, 0x009e, "Ringer" },
+ { 0x0b, 0x00b0, "PhoneKey0" },
+ { 0x0b, 0x00b1, "PhoneKey1" },
+ { 0x0b, 0x00b2, "PhoneKey2" },
+ { 0x0b, 0x00b3, "PhoneKey3" },
+ { 0x0b, 0x00b4, "PhoneKey4" },
+ { 0x0b, 0x00b5, "PhoneKey5" },
+ { 0x0b, 0x00b6, "PhoneKey6" },
+ { 0x0b, 0x00b7, "PhoneKey7" },
+ { 0x0b, 0x00b8, "PhoneKey8" },
+ { 0x0b, 0x00b9, "PhoneKey9" },
+ { 0x0b, 0x00ba, "PhoneKeyStar" },
+ { 0x0b, 0x00bb, "PhoneKeyPound" },
+ { 0x0b, 0x00bc, "PhoneKeyA" },
+ { 0x0b, 0x00bd, "PhoneKeyB" },
+ { 0x0b, 0x00be, "PhoneKeyC" },
+ { 0x0b, 0x00bf, "PhoneKeyD" },
+ { 0x0b, 0x00c0, "PhoneCallHistoryKey" },
+ { 0x0b, 0x00c1, "PhoneCallerIDKey" },
+ { 0x0b, 0x00c2, "PhoneSettingsKey" },
+ { 0x0b, 0x00f0, "HostControl" },
+ { 0x0b, 0x00f1, "HostAvailable" },
+ { 0x0b, 0x00f2, "HostCallActive" },
+ { 0x0b, 0x00f3, "ActivateHandsetAudio" },
+ { 0x0b, 0x00f4, "RingType" },
+ { 0x0b, 0x00f5, "RedialablePhoneNumber" },
+ { 0x0b, 0x00f8, "StopRingTone" },
+ { 0x0b, 0x00f9, "PSTNRingTone" },
+ { 0x0b, 0x00fa, "HostRingTone" },
+ { 0x0b, 0x00fb, "AlertSoundError" },
+ { 0x0b, 0x00fc, "AlertSoundConfirm" },
+ { 0x0b, 0x00fd, "AlertSoundNotification" },
+ { 0x0b, 0x00fe, "SilentRing" },
+ { 0x0b, 0x0108, "EmailMessageWaiting" },
+ { 0x0b, 0x0109, "VoicemailMessageWaiting" },
+ { 0x0b, 0x010a, "HostHold" },
+ { 0x0b, 0x0110, "IncomingCallHistoryCount" },
+ { 0x0b, 0x0111, "OutgoingCallHistoryCount" },
+ { 0x0b, 0x0112, "IncomingCallHistory" },
+ { 0x0b, 0x0113, "OutgoingCallHistory" },
+ { 0x0b, 0x0114, "PhoneLocale" },
+ { 0x0b, 0x0140, "PhoneTimeSecond" },
+ { 0x0b, 0x0141, "PhoneTimeMinute" },
+ { 0x0b, 0x0142, "PhoneTimeHour" },
+ { 0x0b, 0x0143, "PhoneDateDay" },
+ { 0x0b, 0x0144, "PhoneDateMonth" },
+ { 0x0b, 0x0145, "PhoneDateYear" },
+ { 0x0b, 0x0146, "HandsetNickname" },
+ { 0x0b, 0x0147, "AddressBookID" },
+ { 0x0b, 0x014a, "CallDuration" },
+ { 0x0b, 0x014b, "DualModePhone" },
+ { 0x0c, 0, "Consumer" },
+ { 0x0c, 0x0001, "ConsumerControl" },
+ { 0x0c, 0x0002, "NumericKeyPad" },
+ { 0x0c, 0x0003, "ProgrammableButtons" },
+ { 0x0c, 0x0004, "Microphone" },
+ { 0x0c, 0x0005, "Headphone" },
+ { 0x0c, 0x0006, "GraphicEqualizer" },
+ { 0x0c, 0x0020, "10" },
+ { 0x0c, 0x0021, "100" },
+ { 0x0c, 0x0022, "AMPM" },
+ { 0x0c, 0x0030, "Power" },
+ { 0x0c, 0x0031, "Reset" },
+ { 0x0c, 0x0032, "Sleep" },
+ { 0x0c, 0x0033, "SleepAfter" },
+ { 0x0c, 0x0034, "SleepMode" },
+ { 0x0c, 0x0035, "Illumination" },
+ { 0x0c, 0x0036, "FunctionButtons" },
+ { 0x0c, 0x0040, "Menu" },
+ { 0x0c, 0x0041, "MenuPick" },
+ { 0x0c, 0x0042, "MenuUp" },
+ { 0x0c, 0x0043, "MenuDown" },
+ { 0x0c, 0x0044, "MenuLeft" },
+ { 0x0c, 0x0045, "MenuRight" },
+ { 0x0c, 0x0046, "MenuEscape" },
+ { 0x0c, 0x0047, "MenuValueIncrease" },
+ { 0x0c, 0x0048, "MenuValueDecrease" },
+ { 0x0c, 0x0060, "DataOnScreen" },
+ { 0x0c, 0x0061, "ClosedCaption" },
+ { 0x0c, 0x0062, "ClosedCaptionSelect" },
+ { 0x0c, 0x0063, "VCRTV" },
+ { 0x0c, 0x0064, "BroadcastMode" },
+ { 0x0c, 0x0065, "Snapshot" },
+ { 0x0c, 0x0066, "Still" },
+ { 0x0c, 0x0067, "PictureinPictureToggle" },
+ { 0x0c, 0x0068, "PictureinPictureSwap" },
+ { 0x0c, 0x0069, "RedMenuButton" },
+ { 0x0c, 0x006a, "GreenMenuButton" },
+ { 0x0c, 0x006b, "BlueMenuButton" },
+ { 0x0c, 0x006c, "YellowMenuButton" },
+ { 0x0c, 0x006d, "Aspect" },
+ { 0x0c, 0x006e, "3DModeSelect" },
+ { 0x0c, 0x006f, "DisplayBrightnessIncrement" },
+ { 0x0c, 0x0070, "DisplayBrightnessDecrement" },
+ { 0x0c, 0x0071, "DisplayBrightness" },
+ { 0x0c, 0x0072, "DisplayBacklightToggle" },
+ { 0x0c, 0x0073, "DisplaySetBrightnesstoMinimum" },
+ { 0x0c, 0x0074, "DisplaySetBrightnesstoMaximum" },
+ { 0x0c, 0x0075, "DisplaySetAutoBrightness" },
+ { 0x0c, 0x0076, "CameraAccessEnabled" },
+ { 0x0c, 0x0077, "CameraAccessDisabled" },
+ { 0x0c, 0x0078, "CameraAccessToggle" },
+ { 0x0c, 0x0079, "KeyboardBrightnessIncrement" },
+ { 0x0c, 0x007a, "KeyboardBrightnessDecrement" },
+ { 0x0c, 0x007b, "KeyboardBacklightSetLevel" },
+ { 0x0c, 0x007c, "KeyboardBacklightOOC" },
+ { 0x0c, 0x007d, "KeyboardBacklightSetMinimum" },
+ { 0x0c, 0x007e, "KeyboardBacklightSetMaximum" },
+ { 0x0c, 0x007f, "KeyboardBacklightAuto" },
+ { 0x0c, 0x0080, "Selection" },
+ { 0x0c, 0x0081, "AssignSelection" },
+ { 0x0c, 0x0082, "ModeStep" },
+ { 0x0c, 0x0083, "RecallLast" },
+ { 0x0c, 0x0084, "EnterChannel" },
+ { 0x0c, 0x0085, "OrderMovie" },
+ { 0x0c, 0x0086, "Channel" },
+ { 0x0c, 0x0087, "MediaSelection" },
+ { 0x0c, 0x0088, "MediaSelectComputer" },
+ { 0x0c, 0x0089, "MediaSelectTV" },
+ { 0x0c, 0x008a, "MediaSelectWWW" },
+ { 0x0c, 0x008b, "MediaSelectDVD" },
+ { 0x0c, 0x008c, "MediaSelectTelephone" },
+ { 0x0c, 0x008d, "MediaSelectProgramGuide" },
+ { 0x0c, 0x008e, "MediaSelectVideoPhone" },
+ { 0x0c, 0x008f, "MediaSelectGames" },
+ { 0x0c, 0x0090, "MediaSelectMessages" },
+ { 0x0c, 0x0091, "MediaSelectCD" },
+ { 0x0c, 0x0092, "MediaSelectVCR" },
+ { 0x0c, 0x0093, "MediaSelectTuner" },
+ { 0x0c, 0x0094, "Quit" },
+ { 0x0c, 0x0095, "Help" },
+ { 0x0c, 0x0096, "MediaSelectTape" },
+ { 0x0c, 0x0097, "MediaSelectCable" },
+ { 0x0c, 0x0098, "MediaSelectSatellite" },
+ { 0x0c, 0x0099, "MediaSelectSecurity" },
+ { 0x0c, 0x009a, "MediaSelectHome" },
+ { 0x0c, 0x009b, "MediaSelectCall" },
+ { 0x0c, 0x009c, "ChannelIncrement" },
+ { 0x0c, 0x009d, "ChannelDecrement" },
+ { 0x0c, 0x009e, "MediaSelectSAP" },
+ { 0x0c, 0x00a0, "VCRPlus" },
+ { 0x0c, 0x00a1, "Once" },
+ { 0x0c, 0x00a2, "Daily" },
+ { 0x0c, 0x00a3, "Weekly" },
+ { 0x0c, 0x00a4, "Monthly" },
+ { 0x0c, 0x00b0, "Play" },
+ { 0x0c, 0x00b1, "Pause" },
+ { 0x0c, 0x00b2, "Record" },
+ { 0x0c, 0x00b3, "FastForward" },
+ { 0x0c, 0x00b4, "Rewind" },
+ { 0x0c, 0x00b5, "ScanNextTrack" },
+ { 0x0c, 0x00b6, "ScanPreviousTrack" },
+ { 0x0c, 0x00b7, "Stop" },
+ { 0x0c, 0x00b8, "Eject" },
+ { 0x0c, 0x00b9, "RandomPlay" },
+ { 0x0c, 0x00ba, "SelectDisc" },
+ { 0x0c, 0x00bb, "EnterDisc" },
+ { 0x0c, 0x00bc, "Repeat" },
+ { 0x0c, 0x00bd, "Tracking" },
+ { 0x0c, 0x00be, "TrackNormal" },
+ { 0x0c, 0x00bf, "SlowTracking" },
+ { 0x0c, 0x00c0, "FrameForward" },
+ { 0x0c, 0x00c1, "FrameBack" },
+ { 0x0c, 0x00c2, "Mark" },
+ { 0x0c, 0x00c3, "ClearMark" },
+ { 0x0c, 0x00c4, "RepeatFromMark" },
+ { 0x0c, 0x00c5, "ReturnToMark" },
+ { 0x0c, 0x00c6, "SearchMarkForward" },
+ { 0x0c, 0x00c7, "SearchMarkBackwards" },
+ { 0x0c, 0x00c8, "CounterReset" },
+ { 0x0c, 0x00c9, "ShowCounter" },
+ { 0x0c, 0x00ca, "TrackingIncrement" },
+ { 0x0c, 0x00cb, "TrackingDecrement" },
+ { 0x0c, 0x00cc, "StopEject" },
+ { 0x0c, 0x00cd, "PlayPause" },
+ { 0x0c, 0x00ce, "PlaySkip" },
+ { 0x0c, 0x00cf, "VoiceCommand" },
+ { 0x0c, 0x00d0, "InvokeCaptureInterface" },
+ { 0x0c, 0x00d1, "StartorStopGameRecording" },
+ { 0x0c, 0x00d2, "HistoricalGameCapture" },
+ { 0x0c, 0x00d3, "CaptureGameScreenshot" },
+ { 0x0c, 0x00d4, "ShoworHideRecordingIndicator" },
+ { 0x0c, 0x00d5, "StartorStopMicrophoneCapture" },
+ { 0x0c, 0x00d6, "StartorStopCameraCapture" },
+ { 0x0c, 0x00d7, "StartorStopGameBroadcast" },
+ { 0x0c, 0x00d8, "StartorStopVoiceDictationSession" },
+ { 0x0c, 0x00d9, "InvokeDismissEmojiPicker" },
+ { 0x0c, 0x00e0, "Volume" },
+ { 0x0c, 0x00e1, "Balance" },
+ { 0x0c, 0x00e2, "Mute" },
+ { 0x0c, 0x00e3, "Bass" },
+ { 0x0c, 0x00e4, "Treble" },
+ { 0x0c, 0x00e5, "BassBoost" },
+ { 0x0c, 0x00e6, "SurroundMode" },
+ { 0x0c, 0x00e7, "Loudness" },
+ { 0x0c, 0x00e8, "MPX" },
+ { 0x0c, 0x00e9, "VolumeIncrement" },
+ { 0x0c, 0x00ea, "VolumeDecrement" },
+ { 0x0c, 0x00f0, "SpeedSelect" },
+ { 0x0c, 0x00f1, "PlaybackSpeed" },
+ { 0x0c, 0x00f2, "StandardPlay" },
+ { 0x0c, 0x00f3, "LongPlay" },
+ { 0x0c, 0x00f4, "ExtendedPlay" },
+ { 0x0c, 0x00f5, "Slow" },
+ { 0x0c, 0x0100, "FanEnable" },
+ { 0x0c, 0x0101, "FanSpeed" },
+ { 0x0c, 0x0102, "LightEnable" },
+ { 0x0c, 0x0103, "LightIlluminationLevel" },
+ { 0x0c, 0x0104, "ClimateControlEnable" },
+ { 0x0c, 0x0105, "RoomTemperature" },
+ { 0x0c, 0x0106, "SecurityEnable" },
+ { 0x0c, 0x0107, "FireAlarm" },
+ { 0x0c, 0x0108, "PoliceAlarm" },
+ { 0x0c, 0x0109, "Proximity" },
+ { 0x0c, 0x010a, "Motion" },
+ { 0x0c, 0x010b, "DuressAlarm" },
+ { 0x0c, 0x010c, "HoldupAlarm" },
+ { 0x0c, 0x010d, "MedicalAlarm" },
+ { 0x0c, 0x0150, "BalanceRight" },
+ { 0x0c, 0x0151, "BalanceLeft" },
+ { 0x0c, 0x0152, "BassIncrement" },
+ { 0x0c, 0x0153, "BassDecrement" },
+ { 0x0c, 0x0154, "TrebleIncrement" },
+ { 0x0c, 0x0155, "TrebleDecrement" },
+ { 0x0c, 0x0160, "SpeakerSystem" },
+ { 0x0c, 0x0161, "ChannelLeft" },
+ { 0x0c, 0x0162, "ChannelRight" },
+ { 0x0c, 0x0163, "ChannelCenter" },
+ { 0x0c, 0x0164, "ChannelFront" },
+ { 0x0c, 0x0165, "ChannelCenterFront" },
+ { 0x0c, 0x0166, "ChannelSide" },
+ { 0x0c, 0x0167, "ChannelSurround" },
+ { 0x0c, 0x0168, "ChannelLowFrequencyEnhancement" },
+ { 0x0c, 0x0169, "ChannelTop" },
+ { 0x0c, 0x016a, "ChannelUnknown" },
+ { 0x0c, 0x0170, "Subchannel" },
+ { 0x0c, 0x0171, "SubchannelIncrement" },
+ { 0x0c, 0x0172, "SubchannelDecrement" },
+ { 0x0c, 0x0173, "AlternateAudioIncrement" },
+ { 0x0c, 0x0174, "AlternateAudioDecrement" },
+ { 0x0c, 0x0180, "ApplicationLaunchButtons" },
+ { 0x0c, 0x0181, "ALLaunchButtonConfigurationTool" },
+ { 0x0c, 0x0182, "ALProgrammableButtonConfiguration" },
+ { 0x0c, 0x0183, "ALConsumerControlConfiguration" },
+ { 0x0c, 0x0184, "ALWordProcessor" },
+ { 0x0c, 0x0185, "ALTextEditor" },
+ { 0x0c, 0x0186, "ALSpreadsheet" },
+ { 0x0c, 0x0187, "ALGraphicsEditor" },
+ { 0x0c, 0x0188, "ALPresentationApp" },
+ { 0x0c, 0x0189, "ALDatabaseApp" },
+ { 0x0c, 0x018a, "ALEmailReader" },
+ { 0x0c, 0x018b, "ALNewsreader" },
+ { 0x0c, 0x018c, "ALVoicemail" },
+ { 0x0c, 0x018d, "ALContactsAddressBook" },
+ { 0x0c, 0x018e, "ALCalendarSchedule" },
+ { 0x0c, 0x018f, "ALTaskProjectManager" },
+ { 0x0c, 0x0190, "ALLogJournalTimecard" },
+ { 0x0c, 0x0191, "ALCheckbookFinance" },
+ { 0x0c, 0x0192, "ALCalculator" },
+ { 0x0c, 0x0193, "ALAVCapturePlayback" },
+ { 0x0c, 0x0194, "ALLocalMachineBrowser" },
+ { 0x0c, 0x0195, "ALLANWANBrowser" },
+ { 0x0c, 0x0196, "ALInternetBrowser" },
+ { 0x0c, 0x0197, "ALRemoteNetworkingISPConnect" },
+ { 0x0c, 0x0198, "ALNetworkConference" },
+ { 0x0c, 0x0199, "ALNetworkChat" },
+ { 0x0c, 0x019a, "ALTelephonyDialer" },
+ { 0x0c, 0x019b, "ALLogon" },
+ { 0x0c, 0x019c, "ALLogoff" },
+ { 0x0c, 0x019d, "ALLogonLogoff" },
+ { 0x0c, 0x019e, "ALTerminalLockScreensaver" },
+ { 0x0c, 0x019f, "ALControlPanel" },
+ { 0x0c, 0x01a0, "ALCommandLineProcessorRun" },
+ { 0x0c, 0x01a1, "ALProcessTaskManager" },
+ { 0x0c, 0x01a2, "ALSelectTaskApplication" },
+ { 0x0c, 0x01a3, "ALNextTaskApplication" },
+ { 0x0c, 0x01a4, "ALPreviousTaskApplication" },
+ { 0x0c, 0x01a5, "ALPreemptiveHaltTaskApplication" },
+ { 0x0c, 0x01a6, "ALIntegratedHelpCenter" },
+ { 0x0c, 0x01a7, "ALDocuments" },
+ { 0x0c, 0x01a8, "ALThesaurus" },
+ { 0x0c, 0x01a9, "ALDictionary" },
+ { 0x0c, 0x01aa, "ALDesktop" },
+ { 0x0c, 0x01ab, "ALSpellCheck" },
+ { 0x0c, 0x01ac, "ALGrammarCheck" },
+ { 0x0c, 0x01ad, "ALWirelessStatus" },
+ { 0x0c, 0x01ae, "ALKeyboardLayout" },
+ { 0x0c, 0x01af, "ALVirusProtection" },
+ { 0x0c, 0x01b0, "ALEncryption" },
+ { 0x0c, 0x01b1, "ALScreenSaver" },
+ { 0x0c, 0x01b2, "ALAlarms" },
+ { 0x0c, 0x01b3, "ALClock" },
+ { 0x0c, 0x01b4, "ALFileBrowser" },
+ { 0x0c, 0x01b5, "ALPowerStatus" },
+ { 0x0c, 0x01b6, "ALImageBrowser" },
+ { 0x0c, 0x01b7, "ALAudioBrowser" },
+ { 0x0c, 0x01b8, "ALMovieBrowser" },
+ { 0x0c, 0x01b9, "ALDigitalRightsManager" },
+ { 0x0c, 0x01ba, "ALDigitalWallet" },
+ { 0x0c, 0x01bc, "ALInstantMessaging" },
+ { 0x0c, 0x01bd, "ALOEMFeaturesTipsTutorialBrowser" },
+ { 0x0c, 0x01be, "ALOEMHelp" },
+ { 0x0c, 0x01bf, "ALOnlineCommunity" },
+ { 0x0c, 0x01c0, "ALEntertainmentContentBrowser" },
+ { 0x0c, 0x01c1, "ALOnlineShoppingBrowser" },
+ { 0x0c, 0x01c2, "ALSmartCardInformationHelp" },
+ { 0x0c, 0x01c3, "ALMarketMonitorFinanceBrowser" },
+ { 0x0c, 0x01c4, "ALCustomizedCorporateNewsBrowser" },
+ { 0x0c, 0x01c5, "ALOnlineActivityBrowser" },
+ { 0x0c, 0x01c6, "ALResearchSearchBrowser" },
+ { 0x0c, 0x01c7, "ALAudioPlayer" },
+ { 0x0c, 0x01c8, "ALMessageStatus" },
+ { 0x0c, 0x01c9, "ALContactSync" },
+ { 0x0c, 0x01ca, "ALNavigation" },
+ { 0x0c, 0x01cb, "ALContextawareDesktopAssistant" },
+ { 0x0c, 0x0200, "GenericGUIApplicationControls" },
+ { 0x0c, 0x0201, "ACNew" },
+ { 0x0c, 0x0202, "ACOpen" },
+ { 0x0c, 0x0203, "ACClose" },
+ { 0x0c, 0x0204, "ACExit" },
+ { 0x0c, 0x0205, "ACMaximize" },
+ { 0x0c, 0x0206, "ACMinimize" },
+ { 0x0c, 0x0207, "ACSave" },
+ { 0x0c, 0x0208, "ACPrint" },
+ { 0x0c, 0x0209, "ACProperties" },
+ { 0x0c, 0x021a, "ACUndo" },
+ { 0x0c, 0x021b, "ACCopy" },
+ { 0x0c, 0x021c, "ACCut" },
+ { 0x0c, 0x021d, "ACPaste" },
+ { 0x0c, 0x021e, "ACSelectAll" },
+ { 0x0c, 0x021f, "ACFind" },
+ { 0x0c, 0x0220, "ACFindandReplace" },
+ { 0x0c, 0x0221, "ACSearch" },
+ { 0x0c, 0x0222, "ACGoTo" },
+ { 0x0c, 0x0223, "ACHome" },
+ { 0x0c, 0x0224, "ACBack" },
+ { 0x0c, 0x0225, "ACForward" },
+ { 0x0c, 0x0226, "ACStop" },
+ { 0x0c, 0x0227, "ACRefresh" },
+ { 0x0c, 0x0228, "ACPreviousLink" },
+ { 0x0c, 0x0229, "ACNextLink" },
+ { 0x0c, 0x022a, "ACBookmarks" },
+ { 0x0c, 0x022b, "ACHistory" },
+ { 0x0c, 0x022c, "ACSubscriptions" },
+ { 0x0c, 0x022d, "ACZoomIn" },
+ { 0x0c, 0x022e, "ACZoomOut" },
+ { 0x0c, 0x022f, "ACZoom" },
+ { 0x0c, 0x0230, "ACFullScreenView" },
+ { 0x0c, 0x0231, "ACNormalView" },
+ { 0x0c, 0x0232, "ACViewToggle" },
+ { 0x0c, 0x0233, "ACScrollUp" },
+ { 0x0c, 0x0234, "ACScrollDown" },
+ { 0x0c, 0x0235, "ACScroll" },
+ { 0x0c, 0x0236, "ACPanLeft" },
+ { 0x0c, 0x0237, "ACPanRight" },
+ { 0x0c, 0x0238, "ACPan" },
+ { 0x0c, 0x0239, "ACNewWindow" },
+ { 0x0c, 0x023a, "ACTileHorizontally" },
+ { 0x0c, 0x023b, "ACTileVertically" },
+ { 0x0c, 0x023c, "ACFormat" },
+ { 0x0c, 0x023d, "ACEdit" },
+ { 0x0c, 0x023e, "ACBold" },
+ { 0x0c, 0x023f, "ACItalics" },
+ { 0x0c, 0x0240, "ACUnderline" },
+ { 0x0c, 0x0241, "ACStrikethrough" },
+ { 0x0c, 0x0242, "ACSubscript" },
+ { 0x0c, 0x0243, "ACSuperscript" },
+ { 0x0c, 0x0244, "ACAllCaps" },
+ { 0x0c, 0x0245, "ACRotate" },
+ { 0x0c, 0x0246, "ACResize" },
+ { 0x0c, 0x0247, "ACFlipHorizontal" },
+ { 0x0c, 0x0248, "ACFlipVertical" },
+ { 0x0c, 0x0249, "ACMirrorHorizontal" },
+ { 0x0c, 0x024a, "ACMirrorVertical" },
+ { 0x0c, 0x024b, "ACFontSelect" },
+ { 0x0c, 0x024c, "ACFontColor" },
+ { 0x0c, 0x024d, "ACFontSize" },
+ { 0x0c, 0x024e, "ACJustifyLeft" },
+ { 0x0c, 0x024f, "ACJustifyCenterH" },
+ { 0x0c, 0x0250, "ACJustifyRight" },
+ { 0x0c, 0x0251, "ACJustifyBlockH" },
+ { 0x0c, 0x0252, "ACJustifyTop" },
+ { 0x0c, 0x0253, "ACJustifyCenterV" },
+ { 0x0c, 0x0254, "ACJustifyBottom" },
+ { 0x0c, 0x0255, "ACJustifyBlockV" },
+ { 0x0c, 0x0256, "ACIndentDecrease" },
+ { 0x0c, 0x0257, "ACIndentIncrease" },
+ { 0x0c, 0x0258, "ACNumberedList" },
+ { 0x0c, 0x0259, "ACRestartNumbering" },
+ { 0x0c, 0x025a, "ACBulletedList" },
+ { 0x0c, 0x025b, "ACPromote" },
+ { 0x0c, 0x025c, "ACDemote" },
+ { 0x0c, 0x025d, "ACYes" },
+ { 0x0c, 0x025e, "ACNo" },
+ { 0x0c, 0x025f, "ACCancel" },
+ { 0x0c, 0x0260, "ACCatalog" },
+ { 0x0c, 0x0261, "ACBuyCheckout" },
+ { 0x0c, 0x0262, "ACAddtoCart" },
+ { 0x0c, 0x0263, "ACExpand" },
+ { 0x0c, 0x0264, "ACExpandAll" },
+ { 0x0c, 0x0265, "ACCollapse" },
+ { 0x0c, 0x0266, "ACCollapseAll" },
+ { 0x0c, 0x0267, "ACPrintPreview" },
+ { 0x0c, 0x0268, "ACPasteSpecial" },
+ { 0x0c, 0x0269, "ACInsertMode" },
+ { 0x0c, 0x026a, "ACDelete" },
+ { 0x0c, 0x026b, "ACLock" },
+ { 0x0c, 0x026c, "ACUnlock" },
+ { 0x0c, 0x026d, "ACProtect" },
+ { 0x0c, 0x026e, "ACUnprotect" },
+ { 0x0c, 0x026f, "ACAttachComment" },
+ { 0x0c, 0x0270, "ACDeleteComment" },
+ { 0x0c, 0x0271, "ACViewComment" },
+ { 0x0c, 0x0272, "ACSelectWord" },
+ { 0x0c, 0x0273, "ACSelectSentence" },
+ { 0x0c, 0x0274, "ACSelectParagraph" },
+ { 0x0c, 0x0275, "ACSelectColumn" },
+ { 0x0c, 0x0276, "ACSelectRow" },
+ { 0x0c, 0x0277, "ACSelectTable" },
+ { 0x0c, 0x0278, "ACSelectObject" },
+ { 0x0c, 0x0279, "ACRedoRepeat" },
+ { 0x0c, 0x027a, "ACSort" },
+ { 0x0c, 0x027b, "ACSortAscending" },
+ { 0x0c, 0x027c, "ACSortDescending" },
+ { 0x0c, 0x027d, "ACFilter" },
+ { 0x0c, 0x027e, "ACSetClock" },
+ { 0x0c, 0x027f, "ACViewClock" },
+ { 0x0c, 0x0280, "ACSelectTimeZone" },
+ { 0x0c, 0x0281, "ACEditTimeZones" },
+ { 0x0c, 0x0282, "ACSetAlarm" },
+ { 0x0c, 0x0283, "ACClearAlarm" },
+ { 0x0c, 0x0284, "ACSnoozeAlarm" },
+ { 0x0c, 0x0285, "ACResetAlarm" },
+ { 0x0c, 0x0286, "ACSynchronize" },
+ { 0x0c, 0x0287, "ACSendReceive" },
+ { 0x0c, 0x0288, "ACSendTo" },
+ { 0x0c, 0x0289, "ACReply" },
+ { 0x0c, 0x028a, "ACReplyAll" },
+ { 0x0c, 0x028b, "ACForwardMsg" },
+ { 0x0c, 0x028c, "ACSend" },
+ { 0x0c, 0x028d, "ACAttachFile" },
+ { 0x0c, 0x028e, "ACUpload" },
+ { 0x0c, 0x028f, "ACDownloadSaveTargetAs" },
+ { 0x0c, 0x0290, "ACSetBorders" },
+ { 0x0c, 0x0291, "ACInsertRow" },
+ { 0x0c, 0x0292, "ACInsertColumn" },
+ { 0x0c, 0x0293, "ACInsertFile" },
+ { 0x0c, 0x0294, "ACInsertPicture" },
+ { 0x0c, 0x0295, "ACInsertObject" },
+ { 0x0c, 0x0296, "ACInsertSymbol" },
+ { 0x0c, 0x0297, "ACSaveandClose" },
+ { 0x0c, 0x0298, "ACRename" },
+ { 0x0c, 0x0299, "ACMerge" },
+ { 0x0c, 0x029a, "ACSplit" },
+ { 0x0c, 0x029b, "ACDistributeHorizontally" },
+ { 0x0c, 0x029c, "ACDistributeVertically" },
+ { 0x0c, 0x029d, "ACNextKeyboardLayoutSelect" },
+ { 0x0c, 0x029e, "ACNavigationGuidance" },
+ { 0x0c, 0x029f, "ACDesktopShowAllWindows" },
+ { 0x0c, 0x02a0, "ACSoftKeyLeft" },
+ { 0x0c, 0x02a1, "ACSoftKeyRight" },
+ { 0x0c, 0x02a2, "ACDesktopShowAllApplications" },
+ { 0x0c, 0x02b0, "ACIdleKeepAlive" },
+ { 0x0c, 0x02c0, "ExtendedKeyboardAttributesCollection" },
+ { 0x0c, 0x02c1, "KeyboardFormFactor" },
+ { 0x0c, 0x02c2, "KeyboardKeyType" },
+ { 0x0c, 0x02c3, "KeyboardPhysicalLayout" },
+ { 0x0c, 0x02c4, "VendorSpecificKeyboardPhysicalLayout" },
+ { 0x0c, 0x02c5, "KeyboardIETFLanguageTagIndex" },
+ { 0x0c, 0x02c6, "ImplementedKeyboardInputAssistControls" },
+ { 0x0c, 0x02c7, "KeyboardInputAssistPrevious" },
+ { 0x0c, 0x02c8, "KeyboardInputAssistNext" },
+ { 0x0c, 0x02c9, "KeyboardInputAssistPreviousGroup" },
+ { 0x0c, 0x02ca, "KeyboardInputAssistNextGroup" },
+ { 0x0c, 0x02cb, "KeyboardInputAssistAccept" },
+ { 0x0c, 0x02cc, "KeyboardInputAssistCancel" },
+ { 0x0c, 0x02d0, "PrivacyScreenToggle" },
+ { 0x0c, 0x02d1, "PrivacyScreenLevelDecrement" },
+ { 0x0c, 0x02d2, "PrivacyScreenLevelIncrement" },
+ { 0x0c, 0x02d3, "PrivacyScreenLevelMinimum" },
+ { 0x0c, 0x02d4, "PrivacyScreenLevelMaximum" },
+ { 0x0c, 0x0500, "ContactEdited" },
+ { 0x0c, 0x0501, "ContactAdded" },
+ { 0x0c, 0x0502, "ContactRecordActive" },
+ { 0x0c, 0x0503, "ContactIndex" },
+ { 0x0c, 0x0504, "ContactNickname" },
+ { 0x0c, 0x0505, "ContactFirstName" },
+ { 0x0c, 0x0506, "ContactLastName" },
+ { 0x0c, 0x0507, "ContactFullName" },
+ { 0x0c, 0x0508, "ContactPhoneNumberPersonal" },
+ { 0x0c, 0x0509, "ContactPhoneNumberBusiness" },
+ { 0x0c, 0x050a, "ContactPhoneNumberMobile" },
+ { 0x0c, 0x050b, "ContactPhoneNumberPager" },
+ { 0x0c, 0x050c, "ContactPhoneNumberFax" },
+ { 0x0c, 0x050d, "ContactPhoneNumberOther" },
+ { 0x0c, 0x050e, "ContactEmailPersonal" },
+ { 0x0c, 0x050f, "ContactEmailBusiness" },
+ { 0x0c, 0x0510, "ContactEmailOther" },
+ { 0x0c, 0x0511, "ContactEmailMain" },
+ { 0x0c, 0x0512, "ContactSpeedDialNumber" },
+ { 0x0c, 0x0513, "ContactStatusFlag" },
+ { 0x0c, 0x0514, "ContactMisc" },
+ { 0x0d, 0, "Digitizers" },
+ { 0x0d, 0x0001, "Digitizer" },
+ { 0x0d, 0x0002, "Pen" },
+ { 0x0d, 0x0003, "LightPen" },
+ { 0x0d, 0x0004, "TouchScreen" },
+ { 0x0d, 0x0005, "TouchPad" },
+ { 0x0d, 0x0006, "Whiteboard" },
+ { 0x0d, 0x0007, "CoordinateMeasuringMachine" },
+ { 0x0d, 0x0008, "3DDigitizer" },
+ { 0x0d, 0x0009, "StereoPlotter" },
+ { 0x0d, 0x000a, "ArticulatedArm" },
+ { 0x0d, 0x000b, "Armature" },
+ { 0x0d, 0x000c, "MultiplePointDigitizer" },
+ { 0x0d, 0x000d, "FreeSpaceWand" },
+ { 0x0d, 0x000e, "DeviceConfiguration" },
+ { 0x0d, 0x000f, "CapacitiveHeatMapDigitizer" },
+ { 0x0d, 0x0020, "Stylus" },
+ { 0x0d, 0x0021, "Puck" },
+ { 0x0d, 0x0022, "Finger" },
+ { 0x0d, 0x0023, "Devicesettings" },
+ { 0x0d, 0x0024, "CharacterGesture" },
+ { 0x0d, 0x0030, "TipPressure" },
+ { 0x0d, 0x0031, "BarrelPressure" },
+ { 0x0d, 0x0032, "InRange" },
+ { 0x0d, 0x0033, "Touch" },
+ { 0x0d, 0x0034, "Untouch" },
+ { 0x0d, 0x0035, "Tap" },
+ { 0x0d, 0x0036, "Quality" },
+ { 0x0d, 0x0037, "DataValid" },
+ { 0x0d, 0x0038, "TransducerIndex" },
+ { 0x0d, 0x0039, "TabletFunctionKeys" },
+ { 0x0d, 0x003a, "ProgramChangeKeys" },
+ { 0x0d, 0x003b, "BatteryStrength" },
+ { 0x0d, 0x003c, "Invert" },
+ { 0x0d, 0x003d, "XTilt" },
+ { 0x0d, 0x003e, "YTilt" },
+ { 0x0d, 0x003f, "Azimuth" },
+ { 0x0d, 0x0040, "Altitude" },
+ { 0x0d, 0x0041, "Twist" },
+ { 0x0d, 0x0042, "TipSwitch" },
+ { 0x0d, 0x0043, "SecondaryTipSwitch" },
+ { 0x0d, 0x0044, "BarrelSwitch" },
+ { 0x0d, 0x0045, "Eraser" },
+ { 0x0d, 0x0046, "TabletPick" },
+ { 0x0d, 0x0047, "TouchValid" },
+ { 0x0d, 0x0048, "Width" },
+ { 0x0d, 0x0049, "Height" },
+ { 0x0d, 0x0051, "ContactIdentifier" },
+ { 0x0d, 0x0052, "DeviceMode" },
+ { 0x0d, 0x0053, "DeviceIdentifier" },
+ { 0x0d, 0x0054, "ContactCount" },
+ { 0x0d, 0x0055, "ContactCountMaximum" },
+ { 0x0d, 0x0056, "ScanTime" },
+ { 0x0d, 0x0057, "SurfaceSwitch" },
+ { 0x0d, 0x0058, "ButtonSwitch" },
+ { 0x0d, 0x0059, "PadType" },
+ { 0x0d, 0x005a, "SecondaryBarrelSwitch" },
+ { 0x0d, 0x005b, "TransducerSerialNumber" },
+ { 0x0d, 0x005c, "PreferredColor" },
+ { 0x0d, 0x005d, "PreferredColorisLocked" },
+ { 0x0d, 0x005e, "PreferredLineWidth" },
+ { 0x0d, 0x005f, "PreferredLineWidthisLocked" },
+ { 0x0d, 0x0060, "LatencyMode" },
+ { 0x0d, 0x0061, "GestureCharacterQuality" },
+ { 0x0d, 0x0062, "CharacterGestureDataLength" },
+ { 0x0d, 0x0063, "CharacterGestureData" },
+ { 0x0d, 0x0064, "GestureCharacterEncoding" },
+ { 0x0d, 0x0065, "UTF8CharacterGestureEncoding" },
+ { 0x0d, 0x0066, "UTF16LittleEndianCharacterGestureEncoding" },
+ { 0x0d, 0x0067, "UTF16BigEndianCharacterGestureEncoding" },
+ { 0x0d, 0x0068, "UTF32LittleEndianCharacterGestureEncoding" },
+ { 0x0d, 0x0069, "UTF32BigEndianCharacterGestureEncoding" },
+ { 0x0d, 0x006a, "CapacitiveHeatMapProtocolVendorID" },
+ { 0x0d, 0x006b, "CapacitiveHeatMapProtocolVersion" },
+ { 0x0d, 0x006c, "CapacitiveHeatMapFrameData" },
+ { 0x0d, 0x006d, "GestureCharacterEnable" },
+ { 0x0d, 0x006e, "TransducerSerialNumberPart2" },
+ { 0x0d, 0x006f, "NoPreferredColor" },
+ { 0x0d, 0x0070, "PreferredLineStyle" },
+ { 0x0d, 0x0071, "PreferredLineStyleisLocked" },
+ { 0x0d, 0x0072, "Ink" },
+ { 0x0d, 0x0073, "Pencil" },
+ { 0x0d, 0x0074, "Highlighter" },
+ { 0x0d, 0x0075, "ChiselMarker" },
+ { 0x0d, 0x0076, "Brush" },
+ { 0x0d, 0x0077, "NoPreference" },
+ { 0x0d, 0x0080, "DigitizerDiagnostic" },
+ { 0x0d, 0x0081, "DigitizerError" },
+ { 0x0d, 0x0082, "ErrNormalStatus" },
+ { 0x0d, 0x0083, "ErrTransducersExceeded" },
+ { 0x0d, 0x0084, "ErrFullTransFeaturesUnavailable" },
+ { 0x0d, 0x0085, "ErrChargeLow" },
+ { 0x0d, 0x0090, "TransducerSoftwareInfo" },
+ { 0x0d, 0x0091, "TransducerVendorId" },
+ { 0x0d, 0x0092, "TransducerProductId" },
+ { 0x0d, 0x0093, "DeviceSupportedProtocols" },
+ { 0x0d, 0x0094, "TransducerSupportedProtocols" },
+ { 0x0d, 0x0095, "NoProtocol" },
+ { 0x0d, 0x0096, "WacomAESProtocol" },
+ { 0x0d, 0x0097, "USIProtocol" },
+ { 0x0d, 0x0098, "MicrosoftPenProtocol" },
+ { 0x0d, 0x00a0, "SupportedReportRates" },
+ { 0x0d, 0x00a1, "ReportRate" },
+ { 0x0d, 0x00a2, "TransducerConnected" },
+ { 0x0d, 0x00a3, "SwitchDisabled" },
+ { 0x0d, 0x00a4, "SwitchUnimplemented" },
+ { 0x0d, 0x00a5, "TransducerSwitches" },
+ { 0x0d, 0x00a6, "TransducerIndexSelector" },
+ { 0x0d, 0x00b0, "ButtonPressThreshold" },
+ { 0x0e, 0, "Haptics" },
+ { 0x0e, 0x0001, "SimpleHapticController" },
+ { 0x0e, 0x0010, "WaveformList" },
+ { 0x0e, 0x0011, "DurationList" },
+ { 0x0e, 0x0020, "AutoTrigger" },
+ { 0x0e, 0x0021, "ManualTrigger" },
+ { 0x0e, 0x0022, "AutoTriggerAssociatedControl" },
+ { 0x0e, 0x0023, "Intensity" },
+ { 0x0e, 0x0024, "RepeatCount" },
+ { 0x0e, 0x0025, "RetriggerPeriod" },
+ { 0x0e, 0x0026, "WaveformVendorPage" },
+ { 0x0e, 0x0027, "WaveformVendorID" },
+ { 0x0e, 0x0028, "WaveformCutoffTime" },
+ { 0x0e, 0x1001, "WaveformNone" },
+ { 0x0e, 0x1002, "WaveformStop" },
+ { 0x0e, 0x1003, "WaveformClick" },
+ { 0x0e, 0x1004, "WaveformBuzzContinuous" },
+ { 0x0e, 0x1005, "WaveformRumbleContinuous" },
+ { 0x0e, 0x1006, "WaveformPress" },
+ { 0x0e, 0x1007, "WaveformRelease" },
+ { 0x0e, 0x1008, "WaveformHover" },
+ { 0x0e, 0x1009, "WaveformSuccess" },
+ { 0x0e, 0x100a, "WaveformError" },
+ { 0x0e, 0x100b, "WaveformInkContinuous" },
+ { 0x0e, 0x100c, "WaveformPencilContinuous" },
+ { 0x0e, 0x100d, "WaveformMarkerContinuous" },
+ { 0x0e, 0x100e, "WaveformChiselMarkerContinuous" },
+ { 0x0e, 0x100f, "WaveformBrushContinuous" },
+ { 0x0e, 0x1010, "WaveformEraserContinuous" },
+ { 0x0e, 0x1011, "WaveformSparkleContinuous" },
+ { 0x0f, 0, "PhysicalInputDevice" },
+ { 0x0f, 0x0001, "PhysicalInputDevice" },
+ { 0x0f, 0x0020, "Normal" },
+ { 0x0f, 0x0021, "SetEffectReport" },
+ { 0x0f, 0x0022, "EffectParameterBlockIndex" },
+ { 0x0f, 0x0023, "ParameterBlockOffset" },
+ { 0x0f, 0x0024, "ROMFlag" },
+ { 0x0f, 0x0025, "EffectType" },
+ { 0x0f, 0x0026, "ETConstantForce" },
+ { 0x0f, 0x0027, "ETRamp" },
+ { 0x0f, 0x0028, "ETCustomForce" },
+ { 0x0f, 0x0030, "ETSquare" },
+ { 0x0f, 0x0031, "ETSine" },
+ { 0x0f, 0x0032, "ETTriangle" },
+ { 0x0f, 0x0033, "ETSawtoothUp" },
+ { 0x0f, 0x0034, "ETSawtoothDown" },
+ { 0x0f, 0x0040, "ETSpring" },
+ { 0x0f, 0x0041, "ETDamper" },
+ { 0x0f, 0x0042, "ETInertia" },
+ { 0x0f, 0x0043, "ETFriction" },
+ { 0x0f, 0x0050, "Duration" },
+ { 0x0f, 0x0051, "SamplePeriod" },
+ { 0x0f, 0x0052, "Gain" },
+ { 0x0f, 0x0053, "TriggerButton" },
+ { 0x0f, 0x0054, "TriggerRepeatInterval" },
+ { 0x0f, 0x0055, "AxesEnable" },
+ { 0x0f, 0x0056, "DirectionEnable" },
+ { 0x0f, 0x0057, "Direction" },
+ { 0x0f, 0x0058, "TypeSpecificBlockOffset" },
+ { 0x0f, 0x0059, "BlockType" },
+ { 0x0f, 0x005a, "SetEnvelopeReport" },
+ { 0x0f, 0x005b, "AttackLevel" },
+ { 0x0f, 0x005c, "AttackTime" },
+ { 0x0f, 0x005d, "FadeLevel" },
+ { 0x0f, 0x005e, "FadeTime" },
+ { 0x0f, 0x005f, "SetConditionReport" },
+ { 0x0f, 0x0060, "CenterPointOffset" },
+ { 0x0f, 0x0061, "PositiveCoefficient" },
+ { 0x0f, 0x0062, "NegativeCoefficient" },
+ { 0x0f, 0x0063, "PositiveSaturation" },
+ { 0x0f, 0x0064, "NegativeSaturation" },
+ { 0x0f, 0x0065, "DeadBand" },
+ { 0x0f, 0x0066, "DownloadForceSample" },
+ { 0x0f, 0x0067, "IsochCustomForceEnable" },
+ { 0x0f, 0x0068, "CustomForceDataReport" },
+ { 0x0f, 0x0069, "CustomForceData" },
+ { 0x0f, 0x006a, "CustomForceVendorDefinedData" },
+ { 0x0f, 0x006b, "SetCustomForceReport" },
+ { 0x0f, 0x006c, "CustomForceDataOffset" },
+ { 0x0f, 0x006d, "SampleCount" },
+ { 0x0f, 0x006e, "SetPeriodicReport" },
+ { 0x0f, 0x006f, "Offset" },
+ { 0x0f, 0x0070, "Magnitude" },
+ { 0x0f, 0x0071, "Phase" },
+ { 0x0f, 0x0072, "Period" },
+ { 0x0f, 0x0073, "SetConstantForceReport" },
+ { 0x0f, 0x0074, "SetRampForceReport" },
+ { 0x0f, 0x0075, "RampStart" },
+ { 0x0f, 0x0076, "RampEnd" },
+ { 0x0f, 0x0077, "EffectOperationReport" },
+ { 0x0f, 0x0078, "EffectOperation" },
+ { 0x0f, 0x0079, "OpEffectStart" },
+ { 0x0f, 0x007a, "OpEffectStartSolo" },
+ { 0x0f, 0x007b, "OpEffectStop" },
+ { 0x0f, 0x007c, "LoopCount" },
+ { 0x0f, 0x007d, "DeviceGainReport" },
+ { 0x0f, 0x007e, "DeviceGain" },
+ { 0x0f, 0x007f, "ParameterBlockPoolsReport" },
+ { 0x0f, 0x0080, "RAMPoolSize" },
+ { 0x0f, 0x0081, "ROMPoolSize" },
+ { 0x0f, 0x0082, "ROMEffectBlockCount" },
+ { 0x0f, 0x0083, "SimultaneousEffectsMax" },
+ { 0x0f, 0x0084, "PoolAlignment" },
+ { 0x0f, 0x0085, "ParameterBlockMoveReport" },
+ { 0x0f, 0x0086, "MoveSource" },
+ { 0x0f, 0x0087, "MoveDestination" },
+ { 0x0f, 0x0088, "MoveLength" },
+ { 0x0f, 0x0089, "EffectParameterBlockLoadReport" },
+ { 0x0f, 0x008b, "EffectParameterBlockLoadStatus" },
+ { 0x0f, 0x008c, "BlockLoadSuccess" },
+ { 0x0f, 0x008d, "BlockLoadFull" },
+ { 0x0f, 0x008e, "BlockLoadError" },
+ { 0x0f, 0x008f, "BlockHandle" },
+ { 0x0f, 0x0090, "EffectParameterBlockFreeReport" },
+ { 0x0f, 0x0091, "TypeSpecificBlockHandle" },
+ { 0x0f, 0x0092, "PIDStateReport" },
+ { 0x0f, 0x0094, "EffectPlaying" },
+ { 0x0f, 0x0095, "PIDDeviceControlReport" },
+ { 0x0f, 0x0096, "PIDDeviceControl" },
+ { 0x0f, 0x0097, "DCEnableActuators" },
+ { 0x0f, 0x0098, "DCDisableActuators" },
+ { 0x0f, 0x0099, "DCStopAllEffects" },
+ { 0x0f, 0x009a, "DCReset" },
+ { 0x0f, 0x009b, "DCPause" },
+ { 0x0f, 0x009c, "DCContinue" },
+ { 0x0f, 0x009f, "DevicePaused" },
+ { 0x0f, 0x00a0, "ActuatorsEnabled" },
+ { 0x0f, 0x00a4, "SafetySwitch" },
+ { 0x0f, 0x00a5, "ActuatorOverrideSwitch" },
+ { 0x0f, 0x00a6, "ActuatorPower" },
+ { 0x0f, 0x00a7, "StartDelay" },
+ { 0x0f, 0x00a8, "ParameterBlockSize" },
+ { 0x0f, 0x00a9, "DeviceManagedPool" },
+ { 0x0f, 0x00aa, "SharedParameterBlocks" },
+ { 0x0f, 0x00ab, "CreateNewEffectParameterBlockReport" },
+ { 0x0f, 0x00ac, "RAMPoolAvailable" },
+ { 0x11, 0, "SoC" },
+ { 0x11, 0x0001, "SocControl" },
+ { 0x11, 0x0002, "FirmwareTransfer" },
+ { 0x11, 0x0003, "FirmwareFileId" },
+ { 0x11, 0x0004, "FileOffsetInBytes" },
+ { 0x11, 0x0005, "FileTransferSizeMaxInBytes" },
+ { 0x11, 0x0006, "FilePayload" },
+ { 0x11, 0x0007, "FilePayloadSizeInBytes" },
+ { 0x11, 0x0008, "FilePayloadContainsLastBytes" },
+ { 0x11, 0x0009, "FileTransferStop" },
+ { 0x11, 0x000a, "FileTransferTillEnd" },
+ { 0x12, 0, "EyeandHeadTrackers" },
+ { 0x12, 0x0001, "EyeTracker" },
+ { 0x12, 0x0002, "HeadTracker" },
+ { 0x12, 0x0010, "TrackingData" },
+ { 0x12, 0x0011, "Capabilities" },
+ { 0x12, 0x0012, "Configuration" },
+ { 0x12, 0x0013, "Status" },
+ { 0x12, 0x0014, "Control" },
+ { 0x12, 0x0020, "SensorTimestamp" },
+ { 0x12, 0x0021, "PositionX" },
+ { 0x12, 0x0022, "PositionY" },
+ { 0x12, 0x0023, "PositionZ" },
+ { 0x12, 0x0024, "GazePoint" },
+ { 0x12, 0x0025, "LeftEyePosition" },
+ { 0x12, 0x0026, "RightEyePosition" },
+ { 0x12, 0x0027, "HeadPosition" },
+ { 0x12, 0x0028, "HeadDirectionPoint" },
+ { 0x12, 0x0029, "RotationaboutXaxis" },
+ { 0x12, 0x002a, "RotationaboutYaxis" },
+ { 0x12, 0x002b, "RotationaboutZaxis" },
+ { 0x12, 0x0100, "TrackerQuality" },
+ { 0x12, 0x0101, "MinimumTrackingDistance" },
+ { 0x12, 0x0102, "OptimumTrackingDistance" },
+ { 0x12, 0x0103, "MaximumTrackingDistance" },
+ { 0x12, 0x0104, "MaximumScreenPlaneWidth" },
+ { 0x12, 0x0105, "MaximumScreenPlaneHeight" },
+ { 0x12, 0x0200, "DisplayManufacturerID" },
+ { 0x12, 0x0201, "DisplayProductID" },
+ { 0x12, 0x0202, "DisplaySerialNumber" },
+ { 0x12, 0x0203, "DisplayManufacturerDate" },
+ { 0x12, 0x0204, "CalibratedScreenWidth" },
+ { 0x12, 0x0205, "CalibratedScreenHeight" },
+ { 0x12, 0x0300, "SamplingFrequency" },
+ { 0x12, 0x0301, "ConfigurationStatus" },
+ { 0x12, 0x0400, "DeviceModeRequest" },
+ { 0x14, 0, "AuxiliaryDisplay" },
+ { 0x14, 0x0001, "AlphanumericDisplay" },
+ { 0x14, 0x0002, "AuxiliaryDisplay" },
+ { 0x14, 0x0020, "DisplayAttributesReport" },
+ { 0x14, 0x0021, "ASCIICharacterSet" },
+ { 0x14, 0x0022, "DataReadBack" },
+ { 0x14, 0x0023, "FontReadBack" },
+ { 0x14, 0x0024, "DisplayControlReport" },
+ { 0x14, 0x0025, "ClearDisplay" },
+ { 0x14, 0x0026, "DisplayEnable" },
+ { 0x14, 0x0027, "ScreenSaverDelay" },
+ { 0x14, 0x0028, "ScreenSaverEnable" },
+ { 0x14, 0x0029, "VerticalScroll" },
+ { 0x14, 0x002a, "HorizontalScroll" },
+ { 0x14, 0x002b, "CharacterReport" },
+ { 0x14, 0x002c, "DisplayData" },
+ { 0x14, 0x002d, "DisplayStatus" },
+ { 0x14, 0x002e, "StatNotReady" },
+ { 0x14, 0x002f, "StatReady" },
+ { 0x14, 0x0030, "ErrNotaloadablecharacter" },
+ { 0x14, 0x0031, "ErrFontdatacannotberead" },
+ { 0x14, 0x0032, "CursorPositionReport" },
+ { 0x14, 0x0033, "Row" },
+ { 0x14, 0x0034, "Column" },
+ { 0x14, 0x0035, "Rows" },
+ { 0x14, 0x0036, "Columns" },
+ { 0x14, 0x0037, "CursorPixelPositioning" },
+ { 0x14, 0x0038, "CursorMode" },
+ { 0x14, 0x0039, "CursorEnable" },
+ { 0x14, 0x003a, "CursorBlink" },
+ { 0x14, 0x003b, "FontReport" },
+ { 0x14, 0x003c, "FontData" },
+ { 0x14, 0x003d, "CharacterWidth" },
+ { 0x14, 0x003e, "CharacterHeight" },
+ { 0x14, 0x003f, "CharacterSpacingHorizontal" },
+ { 0x14, 0x0040, "CharacterSpacingVertical" },
+ { 0x14, 0x0041, "UnicodeCharacterSet" },
+ { 0x14, 0x0042, "Font7Segment" },
+ { 0x14, 0x0043, "7SegmentDirectMap" },
+ { 0x14, 0x0044, "Font14Segment" },
+ { 0x14, 0x0045, "14SegmentDirectMap" },
+ { 0x14, 0x0046, "DisplayBrightness" },
+ { 0x14, 0x0047, "DisplayContrast" },
+ { 0x14, 0x0048, "CharacterAttribute" },
+ { 0x14, 0x0049, "AttributeReadback" },
+ { 0x14, 0x004a, "AttributeData" },
+ { 0x14, 0x004b, "CharAttrEnhance" },
+ { 0x14, 0x004c, "CharAttrUnderline" },
+ { 0x14, 0x004d, "CharAttrBlink" },
+ { 0x14, 0x0080, "BitmapSizeX" },
+ { 0x14, 0x0081, "BitmapSizeY" },
+ { 0x14, 0x0082, "MaxBlitSize" },
+ { 0x14, 0x0083, "BitDepthFormat" },
+ { 0x14, 0x0084, "DisplayOrientation" },
+ { 0x14, 0x0085, "PaletteReport" },
+ { 0x14, 0x0086, "PaletteDataSize" },
+ { 0x14, 0x0087, "PaletteDataOffset" },
+ { 0x14, 0x0088, "PaletteData" },
+ { 0x14, 0x008a, "BlitReport" },
+ { 0x14, 0x008b, "BlitRectangleX1" },
+ { 0x14, 0x008c, "BlitRectangleY1" },
+ { 0x14, 0x008d, "BlitRectangleX2" },
+ { 0x14, 0x008e, "BlitRectangleY2" },
+ { 0x14, 0x008f, "BlitData" },
+ { 0x14, 0x0090, "SoftButton" },
+ { 0x14, 0x0091, "SoftButtonID" },
+ { 0x14, 0x0092, "SoftButtonSide" },
+ { 0x14, 0x0093, "SoftButtonOffset1" },
+ { 0x14, 0x0094, "SoftButtonOffset2" },
+ { 0x14, 0x0095, "SoftButtonReport" },
+ { 0x14, 0x00c2, "SoftKeys" },
+ { 0x14, 0x00cc, "DisplayDataExtensions" },
+ { 0x14, 0x00cf, "CharacterMapping" },
+ { 0x14, 0x00dd, "UnicodeEquivalent" },
+ { 0x14, 0x00df, "CharacterPageMapping" },
+ { 0x14, 0x00ff, "RequestReport" },
+ { 0x20, 0, "Sensors" },
+ { 0x20, 0x0001, "Sensor" },
+ { 0x20, 0x0010, "Biometric" },
+ { 0x20, 0x0011, "BiometricHumanPresence" },
+ { 0x20, 0x0012, "BiometricHumanProximity" },
+ { 0x20, 0x0013, "BiometricHumanTouch" },
+ { 0x20, 0x0014, "BiometricBloodPressure" },
+ { 0x20, 0x0015, "BiometricBodyTemperature" },
+ { 0x20, 0x0016, "BiometricHeartRate" },
+ { 0x20, 0x0017, "BiometricHeartRateVariability" },
+ { 0x20, 0x0018, "BiometricPeripheralOxygenSaturation" },
+ { 0x20, 0x0019, "BiometricRespiratoryRate" },
+ { 0x20, 0x0020, "Electrical" },
+ { 0x20, 0x0021, "ElectricalCapacitance" },
+ { 0x20, 0x0022, "ElectricalCurrent" },
+ { 0x20, 0x0023, "ElectricalPower" },
+ { 0x20, 0x0024, "ElectricalInductance" },
+ { 0x20, 0x0025, "ElectricalResistance" },
+ { 0x20, 0x0026, "ElectricalVoltage" },
+ { 0x20, 0x0027, "ElectricalPotentiometer" },
+ { 0x20, 0x0028, "ElectricalFrequency" },
+ { 0x20, 0x0029, "ElectricalPeriod" },
+ { 0x20, 0x0030, "Environmental" },
+ { 0x20, 0x0031, "EnvironmentalAtmosphericPressure" },
+ { 0x20, 0x0032, "EnvironmentalHumidity" },
+ { 0x20, 0x0033, "EnvironmentalTemperature" },
+ { 0x20, 0x0034, "EnvironmentalWindDirection" },
+ { 0x20, 0x0035, "EnvironmentalWindSpeed" },
+ { 0x20, 0x0036, "EnvironmentalAirQuality" },
+ { 0x20, 0x0037, "EnvironmentalHeatIndex" },
+ { 0x20, 0x0038, "EnvironmentalSurfaceTemperature" },
+ { 0x20, 0x0039, "EnvironmentalVolatileOrganicCompounds" },
+ { 0x20, 0x003a, "EnvironmentalObjectPresence" },
+ { 0x20, 0x003b, "EnvironmentalObjectProximity" },
+ { 0x20, 0x0040, "Light" },
+ { 0x20, 0x0041, "LightAmbientLight" },
+ { 0x20, 0x0042, "LightConsumerInfrared" },
+ { 0x20, 0x0043, "LightInfraredLight" },
+ { 0x20, 0x0044, "LightVisibleLight" },
+ { 0x20, 0x0045, "LightUltravioletLight" },
+ { 0x20, 0x0050, "Location" },
+ { 0x20, 0x0051, "LocationBroadcast" },
+ { 0x20, 0x0052, "LocationDeadReckoning" },
+ { 0x20, 0x0053, "LocationGPSGlobalPositioningSystem" },
+ { 0x20, 0x0054, "LocationLookup" },
+ { 0x20, 0x0055, "LocationOther" },
+ { 0x20, 0x0056, "LocationStatic" },
+ { 0x20, 0x0057, "LocationTriangulation" },
+ { 0x20, 0x0060, "Mechanical" },
+ { 0x20, 0x0061, "MechanicalBooleanSwitch" },
+ { 0x20, 0x0062, "MechanicalBooleanSwitchArray" },
+ { 0x20, 0x0063, "MechanicalMultivalueSwitch" },
+ { 0x20, 0x0064, "MechanicalForce" },
+ { 0x20, 0x0065, "MechanicalPressure" },
+ { 0x20, 0x0066, "MechanicalStrain" },
+ { 0x20, 0x0067, "MechanicalWeight" },
+ { 0x20, 0x0068, "MechanicalHapticVibrator" },
+ { 0x20, 0x0069, "MechanicalHallEffectSwitch" },
+ { 0x20, 0x0070, "Motion" },
+ { 0x20, 0x0071, "MotionAccelerometer1D" },
+ { 0x20, 0x0072, "MotionAccelerometer2D" },
+ { 0x20, 0x0073, "MotionAccelerometer3D" },
+ { 0x20, 0x0074, "MotionGyrometer1D" },
+ { 0x20, 0x0075, "MotionGyrometer2D" },
+ { 0x20, 0x0076, "MotionGyrometer3D" },
+ { 0x20, 0x0077, "MotionMotionDetector" },
+ { 0x20, 0x0078, "MotionSpeedometer" },
+ { 0x20, 0x0079, "MotionAccelerometer" },
+ { 0x20, 0x007a, "MotionGyrometer" },
+ { 0x20, 0x007b, "MotionGravityVector" },
+ { 0x20, 0x007c, "MotionLinearAccelerometer" },
+ { 0x20, 0x0080, "Orientation" },
+ { 0x20, 0x0081, "OrientationCompass1D" },
+ { 0x20, 0x0082, "OrientationCompass2D" },
+ { 0x20, 0x0083, "OrientationCompass3D" },
+ { 0x20, 0x0084, "OrientationInclinometer1D" },
+ { 0x20, 0x0085, "OrientationInclinometer2D" },
+ { 0x20, 0x0086, "OrientationInclinometer3D" },
+ { 0x20, 0x0087, "OrientationDistance1D" },
+ { 0x20, 0x0088, "OrientationDistance2D" },
+ { 0x20, 0x0089, "OrientationDistance3D" },
+ { 0x20, 0x008a, "OrientationDeviceOrientation" },
+ { 0x20, 0x008b, "OrientationCompass" },
+ { 0x20, 0x008c, "OrientationInclinometer" },
+ { 0x20, 0x008d, "OrientationDistance" },
+ { 0x20, 0x008e, "OrientationRelativeOrientation" },
+ { 0x20, 0x008f, "OrientationSimpleOrientation" },
+ { 0x20, 0x0090, "Scanner" },
+ { 0x20, 0x0091, "ScannerBarcode" },
+ { 0x20, 0x0092, "ScannerRFID" },
+ { 0x20, 0x0093, "ScannerNFC" },
+ { 0x20, 0x00a0, "Time" },
+ { 0x20, 0x00a1, "TimeAlarmTimer" },
+ { 0x20, 0x00a2, "TimeRealTimeClock" },
+ { 0x20, 0x00b0, "PersonalActivity" },
+ { 0x20, 0x00b1, "PersonalActivityActivityDetection" },
+ { 0x20, 0x00b2, "PersonalActivityDevicePosition" },
+ { 0x20, 0x00b3, "PersonalActivityFloorTracker" },
+ { 0x20, 0x00b4, "PersonalActivityPedometer" },
+ { 0x20, 0x00b5, "PersonalActivityStepDetection" },
+ { 0x20, 0x00c0, "OrientationExtended" },
+ { 0x20, 0x00c1, "OrientationExtendedGeomagneticOrientation" },
+ { 0x20, 0x00c2, "OrientationExtendedMagnetometer" },
+ { 0x20, 0x00d0, "Gesture" },
+ { 0x20, 0x00d1, "GestureChassisFlipGesture" },
+ { 0x20, 0x00d2, "GestureHingeFoldGesture" },
+ { 0x20, 0x00e0, "Other" },
+ { 0x20, 0x00e1, "OtherCustom" },
+ { 0x20, 0x00e2, "OtherGeneric" },
+ { 0x20, 0x00e3, "OtherGenericEnumerator" },
+ { 0x20, 0x00e4, "OtherHingeAngle" },
+ { 0x20, 0x00f0, "VendorReserved1" },
+ { 0x20, 0x00f1, "VendorReserved2" },
+ { 0x20, 0x00f2, "VendorReserved3" },
+ { 0x20, 0x00f3, "VendorReserved4" },
+ { 0x20, 0x00f4, "VendorReserved5" },
+ { 0x20, 0x00f5, "VendorReserved6" },
+ { 0x20, 0x00f6, "VendorReserved7" },
+ { 0x20, 0x00f7, "VendorReserved8" },
+ { 0x20, 0x00f8, "VendorReserved9" },
+ { 0x20, 0x00f9, "VendorReserved10" },
+ { 0x20, 0x00fa, "VendorReserved11" },
+ { 0x20, 0x00fb, "VendorReserved12" },
+ { 0x20, 0x00fc, "VendorReserved13" },
+ { 0x20, 0x00fd, "VendorReserved14" },
+ { 0x20, 0x00fe, "VendorReserved15" },
+ { 0x20, 0x00ff, "VendorReserved16" },
+ { 0x20, 0x0200, "Event" },
+ { 0x20, 0x0201, "EventSensorState" },
+ { 0x20, 0x0202, "EventSensorEvent" },
+ { 0x20, 0x0300, "Property" },
+ { 0x20, 0x0301, "PropertyFriendlyName" },
+ { 0x20, 0x0302, "PropertyPersistentUniqueID" },
+ { 0x20, 0x0303, "PropertySensorStatus" },
+ { 0x20, 0x0304, "PropertyMinimumReportInterval" },
+ { 0x20, 0x0305, "PropertySensorManufacturer" },
+ { 0x20, 0x0306, "PropertySensorModel" },
+ { 0x20, 0x0307, "PropertySensorSerialNumber" },
+ { 0x20, 0x0308, "PropertySensorDescription" },
+ { 0x20, 0x0309, "PropertySensorConnectionType" },
+ { 0x20, 0x030a, "PropertySensorDevicePath" },
+ { 0x20, 0x030b, "PropertyHardwareRevision" },
+ { 0x20, 0x030c, "PropertyFirmwareVersion" },
+ { 0x20, 0x030d, "PropertyReleaseDate" },
+ { 0x20, 0x030e, "PropertyReportInterval" },
+ { 0x20, 0x030f, "PropertyChangeSensitivityAbsolute" },
+ { 0x20, 0x0310, "PropertyChangeSensitivityPercentofRange" },
+ { 0x20, 0x0311, "PropertyChangeSensitivityPercentRelative" },
+ { 0x20, 0x0312, "PropertyAccuracy" },
+ { 0x20, 0x0313, "PropertyResolution" },
+ { 0x20, 0x0314, "PropertyMaximum" },
+ { 0x20, 0x0315, "PropertyMinimum" },
+ { 0x20, 0x0316, "PropertyReportingState" },
+ { 0x20, 0x0317, "PropertySamplingRate" },
+ { 0x20, 0x0318, "PropertyResponseCurve" },
+ { 0x20, 0x0319, "PropertyPowerState" },
+ { 0x20, 0x031a, "PropertyMaximumFIFOEvents" },
+ { 0x20, 0x031b, "PropertyReportLatency" },
+ { 0x20, 0x031c, "PropertyFlushFIFOEvents" },
+ { 0x20, 0x031d, "PropertyMaximumPowerConsumption" },
+ { 0x20, 0x031e, "PropertyIsPrimary" },
+ { 0x20, 0x031f, "PropertyHumanPresenceDetectionType" },
+ { 0x20, 0x0400, "DataFieldLocation" },
+ { 0x20, 0x0402, "DataFieldAltitudeAntennaSeaLevel" },
+ { 0x20, 0x0403, "DataFieldDifferentialReferenceStationID" },
+ { 0x20, 0x0404, "DataFieldAltitudeEllipsoidError" },
+ { 0x20, 0x0405, "DataFieldAltitudeEllipsoid" },
+ { 0x20, 0x0406, "DataFieldAltitudeSeaLevelError" },
+ { 0x20, 0x0407, "DataFieldAltitudeSeaLevel" },
+ { 0x20, 0x0408, "DataFieldDifferentialGPSDataAge" },
+ { 0x20, 0x0409, "DataFieldErrorRadius" },
+ { 0x20, 0x040a, "DataFieldFixQuality" },
+ { 0x20, 0x040b, "DataFieldFixType" },
+ { 0x20, 0x040c, "DataFieldGeoidalSeparation" },
+ { 0x20, 0x040d, "DataFieldGPSOperationMode" },
+ { 0x20, 0x040e, "DataFieldGPSSelectionMode" },
+ { 0x20, 0x040f, "DataFieldGPSStatus" },
+ { 0x20, 0x0410, "DataFieldPositionDilutionofPrecision" },
+ { 0x20, 0x0411, "DataFieldHorizontalDilutionofPrecision" },
+ { 0x20, 0x0412, "DataFieldVerticalDilutionofPrecision" },
+ { 0x20, 0x0413, "DataFieldLatitude" },
+ { 0x20, 0x0414, "DataFieldLongitude" },
+ { 0x20, 0x0415, "DataFieldTrueHeading" },
+ { 0x20, 0x0416, "DataFieldMagneticHeading" },
+ { 0x20, 0x0417, "DataFieldMagneticVariation" },
+ { 0x20, 0x0418, "DataFieldSpeed" },
+ { 0x20, 0x0419, "DataFieldSatellitesinView" },
+ { 0x20, 0x041a, "DataFieldSatellitesinViewAzimuth" },
+ { 0x20, 0x041b, "DataFieldSatellitesinViewElevation" },
+ { 0x20, 0x041c, "DataFieldSatellitesinViewIDs" },
+ { 0x20, 0x041d, "DataFieldSatellitesinViewPRNs" },
+ { 0x20, 0x041e, "DataFieldSatellitesinViewSNRatios" },
+ { 0x20, 0x041f, "DataFieldSatellitesUsedCount" },
+ { 0x20, 0x0420, "DataFieldSatellitesUsedPRNs" },
+ { 0x20, 0x0421, "DataFieldNMEASentence" },
+ { 0x20, 0x0422, "DataFieldAddressLine1" },
+ { 0x20, 0x0423, "DataFieldAddressLine2" },
+ { 0x20, 0x0424, "DataFieldCity" },
+ { 0x20, 0x0425, "DataFieldStateorProvince" },
+ { 0x20, 0x0426, "DataFieldCountryorRegion" },
+ { 0x20, 0x0427, "DataFieldPostalCode" },
+ { 0x20, 0x042a, "PropertyLocation" },
+ { 0x20, 0x042b, "PropertyLocationDesiredAccuracy" },
+ { 0x20, 0x0430, "DataFieldEnvironmental" },
+ { 0x20, 0x0431, "DataFieldAtmosphericPressure" },
+ { 0x20, 0x0433, "DataFieldRelativeHumidity" },
+ { 0x20, 0x0434, "DataFieldTemperature" },
+ { 0x20, 0x0435, "DataFieldWindDirection" },
+ { 0x20, 0x0436, "DataFieldWindSpeed" },
+ { 0x20, 0x0437, "DataFieldAirQualityIndex" },
+ { 0x20, 0x0438, "DataFieldEquivalentCO2" },
+ { 0x20, 0x0439, "DataFieldVolatileOrganicCompoundConcentration" },
+ { 0x20, 0x043a, "DataFieldObjectPresence" },
+ { 0x20, 0x043b, "DataFieldObjectProximityRange" },
+ { 0x20, 0x043c, "DataFieldObjectProximityOutofRange" },
+ { 0x20, 0x0440, "PropertyEnvironmental" },
+ { 0x20, 0x0441, "PropertyReferencePressure" },
+ { 0x20, 0x0450, "DataFieldMotion" },
+ { 0x20, 0x0451, "DataFieldMotionState" },
+ { 0x20, 0x0452, "DataFieldAcceleration" },
+ { 0x20, 0x0453, "DataFieldAccelerationAxisX" },
+ { 0x20, 0x0454, "DataFieldAccelerationAxisY" },
+ { 0x20, 0x0455, "DataFieldAccelerationAxisZ" },
+ { 0x20, 0x0456, "DataFieldAngularVelocity" },
+ { 0x20, 0x0457, "DataFieldAngularVelocityaboutXAxis" },
+ { 0x20, 0x0458, "DataFieldAngularVelocityaboutYAxis" },
+ { 0x20, 0x0459, "DataFieldAngularVelocityaboutZAxis" },
+ { 0x20, 0x045a, "DataFieldAngularPosition" },
+ { 0x20, 0x045b, "DataFieldAngularPositionaboutXAxis" },
+ { 0x20, 0x045c, "DataFieldAngularPositionaboutYAxis" },
+ { 0x20, 0x045d, "DataFieldAngularPositionaboutZAxis" },
+ { 0x20, 0x045e, "DataFieldMotionSpeed" },
+ { 0x20, 0x045f, "DataFieldMotionIntensity" },
+ { 0x20, 0x0470, "DataFieldOrientation" },
+ { 0x20, 0x0471, "DataFieldHeading" },
+ { 0x20, 0x0472, "DataFieldHeadingXAxis" },
+ { 0x20, 0x0473, "DataFieldHeadingYAxis" },
+ { 0x20, 0x0474, "DataFieldHeadingZAxis" },
+ { 0x20, 0x0475, "DataFieldHeadingCompensatedMagneticNorth" },
+ { 0x20, 0x0476, "DataFieldHeadingCompensatedTrueNorth" },
+ { 0x20, 0x0477, "DataFieldHeadingMagneticNorth" },
+ { 0x20, 0x0478, "DataFieldHeadingTrueNorth" },
+ { 0x20, 0x0479, "DataFieldDistance" },
+ { 0x20, 0x047a, "DataFieldDistanceXAxis" },
+ { 0x20, 0x047b, "DataFieldDistanceYAxis" },
+ { 0x20, 0x047c, "DataFieldDistanceZAxis" },
+ { 0x20, 0x047d, "DataFieldDistanceOutofRange" },
+ { 0x20, 0x047e, "DataFieldTilt" },
+ { 0x20, 0x047f, "DataFieldTiltXAxis" },
+ { 0x20, 0x0480, "DataFieldTiltYAxis" },
+ { 0x20, 0x0481, "DataFieldTiltZAxis" },
+ { 0x20, 0x0482, "DataFieldRotationMatrix" },
+ { 0x20, 0x0483, "DataFieldQuaternion" },
+ { 0x20, 0x0484, "DataFieldMagneticFlux" },
+ { 0x20, 0x0485, "DataFieldMagneticFluxXAxis" },
+ { 0x20, 0x0486, "DataFieldMagneticFluxYAxis" },
+ { 0x20, 0x0487, "DataFieldMagneticFluxZAxis" },
+ { 0x20, 0x0488, "DataFieldMagnetometerAccuracy" },
+ { 0x20, 0x0489, "DataFieldSimpleOrientationDirection" },
+ { 0x20, 0x0490, "DataFieldMechanical" },
+ { 0x20, 0x0491, "DataFieldBooleanSwitchState" },
+ { 0x20, 0x0492, "DataFieldBooleanSwitchArrayStates" },
+ { 0x20, 0x0493, "DataFieldMultivalueSwitchValue" },
+ { 0x20, 0x0494, "DataFieldForce" },
+ { 0x20, 0x0495, "DataFieldAbsolutePressure" },
+ { 0x20, 0x0496, "DataFieldGaugePressure" },
+ { 0x20, 0x0497, "DataFieldStrain" },
+ { 0x20, 0x0498, "DataFieldWeight" },
+ { 0x20, 0x04a0, "PropertyMechanical" },
+ { 0x20, 0x04a1, "PropertyVibrationState" },
+ { 0x20, 0x04a2, "PropertyForwardVibrationSpeed" },
+ { 0x20, 0x04a3, "PropertyBackwardVibrationSpeed" },
+ { 0x20, 0x04b0, "DataFieldBiometric" },
+ { 0x20, 0x04b1, "DataFieldHumanPresence" },
+ { 0x20, 0x04b2, "DataFieldHumanProximityRange" },
+ { 0x20, 0x04b3, "DataFieldHumanProximityOutofRange" },
+ { 0x20, 0x04b4, "DataFieldHumanTouchState" },
+ { 0x20, 0x04b5, "DataFieldBloodPressure" },
+ { 0x20, 0x04b6, "DataFieldBloodPressureDiastolic" },
+ { 0x20, 0x04b7, "DataFieldBloodPressureSystolic" },
+ { 0x20, 0x04b8, "DataFieldHeartRate" },
+ { 0x20, 0x04b9, "DataFieldRestingHeartRate" },
+ { 0x20, 0x04ba, "DataFieldHeartbeatInterval" },
+ { 0x20, 0x04bb, "DataFieldRespiratoryRate" },
+ { 0x20, 0x04bc, "DataFieldSpO2" },
+ { 0x20, 0x04bd, "DataFieldHumanAttentionDetected" },
+ { 0x20, 0x04be, "DataFieldHumanHeadAzimuth" },
+ { 0x20, 0x04bf, "DataFieldHumanHeadAltitude" },
+ { 0x20, 0x04c0, "DataFieldHumanHeadRoll" },
+ { 0x20, 0x04c1, "DataFieldHumanHeadPitch" },
+ { 0x20, 0x04c2, "DataFieldHumanHeadYaw" },
+ { 0x20, 0x04c3, "DataFieldHumanCorrelationId" },
+ { 0x20, 0x04d0, "DataFieldLight" },
+ { 0x20, 0x04d1, "DataFieldIlluminance" },
+ { 0x20, 0x04d2, "DataFieldColorTemperature" },
+ { 0x20, 0x04d3, "DataFieldChromaticity" },
+ { 0x20, 0x04d4, "DataFieldChromaticityX" },
+ { 0x20, 0x04d5, "DataFieldChromaticityY" },
+ { 0x20, 0x04d6, "DataFieldConsumerIRSentenceReceive" },
+ { 0x20, 0x04d7, "DataFieldInfraredLight" },
+ { 0x20, 0x04d8, "DataFieldRedLight" },
+ { 0x20, 0x04d9, "DataFieldGreenLight" },
+ { 0x20, 0x04da, "DataFieldBlueLight" },
+ { 0x20, 0x04db, "DataFieldUltravioletALight" },
+ { 0x20, 0x04dc, "DataFieldUltravioletBLight" },
+ { 0x20, 0x04dd, "DataFieldUltravioletIndex" },
+ { 0x20, 0x04de, "DataFieldNearInfraredLight" },
+ { 0x20, 0x04df, "PropertyLight" },
+ { 0x20, 0x04e0, "PropertyConsumerIRSentenceSend" },
+ { 0x20, 0x04e2, "PropertyAutoBrightnessPreferred" },
+ { 0x20, 0x04e3, "PropertyAutoColorPreferred" },
+ { 0x20, 0x04f0, "DataFieldScanner" },
+ { 0x20, 0x04f1, "DataFieldRFIDTag40Bit" },
+ { 0x20, 0x04f2, "DataFieldNFCSentenceReceive" },
+ { 0x20, 0x04f8, "PropertyScanner" },
+ { 0x20, 0x04f9, "PropertyNFCSentenceSend" },
+ { 0x20, 0x0500, "DataFieldElectrical" },
+ { 0x20, 0x0501, "DataFieldCapacitance" },
+ { 0x20, 0x0502, "DataFieldCurrent" },
+ { 0x20, 0x0503, "DataFieldElectricalPower" },
+ { 0x20, 0x0504, "DataFieldInductance" },
+ { 0x20, 0x0505, "DataFieldResistance" },
+ { 0x20, 0x0506, "DataFieldVoltage" },
+ { 0x20, 0x0507, "DataFieldFrequency" },
+ { 0x20, 0x0508, "DataFieldPeriod" },
+ { 0x20, 0x0509, "DataFieldPercentofRange" },
+ { 0x20, 0x0520, "DataFieldTime" },
+ { 0x20, 0x0521, "DataFieldYear" },
+ { 0x20, 0x0522, "DataFieldMonth" },
+ { 0x20, 0x0523, "DataFieldDay" },
+ { 0x20, 0x0524, "DataFieldDayofWeek" },
+ { 0x20, 0x0525, "DataFieldHour" },
+ { 0x20, 0x0526, "DataFieldMinute" },
+ { 0x20, 0x0527, "DataFieldSecond" },
+ { 0x20, 0x0528, "DataFieldMillisecond" },
+ { 0x20, 0x0529, "DataFieldTimestamp" },
+ { 0x20, 0x052a, "DataFieldJulianDayofYear" },
+ { 0x20, 0x052b, "DataFieldTimeSinceSystemBoot" },
+ { 0x20, 0x0530, "PropertyTime" },
+ { 0x20, 0x0531, "PropertyTimeZoneOffsetfromUTC" },
+ { 0x20, 0x0532, "PropertyTimeZoneName" },
+ { 0x20, 0x0533, "PropertyDaylightSavingsTimeObserved" },
+ { 0x20, 0x0534, "PropertyTimeTrimAdjustment" },
+ { 0x20, 0x0535, "PropertyArmAlarm" },
+ { 0x20, 0x0540, "DataFieldCustom" },
+ { 0x20, 0x0541, "DataFieldCustomUsage" },
+ { 0x20, 0x0542, "DataFieldCustomBooleanArray" },
+ { 0x20, 0x0543, "DataFieldCustomValue" },
+ { 0x20, 0x0544, "DataFieldCustomValue1" },
+ { 0x20, 0x0545, "DataFieldCustomValue2" },
+ { 0x20, 0x0546, "DataFieldCustomValue3" },
+ { 0x20, 0x0547, "DataFieldCustomValue4" },
+ { 0x20, 0x0548, "DataFieldCustomValue5" },
+ { 0x20, 0x0549, "DataFieldCustomValue6" },
+ { 0x20, 0x054a, "DataFieldCustomValue7" },
+ { 0x20, 0x054b, "DataFieldCustomValue8" },
+ { 0x20, 0x054c, "DataFieldCustomValue9" },
+ { 0x20, 0x054d, "DataFieldCustomValue10" },
+ { 0x20, 0x054e, "DataFieldCustomValue11" },
+ { 0x20, 0x054f, "DataFieldCustomValue12" },
+ { 0x20, 0x0550, "DataFieldCustomValue13" },
+ { 0x20, 0x0551, "DataFieldCustomValue14" },
+ { 0x20, 0x0552, "DataFieldCustomValue15" },
+ { 0x20, 0x0553, "DataFieldCustomValue16" },
+ { 0x20, 0x0554, "DataFieldCustomValue17" },
+ { 0x20, 0x0555, "DataFieldCustomValue18" },
+ { 0x20, 0x0556, "DataFieldCustomValue19" },
+ { 0x20, 0x0557, "DataFieldCustomValue20" },
+ { 0x20, 0x0558, "DataFieldCustomValue21" },
+ { 0x20, 0x0559, "DataFieldCustomValue22" },
+ { 0x20, 0x055a, "DataFieldCustomValue23" },
+ { 0x20, 0x055b, "DataFieldCustomValue24" },
+ { 0x20, 0x055c, "DataFieldCustomValue25" },
+ { 0x20, 0x055d, "DataFieldCustomValue26" },
+ { 0x20, 0x055e, "DataFieldCustomValue27" },
+ { 0x20, 0x055f, "DataFieldCustomValue28" },
+ { 0x20, 0x0560, "DataFieldGeneric" },
+ { 0x20, 0x0561, "DataFieldGenericGUIDorPROPERTYKEY" },
+ { 0x20, 0x0562, "DataFieldGenericCategoryGUID" },
+ { 0x20, 0x0563, "DataFieldGenericTypeGUID" },
+ { 0x20, 0x0564, "DataFieldGenericEventPROPERTYKEY" },
+ { 0x20, 0x0565, "DataFieldGenericPropertyPROPERTYKEY" },
+ { 0x20, 0x0566, "DataFieldGenericDataFieldPROPERTYKEY" },
+ { 0x20, 0x0567, "DataFieldGenericEvent" },
+ { 0x20, 0x0568, "DataFieldGenericProperty" },
+ { 0x20, 0x0569, "DataFieldGenericDataField" },
+ { 0x20, 0x056a, "DataFieldEnumeratorTableRowIndex" },
+ { 0x20, 0x056b, "DataFieldEnumeratorTableRowCount" },
+ { 0x20, 0x056c, "DataFieldGenericGUIDorPROPERTYKEYkind" },
+ { 0x20, 0x056d, "DataFieldGenericGUID" },
+ { 0x20, 0x056e, "DataFieldGenericPROPERTYKEY" },
+ { 0x20, 0x056f, "DataFieldGenericTopLevelCollectionID" },
+ { 0x20, 0x0570, "DataFieldGenericReportID" },
+ { 0x20, 0x0571, "DataFieldGenericReportItemPositionIndex" },
+ { 0x20, 0x0572, "DataFieldGenericFirmwareVARTYPE" },
+ { 0x20, 0x0573, "DataFieldGenericUnitofMeasure" },
+ { 0x20, 0x0574, "DataFieldGenericUnitExponent" },
+ { 0x20, 0x0575, "DataFieldGenericReportSize" },
+ { 0x20, 0x0576, "DataFieldGenericReportCount" },
+ { 0x20, 0x0580, "PropertyGeneric" },
+ { 0x20, 0x0581, "PropertyEnumeratorTableRowIndex" },
+ { 0x20, 0x0582, "PropertyEnumeratorTableRowCount" },
+ { 0x20, 0x0590, "DataFieldPersonalActivity" },
+ { 0x20, 0x0591, "DataFieldActivityType" },
+ { 0x20, 0x0592, "DataFieldActivityState" },
+ { 0x20, 0x0593, "DataFieldDevicePosition" },
+ { 0x20, 0x0594, "DataFieldStepCount" },
+ { 0x20, 0x0595, "DataFieldStepCountReset" },
+ { 0x20, 0x0596, "DataFieldStepDuration" },
+ { 0x20, 0x0597, "DataFieldStepType" },
+ { 0x20, 0x05a0, "PropertyMinimumActivityDetectionInterval" },
+ { 0x20, 0x05a1, "PropertySupportedActivityTypes" },
+ { 0x20, 0x05a2, "PropertySubscribedActivityTypes" },
+ { 0x20, 0x05a3, "PropertySupportedStepTypes" },
+ { 0x20, 0x05a4, "PropertySubscribedStepTypes" },
+ { 0x20, 0x05a5, "PropertyFloorHeight" },
+ { 0x20, 0x05b0, "DataFieldCustomTypeID" },
+ { 0x20, 0x05c0, "PropertyCustom" },
+ { 0x20, 0x05c1, "PropertyCustomValue1" },
+ { 0x20, 0x05c2, "PropertyCustomValue2" },
+ { 0x20, 0x05c3, "PropertyCustomValue3" },
+ { 0x20, 0x05c4, "PropertyCustomValue4" },
+ { 0x20, 0x05c5, "PropertyCustomValue5" },
+ { 0x20, 0x05c6, "PropertyCustomValue6" },
+ { 0x20, 0x05c7, "PropertyCustomValue7" },
+ { 0x20, 0x05c8, "PropertyCustomValue8" },
+ { 0x20, 0x05c9, "PropertyCustomValue9" },
+ { 0x20, 0x05ca, "PropertyCustomValue10" },
+ { 0x20, 0x05cb, "PropertyCustomValue11" },
+ { 0x20, 0x05cc, "PropertyCustomValue12" },
+ { 0x20, 0x05cd, "PropertyCustomValue13" },
+ { 0x20, 0x05ce, "PropertyCustomValue14" },
+ { 0x20, 0x05cf, "PropertyCustomValue15" },
+ { 0x20, 0x05d0, "PropertyCustomValue16" },
+ { 0x20, 0x05e0, "DataFieldHinge" },
+ { 0x20, 0x05e1, "DataFieldHingeAngle" },
+ { 0x20, 0x05f0, "DataFieldGestureSensor" },
+ { 0x20, 0x05f1, "DataFieldGestureState" },
+ { 0x20, 0x05f2, "DataFieldHingeFoldInitialAngle" },
+ { 0x20, 0x05f3, "DataFieldHingeFoldFinalAngle" },
+ { 0x20, 0x05f4, "DataFieldHingeFoldContributingPanel" },
+ { 0x20, 0x05f5, "DataFieldHingeFoldType" },
+ { 0x20, 0x0800, "SensorStateUndefined" },
+ { 0x20, 0x0801, "SensorStateReady" },
+ { 0x20, 0x0802, "SensorStateNotAvailable" },
+ { 0x20, 0x0803, "SensorStateNoData" },
+ { 0x20, 0x0804, "SensorStateInitializing" },
+ { 0x20, 0x0805, "SensorStateAccessDenied" },
+ { 0x20, 0x0806, "SensorStateError" },
+ { 0x20, 0x0810, "SensorEventUnknown" },
+ { 0x20, 0x0811, "SensorEventStateChanged" },
+ { 0x20, 0x0812, "SensorEventPropertyChanged" },
+ { 0x20, 0x0813, "SensorEventDataUpdated" },
+ { 0x20, 0x0814, "SensorEventPollResponse" },
+ { 0x20, 0x0815, "SensorEventChangeSensitivity" },
+ { 0x20, 0x0816, "SensorEventRangeMaximumReached" },
+ { 0x20, 0x0817, "SensorEventRangeMinimumReached" },
+ { 0x20, 0x0818, "SensorEventHighThresholdCrossUpward" },
+ { 0x20, 0x0819, "SensorEventHighThresholdCrossDownward" },
+ { 0x20, 0x081a, "SensorEventLowThresholdCrossUpward" },
+ { 0x20, 0x081b, "SensorEventLowThresholdCrossDownward" },
+ { 0x20, 0x081c, "SensorEventZeroThresholdCrossUpward" },
+ { 0x20, 0x081d, "SensorEventZeroThresholdCrossDownward" },
+ { 0x20, 0x081e, "SensorEventPeriodExceeded" },
+ { 0x20, 0x081f, "SensorEventFrequencyExceeded" },
+ { 0x20, 0x0820, "SensorEventComplexTrigger" },
+ { 0x20, 0x0830, "ConnectionTypePCIntegrated" },
+ { 0x20, 0x0831, "ConnectionTypePCAttached" },
+ { 0x20, 0x0832, "ConnectionTypePCExternal" },
+ { 0x20, 0x0840, "ReportingStateReportNoEvents" },
+ { 0x20, 0x0841, "ReportingStateReportAllEvents" },
+ { 0x20, 0x0842, "ReportingStateReportThresholdEvents" },
+ { 0x20, 0x0843, "ReportingStateWakeOnNoEvents" },
+ { 0x20, 0x0844, "ReportingStateWakeOnAllEvents" },
+ { 0x20, 0x0845, "ReportingStateWakeOnThresholdEvents" },
+ { 0x20, 0x0846, "ReportingStateAnytime" },
+ { 0x20, 0x0850, "PowerStateUndefined" },
+ { 0x20, 0x0851, "PowerStateD0FullPower" },
+ { 0x20, 0x0852, "PowerStateD1LowPower" },
+ { 0x20, 0x0853, "PowerStateD2StandbyPowerwithWakeup" },
+ { 0x20, 0x0854, "PowerStateD3SleepwithWakeup" },
+ { 0x20, 0x0855, "PowerStateD4PowerOff" },
+ { 0x20, 0x0860, "AccuracyDefault" },
+ { 0x20, 0x0861, "AccuracyHigh" },
+ { 0x20, 0x0862, "AccuracyMedium" },
+ { 0x20, 0x0863, "AccuracyLow" },
+ { 0x20, 0x0870, "FixQualityNoFix" },
+ { 0x20, 0x0871, "FixQualityGPS" },
+ { 0x20, 0x0872, "FixQualityDGPS" },
+ { 0x20, 0x0880, "FixTypeNoFix" },
+ { 0x20, 0x0881, "FixTypeGPSSPSModeFixValid" },
+ { 0x20, 0x0882, "FixTypeDGPSSPSModeFixValid" },
+ { 0x20, 0x0883, "FixTypeGPSPPSModeFixValid" },
+ { 0x20, 0x0884, "FixTypeRealTimeKinematic" },
+ { 0x20, 0x0885, "FixTypeFloatRTK" },
+ { 0x20, 0x0886, "FixTypeEstimateddeadreckoned" },
+ { 0x20, 0x0887, "FixTypeManualInputMode" },
+ { 0x20, 0x0888, "FixTypeSimulatorMode" },
+ { 0x20, 0x0890, "GPSOperationModeManual" },
+ { 0x20, 0x0891, "GPSOperationModeAutomatic" },
+ { 0x20, 0x08a0, "GPSSelectionModeAutonomous" },
+ { 0x20, 0x08a1, "GPSSelectionModeDGPS" },
+ { 0x20, 0x08a2, "GPSSelectionModeEstimateddeadreckoned" },
+ { 0x20, 0x08a3, "GPSSelectionModeManualInput" },
+ { 0x20, 0x08a4, "GPSSelectionModeSimulator" },
+ { 0x20, 0x08a5, "GPSSelectionModeDataNotValid" },
+ { 0x20, 0x08b0, "GPSStatusDataValid" },
+ { 0x20, 0x08b1, "GPSStatusDataNotValid" },
+ { 0x20, 0x08c0, "DayofWeekSunday" },
+ { 0x20, 0x08c1, "DayofWeekMonday" },
+ { 0x20, 0x08c2, "DayofWeekTuesday" },
+ { 0x20, 0x08c3, "DayofWeekWednesday" },
+ { 0x20, 0x08c4, "DayofWeekThursday" },
+ { 0x20, 0x08c5, "DayofWeekFriday" },
+ { 0x20, 0x08c6, "DayofWeekSaturday" },
+ { 0x20, 0x08d0, "KindCategory" },
+ { 0x20, 0x08d1, "KindType" },
+ { 0x20, 0x08d2, "KindEvent" },
+ { 0x20, 0x08d3, "KindProperty" },
+ { 0x20, 0x08d4, "KindDataField" },
+ { 0x20, 0x08e0, "MagnetometerAccuracyLow" },
+ { 0x20, 0x08e1, "MagnetometerAccuracyMedium" },
+ { 0x20, 0x08e2, "MagnetometerAccuracyHigh" },
+ { 0x20, 0x08f0, "SimpleOrientationDirectionNotRotated" },
+ { 0x20, 0x08f1, "SimpleOrientationDirectionRotated90DegreesCCW" },
+ { 0x20, 0x08f2, "SimpleOrientationDirectionRotated180DegreesCCW" },
+ { 0x20, 0x08f3, "SimpleOrientationDirectionRotated270DegreesCCW" },
+ { 0x20, 0x08f4, "SimpleOrientationDirectionFaceUp" },
+ { 0x20, 0x08f5, "SimpleOrientationDirectionFaceDown" },
+ { 0x20, 0x0900, "VT_NULL" },
+ { 0x20, 0x0901, "VT_BOOL" },
+ { 0x20, 0x0902, "VT_UI1" },
+ { 0x20, 0x0903, "VT_I1" },
+ { 0x20, 0x0904, "VT_UI2" },
+ { 0x20, 0x0905, "VT_I2" },
+ { 0x20, 0x0906, "VT_UI4" },
+ { 0x20, 0x0907, "VT_I4" },
+ { 0x20, 0x0908, "VT_UI8" },
+ { 0x20, 0x0909, "VT_I8" },
+ { 0x20, 0x090a, "VT_R4" },
+ { 0x20, 0x090b, "VT_R8" },
+ { 0x20, 0x090c, "VT_WSTR" },
+ { 0x20, 0x090d, "VT_STR" },
+ { 0x20, 0x090e, "VT_CLSID" },
+ { 0x20, 0x090f, "VT_VECTORVT_UI1" },
+ { 0x20, 0x0910, "VT_F16E0" },
+ { 0x20, 0x0911, "VT_F16E1" },
+ { 0x20, 0x0912, "VT_F16E2" },
+ { 0x20, 0x0913, "VT_F16E3" },
+ { 0x20, 0x0914, "VT_F16E4" },
+ { 0x20, 0x0915, "VT_F16E5" },
+ { 0x20, 0x0916, "VT_F16E6" },
+ { 0x20, 0x0917, "VT_F16E7" },
+ { 0x20, 0x0918, "VT_F16E8" },
+ { 0x20, 0x0919, "VT_F16E9" },
+ { 0x20, 0x091a, "VT_F16EA" },
+ { 0x20, 0x091b, "VT_F16EB" },
+ { 0x20, 0x091c, "VT_F16EC" },
+ { 0x20, 0x091d, "VT_F16ED" },
+ { 0x20, 0x091e, "VT_F16EE" },
+ { 0x20, 0x091f, "VT_F16EF" },
+ { 0x20, 0x0920, "VT_F32E0" },
+ { 0x20, 0x0921, "VT_F32E1" },
+ { 0x20, 0x0922, "VT_F32E2" },
+ { 0x20, 0x0923, "VT_F32E3" },
+ { 0x20, 0x0924, "VT_F32E4" },
+ { 0x20, 0x0925, "VT_F32E5" },
+ { 0x20, 0x0926, "VT_F32E6" },
+ { 0x20, 0x0927, "VT_F32E7" },
+ { 0x20, 0x0928, "VT_F32E8" },
+ { 0x20, 0x0929, "VT_F32E9" },
+ { 0x20, 0x092a, "VT_F32EA" },
+ { 0x20, 0x092b, "VT_F32EB" },
+ { 0x20, 0x092c, "VT_F32EC" },
+ { 0x20, 0x092d, "VT_F32ED" },
+ { 0x20, 0x092e, "VT_F32EE" },
+ { 0x20, 0x092f, "VT_F32EF" },
+ { 0x20, 0x0930, "ActivityTypeUnknown" },
+ { 0x20, 0x0931, "ActivityTypeStationary" },
+ { 0x20, 0x0932, "ActivityTypeFidgeting" },
+ { 0x20, 0x0933, "ActivityTypeWalking" },
+ { 0x20, 0x0934, "ActivityTypeRunning" },
+ { 0x20, 0x0935, "ActivityTypeInVehicle" },
+ { 0x20, 0x0936, "ActivityTypeBiking" },
+ { 0x20, 0x0937, "ActivityTypeIdle" },
+ { 0x20, 0x0940, "UnitNotSpecified" },
+ { 0x20, 0x0941, "UnitLux" },
+ { 0x20, 0x0942, "UnitDegreesKelvin" },
+ { 0x20, 0x0943, "UnitDegreesCelsius" },
+ { 0x20, 0x0944, "UnitPascal" },
+ { 0x20, 0x0945, "UnitNewton" },
+ { 0x20, 0x0946, "UnitMetersSecond" },
+ { 0x20, 0x0947, "UnitKilogram" },
+ { 0x20, 0x0948, "UnitMeter" },
+ { 0x20, 0x0949, "UnitMetersSecondSecond" },
+ { 0x20, 0x094a, "UnitFarad" },
+ { 0x20, 0x094b, "UnitAmpere" },
+ { 0x20, 0x094c, "UnitWatt" },
+ { 0x20, 0x094d, "UnitHenry" },
+ { 0x20, 0x094e, "UnitOhm" },
+ { 0x20, 0x094f, "UnitVolt" },
+ { 0x20, 0x0950, "UnitHertz" },
+ { 0x20, 0x0951, "UnitBar" },
+ { 0x20, 0x0952, "UnitDegreesAnticlockwise" },
+ { 0x20, 0x0953, "UnitDegreesClockwise" },
+ { 0x20, 0x0954, "UnitDegrees" },
+ { 0x20, 0x0955, "UnitDegreesSecond" },
+ { 0x20, 0x0956, "UnitDegreesSecondSecond" },
+ { 0x20, 0x0957, "UnitKnot" },
+ { 0x20, 0x0958, "UnitPercent" },
+ { 0x20, 0x0959, "UnitSecond" },
+ { 0x20, 0x095a, "UnitMillisecond" },
+ { 0x20, 0x095b, "UnitG" },
+ { 0x20, 0x095c, "UnitBytes" },
+ { 0x20, 0x095d, "UnitMilligauss" },
+ { 0x20, 0x095e, "UnitBits" },
+ { 0x20, 0x0960, "ActivityStateNoStateChange" },
+ { 0x20, 0x0961, "ActivityStateStartActivity" },
+ { 0x20, 0x0962, "ActivityStateEndActivity" },
+ { 0x20, 0x0970, "Exponent0" },
+ { 0x20, 0x0971, "Exponent1" },
+ { 0x20, 0x0972, "Exponent2" },
+ { 0x20, 0x0973, "Exponent3" },
+ { 0x20, 0x0974, "Exponent4" },
+ { 0x20, 0x0975, "Exponent5" },
+ { 0x20, 0x0976, "Exponent6" },
+ { 0x20, 0x0977, "Exponent7" },
+ { 0x20, 0x0978, "Exponent8" },
+ { 0x20, 0x0979, "Exponent9" },
+ { 0x20, 0x097a, "ExponentA" },
+ { 0x20, 0x097b, "ExponentB" },
+ { 0x20, 0x097c, "ExponentC" },
+ { 0x20, 0x097d, "ExponentD" },
+ { 0x20, 0x097e, "ExponentE" },
+ { 0x20, 0x097f, "ExponentF" },
+ { 0x20, 0x0980, "DevicePositionUnknown" },
+ { 0x20, 0x0981, "DevicePositionUnchanged" },
+ { 0x20, 0x0982, "DevicePositionOnDesk" },
+ { 0x20, 0x0983, "DevicePositionInHand" },
+ { 0x20, 0x0984, "DevicePositionMovinginBag" },
+ { 0x20, 0x0985, "DevicePositionStationaryinBag" },
+ { 0x20, 0x0990, "StepTypeUnknown" },
+ { 0x20, 0x0991, "StepTypeWalking" },
+ { 0x20, 0x0992, "StepTypeRunning" },
+ { 0x20, 0x09a0, "GestureStateUnknown" },
+ { 0x20, 0x09a1, "GestureStateStarted" },
+ { 0x20, 0x09a2, "GestureStateCompleted" },
+ { 0x20, 0x09a3, "GestureStateCancelled" },
+ { 0x20, 0x09b0, "HingeFoldContributingPanelUnknown" },
+ { 0x20, 0x09b1, "HingeFoldContributingPanelPanel1" },
+ { 0x20, 0x09b2, "HingeFoldContributingPanelPanel2" },
+ { 0x20, 0x09b3, "HingeFoldContributingPanelBoth" },
+ { 0x20, 0x09b4, "HingeFoldTypeUnknown" },
+ { 0x20, 0x09b5, "HingeFoldTypeIncreasing" },
+ { 0x20, 0x09b6, "HingeFoldTypeDecreasing" },
+ { 0x20, 0x09c0, "HumanPresenceDetectionTypeVendorDefinedNonBiometric" },
+ { 0x20, 0x09c1, "HumanPresenceDetectionTypeVendorDefinedBiometric" },
+ { 0x20, 0x09c2, "HumanPresenceDetectionTypeFacialBiometric" },
+ { 0x20, 0x09c3, "HumanPresenceDetectionTypeAudioBiometric" },
+ { 0x20, 0x1000, "ModifierChangeSensitivityAbsolute" },
+ { 0x20, 0x2000, "ModifierMaximum" },
+ { 0x20, 0x3000, "ModifierMinimum" },
+ { 0x20, 0x4000, "ModifierAccuracy" },
+ { 0x20, 0x5000, "ModifierResolution" },
+ { 0x20, 0x6000, "ModifierThresholdHigh" },
+ { 0x20, 0x7000, "ModifierThresholdLow" },
+ { 0x20, 0x8000, "ModifierCalibrationOffset" },
+ { 0x20, 0x9000, "ModifierCalibrationMultiplier" },
+ { 0x20, 0xa000, "ModifierReportInterval" },
+ { 0x20, 0xb000, "ModifierFrequencyMax" },
+ { 0x20, 0xc000, "ModifierPeriodMax" },
+ { 0x20, 0xd000, "ModifierChangeSensitivityPercentofRange" },
+ { 0x20, 0xe000, "ModifierChangeSensitivityPercentRelative" },
+ { 0x20, 0xf000, "ModifierVendorReserved" },
+ { 0x40, 0, "MedicalInstrument" },
+ { 0x40, 0x0001, "MedicalUltrasound" },
+ { 0x40, 0x0020, "VCRAcquisition" },
+ { 0x40, 0x0021, "FreezeThaw" },
+ { 0x40, 0x0022, "ClipStore" },
+ { 0x40, 0x0023, "Update" },
+ { 0x40, 0x0024, "Next" },
+ { 0x40, 0x0025, "Save" },
+ { 0x40, 0x0026, "Print" },
+ { 0x40, 0x0027, "MicrophoneEnable" },
+ { 0x40, 0x0040, "Cine" },
+ { 0x40, 0x0041, "TransmitPower" },
+ { 0x40, 0x0042, "Volume" },
+ { 0x40, 0x0043, "Focus" },
+ { 0x40, 0x0044, "Depth" },
+ { 0x40, 0x0060, "SoftStepPrimary" },
+ { 0x40, 0x0061, "SoftStepSecondary" },
+ { 0x40, 0x0070, "DepthGainCompensation" },
+ { 0x40, 0x0080, "ZoomSelect" },
+ { 0x40, 0x0081, "ZoomAdjust" },
+ { 0x40, 0x0082, "SpectralDopplerModeSelect" },
+ { 0x40, 0x0083, "SpectralDopplerAdjust" },
+ { 0x40, 0x0084, "ColorDopplerModeSelect" },
+ { 0x40, 0x0085, "ColorDopplerAdjust" },
+ { 0x40, 0x0086, "MotionModeSelect" },
+ { 0x40, 0x0087, "MotionModeAdjust" },
+ { 0x40, 0x0088, "2DModeSelect" },
+ { 0x40, 0x0089, "2DModeAdjust" },
+ { 0x40, 0x00a0, "SoftControlSelect" },
+ { 0x40, 0x00a1, "SoftControlAdjust" },
+ { 0x41, 0, "BrailleDisplay" },
+ { 0x41, 0x0001, "BrailleDisplay" },
+ { 0x41, 0x0002, "BrailleRow" },
+ { 0x41, 0x0003, "8DotBrailleCell" },
+ { 0x41, 0x0004, "6DotBrailleCell" },
+ { 0x41, 0x0005, "NumberofBrailleCells" },
+ { 0x41, 0x0006, "ScreenReaderControl" },
+ { 0x41, 0x0007, "ScreenReaderIdentifier" },
+ { 0x41, 0x00fa, "RouterSet1" },
+ { 0x41, 0x00fb, "RouterSet2" },
+ { 0x41, 0x00fc, "RouterSet3" },
+ { 0x41, 0x0100, "RouterKey" },
+ { 0x41, 0x0101, "RowRouterKey" },
+ { 0x41, 0x0200, "BrailleButtons" },
+ { 0x41, 0x0201, "BrailleKeyboardDot1" },
+ { 0x41, 0x0202, "BrailleKeyboardDot2" },
+ { 0x41, 0x0203, "BrailleKeyboardDot3" },
+ { 0x41, 0x0204, "BrailleKeyboardDot4" },
+ { 0x41, 0x0205, "BrailleKeyboardDot5" },
+ { 0x41, 0x0206, "BrailleKeyboardDot6" },
+ { 0x41, 0x0207, "BrailleKeyboardDot7" },
+ { 0x41, 0x0208, "BrailleKeyboardDot8" },
+ { 0x41, 0x0209, "BrailleKeyboardSpace" },
+ { 0x41, 0x020a, "BrailleKeyboardLeftSpace" },
+ { 0x41, 0x020b, "BrailleKeyboardRightSpace" },
+ { 0x41, 0x020c, "BrailleFaceControls" },
+ { 0x41, 0x020d, "BrailleLeftControls" },
+ { 0x41, 0x020e, "BrailleRightControls" },
+ { 0x41, 0x020f, "BrailleTopControls" },
+ { 0x41, 0x0210, "BrailleJoystickCenter" },
+ { 0x41, 0x0211, "BrailleJoystickUp" },
+ { 0x41, 0x0212, "BrailleJoystickDown" },
+ { 0x41, 0x0213, "BrailleJoystickLeft" },
+ { 0x41, 0x0214, "BrailleJoystickRight" },
+ { 0x41, 0x0215, "BrailleDPadCenter" },
+ { 0x41, 0x0216, "BrailleDPadUp" },
+ { 0x41, 0x0217, "BrailleDPadDown" },
+ { 0x41, 0x0218, "BrailleDPadLeft" },
+ { 0x41, 0x0219, "BrailleDPadRight" },
+ { 0x41, 0x021a, "BraillePanLeft" },
+ { 0x41, 0x021b, "BraillePanRight" },
+ { 0x41, 0x021c, "BrailleRockerUp" },
+ { 0x41, 0x021d, "BrailleRockerDown" },
+ { 0x41, 0x021e, "BrailleRockerPress" },
+ { 0x59, 0, "LightingAndIllumination" },
+ { 0x59, 0x0001, "LampArray" },
+ { 0x59, 0x0002, "LampArrayAttributesReport" },
+ { 0x59, 0x0003, "LampCount" },
+ { 0x59, 0x0004, "BoundingBoxWidthInMicrometers" },
+ { 0x59, 0x0005, "BoundingBoxHeightInMicrometers" },
+ { 0x59, 0x0006, "BoundingBoxDepthInMicrometers" },
+ { 0x59, 0x0007, "LampArrayKind" },
+ { 0x59, 0x0008, "MinUpdateIntervalInMicroseconds" },
+ { 0x59, 0x0020, "LampAttributesRequestReport" },
+ { 0x59, 0x0021, "LampId" },
+ { 0x59, 0x0022, "LampAttributesResponseReport" },
+ { 0x59, 0x0023, "PositionXInMicrometers" },
+ { 0x59, 0x0024, "PositionYInMicrometers" },
+ { 0x59, 0x0025, "PositionZInMicrometers" },
+ { 0x59, 0x0026, "LampPurposes" },
+ { 0x59, 0x0027, "UpdateLatencyInMicroseconds" },
+ { 0x59, 0x0028, "RedLevelCount" },
+ { 0x59, 0x0029, "GreenLevelCount" },
+ { 0x59, 0x002a, "BlueLevelCount" },
+ { 0x59, 0x002b, "IntensityLevelCount" },
+ { 0x59, 0x002c, "IsProgrammable" },
+ { 0x59, 0x002d, "InputBinding" },
+ { 0x59, 0x0050, "LampMultiUpdateReport" },
+ { 0x59, 0x0051, "RedUpdateChannel" },
+ { 0x59, 0x0052, "GreenUpdateChannel" },
+ { 0x59, 0x0053, "BlueUpdateChannel" },
+ { 0x59, 0x0054, "IntensityUpdateChannel" },
+ { 0x59, 0x0055, "LampUpdateFlags" },
+ { 0x59, 0x0060, "LampRangeUpdateReport" },
+ { 0x59, 0x0061, "LampIdStart" },
+ { 0x59, 0x0062, "LampIdEnd" },
+ { 0x59, 0x0070, "LampArrayControlReport" },
+ { 0x59, 0x0071, "AutonomousMode" },
+ { 0x80, 0, "Monitor" },
+ { 0x80, 0x0001, "MonitorControl" },
+ { 0x80, 0x0002, "EDIDInformation" },
+ { 0x80, 0x0003, "VDIFInformation" },
+ { 0x80, 0x0004, "VESAVersion" },
+ { 0x81, 0, "MonitorEnumerated" },
+ { 0x82, 0, "VESAVirtualControls" },
+ { 0x82, 0x0001, "Degauss" },
+ { 0x82, 0x0010, "Brightness" },
+ { 0x82, 0x0012, "Contrast" },
+ { 0x82, 0x0016, "RedVideoGain" },
+ { 0x82, 0x0018, "GreenVideoGain" },
+ { 0x82, 0x001a, "BlueVideoGain" },
+ { 0x82, 0x001c, "Focus" },
+ { 0x82, 0x0020, "HorizontalPosition" },
+ { 0x82, 0x0022, "HorizontalSize" },
+ { 0x82, 0x0024, "HorizontalPincushion" },
+ { 0x82, 0x0026, "HorizontalPincushionBalance" },
+ { 0x82, 0x0028, "HorizontalMisconvergence" },
+ { 0x82, 0x002a, "HorizontalLinearity" },
+ { 0x82, 0x002c, "HorizontalLinearityBalance" },
+ { 0x82, 0x0030, "VerticalPosition" },
+ { 0x82, 0x0032, "VerticalSize" },
+ { 0x82, 0x0034, "VerticalPincushion" },
+ { 0x82, 0x0036, "VerticalPincushionBalance" },
+ { 0x82, 0x0038, "VerticalMisconvergence" },
+ { 0x82, 0x003a, "VerticalLinearity" },
+ { 0x82, 0x003c, "VerticalLinearityBalance" },
+ { 0x82, 0x0040, "ParallelogramDistortionKeyBalance" },
+ { 0x82, 0x0042, "TrapezoidalDistortionKey" },
+ { 0x82, 0x0044, "TiltRotation" },
+ { 0x82, 0x0046, "TopCornerDistortionControl" },
+ { 0x82, 0x0048, "TopCornerDistortionBalance" },
+ { 0x82, 0x004a, "BottomCornerDistortionControl" },
+ { 0x82, 0x004c, "BottomCornerDistortionBalance" },
+ { 0x82, 0x0056, "HorizontalMoire" },
+ { 0x82, 0x0058, "VerticalMoire" },
+ { 0x82, 0x005e, "InputLevelSelect" },
+ { 0x82, 0x0060, "InputSourceSelect" },
+ { 0x82, 0x006c, "RedVideoBlackLevel" },
+ { 0x82, 0x006e, "GreenVideoBlackLevel" },
+ { 0x82, 0x0070, "BlueVideoBlackLevel" },
+ { 0x82, 0x00a2, "AutoSizeCenter" },
+ { 0x82, 0x00a4, "PolarityHorizontalSynchronization" },
+ { 0x82, 0x00a6, "PolarityVerticalSynchronization" },
+ { 0x82, 0x00a8, "SynchronizationType" },
+ { 0x82, 0x00aa, "ScreenOrientation" },
+ { 0x82, 0x00ac, "HorizontalFrequency" },
+ { 0x82, 0x00ae, "VerticalFrequency" },
+ { 0x82, 0x00b0, "Settings" },
+ { 0x82, 0x00ca, "OnScreenDisplay" },
+ { 0x82, 0x00d4, "StereoMode" },
+ { 0x84, 0, "Power" },
+ { 0x84, 0x0001, "iName" },
+ { 0x84, 0x0002, "PresentStatus" },
+ { 0x84, 0x0003, "ChangedStatus" },
+ { 0x84, 0x0004, "UPS" },
+ { 0x84, 0x0005, "PowerSupply" },
+ { 0x84, 0x0010, "BatterySystem" },
+ { 0x84, 0x0011, "BatterySystemId" },
+ { 0x84, 0x0012, "Battery" },
+ { 0x84, 0x0013, "BatteryId" },
+ { 0x84, 0x0014, "Charger" },
+ { 0x84, 0x0015, "ChargerId" },
+ { 0x84, 0x0016, "PowerConverter" },
+ { 0x84, 0x0017, "PowerConverterId" },
+ { 0x84, 0x0018, "OutletSystem" },
+ { 0x84, 0x0019, "OutletSystemId" },
+ { 0x84, 0x001a, "Input" },
+ { 0x84, 0x001b, "InputId" },
+ { 0x84, 0x001c, "Output" },
+ { 0x84, 0x001d, "OutputId" },
+ { 0x84, 0x001e, "Flow" },
+ { 0x84, 0x001f, "FlowId" },
+ { 0x84, 0x0020, "Outlet" },
+ { 0x84, 0x0021, "OutletId" },
+ { 0x84, 0x0022, "Gang" },
+ { 0x84, 0x0023, "GangId" },
+ { 0x84, 0x0024, "PowerSummary" },
+ { 0x84, 0x0025, "PowerSummaryId" },
+ { 0x84, 0x0030, "Voltage" },
+ { 0x84, 0x0031, "Current" },
+ { 0x84, 0x0032, "Frequency" },
+ { 0x84, 0x0033, "ApparentPower" },
+ { 0x84, 0x0034, "ActivePower" },
+ { 0x84, 0x0035, "PercentLoad" },
+ { 0x84, 0x0036, "Temperature" },
+ { 0x84, 0x0037, "Humidity" },
+ { 0x84, 0x0038, "BadCount" },
+ { 0x84, 0x0040, "ConfigVoltage" },
+ { 0x84, 0x0041, "ConfigCurrent" },
+ { 0x84, 0x0042, "ConfigFrequency" },
+ { 0x84, 0x0043, "ConfigApparentPower" },
+ { 0x84, 0x0044, "ConfigActivePower" },
+ { 0x84, 0x0045, "ConfigPercentLoad" },
+ { 0x84, 0x0046, "ConfigTemperature" },
+ { 0x84, 0x0047, "ConfigHumidity" },
+ { 0x84, 0x0050, "SwitchOnControl" },
+ { 0x84, 0x0051, "SwitchOffControl" },
+ { 0x84, 0x0052, "ToggleControl" },
+ { 0x84, 0x0053, "LowVoltageTransfer" },
+ { 0x84, 0x0054, "HighVoltageTransfer" },
+ { 0x84, 0x0055, "DelayBeforeReboot" },
+ { 0x84, 0x0056, "DelayBeforeStartup" },
+ { 0x84, 0x0057, "DelayBeforeShutdown" },
+ { 0x84, 0x0058, "Test" },
+ { 0x84, 0x0059, "ModuleReset" },
+ { 0x84, 0x005a, "AudibleAlarmControl" },
+ { 0x84, 0x0060, "Present" },
+ { 0x84, 0x0061, "Good" },
+ { 0x84, 0x0062, "InternalFailure" },
+ { 0x84, 0x0063, "VoltagOutOfRange" },
+ { 0x84, 0x0064, "FrequencyOutOfRange" },
+ { 0x84, 0x0065, "Overload" },
+ { 0x84, 0x0066, "OverCharged" },
+ { 0x84, 0x0067, "OverTemperature" },
+ { 0x84, 0x0068, "ShutdownRequested" },
+ { 0x84, 0x0069, "ShutdownImminent" },
+ { 0x84, 0x006b, "SwitchOnOff" },
+ { 0x84, 0x006c, "Switchable" },
+ { 0x84, 0x006d, "Used" },
+ { 0x84, 0x006e, "Boost" },
+ { 0x84, 0x006f, "Buck" },
+ { 0x84, 0x0070, "Initialized" },
+ { 0x84, 0x0071, "Tested" },
+ { 0x84, 0x0072, "AwaitingPower" },
+ { 0x84, 0x0073, "CommunicationLost" },
+ { 0x84, 0x00fd, "iManufacturer" },
+ { 0x84, 0x00fe, "iProduct" },
+ { 0x84, 0x00ff, "iSerialNumber" },
+ { 0x85, 0, "BatterySystem" },
+ { 0x85, 0x0001, "SmartBatteryBatteryMode" },
+ { 0x85, 0x0002, "SmartBatteryBatteryStatus" },
+ { 0x85, 0x0003, "SmartBatteryAlarmWarning" },
+ { 0x85, 0x0004, "SmartBatteryChargerMode" },
+ { 0x85, 0x0005, "SmartBatteryChargerStatus" },
+ { 0x85, 0x0006, "SmartBatteryChargerSpecInfo" },
+ { 0x85, 0x0007, "SmartBatterySelectorState" },
+ { 0x85, 0x0008, "SmartBatterySelectorPresets" },
+ { 0x85, 0x0009, "SmartBatterySelectorInfo" },
+ { 0x85, 0x0010, "OptionalMfgFunction1" },
+ { 0x85, 0x0011, "OptionalMfgFunction2" },
+ { 0x85, 0x0012, "OptionalMfgFunction3" },
+ { 0x85, 0x0013, "OptionalMfgFunction4" },
+ { 0x85, 0x0014, "OptionalMfgFunction5" },
+ { 0x85, 0x0015, "ConnectionToSMBus" },
+ { 0x85, 0x0016, "OutputConnection" },
+ { 0x85, 0x0017, "ChargerConnection" },
+ { 0x85, 0x0018, "BatteryInsertion" },
+ { 0x85, 0x0019, "UseNext" },
+ { 0x85, 0x001a, "OKToUse" },
+ { 0x85, 0x001b, "BatterySupported" },
+ { 0x85, 0x001c, "SelectorRevision" },
+ { 0x85, 0x001d, "ChargingIndicator" },
+ { 0x85, 0x0028, "ManufacturerAccess" },
+ { 0x85, 0x0029, "RemainingCapacityLimit" },
+ { 0x85, 0x002a, "RemainingTimeLimit" },
+ { 0x85, 0x002b, "AtRate" },
+ { 0x85, 0x002c, "CapacityMode" },
+ { 0x85, 0x002d, "BroadcastToCharger" },
+ { 0x85, 0x002e, "PrimaryBattery" },
+ { 0x85, 0x002f, "ChargeController" },
+ { 0x85, 0x0040, "TerminateCharge" },
+ { 0x85, 0x0041, "TerminateDischarge" },
+ { 0x85, 0x0042, "BelowRemainingCapacityLimit" },
+ { 0x85, 0x0043, "RemainingTimeLimitExpired" },
+ { 0x85, 0x0044, "Charging" },
+ { 0x85, 0x0045, "Discharging" },
+ { 0x85, 0x0046, "FullyCharged" },
+ { 0x85, 0x0047, "FullyDischarged" },
+ { 0x85, 0x0048, "ConditioningFlag" },
+ { 0x85, 0x0049, "AtRateOK" },
+ { 0x85, 0x004a, "SmartBatteryErrorCode" },
+ { 0x85, 0x004b, "NeedReplacement" },
+ { 0x85, 0x0060, "AtRateTimeToFull" },
+ { 0x85, 0x0061, "AtRateTimeToEmpty" },
+ { 0x85, 0x0062, "AverageCurrent" },
+ { 0x85, 0x0063, "MaxError" },
+ { 0x85, 0x0064, "RelativeStateOfCharge" },
+ { 0x85, 0x0065, "AbsoluteStateOfCharge" },
+ { 0x85, 0x0066, "RemainingCapacity" },
+ { 0x85, 0x0067, "FullChargeCapacity" },
+ { 0x85, 0x0068, "RunTimeToEmpty" },
+ { 0x85, 0x0069, "AverageTimeToEmpty" },
+ { 0x85, 0x006a, "AverageTimeToFull" },
+ { 0x85, 0x006b, "CycleCount" },
+ { 0x85, 0x0080, "BatteryPackModelLevel" },
+ { 0x85, 0x0081, "InternalChargeController" },
+ { 0x85, 0x0082, "PrimaryBatterySupport" },
+ { 0x85, 0x0083, "DesignCapacity" },
+ { 0x85, 0x0084, "SpecificationInfo" },
+ { 0x85, 0x0085, "ManufactureDate" },
+ { 0x85, 0x0086, "SerialNumber" },
+ { 0x85, 0x0087, "iManufacturerName" },
+ { 0x85, 0x0088, "iDeviceName" },
+ { 0x85, 0x0089, "iDeviceChemistry" },
+ { 0x85, 0x008a, "ManufacturerData" },
+ { 0x85, 0x008b, "Rechargable" },
+ { 0x85, 0x008c, "WarningCapacityLimit" },
+ { 0x85, 0x008d, "CapacityGranularity1" },
+ { 0x85, 0x008e, "CapacityGranularity2" },
+ { 0x85, 0x008f, "iOEMInformation" },
+ { 0x85, 0x00c0, "InhibitCharge" },
+ { 0x85, 0x00c1, "EnablePolling" },
+ { 0x85, 0x00c2, "ResetToZero" },
+ { 0x85, 0x00d0, "ACPresent" },
+ { 0x85, 0x00d1, "BatteryPresent" },
+ { 0x85, 0x00d2, "PowerFail" },
+ { 0x85, 0x00d3, "AlarmInhibited" },
+ { 0x85, 0x00d4, "ThermistorUnderRange" },
+ { 0x85, 0x00d5, "ThermistorHot" },
+ { 0x85, 0x00d6, "ThermistorCold" },
+ { 0x85, 0x00d7, "ThermistorOverRange" },
+ { 0x85, 0x00d8, "VoltageOutOfRange" },
+ { 0x85, 0x00d9, "CurrentOutOfRange" },
+ { 0x85, 0x00da, "CurrentNotRegulated" },
+ { 0x85, 0x00db, "VoltageNotRegulated" },
+ { 0x85, 0x00dc, "MasterMode" },
+ { 0x85, 0x00f0, "ChargerSelectorSupport" },
+ { 0x85, 0x00f1, "ChargerSpec" },
+ { 0x85, 0x00f2, "Level2" },
+ { 0x85, 0x00f3, "Level3" },
+ { 0x8c, 0, "BarcodeScanner" },
+ { 0x8c, 0x0001, "BarcodeBadgeReader" },
+ { 0x8c, 0x0002, "BarcodeScanner" },
+ { 0x8c, 0x0003, "DumbBarCodeScanner" },
+ { 0x8c, 0x0004, "CordlessScannerBase" },
+ { 0x8c, 0x0005, "BarCodeScannerCradle" },
+ { 0x8c, 0x0010, "AttributeReport" },
+ { 0x8c, 0x0011, "SettingsReport" },
+ { 0x8c, 0x0012, "ScannedDataReport" },
+ { 0x8c, 0x0013, "RawScannedDataReport" },
+ { 0x8c, 0x0014, "TriggerReport" },
+ { 0x8c, 0x0015, "StatusReport" },
+ { 0x8c, 0x0016, "UPCEANControlReport" },
+ { 0x8c, 0x0017, "EAN23LabelControlReport" },
+ { 0x8c, 0x0018, "Code39ControlReport" },
+ { 0x8c, 0x0019, "Interleaved2of5ControlReport" },
+ { 0x8c, 0x001a, "Standard2of5ControlReport" },
+ { 0x8c, 0x001b, "MSIPlesseyControlReport" },
+ { 0x8c, 0x001c, "CodabarControlReport" },
+ { 0x8c, 0x001d, "Code128ControlReport" },
+ { 0x8c, 0x001e, "Misc1DControlReport" },
+ { 0x8c, 0x001f, "2DControlReport" },
+ { 0x8c, 0x0030, "AimingPointerMode" },
+ { 0x8c, 0x0031, "BarCodePresentSensor" },
+ { 0x8c, 0x0032, "Class1ALaser" },
+ { 0x8c, 0x0033, "Class2Laser" },
+ { 0x8c, 0x0034, "HeaterPresent" },
+ { 0x8c, 0x0035, "ContactScanner" },
+ { 0x8c, 0x0036, "ElectronicArticleSurveillanceNotification" },
+ { 0x8c, 0x0037, "ConstantElectronicArticleSurveillance" },
+ { 0x8c, 0x0038, "ErrorIndication" },
+ { 0x8c, 0x0039, "FixedBeeper" },
+ { 0x8c, 0x003a, "GoodDecodeIndication" },
+ { 0x8c, 0x003b, "HandsFreeScanning" },
+ { 0x8c, 0x003c, "IntrinsicallySafe" },
+ { 0x8c, 0x003d, "KlasseEinsLaser" },
+ { 0x8c, 0x003e, "LongRangeScanner" },
+ { 0x8c, 0x003f, "MirrorSpeedControl" },
+ { 0x8c, 0x0040, "NotOnFileIndication" },
+ { 0x8c, 0x0041, "ProgrammableBeeper" },
+ { 0x8c, 0x0042, "Triggerless" },
+ { 0x8c, 0x0043, "Wand" },
+ { 0x8c, 0x0044, "WaterResistant" },
+ { 0x8c, 0x0045, "MultiRangeScanner" },
+ { 0x8c, 0x0046, "ProximitySensor" },
+ { 0x8c, 0x004d, "FragmentDecoding" },
+ { 0x8c, 0x004e, "ScannerReadConfidence" },
+ { 0x8c, 0x004f, "DataPrefix" },
+ { 0x8c, 0x0050, "PrefixAIMI" },
+ { 0x8c, 0x0051, "PrefixNone" },
+ { 0x8c, 0x0052, "PrefixProprietary" },
+ { 0x8c, 0x0055, "ActiveTime" },
+ { 0x8c, 0x0056, "AimingLaserPattern" },
+ { 0x8c, 0x0057, "BarCodePresent" },
+ { 0x8c, 0x0058, "BeeperState" },
+ { 0x8c, 0x0059, "LaserOnTime" },
+ { 0x8c, 0x005a, "LaserState" },
+ { 0x8c, 0x005b, "LockoutTime" },
+ { 0x8c, 0x005c, "MotorState" },
+ { 0x8c, 0x005d, "MotorTimeout" },
+ { 0x8c, 0x005e, "PowerOnResetScanner" },
+ { 0x8c, 0x005f, "PreventReadofBarcodes" },
+ { 0x8c, 0x0060, "InitiateBarcodeRead" },
+ { 0x8c, 0x0061, "TriggerState" },
+ { 0x8c, 0x0062, "TriggerMode" },
+ { 0x8c, 0x0063, "TriggerModeBlinkingLaserOn" },
+ { 0x8c, 0x0064, "TriggerModeContinuousLaserOn" },
+ { 0x8c, 0x0065, "TriggerModeLaseronwhilePulled" },
+ { 0x8c, 0x0066, "TriggerModeLaserstaysonafterrelease" },
+ { 0x8c, 0x006d, "CommitParameterstoNVM" },
+ { 0x8c, 0x006e, "ParameterScanning" },
+ { 0x8c, 0x006f, "ParametersChanged" },
+ { 0x8c, 0x0070, "Setparameterdefaultvalues" },
+ { 0x8c, 0x0075, "ScannerInCradle" },
+ { 0x8c, 0x0076, "ScannerInRange" },
+ { 0x8c, 0x007a, "AimDuration" },
+ { 0x8c, 0x007b, "GoodReadLampDuration" },
+ { 0x8c, 0x007c, "GoodReadLampIntensity" },
+ { 0x8c, 0x007d, "GoodReadLED" },
+ { 0x8c, 0x007e, "GoodReadToneFrequency" },
+ { 0x8c, 0x007f, "GoodReadToneLength" },
+ { 0x8c, 0x0080, "GoodReadToneVolume" },
+ { 0x8c, 0x0082, "NoReadMessage" },
+ { 0x8c, 0x0083, "NotonFileVolume" },
+ { 0x8c, 0x0084, "PowerupBeep" },
+ { 0x8c, 0x0085, "SoundErrorBeep" },
+ { 0x8c, 0x0086, "SoundGoodReadBeep" },
+ { 0x8c, 0x0087, "SoundNotOnFileBeep" },
+ { 0x8c, 0x0088, "GoodReadWhentoWrite" },
+ { 0x8c, 0x0089, "GRWTIAfterDecode" },
+ { 0x8c, 0x008a, "GRWTIBeepLampaftertransmit" },
+ { 0x8c, 0x008b, "GRWTINoBeepLampuseatall" },
+ { 0x8c, 0x0091, "BooklandEAN" },
+ { 0x8c, 0x0092, "ConvertEAN8to13Type" },
+ { 0x8c, 0x0093, "ConvertUPCAtoEAN13" },
+ { 0x8c, 0x0094, "ConvertUPCEtoA" },
+ { 0x8c, 0x0095, "EAN13" },
+ { 0x8c, 0x0096, "EAN8" },
+ { 0x8c, 0x0097, "EAN99128Mandatory" },
+ { 0x8c, 0x0098, "EAN99P5128Optional" },
+ { 0x8c, 0x0099, "EnableEANTwoLabel" },
+ { 0x8c, 0x009a, "UPCEAN" },
+ { 0x8c, 0x009b, "UPCEANCouponCode" },
+ { 0x8c, 0x009c, "UPCEANPeriodicals" },
+ { 0x8c, 0x009d, "UPCA" },
+ { 0x8c, 0x009e, "UPCAwith128Mandatory" },
+ { 0x8c, 0x009f, "UPCAwith128Optional" },
+ { 0x8c, 0x00a0, "UPCAwithP5Optional" },
+ { 0x8c, 0x00a1, "UPCE" },
+ { 0x8c, 0x00a2, "UPCE1" },
+ { 0x8c, 0x00a9, "Periodical" },
+ { 0x8c, 0x00aa, "PeriodicalAutoDiscriminate2" },
+ { 0x8c, 0x00ab, "PeriodicalOnlyDecodewith2" },
+ { 0x8c, 0x00ac, "PeriodicalIgnore2" },
+ { 0x8c, 0x00ad, "PeriodicalAutoDiscriminate5" },
+ { 0x8c, 0x00ae, "PeriodicalOnlyDecodewith5" },
+ { 0x8c, 0x00af, "PeriodicalIgnore5" },
+ { 0x8c, 0x00b0, "Check" },
+ { 0x8c, 0x00b1, "CheckDisablePrice" },
+ { 0x8c, 0x00b2, "CheckEnable4digitPrice" },
+ { 0x8c, 0x00b3, "CheckEnable5digitPrice" },
+ { 0x8c, 0x00b4, "CheckEnableEuropean4digitPrice" },
+ { 0x8c, 0x00b5, "CheckEnableEuropean5digitPrice" },
+ { 0x8c, 0x00b7, "EANTwoLabel" },
+ { 0x8c, 0x00b8, "EANThreeLabel" },
+ { 0x8c, 0x00b9, "EAN8FlagDigit1" },
+ { 0x8c, 0x00ba, "EAN8FlagDigit2" },
+ { 0x8c, 0x00bb, "EAN8FlagDigit3" },
+ { 0x8c, 0x00bc, "EAN13FlagDigit1" },
+ { 0x8c, 0x00bd, "EAN13FlagDigit2" },
+ { 0x8c, 0x00be, "EAN13FlagDigit3" },
+ { 0x8c, 0x00bf, "AddEAN23LabelDefinition" },
+ { 0x8c, 0x00c0, "ClearallEAN23LabelDefinitions" },
+ { 0x8c, 0x00c3, "Codabar" },
+ { 0x8c, 0x00c4, "Code128" },
+ { 0x8c, 0x00c7, "Code39" },
+ { 0x8c, 0x00c8, "Code93" },
+ { 0x8c, 0x00c9, "FullASCIIConversion" },
+ { 0x8c, 0x00ca, "Interleaved2of5" },
+ { 0x8c, 0x00cb, "ItalianPharmacyCode" },
+ { 0x8c, 0x00cc, "MSIPlessey" },
+ { 0x8c, 0x00cd, "Standard2of5IATA" },
+ { 0x8c, 0x00ce, "Standard2of5" },
+ { 0x8c, 0x00d3, "TransmitStartStop" },
+ { 0x8c, 0x00d4, "TriOptic" },
+ { 0x8c, 0x00d5, "UCCEAN128" },
+ { 0x8c, 0x00d6, "CheckDigit" },
+ { 0x8c, 0x00d7, "CheckDigitDisable" },
+ { 0x8c, 0x00d8, "CheckDigitEnableInterleaved2of5OPCC" },
+ { 0x8c, 0x00d9, "CheckDigitEnableInterleaved2of5USS" },
+ { 0x8c, 0x00da, "CheckDigitEnableStandard2of5OPCC" },
+ { 0x8c, 0x00db, "CheckDigitEnableStandard2of5USS" },
+ { 0x8c, 0x00dc, "CheckDigitEnableOneMSIPlessey" },
+ { 0x8c, 0x00dd, "CheckDigitEnableTwoMSIPlessey" },
+ { 0x8c, 0x00de, "CheckDigitCodabarEnable" },
+ { 0x8c, 0x00df, "CheckDigitCode39Enable" },
+ { 0x8c, 0x00f0, "TransmitCheckDigit" },
+ { 0x8c, 0x00f1, "DisableCheckDigitTransmit" },
+ { 0x8c, 0x00f2, "EnableCheckDigitTransmit" },
+ { 0x8c, 0x00fb, "SymbologyIdentifier1" },
+ { 0x8c, 0x00fc, "SymbologyIdentifier2" },
+ { 0x8c, 0x00fd, "SymbologyIdentifier3" },
+ { 0x8c, 0x00fe, "DecodedData" },
+ { 0x8c, 0x00ff, "DecodeDataContinued" },
+ { 0x8c, 0x0100, "BarSpaceData" },
+ { 0x8c, 0x0101, "ScannerDataAccuracy" },
+ { 0x8c, 0x0102, "RawDataPolarity" },
+ { 0x8c, 0x0103, "PolarityInvertedBarCode" },
+ { 0x8c, 0x0104, "PolarityNormalBarCode" },
+ { 0x8c, 0x0106, "MinimumLengthtoDecode" },
+ { 0x8c, 0x0107, "MaximumLengthtoDecode" },
+ { 0x8c, 0x0108, "DiscreteLengthtoDecode1" },
+ { 0x8c, 0x0109, "DiscreteLengthtoDecode2" },
+ { 0x8c, 0x010a, "DataLengthMethod" },
+ { 0x8c, 0x010b, "DLMethodReadany" },
+ { 0x8c, 0x010c, "DLMethodCheckinRange" },
+ { 0x8c, 0x010d, "DLMethodCheckforDiscrete" },
+ { 0x8c, 0x0110, "AztecCode" },
+ { 0x8c, 0x0111, "BC412" },
+ { 0x8c, 0x0112, "ChannelCode" },
+ { 0x8c, 0x0113, "Code16" },
+ { 0x8c, 0x0114, "Code32" },
+ { 0x8c, 0x0115, "Code49" },
+ { 0x8c, 0x0116, "CodeOne" },
+ { 0x8c, 0x0117, "Colorcode" },
+ { 0x8c, 0x0118, "DataMatrix" },
+ { 0x8c, 0x0119, "MaxiCode" },
+ { 0x8c, 0x011a, "MicroPDF" },
+ { 0x8c, 0x011b, "PDF417" },
+ { 0x8c, 0x011c, "PosiCode" },
+ { 0x8c, 0x011d, "QRCode" },
+ { 0x8c, 0x011e, "SuperCode" },
+ { 0x8c, 0x011f, "UltraCode" },
+ { 0x8c, 0x0120, "USD5SlugCode" },
+ { 0x8c, 0x0121, "VeriCode" },
+ { 0x8d, 0, "Scales" },
+ { 0x8d, 0x0001, "Scales" },
+ { 0x8d, 0x0020, "ScaleDevice" },
+ { 0x8d, 0x0021, "ScaleClass" },
+ { 0x8d, 0x0022, "ScaleClassIMetric" },
+ { 0x8d, 0x0023, "ScaleClassIIMetric" },
+ { 0x8d, 0x0024, "ScaleClassIIIMetric" },
+ { 0x8d, 0x0025, "ScaleClassIIILMetric" },
+ { 0x8d, 0x0026, "ScaleClassIVMetric" },
+ { 0x8d, 0x0027, "ScaleClassIIIEnglish" },
+ { 0x8d, 0x0028, "ScaleClassIIILEnglish" },
+ { 0x8d, 0x0029, "ScaleClassIVEnglish" },
+ { 0x8d, 0x002a, "ScaleClassGeneric" },
+ { 0x8d, 0x0030, "ScaleAttributeReport" },
+ { 0x8d, 0x0031, "ScaleControlReport" },
+ { 0x8d, 0x0032, "ScaleDataReport" },
+ { 0x8d, 0x0033, "ScaleStatusReport" },
+ { 0x8d, 0x0034, "ScaleWeightLimitReport" },
+ { 0x8d, 0x0035, "ScaleStatisticsReport" },
+ { 0x8d, 0x0040, "DataWeight" },
+ { 0x8d, 0x0041, "DataScaling" },
+ { 0x8d, 0x0050, "WeightUnit" },
+ { 0x8d, 0x0051, "WeightUnitMilligram" },
+ { 0x8d, 0x0052, "WeightUnitGram" },
+ { 0x8d, 0x0053, "WeightUnitKilogram" },
+ { 0x8d, 0x0054, "WeightUnitCarats" },
+ { 0x8d, 0x0055, "WeightUnitTaels" },
+ { 0x8d, 0x0056, "WeightUnitGrains" },
+ { 0x8d, 0x0057, "WeightUnitPennyweights" },
+ { 0x8d, 0x0058, "WeightUnitMetricTon" },
+ { 0x8d, 0x0059, "WeightUnitAvoirTon" },
+ { 0x8d, 0x005a, "WeightUnitTroyOunce" },
+ { 0x8d, 0x005b, "WeightUnitOunce" },
+ { 0x8d, 0x005c, "WeightUnitPound" },
+ { 0x8d, 0x0060, "CalibrationCount" },
+ { 0x8d, 0x0061, "ReZeroCount" },
+ { 0x8d, 0x0070, "ScaleStatus" },
+ { 0x8d, 0x0071, "ScaleStatusFault" },
+ { 0x8d, 0x0072, "ScaleStatusStableatCenterofZero" },
+ { 0x8d, 0x0073, "ScaleStatusInMotion" },
+ { 0x8d, 0x0074, "ScaleStatusWeightStable" },
+ { 0x8d, 0x0075, "ScaleStatusUnderZero" },
+ { 0x8d, 0x0076, "ScaleStatusOverWeightLimit" },
+ { 0x8d, 0x0077, "ScaleStatusRequiresCalibration" },
+ { 0x8d, 0x0078, "ScaleStatusRequiresRezeroing" },
+ { 0x8d, 0x0080, "ZeroScale" },
+ { 0x8d, 0x0081, "EnforcedZeroReturn" },
+ { 0x8e, 0, "MagneticStripeReader" },
+ { 0x8e, 0x0001, "MSRDeviceReadOnly" },
+ { 0x8e, 0x0011, "Track1Length" },
+ { 0x8e, 0x0012, "Track2Length" },
+ { 0x8e, 0x0013, "Track3Length" },
+ { 0x8e, 0x0014, "TrackJISLength" },
+ { 0x8e, 0x0020, "TrackData" },
+ { 0x8e, 0x0021, "Track1Data" },
+ { 0x8e, 0x0022, "Track2Data" },
+ { 0x8e, 0x0023, "Track3Data" },
+ { 0x8e, 0x0024, "TrackJISData" },
+ { 0x90, 0, "CameraControl" },
+ { 0x90, 0x0020, "CameraAutofocus" },
+ { 0x90, 0x0021, "CameraShutter" },
+ { 0x91, 0, "Arcade" },
+ { 0x91, 0x0001, "GeneralPurposeIOCard" },
+ { 0x91, 0x0002, "CoinDoor" },
+ { 0x91, 0x0003, "WatchdogTimer" },
+ { 0x91, 0x0030, "GeneralPurposeAnalogInputState" },
+ { 0x91, 0x0031, "GeneralPurposeDigitalInputState" },
+ { 0x91, 0x0032, "GeneralPurposeOpticalInputState" },
+ { 0x91, 0x0033, "GeneralPurposeDigitalOutputState" },
+ { 0x91, 0x0034, "NumberofCoinDoors" },
+ { 0x91, 0x0035, "CoinDrawerDropCount" },
+ { 0x91, 0x0036, "CoinDrawerStart" },
+ { 0x91, 0x0037, "CoinDrawerService" },
+ { 0x91, 0x0038, "CoinDrawerTilt" },
+ { 0x91, 0x0039, "CoinDoorTest" },
+ { 0x91, 0x0040, "CoinDoorLockout" },
+ { 0x91, 0x0041, "WatchdogTimeout" },
+ { 0x91, 0x0042, "WatchdogAction" },
+ { 0x91, 0x0043, "WatchdogReboot" },
+ { 0x91, 0x0044, "WatchdogRestart" },
+ { 0x91, 0x0045, "AlarmInput" },
+ { 0x91, 0x0046, "CoinDoorCounter" },
+ { 0x91, 0x0047, "IODirectionMapping" },
+ { 0x91, 0x0048, "SetIODirectionMapping" },
+ { 0x91, 0x0049, "ExtendedOpticalInputState" },
+ { 0x91, 0x004a, "PinPadInputState" },
+ { 0x91, 0x004b, "PinPadStatus" },
+ { 0x91, 0x004c, "PinPadOutput" },
+ { 0x91, 0x004d, "PinPadCommand" },
+ { 0xf1d0, 0, "FIDOAlliance" },
+ { 0xf1d0, 0x0001, "U2FAuthenticatorDevice" },
+ { 0xf1d0, 0x0020, "InputReportData" },
+ { 0xf1d0, 0x0021, "OutputReportData" },
+ /* pages 0xff00 to 0xffff are vendor-specific */
+ { 0xffff, 0, "Vendor-specific-FF" },
+ { 0, 0, NULL }
};
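The hid_usage_table added above is a single flat array: each usage page begins with a row whose usage field is 0 (carrying the page name), that page's usages follow, and the final { 0, 0, NULL } row terminates the list. A standalone sketch of walking that layout, using a made-up two-row table rather than the kernel's, might look like this:

#include <stdio.h>

struct usage_entry {
	unsigned int page;
	unsigned int usage;
	const char *description;
};

/* Toy table in the same layout: a page row (usage == 0) followed by its
 * usages, terminated by { 0, 0, NULL }.  The page name is invented. */
static const struct usage_entry table[] = {
	{ 0x20, 0,      "ExamplePage" },
	{ 0x20, 0x0571, "DataFieldGenericReportItemPositionIndex" },
	{ 0, 0, NULL }
};

static const char *lookup(unsigned int page, unsigned int usage)
{
	const struct usage_entry *p;

	for (p = table; p->description; p++) {
		if (p->page != page || p->usage != 0)
			continue;
		/* found the page row: scan until the next page row or the end */
		for (++p; p->description && p->usage != 0; p++)
			if (p->usage == usage)
				return p->description;
		break;
	}
	return NULL;
}

int main(void)
{
	const char *name = lookup(0x20, 0x0571);

	printf("%s\n", name ? name : "unknown");
	return 0;
}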
/* Either output directly into simple seq_file, or (if f == NULL)
@@ -510,8 +2881,12 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) {
char *hid_resolv_usage(unsigned usage, struct seq_file *f) {
const struct hid_usage_entry *p;
+ const struct hid_usage_entry *m;
char *buf = NULL;
int len = 0;
+ const char *modifier = NULL;
+ unsigned int usage_modifier = usage & 0xF000;
+ unsigned int usage_actual = usage & 0xFFFF;
buf = resolv_usage_page(usage >> 16, f);
if (IS_ERR(buf)) {
@@ -529,16 +2904,33 @@ char *hid_resolv_usage(unsigned usage, struct seq_file *f) {
}
for (p = hid_usage_table; p->description; p++)
if (p->page == (usage >> 16)) {
+ if (p->page == 0x20 && usage_modifier) {
+ for (m = p; m->description; m++) {
+ if (p->page == m->page && m->usage
+ == usage_modifier) {
+ modifier = m->description;
+ break;
+ }
+ }
+ if (modifier)
+ usage_actual = usage_actual & 0x0FFF;
+ }
+
+ if (!modifier)
+ modifier = "";
+
for(++p; p->description && p->usage != 0; p++)
- if (p->usage == (usage & 0xffff)) {
+ if (p->usage == usage_actual) {
if (!f)
snprintf(buf + len,
HID_DEBUG_BUFSIZE - len,
- "%s", p->description);
+ "%s%s", p->description,
+ modifier);
else
seq_printf(f,
- "%s",
- p->description);
+ "%s%s",
+ p->description,
+ modifier);
return buf;
}
break;
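For the sensor page (0x20), the hunk above splits the 16-bit usage into a 4-bit modifier in the top nibble (for example 0x2000, ModifierMaximum) and a 12-bit base usage, looks both up, and prints them concatenated, so 0x2571 comes out as DataFieldGenericReportItemPositionIndex followed by ModifierMaximum. A minimal userspace sketch of just that decomposition, independent of the kernel helpers and using an example value from the table above, could be:

#include <stdio.h>

int main(void)
{
	/* page 0x20, ModifierMaximum (0x2000) applied to usage 0x0571 */
	unsigned int usage = 0x00202571;

	unsigned int page = usage >> 16;
	unsigned int usage_modifier = usage & 0xF000; /* top nibble selects the modifier */
	unsigned int usage_actual = usage & 0xFFFF;

	if (page == 0x20 && usage_modifier)
		usage_actual &= 0x0FFF; /* keep only the base data-field usage */

	printf("page=%#x modifier=%#x base=%#x\n", page, usage_modifier, usage_actual);
	return 0;
}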
@@ -753,12 +3145,12 @@ static const char *events[EV_MAX + 1] = {
[EV_MSC] = "Misc", [EV_LED] = "LED",
[EV_SND] = "Sound", [EV_REP] = "Repeat",
[EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
- [EV_FF_STATUS] = "ForceFeedbackStatus",
+ [EV_FF_STATUS] = "ForceFeedbackStatus", [EV_SW] = "Software",
};
-static const char *syncs[3] = {
+static const char *syncs[SYN_CNT] = {
[SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
- [SYN_MT_REPORT] = "MT Report",
+ [SYN_MT_REPORT] = "MT Report", [SYN_DROPPED] = "Dropped",
};
static const char *keys[KEY_MAX + 1] = {
@@ -995,6 +3387,103 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO22] = "Macro22", [KEY_MACRO23] = "Macro23", [KEY_MACRO24] = "Macro24",
[KEY_MACRO25] = "Macro25", [KEY_MACRO26] = "Macro26", [KEY_MACRO27] = "Macro27",
[KEY_MACRO28] = "Macro28", [KEY_MACRO29] = "Macro29", [KEY_MACRO30] = "Macro30",
+ [BTN_TRIGGER_HAPPY1] = "TriggerHappy1", [BTN_TRIGGER_HAPPY2] = "TriggerHappy2",
+ [BTN_TRIGGER_HAPPY3] = "TriggerHappy3", [BTN_TRIGGER_HAPPY4] = "TriggerHappy4",
+ [BTN_TRIGGER_HAPPY5] = "TriggerHappy5", [BTN_TRIGGER_HAPPY6] = "TriggerHappy6",
+ [BTN_TRIGGER_HAPPY7] = "TriggerHappy7", [BTN_TRIGGER_HAPPY8] = "TriggerHappy8",
+ [BTN_TRIGGER_HAPPY9] = "TriggerHappy9", [BTN_TRIGGER_HAPPY10] = "TriggerHappy10",
+ [BTN_TRIGGER_HAPPY11] = "TriggerHappy11", [BTN_TRIGGER_HAPPY12] = "TriggerHappy12",
+ [BTN_TRIGGER_HAPPY13] = "TriggerHappy13", [BTN_TRIGGER_HAPPY14] = "TriggerHappy14",
+ [BTN_TRIGGER_HAPPY15] = "TriggerHappy15", [BTN_TRIGGER_HAPPY16] = "TriggerHappy16",
+ [BTN_TRIGGER_HAPPY17] = "TriggerHappy17", [BTN_TRIGGER_HAPPY18] = "TriggerHappy18",
+ [BTN_TRIGGER_HAPPY19] = "TriggerHappy19", [BTN_TRIGGER_HAPPY20] = "TriggerHappy20",
+ [BTN_TRIGGER_HAPPY21] = "TriggerHappy21", [BTN_TRIGGER_HAPPY22] = "TriggerHappy22",
+ [BTN_TRIGGER_HAPPY23] = "TriggerHappy23", [BTN_TRIGGER_HAPPY24] = "TriggerHappy24",
+ [BTN_TRIGGER_HAPPY25] = "TriggerHappy25", [BTN_TRIGGER_HAPPY26] = "TriggerHappy26",
+ [BTN_TRIGGER_HAPPY27] = "TriggerHappy27", [BTN_TRIGGER_HAPPY28] = "TriggerHappy28",
+ [BTN_TRIGGER_HAPPY29] = "TriggerHappy29", [BTN_TRIGGER_HAPPY30] = "TriggerHappy30",
+ [BTN_TRIGGER_HAPPY31] = "TriggerHappy31", [BTN_TRIGGER_HAPPY32] = "TriggerHappy32",
+ [BTN_TRIGGER_HAPPY33] = "TriggerHappy33", [BTN_TRIGGER_HAPPY34] = "TriggerHappy34",
+ [BTN_TRIGGER_HAPPY35] = "TriggerHappy35", [BTN_TRIGGER_HAPPY36] = "TriggerHappy36",
+ [BTN_TRIGGER_HAPPY37] = "TriggerHappy37", [BTN_TRIGGER_HAPPY38] = "TriggerHappy38",
+ [BTN_TRIGGER_HAPPY39] = "TriggerHappy39", [BTN_TRIGGER_HAPPY40] = "TriggerHappy40",
+ [BTN_DIGI] = "Digi", [BTN_STYLUS3] = "Stylus3",
+ [BTN_TOOL_QUINTTAP] = "ToolQuintTap", [BTN_WHEEL] = "Wheel",
+ [KEY_10CHANNELSDOWN] = "10ChannelsDown",
+ [KEY_10CHANNELSUP] = "10ChannelsUp",
+ [KEY_3D_MODE] = "3DMode", [KEY_ADDRESSBOOK] = "Addressbook",
+ [KEY_ALS_TOGGLE] = "ALSToggle", [KEY_ASPECT_RATIO] = "AspectRatio",
+ [KEY_ATTENDANT_OFF] = "AttendantOff", [KEY_ATTENDANT_ON] = "AttendantOn",
+ [KEY_ATTENDANT_TOGGLE] = "AttendantToggle",
+ [KEY_AUDIO_DESC] = "AudioDesc",
+ [KEY_AUTOPILOT_ENGAGE_TOGGLE] = "AutoPiloteEngage",
+ [KEY_BATTERY] = "Battery", [KEY_BLUETOOTH] = "BlueTooth",
+ [KEY_BRIGHTNESS_CYCLE] = "BrightnessCycle",
+ [KEY_BRIGHTNESS_MENU] = "BrightnessMenu",
+ [KEY_BRL_DOT1] = "BrlDot1", [KEY_BRL_DOT10] = "BrlDot10",
+ [KEY_BRL_DOT2] = "BrlDot2", [KEY_BRL_DOT3] = "BrlDot3",
+ [KEY_BRL_DOT4] = "BrlDot4", [KEY_BRL_DOT5] = "BrlDot5",
+ [KEY_BRL_DOT6] = "BrlDot6", [KEY_BRL_DOT7] = "BrlDot7",
+ [KEY_BRL_DOT8] = "BrlDot8", [KEY_BRL_DOT9] = "BrlDot9",
+ [KEY_CAMERA_DOWN] = "CameraDown", [KEY_CAMERA_FOCUS] = "CameraFocus",
+ [KEY_CAMERA_LEFT] = "CameraLeft", [KEY_CAMERA_RIGHT] = "CameraRight",
+ [KEY_CAMERA_UP] = "CameraUp", [KEY_CAMERA_ZOOMIN] = "CameraZoomIn",
+ [KEY_CAMERA_ZOOMOUT] = "CameraZoomOut", [KEY_CLEARVU_SONAR] = "ClearVUSonar",
+ [KEY_CONTEXT_MENU] = "ContextMenu", [KEY_DATA] = "Data",
+ [KEY_DATABASE] = "DataBase", [KEY_DISPLAY_OFF] = "DisplayOff",
+ [KEY_DISPLAYTOGGLE] = "DisplayToggle", [KEY_DOLLAR] = "Dollar",
+ [KEY_DUAL_RANGE_RADAR] = "DualRangeRadat",
+ [KEY_EDITOR] = "Editor", [KEY_EURO] = "Euro",
+ [KEY_FASTREVERSE] = "FastReverse", [KEY_FISHING_CHART] = "FishingChart",
+ [KEY_FN_RIGHT_SHIFT] = "FnRightShift", [KEY_FRAMEBACK] = "FrameBack",
+ [KEY_FRAMEFORWARD] = "FrameForward", [KEY_FULL_SCREEN] = "FullScreen",
+ [KEY_GAMES] = "Games", [KEY_GRAPHICSEDITOR] = "GraphicsEditor",
+ [KEY_HANGEUL] = "HanGeul", [KEY_HANGUP_PHONE] = "HangUpPhone",
+ [KEY_IMAGES] = "Images", [KEY_KBD_LCD_MENU1] = "KbdLcdMenu1",
+ [KEY_KBD_LCD_MENU2] = "KbdLcdMenu2", [KEY_KBD_LCD_MENU3] = "KbdLcdMenu3",
+ [KEY_KBD_LCD_MENU4] = "KbdLcdMenu4", [KEY_KBD_LCD_MENU5] = "KbdLcdMenu5",
+ [KEY_LEFT_DOWN] = "LeftDown", [KEY_LEFT_UP] = "LeftUp",
+ [KEY_LIGHTS_TOGGLE] = "LightToggle", [KEY_MACRO_PRESET1] = "MacroPreset1",
+ [KEY_MACRO_PRESET2] = "MacroPreset2", [KEY_MACRO_PRESET3] = "MacroPrest3",
+ [KEY_MACRO_PRESET_CYCLE] = "MacroPresetCycle",
+ [KEY_MACRO_RECORD_START] = "MacroRecordStart",
+ [KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
+ [KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
+ [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
+ [KEY_NAV_CHART] = "NavChart", [KEY_NAV_INFO] = "NavInfo",
+ [KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
+ [KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",
+ [KEY_NUMERIC_0] = "Numeric0", [KEY_NUMERIC_1] = "Numeric1",
+ [KEY_NUMERIC_11] = "Numceric11", [KEY_NUMERIC_12] = "Numeric12",
+ [KEY_NUMERIC_2] = "Numeric2", [KEY_NUMERIC_3] = "Numeric3",
+ [KEY_NUMERIC_4] = "Numeric4", [KEY_NUMERIC_5] = "Numeric5",
+ [KEY_NUMERIC_6] = "Numeric6", [KEY_NUMERIC_7] = "Numeric7",
+ [KEY_NUMERIC_8] = "Numeric8", [KEY_NUMERIC_9] = "Numeric9",
+ [KEY_NUMERIC_A] = "NumericA", [KEY_NUMERIC_B] = "NumericB",
+ [KEY_NUMERIC_C] = "NumericC", [KEY_NUMERIC_D] = "NumericD",
+ [KEY_NUMERIC_POUND] = "NumericPound", [KEY_NUMERIC_STAR] = "NumericStar",
+ [KEY_ONSCREEN_KEYBOARD] = "OnScreenKeyBoard",
+ [KEY_PAUSE_RECORD] = "PauseRecord", [KEY_PICKUP_PHONE] = "PickUpPhone",
+ [KEY_PRESENTATION] = "Presentation", [KEY_PREVIOUS_ELEMENT] = "PreviousElement",
+ [KEY_PRIVACY_SCREEN_TOGGLE] = "PrivacyScreenToggle",
+ [KEY_RADAR_OVERLAY] = "RadarOverLay",
+ [KEY_RFKILL] = "RFKill", [KEY_RIGHT_DOWN] = "RightDown",
+ [KEY_RIGHT_UP] = "RightUp", [KEY_ROOT_MENU] = "RootMenu",
+ [KEY_ROTATE_LOCK_TOGGLE] = "RotateLockToggle",
+ [KEY_SCALE] = "Scale", [KEY_SELECTIVE_SCREENSHOT] = "SelectiveScreenshot",
+ [KEY_SIDEVU_SONAR] = "SideVUSonar", [KEY_SINGLE_RANGE_RADAR] = "SingleRangeRadar",
+ [KEY_SLOWREVERSE] = "SlowReverse", [KEY_SOS] = "SOS",
+ [KEY_SPREADSHEET] = "SpreadSheet", [KEY_STOP_RECORD] = "StopRecord",
+ [KEY_TOUCHPAD_OFF] = "TouchPadOff", [KEY_TOUCHPAD_ON] = "TouchPadOn",
+ [KEY_TOUCHPAD_TOGGLE] = "TouchPadToggle",
+ [KEY_TRADITIONAL_SONAR] = "TraditionalSonar",
+ [KEY_UNMUTE] = "Unmute", [KEY_UWB] = "UWB",
+ [KEY_VIDEO_NEXT] = "VideoNext", [KEY_VIDEOPHONE] = "VideoPhone",
+ [KEY_VIDEO_PREV] = "VideoPrev", [KEY_VOD] = "VOD",
+ [KEY_VOICEMAIL] = "VoiceMail", [KEY_WLAN] = "WLAN",
+ [KEY_WORDPROCESSOR] = "WordProcessor", [KEY_WPS_BUTTON] = "WPSButton",
+ [KEY_WWAN] = "WWAN", [KEY_ZOOMIN] = "ZoomIn",
+ [KEY_ZOOMOUT] = "ZoomOut", [KEY_ZOOMRESET] = "ZoomReset",
};
static const char *relatives[REL_MAX + 1] = {
@@ -1003,6 +3492,8 @@ static const char *relatives[REL_MAX + 1] = {
[REL_RY] = "Ry", [REL_RZ] = "Rz",
[REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
[REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
+ [REL_WHEEL_HI_RES] = "WheelHiRes",
+ [REL_HWHEEL_HI_RES] = "HWheelHiRes"
};
static const char *absolutes[ABS_CNT] = {
@@ -1020,6 +3511,7 @@ static const char *absolutes[ABS_CNT] = {
[ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
[ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile",
[ABS_MISC] = "Misc",
+ [ABS_MT_SLOT] = "MTSlot",
[ABS_MT_TOUCH_MAJOR] = "MTMajor",
[ABS_MT_TOUCH_MINOR] = "MTMinor",
[ABS_MT_WIDTH_MAJOR] = "MTMajorW",
@@ -1029,11 +3521,17 @@ static const char *absolutes[ABS_CNT] = {
[ABS_MT_POSITION_Y] = "MTPositionY",
[ABS_MT_TOOL_TYPE] = "MTToolType",
[ABS_MT_BLOB_ID] = "MTBlobID",
+ [ABS_MT_TRACKING_ID] = "MTTrackingID",
+ [ABS_MT_PRESSURE] = "MTPressure",
+ [ABS_MT_DISTANCE] = "MTDistance",
+ [ABS_MT_TOOL_X] = "MTToolX",
+ [ABS_MT_TOOL_Y] = "MTToolY",
};
static const char *misc[MSC_MAX + 1] = {
[MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled",
- [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData"
+ [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData",
+ [MSC_SCAN] = "Scan", [MSC_TIMESTAMP] = "TimeStamp",
};
static const char *leds[LED_MAX + 1] = {
@@ -1041,7 +3539,8 @@ static const char *leds[LED_MAX + 1] = {
[LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
[LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
[LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
- [LED_MISC] = "Misc",
+ [LED_MISC] = "Misc", [LED_MAIL] = "Mail",
+ [LED_CHARGING] = "Charging",
};
static const char *repeats[REP_MAX + 1] = {
@@ -1053,17 +3552,71 @@ static const char *sounds[SND_MAX + 1] = {
[SND_TONE] = "Tone"
};
+static const char *software[SW_CNT] = {
+ [SW_LID] = "Lid",
+ [SW_TABLET_MODE] = "TabletMode",
+ [SW_HEADPHONE_INSERT] = "HeadPhoneInsert",
+ [SW_RFKILL_ALL] = "RFKillAll",
+ [SW_MICROPHONE_INSERT] = "MicrophoneInsert",
+ [SW_DOCK] = "Dock",
+ [SW_LINEOUT_INSERT] = "LineOutInsert",
+ [SW_JACK_PHYSICAL_INSERT] = "JackPhysicalInsert",
+ [SW_VIDEOOUT_INSERT] = "VideoOutInsert",
+ [SW_CAMERA_LENS_COVER] = "CameraLensCover",
+ [SW_KEYPAD_SLIDE] = "KeyPadSlide",
+ [SW_FRONT_PROXIMITY] = "FrontProximity",
+ [SW_ROTATE_LOCK] = "RotateLock",
+ [SW_LINEIN_INSERT] = "LineInInsert",
+ [SW_MUTE_DEVICE] = "MuteDevice",
+ [SW_PEN_INSERTED] = "PenInserted",
+ [SW_MACHINE_COVER] = "MachineCover",
+};
+
+static const char *force[FF_CNT] = {
+ [FF_RUMBLE] = "FF_RUMBLE",
+ [FF_PERIODIC] = "FF_PERIODIC",
+ [FF_CONSTANT] = "FF_CONSTANT",
+ [FF_SPRING] = "FF_SPRING",
+ [FF_FRICTION] = "FF_FRICTION",
+ [FF_DAMPER] = "FF_DAMPER",
+ [FF_INERTIA] = "FF_INERTIA",
+ [FF_RAMP] = "FF_RAMP",
+ [FF_SQUARE] = "FF_SQUARE",
+ [FF_TRIANGLE] = "FF_TRIANGLE",
+ [FF_SINE] = "FF_SINE",
+ [FF_SAW_UP] = "FF_SAW_UP",
+ [FF_SAW_DOWN] = "FF_SAW_DOWN",
+ [FF_CUSTOM] = "FF_CUSTOM",
+ [FF_GAIN] = "FF_GAIN",
+ [FF_AUTOCENTER] = "FF_AUTOCENTER",
+ [FF_MAX] = "FF_MAX",
+};
+
+static const char *force_status[FF_STATUS_MAX + 1] = {
+ [FF_STATUS_STOPPED] = "FF_STATUS_STOPPED",
+ [FF_STATUS_PLAYING] = "FF_STATUS_PLAYING",
+};
+
static const char **names[EV_MAX + 1] = {
[EV_SYN] = syncs, [EV_KEY] = keys,
[EV_REL] = relatives, [EV_ABS] = absolutes,
[EV_MSC] = misc, [EV_LED] = leds,
[EV_SND] = sounds, [EV_REP] = repeats,
+ [EV_SW] = software, [EV_FF] = force,
+ [EV_FF_STATUS] = force_status,
};
static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f)
{
- seq_printf(f, "%s.%s", events[type] ? events[type] : "?",
- names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
+ if (events[type])
+ seq_printf(f, "%s.", events[type]);
+ else
+ seq_printf(f, "%02x.", type);
+
+ if (names[type] && names[type][code])
+ seq_printf(f, "%s", names[type][code]);
+ else
+ seq_printf(f, "%04x", code);
}
static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f)
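The reworked hid_resolv_event() above drops the old "?" placeholders: a missing type or code name now falls back to the raw hex value. A rough standalone illustration of that fallback, with small stand-in tables instead of the kernel's, might be:

#include <stdio.h>

#define EV_CNT   2
#define CODE_CNT 256

static const char *events[EV_CNT] = { "Sync", "Key" };
static const char *keys[CODE_CNT] = { [30] = "A" };
static const char **names[EV_CNT] = { NULL, keys };

static void resolv_event(unsigned int type, unsigned int code)
{
	if (type < EV_CNT && events[type])
		printf("%s.", events[type]);
	else
		printf("%02x.", type);	/* unknown type: print it in hex */

	if (type < EV_CNT && names[type] && code < CODE_CNT && names[type][code])
		printf("%s\n", names[type][code]);
	else
		printf("%04x\n", code);	/* unknown code: print it in hex */
}

int main(void)
{
	resolv_event(1, 30);	/* known: "Key.A" */
	resolv_event(5, 7);	/* unknown: "05.0007" */
	return 0;
}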
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index c6bdb9c4ef3e..25331695ae32 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -255,7 +255,7 @@ out:
return retval;
}
-static int cbas_ec_remove(struct platform_device *pdev)
+static void cbas_ec_remove(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
@@ -266,7 +266,6 @@ static int cbas_ec_remove(struct platform_device *pdev)
cbas_ec_set_input(NULL);
mutex_unlock(&cbas_ec_reglock);
- return 0;
}
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
@@ -285,7 +284,7 @@ MODULE_DEVICE_TABLE(of, cbas_ec_of_match);
static struct platform_driver cbas_ec_driver = {
.probe = cbas_ec_probe,
- .remove = cbas_ec_remove,
+ .remove_new = cbas_ec_remove,
.driver = {
.name = "cbas_ec",
.acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8376fb5e2d0b..61d2a21affa2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -208,6 +208,8 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3 0x1a30
+#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR 0x18c6
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe
#define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
@@ -823,6 +825,7 @@
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
+#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD 0xbb00
#define USB_DEVICE_ID_LOGITECH_C007 0xc007
#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index eb9bf2829937..70ceb9437332 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -209,7 +209,7 @@ static const __u8 pensketch_t609a_control_rdesc[] = {
0xC0 /* End Collection */
};
-/* Fix indexes in kye_tablet_fixup if you change this */
+/* Fix indexes in kye_tablet_fixup() if you change this */
static const __u8 kye_tablet_rdesc[] = {
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
0x09, 0x01, /* Usage (01h), */
@@ -262,12 +262,16 @@ static const __u8 kye_tablet_rdesc[] = {
0x27, 0xFF, 0x07, 0x00, 0x00, /* Logical Maximum (2047), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
- 0xC0, /* End Collection, */
- 0x05, 0x0D, /* Usage Page (Digitizer), */
- 0x09, 0x21, /* Usage (Puck), */
+ 0xC0 /* End Collection, */
+};
+
+/* Fix indexes in kye_tablet_fixup() if you change this */
+static const __u8 kye_tablet_mouse_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x11, /* Report ID (17), */
- 0x09, 0x21, /* Usage (Puck), */
+ 0x09, 0x01, /* Usage (Pointer), */
0xA0, /* Collection (Physical), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
@@ -280,7 +284,7 @@ static const __u8 kye_tablet_rdesc[] = {
0x95, 0x04, /* Report Count (4), */
0x81, 0x01, /* Input (Constant), */
0x05, 0x0D, /* Usage Page (Digitizer), */
- 0x09, 0x32, /* Usage (In Range), */
+ 0x09, 0x37, /* Usage (Data Valid), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x02, /* Input (Variable), */
0x05, 0x01, /* Usage Page (Desktop), */
@@ -317,7 +321,7 @@ static const struct kye_tablet_info {
__s32 y_physical_maximum;
__s8 unit_exponent;
__s8 unit;
- bool has_punk;
+ bool has_mouse;
unsigned int control_rsize;
const __u8 *control_rdesc;
} kye_tablets_info[] = {
@@ -402,7 +406,7 @@ static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
static __u8 *kye_tablet_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
{
const struct kye_tablet_info *info;
- unsigned int newsize;
+ __u8 *newdesc = rdesc;
if (*rsize < sizeof(kye_tablet_rdesc)) {
hid_warn(hdev,
@@ -420,36 +424,45 @@ static __u8 *kye_tablet_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
return rdesc;
}
- newsize = info->has_punk ? sizeof(kye_tablet_rdesc) : 112;
- memcpy(rdesc, kye_tablet_rdesc, newsize);
-
- put_unaligned_le32(info->x_logical_maximum, rdesc + 66);
- put_unaligned_le32(info->x_physical_maximum, rdesc + 72);
- rdesc[77] = info->unit;
- rdesc[79] = info->unit_exponent;
- put_unaligned_le32(info->y_logical_maximum, rdesc + 87);
- put_unaligned_le32(info->y_physical_maximum, rdesc + 92);
- put_unaligned_le32(info->pressure_logical_maximum, rdesc + 104);
-
- if (info->has_punk) {
- put_unaligned_le32(info->x_logical_maximum, rdesc + 156);
- put_unaligned_le32(info->x_physical_maximum, rdesc + 162);
- rdesc[167] = info->unit;
- rdesc[169] = info->unit_exponent;
- put_unaligned_le32(info->y_logical_maximum, rdesc + 177);
- put_unaligned_le32(info->y_physical_maximum, rdesc + 182);
+ memcpy(newdesc, kye_tablet_rdesc, sizeof(kye_tablet_rdesc));
+
+ put_unaligned_le32(info->x_logical_maximum, newdesc + 66);
+ put_unaligned_le32(info->x_physical_maximum, newdesc + 72);
+ newdesc[77] = info->unit;
+ newdesc[79] = info->unit_exponent;
+ put_unaligned_le32(info->y_logical_maximum, newdesc + 87);
+ put_unaligned_le32(info->y_physical_maximum, newdesc + 92);
+ put_unaligned_le32(info->pressure_logical_maximum, newdesc + 104);
+
+ newdesc += sizeof(kye_tablet_rdesc);
+
+ if (info->has_mouse) {
+ if (newdesc + sizeof(kye_tablet_mouse_rdesc) > rdesc + *rsize)
+ hid_err(hdev, "control desc unexpectedly large\n");
+ else {
+ memcpy(newdesc, kye_tablet_mouse_rdesc, sizeof(kye_tablet_mouse_rdesc));
+
+ put_unaligned_le32(info->x_logical_maximum, newdesc + 44);
+ put_unaligned_le32(info->x_physical_maximum, newdesc + 50);
+ newdesc[55] = info->unit;
+ newdesc[57] = info->unit_exponent;
+ put_unaligned_le32(info->y_logical_maximum, newdesc + 65);
+ put_unaligned_le32(info->y_physical_maximum, newdesc + 70);
+
+ newdesc += sizeof(kye_tablet_mouse_rdesc);
+ }
}
if (info->control_rsize) {
- if (newsize + info->control_rsize > *rsize)
- hid_err(hdev, "control rdesc unexpectedly large");
+ if (newdesc + info->control_rsize > rdesc + *rsize)
+ hid_err(hdev, "control desc unexpectedly large\n");
else {
- memcpy(rdesc + newsize, info->control_rdesc, info->control_rsize);
- newsize += info->control_rsize;
+ memcpy(newdesc, info->control_rdesc, info->control_rsize);
+ newdesc += info->control_rsize;
}
}
- *rsize = newsize;
+ *rsize = newdesc - rdesc;
return rdesc;
}
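The kye_tablet_fixup() rework above stops tracking a fixed newsize and instead walks a cursor (newdesc) through the caller's buffer, bounds-checking each optional block before copying it and deriving the final *rsize from the cursor position. A simplified sketch of that append-with-bounds-check pattern, with invented buffer contents and sizes, could be:

#include <stdio.h>
#include <string.h>

/* Append src at the cursor position if it still fits inside buf; otherwise
 * warn and leave the cursor untouched, mirroring the checks in the hunk. */
static unsigned char *append_block(unsigned char *buf, size_t bufsize,
				   unsigned char *cursor,
				   const unsigned char *src, size_t len)
{
	if (cursor + len > buf + bufsize) {
		fprintf(stderr, "block unexpectedly large, skipping\n");
		return cursor;
	}
	memcpy(cursor, src, len);
	return cursor + len;
}

int main(void)
{
	unsigned char buf[16];
	const unsigned char base[4]  = { 1, 2, 3, 4 };
	const unsigned char extra[3] = { 5, 6, 7 };
	unsigned char *cur = buf;

	cur = append_block(buf, sizeof(buf), cur, base, sizeof(base));
	cur = append_block(buf, sizeof(buf), cur, extra, sizeof(extra));

	/* final size is simply how far the cursor advanced */
	printf("new descriptor size: %zu\n", (size_t)(cur - buf));
	return 0;
}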
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index f86c1ea83a03..e6b2ae68b8fb 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -555,7 +555,7 @@ static ssize_t attr_fn_lock_show(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", data->fn_lock);
+ return sysfs_emit(buf, "%u\n", data->fn_lock);
}
static ssize_t attr_fn_lock_store(struct device *dev,
@@ -599,8 +599,7 @@ static ssize_t attr_sensitivity_show_cptkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- cptkbd_data->sensitivity);
+ return sysfs_emit(buf, "%u\n", cptkbd_data->sensitivity);
}
static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
@@ -628,8 +627,8 @@ static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- cptkbd_data->middleclick_workaround_cptkbd);
+ return sysfs_emit(buf, "%u\n",
+ cptkbd_data->middleclick_workaround_cptkbd);
}
static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
@@ -809,7 +808,7 @@ static ssize_t attr_press_to_select_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
+ return sysfs_emit(buf, "%u\n", data_pointer->press_to_select);
}
static ssize_t attr_press_to_select_store_tpkbd(struct device *dev,
@@ -839,7 +838,7 @@ static ssize_t attr_dragging_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
+ return sysfs_emit(buf, "%u\n", data_pointer->dragging);
}
static ssize_t attr_dragging_store_tpkbd(struct device *dev,
@@ -869,7 +868,7 @@ static ssize_t attr_release_to_select_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
+ return sysfs_emit(buf, "%u\n", data_pointer->release_to_select);
}
static ssize_t attr_release_to_select_store_tpkbd(struct device *dev,
@@ -899,7 +898,7 @@ static ssize_t attr_select_right_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
+ return sysfs_emit(buf, "%u\n", data_pointer->select_right);
}
static ssize_t attr_select_right_store_tpkbd(struct device *dev,
@@ -929,8 +928,7 @@ static ssize_t attr_sensitivity_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- data_pointer->sensitivity);
+ return sysfs_emit(buf, "%u\n", data_pointer->sensitivity);
}
static ssize_t attr_sensitivity_store_tpkbd(struct device *dev,
@@ -958,8 +956,7 @@ static ssize_t attr_press_speed_show_tpkbd(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- data_pointer->press_speed);
+ return sysfs_emit(buf, "%u\n", data_pointer->press_speed);
}
static ssize_t attr_press_speed_store_tpkbd(struct device *dev,
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e6a8b6d8eab7..3c3c497b6b91 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d2f3f234f29d..b81d5bcc76a7 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4603,6 +4603,12 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
{ /* Logitech G903 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ { /* Logitech G Pro Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
+ { /* MX Vertical over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08A) },
+ { /* Logitech G703 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
{ /* Logitech G903 Hero Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
{ /* Logitech G915 TKL Keyboard over USB */
@@ -4613,8 +4619,6 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Logitech G923 Wheel (Xbox version) over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G923_XBOX_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS },
- { /* Logitech G Pro Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* Logitech G Pro X Superlight Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
{ /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
@@ -4641,9 +4645,13 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
{ /* M720 Triathlon mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) },
+ { /* MX Master 2S mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb019) },
{ /* MX Ergo trackball over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+ { /* MX Vertical mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb020) },
{ /* Signature M650 over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
{ /* MX Master 3 mouse over Bluetooth */
@@ -4652,6 +4660,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb025) },
{ /* MX Master 3S mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
+ { /* MX Anywhere 3SB mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) },
{}
};
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index f9cceaeffd08..da5ea5a23b08 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev)
{
+#if IS_REACHABLE(CONFIG_IIO)
struct mcp2221 *mcp = hid_get_drvdata(hdev);
cancel_delayed_work_sync(&mcp->init_work);
+#endif
}
#if IS_REACHABLE(CONFIG_IIO)
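[Editor's note] A side note on the guard above, not part of the patch: IS_REACHABLE(CONFIG_IIO) is true only when calls into the IIO core can be resolved at link time, i.e. IIO is built-in, or both IIO and the referencing code are modules. The remove path presumably has to mirror the guard because the init_work handling elsewhere in the driver sits under the same condition. A self-contained sketch of the macro's semantics:

#include <linux/kconfig.h>

/* Hedged sketch: whether this code may call directly into the IIO core.
 * IS_REACHABLE(CONFIG_IIO) expands to a compile-time 0 or 1: 1 when
 * CONFIG_IIO=y, or when CONFIG_IIO=m and this file is built as a module.
 * Code (and struct members) compiled only under the guard simply do not
 * exist otherwise, so every reference must carry the same guard.
 */
static inline bool example_can_use_iio(void)
{
	return IS_REACHABLE(CONFIG_IIO);
}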
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 04a014cd2a2f..56fc78841f24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2081,6 +2081,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X12_TAB) },
+ /* Logitech devices */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index ab5953fc2436..b4a97803eca3 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -34,6 +34,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/hid.h>
+#include <linux/idr.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/leds.h>
@@ -481,10 +482,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_TR, JC_BTN_R, },
{ BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
{ BTN_START, JC_BTN_PLUS, },
- { BTN_FORWARD, JC_BTN_Y, }, /* C UP */
- { BTN_BACK, JC_BTN_ZR, }, /* C DOWN */
- { BTN_LEFT, JC_BTN_X, }, /* C LEFT */
- { BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */
+ { BTN_SELECT, JC_BTN_Y, }, /* C UP */
+ { BTN_X, JC_BTN_ZR, }, /* C DOWN */
+ { BTN_Y, JC_BTN_X, }, /* C LEFT */
+ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ },
@@ -569,6 +570,7 @@ static const enum led_brightness joycon_player_led_patterns[JC_NUM_LED_PATTERNS]
struct joycon_ctlr {
struct hid_device *hdev;
struct input_dev *input;
+ u32 player_id;
struct led_classdev leds[JC_NUM_LEDS]; /* player leds */
struct led_classdev home_led;
enum joycon_ctlr_state ctlr_state;
@@ -692,15 +694,6 @@ static inline bool joycon_device_is_n64con(struct joycon_ctlr *ctlr)
return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_N64CON;
}
-static inline bool joycon_device_has_usb(struct joycon_ctlr *ctlr)
-{
- return joycon_device_is_procon(ctlr) ||
- joycon_device_is_chrggrip(ctlr) ||
- joycon_device_is_snescon(ctlr) ||
- joycon_device_is_gencon(ctlr) ||
- joycon_device_is_n64con(ctlr);
-}
-
/*
* Controller type helpers
*
@@ -2261,7 +2254,8 @@ static int joycon_home_led_brightness_set(struct led_classdev *led,
return ret;
}
-static DEFINE_SPINLOCK(joycon_input_num_spinlock);
+static DEFINE_IDA(nintendo_player_id_allocator);
+
static int joycon_leds_create(struct joycon_ctlr *ctlr)
{
struct hid_device *hdev = ctlr->hdev;
@@ -2272,20 +2266,19 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
char *name;
int ret;
int i;
- unsigned long flags;
int player_led_pattern;
- static int input_num;
-
- /*
- * Set the player leds based on controller number
- * Because there is no standard concept of "player number", the pattern
- * number will simply increase by 1 every time a controller is connected.
- */
- spin_lock_irqsave(&joycon_input_num_spinlock, flags);
- player_led_pattern = input_num++ % JC_NUM_LED_PATTERNS;
- spin_unlock_irqrestore(&joycon_input_num_spinlock, flags);
/* configure the player LEDs */
+ ctlr->player_id = U32_MAX;
+ ret = ida_alloc(&nintendo_player_id_allocator, GFP_KERNEL);
+ if (ret < 0) {
+ hid_warn(hdev, "Failed to allocate player ID, skipping; ret=%d\n", ret);
+ goto home_led;
+ }
+ ctlr->player_id = ret;
+ player_led_pattern = ret % JC_NUM_LED_PATTERNS;
+ hid_info(ctlr->hdev, "assigned player %d led pattern", player_led_pattern + 1);
+
for (i = 0; i < JC_NUM_LEDS; i++) {
name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s",
d_name,
@@ -2501,8 +2494,11 @@ static int joycon_init(struct hid_device *hdev)
/* set baudrate for improved latency */
ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
if (ret) {
- hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
- goto out_unlock;
+ /*
+ * We can function with the default baudrate.
+ * Provide a warning, and continue on.
+ */
+ hid_warn(hdev, "Failed to set baudrate (ret=%d), continuing anyway\n", ret);
}
/* handshake */
ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
@@ -2767,6 +2763,7 @@ static void nintendo_hid_remove(struct hid_device *hdev)
spin_unlock_irqrestore(&ctlr->lock, flags);
destroy_workqueue(ctlr->rumble_queue);
+ ida_free(&nintendo_player_id_allocator, ctlr->player_id);
hid_hw_close(hdev);
hid_hw_stop(hdev);
@@ -2824,7 +2821,19 @@ static struct hid_driver nintendo_hid_driver = {
.resume = nintendo_hid_resume,
#endif
};
-module_hid_driver(nintendo_hid_driver);
+static int __init nintendo_init(void)
+{
+ return hid_register_driver(&nintendo_hid_driver);
+}
+
+static void __exit nintendo_exit(void)
+{
+ hid_unregister_driver(&nintendo_hid_driver);
+ ida_destroy(&nintendo_player_id_allocator);
+}
+
+module_init(nintendo_init);
+module_exit(nintendo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ryan McClelland <rymcclel@gmail.com>");
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 5ddebe25eb91..297103be3381 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -256,9 +256,9 @@ static ssize_t picolcd_operation_mode_show(struct device *dev,
struct picolcd_data *data = dev_get_drvdata(dev);
if (data->status & PICOLCD_BOOTLOADER)
- return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+ return sysfs_emit(buf, "[bootloader] lcd\n");
else
- return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+ return sysfs_emit(buf, "bootloader [lcd]\n");
}
static ssize_t picolcd_operation_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
{
struct picolcd_data *data = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+ return sysfs_emit(buf, "%hu\n", data->opmode_delay);
}
static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 750206f5fc67..83e3409d170c 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -421,12 +421,10 @@ static ssize_t picolcd_fb_update_rate_show(struct device *dev,
size_t ret = 0;
for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
- if (ret >= PAGE_SIZE)
- break;
- else if (i == fb_update_rate)
- ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ if (i == fb_update_rate)
+ ret += sysfs_emit_at(buf, ret, "[%u] ", i);
else
- ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ ret += sysfs_emit_at(buf, ret, "%u ", i);
if (ret > 0)
buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
return ret;
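[Editor's note] sysfs_emit_at() is the append variant used above: it writes at offset 'at' within the same sysfs page and returns the number of characters added, which is why the explicit PAGE_SIZE bounds check could be dropped. A hedged sketch of enumerating choices the way picolcd does, with a hypothetical current_rate and an illustrative limit of 8:

static unsigned int current_rate = 3;	/* hypothetical selected value */

/* Sketch: build a "1 2 [3] 4 ..." style list in a show() callback. */
static ssize_t rates_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int i, len = 0;

	for (i = 1; i <= 8; i++)
		len += sysfs_emit_at(buf, len,
				     i == current_rate ? "[%u] " : "%u ", i);
	if (len)
		buf[len - 1] = '\n';	/* replace the trailing space */
	return len;
}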
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 8ac8f7b8e317..e7c309cfe3a0 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -27,6 +27,11 @@ static DEFINE_IDA(ps_player_id_allocator);
#define HID_PLAYSTATION_VERSION_PATCH 0x8000
+enum PS_TYPE {
+ PS_TYPE_PS4_DUALSHOCK4,
+ PS_TYPE_PS5_DUALSENSE,
+};
+
/* Base class for playstation devices. */
struct ps_device {
struct list_head list;
@@ -287,6 +292,8 @@ struct dualsense_output_report {
#define DS4_INPUT_REPORT_USB 0x01
#define DS4_INPUT_REPORT_USB_SIZE 64
+#define DS4_INPUT_REPORT_BT_MINIMAL 0x01
+#define DS4_INPUT_REPORT_BT_MINIMAL_SIZE 10
#define DS4_INPUT_REPORT_BT 0x11
#define DS4_INPUT_REPORT_BT_SIZE 78
#define DS4_OUTPUT_REPORT_USB 0x05
@@ -1778,8 +1785,10 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
int retries;
buf = kzalloc(DS4_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto transfer_failed;
+ }
/* We should normally receive the feature report data we asked
* for, but hidraw applications such as Steam can issue feature
@@ -1796,26 +1805,30 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
continue;
}
- hid_err(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
ret = -EILSEQ;
- goto err_free;
+ goto transfer_failed;
} else {
break;
}
}
} else { /* Bluetooth */
buf = kzalloc(DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto transfer_failed;
+ }
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION_BT, buf,
DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true);
+
if (ret) {
- hid_err(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
- goto err_free;
+ hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ goto transfer_failed;
}
}
+ /* Transfer succeeded - parse the calibration data received. */
gyro_pitch_bias = get_unaligned_le16(&buf[1]);
gyro_yaw_bias = get_unaligned_le16(&buf[3]);
gyro_roll_bias = get_unaligned_le16(&buf[5]);
@@ -1844,6 +1857,9 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
acc_z_plus = get_unaligned_le16(&buf[31]);
acc_z_minus = get_unaligned_le16(&buf[33]);
+ /* Done parsing the buffer, so let's free it. */
+ kfree(buf);
+
/*
* Set gyroscope calibration and normalization parameters.
* Data values will be normalized to 1/DS4_GYRO_RES_PER_DEG_S degree/s.
@@ -1868,21 +1884,6 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
abs(gyro_roll_minus - gyro_roll_bias);
/*
- * Sanity check gyro calibration data. This is needed to prevent crashes
- * during report handling of virtual, clone or broken devices not implementing
- * calibration data properly.
- */
- for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
- if (ds4->gyro_calib_data[i].sens_denom == 0) {
- hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
- ds4->gyro_calib_data[i].abs_code);
- ds4->gyro_calib_data[i].bias = 0;
- ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
- ds4->gyro_calib_data[i].sens_denom = S16_MAX;
- }
- }
-
- /*
* Set accelerometer calibration and normalization parameters.
* Data values will be normalized to 1/DS4_ACC_RES_PER_G g.
*/
@@ -1904,6 +1905,23 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G;
ds4->accel_calib_data[2].sens_denom = range_2g;
+transfer_failed:
+ /*
+ * Sanity check gyro calibration data. This is needed to prevent crashes
+ * during report handling of virtual, clone or broken devices not implementing
+ * calibration data properly.
+ */
+ for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
+ if (ds4->gyro_calib_data[i].sens_denom == 0) {
+ ds4->gyro_calib_data[i].abs_code = ABS_RX + i;
+ hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
+ ds4->gyro_calib_data[i].abs_code);
+ ds4->gyro_calib_data[i].bias = 0;
+ ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
+ ds4->gyro_calib_data[i].sens_denom = S16_MAX;
+ }
+ }
+
/*
* Sanity check accelerometer calibration data. This is needed to prevent crashes
* during report handling of virtual, clone or broken devices not implementing calibration
@@ -1911,6 +1929,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
*/
for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) {
if (ds4->accel_calib_data[i].sens_denom == 0) {
+ ds4->accel_calib_data[i].abs_code = ABS_X + i;
hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
ds4->accel_calib_data[i].abs_code);
ds4->accel_calib_data[i].bias = 0;
@@ -1919,8 +1938,6 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
}
}
-err_free:
- kfree(buf);
return ret;
}
@@ -2037,8 +2054,9 @@ static int dualshock4_led_set_blink(struct led_classdev *led, unsigned long *del
dualshock4_schedule_work(ds4);
- *delay_on = ds4->lightbar_blink_on;
- *delay_off = ds4->lightbar_blink_off;
+ /* Report scaled values back to LED subsystem */
+ *delay_on = ds4->lightbar_blink_on * 10;
+ *delay_off = ds4->lightbar_blink_off * 10;
return 0;
}
@@ -2065,6 +2083,13 @@ static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brig
break;
case 3:
ds4->lightbar_enabled = !!value;
+
+ /* brightness = 0 also cancels blinking in Linux. */
+ if (!ds4->lightbar_enabled) {
+ ds4->lightbar_blink_off = 0;
+ ds4->lightbar_blink_on = 0;
+ ds4->update_lightbar_blink = true;
+ }
}
ds4->update_lightbar = true;
@@ -2182,6 +2207,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
int battery_status, i, j;
uint16_t sensor_timestamp;
unsigned long flags;
+ bool is_minimal = false;
/*
* DualShock4 in USB uses the full HID report for reportID 1, but
@@ -2209,6 +2235,18 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
ds4_report = &bt->common;
num_touch_reports = bt->num_touch_reports;
touch_reports = bt->touch_reports;
+ } else if (hdev->bus == BUS_BLUETOOTH &&
+ report->id == DS4_INPUT_REPORT_BT_MINIMAL &&
+ size == DS4_INPUT_REPORT_BT_MINIMAL_SIZE) {
+ /* Some third-party pads never switch to the full 0x11 report.
+ * The short 0x01 report is 10 bytes long:
+ * u8 report_id == 0x01
+ * u8 first_bytes_of_full_report[9]
+ * So let's reuse the full report parser, and stop it after
+ * parsing the buttons.
+ */
+ ds4_report = (struct dualshock4_input_report_common *)&data[1];
+ is_minimal = true;
} else {
hid_err(hdev, "Unhandled reportID=%d\n", report->id);
return -1;
@@ -2242,6 +2280,9 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
input_report_key(ds4->gamepad, BTN_MODE, ds4_report->buttons[2] & DS_BUTTONS2_PS_HOME);
input_sync(ds4->gamepad);
+ if (is_minimal)
+ return 0;
+
/* Parse and calibrate gyroscope data. */
for (i = 0; i < ARRAY_SIZE(ds4_report->gyro); i++) {
int raw_data = (short)le16_to_cpu(ds4_report->gyro[i]);
@@ -2550,8 +2591,8 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
ret = dualshock4_get_firmware_info(ds4);
if (ret) {
- hid_err(hdev, "Failed to get firmware info from DualShock4\n");
- return ERR_PTR(ret);
+ hid_warn(hdev, "Failed to get firmware info from DualShock4\n");
+ hid_warn(hdev, "HW/FW version data in sysfs will be invalid.\n");
}
ret = ps_devices_list_add(ps_dev);
@@ -2560,8 +2601,8 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
ret = dualshock4_get_calibration_data(ds4);
if (ret) {
- hid_err(hdev, "Failed to get calibration data from DualShock4\n");
- goto err;
+ hid_warn(hdev, "Failed to get calibration data from DualShock4\n");
+ hid_warn(hdev, "Gyroscope and accelerometer will be inaccurate.\n");
}
ds4->gamepad = ps_gamepad_create(hdev, dualshock4_play_effect);
@@ -2655,17 +2696,14 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop;
}
- if (hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER ||
- hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 ||
- hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+ if (id->driver_data == PS_TYPE_PS4_DUALSHOCK4) {
dev = dualshock4_create(hdev);
if (IS_ERR(dev)) {
hid_err(hdev, "Failed to create dualshock4.\n");
ret = PTR_ERR(dev);
goto err_close;
}
- } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER ||
- hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) {
+ } else if (id->driver_data == PS_TYPE_PS5_DUALSENSE) {
dev = dualsense_create(hdev);
if (IS_ERR(dev)) {
hid_err(hdev, "Failed to create dualsense.\n");
@@ -2699,16 +2737,26 @@ static void ps_remove(struct hid_device *hdev)
static const struct hid_device_id ps_devices[] = {
/* Sony DualShock 4 controllers for PS4 */
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ .driver_data = PS_TYPE_PS4_DUALSHOCK4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ .driver_data = PS_TYPE_PS4_DUALSHOCK4 },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = PS_TYPE_PS4_DUALSHOCK4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = PS_TYPE_PS4_DUALSHOCK4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = PS_TYPE_PS4_DUALSHOCK4 },
+
/* Sony DualSense controllers for PS5 */
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER),
+ .driver_data = PS_TYPE_PS5_DUALSENSE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER),
+ .driver_data = PS_TYPE_PS5_DUALSENSE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2),
+ .driver_data = PS_TYPE_PS5_DUALSENSE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2),
+ .driver_data = PS_TYPE_PS5_DUALSENSE },
{ }
};
MODULE_DEVICE_TABLE(hid, ps_devices);
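[Editor's note] Encoding the controller family in hid_device_id.driver_data (the PS_TYPE enum introduced above) keeps probe() independent of the growing product-ID list: a new DualShock 4 or DualSense PID only needs a table entry. A condensed sketch of the dispatch, assuming driver_data carries a PS_TYPE value as in the table above; the real probe uses if/else and additional error unwinding.

static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	struct ps_device *dev;

	switch (id->driver_data) {
	case PS_TYPE_PS4_DUALSHOCK4:
		dev = dualshock4_create(hdev);
		break;
	case PS_TYPE_PS5_DUALSENSE:
		dev = dualsense_create(hdev);
		break;
	default:
		return -ENODEV;		/* unknown table entry */
	}

	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}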
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 458060403397..0cd6208fb371 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -61,7 +61,7 @@ static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
{
struct isku_device *isku =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
+ return sysfs_emit(buf, "%d\n", isku->actual_profile);
}
static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 00a1abc7e839..3f8f459edcf3 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -400,7 +400,7 @@ static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+ return sysfs_emit(buf, "%d\n", kone->actual_profile);
}
static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
@@ -409,7 +409,7 @@ static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+ return sysfs_emit(buf, "%d\n", kone->actual_dpi);
}
static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
@@ -432,7 +432,7 @@ static ssize_t kone_sysfs_show_weight(struct device *dev,
if (retval)
return retval;
- return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+ return sysfs_emit(buf, "%d\n", weight);
}
static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
@@ -441,7 +441,7 @@ static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+ return sysfs_emit(buf, "%d\n", kone->firmware_version);
}
static DEVICE_ATTR(firmware_version, 0440, kone_sysfs_show_firmware_version,
NULL);
@@ -451,7 +451,7 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev,
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+ return sysfs_emit(buf, "%d\n", kone->settings.tcu);
}
static int kone_tcu_command(struct usb_device *usb_dev, int number)
@@ -553,7 +553,7 @@ static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
{
struct kone_device *kone =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+ return sysfs_emit(buf, "%d\n", kone->settings.startup_profile);
}
static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 22b895436a7c..8ccb3b14a1a9 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -242,7 +242,7 @@ static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
{
struct koneplus_device *koneplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->actual_profile);
+ return sysfs_emit(buf, "%d\n", koneplus->actual_profile);
}
static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
@@ -309,7 +309,7 @@ static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
&info, KONEPLUS_SIZE_INFO);
mutex_unlock(&koneplus->koneplus_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
+ return sysfs_emit(buf, "%d\n", info.firmware_version);
}
static DEVICE_ATTR(firmware_version, 0440,
koneplus_sysfs_show_firmware_version, NULL);
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 86af538c10d6..748d4d7cb2fc 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -272,7 +272,7 @@ static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_profile);
+ return sysfs_emit(buf, "%d\n", kovaplus->actual_profile);
}
static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
@@ -325,7 +325,7 @@ static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi);
+ return sysfs_emit(buf, "%d\n", kovaplus->actual_cpi);
}
static DEVICE_ATTR(actual_cpi, 0440, kovaplus_sysfs_show_actual_cpi, NULL);
@@ -334,7 +334,7 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity);
+ return sysfs_emit(buf, "%d\n", kovaplus->actual_x_sensitivity);
}
static DEVICE_ATTR(actual_sensitivity_x, 0440,
kovaplus_sysfs_show_actual_sensitivity_x, NULL);
@@ -344,7 +344,7 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity);
+ return sysfs_emit(buf, "%d\n", kovaplus->actual_y_sensitivity);
}
static DEVICE_ATTR(actual_sensitivity_y, 0440,
kovaplus_sysfs_show_actual_sensitivity_y, NULL);
@@ -365,7 +365,7 @@ static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
&info, KOVAPLUS_SIZE_INFO);
mutex_unlock(&kovaplus->kovaplus_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
+ return sysfs_emit(buf, "%d\n", info.firmware_version);
}
static DEVICE_ATTR(firmware_version, 0440,
kovaplus_sysfs_show_firmware_version, NULL);
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 5663b9cd9c69..eeb3d38cd805 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -283,7 +283,7 @@ static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
{
struct pyra_device *pyra =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
+ return sysfs_emit(buf, "%d\n", pyra->actual_cpi);
}
static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL);
@@ -300,7 +300,7 @@ static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
&settings, PYRA_SIZE_SETTINGS);
mutex_unlock(&pyra->pyra_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", settings.startup_profile);
+ return sysfs_emit(buf, "%d\n", settings.startup_profile);
}
static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
static DEVICE_ATTR(startup_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
@@ -321,7 +321,7 @@ static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
&info, PYRA_SIZE_INFO);
mutex_unlock(&pyra->pyra_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
+ return sysfs_emit(buf, "%d\n", info.firmware_version);
}
static DEVICE_ATTR(firmware_version, 0440, pyra_sysfs_show_firmware_version,
NULL);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index d85398721659..bd400f6b472b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -155,7 +155,7 @@ static ssize_t enable_sensor_show(struct device *dev,
{
struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", sensor_inst->enable);
+ return sysfs_emit(buf, "%d\n", sensor_inst->enable);
}
static int set_power_report_state(struct hid_sensor_custom *sensor_inst,
@@ -372,14 +372,13 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
sizeof(struct hid_custom_usage_desc),
usage_id_cmp);
if (usage_desc)
- return snprintf(buf, PAGE_SIZE, "%s\n",
- usage_desc->desc);
+ return sysfs_emit(buf, "%s\n", usage_desc->desc);
else
- return sprintf(buf, "not-specified\n");
+ return sysfs_emit(buf, "not-specified\n");
} else
return -EINVAL;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t store_value(struct device *dev, struct device_attribute *attr,
@@ -1032,14 +1031,14 @@ err_remove_callback:
return ret;
}
-static int hid_sensor_custom_remove(struct platform_device *pdev)
+static void hid_sensor_custom_remove(struct platform_device *pdev)
{
struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
if (sensor_inst->custom_pdev) {
platform_device_unregister(sensor_inst->custom_pdev);
- return 0;
+ return;
}
hid_sensor_custom_dev_if_remove(sensor_inst);
@@ -1047,8 +1046,6 @@ static int hid_sensor_custom_remove(struct platform_device *pdev)
sysfs_remove_group(&sensor_inst->pdev->dev.kobj,
&enable_sensor_attr_group);
sensor_hub_remove_callback(hsdev, hsdev->usage);
-
- return 0;
}
static const struct platform_device_id hid_sensor_custom_ids[] = {
@@ -1068,7 +1065,7 @@ static struct platform_driver hid_sensor_custom_platform_driver = {
.name = KBUILD_MODNAME,
},
.probe = hid_sensor_custom_probe,
- .remove = hid_sensor_custom_remove,
+ .remove_new = hid_sensor_custom_remove,
};
module_platform_driver(hid_sensor_custom_platform_driver);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ebc0aa4e4345..5a07a91a89ae 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1844,8 +1844,7 @@ static int sony_set_device_id(struct sony_sc *sc)
* All others are set to -1.
*/
if (sc->quirks & SIXAXIS_CONTROLLER) {
- ret = ida_simple_get(&sony_device_id_allocator, 0, 0,
- GFP_KERNEL);
+ ret = ida_alloc(&sony_device_id_allocator, GFP_KERNEL);
if (ret < 0) {
sc->device_id = -1;
return ret;
@@ -1861,7 +1860,7 @@ static int sony_set_device_id(struct sony_sc *sc)
static void sony_release_device_id(struct sony_sc *sc)
{
if (sc->device_id >= 0) {
- ida_simple_remove(&sony_device_id_allocator, sc->device_id);
+ ida_free(&sony_device_id_allocator, sc->device_id);
sc->device_id = -1;
}
}
@@ -2016,8 +2015,6 @@ static int sony_input_configured(struct hid_device *hdev,
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
- } else {
- ret = 0;
}
if (sc->quirks & SONY_LED_SUPPORT) {
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index b08a5ab58528..f166188c21ec 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -66,6 +66,14 @@ static LIST_HEAD(steam_devices);
#define STEAM_DECK_TRIGGER_RESOLUTION 5461
/* Joystick runs are about 5 mm and 32768 units */
#define STEAM_DECK_JOYSTICK_RESOLUTION 6553
+/* Accelerometer has 16 bit resolution and a range of +/- 2g */
+#define STEAM_DECK_ACCEL_RES_PER_G 16384
+#define STEAM_DECK_ACCEL_RANGE 32768
+#define STEAM_DECK_ACCEL_FUZZ 32
+/* Gyroscope has 16 bit resolution and a range of +/- 2000 dps */
+#define STEAM_DECK_GYRO_RES_PER_DPS 16
+#define STEAM_DECK_GYRO_RANGE 32768
+#define STEAM_DECK_GYRO_FUZZ 1
#define STEAM_PAD_FUZZ 256
@@ -288,6 +296,7 @@ struct steam_device {
struct mutex report_mutex;
unsigned long client_opened;
struct input_dev __rcu *input;
+ struct input_dev __rcu *sensors;
unsigned long quirks;
struct work_struct work_connect;
bool connected;
@@ -302,6 +311,7 @@ struct steam_device {
struct work_struct rumble_work;
u16 rumble_left;
u16 rumble_right;
+ unsigned int sensor_timestamp_us;
};
static int steam_recv_report(struct steam_device *steam,
@@ -825,6 +835,74 @@ input_register_fail:
return ret;
}
+static int steam_sensors_register(struct steam_device *steam)
+{
+ struct hid_device *hdev = steam->hdev;
+ struct input_dev *sensors;
+ int ret;
+
+ if (!(steam->quirks & STEAM_QUIRK_DECK))
+ return 0;
+
+ rcu_read_lock();
+ sensors = rcu_dereference(steam->sensors);
+ rcu_read_unlock();
+ if (sensors) {
+ dbg_hid("%s: already connected\n", __func__);
+ return 0;
+ }
+
+ sensors = input_allocate_device();
+ if (!sensors)
+ return -ENOMEM;
+
+ input_set_drvdata(sensors, steam);
+ sensors->dev.parent = &hdev->dev;
+
+ sensors->name = "Steam Deck Motion Sensors";
+ sensors->phys = hdev->phys;
+ sensors->uniq = steam->serial_no;
+ sensors->id.bustype = hdev->bus;
+ sensors->id.vendor = hdev->vendor;
+ sensors->id.product = hdev->product;
+ sensors->id.version = hdev->version;
+
+ __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit);
+ __set_bit(EV_MSC, sensors->evbit);
+ __set_bit(MSC_TIMESTAMP, sensors->mscbit);
+
+ input_set_abs_params(sensors, ABS_X, -STEAM_DECK_ACCEL_RANGE,
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
+ input_set_abs_params(sensors, ABS_Y, -STEAM_DECK_ACCEL_RANGE,
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
+ input_set_abs_params(sensors, ABS_Z, -STEAM_DECK_ACCEL_RANGE,
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
+ input_abs_set_res(sensors, ABS_X, STEAM_DECK_ACCEL_RES_PER_G);
+ input_abs_set_res(sensors, ABS_Y, STEAM_DECK_ACCEL_RES_PER_G);
+ input_abs_set_res(sensors, ABS_Z, STEAM_DECK_ACCEL_RES_PER_G);
+
+ input_set_abs_params(sensors, ABS_RX, -STEAM_DECK_GYRO_RANGE,
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
+ input_set_abs_params(sensors, ABS_RY, -STEAM_DECK_GYRO_RANGE,
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
+ input_set_abs_params(sensors, ABS_RZ, -STEAM_DECK_GYRO_RANGE,
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
+ input_abs_set_res(sensors, ABS_RX, STEAM_DECK_GYRO_RES_PER_DPS);
+ input_abs_set_res(sensors, ABS_RY, STEAM_DECK_GYRO_RES_PER_DPS);
+ input_abs_set_res(sensors, ABS_RZ, STEAM_DECK_GYRO_RES_PER_DPS);
+
+ ret = input_register_device(sensors);
+ if (ret)
+ goto sensors_register_fail;
+
+ rcu_assign_pointer(steam->sensors, sensors);
+ return 0;
+
+sensors_register_fail:
+ input_free_device(sensors);
+ return ret;
+}
+
static void steam_input_unregister(struct steam_device *steam)
{
struct input_dev *input;
@@ -838,6 +916,24 @@ static void steam_input_unregister(struct steam_device *steam)
input_unregister_device(input);
}
+static void steam_sensors_unregister(struct steam_device *steam)
+{
+ struct input_dev *sensors;
+
+ if (!(steam->quirks & STEAM_QUIRK_DECK))
+ return;
+
+ rcu_read_lock();
+ sensors = rcu_dereference(steam->sensors);
+ rcu_read_unlock();
+
+ if (!sensors)
+ return;
+ RCU_INIT_POINTER(steam->sensors, NULL);
+ synchronize_rcu();
+ input_unregister_device(sensors);
+}
+
static void steam_battery_unregister(struct steam_device *steam)
{
struct power_supply *battery;
@@ -890,18 +986,28 @@ static int steam_register(struct steam_device *steam)
spin_lock_irqsave(&steam->lock, flags);
client_opened = steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
+
if (!client_opened) {
steam_set_lizard_mode(steam, lizard_mode);
ret = steam_input_register(steam);
- } else
- ret = 0;
+ if (ret != 0)
+ goto steam_register_input_fail;
+ ret = steam_sensors_register(steam);
+ if (ret != 0)
+ goto steam_register_sensors_fail;
+ }
+ return 0;
+steam_register_sensors_fail:
+ steam_input_unregister(steam);
+steam_register_input_fail:
return ret;
}
static void steam_unregister(struct steam_device *steam)
{
steam_battery_unregister(steam);
+ steam_sensors_unregister(steam);
steam_input_unregister(steam);
if (steam->serial_no[0]) {
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
@@ -1010,6 +1116,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
steam->client_opened++;
spin_unlock_irqrestore(&steam->lock, flags);
+ steam_sensors_unregister(steam);
steam_input_unregister(steam);
return 0;
@@ -1030,6 +1137,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
if (connected) {
steam_set_lizard_mode(steam, lizard_mode);
steam_input_register(steam);
+ steam_sensors_register(steam);
}
}
@@ -1121,6 +1229,7 @@ static int steam_probe(struct hid_device *hdev,
INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb);
INIT_LIST_HEAD(&steam->list);
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
+ steam->sensor_timestamp_us = 0;
/*
* With the real steam controller interface, do not connect hidraw.
@@ -1380,12 +1489,12 @@ static void steam_do_input_event(struct steam_device *steam,
* 18-19 | s16 | ABS_HAT0Y | left-pad Y value
* 20-21 | s16 | ABS_HAT1X | right-pad X value
* 22-23 | s16 | ABS_HAT1Y | right-pad Y value
- * 24-25 | s16 | -- | accelerometer X value
- * 26-27 | s16 | -- | accelerometer Y value
- * 28-29 | s16 | -- | accelerometer Z value
- * 30-31 | s16 | -- | gyro X value
- * 32-33 | s16 | -- | gyro Y value
- * 34-35 | s16 | -- | gyro Z value
+ * 24-25 | s16 | IMU ABS_X | accelerometer X value
+ * 26-27 | s16 | IMU ABS_Z | accelerometer Y value
+ * 28-29 | s16 | IMU ABS_Y | accelerometer Z value
+ * 30-31 | s16 | IMU ABS_RX | gyro X value
+ * 32-33 | s16 | IMU ABS_RZ | gyro Y value
+ * 34-35 | s16 | IMU ABS_RY | gyro Z value
* 36-37 | s16 | -- | quaternion W value
* 38-39 | s16 | -- | quaternion X value
* 40-41 | s16 | -- | quaternion Y value
@@ -1546,6 +1655,32 @@ static void steam_do_deck_input_event(struct steam_device *steam,
input_sync(input);
}
+static void steam_do_deck_sensors_event(struct steam_device *steam,
+ struct input_dev *sensors, u8 *data)
+{
+ /*
+ * The deck input report is received every 4 ms on average,
+ * with a jitter of +/- 4 ms even though the USB descriptor claims
+ * that it uses 1 kHz.
+ * Since the HID report does not include a sensor timestamp,
+ * use a fixed increment here.
+ */
+ steam->sensor_timestamp_us += 4000;
+
+ if (!steam->gamepad_mode)
+ return;
+
+ input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
+ input_report_abs(sensors, ABS_X, steam_le16(data + 24));
+ input_report_abs(sensors, ABS_Z, -steam_le16(data + 26));
+ input_report_abs(sensors, ABS_Y, steam_le16(data + 28));
+ input_report_abs(sensors, ABS_RX, steam_le16(data + 30));
+ input_report_abs(sensors, ABS_RZ, -steam_le16(data + 32));
+ input_report_abs(sensors, ABS_RY, steam_le16(data + 34));
+
+ input_sync(sensors);
+}
+
/*
* The size for this message payload is 11.
* The known values are:
@@ -1583,6 +1718,7 @@ static int steam_raw_event(struct hid_device *hdev,
{
struct steam_device *steam = hid_get_drvdata(hdev);
struct input_dev *input;
+ struct input_dev *sensors;
struct power_supply *battery;
if (!steam)
@@ -1628,6 +1764,9 @@ static int steam_raw_event(struct hid_device *hdev,
input = rcu_dereference(steam->input);
if (likely(input))
steam_do_deck_input_event(steam, input, data);
+ sensors = rcu_dereference(steam->sensors);
+ if (likely(sensors))
+ steam_do_deck_sensors_event(steam, sensors, data);
rcu_read_unlock();
break;
case ID_CONTROLLER_WIRELESS:
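[Editor's note] The new Deck sensors node follows the usual evdev convention for IMUs: a second input device flagged INPUT_PROP_ACCELEROMETER, ABS_X/Y/Z for acceleration, ABS_RX/RY/RZ for angular rate, and MSC_TIMESTAMP for the sample clock (here synthesized at 4000 µs per report, since the HID report carries no timestamp). A condensed sketch of such a registration, with the ranges taken from the STEAM_DECK_* defines above and error handling trimmed:

static struct input_dev *example_sensors_create(struct hid_device *hdev)
{
	struct input_dev *sensors = input_allocate_device();

	if (!sensors)
		return NULL;

	sensors->name = "Example Motion Sensors";
	sensors->dev.parent = &hdev->dev;

	__set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit);
	__set_bit(EV_MSC, sensors->evbit);
	__set_bit(MSC_TIMESTAMP, sensors->mscbit);

	/* accelerometer: 16384 units per g over a +/-32768 range */
	input_set_abs_params(sensors, ABS_X, -32768, 32768, 32, 0);
	input_abs_set_res(sensors, ABS_X, 16384);
	/* gyroscope: 16 units per degree/s over a +/-32768 range */
	input_set_abs_params(sensors, ABS_RX, -32768, 32768, 1, 0);
	input_abs_set_res(sensors, ABS_RX, 16);
	/* ...repeat for ABS_Y/Z and ABS_RY/RZ as in the patch... */

	if (input_register_device(sensors)) {
		input_free_device(sensors);
		return NULL;
	}
	return sensors;
}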
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 9859dad36495..5bab006ec165 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -884,6 +884,9 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
+	/* The firmware version is used in userspace as a unique identifier */
+ strscpy(hdev->uniq, ver_ptr, sizeof(hdev->uniq));
+
/* If this is a transition firmware */
if (strcmp(ver_ptr, transition_ver) == 0) {
hid_dbg(hdev,
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
new file mode 100644
index 000000000000..0e224d1a6466
--- /dev/null
+++ b/drivers/hid/hid-winwing.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * HID driver for WinWing Orion 2 throttle
+ *
+ * Copyright (c) 2023 Ivan Gorinov
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#define MAX_REPORT 16
+
+struct winwing_led {
+ struct led_classdev cdev;
+ struct hid_device *hdev;
+ int number;
+};
+
+struct winwing_led_info {
+ int number;
+ int max_brightness;
+ const char *led_name;
+};
+
+static struct winwing_led_info led_info[3] = {
+ { 0, 255, "backlight" },
+ { 1, 1, "a-a" },
+ { 2, 1, "a-g" },
+};
+
+struct winwing_drv_data {
+ struct hid_device *hdev;
+ __u8 *report_buf;
+ struct mutex lock;
+ unsigned int num_leds;
+ struct winwing_led leds[];
+};
+
+static int winwing_led_write(struct led_classdev *cdev,
+ enum led_brightness br)
+{
+ struct winwing_led *led = (struct winwing_led *) cdev;
+ struct winwing_drv_data *data = hid_get_drvdata(led->hdev);
+ __u8 *buf = data->report_buf;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ buf[0] = 0x02;
+ buf[1] = 0x60;
+ buf[2] = 0xbe;
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x03;
+ buf[6] = 0x49;
+ buf[7] = led->number;
+ buf[8] = br;
+ buf[9] = 0x00;
+ buf[10] = 0;
+ buf[11] = 0;
+ buf[12] = 0;
+ buf[13] = 0;
+
+ ret = hid_hw_output_report(led->hdev, buf, 14);
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int winwing_init_led(struct hid_device *hdev,
+ struct input_dev *input)
+{
+ struct winwing_drv_data *data;
+ struct winwing_led *led;
+ int ret;
+ int i;
+
+ size_t data_size = struct_size(data, leds, 3);
+
+ data = devm_kzalloc(&hdev->dev, data_size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ data->report_buf = devm_kmalloc(&hdev->dev, MAX_REPORT, GFP_KERNEL);
+
+ if (!data->report_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < 3; i += 1) {
+ struct winwing_led_info *info = &led_info[i];
+
+ led = &data->leds[i];
+ led->hdev = hdev;
+ led->number = info->number;
+ led->cdev.max_brightness = info->max_brightness;
+ led->cdev.brightness_set_blocking = winwing_led_write;
+ led->cdev.flags = LED_HW_PLUGGABLE;
+ led->cdev.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s::%s",
+ dev_name(&input->dev),
+ info->led_name);
+
+ ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
+ if (ret)
+ return ret;
+ }
+
+ hid_set_drvdata(hdev, data);
+
+ return ret;
+}
+
+static int winwing_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int winwing_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ int ret;
+
+ ret = winwing_init_led(hdev, hidinput->input);
+
+ if (ret)
+ hid_err(hdev, "led init failed\n");
+
+ return ret;
+}
+
+static __u8 original_rdesc_buttons[] = {
+ 0x05, 0x09, 0x19, 0x01, 0x29, 0x6F,
+ 0x15, 0x00, 0x25, 0x01, 0x35, 0x00,
+ 0x45, 0x01, 0x75, 0x01, 0x95, 0x6F,
+ 0x81, 0x02, 0x75, 0x01, 0x95, 0x01,
+ 0x81, 0x01
+};
+
+/*
+ * The HID report descriptor declares 111 buttons, which exceeds the
+ * maximum number of buttons (80) supported by the Linux kernel HID
+ * subsystem.
+ *
+ * This driver skips button numbers 32-63, which are unused on some
+ * throttle grips.
+ */
+
+static __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ int sig_length = sizeof(original_rdesc_buttons);
+ int unused_button_numbers = 32;
+
+ if (*rsize < 34)
+ return rdesc;
+
+ if (memcmp(rdesc + 8, original_rdesc_buttons, sig_length) == 0) {
+
+ /* Usage Maximum */
+ rdesc[13] -= unused_button_numbers;
+
+ /* Report Count for buttons */
+ rdesc[25] -= unused_button_numbers;
+
+ /* Report Count for padding [HID1_11, 6.2.2.9] */
+ rdesc[31] += unused_button_numbers;
+
+ hid_info(hdev, "winwing descriptor fixed\n");
+ }
+
+ return rdesc;
+}
+
+static int winwing_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ if (size >= 15) {
+ /* Skip buttons 32 .. 63 */
+ memmove(raw_data + 5, raw_data + 9, 6);
+
+ /* Clear the padding */
+ memset(raw_data + 11, 0, 4);
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id winwing_devices[] = {
+ { HID_USB_DEVICE(0x4098, 0xbe62) }, /* TGRIP-18 */
+ { HID_USB_DEVICE(0x4098, 0xbe68) }, /* TGRIP-16EX */
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, winwing_devices);
+
+static struct hid_driver winwing_driver = {
+ .name = "winwing",
+ .id_table = winwing_devices,
+ .probe = winwing_probe,
+ .input_configured = winwing_input_configured,
+ .report_fixup = winwing_report_fixup,
+ .raw_event = winwing_raw_event,
+};
+module_hid_driver(winwing_driver);
+
+MODULE_LICENSE("GPL");
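[Editor's note] To make the report_fixup()/raw_event() pair above concrete: the matched descriptor fragment carries Usage Maximum 0x6F (111) at rdesc[13], a button Report Count of 111 at rdesc[25] and a 1-bit padding Report Count at rdesc[31]. Dropping the 32 skipped button numbers gives 111 - 32 = 79 buttons (under the 80-button limit) and 1 + 32 = 33 padding bits, so the 112-bit field layout and the report size are unchanged; winwing_raw_event() then shifts the bytes holding the upper buttons down by 4 bytes and zeroes the vacated tail so the data lines up with the patched descriptor.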
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2df1ab3c31cc..632eaf9e11a6 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -164,6 +163,24 @@ static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
return quirks;
}
+static int i2c_hid_probe_address(struct i2c_hid *ihid)
+{
+ int ret;
+
+ /*
+ * Some STM-based devices need 400µs after a rising clock edge to wake
+ * from deep sleep, in which case the first read will fail. Try after a
+ * short sleep to see if the device came alive on the bus. Certain
+ * Weida Tech devices also need this.
+ */
+ ret = i2c_smbus_read_byte(ihid->client);
+ if (ret < 0) {
+ usleep_range(400, 500);
+ ret = i2c_smbus_read_byte(ihid->client);
+ }
+ return ret < 0 ? ret : 0;
+}
+
static int i2c_hid_xfer(struct i2c_hid *ihid,
u8 *send_buf, int send_len, u8 *recv_buf, int recv_len)
{
@@ -190,15 +207,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -390,26 +402,11 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
- /*
- * Some devices require to send a command to wakeup before power on.
- * The call will get a return value (EREMOTEIO) but device will be
- * triggered and activated. After that, it goes like a normal device.
- */
- if (power_state == I2C_HID_PWR_ON) {
- ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
-
- /* Device was already activated */
- if (!ret)
- goto set_pwr_exit;
- }
-
ret = i2c_hid_set_power_command(ihid, power_state);
if (ret)
dev_err(&ihid->client->dev,
"failed to change power setting.\n");
-set_pwr_exit:
-
/*
* The HID over I2C specification states that if a DEVICE needs time
* after the PWR_ON request, it should utilise CLOCK stretching.
@@ -556,9 +553,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
@@ -735,12 +729,15 @@ static int i2c_hid_parse(struct hid_device *hid)
mutex_lock(&ihid->reset_lock);
do {
ret = i2c_hid_start_hwreset(ihid);
- if (ret)
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ else
msleep(1000);
} while (tries-- > 0 && ret);
+ mutex_unlock(&ihid->reset_lock);
if (ret)
- goto abort_reset;
+ return ret;
use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
&rsize);
@@ -750,11 +747,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else {
rdesc = kzalloc(rsize, GFP_KERNEL);
-
- if (!rdesc) {
- ret = -ENOMEM;
- goto abort_reset;
- }
+ if (!rdesc)
+ return -ENOMEM;
i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@@ -763,23 +757,10 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
- goto abort_reset;
+ goto out;
}
}
- /*
- * Windows directly reads the report-descriptor after sending reset
- * and then waits for resets completion afterwards. Some touchpads
- * actually wait for the report-descriptor to be read before signalling
- * reset completion.
- */
- ret = i2c_hid_finish_hwreset(ihid);
-abort_reset:
- clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
- mutex_unlock(&ihid->reset_lock);
- if (ret)
- goto out;
-
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
@@ -981,6 +962,14 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
enable_irq(client->irq);
+ /* Make sure the device is awake on the bus */
+ ret = i2c_hid_probe_address(ihid);
+ if (ret < 0) {
+ dev_err(&client->dev, "nothing at address after resume: %d\n",
+ ret);
+ return -ENXIO;
+ }
+
/* Instead of resetting device, simply powers the device on. This
* solves "incomplete reports" on Raydium devices 2386:3118 and
* 2386:4B33 and fixes various SIS touchscreens no longer sending
@@ -1014,8 +1003,7 @@ static int __i2c_hid_core_probe(struct i2c_hid *ihid)
struct hid_device *hid = ihid->hid;
int ret;
- /* Make sure there is something at this address */
- ret = i2c_smbus_read_byte(client);
+ ret = i2c_hid_probe_address(ihid);
if (ret < 0) {
i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
return -ENXIO;
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index f0a82b1c7cb9..e1e062e4b542 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -11,6 +11,7 @@ intel-ishtp-objs += ishtp/client.o
intel-ishtp-objs += ishtp/bus.o
intel-ishtp-objs += ishtp/dma-if.o
intel-ishtp-objs += ishtp/client-buffers.o
+intel-ishtp-objs += ishtp/loader.o
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-ipc.o
intel-ish-ipc-objs := ipc/ipc.o
@@ -23,4 +24,4 @@ intel-ishtp-hid-objs += ishtp-hid-client.o
obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
intel-ishtp-loader-objs += ishtp-fw-loader.o
-ccflags-y += -I $(srctree)/$(src)/ishtp
+ccflags-y += -I $(src)/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index f89b300417d7..cdd80c653918 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -13,28 +13,29 @@
#include "hw-ish-regs.h"
#include "ishtp-dev.h"
-#define CHV_DEVICE_ID 0x22D8
-#define BXT_Ax_DEVICE_ID 0x0AA2
-#define BXT_Bx_DEVICE_ID 0x1AA2
-#define APL_Ax_DEVICE_ID 0x5AA2
-#define SPT_Ax_DEVICE_ID 0x9D35
-#define CNL_Ax_DEVICE_ID 0x9DFC
-#define GLK_Ax_DEVICE_ID 0x31A2
-#define CNL_H_DEVICE_ID 0xA37C
-#define ICL_MOBILE_DEVICE_ID 0x34FC
-#define SPT_H_DEVICE_ID 0xA135
-#define CML_LP_DEVICE_ID 0x02FC
-#define CMP_H_DEVICE_ID 0x06FC
-#define EHL_Ax_DEVICE_ID 0x4BB3
-#define TGL_LP_DEVICE_ID 0xA0FC
-#define TGL_H_DEVICE_ID 0x43FC
-#define ADL_S_DEVICE_ID 0x7AF8
-#define ADL_P_DEVICE_ID 0x51FC
-#define ADL_N_DEVICE_ID 0x54FC
-#define RPL_S_DEVICE_ID 0x7A78
-#define MTL_P_DEVICE_ID 0x7E45
-#define ARL_H_DEVICE_ID 0x7745
-#define ARL_S_DEVICE_ID 0x7F78
+#define PCI_DEVICE_ID_INTEL_ISH_CHV 0x22D8
+#define PCI_DEVICE_ID_INTEL_ISH_BXT_Ax 0x0AA2
+#define PCI_DEVICE_ID_INTEL_ISH_BXT_Bx 0x1AA2
+#define PCI_DEVICE_ID_INTEL_ISH_APL_Ax 0x5AA2
+#define PCI_DEVICE_ID_INTEL_ISH_SPT_Ax 0x9D35
+#define PCI_DEVICE_ID_INTEL_ISH_CNL_Ax 0x9DFC
+#define PCI_DEVICE_ID_INTEL_ISH_GLK_Ax 0x31A2
+#define PCI_DEVICE_ID_INTEL_ISH_CNL_H 0xA37C
+#define PCI_DEVICE_ID_INTEL_ISH_ICL_MOBILE 0x34FC
+#define PCI_DEVICE_ID_INTEL_ISH_SPT_H 0xA135
+#define PCI_DEVICE_ID_INTEL_ISH_CML_LP 0x02FC
+#define PCI_DEVICE_ID_INTEL_ISH_CMP_H 0x06FC
+#define PCI_DEVICE_ID_INTEL_ISH_EHL_Ax 0x4BB3
+#define PCI_DEVICE_ID_INTEL_ISH_TGL_LP 0xA0FC
+#define PCI_DEVICE_ID_INTEL_ISH_TGL_H 0x43FC
+#define PCI_DEVICE_ID_INTEL_ISH_ADL_S 0x7AF8
+#define PCI_DEVICE_ID_INTEL_ISH_ADL_P 0x51FC
+#define PCI_DEVICE_ID_INTEL_ISH_ADL_N 0x54FC
+#define PCI_DEVICE_ID_INTEL_ISH_RPL_S 0x7A78
+#define PCI_DEVICE_ID_INTEL_ISH_MTL_P 0x7E45
+#define PCI_DEVICE_ID_INTEL_ISH_ARL_H 0x7745
+#define PCI_DEVICE_ID_INTEL_ISH_ARL_S 0x7F78
+#define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd7c4..3cd53fc80634 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -78,7 +78,7 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
bool interrupt_generated = true;
uint32_t pisr_val = 0;
- if (dev->pdev->device == CHV_DEVICE_ID) {
+ if (dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV) {
pisr_val = ish_reg_read(dev, IPC_REG_PISR_CHV_AB);
interrupt_generated =
IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
@@ -117,7 +117,7 @@ static bool ish_is_input_ready(struct ishtp_device *dev)
*/
static void set_host_ready(struct ishtp_device *dev)
{
- if (dev->pdev->device == CHV_DEVICE_ID) {
+ if (dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV) {
if (dev->pdev->revision == REVISION_ID_CHT_A0 ||
(dev->pdev->revision & REVISION_ID_SI_MASK) ==
REVISION_ID_CHT_Ax_SI)
@@ -546,11 +546,11 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/**
* fw_reset_work_fn() - FW reset worker function
- * @unused: not used
+ * @work: Work item
*
* Call ish_fw_reset_handler to complete FW reset
*/
-static void fw_reset_work_fn(struct work_struct *unused)
+static void fw_reset_work_fn(struct work_struct *work)
{
int rv;
@@ -562,7 +562,8 @@ static void fw_reset_work_fn(struct work_struct *unused)
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
/* ISHTP notification in IPC_RESET sequence completion */
- ishtp_reset_compl_handler(ishtp_dev);
+ if (!work_pending(work))
+ ishtp_reset_compl_handler(ishtp_dev);
} else
dev_err(ishtp_dev->devc, "[ishtp-ish]: FW reset failed (%d)\n",
rv);
@@ -909,11 +910,11 @@ static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
*/
static bool _dma_no_cache_snooping(struct ishtp_device *dev)
{
- return (dev->pdev->device == EHL_Ax_DEVICE_ID ||
- dev->pdev->device == TGL_LP_DEVICE_ID ||
- dev->pdev->device == TGL_H_DEVICE_ID ||
- dev->pdev->device == ADL_S_DEVICE_ID ||
- dev->pdev->device == ADL_P_DEVICE_ID);
+ return (dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_EHL_Ax ||
+ dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_TGL_LP ||
+ dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_TGL_H ||
+ dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_ADL_S ||
+ dev->pdev->device == PCI_DEVICE_ID_INTEL_ISH_ADL_P);
}
static const struct ishtp_hw_ops ish_hw_ops = {
@@ -948,6 +949,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +985,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 56bd4f02f319..f82428d7f6c3 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -23,30 +23,44 @@
#include "ishtp-dev.h"
#include "hw-ish.h"
+enum ishtp_driver_data_index {
+ ISHTP_DRIVER_DATA_NONE,
+ ISHTP_DRIVER_DATA_LNL_M,
+};
+
+#define ISH_FW_FILENAME_LNL_M "intel/ish/ish_lnlm.bin"
+
+static struct ishtp_driver_data ishtp_driver_data[] = {
+ [ISHTP_DRIVER_DATA_LNL_M] = {
+ .fw_filename = ISH_FW_FILENAME_LNL_M,
+ },
+};
+
static const struct pci_device_id ish_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CHV_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Bx_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, APL_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_S_DEVICE_ID)},
- {0, }
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CHV)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_BXT_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_BXT_Bx)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_APL_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_SPT_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CNL_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_GLK_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CNL_H)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ICL_MOBILE)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_SPT_H)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CML_LP)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_CMP_H)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_TGL_LP)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_TGL_H)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_S)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_P)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ADL_N)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_RPL_S)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_MTL_P)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
+ {}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -105,19 +119,19 @@ static int ish_init(struct ishtp_device *dev)
static const struct pci_device_id ish_invalid_pci_ids[] = {
/* Mehlow platform special pci ids */
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A)},
+ {PCI_VDEVICE(INTEL, 0xA309)},
+ {PCI_VDEVICE(INTEL, 0xA30A)},
{}
};
static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
{
- return !pm_suspend_via_firmware() || pdev->device == CHV_DEVICE_ID;
+ return !pm_suspend_via_firmware() || pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV;
}
static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
{
- return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
+ return !pm_resume_via_firmware() || pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV;
}
/**
@@ -166,6 +180,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
hw = to_ish_hw(ishtp);
ishtp->print_log = ish_event_tracer;
+ ishtp->driver_data = &ishtp_driver_data[ent->driver_data];
/* mapping IO device memory */
hw->mem_addr = pcim_iomap_table(pdev)[0];
@@ -173,6 +188,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* request and enable interrupt */
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
+ return ret;
+ }
+
if (!pdev->msi_enabled && !pdev->msix_enabled)
irq_flag = IRQF_SHARED;
@@ -189,7 +209,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&ishtp->resume_wait);
/* Enable PME for EHL */
- if (pdev->device == EHL_Ax_DEVICE_ID)
+ if (pdev->device == PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)
device_init_wakeup(dev, true);
ret = ish_init(ishtp);
@@ -222,7 +242,7 @@ static void ish_remove(struct pci_dev *pdev)
*/
static void ish_shutdown(struct pci_dev *pdev)
{
- if (pdev->device == EHL_Ax_DEVICE_ID)
+ if (pdev->device == PCI_DEVICE_ID_INTEL_ISH_EHL_Ax)
pci_prepare_to_sleep(pdev);
}
@@ -376,3 +396,5 @@ MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver");
MODULE_LICENSE("GPL");
+
+MODULE_FIRMWARE(ISH_FW_FILENAME_LNL_M);
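The new LNL-M table entry shows the usual way driver_data carries an index into per-variant data that probe() then resolves. A minimal sketch of the pattern, with hypothetical names and a hypothetical device ID (0x1234):

#include <linux/pci.h>

struct demo_variant {
        const char *fw_filename;
};

enum { DEMO_VARIANT_NONE, DEMO_VARIANT_LNL_M };

static const struct demo_variant demo_variants[] = {
        [DEMO_VARIANT_LNL_M] = { .fw_filename = "intel/ish/ish_lnlm.bin" },
};

static const struct pci_device_id demo_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1234), .driver_data = DEMO_VARIANT_LNL_M },
        { }
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct demo_variant *variant = &demo_variants[ent->driver_data];

        /* variant->fw_filename is NULL for table entries without driver_data */
        return 0;
}

Entries without an explicit .driver_data index slot 0, whose fields are zero-initialised, which is why ISHTP_DRIVER_DATA_NONE needs no initialiser of its own.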
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 9c031a06e4c4..8ee5467127d8 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -13,6 +13,7 @@
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
+#include "loader.h"
/**
* ishtp_hbm_fw_cl_allocate() - Allocate FW clients
@@ -570,6 +571,10 @@ void ishtp_hbm_dispatch(struct ishtp_device *dev,
return;
}
+ /* Start firmware loading process if it has loader capability */
+ if (version_res->host_version_supported & ISHTP_SUPPORT_CAP_LOADER)
+ schedule_work(&dev->work_fw_loader);
+
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
@@ -865,6 +870,20 @@ eoi:
}
/**
+ * ishtp_loader_recv_msg() - Receive a message from the ISHTP device
+ * @dev: The ISHTP device
+ * @buf: The buffer containing the message
+ */
+static void ishtp_loader_recv_msg(struct ishtp_device *dev, void *buf)
+{
+ if (dev->fw_loader_rx_buf)
+ memcpy(dev->fw_loader_rx_buf, buf, dev->fw_loader_rx_size);
+
+ dev->fw_loader_received = true;
+ wake_up_interruptible(&dev->wait_loader_recvd_msg);
+}
+
+/**
* recv_fixed_cl_msg() - Receive fixed client message
* @dev: ISHTP device instance
* @ishtp_hdr: received bus message
@@ -890,6 +909,8 @@ void recv_fixed_cl_msg(struct ishtp_device *dev,
else
dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
msg_hdr->cmd);
+ } else if (ishtp_hdr->fw_addr == ISHTP_LOADER_CLIENT_ADDR) {
+ ishtp_loader_recv_msg(dev, rd_msg_buf);
}
}
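ishtp_loader_recv_msg() above copies the incoming message into a buffer supplied by the requester, sets a flag and wakes a wait queue; the sending side (loader_xfer_cmd() later in this series) waits on that flag with a timeout. A minimal sketch of the pairing, with hypothetical names:

#include <linux/wait.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_link {
        wait_queue_head_t waitq;
        bool received;
        void *rx_buf;
        int rx_size;
};

/* called from the receive/dispatch path */
static void demo_recv(struct demo_link *link, const void *msg)
{
        if (link->rx_buf)
                memcpy(link->rx_buf, msg, link->rx_size);
        link->received = true;
        wake_up_interruptible(&link->waitq);
}

/* called by the requester after it has sent a command */
static int demo_wait_response(struct demo_link *link, unsigned long timeout)
{
        wait_event_interruptible_timeout(link->waitq, link->received, timeout);
        return link->received ? 0 : -ETIMEDOUT;
}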
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index 02a00cc2dd11..07fdd52e4c5e 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -5,12 +5,14 @@
* Copyright (c) 2003-2016, Intel Corporation.
*/
+#include <linux/devm-helpers.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
+#include "loader.h"
/**
* ishtp_dev_state_str() -Convert to string format
@@ -51,6 +53,8 @@ const char *ishtp_dev_state_str(int state)
*/
void ishtp_device_init(struct ishtp_device *dev)
{
+ int ret;
+
dev->dev_state = ISHTP_DEV_INITIALIZING;
INIT_LIST_HEAD(&dev->cl_list);
INIT_LIST_HEAD(&dev->device_list);
@@ -59,6 +63,7 @@ void ishtp_device_init(struct ishtp_device *dev)
spin_lock_init(&dev->rd_msg_spinlock);
init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+ init_waitqueue_head(&dev->wait_loader_recvd_msg);
spin_lock_init(&dev->read_list_spinlock);
spin_lock_init(&dev->device_lock);
spin_lock_init(&dev->device_list_lock);
@@ -76,6 +81,9 @@ void ishtp_device_init(struct ishtp_device *dev)
INIT_LIST_HEAD(&dev->read_list.list);
+ ret = devm_work_autocancel(dev->devc, &dev->work_fw_loader, ishtp_loader_work);
+ if (ret)
+ dev_err_probe(dev->devc, ret, "Failed to initialise FW loader work\n");
}
EXPORT_SYMBOL(ishtp_device_init);
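devm_work_autocancel(), used above for work_fw_loader, both initialises the work item and registers a devres action that cancels it when the device is detached, so no explicit cancel_work_sync() is needed on the remove path. A minimal usage sketch with hypothetical names:

#include <linux/devm-helpers.h>
#include <linux/workqueue.h>
#include <linux/device.h>

struct demo_dev {
        struct work_struct loader_work;
};

static void demo_loader_work(struct work_struct *work)
{
        /* load firmware, talk to the device, ... */
}

static int demo_init(struct device *dev, struct demo_dev *ddev)
{
        /* the work is cancelled automatically when dev goes away */
        return devm_work_autocancel(dev, &ddev->loader_work, demo_loader_work);
}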
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 32142c7d9a04..181838c3d7ac 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -123,11 +123,28 @@ struct ishtp_hw_ops {
};
/**
+ * struct ishtp_driver_data - Driver-specific data for ISHTP devices
+ *
+ * This structure holds driver-specific data that can be associated with each
+ * ISHTP device instance. It allows for the storage of data that is unique to
+ * a particular driver or hardware variant.
+ *
+ * @fw_filename: The firmware filename associated with a specific hardware
+ * variant of the Intel Integrated Sensor Hub (ISH). This allows
+ * the driver to load the correct firmware based on the device's
+ * hardware variant.
+ */
+struct ishtp_driver_data {
+ char *fw_filename;
+};
+
+/**
* struct ishtp_device - ISHTP private device struct
*/
struct ishtp_device {
struct device *devc; /* pointer to lowest device */
struct pci_dev *pdev; /* PCI device to get device ids */
+ struct ishtp_driver_data *driver_data; /* pointer to driver-specific data */
/* waitq for waiting for suspend response */
wait_queue_head_t suspend_wait;
@@ -147,6 +164,17 @@ struct ishtp_device {
struct hbm_version version;
int transfer_path; /* Choice of transfer path: IPC or DMA */
+ /* work structure for scheduling firmware loading tasks */
+ struct work_struct work_fw_loader;
+ /* waitq for waiting for command response from the firmware loader */
+ wait_queue_head_t wait_loader_recvd_msg;
+ /* indicating whether a message from the firmware loader has been received */
+ bool fw_loader_received;
+ /* pointer to a buffer for receiving messages from the firmware loader */
+ void *fw_loader_rx_buf;
+ /* size of the buffer pointed to by fw_loader_rx_buf */
+ int fw_loader_rx_size;
+
/* ishtp device states */
enum ishtp_dev_state dev_state;
enum ishtp_hbm_state hbm_state;
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c
new file mode 100644
index 000000000000..993f8b390e57
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP firmware loader function
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ *
+ * This module implements the functionality to load the main ISH firmware from the host, starting
+ * with the Lunar Lake generation. It leverages a new method that enhances space optimization and
+ * flexibility by dividing the ISH firmware into a bootloader and main firmware.
+ *
+ * Please refer to the [Documentation](Documentation/hid/intel-ish-hid.rst) for the details on
+ * flows.
+ *
+ * Additionally, the following potential error scenarios are handled to ensure graceful failure:
+ * - Firmware Image Not Found:
+ * Occurs when `request_firmware()` cannot locate the firmware image. The ISH firmware will
+ * remain in a state awaiting firmware loading from the host, with no further action from
+ * the ISHTP driver.
+ * Recovery: Re-insmodding the ISH drivers allows a retry of the firmware loading from the host.
+ *
+ * - DMA Buffer Allocation Failure:
+ * This happens if allocating a DMA buffer during `prepare_dma_bufs()` fails. The ISH firmware
+ * will stay in a waiting state, and the ISHTP driver will release any allocated DMA buffers and
+ * firmware without further action.
+ * Recovery: Re-insmodding the ISH drivers allows a retry of the firmware loading from the host.
+ *
+ * - Incorrect Firmware Image:
+ * Using an incorrect firmware image will initiate the firmware loading process but will
+ * eventually be refused by the ISH firmware after three unsuccessful attempts, indicated by
+ * returning an error code. The ISHTP driver will stop attempting after three tries.
+ * Recovery: A platform reset is required to retry firmware loading from the host.
+ */
+
+#define dev_fmt(fmt) "ISH loader: " fmt
+
+#include <linux/cacheflush.h>
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/gfp_types.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/pfn.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "hbm.h"
+#include "loader.h"
+
+/**
+ * loader_write_message() - Write a message to the ISHTP device
+ * @dev: The ISHTP device
+ * @buf: The buffer containing the message
+ * @len: The length of the message
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int loader_write_message(struct ishtp_device *dev, void *buf, int len)
+{
+ struct ishtp_msg_hdr ishtp_hdr = {
+ .fw_addr = ISHTP_LOADER_CLIENT_ADDR,
+ .length = len,
+ .msg_complete = 1,
+ };
+
+ dev->fw_loader_received = false;
+
+ return ishtp_write_message(dev, &ishtp_hdr, buf);
+}
+
+/**
+ * loader_xfer_cmd() - Transfer a command to the ISHTP device
+ * @dev: The ISHTP device
+ * @req: The request buffer
+ * @req_len: The length of the request
+ * @resp: The response buffer
+ * @resp_len: The length of the response
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len,
+ void *resp, int resp_len)
+{
+ struct loader_msg_header *req_hdr = req;
+ struct loader_msg_header *resp_hdr = resp;
+ struct device *devc = dev->devc;
+ int rv;
+
+ dev->fw_loader_rx_buf = resp;
+ dev->fw_loader_rx_size = resp_len;
+
+ rv = loader_write_message(dev, req, req_len);
+ if (rv < 0) {
+ dev_err(devc, "write cmd %u failed:%d\n", req_hdr->command, rv);
+ return rv;
+ }
+
+ /* Wait for the ACK */
+ wait_event_interruptible_timeout(dev->wait_loader_recvd_msg, dev->fw_loader_received,
+ ISHTP_LOADER_TIMEOUT);
+ dev->fw_loader_rx_size = 0;
+ dev->fw_loader_rx_buf = NULL;
+ if (!dev->fw_loader_received) {
+ dev_err(devc, "wait response of cmd %u timeout\n", req_hdr->command);
+ return -ETIMEDOUT;
+ }
+
+ if (!resp_hdr->is_response) {
+ dev_err(devc, "not a response for %u\n", req_hdr->command);
+ return -EBADMSG;
+ }
+
+ if (req_hdr->command != resp_hdr->command) {
+ dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr->command,
+ resp_hdr->command);
+ return -EBADMSG;
+ }
+
+ if (resp_hdr->status) {
+ dev_err(devc, "cmd %u failed %u\n", req_hdr->command, resp_hdr->status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * release_dma_bufs() - Release the DMA buffer for transferring firmware fragments
+ * @dev: The ISHTP device
+ * @fragment: The ISHTP firmware fragment descriptor
+ * @dma_bufs: The array of DMA fragment buffers
+ * @fragment_size: The size of a single DMA fragment
+ */
+static void release_dma_bufs(struct ishtp_device *dev,
+ struct loader_xfer_dma_fragment *fragment,
+ void **dma_bufs, u32 fragment_size)
+{
+ int i;
+
+ for (i = 0; i < FRAGMENT_MAX_NUM; i++) {
+ if (dma_bufs[i]) {
+ dma_free_coherent(dev->devc, fragment_size, dma_bufs[i],
+ fragment->fragment_tbl[i].ddr_adrs);
+ dma_bufs[i] = NULL;
+ }
+ }
+}
+
+/**
+ * prepare_dma_bufs() - Prepare the DMA buffer for transferring firmware fragments
+ * @dev: The ISHTP device
+ * @ish_fw: The ISH firmware
+ * @fragment: The ISHTP firmware fragment descriptor
+ * @dma_bufs: The array of DMA fragment buffers
+ * @fragment_size: The size of a single DMA fragment
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int prepare_dma_bufs(struct ishtp_device *dev,
+ const struct firmware *ish_fw,
+ struct loader_xfer_dma_fragment *fragment,
+ void **dma_bufs, u32 fragment_size)
+{
+ u32 offset = 0;
+ int i;
+
+ for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) {
+ dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size,
+ &fragment->fragment_tbl[i].ddr_adrs, GFP_KERNEL);
+ if (!dma_bufs[i])
+ return -ENOMEM;
+
+ fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size);
+ fragment->fragment_tbl[i].fw_off = offset;
+ memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length);
+ clflush_cache_range(dma_bufs[i], fragment_size);
+
+ offset += fragment->fragment_tbl[i].length;
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_loader_work() - Load the ISHTP firmware
+ * @work: The work structure
+ *
+ * The ISH Loader attempts to load firmware by sending a series of commands
+ * to the ISH device. If a command fails to be acknowledged by the ISH device,
+ * the loader will retry sending the command, up to a maximum of
+ * ISHTP_LOADER_RETRY_TIMES.
+ *
+ * After the maximum number of retries has been reached without success, the
+ * ISH bootloader will return an error status code and will no longer respond
+ * to the driver's commands. This behavior indicates that the ISH Loader has
+ * encountered a critical error during the firmware loading process.
+ *
+ * In such a case, where the ISH bootloader is unresponsive after all retries
+ * have been exhausted, a platform reset is required to restore communication
+ * with the ISH device and to recover from this error state.
+ */
+void ishtp_loader_work(struct work_struct *work)
+{
+ DEFINE_RAW_FLEX(struct loader_xfer_dma_fragment, fragment, fragment_tbl, FRAGMENT_MAX_NUM);
+ struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader);
+ struct loader_xfer_query query = {
+ .header.command = LOADER_CMD_XFER_QUERY,
+ };
+ struct loader_start start = {
+ .header.command = LOADER_CMD_START,
+ };
+ union loader_recv_message recv_msg;
+ char *filename = dev->driver_data->fw_filename;
+ const struct firmware *ish_fw;
+ void *dma_bufs[FRAGMENT_MAX_NUM] = {};
+ u32 fragment_size;
+ int retry = ISHTP_LOADER_RETRY_TIMES;
+ int rv;
+
+ rv = request_firmware(&ish_fw, filename, dev->devc);
+ if (rv < 0) {
+ dev_err(dev->devc, "request firmware %s failed:%d\n", filename, rv);
+ return;
+ }
+
+ fragment->fragment.header.command = LOADER_CMD_XFER_FRAGMENT;
+ fragment->fragment.xfer_mode = LOADER_XFER_MODE_DMA;
+ fragment->fragment.is_last = 1;
+ fragment->fragment.size = ish_fw->size;
+ /* Calculate the size of a single DMA fragment */
+ fragment_size = PFN_ALIGN(DIV_ROUND_UP(ish_fw->size, FRAGMENT_MAX_NUM));
+ /* Calculate the count of DMA fragments */
+ fragment->fragment_cnt = DIV_ROUND_UP(ish_fw->size, fragment_size);
+
+ rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size);
+ if (rv) {
+ dev_err(dev->devc, "prepare DMA buffer failed.\n");
+ goto out;
+ }
+
+ do {
+ query.image_size = ish_fw->size;
+ rv = loader_xfer_cmd(dev, &query, sizeof(query), recv_msg.raw_data,
+ sizeof(struct loader_xfer_query_ack));
+ if (rv)
+ continue; /* try again if failed */
+
+ dev_dbg(dev->devc, "ISH Version %u.%u.%u.%u\n",
+ recv_msg.query_ack.version_major,
+ recv_msg.query_ack.version_minor,
+ recv_msg.query_ack.version_hotfix,
+ recv_msg.query_ack.version_build);
+
+ rv = loader_xfer_cmd(dev, fragment,
+ struct_size(fragment, fragment_tbl, fragment->fragment_cnt),
+ recv_msg.raw_data, sizeof(struct loader_xfer_fragment_ack));
+ if (rv)
+ continue; /* try again if failed */
+
+ rv = loader_xfer_cmd(dev, &start, sizeof(start), recv_msg.raw_data,
+ sizeof(struct loader_start_ack));
+ if (rv)
+ continue; /* try again if failed */
+
+ dev_info(dev->devc, "firmware loaded. size:%zu\n", ish_fw->size);
+ break;
+ } while (--retry);
+
+out:
+ release_dma_bufs(dev, fragment, dma_bufs, fragment_size);
+ release_firmware(ish_fw);
+}
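The fragment sizing in ishtp_loader_work() rounds the per-fragment size up to a page boundary and then derives the fragment count from it. A standalone arithmetic sketch with hypothetical values (the real FRAGMENT_MAX_NUM is derived from IPC_PAYLOAD_SIZE in loader.h; PFN_ALIGN is replicated locally here as a 4 KiB round-up):

#include <stdio.h>

#define PAGE_SIZE           4096UL
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define PFN_ALIGN(x)        (DIV_ROUND_UP((x), PAGE_SIZE) * PAGE_SIZE)

int main(void)
{
        unsigned long fw_size  = 600 * 1024;  /* hypothetical 600 KiB image */
        unsigned long frag_max = 7;           /* hypothetical FRAGMENT_MAX_NUM */

        unsigned long fragment_size = PFN_ALIGN(DIV_ROUND_UP(fw_size, frag_max));
        unsigned long fragment_cnt  = DIV_ROUND_UP(fw_size, fragment_size);

        /* 600 KiB / 7 = 87772 B, rounded up to 22 pages = 90112 B (88 KiB); 7 fragments */
        printf("fragment_size=%lu fragment_cnt=%lu\n", fragment_size, fragment_cnt);
        return 0;
}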
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h
new file mode 100644
index 000000000000..7aa45ebc3f7b
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP firmware loader header
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#ifndef _ISHTP_LOADER_H_
+#define _ISHTP_LOADER_H_
+
+#include <linux/bits.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+
+#include "ishtp-dev.h"
+
+struct work_struct;
+
+#define LOADER_MSG_SIZE \
+ (IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr))
+
+/*
+ * ISHTP firmware loader protocol definition
+ */
+#define LOADER_CMD_XFER_QUERY 0 /* SW -> FW */
+#define LOADER_CMD_XFER_FRAGMENT 1 /* SW -> FW */
+#define LOADER_CMD_START 2 /* SW -> FW */
+
+/* Only support DMA mode */
+#define LOADER_XFER_MODE_DMA BIT(0)
+
+/**
+ * struct loader_msg_header - ISHTP firmware loader message header
+ * @command: Command type
+ * @is_response: Indicates if the message is a response
+ * @has_next: Indicates if there is a next message
+ * @reserved: Reserved for future use
+ * @status: Status of the message
+ */
+struct loader_msg_header {
+ __le32 command:7;
+ __le32 is_response:1;
+ __le32 has_next:1;
+ __le32 reserved:15;
+ __le32 status:8;
+};
+
+/**
+ * struct loader_xfer_query - ISHTP firmware loader transfer query packet
+ * @header: Header of the message
+ * @image_size: Size of the image
+ */
+struct loader_xfer_query {
+ struct loader_msg_header header;
+ __le32 image_size;
+};
+
+/**
+ * struct loader_version - ISHTP firmware loader version
+ * @value: Value of the version
+ * @major: Major version
+ * @minor: Minor version
+ * @hotfix: Hotfix version
+ * @build: Build version
+ */
+struct loader_version {
+ union {
+ __le32 value;
+ struct {
+ __u8 major;
+ __u8 minor;
+ __u8 hotfix;
+ __u8 build;
+ };
+ };
+};
+
+/**
+ * struct loader_capability - ISHTP firmware loader capability
+ * @max_fw_image_size: Maximum firmware image size
+ * @support_mode: Support mode
+ * @reserved: Reserved for future use
+ * @platform: Platform
+ * @max_dma_buf_size: Maximum DMA buffer size, multiples of 4096
+ */
+struct loader_capability {
+ __le32 max_fw_image_size;
+ __le16 support_mode;
+ __u8 reserved;
+ __u8 platform;
+ __le32 max_dma_buf_size;
+};
+
+/**
+ * struct loader_xfer_query_ack - ISHTP firmware loader transfer query acknowledgment
+ * @header: Header of the message
+ * @version_major: ISH Major version
+ * @version_minor: ISH Minor version
+ * @version_hotfix: ISH Hotfix version
+ * @version_build: ISH Build version
+ * @protocol_version: Protocol version
+ * @loader_version: Loader version
+ * @capability: Loader capability
+ */
+struct loader_xfer_query_ack {
+ struct loader_msg_header header;
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 version_hotfix;
+ __le16 version_build;
+ __le32 protocol_version;
+ struct loader_version loader_version;
+ struct loader_capability capability;
+};
+
+/**
+ * struct loader_xfer_fragment - ISHTP firmware loader transfer fragment
+ * @header: Header of the message
+ * @xfer_mode: Transfer mode
+ * @offset: Offset
+ * @size: Size
+ * @is_last: Is last
+ */
+struct loader_xfer_fragment {
+ struct loader_msg_header header;
+ __le32 xfer_mode;
+ __le32 offset;
+ __le32 size;
+ __le32 is_last;
+};
+
+/**
+ * struct loader_xfer_fragment_ack - ISHTP firmware loader transfer fragment acknowledgment
+ * @header: Header of the message
+ */
+struct loader_xfer_fragment_ack {
+ struct loader_msg_header header;
+};
+
+/**
+ * struct fragment_dscrpt - ISHTP firmware loader fragment descriptor
+ * @ddr_adrs: The address in host DDR
+ * @fw_off: The offset of the fragment in the fw image
+ * @length: The length of the fragment
+ */
+struct fragment_dscrpt {
+ __le64 ddr_adrs;
+ __le32 fw_off;
+ __le32 length;
+};
+
+#define FRAGMENT_MAX_NUM \
+ ((LOADER_MSG_SIZE - sizeof(struct loader_xfer_dma_fragment)) / \
+ sizeof(struct fragment_dscrpt))
+
+/**
+ * struct loader_xfer_dma_fragment - ISHTP firmware loader transfer DMA fragment
+ * @fragment: Fragment
+ * @fragment_cnt: How many descriptors in the fragment_tbl
+ * @fragment_tbl: Fragment table
+ */
+struct loader_xfer_dma_fragment {
+ struct loader_xfer_fragment fragment;
+ __le32 fragment_cnt;
+ struct fragment_dscrpt fragment_tbl[] __counted_by(fragment_cnt);
+};
+
+/**
+ * struct loader_start - ISHTP firmware loader start
+ * @header: Header of the message
+ */
+struct loader_start {
+ struct loader_msg_header header;
+};
+
+/**
+ * struct loader_start_ack - ISHTP firmware loader start acknowledgment
+ * @header: Header of the message
+ */
+struct loader_start_ack {
+ struct loader_msg_header header;
+};
+
+union loader_recv_message {
+ struct loader_xfer_query_ack query_ack;
+ struct loader_xfer_fragment_ack fragment_ack;
+ struct loader_start_ack start_ack;
+ __u8 raw_data[LOADER_MSG_SIZE];
+};
+
+/*
+ * ISHTP firmware loader internal use
+ */
+/* ISHTP firmware loader command timeout */
+#define ISHTP_LOADER_TIMEOUT msecs_to_jiffies(100)
+
+/* ISHTP firmware loader retry times */
+#define ISHTP_LOADER_RETRY_TIMES 3
+
+/**
+ * struct ish_firmware_variant - ISH firmware variant
+ * @device: PCI Device ID
+ * @filename: The firmware file name
+ */
+struct ish_firmware_variant {
+ unsigned short device;
+ const char *filename;
+};
+
+/*
+ * ISHTP firmware loader API for ISHTP hbm
+ */
+
+/* ISHTP capability bit for firmware loader */
+#define ISHTP_SUPPORT_CAP_LOADER BIT(4)
+
+/* Firmware loader address */
+#define ISHTP_LOADER_CLIENT_ADDR 16
+
+/**
+ * ishtp_loader_work - The work function to start the firmware loading process
+ * @work: The work structure
+ */
+void ishtp_loader_work(struct work_struct *work);
+
+#endif /* _ISHTP_LOADER_H_ */
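The loader_msg_header above packs command, is_response, has_next, reserved and status into one little-endian 32-bit word using __le32 bitfields. As an illustration only (bit positions follow the declaration order — command bits 0-6, is_response bit 7, has_next bit 8, status bits 24-31 — and the exact bitfield layout is compiler/ABI dependent, so this is not a wire-format guarantee), the same fields expressed with explicit masks and shifts:

#include <stdint.h>

#define LDR_CMD_MASK      0x7Fu
#define LDR_IS_RESPONSE   (1u << 7)
#define LDR_HAS_NEXT      (1u << 8)
#define LDR_STATUS_SHIFT  24

static inline uint32_t ldr_hdr_pack(uint32_t cmd, int response, uint32_t status)
{
        return (cmd & LDR_CMD_MASK) |
               (response ? LDR_IS_RESPONSE : 0) |
               (status << LDR_STATUS_SHIFT);
}

static inline uint32_t ldr_hdr_status(uint32_t hdr)
{
        return hdr >> LDR_STATUS_SHIFT;
}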
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
index 4fbce201db6a..8c0cbb2deb11 100644
--- a/drivers/hid/surface-hid/surface_kbd.c
+++ b/drivers/hid/surface-hid/surface_kbd.c
@@ -271,10 +271,9 @@ static int surface_kbd_probe(struct platform_device *pdev)
return surface_hid_device_add(shid);
}
-static int surface_kbd_remove(struct platform_device *pdev)
+static void surface_kbd_remove(struct platform_device *pdev)
{
surface_hid_device_destroy(platform_get_drvdata(pdev));
- return 0;
}
static const struct acpi_device_id surface_kbd_match[] = {
@@ -285,7 +284,7 @@ MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
static struct platform_driver surface_kbd_driver = {
.probe = surface_kbd_probe,
- .remove = surface_kbd_remove,
+ .remove_new = surface_kbd_remove,
.driver = {
.name = "surface_keyboard",
.acpi_match_table = surface_kbd_match,
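This and the following hunks convert platform drivers from .remove to .remove_new, whose callback returns void. A minimal sketch of the converted shape, with a hypothetical driver name:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        /* tear down resources; there is nothing useful to return */
}

static struct platform_driver demo_driver = {
        .probe      = demo_probe,
        .remove_new = demo_remove,
        .driver     = {
                .name = "demo",
        },
};
module_platform_driver(demo_driver);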
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 6802efb4d6cd..3140990a6164 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -547,7 +547,7 @@ out1:
return err;
}
-static int ssi_remove(struct platform_device *pd)
+static void ssi_remove(struct platform_device *pd)
{
struct hsi_controller *ssi = platform_get_drvdata(pd);
@@ -561,8 +561,6 @@ static int ssi_remove(struct platform_device *pd)
platform_set_drvdata(pd, NULL);
pm_runtime_disable(&pd->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -618,7 +616,7 @@ MODULE_DEVICE_TABLE(of, omap_ssi_of_match);
static struct platform_driver ssi_pdriver = {
.probe = ssi_probe,
- .remove = ssi_remove,
+ .remove_new = ssi_remove,
.driver = {
.name = "omap_ssi",
.pm = DEV_PM_OPS,
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index c78d9c9f7371..f0b3eca7376e 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1224,7 +1224,7 @@ error:
return err;
}
-static int ssi_port_remove(struct platform_device *pd)
+static void ssi_port_remove(struct platform_device *pd)
{
struct hsi_port *port = platform_get_drvdata(pd);
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
@@ -1251,8 +1251,6 @@ static int ssi_port_remove(struct platform_device *pd)
pm_runtime_dont_use_autosuspend(&pd->dev);
pm_runtime_disable(&pd->dev);
-
- return 0;
}
static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
@@ -1387,7 +1385,7 @@ MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
struct platform_driver ssi_port_pdriver = {
.probe = ssi_port_probe,
- .remove = ssi_port_remove,
+ .remove_new = ssi_port_remove,
.driver = {
.name = "omap_ssi_port",
.of_match_table = omap_ssi_port_of_match,
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
index ab2edff018eb..8ee038ccf601 100644
--- a/drivers/hte/hte-tegra194-test.c
+++ b/drivers/hte/hte-tegra194-test.c
@@ -214,7 +214,7 @@ out:
return ret;
}
-static int tegra_hte_test_remove(struct platform_device *pdev)
+static void tegra_hte_test_remove(struct platform_device *pdev)
{
(void)pdev;
@@ -222,13 +222,11 @@ static int tegra_hte_test_remove(struct platform_device *pdev)
gpiod_put(hte.gpio_in);
gpiod_put(hte.gpio_out);
del_timer_sync(&hte.timer);
-
- return 0;
}
static struct platform_driver tegra_hte_test_driver = {
.probe = tegra_hte_test_probe,
- .remove = tegra_hte_test_remove,
+ .remove_new = tegra_hte_test_remove,
.driver = {
.name = "tegra_hte_test",
.of_match_table = tegra_hte_test_of_match,
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index adbf674355b2..fb8cd8469328 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
hv_ringbuffer_cleanup(&channel->inbound);
if (channel->ringbuffer_page) {
- __free_pages(channel->ringbuffer_page,
+ /* In a CoCo VM leak the memory if it didn't get re-encrypted */
+ if (!channel->ringbuffer_gpadlhandle.decrypted)
+ __free_pages(channel->ringbuffer_page,
get_order(channel->ringbuffer_pagecount
<< PAGE_SHIFT));
channel->ringbuffer_page = NULL;
@@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
- if (ret)
+ if (ret) {
+ gpadl->decrypted = false;
return ret;
+ }
+ /*
+ * Set the "decrypted" flag to true for the set_memory_decrypted()
+ * success case. In the failure case, the encryption state of the
+ * memory is unknown. Leave "decrypted" as true to ensure the
+ * memory will be leaked instead of going back on the free list.
+ */
+ gpadl->decrypted = true;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
@@ -527,9 +538,15 @@ cleanup:
kfree(msginfo);
- if (ret)
- set_memory_encrypted((unsigned long)kbuffer,
- PFN_UP(size));
+ if (ret) {
+ /*
+ * If set_memory_encrypted() fails, the decrypted flag is
+ * left as true so the memory is leaked instead of being
+ * put back on the free list.
+ */
+ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+ gpadl->decrypted = false;
+ }
return ret;
}
@@ -850,6 +867,8 @@ post_msg_err:
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+ gpadl->decrypted = ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
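The rule these Hyper-V changes follow: memory that was shared (decrypted) with the host in a CoCo VM may only go back to the allocator after it has been successfully re-encrypted; otherwise it is intentionally leaked. A minimal sketch of that rule with a hypothetical helper (the actual code tracks the state in gpadl->decrypted and re-encrypts in the GPADL paths rather than in one place):

#include <linux/set_memory.h>
#include <linux/mm.h>

static void demo_free_shared_page(struct page *page, bool decrypted)
{
        /* never decrypted, or re-encryption succeeded: freeing is safe */
        if (!decrypted ||
            !set_memory_encrypted((unsigned long)page_address(page), 1))
                __free_page(page);
        /* otherwise leak the page rather than expose host-visible memory */
}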
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3cabeeabb1ca..f001ae880e1d 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -237,8 +237,17 @@ int vmbus_connect(void)
vmbus_connection.monitor_pages[0], 1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1], 1);
- if (ret)
+ if (ret) {
+ /*
+ * If set_memory_decrypted() fails, the encryption state
+ * of the memory is unknown. So leak the memory instead
+ * of risking returning decrypted memory to the free list.
+ * For simplicity, always handle both pages the same.
+ */
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
goto cleanup;
+ }
/*
* Set_memory_decrypted() will change the memory contents if
@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ if (vmbus_connection.monitor_pages[0]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[0], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages[0] = NULL;
+ }
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (vmbus_connection.monitor_pages[1]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[1], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ vmbus_connection.monitor_pages[1] = NULL;
+ }
}
/*
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index dde3f9b6871a..9c452bfbd571 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
void hv_setup_dma_ops(struct device *dev, bool coherent)
{
- /*
- * Hyper-V does not offer a vIOMMU in the guest
- * VM, so pass 0/NULL for the IOMMU settings
- */
- arch_setup_dma_ops(dev, 0, 0, coherent);
+ arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4cb17603a828..12a707ab73f8 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -131,7 +131,7 @@ static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
@@ -142,7 +142,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->state);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
@@ -153,7 +153,7 @@ static ssize_t monitor_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
@@ -164,8 +164,8 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_type);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -176,8 +176,8 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_instance);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -186,7 +186,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+ return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -199,7 +199,7 @@ static ssize_t numa_node_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+ return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
@@ -212,9 +212,8 @@ static ssize_t server_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -226,9 +225,8 @@ static ssize_t client_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
@@ -240,9 +238,8 @@ static ssize_t server_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
@@ -254,9 +251,8 @@ static ssize_t client_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
@@ -268,9 +264,8 @@ static ssize_t server_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
@@ -282,9 +277,8 @@ static ssize_t client_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
@@ -303,7 +297,7 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
@@ -321,7 +315,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -340,7 +334,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -359,7 +353,7 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -378,7 +372,7 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -396,7 +390,7 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
@@ -414,7 +408,7 @@ static ssize_t in_read_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
@@ -432,7 +426,7 @@ static ssize_t in_write_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
@@ -451,7 +445,7 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -470,7 +464,7 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -480,7 +474,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
- int buf_size = PAGE_SIZE, n_written, tot_written;
+ int n_written;
struct list_head *cur;
if (!channel)
@@ -488,25 +482,21 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
mutex_lock(&vmbus_connection.channel_mutex);
- tot_written = snprintf(buf, buf_size, "%u:%u\n",
- channel->offermsg.child_relid, channel->target_cpu);
+ n_written = sysfs_emit(buf, "%u:%u\n",
+ channel->offermsg.child_relid,
+ channel->target_cpu);
list_for_each(cur, &channel->sc_list) {
- if (tot_written >= buf_size - 1)
- break;
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
- n_written = scnprintf(buf + tot_written,
- buf_size - tot_written,
- "%u:%u\n",
- cur_sc->offermsg.child_relid,
- cur_sc->target_cpu);
- tot_written += n_written;
+ n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
}
mutex_unlock(&vmbus_connection.channel_mutex);
- return tot_written;
+ return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
@@ -516,7 +506,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -526,7 +516,7 @@ static ssize_t device_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->device_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -551,7 +541,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
device_unlock(dev);
return len;
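The sprintf() to sysfs_emit() conversions above let the sysfs core enforce the PAGE_SIZE bound, and sysfs_emit_at() appends at an offset, which is what allows channel_vp_mapping_show() to drop its manual buf_size/tot_written bookkeeping. A minimal sketch with a hypothetical attribute:

#include <linux/sysfs.h>
#include <linux/device.h>

static ssize_t demo_list_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        int n = sysfs_emit(buf, "0:0\n");
        int i;

        for (i = 1; i < 3; i++)
                n += sysfs_emit_at(buf, n, "%d:%d\n", i, i * 2);

        return n;
}
static DEVICE_ATTR_RO(demo_list);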
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 83945397b6eb..e14ae18a973b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -40,7 +40,7 @@ comment "Native drivers"
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
- depends on (X86 && DMI) || COMPILE_TEST
+ depends on (X86 && DMI) || COMPILE_TEST && HAS_IOPORT
help
If you say yes here you get support for the sensor part of the first
and second revision of the Abit uGuru chip. The voltage and frequency
@@ -55,7 +55,7 @@ config SENSORS_ABITUGURU
config SENSORS_ABITUGURU3
tristate "Abit uGuru (rev 3)"
- depends on (X86 && DMI) || COMPILE_TEST
+ depends on (X86 && DMI) || COMPILE_TEST && HAS_IOPORT
help
If you say yes here you get support for the sensor part of the
third revision of the Abit uGuru chip. Only reading the sensors
@@ -611,6 +611,7 @@ config SENSORS_SPARX5
config SENSORS_F71805F
tristate "Fintek F71805F/FG, F71806F/FG and F71872F/FG"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get support for hardware monitoring
@@ -622,6 +623,7 @@ config SENSORS_F71805F
config SENSORS_F71882FG
tristate "Fintek F71882FG and compatibles"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get support for hardware monitoring
@@ -854,6 +856,7 @@ config SENSORS_CORETEMP
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
+ depends on HAS_IOPORT
depends on !PPC
select HWMON_VID
help
@@ -914,6 +917,16 @@ config SENSORS_LAN966X
This driver can also be built as a module. If so, the module
will be called lan966x-hwmon.
+config SENSORS_LENOVO_EC
+ tristate "Sensor reader for Lenovo ThinkStations"
+ depends on X86
+ help
+ If you say yes here you get support for LENOVO
+ EC Sensor data on newer ThinkStation systems
+
+ This driver can also be built as a module. If so, the module
+ will be called lenovo_ec_sensors.
+
config SENSORS_LINEAGE
tristate "Lineage Compact Power Line Power Entry Module"
depends on I2C
@@ -1220,6 +1233,7 @@ config SENSORS_MAX6621
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the MAX6639
sensor chips.
@@ -1561,6 +1575,7 @@ config SENSORS_LM95245
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
+ depends on HAS_IOPORT
depends on !PPC
select HWMON_VID
help
@@ -1575,6 +1590,7 @@ config SENSORS_PC87360
config SENSORS_PC87427
tristate "National Semiconductor PC87427"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get access to the hardware monitoring
@@ -1606,6 +1622,7 @@ config SENSORS_NTC_THERMISTOR
config SENSORS_NCT6683
tristate "Nuvoton NCT6683D"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get support for the hardware monitoring
@@ -1627,6 +1644,7 @@ config SENSORS_NCT6775_CORE
config SENSORS_NCT6775
tristate "Platform driver for Nuvoton NCT6775F and compatibles"
+ depends on HAS_IOPORT
depends on !PPC
depends on ACPI || ACPI=n
select HWMON_VID
@@ -1778,7 +1796,7 @@ config SENSORS_PT5161L
config SENSORS_PWM_FAN
tristate "PWM fan"
- depends on (PWM && OF) || COMPILE_TEST
+ depends on PWM || COMPILE_TEST
depends on THERMAL || THERMAL=n
help
If you say yes here you get support for fans connected to PWM lines.
@@ -1883,7 +1901,7 @@ config SENSORS_SHTC1
config SENSORS_SIS5595
tristate "Silicon Integrated Systems Corp. SiS5595"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes here you get support for the integrated sensors in
SiS5595 South Bridges.
@@ -1903,6 +1921,7 @@ config SENSORS_SY7636A
config SENSORS_DME1737
tristate "SMSC DME1737, SCH311x and compatibles"
+ depends on HAS_IOPORT
depends on I2C && !PPC
select HWMON_VID
help
@@ -1959,6 +1978,7 @@ config SENSORS_EMC6W201
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get support for the integrated fan
@@ -1993,6 +2013,7 @@ config SENSORS_SMSC47M192
config SENSORS_SMSC47B397
tristate "SMSC LPC47B397-NC"
+ depends on HAS_IOPORT
depends on !PPC
help
If you say yes here you get support for the SMSC LPC47B397-NC
@@ -2007,6 +2028,7 @@ config SENSORS_SCH56XX_COMMON
config SENSORS_SCH5627
tristate "SMSC SCH5627"
+ depends on HAS_IOPORT
depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
select WATCHDOG_CORE
@@ -2020,6 +2042,7 @@ config SENSORS_SCH5627
config SENSORS_SCH5636
tristate "SMSC SCH5636"
+ depends on HAS_IOPORT
depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
select WATCHDOG_CORE
@@ -2272,7 +2295,7 @@ config SENSORS_VIA_CPUTEMP
config SENSORS_VIA686A
tristate "VIA686A"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes here you get support for the integrated sensors in
Via 686A/B South Bridges.
@@ -2282,6 +2305,7 @@ config SENSORS_VIA686A
config SENSORS_VT1211
tristate "VIA VT1211"
+ depends on HAS_IOPORT
depends on !PPC
select HWMON_VID
help
@@ -2293,7 +2317,7 @@ config SENSORS_VT1211
config SENSORS_VT8231
tristate "VIA VT8231"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select HWMON_VID
help
If you say yes here then you get support for the integrated sensors
@@ -2401,6 +2425,7 @@ config SENSORS_W83L786NG
config SENSORS_W83627HF
tristate "Winbond W83627HF, W83627THF, W83637HF, W83687THF, W83697HF"
+ depends on HAS_IOPORT
depends on !PPC
select HWMON_VID
help
@@ -2413,6 +2438,7 @@ config SENSORS_W83627HF
config SENSORS_W83627EHF
tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
+ depends on HAS_IOPORT
depends on !PPC
select HWMON_VID
help
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 5c31808f6378..e3f25475d1f0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LAN966X) += lan966x-hwmon.o
+obj-$(CONFIG_SENSORS_LENOVO_EC) += lenovo-ec-sensors.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
obj-$(CONFIG_SENSORS_LOCHNAGAR) += lochnagar-hwmon.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 703666b95bf4..6c8a9c863528 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -883,6 +883,22 @@ static int acpi_power_meter_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_POWER_METER_CLASS);
device->driver_data = resource;
+#if IS_REACHABLE(CONFIG_ACPI_IPMI)
+ /*
+ * On Dell systems several methods of acpi_power_meter access
+ * variables in IPMI region, so wait until IPMI space handler is
+ * installed by acpi_ipmi and also wait until SMI is selected to make
+ * the space handler fully functional.
+ */
+ if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.")) {
+ struct acpi_device *ipi_device = acpi_dev_get_first_match_dev("IPI0001", NULL, -1);
+
+ if (ipi_device && acpi_wait_for_acpi_ipmi())
+ dev_warn(&device->dev, "Waiting for ACPI IPMI timeout");
+ acpi_dev_put(ipi_device);
+ }
+#endif
+
res = read_capabilities(resource);
if (res)
goto exit_free;
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 7f1bef59046f..f0b17e59827f 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -205,7 +205,7 @@ static int ad7414_probe(struct i2c_client *client)
}
static const struct i2c_device_id ad7414_id[] = {
- { "ad7414", 0 },
+ { "ad7414" },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 46e3c8c50765..8ac6e735ec5c 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -58,7 +58,6 @@ static const u8 num_inputs[] = { 7, 8, 4, 6 };
struct adc128_data {
struct i2c_client *client;
- struct regulator *regulator;
int vref; /* Reference voltage in mV */
struct mutex update_lock;
u8 mode; /* Operation mode */
@@ -389,7 +388,7 @@ static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int adc128_init_client(struct adc128_data *data)
+static int adc128_init_client(struct adc128_data *data, bool external_vref)
{
struct i2c_client *client = data->client;
int err;
@@ -408,7 +407,7 @@ static int adc128_init_client(struct adc128_data *data)
regval |= data->mode << 1;
/* If external vref is selected, configure the chip to use it */
- if (data->regulator)
+ if (external_vref)
regval |= 0x01;
/* Write advanced configuration register */
@@ -430,9 +429,9 @@ static int adc128_init_client(struct adc128_data *data)
static int adc128_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct regulator *regulator;
struct device *hwmon_dev;
struct adc128_data *data;
+ bool external_vref;
int err, vref;
data = devm_kzalloc(dev, sizeof(struct adc128_data), GFP_KERNEL);
@@ -440,20 +439,15 @@ static int adc128_probe(struct i2c_client *client)
return -ENOMEM;
/* vref is optional. If specified, is used as chip reference voltage */
- regulator = devm_regulator_get_optional(dev, "vref");
- if (!IS_ERR(regulator)) {
- data->regulator = regulator;
- err = regulator_enable(regulator);
- if (err < 0)
- return err;
- vref = regulator_get_voltage(regulator);
- if (vref < 0) {
- err = vref;
- goto error;
- }
- data->vref = DIV_ROUND_CLOSEST(vref, 1000);
- } else {
+ vref = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (vref == -ENODEV) {
+ external_vref = false;
data->vref = 2560; /* 2.56V, in mV */
+ } else if (vref < 0) {
+ return vref;
+ } else {
+ external_vref = true;
+ data->vref = DIV_ROUND_CLOSEST(vref, 1000);
}
/* Operation mode is optional. If unspecified, keep current mode */
@@ -461,13 +455,12 @@ static int adc128_probe(struct i2c_client *client)
if (data->mode > 3) {
dev_err(dev, "invalid operation mode %d\n",
data->mode);
- err = -EINVAL;
- goto error;
+ return -EINVAL;
}
} else {
err = i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG_ADV);
if (err < 0)
- goto error;
+ return err;
data->mode = (err >> 1) & ADC128_REG_MASK;
}
@@ -476,35 +469,18 @@ static int adc128_probe(struct i2c_client *client)
mutex_init(&data->update_lock);
/* Initialize the chip */
- err = adc128_init_client(data);
+ err = adc128_init_client(data, external_vref);
if (err < 0)
- goto error;
+ return err;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, adc128_groups);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- if (data->regulator)
- regulator_disable(data->regulator);
- return err;
-}
-
-static void adc128_remove(struct i2c_client *client)
-{
- struct adc128_data *data = i2c_get_clientdata(client);
- if (data->regulator)
- regulator_disable(data->regulator);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id adc128_id[] = {
- { "adc128d818", 0 },
+ { "adc128d818" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc128_id);
@@ -522,7 +498,6 @@ static struct i2c_driver adc128_driver = {
.of_match_table = of_match_ptr(adc128_of_match),
},
.probe = adc128_probe,
- .remove = adc128_remove,
.id_table = adc128_id,
.detect = adc128_detect,
.address_list = normal_i2c,
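The adc128d818 change replaces the get_optional/enable/get_voltage/disable-on-error sequence with devm_regulator_get_enable_read_voltage(), which returns the voltage in microvolts, -ENODEV when no supply is described, or another negative errno on failure. A minimal sketch of that conversion pattern with hypothetical names:

#include <linux/regulator/consumer.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/math.h>

static int demo_get_vref_mv(struct device *dev, int default_mv)
{
        int uv = devm_regulator_get_enable_read_voltage(dev, "vref");

        if (uv == -ENODEV)
                return default_mv;   /* no external reference wired up */
        if (uv < 0)
                return uv;           /* propagate real errors */

        return DIV_ROUND_CLOSEST(uv, 1000);
}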
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 581d8edf70ea..80d09b017d3b 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1849,7 +1849,7 @@ static int adm1026_probe(struct i2c_client *client)
}
static const struct i2c_device_id adm1026_id[] = {
- { "adm1026", 0 },
+ { "adm1026" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1026_id);
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 9a465f3f71c8..761c13092488 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -379,7 +379,7 @@ static int adm1029_probe(struct i2c_client *client)
}
static const struct i2c_device_id adm1029_id[] = {
- { "adm1029", 0 },
+ { "adm1029" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1029_id);
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
index 3390102d2d4a..8b2c965480e3 100644
--- a/drivers/hwmon/adm1177.c
+++ b/drivers/hwmon/adm1177.c
@@ -238,7 +238,7 @@ static int adm1177_probe(struct i2c_client *client)
}
static const struct i2c_device_id adm1177_id[] = {
- {"adm1177", 0},
+ {"adm1177"},
{}
};
MODULE_DEVICE_TABLE(i2c, adm1177_id);
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index d15f64d4b6e7..3bf0e0a0882c 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -88,8 +88,8 @@ static int adt7410_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adt7410_ids[] = {
- { "adt7410", 0 },
- { "adt7420", 0 },
+ { "adt7410" },
+ { "adt7420" },
{}
};
MODULE_DEVICE_TABLE(i2c, adt7410_ids);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 45fe4e8aae4e..08d0effd97f7 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -697,7 +697,7 @@ static int adt7411_probe(struct i2c_client *client)
}
static const struct i2c_device_id adt7411_id[] = {
- { "adt7411", 0 },
+ { "adt7411" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7411_id);
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 429566c4245d..174dfee47f7a 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -1809,7 +1809,7 @@ static int adt7462_probe(struct i2c_client *client)
}
static const struct i2c_device_id adt7462_id[] = {
- { "adt7462", 0 },
+ { "adt7462" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7462_id);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c4b3a4a18670..517248d2994e 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1304,7 +1304,7 @@ static void adt7470_remove(struct i2c_client *client)
}
static const struct i2c_device_id adt7470_id[] = {
- { "adt7470", 0 },
+ { "adt7470" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7470_id);
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 2efe97f8d003..8e55cd2f46f5 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -202,16 +202,19 @@ static u16 aquastreamult_sensor_fan_offsets[] = { AQUASTREAMULT_FAN_OFFSET };
#define OCTO_NUM_FANS 8
#define OCTO_NUM_SENSORS 4
#define OCTO_NUM_VIRTUAL_SENSORS 16
+#define OCTO_NUM_FLOW_SENSORS 1
#define OCTO_CTRL_REPORT_SIZE 0x65F
/* Sensor report offsets for the Octo */
#define OCTO_POWER_CYCLES 0x18
#define OCTO_SENSOR_START 0x3D
#define OCTO_VIRTUAL_SENSORS_START 0x45
+#define OCTO_FLOW_SENSOR_OFFSET 0x7B
static u16 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
/* Control report offsets for the Octo */
#define OCTO_TEMP_CTRL_OFFSET 0xA
+#define OCTO_FLOW_PULSES_CTRL_OFFSET 0x6
/* Fan speed offsets (0-100%) */
static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0x259, 0x2AE };
@@ -363,18 +366,6 @@ static const char *const label_aquaero_calc_temp_sensors[] = {
"Calc. virtual sensor 4"
};
-/* Labels for Octo and Quadro (except speed) */
-static const char *const label_fan_speed[] = {
- "Fan 1 speed",
- "Fan 2 speed",
- "Fan 3 speed",
- "Fan 4 speed",
- "Fan 5 speed",
- "Fan 6 speed",
- "Fan 7 speed",
- "Fan 8 speed"
-};
-
static const char *const label_fan_power[] = {
"Fan 1 power",
"Fan 2 power",
@@ -408,6 +399,19 @@ static const char *const label_fan_current[] = {
"Fan 8 current"
};
+/* Labels for Octo fan speeds */
+static const char *const label_octo_speeds[] = {
+ "Fan 1 speed",
+ "Fan 2 speed",
+ "Fan 3 speed",
+ "Fan 4 speed",
+ "Fan 5 speed",
+ "Fan 6 speed",
+ "Fan 7 speed",
+ "Fan 8 speed",
+ "Flow speed [dL/h]",
+};
+
/* Labels for Quadro fan speeds */
static const char *const label_quadro_speeds[] = {
"Fan 1 speed",
@@ -844,6 +848,7 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
return 0444;
break;
case aquaero:
+ case octo:
case quadro:
case highflow:
/* Special case to support flow sensors */
@@ -857,9 +862,16 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
}
break;
case hwmon_fan_pulses:
- /* Special case for Quadro flow sensor */
- if (priv->kind == quadro && channel == priv->num_fans)
- return 0644;
+ /* Special case for Quadro/Octo flow sensor */
+ if (channel == priv->num_fans) {
+ switch (priv->kind) {
+ case quadro:
+ case octo:
+ return 0644;
+ default:
+ break;
+ }
+ }
break;
case hwmon_fan_min:
case hwmon_fan_max:
@@ -1289,7 +1301,8 @@ static const struct hwmon_channel_info * const aqc_info[] = {
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
- HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES),
HWMON_CHANNEL_INFO(power,
HWMON_P_INPUT | HWMON_P_LABEL,
HWMON_P_INPUT | HWMON_P_LABEL,
@@ -1658,16 +1671,20 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->temp_sensor_start_offset = OCTO_SENSOR_START;
priv->num_virtual_temp_sensors = OCTO_NUM_VIRTUAL_SENSORS;
priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START;
+ priv->num_flow_sensors = OCTO_NUM_FLOW_SENSORS;
+ priv->flow_sensors_start_offset = OCTO_FLOW_SENSOR_OFFSET;
+
priv->temp_ctrl_offset = OCTO_TEMP_CTRL_OFFSET;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
priv->ctrl_report_delay = CTRL_REPORT_DELAY;
+ priv->flow_pulses_ctrl_offset = OCTO_FLOW_PULSES_CTRL_OFFSET;
priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
priv->temp_label = label_temp_sensors;
priv->virtual_temp_label = label_virtual_temp_sensors;
- priv->speed_label = label_fan_speed;
+ priv->speed_label = label_octo_speeds;
priv->power_label = label_fan_power;
priv->voltage_label = label_fan_voltage;
priv->current_label = label_fan_current;
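The Octo gains a flow sensor here: one extra speed channel appended after the eight fans (hence the added HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES entry and the new label array), plus a read offset and a writable pulses control. A simplified sketch of the visibility rule, with a stand-in private struct (field names mirror the driver, the struct itself is illustrative):

#include <linux/hwmon.h>

enum aqc_kind { d5next, quadro, octo, highflow };

struct aqc_priv {			/* stand-in for the driver's private data */
	enum aqc_kind kind;
	int num_fans;			/* flow sensor is appended after the real fans */
};

/* Pulses are writable only on the extra flow channel of Quadro/Octo. */
static umode_t flow_pulses_mode(const struct aqc_priv *priv, int channel)
{
	if (channel != priv->num_fans)
		return 0;

	switch (priv->kind) {
	case quadro:
	case octo:
		return 0644;
	default:
		return 0;
	}
}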
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 974521e9b6b4..14e7737866c2 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -213,7 +213,7 @@ static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
static const struct i2c_device_id asb100_id[] = {
- { "asb100", 0 },
+ { "asb100" },
{ }
};
MODULE_DEVICE_TABLE(i2c, asb100_id);
diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c
index 597b3b019d49..08a2ded95e45 100644
--- a/drivers/hwmon/aspeed-g6-pwm-tach.c
+++ b/drivers/hwmon/aspeed-g6-pwm-tach.c
@@ -136,7 +136,6 @@ struct aspeed_pwm_tach_data {
struct clk *clk;
struct reset_control *reset;
unsigned long clk_rate;
- struct pwm_chip chip;
bool tach_present[TACH_ASPEED_NR_TACHS];
u32 tach_divisor;
};
@@ -144,7 +143,7 @@ struct aspeed_pwm_tach_data {
static inline struct aspeed_pwm_tach_data *
aspeed_pwm_chip_to_data(struct pwm_chip *chip)
{
- return container_of(chip, struct aspeed_pwm_tach_data, chip);
+ return pwmchip_get_drvdata(chip);
}
static int aspeed_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -195,7 +194,7 @@ static int aspeed_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
expect_period = div64_u64(ULLONG_MAX, (u64)priv->clk_rate);
expect_period = min(expect_period, state->period);
- dev_dbg(chip->dev, "expect period: %lldns, duty_cycle: %lldns",
+ dev_dbg(pwmchip_parent(chip), "expect period: %lldns, duty_cycle: %lldns",
expect_period, state->duty_cycle);
/*
* Pick the smallest value for div_h so that div_l can be the biggest
@@ -218,12 +217,12 @@ static int aspeed_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (div_l > 255)
div_l = 255;
- dev_dbg(chip->dev, "clk source: %ld div_h %lld, div_l : %lld\n",
+ dev_dbg(pwmchip_parent(chip), "clk source: %ld div_h %lld, div_l : %lld\n",
priv->clk_rate, div_h, div_l);
/* duty_pt = duty_cycle * (PERIOD + 1) / period */
duty_pt = div64_u64(state->duty_cycle * priv->clk_rate,
(u64)NSEC_PER_SEC * (div_l + 1) << div_h);
- dev_dbg(chip->dev, "duty_cycle = %lld, duty_pt = %d\n",
+ dev_dbg(pwmchip_parent(chip), "duty_cycle = %lld, duty_pt = %d\n",
state->duty_cycle, duty_pt);
/*
@@ -459,6 +458,7 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev)
int ret;
struct device_node *child;
struct aspeed_pwm_tach_data *priv;
+ struct pwm_chip *chip;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -487,11 +487,14 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->chip.dev = dev;
- priv->chip.ops = &aspeed_pwm_ops;
- priv->chip.npwm = PWM_ASPEED_NR_PWMS;
+ chip = devm_pwmchip_alloc(dev, PWM_ASPEED_NR_PWMS, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- ret = devm_pwmchip_add(dev, &priv->chip);
+ pwmchip_set_drvdata(chip, priv);
+ chip->ops = &aspeed_pwm_ops;
+
+ ret = devm_pwmchip_add(dev, chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
@@ -516,13 +519,11 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev)
return 0;
}
-static int aspeed_pwm_tach_remove(struct platform_device *pdev)
+static void aspeed_pwm_tach_remove(struct platform_device *pdev)
{
struct aspeed_pwm_tach_data *priv = platform_get_drvdata(pdev);
reset_control_assert(priv->reset);
-
- return 0;
}
static const struct of_device_id aspeed_pwm_tach_match[] = {
@@ -535,7 +536,7 @@ MODULE_DEVICE_TABLE(of, aspeed_pwm_tach_match);
static struct platform_driver aspeed_pwm_tach_driver = {
.probe = aspeed_pwm_tach_probe,
- .remove = aspeed_pwm_tach_remove,
+ .remove_new = aspeed_pwm_tach_remove,
.driver = {
.name = "aspeed-g6-pwm-tach",
.of_match_table = aspeed_pwm_tach_match,
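This converts the driver from embedding struct pwm_chip in its private data to the allocated-chip model: devm_pwmchip_alloc() creates the chip, pwmchip_set_drvdata()/pwmchip_get_drvdata() replace the container_of() lookup, pwmchip_parent() replaces chip->dev, and .remove_new takes the void-returning remove callback. A minimal probe sketch under those APIs (foo_priv, foo_pwm_ops and FOO_NPWM are placeholders):

#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_priv *priv;
	struct pwm_chip *chip;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* The PWM core owns the chip allocation; no pwm_chip inside foo_priv. */
	chip = devm_pwmchip_alloc(dev, FOO_NPWM, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	pwmchip_set_drvdata(chip, priv);	/* fetched via pwmchip_get_drvdata() in the ops */
	chip->ops = &foo_pwm_ops;

	return devm_pwmchip_add(dev, chip);
}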
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d1de020abec6..1c7e9a98b757 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -278,7 +278,7 @@ static int atxp1_probe(struct i2c_client *client)
};
static const struct i2c_device_id atxp1_id[] = {
- { "atxp1", 0 },
+ { "atxp1" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atxp1_id);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 616bd1a5b864..1b9203b20d70 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -411,7 +411,7 @@ static ssize_t show_temp(struct device *dev,
* Return it instead of reporting an error which doesn't
* really help at all.
*/
- tdata->temp = tjmax - ((eax >> 16) & 0x7f) * 1000;
+ tdata->temp = tjmax - ((eax >> 16) & 0xff) * 1000;
tdata->last_updated = jiffies;
}
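Worked example of the computation above: with tjmax = 100000 (100 °C in millidegrees) and a readout of 35 extracted from bits 16 and up of EAX, tdata->temp = 100000 - 35 * 1000 = 65000, i.e. 65 °C; the 0xff mask simply stops the old 0x7f mask from truncating readout values of 128 or more.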
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index a284a02839fb..3e63666a61bd 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#define USB_VENDOR_ID_CORSAIR 0x1b1c
@@ -77,8 +78,11 @@
struct ccp_device {
struct hid_device *hdev;
struct device *hwmon_dev;
+ /* For reinitializing the completion below */
+ spinlock_t wait_input_report_lock;
struct completion wait_input_report;
struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
+ u8 *cmd_buffer;
u8 *buffer;
int target[6];
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
@@ -111,15 +115,23 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
unsigned long t;
int ret;
- memset(ccp->buffer, 0x00, OUT_BUFFER_SIZE);
- ccp->buffer[0] = command;
- ccp->buffer[1] = byte1;
- ccp->buffer[2] = byte2;
- ccp->buffer[3] = byte3;
-
+ memset(ccp->cmd_buffer, 0x00, OUT_BUFFER_SIZE);
+ ccp->cmd_buffer[0] = command;
+ ccp->cmd_buffer[1] = byte1;
+ ccp->cmd_buffer[2] = byte2;
+ ccp->cmd_buffer[3] = byte3;
+
+ /*
+ * Disable raw event parsing for a moment to safely reinitialize the
+ * completion. Reinit is done because hidraw could have triggered
+ * the raw event parsing and marked the ccp->wait_input_report
+ * completion as done.
+ */
+ spin_lock_bh(&ccp->wait_input_report_lock);
reinit_completion(&ccp->wait_input_report);
+ spin_unlock_bh(&ccp->wait_input_report_lock);
- ret = hid_hw_output_report(ccp->hdev, ccp->buffer, OUT_BUFFER_SIZE);
+ ret = hid_hw_output_report(ccp->hdev, ccp->cmd_buffer, OUT_BUFFER_SIZE);
if (ret < 0)
return ret;
@@ -135,11 +147,12 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
struct ccp_device *ccp = hid_get_drvdata(hdev);
/* only copy buffer when requested */
- if (completion_done(&ccp->wait_input_report))
- return 0;
-
- memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
- complete(&ccp->wait_input_report);
+ spin_lock(&ccp->wait_input_report_lock);
+ if (!completion_done(&ccp->wait_input_report)) {
+ memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+ complete_all(&ccp->wait_input_report);
+ }
+ spin_unlock(&ccp->wait_input_report_lock);
return 0;
}
@@ -492,7 +505,11 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ccp)
return -ENOMEM;
- ccp->buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
+ ccp->cmd_buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
+ if (!ccp->cmd_buffer)
+ return -ENOMEM;
+
+ ccp->buffer = devm_kmalloc(&hdev->dev, IN_BUFFER_SIZE, GFP_KERNEL);
if (!ccp->buffer)
return -ENOMEM;
@@ -510,7 +527,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ccp->hdev = hdev;
hid_set_drvdata(hdev, ccp);
+
mutex_init(&ccp->mutex);
+ spin_lock_init(&ccp->wait_input_report_lock);
init_completion(&ccp->wait_input_report);
hid_device_io_start(hdev);
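Two changes here: the single shared buffer is split into a command (output) buffer and a receive buffer of IN_BUFFER_SIZE, and the completion handshake is made race-free by serializing reinit_completion() against the raw-event path with a spinlock (the receiver now uses complete_all() so no completion is lost). A condensed sketch of that handshake with a hypothetical device struct:

#include <linux/completion.h>
#include <linux/minmax.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct dev_state {
	spinlock_t lock;		/* serializes reinit vs. the receive path */
	struct completion wait_done;
	u8 *rx_buf;			/* IN_BUFFER_SIZE bytes */
};

static void prepare_request(struct dev_state *st)
{
	spin_lock_bh(&st->lock);	/* receive path may run in softirq context */
	reinit_completion(&st->wait_done);
	spin_unlock_bh(&st->lock);
	/* ...send the command from the separate output buffer here... */
}

static void receive_event(struct dev_state *st, const u8 *data, int size, int bufsize)
{
	spin_lock(&st->lock);
	if (!completion_done(&st->wait_done)) {	/* only copy when someone is waiting */
		memcpy(st->rx_buf, data, min(bufsize, size));
		complete_all(&st->wait_done);
	}
	spin_unlock(&st->lock);
}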
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 2bd7ae8100d7..7fb0c57dfef5 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -26,7 +26,6 @@ struct da9052_hwmon {
struct mutex hwmon_lock;
bool tsi_as_adc;
int tsiref_mv;
- struct regulator *tsiref;
struct completion tsidone;
};
@@ -397,7 +396,7 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct da9052_hwmon *hwmon;
struct device *hwmon_dev;
- int err;
+ int err, tsiref_uv;
hwmon = devm_kzalloc(dev, sizeof(struct da9052_hwmon), GFP_KERNEL);
if (!hwmon)
@@ -414,32 +413,20 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
device_property_read_bool(pdev->dev.parent, "dlg,tsi-as-adc");
if (hwmon->tsi_as_adc) {
- hwmon->tsiref = devm_regulator_get(pdev->dev.parent, "tsiref");
- if (IS_ERR(hwmon->tsiref)) {
- err = PTR_ERR(hwmon->tsiref);
- dev_err(&pdev->dev, "failed to get tsiref: %d", err);
- return err;
- }
-
- err = regulator_enable(hwmon->tsiref);
- if (err)
- return err;
-
- hwmon->tsiref_mv = regulator_get_voltage(hwmon->tsiref);
- if (hwmon->tsiref_mv < 0) {
- err = hwmon->tsiref_mv;
- goto exit_regulator;
- }
+ tsiref_uv = devm_regulator_get_enable_read_voltage(dev->parent,
+ "tsiref");
+ if (tsiref_uv < 0)
+ return dev_err_probe(dev, tsiref_uv,
+ "failed to get tsiref voltage\n");
/* convert from microvolt (DT) to millivolt (hwmon) */
- hwmon->tsiref_mv /= 1000;
+ hwmon->tsiref_mv = tsiref_uv / 1000;
/* TSIREF limits from datasheet */
if (hwmon->tsiref_mv < 1800 || hwmon->tsiref_mv > 2600) {
dev_err(hwmon->da9052->dev, "invalid TSIREF voltage: %d",
hwmon->tsiref_mv);
- err = -ENXIO;
- goto exit_regulator;
+ return -ENXIO;
}
/* disable touchscreen features */
@@ -456,7 +443,7 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to register TSIRDY IRQ: %d",
err);
- goto exit_regulator;
+ return err;
}
}
@@ -472,9 +459,6 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
exit_irq:
if (hwmon->tsi_as_adc)
da9052_free_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, hwmon);
-exit_regulator:
- if (hwmon->tsiref)
- regulator_disable(hwmon->tsiref);
return err;
}
@@ -483,10 +467,8 @@ static void da9052_hwmon_remove(struct platform_device *pdev)
{
struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
- if (hwmon->tsi_as_adc) {
+ if (hwmon->tsi_as_adc)
da9052_free_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, hwmon);
- regulator_disable(hwmon->tsiref);
- }
}
static struct platform_driver da9052_hwmon_driver = {
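The open-coded get/enable/read-voltage/disable handling of the tsiref supply is replaced by devm_regulator_get_enable_read_voltage(), which returns the voltage in microvolts (or a negative errno) and leaves the enable/disable lifetime to devres, so the error-path and remove-path regulator_disable() calls go away. A minimal usage sketch with a placeholder supply name "vref":

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int foo_get_vref_mv(struct device *dev)
{
	int uv;

	uv = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (uv < 0)
		return dev_err_probe(dev, uv, "failed to get vref voltage\n");

	return uv / 1000;	/* regulator API reports microvolts, hwmon wants millivolts */
}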
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index efcf78673e74..48a81c64f00d 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1215,6 +1215,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell G5 5505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G5 5505"),
+ },
+ },
+ {
.ident = "Dell Inspiron",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"),
@@ -1507,6 +1514,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
{
+ .ident = "Dell Precision 7540",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Precision 7540"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
.ident = "Dell XPS 13 7390",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 4fc4df012fac..ce397042d90b 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -233,7 +233,7 @@ static int ds620_probe(struct i2c_client *client)
}
static const struct i2c_device_id ds620_id[] = {
- {"ds620", 0},
+ {"ds620"},
{}
};
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index d370efd6f986..eca33220d34a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -19,302 +19,56 @@
#include <linux/sysfs.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#define THERMAL_PID_REG 0xfd
#define THERMAL_SMSC_ID_REG 0xfe
#define THERMAL_REVISION_REG 0xff
-enum emc1403_chip { emc1402, emc1403, emc1404 };
+enum emc1403_chip { emc1402, emc1403, emc1404, emc1428 };
struct thermal_data {
+ enum emc1403_chip chip;
struct regmap *regmap;
struct mutex mutex;
- const struct attribute_group *groups[4];
};
-static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct thermal_data *data = dev_get_drvdata(dev);
unsigned int val;
int retval;
- retval = regmap_read(data->regmap, sda->index, &val);
+ retval = regmap_read(data->regmap, 0x03, &val);
if (retval < 0)
return retval;
- return sprintf(buf, "%d000\n", val);
+ return sprintf(buf, "%d\n", !!(val & BIT(6)));
}
-static ssize_t bit_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t power_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
- struct thermal_data *data = dev_get_drvdata(dev);
- unsigned int val;
- int retval;
-
- retval = regmap_read(data->regmap, sda->nr, &val);
- if (retval < 0)
- return retval;
- return sprintf(buf, "%d\n", !!(val & sda->index));
-}
-
-static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct thermal_data *data = dev_get_drvdata(dev);
unsigned long val;
int retval;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
- retval = regmap_write(data->regmap, sda->index,
- DIV_ROUND_CLOSEST(val, 1000));
- if (retval < 0)
- return retval;
- return count;
-}
-static ssize_t bit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
- struct thermal_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int retval;
-
- if (kstrtoul(buf, 10, &val))
- return -EINVAL;
-
- retval = regmap_update_bits(data->regmap, sda->nr, sda->index,
- val ? sda->index : 0);
+ retval = regmap_update_bits(data->regmap, 0x03, BIT(6),
+ val ? BIT(6) : 0);
if (retval < 0)
return retval;
return count;
}
-static ssize_t show_hyst_common(struct device *dev,
- struct device_attribute *attr, char *buf,
- bool is_min)
-{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct thermal_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- unsigned int limit;
- unsigned int hyst;
- int retval;
-
- retval = regmap_read(regmap, sda->index, &limit);
- if (retval < 0)
- return retval;
-
- retval = regmap_read(regmap, 0x21, &hyst);
- if (retval < 0)
- return retval;
-
- return sprintf(buf, "%d000\n", is_min ? limit + hyst : limit - hyst);
-}
-
-static ssize_t hyst_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return show_hyst_common(dev, attr, buf, false);
-}
-
-static ssize_t min_hyst_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_hyst_common(dev, attr, buf, true);
-}
-
-static ssize_t hyst_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct thermal_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- unsigned int limit;
- int retval;
- int hyst;
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val))
- return -EINVAL;
-
- mutex_lock(&data->mutex);
- retval = regmap_read(regmap, sda->index, &limit);
- if (retval < 0)
- goto fail;
-
- hyst = limit * 1000 - val;
- hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 255);
- retval = regmap_write(regmap, 0x21, hyst);
- if (retval == 0)
- retval = count;
-fail:
- mutex_unlock(&data->mutex);
- return retval;
-}
-
-/*
- * Sensors. We pass the actual i2c register to the methods.
- */
-
-static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, 0x06);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, 0x05);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, 0x20);
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0x00);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_min_alarm, bit, 0x36, 0x01);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_max_alarm, bit, 0x35, 0x01);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_crit_alarm, bit, 0x37, 0x01);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_hyst, min_hyst, 0x06);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, hyst, 0x05);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, hyst, 0x20);
-
-static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, 0x08);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, 0x07);
-static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, 0x19);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 0x01);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_fault, bit, 0x1b, 0x02);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_min_alarm, bit, 0x36, 0x02);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_max_alarm, bit, 0x35, 0x02);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_crit_alarm, bit, 0x37, 0x02);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_hyst, min_hyst, 0x08);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_hyst, hyst, 0x07);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_hyst, hyst, 0x19);
-
-static SENSOR_DEVICE_ATTR_RW(temp3_min, temp, 0x16);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, temp, 0x15);
-static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp, 0x1A);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 0x23);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_fault, bit, 0x1b, 0x04);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_min_alarm, bit, 0x36, 0x04);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_max_alarm, bit, 0x35, 0x04);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_crit_alarm, bit, 0x37, 0x04);
-static SENSOR_DEVICE_ATTR_RO(temp3_min_hyst, min_hyst, 0x16);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_hyst, hyst, 0x15);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_hyst, hyst, 0x1A);
-
-static SENSOR_DEVICE_ATTR_RW(temp4_min, temp, 0x2D);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, temp, 0x2C);
-static SENSOR_DEVICE_ATTR_RW(temp4_crit, temp, 0x30);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 0x2A);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_fault, bit, 0x1b, 0x08);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_min_alarm, bit, 0x36, 0x08);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_max_alarm, bit, 0x35, 0x08);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_crit_alarm, bit, 0x37, 0x08);
-static SENSOR_DEVICE_ATTR_RO(temp4_min_hyst, min_hyst, 0x2D);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_hyst, hyst, 0x2C);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit_hyst, hyst, 0x30);
-
-static SENSOR_DEVICE_ATTR_2_RW(power_state, bit, 0x03, 0x40);
-
-static struct attribute *emc1402_attrs[] = {
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
-
- &sensor_dev_attr_power_state.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group emc1402_group = {
- .attrs = emc1402_attrs,
-};
+static DEVICE_ATTR_RW(power_state);
static struct attribute *emc1403_attrs[] = {
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group emc1403_group = {
- .attrs = emc1403_attrs,
-};
-
-static struct attribute *emc1404_attrs[] = {
- &sensor_dev_attr_temp4_min.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp4_crit.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr,
+ &dev_attr_power_state.attr,
NULL
};
-
-static const struct attribute_group emc1404_group = {
- .attrs = emc1404_attrs,
-};
-
-/*
- * EMC14x2 uses a different register and different bits to report alarm and
- * fault status. For simplicity, provide a separate attribute group for this
- * chip series.
- * Since we can not re-use the same attribute names, create a separate attribute
- * array.
- */
-static struct sensor_device_attribute_2 emc1402_alarms[] = {
- SENSOR_ATTR_2_RO(temp1_min_alarm, bit, 0x02, 0x20),
- SENSOR_ATTR_2_RO(temp1_max_alarm, bit, 0x02, 0x40),
- SENSOR_ATTR_2_RO(temp1_crit_alarm, bit, 0x02, 0x01),
-
- SENSOR_ATTR_2_RO(temp2_fault, bit, 0x02, 0x04),
- SENSOR_ATTR_2_RO(temp2_min_alarm, bit, 0x02, 0x08),
- SENSOR_ATTR_2_RO(temp2_max_alarm, bit, 0x02, 0x10),
- SENSOR_ATTR_2_RO(temp2_crit_alarm, bit, 0x02, 0x02),
-};
-
-static struct attribute *emc1402_alarm_attrs[] = {
- &emc1402_alarms[0].dev_attr.attr,
- &emc1402_alarms[1].dev_attr.attr,
- &emc1402_alarms[2].dev_attr.attr,
- &emc1402_alarms[3].dev_attr.attr,
- &emc1402_alarms[4].dev_attr.attr,
- &emc1402_alarms[5].dev_attr.attr,
- &emc1402_alarms[6].dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group emc1402_alarm_group = {
- .attrs = emc1402_alarm_attrs,
-};
+ATTRIBUTE_GROUPS(emc1403);
static int emc1403_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -346,6 +100,12 @@ static int emc1403_detect(struct i2c_client *client,
case 0x27:
strscpy(info->type, "emc1424", I2C_NAME_SIZE);
break;
+ case 0x29:
+ strscpy(info->type, "emc1428", I2C_NAME_SIZE);
+ break;
+ case 0x59:
+ strscpy(info->type, "emc1438", I2C_NAME_SIZE);
+ break;
case 0x60:
strscpy(info->type, "emc1442", I2C_NAME_SIZE);
break;
@@ -376,6 +136,14 @@ static bool emc1403_regmap_is_volatile(struct device *dev, unsigned int reg)
case 0x35: /* high limit status */
case 0x36: /* low limit status */
case 0x37: /* therm limit status */
+ case 0x41: /* external diode 4 high byte */
+ case 0x42: /* external diode 4 low byte */
+ case 0x43: /* external diode 5 high byte */
+ case 0x44: /* external diode 5 low byte */
+ case 0x45: /* external diode 6 high byte */
+ case 0x46: /* external diode 6 low byte */
+ case 0x47: /* external diode 7 high byte */
+ case 0x48: /* external diode 7 low byte */
return true;
default:
return false;
@@ -389,51 +157,508 @@ static const struct regmap_config emc1403_regmap_config = {
.volatile_reg = emc1403_regmap_is_volatile,
};
-static const struct i2c_device_id emc1403_idtable[];
+enum emc1403_reg_map {temp_min, temp_max, temp_crit, temp_input};
-static int emc1403_probe(struct i2c_client *client)
+static u8 ema1403_temp_map[] = {
+ [hwmon_temp_min] = temp_min,
+ [hwmon_temp_max] = temp_max,
+ [hwmon_temp_crit] = temp_crit,
+ [hwmon_temp_input] = temp_input,
+};
+
+static u8 emc1403_temp_regs[][4] = {
+ [0] = {
+ [temp_min] = 0x06,
+ [temp_max] = 0x05,
+ [temp_crit] = 0x20,
+ [temp_input] = 0x00,
+ },
+ [1] = {
+ [temp_min] = 0x08,
+ [temp_max] = 0x07,
+ [temp_crit] = 0x19,
+ [temp_input] = 0x01,
+ },
+ [2] = {
+ [temp_min] = 0x16,
+ [temp_max] = 0x15,
+ [temp_crit] = 0x1a,
+ [temp_input] = 0x23,
+ },
+ [3] = {
+ [temp_min] = 0x2d,
+ [temp_max] = 0x2c,
+ [temp_crit] = 0x30,
+ [temp_input] = 0x2a,
+ },
+ [4] = {
+ [temp_min] = 0x51,
+ [temp_max] = 0x50,
+ [temp_crit] = 0x64,
+ [temp_input] = 0x41,
+ },
+ [5] = {
+ [temp_min] = 0x55,
+ [temp_max] = 0x54,
+ [temp_crit] = 0x65,
+ [temp_input] = 0x43
+ },
+ [6] = {
+ [temp_min] = 0x59,
+ [temp_max] = 0x58,
+ [temp_crit] = 0x66,
+ [temp_input] = 0x45,
+ },
+ [7] = {
+ [temp_min] = 0x5d,
+ [temp_max] = 0x5c,
+ [temp_crit] = 0x67,
+ [temp_input] = 0x47,
+ },
+};
+
+static s8 emc1403_temp_regs_low[][4] = {
+ [0] = {
+ [temp_min] = -1,
+ [temp_max] = -1,
+ [temp_crit] = -1,
+ [temp_input] = 0x29,
+ },
+ [1] = {
+ [temp_min] = 0x14,
+ [temp_max] = 0x13,
+ [temp_crit] = -1,
+ [temp_input] = 0x10,
+ },
+ [2] = {
+ [temp_min] = 0x18,
+ [temp_max] = 0x17,
+ [temp_crit] = -1,
+ [temp_input] = 0x24,
+ },
+ [3] = {
+ [temp_min] = 0x2f,
+ [temp_max] = 0x2e,
+ [temp_crit] = -1,
+ [temp_input] = 0x2b,
+ },
+ [4] = {
+ [temp_min] = 0x53,
+ [temp_max] = 0x52,
+ [temp_crit] = -1,
+ [temp_input] = 0x42,
+ },
+ [5] = {
+ [temp_min] = 0x57,
+ [temp_max] = 0x56,
+ [temp_crit] = -1,
+ [temp_input] = 0x44,
+ },
+ [6] = {
+ [temp_min] = 0x5b,
+ [temp_max] = 0x5a,
+ [temp_crit] = -1,
+ [temp_input] = 0x46,
+ },
+ [7] = {
+ [temp_min] = 0x5f,
+ [temp_max] = 0x5e,
+ [temp_crit] = -1,
+ [temp_input] = 0x48,
+ },
+};
+
+static int __emc1403_get_temp(struct thermal_data *data, int channel,
+ enum emc1403_reg_map map, long *val)
{
- struct thermal_data *data;
- struct device *hwmon_dev;
- const struct i2c_device_id *id = i2c_match_id(emc1403_idtable, client);
+ unsigned int regvalh;
+ unsigned int regvall = 0;
+ int ret;
+ s8 reg;
+
+ ret = regmap_read(data->regmap, emc1403_temp_regs[channel][map], &regvalh);
+ if (ret < 0)
+ return ret;
+
+ reg = emc1403_temp_regs_low[channel][map];
+ if (reg >= 0) {
+ ret = regmap_read(data->regmap, reg, &regvall);
+ if (ret < 0)
+ return ret;
+ }
- data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
- GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
+ if (data->chip == emc1428)
+ *val = sign_extend32((regvalh << 3) | (regvall >> 5), 10) * 125;
+ else
+ *val = ((regvalh << 3) | (regvall >> 5)) * 125;
- data->regmap = devm_regmap_init_i2c(client, &emc1403_regmap_config);
- if (IS_ERR(data->regmap))
- return PTR_ERR(data->regmap);
+ return 0;
+}
- mutex_init(&data->mutex);
+static int emc1403_get_temp(struct thermal_data *data, int channel,
+ enum emc1403_reg_map map, long *val)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = __emc1403_get_temp(data, channel, map, val);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int emc1403_get_hyst(struct thermal_data *data, int channel,
+ enum emc1403_reg_map map, long *val)
+{
+ int hyst, ret;
+ long limit;
- switch (id->driver_data) {
- case emc1404:
- data->groups[2] = &emc1404_group;
- fallthrough;
- case emc1403:
- data->groups[1] = &emc1403_group;
- fallthrough;
- case emc1402:
- data->groups[0] = &emc1402_group;
+ mutex_lock(&data->mutex);
+ ret = __emc1403_get_temp(data, channel, map, &limit);
+ if (ret < 0)
+ goto unlock;
+ ret = regmap_read(data->regmap, 0x21, &hyst);
+ if (ret < 0)
+ goto unlock;
+ if (map == temp_min)
+ *val = limit + hyst * 1000;
+ else
+ *val = limit - hyst * 1000;
+unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int emc1403_temp_read(struct thermal_data *data, u32 attr, int channel, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_input:
+ ret = emc1403_get_temp(data, channel, ema1403_temp_map[attr], val);
+ break;
+ case hwmon_temp_min_hyst:
+ ret = emc1403_get_hyst(data, channel, temp_min, val);
+ break;
+ case hwmon_temp_max_hyst:
+ ret = emc1403_get_hyst(data, channel, temp_max, val);
+ break;
+ case hwmon_temp_crit_hyst:
+ ret = emc1403_get_hyst(data, channel, temp_crit, val);
+ break;
+ case hwmon_temp_min_alarm:
+ if (data->chip == emc1402) {
+ ret = regmap_read(data->regmap, 0x02, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(5 - 2 * channel));
+ } else {
+ ret = regmap_read(data->regmap, 0x36, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(channel));
+ }
+ break;
+ case hwmon_temp_max_alarm:
+ if (data->chip == emc1402) {
+ ret = regmap_read(data->regmap, 0x02, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(6 - 2 * channel));
+ } else {
+ ret = regmap_read(data->regmap, 0x35, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(channel));
+ }
+ break;
+ case hwmon_temp_crit_alarm:
+ if (data->chip == emc1402) {
+ ret = regmap_read(data->regmap, 0x02, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(channel));
+ } else {
+ ret = regmap_read(data->regmap, 0x37, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(channel));
+ }
+ break;
+ case hwmon_temp_fault:
+ ret = regmap_read(data->regmap, 0x1b, &regval);
+ if (ret < 0)
+ break;
+ *val = !!(regval & BIT(channel));
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ return ret;
+}
- if (id->driver_data == emc1402)
- data->groups[1] = &emc1402_alarm_group;
+static int emc1403_get_convrate(struct thermal_data *data, long *val)
+{
+ unsigned int convrate;
+ int ret;
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- data->groups);
- if (IS_ERR(hwmon_dev))
- return PTR_ERR(hwmon_dev);
+ ret = regmap_read(data->regmap, 0x04, &convrate);
+ if (ret < 0)
+ return ret;
+ if (convrate > 10)
+ convrate = 4;
- dev_info(&client->dev, "%s Thermal chip found\n", id->name);
+ *val = 16000 >> convrate;
return 0;
}
-static const unsigned short emc1403_address_list[] = {
- 0x18, 0x1c, 0x29, 0x3c, 0x4c, 0x4d, 0x5c, I2C_CLIENT_END
+static int emc1403_chip_read(struct thermal_data *data, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return emc1403_get_convrate(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int emc1403_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct thermal_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return emc1403_temp_read(data, attr, channel, val);
+ case hwmon_chip:
+ return emc1403_chip_read(data, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int emc1403_set_hyst(struct thermal_data *data, long val)
+{
+ int hyst, ret;
+ long limit;
+
+ if (data->chip == emc1428)
+ val = clamp_val(val, -128000, 127000);
+ else
+ val = clamp_val(val, 0, 255000);
+
+ mutex_lock(&data->mutex);
+ ret = __emc1403_get_temp(data, 0, temp_crit, &limit);
+ if (ret < 0)
+ goto unlock;
+
+ hyst = limit - val;
+ if (data->chip == emc1428)
+ hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 127);
+ else
+ hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 255);
+ ret = regmap_write(data->regmap, 0x21, hyst);
+unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int emc1403_set_temp(struct thermal_data *data, int channel,
+ enum emc1403_reg_map map, long val)
+{
+ unsigned int regval;
+ int ret;
+ u8 regh;
+ s8 regl;
+
+ regh = emc1403_temp_regs[channel][map];
+ regl = emc1403_temp_regs_low[channel][map];
+
+ mutex_lock(&data->mutex);
+ if (regl >= 0) {
+ if (data->chip == emc1428)
+ val = clamp_val(val, -128000, 127875);
+ else
+ val = clamp_val(val, 0, 255875);
+ regval = DIV_ROUND_CLOSEST(val, 125);
+ ret = regmap_write(data->regmap, regh, (regval >> 3) & 0xff);
+ if (ret < 0)
+ goto unlock;
+ ret = regmap_write(data->regmap, regl, (regval & 0x07) << 5);
+ } else {
+ if (data->chip == emc1428)
+ val = clamp_val(val, -128000, 127000);
+ else
+ val = clamp_val(val, 0, 255000);
+ regval = DIV_ROUND_CLOSEST(val, 1000);
+ ret = regmap_write(data->regmap, regh, regval);
+ }
+unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int emc1403_temp_write(struct thermal_data *data, u32 attr, int channel, long val)
+{
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return emc1403_set_temp(data, channel, ema1403_temp_map[attr], val);
+ case hwmon_temp_crit_hyst:
+ return emc1403_set_hyst(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Lookup table for temperature conversion times in msec */
+static const u16 ina3221_conv_time[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 62, 31, 16
+};
+
+static int emc1403_set_convrate(struct thermal_data *data, unsigned int interval)
+{
+ int convrate;
+
+ convrate = find_closest_descending(interval, ina3221_conv_time,
+ ARRAY_SIZE(ina3221_conv_time));
+ return regmap_write(data->regmap, 0x04, convrate);
+}
+
+static int emc1403_chip_write(struct thermal_data *data, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return emc1403_set_convrate(data, clamp_val(val, 0, 100000));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int emc1403_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct thermal_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return emc1403_temp_write(data, attr, channel, val);
+ case hwmon_chip:
+ return emc1403_chip_write(data, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t emc1403_temp_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct thermal_data *data = _data;
+
+ if (data->chip == emc1402 && channel > 1)
+ return 0;
+ if (data->chip == emc1403 && channel > 2)
+ return 0;
+ if (data->chip != emc1428 && channel > 3)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static umode_t emc1403_chip_is_visible(const void *_data, u32 attr)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t emc1403_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ return emc1403_temp_is_visible(data, attr, channel);
+ case hwmon_chip:
+ return emc1403_chip_is_visible(data, attr);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info * const emc1403_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT
+ ),
+ NULL
+};
+
+static const struct hwmon_ops emc1403_hwmon_ops = {
+ .is_visible = emc1403_is_visible,
+ .read = emc1403_read,
+ .write = emc1403_write,
+};
+
+static const struct hwmon_chip_info emc1403_chip_info = {
+ .ops = &emc1403_hwmon_ops,
+ .info = emc1403_info,
};
/* Last digit of chip name indicates number of channels */
@@ -447,11 +672,42 @@ static const struct i2c_device_id emc1403_idtable[] = {
{ "emc1422", emc1402 },
{ "emc1423", emc1403 },
{ "emc1424", emc1404 },
+ { "emc1428", emc1428 },
+ { "emc1438", emc1428 },
{ "emc1442", emc1402 },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
+static int emc1403_probe(struct i2c_client *client)
+{
+ struct thermal_data *data;
+ struct device *hwmon_dev;
+ const struct i2c_device_id *id = i2c_match_id(emc1403_idtable, client);
+
+ data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip = id->driver_data;
+ data->regmap = devm_regmap_init_i2c(client, &emc1403_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ mutex_init(&data->mutex);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
+ client->name, data,
+ &emc1403_chip_info,
+ emc1403_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const unsigned short emc1403_address_list[] = {
+ 0x18, 0x1c, 0x29, 0x3c, 0x4c, 0x4d, 0x5c, I2C_CLIENT_END
+};
+
static struct i2c_driver sensor_emc1403 = {
.class = I2C_CLASS_HWMON,
.driver = {
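The emc1403 rewrite replaces the per-attribute sysfs groups with the with_info registration model: per-channel register tables (high and low byte, -1 when no low byte exists), a hwmon_ops read/write/is_visible triple, and devm_hwmon_device_register_with_info(); emc1428/emc1438 support adds four more channels and signed temperatures. A decoding sketch for the split temperature registers used above:

#include <linux/bitops.h>
#include <linux/types.h>

/* High byte carries bits 10:3, the low byte keeps bits 2:0 in its top three
 * bits, and one LSB is 0.125 °C (125 m°C).  Example: high = 0x32, low = 0xa0
 * gives ((0x32 << 3) | (0xa0 >> 5)) * 125 = 405 * 125 = 50625 m°C, i.e.
 * 50.625 °C.  The emc1428/emc1438 treat the value as signed, hence
 * sign_extend32(..., 10). */
static long emc14xx_decode_temp(unsigned int high, unsigned int low, bool is_signed)
{
	unsigned int raw = (high << 3) | (low >> 5);

	return (is_signed ? sign_extend32(raw, 10) : (int)raw) * 125L;
}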
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index b59472bbe5bf..60eddc7b0270 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -620,7 +620,7 @@ emc2103_probe(struct i2c_client *client)
}
static const struct i2c_device_id emc2103_ids[] = {
- { "emc2103", 0, },
+ { "emc2103" },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, emc2103_ids);
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 6ef733c0be16..4d39fbd83769 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -47,10 +47,10 @@ enum emc230x_product_id {
};
static const struct i2c_device_id emc2305_ids[] = {
- { "emc2305", 0 },
- { "emc2303", 0 },
- { "emc2302", 0 },
- { "emc2301", 0 },
+ { "emc2305" },
+ { "emc2303" },
+ { "emc2302" },
+ { "emc2301" },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc2305_ids);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 9a4f868bf1e6..1100c6e5daa7 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -464,7 +464,7 @@ static int emc6w201_probe(struct i2c_client *client)
}
static const struct i2c_device_id emc6w201_id[] = {
- { "emc6w201", 0 },
+ { "emc6w201" },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc6w201_id);
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index b74a2665e733..a3a07662e491 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -50,7 +50,7 @@
static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
static const struct i2c_device_id fts_id[] = {
- { "ftsteutates", 0 },
+ { "ftsteutates" },
{ }
};
MODULE_DEVICE_TABLE(i2c, fts_id);
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index b5edee00267b..39ae8f826417 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -197,7 +197,7 @@ static int g760a_probe(struct i2c_client *client)
}
static const struct i2c_device_id g760a_id[] = {
- { "g760a", 0 },
+ { "g760a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, g760a_id);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index fad69ef56c75..af1228708e25 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -44,8 +44,8 @@
#define DRVNAME "g762"
static const struct i2c_device_id g762_id[] = {
- { "g762", 0 },
- { "g763", 0 },
+ { "g762" },
+ { "g763" },
{ }
};
MODULE_DEVICE_TABLE(i2c, g762_id);
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 03db6158b13a..9c68bc013950 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -642,7 +642,7 @@ static int gl518_probe(struct i2c_client *client)
}
static const struct i2c_device_id gl518_id[] = {
- { "gl518sm", 0 },
+ { "gl518sm" },
{ }
};
MODULE_DEVICE_TABLE(i2c, gl518_id);
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 8bbc6a4f2928..972f4f8caa2b 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -885,7 +885,7 @@ static int gl520_probe(struct i2c_client *client)
}
static const struct i2c_device_id gl520_id[] = {
- { "gl520sm", 0 },
+ { "gl520sm" },
{ }
};
MODULE_DEVICE_TABLE(i2c, gl520_id);
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index a9726b5370fb..85af8299150a 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -233,7 +233,7 @@ static int hih6130_probe(struct i2c_client *client)
/* Device ID table */
static const struct i2c_device_id hih6130_id[] = {
- { "hih6130", 0 },
+ { "hih6130" },
{ }
};
MODULE_DEVICE_TABLE(i2c, hih6130_id);
diff --git a/drivers/hwmon/hs3001.c b/drivers/hwmon/hs3001.c
index 01ea9a3062bc..24ed3fb9a43a 100644
--- a/drivers/hwmon/hs3001.c
+++ b/drivers/hwmon/hs3001.c
@@ -175,7 +175,7 @@ static const struct hwmon_chip_info hs3001_chip_info = {
/* device ID table */
static const struct i2c_device_id hs3001_ids[] = {
- { "hs3001", 0 },
+ { "hs3001" },
{ },
};
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index d9b57a4b3e41..bd7b3380d847 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -576,7 +576,7 @@ static void ina209_remove(struct i2c_client *client)
}
static const struct i2c_device_id ina209_id[] = {
- { "ina209", 0 },
+ { "ina209" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina209_id);
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 69289293bc38..855626f1bc01 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -616,7 +616,7 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
- { "ina238", 0 },
+ { "ina238" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina238_id);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 2c9530b6f192..f0053f87e3e6 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -1031,7 +1031,7 @@ static const struct of_device_id ina3221_of_match_table[] = {
MODULE_DEVICE_TABLE(of, ina3221_of_match_table);
static const struct i2c_device_id ina3221_ids[] = {
- { "ina3221", 0 },
+ { "ina3221" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ina3221_ids);
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index fbe86cec6055..e233aafa8856 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -117,7 +117,7 @@ static inline void superio_select(int ioreg, int ldn)
outb(ldn, ioreg + 1);
}
-static inline int superio_enter(int ioreg)
+static inline int superio_enter(int ioreg, bool noentry)
{
/*
* Try to reserve ioreg and ioreg + 1 for exclusive access.
@@ -125,7 +125,8 @@ static inline int superio_enter(int ioreg)
if (!request_muxed_region(ioreg, 2, DRVNAME))
return -EBUSY;
- __superio_enter(ioreg);
+ if (!noentry)
+ __superio_enter(ioreg);
return 0;
}
@@ -320,7 +321,7 @@ struct it87_devices {
* second SIO address. Never exit configuration mode on these
* chips to avoid the problem.
*/
-#define FEAT_CONF_NOEXIT BIT(19) /* Chip should not exit conf mode */
+#define FEAT_NOCONF BIT(19) /* Chip conf mode enabled on startup */
#define FEAT_FOUR_FANS BIT(20) /* Supports four fans */
#define FEAT_FOUR_PWM BIT(21) /* Supports four fan controls */
#define FEAT_FOUR_TEMP BIT(22)
@@ -452,7 +453,7 @@ static const struct it87_devices it87_devices[] = {
.model = "IT8790E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
- | FEAT_PWM_FREQ2 | FEAT_FANCTL_ONOFF | FEAT_CONF_NOEXIT,
+ | FEAT_PWM_FREQ2 | FEAT_FANCTL_ONOFF | FEAT_NOCONF,
.peci_mask = 0x07,
},
[it8792] = {
@@ -461,7 +462,7 @@ static const struct it87_devices it87_devices[] = {
.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
| FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL | FEAT_FANCTL_ONOFF
- | FEAT_CONF_NOEXIT,
+ | FEAT_NOCONF,
.peci_mask = 0x07,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
@@ -507,7 +508,7 @@ static const struct it87_devices it87_devices[] = {
.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
| FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL | FEAT_FANCTL_ONOFF
- | FEAT_CONF_NOEXIT,
+ | FEAT_NOCONF,
.peci_mask = 0x07,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
@@ -544,7 +545,7 @@ static const struct it87_devices it87_devices[] = {
#define has_four_temp(data) ((data)->features & FEAT_FOUR_TEMP)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
-#define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT)
+#define has_noconf(data) ((data)->features & FEAT_NOCONF)
#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
FEAT_10_9MV_ADC))
#define has_fanctl_onoff(data) ((data)->features & FEAT_FANCTL_ONOFF)
@@ -742,13 +743,13 @@ static int smbus_disable(struct it87_data *data)
int err;
if (data->smbus_bitmap) {
- err = superio_enter(data->sioaddr);
+ err = superio_enter(data->sioaddr, has_noconf(data));
if (err)
return err;
superio_select(data->sioaddr, PME);
superio_outb(data->sioaddr, IT87_SPECIAL_CFG_REG,
data->ec_special_config & ~data->smbus_bitmap);
- superio_exit(data->sioaddr, has_conf_noexit(data));
+ superio_exit(data->sioaddr, has_noconf(data));
}
return 0;
}
@@ -758,14 +759,14 @@ static int smbus_enable(struct it87_data *data)
int err;
if (data->smbus_bitmap) {
- err = superio_enter(data->sioaddr);
+ err = superio_enter(data->sioaddr, has_noconf(data));
if (err)
return err;
superio_select(data->sioaddr, PME);
superio_outb(data->sioaddr, IT87_SPECIAL_CFG_REG,
data->ec_special_config);
- superio_exit(data->sioaddr, has_conf_noexit(data));
+ superio_exit(data->sioaddr, has_noconf(data));
}
return 0;
}
@@ -2666,6 +2667,27 @@ static const struct attribute_group it87_group_auto_pwm = {
.is_visible = it87_auto_pwm_is_visible,
};
+/*
+ * Original explanation:
+ * On various Gigabyte AM4 boards (AB350, AX370), the second Super-IO chip
+ * (IT8792E) needs to be in configuration mode before accessing the first
+ * due to a bug in IT8792E which otherwise results in LPC bus access errors.
+ * This needs to be done before accessing the first Super-IO chip since
+ * the second chip may have been accessed prior to loading this driver.
+ *
+ * The problem is also reported to affect IT8795E, which is used on X299 boards
+ * and has the same chip ID as IT8792E (0x8733). It also appears to affect
+ * systems with IT8790E, which is used on some Z97X-Gaming boards as well as
+ * Z87X-OC.
+ *
+ * From other information supplied:
+ * ChipIDs 0x8733, 0x8695 (early ID for IT87952E) and 0x8790 are initialized
+ * and left in configuration mode, and entering and/or exiting configuration
+ * mode is what causes the crash.
+ *
+ * The recommendation is to look up the chipID before doing any mode swap
+ * and then act accordingly.
+ */
/* SuperIO detection - will change isa_address if a chip is found */
static int __init it87_find(int sioaddr, unsigned short *address,
struct it87_sio_data *sio_data, int chip_cnt)
@@ -2673,16 +2695,25 @@ static int __init it87_find(int sioaddr, unsigned short *address,
int err;
u16 chip_type;
const struct it87_devices *config = NULL;
+ bool enabled = false;
- err = superio_enter(sioaddr);
+ /* First step, lock memory but don't enter configuration mode */
+ err = superio_enter(sioaddr, true);
if (err)
return err;
err = -ENODEV;
chip_type = superio_inw(sioaddr, DEVID);
- /* check first for a valid chip before forcing chip id */
- if (chip_type == 0xffff)
- goto exit;
+ /* Check for a valid chip before forcing chip id */
+ if (chip_type == 0xffff) {
+ /* Enter configuration mode */
+ __superio_enter(sioaddr);
+ enabled = true;
+ /* and then try again */
+ chip_type = superio_inw(sioaddr, DEVID);
+ if (chip_type == 0xffff)
+ goto exit;
+ }
if (force_id_cnt == 1) {
/* If only one value given use for all chips */
@@ -2766,6 +2797,18 @@ static int __init it87_find(int sioaddr, unsigned short *address,
config = &it87_devices[sio_data->type];
+ /*
+ * If previously we didn't enter configuration mode and it isn't a
+ * chip we know is initialised in configuration mode, then enter
+ * configuration mode.
+ *
+ * I don't know if any such chips can exist but be defensive.
+ */
+ if (!enabled && !has_noconf(config)) {
+ __superio_enter(sioaddr);
+ enabled = true;
+ }
+
superio_select(sioaddr, PME);
if (!(superio_inb(sioaddr, IT87_ACT_REG) & 0x01)) {
pr_info("Device (chip %s ioreg 0x%x) not activated, skipping\n",
@@ -3143,7 +3186,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
}
exit:
- superio_exit(sioaddr, config ? has_conf_noexit(config) : false);
+ superio_exit(sioaddr, !enabled);
return err;
}
@@ -3520,7 +3563,7 @@ static void it87_resume_sio(struct platform_device *pdev)
if (!data->need_in7_reroute)
return;
- err = superio_enter(data->sioaddr);
+ err = superio_enter(data->sioaddr, has_noconf(data));
if (err) {
dev_warn(&pdev->dev,
"Unable to enter Super I/O to reroute in7 (%d)",
@@ -3540,7 +3583,7 @@ static void it87_resume_sio(struct platform_device *pdev)
reg2c);
}
- superio_exit(data->sioaddr, has_conf_noexit(data));
+ superio_exit(data->sioaddr, has_noconf(data));
}
static int it87_resume(struct device *dev)
@@ -3641,27 +3684,6 @@ static int it87_dmi_cb(const struct dmi_system_id *dmi_entry)
}
/*
- * On various Gigabyte AM4 boards (AB350, AX370), the second Super-IO chip
- * (IT8792E) needs to be in configuration mode before accessing the first
- * due to a bug in IT8792E which otherwise results in LPC bus access errors.
- * This needs to be done before accessing the first Super-IO chip since
- * the second chip may have been accessed prior to loading this driver.
- *
- * The problem is also reported to affect IT8795E, which is used on X299 boards
- * and has the same chip ID as IT8792E (0x8733). It also appears to affect
- * systems with IT8790E, which is used on some Z97X-Gaming boards as well as
- * Z87X-OC.
- * DMI entries for those systems will be added as they become available and
- * as the problem is confirmed to affect those boards.
- */
-static int it87_sio_force(const struct dmi_system_id *dmi_entry)
-{
- __superio_enter(REG_4E);
-
- return it87_dmi_cb(dmi_entry);
-};
-
-/*
* On the Shuttle SN68PT, FAN_CTL2 is apparently not
* connected to a fan, but to something else. One user
* has reported instant system power-off when changing
@@ -3683,34 +3705,7 @@ static struct it87_dmi_data nvidia_fn68pt = {
.driver_data = data, \
}
-#define IT87_DMI_MATCH_GBT(name, cb, data) \
- IT87_DMI_MATCH_VND("Gigabyte Technology Co., Ltd.", name, cb, data)
-
static const struct dmi_system_id it87_dmi_table[] __initconst = {
- IT87_DMI_MATCH_GBT("AB350", it87_sio_force, NULL),
- /* ? + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("AX370", it87_sio_force, NULL),
- /* ? + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("Z97X-Gaming G1", it87_sio_force, NULL),
- /* ? + IT8790E */
- IT87_DMI_MATCH_GBT("TRX40 AORUS XTREME", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("Z390 AORUS ULTRA-CF", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("B550 AORUS PRO AC", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("X570 AORUS MASTER", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("X570 AORUS PRO", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("X570 AORUS PRO WIFI", it87_sio_force, NULL),
- /* IT8688E + IT8792E/IT8795E */
- IT87_DMI_MATCH_GBT("X570S AERO G", it87_sio_force, NULL),
- /* IT8689E + IT87952E */
- IT87_DMI_MATCH_GBT("Z690 AORUS PRO DDR4", it87_sio_force, NULL),
- /* IT8689E + IT87952E */
- IT87_DMI_MATCH_GBT("Z690 AORUS PRO", it87_sio_force, NULL),
- /* IT8689E + IT87952E */
IT87_DMI_MATCH_VND("nVIDIA", "FN68PT", it87_dmi_cb, &nvidia_fn68pt),
{ }
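The it87 rework drops the Gigabyte DMI list that force-entered configuration mode and instead decides at probe time: superio_enter() grows a noentry flag, the detect path first reserves the ports and reads the chip ID without entering configuration mode, only issues the enter sequence when that read fails or the chip is not a FEAT_NOCONF part, and superio_exit() is told whether an exit sequence is needed at all. A simplified sketch of that flow, reusing the driver's own helpers:

static int foo_detect(int sioaddr)
{
	bool entered = false;
	u16 chip_type;
	int err;

	err = superio_enter(sioaddr, /* noentry */ true);	/* reserve ports only */
	if (err)
		return err;

	chip_type = superio_inw(sioaddr, DEVID);
	if (chip_type == 0xffff) {		/* no answer: enter configuration mode and retry */
		__superio_enter(sioaddr);
		entered = true;
		chip_type = superio_inw(sioaddr, DEVID);
	}

	/* ...match chip_type; if !has_noconf(config) and !entered, enter now... */

	superio_exit(sioaddr, !entered);	/* skip the exit sequence if we never entered */
	return chip_type == 0xffff ? -ENODEV : 0;
}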
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 75dc25df0f8b..7092f8f025b8 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -609,7 +609,7 @@ static const struct dev_pm_ops jc42_dev_pm_ops = {
#endif /* CONFIG_PM */
static const struct i2c_device_id jc42_id[] = {
- { "jc42", 0 },
+ { "jc42" },
{ }
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
@@ -623,7 +623,7 @@ MODULE_DEVICE_TABLE(of, jc42_of_ids);
#endif
static struct i2c_driver jc42_driver = {
- .class = I2C_CLASS_SPD | I2C_CLASS_HWMON,
+ .class = I2C_CLASS_HWMON,
.driver = {
.name = "jc42",
.pm = JC42_DEV_PM_OPS,
diff --git a/drivers/hwmon/lenovo-ec-sensors.c b/drivers/hwmon/lenovo-ec-sensors.c
new file mode 100644
index 000000000000..143fb79713f7
--- /dev/null
+++ b/drivers/hwmon/lenovo-ec-sensors.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON driver for Lenovo ThinkStation based workstations
+ * via the embedded controller registers
+ *
+ * Copyright (C) 2024 David Ober (Lenovo) <dober@lenovo.com>
+ *
+ * EC provides:
+ * - CPU temperature
+ * - DIMM temperature
+ * - Chassis zone temperatures
+ * - CPU fan RPM
+ * - DIMM fan RPM
+ * - Chassis fans RPM
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#define MCHP_SING_IDX 0x0000
+#define MCHP_EMI0_APPLICATION_ID 0x090C
+#define MCHP_EMI0_EC_ADDRESS 0x0902
+#define MCHP_EMI0_EC_DATA_BYTE0 0x0904
+#define MCHP_EMI0_EC_DATA_BYTE1 0x0905
+#define MCHP_EMI0_EC_DATA_BYTE2 0x0906
+#define MCHP_EMI0_EC_DATA_BYTE3 0x0907
+#define IO_REGION_START 0x0900
+#define IO_REGION_LENGTH 0xD
+
+static inline u8
+get_ec_reg(unsigned char page, unsigned char index)
+{
+ u8 onebyte;
+ unsigned short m_index;
+ unsigned short phy_index = page * 256 + index;
+
+ outb_p(0x01, MCHP_EMI0_APPLICATION_ID);
+
+ m_index = phy_index & GENMASK(14, 2);
+ outw_p(m_index, MCHP_EMI0_EC_ADDRESS);
+
+ onebyte = inb_p(MCHP_EMI0_EC_DATA_BYTE0 + (phy_index & GENMASK(1, 0)));
+
+ outb_p(0x01, MCHP_EMI0_APPLICATION_ID); /* write 0x01 again to clean */
+ return onebyte;
+}
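/* Worked example of the access above: for page 4, index 0x21, phy_index is
 * 4 * 256 + 0x21 = 0x421; the dword-aligned address written to
 * MCHP_EMI0_EC_ADDRESS is 0x421 & GENMASK(14, 2) = 0x420, and the byte is
 * read from MCHP_EMI0_EC_DATA_BYTE0 + (0x421 & GENMASK(1, 0)), i.e. byte
 * lane 1 at I/O port 0x905. */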
+
+enum systems {
+ LENOVO_PX,
+ LENOVO_P7,
+ LENOVO_P5,
+ LENOVO_P8,
+};
+
+static int px_temp_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+static const char * const lenovo_px_ec_temp_label[] = {
+ "CPU1",
+ "CPU2",
+ "R_DIMM1",
+ "L_DIMM1",
+ "R_DIMM2",
+ "L_DIMM2",
+ "PCH",
+ "M2_R",
+ "M2_Z1R",
+ "M2_Z2R",
+ "PCI_Z1",
+ "PCI_Z2",
+ "PCI_Z3",
+ "PCI_Z4",
+ "AMB",
+};
+
+static int gen_temp_map[] = {0, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+static const char * const lenovo_gen_ec_temp_label[] = {
+ "CPU1",
+ "R_DIMM",
+ "L_DIMM",
+ "PCH",
+ "M2_R",
+ "M2_Z1R",
+ "M2_Z2R",
+ "PCI_Z1",
+ "PCI_Z2",
+ "PCI_Z3",
+ "PCI_Z4",
+ "AMB",
+};
+
+static int px_fan_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+static const char * const px_ec_fan_label[] = {
+ "CPU1_Fan",
+ "CPU2_Fan",
+ "Front_Fan1-1",
+ "Front_Fan1-2",
+ "Front_Fan2",
+ "Front_Fan3",
+ "MEM_Fan1",
+ "MEM_Fan2",
+ "Rear_Fan1",
+ "Rear_Fan2",
+ "Flex_Bay_Fan1",
+ "Flex_Bay_Fan2",
+ "Flex_Bay_Fan2",
+ "PSU_HDD_Fan",
+ "PSU1_Fan",
+ "PSU2_Fan",
+};
+
+static int p7_fan_map[] = {0, 2, 3, 4, 5, 6, 7, 8, 10, 11, 14};
+
+static const char * const p7_ec_fan_label[] = {
+ "CPU1_Fan",
+ "HP_CPU_Fan1",
+ "HP_CPU_Fan2",
+ "PCIE1_4_Fan",
+ "PCIE5_7_Fan",
+ "MEM_Fan1",
+ "MEM_Fan2",
+ "Rear_Fan1",
+ "BCB_Fan",
+ "Flex_Bay_Fan",
+ "PSU_Fan",
+};
+
+static int p5_fan_map[] = {0, 5, 6, 7, 8, 10, 11, 14};
+
+static const char * const p5_ec_fan_label[] = {
+ "CPU_Fan",
+ "HDD_Fan",
+ "Duct_Fan1",
+ "MEM_Fan",
+ "Rear_Fan",
+ "Front_Fan",
+ "Flex_Bay_Fan",
+ "PSU_Fan",
+};
+
+static int p8_fan_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14};
+
+static const char * const p8_ec_fan_label[] = {
+ "CPU1_Fan",
+ "CPU2_Fan",
+ "HP_CPU_Fan1",
+ "HP_CPU_Fan2",
+ "PCIE1_4_Fan",
+ "PCIE5_7_Fan",
+ "DIMM1_Fan1",
+ "DIMM1_Fan2",
+ "DIMM2_Fan1",
+ "DIMM2_Fan2",
+ "Rear_Fan",
+ "HDD_Bay_Fan",
+ "Flex_Bay_Fan",
+ "PSU_Fan",
+};
+
+struct ec_sensors_data {
+ struct mutex mec_mutex; /* lock for sensor data access */
+ const char *const *fan_labels;
+ const char *const *temp_labels;
+ const int *fan_map;
+ const int *temp_map;
+};
+
+static int
+lenovo_ec_do_read_temp(struct ec_sensors_data *data, u32 attr, int channel, long *val)
+{
+ u8 lsb;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ mutex_lock(&data->mec_mutex);
+ lsb = get_ec_reg(2, 0x81 + channel);
+ mutex_unlock(&data->mec_mutex);
+ if (lsb <= 0x40)
+ return -ENODATA;
+ *val = (lsb - 0x40) * 1000;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+lenovo_ec_do_read_fan(struct ec_sensors_data *data, u32 attr, int channel, long *val)
+{
+ u8 lsb, msb;
+
+ channel *= 2;
+ switch (attr) {
+ case hwmon_fan_input:
+ mutex_lock(&data->mec_mutex);
+ lsb = get_ec_reg(4, 0x20 + channel);
+ msb = get_ec_reg(4, 0x21 + channel);
+ mutex_unlock(&data->mec_mutex);
+ *val = (msb << 8) + lsb;
+ return 0;
+ case hwmon_fan_max:
+ mutex_lock(&data->mec_mutex);
+ lsb = get_ec_reg(4, 0x40 + channel);
+ msb = get_ec_reg(4, 0x41 + channel);
+ mutex_unlock(&data->mec_mutex);
+ *val = (msb << 8) + lsb;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
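Taken together, the two readers above imply the EC layout this driver assumes (this is inferred from the code alone): temperatures are single bytes on page 2 starting at index 0x81, in whole degrees Celsius offset by 0x40 (values at or below 0x40 are treated as no reading), while fan speeds are little-endian 16-bit RPM values on page 4, current speed at 0x20 + 2 * channel and maximum speed at 0x40 + 2 * channel. A short worked example of both conversions:

/* Editorial sketch of the EC value decoding done above. */
#include <stdio.h>

int main(void)
{
	unsigned char temp_raw = 0x6c;			/* example EC temperature byte */
	unsigned char fan_lsb = 0x9a, fan_msb = 0x02;	/* example RPM byte pair */

	long temp_mC = (temp_raw - 0x40) * 1000;	/* 44 degC -> 44000 mdegC */
	long rpm = (fan_msb << 8) + fan_lsb;		/* 0x029a -> 666 RPM */

	printf("temp=%ld mdegC, fan=%ld RPM\n", temp_mC, rpm);
	return 0;
}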
+
+static int
+lenovo_ec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct ec_sensors_data *state = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ *str = state->temp_labels[channel];
+ return 0;
+ case hwmon_fan:
+ *str = state->fan_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+lenovo_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ec_sensors_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return lenovo_ec_do_read_temp(data, attr, data->temp_map[channel], val);
+ case hwmon_fan:
+ return lenovo_ec_do_read_fan(data, attr, data->fan_map[channel], val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
+lenovo_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ return 0444;
+ return 0;
+ case hwmon_fan:
+ if (attr == hwmon_fan_input || attr == hwmon_fan_max || attr == hwmon_fan_label)
+ return 0444;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *lenovo_ec_hwmon_info_px[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX),
+ NULL
+};
+
+static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p8[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX),
+ NULL
+};
+
+static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p7[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX),
+ NULL
+};
+
+static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p5[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX),
+ NULL
+};
+
+static const struct hwmon_ops lenovo_ec_hwmon_ops = {
+ .is_visible = lenovo_ec_hwmon_is_visible,
+ .read = lenovo_ec_hwmon_read,
+ .read_string = lenovo_ec_hwmon_read_string,
+};
+
+static struct hwmon_chip_info lenovo_ec_chip_info = {
+ .ops = &lenovo_ec_hwmon_ops,
+};
+
+static const struct dmi_system_id thinkstation_dmi_table[] = {
+ {
+ .ident = "LENOVO_PX",
+ .driver_data = (void *)(long)LENOVO_PX,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30EU"),
+ },
+ },
+ {
+ .ident = "LENOVO_PX",
+ .driver_data = (void *)(long)LENOVO_PX,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30EV"),
+ },
+ },
+ {
+ .ident = "LENOVO_P7",
+ .driver_data = (void *)(long)LENOVO_P7,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30F2"),
+ },
+ },
+ {
+ .ident = "LENOVO_P7",
+ .driver_data = (void *)(long)LENOVO_P7,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30F3"),
+ },
+ },
+ {
+ .ident = "LENOVO_P5",
+ .driver_data = (void *)(long)LENOVO_P5,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30G9"),
+ },
+ },
+ {
+ .ident = "LENOVO_P5",
+ .driver_data = (void *)(long)LENOVO_P5,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30GA"),
+ },
+ },
+ {
+ .ident = "LENOVO_P8",
+ .driver_data = (void *)(long)LENOVO_P8,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30HH"),
+ },
+ },
+ {
+ .ident = "LENOVO_P8",
+ .driver_data = (void *)(long)LENOVO_P8,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "30HJ"),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, thinkstation_dmi_table);
+
+static int lenovo_ec_probe(struct platform_device *pdev)
+{
+ struct device *hwdev;
+ struct ec_sensors_data *ec_data;
+ const struct hwmon_chip_info *chip_info;
+ struct device *dev = &pdev->dev;
+ const struct dmi_system_id *dmi_id;
+ int app_id;
+
+ ec_data = devm_kzalloc(dev, sizeof(struct ec_sensors_data), GFP_KERNEL);
+ if (!ec_data)
+ return -ENOMEM;
+
+ if (!request_region(IO_REGION_START, IO_REGION_LENGTH, "LNV-WKS")) {
+ pr_err(":request fail\n");
+ return -EIO;
+ }
+
+ dev_set_drvdata(dev, ec_data);
+
+ chip_info = &lenovo_ec_chip_info;
+
+ mutex_init(&ec_data->mec_mutex);
+
+ mutex_lock(&ec_data->mec_mutex);
+ app_id = inb_p(MCHP_EMI0_APPLICATION_ID);
+ if (app_id) /* check EMI Application ID Value */
+ outb_p(app_id, MCHP_EMI0_APPLICATION_ID); /* set EMI Application ID to 0 */
+ outw_p(MCHP_SING_IDX, MCHP_EMI0_EC_ADDRESS);
+ mutex_unlock(&ec_data->mec_mutex);
+
+ if ((inb_p(MCHP_EMI0_EC_DATA_BYTE0) != 'M') &&
+ (inb_p(MCHP_EMI0_EC_DATA_BYTE1) != 'C') &&
+ (inb_p(MCHP_EMI0_EC_DATA_BYTE2) != 'H') &&
+ (inb_p(MCHP_EMI0_EC_DATA_BYTE3) != 'P')) {
+ release_region(IO_REGION_START, IO_REGION_LENGTH);
+ return -ENODEV;
+ }
+
+ dmi_id = dmi_first_match(thinkstation_dmi_table);
+
+ switch ((long)dmi_id->driver_data) {
+ case 0:
+ ec_data->fan_labels = px_ec_fan_label;
+ ec_data->temp_labels = lenovo_px_ec_temp_label;
+ ec_data->fan_map = px_fan_map;
+ ec_data->temp_map = px_temp_map;
+ lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_px;
+ break;
+ case 1:
+ ec_data->fan_labels = p7_ec_fan_label;
+ ec_data->temp_labels = lenovo_gen_ec_temp_label;
+ ec_data->fan_map = p7_fan_map;
+ ec_data->temp_map = gen_temp_map;
+ lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_p7;
+ break;
+ case 2:
+ ec_data->fan_labels = p5_ec_fan_label;
+ ec_data->temp_labels = lenovo_gen_ec_temp_label;
+ ec_data->fan_map = p5_fan_map;
+ ec_data->temp_map = gen_temp_map;
+ lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_p5;
+ break;
+ case 3:
+ ec_data->fan_labels = p8_ec_fan_label;
+ ec_data->temp_labels = lenovo_gen_ec_temp_label;
+ ec_data->fan_map = p8_fan_map;
+ ec_data->temp_map = gen_temp_map;
+ lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_p8;
+ break;
+ default:
+ release_region(IO_REGION_START, IO_REGION_LENGTH);
+ return -ENODEV;
+ }
+
+ hwdev = devm_hwmon_device_register_with_info(dev, "lenovo_ec",
+ ec_data,
+ chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwdev);
+}
+
+static struct platform_driver lenovo_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "lenovo-ec-sensors",
+ },
+ .probe = lenovo_ec_probe,
+};
+
+static struct platform_device *lenovo_ec_sensors_platform_device;
+
+static int __init lenovo_ec_init(void)
+{
+ if (!dmi_check_system(thinkstation_dmi_table))
+ return -ENODEV;
+
+ lenovo_ec_sensors_platform_device =
+ platform_create_bundle(&lenovo_ec_sensors_platform_driver,
+ lenovo_ec_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(lenovo_ec_sensors_platform_device)) {
+ release_region(IO_REGION_START, IO_REGION_LENGTH);
+ return PTR_ERR(lenovo_ec_sensors_platform_device);
+ }
+
+ return 0;
+}
+module_init(lenovo_ec_init);
+
+static void __exit lenovo_ec_exit(void)
+{
+ release_region(IO_REGION_START, IO_REGION_LENGTH);
+ platform_device_unregister(lenovo_ec_sensors_platform_device);
+ platform_driver_unregister(&lenovo_ec_sensors_platform_driver);
+}
+module_exit(lenovo_ec_exit);
+
+MODULE_AUTHOR("David Ober <dober@lenovo.com>");
+MODULE_DESCRIPTION("HWMON driver for sensors accessible via EC in LENOVO motherboards");
+MODULE_LICENSE("GPL");
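One detail of the probe flow above is easy to miss: the DMI table stores the board family as a small integer cast into the void * driver_data cookie, and lenovo_ec_probe() casts it back to long and switches on the literal values 0..3, which correspond to the LENOVO_PX, LENOVO_P7, LENOVO_P5 and LENOVO_P8 enumerators. A minimal illustration of that round-trip with stand-in types (not the kernel DMI API):

/* Editorial sketch: carrying a small enum through a void * cookie,
 * as the DMI table and probe function above do. */
#include <stdio.h>

enum systems { LENOVO_PX, LENOVO_P7, LENOVO_P5, LENOVO_P8 };

struct dmi_entry {			/* stand-in for struct dmi_system_id */
	const char *ident;
	void *driver_data;
};

int main(void)
{
	struct dmi_entry e = { "LENOVO_P7", (void *)(long)LENOVO_P7 };

	switch ((long)e.driver_data) {
	case LENOVO_P7:
		printf("%s -> P7 label/map tables\n", e.ident);
		break;
	default:
		printf("unhandled\n");
		break;
	}
	return 0;
}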
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index df69c380cde7..64a335a64a2e 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -502,7 +502,7 @@ static int pem_probe(struct i2c_client *client)
}
static const struct i2c_device_id pem_id[] = {
- {"lineage_pem", 0},
+ {"lineage_pem"},
{}
};
MODULE_DEVICE_TABLE(i2c, pem_id);
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index c20a749fc7f2..481e4e1f8f4f 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -6,9 +6,9 @@
* Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
*
* The LM70 communicates with a host processor via an SPI/Microwire Bus
- * interface. The complete datasheet is available at National's website
+ * interface. The complete datasheet is available at TI's website
* here:
- * http://www.national.com/pf/LM/LM70.html
+ * https://www.ti.com/product/LM70
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 637d35c5ae23..581b01572e1b 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -220,7 +220,7 @@ lm73_probe(struct i2c_client *client)
}
static const struct i2c_device_id lm73_ids[] = {
- { "lm73", 0 },
+ { "lm73" },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm73_ids);
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 8b9862519178..80f7a6a3f9a2 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -337,7 +337,7 @@ static int lm77_probe(struct i2c_client *client)
}
static const struct i2c_device_id lm77_id[] = {
- { "lm77", 0 },
+ { "lm77" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm77_id);
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 2195a735d28e..d2d970e73c61 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -975,8 +975,8 @@ static int lm87_probe(struct i2c_client *client)
*/
static const struct i2c_device_id lm87_id[] = {
- { "lm87", 0 },
- { "adm1024", 0 },
+ { "lm87" },
+ { "adm1024" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm87_id);
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 75bca805720e..be4853fad80f 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2624,8 +2624,8 @@ static int lm93_probe(struct i2c_client *client)
}
static const struct i2c_device_id lm93_id[] = {
- { "lm93", 0 },
- { "lm94", 0 },
+ { "lm93" },
+ { "lm94" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm93_id);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 475551f5024b..cad0a0ff8416 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -457,8 +457,8 @@ static int lm95241_probe(struct i2c_client *client)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { "lm95231", 0 },
- { "lm95241", 0 },
+ { "lm95231" },
+ { "lm95241" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 17ff54bd4015..d293b4f15dc1 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -578,8 +578,8 @@ static int lm95245_probe(struct i2c_client *client)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95245_id[] = {
- { "lm95235", 0 },
- { "lm95245", 0 },
+ { "lm95235" },
+ { "lm95245" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95245_id);
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index 45b87a863cae..3e0e0e0687bd 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -508,7 +508,7 @@ static int ltc2945_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc2945_id[] = {
- {"ltc2945", 0},
+ {"ltc2945"},
{ }
};
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
index 33f574bf2ce7..176d710706dd 100644
--- a/drivers/hwmon/ltc2947-i2c.c
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -27,7 +27,7 @@ static int ltc2947_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ltc2947_id[] = {
- {"ltc2947", 0},
+ {"ltc2947"},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2947_id);
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
index 1ad362c0fd2c..f1c1933c52cf 100644
--- a/drivers/hwmon/ltc2990.c
+++ b/drivers/hwmon/ltc2990.c
@@ -259,7 +259,7 @@ static int ltc2990_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ltc2990_i2c_id[] = {
- { "ltc2990", 0 },
+ { "ltc2990" },
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 80a6e391f266..06750bb93c23 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -414,7 +414,7 @@ static const struct of_device_id ltc2991_of_match[] = {
MODULE_DEVICE_TABLE(of, ltc2991_of_match);
static const struct i2c_device_id ltc2991_i2c_id[] = {
- { "ltc2991", 0 },
+ { "ltc2991" },
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2991_i2c_id);
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 001799bc28ed..229aed15d5ca 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -922,7 +922,7 @@ static const struct of_device_id ltc2992_of_match[] = {
MODULE_DEVICE_TABLE(of, ltc2992_of_match);
static const struct i2c_device_id ltc2992_i2c_id[] = {
- {"ltc2992", 0},
+ {"ltc2992"},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2992_i2c_id);
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index f42ac3c9475e..fa66eda78efe 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -188,7 +188,7 @@ static int ltc4151_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4151_id[] = {
- { "ltc4151", 0 },
+ { "ltc4151" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4151_id);
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 66fd28f713ab..cce452711cec 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -245,7 +245,7 @@ static int ltc4215_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4215_id[] = {
- { "ltc4215", 0 },
+ { "ltc4215" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4215_id);
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
index 9098ef521739..f7eb007fd766 100644
--- a/drivers/hwmon/ltc4222.c
+++ b/drivers/hwmon/ltc4222.c
@@ -200,7 +200,7 @@ static int ltc4222_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4222_id[] = {
- {"ltc4222", 0},
+ {"ltc4222"},
{ }
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index b90184a3e0ec..14593bc81e85 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -469,7 +469,7 @@ static int ltc4245_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4245_id[] = {
- { "ltc4245", 0 },
+ { "ltc4245" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4245_id);
diff --git a/drivers/hwmon/ltc4260.c b/drivers/hwmon/ltc4260.c
index 52f7a809b27b..9750dc9aa336 100644
--- a/drivers/hwmon/ltc4260.c
+++ b/drivers/hwmon/ltc4260.c
@@ -163,7 +163,7 @@ static int ltc4260_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4260_id[] = {
- {"ltc4260", 0},
+ {"ltc4260"},
{ }
};
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 509e68176c7a..2cd218a6a3be 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -222,7 +222,7 @@ static int ltc4261_probe(struct i2c_client *client)
}
static const struct i2c_device_id ltc4261_id[] = {
- {"ltc4261", 0},
+ {"ltc4261"},
{}
};
diff --git a/drivers/hwmon/max127.c b/drivers/hwmon/max127.c
index da2289e3560a..a9aab8862f5e 100644
--- a/drivers/hwmon/max127.c
+++ b/drivers/hwmon/max127.c
@@ -329,7 +329,7 @@ static int max127_probe(struct i2c_client *client)
}
static const struct i2c_device_id max127_id[] = {
- { "max127", 0 },
+ { "max127" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max127_id);
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 500dc926a7a7..a89a519cf5d9 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -285,7 +285,7 @@ static int max1619_probe(struct i2c_client *new_client)
}
static const struct i2c_device_id max1619_id[] = {
- { "max1619", 0 },
+ { "max1619" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index 7d237db6e57c..2f4b419b6c9e 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -345,7 +345,7 @@ max31730_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31730_ids[] = {
- { "max31730", 0, },
+ { "max31730" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max31730_ids);
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 3dc95196b229..f56913327004 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -49,6 +49,9 @@
#define NR_CHANNEL 6
+#define PWM_INPUT_SCALE 255
+#define MAX31790_REG_PWMOUT_SCALE 511
+
/*
* Client data (each client gets its own)
*/
@@ -343,10 +346,13 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
err = -EINVAL;
break;
}
+
+ val = DIV_ROUND_CLOSEST(val * MAX31790_REG_PWMOUT_SCALE,
+ PWM_INPUT_SCALE);
data->valid = false;
err = i2c_smbus_write_word_swapped(client,
MAX31790_REG_PWMOUT(channel),
- val << 8);
+ val << 7);
break;
case hwmon_pwm_enable:
fan_config = data->fan_config[channel];
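The max31790 change above reworks the PWM write path: user input is still 0..255 (PWM_INPUT_SCALE), but it is now rescaled to the chip's 9-bit duty range 0..511 (MAX31790_REG_PWMOUT_SCALE) and shifted left by 7 so that the duty value lands in the top nine bits of the 16-bit PWMOUT register, replacing the old 8-bit value shifted left by 8. A short worked example, with DIV_ROUND_CLOSEST re-implemented for non-negative values only:

/* Editorial sketch of the new 255 -> 511 PWM rescaling. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	for (long val = 0; val <= 255; val += 85) {	/* 0, 85, 170, 255 */
		long duty9 = DIV_ROUND_CLOSEST(val * 511, 255);

		printf("pwm %3ld -> duty %3ld -> reg 0x%04lx\n",
		       val, duty9, duty9 << 7);
	}
	return 0;	/* 255 maps to 511 -> 0xff80, i.e. full scale */
}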
@@ -537,7 +543,7 @@ static int max31790_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31790_id[] = {
- { "max31790", 0 },
+ { "max31790" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max31790_id);
diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
index 5d12fb9c9786..13201fb755c9 100644
--- a/drivers/hwmon/max6620.c
+++ b/drivers/hwmon/max6620.c
@@ -493,7 +493,7 @@ static int max6620_probe(struct i2c_client *client)
}
static const struct i2c_device_id max6620_id[] = {
- { "max6620", 0 },
+ { "max6620" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6620_id);
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 05426cde0e36..a7066f3a0bb4 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -537,7 +537,7 @@ static int max6621_probe(struct i2c_client *client)
}
static const struct i2c_device_id max6621_id[] = {
- { MAX6621_DRV_NAME, 0 },
+ { MAX6621_DRV_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6621_id);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index aa7f21ab2395..cbb595fe47aa 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/platform_data/max6639.h>
+#include <linux/regmap.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
@@ -57,6 +58,8 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
+#define MAX6639_NUM_CHANNELS 2
+
static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
@@ -67,22 +70,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
* Client data (each client gets its own)
*/
struct max6639_data {
- struct i2c_client *client;
- struct mutex update_lock;
- bool valid; /* true if following fields are valid */
- unsigned long last_updated; /* In jiffies */
-
- /* Register values sampled regularly */
- u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
- bool temp_fault[2]; /* Detected temperature diode failure */
- u8 fan[2]; /* Register value: TACH count for fans >=30 */
- u8 status; /* Detected channel alarms and fan failures */
-
- /* Register values only written to */
- u8 pwm[2]; /* Register value: Duty cycle 0..120 */
- u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
- u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
- u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
+ struct regmap *regmap;
/* Register values initialized only once */
u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
@@ -92,90 +80,47 @@ struct max6639_data {
struct regulator *reg;
};
-static struct max6639_data *max6639_update_device(struct device *dev)
-{
- struct max6639_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct max6639_data *ret = data;
- int i;
- int status_reg;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
- int res;
-
- dev_dbg(&client->dev, "Starting max6639 update\n");
-
- status_reg = i2c_smbus_read_byte_data(client,
- MAX6639_REG_STATUS);
- if (status_reg < 0) {
- ret = ERR_PTR(status_reg);
- goto abort;
- }
-
- data->status = status_reg;
-
- for (i = 0; i < 2; i++) {
- res = i2c_smbus_read_byte_data(client,
- MAX6639_REG_FAN_CNT(i));
- if (res < 0) {
- ret = ERR_PTR(res);
- goto abort;
- }
- data->fan[i] = res;
-
- res = i2c_smbus_read_byte_data(client,
- MAX6639_REG_TEMP_EXT(i));
- if (res < 0) {
- ret = ERR_PTR(res);
- goto abort;
- }
- data->temp[i] = res >> 5;
- data->temp_fault[i] = res & 0x01;
-
- res = i2c_smbus_read_byte_data(client,
- MAX6639_REG_TEMP(i));
- if (res < 0) {
- ret = ERR_PTR(res);
- goto abort;
- }
- data->temp[i] |= res << 3;
- }
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-abort:
- mutex_unlock(&data->update_lock);
-
- return ret;
-}
-
static ssize_t temp_input_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
long temp;
- struct max6639_data *data = max6639_update_device(dev);
+ struct max6639_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned int val;
+ int res;
+
+ /*
+ * Lock isn't needed as MAX6639_REG_TEMP won't change for at least 250ms after reading
+ * MAX6639_REG_TEMP_EXT
+ */
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val);
+ if (res < 0)
+ return res;
+
+ temp = val >> 5;
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP(attr->index), &val);
+ if (res < 0)
+ return res;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ temp |= val << 3;
+ temp *= 125;
- temp = data->temp[attr->index] * 125;
return sprintf(buf, "%ld\n", temp);
}
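The rewritten temperature read above assembles each reading from two registers: the extended register supplies three fractional bits in bits 7:5 (1/8 degree steps; its bit 0 doubles as the diode-fault flag used by temp_fault_show()), the main register supplies whole degrees shifted up by three bits, and the combined value in eighths of a degree is multiplied by 125 to give the millidegrees sysfs expects. A worked example with illustrative register values:

/* Editorial sketch of the MAX6639 temperature assembly above. */
#include <stdio.h>

int main(void)
{
	unsigned int reg_temp = 0x28;	/* 40 degC, whole degrees */
	unsigned int reg_ext = 0xa0;	/* bits 7:5 = 5 -> 5/8 degC */

	long eighths = (reg_ext >> 5) | (reg_temp << 3);	/* 325 eighths */
	printf("%ld mdegC\n", eighths * 125);			/* 40625 */
	return 0;
}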
static ssize_t temp_fault_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct max6639_data *data = max6639_update_device(dev);
+ struct max6639_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned int val;
+ int res;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val);
+ if (res < 0)
+ return res;
- return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
+ return sprintf(buf, "%d\n", val & 1);
}
static ssize_t temp_max_show(struct device *dev,
@@ -183,8 +128,14 @@ static ssize_t temp_max_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int res;
- return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
+ res = regmap_read(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index), &val);
+ if (res < 0)
+ return res;
+
+ return sprintf(buf, "%d\n", (val * 1000));
}
static ssize_t temp_max_store(struct device *dev,
@@ -193,7 +144,6 @@ static ssize_t temp_max_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -201,12 +151,8 @@ static ssize_t temp_max_store(struct device *dev,
if (res)
return res;
- mutex_lock(&data->update_lock);
- data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
- i2c_smbus_write_byte_data(client,
- MAX6639_REG_THERM_LIMIT(attr->index),
- data->temp_therm[attr->index]);
- mutex_unlock(&data->update_lock);
+ regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index),
+ TEMP_LIMIT_TO_REG(val));
return count;
}
@@ -215,8 +161,14 @@ static ssize_t temp_crit_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int res;
+
+ res = regmap_read(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index), &val);
+ if (res < 0)
+ return res;
- return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
+ return sprintf(buf, "%d\n", (val * 1000));
}
static ssize_t temp_crit_store(struct device *dev,
@@ -225,7 +177,6 @@ static ssize_t temp_crit_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -233,12 +184,8 @@ static ssize_t temp_crit_store(struct device *dev,
if (res)
return res;
- mutex_lock(&data->update_lock);
- data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
- i2c_smbus_write_byte_data(client,
- MAX6639_REG_ALERT_LIMIT(attr->index),
- data->temp_alert[attr->index]);
- mutex_unlock(&data->update_lock);
+ regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index),
+ TEMP_LIMIT_TO_REG(val));
return count;
}
@@ -248,8 +195,14 @@ static ssize_t temp_emergency_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int res;
+
+ res = regmap_read(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), &val);
+ if (res < 0)
+ return res;
- return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
+ return sprintf(buf, "%d\n", (val * 1000));
}
static ssize_t temp_emergency_store(struct device *dev,
@@ -258,7 +211,6 @@ static ssize_t temp_emergency_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -266,12 +218,8 @@ static ssize_t temp_emergency_store(struct device *dev,
if (res)
return res;
- mutex_lock(&data->update_lock);
- data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
- i2c_smbus_write_byte_data(client,
- MAX6639_REG_OT_LIMIT(attr->index),
- data->temp_ot[attr->index]);
- mutex_unlock(&data->update_lock);
+ regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), TEMP_LIMIT_TO_REG(val));
+
return count;
}
@@ -280,8 +228,14 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *dev_attr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int res;
- return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
+ res = regmap_read(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), &val);
+ if (res < 0)
+ return res;
+
+ return sprintf(buf, "%d\n", val * 255 / 120);
}
static ssize_t pwm_store(struct device *dev,
@@ -290,7 +244,6 @@ static ssize_t pwm_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -300,38 +253,39 @@ static ssize_t pwm_store(struct device *dev,
val = clamp_val(val, 0, 255);
- mutex_lock(&data->update_lock);
- data->pwm[attr->index] = (u8)(val * 120 / 255);
- i2c_smbus_write_byte_data(client,
- MAX6639_REG_TARGTDUTY(attr->index),
- data->pwm[attr->index]);
- mutex_unlock(&data->update_lock);
+ regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), val * 120 / 255);
+
return count;
}
static ssize_t fan_input_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct max6639_data *data = max6639_update_device(dev);
+ struct max6639_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned int val;
+ int res;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ res = regmap_read(data->regmap, MAX6639_REG_FAN_CNT(attr->index), &val);
+ if (res < 0)
+ return res;
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
- data->rpm_range));
+ return sprintf(buf, "%d\n", FAN_FROM_REG(val, data->rpm_range));
}
static ssize_t alarm_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct max6639_data *data = max6639_update_device(dev);
+ struct max6639_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned int val;
+ int res;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ res = regmap_read(data->regmap, MAX6639_REG_STATUS, &val);
+ if (res < 0)
+ return res;
- return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
+ return sprintf(buf, "%d\n", !!(val & (1 << attr->index)));
}
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
@@ -401,6 +355,11 @@ static int rpm_range_to_reg(int range)
return 1; /* default: 4000 RPM */
}
+static int max6639_set_ppr(struct max6639_data *data, u8 channel, u8 ppr)
+{
+ return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), ppr << 6);
+}
+
static int max6639_init_client(struct i2c_client *client,
struct max6639_data *data)
{
@@ -408,94 +367,76 @@ static int max6639_init_client(struct i2c_client *client,
dev_get_platdata(&client->dev);
int i;
int rpm_range = 1; /* default: 4000 RPM */
- int err;
+ int err, ppr;
/* Reset chip to default values, see below for GCONFIG setup */
- err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
- MAX6639_GCONFIG_POR);
+ err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
if (err)
- goto exit;
+ return err;
/* Fans pulse per revolution is 2 by default */
if (max6639_info && max6639_info->ppr > 0 &&
max6639_info->ppr < 5)
- data->ppr = max6639_info->ppr;
+ ppr = max6639_info->ppr;
else
- data->ppr = 2;
- data->ppr -= 1;
+ ppr = 2;
+ ppr -= 1;
if (max6639_info)
rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
data->rpm_range = rpm_range;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < MAX6639_NUM_CHANNELS; i++) {
/* Set Fan pulse per revolution */
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_PPR(i),
- data->ppr << 6);
+ err = max6639_set_ppr(data, i, ppr);
if (err)
- goto exit;
+ return err;
/* Fans config PWM, RPM */
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_CONFIG1(i),
- MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG1(i),
+ MAX6639_FAN_CONFIG1_PWM | rpm_range);
if (err)
- goto exit;
+ return err;
/* Fans PWM polarity high by default */
if (max6639_info && max6639_info->pwm_polarity == 0)
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
else
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
if (err)
- goto exit;
+ return err;
/*
* /THERM full speed enable,
* PWM frequency 25kHz, see also GCONFIG below
*/
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_CONFIG3(i),
- MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG3(i),
+ MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
if (err)
- goto exit;
+ return err;
/* Max. temp. 80C/90C/100C */
- data->temp_therm[i] = 80;
- data->temp_alert[i] = 90;
- data->temp_ot[i] = 100;
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_THERM_LIMIT(i),
- data->temp_therm[i]);
+ err = regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(i), 80);
if (err)
- goto exit;
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_ALERT_LIMIT(i),
- data->temp_alert[i]);
+ return err;
+ err = regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(i), 90);
if (err)
- goto exit;
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
+ return err;
+ err = regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(i), 100);
if (err)
- goto exit;
+ return err;
/* PWM 120/120 (i.e. 100%) */
- data->pwm[i] = 120;
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), 120);
if (err)
- goto exit;
+ return err;
}
/* Start monitoring */
- err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
- MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
- MAX6639_GCONFIG_PWM_FREQ_HI);
-exit:
- return err;
+ return regmap_write(data->regmap, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+
}
/* Return 0 if detection is successful, -ENODEV otherwise */
@@ -524,6 +465,32 @@ static void max6639_regulator_disable(void *data)
regulator_disable(data);
}
+static bool max6639_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6639_REG_TEMP(0):
+ case MAX6639_REG_TEMP_EXT(0):
+ case MAX6639_REG_TEMP(1):
+ case MAX6639_REG_TEMP_EXT(1):
+ case MAX6639_REG_STATUS:
+ case MAX6639_REG_FAN_CNT(0):
+ case MAX6639_REG_FAN_CNT(1):
+ case MAX6639_REG_TARGTDUTY(0):
+ case MAX6639_REG_TARGTDUTY(1):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config max6639_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX6639_REG_DEVREV,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = max6639_regmap_is_volatile,
+};
+
static int max6639_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -535,7 +502,11 @@ static int max6639_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- data->client = client;
+ data->regmap = devm_regmap_init_i2c(client, &max6639_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev,
+ PTR_ERR(data->regmap),
+ "regmap initialization failed\n");
data->reg = devm_regulator_get_optional(dev, "fan");
if (IS_ERR(data->reg)) {
@@ -558,8 +529,6 @@ static int max6639_probe(struct i2c_client *client)
}
}
- mutex_init(&data->update_lock);
-
/* Initialize the max6639 chip */
err = max6639_init_client(client, data);
if (err < 0)
@@ -573,23 +542,17 @@ static int max6639_probe(struct i2c_client *client)
static int max6639_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = dev_get_drvdata(dev);
- int ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
-
- if (ret < 0)
- return ret;
if (data->reg)
regulator_disable(data->reg);
- return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, ret | MAX6639_GCONFIG_STANDBY);
+ return regmap_write_bits(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_STANDBY,
+ MAX6639_GCONFIG_STANDBY);
}
static int max6639_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
struct max6639_data *data = dev_get_drvdata(dev);
int ret;
@@ -601,16 +564,12 @@ static int max6639_resume(struct device *dev)
}
}
- ret = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
- if (ret < 0)
- return ret;
-
- return i2c_smbus_write_byte_data(client,
- MAX6639_REG_GCONFIG, ret & ~MAX6639_GCONFIG_STANDBY);
+ return regmap_write_bits(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_STANDBY,
+ ~MAX6639_GCONFIG_STANDBY);
}
static const struct i2c_device_id max6639_id[] = {
- {"max6639", 0},
+ {"max6639"},
{ }
};
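The suspend/resume rework above relies on regmap_write_bits(), which performs a masked read-modify-write, new = (old & ~mask) | (val & mask), and unlike regmap_update_bits() issues the write even when the value is unchanged. Passing MAX6639_GCONFIG_STANDBY as the value sets the standby bit; passing its complement clears it, because only the masked bits of val matter. A small sketch of that bit algebra (the 0x80 below is a stand-in, not taken from the driver header):

/* Editorial sketch of the masked update regmap_write_bits() applies. */
#include <stdio.h>

#define GCONFIG_STANDBY	0x80	/* stand-in for MAX6639_GCONFIG_STANDBY */

static unsigned int write_bits(unsigned int old, unsigned int mask, unsigned int val)
{
	return (old & ~mask) | (val & mask);	/* only masked bits change */
}

int main(void)
{
	unsigned int gconfig = 0x16;	/* arbitrary example register value */

	gconfig = write_bits(gconfig, GCONFIG_STANDBY, GCONFIG_STANDBY);
	printf("standby set:   0x%02x\n", gconfig);	/* suspend path: 0x96 */
	gconfig = write_bits(gconfig, GCONFIG_STANDBY, ~GCONFIG_STANDBY);
	printf("standby clear: 0x%02x\n", gconfig);	/* resume path: 0x16 */
	return 0;
}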
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 8b2e4d6101a2..9302ab233910 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -291,7 +291,7 @@ static int max6642_probe(struct i2c_client *client)
*/
static const struct i2c_device_id max6642_id[] = {
- { "max6642", 0 },
+ { "max6642" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6642_id);
diff --git a/drivers/hwmon/mc34vr500.c b/drivers/hwmon/mc34vr500.c
index 0c8fd3fce83e..84458e4533d8 100644
--- a/drivers/hwmon/mc34vr500.c
+++ b/drivers/hwmon/mc34vr500.c
@@ -235,7 +235,7 @@ static int mc34vr500_probe(struct i2c_client *client)
}
static const struct i2c_device_id mc34vr500_id[] = {
- { "mc34vr500", 0 },
+ { "mc34vr500" },
{ },
};
MODULE_DEVICE_TABLE(i2c, mc34vr500_id);
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index a0e664d5ebfe..97e8c6424403 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1212,7 +1212,7 @@ static const unsigned short nct7802_address_list[] = {
};
static const struct i2c_device_id nct7802_idtable[] = {
- { "nct7802", 0 },
+ { "nct7802" },
{ }
};
MODULE_DEVICE_TABLE(i2c, nct7802_idtable);
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 8f867d4570e1..f1e6eda949ba 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -1161,7 +1161,7 @@ static int nct7904_probe(struct i2c_client *client)
}
static const struct i2c_device_id nct7904_id[] = {
- {"nct7904", 0},
+ {"nct7904"},
{}
};
MODULE_DEVICE_TABLE(i2c, nct7904_id);
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 904816abb7c4..bc8db1dc595d 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -196,8 +196,6 @@ struct npcm7xx_pwm_fan_data {
void __iomem *pwm_base;
void __iomem *fan_base;
int pwm_modules;
- unsigned long pwm_clk_freq;
- unsigned long fan_clk_freq;
struct clk *pwm_clk;
struct clk *fan_clk;
struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
@@ -692,11 +690,12 @@ static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
{
int m, ch;
u32 prescale_val, output_freq;
+ unsigned long pwm_clk_freq;
- data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+ pwm_clk_freq = clk_get_rate(data->pwm_clk);
/* Adjust NPCM7xx PWMs output frequency to ~25Khz */
- output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT;
+ output_freq = pwm_clk_freq / PWN_CNT_DEFAULT;
prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
/* If prescale_val = 0, then the prescale output clock is stopped */
diff --git a/drivers/hwmon/nzxt-kraken3.c b/drivers/hwmon/nzxt-kraken3.c
index 5806a3f32bcb..00f3ac90a290 100644
--- a/drivers/hwmon/nzxt-kraken3.c
+++ b/drivers/hwmon/nzxt-kraken3.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * hwmon driver for NZXT Kraken X53/X63/X73 and Z53/Z63/Z73 all in one coolers.
- * X53 and Z53 in code refer to all models in their respective series (shortened
- * for brevity).
+ * hwmon driver for NZXT Kraken X53/X63/X73, Z53/Z63/Z73 and 2023/2023 Elite all in one coolers.
+ * X53 and Z53 in code refer to all models in their respective series (shortened for brevity).
+ * 2023 models use the Z53 code paths.
*
* Copyright 2021 Jonas Malaco <jonas@protocubo.io>
* Copyright 2022 Aleksa Savic <savicaleksa83@gmail.com>
@@ -23,15 +23,12 @@
#define USB_PRODUCT_ID_X53 0x2007
#define USB_PRODUCT_ID_X53_SECOND 0x2014
#define USB_PRODUCT_ID_Z53 0x3008
+#define USB_PRODUCT_ID_KRAKEN2023 0x300E
+#define USB_PRODUCT_ID_KRAKEN2023_ELITE 0x300C
-enum kinds { X53, Z53 } __packed;
+enum kinds { X53, Z53, KRAKEN2023 } __packed;
enum pwm_enable { off, manual, curve } __packed;
-static const char *const kraken3_device_names[] = {
- [X53] = "x53",
- [Z53] = "z53",
-};
-
#define DRIVER_NAME "nzxt_kraken3"
#define STATUS_REPORT_ID 0x75
#define FIRMWARE_REPORT_ID 0x11
@@ -141,6 +138,7 @@ static umode_t kraken3_is_visible(const void *data, enum hwmon_sensor_types type
return 0444;
break;
case Z53:
+ case KRAKEN2023:
/* Pump and fan */
if (channel < 2)
return 0444;
@@ -160,6 +158,7 @@ static umode_t kraken3_is_visible(const void *data, enum hwmon_sensor_types type
return 0644;
break;
case Z53:
+ case KRAKEN2023:
/* Pump and fan */
if (channel < 2)
return 0644;
@@ -247,6 +246,7 @@ static int kraken3_read_x53(struct kraken3_data *priv)
return 0;
}
+/* Covers Z53 and KRAKEN2023 device kinds */
static int kraken3_read_z53(struct kraken3_data *priv)
{
int ret = mutex_lock_interruptible(&priv->z53_status_request_lock);
@@ -360,6 +360,13 @@ static int kraken3_write_curve(struct kraken3_data *priv, u8 *curve_array, int c
/* Set the correct ID for writing pump/fan duty (0x01 or 0x02, respectively) */
fixed_duty_cmd[SET_DUTY_ID_OFFSET] = channel + 1;
+ if (priv->kind == KRAKEN2023) {
+ /* These require 1s in the next one or two slots after SET_DUTY_ID_OFFSET */
+ fixed_duty_cmd[SET_DUTY_ID_OFFSET + 1] = 1;
+ if (channel == 1) /* Fan */
+ fixed_duty_cmd[SET_DUTY_ID_OFFSET + 2] = 1;
+ }
+
/* Copy curve to command */
memcpy(fixed_duty_cmd + SET_CURVE_DUTY_CMD_HEADER_LENGTH, curve_array, CUSTOM_CURVE_POINTS);
@@ -507,8 +514,8 @@ static umode_t kraken3_curve_props_are_visible(struct kobject *kobj, struct attr
struct device *dev = kobj_to_dev(kobj);
struct kraken3_data *priv = dev_get_drvdata(dev);
- /* Only Z53 has the fan curve */
- if (index >= CUSTOM_CURVE_POINTS && priv->kind != Z53)
+ /* X53 does not have a fan */
+ if (index >= CUSTOM_CURVE_POINTS && priv->kind == X53)
return 0;
return attr->mode;
@@ -774,8 +781,8 @@ static int kraken3_raw_event(struct hid_device *hdev, struct hid_report *report,
if (priv->kind == X53 && !completion_done(&priv->status_report_processed)) {
/* Mark first X-series device report as received */
complete_all(&priv->status_report_processed);
- } else if (priv->kind == Z53) {
- /* Additional readings for Z53 */
+ } else if (priv->kind == Z53 || priv->kind == KRAKEN2023) {
+ /* Additional readings for Z53 and KRAKEN2023 */
priv->fan_input[1] = get_unaligned_le16(data + Z53_FAN_SPEED_OFFSET);
priv->channel_info[1].reported_duty =
kraken3_percent_to_pwm(data[Z53_FAN_DUTY_OFFSET]);
@@ -849,14 +856,14 @@ static int firmware_version_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(firmware_version);
-static void kraken3_debugfs_init(struct kraken3_data *priv)
+static void kraken3_debugfs_init(struct kraken3_data *priv, const char *device_name)
{
char name[64];
if (!priv->firmware_version[0])
return; /* Nothing to display in debugfs */
- scnprintf(name, sizeof(name), "%s_%s-%s", DRIVER_NAME, kraken3_device_names[priv->kind],
+ scnprintf(name, sizeof(name), "%s_%s-%s", DRIVER_NAME, device_name,
dev_name(&priv->hdev->dev));
priv->debugfs = debugfs_create_dir(name, NULL);
@@ -866,6 +873,7 @@ static void kraken3_debugfs_init(struct kraken3_data *priv)
static int kraken3_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct kraken3_data *priv;
+ const char *device_name;
int ret;
priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -905,12 +913,23 @@ static int kraken3_probe(struct hid_device *hdev, const struct hid_device_id *id
case USB_PRODUCT_ID_X53:
case USB_PRODUCT_ID_X53_SECOND:
priv->kind = X53;
+ device_name = "x53";
break;
case USB_PRODUCT_ID_Z53:
priv->kind = Z53;
+ device_name = "z53";
break;
- default:
+ case USB_PRODUCT_ID_KRAKEN2023:
+ priv->kind = KRAKEN2023;
+ device_name = "kraken2023";
+ break;
+ case USB_PRODUCT_ID_KRAKEN2023_ELITE:
+ priv->kind = KRAKEN2023;
+ device_name = "kraken2023elite";
break;
+ default:
+ ret = -ENODEV;
+ goto fail_and_close;
}
priv->buffer = devm_kzalloc(&hdev->dev, MAX_REPORT_LENGTH, GFP_KERNEL);
@@ -936,8 +955,7 @@ static int kraken3_probe(struct hid_device *hdev, const struct hid_device_id *id
if (ret < 0)
hid_warn(hdev, "fw version request failed with %d\n", ret);
- priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev,
- kraken3_device_names[priv->kind], priv,
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, device_name, priv,
&kraken3_chip_info, kraken3_groups);
if (IS_ERR(priv->hwmon_dev)) {
ret = PTR_ERR(priv->hwmon_dev);
@@ -945,7 +963,7 @@ static int kraken3_probe(struct hid_device *hdev, const struct hid_device_id *id
goto fail_and_close;
}
- kraken3_debugfs_init(priv);
+ kraken3_debugfs_init(priv, device_name);
return 0;
@@ -972,6 +990,8 @@ static const struct hid_device_id kraken3_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_X53) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_X53_SECOND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_Z53) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_KRAKEN2023) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_KRAKEN2023_ELITE) },
{ }
};
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 66c76b28c9e0..167d2fe4d543 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -285,7 +285,7 @@ static int pcf8591_read_channel(struct device *dev, int channel)
}
static const struct i2c_device_id pcf8591_id[] = {
- { "pcf8591", 0 },
+ { "pcf8591" },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8591_id);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 557ae0c414b0..08e82c457356 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -51,12 +51,22 @@ config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1293,
- and ADM1294 Hot-Swap Controller and Digital Power Monitors.
+ Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1281,
+ ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_ADP1050
+ tristate "Analog Devices ADP1050 digital controller for Power Supplies"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices ADP1050 digital controller for isolated power supplies with
+ a PMBus interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called adp1050.
+
config SENSORS_BEL_PFE
tristate "Bel PFE Compatible Power Supplies"
help
@@ -511,6 +521,15 @@ config SENSORS_UCD9200
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_XDP710
+ tristate "Infineon XDP710 family"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ XDP710.
+
+ This driver can also be built as a module. If so, the module will
+ be called xdp710.
+
config SENSORS_XDPE152
tristate "Infineon XDPE152 family"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index f14ecf03ad77..2279b3327bbf 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ACBEL_FSG032) += acbel-fsg032.o
obj-$(CONFIG_SENSORS_ADM1266) += adm1266.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_ADP1050) += adp1050.o
obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_BPA_RS600) += bpa-rs600.o
obj-$(CONFIG_SENSORS_DELTA_AHE50DC_FAN) += delta-ahe50dc-fan.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_XDP710) += xdp710.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_XDPE152) += xdpe152c4.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
index ed0a7b9fae4b..2c4d94cc8729 100644
--- a/drivers/hwmon/pmbus/adm1266.c
+++ b/drivers/hwmon/pmbus/adm1266.c
@@ -490,7 +490,7 @@ static const struct of_device_id adm1266_of_match[] = {
MODULE_DEVICE_TABLE(of, adm1266_of_match);
static const struct i2c_device_id adm1266_id[] = {
- { "adm1266", 0 },
+ { "adm1266" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1266_id);
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index e2c61d6fa521..59ffc08289bd 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,7 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -482,6 +482,7 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
{ "adm1278", adm1278 },
+ { "adm1281", adm1281 },
{ "adm1293", adm1293 },
{ "adm1294", adm1294 },
{ }
@@ -555,7 +556,8 @@ static int adm1275_probe(struct i2c_client *client)
client->name, mid->name);
if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
- mid->driver_data == adm1293 || mid->driver_data == adm1294)
+ mid->driver_data == adm1281 || mid->driver_data == adm1293 ||
+ mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -703,6 +705,7 @@ static int adm1275_probe(struct i2c_client *client)
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1278:
+ case adm1281:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
diff --git a/drivers/hwmon/pmbus/adp1050.c b/drivers/hwmon/pmbus/adp1050.c
new file mode 100644
index 000000000000..20f22730fc01
--- /dev/null
+++ b/drivers/hwmon/pmbus/adp1050.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices ADP1050
+ *
+ * Copyright (C) 2024 Analog Devices, Inc.
+ */
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+static struct pmbus_driver_info adp1050_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_IIN | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static int adp1050_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &adp1050_info);
+}
+
+static const struct i2c_device_id adp1050_id[] = {
+ {"adp1050"},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adp1050_id);
+
+static const struct of_device_id adp1050_of_match[] = {
+ { .compatible = "adi,adp1050"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, adp1050_of_match);
+
+static struct i2c_driver adp1050_driver = {
+ .driver = {
+ .name = "adp1050",
+ .of_match_table = adp1050_of_match,
+ },
+ .probe = adp1050_probe,
+ .id_table = adp1050_id,
+};
+module_i2c_driver(adp1050_driver);
+
+MODULE_AUTHOR("Radu Sabau <radu.sabau@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADP1050 HWMON PMBus Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
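The new adp1050 driver delegates all attribute handling to the PMBus core and only declares which telemetry classes exist and that they use the PMBus Linear data format; for the VIN, IIN and temperature classes that means LINEAR11 (VOUT uses the related LINEAR16 mode, with its exponent taken from VOUT_MODE). For reference, LINEAR11 packs a 5-bit two's-complement exponent in bits 15:11 and an 11-bit two's-complement mantissa in bits 10:0, value = mantissa * 2^exponent. A small decoder, as an editorial sketch rather than driver code:

/* Editorial sketch: decoding a PMBus LINEAR11 word, the format the
 * adp1050_info table above selects for VIN/IIN/TEMP readings. */
#include <stdio.h>
#include <stdint.h>

static double linear11_to_real(uint16_t word)
{
	int exp = (int16_t)word >> 11;			/* sign-extended bits 15:11 */
	int mant = (int16_t)(word << 5) >> 5;		/* sign-extended bits 10:0 */

	return mant * (exp >= 0 ? (double)(1 << exp) : 1.0 / (1 << -exp));
}

int main(void)
{
	/* 0xd3c0: exponent -6, mantissa 960 -> 15.0 */
	printf("%g\n", linear11_to_real(0xd3c0));
	return 0;
}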
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
index dfeae68b5e52..3e3cc9a0f116 100644
--- a/drivers/hwmon/pmbus/inspur-ipsps.c
+++ b/drivers/hwmon/pmbus/inspur-ipsps.c
@@ -197,7 +197,7 @@ static int ipsps_probe(struct i2c_client *client)
}
static const struct i2c_device_id ipsps_id[] = {
- { "ipsps1", 0 },
+ { "ipsps1" },
{}
};
MODULE_DEVICE_TABLE(i2c, ipsps_id);
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index e3ee5c1bd967..503be59c6c7f 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -126,7 +126,7 @@ static int ir35221_probe(struct i2c_client *client)
}
static const struct i2c_device_id ir35221_id[] = {
- {"ir35221", 0},
+ {"ir35221"},
{}
};
diff --git a/drivers/hwmon/pmbus/ir36021.c b/drivers/hwmon/pmbus/ir36021.c
index a263afeb8ac1..5148c9187c9e 100644
--- a/drivers/hwmon/pmbus/ir36021.c
+++ b/drivers/hwmon/pmbus/ir36021.c
@@ -51,7 +51,7 @@ static int ir36021_probe(struct i2c_client *client)
}
static const struct i2c_device_id ir36021_id[] = {
- { "ir36021", 0 },
+ { "ir36021" },
{},
};
MODULE_DEVICE_TABLE(i2c, ir36021_id);
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 69e18cb468f6..d4bcc9c39774 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -53,10 +53,10 @@ static int ir38064_probe(struct i2c_client *client)
}
static const struct i2c_device_id ir38064_id[] = {
- {"ir38060", 0},
- {"ir38064", 0},
- {"ir38164", 0},
- {"ir38263", 0},
+ {"ir38060"},
+ {"ir38064"},
+ {"ir38164"},
+ {"ir38263"},
{}
};
diff --git a/drivers/hwmon/pmbus/irps5401.c b/drivers/hwmon/pmbus/irps5401.c
index 146d32a35a7c..f0bdf55c95bf 100644
--- a/drivers/hwmon/pmbus/irps5401.c
+++ b/drivers/hwmon/pmbus/irps5401.c
@@ -44,7 +44,7 @@ static int irps5401_probe(struct i2c_client *client)
}
static const struct i2c_device_id irps5401_id[] = {
- {"irps5401", 0},
+ {"irps5401"},
{}
};
diff --git a/drivers/hwmon/pmbus/lt7182s.c b/drivers/hwmon/pmbus/lt7182s.c
index 28afc5f15ae8..aebd97af2741 100644
--- a/drivers/hwmon/pmbus/lt7182s.c
+++ b/drivers/hwmon/pmbus/lt7182s.c
@@ -168,7 +168,7 @@ static int lt7182s_probe(struct i2c_client *client)
}
static const struct i2c_device_id lt7182s_id[] = {
- { "lt7182s", 0 },
+ { "lt7182s" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt7182s_id);
diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c
index f2023b17aa8d..f58a8cedb0d7 100644
--- a/drivers/hwmon/pmbus/ltc3815.c
+++ b/drivers/hwmon/pmbus/ltc3815.c
@@ -143,7 +143,7 @@ static int ltc3815_write_word_data(struct i2c_client *client, int page,
}
static const struct i2c_device_id ltc3815_id[] = {
- {"ltc3815", 0},
+ {"ltc3815"},
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc3815_id);
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index 2cfaa62aedd6..986404fe6a31 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -23,8 +23,8 @@
#include "pmbus.h"
static const struct i2c_device_id max15301_id[] = {
- {"bmr461", 0},
- {"max15301", 0},
+ { "bmr461" },
+ { "max15301" },
{}
};
MODULE_DEVICE_TABLE(i2c, max15301_id);
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index a573a0ab9e48..98e2b5dd5841 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -91,7 +91,7 @@ static int max16064_probe(struct i2c_client *client)
}
static const struct i2c_device_id max16064_id[] = {
- {"max16064", 0},
+ {"max16064"},
{}
};
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index 6ebd71cd081b..8f23c1eb559e 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -32,7 +32,7 @@ static int max20751_probe(struct i2c_client *client)
}
static const struct i2c_device_id max20751_id[] = {
- {"max20751", 0},
+ {"max20751"},
{}
};
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 5d13bbfc8f47..09218dba8965 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -518,9 +518,9 @@ static int max31785_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31785_id[] = {
- { "max31785", 0 },
- { "max31785a", 0 },
- { "max31785b", 0 },
+ { "max31785" },
+ { "max31785a" },
+ { "max31785b" },
{ },
};
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index ae8573fdf5ba..5d5b6aeefa80 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -171,7 +171,7 @@ static int max8688_probe(struct i2c_client *client)
}
static const struct i2c_device_id max8688_id[] = {
- {"max8688", 0},
+ {"max8688"},
{ }
};
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
index 50662ed8e3d5..3b45f126b611 100644
--- a/drivers/hwmon/pmbus/mp2888.c
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -378,7 +378,7 @@ static int mp2888_probe(struct i2c_client *client)
}
static const struct i2c_device_id mp2888_id[] = {
- {"mp2888", 0},
+ {"mp2888"},
{}
};
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index e5fa10b3b8bc..280bb12f762c 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -5,12 +5,14 @@
* Copyright (C) 2020 Nvidia Technologies Ltd.
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+
#include "pmbus.h"
/* Vendor specific registers. */
@@ -97,6 +99,11 @@ static const int mp2975_max_phases[][MP2975_PAGE_NUM] = {
[mp2971] = { MP2971_MAX_PHASE_RAIL1, MP2971_MAX_PHASE_RAIL2 },
};
+struct mp2975_driver_info {
+ const struct pmbus_driver_info *info;
+ enum chips chip_id;
+};
+
struct mp2975_data {
struct pmbus_driver_info info;
enum chips chip_id;
@@ -110,15 +117,6 @@ struct mp2975_data {
int curr_sense_gain[MP2975_PAGE_NUM];
};
-static const struct i2c_device_id mp2975_id[] = {
- {"mp2971", mp2971},
- {"mp2973", mp2973},
- {"mp2975", mp2975},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, mp2975_id);
-
static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
PMBUS_REGULATOR("vout", 0),
PMBUS_REGULATOR("vout", 1),
@@ -392,6 +390,80 @@ static int mp2973_read_word_data(struct i2c_client *client, int page,
return ret;
}
+static int mp2973_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ u8 target, mask;
+ long ret;
+
+ if (reg != PMBUS_SMBALERT_MASK)
+ return -ENODATA;
+
+ /*
+ * Vendor-specific SMBALERT_MASK register with 16 maskable bits.
+ */
+ ret = pmbus_read_word_data(client, 0, 0, PMBUS_SMBALERT_MASK);
+ if (ret < 0)
+ return ret;
+
+ target = word & 0xff;
+ mask = word >> 8;
+
+/*
+ * Set or clear each bit in 'ret' according to the mask, using the defines
+ * below for the individual bits of the vendor SMBALERT_MASK register.
+ * Bits 2 and 15 are reserved.
+ */
+
+#define MP2973_TEMP_OT 0
+#define MP2973_VIN_UVLO 1
+#define MP2973_VIN_OVP 3
+#define MP2973_MTP_FAULT 4
+#define MP2973_OTHER_COMM 5
+#define MP2973_MTP_BLK_TRIG 6
+#define MP2973_PACKET_ERROR 7
+#define MP2973_INVALID_DATA 8
+#define MP2973_INVALID_COMMAND 9
+#define MP2973_IOUT_OC_LV 10
+#define MP2973_IOUT_OC 11
+#define MP2973_VOUT_MAX_MIN_WARNING 12
+#define MP2973_VOLTAGE_UV 13
+#define MP2973_VOLTAGE_OV 14
+
+ switch (target) {
+ case PMBUS_STATUS_CML:
+ __assign_bit(MP2973_INVALID_DATA, &ret, !(mask & PB_CML_FAULT_INVALID_DATA));
+ __assign_bit(MP2973_INVALID_COMMAND, &ret, !(mask & PB_CML_FAULT_INVALID_COMMAND));
+ __assign_bit(MP2973_OTHER_COMM, &ret, !(mask & PB_CML_FAULT_OTHER_COMM));
+ __assign_bit(MP2973_PACKET_ERROR, &ret, !(mask & PB_CML_FAULT_PACKET_ERROR));
+ break;
+ case PMBUS_STATUS_VOUT:
+ __assign_bit(MP2973_VOLTAGE_UV, &ret, !(mask & PB_VOLTAGE_UV_FAULT));
+ __assign_bit(MP2973_VOLTAGE_OV, &ret, !(mask & PB_VOLTAGE_OV_FAULT));
+ break;
+ case PMBUS_STATUS_IOUT:
+ __assign_bit(MP2973_IOUT_OC, &ret, !(mask & PB_IOUT_OC_FAULT));
+ __assign_bit(MP2973_IOUT_OC_LV, &ret, !(mask & PB_IOUT_OC_LV_FAULT));
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ __assign_bit(MP2973_TEMP_OT, &ret, !(mask & PB_TEMP_OT_FAULT));
+ break;
+ /*
+ * Map remaining bits to MFR specific to let the PMBus core mask
+ * those bits by default.
+ */
+ case PMBUS_STATUS_MFR_SPECIFIC:
+ __assign_bit(MP2973_VIN_UVLO, &ret, !(mask & BIT(1)));
+ __assign_bit(MP2973_VIN_OVP, &ret, !(mask & BIT(3)));
+ __assign_bit(MP2973_MTP_FAULT, &ret, !(mask & BIT(4)));
+ __assign_bit(MP2973_MTP_BLK_TRIG, &ret, !(mask & BIT(6)));
+ break;
+ default:
+ return 0;
+ }
+
+ return pmbus_write_word_data(client, 0, PMBUS_SMBALERT_MASK, ret);
+}
+
static int mp2975_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -867,7 +939,7 @@ mp2975_vout_per_rail_config_get(struct i2c_client *client,
return 0;
}
-static struct pmbus_driver_info mp2975_info = {
+static const struct pmbus_driver_info mp2975_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -892,7 +964,7 @@ static struct pmbus_driver_info mp2975_info = {
#endif
};
-static struct pmbus_driver_info mp2973_info = {
+static const struct pmbus_driver_info mp2973_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -907,35 +979,41 @@ static struct pmbus_driver_info mp2973_info = {
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
.read_word_data = mp2973_read_word_data,
+ .write_word_data = mp2973_write_word_data,
#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
.num_regulators = 1,
.reg_desc = mp2975_reg_desc,
#endif
};
+static const struct mp2975_driver_info mp2975_ddinfo[] = {
+ [mp2975] = { .info = &mp2975_info, .chip_id = mp2975 },
+ [mp2973] = { .info = &mp2973_info, .chip_id = mp2973 },
+ [mp2971] = { .info = &mp2973_info, .chip_id = mp2971 },
+};
+
static int mp2975_probe(struct i2c_client *client)
{
+ const struct mp2975_driver_info *ddinfo;
struct pmbus_driver_info *info;
struct mp2975_data *data;
int ret;
+ ddinfo = i2c_get_match_data(client);
+ if (!ddinfo)
+ return -ENODEV;
+
data = devm_kzalloc(&client->dev, sizeof(struct mp2975_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- if (client->dev.of_node)
- data->chip_id = (enum chips)(unsigned long)of_device_get_match_data(&client->dev);
- else
- data->chip_id = i2c_match_id(mp2975_id, client)->driver_data;
+ data->chip_id = ddinfo->chip_id;
memcpy(data->max_phases, mp2975_max_phases[data->chip_id],
sizeof(data->max_phases));
- if (data->chip_id == mp2975)
- memcpy(&data->info, &mp2975_info, sizeof(*info));
- else
- memcpy(&data->info, &mp2973_info, sizeof(*info));
+ memcpy(&data->info, ddinfo->info, sizeof(data->info));
info = &data->info;
@@ -993,18 +1071,26 @@ static int mp2975_probe(struct i2c_client *client)
return pmbus_do_probe(client, info);
}
-static const struct of_device_id __maybe_unused mp2975_of_match[] = {
- {.compatible = "mps,mp2971", .data = (void *)mp2971},
- {.compatible = "mps,mp2973", .data = (void *)mp2973},
- {.compatible = "mps,mp2975", .data = (void *)mp2975},
+static const struct of_device_id mp2975_of_match[] = {
+ {.compatible = "mps,mp2971", .data = &mp2975_ddinfo[mp2971]},
+ {.compatible = "mps,mp2973", .data = &mp2975_ddinfo[mp2973]},
+ {.compatible = "mps,mp2975", .data = &mp2975_ddinfo[mp2975]},
{}
};
MODULE_DEVICE_TABLE(of, mp2975_of_match);
+static const struct i2c_device_id mp2975_id[] = {
+ {"mp2971", (kernel_ulong_t)&mp2975_ddinfo[mp2971]},
+ {"mp2973", (kernel_ulong_t)&mp2975_ddinfo[mp2973]},
+ {"mp2975", (kernel_ulong_t)&mp2975_ddinfo[mp2975]},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2975_id);
+
static struct i2c_driver mp2975_driver = {
.driver = {
.name = "mp2975",
- .of_match_table = of_match_ptr(mp2975_of_match),
+ .of_match_table = mp2975_of_match,
},
.probe = mp2975_probe,
.id_table = mp2975_id,
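
As an aside to the mp2975 change above: mp2973_write_word_data() translates the
standard PMBus SMBALERT mask for each status register into the vendor's single
16-bit SMBALERT_MASK layout, where a set PMBus mask bit ("do not alert") clears
the corresponding vendor bit. Below is a minimal standalone sketch of that
translation in plain user-space C, not part of the patch; the STATUS_CML bit
values follow the usual PMBus convention but are assumed here:

/*
 * Illustrative only: mirror the __assign_bit() mapping from the patch for
 * the STATUS_CML case, using ordinary bit operations.
 */
#include <stdint.h>
#include <stdio.h>

/* Assumed PMBus STATUS_CML bit values (per the PMBus convention). */
#define PB_CML_FAULT_OTHER_COMM		(1u << 1)
#define PB_CML_FAULT_PACKET_ERROR	(1u << 5)
#define PB_CML_FAULT_INVALID_DATA	(1u << 6)
#define PB_CML_FAULT_INVALID_COMMAND	(1u << 7)

/* Vendor SMBALERT_MASK bit positions, as defined in the patch. */
#define MP2973_OTHER_COMM	5
#define MP2973_PACKET_ERROR	7
#define MP2973_INVALID_DATA	8
#define MP2973_INVALID_COMMAND	9

static void assign_bit(unsigned int nr, uint16_t *word, int set)
{
	if (set)
		*word |= (uint16_t)(1u << nr);
	else
		*word &= (uint16_t)~(1u << nr);
}

int main(void)
{
	uint16_t vendor_mask = 0xffff;	/* pretend value read back from the chip */
	uint8_t cml_mask = PB_CML_FAULT_INVALID_COMMAND; /* suppress only this alert */

	/* A set PMBus mask bit clears the vendor bit, and vice versa. */
	assign_bit(MP2973_INVALID_DATA, &vendor_mask, !(cml_mask & PB_CML_FAULT_INVALID_DATA));
	assign_bit(MP2973_INVALID_COMMAND, &vendor_mask, !(cml_mask & PB_CML_FAULT_INVALID_COMMAND));
	assign_bit(MP2973_OTHER_COMM, &vendor_mask, !(cml_mask & PB_CML_FAULT_OTHER_COMM));
	assign_bit(MP2973_PACKET_ERROR, &vendor_mask, !(cml_mask & PB_CML_FAULT_PACKET_ERROR));

	printf("vendor SMBALERT_MASK = 0x%04x\n", vendor_mask);	/* 0xfdff here */
	return 0;
}
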
diff --git a/drivers/hwmon/pmbus/mp5990.c b/drivers/hwmon/pmbus/mp5990.c
index 1dfbab25a064..5d1d5eac89da 100644
--- a/drivers/hwmon/pmbus/mp5990.c
+++ b/drivers/hwmon/pmbus/mp5990.c
@@ -158,7 +158,7 @@ static const struct of_device_id mp5990_of_match[] = {
};
static const struct i2c_device_id mp5990_id[] = {
- {"mp5990", 0},
+ {"mp5990"},
{ }
};
MODULE_DEVICE_TABLE(i2c, mp5990_id);
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 4e2549cc8120..7f87e117b49d 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -62,7 +62,7 @@ static int mpq8785_probe(struct i2c_client *client)
};
static const struct i2c_device_id mpq8785_id[] = {
- { "mpq8785", 0 },
+ { "mpq8785" },
{ },
};
MODULE_DEVICE_TABLE(i2c, mpq8785_id);
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
index c95433790b11..2c6c9ec2a652 100644
--- a/drivers/hwmon/pmbus/pli1209bc.c
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -141,7 +141,7 @@ static int pli1209bc_probe(struct i2c_client *client)
}
static const struct i2c_device_id pli1209bc_id[] = {
- {"pli1209bc", 0},
+ {"pli1209bc"},
{}
};
diff --git a/drivers/hwmon/pmbus/pm6764tr.c b/drivers/hwmon/pmbus/pm6764tr.c
index 2a16504c85b7..23f15b608dcf 100644
--- a/drivers/hwmon/pmbus/pm6764tr.c
+++ b/drivers/hwmon/pmbus/pm6764tr.c
@@ -48,7 +48,7 @@ static int pm6764tr_probe(struct i2c_client *client)
}
static const struct i2c_device_id pm6764tr_id[] = {
- {"pm6764tr", 0},
+ {"pm6764tr"},
{}
};
MODULE_DEVICE_TABLE(i2c, pm6764tr_id);
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index e2790a682dc8..5ac476d3cdd2 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -127,9 +127,9 @@ static int pxe1610_probe(struct i2c_client *client)
}
static const struct i2c_device_id pxe1610_id[] = {
- {"pxe1610", 0},
- {"pxe1110", 0},
- {"pxm1310", 0},
+ {"pxe1610"},
+ {"pxe1110"},
+ {"pxm1310"},
{}
};
diff --git a/drivers/hwmon/pmbus/stpddc60.c b/drivers/hwmon/pmbus/stpddc60.c
index be9076dcc2db..34d0f06f4845 100644
--- a/drivers/hwmon/pmbus/stpddc60.c
+++ b/drivers/hwmon/pmbus/stpddc60.c
@@ -18,8 +18,8 @@
#define STPDDC60_MFR_UV_LIMIT_OFFSET 0xe6
static const struct i2c_device_id stpddc60_id[] = {
- {"stpddc60", 0},
- {"bmr481", 0},
+ {"stpddc60"},
+ {"bmr481"},
{}
};
MODULE_DEVICE_TABLE(i2c, stpddc60_id);
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index c31889a036f0..044d5fbdf9eb 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -195,7 +195,7 @@ static int tda38640_probe(struct i2c_client *client)
}
static const struct i2c_device_id tda38640_id[] = {
- {"tda38640", 0},
+ {"tda38640"},
{}
};
MODULE_DEVICE_TABLE(i2c, tda38640_id);
diff --git a/drivers/hwmon/pmbus/tps40422.c b/drivers/hwmon/pmbus/tps40422.c
index ea0074a6b9fc..d99b9850ea36 100644
--- a/drivers/hwmon/pmbus/tps40422.c
+++ b/drivers/hwmon/pmbus/tps40422.c
@@ -31,7 +31,7 @@ static int tps40422_probe(struct i2c_client *client)
}
static const struct i2c_device_id tps40422_id[] = {
- {"tps40422", 0},
+ {"tps40422"},
{}
};
diff --git a/drivers/hwmon/pmbus/tps546d24.c b/drivers/hwmon/pmbus/tps546d24.c
index 69bbdb6c680b..520ca37269f7 100644
--- a/drivers/hwmon/pmbus/tps546d24.c
+++ b/drivers/hwmon/pmbus/tps546d24.c
@@ -42,7 +42,7 @@ static int tps546d24_probe(struct i2c_client *client)
}
static const struct i2c_device_id tps546d24_id[] = {
- {"tps546d24", 0},
+ {"tps546d24"},
{}
};
MODULE_DEVICE_TABLE(i2c, tps546d24_id);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 8d9d422450e5..d817c719b90b 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -80,11 +80,11 @@ struct ucd9000_debugfs_entry {
* It has been observed that the UCD90320 randomly fails register access when
* doing another access right on the back of a register write. To mitigate this
* make sure that there is a minimum delay between a write access and the
- * following access. The 250us is based on experimental data. At a delay of
- * 200us the issue seems to go away. Add a bit of extra margin to allow for
+ * following access. The 500us is based on experimental data. At a delay of
+ * 350us the issue seems to go away. Add a bit of extra margin to allow for
* system to system differences.
*/
-#define UCD90320_WAIT_DELAY_US 250
+#define UCD90320_WAIT_DELAY_US 500
static inline void ucd90320_wait(const struct ucd9000_data *data)
{
diff --git a/drivers/hwmon/pmbus/xdp710.c b/drivers/hwmon/pmbus/xdp710.c
new file mode 100644
index 000000000000..dd107e83f612
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdp710.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Infineon XDP710 Hot-Swap Controller
+ */
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define XDP710_REG_CFG 0xD3
+#define XDP710_V_SNS_CFG 0xD4
+#define XDP710_CS_RNG 0xD5
+
+/*
+ * Table mapping configuration register (REG_CFG) values
+ * to sense resistor values, in micro-ohms
+ */
+static const int micro_ohm_rsense[] = {
+ 200, 250, 300, 330, 400, 470, 500, 600,
+ 670, 700, 750, 800, 900, 1000, 1100, 1200,
+ 1250, 1300, 1400, 1500, 1600, 1700, 1800, 1900,
+ 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,
+ 2800, 3000, 3100, 3200, 3300, 3400, 3500, 3600,
+ 3700, 3800, 3900, 4000, 4100, 4200, 4300, 4400,
+ 4500, 4600, 4700, 4800, 4900, 5000, 5500, 6000,
+ 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000
+};
+
+static struct pmbus_driver_info xdp710_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 4653,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+ .m[PSC_VOLTAGE_OUT] = 4653,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -2,
+ .m[PSC_CURRENT_OUT] = 23165,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_POWER] = 4211,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -2,
+ .m[PSC_TEMPERATURE] = 52,
+ .b[PSC_TEMPERATURE] = 14321,
+ .R[PSC_TEMPERATURE] = -1,
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static int xdp710_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ u8 cs_rng;
+ u8 vtlm_rng;
+ int rsense;
+ int ret;
+ int m = 0;
+
+ info = devm_kmemdup(&client->dev, &xdp710_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_word_data(client, XDP710_CS_RNG);
+ if (ret < 0) {
+ dev_err(&client->dev, "Can't get CS_RNG");
+ return ret;
+ }
+ cs_rng = (ret >> 6) & GENMASK(1, 0);
+
+ ret = i2c_smbus_read_word_data(client, XDP710_V_SNS_CFG);
+ if (ret < 0) {
+ dev_err(&client->dev, "Can't get V_SNS_CFG");
+ return ret;
+ }
+ vtlm_rng = ret & GENMASK(1, 0);
+
+ ret = i2c_smbus_read_word_data(client, XDP710_REG_CFG);
+ if (ret < 0) {
+ dev_err(&client->dev, "Can't get REG_CFG");
+ return ret;
+ }
+ ret &= GENMASK(5, 0);
+ rsense = micro_ohm_rsense[ret];
+
+ info->m[PSC_VOLTAGE_IN] <<= vtlm_rng;
+ info->m[PSC_VOLTAGE_OUT] <<= vtlm_rng;
+
+ m = info->m[PSC_CURRENT_OUT];
+ info->m[PSC_CURRENT_OUT] = DIV_ROUND_CLOSEST(m * rsense >> cs_rng, 1000);
+
+ m = info->m[PSC_POWER];
+ info->m[PSC_POWER] = DIV_ROUND_CLOSEST(m * rsense >> cs_rng, 1000);
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct of_device_id xdp710_of_match[] = {
+ { .compatible = "infineon,xdp710" },
+ {}
+};
+
+static const struct i2c_device_id xdp710_id[] = {
+ {"xdp710"},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, xdp710_id);
+
+static struct i2c_driver xdp710_driver = {
+ .driver = {
+ .name = "xdp710",
+ .of_match_table = xdp710_of_match,
+ },
+ .probe = xdp710_probe,
+ .id_table = xdp710_id,
+};
+module_i2c_driver(xdp710_driver);
+
+MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
+MODULE_DESCRIPTION("PMBus driver for XDP710 HSC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
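
As an aside to the new xdp710 driver above: xdp710_probe() rescales the
direct-format slope coefficients using the sense resistor value (in micro-ohms,
looked up from the REG_CFG-keyed table) and the CS_RNG field, then divides by
1000 to return to the base unit. The following standalone sketch, not part of
the patch, evaluates that arithmetic in plain user-space C with made-up
rsense/cs_rng values:

/*
 * Illustrative only: the same coefficient-scaling expression as the
 * driver, evaluated with example register contents.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + (divisor) / 2) / (divisor))

int main(void)
{
	int m_iout = 23165;	/* base slope for PSC_CURRENT_OUT */
	int m_pin = 4211;	/* base slope for PSC_POWER */
	int rsense = 500;	/* example sense resistor, micro-ohms */
	int cs_rng = 1;		/* example current-sense range bits */

	/* Scale by rsense, shift by the range, round back to base units. */
	m_iout = DIV_ROUND_CLOSEST(m_iout * rsense >> cs_rng, 1000);
	m_pin = DIV_ROUND_CLOSEST(m_pin * rsense >> cs_rng, 1000);

	printf("m[PSC_CURRENT_OUT] = %d, m[PSC_POWER] = %d\n", m_iout, m_pin);
	return 0;
}
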
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index 37d08dd427d5..facb1201aa43 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -164,9 +164,9 @@ static int xdpe122_probe(struct i2c_client *client)
}
static const struct i2c_device_id xdpe122_id[] = {
- {"xdpe11280", 0},
- {"xdpe12254", 0},
- {"xdpe12284", 0},
+ {"xdpe11280"},
+ {"xdpe12254"},
+ {"xdpe12284"},
{}
};
diff --git a/drivers/hwmon/pmbus/xdpe152c4.c b/drivers/hwmon/pmbus/xdpe152c4.c
index 235e6b41ae4c..7f3b31d4f033 100644
--- a/drivers/hwmon/pmbus/xdpe152c4.c
+++ b/drivers/hwmon/pmbus/xdpe152c4.c
@@ -44,8 +44,8 @@ static int xdpe152_probe(struct i2c_client *client)
}
static const struct i2c_device_id xdpe152_id[] = {
- {"xdpe152c4", 0},
- {"xdpe15284", 0},
+ {"xdpe152c4"},
+ {"xdpe15284"},
{}
};
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
index 60361e39c474..b0d58a26d499 100644
--- a/drivers/hwmon/pt5161l.c
+++ b/drivers/hwmon/pt5161l.c
@@ -630,7 +630,7 @@ static const struct acpi_device_id __maybe_unused pt5161l_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, pt5161l_acpi_match);
static const struct i2c_device_id pt5161l_id[] = {
- { "pt5161l", 0 },
+ { "pt5161l" },
{}
};
MODULE_DEVICE_TABLE(i2c, pt5161l_id);
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index b67bc9e833c0..a1712649b07e 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -9,10 +9,11 @@
#include <linux/hwmon.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
@@ -25,7 +26,6 @@ struct pwm_fan_tach {
int irq;
atomic_t pulses;
unsigned int rpm;
- u8 pulses_per_revolution;
};
enum pwm_fan_enable_mode {
@@ -48,6 +48,7 @@ struct pwm_fan_ctx {
int tach_count;
struct pwm_fan_tach *tachs;
+ u32 *pulses_per_revolution;
ktime_t sample_start;
struct timer_list rpm_timer;
@@ -85,7 +86,7 @@ static void sample_timer(struct timer_list *t)
pulses = atomic_read(&tach->pulses);
atomic_sub(pulses, &tach->pulses);
tach->rpm = (unsigned int)(pulses * 1000 * 60) /
- (tach->pulses_per_revolution * delta);
+ (ctx->pulses_per_revolution[i] * delta);
}
ctx->sample_start = ktime_get();
@@ -421,16 +422,14 @@ static const struct thermal_cooling_device_ops pwm_fan_cooling_ops = {
.set_cur_state = pwm_fan_set_cur_state,
};
-static int pwm_fan_of_get_cooling_data(struct device *dev,
- struct pwm_fan_ctx *ctx)
+static int pwm_fan_get_cooling_data(struct device *dev, struct pwm_fan_ctx *ctx)
{
- struct device_node *np = dev->of_node;
int num, i, ret;
- if (!of_property_present(np, "cooling-levels"))
+ if (!device_property_present(dev, "cooling-levels"))
return 0;
- ret = of_property_count_u32_elems(np, "cooling-levels");
+ ret = device_property_count_u32(dev, "cooling-levels");
if (ret <= 0) {
dev_err(dev, "Wrong data!\n");
return ret ? : -EINVAL;
@@ -442,8 +441,8 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
if (!ctx->pwm_fan_cooling_levels)
return -ENOMEM;
- ret = of_property_read_u32_array(np, "cooling-levels",
- ctx->pwm_fan_cooling_levels, num);
+ ret = device_property_read_u32_array(dev, "cooling-levels",
+ ctx->pwm_fan_cooling_levels, num);
if (ret) {
dev_err(dev, "Property 'cooling-levels' cannot be read!\n");
return ret;
@@ -562,6 +561,20 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (!fan_channel_config)
return -ENOMEM;
ctx->fan_channel.config = fan_channel_config;
+
+ ctx->pulses_per_revolution = devm_kmalloc_array(dev,
+ ctx->tach_count,
+ sizeof(*ctx->pulses_per_revolution),
+ GFP_KERNEL);
+ if (!ctx->pulses_per_revolution)
+ return -ENOMEM;
+
+ /* Set up the default pulses per revolution for each tach */
+ for (i = 0; i < ctx->tach_count; i++)
+ ctx->pulses_per_revolution[i] = 2;
+
+ device_property_read_u32_array(dev, "pulses-per-revolution",
+ ctx->pulses_per_revolution, ctx->tach_count);
}
channels = devm_kcalloc(dev, channel_count + 1,
@@ -573,7 +586,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
for (i = 0; i < ctx->tach_count; i++) {
struct pwm_fan_tach *tach = &ctx->tachs[i];
- u32 ppr = 2;
tach->irq = platform_get_irq(pdev, i);
if (tach->irq == -EPROBE_DEFER)
@@ -589,12 +601,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
}
}
- of_property_read_u32_index(dev->of_node,
- "pulses-per-revolution",
- i,
- &ppr);
- tach->pulses_per_revolution = ppr;
- if (!tach->pulses_per_revolution) {
+ if (!ctx->pulses_per_revolution[i]) {
dev_err(dev, "pulses-per-revolution can't be zero.\n");
return -EINVAL;
}
@@ -602,7 +609,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
fan_channel_config[i] = HWMON_F_INPUT;
dev_dbg(dev, "tach%d: irq=%d, pulses_per_revolution=%d\n",
- i, tach->irq, tach->pulses_per_revolution);
+ i, tach->irq, ctx->pulses_per_revolution[i]);
}
if (ctx->tach_count > 0) {
@@ -622,7 +629,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwmon);
}
- ret = pwm_fan_of_get_cooling_data(dev, ctx);
+ ret = pwm_fan_get_cooling_data(dev, ctx);
if (ret)
return ret;
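
As an aside to the pwm-fan change above: the per-tach pulses-per-revolution
values now live in one devm-allocated array that is first filled with the
default of 2 and then optionally overwritten by a single
device_property_read_u32_array() call whose return value is ignored. Below is
a standalone sketch of that defaults-then-override pattern in plain user-space
C, not part of the patch; read_ppr_property() is only a stand-in for the
firmware-property call:

/*
 * Illustrative only: fill defaults, then let an optional "property"
 * override them, mirroring the probe() logic above.
 */
#include <stdio.h>
#include <stdlib.h>

/* Pretend firmware property; set HAVE_PPR_PROPERTY to 0 to simulate a
 * board that does not specify pulses-per-revolution at all. */
#define HAVE_PPR_PROPERTY 1
static const unsigned int fw_ppr[] = { 4, 2, 1 };

static int read_ppr_property(unsigned int *dst, size_t n)
{
	if (!HAVE_PPR_PROPERTY || n > sizeof(fw_ppr) / sizeof(fw_ppr[0]))
		return -1;	/* property missing or too short: keep defaults */

	for (size_t i = 0; i < n; i++)
		dst[i] = fw_ppr[i];
	return 0;
}

int main(void)
{
	size_t tach_count = 3;
	unsigned int *ppr = malloc(tach_count * sizeof(*ppr));

	if (!ppr)
		return 1;

	/* Default of 2 pulses per revolution for every tach ... */
	for (size_t i = 0; i < tach_count; i++)
		ppr[i] = 2;

	/* ... then the optional property overrides the defaults; the return
	 * value is ignored, as in the driver. */
	read_ppr_property(ppr, tach_count);

	for (size_t i = 0; i < tach_count; i++)
		printf("tach%zu: %u pulses/rev\n", i, ppr[i]);

	free(ppr);
	return 0;
}
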
diff --git a/drivers/hwmon/sbrmi.c b/drivers/hwmon/sbrmi.c
index 4318f5121145..d48d8e5460ff 100644
--- a/drivers/hwmon/sbrmi.c
+++ b/drivers/hwmon/sbrmi.c
@@ -328,7 +328,7 @@ static int sbrmi_probe(struct i2c_client *client)
}
static const struct i2c_device_id sbrmi_id[] = {
- {"sbrmi", 0},
+ {"sbrmi"},
{}
};
MODULE_DEVICE_TABLE(i2c, sbrmi_id);
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index a4181acb1aa6..3c839f56c460 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -218,7 +218,7 @@ static int sbtsi_probe(struct i2c_client *client)
}
static const struct i2c_device_id sbtsi_id[] = {
- {"sbtsi", 0},
+ {"sbtsi"},
{}
};
MODULE_DEVICE_TABLE(i2c, sbtsi_id);
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 55c179475208..ad1b827ea782 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -278,7 +278,7 @@ static int sht21_probe(struct i2c_client *client)
/* Device ID table */
static const struct i2c_device_id sht21_id[] = {
- { "sht21", 0 },
+ { "sht21" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sht21_id);
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 4883755d4b1e..b8916d2735b5 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -276,7 +276,7 @@ static int sht4x_probe(struct i2c_client *client)
}
static const struct i2c_device_id sht4x_id[] = {
- { "sht4x", 0 },
+ { "sht4x" },
{ },
};
MODULE_DEVICE_TABLE(i2c, sht4x_id);
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index d20800a1f02b..21103af4e139 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -618,7 +618,7 @@ static int smsc47m192_probe(struct i2c_client *client)
}
static const struct i2c_device_id smsc47m192_id[] = {
- { "smsc47m192", 0 },
+ { "smsc47m192" },
{ }
};
MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 847c99376930..e7632081a1d1 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -72,7 +72,7 @@ static const int stts751_intervals[] = {
};
static const struct i2c_device_id stts751_id[] = {
- { "stts751", 0 },
+ { "stts751" },
{ }
};
@@ -91,7 +91,6 @@ struct stts751_priv {
int event_max, event_min;
int therm;
int hyst;
- bool smbus_timeout;
int temp;
unsigned long last_update, last_alert_update;
u8 config;
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index 42a9658f1bc2..39fe5836f237 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -550,8 +550,8 @@ static int tc654_probe(struct i2c_client *client)
}
static const struct i2c_device_id tc654_id[] = {
- {"tc654", 0},
- {"tc655", 0},
+ {"tc654"},
+ {"tc655"},
{}
};
diff --git a/drivers/hwmon/tc74.c b/drivers/hwmon/tc74.c
index 03950670bd78..9984373a25fb 100644
--- a/drivers/hwmon/tc74.c
+++ b/drivers/hwmon/tc74.c
@@ -151,7 +151,7 @@ static int tc74_probe(struct i2c_client *client)
}
static const struct i2c_device_id tc74_id[] = {
- { "tc74", 0 },
+ { "tc74" },
{}
};
MODULE_DEVICE_TABLE(i2c, tc74_id);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 2506c78590af..8af44a33055f 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -286,7 +286,7 @@ static int tmp102_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
static const struct i2c_device_id tmp102_id[] = {
- { "tmp102", 0 },
+ { "tmp102" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp102_id);
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index a84c29a3a765..f271a03e05ae 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -197,7 +197,7 @@ static int tmp103_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp103_dev_pm_ops, tmp103_suspend, tmp103_resume);
static const struct i2c_device_id tmp103_id[] = {
- { "tmp103", 0 },
+ { "tmp103" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp103_id);
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index d7a09ab2bc11..a82bbc959eb1 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -413,7 +413,7 @@ static int tmp108_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
- { "tmp108", 0 },
+ { "tmp108" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 9681eaa06c8e..ace854b370a0 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -328,7 +328,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
static void w83791d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83791d_id[] = {
- { "w83791d", 0 },
+ { "w83791d" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83791d_id);
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 69ce379a9e13..b0b5f60eea53 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -296,7 +296,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
static void w83792d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83792d_id[] = {
- { "w83792d", 0 },
+ { "w83792d" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83792d_id);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 96bab94ba899..0acf6bd0227f 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -291,7 +291,7 @@ static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
- { "w83793", 0 },
+ { "w83793" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 9c11ed69c055..df77b53a1b2f 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -74,7 +74,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
*/
static const struct i2c_device_id w83l785ts_id[] = {
- { "w83l785ts", 0 },
+ { "w83l785ts" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l785ts_id);
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 75874cf7851c..9b81bd406e05 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -741,7 +741,7 @@ w83l786ng_probe(struct i2c_client *client)
}
static const struct i2c_device_id w83l786ng_id[] = {
- { "w83l786ng", 0 },
+ { "w83l786ng" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l786ng_id);
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 3949ded0d4fa..375bd0d89b0c 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -590,7 +590,6 @@ MODULE_DEVICE_TABLE(amba, catu_ids);
static struct amba_driver catu_driver = {
.drv = {
.name = "coresight-catu",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = catu_probe,
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index e805617020d0..d2b5a5718c29 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -982,7 +982,6 @@ MODULE_DEVICE_TABLE(amba, cti_ids);
static struct amba_driver cti_driver = {
.drv = {
.name = "coresight-cti",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = cti_probe,
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 3aab182b562f..7edd3f1d0d46 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -844,7 +844,6 @@ MODULE_DEVICE_TABLE(amba, etb_ids);
static struct amba_driver etb_driver = {
.drv = {
.name = "coresight-etb10",
- .owner = THIS_MODULE,
.pm = &etb_dev_pm_ops,
.suppress_bind_attrs = true,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 9d5c1391ffb1..8b362605d242 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -1008,7 +1008,6 @@ MODULE_DEVICE_TABLE(amba, etm_ids);
static struct amba_driver etm_driver = {
.drv = {
.name = "coresight-etm3x",
- .owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index c2ca4a02dfce..e6cd9705596c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -2344,7 +2344,6 @@ MODULE_DEVICE_TABLE(amba, etm4_ids);
static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = etm4_probe_amba,
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index ef1a0abfee4e..5ab1f592917a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -399,7 +399,6 @@ MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
static struct amba_driver dynamic_funnel_driver = {
.drv = {
.name = "coresight-dynamic-funnel",
- .owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 73452d9dc13b..2bb9ba66e3c0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -406,7 +406,6 @@ static struct amba_driver dynamic_replicator_driver = {
.drv = {
.name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = dynamic_replicator_probe,
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 974d37e5f94c..15b52358965c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -954,7 +954,6 @@ MODULE_DEVICE_TABLE(amba, stm_ids);
static struct amba_driver stm_driver = {
.drv = {
.name = "coresight-stm",
- .owner = THIS_MODULE,
.pm = &stm_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 72005b0c633e..0d251cae814f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -602,7 +602,6 @@ MODULE_DEVICE_TABLE(amba, tmc_ids);
static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tmc_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 7739bc7adc44..bfca103f9f84 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -333,7 +333,6 @@ static struct amba_id tpda_ids[] = {
static struct amba_driver tpda_driver = {
.drv = {
.name = "coresight-tpda",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpda_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index a9708ab0d488..0726f8842552 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -1310,7 +1310,6 @@ static struct amba_id tpdm_ids[] = {
static struct amba_driver tpdm_driver = {
.drv = {
.name = "coresight-tpdm",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpdm_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 29024f880fda..7dc9ea564bca 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -236,7 +236,6 @@ MODULE_DEVICE_TABLE(amba, tpiu_ids);
static struct amba_driver tpiu_driver = {
.drv = {
.name = "coresight-tpiu",
- .owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 6136776482e6..96a32b213669 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <linux/vmalloc.h>
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index cc7f879bb175..86c8efecd7c2 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -871,7 +871,7 @@ intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
if (!th)
return ERR_PTR(-ENOMEM);
- th->id = ida_simple_get(&intel_th_ida, 0, 0, GFP_KERNEL);
+ th->id = ida_alloc(&intel_th_ida, GFP_KERNEL);
if (th->id < 0) {
err = th->id;
goto err_alloc;
@@ -931,7 +931,7 @@ err_chrdev:
"intel_th/output");
err_ida:
- ida_simple_remove(&intel_th_ida, th->id);
+ ida_free(&intel_th_ida, th->id);
err_alloc:
kfree(th);
@@ -964,7 +964,7 @@ void intel_th_free(struct intel_th *th)
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
- ida_simple_remove(&intel_th_ida, th->id);
+ ida_free(&intel_th_ida, th->id);
kfree(th);
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 97989c914260..fe6e8a1bb607 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -18,7 +18,7 @@ config I2C_CCGX_UCSI
config I2C_ALI1535
tristate "ALI 1535"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the SMB
Host controller on Acer Labs Inc. (ALI) M1535 South Bridges. The SMB
@@ -30,7 +30,7 @@ config I2C_ALI1535
config I2C_ALI1563
tristate "ALI 1563"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the SMB
Host controller on Acer Labs Inc. (ALI) M1563 South Bridges. The SMB
@@ -42,7 +42,7 @@ config I2C_ALI1563
config I2C_ALI15X3
tristate "ALI 15x3"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the
Acer Labs Inc. (ALI) M1514 and M1543 motherboard I2C interfaces.
@@ -52,7 +52,7 @@ config I2C_ALI15X3
config I2C_AMD756
tristate "AMD 756/766/768/8111 and nVidia nForce"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the AMD
756/766/768 mainboard I2C interfaces. The driver also includes
@@ -77,7 +77,7 @@ config I2C_AMD756_S4882
config I2C_AMD8111
tristate "AMD 8111"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the
second (SMBus 2.0) AMD 8111 mainboard I2C interface.
@@ -107,7 +107,7 @@ config I2C_HIX5HD2
config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select P2SB if X86
select CHECK_SIGNATURE if X86 && DMI
select I2C_SMBUS
@@ -163,9 +163,17 @@ config I2C_I801
This driver can also be built as a module. If so, the module
will be called i2c-i801.
+config I2C_I801_MUX
+ def_bool I2C_I801
+ depends on DMI && I2C_MUX_GPIO
+ depends on !(I2C_I801=y && I2C_MUX=m)
+ help
+ Optional support for multiplexed SMBus on certain systems with
+ more than 8 memory slots.
+
config I2C_ISCH
tristate "Intel SCH SMBus 1.0"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select LPC_SCH
help
Say Y here if you want to use SMBus controller on the Intel SCH
@@ -186,7 +194,7 @@ config I2C_ISMT
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the Intel
PIIX4 family of mainboard I2C interfaces. Specifically, the following
@@ -232,7 +240,7 @@ config I2C_CHT_WC
config I2C_NFORCE2
tristate "Nvidia nForce2, nForce3 and nForce4"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the Nvidia
nForce2, nForce3 and nForce4 families of mainboard I2C interfaces.
@@ -265,7 +273,7 @@ config I2C_NVIDIA_GPU
config I2C_SIS5595
tristate "SiS 5595"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the
SiS5595 SMBus (a subset of I2C) interface.
@@ -275,7 +283,7 @@ config I2C_SIS5595
config I2C_SIS630
tristate "SiS 630/730/964"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the
SiS630, SiS730 and SiS964 SMBus (a subset of I2C) interface.
@@ -285,7 +293,7 @@ config I2C_SIS630
config I2C_SIS96X
tristate "SiS 96x"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the SiS
96x SMBus (a subset of I2C) interfaces. Specifically, the following
@@ -303,7 +311,7 @@ config I2C_SIS96X
config I2C_VIA
tristate "VIA VT82C586B"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select I2C_ALGOBIT
help
If you say yes to this option, support will be included for the VIA
@@ -314,7 +322,7 @@ config I2C_VIA
config I2C_VIAPRO
tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx/VX900"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
If you say yes to this option, support will be included for the VIA
VT82C596 and later SMBus interface. Specifically, the following
@@ -336,6 +344,16 @@ config I2C_VIAPRO
if ACPI
+config I2C_ZHAOXIN
+ tristate "Zhaoxin I2C Interface"
+ depends on PCI || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ ZHAOXIN I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-zhaoxin.
+
comment "ACPI drivers"
config I2C_SCMI
@@ -500,7 +518,7 @@ config I2C_BRCMSTB
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
+ depends on ARCH_ZYNQ || ARM64 || XTENSA || RISCV || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -1397,6 +1415,7 @@ config I2C_ICY
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
depends on X86_64 || (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOPORT
help
This exposes the Mellanox platform I2C busses to the linux I2C layer
for X86 and ARM64/ACPI based systems.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index aa0ee8ecd6f2..3d65934f5eb4 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_VIA) += i2c-via.o
obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
+i2c-zhaoxin-objs := i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
+obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o
# Mac SMBus host controller drivers
obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
@@ -118,6 +120,7 @@ obj-$(CONFIG_I2C_TEGRA_BPMP) += i2c-tegra-bpmp.o
obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
+i2c-wmt-objs := i2c-viai2c-wmt.o i2c-viai2c-common.o
obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 461eb23f9d47..9d7b4efe26ad 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -285,10 +285,8 @@ static int ali1535_transaction(struct i2c_adapter *adap)
&& (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
- if (timeout > MAX_TIMEOUT) {
+ if (timeout > MAX_TIMEOUT)
result = -ETIMEDOUT;
- dev_err(&adap->dev, "SMBus Timeout!\n");
- }
if (temp & ALI1535_STS_FAIL) {
result = -EIO;
@@ -313,10 +311,8 @@ static int ali1535_transaction(struct i2c_adapter *adap)
}
/* check to see if the "command complete" indication is set */
- if (!(temp & ALI1535_STS_DONE)) {
+ if (!(temp & ALI1535_STS_DONE))
result = -ETIMEDOUT;
- dev_err(&adap->dev, "Error: command never completed\n");
- }
dev_dbg(&adap->dev, "Transaction (post): STS=%02x, TYP=%02x, "
"CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 307fb0666ecb..63897a89bb35 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -99,7 +99,6 @@ static int ali1563_transaction(struct i2c_adapter *a, int size)
return 0;
if (!timeout) {
- dev_err(&a->dev, "Timeout - Trying to KILL transaction!\n");
/* Issue 'kill' to host controller */
outb_p(HST_CNTL2_KILL, SMB_HST_CNTL2);
data = inb_p(SMB_HST_STS);
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index d2fa30deb054..956e5020d71e 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -294,10 +294,8 @@ static int ali15x3_transaction(struct i2c_adapter *adap)
&& (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
- if (timeout > MAX_TIMEOUT) {
+ if (timeout > MAX_TIMEOUT)
result = -ETIMEDOUT;
- dev_err(&adap->dev, "SMBus Timeout!\n");
- }
if (temp & ALI15X3_STS_TERM) {
result = -EIO;
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 112fe2bc5662..d3ac1c77a509 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -97,17 +97,17 @@ static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common)
static int i2c_amd_check_cmd_completion(struct amd_i2c_dev *i2c_dev)
{
struct amd_i2c_common *i2c_common = &i2c_dev->common;
- unsigned long timeout;
+ unsigned long time_left;
- timeout = wait_for_completion_timeout(&i2c_dev->cmd_complete,
- i2c_dev->adap.timeout);
+ time_left = wait_for_completion_timeout(&i2c_dev->cmd_complete,
+ i2c_dev->adap.timeout);
if ((i2c_common->reqcmd == i2c_read ||
i2c_common->reqcmd == i2c_write) &&
i2c_common->msg->len > 32)
i2c_amd_dma_unmap(i2c_common);
- if (timeout == 0) {
+ if (time_left == 0) {
amd_mp2_rw_timeout(i2c_common);
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index d311981d3e60..ee3b469ddfb9 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -591,7 +591,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
dev->adapter.timeout);
if (time_left == 0) {
dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
- dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
ret = -ETIMEDOUT;
goto error;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index e905734c26a0..133d02899c6b 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -811,8 +811,6 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
}
if (!time_left && !iproc_i2c->xfer_is_done) {
- dev_err(iproc_i2c->device, "transaction timed out\n");
-
/* flush both TX/RX FIFOs */
val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index b92de1944221..3045ba82380d 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -370,7 +370,6 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (!time_left) {
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
BCM2835_I2C_C_CLEAR);
- dev_err(i2c_dev->dev, "i2c transfer timed out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 4bb7d6756947..87b9ba95b2e1 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -633,6 +633,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
if (hold_clear) {
ctrl_reg &= ~CDNS_I2C_CR_HOLD;
+ ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO;
/*
* In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size
* register reaches '0'. This is an IP bug which causes transfer size
@@ -789,8 +790,6 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
cdns_i2c_master_reset(adap);
- dev_err(id->adap.dev.parent,
- "timeout waiting on completion\n");
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 02b3b1160fb0..7ae611120cfa 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -489,7 +489,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
time_left = wait_for_completion_timeout(&dev->cmd_complete,
dev->adapter.timeout);
if (!time_left) {
- dev_err(dev->dev, "controller timed out\n");
i2c_recover_bus(adap);
dev->buf_len = 0;
return -ETIMEDOUT;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 9be9a2658e1f..a1b379a1e904 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -424,8 +424,6 @@ static struct pci_driver dw_i2c_driver = {
};
module_pci_driver(dw_i2c_driver);
-/* Work with hotplug and coldplug */
-MODULE_ALIAS("i2c_designware-pci");
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ab41ba39d55..29aac9c87368 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -46,6 +46,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT33C3", 0 },
{ "INT3432", 0 },
{ "INT3433", 0 },
+ { "INTC10EF", 0 },
{ "80860F41", ACCESS_NO_IRQ_SUSPEND },
{ "808622C1", ACCESS_NO_IRQ_SUSPEND },
{ "AMD0010", ACCESS_INTR_MASK },
@@ -479,8 +480,11 @@ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
};
-/* Work with hotplug and coldplug */
-MODULE_ALIAS("platform:i2c_designware");
+static const struct platform_device_id dw_i2c_platform_ids[] = {
+ { "i2c_designware" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, dw_i2c_platform_ids);
static struct platform_driver dw_i2c_driver = {
.probe = dw_i2c_plat_probe,
@@ -491,6 +495,7 @@ static struct platform_driver dw_i2c_driver = {
.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
.pm = pm_ptr(&dw_i2c_dev_pm_ops),
},
+ .id_table = dw_i2c_platform_ids,
};
static int __init dw_i2c_init_driver(void)
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3462f2bc0fa8..737604ae11fc 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -213,7 +213,7 @@ out:
static int dc_i2c_xfer_msg(struct dc_i2c *i2c, struct i2c_msg *msg, int first,
int last)
{
- unsigned long timeout = msecs_to_jiffies(TIMEOUT_MS);
+ unsigned long time_left = msecs_to_jiffies(TIMEOUT_MS);
unsigned long flags;
spin_lock_irqsave(&i2c->lock, flags);
@@ -227,9 +227,9 @@ static int dc_i2c_xfer_msg(struct dc_i2c *i2c, struct i2c_msg *msg, int first,
dc_i2c_start_msg(i2c, first);
spin_unlock_irqrestore(&i2c->lock, flags);
- timeout = wait_for_completion_timeout(&i2c->done, timeout);
+ time_left = wait_for_completion_timeout(&i2c->done, time_left);
dc_i2c_set_irq(i2c, 0);
- if (timeout == 0) {
+ if (time_left == 0) {
i2c->state = STATE_IDLE;
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 385ef9d9e4d4..d8baca9b610c 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -763,7 +763,7 @@ static bool exynos5_i2c_poll_irqs_timeout(struct exynos5_i2c *i2c,
static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
struct i2c_msg *msgs, int stop)
{
- unsigned long timeout;
+ unsigned long time_left;
int ret;
i2c->msg = msgs;
@@ -775,13 +775,13 @@ static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
exynos5_i2c_message_start(i2c, stop);
if (!i2c->atomic)
- timeout = wait_for_completion_timeout(&i2c->msg_complete,
- EXYNOS5_I2C_TIMEOUT);
- else
- timeout = exynos5_i2c_poll_irqs_timeout(i2c,
+ time_left = wait_for_completion_timeout(&i2c->msg_complete,
EXYNOS5_I2C_TIMEOUT);
+ else
+ time_left = exynos5_i2c_poll_irqs_timeout(i2c,
+ EXYNOS5_I2C_TIMEOUT);
- if (timeout == 0)
+ if (time_left == 0)
ret = -ETIMEDOUT;
else
ret = i2c->state;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 8e75515c3ca4..a47b9939fa2c 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -314,7 +314,7 @@ static void hix5hd2_i2c_message_start(struct hix5hd2_i2c_priv *priv, int stop)
static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv,
struct i2c_msg *msgs, int stop)
{
- unsigned long timeout;
+ unsigned long time_left;
int ret;
priv->msg = msgs;
@@ -327,9 +327,9 @@ static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv,
reinit_completion(&priv->msg_complete);
hix5hd2_i2c_message_start(priv, stop);
- timeout = wait_for_completion_timeout(&priv->msg_complete,
- priv->adap.timeout);
- if (timeout == 0) {
+ time_left = wait_for_completion_timeout(&priv->msg_complete,
+ priv->adap.timeout);
+ if (time_left == 0) {
priv->state = HIX5I2C_STAT_RW_ERR;
priv->err = -ETIMEDOUT;
dev_warn(priv->dev, "%s timeout=%d\n",
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a6861660cb8c..d2d2a6dbe29f 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -105,6 +105,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
#include <linux/i2c-smbus.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -119,7 +120,7 @@
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
-#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
+#ifdef CONFIG_I2C_I801_MUX
#include <linux/gpio/machine.h>
#include <linux/platform_data/i2c-mux-gpio.h>
#endif
@@ -263,7 +264,6 @@ struct i801_mux_config {
char *gpio_chip;
unsigned values[3];
int n_values;
- unsigned classes[3];
unsigned gpios[2]; /* Relative to gpio_chip->base */
int n_gpios;
};
@@ -288,9 +288,10 @@ struct i801_priv {
int len;
u8 *data;
-#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
+#ifdef CONFIG_I2C_I801_MUX
struct platform_device *mux_pdev;
struct gpiod_lookup_table *lookup;
+ struct notifier_block mux_notifier_block;
#endif
struct platform_device *tco_pdev;
@@ -398,9 +399,7 @@ static int i801_check_post(struct i801_priv *priv, int status)
* If the SMBus is still busy, we give up
*/
if (unlikely(status < 0)) {
- dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
- dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
usleep_range(1000, 2000);
outb_p(0, SMBHSTCNT(priv));
@@ -409,7 +408,7 @@ static int i801_check_post(struct i801_priv *priv, int status)
status = inb_p(SMBHSTSTS(priv));
if ((status & SMBHSTSTS_HOST_BUSY) ||
!(status & SMBHSTSTS_FAILED))
- dev_err(&priv->pci_dev->dev,
+ dev_dbg(&priv->pci_dev->dev,
"Failed terminating the transaction\n");
return -ETIMEDOUT;
}
@@ -536,11 +535,12 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
if (read_write == I2C_SMBUS_READ ||
command == I2C_SMBUS_BLOCK_PROC_CALL) {
- status = i801_get_block_len(priv);
- if (status < 0)
+ len = i801_get_block_len(priv);
+ if (len < 0) {
+ status = len;
goto out;
+ }
- len = status;
data->block[0] = len;
inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
@@ -1058,7 +1058,7 @@ static const struct pci_device_id i801_ids[] = {
MODULE_DEVICE_TABLE(pci, i801_ids);
#if defined CONFIG_X86 && defined CONFIG_DMI
-static unsigned char apanel_addr;
+static unsigned char apanel_addr __ro_after_init;
/* Scan the system ROM for the signature "FJKEYINF" */
static __init const void __iomem *bios_signature(const void __iomem *bios)
@@ -1297,7 +1297,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
register_dell_lis3lv02d_i2c_device(priv);
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
-#if IS_ENABLED(CONFIG_I2C_MUX_GPIO)
+#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
#endif
i2c_register_spd(&priv->adapter);
@@ -1307,12 +1307,11 @@ static void __init input_apanel_init(void) {}
static void i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
-#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
+#ifdef CONFIG_I2C_I801_MUX
static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
.gpio_chip = "gpio_ich",
.values = { 0x02, 0x03 },
.n_values = 2,
- .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD },
.gpios = { 52, 53 },
.n_gpios = 2,
};
@@ -1321,7 +1320,6 @@ static struct i801_mux_config i801_mux_config_asus_z8_d18 = {
.gpio_chip = "gpio_ich",
.values = { 0x02, 0x03, 0x01 },
.n_values = 3,
- .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD, I2C_CLASS_SPD },
.gpios = { 52, 53 },
.n_gpios = 2,
};
@@ -1393,6 +1391,23 @@ static const struct dmi_system_id mux_dmi_table[] = {
{ }
};
+static int i801_notifier_call(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct i801_priv *priv = container_of(nb, struct i801_priv, mux_notifier_block);
+ struct device *dev = data;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE ||
+ dev->type != &i2c_adapter_type ||
+ i2c_root_adapter(dev) != &priv->adapter)
+ return NOTIFY_DONE;
+
+ /* Call i2c_register_spd for muxed child segments */
+ i2c_register_spd(to_i2c_adapter(dev));
+
+ return NOTIFY_OK;
+}
+
/* Setup multiplexing if needed */
static void i801_add_mux(struct i801_priv *priv)
{
@@ -1414,7 +1429,6 @@ static void i801_add_mux(struct i801_priv *priv)
gpio_data.parent = priv->adapter.nr;
gpio_data.values = mux_config->values;
gpio_data.n_values = mux_config->n_values;
- gpio_data.classes = mux_config->classes;
gpio_data.idle = I2C_MUX_GPIO_NO_IDLE;
/* Register GPIO descriptor lookup table */
@@ -1429,6 +1443,9 @@ static void i801_add_mux(struct i801_priv *priv)
mux_config->gpios[i], "mux", 0);
gpiod_add_lookup_table(lookup);
+ priv->mux_notifier_block.notifier_call = i801_notifier_call;
+ if (bus_register_notifier(&i2c_bus_type, &priv->mux_notifier_block))
+ return;
/*
* Register the mux device, we use PLATFORM_DEVID_NONE here
* because since we are referring to the GPIO chip by name we are
@@ -1450,6 +1467,7 @@ static void i801_add_mux(struct i801_priv *priv)
static void i801_del_mux(struct i801_priv *priv)
{
+ bus_unregister_notifier(&i2c_bus_type, &priv->mux_notifier_block);
platform_device_unregister(priv->mux_pdev);
gpiod_remove_lookup_table(priv->lookup);
}
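
The i801 change above instantiates SPD EEPROMs on muxed child segments by watching the I2C bus for newly added adapters that are rooted at its own adapter. A minimal sketch of that bus-notifier pattern, with hypothetical names (my_priv, my_setup_notifier) and not the driver code itself:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/notifier.h>

struct my_priv {
	struct i2c_adapter adapter;
	struct notifier_block nb;
};

static int my_notifier_call(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct my_priv *priv = container_of(nb, struct my_priv, nb);
	struct device *dev = data;

	/* Only react to new adapters rooted at our own adapter */
	if (action != BUS_NOTIFY_ADD_DEVICE ||
	    dev->type != &i2c_adapter_type ||
	    i2c_root_adapter(dev) != &priv->adapter)
		return NOTIFY_DONE;

	/* A muxed child segment appeared; act on it here */
	return NOTIFY_OK;
}

static int my_setup_notifier(struct my_priv *priv)
{
	priv->nb.notifier_call = my_notifier_call;
	return bus_register_notifier(&i2c_bus_type, &priv->nb);
}

Unregistering with bus_unregister_notifier() on the teardown path, as i801_del_mux() does, keeps the callback from outliving the device.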
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index f9d4bfef511c..e0e87185f6bb 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1124,11 +1124,8 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
IMG_I2C_TIMEOUT);
del_timer_sync(&i2c->check_timer);
- if (time_left == 0) {
- dev_err(adap->dev.parent, "i2c transfer timed out\n");
+ if (time_left == 0)
i2c->msg_status = -ETIMEDOUT;
- break;
- }
if (i2c->msg_status)
break;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 6d72e4e126dd..0197786892a2 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -99,6 +99,7 @@ struct lpi2c_imx_struct {
__u8 *rx_buf;
__u8 *tx_buf;
struct completion complete;
+ unsigned long rate_per;
unsigned int msglen;
unsigned int delivered;
unsigned int block_data;
@@ -212,9 +213,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
lpi2c_imx_set_mode(lpi2c_imx);
- clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk);
- if (!clk_rate)
- return -EINVAL;
+ clk_rate = lpi2c_imx->rate_per;
if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
filt = 0;
@@ -308,11 +307,11 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
- unsigned long timeout;
+ unsigned long time_left;
- timeout = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
+ time_left = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
- return timeout ? 0 : -ETIMEDOUT;
+ return time_left ? 0 : -ETIMEDOUT;
}
static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
@@ -611,6 +610,20 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+	 * Lock the parent clock rate so it does not have to be queried on
+	 * every transfer
+ */
+ ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't lock I2C peripheral clock rate\n");
+
+ lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
+ if (!lpi2c_imx->rate_per)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "can't get I2C peripheral clock rate\n");
+
pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
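
The lpi2c hunk above locks the peripheral clock rate exclusively and caches it once at probe time instead of reading it on every transfer. A minimal sketch of that pattern, assuming a hypothetical my_probe_clk() helper rather than the driver's own probe:

#include <linux/clk.h>
#include <linux/device.h>

static int my_probe_clk(struct device *dev, struct clk *per_clk,
			unsigned long *rate)
{
	int ret;

	/* Keep other consumers from changing the rate behind our back */
	ret = devm_clk_rate_exclusive_get(dev, per_clk);
	if (ret)
		return dev_err_probe(dev, ret, "can't lock clock rate\n");

	*rate = clk_get_rate(per_clk);
	if (!*rate)
		return dev_err_probe(dev, -EINVAL, "clock rate is zero\n");

	return 0;
}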
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c74985d77b0e..655b5d851c48 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -623,7 +623,6 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
- dev_err(dev, "completion wait timed out\n");
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 55035cca0ae5..7951891d6b97 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -565,7 +565,7 @@ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
int idx)
{
int ret = 0;
- long timeout;
+ unsigned long time_left;
int wait_time = JZ4780_I2C_TIMEOUT * (len + 5);
unsigned short tmp;
unsigned long flags;
@@ -600,10 +600,10 @@ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
spin_unlock_irqrestore(&i2c->lock, flags);
- timeout = wait_for_completion_timeout(&i2c->trans_waitq,
- msecs_to_jiffies(wait_time));
+ time_left = wait_for_completion_timeout(&i2c->trans_waitq,
+ msecs_to_jiffies(wait_time));
- if (!timeout) {
+ if (!time_left) {
dev_err(&i2c->adap.dev, "irq read timeout\n");
dev_dbg(&i2c->adap.dev, "send cmd count:%d %d\n",
i2c->cmd, i2c->cmd_buf[i2c->cmd]);
@@ -627,7 +627,7 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
{
int ret = 0;
int wait_time = JZ4780_I2C_TIMEOUT * (len + 5);
- long timeout;
+ unsigned long time_left;
unsigned short tmp;
unsigned long flags;
@@ -655,14 +655,14 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
spin_unlock_irqrestore(&i2c->lock, flags);
- timeout = wait_for_completion_timeout(&i2c->trans_waitq,
- msecs_to_jiffies(wait_time));
- if (timeout && !i2c->stop_hold) {
+ time_left = wait_for_completion_timeout(&i2c->trans_waitq,
+ msecs_to_jiffies(wait_time));
+ if (time_left && !i2c->stop_hold) {
unsigned short i2c_sta;
int write_in_process;
- timeout = JZ4780_I2C_TIMEOUT * 100;
- for (; timeout > 0; timeout--) {
+ time_left = JZ4780_I2C_TIMEOUT * 100;
+ for (; time_left > 0; time_left--) {
i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
write_in_process = (i2c_sta & JZ4780_I2C_STA_MSTACT) ||
@@ -673,7 +673,7 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
}
}
- if (!timeout) {
+ if (!time_left) {
dev_err(&i2c->adap.dev, "write wait timeout\n");
ret = -EIO;
}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 8d73c0f405ed..c4223556b3b8 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -304,13 +304,12 @@ static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock)
{
- struct device_node *node_ctrl;
void __iomem *ctrl;
u32 idx;
/* Enable I2C interrupts for mpc5121 */
- node_ctrl = of_find_compatible_node(NULL, NULL,
- "fsl,mpc5121-i2c-ctrl");
+ struct device_node *node_ctrl __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "fsl,mpc5121-i2c-ctrl");
if (node_ctrl) {
ctrl = of_iomap(node_ctrl, 0);
if (ctrl) {
@@ -321,7 +320,6 @@ static void mpc_i2c_setup_512x(struct device_node *node,
setbits32(ctrl, 1 << (24 + idx * 2));
iounmap(ctrl);
}
- of_node_put(node_ctrl);
}
/* The clock setup for the 52xx works also fine for the 512x */
@@ -358,11 +356,11 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
- struct device_node *node;
u32 __iomem *reg;
u32 val = 0;
- node = of_find_node_by_name(NULL, "global-utilities");
+ struct device_node *node __free(device_node) =
+ of_find_node_by_name(NULL, "global-utilities");
if (node) {
const u32 *prop = of_get_property(node, "reg", NULL);
if (prop) {
@@ -383,7 +381,6 @@ static u32 mpc_i2c_get_sec_cfg_8xxx(void)
iounmap(reg);
}
}
- of_node_put(node);
return val;
}
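
The mpc change switches to scope-based cleanup: the __free(device_node) annotation drops the node reference automatically when the variable goes out of scope, which is why the explicit of_node_put() calls disappear. A minimal sketch under that assumption (my_has_ctrl_node is hypothetical):

#include <linux/of.h>

static bool my_has_ctrl_node(void)
{
	struct device_node *np __free(device_node) =
		of_find_compatible_node(NULL, NULL, "fsl,mpc5121-i2c-ctrl");

	/* No of_node_put() needed on any return path */
	return np != NULL;
}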
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 4f41a3c7824d..ad0f02acdb12 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -542,12 +542,9 @@ static int read_i2c(struct nmk_i2c_dev *priv, u16 flags)
xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (!xfer_done) {
- /* Controller timed out */
- dev_err(&priv->adev->dev, "read from slave 0x%x timed out\n",
- priv->cli.slave_adr);
+ if (!xfer_done)
status = -ETIMEDOUT;
- }
+
return status;
}
@@ -1194,7 +1191,6 @@ MODULE_DEVICE_TABLE(amba, nmk_i2c_ids);
static struct amba_driver nmk_i2c_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.pm = pm_ptr(&nmk_i2c_pm),
},
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e106af83cef4..56a4dabf5a38 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -32,7 +32,6 @@
*/
struct ocores_i2c {
void __iomem *base;
- int iobase;
u32 reg_shift;
u32 reg_io_width;
unsigned long flags;
@@ -136,16 +135,6 @@ static inline u8 oc_getreg_32be(struct ocores_i2c *i2c, int reg)
return ioread32be(i2c->base + (reg << i2c->reg_shift));
}
-static void oc_setreg_io_8(struct ocores_i2c *i2c, int reg, u8 value)
-{
- outb(value, i2c->iobase + reg);
-}
-
-static inline u8 oc_getreg_io_8(struct ocores_i2c *i2c, int reg)
-{
- return inb(i2c->iobase + reg);
-}
-
static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
{
i2c->setreg(i2c, reg, value);
@@ -618,15 +607,19 @@ static int ocores_i2c_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -EINVAL;
- i2c->iobase = res->start;
if (!devm_request_region(&pdev->dev, res->start,
resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Can't get I/O resource.\n");
return -EBUSY;
}
- i2c->setreg = oc_setreg_io_8;
- i2c->getreg = oc_getreg_io_8;
+ i2c->base = devm_ioport_map(&pdev->dev, res->start,
+ resource_size(res));
+ if (!i2c->base) {
+ dev_err(&pdev->dev, "Can't map I/O resource.\n");
+ return -EBUSY;
+ }
+ i2c->reg_io_width = 1;
}
pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 845eda70b8ca..5b7b942141e7 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -17,9 +17,14 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include "i2c-octeon-core.h"
+#define INITIAL_DELTA_HZ 1000000
+#define TWSI_MASTER_CLK_REG_DEF_VAL 0x18
+#define TWSI_MASTER_CLK_REG_OTX2_VAL 0x3
+
/* interrupt service routine */
irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
@@ -80,7 +85,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
{
- return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
+ return (__raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c)) & SW_TWSI_V) == 0;
}
static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
@@ -177,13 +182,14 @@ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
{
u8 stat;
+ u64 mode;
/*
* This is ugly... in HLC mode the status is not in the status register
- * but in the lower 8 bits of SW_TWSI.
+ * but in the lower 8 bits of OCTEON_REG_SW_TWSI.
*/
if (i2c->hlc_enabled)
- stat = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ stat = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
else
stat = octeon_i2c_stat_read(i2c);
@@ -239,6 +245,13 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
case STAT_RXADDR_NAK:
case STAT_AD2W_NAK:
return -ENXIO;
+
+ case STAT_WDOG_TOUT:
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ /* Set BUS_MON_RST to reset bus monitor */
+ mode |= BUS_MON_RST_MASK;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ return -EIO;
default:
dev_err(i2c->dev, "unhandled state: %d\n", stat);
return -EIO;
@@ -419,12 +432,12 @@ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
else
cmd |= SW_TWSI_OP_7;
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
return octeon_i2c_check_status(i2c, false);
@@ -432,7 +445,7 @@ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[0].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -469,15 +482,15 @@ static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
ext |= (u64)msgs[0].buf[j] << (8 * i);
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
}
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
return octeon_i2c_check_status(i2c, false);
@@ -510,19 +523,19 @@ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs
cmd |= SW_TWSI_EIA;
ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
} else {
cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
}
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
return octeon_i2c_check_status(i2c, false);
@@ -530,7 +543,7 @@ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[1].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -577,16 +590,16 @@ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msg
set_ext = true;
}
if (set_ext)
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
return octeon_i2c_check_status(i2c, false);
@@ -607,25 +620,27 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
int i, ret = 0;
- if (num == 1) {
- if (msgs[0].len > 0 && msgs[0].len <= 8) {
- if (msgs[0].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_write(i2c, msgs);
- goto out;
- }
- } else if (num == 2) {
- if ((msgs[0].flags & I2C_M_RD) == 0 &&
- (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
- msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
- msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
+ if (IS_LS_FREQ(i2c->twsi_freq)) {
+ if (num == 1) {
+ if (msgs[0].len > 0 && msgs[0].len <= 8) {
+ if (msgs[0].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_write(i2c, msgs);
+ goto out;
+ }
+ } else if (num == 2) {
+ if ((msgs[0].flags & I2C_M_RD) == 0 &&
+ (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
+ msgs[0].len > 0 && msgs[0].len <= 2 &&
+ msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[0].addr == msgs[1].addr) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ }
}
}
@@ -658,31 +673,64 @@ out:
void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
- int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
+ bool is_plat_otx2;
+ /*
+	 * Find divisors that produce the target frequency. Start with a large
+	 * delta to cover a wide range of divisors. Note: thp is the TCLK half
+	 * period and ds is the OSCL output frequency divisor.
+ */
+ unsigned int thp, mdiv_min, mdiv = 2, ndiv = 0, ds = 10;
+ unsigned int delta_hz = INITIAL_DELTA_HZ;
+
+ is_plat_otx2 = octeon_i2c_is_otx2(to_pci_dev(i2c->dev));
+
+ if (is_plat_otx2) {
+ thp = TWSI_MASTER_CLK_REG_OTX2_VAL;
+ mdiv_min = 0;
+ if (!IS_LS_FREQ(i2c->twsi_freq))
+ ds = 15;
+ } else {
+ thp = TWSI_MASTER_CLK_REG_DEF_VAL;
+ mdiv_min = 2;
+ }
for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
/*
* An mdiv value of less than 2 seems to not work well
* with ds1337 RTCs, so we constrain it to larger values.
*/
- for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
+ for (mdiv_idx = 15; mdiv_idx >= mdiv_min && delta_hz != 0; mdiv_idx--) {
/*
* For given ndiv and mdiv values check the
* two closest thp values.
*/
- tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
+ tclk = i2c->twsi_freq * (mdiv_idx + 1) * ds;
tclk *= (1 << ndiv_idx);
- thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+ if (is_plat_otx2)
+ thp_base = (i2c->sys_freq / tclk) - 2;
+ else
+ thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
for (inc = 0; inc <= 1; inc++) {
thp_idx = thp_base + inc;
if (thp_idx < 5 || thp_idx > 0xff)
continue;
- foscl = i2c->sys_freq / (2 * (thp_idx + 1));
+ if (is_plat_otx2)
+ foscl = i2c->sys_freq / (thp_idx + 2);
+ else
+ foscl = i2c->sys_freq /
+ (2 * (thp_idx + 1));
foscl = foscl / (1 << ndiv_idx);
- foscl = foscl / (mdiv_idx + 1) / 10;
+ foscl = foscl / (mdiv_idx + 1) / ds;
+ if (foscl > i2c->twsi_freq)
+ continue;
diff = abs(foscl - i2c->twsi_freq);
+ /*
+				 * diff is the difference between the calculated
+				 * and the desired frequency; delta_hz tracks the
+				 * smallest difference found so far.
+ */
if (diff < delta_hz) {
delta_hz = diff;
thp = thp_idx;
@@ -694,6 +742,17 @@ void octeon_i2c_set_clock(struct octeon_i2c *i2c)
}
octeon_i2c_reg_write(i2c, SW_TWSI_OP_TWSI_CLK, thp);
octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
+ if (is_plat_otx2) {
+ u64 mode;
+
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ /* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+ if (!IS_LS_FREQ(i2c->twsi_freq))
+ mode |= TWSX_MODE_HS_MASK;
+ else
+ mode &= ~TWSX_MODE_HS_MASK;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ }
}
int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
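
The clock setup above searches (ndiv, mdiv, thp) for the divisor combination whose output frequency lands closest to, without exceeding, the requested bus frequency. A stand-alone sketch of the per-candidate calculation it performs (plain C, names illustrative, not part of the driver):

/* Output frequency for one candidate divisor set; mirrors the loop above */
static unsigned int calc_foscl(unsigned int sys_freq, unsigned int thp,
			       unsigned int ndiv, unsigned int mdiv,
			       unsigned int ds, bool is_otx2)
{
	unsigned int f;

	/* TCLK half-period divider differs between chip generations */
	if (is_otx2)
		f = sys_freq / (thp + 2);
	else
		f = sys_freq / (2 * (thp + 1));

	f /= 1U << ndiv;		/* power-of-two pre-divider */
	return f / (mdiv + 1) / ds;	/* main divider and output divisor */
}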
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 9bb9f64fdda0..7af01864da75 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -7,6 +8,7 @@
#include <linux/i2c-smbus.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
/* Controller command patterns */
#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
@@ -71,6 +73,7 @@
#define STAT_SLAVE_ACK 0xC8
#define STAT_AD2W_ACK 0xD0
#define STAT_AD2W_NAK 0xD8
+#define STAT_WDOG_TOUT 0xF0
#define STAT_IDLE 0xF8
/* TWSI_INT values */
@@ -92,11 +95,21 @@ struct octeon_i2c_reg_offset {
unsigned int sw_twsi;
unsigned int twsi_int;
unsigned int sw_twsi_ext;
+ unsigned int mode;
};
-#define SW_TWSI(x) (x->roff.sw_twsi)
-#define TWSI_INT(x) (x->roff.twsi_int)
-#define SW_TWSI_EXT(x) (x->roff.sw_twsi_ext)
+#define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi)
+#define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int)
+#define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext)
+#define OCTEON_REG_MODE(x) ((x)->roff.mode)
+
+/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+#define TWSX_MODE_REFCLK_SRC BIT(4)
+#define TWSX_MODE_HS_MODE BIT(0)
+#define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE)
+
+/* Set BUS_MON_RST to reset bus monitor */
+#define BUS_MON_RST_MASK BIT(3)
struct octeon_i2c {
wait_queue_head_t queue;
@@ -134,16 +147,16 @@ static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
* @eop_reg: Register selector
* @data: Value to be written
*
- * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
+ * The I2C core registers are accessed indirectly via the OCTEON_REG_SW_TWSI CSR.
*/
static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
int tries = 1000;
u64 tmp;
- __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
+ __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ tmp = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if (--tries < 0)
return;
} while ((tmp & SW_TWSI_V) != 0);
@@ -169,9 +182,9 @@ static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
int tries = 1000;
u64 tmp;
- __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+ __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ tmp = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
if (--tries < 0) {
/* signal that the returned data is invalid */
if (error)
@@ -191,24 +204,40 @@ static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
/**
- * octeon_i2c_read_int - read the TWSI_INT register
+ * octeon_i2c_read_int - read the OCTEON_REG_TWSI_INT register
* @i2c: The struct octeon_i2c
*
* Returns the value of the register.
*/
static inline u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
{
- return __raw_readq(i2c->twsi_base + TWSI_INT(i2c));
+ return __raw_readq(i2c->twsi_base + OCTEON_REG_TWSI_INT(i2c));
}
/**
- * octeon_i2c_write_int - write the TWSI_INT register
+ * octeon_i2c_write_int - write the OCTEON_REG_TWSI_INT register
* @i2c: The struct octeon_i2c
* @data: Value to be written
*/
static inline void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
{
- octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT(i2c));
+ octeon_i2c_writeq_flush(data, i2c->twsi_base + OCTEON_REG_TWSI_INT(i2c));
+}
+
+#define IS_LS_FREQ(twsi_freq) ((twsi_freq) <= 400000)
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+#define PCI_SUBSYS_MASK GENMASK(15, 12)
+/**
+ * octeon_i2c_is_otx2 - check for chip ID
+ * @pdev: PCI dev structure
+ *
+ * Returns true if the device is an OcteonTX2, false otherwise.
+ */
+static inline bool octeon_i2c_is_otx2(struct pci_dev *pdev)
+{
+ u32 chip_id = FIELD_GET(PCI_SUBSYS_MASK, pdev->subsystem_device);
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
}
/* Prototypes */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 42165ef57946..30a5ea282a8b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -660,7 +660,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
struct i2c_msg *msg, int stop, bool polling)
{
struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
- unsigned long timeout;
+ unsigned long time_left;
u16 w;
int ret;
@@ -740,19 +740,18 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* into arbitration and we're currently unable to recover from it.
*/
if (!polling) {
- timeout = wait_for_completion_timeout(&omap->cmd_complete,
- OMAP_I2C_TIMEOUT);
+ time_left = wait_for_completion_timeout(&omap->cmd_complete,
+ OMAP_I2C_TIMEOUT);
} else {
do {
omap_i2c_wait(omap);
ret = omap_i2c_xfer_data(omap);
} while (ret == -EAGAIN);
- timeout = !ret;
+ time_left = !ret;
}
- if (timeout == 0) {
- dev_err(omap->dev, "controller timed out\n");
+ if (time_left == 0) {
omap_i2c_reset(omap);
__omap_i2c_init(omap);
return -ETIMEDOUT;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 76f79b68cef8..f495560bd99c 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -324,6 +324,7 @@ static void decode_ISR(unsigned int val)
decode_bits(KERN_DEBUG "ISR", isr_bits, ARRAY_SIZE(isr_bits), val);
}
+#ifdef CONFIG_I2C_PXA_SLAVE
static const struct bits icr_bits[] = {
PXA_BIT(ICR_START, "START", NULL),
PXA_BIT(ICR_STOP, "STOP", NULL),
@@ -342,7 +343,6 @@ static const struct bits icr_bits[] = {
PXA_BIT(ICR_UR, "UR", "ur"),
};
-#ifdef CONFIG_I2C_PXA_SLAVE
static void decode_ICR(unsigned int val)
{
decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val);
@@ -826,7 +826,7 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c)
{
u32 icr;
- long timeout;
+ long time_left;
spin_lock_irq(&i2c->lock);
i2c->highmode_enter = true;
@@ -837,12 +837,12 @@ static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c)
writel(icr, _ICR(i2c));
spin_unlock_irq(&i2c->lock);
- timeout = wait_event_timeout(i2c->wait,
- i2c->highmode_enter == false, HZ * 1);
+ time_left = wait_event_timeout(i2c->wait,
+ i2c->highmode_enter == false, HZ * 1);
i2c->highmode_enter = false;
- return (timeout == 0) ? I2C_RETRY : 0;
+ return (time_left == 0) ? I2C_RETRY : 0;
}
/*
@@ -1050,7 +1050,7 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
*/
static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
{
- long timeout;
+ long time_left;
int ret;
/*
@@ -1095,7 +1095,7 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
/*
* The rest of the processing occurs in the interrupt handler.
*/
- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ time_left = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
i2c_pxa_stop_message(i2c);
/*
@@ -1103,7 +1103,7 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
*/
ret = i2c->msg_idx;
- if (!timeout && i2c->msg_num) {
+ if (!time_left && i2c->msg_num) {
i2c_pxa_scream_blue_murder(i2c, "timeout with active message");
i2c_recover_bus(&i2c->adap);
ret = I2C_RETRY;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index da94df466e83..0a8b95ce35f7 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -586,7 +586,8 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
{
struct dma_slave_config config = {};
struct gpi_i2c_config peripheral = {};
- int i, ret = 0, timeout;
+ int i, ret = 0;
+ unsigned long time_left;
dma_addr_t tx_addr, rx_addr;
void *tx_buf = NULL, *rx_buf = NULL;
const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
@@ -629,12 +630,9 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
dma_async_issue_pending(gi2c->tx_c);
- timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
- if (!timeout) {
- dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n",
- gi2c->cur->flags, gi2c->cur->addr);
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
gi2c->err = -ETIMEDOUT;
- }
if (gi2c->err) {
ret = gi2c->err;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 598102d16677..c9b43a3c4bd3 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -793,10 +793,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
dma_async_issue_pending(qup->brx.dma);
}
- if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout)) {
- dev_err(qup->dev, "normal trans timed out\n");
+ if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout))
ret = -ETIMEDOUT;
- }
if (ret || qup->bus_err || qup->qup_err) {
reinit_completion(&qup->xfer);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index e43ff483c56e..f608b1838cad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -46,18 +46,6 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#define RIIC_ICCR1 0x00
-#define RIIC_ICCR2 0x04
-#define RIIC_ICMR1 0x08
-#define RIIC_ICMR3 0x10
-#define RIIC_ICSER 0x18
-#define RIIC_ICIER 0x1c
-#define RIIC_ICSR2 0x24
-#define RIIC_ICBRL 0x34
-#define RIIC_ICBRH 0x38
-#define RIIC_ICDRT 0x3c
-#define RIIC_ICDRR 0x40
-
#define ICCR1_ICE 0x80
#define ICCR1_IICRST 0x40
#define ICCR1_SOWP 0x10
@@ -87,6 +75,25 @@
#define RIIC_INIT_MSG -1
+enum riic_reg_list {
+ RIIC_ICCR1 = 0,
+ RIIC_ICCR2,
+ RIIC_ICMR1,
+ RIIC_ICMR3,
+ RIIC_ICSER,
+ RIIC_ICIER,
+ RIIC_ICSR2,
+ RIIC_ICBRL,
+ RIIC_ICBRH,
+ RIIC_ICDRT,
+ RIIC_ICDRR,
+ RIIC_REG_END,
+};
+
+struct riic_of_data {
+ u8 regs[RIIC_REG_END];
+};
+
struct riic_dev {
void __iomem *base;
u8 *buf;
@@ -94,6 +101,7 @@ struct riic_dev {
int bytes_left;
int err;
int is_last;
+ const struct riic_of_data *info;
struct completion msg_done;
struct i2c_adapter adapter;
struct clk *clk;
@@ -105,9 +113,19 @@ struct riic_irq_desc {
char *name;
};
+static inline void riic_writeb(struct riic_dev *riic, u8 val, u8 offset)
+{
+ writeb(val, riic->base + riic->info->regs[offset]);
+}
+
+static inline u8 riic_readb(struct riic_dev *riic, u8 offset)
+{
+ return readb(riic->base + riic->info->regs[offset]);
+}
+
static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u8 reg)
{
- writeb((readb(riic->base + reg) & ~clear) | set, riic->base + reg);
+ riic_writeb(riic, (riic_readb(riic, reg) & ~clear) | set, reg);
}
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -119,7 +137,7 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(adap->dev.parent);
- if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) {
+ if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
goto out;
}
@@ -127,7 +145,7 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
reinit_completion(&riic->msg_done);
riic->err = 0;
- writeb(0, riic->base + RIIC_ICSR2);
+ riic_writeb(riic, 0, RIIC_ICSR2);
for (i = 0, start_bit = ICCR2_ST; i < num; i++) {
riic->bytes_left = RIIC_INIT_MSG;
@@ -135,9 +153,9 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
riic->msg = &msgs[i];
riic->is_last = (i == num - 1);
- writeb(ICIER_NAKIE | ICIER_TIE, riic->base + RIIC_ICIER);
+ riic_writeb(riic, ICIER_NAKIE | ICIER_TIE, RIIC_ICIER);
- writeb(start_bit, riic->base + RIIC_ICCR2);
+ riic_writeb(riic, start_bit, RIIC_ICCR2);
time_left = wait_for_completion_timeout(&riic->msg_done, riic->adapter.timeout);
if (time_left == 0)
@@ -191,7 +209,7 @@ static irqreturn_t riic_tdre_isr(int irq, void *data)
* value could be moved to the shadow shift register right away. So
* this must be after updates to ICIER (where we want to disable TIE)!
*/
- writeb(val, riic->base + RIIC_ICDRT);
+ riic_writeb(riic, val, RIIC_ICDRT);
return IRQ_HANDLED;
}
@@ -200,9 +218,9 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
{
struct riic_dev *riic = data;
- if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
+ if (riic_readb(riic, RIIC_ICSR2) & ICSR2_NACKF) {
/* We got a NACKIE */
- readb(riic->base + RIIC_ICDRR); /* dummy read */
+ riic_readb(riic, RIIC_ICDRR); /* dummy read */
riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
riic->err = -ENXIO;
} else if (riic->bytes_left) {
@@ -211,7 +229,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
if (riic->is_last || riic->err) {
riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
- writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ riic_writeb(riic, ICCR2_SP, RIIC_ICCR2);
} else {
/* Transfer is complete, but do not send STOP */
riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
@@ -230,7 +248,7 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
if (riic->bytes_left == RIIC_INIT_MSG) {
riic->bytes_left = riic->msg->len;
- readb(riic->base + RIIC_ICDRR); /* dummy read */
+ riic_readb(riic, RIIC_ICDRR); /* dummy read */
return IRQ_HANDLED;
}
@@ -238,7 +256,7 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
/* STOP must come before we set ACKBT! */
if (riic->is_last) {
riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
- writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ riic_writeb(riic, ICCR2_SP, RIIC_ICCR2);
}
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
@@ -248,7 +266,7 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
}
/* Reading acks the RIE interrupt */
- *riic->buf = readb(riic->base + RIIC_ICDRR);
+ *riic->buf = riic_readb(riic, RIIC_ICDRR);
riic->buf++;
riic->bytes_left--;
@@ -260,10 +278,10 @@ static irqreturn_t riic_stop_isr(int irq, void *data)
struct riic_dev *riic = data;
/* read back registers to confirm writes have fully propagated */
- writeb(0, riic->base + RIIC_ICSR2);
- readb(riic->base + RIIC_ICSR2);
- writeb(0, riic->base + RIIC_ICIER);
- readb(riic->base + RIIC_ICIER);
+ riic_writeb(riic, 0, RIIC_ICSR2);
+ riic_readb(riic, RIIC_ICSR2);
+ riic_writeb(riic, 0, RIIC_ICIER);
+ riic_readb(riic, RIIC_ICIER);
complete(&riic->msg_done);
@@ -365,15 +383,15 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
/* Changing the order of accessing IICRST and ICE may break things! */
- writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1);
+ riic_writeb(riic, ICCR1_IICRST | ICCR1_SOWP, RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
- writeb(ICMR1_CKS(cks), riic->base + RIIC_ICMR1);
- writeb(brh | ICBR_RESERVED, riic->base + RIIC_ICBRH);
- writeb(brl | ICBR_RESERVED, riic->base + RIIC_ICBRL);
+ riic_writeb(riic, ICMR1_CKS(cks), RIIC_ICMR1);
+ riic_writeb(riic, brh | ICBR_RESERVED, RIIC_ICBRH);
+ riic_writeb(riic, brl | ICBR_RESERVED, RIIC_ICBRL);
- writeb(0, riic->base + RIIC_ICSER);
- writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3);
+ riic_writeb(riic, 0, RIIC_ICSER);
+ riic_writeb(riic, ICMR3_ACKWP | ICMR3_RDRFS, RIIC_ICMR3);
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
@@ -443,6 +461,8 @@ static int riic_i2c_probe(struct platform_device *pdev)
}
}
+ riic->info = of_device_get_match_data(&pdev->dev);
+
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
@@ -481,14 +501,47 @@ static void riic_i2c_remove(struct platform_device *pdev)
struct riic_dev *riic = platform_get_drvdata(pdev);
pm_runtime_get_sync(&pdev->dev);
- writeb(0, riic->base + RIIC_ICIER);
+ riic_writeb(riic, 0, RIIC_ICIER);
pm_runtime_put(&pdev->dev);
i2c_del_adapter(&riic->adapter);
pm_runtime_disable(&pdev->dev);
}
+static const struct riic_of_data riic_rz_a_info = {
+ .regs = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x04,
+ [RIIC_ICMR1] = 0x08,
+ [RIIC_ICMR3] = 0x10,
+ [RIIC_ICSER] = 0x18,
+ [RIIC_ICIER] = 0x1c,
+ [RIIC_ICSR2] = 0x24,
+ [RIIC_ICBRL] = 0x34,
+ [RIIC_ICBRH] = 0x38,
+ [RIIC_ICDRT] = 0x3c,
+ [RIIC_ICDRR] = 0x40,
+ },
+};
+
+static const struct riic_of_data riic_rz_v2h_info = {
+ .regs = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x01,
+ [RIIC_ICMR1] = 0x02,
+ [RIIC_ICMR3] = 0x04,
+ [RIIC_ICSER] = 0x06,
+ [RIIC_ICIER] = 0x07,
+ [RIIC_ICSR2] = 0x09,
+ [RIIC_ICBRL] = 0x10,
+ [RIIC_ICBRH] = 0x11,
+ [RIIC_ICDRT] = 0x12,
+ [RIIC_ICDRR] = 0x13,
+ },
+};
+
static const struct of_device_id riic_i2c_dt_ids[] = {
- { .compatible = "renesas,riic-rz", },
+ { .compatible = "renesas,riic-rz", .data = &riic_rz_a_info },
+ { .compatible = "renesas,riic-r9a09g057", .data = &riic_rz_v2h_info },
{ /* Sentinel */ },
};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 086fdf262e7b..beca61700c89 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1060,7 +1060,8 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num, bool polling)
{
struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
- unsigned long timeout, flags;
+ unsigned long flags;
+ long time_left;
u32 val;
int ret = 0;
int i;
@@ -1092,23 +1093,20 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
if (!polling) {
rk3x_i2c_start(i2c);
- timeout = wait_event_timeout(i2c->wait, !i2c->busy,
- msecs_to_jiffies(WAIT_TIMEOUT));
+ time_left = wait_event_timeout(i2c->wait, !i2c->busy,
+ msecs_to_jiffies(WAIT_TIMEOUT));
} else {
disable_irq(i2c->irq);
rk3x_i2c_start(i2c);
- timeout = rk3x_i2c_wait_xfer_poll(i2c);
+ time_left = rk3x_i2c_wait_xfer_poll(i2c);
enable_irq(i2c->irq);
}
spin_lock_irqsave(&i2c->lock, flags);
- if (timeout == 0) {
- dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n",
- i2c_readl(i2c, REG_IPD), i2c->state);
-
+ if (time_left == 0) {
/* Force a STOP condition without interrupt */
i2c_writel(i2c, 0, REG_IEN);
val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 275f7c42165c..01419c738cfc 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -685,7 +685,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout = 0;
+ long time_left = 0;
int ret;
ret = s3c24xx_i2c_set_master(i2c);
@@ -715,7 +715,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
dev_err(i2c->dev, "deal with arbitration loss\n");
}
} else {
- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ time_left = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
}
ret = i2c->msg_idx;
@@ -724,7 +724,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
* Having these next two as dev_err() makes life very
* noisy when doing an i2cdetect
*/
- if (timeout == 0)
+ if (time_left == 0)
dev_dbg(i2c->dev, "timeout\n");
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index c65ac3d7eadc..f86c29737df1 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -688,7 +688,6 @@ static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
}
if (!time_left) {
- dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
sh_mobile_i2c_cleanup_dma(pd, true);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index ce2333408904..5e01fe3dbb63 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -647,7 +647,7 @@ static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg,
{
struct st_i2c_client *c = &i2c_dev->client;
u32 ctl, i2c, it;
- unsigned long timeout;
+ unsigned long time_left;
int ret;
c->addr = i2c_8bit_addr_from_msg(msg);
@@ -685,15 +685,12 @@ static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg,
st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG);
}
- timeout = wait_for_completion_timeout(&i2c_dev->complete,
- i2c_dev->adap.timeout);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
ret = c->result;
- if (!timeout) {
- dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n",
- c->addr);
+ if (!time_left)
ret = -ETIMEDOUT;
- }
i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG;
st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c);
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 859ac0cf7f6c..f8b12be6ef55 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -681,7 +681,7 @@ static int stm32f4_i2c_xfer_msg(struct stm32f4_i2c_dev *i2c_dev,
{
struct stm32f4_i2c_msg *f4_msg = &i2c_dev->msg;
void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR1;
- unsigned long timeout;
+ unsigned long time_left;
u32 mask;
int ret;
@@ -706,11 +706,11 @@ static int stm32f4_i2c_xfer_msg(struct stm32f4_i2c_dev *i2c_dev,
stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START);
}
- timeout = wait_for_completion_timeout(&i2c_dev->complete,
- i2c_dev->adap.timeout);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
ret = f4_msg->result;
- if (!timeout)
+ if (!time_left)
ret = -ETIMEDOUT;
return ret;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 01210452216b..cfee2d9c09de 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1789,7 +1789,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
struct stm32_i2c_dma *dma = i2c_dev->dma;
struct device *dev = i2c_dev->dev;
- unsigned long timeout;
+ unsigned long time_left;
int i, ret;
f7_msg->addr = addr;
@@ -1809,8 +1809,8 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
if (ret)
goto pm_free;
- timeout = wait_for_completion_timeout(&i2c_dev->complete,
- i2c_dev->adap.timeout);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
ret = f7_msg->result;
if (ret) {
if (i2c_dev->use_dma)
@@ -1826,7 +1826,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
goto pm_free;
}
- if (!timeout) {
+ if (!time_left) {
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
if (i2c_dev->use_dma)
dmaengine_terminate_sync(dma->chan_using);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index bbea521b05dd..31ecb2c7e978 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -311,7 +311,7 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c,
struct i2c_msg *msgs, int num)
{
unsigned char bsr;
- unsigned long timeout;
+ unsigned long time_left;
int ret;
synquacer_i2c_hw_init(i2c);
@@ -335,9 +335,9 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c,
return ret;
}
- timeout = wait_for_completion_timeout(&i2c->completion,
- msecs_to_jiffies(i2c->timeout_ms));
- if (timeout == 0) {
+ time_left = wait_for_completion_timeout(&i2c->completion,
+ msecs_to_jiffies(i2c->timeout_ms));
+ if (time_left == 0) {
dev_dbg(i2c->dev, "timeout\n");
return -EAGAIN;
}
@@ -550,17 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR_OR_NULL(i2c->pclk)) {
- dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
+ i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(i2c->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ "failed to get and enable clock\n");
- ret = clk_prepare_enable(i2c->pclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
- i2c->pclkrate = clk_get_rate(i2c->pclk);
- }
+ dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
+ i2c->pclkrate = clk_get_rate(i2c->pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
@@ -615,8 +611,6 @@ static void synquacer_i2c_remove(struct platform_device *pdev)
struct synquacer_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adapter);
- if (!IS_ERR(i2c->pclk))
- clk_disable_unprepare(i2c->pclk);
};
static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = {
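
The SynQuacer conversion uses devm_clk_get_enabled(), which acquires and enables the clock in one call and undoes both automatically on driver detach; that is why remove() no longer calls clk_disable_unprepare(). A minimal sketch, with my_get_pclk_rate as a hypothetical helper:

#include <linux/clk.h>
#include <linux/device.h>

static int my_get_pclk_rate(struct device *dev, unsigned long *rate)
{
	struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk),
				     "failed to get and enable clock\n");

	/* Clock is disabled and released automatically on detach */
	*rate = clk_get_rate(pclk);
	return 0;
}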
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 920d5a8cbf4c..85b31edc558d 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1331,7 +1331,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dmaengine_terminate_sync(i2c_dev->dma_chan);
if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
- dev_err(i2c_dev->dev, "DMA transfer timed out\n");
tegra_i2c_init(i2c_dev);
return -ETIMEDOUT;
}
@@ -1351,7 +1350,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
tegra_i2c_mask_irq(i2c_dev, int_mask);
if (time_left == 0) {
- dev_err(i2c_dev->dev, "I2C transfer timed out\n");
tegra_i2c_init(i2c_dev);
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index a77cd86fe75e..32d0e3930b67 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -27,7 +27,8 @@
#define PCI_DEVICE_ID_THUNDER_TWSI 0xa012
-#define SYS_FREQ_DEFAULT 700000000
+#define SYS_FREQ_DEFAULT 800000000
+#define OTX2_REF_FREQ_DEFAULT 100000000
#define TWSI_INT_ENA_W1C 0x1028
#define TWSI_INT_ENA_W1S 0x1030
@@ -99,7 +100,8 @@ static void thunder_i2c_clock_enable(struct device *dev, struct octeon_i2c *i2c)
i2c->sys_freq = clk_get_rate(i2c->clk);
} else {
/* ACPI */
- device_property_read_u32(dev, "sclk", &i2c->sys_freq);
+ if (device_property_read_u32(dev, "sclk", &i2c->sys_freq))
+ device_property_read_u32(dev, "ioclk", &i2c->sys_freq);
}
skip:
@@ -165,6 +167,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.sw_twsi = 0x1000;
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
+ i2c->roff.mode = 0x1038;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -205,6 +208,12 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
goto error;
+ /*
+ * For OcteonTX2 chips, set reference frequency to 100MHz
+ * as refclk_src in TWSI_MODE register defaults to 100MHz.
+ */
+ if (octeon_i2c_is_otx2(pdev) && IS_LS_FREQ(i2c->twsi_freq))
+ i2c->sys_freq = OTX2_REF_FREQ_DEFAULT;
octeon_i2c_set_clock(i2c);
i2c->adap = thunderx_i2c_ops;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index dbc91c7c3788..6c3dac2cf568 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -358,7 +358,6 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
spin_unlock_irqrestore(&priv->lock, flags);
if (!time_left) {
- dev_err(&adap->dev, "transaction timeout.\n");
uniphier_fi2c_recover(priv);
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 854ac25b5862..e1b4c80e0285 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -71,10 +71,8 @@ static int uniphier_i2c_xfer_byte(struct i2c_adapter *adap, u32 txdata,
writel(txdata, priv->membase + UNIPHIER_I2C_DTRM);
time_left = wait_for_completion_timeout(&priv->comp, adap->timeout);
- if (unlikely(!time_left)) {
- dev_err(&adap->dev, "transaction timeout\n");
+ if (unlikely(!time_left))
return -ETIMEDOUT;
- }
rxdata = readl(priv->membase + UNIPHIER_I2C_DREC);
if (rxdatap)
diff --git a/drivers/i2c/busses/i2c-viai2c-common.c b/drivers/i2c/busses/i2c-viai2c-common.c
new file mode 100644
index 000000000000..1844d13f1f79
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viai2c-common.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/of_irq.h>
+#include "i2c-viai2c-common.h"
+
+int viai2c_wait_bus_not_busy(struct viai2c *i2c)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + VIAI2C_TIMEOUT;
+ while (!(readw(i2c->base + VIAI2C_REG_CSR) & VIAI2C_CSR_READY_MASK)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c->dev, "timeout waiting for bus ready\n");
+ return -EBUSY;
+ }
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int viai2c_write(struct viai2c *i2c, struct i2c_msg *pmsg, int last)
+{
+ u16 val, tcr_val = i2c->tcr;
+
+ i2c->last = last;
+
+ if (pmsg->len == 0) {
+ /*
+ * We still need to run through the while (..) once, so
+ * start at -1 and break out early from the loop
+ */
+ i2c->xfered_len = -1;
+ writew(0, i2c->base + VIAI2C_REG_CDR);
+ } else {
+ writew(pmsg->buf[0] & 0xFF, i2c->base + VIAI2C_REG_CDR);
+ }
+
+ if (i2c->platform == VIAI2C_PLAT_WMT && !(pmsg->flags & I2C_M_NOSTART)) {
+ val = readw(i2c->base + VIAI2C_REG_CR);
+ val &= ~VIAI2C_CR_TX_END;
+ val |= VIAI2C_CR_CPU_RDY;
+ writew(val, i2c->base + VIAI2C_REG_CR);
+ }
+
+ reinit_completion(&i2c->complete);
+
+ tcr_val |= pmsg->addr & VIAI2C_TCR_ADDR_MASK;
+
+ writew(tcr_val, i2c->base + VIAI2C_REG_TCR);
+
+ if (i2c->platform == VIAI2C_PLAT_WMT && pmsg->flags & I2C_M_NOSTART) {
+ val = readw(i2c->base + VIAI2C_REG_CR);
+ val |= VIAI2C_CR_CPU_RDY;
+ writew(val, i2c->base + VIAI2C_REG_CR);
+ }
+
+ if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return i2c->ret;
+}
+
+static int viai2c_read(struct viai2c *i2c, struct i2c_msg *pmsg, bool first)
+{
+ u16 val, tcr_val = i2c->tcr;
+
+ val = readw(i2c->base + VIAI2C_REG_CR);
+ val &= ~(VIAI2C_CR_TX_END | VIAI2C_CR_RX_END);
+
+ if (i2c->platform == VIAI2C_PLAT_WMT && !(pmsg->flags & I2C_M_NOSTART))
+ val |= VIAI2C_CR_CPU_RDY;
+
+ if (pmsg->len == 1)
+ val |= VIAI2C_CR_RX_END;
+
+ writew(val, i2c->base + VIAI2C_REG_CR);
+
+ reinit_completion(&i2c->complete);
+
+ tcr_val |= VIAI2C_TCR_READ | (pmsg->addr & VIAI2C_TCR_ADDR_MASK);
+
+ writew(tcr_val, i2c->base + VIAI2C_REG_TCR);
+
+ if ((i2c->platform == VIAI2C_PLAT_WMT && (pmsg->flags & I2C_M_NOSTART)) ||
+ (i2c->platform == VIAI2C_PLAT_ZHAOXIN && !first)) {
+ val = readw(i2c->base + VIAI2C_REG_CR);
+ val |= VIAI2C_CR_CPU_RDY;
+ writew(val, i2c->base + VIAI2C_REG_CR);
+ }
+
+ if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return i2c->ret;
+}
+
+int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct i2c_msg *pmsg;
+ int i;
+ int ret = 0;
+ struct viai2c *i2c = i2c_get_adapdata(adap);
+
+ i2c->mode = VIAI2C_BYTE_MODE;
+ for (i = 0; ret >= 0 && i < num; i++) {
+ pmsg = &msgs[i];
+ if (i2c->platform == VIAI2C_PLAT_WMT && !(pmsg->flags & I2C_M_NOSTART)) {
+ ret = viai2c_wait_bus_not_busy(i2c);
+ if (ret < 0)
+ return ret;
+ }
+
+ i2c->msg = pmsg;
+ i2c->xfered_len = 0;
+
+ if (pmsg->flags & I2C_M_RD)
+ ret = viai2c_read(i2c, pmsg, i == 0);
+ else
+ ret = viai2c_write(i2c, pmsg, (i + 1) == num);
+ }
+
+ return (ret < 0) ? ret : i;
+}
+
+/*
+ * Main process of the byte mode xfer
+ *
+ * Return value indicates whether the transfer is complete
+ * 1: all the data has been successfully transferred
+ * 0: there is still data that needs to be transferred
+ * -EIO: error occurred
+ */
+static int viai2c_irq_xfer(struct viai2c *i2c)
+{
+ u16 val;
+ struct i2c_msg *msg = i2c->msg;
+ u8 read = msg->flags & I2C_M_RD;
+ void __iomem *base = i2c->base;
+
+ if (read) {
+ msg->buf[i2c->xfered_len] = readw(base + VIAI2C_REG_CDR) >> 8;
+
+ val = readw(base + VIAI2C_REG_CR) | VIAI2C_CR_CPU_RDY;
+ if (i2c->xfered_len == msg->len - 2)
+ val |= VIAI2C_CR_RX_END;
+ writew(val, base + VIAI2C_REG_CR);
+ } else {
+ val = readw(base + VIAI2C_REG_CSR);
+ if (val & VIAI2C_CSR_RCV_NOT_ACK)
+ return -EIO;
+
+ /* I2C_SMBUS_QUICK */
+ if (msg->len == 0) {
+ val = VIAI2C_CR_TX_END | VIAI2C_CR_CPU_RDY | VIAI2C_CR_ENABLE;
+ writew(val, base + VIAI2C_REG_CR);
+ return 1;
+ }
+
+ if ((i2c->xfered_len + 1) == msg->len) {
+ if (i2c->platform == VIAI2C_PLAT_WMT && !i2c->last)
+ writew(VIAI2C_CR_ENABLE, base + VIAI2C_REG_CR);
+ else if (i2c->platform == VIAI2C_PLAT_ZHAOXIN && i2c->last)
+ writeb(VIAI2C_CR_TX_END, base + VIAI2C_REG_CR);
+ } else {
+ writew(msg->buf[i2c->xfered_len + 1] & 0xFF, base + VIAI2C_REG_CDR);
+ writew(VIAI2C_CR_CPU_RDY | VIAI2C_CR_ENABLE, base + VIAI2C_REG_CR);
+ }
+ }
+
+ i2c->xfered_len++;
+
+ return i2c->xfered_len == msg->len;
+}
+
+int __weak viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
+{
+ return 0;
+}
+
+static irqreturn_t viai2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ if (!status && i2c->platform == VIAI2C_PLAT_ZHAOXIN)
+ return IRQ_NONE;
+
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (i2c->platform == VIAI2C_PLAT_WMT && (status & VIAI2C_ISR_SCL_TIMEOUT))
+ i2c->ret = -ETIMEDOUT;
+
+ if (!i2c->ret) {
+ if (i2c->mode == VIAI2C_BYTE_MODE)
+ i2c->ret = viai2c_irq_xfer(i2c);
+ else
+ i2c->ret = viai2c_fifo_irq_xfer(i2c, true);
+ }
+
+	/* All the data has been transferred, or an error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
+int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
+{
+ int err;
+ int irq_flags;
+ struct viai2c *i2c;
+ struct device_node *np = pdev->dev.of_node;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
+
+ if (plat == VIAI2C_PLAT_WMT) {
+ irq_flags = 0;
+ i2c->irq = irq_of_parse_and_map(np, 0);
+ if (!i2c->irq)
+ return -EINVAL;
+ } else if (plat == VIAI2C_PLAT_ZHAOXIN) {
+ irq_flags = IRQF_SHARED;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+ } else {
+ return dev_err_probe(&pdev->dev, -EINVAL, "wrong platform type\n");
+ }
+
+ i2c->platform = plat;
+
+ err = devm_request_irq(&pdev->dev, i2c->irq, viai2c_isr,
+ irq_flags, pdev->name, i2c);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq %i\n", i2c->irq);
+
+ i2c->dev = &pdev->dev;
+ init_completion(&i2c->complete);
+ platform_set_drvdata(pdev, i2c);
+
+ *pi2c = i2c;
+ return 0;
+}
diff --git a/drivers/i2c/busses/i2c-viai2c-common.h b/drivers/i2c/busses/i2c-viai2c-common.h
new file mode 100644
index 000000000000..81e827c54434
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viai2c-common.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __I2C_VIAI2C_COMMON_H_
+#define __I2C_VIAI2C_COMMON_H_
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* REG_CR Bit fields */
+#define VIAI2C_REG_CR 0x00
+#define VIAI2C_CR_ENABLE BIT(0)
+#define VIAI2C_CR_RX_END BIT(1)
+#define VIAI2C_CR_TX_END BIT(2)
+#define VIAI2C_CR_CPU_RDY BIT(3)
+#define VIAI2C_CR_END_MASK GENMASK(2, 1)
+
+/* REG_TCR Bit fields */
+#define VIAI2C_REG_TCR 0x02
+#define VIAI2C_TCR_HS_MODE BIT(13)
+#define VIAI2C_TCR_READ BIT(14)
+#define VIAI2C_TCR_FAST BIT(15)
+#define VIAI2C_TCR_ADDR_MASK GENMASK(6, 0)
+
+/* REG_CSR Bit fields */
+#define VIAI2C_REG_CSR 0x04
+#define VIAI2C_CSR_RCV_NOT_ACK BIT(0)
+#define VIAI2C_CSR_RCV_ACK_MASK BIT(0)
+#define VIAI2C_CSR_READY_MASK BIT(1)
+
+/* REG_ISR Bit fields */
+#define VIAI2C_REG_ISR 0x06
+#define VIAI2C_ISR_NACK_ADDR BIT(0)
+#define VIAI2C_ISR_BYTE_END BIT(1)
+#define VIAI2C_ISR_SCL_TIMEOUT BIT(2)
+#define VIAI2C_ISR_MASK_ALL GENMASK(2, 0)
+
+/* REG_IMR Bit fields */
+#define VIAI2C_REG_IMR 0x08
+#define VIAI2C_IMR_BYTE BIT(1)
+#define VIAI2C_IMR_ENABLE_ALL GENMASK(2, 0)
+
+#define VIAI2C_REG_CDR 0x0A
+#define VIAI2C_REG_TR 0x0C
+#define VIAI2C_REG_MCR 0x0E
+
+#define VIAI2C_TIMEOUT (msecs_to_jiffies(1000))
+
+enum {
+ VIAI2C_PLAT_WMT,
+ VIAI2C_PLAT_ZHAOXIN
+};
+
+enum {
+ VIAI2C_BYTE_MODE,
+ VIAI2C_FIFO_MODE
+};
+
+struct viai2c {
+ struct i2c_adapter adapter;
+ struct completion complete;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ u16 tcr;
+ int irq;
+ u16 xfered_len;
+ struct i2c_msg *msg;
+ int ret;
+ bool last;
+ unsigned int mode;
+ unsigned int platform;
+ void *pltfm_priv;
+};
+
+int viai2c_wait_bus_not_busy(struct viai2c *i2c);
+int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
+int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat);
+int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq);
+
+#endif
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
new file mode 100644
index 000000000000..e1988f946026
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Wondermedia I2C Master Mode Driver
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * Derived from GPLv2+ licensed source:
+ * - Copyright (C) 2008 WonderMedia Technologies, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include "i2c-viai2c-common.h"
+
+#define REG_SLAVE_CR 0x10
+#define REG_SLAVE_SR 0x12
+#define REG_SLAVE_ISR 0x14
+#define REG_SLAVE_IMR 0x16
+#define REG_SLAVE_DR 0x18
+#define REG_SLAVE_TR 0x1A
+
+/* REG_TR */
+#define SCL_TIMEOUT(x) (((x) & 0xFF) << 8)
+#define TR_STD 0x0064
+#define TR_HS 0x0019
+
+/* REG_MCR */
+#define MCR_APB_96M 7
+#define MCR_APB_166M 12
+
+static u32 wmt_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART;
+}
+
+static const struct i2c_algorithm wmt_i2c_algo = {
+ .master_xfer = viai2c_xfer,
+ .functionality = wmt_i2c_func,
+};
+
+static int wmt_i2c_reset_hardware(struct viai2c *i2c)
+{
+ int err;
+
+ err = clk_prepare_enable(i2c->clk);
+ if (err) {
+ dev_err(i2c->dev, "failed to enable clock\n");
+ return err;
+ }
+
+ err = clk_set_rate(i2c->clk, 20000000);
+ if (err) {
+ dev_err(i2c->dev, "failed to set clock to 20MHz\n");
+ clk_disable_unprepare(i2c->clk);
+ return err;
+ }
+
+ writew(0, i2c->base + VIAI2C_REG_CR);
+ writew(MCR_APB_166M, i2c->base + VIAI2C_REG_MCR);
+ writew(VIAI2C_ISR_MASK_ALL, i2c->base + VIAI2C_REG_ISR);
+ writew(VIAI2C_IMR_ENABLE_ALL, i2c->base + VIAI2C_REG_IMR);
+ writew(VIAI2C_CR_ENABLE, i2c->base + VIAI2C_REG_CR);
+ readw(i2c->base + VIAI2C_REG_CSR); /* read clear */
+ writew(VIAI2C_ISR_MASK_ALL, i2c->base + VIAI2C_REG_ISR);
+
+ if (i2c->tcr == VIAI2C_TCR_FAST)
+ writew(SCL_TIMEOUT(128) | TR_HS, i2c->base + VIAI2C_REG_TR);
+ else
+ writew(SCL_TIMEOUT(128) | TR_STD, i2c->base + VIAI2C_REG_TR);
+
+ return 0;
+}
+
+static int wmt_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct viai2c *i2c;
+ struct i2c_adapter *adap;
+ int err;
+ u32 clk_rate;
+
+ err = viai2c_init(pdev, &i2c, VIAI2C_PLAT_WMT);
+ if (err)
+ return err;
+
+ i2c->clk = of_clk_get(np, 0);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "unable to request clock\n");
+ return PTR_ERR(i2c->clk);
+ }
+
+ err = of_property_read_u32(np, "clock-frequency", &clk_rate);
+ if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ)
+ i2c->tcr = VIAI2C_TCR_FAST;
+
+ adap = &i2c->adapter;
+ i2c_set_adapdata(adap, i2c);
+ strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &wmt_i2c_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+
+ err = wmt_i2c_reset_hardware(i2c);
+ if (err) {
+ dev_err(&pdev->dev, "error initializing hardware\n");
+ return err;
+ }
+
+ err = i2c_add_adapter(adap);
+ if (err)
+ /* wmt_i2c_reset_hardware() enables i2c->clk */
+ clk_disable_unprepare(i2c->clk);
+
+ return err;
+}
+
+static void wmt_i2c_remove(struct platform_device *pdev)
+{
+ struct viai2c *i2c = platform_get_drvdata(pdev);
+
+ /* Disable interrupts, clock and delete adapter */
+ writew(0, i2c->base + VIAI2C_REG_IMR);
+ clk_disable_unprepare(i2c->clk);
+ i2c_del_adapter(&i2c->adapter);
+}
+
+static const struct of_device_id wmt_i2c_dt_ids[] = {
+ { .compatible = "wm,wm8505-i2c" },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver wmt_i2c_driver = {
+ .probe = wmt_i2c_probe,
+ .remove_new = wmt_i2c_remove,
+ .driver = {
+ .name = "wmt-i2c",
+ .of_match_table = wmt_i2c_dt_ids,
+ },
+};
+
+module_platform_driver(wmt_i2c_driver);
+
+MODULE_DESCRIPTION("Wondermedia I2C master-mode bus adapter");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, wmt_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
new file mode 100644
index 000000000000..7e3ac2a3e1fd
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2024 Shanghai Zhaoxin Semiconductor Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include "i2c-viai2c-common.h"
+
+/*
+ * registers
+ */
+/* Zhaoxin specific register bit fields */
+/* REG_CR Bit fields */
+#define ZXI2C_CR_MST_RST BIT(7)
+#define ZXI2C_CR_FIFO_MODE BIT(14)
+/* REG_ISR/IMR Bit fields */
+#define ZXI2C_IRQ_FIFONACK BIT(4)
+#define ZXI2C_IRQ_FIFOEND BIT(3)
+#define ZXI2C_IRQ_MASK (VIAI2C_ISR_MASK_ALL \
+ | ZXI2C_IRQ_FIFOEND \
+ | ZXI2C_IRQ_FIFONACK)
+/* Zhaoxin specific registers */
+#define ZXI2C_REG_CLK 0x10
+#define ZXI2C_CLK_50M BIT(0)
+#define ZXI2C_REG_REV 0x11
+#define ZXI2C_REG_HCR 0x12
+#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0)
+#define ZXI2C_REG_HTDR 0x13
+#define ZXI2C_REG_HRDR 0x14
+#define ZXI2C_REG_HTLR 0x15
+#define ZXI2C_REG_HRLR 0x16
+#define ZXI2C_REG_HWCNTR 0x18
+#define ZXI2C_REG_HRCNTR 0x19
+
+/* parameter constants */
+#define ZXI2C_GOLD_FSTP_100K 0xF3
+#define ZXI2C_GOLD_FSTP_400K 0x38
+#define ZXI2C_GOLD_FSTP_1M 0x13
+#define ZXI2C_GOLD_FSTP_3400K 0x37
+#define ZXI2C_HS_MASTER_CODE (0x08 << 8)
+
+#define ZXI2C_FIFO_SIZE 32
+
+struct viai2c_zhaoxin {
+ u8 hrv;
+ u16 tr;
+ u16 mcr;
+ u16 xfer_len;
+};
+
+/* 'irq == true' means in interrupt context */
+int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
+{
+ u16 i;
+ u8 tmp;
+ struct i2c_msg *msg = i2c->msg;
+ void __iomem *base = i2c->base;
+ bool read = !!(msg->flags & I2C_M_RD);
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+
+ if (irq) {
+ /* get the received data */
+ if (read)
+ for (i = 0; i < priv->xfer_len; i++)
+ msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
+
+ i2c->xfered_len += priv->xfer_len;
+ if (i2c->xfered_len == msg->len)
+ return 1;
+ }
+
+ /* reset fifo buffer */
+ tmp = ioread8(base + ZXI2C_REG_HCR);
+ iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
+
+ /* set xfer len */
+ priv->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE);
+ if (read) {
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HRLR);
+ } else {
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HTLR);
+ /* set write data */
+ for (i = 0; i < priv->xfer_len; i++)
+ iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR);
+ }
+
+ /* prepare to stop transmission */
+ if (priv->hrv && msg->len == (i2c->xfered_len + priv->xfer_len)) {
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp |= read ? VIAI2C_CR_RX_END : VIAI2C_CR_TX_END;
+ iowrite8(tmp, base + VIAI2C_REG_CR);
+ }
+
+ if (irq) {
+ /* continue transmission */
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp |= VIAI2C_CR_CPU_RDY;
+ iowrite8(tmp, base + VIAI2C_REG_CR);
+ } else {
+ u16 tcr_val = i2c->tcr;
+
+ /* start transmission */
+ tcr_val |= read ? VIAI2C_TCR_READ : 0;
+ writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+ }
+
+ return 0;
+}
+
+static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ u8 tmp;
+ int ret;
+ struct viai2c *i2c = (struct viai2c *)i2c_get_adapdata(adap);
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+ void __iomem *base = i2c->base;
+
+ ret = viai2c_wait_bus_not_busy(i2c);
+ if (ret)
+ return ret;
+
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp &= ~(VIAI2C_CR_RX_END | VIAI2C_CR_TX_END);
+
+ if (num == 1 && msgs->len >= 2 && (priv->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) {
+ /* enable fifo mode */
+ iowrite16(ZXI2C_CR_FIFO_MODE | tmp, base + VIAI2C_REG_CR);
+ /* clear irq status */
+ iowrite8(ZXI2C_IRQ_MASK, base + VIAI2C_REG_ISR);
+ /* enable fifo irq */
+ iowrite8(VIAI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, base + VIAI2C_REG_IMR);
+
+ i2c->msg = msgs;
+ i2c->mode = VIAI2C_FIFO_MODE;
+ priv->xfer_len = 0;
+ i2c->xfered_len = 0;
+
+ viai2c_fifo_irq_xfer(i2c, false);
+
+ if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
+ return -ETIMEDOUT;
+
+ ret = i2c->ret;
+ } else {
+ /* enable byte mode */
+ iowrite16(tmp, base + VIAI2C_REG_CR);
+ /* clear irq status */
+ iowrite8(ZXI2C_IRQ_MASK, base + VIAI2C_REG_ISR);
+ /* enable byte irq */
+ iowrite8(VIAI2C_ISR_NACK_ADDR | VIAI2C_IMR_BYTE, base + VIAI2C_REG_IMR);
+
+ ret = viai2c_xfer(adap, msgs, num);
+ if (ret == -ETIMEDOUT)
+ iowrite16(tmp | VIAI2C_CR_END_MASK, base + VIAI2C_REG_CR);
+ }
+ /* disable interrupts */
+ iowrite8(0, base + VIAI2C_REG_IMR);
+
+ return ret;
+}
+
+static u32 zxi2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zxi2c_algorithm = {
+ .master_xfer = zxi2c_master_xfer,
+ .functionality = zxi2c_func,
+};
+
+static const struct i2c_adapter_quirks zxi2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static const u32 zxi2c_speed_params_table[][3] = {
+ /* bus speed, TCR bits, golden FSTP value */
+ { I2C_MAX_STANDARD_MODE_FREQ, 0, ZXI2C_GOLD_FSTP_100K },
+ { I2C_MAX_FAST_MODE_FREQ, VIAI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, VIAI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M },
+ { I2C_MAX_HIGH_SPEED_MODE_FREQ, VIAI2C_TCR_HS_MODE | VIAI2C_TCR_FAST,
+ ZXI2C_GOLD_FSTP_3400K },
+};
+
+static void zxi2c_set_bus_speed(struct viai2c *i2c)
+{
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+
+ iowrite16(priv->tr, i2c->base + VIAI2C_REG_TR);
+ iowrite8(ZXI2C_CLK_50M, i2c->base + ZXI2C_REG_CLK);
+ iowrite16(priv->mcr, i2c->base + VIAI2C_REG_MCR);
+}
+
+static void zxi2c_get_bus_speed(struct viai2c *i2c)
+{
+ u8 i, count;
+ u8 fstp;
+ const u32 *params;
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+ u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev);
+
+ count = ARRAY_SIZE(zxi2c_speed_params_table);
+ for (i = 0; i < count; i++)
+ if (acpi_speed == zxi2c_speed_params_table[i][0])
+ break;
+ /* if not found, use 400k as default */
+ i = i < count ? i : 1;
+
+ params = zxi2c_speed_params_table[i];
+ fstp = ioread8(i2c->base + VIAI2C_REG_TR);
+ if (abs(fstp - params[2]) > 0x10) {
+ /*
+ * If the FSTP value programmed by the BIOS is far from the
+ * golden value, use the golden value and warn the user.
+ */
+ dev_warn(i2c->dev, "FW FSTP[%x] might cause wrong timings, dropped\n", fstp);
+ priv->tr = params[2] | 0xff00;
+ } else {
+ priv->tr = fstp | 0xff00;
+ }
+
+ i2c->tcr = params[1];
+ priv->mcr = ioread16(i2c->base + VIAI2C_REG_MCR);
+ /* for HS mode, use 0x08 as the master code */
+ if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ)
+ priv->mcr |= ZXI2C_HS_MASTER_CODE;
+
+ dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0]));
+}
+
+static int zxi2c_probe(struct platform_device *pdev)
+{
+ int error;
+ struct viai2c *i2c;
+ struct i2c_adapter *adap;
+ struct viai2c_zhaoxin *priv;
+
+ error = viai2c_init(pdev, &i2c, VIAI2C_PLAT_ZHAOXIN);
+ if (error)
+ return error;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ i2c->pltfm_priv = priv;
+
+ zxi2c_get_bus_speed(i2c);
+ zxi2c_set_bus_speed(i2c);
+
+ priv->hrv = ioread8(i2c->base + ZXI2C_REG_REV);
+
+ adap = &i2c->adapter;
+ adap->owner = THIS_MODULE;
+ adap->algo = &zxi2c_algorithm;
+ adap->quirks = &zxi2c_quirks;
+ adap->dev.parent = &pdev->dev;
+ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s",
+ dev_name(pdev->dev.parent), dev_name(i2c->dev));
+ i2c_set_adapdata(adap, i2c);
+
+ return devm_i2c_add_adapter(&pdev->dev, adap);
+}
+
+static int __maybe_unused zxi2c_resume(struct device *dev)
+{
+ struct viai2c *i2c = dev_get_drvdata(dev);
+
+ iowrite8(ZXI2C_CR_MST_RST, i2c->base + VIAI2C_REG_CR);
+ zxi2c_set_bus_speed(i2c);
+
+ return 0;
+}
+
+static const struct dev_pm_ops zxi2c_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, zxi2c_resume)
+};
+
+static const struct acpi_device_id zxi2c_acpi_match[] = {
+ {"IIC1D17", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match);
+
+static struct platform_driver zxi2c_driver = {
+ .probe = zxi2c_probe,
+ .driver = {
+ .name = "i2c_zhaoxin",
+ .acpi_match_table = zxi2c_acpi_match,
+ .pm = &zxi2c_pm,
+ },
+};
+
+module_platform_driver(zxi2c_driver);
+
+MODULE_AUTHOR("HansHu@zhaoxin.com");
+MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 9e153b5b0e8e..3784b07f5371 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -416,7 +416,6 @@ static void vprbrd_i2c_remove(struct platform_device *pdev)
static struct platform_driver vprbrd_i2c_driver = {
.driver.name = "viperboard-i2c",
- .driver.owner = THIS_MODULE,
.probe = vprbrd_i2c_probe,
.remove_new = vprbrd_i2c_remove,
};
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
deleted file mode 100644
index 198afee5233c..000000000000
--- a/drivers/i2c/busses/i2c-wmt.c
+++ /dev/null
@@ -1,421 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Wondermedia I2C Master Mode Driver
- *
- * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
- *
- * Derived from GPLv2+ licensed source:
- * - Copyright (C) 2008 WonderMedia Technologies, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-
-#define REG_CR 0x00
-#define REG_TCR 0x02
-#define REG_CSR 0x04
-#define REG_ISR 0x06
-#define REG_IMR 0x08
-#define REG_CDR 0x0A
-#define REG_TR 0x0C
-#define REG_MCR 0x0E
-#define REG_SLAVE_CR 0x10
-#define REG_SLAVE_SR 0x12
-#define REG_SLAVE_ISR 0x14
-#define REG_SLAVE_IMR 0x16
-#define REG_SLAVE_DR 0x18
-#define REG_SLAVE_TR 0x1A
-
-/* REG_CR Bit fields */
-#define CR_TX_NEXT_ACK 0x0000
-#define CR_ENABLE 0x0001
-#define CR_TX_NEXT_NO_ACK 0x0002
-#define CR_TX_END 0x0004
-#define CR_CPU_RDY 0x0008
-#define SLAV_MODE_SEL 0x8000
-
-/* REG_TCR Bit fields */
-#define TCR_STANDARD_MODE 0x0000
-#define TCR_MASTER_WRITE 0x0000
-#define TCR_HS_MODE 0x2000
-#define TCR_MASTER_READ 0x4000
-#define TCR_FAST_MODE 0x8000
-#define TCR_SLAVE_ADDR_MASK 0x007F
-
-/* REG_ISR Bit fields */
-#define ISR_NACK_ADDR 0x0001
-#define ISR_BYTE_END 0x0002
-#define ISR_SCL_TIMEOUT 0x0004
-#define ISR_WRITE_ALL 0x0007
-
-/* REG_IMR Bit fields */
-#define IMR_ENABLE_ALL 0x0007
-
-/* REG_CSR Bit fields */
-#define CSR_RCV_NOT_ACK 0x0001
-#define CSR_RCV_ACK_MASK 0x0001
-#define CSR_READY_MASK 0x0002
-
-/* REG_TR */
-#define SCL_TIMEOUT(x) (((x) & 0xFF) << 8)
-#define TR_STD 0x0064
-#define TR_HS 0x0019
-
-/* REG_MCR */
-#define MCR_APB_96M 7
-#define MCR_APB_166M 12
-
-#define WMT_I2C_TIMEOUT (msecs_to_jiffies(1000))
-
-struct wmt_i2c_dev {
- struct i2c_adapter adapter;
- struct completion complete;
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- u16 tcr;
- int irq;
- u16 cmd_status;
-};
-
-static int wmt_i2c_wait_bus_not_busy(struct wmt_i2c_dev *i2c_dev)
-{
- unsigned long timeout;
-
- timeout = jiffies + WMT_I2C_TIMEOUT;
- while (!(readw(i2c_dev->base + REG_CSR) & CSR_READY_MASK)) {
- if (time_after(jiffies, timeout)) {
- dev_warn(i2c_dev->dev, "timeout waiting for bus ready\n");
- return -EBUSY;
- }
- msleep(20);
- }
-
- return 0;
-}
-
-static int wmt_check_status(struct wmt_i2c_dev *i2c_dev)
-{
- int ret = 0;
- unsigned long wait_result;
-
- wait_result = wait_for_completion_timeout(&i2c_dev->complete,
- msecs_to_jiffies(500));
- if (!wait_result)
- return -ETIMEDOUT;
-
- if (i2c_dev->cmd_status & ISR_NACK_ADDR)
- ret = -EIO;
-
- if (i2c_dev->cmd_status & ISR_SCL_TIMEOUT)
- ret = -ETIMEDOUT;
-
- return ret;
-}
-
-static int wmt_i2c_write(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg,
- int last)
-{
- u16 val, tcr_val = i2c_dev->tcr;
- int ret;
- int xfer_len = 0;
-
- if (pmsg->len == 0) {
- /*
- * We still need to run through the while (..) once, so
- * start at -1 and break out early from the loop
- */
- xfer_len = -1;
- writew(0, i2c_dev->base + REG_CDR);
- } else {
- writew(pmsg->buf[0] & 0xFF, i2c_dev->base + REG_CDR);
- }
-
- if (!(pmsg->flags & I2C_M_NOSTART)) {
- val = readw(i2c_dev->base + REG_CR);
- val &= ~CR_TX_END;
- val |= CR_CPU_RDY;
- writew(val, i2c_dev->base + REG_CR);
- }
-
- reinit_completion(&i2c_dev->complete);
-
- tcr_val |= (TCR_MASTER_WRITE | (pmsg->addr & TCR_SLAVE_ADDR_MASK));
-
- writew(tcr_val, i2c_dev->base + REG_TCR);
-
- if (pmsg->flags & I2C_M_NOSTART) {
- val = readw(i2c_dev->base + REG_CR);
- val |= CR_CPU_RDY;
- writew(val, i2c_dev->base + REG_CR);
- }
-
- while (xfer_len < pmsg->len) {
- ret = wmt_check_status(i2c_dev);
- if (ret)
- return ret;
-
- xfer_len++;
-
- val = readw(i2c_dev->base + REG_CSR);
- if ((val & CSR_RCV_ACK_MASK) == CSR_RCV_NOT_ACK) {
- dev_dbg(i2c_dev->dev, "write RCV NACK error\n");
- return -EIO;
- }
-
- if (pmsg->len == 0) {
- val = CR_TX_END | CR_CPU_RDY | CR_ENABLE;
- writew(val, i2c_dev->base + REG_CR);
- break;
- }
-
- if (xfer_len == pmsg->len) {
- if (last != 1)
- writew(CR_ENABLE, i2c_dev->base + REG_CR);
- } else {
- writew(pmsg->buf[xfer_len] & 0xFF, i2c_dev->base +
- REG_CDR);
- writew(CR_CPU_RDY | CR_ENABLE, i2c_dev->base + REG_CR);
- }
- }
-
- return 0;
-}
-
-static int wmt_i2c_read(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg)
-{
- u16 val, tcr_val = i2c_dev->tcr;
- int ret;
- u32 xfer_len = 0;
-
- val = readw(i2c_dev->base + REG_CR);
- val &= ~(CR_TX_END | CR_TX_NEXT_NO_ACK);
-
- if (!(pmsg->flags & I2C_M_NOSTART))
- val |= CR_CPU_RDY;
-
- if (pmsg->len == 1)
- val |= CR_TX_NEXT_NO_ACK;
-
- writew(val, i2c_dev->base + REG_CR);
-
- reinit_completion(&i2c_dev->complete);
-
- tcr_val |= TCR_MASTER_READ | (pmsg->addr & TCR_SLAVE_ADDR_MASK);
-
- writew(tcr_val, i2c_dev->base + REG_TCR);
-
- if (pmsg->flags & I2C_M_NOSTART) {
- val = readw(i2c_dev->base + REG_CR);
- val |= CR_CPU_RDY;
- writew(val, i2c_dev->base + REG_CR);
- }
-
- while (xfer_len < pmsg->len) {
- ret = wmt_check_status(i2c_dev);
- if (ret)
- return ret;
-
- pmsg->buf[xfer_len] = readw(i2c_dev->base + REG_CDR) >> 8;
- xfer_len++;
-
- val = readw(i2c_dev->base + REG_CR) | CR_CPU_RDY;
- if (xfer_len == pmsg->len - 1)
- val |= CR_TX_NEXT_NO_ACK;
- writew(val, i2c_dev->base + REG_CR);
- }
-
- return 0;
-}
-
-static int wmt_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg msgs[],
- int num)
-{
- struct i2c_msg *pmsg;
- int i;
- int ret = 0;
- struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
-
- for (i = 0; ret >= 0 && i < num; i++) {
- pmsg = &msgs[i];
- if (!(pmsg->flags & I2C_M_NOSTART)) {
- ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
- if (ret < 0)
- return ret;
- }
-
- if (pmsg->flags & I2C_M_RD)
- ret = wmt_i2c_read(i2c_dev, pmsg);
- else
- ret = wmt_i2c_write(i2c_dev, pmsg, (i + 1) == num);
- }
-
- return (ret < 0) ? ret : i;
-}
-
-static u32 wmt_i2c_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART;
-}
-
-static const struct i2c_algorithm wmt_i2c_algo = {
- .master_xfer = wmt_i2c_xfer,
- .functionality = wmt_i2c_func,
-};
-
-static irqreturn_t wmt_i2c_isr(int irq, void *data)
-{
- struct wmt_i2c_dev *i2c_dev = data;
-
- /* save the status and write-clear it */
- i2c_dev->cmd_status = readw(i2c_dev->base + REG_ISR);
- writew(i2c_dev->cmd_status, i2c_dev->base + REG_ISR);
-
- complete(&i2c_dev->complete);
-
- return IRQ_HANDLED;
-}
-
-static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev)
-{
- int err;
-
- err = clk_prepare_enable(i2c_dev->clk);
- if (err) {
- dev_err(i2c_dev->dev, "failed to enable clock\n");
- return err;
- }
-
- err = clk_set_rate(i2c_dev->clk, 20000000);
- if (err) {
- dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n");
- clk_disable_unprepare(i2c_dev->clk);
- return err;
- }
-
- writew(0, i2c_dev->base + REG_CR);
- writew(MCR_APB_166M, i2c_dev->base + REG_MCR);
- writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR);
- writew(IMR_ENABLE_ALL, i2c_dev->base + REG_IMR);
- writew(CR_ENABLE, i2c_dev->base + REG_CR);
- readw(i2c_dev->base + REG_CSR); /* read clear */
- writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR);
-
- if (i2c_dev->tcr == TCR_FAST_MODE)
- writew(SCL_TIMEOUT(128) | TR_HS, i2c_dev->base + REG_TR);
- else
- writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR);
-
- return 0;
-}
-
-static int wmt_i2c_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct wmt_i2c_dev *i2c_dev;
- struct i2c_adapter *adap;
- int err;
- u32 clk_rate;
-
- i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
- if (!i2c_dev)
- return -ENOMEM;
-
- i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(i2c_dev->base))
- return PTR_ERR(i2c_dev->base);
-
- i2c_dev->irq = irq_of_parse_and_map(np, 0);
- if (!i2c_dev->irq) {
- dev_err(&pdev->dev, "irq missing or invalid\n");
- return -EINVAL;
- }
-
- i2c_dev->clk = of_clk_get(np, 0);
- if (IS_ERR(i2c_dev->clk)) {
- dev_err(&pdev->dev, "unable to request clock\n");
- return PTR_ERR(i2c_dev->clk);
- }
-
- err = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
- i2c_dev->tcr = TCR_FAST_MODE;
-
- i2c_dev->dev = &pdev->dev;
-
- err = devm_request_irq(&pdev->dev, i2c_dev->irq, wmt_i2c_isr, 0,
- "i2c", i2c_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to request irq %i\n", i2c_dev->irq);
- return err;
- }
-
- adap = &i2c_dev->adapter;
- i2c_set_adapdata(adap, i2c_dev);
- strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
- adap->owner = THIS_MODULE;
- adap->algo = &wmt_i2c_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
-
- init_completion(&i2c_dev->complete);
-
- err = wmt_i2c_reset_hardware(i2c_dev);
- if (err) {
- dev_err(&pdev->dev, "error initializing hardware\n");
- return err;
- }
-
- err = i2c_add_adapter(adap);
- if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, i2c_dev);
-
- return 0;
-
-err_disable_clk:
- clk_disable_unprepare(i2c_dev->clk);
- return err;
-}
-
-static void wmt_i2c_remove(struct platform_device *pdev)
-{
- struct wmt_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
-
- /* Disable interrupts, clock and delete adapter */
- writew(0, i2c_dev->base + REG_IMR);
- clk_disable_unprepare(i2c_dev->clk);
- i2c_del_adapter(&i2c_dev->adapter);
-}
-
-static const struct of_device_id wmt_i2c_dt_ids[] = {
- { .compatible = "wm,wm8505-i2c" },
- { /* Sentinel */ },
-};
-
-static struct platform_driver wmt_i2c_driver = {
- .probe = wmt_i2c_probe,
- .remove_new = wmt_i2c_remove,
- .driver = {
- .name = "wmt-i2c",
- .of_match_table = wmt_i2c_dt_ids,
- },
-};
-
-module_platform_driver(wmt_i2c_driver);
-
-MODULE_DESCRIPTION("Wondermedia I2C master-mode bus adapter");
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(of, wmt_i2c_dt_ids);
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index d6037a328669..14ae0cfc325e 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -445,6 +445,11 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
}
+static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev)
+{
+ return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev));
+}
+
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
@@ -471,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
break;
client = i2c_acpi_find_client_by_adev(adev);
- if (!client)
- break;
+ if (client) {
+ i2c_unregister_device(client);
+ put_device(&client->dev);
+ }
+
+ adapter = i2c_acpi_find_adapter_by_adev(adev);
+ if (adapter) {
+ acpi_unbind_one(&adapter->dev);
+ put_device(&adapter->dev);
+ }
- i2c_unregister_device(client);
- put_device(&client->dev);
break;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ff5c486a1dbb..db0d1ac82910 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2200,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2273,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
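
A usage note, sketched rather than taken from this change: with the master_xfer check now performed inside __i2c_transfer(), callers that hold the bus lock themselves no longer need to test adap->algo->master_xfer first and simply see -EOPNOTSUPP on SMBus-only adapters; adap, msgs and num below are assumed to have been prepared by the caller:

	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
	ret = __i2c_transfer(adap, msgs, num);	/* -EOPNOTSUPP if the adapter has no master_xfer */
	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
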
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 57ff09f18c37..fda72e8be885 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -127,19 +127,6 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
return parent->algo->functionality(parent);
}
-/* Return all parent classes, merged */
-static unsigned int i2c_mux_parent_classes(struct i2c_adapter *parent)
-{
- unsigned int class = 0;
-
- do {
- class |= parent->class;
- parent = i2c_parent_is_i2c_adapter(parent);
- } while (parent);
-
- return class;
-}
-
static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
struct i2c_mux_priv *priv = adapter->algo_data;
@@ -281,8 +268,7 @@ static const struct i2c_lock_operations i2c_parent_lock_ops = {
};
int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
- u32 force_nr, u32 chan_id,
- unsigned int class)
+ u32 force_nr, u32 chan_id)
{
struct i2c_adapter *parent = muxc->parent;
struct i2c_mux_priv *priv;
@@ -340,14 +326,6 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
else
priv->adap.lock_ops = &i2c_parent_lock_ops;
- /* Sanity check on class */
- if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
- dev_err(&parent->dev,
- "Segment %d behind mux can't share classes with ancestors\n",
- chan_id);
- else
- priv->adap.class = class;
-
/*
* Try to populate the mux adapter's of_node, expands to
* nothing if !CONFIG_OF.
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 24168e9f7df4..7aa6e795d833 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -167,7 +167,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
}
/* Actually add the mux adapter */
- ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, 0);
if (ret)
i2c_put_adapter(muxc->parent);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 6b979a0a6ab8..d6bbb8b68333 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -206,9 +206,8 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
for (i = 0; i < mux->data.n_values; i++) {
u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
- unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
- ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+ ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i]);
if (ret)
goto add_adapter_failed;
}
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index 8305661e1253..10d63307b14d 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -124,7 +124,7 @@ static int i2c_mux_probe(struct platform_device *pdev)
goto err_children;
}
- ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, chan);
if (ret)
goto err_children;
}
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 23766d853e76..19a7c370946d 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -279,7 +279,7 @@ static int ltc4306_probe(struct i2c_client *client)
/* Now create an adapter for each channel */
for (num = 0; num < chip->nchans; num++) {
- ret = i2c_mux_add_adapter(muxc, 0, num, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, num);
if (ret) {
i2c_mux_del_adapters(muxc);
return ret;
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 4c6ed1d58c79..3f06aa3331a7 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -154,7 +154,7 @@ static int mlxcpld_mux_probe(struct platform_device *pdev)
/* Create an adapter for each channel. */
for (num = 0; num < pdata->num_adaps; num++) {
- err = i2c_mux_add_adapter(muxc, 0, pdata->chan_ids[num], 0);
+ err = i2c_mux_add_adapter(muxc, 0, pdata->chan_ids[num]);
if (err)
goto virt_reg_failed;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index ce0fb69249a8..e28694d991fb 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -314,7 +314,7 @@ static int pca9541_probe(struct i2c_client *client)
i2c_set_clientdata(client, muxc);
- ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, 0);
if (ret)
return ret;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index c3f4ff08ac38..6f84018258c4 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -644,7 +644,7 @@ static int pca954x_probe(struct i2c_client *client)
/* Now create an adapter for each channel */
for (num = 0; num < data->chip->nchans; num++) {
- ret = i2c_mux_add_adapter(muxc, 0, num, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, num);
if (ret)
goto fail_cleanup;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 6ebca7bfd8a2..02aaf0781e9c 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -151,7 +151,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
/* Do not add any adapter for the idle state (if it's there at all). */
for (i = 0; i < num_names - !!muxc->deselect; i++) {
- ret = i2c_mux_add_adapter(muxc, 0, i, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, i);
if (ret)
goto err_del_adapter;
}
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 8489971babd3..ef765fcd33f5 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -213,7 +213,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
for (i = 0; i < mux->data.n_values; i++) {
nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
- ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], 0);
+ ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i]);
if (ret)
goto err_del_mux_adapters;
}
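
For mux users outside this series that still pass a class, a hedged migration sketch (the error label is a placeholder): the final argument is simply dropped, since mux segments no longer inherit or advertise I2C classes:

	/* before: ret = i2c_mux_add_adapter(muxc, force_nr, chan_id, class); */
	ret = i2c_mux_add_adapter(muxc, force_nr, chan_id);
	if (ret)
		goto err_del_adapters;
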
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 61839be501c2..63c3566a533b 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014, Intel Corporation.
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -27,11 +28,16 @@
#define MXC4005_REG_ZOUT_UPPER 0x07
#define MXC4005_REG_ZOUT_LOWER 0x08
+#define MXC4005_REG_INT_MASK0 0x0A
+
#define MXC4005_REG_INT_MASK1 0x0B
#define MXC4005_REG_INT_MASK1_BIT_DRDYE 0x01
+#define MXC4005_REG_INT_CLR0 0x00
+
#define MXC4005_REG_INT_CLR1 0x01
#define MXC4005_REG_INT_CLR1_BIT_DRDYC 0x01
+#define MXC4005_REG_INT_CLR1_SW_RST 0x10
#define MXC4005_REG_CONTROL 0x0D
#define MXC4005_REG_CONTROL_MASK_FSR GENMASK(6, 5)
@@ -39,6 +45,9 @@
#define MXC4005_REG_DEVICE_ID 0x0E
+/* Datasheet does not specify a reset time; this is a conservative guess */
+#define MXC4005_RESET_TIME_US 2000
+
enum mxc4005_axis {
AXIS_X,
AXIS_Y,
@@ -62,6 +71,8 @@ struct mxc4005_data {
s64 timestamp __aligned(8);
} scan;
bool trigger_enabled;
+ unsigned int control;
+ unsigned int int_mask1;
};
/*
@@ -113,7 +124,9 @@ static bool mxc4005_is_readable_reg(struct device *dev, unsigned int reg)
static bool mxc4005_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MXC4005_REG_INT_CLR0:
case MXC4005_REG_INT_CLR1:
+ case MXC4005_REG_INT_MASK0:
case MXC4005_REG_INT_MASK1:
case MXC4005_REG_CONTROL:
return true;
@@ -330,23 +343,20 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct mxc4005_data *data = iio_priv(indio_dev);
+ unsigned int val;
int ret;
mutex_lock(&data->mutex);
- if (state) {
- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
- MXC4005_REG_INT_MASK1_BIT_DRDYE);
- } else {
- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
- ~MXC4005_REG_INT_MASK1_BIT_DRDYE);
- }
+ val = state ? MXC4005_REG_INT_MASK1_BIT_DRDYE : 0;
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, val);
if (ret < 0) {
mutex_unlock(&data->mutex);
dev_err(data->dev, "failed to update reg_int_mask1");
return ret;
}
+ data->int_mask1 = val;
data->trigger_enabled = state;
mutex_unlock(&data->mutex);
@@ -382,6 +392,21 @@ static int mxc4005_chip_init(struct mxc4005_data *data)
dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg);
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
+ MXC4005_REG_INT_CLR1_SW_RST);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "resetting chip\n");
+
+ fsleep(MXC4005_RESET_TIME_US);
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "writing INT_MASK0\n");
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, 0);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "writing INT_MASK1\n");
+
return 0;
}
@@ -469,6 +494,58 @@ static int mxc4005_probe(struct i2c_client *client)
return devm_iio_device_register(&client->dev, indio_dev);
}
+static int mxc4005_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mxc4005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Save control to restore it on resume */
+ ret = regmap_read(data->regmap, MXC4005_REG_CONTROL, &data->control);
+ if (ret < 0)
+ dev_err(data->dev, "failed to read reg_control\n");
+
+ return ret;
+}
+
+static int mxc4005_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mxc4005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
+ MXC4005_REG_INT_CLR1_SW_RST);
+ if (ret) {
+ dev_err(data->dev, "failed to reset chip: %d\n", ret);
+ return ret;
+ }
+
+ fsleep(MXC4005_RESET_TIME_US);
+
+ ret = regmap_write(data->regmap, MXC4005_REG_CONTROL, data->control);
+ if (ret) {
+ dev_err(data->dev, "failed to restore control register\n");
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
+ if (ret) {
+ dev_err(data->dev, "failed to restore interrupt 0 mask\n");
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, data->int_mask1);
+ if (ret) {
+ dev_err(data->dev, "failed to restore interrupt 1 mask\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mxc4005_pm_ops, mxc4005_suspend, mxc4005_resume);
+
static const struct acpi_device_id mxc4005_acpi_match[] = {
{"MXC4005", 0},
{"MXC6655", 0},
@@ -496,6 +573,7 @@ static struct i2c_driver mxc4005_driver = {
.name = MXC4005_DRV_NAME,
.acpi_match_table = mxc4005_acpi_match,
.of_match_table = mxc4005_of_match,
+ .pm = pm_sleep_ptr(&mxc4005_pm_ops),
},
.probe = mxc4005_probe,
.id_table = mxc4005_id,
diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c
index e6bc5eb3788d..12dc43d487b4 100644
--- a/drivers/iio/addac/ad74115.c
+++ b/drivers/iio/addac/ad74115.c
@@ -199,7 +199,6 @@ struct ad74115_state {
struct spi_device *spi;
struct regmap *regmap;
struct iio_trigger *trig;
- struct regulator *avdd;
/*
* Synchronize consecutive operations when doing a one-shot
@@ -1672,13 +1671,9 @@ static int ad74115_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
- if (val == AD74115_DIN_THRESHOLD_MODE_AVDD) {
- ret = regulator_get_voltage(st->avdd);
- if (ret < 0)
- return ret;
-
- st->avdd_mv = ret / 1000;
- }
+ if (val == AD74115_DIN_THRESHOLD_MODE_AVDD && !st->avdd_mv)
+ return dev_err_probe(dev, -EINVAL,
+ "AVDD voltage is required for digital input threshold mode AVDD\n");
st->din_threshold_mode = val;
@@ -1788,11 +1783,6 @@ static int ad74115_reset(struct ad74115_state *st)
return 0;
}
-static void ad74115_regulator_disable(void *data)
-{
- regulator_disable(data);
-}
-
static int ad74115_setup_trigger(struct iio_dev *indio_dev)
{
struct ad74115_state *st = iio_priv(indio_dev);
@@ -1855,20 +1845,20 @@ static int ad74115_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad74115_info;
- st->avdd = devm_regulator_get(dev, "avdd");
- if (IS_ERR(st->avdd))
- return PTR_ERR(st->avdd);
-
- ret = regulator_enable(st->avdd);
- if (ret) {
- dev_err(dev, "Failed to enable avdd regulator\n");
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(dev, "avdd");
+ if (ret < 0) {
+ /*
+ * Since this is both a power supply and only optionally a
+ * reference voltage, make sure to enable it even when the
+ * voltage is not available.
+ */
+ ret = devm_regulator_get_enable(dev, "avdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable avdd\n");
+ } else {
+ st->avdd_mv = ret / 1000;
}
- ret = devm_add_action_or_reset(dev, ad74115_regulator_disable, st->avdd);
- if (ret)
- return ret;
-
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
regulator_names);
if (ret)
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 92923074f930..c0cd5d9844fe 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -95,7 +95,6 @@ struct admv1013_state {
struct clk *clkin;
/* Protect against concurrent accesses to the device and to data */
struct mutex lock;
- struct regulator *reg;
struct notifier_block nb;
unsigned int input_mode;
unsigned int quad_se_mode;
@@ -342,14 +341,9 @@ static int admv1013_update_quad_filters(struct admv1013_state *st)
FIELD_PREP(ADMV1013_QUAD_FILTERS_MSK, filt_raw));
}
-static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+static int admv1013_update_mixer_vgate(struct admv1013_state *st, int vcm)
{
unsigned int mixer_vgate;
- int vcm;
-
- vcm = regulator_get_voltage(st->reg);
- if (vcm < 0)
- return vcm;
if (vcm <= 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
@@ -443,7 +437,7 @@ static const struct iio_chan_spec admv1013_channels[] = {
ADMV1013_CHAN_CALIB(1, Q),
};
-static int admv1013_init(struct admv1013_state *st)
+static int admv1013_init(struct admv1013_state *st, int vcm_uv)
{
int ret;
unsigned int data;
@@ -483,7 +477,7 @@ static int admv1013_init(struct admv1013_state *st)
if (ret)
return ret;
- ret = admv1013_update_mixer_vgate(st);
+ ret = admv1013_update_mixer_vgate(st, vcm_uv);
if (ret)
return ret;
@@ -498,11 +492,6 @@ static int admv1013_init(struct admv1013_state *st)
st->input_mode);
}
-static void admv1013_reg_disable(void *data)
-{
- regulator_disable(data);
-}
-
static void admv1013_powerdown(void *data)
{
unsigned int enable_reg, enable_reg_msk;
@@ -557,11 +546,6 @@ static int admv1013_properties_parse(struct admv1013_state *st)
else
return -EINVAL;
- st->reg = devm_regulator_get(&spi->dev, "vcm");
- if (IS_ERR(st->reg))
- return dev_err_probe(&spi->dev, PTR_ERR(st->reg),
- "failed to get the common-mode voltage\n");
-
ret = devm_regulator_bulk_get_enable(&st->spi->dev,
ARRAY_SIZE(admv1013_vcc_regs),
admv1013_vcc_regs);
@@ -578,7 +562,7 @@ static int admv1013_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct admv1013_state *st;
- int ret;
+ int ret, vcm_uv;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -597,16 +581,12 @@ static int admv1013_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = regulator_enable(st->reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified Common-Mode Voltage!\n");
- return ret;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vcm");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to get the common-mode voltage\n");
- ret = devm_add_action_or_reset(&spi->dev, admv1013_reg_disable,
- st->reg);
- if (ret)
- return ret;
+ vcm_uv = ret;
st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
@@ -620,7 +600,7 @@ static int admv1013_probe(struct spi_device *spi)
mutex_init(&st->lock);
- ret = admv1013_init(st);
+ ret = admv1013_init(st, vcm_uv);
if (ret) {
dev_err(&spi->dev, "admv1013 init failed\n");
return ret;
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 52b6feed2637..29ecfa6fd633 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -72,7 +72,7 @@ static int mpu3050_i2c_probe(struct i2c_client *client)
else {
mpu3050->i2cmux->priv = mpu3050;
/* Ignore failure, not critical */
- i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
+ i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0);
}
return 0;
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 01f55cc902fa..060a21c70460 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1289,6 +1289,7 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
struct device *dev = &st->adis.spi->dev;
const struct adis16475_sync *sync;
u32 sync_mode;
+ u16 val;
/* default to internal clk */
st->clk_freq = st->info->int_clk * 1000;
@@ -1350,8 +1351,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
* I'm keeping this for simplicity and avoiding extra variables
* in chip_info.
*/
+ val = ADIS16475_SYNC_MODE(sync->sync_mode);
ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
- ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
+ ADIS16475_SYNC_MODE_MASK, val);
if (ret)
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 410ea39fd495..0e03137fb3d4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -142,7 +142,7 @@ static int inv_mpu_probe(struct i2c_client *client)
if (!st->muxc)
return -ENOMEM;
st->muxc->priv = dev_get_drvdata(&client->dev);
- result = i2c_mux_add_adapter(st->muxc, 0, 0, 0);
+ result = i2c_mux_add_adapter(st->muxc, 0, 0);
if (result)
return result;
result = inv_mpu_acpi_create_mux_client(client);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe8734468ed3..62e9e93d915d 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1233,6 +1233,7 @@ const struct bmp280_chip_info bmp380_chip_info = {
.chip_id = bmp380_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
.regmap_config = &bmp380_regmap_config,
+ .spi_read_extra_byte = true,
.start_up_time = 2000,
.channels = bmp380_channels,
.num_channels = 2,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index a444d4b2978b..4e19ea0b4d39 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -96,15 +96,10 @@ static int bmp280_spi_probe(struct spi_device *spi)
chip_info = spi_get_device_match_data(spi);
- switch (chip_info->chip_id[0]) {
- case BMP380_CHIP_ID:
- case BMP390_CHIP_ID:
+ if (chip_info->spi_read_extra_byte)
bmp_regmap_bus = &bmp380_regmap_bus;
- break;
- default:
+ else
bmp_regmap_bus = &bmp280_regmap_bus;
- break;
- }
regmap = devm_regmap_init(&spi->dev,
bmp_regmap_bus,
@@ -127,7 +122,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp180", .data = &bmp180_chip_info },
{ .compatible = "bosch,bmp181", .data = &bmp180_chip_info },
{ .compatible = "bosch,bmp280", .data = &bmp280_chip_info },
- { .compatible = "bosch,bme280", .data = &bmp280_chip_info },
+ { .compatible = "bosch,bme280", .data = &bme280_chip_info },
{ .compatible = "bosch,bmp380", .data = &bmp380_chip_info },
{ .compatible = "bosch,bmp580", .data = &bmp580_chip_info },
{ },
@@ -139,7 +134,7 @@ static const struct spi_device_id bmp280_spi_id[] = {
{ "bmp180", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp181", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp280", (kernel_ulong_t)&bmp280_chip_info },
- { "bme280", (kernel_ulong_t)&bmp280_chip_info },
+ { "bme280", (kernel_ulong_t)&bme280_chip_info },
{ "bmp380", (kernel_ulong_t)&bmp380_chip_info },
{ "bmp580", (kernel_ulong_t)&bmp580_chip_info },
{ }
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 4012387d7956..5812a344ed8e 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -423,6 +423,7 @@ struct bmp280_chip_info {
int num_chip_id;
const struct regmap_config *regmap_config;
+ bool spi_read_extra_byte;
const struct iio_chan_spec *channels;
int num_channels;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f253295795f0..be0743dac3ff 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst,
static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
- struct rtable *rt;
- struct rt6_info *rt6;
-
- if (family == AF_INET) {
- rt = container_of(dst, struct rtable, dst);
- return rt->rt_uses_gateway;
- }
+ if (family == AF_INET)
+ return dst_rtable(dst)->rt_uses_gateway;
- rt6 = container_of(dst, struct rt6_info, dst);
- return rt6->rt6i_flags & RTF_GATEWAY;
+ return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
}
static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index bf0df6ee4f78..07fb8d3c037f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
- cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ retest:
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
- cm_destroy_id_wait_timeout(cm_id);
+ cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1e2cd7c8716e..64ace0b968f0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
if (!net_eq(dev_net(ndev), dev_addr->net) ||
- ndev->ifindex != bound_if_index)
+ ndev->ifindex != bound_if_index) {
+ rdma_put_gid_attr(sgid_attr);
sgid_attr = ERR_PTR(-ENODEV);
+ }
rcu_read_unlock();
goto out;
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 07cb6c5ffda0..55aa7aa32d4a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2174,8 +2174,7 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
add_ndev_hash(pdata);
- if (old_ndev)
- __dev_put(old_ndev);
+ __dev_put(old_ndev);
return 0;
}
@@ -2235,8 +2234,7 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
spin_lock(&pdata->netdev_lock);
res = rcu_dereference_protected(
pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
- if (res)
- dev_hold(res);
+ dev_hold(res);
spin_unlock(&pdata->netdev_lock);
}
@@ -2311,9 +2309,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
if (filter(ib_dev, port, idev, filter_cookie))
cb(ib_dev, port, idev, cookie);
-
- if (idev)
- dev_put(idev);
+ dev_put(idev);
}
}
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
index eca6e37c72ba..8fd80adfe833 100644
--- a/drivers/infiniband/core/lag.c
+++ b/drivers/infiniband/core/lag.c
@@ -93,8 +93,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
slave = netdev_get_xmit_slave(master, skb,
!!(device->lag_flags &
RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
- if (slave)
- dev_hold(slave);
+ dev_hold(slave);
rcu_read_unlock();
kfree_skb(skb);
return slave;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 4900a0848124..bc79ee630d8d 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -137,6 +137,8 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUBTYPE] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
@@ -164,6 +166,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DRIVER_DETAILS] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -399,7 +402,8 @@ err:
return -EMSGSIZE;
}
-static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device,
+ bool show_details)
{
static const char * const names[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_PD] = "pd",
@@ -424,7 +428,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i])
continue;
- curr = rdma_restrack_count(device, i);
+ curr = rdma_restrack_count(device, i, show_details);
ret = fill_res_info_entry(msg, names[i], curr);
if (ret)
goto err;
@@ -1305,6 +1309,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ bool show_details = false;
struct ib_device *device;
struct sk_buff *msg;
u32 index;
@@ -1320,6 +1325,9 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!device)
return -EINVAL;
+ if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
+ show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -1334,7 +1342,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_free;
}
- ret = fill_res_info(msg, device);
+ ret = fill_res_info(msg, device, show_details);
if (ret)
goto err_free;
@@ -1364,7 +1372,7 @@ static int _nldev_res_get_dumpit(struct ib_device *device,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
0, NLM_F_MULTI);
- if (!nlh || fill_res_info(skb, device)) {
+ if (!nlh || fill_res_info(skb, device, false)) {
nlmsg_cancel(skb, nlh);
goto out;
}
@@ -1534,6 +1542,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
struct rdma_restrack_entry *res;
struct rdma_restrack_root *rt;
int err, ret = 0, idx = 0;
+ bool show_details = false;
struct nlattr *table_attr;
struct nlattr *entry_attr;
struct ib_device *device;
@@ -1562,6 +1571,9 @@ static int res_get_common_dumpit(struct sk_buff *skb,
if (!device)
return -EINVAL;
+ if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
+ show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
+
/*
* If no PORT_INDEX is supplied, we will return all QPs from that device
*/
@@ -1599,6 +1611,9 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects.
*/
xa_for_each(&rt->xa, id, res) {
+ if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details)
+ goto next;
+
if (idx < start || !rdma_restrack_get(res))
goto next;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 01a499a8b88d..3313410014cd 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
return 0;
}
-static const char *type2str(enum rdma_restrack_type type)
-{
- static const char * const names[RDMA_RESTRACK_MAX] = {
- [RDMA_RESTRACK_PD] = "PD",
- [RDMA_RESTRACK_CQ] = "CQ",
- [RDMA_RESTRACK_QP] = "QP",
- [RDMA_RESTRACK_CM_ID] = "CM_ID",
- [RDMA_RESTRACK_MR] = "MR",
- [RDMA_RESTRACK_CTX] = "CTX",
- [RDMA_RESTRACK_COUNTER] = "COUNTER",
- [RDMA_RESTRACK_SRQ] = "SRQ",
- };
-
- return names[type];
-};
-
/**
* rdma_restrack_clean() - clean resource tracking
* @dev: IB device
@@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
void rdma_restrack_clean(struct ib_device *dev)
{
struct rdma_restrack_root *rt = dev->res;
- struct rdma_restrack_entry *e;
- char buf[TASK_COMM_LEN];
- bool found = false;
- const char *owner;
int i;
for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
struct xarray *xa = &dev->res[i].xa;
- if (!xa_empty(xa)) {
- unsigned long index;
-
- if (!found) {
- pr_err("restrack: %s", CUT_HERE);
- dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
- }
- xa_for_each(xa, index, e) {
- if (rdma_is_kernel_res(e)) {
- owner = e->kern_name;
- } else {
- /*
- * There is no need to call get_task_struct here,
- * because we can be here only if there are more
- * get_task_struct() call than put_task_struct().
- */
- get_task_comm(buf, e->task);
- owner = buf;
- }
-
- pr_err("restrack: %s %s object allocated by %s is not freed\n",
- rdma_is_kernel_res(e) ? "Kernel" :
- "User",
- type2str(e->type), owner);
- }
- found = true;
- }
+ WARN_ON(!xa_empty(xa));
xa_destroy(xa);
}
- if (found)
- pr_err("restrack: %s", CUT_HERE);
-
kfree(rt);
}
@@ -108,8 +59,10 @@ void rdma_restrack_clean(struct ib_device *dev)
* rdma_restrack_count() - the current usage of specific object
* @dev: IB device
* @type: actual type of object to operate
+ * @show_details: whether to also count driver-specific objects
*/
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
+ bool show_details)
{
struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e;
@@ -117,8 +70,11 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0;
xa_lock(&rt->xa);
- xas_for_each(&xas, e, U32_MAX)
+ xas_for_each(&xas, e, U32_MAX) {
+ if (xa_get_mark(&rt->xa, e->id, RESTRACK_DD) && !show_details)
+ continue;
cnt++;
+ }
xa_unlock(&rt->xa);
return cnt;
}
@@ -247,6 +203,9 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL);
if (ret)
res->id = 0;
+
+ if (qp->qp_type >= IB_QPT_DRIVER)
+ xa_set_mark(&rt->xa, res->id, RESTRACK_DD);
} else if (res->type == RDMA_RESTRACK_COUNTER) {
/* Special case to ensure that cntn points to right counter */
struct rdma_counter *counter;
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index e958c43dd28f..d5131b3ba8ab 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -601,8 +601,7 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
rcu_read_lock();
master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
- if (master_ndev)
- dev_hold(master_ndev);
+ dev_hold(master_ndev);
rcu_read_unlock();
if (master_ndev) {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index f5feca7fa9b9..2ed749f50a29 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
+#define MAX_UMAD_RECV_LIST_SIZE 200000
+
enum {
IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
@@ -113,6 +115,7 @@ struct ib_umad_file {
struct mutex mutex;
struct ib_umad_port *port;
struct list_head recv_list;
+ atomic_t recv_list_size;
struct list_head send_list;
struct list_head port_list;
spinlock_t send_lock;
@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
return file->agents_dead ? NULL : file->agent[id];
}
-static int queue_packet(struct ib_umad_file *file,
- struct ib_mad_agent *agent,
- struct ib_umad_packet *packet)
+static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
+ struct ib_umad_packet *packet, bool is_recv_mad)
{
int ret = 1;
mutex_lock(&file->mutex);
+ if (is_recv_mad &&
+ atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
+ goto unlock;
+
for (packet->mad.hdr.id = 0;
packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
packet->mad.hdr.id++)
if (agent == __get_agent(file, packet->mad.hdr.id)) {
list_add_tail(&packet->list, &file->recv_list);
+ atomic_inc(&file->recv_list_size);
wake_up_interruptible(&file->recv_wait);
ret = 0;
break;
}
-
+unlock:
mutex_unlock(&file->mutex);
return ret;
@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
packet->length = IB_MGMT_MAD_HDR;
packet->mad.hdr.status = ETIMEDOUT;
- if (!queue_packet(file, agent, packet))
+ if (!queue_packet(file, agent, packet, false))
return;
}
kfree(packet);
@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
rdma_destroy_ah_attr(&ah_attr);
}
- if (queue_packet(file, agent, packet))
+ if (queue_packet(file, agent, packet, true))
goto err2;
return;
@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list);
+ atomic_dec(&file->recv_list_size);
mutex_unlock(&file->mutex);
@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
/* Requeue packet */
mutex_lock(&file->mutex);
list_add(&packet->list, &file->recv_list);
+ atomic_inc(&file->recv_list_size);
mutex_unlock(&file->mutex);
} else {
if (packet->recv_wc)
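The user_mad change caps each file's receive list at MAX_UMAD_RECV_LIST_SIZE and tracks the length in an atomic_t, so unread MADs can no longer grow without bound while readers still decrement cheaply. A hedged, generic sketch of the same bounded-list pattern follows; the names are illustrative, not the driver's.

/* Sketch: refuse new items once a mutex-protected list reaches a cap. */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

#define SKETCH_LIST_MAX 200000

struct sketch_queue {
	struct list_head head;
	struct mutex lock;
	atomic_t size;
};

struct sketch_item {
	struct list_head node;
};

static void sketch_queue_init(struct sketch_queue *q)
{
	INIT_LIST_HEAD(&q->head);
	mutex_init(&q->lock);
	atomic_set(&q->size, 0);
}

static int sketch_enqueue(struct sketch_queue *q, struct sketch_item *item)
{
	int ret = -ENOSPC;

	mutex_lock(&q->lock);
	if (atomic_read(&q->size) > SKETCH_LIST_MAX)
		goto unlock;	/* caller drops the item, as queue_packet() does */

	list_add_tail(&item->node, &q->head);
	atomic_inc(&q->size);
	ret = 0;
unlock:
	mutex_unlock(&q->lock);
	return ret;
}

static struct sketch_item *sketch_dequeue(struct sketch_queue *q)
{
	struct sketch_item *item = NULL;

	mutex_lock(&q->lock);
	if (!list_empty(&q->head)) {
		item = list_first_entry(&q->head, struct sketch_item, node);
		list_del(&item->node);
		atomic_dec(&q->size);
	}
	mutex_unlock(&q->lock);
	return item;
}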
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 439d0c7c5d0c..04258676d072 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.aux_stride = psn_sz;
- hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
/* Update msn tbl size */
if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 7377c8a9f4d5..4296662e59c3 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -110,7 +110,10 @@ struct efa_admin_create_qp_cmd {
* virtual (IOVA returned by MR registration)
* 1 : rq_virt - If set, RQ ring base address is
* virtual (IOVA returned by MR registration)
- * 7:2 : reserved - MBZ
+ * 2 : unsolicited_write_recv - If set, work requests
+ * will not be consumed for incoming RDMA write with
+ * immediate
+ * 7:3 : reserved - MBZ
*/
u8 flags;
@@ -663,7 +666,9 @@ struct efa_admin_feature_device_attr_desc {
* polling is supported
* 3 : rdma_write - If set, RDMA Write is supported
* on TX queues
- * 31:4 : reserved - MBZ
+ * 4 : unsolicited_write_recv - If set, unsolicited
+ * write with imm. receive is supported
+ * 31:5 : reserved - MBZ
*/
u32 device_caps;
@@ -1009,6 +1014,7 @@ struct efa_admin_host_info {
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
+#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)
/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK BIT(0)
@@ -1044,6 +1050,7 @@ struct efa_admin_host_info {
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)
/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index d3398c7b0bd0..5b9c2b16df0e 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -32,6 +32,9 @@ int efa_com_create_qp(struct efa_com_dev *edev,
params->rq_depth;
create_qp_cmd.uar = params->uarn;
+ if (params->unsolicited_write_recv)
+ EFA_SET(&create_qp_cmd.flags, EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV, 1);
+
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&create_qp_cmd,
sizeof(create_qp_cmd),
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 720a99ba0f7d..9714105fcf7e 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -27,6 +27,7 @@ struct efa_com_create_qp_params {
u16 pd;
u16 uarn;
u8 qp_type;
+ u8 unsolicited_write_recv : 1;
};
struct efa_com_create_qp_result {
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 5fa3603c80d8..d1a48f988f6c 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -671,11 +671,22 @@ static void efa_remove(struct pci_dev *pdev)
efa_remove_device(pdev);
}
+static void efa_shutdown(struct pci_dev *pdev)
+{
+ struct efa_dev *dev = pci_get_drvdata(pdev);
+
+ efa_destroy_eqs(dev);
+ efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_SHUTDOWN);
+ efa_free_irq(dev, &dev->admin_irq);
+ efa_disable_msix(dev);
+}
+
static struct pci_driver efa_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = efa_pci_tbl,
.probe = efa_probe,
.remove = efa_remove,
+ .shutdown = efa_shutdown,
};
module_pci_driver(efa_pci_driver);
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 2f412db2edcd..8f7a13b79cdc 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -263,6 +263,9 @@ int efa_query_device(struct ib_device *ibdev,
if (EFA_DEV_CAP(dev, RDMA_WRITE))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
+ if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
+
if (dev->neqs)
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
@@ -639,6 +642,7 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct efa_ibv_create_qp cmd = {};
struct efa_qp *qp = to_eqp(ibqp);
struct efa_ucontext *ucontext;
+ u16 supported_efa_flags = 0;
int err;
ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@@ -676,13 +680,23 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
goto err_out;
}
- if (cmd.comp_mask) {
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_90)) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, unknown fields in udata\n");
err = -EINVAL;
goto err_out;
}
+ if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+ supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
+
+ if (cmd.flags & ~supported_efa_flags) {
+ ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
+ cmd.flags, supported_efa_flags);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
create_qp_params.uarn = ucontext->uarn;
create_qp_params.pd = to_epd(ibqp->pd)->pdn;
@@ -722,6 +736,9 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
create_qp_params.rq_base_addr = qp->rq_dma_addr;
}
+ if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
+ create_qp_params.unsolicited_write_recv = true;
+
err = efa_com_create_qp(&dev->edev, &create_qp_params,
&create_qp_resp);
if (err)
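The EFA hunks advertise an unsolicited-write-with-immediate receive capability and reject any create-QP flag the device does not report, returning -EOPNOTSUPP instead of silently ignoring it. A minimal sketch of that mask-based validation, with a placeholder flag name rather than the real EFA ABI bit:

/* Sketch: accept only flags covered by the advertised capability mask. */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define SKETCH_QP_FLAG_UNSOLICITED_RECV	BIT(0)	/* placeholder flag */

static int sketch_check_qp_flags(u32 requested, bool cap_unsolicited_recv)
{
	u32 supported = 0;

	if (cap_unsolicited_recv)
		supported |= SKETCH_QP_FLAG_UNSOLICITED_RECV;

	/* Any bit outside the supported mask is a request this device
	 * (or this kernel) cannot honour. */
	if (requested & ~supported)
		return -EOPNOTSUPP;

	return 0;
}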
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 5df401a30cb9..c8bd698e21b0 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -33,7 +33,8 @@ struct erdma_eq {
atomic64_t notify_num;
void __iomem *db;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_cmdq_sq {
@@ -48,7 +49,8 @@ struct erdma_cmdq_sq {
u16 wqebb_cnt;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_cmdq_cq {
@@ -61,7 +63,8 @@ struct erdma_cmdq_cq {
u32 ci;
u32 cmdsn;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
atomic64_t armed_num;
};
@@ -177,9 +180,6 @@ enum {
ERDMA_RES_CNT = 2,
};
-#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
-#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
-
struct erdma_dev {
struct ib_device ibdev;
struct net_device *netdev;
@@ -213,6 +213,7 @@ struct erdma_dev {
atomic_t num_ctx;
struct list_head cep_list;
+ struct dma_pool *db_pool;
struct dma_pool *resp_pool;
};
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index a151a7bdd504..43ff40b5a09d 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -14,7 +14,7 @@ static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
- *cmdq->cq.db_record = db_data;
+ *cmdq->cq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
atomic64_inc(&cmdq->cq.armed_num);
@@ -25,7 +25,7 @@ static void kick_cmdq_db(struct erdma_cmdq *cmdq)
struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
- *cmdq->sq.db_record = db_data;
+ *cmdq->sq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
}
@@ -89,20 +89,18 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_sq *sq = &cmdq->sq;
- u32 buf_size;
sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
- buf_size = sq->depth << SQEBB_SHIFT;
-
- sq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &sq->qbuf_dma_addr, GFP_KERNEL);
+ sq->qbuf = dma_alloc_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ &sq->qbuf_dma_addr, GFP_KERNEL);
if (!sq->qbuf)
return -ENOMEM;
- sq->db_record = (u64 *)(sq->qbuf + buf_size);
+ sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
+ if (!sq->dbrec)
+ goto err_out;
spin_lock_init(&sq->lock);
@@ -111,30 +109,33 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
lower_32_bits(sq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
- erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
- sq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ sq->qbuf, sq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_cq *cq = &cmdq->cq;
- u32 buf_size;
cq->depth = cmdq->sq.depth;
- buf_size = cq->depth << CQE_SHIFT;
-
- cq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ cq->qbuf = dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ &cq->qbuf_dma_addr, GFP_KERNEL);
if (!cq->qbuf)
return -ENOMEM;
spin_lock_init(&cq->lock);
- cq->db_record = (u64 *)(cq->qbuf + buf_size);
+ cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
+ if (!cq->dbrec)
+ goto err_out;
atomic64_set(&cq->armed_num, 0);
@@ -142,24 +143,25 @@ static int erdma_cmdq_cq_init(struct erdma_dev *dev)
upper_32_bits(cq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
lower_32_bits(cq->qbuf_dma_addr));
- erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
- cq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, cq->qbuf,
+ cq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq;
- u32 buf_size;
eq->depth = cmdq->max_outstandings;
- buf_size = eq->depth << EQE_SHIFT;
-
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -167,17 +169,24 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
atomic64_set(&eq->event_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
- eq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+ eq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
int erdma_cmdq_init(struct erdma_dev *dev)
@@ -211,17 +220,17 @@ int erdma_cmdq_init(struct erdma_dev *dev)
return 0;
err_destroy_cq:
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->cq.depth << CQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
+
err_destroy_sq:
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->sq.depth << SQEBB_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
return err;
}
@@ -238,18 +247,20 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->eq.depth << EQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->sq.depth << SQEBB_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+
+ dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->cq.depth << CQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
}
static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index c1cb5568eab2..70f89f0162aa 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -26,7 +26,7 @@ static void notify_cq(struct erdma_cq *cq, u8 solcitied)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
- *cq->kern_cq.db_record = db_data;
+ *cq->kern_cq.dbrec = db_data;
writeq(db_data, cq->kern_cq.db);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index ea47cb21fdb8..84ccdd8144c9 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -13,7 +13,7 @@ void notify_eq(struct erdma_eq *eq)
u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
- *eq->db_record = db_data;
+ *eq->dbrec = db_data;
writeq(db_data, eq->db);
atomic64_inc(&eq->notify_num);
@@ -83,14 +83,11 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
int erdma_aeq_init(struct erdma_dev *dev)
{
struct erdma_eq *eq = &dev->aeq;
- u32 buf_size;
eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
- buf_size = eq->depth << EQE_SHIFT;
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -99,26 +96,34 @@ int erdma_aeq_init(struct erdma_dev *dev)
atomic64_set(&eq->notify_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
- eq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+ eq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
void erdma_aeq_destroy(struct erdma_dev *dev)
{
struct erdma_eq *eq = &dev->aeq;
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -209,7 +214,6 @@ static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
struct erdma_cmdq_create_eq_req req;
- dma_addr_t db_info_dma_addr;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_CREATE_EQ);
@@ -219,9 +223,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.qtype = ERDMA_EQ_TYPE_CEQ;
/* Vector index is the same as EQN. */
req.vector_idx = eqn;
- db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
- req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
- req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+ req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
+ req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
@@ -229,12 +232,11 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
- u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
int ret;
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -242,10 +244,16 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
atomic64_set(&eq->event_num, 0);
atomic64_set(&eq->notify_num, 0);
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec) {
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ eq->qbuf, eq->qbuf_dma_addr);
+ return -ENOMEM;
+ }
+
eq->ci = 0;
dev->ceqs[ceqn].dev = dev;
@@ -259,7 +267,6 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
- u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
struct erdma_cmdq_destroy_eq_req req;
int err;
@@ -276,8 +283,9 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err)
return;
- dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}
int erdma_ceqs_init(struct erdma_dev *dev)
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 3212a1222760..05978f3b1475 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -240,7 +240,7 @@ struct erdma_cmdq_create_cq_req {
u32 qbuf_addr_l;
u32 qbuf_addr_h;
u32 cfg1;
- u64 cq_db_info_addr;
+ u64 cq_dbrec_dma;
u32 first_page_offset;
u32 cfg2;
};
@@ -335,8 +335,8 @@ struct erdma_cmdq_create_qp_req {
u64 rq_buf_addr;
u32 sq_mtt_cfg;
u32 rq_mtt_cfg;
- u64 sq_db_info_dma_addr;
- u64 rq_db_info_dma_addr;
+ u64 sq_dbrec_dma;
+ u64 rq_dbrec_dma;
u64 sq_mtt_entry[3];
u64 rq_mtt_entry[3];
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 472939172f0c..7080f8a71ec4 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -178,16 +178,26 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
if (!dev->resp_pool)
return -ENOMEM;
+ dev->db_pool = dma_pool_create("erdma_db_pool", &pdev->dev,
+ ERDMA_DB_SIZE, ERDMA_DB_SIZE, 0);
+ if (!dev->db_pool) {
+ ret = -ENOMEM;
+ goto destroy_resp_pool;
+ }
+
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
- goto destroy_pool;
+ goto destroy_db_pool;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
-destroy_pool:
+destroy_db_pool:
+ dma_pool_destroy(dev->db_pool);
+
+destroy_resp_pool:
dma_pool_destroy(dev->resp_pool);
return ret;
@@ -195,6 +205,7 @@ destroy_pool:
static void erdma_device_uninit(struct erdma_dev *dev)
{
+ dma_pool_destroy(dev->db_pool);
dma_pool_destroy(dev->resp_pool);
}
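The erdma rework stops carving the doorbell record out of the tail of each coherent queue buffer and instead allocates it from a per-device dma_pool, which is why the WARPPED_BUFSIZE() helper disappears and every teardown path gains a dma_pool_free(). A hedged sketch of the resulting allocate/unwind pattern, with invented structure and function names:

/* Sketch: queue buffer via dma_alloc_coherent(), doorbell record from a pool. */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct sketch_eq {
	void *qbuf;
	dma_addr_t qbuf_dma;
	u64 *dbrec;
	dma_addr_t dbrec_dma;
	size_t qbuf_size;
};

static int sketch_eq_init(struct device *dev, struct dma_pool *db_pool,
			  struct sketch_eq *eq, size_t qbuf_size)
{
	eq->qbuf_size = qbuf_size;
	eq->qbuf = dma_alloc_coherent(dev, qbuf_size, &eq->qbuf_dma, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec) {
		/* Unwind the queue buffer if the record allocation fails. */
		dma_free_coherent(dev, qbuf_size, eq->qbuf, eq->qbuf_dma);
		return -ENOMEM;
	}

	return 0;
}

static void sketch_eq_destroy(struct device *dev, struct dma_pool *db_pool,
			      struct sketch_eq *eq)
{
	dma_free_coherent(dev, eq->qbuf_size, eq->qbuf, eq->qbuf_dma);
	dma_pool_free(db_pool, eq->dbrec, eq->dbrec_dma);
}

The pool itself would come from dma_pool_create() in the probe path, sized and aligned to the doorbell record, much like the erdma_device_init() hunk above.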
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 6d0330badd68..4d1f9114cd97 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -492,7 +492,7 @@ static void kick_sq_db(struct erdma_qp *qp, u16 pi)
u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
- *(u64 *)qp->kern_qp.sq_db_info = db_data;
+ *(u64 *)qp->kern_qp.sq_dbrec = db_data;
writeq(db_data, qp->kern_qp.hw_sq_db);
}
@@ -557,7 +557,7 @@ static int erdma_post_recv_one(struct erdma_qp *qp,
return -EINVAL;
}
- *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+ *(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 23dfc01603f8..40c9b6e46b82 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -76,10 +76,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
- req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
- (qp->attrs.sq_size << SQEBB_SHIFT);
- req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
- (qp->attrs.rq_size << RQE_SHIFT);
+ req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
+ req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
} else {
user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP(
@@ -107,8 +105,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
&req.rq_buf_addr, req.rq_mtt_entry);
- req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
- req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+ req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
+ req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
if (uctx->ext_db.enable) {
req.sq_cqn_mtt_cfg |=
@@ -209,8 +207,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
ERDMA_MR_MTT_0LEVEL);
req.first_page_offset = 0;
- req.cq_db_info_addr =
- cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
} else {
mem = &cq->user_cq.qbuf_mem;
req.cfg0 |=
@@ -233,7 +230,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
mem->mtt_nents);
req.first_page_offset = mem->page_offset;
- req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+ req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
if (uctx->ext_db.enable) {
req.cfg1 |= FIELD_PREP(
@@ -482,16 +479,24 @@ static void free_kernel_qp(struct erdma_qp *qp)
vfree(qp->kern_qp.rwr_tbl);
if (qp->kern_qp.sq_buf)
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
- qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.sq_size << SQEBB_SHIFT,
+ qp->kern_qp.sq_buf,
+ qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.sq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
+ qp->kern_qp.sq_dbrec_dma);
if (qp->kern_qp.rq_buf)
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
- qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.rq_size << RQE_SHIFT,
+ qp->kern_qp.rq_buf,
+ qp->kern_qp.rq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
+ qp->kern_qp.rq_dbrec_dma);
}
static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
@@ -516,20 +521,27 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
if (!kqp->swr_tbl || !kqp->rwr_tbl)
goto err_out;
- size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ size = qp->attrs.sq_size << SQEBB_SHIFT;
kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->sq_buf_dma_addr, GFP_KERNEL);
if (!kqp->sq_buf)
goto err_out;
- size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->sq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
+ if (!kqp->sq_dbrec)
+ goto err_out;
+
+ size = qp->attrs.rq_size << RQE_SHIFT;
kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->rq_buf_dma_addr, GFP_KERNEL);
if (!kqp->rq_buf)
goto err_out;
- kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
- kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);
+ kqp->rq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
+ if (!kqp->rq_dbrec)
+ goto err_out;
return 0;
@@ -864,9 +876,9 @@ erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
}
static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
- u64 va, u32 len, u64 db_info_va)
+ u64 va, u32 len, u64 dbrec_va)
{
- dma_addr_t db_info_dma_addr;
+ dma_addr_t dbrec_dma;
u32 rq_offset;
int ret;
@@ -889,14 +901,14 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
if (ret)
goto put_sq_mtt;
- ret = erdma_map_user_dbrecords(uctx, db_info_va,
+ ret = erdma_map_user_dbrecords(uctx, dbrec_va,
&qp->user_qp.user_dbr_page,
- &db_info_dma_addr);
+ &dbrec_dma);
if (ret)
goto put_rq_mtt;
- qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
- qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+ qp->user_qp.sq_dbrec_dma = dbrec_dma;
+ qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
return 0;
@@ -1237,9 +1249,10 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return err;
if (rdma_is_kernel_res(&cq->ibcq.res)) {
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
} else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1279,16 +1292,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
wait_for_completion(&qp->safe_free);
if (rdma_is_kernel_res(&qp->ibqp.res)) {
- vfree(qp->kern_qp.swr_tbl);
- vfree(qp->kern_qp.rwr_tbl);
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
- qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
- qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ free_kernel_qp(qp);
} else {
put_mtt_entries(dev, &qp->user_qp.sq_mem);
put_mtt_entries(dev, &qp->user_qp.rq_mem);
@@ -1588,7 +1592,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
&cq->user_cq.user_dbr_page,
- &cq->user_cq.db_info_dma_addr);
+ &cq->user_cq.dbrec_dma);
if (ret)
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1600,19 +1604,27 @@ static int erdma_init_kernel_cq(struct erdma_cq *cq)
struct erdma_dev *dev = to_edev(cq->ibcq.device);
cq->kern_cq.qbuf =
- dma_alloc_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
&cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
if (!cq->kern_cq.qbuf)
return -ENOMEM;
- cq->kern_cq.db_record =
- (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
+ cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
+ &cq->kern_cq.dbrec_dma);
+ if (!cq->kern_cq.dbrec)
+ goto err_out;
+
spin_lock_init(&cq->kern_cq.lock);
/* use default cqdb addr */
cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+
+ return -ENOMEM;
}
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
@@ -1676,9 +1688,10 @@ err_free_res:
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
} else {
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(depth << CQE_SHIFT),
+ dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
}
err_out_xa:
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index db6018529ccc..4f02ba06b210 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -140,8 +140,8 @@ struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
- dma_addr_t sq_db_info_dma_addr;
- dma_addr_t rq_db_info_dma_addr;
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
struct erdma_user_dbrecords_page *user_dbr_page;
@@ -167,8 +167,11 @@ struct erdma_kqp {
void *rq_buf;
dma_addr_t rq_buf_dma_addr;
- void *sq_db_info;
- void *rq_db_info;
+ void *sq_dbrec;
+ void *rq_dbrec;
+
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
u8 sig_all;
};
@@ -246,13 +249,14 @@ struct erdma_kcq_info {
spinlock_t lock;
u8 __iomem *db;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_ucq_info {
struct erdma_mem qbuf_mem;
struct erdma_user_dbrecords_page *user_dbr_page;
- dma_addr_t db_info_dma_addr;
+ dma_addr_t dbrec_dma;
};
struct erdma_cq {
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 5d814afdf7f3..7c9d5203002b 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -21,36 +21,25 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
int ret;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
- goto out_ret;
+ return ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
- goto out_ret;
+ return ret;
}
return 0;
-out_ret:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
- return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
- free_percpu(dev->tstats);
- dev->tstats = NULL;
-
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
@@ -107,7 +96,6 @@ static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_uninit = hfi1_ipoib_dev_uninit,
.ndo_open = hfi1_ipoib_dev_open,
.ndo_stop = hfi1_ipoib_dev_stop,
- .ndo_get_stats64 = dev_get_tstats64,
};
static int hfi1_ipoib_mcast_attach(struct net_device *dev,
@@ -173,9 +161,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_txreq_deinit(priv);
hfi1_ipoib_rxq_deinit(priv->netdev);
-
- free_percpu(dev->tstats);
- dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@@ -234,6 +219,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
+ netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
index 8aa074670a9c..07c8f77c9181 100644
--- a/drivers/infiniband/hw/hfi1/netdev.h
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -49,7 +49,7 @@ struct hfi1_netdev_rxq {
* When 0 receive queues will be freed.
*/
struct hfi1_netdev_rx {
- struct net_device rx_napi;
+ struct net_device *rx_napi;
struct hfi1_devdata *dd;
struct hfi1_netdev_rxq *rxq;
int num_rx_q;
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 720d4c85c9c9..8608044203bb 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -188,7 +188,7 @@ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
int i;
int rc;
struct hfi1_devdata *dd = rx->dd;
- struct net_device *dev = &rx->rx_napi;
+ struct net_device *dev = rx->rx_napi;
rx->num_rx_q = dd->num_netdev_contexts;
rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
@@ -360,7 +360,11 @@ int hfi1_alloc_rx(struct hfi1_devdata *dd)
if (!rx)
return -ENOMEM;
rx->dd = dd;
- init_dummy_netdev(&rx->rx_napi);
+ rx->rx_napi = alloc_netdev_dummy(0);
+ if (!rx->rx_napi) {
+ kfree(rx);
+ return -ENOMEM;
+ }
xa_init(&rx->dev_tbl);
atomic_set(&rx->enabled, 0);
@@ -374,6 +378,7 @@ void hfi1_free_rx(struct hfi1_devdata *dd)
{
if (dd->netdev_rx) {
dd_dev_info(dd, "hfi1 rx freed\n");
+ free_netdev(dd->netdev_rx->rx_napi);
kfree(dd->netdev_rx);
dd->netdev_rx = NULL;
}
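The hfi1 netdev_rx change replaces the net_device embedded in struct hfi1_netdev_rx with a pointer obtained from alloc_netdev_dummy(), freeing it with free_netdev() when the rx context goes away. A small hedged sketch of that lifecycle; the surrounding rx structure is invented for illustration:

/* Sketch: heap-allocated dummy netdev used only as a NAPI anchor. */
#include <linux/netdevice.h>
#include <linux/slab.h>

struct sketch_rx {
	struct net_device *napi_dev;
	/* queues, lookup table, ... */
};

static struct sketch_rx *sketch_rx_alloc(void)
{
	struct sketch_rx *rx;

	rx = kzalloc(sizeof(*rx), GFP_KERNEL);
	if (!rx)
		return NULL;

	rx->napi_dev = alloc_netdev_dummy(0);	/* no private area needed */
	if (!rx->napi_dev) {
		kfree(rx);
		return NULL;
	}

	return rx;
}

static void sketch_rx_free(struct sketch_rx *rx)
{
	free_netdev(rx->napi_dev);
	kfree(rx);
}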
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 119ec2f1382b..7133964749f8 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1207,14 +1207,11 @@ retry:
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= target_vector;
- dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- ret = pcie_capability_write_word(parent,
- PCI_EXP_LNKCTL2, lnkctl2);
+ ret = pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ target_vector);
if (ret) {
- dd_dev_err(dd, "Unable to write to PCI config\n");
+ dd_dev_err(dd, "Unable to change parent PCI target speed\n");
return_error = 1;
goto done;
}
@@ -1223,22 +1220,11 @@ retry:
}
dd_dev_info(dd, "%s: setting target link speed\n", __func__);
- ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
+ ret = pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ target_vector);
if (ret) {
- dd_dev_err(dd, "Unable to read from PCI config\n");
- return_error = 1;
- goto done;
- }
-
- dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= target_vector;
- dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
- if (ret) {
- dd_dev_err(dd, "Unable to write to PCI config\n");
+ dd_dev_err(dd, "Unable to change device PCI target speed\n");
return_error = 1;
goto done;
}
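The hfi1 PCIe hunks collapse the open-coded read/modify/write of PCI_EXP_LNKCTL2 into pcie_capability_clear_and_set_word(), which wraps the read, mask and write of the capability register into a single helper. A brief sketch of the call; the wrapper function and target value are placeholders:

/* Sketch: set the Target Link Speed field without open-coding the RMW. */
#include <linux/pci.h>

static int sketch_set_target_speed(struct pci_dev *pdev, u16 target_vector)
{
	/* Clears PCI_EXP_LNKCTL2_TLS, then ORs in target_vector. */
	return pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL2,
						  PCI_EXP_LNKCTL2_TLS,
						  target_vector);
}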
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index b4209b6aed8d..3e02c474f59f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -59,8 +59,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
struct hns_roce_ib_create_ah_resp resp = {};
struct hns_roce_ah *ah = to_hr_ah(ibah);
- int ret = 0;
- u32 max_sl;
+ u8 tclass = get_tclass(grh);
+ u8 priority = 0;
+ u8 tc_mode = 0;
+ int ret;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
@@ -74,16 +76,23 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ah->av.hop_limit = grh->hop_limit;
ah->av.flowlabel = grh->flow_label;
ah->av.udp_sport = get_ah_udp_sport(ah_attr);
- ah->av.tclass = get_tclass(grh);
-
- ah->av.sl = rdma_ah_get_sl(ah_attr);
- max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
- if (unlikely(ah->av.sl > max_sl)) {
- ibdev_err_ratelimited(&hr_dev->ib_dev,
- "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
- ah->av.sl, max_sl);
+ ah->av.tclass = tclass;
+
+ ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return ret;
+
+ if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ ah->av.sl = priority;
+ else
+ ah->av.sl = rdma_ah_get_sl(ah_attr);
+
+ if (!check_sl_valid(hr_dev, ah->av.sl))
return -EINVAL;
- }
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
@@ -99,6 +108,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
}
if (udata) {
+ resp.priority = ah->av.sl;
+ resp.tc_mode = tc_mode;
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
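The hns_roce AH change derives the service level from the DSCP-to-priority mapping when the device is in DSCP TC mode and the GID is RoCEv2 (UDP encap), otherwise it keeps the SL from the address handle, then validates the result with check_sl_valid(). A simplified, hypothetical sketch of that selection logic:

/* Sketch: choose SL from the DSCP mapping when applicable, else the AH SL. */
#include <linux/errno.h>
#include <linux/types.h>

static int sketch_resolve_sl(bool is_rocev2, bool dscp_tc_mode,
			     u8 dscp_priority, u8 ah_sl, u8 max_sl, u8 *sl)
{
	if (is_rocev2 && dscp_tc_mode)
		*sl = dscp_priority;
	else
		*sl = ah_sl;

	return (*sl <= max_sl) ? 0 : -EINVAL;
}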
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 11a78ceae568..950c133d4220 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -153,8 +153,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
return total;
}
-int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
- int buf_cnt, struct ib_umem *umem,
+int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
unsigned int page_shift)
{
struct ib_block_iter biter;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 7250d0643b5c..56dc3908da2f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return ret;
}
- ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
goto err_put;
@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return 0;
err_xa:
- xa_erase(&cq_table->array, hr_cq->cqn);
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- xa_erase(&cq_table->array, hr_cq->cqn);
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
struct ib_event event;
struct ib_cq *ibcq;
- hr_cq = xa_load(&hr_dev->cq_table.array,
- cqn & (hr_dev->caps.num_cqs - 1));
- if (!hr_cq) {
- dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
- return;
- }
-
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
return;
}
- refcount_inc(&hr_cq->refcount);
+ xa_lock(&hr_dev->cq_table.array);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (hr_cq)
+ refcount_inc(&hr_cq->refcount);
+ xa_unlock(&hr_dev->cq_table.array);
+ if (!hr_cq) {
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
ibcq = &hr_cq->ib_cq;
if (ibcq->event_handler) {
@@ -534,4 +536,5 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
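The hns_roce_cq_event() rework looks the CQ up and bumps its refcount while holding the xarray lock, closing the window in which the CQ could be erased and freed between xa_load() and refcount_inc(). A hedged sketch of that lookup-and-get pattern with simplified types:

/* Sketch: take the reference under the same lock that guards removal,
 * so a concurrent erase cannot race with the lookup. */
#include <linux/refcount.h>
#include <linux/xarray.h>

struct sketch_obj {
	refcount_t refcount;
	/* ... */
};

static struct sketch_obj *sketch_get(struct xarray *xa, unsigned long index)
{
	struct sketch_obj *obj;

	xa_lock(xa);
	obj = xa_load(xa, index);
	if (obj)
		refcount_inc(&obj->refcount);
	xa_unlock(xa);

	return obj;	/* NULL means the object was already gone */
}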
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c3cbd0a494bf..ff0b3f68ee3a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -100,6 +100,9 @@
#define CQ_BANKID_SHIFT 2
#define CQ_BANKID_MASK GENMASK(1, 0)
+#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF
+#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF
+
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
@@ -645,6 +648,8 @@ struct hns_roce_qp {
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
enum hns_roce_cong_type cong_type;
+ u8 tc_mode;
+ u8 priority;
};
struct hns_roce_ib_iboe {
@@ -923,8 +928,7 @@ struct hns_roce_hw {
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
- int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr);
+ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
@@ -950,6 +954,8 @@ struct hns_roce_hw {
int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
+ int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -1228,7 +1234,7 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, struct hns_roce_buf *buf,
unsigned int page_shift);
-int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+int hns_roce_get_umem_bufs(dma_addr_t *bufs,
int buf_cnt, struct ib_umem *umem,
unsigned int page_shift);
@@ -1292,4 +1298,6 @@ struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index a4b3f19161dc..02baa853a76c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -281,7 +281,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
return hem;
fail:
- hns_roce_free_hem(hr_dev, hem);
+ kfree(hem);
return NULL;
}
@@ -877,6 +877,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+ mutex_destroy(&table->mutex);
return;
}
@@ -891,6 +892,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
hns_roce_free_hem(hr_dev, table->hem[i]);
}
+ mutex_destroy(&table->mutex);
kfree(table->hem);
}
@@ -986,15 +988,13 @@ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
}
}
-static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
- u64 table_addr)
+static void hem_list_link_bt(void *base_addr, u64 table_addr)
{
*(u64 *)(base_addr) = table_addr;
}
/* assign L0 table address to hem from root bt */
-static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_item *hem, void *cpu_addr,
+static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
u64 phy_addr)
{
hem->addr = cpu_addr;
@@ -1163,8 +1163,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
if (level > 1) {
pre = hem_ptrs[level - 1];
step = (cur->start - pre->start) / step * BA_BYTE_LEN;
- hem_list_link_bt(hr_dev, pre->addr + step,
- cur->dma_addr);
+ hem_list_link_bt(pre->addr + step, cur->dma_addr);
}
}
@@ -1222,7 +1221,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
if (!hem)
return -ENOMEM;
- hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
+ hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
@@ -1245,7 +1244,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
/* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
- hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
+ hem_list_link_bt(cpu_base + offset, hem->dma_addr);
total++;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 6fb51db9682b..9c415b2541af 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -57,16 +57,16 @@ enum {
};
#define check_whether_bt_num_3(type, hop_num) \
- (type < HEM_TYPE_MTT && hop_num == 2)
+ ((type) < HEM_TYPE_MTT && (hop_num) == 2)
#define check_whether_bt_num_2(type, hop_num) \
- ((type < HEM_TYPE_MTT && hop_num == 1) || \
- (type >= HEM_TYPE_MTT && hop_num == 2))
+ (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 2))
#define check_whether_bt_num_1(type, hop_num) \
- ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
- (type >= HEM_TYPE_MTT && hop_num == 1) || \
- (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+ (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
struct hns_roce_hem {
void *buf;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba7ae792d279..4287818a737f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -443,10 +443,6 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
-
- if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
- return -EINVAL;
-
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
ud_sq_wqe->sgid_index = ah->av.gid_index;
@@ -2105,7 +2101,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
- caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+ caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz);
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -2671,6 +2667,8 @@ static void free_mr_exit(struct hns_roce_dev *hr_dev)
kfree(free_mr->rsv_pd);
free_mr->rsv_pd = NULL;
}
+
+ mutex_destroy(&free_mr->mutex);
}
static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
@@ -2821,8 +2819,10 @@ static int free_mr_init(struct hns_roce_dev *hr_dev)
mutex_init(&free_mr->mutex);
ret = free_mr_alloc_res(hr_dev);
- if (ret)
+ if (ret) {
+ mutex_destroy(&free_mr->mutex);
return ret;
+ }
ret = free_mr_modify_qp(hr_dev);
if (ret)
@@ -3208,13 +3208,14 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
/* Aligned to the hardware address access unit */
for (i = 0; i < ARRAY_SIZE(pages); i++)
- pages[i] >>= 6;
+ pages[i] >>= MPT_PBL_BUF_ADDR_S;
pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
- mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
- hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
+ hr_reg_write(mpt_entry, MPT_PBL_BA_H,
+ upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
@@ -3307,8 +3308,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
- void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -3335,8 +3335,10 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
- hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >>
+ MPT_PBL_BA_ADDR_S));
+ hr_reg_write(mpt_entry, MPT_PBL_BA_H,
+ upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
return 0;
}
@@ -3582,14 +3584,14 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
- hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
- hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
+ hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S);
+ hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S);
hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
((u32)hr_cq->db.dma) >> 1);
hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
- hr_cq->db.dma >> 32);
+ hr_cq->db.dma >> CQC_CQE_DB_RECORD_ADDR_H_S);
hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
hr_reg_write(cq_context, CQC_CQ_PERIOD,
@@ -3711,8 +3713,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
wc->status == IB_WC_WR_FLUSH_ERR))
return;
- ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
+ cqe_status);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
cq->cqe_size, false);
wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
@@ -4217,8 +4220,7 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
}
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
hr_reg_write(context, QPC_SGE_SHIFT,
to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
@@ -4240,7 +4242,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4259,7 +4260,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
- set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
+ set_qpc_wqe_cnt(hr_qp, context);
/* No VLAN need to set 0xFFF */
hr_reg_write(context, QPC_VLAN_ID, 0xfff);
@@ -4300,7 +4301,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4521,16 +4521,16 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL;
}
- hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
+ hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> QPC_TRRL_BA_L_S);
hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
- context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
+ context->trrl_ba = cpu_to_le32(trrl_ba >> QPC_TRRL_BA_M_S);
qpc_mask->trrl_ba = 0;
- hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
+ hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> QPC_TRRL_BA_H_S);
hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
- context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
+ context->irrl_ba = cpu_to_le32(irrl_ba >> QPC_IRRL_BA_L_S);
qpc_mask->irrl_ba = 0;
- hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
+ hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> QPC_IRRL_BA_H_S);
hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
hr_reg_enable(context, QPC_RMT_E2E);
@@ -4592,8 +4592,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
+#define MAX_LP_SGEN 3
/* rocee send 2^lp_sgen_ini segs every time */
- hr_reg_write(context, QPC_LP_SGEN_INI, 3);
+ hr_reg_write(context, QPC_LP_SGEN_INI, MAX_LP_SGEN);
hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
if (udata && ibqp->qp_type == IB_QPT_RC &&
@@ -4619,8 +4620,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return 0;
}
-static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
+static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4685,7 +4685,7 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
*tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
- if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
+ if (!memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
*dip_idx = hr_dip->dip_idx;
goto out;
}
@@ -4828,6 +4828,69 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return 0;
}
+static int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!ops->get_dscp_prio)
+ return -EOPNOTSUPP;
+
+ return ops->get_dscp_prio(handle, dscp, tc_mode, priority);
+}
+
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl)
+{
+ u32 max_sl;
+
+ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
+ if (unlikely(sl > max_sl)) {
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to set SL(%u). Shouldn't be larger than %u.\n",
+ sl, max_sl);
+ return false;
+ }
+
+ return true;
+}
+
+static int hns_roce_set_sl(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
+ &hr_qp->tc_mode, &hr_qp->priority);
+ if (ret && ret != -EOPNOTSUPP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to get dscp, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ hr_qp->sl = hr_qp->priority;
+ else
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ if (!check_sl_valid(hr_dev, hr_qp->sl))
+ return -EINVAL;
+
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+ return 0;
+}
+
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4843,25 +4906,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
int is_roce_protocol;
u16 vlan_id = 0xffff;
bool is_udp = false;
- u32 max_sl;
u8 ib_port;
u8 hr_port;
int ret;
- max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
- if (unlikely(sl > max_sl)) {
- ibdev_err_ratelimited(ibdev,
- "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
- sl, max_sl);
- return -EINVAL;
- }
-
/*
* If free_mr_en of qp is set, it means that this qp comes from
* free mr. This qp will perform the loopback operation.
* In the loopback scenario, only sl needs to be set.
*/
if (hr_qp->free_mr_en) {
+ if (!check_sl_valid(hr_dev, sl))
+ return -EINVAL;
hr_reg_write(context, QPC_SL, sl);
hr_reg_clear(qpc_mask, QPC_SL);
hr_qp->sl = sl;
@@ -4931,11 +4987,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
- hr_qp->sl = sl;
- hr_reg_write(context, QPC_SL, hr_qp->sl);
- hr_reg_clear(qpc_mask, QPC_SL);
-
- return 0;
+ return hns_roce_set_sl(ibqp, attr, context, qpc_mask);
}
static bool check_qp_state(enum ib_qp_state cur_state,
@@ -4982,15 +5034,14 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
- ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
- qpc_mask);
+ ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask);
}
return ret;
@@ -5802,7 +5853,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
dev_info(hr_dev->dev,
"cq_period(%u) reached the upper limit, adjusted to 65.\n",
cq_period);
- cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+ cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
}
cq_period *= HNS_ROCE_CLOCK_ADJUST;
}
@@ -6735,6 +6786,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_srqc = hns_roce_v2_query_srqc,
.query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
+ .get_dscp = hns_roce_hw_v2_get_dscp,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
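The hns_roce_hw_v2.c hunks above replace bare shift values such as ">> 6" and ">> (32 + 6)" with named constants (QPC_IRRL_BA_L_S, QPC_IRRL_BA_H_S and friends). Below is a minimal standalone sketch of that pattern only; the DEMO_* layout is invented for illustration and is not the real QPC format.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented field layout, for illustration only. */
#define DEMO_BA_L_S 4                  /* low field drops the bottom 4 address bits */
#define DEMO_BA_M_S (16 + DEMO_BA_L_S) /* middle field starts 16 bits higher */
#define DEMO_BA_H_S (32 + DEMO_BA_M_S) /* high field holds the remaining bits */

int main(void)
{
	uint64_t ba = 0x0000123456789ab0ULL; /* pretend table base address */

	/* Same pattern as the patch: one named shift per field, no magic numbers. */
	uint32_t lo  = (uint32_t)(ba >> DEMO_BA_L_S);
	uint32_t mid = (uint32_t)(ba >> DEMO_BA_M_S);
	uint32_t hi  = (uint32_t)(ba >> DEMO_BA_H_S);

	printf("lo=%" PRIx32 " mid=%" PRIx32 " hi=%" PRIx32 "\n", lo, mid, hi);
	return 0;
}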
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index df04bc8ede57..def1d15a03c7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -276,6 +276,10 @@ struct hns_roce_v2_cq_context {
__le32 byte_64_se_cqe_idx;
};
+#define CQC_CQE_BA_L_S 3
+#define CQC_CQE_BA_H_S (32 + CQC_CQE_BA_L_S)
+#define CQC_CQE_DB_RECORD_ADDR_H_S 32
+
#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
@@ -447,6 +451,12 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_qp_context_ex ext;
};
+#define QPC_TRRL_BA_L_S 4
+#define QPC_TRRL_BA_M_S (16 + QPC_TRRL_BA_L_S)
+#define QPC_TRRL_BA_H_S (32 + QPC_TRRL_BA_M_S)
+#define QPC_IRRL_BA_L_S 6
+#define QPC_IRRL_BA_H_S (32 + QPC_IRRL_BA_L_S)
+
#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
#define QPC_TST QPC_FIELD_LOC(2, 0)
@@ -716,6 +726,9 @@ struct hns_roce_v2_mpt_entry {
__le32 byte_64_buf_pa1;
};
+#define MPT_PBL_BUF_ADDR_S 6
+#define MPT_PBL_BA_ADDR_S 3
+
#define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l)
#define MPT_ST MPT_FIELD_LOC(1, 0)
@@ -1334,7 +1347,7 @@ struct fmea_ram_ecc {
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
-#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65
#define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 1dc60c2b2b7a..4cb0af733587 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,9 +37,11 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
+#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
const u8 *addr)
@@ -192,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+ props->max_ah = INT_MAX;
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
+ props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
props->max_srq = hr_dev->caps.num_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs;
@@ -421,6 +429,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0;
error_fail_copy_to_udata:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
hns_roce_dealloc_uar_entry(context);
error_fail_uar_entry:
@@ -437,6 +448,10 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&context->page_mutex);
+
hns_roce_dealloc_uar_entry(context);
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
@@ -925,6 +940,15 @@ err_unmap_dmpt:
return ret;
}
+static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_bitmap(hr_dev);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&hr_dev->pgdir_mutex);
+}
+
/**
* hns_roce_setup_hca - setup host channel adapter
* @hr_dev: pointer to hns roce device
@@ -973,6 +997,10 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
err_uar_table_free:
ida_destroy(&hr_dev->uar_ida.ida);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+ mutex_destroy(&hr_dev->pgdir_mutex);
+
return ret;
}
@@ -1118,7 +1146,7 @@ error_failed_register_device:
hr_dev->hw->hw_exit(hr_dev);
error_failed_engine_init:
- hns_roce_cleanup_bitmap(hr_dev);
+ hns_roce_teardown_hca(hr_dev);
error_failed_setup_hca:
hns_roce_cleanup_hem(hr_dev);
@@ -1148,7 +1176,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
- hns_roce_cleanup_bitmap(hr_dev);
+ hns_roce_teardown_hca(hr_dev);
hns_roce_cleanup_hem(hr_dev);
if (hr_dev->cmd_mod)
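The hns_roce_main.c hunks pair every mutex_init() with a mutex_destroy() on the error and teardown paths (hns_roce_teardown_hca(), the ucontext paths). A small pthreads sketch of the same init/undo pairing follows; the struct and field names are placeholders, not the driver's.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct demo_ctx {
	pthread_mutex_t pgdir_mutex;
	void *table;                       /* stands in for the resources set up later */
};

static int demo_setup(struct demo_ctx *ctx)
{
	int err;

	err = pthread_mutex_init(&ctx->pgdir_mutex, NULL);
	if (err)
		return -err;

	ctx->table = malloc(64);
	if (!ctx->table) {
		err = -ENOMEM;
		goto err_destroy_mutex;    /* undo exactly what succeeded above */
	}
	return 0;

err_destroy_mutex:
	pthread_mutex_destroy(&ctx->pgdir_mutex);
	return err;
}

static void demo_teardown(struct demo_ctx *ctx)
{
	free(ctx->table);
	pthread_mutex_destroy(&ctx->pgdir_mutex); /* mirrors the new teardown helper */
}

int main(void)
{
	struct demo_ctx ctx;

	if (demo_setup(&ctx))
		return 1;
	demo_teardown(&ctx);
	return 0;
}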
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9e05b57a2d67..1a61dceb3319 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -162,7 +162,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
- ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
+ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
goto err_page;
@@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_mtr *mtr = &mr->pbl_mtr;
- int ret = 0;
+ int ret, sg_num = 0;
mr->npages = 0;
mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
sizeof(dma_addr_t), GFP_KERNEL);
if (!mr->page_list)
- return ret;
+ return sg_num;
- ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
- if (ret < 1) {
+ sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (sg_num < 1) {
ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
- mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
goto err_page_list;
}
@@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
if (ret) {
ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
- ret = 0;
+ sg_num = 0;
} else {
mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
- ret = mr->npages;
}
err_page_list:
kvfree(mr->page_list);
mr->page_list = NULL;
- return ret;
+ return sg_num;
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -756,7 +755,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
return -ENOMEM;
if (mtr->umem)
- npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
+ npage = hns_roce_get_umem_bufs(pages, page_count,
mtr->umem, page_shift);
else
npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
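The hns_roce_map_mr_sg() hunk separates the count the verb must return (sg_num, the number of mapped SG entries, or 0 on failure) from the internal errno-style result (ret), so an errno never leaks out as a bogus count. A toy sketch of that two-variable convention, with invented helper names:

#include <errno.h>
#include <stdio.h>

static int backend_map(int npages)         /* stands in for hns_roce_mtr_map() */
{
	return npages > 0 ? 0 : -EINVAL;   /* 0 on success, -errno on failure */
}

static int demo_map_sg(int nents)
{
	int ret, sg_num = 0;               /* sg_num is what the caller sees */

	sg_num = nents;                    /* "pages stored" step */
	if (sg_num < 1)
		return sg_num;             /* count-style result, not an errno */

	ret = backend_map(sg_num);         /* errno-style internal result */
	if (ret)
		sg_num = 0;                /* report "nothing mapped", drop the errno */

	return sg_num;
}

int main(void)
{
	printf("mapped %d entries\n", demo_map_sg(4));
	printf("mapped %d entries\n", demo_map_sg(0));
	return 0;
}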
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f35a66325d9a..db34665d1dfb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -410,7 +410,8 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
bankid = get_qp_bankid(hr_qp->qpn);
- ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
+ ida_free(&hr_dev->qp_table.bank[bankid].ida,
+ hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);
mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
@@ -1117,7 +1118,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
- struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
struct hns_roce_qp *hr_qp)
@@ -1140,7 +1140,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
if (ret) {
ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
- return ret;
+ goto err_out;
}
if (!udata) {
@@ -1148,7 +1148,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (ret) {
ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
ret);
- return ret;
+ goto err_out;
}
}
@@ -1219,6 +1219,8 @@ err_qpn:
free_qp_buf(hr_dev, hr_qp);
err_buf:
free_kernel_wrid(hr_qp);
+err_out:
+ mutex_destroy(&hr_qp->mutex);
return ret;
}
@@ -1234,6 +1236,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qp_buf(hr_dev, hr_qp);
free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata);
+ mutex_destroy(&hr_qp->mutex);
}
static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
@@ -1271,7 +1274,6 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_device *ibdev = qp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp = to_hr_qp(qp);
- struct ib_pd *pd = qp->pd;
int ret;
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
@@ -1286,7 +1288,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
+ ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
if (ret)
ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
@@ -1386,6 +1388,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ib_modify_qp_resp resp = {};
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
int ret = -EINVAL;
@@ -1427,6 +1430,18 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state, udata);
+ if (ret)
+ goto out;
+
+ if (udata && udata->outlen) {
+ resp.tc_mode = hr_qp->tc_mode;
+ resp.priority = hr_qp->sl;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to copy modify qp resp.\n");
+ }
out:
mutex_unlock(&hr_qp->mutex);
@@ -1561,5 +1576,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
+ mutex_destroy(&hr_dev->qp_table.bank_mutex);
+ mutex_destroy(&hr_dev->qp_table.scc_mutex);
kfree(hr_dev->qp_table.idx_table.spare_idx);
}
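hns_roce_modify_qp() now copies tc_mode and the chosen SL back to userspace, bounded by min(udata->outlen, sizeof(resp)), so an older userspace that allocated a shorter response buffer is not overrun. A userspace sketch of that outlen-bounded copy; the struct and field names are invented, and memcpy stands in for ib_copy_to_udata().

#include <stdio.h>
#include <string.h>

struct demo_resp {
	unsigned char tc_mode;
	unsigned char priority;
	unsigned char future_field;        /* a field an older ABI would not know about */
};

static void copy_resp(void *user_buf, size_t outlen, const struct demo_resp *resp)
{
	size_t n = outlen < sizeof(*resp) ? outlen : sizeof(*resp);

	memcpy(user_buf, resp, n);         /* only as much as the caller asked for */
}

int main(void)
{
	struct demo_resp resp = { .tc_mode = 1, .priority = 3, .future_field = 7 };
	unsigned char old_abi_buf[2] = { 0 };   /* old userspace: only 2 bytes of response */

	copy_resp(old_abi_buf, sizeof(old_abi_buf), &resp);
	printf("tc_mode=%u priority=%u\n", (unsigned)old_abi_buf[0], (unsigned)old_abi_buf[1]);
	return 0;
}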
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 4abae9477854..f1997abc97ca 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return ret;
}
- ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
if (ret) {
ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
goto err_put;
@@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return 0;
err_xa:
- xa_erase(&srq_table->xa, srq->srqn);
+ xa_erase_irq(&srq_table->xa, srq->srqn);
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
@@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
- xa_erase(&srq_table->xa, srq->srqn);
+ xa_erase_irq(&srq_table->xa, srq->srqn);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
@@ -250,7 +250,7 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}
-static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+static int alloc_srq_wrid(struct hns_roce_srq *srq)
{
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
@@ -366,7 +366,7 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
goto err_idx;
if (!udata) {
- ret = alloc_srq_wrid(hr_dev, srq);
+ ret = alloc_srq_wrid(srq);
if (ret)
goto err_wqe_buf;
}
@@ -518,6 +518,7 @@ err_srq_db:
err_srq_buf:
free_srq_buf(hr_dev, srq);
err_out:
+ mutex_destroy(&srq->mutex);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]);
return ret;
@@ -532,6 +533,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
free_srqn(hr_dev, srq);
free_srq_db(hr_dev, srq, udata);
free_srq_buf(hr_dev, srq);
+ mutex_destroy(&srq->mutex);
return 0;
}
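The SRQ table updates switch from xa_store()/xa_erase() to the _irq variants, which take the XArray lock with interrupts disabled; that matters when the same table is also consulted from interrupt context. A loose userspace analogue of the discipline follows, using signal masking in place of IRQ disabling. It illustrates the idea only, not the XArray API, and a sig_atomic_t would not strictly need the masking.

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t table_entry;   /* shared with the asynchronous context */

static void async_handler(int sig)
{
	(void)sig;
	(void)table_entry;                  /* async reader, like an event handler */
}

static void store_entry(int value)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	sigprocmask(SIG_BLOCK, &block, &old); /* "_irq": keep the async source out */
	table_entry = value;
	sigprocmask(SIG_SETMASK, &old, NULL);
}

int main(void)
{
	signal(SIGALRM, async_handler);
	store_entry(42);
	printf("stored %d\n", (int)table_entry);
	return 0;
}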
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 1ee7a4e0d8d8..36bb7e5ce638 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1985,7 +1985,8 @@ static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip,
__be32 dst_ipaddr = htonl(dst_ip);
__be32 src_ipaddr = htonl(src_ip);
- rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (IS_ERR(rt)) {
ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n");
return -EINVAL;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d09c..c6a3fd57a196 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -9,23 +9,23 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct mana_ib_create_cq_resp resp = {};
+ struct mana_ib_ucontext *mana_ucontext;
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
+ bool is_rnic_cq;
+ u32 doorbell;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
- if (udata->inlen < sizeof(ucmd))
- return -EINVAL;
+ cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+ cq->cq_handle = INVALID_MANA_HANDLE;
- if (attr->comp_vector > gc->max_num_queues)
+ if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
return -EINVAL;
- cq->comp_vector = attr->comp_vector;
-
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
ibdev_dbg(ibdev,
@@ -33,42 +33,54 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
+ is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+ if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
return -EINVAL;
}
cq->cqe = attr->cqe;
- cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->umem)) {
- err = PTR_ERR(cq->umem);
- ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
- err);
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
return err;
}
- err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to create dma region for create cq, %d\n",
- err);
- goto err_release_umem;
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ doorbell = mana_ucontext->doorbell;
+
+ if (is_rnic_cq) {
+ err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+ goto err_destroy_queue;
+ }
+
+ err = mana_ib_install_cq_cb(mdev, cq);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+ goto err_destroy_rnic_cq;
+ }
}
- ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, cq->gdma_region);
-
- /*
- * The CQ ID is not known at this time. The ID is generated at create_qp
- */
- cq->id = INVALID_QUEUE_ID;
+ resp.cqid = cq->queue.id;
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto err_remove_cq_cb;
+ }
return 0;
-err_release_umem:
- ib_umem_release(cq->umem);
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+ mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &cq->queue);
+
return err;
}
@@ -77,25 +89,17 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
- int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
- err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to destroy dma region, %d\n", err);
- return err;
- }
+ mana_ib_remove_cq_cb(mdev, cq);
- if (cq->id != INVALID_QUEUE_ID) {
- kfree(gc->cq_table[cq->id]);
- gc->cq_table[cq->id] = NULL;
- }
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_cq(mdev, cq);
- ib_umem_release(cq->umem);
+ mana_ib_destroy_queue(mdev, &cq->queue);
return 0;
}
@@ -113,8 +117,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue *gdma_cq;
+ if (cq->queue.id >= gc->max_num_cqs)
+ return -EINVAL;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
+ WARN_ON(gc->cq_table[cq->queue.id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
@@ -122,7 +128,18 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq->id = cq->queue.id;
+ gc->cq_table[cq->queue.id] = gdma_cq;
return 0;
}
+
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+
+ if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
+ return;
+
+ kfree(gc->cq_table[cq->queue.id]);
+ gc->cq_table[cq->queue.id] = NULL;
+}
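The reworked mana_ib_create_cq() acquires its resources in order (queue, RNIC CQ, CQ callback, udata copy) and unwinds them in reverse through cascading labels. A generic, compilable sketch of that unwind shape, with malloc'd placeholders instead of the real objects:

#include <stdio.h>
#include <stdlib.h>

static int demo_create(void)
{
	void *queue = NULL, *rnic_cq = NULL, *cq_cb = NULL;
	int err = -1;

	queue = malloc(16);                /* like mana_ib_create_queue() */
	if (!queue)
		return err;

	rnic_cq = malloc(16);              /* like mana_ib_gd_create_cq() */
	if (!rnic_cq)
		goto err_destroy_queue;

	cq_cb = malloc(16);                /* like mana_ib_install_cq_cb() */
	if (!cq_cb)
		goto err_destroy_rnic_cq;

	if (0 /* pretend the final copy-to-udata step failed */)
		goto err_remove_cq_cb;

	printf("created\n");
	free(cq_cb);                       /* demo-only cleanup; the driver keeps these */
	free(rnic_cq);                     /* until destroy_cq */
	free(queue);
	return 0;

err_remove_cq_cb:
	free(cq_cb);
err_destroy_rnic_cq:
	free(rnic_cq);
err_destroy_queue:
	free(queue);
	return err;
}

int main(void)
{
	return demo_create() ? 1 : 0;
}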
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 6fa902ee80a6..7e09ceb3da53 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -15,6 +15,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.driver_id = RDMA_DRIVER_MANA,
.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
+ .add_gid = mana_ib_gd_add_gid,
.alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext,
.create_cq = mana_ib_create_cq,
@@ -23,18 +24,21 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.create_wq = mana_ib_create_wq,
.dealloc_pd = mana_ib_dealloc_pd,
.dealloc_ucontext = mana_ib_dealloc_ucontext,
+ .del_gid = mana_ib_gd_del_gid,
.dereg_mr = mana_ib_dereg_mr,
.destroy_cq = mana_ib_destroy_cq,
.destroy_qp = mana_ib_destroy_qp,
.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
.destroy_wq = mana_ib_destroy_wq,
.disassociate_ucontext = mana_ib_disassociate_ucontext,
+ .get_link_layer = mana_ib_get_link_layer,
.get_port_immutable = mana_ib_get_port_immutable,
.mmap = mana_ib_mmap,
.modify_qp = mana_ib_modify_qp,
.modify_wq = mana_ib_modify_wq,
.query_device = mana_ib_query_device,
.query_gid = mana_ib_query_gid,
+ .query_pkey = mana_ib_query_pkey,
.query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr,
@@ -51,8 +55,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
struct gdma_dev *mdev = madev->mdev;
+ struct net_device *upper_ndev;
struct mana_context *mc;
struct mana_ib_dev *dev;
+ u8 mac_addr[ETH_ALEN];
int ret;
mc = mdev->driver_data;
@@ -74,9 +80,25 @@ static int mana_ib_probe(struct auxiliary_device *adev,
* num_comp_vectors needs to set to the max MSIX index
* when interrupts and event queues are implemented
*/
- dev->ib_dev.num_comp_vectors = 1;
+ dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
+ rcu_read_lock(); /* required to get upper dev */
+ upper_ndev = netdev_master_upper_dev_get_rcu(mc->ports[0]);
+ if (!upper_ndev) {
+ rcu_read_unlock();
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get master netdev");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, upper_ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, upper_ndev, 1);
+ rcu_read_unlock();
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
@@ -92,15 +114,36 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto deregister_device;
}
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_device;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
+ ret);
+ goto destroy_rnic;
+ }
+
ret = ib_register_device(&dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret)
- goto deregister_device;
+ goto destroy_rnic;
dev_set_drvdata(&adev->dev, dev);
return 0;
+destroy_rnic:
+ mana_ib_gd_destroy_rnic_adapter(dev);
+destroy_eqs:
+ mana_ib_destroy_eqs(dev);
deregister_device:
mana_gd_deregister_device(dev->gdma_dev);
free_ib_device:
@@ -113,9 +156,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
-
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
mana_gd_deregister_device(dev->gdma_dev);
-
ib_dealloc_device(&dev->ib_dev);
}
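mana_ib_probe() now stacks more setup steps (netdev binding, EQs, RNIC adapter, MAC config) and both its error path and mana_ib_remove() unwind them in strict reverse order. The toy sketch below models that symmetry with a small teardown stack; the step names are illustrative labels, not driver calls.

#include <stdio.h>

typedef void (*teardown_fn)(void);

static void destroy_eqs(void)          { printf("destroy EQs\n"); }
static void destroy_rnic_adapter(void) { printf("destroy RNIC adapter\n"); }
static void unregister_device(void)    { printf("unregister IB device\n"); }

int main(void)
{
	teardown_fn undo[8];
	int top = 0;

	/* probe order */
	printf("create EQs\n");            undo[top++] = destroy_eqs;
	printf("create RNIC adapter\n");   undo[top++] = destroy_rnic_adapter;
	printf("register IB device\n");    undo[top++] = unregister_device;

	/* remove order: strictly the reverse */
	while (top > 0)
		undo[--top]();
	return 0;
}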
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee61b..2a411357640e 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,47 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue)
+{
+ struct ib_umem *umem;
+ int err;
+
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+ return err;
+ }
+
+ err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ goto free_umem;
+ }
+ queue->umem = umem;
+
+ ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
+
+ return 0;
+free_umem:
+ ib_umem_release(umem);
+ return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+ ib_umem_release(queue->umem);
+}
+
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc,
@@ -484,11 +525,18 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
- /*
- * This version only support RAW_PACKET
- * other values need to be filled for other types
- */
+ struct ib_port_attr attr;
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+ if (port_num == 1)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
return 0;
}
@@ -514,7 +562,42 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
- /* This version doesn't return port properties */
+ struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
+
+ if (!ndev)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+
+ if (netif_carrier_ok(ndev) && netif_running(ndev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = IB_SPEED_EDR;
+ props->pkey_tbl_len = 1;
+ if (port == 1)
+ props->gid_tbl_len = 16;
+
+ return 0;
+}
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index != 0)
+ return -EINVAL;
+ *pkey = IB_DEFAULT_PKEY_FULL;
return 0;
}
@@ -570,3 +653,238 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
return 0;
}
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue_spec spec = {};
+ int err, i;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = NULL;
+ spec.eq.context = mdev;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.msix_index = 0;
+
+ err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ if (err)
+ return err;
+
+ mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
+ GFP_KERNEL);
+ if (!mdev->eqs) {
+ err = -ENOMEM;
+ goto destroy_fatal_eq;
+ }
+
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
+ if (err)
+ goto destroy_eqs;
+ }
+
+ return 0;
+
+destroy_eqs:
+ while (i-- > 0)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+ kfree(mdev->eqs);
+destroy_fatal_eq:
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+ return err;
+}
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int i;
+
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+
+ kfree(mdev->eqs);
+}
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_create_adapter_resp resp = {};
+ struct mana_rnic_create_adapter_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.notify_eq_id = mdev->fatal_err_eq->id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+ return err;
+ }
+ mdev->adapter_handle = resp.adapter;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_destroy_adapter_resp resp = {};
+ struct mana_rnic_destroy_adapter_req req = {};
+ struct gdma_context *gc;
+ int err;
+
+ gc = mdev_to_gc(mdev);
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_ADD;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_REMOVE;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
+{
+ struct mana_rnic_config_mac_addr_resp resp = {};
+ struct mana_rnic_config_mac_addr_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = op;
+ copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_cq_resp resp = {};
+ struct mana_rnic_create_cq_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.gdma_region = cq->queue.gdma_region;
+ req.eq_id = mdev->eqs[cq->comp_vector]->id;
+ req.doorbell_page = doorbell;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
+ return err;
+ }
+
+ cq->queue.id = resp.cq_id;
+ cq->cq_handle = resp.cq_handle;
+ /* The GDMA region is now owned by the CQ handle */
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_destroy_cq_resp resp = {};
+ struct mana_rnic_destroy_cq_req req = {};
+ int err;
+
+ if (cq->cq_handle == INVALID_MANA_HANDLE)
+ return 0;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.cq_handle = cq->cq_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
+ return err;
+ }
+
+ return 0;
+}
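mana_ib_create_queue() initializes the queue id and GDMA region to invalid sentinels before doing any work, so a single destroy helper can later run safely on a partially constructed object. A standalone sketch of that sentinel pattern; the values, names, and the malloc stand-ins are all invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_INVALID_ID     UINT64_MAX
#define DEMO_INVALID_REGION UINT64_MAX

struct demo_queue {
	void *umem;
	uint64_t gdma_region;
	uint64_t id;
};

static int demo_create_queue(struct demo_queue *q, int fail_region)
{
	q->umem = NULL;                    /* sentinels first, before any step can fail */
	q->id = DEMO_INVALID_ID;
	q->gdma_region = DEMO_INVALID_REGION;

	q->umem = malloc(32);              /* like pinning the user buffer */
	if (!q->umem)
		return -1;

	if (fail_region) {                 /* pretend region creation failed */
		free(q->umem);
		q->umem = NULL;
		return -1;
	}
	q->gdma_region = 0x1234;           /* pretend region handle */
	return 0;
}

static void demo_destroy_queue(struct demo_queue *q)
{
	if (q->gdma_region != DEMO_INVALID_REGION)
		printf("destroy region %#llx\n", (unsigned long long)q->gdma_region);
	free(q->umem);                     /* free(NULL) is a no-op */
}

int main(void)
{
	struct demo_queue q;

	if (demo_create_queue(&q, 1))
		printf("create failed\n");
	demo_destroy_queue(&q);            /* still safe after the failure above */
	return 0;
}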
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eebb7d..68c3b4f0faa4 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,19 +45,26 @@ struct mana_ib_adapter_caps {
u32 max_inline_data_size;
};
+struct mana_ib_queue {
+ struct ib_umem *umem;
+ u64 gdma_region;
+ u64 id;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
+ mana_handle_t adapter_handle;
+ struct gdma_queue *fatal_err_eq;
+ struct gdma_queue **eqs;
struct mana_ib_adapter_caps adapter_caps;
};
struct mana_ib_wq {
struct ib_wq ibwq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int wqe;
u32 wq_buf_size;
- u64 gdma_region;
- u64 id;
mana_handle_t rx_object;
};
@@ -82,22 +89,17 @@ struct mana_ib_mr {
struct mana_ib_cq {
struct ib_cq ibcq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int cqe;
- u64 gdma_region;
- u64 id;
u32 comp_vector;
+ mana_handle_t cq_handle;
};
struct mana_ib_qp {
struct ib_qp ibqp;
- /* Work queue info */
- struct ib_umem *sq_umem;
- int sqe;
- u64 sq_gdma_region;
- u64 sq_id;
- mana_handle_t tx_object;
+ mana_handle_t qp_handle;
+ struct mana_ib_queue raw_sq;
/* The port on the IB device, starting with 1 */
u32 port;
@@ -114,6 +116,12 @@ struct mana_ib_rwq_ind_table {
enum mana_ib_command_code {
MANA_IB_GET_ADAPTER_CAP = 0x30001,
+ MANA_IB_CREATE_ADAPTER = 0x30002,
+ MANA_IB_DESTROY_ADAPTER = 0x30003,
+ MANA_IB_CONFIG_IP_ADDR = 0x30004,
+ MANA_IB_CONFIG_MAC_ADDR = 0x30005,
+ MANA_IB_CREATE_CQ = 0x30008,
+ MANA_IB_DESTROY_CQ = 0x30009,
};
struct mana_ib_query_adapter_caps_req {
@@ -142,6 +150,86 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size;
}; /* HW Data */
+struct mana_rnic_create_adapter_req {
+ struct gdma_req_hdr hdr;
+ u32 notify_eq_id;
+ u32 reserved;
+ u64 feature_flags;
+}; /*HW Data */
+
+struct mana_rnic_create_adapter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /*HW Data */
+
+struct mana_rnic_destroy_adapter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+enum mana_ib_addr_op {
+ ADDR_OP_ADD = 1,
+ ADDR_OP_REMOVE = 2,
+};
+
+enum sgid_entry_type {
+ SGID_TYPE_IPV4 = 1,
+ SGID_TYPE_IPV6 = 2,
+};
+
+struct mana_rnic_config_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ enum sgid_entry_type sgid_type;
+ u8 ip_addr[16];
+}; /* HW Data */
+
+struct mana_rnic_config_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ u8 mac_addr[ETH_ALEN];
+ u8 reserved[6];
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ u64 gdma_region;
+ u32 eq_id;
+ u32 doorbell_page;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t cq_handle;
+ u32 cq_id;
+ u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t cq_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
return mdev->gdma_dev->gdma_context;
@@ -158,7 +246,16 @@ static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32
return mc->ports[port - 1];
}
+static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ dst[size - 1 - i] = src[i];
+}
+
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region);
@@ -169,6 +266,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
@@ -231,4 +332,26 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev);
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
#endif
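The new copy_in_reverse() helper in mana_ib.h fills the RNIC request buffers (GID and MAC fields) with the bytes reversed, presumably because that is the order the firmware request format expects. A standalone check of the same loop, reproducing the dst[size - 1 - i] = src[i] behaviour:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void copy_in_reverse(uint8_t *dst, const uint8_t *src, uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size; i++)
		dst[size - 1 - i] = src[i];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6];

	copy_in_reverse(out, mac, sizeof(mac));
	assert(out[0] == 0x55 && out[5] == 0x00);   /* first byte out is last byte in */
	printf("reversed: %02x..%02x\n", out[0], out[5]);
	return 0;
}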
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index b70b13484f09..4f13423ecdbd 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -135,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region %llx\n", err,
+ "created dma region for user-mr 0x%llx\n",
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745c95..ba13c5abf8ef 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -15,15 +15,13 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_port_context *mpc = netdev_priv(ndev);
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
- mana_handle_t *req_indir_tab;
struct gdma_context *gc;
u32 req_buf_size;
int i, err;
gc = mdev_to_gc(dev);
- req_buf_size =
- sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
+ req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -44,20 +42,20 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rss_enable = true;
req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
- req->indir_tab_offset = sizeof(*req);
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
req->update_indir_tab = true;
req->cqe_coalescing_enable = 1;
- req_indir_tab = (mana_handle_t *)(req + 1);
/* The ind table passed to the hardware must have
* MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
* ind_table to MANA_INDIRECT_TABLE_SIZE if required
*/
ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
- req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
+ req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
- req_indir_tab[i]);
+ req->indir_tab[i]);
}
req->update_hashkey = true;
@@ -97,11 +95,9 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
- struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
- struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
unsigned int ind_tbl_size;
@@ -175,13 +171,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
- gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
- GFP_KERNEL);
- if (!gdma_cq_allocated) {
- ret = -ENOMEM;
- goto fail;
- }
-
qp->port = port;
for (i = 0; i < ind_tbl_size; i++) {
@@ -194,13 +183,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ibcq = ibwq->cq;
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- wq_spec.gdma_region = wq->gdma_region;
+ wq_spec.gdma_region = wq->queue.gdma_region;
wq_spec.queue_size = wq->wq_buf_size;
- cq_spec.gdma_region = cq->gdma_region;
+ cq_spec.gdma_region = cq->queue.gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
+ eq = &mpc->ac->eqs[cq->comp_vector];
cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@@ -212,18 +201,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
/* The GDMA regions are now owned by the WQ object */
- wq->gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- wq->id = wq_spec.queue_index;
- cq->id = cq_spec.queue_index;
+ wq->queue.id = wq_spec.queue_index;
+ cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev,
- "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
- ret, wq->rx_object, wq->id, cq->id);
+ "rx_object 0x%llx wq id %llu cq id %llu\n",
+ wq->rx_object, wq->queue.id, cq->queue.id);
- resp.entries[i].cqid = cq->id;
- resp.entries[i].wqid = wq->id;
+ resp.entries[i].cqid = cq->queue.id;
+ resp.entries[i].wqid = wq->queue.id;
mana_ind_table[i] = wq->rx_object;
@@ -231,8 +220,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ret = mana_ib_install_cq_cb(mdev, cq);
if (ret)
goto fail;
-
- gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@@ -252,7 +239,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
- kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return 0;
@@ -264,13 +250,10 @@ fail:
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- gc->cq_table[cq->id] = NULL;
- kfree(gdma_cq_allocated[i]);
-
+ mana_ib_remove_cq_cb(mdev, cq);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
}
- kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return ret;
@@ -289,15 +272,12 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
- struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {};
- struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
struct net_device *ndev;
- struct ib_umem *umem;
struct mana_eq *eq;
int eq_vec;
u32 port;
@@ -346,56 +326,39 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
ucmd.sq_buf_addr, ucmd.port);
- umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create qp-raw, err %d\n",
- err);
- goto err_free_vport;
- }
- qp->sq_umem = umem;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create qp-raw, %d\n",
- err);
- goto err_release_umem;
+ "Failed to create queue for create qp-raw, err %d\n", err);
+ goto err_free_vport;
}
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, qp->sq_gdma_region);
-
/* Create a WQ on the same port handle used by the Ethernet */
- wq_spec.gdma_region = qp->sq_gdma_region;
+ wq_spec.gdma_region = qp->raw_sq.gdma_region;
wq_spec.queue_size = ucmd.sq_buf_size;
- cq_spec.gdma_region = send_cq->gdma_region;
+ cq_spec.gdma_region = send_cq->queue.gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq_vec = send_cq->comp_vector % gc->max_num_queues;
+ eq_vec = send_cq->comp_vector;
eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
- &cq_spec, &qp->tx_object);
+ &cq_spec, &qp->qp_handle);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create wq for create raw-qp, err %d\n",
err);
- goto err_destroy_dma_region;
+ goto err_destroy_queue;
}
/* The GDMA regions are now owned by the WQ object */
- qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
- send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
+ send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- qp->sq_id = wq_spec.queue_index;
- send_cq->id = cq_spec.queue_index;
+ qp->raw_sq.id = wq_spec.queue_index;
+ send_cq->queue.id = cq_spec.queue_index;
/* Create CQ table entry */
err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -403,11 +366,11 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto err_destroy_wq_obj;
ibdev_dbg(&mdev->ib_dev,
- "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
- qp->tx_object, qp->sq_id, send_cq->id);
+ "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
+ qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
- resp.sqid = qp->sq_id;
- resp.cqid = send_cq->id;
+ resp.sqid = qp->raw_sq.id;
+ resp.cqid = send_cq->queue.id;
resp.tx_vp_offset = pd->tx_vp_offset;
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -415,23 +378,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev,
"Failed copy udata for create qp-raw, %d\n",
err);
- goto err_release_gdma_cq;
+ goto err_remove_cq_cb;
}
return 0;
-err_release_gdma_cq:
- kfree(gdma_cq);
- gc->cq_table[send_cq->id] = NULL;
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, send_cq);
err_destroy_wq_obj:
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
-err_destroy_dma_region:
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-
-err_release_umem:
- ib_umem_release(umem);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
err_free_vport:
mana_ib_uncfg_vport(mdev, pd, port);
@@ -505,12 +464,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
- if (qp->sq_umem) {
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
- ib_umem_release(qp->sq_umem);
- }
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
mana_ib_uncfg_vport(mdev, pd, qp->port);
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 7c9c69962573..f959f4b9244f 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
- struct ib_umem *umem;
int err;
if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
- umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
+ err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+ if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create wq, err %d\n", err);
+ "Failed to create queue for create wq, %d\n", err);
goto err_free_wq;
}
- wq->umem = umem;
wq->wqe = init_attr->max_wr;
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
- if (err) {
- ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create wq, %d\n",
- err);
- goto err_release_umem;
- }
-
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, wq->gdma_region);
-
- /* WQ ID is returned at wq_create time, doesn't know the value yet */
-
return &wq->ibwq;
-err_release_umem:
- ib_umem_release(umem);
-
err_free_wq:
kfree(wq);
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
- mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
- ib_umem_release(wq->umem);
+ mana_ib_destroy_queue(mdev, &wq->queue);
kfree(wq);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e64812c..3e43687a7f6f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2b557e64290..2366c46eebc8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -264,8 +264,7 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
*/
read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
ndev = ibdev->port[port_num - 1].roce.netdev;
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
out:
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 96ffbbaf0a73..5a22be14d958 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/io.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>
@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
__be32 mmio_wqe[16] = {};
unsigned long flags;
unsigned int idx;
- int i;
if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
return -EIO;
@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
* we hit doorbell
*/
wmb();
- for (i = 0; i < 8; i++)
- mlx5_write64(&mmio_wqe[i * 2],
- bf->bfreg->map + bf->offset + i * 8);
- io_stop_wc();
+ __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
+ sizeof(mmio_wqe) / 8);
bf->offset ^= bf->buf_size;
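The mlx5/mem.c hunk replaces the hand-rolled mlx5_write64() loop plus io_stop_wc() with a single __iowrite64_copy(); its count argument is in 64-bit words, which is why the driver passes sizeof(mmio_wqe) / 8. A userspace analogue of that word-count convention (ordinary memory instead of an MMIO BlueFlame page, helper name invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void copy64_words(volatile uint64_t *to, const uint64_t *from, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)        /* one 8-byte store per iteration */
		to[i] = from[i];
}

int main(void)
{
	uint64_t wqe[8] = { 1, 2, 3 };     /* 64 bytes, like __be32 mmio_wqe[16] above */
	uint64_t bf_page[8];               /* stand-in for the doorbell/BlueFlame area */

	copy64_words(bf_page, wqe, sizeof(wqe) / 8);   /* count in 64-bit words */
	printf("copied %zu words\n", sizeof(wqe) / 8);
	return 0;
}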
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a8de35c07c9e..f255a12e26a0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
- /* User Mkey must hold either a rb_key or a cache_ent. */
+ /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
struct mlx5r_cache_rb_key rb_key;
struct mlx5_cache_ent *cache_ent;
+ u8 cacheable : 1;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a8ee2ca1f4a1..ecc111ed5d86 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (IS_ERR(mr))
return mr;
mr->mmkey.rb_key = rb_key;
+ mr->mmkey.cacheable = true;
return mr;
}
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
mr->page_shift = order_base_2(page_size);
+ mr->mmkey.cacheable = true;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
unsigned int diffs = current_access_flags ^ target_access_flags;
if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+ IB_ACCESS_REMOTE_ATOMIC))
return false;
return mlx5r_umr_can_reconfig(dev, current_access_flags,
target_access_flags);
@@ -1835,6 +1838,23 @@ end:
return ret;
}
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+ if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+ return 0;
+
+ if (ent) {
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use--;
+ mr->mmkey.cache_ent = NULL;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ }
+ return destroy_mkey(dev, mr);
+}
+
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
- if (mlx5r_umr_revoke_mr(mr) ||
- cache_ent_find_and_store(dev, mr))
- mr->mmkey.cache_ent = NULL;
-
- if (!mr->mmkey.cache_ent) {
- rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
- if (rc)
- return rc;
- }
+ rc = mlx5_revoke_mr(mr);
+ if (rc)
+ return rc;
if (mr->umem) {
bool is_odp = is_odp_mr(mr);
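
mlx5_ib_dereg_mr() now funnels through the new mlx5_revoke_mr(): a cacheable MKey is revoked and pushed back into the cache, and only if that fails (or the MKey was never cacheable) is it destroyed, after dropping the cache entry's in_use count. A hedged, generic sketch of that "return to cache, else destroy" shape follows; try_return_to_cache() and destroy() are hypothetical stand-ins, not the driver's helpers.

#include <stdbool.h>
#include <stdio.h>

struct obj {
	bool cacheable;
	int id;
};

/* Pretend cache insertion succeeds for cacheable objects. */
static bool try_return_to_cache(struct obj *o)
{
	return o->cacheable;
}

static int destroy(struct obj *o)
{
	printf("destroying object %d\n", o->id);
	return 0;
}

/* Mirrors the control flow: try the cache first, destroy as the fallback. */
static int revoke(struct obj *o)
{
	if (o->cacheable && try_return_to_cache(o))
		return 0;
	return destroy(o);
}

int main(void)
{
	struct obj a = { .cacheable = true, .id = 1 };
	struct obj b = { .cacheable = false, .id = 2 };

	revoke(&a);	/* quietly returned to the cache */
	revoke(&b);	/* falls back to destroy */
	return 0;
}
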
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8115ab107149..e2164f813607 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3097,7 +3097,6 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
switch (qp->type) {
case MLX5_IB_QPT_DCT:
err = create_dct(dev, pd, qp, params);
- rdma_restrack_no_track(&qp->ibqp.res);
break;
case MLX5_IB_QPT_DCI:
err = create_dci(dev, pd, qp, params);
@@ -3109,9 +3108,9 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = mlx5_ib_create_gsi(pd, qp, params->attr);
break;
case MLX5_IB_QPT_HW_GSI:
- case MLX5_IB_QPT_REG_UMR:
rdma_restrack_no_track(&qp->ibqp.res);
fallthrough;
+ case MLX5_IB_QPT_REG_UMR:
default:
if (params->udata)
err = create_user_qp(dev, pd, qp, params);
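
In the create_qp() switch above, MLX5_IB_QPT_REG_UMR is moved below the rdma_restrack_no_track() call and its fallthrough, so only HW_GSI QPs are hidden from restrack while REG_UMR (and, with the earlier hunk, DCT) QPs stay visible; the new fill_res_qp_entry() in restrack.c then labels them. A small standalone sketch of how case placement around a fallthrough decides which labels execute the shared statement (illustrative only):

#include <stdio.h>

enum qp_kind { HW_GSI, REG_UMR, OTHER };

static void create(enum qp_kind k)
{
	switch (k) {
	case HW_GSI:
		printf("kind %d: hidden from restrack\n", k);
		/* fall through */
	case REG_UMR:		/* placed after the shared statement: stays tracked */
	default:
		printf("kind %d: created\n", k);
	}
}

int main(void)
{
	create(HW_GSI);		/* prints both lines */
	create(REG_UMR);	/* only "created": no longer marked no-track */
	return 0;
}
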
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index 4ac429e72004..affcf8fe943c 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -156,6 +156,34 @@ static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}
+static int fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ int ret;
+
+ if (qp->type < IB_QPT_DRIVER)
+ return 0;
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_REG_UMR:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE,
+ "REG_UMR");
+ break;
+ case MLX5_IB_QPT_DCT:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCT");
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCI");
+ break;
+ default:
+ return 0;
+ }
+ if (ret)
+ return ret;
+
+ return nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, IB_QPT_DRIVER);
+}
+
static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -168,6 +196,7 @@ static const struct ib_device_ops restrack_ops = {
.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
.fill_res_mr_entry = fill_res_mr_entry,
.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
+ .fill_res_qp_entry = fill_res_qp_entry,
.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
.fill_stat_mr_entry = fill_stat_mr_entry,
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index a51fc6854984..259303b9907c 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -447,7 +447,8 @@ qedr_addr4_resolve(struct qedr_dev *dev,
struct rtable *rt = NULL;
int rc = 0;
- rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
+ rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (IS_ERR(rt)) {
DP_ERR(dev, "ip_route_output returned error\n");
return -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 455e966eeff3..b27791029fa9 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -439,6 +439,7 @@ static int remove_device_files(struct super_block *sb,
return PTR_ERR(dir);
}
simple_recursive_removal(dir, NULL);
+ dput(dir);
return 0;
}
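
remove_device_files() looks the directory up with a reference held, and simple_recursive_removal() does not drop that reference, so the added dput() presumably balances the earlier lookup. A trivial, generic get/put sketch of that balance (hypothetical lookup()/put() helpers, not the VFS API):

#include <stdio.h>

struct node {
	int refcount;
};

/* A lookup that hands back a referenced object: the caller owns one reference. */
static struct node *lookup(struct node *n)
{
	n->refcount++;
	return n;
}

static void put(struct node *n)
{
	if (--n->refcount == 0)
		printf("node released\n");
}

/* Removal tears down children but does not drop the caller's lookup reference. */
static void recursive_removal(struct node *n)
{
	(void)n;
}

int main(void)
{
	struct node dir = { .refcount = 1 };
	struct node *d = lookup(&dir);

	recursive_removal(d);
	put(d);			/* the dput() the qib_fs fix adds */
	put(&dir);		/* original reference */
	return 0;
}
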
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6af57067c32e..78dfe98ebcf7 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3281,7 +3281,7 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
qib_free_irq(dd);
dd->msi_lo = 0;
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7220_interrupt(dd);
return 1;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index f93906d8fc09..9db29916e35a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3471,8 +3471,7 @@ try_intx:
pci_irq_vector(dd->pcidev, msixnum),
ret);
qib_7322_free_irq(dd);
- pci_alloc_irq_vectors(dd->pcidev, 1, 1,
- PCI_IRQ_LEGACY);
+ pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX);
goto try_intx;
}
dd->cspec->msix_entries[msixnum].arg = arg;
@@ -5143,7 +5142,7 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
qib_devinfo(dd->pcidev,
"MSIx interrupt not detected, trying INTx interrupts\n");
qib_7322_free_irq(dd);
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7322_interrupt(dd, 0);
return 1;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 47bf64ace05c..58c1d62d341b 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -210,7 +210,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
}
if (dd->flags & QIB_HAS_INTX)
- flags |= PCI_IRQ_LEGACY;
+ flags |= PCI_IRQ_INTX;
maxvec = (nent && *nent) ? *nent : 1;
nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
if (nvec < 0)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index a5e88185171f..768aad364c89 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -531,7 +531,7 @@ static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
PCI_IRQ_MSIX);
if (ret < 0) {
ret = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret < 0)
return ret;
}
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ae466e72fc43..255677bc12b2 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index b78b8c0856ab..d48af2180745 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -122,25 +122,16 @@ void retransmit_timer(struct timer_list *t)
spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) {
qp->comp.timeout = 1;
- rxe_sched_task(&qp->comp.task);
+ rxe_sched_task(&qp->send_task);
}
spin_unlock_irqrestore(&qp->state_lock, flags);
}
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
- int must_sched;
-
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
skb_queue_tail(&qp->resp_pkts, skb);
-
- must_sched = skb_queue_len(&qp->resp_pkts) > 1;
- if (must_sched != 0)
- rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
-
- if (must_sched)
- rxe_sched_task(&qp->comp.task);
- else
- rxe_run_task(&qp->comp.task);
+ rxe_sched_task(&qp->send_task);
}
static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -325,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
return COMPST_ERROR_RETRY;
@@ -476,7 +467,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -515,7 +506,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -541,7 +532,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -654,6 +645,8 @@ int rxe_completer(struct rxe_qp *qp)
int ret;
unsigned long flags;
+ qp->req.again = 0;
+
spin_lock_irqsave(&qp->state_lock, flags);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) {
@@ -737,7 +730,7 @@ int rxe_completer(struct rxe_qp *qp)
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
state = COMPST_DONE;
@@ -792,7 +785,7 @@ int rxe_completer(struct rxe_qp *qp)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
goto done;
@@ -843,8 +836,9 @@ done:
ret = 0;
goto out;
exit:
- ret = -EAGAIN;
+ ret = (qp->req.again) ? 0 : -EAGAIN;
out:
+ qp->req.again = 0;
if (pkt)
free_pkt(pkt);
return ret;
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index a012522b577a..437917a7d8f2 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -14,7 +14,7 @@ static const struct rdma_stat_desc rxe_counter_descs[] = {
[RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
[RXE_CNT_SND_RNR].name = "send_rnr_err",
[RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
- [RXE_CNT_COMPLETER_SCHED].name = "ack_deferred",
+ [RXE_CNT_SENDER_SCHED].name = "ack_deferred",
[RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
[RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
[RXE_CNT_COMP_RETRY].name = "completer_retry_err",
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 71f4d4fa9dc8..051f9e1c3852 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -18,7 +18,7 @@ enum rxe_counters {
RXE_CNT_RCV_RNR,
RXE_CNT_SND_RNR,
RXE_CNT_RCV_SEQ_ERR,
- RXE_CNT_COMPLETER_SCHED,
+ RXE_CNT_SENDER_SCHED,
RXE_CNT_RETRY_EXCEEDED,
RXE_CNT_RNR_RETRY_EXCEEDED,
RXE_CNT_COMP_RETRY,
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 746110898a0e..ded46119151b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -164,7 +164,8 @@ void rxe_dealloc(struct ib_device *ib_dev);
int rxe_completer(struct rxe_qp *qp);
int rxe_requester(struct rxe_qp *qp);
-int rxe_responder(struct rxe_qp *qp);
+int rxe_sender(struct rxe_qp *qp);
+int rxe_receiver(struct rxe_qp *qp);
/* rxe_icrc.c */
int rxe_icrc_init(struct rxe_dev *rxe);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index cd59666158b1..ca9a82e1c4c7 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -345,46 +345,52 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- struct rxe_qp *qp = sk->sk_user_data;
- int skb_out = atomic_dec_return(&qp->skb_out);
+ struct net_device *ndev = skb->dev;
+ struct rxe_dev *rxe;
+ unsigned int qp_index;
+ struct rxe_qp *qp;
+ int skb_out;
+
+ rxe = rxe_get_dev_from_net(ndev);
+ if (!rxe && is_vlan_dev(ndev))
+ rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
+ if (WARN_ON(!rxe))
+ return;
+
+ qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
+ if (!qp_index)
+ return;
- if (unlikely(qp->need_req_skb &&
- skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
- rxe_sched_task(&qp->req.task);
+ qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
+ if (!qp)
+ goto put_dev;
+
+ skb_out = atomic_dec_return(&qp->skb_out);
+ if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+ rxe_sched_task(&qp->send_task);
rxe_put(qp);
+put_dev:
+ ib_device_put(&rxe->ib_dev);
+ sock_put(skb->sk);
}
static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
int err;
+ struct sock *sk = pkt->qp->sk->sk;
+ sock_hold(sk);
+ skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
- skb->sk = pkt->qp->sk->sk;
-
- rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else {
- rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n",
- skb->protocol);
- atomic_dec(&pkt->qp->skb_out);
- rxe_put(pkt->qp);
- kfree_skb(skb);
- return -EINVAL;
- }
-
- if (unlikely(net_xmit_eval(err))) {
- rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
- return -EAGAIN;
- }
- return 0;
+ return err;
}
/* fix up a send packet to match the packets
@@ -392,8 +398,15 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
*/
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
+ struct sock *sk = pkt->qp->sk->sk;
+
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ atomic_inc(&pkt->qp->skb_out);
+
if (skb->protocol == htons(ETH_P_IP))
skb_pull(skb, sizeof(struct iphdr));
else
@@ -440,12 +453,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
return err;
}
- if ((qp_type(qp) != IB_QPT_RC) &&
- (pkt->mask & RXE_END_MASK)) {
- pkt->wqe->state = wqe_state_done;
- rxe_sched_task(&qp->comp.task);
- }
-
rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
goto done;
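
rxe_skb_tx_dtor() no longer trusts a raw QP pointer in sk_user_data; the QP's pool index is packed into the pointer-sized field (0 meaning "no QP") and resolved again through rxe_pool_get_index() at destructor time, with the device looked up from skb->dev. A plain C sketch of the pack/unpack idiom, with an array standing in for the pool and hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

struct qp {
	unsigned int index;
	const char *name;
};

static struct qp pool[4] = {
	[1] = { .index = 1, .name = "qp1" },
	[2] = { .index = 2, .name = "qp2" },
};

/* Pack a small index into a pointer-sized user-data slot (0 == "no QP"). */
static void *pack_index(unsigned int index)
{
	return (void *)(uintptr_t)index;
}

static struct qp *unpack_index(void *user_data)
{
	unsigned int index = (unsigned int)(uintptr_t)user_data;

	if (!index || index >= 4)
		return NULL;
	return &pool[index];
}

int main(void)
{
	void *sk_user_data = pack_index(2);
	struct qp *qp = unpack_index(sk_user_data);

	if (qp)
		printf("resolved %s\n", qp->name);
	return 0;
}
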
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6215c6de3a84..67567d62195e 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -119,7 +119,7 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable)
{
- int err;
+ int err = -EINVAL;
gfp_t gfp_flags;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
@@ -147,7 +147,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
err_cnt:
atomic_dec(&pool->num_elem);
- return -EINVAL;
+ return err;
}
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
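
__rxe_add_to_pool() now seeds err with -EINVAL for the capacity check and returns whatever the allocator reported instead of a hard-coded -EINVAL, so callers can tell -ENOMEM apart from a real validation failure. A minimal sketch of that error-propagation pattern (alloc_slot() is a made-up stand-in for xa_alloc_cyclic()):

#include <errno.h>
#include <stdio.h>

/* Stand-in for xa_alloc_cyclic(): may fail with different errno values. */
static int alloc_slot(int want_nomem)
{
	return want_nomem ? -ENOMEM : 0;
}

static int add_to_pool(int full, int want_nomem)
{
	int err = -EINVAL;		/* default for the capacity check below */

	if (full)
		goto err_cnt;

	err = alloc_slot(want_nomem);
	if (err)
		goto err_cnt;

	return 0;

err_cnt:
	return err;			/* was a hard-coded -EINVAL before the fix */
}

int main(void)
{
	printf("pool full:   %d\n", add_to_pool(1, 0));	/* -EINVAL */
	printf("alloc fails: %d\n", add_to_pool(0, 1));	/* -ENOMEM, now visible */
	return 0;
}
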
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index e3589c02013e..d2f7b5195c19 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
- qp->sk->sk->sk_user_data = qp;
+ qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
/* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs
@@ -265,8 +265,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->req.opcode = -1;
qp->comp.opcode = -1;
- rxe_init_task(&qp->req.task, qp, rxe_requester);
- rxe_init_task(&qp->comp.task, qp, rxe_completer);
+ rxe_init_task(&qp->send_task, qp, rxe_sender);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) {
@@ -337,7 +336,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- rxe_init_task(&qp->resp.task, qp, rxe_responder);
+ rxe_init_task(&qp->recv_task, qp, rxe_receiver);
qp->resp.opcode = OPCODE_NONE;
qp->resp.msn = 0;
@@ -514,14 +513,12 @@ err1:
static void rxe_qp_reset(struct rxe_qp *qp)
{
/* stop tasks from running */
- rxe_disable_task(&qp->resp.task);
- rxe_disable_task(&qp->comp.task);
- rxe_disable_task(&qp->req.task);
+ rxe_disable_task(&qp->recv_task);
+ rxe_disable_task(&qp->send_task);
/* drain work and packet queues */
- rxe_requester(qp);
- rxe_completer(qp);
- rxe_responder(qp);
+ rxe_sender(qp);
+ rxe_receiver(qp);
if (qp->rq.queue)
rxe_queue_reset(qp->rq.queue);
@@ -548,9 +545,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
cleanup_rd_atomic_resources(qp);
/* reenable tasks */
- rxe_enable_task(&qp->resp.task);
- rxe_enable_task(&qp->comp.task);
- rxe_enable_task(&qp->req.task);
+ rxe_enable_task(&qp->recv_task);
+ rxe_enable_task(&qp->send_task);
}
/* move the qp to the error state */
@@ -562,9 +558,8 @@ void rxe_qp_error(struct rxe_qp *qp)
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
- rxe_sched_task(&qp->resp.task);
- rxe_sched_task(&qp->comp.task);
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->recv_task);
+ rxe_sched_task(&qp->send_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -575,8 +570,7 @@ static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.sq_draining = 1;
- rxe_sched_task(&qp->comp.task);
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -821,19 +815,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
del_timer_sync(&qp->rnr_nak_timer);
}
- if (qp->resp.task.func)
- rxe_cleanup_task(&qp->resp.task);
+ if (qp->recv_task.func)
+ rxe_cleanup_task(&qp->recv_task);
- if (qp->req.task.func)
- rxe_cleanup_task(&qp->req.task);
-
- if (qp->comp.task.func)
- rxe_cleanup_task(&qp->comp.task);
+ if (qp->send_task.func)
+ rxe_cleanup_task(&qp->send_task);
/* flush out any receive wr's or pending requests */
- rxe_requester(qp);
- rxe_completer(qp);
- rxe_responder(qp);
+ rxe_sender(qp);
+ rxe_receiver(qp);
if (qp->sq.queue)
rxe_queue_cleanup(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d8c41fd626a9..cd14c4c2dff9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -108,7 +108,7 @@ void rnr_nak_timer(struct timer_list *t)
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
}
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -545,6 +545,8 @@ static void update_wqe_state(struct rxe_qp *qp,
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
+ else
+ wqe->state = wqe_state_done;
} else {
wqe->state = wqe_state_processing;
}
@@ -573,30 +575,6 @@ static void update_wqe_psn(struct rxe_qp *qp,
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
-static void save_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 *rollback_psn)
-{
- rollback_wqe->state = wqe->state;
- rollback_wqe->first_psn = wqe->first_psn;
- rollback_wqe->last_psn = wqe->last_psn;
- rollback_wqe->dma = wqe->dma;
- *rollback_psn = qp->req.psn;
-}
-
-static void rollback_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 rollback_psn)
-{
- wqe->state = rollback_wqe->state;
- wqe->first_psn = rollback_wqe->first_psn;
- wqe->last_psn = rollback_wqe->last_psn;
- wqe->dma = rollback_wqe->dma;
- qp->req.psn = rollback_psn;
-}
-
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
qp->req.opcode = pkt->opcode;
@@ -655,12 +633,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- /* There is no ack coming for local work requests
- * which can lead to a deadlock. So go ahead and complete
- * it now.
- */
- rxe_sched_task(&qp->comp.task);
-
return 0;
}
@@ -676,8 +648,6 @@ int rxe_requester(struct rxe_qp *qp)
int opcode;
int err;
int ret;
- struct rxe_send_wqe rollback_wqe;
- u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue;
struct rxe_ah *ah;
struct rxe_av *av;
@@ -786,7 +756,6 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;
@@ -799,9 +768,6 @@ int rxe_requester(struct rxe_qp *qp)
pkt.mask = rxe_opcode[opcode].mask;
pkt.wqe = wqe;
- /* save wqe state before we build and send packet */
- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
-
av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) {
rxe_dbg_qp(qp, "Failed no address vector\n");
@@ -834,31 +800,14 @@ int rxe_requester(struct rxe_qp *qp)
if (ah)
rxe_put(ah);
- /* update wqe state as though we had sent it */
- update_wqe_state(qp, wqe, &pkt);
- update_wqe_psn(qp, wqe, &pkt, payload);
-
err = rxe_xmit_packet(qp, &pkt, skb);
if (err) {
- if (err != -EAGAIN) {
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
- }
-
- /* the packet was dropped so reset wqe to the state
- * before we sent it so we can try to resend
- */
- rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
-
- /* force a delay until the dropped packet is freed and
- * the send queue is drained below the low water mark
- */
- qp->need_req_skb = 1;
-
- rxe_sched_task(&qp->req.task);
- goto exit;
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
}
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
update_state(qp, &pkt);
/* A non-zero return value will cause rxe_do_task to
@@ -878,3 +827,20 @@ exit:
out:
return ret;
}
+
+int rxe_sender(struct rxe_qp *qp)
+{
+ int req_ret;
+ int comp_ret;
+
+ /* process the send queue */
+ req_ret = rxe_requester(qp);
+
+ /* process the response queue */
+ comp_ret = rxe_completer(qp);
+
+ /* exit the task loop if both requester and completer
+ * are ready
+ */
+ return (req_ret && comp_ret) ? -EAGAIN : 0;
+}
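
rxe_sender() merges the old requester and completer tasks into one send-side task: it runs the requester step, then the completer step, and only returns -EAGAIN ("nothing left, stop iterating") when both report idle, which is why the scheduling points earlier in this series set qp->req.again or reschedule send_task instead of poking two tasks. A standalone sketch of the combined return-code logic (requester_step()/completer_step() are illustrative stand-ins):

#include <errno.h>
#include <stdio.h>

/* Each sub-step returns 0 to request another iteration, -EAGAIN when idle. */
static int requester_step(int more_wqes)
{
	return more_wqes ? 0 : -EAGAIN;
}

static int completer_step(int more_acks)
{
	return more_acks ? 0 : -EAGAIN;
}

/* Keep the task looping while either side still has work. */
static int sender(int more_wqes, int more_acks)
{
	int req_ret = requester_step(more_wqes);
	int comp_ret = completer_step(more_acks);

	return (req_ret && comp_ret) ? -EAGAIN : 0;
}

int main(void)
{
	printf("both busy: %d\n", sender(1, 1));	/* 0: run again */
	printf("one busy:  %d\n", sender(1, 0));	/* 0: run again */
	printf("both idle: %d\n", sender(0, 0));	/* -EAGAIN: exit task loop */
	return 0;
}
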
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 963382f625d7..c6a7fa3054fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
- int must_sched;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
-
skb_queue_tail(&qp->req_pkts, skb);
-
- must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
- (skb_queue_len(&qp->req_pkts) > 1);
-
- if (must_sched)
- rxe_sched_task(&qp->resp.task);
- else
- rxe_run_task(&qp->resp.task);
+ rxe_sched_task(&qp->recv_task);
}
static inline enum resp_states get_req(struct rxe_qp *qp,
@@ -1485,7 +1475,7 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
qp->resp.wqe = NULL;
}
-int rxe_responder(struct rxe_qp *qp)
+int rxe_receiver(struct rxe_qp *qp)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 614581989b38..c7d4d8ab5a09 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
{
int err = 0;
unsigned long flags;
+ int good = 0;
spin_lock_irqsave(&qp->sq.sq_lock, flags);
while (ibwr) {
@@ -895,18 +896,16 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
if (err) {
*bad_wr = ibwr;
break;
+ } else {
+ good++;
}
ibwr = ibwr->next;
}
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
- if (!err)
- rxe_sched_task(&qp->req.task);
-
- spin_lock_irqsave(&qp->state_lock, flags);
- if (qp_state(qp) == IB_QPS_ERR)
- rxe_sched_task(&qp->comp.task);
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ /* kickoff processing of any posted wqes */
+ if (good)
+ rxe_sched_task(&qp->send_task);
return err;
}
@@ -936,7 +935,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
if (qp->is_user) {
/* Utilize process context to do protocol processing */
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
} else {
err = rxe_post_send_kernel(qp, wr, bad_wr);
if (err)
@@ -1046,7 +1045,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_ERR)
- rxe_sched_task(&qp->resp.task);
+ rxe_sched_task(&qp->recv_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
return err;
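
rxe_post_send_kernel() now counts the WRs that were actually queued and kicks the unified send_task whenever that count is non-zero, rather than only when the whole chain posted cleanly; the separate error-state kick of the completer task is gone because the completer now runs inside send_task. A small sketch of the "post until first failure, schedule if anything was queued" pattern (made-up types, not the verbs API):

#include <errno.h>
#include <stdio.h>

struct wr {
	int id;
	int bad;		/* force a validation failure for the demo */
	struct wr *next;
};

static int post_one(const struct wr *wr)
{
	return wr->bad ? -EINVAL : 0;
}

/* Queue WRs until the first failure; kick processing if anything was queued. */
static int post_send(struct wr *wr, const struct wr **bad_wr)
{
	int err = 0;
	int good = 0;

	while (wr) {
		err = post_one(wr);
		if (err) {
			*bad_wr = wr;
			break;
		}
		good++;
		wr = wr->next;
	}

	if (good)
		printf("scheduling send task for %d queued WR(s)\n", good);
	return err;
}

int main(void)
{
	struct wr c = { .id = 3, .bad = 1 };
	struct wr b = { .id = 2, .next = &c };
	struct wr a = { .id = 1, .next = &b };
	const struct wr *bad = NULL;
	int err = post_send(&a, &bad);

	printf("err=%d, bad WR id=%d\n", err, bad ? bad->id : 0);
	return 0;
}
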
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ccb9d19ffe8a..3c1354f82283 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -113,7 +113,7 @@ struct rxe_req_info {
int need_retry;
int wait_for_rnr_timer;
int noack_pkts;
- struct rxe_task task;
+ int again;
};
struct rxe_comp_info {
@@ -124,7 +124,6 @@ struct rxe_comp_info {
int started_retry;
u32 retry_cnt;
u32 rnr_retry;
- struct rxe_task task;
};
enum rdatm_res_state {
@@ -196,7 +195,6 @@ struct rxe_resp_info {
unsigned int res_head;
unsigned int res_tail;
struct resp_res *res;
- struct rxe_task task;
};
struct rxe_qp {
@@ -229,6 +227,9 @@ struct rxe_qp {
struct sk_buff_head req_pkts;
struct sk_buff_head resp_pkts;
+ struct rxe_task send_task;
+ struct rxe_task recv_task;
+
struct rxe_req_info req;
struct rxe_comp_info comp;
struct rxe_resp_info resp;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6f2a688fccbf..4e31bb0b6466 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -238,7 +238,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
priv->mcast_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -265,7 +265,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
if (carrier_status)
netif_carrier_on(dev);
} else {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
}
return ret;
@@ -329,8 +329,7 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
- if (master)
- dev_hold(master);
+ dev_hold(master);
rcu_read_unlock();
if (master)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4bd161e86f8d..562df2b3ef18 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = ipoib_priv(pdev);
- snprintf(intf_name, sizeof(intf_name), "%s.%04x",
- ppriv->dev->name, pkey);
+ /* If you increase IFNAMSIZ, update snprintf below
+ * to allow longer names.
+ */
+ BUILD_BUG_ON(IFNAMSIZ != 16);
+ snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
+ pkey);
ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (IS_ERR(ndev)) {
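
The snprintf change above is pure arithmetic: with IFNAMSIZ == 16, "%.10s.%04x" emits at most 10 parent characters, a dot, four hex digits and the NUL, i.e. exactly 16 bytes, so the child name always fits; the BUILD_BUG_ON turns any future IFNAMSIZ change into a compile error at this spot. A userspace sketch with a local IFNAMSIZ and static_assert standing in for BUILD_BUG_ON:

#include <assert.h>
#include <stdio.h>

#define IFNAMSIZ 16	/* local copy of the kernel constant for the demo */

int main(void)
{
	char intf_name[IFNAMSIZ];
	const char *parent = "ibverylongname0";	/* 15 chars, would overflow unbounded */
	unsigned short pkey = 0x8001;

	/* 10 parent chars + '.' + 4 hex digits + NUL == 16 == IFNAMSIZ */
	static_assert(IFNAMSIZ == 16, "update the precision below");
	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", parent, pkey);
	printf("%s\n", intf_name);	/* "ibverylong.8001" */
	return 0;
}
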
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index f50848ed5575..6fadaddb2b90 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -208,6 +208,7 @@ static const struct xpad_device {
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
@@ -487,6 +488,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */
+ XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */
XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 3c9bbd04e143..5b9be2957746 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(acpi, atlas_device_ids);
static struct acpi_driver atlas_acpi_driver = {
.name = ACPI_ATLAS_NAME,
.class = ACPI_ATLAS_CLASS,
- .owner = THIS_MODULE,
.ids = atlas_device_ids,
.ops = {
.add = atlas_acpi_button_add,
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index cda0c3ff5a28..2fbbaeb76d70 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -132,7 +132,13 @@ static void __exit amimouse_remove(struct platform_device *pdev)
input_unregister_device(dev);
}
-static struct platform_driver amimouse_driver = {
+/*
+ * amimouse_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amimouse_driver __refdata = {
.remove_new = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8fbfa448be4a..496bb7a312d2 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,7 +195,6 @@ MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
- .owner = THIS_MODULE,
.pm = pm_sleep_ptr(&amba_kmi_dev_pm_ops),
},
.id_table = amba_kmi_idtable,
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 64590b86eb37..a8f4b2d70e59 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -15,10 +15,7 @@
* IRQs.
*/
-#ifdef __alpha__
-# define I8042_KBD_IRQ 1
-# define I8042_AUX_IRQ (RTC_PORT(0) == 0x170 ? 9 : 12) /* Jensen is special */
-#elif defined(__arm__)
+#if defined(__arm__)
/* defined in include/asm-arm/arch-xxx/irqs.h */
#include <asm/irq.h>
#elif defined(CONFIG_PPC)
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5d1010cafed8..7e9b996b47c8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
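
path_init() and icc_put() now also take icc_bw_lock around the provider->users accounting and the per-node req_list updates, presumably so that whatever aggregates bandwidth under icc_bw_lock never sees a path that is half built or half torn down. A minimal pthread sketch of putting both sides of such an update under one mutex (hypothetical names, not the interconnect API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bw_lock = PTHREAD_MUTEX_INITIALIZER;
static int users;			/* stands in for node->provider->users */

static void path_get(void)
{
	pthread_mutex_lock(&bw_lock);
	users++;			/* plus req_list insertion in the real code */
	pthread_mutex_unlock(&bw_lock);
}

static void path_put(void)
{
	pthread_mutex_lock(&bw_lock);
	if (users)
		users--;		/* plus req_list removal in the real code */
	pthread_mutex_unlock(&bw_lock);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		path_get();
		path_put();
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("users after both workers: %d\n", users);	/* always 0 */
	return 0;
}
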
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index 99824675ee3f..654abb9ce08e 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_master = {
- .name = "ddr_perf_mode_master",
- .id = X1E80100_MASTER_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@@ -688,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_slave = {
- .name = "ddr_perf_mode_slave",
- .id = X1E80100_SLAVE_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1377,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_acv_perf = {
- .name = "ACV_PERF",
- .num_nodes = 1,
- .nodes = { &ddr_perf_mode_slave },
-};
-
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@@ -1583,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 0af39bbbe3a3..c04584be3089 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -376,13 +376,17 @@ config ARM_SMMU_QCOM
config ARM_SMMU_QCOM_DEBUG
bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM
+ depends on ARM_SMMU_QCOM=y
help
Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms.
+ hardware found in QTI platforms. This includes support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
- Say Y here to enable debug for issues such as TLB sync timeouts
- which requires implementation defined register dumps.
+ Say Y here to enable debug for issues such as context faults
+ or TLB sync timeouts which require implementation defined
+ register dumps.
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
@@ -397,9 +401,9 @@ config ARM_SMMU_V3
Say Y here if your system includes an IOMMU device implementing
the ARM SMMUv3 architecture.
+if ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
- depends on ARM_SMMU_V3
select IOMMU_SVA
select IOMMU_IOPF
select MMU_NOTIFIER
@@ -410,6 +414,17 @@ config ARM_SMMU_V3_SVA
Say Y here if your system supports SVA extensions such as PCIe PASID
and PRI.
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+endif
+
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 443b2c13c37b..994063e5586f 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -7,9 +7,12 @@ config AMD_IOMMU
select PCI_ATS
select PCI_PRI
select PCI_PASID
+ select MMU_NOTIFIER
select IOMMU_API
select IOMMU_IOVA
select IOMMU_IO_PGTABLE
+ select IOMMU_SVA
+ select IOMMU_IOPF
select IOMMUFD_DRIVER if IOMMUFD
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index f454fbb1569e..9de33b2d42f5 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index f482aab420f7..2fde1302a584 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -17,10 +17,16 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+ u8 cntrl_intr, u8 cntrl_log,
+ u32 status_run_mask, u32 status_overflow_mask);
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+ gfp_t gfp, size_t size);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -33,22 +39,47 @@ int amd_iommu_prepare(void);
int amd_iommu_enable(void);
void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
-int amd_iommu_enable_faulting(void);
+int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
-bool amd_iommu_v2_supported(void);
-
-/* Device capabilities */
-int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
-void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+/* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+ struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain);
+
+/* SVA/PASID */
+bool amd_iommu_pasid_supported(void);
+
+/* IOPF */
+int amd_iommu_iopf_init(struct amd_iommu *iommu);
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
/* GCR3 setup */
int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
ioasid_t pasid, unsigned long gcr3);
int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+/* PPR */
+int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu);
+void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_enable_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_poll_ppr_log(struct amd_iommu *iommu);
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
@@ -56,6 +87,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
+void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
@@ -73,9 +105,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
}
#endif
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag);
-
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
@@ -134,14 +163,6 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
-{
- struct page *page;
-
- page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
- return page ? page_address(page) : NULL;
-}
-
/*
* This must be called after device probe completes. During probe
* use rlookup_amd_iommu() get the iommu.
@@ -157,6 +178,11 @@ static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_dat
return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
}
+static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index d1fed5fc219b..2b76b5dedc1d 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -8,7 +8,9 @@
#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H
+#include <linux/iommu.h>
#include <linux/types.h>
+#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
@@ -251,6 +253,14 @@
#define PPR_ENTRY_SIZE 16
#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
+#define PPR_FLAG_EXEC 0x002 /* Execute permission requested */
+#define PPR_FLAG_READ 0x004 /* Read permission requested */
+#define PPR_FLAG_WRITE 0x020 /* Write permission requested */
+#define PPR_FLAG_US 0x040 /* 1: User, 0: Supervisor */
+#define PPR_FLAG_RVSD 0x080 /* Reserved bit not zero */
+#define PPR_FLAG_GN 0x100 /* GVA and PASID is valid */
+
#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x) ((x) & 0xffffULL)
@@ -503,6 +513,11 @@ extern struct kmem_cache *amd_iommu_irq_cache;
list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+/* Making iterating over protection_domain->dev_data_list easier */
+#define for_each_pdom_dev_data(pdom_dev_data, pdom) \
+ list_for_each_entry(pdom_dev_data, &pdom->dev_data_list, list)
+#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
+ list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
struct amd_iommu;
struct iommu_domain;
@@ -544,6 +559,16 @@ enum protection_domain_mode {
PD_MODE_V2,
};
+/* Track dev_data/PASID list for the protection domain */
+struct pdom_dev_data {
+ /* Points to attached device data */
+ struct iommu_dev_data *dev_data;
+ /* PASID attached to the protection domain */
+ ioasid_t pasid;
+ /* For protection_domain->dev_data_list */
+ struct list_head list;
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -560,6 +585,9 @@ struct protection_domain {
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+
+ struct mmu_notifier mn; /* mmu notifier for the SVA domain */
+ struct list_head dev_data_list; /* List of pdom_dev_data */
};
/*
@@ -762,6 +790,10 @@ struct amd_iommu {
/* DebugFS Info */
struct dentry *debugfs;
#endif
+
+ /* IOPF support */
+ struct iopf_queue *iopf_queue;
+ unsigned char iopfq_name[32];
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -813,6 +845,7 @@ struct iommu_dev_data {
struct device *dev;
u16 devid; /* PCI Device ID */
+ u32 max_pasids; /* Max supported PASIDs */
u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
int ats_qdep;
u8 ats_enabled :1; /* ATS state */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index e7a44929f0da..a18e74878f68 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -36,6 +36,7 @@
#include "amd_iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
/*
* definitions for the ACPI scanning code
@@ -419,7 +420,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
}
/* Generic functions to enable/disable certain features of the IOMMU. */
-static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
u64 ctrl;
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- free_pages((unsigned long)pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+ get_order(pci_seg->rlookup_table_size));
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- free_pages((unsigned long)pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ iommu_free_pages(pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+ get_order(pci_seg->rlookup_table_size));
kmemleak_alloc(pci_seg->irq_lookup_table,
pci_seg->rlookup_table_size, 1, GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
@@ -699,8 +698,8 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
kmemleak_free(pci_seg->irq_lookup_table);
- free_pages((unsigned long)pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ iommu_free_pages(pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
pci_seg->irq_lookup_table = NULL;
}
@@ -708,8 +707,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- free_pages((unsigned long)pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ iommu_free_pages(pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
pci_seg->alias_table = NULL;
}
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+ get_order(CMD_BUFFER_SIZE));
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -746,9 +745,9 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
* Interrupt handler has processed all pending events and adjusted head
* and tail pointer. Reset overflow mask and restart logging again.
*/
-static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
- u8 cntrl_intr, u8 cntrl_log,
- u32 status_run_mask, u32 status_overflow_mask)
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+ u8 cntrl_intr, u8 cntrl_log,
+ u32 status_run_mask, u32 status_overflow_mask)
{
u32 status;
@@ -790,17 +789,6 @@ void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
}
/*
- * This function restarts ppr logging in case the IOMMU experienced
- * PPR log overflow.
- */
-void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
-{
- amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
- CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
- MMIO_STATUS_PPR_OVERFLOW_MASK);
-}
-
-/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
@@ -845,19 +833,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
-static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
- gfp_t gfp, size_t size)
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
+ size_t size)
{
int order = get_order(size);
- void *buf = (void *)__get_free_pages(gfp, order);
+ void *buf = iommu_alloc_pages(gfp, order);
if (buf &&
check_feature(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
- free_pages((unsigned long)buf, order);
+ iommu_free_pages(buf, order);
buf = NULL;
}
@@ -867,7 +855,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
- iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
EVT_BUFFER_SIZE);
return iommu->evt_buf ? 0 : -ENOMEM;
@@ -901,50 +889,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
-}
-
-/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_ppr_log(struct amd_iommu *iommu)
-{
- iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
- PPR_LOG_SIZE);
-
- return iommu->ppr_log ? 0 : -ENOMEM;
-}
-
-static void iommu_enable_ppr_log(struct amd_iommu *iommu)
-{
- u64 entry;
-
- if (iommu->ppr_log == NULL)
- return;
-
- iommu_feature_enable(iommu, CONTROL_PPR_EN);
-
- entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
-
- memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
- &entry, sizeof(entry));
-
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
- iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
- iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-}
-
-static void __init free_ppr_log(struct amd_iommu *iommu)
-{
- free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
- free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+ iommu_free_pages(iommu->ga_log_tail, get_order(8));
#endif
}
@@ -989,13 +941,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
if (!iommu->ga_log_tail)
goto err_out;
@@ -1008,7 +958,7 @@ err_out:
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
- iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+ iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
return iommu->cmd_sem ? 0 : -ENOMEM;
}
@@ -1016,7 +966,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- free_page((unsigned long)iommu->cmd_sem);
+ iommu_free_page((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1081,7 +1031,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
u32 lo, hi, devid, old_devtb_size;
phys_addr_t old_devtb_phys;
u16 dom_id, dte_v, irq_v;
- gfp_t gfp_flag;
u64 tmp;
/* Each IOMMU use separate device table with the same size */
@@ -1115,9 +1064,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
- pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1683,9 +1631,10 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
- free_ppr_log(iommu);
+ amd_iommu_free_ppr_log(iommu);
free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
+ amd_iommu_iopf_uninit(iommu);
}
static void __init free_iommu_all(void)
@@ -2097,9 +2046,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_max_glx_val = glxval;
else
amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+
+ iommu_enable_gt(iommu);
}
- if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
+ if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu))
return -ENOMEM;
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2155,6 +2106,16 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (ret)
return ret;
+ /*
+ * Allocate per IOMMU IOPF queue here so that in attach device path,
+ * PRI capable device can be added to IOPF queue
+ */
+ if (amd_iommu_gt_ppr_supported()) {
+ ret = amd_iommu_iopf_init(iommu);
+ if (ret)
+ return ret;
+ }
+
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
@@ -2773,7 +2734,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
- iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2805,8 +2765,8 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2819,8 +2779,8 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- free_pages((unsigned long)pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
@@ -2830,7 +2790,6 @@ static void early_enable_iommus(void)
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
- iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2840,12 +2799,15 @@ static void early_enable_iommus(void)
}
}
-static void enable_iommus_v2(void)
+static void enable_iommus_ppr(void)
{
struct amd_iommu *iommu;
+ if (!amd_iommu_gt_ppr_supported())
+ return;
+
for_each_iommu(iommu)
- iommu_enable_ppr_log(iommu);
+ amd_iommu_enable_ppr_log(iommu);
}
static void enable_iommus_vapic(void)
@@ -3022,8 +2984,8 @@ static bool __init check_ioapic_information(void)
static void __init free_dma_resources(void)
{
- free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
- get_order(MAX_DOMAIN_ID/8));
+ iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+ get_order(MAX_DOMAIN_ID / 8));
amd_iommu_pd_alloc_bitmap = NULL;
free_unity_maps();
@@ -3095,9 +3057,8 @@ static int __init early_amd_iommu_init(void)
/* Device table - directly used by all IOMMUs */
ret = -ENOMEM;
- amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(MAX_DOMAIN_ID/8));
+ amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+ get_order(MAX_DOMAIN_ID / 8));
if (amd_iommu_pd_alloc_bitmap == NULL)
goto out;
@@ -3181,7 +3142,7 @@ static int amd_iommu_enable_interrupts(void)
* PPR and GA log interrupt for all IOMMUs.
*/
enable_iommus_vapic();
- enable_iommus_v2();
+ enable_iommus_ppr();
out:
return ret;
@@ -3228,30 +3189,33 @@ out:
static void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return;
/*
* The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
+ * configured with V1 page table (DTE[Mode] = 0 is not supported).
*/
if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ goto disable_snp;
}
amd_iommu_snp_en = check_feature(FEATURE_SNP);
if (!amd_iommu_snp_en) {
- pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ goto disable_snp;
}
pr_info("IOMMU SNP support enabled.\n");
+ return;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
- }
+disable_snp:
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
#endif
}
@@ -3389,7 +3353,7 @@ int amd_iommu_reenable(int mode)
return 0;
}
-int __init amd_iommu_enable_faulting(void)
+int __init amd_iommu_enable_faulting(unsigned int cpu)
{
/* We enable MSI later when PCI is initialized */
return 0;
@@ -3687,7 +3651,7 @@ __setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
-bool amd_iommu_v2_supported(void)
+bool amd_iommu_pasid_supported(void)
{
/* CPU page table size should match IOMMU guest page table size */
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 2a0d1e97e52f..9d9a7fde59e7 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -22,6 +22,7 @@
#include "amd_iommu_types.h"
#include "amd_iommu.h"
+#include "../iommu-pages.h"
static void v1_tlb_flush_all(void *cookie)
{
@@ -156,7 +157,7 @@ static bool increase_address_space(struct protection_domain *domain,
bool ret = true;
u64 *pte;
- pte = alloc_pgtable_page(domain->nid, gfp);
+ pte = iommu_alloc_page_node(domain->nid, gfp);
if (!pte)
return false;
@@ -187,7 +188,7 @@ static bool increase_address_space(struct protection_domain *domain,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- free_page((unsigned long)pte);
+ iommu_free_page(pte);
return ret;
}
@@ -250,7 +251,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = alloc_pgtable_page(domain->nid, gfp);
+ page = iommu_alloc_page_node(domain->nid, gfp);
if (!page)
return NULL;
@@ -259,7 +260,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- free_page((unsigned long)page);
+ iommu_free_page(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -431,7 +432,7 @@ out:
}
/* Everything flushed out, free pages now */
- put_pages_list(&freelist);
+ iommu_put_pages_list(&freelist);
return ret;
}
@@ -580,7 +581,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
/* Make changes visible to IOMMUs */
amd_iommu_domain_update(dom);
- put_pages_list(&freelist);
+ iommu_put_pages_list(&freelist);
}
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 93489d2db4e8..78ac37c5ccc1 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -18,6 +18,7 @@
#include "amd_iommu_types.h"
#include "amd_iommu.h"
+#include "../iommu-pages.h"
#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
@@ -99,11 +100,6 @@ static inline int page_size_to_level(u64 pg_size)
return PAGE_MODE_1_LEVEL;
}
-static inline void free_pgtable_page(u64 *pt)
-{
- free_page((unsigned long)pt);
-}
-
static void free_pgtable(u64 *pt, int level)
{
u64 *p;
@@ -125,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- free_pgtable_page(p);
+ iommu_free_page(p);
}
- free_pgtable_page(pt);
+ iommu_free_page(pt);
}
/* Allocate page table */
@@ -156,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = alloc_pgtable_page(nid, gfp);
+ page = iommu_alloc_page_node(nid, gfp);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (cmpxchg64(pte, __pte, __npte) != __pte)
- free_pgtable_page(page);
+ iommu_free_page(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -185,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- free_pgtable_page(__pte);
+ iommu_free_page(__pte);
}
return pte;
@@ -366,7 +362,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct protection_domain *pdom = (struct protection_domain *)cookie;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
+ pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC);
if (!pgtable->pgd)
return NULL;
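
The io_pgtable conversions above all follow the same pattern: raw get_free_pages()/free_page() calls are swapped for the shared helpers in drivers/iommu/iommu-pages.h. Below is a minimal sketch of the before/after shape, using only the helpers that actually appear in these hunks; the surrounding function names are illustrative and not part of this patch.

#include <linux/gfp.h>
#include <linux/types.h>

#include "../iommu-pages.h"

/* Illustrative only: mirrors the conversion applied throughout these hunks. */
static u64 *example_alloc_table(int nid, gfp_t gfp)
{
	/* was: (u64 *)get_zeroed_page(gfp) or __get_free_pages(gfp, 0) */
	return iommu_alloc_page_node(nid, gfp);
}

static void example_free_table(u64 *pt)
{
	/* was: free_page((unsigned long)pt); no cast is needed any more */
	iommu_free_page(pt);
}
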
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d35c1b8c8e65..52d83730a22a 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -42,6 +42,7 @@
#include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -89,6 +90,21 @@ static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
+static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
+{
+ return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
+}
+
+/*
+ * We cannot support PASID with an existing v1 page table in the same
+ * domain since that would require nesting. However, an existing domain
+ * with a v2 page table, or one in passthrough mode, can be used for PASID.
+ */
+static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
+{
+ return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
+}
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -179,11 +195,6 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
-{
- return container_of(dom, struct protection_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -384,7 +395,7 @@ static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
}
}
-int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
int ret = -EINVAL;
@@ -392,6 +403,9 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
if (dev_data->pri_enabled)
return 0;
+ if (!dev_data->ats_enabled)
+ return 0;
+
if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
/*
* First reset the PRI state of the device.
@@ -408,7 +422,7 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
return ret;
}
-void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -450,15 +464,14 @@ static void pdev_enable_caps(struct pci_dev *pdev)
{
pdev_enable_cap_ats(pdev);
pdev_enable_cap_pasid(pdev);
- amd_iommu_pdev_enable_cap_pri(pdev);
-
+ pdev_enable_cap_pri(pdev);
}
static void pdev_disable_caps(struct pci_dev *pdev)
{
pdev_disable_cap_ats(pdev);
pdev_disable_cap_pasid(pdev);
- amd_iommu_pdev_disable_cap_pri(pdev);
+ pdev_disable_cap_pri(pdev);
}
/*
@@ -818,59 +831,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
-static void iommu_poll_ppr_log(struct amd_iommu *iommu)
-{
- u32 head, tail;
-
- if (iommu->ppr_log == NULL)
- return;
-
- head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
- while (head != tail) {
- volatile u64 *raw;
- u64 entry[2];
- int i;
-
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is
- * written to memory. If this happens we need to wait for the
- * entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
- /* Avoid memcpy function-call overhead */
- entry[0] = raw[0];
- entry[1] = raw[1];
-
- /*
- * To detect the hardware errata 733 we need to clear the
- * entry back to zero. This issue does not exist on SNP
- * enabled system. Also this buffer is not writeable on
- * SNP enabled system.
- */
- if (!amd_iommu_snp_en)
- raw[0] = raw[1] = 0UL;
-
- /* Update head pointer of hardware ring-buffer */
- head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
- writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-
- /* TODO: PPR Handler will be added when we add IOPF support */
-
- /* Refresh ring-buffer information */
- head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
- }
-}
-
#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);
@@ -991,7 +951,7 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{
amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
MMIO_STATUS_PPR_OVERFLOW_MASK,
- iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+ amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
return IRQ_HANDLED;
}
@@ -1664,15 +1624,14 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_all(domain);
}
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
struct iommu_cmd cmd;
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = get_amd_iommu_from_dev(&pdev->dev);
+ dev_data = dev_iommu_priv_get(dev);
+ iommu = get_amd_iommu_from_dev(dev);
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -1692,26 +1651,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
static void free_gcr3_tbl_level1(u64 *tbl)
@@ -1725,7 +1687,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- free_page((unsigned long)ptr);
+ iommu_free_page(ptr);
}
}
@@ -1758,7 +1720,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
domain_id_free(gcr3_info->domid);
- free_page((unsigned long)gcr3_info->gcr3_tbl);
+ iommu_free_page(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1793,7 +1755,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
/* Allocate per device domain ID */
gcr3_info->domid = domain_id_alloc();
- gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
if (gcr3_info->gcr3_tbl == NULL) {
domain_id_free(gcr3_info->domid);
return -ENOMEM;
@@ -1999,10 +1961,78 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
+/* Update and flush DTE for the given device */
+void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
+{
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ if (set)
+ set_dte_entry(iommu, dev_data);
+ else
+ clear_dte_entry(iommu, dev_data->devid);
+
+ clone_aliases(iommu, dev_data->dev);
+ device_flush_dte(dev_data);
+ iommu_completion_wait(iommu);
+}
+
+/*
+ * If the domain is SVA capable then initialize the GCR3 table. If the
+ * domain is also in v2 page table mode then update GCR3[0].
+ */
+static int init_gcr3_table(struct iommu_dev_data *dev_data,
+ struct protection_domain *pdom)
+{
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int max_pasids = dev_data->max_pasids;
+ int ret = 0;
+
+ /*
+ * If the domain is in passthrough (pt) mode then set up the GCR3 table
+ * only if the device is PASID capable.
+ */
+ if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
+ return ret;
+
+ /*
+ * By default, set up the GCR3 table to support the maximum number of
+ * PASIDs supported by the device/IOMMU.
+ */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
+ max_pasids > 0 ? max_pasids : 1);
+ if (ret)
+ return ret;
+
+ /* Set up GCR3[0] only if the domain is configured with v2 page table mode */
+ if (!pdom_is_v2_pgtbl_mode(pdom))
+ return ret;
+
+ ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
+ if (ret)
+ free_gcr3_table(&dev_data->gcr3_info);
+
+ return ret;
+}
+
+static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
+ struct protection_domain *pdom)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (pdom_is_v2_pgtbl_mode(pdom))
+ update_gcr3(dev_data, 0, 0, false);
+
+ if (gcr3_info->gcr3_tbl == NULL)
+ return;
+
+ free_gcr3_table(gcr3_info);
+}
+
static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ struct pci_dev *pdev;
int ret = 0;
/* Update data structures */
@@ -2017,26 +2047,29 @@ static int do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- /* Init GCR3 table and update device table */
- if (domain->pd_mode == PD_MODE_V2) {
- /* By default, setup GCR3 table to support single PASID */
- ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ if (pdom_is_sva_capable(domain)) {
+ ret = init_gcr3_table(dev_data, domain);
if (ret)
return ret;
- ret = update_gcr3(dev_data, 0,
- iommu_virt_to_phys(domain->iop.pgd), true);
- if (ret) {
- free_gcr3_table(&dev_data->gcr3_info);
- return ret;
+ if (pdev) {
+ pdev_enable_caps(pdev);
+
+ /*
+ * The device can continue to function even if IOPF
+ * enablement fails, so in the error path just
+ * disable the device's PRI support.
+ */
+ if (amd_iommu_iopf_add_device(iommu, dev_data))
+ pdev_disable_cap_pri(pdev);
}
+ } else if (pdev) {
+ pdev_enable_cap_ats(pdev);
}
/* Update device table */
- set_dte_entry(iommu, dev_data);
- clone_aliases(iommu, dev_data->dev);
-
- device_flush_dte(dev_data);
+ amd_iommu_dev_update_dte(dev_data, true);
return ret;
}
@@ -2047,19 +2080,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
/* Clear GCR3 table */
- if (domain->pd_mode == PD_MODE_V2) {
- update_gcr3(dev_data, 0, 0, false);
- free_gcr3_table(&dev_data->gcr3_info);
- }
+ if (pdom_is_sva_capable(domain))
+ destroy_gcr3_table(dev_data, domain);
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(iommu, dev_data->devid);
- clone_aliases(iommu, dev_data->dev);
- /* Flush the DTE entry */
- device_flush_dte(dev_data);
+ /* Clear DTE and flush the entry */
+ amd_iommu_dev_update_dte(dev_data, false);
/* Flush IOTLB and wait for the flushes to finish */
amd_iommu_domain_flush_all(domain);
@@ -2091,9 +2120,6 @@ static int attach_device(struct device *dev,
goto out;
}
- if (dev_is_pci(dev))
- pdev_enable_caps(to_pci_dev(dev));
-
ret = do_attach(dev_data, domain);
out:
@@ -2109,12 +2135,11 @@ out:
*/
static void detach_device(struct device *dev)
{
- struct protection_domain *domain;
- struct iommu_dev_data *dev_data;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ struct protection_domain *domain = dev_data->domain;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
unsigned long flags;
-
- dev_data = dev_iommu_priv_get(dev);
- domain = dev_data->domain;
+ bool ppr = dev_data->ppr;
spin_lock_irqsave(&domain->lock, flags);
@@ -2129,8 +2154,19 @@ static void detach_device(struct device *dev)
if (WARN_ON(!dev_data->domain))
goto out;
+ if (ppr) {
+ iopf_queue_flush_dev(dev);
+
+ /* Clear dev_data->ppr here so that the change is reflected in the DTE */
+ dev_data->ppr = false;
+ }
+
do_detach(dev_data);
+ /* Remove IOPF handler */
+ if (ppr)
+ amd_iommu_iopf_remove_device(iommu, dev_data);
+
if (dev_is_pci(dev))
pdev_disable_caps(to_pci_dev(dev));
@@ -2144,6 +2180,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
+ struct iommu_dev_data *dev_data;
int ret;
if (!check_device(dev))
@@ -2170,18 +2207,22 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
iommu_dev = &iommu->iommu;
}
+ /*
+ * If both the IOMMU and the device support PASID, then dev_data->max_pasids
+ * holds the maximum number of supported PASIDs; otherwise it is zero.
+ */
+ dev_data = dev_iommu_priv_get(dev);
+ if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
+ pdev_pasid_supported(dev_data)) {
+ dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
+ pci_max_pasids(to_pci_dev(dev)));
+ }
+
iommu_completion_wait(iommu);
return iommu_dev;
}
-static void amd_iommu_probe_finalize(struct device *dev)
-{
- /* Domains are initialized for this device - have a look what we ended up with */
- set_dma_ops(dev, NULL);
- iommu_setup_dma_ops(dev, 0, U64_MAX);
-}
-
static void amd_iommu_release_device(struct device *dev)
{
struct amd_iommu *iommu;
@@ -2233,7 +2274,7 @@ static void cleanup_domain(struct protection_domain *domain)
WARN_ON(domain->dev_cnt != 0);
}
-static void protection_domain_free(struct protection_domain *domain)
+void protection_domain_free(struct protection_domain *domain)
{
if (!domain)
return;
@@ -2242,7 +2283,7 @@ static void protection_domain_free(struct protection_domain *domain)
free_io_pgtable_ops(&domain->iop.iop.ops);
if (domain->iop.root)
- free_page((unsigned long)domain->iop.root);
+ iommu_free_page(domain->iop.root);
if (domain->id)
domain_id_free(domain->id);
@@ -2257,7 +2298,7 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
if (mode != PAGE_MODE_NONE) {
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ pt_root = iommu_alloc_page(GFP_KERNEL);
if (!pt_root)
return -ENOMEM;
}
@@ -2276,7 +2317,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom)
return 0;
}
-static struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
@@ -2293,11 +2334,13 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
+ INIT_LIST_HEAD(&domain->dev_data_list);
domain->nid = NUMA_NO_NODE;
switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */
case IOMMU_DOMAIN_IDENTITY:
+ case IOMMU_DOMAIN_SVA:
return domain;
case IOMMU_DOMAIN_DMA:
pgtable = amd_iommu_pgtable;
@@ -2417,7 +2460,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
return do_iommu_domain_alloc(type, dev, flags);
}
-static void amd_iommu_domain_free(struct iommu_domain *dom)
+void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
unsigned long flags;
@@ -2751,6 +2794,10 @@ static int amd_iommu_def_domain_type(struct device *dev)
if (!dev_data)
return 0;
+ /* Always use DMA domain for untrusted device */
+ if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+ return IOMMU_DOMAIN_DMA;
+
/*
* Do not identity map IOMMUv2 capable devices when:
* - memory encryption is active, because some of those devices
@@ -2778,18 +2825,54 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
+static int amd_iommu_dev_enable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ int ret = 0;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ case IOMMU_DEV_FEAT_SVA:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int amd_iommu_dev_disable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ int ret = 0;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ case IOMMU_DEV_FEAT_SVA:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_user = amd_iommu_domain_alloc_user,
+ .domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
- .probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
+ .dev_enable_feat = amd_iommu_dev_enable_feature,
+ .dev_disable_feat = amd_iommu_dev_disable_feature,
+ .remove_dev_pasid = amd_iommu_remove_dev_pasid,
+ .page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.map_pages = amd_iommu_map_pages,
@@ -3701,20 +3784,11 @@ static struct irq_chip amd_ir_chip = {
};
static const struct msi_parent_ops amdvi_msi_parent_ops = {
- .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
- MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_IMS,
+ .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
.prefix = "IR-",
.init_dev_msi_info = msi_parent_init_dev_msi_info,
};
-static const struct msi_parent_ops virt_amdvi_msi_parent_ops = {
- .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
- MSI_FLAG_MULTI_PCI_MSI,
- .prefix = "vIR-",
- .init_dev_msi_info = msi_parent_init_dev_msi_info,
-};
-
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
struct fwnode_handle *fn;
@@ -3732,11 +3806,7 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
IRQ_DOMAIN_FLAG_ISOLATED_MSI;
-
- if (amd_iommu_np_cache)
- iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
- else
- iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
+ iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
return 0;
}
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
new file mode 100644
index 000000000000..a68215f2b3e1
--- /dev/null
+++ b/drivers/iommu/amd/pasid.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/iommu.h>
+#include <linux/mm_types.h>
+
+#include "amd_iommu.h"
+
+static inline bool is_pasid_enabled(struct iommu_dev_data *dev_data)
+{
+ if (dev_data->pasid_enabled && dev_data->max_pasids &&
+ dev_data->gcr3_info.gcr3_tbl != NULL)
+ return true;
+
+ return false;
+}
+
+static inline bool is_pasid_valid(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
+{
+ if (pasid > 0 && pasid < dev_data->max_pasids)
+ return true;
+
+ return false;
+}
+
+static void remove_dev_pasid(struct pdom_dev_data *pdom_dev_data)
+{
+ /* Update GCR3 table and flush IOTLB */
+ amd_iommu_clear_gcr3(pdom_dev_data->dev_data, pdom_dev_data->pasid);
+
+ list_del(&pdom_dev_data->list);
+ kfree(pdom_dev_data);
+}
+
+/* Clear PASID from device GCR3 table and remove pdom_dev_data from list */
+static void remove_pdom_dev_pasid(struct protection_domain *pdom,
+ struct device *dev, ioasid_t pasid)
+{
+ struct pdom_dev_data *pdom_dev_data;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
+ lockdep_assert_held(&pdom->lock);
+
+ for_each_pdom_dev_data(pdom_dev_data, pdom) {
+ if (pdom_dev_data->dev_data == dev_data &&
+ pdom_dev_data->pasid == pasid) {
+ remove_dev_pasid(pdom_dev_data);
+ break;
+ }
+ }
+}
+
+static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pdom_dev_data *pdom_dev_data;
+ struct protection_domain *sva_pdom;
+ unsigned long flags;
+
+ sva_pdom = container_of(mn, struct protection_domain, mn);
+
+ spin_lock_irqsave(&sva_pdom->lock, flags);
+
+ for_each_pdom_dev_data(pdom_dev_data, sva_pdom) {
+ amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data,
+ pdom_dev_data->pasid,
+ start, end - start);
+ }
+
+ spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct pdom_dev_data *pdom_dev_data, *next;
+ struct protection_domain *sva_pdom;
+ unsigned long flags;
+
+ sva_pdom = container_of(mn, struct protection_domain, mn);
+
+ spin_lock_irqsave(&sva_pdom->lock, flags);
+
+ /* dev_data_list may contain the same PASID bound to different devices */
+ for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom)
+ remove_dev_pasid(pdom_dev_data);
+
+ spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static const struct mmu_notifier_ops sva_mn = {
+ .arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs,
+ .release = sva_mn_release,
+};
+
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct pdom_dev_data *pdom_dev_data;
+ struct protection_domain *sva_pdom = to_pdomain(domain);
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ /* PASID zero is used for requests from the I/O device without a PASID */
+ if (!is_pasid_valid(dev_data, pasid))
+ return ret;
+
+ /* Make sure PASID is enabled */
+ if (!is_pasid_enabled(dev_data))
+ return ret;
+
+ /* Add PASID to protection domain pasid list */
+ pdom_dev_data = kzalloc(sizeof(*pdom_dev_data), GFP_KERNEL);
+ if (pdom_dev_data == NULL)
+ return ret;
+
+ pdom_dev_data->pasid = pasid;
+ pdom_dev_data->dev_data = dev_data;
+
+ spin_lock_irqsave(&sva_pdom->lock, flags);
+
+ /* Setup GCR3 table */
+ ret = amd_iommu_set_gcr3(dev_data, pasid,
+ iommu_virt_to_phys(domain->mm->pgd));
+ if (ret) {
+ kfree(pdom_dev_data);
+ goto out_unlock;
+ }
+
+ list_add(&pdom_dev_data->list, &sva_pdom->dev_data_list);
+
+out_unlock:
+ spin_unlock_irqrestore(&sva_pdom->lock, flags);
+ return ret;
+}
+
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
+{
+ struct protection_domain *sva_pdom;
+ unsigned long flags;
+
+ if (!is_pasid_valid(dev_iommu_priv_get(dev), pasid))
+ return;
+
+ sva_pdom = to_pdomain(domain);
+
+ spin_lock_irqsave(&sva_pdom->lock, flags);
+
+ /* Remove PASID from dev_data_list */
+ remove_pdom_dev_pasid(sva_pdom, dev, pasid);
+
+ spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void iommu_sva_domain_free(struct iommu_domain *domain)
+{
+ struct protection_domain *sva_pdom = to_pdomain(domain);
+
+ if (sva_pdom->mn.ops)
+ mmu_notifier_unregister(&sva_pdom->mn, domain->mm);
+
+ amd_iommu_domain_free(domain);
+}
+
+static const struct iommu_domain_ops amd_sva_domain_ops = {
+ .set_dev_pasid = iommu_sva_set_dev_pasid,
+ .free = iommu_sva_domain_free
+};
+
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+ struct mm_struct *mm)
+{
+ struct protection_domain *pdom;
+ int ret;
+
+ pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!pdom)
+ return ERR_PTR(-ENOMEM);
+
+ pdom->domain.ops = &amd_sva_domain_ops;
+ pdom->mn.ops = &sva_mn;
+
+ ret = mmu_notifier_register(&pdom->mn, mm);
+ if (ret) {
+ protection_domain_free(pdom);
+ return ERR_PTR(ret);
+ }
+
+ return &pdom->domain;
+}
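
With domain_alloc_sva() and set_dev_pasid() in place, a PASID-capable endpoint driver can bind a process address space through the generic SVA API rather than anything AMD-specific. The sketch below is hypothetical and not part of this patch: it assumes the generic iommu core entry points (iommu_dev_enable_feature(), iommu_sva_bind_device(), iommu_sva_get_pasid()) and elides error unwinding.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Hypothetical endpoint-driver snippet: bind current->mm to the device and
 * retrieve the PASID the device must use to tag its DMA.
 */
static int example_bind_process(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program 'pasid' into the device so its DMA carries it ... */

	return 0;
}

Teardown would mirror this with iommu_sva_unbind_device() on the handle followed by iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA).
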
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
new file mode 100644
index 000000000000..091423bb8aac
--- /dev/null
+++ b/drivers/iommu/amd/ppr.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/amd-iommu.h>
+#include <linux/delay.h>
+#include <linux/mmu_notifier.h>
+
+#include <asm/iommu.h>
+
+#include "amd_iommu.h"
+#include "amd_iommu_types.h"
+
+#include "../iommu-pages.h"
+
+int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
+{
+ iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ PPR_LOG_SIZE);
+ return iommu->ppr_log ? 0 : -ENOMEM;
+}
+
+void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+
+ entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
+}
+
+void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
+{
+ iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+/*
+ * This function restarts PPR logging in case the IOMMU experienced
+ * a PPR log overflow.
+ */
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
+ CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK);
+}
+
+static inline u32 ppr_flag_to_fault_perm(u16 flag)
+{
+ int perm = 0;
+
+ if (flag & PPR_FLAG_READ)
+ perm |= IOMMU_FAULT_PERM_READ;
+ if (flag & PPR_FLAG_WRITE)
+ perm |= IOMMU_FAULT_PERM_WRITE;
+ if (flag & PPR_FLAG_EXEC)
+ perm |= IOMMU_FAULT_PERM_EXEC;
+ if (!(flag & PPR_FLAG_US))
+ perm |= IOMMU_FAULT_PERM_PRIV;
+
+ return perm;
+}
+
+static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw)
+{
+ struct device *dev = iommu->iommu.dev;
+ u16 devid = PPR_DEVID(raw[0]);
+
+ if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) {
+ dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x "
+ "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
+ return false;
+ }
+
+ if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) {
+ dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x "
+ "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
+ return false;
+ }
+
+ return true;
+}
+
+static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw)
+{
+ struct iommu_dev_data *dev_data;
+ struct iopf_fault event;
+ struct pci_dev *pdev;
+ u16 devid = PPR_DEVID(raw[0]);
+
+ if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+ pr_info_ratelimited("Unknown PPR request received\n");
+ return;
+ }
+
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(devid), devid & 0xff);
+ if (!pdev)
+ return;
+
+ if (!ppr_is_valid(iommu, raw))
+ goto out;
+
+ memset(&event, 0, sizeof(struct iopf_fault));
+
+ event.fault.type = IOMMU_FAULT_PAGE_REQ;
+ event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0]));
+ event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK);
+ event.fault.prm.pasid = PPR_PASID(raw[0]);
+ event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF;
+
+ /*
+ * PASID zero is used for requests from the I/O device without
+ * a PASID
+ */
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ if (event.fault.prm.pasid == 0 ||
+ event.fault.prm.pasid >= dev_data->max_pasids) {
+ pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n",
+ event.fault.prm.pasid, pdev->dev.id);
+ goto out;
+ }
+
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ if (PPR_TAG(raw[0]) & 0x200)
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+
+ /* Submit event */
+ iommu_report_device_fault(&pdev->dev, &event);
+
+ return;
+
+out:
+ /* Nobody cared, abort */
+ amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]),
+ IOMMU_PAGE_RESP_FAILURE,
+ PPR_TAG(raw[0]) & 0x1FF);
+}
+
+void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+ u32 head, tail;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
+
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is
+ * written to memory. If this happens we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
+
+ /*
+ * To detect hardware erratum 733 we need to clear the
+ * entry back to zero. This issue does not exist on an
+ * SNP-enabled system; the buffer is also not writeable
+ * on such a system.
+ */
+ if (!amd_iommu_snp_en)
+ raw[0] = raw[1] = 0UL;
+
+ /* Update head pointer of hardware ring-buffer */
+ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /* Handle PPR entry */
+ iommu_call_iopf_notifier(iommu, entry);
+ }
+}
+
+/**************************************************************
+ *
+ * IOPF handling stuff
+ */
+
+/* Set up the per-IOMMU IOPF queue if it does not already exist. */
+int amd_iommu_iopf_init(struct amd_iommu *iommu)
+{
+ int ret = 0;
+
+ if (iommu->iopf_queue)
+ return ret;
+
+ snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
+ "amdiommu-%#x-iopfq",
+ PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
+
+ iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
+ if (!iommu->iopf_queue)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+/* Destroy per-IOMMU IOPF queue if no longer needed. */
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
+{
+ iopf_queue_free(iommu->iopf_queue);
+ iommu->iopf_queue = NULL;
+}
+
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *resp)
+{
+ amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
+}
+
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!dev_data->pri_enabled)
+ return ret;
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+
+ if (!iommu->iopf_queue) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
+ if (ret)
+ goto out_unlock;
+
+ dev_data->ppr = true;
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
+ return ret;
+}
+
+/* It is assumed that the caller has verified that the device was added to the IOPF queue */
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+
+ iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
+ dev_data->ppr = false;
+
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
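
Taken together with the iommu.c changes above, the intended page-fault round trip looks roughly like the outline below. This is a reading of the functions introduced in this diff plus the generic iopf queue API, not a call chain quoted from the patch itself.

/*
 * 1. attach:  do_attach() -> pdev_enable_caps() -> amd_iommu_iopf_add_device()
 *             (the device joins iommu->iopf_queue and dev_data->ppr is set)
 * 2. fault:   the IOMMU logs a PPR entry -> amd_iommu_int_thread_pprlog()
 *             -> amd_iommu_poll_ppr_log() -> iommu_call_iopf_notifier()
 *             -> iommu_report_device_fault()
 * 3. resolve: the iopf core services the fault (for SVA, by faulting in the
 *             bound mm) and replies through the driver's page_response op
 *             -> amd_iommu_page_response() -> amd_iommu_complete_ppr()
 * 4. detach:  detach_device() -> iopf_queue_flush_dev()
 *             -> amd_iommu_iopf_remove_device()
 */
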
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 54feb1ecccad..014a997753a8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
+
+obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
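
The new test object only builds when CONFIG_ARM_SMMU_V3_KUNIT_TEST is enabled. A minimal .kunitconfig sketch for exercising the suite is shown below; the exact Kconfig dependencies and the cross-compile invocation for tools/testing/kunit/kunit.py are assumptions here, since the CD/STE helpers under test are arm64-only.

CONFIG_KUNIT=y
CONFIG_ARM_SMMU_V3=y
CONFIG_ARM_SMMU_V3_SVA=y
CONFIG_ARM_SMMU_V3_KUNIT_TEST=y

With that fragment in place, something like kunit.py run --arch=arm64 --cross_compile=aarch64-linux-gnu- would be the expected way to run arm-smmu-v3-kunit-test, though the precise flags depend on the local toolchain.
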
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 2cd433a9c8a0..e490ffb38015 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -8,6 +8,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include <kunit/visibility.h>
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
@@ -34,21 +35,25 @@ struct arm_smmu_bond {
static DEFINE_MUTEX(sva_lock);
-/*
- * Write the CD to the CD tables for all masters that this domain is attached
- * to. Note that this is only used to update existing CD entries in the target
- * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
- */
-static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
- int ssid,
- struct arm_smmu_ctx_desc *cd)
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_master *master;
+ struct arm_smmu_cd target_cd;
unsigned long flags;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- arm_smmu_write_ctx_desc(master, ssid, cd);
+ struct arm_smmu_cd *cdptr;
+
+ /* S1 domains only support RID attachment right now */
+ cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+ if (WARN_ON(!cdptr))
+ continue;
+
+ arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+ arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+ &target_cd);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}
@@ -96,7 +101,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+ arm_smmu_update_s1_domain_cd_entry(smmu_domain);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -105,11 +110,87 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return NULL;
}
+static u64 page_size_to_cd(void)
+{
+ static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
+ PAGE_SIZE == SZ_64K);
+ if (PAGE_SIZE == SZ_64K)
+ return ARM_LPAE_TCR_TG0_64K;
+ if (PAGE_SIZE == SZ_16K)
+ return ARM_LPAE_TCR_TG0_16K;
+ return ARM_LPAE_TCR_TG0_4K;
+}
+
+VISIBLE_IF_KUNIT
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master, struct mm_struct *mm,
+ u16 asid)
+{
+ u64 par;
+
+ memset(target, 0, sizeof(*target));
+
+ par = cpuid_feature_extract_unsigned_field(
+ read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
+ ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+ target->data[0] = cpu_to_le64(
+ CTXDESC_CD_0_TCR_EPD1 |
+#ifdef __BIG_ENDIAN
+ CTXDESC_CD_0_ENDI |
+#endif
+ CTXDESC_CD_0_V |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
+ CTXDESC_CD_0_AA64 |
+ (master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+ CTXDESC_CD_0_R |
+ CTXDESC_CD_0_A |
+ CTXDESC_CD_0_ASET |
+ FIELD_PREP(CTXDESC_CD_0_ASID, asid));
+
+ /*
+ * If no MM is passed then this creates an SVA entry that faults
+ * everything. arm_smmu_write_cd_entry() can hitlessly go between these
+ * two entries types since TTB0 is ignored by HW when EPD0 is set.
+ */
+ if (mm) {
+ target->data[0] |= cpu_to_le64(
+ FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
+ 64ULL - vabits_actual) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
+ ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
+ ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
+
+ target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
+ CTXDESC_CD_1_TTB0_MASK);
+ } else {
+ target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
+
+ /*
+ * Disable stall and immediately generate an abort if stall
+ * disable is permitted. This speeds up cleanup for an unclean
+ * exit if the device is still doing a lot of DMA.
+ */
+ if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+ target->data[0] &=
+ cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
+ }
+
+ /*
+ * MAIR value is pretty much constant and global, so we can just get it
+ * from the current CPU register
+ */
+ target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
+
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
u16 asid;
int err = 0;
- u64 tcr, par, reg;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
@@ -143,39 +224,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
if (err)
goto out_free_asid;
- tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
- FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
- FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
- CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-
- switch (PAGE_SIZE) {
- case SZ_4K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
- break;
- case SZ_16K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
- break;
- case SZ_64K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
- break;
- default:
- WARN_ON(1);
- err = -EINVAL;
- goto out_free_asid;
- }
-
- reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
-
- cd->ttbr = virt_to_phys(mm->pgd);
- cd->tcr = tcr;
- /*
- * MAIR value is pretty much constant and global, so we can just get it
- * from the current CPU register
- */
- cd->mair = read_sysreg(mair_el1);
cd->asid = asid;
cd->mm = mm;
@@ -253,6 +301,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ struct arm_smmu_master *master;
+ unsigned long flags;
mutex_lock(&sva_lock);
if (smmu_mn->cleared) {
@@ -264,8 +314,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- &quiet_cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ struct arm_smmu_cd target;
+ struct arm_smmu_cd *cdptr;
+
+ cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+ if (WARN_ON(!cdptr))
+ continue;
+ arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
+ arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+ &target);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
@@ -360,6 +421,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct mm_struct *mm)
{
int ret;
+ struct arm_smmu_cd target;
+ struct arm_smmu_cd *cdptr;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -386,9 +449,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
goto err_free_bond;
}
- ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
- if (ret)
+ cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+ if (!cdptr) {
+ ret = -ENOMEM;
goto err_put_notifier;
+ }
+ arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
+ arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
list_add(&bond->list, &master->bonds);
return 0;
@@ -546,7 +613,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
mutex_lock(&sva_lock);
- arm_smmu_write_ctx_desc(master, id, NULL);
+ arm_smmu_clear_cd(master, id);
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
@@ -569,6 +636,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
int ret = 0;
struct mm_struct *mm = domain->mm;
+ if (mm_get_enqcmd_pasid(mm) != id)
+ return -EINVAL;
+
mutex_lock(&sva_lock);
ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
new file mode 100644
index 000000000000..315e487fd990
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ */
+#include <kunit/test.h>
+#include <linux/io-pgtable.h>
+
+#include "arm-smmu-v3.h"
+
+struct arm_smmu_test_writer {
+ struct arm_smmu_entry_writer writer;
+ struct kunit *test;
+ const __le64 *init_entry;
+ const __le64 *target_entry;
+ __le64 *entry;
+
+ bool invalid_entry_written;
+ unsigned int num_syncs;
+};
+
+#define NUM_ENTRY_QWORDS 8
+#define NUM_EXPECTED_SYNCS(x) x
+
+static struct arm_smmu_ste bypass_ste;
+static struct arm_smmu_ste abort_ste;
+static struct arm_smmu_device smmu = {
+ .features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
+};
+static struct mm_struct sva_mm = {
+ .pgd = (void *)0xdaedbeefdeadbeefULL,
+};
+
+static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
+ const __le64 *used_bits,
+ const __le64 *target,
+ unsigned int length)
+{
+ bool differs = false;
+ unsigned int i;
+
+ for (i = 0; i < length; i++) {
+ if ((entry[i] & used_bits[i]) != target[i])
+ differs = true;
+ }
+ return differs;
+}
+
+static void
+arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
+{
+ struct arm_smmu_test_writer *test_writer =
+ container_of(writer, struct arm_smmu_test_writer, writer);
+ __le64 *entry_used_bits;
+
+ entry_used_bits = kunit_kzalloc(
+ test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
+
+ pr_debug("STE value is now set to: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
+ test_writer->entry,
+ NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
+ false);
+
+ test_writer->num_syncs += 1;
+ if (!test_writer->entry[0]) {
+ test_writer->invalid_entry_written = true;
+ } else {
+ /*
+ * At any stage in a hitless transition, the entry must be
+ * equivalent to either the initial entry or the target entry
+ * when only considering the bits used by the current
+ * configuration.
+ */
+ writer->ops->get_used(test_writer->entry, entry_used_bits);
+ KUNIT_EXPECT_FALSE(
+ test_writer->test,
+ arm_smmu_entry_differs_in_used_bits(
+ test_writer->entry, entry_used_bits,
+ test_writer->init_entry, NUM_ENTRY_QWORDS) &&
+ arm_smmu_entry_differs_in_used_bits(
+ test_writer->entry, entry_used_bits,
+ test_writer->target_entry,
+ NUM_ENTRY_QWORDS));
+ }
+}
+
+static void
+arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
+ const __le64 *ste)
+{
+ __le64 used_bits[NUM_ENTRY_QWORDS] = {};
+
+ arm_smmu_get_ste_used(ste, used_bits);
+ pr_debug("STE used bits: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, used_bits,
+ sizeof(used_bits), false);
+}
+
+static const struct arm_smmu_entry_writer_ops test_ste_ops = {
+ .sync = arm_smmu_test_writer_record_syncs,
+ .get_used = arm_smmu_get_ste_used,
+};
+
+static const struct arm_smmu_entry_writer_ops test_cd_ops = {
+ .sync = arm_smmu_test_writer_record_syncs,
+ .get_used = arm_smmu_get_cd_used,
+};
+
+static void arm_smmu_v3_test_ste_expect_transition(
+ struct kunit *test, const struct arm_smmu_ste *cur,
+ const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
+ bool hitless)
+{
+ struct arm_smmu_ste cur_copy = *cur;
+ struct arm_smmu_test_writer test_writer = {
+ .writer = {
+ .ops = &test_ste_ops,
+ },
+ .test = test,
+ .init_entry = cur->data,
+ .target_entry = target->data,
+ .entry = cur_copy.data,
+ .num_syncs = 0,
+ .invalid_entry_written = false,
+
+ };
+
+ pr_debug("STE initial value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
+ pr_debug("STE target value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
+ target->data);
+
+ arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
+
+ KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
+ KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
+ KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
+}
+
+static void arm_smmu_v3_test_ste_expect_hitless_transition(
+ struct kunit *test, const struct arm_smmu_ste *cur,
+ const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_ste_expect_transition(test, cur, target,
+ num_syncs_expected, true);
+}
+
+static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
+
+static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
+ const dma_addr_t dma_addr)
+{
+ struct arm_smmu_master master = {
+ .cd_table.cdtab_dma = dma_addr,
+ .cd_table.s1cdmax = 0xFF,
+ .cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_cdtable_ste(ste, &master);
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
+{
+ /*
+ * Bypass STEs have used bits in the first two qwords, while abort STEs
+ * only have used bits in the first qword. Transitioning from bypass to
+ * abort requires two syncs: the first to set the first qword and make
+ * the STE into an abort, the second to clean up the second qword.
+ */
+ arm_smmu_v3_test_ste_expect_hitless_transition(
+ test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
+{
+ /*
+ * Transitioning from abort to bypass also requires two syncs: the first
+ * to set the second qword data required by the bypass STE, and the
+ * second to set the first qword and switch to bypass.
+ */
+ arm_smmu_v3_test_ste_expect_hitless_transition(
+ test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
+ bool ats_enabled)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ .ats_enabled = ats_enabled,
+ };
+ struct io_pgtable io_pgtable = {};
+ struct arm_smmu_domain smmu_domain = {
+ .pgtbl_ops = &io_pgtable.ops,
+ };
+
+ io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
+
+ arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_test_cd_expect_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
+ bool hitless)
+{
+ struct arm_smmu_cd cur_copy = *cur;
+ struct arm_smmu_test_writer test_writer = {
+ .writer = {
+ .ops = &test_cd_ops,
+ },
+ .test = test,
+ .init_entry = cur->data,
+ .target_entry = target->data,
+ .entry = cur_copy.data,
+ .num_syncs = 0,
+ .invalid_entry_written = false,
+
+ };
+
+ pr_debug("CD initial value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
+ pr_debug("CD target value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
+ target->data);
+
+ arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
+
+ KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
+ KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
+ KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
+}
+
+static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_cd_expect_transition(test, cur, target,
+ num_syncs_expected, false);
+}
+
+static void arm_smmu_v3_test_cd_expect_hitless_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_cd_expect_transition(test, cur, target,
+ num_syncs_expected, true);
+}
+
+static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+ struct io_pgtable io_pgtable = {};
+ struct arm_smmu_domain smmu_domain = {
+ .pgtbl_ops = &io_pgtable.ops,
+ .cd = {
+ .asid = asid,
+ },
+ };
+
+ io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
+ io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;
+
+ arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
+}
+
+static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_s1_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_s1_cd(&cd, 778);
+ arm_smmu_test_make_s1_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
+ NUM_EXPECTED_SYNCS(1));
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
+ NUM_EXPECTED_SYNCS(1));
+}
+
+static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
+}
+
+static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
+ unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_sva_cd(cd, &master, NULL, asid);
+}
+
+static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_sva_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
+{
+ struct arm_smmu_cd cd;
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_sva_cd(&cd, 1997);
+ arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
+ NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static struct kunit_case arm_smmu_v3_test_cases[] = {
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
+ {},
+};
+
+static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
+{
+ arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
+ arm_smmu_make_abort_ste(&abort_ste);
+ return 0;
+}
+
+static struct kunit_suite arm_smmu_v3_test_module = {
+ .name = "arm-smmu-v3-kunit-test",
+ .suite_init = arm_smmu_v3_test_suite_init,
+ .test_cases = arm_smmu_v3_test_cases,
+};
+kunit_test_suites(&arm_smmu_v3_test_module);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_LICENSE("GPL v2");
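The helpers above make further transitions cheap to cover. As a sketch only (not part of the patch; the non-hitless expectation and the sync count are assumptions that would need to be checked against the used-bit analysis), an SVA-to-S1 CD replacement could be exercised like this, with a matching KUNIT_CASE() entry added to arm_smmu_v3_test_cases[]:

static void arm_smmu_v3_write_cd_test_sva_to_s1(struct kunit *test)
{
	struct arm_smmu_cd sva_cd;
	struct arm_smmu_cd s1_cd;

	/* Swapping an SVA CD for a normal S1 CD changes several qwords */
	arm_smmu_test_make_sva_cd(&sva_cd, 1997);
	arm_smmu_test_make_s1_cd(&s1_cd, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &sva_cd, &s1_cd, NUM_EXPECTED_SYNCS(2));
}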
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5ed036225e69..ab415e107054 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -26,15 +26,11 @@
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
+#include <kunit/visibility.h>
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-static bool disable_bypass = true;
-module_param(disable_bypass, bool, 0444);
-MODULE_PARM_DESC(disable_bypass,
- "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-
static bool disable_msipolling;
module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
@@ -47,8 +43,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
-static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
- ioasid_t sid);
+#define NUM_ENTRY_QWORDS 8
+static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64));
+static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64));
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
@@ -76,12 +73,6 @@ struct arm_smmu_option_prop {
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);
-/*
- * Special value used by SVA when a process dies, to quiesce a CD without
- * disabling it.
- */
-struct arm_smmu_ctx_desc quiet_cd = { 0 };
-
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -90,6 +81,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu);
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
static void parse_driver_options(struct arm_smmu_device *smmu)
{
@@ -977,44 +969,45 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
* would be nice if this was complete according to the spec, but minimally it
* has to capture the bits this driver uses.
*/
-static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
- struct arm_smmu_ste *used_bits)
+VISIBLE_IF_KUNIT
+void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
{
- unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0]));
- used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
- if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ used_bits[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V)))
return;
- used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+ used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
/* S1 translates */
if (cfg & BIT(0)) {
- used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
- STRTAB_STE_0_S1CTXPTR_MASK |
- STRTAB_STE_0_S1CDMAX);
- used_bits->data[1] |=
+ used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
STRTAB_STE_1_EATS);
- used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
}
/* S2 translates */
if (cfg & BIT(1)) {
- used_bits->data[1] |=
+ used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
- used_bits->data[2] |=
+ used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
- used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
}
if (cfg == STRTAB_STE_0_CFG_BYPASS)
- used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+ used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
@@ -1022,57 +1015,55 @@ static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
* unused_update is an intermediate value of entry that has unused bits set to
* their new values.
*/
-static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target,
- struct arm_smmu_ste *unused_update)
+static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
+ const __le64 *entry, const __le64 *target,
+ __le64 *unused_update)
{
- struct arm_smmu_ste target_used = {};
- struct arm_smmu_ste cur_used = {};
+ __le64 target_used[NUM_ENTRY_QWORDS] = {};
+ __le64 cur_used[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;
- arm_smmu_get_ste_used(entry, &cur_used);
- arm_smmu_get_ste_used(target, &target_used);
+ writer->ops->get_used(entry, cur_used);
+ writer->ops->get_used(target, target_used);
- for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
/*
* Check that masks are up to date, the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
* is used.
*/
- WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+ WARN_ON_ONCE(target[i] & ~target_used[i]);
/* Bits can change because they are not currently being used */
- unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
- (target->data[i] & ~cur_used.data[i]);
+ unused_update[i] = (entry[i] & cur_used[i]) |
+ (target[i] & ~cur_used[i]);
/*
* Each bit indicates that a used bit in a qword needs to be
* changed after unused_update is applied.
*/
- if ((unused_update->data[i] & target_used.data[i]) !=
- target->data[i])
+ if ((unused_update[i] & target_used[i]) != target[i])
used_qword_diff |= 1 << i;
}
return used_qword_diff;
}
-static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
- struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target, unsigned int start,
+static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
+ const __le64 *target, unsigned int start,
unsigned int len)
{
bool changed = false;
unsigned int i;
for (i = start; len != 0; len--, i++) {
- if (entry->data[i] != target->data[i]) {
- WRITE_ONCE(entry->data[i], target->data[i]);
+ if (entry[i] != target[i]) {
+ WRITE_ONCE(entry[i], target[i]);
changed = true;
}
}
if (changed)
- arm_smmu_sync_ste_for_sid(smmu, sid);
+ writer->ops->sync(writer);
return changed;
}
@@ -1102,24 +1093,22 @@ static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
* V=0 process. This relies on the IGNORED behavior described in the
* specification.
*/
-static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target)
+VISIBLE_IF_KUNIT
+void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
+ const __le64 *target)
{
- unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ste unused_update;
+ __le64 unused_update[NUM_ENTRY_QWORDS];
u8 used_qword_diff;
used_qword_diff =
- arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
if (hweight8(used_qword_diff) == 1) {
/*
* Only one qword needs its used bits to be changed. This is a
- * hitless update, update all bits the current STE is ignoring
- * to their new values, then update a single "critical qword" to
- * change the STE and finally 0 out any bits that are now unused
- * in the target configuration.
+ * hitless update: update all bits the current STE/CD is
+ * ignoring to their new values, then update a single "critical
+ * qword" to change the STE/CD and finally 0 out any bits that
+ * are now unused in the target configuration.
*/
unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
@@ -1128,21 +1117,21 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* writing it in the next step anyways. This can save a sync
* when the only change is in that qword.
*/
- unused_update.data[critical_qword_index] =
- entry->data[critical_qword_index];
- entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
- entry_set(smmu, sid, entry, target, critical_qword_index, 1);
- entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ unused_update[critical_qword_index] =
+ entry[critical_qword_index];
+ entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS);
+ entry_set(writer, entry, target, critical_qword_index, 1);
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
} else if (used_qword_diff) {
/*
* At least two qwords need their inuse bits to be changed. This
* requires a breaking update, zero the V bit, write all qwords
* but 0, then set qword 0
*/
- unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
- entry_set(smmu, sid, entry, &unused_update, 0, 1);
- entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
- entry_set(smmu, sid, entry, target, 0, 1);
+ unused_update[0] = 0;
+ entry_set(writer, entry, unused_update, 0, 1);
+ entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1);
+ entry_set(writer, entry, target, 0, 1);
} else {
/*
* No inuse bit changed. Sanity check that all unused bits are 0
@@ -1150,20 +1139,10 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* compute_qword_diff().
*/
WARN_ON_ONCE(
- entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
- }
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
- struct arm_smmu_cmdq_ent
- prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- } };
-
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
@@ -1209,117 +1188,166 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
CTXDESC_L1_DESC_V;
- /* See comment in arm_smmu_write_ctx_desc() */
+ /* The HW has 64 bit atomicity with stores to the L2 CD table */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid)
{
- __le64 *l1ptr;
- unsigned int idx;
struct arm_smmu_l1_ctx_desc *l1_desc;
- struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ if (!cd_table->cdtab)
+ return NULL;
+
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
- return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
+ return (struct arm_smmu_cd *)(cd_table->cdtab +
+ ssid * CTXDESC_CD_DWORDS);
- idx = ssid >> CTXDESC_SPLIT;
- l1_desc = &cd_table->l1_desc[idx];
- if (!l1_desc->l2ptr) {
- if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
+ if (!l1_desc->l2ptr)
+ return NULL;
+ return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
+}
+
+struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
+
+ might_sleep();
+ iommu_group_mutex_assert(master->dev);
+
+ if (!cd_table->cdtab) {
+ if (arm_smmu_alloc_cd_tables(master))
return NULL;
+ }
- l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
- arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
- /* An invalid L1CD can be cached */
- arm_smmu_sync_cd(master, ssid, false);
+ if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
+ unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+
+ l1_desc = &cd_table->l1_desc[idx];
+ if (!l1_desc->l2ptr) {
+ __le64 *l1ptr;
+
+ if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ return NULL;
+
+ l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ /* An invalid L1CD can be cached */
+ arm_smmu_sync_cd(master, ssid, false);
+ }
}
- idx = ssid & (CTXDESC_L2_ENTRIES - 1);
- return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+ return arm_smmu_get_cd_ptr(master, ssid);
}
-int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
- struct arm_smmu_ctx_desc *cd)
+struct arm_smmu_cd_writer {
+ struct arm_smmu_entry_writer writer;
+ unsigned int ssid;
+};
+
+VISIBLE_IF_KUNIT
+void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
{
+ used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V);
+ if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V)))
+ return;
+ memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd));
+
/*
- * This function handles the following cases:
- *
- * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
- * (2) Install a secondary CD, for SID+SSID traffic.
- * (3) Update ASID of a CD. Atomically write the first 64 bits of the
- * CD, then invalidate the old entry and mappings.
- * (4) Quiesce the context without clearing the valid bit. Disable
- * translation, and ignore any translation fault.
- * (5) Remove a secondary CD.
+ * If EPD0 is set by the make function it means
+ * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED
*/
- u64 val;
- bool cd_live;
- __le64 *cdptr;
- struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- struct arm_smmu_device *smmu = master->smmu;
+ if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) {
+ used_bits[0] &= ~cpu_to_le64(
+ CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
+ CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
+ CTXDESC_CD_0_TCR_SH0);
+ used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
- if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
- return -E2BIG;
+static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
+{
+ struct arm_smmu_cd_writer *cd_writer =
+ container_of(writer, struct arm_smmu_cd_writer, writer);
- cdptr = arm_smmu_get_cd_ptr(master, ssid);
- if (!cdptr)
- return -ENOMEM;
+ arm_smmu_sync_cd(writer->master, cd_writer->ssid, true);
+}
- val = le64_to_cpu(cdptr[0]);
- cd_live = !!(val & CTXDESC_CD_0_V);
-
- if (!cd) { /* (5) */
- val = 0;
- } else if (cd == &quiet_cd) { /* (4) */
- if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
- val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
- val |= CTXDESC_CD_0_TCR_EPD0;
- } else if (cd_live) { /* (3) */
- val &= ~CTXDESC_CD_0_ASID;
- val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
- /*
- * Until CD+TLB invalidation, both ASIDs may be used for tagging
- * this substream's traffic
- */
- } else { /* (1) and (2) */
- cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
- cdptr[2] = 0;
- cdptr[3] = cpu_to_le64(cd->mair);
+static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
+ .sync = arm_smmu_cd_writer_sync_entry,
+ .get_used = arm_smmu_get_cd_used,
+};
- /*
- * STE may be live, and the SMMU might read dwords of this CD in any
- * order. Ensure that it observes valid values before reading
- * V=1.
- */
- arm_smmu_sync_cd(master, ssid, true);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+ struct arm_smmu_cd *cdptr,
+ const struct arm_smmu_cd *target)
+{
+ struct arm_smmu_cd_writer cd_writer = {
+ .writer = {
+ .ops = &arm_smmu_cd_writer_ops,
+ .master = master,
+ },
+ .ssid = ssid,
+ };
+
+ arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
+}
- val = cd->tcr |
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
+ &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+
+ memset(target, 0, sizeof(*target));
+
+ target->data[0] = cpu_to_le64(
+ FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
#ifdef __BIG_ENDIAN
- CTXDESC_CD_0_ENDI |
+ CTXDESC_CD_0_ENDI |
#endif
- CTXDESC_CD_0_R | CTXDESC_CD_0_A |
- (cd->mm ? 0 : CTXDESC_CD_0_ASET) |
- CTXDESC_CD_0_AA64 |
- FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
- CTXDESC_CD_0_V;
-
- if (cd_table->stall_enabled)
- val |= CTXDESC_CD_0_S;
- }
-
- /*
- * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
- * "Configuration structures and configuration invalidation completion"
- *
- * The size of single-copy atomic reads made by the SMMU is
- * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
- * field within an aligned 64-bit span of a structure can be altered
- * without first making the structure invalid.
- */
- WRITE_ONCE(cdptr[0], cpu_to_le64(val));
- arm_smmu_sync_cd(master, ssid, true);
- return 0;
+ CTXDESC_CD_0_TCR_EPD1 |
+ CTXDESC_CD_0_V |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+ CTXDESC_CD_0_AA64 |
+ (master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+ CTXDESC_CD_0_R |
+ CTXDESC_CD_0_A |
+ CTXDESC_CD_0_ASET |
+ FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+ );
+ target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ CTXDESC_CD_1_TTB0_MASK);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
+
+void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
+{
+ struct arm_smmu_cd target = {};
+ struct arm_smmu_cd *cdptr;
+
+ if (!master->cd_table.cdtab)
+ return;
+ cdptr = arm_smmu_get_cd_ptr(master, ssid);
+ if (WARN_ON(!cdptr))
+ return;
+ arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
}
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
@@ -1330,7 +1358,6 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- cd_table->stall_enabled = master->stall_enabled;
cd_table->s1cdmax = master->ssid_bits;
max_contexts = 1 << cd_table->s1cdmax;
@@ -1428,43 +1455,90 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
- /* See comment in arm_smmu_write_ctx_desc() */
+ /* The HW has 64 bit atomicity with stores to the L2 STE table */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
+struct arm_smmu_ste_writer {
+ struct arm_smmu_entry_writer writer;
+ u32 sid;
+};
+
+static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{
+ struct arm_smmu_ste_writer *ste_writer =
+ container_of(writer, struct arm_smmu_ste_writer, writer);
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_STE,
.cfgi = {
- .sid = sid,
+ .sid = ste_writer->sid,
.leaf = true,
},
};
- arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+ arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
+}
+
+static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
+ .sync = arm_smmu_ste_writer_sync_entry,
+ .get_used = arm_smmu_get_ste_used,
+};
+
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *ste,
+ const struct arm_smmu_ste *target)
+{
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste_writer ste_writer = {
+ .writer = {
+ .ops = &arm_smmu_ste_writer_ops,
+ .master = master,
+ },
+ .sid = sid,
+ };
+
+ arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data);
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
}
-static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);
-static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
- target->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
-static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
- struct arm_smmu_master *master)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
{
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
struct arm_smmu_device *smmu = master->smmu;
@@ -1512,10 +1586,12 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
-static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
- struct arm_smmu_master *master,
- struct arm_smmu_domain *smmu_domain)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
const struct io_pgtable_cfg *pgtbl_cfg =
@@ -1523,6 +1599,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
u64 vtcr_val;
+ struct arm_smmu_device *smmu = master->smmu;
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
@@ -1531,9 +1608,11 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_EATS,
- master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
- FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
@@ -1555,6 +1634,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
STRTAB_STE_3_S2TTB_MASK);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);
/*
* This can safely directly manipulate the STE memory without a sync sequence
@@ -1566,10 +1646,7 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
unsigned int i;
for (i = 0; i < nent; ++i) {
- if (disable_bypass)
- arm_smmu_make_abort_ste(strtab);
- else
- arm_smmu_make_bypass_ste(strtab);
+ arm_smmu_make_abort_ste(strtab);
strtab++;
}
}
@@ -2222,13 +2299,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg)
+ struct arm_smmu_domain *smmu_domain)
{
int ret;
u32 asid;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
- typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
refcount_set(&cd->refs, 1);
@@ -2236,31 +2311,13 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
mutex_lock(&arm_smmu_asid_lock);
ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
- if (ret)
- goto out_unlock;
-
cd->asid = (u16)asid;
- cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
- cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
- FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
- FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
- FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
- CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
- cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
- mutex_unlock(&arm_smmu_asid_lock);
- return 0;
-
-out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg)
+ struct arm_smmu_domain *smmu_domain)
{
int vmid;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2284,8 +2341,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg);
+ struct arm_smmu_domain *smmu_domain);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2328,7 +2384,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -2411,7 +2467,10 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master,
pdev = to_pci_dev(master->dev);
atomic_inc(&smmu_domain->nr_ats_masters);
- arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+ /*
+ * ATC invalidation of PASID 0 causes the entire ATC to be flushed.
+ */
+ arm_smmu_atc_inv_master(master);
if (pci_enable_ats(pdev, stu))
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
@@ -2507,6 +2566,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master *master;
+ struct arm_smmu_cd *cdptr;
if (!fwspec)
return -ENOENT;
@@ -2535,6 +2595,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
+ if (!cdptr)
+ return -ENOMEM;
+ }
+
/*
* Prevent arm_smmu_share_asid() from trying to change the ASID
* of either the old or new domain while we are working on it.
@@ -2552,49 +2618,26 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- if (!master->cd_table.cdtab) {
- ret = arm_smmu_alloc_cd_tables(master);
- if (ret)
- goto out_list_del;
- } else {
- /*
- * arm_smmu_write_ctx_desc() relies on the entry being
- * invalid to work, clear any existing entry.
- */
- ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
- NULL);
- if (ret)
- goto out_list_del;
- }
-
- ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- if (ret)
- goto out_list_del;
+ case ARM_SMMU_DOMAIN_S1: {
+ struct arm_smmu_cd target_cd;
+ arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+ arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+ &target_cd);
arm_smmu_make_cdtable_ste(&target, master);
arm_smmu_install_ste_for_dev(master, &target);
break;
+ }
case ARM_SMMU_DOMAIN_S2:
arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
arm_smmu_install_ste_for_dev(master, &target);
- if (master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
- NULL);
+ arm_smmu_clear_cd(master, IOMMU_NO_PASID);
break;
}
arm_smmu_enable_ats(master, smmu_domain);
- goto out_unlock;
-
-out_list_del:
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del_init(&master->domain_head);
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-
-out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
- return ret;
+ return 0;
}
static int arm_smmu_attach_dev_ste(struct device *dev,
@@ -2628,8 +2671,7 @@ static int arm_smmu_attach_dev_ste(struct device *dev,
* arm_smmu_domain->devices to avoid races updating the same context
* descriptor from arm_smmu_share_asid().
*/
- if (master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ arm_smmu_clear_cd(master, IOMMU_NO_PASID);
return 0;
}
@@ -2637,8 +2679,9 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- arm_smmu_make_bypass_ste(&ste);
+ arm_smmu_make_bypass_ste(master->smmu, &ste);
return arm_smmu_attach_dev_ste(dev, &ste);
}
@@ -2906,10 +2949,10 @@ static void arm_smmu_release_device(struct device *dev)
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
/* Put the STE back to what arm_smmu_init_strtab() sets */
- if (disable_bypass && !dev->iommu->require_direct)
- arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
- else
+ if (dev->iommu->require_direct)
arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+ else
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
@@ -3044,14 +3087,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
{
- struct iommu_domain *domain;
-
- domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
- if (WARN_ON(IS_ERR(domain)) || !domain)
- return;
-
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
}
@@ -3393,7 +3431,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
- devm_add_action(dev, arm_smmu_free_msis, dev);
+ devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
}
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
@@ -3494,7 +3532,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
return ret;
}
-static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
int ret;
u32 reg, enables;
@@ -3504,7 +3542,6 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
if (reg & CR0_SMMUEN) {
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
- WARN_ON(is_kdump_kernel() && !disable_bypass);
arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
}
@@ -3611,14 +3648,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
if (is_kdump_kernel())
enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
- /* Enable the SMMU interface, or ensure bypass */
- if (!bypass || disable_bypass) {
- enables |= CR0_SMMUEN;
- } else {
- ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret)
- return ret;
- }
+ /* Enable the SMMU interface */
+ enables |= CR0_SMMUEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
if (ret) {
@@ -3777,6 +3808,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
+ if (reg & IDR1_ATTR_TYPES_OVR)
+ smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
+
/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
@@ -3992,7 +4026,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
* STE table is not programmed to HW, see
* arm_smmu_initial_bypass_stes()
*/
- arm_smmu_make_bypass_ste(
+ arm_smmu_make_bypass_ste(smmu,
arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
@@ -4007,7 +4041,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu)
@@ -4018,12 +4051,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = arm_smmu_device_dt_probe(pdev, smmu);
} else {
ret = arm_smmu_device_acpi_probe(pdev, smmu);
- if (ret == -ENODEV)
- return ret;
}
-
- /* Set bypass mode according to firmware probing result */
- bypass = !!ret;
+ if (ret)
+ return ret;
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -4087,7 +4117,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
arm_smmu_rmr_install_bypass_ste(smmu);
/* Reset the device */
- ret = arm_smmu_device_reset(smmu, bypass);
+ ret = arm_smmu_device_reset(smmu);
if (ret)
return ret;
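For intuition, the strategy chosen by arm_smmu_write_entry() depends only on the per-qword diff bitmap returned by arm_smmu_entry_qword_diff(). A minimal standalone sketch of that decision (illustrative only, not part of the patch):

#include <linux/bitops.h>

/* Mirrors the three branches taken by arm_smmu_write_entry(). */
static const char *classify_entry_update(u8 used_qword_diff)
{
	if (hweight8(used_qword_diff) == 1)
		return "hitless";	/* rewrite one critical qword between syncs */
	if (used_qword_diff)
		return "breaking";	/* zero qword 0 (V=0), rewrite, then restore qword 0 */
	return "trivial";		/* only bits the HW currently ignores change */
}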
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 23baf117e7e4..1242a086c9f9 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -44,6 +44,7 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
+#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
@@ -274,14 +275,18 @@ struct arm_smmu_ste {
* 2lvl: at most 1024 L1 entries,
* 1024 lazy entries per table.
*/
-#define CTXDESC_SPLIT 10
-#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
+#define CTXDESC_L2_ENTRIES 1024
#define CTXDESC_L1_DESC_DWORDS 1
#define CTXDESC_L1_DESC_V (1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
#define CTXDESC_CD_DWORDS 8
+
+struct arm_smmu_cd {
+ __le64 data[CTXDESC_CD_DWORDS];
+};
+
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
@@ -582,16 +587,13 @@ struct arm_smmu_strtab_l1_desc {
struct arm_smmu_ctx_desc {
u16 asid;
- u64 ttbr;
- u64 tcr;
- u64 mair;
refcount_t refs;
struct mm_struct *mm;
};
struct arm_smmu_l1_ctx_desc {
- __le64 *l2ptr;
+ struct arm_smmu_cd *l2ptr;
dma_addr_t l2ptr_dma;
};
@@ -603,8 +605,6 @@ struct arm_smmu_ctx_desc_cfg {
u8 s1fmt;
/* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax;
- /* Whether CD entries in this table have the stall bit set. */
- u8 stall_enabled:1;
};
struct arm_smmu_s2_cfg {
@@ -647,6 +647,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
+#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -735,6 +736,36 @@ struct arm_smmu_domain {
struct list_head mmu_notifiers;
};
+/* The following are exposed for testing purposes. */
+struct arm_smmu_entry_writer_ops;
+struct arm_smmu_entry_writer {
+ const struct arm_smmu_entry_writer_ops *ops;
+ struct arm_smmu_master *master;
+};
+
+struct arm_smmu_entry_writer_ops {
+ void (*get_used)(const __le64 *entry, __le64 *used);
+ void (*sync)(struct arm_smmu_entry_writer *writer);
+};
+
+#if IS_ENABLED(CONFIG_KUNIT)
+void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
+ const __le64 *target);
+void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
+void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target);
+void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master);
+void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain);
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master, struct mm_struct *mm,
+ u16 asid);
+#endif
+
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -742,10 +773,19 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
-extern struct arm_smmu_ctx_desc quiet_cd;
-int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
- struct arm_smmu_ctx_desc *cd);
+void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid);
+struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid);
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+ struct arm_smmu_cd *cdptr,
+ const struct arm_smmu_cd *target);
+
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
size_t granule, bool leaf,
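The CD helpers declared above are meant to be used together. A minimal sketch of the call pattern (illustrative only, not part of the patch; error handling and locking are elided, and the real usage is arm_smmu_attach_dev()):

/* Assumes the driver's "arm-smmu-v3.h" is included. */
static int example_install_s1_cd(struct arm_smmu_master *master,
				 struct arm_smmu_domain *smmu_domain,
				 ioasid_t ssid)
{
	struct arm_smmu_cd target_cd;
	struct arm_smmu_cd *cdptr;

	/* Allocates the CD table and leaf on first use */
	cdptr = arm_smmu_alloc_cd_ptr(master, ssid);
	if (!cdptr)
		return -ENOMEM;

	arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
	arm_smmu_write_cd_entry(master, ssid, cdptr, &target_cd);
	return 0;
}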
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 87bf522b9d2e..957d988b6d83 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -221,11 +221,9 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu;
- struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain;
+ struct arm_smmu_domain *smmu_domain = dev;
struct nvidia_smmu *nvidia;
- smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
nvidia = to_nvidia_smmu(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index bb89d49adf8d..552199cbd9e2 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -1,15 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/ratelimit.h>
+#include <linux/spinlock.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
+#define TBU_DBG_TIMEOUT_US 100
+#define DEBUG_AXUSER_REG 0x30
+#define DEBUG_AXUSER_CDMID GENMASK_ULL(43, 36)
+#define DEBUG_AXUSER_CDMID_VAL 0xff
+#define DEBUG_PAR_REG 0x28
+#define DEBUG_PAR_FAULT_VAL BIT(0)
+#define DEBUG_PAR_PA GENMASK_ULL(47, 12)
+#define DEBUG_SID_HALT_REG 0x0
+#define DEBUG_SID_HALT_VAL BIT(16)
+#define DEBUG_SID_HALT_SID GENMASK(9, 0)
+#define DEBUG_SR_HALT_ACK_REG 0x20
+#define DEBUG_SR_HALT_ACK_VAL BIT(1)
+#define DEBUG_SR_ECATS_RUNNING_VAL BIT(0)
+#define DEBUG_TXN_AXCACHE GENMASK(5, 2)
+#define DEBUG_TXN_AXPROT GENMASK(8, 6)
+#define DEBUG_TXN_AXPROT_PRIV 0x1
+#define DEBUG_TXN_AXPROT_NSEC 0x2
+#define DEBUG_TXN_TRIGG_REG 0x18
+#define DEBUG_TXN_TRIGGER BIT(0)
+#define DEBUG_VA_ADDR_REG 0x8
+
+static LIST_HEAD(tbu_list);
+static DEFINE_MUTEX(tbu_list_lock);
+static DEFINE_SPINLOCK(atos_lock);
+
+struct qcom_tbu {
+ struct device *dev;
+ struct device_node *smmu_np;
+ u32 sid_range[2];
+ struct list_head list;
+ struct clk *clk;
+ struct icc_path *path;
+ void __iomem *base;
+ spinlock_t halt_lock; /* protects halt_count; halt and resume must not run concurrently */
+ int halt_count;
+};
+
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct qcom_smmu, smmu);
+}
+
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
{
int ret;
@@ -49,3 +100,448 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
tbu_pwr_status, sync_inv_ack, sync_inv_progress);
}
}
+
+static struct qcom_tbu *qcom_find_tbu(struct qcom_smmu *qsmmu, u32 sid)
+{
+ struct qcom_tbu *tbu;
+ u32 start, end;
+
+ guard(mutex)(&tbu_list_lock);
+
+ if (list_empty(&tbu_list))
+ return NULL;
+
+ list_for_each_entry(tbu, &tbu_list, list) {
+ start = tbu->sid_range[0];
+ end = start + tbu->sid_range[1];
+
+ if (qsmmu->smmu.dev->of_node == tbu->smmu_np &&
+ start <= sid && sid < end)
+ return tbu;
+ }
+ dev_err(qsmmu->smmu.dev, "Unable to find TBU for sid 0x%x\n", sid);
+
+ return NULL;
+}
+
+static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int ret = 0, idx = smmu_domain->cfg.cbndx;
+ u32 val, fsr, status;
+
+ guard(spinlock_irqsave)(&tbu->halt_lock);
+ if (tbu->halt_count) {
+ tbu->halt_count++;
+ return ret;
+ }
+
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val |= DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) {
+ u32 sctlr_orig, sctlr;
+
+ /*
+ * We are in a fault. Our request to halt the bus will not
+ * complete until transactions in front of us (such as the fault
+ * itself) have completed. Disable iommu faults and terminate
+ * any existing transactions.
+ */
+ sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
+ }
+
+ if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "Timeout while trying to halt TBU!\n");
+ ret = -ETIMEDOUT;
+
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ return ret;
+ }
+
+ tbu->halt_count = 1;
+
+ return ret;
+}
+
+static void qcom_tbu_resume(struct qcom_tbu *tbu)
+{
+ u32 val;
+
+ guard(spinlock_irqsave)(&tbu->halt_lock);
+ if (!tbu->halt_count) {
+ WARN(1, "%s: halt_count is 0", dev_name(tbu->dev));
+ return;
+ }
+
+ if (tbu->halt_count > 1) {
+ tbu->halt_count--;
+ return;
+ }
+
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ tbu->halt_count = 0;
+}
+
+static phys_addr_t qcom_tbu_trigger_atos(struct arm_smmu_domain *smmu_domain,
+ struct qcom_tbu *tbu, dma_addr_t iova, u32 sid)
+{
+ bool atos_timedout = false;
+ phys_addr_t phys = 0;
+ ktime_t timeout;
+ u64 val;
+
+ /* Set address and stream-id */
+ val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_SID;
+ val |= FIELD_PREP(DEBUG_SID_HALT_SID, sid);
+ writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+ writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
+ val = FIELD_PREP(DEBUG_AXUSER_CDMID, DEBUG_AXUSER_CDMID_VAL);
+ writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
+
+ /* Write-back read and write-allocate */
+ val = FIELD_PREP(DEBUG_TXN_AXCACHE, 0xf);
+
+ /* Non-secure access */
+ val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_NSEC);
+
+ /* Privileged access */
+ val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_PRIV);
+
+ val |= DEBUG_TXN_TRIGGER;
+ writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
+
+ timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
+ for (;;) {
+ val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+ if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
+ break;
+ val = readl_relaxed(tbu->base + DEBUG_PAR_REG);
+ if (val & DEBUG_PAR_FAULT_VAL)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ atos_timedout = true;
+ break;
+ }
+ }
+
+ val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
+ if (val & DEBUG_PAR_FAULT_VAL)
+ dev_err(tbu->dev, "ATOS generated a fault interrupt! PAR = %llx, SID=0x%x\n",
+ val, sid);
+ else if (atos_timedout)
+ dev_err_ratelimited(tbu->dev, "ATOS translation timed out!\n");
+ else
+ phys = FIELD_GET(DEBUG_PAR_PA, val);
+
+ /* Reset hardware */
+ writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
+ writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_SID;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ return phys;
+}
+
+static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
+ dma_addr_t iova, u32 sid)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ int idx = smmu_domain->cfg.cbndx;
+ struct qcom_tbu *tbu;
+ u32 sctlr_orig, sctlr;
+ phys_addr_t phys = 0;
+ int attempt = 0;
+ int ret;
+ u64 fsr;
+
+ tbu = qcom_find_tbu(qsmmu, sid);
+ if (!tbu)
+ return 0;
+
+ ret = icc_set_bw(tbu->path, 0, UINT_MAX);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tbu->clk);
+ if (ret)
+ goto disable_icc;
+
+ ret = qcom_tbu_halt(tbu, smmu_domain);
+ if (ret)
+ goto disable_clk;
+
+ /*
+ * ATOS/ECATS can trigger the fault interrupt, so disable it temporarily
+ * and check for an interrupt manually.
+ */
+ sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (fsr & ARM_SMMU_FSR_FAULT) {
+ /* Clear pending interrupts */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ /*
+ * TBU halt takes care of resuming any stalled transaction.
+ * It is kept here for completeness' sake.
+ */
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ARM_SMMU_RESUME_TERMINATE);
+ }
+
+ /* Only one concurrent atos operation */
+ scoped_guard(spinlock_irqsave, &atos_lock) {
+ /*
+ * If the translation fails, retry the lookup a couple more times.
+ */
+ do {
+ phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (fsr & ARM_SMMU_FSR_FAULT) {
+ /* Clear pending interrupts */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ARM_SMMU_RESUME_TERMINATE);
+ }
+ } while (!phys && attempt++ < 2);
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
+ }
+ qcom_tbu_resume(tbu);
+
+ /* Read to complete prior write transactions */
+ readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+
+disable_clk:
+ clk_disable_unprepare(tbu->clk);
+disable_icc:
+ icc_set_bw(tbu->path, 0, 0);
+
+ return phys;
+}
+
+static phys_addr_t qcom_smmu_iova_to_phys_hard(struct arm_smmu_domain *smmu_domain, dma_addr_t iova)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int idx = smmu_domain->cfg.cbndx;
+ u32 frsynra;
+ u16 sid;
+
+ frsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
+ sid = FIELD_GET(ARM_SMMU_CBFRSYNRA_SID, frsynra);
+
+ return qcom_iova_to_phys(smmu_domain, iova, sid);
+}
+
+static phys_addr_t qcom_smmu_verify_fault(struct arm_smmu_domain *smmu_domain, dma_addr_t iova, u32 fsr)
+{
+ struct io_pgtable *iop = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ phys_addr_t phys_post_tlbiall;
+ phys_addr_t phys;
+
+ phys = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
+ io_pgtable_tlb_flush_all(iop);
+ phys_post_tlbiall = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
+
+ if (phys != phys_post_tlbiall) {
+ dev_err(smmu->dev,
+ "ATOS results differed across TLBIALL... (before: %pa after: %pa)\n",
+ &phys, &phys_post_tlbiall);
+ }
+
+ return (phys == 0 ? phys_post_tlbiall : phys);
+}
+
+irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
+{
+ struct arm_smmu_domain *smmu_domain = dev;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ u32 fsr, fsynr, cbfrsynra, resume = 0;
+ int idx = smmu_domain->cfg.cbndx;
+ phys_addr_t phys_soft;
+ unsigned long iova;
+ int ret, tmp;
+
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
+ iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
+ cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ if (list_empty(&tbu_list)) {
+ ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
+ fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
+
+ if (ret == -ENOSYS)
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+ return IRQ_HANDLED;
+ }
+
+ phys_soft = ops->iova_to_phys(ops, iova);
+
+ tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
+ fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
+ if (!tmp || tmp == -EBUSY) {
+ dev_dbg(smmu->dev,
+ "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+ iova, fsr, fsynr, idx);
+ dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
+ ret = IRQ_HANDLED;
+ resume = ARM_SMMU_RESUME_TERMINATE;
+ } else {
+ phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);
+
+ if (__ratelimit(&_rs)) {
+ dev_err(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+ dev_err(smmu->dev,
+ "FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
+ fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "",
+ cbfrsynra);
+
+ dev_err(smmu->dev,
+ "soft iova-to-phys=%pa\n", &phys_soft);
+ if (!phys_soft)
+ dev_err(smmu->dev,
+ "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
+ dev_name(smmu->dev));
+ if (phys_atos)
+ dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
+ &phys_atos);
+ else
+ dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
+ }
+ ret = IRQ_NONE;
+ resume = ARM_SMMU_RESUME_TERMINATE;
+ }
+
+ /*
+ * If the client returns -EBUSY, do not clear FSR and do not RESUME
+ * if stalled. This is required to keep the IOMMU client stalled on
+ * the outstanding fault. This gives the client a chance to take any
+ * debug action and then terminate the stalled transaction.
+ * So, the sequence in case of stall on fault should be:
+ * 1) Do not clear FSR or write to RESUME here
+ * 2) Client takes any debug action
+ * 3) Client terminates the stalled transaction and resumes the IOMMU
+ * 4) Client clears FSR. The FSR should only be cleared after 3) and
+ * not before so that the fault remains outstanding. This ensures
+ * SCTLR.HUPCF has the desired effect if subsequent transactions also
+ * need to be terminated.
+ */
+ if (tmp != -EBUSY) {
+ /* Clear the faulting FSR */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ /* Retry or terminate any stalled transactions */
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
+ }
+
+ return ret;
+}
+
+static int qcom_tbu_probe(struct platform_device *pdev)
+{
+ struct of_phandle_args args = { .args_count = 2 };
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct qcom_tbu *tbu;
+
+ tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
+ if (!tbu)
+ return -ENOMEM;
+
+ tbu->dev = dev;
+ INIT_LIST_HEAD(&tbu->list);
+ spin_lock_init(&tbu->halt_lock);
+
+ if (of_parse_phandle_with_args(np, "qcom,stream-id-range", "#iommu-cells", 0, &args)) {
+ dev_err(dev, "Cannot parse the 'qcom,stream-id-range' DT property\n");
+ return -EINVAL;
+ }
+
+ tbu->smmu_np = args.np;
+ tbu->sid_range[0] = args.args[0];
+ tbu->sid_range[1] = args.args[1];
+ of_node_put(args.np);
+
+ tbu->base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(tbu->base))
+ return PTR_ERR(tbu->base);
+
+ tbu->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(tbu->clk))
+ return PTR_ERR(tbu->clk);
+
+ tbu->path = devm_of_icc_get(dev, NULL);
+ if (IS_ERR(tbu->path))
+ return PTR_ERR(tbu->path);
+
+ guard(mutex)(&tbu_list_lock);
+ list_add_tail(&tbu->list, &tbu_list);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_tbu_of_match[] = {
+ { .compatible = "qcom,sc7280-tbu" },
+ { .compatible = "qcom,sdm845-tbu" },
+ { }
+};
+
+static struct platform_driver qcom_tbu_driver = {
+ .driver = {
+ .name = "qcom_tbu",
+ .of_match_table = qcom_tbu_of_match,
+ },
+ .probe = qcom_tbu_probe,
+};
+builtin_platform_driver(qcom_tbu_driver);
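The stall-on-fault sequence documented inside qcom_smmu_context_fault() depends on the client keeping the transaction stalled. A minimal sketch of the client side (illustrative only, not part of the patch; the handler name is hypothetical):

#include <linux/iommu.h>

/*
 * Returning -EBUSY tells qcom_smmu_context_fault() not to clear FSR or
 * resume the stalled transaction; the client later terminates the
 * transaction and clears FSR itself (steps 2-4 of the comment above).
 */
static int example_stall_fault_handler(struct iommu_domain *domain,
				       struct device *dev, unsigned long iova,
				       int flags, void *token)
{
	return -EBUSY;
}

/* Registered with iommu_set_fault_handler(domain, example_stall_fault_handler, NULL). */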
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5c7cfc51b57c..25f034677f56 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -413,6 +413,10 @@ static const struct arm_smmu_impl qcom_smmu_500_impl = {
.reset = arm_mmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+ .context_fault = qcom_smmu_context_fault,
+ .context_fault_needs_threaded_irq = true,
+#endif
};
static const struct arm_smmu_impl sdm845_smmu_500_impl = {
@@ -422,6 +426,10 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
.reset = qcom_sdm845_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+ .context_fault = qcom_smmu_context_fault,
+ .context_fault_needs_threaded_irq = true,
+#endif
};
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 593910567b88..9bb3ae7d62da 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -30,6 +30,8 @@ struct qcom_smmu_match_data {
const struct arm_smmu_impl *adreno_impl;
};
+irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
+
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
#else
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index c572d877b0e1..87c81f75cf84 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -806,8 +806,16 @@ static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
else
context_fault = arm_smmu_context_fault;
- ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
- "arm-smmu-context-fault", smmu_domain);
+ if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
+ context_fault,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "arm-smmu-context-fault",
+ smmu_domain);
+ else
+ ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", smmu_domain);
+
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -859,14 +867,10 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED) {
- if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
- return NULL;
- }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -1596,7 +1600,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
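
Editorial note (not part of the patch): the arm-smmu.c hunk above switches context-fault IRQ registration to a threaded handler when the implementation sets context_fault_needs_threaded_irq. A minimal sketch of that pattern for a hypothetical driver follows; only devm_request_irq()/devm_request_threaded_irq() and the IRQ flags are real kernel APIs, all other names are made up, and the fragment is meant for a kernel build, not standalone compilation. Passing a NULL hard handler together with IRQF_ONESHOT runs the handler entirely in thread context, where it may sleep.

/* --- editorial example, not part of the patch --- */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t my_fault_handler(int irq, void *cookie)
{
        /* May sleep when registered as a threaded handler below. */
        return IRQ_HANDLED;
}

static int my_request_fault_irq(struct device *dev, unsigned int irq,
                                void *cookie, bool needs_thread)
{
        if (needs_thread)
                return devm_request_threaded_irq(dev, irq, NULL,
                                                 my_fault_handler,
                                                 IRQF_ONESHOT | IRQF_SHARED,
                                                 "my-fault", cookie);

        return devm_request_irq(dev, irq, my_fault_handler, IRQF_SHARED,
                                "my-fault", cookie);
}
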
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 836ed6799a80..4765c6945c34 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -136,6 +136,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CBAR_VMID GENMASK(7, 0)
#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+#define ARM_SMMU_CBFRSYNRA_SID GENMASK(15, 0)
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16)
@@ -238,6 +239,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_ATSR_ACTIVE BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE BIT(0)
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
@@ -436,6 +438,7 @@ struct arm_smmu_impl {
int (*def_domain_type)(struct device *dev);
irqreturn_t (*global_fault)(int irq, void *dev);
irqreturn_t (*context_fault)(int irq, void *dev);
+ bool context_fault_needs_threaded_irq;
int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev, int start);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e4cb26f6a943..f731e4b2a417 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -32,6 +32,7 @@
#include <trace/events/swiotlb.h>
#include "dma-iommu.h"
+#include "iommu-pages.h"
struct iommu_dma_msi_page {
struct list_head list;
@@ -156,7 +157,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
if (fq->entries[idx].counter >= counter)
break;
- put_pages_list(&fq->entries[idx].freelist);
+ iommu_put_pages_list(&fq->entries[idx].freelist);
free_iova_fast(&cookie->iovad,
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
@@ -254,7 +255,7 @@ static void iommu_dma_free_fq_single(struct iova_fq *fq)
int idx;
fq_ring_for_each(idx, fq)
- put_pages_list(&fq->entries[idx].freelist);
+ iommu_put_pages_list(&fq->entries[idx].freelist);
vfree(fq);
}
@@ -267,7 +268,7 @@ static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
fq_ring_for_each(idx, fq)
- put_pages_list(&fq->entries[idx].freelist);
+ iommu_put_pages_list(&fq->entries[idx].freelist);
}
free_percpu(percpu_fq);
@@ -660,19 +661,16 @@ static void iommu_dma_init_options(struct iommu_dma_options *options,
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
- * @base: IOVA at which the mappable address space starts
- * @limit: Last address of the IOVA space
* @dev: Device the domain is being initialised for
*
- * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
- * avoid rounding surprises. If necessary, we reserve the page at address 0
+ * If the geometry and dma_range_map include address 0, we reserve that page
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
-static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
- dma_addr_t limit, struct device *dev)
+static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ const struct bus_dma_region *map = dev->dma_range_map;
unsigned long order, base_pfn;
struct iova_domain *iovad;
int ret;
@@ -684,18 +682,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
- base_pfn = max_t(unsigned long, 1, base >> order);
+ base_pfn = 1;
/* Check the domain allows at least some access to the device... */
- if (domain->geometry.force_aperture) {
+ if (map) {
+ dma_addr_t base = dma_range_map_min(map);
if (base > domain->geometry.aperture_end ||
- limit < domain->geometry.aperture_start) {
+ dma_range_map_max(map) < domain->geometry.aperture_start) {
pr_warn("specified DMA range outside IOMMU capability\n");
return -EFAULT;
}
/* ...then finally give it a kicking to make sure it fits */
- base_pfn = max_t(unsigned long, base_pfn,
- domain->geometry.aperture_start >> order);
+ base_pfn = max(base, domain->geometry.aperture_start) >> order;
}
/* start_pfn is always nonzero for an already-initialised domain */
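
Editorial note (not part of the patch): the rewritten initialisation above derives the IOVA base from the device's dma_range_map rather than from caller-supplied base/limit arguments. A quick userspace sketch of the resulting base_pfn arithmetic, with made-up example values:

/* --- editorial example, not part of the patch --- */
#include <stdio.h>

int main(void)
{
        unsigned int order = 12;                        /* 4KiB IOVA granule */
        unsigned long long map_min = 0x80000000ULL;     /* assumed dma-ranges start */
        unsigned long long aper_start = 0x1000ULL;      /* assumed aperture start */
        unsigned long long base = map_min > aper_start ? map_min : aper_start;

        /* With no range map at all, base_pfn stays at 1 so page 0 is reserved. */
        printf("base_pfn = %#llx\n", base >> order);
        return 0;
}
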
@@ -1154,9 +1152,6 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
*/
if (dev_use_swiotlb(dev, size, dir) &&
iova_offset(iovad, phys | size)) {
- void *padding_start;
- size_t padding_size, aligned_size;
-
if (!is_swiotlb_active(dev)) {
dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
return DMA_MAPPING_ERROR;
@@ -1164,24 +1159,30 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
trace_swiotlb_bounced(dev, phys, size);
- aligned_size = iova_align(iovad, size);
- phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+ phys = swiotlb_tbl_map_single(dev, phys, size,
iova_mask(iovad), dir, attrs);
if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
- /* Cleanup the padding area. */
- padding_start = phys_to_virt(phys);
- padding_size = aligned_size;
+ /*
+ * Untrusted devices should not see padding areas with random
+ * leftover kernel data, so zero the pre- and post-padding.
+ * swiotlb_tbl_map_single() has initialized the bounce buffer
+ * proper to the contents of the original memory buffer.
+ */
+ if (dev_is_untrusted(dev)) {
+ size_t start, virt = (size_t)phys_to_virt(phys);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
- padding_start += size;
- padding_size -= size;
- }
+ /* Pre-padding */
+ start = iova_align_down(iovad, virt);
+ memset((void *)start, 0, virt - start);
- memset(padding_start, 0, padding_size);
+ /* Post-padding */
+ start = virt + size;
+ memset((void *)start, 0,
+ iova_align(iovad, start) - start);
+ }
}
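
Editorial note (not part of the patch): the new pre-/post-padding zeroing above only touches the bytes between the bounce slot's IOVA-granule boundaries and the mapped data itself. The userspace sketch below reproduces that arithmetic; the granule size, buffer, and offsets are made-up stand-ins for the swiotlb slot.

/* --- editorial example, not part of the patch --- */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define GRANULE 4096UL          /* assumed IOVA granule */

static uintptr_t align_down(uintptr_t x) { return x & ~(GRANULE - 1); }
static uintptr_t align_up(uintptr_t x)   { return (x + GRANULE - 1) & ~(GRANULE - 1); }

static char bounce[2 * GRANULE] __attribute__((aligned(4096)));

int main(void)
{
        size_t offset = 100, size = 300;        /* data placement in the slot */
        uintptr_t virt = (uintptr_t)bounce + offset;
        uintptr_t start;

        memset(bounce, 0xaa, sizeof(bounce));   /* "leftover" kernel data */

        start = align_down(virt);               /* pre-padding */
        memset((void *)start, 0, virt - start);

        start = virt + size;                    /* post-padding */
        memset((void *)start, 0, align_up(start) - start);

        printf("byte 50=%#x (zeroed) byte 150=%#x (data) byte 500=%#x (zeroed)\n",
               bounce[50] & 0xff, bounce[150] & 0xff, bounce[500] & 0xff);
        return 0;
}
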
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1720,10 +1721,11 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
}
static const struct dma_map_ops iommu_dma_ops = {
- .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED |
+ DMA_F_CAN_SKIP_SYNC,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
.free_noncontiguous = iommu_dma_free_noncontiguous,
@@ -1744,25 +1746,20 @@ static const struct dma_map_ops iommu_dma_ops = {
.max_mapping_size = iommu_dma_max_mapping_size,
};
-/*
- * The IOMMU core code allocates the default DMA domain, which the underlying
- * IOMMU driver needs to support via the dma-iommu layer.
- */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- if (!domain)
- goto out_err;
+ if (dev_is_pci(dev))
+ dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
- /*
- * The IOMMU core code allocates the default DMA domain, which the
- * underlying IOMMU driver needs to support via the dma-iommu layer.
- */
if (iommu_is_dma_domain(domain)) {
- if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
+ if (iommu_dma_init_domain(domain, dev))
goto out_err;
dev->dma_ops = &iommu_dma_ops;
+ } else if (dev->dma_ops == &iommu_dma_ops) {
+ /* Clean up if we've switched *from* a DMA domain */
+ dev->dma_ops = NULL;
}
return;
@@ -1770,7 +1767,6 @@ out_err:
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
}
-EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index c829f1f82a99..c12d63457c76 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -9,6 +9,8 @@
#ifdef CONFIG_IOMMU_DMA
+void iommu_setup_dma_ops(struct device *dev);
+
int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);
@@ -17,13 +19,13 @@ int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
extern bool iommu_dma_forcedac;
-static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
-{
- dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
-}
#else /* CONFIG_IOMMU_DMA */
+static inline void iommu_setup_dma_ops(struct device *dev)
+{
+}
+
static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
return -EINVAL;
@@ -42,9 +44,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
{
}
-static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
-{
-}
-
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index d98c9161948a..c666ecab955d 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -22,6 +22,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "iommu-pages.h"
+
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;
@@ -900,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
if (!domain->lv2entcnt)
goto err_counter;
@@ -930,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- free_pages((unsigned long)domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt, 1);
err_counter:
- free_pages((unsigned long)domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable, 2);
err_pgtable:
kfree(domain);
return NULL;
@@ -973,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- free_pages((unsigned long)domain->pgtable, 2);
- free_pages((unsigned long)domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->lv2entcnt, 1);
kfree(domain);
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 5402b699a122..c8beb0281559 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o
+obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o
obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
new file mode 100644
index 000000000000..e8418cdd8331
--- /dev/null
+++ b/drivers/iommu/intel/cache.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cache.c - Intel VT-d cache invalidation
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/dmar.h>
+#include <linux/iommu.h>
+#include <linux/memory.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include "iommu.h"
+#include "pasid.h"
+#include "trace.h"
+
+/* Check if an existing cache tag can be reused for a new association. */
+static bool cache_tage_match(struct cache_tag *tag, u16 domain_id,
+ struct intel_iommu *iommu, struct device *dev,
+ ioasid_t pasid, enum cache_tag_type type)
+{
+ if (tag->type != type)
+ return false;
+
+ if (tag->domain_id != domain_id || tag->pasid != pasid)
+ return false;
+
+ if (type == CACHE_TAG_IOTLB || type == CACHE_TAG_NESTING_IOTLB)
+ return tag->iommu == iommu;
+
+ if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
+ return tag->dev == dev;
+
+ return false;
+}
+
+/* Assign a cache tag with specified type to domain. */
+static int cache_tag_assign(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid,
+ enum cache_tag_type type)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct cache_tag *tag, *temp;
+ unsigned long flags;
+
+ tag = kzalloc(sizeof(*tag), GFP_KERNEL);
+ if (!tag)
+ return -ENOMEM;
+
+ tag->type = type;
+ tag->iommu = iommu;
+ tag->domain_id = did;
+ tag->pasid = pasid;
+ tag->users = 1;
+
+ if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
+ tag->dev = dev;
+ else
+ tag->dev = iommu->iommu.dev;
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(temp, &domain->cache_tags, node) {
+ if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
+ temp->users++;
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+ kfree(tag);
+ trace_cache_tag_assign(temp);
+ return 0;
+ }
+ }
+ list_add_tail(&tag->node, &domain->cache_tags);
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+ trace_cache_tag_assign(tag);
+
+ return 0;
+}
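
Editorial note (not part of the patch): cache_tag_assign() above reuses an existing tag for an identical (type, domain ID, device/IOMMU, PASID) association and only bumps its users count; the tag is freed on the final unassign. The simplified userspace model below shows that reuse/release behaviour; the types and list handling are stand-ins, not the driver's structures.

/* --- editorial example, not part of the patch --- */
#include <stdio.h>
#include <stdlib.h>

struct tag {
        struct tag *next;
        int type, did, pasid;
        unsigned int users;
};

static struct tag *tags;

static void tag_assign(int type, int did, int pasid)
{
        struct tag *t;

        for (t = tags; t; t = t->next) {
                if (t->type == type && t->did == did && t->pasid == pasid) {
                        t->users++;
                        return;
                }
        }
        t = calloc(1, sizeof(*t));
        if (!t)
                return;
        t->type = type; t->did = did; t->pasid = pasid; t->users = 1;
        t->next = tags; tags = t;
}

static void tag_unassign(int type, int did, int pasid)
{
        struct tag **p, *t;

        for (p = &tags; (t = *p); p = &t->next) {
                if (t->type == type && t->did == did && t->pasid == pasid) {
                        if (--t->users == 0) {
                                *p = t->next;
                                free(t);
                        }
                        return;
                }
        }
}

int main(void)
{
        tag_assign(0, 1, 0);    /* first device in the domain */
        tag_assign(0, 1, 0);    /* second device behind the same IOMMU */
        tag_unassign(0, 1, 0);
        printf("after one unassign: %s, users=%u\n",
               tags ? "tag kept" : "tag freed", tags ? tags->users : 0);
        tag_unassign(0, 1, 0);
        printf("after second unassign: %s\n", tags ? "tag kept" : "tag freed");
        return 0;
}
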
+
+/* Unassign a cache tag with specified type from domain. */
+static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid,
+ enum cache_tag_type type)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct cache_tag *tag;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(tag, &domain->cache_tags, node) {
+ if (cache_tage_match(tag, did, iommu, dev, pasid, type)) {
+ trace_cache_tag_unassign(tag);
+ if (--tag->users == 0) {
+ list_del(&tag->node);
+ kfree(tag);
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+}
+
+static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+ if (ret || !info->ats_enabled)
+ return ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
+ if (ret)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+
+ return ret;
+}
+
+static void __cache_tag_unassign_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+
+ if (info->ats_enabled)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
+}
+
+static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+ if (ret || !info->ats_enabled)
+ return ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
+ if (ret)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+
+ return ret;
+}
+
+static void __cache_tag_unassign_parent_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+
+ if (info->ats_enabled)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
+}
+
+static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ /*
+ * The driver assigns different domain IDs for all domains except
+ * the SVA type.
+ */
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return FLPT_DEFAULT_DID;
+
+ return domain_id_iommu(domain, iommu);
+}
+
+/*
+ * Assign cache tags to a domain when it's associated with a device's
+ * PASID using a specific domain ID.
+ *
+ * On success (return value of 0), cache tags are created and added to the
+ * domain's cache tag list. On failure (negative return value), an error
+ * code is returned indicating the reason for the failure.
+ */
+int cache_tag_assign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ u16 did = domain_get_id_for_dev(domain, dev);
+ int ret;
+
+ ret = __cache_tag_assign_domain(domain, did, dev, pasid);
+ if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
+ return ret;
+
+ ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid);
+ if (ret)
+ __cache_tag_unassign_domain(domain, did, dev, pasid);
+
+ return ret;
+}
+
+/*
+ * Remove the cache tags associated with a device's PASID when the domain is
+ * detached from the device.
+ *
+ * The cache tags must be previously assigned to the domain by calling the
+ * assign interface.
+ */
+void cache_tag_unassign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ u16 did = domain_get_id_for_dev(domain, dev);
+
+ __cache_tag_unassign_domain(domain, did, dev, pasid);
+ if (domain->domain.type == IOMMU_DOMAIN_NESTED)
+ __cache_tag_unassign_parent_domain(domain->s2_domain, did, dev, pasid);
+}
+
+static unsigned long calculate_psi_aligned_address(unsigned long start,
+ unsigned long end,
+ unsigned long *_pages,
+ unsigned long *_mask)
+{
+ unsigned long pages = aligned_nrpages(start, end - start + 1);
+ unsigned long aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned long mask = ilog2(aligned_pages);
+ unsigned long pfn = IOVA_PFN(start);
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ *_pages = aligned_pages;
+ *_mask = mask;
+
+ return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
+}
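
Editorial note (not part of the patch): a worked example of the mask widening performed by calculate_psi_aligned_address() above, as a standalone userspace program (4KiB pages assumed). An unaligned 3-page range forces the mask to grow until a single aligned block covers the whole range.

/* --- editorial example, not part of the patch --- */
#include <stdio.h>

static unsigned long psi_mask(unsigned long pfn, unsigned long pages)
{
        unsigned long aligned = 1, bitmask, mask;

        while (aligned < pages)                 /* __roundup_pow_of_two() */
                aligned <<= 1;
        bitmask = aligned - 1;
        mask = __builtin_ctzl(aligned);         /* ilog2() of a power of two */

        if (bitmask & pfn) {                    /* base not aligned to the mask */
                unsigned long end_pfn = pfn + pages - 1;
                unsigned long shared = ~(pfn ^ end_pfn) & ~bitmask;

                mask = shared ? __builtin_ctzl(shared) : 8 * sizeof(long);
        }
        return mask;
}

int main(void)
{
        unsigned long pfn = 0x1003, pages = 3, mask = psi_mask(pfn, pages);

        printf("pfn %#lx, %lu pages -> mask %lu: flush %lu pages from pfn %#lx\n",
               pfn, pages, mask, 1UL << mask, pfn & ~((1UL << mask) - 1));
        return 0;
}
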
+
+/*
+ * Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
+ * when the memory mappings in the target domain have been modified.
+ */
+void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
+ unsigned long end, int ih)
+{
+ unsigned long pages, mask, addr;
+ struct cache_tag *tag;
+ unsigned long flags;
+
+ addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(tag, &domain->cache_tags, node) {
+ struct intel_iommu *iommu = tag->iommu;
+ struct device_domain_info *info;
+ u16 sid;
+
+ switch (tag->type) {
+ case CACHE_TAG_IOTLB:
+ case CACHE_TAG_NESTING_IOTLB:
+ if (domain->use_first_level) {
+ qi_flush_piotlb(iommu, tag->domain_id,
+ tag->pasid, addr, pages, ih);
+ } else {
+ /*
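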
+ * Fall back to a domain-selective flush if there
+ * is no PSI support or the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) ||
+ mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, tag->domain_id,
+ 0, 0, DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, tag->domain_id,
+ addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+ }
+ break;
+ case CACHE_TAG_NESTING_DEVTLB:
+ /*
+ * The address translation cache on the device side holds
+ * the results of nested translation. There is no easy way
+ * to identify the exact set of nested translations affected
+ * by a change in S2, so just flush the entire device cache.
+ */
+ addr = 0;
+ mask = MAX_AGAW_PFN_WIDTH;
+ fallthrough;
+ case CACHE_TAG_DEVTLB:
+ info = dev_iommu_priv_get(tag->dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+
+ if (tag->pasid == IOMMU_NO_PASID)
+ qi_flush_dev_iotlb(iommu, sid, info->pfsid,
+ info->ats_qdep, addr, mask);
+ else
+ qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
+ tag->pasid, info->ats_qdep,
+ addr, mask);
+
+ quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
+ break;
+ }
+
+ trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
+ }
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+}
+
+/*
+ * Invalidates all ranges of IOVA when the memory mappings in the target
+ * domain have been modified.
+ */
+void cache_tag_flush_all(struct dmar_domain *domain)
+{
+ struct cache_tag *tag;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(tag, &domain->cache_tags, node) {
+ struct intel_iommu *iommu = tag->iommu;
+ struct device_domain_info *info;
+ u16 sid;
+
+ switch (tag->type) {
+ case CACHE_TAG_IOTLB:
+ case CACHE_TAG_NESTING_IOTLB:
+ if (domain->use_first_level)
+ qi_flush_piotlb(iommu, tag->domain_id,
+ tag->pasid, 0, -1, 0);
+ else
+ iommu->flush.flush_iotlb(iommu, tag->domain_id,
+ 0, 0, DMA_TLB_DSI_FLUSH);
+ break;
+ case CACHE_TAG_DEVTLB:
+ case CACHE_TAG_NESTING_DEVTLB:
+ info = dev_iommu_priv_get(tag->dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+
+ qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+ 0, MAX_AGAW_PFN_WIDTH);
+ quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
+ IOMMU_NO_PASID, info->ats_qdep);
+ break;
+ }
+
+ trace_cache_tag_flush_all(tag);
+ }
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+}
+
+/*
+ * Invalidate a range of IOVA when new mappings are created in the target
+ * domain.
+ *
+ * - VT-d spec, Section 6.1 Caching Mode: When the CM field is reported as
+ * Set, any software updates to remapping structures other than first-
+ * stage mapping require explicit invalidation of the caches.
+ * - VT-d spec, Section 6.8 Write Buffer Flushing: For hardware that requires
+ * write buffer flushing, software must explicitly perform write-buffer
+ * flushing, if cache invalidation is not required.
+ */
+void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pages, mask, addr;
+ struct cache_tag *tag;
+ unsigned long flags;
+
+ addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(tag, &domain->cache_tags, node) {
+ struct intel_iommu *iommu = tag->iommu;
+
+ if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
+ iommu_flush_write_buffer(iommu);
+ continue;
+ }
+
+ if (tag->type == CACHE_TAG_IOTLB ||
+ tag->type == CACHE_TAG_NESTING_IOTLB) {
+ /*
+ * Fall back to a domain-selective flush if there
+ * is no PSI support or the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) ||
+ mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, tag->domain_id,
+ 0, 0, DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, tag->domain_id,
+ addr, mask,
+ DMA_TLB_PSI_FLUSH);
+ }
+
+ trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
+ }
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+}
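
Editorial note (not part of the patch): cache_tag_flush_range_np() above reduces the two quoted spec requirements to a per-IOMMU decision. Hardware without caching mode, or domains using first-stage tables, only needs a write-buffer flush for newly created mappings; otherwise the IOTLB is invalidated page-selectively when PSI can cover the range, falling back to a domain-selective flush. A compact userspace sketch of just that decision:

/* --- editorial example, not part of the patch --- */
#include <stdbool.h>
#include <stdio.h>

static const char *flush_for_new_mapping(bool caching_mode, bool first_level,
                                          bool has_psi, unsigned long mask,
                                          unsigned long max_amask)
{
        if (!caching_mode || first_level)
                return "write-buffer flush only";
        if (!has_psi || mask > max_amask)
                return "domain-selective IOTLB flush";
        return "page-selective IOTLB flush";
}

int main(void)
{
        printf("%s\n", flush_for_new_mapping(false, false, true, 3, 9));
        printf("%s\n", flush_for_new_mapping(true, false, true, 12, 9));
        printf("%s\n", flush_for_new_mapping(true, false, true, 3, 9));
        return 0;
}
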
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index 86b506af7daa..affbf4a1558d 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -706,7 +706,6 @@ static ssize_t dmar_perf_latency_write(struct file *filp,
dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
- dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
}
rcu_read_unlock();
break;
@@ -728,12 +727,6 @@ static ssize_t dmar_perf_latency_write(struct file *filp,
dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
rcu_read_unlock();
break;
- case 4:
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
- rcu_read_unlock();
- break;
default:
return -EINVAL;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 36d7427b1202..304e84949ca7 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -32,6 +32,7 @@
#include "iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
#include "perf.h"
#include "trace.h"
#include "perfmon.h"
@@ -1067,7 +1068,6 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
goto error_free_seq_id;
}
- err = -EINVAL;
if (!cap_sagaw(iommu->cap) &&
(!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
@@ -1187,7 +1187,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- free_page((unsigned long)iommu->qi->desc);
+ iommu_free_page(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1755,7 +1755,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
- struct page *desc_page;
+ void *desc;
+ int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1776,19 +1777,19 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
- !!ecap_smts(iommu->ecap));
- if (!desc_page) {
+ order = ecap_smts(iommu->ecap) ? 1 : 0;
+ desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ if (!desc) {
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
}
- qi->desc = page_address(desc_page);
+ qi->desc = desc;
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- free_page((unsigned long) qi->desc);
+ iommu_free_page(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
@@ -2122,7 +2123,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
return ret;
}
-int __init enable_drhd_fault_handling(void)
+int enable_drhd_fault_handling(unsigned int cpu)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
@@ -2132,7 +2133,12 @@ int __init enable_drhd_fault_handling(void)
*/
for_each_iommu(iommu, drhd) {
u32 fault_status;
- int ret = dmar_set_interrupt(iommu);
+ int ret;
+
+ if (iommu->irq || iommu->node != cpu_to_node(cpu))
+ continue;
+
+ ret = dmar_set_interrupt(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 50eb9aed47cc..2e9811bf2a4e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,6 +27,7 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"
@@ -54,11 +55,6 @@
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
@@ -221,12 +217,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
-static int dmar_map_gfx = 1;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
+static int disable_igfx_iommu;
-#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
@@ -265,7 +260,7 @@ static int __init intel_iommu_setup(char *str)
no_platform_optin = 1;
pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
@@ -298,22 +293,6 @@ static int __init intel_iommu_setup(char *str)
}
__setup("intel_iommu=", intel_iommu_setup);
-void *alloc_pgtable_page(int node, gfp_t gfp)
-{
- struct page *page;
- void *vaddr = NULL;
-
- page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
- if (page)
- vaddr = page_address(page);
- return vaddr;
-}
-
-void free_pgtable_page(void *vaddr)
-{
- free_page((unsigned long)vaddr);
-}
-
static int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -545,7 +524,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+ context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
if (!context)
return NULL;
@@ -719,17 +698,17 @@ static void free_context_table(struct intel_iommu *iommu)
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
- free_pgtable_page(context);
+ iommu_free_page(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
- free_pgtable_page(context);
+ iommu_free_page(context);
}
- free_pgtable_page(iommu->root_entry);
+ iommu_free_page(iommu->root_entry);
iommu->root_entry = NULL;
}
@@ -865,9 +844,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
break;
if (!dma_pte_present(pte)) {
- uint64_t pteval;
+ uint64_t pteval, tmp;
- tmp_page = alloc_pgtable_page(domain->nid, gfp);
+ tmp_page = iommu_alloc_page_node(domain->nid, gfp);
if (!tmp_page)
return NULL;
@@ -877,9 +856,10 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (domain->use_first_level)
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
- if (cmpxchg64(&pte->val, 0ULL, pteval))
+ tmp = 0ULL;
+ if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
- free_pgtable_page(tmp_page);
+ iommu_free_page(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
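
Editorial note (not part of the patch): the hunk above converts cmpxchg64() to try_cmpxchg64(), which returns a boolean and, on failure, writes the observed value back into its "expected" argument so no separate re-read is needed. The C11 equivalent below shows the same pattern in userspace; the values are arbitrary.

/* --- editorial example, not part of the patch --- */
#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        _Atomic uint64_t pte = 0;
        uint64_t expected = 0, newval = 0xabc;

        if (atomic_compare_exchange_strong(&pte, &expected, newval))
                printf("installed %#" PRIx64 "\n", newval);

        expected = 0;           /* try again: someone else already set it */
        if (!atomic_compare_exchange_strong(&pte, &expected, 0xdef))
                printf("lost the race, current value is %#" PRIx64 "\n", expected);
        return 0;
}
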
@@ -992,7 +972,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
- free_pgtable_page(level_pte);
+ iommu_free_page(level_pte);
}
next:
pfn += level_size(level);
@@ -1016,7 +996,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- free_pgtable_page(domain->pgd);
+ iommu_free_page(domain->pgd);
domain->pgd = NULL;
}
}
@@ -1118,7 +1098,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+ root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -1394,197 +1374,9 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}
-static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
- u64 addr, unsigned mask)
-{
- struct dev_pasid_info *dev_pasid;
- struct device_domain_info *info;
- unsigned long flags;
-
- if (!domain->has_iotlb_device)
- return;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry(info, &domain->devices, link)
- __iommu_flush_dev_iotlb(info, addr, mask);
-
- list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
- info = dev_iommu_priv_get(dev_pasid->dev);
-
- if (!info->ats_enabled)
- continue;
-
- qi_flush_dev_iotlb_pasid(info->iommu,
- PCI_DEVID(info->bus, info->devfn),
- info->pfsid, dev_pasid->pasid,
- info->ats_qdep, addr,
- mask);
- }
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
- struct dmar_domain *domain, u64 addr,
- unsigned long npages, bool ih)
-{
- u16 did = domain_id_iommu(domain, iommu);
- struct dev_pasid_info *dev_pasid;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
- qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
-
- if (!list_empty(&domain->devices))
- qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages,
- int ih)
-{
- unsigned int aligned_pages = __roundup_pow_of_two(pages);
- unsigned long bitmask = aligned_pages - 1;
- unsigned int mask = ilog2(aligned_pages);
- u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
-}
-
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages,
- int ih, int map)
-{
- unsigned int aligned_pages = __roundup_pow_of_two(pages);
- unsigned int mask = ilog2(aligned_pages);
- uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain_id_iommu(domain, iommu);
-
- if (WARN_ON(!pages))
- return;
-
- if (ih)
- ih = 1 << 6;
-
- if (domain->use_first_level)
- domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- else
- __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
-
- /*
- * In caching mode, changes of pages from non-present to present require
- * flush. However, device IOTLB doesn't need to be flushed in this case.
- */
- if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(domain, addr, mask);
-}
-
-/* Notification for newly created mappings */
-static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages)
-{
- /*
- * It's a non-present to present mapping. Only flush if caching mode
- * and second level.
- */
- if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
- iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
-}
-
-/*
- * Flush the relevant caches in nested translation if the domain
- * also serves as a parent
- */
-static void parent_domain_flush(struct dmar_domain *domain,
- unsigned long pfn,
- unsigned long pages, int ih)
-{
- struct dmar_domain *s1_domain;
-
- spin_lock(&domain->s1_lock);
- list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
- struct device_domain_info *device_info;
- struct iommu_domain_info *info;
- unsigned long flags;
- unsigned long i;
-
- xa_for_each(&s1_domain->iommu_array, i, info)
- __iommu_flush_iotlb_psi(info->iommu, info->did,
- pfn, pages, ih);
-
- if (!s1_domain->has_iotlb_device)
- continue;
-
- spin_lock_irqsave(&s1_domain->lock, flags);
- list_for_each_entry(device_info, &s1_domain->devices, link)
- /*
- * Address translation cache in device side caches the
- * result of nested translation. There is no easy way
- * to identify the exact set of nested translations
- * affected by a change in S2. So just flush the entire
- * device cache.
- */
- __iommu_flush_dev_iotlb(device_info, 0,
- MAX_AGAW_PFN_WIDTH);
- spin_unlock_irqrestore(&s1_domain->lock, flags);
- }
- spin_unlock(&domain->s1_lock);
-}
-
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct iommu_domain_info *info;
- unsigned long idx;
-
- xa_for_each(&dmar_domain->iommu_array, idx, info) {
- struct intel_iommu *iommu = info->iommu;
- u16 did = domain_id_iommu(dmar_domain, iommu);
-
- if (dmar_domain->use_first_level)
- domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
- else
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
-
- if (!cap_caching_mode(iommu->cap))
- iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
- }
-
- if (dmar_domain->nested_parent)
- parent_domain_flush(dmar_domain, 0, -1, 0);
+ cache_tag_flush_all(to_dmar_domain(domain));
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1750,7 +1542,9 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
+ INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->lock);
+ spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
return domain;
@@ -1762,6 +1556,9 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
unsigned long ndomains;
int num, ret = -ENOSPC;
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return 0;
+
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1809,6 +1606,9 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info;
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return;
+
spin_lock(&iommu->lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
@@ -1841,7 +1641,7 @@ static void domain_exit(struct dmar_domain *domain)
LIST_HEAD(freelist);
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
- put_pages_list(&freelist);
+ iommu_put_pages_list(&freelist);
}
if (WARN_ON(!list_empty(&domain->devices)))
@@ -1988,13 +1788,6 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
domain_context_mapping_cb, domain);
}
-/* Returns a number of VTD pages, but aligned to MM page size */
-static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
-{
- host_addr &= ~PAGE_MASK;
- return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
/* Return largest possible superpage level for a given mapping */
static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phy_pfn, unsigned long pages)
@@ -2031,9 +1824,7 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
- struct iommu_domain_info *info;
struct dma_pte *pte = NULL;
- unsigned long i;
while (start_pfn <= end_pfn) {
if (!pte)
@@ -2045,13 +1836,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
start_pfn + lvl_pages - 1,
level + 1);
- xa_for_each(&domain->iommu_array, i, info)
- iommu_flush_iotlb_psi(info->iommu, domain,
- start_pfn, lvl_pages,
- 0, 0);
- if (domain->nested_parent)
- parent_domain_flush(domain, start_pfn,
- lvl_pages, 0);
+ cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
+ end_pfn << VTD_PAGE_SHIFT, 0);
}
pte++;
@@ -2128,8 +1914,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
/* We don't need lock here, nobody else
* touches the iova range
*/
- tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
- if (tmp) {
+ tmp = 0ULL;
+ if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) {
static int dumps = 5;
pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
iov_pfn, tmp, (unsigned long long)pteval);
@@ -2327,6 +2113,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
+
+ ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+ if (ret) {
+ domain_detach_iommu(domain, iommu);
+ return ret;
+ }
+
info->domain = domain;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
@@ -2402,9 +2195,6 @@ static int device_def_domain_type(struct device *dev)
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return IOMMU_DOMAIN_IDENTITY;
-
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return IOMMU_DOMAIN_IDENTITY;
}
return 0;
@@ -2497,7 +2287,7 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
+ new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
if (!new_ce)
goto out_unmap;
@@ -2705,9 +2495,6 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
}
- if (!dmar_map_gfx)
- iommu_identity_mapping |= IDENTMAP_GFX;
-
check_tylersburg_isoch();
ret = si_domain_init(hw_pass_through);
@@ -2798,7 +2585,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx)
+ if (disable_igfx_iommu)
drhd->ignored = 1;
}
}
@@ -3414,19 +3201,10 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
case MEM_OFFLINE:
case MEM_CANCEL_ONLINE:
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
LIST_HEAD(freelist);
domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- iommu_flush_iotlb_psi(iommu, si_domain,
- start_vpfn, mhp->nr_pages,
- list_empty(&freelist), 0);
- rcu_read_unlock();
- put_pages_list(&freelist);
+ iommu_put_pages_list(&freelist);
}
break;
}
@@ -3815,6 +3593,7 @@ void device_block_translation(struct device *dev)
list_del(&info->link);
spin_unlock_irqrestore(&info->domain->lock, flags);
+ cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
domain_detach_iommu(info->domain, iommu);
info->domain = NULL;
}
@@ -3833,7 +3612,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->max_addr = 0;
/* always allocate the top pgd */
- domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+ domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
if (!domain->pgd)
return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -3882,8 +3661,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return domain;
case IOMMU_DOMAIN_IDENTITY:
return &si_domain->domain;
- case IOMMU_DOMAIN_SVA:
- return intel_svm_domain_alloc();
default:
return NULL;
}
@@ -3987,7 +3764,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
- free_pgtable_page(pte);
+ iommu_free_page(pte);
}
dmar_domain->agaw--;
}
@@ -4122,26 +3899,9 @@ static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long iova_pfn = IOVA_PFN(gather->start);
- size_t size = gather->end - gather->start;
- struct iommu_domain_info *info;
- unsigned long start_pfn;
- unsigned long nrpages;
- unsigned long i;
-
- nrpages = aligned_nrpages(gather->start, size);
- start_pfn = mm_to_dma_pfn_start(iova_pfn);
-
- xa_for_each(&dmar_domain->iommu_array, i, info)
- iommu_flush_iotlb_psi(info->iommu, dmar_domain,
- start_pfn, nrpages,
- list_empty(&gather->freelist), 0);
-
- if (dmar_domain->nested_parent)
- parent_domain_flush(dmar_domain, start_pfn, nrpages,
- list_empty(&gather->freelist));
- put_pages_list(&gather->freelist);
+ cache_tag_flush_range(to_dmar_domain(domain), gather->start,
+ gather->end, list_empty(&gather->freelist));
+ iommu_put_pages_list(&gather->freelist);
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4299,9 +4059,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
- ret = device_rbtree_insert(iommu, info);
- if (ret)
- goto free;
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4098,8 @@ static void intel_iommu_release_device(struct device *dev)
struct intel_iommu *iommu = info->iommu;
mutex_lock(&iommu->iopf_lock);
- device_rbtree_remove(info);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
@@ -4349,12 +4112,6 @@ static void intel_iommu_release_device(struct device *dev)
set_dma_ops(dev, NULL);
}
-static void intel_iommu_probe_finalize(struct device *dev)
-{
- set_dma_ops(dev, NULL);
- iommu_setup_dma_ops(dev, 0, U64_MAX);
-}
-
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
@@ -4576,41 +4333,20 @@ static bool risky_device(struct pci_dev *pdev)
static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long pages = aligned_nrpages(iova, size);
- unsigned long pfn = iova >> VTD_PAGE_SHIFT;
- struct iommu_domain_info *info;
- unsigned long i;
+ cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
- xa_for_each(&dmar_domain->iommu_array, i, info)
- __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
return 0;
}
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dev_pasid_info *curr, *dev_pasid = NULL;
struct intel_iommu *iommu = info->iommu;
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
unsigned long flags;
- domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
- if (WARN_ON_ONCE(!domain))
- goto out_tear_down;
-
- /*
- * The SVA implementation needs to handle its own stuffs like the mm
- * notification. Before consolidating that code into iommu core, let
- * the intel sva code handle it.
- */
- if (domain->type == IOMMU_DOMAIN_SVA) {
- intel_svm_remove_dev_pasid(dev, pasid);
- goto out_tear_down;
- }
-
- dmar_domain = to_dmar_domain(domain);
spin_lock_irqsave(&dmar_domain->lock, flags);
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
if (curr->dev == dev && curr->pasid == pasid) {
@@ -4622,10 +4358,10 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
kfree(dev_pasid);
-out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
intel_drain_pasid_prq(dev, pasid);
}
@@ -4661,6 +4397,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (ret)
goto out_free;
+ ret = cache_tag_assign_domain(dmar_domain, dev, pasid);
+ if (ret)
+ goto out_detach_iommu;
+
if (domain_type_is_si(dmar_domain))
ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
else if (dmar_domain->use_first_level)
@@ -4670,7 +4410,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
ret = intel_pasid_setup_second_level(iommu, dmar_domain,
dev, pasid);
if (ret)
- goto out_detach_iommu;
+ goto out_unassign_tag;
dev_pasid->dev = dev;
dev_pasid->pasid = pasid;
@@ -4682,6 +4422,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
intel_iommu_debugfs_create_dev_pasid(dev_pasid);
return 0;
+out_unassign_tag:
+ cache_tag_unassign_domain(dmar_domain, dev, pasid);
out_detach_iommu:
domain_detach_iommu(dmar_domain, iommu);
out_free:
@@ -4838,8 +4580,8 @@ const struct iommu_ops intel_iommu_ops = {
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
.domain_alloc_user = intel_iommu_domain_alloc_user,
+ .domain_alloc_sva = intel_svm_domain_alloc,
.probe_device = intel_iommu_probe_device,
- .probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
@@ -4872,7 +4614,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
return;
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
}
/* G4x/GM45 integrated gfx dmar support is totally busted. */
@@ -4953,8 +4695,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- } else if (dmar_map_gfx) {
+ disable_igfx_iommu = 1;
+ } else if (!disable_igfx_iommu) {
/* we have to ensure the gfx device is idle before we flush */
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
iommu_set_dma_strict();
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 404d2476a877..eaf015b4353b 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -35,6 +35,8 @@
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
@@ -455,7 +457,6 @@ enum {
/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
@@ -607,6 +608,9 @@ struct dmar_domain {
struct list_head devices; /* all devices' list */
struct list_head dev_pasids; /* all attached pasids */
+ spinlock_t cache_lock; /* Protect the cache tag list */
+ struct list_head cache_tags; /* Cache tag list */
+
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
@@ -644,6 +648,11 @@ struct dmar_domain {
/* link to parent domain siblings */
struct list_head s2_link;
};
+
+ /* SVA domain */
+ struct {
+ struct mmu_notifier notifier;
+ };
};
struct iommu_domain domain; /* generic domain data structure for
@@ -1038,6 +1047,19 @@ static inline void context_set_sm_pre(struct context_entry *context)
context->lo |= BIT_ULL(4);
}
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
+{
+ host_addr &= ~PAGE_MASK;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
+/* Return a size from number of VTD pages. */
+static inline unsigned long nrpages_to_size(unsigned long npages)
+{
+ return npages << VTD_PAGE_SHIFT;
+}
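
Editorial note (not part of the patch): the two helpers above round a host-address/size pair up to whole VT-d pages and back. A small userspace check of aligned_nrpages() with made-up values (4KiB pages assumed): an unaligned 0x100-byte buffer at page offset 0x234 still occupies a single VT-d page.

/* --- editorial example, not part of the patch --- */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, vtd_shift = 12;
        unsigned long host_addr = 0x1234, size = 0x100;
        unsigned long off = host_addr & (page_size - 1);        /* host_addr &= ~PAGE_MASK */
        unsigned long nrpages =
                ((off + size + page_size - 1) & ~(page_size - 1)) >> vtd_shift;

        printf("aligned_nrpages(%#lx, %#lx) = %lu\n", host_addr, size, nrpages);
        return 0;
}
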
+
/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds) (((pds) & 0x7) << 9)
@@ -1085,48 +1107,60 @@ void domain_update_iommu_cap(struct dmar_domain *domain);
int dmar_ir_support(void);
-void *alloc_pgtable_page(int node, gfp_t gfp);
-void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
+enum cache_tag_type {
+ CACHE_TAG_IOTLB,
+ CACHE_TAG_DEVTLB,
+ CACHE_TAG_NESTING_IOTLB,
+ CACHE_TAG_NESTING_DEVTLB,
+};
+
+struct cache_tag {
+ struct list_head node;
+ enum cache_tag_type type;
+ struct intel_iommu *iommu;
+ /*
+ * The @dev field represents the location of the cache. For an IOTLB
+ * tag, the cache resides on the IOMMU hardware and @dev points to that
+ * IOMMU's device. For a DevTLB tag, the cache resides in the PCIe
+ * endpoint and @dev points to that endpoint.
+ */
+ struct device *dev;
+ u16 domain_id;
+ ioasid_t pasid;
+ unsigned int users;
+};
+
+int cache_tag_assign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void cache_tag_unassign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
+ unsigned long end, int ih);
+void cache_tag_flush_all(struct dmar_domain *domain);
+void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
+ unsigned long end);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_page_response *msg);
-struct iommu_domain *intel_svm_domain_alloc(void);
-void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
+struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
+ struct mm_struct *mm);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct intel_iommu *iommu;
- u16 did;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
- u32 pasid;
- struct list_head devs;
-};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
-static inline struct iommu_domain *intel_svm_domain_alloc(void)
-{
- return NULL;
-}
-
-static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
{
+ return ERR_PTR(-ENODEV);
}
#endif
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 566297bc87dd..e4a70886678c 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -19,9 +19,11 @@
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include <asm/posted_intr.h>
#include "iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
#include "cap_audit.h"
enum irq_mode {
@@ -49,6 +51,7 @@ struct irq_2_iommu {
u16 sub_handle;
u8 irte_mask;
enum irq_mode mode;
+ bool posted_msi;
};
struct intel_ir_data {
@@ -82,7 +85,7 @@ static const struct irq_domain_ops intel_ir_domain_ops;
static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);
-static const struct msi_parent_ops dmar_msi_parent_ops, virt_dmar_msi_parent_ops;
+static const struct msi_parent_ops dmar_msi_parent_ops;
static bool ir_pre_enabled(struct intel_iommu *iommu)
{
@@ -527,7 +530,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
struct ir_table *ir_table;
struct fwnode_handle *fn;
unsigned long *bitmap;
- struct page *pages;
+ void *ir_table_base;
if (iommu->ir_table)
return 0;
@@ -536,9 +539,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (!ir_table)
return -ENOMEM;
- pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
- INTR_REMAP_PAGE_ORDER);
- if (!pages) {
+ ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
+ INTR_REMAP_PAGE_ORDER);
+ if (!ir_table_base) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
goto out_free_table;
@@ -567,13 +570,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
IRQ_DOMAIN_FLAG_ISOLATED_MSI;
+ iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
- if (cap_caching_mode(iommu->cap))
- iommu->ir_domain->msi_parent_ops = &virt_dmar_msi_parent_ops;
- else
- iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
-
- ir_table->base = page_address(pages);
+ ir_table->base = ir_table_base;
ir_table->bitmap = bitmap;
iommu->ir_table = ir_table;
@@ -622,7 +621,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
- __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
out_free_table:
kfree(ir_table);
@@ -643,8 +642,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
- free_pages((unsigned long)iommu->ir_table->base,
- INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
@@ -1118,6 +1116,14 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
irte->redir_hint = 1;
}
+static void prepare_irte_posted(struct irte *irte)
+{
+ memset(irte, 0, sizeof(*irte));
+
+ irte->present = 1;
+ irte->p_pst = 1;
+}
+
struct irq_remap_ops intel_irq_remap_ops = {
.prepare = intel_prepare_irq_remapping,
.enable = intel_enable_irq_remapping,
@@ -1126,6 +1132,47 @@ struct irq_remap_ops intel_irq_remap_ops = {
.enable_faulting = enable_drhd_fault_handling,
};
+#ifdef CONFIG_X86_POSTED_MSI
+
+static phys_addr_t get_pi_desc_addr(struct irq_data *irqd)
+{
+ int cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
+
+ if (WARN_ON(cpu >= nr_cpu_ids))
+ return 0;
+
+ return __pa(per_cpu_ptr(&posted_msi_pi_desc, cpu));
+}
+
+static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+ struct irte *irte = &ir_data->irte_entry;
+ struct irte irte_pi;
+ u64 pid_addr;
+
+ pid_addr = get_pi_desc_addr(irqd);
+
+ if (!pid_addr) {
+		pr_warn("Failed to set up IRQ %d for posted mode\n", irqd->irq);
+ return;
+ }
+
+ memset(&irte_pi, 0, sizeof(irte_pi));
+
+	/* The shared IRTE has already been set up as posted during alloc_irte */
+ dmar_copy_shared_irte(&irte_pi, irte);
+
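+	/*
+	 * Split the 64-byte aligned PID physical address into the low and
+	 * high address fields of the posted-format IRTE.
+	 */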
+ irte_pi.pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
+ irte_pi.pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT);
+
+ modify_irte(&ir_data->irq_2_iommu, &irte_pi);
+}
+
+#else
+static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
+#endif
+
static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
struct intel_ir_data *ir_data = irqd->chip_data;
@@ -1139,8 +1186,9 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->vector = cfg->vector;
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
- /* Update the hardware only if the interrupt is in remapped mode. */
- if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ if (ir_data->irq_2_iommu.posted_msi)
+ intel_ir_reconfigure_irte_posted(irqd);
+ else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
modify_irte(&ir_data->irq_2_iommu, irte);
}
@@ -1194,7 +1242,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
struct intel_ir_data *ir_data = data->chip_data;
struct vcpu_data *vcpu_pi_info = info;
- /* stop posting interrupts, back to remapping mode */
+ /* stop posting interrupts, back to the default mode */
if (!vcpu_pi_info) {
modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
} else {
@@ -1233,6 +1281,49 @@ static struct irq_chip intel_ir_chip = {
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
};
+/*
+ * With posted MSIs, all vectors are multiplexed into a single notification
+ * vector. Device MSIs are then dispatched in a demux loop where
+ * EOIs can be coalesced as well.
+ *
+ * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
+ * function. Instead EOI is performed by the posted interrupt notification
+ * handler.
+ *
+ * For the example below, 3 MSIs are coalesced into one CPU notification. Only
+ * one apic_eoi() is needed.
+ *
+ * __sysvec_posted_msi_notification()
+ * irq_enter();
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * apic_eoi()
+ * irq_exit()
+ */
+
+static void dummy_ack(struct irq_data *d) { }
+
+static struct irq_chip intel_ir_chip_post_msi = {
+ .name = "INTEL-IR-POST",
+ .irq_ack = dummy_ack,
+ .irq_set_affinity = intel_ir_set_affinity,
+ .irq_compose_msi_msg = intel_ir_compose_msi_msg,
+ .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
+};
+
static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{
memset(msg, 0, sizeof(*msg));
@@ -1274,6 +1365,11 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
break;
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ if (posted_msi_supported()) {
+ prepare_irte_posted(irte);
+ data->irq_2_iommu.posted_msi = 1;
+ }
+
set_msi_sid(irte,
pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
break;
@@ -1361,7 +1457,12 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data->hwirq = (index << 16) + i;
irq_data->chip_data = ird;
- irq_data->chip = &intel_ir_chip;
+ if (posted_msi_supported() &&
+ ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) ||
+ (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX)))
+ irq_data->chip = &intel_ir_chip_post_msi;
+ else
+ irq_data->chip = &intel_ir_chip;
intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
@@ -1421,20 +1522,11 @@ static const struct irq_domain_ops intel_ir_domain_ops = {
};
static const struct msi_parent_ops dmar_msi_parent_ops = {
- .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
- MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_IMS,
+ .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
.prefix = "IR-",
.init_dev_msi_info = msi_parent_init_dev_msi_info,
};
-static const struct msi_parent_ops virt_dmar_msi_parent_ops = {
- .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
- MSI_FLAG_MULTI_PCI_MSI,
- .prefix = "vIR-",
- .init_dev_msi_info = msi_parent_init_dev_msi_info,
-};
-
/*
* Support of Interrupt Remapping Unit Hotplug
*/
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index a7d68f3d518a..16a2bcf5cfeb 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -52,13 +52,14 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
return ret;
}
+ ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
+ if (ret)
+ goto detach_iommu;
+
ret = intel_pasid_setup_nested(iommu, dev,
IOMMU_NO_PASID, dmar_domain);
- if (ret) {
- domain_detach_iommu(dmar_domain, iommu);
- dev_err_ratelimited(dev, "Failed to setup pasid entry\n");
- return ret;
- }
+ if (ret)
+ goto unassign_tag;
info->domain = dmar_domain;
spin_lock_irqsave(&dmar_domain->lock, flags);
@@ -68,6 +69,12 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
domain_update_iotlb(dmar_domain);
return 0;
+unassign_tag:
+ cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
+detach_iommu:
+ domain_detach_iommu(dmar_domain, iommu);
+
+ return ret;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
@@ -81,50 +88,6 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
kfree(dmar_domain);
}
-static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
- unsigned int mask)
-{
- struct device_domain_info *info;
- unsigned long flags;
- u16 sid, qdep;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
- quirk_extra_dev_tlb_flush(info, addr, mask,
- IOMMU_NO_PASID, qdep);
- }
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
- u64 npages, bool ih)
-{
- struct iommu_domain_info *info;
- unsigned int mask;
- unsigned long i;
-
- xa_for_each(&domain->iommu_array, i, info)
- qi_flush_piotlb(info->iommu,
- domain_id_iommu(domain, info->iommu),
- IOMMU_NO_PASID, addr, npages, ih);
-
- if (!domain->has_iotlb_device)
- return;
-
- if (npages == U64_MAX)
- mask = 64 - VTD_PAGE_SHIFT;
- else
- mask = ilog2(__roundup_pow_of_two(npages));
-
- nested_flush_dev_iotlb(domain, addr, mask);
-}
-
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
struct iommu_user_data_array *array)
{
@@ -157,9 +120,9 @@ static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
break;
}
- intel_nested_flush_cache(dmar_domain, inv_entry.addr,
- inv_entry.npages,
- inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+ cache_tag_flush_range(dmar_domain, inv_entry.addr,
+ inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
+ inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
processed++;
}
@@ -206,7 +169,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
domain->domain.type = IOMMU_DOMAIN_NESTED;
INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
+ INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->lock);
+ spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
spin_lock(&s2_domain->s1_lock);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 11f0b856d74c..abce19e2ad6f 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -20,6 +20,7 @@
#include "iommu.h"
#include "pasid.h"
+#include "../iommu-pages.h"
/*
* Intel IOMMU system wide PASID name space:
@@ -38,7 +39,7 @@ int intel_pasid_alloc_table(struct device *dev)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
- struct page *pages;
+ struct pasid_dir_entry *dir;
u32 max_pasid = 0;
int order, size;
@@ -59,14 +60,13 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- pages = alloc_pages_node(info->iommu->node,
- GFP_KERNEL | __GFP_ZERO, order);
- if (!pages) {
+ dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+ if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
- pasid_table->table = page_address(pages);
+ pasid_table->table = dir;
pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- free_pgtable_page(table);
+ iommu_free_page(table);
}
- free_pages((unsigned long)pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table, pasid_table->order);
kfree(pasid_table);
}
@@ -146,7 +146,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
- entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
+ entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
if (!entries)
return NULL;
@@ -158,7 +158,7 @@ retry:
*/
if (cmpxchg64(&dir[dir_index].val, 0ULL,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- free_pgtable_page(entries);
+ iommu_free_page(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h
index fd6db8049d1a..df9a36942d64 100644
--- a/drivers/iommu/intel/perf.h
+++ b/drivers/iommu/intel/perf.h
@@ -11,7 +11,6 @@ enum latency_type {
DMAR_LATENCY_INV_IOTLB = 0,
DMAR_LATENCY_INV_DEVTLB,
DMAR_LATENCY_INV_IEC,
- DMAR_LATENCY_PRQ,
DMAR_LATENCY_NUM
};
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index cf43e798eca4..44083d01852d 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
- iommu_pmu_set_filter(pasid, event->attr.config1,
+ iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index c1bed89b1026..0e3a9b38bef2 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,57 +22,22 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
+#include "../iommu-pages.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static DEFINE_XARRAY_ALLOC(pasid_private_array);
-static int pasid_private_add(ioasid_t pasid, void *priv)
-{
- return xa_alloc(&pasid_private_array, &pasid, priv,
- XA_LIMIT(pasid, pasid), GFP_ATOMIC);
-}
-
-static void pasid_private_remove(ioasid_t pasid)
-{
- xa_erase(&pasid_private_array, pasid);
-}
-
-static void *pasid_private_find(ioasid_t pasid)
-{
- return xa_load(&pasid_private_array, pasid);
-}
-
-static struct intel_svm_dev *
-svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
-{
- struct intel_svm_dev *sdev = NULL, *t;
-
- rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
- if (t->dev == dev) {
- sdev = t;
- break;
- }
- }
- rcu_read_unlock();
-
- return sdev;
-}
-
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
struct iopf_queue *iopfq;
- struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
- if (!pages) {
+ iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+ if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
return -ENOMEM;
}
- iommu->prq = page_address(pages);
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
if (irq <= 0) {
@@ -117,7 +82,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
- free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
return ret;
@@ -140,7 +105,7 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
- free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
return 0;
@@ -168,94 +133,32 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
-static void __flush_svm_range_dev(struct intel_svm *svm,
- struct intel_svm_dev *sdev,
- unsigned long address,
- unsigned long pages, int ih)
-{
- struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
-
- if (WARN_ON(!pages))
- return;
-
- qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
- if (info->ats_enabled) {
- qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
- svm->pasid, sdev->qdep, address,
- order_base_2(pages));
- quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
- svm->pasid, sdev->qdep);
- }
-}
-
-static void intel_flush_svm_range_dev(struct intel_svm *svm,
- struct intel_svm_dev *sdev,
- unsigned long address,
- unsigned long pages, int ih)
-{
- unsigned long shift = ilog2(__roundup_pow_of_two(pages));
- unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
- unsigned long start = ALIGN_DOWN(address, align);
- unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
-
- while (start < end) {
- __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
- start += align;
- }
-}
-
-static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
- unsigned long pages, int ih)
-{
- struct intel_svm_dev *sdev;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
- rcu_read_unlock();
-}
-
-static void intel_flush_svm_all(struct intel_svm *svm)
-{
- struct device_domain_info *info;
- struct intel_svm_dev *sdev;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list) {
- info = dev_iommu_priv_get(sdev->dev);
-
- qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
- if (info->ats_enabled) {
- qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
- svm->pasid, sdev->qdep,
- 0, 64 - VTD_PAGE_SHIFT);
- quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
- svm->pasid, sdev->qdep);
- }
- }
- rcu_read_unlock();
-}
-
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier);
- if (start == 0 && end == -1UL) {
- intel_flush_svm_all(svm);
+ if (start == 0 && end == ULONG_MAX) {
+ cache_tag_flush_all(domain);
return;
}
- intel_flush_svm_range(svm, start,
- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+ /*
+	 * mm_types defines vm_end as the first byte after the end address,
+	 * whereas the IOMMU subsystem uses the last address of an address
+	 * range.
+ */
+ cache_tag_flush_range(domain, start, end - 1, 0);
}
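For example, an mm-side invalidation of the half-open range [0x1000, 0x3000) is passed on to the IOMMU side as cache_tag_flush_range(domain, 0x1000, 0x2fff, 0).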
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
- struct intel_svm_dev *sdev;
+ struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier);
+ struct dev_pasid_info *dev_pasid;
+ struct device_domain_info *info;
+ unsigned long flags;
/* This might end up being called from exit_mmap(), *before* the page
* tables are cleared. And __mmu_notifier_release() will delete us from
@@ -269,157 +172,78 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* page) so that we end up taking a fault that the hardware really
* *has* to handle gracefully without affecting other processes.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
- svm->pasid, true);
- rcu_read_unlock();
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+ info = dev_iommu_priv_get(dev_pasid->dev);
+ intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
+ dev_pasid->pasid, true);
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void intel_mm_free_notifier(struct mmu_notifier *mn)
+{
+ kfree(container_of(mn, struct dmar_domain, notifier));
}
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
+ .free_notifier = intel_mm_free_notifier,
};
-static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
- struct intel_svm **rsvm,
- struct intel_svm_dev **rsdev)
-{
- struct intel_svm_dev *sdev = NULL;
- struct intel_svm *svm;
-
- if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
- return -EINVAL;
-
- svm = pasid_private_find(pasid);
- if (IS_ERR(svm))
- return PTR_ERR(svm);
-
- if (!svm)
- goto out;
-
- /*
- * If we found svm for the PASID, there must be at least one device
- * bond.
- */
- if (WARN_ON(list_empty(&svm->devs)))
- return -EINVAL;
- sdev = svm_lookup_device_by_dev(svm, dev);
-
-out:
- *rsvm = svm;
- *rsdev = sdev;
-
- return 0;
-}
-
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
- struct intel_svm_dev *sdev;
- struct intel_svm *svm;
+ struct dev_pasid_info *dev_pasid;
unsigned long sflags;
+ unsigned long flags;
int ret = 0;
- svm = pasid_private_find(pasid);
- if (!svm) {
- svm = kzalloc(sizeof(*svm), GFP_KERNEL);
- if (!svm)
- return -ENOMEM;
-
- svm->pasid = pasid;
- svm->mm = mm;
- INIT_LIST_HEAD_RCU(&svm->devs);
-
- svm->notifier.ops = &intel_mmuops;
- ret = mmu_notifier_register(&svm->notifier, mm);
- if (ret) {
- kfree(svm);
- return ret;
- }
-
- ret = pasid_private_add(svm->pasid, svm);
- if (ret) {
- mmu_notifier_unregister(&svm->notifier, mm);
- kfree(svm);
- return ret;
- }
- }
+ dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+ if (!dev_pasid)
+ return -ENOMEM;
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
- ret = -ENOMEM;
- goto free_svm;
- }
+ dev_pasid->dev = dev;
+ dev_pasid->pasid = pasid;
- sdev->dev = dev;
- sdev->iommu = iommu;
- sdev->did = FLPT_DEFAULT_DID;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
- if (info->ats_enabled) {
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
+ ret = cache_tag_assign_domain(to_dmar_domain(domain), dev, pasid);
+ if (ret)
+ goto free_dev_pasid;
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
- goto free_sdev;
+ goto unassign_tag;
- list_add_rcu(&sdev->list, &svm->devs);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+ list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return 0;
-free_sdev:
- kfree(sdev);
-free_svm:
- if (list_empty(&svm->devs)) {
- mmu_notifier_unregister(&svm->notifier, mm);
- pasid_private_remove(pasid);
- kfree(svm);
- }
+unassign_tag:
+ cache_tag_unassign_domain(to_dmar_domain(domain), dev, pasid);
+free_dev_pasid:
+ kfree(dev_pasid);
return ret;
}
-void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
-{
- struct intel_svm_dev *sdev;
- struct intel_svm *svm;
- struct mm_struct *mm;
-
- if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
- return;
- mm = svm->mm;
-
- if (sdev) {
- list_del_rcu(&sdev->list);
- kfree_rcu(sdev, rcu);
-
- if (list_empty(&svm->devs)) {
- if (svm->notifier.ops)
- mmu_notifier_unregister(&svm->notifier, mm);
- pasid_private_remove(svm->pasid);
- kfree(svm);
- }
- }
-}
-
/* Page request queue descriptor */
struct page_req_dsc {
union {
struct {
u64 type:8;
u64 pasid_present:1;
- u64 priv_data_present:1;
- u64 rsvd:6;
+ u64 rsvd:7;
u64 rid:16;
u64 pasid:20;
u64 exe_req:1;
@@ -438,7 +262,8 @@ struct page_req_dsc {
};
u64 qw_1;
};
- u64 priv_data[2];
+ u64 qw_2;
+ u64 qw_3;
};
static bool is_canonical_address(u64 addr)
@@ -572,24 +397,6 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
}
- if (desc->priv_data_present) {
- /*
- * Set last page in group bit if private data is present,
- * page response is required as it does for LPIG.
- * iommu_report_device_fault() doesn't understand this vendor
- * specific requirement thus we set last_page as a workaround.
- */
- event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
- event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
- event.fault.prm.private_data[0] = desc->priv_data[0];
- event.fault.prm.private_data[1] = desc->priv_data[1];
- } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
- /*
- * If the private data fields are not used by hardware, use it
- * to monitor the prq handle latency.
- */
- event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
- }
iommu_report_device_fault(dev, &event);
}
@@ -597,39 +404,23 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
static void handle_bad_prq_event(struct intel_iommu *iommu,
struct page_req_dsc *req, int result)
{
- struct qi_desc desc;
+ struct qi_desc desc = { };
pr_err("%s: Invalid page request: %08llx %08llx\n",
iommu->name, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
- /*
- * Per VT-d spec. v3.0 ch7.7, system software must
- * respond with page group response if private data
- * is present (PDP) or last page in group (LPIG) bit
- * is set. This is an additional VT-d feature beyond
- * PCI ATS spec.
- */
- if (!req->lpig && !req->priv_data_present)
+ if (!req->lpig)
return;
desc.qw0 = QI_PGRP_PASID(req->pasid) |
QI_PGRP_DID(req->rid) |
QI_PGRP_PASID_P(req->pasid_present) |
- QI_PGRP_PDP(req->priv_data_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
desc.qw1 = QI_PGRP_IDX(req->prg_index) |
QI_PGRP_LPIG(req->lpig);
- if (req->priv_data_present) {
- desc.qw2 = req->priv_data[0];
- desc.qw3 = req->priv_data[1];
- } else {
- desc.qw2 = 0;
- desc.qw3 = 0;
- }
-
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -697,7 +488,7 @@ bad_req:
intel_svm_prq_report(iommu, dev, req);
trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
+ req->qw_2, req->qw_3,
iommu->prq_seq_number++);
mutex_unlock(&iommu->iopf_lock);
prq_advance:
@@ -736,7 +527,7 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
struct iommu_fault_page_request *prm;
- bool private_present;
+ struct qi_desc desc;
bool pasid_present;
bool last_page;
u16 sid;
@@ -744,42 +535,25 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
- /*
- * Per VT-d spec. v3.0 ch7.7, system software must respond
- * with page group response if private data is present (PDP)
- * or last page in group (LPIG) bit is set. This is an
- * additional VT-d requirement beyond PCI ATS spec.
- */
- if (last_page || private_present) {
- struct qi_desc desc;
-
- desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
- QI_PGRP_PASID_P(pasid_present) |
- QI_PGRP_PDP(private_present) |
- QI_PGRP_RESP_CODE(msg->code) |
- QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
- desc.qw2 = 0;
- desc.qw3 = 0;
-
- if (private_present) {
- desc.qw2 = prm->private_data[0];
- desc.qw3 = prm->private_data[1];
- } else if (prm->private_data[0]) {
- dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
- ktime_to_ns(ktime_get()) - prm->private_data[0]);
- }
+ desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
+ QI_PGRP_PASID_P(pasid_present) |
+ QI_PGRP_RESP_CODE(msg->code) |
+ QI_PGRP_RESP_TYPE;
+ desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw2 = 0;
+ desc.qw3 = 0;
- qi_submit_sync(iommu, &desc, 1, 0);
- }
+ qi_submit_sync(iommu, &desc, 1, 0);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
{
- kfree(to_dmar_domain(domain));
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ /* dmar_domain free is deferred to the mmu free_notifier callback. */
+ mmu_notifier_put(&dmar_domain->notifier);
}
static const struct iommu_domain_ops intel_svm_domain_ops = {
@@ -787,14 +561,29 @@ static const struct iommu_domain_ops intel_svm_domain_ops = {
.free = intel_svm_domain_free
};
-struct iommu_domain *intel_svm_domain_alloc(void)
+struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
{
struct dmar_domain *domain;
+ int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
domain->domain.ops = &intel_svm_domain_ops;
+ domain->use_first_level = true;
+ INIT_LIST_HEAD(&domain->dev_pasids);
+ INIT_LIST_HEAD(&domain->cache_tags);
+ spin_lock_init(&domain->cache_lock);
+ spin_lock_init(&domain->lock);
+
+ domain->notifier.ops = &intel_mmuops;
+ ret = mmu_notifier_register(&domain->notifier, mm);
+ if (ret) {
+ kfree(domain);
+ return ERR_PTR(ret);
+ }
return &domain->domain;
}
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 93d96f93a89b..961ac1c1bc21 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -89,6 +89,103 @@ TRACE_EVENT(prq_report,
__entry->dw1, __entry->dw2, __entry->dw3)
)
);
+
+DECLARE_EVENT_CLASS(cache_tag_log,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag),
+ TP_STRUCT__entry(
+ __string(iommu, tag->iommu->name)
+ __string(dev, dev_name(tag->dev))
+ __field(u16, type)
+ __field(u16, domain_id)
+ __field(u32, pasid)
+ __field(u32, users)
+ ),
+ TP_fast_assign(
+ __assign_str(iommu, tag->iommu->name);
+ __assign_str(dev, dev_name(tag->dev));
+ __entry->type = tag->type;
+ __entry->domain_id = tag->domain_id;
+ __entry->pasid = tag->pasid;
+ __entry->users = tag->users;
+ ),
+ TP_printk("%s/%s type %s did %d pasid %d ref %d",
+ __get_str(iommu), __get_str(dev),
+ __print_symbolic(__entry->type,
+ { CACHE_TAG_IOTLB, "iotlb" },
+ { CACHE_TAG_DEVTLB, "devtlb" },
+ { CACHE_TAG_NESTING_IOTLB, "nesting_iotlb" },
+ { CACHE_TAG_NESTING_DEVTLB, "nesting_devtlb" }),
+ __entry->domain_id, __entry->pasid, __entry->users
+ )
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_assign,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DECLARE_EVENT_CLASS(cache_tag_flush,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask),
+ TP_STRUCT__entry(
+ __string(iommu, tag->iommu->name)
+ __string(dev, dev_name(tag->dev))
+ __field(u16, type)
+ __field(u16, domain_id)
+ __field(u32, pasid)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned long, addr)
+ __field(unsigned long, pages)
+ __field(unsigned long, mask)
+ ),
+ TP_fast_assign(
+ __assign_str(iommu, tag->iommu->name);
+ __assign_str(dev, dev_name(tag->dev));
+ __entry->type = tag->type;
+ __entry->domain_id = tag->domain_id;
+ __entry->pasid = tag->pasid;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->addr = addr;
+ __entry->pages = pages;
+ __entry->mask = mask;
+ ),
+ TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
+ __get_str(iommu), __get_str(dev), __entry->pasid,
+ __print_symbolic(__entry->type,
+ { CACHE_TAG_IOTLB, "iotlb" },
+ { CACHE_TAG_DEVTLB, "devtlb" },
+ { CACHE_TAG_NESTING_IOTLB, "nesting_iotlb" },
+ { CACHE_TAG_NESTING_DEVTLB, "nesting_devtlb" }),
+ __entry->domain_id, __entry->start, __entry->end,
+ __entry->addr, __entry->pages, __entry->mask
+ )
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask)
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask)
+);
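For reference, output from these events follows the TP_printk() formats above; with made-up values a trace line would look roughly like:

cache_tag_assign: dmar0/0000:00:02.0 type iotlb did 1 pasid 0 ref 1
cache_tag_flush_range: dmar0 0000:00:02.0[0] type devtlb did 1 [0x100000-0x103fff] addr 0x100000 pages 0x4 mask 0x2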
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f7828a7aad41..3d23b924cec1 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -21,6 +21,7 @@
#include <asm/barrier.h>
#include "io-pgtable-arm.h"
+#include "iommu-pages.h"
#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
@@ -198,14 +199,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- if (cfg->alloc) {
+ if (cfg->alloc)
pages = cfg->alloc(cookie, size, gfp);
- } else {
- struct page *p;
-
- p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
- pages = p ? page_address(p) : NULL;
- }
+ else
+ pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
if (!pages)
return NULL;
@@ -233,7 +230,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- free_pages((unsigned long)pages, order);
+ iommu_free_pages(pages, order);
return NULL;
}
@@ -249,7 +246,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- free_pages((unsigned long)pages, get_order(size));
+ iommu_free_pages(pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 74b1ef2b96be..ad28031e1e93 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <asm/barrier.h>
+#include "iommu-pages.h"
#define DART1_MAX_ADDR_BITS 36
@@ -106,18 +107,12 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp,
- struct io_pgtable_cfg *cfg)
+static void *__dart_alloc_pages(size_t size, gfp_t gfp)
{
int order = get_order(size);
- struct page *p;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages(gfp | __GFP_ZERO, order);
- if (!p)
- return NULL;
-
- return page_address(p);
+ return iommu_alloc_pages(gfp, order);
}
static int dart_init_pte(struct dart_io_pgtable *data,
@@ -262,13 +257,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp, cfg);
+ cptep = __dart_alloc_pages(tblsz, gfp);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
- free_pages((unsigned long)cptep, get_order(tblsz));
+ iommu_free_pages(cptep, get_order(tblsz));
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@@ -419,8 +414,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
- cfg);
+ data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
@@ -429,9 +423,10 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return &data->iop;
out_free_data:
- while (--i >= 0)
- free_pages((unsigned long)data->pgd[i],
- get_order(DART_GRANULE(data)));
+ while (--i >= 0) {
+ iommu_free_pages(data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ }
kfree(data);
return NULL;
}
@@ -439,6 +434,7 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@@ -449,15 +445,10 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
while (ptep != end) {
dart_iopte pte = *ptep++;
- if (pte) {
- unsigned long page =
- (unsigned long)iopte_deref(pte, data);
-
- free_pages(page, get_order(DART_GRANULE(data)));
- }
+ if (pte)
+ iommu_free_pages(iopte_deref(pte, data), order);
}
- free_pages((unsigned long)data->pgd[i],
- get_order(DART_GRANULE(data)));
+ iommu_free_pages(data->pgd[i], order);
}
kfree(data);
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
new file mode 100644
index 000000000000..82ebf0033081
--- /dev/null
+++ b/drivers/iommu/iommu-pages.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#ifndef __IOMMU_PAGES_H
+#define __IOMMU_PAGES_H
+
+#include <linux/vmstat.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+/*
+ * All page allocations that should be reported as "iommu-pagetables" to
+ * userspace must use one of the functions below. This includes allocations of
+ * page-tables and other per-iommu_domain configuration structures.
+ *
+ * This is necessary for proper accounting, as IOMMU state can be rather
+ * large, e.g. multiple gigabytes in size.
+ */
+
+/**
+ * __iommu_alloc_account - account for newly allocated page.
+ * @page: head struct page of the page.
+ * @order: order of the page
+ */
+static inline void __iommu_alloc_account(struct page *page, int order)
+{
+ const long pgcnt = 1l << order;
+
+ mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
+ mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
+}
+
+/**
+ * __iommu_free_account - account a page that is about to be freed.
+ * @page: head struct page of the page.
+ * @order: order of the page
+ */
+static inline void __iommu_free_account(struct page *page, int order)
+{
+ const long pgcnt = 1l << order;
+
+ mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
+ mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
+}
+
+/**
+ * __iommu_alloc_pages - allocate a zeroed page of a given order.
+ * @gfp: buddy allocator flags
+ * @order: page order
+ *
+ * returns the head struct page of the allocated page.
+ */
+static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
+{
+ struct page *page;
+
+ page = alloc_pages(gfp | __GFP_ZERO, order);
+ if (unlikely(!page))
+ return NULL;
+
+ __iommu_alloc_account(page, order);
+
+ return page;
+}
+
+/**
+ * __iommu_free_pages - free page of a given order
+ * @page: head struct page of the page
+ * @order: page order
+ */
+static inline void __iommu_free_pages(struct page *page, int order)
+{
+ if (!page)
+ return;
+
+ __iommu_free_account(page, order);
+ __free_pages(page, order);
+}
+
+/**
+ * iommu_alloc_pages_node - allocate a zeroed page of a given order from
+ * specific NUMA node.
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @order: page order
+ *
+ * returns the virtual address of the allocated page
+ */
+static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
+{
+ struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
+
+ if (unlikely(!page))
+ return NULL;
+
+ __iommu_alloc_account(page, order);
+
+ return page_address(page);
+}
+
+/**
+ * iommu_alloc_pages - allocate a zeroed page of a given order
+ * @gfp: buddy allocator flags
+ * @order: page order
+ *
+ * returns the virtual address of the allocated page
+ */
+static inline void *iommu_alloc_pages(gfp_t gfp, int order)
+{
+ struct page *page = __iommu_alloc_pages(gfp, order);
+
+ if (unlikely(!page))
+ return NULL;
+
+ return page_address(page);
+}
+
+/**
+ * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ *
+ * returns the virtual address of the allocated page
+ */
+static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
+{
+ return iommu_alloc_pages_node(nid, gfp, 0);
+}
+
+/**
+ * iommu_alloc_page - allocate a zeroed page
+ * @gfp: buddy allocator flags
+ *
+ * returns the virtual address of the allocated page
+ */
+static inline void *iommu_alloc_page(gfp_t gfp)
+{
+ return iommu_alloc_pages(gfp, 0);
+}
+
+/**
+ * iommu_free_pages - free page of a given order
+ * @virt: virtual address of the page to be freed.
+ * @order: page order
+ */
+static inline void iommu_free_pages(void *virt, int order)
+{
+ if (!virt)
+ return;
+
+ __iommu_free_pages(virt_to_page(virt), order);
+}
+
+/**
+ * iommu_free_page - free page
+ * @virt: virtual address of the page to be freed.
+ */
+static inline void iommu_free_page(void *virt)
+{
+ iommu_free_pages(virt, 0);
+}
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @page: the head of the lru list to be freed.
+ *
+ * There is no locking requirement for these pages, as they are going to be
+ * put on a free list as soon as the refcount reaches 0. Pages are put on this
+ * LRU list once they are removed from the IOMMU page tables. However, they
+ * can still be accessed through debugfs.
+ */
+static inline void iommu_put_pages_list(struct list_head *page)
+{
+ while (!list_empty(page)) {
+ struct page *p = list_entry(page->prev, struct page, lru);
+
+ list_del(&p->lru);
+ __iommu_free_account(p, 0);
+ put_page(p);
+ }
+}
+
+#endif /* __IOMMU_PAGES_H */
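A minimal sketch of the calling convention this header expects from drivers (illustrative only; example_alloc_table()/example_free_table() are hypothetical and a real caller would do more than the NULL check):

#include "iommu-pages.h"

/* Allocate one zeroed, accounted page for a hypothetical table. */
static u64 *example_alloc_table(int node)
{
	u64 *table = iommu_alloc_page_node(node, GFP_KERNEL);

	if (!table)
		return NULL;

	/* ... populate table entries ... */
	return table;
}

/* Accounted counterpart of free_page(). */
static void example_free_table(u64 *table)
{
	iommu_free_page(table);
}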
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 640acc804e8c..18a35e798b72 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
/* Allocate a new domain and set it on device pasid. */
domain = iommu_sva_domain_alloc(dev, mm);
- if (!domain) {
- ret = -ENOMEM;
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
goto out_free_handle;
}
@@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return NULL;
+ if (ops->domain_alloc_sva) {
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+ }
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
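Elsewhere in this series the Intel driver is expected to hook into the branch above by filling the new ops member; roughly (a fragment, not quoted verbatim from the patch):

static const struct iommu_ops example_iommu_ops = {
	/* ... */
	.domain_alloc_sva	= intel_svm_domain_alloc,	/* may return ERR_PTR() */
	/* ... */
};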
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 098869007c69..9df7cc75c1bc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -581,10 +581,11 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
if (list_empty(&group->entry))
list_add_tail(&group->entry, group_list);
}
- mutex_unlock(&group->mutex);
- if (dev_is_pci(dev))
- iommu_dma_set_pci_32bit_workaround(dev);
+ if (group->default_domain)
+ iommu_setup_dma_ops(dev);
+
+ mutex_unlock(&group->mutex);
return 0;
@@ -1828,6 +1829,8 @@ int bus_iommu_probe(const struct bus_type *bus)
mutex_unlock(&group->mutex);
return ret;
}
+ for_each_group_device(group, gdev)
+ iommu_setup_dma_ops(gdev->dev);
mutex_unlock(&group->mutex);
/*
@@ -3066,18 +3069,9 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
if (ret)
goto out_unlock;
- /*
- * Release the mutex here because ops->probe_finalize() call-back of
- * some vendor IOMMU drivers calls arm_iommu_attach_device() which
- * in-turn might call back into IOMMU core code, where it tries to take
- * group->mutex, resulting in a deadlock.
- */
- mutex_unlock(&group->mutex);
-
/* Make sure dma_ops is appropriately set */
for_each_group_device(group, gdev)
- iommu_group_do_probe_finalize(gdev->dev);
- return count;
+ iommu_setup_dma_ops(gdev->dev);
out_unlock:
mutex_unlock(&group->mutex);
@@ -3317,27 +3311,39 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
- struct group_device *device;
- int ret = 0;
+ struct group_device *device, *last_gdev;
+ int ret;
for_each_group_device(group, device) {
ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
if (ret)
- break;
+ goto err_revert;
}
+ return 0;
+
+err_revert:
+ last_gdev = device;
+ for_each_group_device(group, device) {
+ const struct iommu_ops *ops = dev_iommu_ops(device->dev);
+
+ if (device == last_gdev)
+ break;
+ ops->remove_dev_pasid(device->dev, pasid, domain);
+ }
return ret;
}
static void __iommu_remove_group_pasid(struct iommu_group *group,
- ioasid_t pasid)
+ ioasid_t pasid,
+ struct iommu_domain *domain)
{
struct group_device *device;
const struct iommu_ops *ops;
for_each_group_device(group, device) {
ops = dev_iommu_ops(device->dev);
- ops->remove_dev_pasid(device->dev, pasid);
+ ops->remove_dev_pasid(device->dev, pasid, domain);
}
}
@@ -3354,6 +3360,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
{
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
void *curr;
int ret;
@@ -3363,10 +3370,18 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
+ for_each_group_device(group, device) {
+ if (pasid >= device->dev->iommu->max_pasids) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
@@ -3374,10 +3389,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
}
ret = __iommu_set_group_pasid(domain, group, pasid);
- if (ret) {
- __iommu_remove_group_pasid(group, pasid);
+ if (ret)
xa_erase(&group->pasid_array, pasid);
- }
out_unlock:
mutex_unlock(&group->mutex);
return ret;
@@ -3400,7 +3413,7 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
struct iommu_group *group = dev->iommu_group;
mutex_lock(&group->mutex);
- __iommu_remove_group_pasid(group, pasid);
+ __iommu_remove_group_pasid(group, pasid, domain);
WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
mutex_unlock(&group->mutex);
}
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075df49..76656fe0470d 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index ee59647c2050..c2443659812a 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -24,6 +24,8 @@ int no_x2apic_optout;
int disable_irq_post = 0;
+bool enable_posted_msi __ro_after_init;
+
static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
@@ -70,7 +72,8 @@ static __init int setup_irqremap(char *str)
no_x2apic_optout = 1;
else if (!strncmp(str, "nopost", 6))
disable_irq_post = 1;
-
+ else if (IS_ENABLED(CONFIG_X86_POSTED_MSI) && !strncmp(str, "posted_msi", 10))
+ enable_posted_msi = true;
str += strcspn(str, ",");
while (*str == ',')
str++;
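In practice this is selected at boot through the option this parser is registered for, presumably the usual intremap= parameter, e.g.:

intremap=posted_msi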
@@ -151,7 +154,10 @@ int __init irq_remap_enable_fault_handling(void)
if (!remap_ops->enable_faulting)
return -ENODEV;
- return remap_ops->enable_faulting();
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dmar:enable_fault_handling",
+ remap_ops->enable_faulting, NULL);
+
+ return remap_ops->enable_faulting(smp_processor_id());
}
void panic_if_irq_remap(const char *msg)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 8c89cb947cdb..0d6f140b5e01 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -41,7 +41,7 @@ struct irq_remap_ops {
int (*reenable)(int);
/* Enable fault handling */
- int (*enable_faulting)(void);
+ int (*enable_faulting)(unsigned int);
};
extern struct irq_remap_ops intel_irq_remap_ops;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b8c47f18bc26..6a2707fe7a78 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a9fa2a54dc9b..d6e4002200bd 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
static const struct component_master_ops mtk_iommu_v1_com_ops = {
.bind = mtk_iommu_v1_bind,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index da79d9f4cf63..4b369419b32c 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include "iommu-pages.h"
+
/** MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
#define RK_MMU_STATUS 0x04
@@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, pt_dma)) {
dev_err(dma_dev, "DMA mapping error while allocating page table\n");
- free_page((unsigned long)page_table);
+ iommu_free_page(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- free_page((unsigned long)rk_domain->dt);
+ iommu_free_page(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)page_table);
+ iommu_free_page(page_table);
}
}
dma_unmap_single(dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)rk_domain->dt);
+ iommu_free_page(rk_domain->dt);
kfree(rk_domain);
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9a5196f523de..d8eaa7ea380b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -695,11 +695,6 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
return size;
}
-static void s390_iommu_probe_finalize(struct device *dev)
-{
- iommu_setup_dma_ops(dev, 0, U64_MAX);
-}
-
struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
if (!zdev || !zdev->s390_domain)
@@ -785,7 +780,6 @@ static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable,
.domain_alloc_paging = s390_domain_alloc_paging,
.probe_device = s390_iommu_probe_device,
- .probe_finalize = s390_iommu_probe_finalize,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index decd52cba998..c519b991749d 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -26,6 +26,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "iommu-pages.h"
+
#define IOMMU_RESET_REG 0x010
#define IOMMU_RESET_RELEASE_ALL 0xffffffff
#define IOMMU_ENABLE_REG 0x020
@@ -679,8 +681,7 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
if (!sun50i_domain)
return NULL;
- sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(DT_SIZE));
+ sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL, get_order(DT_SIZE));
if (!sun50i_domain->dt)
goto err_free_domain;
@@ -702,7 +703,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
- free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
+ iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
sun50i_domain->dt = NULL;
kfree(sun50i_domain);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 14e525bd0d9b..f86c7ae91814 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -19,6 +19,8 @@
#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
+#include "iommu-pages.h"
+
struct tegra_smmu_group {
struct list_head list;
struct tegra_smmu *smmu;
@@ -282,7 +284,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
+ as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
if (!as->pd) {
kfree(as);
return NULL;
@@ -290,7 +292,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __free_page(as->pd);
+ __iommu_free_pages(as->pd, 0);
kfree(as);
return NULL;
}
@@ -298,7 +300,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __free_page(as->pd);
+ __iommu_free_pages(as->pd, 0);
kfree(as);
return NULL;
}
@@ -599,14 +601,14 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __free_page(page);
+ __iommu_free_pages(page, 0);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
DMA_TO_DEVICE);
- __free_page(page);
+ __iommu_free_pages(page, 0);
return NULL;
}
@@ -649,7 +651,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
tegra_smmu_set_pde(as, iova, 0);
dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __free_page(page);
+ __iommu_free_pages(page, 0);
as->pts[pde] = NULL;
}
}
@@ -688,7 +690,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
- page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+ page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
@@ -700,7 +702,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
*/
if (as->pts[pde]) {
if (page)
- __free_page(page);
+ __iommu_free_pages(page, 0);
page = as->pts[pde];
}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 04048f64a2c0..8e776f6c6e35 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1025,15 +1025,6 @@ err_free_dev:
return ERR_PTR(ret);
}
-static void viommu_probe_finalize(struct device *dev)
-{
-#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
- /* First clear the DMA ops in case we're switching from a DMA domain */
- set_dma_ops(dev, NULL);
- iommu_setup_dma_ops(dev, 0, U64_MAX);
-#endif
-}
-
static void viommu_release_device(struct device *dev)
{
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
@@ -1073,7 +1064,6 @@ static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
- .probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 72c07a12f5e1..14464716bacb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -540,6 +540,31 @@ config RISCV_INTC
depends on RISCV
select IRQ_DOMAIN_HIERARCHY
+config RISCV_APLIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+
+config RISCV_APLIC_MSI
+ bool
+ depends on RISCV_APLIC
+ select GENERIC_MSI_IRQ
+ default RISCV_APLIC
+
+config RISCV_IMSIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
+ select GENERIC_MSI_IRQ
+
+config RISCV_IMSIC_PCI
+ bool
+ depends on RISCV_IMSIC
+ depends on PCI
+ depends on PCI_MSI
+ default RISCV_IMSIC
+
config SIFIVE_PLIC
bool
depends on RISCV
@@ -568,7 +593,7 @@ config IRQ_LOONGARCH_CPU
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select LOONGSON_HTVEC
select LOONGSON_LIOINTC
select LOONGSON_EIOINTC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index ec4a18380998..d9dc3d99aaa8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -95,6 +95,9 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
+obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
+obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 9c8b1349ee17..a1430ab60a8a 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
return 0;
err_sgi:
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
alpine_msix_free_sgi(priv, sgi, nr_irqs);
return err;
}
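
The alpine-msi change above (and the identical loongson-pch-msi fix further down) corrects an off-by-one in the error path: when the allocation loop fails while setting up entry i, entries 0..i-1 (that is, i interrupts) were already allocated in the parent domain, so i rather than i - 1 must be passed to irq_domain_free_irqs_parent(). A hedged sketch of the loop shape only; setup_one() is a hypothetical stand-in for the real per-IRQ body:

        for (i = 0; i < nr_irqs; i++) {
                err = setup_one(domain, virq + i);      /* hypothetical helper */
                if (err)
                        goto err_free;
        }
        return 0;

err_free:
        /* exactly i entries (0 .. i-1) succeeded before the failure */
        irq_domain_free_irqs_parent(domain, virq, i);
        return err;
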
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a55528469278..4b021a67bdfe 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -316,7 +316,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
return 0;
}
#else
-static void armada_370_xp_msi_reenable_percpu(void) {}
+static __maybe_unused void armada_370_xp_msi_reenable_percpu(void) {}
static inline int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index eb02d203c963..90daa274ef23 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -192,14 +192,10 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
unsigned int old_cpu = cpu_for_irq(intc, d);
unsigned int new_cpu;
- struct cpumask valid;
unsigned long flags;
bool enabled;
- if (!cpumask_and(&valid, &intc->cpumask, dest))
- return -EINVAL;
-
- new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask);
if (new_cpu >= nr_cpu_ids)
return -EINVAL;
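
This hunk is part of a recurring cleanup in this branch: instead of building a temporary struct cpumask on the stack (whose size scales with NR_CPUS) and searching it afterwards, callers use the three-way helper cpumask_first_and_and(), which returns the first CPU set in all three masks, or a value >= nr_cpu_ids when the intersection is empty. The loongson-eiointc and RISC-V APLIC direct-mode code later in this series use the same idiom. A minimal sketch with hypothetical mask names:

        unsigned int cpu;

        /*
         * First CPU present in the controller mask, the requested affinity
         * and the online mask; no on-stack temporary cpumask is needed.
         */
        cpu = cpumask_first_and_and(&ctrl_mask, requested, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
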
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 2b0b3175cea0..c988886917f7 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -118,7 +118,7 @@ out:
chained_irq_exit(chip, desc);
}
-static void brcmstb_l2_intc_suspend(struct irq_data *d)
+static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
@@ -127,7 +127,8 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_lock_irqsave(gc, flags);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+ if (save)
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
@@ -137,6 +138,16 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_unlock_irqrestore(gc, flags);
}
+static void brcmstb_l2_intc_shutdown(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, false);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, true);
+}
+
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -252,7 +263,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
- ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_shutdown;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fca888b36680..40ebf1726393 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -786,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@@ -798,6 +799,11 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
+ /*
+ * Unmapping a VPE is self-synchronizing on GICv4.1,
+ * no need to issue a VSYNC.
+ */
+ vpe = NULL;
}
goto out;
@@ -832,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -3826,9 +3832,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct cpumask common, *table_mask;
+ unsigned int from, cpu = nr_cpu_ids;
+ struct cpumask *table_mask;
unsigned long flags;
- int from, cpu;
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3850,10 +3856,15 @@ static int its_vpe_set_affinity(struct irq_data *d,
* If we are offered another CPU in the same GICv4.1 ITS
* affinity, pick this one. Otherwise, any CPU will do.
*/
- if (table_mask && cpumask_and(&common, mask_val, table_mask))
- cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
- else
+ if (table_mask)
+ cpu = cpumask_any_and(mask_val, table_mask);
+ if (cpu < nr_cpu_ids) {
+ if (cpumask_test_cpu(from, mask_val) &&
+ cpumask_test_cpu(from, table_mask))
+ cpu = from;
+ } else {
cpu = cpumask_first(mask_val);
+ }
if (from == cpu)
goto out;
@@ -4521,8 +4532,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
struct page *vprop_page;
int base, nr_ids, i, err = 0;
- BUG_ON(!vm);
-
bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
@@ -4561,13 +4570,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b64cbe3052e8..c7ddebf312ad 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -59,6 +59,7 @@ static int cpu_to_eio_node(int cpu)
return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
+#ifdef CONFIG_SMP
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
int i, node, cpu_node, route_node;
@@ -92,19 +93,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
unsigned int cpu;
unsigned long flags;
uint32_t vector, regaddr;
- struct cpumask intersect_affinity;
struct eiointc_priv *priv = d->domain->host_data;
raw_spin_lock_irqsave(&affinity_lock, flags);
- cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
- cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
-
- if (cpumask_empty(&intersect_affinity)) {
+ cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
raw_spin_unlock_irqrestore(&affinity_lock, flags);
return -EINVAL;
}
- cpu = cpumask_first(&intersect_affinity);
vector = d->hwirq;
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
@@ -126,6 +123,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
return IRQ_SET_MASK_OK;
}
+#endif
static int eiointc_index(int node)
{
@@ -238,7 +236,9 @@ static struct irq_chip eiointc_irq_chip = {
.irq_ack = eiointc_ack_irq,
.irq_mask = eiointc_mask_irq,
.irq_unmask = eiointc_unmask_irq,
+#ifdef CONFIG_SMP
.irq_set_affinity = eiointc_set_irq_affinity,
+#endif
};
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 6e1e1f011bb2..dd4d699170f4 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
err_hwirq:
pch_msi_free_hwirq(priv, hwirq, nr_irqs);
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
return err;
}
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 63db8e2172e0..cbaef65e804c 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -33,6 +33,7 @@
#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+#define PIC_UNDEF_VECTOR 255
static int nr_pics;
@@ -46,12 +47,19 @@ struct pch_pic {
u32 saved_vec_en[PIC_REG_COUNT];
u32 saved_vec_pol[PIC_REG_COUNT];
u32 saved_vec_edge[PIC_REG_COUNT];
+ u8 table[PIC_COUNT];
+ int inuse;
};
static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
+static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq)
+{
+ return priv->table[hirq];
+}
+
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -80,45 +88,47 @@ static void pch_pic_mask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
- pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq));
irq_chip_mask_parent(d);
}
static void pch_pic_unmask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
irq_chip_unmask_parent(d);
- pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, bit);
}
static int pch_pic_set_type(struct irq_data *d, unsigned int type)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
int ret = 0;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
default:
@@ -133,11 +143,12 @@ static void pch_pic_ack_irq(struct irq_data *d)
{
unsigned int reg;
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
- if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4);
+ if (reg & BIT(PIC_REG_BIT(bit))) {
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
}
irq_chip_ack_parent(d);
}
@@ -159,6 +170,8 @@ static int pch_pic_domain_translate(struct irq_domain *d,
{
struct pch_pic *priv = d->host_data;
struct device_node *of_node = to_of_node(fwspec->fwnode);
+ unsigned long flags;
+ int i;
if (of_node) {
if (fwspec->param_count < 2)
@@ -171,12 +184,33 @@ static int pch_pic_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[0] - priv->gsi_base;
+
if (fwspec->param_count > 1)
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
else
*type = IRQ_TYPE_NONE;
}
+ raw_spin_lock_irqsave(&priv->pic_lock, flags);
+ /* Check pic-table to confirm if the hwirq has been assigned */
+ for (i = 0; i < priv->inuse; i++) {
+ if (priv->table[i] == *hwirq) {
+ *hwirq = i;
+ break;
+ }
+ }
+ if (i == priv->inuse) {
+ /* Assign a new hwirq in pic-table */
+ if (priv->inuse >= PIC_COUNT) {
+ pr_err("pch-pic domain has no free vectors\n");
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+ return -EINVAL;
+ }
+ priv->table[priv->inuse] = *hwirq;
+ *hwirq = priv->inuse++;
+ }
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+
return 0;
}
@@ -194,6 +228,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
return err;
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq)));
+
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
@@ -222,7 +259,7 @@ static void pch_pic_reset(struct pch_pic *priv)
for (i = 0; i < PIC_COUNT; i++) {
/* Write vector ID */
- writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i)));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
}
@@ -284,6 +321,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
u32 gsi_base)
{
struct pch_pic *priv;
+ int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -294,6 +332,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
if (!priv->base)
goto free_priv;
+ priv->inuse = 0;
+ for (i = 0; i < PIC_COUNT; i++)
+ priv->table[i] = PIC_UNDEF_VECTOR;
+
priv->ht_vec_base = vec_base;
priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
priv->gsi_base = gsi_base;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index be9680645545..d67b5da38982 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -130,7 +130,7 @@ static struct irq_chip asm9260_icoll_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
-asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index ae67fec2ab46..f6484bf15e0b 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -138,7 +138,7 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
-static void rzg2l_irqc_irq_disable(struct irq_data *d)
+static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
unsigned int hw_irq = irqd_to_hwirq(d);
@@ -151,30 +151,24 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
+ if (enable)
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ else
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
+}
+
+static void rzg2l_irqc_irq_disable(struct irq_data *d)
+{
+ rzg2l_tint_irq_endisable(d, false);
irq_chip_disable_parent(d);
}
static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
- unsigned int hw_irq = irqd_to_hwirq(d);
-
- if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 offset = hw_irq - IRQC_TINT_START;
- u32 tssr_offset = TSSR_OFFSET(offset);
- u8 tssr_index = TSSR_INDEX(offset);
- u32 reg;
-
- raw_spin_lock(&priv->lock);
- reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= TIEN << TSSEL_SHIFT(tssr_offset);
- writel_relaxed(reg, priv->base + TSSR(tssr_index));
- raw_spin_unlock(&priv->lock);
- }
+ rzg2l_tint_irq_endisable(d, true);
irq_chip_enable_parent(d);
}
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
new file mode 100644
index 000000000000..4a3ffe856d6c
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+#define APLIC_DISABLE_IDELIVERY 0
+#define APLIC_ENABLE_IDELIVERY 1
+#define APLIC_DISABLE_ITHRESHOLD 1
+#define APLIC_ENABLE_ITHRESHOLD 0
+
+struct aplic_direct {
+ struct aplic_priv priv;
+ struct irq_domain *irqdomain;
+ struct cpumask lmask;
+};
+
+struct aplic_idc {
+ unsigned int hart_index;
+ void __iomem *regs;
+ struct aplic_direct *direct;
+};
+
+static unsigned int aplic_direct_parent_irq;
+static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
+
+static void aplic_direct_irq_eoi(struct irq_data *d)
+{
+ /*
+ * The fasteoi_handler requires irq_eoi() callback hence
+ * provide a dummy handler.
+ */
+}
+
+#ifdef CONFIG_SMP
+static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct aplic_idc *idc;
+ unsigned int cpu, val;
+ void __iomem *target;
+
+ if (force)
+ cpu = cpumask_first_and(&direct->lmask, mask_val);
+ else
+ cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ writel(val, target);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip aplic_direct_chip = {
+ .name = "APLIC-DIRECT",
+ .irq_mask = aplic_irq_mask,
+ .irq_unmask = aplic_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_direct_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = aplic_direct_set_affinity,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct aplic_priv *priv = d->host_data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct aplic_priv *priv = domain->host_data;
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
+ priv, handle_fasteoi_irq, NULL, NULL);
+ irq_set_affinity(virq + i, &direct->lmask);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
+ .translate = aplic_direct_irqdomain_translate,
+ .alloc = aplic_direct_irqdomain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+/*
+ * To handle APLIC direct interrupts, we read the CLAIMI register, which
+ * returns the highest-priority pending interrupt and clears its pending
+ * bit. This process is repeated until the CLAIMI register returns zero.
+ */
+static void aplic_direct_handle_irq(struct irq_desc *desc)
+{
+ struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
+ struct irq_domain *irqdomain = idc->direct->irqdomain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ irq_hw_number_t hw_irq;
+ int irq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
+ hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
+ irq = irq_find_mapping(irqdomain, hw_irq);
+
+ if (unlikely(irq <= 0)) {
+ dev_warn_ratelimited(idc->direct->priv.dev,
+ "hw_irq %lu mapping not found\n", hw_irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
+{
+ u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
+ u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;
+
+ /* Priority must be less than threshold for interrupt triggering */
+ writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);
+
+ /* Delivery must be set to 1 for interrupt triggering */
+ writel(de, idc->regs + APLIC_IDC_IDELIVERY);
+}
+
+static int aplic_direct_dying_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq)
+ disable_percpu_irq(aplic_direct_parent_irq);
+
+ return 0;
+}
+
+static int aplic_direct_starting_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq) {
+ enable_percpu_irq(aplic_direct_parent_irq,
+ irq_get_trigger_type(aplic_direct_parent_irq));
+ }
+
+ return 0;
+}
+
+static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
+ u32 *parent_hwirq, unsigned long *parent_hartid)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only OF fwnode is supported; this function will
+ * need to be extended for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
+
+ rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ rc = riscv_of_parent_hartid(parent.np, parent_hartid);
+ if (rc)
+ return rc;
+
+ *parent_hwirq = parent.args[0];
+ return 0;
+}
+
+int aplic_direct_setup(struct device *dev, void __iomem *regs)
+{
+ int i, j, rc, cpu, current_cpu, setup_count = 0;
+ struct aplic_direct *direct;
+ struct irq_domain *domain;
+ struct aplic_priv *priv;
+ struct aplic_idc *idc;
+ unsigned long hartid;
+ u32 v, hwirq;
+
+ direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
+ if (!direct)
+ return -ENOMEM;
+ priv = &direct->priv;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+
+ /* Setup per-CPU IDC and target CPU mask */
+ current_cpu = get_cpu();
+ for (i = 0; i < priv->nr_idcs; i++) {
+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid);
+ if (rc) {
+ dev_warn(dev, "parent irq for IDC%d not found\n", i);
+ continue;
+ }
+
+ /*
+ * Skip interrupts other than the external interrupt for the
+ * current privilege level.
+ */
+ if (hwirq != RV_IRQ_EXT)
+ continue;
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ dev_warn(dev, "invalid cpuid for IDC%d\n", i);
+ continue;
+ }
+
+ cpumask_set_cpu(cpu, &direct->lmask);
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ idc->hart_index = i;
+ idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ idc->direct = direct;
+
+ aplic_idc_set_delivery(idc, true);
+
+ /*
+ * The boot CPU might not have APLIC hart_index = 0, so check
+ * and update the target registers of all interrupts.
+ */
+ if (cpu == current_cpu && idc->hart_index) {
+ v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ for (j = 1; j <= priv->nr_irqs; j++)
+ writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
+ }
+
+ setup_count++;
+ }
+ put_cpu();
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ DOMAIN_BUS_ANY);
+ if (!aplic_direct_parent_irq && domain) {
+ aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (aplic_direct_parent_irq) {
+ irq_set_chained_handler(aplic_direct_parent_irq,
+ aplic_direct_handle_irq);
+
+ /*
+ * Setup CPUHP notifier to enable parent
+ * interrupt on all CPUs
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/riscv/aplic:starting",
+ aplic_direct_starting_cpu,
+ aplic_direct_dying_cpu);
+ }
+ }
+
+ /* Fail if we were not able to setup IDC for any CPU */
+ if (!setup_count)
+ return -ENODEV;
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, false);
+
+ /* Create irq domain instance for the APLIC */
+ direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
+ &aplic_direct_irqdomain_ops, priv);
+ if (!direct->irqdomain) {
+ dev_err(dev, "failed to create direct irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
+ priv->nr_irqs, priv->nr_idcs);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
new file mode 100644
index 000000000000..774a0c97fdab
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include "irq-riscv-aplic-main.h"
+
+void aplic_irq_unmask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_SETIENUM);
+}
+
+void aplic_irq_mask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_CLRIENUM);
+}
+
+int aplic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ void __iomem *sourcecfg;
+ u32 val = 0;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ val = APLIC_SOURCECFG_SM_INACTIVE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = APLIC_SOURCECFG_SM_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = APLIC_SOURCECFG_SM_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = APLIC_SOURCECFG_SM_EDGE_FALL;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = APLIC_SOURCECFG_SM_EDGE_RISE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sourcecfg = priv->regs + APLIC_SOURCECFG_BASE;
+ sourcecfg += (d->hwirq - 1) * sizeof(u32);
+ writel(val, sourcecfg);
+
+ return 0;
+}
+
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (WARN_ON(!fwspec->param[0]))
+ return -EINVAL;
+
+ /* For DT, gsi_base is always zero. */
+ *hwirq = fwspec->param[0] - gsi_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ WARN_ON(*type == IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
+{
+ u32 val;
+#ifdef CONFIG_RISCV_M_MODE
+ u32 valh;
+
+ if (msi_mode) {
+ val = lower_32_bits(priv->msicfg.base_ppn);
+ valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn));
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs);
+ writel(val, priv->regs + APLIC_xMSICFGADDR);
+ writel(valh, priv->regs + APLIC_xMSICFGADDRH);
+ }
+#endif
+
+ /* Setup APLIC domaincfg register */
+ val = readl(priv->regs + APLIC_DOMAINCFG);
+ val |= APLIC_DOMAINCFG_IE;
+ if (msi_mode)
+ val |= APLIC_DOMAINCFG_DM;
+ writel(val, priv->regs + APLIC_DOMAINCFG);
+ if (readl(priv->regs + APLIC_DOMAINCFG) != val)
+ dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val);
+}
+
+static void aplic_init_hw_irqs(struct aplic_priv *priv)
+{
+ int i;
+
+ /* Disable all interrupts */
+ for (i = 0; i <= priv->nr_irqs; i += 32)
+ writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32));
+
+ /* Set interrupt type and default priority for all interrupts */
+ for (i = 1; i <= priv->nr_irqs; i++) {
+ writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32));
+ writel(APLIC_DEFAULT_PRIORITY,
+ priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32));
+ }
+
+ /* Clear APLIC domaincfg */
+ writel(0, priv->regs + APLIC_DOMAINCFG);
+}
+
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only OF fwnode is supported; this function will
+ * need to be extended for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
+
+ /* Save device pointer and register base */
+ priv->dev = dev;
+ priv->regs = regs;
+
+ /* Find out number of interrupt sources */
+ rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources",
+ &priv->nr_irqs);
+ if (rc) {
+ dev_err(dev, "failed to get number of interrupt sources\n");
+ return rc;
+ }
+
+ /*
+ * Find out the number of IDCs based on the parent interrupts.
+ *
+ * If the "msi-parent" property is present, we ignore the APLIC
+ * IDCs, which forces the APLIC driver to use MSI mode.
+ */
+ if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) {
+ while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent))
+ priv->nr_idcs++;
+ }
+
+ /* Setup initial state of APLIC interrupts */
+ aplic_init_hw_irqs(priv);
+
+ return 0;
+}
+
+static int aplic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool msi_mode = false;
+ void __iomem *regs;
+ int rc;
+
+ /* Map the MMIO registers */
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (!regs) {
+ dev_err(dev, "failed map MMIO registers\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * If the msi-parent property is present, set up APLIC MSI
+ * mode; otherwise set up APLIC direct mode.
+ */
+ if (is_of_node(dev->fwnode))
+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ if (msi_mode)
+ rc = aplic_msi_setup(dev, regs);
+ else
+ rc = aplic_direct_setup(dev, regs);
+ if (rc)
+ dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct");
+
+ return rc;
+}
+
+static const struct of_device_id aplic_match[] = {
+ { .compatible = "riscv,aplic" },
+ {}
+};
+
+static struct platform_driver aplic_driver = {
+ .driver = {
+ .name = "riscv-aplic",
+ .of_match_table = aplic_match,
+ },
+ .probe = aplic_probe,
+};
+builtin_platform_driver(aplic_driver);
diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h
new file mode 100644
index 000000000000..4393927d8c80
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_APLIC_MAIN_H
+#define _IRQ_RISCV_APLIC_MAIN_H
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+
+#define APLIC_DEFAULT_PRIORITY 1
+
+struct aplic_msicfg {
+ phys_addr_t base_ppn;
+ u32 hhxs;
+ u32 hhxw;
+ u32 lhxs;
+ u32 lhxw;
+};
+
+struct aplic_priv {
+ struct device *dev;
+ u32 gsi_base;
+ u32 nr_irqs;
+ u32 nr_idcs;
+ void __iomem *regs;
+ struct aplic_msicfg msicfg;
+};
+
+void aplic_irq_unmask(struct irq_data *d);
+void aplic_irq_mask(struct irq_data *d);
+int aplic_irq_set_type(struct irq_data *d, unsigned int type);
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type);
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode);
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs);
+int aplic_direct_setup(struct device *dev, void __iomem *regs);
+#ifdef CONFIG_RISCV_APLIC_MSI
+int aplic_msi_setup(struct device *dev, void __iomem *regs);
+#else
+static inline int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
new file mode 100644
index 000000000000..028444af48bd
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-msi.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+static void aplic_msi_irq_mask(struct irq_data *d)
+{
+ aplic_irq_mask(d);
+ irq_chip_mask_parent(d);
+}
+
+static void aplic_msi_irq_unmask(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ aplic_irq_unmask(d);
+}
+
+static void aplic_msi_irq_eoi(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ /*
+ * EOI handling is required only for level-triggered interrupts
+ * when APLIC is in MSI mode.
+ */
+
+ switch (irqd_get_trigger_type(d)) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_LEVEL_HIGH:
+ /*
+ * The section "4.9.2 Special consideration for level-sensitive interrupt
+ * sources" of the RISC-V AIA specification says:
+ *
+ * A second option is for the interrupt service routine to write the
+ * APLIC’s source identity number for the interrupt to the domain’s
+ * setipnum register just before exiting. This will cause the interrupt’s
+ * pending bit to be set to one again if the source is still asserting
+ * an interrupt, but not if the source is not asserting an interrupt.
+ */
+ writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE);
+ break;
+ }
+}
+
+static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ unsigned int group_index, hart_index, guest_index, val;
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_msicfg *mc = &priv->msicfg;
+ phys_addr_t tppn, tbppn, msg_addr;
+ void __iomem *target;
+
+ /* For zeroed MSI, simply write zero into the target register */
+ if (!msg->address_hi && !msg->address_lo && !msg->data) {
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ writel(0, target);
+ return;
+ }
+
+ /* Sanity check on message data */
+ WARN_ON(msg->data > APLIC_TARGET_EIID_MASK);
+
+ /* Compute target MSI address */
+ msg_addr = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ tppn = msg_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+
+ /* Compute target HART Base PPN */
+ tbppn = tppn;
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+ WARN_ON(tbppn != mc->base_ppn);
+
+ /* Compute target group and hart indexes */
+ group_index = (tppn >> APLIC_xMSICFGADDR_PPN_HHX_SHIFT(mc->hhxs)) &
+ APLIC_xMSICFGADDR_PPN_HHX_MASK(mc->hhxw);
+ hart_index = (tppn >> APLIC_xMSICFGADDR_PPN_LHX_SHIFT(mc->lhxs)) &
+ APLIC_xMSICFGADDR_PPN_LHX_MASK(mc->lhxw);
+ hart_index |= (group_index << mc->lhxw);
+ WARN_ON(hart_index > APLIC_TARGET_HART_IDX_MASK);
+
+ /* Compute target guest index */
+ guest_index = tppn & APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ WARN_ON(guest_index > APLIC_TARGET_GUEST_IDX_MASK);
+
+ /* Update IRQ TARGET register */
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_GUEST_IDX, guest_index);
+ val |= FIELD_PREP(APLIC_TARGET_EIID, msg->data);
+ writel(val, target);
+}
+
+static void aplic_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
+
+static int aplic_msi_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct msi_domain_info *info = d->host_data;
+ struct aplic_priv *priv = info->data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static const struct msi_domain_template aplic_msi_template = {
+ .chip = {
+ .name = "APLIC-MSI",
+ .irq_mask = aplic_msi_irq_mask,
+ .irq_unmask = aplic_msi_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_msi_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_write_msi_msg = aplic_msi_write_msg,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = aplic_msi_set_desc,
+ .msi_translate = aplic_msi_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ .handler = handle_fasteoi_irq,
+ .handler_name = "fasteoi",
+ },
+};
+
+int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ const struct imsic_global_config *imsic_global;
+ struct aplic_priv *priv;
+ struct aplic_msicfg *mc;
+ phys_addr_t pa;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+ mc = &priv->msicfg;
+
+ /*
+ * The APLIC outgoing MSI config registers assume the target MSI
+ * controller to be a RISC-V AIA IMSIC controller.
+ */
+ imsic_global = imsic_get_global_config();
+ if (!imsic_global) {
+ dev_err(dev, "IMSIC global config not found\n");
+ return -ENODEV;
+ }
+
+ /* Find number of guest index bits (LHXS) */
+ mc->lhxs = imsic_global->guest_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXS_MASK < mc->lhxs) {
+ dev_err(dev, "IMSIC guest index bits big for APLIC LHXS\n");
+ return -EINVAL;
+ }
+
+ /* Find number of HART index bits (LHXW) */
+ mc->lhxw = imsic_global->hart_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXW_MASK < mc->lhxw) {
+ dev_err(dev, "IMSIC hart index bits big for APLIC LHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find number of group index bits (HHXW) */
+ mc->hhxw = imsic_global->group_index_bits;
+ if (APLIC_xMSICFGADDRH_HHXW_MASK < mc->hhxw) {
+ dev_err(dev, "IMSIC group index bits big for APLIC HHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find first bit position of group index (HHXS) */
+ mc->hhxs = imsic_global->group_index_shift;
+ if (mc->hhxs < (2 * APLIC_xMSICFGADDR_PPN_SHIFT)) {
+ dev_err(dev, "IMSIC group index shift should be >= %d\n",
+ (2 * APLIC_xMSICFGADDR_PPN_SHIFT));
+ return -EINVAL;
+ }
+ mc->hhxs -= (2 * APLIC_xMSICFGADDR_PPN_SHIFT);
+ if (APLIC_xMSICFGADDRH_HHXS_MASK < mc->hhxs) {
+ dev_err(dev, "IMSIC group index shift big for APLIC HHXS\n");
+ return -EINVAL;
+ }
+
+ /* Compute PPN base */
+ mc->base_ppn = imsic_global->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, true);
+
+ /* Set the APLIC device MSI domain if not available */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+ * The device MSI domain for OF devices is only set at the time
+ * of populating/creating the OF device. If the device MSI domain
+ * is discovered after the OF device is created, we need to set it
+ * explicitly before using any platform MSI functions.
+ *
+ * In the case of the APLIC device, the parent MSI domain is always
+ * the IMSIC, and the IMSIC MSI domains are created later during
+ * platform driver probing, so we set it explicitly here.
+ */
+ if (is_of_node(dev->fwnode))
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+ priv->nr_irqs + 1, priv, priv)) {
+ dev_err(dev, "failed to create MSI irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT;
+ dev_info(dev, "%d interrupts forwarded to MSI base %pa\n", priv->nr_irqs, &pa);
+
+ return 0;
+}
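
A worked example, using assumed parameters that are not taken from the patch, of how aplic_msi_write_msg() above recovers the target hart from an IMSIC MSI address: with 4 KiB IMSIC pages, no guest files (lhxs = 0), two hart-index bits (lhxw = 2) and no group bits (hhxw = 0), an MSI aimed at the hart-3 file decomposes roughly as:

        u64 msg_addr    = 0x28003000ULL;                 /* hypothetical IMSIC file of hart 3 */
        u64 tppn        = msg_addr >> 12;                /* PPN shift of 12 -> 0x28003        */
        u32 guest_index = tppn & ((1U << 0) - 1);        /* lhxs = 0 -> guest 0               */
        u32 hart_index  = (tppn >> 0) & ((1U << 2) - 1); /* lhxw = 2 -> hart 3                */
        /* clearing the hart/guest bits gives base PPN 0x28000, i.e. mc->base_ppn */
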
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
new file mode 100644
index 000000000000..886418ec06cb
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static int imsic_parent_irq;
+
+#ifdef CONFIG_SMP
+static void imsic_ipi_send(unsigned int cpu)
+{
+ struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
+
+ writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+}
+
+static void imsic_ipi_starting_cpu(void)
+{
+ /* Enable IPIs for current CPU. */
+ __imsic_id_set_enable(IMSIC_IPI_ID);
+}
+
+static void imsic_ipi_dying_cpu(void)
+{
+ /* Disable IPIs for current CPU. */
+ __imsic_id_clear_enable(IMSIC_IPI_ID);
+}
+
+static int __init imsic_ipi_domain_init(void)
+{
+ int virq;
+
+ /* Create IMSIC IPI multiplexing */
+ virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
+ if (virq <= 0)
+ return virq < 0 ? virq : -ENOMEM;
+
+ /* Set vIRQ range */
+ riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI, true);
+
+ /* Announce that IMSIC is providing IPIs */
+ pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);
+
+ return 0;
+}
+#else
+static void imsic_ipi_starting_cpu(void) { }
+static void imsic_ipi_dying_cpu(void) { }
+static int __init imsic_ipi_domain_init(void) { return 0; }
+#endif
+
+/*
+ * To handle an interrupt, we read the TOPEI CSR and write zero to it in
+ * one instruction. If the TOPEI CSR is non-zero, we translate TOPEI.ID to
+ * a Linux interrupt number and let the Linux IRQ subsystem handle it.
+ */
+static void imsic_handle_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int err, cpu = smp_processor_id();
+ struct imsic_vector *vec;
+ unsigned long local_id;
+
+ chained_irq_enter(chip, desc);
+
+ while ((local_id = csr_swap(CSR_TOPEI, 0))) {
+ local_id >>= TOPEI_ID_SHIFT;
+
+ if (local_id == IMSIC_IPI_ID) {
+ if (IS_ENABLED(CONFIG_SMP))
+ ipi_mux_process();
+ continue;
+ }
+
+ if (unlikely(!imsic->base_domain))
+ continue;
+
+ vec = imsic_vector_from_local_id(cpu, local_id);
+ if (!vec) {
+ pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
+ continue;
+ }
+
+ err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
+ if (unlikely(err))
+ pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int imsic_starting_cpu(unsigned int cpu)
+{
+ /* Mark per-CPU IMSIC state as online */
+ imsic_state_online();
+
+ /* Enable per-CPU parent interrupt */
+ enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
+
+ /* Setup IPIs */
+ imsic_ipi_starting_cpu();
+
+ /*
+ * Interrupt identities might have been enabled/disabled while
+ * this CPU was not running, so sync up the local enable/disable state.
+ */
+ imsic_local_sync_all();
+
+ /* Enable local interrupt delivery */
+ imsic_local_delivery(true);
+
+ return 0;
+}
+
+static int imsic_dying_cpu(unsigned int cpu)
+{
+ /* Cleanup IPIs */
+ imsic_ipi_dying_cpu();
+
+ /* Mark per-CPU IMSIC state as offline */
+ imsic_state_offline();
+
+ return 0;
+}
+
+static int __init imsic_early_probe(struct fwnode_handle *fwnode)
+{
+ struct irq_domain *domain;
+ int rc;
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!domain) {
+ pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
+ return -ENOENT;
+ }
+ imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (!imsic_parent_irq) {
+ pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
+ return -ENOENT;
+ }
+
+ /* Initialize IPI domain */
+ rc = imsic_ipi_domain_init();
+ if (rc) {
+ pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
+ return rc;
+ }
+
+ /* Setup chained handler to the parent domain interrupt */
+ irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);
+
+ /*
+ * Setup cpuhp state (must be done after setting imsic_parent_irq)
+ *
+ * Don't disable the per-CPU IMSIC file when a CPU goes offline,
+ * because that would affect IPIs; the masking/unmasking of virtual
+ * IPIs is done via the generic IPI-Mux.
+ */
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
+ imsic_starting_cpu, imsic_dying_cpu);
+
+ return 0;
+}
+
+static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
+{
+ struct fwnode_handle *fwnode = &node->fwnode;
+ int rc;
+
+ /* Setup IMSIC state */
+ rc = imsic_setup_state(fwnode);
+ if (rc) {
+ pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
+ return rc;
+ }
+
+ /* Do early setup of IPIs */
+ rc = imsic_early_probe(fwnode);
+ if (rc)
+ return rc;
+
+ /* Ensure that OF platform device gets probed */
+ of_node_clear_flag(node, OF_POPULATED);
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
new file mode 100644
index 000000000000..11723a763c10
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
+ phys_addr_t *out_msi_pa)
+{
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+
+ global = &imsic->global;
+ local = per_cpu_ptr(global->local, cpu);
+
+ if (BIT(global->guest_index_bits) <= guest_index)
+ return false;
+
+ if (out_msi_pa)
+ *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);
+
+ return true;
+}
+
+static void imsic_irq_mask(struct irq_data *d)
+{
+ imsic_vector_mask(irq_data_get_irq_chip_data(d));
+}
+
+static void imsic_irq_unmask(struct irq_data *d)
+{
+ imsic_vector_unmask(irq_data_get_irq_chip_data(d));
+}
+
+static int imsic_irq_retrigger(struct irq_data *d)
+{
+ struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
+ struct imsic_local_config *local;
+
+ if (WARN_ON(!vec))
+ return -ENOENT;
+
+ local = per_cpu_ptr(imsic->global.local, vec->cpu);
+ writel_relaxed(vec->local_id, local->msi_va);
+ return 0;
+}
+
+static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
+{
+ phys_addr_t msi_addr;
+
+ if (WARN_ON(!vec))
+ return;
+
+ if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->data = vec->local_id;
+}
+
+static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
+{
+ struct msi_msg msg = { };
+
+ imsic_irq_compose_vector_msg(vec, &msg);
+ irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
+}
+
+static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct imsic_vector *old_vec, *new_vec;
+ struct irq_data *pd = d->parent_data;
+
+ old_vec = irq_data_get_irq_chip_data(pd);
+ if (WARN_ON(!old_vec))
+ return -ENOENT;
+
+ /* If old vector cpu belongs to the target cpumask then do nothing */
+ if (cpumask_test_cpu(old_vec->cpu, mask_val))
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* If move is already in-flight then return failure */
+ if (imsic_vector_get_move(old_vec))
+ return -EBUSY;
+
+ /* Get a new vector on the desired set of CPUs */
+ new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ if (!new_vec)
+ return -ENOSPC;
+
+ /* Point device to the new vector */
+ imsic_msi_update_msg(d, new_vec);
+
+ /* Update irq descriptors with the new vector */
+ pd->chip_data = new_vec;
+
+ /* Update effective affinity of parent irq data */
+ irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+
+ /* Move state of the old vector to the new vector */
+ imsic_vector_move(old_vec, new_vec);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip imsic_irq_base_chip = {
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct imsic_vector *vec;
+
+ /* Multi-MSI is not supported yet. */
+ if (nr_irqs > 1)
+ return -EOPNOTSUPP;
+
+ vec = imsic_vector_alloc(virq, cpu_online_mask);
+ if (!vec)
+ return -ENOSPC;
+
+ irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
+ handle_simple_irq, NULL, NULL);
+ irq_set_noprobe(virq);
+ irq_set_affinity(virq, cpu_online_mask);
+ irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
+
+ return 0;
+}
+
+static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ imsic_vector_free(irq_data_get_irq_chip_data(d));
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ const struct msi_parent_ops *ops = domain->msi_parent_ops;
+ u32 busmask = BIT(bus_token);
+
+ if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
+ return 0;
+
+ /* Handle pure domain searches */
+ if (bus_token == ops->bus_select_token)
+ return 1;
+
+ return !!(ops->bus_select_mask & busmask);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind)
+{
+ if (!irqd) {
+ imsic_vector_debug_show_summary(m, ind);
+ return;
+ }
+
+ imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
+}
+#endif
+
+static const struct irq_domain_ops imsic_base_domain_ops = {
+ .alloc = imsic_irq_domain_alloc,
+ .free = imsic_irq_domain_free,
+ .select = imsic_irq_domain_select,
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ .debug_show = imsic_irq_debug_show,
+#endif
+};
+
+#ifdef CONFIG_RISCV_IMSIC_PCI
+
+static void imsic_pci_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void imsic_pci_unmask_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+
+#else
+
+#define MATCH_PCI_MSI 0
+
+#endif
+
+static bool imsic_init_dev_msi_info(struct device *dev,
+ struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+
+ /* MSI parent domain specific settings */
+ switch (real_parent->bus_token) {
+ case DOMAIN_BUS_NEXUS:
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+#ifdef CONFIG_SMP
+ info->chip->irq_set_affinity = imsic_irq_set_affinity;
+#endif
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /* Is the target supported? */
+ switch (info->bus_token) {
+#ifdef CONFIG_RISCV_IMSIC_PCI
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->chip->irq_mask = imsic_pci_mask_irq;
+ info->chip->irq_unmask = imsic_pci_unmask_irq;
+ break;
+#endif
+ case DOMAIN_BUS_DEVICE_MSI:
+ /*
+ * Per-device MSI should never have any MSI feature bits
+ * set. Its sole purpose is to create a dumb interrupt
+ * chip which has a device specific irq_write_msi_msg()
+ * callback.
+ */
+ if (WARN_ON_ONCE(info->flags))
+ return false;
+
+ /* Core managed MSI descriptors */
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
+ break;
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /* Use hierarchical chip operations for re-trigger */
+ info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
+
+ /*
+ * Mask out the domain specific MSI feature flags which are not
+ * supported by the real parent.
+ */
+ info->flags &= pops->supported_flags;
+
+ /* Enforce the required flags */
+ info->flags |= pops->required_flags;
+
+ return true;
+}
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+static const struct msi_parent_ops imsic_msi_parent_ops = {
+ .supported_flags = MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX,
+ .required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .init_dev_msi_info = imsic_init_dev_msi_info,
+};
+
+int imsic_irqdomain_init(void)
+{
+ struct imsic_global_config *global;
+
+ if (!imsic || !imsic->fwnode) {
+ pr_err("early driver not probed\n");
+ return -ENODEV;
+ }
+
+ if (imsic->base_domain) {
+ pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
+ return -ENODEV;
+ }
+
+ /* Create Base IRQ domain */
+ imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
+ &imsic_base_domain_ops, imsic);
+ if (!imsic->base_domain) {
+ pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
+ return -ENOMEM;
+ }
+ imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;
+
+ irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);
+
+ global = &imsic->global;
+ pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n",
+ imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
+ pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
+ imsic->fwnode, global->group_index_bits, global->group_index_shift);
+ pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n",
+ imsic->fwnode, global->nr_ids, &global->base_addr);
+ pr_info("%pfwP: total %d interrupts available\n",
+ imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
+
+ return 0;
+}
+
+static int imsic_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (imsic && imsic->fwnode != dev->fwnode) {
+ dev_err(dev, "fwnode mismatch\n");
+ return -ENODEV;
+ }
+
+ return imsic_irqdomain_init();
+}
+
+static const struct of_device_id imsic_platform_match[] = {
+ { .compatible = "riscv,imsics" },
+ {}
+};
+
+static struct platform_driver imsic_platform_driver = {
+ .driver = {
+ .name = "riscv-imsic",
+ .of_match_table = imsic_platform_match,
+ },
+ .probe = imsic_platform_probe,
+};
+builtin_platform_driver(imsic_platform_driver);
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
new file mode 100644
index 000000000000..5479f872e62b
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/hwcap.h>
+
+#include "irq-riscv-imsic-state.h"
+
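+/*
+ * Per the RISC-V AIA specification, a non-zero EITHRESHOLD value P only
+ * allows interrupt identities strictly less than P to be signalled, so a
+ * threshold of 1 masks all identities (identity 0 is never valid) while a
+ * threshold of 0 disables the threshold check entirely.
+ */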
+#define IMSIC_DISABLE_EIDELIVERY 0
+#define IMSIC_ENABLE_EIDELIVERY 1
+#define IMSIC_DISABLE_EITHRESHOLD 1
+#define IMSIC_ENABLE_EITHRESHOLD 0
+
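+/*
+ * The IMSIC interrupt-file registers are accessed indirectly: ISELECT
+ * selects the target register and IREG reads/writes it, so each helper
+ * below is a non-atomic two-CSR sequence and callers must not be
+ * preempted in between.
+ */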
+static inline void imsic_csr_write(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_write(CSR_IREG, val);
+}
+
+static inline unsigned long imsic_csr_read(unsigned long reg)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read(CSR_IREG);
+}
+
+static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read_clear(CSR_IREG, val);
+}
+
+static inline void imsic_csr_set(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_set(CSR_IREG, val);
+}
+
+static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_clear(CSR_IREG, val);
+}
+
+struct imsic_priv *imsic;
+
+const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return imsic ? &imsic->global : NULL;
+}
+EXPORT_SYMBOL_GPL(imsic_get_global_config);
+
+static bool __imsic_eix_read_clear(unsigned long id, bool pend)
+{
+ unsigned long isel, imask;
+
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+ imask = BIT(id & (__riscv_xlen - 1));
+
+ return !!(imsic_csr_read_clear(isel, imask) & imask);
+}
+
+static inline bool __imsic_id_read_clear_enabled(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, false);
+}
+
+static inline bool __imsic_id_read_clear_pending(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, true);
+}
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
+{
+ unsigned long id = base_id, last_id = base_id + num_id;
+ unsigned long i, isel, ireg;
+
+ while (id < last_id) {
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+
+ /*
+ * Prepare the ID mask to be programmed in the
+ * IMSIC EIEx and EIPx registers. These registers
+ * are XLEN-wide and we must not touch IDs which
+ * are < base_id and >= (base_id + num_id).
+ */
+ ireg = 0;
+ for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
+ ireg |= BIT(i);
+ id++;
+ }
+
+ /*
+ * The IMSIC EIEx and EIPx registers are indirectly
+ * accessed via the ISELECT and IREG CSRs, so we need
+ * to access these CSRs without getting preempted.
+ *
+ * All existing callers invoke this function with local
+ * IRQs disabled, so we don't need to do anything special
+ * here.
+ */
+ if (val)
+ imsic_csr_set(isel, ireg);
+ else
+ imsic_csr_clear(isel, ireg);
+ }
+}
+
+static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+{
+ struct imsic_local_config *mlocal;
+ struct imsic_vector *vec, *mvec;
+ int i;
+
+ lockdep_assert_held(&lpriv->lock);
+
+ for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
+ if (!i || i == IMSIC_IPI_ID)
+ goto skip;
+ vec = &lpriv->vectors[i];
+
+ if (READ_ONCE(vec->enable))
+ __imsic_id_set_enable(i);
+ else
+ __imsic_id_clear_enable(i);
+
+ /*
+ * If the ID was being moved to a new ID on some other CPU,
+ * we can get an MSI during the movement, so check the ID
+ * pending bit and re-trigger the new ID on the other CPU
+ * using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move);
+ WRITE_ONCE(vec->move, NULL);
+ if (mvec && mvec != vec) {
+ if (__imsic_id_read_clear_pending(i)) {
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ imsic_vector_free(&lpriv->vectors[i]);
+ }
+
+skip:
+ bitmap_clear(lpriv->dirty_bitmap, i, 1);
+ }
+}
+
+void imsic_local_sync_all(void)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+void imsic_local_delivery(bool enable)
+{
+ if (enable) {
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
+ return;
+ }
+
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_local_timer_callback(struct timer_list *timer)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ /*
+ * The spinlock acquire/release semantics ensure that changes
+ * to vector enable, vector move and dirty bitmap are visible
+ * to the target CPU.
+ */
+
+ /*
+ * We schedule a timer on the target CPU if the target CPU is not
+ * the same as the current CPU. An offline CPU will unconditionally
+ * synchronize IDs through imsic_starting_cpu() when it is
+ * brought up.
+ */
+ if (cpu_online(cpu)) {
+ if (cpu == smp_processor_id()) {
+ __imsic_local_sync(lpriv);
+ return;
+ }
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, cpu);
+ }
+ }
+}
+#else
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+ __imsic_local_sync(lpriv);
+}
+#endif
+
+void imsic_vector_mask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called through the Linux irq subsystem with
+ * irqs disabled, so there is no need to save/restore irq flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, false);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+void imsic_vector_unmask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called through the Linux irq subsystem with
+ * irqs disabled, so there is no need to save/restore irq flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, true);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
+ bool new_enable, struct imsic_vector *new_move)
+{
+ unsigned long flags;
+ bool enabled;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ /* Update enable and move details */
+ enabled = READ_ONCE(vec->enable);
+ WRITE_ONCE(vec->enable, new_enable);
+ WRITE_ONCE(vec->move, new_move);
+
+ /* Mark the vector as dirty and synchronize */
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+
+ return enabled;
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
+{
+ struct imsic_local_priv *old_lpriv, *new_lpriv;
+ bool enabled;
+
+ if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
+ return;
+
+ old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
+ if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
+ return;
+
+ new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
+ if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
+ return;
+
+ /*
+ * Move and re-trigger the new vector based on the pending
+ * state of the old vector because we might get a device
+ * interrupt on the old vector while the device was being
+ * moved to the new vector.
+ */
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ bool is_enabled;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ is_enabled = imsic_vector_isenabled(vec);
+ mvec = imsic_vector_get_move(vec);
+
+ seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
+ seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
+ seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
+ (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
+ seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0);
+ seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
+ if (mvec) {
+ seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu);
+ seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id);
+ }
+}
+
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
+{
+ irq_matrix_debug_show(m, imsic->matrix, ind);
+}
+#endif
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
+{
+ struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ if (!lpriv || imsic->global.nr_ids < local_id)
+ return NULL;
+
+ return &lpriv->vectors[local_id];
+}
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+{
+ struct imsic_vector *vec = NULL;
+ struct imsic_local_priv *lpriv;
+ unsigned long flags;
+ unsigned int cpu;
+ int local_id;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+ if (local_id < 0)
+ return NULL;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+ vec = &lpriv->vectors[local_id];
+ vec->hwirq = hwirq;
+ vec->enable = false;
+ vec->move = NULL;
+
+ return vec;
+}
+
+void imsic_vector_free(struct imsic_vector *vec)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ vec->hwirq = UINT_MAX;
+ irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+static void __init imsic_local_cleanup(void)
+{
+ struct imsic_local_priv *lpriv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ bitmap_free(lpriv->dirty_bitmap);
+ kfree(lpriv->vectors);
+ }
+
+ free_percpu(imsic->lpriv);
+}
+
+static int __init imsic_local_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *vec;
+ int cpu, i;
+
+ /* Allocate per-CPU private state */
+ imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
+ if (!imsic->lpriv)
+ return -ENOMEM;
+
+ /* Setup per-CPU private state */
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ raw_spin_lock_init(&lpriv->lock);
+
+ /* Allocate dirty bitmap */
+ lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
+ if (!lpriv->dirty_bitmap)
+ goto fail_local_cleanup;
+
+#ifdef CONFIG_SMP
+ /* Setup lazy timer for synchronization */
+ timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
+#endif
+
+ /* Allocate vector array */
+ lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
+ GFP_KERNEL);
+ if (!lpriv->vectors)
+ goto fail_local_cleanup;
+
+ /* Setup vector array */
+ for (i = 0; i <= global->nr_ids; i++) {
+ vec = &lpriv->vectors[i];
+ vec->cpu = cpu;
+ vec->local_id = i;
+ vec->hwirq = UINT_MAX;
+ }
+ }
+
+ return 0;
+
+fail_local_cleanup:
+ imsic_local_cleanup();
+ return -ENOMEM;
+}
+
+void imsic_state_online(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_online(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+void imsic_state_offline(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_offline(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+
+#ifdef CONFIG_SMP
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+#endif
+}
+
+static int __init imsic_matrix_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+
+ raw_spin_lock_init(&imsic->matrix_lock);
+ imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
+ 0, global->nr_ids + 1);
+ if (!imsic->matrix)
+ return -ENOMEM;
+
+ /* Reserve ID#0 because it is special and never implemented */
+ irq_matrix_assign_system(imsic->matrix, 0, false);
+
+ /* Reserve IPI ID because it is special and used internally */
+ irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
+
+ return 0;
+}
+
+static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
+ u32 index, unsigned long *hartid)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ /*
+ * Skip interrupts other than external interrupts for
+ * the current privilege level.
+ */
+ if (parent.args[0] != RV_IRQ_EXT)
+ return -EINVAL;
+
+ return riscv_of_parent_hartid(parent.np, hartid);
+}
+
+static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
+ u32 index, struct resource *res)
+{
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ return of_address_to_resource(to_of_node(fwnode), index, res);
+}
+
+static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs,
+ u32 *nr_mmios)
+{
+ unsigned long hartid;
+ struct resource res;
+ int rc;
+ u32 i;
+
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ *nr_parent_irqs = 0;
+ *nr_mmios = 0;
+
+ /* Find number of parent interrupts */
+ while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
+ (*nr_parent_irqs)++;
+ if (!*nr_parent_irqs) {
+ pr_err("%pfwP: no parent irqs available\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Find number of guest index bits in MSI address */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
+ &global->guest_index_bits);
+ if (rc)
+ global->guest_index_bits = 0;
+
+ /* Find number of HART index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
+ &global->hart_index_bits);
+ if (rc) {
+ /* Assume default value */
+ global->hart_index_bits = __fls(*nr_parent_irqs);
+ if (BIT(global->hart_index_bits) < *nr_parent_irqs)
+ global->hart_index_bits++;
+ }
+
+ /* Find number of group index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
+ &global->group_index_bits);
+ if (rc)
+ global->group_index_bits = 0;
+
+ /*
+ * Find the first bit position of the group index.
+ * If not specified, assume the default APLIC-IMSIC configuration.
+ */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
+ &global->group_index_shift);
+ if (rc)
+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
+
+ /* Find number of interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
+ &global->nr_ids);
+ if (rc) {
+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
+ return rc;
+ }
+
+ /* Find number of guest interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
+ &global->nr_guest_ids);
+ if (rc)
+ global->nr_guest_ids = global->nr_ids;
+
+ /* Sanity check guest index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
+ if (i < global->guest_index_bits) {
+ pr_err("%pfwP: guest index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check HART index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
+ if (i < global->hart_index_bits) {
+ pr_err("%pfwP: HART index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
+ global->guest_index_bits - global->hart_index_bits;
+ if (i < global->group_index_bits) {
+ pr_err("%pfwP: group index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index shift */
+ i = global->group_index_bits + global->group_index_shift - 1;
+ if (i >= BITS_PER_LONG) {
+ pr_err("%pfwP: group index shift too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of interrupt identities */
+ if (global->nr_ids < IMSIC_MIN_ID ||
+ global->nr_ids >= IMSIC_MAX_ID ||
+ (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of guest interrupt identities */
+ if (global->nr_guest_ids < IMSIC_MIN_ID ||
+ global->nr_guest_ids >= IMSIC_MAX_ID ||
+ (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Compute base address */
+ rc = imsic_get_mmio_resource(fwnode, 0, &res);
+ if (rc) {
+ pr_err("%pfwP: first MMIO resource not found\n", fwnode);
+ return -EINVAL;
+ }
+ global->base_addr = res.start;
+ global->base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+
+ /* Find number of MMIO register sets */
+ while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
+ (*nr_mmios)++;
+
+ return 0;
+}
+
+int __init imsic_setup_state(struct fwnode_handle *fwnode)
+{
+ u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+ void __iomem **mmios_va = NULL;
+ struct resource *mmios = NULL;
+ unsigned long reloff, hartid;
+ phys_addr_t base_addr;
+ int rc, cpu;
+
+ /*
+ * Only one IMSIC instance is allowed per platform, for a clean
+ * implementation of SMP IRQ affinity and per-CPU IPIs.
+ *
+ * This means that on a multi-socket (or multi-die) platform we
+ * will have multiple MMIO regions for one IMSIC instance.
+ */
+ if (imsic) {
+ pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
+ return -EALREADY;
+ }
+
+ if (!riscv_isa_extension_available(NULL, SxAIA)) {
+ pr_err("%pfwP: AIA support not available\n", fwnode);
+ return -ENODEV;
+ }
+
+ imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+ if (!imsic)
+ return -ENOMEM;
+ imsic->fwnode = fwnode;
+ global = &imsic->global;
+
+ global->local = alloc_percpu(typeof(*global->local));
+ if (!global->local) {
+ rc = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ /* Parse IMSIC fwnode */
+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios);
+ if (rc)
+ goto out_free_local;
+
+ /* Allocate MMIO resource array */
+ mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
+ if (!mmios) {
+ rc = -ENOMEM;
+ goto out_free_local;
+ }
+
+ /* Allocate MMIO virtual address array */
+ mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
+ if (!mmios_va) {
+ rc = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ /* Parse and map MMIO register sets */
+ for (i = 0; i < nr_mmios; i++) {
+ rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
+ if (rc) {
+ pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ base_addr = mmios[i].start;
+ base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+ if (base_addr != global->base_addr) {
+ rc = -EINVAL;
+ pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
+ if (!mmios_va[i]) {
+ rc = -EIO;
+ pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+ }
+
+ /* Initialize local (or per-CPU) state */
+ rc = imsic_local_init();
+ if (rc) {
+ pr_err("%pfwP: failed to initialize local state\n",
+ fwnode);
+ goto out_iounmap;
+ }
+
+ /* Configure handlers for target CPUs */
+ for (i = 0; i < nr_parent_irqs; i++) {
+ rc = imsic_get_parent_hartid(fwnode, i, &hartid);
+ if (rc) {
+ pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
+ continue;
+ }
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ /* Find MMIO location of MSI page */
+ index = nr_mmios;
+ reloff = i * BIT(global->guest_index_bits) *
+ IMSIC_MMIO_PAGE_SZ;
+ for (j = 0; j < nr_mmios; j++) {
+ if (reloff < resource_size(&mmios[j])) {
+ index = j;
+ break;
+ }
+
+ /*
+ * MMIO region size may not be aligned to
+ * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
+ * if holes are present.
+ */
+ reloff -= ALIGN(resource_size(&mmios[j]),
+ BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
+ }
+ if (index >= nr_mmios) {
+ pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ local = per_cpu_ptr(global->local, cpu);
+ local->msi_pa = mmios[index].start + reloff;
+ local->msi_va = mmios_va[index] + reloff;
+
+ nr_handlers++;
+ }
+
+ /* If no CPU handlers were found, we can't take interrupts */
+ if (!nr_handlers) {
+ pr_err("%pfwP: No CPU handlers found\n", fwnode);
+ rc = -ENODEV;
+ goto out_local_cleanup;
+ }
+
+ /* Initialize matrix allocator */
+ rc = imsic_matrix_init();
+ if (rc) {
+ pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
+ goto out_local_cleanup;
+ }
+
+ /* We don't need the MMIO arrays anymore, so free them up */
+ kfree(mmios_va);
+ kfree(mmios);
+
+ return 0;
+
+out_local_cleanup:
+ imsic_local_cleanup();
+out_iounmap:
+ for (i = 0; i < nr_mmios; i++) {
+ if (mmios_va[i])
+ iounmap(mmios_va[i]);
+ }
+ kfree(mmios_va);
+ kfree(mmios);
+out_free_local:
+ free_percpu(imsic->global.local);
+out_free_priv:
+ kfree(imsic);
+ imsic = NULL;
+ return rc;
+}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
new file mode 100644
index 000000000000..5ae2f69b035b
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_IMSIC_STATE_H
+#define _IRQ_RISCV_IMSIC_STATE_H
+
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+#include <linux/timer.h>
+
+#define IMSIC_IPI_ID 1
+#define IMSIC_NR_IPI 8
+
+struct imsic_vector {
+ /* Fixed details of the vector */
+ unsigned int cpu;
+ unsigned int local_id;
+ /* Details saved by driver in the vector */
+ unsigned int hwirq;
+ /* Details accessed using local lock held */
+ bool enable;
+ struct imsic_vector *move;
+};
+
+struct imsic_local_priv {
+ /* Local lock to protect vector enable/move variables and dirty bitmap */
+ raw_spinlock_t lock;
+
+ /* Local dirty bitmap for synchronization */
+ unsigned long *dirty_bitmap;
+
+#ifdef CONFIG_SMP
+ /* Local timer for synchronization */
+ struct timer_list timer;
+#endif
+
+ /* Local vector table */
+ struct imsic_vector *vectors;
+};
+
+struct imsic_priv {
+ /* Device details */
+ struct fwnode_handle *fwnode;
+
+ /* Global configuration common for all HARTs */
+ struct imsic_global_config global;
+
+ /* Per-CPU state */
+ struct imsic_local_priv __percpu *lpriv;
+
+ /* State of IRQ matrix allocator */
+ raw_spinlock_t matrix_lock;
+ struct irq_matrix *matrix;
+
+ /* IRQ domains (created by platform driver) */
+ struct irq_domain *base_domain;
+};
+
+extern struct imsic_priv *imsic;
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val);
+
+static inline void __imsic_id_set_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, true);
+}
+
+static inline void __imsic_id_clear_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, false);
+}
+
+void imsic_local_sync_all(void);
+void imsic_local_delivery(bool enable);
+
+void imsic_vector_mask(struct imsic_vector *vec);
+void imsic_vector_unmask(struct imsic_vector *vec);
+
+static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->enable);
+}
+
+static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->move);
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+void imsic_vector_free(struct imsic_vector *vector);
+
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind);
+
+void imsic_state_online(void);
+void imsic_state_offline(void);
+int imsic_setup_state(struct fwnode_handle *fwnode);
+int imsic_irqdomain_init(void);
+
+#endif
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index f3d4cb9e34f7..8fb183ced1e7 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -164,15 +164,12 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
- struct cpumask amask;
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- cpumask_and(&amask, &priv->lmask, mask_val);
-
if (force)
- cpu = cpumask_first(&amask);
+ cpu = cpumask_first_and(&priv->lmask, mask_val);
else
- cpu = cpumask_any_and(&amask, cpu_online_mask);
+ cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 26a5193d0ae4..2cc9f3b7d669 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -19,13 +19,26 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
+#include <linux/pm.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#define IRQS_PER_BANK 32
+#define IRQS_PER_BANK 32
-#define HWSPNLCK_TIMEOUT 1000 /* usec */
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+
+#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4)
+#define EXTI_HWCFGR1 0x3f0
+
+/* Register: EXTI_EnCIDCFGR(n) */
+#define EXTI_CIDCFGR_CFEN_MASK BIT(0)
+#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4)
+#define EXTI_CIDCFGR_CID_SHIFT 4
+
+/* Register: EXTI_HWCFGR1 */
+#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24)
+
+#define EXTI_CID1 1
struct stm32_exti_bank {
u32 imr_ofst;
@@ -36,6 +49,7 @@ struct stm32_exti_bank {
u32 rpr_ofst;
u32 fpr_ofst;
u32 trg_ofst;
+ u32 seccfgr_ofst;
};
#define UNDEF_REG ~0
@@ -54,17 +68,18 @@ struct stm32_exti_chip_data {
u32 mask_cache;
u32 rtsr_cache;
u32 ftsr_cache;
+ u32 event_reserved;
};
struct stm32_exti_host_data {
void __iomem *base;
+ struct device *dev;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
struct hwspinlock *hwlock;
+ bool dt_has_irqs_desc; /* skip internal desc_irqs array and get it from DT */
};
-static struct stm32_exti_host_data *stm32_host_data;
-
static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.imr_ofst = 0x00,
.emr_ofst = 0x04,
@@ -74,6 +89,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.rpr_ofst = 0x14,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
@@ -94,6 +110,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
.rpr_ofst = 0x88,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
@@ -105,6 +122,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
.rpr_ofst = 0x98,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
@@ -116,6 +134,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
.rpr_ofst = 0xA8,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
@@ -138,6 +157,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b1 = {
.rpr_ofst = 0x0C,
.fpr_ofst = 0x10,
.trg_ofst = 0x3EC,
+ .seccfgr_ofst = 0x14,
};
static const struct stm32_exti_bank stm32mp1_exti_b2 = {
@@ -149,6 +169,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b2 = {
.rpr_ofst = 0x2C,
.fpr_ofst = 0x30,
.trg_ofst = 0x3E8,
+ .seccfgr_ofst = 0x34,
};
static const struct stm32_exti_bank stm32mp1_exti_b3 = {
@@ -160,6 +181,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b3 = {
.rpr_ofst = 0x4C,
.fpr_ofst = 0x50,
.trg_ofst = 0x3E4,
+ .seccfgr_ofst = 0x54,
};
static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
@@ -322,7 +344,7 @@ static void stm32_irq_handler(struct irq_desc *desc)
while ((pending = stm32_exti_pending(gc))) {
for_each_set_bit(n, &pending, IRQS_PER_BANK)
generic_handle_domain_irq(domain, irq_base + n);
- }
+ }
}
chained_irq_exit(chip, desc);
@@ -621,50 +643,32 @@ static int stm32_exti_h_set_affinity(struct irq_data *d,
return IRQ_SET_MASK_OK_DONE;
}
-static int __maybe_unused stm32_exti_h_suspend(void)
+static int stm32_exti_h_suspend(struct device *dev)
{
+ struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
struct stm32_exti_chip_data *chip_data;
int i;
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
stm32_chip_suspend(chip_data, chip_data->wake_active);
- raw_spin_unlock(&chip_data->rlock);
}
return 0;
}
-static void __maybe_unused stm32_exti_h_resume(void)
+static int stm32_exti_h_resume(struct device *dev)
{
+ struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
struct stm32_exti_chip_data *chip_data;
int i;
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
stm32_chip_resume(chip_data, chip_data->mask_cache);
- raw_spin_unlock(&chip_data->rlock);
}
-}
-static struct syscore_ops stm32_exti_h_syscore_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = stm32_exti_h_suspend,
- .resume = stm32_exti_h_resume,
-#endif
-};
-
-static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
-{
- stm32_host_data = host_data;
- register_syscore_ops(&stm32_exti_h_syscore_ops);
-}
-
-static void stm32_exti_h_syscore_deinit(void)
-{
- unregister_syscore_ops(&stm32_exti_h_syscore_ops);
+ return 0;
}
static int stm32_exti_h_retrigger(struct irq_data *d)
@@ -725,12 +729,35 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
bank = hwirq / IRQS_PER_BANK;
chip_data = &host_data->chips_data[bank];
+ /* Check if event is reserved (Secure) */
+ if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) {
+ dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq);
+ return -EPERM;
+ }
+
event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
&stm32_exti_h_chip : &stm32_exti_h_chip_direct;
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
+ if (host_data->dt_has_irqs_desc) {
+ struct of_phandle_args out_irq;
+ int ret;
+
+ ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq);
+ if (ret)
+ return ret;
+ /* we only support one parent, so far */
+ if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ return -EINVAL;
+
+ of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
+ out_irq.args_count, &p_fwspec);
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
if (!host_data->drv_data->desc_irqs)
return -EINVAL;
@@ -771,8 +798,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
goto free_chips_data;
}
- stm32_host_data = host_data;
-
return host_data;
free_chips_data:
@@ -807,6 +832,10 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
if (stm32_bank->emr_ofst != UNDEF_REG)
writel_relaxed(0, base + stm32_bank->emr_ofst);
+ /* reserve Secure events */
+ if (stm32_bank->seccfgr_ofst != UNDEF_REG)
+ chip_data->event_reserved = readl_relaxed(base + stm32_bank->seccfgr_ofst);
+
pr_info("%pOF: bank%d\n", node, bank_idx);
return chip_data;
@@ -891,6 +920,27 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+static void stm32_exti_check_rif(struct stm32_exti_host_data *host_data)
+{
+ unsigned int bank, i, event;
+ u32 cid, cidcfgr, hwcfgr1;
+
+ /* quit on CID not supported */
+ hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1);
+ if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0)
+ return;
+
+ for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) {
+ for (i = 0; i < IRQS_PER_BANK; i++) {
+ event = bank * IRQS_PER_BANK + i;
+ cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event));
+ cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT;
+ if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1)
+ host_data->chips_data[bank].event_reserved |= BIT(i);
+ }
+ }
+}
+
static void stm32_exti_remove_irq(void *data)
{
struct irq_domain *domain = data;
@@ -898,11 +948,6 @@ static void stm32_exti_remove_irq(void *data)
irq_domain_remove(domain);
}
-static void stm32_exti_remove(struct platform_device *pdev)
-{
- stm32_exti_h_syscore_deinit();
-}
-
static int stm32_exti_probe(struct platform_device *pdev)
{
int ret, i;
@@ -916,6 +961,9 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (!host_data)
return -ENOMEM;
+ dev_set_drvdata(dev, host_data);
+ host_data->dev = dev;
+
/* check for optional hwspinlock which may be not available yet */
ret = of_hwspin_lock_get_id(np, 0);
if (ret == -EPROBE_DEFER)
@@ -955,6 +1003,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
for (i = 0; i < drv_data->bank_nr; i++)
stm32_exti_chip_init(host_data, i, np);
+ stm32_exti_check_rif(host_data);
+
parent_domain = irq_find_host(of_irq_find_parent(np));
if (!parent_domain) {
dev_err(dev, "GIC interrupt-parent not found\n");
@@ -975,7 +1025,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (ret)
return ret;
- stm32_exti_h_syscore_init(host_data);
+ if (of_property_read_bool(np, "interrupts-extended"))
+ host_data->dt_has_irqs_desc = true;
return 0;
}
@@ -988,12 +1039,16 @@ static const struct of_device_id stm32_exti_ids[] = {
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+static const struct dev_pm_ops stm32_exti_dev_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_exti_h_suspend, stm32_exti_h_resume)
+};
+
static struct platform_driver stm32_exti_driver = {
.probe = stm32_exti_probe,
- .remove_new = stm32_exti_remove,
.driver = {
.name = "stm32_exti",
.of_match_table = stm32_exti_ids,
+ .pm = &stm32_exti_dev_pm_ops,
},
};
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index e760b1278143..bb92fd85e975 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -192,7 +192,6 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].regs.type = reg_offs->ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 680586354d12..d59bfbe8c6d0 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -150,7 +150,6 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index 352217ebabd8..4fd3a4d7133f 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the CAPI subsystem used by BT_CMTP
obj-$(CONFIG_BT_CMTP) += kernelcapi.o
-kernelcapi-y := kcapi.o capiutil.o capi.o kcapi_proc.o
+kernelcapi-y := kcapi.o capiutil.o capi.o
+kernelcapi-$(CONFIG_PROC_FS) += kcapi_proc.o
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 136ba9fe55e0..c5d13bdc239b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -917,13 +917,16 @@ int __init kcapi_init(void)
return err;
}
- kcapi_proc_init();
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ kcapi_proc_init();
+
return 0;
}
void kcapi_exit(void)
{
- kcapi_proc_exit();
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ kcapi_proc_exit();
cdebug_exit();
destroy_workqueue(kcapi_wq);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 2776ca5fc33f..b215b28cad7b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
- level, optname, len);
+ level, optname, optlen);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(int))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a0e717a986dc..fb38f684444f 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -262,7 +262,7 @@ config SENSORS_AMS
will be called ams.
config SENSORS_AMS_PMU
- bool "PMU variant"
+ bool "PMU variant" if SENSORS_AMS_I2C
depends on SENSORS_AMS && ADB_PMU
default y
help
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 779f1268286e..19c63959ebed 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -83,35 +83,32 @@ struct adb_driver macio_adb_driver = {
int macio_probe(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, "adb", "chrp,adb0");
- np = of_find_compatible_node(NULL, "adb", "chrp,adb0");
- if (np) {
- of_node_put(np);
+ if (np)
return 0;
- }
+
return -ENODEV;
}
int macio_init(void)
{
- struct device_node *adbs;
+ struct device_node *adbs __free(device_node) =
+ of_find_compatible_node(NULL, "adb", "chrp,adb0");
struct resource r;
unsigned int irq;
- adbs = of_find_compatible_node(NULL, "adb", "chrp,adb0");
if (!adbs)
return -ENXIO;
- if (of_address_to_resource(adbs, 0, &r)) {
- of_node_put(adbs);
+ if (of_address_to_resource(adbs, 0, &r))
return -ENXIO;
- }
+
adb = ioremap(r.start, sizeof(struct adb_regs));
- if (!adb) {
- of_node_put(adbs);
+ if (!adb)
return -ENOMEM;
- }
+
out_8(&adb->ctrl.r, 0);
out_8(&adb->intr.r, 0);
@@ -121,7 +118,6 @@ int macio_init(void)
out_8(&adb->autopoll.r, APE);
irq = irq_of_parse_and_map(adbs, 0);
- of_node_put(adbs);
if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
iounmap(adb);
printk(KERN_ERR "ADB: can't get irq %d\n", irq);
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index db9270da5b8e..b6ddf1d47cb4 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -140,24 +140,19 @@ static int macii_probe(void)
/* Initialize the driver */
static int macii_init(void)
{
- unsigned long flags;
int err;
- local_irq_save(flags);
-
err = macii_init_via();
if (err)
- goto out;
+ return err;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
if (err)
- goto out;
+ return err;
macii_state = idle;
-out:
- local_irq_restore(flags);
- return err;
+ return 0;
}
/* initialize the hardware */
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 42940108a187..3b8842c4a340 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -23,6 +23,18 @@ config ARM_MHU_V2
Say Y here if you want to build the ARM MHUv2 controller driver,
which provides unidirectional mailboxes between processing elements.
+config ARM_MHU_V3
+ tristate "ARM MHUv3 Mailbox"
+ depends on HAS_IOMEM || COMPILE_TEST
+ depends on OF
+ help
+ Say Y here if you want to build the ARM MHUv3 controller driver,
+ which provides unidirectional mailboxes between processing elements.
+
+ ARM MHUv3 controllers can implement a varying number of extensions
+ that provide different means of transport: supported extensions
+ will be discovered and, where possible, managed at probe time.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -68,15 +80,6 @@ config OMAP2PLUS_MBOX
OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
want to use OMAP2+ Mailbox framework support.
-config OMAP_MBOX_KFIFO_SIZE
- int "Mailbox kfifo default buffer size (bytes)"
- depends on OMAP2PLUS_MBOX
- default 256
- help
- Specify the default size of mailbox's kfifo buffers (bytes).
- This can also be changed at runtime (via the mbox_kfifo_size
- module parameter).
-
config ROCKCHIP_MBOX
bool "Rockchip Soc Integrated Mailbox Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 18793e6caa2f..5cf2f54debaf 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
+obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
new file mode 100644
index 000000000000..b97e79a5870f
--- /dev/null
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -0,0 +1,1103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Message Handling Unit Version 3 (MHUv3) driver.
+ *
+ * Copyright (C) 2024 ARM Ltd.
+ *
+ * Based on ARM MHUv2 driver.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* ====== MHUv3 Registers ====== */
+
+/* Maximum number of Doorbell channel windows */
+#define MHUV3_DBCW_MAX 128
+/* Number of DBCH combined interrupt status registers */
+#define MHUV3_DBCH_CMB_INT_ST_REG_CNT 4
+
+/* Number of FFCH combined interrupt status registers */
+#define MHUV3_FFCH_CMB_INT_ST_REG_CNT 2
+
+#define MHUV3_FLAG_BITS 32
+
+/* Not a typo ... */
+#define MHUV3_MAJOR_VERSION 2
+
+enum {
+ MHUV3_MBOX_CELL_TYPE,
+ MHUV3_MBOX_CELL_CHWN,
+ MHUV3_MBOX_CELL_PARAM,
+ MHUV3_MBOX_CELLS
+};
+
+/* Padding bitfields/fields represent holes in the register MMIO layout */
+
+/* CTRL_Page */
+struct blk_id {
+#define id GENMASK(3, 0)
+ u32 val;
+} __packed;
+
+struct feat_spt0 {
+#define dbe_spt GENMASK(3, 0)
+#define fe_spt GENMASK(7, 4)
+#define fce_spt GENMASK(11, 8)
+ u32 val;
+} __packed;
+
+struct feat_spt1 {
+#define auto_op_spt GENMASK(3, 0)
+ u32 val;
+} __packed;
+
+struct dbch_cfg0 {
+#define num_dbch GENMASK(7, 0)
+ u32 val;
+} __packed;
+
+struct ffch_cfg0 {
+#define num_ffch GENMASK(7, 0)
+#define x8ba_spt BIT(8)
+#define x16ba_spt BIT(9)
+#define x32ba_spt BIT(10)
+#define x64ba_spt BIT(11)
+#define ffch_depth GENMASK(25, 16)
+ u32 val;
+} __packed;
+
+struct fch_cfg0 {
+#define num_fch GENMASK(9, 0)
+#define fcgi_spt BIT(10) // MBX-only
+#define num_fcg GENMASK(15, 11)
+#define num_fch_per_grp GENMASK(20, 16)
+#define fch_ws GENMASK(28, 21)
+ u32 val;
+} __packed;
+
+struct ctrl {
+#define op_req BIT(0)
+#define ch_op_mask BIT(1)
+ u32 val;
+} __packed;
+
+struct fch_ctrl {
+#define _int_en BIT(2)
+ u32 val;
+} __packed;
+
+struct iidr {
+#define implementer GENMASK(11, 0)
+#define revision GENMASK(15, 12)
+#define variant GENMASK(19, 16)
+#define product_id GENMASK(31, 20)
+ u32 val;
+} __packed;
+
+struct aidr {
+#define arch_minor_rev GENMASK(3, 0)
+#define arch_major_rev GENMASK(7, 4)
+ u32 val;
+} __packed;
+
+struct ctrl_page {
+ struct blk_id blk_id;
+ u8 pad[12];
+ struct feat_spt0 feat_spt0;
+ struct feat_spt1 feat_spt1;
+ u8 pad1[8];
+ struct dbch_cfg0 dbch_cfg0;
+ u8 pad2[12];
+ struct ffch_cfg0 ffch_cfg0;
+ u8 pad3[12];
+ struct fch_cfg0 fch_cfg0;
+ u8 pad4[188];
+ struct ctrl x_ctrl;
+ /*-- MBX-only registers --*/
+ u8 pad5[60];
+ struct fch_ctrl fch_ctrl;
+ u32 fcg_int_en;
+ u8 pad6[696];
+ /*-- End of MBX-only ---- */
+ u32 dbch_int_st[MHUV3_DBCH_CMB_INT_ST_REG_CNT];
+ u32 ffch_int_st[MHUV3_FFCH_CMB_INT_ST_REG_CNT];
+ /*-- MBX-only registers --*/
+ u8 pad7[88];
+ u32 fcg_int_st;
+ u8 pad8[12];
+ u32 fcg_grp_int_st[32];
+ u8 pad9[2760];
+ /*-- End of MBX-only ---- */
+ struct iidr iidr;
+ struct aidr aidr;
+ u32 imp_def_id[12];
+} __packed;
+
+/* DBCW_Page */
+
+struct xbcw_ctrl {
+#define comb_en BIT(0)
+ u32 val;
+} __packed;
+
+struct pdbcw_int {
+#define tfr_ack BIT(0)
+ u32 val;
+} __packed;
+
+struct pdbcw_page {
+ u32 st;
+ u8 pad[8];
+ u32 set;
+ struct pdbcw_int int_st;
+ struct pdbcw_int int_clr;
+ struct pdbcw_int int_en;
+ struct xbcw_ctrl ctrl;
+} __packed;
+
+struct mdbcw_page {
+ u32 st;
+ u32 st_msk;
+ u32 clr;
+ u8 pad[4];
+ u32 msk_st;
+ u32 msk_set;
+ u32 msk_clr;
+ struct xbcw_ctrl ctrl;
+} __packed;
+
+struct dummy_page {
+ u8 pad[SZ_4K];
+} __packed;
+
+struct mhu3_pbx_frame_reg {
+ struct ctrl_page ctrl;
+ struct pdbcw_page dbcw[MHUV3_DBCW_MAX];
+ struct dummy_page ffcw;
+ struct dummy_page fcw;
+ u8 pad[SZ_4K * 11];
+ struct dummy_page impdef;
+} __packed;
+
+struct mhu3_mbx_frame_reg {
+ struct ctrl_page ctrl;
+ struct mdbcw_page dbcw[MHUV3_DBCW_MAX];
+ struct dummy_page ffcw;
+ struct dummy_page fcw;
+ u8 pad[SZ_4K * 11];
+ struct dummy_page impdef;
+} __packed;
+
+/* Macro for reading a bitmask within a physically mapped packed struct */
+#define readl_relaxed_bitmask(_regptr, _bitmask) \
+ ({ \
+ unsigned long _rval; \
+ _rval = readl_relaxed(_regptr); \
+ FIELD_GET(_bitmask, _rval); \
+ })
+
+/* Macro for writing a bitmask within a physically mapped packed struct */
+#define writel_relaxed_bitmask(_value, _regptr, _bitmask) \
+ ({ \
+ unsigned long _rval; \
+ typeof(_regptr) _rptr = _regptr; \
+ typeof(_bitmask) _bmask = _bitmask; \
+ _rval = readl_relaxed(_rptr); \
+ _rval &= ~(_bmask); \
+ _rval |= FIELD_PREP((unsigned long long)_bmask, _value);\
+ writel_relaxed(_rval, _rptr); \
+ })
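+
+/*
+ * For example, writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[i].int_en, tfr_ack)
+ * performs a read-modify-write that sets only the tfr_ack field of the
+ * interrupt enable register and leaves all other bits untouched.
+ */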
+
+/* ====== MHUv3 data structures ====== */
+
+enum mhuv3_frame {
+ PBX_FRAME,
+ MBX_FRAME,
+};
+
+static char *mhuv3_str[] = {
+ "PBX",
+ "MBX"
+};
+
+enum mhuv3_extension_type {
+ DBE_EXT,
+ FCE_EXT,
+ FE_EXT,
+ NUM_EXT
+};
+
+static char *mhuv3_ext_str[] = {
+ "DBE",
+ "FCE",
+ "FE"
+};
+
+struct mhuv3;
+
+/**
+ * struct mhuv3_protocol_ops - MHUv3 operations
+ *
+ * @rx_startup: Receiver startup callback.
+ * @rx_shutdown: Receiver shutdown callback.
+ * @read_data: Read available Sender in-band LE data (if any).
+ * @rx_complete: Acknowledge data reception to the Sender. Any out-of-band data
+ * has to have been already retrieved before calling this.
+ * @tx_startup: Sender startup callback.
+ * @tx_shutdown: Sender shutdown callback.
+ * @last_tx_done: Report back to the Sender if the last transfer has completed.
+ * @send_data: Send data to the receiver.
+ *
+ * Each supported transport protocol provides its own implementation of
+ * these operations.
+ */
+struct mhuv3_protocol_ops {
+ int (*rx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*rx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void *(*read_data)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*rx_complete)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*tx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*tx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ int (*last_tx_done)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ int (*send_data)(struct mhuv3 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/**
+ * struct mhuv3_mbox_chan_priv - MHUv3 channel private information
+ *
+ * @ch_idx: Channel window index associated to this mailbox channel.
+ * @doorbell: Doorbell bit number within the @ch_idx window.
+ * Only relevant to Doorbell transport.
+ * @ops: Transport protocol specific operations for this channel.
+ *
+ * Transport-specific data attached to the mailbox channel priv data.
+ */
+struct mhuv3_mbox_chan_priv {
+ u32 ch_idx;
+ u32 doorbell;
+ const struct mhuv3_protocol_ops *ops;
+};
+
+/**
+ * struct mhuv3_extension - MHUv3 extension descriptor
+ *
+ * @type: Type of extension
+ * @num_chans: Max number of channels found for this extension.
+ * @base_ch_idx: First channel number assigned to this extension, picked from
+ * the set of all mailbox channels descriptors created.
+ * @mbox_of_xlate: Extension specific helper to parse DT and lookup associated
+ * channel from the related 'mboxes' property.
+ * @combined_irq_setup: Extension specific helper to setup the combined irq.
+ * @channels_init: Extension specific helper to initialize channels.
+ * @chan_from_comb_irq_get: Extension specific helper to lookup which channel
+ * triggered the combined irq.
+ * @pending_db: Array of per-channel pending doorbells.
+ * @pending_lock: Protect access to pending_db.
+ */
+struct mhuv3_extension {
+ enum mhuv3_extension_type type;
+ unsigned int num_chans;
+ unsigned int base_ch_idx;
+ struct mbox_chan *(*mbox_of_xlate)(struct mhuv3 *mhu,
+ unsigned int channel,
+ unsigned int param);
+ void (*combined_irq_setup)(struct mhuv3 *mhu);
+ int (*channels_init)(struct mhuv3 *mhu);
+ struct mbox_chan *(*chan_from_comb_irq_get)(struct mhuv3 *mhu);
+ u32 pending_db[MHUV3_DBCW_MAX];
+ /* Protect access to pending_db */
+ spinlock_t pending_lock;
+};
+
+/**
+ * struct mhuv3 - MHUv3 mailbox controller data
+ *
+ * @frame: Frame type: MBX_FRAME or PBX_FRAME.
+ * @auto_op_full: Flag to indicate if the MHU supports AutoOp full mode.
+ * @major: MHUv3 controller architectural major version.
+ * @minor: MHUv3 controller architectural minor version.
+ * @implem: MHUv3 controller IIDR implementer.
+ * @rev: MHUv3 controller IIDR revision.
+ * @var: MHUv3 controller IIDR variant.
+ * @prod_id: MHUv3 controller IIDR product_id.
+ * @num_chans: The total number of channels discovered across all extensions.
+ * @cmb_irq: Combined IRQ number, if one is defined.
+ * @ctrl: A reference to the MHUv3 control page for this block.
+ * @pbx: Base address of the PBX register mapping region.
+ * @mbx: Base address of the MBX register mapping region.
+ * @ext: Array holding descriptors for each implemented extension found.
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ */
+struct mhuv3 {
+ enum mhuv3_frame frame;
+ bool auto_op_full;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int implem;
+ unsigned int rev;
+ unsigned int var;
+ unsigned int prod_id;
+ unsigned int num_chans;
+ int cmb_irq;
+ struct ctrl_page __iomem *ctrl;
+ union {
+ struct mhu3_pbx_frame_reg __iomem *pbx;
+ struct mhu3_mbx_frame_reg __iomem *mbx;
+ };
+ struct mhuv3_extension *ext[NUM_EXT];
+ struct mbox_controller mbox;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv3, mbox)
+
+typedef int (*mhuv3_extension_initializer)(struct mhuv3 *mhu);
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static void mhuv3_doorbell_tx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Enable Transfer Acknowledgment events */
+ writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+}
+
+static void mhuv3_doorbell_tx_shutdown(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ unsigned long flags;
+
+ /* Disable Channel Transfer Ack events */
+ writel_relaxed_bitmask(0x0, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+
+ /* Clear Channel Transfer Ack and pending doorbells */
+ writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_clr, tfr_ack);
+ spin_lock_irqsave(&e->pending_lock, flags);
+ e->pending_db[priv->ch_idx] = 0;
+ spin_unlock_irqrestore(&e->pending_lock, flags);
+}
+
+static int mhuv3_doorbell_rx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Unmask Channel Transfer events */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_clr);
+
+ return 0;
+}
+
+static void mhuv3_doorbell_rx_shutdown(struct mhuv3 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Mask Channel Transfer events */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_set);
+}
+
+static void mhuv3_doorbell_rx_complete(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Clearing the pending transfer generates the Channel Transfer Ack */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].clr);
+}
+
+static int mhuv3_doorbell_last_tx_done(struct mhuv3 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ int done;
+
+ done = !(readl_relaxed(&mhu->pbx->dbcw[priv->ch_idx].st) &
+ BIT(priv->doorbell));
+ if (done) {
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ unsigned long flags;
+
+ /* Take care to clear the pending doorbell also when polling */
+ spin_lock_irqsave(&e->pending_lock, flags);
+ e->pending_db[priv->ch_idx] &= ~BIT(priv->doorbell);
+ spin_unlock_irqrestore(&e->pending_lock, flags);
+ }
+
+ return done;
+}
+
+static int mhuv3_doorbell_send_data(struct mhuv3 *mhu, struct mbox_chan *chan,
+ void *arg)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+
+ scoped_guard(spinlock_irqsave, &e->pending_lock) {
+ /* Only one in-flight Transfer is allowed per-doorbell */
+ if (e->pending_db[priv->ch_idx] & BIT(priv->doorbell))
+ return -EBUSY;
+
+ e->pending_db[priv->ch_idx] |= BIT(priv->doorbell);
+ }
+
+ writel_relaxed(BIT(priv->doorbell), &mhu->pbx->dbcw[priv->ch_idx].set);
+
+ return 0;
+}
+
+static const struct mhuv3_protocol_ops mhuv3_doorbell_ops = {
+ .tx_startup = mhuv3_doorbell_tx_startup,
+ .tx_shutdown = mhuv3_doorbell_tx_shutdown,
+ .rx_startup = mhuv3_doorbell_rx_startup,
+ .rx_shutdown = mhuv3_doorbell_rx_shutdown,
+ .rx_complete = mhuv3_doorbell_rx_complete,
+ .last_tx_done = mhuv3_doorbell_last_tx_done,
+ .send_data = mhuv3_doorbell_send_data,
+};
+
+/* Sender and receiver mailbox ops */
+static bool mhuv3_sender_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ return priv->ops->last_tx_done(mhu, chan);
+}
+
+static int mhuv3_sender_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (!priv->ops->last_tx_done(mhu, chan))
+ return -EBUSY;
+
+ return priv->ops->send_data(mhu, chan, data);
+}
+
+static int mhuv3_sender_startup(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (priv->ops->tx_startup)
+ priv->ops->tx_startup(mhu, chan);
+
+ return 0;
+}
+
+static void mhuv3_sender_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (priv->ops->tx_shutdown)
+ priv->ops->tx_shutdown(mhu, chan);
+}
+
+static const struct mbox_chan_ops mhuv3_sender_ops = {
+ .send_data = mhuv3_sender_send_data,
+ .startup = mhuv3_sender_startup,
+ .shutdown = mhuv3_sender_shutdown,
+ .last_tx_done = mhuv3_sender_last_tx_done,
+};
+
+static int mhuv3_receiver_startup(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ return priv->ops->rx_startup(mhu, chan);
+}
+
+static void mhuv3_receiver_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ priv->ops->rx_shutdown(mhu, chan);
+}
+
+static int mhuv3_receiver_send_data(struct mbox_chan *chan, void *data)
+{
+ dev_err(chan->mbox->dev,
+ "Trying to transmit on a MBX MHUv3 frame\n");
+ return -EIO;
+}
+
+static bool mhuv3_receiver_last_tx_done(struct mbox_chan *chan)
+{
+ dev_err(chan->mbox->dev, "Trying to Tx poll on a MBX MHUv3 frame\n");
+ return true;
+}
+
+static const struct mbox_chan_ops mhuv3_receiver_ops = {
+ .send_data = mhuv3_receiver_send_data,
+ .startup = mhuv3_receiver_startup,
+ .shutdown = mhuv3_receiver_shutdown,
+ .last_tx_done = mhuv3_receiver_last_tx_done,
+};
+
+static struct mbox_chan *mhuv3_dbe_mbox_of_xlate(struct mhuv3 *mhu,
+ unsigned int channel,
+ unsigned int doorbell)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mbox_chan *chans = mbox->chans;
+
+ if (channel >= e->num_chans || doorbell >= MHUV3_FLAG_BITS) {
+ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
+ channel, doorbell);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return &chans[e->base_ch_idx + channel * MHUV3_FLAG_BITS + doorbell];
+}
+
+static void mhuv3_dbe_combined_irq_setup(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ int i;
+
+ if (mhu->frame == PBX_FRAME) {
+ struct pdbcw_page __iomem *dbcw = mhu->pbx->dbcw;
+
+ for (i = 0; i < e->num_chans; i++) {
+ writel_relaxed_bitmask(0x1, &dbcw[i].int_clr, tfr_ack);
+ writel_relaxed_bitmask(0x0, &dbcw[i].int_en, tfr_ack);
+ writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en);
+ }
+ } else {
+ struct mdbcw_page __iomem *dbcw = mhu->mbx->dbcw;
+
+ for (i = 0; i < e->num_chans; i++) {
+ writel_relaxed(0xFFFFFFFF, &dbcw[i].clr);
+ writel_relaxed(0xFFFFFFFF, &dbcw[i].msk_set);
+ writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en);
+ }
+ }
+}
+
+static int mhuv3_dbe_channels_init(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ chans = mbox->chans + mbox->num_chans;
+ e->base_ch_idx = mbox->num_chans;
+ for (i = 0; i < e->num_chans; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+ int k;
+
+ for (k = 0; k < MHUV3_FLAG_BITS; k++) {
+ priv = devm_kmalloc(mbox->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_idx = i;
+ priv->ops = &mhuv3_doorbell_ops;
+ priv->doorbell = k;
+ chans++->con_priv = priv;
+ mbox->num_chans++;
+ }
+ }
+
+ spin_lock_init(&e->pending_lock);
+
+ return 0;
+}
+
+static bool mhuv3_dbe_doorbell_lookup(struct mhuv3 *mhu, unsigned int channel,
+ unsigned int *db)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct device *dev = mhu->mbox.dev;
+ u32 st;
+
+ if (mhu->frame == PBX_FRAME) {
+ u32 active_dbs, fired_dbs;
+
+ st = readl_relaxed_bitmask(&mhu->pbx->dbcw[channel].int_st,
+ tfr_ack);
+ if (!st)
+ goto err_spurious;
+
+ active_dbs = readl_relaxed(&mhu->pbx->dbcw[channel].st);
+ scoped_guard(spinlock_irqsave, &e->pending_lock) {
+ fired_dbs = e->pending_db[channel] & ~active_dbs;
+ if (!fired_dbs)
+ goto err_spurious;
+
+ *db = __ffs(fired_dbs);
+ e->pending_db[channel] &= ~BIT(*db);
+ }
+ fired_dbs &= ~BIT(*db);
+ /* Clear TFR Ack if no more doorbells pending */
+ if (!fired_dbs)
+ writel_relaxed_bitmask(0x1,
+ &mhu->pbx->dbcw[channel].int_clr,
+ tfr_ack);
+ } else {
+ st = readl_relaxed(&mhu->mbx->dbcw[channel].st_msk);
+ if (!st)
+ goto err_spurious;
+
+ *db = __ffs(st);
+ }
+
+ return true;
+
+err_spurious:
+ dev_warn(dev, "Spurious IRQ on %s channel:%d\n",
+ mhuv3_str[mhu->frame], channel);
+
+ return false;
+}
+
+static struct mbox_chan *mhuv3_dbe_chan_from_comb_irq_get(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct device *dev = mhu->mbox.dev;
+ int i;
+
+ for (i = 0; i < MHUV3_DBCH_CMB_INT_ST_REG_CNT; i++) {
+ unsigned int channel, db;
+ u32 cmb_st;
+
+ cmb_st = readl_relaxed(&mhu->ctrl->dbch_int_st[i]);
+ if (!cmb_st)
+ continue;
+
+ channel = i * MHUV3_FLAG_BITS + __ffs(cmb_st);
+ if (channel >= e->num_chans) {
+ dev_err(dev, "Invalid %s channel:%d\n",
+ mhuv3_str[mhu->frame], channel);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!mhuv3_dbe_doorbell_lookup(mhu, channel, &db))
+ continue;
+
+ dev_dbg(dev, "Found %s ch[%d]/db[%d]\n",
+ mhuv3_str[mhu->frame], channel, db);
+
+ return &mhu->mbox.chans[channel * MHUV3_FLAG_BITS + db];
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static int mhuv3_dbe_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv3_extension *e;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, dbe_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: Initializing DBE Extension.\n", mhuv3_str[mhu->frame]);
+
+ e = devm_kzalloc(dev, sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->type = DBE_EXT;
+ /* Note that, by the spec, the number of channels is (num_dbch + 1) */
+ e->num_chans =
+ readl_relaxed_bitmask(&mhu->ctrl->dbch_cfg0, num_dbch) + 1;
+ e->mbox_of_xlate = mhuv3_dbe_mbox_of_xlate;
+ e->combined_irq_setup = mhuv3_dbe_combined_irq_setup;
+ e->channels_init = mhuv3_dbe_channels_init;
+ e->chan_from_comb_irq_get = mhuv3_dbe_chan_from_comb_irq_get;
+
+ mhu->num_chans += e->num_chans * MHUV3_FLAG_BITS;
+ mhu->ext[DBE_EXT] = e;
+
+ dev_dbg(dev, "%s: found %d DBE channels.\n",
+ mhuv3_str[mhu->frame], e->num_chans);
+
+ return 0;
+}
+
+static int mhuv3_fce_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fce_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: FCE Extension not supported by driver.\n",
+ mhuv3_str[mhu->frame]);
+
+ return 0;
+}
+
+static int mhuv3_fe_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fe_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: FE Extension not supported by driver.\n",
+ mhuv3_str[mhu->frame]);
+
+ return 0;
+}
+
+static mhuv3_extension_initializer mhuv3_extension_init[NUM_EXT] = {
+ mhuv3_dbe_init,
+ mhuv3_fce_init,
+ mhuv3_fe_init,
+};
+
+static int mhuv3_initialize_channels(struct device *dev, struct mhuv3 *mhu)
+{
+ struct mbox_controller *mbox = &mhu->mbox;
+ int i, ret = 0;
+
+ mbox->chans = devm_kcalloc(dev, mhu->num_chans,
+ sizeof(*mbox->chans), GFP_KERNEL);
+ if (!mbox->chans)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to initialize channels\n");
+
+ for (i = 0; i < NUM_EXT && !ret; i++)
+ if (mhu->ext[i])
+ ret = mhu->ext[i]->channels_init(mhu);
+
+ return ret;
+}
+
+static struct mbox_chan *mhuv3_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *pa)
+{
+ struct mhuv3 *mhu = mhu_from_mbox(mbox);
+ unsigned int type, channel, param;
+
+ if (pa->args_count != MHUV3_MBOX_CELLS)
+ return ERR_PTR(-EINVAL);
+
+ type = pa->args[MHUV3_MBOX_CELL_TYPE];
+ if (type >= NUM_EXT)
+ return ERR_PTR(-EINVAL);
+
+ channel = pa->args[MHUV3_MBOX_CELL_CHWN];
+ param = pa->args[MHUV3_MBOX_CELL_PARAM];
+
+ return mhu->ext[type]->mbox_of_xlate(mhu, channel, param);
+}
+
+static void mhu_frame_cleanup_actions(void *data)
+{
+ struct mhuv3 *mhu = data;
+
+ writel_relaxed_bitmask(0x0, &mhu->ctrl->x_ctrl, op_req);
+}
+
+static int mhuv3_frame_init(struct mhuv3 *mhu, void __iomem *regs)
+{
+ struct device *dev = mhu->mbox.dev;
+ int i;
+
+ mhu->ctrl = regs;
+ mhu->frame = readl_relaxed_bitmask(&mhu->ctrl->blk_id, id);
+ if (mhu->frame > MBX_FRAME)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid Frame type- %d\n", mhu->frame);
+
+ mhu->major = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_major_rev);
+ mhu->minor = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_minor_rev);
+ mhu->implem = readl_relaxed_bitmask(&mhu->ctrl->iidr, implementer);
+ mhu->rev = readl_relaxed_bitmask(&mhu->ctrl->iidr, revision);
+ mhu->var = readl_relaxed_bitmask(&mhu->ctrl->iidr, variant);
+ mhu->prod_id = readl_relaxed_bitmask(&mhu->ctrl->iidr, product_id);
+ if (mhu->major != MHUV3_MAJOR_VERSION)
+ return dev_err_probe(dev, -EINVAL,
+ "Unsupported MHU %s block - major:%d minor:%d\n",
+ mhuv3_str[mhu->frame], mhu->major,
+ mhu->minor);
+
+ mhu->auto_op_full =
+ !!readl_relaxed_bitmask(&mhu->ctrl->feat_spt1, auto_op_spt);
+ /* Request the PBX/MBX to remain operational */
+ if (mhu->auto_op_full) {
+ writel_relaxed_bitmask(0x1, &mhu->ctrl->x_ctrl, op_req);
+ devm_add_action_or_reset(dev, mhu_frame_cleanup_actions, mhu);
+ }
+
+ dev_dbg(dev,
+ "Found MHU %s block - major:%d minor:%d\n implem:0x%X rev:0x%X var:0x%X prod_id:0x%X",
+ mhuv3_str[mhu->frame], mhu->major, mhu->minor,
+ mhu->implem, mhu->rev, mhu->var, mhu->prod_id);
+
+ if (mhu->frame == PBX_FRAME)
+ mhu->pbx = regs;
+ else
+ mhu->mbx = regs;
+
+ for (i = 0; i < NUM_EXT; i++) {
+ int ret;
+
+ /*
+ * Note that extension initialization fails only when the
+ * extension's own initialization routine fails and the extension
+ * was found to be supported both in hardware and in software.
+ */
+ ret = mhuv3_extension_init[i](mhu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize %s %s\n",
+ mhuv3_str[mhu->frame],
+ mhuv3_ext_str[i]);
+ }
+
+ return 0;
+}
+
+static irqreturn_t mhuv3_pbx_comb_interrupt(int irq, void *arg)
+{
+ unsigned int i, found = 0;
+ struct mhuv3 *mhu = arg;
+ struct mbox_chan *chan;
+ struct device *dev;
+ int ret = IRQ_NONE;
+
+ dev = mhu->mbox.dev;
+ for (i = 0; i < NUM_EXT; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+
+ /* FCE does not participate in the PBX combined interrupt */
+ if (i == FCE_EXT || !mhu->ext[i])
+ continue;
+
+ chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+ if (IS_ERR(chan))
+ continue;
+
+ found++;
+ priv = chan->con_priv;
+ if (!chan->cl) {
+ dev_warn(dev, "TX Ack on UNBOUND channel (%u)\n",
+ priv->ch_idx);
+ continue;
+ }
+
+ mbox_chan_txdone(chan, 0);
+ ret = IRQ_HANDLED;
+ }
+
+ if (found == 0)
+ dev_warn_once(dev, "Failed to find channel for the TX interrupt\n");
+
+ return ret;
+}
+
+static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
+{
+ unsigned int i, found = 0;
+ struct mhuv3 *mhu = arg;
+ struct mbox_chan *chan;
+ struct device *dev;
+ int ret = IRQ_NONE;
+
+ dev = mhu->mbox.dev;
+ for (i = 0; i < NUM_EXT; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+ void *data __free(kfree) = NULL;
+
+ if (!mhu->ext[i])
+ continue;
+
+ /* Process any extension which could be the source of the IRQ */
+ chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+ if (IS_ERR(chan))
+ continue;
+
+ found++;
+ /* From here on we need to call rx_complete even on error */
+ priv = chan->con_priv;
+ if (!chan->cl) {
+ dev_warn(dev, "RX Data on UNBOUND channel (%u)\n",
+ priv->ch_idx);
+ goto rx_ack;
+ }
+
+ /* Read optional in-band LE data first. */
+ if (priv->ops->read_data) {
+ data = priv->ops->read_data(mhu, chan);
+ if (IS_ERR(data)) {
+ dev_err(dev,
+ "Failed to read in-band data. err:%ld\n",
+ PTR_ERR(no_free_ptr(data)));
+ goto rx_ack;
+ }
+ }
+
+ mbox_chan_received_data(chan, data);
+ ret = IRQ_HANDLED;
+
+ /*
+ * Acknowledge transfer after any possible optional
+ * out-of-band data has also been retrieved via
+ * mbox_chan_received_data().
+ */
+rx_ack:
+ if (priv->ops->rx_complete)
+ priv->ops->rx_complete(mhu, chan);
+ }
+
+ if (found == 0)
+ dev_warn_once(dev, "Failed to find channel for the RX interrupt\n");
+
+ return ret;
+}
+
+static int mhuv3_setup_pbx(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ mhu->mbox.ops = &mhuv3_sender_ops;
+
+ if (mhu->cmb_irq > 0) {
+ int ret, i;
+
+ ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL,
+ mhuv3_pbx_comb_interrupt,
+ IRQF_ONESHOT, "mhuv3-pbx", mhu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to request PBX IRQ\n");
+
+ mhu->mbox.txdone_irq = true;
+ mhu->mbox.txdone_poll = false;
+
+ for (i = 0; i < NUM_EXT; i++)
+ if (mhu->ext[i])
+ mhu->ext[i]->combined_irq_setup(mhu);
+
+ dev_dbg(dev, "MHUv3 PBX IRQs initialized.\n");
+
+ return 0;
+ }
+
+ dev_info(dev, "Using PBX in Tx polling mode.\n");
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ return 0;
+}
+
+static int mhuv3_setup_mbx(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->mbox.ops = &mhuv3_receiver_ops;
+
+ if (mhu->cmb_irq <= 0)
+ return dev_err_probe(dev, -EINVAL,
+ "MBX combined IRQ is missing !\n");
+
+ ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL,
+ mhuv3_mbx_comb_interrupt, IRQF_ONESHOT,
+ "mhuv3-mbx", mhu);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request MBX IRQ\n");
+
+ for (i = 0; i < NUM_EXT; i++)
+ if (mhu->ext[i])
+ mhu->ext[i]->combined_irq_setup(mhu);
+
+ dev_dbg(dev, "MHUv3 MBX IRQs initialized.\n");
+
+ return ret;
+}
+
+static int mhuv3_irqs_init(struct mhuv3 *mhu, struct platform_device *pdev)
+{
+ dev_dbg(mhu->mbox.dev, "Initializing %s block.\n",
+ mhuv3_str[mhu->frame]);
+
+ if (mhu->frame == PBX_FRAME) {
+ mhu->cmb_irq =
+ platform_get_irq_byname_optional(pdev, "combined");
+ return mhuv3_setup_pbx(mhu);
+ }
+
+ mhu->cmb_irq = platform_get_irq_byname(pdev, "combined");
+ return mhuv3_setup_mbx(mhu);
+}
+
+static int mhuv3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ struct mhuv3 *mhu;
+ int ret;
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mhu->mbox.dev = dev;
+ ret = mhuv3_frame_init(mhu, regs);
+ if (ret)
+ return ret;
+
+ ret = mhuv3_irqs_init(mhu, pdev);
+ if (ret)
+ return ret;
+
+ mhu->mbox.of_xlate = mhuv3_mbox_of_xlate;
+ ret = mhuv3_initialize_channels(dev, mhu);
+ if (ret)
+ return ret;
+
+ ret = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register ARM MHUv3 driver\n");
+
+ return ret;
+}
+
+static const struct of_device_id mhuv3_of_match[] = {
+ { .compatible = "arm,mhuv3", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mhuv3_of_match);
+
+static struct platform_driver mhuv3_driver = {
+ .driver = {
+ .name = "arm-mhuv3-mailbox",
+ .of_match_table = mhuv3_of_match,
+ },
+ .probe = mhuv3_probe,
+};
+module_platform_driver(mhuv3_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM MHUv3 Driver");
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
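For context, a minimal mailbox-client sketch of how a consumer might drive a DBE doorbell channel exposed by this controller; the names (my_client_probe, my_tx_done) and the use of the first "mboxes" entry are illustrative assumptions, not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

/* Called once the controller reports the doorbell Transfer Ack */
static void my_tx_done(struct mbox_client *cl, void *msg, int r)
{
	dev_dbg(cl->dev, "doorbell transfer completed: %d\n", r);
}

static int my_client_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->tx_done = my_tx_done;
	cl->tx_block = false;

	/* First "mboxes" entry; the specifier is <type channel doorbell> */
	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Doorbells carry no payload, so the message pointer is unused */
	ret = mbox_send_message(chan, NULL);

	return ret < 0 ? ret : 0;
}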
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 1768d3d5aaa0..242e7504a628 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -43,6 +43,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/workqueue.h>
#define PDC_SUCCESS 0
@@ -293,8 +294,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /* tasklet for deferred processing after DMA rx interrupt */
- struct tasklet_struct rx_tasklet;
+ /* work for deferred processing after DMA rx interrupt */
+ struct work_struct rx_work;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -952,18 +953,18 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- tasklet_schedule(&pdcs->rx_tasklet);
+ queue_work(system_bh_wq, &pdcs->rx_work);
return IRQ_HANDLED;
}
/**
- * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * pdc_work_cb() - Work callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
* @t: Pointer to the rx_work member embedded in the pdc_state structure
*/
-static void pdc_tasklet_cb(struct tasklet_struct *t)
+static void pdc_work_cb(struct work_struct *t)
{
- struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
+ struct pdc_state *pdcs = from_work(pdcs, t, rx_work);
pdc_receive(pdcs);
@@ -1577,8 +1578,8 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
- /* Init tasklet for deferred DMA rx processing */
- tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
+ /* Init work for deferred DMA rx processing */
+ INIT_WORK(&pdcs->rx_work, pdc_work_cb);
err = pdc_interrupts_init(pdcs);
if (err)
@@ -1595,7 +1596,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1611,7 +1612,7 @@ static void pdc_remove(struct platform_device *pdev)
pdc_free_debugfs();
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
pdc_hw_disable(pdcs);
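The conversion above follows the generic tasklet-to-BH-workqueue pattern; a minimal, self-contained sketch, with the struct and function names as illustrative assumptions:

#include <linux/workqueue.h>

struct my_state {
	struct work_struct rx_work;
};

/* Deferred receive processing, now run from the BH workqueue */
static void my_rx_work_cb(struct work_struct *t)
{
	struct my_state *st = from_work(st, t, rx_work);

	/* ... drain the RX ring for 'st' here ... */
}

/* probe:    INIT_WORK(&st->rx_work, my_rx_work_cb);   */
/* IRQ path: queue_work(system_bh_wq, &st->rx_work);   */
/* teardown: cancel_work_sync(&st->rx_work);           */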
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 5c1d09cad761..933727f89431 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "mailbox.h"
@@ -80,7 +81,7 @@ struct imx_mu_con_priv {
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
enum imx_mu_chan_type type;
struct mbox_chan *chan;
- struct tasklet_struct txdb_tasklet;
+ struct work_struct txdb_work;
};
struct imx_mu_priv {
@@ -232,7 +233,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
break;
case IMX_MU_TYPE_TXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
break;
case IMX_MU_TYPE_TXDB_V2:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
@@ -420,7 +421,7 @@ static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
}
/* Simulate hack for mbox framework */
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
break;
default:
@@ -484,9 +485,9 @@ exit:
return err;
}
-static void imx_mu_txdb_tasklet(unsigned long data)
+static void imx_mu_txdb_work(struct work_struct *t)
{
- struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+ struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work);
mbox_chan_txdone(cp->chan, 0);
}
@@ -570,8 +571,7 @@ static int imx_mu_startup(struct mbox_chan *chan)
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbells don't have ACK support */
- tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
- (unsigned long)cp);
+ INIT_WORK(&cp->txdb_work, imx_mu_txdb_work);
return 0;
}
@@ -615,7 +615,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
}
if (cp->type == IMX_MU_TYPE_TXDB) {
- tasklet_kill(&cp->txdb_tasklet);
+ cancel_work_sync(&cp->txdb_work);
pm_runtime_put_sync(priv->dev);
return;
}
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index ead2200f39ba..4aa394e91109 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -465,7 +465,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
struct cmdq_task *task, *tmp;
unsigned long flags;
- WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
+ WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
@@ -765,6 +765,7 @@ static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
{}
};
+MODULE_DEVICE_TABLE(of, cmdq_of_ids);
static struct platform_driver cmdq_drv = {
.probe = cmdq_probe,
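Context for the WARN_ON() change above: pm_runtime_get_sync() can legitimately return 1 when the device is already active, so only a negative return indicates failure. Where the usage count should also be dropped on failure, a common alternative is pm_runtime_resume_and_get(); a small illustrative sketch (my_resume_device is a hypothetical helper):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Illustrative helper: resume a device, treating only -errno as failure */
static int my_resume_device(struct device *dev)
{
	int ret;

	/*
	 * Unlike pm_runtime_get_sync(), this never returns 1 and it
	 * drops the usage count by itself on failure.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	return 0;
}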
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index c961706fe61d..46747559b438 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -51,6 +50,11 @@
#define MBOX_INTR_CFG_TYPE1 0
#define MBOX_INTR_CFG_TYPE2 1
+typedef enum {
+ IRQ_TX = 1,
+ IRQ_RX = 2,
+} omap_mbox_irq_t;
+
struct omap_mbox_fifo {
unsigned long msg;
unsigned long fifo_stat;
@@ -61,14 +65,6 @@ struct omap_mbox_fifo {
u32 intr_bit;
};
-struct omap_mbox_queue {
- spinlock_t lock;
- struct kfifo fifo;
- struct work_struct work;
- struct omap_mbox *mbox;
- bool full;
-};
-
struct omap_mbox_match_data {
u32 intr_type;
};
@@ -81,29 +77,11 @@ struct omap_mbox_device {
u32 num_users;
u32 num_fifos;
u32 intr_type;
- struct omap_mbox **mboxes;
- struct mbox_controller controller;
- struct list_head elem;
-};
-
-struct omap_mbox_fifo_info {
- int tx_id;
- int tx_usr;
- int tx_irq;
-
- int rx_id;
- int rx_usr;
- int rx_irq;
-
- const char *name;
- bool send_no_irq;
};
struct omap_mbox {
const char *name;
int irq;
- struct omap_mbox_queue *rxq;
- struct device *dev;
struct omap_mbox_device *parent;
struct omap_mbox_fifo tx_fifo;
struct omap_mbox_fifo rx_fifo;
@@ -112,22 +90,6 @@ struct omap_mbox {
bool send_no_irq;
};
-/* global variables for the mailbox devices */
-static DEFINE_MUTEX(omap_mbox_devices_lock);
-static LIST_HEAD(omap_mbox_devices);
-
-static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
-module_param(mbox_kfifo_size, uint, S_IRUGO);
-MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
-
-static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
-{
- if (!chan || !chan->con_priv)
- return NULL;
-
- return (struct omap_mbox *)chan->con_priv;
-}
-
static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
@@ -197,7 +159,7 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
return (int)(enable & status & bit);
}
-static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
u32 l;
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
@@ -210,7 +172,7 @@ static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
mbox_write_reg(mbox->parent, l, irqenable);
}
-static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
@@ -227,87 +189,27 @@ static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
mbox_write_reg(mbox->parent, bit, irqdisable);
}
-void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
-{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- _omap_mbox_enable_irq(mbox, irq);
-}
-EXPORT_SYMBOL(omap_mbox_enable_irq);
-
-void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
-{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- _omap_mbox_disable_irq(mbox, irq);
-}
-EXPORT_SYMBOL(omap_mbox_disable_irq);
-
-/*
- * Message receiver(workqueue)
- */
-static void mbox_rx_work(struct work_struct *work)
-{
- struct omap_mbox_queue *mq =
- container_of(work, struct omap_mbox_queue, work);
- mbox_msg_t data;
- u32 msg;
- int len;
-
- while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
- len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
- WARN_ON(len != sizeof(msg));
- data = msg;
-
- mbox_chan_received_data(mq->mbox->chan, (void *)data);
- spin_lock_irq(&mq->lock);
- if (mq->full) {
- mq->full = false;
- _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
- }
- spin_unlock_irq(&mq->lock);
- }
-}
-
/*
* Mailbox interrupt handler
*/
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
- _omap_mbox_disable_irq(mbox, IRQ_TX);
+ omap_mbox_disable_irq(mbox, IRQ_TX);
ack_mbox_irq(mbox, IRQ_TX);
mbox_chan_txdone(mbox->chan, 0);
}
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
- struct omap_mbox_queue *mq = mbox->rxq;
u32 msg;
- int len;
while (!mbox_fifo_empty(mbox)) {
- if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
- _omap_mbox_disable_irq(mbox, IRQ_RX);
- mq->full = true;
- goto nomem;
- }
-
msg = mbox_fifo_read(mbox);
-
- len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
- WARN_ON(len != sizeof(msg));
+ mbox_chan_received_data(mbox->chan, (void *)(uintptr_t)msg);
}
- /* no more messages in the fifo. clear IRQ source. */
+ /* clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
-nomem:
- schedule_work(&mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
@@ -323,188 +225,34 @@ static irqreturn_t mbox_interrupt(int irq, void *p)
return IRQ_HANDLED;
}
-static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
- void (*work)(struct work_struct *))
-{
- struct omap_mbox_queue *mq;
-
- if (!work)
- return NULL;
-
- mq = kzalloc(sizeof(*mq), GFP_KERNEL);
- if (!mq)
- return NULL;
-
- spin_lock_init(&mq->lock);
-
- if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
- goto error;
-
- INIT_WORK(&mq->work, work);
- return mq;
-
-error:
- kfree(mq);
- return NULL;
-}
-
-static void mbox_queue_free(struct omap_mbox_queue *q)
-{
- kfifo_free(&q->fifo);
- kfree(q);
-}
-
static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
- struct omap_mbox_queue *mq;
- mq = mbox_queue_alloc(mbox, mbox_rx_work);
- if (!mq)
- return -ENOMEM;
- mbox->rxq = mq;
- mq->mbox = mbox;
-
- ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
- mbox->name, mbox);
+ ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt,
+ IRQF_ONESHOT, mbox->name, mbox);
if (unlikely(ret)) {
pr_err("failed to register mailbox interrupt:%d\n", ret);
- goto fail_request_irq;
+ return ret;
}
if (mbox->send_no_irq)
mbox->chan->txdone_method = TXDONE_BY_ACK;
- _omap_mbox_enable_irq(mbox, IRQ_RX);
+ omap_mbox_enable_irq(mbox, IRQ_RX);
return 0;
-
-fail_request_irq:
- mbox_queue_free(mbox->rxq);
- return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
- _omap_mbox_disable_irq(mbox, IRQ_RX);
+ omap_mbox_disable_irq(mbox, IRQ_RX);
free_irq(mbox->irq, mbox);
- flush_work(&mbox->rxq->work);
- mbox_queue_free(mbox->rxq);
-}
-
-static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
- const char *mbox_name)
-{
- struct omap_mbox *_mbox, *mbox = NULL;
- struct omap_mbox **mboxes = mdev->mboxes;
- int i;
-
- if (!mboxes)
- return NULL;
-
- for (i = 0; (_mbox = mboxes[i]); i++) {
- if (!strcmp(_mbox->name, mbox_name)) {
- mbox = _mbox;
- break;
- }
- }
- return mbox;
-}
-
-struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
- const char *chan_name)
-{
- struct device *dev = cl->dev;
- struct omap_mbox *mbox = NULL;
- struct omap_mbox_device *mdev;
- int ret;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (dev->of_node) {
- pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
- __func__);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&omap_mbox_devices_lock);
- list_for_each_entry(mdev, &omap_mbox_devices, elem) {
- mbox = omap_mbox_device_find(mdev, chan_name);
- if (mbox)
- break;
- }
- mutex_unlock(&omap_mbox_devices_lock);
-
- if (!mbox || !mbox->chan)
- return ERR_PTR(-ENOENT);
-
- ret = mbox_bind_client(mbox->chan, cl);
- if (ret)
- return ERR_PTR(ret);
-
- return mbox->chan;
-}
-EXPORT_SYMBOL(omap_mbox_request_channel);
-
-static struct class omap_mbox_class = { .name = "mbox", };
-
-static int omap_mbox_register(struct omap_mbox_device *mdev)
-{
- int ret;
- int i;
- struct omap_mbox **mboxes;
-
- if (!mdev || !mdev->mboxes)
- return -EINVAL;
-
- mboxes = mdev->mboxes;
- for (i = 0; mboxes[i]; i++) {
- struct omap_mbox *mbox = mboxes[i];
-
- mbox->dev = device_create(&omap_mbox_class, mdev->dev,
- 0, mbox, "%s", mbox->name);
- if (IS_ERR(mbox->dev)) {
- ret = PTR_ERR(mbox->dev);
- goto err_out;
- }
- }
-
- mutex_lock(&omap_mbox_devices_lock);
- list_add(&mdev->elem, &omap_mbox_devices);
- mutex_unlock(&omap_mbox_devices_lock);
-
- ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
-
-err_out:
- if (ret) {
- while (i--)
- device_unregister(mboxes[i]->dev);
- }
- return ret;
-}
-
-static int omap_mbox_unregister(struct omap_mbox_device *mdev)
-{
- int i;
- struct omap_mbox **mboxes;
-
- if (!mdev || !mdev->mboxes)
- return -EINVAL;
-
- mutex_lock(&omap_mbox_devices_lock);
- list_del(&mdev->elem);
- mutex_unlock(&omap_mbox_devices_lock);
-
- mboxes = mdev->mboxes;
- for (i = 0; mboxes[i]; i++)
- device_unregister(mboxes[i]->dev);
- return 0;
}
static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
struct omap_mbox_device *mdev = mbox->parent;
int ret = 0;
@@ -519,7 +267,7 @@ static int omap_mbox_chan_startup(struct mbox_chan *chan)
static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
struct omap_mbox_device *mdev = mbox->parent;
mutex_lock(&mdev->cfg_lock);
@@ -530,41 +278,40 @@ static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
- int ret = -EBUSY;
+ if (mbox_fifo_full(mbox))
+ return -EBUSY;
- if (!mbox_fifo_full(mbox)) {
- _omap_mbox_enable_irq(mbox, IRQ_RX);
- mbox_fifo_write(mbox, msg);
- ret = 0;
- _omap_mbox_disable_irq(mbox, IRQ_RX);
+ omap_mbox_enable_irq(mbox, IRQ_RX);
+ mbox_fifo_write(mbox, msg);
+ omap_mbox_disable_irq(mbox, IRQ_RX);
- /* we must read and ack the interrupt directly from here */
- mbox_fifo_read(mbox);
- ack_mbox_irq(mbox, IRQ_RX);
- }
+ /* we must read and ack the interrupt directly from here */
+ mbox_fifo_read(mbox);
+ ack_mbox_irq(mbox, IRQ_RX);
- return ret;
+ return 0;
}
static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
- int ret = -EBUSY;
-
- if (!mbox_fifo_full(mbox)) {
- mbox_fifo_write(mbox, msg);
- ret = 0;
+ if (mbox_fifo_full(mbox)) {
+ /* always enable the interrupt */
+ omap_mbox_enable_irq(mbox, IRQ_TX);
+ return -EBUSY;
}
+ mbox_fifo_write(mbox, msg);
+
/* always enable the interrupt */
- _omap_mbox_enable_irq(mbox, IRQ_TX);
- return ret;
+ omap_mbox_enable_irq(mbox, IRQ_TX);
+ return 0;
}
static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
int ret;
- u32 msg = omap_mbox_message(data);
+ u32 msg = (u32)(uintptr_t)(data);
if (!mbox)
return -EINVAL;
@@ -666,8 +413,9 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
struct device_node *node;
struct omap_mbox_device *mdev;
struct omap_mbox *mbox;
+ int i;
- mdev = container_of(controller, struct omap_mbox_device, controller);
+ mdev = dev_get_drvdata(controller->dev);
if (WARN_ON(!mdev))
return ERR_PTR(-EINVAL);
@@ -678,22 +426,29 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
return ERR_PTR(-ENODEV);
}
- mbox = omap_mbox_device_find(mdev, node->name);
+ for (i = 0; i < controller->num_chans; i++) {
+ mbox = controller->chans[i].con_priv;
+ if (!strcmp(mbox->name, node->name)) {
+ of_node_put(node);
+ return &controller->chans[i];
+ }
+ }
+
of_node_put(node);
- return mbox ? mbox->chan : ERR_PTR(-ENOENT);
+ return ERR_PTR(-ENOENT);
}
static int omap_mbox_probe(struct platform_device *pdev)
{
int ret;
struct mbox_chan *chnls;
- struct omap_mbox **list, *mbox, *mboxblk;
- struct omap_mbox_fifo_info *finfo, *finfoblk;
+ struct omap_mbox *mbox;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
const struct omap_mbox_match_data *match_data;
+ struct mbox_controller *controller;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
@@ -722,40 +477,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
- GFP_KERNEL);
- if (!finfoblk)
- return -ENOMEM;
-
- finfo = finfoblk;
- child = NULL;
- for (i = 0; i < info_count; i++, finfo++) {
- child = of_get_next_available_child(node, child);
- ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
- ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->tx_id = tmp[0];
- finfo->tx_irq = tmp[1];
- finfo->tx_usr = tmp[2];
-
- ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
- ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->rx_id = tmp[0];
- finfo->rx_irq = tmp[1];
- finfo->rx_usr = tmp[2];
-
- finfo->name = child->name;
-
- finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
-
- if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
- finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
- return -EINVAL;
- }
-
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
@@ -769,52 +490,67 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev->irq_ctx)
return -ENOMEM;
- /* allocate one extra for marking end of list */
- list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
- GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
GFP_KERNEL);
if (!chnls)
return -ENOMEM;
- mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
- GFP_KERNEL);
- if (!mboxblk)
- return -ENOMEM;
+ child = NULL;
+ for (i = 0; i < info_count; i++) {
+ int tx_id, tx_irq, tx_usr;
+ int rx_id, rx_usr;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ tx_id = tmp[0];
+ tx_irq = tmp[1];
+ tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ rx_id = tmp[0];
+ /* rx_irq = tmp[1]; */
+ rx_usr = tmp[2];
+
+ if (tx_id >= num_fifos || rx_id >= num_fifos ||
+ tx_usr >= num_users || rx_usr >= num_users)
+ return -EINVAL;
- mbox = mboxblk;
- finfo = finfoblk;
- for (i = 0; i < info_count; i++, finfo++) {
fifo = &mbox->tx_fifo;
- fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
- fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
- fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
+ fifo->msg = MAILBOX_MESSAGE(tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, tx_usr);
fifo = &mbox->rx_fifo;
- fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
- fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
- fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
-
- mbox->send_no_irq = finfo->send_no_irq;
+ fifo->msg = MAILBOX_MESSAGE(rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, rx_usr);
+
+ mbox->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
mbox->intr_type = intr_type;
mbox->parent = mdev;
- mbox->name = finfo->name;
- mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
+ mbox->name = child->name;
+ mbox->irq = platform_get_irq(pdev, tx_irq);
if (mbox->irq < 0)
return mbox->irq;
mbox->chan = &chnls[i];
chnls[i].con_priv = mbox;
- list[i] = mbox++;
}
mutex_init(&mdev->cfg_lock);
@@ -822,28 +558,30 @@ static int omap_mbox_probe(struct platform_device *pdev)
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
mdev->intr_type = intr_type;
- mdev->mboxes = list;
+ controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ return -ENOMEM;
/*
* OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready
* IRQ, which is needed to run the Tx state machine
*/
- mdev->controller.txdone_irq = true;
- mdev->controller.dev = mdev->dev;
- mdev->controller.ops = &omap_mbox_chan_ops;
- mdev->controller.chans = chnls;
- mdev->controller.num_chans = info_count;
- mdev->controller.of_xlate = omap_mbox_of_xlate;
- ret = omap_mbox_register(mdev);
+ controller->txdone_irq = true;
+ controller->dev = mdev->dev;
+ controller->ops = &omap_mbox_chan_ops;
+ controller->chans = chnls;
+ controller->num_chans = info_count;
+ controller->of_xlate = omap_mbox_of_xlate;
+ ret = devm_mbox_controller_register(mdev->dev, controller);
if (ret)
return ret;
platform_set_drvdata(pdev, mdev);
- pm_runtime_enable(mdev->dev);
+ devm_pm_runtime_enable(mdev->dev);
ret = pm_runtime_resume_and_get(mdev->dev);
if (ret < 0)
- goto unregister;
+ return ret;
/*
* just print the raw revision register, the format is not
@@ -854,61 +592,20 @@ static int omap_mbox_probe(struct platform_device *pdev)
ret = pm_runtime_put_sync(mdev->dev);
if (ret < 0 && ret != -ENOSYS)
- goto unregister;
+ return ret;
- devm_kfree(&pdev->dev, finfoblk);
return 0;
-
-unregister:
- pm_runtime_disable(mdev->dev);
- omap_mbox_unregister(mdev);
- return ret;
-}
-
-static void omap_mbox_remove(struct platform_device *pdev)
-{
- struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
-
- pm_runtime_disable(mdev->dev);
- omap_mbox_unregister(mdev);
}
static struct platform_driver omap_mbox_driver = {
.probe = omap_mbox_probe,
- .remove_new = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
.of_match_table = of_match_ptr(omap_mailbox_of_match),
},
};
-
-static int __init omap_mbox_init(void)
-{
- int err;
-
- err = class_register(&omap_mbox_class);
- if (err)
- return err;
-
- /* kfifo size sanity check: alignment and minimal size */
- mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
- mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
-
- err = platform_driver_register(&omap_mbox_driver);
- if (err)
- class_unregister(&omap_mbox_class);
-
- return err;
-}
-subsys_initcall(omap_mbox_init);
-
-static void __exit omap_mbox_exit(void)
-{
- platform_driver_unregister(&omap_mbox_driver);
- class_unregister(&omap_mbox_class);
-}
-module_exit(omap_mbox_exit);
+module_platform_driver(omap_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
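The refactor above removes the kfifo/workqueue bounce on the receive side; a minimal sketch of the resulting RX model, where the my_* names and register layout are illustrative assumptions rather than the driver's actual definitions:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>

/* Hypothetical per-mailbox state: just what the RX path needs */
struct my_mbox {
	struct mbox_chan *chan;
	void __iomem *msg_reg;		/* read to pop one message word */
	void __iomem *msg_stat_reg;	/* number of words pending */
};

/* Threaded handler: drain the FIFO and deliver each word directly */
static irqreturn_t my_rx_thread_fn(int irq, void *p)
{
	struct my_mbox *mbox = p;
	u32 msg;

	while (readl(mbox->msg_stat_reg)) {
		msg = readl(mbox->msg_reg);
		mbox_chan_received_data(mbox->chan, (void *)(uintptr_t)msg);
	}

	return IRQ_HANDLED;
}

/*
 * Registered with a NULL primary handler so the draining runs in a
 * thread, mirroring the request_threaded_irq() call in the patch:
 *
 *	request_threaded_irq(irq, NULL, my_rx_thread_fn, IRQF_ONESHOT,
 *			     name, mbox);
 */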
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 25c65afc030a..7c90bac3de21 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -6,9 +6,11 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
@@ -16,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
/* IPI agent ID any */
@@ -52,6 +55,15 @@
#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+/* IPI Message Buffer Information */
+#define RESP_OFFSET 0x20U
+#define DEST_OFFSET 0x40U
+#define IPI_BUF_SIZE 0x20U
+#define DST_BIT_POS 9U
+#define SRC_BITMASK GENMASK(11, 8)
+
+#define MAX_SGI 16
+
/**
* struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
* @is_opened: indicate if the IPI channel is opened
@@ -72,6 +84,10 @@ struct zynqmp_ipi_mchan {
unsigned int chan_type;
};
+struct zynqmp_ipi_mbox;
+
+typedef int (*setup_ipi_fn)(struct zynqmp_ipi_mbox *ipi_mbox, struct device_node *node);
+
/**
* struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
* platform data.
@@ -81,6 +97,7 @@ struct zynqmp_ipi_mchan {
* @remote_id: remote IPI agent ID
* @mbox: mailbox Controller
* @mchans: array for channels, tx channel and rx channel.
+ * @setup_ipi_fn: Function Pointer to set up IPI Channels
*/
struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata *pdata;
@@ -88,6 +105,7 @@ struct zynqmp_ipi_mbox {
u32 remote_id;
struct mbox_controller mbox;
struct zynqmp_ipi_mchan mchans[2];
+ setup_ipi_fn setup_ipi_fn;
};
/**
@@ -98,6 +116,7 @@ struct zynqmp_ipi_mbox {
* @irq: IPI agent interrupt ID
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
+ * @virq_sgi: IRQ number mapped to SGI
* @num_mboxes: number of mailboxes of this IPI agent
* @ipi_mboxes: IPI mailboxes of this IPI agent
*/
@@ -106,10 +125,13 @@ struct zynqmp_ipi_pdata {
int irq;
unsigned int method;
u32 local_id;
+ int virq_sgi;
int num_mboxes;
struct zynqmp_ipi_mbox ipi_mboxes[] __counted_by(num_mboxes);
};
+static DEFINE_PER_CPU(struct zynqmp_ipi_pdata *, per_cpu_pdata);
+
static struct device_driver zynqmp_ipi_mbox_driver = {
.owner = THIS_MODULE,
.name = "zynqmp-ipi-mbox",
@@ -163,9 +185,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
if (mchan->is_opened) {
msg = mchan->rx_buf;
- msg->len = mchan->req_buf_size;
- memcpy_fromio(msg->data, mchan->req_buf,
- msg->len);
+ if (msg) {
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ }
mbox_chan_received_data(chan, (void *)msg);
status = IRQ_HANDLED;
}
@@ -174,6 +198,14 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
return status;
}
+static irqreturn_t zynqmp_sgi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata **pdata_ptr = data;
+ struct zynqmp_ipi_pdata *pdata = *pdata_ptr;
+
+ return zynqmp_ipi_interrupt(irq, pdata);
+}
+
/**
* zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
*
@@ -275,26 +307,26 @@ static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
if (mchan->chan_type == IPI_MB_CHNL_TX) {
/* Send request message */
- if (msg && msg->len > mchan->req_buf_size) {
+ if (msg && msg->len > mchan->req_buf_size && mchan->req_buf) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->req_buf_size);
return -EINVAL;
}
- if (msg && msg->len)
+ if (msg && msg->len && mchan->req_buf)
memcpy_toio(mchan->req_buf, msg->data, msg->len);
/* Kick IPI mailbox to send message */
arg0 = SMC_IPI_MAILBOX_NOTIFY;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
} else {
/* Send response message */
- if (msg && msg->len > mchan->resp_buf_size) {
+ if (msg && msg->len > mchan->resp_buf_size && mchan->resp_buf) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->resp_buf_size);
return -EINVAL;
}
- if (msg && msg->len)
+ if (msg && msg->len && mchan->resp_buf)
memcpy_toio(mchan->resp_buf, msg->data, msg->len);
arg0 = SMC_IPI_MAILBOX_ACK;
zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
@@ -415,12 +447,6 @@ static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
return chan;
}
-static const struct of_device_id zynqmp_ipi_of_match[] = {
- { .compatible = "xlnx,zynqmp-ipi-mailbox" },
- {},
-};
-MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
-
/**
* zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
*
@@ -470,12 +496,9 @@ static void zynqmp_ipi_mbox_dev_release(struct device *dev)
static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
struct device_node *node)
{
- struct zynqmp_ipi_mchan *mchan;
struct mbox_chan *chans;
struct mbox_controller *mbox;
- struct resource res;
struct device *dev, *mdev;
- const char *name;
int ret;
dev = ipi_mbox->pdata->dev;
@@ -495,6 +518,73 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
}
mdev = &ipi_mbox->dev;
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ ret = ipi_mbox->setup_ipi_fn(ipi_mbox, node);
+ if (ret) {
+ dev_err(dev, "Failed to set up IPI Buffers.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 5;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_setup - set up IPI Buffers for classic flow
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * This is used to set up IPI buffers for the ZynqMP SoC, or when the
+ * user wishes to keep the classic driver usage model on newer SoCs
+ * that have only buffered IPIs.
+ *
+ * Note that bufferless IPIs and mixed usage of buffered and bufferless
+ * IPIs are not supported with this flow.
+ *
+ * This will be invoked with compatible string "xlnx,zynqmp-ipi-mailbox".
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct device *mdev;
+ struct resource res;
+ const char *name;
+ int ret;
+
+ mdev = &ipi_mbox->dev;
+
mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
name = "local_request_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
@@ -569,39 +659,218 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
if (!mchan->rx_buf)
return -ENOMEM;
- /* Get the IPI remote agent ID */
- ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
- if (ret < 0) {
- dev_err(dev, "No IPI remote ID is specified.\n");
+ return 0;
+}
+
+/**
+ * versal_ipi_setup - Set up IPIs to support mixed usage of
+ * Buffered and Bufferless IPIs.
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int versal_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *tx_mchan, *rx_mchan;
+ struct resource host_res, remote_res;
+ struct device_node *parent_node;
+ int host_idx, remote_idx;
+ struct device *mdev;
+
+ tx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ rx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ parent_node = of_get_parent(node);
+ mdev = &ipi_mbox->dev;
+
+ host_idx = zynqmp_ipi_mbox_get_buf_res(parent_node, "msg", &host_res);
+ remote_idx = zynqmp_ipi_mbox_get_buf_res(node, "msg", &remote_res);
+
+ /*
+ * Only set up buffers if both sides claim to have msg buffers.
+ * This is because each buffered IPI's corresponding msg buffers
+ * are reserved for use by other buffered IPIs.
+ */
+ if (!host_idx && !remote_idx) {
+ u32 host_src, host_dst, remote_src, remote_dst;
+ u32 buff_sz;
+
+ buff_sz = resource_size(&host_res);
+
+ host_src = host_res.start & SRC_BITMASK;
+ remote_src = remote_res.start & SRC_BITMASK;
+
+ host_dst = (host_src >> DST_BIT_POS) * DEST_OFFSET;
+ remote_dst = (remote_src >> DST_BIT_POS) * DEST_OFFSET;
+
+ /* Validate that the IPI IDs are within the IPI message buffer space. */
+ if (host_dst >= buff_sz || remote_dst >= buff_sz) {
+ dev_err(mdev,
+ "Invalid IPI Message buffer values: %x %x\n",
+ host_dst, remote_dst);
+ return -EINVAL;
+ }
+
+ tx_mchan->req_buf = devm_ioremap(mdev,
+ host_res.start | remote_dst,
+ IPI_BUF_SIZE);
+ if (!tx_mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ tx_mchan->resp_buf = devm_ioremap(mdev,
+ (remote_res.start | host_dst) +
+ RESP_OFFSET, IPI_BUF_SIZE);
+ if (!tx_mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ rx_mchan->req_buf = devm_ioremap(mdev,
+ remote_res.start | host_dst,
+ IPI_BUF_SIZE);
+ if (!rx_mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ rx_mchan->resp_buf = devm_ioremap(mdev,
+ (host_res.start | remote_dst) +
+ RESP_OFFSET, IPI_BUF_SIZE);
+ if (!rx_mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ tx_mchan->resp_buf_size = IPI_BUF_SIZE;
+ tx_mchan->req_buf_size = IPI_BUF_SIZE;
+ tx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!tx_mchan->rx_buf)
+ return -ENOMEM;
+
+ rx_mchan->resp_buf_size = IPI_BUF_SIZE;
+ rx_mchan->req_buf_size = IPI_BUF_SIZE;
+ rx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!rx_mchan->rx_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int xlnx_mbox_cpuhp_start(unsigned int cpu)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = get_cpu_var(per_cpu_pdata);
+ put_cpu_var(per_cpu_pdata);
+ enable_percpu_irq(pdata->virq_sgi, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int xlnx_mbox_cpuhp_down(unsigned int cpu)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = get_cpu_var(per_cpu_pdata);
+ put_cpu_var(per_cpu_pdata);
+ disable_percpu_irq(pdata->virq_sgi);
+
+ return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = *this_cpu_ptr(&per_cpu_pdata);
+
+ disable_percpu_irq(pdata->virq_sgi);
+}
+
+static int xlnx_mbox_init_sgi(struct platform_device *pdev,
+ int sgi_num,
+ struct zynqmp_ipi_pdata *pdata)
+{
+ int ret = 0;
+ int cpu;
+
+ /*
+ * IRQ-related structures are used for the following: ensure each
+ * SGI interrupt is mapped by the GIC IRQ domain, and that the
+ * corresponding Linux IRQ for the HW IRQ has a handler for
+ * interrupts received from the remote processor.
+ */
+ struct irq_domain *domain;
+ struct irq_fwspec sgi_fwspec;
+ struct device_node *interrupt_parent = NULL;
+ struct device *dev = &pdev->dev;
+
+ /* Find GIC controller to map SGIs. */
+ interrupt_parent = of_irq_find_parent(dev->of_node);
+ if (!interrupt_parent) {
+ dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+ return -EINVAL;
+ }
+
+ /* Each SGI needs to be associated with GIC's IRQ domain. */
+ domain = irq_find_host(interrupt_parent);
+ of_node_put(interrupt_parent);
+
+ /* Each mapping needs GIC domain when finding IRQ mapping. */
+ sgi_fwspec.fwnode = domain->fwnode;
+
+ /*
+ * For an SGI, the IRQ domain expects a single-parameter fwspec:
+ * param[0] carries the SGI (hwirq) number, which is set below.
+ */
+ sgi_fwspec.param_count = 1;
+
+ /* Set SGI's hwirq */
+ sgi_fwspec.param[0] = sgi_num;
+ pdata->virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(per_cpu_pdata, cpu) = pdata;
+
+ ret = request_percpu_irq(pdata->virq_sgi, zynqmp_sgi_interrupt, pdev->name,
+ &per_cpu_pdata);
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(pdata->virq_sgi);
return ret;
}
- mbox = &ipi_mbox->mbox;
- mbox->dev = mdev;
- mbox->ops = &zynqmp_ipi_chan_ops;
- mbox->num_chans = 2;
- mbox->txdone_irq = false;
- mbox->txdone_poll = true;
- mbox->txpoll_period = 5;
- mbox->of_xlate = zynqmp_ipi_of_xlate;
- chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
- if (!chans)
- return -ENOMEM;
- mbox->chans = chans;
- chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
- chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
- ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
- ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
- ret = devm_mbox_controller_register(mdev, mbox);
- if (ret)
- dev_err(mdev,
- "Failed to register mbox_controller(%d)\n", ret);
- else
- dev_info(mdev,
- "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ irq_to_desc(pdata->virq_sgi);
+ irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+
+ /* Setup function for the CPU hot-plug cases */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mailbox/sgi:starting",
+ xlnx_mbox_cpuhp_start, xlnx_mbox_cpuhp_down);
+
return ret;
}
+static void xlnx_mbox_cleanup_sgi(struct zynqmp_ipi_pdata *pdata)
+{
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+ free_percpu_irq(pdata->virq_sgi, &per_cpu_pdata);
+ irq_dispose_mapping(pdata->virq_sgi);
+}
+
/**
* zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
*
@@ -612,6 +881,9 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
+ if (pdata->irq < MAX_SGI)
+ xlnx_mbox_cleanup_sgi(pdata);
+
i = pdata->num_mboxes;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
@@ -627,9 +899,11 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
- struct zynqmp_ipi_pdata *pdata;
+ struct zynqmp_ipi_pdata __percpu *pdata;
+ struct of_phandle_args out_irq;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
+ setup_ipi_fn ipi_fn;
num_mboxes = of_get_available_child_count(np);
if (num_mboxes == 0) {
@@ -650,9 +924,18 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
return ret;
}
+ ipi_fn = (setup_ipi_fn)device_get_match_data(&pdev->dev);
+ if (!ipi_fn) {
+ dev_err(dev,
+ "Mbox Compatible String is missing IPI Setup fn.\n");
+ return -ENODEV;
+ }
+
pdata->num_mboxes = num_mboxes;
mbox = pdata->ipi_mboxes;
+ mbox->setup_ipi_fn = ipi_fn;
+
for_each_available_child_of_node(np, nc) {
mbox->pdata = pdata;
ret = zynqmp_ipi_mbox_probe(mbox, nc);
@@ -665,14 +948,32 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
mbox++;
}
- /* IPI IRQ */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ ret = of_irq_parse_one(dev_of_node(dev), 0, &out_irq);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
+ }
+ ret = out_irq.args[1];
+
+ /*
+ * If the interrupt number is in the SGI range, request an SGI;
+ * otherwise request the IPI system IRQ.
+ */
+ if (ret < MAX_SGI) {
+ pdata->irq = ret;
+ ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
+ if (ret)
+ goto free_mbox_dev;
+ } else {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto free_mbox_dev;
+
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ }
- pdata->irq = ret;
- ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
- IRQF_SHARED, dev_name(dev), pdata);
if (ret) {
dev_err(dev, "IRQ %d is not requested successfully.\n",
pdata->irq);
@@ -695,6 +996,17 @@ static void zynqmp_ipi_remove(struct platform_device *pdev)
zynqmp_ipi_free_mboxes(pdata);
}
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox",
+ .data = &zynqmp_ipi_setup,
+ },
+ { .compatible = "xlnx,versal-ipi-mailbox",
+ .data = &versal_ipi_setup,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
static struct platform_driver zynqmp_ipi_driver = {
.probe = zynqmp_ipi_probe,
.remove_new = zynqmp_ipi_remove,
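
A note on the per-CPU SGI plumbing added to this driver: request_percpu_irq() takes a per-CPU variable as its cookie and the handler runs on whichever CPU receives the software-generated interrupt, which is why the probe path copies pdata into a per-CPU pointer for every possible CPU and registers CPU hot-plug callbacks that enable/disable the interrupt as cores come online or go down. A minimal, driver-agnostic sketch of the same pattern (struct my_data, my_virq and the callback names are illustrative, not part of this driver):

    #include <linux/cpuhotplug.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    struct my_data { struct work_struct work; };

    static unsigned int my_virq;
    static DEFINE_PER_CPU(struct my_data *, my_percpu_ptr);

    static irqreturn_t my_sgi_handler(int irq, void *dev_id)
    {
            /* dev_id is this CPU's slot of the per-CPU cookie */
            struct my_data *d = *(struct my_data **)dev_id;

            schedule_work(&d->work);
            return IRQ_HANDLED;
    }

    static int my_cpu_online(unsigned int cpu)
    {
            enable_percpu_irq(my_virq, IRQ_TYPE_NONE);
            return 0;
    }

    static int my_cpu_offline(unsigned int cpu)
    {
            disable_percpu_irq(my_virq);
            return 0;
    }

    static int my_init_sgi(unsigned int virq, struct my_data *d)
    {
            int cpu, ret;

            my_virq = virq;
            for_each_possible_cpu(cpu)
                    per_cpu(my_percpu_ptr, cpu) = d;

            /* The cookie must be a per-CPU variable, not a plain pointer */
            ret = request_percpu_irq(virq, my_sgi_handler, "my-sgi",
                                     &my_percpu_ptr);
            if (ret)
                    return ret;

            /* CPUHP_AP_ONLINE_DYN returns a positive state number on success */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my/sgi:online",
                                    my_cpu_online, my_cpu_offline);
            return ret < 0 ? ret : 0;
    }
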
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 2bba4d6aaaa2..463eb13bd0b2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k;
if (b->ops->is_extents)
@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
const char *err;
for_each_key(b, k, &iter) {
@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);
- m = bch_btree_iter_init(b, &iter, preceding_key_p);
+ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
- if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
return status;
status = BTREE_INSERT_STATUS_INSERT;
@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search,
- struct bset_tree *start)
+static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->size = ARRAY_SIZE(iter->data);
- iter->used = 0;
+ iter->iter.size = ARRAY_SIZE(iter->stack_data);
+ iter->iter.used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->iter.b = b;
#endif
for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
+ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
struct bkey *search)
{
- return __bch_btree_iter_init(b, iter, search, b->set);
+ return __bch_btree_iter_stack_init(b, iter, search, b->set);
}
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned int i;
@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false, state);
+ __btree_sort(b, &iter.iter, start, order, false, state);
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_stack_init(b, &iter, NULL);
- btree_mergesort(b, new->set->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter.iter, false, true);
bch_time_stats_update(&state->time, start_time);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index d795c84246b0..011f6062c4c0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -321,7 +321,14 @@ struct btree_iter {
#endif
struct btree_iter_set {
struct bkey *k, *end;
- } data[MAX_BSETS];
+ } data[];
+};
+
+/* Fixed-size btree_iter that can be allocated on the stack */
+
+struct btree_iter_stack {
+ struct btree_iter iter;
+ struct btree_iter_set stack_data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search);
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+#define for_each_key_filter(b, k, stack_iter, filter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
+ filter));)
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
+#define for_each_key(b, k, stack_iter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
/* Sorting */
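
The bset.h hunk above is the heart of this series of bcache changes: struct btree_iter loses its fixed MAX_BSETS array in favour of a flexible array member, and the new struct btree_iter_stack wraps it together with a bounded array, so the many on-stack users keep a known size while larger iterators can be allocated with however many slots they need (see the iter_size change in super.c further down). A minimal standalone sketch of the idiom, with illustrative types rather than the bcache ones:

    #include <stdlib.h>

    struct item { int key, val; };

    struct iter {
            unsigned int size, used;
            struct item data[];              /* flexible array member */
    };

    #define MAX_STACK_ITEMS 4

    struct iter_stack {
            /* Embedding a struct that ends in a flexible array member is a
             * GCC extension the kernel relies on; iter must stay first. */
            struct iter iter;
            struct item stack_data[MAX_STACK_ITEMS];
    };

    static void iter_stack_init(struct iter_stack *s)
    {
            /* iter.data lines up with stack_data, which follows the header */
            s->iter.size = MAX_STACK_ITEMS;
            s->iter.used = 0;
    }

    static struct iter *iter_alloc(unsigned int n)
    {
            struct iter *it = malloc(sizeof(*it) + n * sizeof(struct item));

            if (it) {
                    it->size = n;
                    it->used = 0;
            }
            return it;
    }
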
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 196cdacce38f..d011a7154d33 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bset_tree *t;
gc->nodes++;
@@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
@@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
+ bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);
@@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_init(&b->keys, &iter, NULL);
+ bch_btree_iter_stack_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, &b->keys,
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);
@@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
@@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg)
ret = 0;
/* root node keys are checked before thread created */
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct btree_check_state check_state;
/* check and mark root node keys */
@@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
if (b->level) {
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 330bcd9ea4a9..4d11fc664cb0 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -171,7 +171,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
struct page *page;
unsigned int i;
- page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ page = read_cache_page_gfp(bdev->bd_mapping,
SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
if (IS_ERR(page))
return "IO error";
@@ -881,8 +881,8 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
- ida_simple_remove(&bcache_device_idx,
- first_minor_to_idx(disk->first_minor));
+ ida_free(&bcache_device_idx,
+ first_minor_to_idx(disk->first_minor));
put_disk(disk);
}
@@ -940,8 +940,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->full_dirty_stripes)
goto out_free_stripe_sectors_dirty;
- idx = ida_simple_get(&bcache_device_idx, 0,
- BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ idx = ida_alloc_max(&bcache_device_idx, BCACHE_DEVICE_IDX_MAX - 1,
+ GFP_KERNEL);
if (idx < 0)
goto out_free_full_dirty_stripes;
@@ -986,7 +986,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
out_bioset_exit:
bioset_exit(&d->bio_split);
out_ida_remove:
- ida_simple_remove(&bcache_device_idx, idx);
+ ida_free(&bcache_device_idx, idx);
out_free_full_dirty_stripes:
kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
@@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
- sizeof(struct btree_iter_set);
+ iter_size = sizeof(struct btree_iter) +
+ ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ sizeof(struct btree_iter_set);
c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
if (!c->devices)
@@ -2554,10 +2555,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (IS_ERR(bdev_file))
goto out_free_sb;
- err = "failed to set blocksize";
- if (set_blocksize(file_bdev(bdev_file), 4096))
- goto out_blkdev_put;
-
err = read_super(sb, file_bdev(bdev_file), &sb_disk);
if (err)
goto out_blkdev_put;
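
Two small API conversions in super.c are easy to misread. First, ida_simple_get(&ida, start, end, gfp) treats end as exclusive, while its replacement ida_alloc_max(&ida, max, gfp) treats max as inclusive, which is why the new call passes BCACHE_DEVICE_IDX_MAX - 1; ida_simple_remove() maps directly onto ida_free(). Second, iter_size now adds sizeof(struct btree_iter) explicitly because the struct's data[] array became a flexible array member (see the bset.h hunk above). A short sketch of the ida conversion (EXAMPLE_MAX_IDX is a stand-in for the driver's own limit):

    #define EXAMPLE_MAX_IDX 64

    static DEFINE_IDA(example_ida);

    static int example_get_idx(void)
    {
            /* old: ida_simple_get(&ida, 0, N, GFP_KERNEL)  -> range [0, N)   */
            /* new: ida_alloc_max(&ida, N - 1, GFP_KERNEL)  -> range [0, N-1] */
            return ida_alloc_max(&example_ida, EXAMPLE_MAX_IDX - 1, GFP_KERNEL);
    }

    static void example_put_idx(int idx)
    {
            ida_free(&example_ida, idx);
    }
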
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 6956beb55326..826b14cae4e5 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
goto lock_root;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8827a6f130ad..792e070ccf38 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
prev_idx = 0;
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index fd852981ef9c..cf433b0cf742 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -321,8 +321,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison,
{
BUG_ON(!cell->exclusive_lock);
- bio_list_merge(bios, &cell->bios);
- bio_list_init(&cell->bios);
+ bio_list_merge_init(bios, &cell->bios);
if (cell->shared_count) {
cell->exclusive_lock = false;
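
Several of the device-mapper hunks below make the same mechanical substitution: the open-coded bio_list_merge() + bio_list_init() pair becomes bio_list_merge_init(), which splices the source list onto the tail of the destination and leaves the source empty. Conceptually the helper is just the two calls folded together; a sketch of the equivalent behaviour (the real helper lives in include/linux/bio.h and may differ in detail):

    /* Move every bio from @src onto the tail of @dst and reinitialise @src */
    static inline void bio_list_merge_init_sketch(struct bio_list *dst,
                                                  struct bio_list *src)
    {
            bio_list_merge(dst, src);
            bio_list_init(src);
    }
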
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 911f73f7ebba..16884b585053 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -115,8 +115,7 @@ static void __commit(struct work_struct *_ws)
*/
spin_lock_irq(&b->lock);
list_splice_init(&b->work_items, &work_items);
- bio_list_merge(&bios, &b->bios);
- bio_list_init(&b->bios);
+ bio_list_merge_init(&bios, &b->bios);
b->commit_scheduled = false;
spin_unlock_irq(&b->lock);
@@ -565,8 +564,7 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void defer_bios(struct cache *cache, struct bio_list *bios)
{
spin_lock_irq(&cache->lock);
- bio_list_merge(&cache->deferred_bios, bios);
- bio_list_init(bios);
+ bio_list_merge_init(&cache->deferred_bios, bios);
spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
@@ -1816,8 +1814,7 @@ static void process_deferred_bios(struct work_struct *ws)
bio_list_init(&bios);
spin_lock_irq(&cache->lock);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
+ bio_list_merge_init(&bios, &cache->deferred_bios);
spin_unlock_irq(&cache->lock);
while ((bio = bio_list_pop(&bios))) {
@@ -1847,8 +1844,7 @@ static void requeue_deferred_bios(struct cache *cache)
struct bio_list bios;
bio_list_init(&bios);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
+ bio_list_merge_init(&bios, &cache->deferred_bios);
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
@@ -3394,8 +3390,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
if (!cache->features.discard_passdown) {
/* No passdown is done so setting own virtual limits */
- limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
- cache->origin_sectors);
+ limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+ cache->origin_sectors);
limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
return;
}
@@ -3404,7 +3400,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
* cache_iterate_devices() is stacking both origin and fast device limits
* but discards aren't passed to fast device, so inherit origin's limits.
*/
- limits->max_discard_sectors = origin_limits->max_discard_sectors;
limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
limits->discard_granularity = origin_limits->discard_granularity;
limits->discard_alignment = origin_limits->discard_alignment;
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index c43d55672bce..47c1fa7aad8b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
/*---------------------------------------------------------------------------*/
-static size_t bitmap_size(unsigned long nr_bits)
-{
- return BITS_TO_LONGS(nr_bits) * sizeof(long);
-}
-
static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
unsigned long nr_regions)
{
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 94b2fc33f64b..ad79b52ffc14 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1181,8 +1181,7 @@ static void process_deferred_discards(struct clone *clone)
struct bio_list discards = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
- bio_list_merge(&discards, &clone->deferred_discard_bios);
- bio_list_init(&clone->deferred_discard_bios);
+ bio_list_merge_init(&discards, &clone->deferred_discard_bios);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&discards))
@@ -1215,8 +1214,7 @@ static void process_deferred_bios(struct clone *clone)
struct bio_list bios = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
- bio_list_merge(&bios, &clone->deferred_bios);
- bio_list_init(&clone->deferred_bios);
+ bio_list_merge_init(&bios, &clone->deferred_bios);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios))
@@ -1237,11 +1235,9 @@ static void process_deferred_flush_bios(struct clone *clone)
* before issuing them or signaling their completion.
*/
spin_lock_irq(&clone->lock);
- bio_list_merge(&bios, &clone->deferred_flush_bios);
- bio_list_init(&clone->deferred_flush_bios);
-
- bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
- bio_list_init(&clone->deferred_flush_completions);
+ bio_list_merge_init(&bios, &clone->deferred_flush_bios);
+ bio_list_merge_init(&bio_completions,
+ &clone->deferred_flush_completions);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
@@ -2050,7 +2046,8 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
/* No passdown is done so we set our own virtual limits */
limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
- limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
+ limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
+ clone->region_size);
return;
}
@@ -2059,7 +2056,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
* device limits but discards aren't passed to the source device, so
* inherit destination's limits.
*/
- limits->max_discard_sectors = dest_limits->max_discard_sectors;
limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
limits->discard_granularity = dest_limits->discard_granularity;
limits->discard_alignment = dest_limits->discard_alignment;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index e6757a30dcca..08700bfc3e23 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -140,7 +140,7 @@ struct mapped_device {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
- unsigned int *zwp_offset;
+ void *zone_revalidate_map;
#endif
#ifdef CONFIG_IMA
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9a74c6316c5d..1b7a97cc3779 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -47,6 +47,8 @@
#define DM_MSG_PREFIX "crypt"
+static DEFINE_IDA(workqueue_ida);
+
/*
* context holding the current state of a multi-part conversion
*/
@@ -137,9 +139,9 @@ struct iv_elephant_private {
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
- DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
- DM_CRYPT_WRITE_INLINE };
+ DM_CRYPT_SAME_CPU, DM_CRYPT_HIGH_PRIORITY,
+ DM_CRYPT_NO_OFFLOAD, DM_CRYPT_NO_READ_WORKQUEUE,
+ DM_CRYPT_NO_WRITE_WORKQUEUE, DM_CRYPT_WRITE_INLINE };
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
@@ -184,6 +186,7 @@ struct crypt_config {
struct crypto_aead **tfms_aead;
} cipher_tfm;
unsigned int tfms_count;
+ int workqueue_id;
unsigned long cipher_flags;
/*
@@ -1653,8 +1656,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
/*
* Generate a new unfragmented bio with the given size
- * This should never violate the device limitations (but only because
- * max_segment_size is being constrained to PAGE_SIZE).
+ * This should never violate the device limitations (but if it did then block
+ * core should split the bio as needed).
*
* This function may be called concurrently. If we allocate from the mempool
* concurrently, there is a possibility of deadlock. For example, if we have
@@ -2771,6 +2774,9 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->crypt_queue)
destroy_workqueue(cc->crypt_queue);
+ if (cc->workqueue_id)
+ ida_free(&workqueue_ida, cc->workqueue_id);
+
crypt_free_tfms(cc);
bioset_exit(&cc->bs);
@@ -3134,7 +3140,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
struct crypt_config *cc = ti->private;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 8, "Invalid number of feature args"},
+ {0, 9, "Invalid number of feature args"},
};
unsigned int opt_params, val;
const char *opt_string, *sval;
@@ -3161,6 +3167,8 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
else if (!strcasecmp(opt_string, "same_cpu_crypt"))
set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+ else if (!strcasecmp(opt_string, "high_priority"))
+ set_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
@@ -3230,8 +3238,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
const char *devname = dm_table_device_name(ti->table);
- int key_size;
+ int key_size, wq_id;
unsigned int align_mask;
+ unsigned int common_wq_flags;
unsigned long long tmpll;
int ret;
size_t iv_size_padding, additional_req_size;
@@ -3398,20 +3407,38 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tag_pool_max_sectors <<= cc->sector_shift;
}
+ wq_id = ida_alloc_min(&workqueue_ida, 1, GFP_KERNEL);
+ if (wq_id < 0) {
+ ti->error = "Couldn't get workqueue id";
+ ret = wq_id;
+ goto bad;
+ }
+ cc->workqueue_id = wq_id;
+
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
+ common_wq_flags = WQ_MEM_RECLAIM | WQ_SYSFS;
+ if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
+ common_wq_flags |= WQ_HIGHPRI;
+
+ cc->io_queue = alloc_workqueue("kcryptd_io-%s-%d", common_wq_flags, 1, devname, wq_id);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
- if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
- 1, devname);
- else
- cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
- num_online_cpus(), devname);
+ if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) {
+ cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
+ common_wq_flags | WQ_CPU_INTENSIVE,
+ 1, devname, wq_id);
+ } else {
+ /*
+ * While crypt_queue is certainly CPU intensive, the use of
+ * WQ_CPU_INTENSIVE is meaningless with WQ_UNBOUND.
+ */
+ cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
+ common_wq_flags | WQ_UNBOUND,
+ num_online_cpus(), devname, wq_id);
+ }
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -3427,6 +3454,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Couldn't spawn write thread";
goto bad;
}
+ if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
+ set_user_nice(cc->write_thread, MIN_NICE);
ti->num_flush_bios = 1;
ti->limit_swap_bios = true;
@@ -3547,6 +3576,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += !!ti->num_discard_bios;
num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+ num_feature_args += test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
@@ -3560,6 +3590,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" allow_discards");
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
DMEMIT(" same_cpu_crypt");
+ if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
+ DMEMIT(" high_priority");
if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
DMEMIT(" submit_from_crypt_cpus");
if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
@@ -3579,6 +3611,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT_TARGET_NAME_VERSION(ti->type);
DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
+ DMEMIT(",high_priority=%c", test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags) ? 'y' : 'n');
DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
'y' : 'n');
DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
@@ -3688,14 +3721,6 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct crypt_config *cc = ti->private;
- /*
- * Unfortunate constraint that is required to avoid the potential
- * for exceeding underlying device's max_segments limits -- due to
- * crypt_alloc_buffer() possibly allocating pages for the encryption
- * bio that are not as physically contiguous as the original bio.
- */
- limits->max_segment_size = PAGE_SIZE;
-
limits->logical_block_size =
max_t(unsigned int, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
@@ -3706,7 +3731,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 25, 0},
+ .version = {1, 26, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
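
The new high_priority option is a per-table feature argument, parsed in crypt_ctr_optional() alongside same_cpu_crypt and friends; it adds WQ_HIGHPRI to the crypt workqueues and bumps the write thread to MIN_NICE. As a purely illustrative example (device path, length and key are placeholders, not from this patch), the optional-argument block at the end of a crypt table line — <start> <len> crypt <cipher> <key> <iv_offset> <device> <offset> <#opt_params> <opt_params> — carries a count followed by the flags:

    0 2097152 crypt aes-xts-plain64 <64-hex-char-key> 0 /dev/sdb 0 1 high_priority
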
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 5eabdb06c649..08f6387620c1 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -28,7 +28,8 @@ struct delay_class {
struct delay_c {
struct timer_list delay_timer;
- struct mutex timer_lock;
+ struct mutex process_bios_lock; /* hold while removing bios to be processed from list */
+ spinlock_t delayed_bios_lock; /* hold on all accesses to delayed_bios list */
struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
@@ -49,8 +50,6 @@ struct dm_delay_info {
unsigned long expires;
};
-static DEFINE_MUTEX(delayed_bios_lock);
-
static void handle_delayed_timer(struct timer_list *t)
{
struct delay_c *dc = from_timer(dc, t, delay_timer);
@@ -60,12 +59,7 @@ static void handle_delayed_timer(struct timer_list *t)
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
- mutex_lock(&dc->timer_lock);
-
- if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
- mod_timer(&dc->delay_timer, expires);
-
- mutex_unlock(&dc->timer_lock);
+ timer_reduce(&dc->delay_timer, expires);
}
static inline bool delay_is_fast(struct delay_c *dc)
@@ -89,12 +83,16 @@ static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
{
struct dm_delay_info *delayed, *next;
struct bio_list flush_bio_list;
+ LIST_HEAD(local_list);
unsigned long next_expires = 0;
bool start_timer = false;
bio_list_init(&flush_bio_list);
- mutex_lock(&delayed_bios_lock);
- list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+ mutex_lock(&dc->process_bios_lock);
+ spin_lock(&dc->delayed_bios_lock);
+ list_replace_init(&dc->delayed_bios, &local_list);
+ spin_unlock(&dc->delayed_bios_lock);
+ list_for_each_entry_safe(delayed, next, &local_list, list) {
cond_resched();
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
struct bio *bio = dm_bio_from_per_bio_data(delayed,
@@ -114,7 +112,10 @@ static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
}
}
}
- mutex_unlock(&delayed_bios_lock);
+ spin_lock(&dc->delayed_bios_lock);
+ list_splice(&local_list, &dc->delayed_bios);
+ spin_unlock(&dc->delayed_bios_lock);
+ mutex_unlock(&dc->process_bios_lock);
if (start_timer)
queue_timeout(dc, next_expires);
@@ -128,13 +129,13 @@ static int flush_worker_fn(void *data)
while (!kthread_should_stop()) {
flush_delayed_bios(dc, false);
- mutex_lock(&delayed_bios_lock);
+ spin_lock(&dc->delayed_bios_lock);
if (unlikely(list_empty(&dc->delayed_bios))) {
set_current_state(TASK_INTERRUPTIBLE);
- mutex_unlock(&delayed_bios_lock);
+ spin_unlock(&dc->delayed_bios_lock);
schedule();
} else {
- mutex_unlock(&delayed_bios_lock);
+ spin_unlock(&dc->delayed_bios_lock);
cond_resched();
}
}
@@ -154,8 +155,10 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- if (dc->kdelayd_wq)
+ if (dc->kdelayd_wq) {
+ timer_shutdown_sync(&dc->delay_timer);
destroy_workqueue(dc->kdelayd_wq);
+ }
if (dc->read.dev)
dm_put_device(ti, dc->read.dev);
@@ -166,7 +169,7 @@ static void delay_dtr(struct dm_target *ti)
if (dc->worker)
kthread_stop(dc->worker);
- mutex_destroy(&dc->timer_lock);
+ mutex_destroy(&dc->process_bios_lock);
kfree(dc);
}
@@ -224,7 +227,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = dc;
INIT_LIST_HEAD(&dc->delayed_bios);
- mutex_init(&dc->timer_lock);
+ mutex_init(&dc->process_bios_lock);
+ spin_lock_init(&dc->delayed_bios_lock);
dc->may_delay = true;
dc->argc = argc;
@@ -240,19 +244,18 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = delay_class_ctr(ti, &dc->flush, argv);
if (ret)
goto bad;
- max_delay = max(max_delay, dc->write.delay);
- max_delay = max(max_delay, dc->flush.delay);
goto out;
}
ret = delay_class_ctr(ti, &dc->write, argv + 3);
if (ret)
goto bad;
+ max_delay = max(max_delay, dc->write.delay);
+
if (argc == 6) {
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
if (ret)
goto bad;
- max_delay = max(max_delay, dc->flush.delay);
goto out;
}
@@ -267,8 +270,7 @@ out:
* In case of small requested delays, use kthread instead of
* timers and workqueue to achieve better latency.
*/
- dc->worker = kthread_create(&flush_worker_fn, dc,
- "dm-delay-flush-worker");
+ dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
if (IS_ERR(dc->worker)) {
ret = PTR_ERR(dc->worker);
dc->worker = NULL;
@@ -309,14 +311,14 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
delayed->context = dc;
delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
- mutex_lock(&delayed_bios_lock);
+ spin_lock(&dc->delayed_bios_lock);
if (unlikely(!dc->may_delay)) {
- mutex_unlock(&delayed_bios_lock);
+ spin_unlock(&dc->delayed_bios_lock);
return DM_MAPIO_REMAPPED;
}
c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
- mutex_unlock(&delayed_bios_lock);
+ spin_unlock(&dc->delayed_bios_lock);
if (delay_is_fast(dc))
wake_up_process(dc->worker);
@@ -330,12 +332,12 @@ static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- mutex_lock(&delayed_bios_lock);
+ spin_lock(&dc->delayed_bios_lock);
dc->may_delay = false;
- mutex_unlock(&delayed_bios_lock);
+ spin_unlock(&dc->delayed_bios_lock);
if (!delay_is_fast(dc))
- del_timer_sync(&dc->delay_timer);
+ timer_delete(&dc->delay_timer);
flush_delayed_bios(dc, true);
}
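
Two things happen in the dm-delay rework above: the single global mutex is split into a per-device spinlock for list access plus a mutex held only while a batch of bios is pulled off for processing, and queue_timeout() drops its own locking because timer_reduce() only (re)arms the timer when the requested expiry is earlier than the pending one (or the timer is idle). The open-coded logic it replaces looked roughly like this, minus the internal locking the real timer_reduce() provides:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    /* Approximation of what timer_reduce(t, expires) does for the caller */
    static void queue_timeout_open_coded(struct timer_list *t,
                                         unsigned long expires)
    {
            if (!timer_pending(t) || time_before(expires, t->expires))
                    mod_timer(t, expires);
    }
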
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 6acfa5bf97a4..8f81e597858d 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1272,8 +1272,7 @@ static void process_deferred_bios(struct era *era)
bio_list_init(&marked_bios);
spin_lock(&era->deferred_lock);
- bio_list_merge(&deferred_bios, &era->deferred_bios);
- bio_list_init(&era->deferred_bios);
+ bio_list_merge_init(&deferred_bios, &era->deferred_bios);
spin_unlock(&era->deferred_lock);
if (bio_list_empty(&deferred_bios))
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 37b9f8f1ae1a..417fddebe367 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1;
+ limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
}
limits->max_integrity_segments = USHRT_MAX;
}
@@ -4221,7 +4222,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index f17a6cf2284e..8d7df8303d0a 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -871,7 +871,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize;
- limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+ limits->max_hw_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
}
limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 05d1328d1811..15b681b90153 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -704,8 +704,7 @@ static void process_queued_bios(struct work_struct *work)
return;
}
- bio_list_merge(&bios, &m->queued_bios);
- bio_list_init(&m->queued_bios);
+ bio_list_merge_init(&bios, &m->queued_bios);
spin_unlock_irqrestore(&m->lock, flags);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 0ace06d1bee3..f40c18da4000 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2410,7 +2410,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
/* All discards are split on chunk_size boundary */
limits->discard_granularity = snap->store->chunk_size;
- limits->max_discard_sectors = snap->store->chunk_size;
+ limits->max_hw_discard_sectors = snap->store->chunk_size;
up_read(&_origins_lock);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 41f1d731ae5a..cc66a27c363a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1963,26 +1963,27 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
bool wc = false, fua = false;
int r;
- /*
- * Copy table's limits to the DM device's request_queue
- */
- q->limits = *limits;
-
if (dm_table_supports_nowait(t))
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
if (!dm_table_supports_discards(t)) {
- q->limits.max_discard_sectors = 0;
- q->limits.max_hw_discard_sectors = 0;
- q->limits.discard_granularity = 0;
- q->limits.discard_alignment = 0;
- q->limits.discard_misaligned = 0;
+ limits->max_hw_discard_sectors = 0;
+ limits->discard_granularity = 0;
+ limits->discard_alignment = 0;
+ limits->discard_misaligned = 0;
}
+ if (!dm_table_supports_write_zeroes(t))
+ limits->max_write_zeroes_sectors = 0;
+
if (!dm_table_supports_secure_erase(t))
- q->limits.max_secure_erase_sectors = 0;
+ limits->max_secure_erase_sectors = 0;
+
+ r = queue_limits_set(q, limits);
+ if (r)
+ return r;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
@@ -2007,9 +2008,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- if (!dm_table_supports_write_zeroes(t))
- q->limits.max_write_zeroes_sectors = 0;
-
dm_table_verify_integrity(t);
/*
@@ -2042,12 +2040,12 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
r = dm_set_zones_restrictions(t, q);
if (r)
return r;
- if (!static_key_enabled(&zoned_enabled.key))
+ if (blk_queue_is_zoned(q) &&
+ !static_key_enabled(&zoned_enabled.key))
static_branch_enable(&zoned_enabled);
}
dm_update_crypto_profile(q, t);
- disk_update_readahead(t->md->disk);
/*
* Check for request-based device is left to
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 0c4efb0bef8a..652627aea11b 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -249,7 +249,6 @@ static int io_err_iterate_devices(struct dm_target *ti,
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
- limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512;
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4793ad2aa1f7..991f77a5885b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -592,12 +592,6 @@ struct dm_thin_endio_hook {
struct dm_bio_prison_cell *cell;
};
-static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
-{
- bio_list_merge(bios, master);
- bio_list_init(master);
-}
-
static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
struct bio *bio;
@@ -616,7 +610,7 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, master);
+ bio_list_merge_init(&bios, master);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
@@ -645,8 +639,8 @@ static void requeue_io(struct thin_c *tc)
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, &tc->deferred_bio_list);
- __merge_bio_list(&bios, &tc->retry_on_resume_list);
+ bio_list_merge_init(&bios, &tc->deferred_bio_list);
+ bio_list_merge_init(&bios, &tc->retry_on_resume_list);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
@@ -4100,7 +4094,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pt->adjusted_pf.discard_enabled) {
disable_discard_passdown_if_not_supported(pt);
if (!pt->adjusted_pf.discard_passdown)
- limits->max_discard_sectors = 0;
+ limits->max_hw_discard_sectors = 0;
/*
* The pool uses the same discard limits as the underlying data
* device. DM core has already set this up.
@@ -4497,7 +4491,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pool->pf.discard_enabled) {
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+ limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
}
}
diff --git a/drivers/md/dm-vdo/Makefile b/drivers/md/dm-vdo/Makefile
index 33e09abc6acd..9476957bfbf4 100644
--- a/drivers/md/dm-vdo/Makefile
+++ b/drivers/md/dm-vdo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I$(srctree)/$(src) -I$(srctree)/$(src)/indexer
+ccflags-y := -I$(src) -I$(src)/indexer
obj-$(CONFIG_DM_VDO) += dm-vdo.o
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 94f6f1ccfb7d..ab3ea8337809 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -604,8 +604,7 @@ static void assign_discard_permit(struct limiter *limiter)
static void get_waiters(struct limiter *limiter)
{
- bio_list_merge(&limiter->waiters, &limiter->new_waiters);
- bio_list_init(&limiter->new_waiters);
+ bio_list_merge_init(&limiter->waiters, &limiter->new_waiters);
}
static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
index 5a4b0a927f56..b423bec6458b 100644
--- a/drivers/md/dm-vdo/dm-vdo-target.c
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -878,7 +878,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti,
}
if (config->version == 0) {
- u64 device_size = i_size_read(config->owned_device->bdev->bd_inode);
+ u64 device_size = bdev_nr_bytes(config->owned_device->bdev);
config->physical_blocks = device_size / VDO_BLOCK_SIZE;
}
@@ -1011,7 +1011,7 @@ static void vdo_status(struct dm_target *ti, status_type_t status_type,
static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo)
{
- return i_size_read(vdo_get_backing_device(vdo)->bd_inode) / VDO_BLOCK_SIZE;
+ return bdev_nr_bytes(vdo_get_backing_device(vdo)) / VDO_BLOCK_SIZE;
}
static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c
index 57e87f0d7069..dd4fdee2ca0c 100644
--- a/drivers/md/dm-vdo/flush.c
+++ b/drivers/md/dm-vdo/flush.c
@@ -369,8 +369,7 @@ void vdo_dump_flusher(const struct flusher *flusher)
static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
{
bio_list_init(&flush->bios);
- bio_list_merge(&flush->bios, &vdo->flusher->waiting_flush_bios);
- bio_list_init(&vdo->flusher->waiting_flush_bios);
+ bio_list_merge_init(&flush->bios, &vdo->flusher->waiting_flush_bios);
}
static void launch_flush(struct vdo_flush *flush)
diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c
index 515765d35794..1bee9d63dc0a 100644
--- a/drivers/md/dm-vdo/indexer/io-factory.c
+++ b/drivers/md/dm-vdo/indexer/io-factory.c
@@ -90,7 +90,7 @@ void uds_put_io_factory(struct io_factory *factory)
size_t uds_get_writable_size(struct io_factory *factory)
{
- return i_size_read(factory->bdev->bd_inode);
+ return bdev_nr_bytes(factory->bdev);
}
/* Create a struct dm_bufio_client for an index region starting at offset. */
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 00c9b9c05001..3a989efae142 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,33 +8,14 @@
#include "murmurhash3.h"
+#include <asm/unaligned.h>
+
static inline u64 rotl64(u64 x, s8 r)
{
return (x << r) | (x >> (64 - r));
}
#define ROTL64(x, y) rotl64(x, y)
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
/* Finalization mix - force all bits of a hash block to avalanche */
@@ -60,6 +41,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
const u64 c1 = 0x87c37b91114253d5LLU;
const u64 c2 = 0x4cf5ad432745937fLLU;
+ u64 *hash_out = out;
+
/* body */
const u64 *blocks = (const u64 *)(data);
@@ -67,8 +50,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
int i;
for (i = 0; i < nblocks; i++) {
- u64 k1 = getblock64(blocks, i * 2 + 0);
- u64 k2 = getblock64(blocks, i * 2 + 1);
+ u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+ u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
k1 *= c1;
k1 = ROTL64(k1, 31);
@@ -154,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
break;
default:
break;
- };
+ }
}
/* finalization */
@@ -170,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
h1 += h2;
h2 += h1;
- putblock64((u64 *)out, 0, h1);
- putblock64((u64 *)out, 1, h2);
+ put_unaligned_le64(h1, &hash_out[0]);
+ put_unaligned_le64(h2, &hash_out[1]);
}
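
The murmurhash3 change swaps the hand-rolled byte-order block accessors for the kernel's unaligned little-endian helpers, which also removes the implicit assumption that the input buffer is 8-byte aligned. For reference, a sketch of what get_unaligned_le64()/put_unaligned_le64() amount to in plain C (the kernel versions may use memcpy-based packed accesses or arch-specific instructions):

    #include <stdint.h>
    #include <string.h>

    static inline uint64_t get_unaligned_le64_sketch(const void *p)
    {
            uint64_t v;

            memcpy(&v, p, sizeof(v));        /* safe for any alignment */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            v = __builtin_bswap64(v);        /* value is stored little-endian */
    #endif
            return v;
    }

    static inline void put_unaligned_le64_sketch(uint64_t v, void *p)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            v = __builtin_bswap64(v);
    #endif
            memcpy(p, &v, sizeof(v));
    }
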
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 3b13e6eb1aa4..9a0bb623e823 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -61,7 +61,6 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
- limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512;
}
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index eb9832b22b14..8e6bcb0d786a 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -60,16 +60,23 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
struct dm_table *map;
int srcu_idx, ret;
- if (dm_suspended_md(md))
- return -EAGAIN;
+ if (!md->zone_revalidate_map) {
+ /* Regular user context */
+ if (dm_suspended_md(md))
+ return -EAGAIN;
- map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ return -EIO;
+ } else {
+ /* Zone revalidation during __bind() */
+ map = md->zone_revalidate_map;
+ }
ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
- dm_put_live_table(md, srcu_idx);
+ if (!md->zone_revalidate_map)
+ dm_put_live_table(md, srcu_idx);
return ret;
}
@@ -138,80 +145,47 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
}
}
-void dm_cleanup_zoned_dev(struct mapped_device *md)
+/*
+ * Count conventional zones of a mapped zoned device. If the device
+ * only has conventional zones, do not expose it as zoned.
+ */
+static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
{
- if (md->disk) {
- bitmap_free(md->disk->conv_zones_bitmap);
- md->disk->conv_zones_bitmap = NULL;
- bitmap_free(md->disk->seq_zones_wlock);
- md->disk->seq_zones_wlock = NULL;
- }
+ unsigned int *nr_conv_zones = data;
- kvfree(md->zwp_offset);
- md->zwp_offset = NULL;
- md->nr_zones = 0;
-}
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ (*nr_conv_zones)++;
-static unsigned int dm_get_zone_wp_offset(struct blk_zone *zone)
-{
- switch (zone->cond) {
- case BLK_ZONE_COND_IMP_OPEN:
- case BLK_ZONE_COND_EXP_OPEN:
- case BLK_ZONE_COND_CLOSED:
- return zone->wp - zone->start;
- case BLK_ZONE_COND_FULL:
- return zone->len;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_NOT_WP:
- case BLK_ZONE_COND_OFFLINE:
- case BLK_ZONE_COND_READONLY:
- default:
- /*
- * Conventional, offline and read-only zones do not have a valid
- * write pointer. Use 0 as for an empty zone.
- */
- return 0;
- }
+ return 0;
}
-static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
+static int dm_check_zoned(struct mapped_device *md, struct dm_table *t)
{
- struct mapped_device *md = data;
struct gendisk *disk = md->disk;
+ unsigned int nr_conv_zones = 0;
+ int ret;
- switch (zone->type) {
- case BLK_ZONE_TYPE_CONVENTIONAL:
- if (!disk->conv_zones_bitmap) {
- disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones,
- GFP_NOIO);
- if (!disk->conv_zones_bitmap)
- return -ENOMEM;
- }
- set_bit(idx, disk->conv_zones_bitmap);
- break;
- case BLK_ZONE_TYPE_SEQWRITE_REQ:
- case BLK_ZONE_TYPE_SEQWRITE_PREF:
- if (!disk->seq_zones_wlock) {
- disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones,
- GFP_NOIO);
- if (!disk->seq_zones_wlock)
- return -ENOMEM;
- }
- if (!md->zwp_offset) {
- md->zwp_offset =
- kvcalloc(disk->nr_zones, sizeof(unsigned int),
- GFP_KERNEL);
- if (!md->zwp_offset)
- return -ENOMEM;
- }
- md->zwp_offset[idx] = dm_get_zone_wp_offset(zone);
-
- break;
- default:
- DMERR("Invalid zone type 0x%x at sectors %llu",
- (int)zone->type, zone->start);
- return -ENODEV;
+ /* Count conventional zones */
+ md->zone_revalidate_map = t;
+ ret = dm_blk_report_zones(disk, 0, UINT_MAX,
+ dm_check_zoned_cb, &nr_conv_zones);
+ md->zone_revalidate_map = NULL;
+ if (ret < 0) {
+ DMERR("Check zoned failed %d", ret);
+ return ret;
+ }
+
+ /*
+ * If we only have conventional zones, expose the mapped device as
+ * a regular device.
+ */
+ if (nr_conv_zones >= ret) {
+ disk->queue->limits.max_open_zones = 0;
+ disk->queue->limits.max_active_zones = 0;
+ disk->queue->limits.zoned = false;
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ disk->nr_zones = 0;
}
return 0;
@@ -226,41 +200,32 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
struct gendisk *disk = md->disk;
- unsigned int noio_flag;
int ret;
- /*
- * Check if something changed. If yes, cleanup the current resources
- * and reallocate everything.
- */
+ /* Revalidate only if something changed. */
if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
- dm_cleanup_zoned_dev(md);
+ md->nr_zones = 0;
+
if (md->nr_zones)
return 0;
/*
- * Scan all zones to initialize everything. Ensure that all vmalloc
- * operations in this context are done as if GFP_NOIO was specified.
+ * Our table is not live yet. So the call to dm_get_live_table()
+ * in dm_blk_report_zones() will fail. Set a temporary pointer to
+ * our table for dm_blk_report_zones() to use directly.
*/
- noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
- dm_zone_revalidate_cb, md);
- memalloc_noio_restore(noio_flag);
- if (ret < 0)
- goto err;
- if (ret != disk->nr_zones) {
- ret = -EIO;
- goto err;
+ md->zone_revalidate_map = t;
+ ret = blk_revalidate_disk_zones(disk);
+ md->zone_revalidate_map = NULL;
+
+ if (ret) {
+ DMERR("Revalidate zones failed %d", ret);
+ return ret;
}
md->nr_zones = disk->nr_zones;
return 0;
-
-err:
- DMERR("Revalidate zones failed %d", ret);
- dm_cleanup_zoned_dev(md);
- return ret;
}
static int device_not_zone_append_capable(struct dm_target *ti,
@@ -289,294 +254,40 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
{
struct mapped_device *md = t->md;
+ int ret;
/*
- * For a zoned target, the number of zones should be updated for the
- * correct value to be exposed in sysfs queue/nr_zones.
+ * Check if zone append is natively supported, and if not, set the
+ * mapped device queue as needing zone append emulation.
*/
WARN_ON_ONCE(queue_is_mq(q));
- md->disk->nr_zones = bdev_nr_zones(md->disk->part0);
-
- /* Check if zone append is natively supported */
if (dm_table_supports_zone_append(t)) {
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- dm_cleanup_zoned_dev(md);
- return 0;
+ } else {
+ set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ blk_queue_max_zone_append_sectors(q, 0);
}
- /*
- * Mark the mapped device as needing zone append emulation and
- * initialize the emulation resources once the capacity is set.
- */
- set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
if (!get_capacity(md->disk))
return 0;
- return dm_revalidate_zones(md, t);
-}
-
-static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
-{
- unsigned int *wp_offset = data;
-
- *wp_offset = dm_get_zone_wp_offset(zone);
-
- return 0;
-}
-
-static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
- unsigned int *wp_ofst)
-{
- sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
- unsigned int noio_flag;
- struct dm_table *t;
- int srcu_idx, ret;
-
- t = dm_get_live_table(md, &srcu_idx);
- if (!t)
- return -EIO;
-
- /*
- * Ensure that all memory allocations in this context are done as if
- * GFP_NOIO was specified.
- */
- noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, sector, 1,
- dm_update_zone_wp_offset_cb, wp_ofst);
- memalloc_noio_restore(noio_flag);
-
- dm_put_live_table(md, srcu_idx);
-
- if (ret != 1)
- return -EIO;
-
- return 0;
-}
-
-struct orig_bio_details {
- enum req_op op;
- unsigned int nr_sectors;
-};
-
-/*
- * First phase of BIO mapping for targets with zone append emulation:
- * check all BIO that change a zone writer pointer and change zone
- * append operations into regular write operations.
- */
-static bool dm_zone_map_bio_begin(struct mapped_device *md,
- unsigned int zno, struct bio *clone)
-{
- sector_t zsectors = bdev_zone_sectors(md->disk->part0);
- unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
-
- /*
- * If the target zone is in an error state, recover by inspecting the
- * zone to get its current write pointer position. Note that since the
- * target zone is already locked, a BIO issuing context should never
- * see the zone write in the DM_ZONE_UPDATING_WP_OFST state.
- */
- if (zwp_offset == DM_ZONE_INVALID_WP_OFST) {
- if (dm_update_zone_wp_offset(md, zno, &zwp_offset))
- return false;
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
- }
-
- switch (bio_op(clone)) {
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_FINISH:
- return true;
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- /* Writes must be aligned to the zone write pointer */
- if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
- return false;
- break;
- case REQ_OP_ZONE_APPEND:
- /*
- * Change zone append operations into a non-mergeable regular
- * writes directed at the current write pointer position of the
- * target zone.
- */
- clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
- (clone->bi_opf & (~REQ_OP_MASK));
- clone->bi_iter.bi_sector += zwp_offset;
- break;
- default:
- DMWARN_LIMIT("Invalid BIO operation");
- return false;
- }
-
- /* Cannot write to a full zone */
- if (zwp_offset >= zsectors)
- return false;
-
- return true;
-}
-
-/*
- * Second phase of BIO mapping for targets with zone append emulation:
- * update the zone write pointer offset array to account for the additional
- * data written to a zone. Note that at this point, the remapped clone BIO
- * may already have completed, so we do not touch it.
- */
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
- struct orig_bio_details *orig_bio_details,
- unsigned int nr_sectors)
-{
- unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
-
- /* The clone BIO may already have been completed and failed */
- if (zwp_offset == DM_ZONE_INVALID_WP_OFST)
- return BLK_STS_IOERR;
-
- /* Update the zone wp offset */
- switch (orig_bio_details->op) {
- case REQ_OP_ZONE_RESET:
- WRITE_ONCE(md->zwp_offset[zno], 0);
- return BLK_STS_OK;
- case REQ_OP_ZONE_FINISH:
- WRITE_ONCE(md->zwp_offset[zno],
- bdev_zone_sectors(md->disk->part0));
- return BLK_STS_OK;
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
- return BLK_STS_OK;
- case REQ_OP_ZONE_APPEND:
- /*
- * Check that the target did not truncate the write operation
- * emulating a zone append.
- */
- if (nr_sectors != orig_bio_details->nr_sectors) {
- DMWARN_LIMIT("Truncated write for zone append");
- return BLK_STS_IOERR;
- }
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
- return BLK_STS_OK;
- default:
- DMWARN_LIMIT("Invalid BIO operation");
- return BLK_STS_IOERR;
- }
-}
-
-static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
- struct bio *clone)
-{
- if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
- return;
-
- wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
- bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
-}
-
-static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
- struct bio *clone)
-{
- if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
- return;
-
- WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
- clear_bit_unlock(zno, disk->seq_zones_wlock);
- smp_mb__after_atomic();
- wake_up_bit(disk->seq_zones_wlock, zno);
-
- bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
-}
-
-static bool dm_need_zone_wp_tracking(struct bio *bio)
-{
/*
- * Special processing is not needed for operations that do not need the
- * zone write lock, that is, all operations that target conventional
- * zones and all operations that do not modify directly a sequential
- * zone write pointer.
+ * Check that the mapped device will indeed be zoned, that is, that it
+ * has sequential write required zones.
*/
- if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
- return false;
- switch (bio_op(bio)) {
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_FINISH:
- case REQ_OP_ZONE_APPEND:
- return bio_zone_is_seq(bio);
- default:
- return false;
- }
-}
-
-/*
- * Special IO mapping for targets needing zone append emulation.
- */
-int dm_zone_map_bio(struct dm_target_io *tio)
-{
- struct dm_io *io = tio->io;
- struct dm_target *ti = tio->ti;
- struct mapped_device *md = io->md;
- struct bio *clone = &tio->clone;
- struct orig_bio_details orig_bio_details;
- unsigned int zno;
- blk_status_t sts;
- int r;
-
- /*
- * IOs that do not change a zone write pointer do not need
- * any additional special processing.
- */
- if (!dm_need_zone_wp_tracking(clone))
- return ti->type->map(ti, clone);
-
- /* Lock the target zone */
- zno = bio_zone_no(clone);
- dm_zone_lock(md->disk, zno, clone);
-
- orig_bio_details.nr_sectors = bio_sectors(clone);
- orig_bio_details.op = bio_op(clone);
+ ret = dm_check_zoned(md, t);
+ if (ret)
+ return ret;
+ if (!blk_queue_is_zoned(q))
+ return 0;
- /*
- * Check that the bio and the target zone write pointer offset are
- * both valid, and if the bio is a zone append, remap it to a write.
- */
- if (!dm_zone_map_bio_begin(md, zno, clone)) {
- dm_zone_unlock(md->disk, zno, clone);
- return DM_MAPIO_KILL;
+ if (!md->disk->nr_zones) {
+ DMINFO("%s using %s zone append",
+ md->disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
}
- /* Let the target do its work */
- r = ti->type->map(ti, clone);
- switch (r) {
- case DM_MAPIO_SUBMITTED:
- /*
- * The target submitted the clone BIO. The target zone will
- * be unlocked on completion of the clone.
- */
- sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
- *tio->len_ptr);
- break;
- case DM_MAPIO_REMAPPED:
- /*
- * The target only remapped the clone BIO. In case of error,
- * unlock the target zone here as the clone will not be
- * submitted.
- */
- sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
- *tio->len_ptr);
- if (sts != BLK_STS_OK)
- dm_zone_unlock(md->disk, zno, clone);
- break;
- case DM_MAPIO_REQUEUE:
- case DM_MAPIO_KILL:
- default:
- dm_zone_unlock(md->disk, zno, clone);
- sts = BLK_STS_IOERR;
- break;
- }
-
- if (sts != BLK_STS_OK)
- return DM_MAPIO_KILL;
-
- return r;
+ return dm_revalidate_zones(md, t);
}
/*
@@ -587,61 +298,17 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
struct mapped_device *md = io->md;
struct gendisk *disk = md->disk;
struct bio *orig_bio = io->orig_bio;
- unsigned int zwp_offset;
- unsigned int zno;
/*
- * For targets that do not emulate zone append, we only need to
- * handle native zone-append bios.
+ * Get the offset within the zone of the written sector
+ * and add that to the original bio sector position.
*/
- if (!dm_emulate_zone_append(md)) {
- /*
- * Get the offset within the zone of the written sector
- * and add that to the original bio sector position.
- */
- if (clone->bi_status == BLK_STS_OK &&
- bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask =
- (sector_t)bdev_zone_sectors(disk->part0) - 1;
-
- orig_bio->bi_iter.bi_sector +=
- clone->bi_iter.bi_sector & mask;
- }
-
- return;
- }
+ if (clone->bi_status == BLK_STS_OK &&
+ bio_op(clone) == REQ_OP_ZONE_APPEND) {
+ sector_t mask = bdev_zone_sectors(disk->part0) - 1;
- /*
- * For targets that do emulate zone append, if the clone BIO does not
- * own the target zone write lock, we have nothing to do.
- */
- if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
- return;
-
- zno = bio_zone_no(orig_bio);
-
- if (clone->bi_status != BLK_STS_OK) {
- /*
- * BIOs that modify a zone write pointer may leave the zone
- * in an unknown state in case of failure (e.g. the write
- * pointer was only partially advanced). In this case, set
- * the target zone write pointer as invalid unless it is
- * already being updated.
- */
- WRITE_ONCE(md->zwp_offset[zno], DM_ZONE_INVALID_WP_OFST);
- } else if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
- /*
- * Get the written sector for zone append operation that were
- * emulated using regular write operations.
- */
- zwp_offset = READ_ONCE(md->zwp_offset[zno]);
- if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))
- WRITE_ONCE(md->zwp_offset[zno],
- DM_ZONE_INVALID_WP_OFST);
- else
- orig_bio->bi_iter.bi_sector +=
- zwp_offset - bio_sectors(orig_bio);
+ orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask;
}
- dm_zone_unlock(disk, zno, clone);
+ return;
}
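The zone-append fix-up that survives in dm_zone_endio() above reduces to a single mask-and-add: recover the offset of the written sector within its zone and add it to the original BIO's start sector. A rough user-space sketch of that arithmetic, assuming a power-of-two zone size (helper name and sample values are illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/*
 * Illustrative only. For a zone append completion the device reports the
 * absolute sector it wrote; its offset within the zone is recovered with a
 * power-of-two mask and added to the original BIO's start sector, mirroring
 * the remaining code in dm_zone_endio().
 */
static sector_t zone_append_result(sector_t orig_sector, sector_t written_sector,
				   sector_t zone_sectors)
{
	sector_t mask = zone_sectors - 1;	/* zone_sectors must be a power of two */

	return orig_sector + (written_sector & mask);
}

int main(void)
{
	sector_t zone_sectors = 524288;			/* 256 MiB zones */
	sector_t written = 3 * zone_sectors + 4096;	/* append landed 4096 sectors into zone 3 */

	/* Prints the zone start plus the in-zone offset: 1576960 */
	printf("%llu\n", (unsigned long long)zone_append_result(3 * zone_sectors, written, zone_sectors));
	return 0;
}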
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 621794a9edd6..12236e6f46f3 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1001,7 +1001,6 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->discard_alignment = 0;
limits->discard_granularity = DMZ_BLOCK_SIZE;
- limits->max_discard_sectors = chunk_sectors;
limits->max_hw_discard_sectors = chunk_sectors;
limits->max_write_zeroes_sectors = chunk_sectors;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56aa2a8b9d71..13037d6a6f62 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- fput(bdev_file);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- fput(td->dm_dev.bdev_file);
+
+ /* Leverage async fput() if DMF_DEFERRED_REMOVE set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
@@ -1080,7 +1086,7 @@ void disable_discard(struct mapped_device *md)
struct queue_limits *limits = dm_get_queue_limits(md);
/* device doesn't really support DISCARD, disable it */
- limits->max_discard_sectors = 0;
+ limits->max_hw_discard_sectors = 0;
}
void disable_write_zeroes(struct mapped_device *md)
@@ -1422,25 +1428,12 @@ static void __map_bio(struct bio *clone)
down(&md->swap_bios_semaphore);
}
- if (static_branch_unlikely(&zoned_enabled)) {
- /*
- * Check if the IO needs a special mapping due to zone append
- * emulation on zoned target. In this case, dm_zone_map_bio()
- * calls the target map operation.
- */
- if (unlikely(dm_emulate_zone_append(md)))
- r = dm_zone_map_bio(tio);
- else
- goto do_map;
- } else {
-do_map:
- if (likely(ti->type->map == linear_map))
- r = linear_map(ti, clone);
- else if (ti->type->map == stripe_map)
- r = stripe_map(ti, clone);
- else
- r = ti->type->map(ti, clone);
- }
+ if (likely(ti->type->map == linear_map))
+ r = linear_map(ti, clone);
+ else if (ti->type->map == stripe_map)
+ r = stripe_map(ti, clone);
+ else
+ r = ti->type->map(ti, clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
@@ -1768,6 +1761,33 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
ci->sector_count = 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+ struct bio *bio)
+{
+ /*
+	 * For mapped devices that need zone append emulation, we must
+ * split any large BIO that straddles zone boundaries.
+ */
+ return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
+ !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+}
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
+}
+#else
+static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+ struct bio *bio)
+{
+ return false;
+}
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ return false;
+}
+#endif
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1777,19 +1797,32 @@ static void dm_split_and_process_bio(struct mapped_device *md,
struct clone_info ci;
struct dm_io *io;
blk_status_t error = BLK_STS_OK;
- bool is_abnormal;
+ bool is_abnormal, need_split;
- is_abnormal = is_abnormal_io(bio);
- if (unlikely(is_abnormal)) {
+ need_split = is_abnormal = is_abnormal_io(bio);
+ if (static_branch_unlikely(&zoned_enabled))
+ need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+
+ if (unlikely(need_split)) {
/*
* Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
+ * Also split the BIO for mapped devices needing zone append
+ * emulation to ensure that the BIO does not cross zone
+ * boundaries.
*/
bio = bio_split_to_limits(bio);
if (!bio)
return;
}
+ /*
+ * Use the block layer zone write plugging for mapped devices that
+ * need zone append emulation (e.g. dm-crypt).
+ */
+ if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
+ return;
+
/* Only support nowait for normal IO */
if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
io = alloc_io(md, bio, GFP_NOWAIT);
@@ -2010,7 +2043,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->dax_dev = NULL;
}
- dm_cleanup_zoned_dev(md);
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
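dm_zone_bio_needs_split() above defers to the block layer's bio_straddles_zones(); conceptually, a BIO needs splitting when its first and last sectors land in different zones. A minimal stand-in for that predicate, assuming a fixed power-of-two zone size (the helper and values are illustrative, not the in-tree implementation):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/*
 * Illustrative only: a BIO straddles a zone boundary when its first and
 * last sectors fall in different zones.
 */
static bool straddles_zones(sector_t start, unsigned int nr_sectors,
			    sector_t zone_sectors)
{
	sector_t first_zone = start / zone_sectors;
	sector_t last_zone = (start + nr_sectors - 1) / zone_sectors;

	return nr_sectors && first_zone != last_zone;
}

int main(void)
{
	/* 524288-sector zones: an 8-sector write ending exactly at the boundary does not straddle */
	printf("%d\n", straddles_zones(524280, 8, 524288));
	/* ...but one more sector does */
	printf("%d\n", straddles_zones(524280, 9, 524288));
	return 0;
}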
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 7f1acbf6bd9e..e0c57f19839b 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -104,13 +104,11 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
-void dm_cleanup_zoned_dev(struct mapped_device *md);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
#else
-static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {}
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 059afc24c08b..0a2d37eb38ef 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1424,7 +1424,7 @@ __acquires(bitmap->lock)
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
- sector_t csize;
+ sector_t csize = ((sector_t)1) << bitmap->chunkshift;
int err;
if (page >= bitmap->pages) {
@@ -1433,6 +1433,7 @@ __acquires(bitmap->lock)
* End-of-device while looking for a whole page or
* user set a huge number to sysfs bitmap_set_bits.
*/
+ *blocks = csize - (offset & (csize - 1));
return NULL;
}
err = md_bitmap_checkpage(bitmap, page, create, 0);
@@ -1441,8 +1442,7 @@ __acquires(bitmap->lock)
bitmap->bp[page].map == NULL)
csize = ((sector_t)1) << (bitmap->chunkshift +
PAGE_COUNTER_SHIFT);
- else
- csize = ((sector_t)1) << bitmap->chunkshift;
+
*blocks = csize - (offset & (csize - 1));
if (err < 0)
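The md-bitmap change above hoists csize = 1 << chunkshift so that *blocks can still report the distance to the end of the chunk on the early-return path. The computation itself is a small mask-and-subtract; a stand-alone sketch (illustrative values, not kernel code):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/*
 * Illustrative only: the chunk size is 1 << chunkshift sectors, and the
 * number of sectors from 'offset' to the end of its chunk is
 * csize - (offset & (csize - 1)), as reported via *blocks in the hunk above.
 */
static sector_t blocks_to_chunk_end(sector_t offset, unsigned int chunkshift)
{
	sector_t csize = (sector_t)1 << chunkshift;

	return csize - (offset & (csize - 1));
}

int main(void)
{
	/* 64-sector chunks: offset 70 is 6 sectors into chunk 1, so 58 remain */
	printf("%llu\n", (unsigned long long)blocks_to_chunk_end(70, 6));
	return 0;
}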
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e575e74aabf5..aff9118ff697 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8087,7 +8087,8 @@ void md_wakeup_thread(struct md_thread __rcu *thread)
if (t) {
pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
set_bit(THREAD_WAKEUP, &t->flags);
- wake_up(&t->wqueue);
+ if (wq_has_sleeper(&t->wqueue))
+ wake_up(&t->wqueue);
}
rcu_read_unlock();
}
@@ -8582,6 +8583,10 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
+
+ if (!init && !blk_queue_io_stat(disk->queue))
+ continue;
+
curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 097d9dbd69b8..ca085ecad504 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -621,7 +621,8 @@ extern void mddev_unlock(struct mddev *mddev);
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
+ if (blk_queue_io_stat(bdev->bd_disk->queue))
+ atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index be8ac24f50b6..7b8a71ca66dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- free_r1bio(r1_bio);
+ mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d874abfc1836..2bd1ce9b3922 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,7 +36,6 @@
*/
#include <linux/blkdev.h>
-#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -6734,6 +6733,9 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
unsigned int offset;
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ break;
+
released = release_stripe_list(conf, conf->temp_inactive_list);
if (released)
clear_bit(R5_DID_ALLOC, &conf->cache_state);
@@ -6770,18 +6772,7 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
-
- /*
- * Waiting on MD_SB_CHANGE_PENDING below may deadlock
- * seeing md_check_recovery() is needed to clear
- * the flag when using mdmon.
- */
- continue;
}
-
- wait_event_lock_irq(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
- conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 559a172ebc6c..da09834990b8 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -490,6 +490,15 @@ int cec_thread_func(void *_adap)
goto unlock;
}
+ if (adap->transmit_in_progress &&
+ adap->transmit_in_progress_aborted) {
+ if (adap->transmitting)
+ cec_data_cancel(adap->transmitting,
+ CEC_TX_STATUS_ABORTED, 0);
+ adap->transmit_in_progress = false;
+ adap->transmit_in_progress_aborted = false;
+ goto unlock;
+ }
if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
@@ -771,6 +780,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
{
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
+ int err;
if (adap->devnode.unregistered)
return -ENODEV;
@@ -935,11 +945,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
* Release the lock and wait, retake the lock afterwards.
*/
mutex_unlock(&adap->lock);
- wait_for_completion_killable(&data->c);
- if (!data->completed)
- cancel_delayed_work_sync(&data->work);
+ err = wait_for_completion_killable(&data->c);
+ cancel_delayed_work_sync(&data->work);
mutex_lock(&adap->lock);
+ if (err)
+ adap->transmit_in_progress_aborted = true;
+
/* Cancel the transmit if it was interrupted */
if (!data->completed) {
if (data->msg.tx_status & CEC_TX_STATUS_OK)
@@ -1575,9 +1587,12 @@ unconfigure:
*/
static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
{
- if (WARN_ON(adap->is_configuring || adap->is_configured))
+ if (WARN_ON(adap->is_claiming_log_addrs ||
+ adap->is_configuring || adap->is_configured))
return;
+ adap->is_claiming_log_addrs = true;
+
init_completion(&adap->config_completion);
/* Ready to kick off the thread */
@@ -1592,6 +1607,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
wait_for_completion(&adap->config_completion);
mutex_lock(&adap->lock);
}
+ adap->is_claiming_log_addrs = false;
}
/*
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 67dc79ef1705..3ef915344304 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
CEC_LOG_ADDRS_FL_CDC_ONLY;
mutex_lock(&adap->lock);
- if (!adap->is_configuring &&
+ if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
!cec_is_busy(adap, fh)) {
err = __cec_s_log_addrs(adap, &log_addrs, block);
@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
list_del_init(&data->xfer_list);
}
mutex_unlock(&adap->lock);
+
+ mutex_lock(&fh->lock);
while (!list_empty(&fh->msgs)) {
struct cec_msg_entry *entry =
list_first_entry(&fh->msgs, struct cec_msg_entry, list);
@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
kfree(entry);
}
}
+ mutex_unlock(&fh->lock);
kfree(fh);
cec_put_device(devnode);
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index 5a54db839e5d..6f940df0230c 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -62,12 +62,12 @@ int cec_get_device(struct cec_devnode *devnode)
*/
mutex_lock(&devnode->lock);
/*
- * return ENXIO if the cec device has been removed
+ * return ENODEV if the cec device has been removed
* already or if it is not registered anymore.
*/
if (!devnode->registered) {
mutex_unlock(&devnode->lock);
- return -ENXIO;
+ return -ENODEV;
}
/* and increase the device refcount */
get_device(&devnode->dev);
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 48ed2993d2f0..8fbbb4091455 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dmi.h>
#include <linux/pci.h>
@@ -573,6 +574,12 @@ static void cros_ec_cec_remove(struct platform_device *pdev)
}
}
+static const struct platform_device_id cros_ec_cec_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_cec_id);
+
static struct platform_driver cros_ec_cec_driver = {
.probe = cros_ec_cec_probe,
.remove_new = cros_ec_cec_remove,
@@ -580,6 +587,7 @@ static struct platform_driver cros_ec_cec_driver = {
.name = DRV_NAME,
.pm = &cros_ec_cec_pm_ops,
},
+ .id_table = cros_ec_cec_id,
};
module_platform_driver(cros_ec_cec_driver);
@@ -587,4 +595,3 @@ module_platform_driver(cros_ec_cec_driver);
MODULE_DESCRIPTION("CEC driver for ChromeOS ECs");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index a20fc5c0c88d..99978a7c8d9b 100644
--- a/drivers/media/cec/platform/sti/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/media/common/saa7146/saa7146_hlp.c
index 7569d8cdd4d8..fe3348af543e 100644
--- a/drivers/media/common/saa7146/saa7146_hlp.c
+++ b/drivers/media/common/saa7146/saa7146_hlp.c
@@ -122,7 +122,7 @@ static int calculate_h_scale_registers(struct saa7146_dev *dev,
xacm = 0;
/* set horizontal filter parameters (CXY = CXUV) */
- cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff;
+ cxy = hps_h_coeff_tab[min(xpsc - 1, 63)].hps_coeff;
cxuv = cxy;
/* calculate and set horizontal fine scale (xsci) */
@@ -151,7 +151,7 @@ static int calculate_h_scale_registers(struct saa7146_dev *dev,
xacm = 0;
/* get best match in the table of attenuations
for horizontal scaling */
- h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum;
+ h_atten = hps_h_coeff_tab[min(xpsc - 1, 63)].weight_sum;
for (i = 0; h_attenuation[i] != 0; i++) {
if (h_attenuation[i] >= h_atten)
@@ -283,10 +283,10 @@ static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field
}
/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
- cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff;
+ cya_cyb = hps_v_coeff_tab[min(yacl, 63)].hps_coeff;
/* get best match in the table of attenuations for vertical scaling */
- v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum;
+ v_atten = hps_v_coeff_tab[min(yacl, 63)].weight_sum;
for (i = 0; v_attenuation[i] != 0; i++) {
if (v_attenuation[i] >= v_atten)
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index b6bf8f232f48..358f1fe42975 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -421,11 +421,12 @@ static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
*/
static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
{
- WARN_ON(index >= q->max_num_buffers || q->bufs[index] || vb->vb2_queue);
+ WARN_ON(index >= q->max_num_buffers || test_bit(index, q->bufs_bitmap) || vb->vb2_queue);
q->bufs[index] = vb;
vb->index = index;
vb->vb2_queue = q;
+ set_bit(index, q->bufs_bitmap);
}
/**
@@ -434,6 +435,7 @@ static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, uns
*/
static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
{
+ clear_bit(vb->index, vb->vb2_queue->bufs_bitmap);
vb->vb2_queue->bufs[vb->index] = NULL;
vb->vb2_queue = NULL;
}
@@ -442,16 +444,19 @@ static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
* __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
* queue
+ * @first_index: index of the first created buffer, all newly allocated buffers
+ * have indices in the range [first_index..first_index+count-1]
*
* Returns the number of buffers successfully allocated.
*/
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
unsigned int num_buffers, unsigned int num_planes,
- const unsigned plane_sizes[VB2_MAX_PLANES])
+ const unsigned int plane_sizes[VB2_MAX_PLANES],
+ unsigned int *first_index)
{
- unsigned int q_num_buffers = vb2_get_num_buffers(q);
unsigned int buffer, plane;
struct vb2_buffer *vb;
+ unsigned long index = q->max_num_buffers;
int ret;
/*
@@ -459,7 +464,25 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
* in the queue is below q->max_num_buffers
*/
num_buffers = min_t(unsigned int, num_buffers,
- q->max_num_buffers - q_num_buffers);
+ q->max_num_buffers - vb2_get_num_buffers(q));
+
+ while (num_buffers) {
+ index = bitmap_find_next_zero_area(q->bufs_bitmap, q->max_num_buffers,
+ 0, num_buffers, 0);
+
+ if (index < q->max_num_buffers)
+ break;
+		/* Try to find free space for fewer buffers */
+ num_buffers--;
+ }
+
+	/* If there is no space left to allocate buffers, return 0 to indicate the error */
+ if (!num_buffers) {
+ *first_index = 0;
+ return 0;
+ }
+
+ *first_index = index;
for (buffer = 0; buffer < num_buffers; ++buffer) {
/* Allocate vb2 buffer structures */
@@ -479,7 +502,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->planes[plane].min_length = plane_sizes[plane];
}
- vb2_queue_add_buffer(q, vb, q_num_buffers + buffer);
+ vb2_queue_add_buffer(q, vb, index++);
call_void_bufop(q, init_buffer, vb);
/* Allocate video buffer memory for the MMAP type */
@@ -517,17 +540,16 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
}
/*
- * __vb2_free_mem() - release all video buffer memory for a given queue
+ * __vb2_free_mem() - release video buffer memory for a given range of
+ * buffers in a given queue
*/
-static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int start, unsigned int count)
{
- unsigned int buffer;
+ unsigned int i;
struct vb2_buffer *vb;
- unsigned int q_num_buffers = vb2_get_num_buffers(q);
- for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
- ++buffer) {
- vb = vb2_get_buffer(q, buffer);
+ for (i = start; i < start + count; i++) {
+ vb = vb2_get_buffer(q, i);
if (!vb)
continue;
@@ -542,35 +564,33 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
}
/*
- * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * __vb2_queue_free() - free @count buffers from @start index of the queue - video memory and
* related information, if no buffers are left return the queue to an
* uninitialized state. Might be called even if the queue has already been freed.
*/
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+static void __vb2_queue_free(struct vb2_queue *q, unsigned int start, unsigned int count)
{
- unsigned int buffer;
- unsigned int q_num_buffers = vb2_get_num_buffers(q);
+ unsigned int i;
lockdep_assert_held(&q->mmap_lock);
/* Call driver-provided cleanup function for each buffer, if provided */
- for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
- ++buffer) {
- struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+ for (i = start; i < start + count; i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, i);
if (vb && vb->planes[0].mem_priv)
call_void_vb_qop(vb, buf_cleanup, vb);
}
/* Release video buffer memory */
- __vb2_free_mem(q, buffers);
+ __vb2_free_mem(q, start, count);
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
* Check that all the calls were balanced during the life-time of this
* queue. If not then dump the counters to the kernel log.
*/
- if (q_num_buffers) {
+ if (vb2_get_num_buffers(q)) {
bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
q->cnt_wait_prepare != q->cnt_wait_finish;
@@ -596,8 +616,8 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
q->cnt_stop_streaming = 0;
q->cnt_unprepare_streaming = 0;
}
- for (buffer = 0; buffer < vb2_get_num_buffers(q); buffer++) {
- struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+ for (i = start; i < start + count; i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, i);
bool unbalanced;
if (!vb)
@@ -614,7 +634,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
if (unbalanced) {
pr_info("unbalanced counters for queue %p, buffer %d:\n",
- q, buffer);
+ q, i);
if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
pr_info(" buf_init: %u buf_cleanup: %u\n",
vb->cnt_buf_init, vb->cnt_buf_cleanup);
@@ -648,9 +668,8 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
#endif
/* Free vb2 buffers */
- for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
- ++buffer) {
- struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+ for (i = start; i < start + count; i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, i);
if (!vb)
continue;
@@ -659,7 +678,6 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
kfree(vb);
}
- q->num_buffers -= buffers;
if (!vb2_get_num_buffers(q)) {
q->memory = VB2_MEMORY_UNKNOWN;
INIT_LIST_HEAD(&q->queued_list);
@@ -691,7 +709,7 @@ EXPORT_SYMBOL(vb2_buffer_in_use);
static bool __buffers_in_use(struct vb2_queue *q)
{
unsigned int buffer;
- for (buffer = 0; buffer < vb2_get_num_buffers(q); ++buffer) {
+ for (buffer = 0; buffer < q->max_num_buffers; ++buffer) {
struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
if (!vb)
@@ -813,6 +831,32 @@ static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
return true;
}
+static int vb2_core_allocated_buffers_storage(struct vb2_queue *q)
+{
+ if (!q->bufs)
+ q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
+ if (!q->bufs)
+ return -ENOMEM;
+
+ if (!q->bufs_bitmap)
+ q->bufs_bitmap = bitmap_zalloc(q->max_num_buffers, GFP_KERNEL);
+ if (!q->bufs_bitmap) {
+ kfree(q->bufs);
+ q->bufs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void vb2_core_free_buffers_storage(struct vb2_queue *q)
+{
+ kfree(q->bufs);
+ q->bufs = NULL;
+ bitmap_free(q->bufs_bitmap);
+ q->bufs_bitmap = NULL;
+}
+
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int flags, unsigned int *count)
{
@@ -820,7 +864,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int q_num_bufs = vb2_get_num_buffers(q);
unsigned plane_sizes[VB2_MAX_PLANES] = { };
bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
- unsigned int i;
+ unsigned int i, first_index;
int ret = 0;
if (q->streaming) {
@@ -851,9 +895,10 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* queued without ever calling STREAMON.
*/
__vb2_queue_cancel(q);
- __vb2_queue_free(q, q_num_bufs);
+ __vb2_queue_free(q, 0, q->max_num_buffers);
mutex_unlock(&q->mmap_lock);
+ q->is_busy = 0;
/*
* In case of REQBUFS(0) return immediately without calling
* driver's queue_setup() callback and allocating resources.
@@ -865,7 +910,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
/*
* Make sure the requested values and current defaults are sane.
*/
- num_buffers = max_t(unsigned int, *count, q->min_queued_buffers);
+ num_buffers = max_t(unsigned int, *count, q->min_reqbufs_allocation);
num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
/*
@@ -873,10 +918,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* in the queue_setup op.
*/
mutex_lock(&q->mmap_lock);
- if (!q->bufs)
- q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
- if (!q->bufs)
- ret = -ENOMEM;
+ ret = vb2_core_allocated_buffers_storage(q);
q->memory = memory;
mutex_unlock(&q->mmap_lock);
if (ret)
@@ -906,8 +948,10 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
/* Finally, allocate buffers and video memory */
allocated_buffers =
- __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
+ __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, &first_index);
if (allocated_buffers == 0) {
+ /* There shouldn't be any buffers allocated, so first_index == 0 */
+ WARN_ON(first_index);
dprintk(q, 1, "memory allocation failed\n");
ret = -ENOMEM;
goto error;
@@ -917,7 +961,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* There is no point in continuing if we can't allocate the minimum
* number of buffers needed by this vb2_queue.
*/
- if (allocated_buffers < q->min_queued_buffers)
+ if (allocated_buffers < q->min_reqbufs_allocation)
ret = -ENOMEM;
/*
@@ -946,7 +990,6 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
mutex_lock(&q->mmap_lock);
- q->num_buffers = allocated_buffers;
if (ret < 0) {
/*
@@ -954,7 +997,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* from already queued buffers and it will reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
- __vb2_queue_free(q, allocated_buffers);
+ __vb2_queue_free(q, first_index, allocated_buffers);
mutex_unlock(&q->mmap_lock);
return ret;
}
@@ -966,6 +1009,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
*/
*count = allocated_buffers;
q->waiting_for_buffers = !q->is_output;
+ q->is_busy = 1;
return 0;
@@ -973,6 +1017,7 @@ error:
mutex_lock(&q->mmap_lock);
q->memory = VB2_MEMORY_UNKNOWN;
mutex_unlock(&q->mmap_lock);
+ vb2_core_free_buffers_storage(q);
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
@@ -980,7 +1025,8 @@ EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int flags, unsigned int *count,
unsigned int requested_planes,
- const unsigned int requested_sizes[])
+ const unsigned int requested_sizes[],
+ unsigned int *first_index)
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
@@ -1005,11 +1051,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* value in the queue_setup op.
*/
mutex_lock(&q->mmap_lock);
+ ret = vb2_core_allocated_buffers_storage(q);
q->memory = memory;
- if (!q->bufs)
- q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
- if (!q->bufs)
- ret = -ENOMEM;
mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
@@ -1042,7 +1085,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
/* Finally, allocate buffers and video memory */
allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
- num_planes, plane_sizes);
+ num_planes, plane_sizes, first_index);
if (allocated_buffers == 0) {
dprintk(q, 1, "memory allocation failed\n");
ret = -ENOMEM;
@@ -1072,7 +1115,6 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
}
mutex_lock(&q->mmap_lock);
- q->num_buffers += allocated_buffers;
if (ret < 0) {
/*
@@ -1080,7 +1122,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* from already queued buffers and it will reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
- __vb2_queue_free(q, allocated_buffers);
+ __vb2_queue_free(q, *first_index, allocated_buffers);
mutex_unlock(&q->mmap_lock);
return -ENOMEM;
}
@@ -1091,6 +1133,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* to the userspace.
*/
*count = allocated_buffers;
+ q->is_busy = 1;
return 0;
@@ -1648,6 +1691,44 @@ int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
+int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count)
+{
+ unsigned int i, ret = 0;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
+
+ if (count == 0)
+ return 0;
+
+ if (count > q_num_bufs)
+ return -EINVAL;
+
+ if (start > q->max_num_buffers - count)
+ return -EINVAL;
+
+ mutex_lock(&q->mmap_lock);
+
+ /* Check that all buffers in the range exist */
+ for (i = start; i < start + count; i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, i);
+
+ if (!vb) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ }
+ __vb2_queue_free(q, start, count);
+ dprintk(q, 2, "%u buffers removed\n", count);
+
+unlock:
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_core_remove_bufs);
+
/*
* vb2_start_streaming() - Attempt to start streaming.
* @q: videobuf2 queue
@@ -1694,7 +1775,7 @@ static int vb2_start_streaming(struct vb2_queue *q)
* Forcefully reclaim buffers if the driver did not
* correctly return them to vb2.
*/
- for (i = 0; i < vb2_get_num_buffers(q); ++i) {
+ for (i = 0; i < q->max_num_buffers; ++i) {
vb = vb2_get_buffer(q, i);
if (!vb)
@@ -2100,7 +2181,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* to vb2 in stop_streaming().
*/
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
- for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ for (i = 0; i < q->max_num_buffers; i++) {
struct vb2_buffer *vb = vb2_get_buffer(q, i);
if (!vb)
@@ -2144,7 +2225,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* call to __fill_user_buffer() after buf_finish(). That order can't
* be changed, so we can't move the buf_finish() to __vb2_dqbuf().
*/
- for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ for (i = 0; i < q->max_num_buffers; i++) {
struct vb2_buffer *vb;
struct media_request *req;
@@ -2503,7 +2584,7 @@ int vb2_core_queue_init(struct vb2_queue *q)
WARN_ON(!q->ops->buf_queue))
return -EINVAL;
- if (WARN_ON(q->max_num_buffers > MAX_BUFFER_INDEX) ||
+ if (WARN_ON(q->max_num_buffers < VB2_MAX_FRAME) ||
WARN_ON(q->min_queued_buffers > q->max_num_buffers))
return -EINVAL;
@@ -2521,6 +2602,25 @@ int vb2_core_queue_init(struct vb2_queue *q)
if (WARN_ON(q->supports_requests && q->min_queued_buffers))
return -EINVAL;
+ /*
+ * The minimum requirement is 2: one buffer is used
+ * by the hardware while the other is being processed by userspace.
+ */
+ if (q->min_reqbufs_allocation < 2)
+ q->min_reqbufs_allocation = 2;
+
+ /*
+ * If the driver needs 'min_queued_buffers' in the queue before
+ * calling start_streaming() then the minimum requirement is
+ * 'min_queued_buffers + 1' to keep at least one buffer available
+ * for userspace.
+ */
+ if (q->min_reqbufs_allocation < q->min_queued_buffers + 1)
+ q->min_reqbufs_allocation = q->min_queued_buffers + 1;
+
+ if (WARN_ON(q->min_reqbufs_allocation > q->max_num_buffers))
+ return -EINVAL;
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
@@ -2552,9 +2652,9 @@ void vb2_core_queue_release(struct vb2_queue *q)
__vb2_cleanup_fileio(q);
__vb2_queue_cancel(q);
mutex_lock(&q->mmap_lock);
- __vb2_queue_free(q, vb2_get_num_buffers(q));
- kfree(q->bufs);
- q->bufs = NULL;
+ __vb2_queue_free(q, 0, q->max_num_buffers);
+ vb2_core_free_buffers_storage(q);
+ q->is_busy = 0;
mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
@@ -2713,7 +2813,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
struct vb2_fileio_data *fileio;
struct vb2_buffer *vb;
int i, ret;
- unsigned int count = 0;
/*
* Sanity check
@@ -2734,18 +2833,8 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
if (q->streaming || vb2_get_num_buffers(q) > 0)
return -EBUSY;
- /*
- * Start with q->min_queued_buffers + 1, driver can increase it in
- * queue_setup()
- *
- * 'min_queued_buffers' buffers need to be queued up before you
- * can start streaming, plus 1 for userspace (or in this case,
- * kernelspace) processing.
- */
- count = max(2, q->min_queued_buffers + 1);
-
dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
- (read) ? "read" : "write", count, q->fileio_read_once,
+ (read) ? "read" : "write", q->min_reqbufs_allocation, q->fileio_read_once,
q->fileio_write_immediately);
fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
@@ -2759,13 +2848,19 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* Request buffers and use MMAP type to force driver
* to allocate buffers by itself.
*/
- fileio->count = count;
+ fileio->count = q->min_reqbufs_allocation;
fileio->memory = VB2_MEMORY_MMAP;
fileio->type = q->type;
q->fileio = fileio;
ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
if (ret)
goto err_kfree;
+ /* vb2_fileio_data supports max VB2_MAX_FRAME buffers */
+ if (fileio->count > VB2_MAX_FRAME) {
+ dprintk(q, 1, "fileio: more than VB2_MAX_FRAME buffers requested\n");
+ ret = -ENOSPC;
+ goto err_reqbufs;
+ }
/*
* Userspace can never add or delete buffers later, so there
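The new min_reqbufs_allocation clamping in vb2_core_queue_init() above boils down to max(2, min_queued_buffers + 1): at least one buffer for the hardware and one for userspace, plus one spare beyond whatever the driver must keep queued. A small stand-alone sketch of that rule (illustrative, not the in-tree helper):

#include <stdio.h>

/*
 * Illustrative only: mirrors the clamping done in vb2_core_queue_init().
 * The queue must allow at least two buffers, and at least
 * min_queued_buffers + 1 so that userspace always keeps one buffer while
 * the driver holds its required minimum.
 */
static unsigned int clamp_min_reqbufs(unsigned int min_reqbufs_allocation,
				      unsigned int min_queued_buffers)
{
	if (min_reqbufs_allocation < 2)
		min_reqbufs_allocation = 2;
	if (min_reqbufs_allocation < min_queued_buffers + 1)
		min_reqbufs_allocation = min_queued_buffers + 1;
	return min_reqbufs_allocation;
}

int main(void)
{
	/* A driver that needs 3 queued buffers ends up requiring 4 from REQBUFS */
	printf("%u\n", clamp_min_reqbufs(0, 3));
	return 0;
}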
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index c575198e8354..293f3d5f1c4e 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -685,7 +685,7 @@ static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
*flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
}
- *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
+ *caps |= V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
if (q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
if (q->io_modes & VB2_USERPTR)
@@ -795,11 +795,15 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
for (i = 0; i < requested_planes; i++)
if (requested_sizes[i] == 0)
return -EINVAL;
- return ret ? ret : vb2_core_create_bufs(q, create->memory,
- create->flags,
- &create->count,
- requested_planes,
- requested_sizes);
+ if (ret)
+ return ret;
+
+ return vb2_core_create_bufs(q, create->memory,
+ create->flags,
+ &create->count,
+ requested_planes,
+ requested_sizes,
+ &create->index);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
@@ -997,6 +1001,24 @@ EXPORT_SYMBOL_GPL(vb2_poll);
/* vb2 ioctl helpers */
+int vb2_ioctl_remove_bufs(struct file *file, void *priv,
+ struct v4l2_remove_buffers *d)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->queue->type != d->type)
+ return -EINVAL;
+
+ if (d->count == 0)
+ return 0;
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+
+ return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);
+
int vb2_ioctl_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 733d0bc4b4cc..b43695bc51e7 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -956,7 +956,7 @@ int dvb_usercopy(struct file *file,
int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
- char sbuf[128];
+ char sbuf[128] = {};
void *mbuf = NULL;
void *parg = NULL;
int err = -EINVAL;
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index a829c89792a4..5afdbe244596 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1480,7 +1480,7 @@ static int af9013_probe(struct i2c_client *client)
goto err_regmap_exit;
}
state->muxc->priv = state;
- ret = i2c_mux_add_adapter(state->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(state->muxc, 0, 0);
if (ret)
goto err_regmap_exit;
diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
index 297f9520ebf9..8a4e392c8896 100644
--- a/drivers/media/dvb-frontends/as102_fe_types.h
+++ b/drivers/media/dvb-frontends/as102_fe_types.h
@@ -174,6 +174,6 @@ struct as10x_register_addr {
uint32_t addr;
/* register mode access */
uint8_t mode;
-};
+} __packed;
#endif
diff --git a/drivers/media/dvb-frontends/cxd2880/Kconfig b/drivers/media/dvb-frontends/cxd2880/Kconfig
index 9d989676e800..94a8e0b936b9 100644
--- a/drivers/media/dvb-frontends/cxd2880/Kconfig
+++ b/drivers/media/dvb-frontends/cxd2880/Kconfig
@@ -5,4 +5,4 @@ config DVB_CXD2880
depends on DVB_CORE && SPI
default m if !MEDIA_SUBDRV_AUTOSELECT
help
-	  Say Y when you want to support this frontend.
\ No newline at end of file
+ Say Y when you want to support this frontend.
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 15f7e58c5a30..2c2fd4bf79cc 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/firmware.h>
#include <linux/i2c.h>
/*
@@ -1910,7 +1909,6 @@ struct drx_demod_instance {
/* generic demodulator data */
struct i2c_adapter *i2c;
- const struct firmware *firmware;
};
/*-------------------------------------------------------------------------
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 19d8de400a68..6fcaf07e1b82 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -56,6 +56,7 @@ INCLUDE FILES
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -1444,8 +1445,7 @@ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
/* Read block from I2C **************************************************** */
do {
- u16 todo = (datasize < DRXDAP_MAX_RCHUNKSIZE ?
- datasize : DRXDAP_MAX_RCHUNKSIZE);
+ u16 todo = min(datasize, DRXDAP_MAX_RCHUNKSIZE);
bufx = 0;
@@ -1659,7 +1659,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
Address must be rewritten because HI is reset after data transport and
expects an address.
*/
- todo = (block_size < datasize ? block_size : datasize);
+ todo = min(block_size, datasize);
if (todo == 0) {
u16 overhead_size_i2c_addr = 0;
u16 data_block_size = 0;
@@ -1681,9 +1681,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
first_err = st;
}
bufx = 0;
- todo =
- (data_block_size <
- datasize ? data_block_size : datasize);
+ todo = min(data_block_size, datasize);
}
memcpy(&buf[bufx], data, todo);
/* write (address if can do and) data */
@@ -11750,6 +11748,7 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
u8 *mc_data = NULL;
unsigned size;
char *mc_file;
+ const struct firmware *fw;
/* Check arguments */
if (!mc_info || !mc_info->mc_file)
@@ -11757,28 +11756,22 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
mc_file = mc_info->mc_file;
- if (!demod->firmware) {
- const struct firmware *fw = NULL;
-
- rc = request_firmware(&fw, mc_file, demod->i2c->dev.parent);
- if (rc < 0) {
- pr_err("Couldn't read firmware %s\n", mc_file);
- return rc;
- }
- demod->firmware = fw;
-
- if (demod->firmware->size < 2 * sizeof(u16)) {
- rc = -EINVAL;
- pr_err("Firmware is too short!\n");
- goto release;
- }
+ rc = request_firmware(&fw, mc_file, demod->i2c->dev.parent);
+ if (rc < 0) {
+ pr_err("Couldn't read firmware %s\n", mc_file);
+ return rc;
+ }
- pr_info("Firmware %s, size %zu\n",
- mc_file, demod->firmware->size);
+ if (fw->size < 2 * sizeof(u16)) {
+ rc = -EINVAL;
+ pr_err("Firmware is too short!\n");
+ goto release;
}
- mc_data_init = demod->firmware->data;
- size = demod->firmware->size;
+ pr_info("Firmware %s, size %zu\n", mc_file, fw->size);
+
+ mc_data_init = fw->data;
+ size = fw->size;
mc_data = (void *)mc_data_init;
/* Check data */
@@ -11874,7 +11867,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
0x0000)) {
pr_err("error reading firmware at pos %zd\n",
mc_data - mc_data_init);
- return -EIO;
+ rc = -EIO;
+ goto release;
}
result = memcmp(curr_ptr, mc_data_buffer,
@@ -11883,7 +11877,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
if (result) {
pr_err("error verifying firmware at pos %zd\n",
mc_data - mc_data_init);
- return -EIO;
+ rc = -EIO;
+ goto release;
}
curr_addr += ((dr_xaddr_t)(bytes_to_comp / 2));
@@ -11893,17 +11888,17 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
break;
}
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto release;
}
mc_data += mc_block_nr_bytes;
}
- return 0;
+ rc = 0;
release:
- release_firmware(demod->firmware);
- demod->firmware = NULL;
+ release_firmware(fw);
return rc;
}
@@ -12271,7 +12266,6 @@ static void drx39xxj_release(struct dvb_frontend *fe)
kfree(demod->my_ext_attr);
kfree(demod->my_common_attr);
kfree(demod->my_i2c_dev_addr);
- release_firmware(demod->firmware);
kfree(demod);
kfree(state);
}
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 263887592415..b25d11be8611 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2176,6 +2176,11 @@ static int lgdt3306a_probe(struct i2c_client *client)
struct dvb_frontend *fe;
int ret;
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ return -EINVAL;
+ }
+
config = kmemdup(client->dev.platform_data,
sizeof(struct lgdt3306a_config), GFP_KERNEL);
if (config == NULL) {
@@ -2203,7 +2208,7 @@ static int lgdt3306a_probe(struct i2c_client *client)
goto err_kfree;
}
state->muxc->priv = client;
- ret = i2c_mux_add_adapter(state->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(state->muxc, 0, 0);
if (ret)
goto err_kfree;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0272054fca5..5a03485686d9 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1000,6 +1000,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ /* to light up the LOCK led */
+ ret = m88ds3103_update_bits(dev, 0x11, 0x80, 0x00);
+ if (ret)
+ goto err;
+ }
+
dev->delivery_system = c->delivery_system;
return 0;
@@ -1866,7 +1873,7 @@ static int m88ds3103_probe(struct i2c_client *client)
goto err_kfree;
}
dev->muxc->priv = dev;
- ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0);
if (ret)
goto err_kfree;
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 4ebbcf05cc09..91e9c378397c 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
u32 nco_count_min = 0;
u32 clk_type = 0;
- struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 8, 1}, {0x90700010, 9, 1},
{0x90700010, 10, 1}, {0x90700010, 11, 1},
{0x90700010, 12, 1}, {0x90700010, 13, 1},
{0x90700010, 14, 1}, {0x90700010, 15, 1} };
- struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 16, 1}, {0x90700010, 17, 1},
{0x90700010, 18, 1}, {0x90700010, 19, 1},
{0x90700010, 20, 1}, {0x90700010, 21, 1},
{0x90700010, 22, 1}, {0x90700010, 23, 1} };
- struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700014, 0, 1}, {0x90700014, 1, 1},
{0x90700014, 2, 1}, {0x90700014, 3, 1},
{0x90700014, 4, 1}, {0x90700014, 5, 1},
{0x90700014, 6, 1}, {0x90700014, 7, 1} };
- struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
{0x90700018, 0, 3}, {0x90700018, 4, 3},
{0x90700018, 8, 3}, {0x90700018, 12, 3},
{0x90700018, 16, 3}, {0x90700018, 20, 3},
{0x90700018, 24, 3}, {0x90700018, 28, 3} };
- struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 16, 1}, {0x9070000C, 17, 1},
{0x9070000C, 18, 1}, {0x9070000C, 19, 1},
{0x9070000C, 20, 1}, {0x9070000C, 21, 1},
{0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
- struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 0, 1}, {0x90700010, 1, 1},
{0x90700010, 2, 1}, {0x90700010, 3, 1},
{0x90700010, 4, 1}, {0x90700010, 5, 1},
{0x90700010, 6, 1}, {0x90700010, 7, 1} };
- struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 0, 1}, {0x9070000C, 1, 1},
{0x9070000C, 2, 1}, {0x9070000C, 3, 1},
{0x9070000C, 4, 1}, {0x9070000C, 5, 1},
{0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
- struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 24, 1}, {0x9070000C, 25, 1},
{0x9070000C, 26, 1}, {0x9070000C, 27, 1},
{0x9070000C, 28, 1}, {0x9070000C, 29, 1},
{0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
- struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
{0x90700014, 8, 1}, {0x90700014, 9, 1},
{0x90700014, 10, 1}, {0x90700014, 11, 1},
{0x90700014, 12, 1}, {0x90700014, 13, 1},
{0x90700014, 14, 1}, {0x90700014, 15, 1} };
- struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
{0x907001D4, 0, 1}, {0x907001D4, 1, 1},
{0x907001D4, 2, 1}, {0x907001D4, 3, 1},
{0x907001D4, 4, 1}, {0x907001D4, 5, 1},
{0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
- struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
{0x90700044, 16, 80}, {0x90700044, 16, 81},
{0x90700044, 16, 82}, {0x90700044, 16, 83},
{0x90700044, 16, 84}, {0x90700044, 16, 85},
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 35c969fd2cb5..30d10fe4b33e 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -838,7 +838,7 @@ static int rtl2830_probe(struct i2c_client *client)
goto err_regmap_exit;
}
dev->muxc->priv = client;
- ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0);
if (ret)
goto err_regmap_exit;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 601cf45c3935..5142820b1b3d 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1082,7 +1082,7 @@ static int rtl2832_probe(struct i2c_client *client)
goto err_regmap_exit;
}
dev->muxc->priv = dev;
- ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0);
if (ret)
goto err_regmap_exit;
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 434d003bf397..013d423d3263 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -513,10 +513,8 @@ static int si2165_upload_firmware(struct si2165_state *state)
ret = 0;
state->firmware_loaded = true;
error:
- if (fw) {
- release_firmware(fw);
- fw = NULL;
- }
+ release_firmware(fw);
+ fw = NULL;
return ret;
}
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index dae1f2153e8b..26828fd41e68 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -744,7 +744,7 @@ static int si2168_probe(struct i2c_client *client)
goto err_kfree;
}
dev->muxc->priv = client;
- ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0);
if (ret)
goto err_kfree;
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 2f4d8fb400cd..35634f9a8ab5 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1277,7 +1277,7 @@ static int stb0899_get_dev_id(struct stb0899_state *state)
dprintk(state->verbose, FE_ERROR, 1, "Demodulator Core ID=[%s], Version=[%d]", (char *) &demod_str, demod_ver);
CONVERT32(STB0899_READ_S2REG(STB0899_S2FEC, FEC_CORE_ID_REG), (char *)&fec_str);
fec_ver = STB0899_READ_S2REG(STB0899_S2FEC, FEC_VER_ID_REG);
- if (! (chip_id > 0)) {
+ if (!chip_id) {
dprintk(state->verbose, FE_ERROR, 1, "couldn't find a STB 0899");
return -ENODEV;
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 5d5e4e9e4422..3e725cdcc66b 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
struct tda10048_config *config = &state->config;
int i;
u32 if_freq_khz;
+ u64 sample_freq;
dprintk(1, "%s(bw = %d)\n", __func__, bw);
@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
/* Calculate the sample frequency */
- state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
- state->sample_freq /= (state->pll_nfactor + 1);
- state->sample_freq /= (state->pll_pfactor + 4);
+ sample_freq = state->xtal_hz;
+ sample_freq *= state->pll_mfactor + 45;
+ do_div(sample_freq, state->pll_nfactor + 1);
+ do_div(sample_freq, state->pll_pfactor + 4);
+ state->sample_freq = sample_freq;
dprintk(1, "- sample_freq = %d\n", state->sample_freq);
/* Update the I/F */
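The tda10048 hunk above widens the sample-frequency maths to 64 bits before dividing with do_div(), so the xtal_hz * (pll_mfactor + 45) product cannot overflow a 32-bit intermediate. A user-space approximation of the same calculation, with plain 64-bit division standing in for do_div() and hypothetical PLL values:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative only: the product is computed in 64 bits, then divided by
 * (pll_nfactor + 1) and (pll_pfactor + 4), as in the hunk above.
 * do_div() is the kernel's 64-by-32 division helper; plain 64-bit
 * division stands in for it here.
 */
static uint32_t sample_freq_hz(uint32_t xtal_hz, uint32_t pll_mfactor,
			       uint32_t pll_nfactor, uint32_t pll_pfactor)
{
	uint64_t sample_freq = (uint64_t)xtal_hz * (pll_mfactor + 45);

	sample_freq /= pll_nfactor + 1;
	sample_freq /= pll_pfactor + 4;
	return (uint32_t)sample_freq;
}

int main(void)
{
	/* Hypothetical values: 16 MHz * 295 exceeds 32 bits, so the widening matters */
	printf("%u\n", sample_freq_hz(16000000, 250, 15, 0));
	return 0;
}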
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index a34834487943..fd928787207e 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
OscFreq = (u64) freq * (u64) Div;
OscFreq *= (u64) 16384;
- do_div(OscFreq, (u64)16000000);
+ do_div(OscFreq, 16000000);
MainDiv = OscFreq;
state->m_Regs[MPD] = PostDiv & 0x77;
@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
OscFreq = (u64)freq * (u64)Div;
/* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
OscFreq *= (u64)16384;
- do_div(OscFreq, (u64)16000000);
+ do_div(OscFreq, 16000000);
CalDiv = OscFreq;
state->m_Regs[CPD] = PostDiv;
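
do_div() divides a 64-bit dividend in place by a 32-bit divisor and returns the remainder, so casting the constant divisor to u64 is redundant. A minimal sketch of the intended usage, with hypothetical names:

  #include <asm/div64.h>
  #include <linux/types.h>

  /* Scale a frequency by num/den without open-coded 64-bit division. */
  static u32 foo_scale_freq(u32 freq, u32 num, u32 den)
  {
          u64 tmp = (u64)freq * num;      /* widen before multiplying */

          do_div(tmp, den);               /* tmp /= den; remainder returned */
          return (u32)tmp;
  }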
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 56f276b920ab..c6d3ee472d81 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -195,6 +195,7 @@ config VIDEO_IMX334
config VIDEO_IMX335
tristate "Sony IMX335 sensor support"
depends on OF_GPIO
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX335 camera.
@@ -405,6 +406,7 @@ config VIDEO_OV2740
config VIDEO_OV4689
tristate "OmniVision OV4689 sensor support"
depends on GPIOLIB
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor-level driver for the OmniVision
OV4689 camera.
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 4829cbe32419..819ff9f7c90f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1486,7 +1486,7 @@ static int adv7180_probe(struct i2c_client *client)
if (ret)
goto err_media_entity_cleanup;
- if (state->irq) {
+ if (state->irq > 0) {
ret = request_threaded_irq(client->irq, NULL, adv7180_irq,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
KBUILD_MODNAME, state);
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index ec151dc69c23..a4db9bae5f79 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -214,7 +214,7 @@ static int adv748x_hdmi_set_video_timings(struct adv748x_state *state,
* v4l2_subdev_video_ops
*/
-static int adv748x_hdmi_s_dv_timings(struct v4l2_subdev *sd,
+static int adv748x_hdmi_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
@@ -254,7 +254,7 @@ error:
return ret;
}
-static int adv748x_hdmi_g_dv_timings(struct v4l2_subdev *sd,
+static int adv748x_hdmi_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
@@ -269,7 +269,7 @@ static int adv748x_hdmi_g_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int adv748x_hdmi_query_dv_timings(struct v4l2_subdev *sd,
+static int adv748x_hdmi_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
@@ -392,9 +392,6 @@ static int adv748x_hdmi_g_pixelaspect(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_video_ops adv748x_video_ops_hdmi = {
- .s_dv_timings = adv748x_hdmi_s_dv_timings,
- .g_dv_timings = adv748x_hdmi_g_dv_timings,
- .query_dv_timings = adv748x_hdmi_query_dv_timings,
.g_input_status = adv748x_hdmi_g_input_status,
.s_stream = adv748x_hdmi_s_stream,
.g_pixelaspect = adv748x_hdmi_g_pixelaspect,
@@ -413,7 +410,7 @@ static int adv748x_hdmi_propagate_pixelrate(struct adv748x_hdmi *hdmi)
if (!tx)
return -ENOLINK;
- adv748x_hdmi_query_dv_timings(&hdmi->sd, &timings);
+ adv748x_hdmi_query_dv_timings(&hdmi->sd, 0, &timings);
return adv748x_csi2_set_pixelrate(tx, timings.bt.pixelclock);
}
@@ -610,6 +607,9 @@ static const struct v4l2_subdev_pad_ops adv748x_pad_ops_hdmi = {
.get_fmt = adv748x_hdmi_get_format,
.get_edid = adv748x_hdmi_get_edid,
.set_edid = adv748x_hdmi_set_edid,
+ .s_dv_timings = adv748x_hdmi_s_dv_timings,
+ .g_dv_timings = adv748x_hdmi_g_dv_timings,
+ .query_dv_timings = adv748x_hdmi_query_dv_timings,
.dv_timings_cap = adv748x_hdmi_dv_timings_cap,
.enum_dv_timings = adv748x_hdmi_enum_dv_timings,
};
@@ -734,7 +734,7 @@ int adv748x_hdmi_init(struct adv748x_hdmi *hdmi)
struct v4l2_dv_timings cea1280x720 = V4L2_DV_BT_CEA_1280X720P30;
int ret;
- adv748x_hdmi_s_dv_timings(&hdmi->sd, &cea1280x720);
+ adv748x_hdmi_s_dv_timings(&hdmi->sd, 0, &cea1280x720);
/* Initialise a default 16:9 aspect ratio */
hdmi->aspect_ratio.numerator = 16;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 0f780eb6ef63..79946e9c7401 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -995,8 +995,8 @@ static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int adv7511_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct adv7511_state *state = get_adv7511_state(sd);
struct v4l2_bt_timings *bt = &timings->bt;
@@ -1004,6 +1004,9 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
/* quick sanity check */
if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
return -EINVAL;
@@ -1042,13 +1045,16 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
+static int adv7511_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv7511_state *state = get_adv7511_state(sd);
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
if (!timings)
return -EINVAL;
@@ -1078,8 +1084,6 @@ static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops adv7511_video_ops = {
.s_stream = adv7511_s_stream,
- .s_dv_timings = adv7511_s_dv_timings,
- .g_dv_timings = adv7511_g_dv_timings,
};
/* ------------------------------ AUDIO OPS ------------------------------ */
@@ -1403,6 +1407,8 @@ static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
.enum_mbus_code = adv7511_enum_mbus_code,
.get_fmt = adv7511_get_fmt,
.set_fmt = adv7511_set_fmt,
+ .s_dv_timings = adv7511_s_dv_timings,
+ .g_dv_timings = adv7511_g_dv_timings,
.enum_dv_timings = adv7511_enum_dv_timings,
.dv_timings_cap = adv7511_dv_timings_cap,
};
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 319db3e847c4..48230d5109f0 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1557,8 +1557,8 @@ static unsigned int adv76xx_read_hdmi_pixelclock(struct v4l2_subdev *sd)
return freq;
}
-static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int adv76xx_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct adv76xx_state *state = to_state(sd);
const struct adv76xx_chip_info *info = state->info;
@@ -1687,8 +1687,8 @@ found:
return 0;
}
-static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int adv76xx_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct adv76xx_state *state = to_state(sd);
struct v4l2_bt_timings *bt;
@@ -1730,8 +1730,8 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int adv76xx_g_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int adv76xx_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct adv76xx_state *state = to_state(sd);
@@ -2607,7 +2607,7 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
stdi.lcf, stdi.bl, stdi.lcvs,
stdi.interlaced ? "interlaced" : "progressive",
stdi.hs_pol, stdi.vs_pol);
- if (adv76xx_query_dv_timings(sd, &timings))
+ if (adv76xx_query_dv_timings(sd, 0, &timings))
v4l2_info(sd, "No video detected\n");
else
v4l2_print_dv_timings(sd->name, "Detected format: ",
@@ -2726,9 +2726,6 @@ static const struct v4l2_subdev_core_ops adv76xx_core_ops = {
static const struct v4l2_subdev_video_ops adv76xx_video_ops = {
.s_routing = adv76xx_s_routing,
.g_input_status = adv76xx_g_input_status,
- .s_dv_timings = adv76xx_s_dv_timings,
- .g_dv_timings = adv76xx_g_dv_timings,
- .query_dv_timings = adv76xx_query_dv_timings,
};
static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
@@ -2738,6 +2735,9 @@ static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
.set_fmt = adv76xx_set_format,
.get_edid = adv76xx_get_edid,
.set_edid = adv76xx_set_edid,
+ .s_dv_timings = adv76xx_s_dv_timings,
+ .g_dv_timings = adv76xx_g_dv_timings,
+ .query_dv_timings = adv76xx_query_dv_timings,
.dv_timings_cap = adv76xx_dv_timings_cap,
.enum_dv_timings = adv76xx_enum_dv_timings,
};
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 2ad0f9f5503d..f2d4217310e7 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1518,7 +1518,7 @@ static void adv7842_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
timings->bt.flags |= V4L2_DV_FL_CAN_DETECT_REDUCED_FPS;
}
-static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
+static int adv7842_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv7842_state *state = to_state(sd);
@@ -1527,6 +1527,9 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
memset(timings, 0, sizeof(struct v4l2_dv_timings));
/* SDP block */
@@ -1643,7 +1646,7 @@ found:
return 0;
}
-static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
+static int adv7842_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv7842_state *state = to_state(sd);
@@ -1652,6 +1655,9 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
@@ -1689,11 +1695,14 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int adv7842_g_dv_timings(struct v4l2_subdev *sd,
+static int adv7842_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct adv7842_state *state = to_state(sd);
+ if (pad != 0)
+ return -EINVAL;
+
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
*timings = state->timings;
@@ -2780,7 +2789,7 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
"interlaced" : "progressive",
hs_pol, vs_pol);
}
- if (adv7842_query_dv_timings(sd, &timings))
+ if (adv7842_query_dv_timings(sd, 0, &timings))
v4l2_info(sd, "No video detected\n");
else
v4l2_print_dv_timings(sd->name, "Detected format: ",
@@ -3226,7 +3235,7 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
memset(&state->timings, 0, sizeof(struct v4l2_dv_timings));
- adv7842_s_dv_timings(sd, &timings);
+ adv7842_s_dv_timings(sd, 0, &timings);
return ret;
}
@@ -3298,9 +3307,6 @@ static const struct v4l2_subdev_video_ops adv7842_video_ops = {
.s_routing = adv7842_s_routing,
.querystd = adv7842_querystd,
.g_input_status = adv7842_g_input_status,
- .s_dv_timings = adv7842_s_dv_timings,
- .g_dv_timings = adv7842_g_dv_timings,
- .query_dv_timings = adv7842_query_dv_timings,
};
static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
@@ -3309,6 +3315,9 @@ static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
.set_fmt = adv7842_set_format,
.get_edid = adv7842_get_edid,
.set_edid = adv7842_set_edid,
+ .s_dv_timings = adv7842_s_dv_timings,
+ .g_dv_timings = adv7842_g_dv_timings,
+ .query_dv_timings = adv7842_query_dv_timings,
.enum_dv_timings = adv7842_enum_dv_timings,
.dv_timings_cap = adv7842_dv_timings_cap,
};
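
The four ADV bridge drivers above make the same conversion: the DV timings callbacks gain a pad argument and move from v4l2_subdev_video_ops to v4l2_subdev_pad_ops. A minimal sketch of the new callback shape, using a hypothetical foo_state:

  #include <linux/container_of.h>
  #include <media/v4l2-subdev.h>

  struct foo_state {
          struct v4l2_subdev sd;
          struct v4l2_dv_timings timings;
  };

  static int foo_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
                              struct v4l2_dv_timings *timings)
  {
          struct foo_state *state = container_of(sd, struct foo_state, sd);

          if (pad != 0)           /* timings only defined on pad 0 here */
                  return -EINVAL;

          *timings = state->timings;
          return 0;
  }

  static const struct v4l2_subdev_pad_ops foo_pad_ops = {
          .g_dv_timings = foo_g_dv_timings,
  };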
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 84d29bcf0ccd..0e88ce0ef8d7 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -310,8 +310,8 @@ module_i2c_driver(dw9714_i2c_driver);
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
-MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
-MODULE_AUTHOR("Jouni Ukkonen <jouni.ukkonen@intel.com>");
-MODULE_AUTHOR("Tommi Franttila <tommi.franttila@intel.com>");
+MODULE_AUTHOR("Yuning Pu");
+MODULE_AUTHOR("Jouni Ukkonen");
+MODULE_AUTHOR("Tommi Franttila");
MODULE_DESCRIPTION("DW9714 VCM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index f548b1bb75fb..e932d25ca7b3 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1475,7 +1475,7 @@ err_mutex:
return ret;
}
-static void __exit et8ek8_remove(struct i2c_client *client)
+static void et8ek8_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
@@ -1517,7 +1517,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
.of_match_table = et8ek8_of_table,
},
.probe = et8ek8_probe,
- .remove = __exit_p(et8ek8_remove),
+ .remove = et8ek8_remove,
.id_table = et8ek8_id_table,
};
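
An i2c driver's remove callback can run on any unbind (for example via sysfs), not only at module exit, so it must not be placed in the __exit section or wrapped in __exit_p(). Roughly, the resulting shape is:

  #include <linux/i2c.h>

  static int foo_probe(struct i2c_client *client)
  {
          return 0;
  }

  static void foo_remove(struct i2c_client *client)
  {
          /* teardown; may run on runtime unbind, not only module exit */
  }

  static struct i2c_driver foo_i2c_driver = {
          .driver = { .name = "foo" },
          .probe  = foo_probe,
          .remove = foo_remove,   /* plain pointer, no __exit_p() */
  };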
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 38c77d515786..b440f386f062 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -3,10 +3,13 @@
#include <asm/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -633,6 +636,11 @@ struct hi556 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
+ /* GPIOs, clocks, etc. */
+ struct gpio_desc *reset_gpio;
+ struct clk *clk;
+ struct regulator *avdd;
+
/* Current mode */
const struct hi556_mode *cur_mode;
@@ -1206,8 +1214,18 @@ static int hi556_check_hwcfg(struct device *dev)
int ret = 0;
unsigned int i, j;
- if (!fwnode)
- return -ENXIO;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver,
+ * wait for this.
+ */
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EPROBE_DEFER;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
if (ret) {
@@ -1220,15 +1238,6 @@ static int hi556_check_hwcfg(struct device *dev)
return -EINVAL;
}
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
- return -ENXIO;
-
- ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
- fwnode_handle_put(ep);
- if (ret)
- return ret;
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
dev_err(dev, "number of CSI2 data lanes %d is not supported",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -1275,6 +1284,47 @@ static void hi556_remove(struct i2c_client *client)
mutex_destroy(&hi556->mutex);
}
+static int hi556_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ gpiod_set_value_cansleep(hi556->reset_gpio, 1);
+
+ ret = regulator_disable(hi556->avdd);
+ if (ret) {
+ dev_err(dev, "failed to disable avdd: %d\n", ret);
+ gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+ return ret;
+ }
+
+ clk_disable_unprepare(hi556->clk);
+ return 0;
+}
+
+static int hi556_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ ret = clk_prepare_enable(hi556->clk);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(hi556->avdd);
+ if (ret) {
+ dev_err(dev, "failed to enable avdd: %d\n", ret);
+ clk_disable_unprepare(hi556->clk);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+ usleep_range(5000, 5500);
+ return 0;
+}
+
static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
@@ -1294,12 +1344,35 @@ static int hi556_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
+ hi556->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hi556->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ hi556->clk = devm_clk_get_optional(&client->dev, "clk");
+ if (IS_ERR(hi556->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
+ "failed to get clock\n");
+
+ /* The regulator core will provide a "dummy" regulator if necessary */
+ hi556->avdd = devm_regulator_get(&client->dev, "avdd");
+ if (IS_ERR(hi556->avdd))
+ return dev_err_probe(&client->dev, PTR_ERR(hi556->avdd),
+ "failed to get avdd regulator\n");
+
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
+ /* Ensure non ACPI managed resources are enabled */
+ ret = hi556_resume(&client->dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to power on sensor\n");
+
ret = hi556_identify_module(hi556);
if (ret) {
dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+ goto probe_error_power_off;
}
}
@@ -1344,9 +1417,16 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
mutex_destroy(&hi556->mutex);
+probe_error_power_off:
+ if (full_power)
+ hi556_suspend(&client->dev);
+
return ret;
}
+static DEFINE_RUNTIME_DEV_PM_OPS(hi556_pm_ops, hi556_suspend, hi556_resume,
+ NULL);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id hi556_acpi_ids[] = {
{"INT3537"},
@@ -1360,6 +1440,7 @@ static struct i2c_driver hi556_i2c_driver = {
.driver = {
.name = "hi556",
.acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+ .pm = pm_sleep_ptr(&hi556_pm_ops),
},
.probe = hi556_probe,
.remove = hi556_remove,
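
DEFINE_RUNTIME_DEV_PM_OPS() builds a dev_pm_ops whose runtime callbacks are the given suspend/resume pair and whose system-sleep hooks go through pm_runtime_force_suspend()/pm_runtime_force_resume(); pm_sleep_ptr() drops the reference when CONFIG_PM_SLEEP is disabled. A stripped-down sketch of the wiring, with hypothetical foo_* names:

  #include <linux/i2c.h>
  #include <linux/pm_runtime.h>

  static int foo_power_off(struct device *dev) { return 0; }
  static int foo_power_on(struct device *dev)  { return 0; }

  static int foo_probe(struct i2c_client *client) { return 0; }

  static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_power_off,
                                   foo_power_on, NULL);

  static struct i2c_driver foo_driver = {
          .driver = {
                  .name = "foo",
                  .pm   = pm_sleep_ptr(&foo_pm_ops),
          },
          .probe = foo_probe,
  };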
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 10b6ad66d126..4962cfe7c83d 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -1114,6 +1114,7 @@ free_ctrl:
v4l2_ctrl_handler_free(&imx214->ctrls);
error_power_off:
pm_runtime_disable(imx214->dev);
+ regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
return ret;
}
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index e17ef2e9d9d0..51ebf5453fce 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -551,8 +551,7 @@ static int imx219_init_controls(struct imx219 *imx219)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
- __func__, ret);
+ dev_err_probe(&client->dev, ret, "Control init failed\n");
goto error;
}
@@ -1024,17 +1023,15 @@ static int imx219_identify_module(struct imx219 *imx219)
u64 val;
ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
- if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
- IMX219_CHIP_ID);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to read chip id %x\n",
+ IMX219_CHIP_ID);
- if (val != IMX219_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
- IMX219_CHIP_ID, val);
- return -EIO;
- }
+ if (val != IMX219_CHIP_ID)
+ return dev_err_probe(&client->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ IMX219_CHIP_ID, val);
return 0;
}
@@ -1048,35 +1045,36 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
int ret = -EINVAL;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!endpoint) {
- dev_err(dev, "endpoint node not found\n");
- return -EINVAL;
- }
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
if (v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep_cfg)) {
- dev_err(dev, "could not parse endpoint\n");
+ dev_err_probe(dev, -EINVAL, "could not parse endpoint\n");
goto error_out;
}
/* Check the number of MIPI CSI2 data lanes */
if (ep_cfg.bus.mipi_csi2.num_data_lanes != 2 &&
ep_cfg.bus.mipi_csi2.num_data_lanes != 4) {
- dev_err(dev, "only 2 or 4 data lanes are currently supported\n");
+ dev_err_probe(dev, -EINVAL,
+ "only 2 or 4 data lanes are currently supported\n");
goto error_out;
}
imx219->lanes = ep_cfg.bus.mipi_csi2.num_data_lanes;
/* Check the link frequency set in device tree */
if (!ep_cfg.nr_of_link_frequencies) {
- dev_err(dev, "link-frequency property not found in DT\n");
+ dev_err_probe(dev, -EINVAL,
+ "link-frequency property not found in DT\n");
goto error_out;
}
if (ep_cfg.nr_of_link_frequencies != 1 ||
(ep_cfg.link_frequencies[0] != ((imx219->lanes == 2) ?
IMX219_DEFAULT_LINK_FREQ : IMX219_DEFAULT_LINK_FREQ_4LANE))) {
- dev_err(dev, "Link frequency not supported: %lld\n",
- ep_cfg.link_frequencies[0]);
+ dev_err_probe(dev, -EINVAL,
+ "Link frequency not supported: %lld\n",
+ ep_cfg.link_frequencies[0]);
goto error_out;
}
@@ -1107,31 +1105,25 @@ static int imx219_probe(struct i2c_client *client)
return -EINVAL;
imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
- if (IS_ERR(imx219->regmap)) {
- ret = PTR_ERR(imx219->regmap);
- dev_err(dev, "failed to initialize CCI: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(imx219->regmap))
+ return dev_err_probe(dev, PTR_ERR(imx219->regmap),
+ "failed to initialize CCI\n");
/* Get system clock (xclk) */
imx219->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx219->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(imx219->xclk);
- }
+ if (IS_ERR(imx219->xclk))
+ return dev_err_probe(dev, PTR_ERR(imx219->xclk),
+ "failed to get xclk\n");
imx219->xclk_freq = clk_get_rate(imx219->xclk);
- if (imx219->xclk_freq != IMX219_XCLK_FREQ) {
- dev_err(dev, "xclk frequency not supported: %d Hz\n",
- imx219->xclk_freq);
- return -EINVAL;
- }
+ if (imx219->xclk_freq != IMX219_XCLK_FREQ)
+ return dev_err_probe(dev, -EINVAL,
+ "xclk frequency not supported: %d Hz\n",
+ imx219->xclk_freq);
ret = imx219_get_regulators(imx219);
- if (ret) {
- dev_err(dev, "failed to get regulators\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
/* Request optional enable pin */
imx219->reset_gpio = devm_gpiod_get_optional(dev, "reset",
@@ -1183,20 +1175,21 @@ static int imx219_probe(struct i2c_client *client)
ret = media_entity_pads_init(&imx219->sd.entity, 1, &imx219->pad);
if (ret) {
- dev_err(dev, "failed to init entity pads: %d\n", ret);
+ dev_err_probe(dev, ret, "failed to init entity pads\n");
goto error_handler_free;
}
imx219->sd.state_lock = imx219->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&imx219->sd);
if (ret < 0) {
- dev_err(dev, "subdev init error: %d\n", ret);
+ dev_err_probe(dev, ret, "subdev init error\n");
goto error_media_entity;
}
ret = v4l2_async_register_subdev_sensor(&imx219->sd);
if (ret < 0) {
- dev_err(dev, "failed to register sensor sub-device: %d\n", ret);
+ dev_err_probe(dev, ret,
+ "failed to register sensor sub-device\n");
goto error_subdev_cleanup;
}
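
dev_err_probe(dev, err, fmt, ...) logs the message and returns err; for -EPROBE_DEFER it records the reason (visible in /sys/kernel/debug/devices_deferred) instead of flooding the log, which is why the single `return dev_err_probe(...)` statement is preferred over dev_err() plus return in probe paths. A small sketch of the pattern, with a hypothetical clock lookup:

  #include <linux/clk.h>
  #include <linux/device.h>
  #include <linux/err.h>

  static int foo_get_clk(struct device *dev, struct clk **clk)
  {
          *clk = devm_clk_get(dev, NULL);
          if (IS_ERR(*clk))
                  return dev_err_probe(dev, PTR_ERR(*clk),
                                       "failed to get clock\n");

          return 0;
  }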
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index dab6d080bc4c..990d74214cc2 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -11,57 +11,108 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
/* Streaming Mode */
-#define IMX335_REG_MODE_SELECT 0x3000
-#define IMX335_MODE_STANDBY 0x01
-#define IMX335_MODE_STREAMING 0x00
+#define IMX335_REG_MODE_SELECT CCI_REG8(0x3000)
+#define IMX335_MODE_STANDBY 0x01
+#define IMX335_MODE_STREAMING 0x00
+
+/* Group hold register */
+#define IMX335_REG_HOLD CCI_REG8(0x3001)
+
+#define IMX335_REG_MASTER_MODE CCI_REG8(0x3002)
+#define IMX335_REG_BCWAIT_TIME CCI_REG8(0x300c)
+#define IMX335_REG_CPWAIT_TIME CCI_REG8(0x300d)
+#define IMX335_REG_WINMODE CCI_REG8(0x3018)
+#define IMX335_REG_HTRIMMING_START CCI_REG16_LE(0x302c)
+#define IMX335_REG_HNUM CCI_REG8(0x302e)
/* Lines per frame */
-#define IMX335_REG_LPFR 0x3030
+#define IMX335_REG_VMAX CCI_REG24_LE(0x3030)
-/* Chip ID */
-#define IMX335_REG_ID 0x3912
-#define IMX335_ID 0x00
-
-/* Exposure control */
-#define IMX335_REG_SHUTTER 0x3058
-#define IMX335_EXPOSURE_MIN 1
-#define IMX335_EXPOSURE_OFFSET 9
-#define IMX335_EXPOSURE_STEP 1
-#define IMX335_EXPOSURE_DEFAULT 0x0648
-
-/* Analog gain control */
-#define IMX335_REG_AGAIN 0x30e8
-#define IMX335_AGAIN_MIN 0
-#define IMX335_AGAIN_MAX 240
-#define IMX335_AGAIN_STEP 1
-#define IMX335_AGAIN_DEFAULT 0
+#define IMX335_REG_OPB_SIZE_V CCI_REG8(0x304c)
+#define IMX335_REG_ADBIT CCI_REG8(0x3050)
+#define IMX335_REG_Y_OUT_SIZE CCI_REG16_LE(0x3056)
-/* Group hold register */
-#define IMX335_REG_HOLD 0x3001
+#define IMX335_REG_SHUTTER CCI_REG24_LE(0x3058)
+#define IMX335_EXPOSURE_MIN 1
+#define IMX335_EXPOSURE_OFFSET 9
+#define IMX335_EXPOSURE_STEP 1
+#define IMX335_EXPOSURE_DEFAULT 0x0648
+
+#define IMX335_REG_AREA3_ST_ADR_1 CCI_REG16_LE(0x3074)
+#define IMX335_REG_AREA3_WIDTH_1 CCI_REG16_LE(0x3076)
+
+/* Analog and Digital gain control */
+#define IMX335_REG_GAIN CCI_REG8(0x30e8)
+#define IMX335_AGAIN_MIN 0
+#define IMX335_AGAIN_MAX 100
+#define IMX335_AGAIN_STEP 1
+#define IMX335_AGAIN_DEFAULT 0
+
+#define IMX335_REG_TPG_TESTCLKEN CCI_REG8(0x3148)
+
+#define IMX335_REG_INCLKSEL1 CCI_REG16_LE(0x314c)
+#define IMX335_REG_INCLKSEL2 CCI_REG8(0x315a)
+#define IMX335_REG_INCLKSEL3 CCI_REG8(0x3168)
+#define IMX335_REG_INCLKSEL4 CCI_REG8(0x316a)
+
+#define IMX335_REG_MDBIT CCI_REG8(0x319d)
+#define IMX335_REG_SYSMODE CCI_REG8(0x319e)
+
+#define IMX335_REG_XVS_XHS_DRV CCI_REG8(0x31a1)
/* Test pattern generator */
-#define IMX335_REG_TPG 0x329e
-#define IMX335_TPG_ALL_000 0
-#define IMX335_TPG_ALL_FFF 1
-#define IMX335_TPG_ALL_555 2
-#define IMX335_TPG_ALL_AAA 3
-#define IMX335_TPG_TOG_555_AAA 4
-#define IMX335_TPG_TOG_AAA_555 5
-#define IMX335_TPG_TOG_000_555 6
-#define IMX335_TPG_TOG_555_000 7
-#define IMX335_TPG_TOG_000_FFF 8
-#define IMX335_TPG_TOG_FFF_000 9
-#define IMX335_TPG_H_COLOR_BARS 10
-#define IMX335_TPG_V_COLOR_BARS 11
+#define IMX335_REG_TPG_DIG_CLP_MODE CCI_REG8(0x3280)
+#define IMX335_REG_TPG_EN_DUOUT CCI_REG8(0x329c)
+#define IMX335_REG_TPG CCI_REG8(0x329e)
+#define IMX335_TPG_ALL_000 0
+#define IMX335_TPG_ALL_FFF 1
+#define IMX335_TPG_ALL_555 2
+#define IMX335_TPG_ALL_AAA 3
+#define IMX335_TPG_TOG_555_AAA 4
+#define IMX335_TPG_TOG_AAA_555 5
+#define IMX335_TPG_TOG_000_555 6
+#define IMX335_TPG_TOG_555_000 7
+#define IMX335_TPG_TOG_000_FFF 8
+#define IMX335_TPG_TOG_FFF_000 9
+#define IMX335_TPG_H_COLOR_BARS 10
+#define IMX335_TPG_V_COLOR_BARS 11
+#define IMX335_REG_TPG_COLORWIDTH CCI_REG8(0x32a0)
+
+#define IMX335_REG_BLKLEVEL CCI_REG16_LE(0x3302)
+
+#define IMX335_REG_WRJ_OPEN CCI_REG8(0x336c)
+
+#define IMX335_REG_ADBIT1 CCI_REG16_LE(0x341c)
+
+/* Chip ID */
+#define IMX335_REG_ID CCI_REG8(0x3912)
+#define IMX335_ID 0x00
+
+/* Data Lanes */
+#define IMX335_REG_LANEMODE CCI_REG8(0x3a01)
+#define IMX335_2LANE 1
+#define IMX335_4LANE 3
+
+#define IMX335_REG_TCLKPOST CCI_REG16_LE(0x3a18)
+#define IMX335_REG_TCLKPREPARE CCI_REG16_LE(0x3a1a)
+#define IMX335_REG_TCLK_TRAIL CCI_REG16_LE(0x3a1c)
+#define IMX335_REG_TCLK_ZERO CCI_REG16_LE(0x3a1e)
+#define IMX335_REG_THS_PREPARE CCI_REG16_LE(0x3a20)
+#define IMX335_REG_THS_ZERO CCI_REG16_LE(0x3a22)
+#define IMX335_REG_THS_TRAIL CCI_REG16_LE(0x3a24)
+#define IMX335_REG_THS_EXIT CCI_REG16_LE(0x3a26)
+#define IMX335_REG_TPLX CCI_REG16_LE(0x3a28)
/* Input clock rate */
-#define IMX335_INCLK_RATE 24000000
+#define IMX335_INCLK_RATE 24000000
/* CSI2 HW configuration */
#define IMX335_LINK_FREQ_594MHz 594000000LL
@@ -69,9 +120,6 @@
#define IMX335_NUM_DATA_LANES 4
-#define IMX335_REG_MIN 0x00
-#define IMX335_REG_MAX 0xfffff
-
/* IMX335 native and active pixel array size. */
#define IMX335_NATIVE_WIDTH 2616U
#define IMX335_NATIVE_HEIGHT 1964U
@@ -81,23 +129,13 @@
#define IMX335_PIXEL_ARRAY_HEIGHT 1944U
/**
- * struct imx335_reg - imx335 sensor register
- * @address: Register address
- * @val: Register value
- */
-struct imx335_reg {
- u16 address;
- u8 val;
-};
-
-/**
* struct imx335_reg_list - imx335 sensor register list
* @num_of_regs: Number of registers in the list
* @regs: Pointer to register list
*/
struct imx335_reg_list {
u32 num_of_regs;
- const struct imx335_reg *regs;
+ const struct cci_reg_sequence *regs;
};
static const char * const imx335_supply_name[] = {
@@ -138,6 +176,7 @@ struct imx335_mode {
* @pad: Media pad. Only one pad supported
* @reset_gpio: Sensor reset gpio
* @supplies: Regulator supplies to handle power control
+ * @cci: CCI register map
* @inclk: Sensor input clock
* @ctrl_handler: V4L2 control handler
* @link_freq_ctrl: Pointer to link frequency control
@@ -147,6 +186,7 @@ struct imx335_mode {
* @exp_ctrl: Pointer to exposure control
* @again_ctrl: Pointer to analog gain control
* @vblank: Vertical blanking in lines
+ * @lane_mode: Mode for number of connected data lanes
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
* @link_freq_bitmap: Menu bitmap for link_freq_ctrl
@@ -159,6 +199,7 @@ struct imx335 {
struct media_pad pad;
struct gpio_desc *reset_gpio;
struct regulator_bulk_data supplies[ARRAY_SIZE(imx335_supply_name)];
+ struct regmap *cci;
struct clk *inclk;
struct v4l2_ctrl_handler ctrl_handler;
@@ -171,6 +212,7 @@ struct imx335 {
struct v4l2_ctrl *again_ctrl;
};
u32 vblank;
+ u32 lane_mode;
const struct imx335_mode *cur_mode;
struct mutex mutex;
unsigned long link_freq_bitmap;
@@ -210,140 +252,135 @@ static const int imx335_tpg_val[] = {
};
/* Sensor mode registers */
-static const struct imx335_reg mode_2592x1940_regs[] = {
- {0x3000, 0x01},
- {0x3002, 0x00},
- {0x3018, 0x04},
- {0x302c, 0x3c},
- {0x302e, 0x20},
- {0x3056, 0x94},
- {0x3074, 0xc8},
- {0x3076, 0x28},
- {0x304c, 0x00},
- {0x31a1, 0x00},
- {0x3288, 0x21},
- {0x328a, 0x02},
- {0x3414, 0x05},
- {0x3416, 0x18},
- {0x3648, 0x01},
- {0x364a, 0x04},
- {0x364c, 0x04},
- {0x3678, 0x01},
- {0x367c, 0x31},
- {0x367e, 0x31},
- {0x3706, 0x10},
- {0x3708, 0x03},
- {0x3714, 0x02},
- {0x3715, 0x02},
- {0x3716, 0x01},
- {0x3717, 0x03},
- {0x371c, 0x3d},
- {0x371d, 0x3f},
- {0x372c, 0x00},
- {0x372d, 0x00},
- {0x372e, 0x46},
- {0x372f, 0x00},
- {0x3730, 0x89},
- {0x3731, 0x00},
- {0x3732, 0x08},
- {0x3733, 0x01},
- {0x3734, 0xfe},
- {0x3735, 0x05},
- {0x3740, 0x02},
- {0x375d, 0x00},
- {0x375e, 0x00},
- {0x375f, 0x11},
- {0x3760, 0x01},
- {0x3768, 0x1b},
- {0x3769, 0x1b},
- {0x376a, 0x1b},
- {0x376b, 0x1b},
- {0x376c, 0x1a},
- {0x376d, 0x17},
- {0x376e, 0x0f},
- {0x3776, 0x00},
- {0x3777, 0x00},
- {0x3778, 0x46},
- {0x3779, 0x00},
- {0x377a, 0x89},
- {0x377b, 0x00},
- {0x377c, 0x08},
- {0x377d, 0x01},
- {0x377e, 0x23},
- {0x377f, 0x02},
- {0x3780, 0xd9},
- {0x3781, 0x03},
- {0x3782, 0xf5},
- {0x3783, 0x06},
- {0x3784, 0xa5},
- {0x3788, 0x0f},
- {0x378a, 0xd9},
- {0x378b, 0x03},
- {0x378c, 0xeb},
- {0x378d, 0x05},
- {0x378e, 0x87},
- {0x378f, 0x06},
- {0x3790, 0xf5},
- {0x3792, 0x43},
- {0x3794, 0x7a},
- {0x3796, 0xa1},
- {0x37b0, 0x36},
- {0x3a00, 0x00},
+static const struct cci_reg_sequence mode_2592x1940_regs[] = {
+ { IMX335_REG_MODE_SELECT, IMX335_MODE_STANDBY },
+ { IMX335_REG_MASTER_MODE, 0x00 },
+ { IMX335_REG_WINMODE, 0x04 },
+ { IMX335_REG_HTRIMMING_START, 48 },
+ { IMX335_REG_HNUM, 2592 },
+ { IMX335_REG_Y_OUT_SIZE, 1944 },
+ { IMX335_REG_AREA3_ST_ADR_1, 176 },
+ { IMX335_REG_AREA3_WIDTH_1, 3928 },
+ { IMX335_REG_OPB_SIZE_V, 0 },
+ { IMX335_REG_XVS_XHS_DRV, 0x00 },
+ { CCI_REG8(0x3288), 0x21 },
+ { CCI_REG8(0x328a), 0x02 },
+ { CCI_REG8(0x3414), 0x05 },
+ { CCI_REG8(0x3416), 0x18 },
+ { CCI_REG8(0x3648), 0x01 },
+ { CCI_REG8(0x364a), 0x04 },
+ { CCI_REG8(0x364c), 0x04 },
+ { CCI_REG8(0x3678), 0x01 },
+ { CCI_REG8(0x367c), 0x31 },
+ { CCI_REG8(0x367e), 0x31 },
+ { CCI_REG8(0x3706), 0x10 },
+ { CCI_REG8(0x3708), 0x03 },
+ { CCI_REG8(0x3714), 0x02 },
+ { CCI_REG8(0x3715), 0x02 },
+ { CCI_REG8(0x3716), 0x01 },
+ { CCI_REG8(0x3717), 0x03 },
+ { CCI_REG8(0x371c), 0x3d },
+ { CCI_REG8(0x371d), 0x3f },
+ { CCI_REG8(0x372c), 0x00 },
+ { CCI_REG8(0x372d), 0x00 },
+ { CCI_REG8(0x372e), 0x46 },
+ { CCI_REG8(0x372f), 0x00 },
+ { CCI_REG8(0x3730), 0x89 },
+ { CCI_REG8(0x3731), 0x00 },
+ { CCI_REG8(0x3732), 0x08 },
+ { CCI_REG8(0x3733), 0x01 },
+ { CCI_REG8(0x3734), 0xfe },
+ { CCI_REG8(0x3735), 0x05 },
+ { CCI_REG8(0x3740), 0x02 },
+ { CCI_REG8(0x375d), 0x00 },
+ { CCI_REG8(0x375e), 0x00 },
+ { CCI_REG8(0x375f), 0x11 },
+ { CCI_REG8(0x3760), 0x01 },
+ { CCI_REG8(0x3768), 0x1b },
+ { CCI_REG8(0x3769), 0x1b },
+ { CCI_REG8(0x376a), 0x1b },
+ { CCI_REG8(0x376b), 0x1b },
+ { CCI_REG8(0x376c), 0x1a },
+ { CCI_REG8(0x376d), 0x17 },
+ { CCI_REG8(0x376e), 0x0f },
+ { CCI_REG8(0x3776), 0x00 },
+ { CCI_REG8(0x3777), 0x00 },
+ { CCI_REG8(0x3778), 0x46 },
+ { CCI_REG8(0x3779), 0x00 },
+ { CCI_REG8(0x377a), 0x89 },
+ { CCI_REG8(0x377b), 0x00 },
+ { CCI_REG8(0x377c), 0x08 },
+ { CCI_REG8(0x377d), 0x01 },
+ { CCI_REG8(0x377e), 0x23 },
+ { CCI_REG8(0x377f), 0x02 },
+ { CCI_REG8(0x3780), 0xd9 },
+ { CCI_REG8(0x3781), 0x03 },
+ { CCI_REG8(0x3782), 0xf5 },
+ { CCI_REG8(0x3783), 0x06 },
+ { CCI_REG8(0x3784), 0xa5 },
+ { CCI_REG8(0x3788), 0x0f },
+ { CCI_REG8(0x378a), 0xd9 },
+ { CCI_REG8(0x378b), 0x03 },
+ { CCI_REG8(0x378c), 0xeb },
+ { CCI_REG8(0x378d), 0x05 },
+ { CCI_REG8(0x378e), 0x87 },
+ { CCI_REG8(0x378f), 0x06 },
+ { CCI_REG8(0x3790), 0xf5 },
+ { CCI_REG8(0x3792), 0x43 },
+ { CCI_REG8(0x3794), 0x7a },
+ { CCI_REG8(0x3796), 0xa1 },
+ { CCI_REG8(0x37b0), 0x36 },
+ { CCI_REG8(0x3a00), 0x00 },
};
-static const struct imx335_reg raw10_framefmt_regs[] = {
- {0x3050, 0x00},
- {0x319d, 0x00},
- {0x341c, 0xff},
- {0x341d, 0x01},
+static const struct cci_reg_sequence raw10_framefmt_regs[] = {
+ { IMX335_REG_ADBIT, 0x00 },
+ { IMX335_REG_MDBIT, 0x00 },
+ { IMX335_REG_ADBIT1, 0x1ff },
};
-static const struct imx335_reg raw12_framefmt_regs[] = {
- {0x3050, 0x01},
- {0x319d, 0x01},
- {0x341c, 0x47},
- {0x341d, 0x00},
+static const struct cci_reg_sequence raw12_framefmt_regs[] = {
+ { IMX335_REG_ADBIT, 0x01 },
+ { IMX335_REG_MDBIT, 0x01 },
+ { IMX335_REG_ADBIT1, 0x47 },
};
-static const struct imx335_reg mipi_data_rate_1188Mbps[] = {
- {0x300c, 0x3b},
- {0x300d, 0x2a},
- {0x314c, 0xc6},
- {0x314d, 0x00},
- {0x315a, 0x02},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x319e, 0x01},
- {0x3a18, 0x8f},
- {0x3a1a, 0x4f},
- {0x3a1c, 0x47},
- {0x3a1e, 0x37},
- {0x3a1f, 0x01},
- {0x3a20, 0x4f},
- {0x3a22, 0x87},
- {0x3a24, 0x4f},
- {0x3a26, 0x7f},
- {0x3a28, 0x3f},
+static const struct cci_reg_sequence mipi_data_rate_1188Mbps[] = {
+ { IMX335_REG_BCWAIT_TIME, 0x3b },
+ { IMX335_REG_CPWAIT_TIME, 0x2a },
+ { IMX335_REG_INCLKSEL1, 0x00c6 },
+ { IMX335_REG_INCLKSEL2, 0x02 },
+ { IMX335_REG_INCLKSEL3, 0xa0 },
+ { IMX335_REG_INCLKSEL4, 0x7e },
+ { IMX335_REG_SYSMODE, 0x01 },
+ { IMX335_REG_TCLKPOST, 0x8f },
+ { IMX335_REG_TCLKPREPARE, 0x4f },
+ { IMX335_REG_TCLK_TRAIL, 0x47 },
+ { IMX335_REG_TCLK_ZERO, 0x0137 },
+ { IMX335_REG_THS_PREPARE, 0x4f },
+ { IMX335_REG_THS_ZERO, 0x87 },
+ { IMX335_REG_THS_TRAIL, 0x4f },
+ { IMX335_REG_THS_EXIT, 0x7f },
+ { IMX335_REG_TPLX, 0x3f },
};
-static const struct imx335_reg mipi_data_rate_891Mbps[] = {
- {0x300c, 0x3b},
- {0x300d, 0x2a},
- {0x314c, 0x29},
- {0x314d, 0x01},
- {0x315a, 0x06},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x319e, 0x02},
- {0x3a18, 0x7f},
- {0x3a1a, 0x37},
- {0x3a1c, 0x37},
- {0x3a1e, 0xf7},
- {0x3a20, 0x3f},
- {0x3a22, 0x6f},
- {0x3a24, 0x3f},
- {0x3a26, 0x5f},
- {0x3a28, 0x2f},
+static const struct cci_reg_sequence mipi_data_rate_891Mbps[] = {
+ { IMX335_REG_BCWAIT_TIME, 0x3b },
+ { IMX335_REG_CPWAIT_TIME, 0x2a },
+ { IMX335_REG_INCLKSEL1, 0x0129 },
+ { IMX335_REG_INCLKSEL2, 0x06 },
+ { IMX335_REG_INCLKSEL3, 0xa0 },
+ { IMX335_REG_INCLKSEL4, 0x7e },
+ { IMX335_REG_SYSMODE, 0x02 },
+ { IMX335_REG_TCLKPOST, 0x7f },
+ { IMX335_REG_TCLKPREPARE, 0x37 },
+ { IMX335_REG_TCLK_TRAIL, 0x37 },
+ { IMX335_REG_TCLK_ZERO, 0xf7 },
+ { IMX335_REG_THS_PREPARE, 0x3f },
+ { IMX335_REG_THS_ZERO, 0x6f },
+ { IMX335_REG_THS_TRAIL, 0x3f },
+ { IMX335_REG_THS_EXIT, 0x5f },
+ { IMX335_REG_TPLX, 0x2f },
};
static const s64 link_freq[] = {
@@ -372,10 +409,10 @@ static const u32 imx335_mbus_codes[] = {
/* Supported sensor mode configurations */
static const struct imx335_mode supported_mode = {
.width = 2592,
- .height = 1940,
+ .height = 1944,
.hblank = 342,
- .vblank = 2560,
- .vblank_min = 2560,
+ .vblank = 2556,
+ .vblank_min = 2556,
.vblank_max = 133060,
.pclk = 396000000,
.reg_list = {
@@ -396,101 +433,6 @@ static inline struct imx335 *to_imx335(struct v4l2_subdev *subdev)
}
/**
- * imx335_read_reg() - Read registers.
- * @imx335: pointer to imx335 device
- * @reg: register address
- * @len: length of bytes to read. Max supported bytes is 4
- * @val: pointer to register value to be filled.
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx335_read_reg(struct imx335 *imx335, u16 reg, u32 len, u32 *val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx335->sd);
- struct i2c_msg msgs[2] = {0};
- u8 addr_buf[2] = {0};
- u8 data_buf[4] = {0};
- int ret;
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- /* Write register address */
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- /* Read data from register */
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = data_buf;
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs))
- return -EIO;
-
- *val = get_unaligned_le32(data_buf);
-
- return 0;
-}
-
-/**
- * imx335_write_reg() - Write register
- * @imx335: pointer to imx335 device
- * @reg: register address
- * @len: length of bytes. Max supported bytes is 4
- * @val: register value
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx335_write_reg(struct imx335 *imx335, u16 reg, u32 len, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx335->sd);
- u8 buf[6] = {0};
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_le32(val, buf + 2);
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
-
- return 0;
-}
-
-/**
- * imx335_write_regs() - Write a list of registers
- * @imx335: pointer to imx335 device
- * @regs: list of registers to be written
- * @len: length of registers array
- *
- * Return: 0 if successful. error code otherwise.
- */
-static int imx335_write_regs(struct imx335 *imx335,
- const struct imx335_reg *regs, u32 len)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < len; i++) {
- ret = imx335_write_reg(imx335, regs[i].address, 1, regs[i].val);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
* imx335_update_controls() - Update control ranges based on streaming mode
* @imx335: pointer to imx335 device
* @mode: pointer to imx335_mode sensor mode
@@ -526,7 +468,8 @@ static int imx335_update_controls(struct imx335 *imx335,
static int imx335_update_exp_gain(struct imx335 *imx335, u32 exposure, u32 gain)
{
u32 lpfr, shutter;
- int ret;
+ int ret_hold;
+ int ret = 0;
lpfr = imx335->vblank + imx335->cur_mode->height;
shutter = lpfr - exposure;
@@ -534,64 +477,55 @@ static int imx335_update_exp_gain(struct imx335 *imx335, u32 exposure, u32 gain)
dev_dbg(imx335->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u\n",
exposure, gain, shutter, lpfr);
- ret = imx335_write_reg(imx335, IMX335_REG_HOLD, 1, 1);
- if (ret)
- return ret;
-
- ret = imx335_write_reg(imx335, IMX335_REG_LPFR, 3, lpfr);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx335_write_reg(imx335, IMX335_REG_SHUTTER, 3, shutter);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx335_write_reg(imx335, IMX335_REG_AGAIN, 2, gain);
-
-error_release_group_hold:
- imx335_write_reg(imx335, IMX335_REG_HOLD, 1, 0);
+ cci_write(imx335->cci, IMX335_REG_HOLD, 1, &ret);
+ cci_write(imx335->cci, IMX335_REG_VMAX, lpfr, &ret);
+ cci_write(imx335->cci, IMX335_REG_SHUTTER, shutter, &ret);
+ cci_write(imx335->cci, IMX335_REG_GAIN, gain, &ret);
+ /*
+ * Unconditionally attempt to release the hold, but track the
+ * error if the unhold itself fails.
+ */
+ ret_hold = cci_write(imx335->cci, IMX335_REG_HOLD, 0, NULL);
+ if (ret_hold)
+ ret = ret_hold;
return ret;
}
static int imx335_update_test_pattern(struct imx335 *imx335, u32 pattern_index)
{
- int ret;
+ int ret = 0;
if (pattern_index >= ARRAY_SIZE(imx335_tpg_val))
return -EINVAL;
if (pattern_index) {
- const struct imx335_reg tpg_enable_regs[] = {
- { 0x3148, 0x10 },
- { 0x3280, 0x00 },
- { 0x329c, 0x01 },
- { 0x32a0, 0x11 },
- { 0x3302, 0x00 },
- { 0x3303, 0x00 },
- { 0x336c, 0x00 },
+ const struct cci_reg_sequence tpg_enable_regs[] = {
+ { IMX335_REG_TPG_TESTCLKEN, 0x10 },
+ { IMX335_REG_TPG_DIG_CLP_MODE, 0x00 },
+ { IMX335_REG_TPG_EN_DUOUT, 0x01 },
+ { IMX335_REG_TPG_COLORWIDTH, 0x11 },
+ { IMX335_REG_BLKLEVEL, 0x00 },
+ { IMX335_REG_WRJ_OPEN, 0x00 },
};
- ret = imx335_write_reg(imx335, IMX335_REG_TPG, 1,
- imx335_tpg_val[pattern_index]);
- if (ret)
- return ret;
+ cci_write(imx335->cci, IMX335_REG_TPG,
+ imx335_tpg_val[pattern_index], &ret);
- ret = imx335_write_regs(imx335, tpg_enable_regs,
- ARRAY_SIZE(tpg_enable_regs));
+ cci_multi_reg_write(imx335->cci, tpg_enable_regs,
+ ARRAY_SIZE(tpg_enable_regs), &ret);
} else {
- const struct imx335_reg tpg_disable_regs[] = {
- { 0x3148, 0x00 },
- { 0x3280, 0x01 },
- { 0x329c, 0x00 },
- { 0x32a0, 0x10 },
- { 0x3302, 0x32 },
- { 0x3303, 0x00 },
- { 0x336c, 0x01 },
+ const struct cci_reg_sequence tpg_disable_regs[] = {
+ { IMX335_REG_TPG_TESTCLKEN, 0x00 },
+ { IMX335_REG_TPG_DIG_CLP_MODE, 0x01 },
+ { IMX335_REG_TPG_EN_DUOUT, 0x00 },
+ { IMX335_REG_TPG_COLORWIDTH, 0x10 },
+ { IMX335_REG_BLKLEVEL, 0x32 },
+ { IMX335_REG_WRJ_OPEN, 0x01 },
};
- ret = imx335_write_regs(imx335, tpg_disable_regs,
- ARRAY_SIZE(tpg_disable_regs));
+ cci_multi_reg_write(imx335->cci, tpg_disable_regs,
+ ARRAY_SIZE(tpg_disable_regs), &ret);
}
return ret;
@@ -890,12 +824,14 @@ static int imx335_set_framefmt(struct imx335 *imx335)
{
switch (imx335->cur_mbus_code) {
case MEDIA_BUS_FMT_SRGGB10_1X10:
- return imx335_write_regs(imx335, raw10_framefmt_regs,
- ARRAY_SIZE(raw10_framefmt_regs));
+ return cci_multi_reg_write(imx335->cci, raw10_framefmt_regs,
+ ARRAY_SIZE(raw10_framefmt_regs),
+ NULL);
case MEDIA_BUS_FMT_SRGGB12_1X12:
- return imx335_write_regs(imx335, raw12_framefmt_regs,
- ARRAY_SIZE(raw12_framefmt_regs));
+ return cci_multi_reg_write(imx335->cci, raw12_framefmt_regs,
+ ARRAY_SIZE(raw12_framefmt_regs),
+ NULL);
}
return -EINVAL;
@@ -914,7 +850,8 @@ static int imx335_start_streaming(struct imx335 *imx335)
/* Setup PLL */
reg_list = &link_freq_reglist[__ffs(imx335->link_freq_bitmap)];
- ret = imx335_write_regs(imx335, reg_list->regs, reg_list->num_of_regs);
+ ret = cci_multi_reg_write(imx335->cci, reg_list->regs,
+ reg_list->num_of_regs, NULL);
if (ret) {
dev_err(imx335->dev, "%s failed to set plls\n", __func__);
return ret;
@@ -922,8 +859,8 @@ static int imx335_start_streaming(struct imx335 *imx335)
/* Write sensor mode registers */
reg_list = &imx335->cur_mode->reg_list;
- ret = imx335_write_regs(imx335, reg_list->regs,
- reg_list->num_of_regs);
+ ret = cci_multi_reg_write(imx335->cci, reg_list->regs,
+ reg_list->num_of_regs, NULL);
if (ret) {
dev_err(imx335->dev, "fail to write initial registers\n");
return ret;
@@ -936,6 +873,12 @@ static int imx335_start_streaming(struct imx335 *imx335)
return ret;
}
+ /* Configure lanes */
+ ret = cci_write(imx335->cci, IMX335_REG_LANEMODE,
+ imx335->lane_mode, NULL);
+ if (ret)
+ return ret;
+
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx335->sd.ctrl_handler);
if (ret) {
@@ -944,8 +887,8 @@ static int imx335_start_streaming(struct imx335 *imx335)
}
/* Start streaming */
- ret = imx335_write_reg(imx335, IMX335_REG_MODE_SELECT,
- 1, IMX335_MODE_STREAMING);
+ ret = cci_write(imx335->cci, IMX335_REG_MODE_SELECT,
+ IMX335_MODE_STREAMING, NULL);
if (ret) {
dev_err(imx335->dev, "fail to start streaming\n");
return ret;
@@ -965,8 +908,8 @@ static int imx335_start_streaming(struct imx335 *imx335)
*/
static int imx335_stop_streaming(struct imx335 *imx335)
{
- return imx335_write_reg(imx335, IMX335_REG_MODE_SELECT,
- 1, IMX335_MODE_STANDBY);
+ return cci_write(imx335->cci, IMX335_REG_MODE_SELECT,
+ IMX335_MODE_STANDBY, NULL);
}
/**
@@ -1017,14 +960,14 @@ error_unlock:
static int imx335_detect(struct imx335 *imx335)
{
int ret;
- u32 val;
+ u64 val;
- ret = imx335_read_reg(imx335, IMX335_REG_ID, 2, &val);
+ ret = cci_read(imx335->cci, IMX335_REG_ID, &val, NULL);
if (ret)
return ret;
if (val != IMX335_ID) {
- dev_err(imx335->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(imx335->dev, "chip id mismatch: %x!=%llx\n",
IMX335_ID, val);
return -ENXIO;
}
@@ -1096,7 +1039,14 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
if (ret)
return ret;
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX335_NUM_DATA_LANES) {
+ switch (bus_cfg.bus.mipi_csi2.num_data_lanes) {
+ case 2:
+ imx335->lane_mode = IMX335_2LANE;
+ break;
+ case 4:
+ imx335->lane_mode = IMX335_4LANE;
+ break;
+ default:
dev_err(imx335->dev,
"number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -1208,10 +1158,16 @@ static int imx335_init_controls(struct imx335 *imx335)
{
struct v4l2_ctrl_handler *ctrl_hdlr = &imx335->ctrl_handler;
const struct imx335_mode *mode = imx335->cur_mode;
+ struct v4l2_fwnode_device_properties props;
u32 lpfr;
int ret;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 7);
+ ret = v4l2_fwnode_device_parse(imx335->dev, &props);
+ if (ret)
+ return ret;
+
+ /* v4l2_fwnode_device_properties can add two more controls */
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
if (ret)
return ret;
@@ -1228,6 +1184,14 @@ static int imx335_init_controls(struct imx335 *imx335)
IMX335_EXPOSURE_STEP,
IMX335_EXPOSURE_DEFAULT);
+ /*
+ * The sensor has an analog gain and a digital gain, both controlled
+ * through a single gain value, expressed in 0.3dB increments. Values
+ * from 0.0dB (0) to 30.0dB (100) apply analog gain only, higher values
+ * up to 72.0dB (240) add further digital gain. Limit the range to
+ * analog gain only, support for digital gain can be added separately
+ * if needed.
+ */
imx335->again_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
&imx335_ctrl_ops,
V4L2_CID_ANALOGUE_GAIN,
@@ -1276,6 +1240,8 @@ static int imx335_init_controls(struct imx335 *imx335)
if (imx335->hblank_ctrl)
imx335->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx335_ctrl_ops, &props);
+
if (ctrl_hdlr->error) {
dev_err(imx335->dev, "control init failed: %d\n",
ctrl_hdlr->error);
@@ -1304,6 +1270,11 @@ static int imx335_probe(struct i2c_client *client)
return -ENOMEM;
imx335->dev = &client->dev;
+ imx335->cci = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx335->cci)) {
+ dev_err(imx335->dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx335->sd, client, &imx335_subdev_ops);
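
The v4l2-cci helpers take an optional int *err accumulator: once *err is non-zero the call becomes a no-op, so a register sequence needs only one error check at the end. A minimal sketch of the pattern used by the converted code, with hypothetical foo_* register names:

  #include <linux/types.h>
  #include <media/v4l2-cci.h>

  #define FOO_REG_HOLD    CCI_REG8(0x3001)
  #define FOO_REG_VMAX    CCI_REG24_LE(0x3030)

  static int foo_set_lpfr(struct regmap *cci, u32 lpfr)
  {
          int ret = 0, ret_hold;

          cci_write(cci, FOO_REG_HOLD, 1, &ret);     /* group hold on        */
          cci_write(cci, FOO_REG_VMAX, lpfr, &ret);  /* skipped if ret != 0  */

          /* release the hold even if an earlier write failed */
          ret_hold = cci_write(cci, FOO_REG_HOLD, 0, NULL);

          return ret ? ret : ret_hold;
  }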
diff --git a/drivers/media/i2c/max9271.h b/drivers/media/i2c/max9271.h
index dc5e4e70ba6f..0bf1d40811eb 100644
--- a/drivers/media/i2c/max9271.h
+++ b/drivers/media/i2c/max9271.h
@@ -8,6 +8,9 @@
* Copyright (C) 2015 Cogent Embedded, Inc.
*/
+#ifndef __MEDIA_I2C_MAX9271_H__
+#define __MEDIA_I2C_MAX9271_H__
+
#include <linux/i2c.h>
#define MAX9271_DEFAULT_ADDR 0x40
@@ -231,3 +234,5 @@ int max9271_set_deserializer_address(struct max9271_device *dev, u8 addr);
* Return 0 on success or a negative error code on failure
*/
int max9271_set_translation(struct max9271_device *dev, u8 source, u8 dest);
+
+#endif /* __MEDIA_I2C_MAX9271_H__ */
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index d685d445cf23..dfcb3fc03350 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -383,7 +383,7 @@ static int max9286_i2c_mux_init(struct max9286_priv *priv)
for_each_source(priv, source) {
unsigned int index = to_index(priv, source);
- ret = i2c_mux_add_adapter(priv->mux, 0, index, 0);
+ ret = i2c_mux_add_adapter(priv->mux, 0, index);
if (ret < 0)
goto error;
}
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 39d321e2b7f9..3ae0ea58668d 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -75,6 +75,8 @@
#define OV2680_ACTIVE_START_TOP 8
#define OV2680_MIN_CROP_WIDTH 2
#define OV2680_MIN_CROP_HEIGHT 2
+#define OV2680_MIN_VBLANK 4
+#define OV2680_MAX_VBLANK 0xffff
/* Fixed pre-div of 1/2 */
#define OV2680_PLL_PREDIV0 2
@@ -84,10 +86,7 @@
/* 66MHz pixel clock: 66MHz / 1704 * 1294 = 30fps */
#define OV2680_PIXELS_PER_LINE 1704
-#define OV2680_LINES_PER_FRAME 1294
-
-/* If possible send 16 extra rows / lines to the ISP as padding */
-#define OV2680_END_MARGIN 16
+#define OV2680_LINES_PER_FRAME_30FPS 1294
/* Max exposure time is VTS - 8 */
#define OV2680_INTEGRATION_TIME_MARGIN 8
@@ -130,6 +129,8 @@ struct ov2680_ctrls {
struct v4l2_ctrl *test_pattern;
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
};
struct ov2680_mode {
@@ -143,8 +144,6 @@ struct ov2680_mode {
u16 v_end;
u16 h_output_size;
u16 v_output_size;
- u16 hts;
- u16 vts;
};
struct ov2680_dev {
@@ -359,15 +358,11 @@ static void ov2680_calc_mode(struct ov2680_dev *sensor)
sensor->mode.v_start = (sensor->mode.crop.top +
(sensor->mode.crop.height - height) / 2) & ~1;
sensor->mode.h_end =
- min(sensor->mode.h_start + width + OV2680_END_MARGIN - 1,
- OV2680_NATIVE_WIDTH - 1);
+ min(sensor->mode.h_start + width - 1, OV2680_NATIVE_WIDTH - 1);
sensor->mode.v_end =
- min(sensor->mode.v_start + height + OV2680_END_MARGIN - 1,
- OV2680_NATIVE_HEIGHT - 1);
+ min(sensor->mode.v_start + height - 1, OV2680_NATIVE_HEIGHT - 1);
sensor->mode.h_output_size = orig_width;
sensor->mode.v_output_size = orig_height;
- sensor->mode.hts = OV2680_PIXELS_PER_LINE;
- sensor->mode.vts = OV2680_LINES_PER_FRAME;
}
static int ov2680_set_mode(struct ov2680_dev *sensor)
@@ -402,9 +397,8 @@ static int ov2680_set_mode(struct ov2680_dev *sensor)
cci_write(sensor->regmap, OV2680_REG_VERTICAL_OUTPUT_SIZE,
sensor->mode.v_output_size, &ret);
cci_write(sensor->regmap, OV2680_REG_TIMING_HTS,
- sensor->mode.hts, &ret);
- cci_write(sensor->regmap, OV2680_REG_TIMING_VTS,
- sensor->mode.vts, &ret);
+ OV2680_PIXELS_PER_LINE, &ret);
+ /* VTS gets set by the vblank ctrl */
cci_write(sensor->regmap, OV2680_REG_ISP_X_WIN, 0, &ret);
cci_write(sensor->regmap, OV2680_REG_ISP_Y_WIN, 0, &ret);
cci_write(sensor->regmap, OV2680_REG_X_INC, inc, &ret);
@@ -478,6 +472,15 @@ static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
NULL);
}
+static int ov2680_exposure_update_range(struct ov2680_dev *sensor)
+{
+ int exp_max = sensor->mode.fmt.height + sensor->ctrls.vblank->val -
+ OV2680_INTEGRATION_TIME_MARGIN;
+
+ return __v4l2_ctrl_modify_range(sensor->ctrls.exposure, 0, exp_max,
+ 1, exp_max);
+}
+
static int ov2680_stream_enable(struct ov2680_dev *sensor)
{
int ret;
@@ -644,7 +647,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *try_fmt;
const struct v4l2_rect *crop;
unsigned int width, height;
- int ret = 0;
+ int def, max, ret = 0;
crop = __ov2680_get_pad_crop(sensor, sd_state, format->pad,
format->which);
@@ -673,6 +676,27 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
sensor->mode.fmt = format->format;
ov2680_calc_mode(sensor);
+ /* vblank range is height dependent adjust and reset to default */
+ max = OV2680_MAX_VBLANK - height;
+ def = OV2680_LINES_PER_FRAME_30FPS - height;
+ ret = __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV2680_MIN_VBLANK,
+ max, 1, def);
+ if (ret)
+ goto unlock;
+
+ ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, def);
+ if (ret)
+ goto unlock;
+
+ /* exposure range depends on vts which may have changed */
+ ret = ov2680_exposure_update_range(sensor);
+ if (ret)
+ goto unlock;
+
+ /* adjust hblank value for new width */
+ def = OV2680_PIXELS_PER_LINE - width;
+ ret = __v4l2_ctrl_modify_range(sensor->ctrls.hblank, def, def, 1, def);
+
unlock:
mutex_unlock(&sensor->lock);
@@ -842,6 +866,13 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
struct ov2680_dev *sensor = to_ov2680_dev(sd);
int ret;
+ /* Update exposure range on vblank changes */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ ret = ov2680_exposure_update_range(sensor);
+ if (ret)
+ return ret;
+ }
+
/* Only apply changes to the controls if the device is powered up */
if (!pm_runtime_get_if_in_use(sensor->sd.dev)) {
ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
@@ -864,6 +895,10 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEST_PATTERN:
ret = ov2680_test_pattern_set(sensor, ctrl->val);
break;
+ case V4L2_CID_VBLANK:
+ ret = cci_write(sensor->regmap, OV2680_REG_TIMING_VTS,
+ sensor->mode.fmt.height + ctrl->val, NULL);
+ break;
default:
ret = -EINVAL;
break;
@@ -922,8 +957,8 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
const struct v4l2_ctrl_ops *ops = &ov2680_ctrl_ops;
struct ov2680_ctrls *ctrls = &sensor->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
- int exp_max = OV2680_LINES_PER_FRAME - OV2680_INTEGRATION_TIME_MARGIN;
- int ret = 0;
+ struct v4l2_fwnode_device_properties props;
+ int def, max, ret = 0;
v4l2_i2c_subdev_init(&sensor->sd, client, &ov2680_subdev_ops);
sensor->sd.internal_ops = &ov2680_internal_ops;
@@ -948,8 +983,9 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
ARRAY_SIZE(test_pattern_menu) - 1,
0, 0, test_pattern_menu);
+ max = OV2680_LINES_PER_FRAME_30FPS - OV2680_INTEGRATION_TIME_MARGIN;
ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
- 0, exp_max, 1, exp_max);
+ 0, max, 1, max);
ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
0, 1023, 1, 250);
@@ -960,6 +996,21 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
0, sensor->pixel_rate,
1, sensor->pixel_rate);
+ max = OV2680_MAX_VBLANK - OV2680_DEFAULT_HEIGHT;
+ def = OV2680_LINES_PER_FRAME_30FPS - OV2680_DEFAULT_HEIGHT;
+ ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ OV2680_MIN_VBLANK, max, 1, def);
+
+ def = OV2680_PIXELS_PER_LINE - OV2680_DEFAULT_WIDTH;
+ ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ def, def, 1, def);
+
+ ret = v4l2_fwnode_device_parse(sensor->dev, &props);
+ if (ret)
+ goto cleanup_entity;
+
+ v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+
if (hdl->error) {
ret = hdl->error;
goto cleanup_entity;
@@ -968,6 +1019,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
sensor->sd.ctrl_handler = hdl;
@@ -1116,25 +1168,24 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
sensor->pixel_rate = sensor->link_freq[0] * 2;
do_div(sensor->pixel_rate, 10);
- /* Verify bus cfg */
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) {
- ret = dev_err_probe(dev, -EINVAL,
- "only a 1-lane CSI2 config is supported");
- goto out_free_bus_cfg;
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_warn(dev, "Consider passing 'link-frequencies' in DT\n");
+ goto skip_link_freq_validation;
}
for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
if (bus_cfg.link_frequencies[i] == sensor->link_freq[0])
break;
- if (bus_cfg.nr_of_link_frequencies == 0 ||
- bus_cfg.nr_of_link_frequencies == i) {
+ if (bus_cfg.nr_of_link_frequencies == i) {
ret = dev_err_probe(dev, -EINVAL,
"supported link freq %lld not found\n",
sensor->link_freq[0]);
goto out_free_bus_cfg;
}
+skip_link_freq_validation:
+ ret = 0;
out_free_bus_cfg:
v4l2_fwnode_endpoint_free(&bus_cfg);
return ret;
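
With VBLANK exposed as a control, the frame length becomes height + vblank, and the exposure limit has to track it; __v4l2_ctrl_modify_range() keeps the exposure control's maximum in sync (called from s_ctrl and set_fmt with the control handler lock held, hence the __ prefix). A condensed sketch, with hypothetical names:

  #include <linux/types.h>
  #include <media/v4l2-ctrls.h>

  #define FOO_INTEGRATION_TIME_MARGIN 8

  /* Call with the control handler lock held. */
  static int foo_update_exposure_range(struct v4l2_ctrl *exposure,
                                       u32 height, u32 vblank)
  {
          int exp_max = height + vblank - FOO_INTEGRATION_TIME_MARGIN;

          return __v4l2_ctrl_modify_range(exposure, 0, exp_max, 1, exp_max);
  }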
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 552935ccb4a9..c48dbcde9877 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -768,14 +768,15 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
cur_mode = ov2740->cur_mode;
size = ARRAY_SIZE(link_freq_menu_items);
- ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
- V4L2_CID_LINK_FREQ,
- size - 1, 0,
- link_freq_menu_items);
+ ov2740->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_LINK_FREQ, size - 1,
+ ov2740->supported_modes->link_freq_index,
+ link_freq_menu_items);
if (ov2740->link_freq)
ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX);
+ pixel_rate = to_pixel_rate(ov2740->supported_modes->link_freq_index);
ov2740->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_PIXEL_RATE, 0,
pixel_rate, 1, pixel_rate);
@@ -1332,9 +1333,16 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, ret, "failed to check HW configuration\n");
ov2740->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(ov2740->reset_gpio))
+ if (IS_ERR(ov2740->reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
"failed to get reset GPIO\n");
+ } else if (ov2740->reset_gpio) {
+ /*
+ * Ensure reset is asserted for at least 20 ms before
+ * ov2740_resume() deasserts it.
+ */
+ msleep(20);
+ }
ov2740->clk = devm_clk_get_optional(dev, "clk");
if (IS_ERR(ov2740->clk))
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 403091651885..1c3a449f9354 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -3,7 +3,7 @@
* ov4689 driver
*
* Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
- * Copyright (C) 2022 Mikhail Rudenko
+ * Copyright (C) 2022, 2024 Mikhail Rudenko
*/
#include <linux/clk.h>
@@ -15,45 +15,86 @@
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-fwnode.h>
-#define CHIP_ID 0x004688
-#define OV4689_REG_CHIP_ID 0x300a
-
-#define OV4689_XVCLK_FREQ 24000000
-
-#define OV4689_REG_CTRL_MODE 0x0100
+#define OV4689_REG_CTRL_MODE CCI_REG8(0x0100)
#define OV4689_MODE_SW_STANDBY 0x0
#define OV4689_MODE_STREAMING BIT(0)
-#define OV4689_REG_EXPOSURE 0x3500
+#define OV4689_REG_CHIP_ID CCI_REG16(0x300a)
+#define CHIP_ID 0x004688
+
+#define OV4689_REG_EXPOSURE CCI_REG24(0x3500)
#define OV4689_EXPOSURE_MIN 4
#define OV4689_EXPOSURE_STEP 1
-#define OV4689_VTS_MAX 0x7fff
-#define OV4689_REG_GAIN_H 0x3508
-#define OV4689_REG_GAIN_L 0x3509
-#define OV4689_GAIN_H_MASK 0x07
-#define OV4689_GAIN_H_SHIFT 8
-#define OV4689_GAIN_L_MASK 0xff
+#define OV4689_REG_GAIN CCI_REG16(0x3508)
#define OV4689_GAIN_STEP 1
#define OV4689_GAIN_DEFAULT 0x80
-#define OV4689_REG_TEST_PATTERN 0x5040
-#define OV4689_TEST_PATTERN_ENABLE 0x80
-#define OV4689_TEST_PATTERN_DISABLE 0x0
+#define OV4689_REG_DIG_GAIN CCI_REG16(0x352a)
+#define OV4689_DIG_GAIN_MIN 1
+#define OV4689_DIG_GAIN_MAX 0x7fff
+#define OV4689_DIG_GAIN_STEP 1
+#define OV4689_DIG_GAIN_DEFAULT 0x800
-#define OV4689_REG_VTS 0x380e
+#define OV4689_REG_H_CROP_START CCI_REG16(0x3800)
+#define OV4689_REG_V_CROP_START CCI_REG16(0x3802)
+#define OV4689_REG_H_CROP_END CCI_REG16(0x3804)
+#define OV4689_REG_V_CROP_END CCI_REG16(0x3806)
+#define OV4689_REG_H_OUTPUT_SIZE CCI_REG16(0x3808)
+#define OV4689_REG_V_OUTPUT_SIZE CCI_REG16(0x380a)
-#define REG_NULL 0xFFFF
+#define OV4689_REG_HTS CCI_REG16(0x380c)
+#define OV4689_HTS_DIVIDER 4
+#define OV4689_HTS_MAX 0x7fff
-#define OV4689_REG_VALUE_08BIT 1
-#define OV4689_REG_VALUE_16BIT 2
-#define OV4689_REG_VALUE_24BIT 3
+#define OV4689_REG_VTS CCI_REG16(0x380e)
+#define OV4689_VTS_MAX 0x7fff
+
+#define OV4689_REG_H_WIN_OFF CCI_REG16(0x3810)
+#define OV4689_REG_V_WIN_OFF CCI_REG16(0x3812)
+
+#define OV4689_REG_TIMING_FORMAT1 CCI_REG8(0x3820) /* Vertical */
+#define OV4689_REG_TIMING_FORMAT2 CCI_REG8(0x3821) /* Horizontal */
+#define OV4689_TIMING_FLIP_MASK GENMASK(2, 1)
+#define OV4689_TIMING_FLIP_ARRAY BIT(1)
+#define OV4689_TIMING_FLIP_DIGITAL BIT(2)
+#define OV4689_TIMING_FLIP_BOTH (OV4689_TIMING_FLIP_ARRAY |\
+ OV4689_TIMING_FLIP_DIGITAL)
+
+#define OV4689_REG_ANCHOR_LEFT_START CCI_REG16(0x4020)
+#define OV4689_ANCHOR_LEFT_START_DEF 576
+#define OV4689_REG_ANCHOR_LEFT_END CCI_REG16(0x4022)
+#define OV4689_ANCHOR_LEFT_END_DEF 831
+#define OV4689_REG_ANCHOR_RIGHT_START CCI_REG16(0x4024)
+#define OV4689_ANCHOR_RIGHT_START_DEF 1984
+#define OV4689_REG_ANCHOR_RIGHT_END CCI_REG16(0x4026)
+#define OV4689_ANCHOR_RIGHT_END_DEF 2239
+
+#define OV4689_REG_VFIFO_CTRL_01 CCI_REG8(0x4601)
+
+#define OV4689_REG_WB_GAIN_RED CCI_REG16(0x500c)
+#define OV4689_REG_WB_GAIN_BLUE CCI_REG16(0x5010)
+#define OV4689_WB_GAIN_MIN 1
+#define OV4689_WB_GAIN_MAX 0xfff
+#define OV4689_WB_GAIN_STEP 1
+#define OV4689_WB_GAIN_DEFAULT 0x400
+
+#define OV4689_REG_TEST_PATTERN CCI_REG8(0x5040)
+#define OV4689_TEST_PATTERN_ENABLE 0x80
+#define OV4689_TEST_PATTERN_DISABLE 0x0
#define OV4689_LANES 4
+#define OV4689_XVCLK_FREQ 24000000
+
+#define OV4689_PIXEL_ARRAY_WIDTH 2720
+#define OV4689_PIXEL_ARRAY_HEIGHT 1536
+#define OV4689_DUMMY_ROWS 8 /* 8 dummy rows on each side */
+#define OV4689_DUMMY_COLUMNS 16 /* 16 dummy columns on each side */
static const char *const ov4689_supply_names[] = {
"avdd", /* Analog power */
@@ -61,11 +102,6 @@ static const char *const ov4689_supply_names[] = {
"dvdd", /* Digital core power */
};
-struct regval {
- u16 addr;
- u8 val;
-};
-
enum ov4689_mode_id {
OV4689_MODE_2688_1520 = 0,
OV4689_NUM_MODES,
@@ -75,20 +111,18 @@ struct ov4689_mode {
enum ov4689_mode_id id;
u32 width;
u32 height;
- u32 max_fps;
u32 hts_def;
+ u32 hts_min;
u32 vts_def;
u32 exp_def;
u32 pixel_rate;
- u32 sensor_width;
- u32 sensor_height;
- u32 crop_top;
- u32 crop_left;
- const struct regval *reg_list;
+ const struct cci_reg_sequence *reg_list;
+ unsigned int num_regs;
};
struct ov4689 {
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
struct clk *xvclk;
struct gpio_desc *reset_gpio;
struct gpio_desc *pwdn_gpio;
@@ -99,7 +133,6 @@ struct ov4689 {
u32 clock_rate;
- struct mutex mutex; /* lock to protect ctrls and cur_mode */
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *exposure;
@@ -119,95 +152,108 @@ struct ov4689_gain_range {
/*
* Xclk 24Mhz
- * max_framerate 30fps
+ * max_framerate 90fps
* mipi_datarate per lane 1008Mbps
*/
-static const struct regval ov4689_2688x1520_regs[] = {
- {0x0103, 0x01}, {0x3638, 0x00}, {0x0300, 0x00},
- {0x0302, 0x2a}, {0x0303, 0x00}, {0x0304, 0x03},
- {0x030b, 0x00}, {0x030d, 0x1e}, {0x030e, 0x04},
- {0x030f, 0x01}, {0x0312, 0x01}, {0x031e, 0x00},
- {0x3000, 0x20}, {0x3002, 0x00}, {0x3018, 0x72},
- {0x3020, 0x93}, {0x3021, 0x03}, {0x3022, 0x01},
- {0x3031, 0x0a}, {0x303f, 0x0c}, {0x3305, 0xf1},
- {0x3307, 0x04}, {0x3309, 0x29}, {0x3500, 0x00},
- {0x3501, 0x60}, {0x3502, 0x00}, {0x3503, 0x04},
- {0x3504, 0x00}, {0x3505, 0x00}, {0x3506, 0x00},
- {0x3507, 0x00}, {0x3508, 0x00}, {0x3509, 0x80},
- {0x350a, 0x00}, {0x350b, 0x00}, {0x350c, 0x00},
- {0x350d, 0x00}, {0x350e, 0x00}, {0x350f, 0x80},
- {0x3510, 0x00}, {0x3511, 0x00}, {0x3512, 0x00},
- {0x3513, 0x00}, {0x3514, 0x00}, {0x3515, 0x80},
- {0x3516, 0x00}, {0x3517, 0x00}, {0x3518, 0x00},
- {0x3519, 0x00}, {0x351a, 0x00}, {0x351b, 0x80},
- {0x351c, 0x00}, {0x351d, 0x00}, {0x351e, 0x00},
- {0x351f, 0x00}, {0x3520, 0x00}, {0x3521, 0x80},
- {0x3522, 0x08}, {0x3524, 0x08}, {0x3526, 0x08},
- {0x3528, 0x08}, {0x352a, 0x08}, {0x3602, 0x00},
- {0x3603, 0x40}, {0x3604, 0x02}, {0x3605, 0x00},
- {0x3606, 0x00}, {0x3607, 0x00}, {0x3609, 0x12},
- {0x360a, 0x40}, {0x360c, 0x08}, {0x360f, 0xe5},
- {0x3608, 0x8f}, {0x3611, 0x00}, {0x3613, 0xf7},
- {0x3616, 0x58}, {0x3619, 0x99}, {0x361b, 0x60},
- {0x361c, 0x7a}, {0x361e, 0x79}, {0x361f, 0x02},
- {0x3632, 0x00}, {0x3633, 0x10}, {0x3634, 0x10},
- {0x3635, 0x10}, {0x3636, 0x15}, {0x3646, 0x86},
- {0x364a, 0x0b}, {0x3700, 0x17}, {0x3701, 0x22},
- {0x3703, 0x10}, {0x370a, 0x37}, {0x3705, 0x00},
- {0x3706, 0x63}, {0x3709, 0x3c}, {0x370b, 0x01},
- {0x370c, 0x30}, {0x3710, 0x24}, {0x3711, 0x0c},
- {0x3716, 0x00}, {0x3720, 0x28}, {0x3729, 0x7b},
- {0x372a, 0x84}, {0x372b, 0xbd}, {0x372c, 0xbc},
- {0x372e, 0x52}, {0x373c, 0x0e}, {0x373e, 0x33},
- {0x3743, 0x10}, {0x3744, 0x88}, {0x3745, 0xc0},
- {0x374a, 0x43}, {0x374c, 0x00}, {0x374e, 0x23},
- {0x3751, 0x7b}, {0x3752, 0x84}, {0x3753, 0xbd},
- {0x3754, 0xbc}, {0x3756, 0x52}, {0x375c, 0x00},
- {0x3760, 0x00}, {0x3761, 0x00}, {0x3762, 0x00},
- {0x3763, 0x00}, {0x3764, 0x00}, {0x3767, 0x04},
- {0x3768, 0x04}, {0x3769, 0x08}, {0x376a, 0x08},
- {0x376b, 0x20}, {0x376c, 0x00}, {0x376d, 0x00},
- {0x376e, 0x00}, {0x3773, 0x00}, {0x3774, 0x51},
- {0x3776, 0xbd}, {0x3777, 0xbd}, {0x3781, 0x18},
- {0x3783, 0x25}, {0x3798, 0x1b}, {0x3800, 0x00},
- {0x3801, 0x08}, {0x3802, 0x00}, {0x3803, 0x04},
- {0x3804, 0x0a}, {0x3805, 0x97}, {0x3806, 0x05},
- {0x3807, 0xfb}, {0x3808, 0x0a}, {0x3809, 0x80},
- {0x380a, 0x05}, {0x380b, 0xf0}, {0x380c, 0x0a},
- {0x380d, 0x0e}, {0x380e, 0x06}, {0x380f, 0x12},
- {0x3810, 0x00}, {0x3811, 0x08}, {0x3812, 0x00},
- {0x3813, 0x04}, {0x3814, 0x01}, {0x3815, 0x01},
- {0x3819, 0x01}, {0x3820, 0x00}, {0x3821, 0x06},
- {0x3829, 0x00}, {0x382a, 0x01}, {0x382b, 0x01},
- {0x382d, 0x7f}, {0x3830, 0x04}, {0x3836, 0x01},
- {0x3837, 0x00}, {0x3841, 0x02}, {0x3846, 0x08},
- {0x3847, 0x07}, {0x3d85, 0x36}, {0x3d8c, 0x71},
- {0x3d8d, 0xcb}, {0x3f0a, 0x00}, {0x4000, 0xf1},
- {0x4001, 0x40}, {0x4002, 0x04}, {0x4003, 0x14},
- {0x400e, 0x00}, {0x4011, 0x00}, {0x401a, 0x00},
- {0x401b, 0x00}, {0x401c, 0x00}, {0x401d, 0x00},
- {0x401f, 0x00}, {0x4020, 0x00}, {0x4021, 0x10},
- {0x4022, 0x07}, {0x4023, 0xcf}, {0x4024, 0x09},
- {0x4025, 0x60}, {0x4026, 0x09}, {0x4027, 0x6f},
- {0x4028, 0x00}, {0x4029, 0x02}, {0x402a, 0x06},
- {0x402b, 0x04}, {0x402c, 0x02}, {0x402d, 0x02},
- {0x402e, 0x0e}, {0x402f, 0x04}, {0x4302, 0xff},
- {0x4303, 0xff}, {0x4304, 0x00}, {0x4305, 0x00},
- {0x4306, 0x00}, {0x4308, 0x02}, {0x4500, 0x6c},
- {0x4501, 0xc4}, {0x4502, 0x40}, {0x4503, 0x01},
- {0x4601, 0xa7}, {0x4800, 0x04}, {0x4813, 0x08},
- {0x481f, 0x40}, {0x4829, 0x78}, {0x4837, 0x10},
- {0x4b00, 0x2a}, {0x4b0d, 0x00}, {0x4d00, 0x04},
- {0x4d01, 0x42}, {0x4d02, 0xd1}, {0x4d03, 0x93},
- {0x4d04, 0xf5}, {0x4d05, 0xc1}, {0x5000, 0xf3},
- {0x5001, 0x11}, {0x5004, 0x00}, {0x500a, 0x00},
- {0x500b, 0x00}, {0x5032, 0x00}, {0x5040, 0x00},
- {0x5050, 0x0c}, {0x5500, 0x00}, {0x5501, 0x10},
- {0x5502, 0x01}, {0x5503, 0x0f}, {0x8000, 0x00},
- {0x8001, 0x00}, {0x8002, 0x00}, {0x8003, 0x00},
- {0x8004, 0x00}, {0x8005, 0x00}, {0x8006, 0x00},
- {0x8007, 0x00}, {0x8008, 0x00}, {0x3638, 0x00},
- {REG_NULL, 0x00},
+static const struct cci_reg_sequence ov4689_2688x1520_regs[] = {
+ /* System control*/
+ { CCI_REG8(0x0103), 0x01 }, /* SC_CTRL0103 software_reset = 1 */
+ { CCI_REG8(0x3000), 0x20 }, /* SC_CMMN_PAD_OEN0 FSIN_output_enable = 1 */
+ { CCI_REG8(0x3021), 0x03 }, /*
+ * SC_CMMN_MISC_CTRL fst_stby_ctr = 0,
+ * sleep_no_latch_enable = 0
+ */
+
+ /* AEC PK */
+ { CCI_REG8(0x3503), 0x04 }, /* AEC_MANUAL gain_input_as_sensor_gain_format = 1 */
+
+ /* ADC and analog control*/
+ { CCI_REG8(0x3603), 0x40 },
+ { CCI_REG8(0x3604), 0x02 },
+ { CCI_REG8(0x3609), 0x12 },
+ { CCI_REG8(0x360c), 0x08 },
+ { CCI_REG8(0x360f), 0xe5 },
+ { CCI_REG8(0x3608), 0x8f },
+ { CCI_REG8(0x3611), 0x00 },
+ { CCI_REG8(0x3613), 0xf7 },
+ { CCI_REG8(0x3616), 0x58 },
+ { CCI_REG8(0x3619), 0x99 },
+ { CCI_REG8(0x361b), 0x60 },
+ { CCI_REG8(0x361e), 0x79 },
+ { CCI_REG8(0x3634), 0x10 },
+ { CCI_REG8(0x3635), 0x10 },
+ { CCI_REG8(0x3636), 0x15 },
+ { CCI_REG8(0x3646), 0x86 },
+ { CCI_REG8(0x364a), 0x0b },
+
+ /* Sensor control */
+ { CCI_REG8(0x3700), 0x17 },
+ { CCI_REG8(0x3701), 0x22 },
+ { CCI_REG8(0x3703), 0x10 },
+ { CCI_REG8(0x370a), 0x37 },
+ { CCI_REG8(0x3706), 0x63 },
+ { CCI_REG8(0x3709), 0x3c },
+ { CCI_REG8(0x370c), 0x30 },
+ { CCI_REG8(0x3710), 0x24 },
+ { CCI_REG8(0x3720), 0x28 },
+ { CCI_REG8(0x3729), 0x7b },
+ { CCI_REG8(0x372b), 0xbd },
+ { CCI_REG8(0x372c), 0xbc },
+ { CCI_REG8(0x372e), 0x52 },
+ { CCI_REG8(0x373c), 0x0e },
+ { CCI_REG8(0x373e), 0x33 },
+ { CCI_REG8(0x3743), 0x10 },
+ { CCI_REG8(0x3744), 0x88 },
+ { CCI_REG8(0x3745), 0xc0 },
+ { CCI_REG8(0x374c), 0x00 },
+ { CCI_REG8(0x374e), 0x23 },
+ { CCI_REG8(0x3751), 0x7b },
+ { CCI_REG8(0x3753), 0xbd },
+ { CCI_REG8(0x3754), 0xbc },
+ { CCI_REG8(0x3756), 0x52 },
+ { CCI_REG8(0x376b), 0x20 },
+ { CCI_REG8(0x3774), 0x51 },
+ { CCI_REG8(0x3776), 0xbd },
+ { CCI_REG8(0x3777), 0xbd },
+ { CCI_REG8(0x3781), 0x18 },
+ { CCI_REG8(0x3783), 0x25 },
+ { CCI_REG8(0x3798), 0x1b },
+
+ /* Timing control */
+ { CCI_REG8(0x3819), 0x01 }, /* VSYNC_END_L vsync_end_point[7:0] = 0x01 */
+
+ /* OTP control */
+ { CCI_REG8(0x3d85), 0x36 }, /* OTP_REG85 OTP_power_up_load_setting_enable = 1,
+ * OTP_power_up_load_data_enable = 1,
+ * OTP_bist_select = 1 (compare with zero)
+ */
+ { CCI_REG8(0x3d8c), 0x71 }, /* OTP_SETTING_STT_ADDRESS_H */
+ { CCI_REG8(0x3d8d), 0xcb }, /* OTP_SETTING_STT_ADDRESS_L */
+
+ /* BLC registers*/
+ { CCI_REG8(0x4001), 0x40 }, /* DEBUG_MODE */
+ { CCI_REG8(0x401b), 0x00 }, /* DEBUG_MODE */
+ { CCI_REG8(0x401d), 0x00 }, /* DEBUG_MODE */
+ { CCI_REG8(0x401f), 0x00 }, /* DEBUG_MODE */
+
+ /* ADC sync control */
+ { CCI_REG8(0x4500), 0x6c }, /* ADC_SYNC_CTRL */
+ { CCI_REG8(0x4503), 0x01 }, /* ADC_SYNC_CTRL */
+
+ /* Temperature monitor */
+ { CCI_REG8(0x4d00), 0x04 }, /* TPM_CTRL_00 tmp_slope[15:8] = 0x04 */
+ { CCI_REG8(0x4d01), 0x42 }, /* TPM_CTRL_01 tmp_slope[7:0] = 0x42 */
+ { CCI_REG8(0x4d02), 0xd1 }, /* TPM_CTRL_02 tpm_offset[31:24] = 0xd1 */
+ { CCI_REG8(0x4d03), 0x93 }, /* TPM_CTRL_03 tpm_offset[23:16] = 0x93 */
+ { CCI_REG8(0x4d04), 0xf5 }, /* TPM_CTRL_04 tpm_offset[15:8] = 0xf5 */
+ { CCI_REG8(0x4d05), 0xc1 }, /* TPM_CTRL_05 tpm_offset[7:0] = 0xc1 */
+
+ /* pre-ISP control */
+ { CCI_REG8(0x5050), 0x0c }, /* DEBUG_MODE */
+
+ /* OTP-DPC control */
+ { CCI_REG8(0x5501), 0x10 }, /* OTP_DPC_START_L otp_start_address[7:0] = 0x10 */
+ { CCI_REG8(0x5503), 0x0f }, /* OTP_DPC_END_L otp_end_address[7:0] = 0x0f */
};
static const struct ov4689_mode supported_modes[] = {
@@ -215,16 +261,13 @@ static const struct ov4689_mode supported_modes[] = {
.id = OV4689_MODE_2688_1520,
.width = 2688,
.height = 1520,
- .sensor_width = 2720,
- .sensor_height = 1536,
- .crop_top = 8,
- .crop_left = 16,
- .max_fps = 30,
.exp_def = 1536,
- .hts_def = 4 * 2574,
+ .hts_def = 10296,
+ .hts_min = 3432,
.vts_def = 1554,
.pixel_rate = 480000000,
.reg_list = ov4689_2688x1520_regs,
+ .num_regs = ARRAY_SIZE(ov4689_2688x1520_regs),
},
};
@@ -277,83 +320,6 @@ static const struct ov4689_gain_range ov4689_gain_ranges[] = {
},
};
-/* Write registers up to 4 at a time */
-static int ov4689_write_reg(struct i2c_client *client, u16 reg, u32 len,
- u32 val)
-{
- u32 buf_i, val_i;
- __be32 val_be;
- u8 *val_p;
- u8 buf[6];
-
- if (len > 4)
- return -EINVAL;
-
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
-
- val_be = cpu_to_be32(val);
- val_p = (u8 *)&val_be;
- buf_i = 2;
- val_i = 4 - len;
-
- while (val_i < 4)
- buf[buf_i++] = val_p[val_i++];
-
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
-
- return 0;
-}
-
-static int ov4689_write_array(struct i2c_client *client,
- const struct regval *regs)
-{
- int ret = 0;
- u32 i;
-
- for (i = 0; ret == 0 && regs[i].addr != REG_NULL; i++)
- ret = ov4689_write_reg(client, regs[i].addr,
- OV4689_REG_VALUE_08BIT, regs[i].val);
-
- return ret;
-}
-
-/* Read registers up to 4 at a time */
-static int ov4689_read_reg(struct i2c_client *client, u16 reg, unsigned int len,
- u32 *val)
-{
- __be16 reg_addr_be = cpu_to_be16(reg);
- struct i2c_msg msgs[2];
- __be32 data_be = 0;
- u8 *data_be_p;
- int ret;
-
- if (len > 4 || !len)
- return -EINVAL;
-
- data_be_p = (u8 *)&data_be;
- /* Write register address */
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = 2;
- msgs[0].buf = (u8 *)&reg_addr_be;
-
- /* Read data from register */
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_be_p[4 - len];
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs))
- return -EIO;
-
- *val = be32_to_cpu(data_be);
-
- return 0;
-}
-
static void ov4689_fill_fmt(const struct ov4689_mode *mode,
struct v4l2_mbus_framefmt *fmt)
{
@@ -376,19 +342,6 @@ static int ov4689_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ov4689_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct v4l2_mbus_framefmt *mbus_fmt = &fmt->format;
- struct ov4689 *ov4689 = to_ov4689(sd);
-
- /* only one mode supported for now */
- ov4689_fill_fmt(ov4689->cur_mode, mbus_fmt);
-
- return 0;
-}
-
static int ov4689_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -427,16 +380,14 @@ static int ov4689_enable_test_pattern(struct ov4689 *ov4689, u32 pattern)
else
val = OV4689_TEST_PATTERN_DISABLE;
- return ov4689_write_reg(ov4689->client, OV4689_REG_TEST_PATTERN,
- OV4689_REG_VALUE_08BIT, val);
+ return cci_write(ov4689->regmap, OV4689_REG_TEST_PATTERN,
+ val, NULL);
}
static int ov4689_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
- const struct ov4689_mode *mode = to_ov4689(sd)->cur_mode;
-
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
@@ -444,63 +395,114 @@ static int ov4689_get_selection(struct v4l2_subdev *sd,
case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.top = 0;
sel->r.left = 0;
- sel->r.width = mode->sensor_width;
- sel->r.height = mode->sensor_height;
+ sel->r.width = OV4689_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV4689_PIXEL_ARRAY_HEIGHT;
return 0;
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = mode->crop_top;
- sel->r.left = mode->crop_left;
- sel->r.width = mode->width;
- sel->r.height = mode->height;
+ sel->r.top = OV4689_DUMMY_ROWS;
+ sel->r.left = OV4689_DUMMY_COLUMNS;
+ sel->r.width =
+ OV4689_PIXEL_ARRAY_WIDTH - 2 * OV4689_DUMMY_COLUMNS;
+ sel->r.height =
+ OV4689_PIXEL_ARRAY_HEIGHT - 2 * OV4689_DUMMY_ROWS;
return 0;
}
return -EINVAL;
}
+static int ov4689_setup_timings(struct ov4689 *ov4689)
+{
+ const struct ov4689_mode *mode = ov4689->cur_mode;
+ struct regmap *rm = ov4689->regmap;
+ int ret = 0;
+
+ cci_write(rm, OV4689_REG_H_CROP_START, 8, &ret);
+ cci_write(rm, OV4689_REG_V_CROP_START, 8, &ret);
+ cci_write(rm, OV4689_REG_H_CROP_END, 2711, &ret);
+ cci_write(rm, OV4689_REG_V_CROP_END, 1531, &ret);
+
+ cci_write(rm, OV4689_REG_H_OUTPUT_SIZE, mode->width, &ret);
+ cci_write(rm, OV4689_REG_V_OUTPUT_SIZE, mode->height, &ret);
+
+ cci_write(rm, OV4689_REG_H_WIN_OFF, 8, &ret);
+ cci_write(rm, OV4689_REG_V_WIN_OFF, 4, &ret);
+
+ cci_write(rm, OV4689_REG_VFIFO_CTRL_01, 167, &ret);
+
+ return ret;
+}
+
+static int ov4689_setup_blc_anchors(struct ov4689 *ov4689)
+{
+ struct regmap *rm = ov4689->regmap;
+ int ret = 0;
+
+ cci_write(rm, OV4689_REG_ANCHOR_LEFT_START, 16, &ret);
+ cci_write(rm, OV4689_REG_ANCHOR_LEFT_END, 1999, &ret);
+ cci_write(rm, OV4689_REG_ANCHOR_RIGHT_START, 2400, &ret);
+ cci_write(rm, OV4689_REG_ANCHOR_RIGHT_END, 2415, &ret);
+
+ return ret;
+}
+
static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
{
struct ov4689 *ov4689 = to_ov4689(sd);
- struct i2c_client *client = ov4689->client;
+ struct v4l2_subdev_state *sd_state;
+ struct device *dev = ov4689->dev;
int ret = 0;
- mutex_lock(&ov4689->mutex);
+ sd_state = v4l2_subdev_lock_and_get_active_state(&ov4689->subdev);
if (on) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto unlock_and_return;
- ret = ov4689_write_array(ov4689->client,
- ov4689->cur_mode->reg_list);
+ ret = cci_multi_reg_write(ov4689->regmap,
+ ov4689->cur_mode->reg_list,
+ ov4689->cur_mode->num_regs,
+ NULL);
+ if (ret) {
+ pm_runtime_put(dev);
+ goto unlock_and_return;
+ }
+
+ ret = ov4689_setup_timings(ov4689);
if (ret) {
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
+ goto unlock_and_return;
+ }
+
+ ret = ov4689_setup_blc_anchors(ov4689);
+ if (ret) {
+ pm_runtime_put(dev);
goto unlock_and_return;
}
ret = __v4l2_ctrl_handler_setup(&ov4689->ctrl_handler);
if (ret) {
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
goto unlock_and_return;
}
- ret = ov4689_write_reg(ov4689->client, OV4689_REG_CTRL_MODE,
- OV4689_REG_VALUE_08BIT,
- OV4689_MODE_STREAMING);
+ ret = cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE,
+ OV4689_MODE_STREAMING, NULL);
if (ret) {
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
goto unlock_and_return;
}
} else {
- ov4689_write_reg(ov4689->client, OV4689_REG_CTRL_MODE,
- OV4689_REG_VALUE_08BIT,
- OV4689_MODE_SW_STANDBY);
- pm_runtime_put(&client->dev);
+ cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE,
+ OV4689_MODE_SW_STANDBY, NULL);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
unlock_and_return:
- mutex_unlock(&ov4689->mutex);
+ v4l2_subdev_unlock_state(sd_state);
return ret;
}
@@ -563,18 +565,13 @@ static int __maybe_unused ov4689_power_off(struct device *dev)
return 0;
}
-static int ov4689_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int ov4689_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
- struct ov4689 *ov4689 = to_ov4689(sd);
- struct v4l2_mbus_framefmt *try_fmt;
-
- mutex_lock(&ov4689->mutex);
-
- try_fmt = v4l2_subdev_state_get_format(fh->state, 0);
- /* Initialize try_fmt */
- ov4689_fill_fmt(&supported_modes[OV4689_MODE_2688_1520], try_fmt);
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_state_get_format(sd_state, 0);
- mutex_unlock(&ov4689->mutex);
+ ov4689_fill_fmt(&supported_modes[OV4689_MODE_2688_1520], fmt);
return 0;
}
@@ -583,10 +580,6 @@ static const struct dev_pm_ops ov4689_pm_ops = {
SET_RUNTIME_PM_OPS(ov4689_power_off, ov4689_power_on, NULL)
};
-static const struct v4l2_subdev_internal_ops ov4689_internal_ops = {
- .open = ov4689_open,
-};
-
static const struct v4l2_subdev_video_ops ov4689_video_ops = {
.s_stream = ov4689_s_stream,
};
@@ -594,11 +587,15 @@ static const struct v4l2_subdev_video_ops ov4689_video_ops = {
static const struct v4l2_subdev_pad_ops ov4689_pad_ops = {
.enum_mbus_code = ov4689_enum_mbus_code,
.enum_frame_size = ov4689_enum_frame_sizes,
- .get_fmt = ov4689_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = ov4689_set_fmt,
.get_selection = ov4689_get_selection,
};
+static const struct v4l2_subdev_internal_ops ov4689_internal_ops = {
+ .init_state = ov4689_init_state,
+};
+
static const struct v4l2_subdev_ops ov4689_subdev_ops = {
.video = &ov4689_video_ops,
.pad = &ov4689_pad_ops,
@@ -610,7 +607,6 @@ static const struct v4l2_subdev_ops ov4689_subdev_ops = {
*/
static int ov4689_map_gain(struct ov4689 *ov4689, int logical_gain, int *result)
{
- const struct device *dev = &ov4689->client->dev;
const struct ov4689_gain_range *range;
unsigned int n;
@@ -621,7 +617,8 @@ static int ov4689_map_gain(struct ov4689 *ov4689, int logical_gain, int *result)
}
if (n == ARRAY_SIZE(ov4689_gain_ranges)) {
- dev_warn_ratelimited(dev, "no mapping found for gain %d\n",
+ dev_warn_ratelimited(ov4689->dev,
+ "no mapping found for gain %d\n",
logical_gain);
return -EINVAL;
}
@@ -637,10 +634,11 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov4689 *ov4689 =
container_of(ctrl->handler, struct ov4689, ctrl_handler);
- struct i2c_client *client = ov4689->client;
- int sensor_gain;
+ struct regmap *regmap = ov4689->regmap;
+ struct device *dev = ov4689->dev;
+ int sensor_gain = 0;
s64 max_expo;
- int ret;
+ int ret = 0;
/* Propagate change of current control to all related controls */
switch (ctrl->id) {
@@ -654,44 +652,58 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(dev))
return 0;
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- /* 4 least significant bits of expsoure are fractional part */
- ret = ov4689_write_reg(ov4689->client, OV4689_REG_EXPOSURE,
- OV4689_REG_VALUE_24BIT, ctrl->val << 4);
+ /* 4 least significant bits of exposure are fractional part */
+ cci_write(regmap, OV4689_REG_EXPOSURE, ctrl->val << 4, &ret);
break;
case V4L2_CID_ANALOGUE_GAIN:
ret = ov4689_map_gain(ov4689, ctrl->val, &sensor_gain);
-
- ret = ret ?:
- ov4689_write_reg(ov4689->client, OV4689_REG_GAIN_H,
- OV4689_REG_VALUE_08BIT,
- (sensor_gain >> OV4689_GAIN_H_SHIFT) &
- OV4689_GAIN_H_MASK);
- ret = ret ?:
- ov4689_write_reg(ov4689->client, OV4689_REG_GAIN_L,
- OV4689_REG_VALUE_08BIT,
- sensor_gain & OV4689_GAIN_L_MASK);
+ cci_write(regmap, OV4689_REG_GAIN, sensor_gain, &ret);
break;
case V4L2_CID_VBLANK:
- ret = ov4689_write_reg(ov4689->client, OV4689_REG_VTS,
- OV4689_REG_VALUE_16BIT,
- ctrl->val + ov4689->cur_mode->height);
+ cci_write(regmap, OV4689_REG_VTS,
+ ctrl->val + ov4689->cur_mode->height, &ret);
break;
case V4L2_CID_TEST_PATTERN:
ret = ov4689_enable_test_pattern(ov4689, ctrl->val);
break;
+ case V4L2_CID_HBLANK:
+ cci_write(regmap, OV4689_REG_HTS,
+ (ctrl->val + ov4689->cur_mode->width) /
+ OV4689_HTS_DIVIDER, &ret);
+ break;
+ case V4L2_CID_VFLIP:
+ cci_update_bits(regmap, OV4689_REG_TIMING_FORMAT1,
+ OV4689_TIMING_FLIP_MASK,
+ ctrl->val ? OV4689_TIMING_FLIP_BOTH : 0, &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ cci_update_bits(regmap, OV4689_REG_TIMING_FORMAT2,
+ OV4689_TIMING_FLIP_MASK,
+ ctrl->val ? 0 : OV4689_TIMING_FLIP_BOTH, &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ cci_write(regmap, OV4689_REG_DIG_GAIN, ctrl->val, &ret);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ cci_write(regmap, OV4689_REG_WB_GAIN_RED, ctrl->val, &ret);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ cci_write(regmap, OV4689_REG_WB_GAIN_BLUE, ctrl->val, &ret);
+ break;
default:
- dev_warn(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
+ dev_warn(dev, "%s Unhandled id:0x%x, val:0x%x\n",
__func__, ctrl->id, ctrl->val);
ret = -EINVAL;
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -707,16 +719,15 @@ static int ov4689_initialize_controls(struct ov4689 *ov4689)
struct v4l2_ctrl_handler *handler;
const struct ov4689_mode *mode;
s64 exposure_max, vblank_def;
+ s64 hblank_def, hblank_min;
struct v4l2_ctrl *ctrl;
- s64 h_blank_def;
int ret;
handler = &ov4689->ctrl_handler;
mode = ov4689->cur_mode;
- ret = v4l2_ctrl_handler_init(handler, 10);
+ ret = v4l2_ctrl_handler_init(handler, 15);
if (ret)
return ret;
- handler->lock = &ov4689->mutex;
ctrl = v4l2_ctrl_new_int_menu(handler, NULL, V4L2_CID_LINK_FREQ, 0, 0,
link_freq_menu_items);
@@ -726,11 +737,11 @@ static int ov4689_initialize_controls(struct ov4689 *ov4689)
v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, 0,
mode->pixel_rate, 1, mode->pixel_rate);
- h_blank_def = mode->hts_def - mode->width;
- ctrl = v4l2_ctrl_new_std(handler, NULL, V4L2_CID_HBLANK, h_blank_def,
- h_blank_def, 1, h_blank_def);
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ hblank_def = mode->hts_def - mode->width;
+ hblank_min = mode->hts_min - mode->width;
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_HBLANK,
+ hblank_min, OV4689_HTS_MAX - mode->width,
+ OV4689_HTS_DIVIDER, hblank_def);
vblank_def = mode->vts_def - mode->height;
v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_VBLANK,
@@ -754,10 +765,24 @@ static int ov4689_initialize_controls(struct ov4689 *ov4689)
ARRAY_SIZE(ov4689_test_pattern_menu) - 1,
0, 0, ov4689_test_pattern_menu);
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV4689_DIG_GAIN_MIN, OV4689_DIG_GAIN_MAX,
+ OV4689_DIG_GAIN_STEP, OV4689_DIG_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_RED_BALANCE,
+ OV4689_WB_GAIN_MIN, OV4689_WB_GAIN_MAX,
+ OV4689_WB_GAIN_STEP, OV4689_WB_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(handler, &ov4689_ctrl_ops, V4L2_CID_BLUE_BALANCE,
+ OV4689_WB_GAIN_MIN, OV4689_WB_GAIN_MAX,
+ OV4689_WB_GAIN_STEP, OV4689_WB_GAIN_DEFAULT);
+
if (handler->error) {
ret = handler->error;
- dev_err(&ov4689->client->dev, "Failed to init controls(%d)\n",
- ret);
+ dev_err(ov4689->dev, "Failed to init controls(%d)\n", ret);
goto err_free_handler;
}
@@ -783,19 +808,18 @@ err_free_handler:
static int ov4689_check_sensor_id(struct ov4689 *ov4689,
struct i2c_client *client)
{
- struct device *dev = &ov4689->client->dev;
- u32 id = 0;
+ struct device *dev = ov4689->dev;
+ u64 id = 0;
int ret;
- ret = ov4689_read_reg(client, OV4689_REG_CHIP_ID,
- OV4689_REG_VALUE_16BIT, &id);
+ ret = cci_read(ov4689->regmap, OV4689_REG_CHIP_ID, &id, NULL);
if (ret) {
dev_err(dev, "Cannot read sensor ID\n");
return ret;
}
if (id != CHIP_ID) {
- dev_err(dev, "Unexpected sensor ID %06x, expected %06x\n",
+ dev_err(dev, "Unexpected sensor ID %06llx, expected %06x\n",
id, CHIP_ID);
return -ENODEV;
}
@@ -812,7 +836,7 @@ static int ov4689_configure_regulators(struct ov4689 *ov4689)
for (i = 0; i < ARRAY_SIZE(ov4689_supply_names); i++)
ov4689->supplies[i].supply = ov4689_supply_names[i];
- return devm_regulator_bulk_get(&ov4689->client->dev,
+ return devm_regulator_bulk_get(ov4689->dev,
ARRAY_SIZE(ov4689_supply_names),
ov4689->supplies);
}
@@ -881,7 +905,8 @@ static int ov4689_probe(struct i2c_client *client)
if (!ov4689)
return -ENOMEM;
- ov4689->client = client;
+ ov4689->dev = dev;
+
ov4689->cur_mode = &supported_modes[OV4689_MODE_2688_1520];
ov4689->xvclk = devm_clk_get_optional(dev, NULL);
@@ -905,6 +930,13 @@ static int ov4689_probe(struct i2c_client *client)
return -EINVAL;
}
+ ov4689->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov4689->regmap)) {
+ ret = PTR_ERR(ov4689->regmap);
+ dev_err(dev, "failed to initialize CCI: %d\n", ret);
+ return ret;
+ }
+
ov4689->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov4689->reset_gpio)) {
@@ -923,13 +955,15 @@ static int ov4689_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"Failed to get power regulators\n");
- mutex_init(&ov4689->mutex);
-
sd = &ov4689->subdev;
v4l2_i2c_subdev_init(sd, client, &ov4689_subdev_ops);
+ sd->internal_ops = &ov4689_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ret = ov4689_initialize_controls(ov4689);
- if (ret)
- goto err_destroy_mutex;
+ if (ret) {
+ dev_err(dev, "Failed to initialize controls\n");
+ return ret;
+ }
ret = ov4689_power_on(dev);
if (ret)
@@ -939,35 +973,47 @@ static int ov4689_probe(struct i2c_client *client)
if (ret)
goto err_power_off;
- sd->internal_ops = &ov4689_internal_ops;
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- ov4689->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov4689->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sd->entity, 1, &ov4689->pad);
if (ret < 0)
goto err_power_off;
- ret = v4l2_async_register_subdev_sensor(sd);
+ sd->state_lock = ov4689->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(sd);
if (ret) {
- dev_err(dev, "v4l2 async register subdev failed\n");
+ dev_err(dev, "Could not register v4l2 device\n");
goto err_clean_entity;
}
pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
- pm_runtime_idle(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = v4l2_async_register_subdev_sensor(sd);
+ if (ret) {
+ dev_err(dev, "v4l2 async register subdev failed\n");
+ goto err_clean_subdev_pm;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return 0;
+err_clean_subdev_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ v4l2_subdev_cleanup(sd);
err_clean_entity:
media_entity_cleanup(&sd->entity);
err_power_off:
ov4689_power_off(dev);
err_free_handler:
v4l2_ctrl_handler_free(&ov4689->ctrl_handler);
-err_destroy_mutex:
- mutex_destroy(&ov4689->mutex);
return ret;
}
@@ -979,9 +1025,8 @@ static void ov4689_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
-
+ v4l2_subdev_cleanup(sd);
v4l2_ctrl_handler_free(&ov4689->ctrl_handler);
- mutex_destroy(&ov4689->mutex);
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev))
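
As a quick illustration of the v4l2-cci convention the ov4689 conversion above relies on: each cci_write()/cci_update_bits() call takes an optional error pointer, becomes a no-op once *err is already non-zero, and preserves the first failure, so a whole register sequence needs only one check at the end. The sketch below is not part of the patch; the function name and the register values are hypothetical, while the OV4689_* macros and cci_*() helpers are the ones introduced above.

static int ov4689_example_write_window(struct ov4689 *ov4689)
{
	struct regmap *rm = ov4689->regmap;
	int ret = 0;

	/* Later calls are skipped as soon as ret becomes non-zero. */
	cci_write(rm, OV4689_REG_H_OUTPUT_SIZE, 2688, &ret);
	cci_write(rm, OV4689_REG_V_OUTPUT_SIZE, 1520, &ret);
	cci_update_bits(rm, OV4689_REG_TIMING_FORMAT1,
			OV4689_TIMING_FLIP_MASK, 0, &ret);

	return ret;	/* first error, or 0 if every write succeeded */
}
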
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index b4647bda8c21..b8bd8354d100 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -463,8 +463,8 @@ static int rdacm20_initialize(struct rdacm20_device *dev)
return ret;
/*
- * Ensure that we have a good link configuration before attempting to
- * identify the device.
+ * Ensure that we have a good link configuration before attempting to
+ * identify the device.
*/
ret = max9271_configure_i2c(&dev->serializer,
MAX9271_I2CSLVSH_469NS_234NS |
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f250640729ca..b947a55281f0 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -326,7 +326,7 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
}
dev_dbg(&client->dev, "detect link_freq = %lld Hz", link_freq);
- do_div(ui_4, link_freq);
+ ui_4 = div64_u64(ui_4, link_freq);
bridge->r.clk_lane_reg1 |= ui_4 << 2;
return 0;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 3192a334aaab..0307fee3cce9 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1521,11 +1521,14 @@ static int tc358743_g_input_status(struct v4l2_subdev *sd, u32 *status)
return 0;
}
-static int tc358743_s_dv_timings(struct v4l2_subdev *sd,
+static int tc358743_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct tc358743_state *state = to_state(sd);
+ if (pad != 0)
+ return -EINVAL;
+
if (!timings)
return -EINVAL;
@@ -1553,11 +1556,14 @@ static int tc358743_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int tc358743_g_dv_timings(struct v4l2_subdev *sd,
+static int tc358743_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct tc358743_state *state = to_state(sd);
+ if (pad != 0)
+ return -EINVAL;
+
*timings = state->timings;
return 0;
@@ -1573,11 +1579,14 @@ static int tc358743_enum_dv_timings(struct v4l2_subdev *sd,
&tc358743_timings_cap, NULL, NULL);
}
-static int tc358743_query_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int tc358743_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
int ret;
+ if (pad != 0)
+ return -EINVAL;
+
ret = tc358743_get_detected_timings(sd, timings);
if (ret)
return ret;
@@ -1822,9 +1831,6 @@ static const struct v4l2_subdev_core_ops tc358743_core_ops = {
static const struct v4l2_subdev_video_ops tc358743_video_ops = {
.g_input_status = tc358743_g_input_status,
- .s_dv_timings = tc358743_s_dv_timings,
- .g_dv_timings = tc358743_g_dv_timings,
- .query_dv_timings = tc358743_query_dv_timings,
.s_stream = tc358743_s_stream,
};
@@ -1834,6 +1840,9 @@ static const struct v4l2_subdev_pad_ops tc358743_pad_ops = {
.get_fmt = tc358743_get_fmt,
.get_edid = tc358743_g_edid,
.set_edid = tc358743_s_edid,
+ .s_dv_timings = tc358743_s_dv_timings,
+ .g_dv_timings = tc358743_g_dv_timings,
+ .query_dv_timings = tc358743_query_dv_timings,
.enum_dv_timings = tc358743_enum_dv_timings,
.dv_timings_cap = tc358743_dv_timings_cap,
.get_mbus_config = tc358743_get_mbus_config,
@@ -2110,7 +2119,7 @@ static int tc358743_probe(struct i2c_client *client)
tc358743_initial_setup(sd);
- tc358743_s_dv_timings(sd, &default_timing);
+ tc358743_s_dv_timings(sd, 0, &default_timing);
tc358743_set_csi_color_space(sd);
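
For reference, a minimal sketch (not from the patch, includes omitted, "my_state"/"to_my_state" are hypothetical stand-ins for a driver's own state helpers) of the shape a DV-timings handler takes after the move from video ops to pad ops shown in the tc358743 hunks above:

struct my_state {
	struct v4l2_subdev sd;
	struct v4l2_dv_timings timings;
};

static inline struct my_state *to_my_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct my_state, sd);
}

static int my_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
			   struct v4l2_dv_timings *timings)
{
	/* These receivers expose a single pad, so only pad 0 is valid. */
	if (pad != 0)
		return -EINVAL;

	*timings = to_my_state(sd)->timings;

	return 0;
}

Bridge drivers then select the pad explicitly, e.g. v4l2_subdev_call(sd, pad, g_dv_timings, 0, &timings), as the cobalt changes further below do.
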
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index d676adc4401b..edf79107adc5 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -844,8 +844,7 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
continue;
tmp = fout * postdiv;
- do_div(tmp, fin);
- mul = tmp;
+ mul = div64_ul(tmp, fin);
if (mul > 511)
continue;
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 8e4a0718c4b6..58ce8fec3041 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1669,8 +1669,8 @@ tda1997x_g_input_status(struct v4l2_subdev *sd, u32 *status)
return 0;
};
-static int tda1997x_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int tda1997x_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct tda1997x_state *state = to_state(sd);
@@ -1694,7 +1694,7 @@ static int tda1997x_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int tda1997x_g_dv_timings(struct v4l2_subdev *sd,
+static int tda1997x_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct tda1997x_state *state = to_state(sd);
@@ -1707,7 +1707,7 @@ static int tda1997x_g_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
+static int tda1997x_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct tda1997x_state *state = to_state(sd);
@@ -1724,9 +1724,6 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
.g_input_status = tda1997x_g_input_status,
- .s_dv_timings = tda1997x_s_dv_timings,
- .g_dv_timings = tda1997x_g_dv_timings,
- .query_dv_timings = tda1997x_query_dv_timings,
};
@@ -1930,6 +1927,9 @@ static const struct v4l2_subdev_pad_ops tda1997x_pad_ops = {
.set_fmt = tda1997x_set_format,
.get_edid = tda1997x_get_edid,
.set_edid = tda1997x_set_edid,
+ .s_dv_timings = tda1997x_s_dv_timings,
+ .g_dv_timings = tda1997x_g_dv_timings,
+ .query_dv_timings = tda1997x_query_dv_timings,
.dv_timings_cap = tda1997x_get_dv_timings_cap,
.enum_dv_timings = tda1997x_enum_dv_timings,
};
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index ea70c1c13872..49ed83a0ac94 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -193,8 +193,8 @@ static int ths7303_s_stream(struct v4l2_subdev *sd, int enable)
}
/* for setting filter for HD output */
-static int ths7303_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *dv_timings)
+static int ths7303_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *dv_timings)
{
struct ths7303_state *state = to_state(sd);
@@ -210,7 +210,6 @@ static int ths7303_s_dv_timings(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops ths7303_video_ops = {
.s_stream = ths7303_s_stream,
.s_std_output = ths7303_s_std_output,
- .s_dv_timings = ths7303_s_dv_timings,
};
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -317,9 +316,14 @@ static const struct v4l2_subdev_core_ops ths7303_core_ops = {
#endif
};
+static const struct v4l2_subdev_pad_ops ths7303_pad_ops = {
+ .s_dv_timings = ths7303_s_dv_timings,
+};
+
static const struct v4l2_subdev_ops ths7303_ops = {
.core = &ths7303_core_ops,
.video = &ths7303_video_ops,
+ .pad = &ths7303_pad_ops,
};
static int ths7303_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 0e0f676cd221..ce0a7f809f19 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -358,13 +358,16 @@ static void ths8200_setup(struct v4l2_subdev *sd, struct v4l2_bt_timings *bt)
bt->hsync, bt->vsync);
}
-static int ths8200_s_dv_timings(struct v4l2_subdev *sd,
+static int ths8200_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct ths8200_state *state = to_state(sd);
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
if (!v4l2_valid_dv_timings(timings, &ths8200_timings_cap,
NULL, NULL))
return -EINVAL;
@@ -385,13 +388,16 @@ static int ths8200_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int ths8200_g_dv_timings(struct v4l2_subdev *sd,
+static int ths8200_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_dv_timings *timings)
{
struct ths8200_state *state = to_state(sd);
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ if (pad != 0)
+ return -EINVAL;
+
*timings = state->dv_timings;
return 0;
@@ -420,11 +426,11 @@ static int ths8200_dv_timings_cap(struct v4l2_subdev *sd,
/* Specific video subsystem operation handlers */
static const struct v4l2_subdev_video_ops ths8200_video_ops = {
.s_stream = ths8200_s_stream,
- .s_dv_timings = ths8200_s_dv_timings,
- .g_dv_timings = ths8200_g_dv_timings,
};
static const struct v4l2_subdev_pad_ops ths8200_pad_ops = {
+ .s_dv_timings = ths8200_s_dv_timings,
+ .g_dv_timings = ths8200_g_dv_timings,
.enum_dv_timings = ths8200_enum_dv_timings,
.dv_timings_cap = ths8200_dv_timings_cap,
};
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 6a04ffae5343..ea01bd86450e 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -546,13 +546,16 @@ static int tvp7002_write_inittab(struct v4l2_subdev *sd,
return error;
}
-static int tvp7002_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *dv_timings)
+static int tvp7002_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *dv_timings)
{
struct tvp7002 *device = to_tvp7002(sd);
const struct v4l2_bt_timings *bt = &dv_timings->bt;
int i;
+ if (pad != 0)
+ return -EINVAL;
+
if (dv_timings->type != V4L2_DV_BT_656_1120)
return -EINVAL;
for (i = 0; i < NUM_TIMINGS; i++) {
@@ -566,11 +569,14 @@ static int tvp7002_s_dv_timings(struct v4l2_subdev *sd,
return -EINVAL;
}
-static int tvp7002_g_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *dv_timings)
+static int tvp7002_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *dv_timings)
{
struct tvp7002 *device = to_tvp7002(sd);
+ if (pad != 0)
+ return -EINVAL;
+
*dv_timings = device->current_timings->timings;
return 0;
}
@@ -659,12 +665,16 @@ static int tvp7002_query_dv(struct v4l2_subdev *sd, int *index)
return 0;
}
-static int tvp7002_query_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int tvp7002_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
int index;
- int err = tvp7002_query_dv(sd, &index);
+ int err;
+
+ if (pad != 0)
+ return -EINVAL;
+ err = tvp7002_query_dv(sd, &index);
if (err)
return err;
*timings = tvp7002_timings[index].timings;
@@ -861,9 +871,6 @@ static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
/* Specific video subsystem operation handlers */
static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
- .g_dv_timings = tvp7002_g_dv_timings,
- .s_dv_timings = tvp7002_s_dv_timings,
- .query_dv_timings = tvp7002_query_dv_timings,
.s_stream = tvp7002_s_stream,
};
@@ -872,6 +879,9 @@ static const struct v4l2_subdev_pad_ops tvp7002_pad_ops = {
.enum_mbus_code = tvp7002_enum_mbus_code,
.get_fmt = tvp7002_get_pad_format,
.set_fmt = tvp7002_set_pad_format,
+ .g_dv_timings = tvp7002_g_dv_timings,
+ .s_dv_timings = tvp7002_s_dv_timings,
+ .query_dv_timings = tvp7002_query_dv_timings,
.enum_dv_timings = tvp7002_enum_dv_timings,
};
@@ -1001,7 +1011,7 @@ static int tvp7002_probe(struct i2c_client *c)
/* Set registers according to default video mode */
timings = device->current_timings->timings;
- error = tvp7002_s_dv_timings(sd, &timings);
+ error = tvp7002_s_dv_timings(sd, 0, &timings);
#if defined(CONFIG_MEDIA_CONTROLLER)
device->pad.flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 7f67825c8757..318e267e798e 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -245,15 +245,14 @@ int __must_check media_devnode_register(struct media_device *mdev,
kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
/* Part 3: Add the media and char device */
+ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret < 0) {
+ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
pr_err("%s: cdev_device_add failed\n", __func__);
goto cdev_add_error;
}
- /* Part 4: Activate this minor. The char device can now be used. */
- set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
-
return 0;
cdev_add_error:
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 0e28b9a7936e..96dd0f6ccd0d 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -619,6 +619,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
link = list_entry(entry->links, typeof(*link), list);
last_link = media_pipeline_walk_pop(walk);
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (not data-link)\n");
+ return 0;
+ }
+
dev_dbg(walk->mdev->dev,
"media pipeline: exploring link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
diff --git a/drivers/media/mmc/siano/smssdio.c b/drivers/media/mmc/siano/smssdio.c
index 065b572e0272..8199077faf36 100644
--- a/drivers/media/mmc/siano/smssdio.c
+++ b/drivers/media/mmc/siano/smssdio.c
@@ -344,30 +344,7 @@ static struct sdio_driver smssdio_driver = {
.probe = smssdio_probe,
.remove = smssdio_remove,
};
-
-/*******************************************************************/
-/* Module functions */
-/*******************************************************************/
-
-static int __init smssdio_module_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
- printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
-
- ret = sdio_register_driver(&smssdio_driver);
-
- return ret;
-}
-
-static void __exit smssdio_module_exit(void)
-{
- sdio_unregister_driver(&smssdio_driver);
-}
-
-module_init(smssdio_module_init);
-module_exit(smssdio_module_exit);
+module_sdio_driver(smssdio_driver);
MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
MODULE_AUTHOR("Pierre Ossman");
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 77ba08ace29f..d4d7b264c965 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -633,7 +633,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
return -EBUSY;
err = v4l2_subdev_call(s->sd,
- video, s_dv_timings, timings);
+ pad, s_dv_timings, 0, timings);
if (!err) {
s->timings = *timings;
s->width = timings->bt.width;
@@ -653,7 +653,7 @@ static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
return 0;
}
return v4l2_subdev_call(s->sd,
- video, g_dv_timings, timings);
+ pad, g_dv_timings, 0, timings);
}
static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
@@ -666,7 +666,7 @@ static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
return 0;
}
return v4l2_subdev_call(s->sd,
- video, query_dv_timings, timings);
+ pad, query_dv_timings, 0, timings);
}
static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
@@ -1080,7 +1080,7 @@ static int cobalt_g_pixelaspect(struct file *file, void *fh,
if (s->input == 1)
timings = cea1080p60;
else
- err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+ err = v4l2_subdev_call(s->sd, pad, g_dv_timings, 0, &timings);
if (!err)
*f = v4l2_dv_timings_aspect_ratio(&timings);
return err;
@@ -1099,7 +1099,7 @@ static int cobalt_g_selection(struct file *file, void *fh,
if (s->input == 1)
timings = cea1080p60;
else
- err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+ err = v4l2_subdev_call(s->sd, pad, g_dv_timings, 0, &timings);
if (err)
return err;
@@ -1243,7 +1243,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
if (s->sd)
vdev->ctrl_handler = s->sd->ctrl_handler;
s->timings = dv1080p60;
- v4l2_subdev_call(s->sd, video, s_dv_timings, &s->timings);
+ v4l2_subdev_call(s->sd, pad, s_dv_timings, 0, &s->timings);
if (!s->is_output && s->sd)
cobalt_enable_input(s);
vdev->ioctl_ops = s->is_dummy ? &cobalt_ioctl_empty_ops :
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
index ee4684159d3d..d9fcddce028b 100644
--- a/drivers/media/pci/intel/Kconfig
+++ b/drivers/media/pci/intel/Kconfig
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ipu6/Kconfig"
source "drivers/media/pci/intel/ivsc/Kconfig"
config IPU_BRIDGE
tristate "Intel IPU Bridge"
- depends on I2C && ACPI
+ depends on ACPI || COMPILE_TEST
+ depends on I2C
help
The IPU bridge is a helper library for Intel IPU drivers to
function on systems shipped with Windows.
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index f199a97e1d78..3a2cc6567159 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
obj-y += ipu3/
obj-y += ivsc/
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += ipu6/
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index e994db4f4d91..61750cc98d70 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -15,6 +15,8 @@
#include <media/ipu-bridge.h>
#include <media/v4l2-fwnode.h>
+#define ADEV_DEV(adev) ACPI_PTR(&((adev)->dev))
+
/*
* 92335fcf-3203-4472-af93-7b4453ac29da
*
@@ -87,6 +89,7 @@ static const char * const ipu_vcm_types[] = {
"lc898212axb",
};
+#if IS_ENABLED(CONFIG_ACPI)
/*
* Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
* instead of device and driver match to probe IVSC device.
@@ -100,13 +103,13 @@ static const struct acpi_device_id ivsc_acpi_ids[] = {
static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
{
- acpi_handle handle = acpi_device_handle(adev);
- struct acpi_device *consumer, *ivsc_adev;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
+ struct acpi_device *consumer, *ivsc_adev;
+ acpi_handle handle = acpi_device_handle(adev);
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if exist */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
@@ -118,6 +121,12 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
return NULL;
}
+#else
+static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+{
+ return NULL;
+}
+#endif
static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
@@ -163,7 +172,7 @@ static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
if (!csi_dev) {
acpi_dev_put(adev);
- dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
+ dev_err(ADEV_DEV(adev), "Failed to find MEI CSI dev\n");
return -ENODEV;
}
@@ -182,24 +191,25 @@ static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+ status = acpi_evaluate_object(ACPI_PTR(adev->handle),
+ id, NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
obj = buffer.pointer;
if (!obj) {
- dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+ dev_err(ADEV_DEV(adev), "Couldn't locate ACPI buffer\n");
return -ENODEV;
}
if (obj->type != ACPI_TYPE_BUFFER) {
- dev_err(&adev->dev, "Not an ACPI buffer\n");
+ dev_err(ADEV_DEV(adev), "Not an ACPI buffer\n");
ret = -ENODEV;
goto out_free_buff;
}
if (obj->buffer.length > size) {
- dev_err(&adev->dev, "Given buffer is too small\n");
+ dev_err(ADEV_DEV(adev), "Given buffer is too small\n");
ret = -EINVAL;
goto out_free_buff;
}
@@ -220,7 +230,7 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
case IPU_SENSOR_ROTATION_INVERTED:
return 180;
default:
- dev_warn(&adev->dev,
+ dev_warn(ADEV_DEV(adev),
"Unknown rotation %d. Assume 0 degree rotation\n",
ssdb->degree);
return 0;
@@ -230,12 +240,14 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
{
enum v4l2_fwnode_orientation orientation;
- struct acpi_pld_info *pld;
- acpi_status status;
+ struct acpi_pld_info *pld = NULL;
+ acpi_status status = AE_ERROR;
+#if IS_ENABLED(CONFIG_ACPI)
status = acpi_get_physical_device_location(adev->handle, &pld);
+#endif
if (ACPI_FAILURE(status)) {
- dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
+ dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
@@ -253,7 +265,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
default:
- dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
+ dev_warn(ADEV_DEV(adev), "Unknown _PLD panel val %d\n",
+ pld->panel);
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
}
@@ -272,12 +285,12 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
return ret;
if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
- dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
+ dev_warn(ADEV_DEV(adev), "Unknown VCM type %d\n", ssdb.vcmtype);
ssdb.vcmtype = 0;
}
if (ssdb.lanes > IPU_MAX_LANES) {
- dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
+ dev_err(ADEV_DEV(adev), "Number of lanes in SSDB is invalid\n");
return -EINVAL;
}
@@ -465,8 +478,14 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
sensor->ipu_properties);
if (sensor->csi_dev) {
+ const char *device_hid = "";
+
+#if IS_ENABLED(CONFIG_ACPI)
+ device_hid = acpi_device_hid(sensor->ivsc_adev);
+#endif
+
snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
- acpi_device_hid(sensor->ivsc_adev), sensor->link);
+ device_hid, sensor->link);
nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
sensor->ivsc_properties);
@@ -631,11 +650,15 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
{
struct fwnode_handle *fwnode, *primary;
struct ipu_sensor *sensor;
- struct acpi_device *adev;
+ struct acpi_device *adev = NULL;
int ret;
+#if IS_ENABLED(CONFIG_ACPI)
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
+#else
+ while (true) {
+#endif
+ if (!ACPI_PTR(adev->status.enabled))
continue;
if (bridge->n_sensors >= IPU_MAX_PORTS) {
@@ -671,7 +694,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
goto err_free_swnodes;
}
- sensor->adev = acpi_dev_get(adev);
+ sensor->adev = ACPI_PTR(acpi_dev_get(adev));
primary = acpi_fwnode_handle(adev);
primary->secondary = fwnode;
@@ -727,11 +750,16 @@ static int ipu_bridge_ivsc_is_ready(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+#if IS_ENABLED(CONFIG_ACPI)
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
- if (!sensor_adev->status.enabled)
+#else
+ while (true) {
+ sensor_adev = NULL;
+#endif
+ if (!ACPI_PTR(sensor_adev->status.enabled))
continue;
adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index c42adc5a408d..81ec8630453b 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -4,9 +4,9 @@
*
* Based partially on Intel IPU4 driver written by
* Sakari Ailus <sakari.ailus@linux.intel.com>
- * Samu Onkalo <samu.onkalo@intel.com>
+ * Samu Onkalo
* Jouni Högander <jouni.hogander@intel.com>
- * Jouni Ukkonen <jouni.ukkonen@intel.com>
+ * Jouni Ukkonen
* Antti Laakso <antti.laakso@intel.com>
* et al.
*/
@@ -1752,11 +1752,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
- /* Register notifier for subdevices we care */
- r = cio2_parse_firmware(cio2);
- if (r)
- goto fail_clean_notifier;
-
r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
CIO2_NAME, cio2);
if (r) {
@@ -1764,6 +1759,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
goto fail_clean_notifier;
}
+ /* Register notifier for subdevices we care */
+ r = cio2_parse_firmware(cio2);
+ if (r)
+ goto fail_clean_notifier;
+
pm_runtime_put_noidle(dev);
pm_runtime_allow(dev);
@@ -1807,16 +1807,10 @@ static int __maybe_unused cio2_runtime_suspend(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
void __iomem *const base = cio2->base;
- u16 pm;
writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
dev_dbg(dev, "cio2 runtime suspend.\n");
- pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
- pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
- pm |= CIO2_PMCSR_D3;
- pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
-
return 0;
}
@@ -1825,15 +1819,10 @@ static int __maybe_unused cio2_runtime_resume(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
void __iomem *const base = cio2->base;
- u16 pm;
writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
dev_dbg(dev, "cio2 runtime resume.\n");
- pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
- pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
- pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
-
return 0;
}
@@ -2006,10 +1995,10 @@ static struct pci_driver cio2_pci_driver = {
module_pci_driver(cio2_pci_driver);
-MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
+MODULE_AUTHOR("Tuukka Toivonen");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
-MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
+MODULE_AUTHOR("Yuning Pu");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index d731ce8adbe3..d7cb7dae665b 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -320,10 +320,6 @@ struct pci_dev;
#define CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT 0x4
#define CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT 0x570
-#define CIO2_PMCSR_OFFSET 4U
-#define CIO2_PMCSR_D0D3_SHIFT 2U
-#define CIO2_PMCSR_D3 0x3
-
struct cio2_csi2_timing {
s32 clk_termen;
s32 clk_settle;
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
new file mode 100644
index 000000000000..154343080c82
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -0,0 +1,18 @@
+config VIDEO_INTEL_IPU6
+ tristate "Intel IPU6 driver"
+ depends on ACPI || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on X86 && X86_64 && HAS_DMA
+ select DMA_OPS
+ select IOMMU_IOVA
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ select IPU_BRIDGE
+ help
+ This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs
+ and used for capturing images and video from camera sensors.
+
+ To compile this driver, say Y here! It contains 2 modules -
+ intel_ipu6 and intel_ipu6_isys.
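For reference, a minimal kernel configuration fragment that builds the two modules named above (assuming the usual tristate semantics: =m builds loadable modules, =y builds them in):

    CONFIG_VIDEO_INTEL_IPU6=m

With udev running, loading intel_ipu6 typically pulls in intel_ipu6_isys as well, since the core registers auxiliary devices that the isys driver binds to.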
diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile
new file mode 100644
index 000000000000..a821b0a1567f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+intel-ipu6-y := ipu6.o \
+ ipu6-bus.o \
+ ipu6-dma.o \
+ ipu6-mmu.o \
+ ipu6-buttress.o \
+ ipu6-cpd.o \
+ ipu6-fw-com.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o
+
+intel-ipu6-isys-y := ipu6-isys.o \
+ ipu6-isys-csi2.o \
+ ipu6-fw-isys.o \
+ ipu6-isys-video.o \
+ ipu6-isys-queue.o \
+ ipu6-isys-subdev.o \
+ ipu6-isys-mcd-phy.o \
+ ipu6-isys-jsl-phy.o \
+ ipu6-isys-dwc-phy.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.c b/drivers/media/pci/intel/ipu6/ipu6-bus.c
new file mode 100644
index 000000000000..149ec098cdbf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2024 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-dma.h"
+
+static int bus_pm_runtime_suspend(struct device *dev)
+{
+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+ int ret;
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = ipu6_buttress_power(dev, adev->ctrl, false);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "power down failed!\n");
+
+ /* Powering down failed, attempt to resume device now */
+ ret = pm_generic_runtime_resume(dev);
+ if (!ret)
+ return -EBUSY;
+
+ return -EIO;
+}
+
+static int bus_pm_runtime_resume(struct device *dev)
+{
+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+ int ret;
+
+ ret = ipu6_buttress_power(dev, adev->ctrl, true);
+ if (ret)
+ return ret;
+
+ ret = pm_generic_runtime_resume(dev);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipu6_buttress_power(dev, adev->ctrl, false);
+
+ return -EBUSY;
+}
+
+static struct dev_pm_domain ipu6_bus_pm_domain = {
+ .ops = {
+ .runtime_suspend = bus_pm_runtime_suspend,
+ .runtime_resume = bus_pm_runtime_resume,
+ },
+};
+
+static DEFINE_MUTEX(ipu6_bus_mutex);
+
+static void ipu6_bus_release(struct device *dev)
+{
+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+
+ kfree(adev->pdata);
+ kfree(adev);
+}
+
+struct ipu6_bus_device *
+ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ char *name)
+{
+ struct auxiliary_device *auxdev;
+ struct ipu6_bus_device *adev;
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? IPU6_MMU_ADDR_BITS :
+ IPU6_MMU_ADDR_BITS_NON_SECURE);
+ adev->isp = isp;
+ adev->ctrl = ctrl;
+ adev->pdata = pdata;
+ auxdev = &adev->auxdev;
+ auxdev->name = name;
+ auxdev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ auxdev->dev.parent = parent;
+ auxdev->dev.release = ipu6_bus_release;
+ auxdev->dev.dma_ops = &ipu6_dma_ops;
+ auxdev->dev.dma_mask = &adev->dma_mask;
+ auxdev->dev.dma_parms = pdev->dev.dma_parms;
+ auxdev->dev.coherent_dma_mask = adev->dma_mask;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n",
+ ret);
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ dev_pm_domain_set(&auxdev->dev, &ipu6_bus_pm_domain);
+
+ pm_runtime_forbid(&adev->auxdev.dev);
+ pm_runtime_enable(&adev->auxdev.dev);
+
+ return adev;
+}
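The auxiliary-device id built above packs the PCI domain number into the upper 16 bits and PCI_DEVID() (which is ((bus) << 8) | (devfn) in include/linux/pci.h) into the lower 16 bits, giving each IPU6 PCI function a unique, stable id. A minimal user-space sketch of the same encoding, with made-up example values:

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as PCI_DEVID() in include/linux/pci.h. */
    static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
    {
            return ((uint16_t)bus << 8) | devfn;
    }

    int main(void)
    {
            uint32_t domain = 0;                      /* example values only */
            uint8_t bus = 0x00, devfn = 0x88;         /* device 0x11, function 0 */
            uint32_t id = (domain << 16) | pci_devid(bus, devfn);

            printf("auxiliary device id: 0x%08x\n", id);  /* prints 0x00000088 */
            return 0;
    }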
+
+int ipu6_bus_add_device(struct ipu6_bus_device *adev)
+{
+ struct auxiliary_device *auxdev = &adev->auxdev;
+ int ret;
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ mutex_lock(&ipu6_bus_mutex);
+ list_add(&adev->list, &adev->isp->devices);
+ mutex_unlock(&ipu6_bus_mutex);
+
+ pm_runtime_allow(&auxdev->dev);
+
+ return 0;
+}
+
+void ipu6_bus_del_devices(struct pci_dev *pdev)
+{
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+ struct ipu6_bus_device *adev, *save;
+
+ mutex_lock(&ipu6_bus_mutex);
+
+ list_for_each_entry_safe(adev, save, &isp->devices, list) {
+ pm_runtime_disable(&adev->auxdev.dev);
+ list_del(&adev->list);
+ auxiliary_device_delete(&adev->auxdev);
+ auxiliary_device_uninit(&adev->auxdev);
+ }
+
+ mutex_unlock(&ipu6_bus_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h
new file mode 100644
index 000000000000..b26c6aee1621
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2024 Intel Corporation */
+
+#ifndef IPU6_BUS_H
+#define IPU6_BUS_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+struct firmware;
+struct pci_dev;
+
+#define IPU6_BUS_NAME IPU6_NAME "-bus"
+
+struct ipu6_buttress_ctrl;
+
+struct ipu6_bus_device {
+ struct auxiliary_device auxdev;
+ struct auxiliary_driver *auxdrv;
+ const struct ipu6_auxdrv_data *auxdrv_data;
+ struct list_head list;
+ void *pdata;
+ struct ipu6_mmu *mmu;
+ struct ipu6_device *isp;
+ struct ipu6_buttress_ctrl *ctrl;
+ u64 dma_mask;
+ const struct firmware *fw;
+ struct sg_table fw_sgt;
+ u64 *pkg_dir;
+ dma_addr_t pkg_dir_dma_addr;
+ unsigned int pkg_dir_size;
+};
+
+struct ipu6_auxdrv_data {
+ irqreturn_t (*isr)(struct ipu6_bus_device *adev);
+ irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev);
+ bool wake_isr_thread;
+};
+
+#define to_ipu6_bus_device(_dev) \
+ container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev)
+#define auxdev_to_adev(_auxdev) \
+ container_of(_auxdev, struct ipu6_bus_device, auxdev)
+#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev)
+
+struct ipu6_bus_device *
+ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ char *name);
+int ipu6_bus_add_device(struct ipu6_bus_device *adev);
+void ipu6_bus_del_devices(struct pci_dev *pdev);
+
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
new file mode 100644
index 000000000000..23c537e7ce1e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pfn.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-platform-buttress-regs.h"
+
+#define BOOTLOADER_STATUS_OFFSET 0x15c
+
+#define BOOTLOADER_MAGIC_KEY 0xb00710ad
+
+#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
+#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
+#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE
+
+#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10
+
+#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC)
+
+#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC)
+#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC)
+#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC
+#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC
+#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC)
+#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_RESET_RETRY 2000
+#define BUTTRESS_CSE_IPC_RESET_RETRY 4
+#define BUTTRESS_IPC_CMD_SEND_RETRY 1
+
+#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100
+
+static const u32 ipu6_adev_irq_mask[2] = {
+ BUTTRESS_ISR_IS_IRQ,
+ BUTTRESS_ISR_PS_IRQ
+};
+
+int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
+ struct ipu6_buttress_ipc *ipc)
+{
+ unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
+ struct ipu6_buttress *b = &isp->buttress;
+ u32 val = 0, csr_in_clr;
+
+ if (!isp->secure_mode) {
+ dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode");
+ return 0;
+ }
+
+ mutex_lock(&b->ipc_mutex);
+
+ /* Clear-by-1 CSR (all bits), corresponding internal states. */
+ val = readl(isp->base + ipc->csr_in);
+ writel(val, isp->base + ipc->csr_in);
+
+ /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
+ writel(ENTRY, isp->base + ipc->csr_out);
+ /*
+ * Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ */
+ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;
+
+ do {
+ usleep_range(400, 500);
+ val = readl(isp->base + ipc->csr_in);
+ switch (val) {
+ case ENTRY | EXIT:
+ case ENTRY | EXIT | QUERY:
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
+ * 2) Set peer CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ */
+ writel(ENTRY | EXIT, isp->base + ipc->csr_in);
+ writel(QUERY, isp->base + ipc->csr_out);
+ break;
+ case ENTRY:
+ case ENTRY | QUERY:
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ */
+ writel(ENTRY | QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ case EXIT:
+ case EXIT | QUERY:
+ /*
+ * Clear-by-1 CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * 1) Clear incoming doorbell.
+ * 2) Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ * 3) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ */
+ writel(EXIT, isp->base + ipc->csr_in);
+ writel(0, isp->base + ipc->db0_in);
+ writel(csr_in_clr, isp->base + ipc->csr_in);
+ writel(EXIT, isp->base + ipc->csr_out);
+
+ /*
+ * Read csr_in again to check whether RST_PHASE2 is done.
+ * If csr_in is QUERY, it must be handled again.
+ */
+ usleep_range(200, 300);
+ val = readl(isp->base + ipc->csr_in);
+ if (val & QUERY) {
+ dev_dbg(&isp->pdev->dev,
+ "RST_PHASE2 retry csr_in = %x\n", val);
+ break;
+ }
+ mutex_unlock(&b->ipc_mutex);
+ return 0;
+ case QUERY:
+ /*
+ * 1) Clear-by-1 CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1
+ */
+ writel(QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ default:
+ dev_warn_ratelimited(&isp->pdev->dev,
+ "Unexpected CSR 0x%x\n", val);
+ break;
+ }
+ } while (retries--);
+
+ mutex_unlock(&b->ipc_mutex);
+ dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");
+
+ return -ETIMEDOUT;
+}
+
+static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
+ struct ipu6_buttress_ipc *ipc)
+{
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+}
+
+static int
+ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
+ struct ipu6_buttress_ipc *ipc)
+{
+ unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
+ void __iomem *addr;
+ int ret;
+ u32 val;
+
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+
+ addr = isp->base + ipc->csr_in;
+ ret = readl_poll_timeout(addr, val, val & mask, 200,
+ BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
+ ipu6_buttress_ipc_validity_close(isp, ipc);
+ }
+
+ return ret;
+}
+
+static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
+ struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
+{
+ if (ipc_msg)
+ *ipc_msg = readl(isp->base + ipc->data0_in);
+ writel(0, isp->base + ipc->db0_in);
+}
+
+static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
+ enum ipu6_buttress_ipc_domain ipc_domain,
+ struct ipu6_ipc_buttress_bulk_msg *msgs,
+ u32 size)
+{
+ unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
+ unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+ struct ipu6_buttress *b = &isp->buttress;
+ struct ipu6_buttress_ipc *ipc;
+ u32 val;
+ int ret;
+ int tout;
+
+ ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? &b->cse : &b->ish;
+
+ mutex_lock(&b->ipc_mutex);
+
+ ret = ipu6_buttress_ipc_validity_open(isp, ipc);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "IPC validity open failed\n");
+ goto out;
+ }
+
+ tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
+ rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);
+
+ for (i = 0; i < size; i++) {
+ reinit_completion(&ipc->send_complete);
+ if (msgs[i].require_resp)
+ reinit_completion(&ipc->recv_complete);
+
+ dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
+ msgs[i].cmd);
+ writel(msgs[i].cmd, isp->base + ipc->data0_out);
+ val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
+ writel(val, isp->base + ipc->db0_out);
+
+ tout = wait_for_completion_timeout(&ipc->send_complete,
+ tx_timeout_jiffies);
+ if (!tout) {
+ dev_err(&isp->pdev->dev, "send IPC response timeout\n");
+ if (!retry--) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Try again if CSE is not responding on first try */
+ writel(0, isp->base + ipc->db0_out);
+ i--;
+ continue;
+ }
+
+ retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+ if (!msgs[i].require_resp)
+ continue;
+
+ tout = wait_for_completion_timeout(&ipc->recv_complete,
+ rx_timeout_jiffies);
+ if (!tout) {
+ dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ipc->nack_mask &&
+ (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
+ dev_err(&isp->pdev->dev,
+ "IPC NACK for cmd 0x%x\n", msgs[i].cmd);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ipc->recv_data != msgs[i].expected_resp) {
+ dev_err(&isp->pdev->dev,
+ "expected resp: 0x%x, IPC response: 0x%x ",
+ msgs[i].expected_resp, ipc->recv_data);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");
+
+out:
+ ipu6_buttress_ipc_validity_close(isp, ipc);
+ mutex_unlock(&b->ipc_mutex);
+ return ret;
+}
+
+static int
+ipu6_buttress_ipc_send(struct ipu6_device *isp,
+ enum ipu6_buttress_ipc_domain ipc_domain,
+ u32 ipc_msg, u32 size, bool require_resp,
+ u32 expected_resp)
+{
+ struct ipu6_ipc_buttress_bulk_msg msg = {
+ .cmd = ipc_msg,
+ .cmd_size = size,
+ .require_resp = require_resp,
+ .expected_resp = expected_resp,
+ };
+
+ return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1);
+}
+
+static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
+{
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+
+ if (!adev || !adev->auxdrv || !adev->auxdrv_data)
+ return IRQ_NONE;
+
+ if (adev->auxdrv_data->isr)
+ ret = adev->auxdrv_data->isr(adev);
+
+ if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
+ ret = IRQ_NONE;
+
+ return ret;
+}
+
+irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
+{
+ struct ipu6_device *isp = isp_ptr;
+ struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
+ struct ipu6_buttress *b = &isp->buttress;
+ u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
+ irqreturn_t ret = IRQ_NONE;
+ u32 disable_irqs = 0;
+ u32 irq_status;
+ u32 i, count = 0;
+
+ pm_runtime_get_noresume(&isp->pdev->dev);
+
+ irq_status = readl(isp->base + reg_irq_sts);
+ if (!irq_status) {
+ pm_runtime_put_noidle(&isp->pdev->dev);
+ return IRQ_NONE;
+ }
+
+ do {
+ writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+ for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
+ irqreturn_t r = ipu6_buttress_call_isr(adev[i]);
+
+ if (!(irq_status & ipu6_adev_irq_mask[i]))
+ continue;
+
+ if (r == IRQ_WAKE_THREAD) {
+ ret = IRQ_WAKE_THREAD;
+ disable_irqs |= ipu6_adev_irq_mask[i];
+ } else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
+ ret = IRQ_HANDLED;
+
+ if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
+ ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+ complete(&b->cse.recv_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
+ ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
+ complete(&b->ish.recv_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+ complete(&b->cse.send_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n");
+ complete(&b->ish.send_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
+ ipu6_buttress_get_secure_mode(isp))
+ dev_err(&isp->pdev->dev,
+ "BUTTRESS_ISR_SAI_VIOLATION\n");
+
+ if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
+ BUTTRESS_ISR_PS_FATAL_MEM_ERR))
+ dev_err(&isp->pdev->dev,
+ "BUTTRESS_ISR_FATAL_MEM_ERR\n");
+
+ if (irq_status & BUTTRESS_ISR_UFI_ERROR)
+ dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");
+
+ if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
+ dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
+ ret = IRQ_NONE;
+ break;
+ }
+
+ irq_status = readl(isp->base + reg_irq_sts);
+ } while (irq_status);
+
+ if (disable_irqs)
+ writel(BUTTRESS_IRQS & ~disable_irqs,
+ isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ pm_runtime_put(&isp->pdev->dev);
+
+ return ret;
+}
+
+irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+ struct ipu6_device *isp = isp_ptr;
+ struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
+ const struct ipu6_auxdrv_data *drv_data = NULL;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) {
+ drv_data = adev[i]->auxdrv_data;
+ if (!drv_data)
+ continue;
+
+ if (drv_data->wake_isr_thread &&
+ drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ return ret;
+}
+
+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
+ bool on)
+{
+ struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
+ u32 pwr_sts, val;
+ int ret;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ if (!on) {
+ val = 0;
+ pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+ } else {
+ val = BUTTRESS_FREQ_CTL_START |
+ FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK,
+ ctrl->ratio) |
+ FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK,
+ ctrl->qos_floor) |
+ BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;
+
+ pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+ }
+
+ writel(val, isp->base + ctrl->freq_ctl);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
+ val, (val & ctrl->pwr_sts_mask) == pwr_sts,
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret)
+ dev_err(&isp->pdev->dev,
+ "Change power status timeout with 0x%x\n", val);
+
+ ctrl->started = !ret && on;
+
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp)
+{
+ u32 val;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+}
+
+bool ipu6_buttress_auth_done(struct ipu6_device *isp)
+{
+ u32 val;
+
+ if (!isp->secure_mode)
+ return true;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+ val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);
+
+ return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6);
+
+int ipu6_buttress_reset_authentication(struct ipu6_device *isp)
+{
+ int ret;
+ u32 val;
+
+ if (!isp->secure_mode) {
+ dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
+ return 0;
+ }
+
+ writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
+ BUTTRESS_REG_FW_RESET_CTL);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
+ val & BUTTRESS_FW_RESET_CTL_DONE, 500,
+ BUTTRESS_CSE_FWRESET_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev,
+ "Time out while resetting authentication state\n");
+ return ret;
+ }
+
+ dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n");
+ writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
+ /* leave some time for HW restore */
+ usleep_range(800, 1000);
+
+ return 0;
+}
+
+int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+ const struct firmware *fw, struct sg_table *sgt)
+{
+ bool is_vmalloc = is_vmalloc_addr(fw->data);
+ struct page **pages;
+ const void *addr;
+ unsigned long n_pages;
+ unsigned int i;
+ int ret;
+
+ if (!is_vmalloc && !virt_addr_valid(fw->data))
+ return -EDOM;
+
+ n_pages = PHYS_PFN(PAGE_ALIGN(fw->size));
+
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ addr = fw->data;
+ for (i = 0; i < n_pages; i++) {
+ struct page *p = is_vmalloc ?
+ vmalloc_to_page(addr) : virt_to_page(addr);
+
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[i] = p;
+ addr += PAGE_SIZE;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
+ GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = dma_map_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ sg_free_table(sgt);
+ goto out;
+ }
+
+ dma_sync_sgtable_for_device(&sys->auxdev.dev, sgt, DMA_TO_DEVICE);
+
+out:
+ kfree(pages);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);
+
+void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
+ struct sg_table *sgt)
+{
+ dma_unmap_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
+ sg_free_table(sgt);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);
+
+int ipu6_buttress_authenticate(struct ipu6_device *isp)
+{
+ struct ipu6_buttress *b = &isp->buttress;
+ struct ipu6_psys_pdata *psys_pdata;
+ u32 data, mask, done, fail;
+ int ret;
+
+ if (!isp->secure_mode) {
+ dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
+ return 0;
+ }
+
+ psys_pdata = isp->psys->pdata;
+
+ mutex_lock(&b->auth_mutex);
+
+ if (ipu6_buttress_auth_done(isp)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ /*
+ * Write the address of the FIT table to the FW_SOURCE register.
+ * For now the firmware address is used directly, i.e. the FIT
+ * table is not used yet.
+ */
+ data = lower_32_bits(isp->psys->pkg_dir_dma_addr);
+ writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);
+
+ data = upper_32_bits(isp->psys->pkg_dir_dma_addr);
+ writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);
+
+ /*
+ * Write boot_load into IU2CSEDATA0.
+ * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+ * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY.
+ */
+ dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");
+
+ ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
+ BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+ 1, true,
+ BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+ goto out_unlock;
+ }
+
+ mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+ done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
+ fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
+ goto out_unlock;
+ }
+
+ if ((data & mask) == fail) {
+ dev_err(&isp->pdev->dev, "CSE auth failed\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
+ data, data == BOOTLOADER_MAGIC_KEY, 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n",
+ data);
+ goto out_unlock;
+ }
+
+ /*
+ * Write authenticate_run into IU2CSEDATA0.
+ * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+ * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY.
+ */
+ dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
+ ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
+ BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
+ 1, true,
+ BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
+ goto out_unlock;
+ }
+
+ done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
+ goto out_unlock;
+ }
+
+ if ((data & mask) == fail) {
+ dev_err(&isp->pdev->dev, "CSE authenticate failed\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");
+
+out_unlock:
+ mutex_unlock(&b->auth_mutex);
+
+ return ret;
+}
+
+static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp)
+{
+ u32 val, mask, done;
+ int ret;
+
+ mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;
+
+ writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
+ isp->base + BUTTRESS_REG_FABRIC_CMD);
+
+ val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+ val = FIELD_GET(mask, val);
+ if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
+ dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
+ return -EINVAL;
+ }
+
+ done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
+ FIELD_GET(mask, val) == done, 500,
+ BUTTRESS_TSC_SYNC_TIMEOUT_US);
+ if (ret)
+ dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");
+
+ return ret;
+}
+
+int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+ u32 val;
+ int ret;
+
+ ret = ipu6_buttress_send_tsc_request(isp);
+ if (ret != -ETIMEDOUT)
+ return ret;
+
+ val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
+ val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
+ writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+ val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET;
+ writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+ }
+
+ dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6);
+
+void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val)
+{
+ u32 tsc_hi_1, tsc_hi_2, tsc_lo;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
+ tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
+ tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
+ if (tsc_hi_1 == tsc_hi_2) {
+ *val = (u64)tsc_hi_1 << 32 | tsc_lo;
+ } else {
+ /* Check if TSC has rolled over */
+ if (tsc_lo & BIT(31))
+ *val = (u64)tsc_hi_1 << 32 | tsc_lo;
+ else
+ *val = (u64)tsc_hi_2 << 32 | tsc_lo;
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6);
+
+u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp)
+{
+ u64 ns = ticks * 10000;
+
+ /*
+ * Conversion from TSC ticks to ns, for example with a 19.2 MHz
+ * TSC clock:
+ * ns = ticks * 1000000000 / 19.2 MHz
+ *    = ticks * 1000000000 / 19200000 Hz
+ *    = ticks * 10000 / 192
+ */
+ return div_u64(ns, isp->buttress.ref_clk);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6);
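To make the comment above concrete: ref_clk is stored in units of 100 kHz (192 for a 19.2 MHz TSC, see ipu6_buttress_init() below), so the conversion reduces to ticks * 10000 / ref_clk. A minimal user-space sketch of the same arithmetic (the helper name here is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* ref_clk_100khz: reference clock in units of 100 kHz (192 == 19.2 MHz). */
    static uint64_t tsc_ticks_to_ns(uint64_t ticks, uint32_t ref_clk_100khz)
    {
            /* ns = ticks * 1e9 / (ref_clk_100khz * 100000) = ticks * 10000 / ref_clk_100khz */
            return ticks * 10000 / ref_clk_100khz;
    }

    int main(void)
    {
            /* 19200000 ticks at 19.2 MHz is exactly one second. */
            printf("%llu ns\n", (unsigned long long)tsc_ticks_to_ns(19200000, 192));
            return 0;
    }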
+
+void ipu6_buttress_restore(struct ipu6_device *isp)
+{
+ struct ipu6_buttress *b = &isp->buttress;
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+ writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
+}
+
+int ipu6_buttress_init(struct ipu6_device *isp)
+{
+ int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+ struct ipu6_buttress *b = &isp->buttress;
+ u32 val;
+
+ mutex_init(&b->power_mutex);
+ mutex_init(&b->auth_mutex);
+ mutex_init(&b->cons_mutex);
+ mutex_init(&b->ipc_mutex);
+ init_completion(&b->ish.send_complete);
+ init_completion(&b->cse.send_complete);
+ init_completion(&b->ish.recv_complete);
+ init_completion(&b->cse.recv_complete);
+
+ b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+ b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+ b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+ b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+ b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+ b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+ b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+ b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+ /* no ISH on IPU6 */
+ memset(&b->ish, 0, sizeof(b->ish));
+ INIT_LIST_HEAD(&b->constraints);
+
+ isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
+ dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
+ isp->secure_mode ? "secure" : "non-secure",
+ readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
+ readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
+
+ b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ /* get ref_clk frequency by reading the indication in btrs control */
+ val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+ val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val);
+
+ switch (val) {
+ case 0x0:
+ b->ref_clk = 240;
+ break;
+ case 0x1:
+ b->ref_clk = 192;
+ break;
+ case 0x2:
+ b->ref_clk = 384;
+ break;
+ default:
+ dev_warn(&isp->pdev->dev,
+ "Unsupported ref clock, using 19.2 MHz by default.\n");
+ b->ref_clk = 192;
+ break;
+ }
+
+ /* Retry a couple of times in case CSE initialization is delayed */
+ do {
+ ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+ if (ret) {
+ dev_warn(&isp->pdev->dev,
+ "IPC reset protocol failed, retrying\n");
+ } else {
+ dev_dbg(&isp->pdev->dev, "IPC reset done\n");
+ return 0;
+ }
+ } while (ipc_reset_retry--);
+
+ dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+
+ return ret;
+}
+
+void ipu6_buttress_exit(struct ipu6_device *isp)
+{
+ struct ipu6_buttress *b = &isp->buttress;
+
+ writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
new file mode 100644
index 000000000000..9b6f56958be7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_BUTTRESS_H
+#define IPU6_BUTTRESS_H
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct device;
+struct firmware;
+struct ipu6_device;
+struct ipu6_bus_device;
+
+#define BUTTRESS_PS_FREQ_STEP 25U
+#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32)
+
+#define BUTTRESS_IS_FREQ_STEP 25U
+#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22)
+
+struct ipu6_buttress_ctrl {
+ u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+ unsigned int ratio;
+ unsigned int qos_floor;
+ bool started;
+};
+
+struct ipu6_buttress_ipc {
+ struct completion send_complete;
+ struct completion recv_complete;
+ u32 nack;
+ u32 nack_mask;
+ u32 recv_data;
+ u32 csr_out;
+ u32 csr_in;
+ u32 db0_in;
+ u32 db0_out;
+ u32 data0_out;
+ u32 data0_in;
+};
+
+struct ipu6_buttress {
+ struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+ struct ipu6_buttress_ipc cse;
+ struct ipu6_buttress_ipc ish;
+ struct list_head constraints;
+ u32 wdt_cached_value;
+ bool force_suspend;
+ u32 ref_clk;
+};
+
+enum ipu6_buttress_ipc_domain {
+ IPU6_BUTTRESS_IPC_CSE,
+ IPU6_BUTTRESS_IPC_ISH,
+};
+
+struct ipu6_ipc_buttress_bulk_msg {
+ u32 cmd;
+ u32 expected_resp;
+ bool require_resp;
+ u8 cmd_size;
+};
+
+int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
+ struct ipu6_buttress_ipc *ipc);
+int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+ const struct firmware *fw,
+ struct sg_table *sgt);
+void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
+ struct sg_table *sgt);
+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
+ bool on);
+bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp);
+int ipu6_buttress_authenticate(struct ipu6_device *isp);
+int ipu6_buttress_reset_authentication(struct ipu6_device *isp);
+bool ipu6_buttress_auth_done(struct ipu6_device *isp);
+int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp);
+void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val);
+u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp);
+
+irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr);
+irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr);
+int ipu6_buttress_init(struct ipu6_device *isp);
+void ipu6_buttress_exit(struct ipu6_device *isp);
+void ipu6_buttress_csi_port_config(struct ipu6_device *isp,
+ u32 legacy, u32 combo);
+void ipu6_buttress_restore(struct ipu6_device *isp);
+#endif /* IPU6_BUTTRESS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
new file mode 100644
index 000000000000..715b21ab4b8e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp_types.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+
+/* 15 entries + header */
+#define MAX_PKG_DIR_ENT_CNT 16
+/* 2 qword per entry/header */
+#define PKG_DIR_ENT_LEN 2
+/* PKG_DIR size in bytes */
+#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \
+ (PKG_DIR_ENT_LEN) * sizeof(u64))
+/* _IUPKDR_ */
+#define PKG_DIR_HDR_MARK 0x5f4955504b44525fULL
+
+/* $CPD */
+#define CPD_HDR_MARK 0x44504324
+
+#define MAX_MANIFEST_SIZE (SZ_2K * sizeof(u32))
+#define MAX_METADATA_SIZE SZ_64K
+
+#define MAX_COMPONENT_ID 127
+#define MAX_COMPONENT_VERSION 0xffff
+
+#define MANIFEST_IDX 0
+#define METADATA_IDX 1
+#define MODULEDATA_IDX 2
+/*
+ * PKG_DIR Entry (type == id)
+ * 63:56 55 54:48 47:32 31:24 23:0
+ * Rsvd Rsvd Type Version Rsvd Size
+ */
+#define PKG_DIR_SIZE_MASK GENMASK(23, 0)
+#define PKG_DIR_VERSION_MASK GENMASK(47, 32)
+#define PKG_DIR_TYPE_MASK GENMASK(54, 48)
+
+static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd,
+ u8 idx)
+{
+ const struct ipu6_cpd_hdr *cpd_hdr = cpd;
+ const struct ipu6_cpd_ent *ent;
+
+ ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len);
+ return ent + idx;
+}
+
+#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX)
+#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX)
+#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX)
+
+static const struct ipu6_cpd_metadata_cmpnt_hdr *
+ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn);
+ size_t cmpnt_count = metadata_size - extn_size;
+
+ cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size);
+
+ if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+ dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+ idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size;
+}
+
+static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;
+
+ cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->ver;
+}
+
+static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;
+
+ cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->id;
+}
+
+static int ipu6_cpd_parse_module_data(struct ipu6_device *isp,
+ const void *module_data,
+ unsigned int module_data_size,
+ dma_addr_t dma_addr_module_data,
+ u64 *pkg_dir, const void *metadata,
+ unsigned int metadata_size)
+{
+ const struct ipu6_cpd_module_data_hdr *module_data_hdr;
+ const struct ipu6_cpd_hdr *dir_hdr;
+ const struct ipu6_cpd_ent *dir_ent;
+ unsigned int i;
+ u8 len;
+
+ if (!module_data)
+ return -EINVAL;
+
+ module_data_hdr = module_data;
+ dir_hdr = module_data + module_data_hdr->hdr_len;
+ len = dir_hdr->hdr_len;
+ dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len);
+
+ pkg_dir[0] = PKG_DIR_HDR_MARK;
+ /* pkg_dir entry count = component count + pkg_dir header */
+ pkg_dir[1] = dir_hdr->ent_cnt + 1;
+
+ for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) {
+ u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)];
+ int ver, id;
+
+ *p++ = dma_addr_module_data + dir_ent->offset;
+ id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata,
+ metadata_size, i);
+ if (id < 0 || id > MAX_COMPONENT_ID) {
+ dev_err(&isp->pdev->dev, "Invalid CPD component id\n");
+ return -EINVAL;
+ }
+
+ ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata,
+ metadata_size, i);
+ if (ver < 0 || ver > MAX_COMPONENT_VERSION) {
+ dev_err(&isp->pdev->dev,
+ "Invalid CPD component version\n");
+ return -EINVAL;
+ }
+
+ *p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) |
+ FIELD_PREP(PKG_DIR_TYPE_MASK, id) |
+ FIELD_PREP(PKG_DIR_VERSION_MASK, ver);
+ }
+
+ return 0;
+}
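For illustration, the second qword of each pkg_dir entry written above packs size into bits 23:0, version into bits 47:32 and the component type/id into bits 54:48, per the PKG_DIR_*_MASK definitions earlier in this file. A standalone sketch with made-up values, using plain shifts in place of FIELD_PREP():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t size = 0x1234;   /* bits 23:0  */
            uint64_t ver  = 0x0102;   /* bits 47:32 */
            uint64_t type = 0x05;     /* bits 54:48 */
            uint64_t qword = (size & 0xffffff) |
                             (ver & 0xffff) << 32 |
                             (type & 0x7f) << 48;

            /* prints 0x0005010200001234 */
            printf("pkg_dir entry qword: 0x%016llx\n", (unsigned long long)qword);
            return 0;
    }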
+
+int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+{
+ dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl);
+ const struct ipu6_cpd_ent *ent, *man_ent, *met_ent;
+ struct device *dev = &adev->auxdev.dev;
+ struct ipu6_device *isp = adev->isp;
+ unsigned int man_sz, met_sz;
+ void *pkg_dir_pos;
+ int ret;
+
+ man_ent = ipu6_cpd_get_manifest(src);
+ man_sz = man_ent->len;
+
+ met_ent = ipu6_cpd_get_metadata(src);
+ met_sz = met_ent->len;
+
+ adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
+ adev->pkg_dir = dma_alloc_attrs(dev, adev->pkg_dir_size,
+ &adev->pkg_dir_dma_addr, GFP_KERNEL, 0);
+ if (!adev->pkg_dir)
+ return -ENOMEM;
+
+ /*
+ * pkg_dir entry/header:
+ * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0
+ * N Address/Offset/"_IUPKDR_"
+ * N + 1 | rsvd | rsvd | type | ver | rsvd | size
+ *
+ * We can ignore the fields other than size in the N + 1 qword as
+ * they are 0 anyway. Only the size is set for now.
+ */
+
+ ent = ipu6_cpd_get_moduledata(src);
+
+ ret = ipu6_cpd_parse_module_data(isp, src + ent->offset,
+ ent->len, dma_addr_src + ent->offset,
+ adev->pkg_dir, src + met_ent->offset,
+ met_ent->len);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to parse module data\n");
+ dma_free_attrs(dev, adev->pkg_dir_size,
+ adev->pkg_dir, adev->pkg_dir_dma_addr, 0);
+ return ret;
+ }
+
+ /* Copy manifest after pkg_dir */
+ pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT;
+ memcpy(pkg_dir_pos, src + man_ent->offset, man_sz);
+
+ /* Copy metadata after manifest */
+ pkg_dir_pos += man_sz;
+ memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);
+
+ dma_sync_single_range_for_device(dev, adev->pkg_dir_dma_addr,
+ 0, adev->pkg_dir_size, DMA_TO_DEVICE);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6);
+
+void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev)
+{
+ dma_free_attrs(&adev->auxdev.dev, adev->pkg_dir_size, adev->pkg_dir,
+ adev->pkg_dir_dma_addr, 0);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6);
+
+static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd,
+ unsigned long cpd_size,
+ unsigned long data_size)
+{
+ const struct ipu6_cpd_hdr *cpd_hdr = cpd;
+ const struct ipu6_cpd_ent *ent;
+ unsigned int i;
+ u8 len;
+
+ len = cpd_hdr->hdr_len;
+
+ /* Ensure cpd hdr is within moduledata */
+ if (cpd_size < len) {
+ dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for CPD header */
+ if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
+ dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that all entries are within moduledata */
+ ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len);
+ for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) {
+ if (data_size < ent->offset ||
+ data_size - ent->offset < ent->len) {
+ dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp,
+ const void *moduledata,
+ u32 moduledata_size)
+{
+ const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata;
+ int ret;
+
+ /* Ensure moduledata hdr is within moduledata */
+ if (moduledata_size < sizeof(*mod_hdr) ||
+ moduledata_size < mod_hdr->hdr_len) {
+ dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+ return -EINVAL;
+ }
+
+ dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+ ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len,
+ moduledata_size - mod_hdr->hdr_len,
+ moduledata_size);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ipu6_cpd_validate_metadata(struct ipu6_device *isp,
+ const void *metadata, u32 meta_size)
+{
+ const struct ipu6_cpd_metadata_extn *extn = metadata;
+
+ /* Sanity check for metadata size */
+ if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) {
+ dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
+ return -EINVAL;
+ }
+
+ /* Validate extension and image types */
+ if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT ||
+ extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) {
+ dev_err(&isp->pdev->dev,
+ "Invalid CPD metadata descriptor img_type (%d)\n",
+ extn->img_type);
+ return -EINVAL;
+ }
+
+ /* Validate metadata size multiple of metadata components */
+ if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) {
+ dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file,
+ unsigned long cpd_file_size)
+{
+ const struct ipu6_cpd_hdr *hdr = cpd_file;
+ const struct ipu6_cpd_ent *ent;
+ int ret;
+
+ ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size,
+ cpd_file_size);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Invalid CPD in file\n");
+ return ret;
+ }
+
+ /* Check for CPD file marker */
+ if (hdr->hdr_mark != CPD_HDR_MARK) {
+ dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for manifest size */
+ ent = ipu6_cpd_get_manifest(cpd_file);
+ if (ent->len > MAX_MANIFEST_SIZE) {
+ dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n");
+ return -EINVAL;
+ }
+
+ /* Validate metadata */
+ ent = ipu6_cpd_get_metadata(cpd_file);
+ ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
+ return ret;
+ }
+
+ /* Validate moduledata */
+ ent = ipu6_cpd_get_moduledata(cpd_file);
+ ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset,
+ ent->len);
+ if (ret)
+ dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n");
+
+ return ret;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/drivers/media/pci/intel/ipu6/ipu6-cpd.h
new file mode 100644
index 000000000000..e0e4fdeca902
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015--2024 Intel Corporation */
+
+#ifndef IPU6_CPD_H
+#define IPU6_CPD_H
+
+struct ipu6_device;
+struct ipu6_bus_device;
+
+#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7
+#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11
+#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12
+
+#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10
+
+#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0
+#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1
+#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2
+
+#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0
+#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1
+
+#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3
+
+#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48
+#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32
+
+struct ipu6_cpd_module_data_hdr {
+ u32 hdr_len;
+ u32 endian;
+ u32 fw_pkg_date;
+ u32 hive_sdk_date;
+ u32 compiler_date;
+ u32 target_platform_type;
+ u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION];
+ u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION];
+ u8 rsvd[2];
+} __packed;
+
+/*
+ * ipu6_cpd_hdr structure updated as the chksum and
+ * sub_partition_name is unused on host side
+ * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10)
+ * CSE layout version 1.7 for IPU6 (hdr_len = 0x14)
+ */
+struct ipu6_cpd_hdr {
+ u32 hdr_mark;
+ u32 ent_cnt;
+ u8 hdr_ver;
+ u8 ent_ver;
+ u8 hdr_len;
+} __packed;
+
+struct ipu6_cpd_ent {
+ u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME];
+ u32 offset;
+ u32 len;
+ u8 rsvd[4];
+} __packed;
+
+struct ipu6_cpd_metadata_cmpnt_hdr {
+ u32 id;
+ u32 size;
+ u32 ver;
+} __packed;
+
+struct ipu6_cpd_metadata_cmpnt {
+ struct ipu6_cpd_metadata_cmpnt_hdr hdr;
+ u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE];
+ u32 entry_point;
+ u32 icache_base_offs;
+ u8 attrs[16];
+} __packed;
+
+struct ipu6se_cpd_metadata_cmpnt {
+ struct ipu6_cpd_metadata_cmpnt_hdr hdr;
+ u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE];
+ u32 entry_point;
+ u32 icache_base_offs;
+ u8 attrs[16];
+} __packed;
+
+struct ipu6_cpd_metadata_extn {
+ u32 extn_type;
+ u32 len;
+ u32 img_type;
+ u8 rsvd[16];
+} __packed;
+
+struct ipu6_cpd_client_pkg_hdr {
+ u32 prog_list_offs;
+ u32 prog_list_size;
+ u32 prog_desc_offs;
+ u32 prog_desc_size;
+ u32 pg_manifest_offs;
+ u32 pg_manifest_size;
+ u32 prog_bin_offs;
+ u32 prog_bin_size;
+} __packed;
+
+int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src);
+void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev);
+int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file,
+ unsigned long cpd_file_size);
+#endif /* IPU6_CPD_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
new file mode 100644
index 000000000000..92530a1cc90f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/iova.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+
+struct vm_info {
+ struct list_head list;
+ struct page **pages;
+ dma_addr_t ipu6_iova;
+ void *vaddr;
+ unsigned long size;
+};
+
+static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
+{
+ struct vm_info *info, *save;
+
+ list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+ if (iova >= info->ipu6_iova &&
+ iova < (info->ipu6_iova + info->size))
+ return info;
+ }
+
+ return NULL;
+}
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+ unsigned long attrs)
+{
+ void *ptr;
+
+ if (!page)
+ return;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ memset(ptr, 0, size);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, size);
+}
+
+static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, unsigned long attrs)
+{
+ int count = PHYS_PFN(size);
+ int array_size = count * sizeof(struct page *);
+ struct page **pages;
+ int i = 0;
+
+ pages = kvzalloc(array_size, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_NOWARN;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (j--)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ kvfree(pages);
+ return NULL;
+}
+
+static void __dma_free_buffer(struct device *dev, struct page **pages,
+ size_t size, unsigned long attrs)
+{
+ int count = PHYS_PFN(size);
+ unsigned int i;
+
+ for (i = 0; i < count && pages[i]; i++) {
+ __dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+ __free_pages(pages[i], 0);
+ }
+
+ kvfree(pages);
+}
+
+static void ipu6_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ void *vaddr;
+ u32 offset;
+ struct vm_info *info;
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ offset = dma_handle - info->ipu6_iova;
+ if (WARN_ON(size > (info->size - offset)))
+ return;
+
+ vaddr = info->vaddr + offset;
+ clflush_cache_range(vaddr, size);
+}
+
+static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+}
+
+static void *ipu6_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ dma_addr_t pci_dma_addr, ipu6_iova;
+ struct vm_info *info;
+ unsigned long count;
+ struct page **pages;
+ struct iova *iova;
+ unsigned int i;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = PHYS_PFN(size);
+
+ iova = alloc_iova(&mmu->dmap->iovad, count,
+ PHYS_PFN(dma_get_mask(dev)), 0);
+ if (!iova)
+ goto out_kfree;
+
+ pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ goto out_free_iova;
+
+ dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
+ size, iova->pfn_lo, iova->pfn_hi);
+ for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+ pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
+ &pci_dma_addr);
+ if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
+ dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
+ goto out_unmap;
+ }
+
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info,
+ PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
+ PAGE_SIZE);
+ if (ret) {
+ dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed",
+ i, &pci_dma_addr);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ goto out_unmap;
+ }
+ }
+
+ info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!info->vaddr)
+ goto out_unmap;
+
+ *dma_handle = PFN_PHYS(iova->pfn_lo);
+
+ info->pages = pages;
+ info->ipu6_iova = *dma_handle;
+ info->size = size;
+ list_add(&info->list, &mmu->vma_list);
+
+ return info->vaddr;
+
+out_unmap:
+ while (i--) {
+ ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu6_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
+ }
+
+ __dma_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
+ kfree(info);
+
+ return NULL;
+}
+
+static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
+ dma_addr_t pci_dma_addr, ipu6_iova;
+ struct vm_info *info;
+ struct page **pages;
+ unsigned int i;
+
+ if (WARN_ON(!iova))
+ return;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ if (WARN_ON(!info->vaddr))
+ return;
+
+ if (WARN_ON(!info->pages))
+ return;
+
+ list_del(&info->list);
+
+ size = PAGE_ALIGN(size);
+
+ pages = info->pages;
+
+ vunmap(vaddr);
+
+ for (i = 0; i < PHYS_PFN(size); i++) {
+ ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu6_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+ }
+
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+
+ __dma_free_buffer(dev, pages, size, attrs);
+
+ mmu->tlb_invalidate(mmu);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+
+ kfree(info);
+}
+
+static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ size_t count = PHYS_PFN(PAGE_ALIGN(size));
+ struct vm_info *info;
+ size_t i;
+ int ret;
+
+ info = get_vm_info(mmu, iova);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (size > info->size)
+ return -EFAULT;
+
+ for (i = 0; i < count; i++) {
+ ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
+ info->pages[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipu6_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ PHYS_PFN(sg_dma_address(sglist)));
+ int i, npages, count;
+ struct scatterlist *sg;
+ dma_addr_t pci_dma_addr;
+
+ if (!nents)
+ return;
+
+ if (WARN_ON(!iova))
+ return;
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	/* work out how many of the caller's nents were actually mapped */
+ count = 0;
+ npages = iova_size(iova);
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg_dma_len(sg) == 0 ||
+ sg_dma_address(sg) == DMA_MAPPING_ERROR)
+ break;
+
+ npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+ count++;
+ if (npages <= 0)
+ break;
+ }
+
+	/*
+	 * Before the IPU6 MMU unmap, return the PCI DMA addresses back to
+	 * the sg list. The mapped count may be smaller than orig_nents as
+	 * the smallest mapping granule is one SZ_4K page.
+	 */
+ dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
+ for_each_sg(sglist, sg, count, i) {
+ dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i, &sg_dma_address(sg));
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ sg_dma_address(sg));
+ dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
+ &pci_dma_addr, i);
+ sg_dma_address(sg) = pci_dma_addr;
+ }
+
+ dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+
+ mmu->tlb_invalidate(mmu);
+
+ dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+}
+
+static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t npages = 0;
+ unsigned long iova_addr;
+ int i, count;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg->offset) {
+ dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
+ i, sg->offset);
+ return -EFAULT;
+ }
+ }
+
+ dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
+ count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+ if (count <= 0) {
+ dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
+ return 0;
+ }
+
+ dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);
+
+ for_each_sg(sglist, sg, count, i)
+ npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+
+ iova = alloc_iova(&mmu->dmap->iovad, npages,
+ PHYS_PFN(dma_get_mask(dev)), 0);
+ if (!iova)
+ return 0;
+
+ dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+ iova->pfn_hi);
+
+ iova_addr = iova->pfn_lo;
+ for_each_sg(sglist, sg, count, i) {
+ int ret;
+
+ dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
+ i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+ sg_dma_address(sg),
+ PAGE_ALIGN(sg_dma_len(sg)));
+ if (ret)
+ goto out_fail;
+
+ sg_dma_address(sg) = PFN_PHYS(iova_addr);
+
+ iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+ }
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ return count;
+
+out_fail:
+ ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);
+
+ return 0;
+}
+
+/*
+ * Create scatter-list for the already allocated DMA buffer
+ */
+static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct vm_info *info;
+ int n_pages;
+ int ret = 0;
+
+ info = get_vm_info(mmu, handle);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (WARN_ON(!info->pages))
+ return -ENOMEM;
+
+ n_pages = PHYS_PFN(PAGE_ALIGN(size));
+
+ ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+ GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "IPU6 get sgt table failed\n");
+
+ return ret;
+}
+
+const struct dma_map_ops ipu6_dma_ops = {
+ .alloc = ipu6_dma_alloc,
+ .free = ipu6_dma_free,
+ .mmap = ipu6_dma_mmap,
+ .map_sg = ipu6_dma_map_sg,
+ .unmap_sg = ipu6_dma_unmap_sg,
+ .sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
+ .sync_single_for_device = ipu6_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
+ .get_sgtable = ipu6_dma_get_sgtable,
+};
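+
+/*
+ * For illustration: these ops are reached through the regular DMA API once
+ * a device's dma_ops point at ipu6_dma_ops. A minimal sketch of an
+ * allocation from such a sub-driver, with "adev" standing for a
+ * hypothetical struct ipu6_bus_device pointer, would be:
+ *
+ *	dma_addr_t dma_handle;
+ *	void *buf = dma_alloc_attrs(&adev->auxdev.dev, SZ_16K, &dma_handle,
+ *				    GFP_KERNEL, 0);
+ *
+ *	if (buf)
+ *		dma_free_attrs(&adev->auxdev.dev, SZ_16K, buf, dma_handle, 0);
+ *
+ * dma_handle is then an IPU6 IOVA and buf the vmap()ed CPU address; the
+ * underlying pages are also mapped for PCI DMA by ipu6_dma_alloc().
+ */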
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
new file mode 100644
index 000000000000..847ea5b7c925
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_DMA_H
+#define IPU6_DMA_H
+
+#include <linux/dma-map-ops.h>
+#include <linux/iova.h>
+
+struct ipu6_mmu_info;
+
+struct ipu6_dma_mapping {
+ struct ipu6_mmu_info *mmu_info;
+ struct iova_domain iovad;
+};
+
+extern const struct dma_map_ops ipu6_dma_ops;
+
+#endif /* IPU6_DMA_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
new file mode 100644
index 000000000000..0b33fe9e703d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-com.h"
+
+/*
+ * The FWCOM layer is a shared resource between the FW and the driver. It
+ * consists of token queues in both the send and receive directions. A queue
+ * is simply an array of structures plus read and write indexes into it.
+ * There are 1..n queues in each direction. The queues are located in system
+ * RAM and mapped through the ISP MMU so that both the CPU and the ISP see
+ * the same buffer. The indexes are located in ISP DMEM so that the FW can
+ * poll them with very low latency and cost. CPU access to the indexes is
+ * more costly, but it only happens when sending a message and when handling
+ * an interrupt-triggered message; the CPU never needs to poll the indexes.
+ * wr_reg / rd_reg are offsets to those DMEM locations; they are not the
+ * indexes themselves.
+ */
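+
+/*
+ * A worked example of the index arithmetic used by the token helpers below:
+ * a queue configured for 8 tokens gets size = 8 + 1, since one slot is kept
+ * empty to distinguish a full queue from an empty one. With, say, wr = 7
+ * and rd = 2, the sender sees size - (wr - rd + 1) = 3 free slots and the
+ * receiver sees wr - rd = 5 pending tokens. The numbers are illustrative
+ * only; see ipu6_send_get_token() and ipu6_recv_get_token().
+ */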
+
+/* Shared structure between driver and FW - do not modify */
+struct ipu6_fw_sys_queue {
+ u64 host_address;
+ u32 vied_address;
+ u32 size;
+ u32 token_size;
+ u32 wr_reg; /* reg number in subsystem's regmem */
+ u32 rd_reg;
+ u32 _align;
+} __packed;
+
+struct ipu6_fw_sys_queue_res {
+ u64 host_address;
+ u32 vied_address;
+ u32 reg;
+} __packed;
+
+enum syscom_state {
+ /* Program load or explicit host setting should init to this */
+ SYSCOM_STATE_UNINIT = 0x57a7e000,
+ /* SP Syscom sets this when it is ready for use */
+ SYSCOM_STATE_READY = 0x57a7e001,
+ /* SP Syscom sets this when no more syscom accesses will happen */
+ SYSCOM_STATE_INACTIVE = 0x57a7e002,
+};
+
+enum syscom_cmd {
+ /* Program load or explicit host setting should init to this */
+ SYSCOM_COMMAND_UNINIT = 0x57a7f000,
+ /* Host Syscom requests syscom to become inactive */
+ SYSCOM_COMMAND_INACTIVE = 0x57a7f001,
+};
+
+/* firmware config: data sent from the host to the SP via DDR */
+/* Cell copies data into a context */
+
+struct ipu6_fw_syscom_config {
+ u32 firmware_address;
+
+ u32 num_input_queues;
+ u32 num_output_queues;
+
+ /* ISP pointers to an array of ipu6_fw_sys_queue structures */
+ u32 input_queue;
+ u32 output_queue;
+
+ /* ISYS / PSYS private data */
+ u32 specific_addr;
+ u32 specific_size;
+};
+
+struct ipu6_fw_com_context {
+ struct ipu6_bus_device *adev;
+ void __iomem *dmem_addr;
+ int (*cell_ready)(struct ipu6_bus_device *adev);
+ void (*cell_start)(struct ipu6_bus_device *adev);
+
+ void *dma_buffer;
+ dma_addr_t dma_addr;
+ unsigned int dma_size;
+ unsigned long attrs;
+
+ struct ipu6_fw_sys_queue *input_queue; /* array of host to SP queues */
+ struct ipu6_fw_sys_queue *output_queue; /* array of SP to host */
+
+ u32 config_vied_addr;
+
+ unsigned int buttress_boot_offset;
+ void __iomem *base_addr;
+};
+
+#define FW_COM_WR_REG 0
+#define FW_COM_RD_REG 4
+
+#define REGMEM_OFFSET 0
+#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a
+
+enum regmem_id {
+ /* pass pkg_dir address to SPC in non-secure mode */
+ PKG_DIR_ADDR_REG = 0,
+	/* Tunit CFG blob for secure mode - provided by the host. */
+ TUNIT_CFG_DWR_REG = 1,
+ /* syscom commands - modified by the host */
+ SYSCOM_COMMAND_REG = 2,
+ /* Store interrupt status - updated by SP */
+ SYSCOM_IRQ_REG = 3,
+ /* first syscom queue pointer register */
+ SYSCOM_QPR_BASE_REG = 4
+};
+
+#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000
+#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) \
+ ((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)
+
+enum buttress_syscom_id {
+ /* pass syscom configuration to SPC */
+ SYSCOM_CONFIG_ID = 0,
+ /* syscom state - modified by SP */
+ SYSCOM_STATE_ID = 1,
+ /* syscom vtl0 addr mask */
+ SYSCOM_VTL0_ADDR_MASK_ID = 2,
+ SYSCOM_ID_MAX
+};
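+
+/*
+ * The boot parameters are consecutive 32-bit words starting at
+ * base + 0x4000. For example, with the ISYS boot offset of 0,
+ * SYSCOM_CONFIG_ID lives at base + 0x4000, SYSCOM_STATE_ID at
+ * base + 0x4004 and SYSCOM_VTL0_ADDR_MASK_ID at base + 0x4008.
+ */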
+
+static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size,
+ unsigned int token_size,
+ struct ipu6_fw_sys_queue_res *res)
+{
+ unsigned int buf_size = (size + 1) * token_size;
+
+ q->size = size + 1;
+ q->token_size = token_size;
+
+ /* acquire the shared buffer space */
+ q->host_address = res->host_address;
+ res->host_address += buf_size;
+ q->vied_address = res->vied_address;
+ res->vied_address += buf_size;
+
+	/* acquire the shared read and write pointers */
+ q->wr_reg = res->reg;
+ res->reg++;
+ q->rd_reg = res->reg;
+ res->reg++;
+}
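+
+/*
+ * For example (numbers purely illustrative): a queue configured for 8
+ * tokens of 64 bytes claims (8 + 1) * 64 = 576 bytes of the shared buffer
+ * and two consecutive regmem registers, one for the write index and one
+ * for the read index.
+ */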
+
+void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+ struct ipu6_bus_device *adev, void __iomem *base)
+{
+ size_t conf_size, inq_size, outq_size, specific_size;
+ struct ipu6_fw_syscom_config *config_host_addr;
+ unsigned int sizeinput = 0, sizeoutput = 0;
+ struct ipu6_fw_sys_queue_res res;
+ struct ipu6_fw_com_context *ctx;
+ struct device *dev = &adev->auxdev.dev;
+ size_t sizeall, offset;
+ unsigned long attrs = 0;
+ void *specific_host_addr;
+ unsigned int i;
+
+ if (!cfg || !cfg->cell_start || !cfg->cell_ready)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
+ ctx->adev = adev;
+ ctx->cell_start = cfg->cell_start;
+ ctx->cell_ready = cfg->cell_ready;
+ ctx->buttress_boot_offset = cfg->buttress_boot_offset;
+ ctx->base_addr = base;
+
+ /*
+ * Allocate DMA mapped memory. Allocate one big chunk.
+ */
+ /* Base cfg for FW */
+ conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8);
+ /* Descriptions of the queues */
+ inq_size = size_mul(cfg->num_input_queues,
+ sizeof(struct ipu6_fw_sys_queue));
+ outq_size = size_mul(cfg->num_output_queues,
+ sizeof(struct ipu6_fw_sys_queue));
+ /* FW specific information structure */
+ specific_size = roundup(cfg->specific_size, 8);
+
+ sizeall = conf_size + inq_size + outq_size + specific_size;
+
+ for (i = 0; i < cfg->num_input_queues; i++)
+ sizeinput += size_mul(cfg->input[i].queue_size + 1,
+ cfg->input[i].token_size);
+
+ for (i = 0; i < cfg->num_output_queues; i++)
+ sizeoutput += size_mul(cfg->output[i].queue_size + 1,
+ cfg->output[i].token_size);
+
+ sizeall += sizeinput + sizeoutput;
+
+ ctx->dma_buffer = dma_alloc_attrs(dev, sizeall, &ctx->dma_addr,
+ GFP_KERNEL, attrs);
+ ctx->attrs = attrs;
+ if (!ctx->dma_buffer) {
+ dev_err(dev, "failed to allocate dma memory\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->dma_size = sizeall;
+
+ config_host_addr = ctx->dma_buffer;
+ ctx->config_vied_addr = ctx->dma_addr;
+
+ offset = conf_size;
+ ctx->input_queue = ctx->dma_buffer + offset;
+ config_host_addr->input_queue = ctx->dma_addr + offset;
+ config_host_addr->num_input_queues = cfg->num_input_queues;
+
+ offset += inq_size;
+ ctx->output_queue = ctx->dma_buffer + offset;
+ config_host_addr->output_queue = ctx->dma_addr + offset;
+ config_host_addr->num_output_queues = cfg->num_output_queues;
+
+ /* copy firmware specific data */
+ offset += outq_size;
+ specific_host_addr = ctx->dma_buffer + offset;
+ config_host_addr->specific_addr = ctx->dma_addr + offset;
+ config_host_addr->specific_size = cfg->specific_size;
+ if (cfg->specific_addr && cfg->specific_size)
+ memcpy(specific_host_addr, cfg->specific_addr,
+ cfg->specific_size);
+
+ /* initialize input queues */
+ offset += specific_size;
+ res.reg = SYSCOM_QPR_BASE_REG;
+ res.host_address = (u64)(ctx->dma_buffer + offset);
+ res.vied_address = ctx->dma_addr + offset;
+ for (i = 0; i < cfg->num_input_queues; i++)
+ ipu6_sys_queue_init(ctx->input_queue + i,
+ cfg->input[i].queue_size,
+ cfg->input[i].token_size, &res);
+
+ /* initialize output queues */
+ offset += sizeinput;
+ res.host_address = (u64)(ctx->dma_buffer + offset);
+ res.vied_address = ctx->dma_addr + offset;
+ for (i = 0; i < cfg->num_output_queues; i++) {
+ ipu6_sys_queue_init(ctx->output_queue + i,
+ cfg->output[i].queue_size,
+ cfg->output[i].token_size, &res);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6);
+
+int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx)
+{
+ /* write magic pattern to disable the tunit trace */
+ writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
+ /* Check if SP is in valid state */
+ if (!ctx->cell_ready(ctx->adev))
+ return -EIO;
+
+ /* store syscom uninitialized command */
+ writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);
+
+ /* store syscom uninitialized state */
+ writel(SYSCOM_STATE_UNINIT,
+ BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+
+ /* store firmware configuration address */
+ writel(ctx->config_vied_addr,
+ BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_CONFIG_ID));
+ ctx->cell_start(ctx->adev);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6);
+
+int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx)
+{
+ int state;
+
+ state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+ if (state != SYSCOM_STATE_READY)
+ return -EBUSY;
+
+ /* set close request flag */
+ writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
+ SYSCOM_COMMAND_REG * 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6);
+
+int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force)
+{
+	/* if the release is not forced, verify the cell state first */
+ if (!force && !ctx->cell_ready(ctx->adev))
+ return -EBUSY;
+
+ dma_free_attrs(&ctx->adev->auxdev.dev, ctx->dma_size,
+ ctx->dma_buffer, ctx->dma_addr, ctx->attrs);
+ kfree(ctx);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6);
+
+bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx)
+{
+ int state;
+
+ state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+
+ return state == SYSCOM_STATE_READY;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6);
+
+void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int wr, rd;
+ unsigned int packets;
+ unsigned int index;
+
+ wr = readl(q_dmem + FW_COM_WR_REG);
+ rd = readl(q_dmem + FW_COM_RD_REG);
+
+ if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
+ return NULL;
+
+ if (wr < rd)
+ packets = rd - wr - 1;
+ else
+ packets = q->size - (wr - rd + 1);
+
+ if (!packets)
+ return NULL;
+
+ index = readl(q_dmem + FW_COM_WR_REG);
+
+ return (void *)(q->host_address + index * q->token_size);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6);
+
+void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1;
+
+ if (wr >= q->size)
+ wr = 0;
+
+ writel(wr, q_dmem + FW_COM_WR_REG);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6);
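+
+/*
+ * Typical send-side usage, as a sketch: fetch a free token, fill it in and
+ * only then publish it by advancing the write index. "struct my_token" and
+ * fill_token() are placeholders; callers such as the ISYS FW layer follow
+ * this pattern with their own token structures.
+ *
+ *	struct my_token *token = ipu6_send_get_token(ctx, q_nbr);
+ *
+ *	if (!token)
+ *		return -EBUSY;
+ *	fill_token(token);
+ *	ipu6_send_put_token(ctx, q_nbr);
+ *
+ * The receive side mirrors this with ipu6_recv_get_token() and
+ * ipu6_recv_put_token().
+ */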
+
+void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int wr, rd;
+ unsigned int packets;
+
+ wr = readl(q_dmem + FW_COM_WR_REG);
+ rd = readl(q_dmem + FW_COM_RD_REG);
+
+ if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
+ return NULL;
+
+ if (wr < rd)
+ wr += q->size;
+
+ packets = wr - rd;
+ if (!packets)
+ return NULL;
+
+ return (void *)(q->host_address + rd * q->token_size);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6);
+
+void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1;
+
+ if (rd >= q->size)
+ rd = 0;
+
+ writel(rd, q_dmem + FW_COM_RD_REG);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h
new file mode 100644
index 000000000000..b02285a3e43e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_FW_COM_H
+#define IPU6_FW_COM_H
+
+struct ipu6_fw_com_context;
+struct ipu6_bus_device;
+
+struct ipu6_fw_syscom_queue_config {
+ unsigned int queue_size; /* tokens per queue */
+ unsigned int token_size; /* bytes per token */
+};
+
+#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0
+
+struct ipu6_fw_com_cfg {
+ unsigned int num_input_queues;
+ unsigned int num_output_queues;
+ struct ipu6_fw_syscom_queue_config *input;
+ struct ipu6_fw_syscom_queue_config *output;
+
+ unsigned int dmem_addr;
+
+ /* firmware-specific configuration data */
+ void *specific_addr;
+ unsigned int specific_size;
+ int (*cell_ready)(struct ipu6_bus_device *adev);
+ void (*cell_start)(struct ipu6_bus_device *adev);
+
+ unsigned int buttress_boot_offset;
+};
+
+void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+ struct ipu6_bus_device *adev, void __iomem *base);
+
+int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx);
+bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx);
+int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx);
+int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force);
+
+void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c
new file mode 100644
index 000000000000..62ed92ff1d30
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-com.h"
+#include "ipu6-isys.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = {
+ "STREAM_OPEN",
+ "STREAM_START",
+ "STREAM_START_AND_CAPTURE",
+ "STREAM_CAPTURE",
+ "STREAM_STOP",
+ "STREAM_FLUSH",
+ "STREAM_CLOSE"
+};
+
+static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_fw_isys_proxy_resp_info_abi *resp;
+ int ret;
+
+ resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES);
+ if (!resp)
+ return 1;
+
+ dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n",
+ resp->request_id, resp->error_info.error,
+ resp->error_info.error_details);
+
+ ret = req_id == resp->request_id ? 0 : -EIO;
+
+ ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES);
+
+ return ret;
+}
+
+int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys,
+ unsigned int req_id,
+ unsigned int index,
+ unsigned int offset, u32 value)
+{
+ struct ipu6_fw_com_context *ctx = isys->fwcom;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_fw_proxy_send_queue_token *token;
+ unsigned int timeout = 1000;
+ int ret;
+
+ dev_dbg(dev,
+ "proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n",
+ req_id, index, offset, value);
+
+ token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES);
+ if (!token)
+ return -EBUSY;
+
+ token->request_id = req_id;
+ token->region_index = index;
+ token->offset = offset;
+ token->value = value;
+ ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES);
+
+ do {
+ usleep_range(100, 110);
+ ret = handle_proxy_response(isys, req_id);
+ if (!ret)
+ break;
+ if (ret == -EIO) {
+			dev_err(dev, "Proxy responded with an unexpected id\n");
+ break;
+ }
+ timeout--;
+ } while (ret && timeout);
+
+ if (!timeout)
+ dev_err(dev, "Proxy response timed out\n");
+
+ return ret;
+}
+
+int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf,
+ dma_addr_t dma_mapped_buf,
+ size_t size, u16 send_type)
+{
+ struct ipu6_fw_com_context *ctx = isys->fwcom;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_fw_send_queue_token *token;
+
+ if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE)
+ return -EINVAL;
+
+ dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]);
+
+	/*
+	 * Flush the cache here in case the command carries a payload; not
+	 * all messages have one.
+	 */
+ if (cpu_mapped_buf)
+ clflush_cache_range(cpu_mapped_buf, size);
+
+ token = ipu6_send_get_token(ctx,
+ stream_handle + IPU6_BASE_MSG_SEND_QUEUES);
+ if (!token)
+ return -EBUSY;
+
+ token->payload = dma_mapped_buf;
+ token->buf_handle = (unsigned long)cpu_mapped_buf;
+ token->send_type = send_type;
+
+ ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES);
+
+ return 0;
+}
+
+int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys,
+ const unsigned int stream_handle, u16 send_type)
+{
+ return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0,
+ send_type);
+}
+
+int ipu6_fw_isys_close(struct ipu6_isys *isys)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ int retry = IPU6_ISYS_CLOSE_RETRY;
+ unsigned long flags;
+ void *fwcom;
+ int ret;
+
+	/*
+	 * Stop the ISYS FW. The actual close takes some time as the FW must
+	 * stop its actions, including code fetches to the SP icache. Hold
+	 * the spinlock to wait for the interrupt handler to finish.
+	 */
+ spin_lock_irqsave(&isys->power_lock, flags);
+ ret = ipu6_fw_com_close(isys->fwcom);
+ fwcom = isys->fwcom;
+ isys->fwcom = NULL;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+ if (ret)
+ dev_err(dev, "Device close failure: %d\n", ret);
+
+	/* the release will probably fail if the close failed, but try anyway */
+ do {
+ usleep_range(400, 500);
+ ret = ipu6_fw_com_release(fwcom, 0);
+ retry--;
+ } while (ret && retry);
+
+ if (ret) {
+ dev_err(dev, "Device release time out %d\n", ret);
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->fwcom = fwcom;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+ }
+
+ return ret;
+}
+
+void ipu6_fw_isys_cleanup(struct ipu6_isys *isys)
+{
+ int ret;
+
+ ret = ipu6_fw_com_release(isys->fwcom, 1);
+ if (ret < 0)
+ dev_warn(&isys->adev->auxdev.dev,
+ "Device busy, fw_com release failed.");
+ isys->fwcom = NULL;
+}
+
+static void start_sp(struct ipu6_bus_device *adev)
+{
+ struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = isys->pdata->base +
+ isys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val = IPU6_ISYS_SPC_STATUS_START |
+ IPU6_ISYS_SPC_STATUS_RUN |
+ IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+
+ val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+
+ writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu6_bus_device *adev)
+{
+ struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = isys->pdata->base +
+ isys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val;
+
+ val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL);
+ /* return true when READY == 1, START == 0 */
+ val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START;
+
+ return val == IPU6_ISYS_SPC_STATUS_READY;
+}
+
+static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys,
+ struct ipu6_fw_com_cfg *fwcom,
+ unsigned int num_streams)
+{
+ unsigned int max_send_queues, max_sram_blocks, max_devq_size;
+ struct ipu6_fw_syscom_queue_config *input_queue_cfg;
+ struct ipu6_fw_syscom_queue_config *output_queue_cfg;
+ struct device *dev = &isys->adev->auxdev.dev;
+ int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY;
+ int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV;
+ int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG;
+ int base_dev_send = IPU6_BASE_DEV_SEND_QUEUES;
+ int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES;
+ int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES;
+ struct ipu6_fw_isys_fw_config *isys_fw_cfg;
+ u32 num_in_message_queues;
+ unsigned int max_streams;
+ unsigned int size;
+ unsigned int i;
+
+ max_streams = isys->pdata->ipdata->max_streams;
+ max_send_queues = isys->pdata->ipdata->max_send_queues;
+ max_sram_blocks = isys->pdata->ipdata->max_sram_blocks;
+ max_devq_size = isys->pdata->ipdata->max_devq_size;
+ num_in_message_queues = clamp(num_streams, 1U, max_streams);
+ isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL);
+ if (!isys_fw_cfg)
+ return -ENOMEM;
+
+ isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES;
+ isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES;
+ isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues;
+ isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES;
+ /* Common msg/dev return queue */
+ isys_fw_cfg->num_recv_queues[type_dev] = 0;
+ isys_fw_cfg->num_recv_queues[type_msg] = 1;
+
+ size = sizeof(*input_queue_cfg) * max_send_queues;
+ input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!input_queue_cfg)
+ return -ENOMEM;
+
+ size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES;
+ output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!output_queue_cfg)
+ return -ENOMEM;
+
+ fwcom->input = input_queue_cfg;
+ fwcom->output = output_queue_cfg;
+
+ fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] +
+ isys_fw_cfg->num_send_queues[type_dev] +
+ isys_fw_cfg->num_send_queues[type_msg];
+
+ fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] +
+ isys_fw_cfg->num_recv_queues[type_dev] +
+ isys_fw_cfg->num_recv_queues[type_msg];
+
+ /* SRAM partitioning. Equal partitioning is set. */
+ for (i = 0; i < max_sram_blocks; i++) {
+ if (i < num_in_message_queues)
+ isys_fw_cfg->buffer_partition.num_gda_pages[i] =
+ (IPU6_DEVICE_GDA_NR_PAGES *
+ IPU6_DEVICE_GDA_VIRT_FACTOR) /
+ num_in_message_queues;
+ else
+ isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
+ }
+
+ /* FW assumes proxy interface at fwcom queue 0 */
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
+ input_queue_cfg[i].token_size =
+ sizeof(struct ipu6_fw_proxy_send_queue_token);
+ input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
+ input_queue_cfg[base_dev_send + i].token_size =
+ sizeof(struct ipu6_fw_send_queue_token);
+ input_queue_cfg[base_dev_send + i].queue_size = max_devq_size;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
+ input_queue_cfg[base_msg_send + i].token_size =
+ sizeof(struct ipu6_fw_send_queue_token);
+ input_queue_cfg[base_msg_send + i].queue_size =
+ IPU6_ISYS_SIZE_SEND_QUEUE;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
+ output_queue_cfg[i].token_size =
+ sizeof(struct ipu6_fw_proxy_resp_queue_token);
+ output_queue_cfg[i].queue_size =
+ IPU6_ISYS_SIZE_PROXY_RECV_QUEUE;
+ }
+ /* There is no recv DEV queue */
+ for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
+ output_queue_cfg[base_msg_recv + i].token_size =
+ sizeof(struct ipu6_fw_resp_queue_token);
+ output_queue_cfg[base_msg_recv + i].queue_size =
+ IPU6_ISYS_SIZE_RECV_QUEUE;
+ }
+
+ fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
+ fwcom->specific_addr = isys_fw_cfg;
+ fwcom->specific_size = sizeof(*isys_fw_cfg);
+
+ return 0;
+}
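+
+/*
+ * As a worked example, assuming the platform allows at least two streams:
+ * opening two streams gives num_in_message_queues = 2, so the FW com link
+ * is prepared with 1 + 1 + 2 = 4 send queues (proxy, device and two
+ * per-stream message queues) and 1 + 0 + 1 = 2 receive queues (proxy plus
+ * the common message return queue).
+ */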
+
+int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ int retry = IPU6_ISYS_OPEN_RETRY;
+ struct ipu6_fw_com_cfg fwcom = {
+ .cell_start = start_sp,
+ .cell_ready = query_sp,
+ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET,
+ };
+ int ret;
+
+ ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
+
+ isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev,
+ isys->pdata->base);
+ if (!isys->fwcom) {
+ dev_err(dev, "isys fw com prepare failed\n");
+ return -EIO;
+ }
+
+ ret = ipu6_fw_com_open(isys->fwcom);
+ if (ret) {
+ dev_err(dev, "isys fw com open failed %d\n", ret);
+ return ret;
+ }
+
+ do {
+ usleep_range(400, 500);
+ if (ipu6_fw_com_ready(isys->fwcom))
+ break;
+ retry--;
+ } while (retry > 0);
+
+ if (!retry) {
+ dev_err(dev, "isys port open ready failed %d\n", ret);
+ ipu6_fw_isys_close(isys);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+struct ipu6_fw_isys_resp_info_abi *
+ipu6_fw_isys_get_resp(void *context, unsigned int queue)
+{
+ return ipu6_recv_get_token(context, queue);
+}
+
+void ipu6_fw_isys_put_resp(void *context, unsigned int queue)
+{
+ ipu6_recv_put_token(context, queue);
+}
+
+void ipu6_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu6_fw_isys_stream_cfg_data_abi *cfg)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "-----------------------------------------------------\n");
+ dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n");
+
+	dev_dbg(dev, "compfmt = %d\n", cfg->compfmt);
+ dev_dbg(dev, "src = %d\n", cfg->src);
+ dev_dbg(dev, "vc = %d\n", cfg->vc);
+ dev_dbg(dev, "isl_use = %d\n", cfg->isl_use);
+ dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type);
+
+ dev_dbg(dev, "send_irq_sof_discarded = %d\n",
+ cfg->send_irq_sof_discarded);
+ dev_dbg(dev, "send_irq_eof_discarded = %d\n",
+ cfg->send_irq_eof_discarded);
+ dev_dbg(dev, "send_resp_sof_discarded = %d\n",
+ cfg->send_resp_sof_discarded);
+ dev_dbg(dev, "send_resp_eof_discarded = %d\n",
+ cfg->send_resp_eof_discarded);
+
+ dev_dbg(dev, "crop:\n");
+ dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset,
+ cfg->crop.top_offset);
+ dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset,
+ cfg->crop.bottom_offset);
+
+ dev_dbg(dev, "nof_input_pins = %d\n", cfg->nof_input_pins);
+ for (i = 0; i < cfg->nof_input_pins; i++) {
+ dev_dbg(dev, "input pin[%d]:\n", i);
+ dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt);
+ dev_dbg(dev, "\t.mipi_store_mode = %d\n",
+ cfg->input_pins[i].mipi_store_mode);
+ dev_dbg(dev, "\t.bits_per_pix = %d\n",
+ cfg->input_pins[i].bits_per_pix);
+ dev_dbg(dev, "\t.mapped_dt = 0x%0x\n",
+ cfg->input_pins[i].mapped_dt);
+ dev_dbg(dev, "\t.input_res = %dx%d\n",
+ cfg->input_pins[i].input_res.width,
+ cfg->input_pins[i].input_res.height);
+ dev_dbg(dev, "\t.mipi_decompression = %d\n",
+ cfg->input_pins[i].mipi_decompression);
+ dev_dbg(dev, "\t.capture_mode = %d\n",
+ cfg->input_pins[i].capture_mode);
+ }
+
+ dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins);
+ for (i = 0; i < cfg->nof_output_pins; i++) {
+ dev_dbg(dev, "output_pin[%d]:\n", i);
+ dev_dbg(dev, "\t.input_pin_id = %d\n",
+ cfg->output_pins[i].input_pin_id);
+ dev_dbg(dev, "\t.output_res = %dx%d\n",
+ cfg->output_pins[i].output_res.width,
+ cfg->output_pins[i].output_res.height);
+ dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride);
+ dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt);
+ dev_dbg(dev, "\t.payload_buf_size = %d\n",
+ cfg->output_pins[i].payload_buf_size);
+ dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft);
+ dev_dbg(dev, "\t.watermark_in_lines = %d\n",
+ cfg->output_pins[i].watermark_in_lines);
+ dev_dbg(dev, "\t.send_irq = %d\n",
+ cfg->output_pins[i].send_irq);
+ dev_dbg(dev, "\t.reserve_compression = %d\n",
+ cfg->output_pins[i].reserve_compression);
+ dev_dbg(dev, "\t.snoopable = %d\n",
+ cfg->output_pins[i].snoopable);
+ dev_dbg(dev, "\t.error_handling_enable = %d\n",
+ cfg->output_pins[i].error_handling_enable);
+ dev_dbg(dev, "\t.sensor_type = %d\n",
+ cfg->output_pins[i].sensor_type);
+ }
+ dev_dbg(dev, "-----------------------------------------------------\n");
+}
+
+void
+ipu6_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu6_fw_isys_frame_buff_set_abi *buf,
+ unsigned int outputs)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "-----------------------------------------------------\n");
+ dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n");
+
+ for (i = 0; i < outputs; i++) {
+ dev_dbg(dev, "output_pin[%d]:\n", i);
+ dev_dbg(dev, "\t.out_buf_id = %llu\n",
+ buf->output_pins[i].out_buf_id);
+ dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr);
+ dev_dbg(dev, "\t.compress = %d\n",
+ buf->output_pins[i].compress);
+ }
+
+ dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof);
+ dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof);
+ dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof);
+ dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof);
+ dev_dbg(dev, "send_irq_capture_ack = 0x%x\n",
+ buf->send_irq_capture_ack);
+ dev_dbg(dev, "send_irq_capture_done = 0x%x\n",
+ buf->send_irq_capture_done);
+ dev_dbg(dev, "send_resp_capture_ack = 0x%x\n",
+ buf->send_resp_capture_ack);
+ dev_dbg(dev, "send_resp_capture_done = 0x%x\n",
+ buf->send_resp_capture_done);
+
+ dev_dbg(dev, "-----------------------------------------------------\n");
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h
new file mode 100644
index 000000000000..b60f02076d8a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_FW_ISYS_H
+#define IPU6_FW_ISYS_H
+
+#include <linux/types.h>
+
+struct device;
+struct ipu6_isys;
+
+/* Max number of Input/Output Pins */
+#define IPU6_MAX_IPINS 4
+
+#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1)
+
+#define IPU6_STREAM_ID_MAX 16
+#define IPU6_NONSECURE_STREAM_ID_MAX 12
+#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX)
+#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX)
+#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX)
+#define IPU6SE_STREAM_ID_MAX 8
+#define IPU6SE_NONSECURE_STREAM_ID_MAX 4
+#define IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX)
+
+/* Single return queue for all streams/commands type */
+#define IPU6_N_MAX_MSG_RECV_QUEUES 1
+/* Single device queue for high priority commands (bypass in-order queue) */
+#define IPU6_N_MAX_DEV_SEND_QUEUES 1
+/* Single dedicated send queue for proxy interface */
+#define IPU6_N_MAX_PROXY_SEND_QUEUES 1
+/* Single dedicated recv queue for proxy interface */
+#define IPU6_N_MAX_PROXY_RECV_QUEUES 1
+/* Send queues layout */
+#define IPU6_BASE_PROXY_SEND_QUEUES 0
+#define IPU6_BASE_DEV_SEND_QUEUES \
+ (IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES)
+#define IPU6_BASE_MSG_SEND_QUEUES \
+ (IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES)
+/* Recv queues layout */
+#define IPU6_BASE_PROXY_RECV_QUEUES 0
+#define IPU6_BASE_MSG_RECV_QUEUES \
+ (IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES)
+#define IPU6_N_MAX_RECV_QUEUES \
+ (IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES)
+
+#define IPU6_N_MAX_SEND_QUEUES \
+ (IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES)
+#define IPU6SE_N_MAX_SEND_QUEUES \
+ (IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES)
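+
+/*
+ * With the counts above, the indices work out as follows: send queue 0 is
+ * the proxy queue, queue 1 the device queue and queues 2 onwards the
+ * per-stream message queues; receive queue 0 is the proxy queue and
+ * queue 1 the single common message return queue.
+ */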
+
+/* Max number of planes for frame formats supported by the FW */
+#define IPU6_PIN_PLANES_MAX 4
+
+#define IPU6_FW_ISYS_SENSOR_TYPE_START 14
+#define IPU6_FW_ISYS_SENSOR_TYPE_END 19
+#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6
+#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11
+/*
+ * Device close takes some time from the last ack message to the actual
+ * stopping of the SP processor. As long as the SP processor runs, we
+ * cannot proceed with cleaning up resources.
+ */
+#define IPU6_ISYS_OPEN_RETRY 2000
+#define IPU6_ISYS_CLOSE_RETRY 2000
+#define IPU6_FW_CALL_TIMEOUT_JIFFIES msecs_to_jiffies(2000)
+
+enum ipu6_fw_isys_resp_type {
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+ IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY,
+ IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+ IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF,
+ IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+ IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+ IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+ IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+ IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+ IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY,
+ N_IPU6_FW_ISYS_RESP_TYPE
+};
+
+enum ipu6_fw_isys_send_type {
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_START,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE,
+ N_IPU6_FW_ISYS_SEND_TYPE
+};
+
+enum ipu6_fw_isys_queue_type {
+ IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0,
+ IPU6_FW_ISYS_QUEUE_TYPE_DEV,
+ IPU6_FW_ISYS_QUEUE_TYPE_MSG,
+ N_IPU6_FW_ISYS_QUEUE_TYPE
+};
+
+enum ipu6_fw_isys_stream_source {
+ IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_1,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_2,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_3,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_4,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_5,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_6,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_7,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_8,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_9,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_10,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_11,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_12,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_13,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_14,
+ IPU6_FW_ISYS_STREAM_SRC_PORT_15,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8,
+ IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9,
+ N_IPU6_FW_ISYS_STREAM_SRC
+};
+
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3
+
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \
+ IPU6_FW_ISYS_STREAM_SRC_PORT_6
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \
+ IPU6_FW_ISYS_STREAM_SRC_PORT_7
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \
+ IPU6_FW_ISYS_STREAM_SRC_PORT_8
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \
+ IPU6_FW_ISYS_STREAM_SRC_PORT_9
+
+#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0
+#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1
+
+/*
+ * enum ipu6_fw_isys_mipi_vc: the MIPI CSI-2 spec supports up to 4 virtual
+ * channels per physical channel
+ */
+enum ipu6_fw_isys_mipi_vc {
+ IPU6_FW_ISYS_MIPI_VC_0 = 0,
+ IPU6_FW_ISYS_MIPI_VC_1,
+ IPU6_FW_ISYS_MIPI_VC_2,
+ IPU6_FW_ISYS_MIPI_VC_3,
+ N_IPU6_FW_ISYS_MIPI_VC
+};
+
+enum ipu6_fw_isys_frame_format_type {
+ IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+ /* 12 bit YUV 420, Intel proprietary tiled format */
+ IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY,
+
+ IPU6_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */
+ IPU6_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+ /* Internal format, 2 y lines followed by a uvinterleaved line */
+ IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */
+ /**
+	 * 16 bit RGB, 1 plane. The three sub-pixels are packed into one
+	 * 16 bit value: 5 bits for R, 6 bits for G and 5 bits for B.
+ */
+ IPU6_FW_ISYS_FRAME_FORMAT_RGB565,
+ IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */
+ IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */
+ IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */
+ IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */
+ N_IPU6_FW_ISYS_FRAME_FORMAT
+};
+
+enum ipu6_fw_isys_pin_type {
+ /* captured as MIPI packets */
+ IPU6_FW_ISYS_PIN_TYPE_MIPI = 0,
+ /* captured through the SoC path */
+ IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3,
+};
+
+/*
+ * enum ipu6_fw_isys_mipi_store_mode. Describes whether long MIPI packets
+ * reach MIPI SRAM with the long packet header or not; if the header is
+ * discarded, the only option is to capture the data with pin type MIPI.
+ */
+enum ipu6_fw_isys_mipi_store_mode {
+ IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+ IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+ N_IPU6_FW_ISYS_MIPI_STORE_MODE
+};
+
+enum ipu6_fw_isys_capture_mode {
+ IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0,
+ IPU6_FW_ISYS_CAPTURE_MODE_BURST,
+ N_IPU6_FW_ISYS_CAPTURE_MODE,
+};
+
+enum ipu6_fw_isys_sensor_mode {
+ IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0,
+ IPU6_FW_ISYS_SENSOR_MODE_TOBII,
+ N_IPU6_FW_ISYS_SENSOR_MODE,
+};
+
+enum ipu6_fw_isys_error {
+ IPU6_FW_ISYS_ERROR_NONE = 0,
+ IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY,
+ IPU6_FW_ISYS_ERROR_HW_CONSISTENCY,
+ IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE,
+ IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION,
+ IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION,
+ IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION,
+ IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES,
+ IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO,
+ IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO,
+ IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC,
+ IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION,
+ IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL,
+ N_IPU6_FW_ISYS_ERROR
+};
+
+enum ipu6_fw_proxy_error {
+ IPU6_FW_PROXY_ERROR_NONE = 0,
+ IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION,
+ IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET,
+ N_IPU6_FW_PROXY_ERROR
+};
+
+/* firmware ABI structures below are aligned in the firmware, no need to pack */
+struct ipu6_fw_isys_buffer_partition_abi {
+ u32 num_gda_pages[IPU6_STREAM_ID_MAX];
+};
+
+struct ipu6_fw_isys_fw_config {
+ struct ipu6_fw_isys_buffer_partition_abi buffer_partition;
+ u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE];
+ u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE];
+};
+
+/*
+ * struct ipu6_fw_isys_resolution_abi: Generic resolution structure.
+ */
+struct ipu6_fw_isys_resolution_abi {
+ u32 width;
+ u32 height;
+};
+
+/**
+ * struct ipu6_fw_isys_output_pin_payload_abi - ISYS output pin buffer
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ipu6_fw_isys_output_pin_payload_abi {
+ u64 out_buf_id;
+ u32 addr;
+ u32 compress;
+};
+
+/**
+ * struct ipu6_fw_isys_output_pin_info_abi - ISYS output pin info
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @watermark_in_lines: pin watermark level in lines
+ * @payload_buf_size: minimum size in Bytes of all buffers that will be
+ * supplied for capture on this pin
+ * @ts_offsets: ts_offsets
+ * @s2m_pixel_soc_pixel_remapping: pixel soc remapping (see the definition of
+ * S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING)
+ * @csi_be_soc_pixel_remapping: see s2m_pixel_soc_pixel_remapping
+ * @send_irq: assert if pin event should trigger irq
+ * @input_pin_id: related input pin id
+ * @pt: pin type (enum ipu6_fw_isys_pin_type)
+ * @ft: frame format type (enum ipu6_fw_isys_frame_format_type)
+ * @reserved: a reserved field
+ * @reserve_compression: reserve compression resources for pin
+ * @snoopable: snoopable
+ * @error_handling_enable: enable error handling
+ * @sensor_type: sensor_type
+ */
+struct ipu6_fw_isys_output_pin_info_abi {
+ struct ipu6_fw_isys_resolution_abi output_res;
+ u32 stride;
+ u32 watermark_in_lines;
+ u32 payload_buf_size;
+ u32 ts_offsets[IPU6_PIN_PLANES_MAX];
+ u32 s2m_pixel_soc_pixel_remapping;
+ u32 csi_be_soc_pixel_remapping;
+ u8 send_irq;
+ u8 input_pin_id;
+ u8 pt;
+ u8 ft;
+ u8 reserved;
+ u8 reserve_compression;
+ u8 snoopable;
+ u8 error_handling_enable;
+ u32 sensor_type;
+};
+
+/**
+ * struct ipu6_fw_isys_input_pin_info_abi - ISYS input pin info
+ * @input_res: input resolution
+ * @dt: mipi data type (enum ipu6_fw_isys_mipi_data_type)
+ * @mipi_store_mode: defines if the legacy long packet header will be stored
+ *		     or discarded; if discarded, the output pin type for this
+ *		     input pin can only be MIPI
+ *		     (enum ipu6_fw_isys_mipi_store_mode)
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: actual data type from sensor
+ * @mipi_decompression: defines which compression will be in mipi backend
+ * @crop_first_and_last_lines: Control whether to crop the first and last line
+ * of the input image. Crop done by HW device.
+ * @capture_mode: mode of capture, regular or burst, default value is regular
+ * @reserved: a reserved field
+ */
+struct ipu6_fw_isys_input_pin_info_abi {
+ struct ipu6_fw_isys_resolution_abi input_res;
+ u8 dt;
+ u8 mipi_store_mode;
+ u8 bits_per_pix;
+ u8 mapped_dt;
+ u8 mipi_decompression;
+ u8 crop_first_and_last_lines;
+ u8 capture_mode;
+ u8 reserved;
+};
+
+/**
+ * struct ipu6_fw_isys_cropping_abi - ISYS cropping coordinates
+ * @top_offset: Top offset
+ * @left_offset: Left offset
+ * @bottom_offset: Bottom offset
+ * @right_offset: Right offset
+ */
+struct ipu6_fw_isys_cropping_abi {
+ s32 top_offset;
+ s32 left_offset;
+ s32 bottom_offset;
+ s32 right_offset;
+};
+
+/**
+ * struct ipu6_fw_isys_stream_cfg_data_abi - ISYS stream configuration data
+ * ISYS stream configuration data structure
+ * @crop: reserved for extended use; currently not used by the FW
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ * - if '1' it will override the send_resp_sof_discarded
+ * and send the response
+ * - if '0' the send_resp_sof_discarded will determine
+ * whether to send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ * - if '1' it will override the send_resp_eof_discarded
+ * and send the response
+ * - if '0' the send_resp_eof_discarded will determine
+ * whether to send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ * used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ * used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ * @sensor_type: type of connected sensor, tobii or others, default is 0
+ * @reserved: a reserved field
+ * @reserved2: a reserved field
+ */
+struct ipu6_fw_isys_stream_cfg_data_abi {
+ struct ipu6_fw_isys_cropping_abi crop;
+ struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS];
+ struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS];
+ u32 compfmt;
+ u8 nof_input_pins;
+ u8 nof_output_pins;
+ u8 send_irq_sof_discarded;
+ u8 send_irq_eof_discarded;
+ u8 send_resp_sof_discarded;
+ u8 send_resp_eof_discarded;
+ u8 src;
+ u8 vc;
+ u8 isl_use;
+ u8 sensor_type;
+ u8 reserved;
+ u8 reserved2;
+};
+
+/**
+ * struct ipu6_fw_isys_frame_buff_set_abi - ISYS frame buffer set (request)
+ * @output_pins: output pin addresses
+ * @send_irq_sof: send irq on frame sof response
+ * - if '1' it will override the send_resp_sof and
+ * send the response
+ * - if '0' the send_resp_sof will determine whether to
+ * send the response
+ * @send_irq_eof: send irq on frame eof response
+ * - if '1' it will override the send_resp_eof and
+ * send the response
+ * - if '0' the send_resp_eof will determine whether to
+ * send the response
+ * @send_irq_capture_ack: send irq on capture ack
+ * @send_irq_capture_done: send irq on capture done
+ * @send_resp_sof: send response for frame sof detected,
+ * used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ * used only when send_irq_eof is '0'
+ * @send_resp_capture_ack: send response for capture ack event
+ * @send_resp_capture_done: send response for capture done event
+ * @reserved: a reserved field
+ */
+struct ipu6_fw_isys_frame_buff_set_abi {
+ struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS];
+ u8 send_irq_sof;
+ u8 send_irq_eof;
+ u8 send_irq_capture_ack;
+ u8 send_irq_capture_done;
+ u8 send_resp_sof;
+ u8 send_resp_eof;
+ u8 send_resp_capture_ack;
+ u8 send_resp_capture_done;
+ u8 reserved[8];
+};
+
+/**
+ * struct ipu6_fw_isys_error_info_abi - ISYS error information
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu6_fw_isys_error_info_abi {
+ u32 error;
+ u32 error_details;
+};
+
+/**
+ * struct ipu6_fw_isys_resp_info_abi - ISYS firmware response
+ * @buf_id: buffer ID
+ * @pin: valid only for pin-event related responses; contains the pin
+ *	 payload
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type (enum ipu6_fw_isys_resp_type)
+ * @pin_id: pin id that the pin payload corresponds to
+ * @reserved: a reserved field
+ * @reserved2: a reserved field
+ */
+struct ipu6_fw_isys_resp_info_abi {
+ u64 buf_id;
+ struct ipu6_fw_isys_output_pin_payload_abi pin;
+ struct ipu6_fw_isys_error_info_abi error_info;
+ u32 timestamp[2];
+ u8 stream_handle;
+ u8 type;
+ u8 pin_id;
+ u8 reserved;
+ u32 reserved2;
+};
+
+/**
+ * struct ipu6_fw_isys_proxy_error_info_abi - ISYS proxy error
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu6_fw_isys_proxy_error_info_abi {
+ u32 error;
+ u32 error_details;
+};
+
+struct ipu6_fw_isys_proxy_resp_info_abi {
+ u32 request_id;
+ struct ipu6_fw_isys_proxy_error_info_abi error_info;
+};
+
+/**
+ * struct ipu6_fw_proxy_write_queue_token - ISYS proxy write queue token
+ * @request_id: update id for the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address
+ * of the region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ipu6_fw_proxy_write_queue_token {
+ u32 request_id;
+ u32 region_index;
+ u32 offset;
+ u32 value;
+};
+
+/**
+ * struct ipu6_fw_resp_queue_token - ISYS response queue token
+ * @resp_info: response info
+ */
+struct ipu6_fw_resp_queue_token {
+ struct ipu6_fw_isys_resp_info_abi resp_info;
+};
+
+/**
+ * struct ipu6_fw_send_queue_token - ISYS send queue token
+ * @buf_handle: buffer handle
+ * @payload: payload
+ * @send_type: send_type
+ * @stream_id: stream_id
+ */
+struct ipu6_fw_send_queue_token {
+ u64 buf_handle;
+ u32 payload;
+ u16 send_type;
+ u16 stream_id;
+};
+
+/**
+ * struct ipu6_fw_proxy_resp_queue_token - ISYS proxy response queue token
+ * @proxy_resp_info: proxy response info
+ */
+struct ipu6_fw_proxy_resp_queue_token {
+ struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info;
+};
+
+/**
+ * struct ipu6_fw_proxy_send_queue_token - ISYS proxy send queue token
+ * @request_id: request_id
+ * @region_index: region_index
+ * @offset: offset
+ * @value: value
+ */
+struct ipu6_fw_proxy_send_queue_token {
+ u32 request_id;
+ u32 region_index;
+ u32 offset;
+ u32 value;
+};
+
+void
+ipu6_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu6_fw_isys_stream_cfg_data_abi *cfg);
+void
+ipu6_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu6_fw_isys_frame_buff_set_abi *buf,
+ unsigned int outputs);
+int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams);
+int ipu6_fw_isys_close(struct ipu6_isys *isys);
+int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys,
+ const unsigned int stream_handle, u16 send_type);
+int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf, dma_addr_t dma_mapped_buf,
+ size_t size, u16 send_type);
+int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys,
+ unsigned int req_id,
+ unsigned int index,
+ unsigned int offset, u32 value);
+void ipu6_fw_isys_cleanup(struct ipu6_isys *isys);
+struct ipu6_fw_isys_resp_info_abi *
+ipu6_fw_isys_get_resp(void *context, unsigned int queue);
+void ipu6_fw_isys_put_resp(void *context, unsigned int queue);
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
new file mode 100644
index 000000000000..b9ce4324996d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/sprintf.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-subdev.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+static const u32 csi2_supported_codes[] = {
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_META_8,
+ MEDIA_BUS_FMT_META_10,
+ MEDIA_BUS_FMT_META_12,
+ MEDIA_BUS_FMT_META_16,
+ MEDIA_BUS_FMT_META_24,
+ 0
+};
+
+/*
+ * Strings corresponding to CSI-2 receiver errors are here.
+ * Corresponding macros are defined in the header file.
+ */
+static const struct ipu6_csi2_error dphy_rx_errors[] = {
+ { "Single packet header error corrected", true },
+ { "Multiple packet header errors detected", true },
+ { "Payload checksum (CRC) error", true },
+ { "Transfer FIFO overflow", false },
+ { "Reserved short packet data type detected", true },
+ { "Reserved long packet data type detected", true },
+ { "Incomplete long packet detected", false },
+ { "Frame sync error", false },
+ { "Line sync error", false },
+ { "DPHY recoverable synchronization error", true },
+ { "DPHY fatal error", false },
+ { "DPHY elastic FIFO overflow", false },
+ { "Inter-frame short packet discarded", true },
+ { "Inter-frame long packet discarded", true },
+ { "MIPI pktgen overflow", false },
+ { "MIPI pktgen data loss", false },
+ { "FIFO overflow", false },
+ { "Lane deskew", false },
+ { "SOT sync error", false },
+ { "HSIDLE detected", false }
+};
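+
+/*
+ * Entry i in the table above corresponds to bit i of the latched CSI-2
+ * receiver IRQ status; ipu6_isys_csi2_error() below prints the string for
+ * every status bit that is set.
+ */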
+
+s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2)
+{
+ struct media_pad *src_pad;
+ struct v4l2_subdev *ext_sd;
+ struct device *dev;
+
+ if (!csi2)
+ return -EINVAL;
+
+ dev = &csi2->isys->adev->auxdev.dev;
+ src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(dev, "can't get source pad of %s (%ld)\n",
+ csi2->asd.sd.name, PTR_ERR(src_pad));
+ return PTR_ERR(src_pad);
+ }
+
+ ext_sd = media_entity_to_v4l2_subdev(src_pad->entity);
+ if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name))
+ return -ENODEV;
+
+ return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0);
+}
+
+static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+
+ dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n",
+ sub->type, sub->id);
+
+ switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 10, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
+ .subscribe_event = csi2_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+/*
+ * The input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8.
+ *
+ * A and B are coefficients from the table below,
+ * depending on whether the register minimum or maximum value is
+ * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values of both A and B.
+ */
+
+#define DIV_SHIFT 8
+#define CSI2_ACCINV 8
+
+static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
+{
+ return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
+ / (s32)(link_freq >> DIV_SHIFT));
+}
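+
+/*
+ * Worked example (illustrative): with a 400 MHz link frequency the unit
+ * interval is UI = 1 / (2 * 400 MHz) = 1.25 ns, so the minimum clock lane
+ * settle value is (95 ns - 8 * 1.25 ns) / 0.125 ns = 680, which is what
+ * calc_timing(95, -8, 400000000, 8) evaluates to.
+ */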
+
+static int
+ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2,
+ struct ipu6_isys_csi2_timing *timing, s32 accinv)
+{
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ s64 link_freq;
+
+ link_freq = ipu6_isys_csi2_get_link_freq(csi2);
+ if (link_freq < 0)
+ return link_freq;
+
+ timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
+ CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
+ link_freq, accinv);
+ timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
+ CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
+ link_freq, accinv);
+ timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
+ CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
+ link_freq, accinv);
+ timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
+ CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
+ link_freq, accinv);
+
+ dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n",
+ timing->ctermen, timing->csettle,
+ timing->dtermen, timing->dsettle);
+
+ return 0;
+}
+
+void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2)
+{
+ u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+ struct ipu6_isys *isys = csi2->isys;
+ u32 mask;
+
+ mask = isys->pdata->ipdata->csi2.irq_mask;
+ writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ csi2->receiver_errors |= irq & mask;
+}
+
+void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2)
+{
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ const struct ipu6_csi2_error *errors;
+ u32 status;
+ u32 i;
+
+	/* latch the errors once more in case interrupts are disabled */
+ ipu6_isys_register_errors(csi2);
+ status = csi2->receiver_errors;
+ csi2->receiver_errors = 0;
+ errors = dphy_rx_errors;
+
+ for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
+ if (status & BIT(i))
+ dev_err_ratelimited(dev, "csi2-%i error: %s\n",
+ csi2->port, errors[i].error_string);
+ }
+}
+
+static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd,
+ const struct ipu6_isys_csi2_timing *timing,
+ unsigned int nlanes, int enable)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+ struct ipu6_isys *isys = csi2->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_isys_csi2_config cfg;
+ unsigned int nports;
+ int ret = 0;
+ u32 mask = 0;
+ u32 i;
+
+ dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off",
+ csi2->port, nlanes);
+
+ cfg.port = csi2->port;
+ cfg.nlanes = nlanes;
+
+ mask = isys->pdata->ipdata->csi2.irq_mask;
+ nports = isys->pdata->ipdata->csi2.nports;
+
+ if (!enable) {
+ writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE);
+ writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+
+ writel(0,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(0,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+ writel(0xffffffff,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+ isys->phy_set_power(isys, &cfg, timing, false);
+
+ writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT
+ (isys->pdata->ipdata->csi2.fw_access_port_ofs,
+ csi2->port));
+ writel(0, isys->pdata->base +
+ CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port));
+
+ return ret;
+ }
+
+	/* toggle the port soft reset */
+ writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST);
+ usleep_range(100, 200);
+ writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST);
+
+ /* enable port clock */
+ for (i = 0; i < nports; i++) {
+ writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i));
+ writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT
+ (isys->pdata->ipdata->csi2.fw_access_port_ofs, i));
+ }
+
+ /* enable all error related irq */
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+ /*
+	 * Use the firmware event instead of the irq to handle the CSI2 sync
+	 * event, which reduces system wakeups. If the CSI2 sync irq is
+	 * enabled, the firmware CSI2 sync event must be disabled to avoid
+	 * handling it twice.
+ */
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+ writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+ /* configure to enable FE and PPI2CSI */
+ writel(0, csi2->base + CSI_REG_CSI_FE_MODE);
+ writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL);
+ writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID,
+ csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL);
+ writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1),
+ csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF);
+
+ writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+ writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE);
+
+ ret = isys->phy_set_power(isys, &cfg, timing, true);
+ if (ret)
+ dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port,
+ ret);
+
+ return ret;
+}
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ struct ipu6_isys_csi2_timing timing = { };
+ unsigned int nlanes;
+ int ret;
+
+ dev_dbg(dev, "csi2 stream %s callback\n", enable ? "on" : "off");
+
+ if (!enable) {
+ csi2->stream_count--;
+ if (csi2->stream_count)
+ return 0;
+
+ ipu6_isys_csi2_set_stream(sd, &timing, 0, enable);
+ return 0;
+ }
+
+ if (csi2->stream_count) {
+ csi2->stream_count++;
+ return 0;
+ }
+
+ nlanes = csi2->nlanes;
+
+ ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
+ if (ret)
+ return ret;
+
+ ret = ipu6_isys_csi2_set_stream(sd, &timing, nlanes, enable);
+ if (ret)
+ return ret;
+
+ csi2->stream_count++;
+
+ return 0;
+}
+
+static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ struct device *dev = &asd->isys->adev->auxdev.dev;
+ struct v4l2_mbus_framefmt *sink_ffmt;
+ struct v4l2_mbus_framefmt *src_ffmt;
+ struct v4l2_rect *crop;
+
+ if (sel->pad == CSI2_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ sel->pad,
+ sel->stream);
+ if (!sink_ffmt)
+ return -EINVAL;
+
+ src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream);
+ if (!src_ffmt)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream);
+ if (!crop)
+ return -EINVAL;
+
+ /* Only vertical cropping is supported */
+ sel->r.left = 0;
+ sel->r.width = sink_ffmt->width;
+ /* Non-bayer formats can't be single line cropped */
+ if (!ipu6_isys_is_bayer_format(sink_ffmt->code))
+ sel->r.top &= ~1;
+ sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT,
+ sink_ffmt->height - sel->r.top);
+ *crop = sel->r;
+
+ /* update source pad format */
+ src_ffmt->width = sel->r.width;
+ src_ffmt->height = sel->r.height;
+ if (ipu6_isys_is_bayer_format(sink_ffmt->code))
+ src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code,
+ sel->r.left,
+ sel->r.top);
+ dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n",
+ sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ src_ffmt->code);
+
+ return 0;
+}
+
+static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *sink_ffmt;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK)
+ return -EINVAL;
+
+ sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ sel->pad,
+ sel->stream);
+ if (!sink_ffmt)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream);
+ if (!crop)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = sink_ffmt->width;
+ sel->r.height = sink_ffmt->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
+ .s_stream = set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ipu6_isys_subdev_set_fmt,
+ .get_selection = ipu6_isys_csi2_get_sel,
+ .set_selection = ipu6_isys_csi2_set_sel,
+ .enum_mbus_code = ipu6_isys_subdev_enum_mbus_code,
+ .set_routing = ipu6_isys_subdev_set_routing,
+};
+
+static const struct v4l2_subdev_ops csi2_sd_ops = {
+ .core = &csi2_sd_core_ops,
+ .video = &csi2_sd_video_ops,
+ .pad = &csi2_sd_pad_ops,
+};
+
+static const struct media_entity_operations csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_pad_interdep = v4l2_subdev_has_pad_interdep,
+};
+
+void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2)
+{
+ if (!csi2->isys)
+ return;
+
+ v4l2_device_unregister_subdev(&csi2->asd.sd);
+ v4l2_subdev_cleanup(&csi2->asd.sd);
+ ipu6_isys_subdev_cleanup(&csi2->asd);
+ csi2->isys = NULL;
+}
+
+int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2,
+ struct ipu6_isys *isys,
+ void __iomem *base, unsigned int index)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ int ret;
+
+ csi2->isys = isys;
+ csi2->base = base;
+ csi2->port = index;
+
+ csi2->asd.sd.entity.ops = &csi2_entity_ops;
+ csi2->asd.isys = isys;
+ ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
+ NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS);
+ if (ret)
+ goto fail;
+
+ csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index;
+ csi2->asd.supported_codes = csi2_supported_codes;
+ snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
+ IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index);
+ v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);
+ ret = v4l2_subdev_init_finalize(&csi2->asd.sd);
+ if (ret) {
+ dev_err(dev, "failed to init v4l2 subdev\n");
+ goto fail;
+ }
+
+ ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
+ if (ret) {
+ dev_err(dev, "failed to register v4l2 subdev\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipu6_isys_csi2_cleanup(csi2);
+
+ return ret;
+}
+
+void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream)
+{
+ struct video_device *vdev = stream->asd->sd.devnode;
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence);
+ v4l2_event_queue(vdev, &ev);
+
+ dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n",
+ csi2->port, ev.u.frame_sync.frame_sequence, stream->vc);
+}
+
+void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream)
+{
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+ u32 frame_sequence = atomic_read(&stream->sequence);
+
+ dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n",
+ csi2->port, frame_sequence);
+}
+
+int ipu6_isys_csi2_get_remote_desc(u32 source_stream,
+ struct ipu6_isys_csi2 *csi2,
+ struct media_entity *source_entity,
+ struct v4l2_mbus_frame_desc_entry *entry)
+{
+ struct v4l2_mbus_frame_desc_entry *desc_entry = NULL;
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ struct v4l2_mbus_frame_desc desc;
+ struct v4l2_subdev *source;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ source = media_entity_to_v4l2_subdev(source_entity);
+ if (!source)
+ return -EPIPE;
+
+ pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]);
+ if (!pad)
+ return -EPIPE;
+
+ ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc);
+ if (ret)
+ return ret;
+
+ if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ dev_err(dev, "Unsupported frame descriptor type\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < desc.num_entries; i++) {
+ if (source_stream == desc.entry[i].stream) {
+ desc_entry = &desc.entry[i];
+ break;
+ }
+ }
+
+ if (!desc_entry) {
+ dev_err(dev, "Failed to find stream %u from remote subdev\n",
+ source_stream);
+ return -EINVAL;
+ }
+
+ if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) {
+ dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc);
+ return -EINVAL;
+ }
+
+ *entry = *desc_entry;
+
+ return 0;
+}
+
+void ipu6_isys_set_csi2_streams_status(struct ipu6_isys_video *av, bool status)
+{
+ struct ipu6_isys_stream *stream = av->stream;
+ struct v4l2_subdev *sd = &stream->asd->sd;
+ struct v4l2_subdev_state *state;
+ struct media_pad *r_pad;
+ unsigned int i;
+ u32 r_stream;
+
+ r_pad = media_pad_remote_pad_first(&av->pad);
+ r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index);
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ for (i = 0; i < state->stream_configs.num_configs; i++) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad == r_pad->index && r_stream == cfg->stream) {
+ dev_dbg(&av->isys->adev->auxdev.dev,
+ "%s: pad:%u, stream:%u, status:%u\n",
+ sd->entity.name, r_pad->index, r_stream,
+ status);
+ cfg->enabled = status;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
new file mode 100644
index 000000000000..eba6b29386ea
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_ISYS_CSI2_H
+#define IPU6_ISYS_CSI2_H
+
+#include <linux/container_of.h>
+
+#include "ipu6-isys-subdev.h"
+#include "ipu6-isys-video.h"
+
+struct media_entity;
+struct v4l2_mbus_frame_desc_entry;
+
+struct ipu6_isys_video;
+struct ipu6_isys;
+struct ipu6_isys_csi2_pdata;
+struct ipu6_isys_stream;
+
+#define NR_OF_CSI2_VC 16
+#define INVALID_VC_ID -1
+#define NR_OF_CSI2_SINK_PADS 1
+#define CSI2_PAD_SINK 0
+#define NR_OF_CSI2_SRC_PADS 8
+#define CSI2_PAD_SRC 1
+#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS)
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2
+
+struct ipu6_isys_csi2 {
+ struct ipu6_isys_subdev asd;
+ struct ipu6_isys_csi2_pdata *pdata;
+ struct ipu6_isys *isys;
+ struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS];
+
+ void __iomem *base;
+ u32 receiver_errors;
+ unsigned int nlanes;
+ unsigned int port;
+ unsigned int stream_count;
+};
+
+struct ipu6_isys_csi2_timing {
+ u32 ctermen;
+ u32 csettle;
+ u32 dtermen;
+ u32 dsettle;
+};
+
+struct ipu6_csi2_error {
+ const char *error_string;
+ bool is_info_only;
+};
+
+#define ipu6_isys_subdev_to_csi2(__sd) \
+ container_of(__sd, struct ipu6_isys_csi2, asd)
+
+#define to_ipu6_isys_csi2(__asd) container_of(__asd, struct ipu6_isys_csi2, asd)
+
+s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2);
+int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys,
+ void __iomem *base, unsigned int index);
+void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2);
+void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream);
+void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream);
+void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2);
+void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2);
+int ipu6_isys_csi2_get_remote_desc(u32 source_stream,
+ struct ipu6_isys_csi2 *csi2,
+ struct media_entity *source_entity,
+ struct v4l2_mbus_frame_desc_entry *entry);
+void ipu6_isys_set_csi2_streams_status(struct ipu6_isys_video *av, bool status);
+
+#endif /* IPU6_ISYS_CSI2_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c
new file mode 100644
index 000000000000..1715275e6776
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+#define IPU6_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i))
+#define IPU6_DWC_DPHY_RSTZ 0x00
+#define IPU6_DWC_DPHY_SHUTDOWNZ 0x04
+#define IPU6_DWC_DPHY_HSFREQRANGE 0x08
+#define IPU6_DWC_DPHY_CFGCLKFREQRANGE 0x0c
+#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE 0x10
+#define IPU6_DWC_DPHY_TEST_IFC_REQ 0x14
+#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION 0x18
+#define IPU6_DWC_DPHY_DFT_CTRL0 0x28
+#define IPU6_DWC_DPHY_DFT_CTRL1 0x2c
+#define IPU6_DWC_DPHY_DFT_CTRL2 0x30
+
+/*
+ * Test IFC request layout:
+ * - req: 0 for read, 1 for write
+ * - 12-bit address
+ * - 8-bit data (ignored for reads)
+ * --24----16------4-----0
+ * --|-data-|-addr-|-req-|
+ */
+#define IFC_REQ(req, addr, data) (FIELD_PREP(GENMASK(23, 16), data) | \
+ FIELD_PREP(GENMASK(15, 4), addr) | \
+ FIELD_PREP(GENMASK(1, 0), req))
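+
+/*
+ * For instance (illustrative values), IFC_REQ(TEST_IFC_REQ_WRITE, 0xe2, 0x4c)
+ * encodes to 0x004c0e21: data 0x4c in bits 23..16, address 0xe2 in bits 15..4
+ * and the write request code 1 in bits 1..0.
+ */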
+
+#define TEST_IFC_REQ_READ 0
+#define TEST_IFC_REQ_WRITE 1
+#define TEST_IFC_REQ_RESET 2
+
+#define TEST_IFC_ACCESS_MODE_FSM 0
+#define TEST_IFC_ACCESS_MODE_IFC_CTL 1
+
+enum phy_fsm_state {
+ PHY_FSM_STATE_POWERON = 0,
+ PHY_FSM_STATE_BGPON = 1,
+ PHY_FSM_STATE_CAL_TYPE = 2,
+ PHY_FSM_STATE_BURNIN_CAL = 3,
+ PHY_FSM_STATE_TERMCAL = 4,
+ PHY_FSM_STATE_OFFSETCAL = 5,
+ PHY_FSM_STATE_OFFSET_LANE = 6,
+ PHY_FSM_STATE_IDLE = 7,
+ PHY_FSM_STATE_ULP = 8,
+ PHY_FSM_STATE_DDLTUNNING = 9,
+ PHY_FSM_STATE_SKEW_BACKWARD = 10,
+ PHY_FSM_STATE_INVALID,
+};
+
+static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+ u32 data)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+
+ dev_dbg(dev, "write: reg 0x%lx = data 0x%x", base + addr - isys_base,
+ data);
+ writel(data, base + addr);
+}
+
+static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+ u32 data;
+
+ data = readl(base + addr);
+ dev_dbg(dev, "read: reg 0x%lx = data 0x%x", base + addr - isys_base,
+ data);
+
+ return data;
+}
+
+static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+ u32 data, u8 shift, u8 width)
+{
+ u32 temp;
+ u32 mask;
+
+ mask = (1 << width) - 1;
+ temp = dwc_dphy_read(isys, phy_id, addr);
+ temp &= ~(mask << shift);
+ temp |= (data & mask) << shift;
+ dwc_dphy_write(isys, phy_id, addr, temp);
+}
+
+static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id,
+ u32 addr, u8 shift, u8 width)
+{
+ u32 val;
+
+ val = dwc_dphy_read(isys, phy_id, addr) >> shift;
+ return val & ((1 << width) - 1);
+}
+
+#define DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC)
+static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+ u32 *val)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+ void __iomem *reg;
+ u32 completion;
+ int ret;
+
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+ IFC_REQ(TEST_IFC_REQ_READ, addr, 0));
+ reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION;
+ ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)),
+ 10, DWC_DPHY_TIMEOUT);
+ if (ret) {
+ dev_err(dev, "DWC ifc request read timeout\n");
+ return ret;
+ }
+
+	*val = FIELD_GET(GENMASK(15, 8), completion);
+ dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val);
+
+ return 0;
+}
+
+static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+ u32 data)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+ void __iomem *reg;
+ u32 completion;
+ int ret;
+
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+ IFC_REQ(TEST_IFC_REQ_WRITE, addr, data));
+ reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION;
+ ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)),
+ 10, DWC_DPHY_TIMEOUT);
+ if (ret)
+ dev_err(dev, "DWC ifc request write timeout\n");
+
+ return ret;
+}
+
+static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id,
+ u32 addr, u32 data, u8 shift, u8 width)
+{
+ u32 temp, mask;
+ int ret;
+
+ ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp);
+ if (ret)
+ return;
+
+ mask = (1 << width) - 1;
+ temp &= ~(mask << shift);
+ temp |= (data & mask) << shift;
+ dwc_dphy_ifc_write(isys, phy_id, addr, temp);
+}
+
+static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+ u8 shift, u8 width)
+{
+ int ret;
+ u32 val;
+
+ ret = dwc_dphy_ifc_read(isys, phy_id, addr, &val);
+ if (ret)
+ return 0;
+
+ return ((val >> shift) & ((1 << width) - 1));
+}
+
+static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 fsm_state;
+ int ret;
+
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1);
+ usleep_range(10, 20);
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1);
+
+ ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state,
+ (fsm_state == PHY_FSM_STATE_IDLE ||
+ fsm_state == PHY_FSM_STATE_ULP),
+ 100, DWC_DPHY_TIMEOUT, false, isys,
+ phy_id, 0x1e, 0, 4);
+
+ if (ret)
+ dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id,
+ fsm_state);
+
+ return ret;
+}
+
+struct dwc_dphy_freq_range {
+ u8 hsfreq;
+ u16 min;
+ u16 max;
+ u16 default_mbps;
+ u16 osc_freq_target;
+};
+
+#define DPHY_FREQ_RANGE_NUM (63)
+#define DPHY_FREQ_RANGE_INVALID_INDEX (0xff)
+static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = {
+ {0x00, 80, 97, 80, 335},
+ {0x10, 80, 107, 90, 335},
+ {0x20, 84, 118, 100, 335},
+ {0x30, 93, 128, 110, 335},
+ {0x01, 103, 139, 120, 335},
+ {0x11, 112, 149, 130, 335},
+ {0x21, 122, 160, 140, 335},
+ {0x31, 131, 170, 150, 335},
+ {0x02, 141, 181, 160, 335},
+ {0x12, 150, 191, 170, 335},
+ {0x22, 160, 202, 180, 335},
+ {0x32, 169, 212, 190, 335},
+ {0x03, 183, 228, 205, 335},
+ {0x13, 198, 244, 220, 335},
+ {0x23, 212, 259, 235, 335},
+ {0x33, 226, 275, 250, 335},
+ {0x04, 250, 301, 275, 335},
+ {0x14, 274, 328, 300, 335},
+ {0x25, 297, 354, 325, 335},
+ {0x35, 321, 380, 350, 335},
+ {0x05, 369, 433, 400, 335},
+ {0x16, 416, 485, 450, 335},
+ {0x26, 464, 538, 500, 335},
+ {0x37, 511, 590, 550, 335},
+ {0x07, 559, 643, 600, 335},
+ {0x18, 606, 695, 650, 335},
+ {0x28, 654, 748, 700, 335},
+ {0x39, 701, 800, 750, 335},
+ {0x09, 749, 853, 800, 335},
+ {0x19, 796, 905, 850, 335},
+ {0x29, 844, 958, 900, 335},
+ {0x3a, 891, 1010, 950, 335},
+ {0x0a, 939, 1063, 1000, 335},
+ {0x1a, 986, 1115, 1050, 335},
+ {0x2a, 1034, 1168, 1100, 335},
+ {0x3b, 1081, 1220, 1150, 335},
+ {0x0b, 1129, 1273, 1200, 335},
+ {0x1b, 1176, 1325, 1250, 335},
+ {0x2b, 1224, 1378, 1300, 335},
+ {0x3c, 1271, 1430, 1350, 335},
+ {0x0c, 1319, 1483, 1400, 335},
+ {0x1c, 1366, 1535, 1450, 335},
+ {0x2c, 1414, 1588, 1500, 335},
+ {0x3d, 1461, 1640, 1550, 208},
+ {0x0d, 1509, 1693, 1600, 214},
+ {0x1d, 1556, 1745, 1650, 221},
+ {0x2e, 1604, 1798, 1700, 228},
+ {0x3e, 1651, 1850, 1750, 234},
+ {0x0e, 1699, 1903, 1800, 241},
+ {0x1e, 1746, 1955, 1850, 248},
+ {0x2f, 1794, 2008, 1900, 255},
+ {0x3f, 1841, 2060, 1950, 261},
+ {0x0f, 1889, 2113, 2000, 268},
+ {0x40, 1936, 2165, 2050, 275},
+ {0x41, 1984, 2218, 2100, 281},
+ {0x42, 2031, 2270, 2150, 288},
+ {0x43, 2079, 2323, 2200, 294},
+ {0x44, 2126, 2375, 2250, 302},
+ {0x45, 2174, 2428, 2300, 308},
+ {0x46, 2221, 2480, 2350, 315},
+ {0x47, 2269, 2500, 2400, 321},
+ {0x48, 2316, 2500, 2450, 328},
+ {0x49, 2364, 2500, 2500, 335}
+};
+
+static u16 get_hsfreq_by_mbps(u32 mbps)
+{
+ unsigned int i = DPHY_FREQ_RANGE_NUM;
+
+ while (i--) {
+ if (freqranges[i].default_mbps == mbps ||
+ (mbps >= freqranges[i].min && mbps <= freqranges[i].max))
+ return i;
+ }
+
+ return DPHY_FREQ_RANGE_INVALID_INDEX;
+}
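+
+/*
+ * The table is scanned from the highest rate downwards and the index of the
+ * first entry whose range contains mbps (or whose default rate equals it) is
+ * returned; e.g. (illustrative) mbps = 2500 yields index 62, i.e. hsfreq code
+ * 0x49 with an osc_freq_target of 335.
+ */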
+
+static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys,
+ u32 phy_id, u32 mbps)
+{
+ struct ipu6_bus_device *adev = isys->adev;
+ struct device *dev = &adev->auxdev.dev;
+ struct ipu6_device *isp = adev->isp;
+ u32 cfg_clk_freqrange;
+ u32 osc_freq_target;
+ u32 index;
+
+ dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps);
+
+ index = get_hsfreq_by_mbps(mbps);
+ if (index == DPHY_FREQ_RANGE_INVALID_INDEX) {
+ dev_err(dev, "link freq not found for mbps %u", mbps);
+ return -EINVAL;
+ }
+
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE,
+ freqranges[index].hsfreq, 0, 7);
+
+	/* Force termination calibration */
+ if (isys->phy_termcal_val) {
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1);
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2);
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0x209,
+ isys->phy_termcal_val, 4, 4);
+ }
+
+ /*
+ * Enable override to configure the DDL target oscillation
+ * frequency on bit 0 of register 0xe4
+ */
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1);
+ /*
+	 * Configure registers 0xe2 and 0xe3 with the appropriate DDL target
+	 * oscillation frequency, e.g. 0x1cc (460).
+ */
+ osc_freq_target = freqranges[index].osc_freq_target;
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2,
+ osc_freq_target & 0xff, 0, 8);
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3,
+ (osc_freq_target >> 8) & 0xf, 0, 4);
+
+ if (mbps < 1500) {
+ /* deskew_polarity_rw, for < 1.5Gbps */
+ dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1);
+ }
+
+ /*
+ * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4]
+ * (38.4 - 17) * 4 = ~85 (0x55)
+ */
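+	/*
+	 * e.g. (illustrative, assuming ref_clk is stored in 100 kHz units,
+	 * i.e. 384 for 38.4 MHz): (384 - 170) * 4 / 10 = 85 (0x55).
+	 */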
+ cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10;
+ dev_dbg(dev, "ref_clk = %u clk_freqrange = %u",
+ isp->buttress.ref_clk, cfg_clk_freqrange);
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE,
+ cfg_clk_freqrange, 0, 8);
+
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1);
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1);
+
+ return 0;
+}
+
+static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master,
+ u32 slave, u32 mbps)
+{
+ /* Config mastermacro */
+ dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1);
+
+ /* Config master PHY clk lane to drive long channel clk */
+ dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1);
+
+ /* Config both PHYs data lanes to get clk from long channel */
+ dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1);
+ dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x708, 0x1, 5, 1);
+
+ /* Config slave PHY clk lane to bypass long channel clk to DDR clk */
+ dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1);
+
+ /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */
+ dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2);
+
+ /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */
+ dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1);
+
+ /* Turn off slave PHY LP-RX clk lane */
+ dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1);
+ dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5);
+}
+
+#define PHY_E 4
+static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 rescal_done;
+ int ret;
+
+ ret = dwc_dphy_pwr_up(isys, phy_id);
+ if (ret != 0) {
+ dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret);
+ return ret;
+ }
+
+ /* reset forcerxmode */
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1);
+ dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1);
+
+ dev_dbg(dev, "Dphy %u is ready!", phy_id);
+
+ if (phy_id != PHY_E || isys->phy_termcal_val)
+ return 0;
+
+ usleep_range(100, 200);
+ rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1);
+ if (rescal_done) {
+ isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id,
+ 0x220, 2, 4);
+ dev_dbg(dev, "termcal done with value = %u",
+ isys->phy_termcal_val);
+ }
+
+ return 0;
+}
+
+static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id)
+{
+ dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id);
+
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0);
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0);
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE,
+ TEST_IFC_ACCESS_MODE_FSM);
+ dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+ TEST_IFC_REQ_RESET);
+}
+
+int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ u32 phy_id, primary, secondary;
+ u32 nlanes, port, mbps;
+ s64 link_freq;
+ int ret;
+
+ port = cfg->port;
+
+ if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+ dev_warn(dev, "invalid port ID %d\n", port);
+ return -EINVAL;
+ }
+
+ nlanes = cfg->nlanes;
+	/* only ports 0, 2 and 4 support 4 lanes */
+ if (nlanes == 4 && port % 2) {
+ dev_err(dev, "invalid csi-port %u with %u lanes\n", port,
+ nlanes);
+ return -EINVAL;
+ }
+
+ link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]);
+ if (link_freq < 0) {
+ dev_err(dev, "get link freq failed(%lld).\n", link_freq);
+ return link_freq;
+ }
+
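+	/*
+	 * The D-PHY per-lane rate in Mbps is twice the link frequency in MHz
+	 * (double data rate), hence dividing the Hz value by 500000.
+	 */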
+ mbps = div_u64(link_freq, 500000);
+
+ phy_id = port;
+ primary = port & ~1;
+ secondary = primary + 1;
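+	/*
+	 * In 4-lane (aggregated) mode the even-numbered PHY acts as the
+	 * master and the following odd-numbered PHY as the slave, see
+	 * ipu6_isys_dwc_phy_aggr_setup() above.
+	 */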
+ if (on) {
+ if (nlanes == 4) {
+ dev_dbg(dev, "config phy %u and %u in aggr mode\n",
+ primary, secondary);
+
+ ipu6_isys_dwc_phy_reset(isys, primary);
+ ipu6_isys_dwc_phy_reset(isys, secondary);
+ ipu6_isys_dwc_phy_aggr_setup(isys, primary,
+ secondary, mbps);
+
+ ret = ipu6_isys_dwc_phy_config(isys, primary, mbps);
+ if (ret)
+ return ret;
+ ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps);
+ if (ret)
+ return ret;
+
+ ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary);
+ if (ret)
+ return ret;
+
+ ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary);
+ return ret;
+ }
+
+ dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n",
+ phy_id, nlanes);
+
+ ipu6_isys_dwc_phy_reset(isys, phy_id);
+ ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps);
+ if (ret)
+ return ret;
+
+ ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id);
+ return ret;
+ }
+
+ if (nlanes == 4) {
+ dev_dbg(dev, "Power down phy %u and phy %u for port %u\n",
+ primary, secondary, port);
+ ipu6_isys_dwc_phy_reset(isys, secondary);
+ ipu6_isys_dwc_phy_reset(isys, primary);
+
+ return 0;
+ }
+
+ dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes);
+
+ ipu6_isys_dwc_phy_reset(isys, phy_id);
+
+ return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c
new file mode 100644
index 000000000000..c804291cfae9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+/* only use BB0, BB2, BB4, and BB6 on PHY0 */
+#define IPU6SE_ISYS_PHY_BB_NUM 4
+#define IPU6SE_ISYS_PHY_0_BASE 0x10000
+
+#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x))
+#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x))
+#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x))
+
+/*
+ * use port_cfg to configure which data lanes are used
+ * +---------+ +------+ +-----+
+ * | port0 x4<-----| | | |
+ * | | | port | | |
+ * | port1 x2<-----| | | |
+ * | | | <-| PHY |
+ * | port2 x4<-----| | | |
+ * | | |config| | |
+ * | port3 x2<-----| | | |
+ * +---------+ +------+ +-----+
+ */
+static const unsigned int csi2_port_cfg[][3] = {
+ {0, 0, 0x1f}, /* no link */
+ {4, 0, 0x10}, /* x4 + x4 config */
+ {2, 0, 0x12}, /* x2 + x2 config */
+ {1, 0, 0x13}, /* x1 + x1 config */
+ {2, 1, 0x15}, /* x2x1 + x2x1 config */
+ {1, 1, 0x16}, /* x1x1 + x1x1 config */
+ {2, 2, 0x18}, /* x2x2 + x2x2 config */
+ {1, 2, 0x19} /* x1x2 + x1x2 config */
+};
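+
+/*
+ * ipu6_isys_csi2_set_port_cfg() below picks entry 1, 6 or 5 of this table
+ * for a 4-, 2- or 1-lane link respectively and writes its third column to
+ * the SIP port config register.
+ */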
+
+/* port, nlanes, bbindex, portcfg */
+static const unsigned int phy_port_cfg[][4] = {
+ /* sip0 */
+ {0, 1, 0, 0x15},
+ {0, 2, 0, 0x15},
+ {0, 4, 0, 0x15},
+ {0, 4, 2, 0x22},
+ /* sip1 */
+ {2, 1, 4, 0x15},
+ {2, 2, 4, 0x15},
+ {2, 4, 4, 0x15},
+ {2, 4, 6, 0x22}
+};
+
+static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys,
+ unsigned int port,
+ unsigned int nlanes)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *base = isys->adev->isp->base;
+ unsigned int bbnum;
+ u32 val, reg, i;
+
+ dev_dbg(dev, "port %u with %u lanes", port, nlanes);
+
+	/* only rates below 1.5 Gbps are supported */
+ for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) {
+ /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+ val = readl(base + reg);
+ val |= FIELD_PREP(GENMASK(6, 1), 13);
+ writel(val, base + reg);
+
+ /* cphy_rx_control1.en_crc1 = 1 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i);
+ val = readl(base + reg);
+ val |= BIT(31);
+ writel(val, base + reg);
+
+ /* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i);
+ val = readl(base + reg);
+ val |= BIT(25) | BIT(26);
+ writel(val, base + reg);
+
+ /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+ val = readl(base + reg);
+ val |= BIT(0);
+ writel(val, base + reg);
+ }
+
+ /* Front end config, use minimal channel loss */
+ for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) {
+ if (phy_port_cfg[i][0] == port &&
+ phy_port_cfg[i][1] == nlanes) {
+ bbnum = phy_port_cfg[i][2] / 2;
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum);
+ val = readl(base + reg);
+ val |= phy_port_cfg[i][3];
+ writel(val, base + reg);
+ }
+ }
+}
+
+static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys)
+{
+ void __iomem *base = isys->adev->isp->base;
+ u32 val, reg;
+
+ reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL;
+ val = readl(base + reg);
+ val |= BIT(0);
+ writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL);
+
+ reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL;
+ val = readl(base + reg);
+ val |= BIT(0);
+ writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL);
+
+ reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL;
+ val = readl(base + reg);
+ val |= BIT(0);
+ writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL);
+
+ reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL;
+ val = readl(base + reg);
+ val |= BIT(0);
+ writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL);
+}
+
+static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys,
+ unsigned int port, unsigned int nlanes)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int sip = port / 2;
+ unsigned int index;
+
+ switch (nlanes) {
+ case 1:
+ index = 5;
+ break;
+ case 2:
+ index = 6;
+ break;
+ case 4:
+ index = 1;
+ break;
+ default:
+ dev_err(dev, "lanes nr %u is unsupported\n", nlanes);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "port config for port %u with %u lanes\n", port, nlanes);
+
+ writel(csi2_port_cfg[index][2],
+ isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip));
+
+ return 0;
+}
+
+static void
+ipu6_isys_csi2_set_timing(struct ipu6_isys *isys,
+ const struct ipu6_isys_csi2_timing *timing,
+ unsigned int port, unsigned int nlanes)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *reg;
+ u32 port_base;
+ u32 i;
+
+ port_base = (port % 2) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) :
+ CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port);
+
+ dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes);
+
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE;
+
+ writel(timing->ctermen, reg);
+
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE;
+ writel(timing->csettle, reg);
+
+ for (i = 0; i < nlanes; i++) {
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i);
+ writel(timing->dtermen, reg);
+
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i);
+ writel(timing->dsettle, reg);
+ }
+}
+
+#define DPHY_TIMER_INCR 0x28
+int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ int ret = 0;
+ u32 nlanes;
+ u32 port;
+
+ if (!on)
+ return 0;
+
+ port = cfg->port;
+ nlanes = cfg->nlanes;
+
+ if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+ dev_warn(dev, "invalid port ID %d\n", port);
+ return -EINVAL;
+ }
+
+ ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes);
+
+ writel(DPHY_TIMER_INCR,
+ isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR);
+
+ /* set port cfg and rx timing */
+ ipu6_isys_csi2_set_timing(isys, timing, port, nlanes);
+
+ ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes);
+ if (ret)
+ return ret;
+
+ ipu6_isys_csi2_rx_control(isys);
+
+ return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c
new file mode 100644
index 000000000000..71aa5009512a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include <linux/time64.h>
+
+#include <media/v4l2-async.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_CTL_RESET BIT(4)
+#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_POWER_ACK BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_READY BIT(4)
+
+#define MCD_PHY_POWER_STATUS_TIMEOUT (200 * USEC_PER_MSEC)
+
+/*
+ * Bridge to the PHYs in the buttress register map; each PHY has 16 KiB.
+ * There are only 2 PHYs for TGL U and Y.
+ */
+#define IPU6_ISYS_MCD_PHY_BASE(i) (0x10000 + (i) * 0x4000)
+
+/*
+ * There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL.
+ * Each MCD PHY has 12 lanes: 8 data lanes and 4 clock lanes.
+ * CSI ports 1, 3 (5, 7) support at most 2 data lanes.
+ * CSI ports 0, 2 (4, 6) support at most 4 data lanes.
+ * The PHY configuration is PPI based rather than port based.
+ * Left:
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | PPI | PPI5 | PPI4 | PPI3 | PPI2 | PPI1 | PPI0 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x4 | unused | D3 | D2 | C0 | D0 | D1 |
+ * |---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x2x2 | C1 | D0 | D1 | C0 | D0 | D1 |
+ * ----------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x2x1 | C1 | D0 | unused | C0 | D0 | D1 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x1x1 | C1 | D0 | unused | C0 | D0 | unused |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x1x2 | C1 | D0 | D1 | C0 | D0 | unused |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ *
+ * Right:
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | PPI | PPI6 | PPI7 | PPI8 | PPI9 | PPI10 | PPI11 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x4 | D1 | D0 | C2 | D2 | D3 | unused |
+ * |---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x2x2 | D1 | D0 | C2 | D1 | D0 | C3 |
+ * ----------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x2x1 | D1 | D0 | C2 | unused | D0 | C3 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x1x1 | unused | D0 | C2 | unused | D0 | C3 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ * | | | | | | | |
+ * | x1x2 | unused | D0 | C2 | D1 | D0 | C3 |
+ * +---------+---------+---------+---------+--------+---------+----------+
+ *
+ * ppi mapping per phy :
+ *
+ * x4 + x4:
+ * Left : port0 - PPI range {0, 1, 2, 3, 4}
+ * Right: port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x4 + x2x2:
+ * Left: port0 - PPI range {0, 1, 2, 3, 4}
+ * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11}
+ *
+ * x2x2 + x4:
+ * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5}
+ * Right: port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x2x2 + x2x2:
+ * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5}
+ * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11}
+ */
+
+struct phy_reg {
+ u32 reg;
+ u32 val;
+};
+
+static const struct phy_reg common_init_regs[] = {
+ /* for TGL-U, use 0x80000000 */
+ {0x00000040, 0x80000000},
+ {0x00000044, 0x00a80880},
+ {0x00000044, 0x00b80880},
+ {0x00000010, 0x0000078c},
+ {0x00000344, 0x2f4401e2},
+ {0x00000544, 0x924401e2},
+ {0x00000744, 0x594401e2},
+ {0x00000944, 0x624401e2},
+ {0x00000b44, 0xfc4401e2},
+ {0x00000d44, 0xc54401e2},
+ {0x00000f44, 0x034401e2},
+ {0x00001144, 0x8f4401e2},
+ {0x00001344, 0x754401e2},
+ {0x00001544, 0xe94401e2},
+ {0x00001744, 0xcb4401e2},
+ {0x00001944, 0xfa4401e2}
+};
+
+static const struct phy_reg x1_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78ea},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xfe6030fa},
+ {0x00000480, 0x29ef5ed0},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port1_config_regs[] = {
+ {0x00000c94, 0xc80060fa},
+ {0x00000c80, 0xcf47abea},
+ {0x00000c90, 0x10a0840b},
+ {0x00000ca8, 0xdf04010a},
+ {0x00000d00, 0x57050060},
+ {0x00000d10, 0x0030001c},
+ {0x00000d38, 0x5f004444},
+ {0x00000d3c, 0x78464204},
+ {0x00000d48, 0x7821f940},
+ {0x00000d4c, 0xb2000433},
+ {0x00000a94, 0xc91030fa},
+ {0x00000a80, 0x5a166ed0},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x5d060100},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port2_config_regs[] = {
+ {0x00001294, 0x28f000fa},
+ {0x00001280, 0x08130cea},
+ {0x00001290, 0x10a0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0x2d20b0fa},
+ {0x00001080, 0xade75dd0},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port3_config_regs[] = {
+ {0x00001894, 0xc80060fa},
+ {0x00001880, 0x0f90fd6a},
+ {0x00001890, 0x10a0840b},
+ {0x000018a8, 0xdf04010a},
+ {0x00001900, 0x57050060},
+ {0x00001910, 0x0030001c},
+ {0x00001938, 0x5f004444},
+ {0x0000193c, 0x78464204},
+ {0x00001948, 0x7821f940},
+ {0x0000194c, 0xb2000433},
+ {0x00001694, 0x3050d0fa},
+ {0x00001680, 0x0ef6d050},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0xe301010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78ea},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xc80060fa},
+ {0x00000480, 0x29ef5ed8},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000000},
+ {0x00000294, 0xc80060fa},
+ {0x00000280, 0xcb45b950},
+ {0x00000290, 0x10a0540b},
+ {0x000002a8, 0x8c01010a},
+ {0x00000300, 0xef053460},
+ {0x00000310, 0x8030101c},
+ {0x00000338, 0x41808444},
+ {0x0000033c, 0x32422204},
+ {0x00000340, 0x0180088c},
+ {0x00000374, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port1_config_regs[] = {
+ {0x00000c94, 0xc80060fa},
+ {0x00000c80, 0xcf47abea},
+ {0x00000c90, 0x10a0840b},
+ {0x00000ca8, 0xdf04010a},
+ {0x00000d00, 0x57050060},
+ {0x00000d10, 0x0030001c},
+ {0x00000d38, 0x5f004444},
+ {0x00000d3c, 0x78464204},
+ {0x00000d48, 0x7821f940},
+ {0x00000d4c, 0xb2000433},
+ {0x00000a94, 0xc80060fa},
+ {0x00000a80, 0x5a166ed8},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x7a01010a},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000000},
+ {0x00000894, 0xc80060fa},
+ {0x00000880, 0x4d4f21d0},
+ {0x00000890, 0x10a0540b},
+ {0x000008a8, 0x5601010a},
+ {0x00000900, 0xef053460},
+ {0x00000910, 0x8030101c},
+ {0x00000938, 0xdf808444},
+ {0x0000093c, 0xc8422204},
+ {0x00000940, 0x0180088c},
+ {0x00000974, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port2_config_regs[] = {
+ {0x00001294, 0xc80060fa},
+ {0x00001280, 0x08130cea},
+ {0x00001290, 0x10a0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0xc80060fa},
+ {0x00001080, 0xade75dd8},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000000},
+ {0x00000e94, 0xc80060fa},
+ {0x00000e80, 0x0fbf16d0},
+ {0x00000e90, 0x10a0540b},
+ {0x00000ea8, 0x7a01010a},
+ {0x00000f00, 0xf5053460},
+ {0x00000f10, 0xc030101c},
+ {0x00000f38, 0xdf808444},
+ {0x00000f3c, 0xc8422204},
+ {0x00000f40, 0x8180088c},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port3_config_regs[] = {
+ {0x00001894, 0xc80060fa},
+ {0x00001880, 0x0f90fd6a},
+ {0x00001890, 0x10a0840b},
+ {0x000018a8, 0xdf04010a},
+ {0x00001900, 0x57050060},
+ {0x00001910, 0x0030001c},
+ {0x00001938, 0x5f004444},
+ {0x0000193c, 0x78464204},
+ {0x00001948, 0x7821f940},
+ {0x0000194c, 0xb2000433},
+ {0x00001694, 0xc80060fa},
+ {0x00001680, 0x0ef6d058},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0x7a01010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000000},
+ {0x00001494, 0xc80060fa},
+ {0x00001480, 0xf9d34bd0},
+ {0x00001490, 0x10a0540b},
+ {0x000014a8, 0x7a01010a},
+ {0x00001500, 0x1b053460},
+ {0x00001510, 0x0030101c},
+ {0x00001538, 0xdf808444},
+ {0x0000153c, 0xc8422204},
+ {0x00001540, 0x8180088c},
+ {0x00001574, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78fa},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xfe6030fa},
+ {0x00000480, 0x29ef5ed8},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000004},
+ {0x00000294, 0x23e030fa},
+ {0x00000280, 0xcb45b950},
+ {0x00000290, 0x10a0540b},
+ {0x000002a8, 0x8c01010a},
+ {0x00000300, 0xef053460},
+ {0x00000310, 0x8030101c},
+ {0x00000338, 0x41808444},
+ {0x0000033c, 0x32422204},
+ {0x00000340, 0x0180088c},
+ {0x00000374, 0x00000004},
+ {0x00000894, 0x5620b0fa},
+ {0x00000880, 0x4d4f21dc},
+ {0x00000890, 0x10a0540b},
+ {0x000008a8, 0x5601010a},
+ {0x00000900, 0xef053460},
+ {0x00000910, 0x8030101c},
+ {0x00000938, 0xdf808444},
+ {0x0000093c, 0xc8422204},
+ {0x00000940, 0x0180088c},
+ {0x00000974, 0x00000004},
+ {0x00000a94, 0xc91030fa},
+ {0x00000a80, 0x5a166ecc},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x5d01010a},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000004},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port1_config_regs[] = {
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port2_config_regs[] = {
+ {0x00001294, 0x28f000fa},
+ {0x00001280, 0x08130cfa},
+ {0x00001290, 0x10c0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0x2d20b0fa},
+ {0x00001080, 0xade75dd8},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000004},
+ {0x00000e94, 0xd308d0fa},
+ {0x00000e80, 0x0fbf16d0},
+ {0x00000e90, 0x10a0540b},
+ {0x00000ea8, 0x2c01010a},
+ {0x00000f00, 0xf5053460},
+ {0x00000f10, 0xc030101c},
+ {0x00000f38, 0xdf808444},
+ {0x00000f3c, 0xc8422204},
+ {0x00000f40, 0x8180088c},
+ {0x00000f74, 0x00000004},
+ {0x00001494, 0x136850fa},
+ {0x00001480, 0xf9d34bdc},
+ {0x00001490, 0x10a0540b},
+ {0x000014a8, 0x5a01010a},
+ {0x00001500, 0x1b053460},
+ {0x00001510, 0x0030101c},
+ {0x00001538, 0xdf808444},
+ {0x0000153c, 0xc8422204},
+ {0x00001540, 0x8180088c},
+ {0x00001574, 0x00000004},
+ {0x00001694, 0x3050d0fa},
+ {0x00001680, 0x0ef6d04c},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0xe301010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000004},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port3_config_regs[] = {
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg *x1_config_regs[4] = {
+ x1_port0_config_regs,
+ x1_port1_config_regs,
+ x1_port2_config_regs,
+ x1_port3_config_regs
+};
+
+static const struct phy_reg *x2_config_regs[4] = {
+ x2_port0_config_regs,
+ x2_port1_config_regs,
+ x2_port2_config_regs,
+ x2_port3_config_regs
+};
+
+static const struct phy_reg *x4_config_regs[4] = {
+ x4_port0_config_regs,
+ x4_port1_config_regs,
+ x4_port2_config_regs,
+ x4_port3_config_regs
+};
+
+static const struct phy_reg **config_regs[3] = {
+ x1_config_regs,
+ x2_config_regs,
+ x4_config_regs
+};
+
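+/*
+ * Enable power for the given MCD PHY and poll its status register until
+ * the hardware acknowledges the power-up, or the poll times out.
+ */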
+static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ u32 val;
+ int ret;
+
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+ val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN;
+ writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+
+ ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+ val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK,
+ 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+ if (ret)
+ dev_err(dev, "PHY%d powerup ack timeout", id);
+
+ return ret;
+}
+
+static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ u32 val;
+ int ret;
+
+ writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+ ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+ val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK),
+ 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+ if (ret)
+ dev_err(dev, "PHY%d powerdown ack timeout", id);
+
+ return ret;
+}
+
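+/* Assert or de-assert the reset bit in the PHY control register. */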
+static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ u32 val;
+
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+ if (assert)
+ val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET;
+ else
+ val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET);
+
+ writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+}
+
+static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+ val, val & CSI_REG_HUB_GPREG_PHY_READY,
+ 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+ if (ret)
+ dev_err(dev, "PHY%d ready ack timeout", id);
+
+ return ret;
+}
+
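+/*
+ * Write the common init register sequence to the PHY of every sensor
+ * found on the notifier done list.
+ */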
+static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys)
+{
+ struct ipu6_bus_device *adev = isys->adev;
+ struct ipu6_device *isp = adev->isp;
+ void __iomem *isp_base = isp->base;
+ struct sensor_async_sd *s_asd;
+ struct v4l2_async_connection *asc;
+ void __iomem *phy_base;
+ unsigned int i;
+ u8 phy_id;
+
+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) {
+ s_asd = container_of(asc, struct sensor_async_sd, asc);
+ phy_id = s_asd->csi2.port / 4;
+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id);
+
+ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++)
+ writel(common_init_regs[i].val,
+ phy_base + common_init_regs[i].reg);
+ }
+}
+
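+/*
+ * Map a driver CSI-2 port number to its combo PHY port and check that
+ * the resulting port is valid for the requested lane count.
+ */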
+static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg)
+{
+ int phy_port;
+ int ret;
+
+ if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1))
+ return -EINVAL;
+
+ /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C3 */
+ /* normalize driver port number */
+ phy_port = cfg->port % 4;
+
+ /* swap port number only for A and B */
+ if (phy_port == 0)
+ phy_port = 1;
+ else if (phy_port == 1)
+ phy_port = 0;
+
+ ret = phy_port;
+
+ /* check validity per lane configuration */
+ if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2))
+ ret = -EINVAL;
+ else if ((cfg->nlanes == 2 || cfg->nlanes == 1) &&
+ !(phy_port >= 0 && phy_port <= 3))
+ ret = -EINVAL;
+
+ return ret;
+}
+
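+/*
+ * Program the per-port PHY configuration registers, selected by the
+ * lane count (x1, x2 or x4), for every connected sensor.
+ */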
+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_bus_device *adev = isys->adev;
+ const struct phy_reg **phy_config_regs;
+ struct ipu6_device *isp = adev->isp;
+ void __iomem *isp_base = isp->base;
+ struct sensor_async_sd *s_asd;
+ struct ipu6_isys_csi2_config cfg;
+ struct v4l2_async_connection *asc;
+ int phy_port, phy_id;
+ unsigned int i;
+ void __iomem *phy_base;
+
+ list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) {
+ s_asd = container_of(asc, struct sensor_async_sd, asc);
+ cfg.port = s_asd->csi2.port;
+ cfg.nlanes = s_asd->csi2.nlanes;
+ phy_port = ipu6_isys_driver_port_to_phy_port(&cfg);
+ if (phy_port < 0) {
+ dev_err(dev, "invalid port %d for lane %d", cfg.port,
+ cfg.nlanes);
+ return -ENXIO;
+ }
+
+ phy_id = cfg.port / 4;
+ phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id);
+ dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id,
+ cfg.nlanes);
+
+ phy_config_regs = config_regs[cfg.nlanes / 2];
+ cfg.port = phy_port;
+ for (i = 0; phy_config_regs[cfg.port][i].reg; i++)
+ writel(phy_config_regs[cfg.port][i].val,
+ phy_base + phy_config_regs[cfg.port][i].reg);
+ }
+
+ return 0;
+}
+
+#define CSI_MCD_PHY_NUM 2
+static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM];
+
+int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ u8 port, phy_id;
+ refcount_t *ref;
+ int ret;
+
+ port = cfg->port;
+ phy_id = port / 4;
+
+ ref = &phy_power_ref_count[phy_id];
+
+ dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port,
+ cfg->nlanes);
+
+ if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+ dev_warn(dev, "invalid port ID %d\n", port);
+ return -EINVAL;
+ }
+
+ if (on) {
+ if (refcount_read(ref)) {
+ dev_dbg(dev, "for phy %d is already UP", phy_id);
+ refcount_inc(ref);
+ return 0;
+ }
+
+ ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id);
+ if (ret)
+ return ret;
+
+ ipu6_isys_mcd_phy_reset(isys, phy_id, 0);
+ ipu6_isys_mcd_phy_common_init(isys);
+
+ ret = ipu6_isys_mcd_phy_config(isys);
+ if (ret)
+ return ret;
+
+ ipu6_isys_mcd_phy_reset(isys, phy_id, 1);
+ ret = ipu6_isys_mcd_phy_ready(isys, phy_id);
+ if (ret)
+ return ret;
+
+ refcount_set(ref, 1);
+ return 0;
+ }
+
+ if (!refcount_dec_and_test(ref))
+ return 0;
+
+ return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
new file mode 100644
index 000000000000..40a8ebfcfce2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-video.h"
+
+static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ u32 size = ipu6_isys_get_data_size(av);
+
+ /* num_planes == 0: we're being called through VIDIOC_REQBUFS */
+ if (!*num_planes) {
+ sizes[0] = size;
+ } else if (sizes[0] < size) {
+ dev_dbg(dev, "%s: queue setup: size %u < %u\n",
+ av->vdev.name, sizes[0], size);
+ return -EINVAL;
+ }
+
+ *num_planes = 1;
+
+ return 0;
+}
+
+static int ipu6_isys_buf_prepare(struct vb2_buffer *vb)
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ u32 bytesperline = ipu6_isys_get_bytes_per_line(av);
+ u32 height = ipu6_isys_get_frame_height(av);
+ u32 size = ipu6_isys_get_data_size(av);
+
+ dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n",
+ av->vdev.name, size, vb2_plane_size(vb, 0));
+
+ if (size > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, bytesperline * height);
+
+ return 0;
+}
+
+/*
+ * Queue a buffer list back to incoming or active queues. The buffers
+ * are removed from the buffer list.
+ */
+void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state)
+{
+ struct ipu6_isys_buffer *ib, *ib_safe;
+ unsigned long flags;
+ bool first = true;
+
+ if (!bl)
+ return;
+
+ WARN_ON_ONCE(!bl->nbufs);
+ WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE &&
+ op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING);
+
+ list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+ struct ipu6_isys_video *av;
+ struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+ struct ipu6_isys_queue *aq =
+ vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct device *dev;
+
+ av = ipu6_isys_queue_to_video(aq);
+ dev = &av->isys->adev->auxdev.dev;
+ spin_lock_irqsave(&aq->lock, flags);
+ list_del(&ib->head);
+ if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE)
+ list_add(&ib->head, &aq->active);
+ else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING)
+ list_add_tail(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE)
+ vb2_buffer_done(vb, state);
+
+ if (first) {
+ dev_dbg(dev,
+ "queue buf list %p flags %lx, s %d, %d bufs\n",
+ bl, op_flags, state, bl->nbufs);
+ first = false;
+ }
+
+ bl->nbufs--;
+ }
+
+ WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush in cases where requests may
+ * have been queued to firmware and the *firmware streamon fails for a
+ * reason or another.
+ */
+static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream)
+{
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu6_isys_queue *aq;
+ unsigned long flags;
+
+ lockdep_assert_held(&stream->mutex);
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct ipu6_isys_buffer *ib, *ib_safe;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
+ struct vb2_buffer *vb =
+ ipu6_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ if (av->streaming) {
+ dev_dbg(dev,
+ "%s: queue buffer %u back to incoming\n",
+ av->vdev.name, vb->index);
+ /* Queue already streaming, return to driver. */
+ list_add(&ib->head, &aq->incoming);
+ continue;
+ }
+ /* Queue not yet streaming, return to user. */
+ dev_dbg(dev, "%s: return %u back to videobuf2\n",
+ av->vdev.name, vb->index);
+ vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib),
+ VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&aq->lock, flags);
+ }
+}
+
+/*
+ * Attempt to obtain a buffer list from the incoming queues: a list that
+ * contains one entry from each video buffer queue. If a buffer cannot be
+ * obtained from every queue, the buffers already taken are returned to
+ * their queues.
+ */
+static int buffer_list_get(struct ipu6_isys_stream *stream,
+ struct ipu6_isys_buffer_list *bl)
+{
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu6_isys_queue *aq;
+ unsigned long flags;
+ unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING;
+
+ bl->nbufs = 0;
+ INIT_LIST_HEAD(&bl->head);
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu6_isys_buffer *ib;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->incoming)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ if (!list_empty(&bl->head))
+ ipu6_isys_buffer_list_queue(bl, buf_flag, 0);
+ return -ENODATA;
+ }
+
+ ib = list_last_entry(&aq->incoming,
+ struct ipu6_isys_buffer, head);
+
+ dev_dbg(dev, "buffer: %s: buffer %u\n",
+ ipu6_isys_queue_to_video(aq)->vdev.name,
+ ipu6_isys_buffer_to_vb2_buffer(ib)->index);
+ list_del(&ib->head);
+ list_add(&ib->head, &bl->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ bl->nbufs++;
+ }
+
+ dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs);
+
+ return 0;
+}
+
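+/* Fill the output pin address and buffer id for a single vb2 buffer. */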
+static void
+ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb,
+ struct ipu6_fw_isys_frame_buff_set_abi *set)
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+
+ set->output_pins[aq->fw_output].addr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ set->output_pins[aq->fw_output].out_buf_id = vb->index + 1;
+}
+
+/*
+ * Convert a buffer list to an isys fw ABI framebuffer set. The
+ * buffer list is not modified.
+ */
+#define IPU6_ISYS_FRAME_NUM_THRESHOLD (30)
+void
+ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
+ struct ipu6_isys_stream *stream,
+ struct ipu6_isys_buffer_list *bl)
+{
+ struct ipu6_isys_buffer *ib;
+
+ WARN_ON(!bl->nbufs);
+
+ set->send_irq_sof = 1;
+ set->send_resp_sof = 1;
+ set->send_irq_eof = 0;
+ set->send_resp_eof = 0;
+
+ if (stream->streaming)
+ set->send_irq_capture_ack = 0;
+ else
+ set->send_irq_capture_ack = 1;
+ set->send_irq_capture_done = 0;
+
+ set->send_resp_capture_ack = 1;
+ set->send_resp_capture_done = 1;
+ if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) {
+ set->send_resp_capture_ack = 0;
+ set->send_resp_capture_done = 0;
+ }
+
+ list_for_each_entry(ib, &bl->head, head) {
+ struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+ ipu6_isys_buf_to_fw_frame_buf_pin(vb, set);
+ }
+}
+
+/* Start streaming for real. The buffer list must be available. */
+static int ipu6_isys_stream_start(struct ipu6_isys_video *av,
+ struct ipu6_isys_buffer_list *bl, bool error)
+{
+ struct ipu6_isys_stream *stream = av->stream;
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu6_isys_buffer_list __bl;
+ int ret;
+
+ mutex_lock(&stream->isys->stream_mutex);
+ ret = ipu6_isys_video_set_streaming(av, 1, bl);
+ mutex_unlock(&stream->isys->stream_mutex);
+ if (ret)
+ goto out_requeue;
+
+ stream->streaming = 1;
+
+ bl = &__bl;
+
+ do {
+ struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct isys_fw_msgs *msg;
+ u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE;
+
+ ret = buffer_list_get(stream, bl);
+ if (ret < 0)
+ break;
+
+ msg = ipu6_get_fw_msg_buf(stream);
+ if (!msg)
+ return -ENOMEM;
+
+ buf = &msg->fw_msg.frame;
+ ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl);
+ ipu6_fw_isys_dump_frame_buff_set(dev, buf,
+ stream->nr_output_pins);
+ ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE,
+ 0);
+ ret = ipu6_fw_isys_complex_cmd(stream->isys,
+ stream->stream_handle, buf,
+ msg->dma_addr, sizeof(*buf),
+ send_type);
+ } while (!WARN_ON(ret));
+
+ return 0;
+
+out_requeue:
+ if (bl && bl->nbufs)
+ ipu6_isys_buffer_list_queue(bl,
+ IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
+ (error ?
+ IPU6_ISYS_BUFFER_LIST_FL_SET_STATE :
+ 0), error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+ flush_firmware_streamon_fail(stream);
+
+ return ret;
+}
+
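+/*
+ * vb2 buf_queue callback: add the buffer to the incoming list and, once
+ * every queue of the stream has a buffer available, send a capture
+ * request to the firmware, starting the stream first if needed.
+ */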
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu6_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+ struct ipu6_isys_buffer *ib = &ivb->ib;
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct media_pipeline *media_pipe =
+ media_entity_pipeline(&av->vdev.entity);
+ struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct ipu6_isys_stream *stream = av->stream;
+ struct ipu6_isys_buffer_list bl;
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ dma_addr_t dma;
+ int ret;
+
+ dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name);
+
+ dma = vb2_dma_contig_plane_dma_addr(vb, 0);
+ dev_dbg(dev, "iova: iova %pad\n", &dma);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_add(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (!media_pipe || !vb->vb2_queue->start_streaming_called) {
+ dev_dbg(dev, "media pipeline is not ready for %s\n",
+ av->vdev.name);
+ return;
+ }
+
+ mutex_lock(&stream->mutex);
+
+ if (stream->nr_streaming != stream->nr_queues) {
+ dev_dbg(dev, "not streaming yet, adding to incoming\n");
+ goto out;
+ }
+
+ /*
+ * We just added one buffer to the incoming list of this queue
+ * (above). Let's see whether all queues in the pipeline now
+ * have a buffer available.
+ */
+ ret = buffer_list_get(stream, &bl);
+ if (ret < 0) {
+ dev_dbg(dev, "No buffers available\n");
+ goto out;
+ }
+
+ msg = ipu6_get_fw_msg_buf(stream);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ buf = &msg->fw_msg.frame;
+ ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl);
+ ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins);
+
+ if (!stream->streaming) {
+ ret = ipu6_isys_stream_start(av, &bl, true);
+ if (ret)
+ dev_err(dev, "stream start failed.\n");
+ goto out;
+ }
+
+ /*
+ * We must queue the buffers in the buffer list to the
+ * appropriate video buffer queues BEFORE passing them to the
+ * firmware since we could get a buffer event back before we
+ * have queued them ourselves to the active queue.
+ */
+ ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+ ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle,
+ buf, msg->dma_addr, sizeof(*buf),
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE);
+ if (ret < 0)
+ dev_err(dev, "send stream capture failed\n");
+
+out:
+ mutex_unlock(&stream->mutex);
+}
+
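+/*
+ * Check that the remote source pad format matches the video node format
+ * in width, height and media bus code.
+ */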
+static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq)
+{
+ struct v4l2_mbus_framefmt format;
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct media_pad *remote_pad =
+ media_pad_remote_pad_first(av->vdev.entity.pads);
+ struct v4l2_subdev *sd;
+ u32 r_stream, code;
+ int ret;
+
+ if (!remote_pad)
+ return -ENOTCONN;
+
+ sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+ r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index);
+
+ ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream,
+ &format);
+
+ if (ret) {
+ dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n",
+ sd->entity.name, remote_pad->index, r_stream);
+ return ret;
+ }
+
+ if (format.width != ipu6_isys_get_frame_width(av) ||
+ format.height != ipu6_isys_get_frame_height(av)) {
+ dev_dbg(dev, "wrong width or height %ux%u (%ux%u expected)\n",
+ ipu6_isys_get_frame_width(av),
+ ipu6_isys_get_frame_height(av), format.width,
+ format.height);
+ return -EINVAL;
+ }
+
+ code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code;
+ if (format.code != code) {
+ dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n",
+ code, format.code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void return_buffers(struct ipu6_isys_queue *aq,
+ enum vb2_buffer_state state)
+{
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct ipu6_isys_buffer *ib;
+ bool need_reset = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ while (!list_empty(&aq->incoming)) {
+ struct vb2_buffer *vb;
+
+ ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer,
+ head);
+ vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ }
+
+ /*
+ * Something went wrong (FW crash / HW hang / not all buffers
+ * returned from isys) if there are still buffers queued in the active
+ * queue. Clean up here and flag the isys for reset.
+ */
+ while (!list_empty(&aq->active)) {
+ struct vb2_buffer *vb;
+
+ ib = list_first_entry(&aq->active, struct ipu6_isys_buffer,
+ head);
+ vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ need_reset = true;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (need_reset) {
+ mutex_lock(&av->isys->mutex);
+ av->isys->need_reset = true;
+ mutex_unlock(&av->isys->mutex);
+ }
+}
+
+static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av)
+{
+ video_device_pipeline_stop(&av->vdev);
+ ipu6_isys_put_stream(av->stream);
+ av->stream = NULL;
+}
+
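+/*
+ * vb2 start_streaming callback: set up the pipeline, validate the link
+ * format, open the firmware and start the stream once all of its queues
+ * are streaming.
+ */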
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ const struct ipu6_isys_pixelformat *pfmt =
+ ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0);
+ struct ipu6_isys_buffer_list __bl, *bl = NULL;
+ struct ipu6_isys_stream *stream;
+ struct media_entity *source_entity = NULL;
+ int nr_queues, ret;
+
+ dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n",
+ av->vdev.name, ipu6_isys_get_frame_width(av),
+ ipu6_isys_get_frame_height(av), pfmt->css_pixelformat);
+
+ ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues);
+ if (ret < 0) {
+ dev_dbg(dev, "failed to setup video\n");
+ goto out_return_buffers;
+ }
+
+ ret = ipu6_isys_link_fmt_validate(aq);
+ if (ret) {
+ dev_dbg(dev,
+ "%s: link format validation failed (%d)\n",
+ av->vdev.name, ret);
+ goto out_pipeline_stop;
+ }
+
+ ret = ipu6_isys_fw_open(av->isys);
+ if (ret)
+ goto out_pipeline_stop;
+
+ stream = av->stream;
+ mutex_lock(&stream->mutex);
+ if (!stream->nr_streaming) {
+ ret = ipu6_isys_video_prepare_stream(av, source_entity,
+ nr_queues);
+ if (ret)
+ goto out_fw_close;
+ }
+
+ stream->nr_streaming++;
+ dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming,
+ stream->nr_queues);
+
+ list_add(&aq->node, &stream->queues);
+ ipu6_isys_set_csi2_streams_status(av, true);
+ ipu6_isys_configure_stream_watermark(av, true);
+ ipu6_isys_update_stream_watermark(av, true);
+
+ if (stream->nr_streaming != stream->nr_queues)
+ goto out;
+
+ bl = &__bl;
+ ret = buffer_list_get(stream, bl);
+ if (ret < 0) {
+ dev_warn(dev, "no buffer available, DRIVER BUG?\n");
+ goto out;
+ }
+
+ ret = ipu6_isys_stream_start(av, bl, false);
+ if (ret)
+ goto out_stream_start;
+
+out:
+ mutex_unlock(&stream->mutex);
+
+ return 0;
+
+out_stream_start:
+ ipu6_isys_update_stream_watermark(av, false);
+ list_del(&aq->node);
+ stream->nr_streaming--;
+
+out_fw_close:
+ mutex_unlock(&stream->mutex);
+ ipu6_isys_fw_close(av->isys);
+
+out_pipeline_stop:
+ ipu6_isys_stream_cleanup(av);
+
+out_return_buffers:
+ return_buffers(aq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct ipu6_isys_stream *stream = av->stream;
+
+ ipu6_isys_set_csi2_streams_status(av, false);
+
+ mutex_lock(&stream->mutex);
+
+ ipu6_isys_update_stream_watermark(av, false);
+
+ mutex_lock(&av->isys->stream_mutex);
+ if (stream->nr_streaming == stream->nr_queues && stream->streaming)
+ ipu6_isys_video_set_streaming(av, 0, NULL);
+ mutex_unlock(&av->isys->stream_mutex);
+
+ stream->nr_streaming--;
+ list_del(&aq->node);
+ stream->streaming = 0;
+ mutex_unlock(&stream->mutex);
+
+ ipu6_isys_stream_cleanup(av);
+
+ return_buffers(aq, VB2_BUF_STATE_ERROR);
+
+ ipu6_isys_fw_close(av->isys);
+}
+
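+/*
+ * Look up the frame sequence number matching the SOF timestamp reported
+ * by the firmware.
+ */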
+static unsigned int
+get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+ struct ipu6_isys *isys = stream->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i;
+
+ /*
+ * The timestamp is invalid on FPGA platforms that have no TSC, so
+ * get the sequence directly from the pipeline in that case.
+ */
+ if (time == 0)
+ return atomic_read(&stream->sequence) - 1;
+
+ for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++)
+ if (time == stream->seq[i].timestamp) {
+ dev_dbg(dev, "sof: using seq nr %u for ts %llu\n",
+ stream->seq[i].sequence, time);
+ return stream->seq[i].sequence;
+ }
+
+ for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++)
+ dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n",
+ stream->seq[i].sequence, stream->seq[i].timestamp);
+
+ return 0;
+}
+
+static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ struct ipu6_bus_device *adev = av->isys->adev;
+ struct ipu6_device *isp = adev->isp;
+ u64 delta, tsc_now;
+
+ ipu6_buttress_tsc_read(isp, &tsc_now);
+ if (!tsc_now)
+ return 0;
+
+ delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+
+ return ipu6_buttress_tsc_ticks_to_ns(delta, isp);
+}
+
+void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu6_isys_stream *stream = av->stream;
+ u64 ns;
+ u32 sequence;
+
+ ns = ktime_get_ns() - get_sof_ns_delta(av, info);
+ sequence = get_sof_sequence_by_timestamp(stream, info);
+
+ vbuf->vb2_buf.timestamp = ns;
+ vbuf->sequence = sequence;
+
+ dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
+ av->vdev.name, ktime_get_ns(), sequence);
+ dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index,
+ vbuf->vb2_buf.timestamp);
+}
+
+void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
+{
+ struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+ if (atomic_read(&ib->str2mmio_flag)) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ /*
+ * The operation on the buffer ended with an error, which will be
+ * reported to userspace when the buffer is dequeued.
+ */
+ atomic_set(&ib->str2mmio_flag, 0);
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+}
+
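+/*
+ * Find the active buffer matching the DMA address reported by the
+ * firmware, fill in its sequence number and timestamp and complete it.
+ */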
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq;
+ struct ipu6_isys *isys = stream->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu6_isys_buffer *ib;
+ struct vb2_buffer *vb;
+ unsigned long flags;
+ bool first = true;
+ struct vb2_v4l2_buffer *buf;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->active)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ dev_err(dev, "active queue empty\n");
+ return;
+ }
+
+ list_for_each_entry_reverse(ib, &aq->active, head) {
+ dma_addr_t addr;
+
+ vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (info->pin.addr != addr) {
+ if (first)
+ dev_err(dev, "Unexpected buffer address %pad\n",
+ &addr);
+ first = false;
+ continue;
+ }
+
+ if (info->error_info.error ==
+ IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+ /*
+ * The firmware reported a str2mmio error for this buffer;
+ * flag it so the buffer is completed in the error state.
+ */
+ atomic_set(&ib->str2mmio_flag, 1);
+ }
+ dev_dbg(dev, "buffer: found buffer %pad\n", &addr);
+
+ buf = to_vb2_v4l2_buffer(vb);
+ buf->field = V4L2_FIELD_NONE;
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ ipu6_isys_buf_calc_sequence_time(ib, info);
+
+ ipu6_isys_queue_buf_done(ib);
+
+ return;
+ }
+
+ dev_err(dev, "Failed to find a matching video buffer");
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+static const struct vb2_ops ipu6_isys_queue_ops = {
+ .queue_setup = queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = ipu6_isys_buf_prepare,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .buf_queue = buf_queue,
+};
+
+int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
+{
+ struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys;
+ struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ int ret;
+
+ /* no support for userptr */
+ if (!aq->vbq.io_modes)
+ aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF;
+
+ aq->vbq.drv_priv = aq;
+ aq->vbq.ops = &ipu6_isys_queue_ops;
+ aq->vbq.lock = &av->mutex;
+ aq->vbq.mem_ops = &vb2_dma_contig_memops;
+ aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ aq->vbq.min_queued_buffers = 1;
+ aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(&aq->vbq);
+ if (ret)
+ return ret;
+
+ aq->dev = &isys->adev->auxdev.dev;
+ aq->vbq.dev = &isys->adev->auxdev.dev;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->active);
+ INIT_LIST_HEAD(&aq->incoming);
+
+ return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
new file mode 100644
index 000000000000..95cfd4869d93
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_ISYS_QUEUE_H
+#define IPU6_ISYS_QUEUE_H
+
+#include <linux/container_of.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys-video.h"
+
+struct ipu6_isys_stream;
+
+struct ipu6_isys_queue {
+ struct vb2_queue vbq;
+ struct list_head node;
+ struct device *dev;
+ /*
+ * @lock: serialise access to the active and incoming lists
+ */
+ spinlock_t lock;
+ struct list_head active;
+ struct list_head incoming;
+ unsigned int fw_output;
+};
+
+struct ipu6_isys_buffer {
+ struct list_head head;
+ atomic_t str2mmio_flag;
+};
+
+struct ipu6_isys_video_buffer {
+ struct vb2_v4l2_buffer vb_v4l2;
+ struct ipu6_isys_buffer ib;
+};
+
+#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0)
+#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1)
+#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2)
+
+struct ipu6_isys_buffer_list {
+ struct list_head head;
+ unsigned int nbufs;
+};
+
+#define vb2_queue_to_isys_queue(__vb2) \
+ container_of(__vb2, struct ipu6_isys_queue, vbq)
+
+#define ipu6_isys_to_isys_video_buffer(__ib) \
+ container_of(__ib, struct ipu6_isys_video_buffer, ib)
+
+#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \
+ container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2)
+
+#define ipu6_isys_buffer_to_vb2_buffer(__ib) \
+ (&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf)
+
+void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state);
+void
+ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
+ struct ipu6_isys_stream *stream,
+ struct ipu6_isys_buffer_list *bl);
+void
+ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
+ struct ipu6_fw_isys_resp_info_abi *info);
+void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib);
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info);
+int ipu6_isys_queue_init(struct ipu6_isys_queue *aq);
+#endif /* IPU6_ISYS_QUEUE_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
new file mode 100644
index 000000000000..0a06de5c739c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+
+#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-subdev.h"
+
+unsigned int ipu6_isys_mbus_code_to_bpp(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_META_24:
+ return 24;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_META_16:
+ return 16;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_META_12:
+ return 12;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_META_10:
+ return 10;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_META_8:
+ return 8;
+ default:
+ WARN_ON(1);
+ return 8;
+ }
+}
+
+unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return MIPI_CSI2_DT_RGB565;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return MIPI_CSI2_DT_RGB888;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return MIPI_CSI2_DT_YUV422_8B;
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ return MIPI_CSI2_DT_RAW16;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return MIPI_CSI2_DT_RAW12;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return MIPI_CSI2_DT_RAW10;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return MIPI_CSI2_DT_RAW8;
+ default:
+ /* return unavailable MIPI data type - 0x3f */
+ WARN_ON(1);
+ return 0x3f;
+ }
+}
+
+bool ipu6_isys_is_bayer_format(u32 code)
+{
+ switch (ipu6_isys_mbus_code_to_mipi(code)) {
+ case MIPI_CSI2_DT_RAW8:
+ case MIPI_CSI2_DT_RAW10:
+ case MIPI_CSI2_DT_RAW12:
+ case MIPI_CSI2_DT_RAW14:
+ case MIPI_CSI2_DT_RAW16:
+ case MIPI_CSI2_DT_RAW20:
+ case MIPI_CSI2_DT_RAW24:
+ case MIPI_CSI2_DT_RAW28:
+ return true;
+ default:
+ return false;
+ }
+}
+
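+/*
+ * Shift the Bayer order of @code by (x, y) pixels. The order is encoded
+ * in the two lowest bits of the code_map index, so the shift is a
+ * simple XOR.
+ */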
+u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y)
+{
+ static const u32 code_map[] = {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SRGGB16_1X16,
+ MEDIA_BUS_FMT_SGRBG16_1X16,
+ MEDIA_BUS_FMT_SGBRG16_1X16,
+ MEDIA_BUS_FMT_SBGGR16_1X16,
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(code_map); i++)
+ if (code_map[i] == code)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(code_map)))
+ return code;
+
+ return code_map[i ^ (((y & 1) << 1) | (x & 1))];
+}
+
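+/*
+ * Set the format on a pad: source pad formats follow the sink (no
+ * transcoding); sink pad formats are clamped to the supported sizes and
+ * media bus codes and propagated to the opposite source pad, resetting
+ * the crop rectangle of that stream.
+ */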
+int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ u32 code = asd->supported_codes[0];
+ u32 other_pad, other_stream;
+ unsigned int i;
+ int ret;
+
+ /* No transcoding, source and sink formats must match. */
+ if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) &&
+ sd->entity.num_pads > 1)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH,
+ IPU6_ISYS_MAX_WIDTH);
+ format->format.height = clamp(format->format.height,
+ IPU6_ISYS_MIN_HEIGHT,
+ IPU6_ISYS_MAX_HEIGHT);
+
+ for (i = 0; asd->supported_codes[i]; i++) {
+ if (asd->supported_codes[i] == format->format.code) {
+ code = asd->supported_codes[i];
+ break;
+ }
+ }
+ format->format.code = code;
+ format->format.field = V4L2_FIELD_NONE;
+
+ /* Store the format and propagate it to the source pad. */
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK))
+ return 0;
+
+ /* propagate format to following source pad */
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ format->pad,
+ format->stream,
+ &other_pad,
+ &other_stream);
+ if (ret)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream);
+ /* reset crop */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+
+ return 0;
+}
+
+int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+ const u32 *supported_codes = asd->supported_codes;
+ u32 index;
+
+ for (index = 0; supported_codes[index]; index++) {
+ if (index == code->index) {
+ code->code = supported_codes[index];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt format = {
+ .width = 4096,
+ .height = 3072,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .field = V4L2_FIELD_NONE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ if (ret)
+ return ret;
+
+ return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+}
+
+int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+
+ if (!sd || !format)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ fmt = v4l2_subdev_state_get_format(state, pad, stream);
+ if (fmt)
+ *format = *fmt;
+ v4l2_subdev_unlock_state(state);
+
+ return fmt ? 0 : -EINVAL;
+}
+
+int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_rect *crop)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_rect *rect;
+
+ if (!sd || !crop)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ rect = v4l2_subdev_state_get_crop(state, pad, stream);
+ if (rect)
+ *crop = *rect;
+ v4l2_subdev_unlock_state(state);
+
+ return rect ? 0 : -EINVAL;
+}
+
+u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev_route *routes;
+ unsigned int i;
+ u32 source_stream = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (!state)
+ return 0;
+
+ routes = state->routing.routes;
+ for (i = 0; i < state->routing.num_routes; i++) {
+ if (routes[i].source_pad == pad) {
+ source_stream = routes[i].source_stream;
+ break;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ return source_stream;
+}
+
+static int ipu6_isys_subdev_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route route = {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = 1,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ };
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = 1,
+ .routes = &route,
+ };
+
+ return subdev_set_routing(sd, state, &routing);
+}
+
+int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ return subdev_set_routing(sd, state, routing);
+}
+
+static const struct v4l2_subdev_internal_ops ipu6_isys_subdev_internal_ops = {
+ .init_state = ipu6_isys_subdev_init_state,
+};
+
+int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd,
+ const struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_sink_pads,
+ unsigned int num_source_pads)
+{
+ unsigned int num_pads = num_sink_pads + num_source_pads;
+ unsigned int i;
+ int ret;
+
+ v4l2_subdev_init(&asd->sd, ops);
+
+ asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS |
+ V4L2_SUBDEV_FL_STREAMS;
+ asd->sd.owner = THIS_MODULE;
+ asd->sd.dev = &asd->isys->adev->auxdev.dev;
+ asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ asd->sd.internal_ops = &ipu6_isys_subdev_internal_ops;
+
+ asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads,
+ sizeof(*asd->pad), GFP_KERNEL);
+ if (!asd->pad)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sink_pads; i++)
+ asd->pad[i].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+
+ for (i = num_sink_pads; i < num_pads; i++)
+ asd->pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad);
+ if (ret)
+ return ret;
+
+ if (asd->ctrl_init) {
+ ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls);
+ if (ret)
+ goto out_media_entity_cleanup;
+
+ asd->ctrl_init(&asd->sd);
+ if (asd->ctrl_handler.error) {
+ ret = asd->ctrl_handler.error;
+ goto out_v4l2_ctrl_handler_free;
+ }
+
+ asd->sd.ctrl_handler = &asd->ctrl_handler;
+ }
+
+ asd->source = -1;
+
+ return 0;
+
+out_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+out_media_entity_cleanup:
+ media_entity_cleanup(&asd->sd.entity);
+
+ return ret;
+}
+
+void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd)
+{
+ media_entity_cleanup(&asd->sd.entity);
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
new file mode 100644
index 000000000000..9ef8d95464f5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_ISYS_SUBDEV_H
+#define IPU6_ISYS_SUBDEV_H
+
+#include <linux/container_of.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct ipu6_isys;
+
+struct ipu6_isys_subdev {
+ struct v4l2_subdev sd;
+ struct ipu6_isys *isys;
+ u32 const *supported_codes;
+ struct media_pad *pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ void (*ctrl_init)(struct v4l2_subdev *sd);
+ int source; /* SSI stream source; -1 if unset */
+};
+
+#define to_ipu6_isys_subdev(__sd) \
+ container_of(__sd, struct ipu6_isys_subdev, sd)
+
+unsigned int ipu6_isys_mbus_code_to_bpp(u32 code);
+unsigned int ipu6_isys_mbus_code_to_mipi(u32 code);
+bool ipu6_isys_is_bayer_format(u32 code);
+u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y);
+
+int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt);
+int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum
+ *code);
+int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt);
+u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad);
+int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_mbus_framefmt *format);
+int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_rect *crop);
+int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing);
+int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd,
+ const struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_sink_pads,
+ unsigned int num_source_pads);
+void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd);
+#endif /* IPU6_ISYS_SUBDEV_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
new file mode 100644
index 000000000000..c8a33e1e910c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -0,0 +1,1420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-queue.h"
+#include "ipu6-isys-video.h"
+#include "ipu6-platform-regs.h"
+
+const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = {
+ { V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8 },
+ { V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8 },
+ { V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8 },
+ { V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8 },
+ { V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12 },
+ { V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12 },
+ { V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12 },
+ { V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12 },
+ { V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+ { V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+ { V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+ { V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+ { V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
+ IPU6_FW_ISYS_FRAME_FORMAT_UYVY},
+ { V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
+ IPU6_FW_ISYS_FRAME_FORMAT_YUYV},
+ { V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16,
+ IPU6_FW_ISYS_FRAME_FORMAT_RGB565 },
+ { V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24,
+ IPU6_FW_ISYS_FRAME_FORMAT_RGBA888 },
+ { V4L2_META_FMT_GENERIC_8, 8, 8, MEDIA_BUS_FMT_META_8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8, true },
+ { V4L2_META_FMT_GENERIC_CSI2_10, 10, 10, MEDIA_BUS_FMT_META_10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10, true },
+ { V4L2_META_FMT_GENERIC_CSI2_12, 12, 12, MEDIA_BUS_FMT_META_12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12, true },
+ { V4L2_META_FMT_GENERIC_CSI2_16, 16, 16, MEDIA_BUS_FMT_META_16,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16, true },
+};
+
+static int video_open(struct file *file)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+ struct ipu6_isys *isys = av->isys;
+ struct ipu6_bus_device *adev = isys->adev;
+
+ mutex_lock(&isys->mutex);
+ if (isys->need_reset) {
+ mutex_unlock(&isys->mutex);
+ dev_warn(&adev->auxdev.dev, "isys power cycle required\n");
+ return -EIO;
+ }
+ mutex_unlock(&isys->mutex);
+
+ return v4l2_fh_open(file);
+}
+
+const struct ipu6_isys_pixelformat *
+ipu6_isys_get_isys_format(u32 pixelformat, u32 type)
+{
+ const struct ipu6_isys_pixelformat *default_pfmt = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
+ const struct ipu6_isys_pixelformat *pfmt = &ipu6_isys_pfmts[i];
+
+ if (type && ((!pfmt->is_meta &&
+ type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (pfmt->is_meta &&
+ type != V4L2_BUF_TYPE_META_CAPTURE)))
+ continue;
+
+ if (!default_pfmt)
+ default_pfmt = pfmt;
+
+ if (pfmt->pixelformat != pixelformat)
+ continue;
+
+ return pfmt;
+ }
+
+ return default_pfmt;
+}
+
+static int ipu6_isys_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver));
+ strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card));
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int i, num_found;
+
+ for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
+ if ((ipu6_isys_pfmts[i].is_meta &&
+ f->type != V4L2_BUF_TYPE_META_CAPTURE) ||
+ (!ipu6_isys_pfmts[i].is_meta &&
+ f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ continue;
+
+ if (f->mbus_code && f->mbus_code != ipu6_isys_pfmts[i].code)
+ continue;
+
+ if (num_found < f->index) {
+ num_found++;
+ continue;
+ }
+
+ f->flags = 0;
+ f->pixelformat = ipu6_isys_pfmts[i].pixelformat;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ unsigned int i;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
+ if (fsize->pixel_format != ipu6_isys_pfmts[i].pixelformat)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH;
+ fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH;
+ fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT;
+ fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ipu6_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ f->fmt.pix = av->pix_fmt;
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_g_fmt_meta_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ f->fmt.meta = av->meta_fmt;
+
+ return 0;
+}
+
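+/*
+ * Clamp the requested capture format to the supported pixel formats and
+ * frame sizes and derive bytesperline and sizeimage from the result.
+ */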
+static void ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type,
+ u32 *format, u32 *width, u32 *height,
+ u32 *bytesperline, u32 *sizeimage)
+{
+ const struct ipu6_isys_pixelformat *pfmt =
+ ipu6_isys_get_isys_format(*format, type);
+
+ *format = pfmt->pixelformat;
+ *width = clamp(*width, IPU6_ISYS_MIN_WIDTH, IPU6_ISYS_MAX_WIDTH);
+ *height = clamp(*height, IPU6_ISYS_MIN_HEIGHT, IPU6_ISYS_MAX_HEIGHT);
+
+ if (pfmt->bpp != pfmt->bpp_packed)
+ *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE);
+ else
+ *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE);
+
+ *bytesperline = ALIGN(*bytesperline, av->isys->line_align);
+
+ /*
+ * (height + 1) * bytesperline due to a hardware issue: the DMA unit
+ * size is a power of two, and a line should be transferred in as few
+ * units as possible. As a result, up to one line length of extra data
+ * may be transferred to memory after the image. Another limitation is
+ * the GDA allocation unit size, which gives a bigger number for low
+ * resolutions. Use the larger of the two to avoid memory corruption.
+ */
+ *sizeimage = *bytesperline * *height +
+ max(*bytesperline,
+ av->isys->pdata->ipdata->isys_dma_overshoot);
+}
+
+static void __ipu6_isys_vidioc_try_fmt_vid_cap(struct ipu6_isys_video *av,
+ struct v4l2_format *f)
+{
+ ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat,
+ &f->fmt.pix.width, &f->fmt.pix.height,
+ &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int ipu6_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ if (vb2_is_busy(&av->aq.vbq))
+ return -EBUSY;
+
+ __ipu6_isys_vidioc_try_fmt_vid_cap(av, f);
+
+ return 0;
+}
+
+static int __ipu6_isys_vidioc_try_fmt_meta_cap(struct ipu6_isys_video *av,
+ struct v4l2_format *f)
+{
+ ipu6_isys_try_fmt_cap(av, f->type, &f->fmt.meta.dataformat,
+ &f->fmt.meta.width, &f->fmt.meta.height,
+ &f->fmt.meta.bytesperline,
+ &f->fmt.meta.buffersize);
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_try_fmt_meta_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ __ipu6_isys_vidioc_try_fmt_meta_cap(av, f);
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ ipu6_isys_vidioc_try_fmt_vid_cap(file, fh, f);
+ av->pix_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_s_fmt_meta_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+
+ if (vb2_is_busy(&av->aq.vbq))
+ return -EBUSY;
+
+ ipu6_isys_vidioc_try_fmt_meta_cap(file, fh, f);
+ av->meta_fmt = f->fmt.meta;
+
+ return 0;
+}
+
+static int ipu6_isys_vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+ int ret;
+
+ av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type);
+ av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type);
+
+ ret = vb2_queue_change_type(&av->aq.vbq, p->type);
+ if (ret)
+ return ret;
+
+ return vb2_ioctl_reqbufs(file, priv, p);
+}
+
+static int ipu6_isys_vidioc_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct ipu6_isys_video *av = video_drvdata(file);
+ int ret;
+
+ av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type);
+ av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type);
+
+ ret = vb2_queue_change_type(&av->aq.vbq, p->format.type);
+ if (ret)
+ return ret;
+
+ return vb2_ioctl_create_bufs(file, priv, p);
+}
+
+static int link_validate(struct media_link *link)
+{
+ struct ipu6_isys_video *av =
+ container_of(link->sink, struct ipu6_isys_video, pad);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct v4l2_subdev_state *s_state;
+ struct v4l2_subdev *s_sd;
+ struct v4l2_mbus_framefmt *s_fmt;
+ struct media_pad *s_pad;
+ u32 s_stream, code;
+ int ret = -EPIPE;
+
+ if (!link->source->entity)
+ return ret;
+
+ s_sd = media_entity_to_v4l2_subdev(link->source->entity);
+ s_state = v4l2_subdev_get_unlocked_active_state(s_sd);
+ if (!s_state)
+ return ret;
+
+ dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name);
+
+ s_pad = media_pad_remote_pad_first(&av->pad);
+ s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index);
+
+ v4l2_subdev_lock_state(s_state);
+
+ s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream);
+ if (!s_fmt) {
+ dev_err(dev, "failed to get source pad format\n");
+ goto unlock;
+ }
+
+ code = ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0)->code;
+
+ if (s_fmt->width != ipu6_isys_get_frame_width(av) ||
+ s_fmt->height != ipu6_isys_get_frame_height(av) ||
+ s_fmt->code != code) {
+ dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n",
+ s_fmt->width, s_fmt->height, s_fmt->code,
+ ipu6_isys_get_frame_width(av),
+ ipu6_isys_get_frame_height(av), code);
+ goto unlock;
+ }
+
+ v4l2_subdev_unlock_state(s_state);
+
+ return 0;
+unlock:
+ v4l2_subdev_unlock_state(s_state);
+
+ return ret;
+}
+
+static void get_stream_opened(struct ipu6_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->streams_lock, flags);
+ av->isys->stream_opened++;
+ spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
+static void put_stream_opened(struct ipu6_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->streams_lock, flags);
+ av->isys->stream_opened--;
+ spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
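+/*
+ * Add one input/output pin pair to the firmware stream configuration,
+ * based on the remote subdev format and crop and on the video node
+ * pixel format.
+ */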
+static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av,
+ struct ipu6_fw_isys_stream_cfg_data_abi *cfg)
+{
+ struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad);
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity);
+ struct ipu6_fw_isys_input_pin_info_abi *input_pin;
+ struct ipu6_fw_isys_output_pin_info_abi *output_pin;
+ struct ipu6_isys_stream *stream = av->stream;
+ struct ipu6_isys_queue *aq = &av->aq;
+ struct v4l2_mbus_framefmt fmt;
+ const struct ipu6_isys_pixelformat *pfmt =
+ ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0);
+ struct v4l2_rect v4l2_crop;
+ struct ipu6_isys *isys = av->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ int input_pins = cfg->nof_input_pins++;
+ int output_pins;
+ u32 src_stream;
+ int ret;
+
+ src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index);
+ ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream,
+ &fmt);
+ if (ret < 0) {
+ dev_err(dev, "can't get stream format (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream,
+ &v4l2_crop);
+ if (ret < 0) {
+ dev_err(dev, "can't get stream crop (%d)\n", ret);
+ return ret;
+ }
+
+ input_pin = &cfg->input_pins[input_pins];
+ input_pin->input_res.width = fmt.width;
+ input_pin->input_res.height = fmt.height;
+ input_pin->dt = av->dt;
+ input_pin->bits_per_pix = pfmt->bpp_packed;
+ input_pin->mapped_dt = 0x40; /* invalid mipi data type */
+ input_pin->mipi_decompression = 0;
+ input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR;
+ input_pin->mipi_store_mode = pfmt->bpp == pfmt->bpp_packed ?
+ IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER :
+ IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL;
+ input_pin->crop_first_and_last_lines = v4l2_crop.top & 1;
+
+ output_pins = cfg->nof_output_pins++;
+ aq->fw_output = output_pins;
+ stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready;
+ stream->output_pins[output_pins].aq = aq;
+
+ output_pin = &cfg->output_pins[output_pins];
+ output_pin->input_pin_id = input_pins;
+ output_pin->output_res.width = ipu6_isys_get_frame_width(av);
+ output_pin->output_res.height = ipu6_isys_get_frame_height(av);
+
+ output_pin->stride = ipu6_isys_get_bytes_per_line(av);
+ if (pfmt->bpp != pfmt->bpp_packed)
+ output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC;
+ else
+ output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI;
+ output_pin->ft = pfmt->css_pixelformat;
+ output_pin->send_irq = 1;
+ memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets));
+ output_pin->s2m_pixel_soc_pixel_remapping =
+ S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+ output_pin->csi_be_soc_pixel_remapping =
+ CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+
+ output_pin->snoopable = true;
+ output_pin->error_handling_enable = false;
+ output_pin->sensor_type = isys->sensor_type++;
+ if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end)
+ isys->sensor_type = isys->pdata->ipdata->sensor_type_start;
+
+ return 0;
+}
+
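+/*
+ * Open and start a firmware stream: build the pin configuration for
+ * every queued video node, send STREAM_OPEN, optionally queue the
+ * initial buffer list and then send STREAM_START(_AND_CAPTURE). If
+ * starting fails after a successful open, the stream is closed again.
+ */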
+static int start_stream_firmware(struct ipu6_isys_video *av,
+ struct ipu6_isys_buffer_list *bl)
+{
+ struct ipu6_fw_isys_stream_cfg_data_abi *stream_cfg;
+ struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct ipu6_isys_stream *stream = av->stream;
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct isys_fw_msgs *msg = NULL;
+ struct ipu6_isys_queue *aq;
+ int ret, retout, tout;
+ u16 send_type;
+
+ msg = ipu6_get_fw_msg_buf(stream);
+ if (!msg)
+ return -ENOMEM;
+
+ stream_cfg = &msg->fw_msg.stream;
+ stream_cfg->src = stream->stream_source;
+ stream_cfg->vc = stream->vc;
+ stream_cfg->isl_use = 0;
+ stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL;
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq);
+
+ ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg);
+ if (ret < 0) {
+ ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+ return ret;
+ }
+ }
+
+ ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg);
+
+ stream->nr_output_pins = stream_cfg->nof_output_pins;
+
+ reinit_completion(&stream->stream_open_completion);
+
+ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle,
+ stream_cfg, msg->dma_addr,
+ sizeof(*stream_cfg),
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN);
+ if (ret < 0) {
+ dev_err(dev, "can't open stream (%d)\n", ret);
+ ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+ return ret;
+ }
+
+ get_stream_opened(av);
+
+ tout = wait_for_completion_timeout(&stream->stream_open_completion,
+ IPU6_FW_CALL_TIMEOUT_JIFFIES);
+
+ ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+
+ if (!tout) {
+ dev_err(dev, "stream open time out\n");
+ ret = -ETIMEDOUT;
+ goto out_put_stream_opened;
+ }
+ if (stream->error) {
+ dev_err(dev, "stream open error: %d\n", stream->error);
+ ret = -EIO;
+ goto out_put_stream_opened;
+ }
+ dev_dbg(dev, "start stream: open complete\n");
+
+ if (bl) {
+ msg = ipu6_get_fw_msg_buf(stream);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out_put_stream_opened;
+ }
+ buf = &msg->fw_msg.frame;
+ ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl);
+ ipu6_isys_buffer_list_queue(bl,
+ IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+ }
+
+ reinit_completion(&stream->stream_start_completion);
+
+ if (bl) {
+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE;
+ ipu6_fw_isys_dump_frame_buff_set(dev, buf,
+ stream_cfg->nof_output_pins);
+ ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle,
+ buf, msg->dma_addr,
+ sizeof(*buf), send_type);
+ } else {
+ send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START;
+ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ send_type);
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "can't start streaming (%d)\n", ret);
+ goto out_stream_close;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_start_completion,
+ IPU6_FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout) {
+ dev_err(dev, "stream start time out\n");
+ ret = -ETIMEDOUT;
+ goto out_stream_close;
+ }
+ if (stream->error) {
+ dev_err(dev, "stream start error: %d\n", stream->error);
+ ret = -EIO;
+ goto out_stream_close;
+ }
+ dev_dbg(dev, "start stream: complete\n");
+
+ return 0;
+
+out_stream_close:
+ reinit_completion(&stream->stream_close_completion);
+
+ retout = ipu6_fw_isys_simple_cmd(av->isys,
+ stream->stream_handle,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+ if (retout < 0) {
+ dev_dbg(dev, "can't close stream (%d)\n", retout);
+ goto out_put_stream_opened;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_close_completion,
+ IPU6_FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(dev, "stream close time out\n");
+ else if (stream->error)
+ dev_err(dev, "stream close error: %d\n", stream->error);
+ else
+ dev_dbg(dev, "stream close complete\n");
+
+out_put_stream_opened:
+ put_stream_opened(av);
+
+ return ret;
+}
+
+static void stop_streaming_firmware(struct ipu6_isys_video *av)
+{
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu6_isys_stream *stream = av->stream;
+ int ret, tout;
+
+ reinit_completion(&stream->stream_stop_completion);
+
+ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH);
+
+ if (ret < 0) {
+ dev_err(dev, "can't stop stream (%d)\n", ret);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_stop_completion,
+ IPU6_FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_warn(dev, "stream stop time out\n");
+ else if (stream->error)
+ dev_warn(dev, "stream stop error: %d\n", stream->error);
+ else
+ dev_dbg(dev, "stop stream: complete\n");
+}
+
+static void close_streaming_firmware(struct ipu6_isys_video *av)
+{
+ struct ipu6_isys_stream *stream = av->stream;
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ int ret, tout;
+
+ reinit_completion(&stream->stream_close_completion);
+
+ ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+ if (ret < 0) {
+ dev_err(dev, "can't close stream (%d)\n", ret);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_close_completion,
+ IPU6_FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_warn(dev, "stream close time out\n");
+ else if (stream->error)
+ dev_warn(dev, "stream close error: %d\n", stream->error);
+ else
+ dev_dbg(dev, "close stream: complete\n");
+
+ put_stream_opened(av);
+}
+
+int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av,
+ struct media_entity *source_entity,
+ int nr_queues)
+{
+ struct ipu6_isys_stream *stream = av->stream;
+ struct ipu6_isys_csi2 *csi2;
+
+ if (WARN_ON(stream->nr_streaming))
+ return -EINVAL;
+
+ stream->nr_queues = nr_queues;
+ atomic_set(&stream->sequence, 0);
+
+ stream->seq_index = 0;
+ memset(stream->seq, 0, sizeof(stream->seq));
+
+ if (WARN_ON(!list_empty(&stream->queues)))
+ return -EINVAL;
+
+ stream->stream_source = stream->asd->source;
+ csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+ csi2->receiver_errors = 0;
+ stream->source_entity = source_entity;
+
+ dev_dbg(&av->isys->adev->auxdev.dev,
+ "prepare stream: external entity %s\n",
+ stream->source_entity->name);
+
+ return 0;
+}
+
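+/*
+ * Derive the pixel rate for the iwake watermark from the CSI-2 link
+ * frequency: pixel_rate = link_freq * 2 (double data rate) * lanes /
+ * bits per pixel. If the rate cannot be determined, iwake is
+ * force-disabled.
+ */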
+void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av,
+ bool state)
+{
+ struct ipu6_isys *isys = av->isys;
+ struct ipu6_isys_csi2 *csi2 = NULL;
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_subdev *esd;
+ struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 };
+ unsigned int bpp, lanes;
+ s64 link_freq = 0;
+ u64 pixel_rate = 0;
+ int ret;
+
+ if (!state)
+ return;
+
+ esd = media_entity_to_v4l2_subdev(av->stream->source_entity);
+
+ av->watermark.width = ipu6_isys_get_frame_width(av);
+ av->watermark.height = ipu6_isys_get_frame_height(av);
+ av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift;
+ av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size;
+
+ ret = v4l2_g_ctrl(esd->ctrl_handler, &hb);
+ if (!ret && hb.value >= 0)
+ av->watermark.hblank = hb.value;
+ else
+ av->watermark.hblank = 0;
+
+ csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd);
+ link_freq = ipu6_isys_csi2_get_link_freq(csi2);
+ if (link_freq > 0) {
+ lanes = csi2->nlanes;
+ ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0,
+ av->source_stream, &format);
+ if (!ret) {
+ bpp = ipu6_isys_mbus_code_to_bpp(format.code);
+ pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp);
+ }
+ }
+
+ av->watermark.pixel_rate = pixel_rate;
+
+ if (!pixel_rate) {
+ mutex_lock(&iwake_watermark->mutex);
+ iwake_watermark->force_iwake_disable = true;
+ mutex_unlock(&iwake_watermark->mutex);
+ dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n",
+ av->stream->source_entity->name);
+ }
+}
+
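+/*
+ * Compute the stream data rate in bytes per microsecond: the bytes per
+ * line, rounded up to whole SRAM granules, divided by the line time
+ * (width + hblank) / pixel_rate.
+ */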
+static void calculate_stream_datarate(struct ipu6_isys_video *av)
+{
+ struct video_stream_watermark *watermark = &av->watermark;
+ const struct ipu6_isys_pixelformat *pfmt =
+ ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0);
+ u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line;
+ u64 line_time_ns, stream_data_rate;
+ u16 shift, size;
+
+ shift = watermark->sram_gran_shift;
+ size = watermark->sram_gran_size;
+
+ pixels_per_line = watermark->width + watermark->hblank;
+ line_time_ns = div_u64(pixels_per_line * NSEC_PER_SEC,
+ watermark->pixel_rate);
+ bytes_per_line = watermark->width * pfmt->bpp / 8;
+ pages_per_line = DIV_ROUND_UP(bytes_per_line, size);
+ pb_bytes_per_line = pages_per_line << shift;
+ stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns);
+
+ watermark->stream_data_rate = stream_data_rate;
+}
+
+void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state)
+{
+ struct isys_iwake_watermark *iwake_watermark =
+ &av->isys->iwake_watermark;
+
+ if (!av->watermark.pixel_rate)
+ return;
+
+ if (state) {
+ calculate_stream_datarate(av);
+ mutex_lock(&iwake_watermark->mutex);
+ list_add(&av->watermark.stream_node,
+ &iwake_watermark->video_list);
+ mutex_unlock(&iwake_watermark->mutex);
+ } else {
+ av->watermark.stream_data_rate = 0;
+ mutex_lock(&iwake_watermark->mutex);
+ list_del(&av->watermark.stream_node);
+ mutex_unlock(&iwake_watermark->mutex);
+ }
+
+ update_watermark_setting(av->isys);
+}
+
+void ipu6_isys_put_stream(struct ipu6_isys_stream *stream)
+{
+ struct device *dev;
+ unsigned int i;
+ unsigned long flags;
+
+ if (!stream) {
+ pr_err("ipu6-isys: no available stream\n");
+ return;
+ }
+
+ dev = &stream->isys->adev->auxdev.dev;
+
+ spin_lock_irqsave(&stream->isys->streams_lock, flags);
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+ if (&stream->isys->streams[i] == stream) {
+ if (stream->isys->streams_ref_count[i] > 0)
+ stream->isys->streams_ref_count[i]--;
+ else
+ dev_warn(dev, "invalid stream %d\n", i);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream->isys->streams_lock, flags);
+}
+
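+/*
+ * Find a firmware stream for the video node: reuse an active stream
+ * with the same virtual channel and sub-device, otherwise claim the
+ * first free slot. Streams are reference counted under streams_lock
+ * and released with ipu6_isys_put_stream().
+ */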
+static struct ipu6_isys_stream *
+ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd)
+{
+ struct ipu6_isys_stream *stream = NULL;
+ struct ipu6_isys *isys = av->isys;
+ unsigned long flags;
+ unsigned int i;
+ u8 vc = av->vc;
+
+ if (!isys)
+ return NULL;
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+ if (isys->streams_ref_count[i] && isys->streams[i].vc == vc &&
+ isys->streams[i].asd == asd) {
+ isys->streams_ref_count[i]++;
+ stream = &isys->streams[i];
+ break;
+ }
+ }
+
+ if (!stream) {
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+ if (!isys->streams_ref_count[i]) {
+ isys->streams_ref_count[i]++;
+ stream = &isys->streams[i];
+ stream->vc = vc;
+ stream->asd = asd;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle)
+{
+ unsigned long flags;
+ struct ipu6_isys_stream *stream = NULL;
+
+ if (!isys)
+ return NULL;
+
+ if (stream_handle >= IPU6_ISYS_MAX_STREAMS) {
+ dev_err(&isys->adev->auxdev.dev,
+ "stream_handle %d is invalid\n", stream_handle);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ if (isys->streams_ref_count[stream_handle] > 0) {
+ isys->streams_ref_count[stream_handle]++;
+ stream = &isys->streams[stream_handle];
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc)
+{
+ struct ipu6_isys_stream *stream = NULL;
+ unsigned long flags;
+ unsigned int i;
+
+ if (!isys)
+ return NULL;
+
+ if (source < 0) {
+ dev_err(&stream->isys->adev->auxdev.dev,
+ "query stream with invalid port number\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+ if (!isys->streams_ref_count[i])
+ continue;
+
+ if (isys->streams[i].stream_source == source &&
+ isys->streams[i].vc == vc) {
+ stream = &isys->streams[i];
+ isys->streams_ref_count[i]++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
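+/*
+ * Build a bitmask of the source streams of all video nodes on the same
+ * CSI-2 receiver that belong to the same media pipeline as __av.
+ */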
+static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *__av)
+{
+ struct media_pipeline *pipeline =
+ media_entity_pipeline(&__av->vdev.entity);
+ unsigned int i;
+ u64 stream_mask = 0;
+
+ for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) {
+ struct ipu6_isys_video *av = &__av->csi2->av[i];
+
+ if (pipeline == media_entity_pipeline(&av->vdev.entity))
+ stream_mask |= BIT_ULL(av->source_stream);
+ }
+
+ return stream_mask;
+}
+
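+/*
+ * Start or stop streaming. On start the firmware stream is started
+ * first, then the CSI-2 sub-device and finally the external source
+ * sub-device. On stop the firmware stream is flushed first, then the
+ * external source and the CSI-2 sub-device are stopped, and the
+ * firmware stream is closed last.
+ */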
+int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state,
+ struct ipu6_isys_buffer_list *bl)
+{
+ struct v4l2_subdev_krouting *routing;
+ struct ipu6_isys_stream *stream = av->stream;
+ struct v4l2_subdev_state *subdev_state;
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev *ssd;
+ struct media_pad *r_pad;
+ struct media_pad *s_pad;
+ u32 sink_pad, sink_stream;
+ u64 r_stream;
+ u64 stream_mask = 0;
+ int ret = 0;
+
+ dev_dbg(dev, "set stream: %d\n", state);
+
+ if (WARN(!stream->source_entity, "No source entity for stream\n"))
+ return -ENODEV;
+
+ ssd = media_entity_to_v4l2_subdev(stream->source_entity);
+ sd = &stream->asd->sd;
+ r_pad = media_pad_remote_pad_first(&av->pad);
+ r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index);
+
+ subdev_state = v4l2_subdev_lock_and_get_active_state(sd);
+ routing = &subdev_state->routing;
+ ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index,
+ r_stream, &sink_pad,
+ &sink_stream);
+ v4l2_subdev_unlock_state(subdev_state);
+ if (ret)
+ return ret;
+
+ s_pad = media_pad_remote_pad_first(&stream->asd->pad[sink_pad]);
+
+ stream_mask = get_stream_mask_by_pipeline(av);
+ if (!state) {
+ stop_streaming_firmware(av);
+
+ /* stop external sub-device now. */
+ dev_dbg(dev, "disable streams 0x%llx of %s\n", stream_mask,
+ ssd->name);
+ ret = v4l2_subdev_disable_streams(ssd, s_pad->index,
+ stream_mask);
+ if (ret) {
+ dev_err(dev, "disable streams of %s failed with %d\n",
+ ssd->name, ret);
+ return ret;
+ }
+
+		/* stop the sub-device directly connected to the video node */
+ dev_dbg(dev, "stream off entity %s pad:%d\n", sd->name,
+ r_pad->index);
+ ret = v4l2_subdev_call(sd, video, s_stream, state);
+ if (ret) {
+ dev_err(dev, "stream off %s failed with %d\n", sd->name,
+ ret);
+ return ret;
+ }
+ close_streaming_firmware(av);
+ } else {
+ ret = start_stream_firmware(av, bl);
+ if (ret) {
+ dev_err(dev, "start stream of firmware failed\n");
+ return ret;
+ }
+
+		/* start the sub-device directly connected to the video node */
+ dev_dbg(dev, "stream on %s pad %d\n", sd->name, r_pad->index);
+ ret = v4l2_subdev_call(sd, video, s_stream, state);
+ if (ret) {
+ dev_err(dev, "stream on %s failed with %d\n", sd->name,
+ ret);
+ goto out_media_entity_stop_streaming_firmware;
+ }
+
+ /* start external sub-device now. */
+ dev_dbg(dev, "enable streams 0x%llx of %s\n", stream_mask,
+ ssd->name);
+ ret = v4l2_subdev_enable_streams(ssd, s_pad->index,
+ stream_mask);
+ if (ret) {
+ dev_err(dev,
+ "enable streams 0x%llx of %s failed with %d\n",
+ stream_mask, stream->source_entity->name, ret);
+ goto out_media_entity_stop_streaming;
+ }
+ }
+
+ av->streaming = state;
+
+ return 0;
+
+out_media_entity_stop_streaming:
+ v4l2_subdev_disable_streams(sd, r_pad->index, BIT(r_stream));
+
+out_media_entity_stop_streaming_firmware:
+ stop_streaming_firmware(av);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops ipu6_v4l2_ioctl_ops = {
+ .vidioc_querycap = ipu6_isys_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt,
+ .vidioc_enum_fmt_meta_cap = ipu6_isys_vidioc_enum_fmt,
+ .vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes,
+ .vidioc_g_fmt_vid_cap = ipu6_isys_vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ipu6_isys_vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ipu6_isys_vidioc_try_fmt_vid_cap,
+ .vidioc_g_fmt_meta_cap = ipu6_isys_vidioc_g_fmt_meta_cap,
+ .vidioc_s_fmt_meta_cap = ipu6_isys_vidioc_s_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = ipu6_isys_vidioc_try_fmt_meta_cap,
+ .vidioc_reqbufs = ipu6_isys_vidioc_reqbufs,
+ .vidioc_create_bufs = ipu6_isys_vidioc_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static const struct media_entity_operations entity_ops = {
+ .link_validate = link_validate,
+};
+
+static const struct v4l2_file_operations isys_fops = {
+ .owner = THIS_MODULE,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .open = video_open,
+ .release = vb2_fop_release,
+};
+
+int ipu6_isys_fw_open(struct ipu6_isys *isys)
+{
+ struct ipu6_bus_device *adev = isys->adev;
+ const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&adev->auxdev.dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&isys->mutex);
+
+ if (isys->ref_count++)
+ goto unlock;
+
+ ipu6_configure_spc(adev->isp, &ipdata->hw_variant,
+ IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base,
+ adev->pkg_dir, adev->pkg_dir_dma_addr);
+
+ /*
+	 * Buffers may have been left in the wrong queue at the last
+	 * closure. Move them back to the empty buffer queue now.
+ */
+ ipu6_cleanup_fw_msg_bufs(isys);
+
+ if (isys->fwcom) {
+ /*
+		 * Something went wrong in the previous shutdown. As isys is
+		 * now being restarted, the old context can safely be deleted.
+ */
+ dev_warn(&adev->auxdev.dev, "clearing old context\n");
+ ipu6_fw_isys_cleanup(isys);
+ }
+
+ ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams);
+ if (ret < 0)
+ goto out;
+
+unlock:
+ mutex_unlock(&isys->mutex);
+
+ return 0;
+
+out:
+ isys->ref_count--;
+ mutex_unlock(&isys->mutex);
+ pm_runtime_put(&adev->auxdev.dev);
+
+ return ret;
+}
+
+void ipu6_isys_fw_close(struct ipu6_isys *isys)
+{
+ mutex_lock(&isys->mutex);
+
+ isys->ref_count--;
+ if (!isys->ref_count) {
+ ipu6_fw_isys_close(isys);
+ if (isys->fwcom) {
+ isys->need_reset = true;
+ dev_warn(&isys->adev->auxdev.dev,
+ "failed to close fw isys\n");
+ }
+ }
+
+ mutex_unlock(&isys->mutex);
+
+ if (isys->need_reset)
+ pm_runtime_put_sync(&isys->adev->auxdev.dev);
+ else
+ pm_runtime_put(&isys->adev->auxdev.dev);
+}
+
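+/*
+ * Resolve the CSI-2 sub-device and the external source entity connected
+ * to the video node, count the active routes to get the number of
+ * capture queues, query the virtual channel and data type from the
+ * remote frame descriptor (falling back to VC 0 and a data type derived
+ * from the configured pixel format when no frame descriptor is
+ * provided), start the media pipeline and acquire a firmware stream.
+ */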
+int ipu6_isys_setup_video(struct ipu6_isys_video *av,
+ struct media_entity **source_entity, int *nr_queues)
+{
+ const struct ipu6_isys_pixelformat *pfmt =
+ ipu6_isys_get_isys_format(ipu6_isys_get_format(av), 0);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct v4l2_mbus_frame_desc_entry entry;
+ struct v4l2_subdev_route *route = NULL;
+ struct v4l2_subdev_route *r;
+ struct v4l2_subdev_state *state;
+ struct ipu6_isys_subdev *asd;
+ struct v4l2_subdev *remote_sd;
+ struct media_pipeline *pipeline;
+ struct media_pad *source_pad, *remote_pad;
+ int ret = -EINVAL;
+
+ *nr_queues = 0;
+
+ remote_pad = media_pad_remote_pad_unique(&av->pad);
+ if (IS_ERR(remote_pad)) {
+ dev_dbg(dev, "failed to get remote pad\n");
+ return PTR_ERR(remote_pad);
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+ asd = to_ipu6_isys_subdev(remote_sd);
+ source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]);
+ if (!source_pad) {
+ dev_dbg(dev, "No external source entity\n");
+ return -ENODEV;
+ }
+
+ *source_entity = source_pad->entity;
+
+	/* Count the active routes and find the one for our source pad */
+ state = v4l2_subdev_lock_and_get_active_state(remote_sd);
+ for_each_active_route(&state->routing, r) {
+ (*nr_queues)++;
+
+ if (r->source_pad == remote_pad->index)
+ route = r;
+ }
+
+ if (!route) {
+ v4l2_subdev_unlock_state(state);
+ dev_dbg(dev, "Failed to find route\n");
+ return -ENODEV;
+ }
+ av->source_stream = route->sink_stream;
+ v4l2_subdev_unlock_state(state);
+
+ ret = ipu6_isys_csi2_get_remote_desc(av->source_stream,
+ to_ipu6_isys_csi2(asd),
+ *source_entity, &entry);
+ if (ret == -ENOIOCTLCMD) {
+ av->vc = 0;
+ av->dt = ipu6_isys_mbus_code_to_mipi(pfmt->code);
+ } else if (!ret) {
+ dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n",
+ entry.stream, entry.length, entry.bus.csi2.vc,
+ entry.bus.csi2.dt);
+
+ av->vc = entry.bus.csi2.vc;
+ av->dt = entry.bus.csi2.dt;
+ } else {
+ dev_err(dev, "failed to get remote frame desc\n");
+ return ret;
+ }
+
+ pipeline = media_entity_pipeline(&av->vdev.entity);
+ if (!pipeline)
+ ret = video_device_pipeline_alloc_start(&av->vdev);
+ else
+ ret = video_device_pipeline_start(&av->vdev, pipeline);
+ if (ret < 0) {
+ dev_dbg(dev, "media pipeline start failed\n");
+ return ret;
+ }
+
+ av->stream = ipu6_isys_get_stream(av, asd);
+ if (!av->stream) {
+ video_device_pipeline_stop(&av->vdev);
+ dev_err(dev, "no available stream for firmware\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Do everything needed to initialise the video buffer queue, the video
+ * node and the related media entity. The caller is expected to assign
+ * the isys field and to set the name of the video device.
+ */
+int ipu6_isys_video_init(struct ipu6_isys_video *av)
+{
+ struct v4l2_format format = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = 1920,
+ .height = 1080,
+ },
+ };
+ struct v4l2_format format_meta = {
+ .type = V4L2_BUF_TYPE_META_CAPTURE,
+ .fmt.meta = {
+ .width = 1920,
+ .height = 4,
+ },
+ };
+ int ret;
+
+ mutex_init(&av->mutex);
+ av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE;
+ av->vdev.vfl_dir = VFL_DIR_RX;
+
+ ret = ipu6_isys_queue_init(&av->aq);
+ if (ret)
+ goto out_free_watermark;
+
+ av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
+ if (ret)
+ goto out_vb2_queue_release;
+
+ av->vdev.entity.ops = &entity_ops;
+ av->vdev.release = video_device_release_empty;
+ av->vdev.fops = &isys_fops;
+ av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+ if (!av->vdev.ioctl_ops)
+ av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops;
+ av->vdev.queue = &av->aq.vbq;
+ av->vdev.lock = &av->mutex;
+
+ __ipu6_isys_vidioc_try_fmt_vid_cap(av, &format);
+ av->pix_fmt = format.fmt.pix;
+ __ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta);
+ av->meta_fmt = format_meta.fmt.meta;
+
+ set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
+ video_set_drvdata(&av->vdev, av);
+
+ ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ goto out_media_entity_cleanup;
+
+ return ret;
+
+out_media_entity_cleanup:
+ vb2_video_unregister_device(&av->vdev);
+ media_entity_cleanup(&av->vdev.entity);
+
+out_vb2_queue_release:
+ vb2_queue_release(&av->aq.vbq);
+
+out_free_watermark:
+ mutex_destroy(&av->mutex);
+
+ return ret;
+}
+
+void ipu6_isys_video_cleanup(struct ipu6_isys_video *av)
+{
+ vb2_video_unregister_device(&av->vdev);
+ media_entity_cleanup(&av->vdev.entity);
+ mutex_destroy(&av->mutex);
+}
+
+u32 ipu6_isys_get_format(struct ipu6_isys_video *av)
+{
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return av->pix_fmt.pixelformat;
+
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE)
+ return av->meta_fmt.dataformat;
+
+ return 0;
+}
+
+u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av)
+{
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return av->pix_fmt.sizeimage;
+
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE)
+ return av->meta_fmt.buffersize;
+
+ return 0;
+}
+
+u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av)
+{
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return av->pix_fmt.bytesperline;
+
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE)
+ return av->meta_fmt.bytesperline;
+
+ return 0;
+}
+
+u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av)
+{
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return av->pix_fmt.width;
+
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE)
+ return av->meta_fmt.width;
+
+ return 0;
+}
+
+u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av)
+{
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return av->pix_fmt.height;
+
+ if (av->aq.vbq.type == V4L2_BUF_TYPE_META_CAPTURE)
+ return av->meta_fmt.height;
+
+ return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
new file mode 100644
index 000000000000..1d945be2b879
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_ISYS_VIDEO_H
+#define IPU6_ISYS_VIDEO_H
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+
+#include "ipu6-isys-queue.h"
+
+#define IPU6_ISYS_OUTPUT_PINS 11
+#define IPU6_ISYS_MAX_PARALLEL_SOF 2
+
+struct file;
+struct ipu6_isys;
+struct ipu6_isys_csi2;
+struct ipu6_isys_subdev;
+
+struct ipu6_isys_pixelformat {
+ u32 pixelformat;
+ u32 bpp;
+ u32 bpp_packed;
+ u32 code;
+ u32 css_pixelformat;
+ bool is_meta;
+};
+
+struct sequence_info {
+ unsigned int sequence;
+ u64 timestamp;
+};
+
+struct output_pin_data {
+ void (*pin_ready)(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info);
+ struct ipu6_isys_queue *aq;
+};
+
+/*
+ * Aligned with a firmware stream. Each stream represents one CSI-2 virtual
+ * channel and may map to multiple video devices.
+ */
+struct ipu6_isys_stream {
+ struct mutex mutex;
+ struct media_entity *source_entity;
+ atomic_t sequence;
+ unsigned int seq_index;
+ struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF];
+ int stream_source;
+ int stream_handle;
+ unsigned int nr_output_pins;
+ struct ipu6_isys_subdev *asd;
+
+ int nr_queues; /* Number of capture queues */
+ int nr_streaming;
+ int streaming; /* Has streaming been really started? */
+ struct list_head queues;
+ struct completion stream_open_completion;
+ struct completion stream_close_completion;
+ struct completion stream_start_completion;
+ struct completion stream_stop_completion;
+ struct ipu6_isys *isys;
+
+ struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS];
+ int error;
+ u8 vc;
+};
+
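+/* Per-video-node parameters for the iwake watermark calculation. */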
+struct video_stream_watermark {
+ u32 width;
+ u32 height;
+ u32 hblank;
+ u32 frame_rate;
+ u64 pixel_rate;
+ u64 stream_data_rate;
+ u16 sram_gran_shift;
+ u16 sram_gran_size;
+ struct list_head stream_node;
+};
+
+struct ipu6_isys_video {
+ struct ipu6_isys_queue aq;
+ /* Serialise access to other fields in the struct. */
+ struct mutex mutex;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_pix_format pix_fmt;
+ struct v4l2_meta_format meta_fmt;
+ struct ipu6_isys *isys;
+ struct ipu6_isys_csi2 *csi2;
+ struct ipu6_isys_stream *stream;
+ unsigned int streaming;
+ struct video_stream_watermark watermark;
+ u32 source_stream;
+ u8 vc;
+ u8 dt;
+};
+
+#define ipu6_isys_queue_to_video(__aq) \
+ container_of(__aq, struct ipu6_isys_video, aq)
+
+extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[];
+extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[];
+
+const struct ipu6_isys_pixelformat *
+ipu6_isys_get_isys_format(u32 pixelformat, u32 code);
+int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av,
+ struct media_entity *source_entity,
+ int nr_queues);
+int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state,
+ struct ipu6_isys_buffer_list *bl);
+int ipu6_isys_fw_open(struct ipu6_isys *isys);
+void ipu6_isys_fw_close(struct ipu6_isys *isys);
+int ipu6_isys_setup_video(struct ipu6_isys_video *av,
+ struct media_entity **source_entity, int *nr_queues);
+int ipu6_isys_video_init(struct ipu6_isys_video *av);
+void ipu6_isys_video_cleanup(struct ipu6_isys_video *av);
+void ipu6_isys_put_stream(struct ipu6_isys_stream *stream);
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle);
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc);
+
+void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av,
+ bool state);
+void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state);
+
+u32 ipu6_isys_get_format(struct ipu6_isys_video *av);
+u32 ipu6_isys_get_data_size(struct ipu6_isys_video *av);
+u32 ipu6_isys_get_bytes_per_line(struct ipu6_isys_video *av);
+u32 ipu6_isys_get_frame_width(struct ipu6_isys_video *av);
+u32 ipu6_isys_get_frame_height(struct ipu6_isys_video *av);
+
+#endif /* IPU6_ISYS_VIDEO_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
new file mode 100644
index 000000000000..5992138c7290
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <media/ipu-bridge.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-buttress-regs.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+#define IPU6_BUTTRESS_FABIC_CONTROL 0x68
+#define GDA_ENABLE_IWAKE_INDEX 2
+#define GDA_IWAKE_THRESHOLD_INDEX 1
+#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0
+#define GDA_MEMOPEN_THRESHOLD_INDEX 3
+#define DEFAULT_DID_RATIO 90
+#define DEFAULT_IWAKE_THRESHOLD 0x42
+#define DEFAULT_MEM_OPEN_TIME 10
+#define ONE_THOUSAND_MICROSECOND 1000
+/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */
+#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800
+
+/* LTR & DID values are at most 10 bits */
+#define LTR_DID_VAL_MAX 1023
+#define LTR_DEFAULT_VALUE 0x70503c19
+#define FILL_TIME_DEFAULT_VALUE 0xfff0783c
+#define LTR_DID_PKGC_2R 20
+#define LTR_SCALE_DEFAULT 5
+#define LTR_SCALE_1024NS 2
+#define DID_SCALE_1US 2
+#define DID_SCALE_32US 3
+#define REG_PKGC_PMON_CFG 0xb00
+
+#define VAL_PKGC_PMON_CFG_RESET 0x38
+#define VAL_PKGC_PMON_CFG_START 0x7
+
+#define IS_PIXEL_BUFFER_PAGES 0x80
+/*
+ * when iwake mode is disabled, the critical threshold is statically set
+ * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4
+ */
+#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4)
+
+union fabric_ctrl {
+ struct {
+ u16 ltr_val : 10;
+ u16 ltr_scale : 3;
+ u16 reserved : 3;
+ u16 did_val : 10;
+ u16 did_scale : 3;
+ u16 reserved2 : 1;
+ u16 keep_power_in_D0 : 1;
+ u16 keep_power_override : 1;
+ } bits;
+ u32 value;
+};
+
+enum ltr_did_type {
+ LTR_IWAKE_ON,
+ LTR_IWAKE_OFF,
+ LTR_ISYS_ON,
+ LTR_ISYS_OFF,
+ LTR_ENHANNCE_IWAKE,
+ LTR_TYPE_MAX
+};
+
+#define ISYS_PM_QOS_VALUE 300
+
+static int isys_isr_one(struct ipu6_bus_device *adev);
+
+static int
+isys_complete_ext_device_registration(struct ipu6_isys *isys,
+ struct v4l2_subdev *sd,
+ struct ipu6_isys_csi2_config *csi2)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+
+ if (i == sd->entity.num_pads) {
+ dev_warn(dev, "no src pad in external entity\n");
+ ret = -ENOENT;
+ goto unregister_subdev;
+ }
+
+ ret = media_create_pad_link(&sd->entity, i,
+ &isys->csi2[csi2->port].asd.sd.entity,
+ 0, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_warn(dev, "can't create link\n");
+ goto unregister_subdev;
+ }
+
+ isys->csi2[csi2->port].nlanes = csi2->nlanes;
+
+ return 0;
+
+unregister_subdev:
+ v4l2_device_unregister_subdev(sd);
+
+ return ret;
+}
+
+static void isys_stream_init(struct ipu6_isys *isys)
+{
+ u32 i;
+
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+ mutex_init(&isys->streams[i].mutex);
+ init_completion(&isys->streams[i].stream_open_completion);
+ init_completion(&isys->streams[i].stream_close_completion);
+ init_completion(&isys->streams[i].stream_start_completion);
+ init_completion(&isys->streams[i].stream_stop_completion);
+ INIT_LIST_HEAD(&isys->streams[i].queues);
+ isys->streams[i].isys = isys;
+ isys->streams[i].stream_handle = i;
+ isys->streams[i].vc = INVALID_VC_ID;
+ }
+}
+
+static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2 =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i;
+
+ for (i = 0; i < csi2->nports; i++)
+ ipu6_isys_csi2_cleanup(&isys->csi2[i]);
+}
+
+static int isys_csi2_register_subdevices(struct ipu6_isys *isys)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ ret = ipu6_isys_csi2_init(&isys->csi2[i], isys,
+ isys->pdata->base +
+ CSI_REG_PORT_BASE(i), i);
+ if (ret)
+ goto fail;
+
+ isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);
+ }
+
+ return 0;
+
+fail:
+ while (i--)
+ ipu6_isys_csi2_cleanup(&isys->csi2[i]);
+
+ return ret;
+}
+
+static int isys_csi2_create_media_links(struct ipu6_isys *isys)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ struct media_entity *sd = &isys->csi2[i].asd.sd.entity;
+
+ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) {
+ struct ipu6_isys_video *av = &isys->csi2[i].av[j];
+
+ ret = media_create_pad_link(sd, CSI2_PAD_SRC + j,
+ &av->vdev.entity, 0, 0);
+ if (ret) {
+ dev_err(dev, "CSI2 can't create link\n");
+ return ret;
+ }
+
+ av->csi2 = &isys->csi2[i];
+ }
+ }
+
+ return 0;
+}
+
+static void isys_unregister_video_devices(struct ipu6_isys *isys)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i, j;
+
+ for (i = 0; i < csi2_pdata->nports; i++)
+ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++)
+ ipu6_isys_video_cleanup(&isys->csi2[i].av[j]);
+}
+
+static int isys_register_video_devices(struct ipu6_isys *isys)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) {
+ struct ipu6_isys_video *av = &isys->csi2[i].av[j];
+
+ snprintf(av->vdev.name, sizeof(av->vdev.name),
+ IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u",
+ i * NR_OF_CSI2_SRC_PADS + j);
+ av->isys = isys;
+ av->aq.vbq.buf_struct_size =
+ sizeof(struct ipu6_isys_video_buffer);
+
+ ret = ipu6_isys_video_init(av);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ while (j--)
+ ipu6_isys_video_cleanup(&isys->csi2[i].av[j]);
+ j = NR_OF_CSI2_SRC_PADS;
+ }
+
+ return ret;
+}
+
+void isys_setup_hw(struct ipu6_isys *isys)
+{
+ void __iomem *base = isys->pdata->base;
+ const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold;
+ u32 irqs = 0;
+ unsigned int i, nports;
+
+ nports = isys->pdata->ipdata->csi2.nports;
+
+ /* Enable irqs for all MIPI ports */
+ for (i = 0; i < nports; i++)
+ irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);
+
+ writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge);
+ writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp);
+ writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask);
+ writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable);
+ writel(GENMASK(19, 0),
+ base + isys->pdata->ipdata->csi2.ctrl0_irq_clear);
+
+ irqs = ISYS_UNISPART_IRQS;
+ writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE);
+ writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE);
+ writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);
+ writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+ writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE);
+
+ writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);
+ writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG);
+
+ /* Write CDC FIFO threshold values for isys */
+ for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++)
+ writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i));
+}
+
+static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2)
+{
+ struct ipu6_isys_stream *stream;
+ unsigned int i;
+ u32 status;
+ int source;
+
+ ipu6_isys_register_errors(csi2);
+
+ status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+ writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+ source = csi2->asd.source;
+ for (i = 0; i < NR_OF_CSI2_VC; i++) {
+ if (status & IPU_CSI_RX_IRQ_FS_VC(i)) {
+ stream = ipu6_isys_query_stream_by_source(csi2->isys,
+ source, i);
+ if (stream) {
+ ipu6_isys_csi2_sof_event_by_stream(stream);
+ ipu6_isys_put_stream(stream);
+ }
+ }
+
+ if (status & IPU_CSI_RX_IRQ_FE_VC(i)) {
+ stream = ipu6_isys_query_stream_by_source(csi2->isys,
+ source, i);
+ if (stream) {
+ ipu6_isys_csi2_eof_event_by_stream(stream);
+ ipu6_isys_put_stream(stream);
+ }
+ }
+ }
+}
+
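+/*
+ * ISYS interrupt handler: keep dispatching CSI-2 receiver interrupts
+ * and firmware responses until both the CSI-2 and the software
+ * interrupt status registers read back as clear. The software
+ * interrupt is masked while the handler runs.
+ */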
+irqreturn_t isys_isr(struct ipu6_bus_device *adev)
+{
+ struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+ void __iomem *base = isys->pdata->base;
+ u32 status_sw, status_csi;
+ u32 ctrl0_status, ctrl0_clear;
+
+ spin_lock(&isys->power_lock);
+ if (!isys->power) {
+ spin_unlock(&isys->power_lock);
+ return IRQ_NONE;
+ }
+
+ ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status;
+ ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear;
+
+ status_csi = readl(isys->pdata->base + ctrl0_status);
+ status_sw = readl(isys->pdata->base +
+ IPU6_REG_ISYS_UNISPART_IRQ_STATUS);
+
+ writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW,
+ base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+
+ do {
+ writel(status_csi, isys->pdata->base + ctrl0_clear);
+
+ writel(status_sw, isys->pdata->base +
+ IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);
+
+ if (isys->isr_csi2_bits & status_csi) {
+ unsigned int i;
+
+ for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
+				/* skip IRQs from ports that are not enabled */
+ if (!isys->csi2[i].base)
+ continue;
+ if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i))
+ ipu6_isys_csi2_isr(&isys->csi2[i]);
+ }
+ }
+
+ writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);
+
+ if (!isys_isr_one(adev))
+ status_sw = IPU6_ISYS_UNISPART_IRQ_SW;
+ else
+ status_sw = 0;
+
+ status_csi = readl(isys->pdata->base + ctrl0_status);
+ status_sw |= readl(isys->pdata->base +
+ IPU6_REG_ISYS_UNISPART_IRQ_STATUS);
+ } while ((status_csi & isys->isr_csi2_bits) ||
+ (status_sw & IPU6_ISYS_UNISPART_IRQ_SW));
+
+ writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+
+ spin_unlock(&isys->power_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did)
+{
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+ struct ltr_did ltrdid_default;
+
+ ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE;
+ ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE;
+
+ if (iwake_watermark->ltrdid.lut_ltr.value)
+ *pltr_did = iwake_watermark->ltrdid;
+ else
+ *pltr_did = ltrdid_default;
+}
+
+static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 req_id = index;
+ u32 offset = 0;
+ int ret;
+
+ ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value);
+ if (ret)
+ dev_err(dev, "write %d failed %d", index, ret);
+
+ return ret;
+}
+
+/*
+ * When input system is powered up and before enabling any new sensor capture,
+ * or after disabling any sensor capture the following values need to be set:
+ * LTR_value = LTR(usec) from calculation;
+ * LTR_scale = 2;
+ * DID_value = DID(usec) from calculation;
+ * DID_scale = 2;
+ *
+ * When input system is powered down, the LTR and DID values
+ * must be returned to the default values:
+ * LTR_value = 1023;
+ * LTR_scale = 5;
+ * DID_value = 1023;
+ * DID_scale = 2;
+ */
+static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did,
+ enum ltr_did_type use)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ u16 ltr_val, ltr_scale = LTR_SCALE_1024NS;
+ u16 did_val, did_scale = DID_SCALE_1US;
+ struct ipu6_device *isp = isys->adev->isp;
+ union fabric_ctrl fc;
+
+ switch (use) {
+ case LTR_IWAKE_ON:
+ ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX);
+ did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX);
+ ltr_scale = (ltr == LTR_DID_VAL_MAX &&
+ did == LTR_DID_VAL_MAX) ?
+ LTR_SCALE_DEFAULT : LTR_SCALE_1024NS;
+ break;
+ case LTR_ISYS_ON:
+ case LTR_IWAKE_OFF:
+ ltr_val = LTR_DID_PKGC_2R;
+ did_val = LTR_DID_PKGC_2R;
+ break;
+ case LTR_ISYS_OFF:
+ ltr_val = LTR_DID_VAL_MAX;
+ did_val = LTR_DID_VAL_MAX;
+ ltr_scale = LTR_SCALE_DEFAULT;
+ break;
+ case LTR_ENHANNCE_IWAKE:
+ if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) {
+ ltr_val = LTR_DID_VAL_MAX;
+ did_val = LTR_DID_VAL_MAX;
+ ltr_scale = LTR_SCALE_DEFAULT;
+ } else if (did < ONE_THOUSAND_MICROSECOND) {
+ ltr_val = ltr;
+ did_val = did;
+ } else {
+ ltr_val = ltr;
+ /* div 90% value by 32 to account for scale change */
+ did_val = did / 32;
+ did_scale = DID_SCALE_32US;
+ }
+ break;
+ default:
+ ltr_val = LTR_DID_VAL_MAX;
+ did_val = LTR_DID_VAL_MAX;
+ ltr_scale = LTR_SCALE_DEFAULT;
+ break;
+ }
+
+ fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
+ fc.bits.ltr_val = ltr_val;
+ fc.bits.ltr_scale = ltr_scale;
+ fc.bits.did_val = did_val;
+ fc.bits.did_scale = did_scale;
+
+ dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n",
+ ltr_val, ltr_scale, did_val, did_scale);
+ writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
+}
+
+/*
+ * The driver may clear the GDA_ENABLE_IWAKE register before the firmware
+ * configures the stream, for debugging purposes. Otherwise the driver should
+ * not access this register.
+ */
+static void enable_iwake(struct ipu6_isys *isys, bool enable)
+{
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+ int ret;
+
+ mutex_lock(&iwake_watermark->mutex);
+
+ if (iwake_watermark->iwake_enabled == enable) {
+ mutex_unlock(&iwake_watermark->mutex);
+ return;
+ }
+
+ ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable);
+ if (!ret)
+ iwake_watermark->iwake_enabled = enable;
+
+ mutex_unlock(&iwake_watermark->mutex);
+}
+
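+/*
+ * Recompute the iwake settings from the summed data rate of all active
+ * streams: program the LTR/DID values and the iwake and critical
+ * thresholds (plus the memory-open threshold in enhanced iwake mode).
+ * With no active streams, iwake is disabled.
+ */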
+void update_watermark_setting(struct ipu6_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+ u32 iwake_threshold, iwake_critical_threshold, page_num;
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 calc_fill_time_us = 0, ltr = 0, did = 0;
+ struct video_stream_watermark *p_watermark;
+ enum ltr_did_type ltr_did_type;
+ struct list_head *stream_node;
+ u64 isys_pb_datarate_mbs = 0;
+ u32 mem_open_threshold = 0;
+ struct ltr_did ltrdid;
+ u64 threshold_bytes;
+ u32 max_sram_size;
+ u32 shift;
+
+ shift = isys->pdata->ipdata->sram_gran_shift;
+ max_sram_size = isys->pdata->ipdata->max_sram_size;
+
+ mutex_lock(&iwake_watermark->mutex);
+ if (iwake_watermark->force_iwake_disable) {
+ set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+ set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ CRITICAL_THRESHOLD_IWAKE_DISABLE);
+ goto unlock_exit;
+ }
+
+ if (list_empty(&iwake_watermark->video_list)) {
+ isys_pb_datarate_mbs = 0;
+ } else {
+ list_for_each(stream_node, &iwake_watermark->video_list) {
+ p_watermark = list_entry(stream_node,
+ struct video_stream_watermark,
+ stream_node);
+ isys_pb_datarate_mbs += p_watermark->stream_data_rate;
+ }
+ }
+ mutex_unlock(&iwake_watermark->mutex);
+
+ if (!isys_pb_datarate_mbs) {
+ enable_iwake(isys, false);
+ set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+ mutex_lock(&iwake_watermark->mutex);
+ set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ CRITICAL_THRESHOLD_IWAKE_DISABLE);
+ goto unlock_exit;
+ }
+
+ enable_iwake(isys, true);
+ calc_fill_time_us = max_sram_size / isys_pb_datarate_mbs;
+
+ if (isys->pdata->ipdata->enhanced_iwake) {
+ ltr = isys->pdata->ipdata->ltr;
+ did = calc_fill_time_us * DEFAULT_DID_RATIO / 100;
+ ltr_did_type = LTR_ENHANNCE_IWAKE;
+ } else {
+ get_lut_ltrdid(isys, &ltrdid);
+
+ if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0)
+ ltr = 0;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1)
+ ltr = ltrdid.lut_ltr.bits.val0;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2)
+ ltr = ltrdid.lut_ltr.bits.val1;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3)
+ ltr = ltrdid.lut_ltr.bits.val2;
+ else
+ ltr = ltrdid.lut_ltr.bits.val3;
+
+ did = calc_fill_time_us - ltr;
+ ltr_did_type = LTR_IWAKE_ON;
+ }
+
+ set_iwake_ltrdid(isys, ltr, did, ltr_did_type);
+
+ /* calculate iwake threshold with 2KB granularity pages */
+ threshold_bytes = did * isys_pb_datarate_mbs;
+ iwake_threshold = max_t(u32, 1, threshold_bytes >> shift);
+ iwake_threshold = min_t(u32, iwake_threshold, max_sram_size);
+
+ mutex_lock(&iwake_watermark->mutex);
+ if (isys->pdata->ipdata->enhanced_iwake) {
+ set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
+ DEFAULT_IWAKE_THRESHOLD);
+ /* calculate number of pages that will be filled in 10 usec */
+ page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) /
+ ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE;
+ page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) %
+ ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 1 : 0;
+ mem_open_threshold = isys->pdata->ipdata->memopen_threshold;
+ mem_open_threshold = max_t(u32, mem_open_threshold, page_num);
+ dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold);
+ set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX,
+ mem_open_threshold);
+ } else {
+ set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
+ iwake_threshold);
+ }
+
+ iwake_critical_threshold = iwake_threshold +
+ (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2;
+
+ dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold,
+ iwake_critical_threshold);
+
+ set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ iwake_critical_threshold);
+
+ writel(VAL_PKGC_PMON_CFG_RESET,
+ isys->adev->isp->base + REG_PKGC_PMON_CFG);
+ writel(VAL_PKGC_PMON_CFG_START,
+ isys->adev->isp->base + REG_PKGC_PMON_CFG);
+unlock_exit:
+ mutex_unlock(&iwake_watermark->mutex);
+}
+
+static void isys_iwake_watermark_init(struct ipu6_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+
+ INIT_LIST_HEAD(&iwake_watermark->video_list);
+ mutex_init(&iwake_watermark->mutex);
+
+ iwake_watermark->ltrdid.lut_ltr.value = 0;
+ iwake_watermark->isys = isys;
+ iwake_watermark->iwake_enabled = false;
+ iwake_watermark->force_iwake_disable = false;
+}
+
+static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+
+ mutex_lock(&iwake_watermark->mutex);
+ list_del(&iwake_watermark->video_list);
+ mutex_unlock(&iwake_watermark->mutex);
+
+ mutex_destroy(&iwake_watermark->mutex);
+}
+
+/* The .bound() notifier callback when a match is found */
+static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct ipu6_isys *isys =
+ container_of(notifier, struct ipu6_isys, notifier);
+ struct sensor_async_sd *s_asd =
+ container_of(asc, struct sensor_async_sd, asc);
+ int ret;
+
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret) {
+ dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n");
+ return ret;
+ }
+
+ dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n",
+ sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
+ ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
+ if (ret)
+ return ret;
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct ipu6_isys *isys =
+ container_of(notifier, struct ipu6_isys, notifier);
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations isys_async_ops = {
+ .bound = isys_notifier_bound,
+ .complete = isys_notifier_complete,
+};
+
+#define ISYS_MAX_PORTS 8
+static int isys_notifier_init(struct ipu6_isys *isys)
+{
+ struct ipu6_device *isp = isys->adev->isp;
+ struct device *dev = &isp->pdev->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev);
+
+ for (i = 0; i < ISYS_MAX_PORTS; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct sensor_async_sd *s_asd;
+ struct fwnode_handle *ep;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret) {
+ dev_err(dev, "fwnode endpoint parse failed: %d\n", ret);
+ goto err_parse;
+ }
+
+ s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep,
+ struct sensor_async_sd);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
+			dev_err(dev, "add remote fwnode failed: %d\n", ret);
+ goto err_parse;
+ }
+
+ s_asd->csi2.port = vep.base.port;
+ s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ dev_dbg(dev, "remote endpoint port %d with %d lanes added\n",
+ s_asd->csi2.port, s_asd->csi2.nlanes);
+
+ fwnode_handle_put(ep);
+
+ continue;
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ isys->notifier.ops = &isys_async_ops;
+ ret = v4l2_async_nf_register(&isys->notifier);
+ if (ret) {
+ dev_err(dev, "failed to register async notifier : %d\n", ret);
+ v4l2_async_nf_cleanup(&isys->notifier);
+ }
+
+ return ret;
+}
+
+static void isys_notifier_cleanup(struct ipu6_isys *isys)
+{
+ v4l2_async_nf_unregister(&isys->notifier);
+ v4l2_async_nf_cleanup(&isys->notifier);
+}
+
+static int isys_register_devices(struct ipu6_isys *isys)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct pci_dev *pdev = isys->adev->isp->pdev;
+ int ret;
+
+ isys->media_dev.dev = dev;
+ media_device_pci_init(&isys->media_dev,
+ pdev, IPU6_MEDIA_DEV_MODEL_NAME);
+
+ strscpy(isys->v4l2_dev.name, isys->media_dev.model,
+ sizeof(isys->v4l2_dev.name));
+
+ ret = media_device_register(&isys->media_dev);
+ if (ret < 0)
+ goto out_media_device_unregister;
+
+ isys->v4l2_dev.mdev = &isys->media_dev;
+ isys->v4l2_dev.ctrl_handler = NULL;
+
+ ret = v4l2_device_register(&pdev->dev, &isys->v4l2_dev);
+ if (ret < 0)
+ goto out_media_device_unregister;
+
+ ret = isys_register_video_devices(isys);
+ if (ret)
+ goto out_v4l2_device_unregister;
+
+ ret = isys_csi2_register_subdevices(isys);
+ if (ret)
+ goto out_isys_unregister_video_device;
+
+ ret = isys_csi2_create_media_links(isys);
+ if (ret)
+ goto out_isys_unregister_subdevices;
+
+ ret = isys_notifier_init(isys);
+ if (ret)
+ goto out_isys_unregister_subdevices;
+
+ return 0;
+
+out_isys_unregister_subdevices:
+ isys_csi2_unregister_subdevices(isys);
+
+out_isys_unregister_video_device:
+ isys_unregister_video_devices(isys);
+
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&isys->v4l2_dev);
+
+out_media_device_unregister:
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+
+ dev_err(dev, "failed to register isys devices\n");
+
+ return ret;
+}
+
+static void isys_unregister_devices(struct ipu6_isys *isys)
+{
+ isys_unregister_video_devices(isys);
+ isys_csi2_unregister_subdevices(isys);
+ v4l2_device_unregister(&isys->v4l2_dev);
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+}
+
+static int isys_runtime_pm_resume(struct device *dev)
+{
+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+ struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+ struct ipu6_device *isp = adev->isp;
+ unsigned long flags;
+ int ret;
+
+ if (!isys)
+ return 0;
+
+ ret = ipu6_mmu_hw_init(adev->mmu);
+ if (ret)
+ return ret;
+
+ cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);
+
+ ret = ipu6_buttress_start_tsc_sync(isp);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 1;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ isys_setup_hw(isys);
+
+ set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON);
+
+ return 0;
+}
+
+static int isys_runtime_pm_suspend(struct device *dev)
+{
+ struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+ struct ipu6_isys *isys;
+ unsigned long flags;
+
+ isys = dev_get_drvdata(dev);
+ if (!isys)
+ return 0;
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 0;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ mutex_lock(&isys->mutex);
+ isys->need_reset = false;
+ mutex_unlock(&isys->mutex);
+
+ isys->phy_termcal_val = 0;
+ cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF);
+
+ ipu6_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+}
+
+static int isys_suspend(struct device *dev)
+{
+ struct ipu6_isys *isys = dev_get_drvdata(dev);
+
+ /* If stream is open, refuse to suspend */
+ if (isys->stream_opened)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int isys_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops isys_pm_ops = {
+ .runtime_suspend = isys_runtime_pm_suspend,
+ .runtime_resume = isys_runtime_pm_resume,
+ .suspend = isys_suspend,
+ .resume = isys_resume,
+};
+
+static void isys_remove(struct auxiliary_device *auxdev)
+{
+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+ struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
+ struct ipu6_device *isp = adev->isp;
+ struct isys_fw_msgs *fwmsg, *safe;
+ unsigned int i;
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
+ dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr, 0);
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
+ dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr, 0);
+
+ isys_unregister_devices(isys);
+ isys_notifier_cleanup(isys);
+
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ if (!isp->secure_mode) {
+ ipu6_cpd_free_pkg_dir(adev);
+ ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+ release_firmware(adev->fw);
+ }
+
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ isys_iwake_watermark_cleanup(isys);
+ mutex_destroy(&isys->stream_mutex);
+ mutex_destroy(&isys->mutex);
+}
+
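+/*
+ * Pre-allocate @amount DMA-coherent firmware message buffers and add them
+ * to the free list. If any allocation fails, free every buffer currently
+ * on the free list and return -ENOMEM.
+ */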
+static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct isys_fw_msgs *addr;
+ dma_addr_t dma_addr;
+ unsigned long flags;
+ unsigned int i;
+
+ for (i = 0; i < amount; i++) {
+ addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs),
+ &dma_addr, GFP_KERNEL, 0);
+ if (!addr)
+ break;
+ addr->dma_addr = dma_addr;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_add(&addr->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ }
+
+ if (i == amount)
+ return 0;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ while (!list_empty(&isys->framebuflist)) {
+ addr = list_first_entry(&isys->framebuflist,
+ struct isys_fw_msgs, head);
+ list_del(&addr->head);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr,
+ addr->dma_addr, 0);
+ spin_lock_irqsave(&isys->listlock, flags);
+ }
+ spin_unlock_irqrestore(&isys->listlock, flags);
+
+ return -ENOMEM;
+}
+
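+/*
+ * Take a firmware message buffer from the free list (topping the list up
+ * with five more buffers if it is empty), move it to the in-flight
+ * framebuflist_fw list and return it with its fw_msg payload cleared.
+ * Returns NULL if no buffer can be obtained.
+ */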
+struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream)
+{
+ struct ipu6_isys *isys = stream->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dev_dbg(dev, "Frame list empty\n");
+
+ ret = alloc_fw_msg_bufs(isys, 5);
+ if (ret < 0)
+ return NULL;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dev_err(dev, "Frame list empty\n");
+ return NULL;
+ }
+ }
+ msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
+ list_move(&msg->head, &isys->framebuflist_fw);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));
+
+ return msg;
+}
+
+void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys)
+{
+ struct isys_fw_msgs *fwmsg, *fwmsg0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
+ list_move(&fwmsg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
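+/*
+ * @data holds the address of the fw_msg payload that was handed to the
+ * firmware; recover the containing isys_fw_msgs and move it back to the
+ * free list.
+ */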
+void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data)
+{
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ u64 *ptr = (u64 *)data;
+
+ if (!ptr)
+ return;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
+ list_move(&msg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+static int isys_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *auxdev_id)
+{
+ const struct ipu6_isys_internal_csi2_pdata *csi2_pdata;
+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+ struct ipu6_device *isp = adev->isp;
+ const struct firmware *fw;
+ struct ipu6_isys *isys;
+ unsigned int i;
+ int ret;
+
+ if (!isp->bus_ready_to_probe)
+ return -EPROBE_DEFER;
+
+ isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL);
+ if (!isys)
+ return -ENOMEM;
+
+ adev->auxdrv_data =
+ (const struct ipu6_auxdrv_data *)auxdev_id->driver_data;
+ adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver);
+ isys->adev = adev;
+ isys->pdata = adev->pdata;
+ csi2_pdata = &isys->pdata->ipdata->csi2;
+
+ isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports,
+ sizeof(*isys->csi2), GFP_KERNEL);
+ if (!isys->csi2)
+ return -ENOMEM;
+
+ ret = ipu6_mmu_hw_init(adev->mmu);
+ if (ret)
+ return ret;
+
+ /* initial sensor type */
+ isys->sensor_type = isys->pdata->ipdata->sensor_type_start;
+
+ spin_lock_init(&isys->streams_lock);
+ spin_lock_init(&isys->power_lock);
+ isys->power = 0;
+ isys->phy_termcal_val = 0;
+
+ mutex_init(&isys->mutex);
+ mutex_init(&isys->stream_mutex);
+
+ spin_lock_init(&isys->listlock);
+ INIT_LIST_HEAD(&isys->framebuflist);
+ INIT_LIST_HEAD(&isys->framebuflist_fw);
+
+ isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN;
+ isys->icache_prefetch = 0;
+
+ dev_set_drvdata(&auxdev->dev, isys);
+
+ isys_stream_init(isys);
+
+ if (!isp->secure_mode) {
+ fw = isp->cpd_fw;
+ ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt);
+ if (ret)
+ goto release_firmware;
+
+ ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data);
+ if (ret)
+ goto remove_shared_buffer;
+ }
+
+ cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ ret = alloc_fw_msg_bufs(isys, 20);
+ if (ret < 0)
+ goto out_remove_pkg_dir_shared_buffer;
+
+ isys_iwake_watermark_init(isys);
+
+ if (is_ipu6se(adev->isp->hw_ver))
+ isys->phy_set_power = ipu6_isys_jsl_phy_set_power;
+ else if (is_ipu6ep_mtl(adev->isp->hw_ver))
+ isys->phy_set_power = ipu6_isys_dwc_phy_set_power;
+ else
+ isys->phy_set_power = ipu6_isys_mcd_phy_set_power;
+
+ ret = isys_register_devices(isys);
+ if (ret)
+ goto out_remove_pkg_dir_shared_buffer;
+
+ ipu6_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+
+out_remove_pkg_dir_shared_buffer:
+ if (!isp->secure_mode)
+ ipu6_cpd_free_pkg_dir(adev);
+remove_shared_buffer:
+ if (!isp->secure_mode)
+ ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+release_firmware:
+ if (!isp->secure_mode)
+ release_firmware(adev->fw);
+
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ mutex_destroy(&isys->mutex);
+ mutex_destroy(&isys->stream_mutex);
+
+ ipu6_mmu_hw_cleanup(adev->mmu);
+
+ return ret;
+}
+
+struct fwmsg {
+ int type;
+ char *msg;
+ bool valid_ts;
+};
+
+static const struct fwmsg fw_msg[] = {
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+ "STREAM_START_AND_CAPTURE_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+ "STREAM_START_AND_CAPTURE_DONE", 1},
+ {IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1},
+ {IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1},
+ {IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1},
+ {IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1},
+ {-1, "UNKNOWN MESSAGE", 0}
+};
+
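+/*
+ * Map a firmware response type to its index in fw_msg[]; unknown types
+ * resolve to the last ("UNKNOWN MESSAGE") entry.
+ */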
+static u32 resp_type_to_index(int type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_msg); i++)
+ if (fw_msg[i].type == type)
+ return i;
+
+ return ARRAY_SIZE(fw_msg) - 1;
+}
+
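+/*
+ * Handle a single firmware response from the base message receive queue.
+ * Returns 1 if there is nothing to handle (no fw com context or empty
+ * queue), 0 once a response has been consumed.
+ */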
+static int isys_isr_one(struct ipu6_bus_device *adev)
+{
+ struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+ struct ipu6_fw_isys_resp_info_abi *resp;
+ struct ipu6_isys_stream *stream;
+ struct ipu6_isys_csi2 *csi2 = NULL;
+ u32 index;
+ u64 ts;
+
+ if (!isys->fwcom)
+ return 1;
+
+ resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
+ if (!resp)
+ return 1;
+
+ ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0];
+
+ index = resp_type_to_index(resp->type);
+ dev_dbg(&adev->auxdev.dev,
+ "FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n",
+ resp->type, fw_msg[index].msg, resp->stream_handle,
+ fw_msg[index].valid_ts ? ts : 0, resp->pin_id);
+
+ if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION)
+		/* Suspension is a special case: not enough buffers */
+ dev_dbg(&adev->auxdev.dev,
+ "FW error resp SUSPENSION, details %d\n",
+ resp->error_info.error_details);
+ else if (resp->error_info.error)
+ dev_dbg(&adev->auxdev.dev,
+ "FW error resp error %d, details %d\n",
+ resp->error_info.error, resp->error_info.error_details);
+
+ if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) {
+ dev_err(&adev->auxdev.dev, "bad stream handle %u\n",
+ resp->stream_handle);
+ goto leave;
+ }
+
+ stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle);
+ if (!stream) {
+ dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n",
+ resp->stream_handle);
+ goto leave;
+ }
+ stream->error = resp->error_info.error;
+
+ csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+
+ switch (resp->type) {
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
+ complete(&stream->stream_open_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
+ complete(&stream->stream_close_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK:
+ complete(&stream->stream_start_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+ complete(&stream->stream_start_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK:
+ complete(&stream->stream_stop_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
+ complete(&stream->stream_stop_completion);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY:
+ /*
+		 * the firmware only releases the capture message once
+		 * software has received the pin_data_ready event
+ */
+ ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id);
+ if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS &&
+ stream->output_pins[resp->pin_id].pin_ready)
+ stream->output_pins[resp->pin_id].pin_ready(stream,
+ resp);
+ else
+ dev_warn(&adev->auxdev.dev,
+ "%d:No data pin ready handler for pin id %d\n",
+ resp->stream_handle, resp->pin_id);
+ if (csi2)
+ ipu6_isys_csi2_error(csi2);
+
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+ case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF:
+
+ ipu6_isys_csi2_sof_event_by_stream(stream);
+ stream->seq[stream->seq_index].sequence =
+ atomic_read(&stream->sequence) - 1;
+ stream->seq[stream->seq_index].timestamp = ts;
+ dev_dbg(&adev->auxdev.dev,
+ "sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+ resp->stream_handle,
+ stream->seq[stream->seq_index].sequence, ts);
+ stream->seq_index = (stream->seq_index + 1)
+ % IPU6_ISYS_MAX_PARALLEL_SOF;
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF:
+ ipu6_isys_csi2_eof_event_by_stream(stream);
+ dev_dbg(&adev->auxdev.dev,
+ "eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+ resp->stream_handle,
+ stream->seq[stream->seq_index].sequence, ts);
+ break;
+ case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY:
+ break;
+ default:
+ dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n",
+ resp->stream_handle, resp->type);
+ break;
+ }
+
+ ipu6_isys_put_stream(stream);
+leave:
+ ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
+ return 0;
+}
+
+static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = {
+ .isr = isys_isr,
+ .isr_threaded = NULL,
+ .wake_isr_thread = false,
+};
+
+static const struct auxiliary_device_id ipu6_isys_id_table[] = {
+ {
+ .name = "intel_ipu6.isys",
+ .driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table);
+
+static struct auxiliary_driver isys_driver = {
+ .name = IPU6_ISYS_NAME,
+ .probe = isys_probe,
+ .remove = isys_remove,
+ .id_table = ipu6_isys_id_table,
+ .driver = {
+ .pm = &isys_pm_ops,
+ },
+};
+
+module_auxiliary_driver(isys_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU6 input system driver");
+MODULE_IMPORT_NS(INTEL_IPU6);
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.h b/drivers/media/pci/intel/ipu6/ipu6-isys.h
new file mode 100644
index 000000000000..86dfc2eff5d0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_ISYS_H
+#define IPU6_ISYS_H
+
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+
+#include "ipu6.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-video.h"
+
+struct ipu6_bus_device;
+
+#define IPU6_ISYS_ENTITY_PREFIX "Intel IPU6"
+/* FW supports at most 16 streams */
+#define IPU6_ISYS_MAX_STREAMS 16
+#define ISYS_UNISPART_IRQS (IPU6_ISYS_UNISPART_IRQ_SW | \
+ IPU6_ISYS_UNISPART_IRQ_CSI0 | \
+ IPU6_ISYS_UNISPART_IRQ_CSI1)
+
+#define IPU6_ISYS_2600_MEM_LINE_ALIGN 64
+
+/*
+ * Current message queue configuration. These must be big enough
+ * so that they never get full. The queues are located in system memory.
+ */
+#define IPU6_ISYS_SIZE_RECV_QUEUE 40
+#define IPU6_ISYS_SIZE_SEND_QUEUE 40
+#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5
+#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5
+#define IPU6_ISYS_NUM_RECV_QUEUE 1
+
+#define IPU6_ISYS_MIN_WIDTH 2U
+#define IPU6_ISYS_MIN_HEIGHT 2U
+#define IPU6_ISYS_MAX_WIDTH 4672U
+#define IPU6_ISYS_MAX_HEIGHT 3416U
+
+/* the threshold granularity is 2KB on IPU6 */
+#define IPU6_SRAM_GRANULARITY_SHIFT 11
+#define IPU6_SRAM_GRANULARITY_SIZE 2048
+/* the threshold granularity is 1KB on IPU6SE */
+#define IPU6SE_SRAM_GRANULARITY_SHIFT 10
+#define IPU6SE_SRAM_GRANULARITY_SIZE 1024
+/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */
+#define IPU6_MAX_SRAM_SIZE (200 << 10)
+/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */
+#define IPU6SE_MAX_SRAM_SIZE (96 << 10)
+
+#define IPU6EP_LTR_VALUE 200
+#define IPU6EP_MIN_MEMOPEN_TH 0x4
+#define IPU6EP_MTL_LTR_VALUE 1023
+#define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc
+
+struct ltr_did {
+ union {
+ u32 value;
+ struct {
+ u8 val0;
+ u8 val1;
+ u8 val2;
+ u8 val3;
+ } bits;
+ } lut_ltr;
+ union {
+ u32 value;
+ struct {
+ u8 th0;
+ u8 th1;
+ u8 th2;
+ u8 th3;
+ } bits;
+ } lut_fill_time;
+};
+
+struct isys_iwake_watermark {
+ bool iwake_enabled;
+ bool force_iwake_disable;
+ u32 iwake_threshold;
+ u64 isys_pixelbuffer_datarate;
+ struct ltr_did ltrdid;
+ struct mutex mutex; /* protect whole struct */
+ struct ipu6_isys *isys;
+ struct list_head video_list;
+};
+
+struct ipu6_isys_csi2_config {
+ u32 nlanes;
+ u32 port;
+};
+
+struct sensor_async_sd {
+ struct v4l2_async_connection asc;
+ struct ipu6_isys_csi2_config csi2;
+};
+
+/*
+ * struct ipu6_isys
+ *
+ * @media_dev: Media device
+ * @v4l2_dev: V4L2 device
+ * @adev: ISYS bus device
+ * @power: Is ISYS powered on or not?
+ * @isr_csi2_bits: Which CSI2 IRQ bits does the ISR handle?
+ * @power_lock: Serialise access to power (power state in general)
+ * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers
+ * @streams_lock: serialise access to streams
+ * @streams: streams per firmware stream ID
+ * @fwcom: fw communication layer private pointer
+ * or optional external library private pointer
+ * @line_align: line alignment in memory
+ * @phy_termcal_val: the termination calibration value, only used for DWC PHY
+ * @need_reset: Isys requires d0i0->i3 transition
+ * @ref_count: total number of callers of fw open
+ * @mutex: serialise access to isys video open/release related operations
+ * @stream_mutex: serialise stream start and stop, queueing requests
+ * @pdata: platform data pointer
+ * @csi2: CSI-2 receivers
+ */
+struct ipu6_isys {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct ipu6_bus_device *adev;
+
+ int power;
+ spinlock_t power_lock;
+ u32 isr_csi2_bits;
+ u32 csi2_rx_ctrl_cached;
+ spinlock_t streams_lock;
+ struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS];
+ int streams_ref_count[IPU6_ISYS_MAX_STREAMS];
+ void *fwcom;
+ unsigned int line_align;
+ u32 phy_termcal_val;
+ bool need_reset;
+ bool icache_prefetch;
+ bool csi2_cse_ipc_not_supported;
+ unsigned int ref_count;
+ unsigned int stream_opened;
+ unsigned int sensor_type;
+
+ struct mutex mutex;
+ struct mutex stream_mutex;
+
+ struct ipu6_isys_pdata *pdata;
+
+ int (*phy_set_power)(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on);
+
+ struct ipu6_isys_csi2 *csi2;
+
+ struct pm_qos_request pm_qos;
+ spinlock_t listlock; /* Protect framebuflist */
+ struct list_head framebuflist;
+ struct list_head framebuflist_fw;
+ struct v4l2_async_notifier notifier;
+ struct isys_iwake_watermark iwake_watermark;
+};
+
+struct isys_fw_msgs {
+ union {
+ u64 dummy;
+ struct ipu6_fw_isys_frame_buff_set_abi frame;
+ struct ipu6_fw_isys_stream_cfg_data_abi stream;
+ } fw_msg;
+ struct list_head head;
+ dma_addr_t dma_addr;
+};
+
+struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream);
+void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data);
+void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys);
+
+extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops;
+
+void isys_setup_hw(struct ipu6_isys *isys);
+irqreturn_t isys_isr(struct ipu6_bus_device *adev);
+void update_watermark_setting(struct ipu6_isys *isys);
+
+int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on);
+
+int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on);
+
+int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys,
+ struct ipu6_isys_csi2_config *cfg,
+ const struct ipu6_isys_csi2_timing *timing,
+ bool on);
+#endif /* IPU6_ISYS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
new file mode 100644
index 000000000000..c3a20507d6db
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+#include <asm/barrier.h>
+
+#include <linux/align.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/iova.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "ipu6.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-regs.h"
+
+#define ISP_PAGE_SHIFT 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_SHIFT 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PADDR_SHIFT 12
+
+#define REG_TLB_INVALIDATE 0x0000
+
+#define REG_L1_PHYS 0x0004 /* 27-bit pfn */
+#define REG_INFO 0x0008
+
+#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+
+static void tlb_invalidate(struct ipu6_mmu *mmu)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ if (!mmu->ready) {
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ /*
+		 * To avoid a HW-bug-induced deadlock in some of the IPU6
+		 * MMUs on successive invalidate calls, first do a read of the
+		 * page table base before writing the invalidate register.
+		 * MMUs which need this workaround have the
+		 * insert_read_before_invalidate flag set to true.
+		 * The return value of the read is disregarded.
+ */
+ if (mmu->mmu_hw[i].insert_read_before_invalidate)
+ readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ writel(0xffffffff, mmu->mmu_hw[i].base +
+ REG_TLB_INVALIDATE);
+ /*
+		 * The TLB invalidation is a "single cycle" operation (in IOMMU
+		 * clock cycles). wmb() ensures the MMIO write has reached the
+		 * IPU6 TLB invalidate register by the time the CPU attempts to
+		 * update the IOMMU page table, if not sooner.
+ */
+ wmb();
+ }
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+#ifdef DEBUG
+static void page_table_dump(struct ipu6_mmu_info *mmu_info)
+{
+ u32 l1_idx;
+
+ dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ u32 l2_idx;
+ u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
+ continue;
+ dev_dbg(mmu_info->dev,
+ "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pa\n",
+ l1_idx, iova, iova + ISP_PAGE_SIZE,
+ TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
+
+ for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ u32 *l2_pt = mmu_info->l2_pts[l1_idx];
+ u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
+
+ if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
+ continue;
+
+ dev_dbg(mmu_info->dev,
+ "\tl2 entry %u; iova 0x%8.8x, phys %pa\n",
+ l2_idx, iova2,
+ TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ }
+ }
+
+ dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
+}
+#endif /* DEBUG */
+
+static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu_info->dev, dma))
+ return 0;
+
+ return dma;
+}
+
+static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+ void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map dummy page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->dummy_page = pt;
+ mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ goto err_free_page;
+ }
+
+ for (i = 0; i < ISP_L2PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ mmu_info->dummy_l2_pt = pt;
+ mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_l2_pteval;
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+ dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+
+ return pt;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ return pt;
+}
+
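+/*
+ * Map one ISP page at @iova: install an L2 page table for the L1 slot if
+ * none exists yet, then write the PTE and flush the CPU cache lines
+ * covering the updated entries.
+ */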
+static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 iova_start = iova;
+ u32 *l2_pt, *l2_virt;
+ unsigned int l2_idx;
+ unsigned long flags;
+ dma_addr_t dma;
+ u32 l1_entry;
+
+ dev_dbg(mmu_info->dev,
+ "mapping l2 page table for l1 index %u (iova %8.8x)\n",
+ l1_idx, (u32)iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l1_entry = mmu_info->l1_pt[l1_idx];
+ if (l1_entry == mmu_info->dummy_l2_pteval) {
+ l2_virt = mmu_info->l2_pts[l1_idx];
+ if (likely(!l2_virt)) {
+ l2_virt = alloc_l2_pt(mmu_info);
+ if (!l2_virt) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ dma = map_single(mmu_info, l2_virt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ free_page((unsigned long)l2_virt);
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l1_entry = dma >> ISP_PADDR_SHIFT;
+
+ dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
+ l1_idx, l2_virt);
+ mmu_info->l1_pt[l1_idx] = l1_entry;
+ mmu_info->l2_pts[l1_idx] = l2_virt;
+ clflush_cache_range((void *)&mmu_info->l1_pt[l1_idx],
+ sizeof(mmu_info->l1_pt[l1_idx]));
+ }
+
+ l2_pt = mmu_info->l2_pts[l1_idx];
+
+ dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);
+
+ paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+
+ l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+
+ dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+ if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+ clflush_cache_range((void *)&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+
+ return 0;
+}
+
+static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+ u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+ dev_dbg(mmu_info->dev,
+ "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+ iova_start, iova_end, size, paddr);
+
+ return l2_map(mmu_info, iova_start, paddr, size);
+}
+
+static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t dummy, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 iova_start = iova;
+ unsigned int l2_idx;
+ size_t unmapped = 0;
+ unsigned long flags;
+ u32 *l2_pt;
+
+ dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+ l1_idx, iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ dev_err(mmu_info->dev,
+ "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+ iova, l1_idx);
+ return 0;
+ }
+
+ for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+ < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ l2_pt = mmu_info->l2_pts[l1_idx];
+ dev_dbg(mmu_info->dev,
+ "unmap l2 index %u with pteval 0x%10.10llx\n",
+ l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+ clflush_cache_range((void *)&l2_pt[l2_idx],
+ sizeof(l2_pt[l2_idx]));
+ unmapped++;
+ }
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return unmapped << ISP_PAGE_SHIFT;
+}
+
+static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
+ unsigned long iova, size_t size)
+{
+ return l2_unmap(mmu_info, iova, 0, size);
+}
+
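+/*
+ * Reserve an 8 MB IOVA range and map every page of it to the single
+ * physical trash page already allocated by the caller (mmu->trash_page).
+ */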
+static int allocate_trash_buffer(struct ipu6_mmu *mmu)
+{
+ unsigned int n_pages = PHYS_PFN(PAGE_ALIGN(IPU6_MMUV2_TRASH_RANGE));
+ struct iova *iova;
+ unsigned int i;
+ dma_addr_t dma;
+ unsigned long iova_addr;
+ int ret;
+
+ /* Allocate 8MB in iova range */
+ iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+ if (!iova) {
+ dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+ dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+ ret = -ENOMEM;
+ goto out_free_iova;
+ }
+
+ mmu->pci_trash_page = dma;
+
+ /*
+ * Map the 8MB iova address range to the same physical trash page
+	 * mmu->trash_page, which was already reserved at probe time
+ */
+ iova_addr = iova->pfn_lo;
+ for (i = 0; i < n_pages; i++) {
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+ mmu->pci_trash_page, PAGE_SIZE);
+ if (ret) {
+ dev_err(mmu->dev,
+ "mapping trash buffer range failed\n");
+ goto out_unmap;
+ }
+
+ iova_addr++;
+ }
+
+ mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
+ dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+ mmu->mmid, (unsigned int)mmu->iova_trash_page);
+ return 0;
+
+out_unmap:
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+ dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+ return ret;
+}
+
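+/*
+ * Program the L1 page table base, info bits and TLB L1/L2 stream block
+ * addresses for every MMU HW instance, set up the trash buffer on first
+ * use and finally mark the MMU as ready.
+ */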
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
+{
+ struct ipu6_mmu_info *mmu_info;
+ unsigned long flags;
+ unsigned int i;
+
+ mmu_info = mmu->dmap->mmu_info;
+
+	/* Initialise each MMU HW block */
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+ unsigned int j;
+ u16 block_addr;
+
+ /* Write page table address per MMU */
+ writel((phys_addr_t)mmu_info->l1_pt_dma,
+ mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ /* Set info bits per MMU */
+ writel(mmu->mmu_hw[i].info_bits,
+ mmu->mmu_hw[i].base + REG_INFO);
+
+ /* Configure MMU TLB stream configuration for L1 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+ block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+ if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L1 configuration\n");
+ return -EINVAL;
+ }
+
+			/* Write the block start address for each stream */
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l1_stream_id_reg_offset + 4 * j);
+ }
+
+ /* Configure MMU TLB stream configuration for L2 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+ block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+ if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L2 configuration\n");
+ return -EINVAL;
+ }
+
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l2_stream_id_reg_offset + 4 * j);
+ }
+ }
+
+ if (!mmu->trash_page) {
+ int ret;
+
+ mmu->trash_page = alloc_page(GFP_KERNEL);
+ if (!mmu->trash_page) {
+ dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = allocate_trash_buffer(mmu);
+ if (ret) {
+ __free_page(mmu->trash_page);
+ mmu->trash_page = NULL;
+ dev_err(mmu->dev, "trash buffer allocation failed\n");
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = true;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6);
+
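+/*
+ * Allocate an ipu6_mmu_info with its dummy page, dummy L2 page table,
+ * L2 pointer array and DMA-mapped L1 page table.
+ */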
+static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
+{
+ struct ipu6_mmu_info *mmu_info;
+ int ret;
+
+ mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+ if (!mmu_info)
+ return NULL;
+
+ mmu_info->aperture_start = 0;
+ mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+ IPU6_MMU_ADDR_BITS :
+ IPU6_MMU_ADDR_BITS_NON_SECURE);
+ mmu_info->pgsize_bitmap = SZ_4K;
+ mmu_info->dev = &isp->pdev->dev;
+
+ ret = get_dummy_page(mmu_info);
+ if (ret)
+ goto err_free_info;
+
+ ret = alloc_dummy_l2_pt(mmu_info);
+ if (ret)
+ goto err_free_dummy_page;
+
+ mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
+ if (!mmu_info->l2_pts)
+ goto err_free_dummy_l2_pt;
+
+ /*
+	 * We always DMA-map the L1 page table (a single page, just like
+	 * the L2 page tables).
+ */
+ mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+ if (!mmu_info->l1_pt)
+ goto err_free_l2_pts;
+
+ spin_lock_init(&mmu_info->lock);
+
+ dev_dbg(mmu_info->dev, "domain initialised\n");
+
+ return mmu_info;
+
+err_free_l2_pts:
+ vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+ free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+ free_dummy_page(mmu_info);
+err_free_info:
+ kfree(mmu_info);
+
+ return NULL;
+}
+
+void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = false;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6);
+
+static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
+{
+ struct ipu6_dma_mapping *dmap;
+
+ dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+ if (!dmap)
+ return NULL;
+
+ dmap->mmu_info = ipu6_mmu_alloc(isp);
+ if (!dmap->mmu_info) {
+ kfree(dmap);
+ return NULL;
+ }
+
+ init_iova_domain(&dmap->iovad, SZ_4K, 1);
+ dmap->mmu_info->dmap = dmap;
+
+ dev_dbg(&isp->pdev->dev, "alloc mapping\n");
+
+ iova_cache_get();
+
+ return dmap;
+}
+
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+ dma_addr_t iova)
+{
+ phys_addr_t phy_addr;
+ unsigned long flags;
+ u32 *l2_pt;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+ phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+ phy_addr <<= ISP_PAGE_SHIFT;
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return phy_addr;
+}
+
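+/*
+ * Pick the largest page size from @pgsize_bitmap that both fits within
+ * @size and matches the alignment of @addr_merge.
+ */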
+static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
+ unsigned long addr_merge, size_t size)
+{
+ unsigned int pgsize_idx;
+ size_t pgsize;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ if (likely(addr_merge)) {
+ /* Max page size allowed by address */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+ pgsize &= pgsize_bitmap;
+
+ WARN_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ return pgsize;
+}
+
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * The virtual address and the size of the mapping must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		dev_err(mmu_info->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova, size - unmapped);
+
+ unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (mmu_info->pgsize_bitmap == 0UL)
+ return -ENODEV;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
+ iova, &paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ while (size) {
+ size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova | paddr, size);
+
+ dev_dbg(mmu_info->dev,
+ "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+ iova, &paddr, pgsize);
+
+ ret = __ipu6_mmu_map(mmu_info, iova, paddr, pgsize);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ ipu6_mmu_unmap(mmu_info, orig_iova, orig_size - size);
+
+ return ret;
+}
+
+static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
+{
+ struct ipu6_dma_mapping *dmap = mmu->dmap;
+ struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
+ struct iova *iova;
+ u32 l1_idx;
+
+ if (mmu->iova_trash_page) {
+ iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
+ if (iova) {
+ /* unmap and free the trash buffer iova */
+ ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+ __free_iova(&dmap->iovad, iova);
+ } else {
+ dev_err(mmu->dev, "trash buffer iova not found.\n");
+ }
+
+ mmu->iova_trash_page = 0;
+ dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ mmu->pci_trash_page = 0;
+ __free_page(mmu->trash_page);
+ }
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+ }
+ }
+
+ vfree(mmu_info->l2_pts);
+ free_dummy_page(mmu_info);
+ dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+ free_page((unsigned long)mmu_info->l1_pt);
+ kfree(mmu_info);
+}
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu6_hw_variants *hw)
+{
+ struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
+ struct ipu6_mmu_pdata *pdata;
+ struct ipu6_mmu *mmu;
+ unsigned int i;
+
+ if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < hw->nr_mmus; i++) {
+ struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+ const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+ if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
+ src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
+ return ERR_PTR(-EINVAL);
+
+ *pdata_mmu = *src_mmu;
+ pdata_mmu->base = base + src_mmu->offset;
+ }
+
+ mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->mmid = mmid;
+ mmu->mmu_hw = pdata->mmu_hw;
+ mmu->nr_mmus = hw->nr_mmus;
+ mmu->tlb_invalidate = tlb_invalidate;
+ mmu->ready = false;
+ INIT_LIST_HEAD(&mmu->vma_list);
+ spin_lock_init(&mmu->ready_lock);
+
+ mmu->dmap = alloc_dma_mapping(isp);
+ if (!mmu->dmap) {
+ dev_err(dev, "can't alloc dma mapping\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mmu;
+}
+
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
+{
+ struct ipu6_dma_mapping *dmap = mmu->dmap;
+
+ ipu6_mmu_destroy(mmu);
+ mmu->dmap = NULL;
+ iova_cache_put();
+ put_iova_domain(&dmap->iovad);
+ kfree(dmap);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
new file mode 100644
index 000000000000..21cdb0f146eb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2024 Intel Corporation */
+
+#ifndef IPU6_MMU_H
+#define IPU6_MMU_H
+
+#define ISYS_MMID 1
+#define PSYS_MMID 0
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+struct device;
+struct page;
+struct ipu6_hw_variants;
+
+struct ipu6_mmu_info {
+ struct device *dev;
+
+ u32 *l1_pt;
+ u32 l1_pt_dma;
+ u32 **l2_pts;
+
+ u32 *dummy_l2_pt;
+ u32 dummy_l2_pteval;
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
+
+ spinlock_t lock; /* Serialize access to users */
+ struct ipu6_dma_mapping *dmap;
+};
+
+struct ipu6_mmu {
+ struct list_head node;
+
+ struct ipu6_mmu_hw *mmu_hw;
+ unsigned int nr_mmus;
+ unsigned int mmid;
+
+ phys_addr_t pgtbl;
+ struct device *dev;
+
+ struct ipu6_dma_mapping *dmap;
+ struct list_head vma_list;
+
+ struct page *trash_page;
+ dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+ dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */
+
+ bool ready;
+ spinlock_t ready_lock; /* Serialize access to bool ready */
+
+ void (*tlb_invalidate)(struct ipu6_mmu *mmu);
+};
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu6_hw_variants *hw);
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu);
+void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ size_t size);
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+ dma_addr_t iova);
+#endif
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h
new file mode 100644
index 000000000000..20f27011df43
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023--2024 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H
+#define IPU6_PLATFORM_BUTTRESS_REGS_H
+
+#include <linux/bits.h>
+
+/* IS_WORKPOINT_REQ */
+#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34
+/* PS_WORKPOINT_REQ */
+#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38
+
+/* should be tuned for real silicon */
+#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08
+#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a
+#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d
+
+#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10
+#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708
+
+#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3
+#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3)
+
+#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6
+#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6)
+
+#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0
+#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1
+#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2
+#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3
+
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c
+
+#define BUTTRESS_REG_WDT 0x8
+#define BUTTRESS_REG_BTRS_CTRL 0xc
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0)
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1)
+#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8)
+
+#define BUTTRESS_REG_FW_RESET_CTL 0x30
+#define BUTTRESS_FW_RESET_CTL_START BIT(0)
+#define BUTTRESS_FW_RESET_CTL_DONE BIT(1)
+
+#define BUTTRESS_REG_IS_FREQ_CTL 0x34
+#define BUTTRESS_REG_PS_FREQ_CTL 0x38
+
+#define BUTTRESS_FREQ_CTL_START BIT(31)
+#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16)
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8)
+#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0)
+
+#define BUTTRESS_REG_PWR_STATE 0x5c
+
+#define BUTTRESS_PWR_STATE_RESET 0x0
+#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1
+#define BUTTRESS_PWR_STATE_PWR_RDY 0x3
+#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4
+
+#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11)
+
+enum {
+ BUTTRESS_PWR_STATE_HH_STATE_IDLE,
+ BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS,
+ BUTTRESS_PWR_STATE_HH_STATE_DONE,
+ BUTTRESS_PWR_STATE_HH_STATE_ERR,
+};
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19)
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16
+
+#define BUTTRESS_REG_SECURITY_CTL 0x300
+#define BUTTRESS_REG_SKU 0x314
+#define BUTTRESS_REG_SECURITY_TOUCH 0x318
+#define BUTTRESS_REG_CAMERA_MASK 0x84
+
+#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16)
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0)
+
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0)
+#define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1)
+#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3)
+
+#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78
+#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C
+#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80
+
+#define BUTTRESS_REG_ISR_STATUS 0x90
+#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94
+#define BUTTRESS_REG_ISR_ENABLE 0x98
+#define BUTTRESS_REG_ISR_CLEAR 0x9C
+
+#define BUTTRESS_ISR_IS_IRQ BIT(0)
+#define BUTTRESS_ISR_PS_IRQ BIT(1)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3)
+#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4)
+#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5)
+#define BUTTRESS_ISR_CSE_CSR_SET BIT(6)
+#define BUTTRESS_ISR_ISH_CSR_SET BIT(7)
+#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8)
+#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9)
+#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10)
+#define BUTTRESS_ISR_SAI_VIOLATION BIT(11)
+#define BUTTRESS_ISR_HW_ASSERTION BIT(12)
+#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13)
+#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14)
+#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15)
+#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16)
+#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17)
+#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18)
+#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19)
+#define BUTTRESS_ISR_UFI_ERROR BIT(20)
+
+#define BUTTRESS_REG_IU2CSEDB0 0x100
+
+#define BUTTRESS_IU2CSEDB0_BUSY BIT(31)
+#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2
+
+#define BUTTRESS_REG_IU2CSEDATA0 0x104
+
+#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3
+#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16
+
+#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2)
+#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4)
+
+#define BUTTRESS_REG_IU2CSECSR 0x108
+
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5)
+
+#define BUTTRESS_REG_CSE2IUDB0 0x304
+#define BUTTRESS_REG_CSE2IUCSR 0x30C
+#define BUTTRESS_REG_CSE2IUDATA0 0x308
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0)
+
+#define BUTTRESS_REG_ISH2IUCSR 0x50
+#define BUTTRESS_REG_ISH2IUDB0 0x54
+#define BUTTRESS_REG_ISH2IUDATA0 0x58
+
+#define BUTTRESS_REG_IU2ISHDB0 0x10C
+#define BUTTRESS_REG_IU2ISHDATA0 0x110
+#define BUTTRESS_REG_IU2ISHDATA1 0x114
+#define BUTTRESS_REG_IU2ISHCSR 0x118
+
+#define BUTTRESS_REG_FABRIC_CMD 0x88
+
+#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0)
+#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4)
+
+#define BUTTRESS_REG_TSW_CTL 0x120
+#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8)
+
+#define BUTTRESS_REG_TSC_LO 0x164
+#define BUTTRESS_REG_TSC_HI 0x168
+
+#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \
+ BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ)
+
+#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+ BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \
+ BUTTRESS_ISR_SAI_VIOLATION)
+#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h
new file mode 100644
index 000000000000..cc58377534dc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023--2024 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H
+#define IPU6_PLATFORM_ISYS_CSI2_REG_H
+
+#include <linux/bits.h>
+
+#define CSI_REG_BASE 0x220000
+#define CSI_REG_PORT_BASE(id) (CSI_REG_BASE + (id) * 0x1000)
+
+/* CSI Port General Purpose Registers */
+#define CSI_REG_PORT_GPREG_SRST 0x0
+#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4
+#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8
+
+/*
+ * Port IRQs mapping events:
+ * IRQ0 - CSI_FE event
+ * IRQ1 - CSI_SYNC
+ * IRQ2 - S2M_SIDS0TO7
+ * IRQ3 - S2M_SIDS8TO15
+ */
+#define CSI_PORT_REG_BASE_IRQ_CSI 0x80
+#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0
+
+#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0
+#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4
+#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8
+#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc
+#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10
+#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14
+
+#define IPU6SE_CSI_RX_ERROR_IRQ_MASK GENMASK(18, 0)
+#define IPU6_CSI_RX_ERROR_IRQ_MASK GENMASK(19, 0)
+
+#define CSI_RX_NUM_ERRORS_IN_IRQ 20
+#define CSI_RX_NUM_IRQ 32
+
+#define IPU_CSI_RX_IRQ_FS_VC(chn) (1 << ((chn) * 2))
+#define IPU_CSI_RX_IRQ_FE_VC(chn) (2 << ((chn) * 2))
+
+/* PPI2CSI */
+#define CSI_REG_PPI2CSI_ENABLE 0x200
+#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204
+#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK GENMASK(4, 3)
+#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208
+
+enum CSI_PPI2CSI_CTRL {
+ CSI_PPI2CSI_DISABLE = 0,
+ CSI_PPI2CSI_ENABLE = 1,
+};
+
+/* CSI_FE */
+#define CSI_REG_CSI_FE_ENABLE 0x280
+#define CSI_REG_CSI_FE_MODE 0x284
+#define CSI_REG_CSI_FE_MUX_CTRL 0x288
+#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290
+
+enum CSI_FE_ENABLE_TYPE {
+ CSI_FE_DISABLE = 0,
+ CSI_FE_ENABLE = 1,
+};
+
+enum CSI_FE_MODE_TYPE {
+ CSI_FE_DPHY_MODE = 0,
+ CSI_FE_CPHY_MODE = 1,
+};
+
+enum CSI_FE_INPUT_SELECTOR {
+ CSI_SENSOR_INPUT = 0,
+ CSI_MIPIGEN_INPUT = 1,
+};
+
+enum CSI_FE_SYNC_CNTR_SEL_TYPE {
+ CSI_CNTR_SENSOR_LINE_ID = BIT(0),
+ CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID,
+ CSI_CNTR_SENSOR_FRAME_ID = BIT(1),
+ CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID,
+};
+
+/* CSI HUB General Purpose Registers */
+#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000)
+#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004)
+
+#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4)
+#define CSI_REG_HUB_FW_ACCESS_PORT_OFS 0x17000
+#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS 0x16000
+#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id) \
+ (CSI_REG_BASE + (ofs) + (id) * 4)
+
+enum CSI_PORT_CLK_GATING_SWITCH {
+ CSI_PORT_CLK_GATING_OFF = 0,
+ CSI_PORT_CLK_GATING_ON = 1,
+};
+
+#define CSI_REG_BASE_HUB_IRQ 0x18200
+
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214
+
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234
+
+/* MTL IPU6V6 irq ctrl0 & ctrl1 */
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714
+
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734
+
+/*
+ * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits)
+ * [0] CSI_PORT.IRQ_CTRL0_csi
+ * [1] CSI_PORT.IRQ_CTRL1_csi_sync
+ * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7
+ * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15
+ */
+#define IPU6_ISYS_UNISPART_IRQ_CSI2(port) \
+ (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE))
+
+/*
+ * IPU6SE supports 2 front ends, 2 ports per front end, i.e. 4 ports 0..3:
+ * sip0 - 0, 1
+ * sip1 - 2, 3
+ * Ports 0 and 2 support 4 data lanes; ports 1 and 3 support 2 data lanes.
+ * All offsets are relative to the isys base address.
+ */
+
+#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4)
+#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4)
+
+#define CSI2_HUB_GPREG_DPHY_TIMER_INCR 0x238040
+#define CSI2_HUB_GPREG_HPLL_FREQ 0x238044
+#define CSI2_HUB_GPREG_IS_CLK_RATIO 0x238048
+#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE 0x23804c
+#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE 0x238058
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL 0x23805c
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL 0x238088
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL 0x2380a4
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL 0x2380d0
+
+#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48)
+
+/* offset from port base */
+#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL 0x0
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE 0x4
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE 0x8
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8)
+
+#endif /* IPU6_ISYS_CSI2_REG_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
new file mode 100644
index 000000000000..b883385adb44
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2018 - 2024 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_REGS_H
+#define IPU6_PLATFORM_REGS_H
+
+#include <linux/bits.h>
+
+/*
+ * IPU6 uses a uniform address space: all subsystem registers are located in
+ * one single space starting from 0, but in different sections with different
+ * addresses. The subsystem offsets are defined as 0 because each register
+ * definition already includes its full address offset.
+ */
+#define IPU6_UNIFIED_OFFSET 0
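+
+/*
+ * For illustration only (a sketch, not part of the register list in this
+ * header): with the unified address space, a subsystem register is reached by
+ * adding its subsystem offset and the register offset to the single mapped
+ * BAR. Assuming the standard readl()/writel() MMIO accessors, the ISYS MMU0
+ * L1 stream ID register would be read roughly as
+ *
+ *	val = readl(isp->base + IPU6_ISYS_IOMMU0_OFFSET +
+ *		    IPU6_MMU_L1_STREAM_ID_REG_OFFSET);
+ */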
+
+#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000
+#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500
+#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00
+
+#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000
+#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700
+#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00
+#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500
+
+/* the offset from IOMMU base register */
+#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c
+#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c
+#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c
+
+#define IPU6_MMU_INFO_OFFSET 0x8
+
+#define IPU6_ISYS_SPC_OFFSET 0x210000
+
+#define IPU6SE_PSYS_SPC_OFFSET 0x110000
+#define IPU6_PSYS_SPC_OFFSET 0x118000
+
+#define IPU6_ISYS_DMEM_OFFSET 0x200000
+#define IPU6_PSYS_DMEM_OFFSET 0x100000
+
+#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000
+#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004
+#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008
+#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c
+#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010
+#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014
+#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414
+#define IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418
+#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2)
+#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3)
+#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22)
+
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214
+
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114
+
+/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */
+#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4))
+
+#define IPU6_CSI_IRQ_NUM_PER_PIPE 4
+#define IPU6SE_ISYS_CSI_PORT_NUM 4
+#define IPU6_ISYS_CSI_PORT_NUM 8
+
+#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num)
+
+/* PKG DIR OFFSET in IMR in secure mode */
+#define IPU6_PKG_DIR_IMR_OFFSET 0x40
+
+#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0
+
+#define IPU6_ISYS_SPC_STATUS_START BIT(1)
+#define IPU6_ISYS_SPC_STATUS_RUN BIT(3)
+#define IPU6_ISYS_SPC_STATUS_READY BIT(5)
+#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12)
+#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13)
+
+#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0
+#define IPU6_PSYS_REG_SPC_START_PC 0x4
+#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10
+#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14
+
+#define IPU6_PSYS_SPC_STATUS_START BIT(1)
+#define IPU6_PSYS_SPC_STATUS_RUN BIT(3)
+#define IPU6_PSYS_SPC_STATUS_READY BIT(5)
+#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12)
+#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13)
+
+#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000
+
+#define IPU6_INFO_ENABLE_SNOOP BIT(0)
+#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1)
+#define IPU6_INFO_DEC_PASS_THROUGH BIT(2)
+#define IPU6_INFO_ZLW BIT(3)
+#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9)
+#define IPU6_INFO_IMR_BASE BIT(10)
+#define IPU6_INFO_IMR_DESTINED BIT(11)
+
+#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF
+
+/*
+ * s2m_pixel_soc_pixel_remapping enables the pixel s2m remap capability.
+ * Remap here means that s2m rearranges the order of the pixels within each
+ * group of 4 pixels.
+ * For example, with mirror remapping, if the input's first 4 pixels are
+ * 1 2 3 4, then the output's first 4 pixels are 4 3 2 1.
+ * 0xE4 comes from the s2m MAS document and means no remapping.
+ */
+#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4
+/*
+ * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping.
+ * This remapping is exactly like the stream2mmio remapping.
+ */
+#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4
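+
+/*
+ * Illustration only - one plausible reading of the 0xe4 value, assuming each
+ * 2-bit field selects the source pixel for an output position (this
+ * interpretation is not stated in the comments above):
+ * 0xe4 = 0b11100100 -> out[0]=in[0], out[1]=in[1], out[2]=in[2], out[3]=in[3],
+ * i.e. the identity mapping, which matches "no remapping".
+ */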
+
+#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000
+#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000
+#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc)
+
+enum ipu6_device_ab_group1_target_id {
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER,
+ IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB,
+};
+
+enum nci_ab_access_mode {
+ NCI_AB_ACCESS_MODE_RW, /* read & write */
+ NCI_AB_ACCESS_MODE_RO, /* read only */
+ NCI_AB_ACCESS_MODE_WO, /* write only */
+ NCI_AB_ACCESS_MODE_NA, /* No access at all */
+};
+
+/* IRQ-related registers in PSYS */
+#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200
+#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204
+#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208
+#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c
+#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210
+#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214
+/* There are 8 FW interrupts, n = 0..7 */
+#define IPU6_PSYS_GPDEV_FWIRQ0 5
+#define IPU6_PSYS_GPDEV_FWIRQ1 6
+#define IPU6_PSYS_GPDEV_FWIRQ2 7
+#define IPU6_PSYS_GPDEV_FWIRQ3 8
+#define IPU6_PSYS_GPDEV_FWIRQ4 9
+#define IPU6_PSYS_GPDEV_FWIRQ5 10
+#define IPU6_PSYS_GPDEV_FWIRQ6 11
+#define IPU6_PSYS_GPDEV_FWIRQ7 12
+#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n)
+#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100)
+
+#endif /* IPU6_PLATFORM_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
new file mode 100644
index 000000000000..d2bebd208461
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013--2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci-ats.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/ipu-bridge.h>
+#include <media/ipu6-pci-table.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-cpd.h"
+#include "ipu6-isys.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-buttress-regs.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+#define IPU6_PCI_BAR 0
+
+struct ipu6_cell_program {
+ u32 magic_number;
+
+ u32 blob_offset;
+ u32 blob_size;
+
+ u32 start[3];
+
+ u32 icache_source;
+ u32 icache_target;
+ u32 icache_size;
+
+ u32 pmem_source;
+ u32 pmem_target;
+ u32 pmem_size;
+
+ u32 data_source;
+ u32 data_target;
+ u32 data_size;
+
+ u32 bss_target;
+ u32 bss_size;
+
+ u32 cell_id;
+ u32 regs_addr;
+
+ u32 cell_pmem_data_bus_address;
+ u32 cell_dmem_data_bus_address;
+ u32 cell_pmem_control_bus_address;
+ u32 cell_dmem_control_bus_address;
+
+ u32 next;
+ u32 dummy[2];
+};
+
+static struct ipu6_isys_internal_pdata isys_ipdata = {
+ .hw_variant = {
+ .offset = IPU6_UNIFIED_OFFSET,
+ .nr_mmus = 3,
+ .mmu_hw = {
+ {
+ .offset = IPU6_ISYS_IOMMU0_OFFSET,
+ .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU6_ISYS_IOMMU1_OFFSET,
+ .info_bits = 0,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 4
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU6_ISYS_IOMMUI_OFFSET,
+ .info_bits = 0,
+ .nr_l1streams = 0,
+ .nr_l2streams = 0,
+ .insert_read_before_invalidate = false,
+ },
+ },
+ .cdc_fifos = 3,
+ .cdc_fifo_threshold = {6, 8, 2},
+ .dmem_offset = IPU6_ISYS_DMEM_OFFSET,
+ .spc_offset = IPU6_ISYS_SPC_OFFSET,
+ },
+ .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
+};
+
+static struct ipu6_psys_internal_pdata psys_ipdata = {
+ .hw_variant = {
+ .offset = IPU6_UNIFIED_OFFSET,
+ .nr_mmus = 4,
+ .mmu_hw = {
+ {
+ .offset = IPU6_PSYS_IOMMU0_OFFSET,
+ .info_bits =
+ IPU6_INFO_REQUEST_DESTINATION_IOSF,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU6_PSYS_IOMMU1_OFFSET,
+ .info_bits = 0,
+ .nr_l1streams = 32,
+ .l1_block_sz = {
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 10,
+ 5, 4, 14, 6, 4, 14, 6, 4, 8,
+ 4, 2, 1, 1, 1, 1, 14
+ },
+ .nr_l2streams = 32,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU6_PSYS_IOMMU1R_OFFSET,
+ .info_bits = 0,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 1, 4, 4, 4, 4, 16, 8, 4, 32,
+ 16, 16, 2, 2, 2, 1, 12
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU6_PSYS_IOMMUI_OFFSET,
+ .info_bits = 0,
+ .nr_l1streams = 0,
+ .nr_l2streams = 0,
+ .insert_read_before_invalidate = false,
+ },
+ },
+ .dmem_offset = IPU6_PSYS_DMEM_OFFSET,
+ },
+};
+
+static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
+ .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
+ .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+ .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
+ .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+ .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+ .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
+ .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
+ .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+ .freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
+ .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+ .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+ .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static void
+ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
+ const struct ipu6_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base,
+ u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
+{
+ struct ipu6_cell_program *prog;
+ void __iomem *spc_base;
+ u32 server_fw_addr;
+ dma_addr_t dma_addr;
+ u32 pg_offset;
+
+ server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
+ if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
+ dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
+ else
+ dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);
+
+ pg_offset = server_fw_addr - dma_addr;
+ prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset);
+ spc_base = base + prog->regs_addr;
+ if (spc_base != (base + hw_variant->spc_offset))
+ dev_warn(&isp->pdev->dev,
+ "SPC reg addr %p not matching value from CPD %p\n",
+ base + hw_variant->spc_offset, spc_base);
+ writel(server_fw_addr + prog->blob_offset +
+ prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
+ writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
+ spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+ writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
+ writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
+}
+
+void ipu6_configure_spc(struct ipu6_device *isp,
+ const struct ipu6_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+ dma_addr_t pkg_dir_dma_addr)
+{
+ void __iomem *dmem_base = base + hw_variant->dmem_offset;
+ void __iomem *spc_regs_base = base + hw_variant->spc_offset;
+ u32 val;
+
+ val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
+ val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+ writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
+
+ if (isp->secure_mode)
+ writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
+ else
+ ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
+ pkg_dir, pkg_dir_dma_addr);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);
+
+#define IPU6_ISYS_CSI2_NPORTS 4
+#define IPU6SE_ISYS_CSI2_NPORTS 4
+#define IPU6_TGL_ISYS_CSI2_NPORTS 8
+#define IPU6EP_MTL_ISYS_CSI2_NPORTS 4
+
+static void ipu6_internal_pdata_init(struct ipu6_device *isp)
+{
+ u8 hw_ver = isp->hw_ver;
+
+ isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
+ isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
+ isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
+ isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
+ isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
+ isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
+ isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
+ isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
+ isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
+ isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
+ isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
+ isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
+ isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
+ isys_ipdata.csi2.ctrl0_irq_clear =
+ IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
+ isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
+ isys_ipdata.csi2.ctrl0_irq_enable =
+ IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
+ isys_ipdata.csi2.ctrl0_irq_status =
+ IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
+ isys_ipdata.csi2.ctrl0_irq_lnp =
+ IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
+ isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
+ psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
+ isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;
+
+ if (is_ipu6ep(hw_ver)) {
+ isys_ipdata.ltr = IPU6EP_LTR_VALUE;
+ isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
+ }
+
+ if (is_ipu6_tgl(hw_ver))
+ isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;
+
+ if (is_ipu6ep_mtl(hw_ver)) {
+ isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;
+
+ isys_ipdata.csi2.ctrl0_irq_edge =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
+ isys_ipdata.csi2.ctrl0_irq_clear =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
+ isys_ipdata.csi2.ctrl0_irq_mask =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
+ isys_ipdata.csi2.ctrl0_irq_enable =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
+ isys_ipdata.csi2.ctrl0_irq_lnp =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
+ isys_ipdata.csi2.ctrl0_irq_status =
+ IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
+ isys_ipdata.csi2.fw_access_port_ofs =
+ CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
+ isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
+ isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
+ }
+
+ if (is_ipu6se(hw_ver)) {
+ isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
+ isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+ isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
+ isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
+ isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
+ isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
+ isys_ipdata.sensor_type_start =
+ IPU6SE_FW_ISYS_SENSOR_TYPE_START;
+ isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
+ isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
+ isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
+ isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
+ isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
+ psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
+ }
+}
+
+static struct ipu6_bus_device *
+ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
+ struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
+ const struct ipu6_isys_internal_pdata *ipdata)
+{
+ struct device *dev = &pdev->dev;
+ struct ipu6_bus_device *isys_adev;
+ struct ipu6_isys_pdata *pdata;
+ int ret;
+
+ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+ if (ret) {
+ dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
+ return ERR_PTR(ret);
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
+ IPU6_ISYS_NAME);
+ if (IS_ERR(isys_adev)) {
+ dev_err_probe(dev, PTR_ERR(isys_adev),
+ "ipu6_bus_initialize_device isys failed\n");
+ kfree(pdata);
+ return ERR_CAST(isys_adev);
+ }
+
+ isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(isys_adev->mmu)) {
+ dev_err_probe(dev, PTR_ERR(isys_adev->mmu),
+ "ipu6_mmu_init(isys_adev->mmu) failed\n");
+ put_device(&isys_adev->auxdev.dev);
+ kfree(pdata);
+ return ERR_CAST(isys_adev->mmu);
+ }
+
+ isys_adev->mmu->dev = &isys_adev->auxdev.dev;
+
+ ret = ipu6_bus_add_device(isys_adev);
+ if (ret) {
+ kfree(pdata);
+ return ERR_PTR(ret);
+ }
+
+ return isys_adev;
+}
+
+static struct ipu6_bus_device *
+ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
+ struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
+ const struct ipu6_psys_internal_pdata *ipdata)
+{
+ struct ipu6_bus_device *psys_adev;
+ struct ipu6_psys_pdata *pdata;
+ int ret;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
+ IPU6_PSYS_NAME);
+ if (IS_ERR(psys_adev)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
+ "ipu6_bus_initialize_device psys failed\n");
+ kfree(pdata);
+ return ERR_CAST(psys_adev);
+ }
+
+ psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(psys_adev->mmu)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu),
+ "ipu6_mmu_init(psys_adev->mmu) failed\n");
+ put_device(&psys_adev->auxdev.dev);
+ kfree(pdata);
+ return ERR_CAST(psys_adev->mmu);
+ }
+
+ psys_adev->mmu->dev = &psys_adev->auxdev.dev;
+
+ ret = ipu6_bus_add_device(psys_adev);
+ if (ret) {
+ kfree(pdata);
+ return ERR_PTR(ret);
+ }
+
+ return psys_adev;
+}
+
+static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
+{
+ int ret;
+
+ /* disable IPU6 PCI ATS on mtl ES2 */
+ if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
+ pci_ats_supported(dev))
+ pci_disable_ats(dev);
+
+ /* No PCI msi capability for IPU6EP */
+ if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
+ /* likely does nothing as MSI is not enabled by default */
+ pci_disable_msi(dev);
+ return 0;
+ }
+
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret, "Request msi failed");
+
+ return 0;
+}
+
+static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
+{
+ u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+
+ if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
+ val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+ else
+ val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+
+ if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
+ val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+ else
+ val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+
+ writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
+}
+
+static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
+ struct device *dev = &pdev->dev;
+ void __iomem *isys_base = NULL;
+ void __iomem *psys_base = NULL;
+ struct ipu6_device *isp;
+ phys_addr_t phys;
+ u32 val, version, sku_id;
+ int ret;
+
+ isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->pdev = pdev;
+ INIT_LIST_HEAD(&isp->devices);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Enable PCI device failed\n");
+
+ phys = pci_resource_start(pdev, IPU6_PCI_BAR);
+ dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
+
+ ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to remap I/O memory\n");
+
+ isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
+ pci_set_drvdata(pdev, isp);
+ pci_set_master(pdev);
+
+ isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
+ switch (id->device) {
+ case PCI_DEVICE_ID_INTEL_IPU6:
+ isp->hw_ver = IPU6_VER_6;
+ isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
+ break;
+ case PCI_DEVICE_ID_INTEL_IPU6SE:
+ isp->hw_ver = IPU6_VER_6SE;
+ isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
+ isp->cpd_metadata_cmpnt_size =
+ sizeof(struct ipu6se_cpd_metadata_cmpnt);
+ break;
+ case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
+ case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
+ isp->hw_ver = IPU6_VER_6EP;
+ isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
+ break;
+ case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
+ isp->hw_ver = IPU6_VER_6EP;
+ isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
+ break;
+ case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
+ isp->hw_ver = IPU6_VER_6EP_MTL;
+ isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV,
+ "Unsupported IPU6 device %x\n",
+ id->device);
+ }
+
+ ipu6_internal_pdata_init(isp);
+
+ isys_base = isp->base + isys_ipdata.hw_variant.offset;
+ psys_base = isp->base + psys_ipdata.hw_variant.offset;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
+
+ ret = dma_set_max_seg_size(dev, UINT_MAX);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+
+ ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
+ if (ret)
+ return ret;
+
+ ret = ipu6_buttress_init(isp);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "Requesting signed firmware %s failed\n",
+ isp->cpd_fw_name);
+ goto buttress_exit;
+ }
+
+ ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+ isp->cpd_fw->size);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "Failed to validate cpd\n");
+ goto out_ipu6_bus_del_devices;
+ }
+
+ isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
+ sizeof(isys_buttress_ctrl), GFP_KERNEL);
+ if (!isys_ctrl) {
+ ret = -ENOMEM;
+ goto out_ipu6_bus_del_devices;
+ }
+
+ isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
+ &isys_ipdata);
+ if (IS_ERR(isp->isys)) {
+ ret = PTR_ERR(isp->isys);
+ goto out_ipu6_bus_del_devices;
+ }
+
+ psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
+ sizeof(psys_buttress_ctrl), GFP_KERNEL);
+ if (!psys_ctrl) {
+ ret = -ENOMEM;
+ goto out_ipu6_bus_del_devices;
+ }
+
+ isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
+ psys_base, &psys_ipdata);
+ if (IS_ERR(isp->psys)) {
+ ret = PTR_ERR(isp->psys);
+ goto out_ipu6_bus_del_devices;
+ }
+
+ ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
+ if (ret < 0)
+ goto out_ipu6_bus_del_devices;
+
+ ret = ipu6_mmu_hw_init(isp->psys->mmu);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "Failed to set MMU hardware\n");
+ goto out_ipu6_bus_del_devices;
+ }
+
+ ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
+ &isp->psys->fw_sgt);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
+ goto out_ipu6_bus_del_devices;
+ }
+
+ ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "failed to create pkg dir\n");
+ goto out_ipu6_bus_del_devices;
+ }
+
+ ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
+ ipu6_buttress_isr_threaded,
+ IRQF_SHARED, IPU6_NAME, isp);
+ if (ret) {
+ dev_err_probe(dev, ret, "Requesting irq failed\n");
+ goto out_ipu6_bus_del_devices;
+ }
+
+ ret = ipu6_buttress_authenticate(isp);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "FW authentication failed\n");
+ goto out_free_irq;
+ }
+
+ ipu6_mmu_hw_cleanup(isp->psys->mmu);
+ pm_runtime_put(&isp->psys->auxdev.dev);
+
+ /* Configure the arbitration mechanisms for VC requests */
+ ipu6_configure_vc_mechanism(isp);
+
+ val = readl(isp->base + BUTTRESS_REG_SKU);
+ sku_id = FIELD_GET(GENMASK(6, 4), val);
+ version = FIELD_GET(GENMASK(3, 0), val);
+ dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
+ pdev->device, isp->hw_ver);
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+
+ isp->bus_ready_to_probe = true;
+
+ return 0;
+
+out_free_irq:
+ devm_free_irq(dev, pdev->irq, isp);
+out_ipu6_bus_del_devices:
+ if (isp->psys) {
+ ipu6_cpd_free_pkg_dir(isp->psys);
+ ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
+ }
+ if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
+ ipu6_mmu_cleanup(isp->psys->mmu);
+ if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
+ ipu6_mmu_cleanup(isp->isys->mmu);
+ ipu6_bus_del_devices(pdev);
+ release_firmware(isp->cpd_fw);
+buttress_exit:
+ ipu6_buttress_exit(isp);
+
+ return ret;
+}
+
+static void ipu6_pci_remove(struct pci_dev *pdev)
+{
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+ struct ipu6_mmu *isys_mmu = isp->isys->mmu;
+ struct ipu6_mmu *psys_mmu = isp->psys->mmu;
+
+ devm_free_irq(&pdev->dev, pdev->irq, isp);
+ ipu6_cpd_free_pkg_dir(isp->psys);
+
+ ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
+ ipu6_buttress_exit(isp);
+
+ ipu6_bus_del_devices(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ release_firmware(isp->cpd_fw);
+
+ ipu6_mmu_cleanup(psys_mmu);
+ ipu6_mmu_cleanup(isys_mmu);
+}
+
+static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&isp->pdev->dev);
+}
+
+static void ipu6_pci_reset_done(struct pci_dev *pdev)
+{
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+
+ ipu6_buttress_restore(isp);
+ if (isp->secure_mode)
+ ipu6_buttress_reset_authentication(isp);
+
+ isp->need_ipc_reset = true;
+ pm_runtime_allow(&isp->pdev->dev);
+}
+
+/*
+ * The PCI core requires the driver to provide these callbacks to enable
+ * PCI device level PM state transitions (D0<->D3)
+ */
+static int ipu6_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int ipu6_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+ struct ipu6_buttress *b = &isp->buttress;
+ int ret;
+
+ /* Configure the arbitration mechanisms for VC requests */
+ ipu6_configure_vc_mechanism(isp);
+
+ isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
+ dev_info(dev, "IPU6 in %s mode\n",
+ isp->secure_mode ? "secure" : "non-secure");
+
+ ipu6_buttress_restore(isp);
+
+ ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+ if (ret)
+ dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");
+
+ ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
+ if (ret < 0) {
+ dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
+ return 0;
+ }
+
+ ret = ipu6_buttress_authenticate(isp);
+ if (ret)
+ dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);
+
+ pm_runtime_put(&isp->psys->auxdev.dev);
+
+ return 0;
+}
+
+static int ipu6_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu6_device *isp = pci_get_drvdata(pdev);
+ int ret;
+
+ ipu6_configure_vc_mechanism(isp);
+ ipu6_buttress_restore(isp);
+
+ if (isp->need_ipc_reset) {
+ struct ipu6_buttress *b = &isp->buttress;
+
+ isp->need_ipc_reset = false;
+ ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+ if (ret)
+ dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipu6_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
+ RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
+};
+
+MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);
+
+static const struct pci_error_handlers pci_err_handlers = {
+ .reset_prepare = ipu6_pci_reset_prepare,
+ .reset_done = ipu6_pci_reset_done,
+};
+
+static struct pci_driver ipu6_pci_driver = {
+ .name = IPU6_NAME,
+ .id_table = ipu6_pci_tbl,
+ .probe = ipu6_pci_probe,
+ .remove = ipu6_pci_remove,
+ .driver = {
+ .pm = pm_ptr(&ipu6_pm_ops),
+ },
+ .err_handler = &pci_err_handlers,
+};
+
+module_pci_driver(ipu6_pci_driver);
+
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU6 PCI driver");
diff --git a/drivers/media/pci/intel/ipu6/ipu6.h b/drivers/media/pci/intel/ipu6/ipu6.h
new file mode 100644
index 000000000000..92e3c3414c91
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2024 Intel Corporation */
+
+#ifndef IPU6_H
+#define IPU6_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "ipu6-buttress.h"
+
+struct firmware;
+struct pci_dev;
+struct ipu6_bus_device;
+
+#define IPU6_NAME "intel-ipu6"
+#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6"
+
+#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin"
+#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin"
+#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin"
+#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin"
+#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin"
+
+enum ipu6_version {
+ IPU6_VER_INVALID = 0,
+ IPU6_VER_6 = 1,
+ IPU6_VER_6SE = 3,
+ IPU6_VER_6EP = 5,
+ IPU6_VER_6EP_MTL = 6,
+};
+
+/*
+ * IPU6 - TGL
+ * IPU6SE - JSL
+ * IPU6EP - ADL/RPL
+ * IPU6EP_MTL - MTL
+ */
+static inline bool is_ipu6se(u8 hw_ver)
+{
+ return hw_ver == IPU6_VER_6SE;
+}
+
+static inline bool is_ipu6ep(u8 hw_ver)
+{
+ return hw_ver == IPU6_VER_6EP;
+}
+
+static inline bool is_ipu6ep_mtl(u8 hw_ver)
+{
+ return hw_ver == IPU6_VER_6EP_MTL;
+}
+
+static inline bool is_ipu6_tgl(u8 hw_ver)
+{
+ return hw_ver == IPU6_VER_6;
+}
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is one
+ * line, but it must be at least 1024 bytes. The value could differ between
+ * versions / generations, so provide it via platform data.
+ */
+#define IPU6_ISYS_OVERALLOC_MIN 1024
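+
+/*
+ * Rough worked example, assuming the over-allocation amounts to
+ * max(bytes_per_line, IPU6_ISYS_OVERALLOC_MIN): a 1920-pixel line at
+ * 2 bytes per pixel over-allocates 3840 bytes (one line), while a very
+ * narrow stream still gets at least 1024 bytes.
+ */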
+
+/* GDA has 128 physical pages; the page size is 2K for IPU6 and 1K for others */
+#define IPU6_DEVICE_GDA_NR_PAGES 128
+
+/* Virtualization factor to calculate the available virtual pages */
+#define IPU6_DEVICE_GDA_VIRT_FACTOR 32
+
+struct ipu6_device {
+ struct pci_dev *pdev;
+ struct list_head devices;
+ struct ipu6_bus_device *isys;
+ struct ipu6_bus_device *psys;
+ struct ipu6_buttress buttress;
+
+ const struct firmware *cpd_fw;
+ const char *cpd_fw_name;
+ u32 cpd_metadata_cmpnt_size;
+
+ void __iomem *base;
+ bool need_ipc_reset;
+ bool secure_mode;
+ u8 hw_ver;
+ bool bus_ready_to_probe;
+};
+
+#define IPU6_ISYS_NAME "isys"
+#define IPU6_PSYS_NAME "psys"
+
+#define IPU6_MMU_MAX_DEVICES 4
+#define IPU6_MMU_ADDR_BITS 32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU6_MMU_ADDR_BITS_NON_SECURE 31
+
+#define IPU6_MMU_MAX_TLB_L1_STREAMS 32
+#define IPU6_MMU_MAX_TLB_L2_STREAMS 32
+#define IPU6_MAX_LI_BLOCK_ADDR 128
+#define IPU6_MAX_L2_BLOCK_ADDR 64
+
+#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX
+#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX
+
+/*
+ * To maximize IOSF utilization, IPU6 needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. The CDC FIFO burst collectors have a configurable
+ * threshold which is set based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with an IOSF interface, for VC0, VC1 and VC2
+ * psys has 4 ports with an IOSF interface, for VC0, VC1w, VC1r and VC2
+ *
+ * The threshold values are pre-defined, arrived at after performance
+ * evaluation on each type of IPU6.
+ */
+#define IPU6_MAX_VC_IOSF_PORTS 4
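+
+/*
+ * A minimal sketch (an assumption, not taken from this driver) of how the
+ * per-FIFO CDC thresholds from the platform data could be programmed with
+ * the IPU6_REG_ISYS_CDC_THRESHOLD() macro from ipu6-platform-regs.h, where
+ * isys_base is assumed to be the mapped ISYS MMIO base:
+ *
+ *	for (i = 0; i < hw_variant->cdc_fifos; i++)
+ *		writel(hw_variant->cdc_fifo_threshold[i],
+ *		       isys_base + IPU6_REG_ISYS_CDC_THRESHOLD(i));
+ */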
+
+/*
+ * IPU6 must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for VC0 and VC1: 0 means rearbitrate on
+ * stall and 1 means stall until the request is completed.
+ */
+#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0
+#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC0 \
+ IPU6_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC1 \
+ IPU6_BTRS_ARB_MODE_TYPE_REARB
+
+/*
+ * MMU Invalidation HW bug workaround by ZLW mechanism
+ *
+ * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in
+ * a wrong translation or a replication of the translation, causing data
+ * corruption. So we cannot directly use the MMU V2 invalidation registers
+ * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
+ * clear the TLB by evicting all valid translations, filling it with trash
+ * buffer translations (the trash buffer is guaranteed not to be used by any
+ * other process). ZLW is used to fill the L1 and L2 caches with the trash
+ * buffer translations. ZLW, or Zero Length Write, is a pre-fetch mechanism
+ * that loads pages into the L1 and L2 caches without triggering any memory
+ * operations.
+ *
+ * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream.
+ * One L1 block has 16 entries, hence points to 16 * 4K pages.
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream.
+ * One L2 block maps to 1024 L1 entries, hence points to a 4MB address range.
+ * 2 blocks per L2 stream means one stream points to an 8MB range.
+ *
+ * As we need to clear the caches and 8MB is the biggest cache size, we need a
+ * trash buffer which covers an 8MB address range. As these trash buffers are
+ * not used for any memory transactions, we need only the least amount of
+ * physical memory. So we reserve an 8MB IOVA address range but only one page
+ * of physical memory. Every page in this 8MB IOVA range is then mapped to the
+ * same physical memory page.
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU6_MMUV2_MAX_L2_BLOCKS 2
+/* Max L1 blocks per stream */
+#define IPU6_MMUV2_MAX_L1_BLOCKS 16
+#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK 16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE
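+
+/*
+ * Worked numbers, assuming 4 KiB pages: IPU6_MMUV2_L2_RANGE is
+ * 1024 * 4 KiB = 4 MiB, so IPU6_MMUV2_TRASH_RANGE is 2 * 4 MiB = 8 MiB,
+ * matching the 8MB IOVA range described above, and
+ * MMUV2_TRASH_L1_BLOCK_OFFSET is 16 * 4 KiB = 64 KiB per L1 block.
+ */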
+
+/*
+ * In some of the IPU6 MMUs, there is a provision to configure the L1 and L2
+ * page table caches. Both of these caches are divided into multiple sections
+ * called streams. There is a maximum of 16 streams for both caches. Each of
+ * these sections is subdivided into multiple blocks. nr_l1streams = 0 and
+ * nr_l2streams = 0 means the MMU is of type MMU_V1 and does not support
+ * L1/L2 page table caches.
+ *
+ * The L1 per-stream block sizes are configurable and vary per use case.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of pages to reduce cache lookup misses. To
+ * enable the pre-fetching, the MMU1 AT (Address Translator) device registers
+ * need to be configured.
+ *
+ * There are four types of memory accesses which require ZLW configuration.
+ * ZLW (Zero Length Write) is a mechanism to enable VT-d pre-fetching on the IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 1
+ * Set ZLW_N to "N" pages so that the ZLW will be inserted N pages ahead,
+ * where N is pre-defined and hardcoded in the platform data
+ * Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 1,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 0,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ * Set ZLW_EN -> 0
+ * set ZLW_PAGE_CROSS_1D -> 0,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory access, four registers are
+ * available. Hence to track these four settings, we have the following entries
+ * in the struct ipu6_mmu_hw. Each of these entries are per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary
+ * c. l1_ins_zlw_ahead_pages -> To track how far ahead the ZLW needs to be
+ *    inserted: insert the ZLW request N pages ahead of the address.
+ * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D)
+ *
+ *
+ * Currently the L1/L2 streams, blocks, AT ZLW configurations etc. are
+ * pre-defined as per use-case-specific calculations. Any change to this
+ * pre-defined table has to happen in sync with the IPU6 FW.
+ */
+struct ipu6_mmu_hw {
+ union {
+ unsigned long offset;
+ void __iomem *base;
+ };
+ u32 info_bits;
+ u8 nr_l1streams;
+ /*
+ * L1 has variable blocks per stream - total of 64 blocks and maximum of
+ * 16 blocks per stream. Configurable by using the block start address
+ * per stream. Block start address is calculated from the block size
+ */
+ u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS];
+ /* Is ZLW enabled in each stream */
+ bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS];
+ bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+ u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS];
+ bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+
+ u32 l1_stream_id_reg_offset;
+ u32 l2_stream_id_reg_offset;
+
+ u8 nr_l2streams;
+ /*
+ * L2 has fixed 2 blocks per stream. Block address is calculated
+ * from the block size
+ */
+ u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS];
+ /* flag to track if WA is needed for successive invalidate HW bug */
+ bool insert_read_before_invalidate;
+};
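+
+/*
+ * Illustration, assuming the per-stream block start addresses are the
+ * cumulative sum of the preceding block sizes (an interpretation of the
+ * comment above, not stated explicitly here): for the ISYS MMU0 l1_block_sz
+ * table { 3, 8, 2, 2, ... } in ipu6.c, stream 0 starts at block 0, stream 1
+ * at block 3, stream 2 at block 11, stream 3 at block 13, and so on; the
+ * total of 31 blocks stays within the 64 available L1 blocks.
+ */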
+
+struct ipu6_mmu_pdata {
+ u32 nr_mmus;
+ struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+ int mmid;
+};
+
+struct ipu6_isys_csi2_pdata {
+ void __iomem *base;
+};
+
+struct ipu6_isys_internal_csi2_pdata {
+ u32 nports;
+ u32 irq_mask;
+ u32 ctrl0_irq_edge;
+ u32 ctrl0_irq_clear;
+ u32 ctrl0_irq_mask;
+ u32 ctrl0_irq_enable;
+ u32 ctrl0_irq_lnp;
+ u32 ctrl0_irq_status;
+ u32 fw_access_port_ofs;
+};
+
+struct ipu6_isys_internal_tpg_pdata {
+ u32 ntpgs;
+ u32 *offsets;
+ u32 *sels;
+};
+
+struct ipu6_hw_variants {
+ unsigned long offset;
+ u32 nr_mmus;
+ struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+ u8 cdc_fifos;
+ u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS];
+ u32 dmem_offset;
+ u32 spc_offset;
+};
+
+struct ipu6_isys_internal_pdata {
+ struct ipu6_isys_internal_csi2_pdata csi2;
+ struct ipu6_hw_variants hw_variant;
+ u32 num_parallel_streams;
+ u32 isys_dma_overshoot;
+ u32 sram_gran_shift;
+ u32 sram_gran_size;
+ u32 max_sram_size;
+ u32 max_streams;
+ u32 max_send_queues;
+ u32 max_sram_blocks;
+ u32 max_devq_size;
+ u32 sensor_type_start;
+ u32 sensor_type_end;
+ u32 ltr;
+ u32 memopen_threshold;
+ bool enhanced_iwake;
+};
+
+struct ipu6_isys_pdata {
+ void __iomem *base;
+ const struct ipu6_isys_internal_pdata *ipdata;
+};
+
+struct ipu6_psys_internal_pdata {
+ struct ipu6_hw_variants hw_variant;
+};
+
+struct ipu6_psys_pdata {
+ void __iomem *base;
+ const struct ipu6_psys_internal_pdata *ipdata;
+};
+
+int ipu6_fw_authenticate(void *data, u64 val);
+void ipu6_configure_spc(struct ipu6_device *isp,
+ const struct ipu6_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+ dma_addr_t pkg_dir_dma_addr);
+#endif /* IPU6_H */
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 55e0c60c420c..89b582a221ab 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -19,12 +19,15 @@
#include <linux/mei_cl_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
+#include <media/ipu-bridge.h>
+#include <media/ipu6-pci-table.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
@@ -661,11 +664,23 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct device *dev = &cldev->dev;
+ struct pci_dev *ipu;
struct mei_csi *csi;
+ unsigned int i;
int ret;
- if (!dev_fwnode(dev))
- return -EPROBE_DEFER;
+ for (i = 0, ipu = NULL; !ipu && ipu6_pci_tbl[i].vendor; i++)
+ ipu = pci_get_device(ipu6_pci_tbl[i].vendor,
+ ipu6_pci_tbl[i].device, NULL);
+
+ if (!ipu)
+ return -ENODEV;
+
+ ret = ipu_bridge_init(&ipu->dev, ipu_bridge_parse_ssdb);
+ if (ret < 0)
+ return ret;
+ if (WARN_ON(!dev_fwnode(dev)))
+ return -ENXIO;
csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
if (!csi)
@@ -784,6 +799,7 @@ static struct mei_cl_driver mei_csi_driver = {
module_mei_cl_driver(mei_csi_driver);
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Device driver for IVSC CSI");
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 9bcf10a77fd3..60498a5abebf 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -493,13 +493,13 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct mgb4_dev *mgbdev;
struct resource video = {
.start = 0x0,
- .end = 0x100,
+ .end = 0xff,
.flags = IORESOURCE_MEM,
.name = "mgb4-video",
};
struct resource cmt = {
.start = 0x1000,
- .end = 0x1800,
+ .end = 0x17ff,
.flags = IORESOURCE_MEM,
.name = "mgb4-cmt",
};
diff --git a/drivers/media/pci/mgb4/mgb4_regs.c b/drivers/media/pci/mgb4/mgb4_regs.c
index 53d4e4503a74..31befd722d72 100644
--- a/drivers/media/pci/mgb4/mgb4_regs.c
+++ b/drivers/media/pci/mgb4/mgb4_regs.c
@@ -10,7 +10,7 @@
int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
{
regs->mapbase = res->start;
- regs->mapsize = res->end - res->start;
+ regs->mapsize = resource_size(res);
if (!request_mem_region(regs->mapbase, regs->mapsize, res->name))
return -EINVAL;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index 46676f2c89c7..1c885d620b75 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -135,7 +135,7 @@ static void netup_i2c_fifo_tx(struct netup_i2c *i2c)
(readw(&i2c->regs->tx_fifo.stat_ctrl) & 0x3f);
u32 msg_length = i2c->msg->len - i2c->xmit_size;
- msg_length = (msg_length < fifo_space ? msg_length : fifo_space);
+ msg_length = min(msg_length, fifo_space);
while (msg_length--) {
data = i2c->msg->buf[i2c->xmit_size++];
writeb(data, &i2c->regs->tx_fifo.data8);
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 7481f553f959..24ec576dc3bf 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan)
}
if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
- dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
+ ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
+ if (ret != 0)
+ goto err;
set_transfer(chan, 1);
chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
set_transfer(&chan->dev->channel[2], 1);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index d3cde05a6eba..dd2236c5c4a1 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1096,9 +1096,6 @@ static void snd_saa7134_free(struct snd_card * card)
if (chip->dev->dmasound.priv_data == NULL)
return;
- if (chip->irq >= 0)
- free_irq(chip->irq, &chip->dev->dmasound);
-
chip->dev->dmasound.priv_data = NULL;
}
@@ -1147,10 +1144,8 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
chip->iobase = pci_resource_start(dev->pci, 0);
- err = request_irq(dev->pci->irq, saa7134_alsa_irq,
- IRQF_SHARED, dev->name,
- (void*) &dev->dmasound);
-
+ err = devm_request_irq(&dev->pci->dev, dev->pci->irq, saa7134_alsa_irq,
+ IRQF_SHARED, dev->name, &dev->dmasound);
if (err < 0) {
pr_err("%s: can't get IRQ %d for ALSA\n",
dev->name, dev->pci->irq);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 6d87fbb0ee04..1a9e2bccc413 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -144,11 +144,8 @@ static void free_solo_dev(struct solo_dev *solo_dev)
/* Now cleanup the PCI device */
solo_irq_off(solo_dev, ~0);
- free_irq(pdev->irq, solo_dev);
- pci_iounmap(pdev, solo_dev->reg_base);
}
- pci_release_regions(pdev);
pci_disable_device(pdev);
v4l2_device_unregister(&solo_dev->v4l2_dev);
pci_set_drvdata(pdev, NULL);
@@ -480,15 +477,10 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, 0x40, 0x00);
pci_write_config_byte(pdev, 0x41, 0x00);
- ret = pci_request_regions(pdev, SOLO6X10_NAME);
+ ret = pcim_iomap_regions(pdev, BIT(0), SOLO6X10_NAME);
if (ret)
goto fail_probe;
-
- solo_dev->reg_base = pci_ioremap_bar(pdev, 0);
- if (solo_dev->reg_base == NULL) {
- ret = -ENOMEM;
- goto fail_probe;
- }
+ solo_dev->reg_base = pcim_iomap_table(pdev)[0];
chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
SOLO_CHIP_ID_MASK;
@@ -551,8 +543,8 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* PLL locking time of 1ms */
mdelay(1);
- ret = request_irq(pdev->irq, solo_isr, IRQF_SHARED, SOLO6X10_NAME,
- solo_dev);
+ ret = devm_request_irq(&pdev->dev, pdev->irq, solo_isr, IRQF_SHARED,
+ SOLO6X10_NAME, solo_dev);
if (ret)
goto fail_probe;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index a47c5850ef87..2e62c938e2cb 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * budget-av.c: driver for the SAA7146 based Budget DVB cards
- * with analog video in
+ * budget-av.ko: driver for the SAA7146 based Budget DVB cards
+ * with analog video input (and optionally with CI)
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
@@ -16,7 +16,6 @@
* the project's page is at https://linuxtv.org
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "budget.h"
#include "stv0299.h"
@@ -63,8 +62,8 @@ struct budget_av {
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
-
-/* GPIO Connections:
+/*
+ * GPIO Connections:
* 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
* 1 - CI memory select 0=>IO memory, 1=>Attribute Memory
* 2 - CI Card Enable (Active Low)
@@ -95,12 +94,12 @@ static u8 i2c_readreg(struct i2c_adapter *i2c, u8 id, u8 reg)
return mm2[0];
}
-static int i2c_readregs(struct i2c_adapter *i2c, u8 id, u8 reg, u8 * buf, u8 len)
+static int i2c_readregs(struct i2c_adapter *i2c, u8 id, u8 reg, u8 *buf, u8 len)
{
u8 mm1[] = { reg };
struct i2c_msg msgs[2] = {
- {.addr = id / 2,.flags = 0,.buf = mm1,.len = 1},
- {.addr = id / 2,.flags = I2C_M_RD,.buf = buf,.len = len}
+ {.addr = id / 2, .flags = 0, .buf = mm1, .len = 1},
+ {.addr = id / 2, .flags = I2C_M_RD, .buf = buf, .len = len}
};
if (i2c_transfer(i2c, msgs, 2) != 2)
@@ -206,7 +205,7 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
if (slot != 0)
return -EINVAL;
- dprintk(1, "ciintf_slot_reset\n");
+ dprintk(1, "ci slot reset\n");
budget_av->slot_status = SLOTSTATUS_RESET;
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTHI); /* disable card */
@@ -235,7 +234,7 @@ static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
if (slot != 0)
return -EINVAL;
- dprintk(1, "ciintf_slot_shutdown\n");
+ dprintk(1, "ci slot shutdown\n");
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
budget_av->slot_status = SLOTSTATUS_NONE;
@@ -251,7 +250,7 @@ static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
if (slot != 0)
return -EINVAL;
- dprintk(1, "ciintf_slot_ts_enable: %d\n", budget_av->slot_status);
+ dprintk(1, "ci slot status: %d\n", budget_av->slot_status);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
@@ -267,8 +266,10 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
if (slot != 0)
return -EINVAL;
- /* test the card detect line - needs to be done carefully
- * since it never goes high for some CAMs on this interface (e.g. topuptv) */
+ /*
+ * test the card detect line - needs to be done carefully
+ * since it never goes high for some CAMs on this interface (e.g. topuptv)
+ */
if (budget_av->slot_status == SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
udelay(1);
@@ -281,12 +282,14 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
}
- /* We also try and read from IO memory to work round the above detection bug. If
+ /*
+ * We also try and read from IO memory to work round the above detection bug. If
* there is no CAM, we will get a timeout. Only done if there is no cam
* present, since this test actually breaks some cams :(
*
* if the CI interface is not open, we also do the above test since we
- * don't care if the cam has problems - we'll be resetting it on open() anyway */
+ * don't care if the cam has problems - we'll be resetting it on open() anyway
+ */
if ((budget_av->slot_status == SLOTSTATUS_NONE) || (!open)) {
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1);
@@ -305,16 +308,14 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
/* read from attribute memory in reset/ready state to know when the CAM is ready */
if (budget_av->slot_status == SLOTSTATUS_RESET) {
result = ciintf_read_attribute_mem(ca, slot, 0);
- if (result == 0x1d) {
+ if (result == 0x1d)
budget_av->slot_status = SLOTSTATUS_READY;
- }
}
/* work out correct return code */
if (budget_av->slot_status != SLOTSTATUS_NONE) {
- if (budget_av->slot_status & SLOTSTATUS_READY) {
+ if (budget_av->slot_status & SLOTSTATUS_READY)
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
- }
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
return 0;
@@ -349,8 +350,9 @@ static int ciintf_init(struct budget_av *budget_av)
budget_av->budget.ci_present = 1;
budget_av->slot_status = SLOTSTATUS_NONE;
- if ((result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter,
- &budget_av->ca, 0, 1)) != 0) {
+ result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter,
+ &budget_av->ca, 0, 1);
+ if (result != 0) {
pr_err("ci initialisation failed\n");
goto error;
}
@@ -439,7 +441,7 @@ static int saa7113_setinput(struct budget_av *budget_av, int input)
{
struct budget *budget = &budget_av->budget;
- if (1 != budget_av->has_saa7113)
+ if (budget_av->has_saa7113 != 1)
return -ENODEV;
if (input == 1) {
@@ -448,8 +450,9 @@ static int saa7113_setinput(struct budget_av *budget_av, int input)
} else if (input == 0) {
i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc0);
i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x00);
- } else
+ } else {
return -EINVAL;
+ }
budget_av->cur_input = input;
return 0;
@@ -492,7 +495,7 @@ static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe)
u32 div;
u8 buf[4];
struct budget *budget = fe->dvb->priv;
- struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
if ((c->frequency < 950000) || (c->frequency > 2150000))
return -EINVAL;
@@ -606,7 +609,7 @@ static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 buf[6];
- struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf) };
int i;
#define CU1216_IF 36125000
@@ -670,7 +673,7 @@ static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
{
struct budget *budget = fe->dvb->priv;
static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab };
- struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) };
+ struct i2c_msg tuner_msg = {.addr = 0x60, .flags = 0, .buf = tu1216_init, .len = sizeof(tu1216_init) };
// setup PLL configuration
if (fe->ops.i2c_gate_ctrl)
@@ -687,7 +690,7 @@ static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 tuner_buf[4];
- struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len =
+ struct i2c_msg tuner_msg = {.addr = 0x60, .flags = 0, .buf = tuner_buf, .len =
sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
@@ -865,7 +868,7 @@ static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
static const struct stv0299_config philips_sd1878_config = {
.demod_address = 0x68,
- .inittab = philips_sd1878_inittab,
+ .inittab = philips_sd1878_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
@@ -878,222 +881,222 @@ static const struct stv0299_config philips_sd1878_config = {
/* KNC1 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = {
- { STB0899_DEV_ID , 0x81 },
- { STB0899_DISCNTRL1 , 0x32 },
- { STB0899_DISCNTRL2 , 0x80 },
- { STB0899_DISRX_ST0 , 0x04 },
- { STB0899_DISRX_ST1 , 0x00 },
- { STB0899_DISPARITY , 0x00 },
- { STB0899_DISSTATUS , 0x20 },
- { STB0899_DISF22 , 0x8c },
- { STB0899_DISF22RX , 0x9a },
- { STB0899_SYSREG , 0x0b },
- { STB0899_ACRPRESC , 0x11 },
- { STB0899_ACRDIV1 , 0x0a },
- { STB0899_ACRDIV2 , 0x05 },
- { STB0899_DACR1 , 0x00 },
- { STB0899_DACR2 , 0x00 },
- { STB0899_OUTCFG , 0x00 },
- { STB0899_MODECFG , 0x00 },
- { STB0899_IRQSTATUS_3 , 0x30 },
- { STB0899_IRQSTATUS_2 , 0x00 },
- { STB0899_IRQSTATUS_1 , 0x00 },
- { STB0899_IRQSTATUS_0 , 0x00 },
- { STB0899_IRQMSK_3 , 0xf3 },
- { STB0899_IRQMSK_2 , 0xfc },
- { STB0899_IRQMSK_1 , 0xff },
- { STB0899_IRQMSK_0 , 0xff },
- { STB0899_IRQCFG , 0x00 },
- { STB0899_I2CCFG , 0x88 },
- { STB0899_I2CRPT , 0x58 }, /* Repeater=8, Stop=disabled */
- { STB0899_IOPVALUE5 , 0x00 },
- { STB0899_IOPVALUE4 , 0x20 },
- { STB0899_IOPVALUE3 , 0xc9 },
- { STB0899_IOPVALUE2 , 0x90 },
- { STB0899_IOPVALUE1 , 0x40 },
- { STB0899_IOPVALUE0 , 0x00 },
- { STB0899_GPIO00CFG , 0x82 },
- { STB0899_GPIO01CFG , 0x82 },
- { STB0899_GPIO02CFG , 0x82 },
- { STB0899_GPIO03CFG , 0x82 },
- { STB0899_GPIO04CFG , 0x82 },
- { STB0899_GPIO05CFG , 0x82 },
- { STB0899_GPIO06CFG , 0x82 },
- { STB0899_GPIO07CFG , 0x82 },
- { STB0899_GPIO08CFG , 0x82 },
- { STB0899_GPIO09CFG , 0x82 },
- { STB0899_GPIO10CFG , 0x82 },
- { STB0899_GPIO11CFG , 0x82 },
- { STB0899_GPIO12CFG , 0x82 },
- { STB0899_GPIO13CFG , 0x82 },
- { STB0899_GPIO14CFG , 0x82 },
- { STB0899_GPIO15CFG , 0x82 },
- { STB0899_GPIO16CFG , 0x82 },
- { STB0899_GPIO17CFG , 0x82 },
- { STB0899_GPIO18CFG , 0x82 },
- { STB0899_GPIO19CFG , 0x82 },
- { STB0899_GPIO20CFG , 0x82 },
- { STB0899_SDATCFG , 0xb8 },
- { STB0899_SCLTCFG , 0xba },
- { STB0899_AGCRFCFG , 0x08 }, /* 0x1c */
- { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
- { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
- { STB0899_DIRCLKCFG , 0x82 },
- { STB0899_CLKOUT27CFG , 0x7e },
- { STB0899_STDBYCFG , 0x82 },
- { STB0899_CS0CFG , 0x82 },
- { STB0899_CS1CFG , 0x82 },
- { STB0899_DISEQCOCFG , 0x20 },
- { STB0899_GPIO32CFG , 0x82 },
- { STB0899_GPIO33CFG , 0x82 },
- { STB0899_GPIO34CFG , 0x82 },
- { STB0899_GPIO35CFG , 0x82 },
- { STB0899_GPIO36CFG , 0x82 },
- { STB0899_GPIO37CFG , 0x82 },
- { STB0899_GPIO38CFG , 0x82 },
- { STB0899_GPIO39CFG , 0x82 },
- { STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
- { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
- { STB0899_FILTCTRL , 0x00 },
- { STB0899_SYSCTRL , 0x00 },
- { STB0899_STOPCLK1 , 0x20 },
- { STB0899_STOPCLK2 , 0x00 },
- { STB0899_INTBUFSTATUS , 0x00 },
- { STB0899_INTBUFCTRL , 0x0a },
- { 0xffff , 0xff },
+ { STB0899_DEV_ID, 0x81 },
+ { STB0899_DISCNTRL1, 0x32 },
+ { STB0899_DISCNTRL2, 0x80 },
+ { STB0899_DISRX_ST0, 0x04 },
+ { STB0899_DISRX_ST1, 0x00 },
+ { STB0899_DISPARITY, 0x00 },
+ { STB0899_DISSTATUS, 0x20 },
+ { STB0899_DISF22, 0x8c },
+ { STB0899_DISF22RX, 0x9a },
+ { STB0899_SYSREG, 0x0b },
+ { STB0899_ACRPRESC, 0x11 },
+ { STB0899_ACRDIV1, 0x0a },
+ { STB0899_ACRDIV2, 0x05 },
+ { STB0899_DACR1, 0x00 },
+ { STB0899_DACR2, 0x00 },
+ { STB0899_OUTCFG, 0x00 },
+ { STB0899_MODECFG, 0x00 },
+ { STB0899_IRQSTATUS_3, 0x30 },
+ { STB0899_IRQSTATUS_2, 0x00 },
+ { STB0899_IRQSTATUS_1, 0x00 },
+ { STB0899_IRQSTATUS_0, 0x00 },
+ { STB0899_IRQMSK_3, 0xf3 },
+ { STB0899_IRQMSK_2, 0xfc },
+ { STB0899_IRQMSK_1, 0xff },
+ { STB0899_IRQMSK_0, 0xff },
+ { STB0899_IRQCFG, 0x00 },
+ { STB0899_I2CCFG, 0x88 },
+ { STB0899_I2CRPT, 0x58 }, /* Repeater=8, Stop=disabled */
+ { STB0899_IOPVALUE5, 0x00 },
+ { STB0899_IOPVALUE4, 0x20 },
+ { STB0899_IOPVALUE3, 0xc9 },
+ { STB0899_IOPVALUE2, 0x90 },
+ { STB0899_IOPVALUE1, 0x40 },
+ { STB0899_IOPVALUE0, 0x00 },
+ { STB0899_GPIO00CFG, 0x82 },
+ { STB0899_GPIO01CFG, 0x82 },
+ { STB0899_GPIO02CFG, 0x82 },
+ { STB0899_GPIO03CFG, 0x82 },
+ { STB0899_GPIO04CFG, 0x82 },
+ { STB0899_GPIO05CFG, 0x82 },
+ { STB0899_GPIO06CFG, 0x82 },
+ { STB0899_GPIO07CFG, 0x82 },
+ { STB0899_GPIO08CFG, 0x82 },
+ { STB0899_GPIO09CFG, 0x82 },
+ { STB0899_GPIO10CFG, 0x82 },
+ { STB0899_GPIO11CFG, 0x82 },
+ { STB0899_GPIO12CFG, 0x82 },
+ { STB0899_GPIO13CFG, 0x82 },
+ { STB0899_GPIO14CFG, 0x82 },
+ { STB0899_GPIO15CFG, 0x82 },
+ { STB0899_GPIO16CFG, 0x82 },
+ { STB0899_GPIO17CFG, 0x82 },
+ { STB0899_GPIO18CFG, 0x82 },
+ { STB0899_GPIO19CFG, 0x82 },
+ { STB0899_GPIO20CFG, 0x82 },
+ { STB0899_SDATCFG, 0xb8 },
+ { STB0899_SCLTCFG, 0xba },
+ { STB0899_AGCRFCFG, 0x08 }, /* 0x1c */
+ { STB0899_GPIO22, 0x82 }, /* AGCBB2CFG */
+ { STB0899_GPIO21, 0x91 }, /* AGCBB1CFG */
+ { STB0899_DIRCLKCFG, 0x82 },
+ { STB0899_CLKOUT27CFG, 0x7e },
+ { STB0899_STDBYCFG, 0x82 },
+ { STB0899_CS0CFG, 0x82 },
+ { STB0899_CS1CFG, 0x82 },
+ { STB0899_DISEQCOCFG, 0x20 },
+ { STB0899_GPIO32CFG, 0x82 },
+ { STB0899_GPIO33CFG, 0x82 },
+ { STB0899_GPIO34CFG, 0x82 },
+ { STB0899_GPIO35CFG, 0x82 },
+ { STB0899_GPIO36CFG, 0x82 },
+ { STB0899_GPIO37CFG, 0x82 },
+ { STB0899_GPIO38CFG, 0x82 },
+ { STB0899_GPIO39CFG, 0x82 },
+ { STB0899_NCOARSE, 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
+ { STB0899_SYNTCTRL, 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
+ { STB0899_FILTCTRL, 0x00 },
+ { STB0899_SYSCTRL, 0x00 },
+ { STB0899_STOPCLK1, 0x20 },
+ { STB0899_STOPCLK2, 0x00 },
+ { STB0899_INTBUFSTATUS, 0x00 },
+ { STB0899_INTBUFCTRL, 0x0a },
+ { 0xffff, 0xff },
};
static const struct stb0899_s1_reg knc1_stb0899_s1_init_3[] = {
- { STB0899_DEMOD , 0x00 },
- { STB0899_RCOMPC , 0xc9 },
- { STB0899_AGC1CN , 0x41 },
- { STB0899_AGC1REF , 0x08 },
- { STB0899_RTC , 0x7a },
- { STB0899_TMGCFG , 0x4e },
- { STB0899_AGC2REF , 0x33 },
- { STB0899_TLSR , 0x84 },
- { STB0899_CFD , 0xee },
- { STB0899_ACLC , 0x87 },
- { STB0899_BCLC , 0x94 },
- { STB0899_EQON , 0x41 },
- { STB0899_LDT , 0xdd },
- { STB0899_LDT2 , 0xc9 },
- { STB0899_EQUALREF , 0xb4 },
- { STB0899_TMGRAMP , 0x10 },
- { STB0899_TMGTHD , 0x30 },
- { STB0899_IDCCOMP , 0xfb },
- { STB0899_QDCCOMP , 0x03 },
- { STB0899_POWERI , 0x3b },
- { STB0899_POWERQ , 0x3d },
- { STB0899_RCOMP , 0x81 },
- { STB0899_AGCIQIN , 0x80 },
- { STB0899_AGC2I1 , 0x04 },
- { STB0899_AGC2I2 , 0xf5 },
- { STB0899_TLIR , 0x25 },
- { STB0899_RTF , 0x80 },
- { STB0899_DSTATUS , 0x00 },
- { STB0899_LDI , 0xca },
- { STB0899_CFRM , 0xf1 },
- { STB0899_CFRL , 0xf3 },
- { STB0899_NIRM , 0x2a },
- { STB0899_NIRL , 0x05 },
- { STB0899_ISYMB , 0x17 },
- { STB0899_QSYMB , 0xfa },
- { STB0899_SFRH , 0x2f },
- { STB0899_SFRM , 0x68 },
- { STB0899_SFRL , 0x40 },
- { STB0899_SFRUPH , 0x2f },
- { STB0899_SFRUPM , 0x68 },
- { STB0899_SFRUPL , 0x40 },
- { STB0899_EQUAI1 , 0xfd },
- { STB0899_EQUAQ1 , 0x04 },
- { STB0899_EQUAI2 , 0x0f },
- { STB0899_EQUAQ2 , 0xff },
- { STB0899_EQUAI3 , 0xdf },
- { STB0899_EQUAQ3 , 0xfa },
- { STB0899_EQUAI4 , 0x37 },
- { STB0899_EQUAQ4 , 0x0d },
- { STB0899_EQUAI5 , 0xbd },
- { STB0899_EQUAQ5 , 0xf7 },
- { STB0899_DSTATUS2 , 0x00 },
- { STB0899_VSTATUS , 0x00 },
- { STB0899_VERROR , 0xff },
- { STB0899_IQSWAP , 0x2a },
- { STB0899_ECNT1M , 0x00 },
- { STB0899_ECNT1L , 0x00 },
- { STB0899_ECNT2M , 0x00 },
- { STB0899_ECNT2L , 0x00 },
- { STB0899_ECNT3M , 0x00 },
- { STB0899_ECNT3L , 0x00 },
- { STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
- { STB0899_VTH12 , 0xf0 },
- { STB0899_VTH23 , 0xa0 },
- { STB0899_VTH34 , 0x78 },
- { STB0899_VTH56 , 0x4e },
- { STB0899_VTH67 , 0x48 },
- { STB0899_VTH78 , 0x38 },
- { STB0899_PRVIT , 0xff },
- { STB0899_VITSYNC , 0x19 },
- { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
- { STB0899_TSULC , 0x42 },
- { STB0899_RSLLC , 0x40 },
- { STB0899_TSLPL , 0x12 },
- { STB0899_TSCFGH , 0x0c },
- { STB0899_TSCFGM , 0x00 },
- { STB0899_TSCFGL , 0x0c },
- { STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
- { STB0899_RSSYNCDEL , 0x00 },
- { STB0899_TSINHDELH , 0x02 },
- { STB0899_TSINHDELM , 0x00 },
- { STB0899_TSINHDELL , 0x00 },
- { STB0899_TSLLSTKM , 0x00 },
- { STB0899_TSLLSTKL , 0x00 },
- { STB0899_TSULSTKM , 0x00 },
- { STB0899_TSULSTKL , 0xab },
- { STB0899_PCKLENUL , 0x00 },
- { STB0899_PCKLENLL , 0xcc },
- { STB0899_RSPCKLEN , 0xcc },
- { STB0899_TSSTATUS , 0x80 },
- { STB0899_ERRCTRL1 , 0xb6 },
- { STB0899_ERRCTRL2 , 0x96 },
- { STB0899_ERRCTRL3 , 0x89 },
- { STB0899_DMONMSK1 , 0x27 },
- { STB0899_DMONMSK0 , 0x03 },
- { STB0899_DEMAPVIT , 0x5c },
- { STB0899_PLPARM , 0x1f },
- { STB0899_PDELCTRL , 0x48 },
- { STB0899_PDELCTRL2 , 0x00 },
- { STB0899_BBHCTRL1 , 0x00 },
- { STB0899_BBHCTRL2 , 0x00 },
- { STB0899_HYSTTHRESH , 0x77 },
- { STB0899_MATCSTM , 0x00 },
- { STB0899_MATCSTL , 0x00 },
- { STB0899_UPLCSTM , 0x00 },
- { STB0899_UPLCSTL , 0x00 },
- { STB0899_DFLCSTM , 0x00 },
- { STB0899_DFLCSTL , 0x00 },
- { STB0899_SYNCCST , 0x00 },
- { STB0899_SYNCDCSTM , 0x00 },
- { STB0899_SYNCDCSTL , 0x00 },
- { STB0899_ISI_ENTRY , 0x00 },
- { STB0899_ISI_BIT_EN , 0x00 },
- { STB0899_MATSTRM , 0x00 },
- { STB0899_MATSTRL , 0x00 },
- { STB0899_UPLSTRM , 0x00 },
- { STB0899_UPLSTRL , 0x00 },
- { STB0899_DFLSTRM , 0x00 },
- { STB0899_DFLSTRL , 0x00 },
- { STB0899_SYNCSTR , 0x00 },
- { STB0899_SYNCDSTRM , 0x00 },
- { STB0899_SYNCDSTRL , 0x00 },
- { STB0899_CFGPDELSTATUS1 , 0x10 },
- { STB0899_CFGPDELSTATUS2 , 0x00 },
- { STB0899_BBFERRORM , 0x00 },
- { STB0899_BBFERRORL , 0x00 },
- { STB0899_UPKTERRORM , 0x00 },
- { STB0899_UPKTERRORL , 0x00 },
- { 0xffff , 0xff },
+ { STB0899_DEMOD, 0x00 },
+ { STB0899_RCOMPC, 0xc9 },
+ { STB0899_AGC1CN, 0x41 },
+ { STB0899_AGC1REF, 0x08 },
+ { STB0899_RTC, 0x7a },
+ { STB0899_TMGCFG, 0x4e },
+ { STB0899_AGC2REF, 0x33 },
+ { STB0899_TLSR, 0x84 },
+ { STB0899_CFD, 0xee },
+ { STB0899_ACLC, 0x87 },
+ { STB0899_BCLC, 0x94 },
+ { STB0899_EQON, 0x41 },
+ { STB0899_LDT, 0xdd },
+ { STB0899_LDT2, 0xc9 },
+ { STB0899_EQUALREF, 0xb4 },
+ { STB0899_TMGRAMP, 0x10 },
+ { STB0899_TMGTHD, 0x30 },
+ { STB0899_IDCCOMP, 0xfb },
+ { STB0899_QDCCOMP, 0x03 },
+ { STB0899_POWERI, 0x3b },
+ { STB0899_POWERQ, 0x3d },
+ { STB0899_RCOMP, 0x81 },
+ { STB0899_AGCIQIN, 0x80 },
+ { STB0899_AGC2I1, 0x04 },
+ { STB0899_AGC2I2, 0xf5 },
+ { STB0899_TLIR, 0x25 },
+ { STB0899_RTF, 0x80 },
+ { STB0899_DSTATUS, 0x00 },
+ { STB0899_LDI, 0xca },
+ { STB0899_CFRM, 0xf1 },
+ { STB0899_CFRL, 0xf3 },
+ { STB0899_NIRM, 0x2a },
+ { STB0899_NIRL, 0x05 },
+ { STB0899_ISYMB, 0x17 },
+ { STB0899_QSYMB, 0xfa },
+ { STB0899_SFRH, 0x2f },
+ { STB0899_SFRM, 0x68 },
+ { STB0899_SFRL, 0x40 },
+ { STB0899_SFRUPH, 0x2f },
+ { STB0899_SFRUPM, 0x68 },
+ { STB0899_SFRUPL, 0x40 },
+ { STB0899_EQUAI1, 0xfd },
+ { STB0899_EQUAQ1, 0x04 },
+ { STB0899_EQUAI2, 0x0f },
+ { STB0899_EQUAQ2, 0xff },
+ { STB0899_EQUAI3, 0xdf },
+ { STB0899_EQUAQ3, 0xfa },
+ { STB0899_EQUAI4, 0x37 },
+ { STB0899_EQUAQ4, 0x0d },
+ { STB0899_EQUAI5, 0xbd },
+ { STB0899_EQUAQ5, 0xf7 },
+ { STB0899_DSTATUS2, 0x00 },
+ { STB0899_VSTATUS, 0x00 },
+ { STB0899_VERROR, 0xff },
+ { STB0899_IQSWAP, 0x2a },
+ { STB0899_ECNT1M, 0x00 },
+ { STB0899_ECNT1L, 0x00 },
+ { STB0899_ECNT2M, 0x00 },
+ { STB0899_ECNT2L, 0x00 },
+ { STB0899_ECNT3M, 0x00 },
+ { STB0899_ECNT3L, 0x00 },
+ { STB0899_FECAUTO1, 0x06 },
+ { STB0899_FECM, 0x01 },
+ { STB0899_VTH12, 0xf0 },
+ { STB0899_VTH23, 0xa0 },
+ { STB0899_VTH34, 0x78 },
+ { STB0899_VTH56, 0x4e },
+ { STB0899_VTH67, 0x48 },
+ { STB0899_VTH78, 0x38 },
+ { STB0899_PRVIT, 0xff },
+ { STB0899_VITSYNC, 0x19 },
+ { STB0899_RSULC, 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
+ { STB0899_TSULC, 0x42 },
+ { STB0899_RSLLC, 0x40 },
+ { STB0899_TSLPL, 0x12 },
+ { STB0899_TSCFGH, 0x0c },
+ { STB0899_TSCFGM, 0x00 },
+ { STB0899_TSCFGL, 0x0c },
+ { STB0899_TSOUT, 0x4d }, /* 0x0d for CAM */
+ { STB0899_RSSYNCDEL, 0x00 },
+ { STB0899_TSINHDELH, 0x02 },
+ { STB0899_TSINHDELM, 0x00 },
+ { STB0899_TSINHDELL, 0x00 },
+ { STB0899_TSLLSTKM, 0x00 },
+ { STB0899_TSLLSTKL, 0x00 },
+ { STB0899_TSULSTKM, 0x00 },
+ { STB0899_TSULSTKL, 0xab },
+ { STB0899_PCKLENUL, 0x00 },
+ { STB0899_PCKLENLL, 0xcc },
+ { STB0899_RSPCKLEN, 0xcc },
+ { STB0899_TSSTATUS, 0x80 },
+ { STB0899_ERRCTRL1, 0xb6 },
+ { STB0899_ERRCTRL2, 0x96 },
+ { STB0899_ERRCTRL3, 0x89 },
+ { STB0899_DMONMSK1, 0x27 },
+ { STB0899_DMONMSK0, 0x03 },
+ { STB0899_DEMAPVIT, 0x5c },
+ { STB0899_PLPARM, 0x1f },
+ { STB0899_PDELCTRL, 0x48 },
+ { STB0899_PDELCTRL2, 0x00 },
+ { STB0899_BBHCTRL1, 0x00 },
+ { STB0899_BBHCTRL2, 0x00 },
+ { STB0899_HYSTTHRESH, 0x77 },
+ { STB0899_MATCSTM, 0x00 },
+ { STB0899_MATCSTL, 0x00 },
+ { STB0899_UPLCSTM, 0x00 },
+ { STB0899_UPLCSTL, 0x00 },
+ { STB0899_DFLCSTM, 0x00 },
+ { STB0899_DFLCSTL, 0x00 },
+ { STB0899_SYNCCST, 0x00 },
+ { STB0899_SYNCDCSTM, 0x00 },
+ { STB0899_SYNCDCSTL, 0x00 },
+ { STB0899_ISI_ENTRY, 0x00 },
+ { STB0899_ISI_BIT_EN, 0x00 },
+ { STB0899_MATSTRM, 0x00 },
+ { STB0899_MATSTRL, 0x00 },
+ { STB0899_UPLSTRM, 0x00 },
+ { STB0899_UPLSTRL, 0x00 },
+ { STB0899_DFLSTRM, 0x00 },
+ { STB0899_DFLSTRL, 0x00 },
+ { STB0899_SYNCSTR, 0x00 },
+ { STB0899_SYNCDSTRM, 0x00 },
+ { STB0899_SYNCDSTRL, 0x00 },
+ { STB0899_CFGPDELSTATUS1, 0x10 },
+ { STB0899_CFGPDELSTATUS2, 0x00 },
+ { STB0899_BBFERRORM, 0x00 },
+ { STB0899_BBFERRORL, 0x00 },
+ { STB0899_UPKTERRORM, 0x00 },
+ { STB0899_UPKTERRORL, 0x00 },
+ { 0xffff, 0xff },
};
/* STB0899 demodulator config for the KNC1 and clones */
@@ -1153,8 +1156,8 @@ static u8 read_pwm(struct budget_av *budget_av)
{
u8 b = 0xff;
u8 pwm;
- struct i2c_msg msg[] = { {.addr = 0x50,.flags = 0,.buf = &b,.len = 1},
- {.addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1}
+ struct i2c_msg msg[] = { {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
+ {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
};
if ((i2c_transfer(&budget_av->budget.i2c_adap, msg, 2) != 2)
@@ -1196,8 +1199,8 @@ static u8 read_pwm(struct budget_av *budget_av)
static void frontend_init(struct budget_av *budget_av)
{
- struct saa7146_dev * saa = budget_av->budget.dev;
- struct dvb_frontend * fe = NULL;
+ struct saa7146_dev *saa = budget_av->budget.dev;
+ struct dvb_frontend *fe = NULL;
/* Enable / PowerON Frontend */
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
@@ -1207,16 +1210,16 @@ static void frontend_init(struct budget_av *budget_av)
/* additional setup necessary for the PLUS cards */
switch (saa->pci->subsystem_device) {
- case SUBID_DVBS_KNC1_PLUS:
- case SUBID_DVBC_KNC1_PLUS:
- case SUBID_DVBT_KNC1_PLUS:
- case SUBID_DVBC_EASYWATCH:
- case SUBID_DVBC_KNC1_PLUS_MK3:
- case SUBID_DVBS2_KNC1:
- case SUBID_DVBS2_KNC1_OEM:
- case SUBID_DVBS2_EASYWATCH:
- saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
- break;
+ case SUBID_DVBS_KNC1_PLUS:
+ case SUBID_DVBC_KNC1_PLUS:
+ case SUBID_DVBT_KNC1_PLUS:
+ case SUBID_DVBC_EASYWATCH:
+ case SUBID_DVBC_KNC1_PLUS_MK3:
+ case SUBID_DVBS2_KNC1:
+ case SUBID_DVBS2_KNC1_OEM:
+ case SUBID_DVBS2_EASYWATCH:
+ saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
+ break;
}
switch (saa->pci->subsystem_device) {
@@ -1233,15 +1236,13 @@ static void frontend_init(struct budget_av *budget_av)
if (saa->pci->subsystem_vendor == 0x1894) {
fe = dvb_attach(stv0299_attach, &cinergy_1200s_1894_0010_config,
&budget_av->budget.i2c_adap);
- if (fe) {
+ if (fe)
dvb_attach(tua6100_attach, fe, 0x60, &budget_av->budget.i2c_adap);
- }
} else {
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
- if (fe) {
+ if (fe)
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
- }
}
break;
@@ -1253,34 +1254,32 @@ static void frontend_init(struct budget_av *budget_av)
case SUBID_DVBS_EASYWATCH_2:
fe = dvb_attach(stv0299_attach, &philips_sd1878_config,
&budget_av->budget.i2c_adap);
- if (fe) {
+ if (fe)
dvb_attach(dvb_pll_attach, fe, 0x60,
&budget_av->budget.i2c_adap,
DVB_PLL_PHILIPS_SD1878_TDA8261);
- }
break;
case SUBID_DVBS_TYPHOON:
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
- if (fe) {
+ if (fe)
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
- }
break;
case SUBID_DVBS2_KNC1:
case SUBID_DVBS2_KNC1_OEM:
case SUBID_DVBS2_EASYWATCH:
budget_av->reinitialise_demod = 1;
- if ((fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap)))
+ fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap);
+ if (fe)
dvb_attach(tda8261_attach, fe, &sd1878c_config, &budget_av->budget.i2c_adap);
break;
case SUBID_DVBS_CINERGY1200:
fe = dvb_attach(stv0299_attach, &cinergy_1200s_config,
&budget_av->budget.i2c_adap);
- if (fe) {
+ if (fe)
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
- }
break;
case SUBID_DVBC_KNC1:
@@ -1296,9 +1295,8 @@ static void frontend_init(struct budget_av *budget_av)
fe = dvb_attach(tda10021_attach, &philips_cu1216_config_altaddress,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
- if (fe) {
+ if (fe)
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
- }
break;
case SUBID_DVBC_EASYWATCH_MK3:
@@ -1312,9 +1310,8 @@ static void frontend_init(struct budget_av *budget_av)
&philips_cu1216_tda10023_config,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
- if (fe) {
+ if (fe)
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
- }
break;
case SUBID_DVBT_EASYWATCH:
@@ -1351,7 +1348,7 @@ static void frontend_init(struct budget_av *budget_av)
}
-static void budget_av_irq(struct saa7146_dev *dev, u32 * isr)
+static void budget_av_irq(struct saa7146_dev *dev, u32 *isr)
{
struct budget_av *budget_av = dev->ext_priv;
@@ -1368,7 +1365,7 @@ static int budget_av_detach(struct saa7146_dev *dev)
dprintk(2, "dev: %p\n", dev);
- if (1 == budget_av->has_saa7113) {
+ if (budget_av->has_saa7113 == 1) {
saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTLO);
msleep(200);
@@ -1439,7 +1436,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
dprintk(2, "dev: %p\n", dev);
- if (!(budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL)))
+ budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL);
+ if (!budget_av)
return -ENOMEM;
budget_av->has_saa7113 = 0;
@@ -1465,18 +1463,19 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (err != 0) {
ttpci_budget_deinit(&budget_av->budget);
kfree(budget_av);
- ERR("cannot init vv subsystem\n");
+ pr_err("cannot init vv subsystem\n");
return err;
}
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
+ err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO);
+ if (err) {
saa7146_vv_release(dev);
ttpci_budget_deinit(&budget_av->budget);
kfree(budget_av);
- ERR("cannot register capture v4l2 device\n");
+ pr_err("cannot register capture v4l2 device\n");
return err;
}
@@ -1510,15 +1509,15 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
}
static struct saa7146_standard standard[] = {
- {.name = "PAL",.id = V4L2_STD_PAL,
- .v_offset = 0x17,.v_field = 288,
- .h_offset = 0x14,.h_pixels = 680,
- .v_max_out = 576,.h_max_out = 768 },
-
- {.name = "NTSC",.id = V4L2_STD_NTSC,
- .v_offset = 0x16,.v_field = 240,
- .h_offset = 0x06,.h_pixels = 708,
- .v_max_out = 480,.h_max_out = 640, },
+ {.name = "PAL", .id = V4L2_STD_PAL,
+ .v_offset = 0x17, .v_field = 288,
+ .h_offset = 0x14, .h_pixels = 680,
+ .v_max_out = 576, .h_max_out = 768 },
+
+ {.name = "NTSC", .id = V4L2_STD_NTSC,
+ .v_offset = 0x16, .v_field = 240,
+ .h_offset = 0x06, .h_pixels = 708,
+ .v_max_out = 480, .h_max_out = 640, },
};
static struct saa7146_ext_vv vv_data = {
@@ -1532,8 +1531,8 @@ static struct saa7146_ext_vv vv_data = {
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
-MAKE_BUDGET_INFO(knc1s2,"KNC1 DVB-S2", BUDGET_KNC1S2);
-MAKE_BUDGET_INFO(sates2,"Satelco EasyWatch DVB-S2", BUDGET_KNC1S2);
+MAKE_BUDGET_INFO(knc1s2, "KNC1 DVB-S2", BUDGET_KNC1S2);
+MAKE_BUDGET_INFO(sates2, "Satelco EasyWatch DVB-S2", BUDGET_KNC1S2);
MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
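Aside on the error-handling hunks above (dvb_ca_en50221_init(), kzalloc(), saa7146_register_device()): they all apply the checkpatch rule against assignments inside an if condition. A minimal sketch of the before/after shape, using hypothetical names rather than anything from the patch:

struct example_dev;					/* hypothetical */
int example_register(struct example_dev *dev);		/* hypothetical */

static int example_attach(struct example_dev *dev)
{
	int err;

	/* was: if ((err = example_register(dev)) != 0) return err; */
	err = example_register(dev);
	if (err != 0)
		return err;

	return 0;
}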
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 66e1a004ee43..76de40e3c802 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * budget-ci.c: driver for the SAA7146 based Budget DVB cards
+ * budget-ci.ko: driver for the SAA7146 based Budget DVB cards
+ * with CI (but without analog video input)
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
@@ -123,7 +124,7 @@ static void msp430_ir_interrupt(struct tasklet_struct *t)
*/
if (ir_debug)
- printk("budget_ci: received byte 0x%02x\n", command);
+ pr_info("received byte 0x%02x\n", command);
/* Remove repeat bit, we use every command */
command = command & 0x7f;
@@ -164,7 +165,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev) {
- printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
+ pr_err("IR interface initialisation failed\n");
return -ENOMEM;
}
@@ -223,7 +224,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
error = rc_register_device(dev);
if (error) {
- printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
+ pr_err("could not init driver for IR device (code %d)\n", error);
rc_free_device(dev);
return error;
}
@@ -411,24 +412,21 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
if (flags & CICONTROL_CAMDETECT) {
// mark it as present if it wasn't before
- if (budget_ci->slot_status & SLOTSTATUS_NONE) {
+ if (budget_ci->slot_status & SLOTSTATUS_NONE)
budget_ci->slot_status = SLOTSTATUS_PRESENT;
- }
// during a RESET, we check if we can read from IO memory to see when CAM is ready
if (budget_ci->slot_status & SLOTSTATUS_RESET) {
- if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d) {
+ if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d)
budget_ci->slot_status = SLOTSTATUS_READY;
- }
}
} else {
budget_ci->slot_status = SLOTSTATUS_NONE;
}
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
- if (budget_ci->slot_status & SLOTSTATUS_READY) {
+ if (budget_ci->slot_status & SLOTSTATUS_READY)
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
- }
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
@@ -483,21 +481,21 @@ static int ciintf_init(struct budget_ci *budget_ci)
budget_ci->ca.slot_ts_enable = ciintf_slot_ts_enable;
budget_ci->ca.poll_slot_status = ciintf_poll_slot_status;
budget_ci->ca.data = budget_ci;
- if ((result = dvb_ca_en50221_init(&budget_ci->budget.dvb_adapter,
- &budget_ci->ca,
- ca_flags, 1)) != 0) {
- printk("budget_ci: CI interface detected, but initialisation failed.\n");
+
+ result = dvb_ca_en50221_init(&budget_ci->budget.dvb_adapter,
+ &budget_ci->ca, ca_flags, 1);
+ if (result != 0) {
+ pr_err("CI interface detected, but initialisation failed.\n");
goto error;
}
// Setup CI slot IRQ
if (budget_ci->ci_irq) {
tasklet_setup(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt);
- if (budget_ci->slot_status != SLOTSTATUS_NONE) {
+ if (budget_ci->slot_status != SLOTSTATUS_NONE)
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
- } else {
+ else
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
- }
SAA7146_IER_ENABLE(saa, MASK_03);
}
@@ -506,7 +504,7 @@ static int ciintf_init(struct budget_ci *budget_ci)
CICONTROL_RESET, 1, 0);
// success!
- printk("budget_ci: CI interface initialised\n");
+ pr_info("CI interface initialised\n");
budget_ci->budget.ci_present = 1;
// forge a fake CI IRQ so the CAM state is setup correctly
@@ -551,7 +549,7 @@ static void ciintf_deinit(struct budget_ci *budget_ci)
saa7146_write(saa, MC1, MASK_27);
}
-static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
+static void budget_ci_irq(struct saa7146_dev *dev, u32 *isr)
{
struct budget_ci *budget_ci = dev->ext_priv;
@@ -651,7 +649,7 @@ static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
struct budget_ci *budget_ci = fe->dvb->priv;
u32 div;
u8 buf[4];
- struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf) };
if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
@@ -701,7 +699,7 @@ static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
struct budget_ci *budget_ci = fe->dvb->priv;
static u8 td1316_init[] = { 0x0b, 0xf5, 0x85, 0xab };
static u8 disable_mc44BC374c[] = { 0x1d, 0x74, 0xa0, 0x68 };
- struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = td1316_init,.len =
+ struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address, .flags = 0, .buf = td1316_init, .len =
sizeof(td1316_init) };
// setup PLL configuration
@@ -731,7 +729,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = fe->dvb->priv;
u8 tuner_buf[4];
- struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = tuner_buf,.len = sizeof(tuner_buf) };
+ struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address, .flags = 0, .buf = tuner_buf, .len = sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
@@ -856,9 +854,9 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
// determine charge pump
tuner_frequency = p->frequency + 36125000;
- if (tuner_frequency < 87000000)
+ if (tuner_frequency < 87000000) {
return -EINVAL;
- else if (tuner_frequency < 130000000) {
+ } else if (tuner_frequency < 130000000) {
cp = 3;
band = 1;
} else if (tuner_frequency < 160000000) {
@@ -885,8 +883,9 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
} else if (tuner_frequency < 895000000) {
cp = 7;
band = 4;
- } else
+ } else {
return -EINVAL;
+ }
// assume PLL filter should always be 8MHz for the moment.
filter = 1;
@@ -1035,222 +1034,222 @@ static struct tda827x_config tda827x_config = {
/* TT S2-3200 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_1[] = {
- { STB0899_DEV_ID , 0x81 },
- { STB0899_DISCNTRL1 , 0x32 },
- { STB0899_DISCNTRL2 , 0x80 },
- { STB0899_DISRX_ST0 , 0x04 },
- { STB0899_DISRX_ST1 , 0x00 },
- { STB0899_DISPARITY , 0x00 },
- { STB0899_DISSTATUS , 0x20 },
- { STB0899_DISF22 , 0x8c },
- { STB0899_DISF22RX , 0x9a },
- { STB0899_SYSREG , 0x0b },
- { STB0899_ACRPRESC , 0x11 },
- { STB0899_ACRDIV1 , 0x0a },
- { STB0899_ACRDIV2 , 0x05 },
- { STB0899_DACR1 , 0x00 },
- { STB0899_DACR2 , 0x00 },
- { STB0899_OUTCFG , 0x00 },
- { STB0899_MODECFG , 0x00 },
- { STB0899_IRQSTATUS_3 , 0x30 },
- { STB0899_IRQSTATUS_2 , 0x00 },
- { STB0899_IRQSTATUS_1 , 0x00 },
- { STB0899_IRQSTATUS_0 , 0x00 },
- { STB0899_IRQMSK_3 , 0xf3 },
- { STB0899_IRQMSK_2 , 0xfc },
- { STB0899_IRQMSK_1 , 0xff },
- { STB0899_IRQMSK_0 , 0xff },
- { STB0899_IRQCFG , 0x00 },
- { STB0899_I2CCFG , 0x88 },
- { STB0899_I2CRPT , 0x48 }, /* 12k Pullup, Repeater=16, Stop=disabled */
- { STB0899_IOPVALUE5 , 0x00 },
- { STB0899_IOPVALUE4 , 0x20 },
- { STB0899_IOPVALUE3 , 0xc9 },
- { STB0899_IOPVALUE2 , 0x90 },
- { STB0899_IOPVALUE1 , 0x40 },
- { STB0899_IOPVALUE0 , 0x00 },
- { STB0899_GPIO00CFG , 0x82 },
- { STB0899_GPIO01CFG , 0x82 },
- { STB0899_GPIO02CFG , 0x82 },
- { STB0899_GPIO03CFG , 0x82 },
- { STB0899_GPIO04CFG , 0x82 },
- { STB0899_GPIO05CFG , 0x82 },
- { STB0899_GPIO06CFG , 0x82 },
- { STB0899_GPIO07CFG , 0x82 },
- { STB0899_GPIO08CFG , 0x82 },
- { STB0899_GPIO09CFG , 0x82 },
- { STB0899_GPIO10CFG , 0x82 },
- { STB0899_GPIO11CFG , 0x82 },
- { STB0899_GPIO12CFG , 0x82 },
- { STB0899_GPIO13CFG , 0x82 },
- { STB0899_GPIO14CFG , 0x82 },
- { STB0899_GPIO15CFG , 0x82 },
- { STB0899_GPIO16CFG , 0x82 },
- { STB0899_GPIO17CFG , 0x82 },
- { STB0899_GPIO18CFG , 0x82 },
- { STB0899_GPIO19CFG , 0x82 },
- { STB0899_GPIO20CFG , 0x82 },
- { STB0899_SDATCFG , 0xb8 },
- { STB0899_SCLTCFG , 0xba },
- { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
- { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
- { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
- { STB0899_DIRCLKCFG , 0x82 },
- { STB0899_CLKOUT27CFG , 0x7e },
- { STB0899_STDBYCFG , 0x82 },
- { STB0899_CS0CFG , 0x82 },
- { STB0899_CS1CFG , 0x82 },
- { STB0899_DISEQCOCFG , 0x20 },
- { STB0899_GPIO32CFG , 0x82 },
- { STB0899_GPIO33CFG , 0x82 },
- { STB0899_GPIO34CFG , 0x82 },
- { STB0899_GPIO35CFG , 0x82 },
- { STB0899_GPIO36CFG , 0x82 },
- { STB0899_GPIO37CFG , 0x82 },
- { STB0899_GPIO38CFG , 0x82 },
- { STB0899_GPIO39CFG , 0x82 },
- { STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
- { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
- { STB0899_FILTCTRL , 0x00 },
- { STB0899_SYSCTRL , 0x00 },
- { STB0899_STOPCLK1 , 0x20 },
- { STB0899_STOPCLK2 , 0x00 },
- { STB0899_INTBUFSTATUS , 0x00 },
- { STB0899_INTBUFCTRL , 0x0a },
- { 0xffff , 0xff },
+ { STB0899_DEV_ID, 0x81 },
+ { STB0899_DISCNTRL1, 0x32 },
+ { STB0899_DISCNTRL2, 0x80 },
+ { STB0899_DISRX_ST0, 0x04 },
+ { STB0899_DISRX_ST1, 0x00 },
+ { STB0899_DISPARITY, 0x00 },
+ { STB0899_DISSTATUS, 0x20 },
+ { STB0899_DISF22, 0x8c },
+ { STB0899_DISF22RX, 0x9a },
+ { STB0899_SYSREG, 0x0b },
+ { STB0899_ACRPRESC, 0x11 },
+ { STB0899_ACRDIV1, 0x0a },
+ { STB0899_ACRDIV2, 0x05 },
+ { STB0899_DACR1, 0x00 },
+ { STB0899_DACR2, 0x00 },
+ { STB0899_OUTCFG, 0x00 },
+ { STB0899_MODECFG, 0x00 },
+ { STB0899_IRQSTATUS_3, 0x30 },
+ { STB0899_IRQSTATUS_2, 0x00 },
+ { STB0899_IRQSTATUS_1, 0x00 },
+ { STB0899_IRQSTATUS_0, 0x00 },
+ { STB0899_IRQMSK_3, 0xf3 },
+ { STB0899_IRQMSK_2, 0xfc },
+ { STB0899_IRQMSK_1, 0xff },
+ { STB0899_IRQMSK_0, 0xff },
+ { STB0899_IRQCFG, 0x00 },
+ { STB0899_I2CCFG, 0x88 },
+ { STB0899_I2CRPT, 0x48 }, /* 12k Pullup, Repeater=16, Stop=disabled */
+ { STB0899_IOPVALUE5, 0x00 },
+ { STB0899_IOPVALUE4, 0x20 },
+ { STB0899_IOPVALUE3, 0xc9 },
+ { STB0899_IOPVALUE2, 0x90 },
+ { STB0899_IOPVALUE1, 0x40 },
+ { STB0899_IOPVALUE0, 0x00 },
+ { STB0899_GPIO00CFG, 0x82 },
+ { STB0899_GPIO01CFG, 0x82 },
+ { STB0899_GPIO02CFG, 0x82 },
+ { STB0899_GPIO03CFG, 0x82 },
+ { STB0899_GPIO04CFG, 0x82 },
+ { STB0899_GPIO05CFG, 0x82 },
+ { STB0899_GPIO06CFG, 0x82 },
+ { STB0899_GPIO07CFG, 0x82 },
+ { STB0899_GPIO08CFG, 0x82 },
+ { STB0899_GPIO09CFG, 0x82 },
+ { STB0899_GPIO10CFG, 0x82 },
+ { STB0899_GPIO11CFG, 0x82 },
+ { STB0899_GPIO12CFG, 0x82 },
+ { STB0899_GPIO13CFG, 0x82 },
+ { STB0899_GPIO14CFG, 0x82 },
+ { STB0899_GPIO15CFG, 0x82 },
+ { STB0899_GPIO16CFG, 0x82 },
+ { STB0899_GPIO17CFG, 0x82 },
+ { STB0899_GPIO18CFG, 0x82 },
+ { STB0899_GPIO19CFG, 0x82 },
+ { STB0899_GPIO20CFG, 0x82 },
+ { STB0899_SDATCFG, 0xb8 },
+ { STB0899_SCLTCFG, 0xba },
+ { STB0899_AGCRFCFG, 0x1c }, /* 0x11 */
+ { STB0899_GPIO22, 0x82 }, /* AGCBB2CFG */
+ { STB0899_GPIO21, 0x91 }, /* AGCBB1CFG */
+ { STB0899_DIRCLKCFG, 0x82 },
+ { STB0899_CLKOUT27CFG, 0x7e },
+ { STB0899_STDBYCFG, 0x82 },
+ { STB0899_CS0CFG, 0x82 },
+ { STB0899_CS1CFG, 0x82 },
+ { STB0899_DISEQCOCFG, 0x20 },
+ { STB0899_GPIO32CFG, 0x82 },
+ { STB0899_GPIO33CFG, 0x82 },
+ { STB0899_GPIO34CFG, 0x82 },
+ { STB0899_GPIO35CFG, 0x82 },
+ { STB0899_GPIO36CFG, 0x82 },
+ { STB0899_GPIO37CFG, 0x82 },
+ { STB0899_GPIO38CFG, 0x82 },
+ { STB0899_GPIO39CFG, 0x82 },
+ { STB0899_NCOARSE, 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
+ { STB0899_SYNTCTRL, 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
+ { STB0899_FILTCTRL, 0x00 },
+ { STB0899_SYSCTRL, 0x00 },
+ { STB0899_STOPCLK1, 0x20 },
+ { STB0899_STOPCLK2, 0x00 },
+ { STB0899_INTBUFSTATUS, 0x00 },
+ { STB0899_INTBUFCTRL, 0x0a },
+ { 0xffff, 0xff },
};
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_3[] = {
- { STB0899_DEMOD , 0x00 },
- { STB0899_RCOMPC , 0xc9 },
- { STB0899_AGC1CN , 0x41 },
- { STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x7a },
- { STB0899_TMGCFG , 0x4e },
- { STB0899_AGC2REF , 0x34 },
- { STB0899_TLSR , 0x84 },
- { STB0899_CFD , 0xc7 },
- { STB0899_ACLC , 0x87 },
- { STB0899_BCLC , 0x94 },
- { STB0899_EQON , 0x41 },
- { STB0899_LDT , 0xdd },
- { STB0899_LDT2 , 0xc9 },
- { STB0899_EQUALREF , 0xb4 },
- { STB0899_TMGRAMP , 0x10 },
- { STB0899_TMGTHD , 0x30 },
- { STB0899_IDCCOMP , 0xfb },
- { STB0899_QDCCOMP , 0x03 },
- { STB0899_POWERI , 0x3b },
- { STB0899_POWERQ , 0x3d },
- { STB0899_RCOMP , 0x81 },
- { STB0899_AGCIQIN , 0x80 },
- { STB0899_AGC2I1 , 0x04 },
- { STB0899_AGC2I2 , 0xf5 },
- { STB0899_TLIR , 0x25 },
- { STB0899_RTF , 0x80 },
- { STB0899_DSTATUS , 0x00 },
- { STB0899_LDI , 0xca },
- { STB0899_CFRM , 0xf1 },
- { STB0899_CFRL , 0xf3 },
- { STB0899_NIRM , 0x2a },
- { STB0899_NIRL , 0x05 },
- { STB0899_ISYMB , 0x17 },
- { STB0899_QSYMB , 0xfa },
- { STB0899_SFRH , 0x2f },
- { STB0899_SFRM , 0x68 },
- { STB0899_SFRL , 0x40 },
- { STB0899_SFRUPH , 0x2f },
- { STB0899_SFRUPM , 0x68 },
- { STB0899_SFRUPL , 0x40 },
- { STB0899_EQUAI1 , 0xfd },
- { STB0899_EQUAQ1 , 0x04 },
- { STB0899_EQUAI2 , 0x0f },
- { STB0899_EQUAQ2 , 0xff },
- { STB0899_EQUAI3 , 0xdf },
- { STB0899_EQUAQ3 , 0xfa },
- { STB0899_EQUAI4 , 0x37 },
- { STB0899_EQUAQ4 , 0x0d },
- { STB0899_EQUAI5 , 0xbd },
- { STB0899_EQUAQ5 , 0xf7 },
- { STB0899_DSTATUS2 , 0x00 },
- { STB0899_VSTATUS , 0x00 },
- { STB0899_VERROR , 0xff },
- { STB0899_IQSWAP , 0x2a },
- { STB0899_ECNT1M , 0x00 },
- { STB0899_ECNT1L , 0x00 },
- { STB0899_ECNT2M , 0x00 },
- { STB0899_ECNT2L , 0x00 },
- { STB0899_ECNT3M , 0x00 },
- { STB0899_ECNT3L , 0x00 },
- { STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
- { STB0899_VTH12 , 0xf0 },
- { STB0899_VTH23 , 0xa0 },
- { STB0899_VTH34 , 0x78 },
- { STB0899_VTH56 , 0x4e },
- { STB0899_VTH67 , 0x48 },
- { STB0899_VTH78 , 0x38 },
- { STB0899_PRVIT , 0xff },
- { STB0899_VITSYNC , 0x19 },
- { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
- { STB0899_TSULC , 0x42 },
- { STB0899_RSLLC , 0x40 },
- { STB0899_TSLPL , 0x12 },
- { STB0899_TSCFGH , 0x0c },
- { STB0899_TSCFGM , 0x00 },
- { STB0899_TSCFGL , 0x0c },
- { STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
- { STB0899_RSSYNCDEL , 0x00 },
- { STB0899_TSINHDELH , 0x02 },
- { STB0899_TSINHDELM , 0x00 },
- { STB0899_TSINHDELL , 0x00 },
- { STB0899_TSLLSTKM , 0x00 },
- { STB0899_TSLLSTKL , 0x00 },
- { STB0899_TSULSTKM , 0x00 },
- { STB0899_TSULSTKL , 0xab },
- { STB0899_PCKLENUL , 0x00 },
- { STB0899_PCKLENLL , 0xcc },
- { STB0899_RSPCKLEN , 0xcc },
- { STB0899_TSSTATUS , 0x80 },
- { STB0899_ERRCTRL1 , 0xb6 },
- { STB0899_ERRCTRL2 , 0x96 },
- { STB0899_ERRCTRL3 , 0x89 },
- { STB0899_DMONMSK1 , 0x27 },
- { STB0899_DMONMSK0 , 0x03 },
- { STB0899_DEMAPVIT , 0x5c },
- { STB0899_PLPARM , 0x1f },
- { STB0899_PDELCTRL , 0x48 },
- { STB0899_PDELCTRL2 , 0x00 },
- { STB0899_BBHCTRL1 , 0x00 },
- { STB0899_BBHCTRL2 , 0x00 },
- { STB0899_HYSTTHRESH , 0x77 },
- { STB0899_MATCSTM , 0x00 },
- { STB0899_MATCSTL , 0x00 },
- { STB0899_UPLCSTM , 0x00 },
- { STB0899_UPLCSTL , 0x00 },
- { STB0899_DFLCSTM , 0x00 },
- { STB0899_DFLCSTL , 0x00 },
- { STB0899_SYNCCST , 0x00 },
- { STB0899_SYNCDCSTM , 0x00 },
- { STB0899_SYNCDCSTL , 0x00 },
- { STB0899_ISI_ENTRY , 0x00 },
- { STB0899_ISI_BIT_EN , 0x00 },
- { STB0899_MATSTRM , 0x00 },
- { STB0899_MATSTRL , 0x00 },
- { STB0899_UPLSTRM , 0x00 },
- { STB0899_UPLSTRL , 0x00 },
- { STB0899_DFLSTRM , 0x00 },
- { STB0899_DFLSTRL , 0x00 },
- { STB0899_SYNCSTR , 0x00 },
- { STB0899_SYNCDSTRM , 0x00 },
- { STB0899_SYNCDSTRL , 0x00 },
- { STB0899_CFGPDELSTATUS1 , 0x10 },
- { STB0899_CFGPDELSTATUS2 , 0x00 },
- { STB0899_BBFERRORM , 0x00 },
- { STB0899_BBFERRORL , 0x00 },
- { STB0899_UPKTERRORM , 0x00 },
- { STB0899_UPKTERRORL , 0x00 },
- { 0xffff , 0xff },
+ { STB0899_DEMOD, 0x00 },
+ { STB0899_RCOMPC, 0xc9 },
+ { STB0899_AGC1CN, 0x41 },
+ { STB0899_AGC1REF, 0x10 },
+ { STB0899_RTC, 0x7a },
+ { STB0899_TMGCFG, 0x4e },
+ { STB0899_AGC2REF, 0x34 },
+ { STB0899_TLSR, 0x84 },
+ { STB0899_CFD, 0xc7 },
+ { STB0899_ACLC, 0x87 },
+ { STB0899_BCLC, 0x94 },
+ { STB0899_EQON, 0x41 },
+ { STB0899_LDT, 0xdd },
+ { STB0899_LDT2, 0xc9 },
+ { STB0899_EQUALREF, 0xb4 },
+ { STB0899_TMGRAMP, 0x10 },
+ { STB0899_TMGTHD, 0x30 },
+ { STB0899_IDCCOMP, 0xfb },
+ { STB0899_QDCCOMP, 0x03 },
+ { STB0899_POWERI, 0x3b },
+ { STB0899_POWERQ, 0x3d },
+ { STB0899_RCOMP, 0x81 },
+ { STB0899_AGCIQIN, 0x80 },
+ { STB0899_AGC2I1, 0x04 },
+ { STB0899_AGC2I2, 0xf5 },
+ { STB0899_TLIR, 0x25 },
+ { STB0899_RTF, 0x80 },
+ { STB0899_DSTATUS, 0x00 },
+ { STB0899_LDI, 0xca },
+ { STB0899_CFRM, 0xf1 },
+ { STB0899_CFRL, 0xf3 },
+ { STB0899_NIRM, 0x2a },
+ { STB0899_NIRL, 0x05 },
+ { STB0899_ISYMB, 0x17 },
+ { STB0899_QSYMB, 0xfa },
+ { STB0899_SFRH, 0x2f },
+ { STB0899_SFRM, 0x68 },
+ { STB0899_SFRL, 0x40 },
+ { STB0899_SFRUPH, 0x2f },
+ { STB0899_SFRUPM, 0x68 },
+ { STB0899_SFRUPL, 0x40 },
+ { STB0899_EQUAI1, 0xfd },
+ { STB0899_EQUAQ1, 0x04 },
+ { STB0899_EQUAI2, 0x0f },
+ { STB0899_EQUAQ2, 0xff },
+ { STB0899_EQUAI3, 0xdf },
+ { STB0899_EQUAQ3, 0xfa },
+ { STB0899_EQUAI4, 0x37 },
+ { STB0899_EQUAQ4, 0x0d },
+ { STB0899_EQUAI5, 0xbd },
+ { STB0899_EQUAQ5, 0xf7 },
+ { STB0899_DSTATUS2, 0x00 },
+ { STB0899_VSTATUS, 0x00 },
+ { STB0899_VERROR, 0xff },
+ { STB0899_IQSWAP, 0x2a },
+ { STB0899_ECNT1M, 0x00 },
+ { STB0899_ECNT1L, 0x00 },
+ { STB0899_ECNT2M, 0x00 },
+ { STB0899_ECNT2L, 0x00 },
+ { STB0899_ECNT3M, 0x00 },
+ { STB0899_ECNT3L, 0x00 },
+ { STB0899_FECAUTO1, 0x06 },
+ { STB0899_FECM, 0x01 },
+ { STB0899_VTH12, 0xf0 },
+ { STB0899_VTH23, 0xa0 },
+ { STB0899_VTH34, 0x78 },
+ { STB0899_VTH56, 0x4e },
+ { STB0899_VTH67, 0x48 },
+ { STB0899_VTH78, 0x38 },
+ { STB0899_PRVIT, 0xff },
+ { STB0899_VITSYNC, 0x19 },
+ { STB0899_RSULC, 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
+ { STB0899_TSULC, 0x42 },
+ { STB0899_RSLLC, 0x40 },
+ { STB0899_TSLPL, 0x12 },
+ { STB0899_TSCFGH, 0x0c },
+ { STB0899_TSCFGM, 0x00 },
+ { STB0899_TSCFGL, 0x0c },
+ { STB0899_TSOUT, 0x4d }, /* 0x0d for CAM */
+ { STB0899_RSSYNCDEL, 0x00 },
+ { STB0899_TSINHDELH, 0x02 },
+ { STB0899_TSINHDELM, 0x00 },
+ { STB0899_TSINHDELL, 0x00 },
+ { STB0899_TSLLSTKM, 0x00 },
+ { STB0899_TSLLSTKL, 0x00 },
+ { STB0899_TSULSTKM, 0x00 },
+ { STB0899_TSULSTKL, 0xab },
+ { STB0899_PCKLENUL, 0x00 },
+ { STB0899_PCKLENLL, 0xcc },
+ { STB0899_RSPCKLEN, 0xcc },
+ { STB0899_TSSTATUS, 0x80 },
+ { STB0899_ERRCTRL1, 0xb6 },
+ { STB0899_ERRCTRL2, 0x96 },
+ { STB0899_ERRCTRL3, 0x89 },
+ { STB0899_DMONMSK1, 0x27 },
+ { STB0899_DMONMSK0, 0x03 },
+ { STB0899_DEMAPVIT, 0x5c },
+ { STB0899_PLPARM, 0x1f },
+ { STB0899_PDELCTRL, 0x48 },
+ { STB0899_PDELCTRL2, 0x00 },
+ { STB0899_BBHCTRL1, 0x00 },
+ { STB0899_BBHCTRL2, 0x00 },
+ { STB0899_HYSTTHRESH, 0x77 },
+ { STB0899_MATCSTM, 0x00 },
+ { STB0899_MATCSTL, 0x00 },
+ { STB0899_UPLCSTM, 0x00 },
+ { STB0899_UPLCSTL, 0x00 },
+ { STB0899_DFLCSTM, 0x00 },
+ { STB0899_DFLCSTL, 0x00 },
+ { STB0899_SYNCCST, 0x00 },
+ { STB0899_SYNCDCSTM, 0x00 },
+ { STB0899_SYNCDCSTL, 0x00 },
+ { STB0899_ISI_ENTRY, 0x00 },
+ { STB0899_ISI_BIT_EN, 0x00 },
+ { STB0899_MATSTRM, 0x00 },
+ { STB0899_MATSTRL, 0x00 },
+ { STB0899_UPLSTRM, 0x00 },
+ { STB0899_UPLSTRL, 0x00 },
+ { STB0899_DFLSTRM, 0x00 },
+ { STB0899_DFLSTRL, 0x00 },
+ { STB0899_SYNCSTR, 0x00 },
+ { STB0899_SYNCDSTRM, 0x00 },
+ { STB0899_SYNCDSTRL, 0x00 },
+ { STB0899_CFGPDELSTATUS1, 0x10 },
+ { STB0899_CFGPDELSTATUS2, 0x00 },
+ { STB0899_BBFERRORM, 0x00 },
+ { STB0899_BBFERRORL, 0x00 },
+ { STB0899_UPKTERRORM, 0x00 },
+ { STB0899_UPKTERRORL, 0x00 },
+ { 0xffff, 0xff },
};
static struct stb0899_config tt3200_config = {
@@ -1359,7 +1358,7 @@ static void frontend_init(struct budget_ci *budget_ci)
budget_ci->budget.dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
if (dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, LNBP21_LLC, 0) == NULL) {
- printk("%s: No LNBP21 found!\n", __func__);
+ pr_err("%s(): No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
@@ -1370,7 +1369,7 @@ static void frontend_init(struct budget_ci *budget_ci)
budget_ci->budget.dvb_frontend = dvb_attach(tda10023_attach, &tda10023_config, &budget_ci->budget.i2c_adap, 0x48);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(tda827x_attach, budget_ci->budget.dvb_frontend, 0x61, &budget_ci->budget.i2c_adap, &tda827x_config) == NULL) {
- printk(KERN_ERR "%s: No tda827x found!\n", __func__);
+ pr_err("%s(): No tda827x found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
@@ -1382,12 +1381,12 @@ static void frontend_init(struct budget_ci *budget_ci)
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6000_attach, budget_ci->budget.dvb_frontend, 0x63, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
- printk(KERN_ERR "%s: No LNBP21 found!\n", __func__);
+ pr_err("%s(): No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
- printk(KERN_ERR "%s: No STB6000 found!\n", __func__);
+ pr_err("%s(): No STB6000 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
@@ -1422,13 +1421,13 @@ static void frontend_init(struct budget_ci *budget_ci)
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
- printk("%s: No LNBP21 found!\n", __func__);
+ pr_err("%s(): No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
- dvb_frontend_detach(budget_ci->budget.dvb_frontend);
- budget_ci->budget.dvb_frontend = NULL;
+ dvb_frontend_detach(budget_ci->budget.dvb_frontend);
+ budget_ci->budget.dvb_frontend = NULL;
}
}
break;
@@ -1436,7 +1435,7 @@ static void frontend_init(struct budget_ci *budget_ci)
}
if (budget_ci->budget.dvb_frontend == NULL) {
- printk("budget-ci: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget_ci->budget.dev->pci->vendor,
budget_ci->budget.dev->pci->device,
budget_ci->budget.dev->pci->subsystem_vendor,
@@ -1444,7 +1443,7 @@ static void frontend_init(struct budget_ci *budget_ci)
} else {
if (dvb_register_frontend
(&budget_ci->budget.dvb_adapter, budget_ci->budget.dvb_frontend)) {
- printk("budget-ci: Frontend registration failed!\n");
+ pr_err("Frontend registration failed!\n");
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
@@ -1538,7 +1537,7 @@ static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbs1500b, 0x13c2, 0x101b),
{
.vendor = 0,
- }
+ }
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
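Aside on the printk("budget_ci: ...") -> pr_err()/pr_info() conversions above: the literal "budget_ci: " prefix is dropped from the format strings, which presumably relies on a pr_fmt() definition earlier in the file (not visible in these hunks, so treat this as an assumption). A minimal sketch of that pattern, with a hypothetical function name:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the first include */

#include <linux/printk.h>

static void example_report(void)		/* hypothetical, not from the patch */
{
	/* logs "budget_ci: CI interface initialised" when built into budget-ci.ko */
	pr_info("CI interface initialised\n");
}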
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 25f44c3eebf3..d33adeca196f 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * budget-core.c: driver for the SAA7146 based Budget DVB cards
+ * budget-core.ko: base-driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
@@ -34,6 +34,7 @@
#define BUFFER_WARNING_WAIT (30*HZ)
int budget_debug;
+EXPORT_SYMBOL_GPL(budget_debug);
static int dma_buffer_size = TS_MIN_BUFSIZE_K;
module_param_named(debug, budget_debug, int, 0644);
module_param_named(bufsize, dma_buffer_size, int, 0444);
@@ -80,7 +81,7 @@ static int start_ts_capture(struct budget *budget)
* Pitch: 188, NumBytes3: 188, NumLines3: 1024
*/
- switch(budget->card->type) {
+ switch (budget->card->type) {
case BUDGET_FS_ACTIVY:
saa7146_write(dev, DD1_INIT, 0x04000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25));
@@ -208,7 +209,7 @@ static void vpeirq(struct tasklet_struct *t)
budget->buffer_warnings++;
if (budget->buffer_warnings && time_after(jiffies, budget->buffer_warning_time)) {
- printk("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
+ pr_warn("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
budget->dev->name, __func__, budget->buffer_warnings, count);
budget->buffer_warning_time = jiffies + BUFFER_WARNING_WAIT;
budget->buffer_warnings = 0;
@@ -259,6 +260,7 @@ int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count
return ttpci_budget_debiread_nolock(budget, config, addr,
count, nobusyloop);
}
+EXPORT_SYMBOL_GPL(ttpci_budget_debiread);
static int ttpci_budget_debiwrite_nolock(struct budget *budget, u32 config,
int addr, int count, u32 value, int nobusyloop)
@@ -299,6 +301,7 @@ int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
return ttpci_budget_debiwrite_nolock(budget, config, addr,
count, value, nobusyloop);
}
+EXPORT_SYMBOL_GPL(ttpci_budget_debiwrite);
/****************************************************************************
@@ -423,7 +426,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
budget->card = bi;
budget->dev = (struct saa7146_dev *) dev;
- switch(budget->card->type) {
+ switch (budget->card->type) {
case BUDGET_FS_ACTIVY:
budget->buffer_width = TS_WIDTH_ACTIVY;
max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
@@ -470,7 +473,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
budget->dev->name,
budget->buffer_size > budget->buffer_width * budget->buffer_height ? "odd/even" : "single",
budget->buffer_width, budget->buffer_height);
- printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
+ pr_info("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
ret = dvb_register_adapter(&budget->dvb_adapter, budget->card->name,
owner, &budget->dev->pci->dev, adapter_nums);
@@ -491,8 +494,10 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
spin_lock_init(&budget->feedlock);
spin_lock_init(&budget->debilock);
- /* the Siemens DVB needs this if you want to have the i2c chips
- get recognized before the main driver is loaded */
+ /*
+ * the Siemens DVB needs this if you want to have the i2c chips
+ * get recognized before the main driver is loaded
+ */
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_write(dev, GPIO_CTRL, 0x500000); /* GPIO 3 = 1 */
@@ -511,7 +516,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac);
budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, budget->buffer_size, &budget->pt);
- if (NULL == budget->grabbing) {
+ if (budget->grabbing == NULL) {
ret = -ENOMEM;
goto err_del_i2c;
}
@@ -526,7 +531,8 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
- if ((ret = budget_register(budget)) == 0)
+ ret = budget_register(budget);
+ if (ret == 0)
return 0; /* Everything OK */
/* An error occurred, cleanup resources */
@@ -540,6 +546,7 @@ err_dvb_unregister:
return ret;
}
+EXPORT_SYMBOL_GPL(ttpci_budget_init);
void ttpci_budget_init_hooks(struct budget *budget)
{
@@ -548,6 +555,7 @@ void ttpci_budget_init_hooks(struct budget *budget)
budget->dvb_frontend->ops.read_status = budget_read_fe_status;
}
}
+EXPORT_SYMBOL_GPL(ttpci_budget_init_hooks);
int ttpci_budget_deinit(struct budget *budget)
{
@@ -567,8 +575,9 @@ int ttpci_budget_deinit(struct budget *budget)
return 0;
}
+EXPORT_SYMBOL_GPL(ttpci_budget_deinit);
-void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
+void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 *isr)
{
struct budget *budget = dev->ext_priv;
@@ -577,6 +586,7 @@ void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
if (*isr & MASK_10)
tasklet_schedule(&budget->vpe_tasklet);
}
+EXPORT_SYMBOL_GPL(ttpci_budget_irq10_handler);
void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port)
{
@@ -590,14 +600,6 @@ void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port)
}
spin_unlock(&budget->feedlock);
}
-
-EXPORT_SYMBOL_GPL(ttpci_budget_debiread);
-EXPORT_SYMBOL_GPL(ttpci_budget_debiwrite);
-EXPORT_SYMBOL_GPL(ttpci_budget_init);
-EXPORT_SYMBOL_GPL(ttpci_budget_init_hooks);
-EXPORT_SYMBOL_GPL(ttpci_budget_deinit);
-EXPORT_SYMBOL_GPL(ttpci_budget_irq10_handler);
EXPORT_SYMBOL_GPL(ttpci_budget_set_video_port);
-EXPORT_SYMBOL_GPL(budget_debug);
MODULE_LICENSE("GPL");
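Aside on the EXPORT_SYMBOL_GPL() movement above: the grouped export list at the end of budget-core.c is dissolved and each export now directly follows the definition it belongs to, matching the usual checkpatch recommendation. A compact sketch with hypothetical names:

#include <linux/export.h>
#include <linux/module.h>

int example_debug;				/* hypothetical counterpart of budget_debug */
EXPORT_SYMBOL_GPL(example_debug);		/* export follows the definition ... */

int example_deinit(void)			/* hypothetical */
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_deinit);		/* ... rather than a block of exports at file end */

MODULE_LICENSE("GPL");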
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index b76a1b330b50..f623c250909b 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * budget.c: driver for the SAA7146 based Budget DVB cards
+ * budget.ko: driver for the SAA7146 based Budget DVB cards
+ * without analog video input or CI
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
@@ -42,20 +43,24 @@ MODULE_PARM_DESC(diseqc_method, "Select DiSEqC method for subsystem id 13c2:1003
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static void Set22K (struct budget *budget, int state)
+static void Set22K(struct budget *budget, int state)
{
- struct saa7146_dev *dev=budget->dev;
+ struct saa7146_dev *dev = budget->dev;
+
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, (state ? SAA7146_GPIO_OUTHI : SAA7146_GPIO_OUTLO));
}
-/* Diseqc functions only for TT Budget card */
-/* taken from the Skyvision DVB driver by
- Ralph Metzler <rjkm@metzlerbros.de> */
+/*
+ * Diseqc functions only for TT Budget card
+ * taken from the Skyvision DVB driver by
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ */
-static void DiseqcSendBit (struct budget *budget, int data)
+static void DiseqcSendBit(struct budget *budget, int data)
{
- struct saa7146_dev *dev=budget->dev;
+ struct saa7146_dev *dev = budget->dev;
+
dprintk(2, "budget: %p\n", budget);
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
@@ -64,13 +69,13 @@ static void DiseqcSendBit (struct budget *budget, int data)
udelay(data ? 1000 : 500);
}
-static void DiseqcSendByte (struct budget *budget, int data)
+static void DiseqcSendByte(struct budget *budget, int data)
{
- int i, par=1, d;
+ int i, par = 1, d;
dprintk(2, "budget: %p\n", budget);
- for (i=7; i>=0; i--) {
+ for (i = 7; i >= 0; i--) {
d = (data>>i)&1;
par ^= d;
DiseqcSendBit(budget, d);
@@ -79,9 +84,9 @@ static void DiseqcSendByte (struct budget *budget, int data)
DiseqcSendBit(budget, par);
}
-static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long burst)
+static int SendDiSEqCMsg(struct budget *budget, int len, u8 *msg, unsigned long burst)
{
- struct saa7146_dev *dev=budget->dev;
+ struct saa7146_dev *dev = budget->dev;
int i;
dprintk(2, "budget: %p\n", budget);
@@ -89,15 +94,15 @@ static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
mdelay(16);
- for (i=0; i<len; i++)
+ for (i = 0; i < len; i++)
DiseqcSendByte(budget, msg[i]);
mdelay(16);
- if (burst!=-1) {
- if (burst)
+ if (burst != -1) {
+ if (burst) {
DiseqcSendByte(budget, 0xff);
- else {
+ } else {
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
mdelay(12);
udelay(500);
@@ -118,24 +123,24 @@ static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long
static int SetVoltage_Activy(struct budget *budget,
enum fe_sec_voltage voltage)
{
- struct saa7146_dev *dev=budget->dev;
+ struct saa7146_dev *dev = budget->dev;
dprintk(2, "budget: %p\n", budget);
switch (voltage) {
- case SEC_VOLTAGE_13:
- saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
- saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO);
- break;
- case SEC_VOLTAGE_18:
- saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
- saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
- break;
- case SEC_VOLTAGE_OFF:
- saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO);
- break;
- default:
- return -EINVAL;
+ case SEC_VOLTAGE_13:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
+ saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO);
+ break;
+ case SEC_VOLTAGE_18:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
+ saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
+ break;
+ case SEC_VOLTAGE_OFF:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
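Aside on the reindented switch blocks above (and the earlier one in frontend_init() of budget-av.c): kernel style puts case labels in the same column as the switch keyword, with only the statements indented one level further. A minimal sketch with hypothetical values:

#include <linux/errno.h>

static int example_voltage_to_gpio(int voltage)	/* hypothetical helper */
{
	switch (voltage) {
	case 13:		/* label aligned with "switch" */
		return 0;
	case 18:
		return 1;
	default:
		return -EINVAL;
	}
}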
@@ -146,7 +151,7 @@ static int siemens_budget_set_voltage(struct dvb_frontend *fe,
{
struct budget *budget = fe->dvb->priv;
- return SetVoltage_Activy (budget, voltage);
+ return SetVoltage_Activy(budget, voltage);
}
static int budget_set_tone(struct dvb_frontend *fe,
@@ -156,11 +161,11 @@ static int budget_set_tone(struct dvb_frontend *fe,
switch (tone) {
case SEC_TONE_ON:
- Set22K (budget, 1);
+ Set22K(budget, 1);
break;
case SEC_TONE_OFF:
- Set22K (budget, 0);
+ Set22K(budget, 0);
break;
default:
@@ -170,11 +175,11 @@ static int budget_set_tone(struct dvb_frontend *fe,
return 0;
}
-static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
+static int budget_diseqc_send_master_cmd(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
{
struct budget *budget = fe->dvb->priv;
- SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0);
+ SendDiSEqCMsg(budget, cmd->msg_len, cmd->msg, 0);
return 0;
}
@@ -184,7 +189,7 @@ static int budget_diseqc_send_burst(struct dvb_frontend *fe,
{
struct budget *budget = fe->dvb->priv;
- SendDiSEqCMsg (budget, 0, NULL, minicmd);
+ SendDiSEqCMsg(budget, 0, NULL, minicmd);
return 0;
}
@@ -208,7 +213,8 @@ static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
pwr = 0;
else if (c->frequency >= 1100000)
pwr = 1;
- else pwr = 2;
+ else
+ pwr = 2;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
@@ -220,12 +226,12 @@ static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
+ if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
+ return -EIO;
return 0;
}
-static struct ves1x93_config alps_bsrv2_config =
-{
+static struct ves1x93_config alps_bsrv2_config = {
.demod_address = 0x08,
.xin = 90100000UL,
.invert_pwm = 0,
@@ -248,7 +254,8 @@ static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
+ if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
+ return -EIO;
return 0;
}
@@ -303,7 +310,8 @@ static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
+ if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
+ return -EIO;
return 0;
}
@@ -333,7 +341,8 @@ static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
+ if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
+ return -EIO;
return 0;
}
@@ -365,7 +374,8 @@ static int s5h1420_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
+ if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
+ return -EIO;
return 0;
}
@@ -422,12 +432,12 @@ static int i2c_readreg(struct i2c_adapter *i2c, u8 adr, u8 reg)
return (i2c_transfer(i2c, msg, 2) != 2) ? -EIO : val;
}
-static u8 read_pwm(struct budget* budget)
+static u8 read_pwm(struct budget *budget)
{
u8 b = 0xff;
u8 pwm;
- struct i2c_msg msg[] = { { .addr = 0x50,.flags = 0,.buf = &b,.len = 1 },
- { .addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1} };
+ struct i2c_msg msg[] = { { .addr = 0x50, .flags = 0, .buf = &b, .len = 1 },
+ { .addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1} };
if ((i2c_transfer(&budget->i2c_adap, msg, 2) != 2) || (pwm == 0xff))
pwm = 0x48;
@@ -478,7 +488,7 @@ static void frontend_init(struct budget *budget)
{
(void)alps_bsbe1_config; /* avoid warning */
- switch(budget->dev->pci->subsystem_device) {
+ switch (budget->dev->pci->subsystem_device) {
case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659))
case 0x1013:
// try the ALPS BSRV2 first of all
@@ -527,7 +537,7 @@ static void frontend_init(struct budget *budget)
case 0x4f52: /* Cards based on Philips Semi Sylt PCI ref. design */
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
- printk(KERN_INFO "budget: tuner ALPS BSRU6 in Philips Semi. Sylt detected\n");
+ pr_info("tuner ALPS BSRU6 in Philips Semi. Sylt detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
break;
@@ -545,7 +555,7 @@ static void frontend_init(struct budget *budget)
/* assume ALPS BSRU6 */
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
- printk(KERN_INFO "budget: tuner ALPS BSRU6 detected\n");
+ pr_info("tuner ALPS BSRU6 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
@@ -561,7 +571,7 @@ static void frontend_init(struct budget *budget)
msleep(250);
budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsbe1_config_activy, &budget->i2c_adap);
if (budget->dvb_frontend) {
- printk(KERN_INFO "budget: tuner ALPS BSBE1 detected\n");
+ pr_info("tuner ALPS BSBE1 detected\n");
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage;
@@ -607,7 +617,7 @@ static void frontend_init(struct budget *budget)
budget->dvb_frontend = fe;
if (dvb_attach(lnbp21_attach, fe, &budget->i2c_adap,
0, 0) == NULL) {
- printk("%s: No LNBP21 found!\n", __func__);
+ pr_err("%s(): No LNBP21 found!\n", __func__);
goto error_out;
}
break;
@@ -629,10 +639,10 @@ static void frontend_init(struct budget *budget)
budget->dvb_frontend = fe;
if (dvb_attach(tda826x_attach, fe, 0x60,
&budget->i2c_adap, 0) == NULL)
- printk("%s: No tda826x found!\n", __func__);
+ pr_err("%s(): No tda826x found!\n", __func__);
if (dvb_attach(lnbp21_attach, fe,
&budget->i2c_adap, 0, 0) == NULL) {
- printk("%s: No LNBP21 found!\n", __func__);
+ pr_err("%s(): No LNBP21 found!\n", __func__);
goto error_out;
}
break;
@@ -642,6 +652,7 @@ static void frontend_init(struct budget *budget)
case 0x101c: { /* TT S2-1600 */
const struct stv6110x_devctl *ctl;
+
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
@@ -672,9 +683,11 @@ static void frontend_init(struct budget *budget)
tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
- /* call the init function once to initialize
- tuner's clock output divider and demod's
- master clock */
+ /*
+ * call the init function once to initialize
+ * tuner's clock output divider and demod's
+ * master clock
+ */
if (budget->dvb_frontend->ops.init)
budget->dvb_frontend->ops.init(budget->dvb_frontend);
@@ -682,11 +695,11 @@ static void frontend_init(struct budget *budget)
budget->dvb_frontend,
&budget->i2c_adap,
&tt1600_isl6423_config) == NULL) {
- printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__);
+ pr_err("%s(): No Intersil ISL6423 found!\n", __func__);
goto error_out;
}
} else {
- printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
+ pr_err("%s(): No STV6110(A) Silicon Tuner found!\n", __func__);
goto error_out;
}
}
@@ -695,6 +708,7 @@ static void frontend_init(struct budget *budget)
case 0x1020: { /* Omicom S2 */
const struct stv6110x_devctl *ctl;
+
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
@@ -706,7 +720,7 @@ static void frontend_init(struct budget *budget)
STV090x_DEMODULATOR_0);
if (budget->dvb_frontend) {
- printk(KERN_INFO "budget: Omicom S2 detected\n");
+ pr_info("Omicom S2 detected\n");
ctl = dvb_attach(stv6110x_attach,
budget->dvb_frontend,
@@ -726,9 +740,11 @@ static void frontend_init(struct budget *budget)
tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
- /* call the init function once to initialize
- tuner's clock output divider and demod's
- master clock */
+ /*
+ * call the init function once to initialize
+ * tuner's clock output divider and demod's
+ * master clock
+ */
if (budget->dvb_frontend->ops.init)
budget->dvb_frontend->ops.init(budget->dvb_frontend);
@@ -737,12 +753,11 @@ static void frontend_init(struct budget *budget)
&budget->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x14>>1) == NULL) {
- printk(KERN_ERR
- "No LNBH24 found!\n");
+ pr_err("No LNBH24 found!\n");
goto error_out;
}
} else {
- printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
+ pr_err("%s(): No STV6110(A) Silicon Tuner found!\n", __func__);
goto error_out;
}
}
@@ -751,7 +766,7 @@ static void frontend_init(struct budget *budget)
}
if (budget->dvb_frontend == NULL) {
- printk("budget: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget->dev->pci->vendor,
budget->dev->pci->device,
budget->dev->pci->subsystem_vendor,
@@ -763,21 +778,19 @@ static void frontend_init(struct budget *budget)
return;
error_out:
- printk("budget: Frontend registration failed!\n");
+ pr_err("Frontend registration failed!\n");
dvb_frontend_detach(budget->dvb_frontend);
budget->dvb_frontend = NULL;
- return;
}
-static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
+static int budget_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct budget *budget = NULL;
int err;
budget = kmalloc(sizeof(struct budget), GFP_KERNEL);
- if( NULL == budget ) {
+ if (budget == NULL)
return -ENOMEM;
- }
dprintk(2, "dev:%p, info:%p, budget:%p\n", dev, info, budget);
@@ -785,8 +798,8 @@ static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_
err = ttpci_budget_init(budget, dev, info, THIS_MODULE, adapter_nr);
if (err) {
- printk("==> failed\n");
- kfree (budget);
+ pr_err("==> failed\n");
+ kfree(budget);
return err;
}
@@ -798,7 +811,7 @@ static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_
return 0;
}
-static int budget_detach (struct saa7146_dev* dev)
+static int budget_detach(struct saa7146_dev *dev)
{
struct budget *budget = dev->ext_priv;
int err;
@@ -808,9 +821,9 @@ static int budget_detach (struct saa7146_dev* dev)
dvb_frontend_detach(budget->dvb_frontend);
}
- err = ttpci_budget_deinit (budget);
+ err = ttpci_budget_deinit(budget);
- kfree (budget);
+ kfree(budget);
dev->ext_priv = NULL;
return err;
@@ -839,8 +852,8 @@ static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018),
MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c),
- MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60),
- MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61),
+ MAKE_EXTENSION_PCI(fsacs1, 0x1131, 0x4f60),
+ MAKE_EXTENSION_PCI(fsacs0, 0x1131, 0x4f61),
MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60),
MAKE_EXTENSION_PCI(fsact, 0x1131, 0x5f61),
MAKE_EXTENSION_PCI(omicom, 0x14c4, 0x1020),
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index bd87432e6cde..83ead34dc766 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -3,6 +3,12 @@
#ifndef __BUDGET_DVB__
#define __BUDGET_DVB__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/dvb_frontend.h>
#include <media/dvbdev.h>
#include <media/demux.h>
@@ -22,9 +28,8 @@ extern int budget_debug;
#endif
#define dprintk(level, fmt, arg...) do { \
- if (level & budget_debug) \
- printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt, \
- __func__, ##arg); \
+ if ((level) & budget_debug) \
+ pr_info("%s(): " fmt, __func__, ##arg); \
} while (0)
#define TS_SIZE 188
@@ -83,13 +88,13 @@ struct budget {
void *priv;
};
-#define MAKE_BUDGET_INFO(x_var,x_name,x_type) \
+#define MAKE_BUDGET_INFO(x_var, x_name, x_type) \
static struct budget_info x_var ## _info = { \
- .name=x_name, \
- .type=x_type }; \
+ .name = x_name, \
+ .type = x_type }; \
static struct saa7146_pci_extension_data x_var = { \
.ext_priv = &x_var ## _info, \
- .ext = &budget_extension };
+ .ext = &budget_extension }
#define BUDGET_TT 0
#define BUDGET_TT_HW_DISEQC 1
@@ -119,7 +124,7 @@ extern int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
struct module *owner, short *adapter_nums);
extern void ttpci_budget_init_hooks(struct budget *budget);
extern int ttpci_budget_deinit(struct budget *budget);
-extern void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr);
+extern void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 *isr);
extern void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port);
extern int ttpci_budget_debiread(struct budget *budget, u32 config, int addr, int count,
int uselocks, int nobusyloop);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 91e54215de3a..2d79bfc68c15 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -67,6 +67,7 @@ source "drivers/media/platform/amlogic/Kconfig"
source "drivers/media/platform/amphion/Kconfig"
source "drivers/media/platform/aspeed/Kconfig"
source "drivers/media/platform/atmel/Kconfig"
+source "drivers/media/platform/broadcom/Kconfig"
source "drivers/media/platform/cadence/Kconfig"
source "drivers/media/platform/chips-media/Kconfig"
source "drivers/media/platform/intel/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 3296ec1ebe16..da17301f7439 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -10,6 +10,7 @@ obj-y += amlogic/
obj-y += amphion/
obj-y += aspeed/
obj-y += atmel/
+obj-y += broadcom/
obj-y += cadence/
obj-y += chips-media/
obj-y += intel/
diff --git a/drivers/media/platform/broadcom/Kconfig b/drivers/media/platform/broadcom/Kconfig
new file mode 100644
index 000000000000..32b76ebfcd9a
--- /dev/null
+++ b/drivers/media/platform/broadcom/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_BCM2835_UNICAM
+ tristate "Broadcom BCM283x/BCM271x Unicam video capture driver"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on COMMON_CLK && PM
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Say Y here to enable support for the BCM283x/BCM271x CSI-2 receiver.
+ This is a V4L2 driver that controls the CSI-2 receiver directly,
+ independently from the VC4 firmware.
+
+ This driver is mutually exclusive with the use of bcm2835-camera. The
+ firmware will disable all access to the peripheral from within the
+ firmware if it finds a DT node using it, and bcm2835-camera will
+ therefore fail to probe.
+
+ To compile this driver as a module, choose M here. The module will be
+ called bcm2835-unicam.
diff --git a/drivers/media/platform/broadcom/Makefile b/drivers/media/platform/broadcom/Makefile
new file mode 100644
index 000000000000..03d2045aba2e
--- /dev/null
+++ b/drivers/media/platform/broadcom/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_BCM2835_UNICAM) += bcm2835-unicam.o
diff --git a/drivers/media/platform/broadcom/bcm2835-unicam-regs.h b/drivers/media/platform/broadcom/bcm2835-unicam-regs.h
new file mode 100644
index 000000000000..fce6fa92707c
--- /dev/null
+++ b/drivers/media/platform/broadcom/bcm2835-unicam-regs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright (C) 2017-2020 Raspberry Pi Trading.
+ * Dave Stevenson <dave.stevenson@raspberrypi.com>
+ */
+
+#ifndef VC4_REGS_UNICAM_H
+#define VC4_REGS_UNICAM_H
+
+#include <linux/bits.h>
+
+/*
+ * The following values are taken from files found within the code drop
+ * made by Broadcom for the BCM21553 Graphics Driver, predominantly in
+ * brcm_usrlib/dag/vmcsx/vcinclude/hardware_vc4.h.
+ * They have been modified to be only the register offset.
+ */
+#define UNICAM_CTRL 0x000
+#define UNICAM_STA 0x004
+#define UNICAM_ANA 0x008
+#define UNICAM_PRI 0x00c
+#define UNICAM_CLK 0x010
+#define UNICAM_CLT 0x014
+#define UNICAM_DAT0 0x018
+#define UNICAM_DAT1 0x01c
+#define UNICAM_DAT2 0x020
+#define UNICAM_DAT3 0x024
+#define UNICAM_DLT 0x028
+#define UNICAM_CMP0 0x02c
+#define UNICAM_CMP1 0x030
+#define UNICAM_CAP0 0x034
+#define UNICAM_CAP1 0x038
+#define UNICAM_ICTL 0x100
+#define UNICAM_ISTA 0x104
+#define UNICAM_IDI0 0x108
+#define UNICAM_IPIPE 0x10c
+#define UNICAM_IBSA0 0x110
+#define UNICAM_IBEA0 0x114
+#define UNICAM_IBLS 0x118
+#define UNICAM_IBWP 0x11c
+#define UNICAM_IHWIN 0x120
+#define UNICAM_IHSTA 0x124
+#define UNICAM_IVWIN 0x128
+#define UNICAM_IVSTA 0x12c
+#define UNICAM_ICC 0x130
+#define UNICAM_ICS 0x134
+#define UNICAM_IDC 0x138
+#define UNICAM_IDPO 0x13c
+#define UNICAM_IDCA 0x140
+#define UNICAM_IDCD 0x144
+#define UNICAM_IDS 0x148
+#define UNICAM_DCS 0x200
+#define UNICAM_DBSA0 0x204
+#define UNICAM_DBEA0 0x208
+#define UNICAM_DBWP 0x20c
+#define UNICAM_DBCTL 0x300
+#define UNICAM_IBSA1 0x304
+#define UNICAM_IBEA1 0x308
+#define UNICAM_IDI1 0x30c
+#define UNICAM_DBSA1 0x310
+#define UNICAM_DBEA1 0x314
+#define UNICAM_MISC 0x400
+
+/*
+ * The following bitmasks are from the kernel released by Broadcom
+ * for Android - https://android.googlesource.com/kernel/bcm/
+ * The Rhea, Hawaii, and Java chips all contain the same VideoCore4
+ * Unicam block as BCM2835, as defined in eg
+ * arch/arm/mach-rhea/include/mach/rdb_A0/brcm_rdb_cam.h and similar.
+ * Values reworked to use the kernel BIT and GENMASK macros.
+ *
+ * Some of the bit mnemonics have been amended to match the datasheet.
+ */
+/* UNICAM_CTRL Register */
+#define UNICAM_CPE BIT(0)
+#define UNICAM_MEM BIT(1)
+#define UNICAM_CPR BIT(2)
+#define UNICAM_CPM_MASK GENMASK(3, 3)
+#define UNICAM_CPM_CSI2 0
+#define UNICAM_CPM_CCP2 1
+#define UNICAM_SOE BIT(4)
+#define UNICAM_DCM_MASK GENMASK(5, 5)
+#define UNICAM_DCM_STROBE 0
+#define UNICAM_DCM_DATA 1
+#define UNICAM_SLS BIT(6)
+#define UNICAM_PFT_MASK GENMASK(11, 8)
+#define UNICAM_OET_MASK GENMASK(20, 12)
+
+/* UNICAM_STA Register */
+#define UNICAM_SYN BIT(0)
+#define UNICAM_CS BIT(1)
+#define UNICAM_SBE BIT(2)
+#define UNICAM_PBE BIT(3)
+#define UNICAM_HOE BIT(4)
+#define UNICAM_PLE BIT(5)
+#define UNICAM_SSC BIT(6)
+#define UNICAM_CRCE BIT(7)
+#define UNICAM_OES BIT(8)
+#define UNICAM_IFO BIT(9)
+#define UNICAM_OFO BIT(10)
+#define UNICAM_BFO BIT(11)
+#define UNICAM_DL BIT(12)
+#define UNICAM_PS BIT(13)
+#define UNICAM_IS BIT(14)
+#define UNICAM_PI0 BIT(15)
+#define UNICAM_PI1 BIT(16)
+#define UNICAM_FSI_S BIT(17)
+#define UNICAM_FEI_S BIT(18)
+#define UNICAM_LCI_S BIT(19)
+#define UNICAM_BUF0_RDY BIT(20)
+#define UNICAM_BUF0_NO BIT(21)
+#define UNICAM_BUF1_RDY BIT(22)
+#define UNICAM_BUF1_NO BIT(23)
+#define UNICAM_DI BIT(24)
+
+#define UNICAM_STA_MASK_ALL \
+ (UNICAM_SBE | UNICAM_PBE | UNICAM_HOE | UNICAM_PLE | UNICAM_SSC | \
+ UNICAM_CRCE | UNICAM_IFO | UNICAM_OFO | UNICAM_DL | UNICAM_PS | \
+ UNICAM_PI0 | UNICAM_PI1)
+
+/* UNICAM_ANA Register */
+#define UNICAM_APD BIT(0)
+#define UNICAM_BPD BIT(1)
+#define UNICAM_AR BIT(2)
+#define UNICAM_DDL BIT(3)
+#define UNICAM_CTATADJ_MASK GENMASK(7, 4)
+#define UNICAM_PTATADJ_MASK GENMASK(11, 8)
+
+/* UNICAM_PRI Register */
+#define UNICAM_PE BIT(0)
+#define UNICAM_PT_MASK GENMASK(2, 1)
+#define UNICAM_NP_MASK GENMASK(7, 4)
+#define UNICAM_PP_MASK GENMASK(11, 8)
+#define UNICAM_BS_MASK GENMASK(15, 12)
+#define UNICAM_BL_MASK GENMASK(17, 16)
+
+/* UNICAM_CLK Register */
+#define UNICAM_CLE BIT(0)
+#define UNICAM_CLPD BIT(1)
+#define UNICAM_CLLPE BIT(2)
+#define UNICAM_CLHSE BIT(3)
+#define UNICAM_CLTRE BIT(4)
+#define UNICAM_CLAC_MASK GENMASK(8, 5)
+#define UNICAM_CLSTE BIT(29)
+
+/* UNICAM_CLT Register */
+#define UNICAM_CLT1_MASK GENMASK(7, 0)
+#define UNICAM_CLT2_MASK GENMASK(15, 8)
+
+/* UNICAM_DATn Registers */
+#define UNICAM_DLE BIT(0)
+#define UNICAM_DLPD BIT(1)
+#define UNICAM_DLLPE BIT(2)
+#define UNICAM_DLHSE BIT(3)
+#define UNICAM_DLTRE BIT(4)
+#define UNICAM_DLSM BIT(5)
+#define UNICAM_DLFO BIT(28)
+#define UNICAM_DLSTE BIT(29)
+
+#define UNICAM_DAT_MASK_ALL (UNICAM_DLSTE | UNICAM_DLFO)
+
+/* UNICAM_DLT Register */
+#define UNICAM_DLT1_MASK GENMASK(7, 0)
+#define UNICAM_DLT2_MASK GENMASK(15, 8)
+#define UNICAM_DLT3_MASK GENMASK(23, 16)
+
+/* UNICAM_ICTL Register */
+#define UNICAM_FSIE BIT(0)
+#define UNICAM_FEIE BIT(1)
+#define UNICAM_IBOB BIT(2)
+#define UNICAM_FCM BIT(3)
+#define UNICAM_TFC BIT(4)
+#define UNICAM_LIP_MASK GENMASK(6, 5)
+#define UNICAM_LCIE_MASK GENMASK(28, 16)
+
+/* UNICAM_IDI0/1 Register */
+#define UNICAM_ID0_MASK GENMASK(7, 0)
+#define UNICAM_ID1_MASK GENMASK(15, 8)
+#define UNICAM_ID2_MASK GENMASK(23, 16)
+#define UNICAM_ID3_MASK GENMASK(31, 24)
+
+/* UNICAM_ISTA Register */
+#define UNICAM_FSI BIT(0)
+#define UNICAM_FEI BIT(1)
+#define UNICAM_LCI BIT(2)
+
+#define UNICAM_ISTA_MASK_ALL (UNICAM_FSI | UNICAM_FEI | UNICAM_LCI)
+
+/* UNICAM_IPIPE Register */
+#define UNICAM_PUM_MASK GENMASK(2, 0)
+/* Unpacking modes */
+#define UNICAM_PUM_NONE 0
+#define UNICAM_PUM_UNPACK6 1
+#define UNICAM_PUM_UNPACK7 2
+#define UNICAM_PUM_UNPACK8 3
+#define UNICAM_PUM_UNPACK10 4
+#define UNICAM_PUM_UNPACK12 5
+#define UNICAM_PUM_UNPACK14 6
+#define UNICAM_PUM_UNPACK16 7
+#define UNICAM_DDM_MASK GENMASK(6, 3)
+#define UNICAM_PPM_MASK GENMASK(9, 7)
+/* Packing modes */
+#define UNICAM_PPM_NONE 0
+#define UNICAM_PPM_PACK8 1
+#define UNICAM_PPM_PACK10 2
+#define UNICAM_PPM_PACK12 3
+#define UNICAM_PPM_PACK14 4
+#define UNICAM_PPM_PACK16 5
+#define UNICAM_DEM_MASK GENMASK(11, 10)
+#define UNICAM_DEBL_MASK GENMASK(14, 12)
+#define UNICAM_ICM_MASK GENMASK(16, 15)
+#define UNICAM_IDM_MASK GENMASK(17, 17)
+
+/* UNICAM_ICC Register */
+#define UNICAM_ICFL_MASK GENMASK(4, 0)
+#define UNICAM_ICFH_MASK GENMASK(9, 5)
+#define UNICAM_ICST_MASK GENMASK(12, 10)
+#define UNICAM_ICLT_MASK GENMASK(15, 13)
+#define UNICAM_ICLL_MASK GENMASK(31, 16)
+
+/* UNICAM_DCS Register */
+#define UNICAM_DIE BIT(0)
+#define UNICAM_DIM BIT(1)
+#define UNICAM_DBOB BIT(3)
+#define UNICAM_FDE BIT(4)
+#define UNICAM_LDP BIT(5)
+#define UNICAM_EDL_MASK GENMASK(15, 8)
+
+/* UNICAM_DBCTL Register */
+#define UNICAM_DBEN BIT(0)
+#define UNICAM_BUF0_IE BIT(1)
+#define UNICAM_BUF1_IE BIT(2)
+
+/* UNICAM_CMP[0,1] register */
+#define UNICAM_PCE BIT(31)
+#define UNICAM_GI BIT(9)
+#define UNICAM_CPH BIT(8)
+#define UNICAM_PCVC_MASK GENMASK(7, 6)
+#define UNICAM_PCDT_MASK GENMASK(5, 0)
+
+/* UNICAM_MISC register */
+#define UNICAM_FL0 BIT(6)
+#define UNICAM_FL1 BIT(9)
+
+#endif
diff --git a/drivers/media/platform/broadcom/bcm2835-unicam.c b/drivers/media/platform/broadcom/bcm2835-unicam.c
new file mode 100644
index 000000000000..a1d93c14553d
--- /dev/null
+++ b/drivers/media/platform/broadcom/bcm2835-unicam.c
@@ -0,0 +1,2739 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM283x / BCM271x Unicam Capture Driver
+ *
+ * Copyright (C) 2017-2020 - Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2024 - Ideas on Board
+ *
+ * Dave Stevenson <dave.stevenson@raspberrypi.com>
+ *
+ * Based on TI am437x driver by
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * and TI CAL camera interface driver by
+ * Benoit Parrot <bparrot@ti.com>
+ *
+ *
+ * There are two camera drivers in the kernel for BCM283x - this one and
+ * bcm2835-camera (currently in staging).
+ *
+ * This driver directly controls the Unicam peripheral - there is no
+ * involvement with the VideoCore firmware. Unicam receives CSI-2 or CCP2 data
+ * and writes it into SDRAM. The only potential processing options are to
+ * repack Bayer data into an alternate format, and applying windowing. The
+ * repacking does not shift the data, so can repack V4L2_PIX_FMT_Sxxxx10P to
+ * V4L2_PIX_FMT_Sxxxx10, or V4L2_PIX_FMT_Sxxxx12P to V4L2_PIX_FMT_Sxxxx12, but
+ * not generically up to V4L2_PIX_FMT_Sxxxx16. Support for windowing may be
+ * added later.
+ *
+ * It should be possible to connect this driver to any sensor with a suitable
+ * output interface and V4L2 subdevice driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "bcm2835-unicam-regs.h"
+
+#define UNICAM_MODULE_NAME "unicam"
+
+/*
+ * Unicam must request a minimum of 250 MHz from the VPU clock.
+ * Otherwise the input FIFOs overrun and cause image corruption.
+ */
+#define UNICAM_MIN_VPU_CLOCK_RATE (250 * 1000 * 1000)
+
+/* Unicam has an internal DMA alignment constraint of 16 bytes for each line. */
+#define UNICAM_DMA_BPL_ALIGNMENT 16
+
+/*
+ * The image stride is stored in a 16 bit register, and needs to be aligned to
+ * the DMA constraint. As the ISP in the same SoC has a 32 bytes alignment
+ * constraint on its input, set the image stride alignment to 32 bytes here as
+ * well to avoid incompatible configurations.
+ */
+#define UNICAM_IMAGE_BPL_ALIGNMENT 32
+#define UNICAM_IMAGE_MAX_BPL ((1U << 16) - UNICAM_IMAGE_BPL_ALIGNMENT)
+
+/*
+ * The maximum width is therefore determined by the maximum stride divided by
+ * the number of bytes per pixel. Take 32bpp (4 bytes per pixel) as a worst
+ * case. There is no imposed limit on the height, so adopt a square image for
+ * want of anything better.
+ */
+#define UNICAM_IMAGE_MIN_WIDTH 16
+#define UNICAM_IMAGE_MIN_HEIGHT 16
+#define UNICAM_IMAGE_MAX_WIDTH (UNICAM_IMAGE_MAX_BPL / 4)
+#define UNICAM_IMAGE_MAX_HEIGHT UNICAM_IMAGE_MAX_WIDTH
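+
+/*
+ * Worked example (illustrative only, derived from the constants above):
+ * UNICAM_IMAGE_MAX_BPL = (1 << 16) - 32 = 65504 bytes, so with the worst-case
+ * 32bpp (4 bytes per pixel) UNICAM_IMAGE_MAX_WIDTH = 65504 / 4 = 16376 pixels,
+ * and UNICAM_IMAGE_MAX_HEIGHT is set to the same value.
+ */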
+
+/*
+ * There's no intrinsic limits on the width and height for embedded data. Use
+ * the same maximum values as for the image, to avoid overflows in the image
+ * size computation.
+ */
+#define UNICAM_META_MIN_WIDTH 1
+#define UNICAM_META_MIN_HEIGHT 1
+#define UNICAM_META_MAX_WIDTH UNICAM_IMAGE_MAX_WIDTH
+#define UNICAM_META_MAX_HEIGHT UNICAM_IMAGE_MAX_HEIGHT
+
+/*
+ * Size of the dummy buffer. Can be any size really, but the DMA
+ * allocation works in units of page sizes.
+ */
+#define UNICAM_DUMMY_BUF_SIZE PAGE_SIZE
+
+enum unicam_pad {
+ UNICAM_SD_PAD_SINK,
+ UNICAM_SD_PAD_SOURCE_IMAGE,
+ UNICAM_SD_PAD_SOURCE_METADATA,
+ UNICAM_SD_NUM_PADS
+};
+
+enum unicam_node_type {
+ UNICAM_IMAGE_NODE,
+ UNICAM_METADATA_NODE,
+ UNICAM_MAX_NODES
+};
+
+/*
+ * struct unicam_format_info - Unicam media bus format information
+ * @fourcc: V4L2 pixel format FCC identifier. 0 if n/a.
+ * @unpacked_fourcc: V4L2 pixel format FCC identifier if the data is expanded
+ * out to 16bpp. 0 if n/a.
+ * @code: V4L2 media bus format code.
+ * @depth: Bits per pixel as delivered from the source.
+ * @csi_dt: CSI data type.
+ * @unpack: PUM value when unpacking to @unpacked_fourcc
+ */
+struct unicam_format_info {
+ u32 fourcc;
+ u32 unpacked_fourcc;
+ u32 code;
+ u8 depth;
+ u8 csi_dt;
+ u8 unpack;
+};
+
+struct unicam_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ dma_addr_t dma_addr;
+ unsigned int size;
+};
+
+static inline struct unicam_buffer *to_unicam_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct unicam_buffer, vb.vb2_buf);
+}
+
+struct unicam_node {
+ bool registered;
+ unsigned int id;
+
+ /* Pointer to the current v4l2_buffer */
+ struct unicam_buffer *cur_frm;
+ /* Pointer to the next v4l2_buffer */
+ struct unicam_buffer *next_frm;
+ /* Used to store current pixel format */
+ struct v4l2_format fmt;
+ /* Buffer queue used in video-buf */
+ struct vb2_queue buffer_queue;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* Identifies video device for this channel */
+ struct video_device video_dev;
+ /* Pointer to the parent handle */
+ struct unicam_device *dev;
+ struct media_pad pad;
+ /*
+ * Dummy buffer intended to be used by unicam
+ * if we have no other queued buffers to swap to.
+ */
+ struct unicam_buffer dummy_buf;
+ void *dummy_buf_cpu_addr;
+};
+
+struct unicam_device {
+ struct kref kref;
+
+ /* peripheral base address */
+ void __iomem *base;
+ /* clock gating base address */
+ void __iomem *clk_gate_base;
+ /* lp clock handle */
+ struct clk *clock;
+ /* vpu clock handle */
+ struct clk *vpu_clock;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ struct media_device mdev;
+
+ /* parent device */
+ struct device *dev;
+ /* subdevice async notifier */
+ struct v4l2_async_notifier notifier;
+ unsigned int sequence;
+
+ /* Sensor node */
+ struct {
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ } sensor;
+
+ /* Internal subdev */
+ struct {
+ struct v4l2_subdev sd;
+ struct media_pad pads[UNICAM_SD_NUM_PADS];
+ unsigned int enabled_streams;
+ } subdev;
+
+ enum v4l2_mbus_type bus_type;
+ /*
+ * Stores bus.mipi_csi2.flags for CSI2 sensors, or
+ * bus.mipi_csi1.strobe for CCP2.
+ */
+ unsigned int bus_flags;
+ unsigned int max_data_lanes;
+
+ struct {
+ struct media_pipeline pipe;
+ unsigned int num_data_lanes;
+ unsigned int nodes;
+ } pipe;
+
+ /* Lock used for the video devices of both nodes */
+ struct mutex lock;
+ struct unicam_node node[UNICAM_MAX_NODES];
+};
+
+static inline struct unicam_device *
+notifier_to_unicam_device(struct v4l2_async_notifier *notifier)
+{
+ return container_of(notifier, struct unicam_device, notifier);
+}
+
+static inline struct unicam_device *
+sd_to_unicam_device(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct unicam_device, subdev.sd);
+}
+
+static void unicam_release(struct kref *kref)
+{
+ struct unicam_device *unicam =
+ container_of(kref, struct unicam_device, kref);
+
+ if (unicam->mdev.dev)
+ media_device_cleanup(&unicam->mdev);
+
+ mutex_destroy(&unicam->lock);
+ kfree(unicam);
+}
+
+static struct unicam_device *unicam_get(struct unicam_device *unicam)
+{
+ kref_get(&unicam->kref);
+
+ return unicam;
+}
+
+static void unicam_put(struct unicam_device *unicam)
+{
+ kref_put(&unicam->kref, unicam_release);
+}
+
+/* -----------------------------------------------------------------------------
+ * Misc helper functions
+ */
+
+static inline bool unicam_sd_pad_is_source(u32 pad)
+{
+ /* Camera RX has 1 sink pad, and N source pads */
+ return pad != UNICAM_SD_PAD_SINK;
+}
+
+static inline bool is_metadata_node(struct unicam_node *node)
+{
+ return node->video_dev.device_caps & V4L2_CAP_META_CAPTURE;
+}
+
+static inline bool is_image_node(struct unicam_node *node)
+{
+ return node->video_dev.device_caps & V4L2_CAP_VIDEO_CAPTURE;
+}
+
+/* -----------------------------------------------------------------------------
+ * Format data table and helper functions
+ */
+
+static const struct v4l2_mbus_framefmt unicam_default_image_format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ .flags = 0,
+};
+
+static const struct v4l2_mbus_framefmt unicam_default_meta_format = {
+ .width = 640,
+ .height = 2,
+ .code = MEDIA_BUS_FMT_META_8,
+ .field = V4L2_FIELD_NONE,
+};
+
+static const struct unicam_format_info unicam_image_formats[] = {
+ /* YUV Formats */
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .depth = 16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .depth = 16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .depth = 16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .depth = 16,
+ .csi_dt = MIPI_CSI2_DT_YUV422_8B,
+ }, {
+ /* RGB Formats */
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .depth = 16,
+ .csi_dt = MIPI_CSI2_DT_RGB565,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .depth = 24,
+ .csi_dt = MIPI_CSI2_DT_RGB888,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .depth = 24,
+ .csi_dt = MIPI_CSI2_DT_RGB888,
+ }, {
+ /* Bayer Formats */
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .depth = 8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .depth = 8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .depth = 8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .depth = 8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .depth = 10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .unpack = UNICAM_PUM_UNPACK10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .depth = 10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .unpack = UNICAM_PUM_UNPACK10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .depth = 10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .unpack = UNICAM_PUM_UNPACK10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .depth = 10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .unpack = UNICAM_PUM_UNPACK10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .depth = 12,
+ .csi_dt = MIPI_CSI2_DT_RAW12,
+ .unpack = UNICAM_PUM_UNPACK12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .depth = 12,
+ .csi_dt = MIPI_CSI2_DT_RAW12,
+ .unpack = UNICAM_PUM_UNPACK12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .depth = 12,
+ .csi_dt = MIPI_CSI2_DT_RAW12,
+ .unpack = UNICAM_PUM_UNPACK12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .depth = 12,
+ .csi_dt = MIPI_CSI2_DT_RAW12,
+ .unpack = UNICAM_PUM_UNPACK12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR14P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SBGGR14,
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .depth = 14,
+ .csi_dt = MIPI_CSI2_DT_RAW14,
+ .unpack = UNICAM_PUM_UNPACK14,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG14P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGBRG14,
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .depth = 14,
+ .csi_dt = MIPI_CSI2_DT_RAW14,
+ .unpack = UNICAM_PUM_UNPACK14,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG14P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SGRBG14,
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .depth = 14,
+ .csi_dt = MIPI_CSI2_DT_RAW14,
+ .unpack = UNICAM_PUM_UNPACK14,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB14P,
+ .unpacked_fourcc = V4L2_PIX_FMT_SRGGB14,
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .depth = 14,
+ .csi_dt = MIPI_CSI2_DT_RAW14,
+ .unpack = UNICAM_PUM_UNPACK14,
+ }, {
+ /* 16 bit Bayer formats could be supported. */
+
+ /* Greyscale formats */
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .depth = 8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10P,
+ .unpacked_fourcc = V4L2_PIX_FMT_Y10,
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .depth = 10,
+ .csi_dt = MIPI_CSI2_DT_RAW10,
+ .unpack = UNICAM_PUM_UNPACK10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12P,
+ .unpacked_fourcc = V4L2_PIX_FMT_Y12,
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .depth = 12,
+ .csi_dt = MIPI_CSI2_DT_RAW12,
+ .unpack = UNICAM_PUM_UNPACK12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y14P,
+ .unpacked_fourcc = V4L2_PIX_FMT_Y14,
+ .code = MEDIA_BUS_FMT_Y14_1X14,
+ .depth = 14,
+ .csi_dt = MIPI_CSI2_DT_RAW14,
+ .unpack = UNICAM_PUM_UNPACK14,
+ },
+};
+
+static const struct unicam_format_info unicam_meta_formats[] = {
+ {
+ .fourcc = V4L2_META_FMT_GENERIC_8,
+ .code = MEDIA_BUS_FMT_META_8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_META_FMT_GENERIC_CSI2_10,
+ .code = MEDIA_BUS_FMT_META_10,
+ .depth = 10,
+ }, {
+ .fourcc = V4L2_META_FMT_GENERIC_CSI2_12,
+ .code = MEDIA_BUS_FMT_META_12,
+ .depth = 12,
+ }, {
+ .fourcc = V4L2_META_FMT_GENERIC_CSI2_14,
+ .code = MEDIA_BUS_FMT_META_14,
+ .depth = 14,
+ },
+};
+
+/* Format setup functions */
+static const struct unicam_format_info *
+unicam_find_format_by_code(u32 code, u32 pad)
+{
+ const struct unicam_format_info *formats;
+ unsigned int num_formats;
+ unsigned int i;
+
+ if (pad == UNICAM_SD_PAD_SOURCE_IMAGE) {
+ formats = unicam_image_formats;
+ num_formats = ARRAY_SIZE(unicam_image_formats);
+ } else {
+ formats = unicam_meta_formats;
+ num_formats = ARRAY_SIZE(unicam_meta_formats);
+ }
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct unicam_format_info *
+unicam_find_format_by_fourcc(u32 fourcc, u32 pad)
+{
+ const struct unicam_format_info *formats;
+ unsigned int num_formats;
+ unsigned int i;
+
+ if (pad == UNICAM_SD_PAD_SOURCE_IMAGE) {
+ formats = unicam_image_formats;
+ num_formats = ARRAY_SIZE(unicam_image_formats);
+ } else {
+ formats = unicam_meta_formats;
+ num_formats = ARRAY_SIZE(unicam_meta_formats);
+ }
+
+ for (i = 0; i < num_formats; ++i) {
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static void unicam_calc_image_size_bpl(struct unicam_device *unicam,
+ const struct unicam_format_info *fmtinfo,
+ struct v4l2_pix_format *pix)
+{
+ u32 min_bpl;
+
+ v4l_bound_align_image(&pix->width, UNICAM_IMAGE_MIN_WIDTH,
+ UNICAM_IMAGE_MAX_WIDTH, 2,
+ &pix->height, UNICAM_IMAGE_MIN_HEIGHT,
+ UNICAM_IMAGE_MAX_HEIGHT, 0, 0);
+
+ /* Unpacking always goes to 16bpp */
+ if (pix->pixelformat == fmtinfo->unpacked_fourcc)
+ min_bpl = pix->width * 2;
+ else
+ min_bpl = pix->width * fmtinfo->depth / 8;
+ min_bpl = ALIGN(min_bpl, UNICAM_IMAGE_BPL_ALIGNMENT);
+
+ pix->bytesperline = ALIGN(pix->bytesperline, UNICAM_IMAGE_BPL_ALIGNMENT);
+ pix->bytesperline = clamp_t(unsigned int, pix->bytesperline, min_bpl,
+ UNICAM_IMAGE_MAX_BPL);
+
+ pix->sizeimage = pix->height * pix->bytesperline;
+}
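+
+/*
+ * Worked example (illustrative only, with assumed values): a 640x480
+ * V4L2_PIX_FMT_SRGGB10P capture has depth 10, so min_bpl = 640 * 10 / 8 =
+ * 800 bytes, already a multiple of the 32-byte alignment. A zero
+ * bytesperline request is clamped up to 800, giving sizeimage =
+ * 480 * 800 = 384000 bytes. Requesting the unpacked V4L2_PIX_FMT_SRGGB10
+ * instead uses 16bpp, i.e. min_bpl = 640 * 2 = 1280 bytes.
+ */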
+
+static void unicam_calc_meta_size_bpl(struct unicam_device *unicam,
+ const struct unicam_format_info *fmtinfo,
+ struct v4l2_meta_format *meta)
+{
+ v4l_bound_align_image(&meta->width, UNICAM_META_MIN_WIDTH,
+ UNICAM_META_MAX_WIDTH, 0,
+ &meta->height, UNICAM_META_MIN_HEIGHT,
+ UNICAM_META_MAX_HEIGHT, 0, 0);
+
+ meta->bytesperline = ALIGN(meta->width * fmtinfo->depth / 8,
+ UNICAM_DMA_BPL_ALIGNMENT);
+ meta->buffersize = meta->height * meta->bytesperline;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware handling
+ */
+
+static inline void unicam_clk_write(struct unicam_device *unicam, u32 val)
+{
+ /* Pass the CM_PASSWORD along with the value. */
+ writel(val | 0x5a000000, unicam->clk_gate_base);
+}
+
+static inline u32 unicam_reg_read(struct unicam_device *unicam, u32 offset)
+{
+ return readl(unicam->base + offset);
+}
+
+static inline void unicam_reg_write(struct unicam_device *unicam, u32 offset, u32 val)
+{
+ writel(val, unicam->base + offset);
+}
+
+static inline int unicam_get_field(u32 value, u32 mask)
+{
+ return (value & mask) >> __ffs(mask);
+}
+
+static inline void unicam_set_field(u32 *valp, u32 field, u32 mask)
+{
+ u32 val = *valp;
+
+ val &= ~mask;
+ val |= (field << __ffs(mask)) & mask;
+ *valp = val;
+}
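+
+/*
+ * Worked example (illustrative only): with val = 0,
+ * unicam_set_field(&val, 2, UNICAM_PT_MASK) shifts 2 into GENMASK(2, 1),
+ * i.e. val becomes 0x4, and unicam_get_field(0x4, UNICAM_PT_MASK) reads the
+ * value 2 back out again.
+ */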
+
+static inline void unicam_reg_write_field(struct unicam_device *unicam, u32 offset,
+ u32 field, u32 mask)
+{
+ u32 val = unicam_reg_read(unicam, offset);
+
+ unicam_set_field(&val, field, mask);
+ unicam_reg_write(unicam, offset, val);
+}
+
+static void unicam_wr_dma_addr(struct unicam_node *node,
+ struct unicam_buffer *buf)
+{
+ dma_addr_t endaddr = buf->dma_addr + buf->size;
+
+ if (node->id == UNICAM_IMAGE_NODE) {
+ unicam_reg_write(node->dev, UNICAM_IBSA0, buf->dma_addr);
+ unicam_reg_write(node->dev, UNICAM_IBEA0, endaddr);
+ } else {
+ unicam_reg_write(node->dev, UNICAM_DBSA0, buf->dma_addr);
+ unicam_reg_write(node->dev, UNICAM_DBEA0, endaddr);
+ }
+}
+
+static unsigned int unicam_get_lines_done(struct unicam_device *unicam)
+{
+ struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
+ unsigned int stride = node->fmt.fmt.pix.bytesperline;
+ struct unicam_buffer *frm = node->cur_frm;
+ dma_addr_t cur_addr;
+
+ if (!frm)
+ return 0;
+
+ cur_addr = unicam_reg_read(unicam, UNICAM_IBWP);
+ return (unsigned int)(cur_addr - frm->dma_addr) / stride;
+}
+
+static void unicam_schedule_next_buffer(struct unicam_node *node)
+{
+ struct unicam_buffer *buf;
+
+ buf = list_first_entry(&node->dma_queue, struct unicam_buffer, list);
+ node->next_frm = buf;
+ list_del(&buf->list);
+
+ unicam_wr_dma_addr(node, buf);
+}
+
+static void unicam_schedule_dummy_buffer(struct unicam_node *node)
+{
+ int node_id = is_image_node(node) ? UNICAM_IMAGE_NODE : UNICAM_METADATA_NODE;
+
+ dev_dbg(node->dev->dev, "Scheduling dummy buffer for node %d\n", node_id);
+
+ unicam_wr_dma_addr(node, &node->dummy_buf);
+
+ node->next_frm = NULL;
+}
+
+static void unicam_process_buffer_complete(struct unicam_node *node,
+ unsigned int sequence)
+{
+ node->cur_frm->vb.field = node->fmt.fmt.pix.field;
+ node->cur_frm->vb.sequence = sequence;
+
+ vb2_buffer_done(&node->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static void unicam_queue_event_sof(struct unicam_device *unicam)
+{
+ struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ .u.frame_sync.frame_sequence = unicam->sequence,
+ };
+
+ v4l2_event_queue(&node->video_dev, &event);
+}
+
+static irqreturn_t unicam_isr(int irq, void *dev)
+{
+ struct unicam_device *unicam = dev;
+ unsigned int lines_done = unicam_get_lines_done(dev);
+ unsigned int sequence = unicam->sequence;
+ unsigned int i;
+ u32 ista, sta;
+ bool fe;
+ u64 ts;
+
+ sta = unicam_reg_read(unicam, UNICAM_STA);
+ /* Write value back to clear the interrupts */
+ unicam_reg_write(unicam, UNICAM_STA, sta);
+
+ ista = unicam_reg_read(unicam, UNICAM_ISTA);
+ /* Write value back to clear the interrupts */
+ unicam_reg_write(unicam, UNICAM_ISTA, ista);
+
+ dev_dbg(unicam->dev, "ISR: ISTA: 0x%X, STA: 0x%X, sequence %d, lines done %d\n",
+ ista, sta, sequence, lines_done);
+
+ if (!(sta & (UNICAM_IS | UNICAM_PI0)))
+ return IRQ_HANDLED;
+
+ /*
+ * Look for either the Frame End interrupt or the Packet Capture status
+ * to signal a frame end.
+ */
+ fe = ista & UNICAM_FEI || sta & UNICAM_PI0;
+
+ /*
+ * We must run the frame end handler first. If we have a valid next_frm
+ * and we get a simultaneous FE + FS interrupt, running the FS handler
+ * first would null out the next_frm ptr and we would have lost the
+ * buffer forever.
+ */
+ if (fe) {
+ /*
+ * Ensure we have swapped buffers already as we can't
+ * stop the peripheral. If no buffer is available, use a
+ * dummy buffer to dump out frames until we get a new buffer
+ * to use.
+ */
+ for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
+ struct unicam_node *node = &unicam->node[i];
+
+ if (!vb2_start_streaming_called(&node->buffer_queue))
+ continue;
+
+ /*
+ * If cur_frm == next_frm, it means we have not had
+ * a chance to swap buffers, likely due to having
+ * multiple interrupts occurring simultaneously (like FE
+ * + FS + LS). In this case, we cannot signal the buffer
+ * as complete, as the HW will reuse that buffer.
+ */
+ if (node->cur_frm && node->cur_frm != node->next_frm)
+ unicam_process_buffer_complete(node, sequence);
+ node->cur_frm = node->next_frm;
+ }
+ unicam->sequence++;
+ }
+
+ if (ista & UNICAM_FSI) {
+ /*
+ * Timestamp is to be when the first data byte was captured,
+ * aka frame start.
+ */
+ ts = ktime_get_ns();
+ for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
+ struct unicam_node *node = &unicam->node[i];
+
+ if (!vb2_start_streaming_called(&node->buffer_queue))
+ continue;
+
+ if (node->cur_frm)
+ node->cur_frm->vb.vb2_buf.timestamp = ts;
+ else
+ dev_dbg(unicam->v4l2_dev.dev,
+ "ISR: [%d] Dropping frame, buffer not available at FS\n",
+ i);
+ /*
+ * Set the next frame output to go to a dummy frame
+ * if we have not managed to obtain another frame
+ * from the queue.
+ */
+ unicam_schedule_dummy_buffer(node);
+ }
+
+ unicam_queue_event_sof(unicam);
+ }
+
+ /*
+ * Cannot swap the buffer at frame end: there may be a race condition
+ * where the HW does not actually swap it if the new frame has
+ * already started.
+ */
+ if (ista & (UNICAM_FSI | UNICAM_LCI) && !fe) {
+ for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
+ struct unicam_node *node = &unicam->node[i];
+
+ if (!vb2_start_streaming_called(&node->buffer_queue))
+ continue;
+
+ spin_lock(&node->dma_queue_lock);
+ if (!list_empty(&node->dma_queue) && !node->next_frm)
+ unicam_schedule_next_buffer(node);
+ spin_unlock(&node->dma_queue_lock);
+ }
+ }
+
+ if (unicam_reg_read(unicam, UNICAM_ICTL) & UNICAM_FCM) {
+ /* Switch out of trigger mode if selected */
+ unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
+ unicam_reg_write_field(unicam, UNICAM_ICTL, 0, UNICAM_FCM);
+ }
+ return IRQ_HANDLED;
+}
+
+static void unicam_set_packing_config(struct unicam_device *unicam,
+ const struct unicam_format_info *fmtinfo)
+{
+ struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
+ u32 pack, unpack;
+ u32 val;
+
+ if (node->fmt.fmt.pix.pixelformat == fmtinfo->fourcc) {
+ unpack = UNICAM_PUM_NONE;
+ pack = UNICAM_PPM_NONE;
+ } else {
+ unpack = fmtinfo->unpack;
+ /* Repacking is always to 16bpp */
+ pack = UNICAM_PPM_PACK16;
+ }
+
+ val = 0;
+ unicam_set_field(&val, unpack, UNICAM_PUM_MASK);
+ unicam_set_field(&val, pack, UNICAM_PPM_MASK);
+ unicam_reg_write(unicam, UNICAM_IPIPE, val);
+}
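+
+/*
+ * Worked example (illustrative only): if the image node is configured for
+ * V4L2_PIX_FMT_SRGGB10 while the bus carries MEDIA_BUS_FMT_SRGGB10_1X10
+ * (packed fourcc V4L2_PIX_FMT_SRGGB10P), the pixelformat differs from
+ * fmtinfo->fourcc, so unpack = UNICAM_PUM_UNPACK10 and pack =
+ * UNICAM_PPM_PACK16, giving UNICAM_IPIPE = (5 << 7) | 4 = 0x284.
+ */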
+
+static void unicam_cfg_image_id(struct unicam_device *unicam, u8 vc, u8 dt)
+{
+ if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) {
+ /* CSI2 mode */
+ unicam_reg_write(unicam, UNICAM_IDI0, (vc << 6) | dt);
+ } else {
+ /* CCP2 mode */
+ unicam_reg_write(unicam, UNICAM_IDI0, 0x80 | dt);
+ }
+}
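+
+/*
+ * Worked example (illustrative only): in CSI-2 mode with virtual channel 0
+ * and a RAW10 payload (MIPI_CSI2_DT_RAW10 == 0x2b), UNICAM_IDI0 is written
+ * with (0 << 6) | 0x2b = 0x2b.
+ */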
+
+static void unicam_enable_ed(struct unicam_device *unicam)
+{
+ u32 val = unicam_reg_read(unicam, UNICAM_DCS);
+
+ unicam_set_field(&val, 2, UNICAM_EDL_MASK);
+ /* Do not wrap at the end of the embedded data buffer */
+ unicam_set_field(&val, 0, UNICAM_DBOB);
+
+ unicam_reg_write(unicam, UNICAM_DCS, val);
+}
+
+static int unicam_get_image_vc_dt(struct unicam_device *unicam,
+ struct v4l2_subdev_state *state,
+ u8 *vc, u8 *dt)
+{
+ struct v4l2_mbus_frame_desc fd;
+ u32 stream;
+ int ret;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ UNICAM_SD_PAD_SOURCE_IMAGE,
+ 0, NULL, &stream);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(unicam->sensor.subdev, pad, get_frame_desc,
+ unicam->sensor.pad->index, &fd);
+ if (ret)
+ return ret;
+
+ /* Only CSI-2 supports DTs. */
+ if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+ return -EINVAL;
+
+ for (unsigned int i = 0; i < fd.num_entries; ++i) {
+ const struct v4l2_mbus_frame_desc_entry *fde = &fd.entry[i];
+
+ if (fde->stream == stream) {
+ *vc = fde->bus.csi2.vc;
+ *dt = fde->bus.csi2.dt;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void unicam_start_rx(struct unicam_device *unicam,
+ struct v4l2_subdev_state *state)
+{
+ struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
+ const struct unicam_format_info *fmtinfo;
+ const struct v4l2_mbus_framefmt *fmt;
+ unsigned int line_int_freq;
+ u8 vc, dt;
+ u32 val;
+ int ret;
+
+ fmt = v4l2_subdev_state_get_format(state, UNICAM_SD_PAD_SOURCE_IMAGE, 0);
+ fmtinfo = unicam_find_format_by_code(fmt->code,
+ UNICAM_SD_PAD_SOURCE_IMAGE);
+ if (WARN_ON(!fmtinfo))
+ return;
+
+ /*
+ * Enable lane clocks. The register is structured as follows:
+ *
+ * [9:8] - DAT3
+ * [7:6] - DAT2
+ * [5:4] - DAT1
+ * [3:2] - DAT0
+ * [1:0] - CLK
+ *
+ * Enabled lanes must be set to b01, and disabled lanes to b00. The clock
+ * lane is always enabled.
+ */
+ val = 0x155 & GENMASK(unicam->pipe.num_data_lanes * 2 + 1, 0);
+ unicam_clk_write(unicam, val);
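+
+ /*
+ * Worked example (illustrative only): with two data lanes,
+ * GENMASK(5, 0) = 0x3f and 0x155 & 0x3f = 0x15, i.e. b01 for CLK,
+ * DAT0 and DAT1, while DAT2 and DAT3 stay disabled (b00).
+ */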
+
+ /* Basic init */
+ unicam_reg_write(unicam, UNICAM_CTRL, UNICAM_MEM);
+
+ /* Enable analogue control, and leave in reset. */
+ val = UNICAM_AR;
+ unicam_set_field(&val, 7, UNICAM_CTATADJ_MASK);
+ unicam_set_field(&val, 7, UNICAM_PTATADJ_MASK);
+ unicam_reg_write(unicam, UNICAM_ANA, val);
+ usleep_range(1000, 2000);
+
+ /* Come out of reset */
+ unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_AR);
+
+ /* Peripheral reset */
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 1, UNICAM_CPR);
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 0, UNICAM_CPR);
+
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 0, UNICAM_CPE);
+
+ /* Enable Rx control. */
+ val = unicam_reg_read(unicam, UNICAM_CTRL);
+ if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) {
+ unicam_set_field(&val, UNICAM_CPM_CSI2, UNICAM_CPM_MASK);
+ unicam_set_field(&val, UNICAM_DCM_STROBE, UNICAM_DCM_MASK);
+ } else {
+ unicam_set_field(&val, UNICAM_CPM_CCP2, UNICAM_CPM_MASK);
+ unicam_set_field(&val, unicam->bus_flags, UNICAM_DCM_MASK);
+ }
+ /* Packet framer timeout */
+ unicam_set_field(&val, 0xf, UNICAM_PFT_MASK);
+ unicam_set_field(&val, 128, UNICAM_OET_MASK);
+ unicam_reg_write(unicam, UNICAM_CTRL, val);
+
+ unicam_reg_write(unicam, UNICAM_IHWIN, 0);
+ unicam_reg_write(unicam, UNICAM_IVWIN, 0);
+
+ /* AXI bus access QoS setup */
+ val = unicam_reg_read(unicam, UNICAM_PRI);
+ unicam_set_field(&val, 0, UNICAM_BL_MASK);
+ unicam_set_field(&val, 0, UNICAM_BS_MASK);
+ unicam_set_field(&val, 0xe, UNICAM_PP_MASK);
+ unicam_set_field(&val, 8, UNICAM_NP_MASK);
+ unicam_set_field(&val, 2, UNICAM_PT_MASK);
+ unicam_set_field(&val, 1, UNICAM_PE);
+ unicam_reg_write(unicam, UNICAM_PRI, val);
+
+ unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_DDL);
+
+ /* Always start in trigger frame capture mode (UNICAM_FCM set) */
+ val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_FCM | UNICAM_IBOB;
+ line_int_freq = max(fmt->height >> 2, 128);
+ unicam_set_field(&val, line_int_freq, UNICAM_LCIE_MASK);
+ unicam_reg_write(unicam, UNICAM_ICTL, val);
+ unicam_reg_write(unicam, UNICAM_STA, UNICAM_STA_MASK_ALL);
+ unicam_reg_write(unicam, UNICAM_ISTA, UNICAM_ISTA_MASK_ALL);
+
+ /* tclk_term_en */
+ unicam_reg_write_field(unicam, UNICAM_CLT, 2, UNICAM_CLT1_MASK);
+ /* tclk_settle */
+ unicam_reg_write_field(unicam, UNICAM_CLT, 6, UNICAM_CLT2_MASK);
+ /* td_term_en */
+ unicam_reg_write_field(unicam, UNICAM_DLT, 2, UNICAM_DLT1_MASK);
+ /* ths_settle */
+ unicam_reg_write_field(unicam, UNICAM_DLT, 6, UNICAM_DLT2_MASK);
+ /* trx_enable */
+ unicam_reg_write_field(unicam, UNICAM_DLT, 0, UNICAM_DLT3_MASK);
+
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 0, UNICAM_SOE);
+
+ /* Packet compare setup - required to avoid missing frame ends */
+ val = 0;
+ unicam_set_field(&val, 1, UNICAM_PCE);
+ unicam_set_field(&val, 1, UNICAM_GI);
+ unicam_set_field(&val, 1, UNICAM_CPH);
+ unicam_set_field(&val, 0, UNICAM_PCVC_MASK);
+ unicam_set_field(&val, 1, UNICAM_PCDT_MASK);
+ unicam_reg_write(unicam, UNICAM_CMP0, val);
+
+ /* Enable clock lane and set up terminations */
+ val = 0;
+ if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) {
+ /* CSI2 */
+ unicam_set_field(&val, 1, UNICAM_CLE);
+ unicam_set_field(&val, 1, UNICAM_CLLPE);
+ if (!(unicam->bus_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK)) {
+ unicam_set_field(&val, 1, UNICAM_CLTRE);
+ unicam_set_field(&val, 1, UNICAM_CLHSE);
+ }
+ } else {
+ /* CCP2 */
+ unicam_set_field(&val, 1, UNICAM_CLE);
+ unicam_set_field(&val, 1, UNICAM_CLHSE);
+ unicam_set_field(&val, 1, UNICAM_CLTRE);
+ }
+ unicam_reg_write(unicam, UNICAM_CLK, val);
+
+ /*
+ * Enable required data lanes with appropriate terminations.
+ * The same value needs to be written to UNICAM_DATn registers for
+ * the active lanes, and 0 for inactive ones.
+ */
+ val = 0;
+ if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) {
+ /* CSI2 */
+ unicam_set_field(&val, 1, UNICAM_DLE);
+ unicam_set_field(&val, 1, UNICAM_DLLPE);
+ if (!(unicam->bus_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK)) {
+ unicam_set_field(&val, 1, UNICAM_DLTRE);
+ unicam_set_field(&val, 1, UNICAM_DLHSE);
+ }
+ } else {
+ /* CCP2 */
+ unicam_set_field(&val, 1, UNICAM_DLE);
+ unicam_set_field(&val, 1, UNICAM_DLHSE);
+ unicam_set_field(&val, 1, UNICAM_DLTRE);
+ }
+ unicam_reg_write(unicam, UNICAM_DAT0, val);
+
+ if (unicam->pipe.num_data_lanes == 1)
+ val = 0;
+ unicam_reg_write(unicam, UNICAM_DAT1, val);
+
+ if (unicam->max_data_lanes > 2) {
+ /*
+ * Registers UNICAM_DAT2 and UNICAM_DAT3 are only valid if the
+ * instance supports more than 2 data lanes.
+ */
+ if (unicam->pipe.num_data_lanes == 2)
+ val = 0;
+ unicam_reg_write(unicam, UNICAM_DAT2, val);
+
+ if (unicam->pipe.num_data_lanes == 3)
+ val = 0;
+ unicam_reg_write(unicam, UNICAM_DAT3, val);
+ }
+
+ unicam_reg_write(unicam, UNICAM_IBLS,
+ node->fmt.fmt.pix.bytesperline);
+ unicam_wr_dma_addr(node, node->cur_frm);
+ unicam_set_packing_config(unicam, fmtinfo);
+
+ ret = unicam_get_image_vc_dt(unicam, state, &vc, &dt);
+ if (ret) {
+ /*
+ * If the source doesn't support frame descriptors, default to
+ * VC 0 and use the DT corresponding to the format.
+ */
+ vc = 0;
+ dt = fmtinfo->csi_dt;
+ }
+
+ unicam_cfg_image_id(unicam, vc, dt);
+
+ val = unicam_reg_read(unicam, UNICAM_MISC);
+ unicam_set_field(&val, 1, UNICAM_FL0);
+ unicam_set_field(&val, 1, UNICAM_FL1);
+ unicam_reg_write(unicam, UNICAM_MISC, val);
+
+ /* Enable peripheral */
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 1, UNICAM_CPE);
+
+ /* Load image pointers */
+ unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_LIP_MASK);
+
+ /*
+ * Enable trigger only for the first frame to
+ * sync correctly to the FS from the source.
+ */
+ unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
+}
+
+static void unicam_start_metadata(struct unicam_device *unicam)
+{
+ struct unicam_node *node = &unicam->node[UNICAM_METADATA_NODE];
+
+ unicam_enable_ed(unicam);
+ unicam_wr_dma_addr(node, node->cur_frm);
+ unicam_reg_write_field(unicam, UNICAM_DCS, 1, UNICAM_LDP);
+}
+
+static void unicam_disable(struct unicam_device *unicam)
+{
+ /* Analogue lane control disable */
+ unicam_reg_write_field(unicam, UNICAM_ANA, 1, UNICAM_DDL);
+
+ /* Stop the output engine */
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 1, UNICAM_SOE);
+
+ /* Disable the data lanes. */
+ unicam_reg_write(unicam, UNICAM_DAT0, 0);
+ unicam_reg_write(unicam, UNICAM_DAT1, 0);
+
+ if (unicam->max_data_lanes > 2) {
+ unicam_reg_write(unicam, UNICAM_DAT2, 0);
+ unicam_reg_write(unicam, UNICAM_DAT3, 0);
+ }
+
+ /* Peripheral reset */
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 1, UNICAM_CPR);
+ usleep_range(50, 100);
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 0, UNICAM_CPR);
+
+ /* Disable peripheral */
+ unicam_reg_write_field(unicam, UNICAM_CTRL, 0, UNICAM_CPE);
+
+ /* Clear ED setup */
+ unicam_reg_write(unicam, UNICAM_DCS, 0);
+
+ /* Disable all lane clocks */
+ unicam_clk_write(unicam, 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static int __unicam_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_route *route;
+ int ret;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, routing);
+ if (ret)
+ return ret;
+
+ for_each_active_route(&state->routing, route) {
+ const struct v4l2_mbus_framefmt *def_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (route->source_pad == UNICAM_SD_PAD_SOURCE_IMAGE)
+ def_fmt = &unicam_default_image_format;
+ else
+ def_fmt = &unicam_default_meta_format;
+
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
+ *fmt = *def_fmt;
+ fmt = v4l2_subdev_state_get_format(state, route->source_pad,
+ route->source_stream);
+ *fmt = *def_fmt;
+ }
+
+ return 0;
+}
+
+static int unicam_subdev_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = UNICAM_SD_PAD_SINK,
+ .sink_stream = 0,
+ .source_pad = UNICAM_SD_PAD_SOURCE_IMAGE,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .len_routes = ARRAY_SIZE(routes),
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ /* Initialize the routing to a single route to the first source pad. */
+ return __unicam_subdev_set_routing(sd, state, &routing);
+}
+
+static int unicam_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ u32 pad, stream;
+ int ret;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ code->pad, code->stream,
+ &pad, &stream);
+ if (ret)
+ return ret;
+
+ if (unicam_sd_pad_is_source(code->pad)) {
+ /* No transcoding, source and sink codes must match. */
+ const struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, pad, stream);
+ if (!fmt)
+ return -EINVAL;
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = fmt->code;
+ } else {
+ const struct unicam_format_info *formats;
+ unsigned int num_formats;
+
+ if (pad == UNICAM_SD_PAD_SOURCE_IMAGE) {
+ formats = unicam_image_formats;
+ num_formats = ARRAY_SIZE(unicam_image_formats);
+ } else {
+ formats = unicam_meta_formats;
+ num_formats = ARRAY_SIZE(unicam_meta_formats);
+ }
+
+ if (code->index >= num_formats)
+ return -EINVAL;
+
+ code->code = formats[code->index].code;
+ }
+
+ return 0;
+}
+
+static int unicam_subdev_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ u32 pad, stream;
+ int ret;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, fse->pad,
+ fse->stream, &pad,
+ &stream);
+ if (ret)
+ return ret;
+
+ if (unicam_sd_pad_is_source(fse->pad)) {
+ /* No transcoding, source and sink formats must match. */
+ const struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, pad, stream);
+ if (!fmt)
+ return -EINVAL;
+
+ if (fse->code != fmt->code)
+ return -EINVAL;
+
+ fse->min_width = fmt->width;
+ fse->max_width = fmt->width;
+ fse->min_height = fmt->height;
+ fse->max_height = fmt->height;
+ } else {
+ const struct unicam_format_info *fmtinfo;
+
+ fmtinfo = unicam_find_format_by_code(fse->code, pad);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ if (pad == UNICAM_SD_PAD_SOURCE_IMAGE) {
+ fse->min_width = UNICAM_IMAGE_MIN_WIDTH;
+ fse->max_width = UNICAM_IMAGE_MAX_WIDTH;
+ fse->min_height = UNICAM_IMAGE_MIN_HEIGHT;
+ fse->max_height = UNICAM_IMAGE_MAX_HEIGHT;
+ } else {
+ fse->min_width = UNICAM_META_MIN_WIDTH;
+ fse->max_width = UNICAM_META_MAX_WIDTH;
+ fse->min_height = UNICAM_META_MIN_HEIGHT;
+ fse->max_height = UNICAM_META_MAX_HEIGHT;
+ }
+ }
+
+ return 0;
+}
+
+static int unicam_subdev_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct unicam_device *unicam = sd_to_unicam_device(sd);
+ struct v4l2_mbus_framefmt *sink_format, *source_format;
+ const struct unicam_format_info *fmtinfo;
+ u32 source_pad, source_stream;
+ int ret;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+ unicam->subdev.enabled_streams)
+ return -EBUSY;
+
+ /* No transcoding, source and sink formats must match. */
+ if (unicam_sd_pad_is_source(format->pad))
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ /*
+ * Allowed formats for the stream on the sink pad depend on what source
+ * pad the stream is routed to. Find the corresponding source pad and
+ * use it to validate the media bus code.
+ */
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ format->pad, format->stream,
+ &source_pad, &source_stream);
+ if (ret)
+ return ret;
+
+ fmtinfo = unicam_find_format_by_code(format->format.code, source_pad);
+ if (!fmtinfo) {
+ fmtinfo = source_pad == UNICAM_SD_PAD_SOURCE_IMAGE
+ ? &unicam_image_formats[0] : &unicam_meta_formats[0];
+ format->format.code = fmtinfo->code;
+ }
+
+ if (source_pad == UNICAM_SD_PAD_SOURCE_IMAGE) {
+ format->format.width = clamp_t(unsigned int,
+ format->format.width,
+ UNICAM_IMAGE_MIN_WIDTH,
+ UNICAM_IMAGE_MAX_WIDTH);
+ format->format.height = clamp_t(unsigned int,
+ format->format.height,
+ UNICAM_IMAGE_MIN_HEIGHT,
+ UNICAM_IMAGE_MAX_HEIGHT);
+ format->format.field = V4L2_FIELD_NONE;
+ } else {
+ format->format.width = clamp_t(unsigned int,
+ format->format.width,
+ UNICAM_META_MIN_WIDTH,
+ UNICAM_META_MAX_WIDTH);
+ format->format.height = clamp_t(unsigned int,
+ format->format.height,
+ UNICAM_META_MIN_HEIGHT,
+ UNICAM_META_MAX_HEIGHT);
+ format->format.field = V4L2_FIELD_NONE;
+
+ /* Colorspace fields don't apply to metadata. */
+ format->format.colorspace = 0;
+ format->format.ycbcr_enc = 0;
+ format->format.quantization = 0;
+ format->format.xfer_func = 0;
+ }
+
+ sink_format = v4l2_subdev_state_get_format(state, format->pad,
+ format->stream);
+ source_format = v4l2_subdev_state_get_format(state, source_pad,
+ source_stream);
+ *sink_format = format->format;
+ *source_format = format->format;
+
+ return 0;
+}
+
+static int unicam_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct unicam_device *unicam = sd_to_unicam_device(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && unicam->subdev.enabled_streams)
+ return -EBUSY;
+
+ return __unicam_subdev_set_routing(sd, state, routing);
+}
+
+static int unicam_sd_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct unicam_device *unicam = sd_to_unicam_device(sd);
+ u32 other_pad, other_stream;
+ int ret;
+
+ if (!unicam->subdev.enabled_streams) {
+ /* Configure and start Unicam. */
+ unicam->sequence = 0;
+
+ if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE))
+ unicam_start_metadata(unicam);
+
+ unicam_start_rx(unicam, state);
+ }
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
+ &other_pad, &other_stream);
+ if (ret)
+ return ret;
+
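+ /* Enable the routed stream on the connected sensor. */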
+ ret = v4l2_subdev_enable_streams(unicam->sensor.subdev,
+ unicam->sensor.pad->index,
+ BIT(other_stream));
+ if (ret) {
+ dev_err(unicam->dev, "stream on failed in subdev\n");
+ return ret;
+ }
+
+ unicam->subdev.enabled_streams |= BIT(other_stream);
+
+ return 0;
+}
+
+static int unicam_sd_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct unicam_device *unicam = sd_to_unicam_device(sd);
+ u32 other_pad, other_stream;
+ int ret;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
+ &other_pad, &other_stream);
+ if (ret)
+ return ret;
+
+ v4l2_subdev_disable_streams(unicam->sensor.subdev,
+ unicam->sensor.pad->index,
+ BIT(other_stream));
+
+ unicam->subdev.enabled_streams &= ~BIT(other_stream);
+
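+ /* Stop the hardware once the last enabled stream has been disabled. */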
+ if (!unicam->subdev.enabled_streams)
+ unicam_disable(unicam);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops unicam_subdev_pad_ops = {
+ .enum_mbus_code = unicam_subdev_enum_mbus_code,
+ .enum_frame_size = unicam_subdev_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = unicam_subdev_set_format,
+ .set_routing = unicam_subdev_set_routing,
+ .enable_streams = unicam_sd_enable_streams,
+ .disable_streams = unicam_sd_disable_streams,
+};
+
+static const struct v4l2_subdev_ops unicam_subdev_ops = {
+ .pad = &unicam_subdev_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops unicam_subdev_internal_ops = {
+ .init_state = unicam_subdev_init_state,
+};
+
+static const struct media_entity_operations unicam_subdev_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_pad_interdep = v4l2_subdev_has_pad_interdep,
+};
+
+static int unicam_subdev_init(struct unicam_device *unicam)
+{
+ struct v4l2_subdev *sd = &unicam->subdev.sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &unicam_subdev_ops);
+ sd->internal_ops = &unicam_subdev_internal_ops;
+ v4l2_set_subdevdata(sd, unicam);
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &unicam_subdev_media_ops;
+ sd->dev = unicam->dev;
+ sd->owner = THIS_MODULE;
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
+
+ strscpy(sd->name, "unicam", sizeof(sd->name));
+
+ unicam->subdev.pads[UNICAM_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ unicam->subdev.pads[UNICAM_SD_PAD_SOURCE_IMAGE].flags = MEDIA_PAD_FL_SOURCE;
+ unicam->subdev.pads[UNICAM_SD_PAD_SOURCE_METADATA].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(unicam->subdev.pads),
+ unicam->subdev.pads);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to initialize media entity: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to initialize subdev: %d\n", ret);
+ goto err_entity;
+ }
+
+ ret = v4l2_device_register_subdev(&unicam->v4l2_dev, sd);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to register subdev: %d\n", ret);
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ v4l2_subdev_cleanup(sd);
+err_entity:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static void unicam_subdev_cleanup(struct unicam_device *unicam)
+{
+ v4l2_subdev_cleanup(&unicam->subdev.sd);
+ media_entity_cleanup(&unicam->subdev.sd.entity);
+}
+
+/* -----------------------------------------------------------------------------
+ * Videobuf2 queue operations
+ */
+
+static int unicam_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct unicam_node *node = vb2_get_drv_priv(vq);
+ u32 size = is_image_node(node) ? node->fmt.fmt.pix.sizeimage
+ : node->fmt.fmt.meta.buffersize;
+
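+ /* Honour a larger plane size requested by userspace, but never a smaller one. */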
+ if (*nplanes) {
+ if (sizes[0] < size) {
+ dev_dbg(node->dev->dev, "sizes[0] %i < size %u\n",
+ sizes[0], size);
+ return -EINVAL;
+ }
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int unicam_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct unicam_node *node = vb2_get_drv_priv(vb->vb2_queue);
+ struct unicam_buffer *buf = to_unicam_buffer(vb);
+ u32 size = is_image_node(node) ? node->fmt.fmt.pix.sizeimage
+ : node->fmt.fmt.meta.buffersize;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_dbg(node->dev->dev,
+ "data will not fit into plane (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ buf->dma_addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ buf->size = size;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+
+ return 0;
+}
+
+static void unicam_return_buffers(struct unicam_node *node,
+ enum vb2_buffer_state state)
+{
+ struct unicam_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+
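+ /* Also release any buffers currently armed in the hardware. */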
+ if (node->cur_frm)
+ vb2_buffer_done(&node->cur_frm->vb.vb2_buf,
+ state);
+ if (node->next_frm && node->cur_frm != node->next_frm)
+ vb2_buffer_done(&node->next_frm->vb.vb2_buf,
+ state);
+
+ node->cur_frm = NULL;
+ node->next_frm = NULL;
+}
+
+static int unicam_num_data_lanes(struct unicam_device *unicam)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ unsigned int num_data_lanes;
+ int ret;
+
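+ /* Lane count negotiation through get_mbus_config only applies to CSI-2 D-PHY. */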
+ if (unicam->bus_type != V4L2_MBUS_CSI2_DPHY)
+ return unicam->max_data_lanes;
+
+ ret = v4l2_subdev_call(unicam->sensor.subdev, pad, get_mbus_config,
+ unicam->sensor.pad->index, &mbus_config);
+ if (ret == -ENOIOCTLCMD)
+ return unicam->max_data_lanes;
+
+ if (ret < 0) {
+ dev_err(unicam->dev, "Failed to get mbus config: %d\n", ret);
+ return ret;
+ }
+
+ num_data_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
+
+ if (num_data_lanes != 1 && num_data_lanes != 2 && num_data_lanes != 4) {
+ dev_err(unicam->dev,
+ "Device %s has requested %u data lanes, invalid\n",
+ unicam->sensor.subdev->name, num_data_lanes);
+ return -EINVAL;
+ }
+
+ if (num_data_lanes > unicam->max_data_lanes) {
+ dev_err(unicam->dev,
+ "Device %s has requested %u data lanes, >%u configured in DT\n",
+ unicam->sensor.subdev->name, num_data_lanes,
+ unicam->max_data_lanes);
+ return -EINVAL;
+ }
+
+ return num_data_lanes;
+}
+
+static int unicam_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct unicam_node *node = vb2_get_drv_priv(vq);
+ struct unicam_device *unicam = node->dev;
+ struct unicam_buffer *buf;
+ struct media_pipeline_pad_iter iter;
+ struct media_pad *pad;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(unicam->dev, "Starting stream on %s device\n",
+ is_metadata_node(node) ? "metadata" : "image");
+
+ /*
+ * Start the pipeline. This validates all links, and populates the
+ * pipeline structure.
+ */
+ ret = video_device_pipeline_start(&node->video_dev, &unicam->pipe.pipe);
+ if (ret < 0) {
+ dev_dbg(unicam->dev, "Failed to start media pipeline: %d\n", ret);
+ goto err_buffers;
+ }
+
+ /*
+ * Determine which video nodes are included in the pipeline, and get the
+ * number of data lanes.
+ */
+ if (unicam->pipe.pipe.start_count == 1) {
+ unicam->pipe.nodes = 0;
+
+ media_pipeline_for_each_pad(&unicam->pipe.pipe, &iter, pad) {
+ if (pad->entity != &unicam->subdev.sd.entity)
+ continue;
+
+ if (pad->index == UNICAM_SD_PAD_SOURCE_IMAGE)
+ unicam->pipe.nodes |= BIT(UNICAM_IMAGE_NODE);
+ else if (pad->index == UNICAM_SD_PAD_SOURCE_METADATA)
+ unicam->pipe.nodes |= BIT(UNICAM_METADATA_NODE);
+ }
+
+ if (!(unicam->pipe.nodes & BIT(UNICAM_IMAGE_NODE))) {
+ dev_dbg(unicam->dev,
+ "Pipeline does not include image node\n");
+ ret = -EPIPE;
+ goto err_pipeline;
+ }
+
+ ret = unicam_num_data_lanes(unicam);
+ if (ret < 0)
+ goto err_pipeline;
+
+ unicam->pipe.num_data_lanes = ret;
+
+ dev_dbg(unicam->dev, "Running with %u data lanes, nodes %u\n",
+ unicam->pipe.num_data_lanes, unicam->pipe.nodes);
+ }
+
+ /* Arm the node with the first buffer from the DMA queue. */
+ spin_lock_irqsave(&node->dma_queue_lock, flags);
+ buf = list_first_entry(&node->dma_queue, struct unicam_buffer, list);
+ node->cur_frm = buf;
+ node->next_frm = buf;
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&node->dma_queue_lock, flags);
+
+ /*
+ * Wait for all the video devices in the pipeline to have been started
+ * before starting the hardware. In the general case, this would
+ * prevent capturing multiple streams independently. However, the
+ * Unicam DMA engines are not generic, they have been designed to
+ * capture image data and embedded data from the same camera sensor.
+ * Not only does the main use case not benefit from independent
+ * capture, it requires proper synchronization of the streams at start
+ * time.
+ */
+ if (unicam->pipe.pipe.start_count < hweight32(unicam->pipe.nodes))
+ return 0;
+
+ ret = pm_runtime_resume_and_get(unicam->dev);
+ if (ret < 0) {
+ dev_err(unicam->dev, "PM runtime resume failed: %d\n", ret);
+ goto err_pipeline;
+ }
+
+ /* Enable the streams on the source. */
+ ret = v4l2_subdev_enable_streams(&unicam->subdev.sd,
+ UNICAM_SD_PAD_SOURCE_IMAGE,
+ BIT(0));
+ if (ret < 0) {
+ dev_err(unicam->dev, "stream on failed in subdev\n");
+ goto err_pm_put;
+ }
+
+ if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE)) {
+ ret = v4l2_subdev_enable_streams(&unicam->subdev.sd,
+ UNICAM_SD_PAD_SOURCE_METADATA,
+ BIT(0));
+ if (ret < 0) {
+ dev_err(unicam->dev, "stream on failed in subdev\n");
+ goto err_disable_streams;
+ }
+ }
+
+ return 0;
+
+err_disable_streams:
+ v4l2_subdev_disable_streams(&unicam->subdev.sd,
+ UNICAM_SD_PAD_SOURCE_IMAGE, BIT(0));
+err_pm_put:
+ pm_runtime_put_sync(unicam->dev);
+err_pipeline:
+ video_device_pipeline_stop(&node->video_dev);
+err_buffers:
+ unicam_return_buffers(node, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void unicam_stop_streaming(struct vb2_queue *vq)
+{
+ struct unicam_node *node = vb2_get_drv_priv(vq);
+ struct unicam_device *unicam = node->dev;
+
+ /* Stop the hardware when the first video device gets stopped. */
+ if (unicam->pipe.pipe.start_count == hweight32(unicam->pipe.nodes)) {
+ if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE))
+ v4l2_subdev_disable_streams(&unicam->subdev.sd,
+ UNICAM_SD_PAD_SOURCE_METADATA,
+ BIT(0));
+
+ v4l2_subdev_disable_streams(&unicam->subdev.sd,
+ UNICAM_SD_PAD_SOURCE_IMAGE,
+ BIT(0));
+
+ pm_runtime_put(unicam->dev);
+ }
+
+ video_device_pipeline_stop(&node->video_dev);
+
+ /* Clear all queued buffers for the node */
+ unicam_return_buffers(node, VB2_BUF_STATE_ERROR);
+}
+
+static void unicam_buffer_queue(struct vb2_buffer *vb)
+{
+ struct unicam_node *node = vb2_get_drv_priv(vb->vb2_queue);
+ struct unicam_buffer *buf = to_unicam_buffer(vb);
+
+ spin_lock_irq(&node->dma_queue_lock);
+ list_add_tail(&buf->list, &node->dma_queue);
+ spin_unlock_irq(&node->dma_queue_lock);
+}
+
+static const struct vb2_ops unicam_video_qops = {
+ .queue_setup = unicam_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = unicam_buffer_prepare,
+ .start_streaming = unicam_start_streaming,
+ .stop_streaming = unicam_stop_streaming,
+ .buf_queue = unicam_buffer_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 video device operations
+ */
+
+static int unicam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, UNICAM_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, UNICAM_MODULE_NAME, sizeof(cap->card));
+
+ cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE;
+
+ return 0;
+}
+
+static int unicam_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int index;
+ unsigned int i;
+
+ for (i = 0, index = 0; i < ARRAY_SIZE(unicam_image_formats); i++) {
+ if (f->mbus_code && unicam_image_formats[i].code != f->mbus_code)
+ continue;
+
+ if (index == f->index) {
+ f->pixelformat = unicam_image_formats[i].fourcc;
+ return 0;
+ }
+
+ index++;
+
+ if (!unicam_image_formats[i].unpacked_fourcc)
+ continue;
+
+ if (index == f->index) {
+ f->pixelformat = unicam_image_formats[i].unpacked_fourcc;
+ return 0;
+ }
+
+ index++;
+ }
+
+ return -EINVAL;
+}
+
+static int unicam_g_fmt_vid(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ *f = node->fmt;
+
+ return 0;
+}
+
+static void __unicam_try_fmt_vid(struct unicam_node *node,
+ struct v4l2_pix_format *pix)
+{
+ const struct unicam_format_info *fmtinfo;
+
+ /*
+ * Default to the first format if the requested pixel format code isn't
+ * supported.
+ */
+ fmtinfo = unicam_find_format_by_fourcc(pix->pixelformat,
+ UNICAM_SD_PAD_SOURCE_IMAGE);
+ if (!fmtinfo) {
+ fmtinfo = &unicam_image_formats[0];
+ pix->pixelformat = fmtinfo->fourcc;
+ }
+
+ unicam_calc_image_size_bpl(node->dev, fmtinfo, pix);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+}
+
+static int unicam_try_fmt_vid(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ __unicam_try_fmt_vid(node, &f->fmt.pix);
+ return 0;
+}
+
+static int unicam_s_fmt_vid(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ if (vb2_is_busy(&node->buffer_queue))
+ return -EBUSY;
+
+ __unicam_try_fmt_vid(node, &f->fmt.pix);
+ node->fmt = *f;
+
+ return 0;
+}
+
+static int unicam_enum_fmt_meta(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int i, index;
+
+ for (i = 0, index = 0; i < ARRAY_SIZE(unicam_meta_formats); i++) {
+ if (f->mbus_code && unicam_meta_formats[i].code != f->mbus_code)
+ continue;
+
+ if (index == f->index) {
+ f->pixelformat = unicam_meta_formats[i].fourcc;
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->flags = V4L2_FMT_FLAG_META_LINE_BASED;
+ return 0;
+ }
+
+ index++;
+ }
+
+ return -EINVAL;
+}
+
+static int unicam_g_fmt_meta(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ f->fmt.meta = node->fmt.fmt.meta;
+
+ return 0;
+}
+
+static const struct unicam_format_info *
+__unicam_try_fmt_meta(struct unicam_node *node, struct v4l2_meta_format *meta)
+{
+ const struct unicam_format_info *fmtinfo;
+
+ /*
+ * Default to the first format if the requested pixel format code isn't
+ * supported.
+ */
+ fmtinfo = unicam_find_format_by_fourcc(meta->dataformat,
+ UNICAM_SD_PAD_SOURCE_METADATA);
+ if (!fmtinfo) {
+ fmtinfo = &unicam_meta_formats[0];
+ meta->dataformat = fmtinfo->fourcc;
+ }
+
+ unicam_calc_meta_size_bpl(node->dev, fmtinfo, meta);
+
+ return fmtinfo;
+}
+
+static int unicam_try_fmt_meta(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ __unicam_try_fmt_meta(node, &f->fmt.meta);
+ return 0;
+}
+
+static int unicam_s_fmt_meta(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct unicam_node *node = video_drvdata(file);
+
+ if (vb2_is_busy(&node->buffer_queue))
+ return -EBUSY;
+
+ __unicam_try_fmt_meta(node, &f->fmt.meta);
+ node->fmt = *f;
+
+ return 0;
+}
+
+static int unicam_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct unicam_node *node = video_drvdata(file);
+ int ret = -EINVAL;
+
+ if (fsize->index > 0)
+ return ret;
+
+ if (is_image_node(node)) {
+ if (!unicam_find_format_by_fourcc(fsize->pixel_format,
+ UNICAM_SD_PAD_SOURCE_IMAGE))
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = UNICAM_IMAGE_MIN_WIDTH;
+ fsize->stepwise.max_width = UNICAM_IMAGE_MAX_WIDTH;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = UNICAM_IMAGE_MIN_HEIGHT;
+ fsize->stepwise.max_height = UNICAM_IMAGE_MAX_HEIGHT;
+ fsize->stepwise.step_height = 1;
+ } else {
+ if (!unicam_find_format_by_fourcc(fsize->pixel_format,
+ UNICAM_SD_PAD_SOURCE_METADATA))
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = UNICAM_META_MIN_WIDTH;
+ fsize->stepwise.max_width = UNICAM_META_MAX_WIDTH;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = UNICAM_META_MIN_HEIGHT;
+ fsize->stepwise.max_height = UNICAM_META_MAX_HEIGHT;
+ fsize->stepwise.step_height = 1;
+ }
+
+ return 0;
+}
+
+static int unicam_log_status(struct file *file, void *fh)
+{
+ struct unicam_node *node = video_drvdata(file);
+ struct unicam_device *unicam = node->dev;
+ u32 reg;
+
+ /* status for sub devices */
+ v4l2_device_call_all(&unicam->v4l2_dev, 0, core, log_status);
+
+ dev_info(unicam->dev, "-----Receiver status-----\n");
+ dev_info(unicam->dev, "V4L2 width/height: %ux%u\n",
+ node->fmt.fmt.pix.width, node->fmt.fmt.pix.height);
+ dev_info(unicam->dev, "V4L2 format: %08x\n",
+ node->fmt.fmt.pix.pixelformat);
+ reg = unicam_reg_read(unicam, UNICAM_IPIPE);
+ dev_info(unicam->dev, "Unpacking/packing: %u / %u\n",
+ unicam_get_field(reg, UNICAM_PUM_MASK),
+ unicam_get_field(reg, UNICAM_PPM_MASK));
+ dev_info(unicam->dev, "----Live data----\n");
+ dev_info(unicam->dev, "Programmed stride: %4u\n",
+ unicam_reg_read(unicam, UNICAM_IBLS));
+ dev_info(unicam->dev, "Detected resolution: %ux%u\n",
+ unicam_reg_read(unicam, UNICAM_IHSTA),
+ unicam_reg_read(unicam, UNICAM_IVSTA));
+ dev_info(unicam->dev, "Write pointer: %08x\n",
+ unicam_reg_read(unicam, UNICAM_IBWP));
+
+ return 0;
+}
+
+static int unicam_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops unicam_ioctl_ops = {
+ .vidioc_querycap = unicam_querycap,
+
+ .vidioc_enum_fmt_vid_cap = unicam_enum_fmt_vid,
+ .vidioc_g_fmt_vid_cap = unicam_g_fmt_vid,
+ .vidioc_try_fmt_vid_cap = unicam_try_fmt_vid,
+ .vidioc_s_fmt_vid_cap = unicam_s_fmt_vid,
+
+ .vidioc_enum_fmt_meta_cap = unicam_enum_fmt_meta,
+ .vidioc_g_fmt_meta_cap = unicam_g_fmt_meta,
+ .vidioc_try_fmt_meta_cap = unicam_try_fmt_meta,
+ .vidioc_s_fmt_meta_cap = unicam_s_fmt_meta,
+
+ .vidioc_enum_framesizes = unicam_enum_framesizes,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = unicam_log_status,
+ .vidioc_subscribe_event = unicam_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* unicam capture driver file operations */
+static const struct v4l2_file_operations unicam_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int unicam_video_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct unicam_node *node = video_get_drvdata(vdev);
+ const u32 pad = is_image_node(node) ? UNICAM_SD_PAD_SOURCE_IMAGE
+ : UNICAM_SD_PAD_SOURCE_METADATA;
+ const struct v4l2_mbus_framefmt *format;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ format = v4l2_subdev_state_get_format(state, pad, 0);
+ if (!format) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (is_image_node(node)) {
+ const struct v4l2_pix_format *fmt = &node->fmt.fmt.pix;
+ const struct unicam_format_info *fmtinfo;
+
+ fmtinfo = unicam_find_format_by_fourcc(fmt->pixelformat,
+ UNICAM_SD_PAD_SOURCE_IMAGE);
+ if (WARN_ON(!fmtinfo)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (fmtinfo->code != format->code ||
+ fmt->height != format->height ||
+ fmt->width != format->width ||
+ fmt->field != format->field) {
+ dev_dbg(node->dev->dev,
+ "image: (%u x %u) 0x%08x %s != (%u x %u) 0x%08x %s\n",
+ fmt->width, fmt->height, fmtinfo->code,
+ v4l2_field_names[fmt->field],
+ format->width, format->height, format->code,
+ v4l2_field_names[format->field]);
+ ret = -EPIPE;
+ }
+ } else {
+ const struct v4l2_meta_format *fmt = &node->fmt.fmt.meta;
+
+ const struct unicam_format_info *fmtinfo;
+
+ fmtinfo = unicam_find_format_by_fourcc(fmt->dataformat,
+ UNICAM_SD_PAD_SOURCE_METADATA);
+ if (WARN_ON(!fmtinfo)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (fmtinfo->code != format->code ||
+ fmt->height != format->height ||
+ fmt->width != format->width) {
+ dev_dbg(node->dev->dev,
+ "meta: (%u x %u) 0x%04x != (%u x %u) 0x%04x\n",
+ fmt->width, fmt->height, fmtinfo->code,
+ format->width, format->height, format->code);
+ ret = -EPIPE;
+ }
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+ return ret;
+}
+
+static const struct media_entity_operations unicam_video_media_ops = {
+ .link_validate = unicam_video_link_validate,
+};
+
+static void unicam_node_release(struct video_device *vdev)
+{
+ struct unicam_node *node = video_get_drvdata(vdev);
+
+ unicam_put(node->dev);
+}
+
+static void unicam_set_default_format(struct unicam_node *node)
+{
+ if (is_image_node(node)) {
+ struct v4l2_pix_format *fmt = &node->fmt.fmt.pix;
+ const struct unicam_format_info *fmtinfo =
+ &unicam_image_formats[0];
+
+ node->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ v4l2_fill_pix_format(fmt, &unicam_default_image_format);
+ fmt->pixelformat = fmtinfo->fourcc;
+ unicam_calc_image_size_bpl(node->dev, fmtinfo, fmt);
+ } else {
+ struct v4l2_meta_format *fmt = &node->fmt.fmt.meta;
+ const struct unicam_format_info *fmtinfo =
+ &unicam_meta_formats[0];
+
+ node->fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
+
+ fmt->dataformat = fmtinfo->fourcc;
+ fmt->width = unicam_default_meta_format.width;
+ fmt->height = unicam_default_meta_format.height;
+ unicam_calc_meta_size_bpl(node->dev, fmtinfo, fmt);
+ }
+}
+
+static int unicam_register_node(struct unicam_device *unicam,
+ enum unicam_node_type type)
+{
+ const u32 pad_index = type == UNICAM_IMAGE_NODE
+ ? UNICAM_SD_PAD_SOURCE_IMAGE
+ : UNICAM_SD_PAD_SOURCE_METADATA;
+ struct unicam_node *node = &unicam->node[type];
+ struct video_device *vdev = &node->video_dev;
+ struct vb2_queue *q = &node->buffer_queue;
+ int ret;
+
+ node->dev = unicam_get(unicam);
+ node->id = type;
+
+ spin_lock_init(&node->dma_queue_lock);
+
+ INIT_LIST_HEAD(&node->dma_queue);
+
+ /* Initialize the videobuf2 queue. */
+ q->type = type == UNICAM_IMAGE_NODE ? V4L2_BUF_TYPE_VIDEO_CAPTURE
+ : V4L2_BUF_TYPE_META_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = node;
+ q->ops = &unicam_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct unicam_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &unicam->lock;
+ q->min_queued_buffers = 1;
+ q->dev = unicam->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ dev_err(unicam->dev, "vb2_queue_init() failed\n");
+ goto err_unicam_put;
+ }
+
+ /* Initialize the video device. */
+ vdev->release = unicam_node_release;
+ vdev->fops = &unicam_fops;
+ vdev->ioctl_ops = &unicam_ioctl_ops;
+ vdev->v4l2_dev = &unicam->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &unicam->lock;
+ vdev->device_caps = type == UNICAM_IMAGE_NODE
+ ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_META_CAPTURE;
+ vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
+ vdev->entity.ops = &unicam_video_media_ops;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s-%s", UNICAM_MODULE_NAME,
+ type == UNICAM_IMAGE_NODE ? "image" : "embedded");
+
+ video_set_drvdata(vdev, node);
+
+ if (type == UNICAM_IMAGE_NODE)
+ vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
+
+ node->pad.flags = MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+ if (ret)
+ goto err_unicam_put;
+
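+ /* Allocate a scratch buffer for the DMA to target when no capture buffer is queued. */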
+ node->dummy_buf.size = UNICAM_DUMMY_BUF_SIZE;
+ node->dummy_buf_cpu_addr = dma_alloc_coherent(unicam->dev,
+ node->dummy_buf.size,
+ &node->dummy_buf.dma_addr,
+ GFP_KERNEL);
+ if (!node->dummy_buf_cpu_addr) {
+ dev_err(unicam->dev, "Unable to allocate dummy buffer.\n");
+ ret = -ENOMEM;
+ goto err_entity_cleanup;
+ }
+
+ unicam_set_default_format(node);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(unicam->dev, "Unable to register video device %s\n",
+ vdev->name);
+ goto err_dma_free;
+ }
+
+ node->registered = true;
+
+ ret = media_create_pad_link(&unicam->subdev.sd.entity,
+ pad_index,
+ &node->video_dev.entity,
+ 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ /*
+ * No need for cleanup; the caller will unregister the
+ * video device, which will drop the reference on the
+ * device and trigger the cleanup.
+ */
+ dev_err(unicam->dev, "Unable to create pad link for %s\n",
+ unicam->sensor.subdev->name);
+ return ret;
+ }
+
+ return 0;
+
+err_dma_free:
+ dma_free_coherent(unicam->dev, node->dummy_buf.size,
+ node->dummy_buf_cpu_addr,
+ node->dummy_buf.dma_addr);
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_unicam_put:
+ unicam_put(unicam);
+ return ret;
+}
+
+static void unicam_unregister_nodes(struct unicam_device *unicam)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
+ struct unicam_node *node = &unicam->node[i];
+
+ if (node->registered) {
+ vb2_video_unregister_device(&node->video_dev);
+ node->registered = false;
+ }
+
+ if (node->dummy_buf_cpu_addr)
+ dma_free_coherent(unicam->dev, node->dummy_buf.size,
+ node->dummy_buf_cpu_addr,
+ node->dummy_buf.dma_addr);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int unicam_runtime_resume(struct device *dev)
+{
+ struct unicam_device *unicam = dev_get_drvdata(dev);
+ int ret;
+
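+ /* Raise the VPU clock floor and enable it before bringing up the CSI clock. */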
+ ret = clk_set_min_rate(unicam->vpu_clock, UNICAM_MIN_VPU_CLOCK_RATE);
+ if (ret) {
+ dev_err(unicam->dev, "failed to set up VPU clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(unicam->vpu_clock);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to enable VPU clock: %d\n", ret);
+ goto err_vpu_clock;
+ }
+
+ ret = clk_set_rate(unicam->clock, 100 * 1000 * 1000);
+ if (ret) {
+ dev_err(unicam->dev, "failed to set up CSI clock\n");
+ goto err_vpu_prepare;
+ }
+
+ ret = clk_prepare_enable(unicam->clock);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to enable CSI clock: %d\n", ret);
+ goto err_vpu_prepare;
+ }
+
+ return 0;
+
+err_vpu_prepare:
+ clk_disable_unprepare(unicam->vpu_clock);
+err_vpu_clock:
+ if (clk_set_min_rate(unicam->vpu_clock, 0))
+ dev_err(unicam->dev, "failed to reset the VPU clock\n");
+
+ return ret;
+}
+
+static int unicam_runtime_suspend(struct device *dev)
+{
+ struct unicam_device *unicam = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(unicam->clock);
+
+ if (clk_set_min_rate(unicam->vpu_clock, 0))
+ dev_err(unicam->dev, "failed to reset the VPU clock\n");
+
+ clk_disable_unprepare(unicam->vpu_clock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops unicam_pm_ops = {
+ RUNTIME_PM_OPS(unicam_runtime_suspend, unicam_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 async notifier
+ */
+
+static int unicam_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct unicam_device *unicam = notifier_to_unicam_device(notifier);
+ struct media_pad *sink = &unicam->subdev.pads[UNICAM_SD_PAD_SINK];
+ struct media_pad *source;
+ int ret;
+
+ dev_dbg(unicam->dev, "Using sensor %s for capture\n",
+ subdev->name);
+
+ ret = v4l2_create_fwnode_links_to_pad(subdev, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret)
+ return ret;
+
+ source = media_pad_remote_pad_unique(sink);
+ if (IS_ERR(source)) {
+ dev_err(unicam->dev, "No connected sensor pad\n");
+ return PTR_ERR(source);
+ }
+
+ unicam->sensor.subdev = subdev;
+ unicam->sensor.pad = source;
+
+ return 0;
+}
+
+static int unicam_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct unicam_device *unicam = notifier_to_unicam_device(notifier);
+ int ret;
+
+ ret = unicam_register_node(unicam, UNICAM_IMAGE_NODE);
+ if (ret) {
+ dev_err(unicam->dev, "Unable to register image video device.\n");
+ goto unregister;
+ }
+
+ ret = unicam_register_node(unicam, UNICAM_METADATA_NODE);
+ if (ret) {
+ dev_err(unicam->dev, "Unable to register metadata video device.\n");
+ goto unregister;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&unicam->v4l2_dev);
+ if (ret) {
+ dev_err(unicam->dev, "Unable to register subdev nodes.\n");
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ unicam_unregister_nodes(unicam);
+ unicam_put(unicam);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations unicam_async_ops = {
+ .bound = unicam_async_bound,
+ .complete = unicam_async_complete,
+};
+
+static int unicam_async_nf_init(struct unicam_device *unicam)
+{
+ struct v4l2_fwnode_endpoint ep = { };
+ struct fwnode_handle *ep_handle;
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ ret = of_property_read_u32(unicam->dev->of_node, "brcm,num-data-lanes",
+ &unicam->max_data_lanes);
+ if (ret < 0) {
+ dev_err(unicam->dev, "Missing %s DT property\n",
+ "brcm,num-data-lanes");
+ return -EINVAL;
+ }
+
+ /* Get and parse the local endpoint. */
+ ep_handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(unicam->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep_handle) {
+ dev_err(unicam->dev, "No endpoint found\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep_handle, &ep);
+ if (ret) {
+ dev_err(unicam->dev, "Failed to parse endpoint: %d\n", ret);
+ goto error;
+ }
+
+ unicam->bus_type = ep.bus_type;
+
+ switch (ep.bus_type) {
+ case V4L2_MBUS_CSI2_DPHY: {
+ unsigned int num_data_lanes = ep.bus.mipi_csi2.num_data_lanes;
+
+ if (num_data_lanes != 1 && num_data_lanes != 2 &&
+ num_data_lanes != 4) {
+ dev_err(unicam->dev, "%u data lanes not supported\n",
+ num_data_lanes);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (num_data_lanes > unicam->max_data_lanes) {
+ dev_err(unicam->dev,
+ "Endpoint uses %u data lanes when %u are supported\n",
+ num_data_lanes, unicam->max_data_lanes);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ unicam->max_data_lanes = num_data_lanes;
+ unicam->bus_flags = ep.bus.mipi_csi2.flags;
+ break;
+ }
+
+ case V4L2_MBUS_CCP2:
+ unicam->max_data_lanes = 1;
+ unicam->bus_flags = ep.bus.mipi_csi1.strobe;
+ break;
+
+ default:
+ /* Unsupported bus type */
+ dev_err(unicam->dev, "Unsupported bus type %u\n", ep.bus_type);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Initialize and register the async notifier. */
+ v4l2_async_nf_init(&unicam->notifier, &unicam->v4l2_dev);
+
+ asc = v4l2_async_nf_add_fwnode_remote(&unicam->notifier, ep_handle,
+ struct v4l2_async_connection);
+ fwnode_handle_put(ep_handle);
+ ep_handle = NULL;
+
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ dev_err(unicam->dev, "Failed to add entry to notifier: %d\n",
+ ret);
+ goto error;
+ }
+
+ unicam->notifier.ops = &unicam_async_ops;
+
+ ret = v4l2_async_nf_register(&unicam->notifier);
+ if (ret) {
+ dev_err(unicam->dev, "Error registering device notifier: %d\n",
+ ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ fwnode_handle_put(ep_handle);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int unicam_media_init(struct unicam_device *unicam)
+{
+ int ret;
+
+ unicam->mdev.dev = unicam->dev;
+ strscpy(unicam->mdev.model, UNICAM_MODULE_NAME,
+ sizeof(unicam->mdev.model));
+ unicam->mdev.hw_revision = 0;
+
+ media_device_init(&unicam->mdev);
+
+ unicam->v4l2_dev.mdev = &unicam->mdev;
+
+ ret = v4l2_device_register(unicam->dev, &unicam->v4l2_dev);
+ if (ret < 0) {
+ dev_err(unicam->dev, "Unable to register v4l2 device\n");
+ goto err_media_cleanup;
+ }
+
+ ret = media_device_register(&unicam->mdev);
+ if (ret < 0) {
+ dev_err(unicam->dev,
+ "Unable to register media-controller device\n");
+ goto err_v4l2_unregister;
+ }
+
+ return 0;
+
+err_v4l2_unregister:
+ v4l2_device_unregister(&unicam->v4l2_dev);
+err_media_cleanup:
+ media_device_cleanup(&unicam->mdev);
+ return ret;
+}
+
+static int unicam_probe(struct platform_device *pdev)
+{
+ struct unicam_device *unicam;
+ int ret;
+
+ unicam = kzalloc(sizeof(*unicam), GFP_KERNEL);
+ if (!unicam)
+ return -ENOMEM;
+
+ kref_init(&unicam->kref);
+ mutex_init(&unicam->lock);
+
+ unicam->dev = &pdev->dev;
+ platform_set_drvdata(pdev, unicam);
+
+ unicam->base = devm_platform_ioremap_resource_byname(pdev, "unicam");
+ if (IS_ERR(unicam->base)) {
+ ret = PTR_ERR(unicam->base);
+ goto err_unicam_put;
+ }
+
+ unicam->clk_gate_base = devm_platform_ioremap_resource_byname(pdev, "cmi");
+ if (IS_ERR(unicam->clk_gate_base)) {
+ ret = PTR_ERR(unicam->clk_gate_base);
+ goto err_unicam_put;
+ }
+
+ unicam->clock = devm_clk_get(&pdev->dev, "lp");
+ if (IS_ERR(unicam->clock)) {
+ dev_err(unicam->dev, "Failed to get lp clock\n");
+ ret = PTR_ERR(unicam->clock);
+ goto err_unicam_put;
+ }
+
+ unicam->vpu_clock = devm_clk_get(&pdev->dev, "vpu");
+ if (IS_ERR(unicam->vpu_clock)) {
+ dev_err(unicam->dev, "Failed to get vpu clock\n");
+ ret = PTR_ERR(unicam->vpu_clock);
+ goto err_unicam_put;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_unicam_put;
+
+ ret = devm_request_irq(&pdev->dev, ret, unicam_isr, 0,
+ "unicam_capture0", unicam);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request interrupt\n");
+ goto err_unicam_put;
+ }
+
+ /* Enable the block power domain. */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = unicam_media_init(unicam);
+ if (ret)
+ goto err_pm_runtime;
+
+ ret = unicam_subdev_init(unicam);
+ if (ret)
+ goto err_media_unregister;
+
+ ret = unicam_async_nf_init(unicam);
+ if (ret)
+ goto err_subdev_unregister;
+
+ return 0;
+
+err_subdev_unregister:
+ unicam_subdev_cleanup(unicam);
+err_media_unregister:
+ media_device_unregister(&unicam->mdev);
+err_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+err_unicam_put:
+ unicam_put(unicam);
+
+ return ret;
+}
+
+static void unicam_remove(struct platform_device *pdev)
+{
+ struct unicam_device *unicam = platform_get_drvdata(pdev);
+
+ unicam_unregister_nodes(unicam);
+ v4l2_device_unregister(&unicam->v4l2_dev);
+ media_device_unregister(&unicam->mdev);
+ v4l2_async_nf_unregister(&unicam->notifier);
+
+ unicam_subdev_cleanup(unicam);
+
+ unicam_put(unicam);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id unicam_of_match[] = {
+ { .compatible = "brcm,bcm2835-unicam", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, unicam_of_match);
+
+static struct platform_driver unicam_driver = {
+ .probe = unicam_probe,
+ .remove_new = unicam_remove,
+ .driver = {
+ .name = UNICAM_MODULE_NAME,
+ .pm = pm_ptr(&unicam_pm_ops),
+ .of_match_table = unicam_of_match,
+ },
+};
+
+module_platform_driver(unicam_driver);
+
+MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.com>");
+MODULE_DESCRIPTION("BCM2835 Unicam driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 2d7b0508cc9a..6f7d27a48eff 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -239,10 +239,6 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
- ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
- if (ret)
- goto err_disable_pclk;
-
/* Enable DPHY clk and data lanes. */
if (csi2rx->dphy) {
reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
@@ -252,6 +248,13 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
}
writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+
+ ret = csi2rx_configure_ext_dphy(csi2rx);
+ if (ret) {
+ dev_err(csi2rx->dev,
+ "Failed to configure external DPHY: %d\n", ret);
+ goto err_disable_pclk;
+ }
}
/*
@@ -291,14 +294,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
reset_control_deassert(csi2rx->sys_rst);
- if (csi2rx->dphy) {
- ret = csi2rx_configure_ext_dphy(csi2rx);
- if (ret) {
- dev_err(csi2rx->dev,
- "Failed to configure external DPHY: %d\n", ret);
- goto err_disable_sysclk;
- }
- }
+ ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+ if (ret)
+ goto err_disable_sysclk;
clk_disable_unprepare(csi2rx->p_clk);
@@ -312,6 +310,10 @@ err_disable_pixclk:
clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
}
+ if (csi2rx->dphy) {
+ writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+ phy_power_off(csi2rx->dphy);
+ }
err_disable_pclk:
clk_disable_unprepare(csi2rx->p_clk);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.c b/drivers/media/platform/chips-media/wave5/wave5-helper.c
index 8433ecab230c..7e0f34bfa5be 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-helper.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.c
@@ -52,11 +52,12 @@ int wave5_vpu_release_device(struct file *filp,
char *name)
{
struct vpu_instance *inst = wave5_to_vpu_inst(filp->private_data);
+ struct vpu_device *dev = inst->dev;
+ int ret = 0;
v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
if (inst->state != VPU_INST_STATE_NONE) {
u32 fail_res;
- int ret;
ret = close_func(inst, &fail_res);
if (fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING) {
@@ -71,8 +72,20 @@ int wave5_vpu_release_device(struct file *filp,
}
wave5_cleanup_instance(inst);
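+ /* In polling mode (no IRQ line), cancel the poll timer once no instance remains. */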
+ if (dev->irq < 0) {
+ ret = mutex_lock_interruptible(&dev->dev_lock);
+ if (ret)
+ return ret;
- return 0;
+ if (list_empty(&dev->instances)) {
+ dev_dbg(dev->dev, "Disabling the hrtimer\n");
+ hrtimer_cancel(&dev->hrtimer);
+ }
+
+ mutex_unlock(&dev->dev_lock);
+ }
+
+ return ret;
}
int wave5_vpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq,
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index ef227af72348..c8624c681fa6 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -1810,7 +1810,6 @@ static int wave5_vpu_open_dec(struct file *filp)
v4l2_fh_add(&inst->v4l2_fh);
INIT_LIST_HEAD(&inst->list);
- list_add_tail(&inst->list, &dev->instances);
inst->v4l2_m2m_dev = inst->dev->v4l2_m2m_dec_dev;
inst->v4l2_fh.m2m_ctx =
@@ -1867,6 +1866,18 @@ static int wave5_vpu_open_dec(struct file *filp)
wave5_vdi_allocate_sram(inst->dev);
+ ret = mutex_lock_interruptible(&dev->dev_lock);
+ if (ret)
+ goto cleanup_inst;
+
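+ /* In polling mode, start the poll timer when the first instance is opened. */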
+ if (dev->irq < 0 && !hrtimer_active(&dev->hrtimer) && list_empty(&dev->instances))
+ hrtimer_start(&dev->hrtimer, ns_to_ktime(dev->vpu_poll_interval * NSEC_PER_MSEC),
+ HRTIMER_MODE_REL_PINNED);
+
+ list_add_tail(&inst->list, &dev->instances);
+
+ mutex_unlock(&dev->dev_lock);
+
return 0;
cleanup_inst:
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index 8bbf9d10b467..a45a2f699000 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -1554,7 +1554,6 @@ static int wave5_vpu_open_enc(struct file *filp)
v4l2_fh_add(&inst->v4l2_fh);
INIT_LIST_HEAD(&inst->list);
- list_add_tail(&inst->list, &dev->instances);
inst->v4l2_m2m_dev = inst->dev->v4l2_m2m_enc_dev;
inst->v4l2_fh.m2m_ctx =
@@ -1729,6 +1728,18 @@ static int wave5_vpu_open_enc(struct file *filp)
wave5_vdi_allocate_sram(inst->dev);
+ ret = mutex_lock_interruptible(&dev->dev_lock);
+ if (ret)
+ goto cleanup_inst;
+
+ if (dev->irq < 0 && !hrtimer_active(&dev->hrtimer) && list_empty(&dev->instances))
+ hrtimer_start(&dev->hrtimer, ns_to_ktime(dev->vpu_poll_interval * NSEC_PER_MSEC),
+ HRTIMER_MODE_REL_PINNED);
+
+ list_add_tail(&inst->list, &dev->instances);
+
+ mutex_unlock(&dev->dev_lock);
+
return 0;
cleanup_inst:
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index 1b3df5b04249..68a519ac412d 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -26,6 +26,9 @@ struct wave5_match_data {
const char *fw_name;
};
+static int vpu_poll_interval = 5;
+module_param(vpu_poll_interval, int, 0644);
+
int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout)
{
int ret;
@@ -40,7 +43,7 @@ int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout)
return 0;
}
-static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
+static void wave5_vpu_handle_irq(void *dev_id)
{
u32 seq_done;
u32 cmd_done;
@@ -48,42 +51,67 @@ static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
struct vpu_instance *inst;
struct vpu_device *dev = dev_id;
- if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS)) {
- irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
- wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
- wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
-
- list_for_each_entry(inst, &dev->instances, list) {
- seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
- cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
-
- if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
- irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
- if (seq_done & BIT(inst->id)) {
- seq_done &= ~BIT(inst->id);
- wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO,
- seq_done);
- complete(&inst->irq_done);
- }
+ irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
+ wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
+ wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
+
+ list_for_each_entry(inst, &dev->instances, list) {
+ seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+ cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
+
+ if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
+ irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
+ if (seq_done & BIT(inst->id)) {
+ seq_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO,
+ seq_done);
+ complete(&inst->irq_done);
}
+ }
- if (irq_reason & BIT(INT_WAVE5_DEC_PIC) ||
- irq_reason & BIT(INT_WAVE5_ENC_PIC)) {
- if (cmd_done & BIT(inst->id)) {
- cmd_done &= ~BIT(inst->id);
- wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
- cmd_done);
- inst->ops->finish_process(inst);
- }
+ if (irq_reason & BIT(INT_WAVE5_DEC_PIC) ||
+ irq_reason & BIT(INT_WAVE5_ENC_PIC)) {
+ if (cmd_done & BIT(inst->id)) {
+ cmd_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
+ cmd_done);
+ inst->ops->finish_process(inst);
}
-
- wave5_vpu_clear_interrupt(inst, irq_reason);
}
+
+ wave5_vpu_clear_interrupt(inst, irq_reason);
}
+}
+
+static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
+{
+ struct vpu_device *dev = dev_id;
+
+ if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS))
+ wave5_vpu_handle_irq(dev);
return IRQ_HANDLED;
}
+static void wave5_vpu_irq_work_fn(struct kthread_work *work)
+{
+ struct vpu_device *dev = container_of(work, struct vpu_device, work);
+
+ if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS))
+ wave5_vpu_handle_irq(dev);
+}
+
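+/* Polling mode: queue the IRQ work on the kthread worker and re-arm the timer. */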
+static enum hrtimer_restart wave5_vpu_timer_callback(struct hrtimer *timer)
+{
+ struct vpu_device *dev =
+ container_of(timer, struct vpu_device, hrtimer);
+
+ kthread_queue_work(dev->worker, &dev->work);
+ hrtimer_forward_now(timer, ns_to_ktime(vpu_poll_interval * NSEC_PER_MSEC));
+
+ return HRTIMER_RESTART;
+}
+
static int wave5_vpu_load_firmware(struct device *dev, const char *fw_name,
u32 *revision)
{
@@ -185,6 +213,28 @@ static int wave5_vpu_probe(struct platform_device *pdev)
}
dev->product = wave5_vpu_get_product_id(dev);
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
+ hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ dev->hrtimer.function = &wave5_vpu_timer_callback;
+ dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+ if (IS_ERR(dev->worker)) {
+ dev_err(&pdev->dev, "failed to create vpu irq worker\n");
+ ret = PTR_ERR(dev->worker);
+ goto err_vdi_release;
+ }
+ dev->vpu_poll_interval = vpu_poll_interval;
+ kthread_init_work(&dev->work, wave5_vpu_irq_work_fn);
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, dev->irq, NULL,
+ wave5_vpu_irq_thread, IRQF_ONESHOT, "vpu_irq", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
+ goto err_enc_unreg;
+ }
+ }
+
INIT_LIST_HEAD(&dev->instances);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
@@ -207,20 +257,6 @@ static int wave5_vpu_probe(struct platform_device *pdev)
}
}
- dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
- ret = -ENXIO;
- goto err_enc_unreg;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, dev->irq, NULL,
- wave5_vpu_irq_thread, IRQF_ONESHOT, "vpu_irq", dev);
- if (ret) {
- dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
- goto err_enc_unreg;
- }
-
ret = wave5_vpu_load_firmware(&pdev->dev, match_data->fw_name, &fw_revision);
if (ret) {
dev_err(&pdev->dev, "wave5_vpu_load_firmware, fail: %d\n", ret);
@@ -254,6 +290,11 @@ static void wave5_vpu_remove(struct platform_device *pdev)
{
struct vpu_device *dev = dev_get_drvdata(&pdev->dev);
+ if (dev->irq < 0) {
+ kthread_destroy_worker(dev->worker);
+ hrtimer_cancel(&dev->hrtimer);
+ }
+
mutex_destroy(&dev->dev_lock);
mutex_destroy(&dev->hw_lock);
clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
index 352f6e904e50..edc50450ddb8 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
@@ -756,6 +756,10 @@ struct vpu_device {
u32 product_code;
struct ida inst_ida;
struct clk_bulk_data *clks;
+ struct hrtimer hrtimer;
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ int vpu_poll_interval;
int num_clks;
};
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index 1d64bac34b90..ea2ea119dd2a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -524,7 +524,7 @@ static void mdp_auto_release_work(struct work_struct *work)
mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
cmd->num_comps);
- if (atomic_dec_and_test(&mdp->job_count)) {
+ if (refcount_dec_and_test(&mdp->job_count)) {
if (cmd->mdp_ctx)
mdp_m2m_job_finish(cmd->mdp_ctx);
@@ -575,7 +575,7 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
cmd->num_comps);
- if (atomic_dec_and_test(&mdp->job_count))
+ if (refcount_dec_and_test(&mdp->job_count))
wake_up(&mdp->callback_wq);
mdp_cmdq_pkt_destroy(&cmd->pkt);
@@ -724,9 +724,9 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
int i, ret;
u8 pp_used = __get_pp_num(param->param->type);
- atomic_set(&mdp->job_count, pp_used);
+ refcount_set(&mdp->job_count, pp_used);
if (atomic_read(&mdp->suspended)) {
- atomic_set(&mdp->job_count, 0);
+ refcount_set(&mdp->job_count, 0);
return -ECANCELED;
}
@@ -764,7 +764,7 @@ err_clock_off:
mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
cmd[i]->num_comps);
err_cancel_job:
- atomic_set(&mdp->job_count, 0);
+ refcount_set(&mdp->job_count, 0);
return ret;
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 5209f531ef8d..c1f3bf98120a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -380,14 +380,14 @@ static int __maybe_unused mdp_suspend(struct device *dev)
atomic_set(&mdp->suspended, 1);
- if (atomic_read(&mdp->job_count)) {
+ if (refcount_read(&mdp->job_count)) {
ret = wait_event_timeout(mdp->callback_wq,
- !atomic_read(&mdp->job_count),
+ !refcount_read(&mdp->job_count),
2 * HZ);
if (ret == 0) {
dev_err(dev,
"%s:flushed cmdq task incomplete, count=%d\n",
- __func__, atomic_read(&mdp->job_count));
+ __func__, refcount_read(&mdp->job_count));
return -EBUSY;
}
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
index 8c09e984fd01..430251f63754 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -134,7 +134,7 @@ struct mdp_dev {
/* synchronization protect for m2m device operation */
struct mutex m2m_lock;
atomic_t suspended;
- atomic_t job_count;
+ refcount_t job_count;
};
struct mdp_pipe_info {
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
index 35a8b059bde5..0e69128a3772 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -104,14 +104,14 @@ static void mdp_m2m_device_run(void *priv)
task.cb_data = NULL;
task.mdp_ctx = ctx;
- if (atomic_read(&ctx->mdp_dev->job_count)) {
+ if (refcount_read(&ctx->mdp_dev->job_count)) {
ret = wait_event_timeout(ctx->mdp_dev->callback_wq,
- !atomic_read(&ctx->mdp_dev->job_count),
+ !refcount_read(&ctx->mdp_dev->job_count),
2 * HZ);
if (ret == 0) {
dev_err(&ctx->mdp_dev->pdev->dev,
"%d jobs not yet done\n",
- atomic_read(&ctx->mdp_dev->job_count));
+ refcount_read(&ctx->mdp_dev->job_count));
goto worker_end;
}
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index 6bbe55de6ce9..ff23b225db70 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -79,6 +79,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
}
fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 4c34344dc7dc..d7027d600208 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -50,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
@@ -65,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
index 9ce34a3b5ee6..c60e4c193b25 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
@@ -49,7 +49,6 @@ int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
{
enum mtk_instance_type inst_type = *((unsigned int *)priv);
struct platform_device *plat_dev;
- unsigned long size = mem->size;
int id;
if (inst_type == MTK_INST_ENCODER) {
@@ -64,15 +63,15 @@ int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
id = dec_ctx->id;
}
- mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&plat_dev->dev, mem->size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
- mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
- dev_name(&plat_dev->dev), size);
+ mtk_v4l2_err(plat_dev, "%s dma_alloc size=0x%zx failed!",
+ __func__, mem->size);
return -ENOMEM;
}
- mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
- (unsigned long)mem->dma_addr, size);
+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,
+ (unsigned long)mem->dma_addr, mem->size);
return 0;
}
@@ -82,7 +81,6 @@ void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
{
enum mtk_instance_type inst_type = *((unsigned int *)priv);
struct platform_device *plat_dev;
- unsigned long size = mem->size;
int id;
if (inst_type == MTK_INST_ENCODER) {
@@ -98,15 +96,16 @@ void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
}
if (!mem->va) {
- mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
- dev_name(&plat_dev->dev), size);
+ mtk_v4l2_err(plat_dev, "%s: Tried to free a NULL VA", __func__);
+ if (mem->size)
+ mtk_v4l2_err(plat_dev, "Failed to free %zu bytes", mem->size);
return;
}
- mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
- (unsigned long)mem->dma_addr, size);
+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,
+ (unsigned long)mem->dma_addr, mem->size);
- dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
+ dma_free_coherent(&plat_dev->dev, mem->size, mem->va, mem->dma_addr);
mem->va = NULL;
mem->dma_addr = 0;
mem->size = 0;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
index ba742f0e391d..9107707de6c4 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
@@ -262,7 +262,7 @@ static int vidioc_try_fmt(struct mtk_vcodec_dec_ctx *ctx, struct v4l2_format *f,
int tmp_w, tmp_h;
/*
- * Find next closer width align 64, heign align 64, size align
+ * Find next closer width align 64, height align 64, size align
* 64 rectangle
* Note: This only get default value, the real HW needed value
* only available when ctx in MTK_STATE_HEADER state
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index f47c98faf068..2073781ccadb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -404,6 +408,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index 849b89dd205c..ac568ed14fa2 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -67,11 +67,11 @@ enum mtk_vdec_hw_arch {
* @pic_w: picture width
* @pic_h: picture height
* @buf_w: picture buffer width (64 aligned up from pic_w)
- * @buf_h: picture buffer heiht (64 aligned up from pic_h)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
* @fb_sz: bitstream size of each plane
* E.g. suppose picture size is 176x144,
* buffer size will be aligned to 176x160.
- * @cap_fourcc: fourcc number(may changed when resolution change)
+ * @cap_fourcc: fourcc number (may change on a resolution change)
* @reserved: align struct to 64-bit in order to adjust 32-bit and 64-bit os.
*/
struct vdec_pic_info {
@@ -241,6 +241,7 @@ struct mtk_vcodec_dec_ctx {
*
* @dec_mutex: decoder hardware lock
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: the lock protecting the context list
* @decode_workqueue: decode work queue
*
* @irqlock: protect data access by irq handler and work thread
@@ -282,6 +283,7 @@ struct mtk_vcodec_dec_dev {
/* decoder hardware mutex lock */
struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *decode_workqueue;
spinlock_t irqlock;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
index 2b6a5adbc419..bf21f2467a0f 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
@@ -326,7 +326,7 @@ struct vdec_av1_slice_quantization {
* @use_lr: whether to use loop restoration
* @use_chroma_lr: whether to use chroma loop restoration
* @frame_restoration_type: specifies the type of restoration used for each plane
- * @loop_restoration_size: pecifies the size of loop restoration units in units
+ * @loop_restoration_size: specifies the size of loop restoration units in units
* of samples in the current plane
*/
struct vdec_av1_slice_lr {
@@ -347,7 +347,7 @@ struct vdec_av1_slice_lr {
* and loop_filter_sharpness together determine when
* a block edge is filtered, and by how much the
* filtering can change the sample values
- * @loop_filter_delta_enabled: filetr level depends on the mode and reference
+ * @loop_filter_delta_enabled: filter level depends on the mode and reference
* frame used to predict a block
*/
struct vdec_av1_slice_loop_filter {
@@ -392,7 +392,7 @@ struct vdec_av1_slice_mfmv {
/**
* struct vdec_av1_slice_tile - AV1 Tile info
* @tile_cols: specifies the number of tiles across the frame
- * @tile_rows: pecifies the number of tiles down the frame
+ * @tile_rows: specifies the number of tiles down the frame
* @mi_col_starts: an array specifying the start column
* @mi_row_starts: an array specifying the start row
* @context_update_tile_id: specifies which tile to use for the CDF update
@@ -423,15 +423,15 @@ struct vdec_av1_slice_tile {
* or the tile sizes are coded
* @interpolation_filter: specifies the filter selection used for performing inter prediction
* @allow_warped_motion: motion_mode may be present or not
- * @is_motion_mode_switchable : euqlt to 0 specifies that only the SIMPLE motion mode will be used
+ * @is_motion_mode_switchable : equal to 0 specifies that only the SIMPLE motion mode will be used
* @reference_mode : frame reference mode selected
* @allow_high_precision_mv: specifies that motion vectors are specified to
* quarter pel precision or to eighth pel precision
- * @allow_intra_bc: ubducates that intra block copy may be used in this frame
+ * @allow_intra_bc: indicates that intra block copy may be used in this frame
* @force_integer_mv: specifies motion vectors will always be integers or
* can contain fractional bits
* @allow_screen_content_tools: intra blocks may use palette encoding
- * @error_resilient_mode: error resislent mode is enable/disable
+ * @error_resilient_mode: error resilient mode is enabled/disabled
* @frame_type: specifies the AV1 frame type
* @primary_ref_frame: specifies which reference frame contains the CDF values
* and other state that should be loaded at the start of the frame
@@ -440,8 +440,8 @@ struct vdec_av1_slice_tile {
* @disable_cdf_update: specified whether the CDF update in the symbol
* decoding process should be disables
* @skip_mode: av1 skip mode parameters
- * @seg: av1 segmentaon parameters
- * @delta_q_lf: av1 delta loop fileter
+ * @seg: av1 segmentation parameters
+ * @delta_q_lf: av1 delta loop filter
* @quant: av1 Quantization params
* @lr: av1 Loop Restauration parameters
* @superres_denom: the denominator for the upscaling ratio
@@ -450,8 +450,8 @@ struct vdec_av1_slice_tile {
* @mfmv: av1 mfmv parameters
* @tile: av1 Tile info
* @frame_is_intra: intra frame
- * @loss_less_array: loss less array
- * @coded_loss_less: coded lsss less
+ * @loss_less_array: lossless array
+ * @coded_loss_less: coded lossless
* @mi_rows: size of mi unit in rows
* @mi_cols: size of mi unit in cols
*/
@@ -1023,18 +1023,26 @@ static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *i
int i;
for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
- mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+ if (instance->mv[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->mv[i]);
for (i = 0; i < ARRAY_SIZE(instance->seg); i++)
- mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+ if (instance->seg[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->seg[i]);
for (i = 0; i < ARRAY_SIZE(instance->cdf); i++)
- mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
+ if (instance->cdf[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
+
- mtk_vcodec_mem_free(ctx, &instance->tile);
- mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
- mtk_vcodec_mem_free(ctx, &instance->cdf_table);
- mtk_vcodec_mem_free(ctx, &instance->iq_table);
+ if (instance->tile.va)
+ mtk_vcodec_mem_free(ctx, &instance->tile);
+ if (instance->cdf_temp.va)
+ mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
+ if (instance->cdf_table.va)
+ mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+ if (instance->iq_table.va)
+ mtk_vcodec_mem_free(ctx, &instance->iq_table);
instance->level = AV1_RES_NONE;
}
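The repeated va checks above all encode the same rule: only hand a buffer back to mtk_vcodec_mem_free() if it was actually allocated, i.e. its va is non-NULL. A hypothetical wrapper, shown only to make that guard explicit (not something the patch adds):

/* Hypothetical helper illustrating the guard used above: skip buffers
 * that were never allocated, or were already freed and zeroed.
 */
static void av1_mem_free_if_allocated(void *ctx, struct mtk_vcodec_mem *mem)
{
	if (mem->va)
		mtk_vcodec_mem_free(ctx, mem);
}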
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c
index bf7dffe60d07..795cb19b075d 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c
@@ -94,7 +94,7 @@ struct vdec_h264_dec_info {
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is write/reader on this item
* @hdr_buf : Header parsing buffer (AP-W, VPU-R)
- * @pred_buf_dma : HW working predication buffer dma address (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
* @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
* @list_free : free frame buffer ring list (AP-W/R, VPU-W)
* @list_disp : display frame buffer ring list (AP-R, VPU-W)
@@ -117,7 +117,7 @@ struct vdec_h264_vsi {
* struct vdec_h264_inst - h264 decoder instance
* @num_nalu : how many nalus be decoded
* @ctx : point to mtk_vcodec_dec_ctx
- * @pred_buf : HW working predication buffer
+ * @pred_buf : HW working prediction buffer
* @mv_buf : HW working motion vector buffer
* @vpu : VPU instance
* @vsi : VPU shared information
@@ -136,7 +136,7 @@ static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
return HW_MB_STORE_SZ * (width/MB_UNIT_LEN) * (height/MB_UNIT_LEN);
}
-static int allocate_predication_buf(struct vdec_h264_inst *inst)
+static int allocate_prediction_buf(struct vdec_h264_inst *inst)
{
int err = 0;
@@ -151,7 +151,7 @@ static int allocate_predication_buf(struct vdec_h264_inst *inst)
return 0;
}
-static void free_predication_buf(struct vdec_h264_inst *inst)
+static void free_prediction_buf(struct vdec_h264_inst *inst)
{
struct mtk_vcodec_mem *mem = NULL;
@@ -286,7 +286,7 @@ static int vdec_h264_init(struct mtk_vcodec_dec_ctx *ctx)
}
inst->vsi = (struct vdec_h264_vsi *)inst->vpu.vsi;
- err = allocate_predication_buf(inst);
+ err = allocate_prediction_buf(inst);
if (err)
goto error_deinit;
@@ -308,7 +308,7 @@ static void vdec_h264_deinit(void *h_vdec)
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
vpu_dec_deinit(&inst->vpu);
- free_predication_buf(inst);
+ free_prediction_buf(inst);
free_mv_buf(inst);
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h
index ac82be336055..31ffa13160a3 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h
@@ -175,7 +175,7 @@ void mtk_vdec_h264_get_ref_list(u8 *ref_list,
int num_valid);
/**
- * mtk_vdec_h264_get_ctrl_ptr - get each CID contrl address.
+ * mtk_vdec_h264_get_ctrl_ptr - get each CID control address.
*
* @ctx: v4l2 ctx
* @id: CID control ID
@@ -185,7 +185,7 @@ void mtk_vdec_h264_get_ref_list(u8 *ref_list,
void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id);
/**
- * mtk_vdec_h264_fill_dpb_info - get each CID contrl address.
+ * mtk_vdec_h264_fill_dpb_info - Fill the decoded picture buffer info
*
* @ctx: v4l2 ctx
* @decode_params: slice decode params
@@ -225,10 +225,13 @@ void mtk_vdec_h264_copy_slice_hd_params(struct mtk_h264_slice_hd_param *dst_para
const struct v4l2_ctrl_h264_decode_params *dec_param);
/**
- * mtk_vdec_h264_copy_scaling_matrix - get each CID contrl address.
+ * mtk_vdec_h264_copy_scaling_matrix - Copy scaling matrix from a control to the driver
*
- * @dst_matrix: scaling list params for hw decoder
- * @src_matrix: scaling list params from user driver
+ * @dst_matrix: scaling list params for the HW decoder
+ * @src_matrix: scaling list params from a V4L2 control
+ *
+ * This function is used to copy the scaling matrix from a
+ * v4l2 control into the slice parameters for a decode.
*/
void mtk_vdec_h264_copy_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
const struct v4l2_ctrl_h264_scaling_matrix *src_matrix);
@@ -246,7 +249,7 @@ mtk_vdec_h264_copy_decode_params(struct slice_api_h264_decode_param *dst_params,
const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]);
/**
- * mtk_vdec_h264_update_dpb - updata dpb list.
+ * mtk_vdec_h264_update_dpb - update dpb list.
*
* @dec_param: v4l2 control decode params
* @dpb: dpb entry informaton
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
index 5600f1df653d..73d5cef33b2a 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
@@ -27,7 +27,7 @@ struct mtk_h264_dec_slice_param {
/**
* struct vdec_h264_dec_info - decode information
* @dpb_sz : decoding picture buffer size
- * @resolution_changed : resoltion change happen
+ * @resolution_changed : flag to notify that a resolution change happened
* @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
* @cap_num_planes : number planes of capture buffer
* @bs_dma : Input bit-stream buffer dma address
@@ -54,7 +54,7 @@ struct vdec_h264_dec_info {
* by VPU.
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is write/reader on this item
- * @pred_buf_dma : HW working predication buffer dma address (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
* @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
* @dec : decode information (AP-R, VPU-W)
* @pic : picture information (AP-R, VPU-W)
@@ -74,7 +74,7 @@ struct vdec_h264_vsi {
* struct vdec_h264_slice_inst - h264 decoder instance
* @num_nalu : how many nalus be decoded
* @ctx : point to mtk_vcodec_dec_ctx
- * @pred_buf : HW working predication buffer
+ * @pred_buf : HW working prediction buffer
* @mv_buf : HW working motion vector buffer
* @vpu : VPU instance
* @vsi_ctx : Local VSI data for this decoding context
@@ -154,7 +154,7 @@ static int get_vdec_decode_parameters(struct vdec_h264_slice_inst *inst)
return 0;
}
-static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
+static int allocate_prediction_buf(struct vdec_h264_slice_inst *inst)
{
int err;
@@ -169,7 +169,7 @@ static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
return 0;
}
-static void free_predication_buf(struct vdec_h264_slice_inst *inst)
+static void free_prediction_buf(struct vdec_h264_slice_inst *inst)
{
struct mtk_vcodec_mem *mem = &inst->pred_buf;
@@ -292,7 +292,7 @@ static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vsi_ctx.dec.resolution_changed = true;
inst->vsi_ctx.dec.realloc_mv_buf = true;
- err = allocate_predication_buf(inst);
+ err = allocate_prediction_buf(inst);
if (err)
goto error_deinit;
@@ -320,7 +320,7 @@ static void vdec_h264_slice_deinit(void *h_vdec)
struct vdec_h264_slice_inst *inst = h_vdec;
vpu_dec_deinit(&inst->vpu);
- free_predication_buf(inst);
+ free_prediction_buf(inst);
free_mv_buf(inst);
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index 0e741e0dc8ba..2d4611e7fa0b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -51,7 +51,7 @@ struct vdec_h264_slice_lat_dec_param {
* struct vdec_h264_slice_info - decode information
*
* @nal_info: nal info of current picture
- * @timeout: Decode timeout: 1 timeout, 0 no timeount
+ * @timeout: Decode timeout: 1 timeout, 0 no timeout
* @bs_buf_size: bitstream size
* @bs_buf_addr: bitstream buffer dma address
* @y_fb_dma: Y frame buffer dma address
@@ -131,9 +131,9 @@ struct vdec_h264_slice_share_info {
/**
* struct vdec_h264_slice_inst - h264 decoder instance
*
- * @slice_dec_num: how many picture be decoded
+ * @slice_dec_num: Number of frames to be decoded
* @ctx: point to mtk_vcodec_dec_ctx
- * @pred_buf: HW working predication buffer
+ * @pred_buf: HW working prediction buffer
* @mv_buf: HW working motion vector buffer
* @vpu: VPU instance
* @vsi: vsi used for lat
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 06ed47df693b..aa721cc43647 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -254,7 +254,7 @@ struct vdec_hevc_slice_lat_dec_param {
* struct vdec_hevc_slice_info - decode information
*
* @wdma_end_addr_offset: wdma end address offset
- * @timeout: Decode timeout: 1 timeout, 0 no timeount
+ * @timeout: Decode timeout: 1 timeout, 0 no timeout
* @vdec_fb_va: VDEC frame buffer struct virtual address
* @crc: Used to check whether hardware's status is right
*/
@@ -342,7 +342,7 @@ struct vdec_hevc_slice_share_info {
/**
* struct vdec_hevc_slice_inst - hevc decoder instance
*
- * @slice_dec_num: how many picture be decoded
+ * @slice_dec_num: Number of frames to be decoded
* @ctx: point to mtk_vcodec_dec_ctx
* @mv_buf: HW working motion vector buffer
* @vpu: VPU instance
@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vpu.codec_type = ctx->current_codec;
inst->vpu.capture_type = ctx->capture_fourcc;
- ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
inst, inst->vpu.codec_type);
+ ctx->drv_handle = inst;
return 0;
error_free_inst:
kfree(inst);
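The two HEVC hunks above move the assignment of ctx->drv_handle from before vpu_dec_init() to after every init step has succeeded, so a failed init can simply free the instance without leaving a stale handle behind. A condensed sketch of that ordering (names taken from the hunk, bodies elided):

/* Sketch of the init ordering after this change: build the instance
 * completely, and publish it through ctx->drv_handle only on success.
 */
static int hevc_init_sketch(struct mtk_vcodec_dec_ctx *ctx)
{
	struct vdec_hevc_slice_inst *inst;
	int err;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	err = vpu_dec_init(&inst->vpu);
	if (err)
		goto error_free_inst;

	/* ... allocate working buffers, map VSI ... */

	ctx->drv_handle = inst;		/* only reachable once fully set up */
	return 0;

error_free_inst:
	kfree(inst);
	return err;
}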
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
index 19407f9bc773..4bc89c8644fe 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
@@ -56,7 +56,7 @@
* @cur_c_fb_dma : current plane C frame buffer dma address
* @bs_dma : bitstream dma address
* @bs_sz : bitstream size
- * @resolution_changed: resolution change flag 1 - changed, 0 - not change
+ * @resolution_changed: resolution change flag 1 - changed, 0 - not changed
* @show_frame : display this frame or not
* @wait_key_frame : wait key frame coming
*/
@@ -109,7 +109,7 @@ struct vdec_vp8_hw_reg_base {
/**
* struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
* @wq_hd : Wait queue to wait VPU message ack
- * @signaled : 1 - Host has received ack message from VPU, 0 - not receive
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @failure : VPU execution result status 0 - success, others - fail
* @inst_addr : VPU decoder instance address
*/
@@ -449,7 +449,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
inst->cur_fb = fb;
- dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_dma = (uint64_t)bs->dma_addr;
dec->bs_sz = bs->size;
dec->cur_y_fb_dma = y_fb_dma;
dec->cur_c_fb_dma = c_fb_dma;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
index f677e499fefa..e27e728f392e 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
@@ -35,7 +35,7 @@
* @cur_c_fb_dma: current plane C frame buffer dma address
* @bs_dma: bitstream dma address
* @bs_sz: bitstream size
- * @resolution_changed:resolution change flag 1 - changed, 0 - not change
+ * @resolution_changed:resolution change flag 1 - changed, 0 - not changed
* @frame_header_type: current frame header type
* @crc: used to check whether hardware's status is right
* @reserved: reserved, currently unused
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
index 55355fa70090..eb3354192853 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
@@ -16,6 +16,7 @@
#include "../vdec_drv_base.h"
#include "../vdec_vpu_if.h"
+#define VP9_MAX_SUPER_FRAMES_NUM 8
#define VP9_SUPER_FRAME_BS_SZ 64
#define MAX_VP9_DPB_SIZE 9
@@ -41,7 +42,7 @@ struct vp9_dram_buf {
/**
* struct vp9_fb_info - contains frame buffer info
- * @fb : frmae buffer
+ * @fb : frame buffer
* @reserved : reserved field used by vpu
*/
struct vp9_fb_info {
@@ -89,7 +90,7 @@ struct vp9_sf_ref_fb {
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is write/reader on this item
* @sf_bs_buf : super frame backup buffer (AP-W, VPU-R)
- * @sf_ref_fb : record supoer frame reference buffer information
+ * @sf_ref_fb : record super frame reference buffer information
* (AP-R/W, VPU-R/W)
* @sf_next_ref_fb_idx : next available super frame (AP-W, VPU-R)
* @sf_frm_cnt : super frame count, filled by vpu (AP-R, VPU-W)
@@ -133,11 +134,11 @@ struct vp9_sf_ref_fb {
*/
struct vdec_vp9_vsi {
unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
- struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_SUPER_FRAMES_NUM];
int sf_next_ref_fb_idx;
unsigned int sf_frm_cnt;
- unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
- unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_offset[VP9_MAX_SUPER_FRAMES_NUM];
+ unsigned int sf_frm_sz[VP9_MAX_SUPER_FRAMES_NUM];
unsigned int sf_frm_idx;
unsigned int sf_init;
struct vdec_fb fb;
@@ -526,7 +527,7 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
/* if this super frame and it is not last sub-frame, get next fb for
* sub-frame decode
*/
- if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt)
vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
}
@@ -735,7 +736,7 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
struct vdec_vp9_vsi *vsi) {
- if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ if (vsi->sf_frm_idx > VP9_MAX_SUPER_FRAMES_NUM) {
mtk_vdec_err(inst->ctx, "Invalid vsi->sf_frm_idx=%u.", vsi->sf_frm_idx);
return -EIO;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index cf48d09b78d7..eea709d93820 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -1074,7 +1074,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
unsigned int mi_row;
unsigned int mi_col;
unsigned int offset;
- unsigned int pa;
+ dma_addr_t pa;
unsigned int size;
struct vdec_vp9_slice_tiles *tiles;
unsigned char *pos;
@@ -1109,7 +1109,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
pos = va + offset;
end = va + bs->size;
/* truncated */
- pa = (unsigned int)bs->dma_addr + offset;
+ pa = bs->dma_addr + offset;
tb = instance->tile.va;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
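This hunk and the VP8 one above (bs_dma widened to uint64_t) fix the same class of bug: dma_addr_t can be 64 bits wide even on a 32-bit kernel (CONFIG_ARCH_DMA_ADDR_T_64BIT), so casting it to unsigned int or unsigned long may silently drop the upper bits. A small illustrative fragment of the safe pattern (variable names follow the hunk; the lower/upper split is only needed where a 32-bit register or firmware field is involved):

	/* Keep DMA addresses in dma_addr_t end to end; narrow explicitly
	 * and deliberately where the hardware interface is 32-bit.
	 */
	dma_addr_t pa = bs->dma_addr + offset;	/* no truncating cast */
	u32 pa_lo = lower_32_bits(pa);		/* low word for a 32-bit register */
	u32 pa_hi = upper_32_bits(pa);		/* high word, 0 on 32-bit-only systems */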
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h
index 1d9beb9e4a14..b0f576867f4b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h
@@ -158,14 +158,14 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *ctx, struct vdec_lat_buf *buf
struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *ctx);
/**
- * vdec_msg_queue_update_ube_rptr - used to updata the ube read point.
+ * vdec_msg_queue_update_ube_rptr - used to update the ube read point.
* @msg_queue: used to store the lat buffer information
* @ube_rptr: current ube read point
*/
void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t ube_rptr);
/**
- * vdec_msg_queue_update_ube_wptr - used to updata the ube write point.
+ * vdec_msg_queue_update_ube_wptr - used to update the ube write point.
* @msg_queue: used to store the lat buffer information
* @ube_wptr: current ube write point
*/
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index 82e57ae983d5..da6be556727b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
struct mtk_vcodec_dec_ctx *ctx;
int ret = false;
+ mutex_lock(&dec_dev->dev_ctx_lock);
list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&dec_dev->dev_ctx_lock);
return ret;
}
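Together with the list_add()/list_del_init() hunks in the open and release paths, this is the complete locking scheme for the new dev_ctx_lock: every writer and every reader of ctx_list takes the same mutex, so the IPI handler can no longer walk the list while an ioctl is removing an entry. Condensed pattern (not literal driver code):

/* All ctx_list accesses serialize on dev_ctx_lock. */
mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);			/* open() */
mutex_unlock(&dev->dev_ctx_lock);

mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list)		/* IPI handler lookup */
	if (ctx->vpu_inst == vpu)
		break;
mutex_unlock(&dev->dev_ctx_lock);

mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);				/* release() */
mutex_unlock(&dev->dev_ctx_lock);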
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
index aa7d08afc2f4..57ed9b1f5eaa 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
@@ -81,7 +81,7 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
/**
* vpu_dec_reset - reset decoder, use for flush decoder when end of stream or
- * seek. Remainig non displayed frame will be pushed to display.
+ * seek. Remaining non-displayed frames will be pushed to display.
*
* @vpu: instance for vdec_vpu_inst
*/
@@ -98,7 +98,7 @@ int vpu_dec_core(struct vdec_vpu_inst *vpu);
/**
* vpu_dec_core_end - core end decoding, basically the function will be invoked once
* when core HW decoding done and receive interrupt successfully. The
- * decoder in VPU will updata hardware information and deinit hardware
+ * decoder in VPU will update hardware information and deinit hardware
* and check if there is a new decoded frame available to display.
*
* @vpu : instance for vdec_vpu_inst
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index 181884e798fd..7eaf0e24c9fc 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -311,7 +311,7 @@ static int vidioc_try_fmt_out(struct mtk_vcodec_enc_ctx *ctx, struct v4l2_format
pix_fmt_mp->height = clamp(pix_fmt_mp->height, MTK_VENC_MIN_H, max_height);
pix_fmt_mp->width = clamp(pix_fmt_mp->width, MTK_VENC_MIN_W, max_width);
- /* find next closer width align 16, heign align 32, size align
+ /* find next closer width align 16, height align 32, size align
* 64 rectangle
*/
tmp_w = pix_fmt_mp->width;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 6319f24bc714..3cb8a1622222 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index a042f607ed8d..0bd85d0fb379 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
*
* @enc_mutex: encoder hardware lock.
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: the lock protecting the context list
* @encode_workqueue: encode work queue
*
* @enc_irq: h264 encoder irq resource
@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
/* encoder hardware mutex lock */
struct mutex enc_mutex;
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *encode_workqueue;
int enc_irq;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
index a22b7dfc656e..1a2b14a3e219 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
@@ -58,13 +58,15 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
return 0;
}
-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
+int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
{
int ret;
ret = pm_runtime_resume_and_get(pm->dev);
if (ret)
dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
+
+ return ret;
}
void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
index 157ea08ba9e3..2e28f25e36cc 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
@@ -10,7 +10,7 @@
#include "mtk_vcodec_enc_drv.h"
int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
+int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index a68dac72c4e4..f8145998fcaf 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -301,11 +301,12 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
* other buffers need to be freed by AP.
*/
for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
- if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
+ if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME && inst->work_bufs[i].va)
mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
}
- mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+ if (inst->pps_buf.va)
+ mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
}
static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
index c402a686f3cb..e83747b8d69a 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
@@ -64,7 +64,9 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
ctx->dev->curr_ctx = ctx;
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
- mtk_vcodec_enc_pw_on(&ctx->dev->pm);
+ ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm);
+ if (ret)
+ goto venc_if_encode_pw_on_err;
mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
bs_buf, result);
@@ -75,6 +77,7 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
ctx->dev->curr_ctx = NULL;
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+venc_if_encode_pw_on_err:
mtk_venc_unlock(ctx);
return ret;
}
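The mtk_vcodec_enc_pm.c and venc_drv_if.c hunks form one change: mtk_vcodec_enc_pw_on() now returns the pm_runtime_resume_and_get() result, and the caller bails out through a dedicated label instead of driving unpowered hardware. Condensed shape of the new error flow (irqlock bookkeeping elided):

	ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm);
	if (ret)
		goto venc_if_encode_pw_on_err;	/* resume failed: skip clocks and HW */

	mtk_vcodec_enc_clock_on(&ctx->dev->pm);
	ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf, bs_buf, result);
	mtk_vcodec_enc_clock_off(&ctx->dev->pm);
	mtk_vcodec_enc_pw_off(&ctx->dev->pm);

venc_if_encode_pw_on_err:
	mtk_venc_unlock(ctx);
	return ret;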
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h
index d00fb68b8235..889440a436b6 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h
@@ -156,7 +156,7 @@ int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
* @ctx: device context
* @opt: encode frame option
* @frm_buf: input frame buffer information
- * @bs_buf: output bitstream buffer infomraiton
+ * @bs_buf: output bitstream buffer information
* @result: encode result
* Return: 0 if encoding frame successfully, otherwise it is failed.
*/
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 84ad1cc6ad17..51bb7ee141b9 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
struct mtk_vcodec_enc_ctx *ctx;
int ret = false;
+ mutex_lock(&enc_dev->dev_ctx_lock);
list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&enc_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index 204e474d57f7..cfea5572a1b8 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -633,7 +633,9 @@ static int tegra_vde_decode_end(struct tegra_vde *vde)
timeout = wait_for_completion_interruptible_timeout(
&vde->decode_completion, msecs_to_jiffies(1000));
- if (timeout == 0) {
+ if (timeout < 0) {
+ ret = timeout;
+ } else if (timeout == 0) {
bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
read_bytes = bsev_ptr ? bsev_ptr - vde->bitstream_data_addr : 0;
@@ -642,8 +644,6 @@ static int tegra_vde_decode_end(struct tegra_vde *vde)
read_bytes, macroblocks_nb);
ret = -EIO;
- } else if (timeout < 0) {
- ret = timeout;
} else {
ret = 0;
}
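The reorder above is about the three-way return convention of wait_for_completion_interruptible_timeout(): a negative value means the wait was interrupted by a signal (for example -ERESTARTSYS) and must be checked and propagated first, zero means the hardware never signalled completion within the timeout, and a positive value is the remaining jiffies on success. Minimal annotated fragment:

	long timeout;
	int ret;

	timeout = wait_for_completion_interruptible_timeout(&vde->decode_completion,
							    msecs_to_jiffies(1000));
	if (timeout < 0)
		ret = timeout;		/* interrupted: propagate the signal error */
	else if (timeout == 0)
		ret = -EIO;		/* timed out: treat as a hardware error */
	else
		ret = 0;		/* completed with time to spare */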
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index db8ff5f5c4d3..f49b06978f14 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -30,6 +30,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
@@ -742,6 +743,18 @@ static void mipi_csis_stop_stream(struct mipi_csis_device *csis)
mipi_csis_system_enable(csis, false);
}
+static void mipi_csis_queue_event_sof(struct mipi_csis_device *csis)
+{
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+ u32 frame;
+
+ frame = mipi_csis_read(csis, MIPI_CSIS_FRAME_COUNTER_CH(0));
+ event.u.frame_sync.frame_sequence = frame;
+ v4l2_event_queue(csis->sd.devnode, &event);
+}
+
static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
{
struct mipi_csis_device *csis = dev_id;
@@ -765,6 +778,10 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
event->counter++;
}
}
+
+ if (status & MIPI_CSIS_INT_SRC_FRAME_START)
+ mipi_csis_queue_event_sof(csis);
+
spin_unlock_irqrestore(&csis->slock, flags);
mipi_csis_write(csis, MIPI_CSIS_INT_SRC, status);
@@ -1154,8 +1171,23 @@ static int mipi_csis_log_status(struct v4l2_subdev *sd)
return 0;
}
+static int mipi_csis_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* V4L2_EVENT_FRAME_SYNC doesn't require an id, so zero should be set */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
+ .subscribe_event = mipi_csis_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
@@ -1358,7 +1390,7 @@ static int mipi_csis_subdev_init(struct mipi_csis_device *csis)
snprintf(sd->name, sizeof(sd->name), "csis-%s",
dev_name(csis->dev));
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sd->ctrl_handler = NULL;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
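With V4L2_SUBDEV_FL_HAS_EVENTS set and the subscribe handler wired up, userspace can now receive start-of-frame events from the CSIS subdevice node. A minimal, hedged userspace sketch (the subdev path and error handling are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Subscribe to frame-sync events on a subdev node and print the frame
 * sequence number carried by each event.
 */
int watch_frame_sync(const char *subdev_path)
{
	struct v4l2_event_subscription sub = { .type = V4L2_EVENT_FRAME_SYNC };
	struct v4l2_event ev;
	int fd = open(subdev_path, O_RDWR);	/* e.g. "/dev/v4l-subdev0" (illustrative) */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;
	while (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)	/* blocks until an event arrives */
		printf("start of frame %u\n", ev.u.frame_sync.frame_sequence);
	return 0;
}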
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index 4e2222358973..0d4389ab312d 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -14,7 +14,7 @@ qcom-camss-objs += \
camss-vfe-4-1.o \
camss-vfe-4-7.o \
camss-vfe-4-8.o \
- camss-vfe-170.o \
+ camss-vfe-17x.o \
camss-vfe-480.o \
camss-vfe-gen1.o \
camss-vfe.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index f50e2235c37f..df7e93a5a4f6 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -148,6 +148,91 @@ csiphy_reg_t lane_regs_sdm845[5][14] = {
},
};
+/* GEN2 1.1 2PH */
+static const struct
+csiphy_reg_t lane_regs_sc8280xp[5][14] = {
+ {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
/* GEN2 1.2.1 2PH */
static const struct
csiphy_reg_t lane_regs_sm8250[5][20] = {
@@ -428,6 +513,10 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
r = &lane_regs_sm8250[0][0];
array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
break;
+ case CAMSS_8280XP:
+ r = &lane_regs_sc8280xp[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sc8280xp[0]);
+ break;
default:
WARN(1, "unknown cspi version\n");
return;
@@ -463,13 +552,26 @@ static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
return lane_mask;
}
+static bool csiphy_is_gen2(u32 version)
+{
+ bool ret = false;
+
+ switch (version) {
+ case CAMSS_845:
+ case CAMSS_8250:
+ case CAMSS_8280XP:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
static void csiphy_lanes_enable(struct csiphy_device *csiphy,
struct csiphy_config *cfg,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
- bool is_gen2 = (csiphy->camss->res->version == CAMSS_845 ||
- csiphy->camss->res->version == CAMSS_8250);
u8 settle_cnt;
u8 val;
int i;
@@ -491,7 +593,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
val = 0x00;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
- if (is_gen2)
+ if (csiphy_is_gen2(csiphy->camss->res->version))
csiphy_gen2_config_lanes(csiphy, settle_cnt);
else
csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 264c99efeae8..45b3a8e5dea4 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -578,6 +578,7 @@ int msm_csiphy_subdev_init(struct camss *camss,
break;
case CAMSS_845:
case CAMSS_8250:
+ case CAMSS_8280XP:
csiphy->formats = csiphy_formats_sdm845;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845);
break;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
index 795ac3815339..795ac3815339 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 2062be668f49..d875237cf244 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -225,6 +225,7 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
case CAMSS_660:
case CAMSS_845:
case CAMSS_8250:
+ case CAMSS_8280XP:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -1518,6 +1519,7 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
break;
case CAMSS_845:
case CAMSS_8250:
+ case CAMSS_8280XP:
l->formats = formats_rdi_845;
l->nformats = ARRAY_SIZE(formats_rdi_845);
break;
@@ -1595,6 +1597,23 @@ static const struct media_entity_operations vfe_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
+static int vfe_bpl_align(struct vfe_device *vfe)
+{
+ int ret = 8;
+
+ switch (vfe->camss->res->version) {
+ case CAMSS_845:
+ case CAMSS_8250:
+ case CAMSS_8280XP:
+ ret = 16;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
/*
* msm_vfe_register_entities - Register subdev node for VFE module
* @vfe: VFE device
@@ -1661,11 +1680,7 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
}
video_out->ops = &vfe->video_ops;
- if (vfe->camss->res->version == CAMSS_845 ||
- vfe->camss->res->version == CAMSS_8250)
- video_out->bpl_alignment = 16;
- else
- video_out->bpl_alignment = 8;
+ video_out->bpl_alignment = vfe_bpl_align(vfe);
video_out->line_based = 0;
if (i == VFE_LINE_PIX) {
video_out->bpl_alignment = 16;
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index a89da5ef4710..54cd82f74115 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -1028,6 +1028,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
break;
case CAMSS_845:
case CAMSS_8250:
+ case CAMSS_8280XP:
video->formats = formats_rdi_845;
video->nformats = ARRAY_SIZE(formats_rdi_845);
break;
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 58f4be660290..1923615f0eea 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -941,6 +941,298 @@ static const struct resources_icc icc_res_sm8250[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = {},
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .ops = &csiphy_ops_3ph_1_0
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = {},
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .ops = &csiphy_ops_3ph_1_0
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = {},
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .ops = &csiphy_ops_3ph_1_0
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = {},
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .ops = &csiphy_ops_3ph_1_0
+ },
+};
+
+static const struct camss_subdev_resources csid_res_sc8280xp[] = {
+ /* CSID0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0", "vfe0_axi" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .ops = &csid_ops_gen2
+ },
+ /* CSID1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1", "vfe1_axi" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .ops = &csid_ops_gen2
+ },
+ /* CSID2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe2_csid", "vfe2_cphy_rx", "vfe2", "vfe2_axi" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .ops = &csid_ops_gen2
+ },
+ /* CSID3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe3_csid", "vfe3_cphy_rx", "vfe3", "vfe3_axi" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid3" },
+ .interrupt = { "csid3" },
+ .ops = &csid_ops_gen2
+ },
+ /* CSID_LITE0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite0_csid", "vfe_lite0_cphy_rx", "vfe_lite0" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "csid0_lite" },
+ .interrupt = { "csid0_lite" },
+ .is_lite = true,
+ .ops = &csid_ops_gen2
+ },
+ /* CSID_LITE1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite1_csid", "vfe_lite1_cphy_rx", "vfe_lite1" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "csid1_lite" },
+ .interrupt = { "csid1_lite" },
+ .is_lite = true,
+ .ops = &csid_ops_gen2
+ },
+ /* CSID_LITE2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite2_csid", "vfe_lite2_cphy_rx", "vfe_lite2" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "csid2_lite" },
+ .interrupt = { "csid2_lite" },
+ .is_lite = true,
+ .ops = &csid_ops_gen2
+ },
+ /* CSID_LITE3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "vfe_lite3_csid", "vfe_lite3_cphy_rx", "vfe_lite3" },
+ .clock_rate = { { 400000000, 480000000, 600000000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "csid3_lite" },
+ .interrupt = { "csid3_lite" },
+ .is_lite = true,
+ .ops = &csid_ops_gen2
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_sc8280xp[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe0", "vfe0_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 400000000, 558000000, 637000000, 760000000 },
+ { 0 }, },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .pd_name = "ife0",
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe1", "vfe1_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 400000000, 558000000, 637000000, 760000000 },
+ { 0 }, },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .pd_name = "ife1",
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe2", "vfe2_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 400000000, 558000000, 637000000, 760000000 },
+ { 0 }, },
+ .reg = { "vfe2" },
+ .interrupt = { "vfe2" },
+ .pd_name = "ife2",
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE3 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe3", "vfe3_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 400000000, 558000000, 637000000, 760000000 },
+ { 0 }, },
+ .reg = { "vfe3" },
+ .interrupt = { "vfe3" },
+ .pd_name = "ife3",
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE_LITE_0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe_lite0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 320000000, 400000000, 480000000, 600000000 }, },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .is_lite = true,
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE_LITE_1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe_lite1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 320000000, 400000000, 480000000, 600000000 }, },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .is_lite = true,
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE_LITE_2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe_lite2" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 320000000, 400000000, 480000000, 600000000, }, },
+ .reg = { "vfe_lite2" },
+ .interrupt = { "vfe_lite2" },
+ .is_lite = true,
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+ /* VFE_LITE_3 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb", "camnoc_axi", "vfe_lite3" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 19200000, 80000000},
+ { 19200000, 150000000, 266666667, 320000000, 400000000, 480000000 },
+ { 320000000, 400000000, 480000000, 600000000 }, },
+ .reg = { "vfe_lite3" },
+ .interrupt = { "vfe_lite3" },
+ .is_lite = true,
+ .line_num = 4,
+ .ops = &vfe_ops_170
+ },
+};
+
+static const struct resources_icc icc_res_sc8280xp[] = {
+ {
+ .name = "cam_ahb",
+ .icc_bw_tbl.avg = 150000,
+ .icc_bw_tbl.peak = 300000,
+ },
+ {
+ .name = "cam_hf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "cam_sf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "cam_sf_icp_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -1826,12 +2118,27 @@ static const struct camss_resources sm8250_resources = {
.vfe_num = ARRAY_SIZE(vfe_res_8250),
};
+static const struct camss_resources sc8280xp_resources = {
+ .version = CAMSS_8280XP,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_sc8280xp,
+ .csid_res = csid_res_sc8280xp,
+ .ispif_res = NULL,
+ .vfe_res = vfe_res_sc8280xp,
+ .icc_res = icc_res_sc8280xp,
+ .icc_path_num = ARRAY_SIZE(icc_res_sc8280xp),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_sc8280xp),
+ .csid_num = ARRAY_SIZE(csid_res_sc8280xp),
+ .vfe_num = ARRAY_SIZE(vfe_res_sc8280xp),
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
+ { .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index a0c2dcc779f0..ac15fe23a702 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -77,6 +77,7 @@ enum camss_version {
CAMSS_660,
CAMSS_845,
CAMSS_8250,
+ CAMSS_8280XP,
};
enum icc_count {
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 073f70c6ac68..bb4b07bed28d 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -730,7 +730,8 @@ static int rvin_s_dv_timings(struct file *file, void *priv_fh,
struct v4l2_subdev *sd = vin_to_source(vin);
int ret;
- ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+ ret = v4l2_subdev_call(sd, pad, s_dv_timings,
+ vin->parallel.sink_pad, timings);
if (ret)
return ret;
@@ -744,7 +745,8 @@ static int rvin_g_dv_timings(struct file *file, void *priv_fh,
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- return v4l2_subdev_call(sd, video, g_dv_timings, timings);
+ return v4l2_subdev_call(sd, pad, g_dv_timings,
+ vin->parallel.sink_pad, timings);
}
static int rvin_query_dv_timings(struct file *file, void *priv_fh,
@@ -753,7 +755,8 @@ static int rvin_query_dv_timings(struct file *file, void *priv_fh,
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- return v4l2_subdev_call(sd, video, query_dv_timings, timings);
+ return v4l2_subdev_call(sd, pad, query_dv_timings,
+ vin->parallel.sink_pad, timings);
}
static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index 792336dada44..997a66318a29 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -59,7 +59,7 @@ enum rvin_isp_id {
#define RVIN_REMOTES_MAX \
(((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
- RVIN_CSI_MAX : RVIN_ISP_MAX)
+ (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
/**
* enum rvin_dma_state - DMA states
diff --git a/drivers/media/platform/st/sti/c8sectpfe/Kconfig b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
index 702b910509c9..01c33d9c9ec3 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/Kconfig
+++ b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
@@ -5,7 +5,6 @@ config DVB_C8SECTPFE
depends on PINCTRL && DVB_CORE && I2C
depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
select FW_LOADER
- select DEBUG_FS
select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/platform/st/sti/c8sectpfe/Makefile b/drivers/media/platform/st/sti/c8sectpfe/Makefile
index aedfc725cc19..99425137ee0a 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/Makefile
+++ b/drivers/media/platform/st/sti/c8sectpfe/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
- c8sectpfe-debugfs.o
+c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o
+
+ifneq ($(CONFIG_DEBUG_FS),)
+c8sectpfe-y += c8sectpfe-debugfs.o
+endif
obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index e4cf27b5a072..2f58a0d0df85 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1097,7 +1096,6 @@ static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
}
}
- release_firmware(fw);
return err;
}
@@ -1121,6 +1119,7 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei)
}
err = load_slim_core_fw(fw, fei);
+ release_firmware(fw);
if (err) {
dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
return err;
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
index d2c35fb32d7e..8e1bfd860524 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
@@ -12,7 +12,12 @@
#include "c8sectpfe-core.h"
+#if defined(CONFIG_DEBUG_FS)
void c8sectpfe_debugfs_init(struct c8sectpfei *);
void c8sectpfe_debugfs_exit(struct c8sectpfei *);
+#else
+static inline void c8sectpfe_debugfs_init(struct c8sectpfei *) {};
+static inline void c8sectpfe_debugfs_exit(struct c8sectpfei *) {};
+#endif
#endif /* __C8SECTPFE_DEBUG_H */
diff --git a/drivers/media/platform/st/sti/hva/hva-hw.c b/drivers/media/platform/st/sti/hva/hva-hw.c
index fe4ea2e7f37e..fcb18fb52fdd 100644
--- a/drivers/media/platform/st/sti/hva/hva-hw.c
+++ b/drivers/media/platform/st/sti/hva/hva-hw.c
@@ -406,8 +406,7 @@ err_pm:
err_disable:
pm_runtime_disable(dev);
err_clk:
- if (hva->clk)
- clk_unprepare(hva->clk);
+ clk_unprepare(hva->clk);
return ret;
}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index bce821eb71ce..4acc3b90d03a 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -439,11 +439,8 @@ static int dcmipp_probe(struct platform_device *pdev)
"Could not get reset control\n");
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get irq\n");
- return irq ? irq : -ENXIO;
- }
+ if (irq < 0)
+ return irq;
dcmipp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(dcmipp->regs)) {
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
index 47a8c0fb7eb9..99c401e653bc 100644
--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select REGMAP_MMIO
+ select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
help
Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index c31a5566fc5a..c28794b6677b 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -1132,7 +1132,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
return -ENODATA;
- ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
+ ret = v4l2_subdev_call(ch->sd, pad, query_dv_timings, 0, timings);
if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -ENODATA;
@@ -1177,7 +1177,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
return -EBUSY;
/* Configure subdevice timings, if any */
- ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ ret = v4l2_subdev_call(ch->sd, pad, s_dv_timings, 0, timings);
if (ret == -ENOIOCTLCMD || ret == -ENODEV)
ret = 0;
if (ret < 0) {
diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c
index 02ede1fe12cb..76d8fa8ad088 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.c
+++ b/drivers/media/platform/ti/davinci/vpif_display.c
@@ -934,7 +934,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
}
/* Configure subdevice timings, if any */
- ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ ret = v4l2_subdev_call(ch->sd, pad, s_dv_timings, 0, timings);
if (ret == -ENOIOCTLCMD || ret == -ENODEV)
ret = 0;
if (ret < 0) {
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index 6da83d0cffaa..22442fce7607 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -786,15 +786,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
dev_warn(csi->dev,
"Failed to drain DMA. Next frame might be bogus\n");
+ spin_lock_irqsave(&dma->lock, flags);
ret = ti_csi2rx_start_dma(csi, buf);
if (ret) {
- dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
- spin_lock_irqsave(&dma->lock, flags);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dma->state = TI_CSI2RX_DMA_IDLE;
spin_unlock_irqrestore(&dma->lock, flags);
+ dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
} else {
- spin_lock_irqsave(&dma->lock, flags);
list_add_tail(&buf->list, &dma->submitted);
spin_unlock_irqrestore(&dma->lock, flags);
}
diff --git a/drivers/media/platform/verisilicon/hantro_h1_regs.h b/drivers/media/platform/verisilicon/hantro_h1_regs.h
index 30e7e7b920b5..8650cc489392 100644
--- a/drivers/media/platform/verisilicon/hantro_h1_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_h1_regs.h
@@ -62,7 +62,7 @@
#define H1_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
#define H1_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
#define H1_REG_ENC_CTRL2 0x048
-#define H1_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define H1_REG_ENC_CTRL2_DEBLOCKING_FILTER_MODE(x) ((x) << 30)
#define H1_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
#define H1_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
#define H1_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
@@ -89,7 +89,7 @@
#define H1_REG_STR_BUF_LIMIT 0x060
#define H1_REG_MAD_CTRL 0x064
#define H1_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
-#define H1_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define H1_REG_MAD_CTRL_MAD_THRESHOLD(x) ((x) << 22)
#define H1_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
#define H1_REG_ADDR_VP8_PROB_CNT 0x068
#define H1_REG_QP_VAL 0x06c
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 941fa23c211a..df6f2536263b 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -756,6 +756,7 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index f1c5c0a6a335..e3e6aa87fe08 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -62,7 +62,7 @@ struct shark_device {
#ifdef SHARK_USE_LEDS
struct work_struct led_work;
struct led_classdev leds[NO_LEDS];
- char led_names[NO_LEDS][32];
+ char led_names[NO_LEDS][64];
atomic_t brightness[NO_LEDS];
unsigned long brightness_new;
#endif
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 41eeec648803..b29a1a9f381d 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -9,7 +9,6 @@
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 5719dda6e0f0..0b55314a8082 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -845,13 +845,13 @@ static ssize_t imon_clock_show(struct device *d,
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- len = snprintf(buf, PAGE_SIZE, "Not supported.");
+ len = sysfs_emit(buf, "Not supported.");
} else {
- len = snprintf(buf, PAGE_SIZE,
- "To set the clock on your iMON display:\n"
- "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
- "%s", ictx->display_isopen ?
- "\nNOTE: imon device must be closed\n" : "");
+ len = sysfs_emit(buf,
+ "To set the clock on your iMON display:\n"
+ "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
+ "%s", ictx->display_isopen ?
+ "\nNOTE: imon device must be closed\n" : "");
}
mutex_unlock(&ictx->lock);
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index bbc81bed4f90..8fc8e496e6aa 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -4,13 +4,18 @@
// Copyright (c) 2016 Samsung Electronics Co., Ltd.
// Copyright (c) Andi Shyti <andi@etezian.org>
-#include <linux/delay.h>
-#include <linux/fs.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
#include <media/rc-core.h>
#define IR_SPI_DRIVER_NAME "ir-spi"
@@ -31,8 +36,7 @@ struct ir_spi_data {
struct regulator *regulator;
};
-static int ir_spi_tx(struct rc_dev *dev,
- unsigned int *buffer, unsigned int count)
+static int ir_spi_tx(struct rc_dev *dev, unsigned int *buffer, unsigned int count)
{
int i;
int ret;
@@ -52,7 +56,7 @@ static int ir_spi_tx(struct rc_dev *dev,
return -EINVAL;
/*
- * the first value in buffer is a pulse, so that 0, 2, 4, ...
+ * The first value in buffer is a pulse, so that 0, 2, 4, ...
* contain a pulse duration. On the contrary, 1, 3, 5, ...
* contain a space duration.
*/
@@ -111,15 +115,16 @@ static int ir_spi_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
static int ir_spi_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
int ret;
u8 dc;
struct ir_spi_data *idata;
- idata = devm_kzalloc(&spi->dev, sizeof(*idata), GFP_KERNEL);
+ idata = devm_kzalloc(dev, sizeof(*idata), GFP_KERNEL);
if (!idata)
return -ENOMEM;
- idata->regulator = devm_regulator_get(&spi->dev, "irda_regulator");
+ idata->regulator = devm_regulator_get(dev, "irda_regulator");
if (IS_ERR(idata->regulator))
return PTR_ERR(idata->regulator);
@@ -135,32 +140,31 @@ static int ir_spi_probe(struct spi_device *spi)
idata->rc->priv = idata;
idata->spi = spi;
- idata->negated = of_property_read_bool(spi->dev.of_node,
- "led-active-low");
- ret = of_property_read_u8(spi->dev.of_node, "duty-cycle", &dc);
+ idata->negated = device_property_read_bool(dev, "led-active-low");
+ ret = device_property_read_u8(dev, "duty-cycle", &dc);
if (ret)
dc = 50;
- /* ir_spi_set_duty_cycle cannot fail,
- * it returns int to be compatible with the
- * rc->s_tx_duty_cycle function
+ /*
+ * ir_spi_set_duty_cycle() cannot fail, it returns int
+ * to be compatible with the rc->s_tx_duty_cycle function.
*/
ir_spi_set_duty_cycle(idata->rc, dc);
idata->freq = IR_SPI_DEFAULT_FREQUENCY;
- return devm_rc_register_device(&spi->dev, idata->rc);
+ return devm_rc_register_device(dev, idata->rc);
}
static const struct of_device_id ir_spi_of_match[] = {
{ .compatible = "ir-spi-led" },
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, ir_spi_of_match);
static const struct spi_device_id ir_spi_ids[] = {
{ "ir-spi-led" },
- {},
+ {}
};
MODULE_DEVICE_TABLE(spi, ir_spi_ids);
@@ -172,7 +176,6 @@ static struct spi_driver ir_spi_driver = {
.of_match_table = ir_spi_of_match,
},
};
-
module_spi_driver(ir_spi_driver);
MODULE_AUTHOR("Andi Shyti <andi@etezian.org>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index caad59f76793..52aea4167718 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -27,7 +27,9 @@ static dev_t lirc_base_dev;
static DEFINE_IDA(lirc_ida);
/* Only used for sysfs but defined to void otherwise */
-static struct class *lirc_class;
+static const struct class lirc_class = {
+ .name = "lirc",
+};
/**
* lirc_raw_event() - Send raw IR data to lirc to be relayed to userspace
@@ -724,7 +726,7 @@ int lirc_register(struct rc_dev *dev)
return minor;
device_initialize(&dev->lirc_dev);
- dev->lirc_dev.class = lirc_class;
+ dev->lirc_dev.class = &lirc_class;
dev->lirc_dev.parent = &dev->dev;
dev->lirc_dev.release = lirc_release_device;
dev->lirc_dev.devt = MKDEV(MAJOR(lirc_base_dev), minor);
@@ -789,15 +791,13 @@ int __init lirc_dev_init(void)
{
int retval;
- lirc_class = class_create("lirc");
- if (IS_ERR(lirc_class)) {
- pr_err("class_create failed\n");
- return PTR_ERR(lirc_class);
- }
+ retval = class_register(&lirc_class);
+ if (retval)
+ return retval;
retval = alloc_chrdev_region(&lirc_base_dev, 0, RC_DEV_MAX, "lirc");
if (retval) {
- class_destroy(lirc_class);
+ class_unregister(&lirc_class);
pr_err("alloc_chrdev_region failed\n");
return retval;
}
@@ -810,7 +810,7 @@ int __init lirc_dev_init(void)
void __exit lirc_dev_exit(void)
{
- class_destroy(lirc_class);
+ class_unregister(&lirc_class);
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 4e294e59d3cb..b2f82b2d1c8d 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 96ae0294ac10..fc5fd3927177 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/types.h>
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 28477aa95563..988b09191c4c 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index bf58c965ead8..b49df8355e6b 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 6be4e5528879..65fa7f857fca 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -388,7 +388,7 @@ static int cxd2880_start_feed(struct dvb_demux_feed *feed)
if (dvb_spi->feed_count == 0) {
dvb_spi->ts_buf =
- kmalloc(MAX_TRANS_PKT * 188,
+ kzalloc(MAX_TRANS_PKT * 188,
GFP_KERNEL | GFP_DMA);
if (!dvb_spi->ts_buf) {
pr_err("ts buffer allocate failed\n");
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index 75c21a93e6d0..dc5c4c055d29 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -259,12 +259,15 @@ static inline struct gs *to_gs(struct v4l2_subdev *sd)
return container_of(sd, struct gs, sd);
}
-static int gs_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int gs_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct gs *gs = to_gs(sd);
int reg_value;
+ if (pad != 0)
+ return -EINVAL;
+
reg_value = get_register_timings(timings);
if (reg_value == 0x0)
return -EINVAL;
@@ -273,23 +276,29 @@ static int gs_s_dv_timings(struct v4l2_subdev *sd,
return 0;
}
-static int gs_g_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int gs_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct gs *gs = to_gs(sd);
+ if (pad != 0)
+ return -EINVAL;
+
*timings = gs->current_timings;
return 0;
}
-static int gs_query_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
+static int gs_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
{
struct gs *gs = to_gs(sd);
struct v4l2_dv_timings fmt;
u16 reg_value, i;
int ret;
+ if (pad != 0)
+ return -EINVAL;
+
if (gs->enabled)
return -EBUSY;
@@ -410,14 +419,14 @@ static const struct v4l2_subdev_core_ops gs_core_ops = {
};
static const struct v4l2_subdev_video_ops gs_video_ops = {
- .s_dv_timings = gs_s_dv_timings,
- .g_dv_timings = gs_g_dv_timings,
.s_stream = gs_s_stream,
.g_input_status = gs_g_input_status,
- .query_dv_timings = gs_query_dv_timings,
};
static const struct v4l2_subdev_pad_ops gs_pad_ops = {
+ .s_dv_timings = gs_s_dv_timings,
+ .g_dv_timings = gs_g_dv_timings,
+ .query_dv_timings = gs_query_dv_timings,
.enum_dv_timings = gs_enum_dv_timings,
.dv_timings_cap = gs_dv_timings_cap,
};
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index e13f5452b927..3e011fe62ae1 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1345,6 +1345,7 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 2a2d19d23bab..ba7550b8ba7e 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -221,6 +221,7 @@ static const struct v4l2_ioctl_ops vimc_capture_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_remove_bufs = vb2_ioctl_remove_bufs,
};
static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture,
@@ -432,7 +433,7 @@ static struct vimc_ent_device *vimc_capture_add(struct vimc_device *vimc,
q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
? &vb2_dma_contig_memops : &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_queued_buffers = 2;
+ q->min_reqbufs_allocation = 2;
q->lock = &vcapture->lock;
q->dev = v4l2_dev->dev;
diff --git a/drivers/media/test-drivers/visl/visl-video.c b/drivers/media/test-drivers/visl/visl-video.c
index b9a4b44bd0ed..f8d970319764 100644
--- a/drivers/media/test-drivers/visl/visl-video.c
+++ b/drivers/media/test-drivers/visl/visl-video.c
@@ -539,6 +539,7 @@ const struct v4l2_ioctl_ops visl_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 159c72cbb5bf..0273bc9863b0 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -769,6 +769,7 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_remove_bufs = vb2_ioctl_remove_bufs,
.vidioc_enum_input = vivid_enum_input,
.vidioc_g_input = vivid_g_input,
@@ -861,7 +862,7 @@ static const struct media_device_ops vivid_media_ops = {
static int vivid_create_queue(struct vivid_dev *dev,
struct vb2_queue *q,
u32 buf_type,
- unsigned int min_queued_buffers,
+ unsigned int min_reqbufs_allocation,
const struct vb2_ops *ops)
{
if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
@@ -898,7 +899,7 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
&vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_queued_buffers = supports_requests[dev->inst] ? 0 : min_queued_buffers;
+ q->min_reqbufs_allocation = min_reqbufs_allocation;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
q->supports_requests = supports_requests[dev->inst];
@@ -1364,7 +1365,7 @@ static int vivid_create_queues(struct vivid_dev *dev)
if (dev->has_meta_out) {
/* initialize meta_out queue */
ret = vivid_create_queue(dev, &dev->vb_meta_out_q,
- V4L2_BUF_TYPE_META_OUTPUT, 1,
+ V4L2_BUF_TYPE_META_OUTPUT, 2,
&vivid_meta_out_qops);
if (ret)
return ret;
@@ -1373,7 +1374,7 @@ static int vivid_create_queues(struct vivid_dev *dev)
if (dev->has_touch_cap) {
/* initialize touch_cap queue */
ret = vivid_create_queue(dev, &dev->vb_touch_cap_q,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, 1,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
&vivid_touch_cap_qops);
if (ret)
return ret;
diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
index 4a569a6e58be..82ab3b26914e 100644
--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
@@ -18,7 +18,6 @@ static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
- unsigned int q_num_bufs = vb2_get_num_buffers(vq);
unsigned int size = sizeof(struct vivid_meta_out_buf);
if (!vivid_is_webcam(dev))
@@ -31,9 +30,6 @@ static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[0] = size;
}
- if (q_num_bufs + *nbuffers < 2)
- *nbuffers = 2 - q_num_bufs;
-
*nplanes = 1;
return 0;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index 4b3c6ea0afde..3888c21b4d0c 100644
--- a/drivers/media/test-drivers/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -13,7 +13,6 @@ static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
- unsigned int q_num_bufs = vb2_get_num_buffers(vq);
struct v4l2_pix_format *f = &dev->tch_format;
unsigned int size = f->sizeimage;
@@ -24,9 +23,6 @@ static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[0] = size;
}
- if (q_num_bufs + *nbuffers < 2)
- *nbuffers = 2 - q_num_bufs;
-
*nplanes = 1;
return 0;
}
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 2182e5b7b606..30aa4ee958bd 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -58,7 +58,7 @@ struct xc5000_priv {
struct dvb_frontend *fe;
struct delayed_work timer_sleep;
- const struct firmware *firmware;
+ bool inited;
};
/* Misc Defines */
@@ -1110,23 +1110,19 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force)
if (!force && xc5000_is_firmware_loaded(fe) == 0)
return 0;
- if (!priv->firmware) {
- ret = request_firmware(&fw, desired_fw->name,
- priv->i2c_props.adap->dev.parent);
- if (ret) {
- pr_err("xc5000: Upload failed. rc %d\n", ret);
- return ret;
- }
- dprintk(1, "firmware read %zu bytes.\n", fw->size);
+ ret = request_firmware(&fw, desired_fw->name,
+ priv->i2c_props.adap->dev.parent);
+ if (ret) {
+ pr_err("xc5000: Upload failed. rc %d\n", ret);
+ return ret;
+ }
+ dprintk(1, "firmware read %zu bytes.\n", fw->size);
- if (fw->size != desired_fw->size) {
- pr_err("xc5000: Firmware file with incorrect size\n");
- release_firmware(fw);
- return -EINVAL;
- }
- priv->firmware = fw;
- } else
- fw = priv->firmware;
+ if (fw->size != desired_fw->size) {
+ pr_err("xc5000: Firmware file with incorrect size\n");
+ release_firmware(fw);
+ return -EINVAL;
+ }
/* Try up to 5 times to load firmware */
for (i = 0; i < 5; i++) {
@@ -1204,6 +1200,7 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force)
}
err:
+ release_firmware(fw);
if (!ret)
printk(KERN_INFO "xc5000: Firmware %s loaded and running.\n",
desired_fw->name);
@@ -1274,7 +1271,7 @@ static int xc5000_resume(struct dvb_frontend *fe)
/* suspended before firmware is loaded.
Avoid firmware load in resume path. */
- if (!priv->firmware)
+ if (!priv->inited)
return 0;
return xc5000_set_params(fe);
@@ -1293,6 +1290,8 @@ static int xc5000_init(struct dvb_frontend *fe)
if (debug)
xc_debug_dump(priv);
+ priv->inited = true;
+
return 0;
}
@@ -1306,10 +1305,6 @@ static void xc5000_release(struct dvb_frontend *fe)
if (priv) {
cancel_delayed_work(&priv->timer_sleep);
- if (priv->firmware) {
- release_firmware(priv->firmware);
- priv->firmware = NULL;
- }
hybrid_tuner_release_state(priv);
}
diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c
index 6b380144d6c2..e0ef66a522e2 100644
--- a/drivers/media/usb/as102/as102_usb_drv.c
+++ b/drivers/media/usb/as102/as102_usb_drv.c
@@ -259,7 +259,7 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
for (i = 0; i < MAX_STREAM_URB; i++) {
struct urb *urb;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
+ urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
as102_free_usb_stream_buffer(dev);
return -ENOMEM;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index fd9fc43d47e0..2ec49ea479d5 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -602,10 +602,7 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
vbi_field_size = dev->vbi_width * dev->vbi_height * 2;
if (dev->vbi_read < vbi_field_size) {
remain = vbi_field_size - dev->vbi_read;
- if (len < remain)
- lencopy = len;
- else
- lencopy = remain;
+ lencopy = umin(len, remain);
if (vbi_buf != NULL)
au0828_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 790787f0eba8..90f1aea99dac 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -197,10 +197,7 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb,
return -EINVAL;
}
for (i = 0; i < len;) {
- pagechunk =
- wMax < bytes_left_to_read_on_page(addr, len) ?
- wMax :
- bytes_left_to_read_on_page(addr, len);
+ pagechunk = min(wMax, bytes_left_to_read_on_page(addr, len));
deb_info("%x\n",
(addr & V8_MEMORY_PAGE_MASK) |
(V8_MEMORY_EXTENDED*extended));
@@ -448,7 +445,7 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
/* creating iso urbs */
for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) {
fc_usb->iso_urb[i] = usb_alloc_urb(B2C2_USB_FRAMES_PER_ISO,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (fc_usb->iso_urb[i] == NULL) {
ret = -ENOMEM;
goto urb_error;
@@ -481,7 +478,7 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
frame_offset += frame_size;
}
- if ((ret = usb_submit_urb(fc_usb->iso_urb[i],GFP_ATOMIC))) {
+ if ((ret = usb_submit_urb(fc_usb->iso_urb[i],GFP_KERNEL))) {
err("submitting urb %d failed with %d.", i, ret);
goto urb_error;
}
@@ -515,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
alt = fc_usb->uintf->cur_altsetting;
- if (alt->desc.bNumEndpoints < 1)
+ if (alt->desc.bNumEndpoints < 2)
return -ENODEV;
if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
return -ENODEV;
@@ -531,6 +528,12 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
case USB_SPEED_HIGH:
info("running at HIGH speed.");
break;
+ case USB_SPEED_SUPER:
+ info("running at SUPER speed.");
+ break;
+ case USB_SPEED_SUPER_PLUS:
+ info("running at SUPER+ speed.");
+ break;
case USB_SPEED_UNKNOWN:
default:
err("cannot handle USB speed because it is unknown.");
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index c6659253c6fb..6da8e7943d94 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -567,10 +567,7 @@ int cx231xx_i2c_mux_create(struct cx231xx *dev)
int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
{
- return i2c_mux_add_adapter(dev->muxc,
- 0,
- mux_no /* chan_id */,
- 0 /* class */);
+ return i2c_mux_add_adapter(dev->muxc, 0, mux_no);
}
void cx231xx_i2c_mux_unregister(struct cx231xx *dev)
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 4eb7dd4599b7..0d2c42819d39 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -868,6 +868,9 @@ static int af9035_read_config(struct dvb_usb_device *d)
if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA) &&
(le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_AVERMEDIA_TD310)) {
state->it930x_addresses = 1;
+ /* TD310 RC works with NEC defaults */
+ state->ir_mode = 0x05;
+ state->ir_type = 0x00;
}
return 0;
}
@@ -2066,6 +2069,11 @@ static const struct dvb_usb_device_properties it930x_props = {
.tuner_attach = it930x_tuner_attach,
.tuner_detach = it930x_tuner_detach,
.init = it930x_init,
+ /*
+ * dvb_usbv2_remote_init() calls rc_config() only for those devices
+ * which have non-empty rc_map, so it's safe to enable it for every IT930x
+ */
+ .get_rc_config = af9035_get_rc_config,
.get_stream_config = af9035_get_stream_config,
.get_adapter_count = af9035_get_adapter_count,
@@ -2157,7 +2165,7 @@ static const struct usb_device_id af9035_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9303,
&it930x_props, "ITE 9303 Generic", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
- &it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
+ &it930x_props, "AVerMedia TD310 DVB-T2", RC_MAP_AVERMEDIA_RM_KS) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
&it930x_props, "Logilink VG0022A", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_TC2_STICK,
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index a1235d0cce92..8699846eb416 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -202,14 +202,14 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
- if (msg[i].len != 2 || msg[i + 1].len > 60) {
+ if (msg[i].len < 1 || msg[i].len > 2 || msg[i + 1].len > 60) {
ret = -EOPNOTSUPP;
break;
}
buf[0] = CMD_I2C_READ;
buf[1] = (msg[i].addr << 1) | 0x01;
buf[2] = msg[i].buf[0];
- buf[3] = msg[i].buf[1];
+ buf[3] = (msg[i].len < 2) ? 0 : msg[i].buf[1];
buf[4] = msg[i].len-1;
buf[5] = msg[i+1].len;
ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 3af594134a6d..6ddc20513393 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -2412,7 +2412,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
- return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
+ if (!adap->fe_adap[0].fe) {
+ release_firmware(state->frontend_firmware);
+ return -ENODEV;
+ }
+
+ return 0;
}
static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
@@ -2485,8 +2490,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
- if (adap->fe_adap[0].fe == NULL)
+ if (!adap->fe_adap[0].fe) {
+ release_firmware(state->frontend_firmware);
return -ENODEV;
+ }
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
@@ -2494,7 +2501,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
- return fe_slave == NULL ? -ENODEV : 0;
+ if (!fe_slave) {
+ release_firmware(state->frontend_firmware);
+ return -ENODEV;
+ }
+
+ return 0;
}
static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index b3bb1805829a..79e2ccf974c9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -36,7 +36,6 @@
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
-
#define DW210X_READ_MSG 0
#define DW210X_WRITE_MSG 1
@@ -53,10 +52,10 @@
#define DW2102_FIRMWARE "dvb-usb-dw2102.fw"
#define DW2104_FIRMWARE "dvb-usb-dw2104.fw"
#define DW3101_FIRMWARE "dvb-usb-dw3101.fw"
-#define S630_FIRMWARE "dvb-usb-s630.fw"
-#define S660_FIRMWARE "dvb-usb-s660.fw"
-#define P1100_FIRMWARE "dvb-usb-p1100.fw"
-#define P7500_FIRMWARE "dvb-usb-p7500.fw"
+#define S630_FIRMWARE "dvb-usb-s630.fw"
+#define S660_FIRMWARE "dvb-usb-s660.fw"
+#define P1100_FIRMWARE "dvb-usb-p1100.fw"
+#define P7500_FIRMWARE "dvb-usb-p7500.fw"
#define err_str "did not find the firmware file '%s'. You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware"
@@ -87,7 +86,7 @@ MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+s
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
- u16 index, u8 * data, u16 len, int flags)
+ u16 index, u8 *data, u16 len, int flags)
{
int ret;
u8 *u8buf;
@@ -99,11 +98,10 @@ static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
if (!u8buf)
return -ENOMEM;
-
if (flags == DW210X_WRITE_MSG)
memcpy(u8buf, data, len);
ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR,
- value, index , u8buf, len, 2000);
+ value, index, u8buf, len, 2000);
if (flags == DW210X_READ_MSG)
memcpy(data, u8buf, len);
@@ -114,7 +112,7 @@ static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
/* I2C */
static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i = 0;
@@ -136,7 +134,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
value = msg[0].buf[0];/* register */
for (i = 0; i < msg[1].len; i++) {
dw210x_op_rw(d->udev, 0xb5, value + i, 0,
- buf6, 2, DW210X_READ_MSG);
+ buf6, 2, DW210X_READ_MSG);
msg[1].buf[i] = buf6[0];
}
break;
@@ -152,7 +150,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[1] = msg[0].buf[0];
buf6[2] = msg[0].buf[1];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- buf6, 3, DW210X_WRITE_MSG);
+ buf6, 3, DW210X_WRITE_MSG);
break;
case 0x60:
if (msg[0].flags == 0) {
@@ -169,7 +167,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[5] = msg[0].buf[2];
buf6[6] = msg[0].buf[3];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- buf6, 7, DW210X_WRITE_MSG);
+ buf6, 7, DW210X_WRITE_MSG);
} else {
if (msg[0].len < 1) {
num = -EOPNOTSUPP;
@@ -177,7 +175,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
/* read from tuner */
dw210x_op_rw(d->udev, 0xb5, 0, 0,
- buf6, 1, DW210X_READ_MSG);
+ buf6, 1, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
}
break;
@@ -187,7 +185,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
}
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- buf6, 2, DW210X_READ_MSG);
+ buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
break;
@@ -199,7 +197,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- buf6, 2, DW210X_WRITE_MSG);
+ buf6, 2, DW210X_WRITE_MSG);
break;
}
@@ -211,7 +209,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
- struct i2c_msg msg[], int num)
+ struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
u8 buf6[] = {0, 0, 0, 0, 0, 0, 0};
@@ -242,10 +240,10 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[1] = msg[0].len;
buf6[2] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- buf6, msg[0].len + 2, DW210X_WRITE_MSG);
+ buf6, msg[0].len + 2, DW210X_WRITE_MSG);
/* read si2109 register */
dw210x_op_rw(d->udev, 0xc3, 0xd0, 0,
- buf6, msg[1].len + 2, DW210X_READ_MSG);
+ buf6, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, buf6 + 2, msg[1].len);
break;
@@ -264,11 +262,11 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[1] = msg[0].len;
memcpy(buf6 + 2, msg[0].buf, msg[0].len);
dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
- msg[0].len + 2, DW210X_WRITE_MSG);
+ msg[0].len + 2, DW210X_WRITE_MSG);
break;
case(DW2102_RC_QUERY):
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- buf6, 2, DW210X_READ_MSG);
+ buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
break;
@@ -276,7 +274,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- buf6, 2, DW210X_WRITE_MSG);
+ buf6, 2, DW210X_WRITE_MSG);
break;
}
break;
@@ -320,10 +318,10 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0,
- ibuf, msg[1].len + 2, DW210X_READ_MSG);
+ dw210x_op_rw(d->udev, 0xc3, 0xd1, 0,
+ ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
break;
@@ -345,7 +343,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case 0x61: {
@@ -363,22 +361,24 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
+
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- ibuf, 2, DW210X_READ_MSG);
- memcpy(msg[0].buf, ibuf , 2);
+ ibuf, 2, DW210X_READ_MSG);
+ memcpy(msg[0].buf, ibuf, 2);
break;
}
case(DW2102_VOLTAGE_CTRL): {
u8 obuf[2];
+
obuf[0] = 0x30;
obuf[1] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- obuf, 2, DW210X_WRITE_MSG);
+ obuf, 2, DW210X_WRITE_MSG);
break;
}
}
@@ -406,23 +406,26 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
switch (msg[j].addr) {
case(DW2102_RC_QUERY): {
u8 ibuf[2];
+
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- ibuf, 2, DW210X_READ_MSG);
- memcpy(msg[j].buf, ibuf , 2);
+ ibuf, 2, DW210X_READ_MSG);
+ memcpy(msg[j].buf, ibuf, 2);
break;
}
case(DW2102_VOLTAGE_CTRL): {
u8 obuf[2];
+
obuf[0] = 0x30;
obuf[1] = msg[j].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
- obuf, 2, DW210X_WRITE_MSG);
+ obuf, 2, DW210X_WRITE_MSG);
break;
}
- /*case 0x55: cx24116
- case 0x6a: stv0903
- case 0x68: ds3000, stv0903
- case 0x60: ts2020, stv6110, stb6100 */
+ /* case 0x55: cx24116
+ * case 0x6a: stv0903
+ * case 0x68: ds3000, stv0903
+ * case 0x60: ts2020, stv6110, stb6100
+ */
default: {
if (msg[j].flags == I2C_M_RD) {
/* read registers */
@@ -436,17 +439,16 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
}
dw210x_op_rw(d->udev, 0xc3,
- (msg[j].addr << 1) + 1, 0,
- ibuf, msg[j].len + 2,
- DW210X_READ_MSG);
+ (msg[j].addr << 1) + 1, 0,
+ ibuf, msg[j].len + 2,
+ DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf + 2, msg[j].len);
mdelay(10);
- } else if (((msg[j].buf[0] == 0xb0) &&
- (msg[j].addr == 0x68)) ||
- ((msg[j].buf[0] == 0xf7) &&
- (msg[j].addr == 0x55))) {
+ } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) ||
+ ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) {
/* write firmware */
u8 obuf[19];
+
obuf[0] = msg[j].addr << 1;
obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len);
obuf[2] = msg[j].buf[0];
@@ -454,10 +456,10 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
i = 1;
do {
memcpy(obuf + 3, msg[j].buf + i,
- (len > 16 ? 16 : len));
+ (len > 16 ? 16 : len));
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, (len > 16 ? 16 : len) + 3,
- DW210X_WRITE_MSG);
+ obuf, (len > 16 ? 16 : len) + 3,
+ DW210X_WRITE_MSG);
i += 16;
len -= 16;
} while (len > 0);
@@ -476,13 +478,12 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
obuf[1] = msg[j].len;
memcpy(obuf + 2, msg[j].buf, msg[j].len);
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[j].len + 2,
- DW210X_WRITE_MSG);
+ obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
}
break;
}
}
-
}
ret = num;
@@ -492,7 +493,7 @@ unlock:
}
static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret;
@@ -525,10 +526,10 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
- ibuf, msg[1].len + 2, DW210X_READ_MSG);
+ dw210x_op_rw(d->udev, 0xc3, 0x19, 0,
+ ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
break;
@@ -550,14 +551,15 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
+
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- ibuf, 2, DW210X_READ_MSG);
- memcpy(msg[0].buf, ibuf , 2);
+ ibuf, 2, DW210X_READ_MSG);
+ memcpy(msg[0].buf, ibuf, 2);
break;
}
}
@@ -567,7 +569,7 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
for (i = 0; i < num; i++) {
deb_xfer("%02x:%02x: %s ", i, msg[i].addr,
- msg[i].flags == 0 ? ">>>" : "<<<");
+ msg[i].flags == 0 ? ">>>" : "<<<");
debug_dump(msg[i].buf, msg[i].len, deb_xfer);
}
ret = num;
@@ -578,7 +580,7 @@ unlock:
}
static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
@@ -594,8 +596,9 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
switch (msg[j].addr) {
case (DW2102_RC_QUERY): {
u8 ibuf[5];
+
dw210x_op_rw(d->udev, 0xb8, 0, 0,
- ibuf, 5, DW210X_READ_MSG);
+ ibuf, 5, DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf + 3, 2);
break;
}
@@ -605,11 +608,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 1;
obuf[1] = msg[j].buf[1];/* off-on */
dw210x_op_rw(d->udev, 0x8a, 0, 0,
- obuf, 2, DW210X_WRITE_MSG);
+ obuf, 2, DW210X_WRITE_MSG);
obuf[0] = 3;
obuf[1] = msg[j].buf[0];/* 13v-18v */
dw210x_op_rw(d->udev, 0x8a, 0, 0,
- obuf, 2, DW210X_WRITE_MSG);
+ obuf, 2, DW210X_WRITE_MSG);
break;
}
case (DW2102_LED_CTRL): {
@@ -618,14 +621,15 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 5;
obuf[1] = msg[j].buf[0];
dw210x_op_rw(d->udev, 0x8a, 0, 0,
- obuf, 2, DW210X_WRITE_MSG);
+ obuf, 2, DW210X_WRITE_MSG);
break;
}
- /*case 0x55: cx24116
- case 0x6a: stv0903
- case 0x68: ds3000, stv0903, rs2000
- case 0x60: ts2020, stv6110, stb6100
- case 0xa0: eeprom */
+ /* case 0x55: cx24116
+ * case 0x6a: stv0903
+ * case 0x68: ds3000, stv0903, rs2000
+ * case 0x60: ts2020, stv6110, stb6100
+ * case 0xa0: eeprom
+ */
default: {
if (msg[j].flags == I2C_M_RD) {
/* read registers */
@@ -639,14 +643,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
dw210x_op_rw(d->udev, 0x91, 0, 0,
- ibuf, msg[j].len,
+ ibuf, msg[j].len,
DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf, msg[j].len);
break;
- } else if ((msg[j].buf[0] == 0xb0) &&
- (msg[j].addr == 0x68)) {
+ } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) {
/* write firmware */
u8 obuf[19];
+
obuf[0] = (msg[j].len > 16 ?
18 : msg[j].len + 1);
obuf[1] = msg[j].addr << 1;
@@ -655,10 +659,10 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
i = 1;
do {
memcpy(obuf + 3, msg[j].buf + i,
- (len > 16 ? 16 : len));
+ (len > 16 ? 16 : len));
dw210x_op_rw(d->udev, 0x80, 0, 0,
- obuf, (len > 16 ? 16 : len) + 3,
- DW210X_WRITE_MSG);
+ obuf, (len > 16 ? 16 : len) + 3,
+ DW210X_WRITE_MSG);
i += 16;
len -= 16;
} while (len > 0);
@@ -677,10 +681,9 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
dw210x_op_rw(d->udev,
- le16_to_cpu(udev->descriptor.idProduct) ==
- 0x7500 ? 0x92 : 0x90, 0, 0,
- obuf, msg[j].len + 2,
- DW210X_WRITE_MSG);
+ le16_to_cpu(udev->descriptor.idProduct) == 0x7500 ? 0x92 : 0x90,
+ 0, 0, obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
break;
} else {
/* write registers */
@@ -696,8 +699,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
dw210x_op_rw(d->udev, 0x80, 0, 0,
- obuf, msg[j].len + 2,
- DW210X_WRITE_MSG);
+ obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
break;
}
break;
@@ -712,10 +715,11 @@ unlock:
}
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct dw2102_state *state;
+ int j;
if (!d)
return -ENODEV;
@@ -729,77 +733,102 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
return -EAGAIN;
}
- switch (num) {
- case 1:
- switch (msg[0].addr) {
+ j = 0;
+ while (j < num) {
+ switch (msg[j].addr) {
case SU3000_STREAM_CTRL:
- state->data[0] = msg[0].buf[0] + 0x36;
+ state->data[0] = msg[j].buf[0] + 0x36;
state->data[1] = 3;
state->data[2] = 0;
if (dvb_usb_generic_rw(d, state->data, 3,
- state->data, 0, 0) < 0)
+ state->data, 0, 0) < 0)
err("i2c transfer failed.");
break;
case DW2102_RC_QUERY:
state->data[0] = 0x10;
if (dvb_usb_generic_rw(d, state->data, 1,
- state->data, 2, 0) < 0)
+ state->data, 2, 0) < 0)
err("i2c transfer failed.");
- msg[0].buf[1] = state->data[0];
- msg[0].buf[0] = state->data[1];
+ msg[j].buf[1] = state->data[0];
+ msg[j].buf[0] = state->data[1];
break;
default:
- if (3 + msg[0].len > sizeof(state->data)) {
- warn("i2c wr: len=%d is too big!\n",
- msg[0].len);
- num = -EOPNOTSUPP;
+ /* if the current write msg is followed by another
+ * read msg to/from the same address
+ */
+ if ((j + 1 < num) && (msg[j + 1].flags & I2C_M_RD) &&
+ (msg[j].addr == msg[j + 1].addr)) {
+ /* join both i2c msgs to one usb read command */
+ if (4 + msg[j].len > sizeof(state->data)) {
+ warn("i2c combined wr/rd: write len=%d is too big!\n",
+ msg[j].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+ if (1 + msg[j + 1].len > sizeof(state->data)) {
+ warn("i2c combined wr/rd: read len=%d is too big!\n",
+ msg[j + 1].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
+ state->data[0] = 0x09;
+ state->data[1] = msg[j].len;
+ state->data[2] = msg[j + 1].len;
+ state->data[3] = msg[j].addr;
+ memcpy(&state->data[4], msg[j].buf, msg[j].len);
+
+ if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
+ state->data, msg[j + 1].len + 1, 0) < 0)
+ err("i2c transfer failed.");
+
+ memcpy(msg[j + 1].buf, &state->data[1], msg[j + 1].len);
+ j++;
break;
}
- /* always i2c write*/
- state->data[0] = 0x08;
- state->data[1] = msg[0].addr;
- state->data[2] = msg[0].len;
+ if (msg[j].flags & I2C_M_RD) {
+ /* single read */
+ if (4 + msg[j].len > sizeof(state->data)) {
+ warn("i2c rd: len=%d is too big!\n", msg[j].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
- memcpy(&state->data[3], msg[0].buf, msg[0].len);
+ state->data[0] = 0x09;
+ state->data[1] = 0;
+ state->data[2] = msg[j].len;
+ state->data[3] = msg[j].addr;
+ memcpy(&state->data[4], msg[j].buf, msg[j].len);
- if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
- state->data, 1, 0) < 0)
- err("i2c transfer failed.");
+ if (dvb_usb_generic_rw(d, state->data, 4,
+ state->data, msg[j].len + 1, 0) < 0)
+ err("i2c transfer failed.");
- }
- break;
- case 2:
- /* always i2c read */
- if (4 + msg[0].len > sizeof(state->data)) {
- warn("i2c rd: len=%d is too big!\n",
- msg[0].len);
- num = -EOPNOTSUPP;
- break;
- }
- if (1 + msg[1].len > sizeof(state->data)) {
- warn("i2c rd: len=%d is too big!\n",
- msg[1].len);
- num = -EOPNOTSUPP;
- break;
- }
+ memcpy(msg[j].buf, &state->data[1], msg[j].len);
+ break;
+ }
- state->data[0] = 0x09;
- state->data[1] = msg[0].len;
- state->data[2] = msg[1].len;
- state->data[3] = msg[0].addr;
- memcpy(&state->data[4], msg[0].buf, msg[0].len);
+ /* single write */
+ if (3 + msg[j].len > sizeof(state->data)) {
+ warn("i2c wr: len=%d is too big!\n", msg[j].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
- if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
- state->data, msg[1].len + 1, 0) < 0)
- err("i2c transfer failed.");
+ state->data[0] = 0x08;
+ state->data[1] = msg[j].addr;
+ state->data[2] = msg[j].len;
- memcpy(msg[1].buf, &state->data[1], msg[1].len);
- break;
- default:
- warn("more than 2 i2c messages at a time is not handled yet.");
- break;
- }
+ memcpy(&state->data[3], msg[j].buf, msg[j].len);
+
+ if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+ state->data, 1, 0) < 0)
+ err("i2c transfer failed.");
+ } /* switch */
+ j++;
+
+ } /* while */
mutex_unlock(&d->data_mutex);
mutex_unlock(&d->i2c_mutex);
return num;
@@ -852,11 +881,11 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
u8 eeprom[256], eepromline[16];
for (i = 0; i < 256; i++) {
- if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2, DW210X_READ_MSG) < 0) {
+ if (dw210x_op_rw(d->udev, 0xb6, 0xa0, i, ibuf, 2, DW210X_READ_MSG) < 0) {
err("read eeprom failed.");
return -EIO;
} else {
- eepromline[i%16] = ibuf[0];
+ eepromline[i % 16] = ibuf[0];
eeprom[i] = ibuf[0];
}
if ((i % 16) == 15) {
@@ -963,7 +992,6 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
.flags = I2C_M_RD,
.buf = ibuf,
.len = 1,
-
}
};
@@ -983,8 +1011,6 @@ static int su3000_identify_state(struct usb_device *udev,
const struct dvb_usb_device_description **desc,
int *cold)
{
- info("%s", __func__);
-
*cold = 0;
return 0;
}
@@ -1003,6 +1029,7 @@ static int dw210x_set_voltage(struct dvb_frontend *fe,
};
struct dvb_usb_adapter *udev_adap = fe->dvb->priv;
+
if (voltage == SEC_VOLTAGE_18)
msg.buf = command_18v;
else if (voltage == SEC_VOLTAGE_13)
@@ -1206,11 +1233,11 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
if (demod_probe & 4) {
d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config,
- &d->dev->i2c_adap, 0);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap, 0);
+ if (d->fe_adap[0].fe) {
if (dvb_attach(stb6100_attach, d->fe_adap[0].fe,
- &dw2104a_stb6100_config,
- &d->dev->i2c_adap)) {
+ &dw2104a_stb6100_config,
+ &d->dev->i2c_adap)) {
tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops;
tuner_ops->set_frequency = stb6100_set_freq;
tuner_ops->get_frequency = stb6100_get_freq;
@@ -1225,11 +1252,11 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
if (demod_probe & 2) {
d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config,
- &d->dev->i2c_adap, 0);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap, 0);
+ if (d->fe_adap[0].fe) {
if (dvb_attach(stv6110_attach, d->fe_adap[0].fe,
- &dw2104_stv6110_config,
- &d->dev->i2c_adap)) {
+ &dw2104_stv6110_config,
+ &d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached STV0900+STV6110A!");
return 0;
@@ -1239,8 +1266,8 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
if (demod_probe & 1) {
d->fe_adap[0].fe = dvb_attach(cx24116_attach, &dw2104_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached cx24116!");
return 0;
@@ -1248,10 +1275,10 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
}
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
dvb_attach(ts2020_attach, d->fe_adap[0].fe,
- &dw2104_ts2020_config, &d->dev->i2c_adap);
+ &dw2104_ts2020_config, &d->dev->i2c_adap);
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached DS3000!");
return 0;
@@ -1269,8 +1296,8 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) {
/*dw2102_properties.adapter->tuner_attach = NULL;*/
d->fe_adap[0].fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached si21xx!");
return 0;
@@ -1279,10 +1306,10 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) {
d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61,
- &d->dev->i2c_adap)) {
+ &d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached stv0288!");
return 0;
@@ -1293,8 +1320,8 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) {
/*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/
d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached stv0299!");
return 0;
@@ -1306,8 +1333,8 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
{
d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
- &d->dev->i2c_adap, 0x48);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap, 0x48);
+ if (d->fe_adap[0].fe) {
info("Attached tda10023!");
return 0;
}
@@ -1317,10 +1344,10 @@ static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
static int zl100313_frontend_attach(struct dvb_usb_adapter *d)
{
d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe != NULL) {
+ &d->dev->i2c_adap);
+ if (d->fe_adap[0].fe) {
if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60,
- &d->dev->i2c_adap)) {
+ &d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached zl100313+zl10039!");
return 0;
@@ -1335,12 +1362,12 @@ static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
u8 obuf[] = {7, 1};
d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
- &d->dev->i2c_adap);
+ &d->dev->i2c_adap);
- if (d->fe_adap[0].fe == NULL)
+ if (!d->fe_adap[0].fe)
return -EIO;
- if (NULL == dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap))
+ if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap) == NULL)
return -EIO;
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
@@ -1350,7 +1377,6 @@ static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
info("Attached stv0288+stb6000!");
return 0;
-
}
static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
@@ -1359,13 +1385,13 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
u8 obuf[] = {7, 1};
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config,
- &d->dev->i2c_adap);
+ &d->dev->i2c_adap);
- if (d->fe_adap[0].fe == NULL)
+ if (!d->fe_adap[0].fe)
return -EIO;
dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config,
- &d->dev->i2c_adap);
+ &d->dev->i2c_adap);
st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage;
@@ -1382,8 +1408,8 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
u8 obuf[] = {7, 1};
d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config,
- &d->dev->i2c_adap, 0);
- if (d->fe_adap[0].fe == NULL)
+ &d->dev->i2c_adap, 0);
+ if (!d->fe_adap[0].fe)
return -EIO;
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
@@ -1439,12 +1465,12 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
mutex_unlock(&d->data_mutex);
adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
- &d->i2c_adap);
- if (adap->fe_adap[0].fe == NULL)
+ &d->i2c_adap);
+ if (!adap->fe_adap[0].fe)
return -EIO;
if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- &dw2104_ts2020_config,
+ &dw2104_ts2020_config,
&d->i2c_adap)) {
info("Attached DS3000/TS2020!");
return 0;
@@ -1499,10 +1525,10 @@ static int t220_frontend_attach(struct dvb_usb_adapter *adap)
mutex_unlock(&d->data_mutex);
adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
- &d->i2c_adap, NULL);
- if (adap->fe_adap[0].fe != NULL) {
+ &d->i2c_adap, NULL);
+ if (adap->fe_adap[0].fe) {
if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
- &d->i2c_adap, &tda18271_config)) {
+ &d->i2c_adap, &tda18271_config)) {
info("Attached TDA18271HD/CXD2820R!");
return 0;
}
@@ -1527,14 +1553,14 @@ static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
mutex_unlock(&d->data_mutex);
adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
- &s421_m88rs2000_config,
- &d->i2c_adap);
+ &s421_m88rs2000_config,
+ &d->i2c_adap);
- if (adap->fe_adap[0].fe == NULL)
+ if (!adap->fe_adap[0].fe)
return -EIO;
if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- &dw2104_ts2020_config,
+ &dw2104_ts2020_config,
&d->i2c_adap)) {
info("Attached RS2000/TS2020!");
return 0;
@@ -1701,14 +1727,14 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
- &adap->dev->i2c_adap, DVB_PLL_OPERA1);
+ &adap->dev->i2c_adap, DVB_PLL_OPERA1);
return 0;
}
static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
- &adap->dev->i2c_adap, DVB_PLL_TUA6034);
+ &adap->dev->i2c_adap, DVB_PLL_TUA6034);
return 0;
}
@@ -1726,7 +1752,7 @@ static int dw2102_rc_query(struct dvb_usb_device *d)
if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
if (msg.buf[0] != 0xff) {
deb_rc("%s: rc code: %x, %x\n",
- __func__, key[0], key[1]);
+ __func__, key[0], key[1]);
rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0], 0);
}
}
@@ -1747,7 +1773,7 @@ static int prof_rc_query(struct dvb_usb_device *d)
if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
if (msg.buf[0] != 0xff) {
deb_rc("%s: rc code: %x, %x\n",
- __func__, key[0], key[1]);
+ __func__, key[0], key[1]);
rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0] ^ 0xff,
0);
}
@@ -1769,7 +1795,7 @@ static int su3000_rc_query(struct dvb_usb_device *d)
if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
if (msg.buf[0] != 0xff) {
deb_rc("%s: rc code: %x, %x\n",
- __func__, key[0], key[1]);
+ __func__, key[0], key[1]);
rc_keydown(d->rc_dev, RC_PROTO_RC5,
RC_SCANCODE_RC5(key[1], key[0]), 0);
}
@@ -1807,7 +1833,6 @@ enum dw2102_table_entry {
TECHNOTREND_CONNECT_S2_4600,
TEVII_S482_1,
TEVII_S482_2,
- TERRATEC_CINERGY_S2_BOX,
TEVII_S662
};
@@ -1840,7 +1865,6 @@ static struct usb_device_id dw2102_table[] = {
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_4600),
DVB_USB_DEV(TEVII, TEVII_S482_1),
DVB_USB_DEV(TEVII, TEVII_S482_2),
- DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_BOX),
DVB_USB_DEV(TEVII, TEVII_S662),
{ }
};
@@ -1848,7 +1872,7 @@ static struct usb_device_id dw2102_table[] = {
MODULE_DEVICE_TABLE(usb, dw2102_table);
static int dw2102_load_firmware(struct usb_device *dev,
- const struct firmware *frmwr)
+ const struct firmware *frmwr)
{
u8 *b, *p;
int ret = 0, i;
@@ -1875,12 +1899,12 @@ static int dw2102_load_firmware(struct usb_device *dev,
dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG);
dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG);
- if (p != NULL) {
+ if (p) {
memcpy(p, fw->data, fw->size);
for (i = 0; i < fw->size; i += 0x40) {
- b = (u8 *) p + i;
- if (dw210x_op_rw(dev, 0xa0, i, 0, b , 0x40,
- DW210X_WRITE_MSG) != 0x40) {
+ b = (u8 *)p + i;
+ if (dw210x_op_rw(dev, 0xa0, i, 0, b, 0x40,
+ DW210X_WRITE_MSG) != 0x40) {
err("error while transferring firmware");
ret = -EINVAL;
break;
@@ -1906,50 +1930,49 @@ static int dw2102_load_firmware(struct usb_device *dev,
case USB_PID_CYPRESS_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
- DW210X_WRITE_MSG);
+ DW210X_WRITE_MSG);
fallthrough;
case USB_PID_CYPRESS_DW3101:
reset = 0;
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
- DW210X_WRITE_MSG);
+ DW210X_WRITE_MSG);
break;
case USB_PID_TERRATEC_CINERGY_S:
case USB_PID_CYPRESS_DW2102:
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
- DW210X_WRITE_MSG);
+ DW210X_WRITE_MSG);
dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
/* check STV0299 frontend */
dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) {
dw2102_properties.i2c_algo = &dw2102_i2c_algo;
dw2102_properties.adapter->fe[0].tuner_attach = &dw2102_tuner_attach;
break;
- } else {
- /* check STV0288 frontend */
- reset16[0] = 0xd0;
- reset16[1] = 1;
- reset16[2] = 0;
- dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3,
- DW210X_WRITE_MSG);
- dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3,
- DW210X_READ_MSG);
- if (reset16[2] == 0x11) {
- dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo;
- break;
- }
+ }
+ /* check STV0288 frontend */
+ reset16[0] = 0xd0;
+ reset16[1] = 1;
+ reset16[2] = 0;
+ dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3,
+ DW210X_WRITE_MSG);
+ dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3,
+ DW210X_READ_MSG);
+ if (reset16[2] == 0x11) {
+ dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo;
+ break;
}
fallthrough;
case 0x2101:
dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2,
- DW210X_READ_MSG);
+ DW210X_READ_MSG);
break;
}
@@ -2551,7 +2574,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
{ NULL },
},
{ "Terratec Cinergy S2 USB BOX",
- { &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL },
+ { &dw2102_table[TERRATEC_CINERGY_S2_R4], NULL },
{ NULL },
},
{ "TeVii S662",
@@ -2562,18 +2585,18 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
};
static int dw2102_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
if (!(dvb_usb_device_init(intf, &dw2102_properties,
- THIS_MODULE, NULL, adapter_nr) &&
+ THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &dw2104_properties,
THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &dw3101_properties,
- THIS_MODULE, NULL, adapter_nr) &&
+ THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &s6x0_properties,
- THIS_MODULE, NULL, adapter_nr) &&
+ THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &p1100_properties,
- THIS_MODULE, NULL, adapter_nr) &&
+ THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &s660_properties,
THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &p7500_properties,
@@ -2586,7 +2609,6 @@ static int dw2102_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) &&
dvb_usb_device_init(intf, &tt_s2_4600_properties,
THIS_MODULE, NULL, adapter_nr))) {
-
return 0;
}
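The dw2102 hunks above are checkpatch-style cleanups: continuation arguments are re-aligned under the opening parenthesis and explicit NULL comparisons become boolean tests. A minimal sketch of the resulting attach shape, reusing identifiers from the hunks above (illustrative only, not part of the diff and not compilable on its own):

static int example_frontend_attach(struct dvb_usb_adapter *d)
{
        d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config,
                                      &d->dev->i2c_adap);
        if (!d->fe_adap[0].fe)          /* preferred over "... != NULL" */
                return -EIO;

        d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
        info("Attached stv0299!");
        return 0;
}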
diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
index 018019ba47d4..86ce593e0c54 100644
--- a/drivers/media/usb/go7007/go7007-fw.c
+++ b/drivers/media/usb/go7007/go7007-fw.c
@@ -1289,8 +1289,8 @@ static int avsync_to_package(struct go7007 *go, __le16 *code, int space)
0xbf99, (u16)((-adjratio) >> 16),
0xbf92, 0,
0xbf93, 0,
- 0xbff4, f1 > f2 ? f1 : f2,
- 0xbff5, f1 < f2 ? f1 : f2,
+ 0xbff4, max(f1, f2),
+ 0xbff5, min(f1, f2),
0xbff6, f1 < f2 ? ratio : ratio + 1,
0xbff7, f1 > f2 ? ratio : ratio + 1,
0xbff8, 0,
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 5f5fa851ca64..14aaf36cde6e 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -604,10 +604,8 @@ static int find_over_exposure(int brightness)
MaxAllowableOverExposure = FLICKER_MAX_EXPOSURE - brightness -
FLICKER_BRIGHTNESS_CONSTANT;
- if (MaxAllowableOverExposure < FLICKER_ALLOWABLE_OVER_EXPOSURE)
- OverExposure = MaxAllowableOverExposure;
- else
- OverExposure = FLICKER_ALLOWABLE_OVER_EXPOSURE;
+ OverExposure = min(MaxAllowableOverExposure,
+ FLICKER_ALLOWABLE_OVER_EXPOSURE);
return OverExposure;
}
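Both the go7007 and cpia1 changes replace open-coded ternaries and if/else ladders with the min()/max() helpers from <linux/minmax.h>, which also enforce that the operands have compatible types. A reduced sketch of the pattern (function and variable names hypothetical):

#include <linux/minmax.h>

static int clamp_copy_len(int remain, int bytesperline)
{
        /*
         * Open-coded form the patches remove:
         *      if (remain < bytesperline)
         *              lencopy = remain;
         *      else
         *              lencopy = bytesperline;
         */
        return min(remain, bytesperline);       /* type-checked equivalent */
}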
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 8e1de1e8bd12..a6e450181fd0 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -247,7 +247,7 @@ struct s2255_vc {
struct s2255_dev {
struct s2255_vc vc[MAX_CHANNELS];
struct v4l2_device v4l2_dev;
- atomic_t num_channels;
+ refcount_t num_channels;
int frames;
struct mutex lock; /* channels[].vdev.lock */
struct mutex cmdlock; /* protects cmdbuf */
@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
container_of(vdev, struct s2255_vc, vdev);
dprintk(dev, 4, "%s, chnls: %d\n", __func__,
- atomic_read(&dev->num_channels));
+ refcount_read(&dev->num_channels));
v4l2_ctrl_handler_free(&vc->hdl);
- if (atomic_dec_and_test(&dev->num_channels))
+ if (refcount_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
return;
}
@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
"failed to register video device!\n");
break;
}
- atomic_inc(&dev->num_channels);
+ refcount_inc(&dev->num_channels);
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(&vc->vdev));
@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
pr_info("Sensoray 2255 V4L driver Revision: %s\n",
S2255_VERSION);
/* if no channels registered, return error and probe will fail*/
- if (atomic_read(&dev->num_channels) == 0) {
+ if (refcount_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
- if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
+ if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
pr_warn("s2255: Not all channels available.\n");
return 0;
}
@@ -2221,7 +2221,7 @@ static int s2255_probe(struct usb_interface *interface,
goto errorFWDATA1;
}
- atomic_set(&dev->num_channels, 0);
+ refcount_set(&dev->num_channels, 0);
dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
@@ -2341,12 +2341,12 @@ static void s2255_disconnect(struct usb_interface *interface)
{
struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
- int channels = atomic_read(&dev->num_channels);
+ int channels = refcount_read(&dev->num_channels);
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
mutex_unlock(&dev->lock);
/*see comments in the uvc_driver.c usb disconnect function */
- atomic_inc(&dev->num_channels);
+ refcount_inc(&dev->num_channels);
/* unregister each video device. */
for (i = 0; i < channels; i++)
video_unregister_device(&dev->vc[i].vdev);
@@ -2359,7 +2359,7 @@ static void s2255_disconnect(struct usb_interface *interface)
dev->vc[i].vidstatus_ready = 1;
wake_up(&dev->vc[i].wait_vidstatus);
}
- if (atomic_dec_and_test(&dev->num_channels))
+ if (refcount_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
dev_info(&interface->dev, "%s\n", __func__);
}
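The s2255 conversion swaps a plain atomic_t channel counter for refcount_t, whose helpers saturate instead of wrapping and warn on misuse. A condensed sketch of the get/put lifetime pattern the driver follows (object and function names hypothetical):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t users;
};

static void obj_init(struct obj *o)
{
        refcount_set(&o->users, 1);             /* first reference */
}

static void obj_get(struct obj *o)
{
        refcount_inc(&o->users);                /* one per registered channel */
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->users))
                kfree(o);                       /* last user frees the object */
}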
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 723510520d09..2c8179a84991 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -40,7 +40,7 @@ struct smsusb_urb_t {
struct smscore_buffer_t *cb;
struct smsusb_device_t *dev;
- struct urb urb;
+ struct urb *urb;
/* For the bottom half */
struct work_struct wq;
@@ -160,7 +160,7 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev,
}
usb_fill_bulk_urb(
- &surb->urb,
+ surb->urb,
dev->udev,
usb_rcvbulkpipe(dev->udev, dev->in_ep),
surb->cb->p,
@@ -168,9 +168,9 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev,
smsusb_onresponse,
surb
);
- surb->urb.transfer_flags |= URB_FREE_BUFFER;
+ surb->urb->transfer_flags |= URB_FREE_BUFFER;
- return usb_submit_urb(&surb->urb, GFP_ATOMIC);
+ return usb_submit_urb(surb->urb, GFP_ATOMIC);
}
static void smsusb_stop_streaming(struct smsusb_device_t *dev)
@@ -178,7 +178,7 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
int i;
for (i = 0; i < MAX_URBS; i++) {
- usb_kill_urb(&dev->surbs[i].urb);
+ usb_kill_urb(dev->surbs[i].urb);
if (dev->surbs[i].wq.func)
cancel_work_sync(&dev->surbs[i].wq);
@@ -338,6 +338,8 @@ static void smsusb_term_device(struct usb_interface *intf)
struct smsusb_device_t *dev = usb_get_intfdata(intf);
if (dev) {
+ int i;
+
dev->state = SMSUSB_DISCONNECTED;
smsusb_stop_streaming(dev);
@@ -346,6 +348,9 @@ static void smsusb_term_device(struct usb_interface *intf)
if (dev->coredev)
smscore_unregister_device(dev->coredev);
+ for (i = 0; i < MAX_URBS; i++)
+ usb_free_urb(dev->surbs[i].urb);
+
pr_debug("device 0x%p destroyed\n", dev);
kfree(dev);
}
@@ -463,7 +468,9 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
/* initialize urbs */
for (i = 0; i < MAX_URBS; i++) {
dev->surbs[i].dev = dev;
- usb_init_urb(&dev->surbs[i].urb);
+ dev->surbs[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->surbs[i].urb)
+ goto err_unregister_device;
}
pr_debug("smsusb_start_streaming(...).\n");
@@ -486,6 +493,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
return rc;
err_unregister_device:
+ /* smsusb_term_device() frees any allocated urb. */
smsusb_term_device(intf);
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
media_device_unregister(mdev);
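smsusb used to embed struct urb in its own bookkeeping structure; the change above switches to URBs allocated with usb_alloc_urb() so their lifetime is managed by the USB core and they can be freed in smsusb_term_device(). The usual allocate/fill/submit sequence, sketched with a hypothetical helper:

#include <linux/usb.h>

static int example_start_rx(struct usb_device *udev, unsigned int ep,
                            void *buf, int len, usb_complete_t done,
                            void *ctx, struct urb **out)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
                          buf, len, done, ctx);
        *out = urb;
        return usb_submit_urb(urb, GFP_KERNEL);
}

/* Teardown mirrors smsusb_stop_streaming()/smsusb_term_device():
 * usb_kill_urb(urb) to stop it synchronously, then usb_free_urb(urb). */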
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 366f0e4a5dc0..9cbd957ecc90 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
static inline
void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
{
- int linesdone, lineoff, lencopy;
+ int linesdone, lineoff, lencopy, offset;
int bytesperline = dev->width * 2;
struct stk1160_buffer *buf = dev->isoc_ctl.buf;
u8 *dst = buf->mem;
@@ -130,17 +130,19 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
dst += linesdone * bytesperline * 2 + lineoff;
/* Copy the remaining of current line */
- if (remain < (bytesperline - lineoff))
- lencopy = remain;
- else
- lencopy = bytesperline - lineoff;
+ lencopy = min(remain, bytesperline - lineoff);
/*
* Check if we have enough space left in the buffer.
* In that case, we force loop exit after copy.
*/
- if (lencopy > buf->bytesused - buf->length) {
- lencopy = buf->bytesused - buf->length;
+ offset = dst - (u8 *)buf->mem;
+ if (offset > buf->length) {
+ dev_warn_ratelimited(dev->dev, "out of bounds offset\n");
+ return;
+ }
+ if (lencopy > buf->length - offset) {
+ lencopy = buf->length - offset;
remain = lencopy;
}
@@ -173,17 +175,19 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
src += lencopy;
/* Copy one line at a time */
- if (remain < bytesperline)
- lencopy = remain;
- else
- lencopy = bytesperline;
+ lencopy = min(remain, bytesperline);
/*
* Check if we have enough space left in the buffer.
* In that case, we force loop exit after copy.
*/
- if (lencopy > buf->bytesused - buf->length) {
- lencopy = buf->bytesused - buf->length;
+ offset = dst - (u8 *)buf->mem;
+ if (offset > buf->length) {
+ dev_warn_ratelimited(dev->dev, "offset out of bounds\n");
+ return;
+ }
+ if (lencopy > buf->length - offset) {
+ lencopy = buf->length - offset;
remain = lencopy;
}
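The stk1160 fix derives the remaining room from the destination pointer itself rather than from bytesused, so a miscounted frame can no longer copy past the end of the buffer. The guard, reduced to a self-contained helper (names hypothetical, logic mirrors the hunks):

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

static size_t copy_clamped(u8 *dst, const u8 *base, size_t buflen,
                           const u8 *src, size_t lencopy)
{
        size_t offset = dst - base;             /* how far in are we already? */

        if (offset > buflen)
                return 0;                       /* already past the end: drop */
        lencopy = min(lencopy, buflen - offset); /* never copy past the buffer */
        memcpy(dst, src, lencopy);
        return lencopy;
}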
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index e59a463c2761..4b685f883e4d 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -836,7 +836,7 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
while (1) {
u8 byte = *data & mask;
value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
- bits -= 8 - (offset > 0 ? offset : 0);
+ bits -= 8 - max(offset, 0);
if (bits <= 0)
break;
@@ -1850,16 +1850,18 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
&err_ctrl);
- if (ret < 0)
+ if (ret < 0) {
+ if (ctrls)
+ ctrls->error_idx =
+ uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ err_ctrl);
goto done;
+ }
}
if (!rollback)
uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
- if (ret < 0 && ctrls)
- ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
- err_ctrl);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -2165,7 +2167,7 @@ static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry)
{
- struct uvc_entity *entity;
+ struct uvc_entity *entity, *iter;
struct uvc_control *ctrl;
unsigned int i;
bool found;
@@ -2175,16 +2177,16 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
int ret;
/* Find the extension unit. */
- found = false;
- list_for_each_entry(entity, &chain->entities, chain) {
- if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
- entity->id == xqry->unit) {
- found = true;
+ entity = NULL;
+ list_for_each_entry(iter, &chain->entities, chain) {
+ if (UVC_ENTITY_TYPE(iter) == UVC_VC_EXTENSION_UNIT &&
+ iter->id == xqry->unit) {
+ entity = iter;
break;
}
}
- if (!found) {
+ if (!entity) {
uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n",
xqry->unit);
return -ENOENT;
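Both uvc_ctrl.c hunks (and the later v4l2-ctrls-api.c one) apply the same rule: the list_for_each_entry() cursor is only valid inside the loop body, so the code records the match in a separate pointer instead of testing the cursor afterwards. Sketch with a hypothetical item type:

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head node;
        u32 id;
};

static struct item *find_item(struct list_head *head, u32 id)
{
        struct item *found = NULL;
        struct item *iter;

        list_for_each_entry(iter, head, node) {
                if (iter->id == id) {
                        found = iter;           /* remember the match */
                        break;
                }
        }
        /* Past this point "iter" is computed from the list head and must
         * not be dereferenced; only "found" is meaningful. */
        return found;
}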
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index bbd90123a4e7..8fe24c98087e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
@@ -2232,8 +2233,14 @@ static int uvc_probe(struct usb_interface *intf,
goto error;
}
+ if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME)
+ udev->quirks &= ~USB_QUIRK_RESET_RESUME;
+
+ if (!(dev->quirks & UVC_QUIRK_DISABLE_AUTOSUSPEND))
+ usb_enable_autosuspend(udev);
+
uvc_dbg(dev, PROBE, "UVC device initialized\n");
- usb_enable_autosuspend(udev);
+
return 0;
error:
@@ -2574,6 +2581,33 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) },
+ /* Logitech Rally Bar Huddle */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x087c,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ /* Logitech Rally Bar */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x089b,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ /* Logitech Rally Bar Mini */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x08d3,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
/* Chicony CNF7129 (Asus EEE 100HE) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3012,6 +3046,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
+ /* Insta360 Link */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x2e1a,
+ .idProduct = 0x4c01,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
/* Lenovo Integrated Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3030,6 +3073,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Shine-Optics Integrated Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x3277,
+ .idProduct = 0x009e,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
/* Acer EasyCamera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6fb0a78b1b00..3653b2c8a86c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -73,6 +73,8 @@
#define UVC_QUIRK_FORCE_Y8 0x00000800
#define UVC_QUIRK_FORCE_BPP 0x00001000
#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
+#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
+#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 3ec323bd528b..222f01665f7c 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -316,9 +316,8 @@ v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
struct v4l2_subdev *sd)
{
- struct media_link *link = NULL;
-
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ struct media_link *link;
if (sd->entity.function != MEDIA_ENT_F_LENS &&
sd->entity.function != MEDIA_ENT_F_FLASH)
@@ -326,9 +325,10 @@ static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
-#endif
-
return IS_ERR(link) ? PTR_ERR(link) : 0;
+#else
+ return 0;
+#endif
}
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
@@ -341,7 +341,7 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
int ret;
if (list_empty(&sd->asc_list)) {
- ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner);
if (ret < 0)
return ret;
registered = true;
@@ -563,6 +563,7 @@ void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
{
INIT_LIST_HEAD(&notifier->waiting_list);
INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);
@@ -572,6 +573,7 @@ void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
{
INIT_LIST_HEAD(&notifier->waiting_list);
INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
notifier->sd = sd;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
@@ -618,16 +620,10 @@ err_unlock:
int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
- int ret;
-
if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
return -EINVAL;
- ret = __v4l2_async_nf_register(notifier);
- if (ret)
- notifier->v4l2_dev = NULL;
-
- return ret;
+ return __v4l2_async_nf_register(notifier);
}
EXPORT_SYMBOL(v4l2_async_nf_register);
@@ -639,7 +635,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
v4l2_async_nf_unbind_all_subdevs(notifier);
- list_del(&notifier->notifier_entry);
+ list_del_init(&notifier->notifier_entry);
}
void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
@@ -787,7 +783,7 @@ v4l2_async_connection_unique(struct v4l2_subdev *sd)
}
EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);
-int v4l2_async_register_subdev(struct v4l2_subdev *sd)
+int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module)
{
struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
@@ -811,6 +807,8 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
return -EINVAL;
}
+ sd->owner = module;
+
mutex_lock(&list_lock);
list_for_each_entry(notifier, &notifier_list, notifier_entry) {
@@ -853,9 +851,11 @@ err_unbind:
mutex_unlock(&list_lock);
+ sd->owner = NULL;
+
return ret;
}
-EXPORT_SYMBOL(v4l2_async_register_subdev);
+EXPORT_SYMBOL(__v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index d34d210908d9..4165c815faef 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -263,7 +263,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
.block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index d9a422017bd9..e5a364efd5e6 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -1052,35 +1052,40 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
if (id >= node2id(hdl->ctrl_refs.prev)) {
ref = NULL; /* Yes, so there is no next control */
} else if (ref) {
+ struct v4l2_ctrl_ref *pos = ref;
+
/*
* We found a control with the given ID, so just get
* the next valid one in the list.
*/
- list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
- is_compound = ref->ctrl->is_array ||
- ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
- if (id < ref->ctrl->id &&
- (is_compound & mask) == match)
+ ref = NULL;
+ list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) {
+ is_compound = pos->ctrl->is_array ||
+ pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < pos->ctrl->id &&
+ (is_compound & mask) == match) {
+ ref = pos;
break;
+ }
}
- if (&ref->node == &hdl->ctrl_refs)
- ref = NULL;
} else {
+ struct v4l2_ctrl_ref *pos;
+
/*
* No control with the given ID exists, so start
* searching for the next largest ID. We know there
* is one, otherwise the first 'if' above would have
* been true.
*/
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- is_compound = ref->ctrl->is_array ||
- ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
- if (id < ref->ctrl->id &&
- (is_compound & mask) == match)
+ list_for_each_entry(pos, &hdl->ctrl_refs, node) {
+ is_compound = pos->ctrl->is_array ||
+ pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < pos->ctrl->id &&
+ (is_compound & mask) == match) {
+ ref = pos;
break;
+ }
}
- if (&ref->node == &hdl->ctrl_refs)
- ref = NULL;
}
}
mutex_unlock(hdl->lock);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index c4d995f32191..eeab6a5eb7ba 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -295,6 +295,9 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_U32:
pr_cont("%u", (unsigned)*ptr.p_u32);
break;
+ case V4L2_CTRL_TYPE_AREA:
+ pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height);
+ break;
case V4L2_CTRL_TYPE_H264_SPS:
pr_cont("H264_SPS");
break;
@@ -2504,8 +2507,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
/* Log the control name and value */
-static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl,
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
const char *prefix, const char *colon)
{
if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
@@ -2515,11 +2517,7 @@ static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
pr_info("%s%s%s: ", prefix, colon, ctrl->name);
- if (ctrl->handler != hdl)
- v4l2_ctrl_lock(ctrl);
ctrl->type_ops->log(ctrl);
- if (ctrl->handler != hdl)
- v4l2_ctrl_unlock(ctrl);
if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
V4L2_CTRL_FLAG_GRABBED |
@@ -2538,7 +2536,7 @@ static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
const char *prefix)
{
- struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
const char *colon = "";
int len;
@@ -2550,12 +2548,9 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
if (len && prefix[len - 1] != ' ')
colon = ": ";
mutex_lock(hdl->lock);
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- if (ref->from_other_dev ||
- (ref->ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
- continue;
- log_ctrl(hdl, ref->ctrl, prefix, colon);
- }
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
@@ -2564,6 +2559,9 @@ int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ctrl_ops,
const struct v4l2_fwnode_device_properties *p)
{
+ if (hdl->error)
+ return hdl->error;
+
if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
u32 orientation_ctrl;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index d13954bd31fd..be2ba7ca5de2 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -722,6 +722,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */
+ if (ops->vidioc_create_bufs)
+ SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs);
}
if (is_vid || is_vbi || is_meta) {
@@ -1036,8 +1039,10 @@ int __video_register_device(struct video_device *vdev,
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+ mutex_lock(&videodev_lock);
ret = device_register(&vdev->dev);
if (ret < 0) {
+ mutex_unlock(&videodev_lock);
pr_err("%s: device_register failed\n", __func__);
goto cleanup;
}
@@ -1057,6 +1062,7 @@ int __video_register_device(struct video_device *vdev,
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ mutex_unlock(&videodev_lock);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index d2e58ae91f9b..5e537454f5cd 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -108,8 +108,8 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
-int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
- struct v4l2_subdev *sd)
+int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd, struct module *module)
{
int err;
@@ -125,9 +125,9 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
* try_module_get() such sub-device owners.
*/
sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
- sd->owner == v4l2_dev->dev->driver->owner;
+ module == v4l2_dev->dev->driver->owner;
- if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
+ if (!sd->owner_v4l2_dev && !try_module_get(module))
return -ENODEV;
sd->v4l2_dev = v4l2_dev;
@@ -152,6 +152,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
goto error_unregister;
}
+ sd->owner = module;
+
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
spin_unlock(&v4l2_dev->lock);
@@ -168,7 +170,7 @@ error_module:
sd->v4l2_dev = NULL;
return err;
}
-EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev);
static void v4l2_subdev_release(struct v4l2_subdev *sd)
{
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
index b4acca75644b..586c46544255 100644
--- a/drivers/media/v4l2-core/v4l2-i2c.c
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -100,7 +100,7 @@ struct v4l2_subdev
* Register with the v4l2_device which increases the module's
* use count as well.
*/
- if (v4l2_device_register_subdev(v4l2_dev, sd))
+ if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
module_put(client->dev.driver->owner);
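v4l2-async.c and v4l2-device.c now take the owning module explicitly (__v4l2_async_register_subdev(), __v4l2_device_register_subdev()) and stash it in sd->owner; the i2c/spi helpers above forward sd->owner because the subdev driver already set it. For ordinary drivers the public entry points presumably remain wrappers that pass THIS_MODULE, roughly like this (assumed, the header changes are not part of this diff):

/* Assumed header-side wrappers; exact definitions not shown in this diff. */
#define v4l2_async_register_subdev(sd) \
        __v4l2_async_register_subdev(sd, THIS_MODULE)

#define v4l2_device_register_subdev(v4l2_dev, sd) \
        __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE)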
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 6e7b8b682d13..4c76d17b4629 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -343,8 +343,9 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_META_OUTPUT:
meta = &p->fmt.meta;
pixelformat = meta->dataformat;
- pr_cont(", dataformat=%p4cc, buffersize=%u\n",
- &pixelformat, meta->buffersize);
+ pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n",
+ &pixelformat, meta->buffersize, meta->width,
+ meta->height, meta->bytesperline);
break;
}
}
@@ -489,6 +490,14 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
v4l_print_format(&p->format, write_only);
}
+static void v4l_print_remove_buffers(const void *arg, bool write_only)
+{
+ const struct v4l2_remove_buffers *p = arg;
+
+ pr_cont("type=%s, index=%u, count=%u\n",
+ prt_names(p->type, v4l2_type_names), p->index, p->count);
+}
+
static void v4l_print_streamparm(const void *arg, bool write_only)
{
const struct v4l2_streamparm *p = arg;
@@ -1312,6 +1321,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break;
+ case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break;
+ case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break;
case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
@@ -1452,6 +1463,13 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break;
case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break;
case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break;
+ case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break;
default:
/* Compressed formats */
@@ -1522,6 +1540,22 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
}
}
+ if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) {
+ switch (fmt->pixelformat) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED;
+ break;
+ default:
+ fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED;
+ }
+ }
+
if (descr)
WARN_ON(strscpy(fmt->description, descr, sz) < 0);
fmt->flags |= flags;
@@ -2092,6 +2126,7 @@ static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_requestbuffers *p = arg;
int ret = check_fmt(file, p->type);
@@ -2100,6 +2135,10 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
memset_after(p, 0, flags);
+ p->capabilities = 0;
+ if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
+ p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
+
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -2133,6 +2172,7 @@ static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_create_buffers *create = arg;
int ret = check_fmt(file, create->format.type);
@@ -2143,6 +2183,10 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
v4l_sanitize_format(&create->format);
+ create->capabilities = 0;
+ if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
+ create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
+
ret = ops->vidioc_create_bufs(file, fh, create);
if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -2161,6 +2205,17 @@ static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
}
+static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_remove_buffers *remove = arg;
+
+ if (ops->vidioc_remove_bufs)
+ return ops->vidioc_remove_bufs(file, fh, remove);
+
+ return -ENOTTY;
+}
+
static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -2910,6 +2965,7 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
+ IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -3147,13 +3203,13 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
case VIDIOC_SUBDEV_S_ROUTING: {
struct v4l2_subdev_routing *routing = parg;
- if (routing->num_routes > 256)
+ if (routing->len_routes > 256)
return -E2BIG;
*user_ptr = u64_to_user_ptr(routing->routes);
*kernel_ptr = (void **)&routing->routes;
*array_size = sizeof(struct v4l2_subdev_route)
- * routing->num_routes;
+ * routing->len_routes;
ret = 1;
break;
}
@@ -3407,11 +3463,14 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
* FIXME: subdev IOCTLS are partially handled here and partially in
* v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS
* defined here as part of the 'v4l2_ioctls' array. As
- * VIDIOC_SUBDEV_G_ROUTING needs to return results to applications even
- * in case of failure, but it is not defined here as part of the
+ * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications
+ * even in case of failure, but it is not defined here as part of the
* 'v4l2_ioctls' array, insert an ad-hoc check to address that.
*/
- if (err < 0 && !always_copy && cmd != VIDIOC_SUBDEV_G_ROUTING)
+ if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING)
+ always_copy = true;
+
+ if (err < 0 && !always_copy)
goto out;
if (has_array_args) {
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 75517134a5e9..eb22d6172462 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1386,6 +1386,21 @@ int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
+ struct v4l2_remove_buffers *remove)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);
+
+ if (!q)
+ return -EINVAL;
+ if (q->type != remove->type)
+ return -EINVAL;
+
+ return vb2_core_remove_bufs(q, remove->index, remove->count);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);
+
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
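With the core support above, a mem2mem driver opts into VIDIOC_REMOVE_BUFS by wiring the new helper next to its CREATE_BUFS handler; v4l2-dev.c only advertises the ioctl when vidioc_create_bufs is implemented. A sketch of the ops table (driver name hypothetical, helpers are the exported v4l2_m2m_ioctl_* ones):

#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops example_m2m_ioctl_ops = {
        .vidioc_reqbufs     = v4l2_m2m_ioctl_reqbufs,
        .vidioc_querybuf    = v4l2_m2m_ioctl_querybuf,
        .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
        .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,   /* new in this series */
        .vidioc_qbuf        = v4l2_m2m_ioctl_qbuf,
        .vidioc_dqbuf       = v4l2_m2m_ioctl_dqbuf,
};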
diff --git a/drivers/media/v4l2-core/v4l2-spi.c b/drivers/media/v4l2-core/v4l2-spi.c
index a7092c3930d6..1baf8e63f19e 100644
--- a/drivers/media/v4l2-core/v4l2-spi.c
+++ b/drivers/media/v4l2-core/v4l2-spi.c
@@ -59,7 +59,7 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
* Register with the v4l2_device which increases the module's
* use count as well.
*/
- if (v4l2_device_register_subdev(v4l2_dev, sd))
+ if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 4c6198c48dd6..8470d6eda9a3 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -369,6 +369,36 @@ static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}
+static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->s_dv_timings(sd, pad, timings);
+}
+
+static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->g_dv_timings(sd, pad, timings);
+}
+
+static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->query_dv_timings(sd, pad, timings);
+}
+
static int call_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
@@ -412,15 +442,6 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
if (WARN_ON(!!sd->enabled_streams == !!enable))
return 0;
-#if IS_REACHABLE(CONFIG_LEDS_CLASS)
- if (!IS_ERR_OR_NULL(sd->privacy_led)) {
- if (enable)
- led_set_brightness(sd->privacy_led,
- sd->privacy_led->max_brightness);
- else
- led_set_brightness(sd->privacy_led, 0);
- }
-#endif
ret = sd->ops->video->s_stream(sd, enable);
if (!enable && ret < 0) {
@@ -428,9 +449,20 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
ret = 0;
}
- if (!ret)
+ if (!ret) {
sd->enabled_streams = enable ? BIT(0) : 0;
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ if (enable)
+ led_set_brightness(sd->privacy_led,
+ sd->privacy_led->max_brightness);
+ else
+ led_set_brightness(sd->privacy_led, 0);
+ }
+#endif
+ }
+
return ret;
}
@@ -487,6 +519,9 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.set_frame_interval = call_set_frame_interval,
.get_edid = call_get_edid,
.set_edid = call_set_edid,
+ .s_dv_timings = call_s_dv_timings,
+ .g_dv_timings = call_g_dv_timings,
+ .query_dv_timings = call_query_dv_timings,
.dv_timings_cap = call_dv_timings_cap,
.enum_dv_timings = call_enum_dv_timings,
.get_frame_desc = call_get_frame_desc,
@@ -732,6 +767,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
@@ -756,6 +792,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = crop->rect;
@@ -873,16 +910,16 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
}
case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
- return v4l2_subdev_call(sd, video, query_dv_timings, arg);
+ return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);
case VIDIOC_SUBDEV_G_DV_TIMINGS:
- return v4l2_subdev_call(sd, video, g_dv_timings, arg);
+ return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);
case VIDIOC_SUBDEV_S_DV_TIMINGS:
if (ro_subdev)
return -EPERM;
- return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+ return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);
case VIDIOC_SUBDEV_G_STD:
return v4l2_subdev_call(sd, video, g_std, arg);
@@ -923,14 +960,10 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
krouting = &state->routing;
- if (routing->num_routes < krouting->num_routes) {
- routing->num_routes = krouting->num_routes;
- return -ENOSPC;
- }
-
memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
krouting->routes,
- krouting->num_routes * sizeof(*krouting->routes));
+ min(krouting->num_routes, routing->len_routes) *
+ sizeof(*krouting->routes));
routing->num_routes = krouting->num_routes;
return 0;
@@ -952,6 +985,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
return -EPERM;
+ if (routing->num_routes > routing->len_routes)
+ return -EINVAL;
+
memset(routing->reserved, 0, sizeof(routing->reserved));
for (i = 0; i < routing->num_routes; ++i) {
@@ -977,11 +1013,36 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
return -EINVAL;
}
+ /*
+ * If the driver doesn't support setting routing, just return
+ * the routing table.
+ */
+ if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ state->routing.routes,
+ min(state->routing.num_routes, routing->len_routes) *
+ sizeof(*state->routing.routes));
+ routing->num_routes = state->routing.num_routes;
+
+ return 0;
+ }
+
krouting.num_routes = routing->num_routes;
+ krouting.len_routes = routing->len_routes;
krouting.routes = routes;
- return v4l2_subdev_call(sd, pad, set_routing, state,
+ rval = v4l2_subdev_call(sd, pad, set_routing, state,
routing->which, &krouting);
+ if (rval < 0)
+ return rval;
+
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ state->routing.routes,
+ min(state->routing.num_routes, routing->len_routes) *
+ sizeof(*state->routing.routes));
+ routing->num_routes = state->routing.num_routes;
+
+ return 0;
}
case VIDIOC_SUBDEV_G_CLIENT_CAP: {
@@ -1400,17 +1461,13 @@ int v4l2_subdev_link_validate(struct media_link *link)
states_locked = sink_state && source_state;
- if (states_locked) {
- v4l2_subdev_lock_state(sink_state);
- v4l2_subdev_lock_state(source_state);
- }
+ if (states_locked)
+ v4l2_subdev_lock_states(sink_state, source_state);
ret = v4l2_subdev_link_validate_locked(link, states_locked);
- if (states_locked) {
- v4l2_subdev_unlock_state(sink_state);
- v4l2_subdev_unlock_state(source_state);
- }
+ if (states_locked)
+ v4l2_subdev_unlock_states(sink_state, source_state);
return ret;
}
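The subdev ioctl handlers above now route DV-timings queries through pad ops that take a pad index (0 for the common single-pad case) instead of the old video ops. Caller-side, the change looks like this (assuming sd implements the pad ops):

#include <media/v4l2-subdev.h>

static int example_get_timings(struct v4l2_subdev *sd,
                               struct v4l2_dv_timings *timings)
{
        /* Old form: v4l2_subdev_call(sd, video, g_dv_timings, timings); */
        return v4l2_subdev_call(sd, pad, g_dv_timings, 0, timings);
}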
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
index ea9213f7152e..4f17a93aa028 100644
--- a/drivers/memory/brcmstb_memc.c
+++ b/drivers/memory/brcmstb_memc.c
@@ -243,6 +243,7 @@ static const struct of_device_id brcmstb_memc_of_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, brcmstb_memc_of_match);
static int brcmstb_memc_suspend(struct device *dev)
{
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 572c7fbdcfd3..fbe52ecc0eca 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -450,6 +450,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_smi_larb_of_ids);
static int mtk_smi_larb_sleep_ctrl_enable(struct mtk_smi_larb *larb)
{
@@ -735,6 +736,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt8365-smi-common", .data = &mtk_smi_common_mt8365},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_smi_common_of_ids);
static int mtk_smi_common_probe(struct platform_device *pdev)
{
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 48540817e046..56e51737c81f 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -154,7 +154,6 @@ MODULE_DEVICE_TABLE(amba, pl353_ids);
static struct amba_driver pl353_smc_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "pl353-smc",
.pm = &pl353_smc_dev_pm_ops,
},
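The brcmstb_memc and mtk-smi hunks add the missing MODULE_DEVICE_TABLE() entries so the OF match tables are exported as module aliases and the modules can be autoloaded; the pl353-smc hunk drops an .owner assignment the amba core now sets itself. The usual table/export pairing (compatible string is a placeholder):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-memc" },        /* placeholder */
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);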
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 15720a4afac2..980a54513e6c 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -574,16 +574,13 @@ static int rtsx_pci_ms_drv_probe(struct platform_device *pdev)
return 0;
}
-static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
+static void rtsx_pci_ms_drv_remove(struct platform_device *pdev)
{
struct realtek_pci_ms *host = platform_get_drvdata(pdev);
struct rtsx_pcr *pcr;
struct memstick_host *msh;
int rc;
- if (!host)
- return 0;
-
pcr = host->pcr;
pcr->slots[RTSX_MS_CARD].p_dev = NULL;
pcr->slots[RTSX_MS_CARD].card_event = NULL;
@@ -613,8 +610,6 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
dev_dbg(&(pdev->dev),
": Realtek PCI-E Memstick controller has been removed\n");
-
- return 0;
}
static struct platform_device_id rtsx_pci_ms_ids[] = {
@@ -628,7 +623,7 @@ MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids);
static struct platform_driver rtsx_pci_ms_driver = {
.probe = rtsx_pci_ms_drv_probe,
- .remove = rtsx_pci_ms_drv_remove,
+ .remove_new = rtsx_pci_ms_drv_remove,
.id_table = rtsx_pci_ms_ids,
.suspend = rtsx_pci_ms_suspend,
.resume = rtsx_pci_ms_resume,
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 29271ad4728a..246876ac713c 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -805,7 +805,7 @@ err_out:
return err;
}
-static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
{
struct rtsx_usb_ms *host = platform_get_drvdata(pdev);
struct memstick_host *msh = host->msh;
@@ -840,8 +840,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
": Realtek USB Memstick controller has been removed\n");
memstick_free_host(msh);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_device_id rtsx_usb_ms_ids[] = {
@@ -855,7 +853,7 @@ MODULE_DEVICE_TABLE(platform, rtsx_usb_ms_ids);
static struct platform_driver rtsx_usb_ms_driver = {
.probe = rtsx_usb_ms_drv_probe,
- .remove = rtsx_usb_ms_drv_remove,
+ .remove_new = rtsx_usb_ms_drv_remove,
.id_table = rtsx_usb_ms_ids,
.driver = {
.name = "rtsx_usb_ms",
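Both memstick hosts move to the platform driver .remove_new callback, which returns void; the int returned by the old .remove was ignored by the core anyway, so the !host guard and the trailing return 0 disappear. Skeleton of the converted shape (all names hypothetical):

#include <linux/platform_device.h>

struct example_host { int dummy; };                     /* placeholder state */

static void example_drv_remove(struct platform_device *pdev)
{
        struct example_host *host = platform_get_drvdata(pdev);

        (void)host;             /* tear down here; nothing is returned */
}

static struct platform_driver example_driver = {
        .remove_new = example_drv_remove,               /* was: int .remove */
        .driver     = { .name = "example" },
};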
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c459f709107b..a3c17c4fe69c 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -129,6 +129,7 @@ static const struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 300f8e955a53..a0bcb0864ecd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2020,6 +2020,7 @@ static const struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};
@@ -2964,17 +2965,13 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
goto out_free;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- sizeof(edev->vendor_id));
- strscpy(edev->product_id, manufacture_reply->product_id,
- sizeof(edev->product_id));
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- sizeof(edev->product_rev));
+ memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+ memtostr(edev->product_id, manufacture_reply->product_id);
+ memtostr(edev->product_rev, manufacture_reply->product_rev);
edev->level = manufacture_reply->sas_format;
if (manufacture_reply->sas_format) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- sizeof(edev->component_vendor_id));
+ memtostr(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 9080a73b4ea6..6c3f25cc33ff 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2438,8 +2438,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
"tagged %d, simple %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags));
- blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
return 0;
}
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 6c5920db1e9d..574b882c9a85 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -843,6 +843,7 @@ static const struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};
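All three fusion host templates gain a .dma_alignment field, letting the SCSI midlayer apply the 512-byte DMA alignment once at host setup; the per-device blk_queue_dma_alignment() call in mptscsih_slave_configure() becomes redundant and is removed. The template-level form, with unrelated fields elided:

#include <scsi/scsi_host.h>

static const struct scsi_host_template example_driver_template = {
        .name          = "example",
        .max_sectors   = 8192,
        .cmd_per_lun   = 7,
        .dma_alignment = 511,   /* replaces blk_queue_dma_alignment(q, 512 - 1) */
};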
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 4b023ee229cf..266b4f54af60 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -292,7 +292,7 @@ config MFD_MADERA_SPI
config MFD_MAX5970
tristate "Maxim 5970/5978 power switch and monitor"
- depends on (I2C && OF)
+ depends on I2C && OF
select MFD_SIMPLE_MFD_I2C
help
This driver controls a Maxim 5970/5978 switch via I2C bus.
@@ -458,7 +458,7 @@ config MFD_EXYNOS_LPASS
config MFD_GATEWORKS_GSC
tristate "Gateworks System Controller"
- depends on (I2C && OF)
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -473,7 +473,7 @@ config MFD_GATEWORKS_GSC
config MFD_MC13XXX
tristate
- depends on (SPI_MASTER || I2C)
+ depends on SPI_MASTER || I2C
select MFD_CORE
select REGMAP_IRQ
help
@@ -1109,7 +1109,7 @@ config PCF50633_GPIO
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON || COMPILE_TEST)
+ depends on ARM || HEXAGON || COMPILE_TEST
select IRQ_DOMAIN_HIERARCHY
select MFD_CORE
select REGMAP
@@ -1225,7 +1225,7 @@ config MFD_RK8XX
select MFD_CORE
config MFD_RK8XX_I2C
- tristate "Rockchip RK805/RK808/RK809/RK817/RK818 Power Management Chip"
+ tristate "Rockchip RK805/RK808/RK809/RK816/RK817/RK818 Power Management Chip"
depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
@@ -1233,7 +1233,7 @@ config MFD_RK8XX_I2C
select MFD_RK8XX
help
If you say yes here you get support for the RK805, RK808, RK809,
- RK817 and RK818 Power Management chips.
+ RK816, RK817 and RK818 Power Management chips.
This driver provides common support for accessing the device
through I2C interface. The device supports multiple sub-devices
including interrupts, RTC, LDO & DCDC regulators, and onkey.
@@ -1418,7 +1418,7 @@ config MFD_DB8500_PRCMU
config MFD_STMPE
bool "STMicroelectronics STMPE"
- depends on (I2C=y || SPI_MASTER=y)
+ depends on I2C=y || SPI_MASTER=y
depends on OF
select MFD_CORE
help
@@ -2116,7 +2116,7 @@ config MFD_STM32_TIMERS
config MFD_STPMIC1
tristate "Support for STPMIC1 PMIC"
- depends on (I2C=y && OF)
+ depends on I2C=y && OF
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 68d3560cfe4a..b8e7ac89f697 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
{ .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
{ .compatible = "x-powers,axp313a", .data = (void *)AXP313A_ID },
+ { .compatible = "x-powers,axp717", .data = (void *)AXP717_ID },
{ .compatible = "x-powers,axp803", .data = (void *)AXP803_ID },
{ .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ .compatible = "x-powers,axp15060", .data = (void *)AXP15060_ID },
@@ -81,6 +82,7 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp221", 0 },
{ "axp223", 0 },
{ "axp313a", 0 },
+ { "axp717", 0 },
{ "axp803", 0 },
{ "axp806", 0 },
{ "axp15060", 0 },
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
index 214bc0d84d44..059656f2a1bd 100644
--- a/drivers/mfd/axp20x-rsb.c
+++ b/drivers/mfd/axp20x-rsb.c
@@ -58,6 +58,7 @@ static void axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
static const struct of_device_id axp20x_rsb_of_match[] = {
{ .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { .compatible = "x-powers,axp717", .data = (void *)AXP717_ID },
{ .compatible = "x-powers,axp803", .data = (void *)AXP803_ID },
{ .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ .compatible = "x-powers,axp809", .data = (void *)AXP809_ID },
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index d8daa593ebd5..f2c0f144c0fc 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -42,6 +42,7 @@ static const char * const axp20x_model_names[] = {
"AXP223",
"AXP288",
"AXP313a",
+ "AXP717",
"AXP803",
"AXP806",
"AXP809",
@@ -207,6 +208,25 @@ static const struct regmap_access_table axp313a_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(axp313a_volatile_ranges),
};
+static const struct regmap_range axp717_writeable_ranges[] = {
+ regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
+ regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
+};
+
+static const struct regmap_range axp717_volatile_ranges[] = {
+ regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
+};
+
+static const struct regmap_access_table axp717_writeable_table = {
+ .yes_ranges = axp717_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp717_writeable_ranges),
+};
+
+static const struct regmap_access_table axp717_volatile_table = {
+ .yes_ranges = axp717_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp717_volatile_ranges),
+};
+
static const struct regmap_range axp806_volatile_ranges[] = {
regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
};
@@ -317,6 +337,11 @@ static const struct resource axp313a_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP313A_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
};
+static const struct resource axp717_pek_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP717_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
+ DEFINE_RES_IRQ_NAMED(AXP717_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
+};
+
static const struct resource axp803_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP803_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
DEFINE_RES_IRQ_NAMED(AXP803_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
@@ -391,6 +416,15 @@ static const struct regmap_config axp313a_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
+static const struct regmap_config axp717_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .wr_table = &axp717_writeable_table,
+ .volatile_table = &axp717_volatile_table,
+ .max_register = AXP717_CPUSLDO_CONTROL,
+ .cache_type = REGCACHE_MAPLE,
+};
+
static const struct regmap_config axp806_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -589,6 +623,40 @@ static const struct regmap_irq axp313a_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP313A, DIE_TEMP_HIGH, 0, 0),
};
+static const struct regmap_irq axp717_regmap_irqs[] = {
+ INIT_REGMAP_IRQ(AXP717, SOC_DROP_LVL2, 0, 7),
+ INIT_REGMAP_IRQ(AXP717, SOC_DROP_LVL1, 0, 6),
+ INIT_REGMAP_IRQ(AXP717, GAUGE_NEW_SOC, 0, 4),
+ INIT_REGMAP_IRQ(AXP717, BOOST_OVER_V, 0, 2),
+ INIT_REGMAP_IRQ(AXP717, VBUS_OVER_V, 0, 1),
+ INIT_REGMAP_IRQ(AXP717, VBUS_FAULT, 0, 0),
+ INIT_REGMAP_IRQ(AXP717, VBUS_PLUGIN, 1, 7),
+ INIT_REGMAP_IRQ(AXP717, VBUS_REMOVAL, 1, 6),
+ INIT_REGMAP_IRQ(AXP717, BATT_PLUGIN, 1, 5),
+ INIT_REGMAP_IRQ(AXP717, BATT_REMOVAL, 1, 4),
+ INIT_REGMAP_IRQ(AXP717, PEK_SHORT, 1, 3),
+ INIT_REGMAP_IRQ(AXP717, PEK_LONG, 1, 2),
+ INIT_REGMAP_IRQ(AXP717, PEK_FAL_EDGE, 1, 1),
+ INIT_REGMAP_IRQ(AXP717, PEK_RIS_EDGE, 1, 0),
+ INIT_REGMAP_IRQ(AXP717, WDOG_EXPIRE, 2, 7),
+ INIT_REGMAP_IRQ(AXP717, LDO_OVER_CURR, 2, 6),
+ INIT_REGMAP_IRQ(AXP717, BATT_OVER_CURR, 2, 5),
+ INIT_REGMAP_IRQ(AXP717, CHARG_DONE, 2, 4),
+ INIT_REGMAP_IRQ(AXP717, CHARG, 2, 3),
+ INIT_REGMAP_IRQ(AXP717, DIE_TEMP_HIGH, 2, 2),
+ INIT_REGMAP_IRQ(AXP717, CHARG_TIMER, 2, 1),
+ INIT_REGMAP_IRQ(AXP717, BATT_OVER_V, 2, 0),
+ INIT_REGMAP_IRQ(AXP717, BC_USB_DONE, 3, 7),
+ INIT_REGMAP_IRQ(AXP717, BC_USB_CHNG, 3, 6),
+ INIT_REGMAP_IRQ(AXP717, BATT_QUIT_TEMP_HIGH, 3, 4),
+ INIT_REGMAP_IRQ(AXP717, BATT_CHG_TEMP_HIGH, 3, 3),
+ INIT_REGMAP_IRQ(AXP717, BATT_CHG_TEMP_LOW, 3, 2),
+ INIT_REGMAP_IRQ(AXP717, BATT_ACT_TEMP_HIGH, 3, 1),
+ INIT_REGMAP_IRQ(AXP717, BATT_ACT_TEMP_LOW, 3, 0),
+ INIT_REGMAP_IRQ(AXP717, TYPEC_REMOVE, 4, 6),
+ INIT_REGMAP_IRQ(AXP717, TYPEC_PLUGIN, 4, 5),
+};
+
static const struct regmap_irq axp803_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP803, ACIN_OVER_V, 0, 7),
INIT_REGMAP_IRQ(AXP803, ACIN_PLUGIN, 0, 6),
@@ -776,6 +844,17 @@ static const struct regmap_irq_chip axp313a_regmap_irq_chip = {
.num_regs = 1,
};
+static const struct regmap_irq_chip axp717_regmap_irq_chip = {
+ .name = "axp717_irq_chip",
+ .status_base = AXP717_IRQ0_STATE,
+ .ack_base = AXP717_IRQ0_STATE,
+ .unmask_base = AXP717_IRQ0_EN,
+ .init_ack_masked = true,
+ .irqs = axp717_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(axp717_regmap_irqs),
+ .num_regs = 5,
+};
+
static const struct regmap_irq_chip axp803_regmap_irq_chip = {
.name = "axp803",
.status_base = AXP20X_IRQ1_STATE,
@@ -941,6 +1020,11 @@ static struct mfd_cell axp313a_cells[] = {
MFD_CELL_RES("axp313a-pek", axp313a_pek_resources),
};
+static struct mfd_cell axp717_cells[] = {
+ MFD_CELL_NAME("axp20x-regulator"),
+ MFD_CELL_RES("axp20x-pek", axp717_pek_resources),
+};
+
static const struct resource axp288_adc_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP288_IRQ_GPADC, "GPADC"),
};
@@ -1181,6 +1265,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_cfg = &axp313a_regmap_config;
axp20x->regmap_irq_chip = &axp313a_regmap_irq_chip;
break;
+ case AXP717_ID:
+ axp20x->nr_cells = ARRAY_SIZE(axp717_cells);
+ axp20x->cells = axp717_cells;
+ axp20x->regmap_cfg = &axp717_regmap_config;
+ axp20x->regmap_irq_chip = &axp717_regmap_irq_chip;
+ break;
case AXP803_ID:
axp20x->nr_cells = ARRAY_SIZE(axp803_cells);
axp20x->cells = axp803_cells;
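The AXP717 support above is almost entirely table driven: INIT_REGMAP_IRQ() ties each interrupt to a (register index, bit) pair, and the regmap_irq_chip points those indices at the status/enable banks, so regmap-irq handles masking and acking generically. A hedged sketch of the same pattern with hypothetical EXAMPLE_* registers:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#define EXAMPLE_IRQ_STATE	0x48	/* hypothetical status register */
#define EXAMPLE_IRQ_EN		0x40	/* hypothetical enable register */

enum { EXAMPLE_IRQ_PEK_RISE, EXAMPLE_IRQ_PEK_FALL };

static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG(EXAMPLE_IRQ_PEK_RISE, 0, BIT(0)),	/* register 0, bit 0 */
	REGMAP_IRQ_REG(EXAMPLE_IRQ_PEK_FALL, 0, BIT(1)),	/* register 0, bit 1 */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= EXAMPLE_IRQ_STATE,	/* read to find pending bits */
	.ack_base	= EXAMPLE_IRQ_STATE,	/* write 1 to clear */
	.unmask_base	= EXAMPLE_IRQ_EN,	/* write 1 to enable */
	.init_ack_masked = true,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
	.num_regs	= 1,
};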
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index a0fb2dc6c3b2..ae8fd37afb75 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -43,6 +43,9 @@
#define CS42L43_MCU_UPDATE_TIMEOUT_US 500000
#define CS42L43_MCU_UPDATE_RETRIES 5
+#define CS42L43_MCU_ROM_REV 0x2001
+#define CS42L43_MCU_ROM_BIOS_REV 0x0000
+
#define CS42L43_MCU_SUPPORTED_REV 0x2105
#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
@@ -709,6 +712,23 @@ err:
complete(&cs42l43->firmware_download);
}
+static int cs42l43_mcu_is_hw_compatible(struct cs42l43 *cs42l43,
+ unsigned int mcu_rev,
+ unsigned int bios_rev)
+{
+ /*
+	 * The firmware has two revision numbers; bringing either of them up to
+	 * a supported version provides the disable support the driver requires.
+ */
+ if (mcu_rev < CS42L43_MCU_SUPPORTED_REV &&
+ bios_rev < CS42L43_MCU_SUPPORTED_BIOS_REV) {
+ dev_err(cs42l43->dev, "Firmware too old to support disable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* The process of updating the firmware is split into a series of steps, at the
* end of each step a soft reset of the device might be required which will
@@ -745,11 +765,10 @@ static int cs42l43_mcu_update_step(struct cs42l43 *cs42l43)
((mcu_rev & CS42L43_FW_SUBMINOR_REV_MASK) >> 8);
/*
- * The firmware has two revision numbers bringing either of them up to a
- * supported version will provide the features the driver requires.
+	 * The firmware has two revision numbers; both of them being at the ROM
+	 * revision indicates that no patch has been applied.
*/
- patched = mcu_rev >= CS42L43_MCU_SUPPORTED_REV ||
- bios_rev >= CS42L43_MCU_SUPPORTED_BIOS_REV;
+ patched = mcu_rev != CS42L43_MCU_ROM_REV || bios_rev != CS42L43_MCU_ROM_BIOS_REV;
/*
	 * Later versions of the firmware require the driver to access some
* features through a set of shadow registers.
@@ -794,10 +813,15 @@ static int cs42l43_mcu_update_step(struct cs42l43 *cs42l43)
return cs42l43_mcu_stage_2_3(cs42l43, shadow);
}
case CS42L43_MCU_BOOT_STAGE3:
- if (patched)
+ if (patched) {
+ ret = cs42l43_mcu_is_hw_compatible(cs42l43, mcu_rev, bios_rev);
+ if (ret)
+ return ret;
+
return cs42l43_mcu_disable(cs42l43);
- else
+ } else {
return cs42l43_mcu_stage_3_2(cs42l43);
+ }
case CS42L43_MCU_BOOT_STAGE4:
return 0;
default:
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 8c00e0c695c5..c36a101df7be 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -54,7 +54,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
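PCI_IRQ_LEGACY restricted the allocation to an INTx interrupt; PCI_IRQ_ALL_TYPES lets the PCI core try MSI-X and MSI first and fall back to INTx, and the caller retrieves whatever was granted through pci_irq_vector(). A short sketch (assumes an already-enabled pci_dev, illustrative function name):

#include <linux/pci.h>

static int example_setup_irq(struct pci_dev *pdev)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	/* Linux IRQ number of vector 0, whichever type was allocated. */
	return pci_irq_vector(pdev, 0);
}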
diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c
index 0392ef8b57d8..698c5933938b 100644
--- a/drivers/mfd/intel-m10-bmc-pmci.c
+++ b/drivers/mfd/intel-m10-bmc-pmci.c
@@ -370,6 +370,7 @@ static const struct m10bmc_csr_map m10bmc_n6000_csr_map = {
.pr_reh_addr = M10BMC_N6000_PR_REH_ADDR,
.pr_magic = M10BMC_N6000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N6000_STAGING_FLASH_COUNT,
+ .staging_size = M10BMC_STAGING_SIZE,
};
static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000 = {
diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c
index cbeb7de9e041..d64d28199df6 100644
--- a/drivers/mfd/intel-m10-bmc-spi.c
+++ b/drivers/mfd/intel-m10-bmc-spi.c
@@ -109,6 +109,7 @@ static const struct m10bmc_csr_map m10bmc_n3000_csr_map = {
.pr_reh_addr = M10BMC_N3000_PR_REH_ADDR,
.pr_magic = M10BMC_N3000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N3000_STAGING_FLASH_COUNT,
+ .staging_size = M10BMC_STAGING_SIZE,
};
static struct mfd_cell m10bmc_d5005_subdevs[] = {
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 5557f023a173..8a332852bf97 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -6,14 +6,17 @@
* Author: Michael Brunner <michael.brunner@kontron.com>
*/
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/kempld.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/acpi.h>
+#include <linux/sysfs.h>
#define MAX_ID_LEN 4
static char force_device_id[MAX_ID_LEN + 1] = "";
@@ -106,7 +109,7 @@ static int kempld_register_cells_generic(struct kempld_device_data *pld)
if (pld->feature_mask & KEMPLD_FEATURE_MASK_UART)
devs[i++].name = kempld_dev_names[KEMPLD_UART];
- return mfd_add_devices(pld->dev, -1, devs, i, NULL, 0, NULL);
+ return mfd_add_devices(pld->dev, PLATFORM_DEVID_NONE, devs, i, NULL, 0, NULL);
}
static struct resource kempld_ioresource = {
@@ -126,31 +129,22 @@ static const struct kempld_platform_data kempld_platform_data_generic = {
static struct platform_device *kempld_pdev;
-static int kempld_create_platform_device(const struct dmi_system_id *id)
+static int kempld_create_platform_device(const struct kempld_platform_data *pdata)
{
- const struct kempld_platform_data *pdata = id->driver_data;
- int ret;
-
- kempld_pdev = platform_device_alloc("kempld", -1);
- if (!kempld_pdev)
- return -ENOMEM;
-
- ret = platform_device_add_data(kempld_pdev, pdata, sizeof(*pdata));
- if (ret)
- goto err;
-
- ret = platform_device_add_resources(kempld_pdev, pdata->ioresource, 1);
- if (ret)
- goto err;
-
- ret = platform_device_add(kempld_pdev);
- if (ret)
- goto err;
+ const struct platform_device_info pdevinfo = {
+ .name = "kempld",
+ .id = PLATFORM_DEVID_NONE,
+ .res = pdata->ioresource,
+ .num_res = 1,
+ .data = pdata,
+ .size_data = sizeof(*pdata),
+ };
+
+ kempld_pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(kempld_pdev))
+ return PTR_ERR(kempld_pdev);
return 0;
-err:
- platform_device_put(kempld_pdev);
- return ret;
}
/**
@@ -299,11 +293,8 @@ static int kempld_get_info(struct kempld_device_data *pld)
else
minor = (pld->info.minor - 10) + 'A';
- ret = scnprintf(pld->info.version, sizeof(pld->info.version),
- "P%X%c%c.%04X", pld->info.number, major, minor,
- pld->info.buildnr);
- if (ret < 0)
- return ret;
+ scnprintf(pld->info.version, sizeof(pld->info.version), "P%X%c%c.%04X",
+ pld->info.number, major, minor, pld->info.buildnr);
return 0;
}
@@ -372,16 +363,13 @@ static DEVICE_ATTR_RO(pld_version);
static DEVICE_ATTR_RO(pld_specification);
static DEVICE_ATTR_RO(pld_type);
-static struct attribute *pld_attributes[] = {
+static struct attribute *pld_attrs[] = {
&dev_attr_pld_version.attr,
&dev_attr_pld_specification.attr,
&dev_attr_pld_type.attr,
NULL
};
-
-static const struct attribute_group pld_attr_group = {
- .attrs = pld_attributes,
-};
+ATTRIBUTE_GROUPS(pld);
static int kempld_detect_device(struct kempld_device_data *pld)
{
@@ -414,36 +402,8 @@ static int kempld_detect_device(struct kempld_device_data *pld)
pld->info.version, kempld_get_type_string(pld),
pld->info.spec_major, pld->info.spec_minor);
- ret = sysfs_create_group(&pld->dev->kobj, &pld_attr_group);
- if (ret)
- return ret;
-
- ret = kempld_register_cells(pld);
- if (ret)
- sysfs_remove_group(&pld->dev->kobj, &pld_attr_group);
-
- return ret;
-}
-
-#ifdef CONFIG_ACPI
-static int kempld_get_acpi_data(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct kempld_platform_data *pdata;
- int ret;
-
- pdata = acpi_device_get_match_data(dev);
- ret = platform_device_add_data(pdev, pdata,
- sizeof(struct kempld_platform_data));
-
- return ret;
+ return kempld_register_cells(pld);
}
-#else
-static int kempld_get_acpi_data(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_ACPI */
static int kempld_probe(struct platform_device *pdev)
{
@@ -453,15 +413,21 @@ static int kempld_probe(struct platform_device *pdev)
struct resource *ioport;
int ret;
- if (kempld_pdev == NULL) {
+ if (IS_ERR_OR_NULL(kempld_pdev)) {
/*
* No kempld_pdev device has been registered in kempld_init,
* so we seem to be probing an ACPI platform device.
*/
- ret = kempld_get_acpi_data(pdev);
+ pdata = device_get_match_data(dev);
+ if (!pdata)
+ return -ENODEV;
+
+ ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (ret)
return ret;
- } else if (kempld_pdev != pdev) {
+ } else if (kempld_pdev == pdev) {
+ pdata = dev_get_platdata(dev);
+ } else {
/*
* The platform device we are probing is not the one we
* registered in kempld_init using the DMI table, so this one
@@ -472,7 +438,6 @@ static int kempld_probe(struct platform_device *pdev)
dev_notice(dev, "platform device exists - not using ACPI\n");
return -ENODEV;
}
- pdata = dev_get_platdata(dev);
pld = devm_kzalloc(dev, sizeof(*pld), GFP_KERNEL);
if (!pld)
@@ -503,25 +468,22 @@ static void kempld_remove(struct platform_device *pdev)
struct kempld_device_data *pld = platform_get_drvdata(pdev);
const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
- sysfs_remove_group(&pld->dev->kobj, &pld_attr_group);
-
mfd_remove_devices(&pdev->dev);
pdata->release_hardware_mutex(pld);
}
-#ifdef CONFIG_ACPI
static const struct acpi_device_id kempld_acpi_table[] = {
{ "KEM0000", (kernel_ulong_t)&kempld_platform_data_generic },
{ "KEM0001", (kernel_ulong_t)&kempld_platform_data_generic },
{}
};
MODULE_DEVICE_TABLE(acpi, kempld_acpi_table);
-#endif
static struct platform_driver kempld_driver = {
.driver = {
.name = "kempld",
- .acpi_match_table = ACPI_PTR(kempld_acpi_table),
+ .acpi_match_table = kempld_acpi_table,
+ .dev_groups = pld_groups,
},
.probe = kempld_probe,
.remove_new = kempld_remove,
@@ -534,375 +496,281 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bBD"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "BBL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bBL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "BDV7",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bDV7"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "BHL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bHL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "BKL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bKL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "BSL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bSL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CAL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cAL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CBL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cBL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CBW6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cBW6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CCR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bIP2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CCR6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bIP6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CDV7",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cDV7"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cHL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETXexpress-SC T2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETXe-SC T2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bSC2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETXexpress-SC T6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETXe-SC T6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CHR6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bSC6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CKL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cKL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CNTG",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETXexpress-PC"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CNTG",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-bPC2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CNTX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "PXT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CSL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cSL6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "CVV6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cBT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "FRI2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BIOS_VERSION, "FRI2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "FRI2",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Fish River Island II"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "A203",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "KBox A-203"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "M4A1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-m4AL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "MAL1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-mAL10"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "MAPL",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "mITX-APL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "MBR1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "ETX-OH"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "MVV1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-mBT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "NTC1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "nanoETXexpress-TT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "NTC1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "nETXe-TT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "NTC1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-mTT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "NUP1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-mCT"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "PAPL",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "pITX-APL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "SXAL",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "SMARC-sXAL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "SXAL4",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "SMARC-sXA4"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UNP1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "microETXexpress-DC"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UNP1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cDC2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UNTG",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "microETXexpress-PC"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UNTG",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cPC2"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UUP6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cCT6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "UTH6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "COMe-cTH6"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
}, {
.ident = "Q7AL",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
DMI_MATCH(DMI_BOARD_NAME, "Qseven-Q7AL"),
},
- .driver_data = (void *)&kempld_platform_data_generic,
- .callback = kempld_create_platform_device,
},
{}
};
@@ -911,27 +779,28 @@ MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
static int __init kempld_init(void)
{
const struct dmi_system_id *id;
+ int ret = -ENODEV;
- if (force_device_id[0]) {
- for (id = kempld_dmi_table;
- id->matches[0].slot != DMI_NONE; id++)
- if (strstr(id->ident, force_device_id))
- if (id->callback && !id->callback(id))
- break;
- if (id->matches[0].slot == DMI_NONE)
- return -ENODEV;
- } else {
- dmi_check_system(kempld_dmi_table);
+ for (id = dmi_first_match(kempld_dmi_table); id; id = dmi_first_match(id + 1)) {
+		/* Check if the user asked for an exact device ID match */
+ if (force_device_id[0] && !strstr(id->ident, force_device_id))
+ continue;
+
+ ret = kempld_create_platform_device(&kempld_platform_data_generic);
+ if (ret)
+ continue;
+
+ break;
}
+ if (ret)
+ return ret;
return platform_driver_register(&kempld_driver);
}
static void __exit kempld_exit(void)
{
- if (kempld_pdev)
- platform_device_unregister(kempld_pdev);
-
+ platform_device_unregister(kempld_pdev);
platform_driver_unregister(&kempld_driver);
}
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
index 94f82677675b..b015c8683f1b 100644
--- a/drivers/mfd/ocelot-spi.c
+++ b/drivers/mfd/ocelot-spi.c
@@ -145,7 +145,6 @@ static int ocelot_spi_regmap_bus_read(void *context, const void *reg, size_t reg
struct device *dev = context;
struct ocelot_ddata *ddata;
struct spi_device *spi;
- struct spi_message msg;
unsigned int index = 0;
ddata = dev_get_drvdata(dev);
@@ -166,9 +165,7 @@ static int ocelot_spi_regmap_bus_read(void *context, const void *reg, size_t reg
xfers[index].len = val_size;
index++;
- spi_message_init_with_transfers(&msg, xfers, index);
-
- return spi_sync(spi, &msg);
+ return spi_sync_transfer(spi, xfers, index);
}
static int ocelot_spi_regmap_bus_write(void *context, const void *data, size_t count)
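spi_sync_transfer() is a convenience wrapper that builds the spi_message from an array of transfers and runs spi_sync(), which is why the local struct spi_message disappears above. A short sketch of a two-phase read using it (illustrative function name):

#include <linux/spi/spi.h>

static int example_spi_read(struct spi_device *spi, const u8 *cmd, size_t cmd_len,
			    u8 *buf, size_t buf_len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = cmd_len },	/* command/address phase */
		{ .rx_buf = buf, .len = buf_len },	/* data phase */
	};

	return spi_sync_transfer(spi, xfers, 2);
}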
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index e2261b68b844..5eda3c0dbbdf 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -28,6 +28,10 @@ static const struct resource rtc_resources[] = {
DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM),
};
+static const struct resource rk816_rtc_resources[] = {
+ DEFINE_RES_IRQ(RK816_IRQ_RTC_ALARM),
+};
+
static const struct resource rk817_rtc_resources[] = {
DEFINE_RES_IRQ(RK817_IRQ_RTC_ALARM),
};
@@ -87,6 +91,22 @@ static const struct mfd_cell rk808s[] = {
},
};
+static const struct mfd_cell rk816s[] = {
+ { .name = "rk805-pinctrl", },
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
+ {
+ .name = "rk805-pwrkey",
+ .num_resources = ARRAY_SIZE(rk805_key_resources),
+ .resources = rk805_key_resources,
+ },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rk816_rtc_resources),
+ .resources = rk816_rtc_resources,
+ },
+};
+
static const struct mfd_cell rk817s[] = {
{ .name = "rk808-clkout", },
{ .name = "rk808-regulator", },
@@ -148,6 +168,17 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = {
VB_LO_SEL_3500MV },
};
+static const struct rk808_reg_data rk816_pre_init_reg[] = {
+ { RK818_BUCK1_CONFIG_REG, RK817_RAMP_RATE_MASK,
+ RK817_RAMP_RATE_12_5MV_PER_US },
+ { RK818_BUCK2_CONFIG_REG, RK817_RAMP_RATE_MASK,
+ RK817_RAMP_RATE_12_5MV_PER_US },
+ { RK818_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_250MA },
+ { RK808_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP105C},
+ { RK808_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
+ RK808_VBAT_LOW_3V0 | EN_VABT_LOW_SHUT_DOWN },
+};
+
static const struct rk808_reg_data rk817_pre_init_reg[] = {
{RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
/* Codec specific registers */
@@ -350,6 +381,59 @@ static const struct regmap_irq rk808_irqs[] = {
},
};
+static const unsigned int rk816_irq_status_offsets[] = {
+ RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG1),
+ RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG2),
+ RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG3),
+};
+
+static const unsigned int rk816_irq_mask_offsets[] = {
+ RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG1),
+ RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG2),
+ RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG3),
+};
+
+static unsigned int rk816_get_irq_reg(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ unsigned int irq_reg = base;
+
+ switch (base) {
+ case RK816_INT_STS_REG1:
+ irq_reg += rk816_irq_status_offsets[index];
+ break;
+ case RK816_INT_STS_MSK_REG1:
+ irq_reg += rk816_irq_mask_offsets[index];
+ break;
+ }
+
+ return irq_reg;
+};
+
+static const struct regmap_irq rk816_irqs[] = {
+ /* INT_STS_REG1 IRQs */
+ REGMAP_IRQ_REG(RK816_IRQ_PWRON_FALL, 0, RK816_INT_STS_PWRON_FALL),
+ REGMAP_IRQ_REG(RK816_IRQ_PWRON_RISE, 0, RK816_INT_STS_PWRON_RISE),
+
+ /* INT_STS_REG2 IRQs */
+ REGMAP_IRQ_REG(RK816_IRQ_VB_LOW, 1, RK816_INT_STS_VB_LOW),
+ REGMAP_IRQ_REG(RK816_IRQ_PWRON, 1, RK816_INT_STS_PWRON),
+ REGMAP_IRQ_REG(RK816_IRQ_PWRON_LP, 1, RK816_INT_STS_PWRON_LP),
+ REGMAP_IRQ_REG(RK816_IRQ_HOTDIE, 1, RK816_INT_STS_HOTDIE),
+ REGMAP_IRQ_REG(RK816_IRQ_RTC_ALARM, 1, RK816_INT_STS_RTC_ALARM),
+ REGMAP_IRQ_REG(RK816_IRQ_RTC_PERIOD, 1, RK816_INT_STS_RTC_PERIOD),
+ REGMAP_IRQ_REG(RK816_IRQ_USB_OV, 1, RK816_INT_STS_USB_OV),
+
+ /* INT_STS3 IRQs */
+ REGMAP_IRQ_REG(RK816_IRQ_PLUG_IN, 2, RK816_INT_STS_PLUG_IN),
+ REGMAP_IRQ_REG(RK816_IRQ_PLUG_OUT, 2, RK816_INT_STS_PLUG_OUT),
+ REGMAP_IRQ_REG(RK816_IRQ_CHG_OK, 2, RK816_INT_STS_CHG_OK),
+ REGMAP_IRQ_REG(RK816_IRQ_CHG_TE, 2, RK816_INT_STS_CHG_TE),
+ REGMAP_IRQ_REG(RK816_IRQ_CHG_TS, 2, RK816_INT_STS_CHG_TS),
+ REGMAP_IRQ_REG(RK816_IRQ_CHG_CVTLIM, 2, RK816_INT_STS_CHG_CVTLIM),
+ REGMAP_IRQ_REG(RK816_IRQ_DISCHG_ILIM, 2, RK816_INT_STS_DISCHG_ILIM),
+};
+
static const struct regmap_irq rk818_irqs[] = {
/* INT_STS */
[RK818_IRQ_VOUT_LO] = {
@@ -482,6 +566,18 @@ static const struct regmap_irq_chip rk808_irq_chip = {
.init_ack_masked = true,
};
+static const struct regmap_irq_chip rk816_irq_chip = {
+ .name = "rk816",
+ .irqs = rk816_irqs,
+ .num_irqs = ARRAY_SIZE(rk816_irqs),
+ .num_regs = 3,
+ .get_irq_reg = rk816_get_irq_reg,
+ .status_base = RK816_INT_STS_REG1,
+ .mask_base = RK816_INT_STS_MSK_REG1,
+ .ack_base = RK816_INT_STS_REG1,
+ .init_ack_masked = true,
+};
+
static struct regmap_irq_chip rk817_irq_chip = {
.name = "rk817",
.irqs = rk817_irqs,
@@ -530,6 +626,7 @@ static int rk808_power_off(struct sys_off_data *data)
reg = RK817_SYS_CFG(3);
bit = DEV_OFF;
break;
+ case RK816_ID:
case RK818_ID:
reg = RK818_DEVCTRL_REG;
bit = DEV_OFF;
@@ -637,6 +734,13 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
cells = rk808s;
nr_cells = ARRAY_SIZE(rk808s);
break;
+ case RK816_ID:
+ rk808->regmap_irq_chip = &rk816_irq_chip;
+ pre_init_reg = rk816_pre_init_reg;
+ nr_pre_init_regs = ARRAY_SIZE(rk816_pre_init_reg);
+ cells = rk816s;
+ nr_cells = ARRAY_SIZE(rk816s);
+ break;
case RK818_ID:
rk808->regmap_irq_chip = &rk818_irq_chip;
pre_init_reg = rk818_pre_init_reg;
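Because the RK816 interrupt status/mask registers are not evenly spaced, the chip supplies a .get_irq_reg() callback so regmap-irq can translate its (base, index) pair into the real address instead of assuming base + index * stride. A hedged sketch with hypothetical EXAMPLE_* addresses:

#include <linux/regmap.h>

#define EXAMPLE_STS_REG1	0x49
#define EXAMPLE_STS_REG2	0x4e	/* note: not EXAMPLE_STS_REG1 + 1 */
#define EXAMPLE_STS_REG3	0x55

static unsigned int example_get_irq_reg(struct regmap_irq_chip_data *data,
					 unsigned int base, int index)
{
	static const unsigned int sts[] = {
		EXAMPLE_STS_REG1, EXAMPLE_STS_REG2, EXAMPLE_STS_REG3,
	};

	/* index is the regmap-irq register number, not a register address */
	return sts[index];
}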
diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c
index 75b5cf09d5a0..69a6b297d723 100644
--- a/drivers/mfd/rk8xx-i2c.c
+++ b/drivers/mfd/rk8xx-i2c.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Rockchip RK808/RK818 Core (I2C) driver
+ * Rockchip RK805/RK808/RK816/RK817/RK818 Core (I2C) driver
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
* Copyright (C) 2016 PHYTEC Messtechnik GmbH
@@ -49,6 +49,35 @@ static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
+static bool rk816_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /*
+ * Technically the ROUND_30s bit makes RTC_CTRL_REG volatile, but
+ * we don't use that feature. It's better to cache.
+ */
+
+ switch (reg) {
+ case RK808_SECONDS_REG ... RK808_WEEKS_REG:
+ case RK808_RTC_STATUS_REG:
+ case RK808_VB_MON_REG:
+ case RK808_THERMAL_REG:
+ case RK816_DCDC_EN_REG1:
+ case RK816_DCDC_EN_REG2:
+ case RK816_INT_STS_REG1:
+ case RK816_INT_STS_REG2:
+ case RK816_INT_STS_REG3:
+ case RK808_DEVCTRL_REG:
+ case RK816_SUP_STS_REG:
+ case RK816_GGSTS_REG:
+ case RK816_ZERO_CUR_ADC_REGH:
+ case RK816_ZERO_CUR_ADC_REGL:
+ case RK816_GASCNT_REG(0) ... RK816_BAT_VOL_REGL:
+ return true;
+ }
+
+ return false;
+}
+
static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
{
/*
@@ -100,6 +129,14 @@ static const struct regmap_config rk808_regmap_config = {
.volatile_reg = rk808_is_volatile_reg,
};
+static const struct regmap_config rk816_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RK816_DATA_REG(18),
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = rk816_is_volatile_reg,
+};
+
static const struct regmap_config rk817_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -123,6 +160,11 @@ static const struct rk8xx_i2c_platform_data rk809_data = {
.variant = RK809_ID,
};
+static const struct rk8xx_i2c_platform_data rk816_data = {
+ .regmap_cfg = &rk816_regmap_config,
+ .variant = RK816_ID,
+};
+
static const struct rk8xx_i2c_platform_data rk817_data = {
.regmap_cfg = &rk817_regmap_config,
.variant = RK817_ID,
@@ -161,6 +203,7 @@ static const struct of_device_id rk8xx_i2c_of_match[] = {
{ .compatible = "rockchip,rk805", .data = &rk805_data },
{ .compatible = "rockchip,rk808", .data = &rk808_data },
{ .compatible = "rockchip,rk809", .data = &rk809_data },
+ { .compatible = "rockchip,rk816", .data = &rk816_data },
{ .compatible = "rockchip,rk817", .data = &rk817_data },
{ .compatible = "rockchip,rk818", .data = &rk818_data },
{ },
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 2f3826c7eef4..5b4290f116fc 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -464,6 +464,27 @@ static int set_clk_mode(struct device *dev, struct regmap *regmap,
OUT32K_MODE_CMOS);
}
+static struct i2c_client *bd71828_dev;
+static void bd71828_power_off(void)
+{
+ while (true) {
+ s32 val;
+
+ /* We are not allowed to sleep, so do not use regmap involving mutexes here. */
+ val = i2c_smbus_read_byte_data(bd71828_dev, BD71828_REG_PS_CTRL_1);
+ if (val >= 0)
+ i2c_smbus_write_byte_data(bd71828_dev,
+ BD71828_REG_PS_CTRL_1,
+ BD71828_MASK_STATE_HBNT | (u8)val);
+ mdelay(500);
+ }
+}
+
+static void bd71828_remove_poweroff(void *data)
+{
+ pm_power_off = NULL;
+}
+
static int bd71828_i2c_probe(struct i2c_client *i2c)
{
struct regmap_irq_chip_data *irq_data;
@@ -542,7 +563,20 @@ static int bd71828_i2c_probe(struct i2c_client *i2c)
ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, mfd, cells,
NULL, 0, regmap_irq_get_domain(irq_data));
if (ret)
- dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n");
+ return dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n");
+
+ if (of_device_is_system_power_controller(i2c->dev.of_node) &&
+ chip_type == ROHM_CHIP_TYPE_BD71828) {
+ if (!pm_power_off) {
+ bd71828_dev = i2c;
+ pm_power_off = bd71828_power_off;
+ ret = devm_add_action_or_reset(&i2c->dev,
+ bd71828_remove_poweroff,
+ NULL);
+ } else {
+ dev_warn(&i2c->dev, "Poweroff callback already assigned\n");
+ }
+ }
return ret;
}
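The BD71828 poweroff path runs with interrupts disabled, so the handler above uses the raw SMBus byte accessors rather than regmap, which would take a mutex, and a devm action clears pm_power_off again on unbind. A hedged sketch of the same hook with hypothetical EXAMPLE_* register names:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pm.h>

#define EXAMPLE_PS_CTRL		0x04	/* hypothetical power-state register */
#define EXAMPLE_HIBERNATE	BIT(1)	/* hypothetical power-down bit */

static struct i2c_client *example_client;	/* set in probe before installing pm_power_off */

static void example_power_off(void)
{
	s32 val = i2c_smbus_read_byte_data(example_client, EXAMPLE_PS_CTRL);

	if (val >= 0)
		i2c_smbus_write_byte_data(example_client, EXAMPLE_PS_CTRL,
					  (u8)val | EXAMPLE_HIBERNATE);
	mdelay(500);	/* give the PMIC time to cut power */
}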
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 5711e512b6a2..cba64f107a2f 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -32,6 +32,8 @@
#define RSMU_SABRE_PAGE_ADDR 0x7F
#define RSMU_SABRE_PAGE_WINDOW 128
+typedef int (*rsmu_rw_device)(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u8 bytes);
+
static const struct regmap_range_cfg rsmu_sabre_range_cfg[] = {
{
.range_min = 0,
@@ -54,7 +56,28 @@ static bool rsmu_sabre_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static int rsmu_read_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes)
+static int rsmu_smbus_i2c_write_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u8 bytes)
+{
+ struct i2c_client *client = to_i2c_client(rsmu->dev);
+
+ return i2c_smbus_write_i2c_block_data(client, reg, bytes, buf);
+}
+
+static int rsmu_smbus_i2c_read_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u8 bytes)
+{
+ struct i2c_client *client = to_i2c_client(rsmu->dev);
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, bytes, buf);
+ if (ret == bytes)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int rsmu_i2c_read_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u8 bytes)
{
struct i2c_client *client = to_i2c_client(rsmu->dev);
struct i2c_msg msg[2];
@@ -84,10 +107,11 @@ static int rsmu_read_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes)
return 0;
}
-static int rsmu_write_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes)
+static int rsmu_i2c_write_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u8 bytes)
{
struct i2c_client *client = to_i2c_client(rsmu->dev);
- u8 msg[RSMU_MAX_WRITE_COUNT + 1]; /* 1 Byte added for the device register */
+	/* 1 byte is added for the device register */
+ u8 msg[RSMU_MAX_WRITE_COUNT + 1];
int cnt;
if (bytes > RSMU_MAX_WRITE_COUNT)
@@ -107,7 +131,8 @@ static int rsmu_write_device(struct rsmu_ddata *rsmu, u8 reg, u8 *buf, u16 bytes
return 0;
}
-static int rsmu_write_page_register(struct rsmu_ddata *rsmu, u32 reg)
+static int rsmu_write_page_register(struct rsmu_ddata *rsmu, u32 reg,
+ rsmu_rw_device rsmu_write_device)
{
u32 page = reg & RSMU_CM_PAGE_MASK;
u8 buf[4];
@@ -136,35 +161,35 @@ static int rsmu_write_page_register(struct rsmu_ddata *rsmu, u32 reg)
return err;
}
-static int rsmu_reg_read(void *context, unsigned int reg, unsigned int *val)
+static int rsmu_i2c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct rsmu_ddata *rsmu = i2c_get_clientdata((struct i2c_client *)context);
u8 addr = (u8)(reg & RSMU_CM_ADDRESS_MASK);
int err;
- err = rsmu_write_page_register(rsmu, reg);
+ err = rsmu_write_page_register(rsmu, reg, rsmu_i2c_write_device);
if (err)
return err;
- err = rsmu_read_device(rsmu, addr, (u8 *)val, 1);
+ err = rsmu_i2c_read_device(rsmu, addr, (u8 *)val, 1);
if (err)
dev_err(rsmu->dev, "Failed to read offset address 0x%x\n", addr);
return err;
}
-static int rsmu_reg_write(void *context, unsigned int reg, unsigned int val)
+static int rsmu_i2c_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct rsmu_ddata *rsmu = i2c_get_clientdata((struct i2c_client *)context);
u8 addr = (u8)(reg & RSMU_CM_ADDRESS_MASK);
u8 data = (u8)val;
int err;
- err = rsmu_write_page_register(rsmu, reg);
+ err = rsmu_write_page_register(rsmu, reg, rsmu_i2c_write_device);
if (err)
return err;
- err = rsmu_write_device(rsmu, addr, &data, 1);
+ err = rsmu_i2c_write_device(rsmu, addr, &data, 1);
if (err)
dev_err(rsmu->dev,
"Failed to write offset address 0x%x\n", addr);
@@ -172,12 +197,57 @@ static int rsmu_reg_write(void *context, unsigned int reg, unsigned int val)
return err;
}
-static const struct regmap_config rsmu_cm_regmap_config = {
+static int rsmu_smbus_i2c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rsmu_ddata *rsmu = i2c_get_clientdata((struct i2c_client *)context);
+ u8 addr = (u8)(reg & RSMU_CM_ADDRESS_MASK);
+ int err;
+
+ err = rsmu_write_page_register(rsmu, reg, rsmu_smbus_i2c_write_device);
+ if (err)
+ return err;
+
+ err = rsmu_smbus_i2c_read_device(rsmu, addr, (u8 *)val, 1);
+ if (err)
+ dev_err(rsmu->dev, "Failed to read offset address 0x%x\n", addr);
+
+ return err;
+}
+
+static int rsmu_smbus_i2c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rsmu_ddata *rsmu = i2c_get_clientdata((struct i2c_client *)context);
+ u8 addr = (u8)(reg & RSMU_CM_ADDRESS_MASK);
+ u8 data = (u8)val;
+ int err;
+
+ err = rsmu_write_page_register(rsmu, reg, rsmu_smbus_i2c_write_device);
+ if (err)
+ return err;
+
+ err = rsmu_smbus_i2c_write_device(rsmu, addr, &data, 1);
+ if (err)
+ dev_err(rsmu->dev,
+ "Failed to write offset address 0x%x\n", addr);
+
+ return err;
+}
+
+static const struct regmap_config rsmu_i2c_cm_regmap_config = {
.reg_bits = 32,
.val_bits = 8,
.max_register = 0x20120000,
- .reg_read = rsmu_reg_read,
- .reg_write = rsmu_reg_write,
+ .reg_read = rsmu_i2c_reg_read,
+ .reg_write = rsmu_i2c_reg_write,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_config rsmu_smbus_i2c_cm_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .max_register = 0x20120000,
+ .reg_read = rsmu_smbus_i2c_reg_read,
+ .reg_write = rsmu_smbus_i2c_reg_write,
.cache_type = REGCACHE_NONE,
};
@@ -219,7 +289,15 @@ static int rsmu_i2c_probe(struct i2c_client *client)
switch (rsmu->type) {
case RSMU_CM:
- cfg = &rsmu_cm_regmap_config;
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ cfg = &rsmu_i2c_cm_regmap_config;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ cfg = &rsmu_smbus_i2c_cm_regmap_config;
+ } else {
+ dev_err(rsmu->dev, "Unsupported i2c adapter\n");
+ return -ENOTSUPP;
+ }
break;
case RSMU_SABRE:
cfg = &rsmu_sabre_regmap_config;
@@ -236,6 +314,7 @@ static int rsmu_i2c_probe(struct i2c_client *client)
rsmu->regmap = devm_regmap_init(&client->dev, NULL, client, cfg);
else
rsmu->regmap = devm_regmap_init_i2c(client, cfg);
+
if (IS_ERR(rsmu->regmap)) {
ret = PTR_ERR(rsmu->regmap);
dev_err(rsmu->dev, "Failed to allocate register map: %d\n", ret);
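Probe now picks the register accessors from the adapter's capabilities: a full I2C master keeps the raw i2c_transfer() path, an SMBus-only controller falls back to i2c_smbus_{read,write}_i2c_block_data(), and anything else is rejected. Note how the SMBus read above treats a short transfer as -EIO; a condensed sketch of that helper (illustrative name):

#include <linux/i2c.h>

static int example_smbus_read(struct i2c_client *client, u8 reg, u8 *buf, u8 bytes)
{
	int ret = i2c_smbus_read_i2c_block_data(client, reg, bytes, buf);

	if (ret < 0)
		return ret;	/* bus error */
	if (ret != bytes)
		return -EIO;	/* short read */
	return 0;
}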
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
index ca0a1202c3ce..39d9be1e141f 100644
--- a/drivers/mfd/rsmu_spi.c
+++ b/drivers/mfd/rsmu_spi.c
@@ -106,10 +106,10 @@ static int rsmu_write_page_register(struct rsmu_ddata *rsmu, u32 reg)
return 0;
page_reg = RSMU_CM_PAGE_ADDR;
page = reg & RSMU_PAGE_MASK;
- buf[0] = (u8)(page & 0xff);
- buf[1] = (u8)((page >> 8) & 0xff);
- buf[2] = (u8)((page >> 16) & 0xff);
- buf[3] = (u8)((page >> 24) & 0xff);
+ buf[0] = (u8)(page & 0xFF);
+ buf[1] = (u8)((page >> 8) & 0xFF);
+ buf[2] = (u8)((page >> 16) & 0xFF);
+ buf[3] = (u8)((page >> 24) & 0xFF);
bytes = 4;
break;
case RSMU_SABRE:
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index b0b0be483dbf..f849f2d34ec7 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -64,7 +64,6 @@ enum ssbi_controller_type {
};
struct ssbi {
- struct device *slave;
void __iomem *base;
spinlock_t lock;
enum ssbi_controller_type controller_type;
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 07e5aa10a146..a41e9a3e2064 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -765,7 +765,6 @@ static int timb_probe(struct pci_dev *dev,
default:
dev_err(&dev->dev, "Unknown IP setup: %d.%d.%d\n",
priv->fw.major, priv->fw.minor, ip_setup);
- err = -ENODEV;
goto err_mfd;
}
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index 783ee59901e8..c59f3d7e32b0 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Core functions for TI TPS6594/TPS6593/LP8764 PMICs
+ * Core functions for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
@@ -278,16 +278,159 @@ static const unsigned int tps6594_irq_reg[] = {
TPS6594_REG_RTC_STATUS,
};
+/* TPS65224 Resources */
+
+static const struct resource tps65224_regulator_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BUCK1_UVOV, TPS65224_IRQ_NAME_BUCK1_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BUCK2_UVOV, TPS65224_IRQ_NAME_BUCK2_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BUCK3_UVOV, TPS65224_IRQ_NAME_BUCK3_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BUCK4_UVOV, TPS65224_IRQ_NAME_BUCK4_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_LDO1_UVOV, TPS65224_IRQ_NAME_LDO1_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_LDO2_UVOV, TPS65224_IRQ_NAME_LDO2_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_LDO3_UVOV, TPS65224_IRQ_NAME_LDO3_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_VCCA_UVOV, TPS65224_IRQ_NAME_VCCA_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_VMON1_UVOV, TPS65224_IRQ_NAME_VMON1_UVOV),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_VMON2_UVOV, TPS65224_IRQ_NAME_VMON2_UVOV),
+};
+
+static const struct resource tps65224_pinctrl_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO1, TPS65224_IRQ_NAME_GPIO1),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO2, TPS65224_IRQ_NAME_GPIO2),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO3, TPS65224_IRQ_NAME_GPIO3),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO4, TPS65224_IRQ_NAME_GPIO4),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO5, TPS65224_IRQ_NAME_GPIO5),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_GPIO6, TPS65224_IRQ_NAME_GPIO6),
+};
+
+static const struct resource tps65224_pfsm_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_VSENSE, TPS65224_IRQ_NAME_VSENSE),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_ENABLE, TPS65224_IRQ_NAME_ENABLE),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_SHORT, TPS65224_IRQ_NAME_PB_SHORT),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_FSD, TPS65224_IRQ_NAME_FSD),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_SOFT_REBOOT, TPS65224_IRQ_NAME_SOFT_REBOOT),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BIST_PASS, TPS65224_IRQ_NAME_BIST_PASS),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_EXT_CLK, TPS65224_IRQ_NAME_EXT_CLK),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_REG_UNLOCK, TPS65224_IRQ_NAME_REG_UNLOCK),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_TWARN, TPS65224_IRQ_NAME_TWARN),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_LONG, TPS65224_IRQ_NAME_PB_LONG),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_FALL, TPS65224_IRQ_NAME_PB_FALL),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_RISE, TPS65224_IRQ_NAME_PB_RISE),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_TSD_ORD, TPS65224_IRQ_NAME_TSD_ORD),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BIST_FAIL, TPS65224_IRQ_NAME_BIST_FAIL),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_REG_CRC_ERR, TPS65224_IRQ_NAME_REG_CRC_ERR),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_RECOV_CNT, TPS65224_IRQ_NAME_RECOV_CNT),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_TSD_IMM, TPS65224_IRQ_NAME_TSD_IMM),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_VCCA_OVP, TPS65224_IRQ_NAME_VCCA_OVP),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PFSM_ERR, TPS65224_IRQ_NAME_PFSM_ERR),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BG_XMON, TPS65224_IRQ_NAME_BG_XMON),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_IMM_SHUTDOWN, TPS65224_IRQ_NAME_IMM_SHUTDOWN),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_ORD_SHUTDOWN, TPS65224_IRQ_NAME_ORD_SHUTDOWN),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_MCU_PWR_ERR, TPS65224_IRQ_NAME_MCU_PWR_ERR),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_SOC_PWR_ERR, TPS65224_IRQ_NAME_SOC_PWR_ERR),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_COMM_ERR, TPS65224_IRQ_NAME_COMM_ERR),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_I2C2_ERR, TPS65224_IRQ_NAME_I2C2_ERR),
+};
+
+static const struct resource tps65224_adc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_ADC_CONV_READY, TPS65224_IRQ_NAME_ADC_CONV_READY),
+};
+
+static const struct mfd_cell tps65224_common_cells[] = {
+ MFD_CELL_RES("tps65224-adc", tps65224_adc_resources),
+ MFD_CELL_RES("tps6594-pfsm", tps65224_pfsm_resources),
+ MFD_CELL_RES("tps6594-pinctrl", tps65224_pinctrl_resources),
+ MFD_CELL_RES("tps6594-regulator", tps65224_regulator_resources),
+};
+
+static const struct regmap_irq tps65224_irqs[] = {
+ /* INT_BUCK register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_BUCK1_UVOV, 0, TPS65224_BIT_BUCK1_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_BUCK2_UVOV, 0, TPS65224_BIT_BUCK2_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_BUCK3_UVOV, 0, TPS65224_BIT_BUCK3_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_BUCK4_UVOV, 0, TPS65224_BIT_BUCK4_UVOV_INT),
+
+ /* INT_VMON_LDO register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_LDO1_UVOV, 1, TPS65224_BIT_LDO1_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_LDO2_UVOV, 1, TPS65224_BIT_LDO2_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_LDO3_UVOV, 1, TPS65224_BIT_LDO3_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_VCCA_UVOV, 1, TPS65224_BIT_VCCA_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_VMON1_UVOV, 1, TPS65224_BIT_VMON1_UVOV_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_VMON2_UVOV, 1, TPS65224_BIT_VMON2_UVOV_INT),
+
+ /* INT_GPIO register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO1, 2, TPS65224_BIT_GPIO1_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO2, 2, TPS65224_BIT_GPIO2_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO3, 2, TPS65224_BIT_GPIO3_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO4, 2, TPS65224_BIT_GPIO4_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO5, 2, TPS65224_BIT_GPIO5_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_GPIO6, 2, TPS65224_BIT_GPIO6_INT),
+
+ /* INT_STARTUP register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_VSENSE, 3, TPS65224_BIT_VSENSE_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_ENABLE, 3, TPS6594_BIT_ENABLE_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_PB_SHORT, 3, TPS65224_BIT_PB_SHORT_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_FSD, 3, TPS6594_BIT_FSD_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_SOFT_REBOOT, 3, TPS6594_BIT_SOFT_REBOOT_INT),
+
+ /* INT_MISC register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_BIST_PASS, 4, TPS6594_BIT_BIST_PASS_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_EXT_CLK, 4, TPS6594_BIT_EXT_CLK_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_REG_UNLOCK, 4, TPS65224_BIT_REG_UNLOCK_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_TWARN, 4, TPS6594_BIT_TWARN_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_PB_LONG, 4, TPS65224_BIT_PB_LONG_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_PB_FALL, 4, TPS65224_BIT_PB_FALL_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_PB_RISE, 4, TPS65224_BIT_PB_RISE_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_ADC_CONV_READY, 4, TPS65224_BIT_ADC_CONV_READY_INT),
+
+ /* INT_MODERATE_ERR register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_TSD_ORD, 5, TPS6594_BIT_TSD_ORD_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_BIST_FAIL, 5, TPS6594_BIT_BIST_FAIL_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_REG_CRC_ERR, 5, TPS6594_BIT_REG_CRC_ERR_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_RECOV_CNT, 5, TPS6594_BIT_RECOV_CNT_INT),
+
+ /* INT_SEVERE_ERR register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_TSD_IMM, 6, TPS6594_BIT_TSD_IMM_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_VCCA_OVP, 6, TPS6594_BIT_VCCA_OVP_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_PFSM_ERR, 6, TPS6594_BIT_PFSM_ERR_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_BG_XMON, 6, TPS65224_BIT_BG_XMON_INT),
+
+ /* INT_FSM_ERR register */
+ REGMAP_IRQ_REG(TPS65224_IRQ_IMM_SHUTDOWN, 7, TPS6594_BIT_IMM_SHUTDOWN_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_ORD_SHUTDOWN, 7, TPS6594_BIT_ORD_SHUTDOWN_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_MCU_PWR_ERR, 7, TPS6594_BIT_MCU_PWR_ERR_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_SOC_PWR_ERR, 7, TPS6594_BIT_SOC_PWR_ERR_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_COMM_ERR, 7, TPS6594_BIT_COMM_ERR_INT),
+ REGMAP_IRQ_REG(TPS65224_IRQ_I2C2_ERR, 7, TPS65224_BIT_I2C2_ERR_INT),
+};
+
+static const unsigned int tps65224_irq_reg[] = {
+ TPS6594_REG_INT_BUCK,
+ TPS6594_REG_INT_LDO_VMON,
+ TPS6594_REG_INT_GPIO,
+ TPS6594_REG_INT_STARTUP,
+ TPS6594_REG_INT_MISC,
+ TPS6594_REG_INT_MODERATE_ERR,
+ TPS6594_REG_INT_SEVERE_ERR,
+ TPS6594_REG_INT_FSM_ERR,
+};
+
static inline unsigned int tps6594_get_irq_reg(struct regmap_irq_chip_data *data,
unsigned int base, int index)
{
return tps6594_irq_reg[index];
};
+static inline unsigned int tps65224_get_irq_reg(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ return tps65224_irq_reg[index];
+};
+
static int tps6594_handle_post_irq(void *irq_drv_data)
{
struct tps6594 *tps = irq_drv_data;
int ret = 0;
+ unsigned int regmap_reg, mask_val;
/*
* When CRC is enabled, writing to a read-only bit triggers an error,
@@ -299,10 +442,17 @@ static int tps6594_handle_post_irq(void *irq_drv_data)
* COMM_ADR_ERR_INT bit set. Clear immediately this bit to avoid raising
* a new interrupt.
*/
- if (tps->use_crc)
- ret = regmap_write_bits(tps->regmap, TPS6594_REG_INT_COMM_ERR,
- TPS6594_BIT_COMM_ADR_ERR_INT,
- TPS6594_BIT_COMM_ADR_ERR_INT);
+ if (tps->use_crc) {
+ if (tps->chip_id == TPS65224) {
+ regmap_reg = TPS6594_REG_INT_FSM_ERR;
+ mask_val = TPS6594_BIT_COMM_ERR_INT;
+ } else {
+ regmap_reg = TPS6594_REG_INT_COMM_ERR;
+ mask_val = TPS6594_BIT_COMM_ADR_ERR_INT;
+ }
+
+ ret = regmap_write_bits(tps->regmap, regmap_reg, mask_val, mask_val);
+ }
return ret;
};
@@ -319,24 +469,58 @@ static struct regmap_irq_chip tps6594_irq_chip = {
.handle_post_irq = tps6594_handle_post_irq,
};
-bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg)
-{
- return (reg >= TPS6594_REG_INT_TOP && reg <= TPS6594_REG_STAT_READBACK_ERR) ||
- reg == TPS6594_REG_RTC_STATUS;
-}
-EXPORT_SYMBOL_GPL(tps6594_is_volatile_reg);
+static struct regmap_irq_chip tps65224_irq_chip = {
+ .ack_base = TPS6594_REG_INT_BUCK,
+ .ack_invert = 1,
+ .clear_ack = 1,
+ .init_ack_masked = 1,
+ .num_regs = ARRAY_SIZE(tps65224_irq_reg),
+ .irqs = tps65224_irqs,
+ .num_irqs = ARRAY_SIZE(tps65224_irqs),
+ .get_irq_reg = tps65224_get_irq_reg,
+ .handle_post_irq = tps6594_handle_post_irq,
+};
+
+static const struct regmap_range tps6594_volatile_ranges[] = {
+ regmap_reg_range(TPS6594_REG_INT_TOP, TPS6594_REG_STAT_READBACK_ERR),
+ regmap_reg_range(TPS6594_REG_RTC_STATUS, TPS6594_REG_RTC_STATUS),
+};
+
+const struct regmap_access_table tps6594_volatile_table = {
+ .yes_ranges = tps6594_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps6594_volatile_ranges),
+};
+EXPORT_SYMBOL_GPL(tps6594_volatile_table);
+
+static const struct regmap_range tps65224_volatile_ranges[] = {
+ regmap_reg_range(TPS6594_REG_INT_TOP, TPS6594_REG_STAT_SEVERE_ERR),
+};
+
+const struct regmap_access_table tps65224_volatile_table = {
+ .yes_ranges = tps65224_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65224_volatile_ranges),
+};
+EXPORT_SYMBOL_GPL(tps65224_volatile_table);
static int tps6594_check_crc_mode(struct tps6594 *tps, bool primary_pmic)
{
int ret;
+ unsigned int regmap_reg, mask_val;
+
+ if (tps->chip_id == TPS65224) {
+ regmap_reg = TPS6594_REG_CONFIG_2;
+ mask_val = TPS65224_BIT_I2C1_SPI_CRC_EN;
+ } else {
+ regmap_reg = TPS6594_REG_SERIAL_IF_CONFIG;
+ mask_val = TPS6594_BIT_I2C1_SPI_CRC_EN;
+ };
/*
* Check if CRC is enabled.
* Once CRC is enabled, it can't be disabled until next power cycle.
*/
tps->use_crc = true;
- ret = regmap_test_bits(tps->regmap, TPS6594_REG_SERIAL_IF_CONFIG,
- TPS6594_BIT_I2C1_SPI_CRC_EN);
+ ret = regmap_test_bits(tps->regmap, regmap_reg, mask_val);
if (ret == 0) {
ret = -EIO;
} else if (ret > 0) {
@@ -351,6 +535,15 @@ static int tps6594_check_crc_mode(struct tps6594 *tps, bool primary_pmic)
static int tps6594_set_crc_feature(struct tps6594 *tps)
{
int ret;
+ unsigned int regmap_reg, mask_val;
+
+ if (tps->chip_id == TPS65224) {
+ regmap_reg = TPS6594_REG_CONFIG_2;
+ mask_val = TPS65224_BIT_I2C1_SPI_CRC_EN;
+ } else {
+ regmap_reg = TPS6594_REG_FSM_I2C_TRIGGERS;
+ mask_val = TPS6594_BIT_TRIGGER_I2C(2);
+ }
ret = tps6594_check_crc_mode(tps, true);
if (ret) {
@@ -359,8 +552,7 @@ static int tps6594_set_crc_feature(struct tps6594 *tps)
* on primary PMIC.
*/
tps->use_crc = false;
- ret = regmap_write_bits(tps->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
- TPS6594_BIT_TRIGGER_I2C(2), TPS6594_BIT_TRIGGER_I2C(2));
+ ret = regmap_write_bits(tps->regmap, regmap_reg, mask_val, mask_val);
if (ret)
return ret;
@@ -416,6 +608,9 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
{
struct device *dev = tps->dev;
int ret;
+ struct regmap_irq_chip *irq_chip;
+ const struct mfd_cell *cells;
+ int n_cells;
if (enable_crc) {
ret = tps6594_enable_crc(tps);
@@ -429,26 +624,35 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
if (ret)
return dev_err_probe(dev, ret, "Failed to set PMIC state\n");
- tps6594_irq_chip.irq_drv_data = tps;
- tps6594_irq_chip.name = devm_kasprintf(dev, GFP_KERNEL, "%s-%ld-0x%02x",
- dev->driver->name, tps->chip_id, tps->reg);
+ if (tps->chip_id == TPS65224) {
+ irq_chip = &tps65224_irq_chip;
+ n_cells = ARRAY_SIZE(tps65224_common_cells);
+ cells = tps65224_common_cells;
+ } else {
+ irq_chip = &tps6594_irq_chip;
+ n_cells = ARRAY_SIZE(tps6594_common_cells);
+ cells = tps6594_common_cells;
+ }
+
+ irq_chip->irq_drv_data = tps;
+ irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-%ld-0x%02x",
+ dev->driver->name, tps->chip_id, tps->reg);
- if (!tps6594_irq_chip.name)
+ if (!irq_chip->name)
return -ENOMEM;
ret = devm_regmap_add_irq_chip(dev, tps->regmap, tps->irq, IRQF_SHARED | IRQF_ONESHOT,
- 0, &tps6594_irq_chip, &tps->irq_data);
+ 0, irq_chip, &tps->irq_data);
if (ret)
return dev_err_probe(dev, ret, "Failed to add regmap IRQ\n");
- ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, tps6594_common_cells,
- ARRAY_SIZE(tps6594_common_cells), NULL, 0,
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cells, NULL, 0,
regmap_irq_get_domain(tps->irq_data));
if (ret)
return dev_err_probe(dev, ret, "Failed to add common child devices\n");
- /* No RTC for LP8764 */
- if (tps->chip_id != LP8764) {
+ /* No RTC for LP8764 and TPS65224 */
+ if (tps->chip_id != LP8764 && tps->chip_id != TPS65224) {
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, tps6594_rtc_cells,
ARRAY_SIZE(tps6594_rtc_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
@@ -461,5 +665,6 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
EXPORT_SYMBOL_GPL(tps6594_device_init);
MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_AUTHOR("Bhargav Raviprakash <bhargav.r@ltts.com");
MODULE_DESCRIPTION("TPS6594 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps6594-i2c.c b/drivers/mfd/tps6594-i2c.c
index 899c88c0fe77..4ab91c34d9fb 100644
--- a/drivers/mfd/tps6594-i2c.c
+++ b/drivers/mfd/tps6594-i2c.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * I2C access driver for TI TPS6594/TPS6593/LP8764 PMICs
+ * I2C access driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
@@ -183,11 +183,11 @@ static int tps6594_i2c_write(void *context, const void *data, size_t count)
return ret;
}
-static const struct regmap_config tps6594_i2c_regmap_config = {
+static struct regmap_config tps6594_i2c_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.max_register = TPS6594_REG_DWD_FAIL_CNT_REG,
- .volatile_reg = tps6594_is_volatile_reg,
+ .volatile_table = &tps6594_volatile_table,
.read = tps6594_i2c_read,
.write = tps6594_i2c_write,
};
@@ -196,6 +196,7 @@ static const struct of_device_id tps6594_i2c_of_match_table[] = {
{ .compatible = "ti,tps6594-q1", .data = (void *)TPS6594, },
{ .compatible = "ti,tps6593-q1", .data = (void *)TPS6593, },
{ .compatible = "ti,lp8764-q1", .data = (void *)LP8764, },
+ { .compatible = "ti,tps65224-q1", .data = (void *)TPS65224, },
{}
};
MODULE_DEVICE_TABLE(of, tps6594_i2c_of_match_table);
@@ -216,15 +217,18 @@ static int tps6594_i2c_probe(struct i2c_client *client)
tps->reg = client->addr;
tps->irq = client->irq;
- tps->regmap = devm_regmap_init(dev, NULL, client, &tps6594_i2c_regmap_config);
- if (IS_ERR(tps->regmap))
- return dev_err_probe(dev, PTR_ERR(tps->regmap), "Failed to init regmap\n");
-
match = of_match_device(tps6594_i2c_of_match_table, dev);
if (!match)
return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n");
tps->chip_id = (unsigned long)match->data;
+ if (tps->chip_id == TPS65224)
+ tps6594_i2c_regmap_config.volatile_table = &tps65224_volatile_table;
+
+ tps->regmap = devm_regmap_init(dev, NULL, client, &tps6594_i2c_regmap_config);
+ if (IS_ERR(tps->regmap))
+ return dev_err_probe(dev, PTR_ERR(tps->regmap), "Failed to init regmap\n");
+
crc8_populate_msb(tps6594_i2c_crc_table, TPS6594_CRC8_POLYNOMIAL);
return tps6594_device_init(tps, enable_crc);
@@ -240,5 +244,5 @@ static struct i2c_driver tps6594_i2c_driver = {
module_i2c_driver(tps6594_i2c_driver);
MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
-MODULE_DESCRIPTION("TPS6594 I2C Interface Driver");
+MODULE_DESCRIPTION("I2C Interface Driver for TPS65224, TPS6594/3, and LP8764");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c
index 24b72847e3f5..6ebccb79f0cc 100644
--- a/drivers/mfd/tps6594-spi.c
+++ b/drivers/mfd/tps6594-spi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * SPI access driver for TI TPS6594/TPS6593/LP8764 PMICs
+ * SPI access driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
@@ -66,11 +66,11 @@ static int tps6594_spi_reg_write(void *context, unsigned int reg, unsigned int v
return spi_write(spi, buf, count);
}
-static const struct regmap_config tps6594_spi_regmap_config = {
+static struct regmap_config tps6594_spi_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.max_register = TPS6594_REG_DWD_FAIL_CNT_REG,
- .volatile_reg = tps6594_is_volatile_reg,
+ .volatile_table = &tps6594_volatile_table,
.reg_read = tps6594_spi_reg_read,
.reg_write = tps6594_spi_reg_write,
.use_single_read = true,
@@ -81,6 +81,7 @@ static const struct of_device_id tps6594_spi_of_match_table[] = {
{ .compatible = "ti,tps6594-q1", .data = (void *)TPS6594, },
{ .compatible = "ti,tps6593-q1", .data = (void *)TPS6593, },
{ .compatible = "ti,lp8764-q1", .data = (void *)LP8764, },
+ { .compatible = "ti,tps65224-q1", .data = (void *)TPS65224, },
{}
};
MODULE_DEVICE_TABLE(of, tps6594_spi_of_match_table);
@@ -101,15 +102,18 @@ static int tps6594_spi_probe(struct spi_device *spi)
tps->reg = spi_get_chipselect(spi, 0);
tps->irq = spi->irq;
- tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config);
- if (IS_ERR(tps->regmap))
- return dev_err_probe(dev, PTR_ERR(tps->regmap), "Failed to init regmap\n");
-
match = of_match_device(tps6594_spi_of_match_table, dev);
if (!match)
return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n");
tps->chip_id = (unsigned long)match->data;
+ if (tps->chip_id == TPS65224)
+ tps6594_spi_regmap_config.volatile_table = &tps65224_volatile_table;
+
+ tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config);
+ if (IS_ERR(tps->regmap))
+ return dev_err_probe(dev, PTR_ERR(tps->regmap), "Failed to init regmap\n");
+
crc8_populate_msb(tps6594_spi_crc_table, TPS6594_CRC8_POLYNOMIAL);
return tps6594_device_init(tps, enable_crc);
@@ -125,5 +129,5 @@ static struct spi_driver tps6594_spi_driver = {
module_spi_driver(tps6594_spi_driver);
MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
-MODULE_DESCRIPTION("TPS6594 SPI Interface Driver");
+MODULE_DESCRIPTION("SPI Interface Driver for TPS65224, TPS6594/3, and LP8764");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 1a64364700eb..0ad2ff9065aa 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 572333ead5fb..4bd4f32bcdab 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -758,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -782,6 +773,15 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
/* If this a SPD EEPROM, probe for DDR3 thermal sensor */
if (cdata == &at24_data_spd)
at24_probe_temp_sensor(client);
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 95ef971b5e1c..39468bd27b85 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -15,11 +15,7 @@ lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
KASAN_SANITIZE_stackleak.o := n
-KASAN_SANITIZE_rodata.o := n
-KCSAN_SANITIZE_rodata.o := n
-KCOV_INSTRUMENT_rodata.o := n
-OBJECT_FILES_NON_STANDARD_rodata.o := y
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index b93404d65650..5b861dbff27e 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
return fdesc;
}
-static noinline void execute_location(void *dst, bool write)
+static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index aac36750d2c5..c3a6657dcd4a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -115,6 +115,8 @@
#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
+#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b5757993c9b2..7f59dd38c32f 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,12 +116,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 6c9f00bcb94b..b543e6b9f3cf 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -400,25 +400,40 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
- ret = mei_restart(mei_dev);
+ ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index b1e4c23b31a3..49abc95677cd 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -236,8 +236,11 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
pdev = to_pci_dev(dev);
- if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
- pdev->vendor != PCI_VENDOR_ID_INTEL)
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) &&
+ pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8))
return 0;
if (subcomponent != I915_COMPONENT_PXP)
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index ecfb70cd057c..e6a98dba8a73 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@@ -384,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
*/
@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- atomic_inc(&tp->assert_cnt);
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- wake_up(&tp->xfer_wait);
-
- if (tp->event_notify)
- tp->event_notify(tp->event_notify_context);
-
- return IRQ_HANDLED;
-}
-
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@@ -490,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
- vsc_tp_thread_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), tp);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
if (ret)
return ret;
@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
+ free_irq(spi->irq, tp);
+
return ret;
}
@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
index f9513ddc3e40..14ca195cbddc 100644
--- a/drivers/misc/mei/vsc-tp.h
+++ b/drivers/misc/mei/vsc-tp.h
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 9ad20e82785b..b21598a18f6d 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -44,8 +44,6 @@ static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
.probe = pvpanic_pci_probe,
- .driver = {
- .dev_groups = pvpanic_dev_groups,
- },
+ .dev_groups = pvpanic_dev_groups,
};
module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
index 88dcac814892..9bcca1856bfe 100644
--- a/drivers/misc/tps6594-pfsm.c
+++ b/drivers/misc/tps6594-pfsm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PFSM (Pre-configurable Finite State Machine) driver for TI TPS6594/TPS6593/LP8764 PMICs
+ * PFSM (Pre-configurable Finite State Machine) driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
@@ -39,10 +39,12 @@
*
* @miscdev: misc device infos
* @regmap: regmap for accessing the device registers
+ * @chip_id: chip identifier of the device
*/
struct tps6594_pfsm {
struct miscdevice miscdev;
struct regmap *regmap;
+ unsigned long chip_id;
};
static ssize_t tps6594_pfsm_read(struct file *f, char __user *buf,
@@ -133,21 +135,29 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
struct pmic_state_opt state_opt;
void __user *argp = (void __user *)arg;
+ unsigned int regmap_reg, mask;
int ret = -ENOIOCTLCMD;
switch (cmd) {
case PMIC_GOTO_STANDBY:
- /* Disable LP mode */
- ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
- TPS6594_BIT_LP_STANDBY_SEL);
- if (ret)
- return ret;
+ /* Disable LP mode on TPS6594 Family PMIC */
+ if (pfsm->chip_id != TPS65224) {
+ ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_BIT_LP_STANDBY_SEL);
+
+ if (ret)
+ return ret;
+ }
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
break;
case PMIC_GOTO_LP_STANDBY:
+ /* TPS65224 does not support LP STANDBY */
+ if (pfsm->chip_id == TPS65224)
+ return ret;
+
/* Enable LP mode */
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_BIT_LP_STANDBY_SEL);
@@ -169,6 +179,10 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a
TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B);
break;
case PMIC_SET_MCU_ONLY_STATE:
+ /* TPS65224 does not support MCU_ONLY_STATE */
+ if (pfsm->chip_id == TPS65224)
+ return ret;
+
if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
return -EFAULT;
@@ -192,14 +206,20 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a
return -EFAULT;
/* Configure wake-up destination */
+ if (pfsm->chip_id == TPS65224) {
+ regmap_reg = TPS65224_REG_STARTUP_CTRL;
+ mask = TPS65224_MASK_STARTUP_DEST;
+ } else {
+ regmap_reg = TPS6594_REG_RTC_CTRL_2;
+ mask = TPS6594_MASK_STARTUP_DEST;
+ }
+
if (state_opt.mcu_only_startup_dest)
- ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
- TPS6594_MASK_STARTUP_DEST,
- TPS6594_STARTUP_DEST_MCU_ONLY);
+ ret = regmap_write_bits(pfsm->regmap, regmap_reg,
+ mask, TPS6594_STARTUP_DEST_MCU_ONLY);
else
- ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
- TPS6594_MASK_STARTUP_DEST,
- TPS6594_STARTUP_DEST_ACTIVE);
+ ret = regmap_write_bits(pfsm->regmap, regmap_reg,
+ mask, TPS6594_STARTUP_DEST_ACTIVE);
if (ret)
return ret;
@@ -211,7 +231,8 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a
/* Modify NSLEEP1-2 bits */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
- TPS6594_BIT_NSLEEP2B);
+ pfsm->chip_id == TPS65224 ?
+ TPS6594_BIT_NSLEEP1B : TPS6594_BIT_NSLEEP2B);
break;
}
@@ -262,6 +283,7 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
tps->chip_id, tps->reg);
pfsm->miscdev.fops = &tps6594_pfsm_fops;
pfsm->miscdev.parent = dev->parent;
+ pfsm->chip_id = tps->chip_id;
for (i = 0 ; i < pdev->num_resources ; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 4f8d962bb5b2..c61e8953511d 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -787,8 +787,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors,
PCI_IRQ_MSIX);
if (error < 0) {
- error = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (error < 0)
goto err_unsubscribe_event;
} else {
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 64a3492e8002..367509b5b646 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -234,7 +234,7 @@ static ssize_t power_ro_lock_show(struct device *dev,
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
locked = 1;
- ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ ret = sysfs_emit(buf, "%d\n", locked);
mmc_blk_put(md);
@@ -296,9 +296,9 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- ret = snprintf(buf, PAGE_SIZE, "%d\n",
- get_disk_ro(dev_to_disk(dev)) ^
- md->read_only);
+ ret = sysfs_emit(buf, "%d\n",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
mmc_blk_put(md);
return ret;
}
@@ -413,7 +413,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -488,7 +488,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->flags & MMC_BLK_IOC_DROP)
return 0;
- if (idata->flags & MMC_BLK_IOC_SBC)
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/*
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 1642ea72d22c..f10a4dcf1f95 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -351,11 +351,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
root = debugfs_create_dir(mmc_hostname(host), NULL);
host->debugfs_root = root;
- debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops);
+ debugfs_create_file("ios", 0400, root, host, &mmc_ios_fops);
debugfs_create_file("caps", 0600, root, &host->caps, &mmc_caps_fops);
debugfs_create_file("caps2", 0600, root, &host->caps2,
&mmc_caps2_fops);
- debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
+ debugfs_create_file_unsafe("clock", 0600, root, host,
&mmc_clock_fops);
debugfs_create_file_unsafe("err_state", 0600, root, host,
@@ -388,7 +388,8 @@ void mmc_add_card_debugfs(struct mmc_card *card)
root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
card->debugfs_root = root;
- debugfs_create_x32("state", S_IRUSR, root, &card->state);
+ debugfs_create_x32("state", 0400, root, &card->state);
+ debugfs_create_x32("quirks", 0400, root, &card->quirks);
}
void mmc_remove_card_debugfs(struct mmc_card *card)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 8f8781d6c25e..48bda70145ee 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/pm_wakeup.h>
#include <linux/export.h>
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index a59cd592f06e..8b9b34286ef3 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -19,6 +19,20 @@
#include "sd_ops.h"
#include "mmc_ops.h"
+/*
+ * Extensive testing has shown that some specific SD cards
+ * require an increased command timeout to be successfully
+ * initialized.
+ */
+#define SD_APP_OP_COND_PERIOD_US (10 * 1000) /* 10ms */
+#define SD_APP_OP_COND_TIMEOUT_MS 2000 /* 2s */
+
+struct sd_app_op_cond_busy_data {
+ struct mmc_host *host;
+ u32 ocr;
+ struct mmc_command *cmd;
+};
+
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
int err;
@@ -115,10 +129,45 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return mmc_wait_for_app_cmd(card->host, card, &cmd);
}
+static int sd_app_op_cond_cb(void *cb_data, bool *busy)
+{
+ struct sd_app_op_cond_busy_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ u32 ocr = data->ocr;
+ int err;
+
+ *busy = false;
+
+ err = mmc_wait_for_app_cmd(host, NULL, cmd);
+ if (err)
+ return err;
+
+ /* If we're just probing, do a single pass. */
+ if (ocr == 0)
+ return 0;
+
+ /* Wait until reset completes. */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd->resp[0] & R1_SPI_IDLE))
+ return 0;
+ } else if (cmd->resp[0] & MMC_CARD_BUSY) {
+ return 0;
+ }
+
+ *busy = true;
+ return 0;
+}
+
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
- int i, err = 0;
+ struct sd_app_op_cond_busy_data cb_data = {
+ .host = host,
+ .ocr = ocr,
+ .cmd = &cmd
+ };
+ int err;
cmd.opcode = SD_APP_OP_COND;
if (mmc_host_is_spi(host))
@@ -127,36 +176,16 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
- err = mmc_wait_for_app_cmd(host, NULL, &cmd);
- if (err)
- break;
-
- /* if we're just probing, do a single pass */
- if (ocr == 0)
- break;
-
- /* otherwise wait until reset completes */
- if (mmc_host_is_spi(host)) {
- if (!(cmd.resp[0] & R1_SPI_IDLE))
- break;
- } else {
- if (cmd.resp[0] & MMC_CARD_BUSY)
- break;
- }
-
- err = -ETIMEDOUT;
-
- mmc_delay(10);
- }
-
- if (!i)
- pr_err("%s: card never left busy state\n", mmc_hostname(host));
+ err = __mmc_poll_for_busy(host, SD_APP_OP_COND_PERIOD_US,
+ SD_APP_OP_COND_TIMEOUT_MS, &sd_app_op_cond_cb,
+ &cb_data);
+ if (err)
+ return err;
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
- return err;
+ return 0;
}
static int __mmc_send_if_cond(struct mmc_host *host, u32 ocr, u8 pcie_bits,
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 71d885fbc228..c5fdfe2325f8 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -265,16 +265,19 @@ void sdio_unregister_bus(void)
}
/**
- * sdio_register_driver - register a function driver
+ * __sdio_register_driver - register a function driver
* @drv: SDIO function driver
+ * @owner: owning module/driver
*/
-int sdio_register_driver(struct sdio_driver *drv)
+int __sdio_register_driver(struct sdio_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
drv->drv.bus = &sdio_bus_type;
+ drv->drv.owner = owner;
+
return driver_register(&drv->drv);
}
-EXPORT_SYMBOL_GPL(sdio_register_driver);
+EXPORT_SYMBOL_GPL(__sdio_register_driver);
/**
* sdio_unregister_driver - unregister a function driver
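The hunk above lets the SDIO core record the owning module instead of every function driver assigning .owner by hand. The matching header change is not part of this excerpt, so the wrapper below is an assumption about how existing sdio_register_driver() callers keep working; it simply mirrors the usual __foo_register(drv, THIS_MODULE) pattern used elsewhere in the kernel:

/* Hypothetical sketch of the companion include/linux/mmc/sdio_func.h change:
 * existing drivers keep calling sdio_register_driver(drv) and the module
 * owner is filled in for them at the call site.
 */
#define sdio_register_driver(drv) \
	__sdio_register_driver(drv, THIS_MODULE)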
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 39f45c2b6de8..12247219e1c2 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -19,7 +19,7 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
+ irq_handler_t cd_gpio_isr;
char *ro_label;
char *cd_label;
u32 cd_debounce_delay_ms;
@@ -162,8 +162,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
/* Register an alternate interrupt service routine for
* the card-detect GPIO.
*/
-void mmc_gpio_set_cd_isr(struct mmc_host *host,
- irqreturn_t (*isr)(int irq, void *dev_id))
+void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -221,6 +220,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
+/**
+ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
+ * @host: mmc host
+ * @config: Generic pinconf config (from pinconf_to_config_packed())
+ *
+ * This can be used by mmc host drivers to fixup a card-detection GPIO's config
+ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
+ * through mmc_gpiod_request_cd().
+ *
+ * Returns:
+ * 0 on success, or a negative errno value on error.
+ */
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return gpiod_set_config(ctx->cd_gpio, config);
+}
+EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
+
bool mmc_can_gpio_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
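The kernel-doc for mmc_gpiod_set_cd_config() above spells out the intended call order: acquire the card-detect GPIO first, then fix up its config. Below is a minimal, illustrative sketch of a host driver doing exactly that; the "cd" con_id, the pull-up choice, and the five-argument mmc_gpiod_request_cd() signature are assumptions made for the example, and error handling is trimmed to the essentials.

#include <linux/mmc/slot-gpio.h>
#include <linux/pinctrl/pinconf-generic.h>

/* Illustrative only: acquire the card-detect GPIO, then force a pull-up
 * on it through the helper added in this patch.
 */
static int example_setup_cd(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
	if (ret)
		return ret;

	return mmc_gpiod_set_cd_config(mmc,
			pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1));
}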
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index aebc587f77a7..bb0d4fb0892a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -233,6 +233,7 @@ config MMC_SDHCI_OF_DWCMSHC
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects Synopsys DesignWare Cores Mobile Storage Controller
support.
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index dba826db739a..8199d9620075 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -224,18 +224,6 @@ struct mci_slot_pdata {
bool non_removable;
};
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @dma_filter: Filtering function to filter the DMA channel
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
struct atmel_mci_caps {
bool has_dma_conf_reg;
bool has_pdc;
@@ -300,7 +288,8 @@ struct atmel_mci_dma {
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
* @mck: The peripheral bus clock hooked up to the MMC controller.
- * @pdev: Platform device associated with the MMC controller.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Per-slot configuration data.
* @slot: Slots sharing this MMC controller.
* @caps: MCI capabilities depending on MCI version.
* @prepare_data: function to setup MCI before data transfer which
@@ -377,8 +366,9 @@ struct atmel_mci {
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
- struct platform_device *pdev;
+ struct device *dev;
+ struct mci_slot_pdata pdata[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_caps caps;
@@ -530,6 +520,7 @@ static void atmci_show_status_reg(struct seq_file *s,
static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
+ struct device *dev = host->dev;
u32 *buf;
int ret = 0;
@@ -538,7 +529,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
if (!buf)
return -ENOMEM;
- pm_runtime_get_sync(&host->pdev->dev);
+ pm_runtime_get_sync(dev);
/*
* Grab a more or less consistent snapshot. Note that we're
@@ -549,8 +540,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
spin_unlock_bh(&host->lock);
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
@@ -626,7 +617,6 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot)
&host->completed_events);
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
{ .compatible = "atmel,hsmci" },
{ /* sentinel */ }
@@ -634,79 +624,64 @@ static const struct of_device_id atmci_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmci_dt_ids);
-static struct mci_platform_data*
-atmci_of_init(struct platform_device *pdev)
+static int atmci_of_init(struct atmel_mci *host)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
struct device_node *cnp;
- struct mci_platform_data *pdata;
u32 slot_id;
int err;
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
+ if (!np)
+ return dev_err_probe(dev, -EINVAL, "device node not found\n");
for_each_child_of_node(np, cnp) {
if (of_property_read_u32(cnp, "reg", &slot_id)) {
- dev_warn(&pdev->dev, "reg property is missing for %pOF\n",
- cnp);
+ dev_warn(dev, "reg property is missing for %pOF\n", cnp);
continue;
}
if (slot_id >= ATMCI_MAX_NR_SLOTS) {
- dev_warn(&pdev->dev, "can't have more than %d slots\n",
+ dev_warn(dev, "can't have more than %d slots\n",
ATMCI_MAX_NR_SLOTS);
of_node_put(cnp);
break;
}
if (of_property_read_u32(cnp, "bus-width",
- &pdata->slot[slot_id].bus_width))
- pdata->slot[slot_id].bus_width = 1;
+ &host->pdata[slot_id].bus_width))
+ host->pdata[slot_id].bus_width = 1;
- pdata->slot[slot_id].detect_pin =
- devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
+ host->pdata[slot_id].detect_pin =
+ devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
"cd", GPIOD_IN, "cd-gpios");
- err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].detect_pin);
+ err = PTR_ERR_OR_ZERO(host->pdata[slot_id].detect_pin);
if (err) {
if (err != -ENOENT) {
of_node_put(cnp);
- return ERR_PTR(err);
+ return err;
}
- pdata->slot[slot_id].detect_pin = NULL;
+ host->pdata[slot_id].detect_pin = NULL;
}
- pdata->slot[slot_id].non_removable =
+ host->pdata[slot_id].non_removable =
of_property_read_bool(cnp, "non-removable");
- pdata->slot[slot_id].wp_pin =
- devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
+ host->pdata[slot_id].wp_pin =
+ devm_fwnode_gpiod_get(dev, of_fwnode_handle(cnp),
"wp", GPIOD_IN, "wp-gpios");
- err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].wp_pin);
+ err = PTR_ERR_OR_ZERO(host->pdata[slot_id].wp_pin);
if (err) {
if (err != -ENOENT) {
of_node_put(cnp);
- return ERR_PTR(err);
+ return err;
}
- pdata->slot[slot_id].wp_pin = NULL;
+ host->pdata[slot_id].wp_pin = NULL;
}
}
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct mci_platform_data*
-atmci_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
+ return 0;
}
-#endif
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
@@ -738,11 +713,10 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
static void atmci_timeout_timer(struct timer_list *t)
{
- struct atmel_mci *host;
+ struct atmel_mci *host = from_timer(host, t, timer);
+ struct device *dev = host->dev;
- host = from_timer(host, t, timer);
-
- dev_dbg(&host->pdev->dev, "software timeout\n");
+ dev_dbg(dev, "software timeout\n");
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
@@ -860,15 +834,14 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
+ struct device *dev = host->dev;
unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
ATMCI_CMD_TIMEOUT_MS;
WARN_ON(host->cmd);
host->cmd = cmd;
- dev_vdbg(&host->pdev->dev,
- "start command: ARGR=0x%08x CMDR=0x%08x\n",
- cmd->arg, cmd_flags);
+ dev_vdbg(dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags);
atmci_writel(host, ATMCI_ARGR, cmd->arg);
atmci_writel(host, ATMCI_CMDR, cmd_flags);
@@ -878,7 +851,9 @@ static void atmci_send_command(struct atmel_mci *host,
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
- dev_dbg(&host->pdev->dev, "send stop command\n");
+ struct device *dev = host->dev;
+
+ dev_dbg(dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
@@ -951,11 +926,10 @@ static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
if (data)
- dma_unmap_sg(&host->pdev->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ dma_unmap_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
}
/*
@@ -965,6 +939,7 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
+ struct device *dev = host->dev;
int transfer_size = host->data->blocks * host->data->blksz;
int i;
@@ -981,7 +956,7 @@ static void atmci_pdc_complete(struct atmel_mci *host)
atmci_pdc_cleanup(host);
- dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
}
@@ -1003,8 +978,9 @@ static void atmci_dma_complete(void *arg)
{
struct atmel_mci *host = arg;
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
- dev_vdbg(&host->pdev->dev, "DMA complete\n");
+ dev_vdbg(dev, "DMA complete\n");
if (host->caps.has_dma_conf_reg)
/* Disable DMA hardware handshaking on MCI */
@@ -1017,8 +993,7 @@ static void atmci_dma_complete(void *arg)
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
@@ -1092,6 +1067,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
+ struct device *dev = host->dev;
u32 iflags, tmp;
int i;
@@ -1117,8 +1093,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
- dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ dma_map_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
@@ -1244,8 +1219,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
static void atmci_stop_transfer(struct atmel_mci *host)
{
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ struct device *dev = host->dev;
+
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -1261,14 +1237,14 @@ static void atmci_stop_transfer_pdc(struct atmel_mci *host)
static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
struct dma_chan *chan = host->data_chan;
+ struct device *dev = host->dev;
if (chan) {
dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
+ dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -1281,6 +1257,7 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
static void atmci_start_request(struct atmel_mci *host,
struct atmel_mci_slot *slot)
{
+ struct device *dev = host->dev;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
@@ -1296,7 +1273,7 @@ static void atmci_start_request(struct atmel_mci *host,
host->cmd_status = 0;
host->data_status = 0;
- dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+ dev_dbg(dev, "start request: cmd %u\n", mrq->cmd->opcode);
if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
@@ -1375,6 +1352,8 @@ static void atmci_start_request(struct atmel_mci *host,
static void atmci_queue_request(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
+ struct device *dev = host->dev;
+
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
@@ -1384,7 +1363,7 @@ static void atmci_queue_request(struct atmel_mci *host,
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
- dev_dbg(&host->pdev->dev, "queue request\n");
+ dev_dbg(dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
@@ -1394,10 +1373,11 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
+ struct device *dev = host->dev;
struct mmc_data *data;
WARN_ON(slot->mrq);
- dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
+ dev_dbg(dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
@@ -1607,6 +1587,7 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = NULL;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
+ struct device *dev = host->dev;
WARN_ON(host->cmd || host->data);
@@ -1629,12 +1610,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
slot = list_entry(host->queue.next,
struct atmel_mci_slot, queue_node);
list_del(&slot->queue_node);
- dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
- mmc_hostname(slot->mmc));
+ dev_vdbg(dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
- dev_vdbg(&host->pdev->dev, "list empty\n");
+ dev_vdbg(dev, "list empty\n");
host->state = STATE_IDLE;
}
@@ -1770,6 +1750,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
struct atmel_mci *host = from_tasklet(host, t, tasklet);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
+ struct device *dev = host->dev;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
@@ -1778,14 +1759,13 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
state = host->state;
- dev_vdbg(&host->pdev->dev,
- "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
+ dev_vdbg(dev, "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
atmci_readl(host, ATMCI_IMR));
do {
prev_state = state;
- dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
+ dev_dbg(dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
@@ -1798,18 +1778,17 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* END_REQUEST by default, WAITING_NOTBUSY if it's a
* command needing it or DATA_XFER if there is data.
*/
- dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ dev_dbg(dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
- dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
+ dev_dbg(dev, "set completed cmd ready\n");
host->cmd = NULL;
atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
if (mrq->data) {
- dev_dbg(&host->pdev->dev,
- "command with data transfer");
+ dev_dbg(dev, "command with data transfer\n");
/*
* If there is a command error don't start
* data transfer.
@@ -1824,8 +1803,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
} else
state = STATE_DATA_XFER;
} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
- dev_dbg(&host->pdev->dev,
- "command response need waiting notbusy");
+ dev_dbg(dev, "command response need waiting notbusy\n");
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else
@@ -1836,7 +1814,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- dev_dbg(&host->pdev->dev, "set completed data error\n");
+ dev_dbg(dev, "set completed data error\n");
atmci_set_completed(host, EVENT_DATA_ERROR);
state = STATE_END_REQUEST;
break;
@@ -1849,14 +1827,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* to the next step which is WAITING_NOTBUSY in write
* case and directly SENDING_STOP in read case.
*/
- dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
+ dev_dbg(dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
- dev_dbg(&host->pdev->dev,
- "(%s) set completed xfer complete\n",
- __func__);
+ dev_dbg(dev, "(%s) set completed xfer complete\n", __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
if (host->caps.need_notbusy_for_read_ops ||
@@ -1881,12 +1857,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* included) or a write operation. In the latter case,
* we need to send a stop command.
*/
- dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ dev_dbg(dev, "FSM: not busy?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_NOTBUSY))
break;
- dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ dev_dbg(dev, "set completed not busy\n");
atmci_set_completed(host, EVENT_NOTBUSY);
if (host->data) {
@@ -1916,12 +1892,12 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
* in order to go to the end request state instead of
* sending stop again.
*/
- dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ dev_dbg(dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
- dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
+ dev_dbg(dev, "FSM: cmd ready\n");
host->cmd = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
@@ -2120,6 +2096,7 @@ static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
+ struct device *dev = host->dev;
u32 status, mask, pending;
unsigned int pass_count = 0;
@@ -2131,21 +2108,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
- dev_dbg(&host->pdev->dev, "IRQ: data error\n");
+ dev_dbg(dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
| ATMCI_RXRDY | ATMCI_TXRDY
| ATMCI_ENDRX | ATMCI_ENDTX
| ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
- dev_dbg(&host->pdev->dev, "set pending data error\n");
+ dev_dbg(dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
- dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
+ dev_dbg(dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
@@ -2161,7 +2138,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
- dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
+ dev_dbg(dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
@@ -2172,7 +2149,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
if (pending & ATMCI_RXBUFF) {
- dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
+ dev_dbg(dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
@@ -2188,7 +2165,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
- dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
+ dev_dbg(dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
@@ -2205,19 +2182,19 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
* The appropriate workaround is to use the BLKE signal.
*/
if (pending & ATMCI_BLKE) {
- dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ dev_dbg(dev, "IRQ: blke\n");
atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_NOTBUSY) {
- dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ dev_dbg(dev, "IRQ: not_busy\n");
atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
@@ -2228,11 +2205,11 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_write_data_pio(host);
if (pending & ATMCI_CMDRDY) {
- dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ dev_dbg(dev, "IRQ: cmd ready\n");
atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
host->cmd_status = status;
smp_wmb();
- dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ dev_dbg(dev, "set pending cmd rdy\n");
atmci_set_pending(host, EVENT_CMD_RDY);
tasklet_schedule(&host->tasklet);
}
@@ -2264,11 +2241,12 @@ static int atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
u32 sdc_reg, u32 sdio_irq)
{
+ struct device *dev = host->dev;
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
int ret;
- mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), dev);
if (!mmc)
return -ENOMEM;
@@ -2387,29 +2365,13 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
-
- if (PTR_ERR(host->dma.chan) == -ENODEV) {
- struct mci_platform_data *pdata = host->pdev->dev.platform_data;
- dma_cap_mask_t mask;
-
- if (!pdata || !pdata->dma_filter)
- return -ENODEV;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
- pdata->dma_slave);
- if (!host->dma.chan)
- host->dma.chan = ERR_PTR(-ENODEV);
- }
+ struct device *dev = host->dev;
+ host->dma.chan = dma_request_chan(dev, "rxtx");
if (IS_ERR(host->dma.chan))
return PTR_ERR(host->dma.chan);
- dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
- dma_chan_name(host->dma.chan));
+ dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(host->dma.chan));
host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2429,11 +2391,11 @@ static int atmci_configure_dma(struct atmel_mci *host)
*/
static void atmci_get_cap(struct atmel_mci *host)
{
+ struct device *dev = host->dev;
unsigned int version;
version = atmci_get_version(host);
- dev_info(&host->pdev->dev,
- "version: 0x%x\n", version);
+ dev_info(dev, "version: 0x%x\n", version);
host->caps.has_dma_conf_reg = false;
host->caps.has_pdc = true;
@@ -2474,15 +2436,14 @@ static void atmci_get_cap(struct atmel_mci *host)
break;
default:
host->caps.has_pdc = false;
- dev_warn(&host->pdev->dev,
- "Unmanaged mci version, set minimum capabilities\n");
+ dev_warn(dev, "Unmanaged mci version, set minimum capabilities\n");
break;
}
}
static int atmci_probe(struct platform_device *pdev)
{
- struct mci_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct atmel_mci *host;
struct resource *regs;
unsigned int nr_slots;
@@ -2492,32 +2453,28 @@ static int atmci_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmci_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- return PTR_ERR(pdata);
- }
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->pdev = pdev;
+ host->dev = dev;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
- host->mck = devm_clk_get(&pdev->dev, "mci_clk");
+ ret = atmci_of_init(host);
+ if (ret)
+ return dev_err_probe(dev, ret, "Slot information not available\n");
+
+ host->mck = devm_clk_get(dev, "mci_clk");
if (IS_ERR(host->mck))
return PTR_ERR(host->mck);
- host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ host->regs = devm_ioremap(dev, regs->start, resource_size(regs));
if (!host->regs)
return -ENOMEM;
@@ -2532,7 +2489,7 @@ static int atmci_probe(struct platform_device *pdev)
tasklet_setup(&host->tasklet, atmci_tasklet_func);
- ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
+ ret = request_irq(irq, atmci_interrupt, 0, dev_name(dev), host);
if (ret) {
clk_disable_unprepare(host->mck);
return ret;
@@ -2548,12 +2505,12 @@ static int atmci_probe(struct platform_device *pdev)
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
} else if (host->caps.has_pdc) {
- dev_info(&pdev->dev, "using PDC\n");
+ dev_info(dev, "using PDC\n");
host->prepare_data = &atmci_prepare_data_pdc;
host->submit_data = &atmci_submit_data_pdc;
host->stop_transfer = &atmci_stop_transfer_pdc;
} else {
- dev_info(&pdev->dev, "using PIO\n");
+ dev_info(dev, "using PIO\n");
host->prepare_data = &atmci_prepare_data;
host->submit_data = &atmci_submit_data;
host->stop_transfer = &atmci_stop_transfer;
@@ -2563,25 +2520,25 @@ static int atmci_probe(struct platform_device *pdev)
timer_setup(&host->timer, atmci_timeout_timer, 0);
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
- if (pdata->slot[0].bus_width) {
- ret = atmci_init_slot(host, &pdata->slot[0],
+ if (host->pdata[0].bus_width) {
+ ret = atmci_init_slot(host, &host->pdata[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
if (!ret) {
nr_slots++;
host->buf_size = host->slot[0]->mmc->max_req_size;
}
}
- if (pdata->slot[1].bus_width) {
- ret = atmci_init_slot(host, &pdata->slot[1],
+ if (host->pdata[1].bus_width) {
+ ret = atmci_init_slot(host, &host->pdata[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
if (!ret) {
nr_slots++;
@@ -2592,27 +2549,25 @@ static int atmci_probe(struct platform_device *pdev)
}
if (!nr_slots) {
- dev_err(&pdev->dev, "init failed: no slot defined\n");
+ dev_err_probe(dev, ret, "init failed: no slot defined\n");
goto err_init_slot;
}
if (!host->caps.has_rwproof) {
- host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ host->buffer = dma_alloc_coherent(dev, host->buf_size,
&host->buf_phys_addr,
GFP_KERNEL);
if (!host->buffer) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "buffer allocation failed\n");
+ ret = dev_err_probe(dev, -ENOMEM, "buffer allocation failed\n");
goto err_dma_alloc;
}
}
- dev_info(&pdev->dev,
- "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
- host->mapbase, irq, nr_slots);
+ dev_info(dev, "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
+ host->mapbase, irq, nr_slots);
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return 0;
@@ -2624,8 +2579,8 @@ err_dma_alloc:
err_init_slot:
clk_disable_unprepare(host->mck);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
del_timer_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
@@ -2638,13 +2593,13 @@ err_dma_probe_defer:
static void atmci_remove(struct platform_device *pdev)
{
struct atmel_mci *host = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
unsigned int i;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
if (host->buffer)
- dma_free_coherent(&pdev->dev, host->buf_size,
- host->buffer, host->buf_phys_addr);
+ dma_free_coherent(dev, host->buf_size, host->buffer, host->buf_phys_addr);
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
@@ -2663,8 +2618,8 @@ static void atmci_remove(struct platform_device *pdev)
clk_disable_unprepare(host->mck);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
}
#ifdef CONFIG_PM
@@ -2701,7 +2656,7 @@ static struct platform_driver atmci_driver = {
.driver = {
.name = "atmel_mci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(atmci_dt_ids),
+ .of_match_table = atmci_dt_ids,
.pm = &atmci_dev_pm_ops,
},
};
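
The atmel-mci hunks above fold the separate dev_err() call and error assignment into dev_err_probe(), which logs the message (or quietly records the deferral reason when the error is -EPROBE_DEFER) and returns the error value it was given. A minimal sketch of the idiom, with made-up names, assuming a generic platform driver:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <linux/slab.h>

    static int example_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        void *buffer;

        buffer = devm_kzalloc(dev, SZ_4K, GFP_KERNEL);
        if (!buffer)
            /* Logs and returns -ENOMEM in a single statement. */
            return dev_err_probe(dev, -ENOMEM, "buffer allocation failed\n");

        return 0;
    }
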
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 41e94cd14109..c14d7251d0bb 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -474,8 +474,8 @@ static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
return sg_count;
}
-static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
- bool dma64)
+void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
{
__le32 *attr = (__le32 __force *)desc;
@@ -495,6 +495,7 @@ static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
dataddr[0] = cpu_to_le32(addr);
}
}
+EXPORT_SYMBOL(cqhci_set_tran_desc);
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
struct cqhci_host *cq_host, int tag)
@@ -522,7 +523,11 @@ static int cqhci_prep_tran_desc(struct mmc_request *mrq,
if ((i+1) == sg_count)
end = true;
- cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ if (cq_host->ops->set_tran_desc)
+ cq_host->ops->set_tran_desc(cq_host, &desc, addr, len, end, dma64);
+ else
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+
desc += cq_host->trans_desc_len;
}
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index 1a12e40a02e6..fab9d74445ba 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -293,6 +293,9 @@ struct cqhci_host_ops {
int (*program_key)(struct cqhci_host *cq_host,
const union cqhci_crypto_cfg_entry *cfg, int slot);
#endif
+ void (*set_tran_desc)(struct cqhci_host *cq_host, u8 **desc,
+ dma_addr_t addr, int len, bool end, bool dma64);
+
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
@@ -318,6 +321,7 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
int cqhci_deactivate(struct mmc_host *mmc);
+void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, bool dma64);
static inline int cqhci_suspend(struct mmc_host *mmc)
{
return cqhci_deactivate(mmc);
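
The cqhci changes above export cqhci_set_tran_desc() and add an optional ->set_tran_desc callback that cqhci_prep_tran_desc() prefers when the host provides one. A hypothetical host hook (names invented, assuming an sdhci-based host so sdhci_cqe_enable()/sdhci_cqe_disable() are available) would match the new signature like this:

    #include "cqhci.h"
    #include "sdhci.h"

    /* Sketch: forward to the generic helper unless the controller needs
     * to post-process or split the descriptor. */
    static void myhost_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
                                     dma_addr_t addr, int len, bool end,
                                     bool dma64)
    {
        cqhci_set_tran_desc(*desc, addr, len, end, dma64);
    }

    static const struct cqhci_host_ops myhost_cqhci_ops = {
        .enable        = sdhci_cqe_enable,
        .disable       = sdhci_cqe_disable,
        .set_tran_desc = myhost_set_tran_desc,
    };
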
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 8bd938919687..d7427894e0bc 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1337,7 +1337,7 @@ ioremap_fail:
return ret;
}
-static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
+static void davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
@@ -1392,7 +1392,7 @@ static struct platform_driver davinci_mmcsd_driver = {
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
- .remove_new = __exit_p(davinci_mmcsd_remove),
+ .remove_new = davinci_mmcsd_remove,
.id_table = davinci_mmc_devtype,
};
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index 61923a518369..6099756e59b3 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -87,7 +87,6 @@ static int dw_mci_hi3798cv200_execute_tuning(struct dw_mci_slot *slot,
goto tuning_out;
prev_err = err;
- err = 0;
}
tuning_out:
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
index 989ae8dda722..96af693e3e37 100644
--- a/drivers/mmc/host/dw_mmc-hi3798mv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -133,7 +133,6 @@ static int dw_mci_hi3798mv200_execute_tuning_mix_mode(struct dw_mci_slot *slot,
goto tuning_out;
prev_err = err;
- err = 0;
}
tuning_out:
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b88d6dec209f..9a5f75163aca 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -300,6 +300,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
remain = sgm->length;
if (remain > host->data_len)
remain = host->data_len;
+ sgm->consumed = 0;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 1634b1f5d201..a94835b8ab93 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -13,7 +13,6 @@
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 088f8ed4fdc4..a8ee0df47148 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1114,10 +1114,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
- if (slot->vsd)
- gpiod_set_value(slot->vsd, power_on);
- if (slot->vio)
- gpiod_set_value(slot->vio, power_on);
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1254,18 +1269,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
- id, GPIOD_OUT_LOW);
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
- slot->vio = gpiod_get_index_optional(host->dev, "vio",
- id, GPIOD_OUT_LOW);
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
- slot->cover = gpiod_get_index_optional(host->dev, "cover",
- id, GPIOD_IN);
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@@ -1379,13 +1394,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
- host->slot_switch = gpiod_get_optional(host->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(host->slot_switch))
- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
- "error looking up slot switch GPIO\n");
-
-
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1404,6 +1412,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index c675dec587ef..12f4faaaf4ee 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -589,6 +589,9 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
+
+ /* Ensure default value for this driver. */
+ renesas_sdhi_sdbuf_width(host, 16);
} else if (priv->scc_ctl) {
renesas_sdhi_scc_reset(host, priv);
}
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 53d34c3eddce..422fa63a2e99 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -210,7 +210,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.manual_tap_correction = true,
};
-static const struct renesas_sdhi_quirks sdhi_quirks_r9a09g011 = {
+static const struct renesas_sdhi_quirks sdhi_quirks_rzg2l = {
.fixed_addr_mode = true,
.hs400_disabled = true,
};
@@ -255,9 +255,9 @@ static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
.quirks = &sdhi_quirks_r8a77990,
};
-static const struct renesas_sdhi_of_data_with_quirks of_r9a09g011_compatible = {
+static const struct renesas_sdhi_of_data_with_quirks of_rzg2l_compatible = {
.of_data = &of_data_rcar_gen3,
- .quirks = &sdhi_quirks_r9a09g011,
+ .quirks = &sdhi_quirks_rzg2l,
};
static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
@@ -283,7 +283,8 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
- { .compatible = "renesas,sdhi-r9a09g011", .data = &of_r9a09g011_compatible, },
+ { .compatible = "renesas,sdhi-r9a09g011", .data = &of_rzg2l_compatible, },
+ { .compatible = "renesas,rzg2l-sdhi", .data = &of_rzg2l_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index acf5fc3ad7e4..eb8f427f9770 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -80,6 +81,8 @@ struct sdhci_acpi_host {
enum {
DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2),
+ DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3),
};
static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
@@ -719,9 +722,30 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+/* Please keep this list sorted alphabetically */
static const struct dmi_system_id sdhci_acpi_quirks[] = {
{
/*
+ * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+ * reports the card being write-protected even though microSD
+ * cards do not have a write-protect switch at all.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
+ {
+ /* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
+ },
+ {
+ /*
* The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
* the SHC1 ACPI device, this bug causes it to reprogram the
* wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
@@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
{
/*
- * The Acer Aspire Switch 10 (SW5-012) microSD slot always
- * reports the card being write-protected even though microSD
- * cards do not have a write-protect switch at all.
+ * Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version); this
+ * has broken WP reporting and an inverted CD signal.
+ * Note this has more or less the same BIOS as the Lenovo Yoga
+ * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
+ * the 830 / 1050 models, which share the same mainboard, this
+ * model has a different mainboard and the inverted CD and
+ * broken WP are unique to this board.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Full match so as to NOT match the 830/1050 BIOS */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
},
- .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH),
},
{
/*
@@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT10-A's microSD slot always reports the card being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};
@@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+ if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
+ host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
+ } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
+ mmc_gpiod_set_cd_config(host->mmc,
+ PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
}
if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
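
For the T100TA entry above, DMI_QUIRK_SD_CD_ENABLE_PULL_UP makes the probe path hand a packed pinconf value to mmc_gpiod_set_cd_config(). PIN_CONF_PACKED() simply packs a generic pinconf parameter and its argument into one unsigned long (for PIN_CONFIG_BIAS_PULL_UP the argument is the pull strength in ohms), so the quirk above builds roughly:

    #include <linux/pinctrl/pinconf-generic.h>

    /* 20 kOhm bias pull-up on the card-detect GPIO, as in the quirk above. */
    unsigned long cd_config = PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000);

The pinctrl/GPIO driver behind the card-detect line is then expected to honour the bias request, which is why the quirk only applies where the DSDT describes the GpioInt as NoPull.
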
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index c97363e2d86c..3ad87322f6a5 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -335,7 +335,7 @@ static void esdhc_mcf_copy_to_bounce_buffer(struct sdhci_host *host,
data->blksz * data->blocks);
}
-static struct sdhci_ops sdhci_esdhc_ops = {
+static const struct sdhci_ops sdhci_esdhc_ops = {
.reset = esdhc_mcf_reset,
.set_clock = esdhc_mcf_pltfm_set_clock,
.get_max_clock = esdhc_mcf_pltfm_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0aceeeba..e113b99a3eab 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index ab4b964d4058..39edf04fedcf 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -21,6 +21,7 @@
#include <linux/sizes.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
@@ -52,6 +53,9 @@
#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
+/* DWC IP vendor area 2 pointer */
+#define DWCMSHC_P_VENDOR_AREA2 0xea
+
/* Sophgo CV18XX specific Registers */
#define CV18XX_SDHCI_MSHC_CTRL 0x00
#define CV18XX_EMMC_FUNC_EN BIT(0)
@@ -66,6 +70,10 @@
#define CV18XX_SDHCI_PHY_CONFIG 0x4c
#define CV18XX_PHY_TX_BPS BIT(0)
+#define CV18XX_TUNE_MAX 128
+#define CV18XX_TUNE_STEP 1
+#define CV18XX_RETRY_TUNING_MAX 50
+
/* Rockchip specific Registers */
#define DWCMSHC_EMMC_DLL_CTRL 0x800
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
@@ -181,6 +189,10 @@
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
+#define DWCMSHC_SDHCI_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
+ SDHCI_TRNS_BLK_CNT_EN | \
+ SDHCI_TRNS_DMA)
+
enum dwcmshc_rk_type {
DWCMSHC_RK3568,
DWCMSHC_RK3588,
@@ -196,7 +208,9 @@ struct rk35xx_priv {
struct dwcmshc_priv {
struct clk *bus_clk;
- int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */
+ int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
+ int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
+
void *priv; /* pointer to SoC private stuff */
u16 delay_line;
u16 flags;
@@ -455,6 +469,90 @@ static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, vendor, reg);
}
+static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err = sdhci_execute_tuning(mmc, opcode);
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (err)
+ return err;
+
+ /*
+ * Tuning can leave the IP in an active state (Buffer Read Enable bit
+ * set), which prevents entry into low power states (i.e. S0i3). A data
+ * reset will clear it.
+ */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u8 ctrl;
+
+ sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+
+ sdhci_cqe_enable(mmc);
+
+ /*
+ * The "DesignWare Cores Mobile Storage Host Controller
+ * DWC_mshc / DWC_mshc_lite Databook" says:
+ * when "Host Version 4 Enable" is 1 in Host Control 2 register,
+ * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected.
+ * Selection of 32-bit/64-bit System Addressing:
+ * either 32-bit or 64-bit system addressing is selected by
+ * 64-bit Addressing bit in Host Control 2 register.
+ *
+ * On the other hand the "DesignWare Cores Mobile Storage Host
+ * Controller DWC_mshc / DWC_mshc_lite User Guide" says that we have to
+ * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register.
+ */
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ ctrl |= SDHCI_CTRL_ADMA32;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
+ dma_addr_t addr, int len, bool end, bool dma64)
+{
+ int tmplen, offset;
+
+ if (likely(!len || BOUNDARY_OK(addr, len))) {
+ cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+ return;
+ }
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64);
+
+ addr += tmplen;
+ len -= tmplen;
+ *desc += cq_host->trans_desc_len;
+ cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+}
+
+static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -626,6 +724,7 @@ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
/* perform tuning */
sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
host->tuning_err = __sdhci_execute_tuning(host, opcode);
if (host->tuning_err) {
/* disable auto-tuning upon tuning error */
@@ -685,6 +784,113 @@ static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
}
+static void cv18xx_sdhci_set_tap(struct sdhci_host *host, int tap)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 clk;
+ u32 val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val &= ~CV18XX_LATANCY_1T;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+
+ val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
+ FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, tap));
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
+
+ sdhci_writel(host, 0, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ usleep_range(1000, 2000);
+}
+
+static int cv18xx_retry_tuning(struct mmc_host *mmc, u32 opcode, int *cmd_error)
+{
+ int ret, retry = 0;
+
+ while (retry < CV18XX_RETRY_TUNING_MAX) {
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (ret)
+ return ret;
+ retry++;
+ }
+
+ return 0;
+}
+
+static void cv18xx_sdhci_post_tuning(struct sdhci_host *host)
+{
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_INT_STATUS);
+ val |= SDHCI_INT_DATA_AVAIL;
+ sdhci_writel(host, val, SDHCI_INT_STATUS);
+
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+
+static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int min, max, avg, ret;
+ int win_length, target_min, target_max, target_win_length;
+
+ min = max = 0;
+ target_win_length = 0;
+
+ sdhci_reset_tuning(host);
+
+ while (max < CV18XX_TUNE_MAX) {
+ /* find the minimum delay first which can pass tuning */
+ while (min < CV18XX_TUNE_MAX) {
+ cv18xx_sdhci_set_tap(host, min);
+ if (!cv18xx_retry_tuning(host->mmc, opcode, NULL))
+ break;
+ min += CV18XX_TUNE_STEP;
+ }
+
+ /* find the maximum delay which cannot pass tuning */
+ max = min + CV18XX_TUNE_STEP;
+ while (max < CV18XX_TUNE_MAX) {
+ cv18xx_sdhci_set_tap(host, max);
+ if (cv18xx_retry_tuning(host->mmc, opcode, NULL)) {
+ max -= CV18XX_TUNE_STEP;
+ break;
+ }
+ max += CV18XX_TUNE_STEP;
+ }
+
+ win_length = max - min + 1;
+ /* get the largest pass window */
+ if (win_length > target_win_length) {
+ target_win_length = win_length;
+ target_min = min;
+ target_max = max;
+ }
+
+ /* continue to find the next pass window */
+ min = max + CV18XX_TUNE_STEP;
+ }
+
+ cv18xx_sdhci_post_tuning(host);
+
+ /* use average delay to get the best timing */
+ avg = (target_min + target_max) / 2;
+ cv18xx_sdhci_set_tap(host, avg);
+ ret = mmc_send_tuning(host->mmc, opcode, NULL);
+
+ dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+ return ret;
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -692,6 +898,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
.get_max_clock = dwcmshc_get_max_clock,
.reset = sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
};
static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
@@ -711,7 +918,7 @@ static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
.reset = th1520_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
.voltage_switch = dwcmshc_phy_1_8v_init,
- .platform_execute_tuning = &th1520_execute_tuning,
+ .platform_execute_tuning = th1520_execute_tuning,
};
static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
@@ -721,6 +928,7 @@ static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
.get_max_clock = dwcmshc_get_max_clock,
.reset = cv18xx_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
+ .platform_execute_tuning = cv18xx_sdhci_execute_tuning,
};
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
@@ -758,6 +966,73 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
+ .enable = dwcmshc_sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = dwcmshc_cqhci_dumpregs,
+ .set_tran_desc = dwcmshc_set_tran_desc,
+};
+
+static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ bool dma64 = false;
+ u16 clk;
+ int err;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n");
+ goto dsbl_cqe_caps;
+ }
+
+ /*
+ * For the dwcmshc host controller we have to enable the internal clock
+ * before accessing some registers in Vendor Specific Area 2.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (!(clk & SDHCI_CLOCK_INT_EN)) {
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n");
+ goto free_cq_host;
+ }
+
+ cq_host->mmio = host->ioaddr + priv->vendor_specific_area2;
+ cq_host->ops = &dwcmshc_cqhci_ops;
+
+ /* Enable use of 128-bit task descriptors */
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ }
+ err = cqhci_init(cq_host, host->mmc, dma64);
+ if (err) {
+ dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err);
+ goto int_clock_disable;
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "CQE init done\n");
+
+ return;
+
+int_clock_disable:
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+free_cq_host:
+ devm_kfree(&pdev->dev, cq_host);
+
+dsbl_cqe_caps:
+ host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
+}
+
static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
@@ -862,7 +1137,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct rk35xx_priv *rk_priv = NULL;
const struct sdhci_pltfm_data *pltfm_data;
int err;
- u32 extra;
+ u32 extra, caps;
pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
@@ -913,6 +1188,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.request = dwcmshc_request;
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
+ host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;
if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
@@ -962,6 +1238,10 @@ static int dwcmshc_probe(struct platform_device *pdev)
sdhci_enable_v4_mode(host);
#endif
+ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (caps & SDHCI_CAN_64BIT_V4)
+ sdhci_enable_v4_mode(host);
+
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
pm_runtime_get_noresume(dev);
@@ -972,6 +1252,14 @@ static int dwcmshc_probe(struct platform_device *pdev)
if (err)
goto err_rpm;
+ /* Setup Command Queue Engine if enabled */
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ priv->vendor_specific_area2 =
+ sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2);
+
+ dwcmshc_cqhci_init(host, pdev);
+ }
+
if (rk_priv)
dwcmshc_rk35xx_postinit(host, priv);
@@ -999,6 +1287,17 @@ free_pltfm:
return err;
}
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
@@ -1006,8 +1305,14 @@ static void dwcmshc_remove(struct platform_device *pdev)
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct rk35xx_priv *rk_priv = priv->priv;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
sdhci_remove_host(host, 0);
+ dwcmshc_disable_card_clk(host);
+
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
@@ -1027,6 +1332,12 @@ static int dwcmshc_suspend(struct device *dev)
pm_runtime_resume(dev);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -1071,6 +1382,12 @@ static int dwcmshc_resume(struct device *dev)
if (ret)
goto disable_rockchip_clks;
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_resume(host->mmc);
+ if (ret)
+ goto disable_rockchip_clks;
+ }
+
return 0;
disable_rockchip_clks:
@@ -1099,17 +1416,6 @@ static void dwcmshc_enable_card_clk(struct sdhci_host *host)
}
}
-static void dwcmshc_disable_card_clk(struct sdhci_host *host)
-{
- u16 ctrl;
-
- ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- if (ctrl & SDHCI_CLOCK_CARD_EN) {
- ctrl &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
- }
-}
-
static int dwcmshc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
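
Among the dwcmshc additions, dwcmshc_set_tran_desc() exists because a single transfer descriptor on this IP must not cross a 128 MiB boundary (the BOUNDARY_OK() check); when it would, the callback emits two descriptors. A rough worked example with made-up values:

    /* Made-up request: starts 64 KiB below the 128 MiB mark, 128 KiB long. */
    dma_addr_t addr = 0x07ff0000;
    int len = 0x20000;

    /* BOUNDARY_OK(addr, len) is false here, so the split becomes:
     *   tmplen = SZ_128M - (addr & (SZ_128M - 1)) = 0x10000
     *   desc 0: addr 0x07ff0000, len 0x10000, end = false
     *   desc 1: addr 0x08000000, len 0x10000, end = caller's end flag
     */
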
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index e78faef67d7a..5841a9afeb9f 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -925,7 +925,7 @@ static void sdhci_omap_set_timeout(struct sdhci_host *host,
__sdhci_set_timeout(host, cmd);
}
-static struct sdhci_ops sdhci_omap_ops = {
+static const struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
.enable_dma = sdhci_omap_enable_dma,
@@ -1439,6 +1439,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 025b31aa712c..ef89ec382bfe 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -63,7 +63,7 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
return device_wakeup_enable(&chip->pdev->dev);
else if (!cap_cd_wake)
- return device_wakeup_disable(&chip->pdev->dev);
+ device_wakeup_disable(&chip->pdev->dev);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 77911a57b12c..0f81586a19df 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -25,12 +25,6 @@
#define GLI_9750_WT_EN_ON 0x1
#define GLI_9750_WT_EN_OFF 0x0
-#define PCI_GLI_9750_PM_CTRL 0xFC
-#define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
-
-#define PCI_GLI_9750_CORRERR_MASK 0x214
-#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
-
#define SDHCI_GLI_9750_CFG2 0x848
#define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
#define GLI_9750_CFG2_L1DLY_VALUE 0x1F
@@ -152,12 +146,6 @@
#define PCI_GLI_9755_MISC 0x78
#define PCI_GLI_9755_MISC_SSC_OFF BIT(26)
-#define PCI_GLI_9755_PM_CTRL 0xFC
-#define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
-
-#define PCI_GLI_9755_CORRERR_MASK 0x214
-#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
-
#define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
#define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
@@ -547,6 +535,7 @@ static void gl9750_hw_setting(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev;
+ int aer;
u32 value;
pdev = slot->chip->pdev;
@@ -561,16 +550,16 @@ static void gl9750_hw_setting(struct sdhci_host *host)
sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
/* toggle PM state to allow GL9750 to enter ASPM L1.2 */
- pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
- value |= PCI_GLI_9750_PM_STATE;
- pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
- value &= ~PCI_GLI_9750_PM_STATE;
- pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
- value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
- pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
+ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer) {
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+ value |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+ }
gl9750_wt_off(host);
}
@@ -745,6 +734,7 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
+ int aer;
u32 value;
gl9755_wt_on(pdev);
@@ -775,16 +765,16 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
pci_write_config_dword(pdev, PCI_GLI_9755_CFG2, value);
/* toggle PM state to allow GL9755 to enter ASPM L1.2 */
- pci_read_config_dword(pdev, PCI_GLI_9755_PM_CTRL, &value);
- value |= PCI_GLI_9755_PM_STATE;
- pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
- value &= ~PCI_GLI_9755_PM_STATE;
- pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
- value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
- pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
+ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer) {
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+ value |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+ }
gl9755_wt_off(pdev);
}
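
Both the GL9750 and GL9755 paths above drop the vendor-specific config offsets in favour of the same two generic steps: bounce the device through D3hot/D0 so it can settle into ASPM L1.2, and mask the replay-timer-timeout correctable error via the standard AER capability. If the duplicated AER part were factored out, a sketch of the shared helper (name invented) could look like:

    #include <linux/pci.h>

    static void gli_mask_replay_timer_timeout(struct pci_dev *pdev)
    {
        int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        u32 value;

        if (!aer)
            return;

        pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
        value |= PCI_ERR_COR_REP_TIMER;
        pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
    }
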
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0e8a8ac14e56..a71d56c7031f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -17,10 +17,8 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -132,14 +130,16 @@ struct sdhci_s3c {
* struct sdhci_s3c_drv_data - S3C SDHCI platform specific driver data
* @sdhci_quirks: sdhci host specific quirks.
* @no_divider: no or non-standard internal clock divider.
+ * @ops: sdhci_ops to use for this variant
*
* Specifies platform specific configuration of sdhci controller.
* Note: A structure for driver specific platform data is used for future
* expansion of its usage.
*/
struct sdhci_s3c_drv_data {
- unsigned int sdhci_quirks;
- bool no_divider;
+ unsigned int sdhci_quirks;
+ bool no_divider;
+ const struct sdhci_ops *ops;
};
static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
@@ -414,7 +414,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
-static struct sdhci_ops sdhci_s3c_ops = {
+static const struct sdhci_ops sdhci_s3c_ops_s3c6410 = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
@@ -423,6 +423,15 @@ static struct sdhci_ops sdhci_s3c_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_ops sdhci_s3c_ops_exynos4 __maybe_unused = {
+ .get_max_clock = sdhci_cmu_get_max_clock,
+ .set_clock = sdhci_cmu_set_clock,
+ .get_min_clock = sdhci_cmu_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
#ifdef CONFIG_OF
static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
@@ -446,7 +455,7 @@ static int sdhci_s3c_parse_dt(struct device *dev,
return 0;
}
- if (of_get_named_gpio(node, "cd-gpios", 0))
+ if (of_property_present(node, "cd-gpios"))
return 0;
/* assuming internal card detect that will be configured by pinctrl */
@@ -562,7 +571,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
pdata->cfg_gpio(pdev, pdata->max_width);
host->hw_name = "samsung-hsmmc";
- host->ops = &sdhci_s3c_ops;
+ host->ops = &sdhci_s3c_ops_s3c6410;
host->quirks = 0;
host->quirks2 = 0;
host->irq = irq;
@@ -572,6 +581,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
if (drv_data) {
host->quirks |= drv_data->sdhci_quirks;
+ host->ops = drv_data->ops;
sc->no_divider = drv_data->no_divider;
}
@@ -619,16 +629,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
- /*
- * If controller does not have internal clock divider,
- * we can use overriding functions instead of default.
- */
- if (sc->no_divider) {
- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
- }
-
/* It supports additional host capabilities if needed */
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
@@ -760,6 +760,7 @@ MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
static const struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
.no_divider = true,
+ .ops = &sdhci_s3c_ops_exynos4,
};
static const struct of_device_id sdhci_s3c_dt_match[] = {
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index bed57a1c64b5..8776f4287119 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -13,7 +13,6 @@
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -440,7 +439,7 @@ static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
}
}
-static struct sdhci_ops sdhci_sprd_ops = {
+static const struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c79f73459915..746f4cf7ab03 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3439,12 +3439,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
host->data->error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
- } else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
!= MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
+ if (intmask & SDHCI_INT_TUNING_ERROR) {
+ u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+ }
} else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
@@ -3979,7 +3985,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
} else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
*data_error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a20864fc0641..957c7a917ffb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -158,6 +158,7 @@
#define SDHCI_INT_BUS_POWER 0x00800000
#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
+#define SDHCI_INT_TUNING_ERROR 0x04000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
@@ -169,7 +170,7 @@
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
- SDHCI_INT_BLK_GAP)
+ SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_CQE_INT_ERR_MASK ( \
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index d659c59422e1..17ad32cfc0c3 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -141,18 +141,26 @@ static const struct timing_data td[] = {
struct sdhci_am654_data {
struct regmap *base;
- int otap_del_sel[ARRAY_SIZE(td)];
- int itap_del_sel[ARRAY_SIZE(td)];
+ u32 otap_del_sel[ARRAY_SIZE(td)];
+ u32 itap_del_sel[ARRAY_SIZE(td)];
+ u32 itap_del_ena[ARRAY_SIZE(td)];
int clkbuf_sel;
int trm_icp;
int drv_strength;
int strb_sel;
u32 flags;
u32 quirks;
+ bool dll_enable;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
};
+struct window {
+ u8 start;
+ u8 end;
+ u8 length;
+};
+
struct sdhci_am654_driver_data {
const struct sdhci_pltfm_data *pdata;
u32 flags;
@@ -232,11 +240,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
}
static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
- u32 itapdly)
+ u32 itapdly, u32 enable)
{
/* Set ITAPCHGWIN before writing to ITAPDLY */
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+ enable << ITAPDLYENA_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
itapdly << ITAPDLYSEL_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
@@ -253,8 +263,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
- sdhci_am654_write_itapdly(sdhci_am654,
- sdhci_am654->itap_del_sel[timing]);
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
}
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -263,19 +273,17 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
- u32 otap_del_ena;
u32 mask, val;
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
sdhci_set_clock(host, clock);
- /* Setup DLL Output TAP delay */
+ /* Setup Output TAP delay */
otap_del_sel = sdhci_am654->otap_del_sel[timing];
- otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
- val = (otap_del_ena << OTAPDLYENA_SHIFT) |
+ val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
/* Write to STRBSEL for HS400 speed mode */
@@ -290,10 +298,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
- if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
+ if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
sdhci_am654_setup_dll(host, clock);
- else
+ sdhci_am654->dll_enable = true;
+
+ if (timing == MMC_TIMING_MMC_HS400) {
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+ sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
+ }
+
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
+ } else {
sdhci_am654_setup_delay_chain(sdhci_am654, timing);
+ sdhci_am654->dll_enable = false;
+ }
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -306,16 +325,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
+ u32 itap_del_ena;
+ u32 itap_del_sel;
u32 mask, val;
- /* Setup DLL Output TAP delay */
+ /* Setup Output TAP delay */
otap_del_sel = sdhci_am654->otap_del_sel[timing];
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ /* Setup Input TAP delay */
+ itap_del_ena = sdhci_am654->itap_del_ena[timing];
+ itap_del_sel = sdhci_am654->itap_del_sel[timing];
+
+ mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
+ val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
+ (itap_del_sel << ITAPDLYSEL_SHIFT);
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -408,45 +440,110 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
-#define ITAP_MAX 32
+#define ITAPDLY_LENGTH 32
+#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
+
+static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
+ *fail_window, u8 num_fails, bool circular_buffer)
+{
+ u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
+ u8 first_fail_start = 0, last_fail_end = 0;
+ struct device *dev = mmc_dev(host->mmc);
+ struct window pass_window = {0, 0, 0};
+ int prev_fail_end = -1;
+ u8 i;
+
+ if (!num_fails)
+ return ITAPDLY_LAST_INDEX >> 1;
+
+ if (fail_window->length == ITAPDLY_LENGTH) {
+ dev_err(dev, "No passing ITAPDLY, return 0\n");
+ return 0;
+ }
+
+ first_fail_start = fail_window->start;
+ last_fail_end = fail_window[num_fails - 1].end;
+
+ for (i = 0; i < num_fails; i++) {
+ start_fail = fail_window[i].start;
+ end_fail = fail_window[i].end;
+ pass_length = start_fail - (prev_fail_end + 1);
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = prev_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+ prev_fail_end = end_fail;
+ }
+
+ if (!circular_buffer)
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
+ else
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = last_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+
+ if (!circular_buffer)
+ itap = pass_window.start + (pass_window.length >> 1);
+ else
+ itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
+
+ return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
+}
+
static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
- u32 itap;
+ unsigned char timing = host->mmc->ios.timing;
+ struct window fail_window[ITAPDLY_LENGTH];
+ u8 curr_pass, itap;
+ u8 fail_index = 0;
+ u8 prev_pass = 1;
+
+ memset(fail_window, 0, sizeof(fail_window));
/* Enable ITAPDLY */
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
- 1 << ITAPDLYENA_SHIFT);
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+
+ for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
+ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
- for (itap = 0; itap < ITAP_MAX; itap++) {
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+ curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
- cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
- if (cur_val && !prev_val)
- pass_window = itap;
+ if (!curr_pass && prev_pass)
+ fail_window[fail_index].start = itap;
- if (!cur_val)
- fail_len++;
+ if (!curr_pass) {
+ fail_window[fail_index].end = itap;
+ fail_window[fail_index].length++;
+ }
+
+ if (curr_pass && !prev_pass)
+ fail_index++;
- prev_val = cur_val;
+ prev_pass = curr_pass;
}
- /*
- * Having determined the length of the failing window and start of
- * the passing window calculate the length of the passing window and
- * set the final value halfway through it considering the range as a
- * circular buffer
- */
- pass_len = ITAP_MAX - fail_len;
- itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ if (fail_window[fail_index].length != 0)
+ fail_index++;
+
+ itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
+ sdhci_am654->dll_enable);
+
+ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+
+ /* Save ITAPDLY */
+ sdhci_am654->itap_del_sel[timing] = itap;
return 0;
}
-static struct sdhci_ops sdhci_am654_ops = {
+static const struct sdhci_ops sdhci_am654_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -476,7 +573,7 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static struct sdhci_ops sdhci_j721e_8bit_ops = {
+static const struct sdhci_ops sdhci_j721e_8bit_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -500,7 +597,7 @@ static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
.flags = DLL_PRESENT | DLL_CALIB,
};
-static struct sdhci_ops sdhci_j721e_4bit_ops = {
+static const struct sdhci_ops sdhci_j721e_4bit_ops = {
.platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -590,9 +687,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
host->mmc->caps2 &= ~td[i].capability;
}
- if (td[i].itap_binding)
- device_property_read_u32(dev, td[i].itap_binding,
- &sdhci_am654->itap_del_sel[i]);
+ if (td[i].itap_binding) {
+ ret = device_property_read_u32(dev, td[i].itap_binding,
+ &sdhci_am654->itap_del_sel[i]);
+ if (!ret)
+ sdhci_am654->itap_del_ena[i] = 0x1;
+ }
}
return 0;
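
The new sdhci_am654_platform_execute_tuning() records failing ITAPDLY ranges and sdhci_am654_calculate_itap() then returns the midpoint of the largest passing window, treating the 0..31 range as circular when the DLL is enabled. A worked example with made-up tuning results:

    /* Suppose tuning failed for ITAPDLY 6..9 and 20..24 (two fail windows).
     *
     * Linear case (dll_enable == false):
     *   pass windows are 0..5 (len 6), 10..19 (len 10), 25..31 (len 7);
     *   the largest is 10..19, so itap = 10 + (10 >> 1) = 15.
     *
     * Circular case (dll_enable == true):
     *   the tail 25..31 wraps onto the head 0..5, length 31 - 24 + 6 = 13;
     *   that window wins, so itap = (25 + (13 >> 1)) % 32 = 31.
     */
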
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97a00ec9a4d4..b06c8dd51562 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -209,7 +209,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->bdev_file) {
invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
- fput(dev->bdev_file);
+ bdev_fput(dev->bdev_file);
}
kfree(dev);
@@ -265,6 +265,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
+ loff_t size;
char *name;
if (!devname)
@@ -291,7 +292,8 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
goto err_free_block2mtd;
}
- if ((long)bdev->bd_inode->i_size % erase_size) {
+ size = bdev_nr_bytes(bdev);
+ if ((long)size % erase_size) {
pr_err("erasesize must be a divisor of device size\n");
goto err_free_block2mtd;
}
@@ -309,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
dev->mtd.name = name;
- dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index d533475fda15..cef5f9677d39 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -257,4 +257,3 @@ module_spi_driver(mchp23k256_driver);
MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips");
MODULE_AUTHOR("Andrew Lunn <andre@lunn.ch>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:mchp23k256");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d4ce2376d33f..ac8a0a19a021 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -153,7 +153,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
struct flash_platform_data *plat)
{
struct sa_info *info;
- int nr, size, i, ret = 0;
+ int nr, i, ret = 0;
/*
* Count number of devices.
@@ -167,12 +167,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
goto out;
}
- size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
-
/*
* Allocate the map_info structs in one go.
*/
- info = kzalloc(size, GFP_KERNEL);
+ info = kzalloc(struct_size(info, subdev, nr), GFP_KERNEL);
if (!info) {
ret = -ENOMEM;
goto out;
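
struct_size() from <linux/overflow.h> replaces the open-coded sizeof arithmetic removed above: it computes the size of the structure plus nr elements of its flexible-array member, saturating instead of wrapping on overflow. A generic sketch of the pattern, with invented names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example {
        int nr;
        unsigned long elems[];  /* flexible array member */
    };

    static struct example *example_alloc(int nr)
    {
        struct example *e;

        /* Equivalent to sizeof(*e) + nr * sizeof(e->elems[0]),
         * but overflow-checked. */
        e = kzalloc(struct_size(e, elems, nr), GFP_KERNEL);
        if (e)
            e->nr = nr;
        return e;
    }
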
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5887feb347a4..724f917f91ba 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -900,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
@@ -956,8 +956,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
size = mtd_otp_size(mtd, true);
- if (size < 0)
- return size;
+ if (size < 0) {
+ err = size;
+ goto err;
+ }
if (size > 0) {
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
@@ -1012,6 +1014,9 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
err:
nvmem_unregister(mtd->otp_user_nvmem);
+ /* Don't report error if OTP is not supported. */
+ if (err == -EOPNOTSUPP)
+ return 0;
return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a8d12c71f987..1b2ec0fec60c 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -857,7 +857,7 @@ static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
struct brcmnand_soc *soc = ctrl->soc;
int i;
- if (soc->read_data_bus) {
+ if (soc && soc->read_data_bus) {
soc->read_data_bus(soc, flash_cache, buffer, fc_words);
} else {
for (i = 0; i < fc_words; i++)
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index e75d81cf8c21..051deea768db 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -671,8 +671,11 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
break;
}
- if (instr->delay_ns)
+ if (instr->delay_ns) {
+ /* Dummy read to be sure that command is sent before ndelay starts */
+ davinci_nand_readl(info, 0);
ndelay(instr->delay_ns);
+ }
return 0;
}
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5243fab9face..8db7fc424571 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1554,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index a74e64e0cfa3..c02e50608816 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -401,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
if (ret)
pr_warn("failed to initialize read-retry infrastructure");
- return 0;
+ return ret;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c84d3..b8cff9240b28 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index a16b42a88581..3b55b676ca6b 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -102,7 +102,7 @@ nogood:
offset -= master->erasesize;
}
} else {
- offset = directory * master->erasesize;
+ offset = (unsigned long) directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
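
The cast added in the redboot hunk widens one operand before the multiplication, so the product is computed in the wider type instead of wrapping in 32 bits and only then being assigned to a 64-bit offset (the patch casts to unsigned long, which is 64 bits on 64-bit kernels; the sketch below uses u64 to make the intent explicit, with hypothetical demo_* helpers):

#include <linux/types.h>

static loff_t demo_offset_wrong(int directory, u32 erasesize)
{
	/* 32-bit multiply: can wrap before the implicit widening to loff_t */
	return directory * erasesize;
}

static loff_t demo_offset_right(int directory, u32 erasesize)
{
	/* widen one operand first so the whole multiply happens in 64 bits */
	return (u64)directory * erasesize;
}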
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 3e1f1913536b..028514c6996f 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2893,7 +2893,7 @@ static int spi_nor_late_init_params(struct spi_nor *nor)
spi_nor_init_default_locking_ops(nor);
if (params->n_banks > 1)
- params->bank_size = div64_u64(params->size, params->n_banks);
+ params->bank_size = div_u64(params->size, params->n_banks);
return 0;
}
@@ -3406,7 +3406,7 @@ static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
return -EINVAL;
mtd_region[i].erasesize = erasesize;
- mtd_region[i].numblocks = div64_ul(region[i].size, erasesize);
+ mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
mtd_region[i].offset = region[i].offset;
}
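
The two spi-nor hunks switch from the 64-by-64 dividers to div_u64(), which takes a 32-bit divisor. From <linux/math64.h>: div_u64() divides a u64 by a u32, while div64_u64() divides a u64 by a u64; when the divisor is known to fit in 32 bits (a bank count or an erase-block size), the 64/32 helper is the cheaper choice, especially on 32-bit architectures. A sketch under that assumption, with hypothetical demo_* wrappers:

#include <linux/math64.h>

static u64 demo_bank_size(u64 flash_size, u32 n_banks)
{
	/* divisor fits in 32 bits, so the 64/32 helper is sufficient */
	return div_u64(flash_size, n_banks);
}

static u64 demo_numblocks(u64 region_size, u32 erasesize)
{
	return div_u64(region_size, erasesize);
}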
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 775816112932..78c0022697ec 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -64,7 +64,7 @@ static void mux_chip_release(struct device *dev)
{
struct mux_chip *mux_chip = to_mux_chip(dev);
- ida_simple_remove(&mux_ida, mux_chip->id);
+ ida_free(&mux_ida, mux_chip->id);
kfree(mux_chip);
}
@@ -111,7 +111,7 @@ struct mux_chip *mux_chip_alloc(struct device *dev,
mux_chip->dev.of_node = dev->of_node;
dev_set_drvdata(&mux_chip->dev, mux_chip);
- mux_chip->id = ida_simple_get(&mux_ida, 0, 0, GFP_KERNEL);
+ mux_chip->id = ida_alloc(&mux_ida, GFP_KERNEL);
if (mux_chip->id < 0) {
int err = mux_chip->id;
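
ida_simple_get()/ida_simple_remove() are deprecated wrappers; ida_alloc() allocates the lowest free ID starting at 0 and ida_free() releases it, as the mux core conversion above shows. A minimal sketch of the replacement pattern with a hypothetical IDA:

#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static int demo_get_id(void)
{
	/* returns the lowest free ID >= 0, or a negative errno */
	return ida_alloc(&demo_ida, GFP_KERNEL);
}

static void demo_put_id(int id)
{
	ida_free(&demo_ida, id);
}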
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8ca0bc223b30..9920b3a68ed1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -290,6 +290,19 @@ config GTP
To compile this drivers as a module, choose M here: the module
will be called gtp.
+config PFCP
+ tristate "Packet Forwarding Control Protocol (PFCP)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ help
+	  This allows one to create PFCP virtual interfaces that allow
+	  setting up software and hardware offload of PFCP packets.
+	  Note that this module does not implement the PFCP protocol in
+	  kernel space; there is no support for parsing any PFCP messages.
+
+	  To compile this driver as a module, choose M here: the module
+ will be called pfcp.
+
config AMT
tristate "Automatic Multicast Tunneling (AMT)"
depends on INET && IP_MULTICAST
@@ -507,7 +520,7 @@ source "drivers/net/ipa/Kconfig"
config NET_SB1000
tristate "General Instruments Surfboard 1000"
- depends on PNP
+ depends on ISA && PNP
help
This is a driver for the General Instrument (also known as
NextLevel) SURFboard 1000 internal
@@ -627,6 +640,7 @@ config NETDEVSIM
depends on PSAMPLE || PSAMPLE=n
depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n
select NET_DEVLINK
+ select PAGE_POOL
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7cab36f94782..9c053673d6b2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_PFCP) += pfcp.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a51b9dab6d3a..d1d07a1d4fbc 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig ARCNET
- depends on NETDEVICES && (ISA || PCI || PCMCIA)
+ depends on NETDEVICES && (ISA || PCI || PCMCIA) && HAS_IOPORT
tristate "ARCnet support"
help
If you have a network card of this type, say Y and check out the
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b54275389f8a..bee60b377d7c 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -16,6 +16,7 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
/*
* RECON_THRESHOLD is the maximum number of RECON messages to receive
@@ -268,7 +269,7 @@ struct arcnet_local {
struct net_device *dev;
int reply_status;
- struct tasklet_struct reply_tasklet;
+ struct work_struct reply_work;
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 166bfc3c8e6c..530c15d6a5eb 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -54,6 +54,7 @@
#include <linux/errqueue.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "arcdevice.h"
#include "com9026.h"
@@ -424,9 +425,9 @@ out:
rtnl_unlock();
}
-static void arcnet_reply_tasklet(struct tasklet_struct *t)
+static void arcnet_reply_work(struct work_struct *t)
{
- struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet);
+ struct arcnet_local *lp = from_work(lp, t, reply_work);
struct sk_buff *ackskb, *skb;
struct sock_exterr_skb *serr;
@@ -527,7 +528,7 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
- tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet);
+ INIT_WORK(&lp->reply_work, arcnet_reply_work);
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
@@ -620,7 +621,7 @@ int arcnet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
- tasklet_kill(&lp->reply_tasklet);
+ cancel_work_sync(&lp->reply_work);
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
@@ -984,7 +985,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
->ack_tx(dev, ackstatus);
}
lp->reply_status = ackstatus;
- tasklet_hi_schedule(&lp->reply_tasklet);
+ queue_work(system_bh_highpri_wq, &lp->reply_work);
}
if (lp->cur_tx != -1)
release_arcbuf(dev, lp->cur_tx);
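
The arcnet change follows the general tasklet-to-BH-workqueue migration: the handler takes a work_struct, from_work() (a container_of() wrapper) recovers the driver context, queue_work() on one of the system BH workqueues keeps execution in softirq context, and cancel_work_sync() replaces tasklet_kill() on teardown. A minimal sketch with a hypothetical demo_priv structure, assuming a tree that already has the BH workqueue infrastructure used above:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct demo_priv {
	int reply_status;
	struct work_struct reply_work;
};

static void demo_reply_work(struct work_struct *t)
{
	struct demo_priv *priv = from_work(priv, t, reply_work);

	/* runs in BH context when queued on a system_bh_* workqueue */
	pr_debug("reply status %d\n", priv->reply_status);
}

static void demo_init(struct demo_priv *priv)
{
	INIT_WORK(&priv->reply_work, demo_reply_work);
}

static void demo_irq_path(struct demo_priv *priv, int status)
{
	priv->reply_status = status;
	queue_work(system_bh_highpri_wq, &priv->reply_work);
}

static void demo_teardown(struct demo_priv *priv)
{
	cancel_work_sync(&priv->reply_work);
}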
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 339db6e4a1d5..d5c56ca91b77 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -61,6 +61,7 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
+ IP_TUNNEL_DECLARE_FLAGS(key) = { };
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
@@ -137,7 +138,10 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
bareudp->dev->stats.rx_dropped++;
goto drop;
}
- tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+
+ __set_bit(IP_TUNNEL_KEY_BIT, key);
+
+ tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
bareudp->dev->stats.rx_dropped++;
goto drop;
@@ -285,10 +289,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
__be16 sport, df;
@@ -316,7 +320,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
skb_scrub_packet(skb, xnet);
err = -ENOSPC;
@@ -338,7 +343,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
free_dst:
@@ -350,10 +356,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
struct in6_addr saddr, daddr;
@@ -402,7 +408,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
free_dst:
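
bareudp is converted from the old __be16 TUNNEL_* flag words to the bitmap-based tunnel-flag API: IP_TUNNEL_DECLARE_FLAGS() declares a bitmap sized for all flag bits, and the generic __set_bit()/test_bit() helpers replace the bitwise masks. The underlying bitmap idiom it builds on looks like this (the DEMO_* bits are hypothetical, standing in for the IP_TUNNEL_*_BIT values):

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum demo_flag_bits {
	DEMO_KEY_BIT,
	DEMO_CSUM_BIT,
	DEMO_DONT_FRAGMENT_BIT,
	__DEMO_FLAG_CNT,
};

static bool demo_wants_csum(void)
{
	DECLARE_BITMAP(flags, __DEMO_FLAG_CNT) = { };

	__set_bit(DEMO_KEY_BIT, flags);
	__set_bit(DEMO_CSUM_BIT, flags);

	return test_bit(DEMO_CSUM_BIT, flags);	/* true */
}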
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c5ed0a7cb18..3c3fcce4acd4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3014,8 +3014,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
tags = NULL;
/* Find out through which dev should the packet go */
- rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
- RTO_ONLINK, 0);
+ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0,
+ RT_SCOPE_LINK);
if (IS_ERR(rt)) {
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
@@ -4710,7 +4710,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
}
}
- bond_dev->mtu = new_mtu;
+ WRITE_ONCE(bond_dev->mtu, new_mtu);
return 0;
@@ -5245,7 +5245,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb_get_queue_mapping(skb)) {
+ if (READ_ONCE(slave->queue_id) == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -5933,7 +5933,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_set_slave_arr(bond, NULL, NULL);
- list_del(&bond->bond_list);
+ list_del_rcu(&bond->bond_list);
bond_debug_unregister(bond);
}
@@ -6347,7 +6347,7 @@ static int bond_init(struct net_device *bond_dev)
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
- list_add_tail(&bond->bond_list, &bn->dev_list);
+ list_add_tail_rcu(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -6477,16 +6477,16 @@ static int __init bonding_init(void)
if (res)
goto out;
+ bond_create_debugfs();
+
res = register_pernet_subsys(&bond_net_ops);
if (res)
- goto out;
+ goto err_net_ops;
res = bond_netlink_init();
if (res)
goto err_link;
- bond_create_debugfs();
-
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -6501,10 +6501,11 @@ static int __init bonding_init(void)
out:
return res;
err:
- bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
+err_net_ops:
+ bond_destroy_debugfs();
goto out;
}
@@ -6513,11 +6514,11 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
- bond_destroy_debugfs();
-
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
+ bond_destroy_debugfs();
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
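
The bond_list conversion is the standard pattern for letting readers walk a list without holding rtnl: writers still add and remove entries under their existing lock but use the _rcu list primitives, and readers traverse under rcu_read_lock() with list_for_each_entry_rcu(). A minimal sketch with hypothetical demo_* names, assuming the writer-side lock and the grace-period handling are provided elsewhere:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

struct demo_bond {
	struct list_head bond_list;
	char name[16];
};

static LIST_HEAD(demo_dev_list);

/* writer: called with the writer-side lock held */
static void demo_add(struct demo_bond *bond)
{
	list_add_tail_rcu(&bond->bond_list, &demo_dev_list);
}

static void demo_del(struct demo_bond *bond)
{
	list_del_rcu(&bond->bond_list);
	/* the caller must wait for a grace period before freeing bond */
}

/* reader: no rtnl needed, only an RCU read-side critical section */
static void demo_dump(void)
{
	struct demo_bond *bond;

	rcu_read_lock();
	list_for_each_entry_rcu(bond, &demo_dev_list, bond_list)
		pr_info("%s\n", bond->name);
	rcu_read_unlock();
}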
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 29b4c3d1b9b6..2a6a424806aa 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -51,7 +51,8 @@ static int bond_fill_slave_info(struct sk_buff *skb,
slave_dev->addr_len, slave->perm_hwaddr))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID,
+ READ_ONCE(slave->queue_id)))
goto nla_put_failure;
if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4cdbc7e084f4..0cacd7027e35 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1589,7 +1589,7 @@ static int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ WRITE_ONCE(update_slave->queue_id, qid);
out:
return ret;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 43be458422b3..7edf72ec816a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -209,7 +209,7 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %*phC\n",
slave->dev->addr_len, slave->perm_hwaddr);
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+ seq_printf(seq, "Slave queue ID: %d\n", READ_ONCE(slave->queue_id));
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct port *port = &SLAVE_AD_INFO(slave)->port;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2805135a7205..1e13bb170515 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -37,12 +37,12 @@ static ssize_t bonding_show_bonds(const struct class *cls,
{
const struct bond_net *bn =
container_of_const(attr, struct bond_net, class_attr_bonding_masters);
- int res = 0;
struct bonding *bond;
+ int res = 0;
- rtnl_lock();
+ rcu_read_lock();
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
+ list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -55,7 +55,7 @@ static ssize_t bonding_show_bonds(const struct class *cls,
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
@@ -170,10 +170,9 @@ static ssize_t bonding_show_slaves(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -184,7 +183,7 @@ static ssize_t bonding_show_slaves(struct device *d,
res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
- rtnl_unlock();
+ rcu_read_unlock();
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -626,10 +625,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -638,12 +636,13 @@ static ssize_t bonding_show_queue_id(struct device *d,
break;
}
res += sysfs_emit_at(buf, res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ slave->dev->name,
+ READ_ONCE(slave->queue_id));
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 313866f2c0e4..36d0e8440b5b 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sysfs_emit(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", READ_ONCE(slave->queue_id));
}
static SLAVE_ATTR_RO(queue_id);
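
slave->queue_id is now written with WRITE_ONCE() and read with READ_ONCE() everywhere it can race with the lockless (RCU-protected) sysfs and procfs readers above. The pairing tells the compiler not to tear, fuse or re-load the access, which is what makes the lockless read well defined. Sketched on a hypothetical field:

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_slave {
	u16 queue_id;
};

/* writer side, serialized by the subsystem's own locking */
static void demo_set_queue_id(struct demo_slave *slave, u16 qid)
{
	WRITE_ONCE(slave->queue_id, qid);
}

/* lockless reader, e.g. running under rcu_read_lock() only */
static u16 demo_get_queue_id(const struct demo_slave *slave)
{
	return READ_ONCE(slave->queue_id);
}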
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 9ef1359319f0..467ef19de1c1 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,6 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
+ depends on ISA
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 3a3be5cdfc1f..83e724e0ab87 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -338,7 +338,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_GPL(can_change_mtu);
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4b2f9cb17fc3..01168db4c106 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,6 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
+ depends on ISA
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 285635c23443..f67e85807100 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -140,7 +140,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index f7fabba707ea..9e1b7d41005f 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -135,7 +135,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index b2eeff04f4c8..8f50abe739b7 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1266,95 +1266,70 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_modes(interface));
}
-static void b53_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- struct ethtool_keee *p = &dev->ports[port].eee;
- u8 rgmii_ctrl = 0, reg = 0, off;
- bool tx_pause = false;
- bool rx_pause = false;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
+ u8 rgmii_ctrl = 0, off;
- /* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && dsa_is_cpu_port(ds, port))
- tx_pause = rx_pause = true;
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
- if (phydev->pause) {
- if (phydev->asym_pause)
- tx_pause = true;
- rx_pause = true;
- }
+ /* Configure the port RGMII clock delay by DLL disabled and
+ * tx_clk aligned timing (restoring to reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
- b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
- tx_pause, rx_pause);
- b53_force_link(dev, port, phydev->link);
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay neither on transmission nor reception, so the
+ * BCM53125 must also be configured accordingly to account for
+ * the lack of delay and introduce
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, phydev->interface);
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
- if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ u8 reg = 0;
- /* Configure the port RGMII clock delay by DLL disabled and
- * tx_clk aligned timing (restoring to reset defaults)
- */
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
-
- /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
- * sure that we enable the port TX clock internal delay to
- * account for this internal delay that is inserted, otherwise
- * the switch won't be able to receive correctly.
- *
- * PHY_INTERFACE_MODE_RGMII means that we are not introducing
- * any delay neither on transmission nor reception, so the
- * BCM53125 must also be configured accordingly to account for
- * the lack of delay and introduce
- *
- * The BCM53125 switch has its RX clock and TX clock control
- * swapped, hence the reason why we modify the TX clock path in
- * the "RGMII" case
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
-
- dev_info(ds->dev, "Configured port %d for %s\n", port,
- phy_modes(phydev->interface));
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
- /* configure MII port if necessary */
- if (is5325(dev)) {
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
- /* reverse mii needs to be enabled */
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- reg | PORT_OVERRIDE_RV_MII_25);
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- &reg);
-
- if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- dev_err(ds->dev,
- "Failed to enable reverse MII mode\n");
- return;
- }
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
}
}
-
- /* Re-negotiate EEE if it was enabled already */
- p->eee_enabled = b53_eee_init(ds, port, phydev);
}
void b53_port_event(struct dsa_switch *ds, int port)
@@ -1408,30 +1383,48 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
dev->ops->phylink_get_caps(dev, port, config);
}
-static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
if (!dev->ops->phylink_mac_select_pcs)
return NULL;
- return dev->ops->phylink_mac_select_pcs(dev, port, interface);
+ return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void b53_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ phy_interface_t interface = state->interface;
+ struct dsa_switch *ds = dp->ds;
+ struct b53_device *dev = ds->priv;
+ int port = dp->index;
+
+ if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ b53_adjust_63xx_rgmii(ds, port, interface);
+
+ if (mode == MLO_AN_FIXED) {
+ if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
+ b53_adjust_531x5_rgmii(ds, port, interface);
+
+ /* configure MII port if necessary */
+ if (is5325(dev))
+ b53_adjust_5325_mii(ds, port);
+ }
}
-EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
+static void b53_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
+ int port = dp->index;
if (mode == MLO_AN_PHY)
return;
@@ -1445,24 +1438,31 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_down);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
+static void b53_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
struct b53_device *dev = ds->priv;
+ struct ethtool_keee *p = &dev->ports[dp->index].eee;
+ int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, interface);
-
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
return;
+ }
if (mode == MLO_AN_FIXED) {
+ /* Force flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
+ tx_pause = rx_pause = true;
+
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
@@ -1473,7 +1473,6 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
@@ -2268,6 +2267,13 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
return JMS_MAX_SIZE;
}
+static const struct phylink_mac_ops b53_phylink_mac_ops = {
+ .mac_select_pcs = b53_phylink_mac_select_pcs,
+ .mac_config = b53_phylink_mac_config,
+ .mac_link_down = b53_phylink_mac_link_down,
+ .mac_link_up = b53_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -2278,12 +2284,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
- .adjust_link = b53_adjust_link,
.phylink_get_caps = b53_phylink_get_caps,
- .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
- .phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_link_down = b53_phylink_mac_link_down,
- .phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.get_mac_eee = b53_get_mac_eee,
@@ -2726,6 +2727,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
/* Let DSA handle the case were multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index c13a907947f1..05141176daf5 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -352,18 +352,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack);
int b53_vlan_add(struct dsa_switch *ds, int port,
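
The b53 changes follow the DSA-wide move from the dsa_switch_ops phylink_mac_* callbacks to a dedicated phylink_mac_ops table: the callbacks now take a phylink_config, recover the port with dsa_phylink_to_port(), and the table is hooked up through ds->phylink_mac_ops at allocation time. The shape of the conversion, mirroring the hunks above with hypothetical demo_* handlers:

#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>

static void demo_phylink_mac_config(struct phylink_config *config,
				    unsigned int mode,
				    const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	dev_dbg(dp->ds->dev, "configuring port %d for %s\n",
		dp->index, phy_modes(state->interface));
}

static void demo_phylink_mac_link_down(struct phylink_config *config,
				       unsigned int mode,
				       phy_interface_t interface)
{
}

static void demo_phylink_mac_link_up(struct phylink_config *config,
				     struct phy_device *phydev,
				     unsigned int mode,
				     phy_interface_t interface,
				     int speed, int duplex,
				     bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	dev_dbg(dp->ds->dev, "port %d link up at %d Mbps\n", dp->index, speed);
}

static const struct phylink_mac_ops demo_phylink_mac_ops = {
	.mac_config	= demo_phylink_mac_config,
	.mac_link_down	= demo_phylink_mac_link_down,
	.mac_link_up	= demo_phylink_mac_link_up,
};

static void demo_hook_up(struct dsa_switch *ds)
{
	/* replaces the old dsa_switch_ops.phylink_mac_* entries */
	ds->phylink_mac_ops = &demo_phylink_mac_ops;
}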
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index bc77ee9e6d0a..ed1e6560df25 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -740,16 +740,19 @@ static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
}
-static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
u32 id_mode_dis = 0, port_mode;
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl;
u32 reg;
- if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ priv = bcm_sf2_to_priv(dp->ds);
+
+ if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID))
return;
switch (state->interface) {
@@ -770,7 +773,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
return;
}
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index);
/* Clear id_mode_dis bit, and the existing port mode, let
* RGMII_MODE_EN bet set by mac_link_{up,down}
@@ -809,13 +812,16 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
reg_writel(priv, reg, reg_rgmii_ctrl);
}
-static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
+ int port = dp->index;
u32 reg, offset;
+ priv = bcm_sf2_to_priv(dp->ds);
if (priv->wol_ports_mask & BIT(port))
return;
@@ -824,23 +830,26 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
reg &= ~LINK_STS;
core_writel(priv, reg, offset);
- bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false);
}
-static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_keee *p = &priv->dev->ports[port].eee;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl = 0;
+ struct ethtool_keee *p;
+ int port = dp->index;
u32 reg, offset;
- bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true);
+ priv = bcm_sf2_to_priv(dp->ds);
offset = bcm_sf2_port_override_offset(priv, port);
if (phy_interface_mode_is_rgmii(interface) ||
@@ -886,8 +895,10 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
core_writel(priv, reg, offset);
- if (mode == MLO_AN_PHY && phydev)
- p->eee_enabled = b53_eee_init(ds, port, phydev);
+ if (mode == MLO_AN_PHY && phydev) {
+ p = &priv->dev->ports[port].eee;
+ p->eee_enabled = b53_eee_init(dp->ds, port, phydev);
+ }
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
@@ -1196,6 +1207,12 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
return cnt;
}
+static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
+ .mac_config = bcm_sf2_sw_mac_config,
+ .mac_link_down = bcm_sf2_sw_mac_link_down,
+ .mac_link_up = bcm_sf2_sw_mac_link_up,
+};
+
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
@@ -1206,9 +1223,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
.phylink_get_caps = bcm_sf2_sw_get_caps,
- .phylink_mac_config = bcm_sf2_sw_mac_config,
- .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
- .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
.phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
@@ -1399,6 +1413,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->dev = dev;
ds = dev->ds;
ds->ops = &bcm_sf2_ops;
+ ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 5249a1c2a80b..bfe21f9f7dcd 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -27,7 +27,8 @@ void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
}
/* Get nanoseconds from PTP clock */
-static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u16 nsl, nsh;
@@ -45,16 +46,19 @@ static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_prets(sts);
nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_postts(sts);
return (u64)nsl | ((u64)nsh << 16);
}
-static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
- ns = hellcreek_ptp_clock_read(hellcreek);
+ ns = hellcreek_ptp_clock_read(hellcreek, sts);
if (ns < hellcreek->last_ts)
hellcreek->seconds++;
hellcreek->last_ts = ns;
@@ -72,7 +76,7 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
{
u64 s;
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
if (hellcreek->last_ts > ns)
s = hellcreek->seconds * NSEC_PER_SEC;
else
@@ -81,14 +85,15 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
return s;
}
-static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int hellcreek_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u64 ns;
mutex_lock(&hellcreek->ptp_lock);
- ns = __hellcreek_ptp_gettime(hellcreek);
+ ns = __hellcreek_ptp_gettime(hellcreek, sts);
mutex_unlock(&hellcreek->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -184,7 +189,7 @@ static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
struct timespec64 now, then = ns_to_timespec64(delta);
- hellcreek_ptp_gettime(ptp, &now);
+ hellcreek_ptp_gettimex(ptp, &now, NULL);
now = timespec64_add(now, then);
hellcreek_ptp_settime(ptp, &now);
@@ -233,7 +238,7 @@ static void hellcreek_ptp_overflow_check(struct work_struct *work)
hellcreek = dw_overflow_to_hellcreek(dw);
mutex_lock(&hellcreek->ptp_lock);
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
mutex_unlock(&hellcreek->ptp_lock);
schedule_delayed_work(&hellcreek->overflow_work,
@@ -409,7 +414,7 @@ int hellcreek_ptp_setup(struct hellcreek *hellcreek)
hellcreek->ptp_clock_info.pps = 0;
hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
- hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.gettimex64 = hellcreek_ptp_gettimex;
hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
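
Switching hellcreek from .gettime64 to .gettimex64 lets the PTP core timestamp the system clock right around the actual hardware read: ptp_read_system_prets()/ptp_read_system_postts() record pre/post system timestamps into the ptp_system_timestamp the caller passes in, and both are no-ops when it is NULL. A sketch of the callback shape, with the hardware access reduced to a hypothetical placeholder:

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static u64 demo_read_hw_ns(struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before the HW read */
	ns = 0;				/* here: read the device's nanosecond register */
	ptp_read_system_postts(sts);	/* system time just after the HW read */

	return ns;
}

static int demo_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	*ts = ns_to_timespec64(demo_read_hw_ns(sts));
	return 0;
}

/* hooked up via ptp_clock_info.gettimex64 = demo_gettimex64; */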
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fcb20eac332a..02f07b870f10 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1007,15 +1007,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
static void lan9303_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
+ u8 *buf = data;
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
- for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
- strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
- ETH_GSTRING_LEN);
- }
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++)
+ ethtool_puts(&buf, lan9303_mib[u].name);
}
static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1293,14 +1292,29 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void lan9303_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void lan9303_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void lan9303_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct lan9303 *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct lan9303 *chip = dp->ds->priv;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
u32 ctl;
u32 reg;
@@ -1330,6 +1344,12 @@ static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
regmap_write(chip->regmap, flow_ctl_reg[port], reg);
}
+static const struct phylink_mac_ops lan9303_phylink_mac_ops = {
+ .mac_config = lan9303_phylink_mac_config,
+ .mac_link_down = lan9303_phylink_mac_link_down,
+ .mac_link_up = lan9303_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops lan9303_switch_ops = {
.get_tag_protocol = lan9303_get_tag_protocol,
.setup = lan9303_setup,
@@ -1337,7 +1357,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.phy_read = lan9303_phy_read,
.phy_write = lan9303_phy_write,
.phylink_get_caps = lan9303_phylink_get_caps,
- .phylink_mac_link_up = lan9303_phylink_mac_link_up,
.get_ethtool_stats = lan9303_get_ethtool_stats,
.get_sset_count = lan9303_get_sset_count,
.port_enable = lan9303_port_enable,
@@ -1365,6 +1384,7 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
+ chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
base = chip->phy_addr_base;
chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index de48b194048f..a557049e34f5 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1670,11 +1670,13 @@ static void gswip_port_set_pause(struct gswip_priv *priv, int port,
mdio_phy, GSWIP_MDIO_PHYp(port));
}
-static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
u32 miicfg = 0;
miicfg |= GSWIP_MII_CFG_LDCLKDIS;
@@ -1700,7 +1702,7 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
miicfg |= GSWIP_MII_CFG_MODE_GMII;
break;
default:
- dev_err(ds->dev,
+ dev_err(dp->ds->dev,
"Unsupported interface: %d\n", state->interface);
return;
}
@@ -1726,28 +1728,32 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
}
}
-static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
- if (!dsa_is_cpu_port(ds, port))
+ if (!dsa_port_is_cpu(dp))
gswip_port_set_link(priv, port, false);
}
-static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
- if (!dsa_is_cpu_port(ds, port)) {
+ if (!dsa_port_is_cpu(dp)) {
gswip_port_set_link(priv, port, true);
gswip_port_set_speed(priv, port, speed, interface);
gswip_port_set_duplex(priv, port, duplex);
@@ -1824,6 +1830,12 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(gswip_rmon_cnt);
}
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
@@ -1842,9 +1854,6 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -1868,9 +1877,6 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -2136,6 +2142,7 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops;
+ priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 394ca8678d2b..c1b906c05a02 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -4,6 +4,8 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
depends on NET_DSA
select NET_DSA_TAG_KSZ
select NET_DSA_TAG_NONE
+ select NET_IEEE8021Q_HELPERS
+ select DCB
help
This driver adds support for Microchip KSZ9477 series switch and
KSZ8795/KSZ88x3 switch chips.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 49459a50dbc8..1cfba1ec9355 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
-ksz_switch-objs := ksz_common.o
+ksz_switch-objs := ksz_common.o ksz_dcb.o
ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
ksz_switch-objs += ksz8795.o
ksz_switch-objs += lan937x_main.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 1a5225264e6a..ae43077e76c3 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -19,8 +19,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
@@ -56,9 +54,10 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+int ksz8_all_queues_split(struct ksz_device *dev, int queues);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 14923535ca7e..d27b9c36d73f 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -127,37 +127,71 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
return -EOPNOTSUPP;
}
-static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
{
- u8 hi, lo;
+ u8 mask_4q, mask_2q;
+ u8 reg_4q, reg_2q;
+ u8 data_4q = 0;
+ u8 data_2q = 0;
+ int ret;
- /* Number of queues can only be 1, 2, or 4. */
- switch (queue) {
- case 4:
- case 3:
- queue = PORT_QUEUE_SPLIT_4;
- break;
- case 2:
- queue = PORT_QUEUE_SPLIT_2;
- break;
- default:
- queue = PORT_QUEUE_SPLIT_1;
+ if (ksz_is_ksz88x3(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_0;
+ reg_2q = REG_PORT_CTRL_2;
+
+ /* KSZ8795 family switches have Weighted Fair Queueing (WFQ)
+ * enabled by default. Enable it for KSZ8873 family switches
+ * too. Default value for KSZ8873 family is strict priority,
+ * which should be enabled by using TC_SETUP_QDISC_ETS, not
+ * by default.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE,
+ WEIGHTED_FAIR_QUEUE_ENABLE);
+ if (ret)
+ return ret;
+ } else {
+ mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_13;
+ reg_2q = REG_PORT_CTRL_0;
+
+ /* TODO: this is legacy from initial KSZ8795 driver, should be
+ * moved to appropriate place in the future.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_19,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED);
+ if (ret)
+ return ret;
}
- ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
- ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
- lo &= ~PORT_QUEUE_SPLIT_L;
- if (queue & PORT_QUEUE_SPLIT_2)
- lo |= PORT_QUEUE_SPLIT_L;
- hi &= ~PORT_QUEUE_SPLIT_H;
- if (queue & PORT_QUEUE_SPLIT_4)
- hi |= PORT_QUEUE_SPLIT_H;
- ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
- ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
-
- /* Default is port based for egress rate limit. */
- if (queue != PORT_QUEUE_SPLIT_1)
- ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
- true);
+
+ if (queues == 4)
+ data_4q = mask_4q;
+ else if (queues == 2)
+ data_2q = mask_2q;
+
+ ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q);
+ if (ret)
+ return ret;
+
+ return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q);
+}
+
+int ksz8_all_queues_split(struct ksz_device *dev, int queues)
+{
+ struct dsa_switch *ds = dev->ds;
+ const struct dsa_port *dp;
+
+ dsa_switch_for_each_port(dp, ds) {
+ int ret = ksz8_port_queue_split(dev, dp->index, queues);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
@@ -385,39 +419,39 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
int timeout = 100;
const u32 *masks;
const u16 *regs;
+ int ret;
masks = dev->info->masks;
regs = dev->info->regs;
do {
- ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ if (ret)
+ return ret;
+
timeout--;
} while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
/* Entry is not ready for accessing. */
- if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
- return -EAGAIN;
- /* Entry is ready for accessing. */
- } else {
- ksz_read8(dev, regs[REG_IND_DATA_8], data);
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY])
+ return -ETIMEDOUT;
- /* There is no valid entry in the table. */
- if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
- return -ENXIO;
- }
- return 0;
+ /* Entry is ready for accessing. */
+ return ksz_read8(dev, regs[REG_IND_DATA_8], data);
}
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
+static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u16 *entries)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
+ u64 buf = 0;
u8 data;
- int rc;
+ int cnt;
+ int ret;
shifts = dev->info->shifts;
masks = dev->info->masks;
@@ -426,49 +460,50 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz8_valid_dyn_entry(dev, &data);
+ if (ret)
+ goto unlock_alu;
- rc = ksz8_valid_dyn_entry(dev, &data);
- if (rc == -EAGAIN) {
- if (addr == 0)
- *entries = 0;
- } else if (rc == -ENXIO) {
+ if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) {
*entries = 0;
- /* At least one valid entry in the table. */
- } else {
- u64 buf = 0;
- int cnt;
-
- ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
- data_hi = (u32)(buf >> 32);
- data_lo = (u32)buf;
-
- /* Check out how many valid entry in the table. */
- cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
- cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
- cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
- shifts[DYNAMIC_MAC_ENTRIES];
- *entries = cnt + 1;
-
- *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
- shifts[DYNAMIC_MAC_FID];
- *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
- shifts[DYNAMIC_MAC_SRC_PORT];
- *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
- shifts[DYNAMIC_MAC_TIMESTAMP];
-
- mac_addr[5] = (u8)data_lo;
- mac_addr[4] = (u8)(data_lo >> 8);
- mac_addr[3] = (u8)(data_lo >> 16);
- mac_addr[2] = (u8)(data_lo >> 24);
-
- mac_addr[1] = (u8)data_hi;
- mac_addr[0] = (u8)(data_hi >> 8);
- rc = 0;
+ goto unlock_alu;
}
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
+ if (ret)
+ goto unlock_alu;
+
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+	/* Check how many valid entries are in the table. */
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
+ *entries = cnt + 1;
+
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
- return rc;
+ return ret;
}
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
@@ -1193,28 +1228,28 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
int ksz8_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 src_port;
u8 mac[ETH_ALEN];
+ u8 src_port, fid;
+ u16 entries = 0;
+ int ret, i;
- do {
+ for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) {
ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
- &timestamp, &entries);
- if (!ret && port == src_port) {
+ &entries);
+ if (ret)
+ return ret;
+
+ if (i >= entries)
+ return 0;
+
+ if (port == src_port) {
ret = cb(mac, fid, false, data);
if (ret)
- break;
+ return ret;
}
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ }
- return ret;
+ return 0;
}
static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
@@ -1512,6 +1547,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
const u32 *masks;
+ int queues;
u8 member;
masks = dev->info->masks;
@@ -1519,19 +1555,20 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- if (!ksz_is_ksz88x3(dev))
- ksz8795_set_prio_queue(dev, port, 4);
+ /* For KSZ88x3 enable only one queue by default, otherwise we won't
+ * be able to get rid of PCP prios on Port 2.
+ */
+ if (ksz_is_ksz88x3(dev))
+ queues = 1;
+ else
+ queues = dev->info->num_tx_queues;
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+ ksz8_port_queue_split(dev, port, queues);
/* replace priority */
ksz_port_cfg(dev, port, P_802_1P_CTRL,
masks[PORT_802_1P_REMAPPING], false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
-
if (cpu_port)
member = dsa_user_ports(ds);
else
@@ -1701,11 +1738,15 @@ static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex,
SW_10_MBIT, ctrl);
}
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+
/* If the port is the CPU port, apply special handling. Only the CPU
* port is configured via global registers.
*/
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 7c9341ef73b0..69566a5d9cda 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -124,7 +124,8 @@
#define PORT_BASED_PRIO_3 3
#define PORT_INSERT_TAG BIT(2)
#define PORT_REMOVE_TAG BIT(1)
-#define PORT_QUEUE_SPLIT_L BIT(0)
+#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0)
+#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0)
#define REG_PORT_1_CTRL_1 0x11
#define REG_PORT_2_CTRL_1 0x21
@@ -143,6 +144,7 @@
#define REG_PORT_4_CTRL_2 0x42
#define REG_PORT_5_CTRL_2 0x52
+#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7)
#define PORT_INGRESS_FILTER BIT(6)
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
@@ -463,10 +465,7 @@
#define REG_PORT_4_CTRL_13 0xE1
#define REG_PORT_5_CTRL_13 0xF1
-#define PORT_QUEUE_SPLIT_H BIT(1)
-#define PORT_QUEUE_SPLIT_1 0
-#define PORT_QUEUE_SPLIT_2 1
-#define PORT_QUEUE_SPLIT_4 2
+#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1)
#define PORT_DROP_TAG BIT(0)
#define REG_PORT_1_CTRL_14 0xB2
@@ -794,5 +793,6 @@
#define TAIL_TAG_LOOKUP BIT(7)
#define FID_ENTRIES 128
+#define KSZ8_DYN_MAC_ENTRIES 1024
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 7f745628c84d..f8ad7833f5d9 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1158,18 +1158,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
/* replace priority */
ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
false);
ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
MTI_PVID_REPLACE, false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
/* force flow control for non-PHY ports only */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
index 8b2f5be667e0..ca7830ab168a 100644
--- a/drivers/net/dsa/microchip/ksz9477_tc_flower.c
+++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
@@ -124,6 +124,9 @@ static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 2b510f150dd8..1e0085cd9a9a 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -24,10 +24,12 @@
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
+#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_dcb.h"
#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
@@ -253,6 +255,28 @@ static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
{ KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
};
+static void ksz8830_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface);
+
+static const struct phylink_mac_ops ksz8830_phylink_mac_ops = {
+ .mac_config = ksz8830_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+};
+
+static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops ksz8_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
@@ -277,7 +301,6 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.mirror_add = ksz8_port_mirror_add,
.mirror_del = ksz8_port_mirror_del,
.get_caps = ksz8_get_caps,
- .phylink_mac_link_up = ksz8_phylink_mac_link_up,
.config_cpu_port = ksz8_config_cpu_port,
.enable_stp_addr = ksz8_enable_stp_addr,
.reset = ksz8_reset_switch,
@@ -286,13 +309,19 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.change_mtu = ksz8_change_mtu,
};
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause);
+static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
@@ -319,7 +348,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.get_wol = ksz9477_get_wol,
.set_wol = ksz9477_set_wol,
.wol_pre_shutdown = ksz9477_wol_pre_shutdown,
@@ -331,6 +359,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.exit = ksz9477_switch_exit,
};
+static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
.teardown = lan937x_teardown,
@@ -359,7 +393,6 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -1194,9 +1227,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1223,7 +1257,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1262,7 +1298,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1287,7 +1325,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1312,7 +1352,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8830_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1336,9 +1378,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1370,7 +1413,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1402,7 +1447,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1432,7 +1479,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1458,9 +1507,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1486,9 +1536,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1519,8 +1570,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1551,9 +1602,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1578,9 +1630,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1605,9 +1658,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1636,9 +1690,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1667,9 +1722,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -2304,6 +2360,7 @@ static int ksz_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
ds->configure_vlan_while_not_filtering = false;
+ ds->dscp_prio_mapping_is_global = true;
if (dev->dev_ops->setup) {
ret = dev->dev_ops->setup(ds);
@@ -2347,6 +2404,10 @@ static int ksz_setup(struct dsa_switch *ds)
goto out_ptp_clock_unregister;
}
+ ret = ksz_dcb_init(dev);
+ if (ret)
+ goto out_ptp_clock_unregister;
+
/* start switch */
regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL],
SW_START, SW_START);
@@ -2523,14 +2584,15 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
return 0;
}
-static void ksz_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface)
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
/* Read all MIB counters when the link is going down. */
- p->read = true;
+ dev->ports[dp->index].read = true;
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
@@ -2660,9 +2722,33 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
+static int ksz9477_set_default_prio_queue_mapping(struct ksz_device *dev,
+ int port)
+{
+ u32 queue_map = 0;
+ int ipm;
+
+ for (ipm = 0; ipm < dev->info->num_ipms; ipm++) {
+ int queue;
+
+ /* Traffic Type (TT) corresponds to the Internal Priority Map
+ * (IPM) in the switch. Traffic Class (TC) corresponds to the
+ * queue in the switch.
+ */
+ queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues);
+ if (queue < 0)
+ return queue;
+
+ queue_map |= queue << (ipm * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
+
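For reference, the mapping written by ksz9477_set_default_prio_queue_mapping() above is just eight 4-bit fields packed into one 32-bit register (KSZ9477_PORT_TC_MAP_S = 4, IPM 0 in the lowest nibble). The standalone sketch below reproduces only that packing arithmetic; the tt_to_tc[] table is a made-up stand-in for the kernel's ieee8021q_tt_to_tc() helper, not the real IEEE 802.1Q mapping.

#include <stdint.h>
#include <stdio.h>

#define KSZ9477_PORT_TC_MAP_S	4	/* width of one IPM field, per ksz_common.h */

int main(void)
{
	/* Stand-in for ieee8021q_tt_to_tc(ipm, num_tx_queues); the values
	 * below are illustrative placeholders, not the standard table.
	 */
	static const int tt_to_tc[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	uint32_t queue_map = 0;
	int ipm;

	for (ipm = 0; ipm < 8; ipm++)
		queue_map |= (uint32_t)tt_to_tc[ipm] << (ipm * KSZ9477_PORT_TC_MAP_S);

	/* one 4-bit field per IPM, IPM 0 in the lowest nibble */
	printf("KSZ9477_PORT_MRI_TC_MAP__4 value: 0x%08x\n", (unsigned int)queue_map);
	return 0;
}

With this placeholder table the program prints 0x33221100; the real register value depends on num_tx_queues and on the helper's table.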
static int ksz_port_setup(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
+ int ret;
if (!dsa_is_user_port(ds, port))
return 0;
@@ -2670,11 +2756,17 @@ static int ksz_port_setup(struct dsa_switch *ds, int port)
/* setup user port */
dev->dev_ops->port_setup(dev, port, false);
+ if (!is_ksz8(dev)) {
+ ret = ksz9477_set_default_prio_queue_mapping(dev, port);
+ if (ret)
+ return ret;
+ }
+
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
*/
- return 0;
+ return ksz_dcb_init_port(dev, port);
}
void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
@@ -3065,16 +3157,23 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
return interface;
}
-static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ksz8830_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+
+ dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN);
+}
+
+static void ksz_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ksz_device *dev = ds->priv;
-
- if (ksz_is_ksz88x3(dev)) {
- dev->ports[port].manual_flow = !(state->pause & MLO_PAUSE_AN);
- return;
- }
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
/* Internal PHYs */
if (dev->info->internal_phy[port])
@@ -3087,9 +3186,6 @@ static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
ksz_set_xmii(dev, port, state->interface);
- if (dev->dev_ops->phylink_mac_config)
- dev->dev_ops->phylink_mac_config(dev, port, mode, state);
-
if (dev->dev_ops->setup_rgmii_delay)
dev->dev_ops->setup_rgmii_delay(dev, port);
}
@@ -3187,13 +3283,16 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
struct ksz_port *p;
p = &dev->ports[port];
@@ -3209,18 +3308,6 @@ static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
-{
- struct ksz_device *dev = ds->priv;
-
- dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
-}
-
static int ksz_switch_detect(struct ksz_device *dev)
{
u8 id1, id2, id4;
@@ -3522,7 +3609,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
int queue;
- if (tc_prio > KSZ9477_MAX_TC_PRIO)
+ if (tc_prio >= dev->info->num_ipms)
break;
queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
@@ -3534,8 +3621,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
static int ksz_tc_ets_del(struct ksz_device *dev, int port)
{
- int ret, queue, tc_prio, s;
- u32 queue_map = 0;
+ int ret, queue;
/* To restore the default chip configuration, set all queues to use the
* WRR scheduler with a weight of 1.
@@ -3547,31 +3633,10 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
return ret;
}
- switch (dev->info->num_tx_queues) {
- case 2:
- s = 2;
- break;
- case 4:
- s = 1;
- break;
- case 8:
- s = 0;
- break;
- default:
- return -EINVAL;
- }
-
/* Revert the queue mapping for TC-priority to its default setting on
* the chip.
*/
- for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) {
- int queue;
-
- queue = tc_prio >> s;
- queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
- }
-
- return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+ return ksz9477_set_default_prio_queue_mapping(dev, port);
}
static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
@@ -3616,7 +3681,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (!dev->info->tc_ets_supported)
+ if (is_ksz8(dev))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -3881,9 +3946,6 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
- .phylink_mac_config = ksz_phylink_mac_config,
- .phylink_mac_link_up = ksz_phylink_mac_link_up,
- .phylink_mac_link_down = ksz_mac_link_down,
.port_setup = ksz_port_setup,
.set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
@@ -3925,6 +3987,13 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_setup_tc = ksz_setup_tc,
.get_mac_eee = ksz_get_mac_eee,
.set_mac_eee = ksz_set_mac_eee,
+ .port_get_default_prio = ksz_port_get_default_prio,
+ .port_set_default_prio = ksz_port_set_default_prio,
+ .port_get_dscp_prio = ksz_port_get_dscp_prio,
+ .port_add_dscp_prio = ksz_port_add_dscp_prio,
+ .port_del_dscp_prio = ksz_port_del_dscp_prio,
+ .port_get_apptrust = ksz_port_get_apptrust,
+ .port_set_apptrust = ksz_port_set_apptrust,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
@@ -4328,6 +4397,9 @@ int ksz_switch_register(struct ksz_device *dev)
/* set the real number of ports */
dev->ds->num_ports = dev->info->port_cnt;
+ /* set the phylink ops */
+ dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 40c11b0d6b62..c784fd23a993 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -19,9 +19,13 @@
#include "ksz_ptp.h"
#define KSZ_MAX_NUM_PORTS 8
+/* all KSZ switches count ports from 1 */
+#define KSZ_PORT_1 0
+#define KSZ_PORT_2 1
struct ksz_device;
struct ksz_port;
+struct phylink_mac_ops;
enum ksz_regmap_width {
KSZ_REGMAP_8,
@@ -58,9 +62,10 @@ struct ksz_chip_data {
int port_cnt;
u8 port_nirqs;
u8 num_tx_queues;
+ u8 num_ipms; /* number of Internal Priority Maps */
bool tc_cbs_supported;
- bool tc_ets_supported;
const struct ksz_dev_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
@@ -349,9 +354,6 @@ struct ksz_dev_ops {
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- void (*phylink_mac_config)(struct ksz_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
unsigned int mode,
phy_interface_t interface,
@@ -620,6 +622,11 @@ static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
return dev->chip_id == KSZ8830_CHIP_ID;
}
+static inline bool is_ksz8(struct ksz_device *dev)
+{
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev);
+}
+
static inline int is_lan937x(struct ksz_device *dev)
{
return dev->chip_id == LAN9370_CHIP_ID ||
@@ -722,7 +729,6 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ9477_PORT_MRI_TC_MAP__4 0x0808
#define KSZ9477_PORT_TC_MAP_S 4
-#define KSZ9477_MAX_TC_PRIO 7
/* CBS related registers */
#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
new file mode 100644
index 000000000000..086bc9b3cf53
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include <linux/dsa/ksz_common.h>
+#include <net/dsa.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
+
+#include "ksz_common.h"
+#include "ksz_dcb.h"
+#include "ksz8.h"
+
+#define KSZ8_REG_PORT_1_CTRL_0 0x10
+#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
+#define KSZ8_PORT_802_1P_ENABLE BIT(5)
+#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+
+#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
+#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
+
+#define KSZ9477_REG_SW_MAC_TOS_CTRL 0x033e
+#define KSZ9477_SW_TOS_DSCP_REMAP BIT(0)
+#define KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M GENMASK(5, 3)
+
+#define KSZ9477_REG_DIFFSERV_PRIO_MAP 0x0340
+
+#define KSZ9477_REG_PORT_MRI_PRIO_CTRL 0x0801
+#define KSZ9477_PORT_HIGHEST_PRIO BIT(7)
+#define KSZ9477_PORT_OR_PRIO BIT(6)
+#define KSZ9477_PORT_MAC_PRIO_ENABLE BIT(4)
+#define KSZ9477_PORT_VLAN_PRIO_ENABLE BIT(3)
+#define KSZ9477_PORT_802_1P_PRIO_ENABLE BIT(2)
+#define KSZ9477_PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define KSZ9477_PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define KSZ9477_REG_PORT_MRI_MAC_CTRL 0x0802
+#define KSZ9477_PORT_BASED_PRIO_M GENMASK(2, 0)
+
+struct ksz_apptrust_map {
+ u8 apptrust;
+ u8 bit;
+};
+
+static const struct ksz_apptrust_map ksz8_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ8_PORT_802_1P_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ8_PORT_DIFFSERV_ENABLE },
+};
+
+static const struct ksz_apptrust_map ksz9477_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ9477_PORT_802_1P_PRIO_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ9477_PORT_DIFFSERV_PRIO_ENABLE },
+};
+
+/* ksz_supported_apptrust[] - Supported apptrust selectors and Priority Order
+ * of Internal Priority Map (IPM) sources.
+ *
+ * This array defines the apptrust selectors supported by the hardware, where
+ * the index within the array indicates the priority of the selector - lower
+ * indices correspond to higher priority. This fixed priority scheme is due to
+ * the hardware's design, which does not support configurable priority among
+ * different priority sources.
+ *
+ * The priority sources, including Tail Tag, ACL, VLAN PCP and DSCP are ordered
+ * by the hardware's fixed logic, as detailed below. The order reflects a
+ * non-configurable precedence where certain types of priority information
+ * override others:
+ *
+ * 1. Tail Tag - Highest priority, overrides ACL, VLAN PCP, and DSCP priorities.
+ * 2. ACL - Overrides VLAN PCP and DSCP priorities.
+ * 3. VLAN PCP - Overrides DSCP priority.
+ * 4. DSCP - Lowest priority, does not override any other priority source.
+ *
+ * In this context, the lower array index (higher priority) of
+ * 'DCB_APP_SEL_PCP' reflects its precedence over 'IEEE_8021QAZ_APP_SEL_DSCP'
+ * within the hardware's fixed priority scheme.
+ *
+ * DCB_APP_SEL_PCP - Priority Code Point selector
+ * IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ */
+static const u8 ksz_supported_apptrust[] = {
+ DCB_APP_SEL_PCP,
+ IEEE_8021QAZ_APP_SEL_DSCP,
+};
+
+static const char * const ksz_supported_apptrust_variants[] = {
+ "empty", "dscp", "pcp", "dscp pcp"
+};
+
+static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
+ u8 *mask, int *shift)
+{
+ if (is_ksz8(dev)) {
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ } else {
+ *reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
+ *mask = KSZ9477_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ9477_PORT_BASED_PRIO_M);
+ }
+}
+
+/**
+ * ksz_get_dscp_prio_reg - Retrieves the DSCP-to-priority-mapping register
+ * @dev: Pointer to the KSZ switch device structure
+ * @reg: Pointer to the register address to be set
+ * @per_reg: Pointer to the number of DSCP values per register
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the DSCP to priority mapping register, the number of
+ * DSCP values per register, and the mask to be set.
+ */
+static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
+ int *per_reg, u8 *mask)
+{
+ if (ksz_is_ksz87xx(dev)) {
+ *reg = KSZ8765_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else if (ksz_is_ksz88x3(dev)) {
+ *reg = KSZ88X3_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else {
+ *reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
+ *per_reg = 2;
+ *mask = GENMASK(2, 0);
+ }
+}
+
+/**
+ * ksz_get_apptrust_map_and_reg - Retrieves the apptrust map and register
+ * @dev: Pointer to the KSZ switch device structure
+ * @map: Pointer to the apptrust map to be set
+ * @reg: Pointer to the register address to be set
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the apptrust map and register address for the
+ * apptrust configuration.
+ */
+static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
+ const struct ksz_apptrust_map **map,
+ int *reg, u8 *mask)
+{
+ if (is_ksz8(dev)) {
+ *map = ksz8_apptrust_map_to_bit;
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ } else {
+ *map = ksz9477_apptrust_map_to_bit;
+ *reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
+ *mask = KSZ9477_PORT_802_1P_PRIO_ENABLE |
+ KSZ9477_PORT_DIFFSERV_PRIO_ENABLE;
+ }
+}
+
+/**
+ * ksz_port_get_default_prio - Retrieves the default priority for a port on a
+ * KSZ switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number from which to get the default priority
+ *
+ * This function fetches the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: The default priority of the port on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret, reg, shift;
+ u8 data, mask;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ return (data & mask) >> shift;
+}
+
+/**
+ * ksz88x3_port_set_default_prio_quirks - Quirks for default priority
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function implements quirks for setting the default priority on KSZ88x3
+ * devices. On Port 2, no priority provider other than PCP works, so
+ * configuring a default priority on Port 2 is not possible.
+ * On Port 1, it is not possible to configure port priority if PCP
+ * apptrust on Port 2 is disabled. Since we disable multiple queues on the
+ * switch to disable PCP on Port 2, we need to ensure that the default priority
+ * configuration on Port 1 is in agreement with the configuration on Port 2.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port,
+ u8 prio)
+{
+ if (!prio)
+ return 0;
+
+ if (port == KSZ_PORT_2) {
+ dev_err(dev->dev, "Port priority configuration is not working on Port 2\n");
+ return -EINVAL;
+ } else if (port == KSZ_PORT_1) {
+ u8 port2_data;
+ int ret;
+
+ ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
+ &port2_data);
+ if (ret)
+ return ret;
+
+ if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
+ dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function sets the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, shift, ret;
+ u8 mask;
+
+ if (prio >= dev->info->num_ipms)
+ return -EINVAL;
+
+ if (ksz_is_ksz88x3(dev)) {
+ ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio);
+ if (ret)
+ return ret;
+ }
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
+}
+
+/**
+ * ksz_port_get_dscp_prio - Retrieves the priority for a DSCP value on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the priority
+ * @dscp: DSCP value for which to get the priority
+ *
+ * This function fetches the priority value from the switch's global
+ * DSCP-to-priority mapping table for the specified DSCP value.
+ *
+ * Return: The priority value for the DSCP on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, per_reg, ret, shift;
+ u8 data, mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ if (!is_ksz8(dev)) {
+ ret = ksz_read8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, &data);
+ if (ret)
+ return ret;
+
+ /* If DSCP remapping is disabled, DSCP bits 3-5 are used as
+ * Internal Priority Map (IPM)
+ */
+ if (!(data & KSZ9477_SW_TOS_DSCP_REMAP))
+ return FIELD_GET(KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M,
+ dscp);
+ }
+
+ /* DSCP remapping is enabled, so look the priority up in the global
+ * DSCP-to-priority mapping table.
+ */
+ reg += dscp / per_reg;
+ ret = ksz_read8(dev, reg, &data);
+ if (ret)
+ return ret;
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return (data >> shift) & mask;
+}
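To make the register math above concrete: with DSCP remapping disabled on KSZ9xxx the priority is simply DSCP bits 5:3, and with remapping enabled the per-DSCP field sits at reg + dscp / per_reg with a shift of (dscp % per_reg) * (8 / per_reg). A minimal user-space sketch of that arithmetic; the per_reg values and field masks are taken from ksz_get_dscp_prio_reg() above, and DSCP 46 is only an example.

#include <stdio.h>

/* Field layouts from ksz_get_dscp_prio_reg(): KSZ8 packs four 2-bit entries
 * per register, KSZ9477 packs two 3-bit entries per register.
 */
static void dscp_field(const char *name, int per_reg, unsigned int mask, int dscp)
{
	int reg_off = dscp / per_reg;
	int shift = (dscp % per_reg) * (8 / per_reg);

	printf("%s: DSCP %d -> register offset %d, shift %d, mask 0x%x\n",
	       name, dscp, reg_off, shift, mask);
}

int main(void)
{
	int dscp = 46;	/* EF, used only as an example */

	/* KSZ9xxx fallback when DSCP remapping is disabled: bits 5:3 */
	printf("remap disabled: DSCP %d -> IPM %d\n", dscp, (dscp >> 3) & 0x7);

	dscp_field("ksz8 (per_reg=4)", 4, 0x3, dscp);
	dscp_field("ksz9477 (per_reg=2)", 2, 0x7, dscp);
	return 0;
}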
+
+/**
+ * ksz_set_global_dscp_entry - Sets the global DSCP-to-priority mapping entry
+ * @dev: Pointer to the KSZ switch device structure
+ * @dscp: DSCP value for which to set the priority
+ * @ipm: Priority value to set
+ *
+ * This function sets the global DSCP-to-priority mapping entry for the
+ * specified DSCP value.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_set_global_dscp_entry(struct ksz_device *dev, u8 dscp, u8 ipm)
+{
+ int reg, per_reg, shift;
+ u8 mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return ksz_rmw8(dev, reg + (dscp / per_reg), mask << shift,
+ ipm << shift);
+}
+
+/**
+ * ksz_init_global_dscp_map - Initializes the global DSCP-to-priority mapping
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the global DSCP-to-priority mapping table for the
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_init_global_dscp_map(struct ksz_device *dev)
+{
+ int ret, dscp;
+
+ /* On KSZ9xxx variants, DSCP remapping is disabled by default.
+ * Enable it to get predictable and reproducible behavior across
+ * different devices.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_rmw8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL,
+ KSZ9477_SW_TOS_DSCP_REMAP,
+ KSZ9477_SW_TOS_DSCP_REMAP);
+ if (ret)
+ return ret;
+ }
+
+ for (dscp = 0; dscp < DSCP_MAX; dscp++) {
+ int ipm, tt;
+
+ /* Map DSCP to Traffic Type, which corresponds to the Internal
+ * Priority Map (IPM) in the switch.
+ */
+ if (!is_ksz8(dev)) {
+ ipm = ietf_dscp_to_ieee8021q_tt(dscp);
+ } else {
+ /* KSZ8xxx variants have no IPM-to-queue remapping table, so we
+ * need to convert DSCP to Traffic Type and then to a queue.
+ */
+ tt = ietf_dscp_to_ieee8021q_tt(dscp);
+ if (tt < 0)
+ return tt;
+
+ ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);
+ }
+
+ if (ipm < 0)
+ return ipm;
+
+ ret = ksz_set_global_dscp_entry(dev, dscp, ipm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
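The loop above takes two different paths: KSZ9xxx programs the Traffic Type directly as the IPM, while KSZ8xxx has no IPM-to-queue remapping table and therefore needs DSCP -> Traffic Type -> queue. A standalone sketch of just that control flow; both helpers below are invented placeholders for ietf_dscp_to_ieee8021q_tt() and ieee8021q_tt_to_tc(), not the real mappings.

#include <stdio.h>

/* Placeholder for ietf_dscp_to_ieee8021q_tt(): map DSCP to a traffic type
 * 0..7. Real values come from the kernel helper, not from this formula.
 */
static int dscp_to_tt(int dscp)
{
	return (dscp >> 3) & 0x7;	/* crude illustration only */
}

/* Placeholder for ieee8021q_tt_to_tc(tt, num_queues). */
static int tt_to_tc(int tt, int num_queues)
{
	return tt * num_queues / 8;	/* crude illustration only */
}

static int dscp_to_ipm(int dscp, int is_ksz8, int num_tx_queues)
{
	int tt = dscp_to_tt(dscp);

	/* KSZ9xxx: IPM == traffic type; KSZ8xxx: squash to a queue number */
	return is_ksz8 ? tt_to_tc(tt, num_tx_queues) : tt;
}

int main(void)
{
	int dscp = 46;	/* example value */

	printf("ksz9477 IPM: %d\n", dscp_to_ipm(dscp, 0, 4));
	printf("ksz8 queue:  %d\n", dscp_to_ipm(dscp, 1, 4));
	return 0;
}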
+
+/**
+ * ksz_port_add_dscp_prio - Adds a DSCP-to-priority mapping entry for a port on
+ * a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to add the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to add the priority
+ * @prio: Priority value to set
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (prio >= dev->info->num_ipms)
+ return -ERANGE;
+
+ return ksz_set_global_dscp_entry(dev, dscp, prio);
+}
+
+/**
+ * ksz_port_del_dscp_prio - Deletes a DSCP-to-priority mapping entry for a port
+ * on a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to delete the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to delete the priority
+ * @prio: Priority value to delete
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int ipm;
+
+ if (ksz_port_get_dscp_prio(ds, port, dscp) != prio)
+ return 0;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ return ksz_set_global_dscp_entry(dev, dscp, ipm);
+}
+
+/**
+ * ksz_apptrust_error - Prints an error message for an invalid apptrust selector
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function prints an error message when an invalid apptrust selector is
+ * provided.
+ */
+static void ksz_apptrust_error(struct ksz_device *dev)
+{
+ char supported_apptrust_variants[64];
+ int i;
+
+ supported_apptrust_variants[0] = '\0';
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust_variants); i++) {
+ if (i > 0)
+ strlcat(supported_apptrust_variants, ", ",
+ sizeof(supported_apptrust_variants));
+ strlcat(supported_apptrust_variants,
+ ksz_supported_apptrust_variants[i],
+ sizeof(supported_apptrust_variants));
+ }
+
+ dev_err(dev->dev, "Invalid apptrust selector or priority order. Supported: %s\n",
+ supported_apptrust_variants);
+}
+
+/**
+ * ksz_port_set_apptrust_validate - Validates the apptrust selectors
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to validate
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function validates the apptrust selectors provided and ensures that
+ * they are in the correct order.
+ *
+ * This family of switches supports two apptrust selectors: DCB_APP_SEL_PCP and
+ * IEEE_8021QAZ_APP_SEL_DSCP. The priority order of the selectors is fixed and
+ * cannot be changed. The order is as follows:
+ * 1. DCB_APP_SEL_PCP - Priority Code Point selector (highest priority)
+ * 2. IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ * (lowest priority)
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port,
+ const u8 *sel, int nsel)
+{
+ int i, j, found;
+ int j_prev = 0;
+
+ /* Iterate through the requested selectors */
+ for (i = 0; i < nsel; i++) {
+ found = 0;
+
+ /* Check if the current selector is supported by the hardware */
+ for (j = 0; j < sizeof(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ found = 1;
+
+ /* Ensure that no higher priority selector (lower index)
+ * precedes a lower priority one
+ */
+ if (i > 0 && j <= j_prev)
+ goto err_sel_not_valid;
+
+ j_prev = j;
+ break;
+ }
+
+ if (!found)
+ goto err_sel_not_valid;
+ }
+
+ return 0;
+
+err_sel_not_valid:
+ ksz_apptrust_error(dev);
+
+ return -EINVAL;
+}
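A standalone re-implementation of the ordering rule enforced above, for illustration: selectors must appear in the same relative order as in ksz_supported_apptrust[] (PCP before DSCP), duplicates are rejected, and unknown selectors are rejected. The numeric selector values below are placeholders; in the kernel they come from linux/dcbnl.h.

#include <stdio.h>

/* Placeholder selector values; the kernel uses DCB_APP_SEL_PCP and
 * IEEE_8021QAZ_APP_SEL_DSCP from linux/dcbnl.h.
 */
enum { SEL_PCP = 1, SEL_DSCP = 2 };

static const unsigned char supported[] = { SEL_PCP, SEL_DSCP };

static int validate(const unsigned char *sel, int nsel)
{
	int i, j, j_prev = 0;

	for (i = 0; i < nsel; i++) {
		int found = 0;

		for (j = 0; j < (int)sizeof(supported); j++) {
			if (sel[i] != supported[j])
				continue;
			found = 1;
			/* a later selector must not outrank an earlier one */
			if (i > 0 && j <= j_prev)
				return -1;
			j_prev = j;
			break;
		}
		if (!found)
			return -1;
	}
	return 0;
}

int main(void)
{
	const unsigned char ok[] = { SEL_PCP, SEL_DSCP };
	const unsigned char bad[] = { SEL_DSCP, SEL_PCP };

	printf("pcp dscp -> %s\n", validate(ok, 2) ? "rejected" : "accepted");
	printf("dscp pcp -> %s\n", validate(bad, 2) ? "rejected" : "accepted");
	return 0;
}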
+
+/**
+ * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1
+ * of KSZ88x3 devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @port1_data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on Port 1 of
+ * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not
+ * possible if PCP apptrust on Port 2 is disabled. This is because Port 2
+ * appears to be permanently hardwired to PCP classification, so the Port 1
+ * configuration must always be done in agreement with the Port 2
+ * configuration.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 port1_data)
+{
+ u8 port2_data;
+ int ret;
+
+ /* If no apptrust is requested for Port 1, no need to care about Port 2
+ * configuration.
+ */
+ if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)))
+ return 0;
+
+ /* We got a request to enable apptrust on Port 1. To make this possible,
+ * we need to enable multiple queues on the switch. If we enable
+ * multiqueue support, PCP classification on Port 2 will be
+ * automatically activated by HW.
+ */
+ ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data);
+ if (ret)
+ return ret;
+
+ /* If the KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver has
+ * opted into PCP classification on Port 2. In this case,
+ * multiqueue support is enabled and we can enable any apptrust on
+ * Port 1.
+ * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP
+ * classification on Port 2 is still active, but the driver disabled
+ * multiqueue support and made frame prioritization inactive for
+ * all ports. In this case, we can't enable any apptrust on Port 1.
+ */
+ if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
+ dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2
+ * of KSZ88x3 devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @port2_data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on Port 2 of
+ * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and
+ * that it is not possible to disable PCP on Port 2. The only way to disable PCP
+ * on Port 2 is to disable multiple queues on the switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 port2_data)
+{
+ struct dsa_switch *ds = dev->ds;
+ u8 port1_data;
+ int ret;
+
+ /* First validate Port 2 configuration. DiffServ/DSCP is not working
+ * on this port.
+ */
+ if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) {
+ dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n");
+ return -EINVAL;
+ }
+
+ /* If PCP support is requested, we need to enable all queues on the
+ * switch to make PCP priority working on Port 2.
+ */
+ if (port2_data & KSZ8_PORT_802_1P_ENABLE)
+ return ksz8_all_queues_split(dev, dev->info->num_tx_queues);
+
+ /* We got a request to disable PCP priority on Port 2, so we need to
+ * compare the Port 2 configuration with the Port 1 configuration.
+ */
+ ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data);
+ if (ret)
+ return ret;
+
+ /* If Port 1 has any apptrust enabled, we can't disable multiple queues
+ * on the switch, so we can't disable PCP on Port 2.
+ */
+ if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) {
+ dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n");
+ return -EINVAL;
+ }
+
+ /* Now we need to ensure that the default priority on Port 1 is set to 0,
+ * otherwise we can't disable multiqueue support on the switch.
+ */
+ ret = ksz_port_get_default_prio(ds, KSZ_PORT_1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n");
+ return -EINVAL;
+ }
+
+ /* Port 1 has no apptrust or default priority set and we got request to
+ * disable PCP on Port 2. We can disable multiqueue support to disable
+ * PCP on Port 2.
+ */
+ return ksz8_all_queues_split(dev, 1);
+}
+
+/**
+ * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3
+ * devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on KSZ88x3
+ * devices. It ensures that apptrust configuration on Port 1 and
+ * Port 2 is done in agreement with each other.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 data)
+{
+ if (port == KSZ_PORT_1)
+ return ksz88x3_port1_apptrust_quirk(dev, port, reg, data);
+ else if (port == KSZ_PORT_2)
+ return ksz88x3_port2_apptrust_quirk(dev, port, reg, data);
+
+ return 0;
+}
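The two quirk handlers above boil down to a small consistency rule between the ports: Port 1 may only use apptrust (or a non-zero default priority) while Port 2 has PCP enabled, and PCP on Port 2 can only be dropped once Port 1 uses neither. A tiny standalone sketch of that rule, detached from the register access; the boolean flags stand in for the KSZ8_PORT_802_1P_ENABLE / KSZ8_PORT_DIFFSERV_ENABLE bits.

#include <stdio.h>
#include <stdbool.h>

/* Consistency rule distilled from ksz88x3_port{1,2}_apptrust_quirk() */
static bool port1_apptrust_allowed(bool port2_pcp_enabled)
{
	return port2_pcp_enabled;
}

static bool port2_pcp_can_be_disabled(bool port1_has_apptrust,
				      int port1_default_prio)
{
	return !port1_has_apptrust && port1_default_prio == 0;
}

int main(void)
{
	printf("enable apptrust on P1 while P2 PCP off: %s\n",
	       port1_apptrust_allowed(false) ? "ok" : "rejected");
	printf("disable PCP on P2 while P1 is clean:    %s\n",
	       port2_pcp_can_be_disabled(false, 0) ? "ok" : "rejected");
	return 0;
}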
+
+/**
+ * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to set
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function sets the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const u8 *sel, int nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data = 0;
+ u8 mask;
+
+ ret = ksz_port_set_apptrust_validate(dev, port, sel, nsel);
+ if (ret)
+ return ret;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ for (i = 0; i < nsel; i++) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ data |= map[j].bit;
+ break;
+ }
+ }
+
+ if (ksz_is_ksz88x3(dev)) {
+ ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_prmw8(dev, port, reg, mask, data);
+}
+
+/**
+ * ksz_port_get_apptrust - Retrieves the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the apptrust selectors
+ * @sel: Array to store the apptrust selectors
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function fetches the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data;
+ u8 mask;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ *nsel = 0;
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust); i++) {
+ if (data & map[i].bit)
+ sel[(*nsel)++] = ksz_supported_apptrust[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_dcb_init_port - Initializes the DCB configuration for a port on a KSZ
+ * switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to initialize the DCB configuration
+ *
+ * This function initializes the DCB configuration for the specified port on a
+ * KSZ switch: the default priority and the apptrust selectors. The default
+ * priority is set to Best Effort, and apptrust is limited to PCP, matching
+ * ksz_default_apptrust[] below.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init_port(struct ksz_device *dev, int port)
+{
+ const u8 ksz_default_apptrust[] = { DCB_APP_SEL_PCP };
+ int ret, ipm;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ /* Set the default priority for the port to Best Effort */
+ ret = ksz_port_set_default_prio(dev->ds, port, ipm);
+ if (ret)
+ return ret;
+
+ return ksz_port_set_apptrust(dev->ds, port, ksz_default_apptrust,
+ ARRAY_SIZE(ksz_default_apptrust));
+}
+
+/**
+ * ksz_dcb_init - Initializes the DCB configuration for a KSZ switch
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the DCB configuration for a KSZ switch. The global
+ * DSCP-to-priority mapping table is initialized.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init(struct ksz_device *dev)
+{
+ int ret;
+
+ ret = ksz_init_global_dscp_map(dev);
+ if (ret)
+ return ret;
+
+ /* Enable 802.1p priority control on Port 2 during switch initialization.
+ * This setup is critical for the apptrust functionality on Port 1, which
+ * relies on the priority settings of Port 2. Note: Port 1 is naturally
+ * configured before Port 2, necessitating this configuration order.
+ */
+ if (ksz_is_ksz88x3(dev))
+ return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
+ KSZ8_PORT_802_1P_ENABLE,
+ KSZ8_PORT_802_1P_ENABLE);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/microchip/ksz_dcb.h b/drivers/net/dsa/microchip/ksz_dcb.h
new file mode 100644
index 000000000000..e2065223ba90
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef __KSZ_DCB_H
+#define __KSZ_DCB_H
+
+#include <net/dsa.h>
+
+#include "ksz_common.h"
+
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port);
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio);
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp);
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const unsigned char *sel,
+ int nsel);
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel);
+int ksz_dcb_init_port(struct ksz_device *dev, int port);
+int ksz_dcb_init(struct ksz_device *dev);
+
+#endif /* __KSZ_DCB_H */
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index c8166fb440ab..8e8d83213b04 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -222,7 +222,6 @@ MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
- .owner = THIS_MODULE,
.of_match_table = ksz_dt_ids,
},
.id_table = ksz_spi_ids,
@@ -233,13 +232,6 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9896");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index fa3ee85a99c1..51df42ccdbe6 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -18,7 +18,8 @@
static int
mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
@@ -27,36 +28,35 @@ mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
lo = val & 0xffff;
hi = val >> 16;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, r, lo);
+ ret = bus->write(bus, priv->mdiodev->addr, r, lo);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, 0x10, hi);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x10, hi);
return ret;
}
static int
mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
page = (reg >> 6) & 0x3ff;
r = (reg >> 2) & 0xf;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- lo = bus->read(bus, 0x1f, r);
- hi = bus->read(bus, 0x1f, 0x10);
+ lo = bus->read(bus, priv->mdiodev->addr, r);
+ hi = bus->read(bus, priv->mdiodev->addr, 0x10);
*val = (hi << 16) | (lo & 0xffff);
@@ -107,8 +107,7 @@ mt7531_create_sgmii(struct mt7530_priv *priv)
mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
- regmap = devm_regmap_init(priv->dev,
- &mt7530_regmap_bus, priv->bus,
+ regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
mt7531_pcs_config[i]);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
@@ -153,6 +152,7 @@ mt7530_probe(struct mdio_device *mdiodev)
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->mdiodev = mdiodev;
ret = mt7530_probe_common(priv);
if (ret)
@@ -203,8 +203,8 @@ mt7530_probe(struct mdio_device *mdiodev)
regmap_config->reg_stride = 4;
regmap_config->max_register = MT7530_CREV;
regmap_config->disable_locking = true;
- priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus,
- priv->bus, regmap_config);
+ priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
+ regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
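The mdio regmap change above swaps the bare mii_bus context for the driver's private structure, so the callbacks can use the probed mdiodev->addr instead of the hard-coded pseudo address 31. The pattern itself (hand an arbitrary context pointer to the bus/regmap callbacks and unpack it there) looks roughly like the standalone mock below; all types here are invented for illustration and are not the regmap or MDIO APIs.

#include <stdio.h>

/* Invented stand-ins for mii_bus / mt7530_priv, illustration only */
struct fake_bus {
	int (*write)(struct fake_bus *bus, int addr, int reg, int val);
};

struct fake_priv {
	struct fake_bus *bus;
	int addr;		/* like priv->mdiodev->addr */
};

static int fake_write(struct fake_bus *bus, int addr, int reg, int val)
{
	printf("write addr=%d reg=0x%x val=0x%x\n", addr, reg, val);
	return 0;
}

/* The callback receives the opaque context and unpacks it, as the mt7530
 * regmap_write/read callbacks now do with struct mt7530_priv.
 */
static int regmap_write_cb(void *context, unsigned int reg, unsigned int val)
{
	struct fake_priv *priv = context;

	return priv->bus->write(priv->bus, priv->addr, reg, val);
}

int main(void)
{
	struct fake_bus bus = { .write = fake_write };
	struct fake_priv priv = { .bus = &bus, .addr = 7 };

	return regmap_write_cb(&priv, 0x1f, 0x123);
}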
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 767f66c37f6b..598434d8d6e4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -74,108 +74,94 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0xb8, "RxArlDrop"),
};
-/* Since phy_device has not yet been created and
- * phy_{read,write}_mmd_indirect is not available, we provide our own
- * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
- * to complete this function.
- */
-static int
-core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+static void
+mt7530_mutex_lock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mutex_unlock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
struct mii_bus *bus = priv->bus;
- int value, ret;
+ int ret;
+
+ mt7530_mutex_lock(priv);
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
- /* Read the content of the MMD's selected register */
- value = bus->read(bus, 0, MII_MMD_DATA);
-
- return value;
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
- dev_err(&bus->dev, "failed to read mmd register\n");
+ if (ret < 0)
+ dev_err(&bus->dev, "failed to write mmd register\n");
- return ret;
+ mt7530_mutex_unlock(priv);
}
-static int
-core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
- int devad, u32 data)
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
struct mii_bus *bus = priv->bus;
+ u32 val;
int ret;
+ mt7530_mutex_lock(priv);
+
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
+ /* Read the content of the MMD's selected register */
+ val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA);
+ val &= ~mask;
+ val |= set;
/* Write the data into MMD's selected register */
- ret = bus->write(bus, 0, MII_MMD_DATA, data);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
if (ret < 0)
- dev_err(&bus->dev,
- "failed to write mmd register\n");
- return ret;
-}
-
-static void
-mt7530_mutex_lock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
-}
-
-static void
-mt7530_mutex_unlock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_unlock(&priv->bus->mdio_lock);
-}
-
-static void
-core_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7530_mutex_lock(priv);
-
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
-
- mt7530_mutex_unlock(priv);
-}
-
-static void
-core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
-{
- u32 val;
-
- mt7530_mutex_lock(priv);
-
- val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
- val &= ~mask;
- val |= set;
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+ dev_err(&bus->dev, "failed to write mmd register\n");
mt7530_mutex_unlock(priv);
}
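For readers unfamiliar with the access pattern used by core_write()/core_rmw() above: it is the standard Clause 22 indirect MMD sequence (write the device address to the MMD control register, write the target register to the MMD data register, switch the control register to data with no post increment, then read or write the data register). A standalone mock of the write sequence follows; the register numbers mirror linux/mii.h as far as I recall and should otherwise be treated as placeholders.

#include <stdio.h>

#define MII_MMD_CTRL		0x0d	/* MMD access control */
#define MII_MMD_DATA		0x0e	/* MMD address/data */
#define MII_MMD_CTRL_NOINCR	0x4000	/* data, no post increment */
#define MDIO_MMD_VEND2		0x1f	/* vendor specific 2 device */

/* Mock of bus->write(); only prints what would go on the wire */
static int mdio_write(int phy, int reg, int val)
{
	printf("phy %d: reg 0x%02x <- 0x%04x\n", phy, reg, val);
	return 0;
}

/* Clause 22 indirect write of @val to MMD @devad register @reg */
static int mmd_indirect_write(int phy, int devad, int reg, int val)
{
	int ret;

	ret = mdio_write(phy, MII_MMD_CTRL, devad);
	if (ret < 0)
		return ret;
	ret = mdio_write(phy, MII_MMD_DATA, reg);
	if (ret < 0)
		return ret;
	ret = mdio_write(phy, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
	if (ret < 0)
		return ret;
	return mdio_write(phy, MII_MMD_DATA, val);
}

int main(void)
{
	return mmd_indirect_write(31, MDIO_MMD_VEND2, 0x123, 0xabcd);
}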
@@ -431,23 +417,23 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ssc_delta = 0x57;
else
ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x1400;
}
@@ -470,19 +456,20 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
+ enum mt7531_xtal_fsel xtal;
u32 top_sig;
u32 hwstrap;
- u32 xtal;
u32 val;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
- hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ hwstrap = mt7530_read(priv, MT753X_TRAP);
if ((val & CHIP_REV_M) > 0)
- xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
- HWTRAP_XTAL_FSEL_25MHZ;
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
+ MT7531_XTAL_FSEL_25MHZ;
else
- xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+ xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
+ MT7531_XTAL_FSEL_40MHZ;
/* Step 1 : Disable MT7531 COREPLL */
val = mt7530_read(priv, MT7531_PLLGP_EN);
@@ -511,13 +498,13 @@ mt7531_pll_setup(struct mt7530_priv *priv)
usleep_range(25, 35);
switch (xtal) {
- case HWTRAP_XTAL_FSEL_25MHZ:
+ case MT7531_XTAL_FSEL_25MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- case HWTRAP_XTAL_FSEL_40MHZ:
+ case MT7531_XTAL_FSEL_40MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
@@ -871,19 +858,15 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
-static const char *p5_intf_modes(unsigned int p5_interface)
+static const char *mt7530_p5_mode_str(unsigned int mode)
{
- switch (p5_interface) {
- case P5_DISABLED:
- return "DISABLED";
- case P5_INTF_SEL_PHY_P0:
- return "PHY P0";
- case P5_INTF_SEL_PHY_P4:
- return "PHY P4";
- case P5_INTF_SEL_GMAC5:
- return "GMAC5";
+ switch (mode) {
+ case MUX_PHY_P0:
+ return "MUX PHY P0";
+ case MUX_PHY_P4:
+ return "MUX PHY P4";
default:
- return "unknown";
+ return "GMAC5";
}
}
@@ -895,34 +878,31 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_lock(&priv->reg_mutex);
- val = mt7530_read(priv, MT7530_MHWTRAP);
+ val = mt7530_read(priv, MT753X_MTRAP);
- val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
- val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
+ val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
- switch (priv->p5_intf_sel) {
- case P5_INTF_SEL_PHY_P0:
- /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
- val |= MHWTRAP_PHY0_SEL;
+ switch (priv->p5_mode) {
+ /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
+ case MUX_PHY_P0:
+ val |= MT7530_P5_PHY0_SEL;
fallthrough;
- case P5_INTF_SEL_PHY_P4:
- /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
- val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+ /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
+ case MUX_PHY_P4:
/* Setup the MAC by default for the cpu port */
- mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
- break;
- case P5_INTF_SEL_GMAC5:
- /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
- val &= ~MHWTRAP_P5_DIS;
+ mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
break;
+
+ /* GMAC5: P5 -> SoC MAC or external PHY */
default:
+ val |= MT7530_P5_MAC_SEL;
break;
}
/* Setup RGMII settings */
if (phy_interface_mode_is_rgmii(interface)) {
- val |= MHWTRAP_P5_RGMII_MODE;
+ val |= MT7530_P5_RGMII_MODE;
/* P5 RGMII RX Clock Control: delay setting for 1000M */
mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
@@ -942,28 +922,181 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ mt7530_write(priv, MT753X_MTRAP, val);
- dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
- val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
+ mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
mutex_unlock(&priv->reg_mutex);
}
-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
- * must only be propagated to C-VLAN and MAC Bridge components. That means
- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
- * these frames are supposed to be processed by the CPU (software). So we make
- * the switch only forward them to the CPU port. And if received from a CPU
- * port, forward to a single port. The software is responsible of making the
- * switch conform to the latter by setting a single port as destination port on
- * the special tag.
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
+ *
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
*
- * This switch intellectual property cannot conform to this part of the standard
- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
- * DAs, it also includes :22-FF which the scope of propagation is not supposed
- * to be restricted for these MAC DAs.
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is mainly determined by ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified in the
+ * Nearest non-TPMR section, nor honour the portion of the Nearest Customer
+ * Bridge section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because Independent
+ * VLAN Learning (IVL) is used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it cannot cover all of the
+ * reserved MAC addresses without also affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag utilised via the RGAC4 register covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformantly as this switch
+ * intellectual property allows, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
@@ -971,35 +1104,35 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_BPC,
+ PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
+ BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
+ PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
+ BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
- MT753X_R01_PORT_FW_MASK,
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC1,
+ R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
+ R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
+ R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
+ R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
- MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC2,
+ R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
+ R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
+ R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
+ R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
}
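
As a quick sanity check of the new field macros used above, the PAE part of the first mask/value pair decomposes as follows. The snippet is purely illustrative (check_bpc_fields() is not part of the driver) and uses the generic bitfield helpers directly so it stays self-contained; it relies only on the definitions visible in this patch.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

static void check_bpc_fields(void)
{
	/* PAE_PORT_FW_MASK is GENMASK(18, 16) and TO_CPU_FW_CPU_ONLY is 6,
	 * so PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) evaluates to 6 << 16 = 0x60000.
	 */
	u32 pae_port_fw = FIELD_PREP(GENMASK(18, 16), 6);

	/* PAE_BPDU_FR is BIT(25) = 0x2000000. */
	u32 pae_bpdu_fr = BIT(25);

	pr_debug("MT753X_BPC PAE bits: %#x\n", pae_port_fw | pae_bpdu_fr);
}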
static void
@@ -1012,7 +1145,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
PORT_SPEC_TAG);
/* Enable flooding on the CPU port */
- mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
@@ -1057,6 +1190,14 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
mutex_unlock(&priv->reg_mutex);
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return 0;
+
+ if (port == 5)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
+
return 0;
}
@@ -1075,6 +1216,15 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
PCR_MATRIX_CLR);
mutex_unlock(&priv->reg_mutex);
+
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
+ if (port == 5 && priv->p5_mode == GMAC5)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
}
static int
@@ -1176,15 +1326,15 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
flags.val & BR_LEARNING ? 0 : SA_DIS);
if (flags.mask & BR_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
if (flags.mask & BR_MCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
if (flags.mask & BR_BCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
return 0;
@@ -1262,7 +1412,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
@@ -1720,18 +1870,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt753x_mirror_port_get(unsigned int id, u32 val)
-{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
-}
-
-static int mt753x_mirror_port_set(unsigned int id, u32 val)
-{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
-}
-
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
@@ -1747,14 +1885,14 @@ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- monitor_port = mt753x_mirror_port_get(priv->id, val);
+ monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
if (val & MT753X_MIRROR_EN(priv->id) &&
monitor_port != mirror->to_local_port)
return -EEXIST;
val |= MT753X_MIRROR_EN(priv->id);
- val &= ~MT753X_MIRROR_MASK(priv->id);
- val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
+ val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
@@ -2242,7 +2380,7 @@ mt7530_setup(struct dsa_switch *ds)
}
/* Wait for the MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2257,7 +2395,7 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
- if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
dev_err(priv->dev,
"MT7530 with a 20MHz XTAL is not supported!\n");
return -EINVAL;
@@ -2268,8 +2406,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- mt7530_pll_setup(priv);
-
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2279,23 +2415,26 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
- /* Enable port 6 */
- val = mt7530_read(priv, MT7530_MHWTRAP);
- val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
- val |= MHWTRAP_MANUAL;
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ /* Allow modifying the trap and accessing PHY registers directly via the
+ * MDIO bus the switch is on.
+ */
+ mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
+ MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
+
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7530_FORCE_MODE, MT7530_FORCE_MODE);
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2318,18 +2457,19 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
return ret;
- /* Setup port 5 */
- if (!dsa_is_unused_port(ds, 5)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- } else {
+ /* Check for PHY muxing on port 5 */
+ if (dsa_is_unused_port(ds, 5)) {
/* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY.
- * Set priv->p5_intf_sel to the appropriate value if PHY muxing
- * is detected.
+ * Set priv->p5_mode to the appropriate value if PHY muxing is
+ * detected.
*/
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2344,7 +2484,8 @@ mt7530_setup(struct dsa_switch *ds)
if (!phy_node)
continue;
- if (phy_node->parent == priv->dev->of_node->parent) {
+ if (phy_node->parent == priv->dev->of_node->parent ||
+ phy_node->parent->parent == priv->dev->of_node) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
@@ -2353,18 +2494,20 @@ mt7530_setup(struct dsa_switch *ds)
}
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ priv->p5_mode = MUX_PHY_P0;
if (id == 4)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ priv->p5_mode = MUX_PHY_P4;
}
of_node_put(mac_np);
of_node_put(phy_node);
break;
}
- if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
- priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ if (priv->p5_mode == MUX_PHY_P0 ||
+ priv->p5_mode == MUX_PHY_P4) {
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
mt7530_setup_port5(ds, interface);
+ }
}
#ifdef CONFIG_GPIOLIB
@@ -2395,15 +2538,15 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_mib_reset(ds);
/* Disable flooding on all ports */
- mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
UNU_FFP_MASK);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2429,6 +2572,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2459,7 +2605,7 @@ mt7531_setup(struct dsa_switch *ds)
}
/* Wait for the MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2482,8 +2628,8 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
/* Force link down on all ports before internal reset */
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+ for (i = 0; i < priv->ds->num_ports; i++)
+ mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
@@ -2491,32 +2637,44 @@ mt7531_setup(struct dsa_switch *ds)
if (!priv->p5_sgmii) {
mt7531_pll_setup(priv);
} else {
- /* Let ds->user_mii_bus be able to access external phy. */
+ /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on
+ * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO
+ * to expose the MDIO bus of the switch.
+ */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
}
- if (!dsa_is_unused_port(ds, 5))
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
-
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since no
+ * phy_device has been created yet for phy_[read,write]_mmd_indirect to
+ * operate on, we use our own mt7531_ind_c45_phy_[read,write] helpers to
+ * complete this step.
*/
- val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ val = mt7531_ind_c45_phy_read(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
- mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
- CORE_PLL_GROUP4, val);
+ mt7531_ind_c45_phy_write(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
+
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
+ i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
+ i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
- mt7531_setup_common(ds);
+ ret = mt7531_setup_common(ds);
+ if (ret)
+ return ret;
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
@@ -2532,6 +2690,8 @@ mt7531_setup(struct dsa_switch *ds)
static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2563,6 +2723,8 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2602,14 +2764,17 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
break;
/* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10000FD;
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
}
}
@@ -2625,7 +2790,7 @@ mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
mt7530_setup_port6(priv->ds, interface);
}
-static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+static void mt7531_rgmii_setup(struct mt7530_priv *priv,
phy_interface_t interface,
struct phy_device *phydev)
{
@@ -2676,62 +2841,70 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- mt7531_rgmii_setup(priv, port, interface, phydev);
+ mt7531_rgmii_setup(priv, interface, phydev);
}
}
static struct phylink_pcs *
-mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+mt753x_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
switch (interface) {
case PHY_INTERFACE_MODE_TRGMII:
- return &priv->pcs[port].pcs;
+ return &priv->pcs[dp->index].pcs;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- return priv->ports[port].sgmii_pcs;
+ return priv->ports[dp->index].sgmii_pcs;
default:
return NULL;
}
}
static void
-mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct mt7530_priv *priv;
+ int port = dp->index;
+
+ priv = ds->priv;
if ((port == 5 || port == 6) && priv->info->mac_port_config)
priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to an external PHY? */
if (port == 5 && dsa_is_user_port(ds, 5))
- mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
+ mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
}
-static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
u32 mcr;
- mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
switch (speed) {
case SPEED_1000:
@@ -2746,9 +2919,9 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL) {
mcr |= PMCR_FORCE_FDX;
if (tx_pause)
- mcr |= PMCR_TX_FC_EN;
+ mcr |= PMCR_FORCE_TX_FC_EN;
if (rx_pause)
- mcr |= PMCR_RX_FC_EN;
+ mcr |= PMCR_FORCE_RX_FC_EN;
}
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
@@ -2763,7 +2936,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
}
- mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
@@ -2771,9 +2944,7 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- /* This switch only supports full-duplex at 1Gbps */
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000FD;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
priv->info->mac_port_get_caps(ds, port, config);
}
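
With this change the pause capabilities are declared once here, while the speed/duplex bits are OR-ed in by the per-model mac_port_get_caps() handlers shown earlier. A hypothetical spot-check of the resulting masks for MT7988 (show_mt7988_caps() is illustrative only, not driver code):

#include <linux/phylink.h>
#include <linux/printk.h>

static void show_mt7988_caps(void)
{
	/* Ports 0-3 (internal PHYs): common pause caps plus 10/100/1000FD. */
	unsigned long phy_port_caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				      MAC_10 | MAC_100 | MAC_1000FD;

	/* Port 6 (XGMII CPU port): common pause caps plus 10G full duplex. */
	unsigned long cpu_port_caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				      MAC_10000FD;

	pr_debug("mt7988 caps: p0-p3=%#lx p6=%#lx\n",
		 phy_port_caps, cpu_port_caps);
}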
@@ -2861,6 +3032,8 @@ mt753x_setup(struct dsa_switch *ds)
ret = mt7530_setup_mdio(priv);
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ if (ret)
+ return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
@@ -2883,10 +3056,10 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
+ u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+ e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
return 0;
}
@@ -2900,11 +3073,11 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = SET_LPI_THRESH(e->tx_lpi_timer);
+ set = LPI_THRESH_SET(e->tx_lpi_timer);
if (!e->tx_lpi_enabled)
/* Force LPI Mode without a delay */
set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
+ mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
return 0;
}
@@ -2933,10 +3106,12 @@ mt753x_conduit_state_change(struct dsa_switch *ds,
else
priv->active_cpu_ports &= ~mask;
- if (priv->active_cpu_ports)
- val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+ if (priv->active_cpu_ports) {
+ val = MT7530_CPU_EN |
+ MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
+ }
- mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
+ mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
@@ -2983,16 +3158,19 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
- .phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_link_down = mt753x_phylink_mac_link_down,
- .phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
+static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
+ .mac_select_pcs = mt753x_phylink_mac_select_pcs,
+ .mac_config = mt753x_phylink_mac_config,
+ .mac_link_down = mt753x_phylink_mac_link_down,
+ .mac_link_up = mt753x_phylink_mac_link_up,
+};
+
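
The structure above is part of converting the driver from the legacy DSA .phylink_mac_* switch ops to a dedicated phylink_mac_ops table (registered further down via priv->ds->phylink_mac_ops). Each handler now receives the phylink_config directly and recovers its DSA port with dsa_phylink_to_port(), as in this minimal sketch; example_mac_link_down() is an illustrative name, not part of the driver:

#include <linux/device.h>
#include <net/dsa.h>

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* phylink_config is embedded in struct dsa_port, so the port (and
	 * from it the switch private data) can be recovered directly.
	 */
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct dsa_switch *ds = dp->ds;

	dev_dbg(ds->dev, "link down on port %d\n", dp->index);
}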
const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
@@ -3059,17 +3237,11 @@ mt7530_probe_common(struct mt7530_priv *priv)
if (!priv->info)
return -EINVAL;
- /* Sanity check if these required device operations are filled
- * properly.
- */
- if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
- !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
- return -EINVAL;
-
priv->id = priv->info->id;
priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
+ priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(dev, priv);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index d17b318e6ee4..2ea4e24628c6 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -32,73 +32,101 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
-/* Registers to mac forward control for unknown frames */
-#define MT7530_MFC 0x10
-#define BC_FFP(x) (((x) & 0xff) << 24)
-#define BC_FFP_MASK BC_FFP(~0)
-#define UNM_FFP(x) (((x) & 0xff) << 16)
-#define UNM_FFP_MASK UNM_FFP(~0)
-#define UNU_FFP(x) (((x) & 0xff) << 8)
-#define UNU_FFP_MASK UNU_FFP(~0)
-#define CPU_EN BIT(7)
-#define CPU_PORT_MASK GENMASK(6, 4)
-#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
-#define MIRROR_EN BIT(3)
-#define MIRROR_PORT(x) ((x) & 0x7)
-#define MIRROR_MASK 0x7
-
-/* Registers for CPU forward control */
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
+/* Register for MAC forward control */
+#define MT753X_MFC 0x10
+#define BC_FFP_MASK GENMASK(31, 24)
+#define BC_FFP(x) FIELD_PREP(BC_FFP_MASK, x)
+#define UNM_FFP_MASK GENMASK(23, 16)
+#define UNM_FFP(x) FIELD_PREP(UNM_FFP_MASK, x)
+#define UNU_FFP_MASK GENMASK(15, 8)
+#define UNU_FFP(x) FIELD_PREP(UNU_FFP_MASK, x)
+#define MT7530_CPU_EN BIT(7)
+#define MT7530_CPU_PORT_MASK GENMASK(6, 4)
+#define MT7530_CPU_PORT(x) FIELD_PREP(MT7530_CPU_PORT_MASK, x)
+#define MT7530_MIRROR_EN BIT(3)
+#define MT7530_MIRROR_PORT_MASK GENMASK(2, 0)
+#define MT7530_MIRROR_PORT_GET(x) FIELD_GET(MT7530_MIRROR_PORT_MASK, x)
+#define MT7530_MIRROR_PORT_SET(x) FIELD_PREP(MT7530_MIRROR_PORT_MASK, x)
+#define MT7531_QRY_FFP_MASK GENMASK(7, 0)
+#define MT7531_QRY_FFP(x) FIELD_PREP(MT7531_QRY_FFP_MASK, x)
+
+/* Register for CPU forward control */
#define MT7531_CFC 0x4
#define MT7531_MIRROR_EN BIT(19)
-#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
-#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
-#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_MIRROR_PORT_MASK GENMASK(18, 16)
+#define MT7531_MIRROR_PORT_GET(x) FIELD_GET(MT7531_MIRROR_PORT_MASK, x)
+#define MT7531_MIRROR_PORT_SET(x) FIELD_PREP(MT7531_MIRROR_PORT_MASK, x)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
-#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_CFC : MT7530_MFC)
-#define MT753X_MIRROR_EN(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_EN : MIRROR_EN)
-#define MT753X_MIRROR_MASK(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_MASK : MIRROR_MASK)
+#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_CFC : MT753X_MFC)
-/* Registers for BPDU and PAE frame control*/
+#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_EN : MT7530_MIRROR_EN)
+
+#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_MASK : \
+ MT7530_MIRROR_PORT_MASK)
+
+#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_GET(val) : \
+ MT7530_MIRROR_PORT_GET(val))
+
+#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_SET(val) : \
+ MT7530_MIRROR_PORT_SET(val))
+
+/* Register for BPDU and PAE frame control */
#define MT753X_BPC 0x24
-#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
-#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
-#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :01 and :02 MAC DA frame control */
+#define PAE_BPDU_FR BIT(25)
+#define PAE_EG_TAG_MASK GENMASK(24, 22)
+#define PAE_EG_TAG(x) FIELD_PREP(PAE_EG_TAG_MASK, x)
+#define PAE_PORT_FW_MASK GENMASK(18, 16)
+#define PAE_PORT_FW(x) FIELD_PREP(PAE_PORT_FW_MASK, x)
+#define BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define BPDU_EG_TAG(x) FIELD_PREP(BPDU_EG_TAG_MASK, x)
+#define BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[01,02] MAC DA frame control */
#define MT753X_RGAC1 0x28
-#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
-#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
-#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
-#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :03 and :0E MAC DA frame control */
+#define R02_BPDU_FR BIT(25)
+#define R02_EG_TAG_MASK GENMASK(24, 22)
+#define R02_EG_TAG(x) FIELD_PREP(R02_EG_TAG_MASK, x)
+#define R02_PORT_FW_MASK GENMASK(18, 16)
+#define R02_PORT_FW(x) FIELD_PREP(R02_PORT_FW_MASK, x)
+#define R01_BPDU_FR BIT(9)
+#define R01_EG_TAG_MASK GENMASK(8, 6)
+#define R01_EG_TAG(x) FIELD_PREP(R01_EG_TAG_MASK, x)
+#define R01_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[03,0E] MAC DA frame control */
#define MT753X_RGAC2 0x2c
-#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
-#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
-#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
-#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
-
-enum mt753x_bpdu_port_fw {
- MT753X_BPDU_FOLLOW_MFC,
- MT753X_BPDU_CPU_EXCLUDE = 4,
- MT753X_BPDU_CPU_INCLUDE = 5,
- MT753X_BPDU_CPU_ONLY = 6,
- MT753X_BPDU_DROP = 7,
+#define R0E_BPDU_FR BIT(25)
+#define R0E_EG_TAG_MASK GENMASK(24, 22)
+#define R0E_EG_TAG(x) FIELD_PREP(R0E_EG_TAG_MASK, x)
+#define R0E_PORT_FW_MASK GENMASK(18, 16)
+#define R0E_PORT_FW(x) FIELD_PREP(R0E_PORT_FW_MASK, x)
+#define R03_BPDU_FR BIT(9)
+#define R03_EG_TAG_MASK GENMASK(8, 6)
+#define R03_EG_TAG(x) FIELD_PREP(R03_EG_TAG_MASK, x)
+#define R03_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_to_cpu_fw {
+ TO_CPU_FW_SYSTEM_DEFAULT,
+ TO_CPU_FW_CPU_EXCLUDE = 4,
+ TO_CPU_FW_CPU_INCLUDE = 5,
+ TO_CPU_FW_CPU_ONLY = 6,
+ TO_CPU_FW_DROP = 7,
};
/* Registers for address table access */
@@ -295,48 +323,55 @@ enum mt7530_vlan_port_acc_frm {
#define G0_PORT_VID_DEF G0_PORT_VID(0)
/* Register for port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
-#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define MT753X_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT_MASK GENMASK(19, 18)
+#define PMCR_IFG_XMIT(x) FIELD_PREP(PMCR_IFG_XMIT_MASK, x)
#define PMCR_EXT_PHY BIT(17)
#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
+#define MT7530_FORCE_MODE BIT(15)
+#define PMCR_MAC_TX_EN BIT(14)
+#define PMCR_MAC_RX_EN BIT(13)
#define PMCR_BACKOFF_EN BIT(9)
#define PMCR_BACKPR_EN BIT(8)
#define PMCR_FORCE_EEE1G BIT(7)
#define PMCR_FORCE_EEE100 BIT(6)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_RX_FC_EN BIT(5)
+#define PMCR_FORCE_TX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
- PMCR_FORCE_SPEED_1000)
-#define MT7531_FORCE_LNK BIT(31)
-#define MT7531_FORCE_SPD BIT(30)
-#define MT7531_FORCE_DPX BIT(29)
-#define MT7531_FORCE_RX_FC BIT(28)
-#define MT7531_FORCE_TX_FC BIT(27)
-#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
- MT7531_FORCE_SPD | \
- MT7531_FORCE_DPX | \
- MT7531_FORCE_RX_FC | \
- MT7531_FORCE_TX_FC)
-#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
- PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
- PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-
-#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
-#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
-#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define MT7531_FORCE_MODE_LNK BIT(31)
+#define MT7531_FORCE_MODE_SPD BIT(30)
+#define MT7531_FORCE_MODE_DPX BIT(29)
+#define MT7531_FORCE_MODE_RX_FC BIT(28)
+#define MT7531_FORCE_MODE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE_EEE100 BIT(26)
+#define MT7531_FORCE_MODE_EEE1G BIT(25)
+#define MT7531_FORCE_MODE_MASK (MT7531_FORCE_MODE_LNK | \
+ MT7531_FORCE_MODE_SPD | \
+ MT7531_FORCE_MODE_DPX | \
+ MT7531_FORCE_MODE_RX_FC | \
+ MT7531_FORCE_MODE_TX_FC | \
+ MT7531_FORCE_MODE_EEE100 | \
+ MT7531_FORCE_MODE_EEE1G)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
+ PMCR_FORCE_EEE1G | \
+ PMCR_FORCE_EEE100 | \
+ PMCR_FORCE_RX_FC_EN | \
+ PMCR_FORCE_TX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+#define MT753X_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000_MASK GENMASK(31, 24)
+#define WAKEUP_TIME_1000(x) FIELD_PREP(WAKEUP_TIME_1000_MASK, x)
+#define WAKEUP_TIME_100_MASK GENMASK(23, 16)
+#define WAKEUP_TIME_100(x) FIELD_PREP(WAKEUP_TIME_100_MASK, x)
#define LPI_THRESH_MASK GENMASK(15, 4)
-#define LPI_THRESH_SHT 4
-#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
-#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_THRESH_GET(x) FIELD_GET(LPI_THRESH_MASK, x)
+#define LPI_THRESH_SET(x) FIELD_PREP(LPI_THRESH_MASK, x)
#define LPI_MODE_EN BIT(0)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
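
The LPI_THRESH_GET/SET rework above is a purely notational move to the generic FIELD_GET/FIELD_PREP helpers; the values produced are unchanged. A hypothetical spot-check (check_lpi_thresh() is illustrative only, with 0x123 chosen arbitrarily):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>

static void check_lpi_thresh(void)
{
	/* Old form: SET_LPI_THRESH(x) = ((x) << 4) & GENMASK(15, 4). */
	u32 old_set = (0x123 << 4) & GENMASK(15, 4);

	/* New form: LPI_THRESH_SET(x) = FIELD_PREP(LPI_THRESH_MASK, x). */
	u32 new_set = FIELD_PREP(GENMASK(15, 4), 0x123);

	WARN_ON(old_set != new_set);	/* both evaluate to 0x1230 */
}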
@@ -461,32 +496,30 @@ enum mt7531_clk_skew {
MT7531_CLK_SKEW_REVERSE = 3,
};
-/* Register for hw trap status */
-#define MT7530_HWTRAP 0x7800
-#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_40MHZ (BIT(10))
-#define HWTRAP_XTAL_20MHZ (BIT(9))
-
-#define MT7531_HWTRAP 0x7800
-#define HWTRAP_XTAL_FSEL_MASK BIT(7)
-#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
-#define HWTRAP_XTAL_FSEL_40MHZ 0
-/* Unique fields of (M)HWSTRAP for MT7531 */
-#define XTAL_FSEL_S 7
-#define XTAL_FSEL_M BIT(7)
-#define PHY_EN BIT(6)
-#define CHG_STRAP BIT(8)
-
-/* Register for hw trap modification */
-#define MT7530_MHWTRAP 0x7804
-#define MHWTRAP_PHY0_SEL BIT(20)
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
+/* Register for trap status */
+#define MT753X_TRAP 0x7800
+#define MT7530_XTAL_MASK (BIT(10) | BIT(9))
+#define MT7530_XTAL_25MHZ (BIT(10) | BIT(9))
+#define MT7530_XTAL_40MHZ BIT(10)
+#define MT7530_XTAL_20MHZ BIT(9)
+#define MT7531_XTAL25 BIT(7)
+
+/* Register for trap modification */
+#define MT753X_MTRAP 0x7804
+#define MT7530_P5_PHY0_SEL BIT(20)
+#define MT7530_CHG_TRAP BIT(16)
+#define MT7530_P5_MAC_SEL BIT(13)
+#define MT7530_P6_DIS BIT(8)
+#define MT7530_P5_RGMII_MODE BIT(7)
+#define MT7530_P5_DIS BIT(6)
+#define MT7530_PHY_INDIRECT_ACCESS BIT(5)
+#define MT7531_CHG_STRAP BIT(8)
+#define MT7531_PHY_EN BIT(6)
+
+enum mt7531_xtal_fsel {
+ MT7531_XTAL_FSEL_25MHZ,
+ MT7531_XTAL_FSEL_40MHZ,
+};
/* Register for TOP signal control */
#define MT7530_TOP_SIG_CTRL 0x7808
@@ -616,10 +649,11 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
-#define MT753X_CTRL_PHY_ADDR 0
+#define MT753X_CTRL_PHY_ADDR(addr) ((addr + 1) & 0x1f)
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
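
MT753X_CTRL_PHY_ADDR() now derives the control PHY address from the switch's own MDIO address instead of hard-coding 0. A hypothetical worked example (check_ctrl_phy_addr() is illustrative only; address 31 is assumed here because it is a common strap for the MT7530 in device trees):

#include <linux/bug.h>

static void check_ctrl_phy_addr(void)
{
	/* Switch at MDIO address 31: (31 + 1) & 0x1f == 0, matching the
	 * value the old hard-coded macro used.
	 */
	WARN_ON(((31 + 1) & 0x1f) != 0);

	/* Switch at MDIO address 7: the control PHY sits at address 8. */
	WARN_ON(((7 + 1) & 0x1f) != 8);
}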
@@ -692,12 +726,11 @@ struct mt7530_port {
struct phylink_pcs *sgmii_pcs;
};
-/* Port 5 interface select definitions */
-enum p5_interface_select {
- P5_DISABLED,
- P5_INTF_SEL_PHY_P0,
- P5_INTF_SEL_PHY_P4,
- P5_INTF_SEL_GMAC5,
+/* Port 5 mode definitions of the MT7530 switch */
+enum mt7530_p5_mode {
+ GMAC5,
+ MUX_PHY_P0,
+ MUX_PHY_P4,
};
struct mt7530_priv;
@@ -710,15 +743,14 @@ struct mt753x_pcs {
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
+ * @id: Holding the identifier to a switch model
+ * @pcs_ops: Holding the pointer to the MAC PCS operations structure
* @sw_setup: Holding the handler to a device initialization
* @phy_read_c22: Holding the way reading PHY port using C22
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @phy_mode_supported: Check if the PHY type is being supported on a certain
- * port
- * @mac_port_validate: Holding the way to set addition validate type for a
- * certan MAC port
+ * @mac_port_get_caps: Holding the handler that provides MAC capabilities
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
*/
@@ -737,9 +769,6 @@ struct mt753x_info {
int regnum, u16 val);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- void (*mac_port_validate)(struct dsa_switch *ds, int port,
- phy_interface_t interface,
- unsigned long *supported);
void (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -760,7 +789,7 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
* @irq: IRQ number of the switch
@@ -768,6 +797,7 @@ struct mt753x_info {
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
+ * @mdiodev: The pointer to the MDIO device structure
*/
struct mt7530_priv {
struct device *dev;
@@ -781,7 +811,7 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- enum p5_interface_select p5_intf_sel;
+ enum mt7530_p5_mode p5_mode;
bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
@@ -794,6 +824,7 @@ struct mt7530_priv {
u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
+ struct mdio_device *mdiodev;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9ed1821184ec..07c897b13de1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
- mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
- list);
+ mdio_bus = list_first_entry_or_null(&chip->mdios,
+ struct mv88e6xxx_mdio_bus, list);
if (!mdio_bus)
return NULL;
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
@@ -589,12 +637,12 @@ static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_1000FD;
}
-static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port)
{
u16 reg, val;
int err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
@@ -603,16 +651,16 @@ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
return 0xf;
val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val);
if (err)
return err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val);
if (err)
return err;
/* Restore PHY_DETECT value */
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
@@ -640,7 +688,30 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
if (err <= 0)
return;
- cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
+}
+
+static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 0/1 are serdes only ports */
+ if (port == 0 || port == 1) {
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
if (cmode < 0)
dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
port);
@@ -790,24 +861,27 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
}
}
-static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+mv88e6xxx_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
if (chip->info->ops->pcs_ops)
- pcs = chip->info->ops->pcs_ops->pcs_select(chip, port,
+ pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index,
interface);
return pcs;
}
-static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_prepare(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* In inband mode, the link may come up at any time while the link
@@ -826,11 +900,13 @@ static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -846,13 +922,15 @@ err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
+ dev_err(chip->dev, "p%d: failed to configure MAC/PCS\n", port);
}
-static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_finish(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* Undo the forced down state above after completing configuration
@@ -876,12 +954,14 @@ static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -904,14 +984,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
"p%d: failed to force MAC link down\n", port);
}
-static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void mv88e6xxx_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -937,7 +1019,7 @@ error:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev,
+ dev_err(chip->dev,
"p%d: failed to configure MAC link up\n", port);
}
@@ -3075,6 +3157,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
+ int err;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
@@ -3083,17 +3166,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* mid-byte, causing the first EEPROM read after the reset
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
+ * For this reason, switch families with EEPROM support
+ * generally wait for EEPROM loads to complete as their pre-
+ * and post-reset handlers.
*/
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_pre) {
+ err = chip->info->ops->hardware_reset_pre(chip);
+ if (err)
+ dev_err(chip->dev, "pre-reset error: %d\n", err);
+ }
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_post) {
+ err = chip->info->ops->hardware_reset_post(chip);
+ if (err)
+ dev_err(chip->dev, "post-reset error: %d\n", err);
+ }
}
}
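The hunk above generalises the fixed EEPROM wait into optional hardware_reset_pre/hardware_reset_post hooks. A standalone sketch of that optional-callback pattern follows; demo_ops, wait_eeprom() and hardware_reset() are hypothetical stand-ins, not the kernel symbols.

#include <stdio.h>

struct demo_ops {
	int (*reset_pre)(void);		/* optional: may be NULL */
	int (*reset_post)(void);	/* optional: may be NULL */
};

static int wait_eeprom(void)
{
	/* pretend the EEPROM load completed successfully */
	return 0;
}

static void hardware_reset(const struct demo_ops *ops)
{
	int err;

	if (ops->reset_pre) {
		err = ops->reset_pre();
		if (err)
			fprintf(stderr, "pre-reset error: %d\n", err);
	}

	/* ... toggle the reset GPIO here ... */

	if (ops->reset_post) {
		err = ops->reset_post();
		if (err)
			fprintf(stderr, "post-reset error: %d\n", err);
	}
}

int main(void)
{
	const struct demo_ops ops = {
		.reset_pre = wait_eeprom,
		.reset_post = wait_eeprom,
	};

	hardware_reset(&ops);
	return 0;
}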
@@ -4323,6 +4415,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4513,6 +4607,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4613,6 +4709,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4707,6 +4805,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4765,6 +4865,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4821,6 +4923,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4880,6 +4984,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4933,6 +5039,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
+ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -4980,6 +5088,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5039,13 +5149,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -5085,13 +5197,15 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -5135,6 +5249,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5290,6 +5406,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5352,6 +5470,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5414,6 +5534,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5479,6 +5601,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5503,8 +5627,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6020",
.num_databases = 64,
- .num_ports = 4,
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
.num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
.max_vid = 4095,
.port_base_addr = 0x8,
.phy_base_addr = 0x0,
@@ -5653,7 +5781,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -6112,7 +6240,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
@@ -6918,6 +7046,15 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
return err_sync ? : err_pvt;
}
+static const struct phylink_mac_ops mv88e6xxx_phylink_mac_ops = {
+ .mac_select_pcs = mv88e6xxx_mac_select_pcs,
+ .mac_prepare = mv88e6xxx_mac_prepare,
+ .mac_config = mv88e6xxx_mac_config,
+ .mac_finish = mv88e6xxx_mac_finish,
+ .mac_link_down = mv88e6xxx_mac_link_down,
+ .mac_link_up = mv88e6xxx_mac_link_up,
+};
+
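With the switch to struct phylink_mac_ops, each callback now receives a struct phylink_config and recovers its port via dsa_phylink_to_port(), which is a container_of()-style lookup on the embedded config. A simplified, self-contained illustration of that pattern; all types and names below are stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_config { int dummy; };

struct demo_port {
	int index;
	struct demo_config pl_config;	/* embedded phylink-style config */
};

/* Stand-in for dsa_phylink_to_port() */
static struct demo_port *config_to_port(struct demo_config *config)
{
	return container_of(config, struct demo_port, pl_config);
}

int main(void)
{
	struct demo_port port = { .index = 5 };
	struct demo_config *config = &port.pl_config;

	printf("recovered port index: %d\n", config_to_port(config)->index);
	return 0;
}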
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
@@ -6926,12 +7063,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
.phylink_get_caps = mv88e6xxx_get_caps,
- .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs,
- .phylink_mac_prepare = mv88e6xxx_mac_prepare,
- .phylink_mac_config = mv88e6xxx_mac_config,
- .phylink_mac_finish = mv88e6xxx_mac_finish,
- .phylink_mac_link_down = mv88e6xxx_mac_link_down,
- .phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats,
@@ -7000,6 +7131,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
+ ds->phylink_mac_ops = &mv88e6xxx_phylink_mac_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 85eb293381a7..c34caf9815c5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -487,6 +487,12 @@ struct mv88e6xxx_ops {
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ /* Additional handlers to run before and after a hard reset, to make sure
+ * that the switch and EEPROM are in a good state.
+ */
+ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
+ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
+
/* Switch Software Reset */
int (*reset)(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 49444a72ff09..9820cd596757 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
+{
+ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
+ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
+}
+
+/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
+static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err < 0) {
+ dev_err(chip->dev, "Error reading status");
+ return err;
+ }
+
+ /* If the switch is still resetting, it may not respond on the bus,
+ * and so the MDIO read returns 0xffff. Differentiate between that
+ * case and the EEPROM-done bit (bit 0) simply not having been set
+ * yet.
+ */
+ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* As the EEInt (EEPROM done) flag clears on read of the status register, this
+ * function must be called directly after a hard reset or EEPROM ReLoad request,
+ * or the done condition may have been missed.
+ */
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ int ret;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+ return -ETIMEDOUT;
+}
+
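A user-space analogue of the deadline loop in mv88e6xxx_g1_wait_eeprom_done() above, substituting CLOCK_MONOTONIC for jiffies; check_done() and wait_done() are hypothetical stand-ins, and like the in-kernel loop this one polls without sleeping between reads.

#include <errno.h>
#include <stdio.h>
#include <time.h>

static int check_done(void)
{
	/* pretend the hardware is still busy */
	return -EBUSY;
}

static int wait_done(long timeout_ms)
{
	struct timespec start, now;
	int ret;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		ret = check_done();
		if (ret != -EBUSY)
			return ret;	/* 0 on success, other errors fatal */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -ETIMEDOUT;
	}
}

int main(void)
{
	printf("wait_done() returned %d\n", wait_done(1000));
	return 0;
}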
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* Pre-reset, we don't know the state of the switch - when
+ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
+ * the switch is actually busy reading the EEPROM, or because
+ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
+ * status register read already.
+ *
+ * To account for the latter case, trigger another EEPROM reload for
+ * another chance at seeing the done flag.
+ */
+ ret = mv88e6250_g1_eeprom_reload(chip);
+ if (ret)
+ return ret;
+
+ return mv88e6xxx_g1_wait_eeprom_done(chip);
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1095261f5b49..3dbb7a1b8fe1 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347cbc..ddadeb9bfdae 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3c5509e75a54..85952d841f28 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1755,6 +1755,9 @@ static int vsc9959_stream_identify(struct flow_cls_offload *f,
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
return -EOPNOTSUPP;
+ if (flow_rule_match_has_control_flags(rule, f->common.extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 8d9d271ac3af..968cb81088bf 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -523,28 +523,30 @@ static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ar9331_sw_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(dp->index),
AR9331_SW_PORT_STATUS_LINK_EN |
AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
-static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
@@ -552,23 +554,24 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
- cancel_delayed_work_sync(&p->mib_read);
+ cancel_delayed_work_sync(&priv->port[port].mib_read);
}
-static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
u32 val;
int ret;
- schedule_delayed_work(&p->mib_read, 0);
+ schedule_delayed_work(&priv->port[port].mib_read, 0);
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
@@ -684,14 +687,17 @@ static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static const struct phylink_mac_ops ar9331_phylink_mac_ops = {
+ .mac_config = ar9331_sw_phylink_mac_config,
+ .mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
.phylink_get_caps = ar9331_sw_phylink_get_caps,
- .phylink_mac_config = ar9331_sw_phylink_mac_config,
- .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
- .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
.get_pause_stats = ar9331_get_pause_stats,
};
@@ -1059,6 +1065,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->priv = priv;
priv->ops = ar9331_sw_ops;
ds->ops = &priv->ops;
+ ds->phylink_mac_ops = &ar9331_phylink_mac_ops;
dev_set_drvdata(&mdiodev->dev, priv);
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index dab66c0c6f64..b3c27cf538e8 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1283,11 +1283,13 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
}
static struct phylink_pcs *
-qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+qca8k_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1311,13 +1313,18 @@ qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct qca8k_priv *priv;
+ int port = dp->index;
int cpu_port_index;
u32 reg;
+ priv = ds->priv;
+
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
@@ -1426,20 +1433,24 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
- qca8k_port_set_status(priv, port, 0);
+ qca8k_port_set_status(priv, dp->index, 0);
}
static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+qca8k_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+ int port = dp->index;
u32 reg;
if (phylink_autoneg_inband(mode)) {
@@ -1463,10 +1474,10 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
- if (rx_pause || dsa_is_cpu_port(ds, port))
+ if (rx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_RXFLOW;
- if (tx_pause || dsa_is_cpu_port(ds, port))
+ if (tx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
@@ -1991,6 +2002,13 @@ qca8k_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops qca8k_phylink_mac_ops = {
+ .mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .mac_config = qca8k_phylink_mac_config,
+ .mac_link_down = qca8k_phylink_mac_link_down,
+ .mac_link_up = qca8k_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
@@ -2021,10 +2039,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_get_caps = qca8k_phylink_get_caps,
- .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
@@ -2091,6 +2105,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
+ priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index e0b1aa01337b..a1b2e0b529d5 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -17,6 +17,7 @@
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
+struct phylink_mac_ops;
struct realtek_ops;
struct dentry;
struct inode;
@@ -117,6 +118,7 @@ struct realtek_ops {
struct realtek_variant {
const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 12665a8a3412..b9674f68b756 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1048,11 +1048,13 @@ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
phy_interface_set_rgmii(config->supported_interfaces);
}
-static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ u8 port = dp->index;
int ret;
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
@@ -1076,13 +1078,15 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
*/
}
-static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -1101,16 +1105,18 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
}
}
-static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -2106,15 +2112,18 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8365mb_phylink_mac_ops = {
+ .mac_config = rtl8365mb_phylink_mac_config,
+ .mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .mac_link_up = rtl8365mb_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
.phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -2136,6 +2145,7 @@ static const struct realtek_ops rtl8365mb_ops = {
const struct realtek_variant rtl8365mb_variant = {
.ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
+ .phylink_mac_ops = &rtl8365mb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e10ae94cf771..9e821b42e5f3 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -176,6 +176,7 @@
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
#define RTL8366RB_LED_BLINKRATE_REG 0x0430
#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
@@ -185,27 +186,27 @@
#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+/* LED trigger event for each group */
#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_OFF 0x0
-#define RTL8366RB_LED_DUP_COL 0x1
-#define RTL8366RB_LED_LINK_ACT 0x2
-#define RTL8366RB_LED_SPD1000 0x3
-#define RTL8366RB_LED_SPD100 0x4
-#define RTL8366RB_LED_SPD10 0x5
-#define RTL8366RB_LED_SPD1000_ACT 0x6
-#define RTL8366RB_LED_SPD100_ACT 0x7
-#define RTL8366RB_LED_SPD10_ACT 0x8
-#define RTL8366RB_LED_SPD100_10_ACT 0x9
-#define RTL8366RB_LED_FIBER 0xa
-#define RTL8366RB_LED_AN_FAULT 0xb
-#define RTL8366RB_LED_LINK_RX 0xc
-#define RTL8366RB_LED_LINK_TX 0xd
-#define RTL8366RB_LED_MASTER 0xe
-#define RTL8366RB_LED_FORCE 0xf
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, these registers are ignored.
+ */
#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_1_OFFSET 6
#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_3_OFFSET 6
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
@@ -349,14 +350,44 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
* @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
bool pvid_enabled[RTL8366RB_NUM_PORTS];
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -799,6 +830,217 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
+static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
+{
+ int ret;
+ u32 val;
+
+ val = mode << RTL8366RB_LED_CTRL_OFFSET(led_group);
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ RTL8366RB_LED_CTRL_MASK(led_group),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
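The defines and helper above pack one bit per port into each LED control register, with groups 0/2 in bits 5:0 and groups 1/3 in bits 11:6. A small stand-alone sketch of that bit-position arithmetic using simplified GENMASK()/FIELD_PREP() equivalents; led_bit() is an illustrative name, and the group-to-mask mapping reflects the layout described by the defines.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_SHIFT(m)	(__builtin_ctz(m))
#define FIELD_PREP(m, v) (((uint32_t)(v) << FIELD_SHIFT(m)) & (m))

#define LED_LOW_MASK	GENMASK(5, 0)	/* groups 0 and 2 */
#define LED_HIGH_MASK	GENMASK(11, 6)	/* groups 1 and 3 */

/* Return the bit for (led_group, port) within its control register */
static uint32_t led_bit(unsigned int led_group, unsigned int port)
{
	uint32_t mask = (led_group & 1) ? LED_HIGH_MASK : LED_LOW_MASK;

	return FIELD_PREP(mask, 1u << port);
}

int main(void)
{
	/* port 3 in group 1 -> bit 9 of the LED_0_1 control register */
	printf("group 1, port 3 -> 0x%04x\n", led_bit(1, 3));
	return 0;
}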
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ port_num, led_group);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ port_num, led_group);
+ return ret;
+ }
+
+ /* Change the LED group to manual controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+{
+ int ret = 0;
+ int i;
+
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+
+ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
+ ret = rb8366rb_set_ledgroup_mode(priv, i,
+ RTL8366RB_LEDGROUP_OFF);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct device_node *leds_np, *led_np;
+ struct dsa_switch *ds = &priv->ds;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret) {
+ of_node_put(led_np);
+ break;
+ }
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
@@ -807,7 +1049,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
- u32 val;
int ret;
int i;
@@ -987,7 +1228,9 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Set blinking, TODO: make this configurable */
+ /* Set the blink rate, shared by all LED groups that use HW triggers.
+ * TODO: make this configurable
+ */
ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
@@ -995,32 +1238,17 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
/* Set up LED activity:
- * Each port has 4 LEDs, we configure all ports to the same
- * behaviour (no individual config) but we can set up each
- * LED separately.
+ * Each port has 4 LEDs in fixed groups. Each group shares the same
+ * hardware trigger across all ports. LEDs can only be individually
+ * controlled by setting the LED group to fixed (forced) mode and
+ * using the driver to toggle the LEDs on/off.
*/
if (priv->leds_disabled) {
- /* Turn everything off */
- regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- 0);
- val = RTL8366RB_LED_OFF;
+ ret = rtl8366rb_setup_all_leds_off(priv);
+ if (ret)
+ return ret;
} else {
- /* TODO: make this configurable per LED */
- val = RTL8366RB_LED_FORCE;
- }
- for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_CTRL_REG,
- 0xf << (i * 4),
- val << (i * 4));
+ ret = rtl8366rb_setup_leds(priv);
if (ret)
return ret;
}
@@ -1077,11 +1305,19 @@ static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
+rtl8366rb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+rtl8366rb_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
int ret;
@@ -1147,10 +1383,12 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
}
static void
-rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+rtl8366rb_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
int ret;
if (port != priv->cpu_port)
@@ -1167,52 +1405,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void rb8366rb_set_port_led(struct realtek_priv *priv,
- int port, bool enable)
-{
- u16 val = enable ? 0x3f : 0;
- int ret;
-
- if (priv->leds_disabled)
- return;
-
- switch (port) {
- case 0:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F, val);
- break;
- case 1:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F << RTL8366RB_LED_1_OFFSET,
- val << RTL8366RB_LED_1_OFFSET);
- break;
- case 2:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F, val);
- break;
- case 3:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F << RTL8366RB_LED_3_OFFSET,
- val << RTL8366RB_LED_3_OFFSET);
- break;
- case 4:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- enable ? RTL8366RB_P4_RGMII_LED : 0);
- break;
- default:
- dev_err(priv->dev, "no LED for port %d\n", port);
- return;
- }
- if (ret)
- dev_err(priv->dev, "error updating LED on port %d\n", port);
-}
-
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1226,7 +1418,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
if (ret)
return ret;
- rb8366rb_set_port_led(priv, port, true);
return 0;
}
@@ -1241,8 +1432,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
BIT(port));
if (ret)
return;
-
- rb8366rb_set_port_led(priv, port, false);
}
static int
@@ -1849,12 +2038,16 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8366rb_phylink_mac_ops = {
+ .mac_config = rtl8366rb_mac_config,
+ .mac_link_down = rtl8366rb_mac_link_down,
+ .mac_link_up = rtl8366rb_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1892,6 +2085,7 @@ static const struct realtek_ops rtl8366rb_ops = {
const struct realtek_variant rtl8366rb_variant = {
.ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
+ .phylink_mac_ops = &rtl8366rb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index d2e876805393..35709a1756ae 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -236,6 +236,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv)
ds->priv = priv;
ds->dev = priv->dev;
ds->ops = priv->variant->ds_ops;
+ ds->phylink_mac_ops = priv->variant->phylink_mac_ops;
ds->num_ports = priv->num_ports;
ret = dsa_register_switch(ds);
@@ -290,16 +291,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
* rtl83xx_remove() - Cleanup a realtek switch driver
* @priv: realtek_priv pointer
*
- * If a method is provided, this function asserts the hard reset of the switch
- * in order to avoid leaking traffic when the driver is gone.
+ * Placeholder for common cleanup procedures.
*
- * Context: Might sleep if priv->gdev->chip->can_sleep.
+ * Context: Any
* Return: nothing
*/
void rtl83xx_remove(struct realtek_priv *priv)
{
- /* leave the device reset asserted */
- rtl83xx_reset_assert(priv);
}
EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 10092ea85e46..92e032972b34 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -239,23 +239,31 @@ static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
}
static struct phylink_pcs *
-a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+a5psw_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
- if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
- return a5psw->pcs[port];
+ if (dsa_port_is_cpu(dp))
+ return NULL;
- return NULL;
+ return a5psw->pcs[dp->index];
+}
+
+static void a5psw_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
}
-static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+ int port = dp->index;
u32 cmd_cfg;
cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
@@ -263,15 +271,17 @@ static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
-static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
A5PSW_CMD_CFG_TX_CRC_APPEND;
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
if (speed == SPEED_1000)
cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
@@ -284,7 +294,7 @@ static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (!rx_pause)
cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
- a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg);
}
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
@@ -992,15 +1002,19 @@ static int a5psw_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops a5psw_phylink_mac_ops = {
+ .mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .mac_config = a5psw_phylink_mac_config,
+ .mac_link_down = a5psw_phylink_mac_link_down,
+ .mac_link_up = a5psw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops a5psw_switch_ops = {
.get_tag_protocol = a5psw_get_tag_protocol,
.setup = a5psw_setup,
.port_disable = a5psw_port_disable,
.port_enable = a5psw_port_enable,
.phylink_get_caps = a5psw_phylink_get_caps,
- .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
- .phylink_mac_link_down = a5psw_phylink_mac_link_down,
- .phylink_mac_link_up = a5psw_phylink_mac_link_up,
.port_change_mtu = a5psw_port_change_mtu,
.port_max_mtu = a5psw_port_max_mtu,
.get_sset_count = a5psw_get_sset_count,
@@ -1252,6 +1266,7 @@ static int a5psw_probe(struct platform_device *pdev)
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
ds->ops = &a5psw_switch_ops;
+ ds->phylink_mac_ops = &a5psw_phylink_mac_ops;
ds->priv = a5psw;
ret = dsa_register_switch(ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 9e8ca182c722..05d8ed3121e7 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -214,6 +214,9 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 6646f7fb0f90..ee0fb1c343f1 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1358,10 +1358,11 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
}
static struct phylink_pcs *
-sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
+sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs = priv->xpcs[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ struct dw_xpcs *xpcs = priv->xpcs[dp->index];
if (xpcs)
return &xpcs->pcs;
@@ -1369,21 +1370,31 @@ sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
return NULL;
}
-static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+static void sja1105_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void sja1105_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), true);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+
+ sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true);
}
-static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct sja1105_private *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ int port = dp->index;
sja1105_adjust_port_config(priv, port, speed);
@@ -3198,6 +3209,13 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_static_config_free(&priv->static_config);
}
+static const struct phylink_mac_ops sja1105_phylink_mac_ops = {
+ .mac_select_pcs = sja1105_mac_select_pcs,
+ .mac_config = sja1105_mac_config,
+ .mac_link_up = sja1105_mac_link_up,
+ .mac_link_down = sja1105_mac_link_down,
+};
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.connect_tag_protocol = sja1105_connect_tag_protocol,
@@ -3207,9 +3225,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_get_caps = sja1105_phylink_get_caps,
- .phylink_mac_select_pcs = sja1105_mac_select_pcs,
- .phylink_mac_link_up = sja1105_mac_link_up,
- .phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
@@ -3375,6 +3390,7 @@ static int sja1105_probe(struct spi_device *spi)
ds->dev = dev;
ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
+ ds->phylink_mac_ops = &sja1105_phylink_mac_ops;
ds->priv = priv;
priv->ds = ds;
@@ -3456,7 +3472,6 @@ MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
.id_table = sja1105_spi_ids,
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 833e55e4b961..52ddb4ef259e 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index ae70eac3be28..4b031fefcec6 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
@@ -268,6 +269,9 @@
#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_POLL_TIMEOUT_US 10000
+
struct vsc73xx_counter {
u8 counter;
const char *name;
@@ -713,51 +717,44 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
port, VSC73XX_C_RX0, 0);
}
-static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
- int port, struct phy_device *phydev,
- u32 initval)
+static void vsc73xx_reset_port(struct vsc73xx *vsc, int port, u32 initval)
{
- u32 val = initval;
- u8 seed;
-
- /* Reset this port FIXME: break out subroutine */
- val |= VSC73XX_MAC_CFG_RESET;
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
-
- /* Seed the port randomness with randomness */
- get_random_bytes(&seed, 1);
- val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
- val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+ int ret, err;
+ u32 val;
- /* Flow control for the PHY facing ports:
- * Use a zero delay pause frame when pause condition is left
- * Obey pause control frames
- * When generating pause frames, use 0xff as pause value
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
- VSC73XX_FCCONF_ZERO_PAUSE_EN |
- VSC73XX_FCCONF_FLOW_CTRL_OBEY |
- 0xff);
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
- /* Disallow backward dropping of frames from this port */
+ /* Discard packets */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_SBACKWDROP, BIT(port), 0);
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || (val & BIT(port)),
+ VSC73XX_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false,
+ vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (ret)
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ else if (err < 0)
+ dev_err(vsc->dev, "error reading arbiter\n");
- /* Enable TX, RX, deassert reset, stop loading seed */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
- VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
- VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | initval);
}
-static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void vsc73xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct vsc73xx *vsc = ds->priv;
- u32 val;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
/* Special handling of the CPU-facing port */
if (port == CPU_PORT) {
@@ -774,104 +771,93 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
VSC73XX_ADVPORTM_ENA_GTX |
VSC73XX_ADVPORTM_DDR_MODE);
}
+}
+
+static void vsc73xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
- /* This is the MAC confiuration that always need to happen
- * after a PHY or the CPU port comes up or down.
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
*/
- if (!phydev->link) {
- int maxloop = 10;
-
- dev_dbg(vsc->dev, "port %d: went down\n",
- port);
-
- /* Disable RX on this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RX_EN, 0);
-
- /* Discard packets */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), BIT(port));
-
- /* Wait until queue is empty */
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- while (!(val & BIT(port))) {
- msleep(1);
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- if (--maxloop == 0) {
- dev_err(vsc->dev,
- "timeout waiting for block arbiter\n");
- /* Continue anyway */
- break;
- }
- }
+ vsc73xx_reset_port(vsc, port, 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+
+ /* Receive mask (disable forwarding) */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), 0);
+}
+
+static void vsc73xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+ u32 val;
+ u8 seed;
- /* Put this port into reset */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RESET);
+ if (speed == SPEED_1000)
+ val = VSC73XX_MAC_CFG_GIGA_MODE | VSC73XX_MAC_CFG_TX_IPG_1000M;
+ else
+ val = VSC73XX_MAC_CFG_TX_IPG_100_10M;
- /* Accept packets again */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), 0);
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= VSC73XX_MAC_CFG_CLK_SEL_1000M;
+ else
+ val |= VSC73XX_MAC_CFG_CLK_SEL_EXT;
- /* Allow backward dropping of frames from this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+ if (duplex == DUPLEX_FULL)
+ val |= VSC73XX_MAC_CFG_FDX;
- /* Receive mask (disable forwarding) */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), 0);
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, val);
- return;
- }
+ /* Seed the port randomness with randomness */
+ get_random_bytes(&seed, 1);
+ val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
+ val |= VSC73XX_MAC_CFG_SEED_LOAD;
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
- /* Figure out what speed was negotiated */
- if (phydev->speed == SPEED_1000) {
- dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
- port);
-
- /* Set up default for internal port or external RGMII */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- val = VSC73XX_MAC_CFG_1000M_F_RGMII;
- else
- val = VSC73XX_MAC_CFG_1000M_F_PHY;
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_100) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_10) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else {
- dev_err(vsc->dev,
- "could not adjust link: unknown speed\n");
- }
+ /* Flow control for the PHY facing ports:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ * When generating pause frames, use 0xff as pause value
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY |
+ 0xff);
- /* Enable port (forwarding) in the receieve mask */
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
+ /* Enable port (forwarding) in the receive mask */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_RECVMASK, BIT(port), BIT(port));
+
+ /* Disallow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), 0);
+
+ /* Enable TX, RX, deassert reset, stop loading seed */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
@@ -1053,12 +1039,17 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
}
+static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
+ .mac_config = vsc73xx_mac_config,
+ .mac_link_down = vsc73xx_mac_link_down,
+ .mac_link_up = vsc73xx_mac_link_up,
+};
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
- .adjust_link = vsc73xx_adjust_link,
.get_strings = vsc73xx_get_strings,
.get_ethtool_stats = vsc73xx_get_ethtool_stats,
.get_sset_count = vsc73xx_get_sset_count,
@@ -1195,26 +1186,16 @@ int vsc73xx_probe(struct vsc73xx *vsc)
vsc->addr[0], vsc->addr[1], vsc->addr[2],
vsc->addr[3], vsc->addr[4], vsc->addr[5]);
- /* The VSC7395 switch chips have 5+1 ports which means 5
- * ordinary ports and a sixth CPU port facing the processor
- * with an RGMII interface. These ports are numbered 0..4
- * and 6, so they leave a "hole" in the port map for port 5,
- * which is invalid.
- *
- * The VSC7398 has 8 ports, port 7 is again the CPU port.
- *
- * We allocate 8 ports and avoid access to the nonexistant
- * ports.
- */
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
- vsc->ds->num_ports = 8;
+ vsc->ds->num_ports = VSC73XX_MAX_NUM_PORTS;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
+ vsc->ds->phylink_mac_ops = &vsc73xx_phylink_mac_ops;
ret = dsa_register_switch(vsc->ds);
if (ret) {
dev_err(dev, "unable to register switch (%d)\n", ret);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b1f0a36566..2997f7e108b1 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -3,8 +3,28 @@
#include <linux/etherdevice.h>
#include <linux/gpio/driver.h>
+/* The VSC7395 switch chips have 5+1 ports which means 5 ordinary ports and
+ * a sixth CPU port facing the processor with an RGMII interface. These ports
+ * are numbered 0..4 and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent ports.
+ */
+#define VSC73XX_MAX_NUM_PORTS 8
+
/**
- * struct vsc73xx - VSC73xx state container
+ * struct vsc73xx - VSC73xx state container: main data structure
+ * @dev: The device pointer
+ * @reset: The descriptor for the GPIO line tied to the reset pin
+ * @ds: Pointer to the DSA core structure
+ * @gc: Main structure of the GPIO controller
+ * @chipid: Storage for the Chip ID value read from the CHIPID register of the
+ * switch
+ * @addr: MAC address used in flow control frames
+ * @ops: Structure with hardware-dependent operations
+ * @priv: Pointer to the configuration interface structure
*/
struct vsc73xx {
struct device *dev;
@@ -17,6 +37,11 @@ struct vsc73xx {
void *priv;
};
+/**
+ * struct vsc73xx_ops - VSC73xx methods container
+ * @read: Method for register reading over the hardware-dependent interface
+ * @write: Method for register writing over the hardware-dependent interface
+ */
struct vsc73xx_ops {
int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 96db032b478f..de3b768f2ff9 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -466,13 +466,25 @@ static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void xrs700x_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void xrs700x_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void xrs700x_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct xrs700x *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct xrs700x *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
switch (speed) {
@@ -699,13 +711,18 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static const struct phylink_mac_ops xrs700x_phylink_mac_ops = {
+ .mac_config = xrs700x_mac_config,
+ .mac_link_down = xrs700x_mac_link_down,
+ .mac_link_up = xrs700x_mac_link_up,
+};
+
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
.phylink_get_caps = xrs700x_phylink_get_caps,
- .phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
.get_ethtool_stats = xrs700x_get_ethtool_stats,
@@ -763,6 +780,7 @@ struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
ds->ops = &xrs700x_ops;
+ ds->phylink_mac_ops = &xrs700x_phylink_mac_ops;
ds->priv = priv;
priv->dev = base;
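
The sja1105, vsc73xx and xrs700x changes above all follow the same conversion: the phylink callbacks move out of struct dsa_switch_ops into a driver-owned struct phylink_mac_ops, and each callback recovers its port and private data through dsa_phylink_to_port(). A minimal sketch of that shape, with demo_* as placeholder names rather than any of the drivers above:

#include <linux/phylink.h>
#include <net/dsa.h>

struct demo_priv {
	int some_state;
};

static void demo_mac_link_down(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct demo_priv *priv = dp->ds->priv;
	int port = dp->index;

	/* per-port link-down handling would go here */
	priv->some_state = port;
}

static const struct phylink_mac_ops demo_phylink_mac_ops = {
	/* .mac_config and .mac_link_up are filled in the same way */
	.mac_link_down	= demo_mac_link_down,
};

/* In the probe path, the ops are attached to the dsa_switch instead of
 * setting .phylink_mac_* members in dsa_switch_ops:
 *
 *	ds->phylink_mac_ops = &demo_phylink_mac_ops;
 */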
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index ba3e7aa1a28f..4725a8cfd695 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -31,9 +31,6 @@
Setting to > 1512 effectively disables this feature. */
static int rx_copybreak = 200;
-/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
-static const int mtu = 1500;
-
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5267e9dcd87e..be58dac0502a 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -502,7 +502,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else {
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 706bd59bf645..1fbab79e2be4 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -44,7 +44,7 @@ config 3C515
config PCMCIA_3C574
tristate "3Com 3c574 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
(PC-card) Fast Ethernet card to your computer.
@@ -54,7 +54,7 @@ config PCMCIA_3C574
config PCMCIA_3C589
tristate "3Com 3c589 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
(PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a4130e643342..345f250781c6 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_8390
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach an Asix AX88190-based PCMCIA
(PC-card) Fast Ethernet card to your computer. These cards are
@@ -117,7 +117,7 @@ config NE2000
config NE2K_PCI
tristate "PCI NE2000 and clones support (see help)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
help
This driver is for NE2000 compatible PCI cards. It will not work
@@ -146,7 +146,7 @@ config APNE
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach an NE2000 compatible PCMCIA
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 05d39ecb97ff..e876fe52399a 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -258,7 +258,7 @@ static int etherh_set_config(struct net_device *dev, struct ifmap *map)
* media type, turn off automedia detection.
*/
dev->flags &= ~IFF_AUTOMEDIA;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
break;
default:
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9bd5e991f1e5..780fb4afb6af 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -994,7 +994,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 8b4ef5121308..0713f1e2c7f3 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -11,10 +11,10 @@
#include <linux/crc8.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3d9220f9c9fe..b325e0cef120 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3852,7 +3852,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_disable_txrx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
et131x_adapter_memory_free(adapter);
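
The ndo_change_mtu() hunks in this series (et131x above, plus acenic, altera, ena and others that follow) turn plain stores to dev->mtu and dev->if_port into WRITE_ONCE(), which pairs with lockless READ_ONCE() readers that sample these fields without holding RTNL. A small sketch of the pairing, with hypothetical demo_* helpers:

#include <linux/netdevice.h>

/* Writer side: ndo_change_mtu() runs under RTNL, but marks the store so
 * that concurrent lockless readers see a consistent value.
 */
static void demo_store_mtu(struct net_device *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);
}

/* Reader side: e.g. a stats or netlink dump path not holding RTNL. */
static unsigned int demo_sample_mtu(struct net_device *dev)
{
	return READ_ONCE(dev->mtu);
}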
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index eafef84fe3be..3d8ac63132fb 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2539,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
struct ace_regs __iomem *regs = ap->regs;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c8763be0e4b..3c112c18ae6a 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -788,7 +788,7 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 9e9e4a03f1a8..2d8a66ea82fa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -351,7 +351,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index fea57eb8e58b..924f03f5a6c7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -47,7 +47,7 @@
/* ENA adaptive interrupt moderation settings */
#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
#define ENA_HASH_KEY_SIZE 40
@@ -305,6 +305,8 @@ struct ena_com_dev {
u16 stats_func; /* Selected function for extended statistic dump */
u16 stats_queue; /* Selected queue for extended statistic dump */
+ u32 ena_min_poll_delay_us;
+
struct ena_com_mmio_read mmio_read;
struct ena_rss rss;
@@ -325,8 +327,6 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
-
- u32 ena_min_poll_delay_us;
};
struct ena_com_dev_get_features_ctx {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 933e619b3a31..4c6e07aa4bbb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
- u16 *first_cdesc_idx)
+static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *num_descs)
{
+ u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
- u16 count = 0, head_masked;
u32 last = 0;
do {
+ u32 status;
+
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
+ status = READ_ONCE(cdesc->status);
ena_com_cq_inc_head(io_cq);
+ if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
+ struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+
+ netdev_err(dev->net_device,
+ "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
+ count, io_cq->qid, cdesc->req_id);
+ return -EFAULT;
+ }
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
- count += io_cq->cur_rx_pkt_cdesc_count;
head_masked = io_cq->head & (io_cq->q_depth - 1);
+ *num_descs = count;
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
@@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
- io_cq->cur_rx_pkt_cdesc_count += count;
- count = 0;
+ io_cq->cur_rx_pkt_cdesc_count = count;
+ *num_descs = 0;
}
- return count;
+ return 0;
}
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
@@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
+ int rc;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (unlikely(rc != 0))
+ return -EFAULT;
+
if (nb_hw_desc == 0) {
ena_rx_ctx->descs = nb_hw_desc;
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 72b019758caa..449bc4960ccc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -47,7 +47,7 @@ struct ena_com_rx_ctx {
bool frag;
u32 hash;
u16 descs;
- int max_bufs;
+ u16 max_bufs;
u8 pkt_offset;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 0cb6cc1cef56..b24cc3f05248 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -49,6 +49,7 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(interface_up),
ENA_STAT_GLOBAL_ENTRY(interface_down),
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+ ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
static const struct ena_stats ena_stats_eni_strings[] = {
@@ -459,10 +460,18 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ena_adapter *adapter = netdev_priv(dev);
-
- strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(adapter->pdev),
- sizeof(info->bus_info));
+ ssize_t ret = 0;
+
+ ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "module name will be truncated, status = %zd\n", ret);
+
+ ret = strscpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "bus info will be truncated, status = %zd\n", ret);
}
static void ena_get_ringparam(struct net_device *netdev,
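
The ena_get_drvinfo() hunk above (and the ena_config_host_info() hunk that follows) starts checking strscpy()'s return value, which is the number of bytes copied or -E2BIG when the source string had to be truncated. A minimal sketch of that check, with demo_copy_name() as a hypothetical helper:

#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	/* A negative return means dst holds a truncated but still
	 * NUL-terminated copy of src.
	 */
	if (ret < 0)
		pr_debug("name truncated, status = %zd\n", ret);
}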
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 09e7da1a69c9..184b6e6cbed4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -104,7 +104,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
if (!ret) {
netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
} else {
netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
new_mtu);
@@ -718,8 +718,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -739,10 +742,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -1339,6 +1347,8 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else if (rc == -EFAULT) {
+ ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
@@ -2693,6 +2703,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
{
struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
+ ssize_t ret;
int rc;
/* Allocate only the host info */
@@ -2707,11 +2718,19 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = pci_dev_id(pdev);
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strscpy(host_info->kernel_ver_str, utsname()->version,
- sizeof(host_info->kernel_ver_str) - 1);
+ ret = strscpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "kernel version string will be truncated, status = %zd\n", ret);
+
host_info->os_dist = 0;
- strscpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str));
+ ret = strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "OS distribution string will be truncated, status = %zd\n", ret);
+
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -3227,14 +3246,15 @@ err_disable_msix:
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ int rc = 0;
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- return;
+ return 0;
netif_carrier_off(netdev);
@@ -3252,7 +3272,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
- ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
@@ -3271,6 +3291,8 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ return rc;
}
static int ena_restore_device(struct ena_adapter *adapter)
@@ -3347,14 +3369,17 @@ err:
static void ena_fw_reset_device(struct work_struct *work)
{
+ int rc = 0;
+
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
rtnl_lock();
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+ rc |= ena_destroy_device(adapter, false);
+ rc |= ena_restore_device(adapter);
+ adapter->dev_stats.reset_fail += !!rc;
dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
@@ -3481,10 +3506,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3497,27 +3523,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6d2cc20210cc..d59509747d1a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -290,6 +290,7 @@ struct ena_stats_dev {
u64 admin_q_pause;
u64 rx_drops;
u64 tx_drops;
+ u64 reset_fail;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 2c3d6a77ea79..a2efebafd686 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
+ ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index 337c435d3ce9..5b175e7e92a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
- return rc;
+ goto err;
ena_tx_ctx.req_id = req_id;
@@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
error_unmap_dma:
ena_unmap_tx_buff(tx_ring, tx_info);
+err:
tx_info->xdpf = NULL;
+
return rc;
}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f8cc8925161c..b39c6f3e1eda 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -56,7 +56,7 @@ config LANCE
config PCNET32
tristate "AMD PCnet32 PCI support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -122,7 +122,7 @@ config MVME147_NET
config PCMCIA_NMCLAN
tristate "New Media PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a New Media Ethernet or LiveWire
PCMCIA (PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index ea6cfc2095e1..f64f96fa17cf 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1520,9 +1520,9 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev)) {
/* new_mtu will be used
- * when device starts netxt time
+ * when device starts next time
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1531,7 +1531,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
/* stop the chip */
writel(RUN, lp->mmio + CMD0);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
@@ -1796,7 +1796,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev;
- lp->pm_cap = pdev->pm_cap;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 9d570adb295b..305232f5476d 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -764,7 +764,6 @@ struct amd8111e_priv{
u32 ext_phy_id;
struct amd8111e_link_config link_config;
- int pm_cap;
struct net_device *next;
int mii;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 0dd391c84c13..37054a670407 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -760,7 +760,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9662ee72814c..536635e57727 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -593,6 +593,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@@ -607,7 +617,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pci_reset_function(pdsc->pdev);
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
void pdsc_health_thread(struct work_struct *work)
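
The pds_core change above stops calling pci_reset_function() directly from the health thread and instead queues a dedicated work item, avoiding the deadlock noted in the comment. A minimal sketch of that deferral pattern, with demo_* as placeholder names:

#include <linux/container_of.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct reset_work;
};

static void demo_reset_thread(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, reset_work);

	/* the heavyweight recovery (e.g. a PCI function reset) runs here,
	 * outside the context that detected the failure
	 */
	(void)dd;
}

static void demo_init(struct demo_dev *dd)
{
	INIT_WORK(&dd->reset_work, demo_reset_thread);
}

static void demo_on_bad_health(struct demo_dev *dd)
{
	schedule_work(&dd->reset_work);	/* or queue_work() on a driver wq */
}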
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 92d7657dd614..14522d6d5f86 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@@ -255,7 +256,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
@@ -313,5 +315,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e494e1298dc9..495ef4ef8c10 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 54864f27c87a..2681889162a2 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -37,7 +37,8 @@ int pdsc_dl_enable_get(struct devlink *dl, u32 id,
}
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index ab6133e7db42..660268ff9562 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -239,6 +239,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 6b73648b3779..c4a4e316683f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2070,7 +2070,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xgbe_restart_dev(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index f409d7bd1f1e..c5e5fac49779 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -170,7 +170,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
goto out;
ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
- PCI_IRQ_LEGACY | PCI_IRQ_MSI);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
dev_info(pdata->dev, "single IRQ enablement failed\n");
return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 9131020d06af..7912b3b45148 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -538,7 +538,6 @@ static const struct xgbe_version_data xgbe_v1 = {
.tx_tstamp_workaround = 1,
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
@@ -546,9 +545,7 @@ static const struct acpi_device_id xgbe_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
@@ -556,7 +553,6 @@ static const struct of_device_id xgbe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
@@ -564,12 +560,8 @@ static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
static struct platform_driver xgbe_driver = {
.driver = {
.name = XGBE_DRV_NAME,
-#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
-#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 44900026d11b..4af9d89d5f88 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1530,7 +1530,7 @@ static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
xgene_enet_close(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
pdata->mac_ops->set_framesize(pdata, frame_size);
xgene_enet_open(ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8ebcc68e807f..f6a96931c89a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_AQTION) += atlantic.o
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
atlantic-objs := aq_main.o \
aq_nic.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7e9c74b141ef..fc2b325f34e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -17,7 +17,7 @@
#define AQ_CFG_IS_POLLING_DEF 0U
-#define AQ_CFG_FORCE_LEGACY_INT 0U
+#define AQ_CFG_FORCE_INTX 0U
#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
#define AQ_CFG_INTERRUPT_MODERATION_ON 1
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index dbd284660135..f010bda61c96 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -104,7 +104,7 @@ struct aq_stats_s {
};
#define AQ_HW_IRQ_INVALID 0U
-#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_INTX 1U
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 0b2a52199914..c1d1673c5749 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -146,7 +146,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
if (err < 0)
goto err_exit;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err_exit:
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index d6d6d5d37ff3..fe0e3e2a8117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -127,7 +127,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->irq_type = aq_pci_func_get_irq_type(self);
- if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ if ((cfg->irq_type == AQ_HW_IRQ_INTX) ||
(cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index baa5f8cc31f2..43c71f6b314f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -200,7 +200,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
- return AQ_HW_IRQ_LEGACY;
+ return AQ_HW_IRQ_INTX;
}
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
@@ -298,11 +298,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
- PCI_IRQ_MSIX | PCI_IRQ_MSI |
- PCI_IRQ_LEGACY);
-
+#if !AQ_CFG_FORCE_INTX
+ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_ALL_TYPES);
if (err < 0)
goto err_hwinit;
numvecs = err;
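
The aq_pci_probe() hunk above folds PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY into PCI_IRQ_ALL_TYPES, matching the series-wide rename of PCI_IRQ_LEGACY to PCI_IRQ_INTX. A one-function sketch of the call, with demo_request_vectors() as a placeholder name:

#include <linux/pci.h>

/* Returns the number of vectors actually allocated (MSI-X, MSI or INTx,
 * tried in that order), or a negative errno if none could be enabled.
 */
static int demo_request_vectors(struct pci_dev *pdev, unsigned int want)
{
	return pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_ALL_TYPES);
}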
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9dfd68f0fda9..8de2cdd09213 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -352,7 +352,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 54e70f07b573..56c46266bb0a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -562,7 +562,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 220400a633f5..b0ed572e88c6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -534,7 +534,7 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 0f2f400b5bc4..a38be924cdaa 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1788,7 +1788,7 @@ static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ag71xx *ag = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ndev->mtu));
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 49bb9a8f00e6..ad6d6abd885f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -901,7 +901,7 @@ static int alx_init_intr(struct alx_priv *alx)
int ret;
ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret < 0)
return ret;
@@ -1176,7 +1176,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 46cdc32b4e31..c571614b1d50 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -561,7 +561,7 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 5f2a6fcba967..9b778b34b67e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -428,7 +428,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
atl1e_down(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index a9014d7932db..3afd3627ce48 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2687,7 +2687,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bcfc9488125b..fa9a4919f25d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -905,7 +905,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_hw *hw = &adapter->hw;
/* set MTU */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index dd06b68b33ed..82768b0e9026 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -392,7 +392,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+ /* We hold the umac in reset and bring it out of
+ * reset when phy link is up.
+ */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -412,6 +414,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -430,13 +434,10 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -479,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -658,6 +665,12 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
active = phy_init_eee(phydev, 0) >= 0;
@@ -788,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -876,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -1035,19 +1051,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
- } else if (!intf->wolopts) {
- ret = phy_resume(dev->phydev);
- if (ret)
- goto err_phy_disable;
}
umac_reset(intf);
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
@@ -1062,9 +1071,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1306,7 +1312,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
@@ -1324,7 +1337,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct net_device *dev = intf->ndev;
- int ret = 0;
if (!netif_running(dev))
return 0;
@@ -1334,10 +1346,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
bcmasp_netif_deinit(dev);
if (!intf->wolopts) {
- ret = phy_suspend(dev->phydev);
- if (ret)
- goto out;
-
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
else
@@ -1354,11 +1362,7 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
clk_disable_unprepare(intf->parent->clk);
- return ret;
-
-out:
- bcmasp_netif_init(dev, false);
- return ret;
+ return 0;
}
static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..e5809ad5eb82 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1042,13 +1042,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3196c4dea076..3c0e3b9828be 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b65b8592ad75..6ec773e61182 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7912,7 +7912,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c9b6acd8c892..a8e07e51418f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4902,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 493b724848c8..c437ca1c0fd3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -76,7 +76,7 @@
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -137,6 +137,7 @@ static const struct {
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -211,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -294,7 +296,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -1296,9 +1298,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
-static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
- unsigned int len,
- dma_addr_t mapping)
+static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1318,6 +1320,39 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
bp->rx_dir);
skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ return bnxt_copy_data(bnapi, data, len, mapping);
+}
+
+static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
+ struct xdp_buff *xdp,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ unsigned int metasize = 0;
+ u8 *data = xdp->data;
+ struct sk_buff *skb;
+
+ len = xdp->data_end - xdp->data_meta;
+ metasize = xdp->data - xdp->data_meta;
+ data = xdp->data_meta;
+
+ skb = bnxt_copy_data(bnapi, data, len, mapping);
+ if (!skb)
+ return skb;
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
return skb;
}
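
bnxt_copy_xdp() above recomputes the copy length and offset from the xdp_buff pointers so that any metadata an XDP program placed in front of the packet is copied along with it. The sketch below isolates just that pointer arithmetic with a pared-down struct and made-up offsets; skb_metadata_set() and __skb_pull() are the real kernel helpers and are only referenced in comments.

#include <stdio.h>

/* Pared-down view of the three xdp_buff pointers used above. */
struct fake_xdp_buff {
	unsigned char *data_meta;	/* start of XDP metadata */
	unsigned char *data;		/* start of packet payload */
	unsigned char *data_end;	/* one past the last payload byte */
};

int main(void)
{
	unsigned char buf[256];
	/* Pretend the XDP program reserved 8 bytes of metadata in front of a
	 * 100-byte packet that starts at offset 32 of the buffer. */
	struct fake_xdp_buff xdp = {
		.data_meta = buf + 24,
		.data      = buf + 32,
		.data_end  = buf + 132,
	};

	/* Same arithmetic as bnxt_copy_xdp(): copy metadata plus payload... */
	size_t len      = (size_t)(xdp.data_end - xdp.data_meta);
	size_t metasize = (size_t)(xdp.data - xdp.data_meta);

	/* ...then the driver calls skb_metadata_set(skb, metasize) and
	 * __skb_pull(skb, metasize) so skb->data points at the payload while
	 * the metadata stays addressable just before it. */
	printf("copy %zu bytes, %zu of which are metadata\n", len, metasize);
	return 0;
}
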
@@ -1778,7 +1813,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1788,7 +1823,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
@@ -1804,7 +1839,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1815,7 +1850,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -2073,7 +2108,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
@@ -2094,24 +2129,24 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
}
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ if (!xdp_active)
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ else
+ skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
@@ -2121,9 +2156,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2134,29 +2167,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2199,7 +2224,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
}
}
@@ -2234,6 +2259,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->sw_stats->rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2280,7 +2310,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->sw_stats->rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2489,6 +2519,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
}
return false;
}
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
+ break;
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
@@ -3559,14 +3592,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr)
+ struct bnxt_rx_ring_info *rxr,
+ int numa_node)
{
struct page_pool_params pp = { 0 };
pp.pool_size = bp->rx_agg_ring_size;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size;
- pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
@@ -3586,7 +3620,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc = 0, agg_rings = 0;
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3597,10 +3632,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
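
The page-pool hunks above stop pinning every RX ring's allocations to the PCI device's NUMA node and instead derive a node per ring via cpumask_local_spread() and cpu_to_node(). The toy sketch below models only that ring-index-to-node mapping; the CPU-to-node table and fake_local_spread() are hypothetical stand-ins for the kernel topology helpers.

#include <stdio.h>

/* Hypothetical topology: 8 CPUs, CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_node[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
#define NR_CPUS 8

/* Stand-in for cpumask_local_spread(i, dev_node): enumerate the CPUs of the
 * device's node first, then the remaining CPUs, and return the i-th one. */
static int fake_local_spread(int i, int dev_node)
{
	int picked = 0, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* device-local CPUs first */
		if (cpu_node[cpu] == dev_node && picked++ == i)
			return cpu;
	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* then everything else */
		if (cpu_node[cpu] != dev_node && picked++ == i)
			return cpu;
	return i % NR_CPUS;
}

int main(void)
{
	int dev_node = 0;	/* node the NIC's PCI device sits on */
	int ring;

	/* Each RX ring allocates its page pool on the node of the CPU that is
	 * likely to service it, instead of always on dev_node. */
	for (ring = 0; ring < 6; ring++) {
		int cpu = fake_local_spread(ring, dev_node);

		printf("rx_ring[%d] -> cpu %d -> numa node %d\n",
		       ring, cpu, cpu_node[cpu]);
	}
	return 0;
}
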
@@ -3859,13 +3899,12 @@ static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, j, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_msix;
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
- ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr, *cpr2;
@@ -3884,10 +3923,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
- if (ulp_msix && i >= ulp_base_vec)
- ring->map_idx = i + ulp_msix;
- else
- ring->map_idx = i;
+ ring->map_idx = ulp_msix + i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
@@ -3917,6 +3953,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
cpr2->bnapi = bnapi;
+ cpr2->sw_stats = cpr->sw_stats;
cpr2->cp_idx = k;
if (!k && rx) {
bp->rx_ring[i].rx_cpr = cpr2;
@@ -4241,6 +4278,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
@@ -4757,6 +4795,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
bnxt_free_stats_mem(bp, &cpr->stats);
+
+ kfree(cpr->sw_stats);
+ cpr->sw_stats = NULL;
}
}
@@ -4771,6 +4812,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+ if (!cpr->sw_stats)
+ return -ENOMEM;
+
cpr->stats.len = size;
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
if (rc)
@@ -5788,8 +5833,22 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
static void
bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
- u16 rxq)
+ struct bnxt_ntuple_filter *fltr)
{
+ struct bnxt_rss_ctx *rss_ctx, *tmp;
+ u16 rxq = fltr->base.rxq;
+
+ if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+ if (rss_ctx->index == fltr->base.fw_vnic_id) {
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ break;
+ }
+ }
+ return;
+ }
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
struct bnxt_vnic_info *vnic;
u32 enables;
@@ -5830,7 +5889,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->flags =
cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5938,9 +5997,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -6025,9 +6084,10 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
}
-static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
{
int entries;
+ u16 *tbl;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
@@ -6035,16 +6095,22 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
- GFP_KERNEL);
- if (!bp->rss_indir_tbl)
+ tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
+ if (!tbl)
return -ENOMEM;
+
+ if (rss_ctx)
+ rss_ctx->rss_indir_tbl = tbl;
+ else
+ bp->rss_indir_tbl = tbl;
+
return 0;
}
-static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
+ u16 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6055,13 +6121,17 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+ if (rss_ctx)
+ rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
+ else
+ rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
- bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
}
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
@@ -6117,6 +6187,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ j = vnic->rss_ctx->rss_indir_tbl[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -6154,9 +6226,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
@@ -6174,9 +6246,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -6229,9 +6301,8 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
-static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6256,7 +6327,8 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -6265,10 +6337,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -6280,13 +6352,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -6299,7 +6372,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -6313,10 +6386,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -6364,8 +6436,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
- ring = vnic_id - 1;
- else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = vnic->vnic_id - 1;
+ else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
@@ -6381,25 +6453,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev))
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
- req->vnic_id =
- cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -6408,15 +6480,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_free_one(bp, i);
+ bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
- unsigned int start_rx_ring_idx,
- unsigned int nr_rings)
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -6442,7 +6513,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == BNXT_VNIC_DEFAULT)
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -7274,17 +7345,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
- int cp = bp->cp_nr_rings;
- int ulp_msix, ulp_base;
-
- ulp_msix = bnxt_get_ulp_msix_num(bp);
- if (ulp_msix) {
- ulp_base = bnxt_get_ulp_msix_base(bp);
- cp += ulp_msix;
- if ((ulp_base + ulp_msix) > cp)
- cp = ulp_base + ulp_msix;
- }
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
@@ -7300,16 +7361,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
- int cp = bp->cp_nr_rings;
-
- if (!ulp_stat)
- return cp;
-
- if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
- return bnxt_get_ulp_msix_base(bp) + ulp_stat;
-
- return cp + ulp_stat;
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
}
static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
@@ -7341,7 +7393,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
@@ -7349,7 +7401,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_RFS) {
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return 2;
+ return 2 + bp->num_rss_ctx;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return rx_rings + 1;
}
@@ -7417,17 +7469,32 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
}
+static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
+ int cp = bp->cp_nr_rings;
int rx_rings, rc;
+ int ulp_msix = 0;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
- hwr.cp = bnxt_nq_rings_in_use(bp);
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+ if (!ulp_msix)
+ bnxt_set_ulp_stat_ctxs(bp, 0);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ hwr.cp = cp + ulp_msix;
+ } else {
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ }
+
hwr.tx = bp->tx_nr_rings;
hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -7497,7 +7564,20 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+ int resv_msix, resv_ctx, ulp_ctxs;
+ struct bnxt_hw_resc *hw_resc;
+
+ hw_resc = &bp->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ ulp_msix = min_t(int, resv_msix, ulp_msix);
+ bnxt_set_ulp_msix_num(bp, ulp_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
+ ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
+ }
return rc;
}
@@ -9089,7 +9169,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -9676,7 +9756,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -9691,7 +9771,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_set_rss(bp, i, false);
+ bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -9769,28 +9849,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
-static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -9798,26 +9877,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
- rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
@@ -9825,16 +9904,33 @@ vnic_setup_err:
return rc;
}
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
- vnic_id, i, rc);
+ vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -9842,63 +9938,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic_id, rc);
- return rc;
- }
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
return rc;
- }
+
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return __bnxt_setup_vnic_p5(bp, vnic_id);
+ return __bnxt_setup_vnic_p5(bp, vnic);
else
- return __bnxt_setup_vnic(bp, vnic_id);
+ return __bnxt_setup_vnic(bp, vnic);
}
-static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 start_rx_ring_idx, int rx_rings)
{
int rc;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
return rc;
}
- return bnxt_setup_vnic(bp, vnic_id);
+ return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic;
int i, rc = 0;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
- bp->rx_nr_rings);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -9909,12 +9999,104 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
+ if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
}
return rc;
}
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all)
+{
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct bnxt_filter_base *usr_fltr, *tmp;
+ struct bnxt_ntuple_filter *ntp_fltr;
+ int i;
+
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
+ if (!all)
+ return;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
+ usr_fltr->fw_vnic_id == rss_ctx->index) {
+ ntp_fltr = container_of(usr_fltr,
+ struct bnxt_ntuple_filter,
+ base);
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+ }
+
+ if (vnic->rss_table)
+ dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ kfree(rss_ctx->rss_indir_tbl);
+ list_del(&rss_ctx->list);
+ bp->num_rss_ctx--;
+ clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
+ kfree(rss_ctx);
+}
+
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+ bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+ struct bnxt_rss_ctx *rss_ctx, *tmp;
+
+ list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+ bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+ __bnxt_setup_vnic_p5(bp, vnic)) {
+ netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+ rss_ctx->index);
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ }
+ }
+}
+
+struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
+{
+ struct bnxt_rss_ctx *rss_ctx = NULL;
+
+ rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
+ if (rss_ctx) {
+ rss_ctx->vnic.rss_ctx = rss_ctx;
+ list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
+ bp->num_rss_ctx++;
+ }
+ return rss_ctx;
+}
+
+void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
+{
+ struct bnxt_rss_ctx *rss_ctx, *tmp;
+
+ list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
+ bnxt_del_one_rss_ctx(bp, rss_ctx, all);
+
+ if (all)
+ bitmap_free(bp->rss_ctx_bmap);
+}
+
+static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
+{
+ bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
+ if (bp->rss_ctx_bmap) {
+ /* burn index 0 since we cannot have context 0 */
+ __set_bit(0, bp->rss_ctx_bmap);
+ INIT_LIST_HEAD(&bp->rss_ctx_list);
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ }
+}
+
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
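
bnxt_init_multi_rss_ctx() above tracks RSS context indices in a bitmap and burns index 0, since context 0 is the default; a user-created context presumably takes the first clear bit (that allocation happens in the ethtool code, which is not shown here), and bnxt_del_one_rss_ctx() clears the bit on teardown. Below is a small user-space sketch of that style of ID allocator; the bit helpers are hand-rolled stand-ins for bitmap_zalloc()/__set_bit()/find_first_zero_bit(), and the 32-context limit mirrors BNXT_MAX_ETH_RSS_CTX.

#include <stdio.h>
#include <string.h>

#define MAX_CTX       32
#define BMAP_LEN      (MAX_CTX + 1)	/* index 0 exists but is never handed out */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BMAP_LONGS    ((BMAP_LEN + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit_(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int first_zero_bit(const unsigned long *map, unsigned int len)
{
	unsigned int nr;

	for (nr = 0; nr < len; nr++)
		if (!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))))
			return (int)nr;
	return -1;
}

int main(void)
{
	unsigned long bmap[BMAP_LONGS];

	memset(bmap, 0, sizeof(bmap));
	set_bit_(0, bmap);	/* burn index 0: cannot have context 0 */

	/* Allocate two contexts, free the first, allocate again: the freed
	 * index is reused, mirroring clear_bit() in bnxt_del_one_rss_ctx(). */
	int a = first_zero_bit(bmap, BMAP_LEN); set_bit_(a, bmap);
	int b = first_zero_bit(bmap, BMAP_LEN); set_bit_(b, bmap);
	clear_bit_(a, bmap);
	int c = first_zero_bit(bmap, BMAP_LEN); set_bit_(c, bmap);
	printf("allocated ctx %d, %d, then reused %d\n", a, b, c);
	return 0;
}
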
@@ -9927,16 +10109,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
unsigned int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
- rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -9979,7 +10162,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9988,7 +10171,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
+ rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10303,19 +10486,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
-int bnxt_get_avail_msix(struct bnxt *bp, int num)
+static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
- int max_cp = bnxt_get_max_func_cp_rings(bp);
int max_irq = bnxt_get_max_func_irqs(bp);
int total_req = bp->cp_nr_rings + num;
- int max_idx, avail_msix;
-
- max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- max_idx = min_t(int, bp->total_irqs, max_cp);
- avail_msix = max_idx - bp->cp_nr_rings;
- if (!BNXT_NEW_RM(bp) || avail_msix >= num)
- return avail_msix;
if (max_irq < total_req) {
num = max_irq - bp->cp_nr_rings;
@@ -10442,13 +10616,23 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
int tcs = bp->num_tc;
+ int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (irq_re_init && BNXT_NEW_RM(bp) &&
- bnxt_get_num_msix(bp) != bp->total_irqs) {
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ irqs_required = ulp_msix + bp->cp_nr_rings;
+ } else {
+ irqs_required = bnxt_get_num_msix(bp);
+ }
+
+ if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
irq_cleared = true;
@@ -10630,9 +10814,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
if (bnapi->tx_fault)
- cpr->sw_stats.tx.tx_resets++;
+ cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
if (bnapi->rx_ring)
cancel_work_sync(&cpr->dim.work);
@@ -11367,7 +11551,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (fw_reset) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- bnxt_ulp_stop(bp);
+ bnxt_ulp_irq_stop(bp);
bnxt_free_ctx_mem(bp);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
@@ -11674,6 +11858,46 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
bnxt_cfg_one_usr_fltr(bp, usr_fltr);
}
+static int bnxt_set_xps_mapping(struct bnxt *bp)
+{
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ unsigned int q_idx, map_idx, cpu, i;
+ const struct cpumask *cpu_mask_ptr;
+ int nr_cpus = num_online_cpus();
+ cpumask_t *q_map;
+ int rc = 0;
+
+ q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
+ if (!q_map)
+ return -ENOMEM;
+
+ /* Create CPU mask for all TX queues across MQPRIO traffic classes.
+ * Each TC has the same number of TX queues. The nth TX queue for each
+ * TC will have the same CPU mask.
+ */
+ for (i = 0; i < nr_cpus; i++) {
+ map_idx = i % bp->tx_nr_rings_per_tc;
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_mask_ptr = get_cpu_mask(cpu);
+ cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
+ }
+
+ /* Register CPU mask for each TX queue except the ones marked for XDP */
+ for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
+ map_idx = q_idx % bp->tx_nr_rings_per_tc;
+ rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
+ if (rc) {
+ netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
+ q_idx);
+ break;
+ }
+ }
+
+ kfree(q_map);
+
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
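
bnxt_set_xps_mapping() above builds one CPU mask per TX ring index and reuses it across traffic classes, because queue q of every TC resolves to map_idx = q % tx_nr_rings_per_tc. The compact sketch below shows only that grouping, holding each per-queue mask in a plain uint64_t with invented CPU and ring counts; it ignores the NUMA-local CPU spreading, and cpumask_t/netif_set_xps_queue() are the real kernel facilities named only in comments.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int nr_cpus = 12;	/* hypothetical online CPUs */
	const unsigned int rings_per_tc = 4;	/* TX rings per traffic class */
	const unsigned int num_tc = 2;		/* MQPRIO traffic classes */
	uint64_t q_map[4] = { 0 };		/* one mask per ring index (rings_per_tc) */
	unsigned int i, q;

	/* Spread CPUs over the per-TC ring indices: CPU i lands in mask
	 * i % rings_per_tc, like the first loop in bnxt_set_xps_mapping(). */
	for (i = 0; i < nr_cpus; i++)
		q_map[i % rings_per_tc] |= 1ULL << i;

	/* Every TX queue of every TC gets the mask of its ring index, which is
	 * what the driver passes to netif_set_xps_queue() per queue. */
	for (q = 0; q < rings_per_tc * num_tc; q++)
		printf("txq %2u -> cpu mask 0x%03llx\n", q,
		       (unsigned long long)q_map[q % rings_per_tc]);
	return 0;
}
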
@@ -11736,8 +11960,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- if (irq_re_init)
+ if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
+ rc = bnxt_set_xps_mapping(bp);
+ if (rc)
+ netdev_warn(bp->dev, "failed to set xps mapping\n");
+ }
if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
if (!static_key_enabled(&bnxt_xdp_locking_key))
@@ -11758,8 +11986,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_hwrm_realloc_rss_ctx_vnic(bp);
bnxt_cfg_usr_fltrs(bp);
return 0;
@@ -11874,10 +12106,9 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_queue_sp_work(bp,
+ BNXT_RESTART_ULP_SP_EVENT);
}
}
@@ -11908,6 +12139,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp, false);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -12107,8 +12340,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
stats->rx_dropped +=
- cpr->sw_stats.rx.rx_netpoll_discards +
- cpr->sw_stats.rx.rx_oom_discards;
+ cpr->sw_stats->rx.rx_netpoll_discards +
+ cpr->sw_stats->rx.rx_oom_discards;
}
}
@@ -12175,7 +12408,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats,
struct bnxt_cp_ring_info *cpr)
{
- struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+ struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
u64 *hw_stats = cpr->stats.sw_stats;
stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
@@ -12405,33 +12638,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
}
/* If runtime conditions support RFS */
-static bool bnxt_rfs_capable(struct bnxt *bp)
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
struct bnxt_hw_rings hwr = {0};
int max_vnics, max_rss_ctxs;
- hwr.rss_ctx = 1;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
- /* 2 VNICS: default + Ntuple */
- hwr.vnic = 2;
- hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
- hwr.vnic;
- goto check_reserve_vnic;
- }
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
+
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- hwr.vnic = 1 + bp->rx_nr_rings;
-check_reserve_vnic:
+ hwr.grp = bp->rx_nr_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
+ if (new_rss_ctx)
+ hwr.vnic++;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
- !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
- hwr.rss_ctx = hwr.vnic;
-
if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
@@ -12465,7 +12691,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
@@ -12857,17 +13083,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
- int rc;
-
- if (silent) {
- bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- bnxt_ulp_start(bp, rc);
- }
+ bnxt_close_nic(bp, !silent, false);
+ bnxt_open_nic(bp, !silent, false);
}
}
@@ -13025,7 +13242,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
rxr->bnapi->in_reset = false;
bnxt_alloc_one_rx_ring(bp, i);
cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
@@ -13035,9 +13252,18 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
- bnxt_ulp_stop(bp);
/* When firmware is in fatal state, quiesce device and disable
* bus master to prevent any potential bad DMAs before freeing
* kernel memory.
@@ -13048,12 +13274,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -13123,6 +13344,7 @@ void bnxt_fw_exception(struct bnxt *bp)
{
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
bnxt_rtnl_unlock_sp(bp);
@@ -13154,6 +13376,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -13232,7 +13455,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->sw_stats.cmn.missed_irqs++;
+ cpr->sw_stats->cmn.missed_irqs++;
}
}
}
@@ -13278,6 +13501,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp)
hwrm_req_send(bp, req);
}
+static void bnxt_ulp_restart(struct bnxt *bp)
+{
+ bnxt_ulp_stop(bp);
+ bnxt_ulp_start(bp, 0);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -13289,6 +13518,11 @@ static void bnxt_sp_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
+ bnxt_ulp_restart(bp);
+ bnxt_reenable_sriov(bp);
+ }
+
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -13417,8 +13651,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- hwr.cp += bnxt_get_ulp_msix_num(bp);
- hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
hwr.grp = rx;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
@@ -13601,7 +13835,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
+ if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -13745,10 +13979,8 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
- bnxt_ulp_start(bp, rc);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- }
bp->fw_reset_state = 0;
dev_close(bp->dev);
}
@@ -13779,7 +14011,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
n);
- return;
+ goto ulp_start;
}
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
@@ -13789,7 +14021,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
@@ -13882,7 +14114,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
@@ -13894,10 +14126,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- bnxt_vf_reps_alloc(bp);
- bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
@@ -13905,6 +14133,12 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ rtnl_lock();
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
+ rtnl_unlock();
break;
}
return;
@@ -13920,6 +14154,8 @@ fw_reset_abort:
rtnl_lock();
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
+ulp_start:
+ bnxt_ulp_start(bp, rc);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -14044,7 +14280,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
bnxt_set_ring_params(bp);
if (netif_running(dev))
@@ -14454,12 +14690,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
@@ -14543,7 +14776,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+ stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
}
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
@@ -14595,12 +14828,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- bnxt_rdma_aux_device_uninit(bp);
+ bnxt_rdma_aux_device_del(bp);
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp, true);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -14689,8 +14927,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
- bnxt_get_ulp_msix_num(bp),
- hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_get_ulp_msix_num_in_use(bp),
+ hw_resc->max_stat_ctxs -
+ bnxt_get_ulp_stat_ctxs_in_use(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -14786,6 +15025,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -14813,6 +15053,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+ int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
+
+ bnxt_set_ulp_msix_num(bp, ulp_num_msix);
+ bnxt_set_dflt_ulp_stat_ctxs(bp);
+ }
+
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
@@ -15059,7 +15307,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->flags |= BNXT_FLAG_CHIP_P7;
}
- rc = bnxt_alloc_rss_indir_tbl(bp);
+ rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
if (rc)
goto init_err_pci_clean;
@@ -15162,6 +15410,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
+ bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV) {
@@ -15212,13 +15461,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ bnxt_init_multi_rss_ctx(bp);
+
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
bnxt_dl_fw_reporters_create(bp);
- bnxt_rdma_aux_device_init(bp);
+ bnxt_rdma_aux_device_add(bp);
bnxt_print_device_info(bp);
@@ -15226,12 +15479,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_cleanup:
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp, true);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_hwmon_uninit(bp);
@@ -15286,8 +15542,9 @@ static int bnxt_suspend(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
bnxt_ulp_stop(bp);
+
+ rtnl_lock();
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -15342,10 +15599,10 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
- rtnl_unlock();
return rc;
}
@@ -15371,24 +15628,36 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
+ bnxt_ulp_stop(bp);
+
rtnl_lock();
netif_device_detach(netdev);
- bnxt_ulp_stop(bp);
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
- if (state == pci_channel_io_perm_failure) {
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@@ -15419,6 +15688,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
+ msleep(900);
+
rtnl_lock();
if (pci_enable_device(pdev)) {
@@ -15472,6 +15745,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
@@ -15498,13 +15772,13 @@ static void bnxt_io_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
netif_device_attach(netdev);
- }
rtnl_unlock();
+ bnxt_ulp_start(bp, err);
+ if (!err)
+ bnxt_reenable_sriov(bp);
}
static const struct pci_error_handlers bnxt_err_handler = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index dd849e715c9b..656ab81c0272 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1152,7 +1152,7 @@ struct bnxt_cp_ring_info {
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- struct bnxt_sw_stats sw_stats;
+ struct bnxt_sw_stats *sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1256,8 +1256,22 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
+#define BNXT_VNIC_RSSCTX_FLAG 0x40
+ struct bnxt_rss_ctx *rss_ctx;
+ u32 vnic_id;
};
+struct bnxt_rss_ctx {
+ struct list_head list;
+ struct bnxt_vnic_info vnic;
+ u16 *rss_indir_tbl;
+ u8 index;
+};
+
+#define BNXT_MAX_ETH_RSS_CTX 32
+#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1)
+#define BNXT_VNIC_ID_INVALID 0xffffffff
+
struct bnxt_hw_rings {
int tx;
int rx;
@@ -1360,6 +1374,7 @@ struct bnxt_filter_base {
#define BNXT_ACT_RING_DST 2
#define BNXT_ACT_FUNC_DST 4
#define BNXT_ACT_NO_AGING 8
+#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
u16 rxq;
u16 fw_vnic_id;
@@ -1998,6 +2013,7 @@ enum board_idx {
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
+ NETXTREME_E_P7_VF,
};
struct bnxt {
@@ -2227,6 +2243,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
+ struct list_head rss_ctx_list;
+ unsigned long *rss_ctx_bmap;
+ u32 num_rss_ctx;
int nr_vnics;
u16 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
@@ -2241,6 +2260,7 @@ struct bnxt {
#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2284,6 +2304,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
+ int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB
@@ -2340,6 +2361,10 @@ struct bnxt {
#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
(BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2416,6 +2441,7 @@ struct bnxt {
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_RESTART_ULP_SP_EVENT 24
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -2693,9 +2719,16 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
+int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
@@ -2705,7 +2738,6 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
-int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
@@ -2721,6 +2753,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all);
+struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
+void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
@@ -2728,6 +2766,7 @@ void bnxt_reenable_sriov(struct bnxt *bp);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ae4529c043f0..4cb0fabf977e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -437,18 +437,20 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_ulp_stop(bp);
rtnl_lock();
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -ENODEV;
}
- bnxt_ulp_stop(bp);
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
@@ -516,7 +518,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
bnxt_vf_reps_alloc(bp);
if (netif_running(bp->dev))
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
if (!rc) {
bnxt_reenable_sriov(bp);
bnxt_ptp_reapply_pps(bp);
@@ -570,6 +571,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
dev_close(bp->dev);
}
rtnl_unlock();
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ bnxt_ulp_start(bp, rc);
return rc;
}
@@ -1096,7 +1099,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@@ -1145,7 +1149,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1d240a27455a..8763f8a01457 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -631,13 +631,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
- sw = (u64 *)&cpr->sw_stats.rx;
+ sw = (u64 *)&cpr->sw_stats->rx;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
buf[j] = sw[k];
}
- sw = (u64 *)&cpr->sw_stats.cmn;
+ sw = (u64 *)&cpr->sw_stats->cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
}
@@ -969,6 +969,8 @@ static int bnxt_set_channels(struct net_device *dev,
}
bnxt_clear_usr_fltrs(bp, true);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp, false);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1205,6 +1207,36 @@ fltr_err:
return rc;
}
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+ u32 index)
+{
+ struct bnxt_rss_ctx *rss_ctx, *tmp;
+
+ list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
+ if (rss_ctx->index == index)
+ return rss_ctx;
+ return NULL;
+}
+
+static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
+ struct bnxt_rss_ctx *rss_ctx)
+{
+ int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ return -ENOMEM;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ return 0;
+}
+
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs)
{
@@ -1280,22 +1312,24 @@ static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rxnfc *cmd)
{
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_ntuple_filter *new_fltr, *fltr;
+ u32 flow_type = fs->flow_type & 0xff;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
- u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
- u32 idx;
+ u32 idx, ring;
int rc;
+ u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1403,6 +1437,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
rcu_read_unlock();
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->flow_type & FLOW_RSS) {
+ struct bnxt_rss_ctx *rss_ctx;
+
+ new_fltr->base.fw_vnic_id = 0;
+ new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
+ if (rss_ctx) {
+ new_fltr->base.fw_vnic_id = rss_ctx->index;
+ } else {
+ rc = -EINVAL;
+ goto ntuple_err;
+ }
+ }
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
@@ -1444,12 +1491,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
flow_type == IPV6_USER_FLOW) &&
!(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
return -EOPNOTSUPP;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ if (flow_type & FLOW_MAC_EXT)
return -EINVAL;
flow_type &= ~FLOW_EXT;
if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
- return bnxt_add_ntuple_cls_rule(bp, fs);
+ return bnxt_add_ntuple_cls_rule(bp, cmd);
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
@@ -1463,7 +1510,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (flow_type == ETHER_FLOW)
rc = bnxt_add_l2_cls_rule(bp, fs);
else
- rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ rc = bnxt_add_ntuple_cls_rule(bp, cmd);
return rc;
}
@@ -1754,7 +1801,10 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
+ u32 rss_context = rxfh->rss_context;
+ struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
+ u16 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1764,10 +1814,18 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- if (rxfh->indir && bp->rss_indir_tbl) {
+ if (rxfh->rss_context) {
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
+ if (!rss_ctx)
+ return -EINVAL;
+ indir_tbl = rss_ctx->rss_indir_tbl;
+ vnic = &rss_ctx->vnic;
+ }
+
+ if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- rxfh->indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = indir_tbl[i];
}
if (rxfh->key && vnic->rss_hash_key)
@@ -1776,6 +1834,136 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
+static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ struct ethtool_rxfh_param *rxfh)
+{
+ if (rxfh->key) {
+ if (rss_ctx) {
+ memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
+ HW_HASH_KEY_SIZE);
+ } else {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+ }
+ if (rxfh->indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ u16 *indir_tbl = bp->rss_indir_tbl;
+
+ if (rss_ctx)
+ indir_tbl = rss_ctx->rss_indir_tbl;
+ for (i = 0; i < tbl_size; i++)
+ indir_tbl[i] = rxfh->indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&indir_tbl[i], 0, pad * sizeof(u16));
+ }
+}
+
+static int bnxt_set_rxfh_context(struct bnxt *bp,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ u32 *rss_context = &rxfh->rss_context;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ bool modify = false;
+ int bit_id;
+ int rc;
+
+ if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!netif_running(bp->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
+ return -EAGAIN;
+ }
+
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
+ if (!rss_ctx) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
+ *rss_context);
+ return -EINVAL;
+ }
+ if (*rss_context && rxfh->rss_delete) {
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+ }
+ modify = true;
+ vnic = &rss_ctx->vnic;
+ goto modify_context;
+ }
+
+ if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+ BNXT_MAX_ETH_RSS_CTX);
+ return -EINVAL;
+ }
+
+ if (!bnxt_rfs_capable(bp, true)) {
+ NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
+ return -ENOMEM;
+ }
+
+ rss_ctx = bnxt_alloc_rss_ctx(bp);
+ if (!rss_ctx)
+ return -ENOMEM;
+
+ vnic = &rss_ctx->vnic;
+ vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+ vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+ rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
+ if (rc)
+ goto out;
+
+ rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
+ if (rc)
+ goto out;
+
+ bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+ goto out;
+ }
+
+ rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+modify_context:
+ bnxt_modify_rss(bp, rss_ctx, rxfh);
+
+ if (modify)
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+
+ rc = __bnxt_setup_vnic_p5(bp, vnic);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+
+ bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
+ BNXT_RSS_CTX_BMAP_LEN, 0);
+ if (bit_id < 0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rss_ctx->index = (u16)bit_id;
+ *rss_context = rss_ctx->index;
+
+ return 0;
+out:
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return rc;
+}
+
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1786,20 +1974,11 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key) {
- memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
- bp->rss_hash_key_updated = true;
- }
+ if (rxfh->rss_context)
+ return bnxt_set_rxfh_context(bp, rxfh, extack);
- if (rxfh->indir) {
- u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+ bnxt_modify_rss(bp, NULL, rxfh);
- for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = rxfh->indir[i];
- pad = bp->rss_indir_tbl_entries - tbl_size;
- if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
- }
bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
@@ -4641,6 +4820,14 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ bnxt_ulp_registered(bp->edev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4671,7 +4858,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- bnxt_ulp_stop(bp);
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
@@ -4682,7 +4868,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -4709,7 +4894,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -5071,6 +5255,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
+ .cap_rss_ctx_supported = 1,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
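For reference, a minimal userspace sketch (not part of the patch; device name, context id, address and port are placeholders) of how the FLOW_RSS ntuple path handled by bnxt_add_ntuple_cls_rule() above is typically exercised, via the ETHTOOL_SRXCLSRLINS ioctl with an RSS context previously created through the rxfh interface:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.rss_context = 1;				/* placeholder context id */
	nfc.fs.flow_type = TCP_V4_FLOW | FLOW_RSS;	/* steer flow to that RSS context */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.10");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;	/* exact-match destination IP */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(4420);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;		/* exact-match destination port */
	nfc.fs.location = RX_CLS_LOC_ANY;		/* let the driver pick the slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_SRXCLSRLINS");
	close(fd);
	return 0;
}

The ethtool CLI exposes the same path through its "context" argument to -N/-U and -X, which ends up issuing the ioctls sketched above.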
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index e957abd704db..06ea86c80be1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -468,6 +468,10 @@ struct cmd_nums {
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_TF_IF_TBL_SET 0x2feUL
#define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
#define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
#define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
#define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
@@ -495,6 +499,7 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_SET 0x398UL
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -604,8 +609,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.10.3.15"
+#define HWRM_VERSION_RSVD 39
+#define HWRM_VERSION_STR "1.10.3.39"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1328,8 +1333,9 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1478,6 +1484,30 @@ struct hwrm_async_event_cmpl_error_report_thermal {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
};
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1781,6 +1811,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1791,10 +1824,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
__le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
u8 unused_2[2];
@@ -1844,6 +1875,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
#define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1955,7 +1987,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- u8 unused_2[2];
+ __le16 roce_vnic_id;
__le32 partition_min_bw;
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
@@ -2003,6 +2035,8 @@ struct hwrm_func_qcfg_output {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
u8 unused_7;
u8 valid;
};
@@ -2229,10 +2263,8 @@ struct hwrm_func_cfg_input {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
__le16 unused_2;
};
@@ -2416,6 +2448,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3636,19 +3669,22 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3707,17 +3743,22 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3740,15 +3781,18 @@ struct hwrm_func_backing_store_qcfg_v2_output {
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3841,19 +3885,22 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3873,19 +3920,22 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
@@ -3990,6 +4040,7 @@ struct hwrm_func_drv_if_change_output {
__le32 flags;
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
u8 unused_0[3];
u8 valid;
};
@@ -4472,7 +4523,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
__le16 fec_cfg;
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
@@ -7380,7 +7435,7 @@ struct hwrm_cfa_l2_filter_free_output {
u8 valid;
};
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_l2_filter_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7399,12 +7454,22 @@ struct hwrm_cfa_l2_filter_cfg_input {
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
__le64 l2_filter_id;
__le32 dst_id;
__le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
};
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
@@ -8466,7 +8531,15 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8514,7 +8587,15 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8565,7 +8646,15 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -8860,7 +8949,7 @@ struct hwrm_stat_generic_qstats_output {
u8 valid;
};
-/* generic_sw_hw_stats (size:1408b/176B) */
+/* generic_sw_hw_stats (size:1472b/184B) */
struct generic_sw_hw_stats {
__le64 pcie_statistics_tx_tlp;
__le64 pcie_statistics_rx_tlp;
@@ -8884,6 +8973,7 @@ struct generic_sw_hw_stats {
__le64 hw_db_recov_dbs_dropped;
__le64 hw_db_recov_drops_serviced;
__le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
};
/* hwrm_fw_reset_input (size:192b/24B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index cc07660330f5..e661ab154d6b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -109,7 +109,8 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
spin_unlock_bh(&ptp->ptp_lock);
}
-static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
+ u32 txts_tmo)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -122,10 +123,15 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ u32 tmo_us = txts_tmo * 1000;
+
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
- req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ if (!tmo_us)
+ tmo_us = BNXT_PTP_QTS_TIMEOUT;
+ tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
+ req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
@@ -672,10 +678,17 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ unsigned long now = jiffies;
u64 ts = 0, ns = 0;
+ u32 tmo = 0;
int rc;
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ if (!ptp->txts_pending)
+ ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo);
+ if (!time_after_eq(now, ptp->abs_txts_tmo))
+ tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
+ tmo);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
spin_lock_bh(&ptp->ptp_lock);
@@ -684,6 +697,10 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
+ if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) {
+ ptp->txts_pending = true;
+ return;
+ }
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
@@ -691,6 +708,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
atomic_inc(&ptp->tx_avail);
+ ptp->txts_pending = false;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -714,6 +732,8 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
+ if (ptp->txts_pending)
+ return 0;
return HZ;
}
@@ -891,7 +911,8 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
} else {
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
+ &ns, 0);
if (rc)
return rc;
}
@@ -965,6 +986,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
spin_unlock_bh(&ptp->ptp_lock);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
+ ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
out:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fce8dc39a7d0..2c3415c8fc03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -22,7 +22,9 @@
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -115,11 +117,14 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 txts_pending:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ unsigned long abs_txts_tmo;
};
#if BITS_PER_LONG == 32
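The deferred TX timestamp handling added above only comes into play once hardware TX timestamping has been enabled from userspace. A minimal sketch of that setup (not part of the patch; the device name is a placeholder) using SIOCSHWTSTAMP and SO_TIMESTAMPING:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd, flags;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* Ask the NIC to timestamp transmitted packets in hardware. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_NONE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");

	/* Request hardware TX timestamps for packets sent on this socket;
	 * the driver reports them back on the socket error queue.
	 */
	flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
		perror("SO_TIMESTAMPING");

	close(fd);
	return 0;
}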
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 273c9ba48f09..d2ca90407cce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -380,6 +381,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 93f9bd55020f..ba3fa1c2e5d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
+ int num_msix, i;
if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
- idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
- ent[i].db_offset = (idx + i) * 0x80;
+ ent[i].db_offset = i * 0x80;
+ }
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
+
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
+
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
}
}
@@ -57,25 +110,36 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
+ int rc = 0;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
+ }
ulp = edev->ulp_tbl;
- if (!ulp)
- return -ENOMEM;
-
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return 0;
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
+ return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -87,8 +151,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -101,11 +168,13 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
-int bnxt_get_ulp_msix_num(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
u32 roce_msix = BNXT_VF(bp) ?
BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
@@ -114,29 +183,6 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp)
min_t(u32, roce_msix, num_online_cpus()) : 0);
}
-int bnxt_get_ulp_msix_base(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return edev->ulp_tbl->msix_base;
- }
- return 0;
-}
-
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
-
- return 0;
-}
-
int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
@@ -181,6 +227,12 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
struct auxiliary_device *adev;
@@ -195,6 +247,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
@@ -210,6 +263,15 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -222,7 +284,7 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adrv->resume(adev);
}
}
-
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -306,7 +368,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
- auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
@@ -324,6 +385,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}
+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -331,6 +400,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -344,7 +414,23 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
- edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -392,21 +478,16 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
-
- rc = auxiliary_device_add(aux_dev);
- if (rc) {
- netdev_warn(bp->dev,
- "Failed to add auxiliary device for ROCE\n");
- goto aux_dev_uninit;
- }
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index b9e73de14b57..4eafe6ec0abf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -46,7 +46,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
atomic_t ref_count;
};
@@ -86,18 +85,28 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && edev->ulp_tbl)
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,6 +114,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4079538bc310..345681d5007e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- orig_data = xdp.data;
+ orig_data = xdp->data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}
switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;
- if (unlikely(xdp_buff_has_frags(&xdp))) {
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod), &xdp);
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -306,12 +306,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +326,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 5e412c5655ba..0122782400b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3d63177e7e52..c2b4188a1ef1 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3682,7 +3682,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;
- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7396e2823e32..c7e7dac057a3 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3280,7 +3286,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
}
/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
{
unsigned int i;
u32 reg;
@@ -3305,6 +3311,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
udelay(10);
bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
return dma_ctrl;
}
@@ -3326,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3368,8 +3384,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ /* Disable RX/TX DMA and flush TX and RX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv, true);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
@@ -3587,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number of filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -3995,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
@@ -4235,7 +4255,7 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
/* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ dma_ctrl = bcmgenet_dma_disable(priv, false);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
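The bcmgenet hunks above introduce priv->reg_lock and take it around every read-modify-write of UMAC_CMD, so concurrent paths (rx_mode updates, link reconfiguration, WoL setup) cannot lose each other's bit changes. A hypothetical sketch of the pattern, not driver code (demo_priv/demo_cmd_update are illustrative; the lock is assumed to be initialized with spin_lock_init() at probe time):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_priv {
	void __iomem *cmd_reg;
	spinlock_t reg_lock;	/* serializes cmd_reg read-modify-write */
};

/* Atomically (with respect to other writers) set and clear bits in a
 * shared command register. */
static void demo_cmd_update(struct demo_priv *priv, u32 set, u32 clear)
{
	u32 reg;

	spin_lock_bh(&priv->reg_lock);
	reg = readl(priv->cmd_reg);
	reg &= ~clear;
	reg |= set;
	writel(reg, priv->cmd_reg);
	spin_unlock_bh(&priv->reg_lock);
}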
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c..43b923c48b14 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..1248792d7fd4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada89355747..c4a3698cef66 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 62ff4381ac83..1589a49b876c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4019,7 +4019,7 @@ static int tg3_power_up(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *, bool);
-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4263,7 +4263,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
- return 0;
+ return;
}
static void tg3_power_down(struct tg3 *tp)
@@ -14295,7 +14295,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
@@ -18084,7 +18084,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
rtnl_lock();
@@ -18108,32 +18107,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);
unlock:
rtnl_unlock();
- return err;
+ return 0;
}
static int tg3_resume(struct device *device)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c32174484a96..fe121d36112d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3276,7 +3276,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
frame = BNAD_FRAME_SIZE(mtu);
new_frame = BNAD_FRAME_SIZE(new_mtu);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559..97291bfbeea5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
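The bnad_debugfs hunks above switch from memdup_user() to memdup_user_nul() so the copied buffer is NUL-terminated before it is handed to string parsers. A hypothetical debugfs write handler sketch of the pattern (demo_debugfs_write and the "%lx:%lx" format are illustrative, not the driver's code):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_debugfs_write(struct file *file, const char __user *buf,
				  size_t nbytes, loff_t *ppos)
{
	unsigned long addr, len;
	char *kern_buf;
	int rc;

	/* Copy and NUL-terminate the user buffer before parsing it. */
	kern_buf = memdup_user_nul(buf, nbytes);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	rc = sscanf(kern_buf, "%lx:%lx", &addr, &len);
	kfree(kern_buf);
	if (rc < 2)
		return -EINVAL;

	return nbytes;
}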
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 898debfd4db3..241ce9a2fa99 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3022,7 +3022,7 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
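Many hunks in this series convert the plain store in ndo_change_mtu handlers to WRITE_ONCE(dev->mtu, new_mtu), since fast-path readers may load dev->mtu without the writer's locks; WRITE_ONCE pairs with READ_ONCE on the reader side to avoid torn or refetched loads. A minimal, hypothetical sketch of both sides (demo_change_mtu and demo_frame_fits are illustrative names):

#include <linux/compiler.h>
#include <linux/netdevice.h>

static int demo_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	/* Publish the new MTU for lockless readers. */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

/* A fast-path reader pairs with the store like this. */
static bool demo_frame_fits(const struct net_device *dev, unsigned int len)
{
	return len <= READ_ONCE(dev->mtu);
}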
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 5e97f1e4e38e..a71b320fd030 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1358,7 +1358,7 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
/* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return xgmac_open(dev);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f38d31bfab1b..674c54831875 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1262,7 +1262,7 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
lio->mtu = new_mtu;
WRITE_ONCE(sc->caller_is_done, true);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index aa6c0dfb6f1c..96c6ea12279f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -218,7 +218,7 @@ lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
return -EIO;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 007d4b06819e..744f2434f7fa 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -649,7 +649,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/* HW lifts the limit if the frame is VLAN tagged
* (+4 bytes per each tag, up to two tags)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index eff350e0bc2a..aebb9fef3f6e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1589,7 +1589,7 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (!netif_running(netdev))
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d2286adf09fe..7d7d3e0098df 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -844,7 +844,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2236f1d35f2b..f92a3550e480 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2559,7 +2559,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2eb33a727bba..2418645c8823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3180,7 +3180,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 72ac4a34424b..69d045d769c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -305,7 +305,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.iport = ~0;
}
-static int cxgb4_validate_flow_match(struct net_device *dev,
+static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack,
struct flow_rule *rule)
{
struct flow_dissector *dissector = rule->match.dissector;
@@ -321,11 +321,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
- netdev_warn(dev, "Unsupported key used: 0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -339,13 +343,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
- netdev_err(dev, "IP Key supported only with IPv4/v6");
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP Key supported only with IPv4/v6");
return -EINVAL;
}
flow_rule_match_ip(rule, &match);
if (match.mask->ttl) {
- netdev_warn(dev, "ttl match unsupported for offload");
+ NL_SET_ERR_MSG_MOD(extack,
+ "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
}
@@ -576,7 +582,7 @@ static bool valid_l4_mask(u32 mask)
return hi && lo ? false : true;
}
-static bool valid_pedit_action(struct net_device *dev,
+static bool valid_pedit_action(struct netlink_ext_ack *extack,
const struct flow_action_entry *act,
u8 *natmode_flags)
{
@@ -595,8 +601,7 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_ETH_SMAC_47_16:
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -609,8 +614,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -629,8 +633,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -638,8 +641,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for TCP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -648,8 +651,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -657,8 +659,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for UDP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -667,13 +669,12 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit type");
return false;
}
return true;
@@ -727,8 +728,7 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
* the provided output port is not valid
*/
if (!found) {
- netdev_err(dev, "%s: Out port invalid\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Out port invalid");
return -EINVAL;
}
act_redir = true;
@@ -745,21 +745,21 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
- netdev_err(dev, "%s: Unsupported vlan proto\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan proto");
return -EOPNOTSUPP;
}
break;
default:
- netdev_err(dev, "%s: Unsupported vlan action\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan action");
return -EOPNOTSUPP;
}
act_vlan = true;
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act,
+ bool pedit_valid = valid_pedit_action(extack, act,
&natmode_flags);
if (!pedit_valid)
@@ -771,14 +771,14 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing. cxgb4_set_filter will validate */
break;
default:
- netdev_err(dev, "%s: Unsupported action\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
}
if ((act_pedit || act_vlan) && !act_redir) {
- netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "pedit/vlan rewrite invalid without egress redirect");
return -EINVAL;
}
@@ -864,7 +864,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
return -EOPNOTSUPP;
- if (cxgb4_validate_flow_match(dev, rule))
+ if (cxgb4_validate_flow_match(extack, rule))
return -EOPNOTSUPP;
cxgb4_process_flow_match(dev, rule, fs);
@@ -901,8 +901,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "filter creation err %d", ret);
return ret;
}
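The cxgb4_tc_flower hunks above replace netdev_err()/netdev_warn() with NL_SET_ERR_MSG_MOD()/NL_SET_ERR_MSG_FMT_MOD(), so the rejection reason is returned through the netlink extended ack to the tool that issued the tc command instead of landing only in the kernel log. A hypothetical validation helper showing the pattern (demo_validate_vlan is an illustrative name):

#include <linux/if_ether.h>
#include <linux/netlink.h>

static int demo_validate_vlan(struct netlink_ext_ack *extack, u16 proto)
{
	if (proto != ETH_P_8021Q) {
		/* Reported to userspace via the extended ack. */
		NL_SET_ERR_MSG_MOD(extack, "Unsupported vlan proto");
		return -EOPNOTSUPP;
	}

	return 0;
}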
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 17faac715882..5c13bcb4550d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
for (i = 0; i < nq; i++) {
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
- if (txq && txq->q.desc) {
+ if (txq->q.desc) {
tasklet_kill(&txq->qresume_tsk);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..de52bcb884c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
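The sge.c hunks above take the TX queue lock with __netif_tx_lock_bh()/__netif_tx_unlock_bh() instead of __netif_tx_lock(q, smp_processor_id()), so bottom halves are disabled while the selftest owns the queue and the TX completion path cannot contend on the same CPU. A hypothetical sketch of the locking shape only (demo_inject_test_frame is an illustrative name; the elided body stands in for the real reclaim/credit/doorbell steps):

#include <linux/netdevice.h>

static int demo_inject_test_frame(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);

	/* ... reclaim completions, check credits, ring the doorbell ... */

	__netif_tx_unlock_bh(txq);
	return 0;
}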
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9ba0864592e8..2fbe0f059a0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1169,7 +1169,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 6482728794dd..e8e460a92e0e 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -10,6 +10,7 @@
#include <net/ipv6.h>
#include <linux/netdevice.h>
#include <crypto/aes.h>
+#include <linux/skbuff_ref.h>
#include "chcr_ktls.h"
static LIST_HEAD(uld_ctx_list);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index aa79264e72ba..fbedc31674b3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../cxgb4
+ccflags-y := -I $(src)/../cxgb4
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d266a87297a5..f604119efc80 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2027,7 +2027,7 @@ static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
err = enic_open(netdev);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 12a83fa1302d..9f6089e81608 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
- /* The base address of the desc rings must be 512 byte aligned.
- * Descriptor count is aligned to groups of 32 descriptors. A
- * count of 0 means the maximum 4096 descriptors. Descriptor
- * size is aligned to 16 bytes.
- */
-
- unsigned int count_align = 32;
- unsigned int desc_align = 16;
- ring->base_align = 512;
+ /* Descriptor ring base address alignment in bytes */
+ ring->base_align = VNIC_DESC_BASE_ALIGN;
+ /* A count of 0 means the maximum number of descriptors */
if (desc_count == 0)
- desc_count = 4096;
+ desc_count = VNIC_DESC_MAX_COUNT;
- ring->desc_count = ALIGN(desc_count, count_align);
+ /* Descriptor count is aligned to groups of VNIC_DESC_COUNT_ALIGN */
+ ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
- ring->desc_size = ALIGN(desc_size, desc_align);
+ /* Descriptor size alignment in bytes */
+ ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 6273794b923b..7fdd8c661c99 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define VNIC_DESC_SIZE_ALIGN 16
+#define VNIC_DESC_COUNT_ALIGN 32
+#define VNIC_DESC_BASE_ALIGN 512
+#define VNIC_DESC_MAX_COUNT 4096
+
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19cd3..5f0c9e1771db 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packets up to here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
@@ -1978,7 +1986,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index aaf0eda96292..8af5ecec7d61 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -708,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
{
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ad862ed7888a..a8596ebcdfd6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4982,10 +4982,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
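The be_main hunk above replaces the open-coded "skip attributes that are not IFLA_BRIDGE_MODE" loop with nla_for_each_nested_type(), which iterates only nested attributes of the requested type. A hypothetical sketch assuming the iterator's argument order shown in that hunk (demo_last_bridge_mode is an illustrative name):

#include <linux/if_bridge.h>
#include <net/netlink.h>

/* Return the last IFLA_BRIDGE_MODE value nested under @br_spec,
 * or 0 if none is present. */
static u16 demo_last_bridge_mode(const struct nlattr *br_spec)
{
	const struct nlattr *attr;
	u16 mode = 0;
	int rem;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem)
		mode = nla_get_u16(attr);

	return mode;
}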
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 4b15af6b7122..44da335d66bd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1587,7 +1587,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
- xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(entry->xdp);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 003bc9a45c65..1047c805054e 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1092,7 +1092,7 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
}
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dcbc598b11c6..baa0b3c2ce6f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2995,7 +2995,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
return -EINVAL;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 888509cf1f21..6866807973da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2698,7 +2698,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
return err;
out:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
static int update_xps(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
- struct cpumask xps_mask;
- struct dpaa2_eth_fq *fq;
int i, num_queues, netdev_queues;
+ struct dpaa2_eth_fq *fq;
+ cpumask_var_t xps_mask;
int err = 0;
+ if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
+ return -ENOMEM;
+
num_queues = dpaa2_eth_queue_count(priv);
netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
@@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
for (i = 0; i < netdev_queues; i++) {
fq = &priv->fq[i % num_queues];
- cpumask_clear(&xps_mask);
- cpumask_set_cpu(fq->target_cpu, &xps_mask);
+ cpumask_clear(xps_mask);
+ cpumask_set_cpu(fq->target_cpu, xps_mask);
- err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ err = netif_set_xps_queue(net_dev, xps_mask, i);
if (err) {
netdev_warn_once(net_dev, "Error setting XPS queue\n");
break;
}
}
+ free_cpumask_var(xps_mask);
return err;
}
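The dpaa2-eth hunk above stops declaring a struct cpumask on the stack (which can be large with a big CONFIG_NR_CPUS) and allocates it with alloc_cpumask_var() instead, freeing it when done. A hypothetical sketch of the pattern for a single queue (demo_set_queue_affinity is an illustrative name):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>

static int demo_set_queue_affinity(struct net_device *dev, u16 queue, int cpu)
{
	cpumask_var_t mask;
	int err;

	/* Heap-allocated when CONFIG_CPUMASK_OFFSTACK is set. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	err = netif_set_xps_queue(dev, mask, queue);

	free_cpumask_var(mask);
	return err;
}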
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index b6a534a3e0b1..701a87370737 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
acl_h = &acl_key->match;
acl_m = &acl_key->mask;
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -548,6 +551,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
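These dpaa2-switch hunks (like the cxgb4 one earlier) add an early bail-out via flow_rule_match_has_control_flags() for rules whose control flags the driver cannot offload; the helper also fills in the extack message. A hypothetical parse-callback opening showing the pattern, assuming the helper behaves as used in the hunks (demo_parse_rule is an illustrative name):

#include <net/flow_offload.h>

static int demo_parse_rule(struct flow_rule *rule,
			   struct netlink_ext_ack *extack)
{
	/* Reject e.g. fragment-match flags the hardware cannot honor;
	 * the helper sets an extack message on failure. */
	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	/* ... dissect the supported keys ... */
	return 0;
}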
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index f3543a2df68d..a71f848adc05 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
return err;
}
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index 051748b997f3..a466c2379146 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(xdp_buff);
xdp_buff->rxq = &ch->xdp_rxq;
- xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp_buff);
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
/* xdp.data pointer may have changed */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 9f07f4947b63..5c45f42232d3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d7693fdf640d..a72d8a2eb0b3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2454,8 +2454,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = true;
-
phy_attached_info(phy_dev);
return 0;
@@ -2467,10 +2465,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
bool suppress_preamble = false;
+ struct phy_device *phydev;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
+ int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2584,6 +2584,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
goto err_out_free_mdiobus;
of_node_put(node);
+ /* find all the PHY devices on the bus and set mac_managed_pm to true */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phydev = mdiobus_get_phy(fep->mii_bus, addr);
+ if (phydev)
+ phydev->mac_managed_pm = true;
+ }
+
mii_cnt++;
/* save fec0 mii_bus */
@@ -3667,29 +3674,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
-
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
-}
-#endif
-
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3996,9 +3980,6 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_eth_ioctl = phy_do_ioctl_running,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
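The fec_main hunks above move the mac_managed_pm setting from the single PHY attached in fec_enet_mii_probe() to a walk over every address on the freshly registered MDIO bus, so PHYs that are probed but never attached also skip the generic PHY suspend/resume. A hypothetical sketch of that loop (demo_mark_phys_mac_managed is an illustrative name):

#include <linux/phy.h>

static void demo_mark_phys_mac_managed(struct mii_bus *bus)
{
	struct phy_device *phydev;
	int addr;

	/* Mark every PHY currently present on the bus as MAC-managed
	 * for power management. */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phydev = mdiobus_get_phy(bus, addr);
		if (phydev)
			phydev->mac_managed_pm = true;
	}
}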
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 758535adc9ff..92b8f4ab26f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -267,7 +267,6 @@ struct memac_cfg {
bool reset_on_init;
bool pause_ignore;
bool promiscuous_mode_enable;
- struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
u32 tx_ipg_length;
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index f557d68e5b76..1ed245a2ee01 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -12,7 +12,6 @@
struct muram_info {
struct gen_pool *pool;
void __iomem *vbase;
- size_t size;
phys_addr_t pbase;
};
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a811238c018d..2baef59f741d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2026,7 +2026,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
stop_gfar(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (dev->flags & IFF_UP)
startup_gfar(dev);
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 0a1400cb410a..06a28bce5d27 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_FUJITSU
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
index 646d69595b4f..d51e4c2b4a1a 100644
--- a/drivers/net/ethernet/fungible/funeth/Makefile
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+ccflags-y += -I$(src)/../funcore -I$(src)
obj-$(CONFIG_FUN_ETH) += funeth.o
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index df86770731ad..ac86179a0a81 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -927,7 +927,7 @@ static int fun_change_mtu(struct net_device *netdev, int new_mtu)
rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
if (!rc)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return rc;
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5fe7..ae1e21c9b0a5 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -50,6 +50,10 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
@@ -63,7 +67,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -607,6 +610,7 @@ struct gve_notify_block {
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
+ u32 irq;
};
/* Tracks allowed and current queue settings */
@@ -621,11 +625,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
-struct gve_options_dqo_rda {
- u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
- u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -639,28 +638,10 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
- struct gve_qpl_config *qpl_cfg;
- struct gve_queue_config *tx_cfg;
- struct gve_queue_config *rx_cfg;
-
- u16 num_xdp_queues;
- bool raw_addressing;
- bool is_gqi;
-
- /* Allocated resources are returned here */
- struct gve_queue_page_list *qpls;
-};
-
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 start_idx;
u16 num_rings;
@@ -676,10 +657,6 @@ struct gve_rx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
struct gve_queue_config *qcfg_tx;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
@@ -705,7 +682,6 @@ struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
- struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@@ -718,9 +694,13 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
+ u16 max_tx_desc_cnt;
+ u16 max_rx_desc_cnt;
+ u16 min_tx_desc_cnt;
+ u16 min_rx_desc_cnt;
+ bool modify_ring_size_enabled;
+ bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
- u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
- u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -730,7 +710,6 @@ struct gve_priv {
u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
- struct gve_qpl_config qpl_cfg; /* map used QPL ids */
u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -792,7 +771,6 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
- struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
@@ -1027,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
@@ -1038,41 +1015,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
- int tx_qid)
-{
- /* QPL already in use */
- if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
- int rx_qid)
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
- int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
- /* QPL already in use */
- if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(id, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
- clear_bit(id, qpl_cfg->qpl_id_map);
+ /* For DQO, the page count must exceed the ring size to absorb
+ * out-of-order completions, so set it to twice the ring size.
+ */
+ return 2 * rx_desc_cnt;
}
/* Returns the correct dma direction for tx and rx qpls */
@@ -1115,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1137,6 +1096,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
@@ -1150,6 +1115,12 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index ae12ac38e18b..8ca0def176ef 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +168,27 @@ void gve_parse_device_option(struct gve_priv *priv,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+ }
+
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* device has not provided min ring size */
+ if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+ priv->default_min_ring_size = true;
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -183,7 +207,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -204,7 +229,8 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl, dev_op_buffer_sizes);
+ dev_op_dqo_qpl, dev_op_buffer_sizes,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -565,6 +591,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
+ .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -573,24 +600,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 comp_ring_size;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- comp_ring_size =
- priv->options_dqo_rda.tx_comp_ring_entries;
- } else {
+ else
qpl_id = tx->dqo.qpl->id;
- comp_ring_size = priv->tx_desc_cnt;
- }
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(comp_ring_size);
+ cpu_to_be16(priv->tx_desc_cnt);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -610,63 +630,73 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
return gve_adminq_kick_and_wait(priv);
}
-static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd,
+ u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
- union gve_adminq_command cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
- cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
+ cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
};
if (gve_is_gqi(priv)) {
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
- cmd.create_rx_queue.rx_desc_ring_addr =
- cpu_to_be64(rx->desc.bus),
- cmd.create_rx_queue.rx_data_ring_addr =
- cpu_to_be64(rx->data.data_bus),
- cmd.create_rx_queue.index = cpu_to_be32(queue_index);
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
+ cmd->create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->desc.bus);
+ cmd->create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->data.data_bus);
+ cmd->create_rx_queue.index = cpu_to_be32(queue_index);
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
- u16 rx_buff_ring_entries;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- rx_buff_ring_entries =
- priv->options_dqo_rda.rx_buff_ring_entries;
- } else {
+ else
qpl_id = rx->dqo.qpl->id;
- rx_buff_ring_entries = priv->rx_desc_cnt;
- }
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
- cmd.create_rx_queue.rx_desc_ring_addr =
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
- cmd.create_rx_queue.rx_data_ring_addr =
+ cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd.create_rx_queue.packet_buffer_size =
+ cmd->create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
- cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(rx_buff_ring_entries);
- cmd.create_rx_queue.enable_rsc =
+ cmd->create_rx_queue.rx_buff_ring_size =
+ cpu_to_be16(priv->rx_desc_cnt);
+ cmd->create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
- cmd.create_rx_queue.header_buffer_size =
+ cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
}
+}
+
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
+/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
int err;
@@ -713,22 +743,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
return gve_adminq_kick_and_wait(priv);
}
+static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
+ u32 queue_index)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
+ cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
+}
+
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
- int err;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
- cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
- .queue_id = cpu_to_be32(queue_index),
- };
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_issue_cmd(priv, &cmd);
+}
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
- return 0;
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -745,31 +784,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
return gve_adminq_kick_and_wait(priv);
}
-static int gve_set_desc_cnt(struct gve_priv *priv,
- struct gve_device_descriptor *descriptor)
-{
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- return 0;
-}
-
-static int
-gve_set_desc_cnt_dqo(struct gve_priv *priv,
- const struct gve_device_descriptor *descriptor,
- const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+static void gve_set_default_desc_cnt(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->queue_format == GVE_DQO_QPL_FORMAT)
- return 0;
-
- priv->options_dqo_rda.tx_comp_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->options_dqo_rda.rx_buff_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
-
- return 0;
+ /* set default ranges */
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
static void gve_enable_supported_features(struct gve_priv *priv,
@@ -779,7 +804,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
- *dev_op_buffer_sizes)
+ *dev_op_buffer_sizes,
+ const struct gve_device_option_modify_ring
+ *dev_op_modify_ring)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -796,12 +823,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (dev_op_dqo_qpl) {
priv->tx_pages_per_qpl =
be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- priv->rx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
if (priv->tx_pages_per_qpl == 0)
priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- if (priv->rx_pages_per_qpl == 0)
- priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
@@ -814,12 +837,33 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
}
+
+ /* Read and store ring size ranges given by device */
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ priv->modify_ring_size_enabled = true;
+
+		/* Do not override the max ring size for DQO QPL; it is capped by a device limit */
+ if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+ priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+ priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+ }
+ if (priv->default_min_ring_size) {
+ /* If device hasn't provided minimums, use default minimums */
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ } else {
+ priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+ priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+ }
+ }
}
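
To summarize the ring-size bookkeeping introduced above: gve_set_default_desc_cnt() collapses the range to the current descriptor counts, and the MODIFY_RING option then widens it, leaving the DQO QPL maximum untouched and falling back to driver defaults when the device reports no minimums. The sketch below condenses that flow into plain C; the numbers, the toy_ names, and the "zero means no device minimum" shortcut (the driver uses its default_min_ring_size flag instead) are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct toy_ring_range {
	unsigned int cur, min, max;
};

static void toy_set_defaults(struct toy_ring_range *r, unsigned int cur)
{
	r->cur = cur;
	r->min = cur;		/* default range collapses to the current count */
	r->max = cur;
}

static void toy_apply_modify_ring(struct toy_ring_range *r,
				  unsigned int dev_min, unsigned int dev_max,
				  bool keep_max, unsigned int fallback_min)
{
	if (!keep_max)			/* the DQO QPL case keeps its original max */
		r->max = dev_max;
	r->min = dev_min ? dev_min : fallback_min;
}

int main(void)
{
	struct toy_ring_range rx;

	toy_set_defaults(&rx, 1024);
	toy_apply_modify_ring(&rx, 0, 4096, false, 512);
	printf("rx ring range [%u, %u], current %u\n", rx.min, rx.max, rx.cur);
	return 0;
}
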
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -851,9 +895,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames,
- &dev_op_dqo_qpl,
- &dev_op_buffer_sizes);
+ &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes,
+ &dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -888,15 +932,13 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
- if (gve_is_gqi(priv)) {
- err = gve_set_desc_cnt(priv, descriptor);
- } else {
- /* DQO supports LRO. */
+
+ /* set default descriptor counts */
+ gve_set_default_desc_cnt(priv, descriptor);
+
+ /* DQO supports LRO. */
+ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
- err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
- }
- if (err)
- goto free_device_descriptor;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
@@ -912,18 +954,11 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
- if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_data_slot_cnt);
- priv->rx_desc_cnt = priv->rx_data_slot_cnt;
- }
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes);
+ dev_op_buffer_sizes, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5ac972e45ff8..e64f0dbe744d 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
- __be16 tx_comp_ring_entries;
- __be16 rx_buff_ring_entries;
+ __be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +133,16 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+struct gve_device_option_modify_ring {
+ __be32 supported_featured_mask;
+ __be16 max_rx_ring_size;
+ __be16 max_tx_ring_size;
+ __be16 min_rx_ring_size;
+ __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
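
The 12-byte figure in the static_assert above follows directly from the field list: one 32-bit mask plus four 16-bit bounds, naturally aligned. A compilable stand-alone check of the same layout, with uint32_t/uint16_t standing in for __be32/__be16 and a made-up struct name:

#include <stdint.h>

struct toy_option_modify_ring {
	uint32_t supported_features_mask;	/* 4 bytes, offset 0 */
	uint16_t max_rx_ring_size;		/* 2 bytes, offset 4 */
	uint16_t max_tx_ring_size;		/* 2 bytes, offset 6 */
	uint16_t min_rx_ring_size;		/* 2 bytes, offset 8 */
	uint16_t min_tx_ring_size;		/* 2 bytes, offset 10 */
};

_Static_assert(sizeof(struct toy_option_modify_ring) == 12,
	       "option must match the 12-byte wire format");

int main(void) { return 0; }
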
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +152,31 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
- GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
- GVE_DEV_OPT_ID_GQI_RDA = 0x2,
- GVE_DEV_OPT_ID_GQI_QPL = 0x3,
- GVE_DEV_OPT_ID_DQO_RDA = 0x4,
- GVE_DEV_OPT_ID_DQO_QPL = 0x7,
- GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
- GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
};
enum gve_sup_feature_mask {
- GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
- GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_MODIFY_RING_MASK = 1 << 0,
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -439,7 +451,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index b81584829c40..e83773fb891f 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -44,6 +44,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 9aebfb843d9d..fe1741d482b4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -8,6 +8,7 @@
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
+#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -73,7 +74,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
- "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt"
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -89,42 +90,34 @@ static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
- char *s = (char *)data;
+	u8 *s = data;
int num_tx_queues;
int i, j;
num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_rx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
+ ethtool_puts(&s, gve_gstrings_main_stats[i]);
- for (i = 0; i < num_tx_queues; i++) {
- for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_tx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
+ i);
+
+ for (i = 0; i < num_tx_queues; i++)
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
+ i);
+
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
+ ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
- memcpy(s, *gve_gstrings_adminq_stats,
- sizeof(gve_gstrings_adminq_stats));
- s += sizeof(gve_gstrings_adminq_stats);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(s, *gve_gstrings_priv_flags,
- sizeof(gve_gstrings_priv_flags));
- s += sizeof(gve_gstrings_priv_flags);
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
+ ethtool_puts(&s, gve_gstrings_priv_flags[i]);
break;
default:
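
ethtool_puts() and ethtool_sprintf() each fill one fixed-width ETH_GSTRING_LEN slot and advance the caller's cursor, which is what lets the open-coded memcpy/snprintf pointer arithmetic above be dropped. Below is a userspace mimic of that calling convention, not the kernel helpers themselves; the toy_ names are invented for the example.

#include <stdio.h>
#include <string.h>

#define TOY_GSTRING_LEN 32	/* ETH_GSTRING_LEN in the kernel */

static void toy_puts(char **cursor, const char *str)
{
	strncpy(*cursor, str, TOY_GSTRING_LEN - 1);
	(*cursor)[TOY_GSTRING_LEN - 1] = '\0';
	*cursor += TOY_GSTRING_LEN;	/* advance to the next slot */
}

static void toy_sprintf(char **cursor, const char *fmt, int idx)
{
	snprintf(*cursor, TOY_GSTRING_LEN, fmt, idx);
	*cursor += TOY_GSTRING_LEN;
}

int main(void)
{
	char buf[4 * TOY_GSTRING_LEN] = { 0 };
	char *s = buf;

	toy_puts(&s, "rx_packets");
	toy_sprintf(&s, "rx_desc_cnt[%d]", 0);
	toy_sprintf(&s, "rx_desc_cnt[%d]", 1);
	printf("%s | %s | %s\n", buf, buf + TOY_GSTRING_LEN,
	       buf + 2 * TOY_GSTRING_LEN);
	return 0;
}
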
@@ -165,6 +158,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
+ int num_stopped_rxqs = 0;
+ int num_stopped_txqs = 0;
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
@@ -181,12 +176,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ rx_qid_to_stats_idx[ring] = -1;
+ if (!gve_rx_was_added_to_block(priv, ring))
+ num_stopped_rxqs++;
+ }
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
return;
}
+ for (ring = 0; ring < num_tx_queues; ring++) {
+ tx_qid_to_stats_idx[ring] = -1;
+ if (!gve_tx_was_added_to_block(priv, ring))
+ num_stopped_txqs++;
+ }
+
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@@ -260,7 +266,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
- max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ /* The boundary between driver stats and NIC stats shifts if there are
+ * stopped queues.
+ */
+ base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
+ NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM *
+ (priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx;
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
@@ -274,6 +286,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
+ net_err_ratelimited("Invalid rxq id in NIC stats\n");
+ continue;
+ }
rx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk RX rings */
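
A quick worked example of the index arithmetic in the hunk above, using made-up stand-ins for the GVE_*_STATS_REPORT_NUM and NIC_*_STATS_REPORT_NUM constants; it only mirrors the formulas to show how stopped queues move the start of the NIC-RX window and shrink it.

#include <stdio.h>

int main(void)
{
	const int drv_tx_per_q = 6, drv_rx_per_q = 5;	/* driver stats per queue */
	const int nic_tx_per_q = 3, nic_rx_per_q = 4;	/* NIC stats per running queue */
	const int num_tx = 4, num_rx = 4;
	const int stopped_tx = 2, stopped_rx = 1;
	int base, max;

	base = drv_tx_per_q * num_tx + drv_rx_per_q * num_rx;
	/* The boundary shifts when some queues are stopped. */
	base += nic_rx_per_q * stopped_rx + nic_tx_per_q * stopped_tx;
	max = base + nic_rx_per_q * (num_rx - stopped_rx);

	printf("NIC rx stats occupy report indices [%d, %d)\n", base, max);
	return 0;
}
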
@@ -308,11 +324,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
} else {
- stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -338,7 +354,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM *
+ (num_tx_queues - num_stopped_txqs) +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -352,6 +369,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= num_tx_queues) {
+ net_err_ratelimited("Invalid txq id in NIC stats\n");
+ continue;
+ }
tx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk TX rings */
@@ -383,11 +404,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else {
- stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -428,6 +449,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_set_driver_parameter_cnt;
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
+ data[i++] = priv->adminq_get_ptype_map_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -489,8 +511,8 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- cmd->rx_max_pending = priv->rx_desc_cnt;
- cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_max_pending = priv->max_rx_desc_cnt;
+ cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
@@ -502,20 +524,81 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
+static int gve_adjust_ring_sizes(struct gve_priv *priv,
+ u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ int err;
+
+ /* get current queue configuration */
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ /* copy over the new ring_size from ethtool */
+ tx_alloc_cfg.ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg.ring_size = new_rx_desc_cnt;
+
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ }
+
+ /* Set new ring_size for the next up */
+ priv->tx_desc_cnt = new_tx_desc_cnt;
+ priv->rx_desc_cnt = new_rx_desc_cnt;
+
+ return 0;
+}
+
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ /* check for valid range */
+ if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+ new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+ new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+ new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
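
gve_validate_req_ring_size() boils down to a range check against the device-advertised bounds plus a power-of-two check (the rings are indexed with a mask elsewhere in the driver). A standalone sketch of the same checks; the helper name and the bounds used in main() are invented for the example.

#include <stdbool.h>
#include <stdio.h>

static bool toy_is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int toy_validate_ring_size(unsigned int req, unsigned int min,
				  unsigned int max)
{
	if (req < min || req > max)
		return -1;		/* out of range (-EINVAL in the driver) */
	if (!toy_is_power_of_2(req))
		return -1;		/* mask-based indexing needs a power of 2 */
	return 0;
}

int main(void)
{
	printf("%d\n", toy_validate_ring_size(1024, 512, 4096));  /*  0 */
	printf("%d\n", toy_validate_ring_size(1000, 512, 4096));  /* -1 */
	printf("%d\n", toy_validate_ring_size(8192, 512, 4096));  /* -1 */
	return 0;
}

From userspace this path is exercised with ethtool -G <ifname> rx <N> tx <M>, which lands in gve_set_ringparam() below.
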
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ u16 new_tx_cnt, new_rx_cnt;
+ int err;
- if (priv->tx_desc_cnt != cmd->tx_pending ||
- priv->rx_desc_cnt != cmd->rx_pending) {
- dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ if (err)
+ return err;
+
+ if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
return -EOPNOTSUPP;
}
- return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ new_tx_cnt = cmd->tx_pending;
+ new_rx_cnt = cmd->rx_pending;
+
+ if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+ return -EINVAL;
+
+ return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -710,5 +793,6 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_tunable = gve_set_tunable,
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
- .get_link_ksettings = gve_get_link_ksettings
+ .get_link_ksettings = gve_get_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 166bd827a6d7..cabf7d4bcecb 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
@@ -16,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
@@ -253,6 +255,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
+static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
+{
+ int cpu_curr = smp_processor_id();
+ const struct cpumask *aff_mask;
+
+ aff_mask = irq_get_effective_affinity_mask(irq);
+ if (unlikely(!aff_mask))
+ return 1;
+
+ return cpumask_test_cpu(cpu_curr, aff_mask);
+}
+
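
gve_is_napi_on_home_cpu() asks whether the executing CPU is in the irq's effective affinity mask, treating an unknown mask as "yes". The toy below mirrors that decision with a plain bitmask and sched_getcpu(); it illustrates the check only and does not use the kernel cpumask API.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int toy_on_home_cpu(unsigned long affinity_mask, int cpu)
{
	/* Unknown mask or out-of-range cpu: claim "home", mirroring the
	 * driver's behavior when no effective affinity mask is available. */
	if (!affinity_mask || cpu < 0 || cpu >= (int)(8 * sizeof(affinity_mask)))
		return 1;
	return !!(affinity_mask & (1UL << cpu));
}

int main(void)
{
	unsigned long irq_mask = 1UL << 0;	/* pretend the irq is pinned to cpu 0 */
	int cpu = sched_getcpu();

	printf("running on cpu %d, on home cpu: %d\n",
	       cpu, toy_on_home_cpu(irq_mask, cpu));
	return 0;
}
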
int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@@ -322,8 +336,21 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
reschedule |= work_done == budget;
}
- if (reschedule)
- return budget;
+ if (reschedule) {
+ /* Reschedule by returning budget only if already on the correct
+ * cpu.
+ */
+ if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
+ return budget;
+
+		/* If not on the cpu to which this queue's irq has affinity,
+		 * avoid rescheduling napi and arm the irq instead so that napi
+		 * gets rescheduled back eventually onto the right cpu.
+ */
+ if (work_done == budget)
+ work_done--;
+ }
if (likely(napi_complete_done(napi, work_done))) {
/* Enable interrupts again.
@@ -428,6 +455,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
"Failed to receive msix vector %d\n", i);
goto abort_with_some_ntfy_blocks;
}
+ block->irq = priv->msix_vectors[msix_idx].vector;
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
block->irq_db_index = &priv->irq_db_indices[i].index;
@@ -441,6 +469,7 @@ abort_with_some_ntfy_blocks:
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
@@ -474,6 +503,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
kvfree(priv->ntfy_blocks);
@@ -582,37 +612,36 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
-static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
+static int gve_unregister_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
int err;
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (!qpl)
+ return 0;
+
+ err = gve_adminq_unregister_page_list(priv, qpl->id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ qpl->id);
return err;
}
- priv->num_registered_pages -= priv->qpls[i].num_entries;
+ priv->num_registered_pages -= qpl->num_entries;
return 0;
}
-static int gve_register_qpl(struct gve_priv *priv, u32 i)
+static int gve_register_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
- int num_rx_qpls;
int pages;
int err;
- /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
- num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
- if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot register nonexisting QPL at index %d\n", i);
- return -EINVAL;
- }
+ if (!qpl)
+ return 0;
- pages = priv->qpls[i].num_entries;
+ pages = qpl->num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
@@ -622,14 +651,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return -EINVAL;
}
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ err = gve_adminq_register_page_list(priv, qpl);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ qpl->id);
return err;
}
@@ -637,6 +663,26 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return 0;
}
+static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (gve_is_gqi(priv))
+ return tx->tx_fifo.qpl;
+ else
+ return tx->dqo.qpl;
+}
+
+static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+
+ if (gve_is_gqi(priv))
+ return rx->data.qpl;
+ else
+ return rx->dqo.qpl;
+}
+
static int gve_register_xdp_qpls(struct gve_priv *priv)
{
int start_id;
@@ -645,7 +691,7 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean up */
if (err)
return err;
@@ -656,7 +702,6 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -665,15 +710,13 @@ static int gve_register_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
if (err)
return err;
}
- /* there might be a gap between the tx and rx qpl ids */
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_register_qpl(priv, start_id + i);
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
if (err)
return err;
}
@@ -689,7 +732,7 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -700,7 +743,6 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -709,15 +751,14 @@ static int gve_unregister_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
}
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_unregister_qpl(priv, start_id + i);
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -828,8 +869,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
@@ -886,9 +925,9 @@ static int gve_alloc_xdp_rings(struct gve_priv *priv)
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@@ -974,9 +1013,9 @@ static void gve_free_xdp_rings(struct gve_priv *priv)
}
}
-static void gve_free_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_cfg,
- struct gve_rx_alloc_rings_cfg *rx_cfg)
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
if (gve_is_gqi(priv)) {
gve_tx_free_rings_gqi(priv, tx_cfg);
@@ -1005,35 +1044,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- u32 id, int pages)
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages)
{
+ struct gve_queue_page_list *qpl;
int err;
int i;
+ qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
+ if (!qpl)
+ return NULL;
+
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->pages)
- return -ENOMEM;
+ goto abort;
+
qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->page_buses)
- return -ENOMEM;
+ goto abort;
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
- /* caller handles clean up */
if (err)
- return -ENOMEM;
+ goto abort;
qpl->num_entries++;
}
- return 0;
+ return qpl;
+
+abort:
+ gve_free_queue_page_list(priv, qpl, id);
+ return NULL;
}
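
gve_alloc_queue_page_list() now follows a single abort label and relies on gve_free_queue_page_list() being safe to call on a partially built (or NULL) list. A self-contained sketch of that allocate/abort pattern; the toy_ names and page size are invented for the example.

#include <stdlib.h>

struct toy_page_list {
	void **pages;
	int num;		/* pages successfully allocated so far */
};

static void toy_free_page_list(struct toy_page_list *pl)
{
	int i;

	if (!pl)
		return;
	if (pl->pages) {
		for (i = 0; i < pl->num; i++)
			free(pl->pages[i]);
		free(pl->pages);
	}
	free(pl);
}

static struct toy_page_list *toy_alloc_page_list(int pages)
{
	struct toy_page_list *pl = calloc(1, sizeof(*pl));
	int i;

	if (!pl)
		return NULL;
	pl->pages = calloc(pages, sizeof(*pl->pages));
	if (!pl->pages)
		goto abort;
	for (i = 0; i < pages; i++) {
		pl->pages[i] = malloc(4096);
		if (!pl->pages[i])
			goto abort;
		pl->num++;
	}
	return pl;

abort:
	toy_free_page_list(pl);		/* safe on partially built state */
	return NULL;
}

int main(void)
{
	toy_free_page_list(toy_alloc_page_list(8));
	return 0;
}
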
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
@@ -1045,14 +1090,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- int id)
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id)
{
int i;
- if (!qpl->pages)
+ if (!qpl)
return;
+ if (!qpl->pages)
+ goto free_qpl;
if (!qpl->page_buses)
goto free_pages;
@@ -1065,120 +1112,8 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
-}
-
-static void gve_free_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int start_id,
- int num_qpls)
-{
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-}
-
-static int gve_alloc_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int page_count,
- int start_id,
- int num_qpls)
-{
- int err;
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++) {
- err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
- if (err)
- goto free_qpls;
- }
-
- return 0;
-
-free_qpls:
- /* Must include the failing QPL too for gve_alloc_queue_page_list fails
- * without cleaning up.
- */
- gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
- return err;
-}
-
-static int gve_alloc_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- int rx_start_id, tx_num_qpls, rx_num_qpls;
- struct gve_queue_page_list *qpls;
- int page_count;
- int err;
-
- if (cfg->raw_addressing)
- return 0;
-
- qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
- if (!qpls)
- return -ENOMEM;
-
- cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!cfg->qpl_cfg->qpl_id_map) {
- err = -ENOMEM;
- goto free_qpl_array;
- }
-
- /* Allocate TX QPLs */
- page_count = priv->tx_pages_per_qpl;
- tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
- gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
- if (err)
- goto free_qpl_map;
-
- /* Allocate RX QPLs */
- rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
- /* For GQI_QPL number of pages allocated have 1:1 relationship with
- * number of descriptors. For DQO, number of pages required are
- * more than descriptors (because of out of order completions).
- */
- page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
- if (err)
- goto free_tx_qpls;
-
- cfg->qpls = qpls;
- return 0;
-
-free_tx_qpls:
- gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-free_qpl_array:
- kvfree(qpls);
- return err;
-}
-
-static void gve_free_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- struct gve_queue_page_list *qpls = cfg->qpls;
- int i;
-
- if (!qpls)
- return;
-
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-
- for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-
- kvfree(qpls);
- cfg->qpls = NULL;
+free_qpl:
+ kvfree(qpl);
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1282,18 +1217,6 @@ static void gve_drain_page_cache(struct gve_priv *priv)
page_frag_cache_drain(&priv->rx[i].page_cache);
}
-static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->is_gqi = gve_is_gqi(priv);
- cfg->num_xdp_queues = priv->num_xdp_queues;
- cfg->qpl_cfg = &priv->qpl_cfg;
- cfg->tx_cfg = &priv->tx_cfg;
- cfg->rx_cfg = &priv->rx_cfg;
- cfg->qpls = priv->qpls;
-}
-
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
@@ -1301,8 +1224,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
cfg->packet_buffer_size = gve_is_gqi(priv) ?
GVE_DEFAULT_RX_BUFFER_SIZE :
@@ -1310,90 +1231,56 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->rx = priv->rx;
}
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
-static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_ring(struct gve_priv *priv, int i)
{
- int i;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_start_ring_gqi(priv, i);
- else
- gve_rx_start_ring_dqo(priv, i);
- }
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
-static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- if (!priv->rx)
- return;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_stop_ring_gqi(priv, i);
- else
- gve_rx_stop_ring_dqo(priv, i);
- }
+ for (i = 0; i < num_rings; i++)
+ gve_rx_start_ring(priv, i);
}
-static void gve_queues_mem_free(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_ring(struct gve_priv *priv, int i)
{
- gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- gve_free_qpls(priv, qpls_alloc_cfg);
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
}
-static int gve_queues_mem_alloc(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- int err;
-
- err = gve_alloc_qpls(priv, qpls_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
- return err;
- }
- tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
- goto free_qpls;
- }
+ int i;
- return 0;
+ if (!priv->rx)
+ return;
-free_qpls:
- gve_free_qpls(priv, qpls_alloc_cfg);
- return err;
+ for (i = 0; i < num_rings; i++)
+ gve_rx_stop_ring(priv, i);
}
static void gve_queues_mem_remove(struct gve_priv *priv)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- gve_queues_mem_free(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- priv->qpls = NULL;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
priv->tx = NULL;
priv->rx = NULL;
}
@@ -1402,7 +1289,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv)
* No memory is allocated. Passed-in memory is freed on errors.
*/
static int gve_queues_start(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
@@ -1410,12 +1296,10 @@ static int gve_queues_start(struct gve_priv *priv,
int err;
/* Record new resources into priv */
- priv->qpls = qpls_alloc_cfg->qpls;
priv->tx = tx_alloc_cfg->tx;
priv->rx = rx_alloc_cfg->rx;
/* Record new configs into priv */
- priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
priv->tx_cfg = *tx_alloc_cfg->qcfg;
priv->rx_cfg = *rx_alloc_cfg->qcfg;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
@@ -1483,23 +1367,19 @@ static int gve_open(struct net_device *dev)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
/* No need to free on error: ownership of resources is lost after
* calling gve_queues_start.
*/
- err = gve_queues_start(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
@@ -1558,11 +1438,8 @@ static int gve_close(struct net_device *dev)
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
- int qpl_start_id;
int err;
- qpl_start_id = gve_xdp_tx_start_queue_id(priv);
-
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1574,27 +1451,19 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
- int start_id;
int err;
priv->num_xdp_queues = priv->rx_cfg.num_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
- start_id, gve_num_xdp_qpls(priv));
- if (err)
- goto err;
-
err = gve_alloc_xdp_rings(priv);
if (err)
- goto free_xdp_qpls;
+ goto err;
err = gve_reg_xdp_info(priv, priv->dev);
if (err)
@@ -1612,8 +1481,6 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
-free_xdp_qpls:
- gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1863,16 +1730,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int gve_adjust_config(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
	/* Allocate resources for the new configuration */
- err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to alloc new queues");
@@ -1884,14 +1749,12 @@ static int gve_adjust_config(struct gve_priv *priv,
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to close old queues");
- gve_queues_mem_free(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
return err;
}
/* Bring the device back up again with the new resources. */
- err = gve_queues_start(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
@@ -1911,32 +1774,18 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* Relay the new config from ethtool */
- qpls_alloc_cfg.tx_cfg = &new_tx_config;
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- qpls_alloc_cfg.rx_cfg = &new_rx_config;
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
if (netif_carrier_ok(priv->dev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
@@ -1961,12 +1810,16 @@ static void gve_turndown(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
@@ -1989,6 +1842,9 @@ static void gve_turnup(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -1996,11 +1852,21 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->tx_coalesce_usecs);
}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below, whereas any
+		 * descs written after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -2008,11 +1874,27 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->rx_coalesce_usecs);
}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below, whereas any
+		 * descs written after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
gve_set_napi_enabled(priv);
}
+static void gve_turnup_and_check_status(struct gve_priv *priv)
+{
+ u32 status;
+
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+}
+
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_notify_block *block;
@@ -2077,7 +1959,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
bool enable_hdr_split;
int err = 0;
@@ -2097,15 +1978,13 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
rx_alloc_cfg.enable_header_split = enable_hdr_split;
rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
@@ -2115,26 +1994,15 @@ static int gve_set_features(struct net_device *netdev,
const netdev_features_t orig_features = netdev->features;
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err) {
/* Revert the change on error. */
netdev->features = orig_features;
@@ -2473,6 +2341,140 @@ static void gve_write_version(u8 __iomem *driver_version_register)
writeb('\n', driver_version_register);
}
+static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ /* Destroying queue 0 while other queues exist is not supported in DQO */
+ if (!gve_is_gqi(priv) && idx == 0)
+ return -ERANGE;
+
+ /* Single-queue destruction requires quiescence on all queues */
+ gve_turndown(priv);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_destroy_single_rx_queue(priv, idx);
+ if (err)
+ return err;
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ return err;
+ }
+
+ gve_rx_stop_ring(priv, idx);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ *gve_per_q_mem = priv->rx[idx];
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return 0;
+}
+
+static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
+ else
+ gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
+}
+
+static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
+ int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
+
+ return err;
+}
+
+static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ priv->rx[idx] = *gve_per_q_mem;
+
+ /* Single-queue creation requires quiescence on all queues */
+ gve_turndown(priv);
+
+ gve_rx_start_ring(priv, idx);
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ goto abort;
+ }
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_create_single_rx_queue(priv, idx);
+ if (err)
+ goto abort;
+
+ if (gve_is_gqi(priv))
+ gve_rx_write_doorbell(priv, &priv->rx[idx]);
+ else
+ gve_rx_post_buffers_dqo(&priv->rx[idx]);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+ return 0;
+
+abort:
+ gve_rx_stop_ring(priv, idx);
+
+	/* All failures in this func result in a reset; by clearing the struct
+ * at idx, we prevent a double free when that reset runs. The reset,
+ * which needs the rtnl lock, will not run till this func returns and
+ * its caller gives up the lock.
+ */
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return err;
+}
+
+static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct gve_rx_ring),
+ .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
+ .ndo_queue_mem_free = gve_rx_queue_mem_free,
+ .ndo_queue_start = gve_rx_queue_start,
+ .ndo_queue_stop = gve_rx_queue_stop,
+};
+
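
The ops table above plugs the driver into the per-queue management API: mem_alloc/mem_free work on a caller-provided gve_rx_ring-sized buffer, stop parks the live ring's state in such a buffer, and start brings a ring up from one. The sketch below only illustrates the call order such a restart implies; the toy_ structures and the simplified flow are invented for the example and are not the netdev core's queue-restart code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_rx_queue { int idx; int running; };

struct toy_queue_ops {
	size_t mem_size;
	int  (*mem_alloc)(void *per_q_mem, int idx);
	void (*mem_free)(void *per_q_mem);
	int  (*start)(void *per_q_mem, int idx);
	int  (*stop)(void *per_q_mem, int idx);
};

static int toy_mem_alloc(void *m, int idx)
{
	struct toy_rx_queue *q = m;

	q->idx = idx;			/* pre-build the replacement ring */
	q->running = 0;
	return 0;
}

static void toy_mem_free(void *m)
{
	memset(m, 0, sizeof(struct toy_rx_queue));
}

static int toy_start(void *m, int idx)
{
	((struct toy_rx_queue *)m)->running = 1;
	printf("queue %d started from new memory\n", idx);
	return 0;
}

static int toy_stop(void *m, int idx)
{
	((struct toy_rx_queue *)m)->running = 0;
	printf("queue %d stopped, old state parked\n", idx);
	return 0;
}

static const struct toy_queue_ops toy_ops = {
	.mem_size  = sizeof(struct toy_rx_queue),
	.mem_alloc = toy_mem_alloc,
	.mem_free  = toy_mem_free,
	.start     = toy_start,
	.stop      = toy_stop,
};

int main(void)
{
	void *new_mem = calloc(1, toy_ops.mem_size);
	void *old_mem = calloc(1, toy_ops.mem_size);

	if (!new_mem || !old_mem)
		return 1;
	/* Restart queue 3: alloc new memory, stop the old instance into
	 * old_mem, start the new instance, then free the old memory. */
	toy_ops.mem_alloc(new_mem, 3);
	toy_ops.stop(old_mem, 3);
	toy_ops.start(new_mem, 3);
	toy_ops.mem_free(old_mem);
	free(new_mem);
	free(old_mem);
	return 0;
}
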
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2527,6 +2529,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
+ dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
/* Set default and supported features.
*
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fae9..acb73d4d0de6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
u32 slots = rx->mask + 1;
int i;
+ if (!rx->data.page_info)
+ return;
+
if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -51,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
rx->data.page_info = NULL;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+ rx->desc.seqno = 1;
+ rx->cnt = 0;
+ gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ const u32 slots = priv->rx_desc_cnt;
+ size_t size;
+
+ /* Reset desc ring */
+ if (rx->desc.desc_ring) {
+ size = slots * sizeof(rx->desc.desc_ring[0]);
+ memset(rx->desc.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ gve_rx_init_ring_state_gqi(rx);
+}
+
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -60,34 +96,48 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_gqi(priv, idx);
}
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
+ u32 qpl_id;
- bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
- dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
- rx->desc.desc_ring = NULL;
+ if (rx->desc.desc_ring) {
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
+ dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+ rx->desc.desc_ring = NULL;
+ }
- dma_free_coherent(dev, sizeof(*rx->q_resources),
- rx->q_resources, rx->q_resources_bus);
- rx->q_resources = NULL;
+ if (rx->q_resources) {
+ dma_free_coherent(dev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+ }
gve_rx_unfill_pages(priv, rx, cfg);
- bytes = sizeof(*rx->data.data_ring) * slots;
- dma_free_coherent(dev, bytes, rx->data.data_ring,
- rx->data.data_bus);
- rx->data.data_ring = NULL;
+ if (rx->data.data_ring) {
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(dev, bytes, rx->data.data_ring,
+ rx->data.data_bus);
+ rx->data.data_ring = NULL;
+ }
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
+ if (rx->data.qpl) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@@ -144,14 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
- if (!rx->data.qpl) {
- kvfree(rx->data.page_info);
- rx->data.page_info = NULL;
- return -ENOMEM;
- }
- }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@@ -204,9 +246,6 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
-
return err;
alloc_err_rda:
@@ -217,15 +256,6 @@ alloc_err_rda:
return err;
}
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
- ctx->skb_head = NULL;
- ctx->skb_tail = NULL;
- ctx->total_size = 0;
- ctx->frag_cnt = 0;
- ctx->drop_pkt = false;
-}
-
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -234,14 +264,16 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->rx_data_slot_cnt;
+ u32 slots = cfg->ring_size;
int filled_pages;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
int err;
@@ -274,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
goto abort_with_slots;
}
+ if (!rx->data.raw_addressing) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = cfg->ring_size;
+
+ rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
+ if (!rx->data.qpl) {
+ err = -ENOMEM;
+ goto abort_with_copy_pool;
+ }
+ }
+
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_copy_pool;
+ goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -304,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->cnt = 0;
rx->db_threshold = slots / 2;
- rx->desc.seqno = 1;
+ gve_rx_init_ring_state_gqi(rx);
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
@@ -319,6 +362,11 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
+abort_with_qpl:
+ if (!rx->data.raw_addressing) {
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -337,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308aeb..c1c912de59c7 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
- if (idx >= priv->rx_pages_per_qpl) {
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
@@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
}
}
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+ const u32 buffer_queue_slots,
+ const u32 completion_queue_slots)
+{
+ int i;
+
+ /* Set buffer queue state */
+ rx->dqo.bufq.mask = buffer_queue_slots - 1;
+ rx->dqo.bufq.head = 0;
+ rx->dqo.bufq.tail = 0;
+
+ /* Set completion queue state */
+ rx->dqo.complq.num_free_slots = completion_queue_slots;
+ rx->dqo.complq.mask = completion_queue_slots - 1;
+ rx->dqo.complq.cur_gen_bit = 0;
+ rx->dqo.complq.head = 0;
+
+ /* Set RX SKB context */
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
+
+ /* Set up linked list of buffer IDs */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+ rx->dqo.buf_states[i].next = i + 1;
+ rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+ }
+
+ rx->dqo.free_buf_states = 0;
+ rx->dqo.recycled_buf_states.head = -1;
+ rx->dqo.recycled_buf_states.tail = -1;
+ rx->dqo.used_buf_states.head = -1;
+ rx->dqo.used_buf_states.tail = -1;
+}
+
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ size_t size;
+ int i;
+
+ const u32 buffer_queue_slots = priv->rx_desc_cnt;
+ const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+ /* Reset buffer queue */
+ if (rx->dqo.bufq.desc_ring) {
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+ buffer_queue_slots;
+ memset(rx->dqo.bufq.desc_ring, 0, size);
+ }
+
+ /* Reset completion queue */
+ if (rx->dqo.complq.desc_ring) {
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ memset(rx->dqo.complq.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ /* Reset buf states */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states; i++) {
+ struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+ if (bs->page_info.page)
+ gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+ }
+ }
+
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+}
+
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -220,16 +296,18 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_dqo(priv, idx);
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
+ u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -247,8 +325,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
if (bs->page_info.page)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
+
if (rx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
@@ -275,10 +355,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+ const u32 buf_count)
{
struct device *hdev = &priv->pdev->dev;
- int buf_count = rx->dqo.bufq.mask + 1;
rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
&rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -296,17 +376,17 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
size_t size;
- int i;
+ u32 qpl_id;
- const u32 buffer_queue_slots = cfg->raw_addressing ?
- priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -314,15 +394,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
- rx->dqo.bufq.mask = buffer_queue_slots - 1;
- rx->dqo.complq.num_free_slots = completion_queue_slots;
- rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->ctx.skb_head = NULL;
- rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
- priv->rx_pages_per_qpl;
+ gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
@@ -331,19 +406,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
/* Allocate header buffers for header-split */
if (cfg->enable_header_split)
- if (gve_rx_alloc_hdr_bufs(priv, rx))
+ if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
goto err;
- /* Set up linked list of buffer IDs */
- for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
- rx->dqo.buf_states[i].next = i + 1;
-
- rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
- rx->dqo.recycled_buf_states.head = -1;
- rx->dqo.recycled_buf_states.tail = -1;
- rx->dqo.used_buf_states.head = -1;
- rx->dqo.used_buf_states.tail = -1;
-
/* Allocate RX completion queue */
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
@@ -360,7 +425,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+
+ rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -371,6 +440,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->q_resources)
goto err;
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+
return 0;
err:
@@ -393,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4b9853adc113..24a64ec1073e 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -216,6 +216,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@@ -223,9 +224,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
- if (!tx->raw_addressing) {
- gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (tx->tx_fifo.qpl) {
+ if (tx->tx_fifo.base)
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@@ -256,6 +260,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -280,9 +286,14 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
+
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_qpl;
@@ -302,8 +313,10 @@ abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
- if (!tx->raw_addressing)
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (!tx->raw_addressing) {
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
+ tx->tx_fifo.qpl = NULL;
+ }
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -320,12 +333,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a3e..fe1b26a4d736 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -237,7 +238,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
@@ -285,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
+ int qpl_page_cnt;
size_t bytes;
+ u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
@@ -295,9 +299,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
- tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.tx_comp_ring_entries - 1 :
- tx->mask;
+ tx->dqo.complq_mask = tx->mask;
/* The max number of pending packets determines the maximum number of
* descriptors which may be written to the completion queue.
@@ -354,7 +356,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
@@ -376,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 8a713eed4465..fd32e15cadcb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1777,7 +1777,7 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
}
/* finally, set new mtu to netdevice */
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
out:
if (if_running) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index e214bfaece1f..8e9293e57bfd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f19f1e1d1f9f..7cebb08bd320 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -104,6 +104,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
HNAE3_DEV_SUPPORT_VF_FAULT_B,
+ HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@@ -181,6 +182,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -362,6 +366,15 @@ struct hnae3_vector_info {
#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24
+#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_SCC_VERSION_BYTE2_SHIFT 16
+#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8
+#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0
+#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -897,7 +910,7 @@ struct hnae3_handle {
struct hnae3_roce_private_info rinfo;
};
- u32 numa_node_mask; /* for multi-chip support */
+ nodemask_t numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 652d71326231..ea40b594dbac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -158,6 +158,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
+ {HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -470,10 +471,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num)
{
+ bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode));
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret;
int ntc;
+ if (hw->cmq.ops.trace_cmd_send)
+ hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);
+
spin_lock_bh(&hw->cmq.csq.lock);
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
@@ -507,6 +512,9 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
spin_unlock_bh(&hw->cmq.csq.lock);
+ if (hw->cmq.ops.trace_cmd_get)
+ hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);
+
return ret;
}
@@ -584,6 +592,17 @@ err_csq:
return ret;
}
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ if (ops) {
+ cmdq->ops.trace_cmd_send = ops->trace_cmd_send;
+ cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
+ }
+}
+
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 552396518e08..2c2a2f1e0d7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -91,6 +91,7 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+ HCLGE_OPC_DFX_GEN_REG = 0x7038,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067,
@@ -246,6 +247,9 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+ /* SCC commands */
+ HCLGE_OPC_QUERY_SCC_VER = 0x1A84,
+
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
@@ -353,6 +357,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
+ HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32,
};
enum HCLGE_COMM_API_CAP_BITS {
@@ -392,6 +397,11 @@ struct hclge_comm_query_version_cmd {
__le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
};
+struct hclge_comm_query_scc_cmd {
+ __le32 scc_version;
+ u8 rsv[20];
+};
+
#define HCLGE_DESC_DATA_LEN 6
struct hclge_desc {
__le16 opcode;
@@ -423,11 +433,22 @@ enum hclge_comm_cmd_status {
HCLGE_COMM_ERR_CSQ_ERROR = -3,
};
+struct hclge_comm_hw;
+struct hclge_comm_cmq_ops {
+ void (*trace_cmd_send)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+ void (*trace_cmd_get)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+};
+
struct hclge_comm_cmq {
struct hclge_comm_cmq_ring csq;
struct hclge_comm_cmq_ring crq;
u16 tx_timeout;
enum hclge_comm_cmd_status last_status;
+ struct hclge_comm_cmq_ops ops;
};
struct hclge_comm_hw {
@@ -474,5 +495,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending);
-
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index f3c9395d8351..618f66d9586b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 19668a8d22f7..ff71fb1eced9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2068,8 +2068,6 @@ static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
__iowrite64_copy(ring->tqp->mem_base, desc,
(sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
HNS3_BYTES_PER_64BIT);
-
- io_stop_wc();
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
@@ -2088,8 +2086,6 @@ static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_mem_doorbell += ring->pending_buf;
u64_stats_update_end(&ring->syncp);
-
- io_stop_wc();
}
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
@@ -2761,7 +2757,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 999a0ee162a6..941cb529d671 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+#define HNS3_NIC_LB_TEST_UNEXECUTED 4
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
@@ -418,18 +421,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
+ int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
+ int i;
+
+ /* initialize the loopback test result, avoid marking an unexecuted
+ * loopback test as PASS.
+ */
+ for (i = 0; i < cnt; i++)
+ data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
netdev_err(ndev, "dev resetting!");
- return;
+ goto failure;
}
if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
- return;
+ goto failure;
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test start\n");
@@ -451,6 +462,10 @@ static void hns3_self_test(struct net_device *ndev,
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test end\n");
+ return;
+
+failure:
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9ec471ced3d6..debf143e9940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -18,6 +18,646 @@ static const char * const hclge_mac_state_str[] = {
static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -161,10 +801,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
return 0;
}
-static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src,
- int index, int bd_num,
- enum hclge_opcode_type cmd)
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd)
{
struct hclge_desc *desc = desc_src;
int ret, i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 724052928b88..2b998cbed826 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -99,646 +99,6 @@ struct hclge_dbg_status_dfx_info {
char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
};
-static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
- {false, "Reserved"},
- {true, "BP_CPU_STATE"},
- {true, "DFX_MSIX_INFO_NIC_0"},
- {true, "DFX_MSIX_INFO_NIC_1"},
- {true, "DFX_MSIX_INFO_NIC_2"},
- {true, "DFX_MSIX_INFO_NIC_3"},
-
- {true, "DFX_MSIX_INFO_ROC_0"},
- {true, "DFX_MSIX_INFO_ROC_1"},
- {true, "DFX_MSIX_INFO_ROC_2"},
- {true, "DFX_MSIX_INFO_ROC_3"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
- {false, "Reserved"},
- {true, "SSU_ETS_PORT_STATUS"},
- {true, "SSU_ETS_TCG_STATUS"},
- {false, "Reserved"},
- {false, "Reserved"},
- {true, "SSU_BP_STATUS_0"},
-
- {true, "SSU_BP_STATUS_1"},
- {true, "SSU_BP_STATUS_2"},
- {true, "SSU_BP_STATUS_3"},
- {true, "SSU_BP_STATUS_4"},
- {true, "SSU_BP_STATUS_5"},
- {true, "SSU_MAC_TX_PFC_IND"},
-
- {true, "MAC_SSU_RX_PFC_IND"},
- {true, "BTMP_AGEING_ST_B0"},
- {true, "BTMP_AGEING_ST_B1"},
- {true, "BTMP_AGEING_ST_B2"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "FULL_DROP_NUM"},
- {true, "PART_DROP_NUM"},
- {true, "PPP_KEY_DROP_NUM"},
- {true, "PPP_RLT_DROP_NUM"},
- {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
- {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
-
- {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
- {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
- {true, "BTMP_AGEING_RLS_CNT_BANK0"},
- {true, "BTMP_AGEING_RLS_CNT_BANK1"},
- {true, "BTMP_AGEING_RLS_CNT_BANK2"},
- {true, "SSU_MB_RD_RLT_DROP_CNT"},
-
- {true, "SSU_PPP_MAC_KEY_NUM_L"},
- {true, "SSU_PPP_MAC_KEY_NUM_H"},
- {true, "SSU_PPP_HOST_KEY_NUM_L"},
- {true, "SSU_PPP_HOST_KEY_NUM_H"},
- {true, "PPP_SSU_MAC_RLT_NUM_L"},
- {true, "PPP_SSU_MAC_RLT_NUM_H"},
-
- {true, "PPP_SSU_HOST_RLT_NUM_L"},
- {true, "PPP_SSU_HOST_RLT_NUM_H"},
- {true, "NCSI_RX_PACKET_IN_CNT_L"},
- {true, "NCSI_RX_PACKET_IN_CNT_H"},
- {true, "NCSI_TX_PACKET_OUT_CNT_L"},
- {true, "NCSI_TX_PACKET_OUT_CNT_H"},
-
- {true, "SSU_KEY_DROP_NUM"},
- {true, "MB_UNCOPY_NUM"},
- {true, "RX_OQ_DROP_PKT_CNT"},
- {true, "TX_OQ_DROP_PKT_CNT"},
- {true, "BANK_UNBALANCE_DROP_CNT"},
- {true, "BANK_UNBALANCE_RX_DROP_CNT"},
-
- {true, "NIC_L2_ERR_DROP_PKT_CNT"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT"},
- {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "RX_OQ_GLB_DROP_PKT_CNT"},
- {false, "Reserved"},
-
- {true, "LO_PRI_UNICAST_CUR_CNT"},
- {true, "HI_PRI_MULTICAST_CUR_CNT"},
- {true, "LO_PRI_MULTICAST_CUR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
- {true, "prt_id"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
-
- {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
- {true, "PACKET_CURR_BUFFER_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "RX_PACKET_IN_CNT_L"},
- {true, "RX_PACKET_IN_CNT_H"},
- {true, "RX_PACKET_OUT_CNT_L"},
- {true, "RX_PACKET_OUT_CNT_H"},
- {true, "TX_PACKET_IN_CNT_L"},
- {true, "TX_PACKET_IN_CNT_H"},
-
- {true, "TX_PACKET_OUT_CNT_L"},
- {true, "TX_PACKET_OUT_CNT_H"},
- {true, "ROC_RX_PACKET_IN_CNT_L"},
- {true, "ROC_RX_PACKET_IN_CNT_H"},
- {true, "ROC_TX_PACKET_OUT_CNT_L"},
- {true, "ROC_TX_PACKET_OUT_CNT_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_0_L"},
- {true, "RX_PACKET_TC_IN_CNT_0_H"},
- {true, "RX_PACKET_TC_IN_CNT_1_L"},
- {true, "RX_PACKET_TC_IN_CNT_1_H"},
- {true, "RX_PACKET_TC_IN_CNT_2_L"},
- {true, "RX_PACKET_TC_IN_CNT_2_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_3_L"},
- {true, "RX_PACKET_TC_IN_CNT_3_H"},
- {true, "RX_PACKET_TC_IN_CNT_4_L"},
- {true, "RX_PACKET_TC_IN_CNT_4_H"},
- {true, "RX_PACKET_TC_IN_CNT_5_L"},
- {true, "RX_PACKET_TC_IN_CNT_5_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_6_L"},
- {true, "RX_PACKET_TC_IN_CNT_6_H"},
- {true, "RX_PACKET_TC_IN_CNT_7_L"},
- {true, "RX_PACKET_TC_IN_CNT_7_H"},
- {true, "RX_PACKET_TC_OUT_CNT_0_L"},
- {true, "RX_PACKET_TC_OUT_CNT_0_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_1_L"},
- {true, "RX_PACKET_TC_OUT_CNT_1_H"},
- {true, "RX_PACKET_TC_OUT_CNT_2_L"},
- {true, "RX_PACKET_TC_OUT_CNT_2_H"},
- {true, "RX_PACKET_TC_OUT_CNT_3_L"},
- {true, "RX_PACKET_TC_OUT_CNT_3_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_4_L"},
- {true, "RX_PACKET_TC_OUT_CNT_4_H"},
- {true, "RX_PACKET_TC_OUT_CNT_5_L"},
- {true, "RX_PACKET_TC_OUT_CNT_5_H"},
- {true, "RX_PACKET_TC_OUT_CNT_6_L"},
- {true, "RX_PACKET_TC_OUT_CNT_6_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_7_L"},
- {true, "RX_PACKET_TC_OUT_CNT_7_H"},
- {true, "TX_PACKET_TC_IN_CNT_0_L"},
- {true, "TX_PACKET_TC_IN_CNT_0_H"},
- {true, "TX_PACKET_TC_IN_CNT_1_L"},
- {true, "TX_PACKET_TC_IN_CNT_1_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_2_L"},
- {true, "TX_PACKET_TC_IN_CNT_2_H"},
- {true, "TX_PACKET_TC_IN_CNT_3_L"},
- {true, "TX_PACKET_TC_IN_CNT_3_H"},
- {true, "TX_PACKET_TC_IN_CNT_4_L"},
- {true, "TX_PACKET_TC_IN_CNT_4_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_5_L"},
- {true, "TX_PACKET_TC_IN_CNT_5_H"},
- {true, "TX_PACKET_TC_IN_CNT_6_L"},
- {true, "TX_PACKET_TC_IN_CNT_6_H"},
- {true, "TX_PACKET_TC_IN_CNT_7_L"},
- {true, "TX_PACKET_TC_IN_CNT_7_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_0_L"},
- {true, "TX_PACKET_TC_OUT_CNT_0_H"},
- {true, "TX_PACKET_TC_OUT_CNT_1_L"},
- {true, "TX_PACKET_TC_OUT_CNT_1_H"},
- {true, "TX_PACKET_TC_OUT_CNT_2_L"},
- {true, "TX_PACKET_TC_OUT_CNT_2_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_3_L"},
- {true, "TX_PACKET_TC_OUT_CNT_3_H"},
- {true, "TX_PACKET_TC_OUT_CNT_4_L"},
- {true, "TX_PACKET_TC_OUT_CNT_4_H"},
- {true, "TX_PACKET_TC_OUT_CNT_5_L"},
- {true, "TX_PACKET_TC_OUT_CNT_5_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_6_L"},
- {true, "TX_PACKET_TC_OUT_CNT_6_H"},
- {true, "TX_PACKET_TC_OUT_CNT_7_L"},
- {true, "TX_PACKET_TC_OUT_CNT_7_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
- {true, "OQ_INDEX"},
- {true, "QUEUE_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
- {true, "prt_id"},
- {true, "IGU_RX_ERR_PKT"},
- {true, "IGU_RX_NO_SOF_PKT"},
- {true, "EGU_TX_1588_SHORT_PKT"},
- {true, "EGU_TX_1588_PKT"},
- {true, "EGU_TX_ERR_PKT"},
-
- {true, "IGU_RX_OUT_L2_PKT"},
- {true, "IGU_RX_OUT_L3_PKT"},
- {true, "IGU_RX_OUT_L4_PKT"},
- {true, "IGU_RX_IN_L2_PKT"},
- {true, "IGU_RX_IN_L3_PKT"},
- {true, "IGU_RX_IN_L4_PKT"},
-
- {true, "IGU_RX_EL3E_PKT"},
- {true, "IGU_RX_EL4E_PKT"},
- {true, "IGU_RX_L3E_PKT"},
- {true, "IGU_RX_L4E_PKT"},
- {true, "IGU_RX_ROCEE_PKT"},
- {true, "IGU_RX_OUT_UDP0_PKT"},
-
- {true, "IGU_RX_IN_UDP0_PKT"},
- {true, "IGU_MC_CAR_DROP_PKT_L"},
- {true, "IGU_MC_CAR_DROP_PKT_H"},
- {true, "IGU_BC_CAR_DROP_PKT_L"},
- {true, "IGU_BC_CAR_DROP_PKT_H"},
- {false, "Reserved"},
-
- {true, "IGU_RX_OVERSIZE_PKT_L"},
- {true, "IGU_RX_OVERSIZE_PKT_H"},
- {true, "IGU_RX_UNDERSIZE_PKT_L"},
- {true, "IGU_RX_UNDERSIZE_PKT_H"},
- {true, "IGU_RX_OUT_ALL_PKT_L"},
- {true, "IGU_RX_OUT_ALL_PKT_H"},
-
- {true, "IGU_TX_OUT_ALL_PKT_L"},
- {true, "IGU_TX_OUT_ALL_PKT_H"},
- {true, "IGU_RX_UNI_PKT_L"},
- {true, "IGU_RX_UNI_PKT_H"},
- {true, "IGU_RX_MULTI_PKT_L"},
- {true, "IGU_RX_MULTI_PKT_H"},
-
- {true, "IGU_RX_BROAD_PKT_L"},
- {true, "IGU_RX_BROAD_PKT_H"},
- {true, "EGU_TX_OUT_ALL_PKT_L"},
- {true, "EGU_TX_OUT_ALL_PKT_H"},
- {true, "EGU_TX_UNI_PKT_L"},
- {true, "EGU_TX_UNI_PKT_H"},
-
- {true, "EGU_TX_MULTI_PKT_L"},
- {true, "EGU_TX_MULTI_PKT_H"},
- {true, "EGU_TX_BROAD_PKT_L"},
- {true, "EGU_TX_BROAD_PKT_H"},
- {true, "IGU_TX_KEY_NUM_L"},
- {true, "IGU_TX_KEY_NUM_H"},
-
- {true, "IGU_RX_NON_TUN_PKT_L"},
- {true, "IGU_RX_NON_TUN_PKT_H"},
- {true, "IGU_RX_TUN_PKT_L"},
- {true, "IGU_RX_TUN_PKT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
- {true, "tc_queue_num"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "RPU_RX_PKT_DROP_CNT"},
- {true, "BUF_WAIT_TIMEOUT"},
- {true, "BUF_WAIT_TIMEOUT_QID"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
- {false, "Reserved"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
-
- {true, "FIFO_DFX_ST5"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
- {false, "Reserved"},
- {true, "NCSI_EGU_TX_FIFO_STS"},
- {true, "NCSI_PAUSE_STATUS"},
- {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
-
- {true, "NCSI_RX_CTRL_PKT_CNT"},
- {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_PKT_CNT"},
- {true, "NCSI_RX_FCS_ERR_CNT"},
- {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
-
- {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_CNT"},
- {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_PKT_CNT"},
- {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
-
- {true, "NCSI_TX_PT_PKT_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
- {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
- {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "NCSI_MAC_RX_OCTETS_OK"},
- {true, "NCSI_MAC_RX_OCTETS_BAD"},
- {true, "NCSI_MAC_RX_UC_PKTS"},
- {true, "NCSI_MAC_RX_MC_PKTS"},
- {true, "NCSI_MAC_RX_BC_PKTS"},
- {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
-
- {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
-
- {true, "NCSI_MAC_RX_FCS_ERRORS"},
- {true, "NCSI_MAC_RX_LONG_ERRORS"},
- {true, "NCSI_MAC_RX_JABBER_ERRORS"},
- {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
- {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
- {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
-
- {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
- {true, "NCSI_MAC_TX_OCTETS_OK"},
- {true, "NCSI_MAC_TX_OCTETS_BAD"},
- {true, "NCSI_MAC_TX_UC_PKTS"},
- {true, "NCSI_MAC_TX_MC_PKTS"},
- {true, "NCSI_MAC_TX_BC_PKTS"},
-
- {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
-
- {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
- {true, "NCSI_MAC_TX_UNDERRUN"},
- {true, "NCSI_MAC_TX_CRC_ERROR"},
- {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
- {true, "NCSI_MAC_RX_PAD_PKTS"},
- {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
- {false, "Reserved"},
- {true, "LGE_IGU_AFIFO_DFX_0"},
- {true, "LGE_IGU_AFIFO_DFX_1"},
- {true, "LGE_IGU_AFIFO_DFX_2"},
- {true, "LGE_IGU_AFIFO_DFX_3"},
- {true, "LGE_IGU_AFIFO_DFX_4"},
-
- {true, "LGE_IGU_AFIFO_DFX_5"},
- {true, "LGE_IGU_AFIFO_DFX_6"},
- {true, "LGE_IGU_AFIFO_DFX_7"},
- {true, "LGE_EGU_AFIFO_DFX_0"},
- {true, "LGE_EGU_AFIFO_DFX_1"},
- {true, "LGE_EGU_AFIFO_DFX_2"},
-
- {true, "LGE_EGU_AFIFO_DFX_3"},
- {true, "LGE_EGU_AFIFO_DFX_4"},
- {true, "LGE_EGU_AFIFO_DFX_5"},
- {true, "LGE_EGU_AFIFO_DFX_6"},
- {true, "LGE_EGU_AFIFO_DFX_7"},
- {true, "CGE_IGU_AFIFO_DFX_0"},
-
- {true, "CGE_IGU_AFIFO_DFX_1"},
- {true, "CGE_EGU_AFIFO_DFX_0"},
- {true, "CGE_EGU_AFIFO_DFX_1"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
- {false, "Reserved"},
- {true, "DROP_FROM_PRT_PKT_CNT"},
- {true, "DROP_FROM_HOST_PKT_CNT"},
- {true, "DROP_TX_VLAN_PROC_CNT"},
- {true, "DROP_MNG_CNT"},
- {true, "DROP_FD_CNT"},
-
- {true, "DROP_NO_DST_CNT"},
- {true, "DROP_MC_MBID_FULL_CNT"},
- {true, "DROP_SC_FILTERED"},
- {true, "PPP_MC_DROP_PKT_CNT"},
- {true, "DROP_PT_CNT"},
- {true, "DROP_MAC_ANTI_SPOOF_CNT"},
-
- {true, "DROP_IG_VFV_CNT"},
- {true, "DROP_IG_PRTV_CNT"},
- {true, "DROP_CNM_PFC_PAUSE_CNT"},
- {true, "DROP_TORUS_TC_CNT"},
- {true, "DROP_TORUS_LPBK_CNT"},
- {true, "PPP_HFS_STS"},
-
- {true, "PPP_MC_RSLT_STS"},
- {true, "PPP_P3U_STS"},
- {true, "PPP_RSLT_DESCR_STS"},
- {true, "PPP_UMV_STS_0"},
- {true, "PPP_UMV_STS_1"},
- {true, "PPP_VFV_STS"},
-
- {true, "PPP_GRO_KEY_CNT"},
- {true, "PPP_GRO_INFO_CNT"},
- {true, "PPP_GRO_DROP_CNT"},
- {true, "PPP_GRO_OUT_CNT"},
- {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
- {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
-
- {true, "PPP_GRO_INFO_MATCH_CNT"},
- {true, "PPP_GRO_FREE_ENTRY_CNT"},
- {true, "PPP_GRO_INNER_DFX_SIGNAL"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "GET_RX_PKT_CNT_L"},
- {true, "GET_RX_PKT_CNT_H"},
- {true, "GET_TX_PKT_CNT_L"},
- {true, "GET_TX_PKT_CNT_H"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
- {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
-
- {true, "SEND_MC_FROM_PRT_CNT_L"},
- {true, "SEND_MC_FROM_PRT_CNT_H"},
- {true, "SEND_MC_FROM_HOST_CNT_L"},
- {true, "SEND_MC_FROM_HOST_CNT_H"},
- {true, "SSU_MC_RD_CNT_L"},
- {true, "SSU_MC_RD_CNT_H"},
-
- {true, "SSU_MC_DROP_CNT_L"},
- {true, "SSU_MC_DROP_CNT_H"},
- {true, "SSU_MC_RD_PKT_CNT_L"},
- {true, "SSU_MC_RD_PKT_CNT_H"},
- {true, "PPP_MC_2HOST_PKT_CNT_L"},
- {true, "PPP_MC_2HOST_PKT_CNT_H"},
-
- {true, "PPP_MC_2PRT_PKT_CNT_L"},
- {true, "PPP_MC_2PRT_PKT_CNT_H"},
- {true, "NTSNOS_PKT_CNT_L"},
- {true, "NTSNOS_PKT_CNT_H"},
- {true, "NTUP_PKT_CNT_L"},
- {true, "NTUP_PKT_CNT_H"},
-
- {true, "NTLCL_PKT_CNT_L"},
- {true, "NTLCL_PKT_CNT_H"},
- {true, "NTTGT_PKT_CNT_L"},
- {true, "NTTGT_PKT_CNT_H"},
- {true, "RTNS_PKT_CNT_L"},
- {true, "RTNS_PKT_CNT_H"},
-
- {true, "RTLPBK_PKT_CNT_L"},
- {true, "RTLPBK_PKT_CNT_H"},
- {true, "NR_PKT_CNT_L"},
- {true, "NR_PKT_CNT_H"},
- {true, "RR_PKT_CNT_L"},
- {true, "RR_PKT_CNT_H"},
-
- {true, "MNG_TBL_HIT_CNT_L"},
- {true, "MNG_TBL_HIT_CNT_H"},
- {true, "FD_TBL_HIT_CNT_L"},
- {true, "FD_TBL_HIT_CNT_H"},
- {true, "FD_LKUP_CNT_L"},
- {true, "FD_LKUP_CNT_H"},
-
- {true, "BC_HIT_CNT_L"},
- {true, "BC_HIT_CNT_H"},
- {true, "UM_TBL_UC_HIT_CNT_L"},
- {true, "UM_TBL_UC_HIT_CNT_H"},
- {true, "UM_TBL_MC_HIT_CNT_L"},
- {true, "UM_TBL_MC_HIT_CNT_H"},
-
- {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
- {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
- {true, "MTA_TBL_HIT_CNT_L"},
- {true, "MTA_TBL_HIT_CNT_H"},
- {true, "FWD_BONDING_HIT_CNT_L"},
- {true, "FWD_BONDING_HIT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_CNT_L"},
- {true, "PROMIS_TBL_HIT_CNT_H"},
- {true, "GET_TUNL_PKT_CNT_L"},
- {true, "GET_TUNL_PKT_CNT_H"},
- {true, "GET_BMC_PKT_CNT_L"},
- {true, "GET_BMC_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
- {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
- {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
- {true, "PPP_MC_2BMC_PKT_CNT_L"},
- {true, "PPP_MC_2BMC_PKT_CNT_H"},
- {true, "VLAN_MIRR_CNT_L"},
- {true, "VLAN_MIRR_CNT_H"},
-
- {true, "IG_MIRR_CNT_L"},
- {true, "IG_MIRR_CNT_H"},
- {true, "EG_MIRR_CNT_L"},
- {true, "EG_MIRR_CNT_H"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
-
- {true, "LAN_PAIR_CNT_L"},
- {true, "LAN_PAIR_CNT_H"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
- {true, "MTA_TBL_HIT_PKT_CNT_L"},
- {true, "MTA_TBL_HIT_PKT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
- {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
- {false, "Reserved"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "FSM_DFX_ST2"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
-
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
- {true, "FIFO_DFX_ST5"},
- {true, "FIFO_DFX_ST6"},
- {true, "FIFO_DFX_ST7"},
-
- {true, "FIFO_DFX_ST8"},
- {true, "FIFO_DFX_ST9"},
- {true, "FIFO_DFX_ST10"},
- {true, "FIFO_DFX_ST11"},
- {true, "Q_CREDIT_VLD_0"},
- {true, "Q_CREDIT_VLD_1"},
-
- {true, "Q_CREDIT_VLD_2"},
- {true, "Q_CREDIT_VLD_3"},
- {true, "Q_CREDIT_VLD_4"},
- {true, "Q_CREDIT_VLD_5"},
- {true, "Q_CREDIT_VLD_6"},
- {true, "Q_CREDIT_VLD_7"},
-
- {true, "Q_CREDIT_VLD_8"},
- {true, "Q_CREDIT_VLD_9"},
- {true, "Q_CREDIT_VLD_10"},
- {true, "Q_CREDIT_VLD_11"},
- {true, "Q_CREDIT_VLD_12"},
- {true, "Q_CREDIT_VLD_13"},
-
- {true, "Q_CREDIT_VLD_14"},
- {true, "Q_CREDIT_VLD_15"},
- {true, "Q_CREDIT_VLD_16"},
- {true, "Q_CREDIT_VLD_17"},
- {true, "Q_CREDIT_VLD_18"},
- {true, "Q_CREDIT_VLD_19"},
-
- {true, "Q_CREDIT_VLD_20"},
- {true, "Q_CREDIT_VLD_21"},
- {true, "Q_CREDIT_VLD_22"},
- {true, "Q_CREDIT_VLD_23"},
- {true, "Q_CREDIT_VLD_24"},
- {true, "Q_CREDIT_VLD_25"},
-
- {true, "Q_CREDIT_VLD_26"},
- {true, "Q_CREDIT_VLD_27"},
- {true, "Q_CREDIT_VLD_28"},
- {true, "Q_CREDIT_VLD_29"},
- {true, "Q_CREDIT_VLD_30"},
- {true, "Q_CREDIT_VLD_31"},
-
- {true, "GRO_BD_SERR_CNT"},
- {true, "GRO_CONTEXT_SERR_CNT"},
- {true, "RX_STASH_CFG_SERR_CNT"},
- {true, "AXI_RD_FBD_SERR_CNT"},
- {true, "GRO_BD_MERR_CNT"},
- {true, "GRO_CONTEXT_MERR_CNT"},
-
- {true, "RX_STASH_CFG_MERR_CNT"},
- {true, "AXI_RD_FBD_MERR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
- {true, "q_num"},
- {true, "RCB_CFG_RX_RING_TAIL"},
- {true, "RCB_CFG_RX_RING_HEAD"},
- {true, "RCB_CFG_RX_RING_FBDNUM"},
- {true, "RCB_CFG_RX_RING_OFFSET"},
- {true, "RCB_CFG_RX_RING_FBDOFFSET"},
-
- {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
- {true, "RCB_CFG_TX_RING_TAIL"},
- {true, "RCB_CFG_TX_RING_HEAD"},
- {true, "RCB_CFG_TX_RING_FBDNUM"},
- {true, "RCB_CFG_TX_RING_OFFSET"},
- {true, "RCB_CFG_TX_RING_EBDNUM"},
-};
-
#define HCLGE_DBG_INFO_LEN 256
#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
@@ -771,4 +131,7 @@ struct hclge_dbg_vlan_cfg {
u8 pri_only2;
};
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 9a939c0b217f..a1571c108678 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -5,6 +5,34 @@
#include "hclge_devlink.h"
+static int hclge_devlink_scc_info_get(struct devlink *devlink,
+ struct devlink_info_req *req)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char scc_version[HCLGE_DEVLINK_FW_SCC_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ u32 scc_version_tmp;
+ int ret;
+
+ ret = hclge_query_scc_version(hdev, &scc_version_tmp);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get scc version, ret = %d\n", ret);
+ return ret;
+ }
+
+ snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+ return devlink_info_version_running_put(req, "fw.scc", scc_version);
+}
+
static int hclge_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
+ int ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink,
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
- return devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- version_str);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to set running version of fw\n");
+ return ret;
+ }
+
+ if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2)
+ ret = hclge_devlink_scc_info_get(devlink, req);
+
+ return ret;
}
static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
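The new hclge_devlink_scc_info_get() above turns a packed 32-bit version word into a dotted string by masking and shifting one byte at a time. Below is a minimal userspace sketch of that mask/shift pattern; the VER_BYTE_* macros, the get_field() helper and the example value are illustrative stand-ins, not the driver's HNAE3_* definitions.

#include <stdio.h>
#include <stdint.h>

/* assumed layout: four 8-bit version fields, byte 3 most significant */
#define VER_BYTE_MASK(n)	(0xffu << ((n) * 8))
#define VER_BYTE_SHIFT(n)	((n) * 8)

/* same idea as the driver's hnae3_get_field(): mask first, then shift down */
static unsigned int get_field(uint32_t val, uint32_t mask, unsigned int shift)
{
	return (val & mask) >> shift;
}

int main(void)
{
	uint32_t scc_version = 0x01020304;	/* example packed value */
	char buf[32];

	snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
		 get_field(scc_version, VER_BYTE_MASK(3), VER_BYTE_SHIFT(3)),
		 get_field(scc_version, VER_BYTE_MASK(2), VER_BYTE_SHIFT(2)),
		 get_field(scc_version, VER_BYTE_MASK(1), VER_BYTE_SHIFT(1)),
		 get_field(scc_version, VER_BYTE_MASK(0), VER_BYTE_SHIFT(0)));
	printf("fw.scc = %s\n", buf);	/* prints "fw.scc = 1.2.3.4" */
	return 0;
}

The string built this way is what ends up listed as the running "fw.scc" version when the device is queried with devlink dev info.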
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
index 918be04507a5..148effa5ea89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -6,6 +6,8 @@
#include "hclge_main.h"
+#define HCLGE_DEVLINK_FW_SCC_LEN 32
+
struct hclge_devlink_priv {
struct hclge_dev *hdev;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index d63e114f93d0..e132c2f09560 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1198,6 +1198,425 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
}
};
+static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = {
+ {
+ .reg_name = "SSU_BP_STATUS_0~5",
+ .reg_offset_group = { 5, 6, 7, 8, 9, 10},
+ .group_size = 6
+ }, {
+ .reg_name = "LO_PRI_UNICAST_CUR_CNT",
+ .reg_offset_group = {54},
+ .group_size = 1
+ }, {
+ .reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT",
+ .reg_offset_group = {55, 56},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_MB_RD_RLT_DROP_CNT",
+ .reg_offset_group = {29},
+ .group_size = 1
+ }, {
+ .reg_name = "SSU_PPP_MAC_KEY_NUM",
+ .reg_offset_group = {31, 30},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_PPP_HOST_KEY_NUM",
+ .reg_offset_group = {33, 32},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_SSU_MAC/HOST_RLT_NUM",
+ .reg_offset_group = {35, 34, 37, 36},
+ .group_size = 4
+ }, {
+ .reg_name = "FULL/PART_DROP_NUM",
+ .reg_offset_group = {18, 19},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_KEY/RLT_DROP_NUM",
+ .reg_offset_group = {20, 21},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT",
+ .reg_offset_group = {48, 49},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX",
+ .reg_offset_group = {50, 51},
+ .group_size = 2
+ },
+};
+
+static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = {
+ {
+ .reg_name = "RX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {13, 12, 15, 14},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {17, 16, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {25, 24, 41, 40},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {27, 26, 43, 42},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {29, 28, 45, 44},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {31, 30, 47, 46},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {33, 32, 49, 48},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {35, 34, 51, 50},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {37, 36, 53, 52},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {39, 38, 55, 54},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {57, 56, 73, 72},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {59, 58, 75, 74},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {61, 60, 77, 76},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {63, 62, 79, 78},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {65, 64, 81, 80},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {67, 66, 83, 82},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {69, 68, 85, 84},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {71, 70, 87, 86},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT",
+ .reg_offset_group = {1, 2, 3, 4},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT",
+ .reg_offset_group = {5, 6, 7, 8},
+ .group_size = 4
+ }, {
+ .reg_name = "ROC_RX_PACKET_IN_CNT",
+ .reg_offset_group = {21, 20},
+ .group_size = 2
+ }, {
+ .reg_name = "ROC_TX_PACKET_OUT_CNT",
+ .reg_offset_group = {23, 22},
+ .group_size = 2
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = {
+ {
+ .reg_name = "RPU_FSM_DFX_ST0/ST1_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1, 2},
+ .group_size = 2
+ }, {
+ .reg_name = "RPU_RX_PKT_DROP_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {3},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = {
+ {
+ .reg_name = "FIFO_DFX_ST0_1_2_4",
+ .reg_offset_group = {1, 2, 3, 5},
+ .group_size = 4
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_igu_egu_reg_info[] = {
+ {
+ .reg_name = "IGU_RX_ERR_PKT",
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_RX_OUT_ALL_PKT",
+ .reg_offset_group = {29, 28},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_OUT_ALL_PKT",
+ .reg_offset_group = {39, 38},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_ERR_PKT",
+ .reg_offset_group = {5},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = {
+ {
+ .reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {12},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info[] = {
+ {
+ .reg_name = "SSU_OVERSIZE_DROP_CNT",
+ .reg_offset_group = {12},
+ .group_size = 1
+ }, {
+ .reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM",
+ .reg_offset_group = {13},
+ .group_size = 1
+ }, {
+ .reg_name = "RX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {15, 14, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {17, 16, 21, 20},
+ .group_size = 4
+ }, {
+ .reg_name = "ETS_TC_READY",
+ .reg_offset_group = {22},
+ .group_size = 1
+ }, {
+ .reg_name = "MIB_TX/RX_BAD_PKTS",
+ .reg_offset_group = {19, 18, 29, 28},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_GOOD_PKTS",
+ .reg_offset_group = {21, 20, 31, 30},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_TOTAL_PKTS",
+ .reg_offset_group = {23, 22, 33, 32},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_PAUSE_PKTS",
+ .reg_offset_group = {25, 24, 35, 34},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX_ERR_ALL_PKTS",
+ .reg_offset_group = {27, 26},
+ .group_size = 2
+ }, {
+ .reg_name = "MIB_RX_FCS_ERR_PKTS",
+ .reg_offset_group = {37, 36},
+ .group_size = 2
+ }, {
+ .reg_name = "IGU_EGU_AUTO_GATE_EN",
+ .reg_offset_group = {42},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_INT_SRC",
+ .reg_offset_group = {43},
+ .group_size = 1
+ }, {
+ .reg_name = "EGU_READY_NUM_CFG",
+ .reg_offset_group = {44},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_TNL_DFX",
+ .reg_offset_group = {45},
+ .group_size = 1
+ }, {
+ .reg_name = "TX_TNL_NOTE_PKT",
+ .reg_offset_group = {46},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = {
+ {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0,
+ .result_regs = hclge_ssu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1,
+ .result_regs = hclge_ssu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0,
+ .result_regs = hclge_rpu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1,
+ .result_regs = hclge_rpu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG,
+ .result_regs = hclge_igu_egu_reg_info,
+ .bd_num = HCLGE_BD_NUM_IGU_EGU_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info_tnl,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info)
+ }
+};
+
+static int
+hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc,
+ const struct hclge_mod_reg_info *reg_info, int size)
+{
+ int i, j, pos, actual_len;
+ u8 offset, bd_idx, index;
+ char *buf;
+
+ buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ actual_len = strlen(reg_info[i].reg_name) +
+ HCLGE_MOD_REG_EXTRA_LEN +
+ HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size;
+ if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) {
+ dev_info(dev, "length of reg(%s) is invalid, len=%d\n",
+ reg_info[i].reg_name, actual_len);
+ continue;
+ }
+
+ pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s",
+ reg_info[i].reg_name);
+ if (reg_info[i].has_suffix)
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u",
+ le32_to_cpu(desc->data[0]));
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ ":");
+ for (j = 0; j < reg_info[i].group_size; j++) {
+ offset = reg_info[i].reg_offset_group[j];
+ index = offset % HCLGE_DESC_DATA_LEN;
+ bd_idx = offset / HCLGE_DESC_DATA_LEN;
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ " %08x",
+ le32_to_cpu(desc[bd_idx].data[index]));
+ }
+ dev_info(dev, "%s\n", buf);
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode,
+ struct hclge_dev *hdev)
+{
+ if (opcode == HCLGE_OPC_DFX_GEN_REG &&
+ !hnae3_ae_dev_gen_reg_dfx_supported(hdev))
+ return false;
+ return true;
+}
+
+/* For each common msg, send a cmdq command to the IMP and print the result
+ * register info. If the command takes a parameter, loop over each parameter
+ * value and issue one request per value.
+ */
+static void
+hclge_query_reg_info(struct hclge_dev *hdev,
+ struct hclge_mod_reg_common_msg *msg, u32 loop_time,
+ u32 *loop_para)
+{
+ int desc_len, i, ret;
+
+ desc_len = msg->bd_num * sizeof(struct hclge_desc);
+ msg->desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!msg->desc) {
+ dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d\n",
+ -ENOMEM);
+ return;
+ }
+
+ for (i = 0; i < loop_time; i++) {
+ ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para,
+ msg->bd_num, msg->cmd);
+ loop_para++;
+ if (ret)
+ continue;
+ ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc,
+ msg->result_regs,
+ msg->result_regs_size);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n",
+ ret);
+ }
+
+ kfree(msg->desc);
+}
+
+static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
+{
+ u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
+ struct hclge_mod_reg_common_msg msg;
+ u8 i, j, num;
+ u32 loop_time;
+
+ num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
+ for (i = 0; i < num; i++) {
+ msg = hclge_ssu_reg_common_msg[i];
+ if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev))
+ continue;
+ loop_time = 1;
+ loop_para[0] = 0;
+ if (msg.need_para) {
+ loop_time = hdev->ae_dev->dev_specs.tnl_num;
+ for (j = 0; j < loop_time; j++)
+ loop_para[j] = j + 1;
+ }
+ hclge_query_reg_info(hdev, &msg, loop_time, loop_para);
+ }
+}
+
static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
{
.module_id = MODULE_NONE,
@@ -1210,7 +1629,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_GE"
}, {
.module_id = MODULE_IGU_EGU,
- .msg = "MODULE_IGU_EGU"
+ .msg = "MODULE_IGU_EGU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_LGE,
.msg = "MODULE_LGE"
@@ -1231,7 +1651,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_RTC"
}, {
.module_id = MODULE_SSU,
- .msg = "MODULE_SSU"
+ .msg = "MODULE_SSU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_TM,
.msg = "MODULE_TM"
@@ -2762,7 +3183,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev)
}
static bool
-hclge_handle_error_type_reg_log(struct device *dev,
+hclge_handle_error_type_reg_log(struct hclge_dev *hdev,
struct hclge_mod_err_info *mod_info,
struct hclge_type_reg_err_info *type_reg_info)
{
@@ -2770,6 +3191,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ struct device *dev = &hdev->pdev->dev;
u8 index_module = MODULE_NONE;
u8 index_type = NONE_ERROR;
bool cause_by_vf = false;
@@ -2810,6 +3232,9 @@ hclge_handle_error_type_reg_log(struct device *dev,
for (i = 0; i < type_reg_info->reg_num; i++)
dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+ if (hclge_hw_module_id_st[index_module].query_reg_info)
+ hclge_hw_module_id_st[index_module].query_reg_info(hdev);
+
return cause_by_vf;
}
@@ -2850,7 +3275,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
type_reg_info = (struct hclge_type_reg_err_info *)
&buf[offset++];
- if (hclge_handle_error_type_reg_log(dev, mod_info,
+ if (hclge_handle_error_type_reg_log(hdev, mod_info,
type_reg_info))
cause_by_vf = true;
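hclge_print_mod_reg_info() above resolves each entry of reg_offset_group[] into a descriptor index and a data-word index with integer division and modulo on HCLGE_DESC_DATA_LEN. A minimal userspace sketch of that decomposition, assuming six 32-bit data words per descriptor; the offsets are borrowed from hclge_ssu_reg_0_info purely as examples.

#include <stdio.h>

#define DESC_DATA_LEN 6	/* assumed: 32-bit data words per descriptor */

int main(void)
{
	/* a few offsets from hclge_ssu_reg_0_info, used only as examples */
	static const unsigned int offsets[] = { 5, 29, 54, 55 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int bd_idx = offsets[i] / DESC_DATA_LEN; /* which descriptor */
		unsigned int index = offsets[i] % DESC_DATA_LEN;  /* which data word */

		printf("offset %2u -> desc[%u].data[%u]\n",
		       offsets[i], bd_idx, index);
	}
	return 0;
}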
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 68b738affa66..45a783a50643 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hclge_debugfs.h"
#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
@@ -115,6 +116,18 @@
#define HCLGE_REG_NUM_MAX 256
#define HCLGE_DESC_NO_DATA_LEN 8
+#define HCLGE_BD_NUM_SSU_REG_0 10
+#define HCLGE_BD_NUM_SSU_REG_1 15
+#define HCLGE_BD_NUM_RPU_REG_0 1
+#define HCLGE_BD_NUM_RPU_REG_1 2
+#define HCLGE_BD_NUM_IGU_EGU_REG 9
+#define HCLGE_BD_NUM_GEN_REG 8
+#define HCLGE_MOD_REG_INFO_LEN_MAX 256
+#define HCLGE_MOD_REG_EXTRA_LEN 11
+#define HCLGE_MOD_REG_VALUE_LEN 9
+#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6
+#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -191,6 +204,7 @@ struct hclge_hw_error {
struct hclge_hw_module_id {
enum hclge_mod_name_list module_id;
const char *msg;
+ void (*query_reg_info)(struct hclge_dev *hdev);
};
struct hclge_hw_type_id {
@@ -218,6 +232,28 @@ struct hclge_type_reg_err_info {
u32 hclge_reg[HCLGE_REG_NUM_MAX];
};
+struct hclge_mod_reg_info {
+ const char *reg_name;
+ bool has_suffix; /* append a suffix to the register name */
+ /* the positions of reg values in hclge_desc.data */
+ u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE];
+ u8 group_size;
+};
+
+/* This structure defines the cmdq command used to query the hardware module
+ * debug registers.
+ */
+struct hclge_mod_reg_common_msg {
+ enum hclge_opcode_type cmd;
+ struct hclge_desc *desc;
+ u8 bd_num; /* the number of hclge_desc BDs used */
+ bool need_para; /* whether this cmdq needs an extra parameter */
+
+ /* the registers to print */
+ const struct hclge_mod_reg_info *result_regs;
+ u16 result_regs_size;
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
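The new query_reg_info member of struct hclge_hw_module_id is the usual optional-callback-in-a-table pattern: most entries leave the pointer NULL, only the modules that want an extra register dump (IGU_EGU and SSU above) fill it in, and the caller checks for NULL before invoking it. A self-contained sketch of the pattern with made-up module names:

#include <stdio.h>

struct module_entry {
	int id;
	const char *msg;
	void (*query_reg_info)(int id);	/* optional, may be NULL */
};

static void dump_ssu_regs(int id)
{
	printf("dumping extra registers for module %d\n", id);
}

static const struct module_entry module_table[] = {
	{ 0, "MODULE_NONE", NULL },
	{ 1, "MODULE_SSU",  dump_ssu_regs },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(module_table) / sizeof(module_table[0]); i++) {
		printf("%s\n", module_table[i].msg);
		/* only modules that registered a handler get the extra dump */
		if (module_table[i].query_reg_info)
			module_table[i].query_reg_info(module_table[i].id);
	}
	return 0;
}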
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b4afb66efe5c..43cc6ee4d87d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -27,6 +27,8 @@
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
+#include "hclge_trace.h"
+
#define HCLGE_NAME "hclge"
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -391,6 +393,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_pf_cmd_send(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_pf_cmd_get(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
+ .trace_cmd_send = hclge_trace_cmd_send,
+ .trace_cmd_get = hclge_trace_cmd_get,
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -1537,6 +1581,9 @@ static int hclge_configure(struct hclge_dev *hdev)
cfg.default_speed, ret);
return ret;
}
+ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
+ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
+ hdev->hw.mac.req_duplex = DUPLEX_FULL;
hclge_parse_link_mode(hdev, cfg.speed_ability);
@@ -1766,7 +1813,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
@@ -2458,7 +2506,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -3342,9 +3391,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
return ret;
}
- hdev->hw.mac.autoneg = cmd->base.autoneg;
- hdev->hw.mac.speed = cmd->base.speed;
- hdev->hw.mac.duplex = cmd->base.duplex;
+ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
+ hdev->hw.mac.req_speed = cmd->base.speed;
+ hdev->hw.mac.req_duplex = cmd->base.duplex;
linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
return 0;
@@ -3377,9 +3426,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
if (!hnae3_dev_phy_imp_supported(hdev))
return 0;
- cmd.base.autoneg = hdev->hw.mac.autoneg;
- cmd.base.speed = hdev->hw.mac.speed;
- cmd.base.duplex = hdev->hw.mac.duplex;
+ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
+ cmd.base.speed = hdev->hw.mac.req_speed;
+ cmd.base.duplex = hdev->hw.mac.req_duplex;
linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
@@ -7178,8 +7227,9 @@ static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
}
}
-static void hclge_get_cls_key_ip(const struct flow_rule *flow,
- struct hclge_fd_rule *rule)
+static int hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule,
+ struct netlink_ext_ack *extack)
{
u16 addr_type = 0;
@@ -7188,6 +7238,9 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
flow_rule_match_control(flow, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -7216,6 +7269,8 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
}
+
+ return 0;
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
@@ -7241,7 +7296,9 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
struct flow_dissector *dissector = flow->match.dissector;
+ int ret;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -7259,7 +7316,11 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
hclge_get_cls_key_basic(flow, rule);
hclge_get_cls_key_mac(flow, rule);
hclge_get_cls_key_vlan(flow, rule);
- hclge_get_cls_key_ip(flow, rule);
+
+ ret = hclge_get_cls_key_ip(flow, rule, extack);
+ if (ret)
+ return ret;
+
hclge_get_cls_key_port(flow, rule);
return 0;
@@ -7952,8 +8013,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclge_flush_link_update(hdev);
}
}
@@ -9906,6 +9966,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
+ bool enable = true;
int ret;
int i;
@@ -9925,8 +9986,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
vport->cur_vlan_fltr_en = true;
}
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
+ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ enable = false;
+
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true, 0);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
@@ -10839,6 +10904,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
+{
+ struct hclge_comm_query_scc_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1);
+ resp = (struct hclge_comm_query_scc_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *scc_version = le32_to_cpu(resp->scc_version);
+
+ return 0;
+}
+
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -11622,16 +11705,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- ret = hclge_devlink_init(hdev);
- if (ret)
- goto err_pci_uninit;
-
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
- goto err_devlink_uninit;
+ goto err_pci_uninit;
/* Firmware command initialize */
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
if (ret)
@@ -11757,7 +11837,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_update_port_info(hdev);
if (ret)
- goto err_mdiobus_unreg;
+ goto err_ptp_uninit;
INIT_KFIFO(hdev->mac_tnl_log);
@@ -11797,6 +11877,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_warn(&pdev->dev,
"failed to wake on lan init, ret = %d\n", ret);
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_ptp_uninit;
+
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
@@ -11804,9 +11888,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
HCLGE_DRIVER_NAME);
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
-
return 0;
+err_ptp_uninit:
+ hclge_ptp_uninit(hdev);
err_mdiobus_unreg:
if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus);
@@ -11816,8 +11901,6 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
-err_devlink_uninit:
- hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e821dd2f1528..b5178b0f88b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -279,11 +279,14 @@ struct hclge_mac {
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
+ u8 req_autoneg;
u8 duplex;
+ u8 req_duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u8 lane_num;
u32 speed;
+ u32 req_speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
@@ -891,7 +894,7 @@ struct hclge_dev {
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
@@ -1169,4 +1172,5 @@ int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
int hclge_mac_update_stats(struct hclge_dev *hdev);
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type);
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index d4a0e0be7a72..59c863306657 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
hdev = param->vport->back;
cmd_func = hclge_mbx_ops_list[param->req->msg.code];
- if (cmd_func)
- ret = cmd_func(param);
- else
+ if (!cmd_func) {
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
param->req->msg.code);
+ return;
+ }
+ ret = cmd_func(param);
/* PF driver should not reply IMP */
if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index f3cd5a376eca..7e47f0c21d88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
+#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32))
#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
@@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_pf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname, pci_name(hw->cmq.csq.pdev));
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *data,
+ int index,
+ int num),
+ TP_ARGS(hw, data, index, num),
+
+ TP_STRUCT__entry(__field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, PF_DESC_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname, pci_name(hw->cmq.csq.pdev));
+ for (i = 0; i < PF_DESC_LEN; i++)
+ __entry->data[i] = le32_to_cpu(data[i]);
+ ),
+
+ TP_printk("%s %d-%d data:%s",
+ __get_str(pciname),
+ __entry->index, __entry->num,
+ __print_array(__entry->data,
+ PF_DESC_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
#endif /* _HCLGE_TRACE_H_ */
/* This must be outside ifdef _HCLGE_TRACE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0aa9beefd1c7..3735d2fed11f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -11,6 +11,7 @@
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
+#include "hclgevf_trace.h"
#define HCLGEVF_NAME "hclgevf"
@@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_vf_cmd_send(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
+}
+
+static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_vf_cmd_get(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
+}
+
+static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
+ .trace_cmd_send = hclgevf_trace_cmd_send,
+ .trace_cmd_get = hclgevf_trace_cmd_get,
+};
+
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
@@ -412,7 +449,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->ae_algo = &ae_algovf;
nic->pdev = hdev->pdev;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->flags |= HNAE3_SUPPORT_VF;
nic->kinfo.io_base = hdev->hw.hw.io_base;
@@ -2082,8 +2120,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
-
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2180,8 +2218,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
} else {
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclgevf_flush_link_update(hdev);
}
}
@@ -2796,6 +2833,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
}
hclgevf_arq_init(hdev);
+
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2845,15 +2883,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_devlink_init(hdev);
- if (ret)
- goto err_devlink_init;
-
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
hclgevf_arq_init(hdev);
+
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2941,6 +2977,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_config;
+
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
hdev->last_reset_time = jiffies;
@@ -2960,8 +3000,6 @@ err_misc_irq_init:
err_cmd_init:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
- hclgevf_devlink_uninit(hdev);
-err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a73f2bf3a56a..cccef3228461 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -236,7 +236,7 @@ struct hclgevf_dev {
u16 rss_size_max; /* HW defined max RSS task queue */
u16 num_alloc_vport; /* num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index b259e95dd53c..e2e3a2602b6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_vf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname, pci_name(hw->cmq.csq.pdev));
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
#endif /* _HCLGEVF_TRACE_H_ */
/* This must be outside ifdef _HCLGEVF_TRACE_H */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 499c657d37a9..890f213da8d1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -581,7 +581,7 @@ static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return err;
}
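This hinic hunk, like the IBM and Intel MTU hunks that follow, marks the store to netdev->mtu with WRITE_ONCE() so that readers sampling the MTU without the writer's lock (and expected to use READ_ONCE()) see a single untorn value. A minimal userspace sketch of the pairing, using simplified volatile-based stand-ins for the kernel macros; the real kernel definitions do more.

#include <stdio.h>

/* simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct netdev_stub {
	unsigned int mtu;
};

int main(void)
{
	struct netdev_stub dev = { .mtu = 1500 };

	/* writer (the ndo_change_mtu path): publish the new MTU as one store */
	WRITE_ONCE(dev.mtu, 9000);

	/* reader (e.g. a datapath not holding the writer's lock) */
	printf("mtu = %u\n", READ_ONCE(dev.mtu));
	return 0;
}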
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index e6e47b1842ea..a19d098f2e2b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1098,7 +1098,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->ndev->mtu = new_mtu;
+ WRITE_ONCE(dev->ndev->mtu, new_mtu);
emac_full_tx_reset(dev);
}
@@ -1130,7 +1130,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
}
if (!ret) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
dev->rx_skb_size = emac_rx_skb_size(new_mtu);
dev->rx_sync_size = emac_rx_sync_size(new_mtu);
}
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2439f7e96e05..d92dd9c83031 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -605,9 +605,13 @@ static int mal_probe(struct platform_device *ofdev)
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
- init_dummy_netdev(&mal->dummy_dev);
+ mal->dummy_dev = alloc_netdev_dummy(0);
+ if (!mal->dummy_dev) {
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
- netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+ netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll,
CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
@@ -637,7 +641,7 @@ static int mal_probe(struct platform_device *ofdev)
GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
- goto fail_unmap;
+ goto fail_dummy;
}
for (i = 0; i < mal->num_tx_chans; ++i)
@@ -703,6 +707,8 @@ static int mal_probe(struct platform_device *ofdev)
free_irq(mal->serr_irq, mal);
fail2:
dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ fail_dummy:
+ free_netdev(mal->dummy_dev);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
fail:
@@ -734,6 +740,8 @@ static void mal_remove(struct platform_device *ofdev)
mal_reset(mal);
+ free_netdev(mal->dummy_dev);
+
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d212373a72e7..e0ddc41186a2 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -205,7 +205,7 @@ struct mal_instance {
int index;
spinlock_t lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
unsigned int features;
};
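The mal.c/mal.h hunks above replace an embedded struct net_device with a dynamically allocated dummy device, which is why probe gains an allocation failure path and remove gains a free_netdev() call. A minimal sketch of the resulting lifecycle, using an illustrative foo_ prefix rather than the driver's names; alloc_netdev_dummy(), netif_napi_add_weight(), netif_napi_del() and free_netdev() are the kernel APIs involved.

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dummy_dev;	/* was: struct net_device dummy_dev */
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	return 0;	/* illustrative NAPI poll stub */
}

static int foo_setup(struct foo_priv *priv)
{
	priv->dummy_dev = alloc_netdev_dummy(0);
	if (!priv->dummy_dev)
		return -ENOMEM;	/* allocation can fail now, so callers must unwind */

	netif_napi_add_weight(priv->dummy_dev, &priv->napi, foo_poll, 64);
	return 0;
}

static void foo_teardown(struct foo_priv *priv)
{
	netif_napi_del(&priv->napi);
	free_netdev(priv->dummy_dev);	/* balances alloc_netdev_dummy() */
}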
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b5aef0b29efe..4c9d9badd698 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1537,7 +1537,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 30c47b8470ad..5e9a93bdb518 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2371,7 +2371,7 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
ind_bufp->index = 0;
- return 0;
+ return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -2424,7 +2424,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2439,8 +2441,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 639fbb12bd35..e0287fbd501d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL
if NET_VENDOR_INTEL
+source "drivers/net/ethernet/intel/libeth/Kconfig"
+source "drivers/net/ethernet/intel/libie/Kconfig"
+
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
@@ -41,7 +44,7 @@ config E100
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -225,6 +228,7 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
+ select LIBIE
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -253,6 +257,8 @@ config I40E_DCB
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF
tristate
+ select LIBIE
+
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
@@ -283,6 +289,7 @@ config ICE
depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
+ select LIBIE
select NET_DEVLINK
select PLDMFW
select DPLL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index dacb481ee5b1..04c844ef4964 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -3,6 +3,9 @@
# Makefile for the Intel network device drivers.
#
+obj-$(CONFIG_LIBETH) += libeth/
+obj-$(CONFIG_LIBIE) += libie/
+
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 3fcb8daaa243..9b068d40778d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3037,7 +3037,7 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-static int __maybe_unused e100_suspend(struct device *dev_d)
+static int e100_suspend(struct device *dev_d)
{
bool wake;
@@ -3046,7 +3046,7 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused e100_resume(struct device *dev_d)
+static int e100_resume(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
@@ -3163,7 +3163,7 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
static struct pci_driver e100_driver = {
.name = DRV_NAME,
@@ -3172,7 +3172,7 @@ static struct pci_driver e100_driver = {
.remove = e100_remove,
/* Power Management hooks */
- .driver.pm = &e100_pm_ops,
+ .driver.pm = pm_sleep_ptr(&e100_pm_ops),
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
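The e100 hunk above (and the e1000 one that follows) moves from SIMPLE_DEV_PM_OPS plus __maybe_unused callbacks to DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(), which references the callbacks unconditionally and lets the compiler drop the whole ops block when CONFIG_PM_SLEEP is off. A rough sketch of the resulting pattern with illustrative foo_ names:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

/* no __maybe_unused needed: the callbacks are always referenced, and
 * pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is disabled.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= pm_sleep_ptr(&foo_pm_ops),
	/* .id_table, .probe, .remove omitted from this sketch */
};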
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1d1e93686af2..60fff9a6c53e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -149,8 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-static int __maybe_unused e1000_suspend(struct device *dev);
-static int __maybe_unused e1000_resume(struct device *dev);
+static int e1000_suspend(struct device *dev);
+static int e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -175,16 +175,14 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&e1000_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
@@ -3571,7 +3569,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
e1000_up(adapter);
@@ -5135,7 +5133,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused e1000_suspend(struct device *dev)
+static int e1000_suspend(struct device *dev)
{
int retval;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -5147,7 +5145,7 @@ static int __maybe_unused e1000_suspend(struct device *dev)
return retval;
}
-static int __maybe_unused e1000_resume(struct device *dev)
+static int e1000_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 23a58cada43a..5e2cfa73f889 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -679,8 +679,6 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index dc553c51d79a..85da20778e0f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -156,7 +156,7 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
speed = adapter->link_speed;
cmd->base.duplex = adapter->link_duplex - 1;
}
- } else if (!pm_runtime_suspended(netdev->dev.parent)) {
+ } else {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
@@ -274,16 +274,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
- pm_runtime_get_sync(netdev->dev.parent);
-
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -291,16 +288,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
* duplex is forced.
*/
if (cmd->base.eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = -EOPNOTSUPP;
- goto out;
- }
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
@@ -347,7 +341,6 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return ret_val;
}
@@ -383,8 +376,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -417,7 +408,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -448,8 +438,6 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
- pm_runtime_get_sync(netdev->dev.parent);
-
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1u << 24) |
@@ -495,8 +483,6 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -529,8 +515,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -544,8 +528,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
- pm_runtime_put_sync(netdev->dev.parent);
-
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -595,8 +577,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -637,7 +617,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
- pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -733,8 +712,6 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
- pm_runtime_get_sync(netdev->dev.parent);
-
e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
@@ -773,7 +750,6 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -1816,8 +1792,6 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
- pm_runtime_get_sync(netdev->dev.parent);
-
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1903,8 +1877,6 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -2046,15 +2018,11 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
@@ -2068,9 +2036,7 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
- pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -2084,12 +2050,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
- pm_runtime_get_sync(netdev->dev.parent);
-
dev_get_stats(netdev, &net_stats);
- pm_runtime_put_sync(netdev->dev.parent);
-
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2146,9 +2108,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 mrqc;
- pm_runtime_get_sync(netdev->dev.parent);
mrqc = er32(MRQC);
- pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2211,13 +2171,9 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- pm_runtime_put_sync(netdev->dev.parent);
+ if (ret_val)
return -EBUSY;
- }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2257,8 +2213,6 @@ release:
if (ret_val)
ret_val = -ENODATA;
- pm_runtime_put_sync(netdev->dev.parent);
-
return ret_val;
}
@@ -2299,16 +2253,12 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
- pm_runtime_get_sync(netdev->dev.parent);
-
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1fef6bb5a5fb..4b6e7536170a 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -628,6 +628,7 @@ struct e1000_phy_info {
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
+ u32 retry_count;
enum e1000_media_type media_type;
@@ -644,6 +645,7 @@ struct e1000_phy_info {
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
+ bool retry_enabled;
};
struct e1000_nvm_info {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 19e450a5bd31..f9e94be36e97 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -222,11 +222,18 @@ out:
if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
goto out;
}
+ /* There is no guarantee that the PHY is accessible at this time
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
+ e1000e_enable_phy_retry(hw);
+
hw->phy.ops.release(hw);
if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->id = e1000_phy_unknown;
+ if (hw->mac.type == e1000_pch_mtp) {
+ phy->retry_count = 2;
+ e1000e_enable_phy_retry(hw);
+ }
+
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val)
return ret_val;
@@ -1146,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1313,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
@@ -1333,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index cc8c531ec3df..220d62fca55d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6038,7 +6038,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
pm_runtime_get_sync(netdev->dev.parent);
@@ -6623,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6696,6 +6697,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
+
+ /* Force SMBUS to allow WOL */
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -6950,13 +6968,13 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+static int e1000e_pm_prepare(struct device *dev)
{
return pm_runtime_suspended(dev) &&
pm_suspend_via_firmware();
}
-static __maybe_unused int e1000e_pm_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6979,7 +6997,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7013,7 +7031,7 @@ static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7032,7 +7050,7 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7919,8 +7937,7 @@ static const struct pci_device_id e1000_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-static const struct dev_pm_ops e1000_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops e1000e_pm_ops = {
.prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
@@ -7928,9 +7945,8 @@ static const struct dev_pm_ops e1000_pm_ops = {
.thaw = e1000e_pm_thaw,
.poweroff = e1000e_pm_suspend,
.restore = e1000e_pm_resume,
-#endif
- SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
- e1000e_pm_runtime_idle)
+ RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7939,9 +7955,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_ptr(&e1000e_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
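
The switch from the CONFIG_PM_SLEEP #ifdef block to RUNTIME_PM_OPS() plus pm_ptr() is what allows the __maybe_unused annotations above to be dropped: the callbacks are always compiled, RUNTIME_PM_OPS() (unlike SET_RUNTIME_PM_OPS()) populates the runtime fields unconditionally, and pm_ptr() evaluates to NULL when CONFIG_PM is disabled, so the unreferenced static callbacks are discarded by the compiler. A minimal sketch of that pattern, using hypothetical example_* names rather than the driver's real callbacks:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_suspend(struct device *dev)         { return 0; }
static int example_resume(struct device *dev)          { return 0; }
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	/* system sleep callbacks, no #ifdef CONFIG_PM_SLEEP needed */
	.suspend = example_suspend,
	.resume  = example_resume,
	/* runtime PM callbacks, always set; no idle callback in this sketch */
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

static struct pci_driver example_driver = {
	.name      = "example",
	/* .id_table, .probe, .remove omitted for brevity */
	/* pm_ptr() is NULL when CONFIG_PM=n, so the ops above are dropped */
	.driver.pm = pm_ptr(&example_pm_ops),
};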
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 5e329156d1ba..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = true;
+}
+
/**
* e1000e_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
@@ -118,55 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
**/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ if (success) {
+ *data = (u16)mdic;
+ return 0;
+ }
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
}
- *data = (u16)mdic;
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
- return 0;
+ return -E1000_ERR_PHY;
}
/**
@@ -179,56 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
- }
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI write completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ if (success)
+ return 0;
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
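
Both MDIC paths now share the same shape: perform one transaction, record success or failure, and only when phy.retry_enabled is set loop up to phy.retry_count additional attempts with a 10 ms pause in between. A stripped-down, hypothetical sketch of that control flow (generic names, not the driver's code):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* attempt() performs one MDIC transaction and returns 0 on success. */
static int xfer_with_retry(int (*attempt)(void *ctx), void *ctx,
			   bool retry_enabled, u32 retry_count)
{
	u32 retry_max = retry_enabled ? retry_count : 0;
	u32 i;

	for (i = 0; i <= retry_max; i++) {
		if (!attempt(ctx))
			return 0;	/* success, stop retrying */
		if (i != retry_max)
			mdelay(10);	/* pause between attempts, as in the patch */
	}

	return -EIO;	/* the driver returns -E1000_ERR_PHY here */
}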
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index c48777d09523..049bb325b4b1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
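
Callers bracket transactions that are expected to fail with the new pair, as the ich8lan.c hunks above do when unforcing SMBus mode. A hedged caller sketch (hypothetical helper name, assumes the driver's e1000 headers for e1e_rphy_locked()/e1e_wphy_locked() and the CV_SMB_CTRL definitions):

static void example_unforce_smbus(struct e1000_hw *hw)
{
	u16 phy_reg;

	/* Switching the PHY interface reports MDI errors, so skip retries */
	e1000e_disable_phy_retry(hw);

	e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

	e1000e_enable_phy_retry(hw);
}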
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index d748b98274e7..92de609b7218 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2342,7 +2342,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_resume(struct device *dev)
+static int fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2369,7 +2369,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_suspend(struct device *dev)
+static int fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2502,16 +2502,14 @@ static const struct pci_error_handlers fm10k_err_handler = {
.reset_done = fm10k_io_reset_done,
};
-static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
static struct pci_driver fm10k_driver = {
.name = fm10k_driver_name,
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
- .driver = {
- .pm = &fm10k_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&fm10k_pm_ops),
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ba24f3fa92c3..bca2084cc54b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -788,7 +788,6 @@ struct i40e_veb {
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
- u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
bool is_abs_credits;
@@ -955,6 +954,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ bool in_busy_poll;
int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
@@ -1212,7 +1212,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
@@ -1236,8 +1236,8 @@ static inline void i40e_dbg_exit(void) {}
int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf);
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
void i40e_client_update_msix_info(struct i40e_pf *pf);
@@ -1373,6 +1373,17 @@ i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
}
/**
+ * i40e_pf_get_main_vsi - get pointer to main VSI
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VSI or NULL if it does not exist
+ **/
+static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
+{
+ return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
+}
+
+/**
* i40e_pf_get_veb_by_seid - find VEB by SEID
* @pf: pointer to a PF
* @seid: SEID of the VSI
@@ -1390,4 +1401,15 @@ i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
return NULL;
}
+/**
+ * i40e_pf_get_main_veb - get pointer to main VEB
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VEB or NULL if it does not exist
+ **/
+static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
+{
+ return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
+}
+
#endif /* _I40E_H_ */
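
The two inline helpers return NULL when the main VSI or VEB has not been set up, so call sites either run in contexts where it must exist or check the pointer first. A hedged usage sketch (hypothetical function, assumes i40e.h is available):

static void example_log_main_vsi(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
	struct i40e_veb *veb = i40e_pf_get_main_veb(pf);

	if (!vsi || !vsi->netdev)
		return;	/* main VSI not (fully) set up yet */

	dev_info(&pf->pdev->dev, "main VSI %s, uplink VEB %s\n",
		 vsi->netdev->name, veb ? "present" : "absent");
}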
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index b32071ee84af..59263551c383 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
/**
* i40e_notify_client_of_l2_param_changes - call the client notify callback
- * @vsi: the VSI with l2 param changes
+ * @pf: PF device pointer
*
- * If there is a client to this VSI, call the client
+ * If there is a client, call its callback
**/
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance l2_param_change routine\n");
return;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ dev_dbg(&pf->pdev->dev,
+ "Client is not open, abort l2 param change\n");
return;
}
memset(&params, 0, sizeof(params));
@@ -157,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
/**
* i40e_notify_client_of_netdev_close - call the client close callback
- * @vsi: the VSI with netdev closed
+ * @pf: PF device pointer
* @reset: true when close called due to a reset pending
*
* If there is a client to this netdev, call the client with close
**/
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_client_instance *cdev = pf->cinst;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance close routine\n");
return;
}
@@ -333,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
@@ -399,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_client *client;
int ret = 0;
if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
@@ -665,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf);
struct i40e_pf *pf = ldev->pf;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
int err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index de6ca6295742..e8031f1a9b4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -381,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw,
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40e_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- I40E_RX_PTYPE_##OUTER_FRAG, \
- I40E_RX_PTYPE_TUNNEL_##T, \
- I40E_RX_PTYPE_TUNNEL_END_##TE, \
- I40E_RX_PTYPE_##TEF, \
- I40E_RX_PTYPE_INNER_PROT_##I, \
- I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- I40E_PTT_UNUSED_ENTRY(0),
- I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(4),
- I40E_PTT_UNUSED_ENTRY(5),
- I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(8),
- I40E_PTT_UNUSED_ENTRY(9),
- I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(25),
- I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(32),
- I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(39),
- I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(47),
- I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(54),
- I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(62),
- I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(69),
- I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(77),
- I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(84),
- I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(91),
- I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(98),
- I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(105),
- I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(113),
- I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(120),
- I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(128),
- I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(135),
- I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(143),
- I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(150),
- I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 2f53f0f53bc3..daa9f2c42f70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
**/
static int i40e_ddp_restore(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_ddp_old_profile_list *entry;
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
int status = 0;
if (!list_empty(&pf->ddp_old_prof)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f9ba45f596c9..abf624d770e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -53,6 +53,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -68,8 +69,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_command_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
@@ -128,7 +129,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" state[%d] = %08lx\n",
i, vsi->state[i]);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
pf->hw.mac.port_addr);
@@ -786,7 +787,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
if (cnt == 0) {
/* default to PF VSI */
- vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ vsi = i40e_pf_get_main_vsi(pf);
+ vsi_seid = vsi->seid;
} else if (vsi_seid < 0) {
dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
vsi_seid);
@@ -867,7 +869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
+ veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
@@ -1030,7 +1032,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = pf->vsi[pf->lan_vsi];
+ vsi = i40e_pf_get_main_vsi(pf);
switch_id =
le16_to_cpu(vsi->info.switch_id) &
I40E_AQ_VSI_SW_ID_MASK;
@@ -1380,6 +1382,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ /* Get main VSI */
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1391,10 +1396,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, true, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, true, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Add Control Packet Filter AQ command failed =0x%x\n",
@@ -1409,10 +1413,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
int ret;
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, false, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, false, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
@@ -1639,6 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -1654,8 +1658,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_netdev_ops_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 42e7e6cdaa6d..4e28785c9fb2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1241,7 +1241,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
i40e_partition_setting_complaint(pf);
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
@@ -1710,7 +1710,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
@@ -2029,7 +2029,7 @@ static void i40e_get_ringparam(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
@@ -2292,7 +2292,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
int stats_len;
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1)
stats_len = I40E_PF_STATS_LEN;
else
stats_len = I40E_VSI_STATS_LEN;
@@ -2422,17 +2422,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
- veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->lan_veb < I40E_MAX_VEB) &&
- test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags));
+ veb = i40e_pf_get_main_veb(pf);
+ veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
- if (veb_stats) {
- veb = pf->veb[pf->lan_veb];
+ if (veb_stats)
i40e_update_veb_stats(veb);
- }
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
@@ -2495,7 +2492,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
"rx", i);
}
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2792,7 +2789,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* NVM bit on means WoL disabled for the port */
@@ -3370,6 +3367,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ struct i40e_vsi *vsi;
u64 input_set;
u16 index;
@@ -3493,9 +3491,8 @@ no_input_set:
fsp->flow_type |= FLOW_EXT;
}
- if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
- struct i40e_vsi *vsi;
-
+ vsi = i40e_pf_get_main_vsi(pf);
+ if (rule->dest_vsi != vsi->id) {
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
/* VFs are zero-indexed by the driver, but ethtool
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f86578857e8a..1f188c052828 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -989,7 +990,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_dropped = es->tx_discards;
/* pull in a couple PF stats if this is the main vsi */
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi->type == I40E_VSI_MAIN) {
ns->rx_crc_errors = pf->stats.crc_errors;
ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
ns->rx_length_errors = pf->stats.rx_length_errors;
@@ -1234,7 +1235,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
@@ -1253,8 +1254,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
int bkt;
int cnt = 0;
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
- ++cnt;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_ACTIVE)
+ ++cnt;
+ }
return cnt;
}
@@ -2472,12 +2476,12 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
**/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
- pf->lan_veb != I40E_NO_VEB &&
+ i40e_pf_get_main_veb(pf) &&
!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
@@ -2957,7 +2961,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
@@ -3911,6 +3915,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
+ /* Set ITR for software interrupts triggered after exiting
+ * busy-loop polling.
+ */
+ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+ I40E_ITR_20K);
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -4313,7 +4323,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* We do not have a way to disarm Queue causes while leaving
@@ -5463,7 +5473,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
**/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
u8 enabled_tc = 1, i;
@@ -5480,13 +5490,14 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
u8 i, enabled_tc = 1;
u8 num_tc = 0;
- struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (i40e_is_tc_mqprio_enabled(pf))
- return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+
+ return vsi->mqprio_qopt.qopt.num_tc;
+ }
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
@@ -5494,7 +5505,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* SFP mode will be enabled for all TCs on port */
if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
- return i40e_dcb_get_num_tc(dcbcfg);
+ return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config);
/* MFP mode return count of enabled TCs for this PF */
if (pf->hw.func_caps.iscsi)
@@ -5907,6 +5918,28 @@ out:
}
/**
+ * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map
+ * @vsi: VSI to be reconfigured
+ *
+ * This reconfigures a particular VSI for TCs that are mapped to the
+ * TC bitmap stored previously for the VSI.
+ *
+ * Context: It is expected that the VSI queues have been quiesced before
+ * calling this function.
+ *
+ * Return: 0 on success, negative value on failure
+ **/
+static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi)
+{
+ u8 enabled_tc;
+
+ enabled_tc = vsi->tc_config.enabled_tc;
+ vsi->tc_config.enabled_tc = 0;
+
+ return i40e_vsi_config_tc(vsi, enabled_tc);
+}
+
+/**
* i40e_get_link_speed - Returns link speed for the interface
* @vsi: VSI to be configured
*
@@ -6468,6 +6501,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
+ struct i40e_vsi *main_vsi;
u8 vsi_type;
u16 seid;
int ret;
@@ -6481,7 +6515,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
}
/* underlying switching element */
- seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ seid = main_vsi->uplink_seid;
/* create channel (VSI), configure TX rings */
ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
@@ -6799,7 +6834,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
- if (v == pf->lan_vsi)
+ if (vsi->type == I40E_VSI_MAIN)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
@@ -7038,7 +7073,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
/* Configure Rx Packet Buffers in HW */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ mfs_tc[i] = main_vsi->netdev->mtu;
mfs_tc[i] += I40E_PACKET_HDR_PAD;
}
@@ -8634,6 +8671,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -9104,7 +9145,7 @@ err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err;
@@ -9795,7 +9836,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf));
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
@@ -9893,7 +9934,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_veb *veb = i40e_pf_get_main_veb(pf);
u8 new_link_speed, old_link_speed;
bool new_link, old_link;
int status;
@@ -9933,8 +9975,8 @@ static void i40e_link_event(struct i40e_pf *pf)
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ if (veb)
+ i40e_veb_link_event(veb, new_link);
else
i40e_vsi_link_event(vsi, new_link);
@@ -10264,7 +10306,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
**/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10300,7 +10342,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
**/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10376,7 +10418,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (veb->uplink_seid == pf->mac_seid) {
/* Check that the LAN VSI has VEB owning flag set */
- ctl_vsi = pf->vsi[pf->lan_vsi];
+ ctl_vsi = i40e_pf_get_main_vsi(pf);
if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
!(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
@@ -10519,7 +10561,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
**/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi;
+ struct i40e_vsi *main_vsi, *vsi;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -10544,8 +10586,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
/* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
- pf->vsi[pf->lan_vsi]->seid, 0);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
@@ -10824,7 +10866,7 @@ static int i40e_reset(struct i40e_pf *pf)
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
struct i40e_veb *veb;
int ret;
@@ -10833,7 +10875,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
is_recovery_mode_reported)
- i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ i40e_set_ethtool_ops(vsi->netdev);
if (test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RECOVERY_MODE, pf->state))
@@ -11257,7 +11299,7 @@ static void i40e_service_task(struct work_struct *work)
return;
if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -11266,14 +11308,12 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
- true);
+ i40e_notify_client_of_netdev_close(pf, true);
} else {
i40e_client_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
+ i40e_notify_client_of_l2_param_changes(pf);
}
i40e_sync_filters_subtask(pf);
} else {
@@ -11981,7 +12021,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
/* if not MSIX, give the one vector only to the LAN VSI */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
- else if (vsi == pf->vsi[pf->lan_vsi])
+ else if (vsi->type == I40E_VSI_MAIN)
num_q_vectors = 1;
else
return -EINVAL;
@@ -12387,7 +12427,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
**/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 seed[I40E_HKEY_ARRAY_SIZE];
u8 *lut;
struct i40e_hw *hw = &pf->hw;
@@ -12459,7 +12499,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
int new_rss_size;
if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
@@ -13098,7 +13138,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
int rem;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
@@ -13108,20 +13148,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
-
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- mode = nla_get_u16(attr);
if ((mode != BRIDGE_MODE_VEPA) &&
(mode != BRIDGE_MODE_VEB))
return -EINVAL;
/* Insert a new HW bridge */
if (!veb) {
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
veb->bridge_mode = mode;
@@ -13170,7 +13206,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_veb *veb;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
@@ -13752,9 +13788,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* the end, which is 4 bytes long, so force truncation of the
* original name by IFNAMSIZ - 4
*/
- snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
- IFNAMSIZ - 4,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4,
+ main_vsi->netdev->name);
eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -14121,8 +14158,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
vsi->seid, vsi->uplink_seid);
return -ENODEV;
}
- if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, pf->state)) {
+ if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -14266,9 +14302,9 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ struct i40e_vsi *main_vsi;
u16 alloc_queue_pairs;
struct i40e_pf *pf;
- u8 enabled_tc;
int ret;
if (!vsi)
@@ -14300,10 +14336,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
/* Update the FW view of the VSI. Force a reset of TC and queue
* layout configurations.
*/
- enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
+
if (vsi->type == I40E_VSI_MAIN)
i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
@@ -14377,13 +14413,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
if (vsi->uplink_seid == pf->mac_seid)
- veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid,
vsi->tc_config.enabled_tc);
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ if (vsi->type != I40E_VSI_MAIN) {
dev_info(&vsi->back->pdev->dev,
"New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
@@ -14774,7 +14810,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/**
* i40e_veb_setup - Set up a VEB
* @pf: board private structure
- * @flags: VEB setup flags
* @uplink_seid: the switch element to link to
* @vsi_seid: the initial VSI seid
* @enabled_tc: Enabled TC bit-map
@@ -14787,9 +14822,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
* Returns pointer to the successfully allocated VEB sw struct on
* success, otherwise returns NULL on failure.
**/
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
- u16 uplink_seid, u16 vsi_seid,
- u8 enabled_tc)
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
+ u16 vsi_seid, u8 enabled_tc)
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb;
@@ -14820,7 +14854,6 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
if (veb_idx < 0)
goto err_alloc;
veb = pf->veb[veb_idx];
- veb->flags = flags;
veb->uplink_seid = uplink_seid;
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -14872,7 +14905,8 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
/* Main VEB? */
if (uplink_seid != pf->mac_seid)
break;
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb) {
int v;
/* find existing or else empty VEB */
@@ -14886,12 +14920,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->lan_veb = v;
}
}
- if (pf->lan_veb >= I40E_MAX_VEB)
+
+ /* Try to get the main VEB again as pf->lan_veb may have changed */
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb)
break;
- pf->veb[pf->lan_veb]->seid = seid;
- pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
- pf->veb[pf->lan_veb]->pf = pf;
+ veb->seid = seid;
+ veb->uplink_seid = pf->mac_seid;
+ veb->pf = pf;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -14989,6 +15026,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ struct i40e_vsi *main_vsi;
u16 flags = 0;
int ret;
@@ -15033,22 +15071,25 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI || reinit) {
- struct i40e_vsi *vsi = NULL;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ if (!main_vsi || reinit) {
+ struct i40e_veb *veb;
u16 uplink_seid;
/* Set up the PF VSI associated with the PF's main VSI
* that is already in the HW switch
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- uplink_seid = pf->veb[pf->lan_veb]->seid;
+ veb = i40e_pf_get_main_veb(pf);
+ if (veb)
+ uplink_seid = veb->seid;
else
uplink_seid = pf->mac_seid;
- if (pf->lan_vsi == I40E_NO_VSI)
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!main_vsi)
+ main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN,
+ uplink_seid, 0);
else if (reinit)
- vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
- if (!vsi) {
+ main_vsi = i40e_vsi_reinit_setup(main_vsi);
+ if (!main_vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -15056,13 +15097,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
} else {
/* force a reset of TC and queue layout configurations */
- u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
-
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
}
- i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_vlan_stripping_disable(main_vsi);
i40e_fdir_sb_setup(pf);
@@ -15089,7 +15127,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
rtnl_lock();
/* repopulate tunnel port filters */
- udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ udp_tunnel_nic_reset_ntf(main_vsi->netdev);
if (!lock_acquired)
rtnl_unlock();
@@ -15233,6 +15271,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
char *buf;
int i;
@@ -15246,8 +15285,7 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
- pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs);
if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
@@ -15911,7 +15949,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
- INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+ vsi = i40e_pf_get_main_vsi(pf);
+ INIT_LIST_HEAD(&vsi->ch_list);
/* if FDIR VSI was set up, start it now */
vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
@@ -16098,8 +16138,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16214,7 +16254,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
i40e_fdir_teardown(pf);
@@ -16413,15 +16453,15 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
**/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u8 mac_addr[6];
u16 flags = 0;
int ret;
/* Get current MAC address in case it's an LAA */
- if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
- ether_addr_copy(mac_addr,
- pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
} else {
dev_err(&pf->pdev->dev,
"Failed to retrieve MAC address; using default\n");
@@ -16473,7 +16513,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
pf->wol_en)
@@ -16509,7 +16549,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
* i40e_suspend - PM callback for moving to D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_suspend(struct device *dev)
+static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
@@ -16527,7 +16567,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
pf->wol_en)
@@ -16560,7 +16600,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
* i40e_resume - PM callback for waking up from D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
@@ -16606,16 +16646,14 @@ static const struct pci_error_handlers i40e_err_handler = {
.resume = i40e_pci_error_resume,
};
-static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
static struct pci_driver i40e_driver = {
.name = i40e_driver_name,
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
- .driver = {
- .pm = &i40e_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&i40e_pm_ops),
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
@@ -16641,7 +16679,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 605fd82f5d20..7f0936f4e05e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -734,37 +734,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static int i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+static u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
@@ -799,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = {
};
/**
- * i40e_nvmupd_command - Process an NVM update command
+ * i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command
- * @bytes: pointer to the data buffer
+ * @cmd: pointer to nvm update command buffer
* @perrno: pointer to return error code
*
- * Dispatches command depending on what update state is current
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
**/
-int i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static enum i40e_nvmupd_cmd
+i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd,
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- int status;
-
- /* assume success */
- *perrno = 0;
+ u8 module, transaction;
- /* early check for status command and debug msgs */
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->nvm_release_on_done, hw->nvm_wait_opcode,
- cmd->command, cmd->config, cmd->offset, cmd->data_size);
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *perrno = -EFAULT;
+ /* limits on data size */
+ if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *perrno);
+ "%s data_size %d\n", __func__, cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
}
- /* a status request returns immediately rather than
- * going into the state machine
- */
- if (upd_cmd == I40E_NVMUPD_STATUS) {
- if (!cmd->data_size) {
- *perrno = -EFAULT;
- return -EINVAL;
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
+ break;
- bytes[0] = hw->nvmupd_state;
-
- if (cmd->data_size >= 4) {
- bytes[1] = 0;
- *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
+ break;
+ }
- /* Clear error status on read */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ return upd_cmd;
+}
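As a quick illustration of the mapping above, using only constants visible in this hunk: an I40E_NVM_READ with transaction I40E_NVM_SA resolves to I40E_NVMUPD_READ_SA, an I40E_NVM_READ with transaction I40E_NVM_EXEC and module 0xf resolves to I40E_NVMUPD_STATUS (the status query that i40e_nvmupd_command answers without entering the state machine), and anything that falls through the switch remains I40E_NVMUPD_INVALID and is reported as -EFAULT.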
- return 0;
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Clear status even it is not read and log */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Acquire lock to prevent race condition where adminq_task
- * can execute after i40e_nvmupd_nvm_read/write but before state
- * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
- *
- * During NVMUpdate, it is observed that lock could be held for
- * ~5ms for most commands. However lock is held for ~60ms for
- * NVMUPD_CSUM_LCB command.
- */
- mutex_lock(&hw->aq.arq_mutex);
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
- break;
+ return status;
+}
- case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
- break;
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status;
+ bool last;
- case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
- break;
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- case I40E_NVMUPD_STATE_INIT_WAIT:
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- /* if we need to stop waiting for an event, clear
- * the wait info and return before doing anything else
- */
- if (cmd->offset == 0xffff) {
- i40e_nvmupd_clear_wait_state(hw);
- status = 0;
- break;
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+ int status;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
}
- status = -EBUSY;
- *perrno = -EBUSY;
- break;
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
- default:
- /* invalid state, should never happen */
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: no such state %d\n", hw->nvmupd_state);
- status = -EOPNOTSUPP;
- *perrno = -ESRCH;
- break;
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
- mutex_unlock(&hw->aq.arq_mutex);
return status;
}
/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
+}
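An illustrative walk-through of the copy logic above, assuming the usual 32-byte AQ descriptor: if nvm_wb_desc reports datalen == 64 then aq_total_len is 96, so a request with offset == 8 and data_size == 40 copies 24 bytes out of nvm_wb_desc starting at byte 8 and the remaining 16 bytes from the start of nvm_buff.va, while a request whose offset is 32 or more is served entirely from nvm_buff.va at (offset - 32).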
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -937,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
@@ -948,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
@@ -962,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
if (status) {
@@ -979,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -996,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -1012,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
@@ -1185,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1215,457 +1472,168 @@ retry:
}
/**
- * i40e_nvmupd_clear_wait_state - clear wait state on hw
- * @hw: pointer to the hardware structure
- **/
-void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
-{
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n",
- hw->nvm_wait_opcode);
-
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
-
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
-}
-
-/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
- * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
- * @desc: AdminQ descriptor
- **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
-{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
-
- if (opcode == hw->nvm_wait_opcode) {
- memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
- i40e_nvmupd_clear_wait_state(hw);
- }
-}
-
-/**
- * i40e_nvmupd_validate_command - Validate given command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * Return one of the valid command types or I40E_NVMUPD_INVALID
- **/
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- enum i40e_nvmupd_cmd upd_cmd;
- u8 module, transaction;
-
- /* anything that doesn't match a recognized case is an error */
- upd_cmd = I40E_NVMUPD_INVALID;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
-
- /* limits on data size */
- if ((cmd->data_size < 1) ||
- (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
- *perrno = -EFAULT;
- return I40E_NVMUPD_INVALID;
- }
-
- switch (cmd->command) {
- case I40E_NVM_READ:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_READ_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_READ_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_READ_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_READ_SA;
- break;
- case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
- upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
- break;
- case I40E_NVM_AQE:
- upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
- break;
- }
- break;
-
- case I40E_NVM_WRITE:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_WRITE_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_WRITE_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_WRITE_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_WRITE_SA;
- break;
- case I40E_NVM_ERA:
- upd_cmd = I40E_NVMUPD_WRITE_ERA;
- break;
- case I40E_NVM_CSUM:
- upd_cmd = I40E_NVMUPD_CSUM_CON;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_SA):
- upd_cmd = I40E_NVMUPD_CSUM_SA;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_LCB):
- upd_cmd = I40E_NVMUPD_CSUM_LCB;
- break;
- case I40E_NVM_EXEC:
- if (module == 0)
- upd_cmd = I40E_NVMUPD_EXEC_AQ;
- break;
- }
- break;
- }
-
- return upd_cmd;
-}
-
-/**
- * i40e_nvmupd_exec_aq - Run an AQ command
+ * i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
+ * @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @perrno: pointer to return error code
*
- * cmd structure contains identifiers and data buffer
+ * Dispatches command depending on what update state is current
**/
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
- u32 buff_size = 0;
- u8 *buff = NULL;
- u32 aq_desc_len;
- u32 aq_data_len;
+ enum i40e_nvmupd_cmd upd_cmd;
int status;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- if (cmd->offset == 0xffff)
- return 0;
+ /* assume success */
+ *perrno = 0;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- aq_desc_len = sizeof(struct i40e_aq_desc);
- memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
- /* get the aq descriptor */
- if (cmd->data_size < aq_desc_len) {
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
- cmd->data_size, aq_desc_len);
- *perrno = -EINVAL;
- return -EINVAL;
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
}
- aq_desc = (struct i40e_aq_desc *)bytes;
- /* if data buffer needed, make sure it's ready */
- aq_data_len = cmd->data_size - aq_desc_len;
- buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
- if (buff_size) {
- if (!hw->nvm_buff.va) {
- status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
- hw->aq.asq_buf_size);
- if (status)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
- status);
- }
-
- if (hw->nvm_buff.va) {
- buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return -EINVAL;
}
- }
-
- if (cmd->offset)
- memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
-
- /* and away we go! */
- status = i40e_asq_send_command(hw, aq_desc, buff,
- buff_size, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "%s err %pe aq_err %s\n",
- __func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
- return status;
- }
-
- /* should we wait for a followup event? */
- if (cmd->offset) {
- hw->nvm_wait_opcode = cmd->offset;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
- }
-
- return status;
-}
-/**
- * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
- int remainder;
- u8 *buff;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+ bytes[0] = hw->nvmupd_state;
- /* check offset range */
- if (cmd->offset > aq_total_len) {
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
- __func__, cmd->offset, aq_total_len);
- *perrno = -EINVAL;
- return -EINVAL;
- }
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
- /* check copylength range */
- if (cmd->data_size > (aq_total_len - cmd->offset)) {
- int new_len = aq_total_len - cmd->offset;
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, new_len);
- cmd->data_size = new_len;
+ return 0;
}
- remainder = cmd->data_size;
- if (cmd->offset < aq_desc_len) {
- u32 len = aq_desc_len - cmd->offset;
-
- len = min(len, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
- __func__, cmd->offset, cmd->offset + len);
-
- buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
-
- bytes += len;
- remainder -= len;
- buff = hw->nvm_buff.va;
- } else {
- buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ /* Clear status even it is not read and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
- if (remainder > 0) {
- int start_byte = buff - (u8 *)hw->nvm_buff.va;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
- __func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
- }
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ mutex_lock(&hw->aq.arq_mutex);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
- return 0;
-}
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
-/**
- * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = 0;
+ break;
+ }
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+ status = -EBUSY;
+ *perrno = -EBUSY;
+ break;
- /* check copylength range */
- if (cmd->data_size > aq_total_len) {
+ default:
+ /* invalid state, should never happen */
i40e_debug(hw, I40E_DEBUG_NVM,
- "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, aq_total_len);
- cmd->data_size = aq_total_len;
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = -EOPNOTSUPP;
+ *perrno = -ESRCH;
+ break;
}
- memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
-
- return 0;
+ mutex_unlock(&hw->aq.arq_mutex);
+ return status;
}
/**
- * i40e_nvmupd_nvm_read - Read NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
**/
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
}
+ hw->nvm_wait_opcode = 0;
- return status;
-}
-
-/**
- * i40e_nvmupd_nvm_erase - Erase an NVM module
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status = 0;
- bool last;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ default:
+ break;
}
-
- return status;
}
/**
- * i40e_nvmupd_nvm_write - Write NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- u8 preservation_flags;
- int status = 0;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
- preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
- status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last,
- preservation_flags, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
-
- return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ce1f11b8ad65..5a0699ca7ce5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
int i40e_set_mac_type(struct i40e_hw *hw);
-extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return i40e_ptype_lookup[ptype];
-}
-
/**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e7ebcb09f23c..b72a4b5d76b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1472,7 +1472,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_hw *hw = &pf->hw;
u32 pf_id;
long err;
@@ -1536,6 +1537,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u32 regval;
@@ -1555,7 +1557,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
ptp_clock_unregister(pf->ptp_clock);
pf->ptp_clock = NULL;
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ main_vsi->netdev->name);
}
if (i40e_is_ptp_pin_dev(&pf->hw)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 14ab642cafdb..432afbb64201 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -333,8 +333,11 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0d7177083708..c006f716a3bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
#include <net/mpls.h>
@@ -23,7 +24,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
- u32 flex_ptype, dtype_cmd;
+ u32 flex_ptype, dtype_cmd, vsi_id;
u16 i;
/* grab the next descriptor */
@@ -41,8 +42,8 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
- fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
+ vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id;
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
@@ -860,13 +861,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @vsi: pointer to vsi struct with tx queues
+ * @pf: pointer to PF struct
*
- * VSI has netdev and netdev has TX queues. This function is to check each of
- * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+ * LAN VSI has netdev and netdev has TX queues. This function checks each
+ * of those TX queues for hangs and triggers recovery by issuing a SW
+ * interrupt.
**/
-void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+void i40e_detect_recover_hung(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
@@ -1741,38 +1744,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1800,20 +1795,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1821,29 +1806,6 @@ checksum_fail:
}
/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1855,17 +1817,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -2144,9 +2108,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
*/
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
@@ -2630,7 +2592,22 @@ process_next:
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
+ * is used to update the interrupt throttling interval for the specified
+ * ITR index and optionally enforces a software interrupt. If @itr_idx is
+ * equal to I40E_ITR_NONE, no interval change is applied and only the
+ * @force_swint parameter is taken into account. If neither an interval
+ * change nor a software interrupt is requested, the built value simply
+ * enables the appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+ bool force_swint)
{
u32 val;
@@ -2644,23 +2621,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* an event in the PBA anyway so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled
*
- * The itr value is reported in microseconds, and the register
- * value is recorded in 2 microsecond units. For this reason we
- * only need to shift by the interval shift - 1 instead of the
- * full value.
+ * We have to shift the given value as it is reported in microseconds
+ * and the register value is recorded in 2 microsecond units.
*/
- itr &= I40E_ITR_MASK;
+ interval >>= 1;
+ /* 1. Enable vector interrupt
+ * 2. Update the interval for the specified ITR index
+ * (I40E_ITR_NONE in the register is used to indicate that
+ * no interval update is requested)
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+ /* 3. Enforce software interrupt trigger if requested
+ * (The rate of these software interrupts is limited by ITR2,
+ * which is set to 20K interrupts per second)
+ */
+ if (force_swint)
+ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+ I40E_SW_ITR);
return val;
}
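
To make the FIELD_PREP-based composition above concrete, here is a small freestanding sketch that packs an ITR index and a microsecond interval (halved into the register's 2 usec units) into a register word, with an optional software-interrupt trigger bit. The mask values and the field_prep() helper below are illustrative only; the real masks live in the i40e register headers and FIELD_PREP() comes from <linux/bitfield.h>.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout -- not the real I40E_PFINT_DYN_CTLN definition. */
#define DYN_CTLN_INTENA		(1u << 0)
#define DYN_CTLN_ITR_INDX_MASK	0x00000018u	/* bits 4:3 */
#define DYN_CTLN_INTERVAL_MASK	0x00000FE0u	/* bits 11:5 */
#define DYN_CTLN_SWINT_TRIG	(1u << 24)

/* Minimal stand-in for FIELD_PREP(): shift @val into @mask's position. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t buildreg_itr_sketch(uint32_t itr_idx, uint16_t interval_usecs,
				    int force_swint)
{
	uint32_t val;

	/* The register counts in 2 usec units, so halve the usec value. */
	interval_usecs >>= 1;

	val = DYN_CTLN_INTENA |
	      field_prep(DYN_CTLN_ITR_INDX_MASK, itr_idx) |
	      field_prep(DYN_CTLN_INTERVAL_MASK, interval_usecs);

	/* Optionally request a software interrupt on top of the update. */
	if (force_swint)
		val |= DYN_CTLN_SWINT_TRIG;

	return val;
}

int main(void)
{
	printf("0x%08x\n", buildreg_itr_sketch(1, 50, 1));
	return 0;
}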
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
* update so that it can only happen so often. So after either Tx or Rx are
@@ -2679,8 +2666,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
+ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
struct i40e_hw *hw = &vsi->back->hw;
- u32 intval;
+ u16 interval = 0;
+ u32 itr_val;
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
@@ -2702,8 +2691,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2712,25 +2701,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
- q_vector->tx.target_itr);
+ itr_idx = I40E_TX_ITR;
+ interval = q_vector->tx.target_itr;
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), intval);
+ /* Do not update interrupt control register if VSI is down */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ /* Update the ITR interval if necessary and enforce a software
+ * interrupt if we are exiting busy poll.
+ */
+ if (q_vector->in_busy_poll) {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+ q_vector->in_busy_poll = false;
+ } else {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+ }
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
/**
@@ -2845,6 +2845,8 @@ tx_only:
*/
if (likely(napi_complete_done(napi, work_done)))
i40e_update_enable_itr(vsi, q_vector);
+ else
+ q_vector->in_busy_poll = true;
return min(work_done, budget - 1);
}
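
The two hunks above implement the busy-poll exit handling: when napi_complete_done() fails because the queue is still being busy polled, the vector is flagged via in_busy_poll, and the next interrupt-enable path forces a software interrupt so no completions are missed. A rough sketch of that handshake, with all names hypothetical and the register write reduced to a printf:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical vector state mirroring q_vector->in_busy_poll. */
struct vec {
	bool in_busy_poll;
};

/* Stand-in for napi_complete_done(): returns false while busy polling. */
static bool napi_done(bool busy_polling)
{
	return !busy_polling;
}

/* Poll side: if NAPI could not be completed, remember that we were busy
 * polled so the re-enable path can force a software interrupt later.
 */
static void poll_sketch(struct vec *v, bool busy_polling)
{
	if (napi_done(busy_polling))
		printf("re-enable interrupt normally\n");
	else
		v->in_busy_poll = true;
}

/* Interrupt-enable side: exiting busy poll forces one software interrupt. */
static void enable_sketch(struct vec *v)
{
	int force_swint = v->in_busy_poll;

	v->in_busy_poll = false;
	printf("write DYN_CTLN, force_swint=%d\n", force_swint);
}

int main(void)
{
	struct vec v = { .in_busy_poll = false };

	poll_sketch(&v, true);	/* busy poll in progress */
	enable_sketch(&v);	/* forces the software interrupt */
	return 0;
}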
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index abf15067eb5d..7c26c9a2bf65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -68,6 +68,7 @@ enum i40e_dyn_idx {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
@@ -469,7 +470,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40e_detect_recover_hung(struct i40e_vsi *vsi);
+void i40e_detect_recover_hung(struct i40e_pf *pf);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index d9031499697e..28568e126850 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct i40e_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
- I40E_RX_PTYPE_OUTER_L2 = 0,
- I40E_RX_PTYPE_OUTER_IP = 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
- I40E_RX_PTYPE_OUTER_NONE = 0,
- I40E_RX_PTYPE_OUTER_IPV4 = 0,
- I40E_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
- I40E_RX_PTYPE_NOT_FRAG = 0,
- I40E_RX_PTYPE_FRAG = 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
- I40E_RX_PTYPE_TUNNEL_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
- I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
- I40E_RX_PTYPE_INNER_PROT_NONE = 0,
- I40E_RX_PTYPE_INNER_PROT_UDP = 1,
- I40E_RX_PTYPE_INNER_PROT_TCP = 2,
- I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
- I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
- I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum i40e_rx_ptype_payload_layer {
- I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 83a34e98bdc7..662622f01e31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -795,13 +795,13 @@ error_param:
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
+ struct i40e_vsi *main_vsi, *vsi;
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi;
u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
- vf->vf_id);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -1624,8 +1624,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
- int i, v;
u32 reg;
+ int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@@ -1636,11 +1636,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1649,14 +1648,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF to fail
* the previous check.
*/
- while (v < pf->num_alloc_vfs) {
- vf = &pf->vf[v];
+ while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1666,7 +1664,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
- v++;
+ ++vf;
}
}
@@ -1676,39 +1674,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
- if (v < pf->num_alloc_vfs)
+ if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- pf->vf[v].vf_id);
+ vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1717,12 +1715,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_cleanup_reset_vf(&pf->vf[v]);
+ i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);
@@ -3139,11 +3137,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
/* Allow to delete VF primary MAC only if it was not set
* administratively by PF or if VF is trusted.
*/
- if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
- i40e_can_vf_change_mac(vf))
- was_unimac_deleted = true;
- else
- continue;
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+ }
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
@@ -3323,8 +3322,9 @@ error_param:
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
- int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ struct i40e_vsi *main_vsi;
int aq_ret = 0;
+ int abs_vf_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
@@ -3332,8 +3332,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
- msg, msglen);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);
error_param:
/* send the response to the VF */
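
Several hunks in this file replace index-based VF loops (for (v = 0; v < pf->num_alloc_vfs; v++)) with pointer iteration over the pf->vf array, dropping the extra index variable and the repeated &pf->vf[v] lookups. A tiny standalone illustration of the same pattern over a plain array; the struct and values are made up:

#include <stdio.h>

struct vf {
	int vf_id;
	int resetting;
};

int main(void)
{
	struct vf vfs[4] = {
		{ .vf_id = 0 }, { .vf_id = 1, .resetting = 1 },
		{ .vf_id = 2 }, { .vf_id = 3 },
	};
	int num_vfs = 4;
	struct vf *vf;

	/* Pointer iteration: walk from the first element up to (but not
	 * including) one past the last, mirroring the reworked VF loops.
	 */
	for (vf = &vfs[0]; vf < &vfs[num_vfs]; ++vf) {
		if (vf->resetting)
			continue;
		printf("trigger reset on VF %d\n", vf->vf_id);
	}

	return 0;
}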
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 11500003af0d..4e885df789ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
goto out;
@@ -483,7 +482,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
- xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi);
if (!first)
first = bi;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index db8188c7ac4b..23a6557fc3db 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -287,7 +287,7 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define IAVF_FLAG_LEGACY_RX BIT(15)
+/* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 5a25233a89d5..aa751ce3425b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -432,259 +432,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT iavf_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum iavf_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- IAVF_RX_PTYPE_##OUTER_FRAG, \
- IAVF_RX_PTYPE_TUNNEL_##T, \
- IAVF_RX_PTYPE_TUNNEL_END_##TE, \
- IAVF_RX_PTYPE_##TEF, \
- IAVF_RX_PTYPE_INNER_PROT_##I, \
- IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
-#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
-#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- IAVF_PTT_UNUSED_ENTRY(0),
- IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(4),
- IAVF_PTT_UNUSED_ENTRY(5),
- IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(8),
- IAVF_PTT_UNUSED_ENTRY(9),
- IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(25),
- IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(32),
- IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(39),
- IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(47),
- IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(54),
- IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(62),
- IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(69),
- IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(77),
- IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(84),
- IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(91),
- IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(98),
- IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(105),
- IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(113),
- IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(120),
- IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(128),
- IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(135),
- IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(143),
- IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(150),
- IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* iavf_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 378c3e9ddf9d..52273f7eab2c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -240,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct iavf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
- IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
-};
-
-#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
-
/**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -342,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
return IAVF_STATS_LEN +
(IAVF_QUEUE_STATS_LEN * 2 *
netdev->real_num_tx_queues);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return IAVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -386,21 +361,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
}
/**
- * iavf_get_priv_flag_strings - Get private flag strings
- * @netdev: network interface device structure
- * @data: buffer for string data
- *
- * Builds the private flags string table
- **/
-static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
-{
- unsigned int i;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
-}
-
-/**
* iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure
* @data: buffer for string data
@@ -438,109 +398,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data);
break;
- case ETH_SS_PRIV_FLAGS:
- iavf_get_priv_flag_strings(netdev, data);
- break;
default:
break;
}
}
/**
- * iavf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 iavf_get_priv_flags(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * iavf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- int ret = 0;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means
- * something else must have modified the flags variable since we
- * copied it. We'll just punt with an error and log something in the
- * message buffer.
- */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & IAVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
- ret = iavf_wait_for_reset(adapter);
- if (ret)
- netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
- }
- }
-
- return ret;
-}
-
-/**
* iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure
*
@@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
strscpy(drvinfo->driver, iavf_driver_name, 32);
strscpy(drvinfo->fw_version, "N/A", 4);
strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -1995,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count,
- .get_priv_flags = iavf_get_priv_flags,
- .set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ef2440f3abf8..c6dff0963053 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
@@ -45,6 +47,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -714,40 +718,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
- unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw;
- int i;
-
- /* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
- struct net_device *netdev = adapter->netdev;
-
- /* For jumbo frames on systems with 4K pages we have to use
- * an order 1 page, so we might as well increase the size
- * of our Rx buffer to make better use of the available space
- */
- rx_buf_len = IAVF_RXBUFFER_3072;
-
- /* We use a 1536 buffer size for configurations with
- * standard Ethernet mtu. On x86 this gives us enough room
- * for shared info and 192 bytes of padding.
- */
- if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
- }
-#endif
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
- if (adapter->flags & IAVF_FLAG_LEGACY_RX)
- clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
- else
- set_ring_build_skb_enabled(&adapter->rx_rings[i]);
- }
}
/**
@@ -1615,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
- rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = IAVF_ITR_RX_DEF;
}
@@ -2642,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+ netdev->max_mtu = LIBIE_MAX_MTU;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -3503,6 +3475,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: true if the configurations are the same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3559,7 +3559,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
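
The new iavf_is_tc_config_same() above tightens the "same config" check from comparing only the number of TCs to also comparing each TC's queue count and offset before skipping the reconfiguration. A condensed illustration of that element-wise comparison; the struct and values below are invented:

#include <stdbool.h>
#include <stdio.h>

struct chan { int count; int offset; };

/* Two TC configs are the same only if both the TC count and every
 * per-TC (count, offset) pair match.
 */
static bool tc_config_same(const struct chan *cur, int cur_tc,
			   const struct chan *req, int req_tc)
{
	if (cur_tc != req_tc)
		return false;

	for (int i = 0; i < cur_tc; i++) {
		if (cur[i].count != req[i].count ||
		    cur[i].offset != req[i].offset)
			return false;
	}
	return true;
}

int main(void)
{
	struct chan cur[] = { { 4, 0 }, { 4, 4 } };
	struct chan req[] = { { 4, 0 }, { 2, 4 } };	/* second TC differs */

	printf("same=%d\n", tc_config_same(cur, 2, req, 2));
	return 0;
}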
@@ -3757,6 +3757,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -4296,7 +4300,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
@@ -5023,7 +5027,7 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int __maybe_unused iavf_suspend(struct device *dev_d)
+static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -5051,7 +5055,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int __maybe_unused iavf_resume(struct device *dev_d)
+static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
@@ -5238,14 +5242,14 @@ static void iavf_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
-static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
.name = iavf_driver_name,
.id_table = iavf_pci_tbl,
.probe = iavf_probe,
.remove = iavf_remove,
- .driver.pm = &iavf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
.shutdown = iavf_shutdown,
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 4a48e6171405..48c3901381b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
-extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
-
-static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return iavf_ptype_lookup[ptype];
-}
-
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index b71484c87a84..26b424fd6718 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bitfield.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include "iavf.h"
@@ -184,7 +185,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* pending work.
*/
packets = tx_ring->stats.packets & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ if (tx_ring->prev_pkt_ctr == packets) {
iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
@@ -193,7 +194,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* to iavf_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt_ctr =
+ tx_ring->prev_pkt_ctr =
iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
@@ -319,7 +320,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__IAVF_VSI_DOWN, vsi->state) &&
(IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
+ tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
}
/* notify netdev of completed buffers */
@@ -674,7 +675,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt_ctr = -1;
+ tx_ring->prev_pkt_ctr = -1;
return 0;
err:
@@ -689,11 +690,8 @@ err:
**/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
- unsigned long bi_size;
- u16 i;
-
/* ring already cleared, nothing to do */
- if (!rx_ring->rx_bi)
+ if (!rx_ring->rx_fqes)
return;
if (rx_ring->skb) {
@@ -701,41 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ /* Free all the Rx ring buffers */
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- if (!rx_bi->page)
- continue;
+ page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- rx_bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
-
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
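
The rewritten cleanup above no longer sweeps the whole ring; it only walks the occupied region from next_to_clean up to next_to_use, wrapping at the ring size, and hands each page back to the page pool instead of syncing and unmapping it by hand. The wraparound walk in isolation, with the ring contents replaced by plain slot numbers and all names hypothetical:

#include <stdio.h>

int main(void)
{
	int count = 8;			/* ring size */
	int next_to_clean = 6;		/* first occupied slot */
	int next_to_use = 3;		/* first free slot */

	/* Visit every occupied descriptor, wrapping past the end of the
	 * ring, exactly like the rewritten iavf_clean_rx_ring() loop.
	 */
	for (int i = next_to_clean; i != next_to_use; ) {
		printf("release buffer in slot %d\n", i);

		if (++i == count)
			i = 0;
	}

	return 0;
}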
@@ -748,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
+ struct libeth_fq fq = {
+ .fqes = rx_ring->rx_fqes,
+ .pp = rx_ring->pp,
+ };
+
iavf_clean_rx_ring(rx_ring);
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
if (rx_ring->desc) {
- dma_free_coherent(rx_ring->dev, rx_ring->size,
+ dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
+
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
}
/**
@@ -767,38 +747,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
**/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ struct libeth_fq fq = {
+ .count = rx_ring->count,
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ .nid = NUMA_NO_NODE,
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rx_ring->pp = fq.pp;
+ rx_ring->rx_fqes = fq.fqes;
+ rx_ring->truesize = fq.truesize;
+ rx_ring->rx_buf_len = fq.buf_len;
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
goto err;
}
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
+
err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
+
return -ENOMEM;
}
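
The setup path above follows a create/use/teardown pairing: the libeth fill queue is created first (which provides the page pool and the FQE array copied into the ring), the descriptor memory is allocated second, and a failure of the latter tears the fill queue back down and clears the cached handles. Only the struct fields and helpers visible in the diff (libeth_fq, libeth_rx_fq_create/destroy) are taken from the source; the sketch below replaces them with hypothetical userspace stand-ins just to show the error-handling shape.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the fill-queue handles seen in the diff. */
struct fq {
	void *pp;	/* "page pool" */
	void *fqes;	/* "fill queue element array" */
};

static int fq_create(struct fq *fq, int count)
{
	fq->fqes = calloc(count, 64);
	if (!fq->fqes)
		return -1;

	fq->pp = malloc(64);
	if (!fq->pp) {
		free(fq->fqes);
		fq->fqes = NULL;
		return -1;
	}
	return 0;
}

static void fq_destroy(struct fq *fq)
{
	free(fq->pp);
	free(fq->fqes);
	fq->pp = NULL;
	fq->fqes = NULL;
}

/* Pattern from iavf_setup_rx_descriptors(): create the fill queue, then
 * the descriptor memory; if the latter fails, destroy the queue and
 * clear the cached handles before returning the error.
 */
int main(void)
{
	struct fq fq;
	void *desc;

	if (fq_create(&fq, 512))
		return 1;

	desc = malloc(512 * 32);
	if (!desc) {
		fq_destroy(&fq);
		return 1;
	}

	printf("rx ring ready\n");
	free(desc);
	fq_destroy(&fq);
	return 0;
}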
@@ -811,9 +799,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -824,69 +809,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
}
/**
- * iavf_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
-}
-
-/**
- * iavf_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- /* alloc new page for storage */
- page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, iavf_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = iavf_rx_offset(rx_ring);
-
- /* initialize pagecnt_bias to 1 representing we fully own page */
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
-/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
@@ -916,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
**/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
u16 ntu = rx_ring->next_to_use;
union iavf_rx_desc *rx_desc;
- struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
rx_desc = IAVF_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
do {
- if (!iavf_alloc_mapped_page(rx_ring, bi))
- goto no_buffers;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR)
+ goto no_buffers;
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = IAVF_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
ntu = 0;
}
@@ -966,6 +887,8 @@ no_buffers:
if (rx_ring->next_to_use != ntu)
iavf_release_rx_desc(rx_ring, ntu);
+ rx_ring->rx_stats.alloc_page_failed++;
+
/* make sure to come back via polling to try again after
* allocation failure
*/
@@ -982,38 +905,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb,
union iavf_rx_desc *rx_desc)
{
- struct iavf_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1037,17 +952,7 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IAVF_RX_PTYPE_INNER_PROT_TCP:
- case IAVF_RX_PTYPE_INNER_PROT_UDP:
- case IAVF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1055,29 +960,6 @@ checksum_fail:
}
/**
- * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static int iavf_ptype_to_htype(u8 ptype)
-{
- struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1089,17 +971,19 @@ static void iavf_rx_hash(struct iavf_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -1152,95 +1036,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
}
/**
- * iavf_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *old_buff)
-{
- struct iavf_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
- * iavf_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
-{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
- struct page *page = rx_buffer->page;
-
- /* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
- return false;
-#else
-#define IAVF_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
- return false;
-#endif
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
- rx_buffer->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
* iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
+ * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1248,206 +1046,50 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- struct sk_buff *skb,
+static void iavf_add_rx_frag(struct sk_buff *skb,
+ const struct libeth_fqe *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
-#endif
-
- if (!size)
- return;
+ u32 hr = rx_buffer->page->pp->p.offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
- * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
- const unsigned int size)
-{
- struct iavf_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- prefetchw(rx_buffer->page);
- if (!size)
- return rx_buffer;
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buffer->pagecnt_bias--;
-
- return rx_buffer;
-}
-
-/**
- * iavf_construct_skb - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- unsigned int size)
-{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
- unsigned int headlen;
- struct sk_buff *skb;
-
- if (!rx_buffer)
- return NULL;
- /* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- IAVF_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* update all of the pointers */
- size -= headlen;
- if (size) {
- skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset + headlen,
- size, truesize);
-
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
- } else {
- /* buffer is unused, reset bias back to rx_buffer */
- rx_buffer->pagecnt_bias++;
- }
-
- return skb;
+ rx_buffer->offset + hr, size, rx_buffer->truesize);
}
/**
* iavf_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
-#endif
+ u32 hr = rx_buffer->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
- if (!rx_buffer || !size)
- return NULL;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
+ va = page_address(rx_buffer->page) + rx_buffer->offset;
+ net_prefetch(va + hr);
/* build an skb around the page buffer */
- skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va, rx_buffer->truesize);
if (unlikely(!skb))
return NULL;
+ skb_mark_for_recycle(skb);
+
/* update pointers within the skb to store the data */
- skb_reserve(skb, IAVF_SKB_PAD);
+ skb_reserve(skb, hr);
__skb_put(skb, size);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-
return skb;
}
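Editorial note: the new build path derives its headroom from the page_pool's configured offset and lets the pool recycle the page when the skb is freed, replacing the removed page_offset/pagecnt_bias bookkeeping. A minimal sketch of that pattern, assuming a pool set up with headroom (names are illustrative, not the driver's):

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *build_pp_skb_example(struct page *page, u32 offset,
                                            u32 truesize, u32 size,
                                            u32 headroom)
{
        void *va = page_address(page) + offset;
        struct sk_buff *skb;

        net_prefetch(va + headroom);

        /* wrap the page_pool buffer without copying */
        skb = napi_build_skb(va, truesize);
        if (unlikely(!skb))
                return NULL;

        /* let the page return to its page_pool when the skb is freed */
        skb_mark_for_recycle(skb);

        skb_reserve(skb, headroom);
        __skb_put(skb, size);

        return skb;
}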
/**
- * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer)
-{
- if (!rx_buffer)
- return;
-
- if (iavf_can_reuse_rx_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- iavf_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
-}
-
-/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
@@ -1500,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct iavf_rx_buffer *rx_buffer;
+ struct libeth_fqe *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag = 0;
@@ -1535,28 +1177,27 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+
+ rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
+ if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = iavf_build_skb(rx_ring, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, size);
else
- skb = iavf_construct_skb(rx_ring, rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer && size)
- rx_buffer->pagecnt_bias++;
break;
}
- iavf_put_rx_buffer(rx_ring, rx_buffer);
+skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb))
+ if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
continue;
/* ERR_MASK will only have valid bits if EOP set, and
@@ -1743,8 +1384,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
clean_complete = false;
continue;
}
- arm_wb |= ring->arm_wb;
- ring->arm_wb = false;
+ arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB);
+ ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB;
}
/* Handle case where we are called by netpoll with a budget of 0 */
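Editorial note: the napi_poll hunk above also folds the per-ring bool arm_wb into the existing flags word (IAVF_TXR_FLAGS_ARM_WB), shrinking struct iavf_ring and keeping all ring state bits in one place. A tiny sketch of the test-and-clear idiom that replaces the bool (illustrative names only):

#include <linux/bits.h>
#include <linux/types.h>

#define RING_FLAG_ARM_WB_EXAMPLE        BIT(1)

struct ring_flags_example {
        u16 flags;
};

static bool ring_test_and_clear_arm_wb_example(struct ring_flags_example *ring)
{
        bool armed = ring->flags & RING_FLAG_ARM_WB_EXAMPLE;

        ring->flags &= ~RING_FLAG_ARM_WB_EXAMPLE;
        return armed;
}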
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 10ba36602c0c..d7b5587aeb8e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,79 +80,8 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define IAVF_RXBUFFER_256 256
-#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
-#define IAVF_RXBUFFER_2048 2048
-#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
-#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
-#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc
-#define IAVF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames. We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame. This leaves us with 512 bytes of room. From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define IAVF_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
-
-static inline int iavf_compute_pad(int rx_buf_len)
-{
- int page_size, pad_size;
-
- page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
- return pad_size;
-}
-
-static inline int iavf_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (IAVF_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = IAVF_RXBUFFER_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return iavf_compute_pad(rx_buf_len);
-}
-
-#define IAVF_SKB_PAD iavf_skb_pad()
-#else
-#define IAVF_2K_TOO_SMALL_WITH_PADDING false
-#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/**
* iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -271,17 +200,6 @@ struct iavf_tx_buffer {
u32 tx_flags;
};
-struct iavf_rx_buffer {
- dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
-};
-
struct iavf_queue_stats {
u64 packets;
u64 bytes;
@@ -293,7 +211,6 @@ struct iavf_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -301,14 +218,6 @@ struct iavf_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
- u64 page_reuse_count;
- u64 realloc_count;
-};
-
-enum iavf_ring_state_t {
- __IAVF_TX_FDIR_INIT_DONE,
- __IAVF_TX_XPS_INIT_DONE,
- __IAVF_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -326,16 +235,19 @@ enum iavf_ring_state_t {
struct iavf_ring {
struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ union {
+ struct page_pool *pp; /* Used on Rx for buffer management */
+ struct device *dev; /* Used on Tx for DMA mapping */
+ };
struct net_device *netdev; /* netdev ring maps to */
union {
+ struct libeth_fqe *rx_fqes;
struct iavf_tx_buffer *tx_bi;
- struct iavf_rx_buffer *rx_bi;
};
- DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
- u16 queue_index; /* Queue number of ring */
- u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ u32 truesize;
+
+ u16 queue_index; /* Queue number of ring */
/* high bit set means dynamic, use accessors routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
@@ -345,23 +257,15 @@ struct iavf_ring {
u16 itr_setting;
u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 rx_buf_len;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u8 atr_sample_rate;
- u8 atr_count;
-
- bool ring_active; /* is ring online or not */
- bool arm_wb; /* do something to arm write back */
- u8 packet_stride;
-
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
+/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
@@ -374,6 +278,7 @@ struct iavf_ring {
struct iavf_rx_queue_stats rx_stats;
};
+ int prev_pkt_ctr; /* For Tx stall detection */
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
@@ -381,7 +286,6 @@ struct iavf_ring {
struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
@@ -390,22 +294,9 @@ struct iavf_ring {
* iavf_clean_rx_ring_irq() is called
* for this ring.
*/
-} ____cacheline_internodealigned_in_smp;
-
-static inline bool ring_uses_build_skb(struct iavf_ring *ring)
-{
- return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
-}
-static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
-
-static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
+ u32 rx_buf_len;
+} ____cacheline_internodealigned_in_smp;
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
@@ -428,17 +319,6 @@ struct iavf_ring_container {
#define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
-{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
- return 0;
-}
-
-#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
-
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
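Editorial note: a notable piece of this header diff is the union of the page_pool pointer and the DMA device pointer in struct iavf_ring. An Rx ring only ever needs the former and a Tx ring only the latter, so sharing the slot avoids growing the structure. Sketched below with an illustrative struct, not the driver's actual layout:

struct page_pool;
struct device;

struct ring_union_example {
        void *desc;                     /* descriptor ring memory */
        union {
                struct page_pool *pp;   /* Rx rings: buffer management */
                struct device *dev;     /* Tx rings: DMA mapping */
        };
};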
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 2b6a207fa441..f6b09e57abce 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -10,8 +10,6 @@
#include "iavf_adminq.h"
#include "iavf_devids.h"
-#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
-
/* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
@@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
#define IAVF_RXD_QW1_PTYPE_SHIFT 30
#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum iavf_rx_l2_ptype {
- IAVF_RX_PTYPE_L2_RESERVED = 0,
- IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IAVF_RX_PTYPE_L2_ARP = 11,
- IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
- IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct iavf_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum iavf_rx_ptype_outer_ip {
- IAVF_RX_PTYPE_OUTER_L2 = 0,
- IAVF_RX_PTYPE_OUTER_IP = 1
-};
-
-enum iavf_rx_ptype_outer_ip_ver {
- IAVF_RX_PTYPE_OUTER_NONE = 0,
- IAVF_RX_PTYPE_OUTER_IPV4 = 0,
- IAVF_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum iavf_rx_ptype_outer_fragmented {
- IAVF_RX_PTYPE_NOT_FRAG = 0,
- IAVF_RX_PTYPE_FRAG = 1
-};
-
-enum iavf_rx_ptype_tunnel_type {
- IAVF_RX_PTYPE_TUNNEL_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum iavf_rx_ptype_tunnel_end_prot {
- IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum iavf_rx_ptype_inner_prot {
- IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
- IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
- IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
- IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum iavf_rx_ptype_payload_layer {
- IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 22f2df7c460b..1e543f6a7c30 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
@@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
+ u32 i, max_frame;
size_t len;
- if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
- max_frame = IAVF_MAX_RXBUFFER;
+ max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
+ max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
- /* Limit maximum frame size when jumbo frames is not enabled */
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
- (adapter->netdev->mtu <= ETH_DATA_LEN))
- max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
-
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
- vqpi->rxq.databuffer_size =
- ALIGN(adapter->rx_rings[i].rx_buf_len,
- BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
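Editorial note: max_pkt_size now starts from the largest frame the Rx buffer layout can hold and only lets a non-zero VF-reported max_mtu shrink it; min_not_zero() is what makes an unreported (zero) max_mtu a no-op. A small sketch of that clamp, where rx_frame_cap is a stand-in for the LIBIE_MAX_RX_FRM_LEN() result:

#include <linux/minmax.h>
#include <linux/types.h>

static u32 clamp_max_frame_example(u32 rx_frame_cap, u16 vf_max_mtu)
{
        /* a zero max_mtu means "not reported" and is ignored */
        return min_not_zero((u32)vf_max_mtu, rx_frame_cap);
}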
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index cddd82d4ca0f..03500e28ac99 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -5,6 +5,7 @@
# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
@@ -28,7 +29,8 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
- ice_devlink.o \
+ devlink/devlink.o \
+ devlink/devlink_port.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
@@ -36,7 +38,8 @@ ice-y := ice_main.o \
ice_repr.o \
ice_tc_lib.o \
ice_fwlog.o \
- ice_debugfs.o
+ ice_debugfs.o \
+ ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index b516e42b41f0..c4b69655cdf5 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -5,13 +5,11 @@
#include "ice.h"
#include "ice_lib.h"
-#include "ice_devlink.h"
+#include "devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
-static int ice_active_port_option = -1;
-
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -478,17 +476,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
if (ice_is_eswitch_mode_switchdev(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Go to legacy mode before doing reinit\n");
+ "Go to legacy mode before doing reinit");
return -EOPNOTSUPP;
}
if (ice_is_adq_active(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Turn off ADQ before doing reinit\n");
+ "Turn off ADQ before doing reinit");
return -EOPNOTSUPP;
}
if (ice_has_vfs(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Remove all VFs before doing reinit\n");
+ "Remove all VFs before doing reinit");
return -EOPNOTSUPP;
}
ice_devlink_reinit_down(pf);
@@ -526,248 +524,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
}
/**
- * ice_devlink_port_opt_speed_str - convert speed to a string
- * @speed: speed value
- */
-static const char *ice_devlink_port_opt_speed_str(u8 speed)
-{
- switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
- case ICE_AQC_PORT_OPT_MAX_LANE_100M:
- return "0.1";
- case ICE_AQC_PORT_OPT_MAX_LANE_1G:
- return "1";
- case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
- return "2.5";
- case ICE_AQC_PORT_OPT_MAX_LANE_5G:
- return "5";
- case ICE_AQC_PORT_OPT_MAX_LANE_10G:
- return "10";
- case ICE_AQC_PORT_OPT_MAX_LANE_25G:
- return "25";
- case ICE_AQC_PORT_OPT_MAX_LANE_50G:
- return "50";
- case ICE_AQC_PORT_OPT_MAX_LANE_100G:
- return "100";
- }
-
- return "-";
-}
-
-#define ICE_PORT_OPT_DESC_LEN 50
-/**
- * ice_devlink_port_options_print - Print available port split options
- * @pf: the PF to print split port options
+ * ice_get_tx_topo_user_sel - Read user's choice from flash
+ * @pf: pointer to pf structure
+ * @layers: value read from flash will be saved here
*
- * Prints a table with available port split options and max port speeds
+ * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
+ *
+ * Return: zero when read was successful, negative values otherwise.
*/
-static void ice_devlink_port_options_print(struct ice_pf *pf)
+static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
{
- u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
- struct ice_aqc_get_port_options_elem *options, *opt;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- char desc[ICE_PORT_OPT_DESC_LEN];
- const char *str;
- int status;
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
- sizeof(*options), GFP_KERNEL);
- if (!options)
- return;
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ return err;
- for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
- opt = options + i * ICE_AQC_PORT_OPT_MAX;
- options_count = ICE_AQC_PORT_OPT_MAX;
- active_valid = 0;
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
- i, true, &active_idx,
- &active_valid, &pending_idx,
- &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
- i, status);
- goto err;
- }
- }
+ if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
+ *layers = ICE_SCHED_5_LAYERS;
+ else
+ *layers = ICE_SCHED_9_LAYERS;
- dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
- dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
- dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+exit_release_res:
+ ice_release_nvm(hw);
- for (i = 0; i < options_count; i++) {
- cnt = 0;
+ return err;
+}
- if (i == ice_active_port_option)
- str = "Active";
- else if ((i == pending_idx) && pending_valid)
- str = "Pending";
- else
- str = "";
+/**
+ * ice_update_tx_topo_user_sel - Save user's preference in flash
+ * @pf: pointer to pf structure
+ * @layers: value to be saved in flash
+ *
+ * Variable "layers" defines the user's preference for the number of layers in
+ * the Tx Scheduler Topology Tree. This choice is stored in the PFA TLV field
+ * and picked up by the driver on the next init.
+ *
+ * Return: zero when save was successful, negative values otherwise.
+ */
+static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
+{
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-8s", str);
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err)
+ return err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-6u", options[i].pmd);
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
- speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
- str = ice_devlink_port_opt_speed_str(speed);
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%3s ", str);
- }
+ if (layers == ICE_SCHED_5_LAYERS)
+ usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
+ else
+ usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
- dev_dbg(dev, "%s\n", desc);
- }
+ err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
+ sizeof(usr_sel.data), &usr_sel.data,
+ true, NULL, NULL);
+exit_release_res:
+ ice_release_nvm(hw);
-err:
- kfree(options);
+ return err;
}
/**
- * ice_devlink_aq_set_port_option - Send set port option admin queue command
- * @pf: the PF to print split port options
- * @option_idx: selected port option
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to store the parameter value
*
- * Sends set port option admin queue command with selected port option and
- * calls NVM write activate.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct device *dev = ice_pf_to_dev(pf);
- int status;
-
- status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
- if (status) {
- dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
- return -EIO;
- }
-
- status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- return -EIO;
- }
-
- status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
- if (status) {
- dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
- ice_release_nvm(&pf->hw);
- return -EIO;
- }
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
- ice_release_nvm(&pf->hw);
+ err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
+ if (err)
+ return err;
- NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
return 0;
}
/**
- * ice_devlink_port_split - .port_split devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @count: number of ports to split to
- * @extack: extended netdev ack structure
- *
- * Callback for the devlink .port_split operation.
- *
- * Unfortunately, the devlink expression of available options is limited
- * to just a number, so search for an FW port option which supports
- * the specified number. As there could be multiple FW port options with
- * the same port split count, allow switching between them. When the same
- * port split count request is issued again, switch to the next FW port
- * option with the same port split count.
+ * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context to get the parameter value
+ * @extack: netlink extended ACK structure
*
- * Return: zero on success or an error code on failure.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
- unsigned int count, struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, j, active_idx, pending_idx, new_option;
struct ice_pf *pf = devlink_priv(devlink);
- u8 option_count = ICE_AQC_PORT_OPT_MAX;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port split options, err = %d\n",
- status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
- return -EIO;
- }
-
- new_option = ICE_AQC_PORT_OPT_MAX;
- active_idx = pending_valid ? pending_idx : active_idx;
- for (i = 1; i <= option_count; i++) {
- /* In order to allow switching between FW port options with
- * the same port split count, search for a new option starting
- * from the active/pending option (with array wrap around).
- */
- j = (active_idx + i) % option_count;
-
- if (count == options[j].pmd) {
- new_option = j;
- break;
- }
- }
-
- if (new_option == active_idx) {
- dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
- count);
- NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
-
- if (new_option == ICE_AQC_PORT_OPT_MAX) {
- dev_dbg(dev, "request to split: count: %u not found\n", count);
- NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
+ int err;
- status = ice_devlink_aq_set_port_option(pf, new_option, extack);
- if (status)
- return status;
+ err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
+ if (err)
+ return err;
- ice_devlink_port_options_print(pf);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");
return 0;
}
/**
- * ice_devlink_port_unsplit - .port_unsplit devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
+ * parameter value
+ * @devlink: unused pointer to devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to validate
+ * @extack: netlink extended ACK structure
*
- * Callback for the devlink .port_unsplit operation.
- * Calls ice_devlink_port_split with split count set to 1.
- * There could be no FW option available with split count 1.
+ * Supported values are:
+ * - 5 - five-layer Tx Scheduler Topology Tree
+ * - 9 - nine-layer Tx Scheduler Topology Tree
*
- * Return: zero on success or an error code on failure.
+ * Return: zero when passed parameter value is supported. Negative value on
+ * error.
*/
-static int
-ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- return ice_devlink_port_split(devlink, port, 1, extack);
+ if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Wrong number of tx scheduler layers provided.");
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -1290,18 +1193,16 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct ice_vsi_cfg_params params;
int err;
err = ice_init_dev(pf);
if (err)
return err;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ vsi->flags = ICE_VSI_FLAG_INIT;
rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
rtnl_unlock();
if (err)
goto err_vsi_cfg;
@@ -1391,9 +1292,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool roce_ena = ctx->val.vbool;
@@ -1442,9 +1343,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool iw_ena = ctx->val.vbool;
@@ -1482,6 +1383,11 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
+enum ice_param_id {
+ ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+};
+
static const struct devlink_param ice_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
@@ -1491,7 +1397,13 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
-
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ "tx_scheduling_layers",
+ DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ ice_devlink_tx_sched_layers_get,
+ ice_devlink_tx_sched_layers_set,
+ ice_devlink_tx_sched_layers_validate),
};
static void ice_devlink_free(void *devlink_ptr)
@@ -1534,7 +1446,7 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_register(devlink);
+ devl_register(devlink);
}
/**
@@ -1545,197 +1457,28 @@ void ice_devlink_register(struct ice_pf *pf)
*/
void ice_devlink_unregister(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
-}
-
-/**
- * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
- * @pf: the PF to create a devlink port for
- * @ppid: struct with switch id information
- */
-static void
-ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
-{
- struct pci_dev *pdev = pf->pdev;
- u64 id;
-
- id = pci_get_dsn(pdev);
-
- ppid->id_len = sizeof(id);
- put_unaligned_be64(id, &ppid->id);
+ devl_unregister(priv_to_devlink(pf));
}
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
+ size_t params_size;
- return devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-void ice_devlink_unregister_params(struct ice_pf *pf)
-{
- devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-/**
- * ice_devlink_set_port_split_options - Set port split options
- * @pf: the PF to set port split options
- * @attrs: devlink attributes
- *
- * Sets devlink port split options based on available FW port options
- */
-static void
-ice_devlink_set_port_split_options(struct ice_pf *pf,
- struct devlink_port_attrs *attrs)
-{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
- status);
- return;
- }
-
- /* find the biggest available port split count */
- for (i = 0; i < option_count; i++)
- attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
-
- attrs->splittable = attrs->lanes ? 1 : 0;
- ice_active_port_option = active_idx;
-}
-
-static const struct devlink_port_ops ice_devlink_port_ops = {
- .port_split = ice_devlink_port_split,
- .port_unsplit = ice_devlink_port_unsplit,
-};
-
-/**
- * ice_devlink_create_pf_port - Create a devlink port for this PF
- * @pf: the PF to create a devlink port for
- *
- * Create and register a devlink_port for this PF.
- * This function has to be called under devl_lock.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_pf_port(struct ice_pf *pf)
-{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- devlink = priv_to_devlink(pf);
-
- dev = ice_pf_to_dev(pf);
-
- devlink_port = &pf->devlink_port;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return -EIO;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.bus.func;
-
- /* As FW supports only port split options for whole device,
- * set port split options only for first PF.
- */
- if (pf->hw.pf_id == 0)
- ice_devlink_set_port_split_options(pf, &attrs);
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
-
- err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
- if (err) {
- dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
- pf->hw.pf_id, err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
- *
- * Unregisters the devlink_port structure associated with this PF.
- * This function has to be called under devl_lock.
- */
-void ice_devlink_destroy_pf_port(struct ice_pf *pf)
-{
- devl_port_unregister(&pf->devlink_port);
-}
-
-/**
- * ice_devlink_create_vf_port - Create a devlink port for this VF
- * @vf: the VF to create a port for
- *
- * Create and register a devlink_port for this VF.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_vf_port(struct ice_vf *vf)
-{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_pf *pf;
- int err;
-
- pf = vf->pf;
- dev = ice_pf_to_dev(pf);
- devlink_port = &vf->devlink_port;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = pf->hw.bus.func;
- attrs.pci_vf.vf = vf->vf_id;
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
+ params_size = ARRAY_SIZE(ice_devlink_params);
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
- if (err) {
- dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
- vf->vf_id, err);
- return err;
- }
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ params_size--;
- return 0;
+ return devl_params_register(devlink, ice_devlink_params,
+ params_size);
}
-/**
- * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
- * @vf: the VF to cleanup
- *
- * Unregisters the devlink_port structure associated with this VF.
- */
-void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+void ice_devlink_unregister_params(struct ice_pf *pf)
{
- devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ devl_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
@@ -1976,8 +1719,8 @@ void ice_devlink_init_regions(struct ice_pf *pf)
u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
- pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
- nvm_size);
+ pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
if (IS_ERR(pf->nvm_region)) {
dev_err(dev, "failed to create NVM devlink region, err %ld\n",
PTR_ERR(pf->nvm_region));
@@ -1985,17 +1728,17 @@ void ice_devlink_init_regions(struct ice_pf *pf)
}
sram_size = pf->hw.flash.sr_words * 2u;
- pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
- 1, sram_size);
+ pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
if (IS_ERR(pf->sram_region)) {
dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
PTR_ERR(pf->sram_region));
pf->sram_region = NULL;
}
- pf->devcaps_region = devlink_region_create(devlink,
- &ice_devcaps_region_ops, 10,
- ICE_AQ_MAX_BUF_LEN);
+ pf->devcaps_region = devl_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
if (IS_ERR(pf->devcaps_region)) {
dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
PTR_ERR(pf->devcaps_region));
@@ -2012,11 +1755,11 @@ void ice_devlink_init_regions(struct ice_pf *pf)
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
- devlink_region_destroy(pf->nvm_region);
+ devl_region_destroy(pf->nvm_region);
if (pf->sram_region)
- devlink_region_destroy(pf->sram_region);
+ devl_region_destroy(pf->sram_region);
if (pf->devcaps_region)
- devlink_region_destroy(pf->devcaps_region);
+ devl_region_destroy(pf->devcaps_region);
}
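Editorial note: throughout this file the devlink_*() helpers are swapped for their devl_*() counterparts (devl_register(), devl_params_register(), devl_region_create(), ...). By devlink convention the devl_*() variants assume the caller already holds the devlink instance lock, which the driver is expected to take around init and teardown. A rough sketch of a call site under that assumption (not the driver's code):

#include <linux/err.h>
#include <net/devlink.h>

static struct devlink_region *
create_region_example(struct devlink *devlink,
                      const struct devlink_region_ops *ops, u64 size)
{
        struct devlink_region *region;

        devl_lock(devlink);
        region = devl_region_create(devlink, ops, 1, size);
        devl_unlock(devlink);

        /* the region is optional here; continue without it on failure */
        return IS_ERR(region) ? NULL : region;
}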
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..d291c0e2e17b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
new file mode 100644
index 000000000000..13e6790d3cae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+
+#include "ice.h"
+#include "devlink.h"
+
+static int ice_active_port_option = -1;
+
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to print split port options
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * There could be no FW option available with split count 1.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+static const struct devlink_port_ops ice_devlink_port_ops = {
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
+};
+
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.bus.func;
+
+ /* As FW supports only port split options for whole device,
+ * set port split options only for first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
+ */
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+{
+ devl_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
+ *
+ * Create and register a devlink_port for this VF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_vf_port(struct ice_vf *vf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int err;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ devlink_port = &vf->devlink_port;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this VF.
+ */
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+{
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devlink_port_unregister(&vf->devlink_port);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
new file mode 100644
index 000000000000..9223bcdb6444
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _DEVLINK_PORT_H_
+#define _DEVLINK_PORT_H_
+
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+
+#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 365c03d1c462..6ad8002b22e1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -77,6 +77,7 @@
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
+#include "ice_adapter.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -330,7 +331,6 @@ struct ice_vsi {
struct net_device *netdev;
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
- struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_rx_ring **rx_rings; /* Rx ring array */
struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
@@ -348,12 +348,9 @@ struct ice_vsi {
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
- enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- struct ice_vf *vf; /* VF associated with this VSI */
-
u16 num_gfltr;
u16 num_bfltr;
@@ -445,12 +442,18 @@ struct ice_vsi {
u8 old_numtc;
u16 old_ena_tc;
- struct ice_channel *ch;
-
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
struct ice_agg_node *agg_node;
+
+ struct_group_tagged(ice_vsi_cfg_params, params,
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ struct ice_channel *ch; /* VSI's channel structure, may be NULL */
+ struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ u32 flags; /* VSI flags used for rebuild and configuration */
+ enum ice_vsi_type type; /* the type of the VSI */
+ );
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -458,7 +461,7 @@ struct ice_q_vector {
struct ice_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
- u16 reg_idx;
+ u16 reg_idx; /* PF relative register index */
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
@@ -480,6 +483,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
+ u16 vf_reg_idx; /* VF relative register index */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
@@ -522,17 +526,10 @@ enum ice_misc_thread_tasks {
};
struct ice_eswitch {
- struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
struct xarray reprs;
bool is_running;
- /* struct to allow cp queues management optimization */
- struct {
- int to_reach;
- int value;
- bool is_reaching;
- } qs;
};
struct ice_agg_node {
@@ -544,6 +541,7 @@ struct ice_agg_node {
struct ice_pf {
struct pci_dev *pdev;
+ struct ice_adapter *adapter;
struct devlink_region *nvm_region;
struct devlink_region *sram_region;
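/* Sketch of what struct_group_tagged() in struct ice_vsi above provides: the
 * grouped members stay addressable directly (vsi->port_info, vsi->type, ...)
 * and are also exposed as one struct ice_vsi_cfg_params named "params", so
 * rebuild code can snapshot or restore the whole configuration in a single
 * assignment. The helper below is hypothetical.
 */
static void example_save_vsi_cfg(const struct ice_vsi *vsi,
				 struct ice_vsi_cfg_params *out)
{
	/* copies port_info, ch, vf, flags and type in one go */
	*out = vsi->params;
}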
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
new file mode 100644
index 000000000000..52d15ef7f4b1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright Red Hat
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+#include "ice_adapter.h"
+
+static DEFINE_XARRAY(ice_adapters);
+
+/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+#define INDEX_FIELD_BUS GENMASK(12, 5)
+#define INDEX_FIELD_SLOT GENMASK(4, 0)
+
+static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+{
+ unsigned int domain = pci_domain_nr(pdev->bus);
+
+ WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+
+ return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+ FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+}
+
+static struct ice_adapter *ice_adapter_new(void)
+{
+ struct ice_adapter *adapter;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ refcount_set(&adapter->refcount, 1);
+
+ return adapter;
+}
+
+static void ice_adapter_free(struct ice_adapter *adapter)
+{
+ kfree(adapter);
+}
+
+DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
+
+/**
+ * ice_adapter_get - Get a shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
+ *
+ * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs)
+ * of the same multi-function PCI device share one ice_adapter structure.
+ * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put
+ * to release its reference.
+ *
+ * Context: Process, may sleep.
+ * Return: Pointer to ice_adapter on success.
+ * ERR_PTR() on error. -ENOMEM is the only possible error.
+ */
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+{
+ struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL;
+ unsigned long index = ice_adapter_index(pdev);
+
+ adapter = ice_adapter_new();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock(&ice_adapters);
+ ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL);
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto unlock;
+ }
+ if (ret) {
+ refcount_inc(&ret->refcount);
+ goto unlock;
+ }
+ ret = no_free_ptr(adapter);
+unlock:
+ xa_unlock(&ice_adapters);
+ return ret;
+}
+
+/**
+ * ice_adapter_put - Release a reference to the shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter.
+ *
+ * Releases the reference to ice_adapter previously obtained with
+ * ice_adapter_get.
+ *
+ * Context: Any.
+ */
+void ice_adapter_put(const struct pci_dev *pdev)
+{
+ unsigned long index = ice_adapter_index(pdev);
+ struct ice_adapter *adapter;
+
+ xa_lock(&ice_adapters);
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ goto unlock;
+
+ if (!refcount_dec_and_test(&adapter->refcount))
+ goto unlock;
+
+ WARN_ON(__xa_erase(&ice_adapters, index) != adapter);
+ ice_adapter_free(adapter);
+unlock:
+ xa_unlock(&ice_adapters);
+}
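/* Minimal lifecycle sketch, with hypothetical probe/remove wrappers: every PF
 * of the same multi-function device resolves to the same ice_adapter (keyed
 * by PCI domain/bus/slot) and drops its reference on remove.
 */
static int example_adapter_probe(struct ice_pf *pf, struct pci_dev *pdev)
{
	struct ice_adapter *adapter = ice_adapter_get(pdev);

	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	pf->adapter = adapter;
	return 0;
}

static void example_adapter_remove(struct ice_pf *pf, struct pci_dev *pdev)
{
	pf->adapter = NULL;
	ice_adapter_put(pdev);
}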
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
new file mode 100644
index 000000000000..9d11014ec02f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: Copyright Red Hat */
+
+#ifndef _ICE_ADAPTER_H_
+#define _ICE_ADAPTER_H_
+
+#include <linux/spinlock_types.h>
+#include <linux/refcount_types.h>
+
+struct pci_dev;
+
+/**
+ * struct ice_adapter - PCI adapter resources shared across PFs
+ * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
+ * register of the PTP clock.
+ * @refcount: Reference count. struct ice_pf objects hold the references.
+ */
+struct ice_adapter {
+ /* For access to the GLTSYN_TIME register */
+ spinlock_t ptp_gltsyn_time_lock;
+
+ refcount_t refcount;
+};
+
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+void ice_adapter_put(const struct pci_dev *pdev);
+
+#endif /* _ICE_ADAPTER_H_ */
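/* Sketch of the intended use of the shared lock: all PFs of one device
 * serialize their GLTSYN_TIME register accesses through the adapter-level
 * spinlock. The register access below is only a placeholder, not the
 * driver's actual PTP sequence.
 */
static void example_sync_gltsyn_access(struct ice_pf *pf)
{
	unsigned long flags;

	spin_lock_irqsave(&pf->adapter->ptp_gltsyn_time_lock, flags);
	/* ... read GLTSYN_TIME_L/H atomically with respect to other PFs ... */
	spin_unlock_irqrestore(&pf->adapter->ptp_gltsyn_time_lock, flags);
}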
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 8040317c9561..e76c388b9905 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -121,6 +121,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@@ -264,6 +265,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@@ -593,8 +596,9 @@ struct ice_aqc_recipe_data_elem {
struct ice_aqc_recipe_to_profile {
__le16 profile_id;
u8 rsvd[6];
- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+ __le64 recipe_assoc;
};
+static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
@@ -807,6 +811,23 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -1663,6 +1684,15 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
+
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -2535,6 +2565,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_topo get_link_topo;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
+ struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@@ -2641,6 +2672,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ /* tx topology commands */
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a545a7917e4f..687f6cb2b917 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->irq.index = -ENOENT;
if (vsi->type == ICE_VSI_VF) {
- q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
+ ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
@@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
+ q_vector->vf_reg_idx = q_vector->irq.index;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
@@ -264,30 +265,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
}
/**
- * ice_eswitch_calc_txq_handle
- * @ring: pointer to ring which unique index is needed
- *
- * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
- * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it
- * here by finding index in vsi->tx_rings of this ring.
- *
- * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
- * because VSI is get from ring->vsi, so it has to be present in this VSI.
- */
-static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
-{
- const struct ice_vsi *vsi = ring->vsi;
- int i;
-
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i] == ring)
- return i;
- }
-
- return ICE_INVAL_Q_INDEX;
-}
-
-/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
@@ -353,9 +330,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
- break;
default:
return;
}
@@ -479,6 +453,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* PF acts as uplink for switchdev; set flex descriptor with src_vsi
+ * metadata and flags to allow redirecting to PR netdev
+ */
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ ring->flags |= ICE_RX_FLAGS_MULTIDEV;
+ rxdid = ICE_RXDID_FLEX_NIC_2;
+ }
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
* increasing context priority to pick up profile ID; default is 0x01;
@@ -919,14 +901,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ring->q_handle = ice_eswitch_calc_txq_handle(ring);
-
- if (ring->q_handle == ICE_INVAL_Q_INDEX)
- return -ENODEV;
- } else {
- ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- }
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
if (ch)
status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index db4b2844e1f7..5649b257e631 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -160,10 +160,16 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E825C_SGMII:
hw->mac_type = ICE_MAC_GENERIC_3K_E825;
break;
- case ICE_DEV_ID_E830_BACKPLANE:
- case ICE_DEV_ID_E830_QSFP56:
- case ICE_DEV_ID_E830_SFP:
- case ICE_DEV_ID_E830_SFP_DD:
+ case ICE_DEV_ID_E830CC_BACKPLANE:
+ case ICE_DEV_ID_E830CC_QSFP56:
+ case ICE_DEV_ID_E830CC_SFP:
+ case ICE_DEV_ID_E830CC_SFP_DD:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_XXV_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_XXV_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_XXV_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -1002,8 +1008,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- void *mac_buf __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
+ void *mac_buf __free(kfree) = NULL;
u16 mac_buf_len;
int status;
@@ -1142,6 +1148,8 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
mutex_init(&hw->tnl_lock);
+ ice_init_chk_recipe_reuse_support(hw);
+
return 0;
err_unroll_fltr_mgmt_struct:
@@ -1615,6 +1623,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_set_port_params:
case ice_aqc_opc_get_vlan_mode_parameters:
case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_set_tx_topo:
+ case ice_aqc_opc_get_tx_topo:
case ice_aqc_opc_add_recipe:
case ice_aqc_opc_recipe_to_profile:
case ice_aqc_opc_get_recipe:
@@ -2171,6 +2181,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
+ case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -3272,7 +3285,7 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
@@ -3420,7 +3433,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_hw *hw;
int status;
@@ -3561,7 +3574,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_hw *hw;
int status;
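/* Sketch of why the __free(kfree) locals in this file gained "= NULL"
 * initializers: the scope-based cleanup runs on every exit path, so a pointer
 * that may go out of scope before it is assigned must start out NULL,
 * otherwise kfree() would see an uninitialized value. Hypothetical example:
 */
static int example_scoped_alloc(bool skip)
{
	u8 *buf __free(kfree) = NULL;

	if (skip)
		return 0;	/* cleanup calls kfree(NULL), which is a no-op */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use buf; it is freed automatically when the scope ends ... */
	return 0;
}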
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 6e20ee610022..a94e7072b570 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,7 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -291,7 +291,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
switch (vsi->type) {
case ICE_VSI_CHNL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
if (ena)
ice_ena_vsi(vsi, locked);
@@ -776,8 +775,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
/* no need to proceed with remaining cfg if it is CHNL
* or switchdev VSI
*/
- if (vsi->type == ICE_VSI_CHNL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ if (vsi->type == ICE_VSI_CHNL)
continue;
ice_vsi_map_rings_to_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index fc91c4d41186..ce5034ed2b24 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
+#include "ice_sched.h"
/* For supporting double VLAN mode, it is necessary to enable or disable certain
* boost tcam entries. The metadata labels names that match the following
@@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx)
}
}
+static bool ice_is_pfcp_profile(u16 prof_idx)
+{
+ return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
+ prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
+ if (ice_is_pfcp_profile(prof_idx))
+ return ICE_PROF_TUN_PFCP;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1424,14 +1434,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
goto exit;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
-
state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
+ if (state || !count)
goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
@@ -2263,3 +2273,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return state;
}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: false to get the topology, true to set it
+ *
+ * The function gets or sets the Tx topology.
+ *
+ * Return: zero on success, negative error code otherwise.
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ cmd = &desc.params.get_set_tx_topo;
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ if (ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+ }
+
+ if (!ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = desc.params.get_set_tx_topo.set_flags;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ *
+ * Return: zero when update was successful, negative values otherwise.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ u8 *current_topo, *new_topo = NULL;
+ struct ice_run_time_cfg_seg *seg;
+ struct ice_buf_hdr *section;
+ struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 offset, size = 0;
+ u32 reg = 0;
+ int status;
+ u8 flags;
+
+ if (!buf || !len)
+ return -EINVAL;
+
+ /* Does FW support the new Tx topology mode? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!current_topo)
+ return -ENOMEM;
+
+ /* Get the current Tx topology */
+ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+ &flags, false);
+
+ kfree(current_topo);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to get the current topology\n");
+ return status;
+ }
+
+ /* Is the default topology already applied? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Is the new topology already applied? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Has setting the topology already been issued? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Tx topology update was already issued by another PF\n");
+ /* Add a small delay before exiting */
+ msleep(2000);
+ return -EEXIST;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+ state);
+ return -EIO;
+ }
+
+ /* Find runtime configuration segment */
+ seg = (struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return -EIO;
+ }
+
+ if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ seg->buf_table.buf_count);
+ return -EIO;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+ if (!section || le32_to_cpu(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return -EIO;
+ }
+
+ size = le16_to_cpu(section->section_entry[0].size);
+ offset = le16_to_cpu(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return -EIO;
+ }
+
+ /* Make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return -EIO;
+ }
+
+ /* Get the new topology buffer */
+ new_topo = ((u8 *)section) + offset;
+
+update_topo:
+ /* Acquire the global lock to make sure that the topology is set
+ * by only one PF at a time.
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return status;
+ }
+
+ /* Check if reset was triggered already. */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ /* Reset is in progress, re-init the HW again */
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+ ice_check_reset(hw);
+ return 0;
+ }
+
+ /* Set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+ return status;
+ }
+
+ /* New topology is updated, delay 1 second before issuing the CORER */
+ msleep(1000);
+ ice_reset(hw, ICE_RESET_CORER);
+ /* CORER will clear the global lock, so no explicit call
+ * required for release.
+ */
+
+ return 0;
+}
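/* Hypothetical caller sketch for ice_cfg_tx_topo(): the DDP image is handed
 * over in a writable copy, and -EEXIST (topology already in place) as well as
 * -EOPNOTSUPP (firmware without compatibility mode) are treated as non-fatal.
 * The struct firmware plumbing here is an assumption for illustration.
 */
static int example_apply_tx_topo(struct ice_hw *hw, const struct firmware *fw)
{
	u8 *buf_copy;
	int err;

	buf_copy = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!buf_copy)
		return -ENOMEM;

	err = ice_cfg_tx_topo(hw, buf_copy, fw->size);
	if (err == -EEXIST || err == -EOPNOTSUPP)
		err = 0;

	kfree(buf_copy);
	return err;
}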
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index ff66c2ffb1a2..622543f08b43 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d0..9fc0fd95a13d 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 8)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 4)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 2)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 5)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9dfae9bce758..34fd604132f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -16,14 +16,26 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830CC_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830CC_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E830-C for backplane */
-#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
/* Intel(R) Ethernet Controller E830-C for QSFP */
-#define ICE_DEV_ID_E830_QSFP56 0x12D2
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
/* Intel(R) Ethernet Controller E830-C for SFP */
-#define ICE_DEV_ID_E830_SFP 0x12D3
-/* Intel(R) Ethernet Controller E830-C for SFP-DD */
-#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9069725c71b4..b102db8b829a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -7,89 +7,10 @@
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
#include "ice_tc_lib.h"
/**
- * ice_eswitch_del_sp_rules - delete adv rules added on PRs
- * @pf: pointer to the PF struct
- *
- * Delete all advanced rules that were used to forward packets with the
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- if (repr->sp_rule.rid)
- ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
- }
-}
-
-/**
- * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
- * @pf: pointer to PF struct
- * @repr: pointer to the repr struct
- *
- * This function adds advanced rule that forwards packets with
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
-{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
- struct ice_adv_rule_info rule_info = { 0 };
- struct ice_adv_lkup_elem *list;
- struct ice_hw *hw = &pf->hw;
- const u16 lkups_cnt = 1;
- int err;
-
- list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- ice_rule_add_src_vsi_metadata(list);
-
- rule_info.sw_act.flag = ICE_FLTR_TX;
- rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
- rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[repr->q_id];
- rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
- rule_info.flags_info.act_valid = true;
- rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = repr->src_vsi->idx;
-
- err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &repr->sp_rule);
- if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
- repr->id);
-
- kfree(list);
- return err;
-}
-
-static int
-ice_eswitch_add_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
- int err;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- err = ice_eswitch_add_sp_rule(pf, repr);
- if (err) {
- ice_eswitch_del_sp_rules(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
* ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
@@ -99,10 +20,13 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf)
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct net_device *netdev = uplink_vsi->netdev;
+ bool if_running = netif_running(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
- bool rule_added = false;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
+ if (ice_down(uplink_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -112,98 +36,53 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_vlan_zero;
+
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_RX))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
- if (ice_set_dflt_vsi(uplink_vsi))
- goto err_def_rx;
- rule_added = true;
- }
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_TX))
+ goto err_def_tx;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
if (vlan_ops->dis_rx_filtering(uplink_vsi))
- goto err_dis_rx;
+ goto err_vlan_filtering;
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
- if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_control;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
+ if (if_running && ice_up(uplink_vsi))
+ goto err_up;
+
return 0;
+err_up:
+ ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
-err_dis_rx:
- if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi);
+err_vlan_filtering:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+err_def_tx:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
err_def_rx:
+ ice_vsi_del_vlan_zero(uplink_vsi);
+err_vlan_zero:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
- return -ENODEV;
-}
-
-/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
- * @eswitch: pointer to eswitch struct
- *
- * In eswitch number of allocated Tx/Rx rings is equal.
- *
- * This function fills q_vectors structures associated with representor and
- * move each ring pairs to port representor netdevs. Each port representor
- * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
- * number of VFs.
- */
-static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
-{
- struct ice_vsi *vsi = eswitch->control_vsi;
- unsigned long repr_id = 0;
- int q_id;
-
- ice_for_each_txq(vsi, q_id) {
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- struct ice_repr *repr;
-
- repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
- XA_PRESENT);
- if (!repr)
- break;
-
- repr_id += 1;
- repr->q_id = q_id;
- q_vector = repr->q_vector;
- tx_ring = vsi->tx_rings[q_id];
- rx_ring = vsi->rx_rings[q_id];
-
- q_vector->vsi = vsi;
- q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
-
- q_vector->num_ring_tx = 1;
- q_vector->tx.tx_ring = tx_ring;
- tx_ring->q_vector = q_vector;
- tx_ring->next = NULL;
- tx_ring->netdev = repr->netdev;
- /* In switchdev mode, from OS stack perspective, there is only
- * one queue for given netdev, so it needs to be indexed as 0.
- */
- tx_ring->q_index = 0;
+ if (if_running)
+ ice_up(uplink_vsi);
- q_vector->num_ring_rx = 1;
- q_vector->rx.rx_ring = rx_ring;
- rx_ring->q_vector = q_vector;
- rx_ring->next = NULL;
- rx_ring->netdev = repr->netdev;
- }
+ return -ENODEV;
}
/**
@@ -225,8 +104,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
repr->dst = NULL;
ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
ICE_FWD_TO_VSI);
-
- netif_napi_del(&repr->q_vector->napi);
}
/**
@@ -236,7 +113,7 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
*/
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
@@ -252,15 +129,11 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
if (ice_vsi_add_vlan_zero(vsi))
goto err_update_security;
- netif_napi_add(repr->netdev, &repr->q_vector->napi,
- ice_napi_poll);
-
- netif_keep_dst(repr->netdev);
+ netif_keep_dst(uplink_vsi->netdev);
dst = repr->dst;
dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ dst->u.port_info.lower_dev = uplink_vsi->netdev;
return 0;
@@ -318,27 +191,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- struct ice_netdev_priv *np;
- struct ice_repr *repr;
- struct ice_vsi *vsi;
-
- np = netdev_priv(netdev);
- vsi = np->vsi;
-
- if (!vsi || !ice_is_switchdev_running(vsi->back))
- return NETDEV_TX_BUSY;
-
- if (ice_is_reset_in_progress(vsi->back->state) ||
- test_bit(ICE_VF_DIS, vsi->back->state))
- return NETDEV_TX_BUSY;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ unsigned int len = skb->len;
+ int ret;
- repr = ice_netdev_to_repr(netdev);
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->q_id;
+ skb->dev = repr->dst->u.port_info.lower_dev;
+
+ ret = dev_queue_xmit(skb);
+ ice_repr_inc_tx_stats(repr, len, ret);
- return ice_start_xmit(skb, netdev);
+ return ret;
}
/**
@@ -374,71 +239,29 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
- ice_clear_dflt_vsi(uplink_vsi);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
}
/**
- * ice_eswitch_vsi_setup - configure eswitch control VSI
- * @pf: pointer to PF structure
- * @pi: pointer to port_info structure
- */
-static struct ice_vsi *
-ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.type = ICE_VSI_SWITCHDEV_CTRL;
- params.pi = pi;
- params.flags = ICE_VSI_FLAG_INIT;
-
- return ice_vsi_setup(pf, &params);
-}
-
-/**
- * ice_eswitch_napi_enable - enable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_enable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_enable(&repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_napi_disable - disable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_disable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_disable(&repr->q_vector->napi);
-}
-
-/**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi, *uplink_vsi;
+ struct ice_vsi *uplink_vsi;
uplink_vsi = ice_get_main_vsi(pf);
if (!uplink_vsi)
@@ -450,17 +273,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->eswitch.control_vsi)
- return -ENODEV;
-
- ctrl_vsi = pf->eswitch.control_vsi;
- /* cp VSI is createad with 1 queue as default */
- pf->eswitch.qs.value = 1;
pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
- goto err_vsi;
+ return -ENODEV;
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
@@ -471,8 +287,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
err_br_offloads:
ice_eswitch_release_env(pf);
-err_vsi:
- ice_vsi_release(ctrl_vsi);
return -ENODEV;
}
@@ -482,14 +296,10 @@ err_vsi:
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
-
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_vsi_release(ctrl_vsi);
pf->eswitch.is_running = false;
- pf->eswitch.qs.is_reaching = false;
}
/**
@@ -530,7 +340,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
- xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
+ xa_init(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
@@ -602,56 +412,18 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
- ice_eswitch_del_sp_rules(pf);
ice_eswitch_stop_all_tx_queues(pf);
- ice_eswitch_napi_disable(&pf->eswitch.reprs);
}
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
- ice_eswitch_napi_enable(&pf->eswitch.reprs);
ice_eswitch_start_all_tx_queues(pf);
- ice_eswitch_add_sp_rules(pf);
-}
-
-static void
-ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
-{
- struct ice_vsi *cp = eswitch->control_vsi;
- int queues = 0;
-
- if (eswitch->qs.is_reaching) {
- if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
- queues = eswitch->qs.to_reach;
- eswitch->qs.is_reaching = false;
- } else {
- queues = 0;
- }
- } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
- change < 0) {
- queues = cp->alloc_txq + change;
- }
-
- if (queues) {
- cp->req_txq = queues;
- cp->req_rxq = queues;
- ice_vsi_close(cp);
- ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
- ice_vsi_open(cp);
- } else if (!change) {
- /* change == 0 means that VSI wasn't open, open it here */
- ice_vsi_open(cp);
- }
-
- eswitch->qs.value += change;
- ice_eswitch_remap_rings_to_vectors(eswitch);
}
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
struct ice_repr *repr;
- int change = 1;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -661,9 +433,6 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err = ice_eswitch_enable_switchdev(pf);
if (err)
return err;
- /* Control plane VSI is created with 1 queue as default */
- pf->eswitch.qs.to_reach -= 1;
- change = 0;
}
ice_eswitch_stop_reprs(pf);
@@ -678,14 +447,12 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_setup_repr;
- err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
- XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
if (err)
goto err_xa_alloc;
vf->repr_id = repr->id;
- ice_eswitch_cp_change_queues(&pf->eswitch, change);
ice_eswitch_start_reprs(pf);
return 0;
@@ -715,8 +482,6 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
- else
- ice_eswitch_cp_change_queues(&pf->eswitch, -1);
ice_eswitch_release_repr(pf, repr);
ice_repr_rem_vf(repr);
@@ -738,37 +503,37 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
* ice_eswitch_rebuild - rebuild eswitch
* @pf: pointer to PF structure
*/
-int ice_eswitch_rebuild(struct ice_pf *pf)
+void ice_eswitch_rebuild(struct ice_pf *pf)
{
struct ice_repr *repr;
unsigned long id;
- int err;
if (!ice_is_switchdev_running(pf))
- return 0;
-
- err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
- if (err)
- return err;
+ return;
xa_for_each(&pf->eswitch.reprs, id, repr)
ice_eswitch_detach(pf, repr->vf);
-
- return 0;
}
/**
- * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
- * @pf: pointer to PF structure
- * @change: how many more (or less) queues is needed
+ * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
+ * @rx_ring: ring used to receive the packet
+ * @rx_desc: descriptor used to get src_vsi value
*
- * Remember to call ice_eswitch_attach/detach() the "change" times.
+ * Get the src_vsi value from the descriptor and look up the matching
+ * representor. If none is found, return rx_ring->netdev.
*/
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
{
- if (pf->eswitch.qs.value + change < 0)
- return;
+ struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_repr *repr;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
+ if (!repr)
+ return rx_ring->netdev;
- pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
- pf->eswitch.qs.is_reaching = true;
+ return repr->netdev;
}
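/* Minimal Rx-path sketch (hypothetical helper): with the control VSI gone,
 * switchdev Rx resolves the destination netdev per packet from the flex
 * descriptor's src_vsi before the skb is passed up the stack.
 */
static void example_assign_rx_netdev(struct ice_rx_ring *rx_ring,
				     union ice_32b_rx_flex_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb->dev = ice_eswitch_get_target(rx_ring, rx_desc);
}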
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 1a288a03a79a..e2e5c0c75e7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -10,7 +10,7 @@
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
-int ice_eswitch_rebuild(struct ice_pf *pf);
+void ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -26,7 +26,8 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
@@ -78,7 +79,11 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
-static inline void
-ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
+static inline struct net_device *
+ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 255a9c8151b4..78b833b3e1d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -941,11 +941,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
+ u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u8 *tx_frame __free(kfree);
int i;
netdev_info(netdev, "loopback test\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 9a1a04f5f146..e3cab8e98f52 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = {
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ return ETHER_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
return TCP_V4_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
@@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
+ case ETHER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_ETH;
case TCP_V4_FLOW:
return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
case UDP_V4_FLOW:
@@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec = rule->eth;
+ fsp->m_u.ether_spec = rule->eth_mask;
+ break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
@@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
}
/**
+ * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
+ * @dev: pointer to the device structure (used for warnings)
+ * @fsp: pointer to ethtool Rx flow specification
+ *
+ * Return: true if vlan data is valid, false otherwise
+ */
+static bool ice_fdir_vlan_valid(struct device *dev,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
+ return false;
+
+ if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return false;
+
+ /* a filter matching on both proto and vlan must also define vlan-etype */
+ if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+ !fsp->m_ext.vlan_etype) {
+ dev_warn(dev, "Filters matching on both proto and vlan also require vlan-etype");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_set_ether_flow_seg - set address and protocol segments for ether flow
+ * @dev: pointer to the device structure (used for warnings)
+ * @seg: flow segment for programming
+ * @eth_spec: mask data from ethtool
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int ice_set_ether_flow_seg(struct device *dev,
+ struct ice_flow_seg_info *seg,
+ struct ethhdr *eth_spec)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
+
+ /* empty rules are not valid */
+ if (is_zero_ether_addr(eth_spec->h_source) &&
+ is_zero_ether_addr(eth_spec->h_dest) &&
+ !eth_spec->h_proto)
+ return -EINVAL;
+
+ /* Ethertype */
+ if (eth_spec->h_proto == htons(0xFFFF)) {
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ } else if (eth_spec->h_proto) {
+ dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+ }
+
+ /* Source MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_source))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_source))
+ goto err_mask;
+
+ /* Destination MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_dest))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_dest))
+ goto err_mask;
+
+ return 0;
+
+err_mask:
+ dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_vlan_seg - set vlan segments for ether flow
+ * @seg: flow segment for programming
+ * @ext_masks: masks for additional RX flow fields
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int
+ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_flow_ext *ext_masks)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
+
+ if (ext_masks->vlan_etype) {
+ if (ext_masks->vlan_etype != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ext_masks->vlan_tci) {
+ if (ext_masks->vlan_tci != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ return 0;
+}
+
+/**
* ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
* @pf: PF structure
* @fsp: pointer to ethtool Rx flow specification
@@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
struct device *dev = ice_pf_to_dev(pf);
enum ice_fltr_ptype fltr_idx;
struct ice_hw *hw = &pf->hw;
- bool perfect_filter;
+ bool perfect_filter = false;
int ret;
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
@@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
&perfect_filter);
break;
+ case ETHER_FLOW:
+ ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
+ if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
+ if (!ice_fdir_vlan_valid(dev, fsp)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1823,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
+ case ETHER_FLOW:
+ input->eth = fsp->h_u.ether_spec;
+ input->eth_mask = fsp->m_u.ether_spec;
+ break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
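/* Sketch of the kind of ethtool ntuple request the new ETHER_FLOW support
 * accepts: match on destination MAC plus EtherType and steer to an Rx queue.
 * The concrete MAC, EtherType and queue below are illustrative; real requests
 * arrive from user space via ETHTOOL_SRXCLSRLINS.
 */
static void example_fill_ether_flow(struct ethtool_rx_flow_spec *fsp)
{
	static const u8 dmac[ETH_ALEN] __aligned(2) = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55
	};

	memset(fsp, 0, sizeof(*fsp));
	fsp->flow_type = ETHER_FLOW;

	/* full mask means "match this destination MAC exactly" */
	ether_addr_copy(fsp->h_u.ether_spec.h_dest, dmac);
	eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);

	/* only a 0x0000 or 0xffff EtherType mask is accepted by the driver */
	fsp->h_u.ether_spec.h_proto = htons(ETH_P_1588);
	fsp->m_u.ether_spec.h_proto = htons(0xFFFF);

	fsp->ring_cookie = 4;	/* deliver matching packets to Rx queue 4 */
}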
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 5840c3e04a5b..26b357c0ae15 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -4,6 +4,8 @@
#include "ice_common.h"
/* These are training packet headers used to program flow director filters. */
+static const u8 ice_fdir_eth_pkt[22];
+
static const u8 ice_fdir_tcpv4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
@@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = {
/* Flow Director no-op training packet table */
static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
{
+ ICE_FLTR_PTYPE_NONF_ETH,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
@@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* perspective. The input from user is from Rx filter perspective.
*/
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ ice_pkt_insert_mac_addr(loc, input->eth.h_dest);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source);
+ if (input->ext_data.vlan_tag || input->ext_data.vlan_type) {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->ext_data.vlan_type);
+ ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET,
+ input->ext_data.vlan_tag);
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET,
+ input->eth.h_proto);
+ } else {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->eth.h_proto);
+ }
+ break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
@@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
* ice_fdir_comp_rules - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
enum ice_fltr_ptype flow_type = a->flow_type;
/* The calling function already checks that the two filters have the
* same flow_type.
*/
- if (!v6) {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.dst_port == b->ip.v4.dst_port &&
- a->ip.v4.src_port == b->ip.v4.src_port)
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.l4_header == b->ip.v4.l4_header &&
- a->ip.v4.proto == b->ip.v4.proto &&
- a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
- a->ip.v4.tos == b->ip.v4.tos)
- return true;
- }
- } else {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port &&
- !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
- b->ip.v6.dst_ip) &&
- !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
- b->ip.v6.src_ip))
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port)
- return true;
- }
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ if (!memcmp(&a->eth, &b->eth, sizeof(a->eth)))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ break;
+ default:
+ break;
}
return false;
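
For reference, ice_cmp_ipv6_addr() used in the v6 cases only needs to report equal versus not equal. A minimal sketch of such a comparison helper, assuming the addresses are stored as four __be32 words as elsewhere in this patch, is:

#include <linux/string.h>
#include <linux/types.h>

/* Sketch only: compare two IPv6 addresses stored as 4 x __be32.
 * Returns 0 when identical, non-zero otherwise, so callers can write
 * !example_cmp_ipv6_addr(a, b) for "addresses are equal".
 */
static int example_cmp_ipv6_addr(const __be32 *a, const __be32 *b)
{
	return memcmp(a, b, 4 * sizeof(*a));
}
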
@@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
bool ret = false;
list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
- enum ice_fltr_ptype flow_type;
-
if (rule->flow_type != input->flow_type)
continue;
- flow_type = input->flow_type;
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
- ret = ice_fdir_comp_rules(rule, input, false);
- else
- ret = ice_fdir_comp_rules(rule, input, true);
+ ret = ice_fdir_comp_rules(rule, input);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1b9b84490689..021ecbac7848 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -8,6 +8,9 @@
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
/* macros for offsets into packets for flow director programming */
+#define ICE_ETH_TYPE_F_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_ETH_TYPE_VLAN_OFFSET 16
#define ICE_IPV4_SRC_ADDR_OFFSET 26
#define ICE_IPV4_DST_ADDR_OFFSET 30
#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
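
The three new offsets correspond to the EtherType field of an untagged frame (byte 12) and, when a single 802.1Q tag is present, the VLAN TCI (byte 14) and the encapsulated EtherType (byte 16). A small, self-contained sketch of that layout, assuming the MACs at bytes 0-11 are already filled in and that exactly one VLAN tag is used:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: write the tag portion of a VLAN-tagged training
 * frame at the byte offsets added to ice_fdir.h above.
 */
static void example_build_vlan_eth_hdr(u8 *pkt, __be16 vlan_proto,
					__be16 tci, __be16 inner_proto)
{
	memcpy(pkt + 12, &vlan_proto, sizeof(vlan_proto));	/* ICE_ETH_TYPE_F_OFFSET */
	memcpy(pkt + 14, &tci, sizeof(tci));			/* ICE_ETH_VLAN_TCI_OFFSET */
	memcpy(pkt + 16, &inner_proto, sizeof(inner_proto));	/* ICE_ETH_TYPE_VLAN_OFFSET */
}
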
@@ -159,6 +162,8 @@ struct ice_fdir_fltr {
struct list_head fltr_node;
enum ice_fltr_ptype flow_type;
+ struct ethhdr eth, eth_mask;
+
union {
struct ice_fdir_v4 v4;
struct ice_fdir_v6 v6;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index d427a79d001a..817beca591e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -93,6 +93,7 @@ enum ice_tunnel_type {
TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
+ TNL_PFCP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -358,7 +359,8 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
- ICE_PROF_TUN_ALL = 0x1E,
+ ICE_PROF_TUN_PFCP = 0x20,
+ ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};
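
The widened ICE_PROF_TUN_ALL value is just the OR of the individual tunnel profile bits, now including the new PFCP bit. A quick compile-time check of that arithmetic, assuming ICE_PROF_TUN_UDP is 0x2 (it sits outside the visible hunk):

#include <linux/build_bug.h>

/* Sketch: 0x02 | 0x04 | 0x08 | 0x10 | 0x20 == 0x3E, i.e. the new
 * ICE_PROF_TUN_ALL covers every tunnel profile bit including the
 * ICE_PROF_TUN_PFCP bit added above.
 */
static_assert((0x02 | 0x04 | 0x08 | 0x10 | 0x20) == 0x3E,
	      "tunnel profile mask must include the PFCP bit");
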
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 319a2d6fe26c..f81db6c107c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Returns: zero on success, or a negative error code on failure.
*/
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
- u16 block_size, u8 *block, bool last_cmd,
- u8 *reset_level, struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 750574885716..04b200462757 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
struct netlink_ext_ack *extack);
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index f97128b69f87..1ccb572ce285 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -202,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
* @act: rule action
* @recipe_id: recipe id for the new rule
* @rule_idx: pointer to rule index
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
* @add: boolean on whether we are adding filters
*/
static int
ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
- bool add)
+ u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 s_rule_sz, vsi_num;
@@ -231,9 +232,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
+ if (direction == ICE_FLTR_RX) {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(vsi_num);
+ }
s_rule->act = cpu_to_le32(act);
s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
opc = ice_aqc_opc_add_sw_rules;
@@ -266,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+ int err;
+
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, add);
+ if (err)
+ goto err_rx;
- return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
- &lag->pf_rule_id, add);
+ act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_LB_ENABLE;
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id,
+ ICE_FLTR_TX, add);
+ if (err)
+ goto err_tx;
+
+ return 0;
+
+err_tx:
+ ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, !add);
+err_rx:
+ return err;
}
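
The reworked ice_lag_cfg_dflt_fltr() follows the usual kernel unwind idiom: if the second (Tx) rule fails to program, the first (Rx) rule is removed again by calling the same helper with the opposite add/remove flag. A generic, hypothetical skeleton of that "both rules or none" pattern, with made-up names:

#include <linux/errno.h>
#include <linux/types.h>

enum example_dir { EXAMPLE_DIR_RX, EXAMPLE_DIR_TX };

/* Stand-in for ice_lag_cfg_fltr(): programs (add=true) or removes
 * (add=false) one rule; always succeeds here, a real implementation
 * would talk to hardware and may return a negative errno.
 */
static int example_cfg_rule(enum example_dir dir, bool add)
{
	return 0;
}

static int example_cfg_rule_pair(void)
{
	int err;

	err = example_cfg_rule(EXAMPLE_DIR_RX, true);
	if (err)
		goto err_rx;

	err = example_cfg_rule(EXAMPLE_DIR_TX, true);
	if (err)
		goto err_tx;

	return 0;

err_tx:
	/* roll back the Rx rule so a failure never leaves half a pair */
	example_cfg_rule(EXAMPLE_DIR_RX, false);
err_rx:
	return err;
}
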
/**
@@ -284,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
ICE_SINGLE_ACT_DROP;
return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
- &lag->lport_rule_idx, add);
+ &lag->lport_rule_idx, ICE_FLTR_RX, add);
}
/**
@@ -310,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
- if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (bonding_info->slave.state && lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, false))
dev_err(dev, "Error removing old default VSI filter\n");
if (ice_lag_cfg_drop_fltr(lag, true))
@@ -319,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/* interface becoming active - add new default VSI rule */
- if (!bonding_info->slave.state && !lag->pf_rule_id) {
+ if (!bonding_info->slave.state && !lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(dev, "Error adding new default VSI filter\n");
if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
@@ -714,8 +740,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
@@ -953,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}
@@ -1976,8 +2000,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
tc);
@@ -2041,7 +2064,7 @@ int ice_init_lag(struct ice_pf *pf)
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ &recipe_bits, NULL);
if (err)
continue;
@@ -2049,7 +2072,7 @@ int ice_init_lag(struct ice_pf *pf)
recipe_bits |= BIT(lag->pf_recipe) |
BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ recipe_bits, NULL);
}
}
@@ -2149,7 +2172,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_cfg_cp_fltr(lag, true);
- if (lag->pf_rule_id)
+ if (lag->pf_rx_rule_id)
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 183b38792ef2..bab2c83142a1 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -43,7 +43,8 @@ struct ice_lag {
u8 primary:1; /* this is primary */
u16 pf_recipe;
u16 lport_recipe;
- u16 pf_rule_id;
+ u16 pf_rx_rule_id;
+ u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
u8 role;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d384ddfcb83e..611577ebc29d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -160,64 +160,6 @@ struct ice_fltr_desc {
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
-struct ice_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum ice_rx_ptype_outer_ip {
- ICE_RX_PTYPE_OUTER_L2 = 0,
- ICE_RX_PTYPE_OUTER_IP = 1,
-};
-
-enum ice_rx_ptype_outer_ip_ver {
- ICE_RX_PTYPE_OUTER_NONE = 0,
- ICE_RX_PTYPE_OUTER_IPV4 = 1,
- ICE_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum ice_rx_ptype_outer_fragmented {
- ICE_RX_PTYPE_NOT_FRAG = 0,
- ICE_RX_PTYPE_FRAG = 1,
-};
-
-enum ice_rx_ptype_tunnel_type {
- ICE_RX_PTYPE_TUNNEL_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum ice_rx_ptype_tunnel_end_prot {
- ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum ice_rx_ptype_inner_prot {
- ICE_RX_PTYPE_INNER_PROT_NONE = 0,
- ICE_RX_PTYPE_INNER_PROT_UDP = 1,
- ICE_RX_PTYPE_INNER_PROT_TCP = 2,
- ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
- ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
- ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum ice_rx_ptype_payload_layer {
- ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
@@ -651,266 +593,4 @@ struct ice_tlan_ctx {
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
-/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT ice_ptype_lkup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum ice_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-#define ICE_PTYPES \
- /* L2 Packet types */ \
- ICE_PTT_UNUSED_ENTRY(0), \
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
- ICE_PTT_UNUSED_ENTRY(2), \
- ICE_PTT_UNUSED_ENTRY(3), \
- ICE_PTT_UNUSED_ENTRY(4), \
- ICE_PTT_UNUSED_ENTRY(5), \
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(8), \
- ICE_PTT_UNUSED_ENTRY(9), \
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(12), \
- ICE_PTT_UNUSED_ENTRY(13), \
- ICE_PTT_UNUSED_ENTRY(14), \
- ICE_PTT_UNUSED_ENTRY(15), \
- ICE_PTT_UNUSED_ENTRY(16), \
- ICE_PTT_UNUSED_ENTRY(17), \
- ICE_PTT_UNUSED_ENTRY(18), \
- ICE_PTT_UNUSED_ENTRY(19), \
- ICE_PTT_UNUSED_ENTRY(20), \
- ICE_PTT_UNUSED_ENTRY(21), \
- \
- /* Non Tunneled IPv4 */ \
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(25), \
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv4 */ \
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(32), \
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv6 */ \
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(39), \
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT */ \
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> IPv4 */ \
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(47), \
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> IPv6 */ \
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(54), \
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC */ \
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(62), \
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(69), \
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(77), \
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(84), \
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
- \
- /* Non Tunneled IPv6 */ \
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(91), \
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv4 */ \
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(98), \
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv6 */ \
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(105), \
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT */ \
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> IPv4 */ \
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(113), \
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> IPv6 */ \
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(120), \
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC */ \
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(128), \
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(135), \
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(143), \
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(150), \
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
-#define ICE_NUM_DEFINED_PTYPES 154
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- ICE_RX_PTYPE_##OUTER_FRAG, \
- ICE_RX_PTYPE_TUNNEL_##T, \
- ICE_RX_PTYPE_TUNNEL_END_##TE, \
- ICE_RX_PTYPE_##TEF, \
- ICE_RX_PTYPE_INNER_PROT_##I, \
- ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
-#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
-
-/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- ICE_PTYPES
-
- /* unused entries */
- [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
-static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
-{
- return ice_ptype_lkup[ptype];
-}
-
-
#endif /* _ICE_LAN_TX_RX_H_ */
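
For context, the ice_ptype_lkup[] table and ice_decode_rx_desc_ptype() removed above turned the 10-bit hardware packet type into a small bit-field; the replacement decoding presumably lives in the shared libie code imported later in this patch via MODULE_IMPORT_NS(LIBIE). A sketch of how the old lookup used to be consumed, using the types from the deleted block (so it only compiles against the pre-patch header):

/* Example consumer of the removed decode helper: classify a hardware
 * ptype as "TCP over IP" using the bit-field it returned.
 */
static bool example_rx_ptype_is_tcp(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return false;	/* no table row for this ptype */

	return decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP &&
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
}
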
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ee3f0d3e3f6d..5371e91f6bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,7 +7,6 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
-#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -27,8 +26,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
- case ICE_VSI_SWITCHDEV_CTRL:
- return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -144,7 +141,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -211,21 +207,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of PRs
- * Each ring is associated to the corresponding VF_PR netdev.
- * Tx and Rx rings are always equal
- */
- if (vsi->req_txq && vsi->req_rxq) {
- vsi->alloc_txq = vsi->req_txq;
- vsi->alloc_rxq = vsi->req_rxq;
- } else {
- vsi->alloc_txq = 1;
- vsi->alloc_rxq = 1;
- }
-
- vsi->num_q_vectors = 1;
- break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -522,22 +503,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- struct ice_pf *pf = q_vector->vsi->back;
- struct ice_repr *repr;
- unsigned long id;
-
- if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
- return IRQ_HANDLED;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- napi_schedule(&repr->q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
/**
* ice_vsi_alloc_stat_arrays - Allocate statistics arrays
* @vsi: VSI pointer
@@ -600,10 +565,6 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
}
switch (vsi->type) {
- case ICE_VSI_SWITCHDEV_CTRL:
- /* Setup eswitch MSIX irq handler for VSI */
- vsi->irq_handler = ice_eswitch_msix_clean_rings;
- break;
case ICE_VSI_PF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
@@ -933,11 +894,6 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
- vsi->rss_lut_type = ICE_LUT_VSI;
- break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1263,7 +1219,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2145,7 +2100,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2273,10 +2227,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
/**
* ice_vsi_cfg_def - configure default VSI based on the type
* @vsi: pointer to VSI
- * @params: the parameters to configure this VSI with
*/
-static int
-ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+static int ice_vsi_cfg_def(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_pf *pf = vsi->back;
@@ -2284,7 +2236,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
vsi->vsw = pf->first_sw;
- ret = ice_vsi_alloc_def(vsi, params->ch);
+ ret = ice_vsi_alloc_def(vsi, vsi->ch);
if (ret)
return ret;
@@ -2309,7 +2261,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, params->flags);
+ ret = ice_vsi_init(vsi, vsi->flags);
if (ret)
goto unroll_get_qs;
@@ -2317,7 +2269,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
switch (vsi->type) {
case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2430,23 +2381,16 @@ unroll_vsi_alloc:
/**
* ice_vsi_cfg - configure a previously allocated VSI
* @vsi: pointer to VSI
- * @params: parameters used to configure this VSI
*/
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int ret;
- if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- vsi->type = params->type;
- vsi->port_info = params->pi;
-
- /* For VSIs which don't have a connected VF, this will be NULL */
- vsi->vf = params->vf;
-
- ret = ice_vsi_cfg_def(vsi, params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
return ret;
@@ -2532,7 +2476,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
* a port_info structure for it.
*/
if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
- WARN_ON(!params->pi))
+ WARN_ON(!params->port_info))
return NULL;
vsi = ice_vsi_alloc(pf);
@@ -2541,7 +2485,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
return NULL;
}
- ret = ice_vsi_cfg(vsi, params);
+ vsi->params = *params;
+ ret = ice_vsi_cfg(vsi);
if (ret)
goto err_vsi_cfg;
@@ -2750,8 +2695,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -3089,38 +3033,35 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
*/
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int prev_num_q_vectors = 0;
+ int prev_num_q_vectors;
struct ice_pf *pf;
int ret;
if (!vsi)
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = vsi_flags;
-
+ vsi->flags = vsi_flags;
pf = vsi->back;
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- coalesce = kcalloc(vsi->num_q_vectors,
- sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
-
- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
-
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
goto err_vsi_cfg;
ice_vsi_decfg(vsi);
- ret = ice_vsi_cfg_def(vsi, &params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
goto err_vsi_cfg;
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (!coalesce)
+ return -ENOMEM;
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
@@ -3139,8 +3080,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
err_vsi_cfg_tc_lan:
ice_vsi_decfg(vsi);
-err_vsi_cfg:
kfree(coalesce);
+err_vsi_cfg:
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 9cd23afe5f15..94ce8964dda6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,43 +11,6 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
-/**
- * struct ice_vsi_cfg_params - VSI configuration parameters
- * @pi: pointer to the port_info instance for the VSI
- * @ch: pointer to the channel structure for the VSI, may be NULL
- * @vf: pointer to the VF associated with this VSI, may be NULL
- * @type: the type of VSI to configure
- * @flags: VSI flags used for rebuild and configuration
- *
- * Parameter structure used when configuring a new VSI.
- */
-struct ice_vsi_cfg_params {
- struct ice_port_info *pi;
- struct ice_channel *ch;
- struct ice_vf *vf;
- enum ice_vsi_type type;
- u32 flags;
-};
-
-/**
- * ice_vsi_to_params - Get parameters for an existing VSI
- * @vsi: the VSI to get parameters for
- *
- * Fill a parameter structure for reconfiguring a VSI with its current
- * parameters, such as during a rebuild operation.
- */
-static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.pi = vsi->port_info;
- params.ch = vsi->ch;
- params.vf = vsi->vf;
- params.type = vsi->type;
-
- return params;
-}
-
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -101,7 +64,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
+int ice_vsi_cfg(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 33a164fa325a..f60c022f7960 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,8 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -36,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -1745,6 +1747,39 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_mdd_maybe_reset_vf - reset VF after MDD event
+ * @pf: pointer to the PF structure
+ * @vf: pointer to the VF structure
+ * @reset_vf_tx: whether Tx MDD has occurred
+ * @reset_vf_rx: whether Rx MDD has occurred
+ *
+ * Since the queue can get stuck on VF MDD events, the PF can be configured to
+ * automatically reset the VF by enabling the private ethtool flag
+ * mdd-auto-reset-vf.
+ */
+static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
+ bool reset_vf_tx, bool reset_vf_rx)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ return;
+
+ /* VF MDD event counters will be cleared by reset, so print the event
+ * prior to reset.
+ */
+ if (reset_vf_tx)
+ ice_print_vf_tx_mdd_event(vf);
+
+ if (reset_vf_rx)
+ ice_print_vf_rx_mdd_event(vf);
+
+ dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
+ pf->hw.pf_id, vf->vf_id);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+}
+
+/**
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1837,6 +1872,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
*/
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
+ bool reset_vf_tx = false, reset_vf_rx = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
@@ -1845,6 +1882,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
@@ -1855,6 +1894,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
@@ -1865,6 +1906,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_RX(vf->vf_id));
@@ -1876,18 +1919,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
vf->vf_id);
- /* Since the queue is disabled on VF Rx MDD events, the
- * PF can be configured to reset the VF through ethtool
- * private flag mdd-auto-reset-vf.
- */
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
- /* VF MDD event counters will be cleared by
- * reset, so print the event prior to reset.
- */
- ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(vf, ICE_VF_RESET_LOCK);
- }
+ reset_vf_rx = true;
}
+
+ if (reset_vf_tx || reset_vf_rx)
+ ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
+ reset_vf_rx);
}
mutex_unlock(&pf->vfs.table_lock);
@@ -3648,7 +3685,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_PF;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3661,7 +3698,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CHNL;
- params.pi = pi;
+ params.port_info = pi;
params.ch = ch;
params.flags = ICE_VSI_FLAG_INIT;
@@ -3682,7 +3719,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CTRL;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3702,7 +3739,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_LB;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -4417,11 +4454,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
+ * @firmware: double pointer to firmware struct
+ *
+ * Return: zero when successful, negative values otherwise.
*/
-static void ice_request_fw(struct ice_pf *pf)
+static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
- const struct firmware *firmware = NULL;
struct device *dev = ice_pf_to_dev(pf);
int err = 0;
@@ -4430,29 +4469,95 @@ static void ice_request_fw(struct ice_pf *pf)
* and warning messages for other errors.
*/
if (opt_fw_filename) {
- err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
- if (err) {
- kfree(opt_fw_filename);
- goto dflt_pkg_load;
- }
-
- /* request for firmware was successful. Download to device */
- ice_load_pkg(firmware, pf);
+ err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
kfree(opt_fw_filename);
- release_firmware(firmware);
- return;
+ if (!err)
+ return err;
+ }
+ err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
+ if (err)
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+
+ return err;
+}
+
+/**
+ * ice_init_tx_topology - performs Tx topology initialization
+ * @hw: pointer to the hardware structure
+ * @firmware: pointer to firmware structure
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int
+ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
+{
+ u8 num_tx_sched_layers = hw->num_tx_sched_layers;
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
+ u8 *buf_copy;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* ice_cfg_tx_topo buf argument is not a constant,
+ * so we have to make a copy
+ */
+ buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
+
+ err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
+ if (!err) {
+ if (hw->num_tx_sched_layers > num_tx_sched_layers)
+ dev_info(dev, "Tx scheduling layers switching feature disabled\n");
+ else
+ dev_info(dev, "Tx scheduling layers switching feature enabled\n");
+ /* if there was a change in topology ice_cfg_tx_topo triggered
+ * a CORER and we need to re-init hw
+ */
+ ice_deinit_hw(hw);
+ err = ice_init_hw(hw);
+
+ return err;
+ } else if (err == -EIO) {
+ dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
}
-dflt_pkg_load:
- err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
+ return 0;
+}
+
+/**
+ * ice_init_ddp_config - DDP related configuration
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * This function loads DDP file from the disk, then initializes Tx
+ * topology. At the end DDP package is loaded on the card.
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const struct firmware *firmware = NULL;
+ int err;
+
+ err = ice_request_fw(pf, &firmware);
if (err) {
- dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
- return;
+ dev_err(dev, "Fail during requesting FW: %d\n", err);
+ return err;
}
- /* request for firmware was successful. Download to device */
+ err = ice_init_tx_topology(hw, firmware);
+ if (err) {
+ dev_err(dev, "Fail during initialization of Tx topology: %d\n",
+ err);
+ release_firmware(firmware);
+ return err;
+ }
+
+ /* Download firmware to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+
+ return 0;
}
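
The request path above first tries an optional, board-specific DDP file with firmware_request_nowarn(), so a missing optional file does not spam the log, and only then falls back to the default package name. A minimal, self-contained sketch of that two-step pattern, with made-up file names; the caller must release_firmware() the result when done:

#include <linux/device.h>
#include <linux/firmware.h>

/* Sketch only: try "custom.pkg" quietly, fall back to "default.pkg". */
static int example_request_pkg(struct device *dev,
			       const struct firmware **fw)
{
	if (!firmware_request_nowarn(fw, "custom.pkg", dev))
		return 0;	/* optional file found */

	/* request_firmware() logs on failure itself */
	return request_firmware(fw, "default.pkg", dev);
}
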
/**
@@ -4625,9 +4730,11 @@ int ice_init_dev(struct ice_pf *pf)
ice_init_feature_support(pf);
- ice_request_fw(pf);
+ err = ice_init_ddp_config(hw, pf);
+ if (err)
+ return err;
- /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+ /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/
@@ -5093,6 +5200,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -5145,7 +5253,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pci_set_master(pdev);
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter))
+ return PTR_ERR(adapter);
+
pf->pdev = pdev;
+ pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5179,23 +5292,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
- devl_unlock(priv_to_devlink(pf));
if (err)
goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
+ devl_unlock(priv_to_devlink(pf));
return 0;
err_init_devlink:
- devl_lock(priv_to_devlink(pf));
ice_unload(pf);
- devl_unlock(priv_to_devlink(pf));
err_load:
+ devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
err_init:
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
return err;
}
@@ -5290,9 +5403,9 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ devl_lock(priv_to_devlink(pf));
ice_deinit_devlink(pf);
- devl_lock(priv_to_devlink(pf));
ice_unload(pf);
devl_unlock(priv_to_devlink(pf));
@@ -5302,6 +5415,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
}
@@ -5321,7 +5435,6 @@ static void ice_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
/**
* ice_prepare_for_shutdown - prep for PCI shutdown
* @pf: board private structure
@@ -5410,7 +5523,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int __maybe_unused ice_suspend(struct device *dev)
+static int ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -5477,7 +5590,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int __maybe_unused ice_resume(struct device *dev)
+static int ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -5528,7 +5641,6 @@ static int __maybe_unused ice_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
@@ -5693,16 +5805,22 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
/* required last entry */
{}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
-static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
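
DEFINE_SIMPLE_DEV_PM_OPS() builds a dev_pm_ops table wiring the given callbacks to the system-sleep hooks, and pm_sleep_ptr() at the .driver.pm assignment turns the pointer into NULL when CONFIG_PM_SLEEP is disabled. Because the callbacks are always compiled and then dead-code eliminated, the #ifdef CONFIG_PM blocks and __maybe_unused annotations removed in this patch are no longer needed. A hypothetical platform driver using the same idiom:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* quiesce the hypothetical device */
}

static int example_resume(struct device *dev)
{
	return 0;	/* bring it back up */
}

/* Always compiled; pm_sleep_ptr() drops the reference (and thus the
 * callbacks) when CONFIG_PM_SLEEP is off.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-pm-sketch",
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
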
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
@@ -5717,9 +5835,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
-#ifdef CONFIG_PM
- .driver.pm = &ice_pm_ops,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&ice_pm_ops),
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
@@ -7055,13 +7171,11 @@ int ice_down(struct ice_vsi *vsi)
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
- } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -7544,11 +7658,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- err = ice_eswitch_rebuild(pf);
- if (err) {
- dev_err(dev, "Switchdev rebuild failed: %d\n", err);
- goto err_vsi_rebuild;
- }
+ ice_eswitch_rebuild(pf);
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
@@ -7666,7 +7776,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = (unsigned int)new_mtu;
+ WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
err = ice_down_up(vsi);
if (err)
return err;
@@ -7999,12 +8109,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
- mode = nla_get_u16(attr);
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
/* Continue if bridge mode is not being flipped */
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index d4e05d2cb30c..84eab92dc03c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -18,10 +18,9 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static int
-ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, bool read_shadow_ram,
- struct ice_sq_cd *cd)
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 774c2317967d..63cdc6bdac58 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -14,6 +14,9 @@ struct ice_orom_civd_info {
int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index f6f27361c3cf..755a9c55267c 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -43,6 +43,7 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PFCP,
ICE_PPPOE,
ICE_L2TPV3,
ICE_VLAN_EX,
@@ -61,6 +62,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
+ ICE_SW_TUN_PFCP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -202,6 +204,15 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pfcp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 length;
+ __be64 seid;
+ __be32 seq;
+ u8 spare;
+} __packed __aligned(__alignof__(u16));
+
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
@@ -418,6 +429,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pfcp_hdr pfcp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index c11eba07283c..0f17fc1181d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -374,6 +374,7 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
u8 tmr_idx;
tmr_idx = ice_get_ptp_src_clock_index(hw);
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
@@ -1166,26 +1167,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
}
/**
- * ice_ptp_read_time - Read the time from the device
- * @pf: Board private structure
- * @ts: timespec structure to hold the current time value
- * @sts: Optional parameter for holding a pair of system timestamps from
- * the system clock. Will be ignored if NULL is given.
- *
- * This function reads the source clock registers and stores them in a timespec.
- * However, since the registers are 64 bits of nanoseconds, we must convert the
- * result to a timespec before we can return.
- */
-static void
-ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
- struct ptp_system_timestamp *sts)
-{
- u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
-
- *ts = ns_to_timespec64(time_ns);
-}
-
-/**
* ice_ptp_write_init - Set PHC time to provided value
* @pf: Board private structure
* @ts: timespec structure that holds the new time value
@@ -1925,16 +1906,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
-
- if (!ice_ptp_lock(hw)) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
- return -EBUSY;
- }
-
- ice_ptp_read_time(pf, ts, sts);
- ice_ptp_unlock(hw);
+ u64 time_ns;
+ time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+ *ts = ns_to_timespec64(time_ns);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 187ce9b54e1a..2b9423a173bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -274,6 +274,9 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
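
Both PTP hunks rely on the scoped-lock helper from <linux/cleanup.h>: guard(spinlock)(&lock) takes the spinlock immediately and releases it automatically when the enclosing scope ends, which is why no explicit unlock appears before the register access above. A minimal illustration with a private lock:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static u64 example_value;

/* The guard takes example_lock here and drops it when the function
 * returns, on every path, so early returns cannot leak the lock.
 */
static u64 example_read_value(void)
{
	guard(spinlock)(&example_lock);

	if (!example_value)
		return 0;

	return example_value;
}
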
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 5f30fb131f74..d367f4c66dcd 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -3,42 +3,51 @@
#include "ice.h"
#include "ice_eswitch.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_repr_get_sw_port_id - get port ID associated with representor
- * @repr: pointer to port representor
+ * ice_repr_inc_tx_stats - increment Tx statistic by one packet
+ * @repr: repr to increment stats on
+ * @len: length of the packet
+ * @xmit_status: value returned by xmit function
*/
-static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status)
{
- return repr->src_vsi->back->hw.port_info->lport;
+ struct ice_repr_pcpu_stats *stats;
+
+ if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
+ xmit_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
- * ice_repr_get_phys_port_name - get phys port name
- * @netdev: pointer to port representor netdev
- * @buf: write here port name
- * @len: max length of buf
+ * ice_repr_inc_rx_stats - increment Rx statistic by one packet
+ * @netdev: repr netdev to increment stats on
+ * @len: length of the packet
*/
-static int
-ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_repr *repr = np->repr;
- int res;
-
- /* Devlink port is registered and devlink core is taking care of name formatting. */
- if (repr->vf->devlink_port.devlink)
- return -EOPNOTSUPP;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_repr_pcpu_stats *stats;
- res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->id);
- if (res <= 0)
- return -EOPNOTSUPP;
- return 0;
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
@@ -76,7 +85,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
* ice_netdev_to_repr - Get port representor for given netdevice
* @netdev: pointer to port representor netdev
*/
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -139,38 +148,35 @@ static int ice_repr_stop(struct net_device *netdev)
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
* @stats: netlink stats structure
- *
- * RX/TX stats are being swapped here to be consistent with VF stats. In slow
- * path, port representor receives data when the corresponding VF is sending it
- * (and vice versa), TX and RX bytes/packets are effectively swapped on port
- * representor.
*/
static int
ice_repr_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- int vf_id = np->repr->vf->vf_id;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- u64 pkts, bytes;
-
- tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
- tx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->rx_packets = pkts;
- stats->rx_bytes = bytes;
-
- rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
- rx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
- rx_ring->ring_stats->rx_stats.alloc_buf_failed;
-
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct ice_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
return 0;
}
@@ -240,7 +246,6 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
static const struct net_device_ops ice_repr_netdev_ops = {
- .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
.ndo_get_stats64 = ice_repr_get_stats64,
.ndo_open = ice_repr_open,
.ndo_stop = ice_repr_stop,
@@ -291,7 +296,7 @@ static void ice_repr_remove_node(struct devlink_port *devlink_port)
*/
static void ice_repr_rem(struct ice_repr *repr)
{
- kfree(repr->q_vector);
+ free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
@@ -331,7 +336,6 @@ static void ice_repr_set_tx_topology(struct ice_pf *pf)
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
- struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
int err;
@@ -346,23 +350,22 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
goto err_alloc;
}
+ repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
+ if (!repr->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
repr->src_vsi = src_vsi;
+ repr->id = src_vsi->vsi_num;
np = netdev_priv(repr->netdev);
np->repr = repr;
- q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector) {
- err = -ENOMEM;
- goto err_alloc_q_vector;
- }
- repr->q_vector = q_vector;
- repr->q_id = repr->id;
-
ether_addr_copy(repr->parent_mac, parent_mac);
return repr;
-err_alloc_q_vector:
+err_stats:
free_netdev(repr->netdev);
err_alloc:
kfree(repr);
@@ -439,15 +442,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr)
netif_carrier_off(repr->netdev);
netif_tx_stop_all_queues(repr->netdev);
}
-
-/**
- * ice_repr_set_traffic_vsi - set traffic VSI for port representor
- * @repr: repr on with VSI will be set
- * @vsi: pointer to VSI that will be used by port representor to pass traffic
- */
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np = netdev_priv(repr->netdev);
-
- np->vsi = vsi;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index f9aede315716..cff730b15ca0 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -6,20 +6,24 @@
#include <net/dst_metadata.h>
+struct ice_repr_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
struct ice_vf *vf;
- struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
- int q_id;
+ struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
-#ifdef CONFIG_ICE_SWITCHDEV
- /* info about slow path rule */
- struct ice_rule_query_data sp_rule;
-#endif
};
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
@@ -28,10 +32,12 @@ void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
-
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
+
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status);
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
#endif
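
The per-CPU representor counters added above follow the kernel's standard lockless statistics pattern: netdev_alloc_pcpu_stats() allocates and initializes one struct per CPU, writers touch only their own CPU's copy between u64_stats_update_begin()/end(), and readers fold all CPUs together with the u64_stats_fetch_begin()/retry() loop. A minimal sketch of that pattern, outside the ice driver, is shown below; the demo_* names are hypothetical, while the u64_stats_* and per-CPU helpers are the real kernel APIs.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Hot path: update only this CPU's counters, no lock taken. */
static void demo_inc_tx(struct demo_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Slow path: fold every CPU's counters into one consistent snapshot. */
static void demo_read_tx(struct demo_pcpu_stats __percpu *pcpu,
			 u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			p = s->tx_packets;
			b = s->tx_bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

As in ice_repr_inc_tx_stats() above, the writer side assumes it runs on the Tx path with bottom halves disabled, so this_cpu_ptr() needs no extra preemption handling.
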
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index a1525992d14b..ecf8f5d60292 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+		/* qgroup and VSI layers are the same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
- return hw->sw_entry_point_layer;
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+ else
+ return hw->sw_entry_point_layer;
}
/**
@@ -1510,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1524,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+	/* If the queue group and VSI layers are the same, then the queues
+	 * are all attached directly to the VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@@ -3199,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
u8 profile_type;
int status;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -3215,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
- if (!pi)
- return NULL;
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3446,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
int status = 0;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (layer_num >= pi->hw->num_tx_sched_layers)
return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 1aef05ea5a57..7b668083be07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,17 @@
#include "ice_common.h"
+/**
+ * DOC: ice_sched.h
+ *
+ * This header file contains everything the Tx scheduler needs in the broad
+ * sense: defines related to layers, structures related to aggregators,
+ * function declarations and more.
+ */
+
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_QGRP_LAYER_OFFSET 2
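
With the two fixed topologies above, the VSI/queue-group layer math becomes a pair of simple subtractions: in a 5-layer tree the queue-group and VSI layers coincide (5 - ICE_QGRP_LAYER_OFFSET = 3), which is why ice_sched_get_free_qparent() now returns the VSI node directly, while a 9-layer tree keeps a separate queue-group layer below the VSI layer. A simplified restatement of the new selection logic is sketched below; the demo_ helper is illustrative only, and ICE_VSI_LAYER_OFFSET is assumed to be the value defined elsewhere in this header.

/* Illustrative-only restatement of the 5-/9-layer selection above;
 * ICE_VSI_LAYER_OFFSET comes from ice_sched.h and is not redefined here.
 */
static u8 demo_get_vsi_layer(u8 num_layers, u8 sw_entry_point_layer)
{
	if (num_layers == ICE_SCHED_9_LAYERS)
		return num_layers - ICE_VSI_LAYER_OFFSET;
	if (num_layers == ICE_SCHED_5_LAYERS)
		/* queue-group and VSI layers are the same: 5 - 2 = 3 */
		return num_layers - ICE_QGRP_LAYER_OFFSET;
	return sw_entry_point_layer;
}
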
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a958fcf3e6be..067712f4923f 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
-
mutex_lock(&vfs->table_lock);
ice_for_each_vf(pf, bkt, vf) {
@@ -227,7 +225,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_VF;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -362,13 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
* @vf: VF to calculate the register index for
* @q_vector: a q_vector associated to the VF
*/
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
if (!vf || !q_vector)
- return -EINVAL;
+ return;
/* always add one to account for the OICR being the first MSIX */
- return vf->first_vector_idx + q_vector->v_idx + 1;
+ q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
+ q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
/**
@@ -833,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
pci_dev_get(vfdev);
- /* set default number of MSI-X */
- vf->num_msix = pf->vfs.num_msix_per;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -897,7 +891,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
- ice_eswitch_reserve_cp_queues(pf, num_vfs);
ret = ice_start_vfs(pf);
if (ret) {
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -1869,6 +1862,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
+ * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1876,8 +1887,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
*/
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
unsigned int bkt;
@@ -1904,10 +1913,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
vf->mdd_tx_events.last_printed =
vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr);
+ ice_print_vf_tx_mdd_event(vf);
}
}
mutex_unlock(&pf->vfs.table_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8488df38b586..8f22313474d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -49,7 +49,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
@@ -58,6 +58,7 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
@@ -69,6 +70,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
@@ -130,11 +132,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
+static inline void
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
- return 0;
}
static inline int
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index d4baae8c3b72..94d6670d0901 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -42,6 +42,7 @@ enum {
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
ICE_PKT_L2TPV3 = BIT(11),
+ ICE_PKT_PFCP = BIT(12),
};
struct ice_dummy_pkt_offsets {
@@ -1110,6 +1111,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PFCP, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PFCP, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -1343,6 +1415,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
ICE_PKT_INNER_UDP),
ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@@ -2025,12 +2099,12 @@ error_out:
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe bitmap to associate with the profile
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2042,7 +2116,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
/* Set the recipe ID bit in the bitmask to let the device know which
* profile we are associating the recipe to
*/
- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+ cmd->recipe_assoc = cpu_to_le64(r_assoc);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2051,12 +2125,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* ice_aq_get_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe bitmap filled in and returned as the response
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2069,12 +2143,24 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
return status;
}
/**
+ * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported
+ * @hw: pointer to the hardware structure
+ */
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->flash.nvm;
+
+ hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) ||
+ nvm->major > 0x4;
+}
+
+/**
* ice_alloc_recipe - add recipe resource
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
@@ -2083,12 +2169,16 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
int status;
sw_buf->num_elems = cpu_to_le16(1);
- sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
- ICE_AQC_RES_TYPE_S) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE);
+ if (hw->recp_reuse)
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED;
+ else
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
if (!status)
@@ -2098,6 +2188,70 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
}
/**
+ * ice_free_recipe_res - free recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to free
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
+{
+ return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+}
+
+/**
+ * ice_release_recipe_res - disassociate and free recipe resource
+ * @hw: pointer to the hardware structure
+ * @recp: the recipe struct resource to disassociate and free
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_release_recipe_res(struct ice_hw *hw,
+ struct ice_sw_recipe *recp)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ struct ice_switch_info *sw = hw->switch_info;
+ u64 recp_assoc;
+ u32 rid, prof;
+ int status;
+
+ for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ for_each_set_bit(prof, recipe_to_profile[rid],
+ ICE_MAX_NUM_PROFILES) {
+ status = ice_aq_get_recipe_to_profile(hw, prof,
+ &recp_assoc,
+ NULL);
+ if (status)
+ return status;
+
+ bitmap_from_arr64(r_bitmap, &recp_assoc,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_to_arr64(&recp_assoc, r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ ice_aq_map_recipe_to_profile(hw, prof,
+ recp_assoc, NULL);
+
+ clear_bit(rid, profile_to_recipe[prof]);
+ clear_bit(prof, recipe_to_profile[rid]);
+ }
+
+ status = ice_free_recipe_res(hw, rid);
+ if (status)
+ return status;
+
+ sw->recp_list[rid].recp_created = false;
+ sw->recp_list[rid].adv_rule = false;
+ memset(&sw->recp_list[rid].lkup_exts, 0,
+ sizeof(sw->recp_list[rid].lkup_exts));
+ clear_bit(rid, recp->r_bitmap);
+ }
+
+ return 0;
+}
+
+/**
* ice_get_recp_to_prof_map - updates recipe to profile mapping
* @hw: pointer to hardware structure
*
@@ -2108,6 +2262,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 i;
for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
@@ -2115,8 +2270,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
continue;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_copy(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
@@ -2144,6 +2300,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* @recps: struct that we need to populate
* @rid: recipe ID that we are populating
* @refresh_required: true if we should get recipe to profile mapping from FW
+ * @is_add: true when the recipe is being added
*
* This function is used to populate all the necessary entries into our
* bookkeeping so that we have a current list of all the recipes that are
@@ -2151,7 +2308,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
*/
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
- bool *refresh_required)
+ bool *refresh_required, bool is_add)
{
DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
struct ice_aqc_recipe_data_elem *tmp;
@@ -2268,8 +2425,12 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
}
- if (!is_root)
+ if (!is_root) {
+ if (hw->recp_reuse && is_add)
+ recps[idx].recp_created = true;
+
continue;
+ }
/* Only do the following for root recipes entries */
memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
@@ -2293,7 +2454,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
- recps[rid].recp_created = true;
+ if (is_add)
+ recps[rid].recp_created = true;
err_unroll:
kfree(tmp);
@@ -2444,6 +2606,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
+
+ if (fi->flag & ICE_FLTR_TX_ONLY)
+ fi->lan_en = false;
}
/**
@@ -3819,6 +3984,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
+ f_info.flag |= ICE_FLTR_TX_ONLY;
}
f_list_entry.fltr_info = f_info;
@@ -4526,6 +4692,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
@@ -4559,6 +4726,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
@@ -4571,12 +4739,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
* @rinfo: information regarding the rule e.g. priority and action info
+ * @is_add: true when the recipe is being added
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- const struct ice_adv_rule_info *rinfo)
+ const struct ice_adv_rule_info *rinfo, bool is_add)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4590,11 +4759,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
* entry update it in our SW bookkeeping and continue with the
* matching.
*/
- if (!recp[i].recp_created)
+ if (hw->recp_reuse) {
if (ice_get_recp_frm_fw(hw,
hw->switch_info->recp_list, i,
- &refresh_required))
+ &refresh_required, is_add))
continue;
+ }
/* Skip inverse action recipes */
if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
@@ -5266,6 +5436,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_GTPC:
prof_type = ICE_PROF_TUN_GTPC;
break;
+ case ICE_SW_TUN_PFCP:
+ prof_type = ICE_PROF_TUN_PFCP;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -5276,6 +5449,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
}
/**
+ * ice_subscribe_recipe - subscribe to an existing recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
+ int status;
+
+ /* Prepare buffer to allocate resource */
+ sw_buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
+ sw_buf->res_type = cpu_to_le16(res_type);
+
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
+
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
+
+ return status;
+}
+
+/**
+ * ice_subscribable_recp_shared - share an existing subscribable recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ */
+static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
+{
+ struct ice_sw_recipe *recps = hw->switch_info->recp_list;
+ u16 sub_rid;
+
+ for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_subscribe_recipe(hw, sub_rid);
+}
+
+/**
* ice_add_adv_recipe - Add an advanced recipe that is not part of the default
* @hw: pointer to hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5297,6 +5513,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
+ u16 rid_tmp;
u8 i;
if (!lkups_cnt)
@@ -5374,10 +5591,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo);
- if (*rid < ICE_MAX_NUM_RECIPES)
+ *rid = ice_find_recp(hw, lkup_exts, rinfo, true);
+ if (*rid < ICE_MAX_NUM_RECIPES) {
/* Success if found a recipe that match the existing criteria */
+ if (hw->recp_reuse)
+ ice_subscribable_recp_shared(hw, *rid);
+
goto err_unroll;
+ }
rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
@@ -5390,26 +5611,28 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
list_for_each_entry(fvit, &rm->fv_list, list_entry) {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 j;
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
ice_release_change_lock(hw);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
/* Update profile to recipe bitmap array */
bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5423,6 +5646,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*rid = rm->root_rid;
memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
sizeof(*lkup_exts));
+ goto err_unroll;
+
+err_free_recipe:
+ if (hw->recp_reuse) {
+ for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ if (!ice_free_recipe_res(hw, rid_tmp))
+ clear_bit(rid_tmp, rm->r_bitmap);
+ }
+ }
+
err_unroll:
list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
list_del(&r_entry->l_entry);
@@ -5548,6 +5781,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_SW_TUN_VXLAN:
match |= ICE_PKT_TUN_UDP;
break;
+ case ICE_SW_TUN_PFCP:
+ match |= ICE_PKT_PFCP;
+ break;
default:
break;
}
@@ -5688,6 +5924,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PFCP:
+ len = sizeof(struct ice_pfcp_hdr);
+ break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
@@ -6436,7 +6675,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6480,14 +6719,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_aqc_opc_remove_sw_rules, NULL);
if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
+ struct ice_sw_recipe *r_list = sw->recp_list;
mutex_lock(rule_lock);
list_del(&list_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
devm_kfree(ice_hw_to_dev(hw), list_elem);
mutex_unlock(rule_lock);
- if (list_empty(&sw->recp_list[rid].filt_rules))
- sw->recp_list[rid].adv_rule = false;
+ if (list_empty(&r_list[rid].filt_rules)) {
+ r_list[rid].adv_rule = false;
+
+ /* All rules for this recipe are now removed */
+ if (hw->recp_reuse)
+ ice_release_recipe_res(hw,
+ &r_list[rid]);
+ }
}
kfree(s_rule);
}
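
The recipe-to-profile association above now travels as a single 64-bit word (cpu_to_le64()/le64_to_cpu() at the admin-queue boundary) instead of a raw byte-array memcpy, and the driver converts between that word and a kernel bitmap with bitmap_from_arr64()/bitmap_to_arr64(). A small sketch of that round trip is below; DEMO_MAX_RECIPES and the helper name are hypothetical, while the bitmap helpers are the real ones used in the hunks above.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_MAX_RECIPES 64

/* Unpack the firmware's u64 association word, flip one recipe bit with the
 * normal bitmap helpers, then pack it back for the next AQ command.
 */
static u64 demo_assoc_add_recipe(u64 recp_assoc, unsigned int rid)
{
	DECLARE_BITMAP(r_bitmap, DEMO_MAX_RECIPES);

	bitmap_from_arr64(r_bitmap, &recp_assoc, DEMO_MAX_RECIPES);
	__set_bit(rid, r_bitmap);
	bitmap_to_arr64(&recp_assoc, r_bitmap, DEMO_MAX_RECIPES);

	return recp_assoc;
}
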
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index db7e501b7e0a..ad98e98c812d 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,8 +8,9 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_ONLY BIT(2)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -21,6 +22,8 @@
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_PROFID_IPV4_PFCP_NODE 79
+#define ICE_PROFID_IPV6_PFCP_SESSION 82
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
@@ -424,10 +427,11 @@ int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd);
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b890410a2bc0..8bd24b33f3a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -35,7 +37,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -138,6 +143,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
+ case TNL_PFCP:
+ return ICE_PFCP;
default:
return 0;
}
@@ -157,6 +164,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
+ case TNL_PFCP:
+ return ICE_SW_TUN_PFCP;
default:
return ICE_NON_TUN;
}
@@ -219,8 +228,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
- (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -237,6 +245,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+ struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.pfcp_hdr;
+ hdr_m = &list[i].m_u.pfcp_hdr;
+ list[i].type = ICE_PFCP;
+
+ hdr_h->flags = fltr->pfcp_meta_keys.type;
+ hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+ hdr_h->seid = fltr->pfcp_meta_keys.seid;
+ hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -363,12 +387,20 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
- headers = &tc_fltr->inner_headers;
- inner = true;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (tc_fltr->tunnel_type != TNL_PFCP) {
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ }
}
if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -622,6 +654,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
+ if (netif_is_pfcp(tunnel_dev))
+ return TNL_PFCP;
return TNL_LAST;
}
@@ -635,13 +669,19 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev)
return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
}
-static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
+static int ice_tc_setup_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev,
+ enum ice_sw_fwd_act_type action)
{
struct ice_repr *repr;
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+ return -EINVAL;
+ }
+
+ fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
ice_is_port_repr_netdev(target_dev)) {
@@ -689,41 +729,6 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
return 0;
}
-static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
-{
- struct ice_repr *repr;
-
- fltr->action.fltr_act = ICE_MIRROR_PACKET;
-
- if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
- repr = ice_netdev_to_repr(filter_dev);
-
- fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
struct ice_tc_flower_fltr *fltr,
struct flow_action_entry *act)
@@ -739,16 +744,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
break;
case FLOW_ACTION_REDIRECT:
- err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_FWD_TO_VSI);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED:
- err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_MIRROR_PACKET);
if (err)
return err;
+
break;
default:
@@ -772,7 +780,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -820,6 +828,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1401,7 +1410,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
}
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
@@ -1412,7 +1422,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
- fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+ fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ fltr->tunnel_type == TNL_PFCP) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->pfcp_meta_keys, match.key->data,
+ sizeof(struct pfcp_metadata));
+ memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+ sizeof(struct pfcp_metadata));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
}
return 0;
@@ -1473,15 +1497,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return err;
}
- /* header pointers should point to the inner headers, outer
- * header were already set by ice_parse_tunnel_attr
- */
- headers = &fltr->inner_headers;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (fltr->tunnel_type != TNL_PFCP) {
+ /* Header pointers should point to the inner headers,
+		 * outer headers were already set by
+ * ice_parse_tunnel_attr().
+ */
+ headers = &fltr->inner_headers;
+ }
} else if (dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
@@ -1627,6 +1658,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ fltr->extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 65d387163a46..d84f153517ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -4,6 +4,9 @@
#ifndef _ICE_TC_LIB_H_
#define _ICE_TC_LIB_H_
+#include <linux/bits.h>
+#include <net/pfcp.h>
+
#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
@@ -22,7 +25,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
-#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
@@ -34,6 +37,7 @@
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
+#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
__be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
+ struct pfcp_metadata pfcp_meta_keys;
+ struct pfcp_metadata pfcp_meta_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97d41d6ebf1f..8bb743f78fcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1051,8 +1051,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index af955b0e5dc5..feba314a3fe4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -365,6 +365,7 @@ struct ice_rx_ring {
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index f8f1d2bdc1be..2719f0e20933 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/filter.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -39,30 +40,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
-{
- struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
* ice_get_rx_hash - get RX hash value from descriptor
* @rx_desc: specific descriptor
*
@@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
const union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
return;
hash = ice_get_rx_hash(rx_desc);
if (likely(hash))
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -114,34 +93,26 @@ static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
- struct ice_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
- rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
/* Start with CHECKSUM_NONE and by default csum_level = 0 */
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
return;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
ring->vsi->back->hw_rx_eipe_error++;
@@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -236,7 +198,16 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) {
+ struct net_device *netdev = ice_eswitch_get_target(rx_ring,
+ rx_desc);
+
+ if (ice_is_port_repr_netdev(netdev))
+ ice_repr_inc_rx_stats(netdev, skb->len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ } else {
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ }
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -527,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
return 0;
}
-/* Define a ptype index -> XDP hash type lookup table.
- * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
- * avoiding possible copy-paste errors.
- */
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
-
-/* A few supplementary definitions for when XDP hash types do not coincide
- * with what can be generated from ptype definitions
- * by means of preprocessor concatenation.
- */
-#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
-#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
-
-static const enum xdp_rss_hash_type
-ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
- ICE_PTYPES
-};
-
-#undef XDP_RSS_L3_NONE
-#undef XDP_RSS_L4_NONE
-#undef XDP_RSS_TYPE_PAY2
-#undef XDP_RSS_TYPE_PAY3
-#undef XDP_RSS_TYPE_PAY4
-
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
/**
* ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
* @eop_desc: End of Packet descriptor
@@ -570,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
static enum xdp_rss_hash_type
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
{
- u16 ptype = ice_get_ptype(eop_desc);
-
- if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
- return 0;
-
- return ice_ptype_to_xdp_hash[ptype];
+ return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9ff92dba5823..f0796a93f428 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -150,7 +150,6 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
- ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -204,6 +203,7 @@ struct ice_phy_info {
enum ice_fltr_ptype {
/* NONE - used for undef/error */
ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_ETH,
ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
@@ -296,6 +296,7 @@ struct ice_hw_common_caps {
bool pcie_reset_avoidance;
/* Post update reset restriction */
bool reset_restrict_support;
+ bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -849,6 +850,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
+ u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */
+
/* Tx Scheduler values */
u8 num_tx_sched_layers;
u8 num_tx_sched_phys_layers;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 21d26e19338a..48a8d462d76a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -259,20 +259,18 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
int err;
if (WARN_ON(!vsi))
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_NO_INIT;
+ vsi->flags = ICE_VSI_FLAG_NO_INIT;
ice_vsi_decfg(vsi);
ice_fltr_remove_all(vsi);
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
if (err) {
dev_err(ice_pf_to_dev(pf),
"Failed to reconfigure the VF%u's VSI, error %d\n",
@@ -856,6 +854,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@@ -867,11 +870,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
- lockdep_assert_held(&vf->cfg_lock);
-
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -956,14 +954,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
return err;
}
@@ -992,10 +990,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
/* assign default capabilities */
vf->spoofchk = true;
- vf->num_vf_qs = vfs->num_qps_per;
ice_vc_set_default_allowlist(vf);
ice_virtchnl_set_dflt_ops(vf);
+ /* set default number of MSI-X */
+ vf->num_msix = vfs->num_msix_per;
+ vf->num_vf_qs = vfs->num_qps_per;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1240,7 +1241,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_CTRL;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 80dc4bcdd3a4..b3e1bdcb80f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vsi->back;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
-
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
+ if (ice_is_dvm_ena(&pf->hw)) {
vlan_ops->add_vlan = noop_vlan_arg;
vlan_ops->del_vlan = noop_vlan_arg;
vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ /* setup outer VLAN ops */
+ vlan_ops = &vsi->outer_vlan_ops;
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ } else {
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1ff9818b4c84..1c6ce0c4ed4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1505,13 +1505,12 @@ error_param:
* ice_cfg_interrupt
* @vf: pointer to the VF info
* @vsi: the VSI being configured
- * @vector_id: vector ID
* @map: vector map for mapping vectors to queues
* @q_vector: structure for interrupt vector
* configure the IRQ to queue map
*/
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
struct virtchnl_vector_map *map,
struct ice_q_vector *q_vector)
{
@@ -1531,7 +1530,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->rx.itr_idx);
}
@@ -1545,7 +1545,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->tx.itr_idx);
}
@@ -1619,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* lookout for the invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
if (v_ret)
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 4a6c850d83ac..7aae7fdcfcdb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -72,7 +72,6 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
ice_pf_vsi_init_vlan_ops(vsi);
break;
case ICE_VSI_VF:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1857220d27fe..7541f223bf4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -555,8 +555,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -879,7 +878,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp);
if (!first) {
first = xdp;
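
Two conversions from the ice_xsk.c hunk above recur throughout the rest of this series: napi_alloc_skb() supplies the atomic allocation flags internally, so the explicit GFP arguments that used to go to __napi_alloc_skb() are no longer needed, and xsk_buff_dma_sync_for_cpu() has dropped its pool argument since the buffer can reach its pool on its own. A minimal sketch of the resulting zero-copy Rx helpers, assuming a hypothetical demo driver (only the library calls mirror the hunks above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

/* a minimal sketch of the converted zero-copy Rx path; everything named
 * demo_* is a placeholder, only the library calls mirror the hunks above
 */
static struct sk_buff *demo_construct_skb_zc(struct napi_struct *napi,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* atomic allocation flags are implied, no __napi_alloc_skb() wrapper */
	skb = napi_alloc_skb(napi, totalsize);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
	return skb;
}

static void demo_prepare_zc_buffer(struct xdp_buff *xdp, unsigned int size)
{
	xsk_buff_set_size(xdp, size);
	/* the buffer already knows its pool, hence the single argument */
	xsk_buff_dma_sync_for_cpu(xdp);
}
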
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1175..6972d728431c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -376,7 +376,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_tx_count);
if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count)
+ new_rx_count == vport->rxq_desc_count &&
+ kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5d3532c27d57..52ceda6306a3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2234,7 +2234,7 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
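
The WRITE_ONCE() store to netdev->mtu above (and the many identical conversions further down) reflects that the MTU is apparently read locklessly elsewhere, so writes are annotated and lockless readers are expected to pair with READ_ONCE(), as libeth does later in this series with READ_ONCE(pp->netdev->mtu). A minimal sketch of the pairing, with demo_* placeholders:

#include <linux/if_ether.h>
#include <linux/netdevice.h>

/* a minimal sketch of the annotation pairing; demo_* names are placeholders */
static int demo_change_mtu(struct net_device *netdev, int new_mtu)
{
	/* writer side: ndo_change_mtu() runs under RTNL */
	WRITE_ONCE(netdev->mtu, new_mtu);
	return 0;
}

static unsigned int demo_rx_buf_len(const struct net_device *netdev)
{
	/* reader side: may run without RTNL, so pair with READ_ONCE() */
	return READ_ONCE(netdev->mtu) + ETH_HLEN + ETH_FCS_LEN;
}
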
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6dd7a66bb897..285da2177ee4 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2941,6 +2941,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
* pass it up the stack as-is.
@@ -2951,8 +2953,6 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
@@ -3005,8 +3005,7 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
/* prefetch first cache line of first page */
net_prefetch(va);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
- GFP_ATOMIC);
+ skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE);
if (unlikely(!skb)) {
idpf_rx_put_page(rx_buf);
@@ -3060,7 +3059,7 @@ static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
struct sk_buff *skb;
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
+ skb = napi_alloc_skb(&rxq->q_vector->napi, size);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493faa75..3d046b81e507 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -8,6 +8,8 @@
#include <net/tcp.h>
#include <net/netdev_queues.h>
+#include "virtchnl2_lan_desc.h"
+
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 4a3c4454d25a..63deb120359c 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -4,6 +4,8 @@
#ifndef _VIRTCHNL2_H_
#define _VIRTCHNL2_H_
+#include <linux/if_ether.h>
+
/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
* VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
* and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
@@ -17,8 +19,6 @@
* must remain unchanged over time, so we specify explicit values for all enums.
*/
-#include "virtchnl2_lan_desc.h"
-
/* This macro is used to generate compilation errors if a structure
* is not exactly the correct length.
*/
@@ -555,7 +555,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
struct virtchnl2_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_reg_chunk chunks[];
+ struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
@@ -703,7 +703,7 @@ struct virtchnl2_config_tx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[10];
- struct virtchnl2_txq_info qinfo[];
+ struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
@@ -782,7 +782,7 @@ struct virtchnl2_config_rx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[18];
- struct virtchnl2_rxq_info qinfo[];
+ struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
@@ -868,7 +868,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
struct virtchnl2_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
- struct virtchnl2_vector_chunk vchunks[];
+ struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
@@ -912,7 +912,7 @@ struct virtchnl2_rss_lut {
__le16 lut_entries_start;
__le16 lut_entries;
u8 pad[4];
- __le32 lut[];
+ __le32 lut[] __counted_by_le(lut_entries);
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
@@ -977,7 +977,7 @@ struct virtchnl2_ptype {
u8 ptype_id_8;
u8 proto_id_count;
__le16 pad;
- __le16 proto_id[];
+ __le16 proto_id[] __counted_by(proto_id_count);
} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
@@ -1104,7 +1104,7 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- u8 key_flex[];
+ u8 key_flex[] __counted_by_le(key_len);
} __packed;
VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
@@ -1131,7 +1131,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
struct virtchnl2_queue_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_chunk chunks[];
+ struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
@@ -1195,7 +1195,7 @@ struct virtchnl2_queue_vector_maps {
__le32 vport_id;
__le16 num_qv_maps;
u8 pad[10];
- struct virtchnl2_queue_vector qv_maps[];
+ struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
@@ -1247,7 +1247,7 @@ struct virtchnl2_mac_addr_list {
__le32 vport_id;
__le16 num_mac_addr;
u8 pad[2];
- struct virtchnl2_mac_addr mac_addr_list[];
+ struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
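
The __counted_by_le() annotations tie each flexible array to the little-endian element count stored in the same structure, so compiler and runtime bounds checks know how many elements are valid. A minimal sketch of the annotation next to a matching allocation, using hypothetical demo_* types:

#include <asm/byteorder.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_chunk {			/* hypothetical element type */
	__le64 start;
	__le32 len;
	u8 pad[4];
};

struct demo_chunks {			/* hypothetical container */
	__le16 num_chunks;
	u8 pad[6];
	/* bounds checks use le16_to_cpu(num_chunks) elements */
	struct demo_chunk chunks[] __counted_by_le(num_chunks);
};

static struct demo_chunks *demo_chunks_alloc(u16 n)
{
	struct demo_chunks *c;

	c = kzalloc(struct_size(c, chunks, n), GFP_KERNEL);
	if (!c)
		return NULL;

	/* set the counter before the array is ever indexed */
	c->num_chunks = cpu_to_le16(n);
	return c;
}
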
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 99977a22b843..61d72250c0ed 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3272,19 +3272,6 @@ static int igb_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
return IGB_RETA_SIZE;
@@ -3508,8 +3495,6 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
.set_priv_flags = igb_set_priv_flags,
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
.get_link_ksettings = igb_get_link_ksettings,
.set_link_ksettings = igb_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a3f100769e39..fce2930ae6af 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -106,8 +106,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
-static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void igb_remove(struct pci_dev *pdev);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
@@ -178,20 +176,6 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
-static int igb_suspend(struct device *);
-static int igb_resume(struct device *);
-static int igb_runtime_suspend(struct device *dev);
-static int igb_runtime_resume(struct device *dev);
-static int igb_runtime_idle(struct device *dev);
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igb_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
- SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
- igb_runtime_idle)
-};
-#endif
-static void igb_shutdown(struct pci_dev *);
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -219,19 +203,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-static struct pci_driver igb_driver = {
- .name = igb_driver_name,
- .id_table = igb_pci_tbl,
- .probe = igb_probe,
- .remove = igb_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igb_pm_ops,
-#endif
- .shutdown = igb_shutdown,
- .sriov_configure = igb_pci_sriov_configure,
- .err_handler = &igb_err_handler
-};
-
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -647,6 +618,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
return adapter->netdev;
}
+static struct pci_driver igb_driver;
+
/**
* igb_init_module - Driver Registration Routine
*
@@ -2624,6 +2597,9 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -6668,7 +6644,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igb_up(adapter);
@@ -9453,12 +9429,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igb_suspend(struct device *dev)
+static int igb_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
+static int __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9514,12 +9490,12 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
return err;
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int igb_resume(struct device *dev)
{
return __igb_resume(dev, false);
}
-static int __maybe_unused igb_runtime_idle(struct device *dev)
+static int igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -9530,12 +9506,12 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int __maybe_unused igb_runtime_suspend(struct device *dev)
+static int igb_runtime_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
-static int __maybe_unused igb_runtime_resume(struct device *dev)
+static int igb_runtime_resume(struct device *dev)
{
return __igb_resume(dev, true);
}
@@ -10157,4 +10133,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
+
+static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
+ igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle);
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = igb_remove,
+ .driver.pm = pm_ptr(&igb_pm_ops),
+ .shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
+ .err_handler = &igb_err_handler
+};
+
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0cf310e6f7b..7661edd7d0f2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2434,7 +2434,7 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igbvf_up(adapter);
@@ -2470,7 +2470,7 @@ static int igbvf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused igbvf_resume(struct device *dev_d)
+static int igbvf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2957,7 +2957,7 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
-static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
@@ -2965,7 +2965,7 @@ static struct pci_driver igbvf_driver = {
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
- .driver.pm = &igbvf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&igbvf_pm_ops),
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
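
The igb and igbvf power-management rework above follows one recipe: build the dev_pm_ops with a DEFINE_*_DEV_PM_OPS() helper so the callbacks always stay visible to the compiler, then reference the ops through pm_ptr() (runtime PM, as igb does) or pm_sleep_ptr() (system sleep only, as igbvf does), which evaluate to NULL when the corresponding config option is off. That is what lets the #ifdef CONFIG_PM blocks and __maybe_unused annotations go away. A minimal sketch of the sleep-only variant, with demo_* placeholders:

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

/* callbacks are always compiled; unused ones become dead code, not #ifdefs */
static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	/* NULL when CONFIG_PM_SLEEP is off, so no #ifdef is needed */
	.driver.pm	= pm_sleep_ptr(&demo_pm_ops),
};
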
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 90316dc58630..8b14c029eda1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -72,13 +72,46 @@ struct igc_rx_packet_stats {
u64 other_packets;
};
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+ bool xsk_pending_ts;
+};
+
struct igc_tx_timestamp_request {
- struct sk_buff *skb; /* reference to the packet being timestamped */
+ union { /* reference to the packet being timestamped */
+ struct sk_buff *skb;
+ struct igc_tx_buffer *xsk_tx_buffer;
+ };
+ enum igc_tx_buffer_type buffer_type;
unsigned long start; /* when the tstamp request started (jiffies) */
u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */
u32 regl; /* which TXSTMPL_{X} register should be used */
u32 regh; /* which TXSTMPH_{X} register should be used */
u32 flags; /* flags that should be added to the tx_buffer */
+ u8 xsk_queue_index; /* Tx queue that is requesting the timestamp */
+ struct xsk_tx_metadata_compl xsk_meta; /* ref to xsk Tx metadata */
};
struct igc_inline_rx_tstamps {
@@ -298,6 +331,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
+ struct igc_led_classdev *leds;
};
void igc_up(struct igc_adapter *adapter);
@@ -322,6 +356,9 @@ void igc_disable_tx_ring(struct igc_ring *ring);
void igc_enable_tx_ring(struct igc_ring *ring);
int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+/* AF_XDP TX metadata operations */
+extern const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops;
+
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
@@ -507,32 +544,6 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-enum igc_tx_buffer_type {
- IGC_TX_BUFFER_TYPE_SKB,
- IGC_TX_BUFFER_TYPE_XDP,
- IGC_TX_BUFFER_TYPE_XSK,
-};
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
-struct igc_tx_buffer {
- union igc_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- enum igc_tx_buffer_type type;
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdpf;
- };
- unsigned int bytecount;
- u16 gso_segs;
- __be16 protocol;
-
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
struct igc_rx_buffer {
union {
struct {
@@ -556,6 +567,13 @@ struct igc_xdp_buff {
struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
};
+struct igc_metadata_request {
+ struct igc_tx_buffer *tx_buffer;
+ struct xsk_tx_metadata *meta;
+ struct igc_ring *tx_ring;
+ u32 cmd_type;
+};
+
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -723,6 +741,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 1a64f1ca6ca8..f2c4f1966bb0 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1711,21 +1711,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
-static int igc_ethtool_begin(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igc_ethtool_complete(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -2025,8 +2010,6 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_priv_flags = igc_ethtool_set_priv_flags,
.get_eee = igc_ethtool_get_eee,
.set_eee = igc_ethtool_set_eee,
- .begin = igc_ethtool_begin,
- .complete = igc_ethtool_complete,
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
index bf240c5daf86..3929b25b6ae6 100644
--- a/drivers/net/ethernet/intel/igc/igc_leds.c
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
pci_dev_id(adapter->pdev), index);
}
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
- struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
led_cdev->hw_control_get = igc_led_hw_control_get;
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
- devm_led_classdev_register(&netdev->dev, led_cdev);
+ return led_classdev_register(&netdev->dev, led_cdev);
}
int igc_led_setup(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
struct igc_led_classdev *leds;
- int i;
+ int i, err;
mutex_init(&adapter->led_mutex);
- leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- for (i = 0; i < IGC_NUM_LEDS; i++)
- igc_setup_ldev(leds + i, netdev, i);
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2e1cfbd82f4f..12f004f46082 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1642,10 +1642,6 @@ done:
if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* FIXME: add support for retrieving timestamps from
- * the other timer registers before skipping the
- * timestamping request.
- */
unsigned long flags;
u32 tstamp_flags;
@@ -2716,8 +2712,7 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -2817,7 +2812,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
@@ -2878,6 +2873,89 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector,
q_vector->tx.total_packets += packets;
}
+static void igc_xsk_request_timestamp(void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ struct igc_tx_timestamp_request *tstamp;
+ u32 tx_flags = IGC_TX_FLAGS_TSTAMP;
+ struct igc_adapter *adapter;
+ unsigned long lock_flags;
+ bool found = false;
+ int i;
+
+ if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+ adapter = netdev_priv(tx_ring->netdev);
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags);
+
+ /* Search for available tstamp regs */
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ tstamp = &adapter->tx_tstamp[i];
+
+ /* tstamp->skb and tstamp->xsk_tx_buffer are in a union.
+ * When tstamp->skb is NULL,
+ * tstamp->xsk_tx_buffer is NULL as well.
+ * This means that the particular tstamp reg
+ * is not occupied by another packet.
+ */
+ if (!tstamp->skb) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Return if no available tstamp regs */
+ if (!found) {
+ adapter->tx_hwtstamp_skipped++;
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock,
+ lock_flags);
+ return;
+ }
+
+ tstamp->start = jiffies;
+ tstamp->xsk_queue_index = tx_ring->queue_index;
+ tstamp->xsk_tx_buffer = meta_req->tx_buffer;
+ tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK;
+
+ /* Hold the transmit completion until timestamp is ready */
+ meta_req->tx_buffer->xsk_pending_ts = true;
+
+ /* Keep the pointer to tx_timestamp, which is located in the XDP
+ * metadata area. It is where the Tx hardware timestamp value
+ * will be stored.
+ */
+ xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta);
+
+ /* Set timestamp bit based on the _TSTAMP(_X) bit. */
+ tx_flags |= tstamp->flags;
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags);
+ }
+}
+
+static u64 igc_xsk_fill_timestamp(void *_priv)
+{
+ return *(u64 *)_priv;
+}
+
+const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = igc_xsk_request_timestamp,
+ .tmo_fill_timestamp = igc_xsk_fill_timestamp,
+};
+
static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
struct xsk_buff_pool *pool = ring->xsk_pool;
@@ -2899,24 +2977,34 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
budget = igc_desc_unused(ring);
while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
- u32 cmd_type, olinfo_status;
+ struct igc_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
+ u32 olinfo_status;
dma_addr_t dma;
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- xdp_desc.len;
+ meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS |
+ IGC_TXD_DCMD | xdp_desc.len;
olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+ bi = &ring->tx_buffer_info[ntu];
+
+ meta_req.tx_ring = ring;
+ meta_req.tx_buffer = bi;
+ meta_req.meta = meta;
+ xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
+ &meta_req);
tx_desc = IGC_TX_DESC(ring, ntu);
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- bi = &ring->tx_buffer_info[ntu];
bi->type = IGC_TX_BUFFER_TYPE_XSK;
bi->protocol = 0;
bi->bytecount = xdp_desc.len;
@@ -2979,6 +3067,13 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ /* Hold the completions while there's a pending tx hardware
+ * timestamp request from XDP Tx metadata.
+ */
+ if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
+ tx_buffer->xsk_pending_ts)
+ break;
+
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
@@ -5180,7 +5275,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
igc_down(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igc_up(adapter);
@@ -5934,15 +6029,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
- if (err)
- goto err_set_queues;
-
clear_bit(__IGC_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++)
@@ -5963,8 +6049,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return IGC_SUCCESS;
-err_set_queues:
- igc_free_irq(adapter);
err_req_irq:
igc_release_hw_control(adapter);
igc_power_down_phy_copper_base(&adapter->hw);
@@ -5981,6 +6065,17 @@ err_setup_tx:
int igc_open(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
+ adapter->num_rx_queues);
+ if (err) {
+ netdev_err(netdev, "error setting real queue count\n");
+ return err;
+ }
+
return __igc_open(netdev, false);
}
@@ -6807,6 +6902,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->netdev_ops = &igc_netdev_ops;
netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -6932,8 +7028,6 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
- igc_ptp_init(adapter);
-
igc_tsn_clear_schedule(adapter);
/* reset the hardware with the new settings */
@@ -6955,6 +7049,9 @@ static int igc_probe(struct pci_dev *pdev,
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
+ /* do hw tstamp init after resetting */
+ igc_ptp_init(adapter);
+
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
@@ -7025,6 +7122,9 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS))
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -7109,8 +7209,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
-#ifdef CONFIG_PM
-static int __maybe_unused igc_runtime_suspend(struct device *dev)
+static int igc_runtime_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
@@ -7145,7 +7244,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igc_resume(struct device *dev)
+static int igc_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7187,23 +7286,21 @@ static int __maybe_unused igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
- rtnl_lock();
- if (!err && netif_running(netdev))
+ if (netif_running(netdev)) {
err = __igc_open(netdev, true);
-
- if (!err)
- netif_device_attach(netdev);
- rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+ }
return err;
}
-static int __maybe_unused igc_runtime_resume(struct device *dev)
+static int igc_runtime_resume(struct device *dev)
{
return igc_resume(dev);
}
-static int __maybe_unused igc_suspend(struct device *dev)
+static int igc_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
@@ -7218,7 +7315,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev)
return -EBUSY;
}
-#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
@@ -7333,22 +7429,16 @@ static const struct pci_error_handlers igc_err_handler = {
.resume = igc_io_resume,
};
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
- SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
- igc_runtime_idle)
-};
-#endif
+static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
+ igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle);
static struct pci_driver igc_driver = {
.name = igc_driver_name,
.id_table = igc_pci_tbl,
.probe = igc_probe,
.remove = igc_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igc_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&igc_pm_ops),
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
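
The AF_XDP Tx timestamp support added above is built on three generic helpers that already appear in this hunk: xsk_buff_get_metadata() fetches the metadata the application attached to a Tx descriptor, xsk_tx_metadata_request() lets the driver act on the requested offloads while building its own descriptor, and xsk_tx_metadata_complete() (used in the igc_ptp.c hunk below) writes the result back once the hardware timestamp has been read out. A stripped-down sketch of the driver-side contract, with demo_* placeholders for the hardware-specific parts:

#include <linux/netdevice.h>
#include <net/xdp_sock_drv.h>

/* completion side: the void *priv is whatever the driver hands to
 * xsk_tx_metadata_complete(); here it is assumed to point straight at
 * the 64-bit hardware timestamp
 */
static u64 demo_tmo_fill_timestamp(void *priv)
{
	return *(u64 *)priv;
}

/* request side: called from xsk_tx_metadata_request() while the driver
 * builds the Tx descriptor; a real driver would reserve a timestamp
 * register here and hold back the Tx completion until it is read out
 */
static void demo_tmo_request_timestamp(void *priv)
{
}

static const struct xsk_tx_metadata_ops demo_xsk_tx_metadata_ops = {
	.tmo_request_timestamp	= demo_tmo_request_timestamp,
	.tmo_fill_timestamp	= demo_tmo_fill_timestamp,
};

/* wired up in probe, as the igc hunk above does:
 *	netdev->xsk_tx_metadata_ops = &demo_xsk_tx_metadata_ops;
 */
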
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 885faaa7b9de..1bb026232efc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <net/xdp_sock_drv.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -545,6 +546,30 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
wr32(IGC_TSYNCRXCTL, val);
}
+static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp)
+{
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ /* Release the transmit completion */
+ tstamp->xsk_tx_buffer->xsk_pending_ts = false;
+
+ /* Note: tstamp->skb and tstamp->xsk_tx_buffer are in a union.
+ * By setting tstamp->xsk_tx_buffer to NULL, tstamp->skb will
+ * become NULL as well.
+ */
+ tstamp->xsk_tx_buffer = NULL;
+ tstamp->buffer_type = 0;
+
+ /* Trigger txrx interrupt for transmit completion */
+ igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
+
+ return;
+ }
+
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
+}
+
static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
{
unsigned long flags;
@@ -555,8 +580,8 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
@@ -657,8 +682,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
static void igc_ptp_tx_timeout(struct igc_adapter *adapter,
struct igc_tx_timestamp_request *tstamp)
{
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
+
adapter->tx_hwtstamp_timeouts++;
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
@@ -729,10 +755,21 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- tstamp->skb = NULL;
+ /* Copy the tx hardware timestamp into xdp metadata or skb */
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ struct xsk_buff_pool *xsk_pool;
+
+ xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool;
+ if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) {
+ xsk_tx_metadata_complete(&tstamp->xsk_meta,
+ &igc_xsk_tx_metadata_ops,
+ &shhwtstamps.hwtstamp);
+ }
+ } else {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 13a6fca31004..866024f2b9ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
- if (unlikely(!algo)) {
- err = -ENOENT;
- goto err_xs;
- }
-
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
- xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f985252c8c8d..094653e81b97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6847,7 +6847,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -6974,7 +6974,7 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-static int __maybe_unused ixgbe_resume(struct device *dev_d)
+static int ixgbe_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -7082,7 +7082,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused ixgbe_suspend(struct device *dev_d)
+static int ixgbe_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
@@ -10061,15 +10061,10 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- int status;
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
+ int status = ixgbe_configure_bridge_mode(adapter, mode);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
- status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
return status;
@@ -11588,14 +11583,14 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
.remove = ixgbe_remove,
- .driver.pm = &ixgbe_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbe_pm_ops),
.shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
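
nla_for_each_nested_type() walks only the nested attributes of the requested type, replacing the open-coded nla_type() filter removed above. A minimal sketch of the same loop in isolation, assuming br_spec is an already-validated IFLA_AF_SPEC nest:

#include <linux/if_bridge.h>
#include <net/netlink.h>

static int demo_parse_bridge_modes(const struct nlattr *br_spec)
{
	struct nlattr *attr;
	int rem;

	/* only IFLA_BRIDGE_MODE attributes are visited */
	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		u16 mode = nla_get_u16(attr);

		if (mode != BRIDGE_MODE_VEB && mode != BRIDGE_MODE_VEPA)
			return -EINVAL;
	}

	return 0;
}
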
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ed440dd0c4f9..897fe357b65b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2179,7 +2179,6 @@ enum {
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index d34d715c59eb..3e3b471e53f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -304,7 +303,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c960017a6de..b938dc06045d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4292,7 +4292,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -4300,7 +4300,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
+static int ixgbevf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -4317,7 +4317,7 @@ static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused ixgbevf_resume(struct device *dev_d)
+static int ixgbevf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4854,7 +4854,7 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
@@ -4863,7 +4863,7 @@ static struct pci_driver ixgbevf_driver = {
.remove = ixgbevf_remove,
/* Power Management Hooks */
- .driver.pm = &ixgbevf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
new file mode 100644
index 000000000000..480293b71dbc
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBETH
+ tristate
+ select PAGE_POOL
+ help
+ libeth is a common library containing routines shared between several
+ drivers, but not yet promoted to the generic kernel API.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
new file mode 100644
index 000000000000..cb99203d1dd2
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBETH) += libeth.o
+
+libeth-objs += rx.o
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
new file mode 100644
index 000000000000..6221b88c34ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/**
+ * libeth_rx_hw_len - get the actual buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ *
+ * Return: HW-writeable length per buffer to pass to the HW, accounting for:
+ * the MTU of the netdev in @pp, the required HW alignment, the minimum and
+ * maximum allowed values, and the system page size.
+ */
+static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+{
+ u32 len;
+
+ len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
+ len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_fq_create - create a PP with the default libeth settings
+ * @fq: buffer queue struct to fill
+ * @napi: &napi_struct covering this PP (no usage outside its poll loops)
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = LIBETH_RX_PAGE_ORDER,
+ .pool_size = fq->count,
+ .nid = fq->nid,
+ .dev = napi->dev->dev.parent,
+ .netdev = napi->dev,
+ .napi = napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = LIBETH_SKB_HEADROOM,
+ };
+ struct libeth_fqe *fqes;
+ struct page_pool *pool;
+
+ /* HW-writeable / syncable length per one page */
+ pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
+
+ /* HW-writeable length per buffer */
+ fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
+ fq->buf_len));
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
+ if (!fqes)
+ goto err_buf;
+
+ fq->fqes = fqes;
+ fq->pp = pool;
+
+ return 0;
+
+err_buf:
+ page_pool_destroy(pool);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
+
+/**
+ * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
+ * @fq: buffer queue to process
+ */
+void libeth_rx_fq_destroy(struct libeth_fq *fq)
+{
+ kvfree(fq->fqes);
+ page_pool_destroy(fq->pp);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
+
+/**
+ * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
+ * @page: page to recycle
+ *
+ * To be used on exception paths or in rare cases not requiring fast inline recycling.
+ */
+void libeth_rx_recycle_slow(struct page *page)
+{
+ page_pool_recycle_direct(page->pp, page);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
+
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+static const u16 libeth_rx_pt_xdp_oip[] = {
+ [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4,
+ [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6,
+};
+
+static const u16 libeth_rx_pt_xdp_iprot[] = {
+ [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP,
+ [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP,
+ [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP,
+ [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP,
+ [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE,
+};
+
+static const u16 libeth_rx_pt_xdp_pl[] = {
+ [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4,
+};
+
+/**
+ * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
+ * @pt: PT structure to evaluate
+ *
+ * Generates ```hash_type``` field with XDP RSS type values from the parsed
+ * packet parameters if they're obtained dynamically at runtime.
+ */
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
+{
+ pt->hash_type = 0;
+ pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
+ pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
+ pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
+
+/* Module */
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Common Ethernet library");
+MODULE_LICENSE("GPL");
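
From a consumer's point of view, libeth_rx_fq_create() takes a partially filled struct libeth_fq (element count, NUMA node, and an optional per-descriptor length cap in buf_len) and fills in the page_pool, the FQE array and the final buf_len/truesize. A minimal usage sketch based on the fields visible above, with demo_* placeholders:

#include <linux/numa.h>
#include <net/libeth/rx.h>

static int demo_create_fill_queue(struct libeth_fq *fq,
				  struct napi_struct *napi, u32 ring_len)
{
	fq->count   = ring_len;		/* page_pool size */
	fq->nid     = NUMA_NO_NODE;	/* no NUMA preference */
	fq->buf_len = 0;		/* 0: no per-descriptor cap, see the
					 * max_len ?: U32_MAX fallback above
					 */

	/* on success, fq->pp, fq->fqes, fq->buf_len and fq->truesize are set */
	return libeth_rx_fq_create(fq, napi);
}

static void demo_destroy_fill_queue(struct libeth_fq *fq)
{
	libeth_rx_fq_destroy(fq);
}
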
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
new file mode 100644
index 000000000000..33aff6bc8f81
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBIE
+ tristate
+ select LIBETH
+ help
+ libie (Intel Ethernet library) is a common library built on top of
+ libeth and containing vendor-specific routines shared between several
+ Intel Ethernet drivers.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
new file mode 100644
index 000000000000..bf42c5aeeedd
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBIE) += libie.o
+
+libie-objs += rx.o
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
new file mode 100644
index 000000000000..38201ee1e891
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <linux/net/intel/libie/rx.h>
+
+/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
+ * bitfield struct.
+ */
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions by means of preprocessor
+ * concatenation.
+ */
+#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L4 XDP_RSS_L4
+
+#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \
+ .outer_ip = LIBETH_RX_PT_OUTER_##oip, \
+ .outer_frag = LIBETH_RX_PT_##ofrag, \
+ .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \
+ .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \
+ .tunnel_end_frag = LIBETH_RX_PT_##tefr, \
+ .inner_prot = LIBETH_RX_PT_INNER_##iprot, \
+ .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \
+ .hash_type = XDP_RSS_L3_##oip | \
+ XDP_RSS_L4_##iprot | \
+ XDP_RSS_TYPE_##pl, \
+ }
+
+#define LIBIE_RX_PT_UNUSED { }
+
+#define __LIBIE_RX_PT_L2(iprot, pl) \
+ LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl)
+#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2)
+#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2)
+#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3)
+
+#define LIBIE_RX_PT_IP_FRAG(oip) \
+ LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3)
+#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3)
+#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4)
+
+#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \
+ LIBIE_RX_PT_UNUSED, \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP)
+
+/* IPv oip --> tun --> IPv ver */
+#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \
+ LIBIE_RX_PT_IP_NOF(oip, tun, ver)
+
+/* Non Tunneled IPv oip */
+#define LIBIE_RX_PT_IP_RAW(oip) \
+ LIBIE_RX_PT_IP_FRAG(oip), \
+ LIBIE_RX_PT_IP_NOF(oip, NONE, NONE)
+
+/* IPv oip --> tun --> { IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_TUN(oip, tun) \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6)
+
+/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_GRE(oip, tun) \
+ LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \
+ LIBIE_RX_PT_IP_TUN(oip, tun)
+
+/* Non Tunneled IPv oip
+ * IPv oip --> { IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 }
+ */
+#define LIBIE_RX_PT_IP(oip) \
+ LIBIE_RX_PT_IP_RAW(oip), \
+ LIBIE_RX_PT_IP_TUN(oip, IP), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN)
+
+/* Lookup table mapping for O(1) parsing */
+const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
+ /* L2 packet types */
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_TS,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+
+ LIBIE_RX_PT_IP(4),
+ LIBIE_RX_PT_IP(6),
+};
+EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_LICENSE("GPL");
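
Because the lookup table is generated purely by preprocessor concatenation, it can help to expand one entry by hand. The TCP-over-plain-IPv4 slot, LIBIE_RX_PT_IP_L4(4, NONE, NONE, TCP), resolves to the initializer below (XDP_RSS_TYPE_L4 is aliased to XDP_RSS_L4 above):

	/* LIBIE_RX_PT_IP_L4(4, NONE, NONE, TCP) expands to: */
	{
		.outer_ip	 = LIBETH_RX_PT_OUTER_IPV4,
		.outer_frag	 = LIBETH_RX_PT_NOT_FRAG,
		.tunnel_type	 = LIBETH_RX_PT_TUNNEL_IP_NONE,
		.tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_NONE,
		.tunnel_end_frag = LIBETH_RX_PT_NOT_FRAG,
		.inner_prot	 = LIBETH_RX_PT_INNER_TCP,
		.payload_layer	 = LIBETH_RX_PT_PAYLOAD_L4,
		.hash_type	 = XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4,
	}
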
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1732ec3c3dbd..b06e24562973 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2301,7 +2301,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
{
struct jme_adapter *jme = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
netdev_update_features(netdev);
jme_restart_rx_engine(jme);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 1d5b7bb6380f..5352fee62d2b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -519,7 +519,7 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
struct ltq_etop_priv *priv = netdev_priv(dev);
unsigned long flags;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8bd4def3622e..07904a528f21 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -419,7 +419,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int curr_desc;
int ret = 0;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
priv->rx_buf_size = xrx200_buffer_size(new_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
@@ -440,7 +440,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
buff = ch_rx->rx_buff[ch_rx->dma.desc];
ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
- net_dev->mtu = old_mtu;
+ WRITE_ONCE(net_dev->mtu, old_mtu);
priv->rx_buf_size = xrx200_buffer_size(old_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0bdc06d253d..f35ae2c88091 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2562,7 +2562,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 40a5f1431e4e..41894834fb53 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3259,7 +3259,8 @@ static void mvneta_link_change(struct mvneta_port *pp)
{
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
+ phylink_pcs_change(&pp->phylink_pcs,
+ !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -3860,7 +3861,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return -EINVAL;
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
if (!netif_running(dev)) {
if (pp->bm_priv)
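
phylink_pcs_change() is the PCS-level counterpart of phylink_mac_change(): the link interrupts handled in the mvneta and mvpp2 hunks are raised by the in-band PCS rather than the MAC, so the event is now attributed to the phylink_pcs that observed it. A minimal sketch of the pattern, with demo_* placeholders and made-up register names:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/phylink.h>

#define DEMO_STATUS	0x10		/* placeholder register offset */
#define DEMO_LINK_UP	BIT(0)		/* placeholder link-up bit */

struct demo_port {			/* hypothetical port state */
	void __iomem *base;
	struct phylink_pcs pcs;
};

static irqreturn_t demo_link_isr(int irq, void *dev_id)
{
	struct demo_port *port = dev_id;
	bool up = readl(port->base + DEMO_STATUS) & DEMO_LINK_UP;

	/* report the event against the PCS that observed it */
	phylink_pcs_change(&port->pcs, up);

	return IRQ_HANDLED;
}
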
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 23adf53c2aa1..e91486c48de3 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1375,7 +1375,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
}
out_set:
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
dev->wanted_features = dev->features;
netdev_update_features(dev);
@@ -3434,12 +3434,13 @@ static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
mvpp2_isr_handle_ptp_queue(port, 1);
}
-static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+static void mvpp2_isr_handle_link(struct mvpp2_port *port,
+ struct phylink_pcs *pcs, bool link)
{
struct net_device *dev = port->dev;
if (port->phylink) {
- phylink_mac_change(port->phylink, link);
+ phylink_pcs_change(pcs, link);
return;
}
@@ -3472,7 +3473,7 @@ static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
if (val & MVPP22_XLG_INT_STAT_LINK) {
val = readl(port->base + MVPP22_XLG_STATUS);
link = (val & MVPP22_XLG_STATUS_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
}
}
@@ -3488,7 +3489,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
if (val & MVPP22_GMAC_INT_STAT_LINK) {
val = readl(port->base + MVPP2_GMAC_STATUS0);
link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
}
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 7c9faa714a10..549436efc204 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1096,7 +1096,7 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
true);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index 2e2c3be8a0b4..e6eb98d70f3c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
#include "octep_config.h"
#include "octep_main.h"
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index dd49d0b8b494..7e6771c9cdbb 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -881,7 +881,7 @@ static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
index 2eab21e43048..445b626efe11 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3c0f55b3e48e..27935c54b91b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -24,6 +24,8 @@
#define DRV_NAME "Marvell-CGX/RPM"
#define DRV_STRING "Marvell CGX/RPM Driver"
+#define CGX_RX_STAT_GLOBAL_INDEX 9
+
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format(Mbps) */
@@ -701,6 +703,30 @@ u64 cgx_features_get(void *cgxd)
return ((struct cgx *)cgxd)->hw_features;
}
+int cgx_stats_reset(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+ for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
if (!linfo->fec)
@@ -808,6 +834,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
@@ -1783,6 +1814,7 @@ static struct mac_ops cgx_mac_ops = {
.pfc_config = cgx_lmac_pfc_config,
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
.mac_reset = cgx_lmac_reset,
+ .mac_stats_reset = cgx_stats_reset,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 6f7d1dee5830..dc9ace30554a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -141,6 +141,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_reset(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 0b4cba03f2e8..9ffc6790c513 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -132,6 +132,7 @@ struct mac_ops {
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
+ int (*mac_stats_reset)(void *cgxd, int lmac_id);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eb2a20b5a0d0..4a77f6fe2622 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -174,6 +174,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
cgx_set_link_mode_rsp) \
M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
@@ -1213,10 +1214,8 @@ struct nix_bp_cfg_req {
/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
-/* PF can be mapped to either CGX or LBK interface,
- * so maximum 64 channels are possible.
- */
-#define NIX_MAX_BPID_CHAN 64
+/* Maximum channels any single NIX interface can have */
+#define NIX_MAX_BPID_CHAN 256
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 76218f1cb459..1b34cf9c9703 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -38,6 +38,7 @@ static struct mac_ops rpm_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
};
static struct mac_ops rpm2_mac_ops = {
@@ -70,6 +71,7 @@ static struct mac_ops rpm2_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
};
bool is_dev_rpm2(void *rpmd)
@@ -443,6 +445,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
return 0;
}
+int rpm_stats_reset(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ return 0;
+}
+
u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
{
rpm_t *rpm = rpmd;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index b79cfbc6f877..34b11deb0f3c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -85,6 +85,8 @@
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_CMD_CLEAR_RX BIT_ULL(30)
+#define RPMX_CMD_CLEAR_TX BIT_ULL(31)
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
@@ -134,4 +136,5 @@ int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
+int rpm_stats_reset(void *rpmd, int lmac_id);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6b61..266ecbc1b97a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ if (iter >= MAX_LMAC_COUNT)
+ continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -602,6 +604,35 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ struct mac_ops *mac_ops;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ parent_pf = &rvu->pf[pf];
+ /* To ensure that resetting CGX stats won't affect VF stats,
+ * check whether the CGX is used only by the PF interface.
+ * If not, return.
+ */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_stats_reset(cgxd, lmac);
+}
+
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a..881d704644fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
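The rvu_dbg_qsize_write() hunk above swaps memdup_user() plus a manual NUL terminator (and an off-by-one read of count + 1 bytes) for memdup_user_nul(). A minimal sketch of the same pattern in a hypothetical debugfs write handler:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical debugfs write handler: memdup_user_nul() copies exactly
 * @count bytes and appends a trailing '\0', so the old pattern of
 * memdup_user(buffer, count + 1) followed by cmd_buf[count] = '\0'
 * (which also read one byte too many from userspace) is not needed.
 */
static ssize_t foo_dbg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *cmd_buf, *nl;

	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	nl = strchr(cmd_buf, '\n');	/* trim at the first newline, if any */
	if (nl)
		*nl = '\0';

	/* ... parse cmd_buf here ... */

	kfree(cmd_buf);
	return count;
}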
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 96c04f7d93f8..7498ab429963 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1202,7 +1202,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1256,7 +1257,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
}
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1310,7 +1312,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32
}
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1367,7 +1370,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d39001cdc707..00af8888e329 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4819,18 +4819,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e350242bbafb..e8b73b9d75e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1657,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
- u16 offset;
+ u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu);
- kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c181e7aa9eb6..150635de2bd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1187,6 +1187,8 @@ static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = NIX_RX_ACTIONOP_UCAST;
}
+ if (req->match_id)
+ action.match_id = req->match_id;
}
entry->action = *(u64 *)&action;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 7e16a341ec58..24fbbef265a6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -363,6 +363,7 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
+ refcount_t mark_flows;
struct list_head flow_list_tc;
bool ntuple;
};
@@ -465,6 +466,7 @@ struct otx2_nic {
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
+#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
u64 flags;
u64 *cq_op_addr;
@@ -961,6 +963,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_reset_mac_stats(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 4e1130496573..99ddf31269d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 97a71e9b8563..bc5819237ed7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -252,6 +252,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ refcount_set(&flow_cfg->mark_flows, 1);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b40bd0e46751..f5bce3e326cc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -67,7 +67,7 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2_open(netdev);
@@ -450,7 +450,6 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
struct mbox_msghdr *msg = NULL;
int offset, vf_idx, id, err;
struct otx2_mbox_dev *mdev;
- struct mbox_hdr *req_hdr;
struct otx2_mbox *mbox;
struct mbox *vf_mbox;
struct otx2_nic *pf;
@@ -461,9 +460,8 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox;
mdev = &mbox->dev[vf_idx];
- req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
@@ -494,7 +492,6 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
struct otx2_nic *pf = vf_mbox->pfvf;
struct otx2_mbox_dev *mdev;
int offset, id, vf_idx = 0;
- struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
@@ -502,8 +499,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox_up;
mdev = &mbox->dev[vf_idx];
- rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -1150,6 +1146,23 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
return err;
}
+int otx2_reset_mac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
struct msg_req *msg;
@@ -1873,9 +1886,17 @@ int otx2_open(struct net_device *netdev)
vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+ int name_len;
- snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
- qidx);
+ name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
+ pf->netdev->name, qidx);
+ if (name_len >= NAME_SIZE) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ err = -EINVAL;
+ goto err_free_cints;
+ }
err = request_irq(pci_irq_vector(pf->pdev, vec),
otx2_cq_intr_handler, 0, irq_name,
@@ -1933,7 +1954,7 @@ int otx2_open(struct net_device *netdev)
* mcam entries are enabled to receive the packets. Hence disable the
* packet I/O.
*/
- if (err == EIO)
+ if (err == -EIO)
goto err_disable_rxtx;
else if (err)
goto err_tx_stop_queues;
@@ -3038,6 +3059,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
+ /* reset CGX/RPM MAC stats */
+ otx2_reset_mac_stats(pf);
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 87bdb93cb066..e63cc1eb6d89 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -511,7 +511,15 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
nr_police++;
break;
case FLOW_ACTION_MARK:
+ if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported");
+ return -EOPNOTSUPP;
+ }
mark = act->mark;
+ req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
+ refcount_inc(&nic->flow_cfg->mark_flows);
break;
case FLOW_ACTION_RX_QUEUE_MAPPING:
@@ -689,20 +697,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
- return -EOPNOTSUPP;
- }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
@@ -710,6 +717,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return -EOPNOTSUPP;
}
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -1184,6 +1195,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ /* Disable the TC MARK flag if there are no rules with a skbedit mark action */
+ if (flow_node->req.match_id)
+ if (!refcount_dec_and_test(&flow_cfg->mark_flows))
+ nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;
+
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f828d32737af..a16e9f244117 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -380,6 +380,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
+ skb->mark = parse->match_id;
+
skb_mark_for_recycle(skb);
napi_gro_frags(napi);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index a82ffca8ce1b..3f1d2655ff77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -62,6 +62,9 @@
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
+/* Packet mark mask */
+#define OTX2_RX_MATCH_ID_MASK 0x0000ffff
+
struct queue_stats {
u64 bytes;
u64 pkts;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index cf0aa16d7540..99fcc5661674 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -456,7 +456,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2vf_open(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d22a..070711df612e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);
@@ -544,6 +545,20 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
return node;
}
+static struct otx2_qos_node
+*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
+{
+ struct otx2_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
+ if (node->qid == qid)
+ break;
+ }
+
+ return node;
+}
+
static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
@@ -916,6 +931,7 @@ static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
otx2_qos_disable_sq(pfvf, qid);
pfvf->qos.qid_to_sqmap[qid] = node->schq;
+ otx2_qos_txschq_config(pfvf, node);
otx2_qos_enable_sq(pfvf, qid);
}
@@ -1474,13 +1490,45 @@ out:
return ret;
}
+static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
+{
+ int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);
+
+ return last == pfvf->hw.tc_tx_queues ? 0 : last + 1;
+}
+
+static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
+ int qid)
+{
+ struct otx2_qos_node *tmp;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list)
+ if (tmp->level == NIX_TXSCH_LVL_MDQ) {
+ otx2_qos_txschq_config(pfvf, tmp);
+ pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
+ }
+}
+
static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
struct netlink_ext_ack *extack)
{
struct otx2_qos_node *node, *parent;
int dwrr_del_node = false;
+ u16 qid, moved_qid;
u64 prio;
- u16 qid;
netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
@@ -1516,6 +1564,37 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
if (!parent->child_static_cnt)
parent->max_static_prio = 0;
+ moved_qid = otx2_qos_cur_leaf_nodes(pfvf);
+
+ /* last node just deleted */
+ if (moved_qid == 0 || moved_qid == qid)
+ return 0;
+
+ moved_qid--;
+
+ node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
+ if (!node)
+ return 0;
+
+ /* stop traffic to the old queue and disable
+ * SQ associated with it
+ */
+ node->qid = OTX2_QOS_QID_INNER;
+ __clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
+ otx2_qos_disable_sq(pfvf, moved_qid);
+
+ otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);
+
+ /* enable SQ associated with qid and
+ * update the node
+ */
+ otx2_cfg_smq(pfvf, node, qid);
+
+ otx2_qos_enable_sq(pfvf, qid);
+ __set_bit(qid, pfvf->qos.qos_sq_bmap);
+ node->qid = qid;
+
+ *classid = node->classid;
return 0;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 8b9455d8a4f7..418101a93149 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -229,6 +229,10 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_control(f_rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index fc6f7d2746e8..197198ba61b1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -419,15 +419,6 @@ struct prestera_msg_vtcam_destroy_req {
__le32 vtcam_id;
};
-struct prestera_msg_vtcam_rule_add_req {
- struct prestera_msg_cmd cmd;
- __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 vtcam_id;
- __le32 prio;
- __le32 n_act;
-};
-
struct prestera_msg_vtcam_rule_del_req {
struct prestera_msg_cmd cmd;
__le32 vtcam_id;
@@ -471,6 +462,16 @@ struct prestera_msg_acl_action {
};
};
+struct prestera_msg_vtcam_rule_add_req {
+ struct prestera_msg_cmd cmd;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
+ struct prestera_msg_acl_action actions_msg[] __counted_by_le(n_act);
+};
+
struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
__le32 client;
@@ -702,12 +703,6 @@ struct prestera_msg_flood_domain_destroy_req {
__le32 flood_domain_idx;
};
-struct prestera_msg_flood_domain_ports_set_req {
- struct prestera_msg_cmd cmd;
- __le32 flood_domain_idx;
- __le32 ports_num;
-};
-
struct prestera_msg_flood_domain_ports_reset_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -725,6 +720,13 @@ struct prestera_msg_flood_domain_port {
__le16 port_type;
};
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+ struct prestera_msg_flood_domain_port ports[] __counted_by_le(ports_num);
+};
+
struct prestera_msg_mdb_create_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -1371,23 +1373,18 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
struct prestera_acl_hw_action_info *act,
u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions_msg;
struct prestera_msg_vtcam_rule_add_req *req;
struct prestera_msg_vtcam_resp resp;
- void *buff;
- u32 size;
+ size_t size;
int err;
u8 i;
- size = sizeof(*req) + sizeof(*actions_msg) * n_act;
-
- buff = kzalloc(size, GFP_KERNEL);
- if (!buff)
+ size = struct_size(req, actions_msg, n_act);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
req->n_act = __cpu_to_le32(n_act);
- actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
memcpy(req->key, key, sizeof(req->key));
@@ -1395,7 +1392,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
/* put acl actions into the message */
for (i = 0; i < n_act; i++) {
- err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ err = prestera_acl_rule_add_put_action(&req->actions_msg[i],
&act[i]);
if (err)
goto free_buff;
@@ -1411,7 +1408,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
*rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
- kfree(buff);
+ kfree(req);
return err;
}
@@ -2461,14 +2458,13 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
{
struct prestera_flood_domain_port *flood_domain_port;
struct prestera_msg_flood_domain_ports_set_req *req;
- struct prestera_msg_flood_domain_port *ports;
struct prestera_switch *sw = domain->sw;
struct prestera_port *port;
u32 ports_num = 0;
- int buf_size;
- void *buff;
+ size_t buf_size;
u16 lag_id;
int err;
+ int i = 0;
list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
flood_domain_port_node)
@@ -2477,15 +2473,11 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (!ports_num)
return -EINVAL;
- buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
-
- buff = kmalloc(buf_size, GFP_KERNEL);
- if (!buff)
+ buf_size = struct_size(req, ports, ports_num);
+ req = kmalloc(buf_size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
- ports = buff + sizeof(*req);
-
req->flood_domain_idx = __cpu_to_le32(domain->idx);
req->ports_num = __cpu_to_le32(ports_num);
@@ -2494,31 +2486,30 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (netif_is_lag_master(flood_domain_port->dev)) {
if (prestera_lag_id(sw, flood_domain_port->dev,
&lag_id)) {
- kfree(buff);
+ kfree(req);
return -EINVAL;
}
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
- ports->lag_id = __cpu_to_le16(lag_id);
+ req->ports[i].lag_id = __cpu_to_le16(lag_id);
} else {
port = prestera_port_dev_lower_find(flood_domain_port->dev);
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
- ports->dev_num = __cpu_to_le32(port->dev_id);
- ports->port_num = __cpu_to_le32(port->hw_id);
+ req->ports[i].dev_num = __cpu_to_le32(port->dev_id);
+ req->ports[i].port_num = __cpu_to_le32(port->hw_id);
}
- ports->vid = __cpu_to_le16(flood_domain_port->vid);
-
- ports++;
+ req->ports[i].vid = __cpu_to_le16(flood_domain_port->vid);
+ i++;
}
err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
&req->cmd, buf_size);
- kfree(buff);
+ kfree(req);
return err;
}
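The prestera_hw.c hunks above replace open-coded sizeof(*req) + sizeof(*elem) * n allocations with flexible array members annotated __counted_by_le() and sized via struct_size(). A minimal sketch of the shape of that conversion, using hypothetical foo_req/foo_elem types:

#include <asm/byteorder.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical foo_req/foo_elem types: the variable-length tail becomes a
 * flexible array member annotated with __counted_by_le(), and the
 * allocation size comes from struct_size() rather than open-coded
 * sizeof arithmetic.
 */
struct foo_elem {
	__le32 val;
};

struct foo_req {
	__le32 n_elem;
	struct foo_elem elems[] __counted_by_le(n_elem);
};

static struct foo_req *foo_req_alloc(u32 n)
{
	struct foo_req *req;

	req = kzalloc(struct_size(req, elems, n), GFP_KERNEL);
	if (!req)
		return NULL;

	req->n_elem = cpu_to_le32(n);	/* counter must match the elems[] bound */
	return req;
}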
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4fb886c57cd7..63ae01954dfc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -489,7 +489,7 @@ static int prestera_port_change_mtu(struct net_device *dev, int mtu)
if (err)
return err;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -821,7 +821,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
if (port->state_mac.oper) {
if (port->phy_link)
- phylink_mac_change(port->phy_link, true);
+ phylink_pcs_change(&port->phylink_pcs, true);
else
netif_carrier_on(port->dev);
@@ -829,7 +829,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
queue_delayed_work(prestera_wq, caching_dw, 0);
} else {
if (port->phy_link)
- phylink_mac_change(port->phy_link, false);
+ phylink_pcs_change(&port->phylink_pcs, false);
else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
netif_carrier_off(port->dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index cc2a9ae794be..39d9bf82c115 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -96,7 +96,7 @@ struct prestera_sdma {
struct dma_pool *desc_pool;
struct work_struct tx_work;
struct napi_struct rx_napi;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
u32 map_addr;
u64 dma_mask;
/* protect SDMA with concurrent access from multiple CPUs */
@@ -654,13 +654,21 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
if (err)
goto err_evt_register;
- init_dummy_netdev(&sdma->napi_dev);
+ sdma->napi_dev = alloc_netdev_dummy(0);
+ if (!sdma->napi_dev) {
+ dev_err(dev, "not able to initialize dummy device\n");
+ err = -ENOMEM;
+ goto err_alloc_dummy;
+ }
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
+ netif_napi_add(sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
+err_alloc_dummy:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
err_evt_register:
err_tx_init:
prestera_sdma_tx_fini(sdma);
@@ -677,6 +685,7 @@ static void prestera_sdma_switch_fini(struct prestera_switch *sw)
napi_disable(&sdma->rx_napi);
netif_napi_del(&sdma->rx_napi);
+ free_netdev(sdma->napi_dev);
prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
prestera_rxtx_handle_event);
prestera_sdma_tx_fini(sdma);
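The prestera_sdma hunks above move the NAPI anchor from an embedded struct net_device to one obtained with alloc_netdev_dummy(), which then has to be released with free_netdev() on the teardown path. A minimal sketch of the allocate/add/teardown sequence with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical foo_* driver state: the dummy device used purely as a NAPI
 * anchor is allocated with alloc_netdev_dummy() and must be released with
 * free_netdev() once the NAPI instance has been removed.
 */
struct foo_priv {
	struct net_device *napi_dev;
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	return 0;	/* no work to do in this sketch */
}

static int foo_napi_init(struct foo_priv *priv)
{
	priv->napi_dev = alloc_netdev_dummy(0);
	if (!priv->napi_dev)
		return -ENOMEM;

	netif_napi_add(priv->napi_dev, &priv->napi, foo_poll);
	napi_enable(&priv->napi);
	return 0;
}

static void foo_napi_fini(struct foo_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	free_netdev(priv->napi_dev);	/* the pointer form needs an explicit free */
}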
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index dd6ca2e4fd51..1a59c952aa01 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1188,7 +1188,7 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
set_port_config_ext(pep);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1b43704baceb..fcfb34561882 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2905,13 +2905,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
int err;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
skge_down(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = skge_up(dev);
if (err)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 07720841a8d7..a7a16eac1891 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -34,6 +34,7 @@
#include <linux/mii.h>
#include <linux/of_net.h>
#include <linux/dmi.h>
+#include <linux/skbuff_ref.h>
#include <asm/irq.h>
@@ -2383,7 +2384,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u32 imask;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
}
@@ -2406,7 +2407,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index caa13b9cedff..cae46290a7ae 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
- .rx_ptr = 0x6100,
- .rx_cnt_cfg = 0x6104,
- .pcrx_ptr = 0x6108,
- .glo_cfg = 0x6204,
- .rst_idx = 0x6208,
- .delay_irq = 0x620c,
- .irq_status = 0x6220,
- .irq_mask = 0x6228,
- .adma_rx_dbg0 = 0x6238,
- .int_grp = 0x6250,
+ .rx_ptr = 0x4100,
+ .rx_cnt_cfg = 0x4104,
+ .pcrx_ptr = 0x4108,
+ .glo_cfg = 0x4204,
+ .rst_idx = 0x4208,
+ .delay_irq = 0x420c,
+ .irq_status = 0x4220,
+ .irq_mask = 0x4228,
+ .adma_rx_dbg0 = 0x4238,
+ .int_grp = 0x4250,
},
.qdma = {
.qtx_cfg = 0x4400,
@@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1139,7 +1139,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
@@ -1155,17 +1155,17 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
struct mtk_tx_dma_v2 *txd;
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd = eth->scratch_ring + i * soc->tx.desc_size;
txd->txd1 = addr;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ (i + 1) * soc->tx.desc_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
@@ -1416,7 +1416,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1457,7 +1457,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1470,7 +1470,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1513,7 +1513,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1522,7 +1522,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1547,7 +1547,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1654,7 +1654,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1710,7 +1710,7 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1822,7 +1822,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -1841,7 +1841,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1879,7 +1879,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1890,7 +1890,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2021,14 +2021,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
@@ -2140,7 +2140,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2156,7 +2156,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2280,7 +2280,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2331,7 +2331,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2421,7 +2421,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2437,10 +2437,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2449,7 +2449,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2572,14 +2572,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2634,15 +2634,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}
if (!ring->dma)
@@ -2653,7 +2653,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2690,7 +2690,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2744,7 +2744,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2761,7 +2761,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3124,7 +3124,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3174,7 +3174,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3200,9 +3200,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3220,10 +3220,10 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3387,7 +3387,7 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3471,7 +3471,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -3893,7 +3893,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3947,9 +3947,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -4055,7 +4055,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
}
mtk_set_mcr_max_rx(mac, length);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -4188,6 +4188,8 @@ static int mtk_free_dev(struct mtk_eth *eth)
metadata_dst_free(eth->dsa_meta[i]);
}
+ free_netdev(eth->dummy_dev);
+
return 0;
}
@@ -4983,9 +4985,14 @@ static int mtk_probe(struct platform_device *pdev)
/* we run 2 devices on the same DMA ring so we need a dummy device
* for NAPI to work
*/
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
+ eth->dummy_dev = alloc_netdev_dummy(0);
+ if (!eth->dummy_dev) {
+ err = -ENOMEM;
+ dev_err(eth->dev, "failed to allocated dummy device\n");
+ goto err_unreg_netdev;
+ }
+ netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
schedule_delayed_work(&eth->reset.monitor_work,
@@ -4993,6 +5000,8 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_unreg_netdev:
+ mtk_unreg_dev(eth);
err_deinit_ppe:
mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
@@ -5039,11 +5048,15 @@ static const struct mtk_soc_data mt2701_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5059,11 +5072,15 @@ static const struct mtk_soc_data mt7621_data = {
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5081,11 +5098,15 @@ static const struct mtk_soc_data mt7622_data = {
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5102,11 +5123,15 @@ static const struct mtk_soc_data mt7623_data = {
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5121,11 +5146,15 @@ static const struct mtk_soc_data mt7629_data = {
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5143,14 +5172,18 @@ static const struct mtk_soc_data mt7981_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7986_data = {
@@ -5165,14 +5198,18 @@ static const struct mtk_soc_data mt7986_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7988_data = {
@@ -5187,11 +5224,15 @@ static const struct mtk_soc_data mt7988_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5204,11 +5245,15 @@ static const struct mtk_soc_data rt5350_data = {
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 9ae3b8a71d0e..4eab30b44070 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -327,8 +327,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +348,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
@@ -1242,7 +1245,7 @@ struct mtk_eth {
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
int irq[3];
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 6ce0db3a1a92..0acee405a749 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -580,7 +580,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
idle = cur_idle;
entry->data.ib1 &= ~ib1_ts_mask;
- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ entry->data.ib1 |= ib1 & ib1_ts_mask;
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index fbb5e9d5af13..aa262e6f4b85 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -273,6 +273,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
} else {
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index c895e265ae0e..61334a71058c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
- mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
- if (!dev->running)
- return;
-
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5d3fde63b273..4c089cfa027a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1649,7 +1649,7 @@ int mlx4_en_start_port(struct net_device *dev)
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
- dev->mtu = min(dev->mtu, priv->max_mtu);
+ WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu));
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
@@ -2394,7 +2394,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eac49657bd07..8328df8645d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -42,6 +42,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#include <linux/skbuff_ref.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7b02ff61126d..98688e4dbec5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -185,7 +185,8 @@ static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
mlx4_internal_err_reset = ctx->val.vbool;
return 0;
@@ -202,7 +203,8 @@ static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4957412ff1f6..20768ef2e9d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work)
bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
struct mlx5_core_dev *dev;
- unsigned long cb_timeout;
- struct semaphore *sem;
+ unsigned long timeout;
unsigned long flags;
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
+
dev = container_of(cmd, struct mlx5_core_dev, cmd);
- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
- complete(&ent->handling);
- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
- down(sem);
if (!ent->page_queue) {
+ if (down_timeout(&cmd->vars.sem, timeout)) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
+ mlx5_command_str(ent->op), ent->op);
+ if (ent->callback) {
+ ent->callback(-EBUSY, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ cmd_ent_put(ent);
+ } else {
+ ent->ret = -EBUSY;
+ complete(&ent->done);
+ }
+ complete(&ent->slotted);
+ return;
+ }
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
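
The cmd_work_handler() hunk above replaces an unconditional down() on the command semaphore with a bounded wait, so the handler no longer waits indefinitely for a free command slot. A minimal sketch of the underlying primitive (illustrative helper name, standard <linux/semaphore.h> API):

#include <linux/jiffies.h>
#include <linux/semaphore.h>

/* Try to take a slot, giving up after timeout_ms. down_timeout() returns 0
 * on success and -ETIME on timeout; the mlx5 code above reports that
 * failure to its callers as -EBUSY.
 */
static int take_slot_bounded(struct semaphore *sem, unsigned int timeout_ms)
{
        return down_timeout(sem, msecs_to_jiffies(timeout_ms));
}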
@@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work)
ent->ret = -EAGAIN;
complete(&ent->done);
}
- up(sem);
+ up(&cmd->vars.sem);
return;
}
} else {
+ down(&cmd->vars.pages_sem);
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
@@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
+ complete(&ent->slotted);
+
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;
- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
@@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
ent->ret = -ECANCELED;
goto out_err;
}
+
+ wait_for_completion(&ent->slotted);
+
if (cmd->mode == CMD_MODE_POLLING || ent->polling)
wait_for_completion(&ent->done);
else if (!wait_for_completion_timeout(&ent->done, timeout))
@@ -1157,6 +1176,9 @@ out_err:
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
+ } else if (err == -EBUSY) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->polling = force_polling;
init_completion(&ent->handling);
+ init_completion(&ent->slotted);
if (!callback)
init_completion(&ent->done);
@@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
- if (err == -ETIMEDOUT || err == -ECANCELED)
+ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
goto out_free;
ds = ent->ts2 - ent->ts1;
@@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
dev = container_of(cmd, struct mlx5_core_dev, cmd);
eqe = data;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return NOTIFY_DONE;
+
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 09652dc89115..36806e813c33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -143,8 +143,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t average_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t reset_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
@@ -152,6 +152,11 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
+ stats->failed = 0;
+ stats->failed_mbox_status = 0;
+ stats->last_failed_errno = 0;
+ stats->last_failed_mbox_status = 0;
+ stats->last_failed_syndrome = 0;
spin_unlock_irq(&stats->lock);
*pos += count;
@@ -159,11 +164,16 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
return count;
}
-static const struct file_operations stats_fops = {
+static const struct file_operations reset_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = reset_write,
+};
+
+static const struct file_operations average_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
- .write = average_write,
};
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
@@ -228,8 +238,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
+ debugfs_create_file("reset", 0200, stats->root, stats,
+ &reset_fops);
debugfs_create_file("average", 0400, stats->root, stats,
- &stats_fops);
+ &average_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
debugfs_create_u64("failed_mbox_status", 0400, stats->root,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 84db05fb9389..e85fb71bf0b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -320,6 +320,8 @@ struct mlx5e_params {
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
+ bool rx_moder_use_cqe_mode;
+ bool tx_moder_use_cqe_mode;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -430,7 +432,7 @@ struct mlx5e_txqsq {
u16 cc;
u16 skb_fifo_cc;
u32 dma_fifo_cc;
- struct dim dim; /* Adaptive Moderation */
+ struct dim *dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -722,7 +724,7 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
- struct dim dim; /* Dynamic Interrupt Moderation */
+ struct dim *dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog __rcu *xdp_prog;
@@ -797,6 +799,10 @@ struct mlx5e_channel {
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
+
+ /* coalescing configuration */
+ struct dim_cq_moder rx_cq_moder;
+ struct dim_cq_moder tx_cq_moder;
};
struct mlx5e_ptp;
@@ -1040,6 +1046,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
@@ -1060,6 +1071,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode);
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -1087,6 +1102,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
+int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
@@ -1118,6 +1134,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1143,7 +1164,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
@@ -1160,7 +1180,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
- uint32_t stringset, uint8_t *data);
+ u32 stringset, u8 *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
@@ -1180,23 +1200,16 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack);
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
static inline bool
@@ -1222,8 +1235,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_rx_dim_work(struct work_struct *work);
-void mlx5e_tx_dim_work(struct work_struct *work);
void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 874a1016623c..66e719e88503 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -3,6 +3,7 @@
#include "channels.h"
#include "en.h"
+#include "en/dim.h"
#include "en/ptp.h"
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
@@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
*rqn = c->rq.rqn;
return true;
}
+
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ /* If dim is enabled for the channel, reset the dim state so the
+ * collected statistics will be reset. This is useful for
+ * supporting legacy interfaces that allow things like changing
+ * the CQ period mode for all channels without disturbing
+ * individual channel configurations.
+ */
+ if (chs->c[i]->rq.dim) {
+ int err;
+
+ mlx5e_dim_rx_change(&chs->c[i]->rq, false);
+ err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err;
+
+ /* If dim is enabled for the channel, reset the dim
+ * state so the collected statistics will be reset. This
+ * is useful for supporting legacy interfaces that allow
+ * things like changing the CQ period mode for all
+ * channels without disturbing individual channel
+ * configurations.
+ */
+ if (!chs->c[i]->sq[tc].dim)
+ continue;
+
+ mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false);
+ err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 6715aa9383b9..eda80f8c6c02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix,
void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs);
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
new file mode 100644
index 000000000000..110e2c6b7e51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */
+
+#ifndef __MLX5_EN_DIM_H__
+#define __MLX5_EN_DIM_H__
+
+#include <linux/dim.h>
+#include <linux/types.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+/* Forward declarations */
+struct mlx5e_rq;
+struct mlx5e_txqsq;
+struct work_struct;
+
+/* convert a boolean value for cqe mode to the appropriate dim constant

+ * true : DIM_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe)
+{
+ return start_from_cqe ? DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static inline enum mlx5_cq_period_mode
+mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode)
+{
+ switch (cq_period_mode) {
+ case DIM_CQ_PERIOD_MODE_START_FROM_EQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ case DIM_CQ_PERIOD_MODE_START_FROM_CQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ default:
+ WARN_ON_ONCE(true);
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+}
+
+void mlx5e_rx_dim_work(struct work_struct *work);
+void mlx5e_tx_dim_work(struct work_struct *work);
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled);
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled);
+
+#endif /* __MLX5_EN_DIM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index a3f31d9d527e..ec819dfc98be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
@@ -513,77 +514,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
return 0;
}
-static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
-{
- return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->tx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
- } else {
- params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->rx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
- } else {
- params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_tx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_rx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
u32 link_speed = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 9a781f18b57f..749b2ec0436e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
/* Parameter calculations */
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index dbe2b19a9570..b4efc780e297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -292,10 +292,15 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -308,6 +313,17 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
+/* Returns true if FEC can be set for a given link mode. */
+static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
+ enum mlx5e_fec_supported_link_mode link_mode)
+{
+ return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
+ (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+}
+
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -340,6 +356,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
+ break;
default:
return -EINVAL;
}
@@ -381,6 +409,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
+ break;
default:
return -EINVAL;
}
@@ -389,7 +429,6 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -407,7 +446,7 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
u16 fec_caps;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_get_fec_cap_field(out, &fec_caps, i);
@@ -420,7 +459,6 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
u16 *fec_configured_mode)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -445,7 +483,7 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_configured_mode = 0;
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
@@ -489,13 +527,13 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
u16 conf_fec = fec_policy;
u16 fec_caps = 0;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
/* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
* to link modes up to 25G per lane and to
* MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50 G per lane
+ * 50G or 100G per lane
*/
if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d0af7271da34..afd654583b6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -169,6 +169,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
WARN_ON_ONCE(!pos->inuse);
pos->inuse = false;
list_del(&pos->entry);
+ ptpsq->cq_stats->lost_cqe++;
}
spin_unlock_bh(&cqe_list->tracker_list_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 86f1854698b4..883c044852f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
- return fifo->data[fifo->mask & fifo->cc++];
+ fifo->cc++;
}
static inline void
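
Splitting the old pop into a peek plus an explicit pop, as in the hunk above, lets callers inspect the next metadata index without consuming it, so it can be popped only once it is actually used. A generic sketch of the same idea on a power-of-two ring (types and names are illustrative, not the mlx5 structures):

#include <linux/types.h>

struct u8_fifo {
        u8 *data;
        u16 mask;       /* size - 1, size is a power of two */
        u16 pc;         /* producer counter */
        u16 cc;         /* consumer counter */
};

static inline u8 u8_fifo_peek(const struct u8_fifo *fifo)
{
        return fifo->data[fifo->cc & fifo->mask]; /* does not consume */
}

static inline void u8_fifo_pop(struct u8_fifo *fifo)
{
        fifo->cc++; /* consume the entry returned by the last peek */
}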
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e87e26f2c669..6743806b8480 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
+
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 0ab9db319530..22918b2ef7f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ rtnl_lock();
mlx5e_activate_txqsq(sq);
+ rtnl_unlock();
+
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
+ rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
+ rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ rtnl_lock();
mlx5e_activate_priv_channels(priv);
+ rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index bcafb4bf9415..8d9a3b5ec973 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -179,6 +179,13 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
return min_t(u32, rqt_size, max_cap_rqt_size);
}
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+ return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index e0bc30308c77..2f9e04a8418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -38,6 +38,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
unsigned int num_rqns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926340..f66bbc846464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 92065568bb19..6873c1201803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -117,7 +117,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags);
+ u32 tun_type);
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index f1d1e1542e81..878cbdbf5ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -587,7 +587,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags)
+ u32 tun_type)
{
struct ip_tunnel_info *a_info;
struct ip_tunnel_info *b_info;
@@ -596,8 +596,8 @@ bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
return false;
- a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags);
- b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags);
+ a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags);
+ b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags);
/* keys are equal when both don't have any options attached */
if (!a_has_opts && !b_has_opts)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 2bcd10b6d653..bf969212cc77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -106,12 +106,13 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
memset(geneveh, 0, sizeof(*geneveh));
geneveh->ver = MLX5E_GENEVE_VER;
geneveh->opt_len = tun_info->options_len / 4;
- geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags);
+ geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
+ tun_info->key.tun_flags);
mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
geneveh->proto_type = htons(ETH_P_TEB);
- if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) {
if (!geneveh->opt_len)
return -EOPNOTSUPP;
ip_tunnel_info_opts_get(geneveh->options, tun_info);
@@ -188,7 +189,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
/* make sure that we're talking about GENEVE options */
- if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on GENEVE options: option type is not GENEVE");
netdev_warn(priv->netdev,
@@ -337,7 +338,8 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_GENEVE_OPT_BIT);
}
struct mlx5e_tc_tunnel geneve_tunnel = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index ada14f0574dc..579eda89fc76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -31,12 +31,16 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ IP_TUNNEL_DECLARE_FLAGS(unsupp) = { };
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
- if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ __set_bit(IP_TUNNEL_CSUM_BIT, unsupp);
+ __set_bit(IP_TUNNEL_SEQ_BIT, unsupp);
+
+ if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
@@ -44,7 +48,7 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
- if (tun_key->tun_flags & TUNNEL_KEY) {
+ if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}
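
The GENEVE and GRE hunks above and the VXLAN hunks that follow all convert checks on the old __be16 tun_flags bitmask to the bitmap-based IP_TUNNEL_*_BIT API. A minimal sketch of the new style, using only the helpers that appear in these hunks (the wrapper function is invented):

#include <linux/bitops.h>
#include <net/ip_tunnels.h>

/* True if the tunnel key asks for checksum or sequence numbers, which the
 * (hypothetical) hardware in this sketch cannot offload -- the same check
 * the gretap hunk above performs.
 */
static bool my_tun_needs_sw_path(const struct ip_tunnel_key *key)
{
        IP_TUNNEL_DECLARE_FLAGS(unsupported) = { };

        __set_bit(IP_TUNNEL_CSUM_BIT, unsupported);
        __set_bit(IP_TUNNEL_SEQ_BIT, unsupported);

        return ip_tunnel_flags_intersect(key->tun_flags, unsupported);
}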
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index a184d739d5f8..e4e487c8431b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -90,7 +90,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
const struct vxlan_metadata *md;
struct vxlanhdr *vxh;
- if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) &&
e->tun_info->options_len != sizeof(*md))
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
@@ -99,7 +99,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
- if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) {
md = ip_tunnel_info_opts(e->tun_info);
vxlan_build_gbp_hdr(vxh, md);
}
@@ -125,7 +125,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
return -EOPNOTSUPP;
}
@@ -208,7 +208,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_VXLAN_OPT_BIT);
}
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 82b5ca1be4f3..4610621a340e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -565,7 +565,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
linear = !!(dma_len - inline_hdr_sz);
ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
- /* check_result must be 0 if sinfo is passed. */
+ /* check_result must be 0 if xdptxd->has_frags is true. */
if (!check_result) {
int stop_room = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index b8dd74453655..1b7132fa70de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -270,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
/* Possible flows:
@@ -319,7 +319,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 06592b9f0424..9240cfe25d10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+ * and xsk->chunk_size is limited to 65535 bytes.
+ */
+ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index c7d191f66ad1..4f83e3172767 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index a032bff482a6..7e899c716267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag);
+ u32 flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index dd36b04e30a0..92bf3fa44a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -78,13 +78,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
unsigned int i;
if (!priv->ipsec)
- return idx;
+ return;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
@@ -92,14 +89,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
int i;
if (!priv->ipsec)
- return idx;
+ return;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
- mlx5e_ipsec_hw_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+ mlx5e_ipsec_hw_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -115,9 +112,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
@@ -126,9 +121,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR_ATOMIC64(
+ &priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
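
The IPsec stats hunks above (and the ktls/macsec ones that follow) drop the manual idx * ETH_GSTRING_LEN bookkeeping in favor of cursor-style fill helpers. A minimal sketch of the ethtool_puts() side of that pattern (the stat names and function are invented):

#include <linux/ethtool.h>
#include <linux/kernel.h>

static const char * const my_stat_names[] = {
        "rx_ok",
        "rx_err",
};

static void my_fill_strings(u8 **data)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(my_stat_names); i++)
                ethtool_puts(data, my_stat_names[i]); /* copies and advances *data */
}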
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index adc6d8ea0960..07a04a142a2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -95,8 +95,8 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data);
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -144,15 +144,9 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { }
-static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { }
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 7c1c0eb16787..60be2d72eb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -58,35 +58,31 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ktls_sw_stats_desc[i].format);
-
- return n;
+ ethtool_puts(data, mlx5e_ktls_sw_stats_desc[i].format);
}
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- mlx5e_ktls_sw_stats_desc,
- i);
-
- return n;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc, i));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab86c..cc9bcc420032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index 4559ee16a11a..4bb47d48061d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -38,16 +38,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
unsigned int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_macsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_macsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
@@ -56,19 +53,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
macsec_fs = priv->mdev->macsec_fs;
mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
- mlx5e_macsec_hw_stats_desc,
- i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ mlx5_macsec_fs_get_stats(macsec_fs),
+ mlx5e_macsec_hw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c7f542d0b8f0..93cf23278d93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -46,6 +46,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
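The aRFS hunks above drop the priv->state_lock / MLX5E_STATE_OPENED check in the work handler in favour of a dedicated MLX5E_ARFS_STATE_ENABLED bit: mlx5e_arfs_enable() sets it once the tables exist, arfs_del_rules() clears it before flushing, and mlx5e_rx_flow_steer() tests it under arfs_lock before adding new rules. The sketch below is a simplified, illustrative version of that gating pattern (a pthread mutex instead of a spinlock, a plain bool instead of a state bit; add_rule() and disable_and_flush() are hypothetical names, not driver API).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool enabled;   /* stands in for MLX5E_ARFS_STATE_ENABLED */
static int nr_rules;   /* stands in for the rule hash tables */

/* Fast path: only add rules while the feature is enabled, checked under the lock. */
static int add_rule(void)
{
	pthread_mutex_lock(&table_lock);
	if (!enabled) {
		pthread_mutex_unlock(&table_lock);
		return -1;   /* analogous to returning -EPERM in the diff */
	}
	nr_rules++;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

/* Teardown: clear the flag so no new rules can slip in, then drain the tables. */
static void disable_and_flush(void)
{
	pthread_mutex_lock(&table_lock);
	enabled = false;
	nr_rules = 0;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	enabled = true;
	printf("add while enabled: %d\n", add_rule());
	disable_and_flush();
	printf("add after disable: %d\n", add_rule());
	return 0;
}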
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index ca9cfbf57d8f..298bb74ec5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -30,21 +30,22 @@
* SOFTWARE.
*/
-#include <linux/dim.h>
#include "en.h"
+#include "en/dim.h"
static void
mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
- mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+ mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
dim->state = DIM_START_MEASURE;
}
void mlx5e_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct mlx5e_rq *rq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,9 +55,95 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct mlx5e_txqsq *sq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
+
+static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev,
+ void (*work_fun)(struct work_struct *), int cpu,
+ u8 cq_period_mode, struct mlx5_core_cq *mcq,
+ void *queue)
+{
+ struct dim *dim;
+ int err;
+
+ dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
+ if (!dim)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&dim->work, work_fun);
+
+ dim->mode = cq_period_mode;
+ dim->priv = queue;
+
+ err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode);
+ if (err) {
+ kvfree(dim);
+ return ERR_PTR(err);
+ }
+
+ return dim;
+}
+
+static void mlx5e_dim_disable(struct dim *dim)
+{
+ cancel_work_sync(&dim->work);
+ kvfree(dim);
+}
+
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
+{
+ if (enable == !!rq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = rq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
+ c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ rq->dim = dim;
+
+ __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ } else {
+ __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+
+ mlx5e_dim_disable(rq->dim);
+ rq->dim = NULL;
+ }
+
+ return 0;
+}
+
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
+{
+ if (enable == !!sq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = sq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
+ c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ sq->dim = dim;
+
+ __set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+ } else {
+ __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+
+ mlx5e_dim_disable(sq->dim);
+ sq->dim = NULL;
+ }
+
+ return 0;
+}
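With this rework, struct dim is no longer embedded in the RQ/SQ: mlx5e_dim_enable() allocates it, wires up the work function, priv pointer and CQ period mode, and mlx5e_dim_disable() cancels the work and frees it, while mlx5e_dim_rx_change()/mlx5e_dim_tx_change() stay idempotent by keying off whether the pointer is already set. A minimal userspace sketch of that allocate-on-enable toggle follows; struct moderator, struct queue and queue_dim_change() are illustrative names, not driver API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct moderator {    /* stands in for struct dim */
	int mode;
};

struct queue {        /* stands in for mlx5e_rq / mlx5e_txqsq */
	struct moderator *dim;
};

/* Idempotent toggle: allocate on enable, free on disable, no-op if already in the requested state. */
static int queue_dim_change(struct queue *q, bool enable, int mode)
{
	if (enable == (q->dim != NULL))
		return 0;

	if (enable) {
		struct moderator *m = calloc(1, sizeof(*m));

		if (!m)
			return -1;   /* -ENOMEM in the kernel version */
		m->mode = mode;
		q->dim = m;
	} else {
		free(q->dim);        /* the kernel version also cancels the work first */
		q->dim = NULL;
	}
	return 0;
}

int main(void)
{
	struct queue q = { 0 };

	queue_dim_change(&q, true, 1);
	queue_dim_change(&q, true, 1);   /* no-op */
	queue_dim_change(&q, false, 0);
	printf("dim=%p\n", (void *)q.dim);
	return 0;
}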
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc51ce16df14..3320f12ba2db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,9 +30,12 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
#include "en.h"
+#include "en/channels.h"
+#include "en/dim.h"
#include "en/port.h"
#include "en/params.h"
#include "en/ptp.h"
@@ -219,6 +222,13 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_8_800GBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
@@ -269,8 +279,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_priv_flags[i].name);
+ ethtool_puts(&data, mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -451,6 +460,34 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ if (mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Requested number of channels (%d) exceeds the maximum allowed by the XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto out;
+ }
+ }
+
+ /* If RXFH is configured, changing the number of channels is allowed only
+ * if it does not require resizing the RSS table, since the existing RSS
+ * configuration may not be compatible with a table of a different size.
+ */
+ if (netif_is_rxfh_configured(priv->netdev)) {
+ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+ if (new_rqt_size != cur_rqt_size) {
+ err = -EINVAL;
+ netdev_err(priv->netdev,
+ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+ __func__, new_rqt_size, cur_rqt_size);
+ goto out;
+ }
+ }
+
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
@@ -531,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_coalesce_usecs = rx_moder->usec;
coal->rx_max_coalesced_frames = rx_moder->pkts;
coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode;
tx_moder = &priv->channels.params.tx_cq_moderation;
coal->tx_coalesce_usecs = tx_moder->usec;
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
-
- kernel_coal->use_cqe_mode_rx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
- kernel_coal->use_cqe_mode_tx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode;
return 0;
}
@@ -555,50 +589,109 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
+static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct dim_cq_moder cur_moder;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ coal->use_adaptive_rx_coalesce = !!c->rq.dim;
+ if (coal->use_adaptive_rx_coalesce) {
+ cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode,
+ c->rq.dim->profile_ix);
+
+ coal->rx_coalesce_usecs = cur_moder.usec;
+ coal->rx_max_coalesced_frames = cur_moder.pkts;
+ } else {
+ coal->rx_coalesce_usecs = c->rx_cq_moder.usec;
+ coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts;
+ }
+
+ coal->use_adaptive_tx_coalesce = !!c->sq[0].dim;
+ if (coal->use_adaptive_tx_coalesce) {
+ /* NOTE: Only the DIM coalesce profile of the first SQ (tc 0) of this
+ * channel is reported. The current interface cannot report this
+ * information for every tc.
+ */
+ cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode,
+ c->sq[0].dim->profile_ix);
+
+ coal->tx_coalesce_usecs = cur_moder.usec;
+ coal->tx_max_coalesced_frames = cur_moder.pkts;
+
+ } else {
+ coal->tx_coalesce_usecs = c->tx_cq_moder.usec;
+ coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts;
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal);
+}
+
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int tc;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
+
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->tx_cq_moder = *moder;
for (tc = 0; tc < c->num_tc; tc++) {
- mlx5_core_modify_cq_moderation(mdev,
- &c->sq[tc].cq.mcq,
- coal->tx_coalesce_usecs,
- coal->tx_max_coalesced_frames);
+ mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ moder->usec, moder->pkts,
+ mode);
}
}
}
static void
-mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
- mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
- coal->rx_coalesce_usecs,
- coal->rx_max_coalesced_frames);
- }
-}
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->rx_cq_moder = *moder;
-/* convert a boolean value of cq_mode to mlx5 period mode
- * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
- * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
- */
-static int cqe_mode_to_period_mode(bool val)
-{
- return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
+ mode);
+ }
}
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
@@ -608,13 +701,14 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
- bool reset = true;
u8 cq_period_mode;
int err = 0;
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify))
return -EOPNOTSUPP;
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
@@ -637,60 +731,70 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
- rx_moder = &new_params.rx_cq_moderation;
- rx_moder->usec = coal->rx_coalesce_usecs;
- rx_moder->pkts = coal->rx_max_coalesced_frames;
- new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx);
+ reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ rx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode);
- tx_moder = &new_params.tx_cq_moderation;
- tx_moder->usec = coal->tx_coalesce_usecs;
- tx_moder->pkts = coal->tx_max_coalesced_frames;
- new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx);
+ reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ tx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode);
- reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
- reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled;
+ reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
- if (cq_period_mode != rx_moder->cq_period_mode) {
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- reset_rx = true;
- }
+ /* Solely used for global ethtool get coalesce */
+ rx_moder = &new_params.rx_cq_moderation;
+ new_params.rx_dim_enabled = rx_dim_enabled;
+ new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
- if (cq_period_mode != tx_moder->cq_period_mode) {
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
- reset_tx = true;
- }
+ tx_moder = &new_params.tx_cq_moderation;
+ new_params.tx_dim_enabled = tx_dim_enabled;
+ new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx;
if (reset_rx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ mlx5e_channels_rx_change_dim(&priv->channels, false);
+ mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode,
+ rx_dim_enabled);
- mlx5e_reset_rx_moderation(&new_params, mode);
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
+ } else if (!rx_dim_enabled) {
+ rx_moder->usec = coal->rx_coalesce_usecs;
+ rx_moder->pkts = coal->rx_max_coalesced_frames;
+
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
}
+
if (reset_tx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ mlx5e_channels_tx_change_dim(&priv->channels, false);
+ mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode,
+ tx_dim_enabled);
- mlx5e_reset_tx_moderation(&new_params, mode);
- }
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
+ } else if (!tx_dim_enabled) {
+ tx_moder->usec = coal->tx_coalesce_usecs;
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
- /* If DIM state hasn't changed, it's possible to modify interrupt
- * moderation parameters on the fly, even if the channels are open.
- */
- if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- if (!coal->use_adaptive_rx_coalesce)
- mlx5e_set_priv_channels_rx_coalesce(priv, coal);
- if (!coal->use_adaptive_tx_coalesce)
- mlx5e_set_priv_channels_tx_coalesce(priv, coal);
- reset = false;
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
}
- err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+ /* DIM enable/disable Rx and Tx channels */
+ err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
+state_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -705,6 +809,88 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
+static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+ int err = 0;
+ int tc;
+
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
+ netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
+ __func__, MLX5E_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
+ coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
+ netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
+ __func__, MLX5E_MAX_COAL_FRAMES);
+ return -ERANGE;
+ }
+
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ }
+
+ if (!rx_dim_enabled) {
+ c->rx_cq_moder.usec = coal->rx_coalesce_usecs;
+ c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames;
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ if (!tx_dim_enabled) {
+ c->tx_cq_moder.usec = coal->tx_coalesce_usecs;
+ c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+state_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
+}
+
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
unsigned long *supported_modes,
u32 eth_proto_cap)
@@ -990,8 +1176,8 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
@@ -1161,8 +1347,8 @@ static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_suppo
return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
}
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
@@ -1262,7 +1448,7 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
@@ -1275,23 +1461,36 @@ int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return err;
}
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
+ unsigned int count;
int err;
mutex_lock(&priv->state_lock);
+
+ count = priv->channels.params.num_channels;
+
+ if (hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto unlock;
+ }
+ }
+
if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- unsigned int count = priv->channels.params.num_channels;
-
err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
if (err)
goto unlock;
@@ -1405,8 +1604,8 @@ static void mlx5e_get_pause_stats(struct net_device *netdev,
mlx5e_stats_pause_get(priv, pause_stats);
}
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1427,8 +1626,8 @@ static void mlx5e_get_pauseparam(struct net_device *netdev,
mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1867,7 +2066,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- cq_period_mode = cqe_mode_to_period_mode(enable);
+ cq_period_mode = mlx5e_dim_cq_period_mode(enable);
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
@@ -1877,12 +2076,22 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
new_params = priv->channels.params;
- if (is_rx_cq)
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- else
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ if (is_rx_cq) {
+ mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_rx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ cq_period_mode);
+ } else {
+ mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_tx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ cq_period_mode);
+ }
- return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ /* Update pflags of existing channels without resetting them */
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -2083,7 +2292,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
*/
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err)
priv->tx_ptp_opened = true;
@@ -2381,6 +2590,14 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
}
+static void mlx5e_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_ts_get(priv, ts_stats);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -2399,6 +2616,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_link_ksettings = mlx5e_get_link_ksettings,
.set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
@@ -2430,5 +2649,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
+ .get_ts_stats = mlx5e_get_ts_stats,
.get_link_ext_stats = mlx5e_get_link_ext_stats
};
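The new get/set per-queue coalesce callbacks above validate the queue index against the current channel count under state_lock and then either report the active DIM profile (adaptive moderation) or the static rx_cq_moder/tx_cq_moder stored on the channel. The sketch below illustrates only that selection logic with made-up types (struct coalesce, struct channel, get_queue_coalesce()); it is not the driver code itself.

#include <stdbool.h>
#include <stdio.h>

struct coalesce {                 /* subset of struct ethtool_coalesce */
	unsigned int usecs;
	unsigned int max_frames;
	bool use_adaptive;
};

struct channel {
	bool dim_enabled;         /* stands in for "rq->dim != NULL" */
	unsigned int dim_usecs;   /* what the DIM profile currently resolves to */
	unsigned int dim_frames;
	unsigned int static_usecs;
	unsigned int static_frames;
};

/* Report either the adaptive (DIM) profile or the static per-channel moderation. */
static int get_queue_coalesce(const struct channel *chs, unsigned int nch,
			      unsigned int queue, struct coalesce *coal)
{
	const struct channel *c;

	if (queue >= nch)
		return -1;        /* -EINVAL in the driver */

	c = &chs[queue];
	coal->use_adaptive = c->dim_enabled;
	coal->usecs = c->dim_enabled ? c->dim_usecs : c->static_usecs;
	coal->max_frames = c->dim_enabled ? c->dim_frames : c->static_frames;
	return 0;
}

int main(void)
{
	struct channel chs[2] = {
		{ .dim_enabled = true,  .dim_usecs = 8,  .dim_frames = 64 },
		{ .dim_enabled = false, .static_usecs = 3, .static_frames = 32 },
	};
	struct coalesce coal;

	if (!get_queue_coalesce(chs, 2, 1, &coal))
		printf("queue 1: adaptive=%d usecs=%u frames=%u\n",
		       coal.use_adaptive, coal.usecs, coal.max_frames);
	return 0;
}

Assuming an ethtool userspace new enough to support it, these callbacks are what back the per-queue form of the coalesce commands, e.g. ethtool --per-queue eth0 queue_mask 0x2 --show-coalesce.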
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 777d311d44ef..8c5b291a171f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 91848eae4565..b758bc72ac36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -43,6 +44,7 @@
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
@@ -209,8 +211,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
*data,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR_OR_NULL(priv->devcom))
- return -EOPNOTSUPP;
+ if (IS_ERR(priv->devcom))
+ return PTR_ERR(priv->devcom);
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -960,17 +962,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
}
}
- INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-
- switch (params->rx_cq_moderation.cq_period_mode) {
- case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
- break;
- case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
- default:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
- }
-
return 0;
err_destroy_page_pool:
@@ -1020,6 +1011,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5e_free_wqe_alloc_info(rq);
}
+ kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1300,8 +1292,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
- if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ if (rq->channel && !params->rx_dim_enabled) {
+ rq->channel->rx_cq_moder = params->rx_cq_moderation;
+ } else if (rq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->rx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
+ params->rx_dim_enabled);
+
+ err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
+ if (err)
+ goto err_destroy_rq;
+ }
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
@@ -1347,7 +1352,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->dim.work);
+ if (rq->dim)
+ cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1623,9 +1629,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
- sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
-
return 0;
err_sq_wq_destroy:
@@ -1636,6 +1639,7 @@ err_sq_wq_destroy:
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
+ kvfree(sq->dim);
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
@@ -1791,11 +1795,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
- if (params->tx_dim_enabled)
- sq->state |= BIT(MLX5E_SQ_STATE_DIM);
+ if (sq->channel && !params->tx_dim_enabled) {
+ sq->channel->tx_cq_moder = params->tx_cq_moderation;
+ } else if (sq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->tx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
+ cq_period_mode,
+ params->tx_dim_enabled);
+
+ err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
+ if (err)
+ goto err_destroy_sq;
+ }
return 0;
+err_destroy_sq:
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_txqsq:
mlx5e_free_txqsq(sq);
@@ -1847,7 +1867,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
- cancel_work_sync(&sq->dim.work);
+ if (sq->dim)
+ cancel_work_sync(&sq->dim->work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
@@ -1866,6 +1887,49 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->sq[tc].dim;
+
+ reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+ }
+
+ return reset;
+}
+
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
work_func_t recover_work_func)
@@ -2089,7 +2153,8 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
+
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -2127,8 +2192,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
if (err)
goto err_free_cq;
- if (MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
+ if (MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
return 0;
err_free_cq:
@@ -2143,6 +2210,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
@@ -2901,7 +3002,28 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
return err;
}
-static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_comp_vectors, ix, irq;
+
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
+
+ for (ix = 0; ix < params->num_channels; ix++) {
+ cpumask_clear(priv->scratchpad.cpumask);
+
+ for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
+
+ cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+ }
+
+ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+ }
+}
+
+static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
{
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
@@ -2925,22 +3047,10 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, nch);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
- goto err_txqs;
- }
+ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
return 0;
-err_txqs:
- /* netif_set_real_num_rx_queues could fail only when nch increased. Only
- * one of nch and ntc is changed in this function. That means, the call
- * to netif_set_real_num_tx_queues below should not fail, because it
- * decreases the number of TX queues.
- */
- WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
-
err_tcs:
WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
old_tc_to_txq));
@@ -2948,42 +3058,32 @@ err_out:
return err;
}
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
-
-static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
- struct mlx5e_params *params)
-{
- int ix;
-
- for (ix = 0; ix < params->num_channels; ix++) {
- int num_comp_vectors, irq, vec_ix;
- struct mlx5_core_dev *mdev;
-
- mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
- cpumask_clear(priv->scratchpad.cpumask);
- vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
-
- for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
-
- cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
- }
-
- netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
- }
-}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ struct net_device *netdev = priv->netdev;
+ int old_num_rxqs;
int err;
- err = mlx5e_update_netdev_queues(priv);
- if (err)
+ old_num_rxqs = netdev->real_num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, count);
+ if (err) {
+ netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
+ __func__, err);
return err;
-
- mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+ }
+ err = mlx5e_update_tc_and_tx_queues(priv);
+ if (err) {
+ /* mlx5e_update_tc_and_tx_queues() can fail when the number of channels
+ * or TCs increases. Only the channel count changed here, and since the
+ * update failed it must have increased, so the netif_set_real_num_rx_queues()
+ * call below should not fail: it only decreases the number of RX queues.
+ */
+ WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
+ return err;
+ }
/* This function may be called on attach, before priv->rx_res is created. */
if (priv->rx_res) {
@@ -3516,7 +3616,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err && priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
@@ -3617,10 +3717,8 @@ static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
- mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
struct mlx5e_mqprio_rl *rl;
- bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -3634,10 +3732,8 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
new_params = priv->channels.params;
mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
- nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
- preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
- mlx5e_update_netdev_queues_ctx;
- err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (err) {
if (rl) {
mlx5e_mqprio_rl_cleanup(rl);
@@ -3960,6 +4056,47 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->rq.dim;
+
+ reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+
+ return reset;
+}
+
static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
@@ -4383,7 +4520,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
- netdev->mtu = params->sw_mtu;
+ WRITE_ONCE(netdev->mtu, params->sw_mtu);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4950,10 +5087,7 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
@@ -5027,7 +5161,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
- u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
@@ -5061,13 +5194,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
- rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
- mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
+ params->tx_moder_use_cqe_mode = false;
+ mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
+ params->rx_dim_enabled);
+ mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
+ params->tx_dim_enabled);
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@ -5565,7 +5701,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_ipsec_cleanup(priv);
}
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
return mlx5e_refresh_tirs(priv, false, false);
}
@@ -5726,9 +5862,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
@@ -6060,7 +6194,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int _mlx5e_suspend(struct auxiliary_device *adev)
+static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6069,7 +6203,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5_core_dev *pos;
int i;
- if (!netif_device_present(netdev)) {
+ if (!pre_netdev_reg && !netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5_sd_for_each_dev(i, mdev, pos)
mlx5e_destroy_mdev_resources(pos);
@@ -6092,7 +6226,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
if (actual_adev)
- err = _mlx5e_suspend(actual_adev);
+ err = _mlx5e_suspend(actual_adev, false);
mlx5_sd_cleanup(mdev);
return err;
@@ -6159,7 +6293,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
return 0;
err_resume:
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, true);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6199,7 +6333,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, false);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
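One detail worth calling out in the en_main.c hunks is the rewritten mlx5e_set_default_xps_cpumasks(): each channel ix now collects the CPUs of completion vectors ix, ix + num_channels, ix + 2*num_channels, and so on, and installs that mask as the XPS mapping for its queue. The standalone snippet below just prints that striding for illustrative vector/channel counts; the CPU lookup (mlx5_comp_vector_get_cpu() in the driver) is faked by printing the vector index.

#include <stdio.h>

/* Distribute completion vectors across channels the way the rewritten
 * mlx5e_set_default_xps_cpumasks() does: channel ix serves vectors
 * ix, ix + nch, ix + 2*nch, ...
 */
int main(void)
{
	const int num_comp_vectors = 8;   /* illustrative */
	const int num_channels = 3;       /* illustrative */

	for (int ix = 0; ix < num_channels; ix++) {
		printf("channel %d gets vectors:", ix);
		for (int irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", irq);   /* driver maps each vector to a CPU here */
		printf("\n");
	}
	return 0;
}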
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 05527418fa64..8790d57dc6db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
@@ -40,6 +41,7 @@
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
@@ -135,9 +137,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- sw_rep_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_rep_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
@@ -145,9 +145,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
@@ -176,11 +176,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ ethtool_puts(data, vport_rep_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_rep_loopback_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, vport_rep_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
@@ -188,12 +186,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_stats_desc, i));
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_loopback_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
@@ -275,8 +275,42 @@ out:
kvfree(out);
}
+static int mlx5e_rep_query_aggr_q_counter(struct mlx5_core_dev *dev, int vport, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, other_vport, 1);
+ MLX5_SET(query_q_counter_in, in, vport_number, vport);
+ MLX5_SET(query_q_counter_in, in, aggregate, 1);
+
+ return mlx5_cmd_exec_inout(dev, query_q_counter, in, out);
+}
+
+static void mlx5e_rep_update_vport_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, q_counter_other_vport) ||
+ !MLX5_CAP_GEN(priv->mdev, q_counter_aggregation))
+ return;
+
+ err = mlx5e_rep_query_aggr_q_counter(priv->mdev, rep->vport, out);
+ if (err) {
+ netdev_warn(priv->netdev, "failed reading stats on vport %d, error %d\n",
+ rep->vport, err);
+ return;
+ }
+
+ rep_stats->rx_vport_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+ u32 stringset, u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -394,6 +428,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_channels = mlx5e_rep_set_channels,
.get_coalesce = mlx5e_rep_get_coalesce,
.set_coalesce = mlx5e_rep_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
@@ -804,10 +840,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params *params;
- u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -835,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
params->mqprio.num_tc = 1;
if (rep->vport != MLX5_VPORT_UPLINK)
@@ -1231,6 +1263,12 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
return 0;
}
+static void mlx5e_rep_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_update_ndo_stats(priv);
+ mlx5e_rep_update_vport_q_counter(priv);
+}
+
static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1423,7 +1461,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.enable = mlx5e_rep_enable,
.disable = mlx5e_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_stats_update_ndo_stats,
+ .update_stats = mlx5e_rep_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.stats_grps = mlx5e_rep_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d601b5faaed5..b5333da20e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -917,7 +917,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (!rq->xsk_pool) {
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
- } else if (likely(!rq->xsk_pool->dma_need_sync)) {
+ } else if (likely(!dma_dev_need_sync(rq->pdev))) {
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 08a75654f5f1..5bf8318cc48b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -359,7 +359,7 @@ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
if (st.cond_func && st.cond_func(priv))
continue;
if (data)
- strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ ethtool_puts(&data, st.name);
count++;
}
return count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f3d0898bdbc6..e211c41cec06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -41,6 +41,11 @@
#include <net/page_pool/helpers.h>
#endif
+void mlx5e_ethtool_put_stat(u64 **data, u64 val)
+{
+ *(*data)++ = val;
+}
+
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
@@ -90,17 +95,17 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_stats(priv, data, idx);
+ stats_grps[i]->fill_stats(priv, &data);
}
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
- int i, idx = 0;
+ int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_strings(priv, data, idx);
+ stats_grps[i]->fill_strings(priv, &data);
}
/* Concrete NIC Stats */
@@ -257,8 +262,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
@@ -266,8 +270,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i));
}
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
@@ -591,14 +596,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- q_stats_desc[i].format);
+ ethtool_puts(data, q_stats_desc[i].format);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- drop_rq_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, drop_rq_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
@@ -606,12 +607,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i));
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- drop_rq_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ drop_rq_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
@@ -685,18 +687,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_steer_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_steer_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_dev_oob_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_drop_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
@@ -704,18 +701,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_steer_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_steer_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_dev_oob_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_dev_oob_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_drop_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
@@ -798,13 +799,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ ethtool_puts(data, vport_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_loopback_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vport_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
@@ -812,14 +810,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i));
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_loopback_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
@@ -868,8 +868,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_802_3_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
@@ -877,9 +876,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i));
}
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
@@ -1029,8 +1029,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2863_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
@@ -1038,9 +1037,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
@@ -1088,8 +1088,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2819_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
@@ -1097,9 +1096,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
@@ -1172,6 +1172,51 @@ void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
*ranges = mlx5e_rmon_ranges;
}
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats)
+{
+ int i, j;
+
+ mutex_lock(&priv->state_lock);
+
+ if (priv->tx_ptp_opened) {
+ struct mlx5e_ptp *ptp = priv->channels.ptp;
+
+ ts_stats->pkts = 0;
+ ts_stats->err = 0;
+ ts_stats->lost = 0;
+
+ /* Aggregate stats across all TCs */
+ for (i = 0; i < ptp->num_tc; i++) {
+ struct mlx5e_ptp_cq_stats *stats =
+ ptp->ptpsq[i].cq_stats;
+
+ ts_stats->pkts += stats->cqe;
+ ts_stats->err += stats->abort + stats->err_cqe +
+ stats->late_cqe;
+ ts_stats->lost += stats->lost_cqe;
+ }
+ } else {
+ /* The DMA layer always timestamps packets successfully. Other
+ * counters do not make sense for this layer.
+ */
+ ts_stats->pkts = 0;
+
+ /* Aggregate stats across all SQs */
+ for (j = 0; j < priv->channels.num; j++) {
+ struct mlx5e_channel *c = priv->channels.c[j];
+
+ for (i = 0; i < c->num_tc; i++) {
+ struct mlx5e_sq_stats *stats = c->sq[i].stats;
+
+ ts_stats->pkts += stats->timestamps;
+ }
+ }
+ }
+
+ mutex_unlock(&priv->state_lock);
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1215,21 +1260,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
+ ethtool_puts(data, "link_down_events_phy");
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
+ return;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
+ ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1238,24 +1280,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
int i;
/* link_down_events_phy has special handling since it is not stored in __be64 format */
- data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
+ mlx5e_ethtool_put_stat(
+ data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events));
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
+ return;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i));
if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc,
+ i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1436,9 +1483,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
@@ -1447,10 +1492,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
@@ -1516,19 +1562,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc64[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
- return idx;
+ ethtool_puts(data,
+ pcie_perf_stall_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
@@ -1537,22 +1580,27 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
@@ -1609,18 +1657,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_prio_stats_desc[i].format,
+ prio);
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_congest_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_congest_prio_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
@@ -1630,20 +1678,24 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
- pport_per_tc_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_prio_counters[prio],
+ pport_per_tc_prio_stats_desc, i));
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
- pport_per_tc_congest_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_congest_prio_counters
+ [prio],
+ pport_per_tc_congest_prio_stats_desc,
+ i));
}
-
- return idx;
}
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
@@ -1728,35 +1780,33 @@ static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_prio_traffic_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
-static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i));
}
-
- return idx;
}
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
@@ -1816,9 +1866,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
-static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1829,28 +1878,26 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
char pfc_string[ETH_GSTRING_LEN];
snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ pfc_string);
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ "global");
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_pfc_stall_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
}
-static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1858,25 +1905,30 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i));
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i));
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_pfc_stall_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_pfc_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
@@ -1887,16 +1939,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
@@ -1944,12 +1994,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
int i;
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+ ethtool_puts(data, mlx5e_pme_status_desc[i].format);
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_pme_error_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
@@ -1960,14 +2008,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
mlx5_get_pme_stats(priv->mdev, &pme_stats);
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
+ mlx5e_pme_status_desc, i));
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
+ mlx5e_pme_error_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
@@ -1979,12 +2027,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ mlx5e_ktls_get_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_ktls_get_stats(priv, data + idx);
+ mlx5e_ktls_get_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2063,6 +2111,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2175,6 +2224,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2214,6 +2264,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2264,10 +2315,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
for (qid = 0; qid < max_qos_sqs; qid++)
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- qos_sq_stats_desc[i].format, qid);
-
- return idx;
+ ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
@@ -2284,10 +2332,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
@@ -2312,29 +2360,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "%s", ptp_ch_stats_desc[i].format);
+ ethtool_puts(data, ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_sq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_sq_stats_desc[i].format,
+ tc);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_cq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_cq_stats_desc[i].format,
+ tc);
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
+ ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
+ MLX5E_PTP_CHANNEL_IX);
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
@@ -2342,33 +2390,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
- ptp_ch_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
+ ptp_ch_stats_desc, i));
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
- ptp_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i));
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
- ptp_cq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i));
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- data[idx++] =
+ mlx5e_ethtool_put_stat(
+ data,
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
- ptp_rq_stats_desc, i);
+ ptp_rq_stats_desc, i));
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
@@ -2394,38 +2444,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ch_stats_desc[j].format, i);
+ ethtool_sprintf(data, ch_stats_desc[j].format, i);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_stats_desc[j].format, i);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xskrq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- i + tc * max_nch);
+ ethtool_sprintf(data, sq_stats_desc[j].format,
+ i + tc * max_nch);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xsksq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
@@ -2436,44 +2477,50 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
- ch_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->ch,
+ ch_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
- rq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq,
+ rq_stats_desc, j));
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
- xskrq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xskrq,
+ xskrq_stats_desc, j));
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
- rq_xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq_xdpsq,
+ rq_xdpsq_stats_desc, j));
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
- sq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->sq[tc],
+ sq_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
- xsksq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xsksq,
+ xsksq_stats_desc, j));
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
- xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xdpsq,
+ xdpsq_stats_desc, j));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 12b3607afecd..650732288616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,11 +71,13 @@ struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*fill_strings)(struct mlx5e_priv *priv, u8 **data);
+ void (*fill_stats)(struct mlx5e_priv *priv, u64 **data);
void (*update_stats)(struct mlx5e_priv *priv);
};
+void mlx5e_ethtool_put_stat(u64 **data, u64 val);
+
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
@@ -87,10 +89,10 @@ typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 **data)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 **data)
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
@@ -126,6 +128,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
struct ethtool_rmon_stats *rmon,
const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats);
void mlx5e_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats);
@@ -429,6 +433,7 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 dropped;
u64 recover;
+ u64 timestamps;
/* dirtied @completion */
u64 cqes ____cacheline_aligned_in_smp;
u64 wake;
@@ -461,6 +466,7 @@ struct mlx5e_ptp_cq_stats {
u64 abort;
u64 abort_abs_diff_ns;
u64 late_cqe;
+ u64 lost_cqe;
};
struct mlx5e_rep_stats {
@@ -478,6 +484,7 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_bytes;
u64 vport_loopback_packets;
u64 vport_loopback_bytes;
+ u64 rx_vport_out_of_buffer;
};
struct mlx5e_stats {
@@ -498,6 +505,7 @@ static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport
vf_vport->tx_packets = rep_stats->vport_tx_packets;
vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+ vf_vport->rx_missed_errors = rep_stats->rx_vport_out_of_buffer;
}
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
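
The en_stats.h hunk changes the stats-group callbacks from index-returning functions to void functions that advance a shared cursor, with mlx5e_ethtool_put_stat() doing the post-increment store. A small sketch of dispatching several groups over one buffer under that convention; the group names and values are invented for illustration.

#include <stdio.h>

typedef unsigned long long u64;

/* models mlx5e_ethtool_put_stat(): write one value and advance the cursor */
static void put_stat(u64 **data, u64 val)
{
        *(*data)++ = val;
}

static void fill_sw(u64 **data)    { put_stat(data, 100); put_stat(data, 200); }
static void fill_pport(u64 **data) { put_stat(data, 7); }

struct stats_grp { void (*fill_stats)(u64 **data); };

static const struct stats_grp grps[] = {
        { .fill_stats = fill_sw },
        { .fill_stats = fill_pport },
};

int main(void)
{
        u64 buf[8];
        u64 *data = buf;

        /* each group appends at the shared cursor; no idx is passed around */
        for (unsigned i = 0; i < sizeof(grps) / sizeof(grps[0]); i++)
                grps[i].fill_stats(&data);

        for (u64 *p = buf; p < data; p++)
                printf("%llu\n", *p);
        return 0;
}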
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31ed26cac9bf..30673292e15f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
@@ -2802,12 +2801,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
- /* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
- return -EOPNOTSUPP;
- }
-
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -2820,6 +2813,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
else
*match_level = MLX5_MATCH_L3;
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -5464,6 +5461,7 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
struct tunnel_match_key key;
@@ -5471,6 +5469,8 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct net_device *dev;
int err;
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+
enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
tun_id = tunnel_id >> ENC_OPTS_BITS;
@@ -5503,14 +5503,14 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
+ key.enc_tp.dst, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
+ key.enc_tp.dst, 0, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
@@ -5528,11 +5528,16 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
- if (enc_opts.key.len)
+ if (enc_opts.key.len) {
+ ip_tunnel_flags_zero(flags);
+ if (enc_opts.key.dst_opt_type)
+ __set_bit(enc_opts.key.dst_opt_type, flags);
+
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
enc_opts.key.data,
enc_opts.key.len,
- enc_opts.key.dst_opt_type);
+ flags);
+ }
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);
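
The en_tc.c hunks move tunnel key flags from the old __be16 TUNNEL_KEY word to the bitmap-based IP_TUNNEL_DECLARE_FLAGS()/__set_bit() interface. A userspace model of the same declare-a-bitmap, set-one-bit, test-it pattern, using locally defined helpers rather than the kernel macros.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define DECLARE_BITMAP(name, bits) \
        unsigned long name[(bits + BITS_PER_LONG - 1) / BITS_PER_LONG]

enum { TUN_KEY_BIT = 0, TUN_CSUM_BIT = 1, TUN_FLAGS_CNT = 64 };

static void set_bit_local(unsigned bit, unsigned long *map)
{
        map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit_local(unsigned bit, const unsigned long *map)
{
        return !!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
        DECLARE_BITMAP(flags, TUN_FLAGS_CNT) = { 0 };   /* like IP_TUNNEL_DECLARE_FLAGS */

        set_bit_local(TUN_KEY_BIT, flags);              /* like __set_bit(IP_TUNNEL_KEY_BIT, flags) */

        printf("key=%d csum=%d\n",
               test_bit_local(TUN_KEY_BIT, flags),
               test_bit_local(TUN_CSUM_BIT, flags));
        return 0;
}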
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2fa076b23fbe..099bf1078889 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
@@ -750,11 +749,13 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
- if (sq->ptpsq)
+ if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
- else
+ } else {
skb_tstamp_tx(skb, &hwts);
+ sq->stats->timestamps++;
+ }
}
napi_consume_skb(skb, napi_budget);
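
The en_tx.c hunks defer consuming a PTP metadata entry until the WQE is actually committed: the descriptor-build path only peeks at the free list, the pop happens in the completion path, and the error path no longer needs to push the entry back. A self-contained peek-then-pop sketch of that pattern; the tiny ring below is illustrative, not the mlx5e metadata fifo API.

#include <stdio.h>

struct fifo { unsigned char data[8]; unsigned head, tail; }; /* head == tail: empty */

static unsigned char fifo_peek(const struct fifo *f)
{
        return f->data[f->head & 7];            /* look, do not consume */
}

static unsigned char fifo_pop(struct fifo *f)
{
        return f->data[f->head++ & 7];          /* consume on commit */
}

static int send_one(struct fifo *free_ids, int fail)
{
        unsigned char id = fifo_peek(free_ids); /* stamp the descriptor */

        if (fail)
                return -1;                      /* nothing to undo: id was not consumed */

        fifo_pop(free_ids);                     /* commit: the id is now in flight */
        printf("sent with metadata id %u\n", id);
        return 0;
}

int main(void)
{
        struct fifo ids = { .data = { 0, 1, 2, 3, 4, 5, 6, 7 }, .head = 0, .tail = 8 };

        send_one(&ids, 1);      /* error path leaves the free list untouched */
        send_one(&ids, 0);
        send_one(&ids, 0);
        return 0;
}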
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a7d9b7cb4297..5873fde65c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&sq->dim, dim_sample);
+ net_dim(sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&rq->dim, dim_sample);
+ net_dim(rq->dim, dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 40a6cb052a2d..5693986ae656 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -688,6 +688,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
if (err)
goto err2;
+ /* Skip page EQ creation when the device does not issue page requests */
+ if (MLX5_CAP_GEN(dev, page_request_disable)) {
+ mlx5_core_dbg(dev, "Skip page EQ creation\n");
+ return 0;
+ }
+
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
@@ -716,7 +722,8 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- cleanup_async_eq(dev, &table->pages_eq, "pages");
+ if (!MLX5_CAP_GEN(dev, page_request_disable))
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
mlx5_cmd_use_polling(dev);
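
The eq.c hunks skip creating the pages EQ when the page_request_disable capability is set, and mirror the same test on teardown so only an EQ that was actually created gets destroyed. A tiny sketch of keeping the create and destroy conditions symmetric; the fields below are invented.

#include <stdio.h>
#include <stdbool.h>

struct dev { bool page_request_disable; bool pages_eq_created; };

static int create_async_eqs(struct dev *d)
{
        /* ... async/cmd EQs would be created here ... */
        if (d->page_request_disable)
                return 0;                       /* skip the pages EQ entirely */

        d->pages_eq_created = true;
        return 0;
}

static void destroy_async_eqs(struct dev *d)
{
        if (!d->page_request_disable)           /* same condition as on create */
                d->pages_eq_created = false;
        /* ... remaining EQs torn down here ... */
}

int main(void)
{
        struct dev d = { .page_request_disable = true };

        create_async_eqs(&d);
        destroy_async_eqs(&d);
        printf("pages EQ created: %d\n", d.pages_eq_created);
        return 0;
}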
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1b9bc32efd6f..c5ea1d1d2b03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index d8e739cbcbce..f8869c9b6802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -98,6 +98,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
@@ -143,6 +145,8 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3047d7015c52..17f78091ad30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1805,7 +1805,8 @@ err:
}
static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -1868,6 +1869,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1892,11 +1894,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_esw_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@@ -1908,6 +1905,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
+ dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@@ -1926,7 +1924,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -1937,6 +1934,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
+ esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
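
The eswitch.c hunks tighten when dev->priv.eswitch is published: the pointer is now set before esw_offloads_init() so init-time code can look it up, cleared again on the error path, and on teardown cleared only after esw_offloads_cleanup() has finished. A minimal sketch of that publish-after-allocation, unpublish-after-cleanup discipline, with invented names.

#include <stdio.h>
#include <stdlib.h>

struct esw { int ready; };
struct dev { struct esw *eswitch; };    /* the published pointer */

static int offloads_init(struct dev *d)
{
        /* init-time code may legitimately dereference d->eswitch here */
        return d->eswitch ? 0 : -1;
}

static void offloads_cleanup(struct dev *d)
{
        /* cleanup-time code may still need d->eswitch */
        d->eswitch->ready = 0;
}

static int esw_init(struct dev *d)
{
        struct esw *esw = calloc(1, sizeof(*esw));

        if (!esw)
                return -1;

        d->eswitch = esw;               /* publish before init that depends on it */
        if (offloads_init(d)) {
                d->eswitch = NULL;      /* unpublish on the error path */
                free(esw);
                return -1;
        }
        esw->ready = 1;
        return 0;
}

static void esw_cleanup(struct dev *d)
{
        struct esw *esw = d->eswitch;

        offloads_cleanup(d);            /* still allowed to use d->eswitch */
        d->eswitch = NULL;              /* unpublish only after cleanup is done */
        free(esw);
}

int main(void)
{
        struct dev d = { 0 };

        if (!esw_init(&d))
                esw_cleanup(&d);
        return 0;
}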
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6dd8d..88745dc6aed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -573,6 +573,13 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
+int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
+ u32 *max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
+ u32 max_io_eqs,
+ struct netlink_ext_ack *extack);
+
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -833,7 +840,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
@@ -925,7 +932,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
static inline int
-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b0a0..592143d5e1da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,6 +43,7 @@
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
@@ -66,6 +67,8 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+#define MLX5_ESW_MAX_CTRL_EQS 4
+
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -2410,7 +2413,8 @@ err:
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -2476,6 +2480,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
+ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
@@ -2497,6 +2505,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
}
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+
+ return 0;
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -2521,13 +2539,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- goto err_reps;
- }
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_reps;
+ }
return 0;
@@ -3055,7 +3071,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
key,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (IS_ERR(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3272,7 +3288,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -3285,13 +3301,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- mlx5_esw_offloads_rep_load(esw, rep->vport);
+ __esw_offloads_load_rep(esw, rep, REP_IB);
}
return 0;
@@ -3707,6 +3723,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -4557,3 +4579,98 @@ unlock:
return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */
+
+int
+mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u32 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
+ if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
+ *max_io_eqs = 0;
+ else
+ *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u16 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
+ NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
+ return -EINVAL;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
+
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
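
mlx5_devlink_port_fn_max_io_eqs_set() above adds the MLX5_ESW_MAX_CTRL_EQS reservation on top of the user-supplied value with check_add_overflow(), so a large u32 request cannot silently wrap the u16 max_num_eqs field. check_add_overflow() is essentially a typed wrapper around the compiler's __builtin_add_overflow(); a runnable userspace model of the same bounds check follows.

#include <stdio.h>
#include <stdint.h>

#define MAX_CTRL_EQS 4u /* mirrors MLX5_ESW_MAX_CTRL_EQS in the patch */

/* returns 0 on success, -1 if ctrl + io EQs would not fit the u16 cap field */
static int compute_max_eqs(uint32_t max_io_eqs, uint16_t *max_eqs)
{
        /* same primitive that check_add_overflow() expands to */
        if (__builtin_add_overflow(max_io_eqs, MAX_CTRL_EQS, max_eqs))
                return -1;
        return 0;
}

int main(void)
{
        uint16_t max_eqs;

        if (!compute_max_eqs(1020, &max_eqs))
                printf("ok: max_num_eqs=%u\n", max_eqs);

        if (compute_max_eqs(70000, &max_eqs))   /* does not fit in u16 */
                printf("rejected: supplied value out of range\n");
        return 0;
}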
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6bfa7e4f146..32cdacc34a0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+ struct mlx5_pkt_reformat *p2)
+{
+ return p1->owner == p2->owner &&
+ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+ p1->id == p2->id :
+ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.pkt_reformat->id ==
- d2->vport.pkt_reformat->id) : true)) ||
+ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+ d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
@@ -3321,7 +3332,8 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
enum mlx5_flow_steering_mode mode;
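
In the fs_core.c hunks, add_rule_fg() now recognises freshly created rules by an empty node.parent instead of inferring it from a refcount of 1; an explicit linkage marker stays correct even when extra references exist before the rule is linked. A small sketch of that idea with invented structures.

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *parent; int refcount; };

static void tree_add_node(struct node *child, struct node *parent)
{
        child->parent = parent;
        parent->refcount++;
}

int main(void)
{
        struct node fte = { .parent = NULL, .refcount = 1 };
        struct node *rule = calloc(1, sizeof(*rule));

        rule->refcount = 2;     /* an extra reference already taken elsewhere */

        /* linkage test: explicit, independent of how many refs exist */
        if (!rule->parent)
                tree_add_node(rule, &fte);

        printf("linked=%d fte_refs=%d\n", rule->parent == &fte, fte.refcount);
        free(rule);
        return 0;
}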
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index e7faf7e73ca4..2d95a9b7b44e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -283,7 +283,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id)
{
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 2911aa34a5be..979c49ae6b5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -52,7 +52,8 @@ static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
}
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d77be1b4dd9c..8e0404c0d1ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -531,7 +531,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
goto out;
- netdev->mtu = new_params.sw_mtu;
+ WRITE_ONCE(netdev->mtu, new_params.sw_mtu);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index f87471306f6b..028a76944d82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -280,7 +280,7 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
mutex_lock(&priv->state_lock);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
mutex_unlock(&priv->state_lock);
return 0;
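
Both ipoib change_mtu paths now store the MTU with WRITE_ONCE(), presumably because datapath readers sample netdev->mtu without taking the state lock and the annotation rules out store tearing and unwanted compiler reordering. A rough userspace analogue using C11 relaxed atomics (WRITE_ONCE itself is a kernel macro, closer to a volatile store).

#include <stdio.h>
#include <stdatomic.h>

struct netdev { _Atomic unsigned int mtu; };

/* writer path, analogous to the locked change_mtu() callback */
static void set_mtu(struct netdev *nd, unsigned int new_mtu)
{
        atomic_store_explicit(&nd->mtu, new_mtu, memory_order_relaxed);
}

/* lockless reader, analogous to a datapath READ_ONCE(netdev->mtu) */
static unsigned int get_mtu(struct netdev *nd)
{
        return atomic_load_explicit(&nd->mtu, memory_order_relaxed);
}

int main(void)
{
        struct netdev nd = { .mtu = 1500 };

        set_mtu(&nd, 9000);
        printf("mtu=%u\n", get_mtu(&nd));
        return 0;
}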
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d14459e5c04f..f7f0476a4a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -703,15 +703,16 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
return 0;
}
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
@@ -737,8 +738,6 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
- if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
- return false;
#else
for (i = 0; i < ldev->ports; i++)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
@@ -812,7 +811,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb)
for (i = 0; i < ldev->ports; i++)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -920,7 +919,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
}
@@ -931,7 +930,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 82889f30506e..571ea26edd0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
@@ -113,7 +113,7 @@ err_rescan_drivers:
err_add_devices:
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 101b3bb90863..c16b462ddedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -449,13 +449,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
@@ -470,13 +468,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e7d59cfa8708..7b0766c89f4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devc))
- return NULL;
+ return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index b78f2ba25c19..9f13cea16446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,21 +9,24 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_NUM_GROUPS 3
-#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT)
-#define MLX5_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\
- MLX5_TTC_GROUP2_SIZE +\
- MLX5_TTC_GROUP3_SIZE)
-
-#define MLX5_INNER_TTC_NUM_GROUPS 3
-#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\
- MLX5_INNER_TTC_GROUP2_SIZE +\
- MLX5_INNER_TTC_GROUP3_SIZE)
+#define MLX5_TTC_MAX_NUM_GROUPS 4
+#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
+
+struct mlx5_fs_ttc_groups {
+ bool use_l4_type;
+ int num_groups;
+ int group_size[MLX5_TTC_MAX_NUM_GROUPS];
+};
+
+static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
+{
+ int i, sz = 0;
+
+ for (i = 0; i < groups->num_groups; i++)
+ sz += groups->group_size[i];
+
+ return sz;
+}
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
@@ -138,6 +141,53 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
};
+enum TTC_GROUP_TYPE {
+ TTC_GROUPS_DEFAULT = 0,
+ TTC_GROUPS_USE_L4_TYPE = 1,
+};
+
+static const struct mlx5_fs_ttc_groups ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3) + MLX5_NUM_TUNNEL_TT,
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
+static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3),
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -188,9 +238,29 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
+static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
+ u8 proto, bool use_l4_type)
+{
+ int l4_type;
+
+ if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
+ if (proto == IPPROTO_TCP)
+ l4_type = MLX5_PACKET_L4_TYPE_TCP;
+ else
+ l4_type = MLX5_PACKET_L4_TYPE_UDP;
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest, u16 etype, u8 proto)
+ struct mlx5_flow_destination *dest, u16 etype, u8 proto,
+ bool use_l4_type)
{
int match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
@@ -207,8 +277,13 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ outer_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers),
+ proto, use_l4_type);
}
ipv = mlx5_etype_to_ipv(etype);
@@ -234,7 +309,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_flow_handle **trules;
struct mlx5_ttc_rule *rules;
@@ -251,7 +327,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -273,7 +350,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
- ttc_tunnel_rules[tt].proto);
+ ttc_tunnel_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -289,7 +367,8 @@ del_rules:
}
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
- bool use_ipv)
+ bool use_ipv,
+ const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
@@ -297,7 +376,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -307,16 +386,31 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
if (use_ipv)
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
else
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -326,7 +420,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -336,7 +430,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -358,7 +452,7 @@ static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
- u16 etype, u8 proto)
+ u16 etype, u8 proto, bool use_l4_type)
{
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
@@ -379,8 +473,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ inner_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ inner_headers),
+ proto, use_l4_type);
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -395,7 +494,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_ttc_rule *rules;
struct mlx5_flow_table *ft;
@@ -413,7 +513,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -430,7 +531,8 @@ del_rules:
return err;
}
-static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
+static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
+ const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
@@ -438,8 +540,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
- GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -449,13 +550,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -465,7 +581,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* L3 Group */
MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -475,7 +591,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -496,27 +612,47 @@ err:
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
struct ttc_params *params)
{
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &inner_ttc_groups[TTC_GROUPS_DEFAULT];
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
- err = mlx5_create_inner_ttc_table_groups(ttc);
+ err = mlx5_create_inner_ttc_table_groups(ttc, groups);
if (err)
goto destroy_ft;
- err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
@@ -549,27 +685,47 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
- err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
if (err)
goto destroy_ft;
- err = mlx5_generate_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 85fef0cd1c07..92eea6bea310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -40,7 +40,7 @@ struct mlx5_ttc_rule {
struct mlx5_ttc_table;
struct ttc_params {
- struct mlx5_flow_namespace *ns;
+ enum mlx5_flow_namespace_type ns_type;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index 5b28084e8a03..dd5d186dc614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
sd = mlx5_get_sd(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
sd->group_id, NULL, dev);
- if (!devcom)
- return -ENOMEM;
+ if (IS_ERR(devcom))
+ return PTR_ERR(devcom);
sd->devcom = devcom;
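The devcom hunks above switch mlx5_devcom_register_component() from returning NULL on bad input to the standard ERR_PTR() convention, so callers such as sd_register() can propagate a real errno instead of guessing -ENOMEM. A short sketch of the idiom, taken from the hunks themselves (caller fragment, not a standalone program):

	#include <linux/err.h>

	/* Callee: encode the errno in the returned pointer instead of NULL. */
	if (IS_ERR_OR_NULL(devc))
		return ERR_PTR(-EINVAL);

	/* Caller: unwrap with IS_ERR()/PTR_ERR(), as sd_register() now does. */
	devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
						sd->group_id, NULL, dev);
	if (IS_ERR(devcom))
		return PTR_ERR(devcom);
	sd->devcom = devcom;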
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c2593625c09a..6574c145dc1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
mlx5_query_nic_system_image_guid(dev),
NULL, dev);
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (IS_ERR(dev->priv.hca_devcom_comp))
mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}
@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ err = mlx5_hwmon_dev_register(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
+ devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
+ if (err)
+ devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
+ mlx5_hwmon_dev_unregister(dev);
+ mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unregister(devlink);
devl_unlock(devlink);
}
@@ -1666,6 +1680,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
+ devl_register(devlink);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err) {
@@ -1679,20 +1695,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
goto query_hca_caps_err;
}
- devl_lock(devlink);
err = mlx5_devlink_params_register(priv_to_devlink(dev));
- devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
goto query_hca_caps_err;
}
+ devl_unlock(devlink);
return 0;
query_hca_caps_err:
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ devl_unregister(devlink);
+ devl_unlock(devlink);
return err;
}
@@ -1702,6 +1719,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@@ -1943,16 +1961,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
- err = mlx5_crdump_enable(dev);
- if (err)
- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
- err = mlx5_hwmon_dev_register(dev);
- if (err)
- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
pci_save_state(pdev);
- devlink_register(devlink);
return 0;
err_init_one:
@@ -1973,16 +1982,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
- * devlink notify APIs.
- * Hence, we must drain them before unregistering the devlink.
- */
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
- devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_hwmon_dev_unregister(dev);
- mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
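The main.c hunks above fold crdump/hwmon setup into mlx5_init_one_devl_locked() and replace devlink_register()/devlink_unregister() in probe/remove with the locked devl_register()/devl_unregister() calls inside the init/uninit paths, unwinding registration when init fails. Roughly, the resulting flow in mlx5_init_one() (a restatement of the hunk, not new behaviour):

	devl_lock(devlink);
	devl_register(devlink);			/* locked variant; devlink instance lock is held */
	err = mlx5_init_one_devl_locked(dev);
	if (err)
		devl_unregister(devlink);	/* undo registration on failure */
	devl_unlock(devlink);
	return err;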
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 58732f44940f..c38342b9f320 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -205,7 +205,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index dcf58efac159..d894a88fa9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -660,6 +660,9 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
+ if (!npages)
+ return 0;
+
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 4dcf995cb1a2..fb8787e30d3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
+ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@@ -506,58 +508,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
/**
- * mlx5_msix_alloc - allocate msix interrupt
- * @dev: mlx5 device from which to request
- * @handler: interrupt handler
- * @affdesc: affinity descriptor
- * @name: interrupt name
- *
- * Returns: struct msi_map with result encoded.
- * Note: the caller must make sure to release the irq by calling
- * mlx5_msix_free() if shutdown was initiated.
- */
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
- irqreturn_t (*handler)(int, void *),
- const struct irq_affinity_desc *affdesc,
- const char *name)
-{
- struct msi_map map;
- int err;
-
- if (!dev->pdev) {
- map.virq = 0;
- map.index = -EINVAL;
- return map;
- }
-
- map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc);
- if (!map.virq)
- return map;
-
- err = request_irq(map.virq, handler, 0, name, NULL);
- if (err) {
- mlx5_core_warn(dev, "err %d\n", err);
- pci_msix_free_irq(dev->pdev, map);
- map.virq = 0;
- map.index = -ENOMEM;
- }
- return map;
-}
-EXPORT_SYMBOL(mlx5_msix_alloc);
-
-/**
- * mlx5_msix_free - free a previously allocated msix interrupt
- * @dev: mlx5 device associated with interrupt
- * @map: map previously returned by mlx5_msix_alloc()
- */
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
-{
- free_irq(map.virq, NULL);
- pci_msix_free_irq(dev->pdev, map);
-}
-EXPORT_SYMBOL(mlx5_msix_free);
-
-/**
* mlx5_irq_release_vector - release one IRQ back to the system.
* @irq: the irq to release.
*/
@@ -585,7 +535,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- int offset = 1;
+ int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index bc863e1f062e..b2986175d9af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
+ /* Peer devlink logic expects to work on unregistered devlink instance. */
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
err = mlx5_init_one_light(mdev);
else
@@ -69,21 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
- err = mlx5_core_peer_devlink_set(sf_dev, devlink);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
- goto peer_devlink_set_err;
- }
-
- devlink_register(devlink);
return 0;
-peer_devlink_set_err:
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
- else
- mlx5_uninit_one(sf_dev->mdev);
init_one_err:
+peer_devlink_set_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
@@ -101,7 +97,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
devlink = priv_to_devlink(mdev);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_drain_health_wq(mdev);
- devlink_unregister(devlink);
if (mlx5_dev_is_lightweight(mdev))
mlx5_uninit_one_light(mdev);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 64f4cc284aea..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -205,12 +205,11 @@ dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
}
static int
-dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
struct mlx5dr_rule_action_member *action_mem)
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
int ret;
@@ -488,10 +487,9 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
}
static int
-dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
int ret;
@@ -522,7 +520,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
}
static int
-dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
+ struct mlx5dr_rule_rx_tx *rule_rx_tx,
bool is_rx, const u64 rule_id, u8 format_ver)
{
struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
@@ -533,7 +532,7 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
while (i--) {
- ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
format_ver);
if (ret < 0)
return ret;
@@ -542,7 +541,8 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
}
-static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+static noinline_for_stack int
+dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
@@ -565,19 +565,19 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
return ret;
if (rx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
if (ret < 0)
return ret;
}
if (tx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
if (ret < 0)
return ret;
}
list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
- ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
if (ret < 0)
return ret;
}
@@ -586,10 +586,10 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
}
static int
-dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+dr_dump_matcher_mask(struct seq_file *file, char *buff,
+ struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
int ret;
@@ -681,10 +681,10 @@ dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
}
static int
-dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+dr_dump_matcher_builder(struct seq_file *file, char *buff,
+ struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -702,11 +702,10 @@ dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
}
static int
-dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -731,7 +730,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
- ret = dr_dump_matcher_builder(file,
+ ret = dr_dump_matcher_builder(file, buff,
&matcher_rx_tx->ste_builder[i],
i, is_rx, matcher_id);
if (ret < 0)
@@ -741,7 +740,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
@@ -763,19 +762,19 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
if (ret)
return ret;
- ret = dr_dump_matcher_mask(file, &matcher->mask,
+ ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
matcher->match_criteria, matcher_id);
if (ret < 0)
return ret;
if (rx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
if (ret < 0)
return ret;
}
if (tx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
if (ret < 0)
return ret;
}
@@ -803,11 +802,10 @@ dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
}
static int
-dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
int ret;
@@ -829,7 +827,8 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+static noinline_for_stack int
+dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
@@ -848,14 +847,14 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
return ret;
if (rx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, true, rx,
+ ret = dr_dump_table_rx_tx(file, buff, true, rx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
}
if (tx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, false, tx,
+ ret = dr_dump_table_rx_tx(file, buff, false, tx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
@@ -881,10 +880,10 @@ static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
}
static int
-dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+dr_dump_send_ring(struct seq_file *file, char *buff,
+ struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -902,13 +901,13 @@ dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
return 0;
}
-static noinline_for_stack int
+static int
dr_dump_domain_info_flex_parser(struct seq_file *file,
+ char *buff,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -925,11 +924,11 @@ dr_dump_domain_info_flex_parser(struct seq_file *file,
return 0;
}
-static noinline_for_stack int
-dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+static int
+dr_dump_domain_info_caps(struct seq_file *file, char *buff,
+ struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
int ret;
@@ -969,34 +968,35 @@ dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
}
static int
-dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+dr_dump_domain_info(struct seq_file *file, char *buff,
+ struct mlx5dr_domain_info *info,
const u64 domain_id)
{
int ret;
- ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
info->caps.flex_parser_id_icmp_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
info->caps.flex_parser_id_icmp_dw1,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
info->caps.flex_parser_id_icmpv6_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
info->caps.flex_parser_id_icmpv6_dw1,
domain_id);
if (ret < 0)
@@ -1032,12 +1032,12 @@ dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
if (ret)
return ret;
- ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
if (ret < 0)
return ret;
if (dmn->info.supp_sw_steering) {
- ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
if (ret < 0)
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index f708b029425a..e9f6c7ed7a7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1883,7 +1883,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index dd856cde188d..1d49704b9542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -1897,7 +1897,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
@@ -2129,7 +2129,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3d09fa54598f..b157f0f1c5a8 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
@@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
control |= MLXBF_GIGE_CONTROL_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
- err = mlxbf_gige_request_irqs(priv);
- if (err)
- return err;
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
if (err)
- goto free_irqs;
+ return err;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_tx_init(priv);
if (err)
- goto free_irqs;
+ goto phy_deinit;
err = mlxbf_gige_rx_init(priv);
if (err)
goto tx_deinit;
@@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
napi_enable(&priv->napi);
netif_start_queue(netdev);
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ goto napi_deinit;
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
return 0;
+napi_deinit:
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ mlxbf_gige_rx_deinit(priv);
+
tx_deinit:
mlxbf_gige_tx_deinit(priv);
-free_irqs:
- mlxbf_gige_free_irqs(priv);
+phy_deinit:
+ phy_stop(phydev);
return err;
}
@@ -428,7 +436,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
- phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
+ phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
if (phy_irq < 0) {
dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
phy_irq = PHY_POLL;
@@ -485,8 +493,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
- writeq(0, priv->base + MLXBF_GIGE_INT_EN);
- mlxbf_gige_clean_port(priv);
+ rtnl_lock();
+ netif_device_detach(priv->netdev);
+
+ if (netif_running(priv->netdev))
+ dev_close(priv->netdev);
+
+ rtnl_unlock();
}
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
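The mlxbf_gige shutdown hunk above stops poking hardware registers directly and instead detaches the netdev and closes it under RTNL, so teardown goes through the driver's regular stop path (which also covers the IRQs now requested late in mlxbf_gige_open()). The resulting shape, restated without the diff markers:

	rtnl_lock();
	netif_device_detach(priv->netdev);	/* mark the device as going away */
	if (netif_running(priv->netdev))
		dev_close(priv->netdev);	/* runs the driver's ndo_stop */
	rtnl_unlock();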
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e4d7739bd7c8..4a79c0d7e7ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -849,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7ae4e..6c06b0592760 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index af99bf17eb36..bf66d996e32e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -8,7 +8,6 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
@@ -36,6 +35,11 @@ enum mlxsw_pci_queue_type {
#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+enum mlxsw_pci_cq_type {
+ MLXSW_PCI_CQ_SDQ,
+ MLXSW_PCI_CQ_RDQ,
+};
+
static const u16 mlxsw_pci_doorbell_type_offset[] = {
MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
@@ -78,18 +82,15 @@ struct mlxsw_pci_queue {
u8 num; /* queue number */
u8 elem_size; /* size of one element */
enum mlxsw_pci_queue_type type;
- struct tasklet_struct tasklet; /* queue processing tasklet */
struct mlxsw_pci *pci;
union {
struct {
- u32 comp_sdq_count;
- u32 comp_rdq_count;
enum mlxsw_pci_cqe_v v;
+ struct mlxsw_pci_queue *dq;
+ struct napi_struct napi;
} cq;
struct {
- u32 ev_cmd_count;
- u32 ev_comp_count;
- u32 ev_other_count;
+ struct tasklet_struct tasklet;
} eq;
} u;
};
@@ -120,9 +121,6 @@ struct mlxsw_pci {
struct mlxsw_pci_mem_item out_mbox;
struct mlxsw_pci_mem_item in_mbox;
struct mutex lock; /* Lock access to command registers */
- bool nopoll;
- wait_queue_head_t wait;
- bool wait_done;
struct {
u8 status;
u64 out_param;
@@ -131,13 +129,43 @@ struct mlxsw_pci {
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
- u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+ u8 num_cqs; /* Number of CQs */
+ u8 num_sdqs; /* Number of SDQs */
bool skip_reset;
+ struct net_device *napi_dev_tx;
+ struct net_device *napi_dev_rx;
};
-static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_tx)
+ return -ENOMEM;
+ strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx",
+ sizeof(mlxsw_pci->napi_dev_tx->name));
+
+ mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_rx) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
+ sizeof(mlxsw_pci->napi_dev_rx->name));
+ dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+
+ return 0;
+
+err_alloc_rx:
+ free_netdev(mlxsw_pci->napi_dev_tx);
+ return err;
+}
+
+static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci)
{
- tasklet_schedule(&q->tasklet);
+ free_netdev(mlxsw_pci->napi_dev_rx);
+ free_netdev(mlxsw_pci->napi_dev_tx);
}
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
@@ -187,25 +215,6 @@ mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
return &mlxsw_pci->queues[q_type];
}
-static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
- enum mlxsw_pci_queue_type q_type)
-{
- struct mlxsw_pci_queue_type_group *queue_group;
-
- queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
- return queue_group->count;
-}
-
-static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
-}
-
-static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
-}
-
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -220,23 +229,16 @@ static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
-{
- return __mlxsw_pci_queue_get(mlxsw_pci,
- MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
-}
-
static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
u8 q_num)
{
return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
{
- return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+ /* There is only one EQ at index 0. */
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
}
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
@@ -291,7 +293,9 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ struct mlxsw_pci_queue *cq;
int tclass;
+ u8 cq_num;
int lp;
int i;
int err;
@@ -304,7 +308,8 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
/* Set CQ of same number of this SDQ. */
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ cq_num = q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
@@ -317,6 +322,9 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return 0;
}
@@ -399,7 +407,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
+ u8 sdq_count = mlxsw_pci->num_sdqs;
+ struct mlxsw_pci_queue *cq;
+ u8 cq_num;
int i;
int err;
@@ -409,7 +419,8 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
/* Set CQ of same number of this RDQ with base
* above SDQ count as the lower ones are assigned to SDQs.
*/
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
+ cq_num = sdq_count + q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -421,6 +432,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
if (err)
return err;
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
+
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
for (i = 0; i < q->count; i++) {
@@ -441,6 +455,7 @@ rollback:
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
}
+ cq->u.cq.dq = NULL;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
return err;
@@ -465,54 +480,11 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
q->u.cq.v = mlxsw_pci->max_cqe_ver;
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs &&
+ q->num < mlxsw_pci->num_sdqs &&
!mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
-static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_pci_queue *q)
-{
- int i;
- int err;
-
- q->consumer_counter = 0;
-
- for (i = 0; i < q->count; i++) {
- char *elem = mlxsw_pci_queue_elem_get(q, i);
-
- mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
- }
-
- if (q->u.cq.v == MLXSW_PCI_CQE_V1)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
- else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
-
- mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
- mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
- for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
- dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
-
- mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
- }
- err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
- if (err)
- return err;
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- return 0;
-}
-
-static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue *q)
-{
- mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
-}
-
static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
ptrdiff_t off)
{
@@ -692,9 +664,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
out:
- /* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
- mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return;
}
@@ -714,58 +684,165 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
+static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
+ return !mlxsw_pci_elem_hw_owned(q, owner_bit);
+}
+
+static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *rdq = q->u.cq.dq;
struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
char *cqe;
- int items = 0;
- int credits = q->count >> 1;
+
+ /* If the budget is 0, Rx processing should be skipped. */
+ if (unlikely(!budget))
+ return 0;
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
- char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
- memcpy(ncqe, cqe, q->elem_size);
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ if (unlikely(sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
- if (sendq) {
- struct mlxsw_pci_queue *sdq;
+ if (unlikely(dqn != rdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
- sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_sdq_count++;
- } else {
- struct mlxsw_pci_queue *rdq;
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, q->u.cq.v, cqe);
- rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_rdq_count++;
- }
- if (++items == credits)
+ if (++work_done == budget)
break;
}
- if (items)
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq);
+
+ if (work_done < budget)
+ goto processing_completed;
+
+ /* The driver still has outstanding work to do, budget was exhausted.
+ * Return exactly budget. In that case, the NAPI instance will be polled
+ * again.
+ */
+ if (mlxsw_pci_cq_cqe_to_handle(q))
+ goto out;
+
+ /* The driver processed all the completions and handled exactly
+ * 'budget'. Return 'budget - 1' to distinguish from the case that
+ * driver still has completions to handle.
+ */
+ if (work_done == budget)
+ work_done--;
+
+processing_completed:
+ if (napi_complete_done(napi, work_done))
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
}
-static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
- MLXSW_PCI_CQE01_COUNT;
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *sdq = q->u.cq.dq;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
+ char *cqe;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+ if (unlikely(!sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ if (unlikely(dqn != sdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ memcpy(ncqe, cqe, q->elem_size);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, q->u.cq.v, ncqe);
+
+ work_done++;
+ }
+
+ /* If the budget is 0, napi_complete_done() should never be called. */
+ if (unlikely(!budget))
+ goto processing_completed;
+
+ work_done = min(work_done, budget - 1);
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ goto out;
+
+processing_completed:
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
}
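
Tx completion polling has one extra wrinkle: netpoll may invoke the poll routine with a budget of 0, in which case Tx work may still be reclaimed but napi_complete_done() must not be called and nothing may be reported as done; for a non-zero budget a Tx-only poller also never reports the full budget back, hence the min(work_done, budget - 1) above. A hedged sketch of just that guard, with hypothetical my_* helpers and without the driver's exact doorbell sequence:

#include <linux/minmax.h>
#include <linux/netdevice.h>

/* Hypothetical helpers standing in for the hardware specifics. */
static bool my_reclaim_one_tx(struct napi_struct *napi) { return false; }
static void my_rearm_irq_tx(struct napi_struct *napi) { }

static int my_napi_poll_tx(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* Tx completions are reclaimed even when the budget is 0 (netpoll). */
        while (my_reclaim_one_tx(napi))
                work_done++;

        /* Zero budget: do not call napi_complete_done(), report nothing. */
        if (unlikely(!budget))
                return 0;

        /* A Tx-only poller never reports the full budget back. */
        work_done = min(work_done, budget - 1);
        if (napi_complete_done(napi, work_done))
                my_rearm_irq_tx(napi);

        return work_done;
}
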
-static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+static enum mlxsw_pci_cq_type
+mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue *q)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
- MLXSW_PCI_CQE01_SIZE;
+ /* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
+ * for SDQs and the rest are used for RDQs.
+ */
+ if (q->num < mlxsw_pci->num_sdqs)
+ return MLXSW_PCI_CQ_SDQ;
+
+ return MLXSW_PCI_CQ_RDQ;
}
-static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+
+ switch (cq_type) {
+ case MLXSW_PCI_CQ_SDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_tx);
+ break;
+ case MLXSW_PCI_CQ_RDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_rx);
+ break;
+ }
+
+ napi_enable(&q->u.cq.napi);
+}
+
+static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
+{
+ napi_disable(&q->u.cq.napi);
+ netif_napi_del(&q->u.cq.napi);
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
int i;
@@ -776,39 +853,50 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
char *elem = mlxsw_pci_queue_elem_get(q, i);
- mlxsw_pci_eqe_owner_set(elem, 1);
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
}
- mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
- mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
- mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
}
- err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+ mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
return 0;
}
-static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
- mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+ mlxsw_pci_cq_napi_teardown(q);
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
-static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
- mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
- mlxsw_pci->cmd.comp.out_param =
- ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
- mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
- mlxsw_pci->cmd.wait_done = true;
- wake_up(&mlxsw_pci->cmd.wait);
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
}
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
@@ -829,54 +917,81 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
- struct mlxsw_pci *mlxsw_pci = q->pci;
- u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
- char *eqe;
- u8 cqn;
- bool cq_handle = false;
- int items = 0;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
+ struct mlxsw_pci *mlxsw_pci = q->pci;
int credits = q->count >> 1;
+ u8 cqn, cq_count;
+ int items = 0;
+ char *eqe;
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
- /* Command interface completion events are always received on
- * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
- * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
- */
- switch (q->num) {
- case MLXSW_PCI_EQ_ASYNC_NUM:
- mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
- q->u.eq.ev_cmd_count++;
- break;
- case MLXSW_PCI_EQ_COMP_NUM:
- cqn = mlxsw_pci_eqe_cqn_get(eqe);
- set_bit(cqn, active_cqns);
- cq_handle = true;
- q->u.eq.ev_comp_count++;
- break;
- default:
- q->u.eq.ev_other_count++;
- }
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
- if (!cq_handle)
+ if (!items)
return;
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+
+ cq_count = mlxsw_pci->num_cqs;
for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
- mlxsw_pci_queue_tasklet_schedule(q);
+ napi_schedule(&q->u.cq.napi);
}
}
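
The EQ tasklet above coalesces event entries per CQ: each EQE only carries a CQ number, so a bitmap deduplicates multiple events for the same CQ and every active CQ gets exactly one napi_schedule(). A minimal sketch of that dedup-and-dispatch pattern, with the 96-entry limit assumed to mirror MLXSW_PCI_CQS_MAX and pr_info() standing in for the actual scheduling call:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_CQS_MAX 96   /* assumption: mirrors MLXSW_PCI_CQS_MAX */

static void my_dispatch_events(const u8 *cqns, int n_events)
{
        DECLARE_BITMAP(active, MY_CQS_MAX) = { };
        unsigned int cqn;
        int i;

        /* Multiple events for the same CQ collapse into one bit. */
        for (i = 0; i < n_events; i++)
                set_bit(cqns[i], active);

        /* One scheduling action per active CQ. */
        for_each_set_bit(cqn, active, MY_CQS_MAX)
                pr_info("schedule NAPI for CQ %u\n", cqn);
}
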
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ /* We expect to initialize only one EQ, which gets num=0 as it is
+ * located at index zero. We use the EQ as EQ1, so set the number for
+ * future use.
+ */
+ WARN_ON_ONCE(q->num);
+ q->num = MLXSW_PCI_EQ_COMP_NUM;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
struct mlxsw_pci_queue_ops {
const char *name;
enum mlxsw_pci_queue_type type;
@@ -886,7 +1001,6 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -914,7 +1028,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.pre_init = mlxsw_pci_cq_pre_init,
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
- .tasklet = mlxsw_pci_cq_tasklet,
.elem_count_f = mlxsw_pci_cq_elem_count,
.elem_size_f = mlxsw_pci_cq_elem_size
};
@@ -923,7 +1036,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_EQ,
.init = mlxsw_pci_eq_init,
.fini = mlxsw_pci_eq_fini,
- .tasklet = mlxsw_pci_eq_tasklet,
.elem_count = MLXSW_PCI_EQE_COUNT,
.elem_size = MLXSW_PCI_EQE_SIZE
};
@@ -948,9 +1060,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->type = q_ops->type;
q->pci = mlxsw_pci;
- if (q_ops->tasklet)
- tasklet_setup(&q->tasklet, q_ops->tasklet);
-
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
mem_item->size, &mem_item->mapaddr,
@@ -1074,7 +1183,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
if (num_sdqs + num_rdqs > num_cqs ||
num_sdqs < MLXSW_PCI_SDQS_MIN ||
- num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1089,10 +1198,11 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
return -EINVAL;
}
- mlxsw_pci->num_sdq_cqs = num_sdqs;
+ mlxsw_pci->num_cqs = num_cqs;
+ mlxsw_pci->num_sdqs = num_sdqs;
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
- num_eqs);
+ MLXSW_PCI_EQS_COUNT);
if (err) {
dev_err(&pdev->dev, "Failed to initialize event queues\n");
return err;
@@ -1119,8 +1229,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
goto err_rdqs_init;
}
- /* We have to poll in command interface until queues are initialized */
- mlxsw_pci->cmd.nopoll = true;
return 0;
err_rdqs_init:
@@ -1134,7 +1242,6 @@ err_cqs_init:
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
- mlxsw_pci->cmd.nopoll = false;
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
@@ -1432,12 +1539,9 @@ static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
struct mlxsw_pci *mlxsw_pci = dev_id;
struct mlxsw_pci_queue *q;
- int i;
- for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
- q = mlxsw_pci_eq_get(mlxsw_pci, i);
- mlxsw_pci_queue_tasklet_schedule(q);
- }
+ q = mlxsw_pci_eq_get(mlxsw_pci);
+ tasklet_schedule(&q->u.eq.tasklet);
return IRQ_HANDLED;
}
@@ -1530,7 +1634,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool pci_reset_supported;
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1548,11 +1652,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
- &pci_reset_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
@@ -1711,6 +1813,10 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_requery_resources;
+ err = mlxsw_pci_napi_devs_init(mlxsw_pci);
+ if (err)
+ goto err_napi_devs_init;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1728,6 +1834,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
+err_napi_devs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
@@ -1755,6 +1863,7 @@ static void mlxsw_pci_fini(void *bus_priv)
free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
@@ -1763,7 +1872,7 @@ static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
u8 sdqn;
if (tx_info->is_emad) {
@@ -1862,9 +1971,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
- bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
- bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ unsigned long end;
+ bool wait_done;
int err;
*p_status = MLXSW_CMD_STATUS_OK;
@@ -1888,36 +1997,28 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
- *p_wait_done = false;
+ wait_done = false;
wmb(); /* all needs to be written before we write control register */
mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
MLXSW_PCI_CIR_CTRL_GO_BIT |
- (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
(opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
opcode);
- if (!evreq) {
- unsigned long end;
-
- end = jiffies + timeout;
- do {
- u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
- if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
- *p_wait_done = true;
- *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
- break;
- }
- cond_resched();
- } while (time_before(jiffies, end));
- } else {
- wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
- *p_status = mlxsw_pci->cmd.comp.status;
- }
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
err = 0;
- if (*p_wait_done) {
+ if (wait_done) {
if (*p_status)
err = -EIO;
} else {
@@ -1931,14 +2032,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*/
__be32 tmp;
- if (!evreq) {
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_HI));
- memcpy(out_mbox, &tmp, sizeof(tmp));
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_LO));
- memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
- }
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
} else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
}
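
With the asynchronous event queue gone, every command is now completed by polling the GO bit in the control register until it clears or a timeout expires, yielding the CPU between reads. A hedged, generic sketch of the same bounded-poll idiom; the register accessor is passed in as a hypothetical callback and MY_GO_BIT is an illustrative stand-in for the CIR GO bit:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define MY_GO_BIT       BIT(23)         /* assumption: position of the GO bit */

static int my_wait_cmd_done(u32 (*my_read_ctrl)(void), unsigned int timeout_ms)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (!(my_read_ctrl() & MY_GO_BIT))
                        return 0;       /* firmware cleared GO: command done */
                cond_resched();         /* polling may run for a while */
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}
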
@@ -2017,7 +2116,6 @@ static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
int err;
mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7cdf0ce24f28..6bed495dcf0f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -42,8 +42,8 @@
((offset) + (type_offset) + (num) * 4)
#define MLXSW_PCI_CQS_MAX 96
-#define MLXSW_PCI_EQS_COUNT 2
-#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQS_MAX 2
+#define MLXSW_PCI_EQS_COUNT 1
#define MLXSW_PCI_EQ_COMP_NUM 1
#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8892654c685f..8adf86a6f5cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4786,8 +4786,11 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR BIT(11)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2 BIT(13)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4 BIT(16)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19)
/* reg_ptys_ext_eth_proto_cap
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bb642e9bb6cf..030ed71f945d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -825,7 +825,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
err_port_mtu_set:
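
dev->mtu is now written with WRITE_ONCE() because fast-path readers may load it without holding any lock; annotating both sides keeps the accesses tearing-free and documents the lockless pairing. A small, hedged illustration of the paired accessors (my_* names are illustrative, not driver helpers):

#include <linux/netdevice.h>

/* Writer side, called under whatever configuration lock the driver holds. */
static void my_set_mtu(struct net_device *dev, int new_mtu)
{
        WRITE_ONCE(dev->mtu, new_mtu);
}

/* Lockless reader side, e.g. a datapath length check. */
static bool my_frame_fits(const struct net_device *dev, unsigned int len)
{
        return len <= READ_ONCE(dev->mtu);
}
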
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index f20052776b3f..b1d08e958bf9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
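
Region and group IDs now come from an IDA instead of a driver-private bitmap, which drops the fixed-size bitmap allocation and returns proper error codes (-ENOSPC when the range is full, -ENOMEM on allocation failure). A minimal sketch of the same allocate/free pattern; my_ida and MY_MAX_IDS are illustrative names, not part of this driver:

#include <linux/idr.h>

#define MY_MAX_IDS 128                  /* illustrative upper bound */

static DEFINE_IDA(my_ida);

static int my_id_get(u16 *p_id)
{
        int id;

        /* Smallest free ID in [0, MY_MAX_IDS - 1], or a negative errno. */
        id = ida_alloc_max(&my_ida, MY_MAX_IDS - 1, GFP_KERNEL);
        if (id < 0)
                return id;

        *p_id = id;
        return 0;
}

static void my_id_put(u16 id)
{
        ida_free(&my_ida, id);
}
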
struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -726,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+ * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1465,7 +1504,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam_vregion *vregion;
@@ -1549,19 +1589,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1607,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1591,8 +1621,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf448497d..79a1d8606512 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 0f29e9c19411..a755b0a901d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1649,6 +1649,18 @@ mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
@@ -1661,6 +1673,18 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
@@ -1673,6 +1697,18 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_800gaui_8[] = {
ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
@@ -1817,6 +1853,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 2,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .speed = SPEED_100000,
+ .width = 1,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
@@ -1826,6 +1870,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 4,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .speed = SPEED_200000,
+ .width = 2,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
@@ -1834,6 +1886,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 8,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .speed = SPEED_400000,
+ .width = 4,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 9fd1ca079258..f07955b5439f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -595,6 +595,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
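
flow_rule_has_control_flags() is the common helper for rejecting TC control-flag matches (fragment bits and the like) that a driver cannot offload; it also fills in the extack message itself. A short, hedged sketch of the same check inside a classifier offload callback, with my_parse_control() as an illustrative wrapper rather than a function from this driver:

#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int my_parse_control(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_match_control match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL))
                return 0;

        flow_rule_match_control(rule, &match);

        /* Any masked control flag we do not handle -> refuse the offload. */
        if (flow_rule_has_control_flags(match.mask->flags, f->common.extack))
                return -EOPNOTSUPP;

        return 0;
}
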
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 3340b4a694c3..d761a1235994 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -8,7 +8,7 @@
#include "spectrum_ipip.h"
#include "reg.h"
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
@@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms->i_key) : 0;
@@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms->o_key) : 0;
@@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
@@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
@@ -96,7 +98,7 @@ union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms4;
+
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
@@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static struct mlxsw_sp_ipip_parms
mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return (struct mlxsw_sp_ipip_parms) {
.proto = MLXSW_SP_L3_PROTO_IPV4,
@@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ struct ip_tunnel_parm_kern parms;
char rtdp_pl[MLXSW_REG_RTDP_LEN];
- struct ip_tunnel_parm parms;
unsigned int type_check;
bool has_ikey;
u32 daddr4;
@@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
bool inherit_ttl = tunnel->parms.iph.ttl == 0;
bool inherit_tos = tunnel->parms.iph.tos & 0x1;
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
+
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
- return (tunnel->parms.i_flags & ~okflags) == 0 &&
- (tunnel->parms.o_flags & ~okflags) == 0 &&
+ return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev);
}
@@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
@@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
bool inherit_ttl = tparm.hop_limit == 0;
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
+
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
- return (tparm.i_flags & ~okflags) == 0 &&
- (tparm.o_flags & ~okflags) == 0 &&
+ return ip_tunnel_flags_subset(tparm.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tparm.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
}
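
Tunnel flags are now a fixed-size bitmap carried in struct ip_tunnel_parm_kern rather than a __be16 mask, so the "only TUNNEL_KEY may be set" test becomes a bitmap-subset check. A hedged sketch of that test using the same helpers seen above; my_only_key_is_set() is an illustrative wrapper:

#include <net/ip_tunnels.h>

/* Offload is acceptable only if i_flags/o_flags carry nothing but the key. */
static bool my_only_key_is_set(const struct ip_tunnel_parm_kern *parms)
{
        IP_TUNNEL_DECLARE_FLAGS(okflags) = { };

        __set_bit(IP_TUNNEL_KEY_BIT, okflags);

        return ip_tunnel_flags_subset(parms->i_flags, okflags) &&
               ip_tunnel_flags_subset(parms->o_flags, okflags);
}
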
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index a35f009da561..a66173779641 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -9,7 +9,7 @@
#include <linux/if_tunnel.h>
#include <net/ip6_tunnel.h>
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
struct __ip6_tnl_parm
mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index af50ff9e5f26..4b5fd71c897d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
__be32 *saddrp, __be32 *daddrp)
{
struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct ip_tunnel_parm_kern parms;
struct net_device *dev = NULL;
- struct ip_tunnel_parm parms;
struct rtable *rt = NULL;
struct flowi4 fl4;
@@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
struct mlxsw_sp_span_parms *sparmsp)
{
- struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
bool inherit_tos = tparm.iph.tos & 0x1;
@@ -461,7 +461,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
@@ -539,7 +540,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
if (!dst || dst->error)
goto out;
- rt6 = container_of(dst, struct rt6_info, dst);
+ rt6 = dst_rt6_info(dst);
dev = dst->dev;
*saddrp = fl6.saddr;
@@ -565,7 +566,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363aff..31f75b4a67fd 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b8f5..6453c92f0fa7 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,24 +232,15 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
+ * @rxq: Queue of packets received in this function.
*
* This is called from the IRQ work queue when the system detects that there
* are packets in the receive queue. Find out how many packets there are and
* read them from the FIFO.
*/
-static void ks8851_rx_pkts(struct ks8851_net *ks)
+static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
{
struct sk_buff *skb;
unsigned rxfc;
@@ -309,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __skb_queue_tail(rxq, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -336,31 +327,25 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
- unsigned handled = 0;
+ struct sk_buff_head rxq;
unsigned long flags;
unsigned int status;
+ struct sk_buff *skb;
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
+ ks8851_wrreg16(ks, KS_ISR, status);
netif_dbg(ks, intr, ks->netdev,
"%s: status 0x%04x\n", __func__, status);
- if (status & IRQ_LCI)
- handled |= IRQ_LCI;
-
if (status & IRQ_LDI) {
u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_WKEVT_MASK;
ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
-
- handled |= IRQ_LDI;
}
- if (status & IRQ_RXPSI)
- handled |= IRQ_RXPSI;
-
if (status & IRQ_TXI) {
unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
@@ -372,27 +357,20 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
spin_unlock(&ks->statelock);
-
- handled |= IRQ_TXI;
}
- if (status & IRQ_RXI)
- handled |= IRQ_RXI;
-
if (status & IRQ_SPIBEI) {
netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
- handled |= IRQ_SPIBEI;
}
- ks8851_wrreg16(ks, KS_ISR, handled);
-
if (status & IRQ_RXI) {
/* the datasheet says to disable the rx interrupt during
* packet read-out, however we're masking the interrupt
* from the device so do not bother masking just the RX
* from the device. */
- ks8851_rx_pkts(ks);
+ __skb_queue_head_init(&rxq);
+ ks8851_rx_pkts(ks, &rxq);
}
/* if something stopped the rx process, probably due to wanting
@@ -416,6 +394,10 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ if (status & IRQ_RXI)
+ while ((skb = __skb_dequeue(&rxq)))
+ netif_rx(skb);
+
return IRQ_HANDLED;
}
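
Received frames are now parked on a local sk_buff_head while the device lock is held and only handed to the stack with netif_rx() once the register work is finished; the __skb_* queue variants are fine here because the list lives on the handler's stack and needs no locking of its own. A minimal sketch of this defer-and-flush pattern, with my_fetch_frame() as a hypothetical FIFO reader:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx_flush_example(struct sk_buff *(*my_fetch_frame)(void))
{
        struct sk_buff_head rxq;
        struct sk_buff *skb;

        __skb_queue_head_init(&rxq);            /* on-stack, unlocked queue */

        /* Phase 1: pull frames from the device (driver lock held). */
        while ((skb = my_fetch_frame()))
                __skb_queue_tail(&rxq, skb);

        /* Phase 2: feed the stack once the device work is done. */
        while ((skb = __skb_dequeue(&rxq)))
                netif_rx(skb);
}
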
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2a7f29854267..381b9cd285eb 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 2f803377c9f9..670c1de966db 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
* @work: The work strucutre what was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index c5aeeb964c17..dc1d9f774565 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5427,7 +5427,7 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu)
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 443128adbcb6..3885d6fbace1 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -75,7 +75,7 @@ static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
@@ -129,7 +129,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index d7c8aa77ec75..b011bf5c2305 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -569,7 +569,7 @@ static void encx24j600_dump_config(struct encx24j600_priv *priv,
pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
MABBIPG));
- /* PHY configuation */
+ /* PHY configuration */
pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
@@ -837,7 +837,9 @@ static void encx24j600_hw_tx(struct encx24j600_priv *priv)
dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
if (encx24j600_read_reg(priv, EIR) & TXABTIF)
- /* Last transmition aborted due to error. Reset TX interface */
+ /* Last transmission aborted due to error.
+ * Reset TX interface
+ */
encx24j600_reset_hw_tx(priv);
/* Clear the TXIF flag if were previously set */
@@ -1112,7 +1114,6 @@ MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
static struct spi_driver encx24j600_spi_net_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.bus = &spi_bus_type,
},
.probe = encx24j600_spi_probe,
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index 34c5a289898c..2522f4f48b67 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -243,7 +243,7 @@ int devm_regmap_init_encx24j600(struct device *dev,
/* MAIPG */
/* value of the high byte is given by the reserved bits,
- * value of the low byte is recomended setting of the
+ * value of the low byte is recommended setting of the
* IPG parameter.
*/
#define MAIPGH_VAL 0x0C
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 8a6ae171e375..d0f4ff4ee075 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1076,15 +1076,10 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
buf = lan743x_csr_read(adapter, MAC_CR);
if (buf & MAC_CR_EEE_EN_) {
- eee->eee_enabled = true;
- eee->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
eee->tx_lpi_timer = buf;
} else {
- eee->eee_enabled = false;
- eee->eee_active = false;
- eee->tx_lpi_enabled = false;
eee->tx_lpi_timer = 0;
}
@@ -1097,7 +1092,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct lan743x_adapter *adapter;
struct phy_device *phydev;
u32 buf = 0;
- int ret = 0;
if (!netdev)
return -EINVAL;
@@ -1114,23 +1108,8 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
}
if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, false);
- if (ret) {
- netif_err(adapter, drv, adapter->netdev,
- "EEE initialization failed\n");
- return ret;
- }
-
buf = (u32)eee->tx_lpi_timer;
lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
-
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf |= MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
- } else {
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
}
return phy_ethtool_set_eee(phydev, eee);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index bd8aa83b47e5..6be8a43c908a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -25,6 +25,8 @@
#define PCS_POWER_STATE_DOWN 0x6
#define PCS_POWER_STATE_UP 0x4
+#define RFE_RD_FIFO_TH_3_DWORDS 0x3
+
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
@@ -801,7 +803,7 @@ static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
u32 val, mii_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -866,7 +868,7 @@ static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
u32 mmd_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -1460,6 +1462,13 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
lan743x_sgmii_config(adapter);
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+ if (phydev->enable_tx_lpi)
+ data |= MAC_CR_EEE_EN_;
+ else
+ data &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, data);
}
}
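
MAC-level EEE is now enabled by simply mirroring phydev->enable_tx_lpi, which phylib maintains from the EEE negotiation result and the ethtool setting, instead of toggling MAC_CR from the ethtool handler. A hedged sketch of the same "follow the PHY" pattern in a link-change handler; MY_MAC_EEE_EN and the my_rd/my_wr accessors are illustrative, not this driver's register API:

#include <linux/bits.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#define MY_MAC_EEE_EN   BIT(0)  /* illustrative MAC EEE enable bit */

static void my_link_change(struct net_device *dev,
                           u32 (*my_rd)(void), void (*my_wr)(u32 val))
{
        struct phy_device *phydev = dev->phydev;
        u32 mac_cr = my_rd();

        /* phylib sets enable_tx_lpi when EEE was negotiated and the user
         * has not disabled it via ethtool; the MAC just follows.
         */
        if (phydev->enable_tx_lpi)
                mac_cr |= MY_MAC_EEE_EN;
        else
                mac_cr &= ~MY_MAC_EEE_EN;

        my_wr(mac_cr);
}
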
@@ -3175,7 +3184,7 @@ static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
ret = lan743x_mac_set_mtu(adapter, new_mtu);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
@@ -3272,6 +3281,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
lan743x_pci_cleanup(adapter);
}
+static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
+{
+ u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
+
+ if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
+ u32 misc_ctl;
+
+ misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
+ misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
+ misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
+ RFE_RD_FIFO_TH_3_DWORDS);
+ lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
+ }
+}
+
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
@@ -3287,6 +3311,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
+ pci11x1x_set_rfe_rd_fifo_threshold(adapter);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
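
pci11x1x_set_rfe_rd_fifo_threshold() above is a standard read-modify-write of a masked register field: clear the GENMASK()-defined field, then insert the new value with FIELD_PREP(). A small, hedged standalone illustration of the same idiom; MY_FIFO_TH_MASK is an assumed 3-bit field, not the driver's define:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define MY_FIFO_TH_MASK         GENMASK(6, 4)   /* assumption: 3-bit field */

static u32 my_set_fifo_threshold(u32 reg, u32 threshold)
{
        reg &= ~MY_FIFO_TH_MASK;                        /* clear old field  */
        reg |= FIELD_PREP(MY_FIFO_TH_MASK, threshold);  /* insert new value */
        return reg;
}
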
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index be79cb0ae5af..645bc048e52e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,7 @@
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
+#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0)
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
@@ -311,6 +312,9 @@
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+#define MISC_CTL_0 (0x920)
+#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4)
+
/* Vendor Specific SGMII MMD details */
#define SR_VSMMD_PCS_ID1 0x0004
#define SR_VSMMD_PCS_ID2 0x0005
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2801f08bf1c9..dcea6652d56d 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -58,7 +58,7 @@ int lan743x_gpio_init(struct lan743x_adapter *adapter)
static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
u32 bit_mask)
{
- int timeout = 1000;
+ int timeout = PTP_CMD_CTL_TIMEOUT_CNT;
u32 data = 0;
while (timeout &&
@@ -555,7 +555,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
if (half == wf_high) {
/* It's 50% match. Use the toggle option */
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
- /* In this case, devide period value by 2 */
+ /* In this case, divide period value by 2 */
ts_period = ns_to_timespec64(div_s64(period64, 2));
period_sec = ts_period.tv_sec;
period_nsec = ts_period.tv_nsec;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index e26d4eff7133..0d29914cd460 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -21,6 +21,7 @@
#define LAN743X_PTP_N_EXTTS 4
#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
index f3b1e0d31826..e706163ce9cc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -78,7 +78,7 @@
/* Classified internal priority for queuing */
#define IFH_POS_QOS_CLASS 100
-/* Bit mask with eight cpu copy classses */
+/* Bit mask with eight cpu copy classes */
#define IFH_POS_CPUQ 92
/* Relearn + learn flags (*) */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2635ef8958c8..a3b919a3ce07 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -276,7 +276,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
++i;
}
- /* Inidcate EOF and valid bytes in the last word */
+ /* Indicate EOF and valid bytes in the last word */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
0 : last) |
@@ -402,7 +402,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!lan966x->fdma)
return 0;
@@ -520,7 +520,7 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
u32 val;
/* The IGMP and MLD frames are not forward by the HW if
- * multicast snooping is enabled, therefor don't mark as
+ * multicast snooping is enabled, therefore don't mark as
* offload to allow the SW to forward the frames accordingly.
*/
val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
@@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
-
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1257,6 +1257,8 @@ cleanup_ports:
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return err;
}
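
The dev->mtu store above is switched to WRITE_ONCE() (as are the MTU updates in the other drivers later in this series) because the MTU may be read without the rtnl lock from fast paths, so the write needs to pair with READ_ONCE() on such readers. A minimal sketch of the pairing, with a hypothetical lockless reader:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Writer side, called under rtnl_lock() as in ndo_change_mtu(). */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);	/* avoid store tearing */
	return 0;
}

/* Hypothetical lockless reader, e.g. sizing receive buffers. */
static unsigned int example_rx_buf_len(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu) + ETH_HLEN + ETH_FCS_LEN;	/* avoid load tearing */
}
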
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index caa9e0533c96..f8bebbcf77b2 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -326,7 +326,7 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
- spinlock_t tx_lock; /* lock for frame transmition */
+ spinlock_t tx_lock; /* lock for frame transmission */
struct net_device *bridge;
u16 bridge_mask;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 2e83bbb9477e..fdfa4040d9ee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -88,7 +88,7 @@ static void lan966x_port_link_down(struct lan966x_port *port)
SYS_FRONT_PORT_MODE_HDX_MODE,
lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
- /* 8: Flush the queues accociated with the port */
+ /* 8: Flush the queues associated with the port */
lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
QSYS_SW_PORT_MODE_AGING_MODE,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index d696cf9dbd19..43913d6204e1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -45,6 +45,7 @@ static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
static int
lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control match;
int err = 0;
@@ -59,7 +60,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAGMENT,
VCAP_BIT_0);
if (err)
- goto out;
+ goto bad_frag_out;
}
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -72,15 +73,20 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAG_OFS_GT0,
VCAP_BIT_1);
if (err)
- goto out;
+ goto bad_frag_out;
}
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+bad_frag_out:
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
return err;
}
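
flow_rule_is_supp_control_flags() is the generic helper both classifier changes in this series use: it checks the control-flag mask against the flags the driver can offload and fills in the extack message itself. A minimal sketch of a parse callback built around it (hypothetical function name, only fragment flags supported):

#include <net/flow_offload.h>

static int example_parse_control(struct flow_cls_offload *fco,
				 const struct flow_rule *rule)
{
	struct flow_match_control match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL))
		return 0;

	flow_rule_match_control(rule, &match);

	/* Rejects (with an extack message) any masked flag outside the set. */
	if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
					     FLOW_DIS_FIRST_FRAG,
					     match.mask->flags,
					     fco->common.extack))
		return -EOPNOTSUPP;

	return 0;
}
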
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index 3c44660128da..fa34a739c748 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -157,7 +157,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port)
pvid = lan966x_vlan_port_get_pvid(port);
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
/* Default vlan to classify for untagged frames (may be zero) */
val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
if (port->vlan_aware)
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 1cb1cc3f1a85..b68fe9c9a656 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -10,7 +10,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
- sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \
+ sparx5_psfp.o sparx5_mirror.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 141897dfe388..1915998f6079 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -143,7 +143,7 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- /* Dectivate the RX channel */
+ /* Deactivate the RX channel */
spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c066b62e689..b64c814eac11 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -899,6 +899,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "PTP failed\n");
goto cleanup_ports;
}
+
+ INIT_LIST_HEAD(&sparx5->mall_entries);
+
goto cleanup_config;
cleanup_ports:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 316fed5f2735..1982ae03b4fe 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -18,6 +18,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
+#include <net/flow_offload.h>
#include "sparx5_main_regs.h"
@@ -173,6 +174,7 @@ struct sparx5_port {
struct phylink_config phylink_config;
struct phylink *phylink;
struct phylink_pcs phylink_pcs;
+ struct flow_stats mirror_stats;
u16 portno;
/* Ingress default VLAN (pvid) */
u16 pvid;
@@ -227,6 +229,22 @@ struct sparx5_mdb_entry {
u16 pgid_idx;
};
+struct sparx5_mall_mirror_entry {
+ u32 idx;
+ struct sparx5_port *port;
+};
+
+struct sparx5_mall_entry {
+ struct list_head list;
+ struct sparx5_port *port;
+ unsigned long cookie;
+ enum flow_action_id type;
+ bool ingress;
+ union {
+ struct sparx5_mall_mirror_entry mirror;
+ };
+};
+
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
@@ -295,6 +313,7 @@ struct sparx5 {
struct vcap_control *vcap_ctrl;
/* PGID allocation map */
u8 pgid_map[PGID_TABLE_SIZE];
+ struct list_head mall_entries;
/* Common root for debugfs */
struct dentry *debugfs_root;
};
@@ -541,6 +560,12 @@ void sparx5_psfp_init(struct sparx5 *sparx5);
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
const ktime_t org_base_time, ktime_t *new_base_time);
+/* sparx5_mirror.c */
+int sparx5_mirror_add(struct sparx5_mall_entry *entry);
+void sparx5_mirror_del(struct sparx5_mall_entry *entry);
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index bd03a0a3c1da..22acc1f3380c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -83,6 +83,64 @@ enum sparx5_target {
#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
+#define ANA_AC_PROBE_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 0, 0, 1, 4)
+
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
+#define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x)
+#define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
+#define ANA_AC_PROBE_PORT_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 8, 0, 1, 4)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
+#define ANA_AC_PROBE_PORT_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 12, 0, 1, 4)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
+#define ANA_AC_PROBE_PORT_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 16, 0, 1, 4)
+
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+
/* ANA_AC:SRC:SRC_CFG */
#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
@@ -6203,6 +6261,16 @@ enum sparx5_target {
#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+/* QFWD:SYSTEM:FRAME_COPY_CFG */
+#define QFWD_FRAME_COPY_CFG(r)\
+ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4)
+
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL GENMASK(12, 6)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\
+ FIELD_PREP(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\
+ FIELD_GET(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+
/* QRES:RES_CTRL:RES_CFG */
#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
new file mode 100644
index 000000000000..15db423be4aa
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_tc.h"
+
+#define SPX5_MIRROR_PROBE_MAX 3
+#define SPX5_MIRROR_DISABLED 0
+#define SPX5_MIRROR_EGRESS 1
+#define SPX5_MIRROR_INGRESS 2
+#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65
+#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */
+
+/* Convert from bool ingress/egress to mirror direction */
+static u32 sparx5_mirror_to_dir(bool ingress)
+{
+ return ingress ? SPX5_MIRROR_INGRESS : SPX5_MIRROR_EGRESS;
+}
+
+/* Get ports belonging to this mirror */
+static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
+{
+ return (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32 |
+ spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+}
+
+/* Add port to mirror (only front ports) */
+static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u32 val, reg = portno;
+
+ reg = portno / BITS_PER_BYTE;
+ val = BIT(portno % BITS_PER_BYTE);
+
+ if (reg == 0)
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Delete port from mirror (only front ports) */
+static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u32 val, reg = portno;
+
+ reg = portno / BITS_PER_BYTE;
+ val = BIT(portno % BITS_PER_BYTE);
+
+ if (reg == 0)
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Check if mirror contains port */
+static bool sparx5_mirror_contains(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ return (sparx5_mirror_port_get(sparx5, idx) & BIT_ULL(portno)) != 0;
+}
+
+/* Check if mirror is empty */
+static bool sparx5_mirror_is_empty(struct sparx5 *sparx5, u32 idx)
+{
+ return sparx5_mirror_port_get(sparx5, idx) == 0;
+}
+
+/* Get direction of mirror */
+static u32 sparx5_mirror_dir_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5, ANA_AC_PROBE_CFG(idx));
+
+ return ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(val);
+}
+
+/* Set direction of mirror */
+static void sparx5_mirror_dir_set(struct sparx5 *sparx5, u32 idx, u32 dir)
+{
+ spx5_rmw(ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(dir),
+ ANA_AC_PROBE_CFG_PROBE_DIRECTION, sparx5,
+ ANA_AC_PROBE_CFG(idx));
+}
+
+/* Set the monitor port for this mirror */
+static void sparx5_mirror_monitor_set(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ spx5_rmw(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(portno),
+ QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+}
+
+/* Get the monitor port of this mirror */
+static u32 sparx5_mirror_monitor_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+
+ return QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(val);
+}
+
+/* Check if port is the monitor port of this mirror */
+static bool sparx5_mirror_has_monitor(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ return sparx5_mirror_monitor_get(sparx5, idx) == portno;
+}
+
+/* Get a suitable mirror for this port */
+static int sparx5_mirror_get(struct sparx5_port *sport,
+ struct sparx5_port *mport, u32 dir, u32 *idx)
+{
+ struct sparx5 *sparx5 = sport->sparx5;
+ u32 i;
+
+ /* Check if this port is already used as a monitor port */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++)
+ if (sparx5_mirror_has_monitor(sparx5, i, sport->portno))
+ return -EINVAL;
+
+ /* Check if existing mirror can be reused
+ * (same direction and monitor port).
+ */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_dir_get(sparx5, i) == dir &&
+ sparx5_mirror_has_monitor(sparx5, i, mport->portno)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ /* Return free mirror */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_is_empty(sparx5, i)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int sparx5_mirror_add(struct sparx5_mall_entry *entry)
+{
+ u32 mirror_idx, dir = sparx5_mirror_to_dir(entry->ingress);
+ struct sparx5_port *sport, *mport;
+ struct sparx5 *sparx5;
+ int err;
+
+ /* Source port */
+ sport = entry->port;
+ /* monitor port */
+ mport = entry->mirror.port;
+ sparx5 = sport->sparx5;
+
+ if (sport->portno == mport->portno)
+ return -EINVAL;
+
+ err = sparx5_mirror_get(sport, mport, dir, &mirror_idx);
+ if (err)
+ return err;
+
+ if (sparx5_mirror_contains(sparx5, mirror_idx, sport->portno))
+ return -EEXIST;
+
+ /* Add port to mirror */
+ sparx5_mirror_port_add(sparx5, mirror_idx, sport->portno);
+
+ /* Set direction of mirror */
+ sparx5_mirror_dir_set(sparx5, mirror_idx, dir);
+
+ /* Set monitor port for mirror */
+ sparx5_mirror_monitor_set(sparx5, mirror_idx, mport->portno);
+
+ entry->mirror.idx = mirror_idx;
+
+ return 0;
+}
+
+void sparx5_mirror_del(struct sparx5_mall_entry *entry)
+{
+ struct sparx5_port *port = entry->port;
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 mirror_idx = entry->mirror.idx;
+
+ sparx5_mirror_port_del(sparx5, mirror_idx, port->portno);
+ if (!sparx5_mirror_is_empty(sparx5, mirror_idx))
+ return;
+
+ sparx5_mirror_dir_set(sparx5, mirror_idx, SPX5_MIRROR_DISABLED);
+
+ sparx5_mirror_monitor_set(sparx5,
+ mirror_idx,
+ SPX5_MIRROR_MONITOR_PORT_DEFAULT);
+}
+
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats)
+{
+ struct sparx5_port *port = entry->port;
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &entry->port->mirror_stats;
+ sparx5_get_stats64(port->ndev, &new_stats);
+
+ if (entry->ingress) {
+ flow_stats_update(fstats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(fstats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
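
Note the design choice in sparx5_mirror_stats() above: mirror counters are not read from dedicated hardware counters but derived from the mirrored port's rtnl_link_stats64, keeping a per-port baseline in mirror_stats and reporting only the delta through flow_stats_update(). A stripped-down sketch of that baseline-and-delta pattern (hypothetical helper, ingress direction only):

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

/* Report the growth since the previous call, then advance the baseline. */
static void example_report_rx_delta(struct flow_stats *baseline,
				    const struct rtnl_link_stats64 *now,
				    struct flow_stats *out)
{
	flow_stats_update(out,
			  now->rx_bytes - baseline->bytes,
			  now->rx_packets - baseline->pkts,
			  now->rx_dropped - baseline->drops,
			  baseline->lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);

	baseline->bytes = now->rx_bytes;
	baseline->pkts = now->rx_packets;
	baseline->drops = now->rx_dropped;
	baseline->lastused = jiffies;
}
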
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index ac7e1cffbcec..f3f5fb420468 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -67,7 +67,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
for (i = 0; i < IFH_LEN; i++)
ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
- /* Decode IFH (whats needed) */
+ /* Decode IFH (what's needed) */
sparx5_ifh_parse(ifh, &fi);
/* Map to port netdev */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 3a1b1a1f5a19..062e486c002c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -370,7 +370,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
/* 6: Wait while the last frame is exiting the queues */
usleep_range(8 * spd_prm, 10 * spd_prm);
- /* 7: Flush the queues accociated with the port->portno */
+ /* 7: Flush the queues associated with the port->portno */
spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 4af85d108a06..0b4abc3eb53d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -190,7 +190,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
/* Remove standalone port entry */
sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
- /* Port enters in bridge mode therefor don't need to copy to CPU
+ /* Port enters in bridge mode therefore don't need to copy to CPU
* frames for multicast in case the bridge is not requesting them
*/
__dev_mc_unsync(ndev, sparx5_mc_unsync);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 523e0c470894..8d67d9f24c76 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum { /* key / mask */
+ FRAG_NOT = 0x03, /* 0 / 3 */
+ FRAG_SOME = 0x11, /* 1 / 1 */
+ FRAG_FIRST = 0x13, /* 1 / 3 */
+ FRAG_LATER = 0x33, /* 3 / 3 */
+ FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
+ /* 0/0 0/1 1/0 1/1 <-- first_frag */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -138,49 +159,51 @@ out:
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control mt;
u32 value, mask;
int err = 0;
flow_rule_match_control(st->frule, &mt);
- if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
+ if (mt.mask->flags & (FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
+ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+ /* Lookup verdict based on the 2 + 2 input bits */
+ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+ if (vdt == FRAG_INVAL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on invalid fragment flag combination");
+ return -EINVAL;
}
+ /* Extract VCAP fragment key and mask from verdict */
+ value = (vdt >> 4) & 0x3;
+ mask = vdt & 0x3;
+
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
- if (err)
- goto out;
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
+ return err;
+ }
}
- st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ mt.mask->flags, extack))
+ return -EOPNOTSUPP;
- return err;
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
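
Each sparx5_vcap_frag_map verdict packs the VCAP key in its high nibble and the mask in its low nibble (FRAG_FIRST = 0x13 decodes to key 1 / mask 3, FRAG_NOT = 0x03 to key 0 / mask 3, and so on), which is what the two shifts above extract. A tiny sketch of the decode in isolation:

#include <linux/types.h>

/* Split a packed frag-map verdict into its VCAP key and mask parts. */
static void example_decode_frag_verdict(u8 vdt, u32 *value, u32 *mask)
{
	*value = (vdt >> 4) & 0x3;	/* FRAG_FIRST (0x13) -> 1 */
	*mask = vdt & 0x3;		/* FRAG_FIRST (0x13) -> 3 */
}
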
@@ -1004,6 +1027,64 @@ static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
return err;
}
+static void sparx5_tc_flower_set_port_mask(struct vcap_u72_action *ports,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int byidx = port->portno / BITS_PER_BYTE;
+ int biidx = port->portno % BITS_PER_BYTE;
+
+ ports->value[byidx] |= BIT(biidx);
+}
+
+static int sparx5_tc_action_mirred(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Mirror action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_OR_DSTMASK);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
+static int sparx5_tc_action_redirect(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Redirect action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
/* Remove rule keys that may prevent templates from matching a keyset */
static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
struct vcap_rule *vrule,
@@ -1150,6 +1231,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
break;
+ case FLOW_ACTION_MIRRED:
+ err = sparx5_tc_action_mirred(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ err = sparx5_tc_action_redirect(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
case FLOW_ACTION_ACCEPT:
err = sparx5_tc_set_actionset(admin, vrule);
if (err)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
index d88a93f22606..6b4d1d7b9730 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -11,11 +11,44 @@
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
+static struct sparx5_mall_entry *
+sparx5_tc_matchall_entry_find(struct list_head *entries, unsigned long cookie)
+{
+ struct sparx5_mall_entry *entry;
+
+ list_for_each_entry(entry, entries, list) {
+ if (entry->cookie == cookie)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void sparx5_tc_matchall_parse_action(struct sparx5_port *port,
+ struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action,
+ bool ingress,
+ unsigned long cookie)
+{
+ entry->port = port;
+ entry->type = action->id;
+ entry->ingress = ingress;
+ entry->cookie = cookie;
+}
+
+static void
+sparx5_tc_matchall_parse_mirror_action(struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action)
+{
+ entry->mirror.port = netdev_priv(action->dev);
+}
+
static int sparx5_tc_matchall_replace(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_mall_entry *mall_entry;
struct flow_action_entry *action;
struct sparx5 *sparx5;
int err;
@@ -27,8 +60,45 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
}
action = &tmo->rule->action.entries[0];
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+
+ sparx5_tc_matchall_parse_action(port,
+ mall_entry,
+ action,
+ ingress,
+ tmo->cookie);
+
sparx5 = port->sparx5;
switch (action->id) {
+ case FLOW_ACTION_MIRRED:
+ sparx5_tc_matchall_parse_mirror_action(mall_entry, action);
+ err = sparx5_mirror_add(mall_entry);
+ if (err) {
+ switch (err) {
+ case -EEXIST:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Mirroring already exists");
+ break;
+ case -EINVAL:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Cannot mirror a monitor port");
+ break;
+ case -ENOENT:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "No more mirror probes available");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Unknown error");
+ break;
+ }
+ return err;
+ }
+ /* Get baseline stats for this port */
+ sparx5_mirror_stats(mall_entry, &tmo->stats);
+ break;
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
tmo->common.chain_index,
@@ -59,6 +129,9 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
return -EOPNOTSUPP;
}
+
+ list_add_tail(&mall_entry->list, &sparx5->mall_entries);
+
return 0;
}
@@ -67,19 +140,51 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5;
- int err;
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+ int err = 0;
- sparx5 = port->sparx5;
- if (!tmo->rule && tmo->cookie) {
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_del(entry);
+ } else if (entry->type == FLOW_ACTION_GOTO) {
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
0, 0, tmo->cookie, false);
- if (err)
- return err;
- return 0;
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ err = -EOPNOTSUPP;
}
- NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
- return -EOPNOTSUPP;
+
+ list_del(&entry->list);
+
+ return err;
+}
+
+static int sparx5_tc_matchall_stats(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_stats(entry, &tmo->stats);
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
int sparx5_tc_matchall(struct net_device *ndev,
@@ -91,6 +196,8 @@ int sparx5_tc_matchall(struct net_device *ndev,
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
case TC_CLSMATCHALL_DESTROY:
return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return sparx5_tc_matchall_stats(ndev, tmo, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index c3569a4c7b69..4735fad05708 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -290,7 +290,7 @@ enum vcap_keyfield_set {
* Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. PTP over UDP: messageType
* bit 3
* VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
- * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP:
+ * Set if TCP sequence number is 0, LAN966x: Overlaid with PTP over UDP:
* messageType bit 0
* VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2
* TCP/UDP source port
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index ef980e4e5bc2..2687765abe52 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -327,7 +327,7 @@ static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl,
}
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int
vcap_find_keystream_keysets(struct vcap_control *vctrl,
@@ -2907,6 +2907,18 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32);
+/* Add a 72 bit action field with value to the rule */
+int vcap_rule_add_action_u72(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ struct vcap_u72_action *fieldval)
+{
+ struct vcap_client_actionfield_data data;
+
+ memcpy(&data.u72, fieldval, sizeof(data.u72));
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_U72, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_u72);
+
static int vcap_read_counter(struct vcap_rule_internal *ri,
struct vcap_counter *ctr)
{
@@ -2931,7 +2943,7 @@ void vcap_netbytes_copy(u8 *dst, u8 *src, int count)
}
EXPORT_SYMBOL_GPL(vcap_netbytes_copy);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
{
switch (vrule->exterr) {
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 88641508f885..cdf79e17ca54 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -200,6 +200,8 @@ int vcap_rule_add_action_bit(struct vcap_rule *rule,
enum vcap_action_field action, enum vcap_bit val);
int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
+int vcap_rule_add_action_u72(struct vcap_rule *rule, enum vcap_action_field action,
+ struct vcap_u72_action *fieldval);
/* Get number of rules in a vcap instance lookup chain id range */
int vcap_admin_rule_count(struct vcap_admin *admin, int cid);
@@ -236,7 +238,7 @@ const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
/* Copy to host byte order */
void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
/* Cleanup a VCAP instance */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index df81d9ff502b..844bdf6b5f45 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -109,7 +109,7 @@ int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev,
struct vcap_keyset_list *kslist);
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
u32 *keystream, u32 *mskstream, bool mask,
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 01eb7445ead9..286f0d5697a1 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_MICROSOFT
config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI
+ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES)
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2729a2c5acf9..11021c34e47e 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -3,6 +3,7 @@
#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
+#include <linux/vmalloc.h>
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 59287c6e6cee..d087cf954f75 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+ *datasize = mtu + ETH_HLEN;
}
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
@@ -690,12 +690,12 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
goto out;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err = mana_attach(ndev);
if (err) {
netdev_err(ndev, "mana_attach failed: %d\n", err);
- ndev->mtu = old_mtu;
+ WRITE_ONCE(ndev->mtu, old_mtu);
}
out:
@@ -1058,11 +1058,10 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
struct net_device *ndev = apc->ndev;
- mana_handle_t *req_indir_tab;
u32 req_buf_size;
int err;
- req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
+ req_buf_size = struct_size(req, indir_tab, num_entries);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -1074,7 +1073,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
req->vport = apc->port_handle;
req->num_indir_entries = num_entries;
- req->indir_tab_offset = sizeof(*req);
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
req->rx_enable = rx;
req->rss_enable = apc->rss_state;
req->update_default_rxobj = update_default_rxobj;
@@ -1086,11 +1086,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
if (update_key)
memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
- if (update_tab) {
- req_indir_tab = (mana_handle_t *)(req + 1);
- memcpy(req_indir_tab, apc->rxobj_table,
- req->num_indir_entries * sizeof(mana_handle_t));
- }
+ if (update_tab)
+ memcpy(req->indir_tab, apc->rxobj_table,
+ flex_array_size(req, indir_tab, req->num_indir_entries));
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
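
The MANA steering change above is a textbook flexible-array conversion: the request size comes from struct_size(), the table offset from offsetof(), and the copy length from flex_array_size(), instead of sizeof(*req) plus hand-rolled pointer arithmetic past the header. A minimal sketch of the same idiom with a hypothetical request layout:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_steer_req {
	u32 vport;
	u32 num_indir_entries;
	u64 indir_tab[];			/* flexible array member */
};

static struct example_steer_req *example_build_req(const u64 *table, u32 n)
{
	/* struct_size() = header + n array elements, with overflow checking. */
	struct example_steer_req *req =
		kzalloc(struct_size(req, indir_tab, n), GFP_KERNEL);

	if (!req)
		return NULL;

	req->num_indir_entries = n;
	/* flex_array_size() covers exactly the n array elements. */
	memcpy(req->indir_tab, table, flex_array_size(req, indir_tab, n));
	return req;
}
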
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 33b438c6aec5..a057ec3dab97 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -609,11 +609,8 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return ret;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- }
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7b7e1c5b00f4..b7d9657a7af3 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3036,11 +3036,11 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
myri10ge_open(dev);
- } else
- dev->mtu = new_mtu;
-
+ } else {
+ WRITE_ONCE(dev->mtu, new_mtu);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 650a5a166070..ad0c14849115 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2526,7 +2526,7 @@ static void __set_rx_mode(struct net_device *dev)
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 55408f16fbbc..f235e76e4ce9 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6637,7 +6637,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index a655f9e69a7b..0e1a3800f371 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -132,7 +132,8 @@ exit_close_nsp:
static int
nfp_devlink_param_u8_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2c3f62907958..aca2a7417af3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -396,6 +396,17 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
return 0;
}
+#define NFP_FL_CHECK(flag) ({ \
+ IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \
+ __be16 __res; \
+ \
+ __set_bit(IP_TUNNEL_##flag##_BIT, __check); \
+ __res = ip_tunnel_flags_to_be16(__check); \
+ \
+ BUILD_BUG_ON(__builtin_constant_p(__res) && \
+ NFP_FL_TUNNEL_##flag != __res); \
+})
+
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
const struct flow_action_entry *act,
@@ -410,6 +421,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ __be16 tun_flags;
if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
return -EOPNOTSUPP;
@@ -417,9 +429,10 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
return -EOPNOTSUPP;
- BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
- NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
- NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ NFP_FL_CHECK(CSUM);
+ NFP_FL_CHECK(KEY);
+ NFP_FL_CHECK(GENEVE_OPT);
+
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
@@ -427,7 +440,9 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
- if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags);
+ if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
+ (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
@@ -442,7 +457,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ if (tun_flags & NFP_FL_TUNNEL_KEY)
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
@@ -486,7 +501,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
}
set_tun->tos = ip_tun->key.tos;
- set_tun->tun_flags = ip_tun->key.tun_flags;
+ set_tun->tun_flags = tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
set_tun->tun_proto = htons(ETH_P_TEB);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 0aceef9fe582..8e0a890381b6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -527,10 +527,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
+
+ if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS,
+ ctl.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
ret_key_ls->key_layer = key_layer;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 45be6954d5aa..01cfa9cc1b5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -184,7 +184,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
xrxbuf->xdp->data += meta_len;
xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
xdp_set_data_meta_invalid(xrxbuf->xdp);
- xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
net_prefetch(xrxbuf->xdp->data);
if (meta_len) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 635d33c0d6d3..ea75b9a06313 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -160,6 +160,7 @@ static const struct nfp_devlink_versions_simple {
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", },
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", },
{ "board.model", /* code name */ "assembly.model", },
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER, "pn", },
};
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f28e769e6fda..182ba0a8b095 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1526,7 +1526,7 @@ static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
*dp = nn->dp;
nn->dp = new_dp;
- nn->dp.netdev->mtu = new_dp.mtu;
+ WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu);
if (!netif_is_rxfh_configured(nn->dp.netdev))
nfp_net_rss_init_itbl(nn);
@@ -2289,10 +2289,7 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
new_ctrl = nn->dp.ctrl;
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA)
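
nla_for_each_nested_type(), used above, walks only the nested attributes of the requested type and so replaces the open-coded nla_type() filter. A minimal sketch (hypothetical helper around the same IFLA_BRIDGE_MODE lookup):

#include <net/netlink.h>
#include <linux/if_bridge.h>

static u16 example_get_bridge_mode(const struct nlattr *br_spec)
{
	const struct nlattr *attr;
	u16 mode = BRIDGE_MODE_UNDEF;
	int rem;

	/* Only IFLA_BRIDGE_MODE attributes inside br_spec are visited. */
	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem)
		mode = nla_get_u16(attr);

	return mode;
}
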
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index a614df095b08..2dd37557185e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -34,8 +34,11 @@ enum nfp_dumpspec_type {
/* generic type plus length */
struct nfp_dump_tl {
- __be32 type;
- __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(nfp_dump_tl_hdr, hdr,
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ );
char data[];
};
@@ -55,19 +58,19 @@ struct nfp_dump_common_cpp {
/* CSR dumpables */
struct nfp_dumpspec_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
};
struct nfp_dumpspec_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
char rtsym[];
};
/* header for register dumpable */
struct nfp_dump_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
__be32 error; /* error code encountered while reading */
@@ -75,7 +78,7 @@ struct nfp_dump_csr {
};
struct nfp_dump_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 error; /* error code encountered while reading */
u8 padded_name_length; /* pad so data starts at 8 byte boundary */
@@ -84,12 +87,12 @@ struct nfp_dump_rtsym {
};
struct nfp_dump_prolog {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 dump_level;
};
struct nfp_dump_error {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 error;
char padding[4];
char spec[];
@@ -449,6 +452,8 @@ static int
nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -458,7 +463,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -466,7 +471,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -552,6 +557,8 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -560,7 +567,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -569,7 +576,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
total_size = header_size + ALIGN8(reg_data_length);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -597,6 +604,8 @@ static int
nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_tl =
+ container_of(&spec->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_rtsym *dump_header = dump->p;
struct nfp_dumpspec_cpp_isl_id cpp_params;
struct nfp_rtsym_table *rtbl = pf->rtbl;
@@ -607,14 +616,14 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
void *dest;
int err;
- tl_len = be32_to_cpu(spec->tl.length);
+ tl_len = be32_to_cpu(spec_tl->length);
key_len = strnlen(spec->rtsym, tl_len);
if (key_len == tl_len)
- return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_tl, -EINVAL, dump);
sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
if (!sym)
- return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+ return nfp_dump_error_tlv(spec_tl, -ENOENT, dump);
sym_size = nfp_rtsym_size(sym);
header_size =
@@ -622,7 +631,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
total_size = header_size + ALIGN8(sym_size);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_tl->type), total_size, dump);
if (err)
return err;
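
The debugdump rework relies on struct_group_tagged(): the fixed type/length members of the TLV become both inline fields and a separately named header type, so containers can embed just the header (rather than a struct whose flexible array would then sit mid-structure) and convert back with container_of(). A minimal sketch with hypothetical structures:

#include <linux/container_of.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_tlv {
	/* Declares the members inline and also as struct example_tlv_hdr. */
	struct_group_tagged(example_tlv_hdr, hdr,
		__be32 type;
		__be32 length;
	);
	char data[];				/* must remain the last member */
};

struct example_csr_spec {
	struct example_tlv_hdr tl;		/* embed only the fixed header */
	__be32 register_width;
};

/* View the embedded header through the full TLV layout (header fields only). */
static struct example_tlv *example_spec_tlv(struct example_csr_spec *spec)
{
	return container_of(&spec->tl, struct example_tlv, hdr);
}
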
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3af1229a3f08..eee0bfc41074 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -177,7 +177,7 @@ static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
return err;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index fa1f78b03cb2..2aa4ad9cf96e 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -946,7 +946,7 @@ static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
NIXGE_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 31f896c4aa26..720f577929db 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3098,7 +3098,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
int old_mtu;
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 28b7cec485ef..4ac29cd59f2b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2184,7 +2184,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
}
} else {
pch_gbe_reset(adapter);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.mac.max_frame_size = max_frame;
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ed7dd0a04235..62ba269da902 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1639,7 +1639,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f0c6cdc375e..24870da3f484 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1761,13 +1761,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
/* if we're not running, nothing more to do */
if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 6e12cd21ac90..89c8b2349694 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -960,7 +960,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
rc = adapter->set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1d719726f72b..b7def3b54937 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -662,8 +662,6 @@ struct qed_hwfn {
};
struct pci_params {
- int pm_cap;
-
unsigned long mem_start;
unsigned long mem_end;
unsigned int irq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index dad8e617c393..1adc7fbb3f2f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -132,7 +132,8 @@ static int qed_dl_param_get(struct devlink *dl, u32 id,
}
static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
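The extra argument added above reflects devlink passing an extack to parameter set callbacks, so validation failures can be reported to userspace. An illustrative callback under that assumption (the check itself is hypothetical):

#include <linux/netlink.h>
#include <net/devlink.h>

static int example_dl_param_set(struct devlink *dl, u32 id,
				struct devlink_param_gset_ctx *ctx,
				struct netlink_ext_ack *extack)
{
	/* Reject an unsupported value and tell the user why. */
	if (!ctx->val.vbool) {
		NL_SET_ERR_MSG_MOD(extack, "Disabling this parameter is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}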
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893042..f915c423fe70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -323,8 +323,7 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
- cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
+ if (IS_PF(cdev) && !pdev->pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
@@ -1206,7 +1205,6 @@ out:
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
- char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
@@ -1215,11 +1213,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
- cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
+ 0, 0, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ hwfn->abs_pf_id);
- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
@@ -1351,7 +1349,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_rev << 8) |
(params->drv_eng);
strscpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ sizeof(drv_version.name));
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
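The qed_main.c hunks above drop the intermediate name buffer because alloc_workqueue() accepts a printf-style format, and they bound strscpy() by the destination size. A sketch of the workqueue naming, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/workqueue.h>

/* Build the workqueue name in place; no local buffer or snprintf()
 * is needed because alloc_workqueue() formats its first argument.
 */
static struct workqueue_struct *example_create_slowpath_wq(struct pci_dev *pdev,
							    u8 pf_id)
{
	return alloc_workqueue("slowpath-%02x:%02x.%02x", 0, 0,
			       pdev->bus->number, PCI_SLOT(pdev->devfn),
			       pf_id);
}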
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ae3ebf0cf999..f497f6ca1018 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1026,7 +1026,7 @@ static int qede_get_regs_len(struct net_device *ndev)
static void qede_update_mtu(struct qede_dev *edev,
struct qede_reload_args *args)
{
- edev->ndev->mtu = args->u.mtu;
+ WRITE_ONCE(edev->ndev->mtu, args->u.mtu);
}
/* Netdevice NDOs */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33..985026dd816f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1520,8 +1520,8 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
-static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t)
+static int qede_set_v4_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1538,7 +1538,7 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
t->dst_ipv4 && !t->src_ipv4) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1549,9 +1549,9 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
return 0;
}
-static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct in6_addr *zaddr)
+static int qede_set_v6_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1573,7 +1573,7 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
!memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1671,7 +1671,7 @@ static int qede_parse_actions(struct qede_dev *edev,
int i;
if (!flow_action_has_entries(flow_action)) {
- DP_NOTICE(edev, "No actions received\n");
+ NL_SET_ERR_MSG_MOD(extack, "No actions received");
return -EINVAL;
}
@@ -1687,7 +1687,8 @@ static int qede_parse_actions(struct qede_dev *edev,
break;
if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
- DP_INFO(edev, "Queue out-of-bounds\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue out-of-bounds");
return -EINVAL;
}
break;
@@ -1700,8 +1701,8 @@ static int qede_parse_actions(struct qede_dev *edev,
}
static int
-qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_ports(struct flow_rule *rule, struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1709,7 +1710,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
flow_rule_match_ports(rule, &match);
if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
(match.key->dst && match.mask->dst != htons(U16_MAX))) {
- DP_NOTICE(edev, "Do not support ports masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ports masks");
return -EINVAL;
}
@@ -1721,10 +1723,12 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
}
static int
-qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v6_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
struct in6_addr zero_addr, addr;
+ int err;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
@@ -1737,8 +1741,8 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcmp(&match.mask->src, &addr, sizeof(addr))) ||
(memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
- DP_NOTICE(edev,
- "Do not support IPv6 address prefix/mask\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support IPv6 address prefix/mask");
return -EINVAL;
}
@@ -1746,23 +1750,28 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+ return qede_set_v6_tuple_to_profile(t, &zero_addr, extack);
}
static int
-qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v4_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
(match.key->dst && match.mask->dst != htonl(U32_MAX))) {
- DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ipv4 prefix/masks");
return -EINVAL;
}
@@ -1770,55 +1779,57 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
t->dst_ipv4 = match.key->dst;
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v4_tuple_to_profile(edev, t);
+ return qede_set_v4_tuple_to_profile(t, extack);
}
static int
-qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
- struct flow_rule *rule, struct qede_arfs_tuple *tuple)
+qede_parse_flow_attr(__be16 proto, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
struct flow_dissector *dissector = rule->match.dissector;
int rc = -EINVAL;
@@ -1832,14 +1843,18 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
- DP_NOTICE(edev, "Unsupported key set:0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)) {
- DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported proto=0x%x",
+ proto);
return -EPROTONOSUPPORT;
}
@@ -1851,15 +1866,15 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v6(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_udp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_udp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v6(rule, tuple, extack);
else
- DP_NOTICE(edev, "Invalid protocol request\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid protocol request");
return rc;
}
@@ -1867,9 +1882,10 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1895,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(proto, f->rule, &t, extack);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1905,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1938,8 +1957,11 @@ unlock:
static int qede_flow_spec_validate(struct qede_dev *edev,
struct flow_action *flow_action,
struct qede_arfs_tuple *t,
- __u32 location)
+ __u32 location,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (location >= QEDE_RFS_MAX_FLTR) {
DP_INFO(edev, "Location out-of-bounds\n");
return -EINVAL;
@@ -1960,8 +1982,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action, NULL))
- return -EINVAL;
+ err = qede_parse_actions(edev, flow_action, extack);
+ if (err)
+ return err;
return 0;
}
@@ -1972,11 +1995,13 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
{
struct ethtool_rx_flow_spec_input input = {};
struct ethtool_rx_flow_rule *flow;
+ struct netlink_ext_ack extack;
__be16 proto;
- int err = 0;
+ int err;
- if (qede_flow_spec_validate_unused(edev, fs))
- return -EOPNOTSUPP;
+ err = qede_flow_spec_validate_unused(edev, fs);
+ if (err)
+ return err;
switch ((fs->flow_type & ~FLOW_EXT)) {
case TCP_V4_FLOW:
@@ -1998,15 +2023,16 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(proto, flow->rule, t, &extack);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
- fs->location);
+ fs->location, &extack);
err_out:
+ if (extack._msg)
+ DP_NOTICE(edev, "%s\n", extack._msg);
ethtool_rx_flow_rule_destroy(flow);
return err;
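The qede conversion above routes parse failures through the flower extack instead of driver log macros, so the reason reaches the user via the netlink reply. A self-contained sketch of the same reporting style (helper name hypothetical):

#include <linux/netlink.h>
#include <net/flow_offload.h>

/* Exact-match-only port validation, reporting failures via extack. */
static int example_parse_ports(struct flow_rule *rule,
			       struct netlink_ext_ack *extack)
{
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	flow_rule_match_ports(rule, &match);
	if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
	    (match.key->dst && match.mask->dst != htons(U16_MAX))) {
		NL_SET_ERR_MSG_MOD(extack, "Only exact port matches are supported");
		return -EINVAL;
	}
	return 0;
}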
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4b8bc46f55c2..ae4ee0326ee1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1015,7 +1015,7 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 4c06f55878de..99d4647bf245 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -216,7 +216,7 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
netif_dbg(adpt, hw, adpt->netdev,
"changing MTU from %d to %d\n", netdev->mtu,
new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
return emac_reinit_locked(adpt);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9d2a9562c96f..f1e40aade127 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -90,7 +90,7 @@ static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
new_mtu > (priv->real_dev->mtu - headroom))
return -EINVAL;
- rmnet_dev->mtu = new_mtu;
+ WRITE_ONCE(rmnet_dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f5786d78ed23..5652da8a178c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1277,14 +1277,14 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
/* network IS up, close it, reset MTU, and come up again. */
cp_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp);
return cp_open(dev);
}
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 4c043052198d..00882ffc7a02 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -73,6 +73,7 @@ enum mac_version {
};
struct rtl8169_private;
+struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@@ -84,7 +85,8 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len);
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
-void rtl8168_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
-void rtl8125_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 7c5dc9d0df85..e10bee706bc6 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -146,22 +146,22 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8168_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
+
+ return leds;
}
static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
@@ -245,20 +245,31 @@ static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8125_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8125_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8125_NUM_LEDS; i++)
rtl8125_setup_led_ldev(leds + i, ndev, i);
+
+ return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+ if (!leds)
+ return;
+
+ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+ led_classdev_unregister(&l->led);
+
+ kfree(leds);
}
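The LED rework above replaces devm registration with explicit unregistration before the netdev is torn down, and allocates one spare zeroed array element as a sentinel so the teardown loop needs no element count. A reduced sketch of that lifetime pattern (types hypothetical):

#include <linux/leds.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_led {
	struct net_device *ndev;	/* NULL marks the sentinel entry */
	struct led_classdev led;
};

static struct example_led *example_alloc_leds(unsigned int num)
{
	/* num + 1: the last element stays zeroed and terminates iteration */
	return kcalloc(num + 1, sizeof(struct example_led), GFP_KERNEL);
}

static void example_remove_leds(struct example_led *leds)
{
	struct example_led *l;

	if (!leds)
		return;
	for (l = leds; l->ndev; l++)
		led_classdev_unregister(&l->led);
	kfree(leds);
}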
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5c879a5c86d7..e5ea827a21e7 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -647,6 +647,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
+ struct r8169_led_classdev *leds;
+
u32 ocp_base;
};
@@ -1314,17 +1316,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
+static void rtl_dash_loop_wait(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long usecs, int n, bool high)
+{
+ if (!tp->dash_enabled)
+ return;
+ rtl_loop_wait(tp, c, usecs, n, high);
+}
+
+static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, true);
+}
+
+static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, false);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1338,7 +1363,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1346,7 +1371,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -2204,6 +2229,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
* the wild. Let's disable detection.
* { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
*/
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
/* 8168G family. */
{ 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
@@ -3897,7 +3924,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
rtl_jumbo_config(tp);
rtl_set_eee_txidle_timer(tp);
@@ -5021,6 +5048,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ if (IS_ENABLED(CONFIG_R8169_LEDS))
+ r8169_remove_leds(tp->leds);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@@ -5076,7 +5106,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
rtl_lock_config_regs(tp);
fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
- flags = PCI_IRQ_LEGACY;
+ flags = PCI_IRQ_INTX;
break;
default:
flags = PCI_IRQ_ALL_TYPES;
@@ -5141,6 +5171,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
struct mii_bus *new_bus;
int ret;
+ /* On some boards with this chip version the BIOS is buggy and fails
+ * to reset the PHY page selector. This results in the PHY ID read
+ * accessing registers on a different page, returning a more or
+ * less random value. Fix this by resetting the page selector first.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_26)
+ r8169_mdio_write(tp, 0x1f, 0);
+
new_bus = devm_mdiobus_alloc(&pdev->dev);
if (!new_bus)
return -ENOMEM;
@@ -5469,9 +5508,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ENABLED(CONFIG_R8169_LEDS)) {
if (rtl_is_8125(tp))
- rtl8125_init_leds(dev);
+ tp->leds = rtl8125_init_leds(dev);
else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- rtl8168_init_leds(dev);
+ tp->leds = rtl8168_init_leds(dev);
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d1be030c8848..4d100283c30f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
- u16 pkt_len;
+ u16 desc_len;
u8 die_dt;
int entry;
int limit;
int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->rx_ring[q].desc[entry];
- for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
- pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+ desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
- if (!pkt_len)
+ if (!desc_len)
continue;
if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
switch (die_dt) {
case DT_FSINGLE:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, pkt_len);
+ skb_put(skb, desc_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += desc_len;
break;
case DT_FSTART:
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, pkt_len);
+ skb_put(priv->rx_1st_skb, desc_len);
break;
case DT_FMID:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
break;
case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
+ stats->rx_bytes += priv->rx_1st_skb->len;
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
break;
}
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q].ex_desc[entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -1324,12 +1320,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ bool unmask;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ unmask = !ravb_rx(ndev, &quota, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1339,6 +1335,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+
+ if (!unmask)
+ goto out;
+
napi_complete(napi);
/* Re-enable RX/TX interrupts */
@@ -1352,14 +1360,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
- /* Receive error message handling */
- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- if (info->nc_queues)
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors)
- ndev->stats.rx_over_errors = priv->rx_over_errors;
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
return budget - quota;
}
@@ -2423,7 +2423,7 @@ static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ravb_private *priv = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
@@ -2564,6 +2564,7 @@ static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
struct phy_device *phydev;
struct device_node *pn;
int error;
@@ -2583,7 +2584,13 @@ static int ravb_mdio_init(struct ravb_private *priv)
pdev->name, pdev->id);
/* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ /* backwards compatibility for DT lacking mdio subnode */
+ mdio_node = of_node_get(dev->of_node);
+ }
+ error = of_mdiobus_register(priv->mii_bus, mdio_node);
+ of_node_put(mdio_node);
if (error)
goto out_free_bus;
@@ -2722,19 +2729,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
- const char *dev_name;
+ const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;
if (irq_name) {
- dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!dev_name)
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
- dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2744,9 +2750,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;
- error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
return error;
}
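Both ravb receive loops are reshaped above so the ring entry is derived from cur_rx on every iteration and the loop stops on either NAPI quota exhaustion or an empty descriptor. A simplified, driver-agnostic sketch of that loop shape (all types hypothetical):

#include <linux/types.h>

struct example_desc {
	bool empty;
};

struct example_ring {
	struct example_desc *desc;
	unsigned int num;
	unsigned int cur;
	unsigned int dirty;
};

static int example_rx(struct example_ring *r, int quota)
{
	unsigned int limit = r->dirty + r->num - r->cur;
	int rx_packets = 0;
	unsigned int i;

	for (i = 0; i < limit; i++, r->cur++) {
		struct example_desc *desc = &r->desc[r->cur % r->num];

		if (rx_packets == quota || desc->empty)
			break;
		/* ... hand the completed frame to the stack ... */
		rx_packets++;
	}
	return rx_packets;	/* caller subtracts this from its quota */
}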
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 475e1e8c1d35..7a25903e35c3 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -50,7 +50,7 @@
* the macros available to do this only define GCC 8.
*/
__diag_push();
-__diag_ignore(GCC, 8, "-Woverride-init",
+__diag_ignore_all("-Woverride-init",
"logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -2624,7 +2624,7 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
netdev_update_features(ndev);
return 0;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 755db89db909..e097ce3e69ea 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1967,7 +1967,7 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
rocker_port_stop(dev);
netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
if (err)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index ecbe3994f2b1..12c8396b6942 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1804,7 +1804,7 @@ static int sxgbe_set_features(struct net_device *dev,
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev))
return 0;
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 551f890db90a..4ebd5ae23eca 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -302,7 +302,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 1cb32aedd89c..8925745f1c17 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2125,7 +2125,7 @@ static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
ef4_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
ef4_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 88e5bc347a44..cf195162e270 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -306,7 +306,7 @@ int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 82e8891a619a..9d140203e273 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -273,11 +273,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
match->mask.ip_firstfrag = true;
}
- if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
- fm.mask->flags);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index 775d76d9890e..7e498bdbca73 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_SIS
config SIS900
tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -35,7 +35,7 @@ config SIS900
config SIS190
tristate "SiS190/SiS191 gigabit ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cb7fec226cab..85b850372efe 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2273,7 +2273,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
* (which seems to be different from the ifport(pcmcia) definition) */
switch(map->port){
case IF_PORT_UNKNOWN: /* use auto here */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
* here. When the Link comes up again, it will be
@@ -2294,7 +2294,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
break;
case IF_PORT_10BASET: /* 10BaseT */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
@@ -2315,7 +2315,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
case IF_PORT_100BASET: /* 100BaseT */
case IF_PORT_100BASETX: /* 100BaseTx */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 5f22a8a4d27b..13ce9086a9ca 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -54,7 +54,7 @@ config SMC91X
config PCMCIA_SMC91C92
tristate "SMC 91Cxx PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 29bb19f42de9..86e3ec25df07 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1595,7 +1595,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if (map->port > 2)
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
smc_reset(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 46eee747c699..45ef5ac0788a 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
writew(*wp++, a);
}
-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_inw(a, r) ioread16be((a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4ec61f1ee71a..05cc07b8f48c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -142,6 +142,18 @@ config DWMAC_ROCKCHIP
This selects the Rockchip RK3288 SoC glue layer support for
the stmmac device driver.
+config DWMAC_RZN1
+ tristate "Renesas RZ/N1 dwmac support"
+ default ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ select PCS_RZN1_MIIC
+ help
+ Support for Ethernet controller on Renesas RZ/N1 SoC family.
+
+ This selects the Renesas RZ/N1 SoC glue layer support for
+ the stmmac device driver. This support can make use of a custom MII
+ converter PCS device.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 26cad4344701..c2f0e91f6bf8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index a6fefe675ef1..9cd62b2110a1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
+ u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
@@ -593,7 +594,7 @@ struct mac_device_info {
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
struct dw_xpcs *xpcs;
- struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
+ struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 281687d7083b..4ba15873d5b1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -171,6 +171,9 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
div = get_clk_div_rgmii(gmac, speed);
clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
@@ -410,6 +413,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
break;
case PHY_INTERFACE_MODE_SGMII:
@@ -425,6 +431,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
break;
@@ -442,6 +451,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 382e8de1255d..7ae04d8d291c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -12,10 +12,8 @@
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
new file mode 100644
index 000000000000..848cf3c01f4a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+#include "stmmac.h"
+
+static int rzn1_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static int rzn1_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->bsp_priv = plat_dat;
+ plat_dat->pcs_init = rzn1_dwmac_pcs_init;
+ plat_dat->pcs_exit = rzn1_dwmac_pcs_exit;
+
+ ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id rzn1_dwmac_match[] = {
+ { .compatible = "renesas,rzn1-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzn1_dwmac_match);
+
+static struct platform_driver rzn1_dwmac_driver = {
+ .probe = rzn1_dwmac_probe,
+ .remove_new = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rzn1-dwmac",
+ .of_match_table = rzn1_dwmac_match,
+ },
+};
+module_platform_driver(rzn1_dwmac_driver);
+
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("Renesas RZN1 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 68f85e4605cb..b3d45f9dfb55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -379,6 +379,56 @@ static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
return 0;
}
+static int socfpga_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct socfpga_dwmac *dwmac = priv->plat->bsp_priv;
+ struct regmap_config pcs_regmap_cfg = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_shift = REGMAP_UPSHIFT(1),
+ };
+ struct mdio_regmap_config mrc;
+ struct regmap *pcs_regmap;
+ struct phylink_pcs *pcs;
+ struct mii_bus *pcs_bus;
+
+ if (!dwmac->tse_pcs_base)
+ return 0;
+
+ pcs_regmap = devm_regmap_init_mmio(priv->device, dwmac->tse_pcs_base,
+ &pcs_regmap_cfg);
+ if (IS_ERR(pcs_regmap))
+ return PTR_ERR(pcs_regmap);
+
+ memset(&mrc, 0, sizeof(mrc));
+ mrc.regmap = pcs_regmap;
+ mrc.parent = priv->device;
+ mrc.valid_addr = 0x0;
+ mrc.autoscan = false;
+
+ /* Can't use ndev->name here because it will not have been initialised,
+ * and in any case, the user can rename network interfaces at runtime.
+ */
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii",
+ dev_name(priv->device));
+ pcs_bus = devm_mdio_regmap_register(priv->device, &mrc);
+ if (IS_ERR(pcs_bus))
+ return PTR_ERR(pcs_bus);
+
+ pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ return 0;
+}
+
+static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ lynx_pcs_destroy(priv->hw->phylink_pcs);
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -426,6 +476,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
dwmac->ops = ops;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->pcs_init = socfpga_dwmac_pcs_init;
+ plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -444,48 +496,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dvr_remove;
- /* Create a regmap for the PCS so that it can be used by the PCS driver,
- * if we have such a PCS
- */
- if (dwmac->tse_pcs_base) {
- struct regmap_config pcs_regmap_cfg;
- struct mdio_regmap_config mrc;
- struct regmap *pcs_regmap;
- struct mii_bus *pcs_bus;
-
- memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
- memset(&mrc, 0, sizeof(mrc));
-
- pcs_regmap_cfg.reg_bits = 16;
- pcs_regmap_cfg.val_bits = 16;
- pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
-
- pcs_regmap = devm_regmap_init_mmio(&pdev->dev, dwmac->tse_pcs_base,
- &pcs_regmap_cfg);
- if (IS_ERR(pcs_regmap)) {
- ret = PTR_ERR(pcs_regmap);
- goto err_dvr_remove;
- }
-
- mrc.regmap = pcs_regmap;
- mrc.parent = &pdev->dev;
- mrc.valid_addr = 0x0;
- mrc.autoscan = false;
-
- snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
- pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
- if (IS_ERR(pcs_bus)) {
- ret = PTR_ERR(pcs_bus);
- goto err_dvr_remove;
- }
-
- stpriv->hw->lynx_pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
- if (IS_ERR(stpriv->hw->lynx_pcs)) {
- ret = PTR_ERR(stpriv->hw->lynx_pcs);
- goto err_dvr_remove;
- }
- }
-
return 0;
err_dvr_remove:
@@ -494,17 +504,6 @@ err_dvr_remove:
return ret;
}
-static void socfpga_dwmac_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct phylink_pcs *pcs = priv->hw->lynx_pcs;
-
- stmmac_pltfr_remove(pdev);
-
- lynx_pcs_destroy(pcs);
-}
-
#ifdef CONFIG_PM_SLEEP
static int socfpga_dwmac_resume(struct device *dev)
{
@@ -576,7 +575,7 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove_new = socfpga_dwmac_remove,
+ .remove_new = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
.pm = &socfpga_dwmac_pm_ops,
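The relocated PCS setup above names the MDIO bus after the struct device rather than the netdev, which has no name yet at pcs_init() time and can be renamed later anyway. A minimal sketch of that naming choice (helper name hypothetical):

#include <linux/device.h>
#include <linux/mdio/mdio-regmap.h>
#include <linux/phy.h>

static void example_name_pcs_bus(struct device *dev,
				 struct mdio_regmap_config *mrc)
{
	/* Stable id derived from the device, not the renamable netdev */
	snprintf(mrc->name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(dev));
}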
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index b21d99faa2d0..e1b761dcfa1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3927609abc44..8555299443f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a6e8d7bd9588..7667d103cd0e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 6b6d0de09619..b25774d69195 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -68,9 +68,12 @@ static void dwmac4_core_init(struct mac_device_info *hw,
init_waitqueue_head(&priv->tstamp_busy_wait);
}
-static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+static void dwmac4_update_caps(struct stmmac_priv *priv)
{
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -92,19 +95,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 prio, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 base_register;
- u32 value;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
- if (queue >= 4)
- queue -= 4;
+ ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
- value = readl(ioaddr + base_register);
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
+ GMAC_RXQCTRL_PSRQX_MASK(i));
- value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
- value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from the other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
GMAC_RXQCTRL_PSRQX_MASK(queue);
- writel(value, ioaddr + base_register);
+
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ } else {
+ queue -= 4;
+
+ ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ }
}
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
@@ -1165,7 +1190,7 @@ static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1210,7 +1235,7 @@ const struct stmmac_ops dwmac4_ops = {
const struct stmmac_ops dwmac410_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1259,7 +1284,7 @@ const struct stmmac_ops dwmac410_ops = {
const struct stmmac_ops dwmac510_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1356,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
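The reworked priority helpers above (and the XGMAC variant that follows) read both queue-control registers, clear the priority from every queue field, and only then program the target queue, so one priority is never mapped to two Rx queues. A generic sketch with hypothetical register offsets and 8-bit priority fields:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EX_RXQ_CTRL2		0xa8	/* queues 0-3, offset illustrative */
#define EX_RXQ_CTRL3		0xac	/* queues 4-7, offset illustrative */
#define EX_PSRQ_SHIFT(q)	(((q) % 4) * 8)
#define EX_PSRQ(q)		GENMASK(EX_PSRQ_SHIFT(q) + 7, EX_PSRQ_SHIFT(q))

static void example_rx_queue_prio(void __iomem *ioaddr, u32 prio, u32 queue)
{
	u32 ctrl2 = readl(ioaddr + EX_RXQ_CTRL2);
	u32 ctrl3 = readl(ioaddr + EX_RXQ_CTRL3);
	u32 clear = 0;
	int i;

	/* Remove this priority from every queue field in both registers */
	for (i = 0; i < 4; i++)
		clear |= (prio << EX_PSRQ_SHIFT(i)) & EX_PSRQ(i);

	ctrl2 &= ~clear;
	ctrl3 &= ~clear;

	/* Then map it to the one requested queue */
	if (queue < 4)
		ctrl2 |= (prio << EX_PSRQ_SHIFT(queue)) & EX_PSRQ(queue);
	else
		ctrl3 |= (prio << EX_PSRQ_SHIFT(queue)) & EX_PSRQ(queue);

	writel(ctrl2, ioaddr + EX_RXQ_CTRL2);
	writel(ctrl3, ioaddr + EX_RXQ_CTRL3);
}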
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 1af2f89a0504..f8e7775bb633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
- MAC_100000FD;
-}
-
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -105,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value, reg;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
- if (queue >= 4)
+ ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
+
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
+ XGMAC_PSRQ(i));
+
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from the other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
+
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ } else {
queue -= 4;
- value = readl(ioaddr + reg);
- value &= ~XGMAC_PSRQ(queue);
- value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+ ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
- writel(value, ioaddr + reg);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ }
}
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
@@ -1516,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1577,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1637,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1674,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7be04b54738b..90384db228b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -308,8 +308,8 @@ struct stmmac_est;
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
- /* Get phylink capabilities */
- void (*phylink_get_caps)(struct stmmac_priv *priv);
+ /* Update MAC capabilities */
+ void (*update_caps)(struct stmmac_priv *priv);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
@@ -430,8 +430,8 @@ struct stmmac_ops {
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, core_init, __args)
-#define stmmac_mac_phylink_get_caps(__priv) \
- stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv)
+#define stmmac_mac_update_caps(__priv) \
+ stmmac_do_void_callback(__priv, mac, update_caps, __priv)
#define stmmac_mac_set(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
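The hwif.h hunk renames the op from phylink_get_caps to update_caps but keeps the usual stmmac pattern of dispatching through a macro that tolerates a missing callback (the xgmac variants drop theirs entirely and rely on the baseline set in setup()). The sketch below reproduces that optional-callback dispatch in plain C; all names here are made up for illustration and are not the driver's macros.

#include <stdio.h>
#include <stddef.h>

struct priv;

struct mac_ops {
	void (*core_init)(struct priv *p);
	void (*update_caps)(struct priv *p);	/* optional, may be NULL */
};

struct priv {
	const struct mac_ops *ops;
	unsigned int caps;
};

/* Call an op only when the MAC variant provides it. */
#define mac_do_void_callback(p, op, ...)		\
	do {						\
		if ((p)->ops && (p)->ops->op)		\
			(p)->ops->op(__VA_ARGS__);	\
	} while (0)

static void dwmac_like_update_caps(struct priv *p)
{
	p->caps |= 0x7;		/* pretend to add a few capability bits */
}

static const struct mac_ops dwmac_like_ops = {
	.update_caps = dwmac_like_update_caps,
};

static const struct mac_ops xgmac_like_ops = {
	/* no update_caps: the capabilities set at setup time are final */
};

int main(void)
{
	struct priv a = { .ops = &dwmac_like_ops };
	struct priv b = { .ops = &xgmac_like_ops };

	mac_do_void_callback(&a, update_caps, &a);
	mac_do_void_callback(&b, update_caps, &b);	/* safely skipped */
	printf("a.caps=%#x b.caps=%#x\n", a.caps, b.caps);
	return 0;
}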
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index dff02d75d519..5d1ea3e07459 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,7 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_oversize_g;
unsigned int mmc_tx_lpi_usec;
unsigned int mmc_tx_lpi_tran;
@@ -80,6 +81,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_error;
unsigned int mmc_rx_lpi_usec;
unsigned int mmc_rx_lpi_tran;
unsigned int mmc_rx_discard_frames_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 7eb477faa75a..0fab842902a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -53,6 +53,7 @@
#define MMC_TX_EXCESSDEF 0x6c
#define MMC_TX_PAUSE_FRAME 0x70
#define MMC_TX_VLAN_FRAME_G 0x74
+#define MMC_TX_OVERSIZE_G 0x78
/* MMC RX counter registers */
#define MMC_RX_FRAMECOUNT_GB 0x80
@@ -79,6 +80,13 @@
#define MMC_RX_FIFO_OVERFLOW 0xd4
#define MMC_RX_VLAN_FRAMES_GB 0xd8
#define MMC_RX_WATCHDOG_ERROR 0xdc
+#define MMC_RX_ERROR 0xe0
+
+#define MMC_TX_LPI_USEC 0xec
+#define MMC_TX_LPI_TRAN 0xf0
+#define MMC_RX_LPI_USEC 0xf4
+#define MMC_RX_LPI_TRAN 0xf8
+
/* IPC*/
#define MMC_RX_IPC_INTR_MASK 0x100
#define MMC_RX_IPC_INTR 0x108
@@ -283,6 +291,9 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
+ mmc->mmc_tx_oversize_g += readl(mmcaddr + MMC_TX_OVERSIZE_G);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_TX_LPI_TRAN);
/* MMC RX counter registers */
mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
@@ -316,6 +327,10 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
+ mmc->mmc_rx_error += readl(mmcaddr + MMC_RX_ERROR);
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_RX_LPI_TRAN);
+
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
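dwmac_mmc_read() accumulates each newly exposed register (TX oversize, RX error, the LPI counters) into the software totals with += rather than overwriting them, so the values survive the hardware counters' clear-on-read/rollover behaviour. Below is a compact model of that accumulate-on-read pattern, with the MMIO block faked as a plain array and the two offsets borrowed from the diff only for flavour.

#include <stdint.h>
#include <stdio.h>

#define MMC_TX_OVERSIZE_G	0x78
#define MMC_RX_ERROR		0xe0

struct counters {
	uint64_t tx_oversize_g;
	uint64_t rx_error;
};

static uint32_t read_reg(const uint32_t *base, uint32_t off)
{
	return base[off / 4];
}

static void mmc_read(const uint32_t *mmcaddr, struct counters *c)
{
	/* add, never assign: each read folds the interval into the total */
	c->tx_oversize_g += read_reg(mmcaddr, MMC_TX_OVERSIZE_G);
	c->rx_error      += read_reg(mmcaddr, MMC_RX_ERROR);
}

int main(void)
{
	uint32_t regs[0x100 / 4] = {0};
	struct counters c = {0};

	regs[MMC_TX_OVERSIZE_G / 4] = 3;
	regs[MMC_RX_ERROR / 4] = 1;
	mmc_read(regs, &c);

	regs[MMC_TX_OVERSIZE_G / 4] = 2;	/* next polling interval */
	mmc_read(regs, &c);

	printf("tx_oversize_g=%llu rx_error=%llu\n",
	       (unsigned long long)c.tx_oversize_g,
	       (unsigned long long)c.rx_error);
	return 0;
}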
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dddcaa9220cc..b23b920eedb1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -221,6 +221,20 @@ struct stmmac_dma_conf {
unsigned int dma_tx_size;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_reserve[2];
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -261,6 +275,9 @@ struct stmmac_priv {
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
+ /* Protect est parameters */
+ struct mutex est_lock;
+ struct stmmac_est *est;
struct dma_features dma_cap;
struct stmmac_counters mmc;
int hw_cap_support;
@@ -360,7 +377,8 @@ enum stmmac_state {
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
-int stmmac_xpcs_setup(struct mii_bus *mii);
+int stmmac_pcs_setup(struct net_device *ndev);
+void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
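The EST state moves from plat->est into struct stmmac_priv, and the protecting mutex now sits next to the structure instead of inside it, which matters because tc_taprio_configure() memsets the EST block on reconfiguration. The pthread-based sketch below (illustrative names, embedded rather than allocated state, built with -lpthread) shows why the lock has to live outside the zeroed region.

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define GCL_DEPTH 16

struct est_cfg {
	int enable;
	unsigned int gcl_size;
	unsigned int gcl[GCL_DEPTH];
};

struct priv {
	pthread_mutex_t est_lock;	/* protects est, never memset */
	struct est_cfg est;
};

static void est_reconfigure(struct priv *p, unsigned int n)
{
	pthread_mutex_lock(&p->est_lock);
	memset(&p->est, 0, sizeof(p->est));	/* lock stays intact */
	p->est.gcl_size = n;
	p->est.enable = 1;
	pthread_mutex_unlock(&p->est_lock);
}

int main(void)
{
	struct priv p = { .est_lock = PTHREAD_MUTEX_INITIALIZER };

	est_reconfigure(&p, 8);
	est_reconfigure(&p, 4);		/* safe: only the payload was zeroed */
	printf("gcl_size=%u enable=%d\n", p.est.gcl_size, p.est.enable);
	return 0;
}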
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e1537a57815f..542e2633a6f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_oversize_g),
STMMAC_MMC_STAT(mmc_tx_lpi_usec),
STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
@@ -238,6 +239,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_error),
STMMAC_MMC_STAT(mmc_rx_lpi_usec),
STMMAC_MMC_STAT(mmc_rx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 24cd80490d19..b3afc7cb7d72 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -936,6 +936,22 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
+static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ /* Refresh the MAC-specific capabilities */
+ stmmac_mac_update_caps(priv);
+
+ config->mac_capabilities = priv->hw->link.caps;
+
+ if (priv->plat->max_speed)
+ phylink_limit_mac_speed(config, priv->plat->max_speed);
+
+ return config->mac_capabilities;
+}
+
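stmmac_mac_get_caps() re-queries the MAC capabilities on every phylink request and then trims them with phylink_limit_mac_speed() when the platform caps the link speed. The sketch below is a simplified stand-in for that trimming step only; the capability/speed table is invented for illustration and is not the phylink implementation.

#include <stdio.h>

#define CAP_100FD	(1u << 0)
#define CAP_1000FD	(1u << 1)
#define CAP_2500FD	(1u << 2)
#define CAP_10000FD	(1u << 3)

struct cap_speed {
	unsigned int cap;
	int speed;	/* Mb/s */
};

static const struct cap_speed table[] = {
	{ CAP_100FD,   100   },
	{ CAP_1000FD,  1000  },
	{ CAP_2500FD,  2500  },
	{ CAP_10000FD, 10000 },
};

/* Clear every capability whose speed exceeds the platform limit. */
static unsigned int limit_caps(unsigned int caps, int max_speed)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].speed > max_speed)
			caps &= ~table[i].cap;
	return caps;
}

int main(void)
{
	unsigned int caps = CAP_100FD | CAP_1000FD | CAP_2500FD | CAP_10000FD;

	/* e.g. a board that wires the MAC for at most 1G */
	printf("caps limited to 1000 Mb/s: %#x\n", limit_caps(caps, 1000));
	return 0;
}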
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
@@ -944,10 +960,7 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
if (priv->hw->xpcs)
return &priv->hw->xpcs->pcs;
- if (priv->hw->lynx_pcs)
- return priv->hw->lynx_pcs;
-
- return NULL;
+ return priv->hw->phylink_pcs;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1105,6 +1118,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
@@ -1198,29 +1212,20 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
- /* Half-Duplex can only work with single tx queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- else
- priv->phylink_config.mac_capabilities |=
- (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
struct fwnode_handle *fwnode;
struct phylink *phylink;
- int max_speed;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.mac_managed_pm = true;
+ /* Stmmac always requires an RX clock for hardware initialization */
+ priv->phylink_config.mac_requires_rxc = true;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.ovr_an_inband =
@@ -1236,19 +1241,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
-
- stmmac_set_half_duplex(priv);
-
- /* Get the MAC specific capabilities */
- stmmac_mac_phylink_get_caps(priv);
-
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -2506,9 +2498,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdp_desc.len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
continue;
}
@@ -3411,6 +3403,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
u32 chan;
int ret;
+ /* Make sure RX clock is enabled */
+ if (priv->hw->phylink_pcs)
+ phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
@@ -3960,8 +3956,7 @@ static int __stmmac_open(struct net_device *dev,
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
(!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
- !priv->hw->lynx_pcs) {
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -4543,9 +4538,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- skb->len > priv->plat->est->max_sdu[queue]){
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ skb->len > priv->est->max_sdu[queue]){
priv->xstats.max_sdu_txq_drop[queue]++;
goto max_sdu_err;
}
@@ -4924,9 +4919,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdpf->len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdpf->len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
return STMMAC_XDP_CONSUMED;
}
@@ -5109,9 +5104,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ch->rxtx_napi,
- xdp->data_end - xdp->data_hard_start,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start);
if (unlikely(!skb))
return NULL;
@@ -5367,7 +5361,7 @@ read_again:
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
- xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(buf->xdp);
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
@@ -5915,7 +5909,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_set_rx_mode(dev);
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
netdev_update_features(dev);
return 0;
@@ -7355,7 +7349,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_set_half_duplex(priv);
stmmac_napi_add(dev);
if (netif_running(dev))
@@ -7761,11 +7754,9 @@ int stmmac_dvr_probe(struct device *device,
if (priv->plat->speed_mode_2500)
priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
- if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
- ret = stmmac_xpcs_setup(priv->mii);
- if (ret)
- goto error_xpcs_setup;
- }
+ ret = stmmac_pcs_setup(ndev);
+ if (ret)
+ goto error_pcs_setup;
ret = stmmac_phy_setup(priv);
if (ret) {
@@ -7796,8 +7787,9 @@ int stmmac_dvr_probe(struct device *device,
error_netdev_register:
phylink_destroy(priv->phylink);
-error_xpcs_setup:
error_phy_setup:
+ stmmac_pcs_clean(ndev);
+error_pcs_setup:
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -7839,6 +7831,9 @@ void stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
+
+ stmmac_pcs_clean(ndev);
+
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0542cfd1817e..aa43117134d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -495,34 +495,57 @@ int stmmac_mdio_reset(struct mii_bus *bus)
return 0;
}
-int stmmac_xpcs_setup(struct mii_bus *bus)
+int stmmac_pcs_setup(struct net_device *ndev)
{
- struct net_device *ndev = bus->priv;
+ struct dw_xpcs *xpcs = NULL;
struct stmmac_priv *priv;
- struct dw_xpcs *xpcs;
+ int ret = -ENODEV;
int mode, addr;
priv = netdev_priv(ndev);
mode = priv->plat->phy_interface;
- /* Try to probe the XPCS by scanning all addresses. */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- xpcs = xpcs_create_mdiodev(bus, addr, mode);
- if (IS_ERR(xpcs))
- continue;
-
- priv->hw->xpcs = xpcs;
- break;
+ if (priv->plat->pcs_init) {
+ ret = priv->plat->pcs_init(priv);
+ } else if (priv->plat->mdio_bus_data &&
+ priv->plat->mdio_bus_data->has_xpcs) {
+ /* Try to probe the XPCS by scanning all addresses */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ xpcs = xpcs_create_mdiodev(priv->mii, addr, mode);
+ if (IS_ERR(xpcs))
+ continue;
+
+ ret = 0;
+ break;
+ }
+ } else {
+ return 0;
}
- if (!priv->hw->xpcs) {
+ if (ret) {
dev_warn(priv->device, "No xPCS found\n");
- return -ENODEV;
+ return ret;
}
+ priv->hw->xpcs = xpcs;
+
return 0;
}
+void stmmac_pcs_clean(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->pcs_exit)
+ priv->plat->pcs_exit(priv);
+
+ if (!priv->hw->xpcs)
+ return;
+
+ xpcs_destroy(priv->hw->xpcs);
+ priv->hw->xpcs = NULL;
+}
+
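stmmac_pcs_setup() establishes a clear probe order: a platform pcs_init() hook wins, otherwise the MDIO bus is scanned for an XPCS when one is advertised, otherwise having no PCS is simply not an error, with stmmac_pcs_clean() as the symmetric teardown. The sketch below mirrors only that decision ladder with a faked bus; every name in it is illustrative.

#include <stdio.h>
#include <stddef.h>

#define PHY_MAX_ADDR 32

struct priv {
	int (*pcs_init)(struct priv *p);	/* optional platform hook */
	int has_xpcs;
	int xpcs_addr;				/* -1 = none found */
};

static int fake_xpcs_probe(int addr)
{
	return addr == 7;			/* pretend a device sits at address 7 */
}

static int pcs_setup(struct priv *p)
{
	p->xpcs_addr = -1;

	if (p->pcs_init)
		return p->pcs_init(p);		/* platform hook takes precedence */

	if (!p->has_xpcs)
		return 0;			/* nothing to do, not an error */

	for (int addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (fake_xpcs_probe(addr)) {
			p->xpcs_addr = addr;
			return 0;
		}
	}
	return -1;				/* advertised but not found */
}

int main(void)
{
	struct priv p = { .has_xpcs = 1 };

	if (pcs_setup(&p) == 0)
		printf("xpcs at addr %d\n", p.xpcs_addr);
	return 0;
}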
/**
* stmmac_mdio_register
* @ndev: net device structure
@@ -679,9 +702,6 @@ int stmmac_mdio_unregister(struct net_device *ndev)
if (!priv->mii)
return 0;
- if (priv->hw->xpcs)
- xpcs_destroy(priv->hw->xpcs);
-
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e04830a3a1fb..a6b1de9a251d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -68,13 +68,13 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
/* If EST is enabled, disable it before adjusting the PTP time. */
- if (priv->plat->est && priv->plat->est->enable) {
+ if (priv->est && priv->est->enable) {
est_rst = true;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,24 +87,24 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
ktime_t current_time_ns, basetime;
u64 cycle_time;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- time.tv_nsec = priv->plat->est->btr_reserve[0];
- time.tv_sec = priv->plat->est->btr_reserve[1];
+ time.tv_nsec = priv->est->btr_reserve[0];
+ time.tv_sec = priv->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
- priv->plat->est->ctr[0];
+ cycle_time = (u64)priv->est->ctr[1] * NSEC_PER_SEC +
+ priv->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
- priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
+ priv->est->enable = true;
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
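When the PTP clock is stepped, the stored EST base time may end up in the past, so the driver recomputes it as the old base pushed forward by a whole number of cycle times before re-enabling EST. The arithmetic is modelled below with plain nanosecond integers; it follows the intent of stmmac_calc_tas_basetime(), not its exact implementation.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t calc_basetime(uint64_t base_ns, uint64_t now_ns,
			      uint64_t cycle_ns)
{
	if (base_ns >= now_ns)
		return base_ns;		/* still in the future, keep it */

	/* number of full cycles needed to catch up, rounded up */
	uint64_t n = (now_ns - base_ns + cycle_ns - 1) / cycle_ns;

	return base_ns + n * cycle_ns;
}

int main(void)
{
	uint64_t cycle = NSEC_PER_SEC / 4;		/* 250 ms cycle */
	uint64_t base  = 10 * NSEC_PER_SEC;
	uint64_t now   = 12 * NSEC_PER_SEC + 100;	/* clock was stepped */

	uint64_t nb = calc_basetime(base, now, cycle);
	printf("new base %" PRIu64 " ns (>= now: %d)\n", nb, nb >= now);
	return 0;
}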
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index cce00719937d..222540b55480 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -918,7 +918,6 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
- struct plat_stmmacenet_data *plat = priv->plat;
u32 num_tc = qopt->mqprio.qopt.num_tc;
u32 offset, count, i, j;
@@ -933,7 +932,7 @@ static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
count = qopt->mqprio.qopt.count[i];
for (j = offset; j < offset + count; j++)
- plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
}
}
@@ -941,7 +940,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
- struct plat_stmmacenet_data *plat = priv->plat;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
@@ -998,23 +996,25 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cycle_time_extension >= BIT(wid + 7))
return -ERANGE;
- if (!plat->est) {
- plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ if (!priv->est) {
+ priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
GFP_KERNEL);
- if (!plat->est)
+ if (!priv->est)
return -ENOMEM;
- mutex_init(&priv->plat->est->lock);
+ mutex_init(&priv->est_lock);
} else {
- memset(plat->est, 0, sizeof(*plat->est));
+ mutex_lock(&priv->est_lock);
+ memset(priv->est, 0, sizeof(*priv->est));
+ mutex_unlock(&priv->est_lock);
}
size = qopt->num_entries;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->gcl_size = size;
- priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
- mutex_unlock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
+ priv->est->gcl_size = size;
+ priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+ mutex_unlock(&priv->est_lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -1042,33 +1042,33 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
return -EOPNOTSUPP;
}
- priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ priv->est->gcl[i] = delta_ns | (gates << wid);
}
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
qopt->cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
qopt_time = ktime_to_timespec64(qopt->base_time);
- priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
- priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+ priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
ctr = qopt->cycle_time;
- priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
- priv->plat->est->ctr[1] = (u32)ctr;
+ priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->est->ctr[1] = (u32)ctr;
- priv->plat->est->ter = qopt->cycle_time_extension;
+ priv->est->ter = qopt->cycle_time_extension;
tc_taprio_map_maxsdu_txq(priv, qopt);
if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
return -EOPNOTSUPP;
}
@@ -1077,9 +1077,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
*/
priv->plat->fpe_cfg->enable = fpe;
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -1095,17 +1095,17 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
return 0;
disable:
- if (priv->plat->est) {
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ if (priv->est) {
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
/* Reset taprio status */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
}
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
priv->plat->fpe_cfg->enable = false;
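Each taprio entry is packed into a single GCL word: the interval in nanoseconds occupies the low bits (the width comes from dma_cap.estwid) and the open-gate bitmap sits above it, with oversized values rejected earlier in the function with -ERANGE. The sketch below shows that packing under an assumed 24-bit interval field; the width and names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define EST_WID 24			/* interval field width, bits (assumed) */

static uint32_t pack_gcl(uint32_t delta_ns, uint32_t gates)
{
	if (delta_ns >= (1u << EST_WID))
		return 0;		/* the driver rejects this case earlier */
	return delta_ns | (gates << EST_WID);
}

int main(void)
{
	/* 100 us window with traffic classes 0 and 2 open (gates 0b101) */
	uint32_t entry = pack_gcl(100000, 0x5);

	printf("gcl entry = %#010x (interval=%u gates=%#x)\n",
	       (unsigned)entry,
	       (unsigned)(entry & ((1u << EST_WID) - 1)),
	       (unsigned)(entry >> EST_WID));
	return 0;
}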
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index bfb903506367..b8948d5b779a 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -73,6 +73,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
@@ -3803,7 +3804,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f68aa813d4fb..41a27ae58ced 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6751,7 +6751,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) ||
(orig_jumbo == new_jumbo))
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9bd1df8308d2..3e5f9b17c777 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gem_poll_controller(struct net_device *dev)
-{
- struct gem *gp = netdev_priv(dev);
-
- disable_irq(gp->pdev->irq);
- gem_interrupt(gp->pdev->irq, dev);
- enable_irq(gp->pdev->irq);
-}
-#endif
-
static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
@@ -2499,7 +2488,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
struct gem *gp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gem_poll_controller,
-#endif
};
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 36b948820c1e..d1793b6154c7 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -823,7 +823,7 @@ static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xlgmac_restart_dev(pdata);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ca409515ead5..ede5f7890fb4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -756,7 +756,7 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
ENTER;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
bdx_close(ndev);
bdx_open(ndev);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1530d13984d4..1729eb0e0b41 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -167,7 +167,7 @@ config TI_KEYSTONE_NETCP_ETHSS
config TLAN
tristate "TI ThunderLAN support"
- depends on (PCI || EISA)
+ depends on (PCI || EISA) && HAS_IOPORT
help
If you have a PCI Ethernet network card based on the ThunderLAN chip
which is supported by this driver, say Y here.
@@ -198,6 +198,21 @@ config TI_ICSSG_PRUETH
to support the Ethernet operation. Currently, it supports Ethernet
with 1G and 100M link speed.
+config TI_ICSSG_PRUETH_SR1
+ tristate "TI Gigabit PRU SR1.0 Ethernet driver"
+ select PHYLIB
+ select TI_ICSS_IEP
+ select TI_K3_CPPI_DESC_POOL
+ depends on PRU_REMOTEPROC
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+ Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
+ This subsystem is available on the AM65 SR1.0 platform.
+
+ This driver requires firmware binaries which will run on the PRUs
+ to support the Ethernet operation. Currently, it supports Ethernet
+ with 1G, 100M and 10M link speed.
+
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
depends on PTP_1588_CLOCK_OPTIONAL
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d8590304f3df..6e086b4c0384 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -33,10 +33,19 @@ obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
icssg-prueth-y := icssg/icssg_prueth.o \
+ icssg/icssg_common.o \
icssg/icssg_classifier.o \
icssg/icssg_queues.o \
icssg/icssg_config.o \
icssg/icssg_mii_cfg.o \
icssg/icssg_stats.o \
icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o
+icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o \
+ icssg/icssg_common.o \
+ icssg/icssg_classifier.o \
+ icssg/icssg_config.o \
+ icssg/icssg_mii_cfg.o \
+ icssg/icssg_stats.o \
+ icssg/icssg_ethtool.o
obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d6ce2c9f0a8d..a1d0935d1ebe 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -695,6 +695,17 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ unsigned int ptp_v2_filter;
+
+ ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -708,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2939a21ca74f..4e50b3792888 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
@@ -30,6 +31,7 @@
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <net/page_pool/helpers.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -101,6 +103,12 @@
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9)
+
/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
@@ -124,6 +132,11 @@
AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+#define AM65_CPSW_TS_RX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
+
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC 500
@@ -138,6 +151,18 @@
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+/* CPPI streaming packet interface */
+#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
+#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
+
+/* XDP */
+#define AM65_CPSW_XDP_CONSUMED 2
+#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_PASS 0
+
+/* Include headroom compatible with both skb and xdpf */
+#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
@@ -305,12 +330,11 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct sk_buff *skb)
+ struct page *page)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- u32 pkt_len = skb_tailroom(skb);
dma_addr_t desc_dma;
dma_addr_t buf_dma;
void *swdata;
@@ -322,20 +346,22 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
- DMA_FROM_DEVICE);
+ buf_dma = dma_map_single(rx_chn->dma_dev,
+ page_address(page) + AM65_CPSW_HEADROOM,
+ AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_err(dev, "Failed to map rx skb buffer\n");
+ dev_err(dev, "Failed to map rx buffer\n");
return -EINVAL;
}
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
+ buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = skb;
+ *((void **)swdata) = page_address(page);
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}
@@ -369,25 +395,137 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct xdp_rxq_info *rxq;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
+
+ if (rx_chn->page_pool) {
+ page_pool_destroy(rx_chn->page_pool);
+ rx_chn->page_pool = NULL;
+ }
+}
+
+static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP,
+ .order = 0,
+ .pool_size = AM65_CPSW_MAX_RX_DESC,
+ .nid = dev_to_node(common->dev),
+ .dev = common->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .napi = &common->napi_rx,
+ };
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int i, ret;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rx_chn->page_pool = pool;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ am65_cpsw_destroy_xdp_rxqs(common);
+ return ret;
+}
+
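am65_cpsw_create_xdp_rxqs() builds one shared page pool and then registers an XDP rxq per port, unwinding everything through the single destroy path if any registration fails. The sketch below keeps only that create/unwind shape, with the resources simulated in userspace; it is not the driver's code and none of the names are kernel APIs.

#include <stdio.h>
#include <stdlib.h>

#define NUM_PORTS 2

struct state {
	void *pool;			/* shared page pool stand-in */
	int registered[NUM_PORTS];
};

static void destroy_rxqs(struct state *s)
{
	for (int i = 0; i < NUM_PORTS; i++)
		s->registered[i] = 0;
	free(s->pool);
	s->pool = NULL;
}

static int create_rxqs(struct state *s, int fail_at)
{
	s->pool = malloc(64);
	if (!s->pool)
		return -1;

	for (int i = 0; i < NUM_PORTS; i++) {
		if (i == fail_at)
			goto err;	/* e.g. an rxq registration failed */
		s->registered[i] = 1;
	}
	return 0;

err:
	destroy_rxqs(s);		/* single cleanup path, as in the driver */
	return -1;
}

int main(void)
{
	struct state s = {0};

	printf("full init: %d\n", create_rxqs(&s, -1));
	destroy_rxqs(&s);

	int ret = create_rxqs(&s, 1);
	printf("fail mid-way: %d (pool=%p)\n", ret, (void *)s.pool);
	return 0;
}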
+static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
+ void *desc,
+ unsigned char dsize_log2)
+{
+ void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
+
+ return (desc - pool_addr) >> dsize_log2;
+}
+
+static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc,
+ enum am65_cpsw_tx_buf_type buf_type)
+{
+ int desc_idx;
+
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
+ tx_chn->dsize_log2);
+ k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
+ (void *)buf_type);
+}
+
+static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ int desc_idx;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
+ tx_chn->dsize_log2);
+
+ return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
+ desc_idx);
+}
+
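The TX completion path now has to tell skb and XDP buffers apart, so a per-descriptor tag is stored at submit time and looked up again on completion via the descriptor index, which am65_cpsw_nuss_desc_idx() derives from pointer arithmetic against the pool base. The sketch below models that index-plus-tag scheme over a plain array pool; the sizes and names are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define DSIZE_LOG2 6			/* 64-byte descriptors */
#define NUM_DESC   8

enum buf_type { BUF_SKB, BUF_XDP_TX, BUF_XDP_NDO };

static uint8_t pool[NUM_DESC << DSIZE_LOG2];
static enum buf_type tag[NUM_DESC];

/* index = (descriptor address - pool base) >> log2(descriptor size) */
static int desc_idx(const void *desc)
{
	return (int)(((const uint8_t *)desc - pool) >> DSIZE_LOG2);
}

int main(void)
{
	void *d2 = &pool[2 << DSIZE_LOG2];	/* "allocated" descriptor #2 */

	tag[desc_idx(d2)] = BUF_XDP_TX;		/* recorded at submit time */

	/* the completion side only has the descriptor pointer */
	printf("desc %d is %s\n", desc_idx(d2),
	       tag[desc_idx(d2)] == BUF_SKB ? "skb" : "xdp");
	return 0;
}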
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+ struct page *page,
+ bool allow_direct,
+ int desc_idx)
+{
+ page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
+ rx_chn->pages[desc_idx] = NULL;
+}
+
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
dma_addr_t buf_dma;
u32 buf_dma_len;
+ void *page_addr;
void **swdata;
+ int desc_idx;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_kfree_skb_any(skb);
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+ am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -440,12 +578,32 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ struct net_device *ndev,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+
+ len += AM65_CPSW_HEADROOM;
+
+ skb = build_skb(page_addr, len);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb->dev = ndev;
+
+ return skb;
+}
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int port_idx, i, ret, tx;
- struct sk_buff *skb;
u32 val, port_mask;
+ struct page *page;
if (common->usage_count)
return 0;
@@ -505,25 +663,29 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- for (i = 0; i < common->rx_chns.descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL,
- AM65_CPSW_MAX_PACKET_SIZE,
- GFP_KERNEL);
- if (!skb) {
+ ret = am65_cpsw_create_xdp_rxqs(common);
+ if (ret) {
+ dev_err(common->dev, "Failed to create XDP rx queues\n");
+ return ret;
+ }
+
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (!page) {
ret = -ENOMEM;
- dev_err(common->dev, "cannot allocate skb\n");
if (i)
goto fail_rx;
return ret;
}
+ rx_chn->pages[i] = page;
- ret = am65_cpsw_nuss_rx_push(common, skb);
+ ret = am65_cpsw_nuss_rx_push(common, page);
if (ret < 0) {
dev_err(common->dev,
- "cannot submit skb to channel rx, error %d\n",
+ "cannot submit page to channel rx: %d\n",
ret);
- kfree_skb(skb);
+ am65_cpsw_put_page(rx_chn, page, false, i);
if (i)
goto fail_rx;
@@ -531,27 +693,27 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
}
}
- ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
goto fail_rx;
}
for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+ ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
tx, ret);
tx--;
goto fail_tx;
}
- napi_enable(&common->tx_chns[tx].napi_tx);
+ napi_enable(&tx_chn[tx].napi_tx);
}
napi_enable(&common->napi_rx);
if (common->rx_irq_disabled) {
common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
+ enable_irq(rx_chn->irq);
}
dev_dbg(common->dev, "cpsw_nuss started\n");
@@ -559,22 +721,23 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
fail_tx:
while (tx >= 0) {
- napi_disable(&common->tx_chns[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+ napi_disable(&tx_chn[tx].napi_tx);
+ k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
tx--;
}
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
am65_cpsw_nuss_rx_cleanup, 0);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int i;
if (common->usage_count != 1)
@@ -590,26 +753,25 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
reinit_completion(&common->tdown_complete);
for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+ k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
i = wait_for_completion_timeout(&common->tdown_complete,
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&common->tx_chns[i].napi_tx);
- hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ napi_disable(&tx_chn[i].napi_tx);
+ hrtimer_cancel(&tx_chn[i].tx_hrtimer);
}
for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
- &common->tx_chns[i],
+ k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
}
reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
@@ -621,17 +783,22 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
am65_cpsw_nuss_rx_cleanup, !!i);
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ if (rx_chn->pages[i])
+ am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
+ }
+ am65_cpsw_destroy_xdp_rxqs(common);
+
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -749,16 +916,149 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
+ struct am65_cpsw_tx_chn *tx_chn,
+ struct xdp_frame *xdpf,
+ enum am65_cpsw_tx_buf_type buf_type)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct cppi5_host_desc_t *host_desc;
+ struct netdev_queue *netif_txq;
+ dma_addr_t dma_desc, dma_buf;
+ u32 pkt_len = xdpf->len;
+ void **swdata;
+ int ret;
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc)) {
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+
+ am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
+
+ dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
+ pkt_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
+ ndev->stats.tx_dropped++;
+ ret = -ENOMEM;
+ goto pool_free;
+ }
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ *(swdata) = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ spin_unlock_bh(&tx_chn->lock);
+ }
+ if (ret) {
+ /* Inform BQL */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto dma_unmap;
+ }
+
+ return 0;
+
+dma_unmap:
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
+ dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
+pool_free:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ return ret;
+}
+
+static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_port *port,
+ struct xdp_buff *xdp,
+ int desc_idx, int cpu, int *len)
{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct net_device *ndev = port->ndev;
+ int ret = AM65_CPSW_XDP_CONSUMED;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ struct page *page;
+ u32 act;
+
+ prog = READ_ONCE(port->xdp_prog);
+ if (!prog)
+ return AM65_CPSW_XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
+ switch (act) {
+ case XDP_PASS:
+ ret = AM65_CPSW_XDP_PASS;
+ goto out;
+ case XDP_TX:
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ break;
+
+ __netif_tx_lock(netif_txq, cpu);
+ ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (ret)
+ break;
- ns = ((u64)psdata[1] << 32) | psdata[0];
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_CONSUMED;
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ break;
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_REDIRECT;
+ goto out;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ }
+
+ page = virt_to_head_page(xdp->data);
+ am65_cpsw_put_page(rx_chn, page, true, desc_idx);
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
+out:
+ return ret;
}
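am65_cpsw_run_xdp() folds the BPF verdict down to three driver-level results: pass the frame to the stack, consumed (XDP_TX or a drop, page recycled), or redirected (with xdp_do_flush() deferred to the end of the NAPI poll). The sketch below reproduces only that dispatch shape; the action values are plain ints standing in for the kernel's enum xdp_action, and the success flags replace the real transmit/redirect calls.

#include <stdio.h>

enum { ACT_DROP, ACT_PASS, ACT_TX, ACT_REDIRECT, ACT_ABORTED };
enum { RES_PASS = 0, RES_REDIRECT = 1, RES_CONSUMED = 2 };

static int run_xdp(int act, int tx_ok, int redirect_ok)
{
	switch (act) {
	case ACT_PASS:
		return RES_PASS;		/* hand the frame to the stack */
	case ACT_TX:
		if (tx_ok)
			return RES_CONSUMED;	/* re-queued for transmit */
		break;				/* transmit failed: drop */
	case ACT_REDIRECT:
		if (redirect_ok)
			return RES_REDIRECT;	/* flush deferred to end of poll */
		break;
	case ACT_ABORTED:			/* driver traces the exception */
	case ACT_DROP:
	default:				/* unknown action */
		break;
	}
	return RES_CONSUMED;			/* dropped: page goes back to the pool */
}

int main(void)
{
	printf("PASS -> %d, TX -> %d, REDIRECT -> %d, ABORTED -> %d\n",
	       run_xdp(ACT_PASS, 1, 1), run_xdp(ACT_TX, 1, 1),
	       run_xdp(ACT_REDIRECT, 1, 1), run_xdp(ACT_ABORTED, 1, 1));
	return 0;
}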
/* RX psdata[2] word format - checksum information */
@@ -795,7 +1095,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx)
+ u32 flow_idx, int cpu)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -803,13 +1103,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
+ int headroom, desc_idx, ret;
struct net_device *ndev;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ void *page_addr;
void **swdata;
u32 *psdata;
- int ret = 0;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
if (ret) {
@@ -830,7 +1133,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
+ page = virt_to_page(page_addr);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -838,12 +1142,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
port = am65_common_get_port(common, port_id);
ndev = port->ndev;
- skb->dev = ndev;
-
psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* add RX timestamp */
- if (port->rx_ts_enabled)
- am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -851,36 +1150,64 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
- if (new_skb) {
- ndev_priv = netdev_priv(ndev);
- am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
-
- stats = this_cpu_ptr(ndev_priv->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- kmemleak_not_leak(new_skb);
- } else {
- ndev->stats.rx_dropped++;
- new_skb = skb;
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
+ }
+
+ if (port->xdp_prog) {
+ xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
+
+ xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+ pkt_len, false);
+
+ ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+ cpu, &pkt_len);
+ if (ret != AM65_CPSW_XDP_PASS)
+ return ret;
+
+ /* Compute additional headroom to be reserved */
+ headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
+ skb_reserve(skb, headroom);
}
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
+ skb_put(skb, pkt_len);
+ if (port->rx_ts_enabled)
+ am65_cpts_rx_timestamp(common->cpts, skb);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+
+ new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rx_chn->pages[desc_idx] = new_page;
+
if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
- ret = am65_cpsw_nuss_rx_push(common, new_skb);
+requeue:
+ ret = am65_cpsw_nuss_rx_push(common, new_page);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -901,6 +1228,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cpu = smp_processor_id();
+ bool xdp_redirect = false;
int cur_budget, ret;
int num_rx = 0;
@@ -909,9 +1238,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow);
- if (ret)
+ ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
+ if (ret) {
+ if (ret == AM65_CPSW_XDP_REDIRECT)
+ xdp_redirect = true;
break;
+ }
num_rx++;
}
@@ -919,6 +1251,9 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
+ if (xdp_redirect)
+ xdp_do_flush();
+
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
@@ -938,8 +1273,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
+am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
@@ -968,6 +1303,39 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
return skb;
}
+static struct xdp_frame *
+am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma,
+ struct net_device **ndev)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct am65_cpsw_port *port;
+ struct xdp_frame *xdpf;
+ u32 port_id = 0;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ xdpf = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ port = am65_common_get_port(common, port_id);
+ *ndev = port->ndev;
+
+ ndev_priv = netdev_priv(*ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += xdpf->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return xdpf;
+}
+
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -988,11 +1356,13 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1013,10 +1383,21 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
- total_bytes = skb->len;
- ndev = skb->dev;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes = skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes = xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
netif_txq = netdev_get_tx_queue(ndev, chn);
@@ -1034,11 +1415,13 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1057,11 +1440,21 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
-
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes += xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
}
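In both completion paths above, the returned xdp_frame is freed differently depending on where it came from: frames queued by this driver's own XDP_TX path use xdp_return_frame_rx_napi(), which may recycle them through the NAPI-local fast path, while frames handed in through .ndo_xdp_xmit use the plain xdp_return_frame(). Restated as a small sketch with clarifying comments (mirrors the hunks above, not new behaviour):

    switch (buf_type) {
    case AM65_CPSW_TX_BUF_TYPE_XDP_TX:
    	/* frame was queued from this driver's own RX NAPI context,
    	 * so the NAPI-optimised release variant is safe to use
    	 */
    	xdp_return_frame_rx_napi(xdpf);
    	break;
    case AM65_CPSW_TX_BUF_TYPE_XDP_NDO:
    	/* frame arrived from another driver via .ndo_xdp_xmit */
    	xdp_return_frame(xdpf);
    	break;
    default:
    	break;
    }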
@@ -1183,10 +1576,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_stop_q;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
- cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
@@ -1225,6 +1621,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
@@ -1334,7 +1733,6 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
struct ifreq *ifr)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
struct hwtstamp_config cfg;
@@ -1358,11 +1756,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1372,10 +1765,13 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -EOPNOTSUPP;
default:
return -ERANGE;
}
@@ -1405,6 +1801,10 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+ if (port->rx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
+
writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
writel(ts_vlan_ltype, port->port_base +
AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
@@ -1412,9 +1812,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
- /* en/dis RX timestamp */
- am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
-
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1431,7 +1828,7 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
cfg.tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1488,6 +1885,59 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
+ struct bpf_prog *prog)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ bool running = netif_running(ndev);
+ struct bpf_prog *old_prog;
+
+ if (running)
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+
+ old_prog = xchg(&port->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running)
+ return am65_cpsw_nuss_ndo_slave_open(ndev);
+
+ return 0;
+}
+
+static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ int i, nxmit = 0;
+
+ tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
+ break;
+ nxmit++;
+ }
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
+}
+
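With .ndo_bpf wired up below, the standard XDP attach paths reach am65_cpsw_ndo_bpf() via XDP_SETUP_PROG. Purely for illustration, a userspace attach with libbpf could look like the following sketch; the object file name, program name and error handling are placeholders:

    #include <bpf/libbpf.h>
    #include <bpf/bpf.h>
    #include <net/if.h>
    #include <linux/if_link.h>

    /* Attach a hypothetical "xdp_pass" program from xdp_prog.o in
     * native (driver) mode; this ends up in the driver's .ndo_bpf hook.
     */
    int attach_xdp(const char *ifname)
    {
    	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
    	int ifindex = if_nametoindex(ifname);
    	struct bpf_program *prog;

    	if (!obj || !ifindex)
    		return -1;
    	if (bpf_object__load(obj))
    		return -1;
    	prog = bpf_object__find_program_by_name(obj, "xdp_pass");
    	if (!prog)
    		return -1;
    	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
    			      XDP_FLAGS_DRV_MODE, NULL);
    }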
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1502,6 +1952,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
+ .ndo_bpf = am65_cpsw_ndo_bpf,
+ .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
};
static void am65_cpsw_disable_phy(struct phy *phy)
@@ -1772,7 +2224,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
.mode = K3_RINGACC_RING_MODE_RING,
.flags = 0
};
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
int i, ret = 0;
hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
@@ -1816,6 +2268,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
+ tx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
+
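The dsize_log2 additions above (and the matching ones for the RX channel further down) store the descriptor size as a power-of-two exponent; elsewhere in the patch this presumably lets a descriptor's pool index be computed with a shift rather than a division. An illustrative helper under that assumption (the pool-base argument is hypothetical, not a real API):

    /* index of a descriptor inside a pool of power-of-two sized entries */
    static inline int desc_to_idx(void *pool_cpu_base, void *desc,
    			      unsigned char dsize_log2)
    {
    	return (desc - pool_cpu_base) >> dsize_log2;
    }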
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
@@ -1862,8 +2318,8 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
static void am65_cpsw_nuss_remove_rx_chns(void *data)
{
struct am65_cpsw_common *common = data;
- struct am65_cpsw_rx_chn *rx_chn;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_chn *rx_chn;
rx_chn = &common->rx_chns;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
@@ -1873,11 +2329,7 @@ static void am65_cpsw_nuss_remove_rx_chns(void *data)
netif_napi_del(&common->napi_rx);
- if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+ am65_cpsw_nuss_free_rx_chns(common);
common->rx_flow_id_base = -1;
}
@@ -1888,7 +2340,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -1920,6 +2372,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
+ rx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
+
+ rx_chn->page_pool = NULL;
+
+ rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
+ sizeof(*rx_chn->pages), GFP_KERNEL);
+ if (!rx_chn->pages)
+ return -ENOMEM;
+
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2252,6 +2715,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
port->ndev->vlan_features |= NETIF_F_SG;
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
@@ -2315,6 +2781,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
if (ret)
dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+ port->xdp_prog = NULL;
+
if (!common->dma_ndev)
common->dma_ndev = port->ndev;
@@ -2588,7 +3056,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
}
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;
@@ -2793,6 +3262,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2805,6 +3276,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
+ /* The DMA channels are not guaranteed to be in a clean state.
+ * Reset and disable them so that they are back in a clean state
+ * and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
@@ -2904,7 +3391,8 @@ static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2940,9 +3428,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ int ale_entries;
u64 id_temp;
int ret, i;
- int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
@@ -3154,10 +3642,10 @@ static int am65_cpsw_nuss_suspend(struct device *dev)
static int am65_cpsw_nuss_resume(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
- struct am65_cpsw_host *host_p = am65_common_get_host(common);
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 7da0492dc091..d8ce88dc9c89 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
+#include <net/xdp.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -56,10 +57,18 @@ struct am65_cpsw_port {
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
/* Only for suspend resume context */
u32 vid_context;
};
+enum am65_cpsw_tx_buf_type {
+ AM65_CPSW_TX_BUF_TYPE_SKB,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX,
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO,
+};
+
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
@@ -80,6 +89,7 @@ struct am65_cpsw_tx_chn {
int irq;
u32 id;
u32 descs_num;
+ unsigned char dsize_log2;
char tx_chn_name[128];
u32 rate_mbps;
};
@@ -89,7 +99,10 @@ struct am65_cpsw_rx_chn {
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
+ struct page_pool *page_pool;
+ struct page **pages;
u32 descs_num;
+ unsigned char dsize_log2;
int irq;
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 816e73a3d6e4..fa96db7c1a13 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -9,6 +9,7 @@
#include <linux/pm_runtime.h>
#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>
@@ -837,6 +838,7 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = common->cpts;
struct am65_cpsw_est *est_new;
+ u64 cur_time, n;
int ret, tact;
if (!netif_running(ndev)) {
@@ -888,13 +890,21 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
if (tact == TACT_PROG)
am65_cpsw_timer_stop(ndev);
- if (!est_new->taprio.base_time)
- est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
-
am65_cpsw_port_est_get_buf_num(ndev, est_new);
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+ /* If the base-time is in the past, start the schedule at:
+ * base_time + (N * cycle_time)
+ * where N is the smallest integer such that this time is in
+ * the future.
+ */
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (est_new->taprio.base_time < cur_time) {
+ n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
+ est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
+ }
+
am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
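A quick worked example of the base-time adjustment added above, with illustrative numbers: for base_time = 100 ns, cycle_time = 250 ns and cur_time = 1000 ns, n = div64_u64(1000 - 100, 250) = 3, so base_time becomes 100 + (3 + 1) * 250 = 1100 ns, which is the first cycle boundary after cur_time.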
@@ -1008,6 +1018,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91c28..59d6ab989c55 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -275,15 +275,13 @@ static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
return true;
}
-static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
struct ptp_clock_event pevent;
struct am65_cpts_event *event;
bool schedule = false;
int i, type, ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
event = list_first_entry_or_null(&cpts->pool,
struct am65_cpts_event, list);
@@ -312,8 +310,7 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_del_init(&event->list);
- list_add_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -356,14 +353,24 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
}
out:
- spin_unlock_irqrestore(&cpts->lock, flags);
-
if (schedule)
ptp_schedule_worker(cpts->ptp_clock, 0);
return ret;
}
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ret = __am65_cpts_fifo_read(cpts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ret;
+}
+
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
struct ptp_system_timestamp *sts)
{
@@ -784,6 +791,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
@@ -859,29 +871,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
return delay;
}
-/**
- * am65_cpts_rx_enable - enable rx timestamping
- * @cpts: cpts handle
- * @en: enable
- *
- * This functions enables rx packets timestamping. The CPTS can timestamp all
- * rx packets.
- */
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
-{
- u32 val;
-
- mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
- if (en)
- val |= AM65_CPTS_CONTROL_TSTAMP_EN;
- else
- val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
- am65_cpts_write32(cpts, val, control);
- mutex_unlock(&cpts->ptp_clk_lock);
-}
-EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
-
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
@@ -906,6 +895,69 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
return 1;
}
+static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ unsigned long flags;
+ u32 mtype_seqid;
+ u64 ns = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ __am65_cpts_fifo_read(cpts);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_move(&event->list, &cpts->pool);
+ continue;
+ }
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
+ list_move(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
+ struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
+
+ /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
+ * the skb MAC header is not set up yet. Reset it here before
+ * classifying the packet.
+ */
+ skb_reset_mac_header(skb);
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return; /* if not PTP class packet */
+
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
+
+ ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
+ if (!ns)
+ return;
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);
+
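Since HWTSTAMP_FILTER_ALL is no longer accepted by am65_cpsw_nuss_hwtstamp_set() (it now returns -EOPNOTSUPP), userspace has to request a PTP v2 filter explicitly. A minimal sketch of the ioctl side; the interface name and the socket passed in are illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    /* Request TX timestamping and RX timestamping of PTP v2 event
     * frames on "eth0" (illustrative); sock is any open socket.
     */
    int enable_ptp_ts(int sock)
    {
    	struct hwtstamp_config cfg = {
    		.tx_type   = HWTSTAMP_TX_ON,
    		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
    	};
    	struct ifreq ifr;

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
    	ifr.ifr_data = (char *)&cfg;

    	/* lands in the driver's hwtstamp_set handler */
    	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }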
/**
* am65_cpts_tx_timestamp - save tx packet for timestamping
* @cpts: cpts handle
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index 6e14df0be113..6099d772799d 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -22,9 +22,9 @@ void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
@@ -48,17 +48,18 @@ static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
return -1;
}
-static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts,
struct sk_buff *skb)
{
}
-static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
- struct sk_buff *skb)
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
-static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 087dcb67505a..2baa198ebfa0 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1625,7 +1625,8 @@ static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1762,7 +1763,8 @@ static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 764ed298b570..6fe4edabba44 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1404,6 +1404,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 6df53ab17fbc..79ba47bb3602 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -274,6 +274,16 @@ static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
regmap_write(miig_rt, offset, data);
}
+static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
+{
+ u32 offset, val;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_read(miig_rt, offset, &val);
+
+ return val;
+}
+
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
{
regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
@@ -288,6 +298,26 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
}
+static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
+ int slot, const u8 *addr, const u8 *mask)
+{
+ u32 val;
+ int i;
+
+ WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
+
+ rx_class_ft1_set_da(miig_rt, slice, slot, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
+
+ /* Enable the FT1 slot in OR enable for all classifiers */
+ for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
+ val = rx_class_get_or(miig_rt, slice, i);
+ val |= RX_CLASS_FT_FT1_MATCH(slot);
+ rx_class_set_or(miig_rt, slice, i, val);
+ }
+}
+
/* disable all RX traffic */
void icssg_class_disable(struct regmap *miig_rt, int slice)
{
@@ -331,30 +361,95 @@ void icssg_class_disable(struct regmap *miig_rt, int slice)
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
-void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti)
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1)
{
+ int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
u32 data;
+ int n;
/* defaults */
icssg_class_disable(miig_rt, slice);
/* Setup Classifier */
- /* match on Broadcast or MAC_PRU address */
- data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
+ for (n = 0; n < num_classifiers; n++) {
+ /* match on Broadcast or MAC_PRU address */
+ data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
- /* multicast */
- if (allmulti)
- data |= RX_CLASS_FT_MC;
+ /* multicast */
+ if (allmulti)
+ data |= RX_CLASS_FT_MC;
- rx_class_set_or(miig_rt, slice, 0, data);
+ rx_class_set_or(miig_rt, slice, n, data);
- /* set CFG1 for OR_OR_AND for classifier */
- rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND);
+ /* set CFG1 for OR_OR_AND for classifier */
+ rx_class_sel_set_type(miig_rt, slice, n,
+ RX_CLASS_SEL_TYPE_OR_OR_AND);
+ }
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
+{
+ u32 data, offset;
+ int n;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
+ /* set RAW_MASK to bypass filters */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ data |= RX_CLASS_GATES_RAW_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+}
+
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev)
+{
+ u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
+ struct netdev_hw_addr *ha;
+ int slot = 2;
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ /* reserve first 2 slots for
+ * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
+ * 2) 01-00-5e-00-00-XX Local Network Control Block
+ * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
+ */
+ icssg_class_ft1_add_mcast(miig_rt, slice, 0,
+ eth_reserved_addr_base, mask_addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, 1,
+ eth_ipv4_mcast_addr_base, mask_addr);
+ mask_addr[5] = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* skip addresses matching reserved slots */
+ if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
+ !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
+ netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
+ continue;
+ }
+
+ if (slot >= FT1_NUM_SLOTS) {
+ netdev_dbg(ndev,
+ "can't add more than %d MC addresses, enabling allmulti\n",
+ FT1_NUM_SLOTS);
+ icssg_class_default(miig_rt, slice, 1, true);
+ break;
+ }
+
+ netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, slot,
+ ha->addr, mask_addr);
+ slot++;
+ }
+}
+
/* required for SAV check */
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
new file mode 100644
index 000000000000..088ab8076db4
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -0,0 +1,1252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) Siemens AG, 2024
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+
+#include "icssg_prueth.h"
+#include "../k3-cppi-desc-pool.h"
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+
+void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows)
+{
+ if (rx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (rx_chn->rx_chn)
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+
+void prueth_cleanup_tx_chns(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (tx_chn->tx_chn)
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ /* Assume prueth_cleanup_tx_chns() is called at the
+ * end after all channel resources are freed
+ */
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->irq)
+ free_irq(tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+}
+
+void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget, bool *tdown)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_tx;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ unsigned int total_bytes = 0;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &emac->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ /* teardown completion */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&emac->tdown_cnt))
+ complete(&emac->tdown_complete);
+ *tdown = true;
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ /* was this command's TX complete? */
+ if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ prueth_xmit_free(tx_chn, desc_tx);
+ continue;
+ }
+
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ ndev = skb->dev;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the TX queue was stopped, wake it now
+ * if we have enough room.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+ __netif_tx_unlock(netif_txq);
+ }
+
+ return num_tx;
+}
+
+static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_tx_chn *tx_chns =
+ container_of(timer, struct prueth_tx_chn, tx_hrtimer);
+
+ enable_irq(tx_chns->irq);
+ return HRTIMER_NORESTART;
+}
+
+static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
+ struct prueth_emac *emac = tx_chn->emac;
+ bool tdown = false;
+ int num_tx_packets;
+
+ num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
+ &tdown);
+
+ if (num_tx_packets >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx_packets)) {
+ if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(tx_chn->irq);
+ }
+ }
+
+ return num_tx_packets;
+}
+
+static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
+{
+ struct prueth_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int i, ret;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
+ hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ ret = request_irq(tx_chn->irq, prueth_tx_irq,
+ IRQF_TRIGGER_HIGH, tx_chn->name,
+ tx_chn);
+ if (ret) {
+ netif_napi_del(&tx_chn->napi_tx);
+ dev_err(prueth->dev, "unable to request TX IRQ %d\n",
+ tx_chn->irq);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ prueth_ndev_del_tx_napi(emac, i);
+ return ret;
+}
+
+int prueth_init_tx_chns(struct prueth_emac *emac)
+{
+ static const struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ .size = PRUETH_MAX_TX_DESC,
+ };
+ struct k3_udma_glue_tx_channel_cfg tx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ int ret, slice, i;
+ u32 hdesc_size;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ init_completion(&emac->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&tx_cfg, 0, sizeof(tx_cfg));
+ tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(tx_chn->name, sizeof(tx_chn->name),
+ "tx%d-%d", slice, i);
+
+ tx_chn->emac = emac;
+ tx_chn->id = i;
+ tx_chn->descs_num = PRUETH_MAX_TX_DESC;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev, tx_chn->name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ tx_chn->tx_chn = NULL;
+ netdev_err(ndev,
+ "Failed to request tx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ tx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
+ goto fail;
+ }
+
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get tx irq\n");
+ goto fail;
+ }
+ tx_chn->irq = ret;
+
+ snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_tx_chns(emac);
+ return ret;
+}
+
+int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num)
+{
+ struct k3_udma_glue_rx_channel_cfg rx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ u32 fdqring_id, hdesc_size;
+ int i, ret = 0, slice;
+ int flow_id_base;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&rx_cfg, 0, sizeof(rx_cfg));
+ rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = max_rflows;
+ rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
+ &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ rx_chn->rx_chn = NULL;
+ netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size,
+ rx_chn->name);
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ rx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
+ goto fail;
+ }
+
+ flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
+ emac->rx_mgm_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
+ } else {
+ emac->rx_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
+ }
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ netdev_err(ndev, "Failed to init rx flow%d %d\n",
+ i, ret);
+ goto fail;
+ }
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -ENXIO;
+ netdev_err(ndev, "Failed to get rx dma irq");
+ goto fail;
+ }
+ rx_chn->irq[i] = ret;
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+ return ret;
+}
+
+int prueth_dma_rx_push(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ struct prueth_rx_chn *rx_chn)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *swdata = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ desc_rx, desc_dma);
+}
+
+u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+
+void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (emac->is_sr1) {
+ ns = (u64)psdata[1] << 32 | psdata[0];
+ } else {
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ void **swdata;
+ u32 *psdata;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ return 0;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ skb->dev = ndev;
+ new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with the old skb to prevent a stall
+ */
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ } else {
+ /* send the filled skb up the n/w stack */
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+ }
+
+ /* queue another RX DMA */
+ ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the network stack to transmit a packet; the packet is
+ * queued to the EMAC hardware transmit queue. Does not wait for
+ * completion; TX completion is handled later in
+ * emac_tx_complete_packets().
+ *
+ * Return: enum netdev_tx
+ */
+enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ int i, ret = 0, q_idx;
+ bool in_tx_ts = 0;
+ int tx_ts_cookie;
+ void **swdata;
+ u32 pkt_len;
+ u32 *epib;
+
+ pkt_len = skb_headlen(skb);
+ q_idx = skb_get_queue_mapping(skb);
+
+ tx_chn = &emac->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: failed to map skb buffer\n");
+ ret = NETDEV_TX_OK;
+ goto drop_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto drop_stop_q_busy;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = 1;
+ }
+ }
+
+ /* set the dst tag: bits 8..15 carry the internal queue id used by
+ * the firmware, bits 0..7 carry the port number for directed
+ * packets in switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *swdata = skb;
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ netdev_err(ndev,
+ "tx: failed to allocate frag. descriptor\n");
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: Failed to map skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ret = NETDEV_TX_OK;
+ goto cleanup_tx_ts;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON_ONCE(pkt_len != skb->len);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ /* cppi5_desc_dump(first_desc, 64); */
+
+ skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that the stop_queue update is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS)
+ netif_tx_wake_queue(netif_txq);
+ }
+
+ return NETDEV_TX_OK;
+
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_free_skb:
+ dev_kfree_skb_any(skb);
+
+ /* error */
+ ndev->stats.tx_dropped++;
+ netdev_err(ndev, "tx: error: %d\n", ret);
+
+ return ret;
+
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_stop_q_busy:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
+static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&emac->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ emac->fw_running = 0;
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
+void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+
+int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ int rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
+ int flow = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int num_rx = 0;
+ int cur_budget;
+ int ret;
+
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (unlikely(emac->rx_pace_timeout_ns)) {
+ hrtimer_start(&emac->rx_hrtimer,
+ ns_to_ktime(emac->rx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
+ }
+
+ return num_rx;
+}
+
+int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size)
+{
+ struct sk_buff *skb;
+ int i, ret;
+
+ for (i = 0; i < chn->descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = prueth_dma_rx_push(emac, skb, chn);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit skb for rx chan %s ret %d\n",
+ chn->name, ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb)
+{
+ int i;
+
+ for (i = 0; i < ch_num; i++) {
+ if (free_skb)
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
+void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable)
+{
+ int i;
+
+ for (i = 0; i < num_flows; i++)
+ k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
+ prueth_rx_cleanup, !!i);
+ if (disable)
+ k3_udma_glue_disable_rx_chn(chn->rx_chn);
+}
+
+void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ ndev->stats.tx_errors++;
+}
+
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
+ return phy_do_ioctl(ndev, ifr, cmd);
+}
+
+void emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac_update_hardware_stats(emac);
+
+ stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
+ stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
+ stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
+ stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
+ stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
+ stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
+ stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+}
+
+int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ ret = snprintf(name, len, "p%d", emac->port_id);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* get emac_port corresponding to eth_node name */
+int prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+
+/* get MAC instance corresponding to eth_node name */
+int prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
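+
+/* Both helpers above translate the "reg" property of an ethernet-port node
+ * into a driver enum; a matching device tree fragment would look roughly
+ * like this (illustrative sketch only):
+ *
+ * ethernet-ports {
+ * port@0 { reg = <0>; }; // MII0 / MAC0
+ * port@1 { reg = <1>; }; // MII1 / MAC1
+ * };
+ */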
+
+void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ if (of_phy_is_fixed_link(emac->phy_node))
+ of_phy_deregister_fixed_link(emac->phy_node);
+
+ netif_napi_del(&emac->napi_rx);
+
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+ destroy_workqueue(emac->cmd_wq);
+ free_netdev(emac->ndev);
+ prueth->emac[mac] = NULL;
+}
+
+int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
+{
+ struct device *dev = prueth->dev;
+ enum pruss_pru_id pruss_id;
+ struct device_node *np;
+ int idx = -1, ret;
+
+ np = dev->of_node;
+
+ switch (slice) {
+ case ICSS_SLICE0:
+ idx = 0;
+ break;
+ case ICSS_SLICE1:
+ /* SR1.0 has two cores per slice (PRU, RTU) while SR2.0 has
+ * three (PRU, RTU, TX_PRU), so slice 1 starts at a different
+ * rproc index in the device tree list.
+ */
+ idx = is_sr1 ? 2 : 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
+ if (IS_ERR(prueth->pru[slice])) {
+ ret = PTR_ERR(prueth->pru[slice]);
+ prueth->pru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+ }
+ prueth->pru_id[slice] = pruss_id;
+
+ idx++;
+ prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->rtu[slice])) {
+ ret = PTR_ERR(prueth->rtu[slice]);
+ prueth->rtu[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+ }
+
+ if (is_sr1)
+ return 0;
+
+ idx++;
+ prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->txpru[slice])) {
+ ret = PTR_ERR(prueth->txpru[slice]);
+ prueth->txpru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+ }
+
+ return 0;
+}
+
+void prueth_put_cores(struct prueth *prueth, int slice)
+{
+ if (prueth->txpru[slice])
+ pru_rproc_put(prueth->txpru[slice]);
+
+ if (prueth->rtu[slice])
+ pru_rproc_put(prueth->rtu[slice]);
+
+ if (prueth->pru[slice])
+ pru_rproc_put(prueth->pru[slice]);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
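+
+/* Illustrative sketch (an assumption, not part of this patch) of how the
+ * shared PM ops above are expected to be referenced by the SR1.0 and SR2.0
+ * platform drivers; the driver and match-table names are placeholders:
+ *
+ * static struct platform_driver prueth_driver = {
+ * .probe = prueth_probe,
+ * .driver = {
+ * .name = "icssg-prueth",
+ * .pm = &prueth_dev_pm_ops,
+ * .of_match_table = prueth_dt_match,
+ * },
+ * };
+ * module_platform_driver(prueth_driver);
+ */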
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 99de8a40ed60..15f2235bf90f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -20,6 +20,8 @@
/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M 0x17
#define MII_RT_TX_IPG_1G 0xb
+#define MII_RT_TX_IPG_100M_SR1 0x166
+#define MII_RT_TX_IPG_1G_SR1 0x1a
#define ICSSG_QUEUES_MAX 64
#define ICSSG_QUEUE_OFFSET 0xd00
@@ -202,23 +204,29 @@ void icssg_config_ipg(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 ipg;
switch (emac->speed) {
case SPEED_1000:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
break;
case SPEED_100:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
break;
case SPEED_10:
+ /* Firmware hardcodes IPG for SR1.0 */
+ if (emac->is_sr1)
+ return;
/* IPG for 10M is same as 100M */
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = MII_RT_TX_IPG_100M;
break;
default:
 /* Other link speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
return;
}
+
+ icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
static void emac_r30_cmd_init(struct prueth_emac *emac)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 43eb0922172a..cf2ea4bd22a2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -109,6 +109,62 @@ enum icssg_port_state_cmd {
#define ICSSG_FLAG_MASK 0xff00ffff
+/* SR1.0-specific bits */
+#define PRUETH_MAX_RX_FLOWS_SR1 4 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA_SR1 3 /* highest priority flow */
+#define PRUETH_MAX_RX_MGM_DESC_SR1 8
+#define PRUETH_MAX_RX_MGM_FLOWS_SR1 2 /* excluding default flow */
+#define PRUETH_RX_MGM_FLOW_RESPONSE_SR1 0
+#define PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1 1
+
+#define PRUETH_NUM_BUF_POOLS_SR1 16
+#define PRUETH_EMAC_BUF_POOL_START_SR1 8
+#define PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 128
+#define PRUETH_EMAC_BUF_SIZE_SR1 1536
+#define PRUETH_EMAC_NUM_BUF_SR1 4
+#define PRUETH_EMAC_BUF_POOL_SIZE_SR1 (PRUETH_EMAC_NUM_BUF_SR1 * \
+ PRUETH_EMAC_BUF_SIZE_SR1)
+#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */
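+
+/* Illustrative cross-check (an assumption, not part of the original patch) of
+ * the breakdown in the comment above, assuming <linux/build_bug.h> is
+ * reachable: 0x1880 * 8 * 2 == 100352 == SZ_64K + SZ_32K + SZ_2K.
+ */
+static_assert(MSMC_RAM_SIZE_SR1 == 0x1880 * 8 * 2);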
+
+struct icssg_sr1_config {
+ __le32 status; /* Firmware status */
+ __le32 addr_lo; /* MSMC Buffer pool base address low. */
+ __le32 addr_hi; /* MSMC Buffer pool base address high. Must be 0 */
+ __le32 tx_buf_sz[16]; /* Array of buffer pool sizes */
+ __le32 num_tx_threads; /* Number of active egress threads, 1 to 4 */
+ __le32 tx_rate_lim_en; /* Bitmask: egress rate limit enable per thread */
+ __le32 rx_flow_id; /* RX flow id for first rx ring */
+ __le32 rx_mgr_flow_id; /* RX flow id for the first management ring */
+ __le32 flags; /* TBD */
+ __le32 n_burst; /* for debug */
+ __le32 rtu_status; /* RTU status */
+ __le32 info; /* reserved */
+ __le32 reserve;
+ __le32 rand_seed; /* Seed used for random number generation in the firmware */
+} __packed;
+
+/* SR1.0 shutdown command to stop processing at firmware.
+ * Command format: 0x8101ss00, where
+ * - ss: sequence number. Currently not used by driver.
+ */
+#define ICSSG_SHUTDOWN_CMD_SR1 0x81010000
+
+/* SR1.0 pstate speed/duplex command to set speed and duplex settings
+ * in firmware.
+ * Command format: 0x8102ssPN, where
+ * - ss: sequence number. Currently not used by driver.
+ * - P: port number (for switch mode).
+ * - N: Speed/Duplex state:
+ * 0x0 - 10Mbps/Half duplex;
+ * 0x8 - 10Mbps/Full duplex;
+ * 0x2 - 100Mbps/Half duplex;
+ * 0xa - 100Mbps/Full duplex;
+ * 0xc - 1Gbps/Full duplex;
+ * NOTE: The values above are the same as bits [3..1] (slice 0)
+ * or bits [7..5] (slice 1) of the RGMII CFG register.
+ */
+#define ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 0x81020000
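+
+/* Illustrative helper (an assumption, not part of the original patch) showing
+ * how a pstate command word is composed from the fields documented above;
+ * e.g. seq 0, port 0, 100Mbps/Full duplex (N = 0xa) yields 0x8102000a.
+ */
+static inline u32 icssg_pstate_speed_duplex_cmd_sr1(u8 seq, u8 port, u8 state)
+{
+ return ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 | ((u32)seq << 16) |
+ ((u32)port << 4) | state;
+}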
+
struct icssg_setclock_desc {
u8 request;
u8 restore;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 9a7dd7efcf69..c8d0f45cc5b1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -142,6 +142,9 @@ static int emac_set_channels(struct net_device *ndev,
emac->tx_ch_num = ch->tx_count;
+ /* On SR1.0 the extra highest-priority TX channel is reserved for
+ * firmware management messages and is not exposed via ethtool,
+ * so account for it here.
+ */
+ if (emac->is_sr1)
+ emac->tx_ch_num++;
+
return 0;
}
@@ -152,8 +155,17 @@ static void emac_get_channels(struct net_device *ndev,
ch->max_rx = 1;
ch->max_tx = PRUETH_MAX_TX_QUEUES;
+
+ /* Disable multiple TX channels due to timeouts
+ * when using more than one queue
+ */
+ if (emac->is_sr1)
+ ch->max_tx = 1;
+
ch->rx_count = 1;
ch->tx_count = emac->tx_ch_num;
+
+ /* Don't report the TX channel reserved for firmware management messages */
+ if (emac->is_sr1)
+ ch->tx_count--;
}
static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = {
@@ -189,6 +201,93 @@ static void emac_get_rmon_stats(struct net_device *ndev,
rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames");
}
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ coal->rx_coalesce_usecs = emac->rx_pace_timeout_ns / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs &&
+ coal->rx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for rx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->rx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ emac->rx_pace_timeout_ns = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int emac_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs for tx-%u\n",
+ ICSSG_MIN_COALESCE_USECS, queue);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
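+
+/* Minimal sketch (an assumption, not code from this patch) of how the stored
+ * RX pacing timeout is meant to be consumed: when NAPI completes, the RX IRQ
+ * stays masked and rx_hrtimer is armed instead; the timer callback re-enables
+ * the interrupt once rx_pace_timeout_ns has elapsed.
+ */
+static inline void emac_rx_pace_irq_example(struct prueth_emac *emac, int rx_flow)
+{
+ if (emac->rx_pace_timeout_ns)
+ hrtimer_start(&emac->rx_hrtimer,
+ ns_to_ktime(emac->rx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ else
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+}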
+
const struct ethtool_ops icssg_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
.get_msglevel = emac_get_msglevel,
@@ -197,6 +296,12 @@ const struct ethtool_ops icssg_ethtool_ops = {
.get_ethtool_stats = emac_get_ethtool_stats,
.get_strings = emac_get_strings,
.get_ts_info = emac_get_ts_info,
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_TX_USECS,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+ .get_per_queue_coalesce = emac_get_per_queue_coalesce,
+ .set_per_queue_coalesce = emac_set_per_queue_coalesce,
.get_channels = emac_get_channels,
.set_channels = emac_set_channels,
.get_link_ksettings = emac_get_link_ksettings,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index cf7b73f8f450..7c9e9518f555 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -34,568 +34,9 @@
#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
-/* Netif debug messages possible */
-#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL)
-
-#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
-
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
-#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
-
-static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- int max_rflows)
-{
- if (rx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (rx_chn->rx_chn)
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
-}
-
-static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
-{
- int i;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (tx_chn->tx_chn)
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- /* Assume prueth_cleanup_tx_chns() is called at the
- * end after all channel resources are freed
- */
- memset(tx_chn, 0, sizeof(*tx_chn));
- }
-}
-
-static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->irq)
- free_irq(tx_chn->irq, tx_chn);
- netif_napi_del(&tx_chn->napi_tx);
- }
-}
-
-static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
- struct cppi5_host_desc_t *desc)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc;
- dma_addr_t buf_dma, next_desc_dma;
- u32 buf_dma_len;
-
- first_desc = desc;
- next_desc = first_desc;
-
- cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
- while (next_desc_dma) {
- next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- next_desc_dma);
- cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- }
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
- int budget)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_tx;
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- unsigned int total_bytes = 0;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
- void **swdata;
-
- tx_chn = &emac->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- /* teardown completion */
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&emac->tdown_cnt))
- complete(&emac->tdown_complete);
- break;
- }
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
-
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- if (netif_tx_queue_stopped(netif_txq)) {
- /* If the TX queue was stopped, wake it now
- * if we have enough room.
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
- __netif_tx_unlock(netif_txq);
- }
-
- return num_tx;
-}
-
-static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
- struct prueth_emac *emac = tx_chn->emac;
- int num_tx_packets;
-
- num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);
-
- if (num_tx_packets >= budget)
- return budget;
-
- if (napi_complete_done(napi_tx, num_tx_packets))
- enable_irq(tx_chn->irq);
-
- return num_tx_packets;
-}
-
-static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
-{
- struct prueth_tx_chn *tx_chn = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&tx_chn->napi_tx);
-
- return IRQ_HANDLED;
-}
-
-static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int i, ret;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- ret = request_irq(tx_chn->irq, prueth_tx_irq,
- IRQF_TRIGGER_HIGH, tx_chn->name,
- tx_chn);
- if (ret) {
- netif_napi_del(&tx_chn->napi_tx);
- dev_err(prueth->dev, "unable to request TX IRQ %d\n",
- tx_chn->irq);
- goto fail;
- }
- }
-
- return 0;
-fail:
- prueth_ndev_del_tx_napi(emac, i);
- return ret;
-}
-
-static int prueth_init_tx_chns(struct prueth_emac *emac)
-{
- static const struct k3_ring_cfg ring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- .size = PRUETH_MAX_TX_DESC,
- };
- struct k3_udma_glue_tx_channel_cfg tx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- int ret, slice, i;
- u32 hdesc_size;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- init_completion(&emac->tdown_complete);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&tx_cfg, 0, sizeof(tx_cfg));
- tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- tx_cfg.tx_cfg = ring_cfg;
- tx_cfg.txcq_cfg = ring_cfg;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(tx_chn->name, sizeof(tx_chn->name),
- "tx%d-%d", slice, i);
-
- tx_chn->emac = emac;
- tx_chn->id = i;
- tx_chn->descs_num = PRUETH_MAX_TX_DESC;
-
- tx_chn->tx_chn =
- k3_udma_glue_request_tx_chn(dev, tx_chn->name,
- &tx_cfg);
- if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- tx_chn->tx_chn = NULL;
- netdev_err(ndev,
- "Failed to request tx dma ch: %d\n", ret);
- goto fail;
- }
-
- tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- tx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
- goto fail;
- }
-
- ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (ret < 0) {
- netdev_err(ndev, "failed to get tx irq\n");
- goto fail;
- }
- tx_chn->irq = ret;
-
- snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
- dev_name(dev), tx_chn->id);
- }
-
- return 0;
-
-fail:
- prueth_cleanup_tx_chns(emac);
- return ret;
-}
-
-static int prueth_init_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- char *name, u32 max_rflows,
- u32 max_desc_num)
-{
- struct k3_udma_glue_rx_channel_cfg rx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- u32 fdqring_id, hdesc_size;
- int i, ret = 0, slice;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&rx_cfg, 0, sizeof(rx_cfg));
- rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = max_rflows;
- rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
-
- /* init all flows */
- rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
-
- rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
- &rx_cfg);
- if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- rx_chn->rx_chn = NULL;
- netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
- goto fail;
- }
-
- rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
- rx_chn->descs_num,
- hdesc_size,
- rx_chn->name);
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- rx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
- goto fail;
- }
-
- emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
- netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);
-
- fdqring_id = K3_RINGACC_RING_ID_ANY;
- for (i = 0; i < rx_cfg.flow_id_num; i++) {
- struct k3_ring_cfg rxring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- };
- struct k3_ring_cfg fdqring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .flags = K3_RINGACC_RING_SHARED,
- };
- struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
- .rx_cfg = rxring_cfg,
- .rxfdq_cfg = fdqring_cfg,
- .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
- .src_tag_lo_sel =
- K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
- };
-
- rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
- rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
-
- ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
- i, &rx_flow_cfg);
- if (ret) {
- netdev_err(ndev, "Failed to init rx flow%d %d\n",
- i, ret);
- goto fail;
- }
- if (!i)
- fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
- i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
- netdev_err(ndev, "Failed to get rx dma irq");
- goto fail;
- }
- }
-
- return 0;
-
-fail:
- prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
- return ret;
-}
-
-static int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
- dma_addr_t desc_dma;
- dma_addr_t buf_dma;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
- if (!desc_rx) {
- netdev_err(ndev, "rx push: failed to allocate descriptor\n");
- return -ENOMEM;
- }
- desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
-
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
- cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
-
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
- desc_rx, desc_dma);
-}
-
-static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
-{
- u32 iepcount_lo, iepcount_hi, hi_rollover_count;
- u64 ns;
-
- iepcount_lo = lo & GENMASK(19, 0);
- iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
- hi_rollover_count = hi >> 11;
-
- ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
- ns = ns * cycle_time_ns + iepcount_lo;
-
- return ns;
-}
-
-static void emac_rx_timestamp(struct prueth_emac *emac,
- struct sk_buff *skb, u32 *psdata)
-{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
-
- u32 hi_sw = readl(emac->prueth->shram.va +
- TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
- ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
- IEP_DEFAULT_CYCLE_TIME_NS);
-
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
-}
-
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
-{
- struct prueth_rx_chn *rx_chn = &emac->rx_chns;
- u32 buf_dma_len, pkt_len, port_id = 0;
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
- dma_addr_t desc_dma, buf_dma;
- void **swdata;
- u32 *psdata;
- int ret;
-
- ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
- if (ret) {
- if (ret != -ENODATA)
- netdev_err(ndev, "rx pop: failed: %d\n", ret);
- return ret;
- }
-
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
- return 0;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
-
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
- /* firmware adds 4 CRC bytes, strip them */
- pkt_len -= 4;
- cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
- /* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
- */
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = skb;
- } else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
- }
-
- /* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
- if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
- ndev->stats.rx_errors++;
- ndev->stats.rx_dropped++;
- }
-
- return ret;
-}
-
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct prueth_rx_chn *rx_chn = data;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
-}
-
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@@ -661,208 +102,6 @@ static void tx_ts_work(struct prueth_emac *emac)
}
}
-static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
-{
- int i;
-
- /* search and get the next free slot */
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (!emac->tx_ts_skb[i]) {
- emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
- return i;
- }
- }
-
- return -EBUSY;
-}
-
-/**
- * emac_ndo_start_xmit - EMAC Transmit function
- * @skb: SKB pointer
- * @ndev: EMAC network adapter
- *
- * Called by the system to transmit a packet - we queue the packet in
- * EMAC hardware transmit queue
- * Doesn't wait for completion we'll check for TX completion in
- * emac_tx_complete_packets().
- *
- * Return: enum netdev_tx
- */
-static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
- struct prueth_emac *emac = netdev_priv(ndev);
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- dma_addr_t desc_dma, buf_dma;
- int i, ret = 0, q_idx;
- bool in_tx_ts = 0;
- int tx_ts_cookie;
- void **swdata;
- u32 pkt_len;
- u32 *epib;
-
- pkt_len = skb_headlen(skb);
- q_idx = skb_get_queue_mapping(skb);
-
- tx_chn = &emac->tx_chns[q_idx];
- netif_txq = netdev_get_tx_queue(ndev, q_idx);
-
- /* Map the linear buffer */
- buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: failed to map skb buffer\n");
- ret = NETDEV_TX_OK;
- goto drop_free_skb;
- }
-
- first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!first_desc) {
- netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
- dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
- goto drop_stop_q_busy;
- }
-
- cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- cppi5_hdesc_set_pkttype(first_desc, 0);
- epib = first_desc->epib;
- epib[0] = 0;
- epib[1] = 0;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- emac->tx_ts_enabled) {
- tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
- if (tx_ts_cookie >= 0) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Request TX timestamp */
- epib[0] = (u32)tx_ts_cookie;
- epib[1] = 0x80000000; /* TX TS request */
- emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
- in_tx_ts = 1;
- }
- }
-
- /* set dst tag to indicate internal qid at the firmware which is at
- * bit8..bit15. bit0..bit7 indicates port num for directed
- * packets in case of switch mode operation
- */
- cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
- swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
-
- /* Handle the case where skb is fragmented in pages */
- cur_desc = first_desc;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 frag_size = skb_frag_size(frag);
-
- next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!next_desc) {
- netdev_err(ndev,
- "tx: failed to allocate frag. descriptor\n");
- goto free_desc_stop_q_busy_cleanup_tx_ts;
- }
-
- buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: Failed to map skb page\n");
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- ret = NETDEV_TX_OK;
- goto cleanup_tx_ts;
- }
-
- cppi5_hdesc_reset_hbdesc(next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(next_desc,
- buf_dma, frag_size, buf_dma, frag_size);
-
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
- next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
- cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
-
- pkt_len += frag_size;
- cur_desc = next_desc;
- }
- WARN_ON_ONCE(pkt_len != skb->len);
-
- /* report bql before sending packet */
- netdev_tx_sent_queue(netif_txq, pkt_len);
-
- cppi5_hdesc_set_pktlen(first_desc, pkt_len);
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- /* cppi5_desc_dump(first_desc, 64); */
-
- skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
- if (ret) {
- netdev_err(ndev, "tx: push failed: %d\n", ret);
- goto drop_free_descs;
- }
-
- if (in_tx_ts)
- atomic_inc(&emac->tx_ts_pending);
-
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
- netif_tx_stop_queue(netif_txq);
- /* Barrier, so that stop_queue visible to other cpus */
- smp_mb__after_atomic();
-
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS)
- netif_tx_wake_queue(netif_txq);
- }
-
- return NETDEV_TX_OK;
-
-cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
- }
-
-drop_free_descs:
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_free_skb:
- dev_kfree_skb_any(skb);
-
- /* error */
- ndev->stats.tx_dropped++;
- netdev_err(ndev, "tx: error: %d\n", ret);
-
- return ret;
-
-free_desc_stop_q_busy_cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
- }
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_stop_q_busy:
- netif_tx_stop_queue(netif_txq);
- return NETDEV_TX_BUSY;
-}
-
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct prueth_tx_chn *tx_chn = data;
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
-
- dev_kfree_skb_any(skb);
-}
-
static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
@@ -873,22 +112,6 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
-{
- struct prueth_emac *emac = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&emac->napi_rx);
-
- return IRQ_HANDLED;
-}
-
-struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
-};
-
static struct icssg_firmwares icssg_emac_firmwares[] = {
{
.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
@@ -953,41 +176,6 @@ halt_pru:
return ret;
}
-static void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-
-static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
-{
- int i;
-
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (emac->tx_ts_skb[i]) {
- dev_kfree_skb_any(emac->tx_ts_skb[i]);
- emac->tx_ts_skb[i] = NULL;
- }
- }
-}
-
/* called back by PHY layer if there is change in link state of hw port*/
static void emac_adjust_link(struct net_device *ndev)
{
@@ -1055,84 +243,14 @@ static void emac_adjust_link(struct net_device *ndev)
}
}
-static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
- struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- int flow = PRUETH_MAX_RX_FLOWS;
- int num_rx = 0;
- int cur_budget;
- int ret;
-
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
- break;
- }
- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
- enable_irq(emac->rx_chns.irq[rx_flow]);
-
- return num_rx;
-}
-
-static int prueth_prepare_rx_chan(struct prueth_emac *emac,
- struct prueth_rx_chn *chn,
- int buf_size)
-{
- struct sk_buff *skb;
- int i, ret;
-
- for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- ret = prueth_dma_rx_push(emac, skb, chn);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
- chn->name, ret);
- kfree_skb(skb);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
- bool free_skb)
-{
- int i;
-
- for (i = 0; i < ch_num; i++) {
- if (free_skb)
- k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
- &emac->tx_chns[i],
- prueth_tx_cleanup);
- k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
- }
-}
-
-static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
- int num_flows, bool disable)
-{
- int i;
-
- for (i = 0; i < num_flows; i++)
- k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
- if (disable)
- k3_udma_glue_disable_rx_chn(chn->rx_chn);
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ return HRTIMER_NORESTART;
}
static int emac_phy_connect(struct prueth_emac *emac)
@@ -1329,7 +447,7 @@ static int emac_ndo_open(struct net_device *ndev)
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0);
+ icssg_class_default(prueth->miig_rt, slice, 0, false);
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
@@ -1474,8 +592,10 @@ static int emac_ndo_stop(struct net_device *ndev)
netdev_err(ndev, "tx teardown timeout\n");
prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++)
+ for (i = 0; i < emac->tx_ch_num; i++) {
napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ }
max_rx_flows = PRUETH_MAX_RX_FLOWS;
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
@@ -1483,6 +603,7 @@ static int emac_ndo_stop(struct net_device *ndev)
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
cancel_work_sync(&emac->rx_mode_work);
@@ -1508,11 +629,6 @@ static int emac_ndo_stop(struct net_device *ndev)
return 0;
}
-static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
-{
- ndev->stats.tx_errors++;
-}
-
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
@@ -1558,116 +674,6 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
-static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- emac->tx_ts_enabled = 0;
- break;
- case HWTSTAMP_TX_ON:
- emac->tx_ts_enabled = 1;
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- emac->rx_ts_enabled = 0;
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- emac->rx_ts_enabled = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return emac_get_ts_config(ndev, ifr);
- case SIOCSHWTSTAMP:
- return emac_set_ts_config(ndev, ifr);
- default:
- break;
- }
-
- return phy_do_ioctl(ndev, ifr, cmd);
-}
-
-static void emac_ndo_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
-
- emac_update_hardware_stats(emac);
-
- stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
- stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
- stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
- stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
- stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
- stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
- stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
-
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
- stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
-}
-
-static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
- size_t len)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- int ret;
-
- ret = snprintf(name, len, "p%d", emac->port_id);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1681,42 +687,6 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
};
-/* get emac_port corresponding to eth_node name */
-static int prueth_node_port(struct device_node *eth_node)
-{
- u32 port_id;
- int ret;
-
- ret = of_property_read_u32(eth_node, "reg", &port_id);
- if (ret)
- return ret;
-
- if (port_id == 0)
- return PRUETH_PORT_MII0;
- else if (port_id == 1)
- return PRUETH_PORT_MII1;
- else
- return PRUETH_PORT_INVALID;
-}
-
-/* get MAC instance corresponding to eth_node name */
-static int prueth_node_mac(struct device_node *eth_node)
-{
- u32 port_id;
- int ret;
-
- ret = of_property_read_u32(eth_node, "reg", &port_id);
- if (ret)
- return ret;
-
- if (port_id == 0)
- return PRUETH_MAC0;
- else if (port_id == 1)
- return PRUETH_MAC1;
- else
- return PRUETH_MAC_INVALID;
-}
-
static int prueth_netdev_init(struct prueth *prueth,
struct device_node *eth_node)
{
@@ -1844,6 +814,9 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->features = ndev->hw_features;
netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ emac->rx_hrtimer.function = &emac_rx_timer_callback;
prueth->emac[mac] = emac;
return 0;
@@ -1860,90 +833,6 @@ free_ndev:
return ret;
}
-static void prueth_netdev_exit(struct prueth *prueth,
- struct device_node *eth_node)
-{
- struct prueth_emac *emac;
- enum prueth_mac mac;
-
- mac = prueth_node_mac(eth_node);
- if (mac == PRUETH_MAC_INVALID)
- return;
-
- emac = prueth->emac[mac];
- if (!emac)
- return;
-
- if (of_phy_is_fixed_link(emac->phy_node))
- of_phy_deregister_fixed_link(emac->phy_node);
-
- netif_napi_del(&emac->napi_rx);
-
- pruss_release_mem_region(prueth->pruss, &emac->dram);
- destroy_workqueue(emac->cmd_wq);
- free_netdev(emac->ndev);
- prueth->emac[mac] = NULL;
-}
-
-static int prueth_get_cores(struct prueth *prueth, int slice)
-{
- struct device *dev = prueth->dev;
- enum pruss_pru_id pruss_id;
- struct device_node *np;
- int idx = -1, ret;
-
- np = dev->of_node;
-
- switch (slice) {
- case ICSS_SLICE0:
- idx = 0;
- break;
- case ICSS_SLICE1:
- idx = 3;
- break;
- default:
- return -EINVAL;
- }
-
- prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
- if (IS_ERR(prueth->pru[slice])) {
- ret = PTR_ERR(prueth->pru[slice]);
- prueth->pru[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
- }
- prueth->pru_id[slice] = pruss_id;
-
- idx++;
- prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
- if (IS_ERR(prueth->rtu[slice])) {
- ret = PTR_ERR(prueth->rtu[slice]);
- prueth->rtu[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
- }
-
- idx++;
- prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
- if (IS_ERR(prueth->txpru[slice])) {
- ret = PTR_ERR(prueth->txpru[slice]);
- prueth->txpru[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
- }
-
- return 0;
-}
-
-static void prueth_put_cores(struct prueth *prueth, int slice)
-{
- if (prueth->txpru[slice])
- pru_rproc_put(prueth->txpru[slice]);
-
- if (prueth->rtu[slice])
- pru_rproc_put(prueth->rtu[slice]);
-
- if (prueth->pru[slice])
- pru_rproc_put(prueth->pru[slice]);
-}
-
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -2034,13 +923,13 @@ static int prueth_probe(struct platform_device *pdev)
}
if (eth0_node) {
- ret = prueth_get_cores(prueth, ICSS_SLICE0);
+ ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
}
if (eth1_node) {
- ret = prueth_get_cores(prueth, ICSS_SLICE1);
+ ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
}
@@ -2273,62 +1162,6 @@ static void prueth_remove(struct platform_device *pdev)
prueth_put_cores(prueth, ICSS_SLICE0);
}
-#ifdef CONFIG_PM_SLEEP
-static int prueth_suspend(struct device *dev)
-{
- struct prueth *prueth = dev_get_drvdata(dev);
- struct net_device *ndev;
- int i, ret;
-
- for (i = 0; i < PRUETH_NUM_MACS; i++) {
- ndev = prueth->registered_netdevs[i];
-
- if (!ndev)
- continue;
-
- if (netif_running(ndev)) {
- netif_device_detach(ndev);
- ret = emac_ndo_stop(ndev);
- if (ret < 0) {
- netdev_err(ndev, "failed to stop: %d", ret);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int prueth_resume(struct device *dev)
-{
- struct prueth *prueth = dev_get_drvdata(dev);
- struct net_device *ndev;
- int i, ret;
-
- for (i = 0; i < PRUETH_NUM_MACS; i++) {
- ndev = prueth->registered_netdevs[i];
-
- if (!ndev)
- continue;
-
- if (netif_running(ndev)) {
- ret = emac_ndo_open(ndev);
- if (ret < 0) {
- netdev_err(ndev, "failed to start: %d", ret);
- return ret;
- }
- netif_device_attach(ndev);
- }
- }
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops prueth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
-};
-
static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 8b6d6b497010..a78c5eb75fb8 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -55,6 +55,8 @@
#define ICSSG_NUM_STANDARD_STATS 31
#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
/* Firmware status codes */
#define ICSS_HS_FW_READY 0x55555555
#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
@@ -106,6 +108,8 @@ struct prueth_tx_chn {
u32 descs_num;
unsigned int irq;
char name[32];
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout_ns;
};
struct prueth_rx_chn {
@@ -125,8 +129,12 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* Minimum coalesce time in usecs for both Tx and Rx */
+#define ICSSG_MIN_COALESCE_USECS 20
+
/* data for each emac port */
struct prueth_emac {
+ bool is_sr1;
bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
@@ -155,6 +163,10 @@ struct prueth_emac {
int rx_flow_id_base;
int tx_ch_num;
+ /* SR1.0 Management channel */
+ struct prueth_rx_chn rx_mgm_chn;
+ int rx_mgm_flow_id_base;
+
spinlock_t lock; /* serialize access */
/* TX HW Timestamping */
@@ -165,7 +177,7 @@ struct prueth_emac {
u8 cmd_seq;
/* shutdown related */
- u32 cmd_data[4];
+ __le32 cmd_data[4];
struct completion cmd_complete;
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
@@ -176,6 +188,10 @@ struct prueth_emac {
struct delayed_work stats_work;
u64 stats[ICSSG_NUM_STATS];
+
+ /* RX IRQ Coalescing Related */
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout_ns;
};
/**
@@ -188,6 +204,12 @@ struct prueth_pdata {
u32 quirk_10m_link_issue:1;
};
+struct icssg_firmwares {
+ char *pru;
+ char *rtu;
+ char *txpru;
+};
+
/**
* struct prueth - PRUeth structure
* @dev: device
@@ -243,6 +265,13 @@ struct emac_tx_ts_response {
u32 hi_ts;
};
+struct emac_tx_ts_response_sr1 {
+ __le32 lo_ts;
+ __le32 hi_ts;
+ __le32 reserved;
+ __le32 cookie;
+};
+
/* get PRUSS SLICE number from prueth_emac */
static inline int prueth_emac_slice(struct prueth_emac *emac)
{
@@ -257,12 +286,17 @@ static inline int prueth_emac_slice(struct prueth_emac *emac)
}
extern const struct ethtool_ops icssg_ethtool_ops;
+extern const struct dev_pm_ops prueth_dev_pm_ops;
/* Classifier helpers */
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac);
void icssg_class_disable(struct regmap *miig_rt, int slice);
-void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti);
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1);
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice);
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev);
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr);
/* config helpers */
@@ -285,4 +319,54 @@ u32 icssg_queue_level(struct prueth *prueth, int queue);
void emac_stats_work_handler(struct work_struct *work);
void emac_update_hardware_stats(struct prueth_emac *emac);
int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name);
+
+/* Common functions */
+void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows);
+void prueth_cleanup_tx_chns(struct prueth_emac *emac);
+void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num);
+void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc);
+int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget, bool *tdown);
+int prueth_ndev_add_tx_napi(struct prueth_emac *emac);
+int prueth_init_tx_chns(struct prueth_emac *emac);
+int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num);
+int prueth_dma_rx_push(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ struct prueth_rx_chn *rx_chn);
+void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata);
+enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+irqreturn_t prueth_rx_irq(int irq, void *dev_id);
+void prueth_emac_stop(struct prueth_emac *emac);
+void prueth_cleanup_tx_ts(struct prueth_emac *emac);
+int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget);
+int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size);
+void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb);
+void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable);
+void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
+int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+void emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats);
+int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len);
+int prueth_node_port(struct device_node *eth_node);
+int prueth_node_mac(struct device_node *eth_node);
+void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node);
+int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1);
+void prueth_put_cores(struct prueth *prueth, int slice);
+
+/* Revision specific helper */
+u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
new file mode 100644
index 000000000000..7b3304bbd7fc
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG SR1.0 Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (c) Siemens AG, 2024
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/pruss_driver.h>
+
+#include "icssg_prueth.h"
+#include "icssg_mii_rt.h"
+#include "../k3-cppi-desc-pool.h"
+
+#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"
+
+/* SR1: Set buffer sizes for the pools. There are 8 internal queues
+ * implemented in firmware, but only 4 tx channels/threads in the egress
+ * direction to firmware. A high-priority queue is needed for management
+ * messages, since they shouldn't be blocked even in high-traffic
+ * situations. So in the max case use Q0-Q2 as data queues and Q3 as the
+ * management queue. However, for ease of configuration, use the max
+ * data queue + 1 for management messages if we are not using the max
+ * case.
+ *
+ * Allocate 4 MTU-sized buffers per data queue. Firmware requires
+ * pool sizes to be set for its internal queues. Set the upper 5 queue
+ * pool sizes to the minimum of 128 bytes, since there are only 3 tx
+ * data channels and the management queue requires only a minimal buffer;
+ * i.e. the lower queues are used by the driver, and the highest-priority
+ * queue among them is used for management messages.
+ */
+
+static int emac_egress_buf_pool_size[] = {
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
+};
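+
+/* Worked example of the sizing above (illustrative only): three data queues
+ * of 4 x 1536 bytes plus five minimum-size pools give the per-slice egress
+ * buffer budget handed to the firmware.
+ */
+#define PRUETH_SR1_EGRESS_POOL_TOTAL_EXAMPLE \
+ (3 * PRUETH_EMAC_BUF_POOL_SIZE_SR1 + \
+ 5 * PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1) /* 19072 bytes */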
+
+static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
+ int slice)
+{
+ struct icssg_sr1_config config;
+ void __iomem *va;
+ int i, index;
+
+ memset(&config, 0, sizeof(config));
+ config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
+ config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
+ config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
+ config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
+ config.rand_seed = cpu_to_le32(get_random_u32());
+
+ for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
+ index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
+ config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
+ }
+
+ va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
+ memcpy_toio(va, &config, sizeof(config));
+
+ emac->speed = SPEED_1000;
+ emac->duplex = DUPLEX_FULL;
+}
+
+static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
+{
+ struct cppi5_host_desc_t *first_desc;
+ u32 pkt_len = sizeof(emac->cmd_data);
+ __le32 *data = emac->cmd_data;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_tx_chn *tx_chn;
+ void **swdata;
+ int ret = 0;
+ u32 *epib;
+
+ netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
+
+ /* only one command at a time allowed to firmware */
+ mutex_lock(&emac->cmd_lock);
+ data[0] = cpu_to_le32(cmd);
+
+ /* highest priority channel for management messages */
+ tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *swdata = data;
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ /* send command */
+ reinit_completion(&emac->cmd_complete);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
+ goto free_desc;
+ }
+ ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
+ if (!ret)
+ netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
+
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+free_desc:
+ prueth_xmit_free(tx_chn, first_desc);
+err_unlock:
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+}
+
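+/* Build the speed/duplex command word for the firmware from the current
+ * RGMII status, roughly:
+ *   cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 | (speed << 1) | (duplex << 3)
+ * and send it over the management channel.
+ */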
+static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
+{
+ u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
+ /* firmware expects speed settings in bit 2-1 */
+ val <<= 1;
+ cmd |= val;
+
+ val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
+ /* firmware expects full duplex settings in bit 3 */
+ val <<= 3;
+ cmd |= val;
+
+ emac_send_command_sr1(emac, cmd);
+}
+
+/* Called back by the PHY layer when the link state of the HW port changes */
+static void emac_adjust_link_sr1(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ unsigned long flags;
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+
+ /* f/w should support 100 & 1000 */
+ emac->speed = SPEED_1000;
+
+ /* half duplex may not be supported by f/w */
+ emac->duplex = DUPLEX_FULL;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+
+ /* update RGMII and MII configuration based on PHY negotiated
+ * values
+ */
+ if (emac->link) {
+ /* Set the RGMII cfg for gig en and full duplex */
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* update the Tx IPG based on 100M/1G speed */
+ spin_lock_irqsave(&emac->lock, flags);
+ icssg_config_ipg(emac);
+ spin_unlock_irqrestore(&emac->lock, flags);
+ icssg_config_set_speed_sr1(emac);
+ }
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue */
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
+ }
+}
+
+static int emac_phy_connect(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct net_device *ndev = emac->ndev;
+ /* connect PHY */
+ ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
+ &emac_adjust_link_sr1, 0,
+ emac->phy_if);
+ if (!ndev->phydev) {
+ dev_err(prueth->dev, "couldn't connect to phy %s\n",
+ emac->phy_node->full_name);
+ return -ENODEV;
+ }
+
+ if (!emac->half_duplex) {
+ dev_dbg(prueth->dev, "half duplex mode is not supported\n");
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ }
+
+ /* Remove 100Mbit/s half-duplex due to RGMII misreporting the
+ * connection as full duplex
+ */
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII)
+ phy_set_max_speed(ndev->phydev, SPEED_100);
+
+ return 0;
+}
+
+/* Get one packet from the requested flow_id
+ *
+ * Returns the skb pointer if a packet was found, else NULL.
+ * The caller must free the returned skb.
+ */
+static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ u32 buf_dma_len, pkt_len;
+ void **swdata;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
+ return NULL;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
+ return NULL;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ /* Fix FW bug about incorrect PSDATA size */
+ if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
+ cppi5_hdesc_update_psdata_size(desc_rx,
+ PRUETH_NAV_PS_DATA_SIZE);
+ }
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with old skb to prevent a stall
+ */
+ if (!new_skb) {
+ netdev_err(ndev,
+ "skb alloc failed, dropped mgm pkt from flow %d\n",
+ flow_id);
+ new_skb = skb;
+ skb = NULL; /* return NULL */
+ } else {
+ /* return the filled skb */
+ skb_put(skb, pkt_len);
+ }
+
+ /* queue another DMA */
+ ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(new_skb);
+
+ return skb;
+}
+
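+/* Handle a TX timestamp response from the firmware: the cookie indexes the
+ * pending skb in tx_ts_skb[], the timestamp arrives as two 32-bit halves
+ * which are combined and delivered via skb_tstamp_tx().
+ */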
+static void prueth_tx_ts_sr1(struct prueth_emac *emac,
+ struct emac_tx_ts_response_sr1 *tsr)
+{
+ struct skb_shared_hwtstamps ssh;
+ u32 hi_ts, lo_ts, cookie;
+ struct sk_buff *skb;
+ u64 ns;
+
+ hi_ts = le32_to_cpu(tsr->hi_ts);
+ lo_ts = le32_to_cpu(tsr->lo_ts);
+
+ ns = (u64)hi_ts << 32 | lo_ts;
+
+ cookie = le32_to_cpu(tsr->cookie);
+ if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
+ netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ cookie);
+ return;
+ }
+
+ skb = emac->tx_ts_skb[cookie];
+ emac->tx_ts_skb[cookie] = NULL; /* free slot */
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+}
+
+static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct sk_buff *skb;
+
+ skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!skb)
+ return IRQ_NONE;
+
+ prueth_tx_ts_sr1(emac, (void *)skb->data);
+ dev_kfree_skb_any(skb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct sk_buff *skb;
+ u32 rsp;
+
+ skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!skb)
+ return IRQ_NONE;
+
+ /* Process command response */
+ rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
+ netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
+ complete(&emac->cmd_complete);
+ } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
+ netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
+ complete(&emac->cmd_complete);
+ }
+
+ dev_kfree_skb_any(skb);
+
+ return IRQ_HANDLED;
+}
+
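+/* One PRU/RTU firmware pair per ICSSG slice, indexed by slice number in
+ * prueth_emac_start() below.
+ */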
+static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
+ }
+};
+
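+/* Write the slice configuration to shared memory, then load and boot the
+ * PRU and RTU cores for this port's slice; if the RTU fails to boot, the
+ * already running PRU is shut down again.
+ */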
+static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+{
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+ int slice, ret;
+
+ firmwares = icssg_sr1_emac_firmwares;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0) {
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ icssg_config_sr1(prueth, emac, slice);
+
+ ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to set PRU%d firmware: %d\n", slice, ret);
+ return ret;
+ }
+
+ ret = rproc_boot(prueth->pru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ return -EINVAL;
+ }
+
+ ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to set RTU%d firmware: %d\n", slice, ret);
+ goto halt_pru;
+ }
+
+ ret = rproc_boot(prueth->rtu[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ goto halt_pru;
+ }
+
+ emac->fw_running = 1;
+ return 0;
+
+halt_pru:
+ rproc_shutdown(prueth->pru[slice]);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int num_data_chn = emac->tx_ch_num - 1;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct device *dev = prueth->dev;
+ int max_rx_flows, rx_flow;
+ int ret, i;
+
+ /* clear SMEM and MSMC settings for all slices */
+ if (!prueth->emacs_initialized) {
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+ }
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+ icssg_class_default(prueth->miig_rt, slice, 0, true);
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
+ if (ret) {
+ dev_err(dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ init_completion(&emac->cmd_complete);
+ ret = prueth_init_tx_chns(emac);
+ if (ret) {
+ dev_err(dev, "failed to init tx channel: %d\n", ret);
+ return ret;
+ }
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
+ ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
+ max_rx_flows, PRUETH_MAX_RX_DESC);
+ if (ret) {
+ dev_err(dev, "failed to init rx channel: %d\n", ret);
+ goto cleanup_tx;
+ }
+
+ ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
+ PRUETH_MAX_RX_MGM_FLOWS_SR1,
+ PRUETH_MAX_RX_MGM_DESC_SR1);
+ if (ret) {
+ dev_err(dev, "failed to init rx mgmt channel: %d\n",
+ ret);
+ goto cleanup_rx;
+ }
+
+ ret = prueth_ndev_add_tx_napi(emac);
+ if (ret)
+ goto cleanup_rx_mgm;
+
+ /* we use only the highest priority flow for now i.e. @irq[3] */
+ rx_flow = PRUETH_RX_FLOW_DATA_SR1;
+ ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX IRQ\n");
+ goto cleanup_napi;
+ }
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
+ NULL, prueth_rx_mgm_rsp_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management RSP IRQ\n");
+ goto free_rx_irq;
+ }
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
+ NULL, prueth_rx_mgm_ts_thread_sr1,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management TS IRQ\n");
+ goto free_rx_mgm_rsp_irq;
+ }
+
+ /* reset and start PRU firmware */
+ ret = prueth_emac_start(prueth, emac);
+ if (ret)
+ goto free_rx_mgmt_ts_irq;
+
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
+ /* Prepare RX */
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ goto stop;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_mgm_chn;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ }
+
+ /* Enable NAPI in Tx and Rx direction */
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ napi_enable(&emac->napi_rx);
+
+ /* start PHY */
+ phy_start(ndev->phydev);
+
+ prueth->emacs_initialized++;
+
+ queue_work(system_long_wq, &emac->stats_work.work);
+
+ return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB to complete. So pass false for free_skb.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+reset_rx_mgm_chn:
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+stop:
+ prueth_emac_stop(emac);
+free_rx_mgmt_ts_irq:
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
+ emac);
+free_rx_mgm_rsp_irq:
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
+ emac);
+free_rx_irq:
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+cleanup_napi:
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+cleanup_rx_mgm:
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1);
+cleanup_rx:
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+cleanup_tx:
+ prueth_cleanup_tx_chns(emac);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to stop or bring down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
+ struct prueth *prueth = emac->prueth;
+ int max_rx_flows;
+ int ret, i;
+
+ /* inform the upper layers. */
+ netif_tx_stop_all_queues(ndev);
+
+ /* block packets from wire */
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+
+ emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(ndev, "tx teardown timeout\n");
+
+ prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_disable(&emac->tx_chns[i].napi_tx);
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+ /* Teardown RX MGM channel */
+ k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
+
+ napi_disable(&emac->napi_rx);
+
+ /* Cancel the stats work that was queued in ndo_open() */
+ cancel_delayed_work_sync(&emac->stats_work);
+
+ /* stop PRUs */
+ prueth_emac_stop(emac);
+
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+ prueth_cleanup_tx_chns(emac);
+
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+
+ prueth->emacs_initialized--;
+
+ return 0;
+}
+
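+/* RX mode precedence: promiscuous mode overrides all-multicast, which in
+ * turn overrides the individual multicast list programmed into the
+ * classifier.
+ */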
+static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ bool allmulti = ndev->flags & IFF_ALLMULTI;
+ bool promisc = ndev->flags & IFF_PROMISC;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ if (promisc) {
+ icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
+ return;
+ }
+
+ if (allmulti) {
+ icssg_class_default(prueth->miig_rt, slice, 1, true);
+ return;
+ }
+
+ icssg_class_default(prueth->miig_rt, slice, 0, true);
+ if (!netdev_mc_empty(ndev)) {
+ /* program multicast address list into Classifier */
+ icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
+ }
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_ndo_open,
+ .ndo_stop = emac_ndo_stop,
+ .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
+ .ndo_eth_ioctl = emac_ndo_ioctl,
+ .ndo_get_stats64 = emac_ndo_get_stats64,
+ .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
+};
+
+static int prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ enum prueth_mac mac;
+ /* Only enable one TX channel due to timeouts when
+ * using multiple channels
+ */
+ int num_tx_chn = 1;
+ int ret;
+
+ port = prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
+ if (!ndev)
+ return -ENOMEM;
+
+ emac = netdev_priv(ndev);
+ emac->is_sr1 = 1;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+ emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
+ if (!emac->cmd_wq) {
+ ret = -ENOMEM;
+ goto free_ndev;
+ }
+
+ INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+
+ ret = pruss_request_mem_region(prueth->pruss,
+ port == PRUETH_PORT_MII0 ?
+ PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
+ &emac->dram);
+ if (ret) {
+ dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
+ ret = -ENOMEM;
+ goto free_wq;
+ }
+
+ /* SR1.0 uses a dedicated high priority channel
+ * to send commands to the firmware
+ */
+ emac->tx_ch_num = 2;
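+ /* i.e. tx_chns[0] carries data and tx_chns[tx_ch_num - 1], the highest
+ * priority channel, carries the management commands
+ * (see emac_send_command_sr1()).
+ */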
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ spin_lock_init(&emac->lock);
+ mutex_init(&emac->cmd_lock);
+
+ emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
+ if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
+ dev_err(prueth->dev, "couldn't find phy-handle\n");
+ ret = -ENODEV;
+ goto free;
+ } else if (of_phy_is_fixed_link(eth_node)) {
+ ret = of_phy_register_fixed_link(eth_node);
+ if (ret) {
+ ret = dev_err_probe(prueth->dev, ret,
+ "failed to register fixed-link phy\n");
+ goto free;
+ }
+
+ emac->phy_node = eth_node;
+ }
+
+ ret = of_get_phy_mode(eth_node, &emac->phy_if);
+ if (ret) {
+ dev_err(prueth->dev, "could not get phy-mode property\n");
+ goto free;
+ }
+
+ if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(emac->phy_if)) {
+ dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* AM65 SR2.0 has the TX internal delay always enabled in hardware
+ * and it is not possible to disable it. The switch-case block below
+ * describes how we handle the different PHY modes based on this
+ * hardware restriction.
+ */
+ switch (emac->phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
+ ret = -EINVAL;
+ goto free;
+ default:
+ break;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
+ ndev->max_mtu = PRUETH_MAX_MTU;
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &icssg_ethtool_ops;
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ prueth->emac[mac] = emac;
+
+ return 0;
+
+free:
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+free_wq:
+ destroy_workqueue(emac->cmd_wq);
+free_ndev:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth_node, *eth_ports_node;
+ struct device_node *eth0_node = NULL;
+ struct device_node *eth1_node = NULL;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ u32 msmc_ram_size;
+ int i, ret;
+
+ np = dev->of_node;
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, prueth);
+ prueth->pdev = pdev;
+ prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
+
+ prueth->dev = dev;
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ eth_node, ret);
+ }
+
+ of_node_get(eth_node);
+
+ if (reg == 0) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ dev_err(dev, "port reg should be 0 or 1\n");
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node == eth1_node) {
+ dev_err(dev, "port0 and port1 can't have same reg\n");
+ of_node_put(eth0_node);
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
+ if (IS_ERR(prueth->miig_rt)) {
+ dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
+ if (ret)
+ goto put_cores;
+ }
+
+ if (eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
+ if (ret)
+ goto put_cores;
+ }
+
+ pruss = pruss_get(eth0_node ?
+ prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_cores;
+ }
+
+ prueth->pruss = pruss;
+
+ ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
+ &prueth->shram);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+ goto put_pruss;
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+
+ goto put_mem;
+ }
+
+ msmc_ram_size = MSMC_RAM_SIZE_SR1;
+
+ prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ msmc_ram_size);
+
+ if (!prueth->msmcram.va) {
+ ret = -ENOMEM;
+ dev_err(dev, "unable to allocate MSMC resource\n");
+ goto put_mem;
+ }
+ prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va);
+ prueth->msmcram.size = msmc_ram_size;
+ memset_io(prueth->msmcram.va, 0, msmc_ram_size);
+ dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
+ prueth->msmcram.va, prueth->msmcram.size);
+
+ if (eth0_node) {
+ ret = prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth0_node->name);
+ goto free_pool;
+ }
+
+ if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ }
+
+ if (eth1_node) {
+ ret = prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth1_node->name);
+ goto netdev_exit;
+ }
+
+ if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0\n");
+ goto netdev_exit;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+ emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1\n");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+ emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
+ }
+
+ dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+
+ if (prueth->emac[i]->ndev->phydev) {
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ }
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va, msmc_ram_size);
+
+put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+put_pruss:
+ pruss_put(prueth->pruss);
+
+put_cores:
+ if (eth1_node) {
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
+ }
+
+ if (eth0_node) {
+ prueth_put_cores(prueth, ICSS_SLICE0);
+ of_node_put(eth0_node);
+ }
+
+ return ret;
+}
+
+static void prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ phy_stop(prueth->emac[i]->ndev->phydev);
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va,
+ MSMC_RAM_SIZE_SR1);
+
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC1])
+ prueth_put_cores(prueth, ICSS_SLICE1);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ prueth_put_cores(prueth, ICSS_SLICE0);
+}
+
+static const struct prueth_pdata am654_sr1_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = prueth_probe,
+ .remove_new = prueth_remove,
+ .driver = {
+ .name = "icssg-prueth-sr1",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
+MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index 05cc7aab1ec8..739bae8e11ee 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -22,6 +22,7 @@ struct k3_cppi_desc_pool {
size_t mem_size;
size_t num_desc;
struct gen_pool *gen_pool;
+ void **desc_infos;
};
void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
@@ -37,7 +38,11 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
pool->dma_addr);
+ kfree(pool->desc_infos);
+
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+
+ kfree(pool);
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy);
@@ -50,7 +55,7 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
const char *pool_name = NULL;
int ret = -ENOMEM;
- pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return ERR_PTR(ret);
@@ -62,18 +67,21 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
pool_name = kstrdup_const(name ? name : dev_name(pool->dev),
GFP_KERNEL);
if (!pool_name)
- return ERR_PTR(-ENOMEM);
+ goto gen_pool_create_fail;
pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
if (!pool->gen_pool) {
- ret = -ENOMEM;
- dev_err(pool->dev, "pool create failed %d\n", ret);
kfree_const(pool_name);
goto gen_pool_create_fail;
}
pool->gen_pool->name = pool_name;
+ pool->desc_infos = kcalloc(pool->num_desc,
+ sizeof(*pool->desc_infos), GFP_KERNEL);
+ if (!pool->desc_infos)
+ goto gen_pool_desc_infos_alloc_fail;
+
pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,
&pool->dma_addr, GFP_KERNEL);
@@ -94,9 +102,11 @@ gen_pool_add_virt_fail:
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
pool->dma_addr);
dma_alloc_fail:
+ kfree(pool->desc_infos);
+gen_pool_desc_infos_alloc_fail:
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
gen_pool_create_fail:
- devm_kfree(pool->dev, pool);
+ kfree(pool);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name);
@@ -132,5 +142,31 @@ size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail);
+size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool)
+{
+ return pool->desc_size;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_size);
+
+void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool)
+{
+ return pool->cpumem;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_cpuaddr);
+
+void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool,
+ int desc_idx, void *info)
+{
+ pool->desc_infos[desc_idx] = info;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info_set);
+
+void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool,
+ int desc_idx)
+{
+ return pool->desc_infos[desc_idx];
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API");
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
index a7e3fa5e7b62..851d352b338b 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
@@ -26,5 +26,11 @@ k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma);
void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool);
void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr);
size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool);
+size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool);
+void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool);
+void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool,
+ int desc_idx, void *info);
+void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool,
+ int desc_idx);
#endif /* K3_CPPI_DESC_POOL_H_ */
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index da287ef65be7..00773f5e4d7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -20,6 +20,7 @@ config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
depends on PCI || ARCH_VT8500 || COMPILE_TEST
+ depends on HAS_IOPORT
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1c6b2a9bba08..55fff4d0d380 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2294,7 +2294,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
int ret = 0;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
goto out_0;
}
@@ -2336,7 +2336,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
tmp_vptr->rx = rx;
tmp_vptr->tx = tx;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 945c13d1a982..7c4b6881a93f 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1408,7 +1408,7 @@ int wx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct wx *wx = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
wx_set_rx_buffer_len(wx);
return 0;
@@ -1958,6 +1958,8 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
+ bitmap_zero(wx->state, WX_STATE_NBITS);
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6dff2c85682d..68bde91b67a0 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
@@ -1674,14 +1674,14 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* minmum one for queue, one for misc*/
nvecs = 1;
nvecs = pci_alloc_irq_vectors(pdev, nvecs,
- nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_INTX);
if (nvecs == 1) {
if (pdev->msi_enabled)
wx_err(wx, "Fallback to MSI.\n");
else
- wx_err(wx, "Fallback to LEGACY.\n");
+ wx_err(wx, "Fallback to INTx.\n");
} else {
- wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ wx_err(wx, "Failed to allocate MSI/INTx interrupts. Error: %d\n", nvecs);
return nvecs;
}
@@ -2127,7 +2127,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
* wx_configure_vectors - Configure vectors for hardware
* @wx: board private structure
*
- * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx
* interrupts.
**/
void wx_configure_vectors(struct wx *wx)
@@ -2690,15 +2690,63 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
wx->rss_enabled = false;
}
- if (changed &
- (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX))
+ netdev->features = features;
+
+ if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ wx->do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
- return 1;
+ return 0;
}
EXPORT_SYMBOL(wx_set_features);
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct wx *wx = netdev_priv(netdev);
+
+ if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
+ if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_INSERTION_FEATURES) {
+ if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
+ (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_INSERTION_FEATURES;
+ features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_FILTERING_FEATURES) {
+ if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
+ (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(wx_fix_features);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index ec909e876720..c41b29ea812f 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -30,6 +30,8 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1fdeb464d5f4..5aaf7b1fa2db 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -982,8 +982,13 @@ struct wx_hw_stats {
u64 qmprc;
};
+enum wx_state {
+ WX_STATE_RESETTING,
+ WX_STATE_NBITS, /* must be last */
+};
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ DECLARE_BITMAP(state, WX_STATE_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1071,6 +1076,8 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+
+ void (*do_reset)(struct net_device *netdev);
};
#define WX_INTR_ALL (~0ULL)
@@ -1131,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
return container_of(config, struct wx, phylink_config);
}
+static inline int wx_set_state_reset(struct wx *wx)
+{
+ u8 timeout = 50;
+
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
#endif /* _WX_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 786a652ae64f..46a5a3e95202 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
ngbe_down(wx);
@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx_configure(wx);
ngbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int ngbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index fdd6b4f70b7a..e894e01d030d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -499,6 +499,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index db675512ce4d..31fde3fa7c6b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
txgbe_down(wx);
@@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
txgbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int txgbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index bd4624d14ca0..8c7a74981b90 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -269,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->do_reset = txgbe_do_reset;
+
return 0;
}
@@ -421,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+static void txgbe_reinit_locked(struct wx *wx)
+{
+ int err = 0;
+
+ netif_trans_update(wx->netdev);
+
+ err = wx_set_state_reset(wx);
+ if (err) {
+ wx_err(wx, "wait device reset timeout\n");
+ return;
+ }
+
+ txgbe_down(wx);
+ txgbe_up(wx);
+
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ txgbe_reinit_locked(wx);
+ else
+ txgbe_reset(wx);
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -428,6 +458,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5b5d5e4310d1..5f502265f0a6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -302,7 +302,7 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_mac_change(wx->phylink, up);
+ phylink_pcs_change(&txgbe->xpcs->pcs, up);
return IRQ_HANDLED;
}
@@ -571,7 +571,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
+ snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 1b4ff50d5857..f434a7865cb7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -134,6 +134,7 @@ extern char txgbe_driver_name[];
void txgbe_down(struct wx *wx);
void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9df39cf8b097..1072e2210aed 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
}
/* map device registers */
- lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+ lp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 807ead678551..fa5500decc96 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -359,6 +359,7 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
+ * @skb: Pointer to SKB transferred using DMA
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
@@ -399,7 +400,6 @@ struct skbuf_dma_descriptor {
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
- * @phy_node: Pointer to device node structure
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
* @pcs_phy: Reference to PCS/PMA PHY if used
@@ -537,7 +537,7 @@ struct axienet_local {
};
/**
- * struct axiethernet_option - Used to set axi ethernet hardware options
+ * struct axienet_option - Used to set axi ethernet hardware options
* @opt: Option to be set.
* @reg: Register offset to be written for setting the option
* @m_or: Mask to be ORed for setting the option in the register
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index aaf780fd4f5e..c29809cd9201 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1641,7 +1641,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
XAE_TRL_SIZE) > lp->rxmem)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2f07fde361aa..9ca2643c921e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -20,7 +20,14 @@
#define DEFAULT_MDIO_FREQ 2500000 /* 2.5 MHz */
#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
-/* Wait till MDIO interface is ready to accept a new transaction.*/
+/**
+ * axienet_mdio_wait_until_ready - MDIO wait function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, negative value on errors
+ *
+ * Wait till MDIO interface is ready to accept a new transaction.
+ */
static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
@@ -30,14 +37,24 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
1, 20000);
}
-/* Enable the MDIO MDC. Called prior to a read/write operation */
+/**
+ * axienet_mdio_mdc_enable - MDIO MDC enable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Enable the MDIO MDC. Called prior to a read/write operation
+ */
static void axienet_mdio_mdc_enable(struct axienet_local *lp)
{
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK));
}
-/* Disable the MDIO MDC. Called after a read/write operation*/
+/**
+ * axienet_mdio_mdc_disable - MDIO MDC disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO MDC. Called after a read/write operation
+ */
static void axienet_mdio_mdc_disable(struct axienet_local *lp)
{
u32 mc_reg;
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
index 7497b9bea511..bfbdcf758afb 100644
--- a/drivers/net/ethernet/xircom/Kconfig
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_XIRCOM
config PCMCIA_XIRC2PS
tristate "Xircom 16-bit PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
Ethernet or Fast Ethernet card to your computer.
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index e9bc38fd2025..a31d5d5e6593 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1366,10 +1366,10 @@ do_config(struct net_device *dev, struct ifmap *map)
return -EINVAL;
if (!map->port) {
local->probe_port = 1;
- dev->if_port = 1;
+ WRITE_ONCE(dev->if_port, 1);
} else {
local->probe_port = 0;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
}
netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
do_reset(dev,1); /* not the fine way :-) */
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index e0d26148dfd9..8aff6a73ca0a 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1233,7 +1233,7 @@ static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 1fef8a9b1a0f..0fbbb7286008 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -254,7 +254,7 @@ static const char version[] =
#define DFX_BUS_TC(dev) 0
#endif
-#if defined(CONFIG_EISA) || defined(CONFIG_PCI)
+#ifdef CONFIG_HAS_IOPORT
#define dfx_use_mmio bp->mmio
#else
#define dfx_use_mmio true
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5fbe33a09bb0..b3ddc9a629d9 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -156,7 +156,6 @@ static void fjes_acpi_remove(struct acpi_device *device)
static struct acpi_driver fjes_acpi_driver = {
.name = DRV_NAME,
.class = DRV_NAME,
- .owner = THIS_MODULE,
.ids = fjes_acpi_ids,
.ops = {
.add = fjes_acpi_add,
@@ -811,7 +810,7 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
netif_tx_stop_all_queues(netdev);
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2f6739fe78af..51495cb4b9be 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -225,10 +225,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
- __be16 flags;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
- flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
- (gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam);
+ __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
@@ -238,9 +239,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
goto drop;
}
/* Update tunnel dst according to Geneve options. */
+ ip_tunnel_flags_zero(flags);
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
gnvh->options, gnvh->opt_len * 4,
- TUNNEL_GENEVE_OPT);
+ flags);
} else {
/* Drop packets w/ critical options,
* since we don't support any...
@@ -745,14 +748,15 @@ static void geneve_build_header(struct genevehdr *geneveh,
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
- geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags);
+ geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
+ info->key.tun_flags);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
- if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags))
ip_tunnel_info_opts_get(geneveh->options, info);
}
@@ -761,7 +765,7 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
bool xnet, int ip_hdr_len,
bool inner_proto_inherit)
{
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
struct genevehdr *gnvh;
__be16 inner_proto;
int min_headroom;
@@ -822,7 +826,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs4)
@@ -878,7 +882,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->cfg.collect_md) {
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
} else {
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
@@ -910,7 +915,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
}
@@ -929,7 +935,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs6)
@@ -998,7 +1004,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
&saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
}
#endif
@@ -1052,7 +1059,7 @@ static int geneve_change_mtu(struct net_device *dev, int new_mtu)
else if (new_mtu < dev->min_mtu)
new_mtu = dev->min_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1297,7 +1304,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
- return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
+ return !(info->key.tun_id || info->key.tos ||
+ !ip_tunnel_flags_empty(info->key.tun_flags) ||
info->key.ttl || info->key.label || info->key.tp_src ||
memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}
@@ -1435,7 +1443,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
"Remote IPv6 address cannot be Multicast");
return -EINVAL;
}
- info->key.tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
cfg->use_udp6_rx_checksums = true;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
@@ -1510,7 +1518,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
- info->key.tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
@@ -1520,7 +1528,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
- info->key.tun_flags &= ~TUNNEL_CSUM;
+ __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
"IPv6 support not enabled in the kernel");
@@ -1753,7 +1761,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
info->key.u.ipv4.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
- !!(info->key.tun_flags & TUNNEL_CSUM)))
+ test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags)))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1762,7 +1771,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
&info->key.u.ipv6.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
- !(info->key.tun_flags & TUNNEL_CSUM)))
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags)))
goto nla_put_failure;
#endif
}
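
Illustrative aside (not part of the patch): the geneve.c hunks above replace bitmask tests on a flag word (e.g. "flags & TUNNEL_CSUM") with per-bit lookups via test_bit()/__set_bit()/__clear_bit() on an unsigned long bitmap. The standalone C sketch below mimics that pattern with plain C; every identifier in it (DEMO_CSUM_BIT, demo_test_bit, ...) is invented for illustration and is not a kernel API.

/*
 * Minimal sketch of the bitmap-flag pattern the hunks above adopt.
 * All names here are hypothetical; only the bit arithmetic matters.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
#define DEMO_FLAG_WORDS 1

enum { DEMO_CSUM_BIT = 0, DEMO_DONT_FRAGMENT_BIT = 1 };

static void demo_set_bit(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int demo_test_bit(unsigned int nr, const unsigned long *map)
{
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
        unsigned long tun_flags[DEMO_FLAG_WORDS] = { 0 };

        demo_set_bit(DEMO_CSUM_BIT, tun_flags);

        /* analogous to: udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags) */
        printf("csum=%d dont_fragment=%d\n",
               demo_test_bit(DEMO_CSUM_BIT, tun_flags),
               demo_test_bit(DEMO_DONT_FRAGMENT_BIT, tun_flags));
        return 0;
}
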
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ba4704c2c640..427b91aca50d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -24,6 +24,7 @@
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
@@ -50,8 +51,14 @@ struct pdp_ctx {
u8 gtp_version;
u16 af;
- struct in_addr ms_addr_ip4;
- struct in_addr peer_addr_ip4;
+ union {
+ struct in_addr addr;
+ struct in6_addr addr6;
+ } ms;
+ union {
+ struct in_addr addr;
+ struct in6_addr addr6;
+ } peer;
struct sock *sk;
struct net_device *dev;
@@ -80,9 +87,15 @@ struct gtp_dev {
};
struct echo_info {
- struct in_addr ms_addr_ip4;
- struct in_addr peer_addr_ip4;
+ u16 af;
u8 gtp_version;
+
+ union {
+ struct in_addr addr;
+ } ms;
+ union {
+ struct in_addr addr;
+ } peer;
};
static unsigned int gtp_net_id __read_mostly;
@@ -121,8 +134,14 @@ static inline u32 ipv4_hashfn(__be32 ip)
return jhash_1word((__force u32)ip, gtp_h_initval);
}
+static u32 ipv6_hashfn(const struct in6_addr *ip6)
+{
+ return jhash_2words((__force u32)ip6->s6_addr32[0],
+ (__force u32)ip6->s6_addr32[1], gtp_h_initval);
+}
+
/* Resolve a PDP context structure based on the 64bit TID. */
-static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
@@ -130,7 +149,8 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
- if (pdp->gtp_version == GTP_V0 &&
+ if (pdp->af == family &&
+ pdp->gtp_version == GTP_V0 &&
pdp->u.v0.tid == tid)
return pdp;
}
@@ -138,7 +158,7 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
}
/* Resolve a PDP context structure based on the 32bit TEI. */
-static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
@@ -146,7 +166,8 @@ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
- if (pdp->gtp_version == GTP_V1 &&
+ if (pdp->af == family &&
+ pdp->gtp_version == GTP_V1 &&
pdp->u.v1.i_tei == tid)
return pdp;
}
@@ -163,7 +184,42 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
if (pdp->af == AF_INET &&
- pdp->ms_addr_ip4.s_addr == ms_addr)
+ pdp->ms.addr.s_addr == ms_addr)
+ return pdp;
+ }
+
+ return NULL;
+}
+
+/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
+ * [...] one IPv6 *prefix* and a PDN represented by an APN.
+ *
+ * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
+ * according to the maximum prefix length for a global IPv6 address as
+ * specified in the IPv6 Addressing Architecture, see RFC 4291.
+ *
+ * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
+ * than those that start with binary 000 have a 64-bit interface ID field
+ * (i.e., n + m = 64).
+ */
+static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
+ const struct in6_addr *b)
+{
+ return a->s6_addr32[0] == b->s6_addr32[0] &&
+ a->s6_addr32[1] == b->s6_addr32[1];
+}
+
+static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
+ const struct in6_addr *ms_addr)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+ if (pdp->af == AF_INET6 &&
+ ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
return pdp;
}
@@ -181,34 +237,85 @@ static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
iph = (struct iphdr *)(skb->data + hdrlen);
if (role == GTP_ROLE_SGSN)
- return iph->daddr == pctx->ms_addr_ip4.s_addr;
+ return iph->daddr == pctx->ms.addr.s_addr;
else
- return iph->saddr == pctx->ms_addr_ip4.s_addr;
+ return iph->saddr == pctx->ms.addr.s_addr;
+}
+
+static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen, unsigned int role)
+{
+ struct ipv6hdr *ip6h;
+ int ret;
+
+ if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
+ return false;
+
+ ip6h = (struct ipv6hdr *)(skb->data + hdrlen);
+
+ if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
+ (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
+ return false;
+
+ if (role == GTP_ROLE_SGSN) {
+ ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
+ } else {
+ ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
+ }
+
+ return ret;
}
/* Check if the inner IP address in this packet is assigned to any
* existing mobile subscriber.
*/
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
- unsigned int hdrlen, unsigned int role)
+ unsigned int hdrlen, unsigned int role,
+ __u16 inner_proto)
{
- switch (ntohs(skb->protocol)) {
+ switch (inner_proto) {
case ETH_P_IP:
return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
+ case ETH_P_IPV6:
+ return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
}
return false;
}
+static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
+ __u16 *inner_proto)
+{
+ __u8 *ip_version, _ip_version;
+
+ ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
+ &_ip_version);
+ if (!ip_version)
+ return -1;
+
+ switch (*ip_version & 0xf0) {
+ case 0x40:
+ *inner_proto = ETH_P_IP;
+ break;
+ case 0x60:
+ *inner_proto = ETH_P_IPV6;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
- unsigned int hdrlen, unsigned int role)
+ unsigned int hdrlen, unsigned int role, __u16 inner_proto)
{
- if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+ if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
return 1;
}
/* Get rid of the GTP + UDP headers. */
- if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
!net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
pctx->dev->stats.rx_length_errors++;
goto err;
@@ -250,6 +357,27 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
return ip_route_output_key(sock_net(sk), fl4);
}
+static struct rt6_info *ip6_route_output_gtp(struct net *net,
+ struct flowi6 *fl6,
+ const struct sock *sk,
+ const struct in6_addr *daddr,
+ struct in6_addr *saddr)
+{
+ struct dst_entry *dst;
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->daddr = *daddr;
+ fl6->saddr = *saddr;
+ fl6->flowi6_proto = sk->sk_protocol;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
+ if (IS_ERR(dst))
+ return ERR_PTR(-ENETUNREACH);
+
+ return (struct rt6_info *)dst;
+}
+
/* GSM TS 09.60. 7.3
* In all Path Management messages:
* - TID: is not used and shall be set to 0.
@@ -292,13 +420,39 @@ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
hdr->length = 0;
}
+static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP0_PORT), htons(GTP0_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+
+ return 0;
+}
+
static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_packet *gtp_pkt;
struct gtp0_header *gtp0;
- struct rtable *rt;
- struct flowi4 fl4;
- struct iphdr *iph;
__be16 seq;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
@@ -325,27 +479,15 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
gtp_pkt->ie.tag = GTPIE_RECOVERY;
gtp_pkt->ie.val = gtp->restart_count;
- iph = ip_hdr(skb);
-
- /* find route to the sender,
- * src address becomes dst address and vice versa.
- */
- rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
- if (IS_ERR(rt)) {
- netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
- &iph->saddr);
+ switch (gtp->sk0->sk_family) {
+ case AF_INET:
+ if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
+ return -1;
+ break;
+ case AF_INET6:
return -1;
}
- udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
- fl4.saddr, fl4.daddr,
- iph->tos,
- ip4_dst_hoplimit(&rt->dst),
- 0,
- htons(GTP0_PORT), htons(GTP0_PORT),
- !net_eq(sock_net(gtp->sk1u),
- dev_net(gtp->dev)),
- false);
return 0;
}
@@ -360,8 +502,8 @@ static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
goto failure;
if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
- nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
- nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
goto failure;
genlmsg_end(skb, genlh);
@@ -372,12 +514,20 @@ failure:
return -EMSGSIZE;
}
+static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ echo->ms.addr.s_addr = iph->daddr;
+ echo->peer.addr.s_addr = iph->saddr;
+ echo->gtp_version = GTP_V0;
+}
+
static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_header *gtp0;
struct echo_info echo;
struct sk_buff *msg;
- struct iphdr *iph;
int ret;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
@@ -385,10 +535,13 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
if (!gtp0_validate_echo_hdr(gtp0))
return -1;
- iph = ip_hdr(skb);
- echo.ms_addr_ip4.s_addr = iph->daddr;
- echo.peer_addr_ip4.s_addr = iph->saddr;
- echo.gtp_version = GTP_V0;
+ switch (gtp->sk0->sk_family) {
+ case AF_INET:
+ gtp0_handle_echo_resp_ip(skb, &echo);
+ break;
+ case AF_INET6:
+ return -1;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
@@ -404,6 +557,21 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
+static int gtp_proto_to_family(__u16 proto)
+{
+ switch (proto) {
+ case ETH_P_IP:
+ return AF_INET;
+ case ETH_P_IPV6:
+ return AF_INET6;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return AF_UNSPEC;
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
@@ -411,6 +579,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
struct pdp_ctx *pctx;
+ __u16 inner_proto;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -433,13 +602,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if (gtp0->type != GTP_TPDU)
return 1;
- pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+ if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
+ netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
+ return -1;
+ }
+
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
+ gtp_proto_to_family(inner_proto));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
@@ -549,8 +724,8 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
return -1;
iph = ip_hdr(skb);
- echo.ms_addr_ip4.s_addr = iph->daddr;
- echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.ms.addr.s_addr = iph->daddr;
+ echo.peer.addr.s_addr = iph->saddr;
echo.gtp_version = GTP_V1;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -567,12 +742,50 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
+static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
+{
+ struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
+ unsigned int offset = *hdrlen;
+ __u8 *next_type, _next_type;
+
+ /* From 29.060: "The Extension Header Length field specifies the length
+ * of the particular Extension header in 4 octets units."
+ *
+ * This length field includes length field size itself (1 byte),
+ * payload (variable length) and next type (1 byte). The extension
+ * header is aligned to 4 bytes.
+ */
+
+ do {
+ gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
+ &_gtp_exthdr);
+ if (!gtp_exthdr || !gtp_exthdr->len)
+ return -1;
+
+ offset += gtp_exthdr->len * 4;
+
+ /* From 29.060: "If no such Header follows, then the value of
+ * the Next Extension Header Type shall be 0."
+ */
+ next_type = skb_header_pointer(skb, offset - 1,
+ sizeof(_next_type), &_next_type);
+ if (!next_type)
+ return -1;
+
+ } while (*next_type != 0);
+
+ *hdrlen = offset;
+
+ return 0;
+}
+
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
struct pdp_ctx *pctx;
+ __u16 inner_proto;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -608,15 +821,25 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if (!pskb_may_pull(skb, hdrlen))
return -1;
+ if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
+ netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
+ return -1;
+ }
+
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+ pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
+ gtp_proto_to_family(inner_proto));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ if (gtp1->flags & GTP1_F_EXTHDR &&
+ gtp_parse_exthdrs(skb, &hdrlen) < 0)
+ return -1;
+
+ return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
static void __gtp_encap_destroy(struct sock *sk)
@@ -760,11 +983,17 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
struct gtp_pktinfo {
struct sock *sk;
- struct iphdr *iph;
- struct flowi4 fl4;
- struct rtable *rt;
+ union {
+ struct flowi4 fl4;
+ struct flowi6 fl6;
+ };
+ union {
+ struct rtable *rt;
+ struct rt6_info *rt6;
+ };
struct pdp_ctx *pctx;
struct net_device *dev;
+ __u8 tos;
__be16 gtph_port;
};
@@ -783,64 +1012,61 @@ static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
- struct sock *sk, struct iphdr *iph,
+ struct sock *sk, __u8 tos,
struct pdp_ctx *pctx, struct rtable *rt,
struct flowi4 *fl4,
struct net_device *dev)
{
pktinfo->sk = sk;
- pktinfo->iph = iph;
+ pktinfo->tos = tos;
pktinfo->pctx = pctx;
pktinfo->rt = rt;
pktinfo->fl4 = *fl4;
pktinfo->dev = dev;
}
-static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
- struct gtp_pktinfo *pktinfo)
+static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
+ struct sock *sk, __u8 tos,
+ struct pdp_ctx *pctx, struct rt6_info *rt6,
+ struct flowi6 *fl6,
+ struct net_device *dev)
+{
+ pktinfo->sk = sk;
+ pktinfo->tos = tos;
+ pktinfo->pctx = pctx;
+ pktinfo->rt6 = rt6;
+ pktinfo->fl6 = *fl6;
+ pktinfo->dev = dev;
+}
+
+static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo,
+ struct pdp_ctx *pctx, __u8 tos,
+ __be16 frag_off)
{
- struct gtp_dev *gtp = netdev_priv(dev);
- struct pdp_ctx *pctx;
struct rtable *rt;
struct flowi4 fl4;
- struct iphdr *iph;
__be16 df;
int mtu;
- /* Read the IP destination address and resolve the PDP context.
- * Prepend PDP header with TEI/TID from PDP ctx.
- */
- iph = ip_hdr(skb);
- if (gtp->role == GTP_ROLE_SGSN)
- pctx = ipv4_pdp_find(gtp, iph->saddr);
- else
- pctx = ipv4_pdp_find(gtp, iph->daddr);
-
- if (!pctx) {
- netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
- &iph->daddr);
- return -ENOENT;
- }
- netdev_dbg(dev, "found PDP context %p\n", pctx);
-
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ &pctx->peer.addr.s_addr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ &pctx->peer.addr.s_addr);
dev->stats.collisions++;
goto err_rt;
}
/* This is similar to tnl_update_pmtu(). */
- df = iph->frag_off;
+ df = frag_off;
if (df) {
mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
sizeof(struct iphdr) - sizeof(struct udphdr);
@@ -858,7 +1084,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (iph->frag_off & htons(IP_DF) &&
+ if (frag_off & htons(IP_DF) &&
((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
@@ -867,7 +1093,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
+ gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
gtp_push_header(skb, pktinfo);
return 0;
@@ -877,6 +1103,162 @@ err:
return -EBADMSG;
}
+static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
+ struct net_device *dev,
+ struct gtp_pktinfo *pktinfo,
+ struct pdp_ctx *pctx, __u8 tos)
+{
+ struct dst_entry *dst;
+ struct rt6_info *rt;
+ struct flowi6 fl6;
+ int mtu;
+
+ rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
+ &inet6_sk(pctx->sk)->saddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to SSGN %pI6\n",
+ &pctx->peer.addr6);
+ dev->stats.tx_carrier_errors++;
+ goto err;
+ }
+ dst = &rt->dst;
+
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to SSGN %pI6\n",
+ &pctx->peer.addr6);
+ dev->stats.collisions++;
+ goto err_rt;
+ }
+
+ mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr);
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ mtu -= sizeof(struct gtp0_header);
+ break;
+ case GTP_V1:
+ mtu -= sizeof(struct gtp1_header);
+ break;
+ }
+
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
+
+ if ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ goto err_rt;
+ }
+
+ gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
+ gtp_push_header(skb, pktinfo);
+
+ return 0;
+err_rt:
+ dst_release(dst);
+err:
+ return -EBADMSG;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct net *net = gtp->net;
+ struct pdp_ctx *pctx;
+ struct iphdr *iph;
+ int ret;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ iph = ip_hdr(skb);
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv4_pdp_find(gtp, iph->saddr);
+ else
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
+ iph->tos, iph->frag_off);
+ break;
+ case AF_INET6:
+ ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
+ iph->tos);
+ break;
+ default:
+ ret = -1;
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &iph->saddr, &iph->daddr);
+
+ return 0;
+}
+
+static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct net *net = gtp->net;
+ struct pdp_ctx *pctx;
+ struct ipv6hdr *ip6h;
+ __u8 tos;
+ int ret;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ ip6h = ipv6_hdr(skb);
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
+ else
+ pctx = ipv6_pdp_find(gtp, &ip6h->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
+ &ip6h->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ tos = ipv6_get_dsfield(ip6h);
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
+ break;
+ case AF_INET6:
+ ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
+ break;
+ default:
+ ret = -1;
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
+ &ip6h->saddr, &ip6h->daddr);
+
+ return 0;
+}
+
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int proto = ntohs(skb->protocol);
@@ -895,6 +1277,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
case ETH_P_IP:
err = gtp_build_skb_ip4(skb, dev, &pktinfo);
break;
+ case ETH_P_IPV6:
+ err = gtp_build_skb_ip6(skb, dev, &pktinfo);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -904,13 +1289,11 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (err < 0)
goto tx_err;
- switch (proto) {
- case ETH_P_IP:
- netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
- &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+ switch (pktinfo.pctx->sk->sk_family) {
+ case AF_INET:
udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
- pktinfo.iph->tos,
+ pktinfo.tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
@@ -918,6 +1301,19 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
dev_net(dev)),
false);
break;
+ case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+ udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
+ &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
+ pktinfo.tos,
+ ip6_dst_hoplimit(&pktinfo.rt->dst),
+ 0,
+ pktinfo.gtph_port, pktinfo.gtph_port,
+ false);
+#else
+ goto tx_err;
+#endif
+ break;
}
return NETDEV_TX_OK;
@@ -936,11 +1332,11 @@ static const struct device_type gtp_type = {
.name = "gtp",
};
+#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
+#define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN)
+
static void gtp_link_setup(struct net_device *dev)
{
- unsigned int max_gtp_header_len = sizeof(struct iphdr) +
- sizeof(struct udphdr) +
- sizeof(struct gtp0_header);
struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
@@ -949,7 +1345,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
- dev->mtu = ETH_DATA_LEN - max_gtp_header_len;
+ dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;
/* Zero header length. */
dev->type = ARPHRD_NONE;
@@ -960,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
- dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
gtp->dev = dev;
}
@@ -975,17 +1371,45 @@ static void gtp_destructor(struct net_device *dev)
kfree(gtp->tid_hash);
}
-static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
+ const struct nlattr *nla, int family)
+{
+ udp_conf->family = family;
+
+ switch (udp_conf->family) {
+ case AF_INET:
+ udp_conf->local_ip.s_addr = nla_get_be32(nla);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ udp_conf->local_ip6 = nla_get_in6_addr(nla);
+ break;
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
+ const struct nlattr *nla, int family)
{
struct udp_tunnel_sock_cfg tuncfg = {};
- struct udp_port_cfg udp_conf = {
- .local_ip.s_addr = htonl(INADDR_ANY),
- .family = AF_INET,
- };
+ struct udp_port_cfg udp_conf = {};
struct net *net = gtp->net;
struct socket *sock;
int err;
+ if (nla) {
+ err = gtp_sock_udp_config(&udp_conf, nla, family);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.family = AF_INET;
+ }
+
if (type == UDP_ENCAP_GTP0)
udp_conf.local_udp_port = htons(GTP0_PORT);
else if (type == UDP_ENCAP_GTP1U)
@@ -1007,16 +1431,17 @@ static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
return sock->sk;
}
-static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
+ int family)
{
- struct sock *sk1u = NULL;
- struct sock *sk0 = NULL;
+ struct sock *sk1u;
+ struct sock *sk0;
- sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+ sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
- sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+ sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
if (IS_ERR(sk1u)) {
udp_tunnel_sock_release(sk0->sk_socket);
return PTR_ERR(sk1u);
@@ -1029,6 +1454,9 @@ static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
return 0;
}
+#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
+#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1038,6 +1466,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct gtp_net *gn;
int hashsize, err;
+#if !IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_GTP_LOCAL6])
+ return -EAFNOSUPPORT;
+#endif
+
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -1066,13 +1499,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- if (data[IFLA_GTP_CREATE_SOCKETS])
- err = gtp_create_sockets(gtp, data);
- else
+ if (data[IFLA_GTP_CREATE_SOCKETS]) {
+ if (data[IFLA_GTP_LOCAL6])
+ err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
+ else
+ err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
+ } else {
err = gtp_encap_enable(gtp, data);
+ }
+
if (err < 0)
goto out_hashtable;
+ if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
+ (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
+ dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
+ dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
+ }
+
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
@@ -1098,11 +1542,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
@@ -1116,6 +1561,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
[IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
[IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
+ [IFLA_GTP_LOCAL] = { .type = NLA_U32 },
+ [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1215,6 +1662,12 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
goto out_sock;
}
+ if (sk->sk_family == AF_INET6 &&
+ !sk->sk_ipv6only) {
+ sk = ERR_PTR(-EADDRNOTAVAIL);
+ goto out_sock;
+ }
+
lock_sock(sk);
if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
@@ -1266,6 +1719,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
+ if (sk0 && sk1u &&
+ sk0->sk_family != sk1u->sk_family) {
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1295,14 +1755,9 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
return gtp;
}
-static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
- pctx->af = AF_INET;
- pctx->peer_addr_ip4.s_addr =
- nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
- pctx->ms_addr_ip4.s_addr =
- nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
switch (pctx->gtp_version) {
case GTP_V0:
@@ -1322,29 +1777,102 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
+static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ if (info->attrs[GTPA_PEER_ADDRESS]) {
+ pctx->peer.addr.s_addr =
+ nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+ } else if (info->attrs[GTPA_PEER_ADDR6]) {
+ pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
+ }
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ ip_pdp_peer_fill(pctx, info);
+ pctx->ms.addr.s_addr =
+ nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ gtp_pdp_fill(pctx, info);
+}
+
+static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ ip_pdp_peer_fill(pctx, info);
+ pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
+ if (pctx->ms.addr6.s6_addr32[2] ||
+ pctx->ms.addr6.s6_addr32[3])
+ return false;
+
+ gtp_pdp_fill(pctx, info);
+
+ return true;
+}
+
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
+ struct in6_addr ms_addr6;
unsigned int version;
bool found = false;
__be32 ms_addr;
+ int family;
- ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
- hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
- pctx = ipv4_pdp_find(gtp, ms_addr);
+ if (info->attrs[GTPA_FAMILY])
+ family = nla_get_u8(info->attrs[GTPA_FAMILY]);
+ else
+ family = AF_INET;
+
+#if !IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6)
+ return ERR_PTR(-EAFNOSUPPORT);
+#endif
+ if (!info->attrs[GTPA_PEER_ADDRESS] &&
+ !info->attrs[GTPA_PEER_ADDR6])
+ return ERR_PTR(-EINVAL);
+
+ if ((info->attrs[GTPA_PEER_ADDRESS] &&
+ sk->sk_family == AF_INET6) ||
+ (info->attrs[GTPA_PEER_ADDR6] &&
+ sk->sk_family == AF_INET))
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ switch (family) {
+ case AF_INET:
+ if (!info->attrs[GTPA_MS_ADDRESS] ||
+ info->attrs[GTPA_MS_ADDR6])
+ return ERR_PTR(-EINVAL);
+
+ ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+ pctx = ipv4_pdp_find(gtp, ms_addr);
+ break;
+ case AF_INET6:
+ if (!info->attrs[GTPA_MS_ADDR6] ||
+ info->attrs[GTPA_MS_ADDRESS])
+ return ERR_PTR(-EINVAL);
+
+ ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
+ hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
+ pctx = ipv6_pdp_find(gtp, &ms_addr6);
+ break;
+ default:
+ return ERR_PTR(-EAFNOSUPPORT);
+ }
if (pctx)
found = true;
if (version == GTP_V0)
pctx_tid = gtp0_pdp_find(gtp,
- nla_get_u64(info->attrs[GTPA_TID]));
+ nla_get_u64(info->attrs[GTPA_TID]),
+ family);
else if (version == GTP_V1)
pctx_tid = gtp1_pdp_find(gtp,
- nla_get_u32(info->attrs[GTPA_I_TEI]));
+ nla_get_u32(info->attrs[GTPA_I_TEI]),
+ family);
if (pctx_tid)
found = true;
@@ -1359,7 +1887,15 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (!pctx)
pctx = pctx_tid;
- ipv4_pdp_fill(pctx, info);
+ switch (pctx->af) {
+ case AF_INET:
+ ipv4_pdp_fill(pctx, info);
+ break;
+ case AF_INET6:
+ if (!ipv6_pdp_fill(pctx, info))
+ return ERR_PTR(-EADDRNOTAVAIL);
+ break;
+ }
if (pctx->gtp_version == GTP_V0)
netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
@@ -1379,7 +1915,32 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
sock_hold(sk);
pctx->sk = sk;
pctx->dev = gtp->dev;
- ipv4_pdp_fill(pctx, info);
+ pctx->af = family;
+
+ switch (pctx->af) {
+ case AF_INET:
+ if (!info->attrs[GTPA_MS_ADDRESS]) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ipv4_pdp_fill(pctx, info);
+ break;
+ case AF_INET6:
+ if (!info->attrs[GTPA_MS_ADDR6]) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ipv6_pdp_fill(pctx, info)) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EADDRNOTAVAIL);
+ }
+ break;
+ }
atomic_set(&pctx->tx_seq, 0);
switch (pctx->gtp_version) {
@@ -1402,13 +1963,13 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
switch (pctx->gtp_version) {
case GTP_V0:
netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
- pctx->u.v0.tid, &pctx->peer_addr_ip4,
- &pctx->ms_addr_ip4, pctx);
+ pctx->u.v0.tid, &pctx->peer.addr,
+ &pctx->ms.addr, pctx);
break;
case GTP_V1:
netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei,
- &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
+ &pctx->peer.addr, &pctx->ms.addr, pctx);
break;
}
@@ -1441,9 +2002,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
int err;
if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK] ||
- !info->attrs[GTPA_PEER_ADDRESS] ||
- !info->attrs[GTPA_MS_ADDRESS])
+ !info->attrs[GTPA_LINK])
return -EINVAL;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
@@ -1501,6 +2060,12 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
struct nlattr *nla[])
{
struct gtp_dev *gtp;
+ int family;
+
+ if (nla[GTPA_FAMILY])
+ family = nla_get_u8(nla[GTPA_FAMILY]);
+ else
+ family = AF_INET;
gtp = gtp_find_dev(net, nla);
if (!gtp)
@@ -1509,14 +2074,31 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
if (nla[GTPA_MS_ADDRESS]) {
__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
+ if (family != AF_INET)
+ return ERR_PTR(-EINVAL);
+
return ipv4_pdp_find(gtp, ip);
+ } else if (nla[GTPA_MS_ADDR6]) {
+ struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);
+
+ if (family != AF_INET6)
+ return ERR_PTR(-EINVAL);
+
+ if (addr.s6_addr32[2] ||
+ addr.s6_addr32[3])
+ return ERR_PTR(-EADDRNOTAVAIL);
+
+ return ipv6_pdp_find(gtp, &addr);
} else if (nla[GTPA_VERSION]) {
u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
- if (gtp_version == GTP_V0 && nla[GTPA_TID])
- return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
- else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
- return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
+ if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
+ return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
+ family);
+ } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
+ return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
+ family);
+ }
}
return ERR_PTR(-EINVAL);
@@ -1580,10 +2162,31 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
- nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
- nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+ nla_put_u8(skb, GTPA_FAMILY, pctx->af))
goto nla_put_failure;
+ switch (pctx->af) {
+ case AF_INET:
+ if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
+ goto nla_put_failure;
+ break;
+ case AF_INET6:
+ if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
+ goto nla_put_failure;
+ break;
+ }
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
+ goto nla_put_failure;
+ break;
+ case AF_INET6:
+ if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
+ goto nla_put_failure;
+ break;
+ }
+
switch (pctx->gtp_version) {
case GTP_V0:
if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
@@ -1810,6 +2413,9 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_NET_NS_FD] = { .type = NLA_U32, },
[GTPA_I_TEI] = { .type = NLA_U32, },
[GTPA_O_TEI] = { .type = NLA_U32, },
+ [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), },
+ [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), },
+ [GTPA_FAMILY] = { .type = NLA_U8, },
};
static const struct genl_small_ops gtp_genl_ops[] = {
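
Illustrative aside (not part of the patch): two comments in the gtp.c hunks above carry the key reasoning — mobile-subscriber IPv6 addresses are matched only on their /64 prefix (3GPP TS 29.061, RFC 4291), and GTPv1 extension headers are chained blocks of len * 4 octets whose last octet names the next type, with 0 terminating the chain. The self-contained C sketch below mirrors both on a byte buffer; the packet layout, offsets, and all identifiers (prefix64_equal, ext_hdrs_end) are assumptions made for the example, not the driver's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare only the upper 64 bits (the prefix), as ipv6_pdp_addr_equal() does. */
static int prefix64_equal(const uint8_t a[16], const uint8_t b[16])
{
        return memcmp(a, b, 8) == 0;
}

/*
 * Walk extension headers starting at 'off': each header spans len * 4 octets,
 * where len is its first octet, and its last octet holds the next-header
 * type; type 0 ends the chain. Returns the offset just past the chain, or -1.
 */
static int ext_hdrs_end(const uint8_t *buf, size_t buflen, size_t off)
{
        uint8_t next;

        do {
                if (off >= buflen || buf[off] == 0)
                        return -1;
                off += (size_t)buf[off] * 4;
                if (off > buflen)
                        return -1;
                next = buf[off - 1];
        } while (next != 0);

        return (int)off;
}

int main(void)
{
        /* One 4-octet extension header: len=1, two payload bytes, next type 0. */
        const uint8_t pkt[] = { 0x01, 0xaa, 0xbb, 0x00, 0x45 /* inner IPv4... */ };
        uint8_t ms_a[16] = { 0x20, 0x01, 0x0d, 0xb8 };  /* 2001:db8::/64 */
        uint8_t ms_b[16] = { 0x20, 0x01, 0x0d, 0xb8 };

        ms_b[15] = 0x42;        /* different interface ID, same /64 prefix */

        printf("prefix match: %d\n", prefix64_equal(ms_a, ms_b));
        printf("inner packet at offset %d\n", ext_hdrs_end(pkt, sizeof(pkt), 0));
        return 0;
}
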
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 25b1f929c422..36a9aade9f33 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -83,7 +83,7 @@ config SCC_TRXECHO
config BAYCOM_SER_FDX
tristate "BAYCOM ser12 fullduplex driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
select CRC_CCITT
help
This is one of two drivers for Baycom style simple amateur radio
@@ -103,7 +103,7 @@ config BAYCOM_SER_FDX
config BAYCOM_SER_HDX
tristate "BAYCOM ser12 halfduplex driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
select CRC_CCITT
help
This is one of two drivers for Baycom style simple amateur radio
@@ -150,7 +150,7 @@ config BAYCOM_EPP
config YAM
tristate "YAM driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
help
The YAM is a modem for packet radio which connects to the serial
port and includes some of the functions of a Terminal Node
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a6fcbda64ecc..2b6ec979a62f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (!nvdev->recv_buf_gpadl_handle.decrypted)
+ vfree(nvdev->recv_buf);
+ if (!nvdev->send_buf_gpadl_handle.decrypted)
+ vfree(nvdev->send_buf);
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 11831a1c9762..44142245343d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1233,14 +1233,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto rollback_vf;
- ndev->mtu = mtu;
+ WRITE_ONCE(ndev->mtu, mtu);
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
/* Attempt rollback to original MTU */
- ndev->mtu = orig_mtu;
+ WRITE_ONCE(ndev->mtu, orig_mtu);
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
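
Illustrative aside (not part of the patch): the mtu assignments above (here and in geneve_change_mtu) become WRITE_ONCE() stores so that lockless readers using READ_ONCE() observe a single, untorn write. The sketch below approximates the pairing for a scalar field with volatile casts; the macro definitions are a simplification of the kernel's and struct demo_dev is invented for the example.

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile const __typeof__(x) *)&(x))

struct demo_dev {
        unsigned int mtu;
};

int main(void)
{
        struct demo_dev dev = { .mtu = 1500 };

        WRITE_ONCE(dev.mtu, 1400);                  /* writer, e.g. a change_mtu handler */
        printf("mtu=%u\n", READ_ONCE(dev.mtu));     /* lockless reader */
        return 0;
}
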
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 3380fb3483b2..e902d731776d 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 4287114b24db..f632aab56f4c 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index 1b4b52501ee3..c1428483ca34 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 199ed0ed868b..2c7e8cb429b9 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2019-2021 Linaro Ltd. */
+/* Copyright (C) 2019-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 19b549f2998b..57dc78c526b0 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index b83390c48615..c8c23d9be961 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index d30fc1fe6ca2..4eb9c909d5b3 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
index 4d8171dae4cd..050580c99b65 100644
--- a/drivers/net/ipa/data/ipa_data-v5.0.c
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.0 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
index 2c6390f11354..0e6663e22533 100644
--- a/drivers/net/ipa/data/ipa_data-v5.5.c
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/kernel.h>
+#include <linux/array_size.h>
#include <linux/log2.h>
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9a0b1fe4a93a..4c3227e77898 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,28 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
#include <linux/bits.h>
-#include <linux/bitfield.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <linux/io.h>
#include <linux/bug.h>
+#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
#include "gsi.h"
-#include "reg.h"
-#include "gsi_reg.h"
#include "gsi_private.h"
+#include "gsi_reg.h"
#include "gsi_trans.h"
-#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_gsi.h"
#include "ipa_version.h"
+#include "reg.h"
/**
* DOC: The IPA Generic Software Interface
@@ -1730,10 +1728,10 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_program(channel, true);
if (channel->toward_ipa)
- netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ netif_napi_add_tx(gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
else
- netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ netif_napi_add(gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
return 0;
@@ -2369,12 +2367,14 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
/* GSI uses NAPI on all channels. Create a dummy network device
* for the channel NAPI contexts to be associated with.
*/
- init_dummy_netdev(&gsi->dummy_dev);
+ gsi->dummy_dev = alloc_netdev_dummy(0);
+ if (!gsi->dummy_dev)
+ return -ENOMEM;
init_completion(&gsi->completion);
ret = gsi_reg_init(gsi, pdev);
if (ret)
- return ret;
+ goto err_reg_exit;
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
@@ -2389,6 +2389,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
return 0;
err_reg_exit:
+ free_netdev(gsi->dummy_dev);
gsi_reg_exit(gsi);
return ret;
@@ -2399,6 +2400,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
+ free_netdev(gsi->dummy_dev);
gsi_reg_exit(gsi);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 42063b227c18..9d8e05d950e3 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,17 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
-#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/types.h>
#include "ipa_version.h"
@@ -23,12 +21,10 @@
#define GSI_TLV_MAX 64
struct device;
-struct scatterlist;
struct platform_device;
struct gsi;
struct gsi_trans;
-struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
struct gsi_ring {
@@ -155,7 +151,7 @@ struct gsi {
struct mutex mutex; /* protects commands, programming */
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
- struct net_device dummy_dev; /* needed for NAPI */
+ struct net_device *dummy_dev; /* needed for NAPI */
};
/**
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index c65f7c5cdc8d..968ab1e596e8 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -10,9 +10,10 @@
#include <linux/types.h>
-struct gsi_trans;
-struct gsi_ring;
+struct gsi;
struct gsi_channel;
+struct gsi_ring;
+struct gsi_trans;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
index 106c43884aef..825598661188 100644
--- a/drivers/net/ipa/gsi_reg.c
+++ b/drivers/net/ipa/gsi_reg.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include "gsi.h"
-#include "reg.h"
#include "gsi_reg.h"
+#include "reg.h"
/* Is this register ID valid for the current GSI version? */
static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index ee6fb00b71eb..19531883864a 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,22 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/bits.h>
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dma-direction.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
-#include <linux/dma-direction.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
-#include "ipa_gsi.h"
-#include "ipa_data.h"
#include "ipa_cmd.h"
+#include "ipa_data.h"
+#include "ipa_gsi.h"
/**
* DOC: GSI Transactions
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 30c1c2dc77c6..c1b3386cbb9d 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,25 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
-#include <linux/types.h>
-#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/dma-direction.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
#include "ipa_cmd.h"
+struct device;
struct page;
struct scatterlist;
-struct device;
struct sk_buff;
struct gsi;
-struct gsi_trans;
struct gsi_trans_pool;
/* Maximum number of TREs in an IPA immediate command transaction */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 334cd62cf286..7ef10a4ff35e 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,30 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
-#include <linux/types.h>
-#include <linux/device.h>
#include <linux/notifier.h>
-#include <linux/pm_wakeup.h>
+#include <linux/types.h>
-#include "ipa_version.h"
#include "gsi.h"
+#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_qmi.h"
-#include "ipa_endpoint.h"
-#include "ipa_interrupt.h"
+#include "ipa_version.h"
-struct clk;
-struct icc_path;
struct net_device;
+struct ipa_interrupt;
struct ipa_power;
struct ipa_smp2p;
-struct ipa_interrupt;
/**
* struct ipa - IPA information
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 39219963dbb3..984311a9a5f2 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,22 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/slab.h>
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/dma-direction.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_endpoint.h"
-#include "ipa_table.h"
#include "ipa_cmd.h"
+#include "ipa_endpoint.h"
#include "ipa_mem.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
/**
* DOC: IPA Immediate Commands
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index e2cf1c2b0ef2..2077fdbade99 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,21 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
#include <linux/types.h>
-#include <linux/dma-direction.h>
-
-struct sk_buff;
-struct scatterlist;
+struct gsi_channel;
+struct gsi_trans;
struct ipa;
struct ipa_mem;
-struct gsi_trans;
-struct gsi_channel;
/**
* enum ipa_cmd_opcode: IPA immediate commands
@@ -58,14 +54,6 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route);
/**
- * ipa_cmd_data_valid() - Validate command-realted configuration is valid
- * @ipa: - IPA pointer
- *
- * Return: true if assumptions required for command are valid
- */
-bool ipa_cmd_data_valid(struct ipa *ipa);
-
-/**
* ipa_cmd_pool_init() - initialize command channel pools
* @channel: AP->IPA command TX GSI channel pointer
* @tre_count: Number of pool elements to allocate
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 2a1605e67b65..d88cbbbf18b7 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,16 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
#include <linux/types.h>
-#include "ipa_version.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
+#include "ipa_version.h"
/**
* DOC: IPA/GSI Configuration Data
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index dd490941615e..0bd9b9fbbf56 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,27 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/slab.h>
#include <linux/bitfield.h>
-#include <linux/if_rmnet.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/dma-direction.h>
+#include <linux/if_rmnet.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
+#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_cmd.h"
+#include "ipa_gsi.h"
+#include "ipa_interrupt.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
-#include "ipa_table.h"
-#include "ipa_gsi.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
+#include "ipa_version.h"
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 3ad2e802040a..e7d8ae6c6f6a 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,21 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/if_ether.h>
-#include "gsi.h"
#include "ipa_reg.h"
+#include "ipa_version.h"
struct net_device;
struct sk_buff;
+struct gsi_trans;
struct ipa;
struct ipa_gsi_endpoint_data;
@@ -199,9 +199,9 @@ int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa);
-void ipa_endpoint_trans_complete(struct ipa_endpoint *ipa,
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
struct gsi_trans *trans);
-void ipa_endpoint_trans_release(struct ipa_endpoint *ipa,
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct gsi_trans *trans);
#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
index d323adb03383..cb654c7b5498 100644
--- a/drivers/net/ipa/ipa_gsi.c
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -1,16 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#include <linux/types.h>
-#include "ipa_gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_endpoint.h"
#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_gsi.h"
+#include "ipa_version.h"
void ipa_gsi_trans_complete(struct gsi_trans *trans)
{
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c3e8784d51d9..245a06997055 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -19,29 +19,31 @@
* time only these three are supported.
*/
-#include <linux/platform_device.h>
-#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_reg.h"
#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
#include "ipa_uc.h"
-#include "ipa_interrupt.h"
/**
* struct ipa_interrupt - IPA interrupt information
* @ipa: IPA pointer
* @irq: Linux IRQ number used for IPA interrupts
* @enabled: Mask indicating which interrupts are enabled
+ * @suspend_enabled: Bitmap of endpoints with the SUSPEND interrupt enabled
*/
struct ipa_interrupt {
struct ipa *ipa;
u32 irq;
u32 enabled;
+ unsigned long *suspend_enabled;
};
/* Clear the suspend interrupt for all endpoints that signaled it */
@@ -194,6 +196,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
const struct reg *reg;
+ unsigned long weight;
u32 offset;
u32 val;
@@ -203,6 +206,10 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
+ weight = bitmap_weight(interrupt->suspend_enabled, ipa->endpoint_count);
+ if (weight == 1 && !enable)
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
+
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
@@ -211,8 +218,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
val |= mask;
else
val &= ~mask;
+ __change_bit(endpoint_id, interrupt->suspend_enabled);
iowrite32(val, ipa->reg_virt + offset);
+
+ if (!weight && enable)
+ ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -246,7 +257,16 @@ int ipa_interrupt_config(struct ipa *ipa)
interrupt->ipa = ipa;
- /* Disable all IPA interrupt types */
+ /* Initially all IPA interrupt types are disabled */
+ interrupt->enabled = 0;
+ interrupt->suspend_enabled = bitmap_zalloc(ipa->endpoint_count,
+ GFP_KERNEL);
+ if (!interrupt->suspend_enabled) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ /* Disable IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -254,22 +274,32 @@ int ipa_interrupt_config(struct ipa *ipa)
"ipa", interrupt);
if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
- goto err_kfree;
+ goto err_free_bitmap;
+ }
+
+ ret = device_init_wakeup(dev, true);
+ if (ret) {
+ dev_err(dev, "error %d enabling wakeup\n", ret);
+ goto err_free_irq;
}
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
ret);
- goto err_free_irq;
+ goto err_disable_wakeup;
}
ipa->interrupt = interrupt;
return 0;
+err_disable_wakeup:
+ (void)device_init_wakeup(dev, false);
err_free_irq:
free_irq(interrupt->irq, interrupt);
+err_free_bitmap:
+ bitmap_free(interrupt->suspend_enabled);
err_kfree:
kfree(interrupt);
@@ -285,22 +315,20 @@ void ipa_interrupt_deconfig(struct ipa *ipa)
ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
+ (void)device_init_wakeup(dev, false);
free_irq(interrupt->irq, interrupt);
+ bitmap_free(interrupt->suspend_enabled);
}
/* Initialize the IPA interrupt structure */
struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct ipa_interrupt *interrupt;
int irq;
irq = platform_get_irq_byname(pdev, "ipa");
- if (irq <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
-
+ if (irq <= 0)
return ERR_PTR(irq ? : -EINVAL);
- }
interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
if (!interrupt)
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index f3f4f4330a59..d11c4af14fa2 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,16 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
#include <linux/types.h>
-#include <linux/bits.h>
+
+struct platform_device;
struct ipa;
struct ipa_interrupt;
+
enum ipa_irq_id;
/**
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 57b241417e8c..5f3dd5a2dcf4 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,38 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/bitfield.h>
#include <linux/bug.h>
-#include <linux/io.h>
#include <linux/firmware.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "ipa.h"
-#include "ipa_power.h"
+#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_resource.h"
-#include "ipa_cmd.h"
-#include "ipa_reg.h"
+#include "ipa_interrupt.h"
#include "ipa_mem.h"
-#include "ipa_table.h"
-#include "ipa_smp2p.h"
#include "ipa_modem.h"
-#include "ipa_uc.h"
-#include "ipa_interrupt.h"
-#include "gsi_trans.h"
+#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_resource.h"
+#include "ipa_smp2p.h"
#include "ipa_sysfs.h"
+#include "ipa_table.h"
+#include "ipa_uc.h"
+#include "ipa_version.h"
/**
* DOC: The IP Accelerator
@@ -120,10 +119,6 @@ int ipa_setup(struct ipa *ipa)
if (ret)
return ret;
- ret = ipa_power_setup(ipa);
- if (ret)
- goto err_gsi_teardown;
-
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
@@ -170,8 +165,6 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
- ipa_power_teardown(ipa);
-err_gsi_teardown:
gsi_teardown(&ipa->gsi);
return ret;
@@ -196,7 +189,6 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
- ipa_power_teardown(ipa);
gsi_teardown(&ipa->gsi);
}
@@ -818,11 +810,6 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_supported(data->version)) {
- dev_err(dev, "unsupported IPA version %u\n", data->version);
- return -EINVAL;
- }
-
if (!data->modem_route_count) {
dev_err(dev, "modem_route_count cannot be zero\n");
return -EINVAL;
@@ -873,6 +860,10 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
+ ret = ipa_cmd_init(ipa);
+ if (ret)
+ goto err_mem_exit;
+
ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
data->endpoint_data);
if (ret)
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 709f061ede61..dee985eb08cb 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,25 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/bitfield.h>
-#include <linux/bug.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/types.h>
+
#include <linux/soc/qcom/smem.h>
+#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_reg.h"
-#include "ipa_data.h"
#include "ipa_cmd.h"
+#include "ipa_data.h"
#include "ipa_mem.h"
+#include "ipa_reg.h"
#include "ipa_table.h"
-#include "gsi_trans.h"
/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 28aad00a151d..b25babade787 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+#include <linux/types.h>
+
struct platform_device;
struct ipa;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index c27ca3f27f7d..8fe0d0e1a00f 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,29 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/errno.h>
+#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <linux/if_rmnet.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
-#include <linux/if_rmnet.h>
-#include <linux/etherdevice.h>
#include <net/pkt_sched.h>
-#include <linux/pm_runtime.h>
+
#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
-#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_table.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_smp2p.h"
-#include "ipa_qmi.h"
+#include "ipa_table.h"
#include "ipa_uc.h"
-#include "ipa_power.h"
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index d85718db9a57..b1d2c80ed096 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
-struct ipa;
struct net_device;
struct sk_buff;
+struct ipa;
+
int ipa_modem_start(struct ipa *ipa);
int ipa_modem_stop(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 41ca7ef5e20f..65fd14da0f86 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/clk.h>
@@ -9,15 +9,15 @@
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/bitops.h>
#include "linux/soc/qcom/qcom_aoss.h"
#include "ipa.h"
-#include "ipa_power.h"
+#include "ipa_data.h"
#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
#include "ipa_modem.h"
-#include "ipa_data.h"
+#include "ipa_power.h"
/**
* DOC: IPA Power Management
@@ -232,25 +232,6 @@ void ipa_power_retention(struct ipa *ipa, bool enable)
ret, enable ? "en" : "dis");
}
-int ipa_power_setup(struct ipa *ipa)
-{
- int ret;
-
- ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
-
- ret = device_init_wakeup(ipa->dev, true);
- if (ret)
- ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
-
- return ret;
-}
-
-void ipa_power_teardown(struct ipa *ipa)
-{
- (void)device_init_wakeup(ipa->dev, false);
- ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
-}
-
/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 227cc04bea80..a83524a61c28 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,16 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
+#include <linux/types.h>
+
struct device;
struct ipa;
struct ipa_power_data;
-enum ipa_irq_id;
/* IPA device power management function block */
extern const struct dev_pm_ops ipa_pm_ops;
@@ -31,20 +32,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_setup() - Set up IPA power management
- * @ipa: IPA pointer
- *
- * Return: 0 if successful, or a negative error code
- */
-int ipa_power_setup(struct ipa *ipa);
-
-/**
- * ipa_power_teardown() - Inverse of ipa_power_setup()
- * @ipa: IPA pointer
- */
-void ipa_power_teardown(struct ipa *ipa);
-
-/**
* ipa_power_init() - Initialize IPA power management
* @dev: IPA device
* @data: Clock configuration data
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 65c40e207802..d771f3a71f94 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,19 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/qrtr.h>
-#include <linux/soc/qcom/qmi.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_endpoint.h"
#include "ipa_mem.h"
-#include "ipa_table.h"
#include "ipa_modem.h"
#include "ipa_qmi_msg.h"
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 1c236826c17a..fb15ea7f47e0 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <linux/soc/qcom/qmi.h>
struct ipa;
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 894f99517233..51dc13a577a5 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/stddef.h>
+
#include <linux/soc/qcom/qmi.h>
#include "ipa_qmi_msg.h"
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index b73503552c4d..644b8c27108b 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -9,6 +9,7 @@
/* === Only "ipa_qmi" and "ipa_qmi_msg.c" should include this file === */
#include <linux/types.h>
+
#include <linux/soc/qcom/qmi.h>
/* Request/response/indication QMI message ids used for IPA. Receiving
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 98625956e0bb..c574f798fdc9 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include "ipa.h"
#include "ipa_reg.h"
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 62c62495b796..61b7c441ae95 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,15 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
-#include <linux/bitfield.h>
-#include <linux/bug.h>
-
-#include "ipa_version.h"
#include "reg.h"
struct platform_device;
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 82c88a744d10..1b0c4695c32a 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/types.h>
-#include <linux/kernel.h>
#include "ipa.h"
#include "ipa_data.h"
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 2f917582c423..fcaadd111a8a 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,20 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/soc/qcom/smem.h>
+#include <linux/types.h>
+
#include <linux/soc/qcom/smem_state.h>
-#include "ipa_smp2p.h"
#include "ipa.h"
+#include "ipa_smp2p.h"
#include "ipa_uc.h"
/**
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index 2ff09ce343b7..a59bd215494c 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021-2022 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
-#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_version.h"
#include "ipa_sysfs.h"
+#include "ipa_version.h"
static const char *ipa_version_string(struct ipa *ipa)
{
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index 58ba22810bab..43d9cb0722a4 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,13 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
-struct attribute_group;
-
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
extern const struct attribute_group ipa_endpoint_id_attribute_group;
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index a24ac11b8893..4e4a3f8aa8e8 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,28 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/bits.h>
#include <linux/bitops.h>
-#include <linux/bitfield.h>
-#include <linux/io.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include "gsi.h"
+#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_version.h"
+#include "ipa_cmd.h"
#include "ipa_endpoint.h"
-#include "ipa_table.h"
-#include "ipa_reg.h"
#include "ipa_mem.h"
-#include "ipa_cmd.h"
-#include "gsi.h"
-#include "gsi_trans.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
+#include "ipa_version.h"
/**
* DOC: IPA Filter and Route Tables
@@ -161,6 +158,12 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
return ipa_mem_find(ipa, mem_id);
}
+/* Return true if hashed tables are supported */
+bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
+
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
struct device *dev = ipa->dev;
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 7cc951904bb4..16d4d15df9e9 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -23,10 +23,7 @@ bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
*/
-static inline bool ipa_table_hash_support(struct ipa *ipa)
-{
- return ipa->version != IPA_VERSION_4_2;
-}
+bool ipa_table_hash_support(struct ipa *ipa);
/**
* ipa_table_reset() - Reset filter and route tables entries to "none"
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index bfd5dc6dab43..2963db83ab6b 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_uc.h"
+#include "ipa_interrupt.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_uc.h"
/**
* DOC: The IPA embedded microcontroller
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 85aa0df818c2..12997ecf5faa 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,13 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
struct ipa;
-enum ipa_irq_id;
/**
* ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 38150345b607..38c47f51a50c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
+#include <linux/types.h>
+
/**
* enum ipa_version
* @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
@@ -45,24 +47,6 @@ enum ipa_version {
IPA_VERSION_COUNT, /* Last; not a version */
};
-static inline bool ipa_version_supported(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- case IPA_VERSION_5_0:
- case IPA_VERSION_5_5:
- return true;
- default:
- return false;
- }
-}
-
/* Execution environment IDs */
enum gsi_ee_id {
GSI_EE_AP = 0x0,
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
index 2ee07eebca67..53c16e594ea4 100644
--- a/drivers/net/ipa/reg.h
+++ b/drivers/net/ipa/reg.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* *Copyright (C) 2022-2023 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
#ifndef _REG_H_
#define _REG_H_
-#include <linux/types.h>
-#include <linux/log2.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/types.h>
/**
* struct reg - A register descriptor
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c
index e036805a7882..8c577b8b5c7a 100644
--- a/drivers/net/ipa/reg/gsi_reg-v3.1.c
+++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
index 8c3ab3a5288e..a1c609f40d99 100644
--- a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c
index 7cc7a21d07f9..ff1fb1ca47dd 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c
index 01696519032f..ab9757ce42e7 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.11.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
index 2900e5c3ff88..01b45f79c315 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.5.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
index 8b5d95425a76..783eaaee2936 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.9.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
index 145eb0bd096d..36d1e65df71b 100644
--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c01c + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 648dbfe1fce3..a89103701583 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
@@ -76,19 +78,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -403,7 +392,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index 78b1bf60cd02..c81c48ec51f9 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
@@ -81,19 +83,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -414,7 +403,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 29e71cce4a84..18bddc32c931 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -113,19 +115,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -470,7 +459,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index bb7cf488144d..e78dd71e8b03 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index 1c58f78851c2..8494731efdd3 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
@@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -489,7 +478,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
index 731824fce1d4..2c161cf69193 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.7.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -462,7 +451,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index 01f87b5290e0..fa6fd312e486 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -112,19 +114,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -467,7 +456,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.0.c b/drivers/net/ipa/reg/ipa_reg-v5.0.c
index 95e0edff4170..b26b5f57ac03 100644
--- a/drivers/net/ipa/reg/ipa_reg-v5.0.c
+++ b/drivers/net/ipa/reg/ipa_reg-v5.0.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(7, 0),
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.5.c b/drivers/net/ipa/reg/ipa_reg-v5.5.c
index 26ca9c9bac59..abb0c443ef66 100644
--- a/drivers/net/ipa/reg/ipa_reg-v5.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v5.5.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/kernel.h>
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bits.h>
+#include <linux/types.h>
#include "../ipa_reg.h"
#include "../ipa_version.h"
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5920f7e63352..094f44dac5c8 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -735,6 +735,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
netif_stacked_transfer_operstate(ipvlan->phy_dev,
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6eab66c2660..2b486e7c749c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -141,9 +141,6 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
netdev_lockdep_set_classes(dev);
return 0;
}
@@ -151,7 +148,6 @@ static int loopback_dev_init(struct net_device *dev)
static void loopback_dev_free(struct net_device *dev)
{
dev_net(dev)->loopback_dev = NULL;
- free_percpu(dev->lstats);
}
static const struct net_device_ops loopback_ops = {
@@ -191,6 +187,7 @@ static void gen_lo_setup(struct net_device *dev,
dev->header_ops = hdr_ops;
dev->netdev_ops = dev_ops;
dev->needs_free_netdev = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
dev->priv_destructor = dev_destructor;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0206b84284ab..2da70bc3dd86 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- struct macsec_rx_sc *rx_sc = NULL;
+ const struct macsec_ops *ops;
- if (md_dst && md_dst->type == METADATA_MACSEC)
- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+ ops = macsec_get_ops(macsec, NULL);
- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ } else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
@@ -3727,7 +3753,7 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
if (macsec->real_dev->mtu - extra < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0cec2783a3e7..67b7ef2d463f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -865,7 +865,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
if (vlan->lowerdev->mtu < new_mtu)
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 778db310a28d..82088741debd 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -132,8 +132,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
}
- if (dev->of_node &&
- of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) {
+ if (device_is_compatible(dev, "microchip,mdio-smi0")) {
bitbang->ctrl.op_c22_read = 0;
bitbang->ctrl.op_c22_write = 0;
bitbang->ctrl.override_op_c22 = 1;
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index d0c916a53d7c..963d8b4af28d 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -231,7 +231,7 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
}
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index bd546d4d26c6..3f9c9327f149 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -140,6 +140,13 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
return 0;
}
+static void
+nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = 123;
+ fec_stats->uncorrectable_blocks.total = 4;
+}
+
static int nsim_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -163,6 +170,7 @@ static const struct ethtool_ops nsim_ethtool_ops = {
.set_channels = nsim_set_channels,
.get_fecparam = nsim_get_fecparam,
.set_fecparam = nsim_set_fecparam,
+ .get_fec_stats = nsim_get_fec_stats,
.get_ts_info = nsim_get_ts_info,
};
@@ -182,6 +190,9 @@ void nsim_ethtool_init(struct netdevsim *ns)
nsim_ethtool_ring_init(ns);
+ ns->ethtool.pauseparam.report_stats_rx = true;
+ ns->ethtool.pauseparam.report_stats_tx = true;
+
ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 8330bc0bcb7e..c22897bf5509 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -19,6 +19,8 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
@@ -26,11 +28,33 @@
#include "netdevsim.h"
+#define NSIM_RING_SIZE 256
+
+static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+{
+ if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ skb_queue_tail(&rq->skb_queue, skb);
+ return NET_RX_SUCCESS;
+}
+
+static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
+ struct nsim_rq *rq)
+{
+ return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+}
+
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct nsim_rq *rq;
+ int rxq;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
@@ -40,10 +64,18 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!peer_ns)
goto out_drop_free;
+ peer_dev = peer_ns->netdev;
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq >= peer_dev->num_rx_queues)
+ rxq = rxq % peer_dev->num_rx_queues;
+ rq = &peer_ns->rq[rxq];
+
skb_tx_timestamp(skb);
- if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
+ napi_schedule(&rq->napi);
+
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
@@ -72,7 +104,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -298,6 +330,150 @@ static int nsim_get_iflink(const struct net_device *dev)
return iflink;
}
+static int nsim_rcv(struct nsim_rq *rq, int budget)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < budget; i++) {
+ if (skb_queue_empty(&rq->skb_queue))
+ break;
+
+ skb = skb_dequeue(&rq->skb_queue);
+ netif_receive_skb(skb);
+ }
+
+ return i;
+}
+
+static int nsim_poll(struct napi_struct *napi, int budget)
+{
+ struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
+ int done;
+
+ done = nsim_rcv(rq, budget);
+ napi_complete(napi);
+
+ return done;
+}
+
+static int nsim_create_page_pool(struct nsim_rq *rq)
+{
+ struct page_pool_params p = {
+ .order = 0,
+ .pool_size = NSIM_RING_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = &rq->napi.dev->dev,
+ .napi = &rq->napi,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .netdev = rq->napi.dev,
+ };
+
+ rq->page_pool = page_pool_create(&p);
+ if (IS_ERR(rq->page_pool)) {
+ int err = PTR_ERR(rq->page_pool);
+
+ rq->page_pool = NULL;
+ return err;
+ }
+ return 0;
+}
+
+static int nsim_init_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ struct nsim_rq *rq;
+ int err, i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ netif_napi_add(dev, &rq->napi, nsim_poll);
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ err = nsim_create_page_pool(rq);
+ if (err)
+ goto err_pp_destroy;
+ }
+
+ return 0;
+
+err_pp_destroy:
+ while (i--) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ __netif_napi_del(&ns->rq[i].napi);
+
+ return err;
+}
+
+static void nsim_enable_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+ napi_enable(&rq->napi);
+ }
+}
+
+static int nsim_open(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ err = nsim_init_napi(ns);
+ if (err)
+ return err;
+
+ nsim_enable_napi(ns);
+
+ return 0;
+}
+
+static void nsim_del_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ napi_disable(&rq->napi);
+ __netif_napi_del(&rq->napi);
+ }
+ synchronize_net();
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
+}
+
+static int nsim_stop(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
+
+ netif_carrier_off(dev);
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ netif_carrier_off(peer->netdev);
+
+ nsim_del_napi(ns);
+
+ return 0;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -317,6 +493,8 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_features = nsim_set_features,
.ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
+ .ndo_open = nsim_open,
+ .ndo_stop = nsim_stop,
};
static const struct net_device_ops nsim_vf_netdev_ops = {
@@ -330,6 +508,107 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_set_features = nsim_set_features,
};
+/* We don't have true per-queue stats, yet, so do some random fakery here.
+ * Only report stuff for queue 0.
+ */
+static void nsim_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ if (!idx)
+ nsim_get_stats64(dev, &rtstats);
+
+ stats->packets = rtstats.rx_packets - !!rtstats.rx_packets;
+ stats->bytes = rtstats.rx_bytes;
+}
+
+static void nsim_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ if (!idx)
+ nsim_get_stats64(dev, &rtstats);
+
+ stats->packets = rtstats.tx_packets - !!rtstats.tx_packets;
+ stats->bytes = rtstats.tx_bytes;
+}
+
+static void nsim_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ nsim_get_stats64(dev, &rtstats);
+
+ rx->packets = !!rtstats.rx_packets;
+ rx->bytes = 0;
+ tx->packets = !!rtstats.tx_packets;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops nsim_stat_ops = {
+ .get_queue_stats_tx = nsim_get_queue_stats_tx,
+ .get_queue_stats_rx = nsim_get_queue_stats_rx,
+ .get_base_stats = nsim_get_base_stats,
+};
+
+static ssize_t
+nsim_pp_hold_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ char buf[3] = "n\n";
+
+ if (ns->page)
+ buf[0] = 'y';
+
+ return simple_read_from_buffer(data, count, ppos, buf, 2);
+}
+
+static ssize_t
+nsim_pp_hold_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ ssize_t ret;
+ bool val;
+
+ ret = kstrtobool_from_user(data, count, &val);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = count;
+ if (val == !!ns->page)
+ goto exit;
+
+ if (!netif_running(ns->netdev) && val) {
+ ret = -ENETDOWN;
+ } else if (val) {
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ if (!ns->page)
+ ret = -ENOMEM;
+ } else {
+ page_pool_put_full_page(ns->page->pp, ns->page, false);
+ ns->page = NULL;
+ }
+ rtnl_unlock();
+
+exit:
+ return count;
+}
+
+static const struct file_operations nsim_pp_hold_fops = {
+ .open = simple_open,
+ .read = nsim_pp_hold_read,
+ .write = nsim_pp_hold_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -349,6 +628,35 @@ static void nsim_setup(struct net_device *dev)
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
+static int nsim_queue_init(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!ns->rq)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ skb_queue_head_init(&ns->rq[i].skb_queue);
+
+ return 0;
+}
+
+static void nsim_queue_free(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ skb_queue_purge_reason(&ns->rq[i].skb_queue,
+ SKB_DROP_REASON_QUEUE_PURGE);
+
+ kvfree(ns->rq);
+ ns->rq = NULL;
+}
+
static int nsim_init_netdevsim(struct netdevsim *ns)
{
struct mock_phc *phc;
@@ -360,16 +668,21 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
+ ns->netdev->stat_ops = &nsim_stat_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
goto err_phc_destroy;
rtnl_lock();
- err = nsim_bpf_init(ns);
+ err = nsim_queue_init(ns);
if (err)
goto err_utn_destroy;
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_rq_destroy;
+
nsim_macsec_init(ns);
nsim_ipsec_init(ns);
@@ -383,6 +696,8 @@ err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
+err_rq_destroy:
+ nsim_queue_free(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -436,6 +751,10 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
err = nsim_init_netdevsim_vf(ns);
if (err)
goto err_free_netdev;
+
+ ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
+ ns, &nsim_pp_hold_fops);
+
return ns;
err_free_netdev:
@@ -448,6 +767,8 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->pp_dfs);
+
rtnl_lock();
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -458,10 +779,18 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
+ nsim_queue_free(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
nsim_exit_netdevsim(ns);
+
+ /* Put this intentionally late to exercise the orphaning path */
+ if (ns->page) {
+ page_pool_put_full_page(ns->page->pp, ns->page, false);
+ ns->page = NULL;
+ }
+
free_netdev(dev);
}
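
One property worth calling out in the stats code above: the per-queue callbacks under-report by exactly one packet whenever traffic has flowed, and the base stats report that one packet back, so base plus queue 0 always adds up to the rtnl_link_stats64 totals. A hedged sketch of that invariant, written as an in-file check (illustrative only, not part of the driver):

/* Illustrative consistency check for the fake stats above: base stats
 * plus queue 0 stats must add back up to the rtnl_link_stats64 totals.
 * Not part of the driver; it would have to live in the same file since
 * the helpers are static, and it ignores races between the reads.
 */
static void nsim_check_queue_stats(struct net_device *dev)
{
	struct netdev_queue_stats_rx base_rx = { }, q0_rx = { };
	struct netdev_queue_stats_tx base_tx = { }, q0_tx = { };
	struct rtnl_link_stats64 total = { };

	nsim_get_stats64(dev, &total);
	nsim_get_base_stats(dev, &base_rx, &base_tx);
	nsim_get_queue_stats_rx(dev, 0, &q0_rx);
	nsim_get_queue_stats_tx(dev, 0, &q0_tx);

	WARN_ON(base_rx.packets + q0_rx.packets != total.rx_packets);
	WARN_ON(base_tx.packets + q0_tx.packets != total.tx_packets);
}
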
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 553c4b9b4f63..bf02efa10956 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -90,11 +90,18 @@ struct nsim_ethtool {
struct ethtool_fecparam fec;
};
+struct nsim_rq {
+ struct napi_struct napi;
+ struct sk_buff_head skb_queue;
+ struct page_pool *page_pool;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
+ struct nsim_rq *rq;
u64 tx_packets;
u64 tx_bytes;
@@ -125,6 +132,9 @@ struct netdevsim {
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
+ struct page *page;
+ struct dentry *pp_dfs;
+
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
};
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 536bd6564f8b..ffe7f463e16e 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -306,7 +306,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
return -EINVAL;
if (!netif_running(ndev)) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -335,7 +335,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
}
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ntb_transport_link_up(dev->qp);
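
The two WRITE_ONCE() conversions above annotate the dev->mtu stores so that lockless readers can use READ_ONCE() without tearing; a minimal sketch of the matching reader side (the helper itself is hypothetical):

/* Hypothetical reader pairing with the WRITE_ONCE() stores above:
 * dev->mtu may change concurrently via ndo_change_mtu, so read it once.
 */
static bool demo_frame_fits_mtu(const struct net_device *dev,
				unsigned int len)
{
	return len <= READ_ONCE(dev->mtu);
}
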
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 853b8c138718..b79aedad855b 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -61,11 +61,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- int bmsr, lpa;
+ int bmsr;
bmsr = mdiodev_read(pcs, MII_BMSR);
- lpa = mdiodev_read(pcs, MII_LPA);
- if (bmsr < 0 || lpa < 0) {
+ if (bmsr < 0) {
state->link = false;
return;
}
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 4bd66fdde367..d0a722d43368 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -279,10 +279,38 @@ static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
return -EINVAL;
}
+static int miic_pre_init(struct phylink_pcs *pcs)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 val, mask;
+
+ /* Start RX clock if required */
+ if (pcs->rxc_always_on) {
+ /* In MII through mode, the clock signals will be driven by the
+ * external PHY, which might not be initialized yet. Set RMII
+ * as default mode to ensure that a reference clock signal is
+ * generated.
+ */
+ miic_port->interface = PHY_INTERFACE_MODE_RMII;
+
+ val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, CONV_MODE_RMII) |
+ FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, CONV_MODE_100MBPS);
+ mask = MIIC_CONVCTRL_CONV_MODE | MIIC_CONVCTRL_CONV_SPEED;
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(miic_port->port), mask, val);
+
+ miic_converter_enable(miic, miic_port->port, 1);
+ }
+
+ return 0;
+}
+
static const struct phylink_pcs_ops miic_phylink_ops = {
.pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
+ .pcs_pre_init = miic_pre_init,
};
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
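
The new pcs_pre_init hook above forces the converter into RMII/100M with a masked read-modify-write built from FIELD_PREP(); the same construction, reduced to a self-contained sketch with made-up field definitions (the real masks are the MIIC_CONVCTRL_* ones used above):

/* Sketch of the FIELD_PREP() read-modify-write pattern used in
 * miic_pre_init(); the DEMO_* fields are invented for illustration.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_CONV_MODE		GENMASK(3, 0)
#define DEMO_CONV_SPEED		GENMASK(5, 4)

static u32 demo_convctrl_rmw(u32 old, u32 mode, u32 speed)
{
	u32 mask = DEMO_CONV_MODE | DEMO_CONV_SPEED;
	u32 val = FIELD_PREP(DEMO_CONV_MODE, mode) |
		  FIELD_PREP(DEMO_CONV_SPEED, speed);

	/* keep untouched bits, replace only the masked fields */
	return (old & ~mask) | val;
}
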
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
new file mode 100644
index 000000000000..69434fd13f96
--- /dev/null
+++ b/drivers/net/pfcp.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PFCP according to 3GPP TS 29.244
+ *
+ * Copyright (C) 2022, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/pfcp.h>
+
+struct pfcp_dev {
+ struct list_head list;
+
+ struct socket *sock;
+ struct net_device *dev;
+ struct net *net;
+
+ struct gro_cells gro_cells;
+};
+
+static unsigned int pfcp_net_id __read_mostly;
+
+struct pfcp_net {
+ struct list_head pfcp_dev_list;
+};
+
+static void
+pfcp_session_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+ struct pfcp_metadata *md)
+{
+ struct pfcphdr_session *unparsed = pfcp_hdr_session(skb);
+
+ md->seid = unparsed->seid;
+ md->type = PFCP_TYPE_SESSION;
+}
+
+static void
+pfcp_node_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+ struct pfcp_metadata *md)
+{
+ md->type = PFCP_TYPE_NODE;
+}
+
+static int pfcp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+ struct metadata_dst *tun_dst;
+ struct pfcp_metadata *md;
+ struct pfcphdr *unparsed;
+ struct pfcp_dev *pfcp;
+
+ if (unlikely(!pskb_may_pull(skb, PFCP_HLEN)))
+ goto drop;
+
+ pfcp = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!pfcp))
+ goto drop;
+
+ unparsed = pfcp_hdr(skb);
+
+ ip_tunnel_flags_zero(flags);
+ tun_dst = udp_tun_rx_dst(skb, sk->sk_family, flags, 0,
+ sizeof(*md));
+ if (unlikely(!tun_dst))
+ goto drop;
+
+ md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ if (unlikely(!md))
+ goto drop;
+
+ if (unparsed->flags & PFCP_SEID_FLAG)
+ pfcp_session_recv(pfcp, skb, md);
+ else
+ pfcp_node_recv(pfcp, skb, md);
+
+ __set_bit(IP_TUNNEL_PFCP_OPT_BIT, tun_dst->u.tun_info.key.tun_flags);
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+
+ if (unlikely(iptunnel_pull_header(skb, PFCP_HLEN, skb->protocol,
+ !net_eq(sock_net(sk),
+ dev_net(pfcp->dev)))))
+ goto drop;
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+ skb->dev = pfcp->dev;
+
+ gro_cells_receive(&pfcp->gro_cells, skb);
+
+ return 0;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void pfcp_del_sock(struct pfcp_dev *pfcp)
+{
+ udp_tunnel_sock_release(pfcp->sock);
+ pfcp->sock = NULL;
+}
+
+static void pfcp_dev_uninit(struct net_device *dev)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ gro_cells_destroy(&pfcp->gro_cells);
+ pfcp_del_sock(pfcp);
+}
+
+static int pfcp_dev_init(struct net_device *dev)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ pfcp->dev = dev;
+
+ return gro_cells_init(&pfcp->gro_cells, dev);
+}
+
+static const struct net_device_ops pfcp_netdev_ops = {
+ .ndo_init = pfcp_dev_init,
+ .ndo_uninit = pfcp_dev_uninit,
+ .ndo_get_stats64 = dev_get_tstats64,
+};
+
+static const struct device_type pfcp_type = {
+ .name = "pfcp",
+};
+
+static void pfcp_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &pfcp_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &pfcp_type);
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ netif_keep_dst(dev);
+}
+
+static struct socket *pfcp_create_sock(struct pfcp_dev *pfcp)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {};
+ struct udp_port_cfg udp_conf = {
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .family = AF_INET,
+ };
+ struct net *net = pfcp->net;
+ struct socket *sock;
+ int err;
+
+ udp_conf.local_udp_port = htons(PFCP_PORT);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err)
+ return ERR_PTR(err);
+
+ tuncfg.sk_user_data = pfcp;
+ tuncfg.encap_rcv = pfcp_encap_recv;
+ tuncfg.encap_type = 1;
+
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+ return sock;
+}
+
+static int pfcp_add_sock(struct pfcp_dev *pfcp)
+{
+ pfcp->sock = pfcp_create_sock(pfcp);
+
+ return PTR_ERR_OR_ZERO(pfcp->sock);
+}
+
+static int pfcp_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+ struct pfcp_net *pn;
+ int err;
+
+ pfcp->net = net;
+
+ err = pfcp_add_sock(pfcp);
+ if (err) {
+ netdev_dbg(dev, "failed to add pfcp socket %d\n", err);
+ goto exit_err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ netdev_dbg(dev, "failed to register pfcp netdev %d\n", err);
+ goto exit_del_pfcp_sock;
+ }
+
+ pn = net_generic(dev_net(dev), pfcp_net_id);
+ list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+
+ netdev_dbg(dev, "registered new PFCP interface\n");
+
+ return 0;
+
+exit_del_pfcp_sock:
+ pfcp_del_sock(pfcp);
+exit_err:
+ pfcp->net = NULL;
+ return err;
+}
+
+static void pfcp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ list_del_rcu(&pfcp->list);
+ unregister_netdevice_queue(dev, head);
+}
+
+static struct rtnl_link_ops pfcp_link_ops __read_mostly = {
+ .kind = "pfcp",
+ .priv_size = sizeof(struct pfcp_dev),
+ .setup = pfcp_link_setup,
+ .newlink = pfcp_newlink,
+ .dellink = pfcp_dellink,
+};
+
+static int __net_init pfcp_net_init(struct net *net)
+{
+ struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+
+ INIT_LIST_HEAD(&pn->pfcp_dev_list);
+ return 0;
+}
+
+static void __net_exit pfcp_net_exit(struct net *net)
+{
+ struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+ struct pfcp_dev *pfcp;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ pfcp_dellink(pfcp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations pfcp_net_ops = {
+ .init = pfcp_net_init,
+ .exit = pfcp_net_exit,
+ .id = &pfcp_net_id,
+ .size = sizeof(struct pfcp_net),
+};
+
+static int __init pfcp_init(void)
+{
+ int err;
+
+ err = register_pernet_subsys(&pfcp_net_ops);
+ if (err)
+ goto exit_err;
+
+ err = rtnl_link_register(&pfcp_link_ops);
+ if (err)
+ goto exit_unregister_subsys;
+ return 0;
+
+exit_unregister_subsys:
+ unregister_pernet_subsys(&pfcp_net_ops);
+exit_err:
+ pr_err("loading PFCP module failed: err %d\n", err);
+ return err;
+}
+late_initcall(pfcp_init);
+
+static void __exit pfcp_exit(void)
+{
+ rtnl_link_unregister(&pfcp_link_ops);
+ unregister_pernet_subsys(&pfcp_net_ops);
+
+ pr_info("PFCP module unloaded\n");
+}
+module_exit(pfcp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wojciech Drewek <wojciech.drewek@intel.com>");
+MODULE_DESCRIPTION("Interface driver for PFCP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("pfcp");
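
The receive path above keys off the S flag in the first PFCP octet to decide whether the message carries a session endpoint identifier. A reduced sketch of that dispatch follows, with a simplified header layout; the layout and flag bit are assumptions for illustration, the canonical definitions being in include/net/pfcp.h.

/* Reduced sketch of the session-vs-node dispatch in pfcp_encap_recv();
 * the header layout and flag bit below are assumptions, not the
 * canonical include/net/pfcp.h definitions.
 */
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_PFCP_SEID_FLAG	BIT(0)

struct demo_pfcphdr {
	u8	flags;
	u8	message_type;
	__be16	message_length;
	/* session messages continue with a 64-bit SEID, node messages
	 * go straight to the sequence number */
};

static bool demo_pfcp_has_seid(const struct demo_pfcphdr *hdr)
{
	return hdr->flags & DEMO_PFCP_SEID_FLAG;
}

Thanks to the rtnl link registration, such a device would typically be created with something like `ip link add pfcp0 type pfcp`; whether that command works depends on iproute2 support for the new link kind, which is an assumption here.
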
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1df0595c5ba9..7fddc8306d82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -76,6 +76,11 @@ config SFP
comment "MII PHY device drivers"
+config AIR_EN8811H_PHY
+ tristate "Airoha EN8811H 2.5 Gigabit PHY"
+ help
+ Currently supports the Airoha EN8811H PHY.
+
config AMD_PHY
tristate "AMD and Altima PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 197acfa0b412..202ed7f450da 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,6 +34,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
+obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
ifdef CONFIG_AX88796B_RUST_PHY
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
new file mode 100644
index 000000000000..3cdc8c6b30b6
--- /dev/null
+++ b/drivers/net/phy/air_en8811h.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the Airoha EN8811H 2.5 Gigabit PHY.
+ *
+ * Limitations of the EN8811H:
+ * - Only full duplex supported
+ * - Forced speed (AN off) is not supported by hardware (100Mbps)
+ *
+ * Source originated from airoha's en8811h.c and en8811h.h v1.2.1
+ *
+ * Copyright (C) 2023 Airoha Technology Corp.
+ */
+
+#include <linux/phy.h>
+#include <linux/firmware.h>
+#include <linux/property.h>
+#include <linux/wordpart.h>
+#include <asm/unaligned.h>
+
+#define EN8811H_PHY_ID 0x03a2a411
+
+#define EN8811H_MD32_DM "airoha/EthMD32.dm.bin"
+#define EN8811H_MD32_DSP "airoha/EthMD32.DSP.bin"
+
+#define AIR_FW_ADDR_DM 0x00000000
+#define AIR_FW_ADDR_DSP 0x00100000
+
+/* MII Registers */
+#define AIR_AUX_CTRL_STATUS 0x1d
+#define AIR_AUX_CTRL_STATUS_SPEED_MASK GENMASK(4, 2)
+#define AIR_AUX_CTRL_STATUS_SPEED_100 0x4
+#define AIR_AUX_CTRL_STATUS_SPEED_1000 0x8
+#define AIR_AUX_CTRL_STATUS_SPEED_2500 0xc
+
+#define AIR_EXT_PAGE_ACCESS 0x1f
+#define AIR_PHY_PAGE_STANDARD 0x0000
+#define AIR_PHY_PAGE_EXTENDED_4 0x0004
+
+/* MII Registers Page 4 */
+#define AIR_BPBUS_MODE 0x10
+#define AIR_BPBUS_MODE_ADDR_FIXED 0x0000
+#define AIR_BPBUS_MODE_ADDR_INCR BIT(15)
+#define AIR_BPBUS_WR_ADDR_HIGH 0x11
+#define AIR_BPBUS_WR_ADDR_LOW 0x12
+#define AIR_BPBUS_WR_DATA_HIGH 0x13
+#define AIR_BPBUS_WR_DATA_LOW 0x14
+#define AIR_BPBUS_RD_ADDR_HIGH 0x15
+#define AIR_BPBUS_RD_ADDR_LOW 0x16
+#define AIR_BPBUS_RD_DATA_HIGH 0x17
+#define AIR_BPBUS_RD_DATA_LOW 0x18
+
+/* Registers on MDIO_MMD_VEND1 */
+#define EN8811H_PHY_FW_STATUS 0x8009
+#define EN8811H_PHY_READY 0x02
+
+#define AIR_PHY_MCU_CMD_1 0x800c
+#define AIR_PHY_MCU_CMD_1_MODE1 0x0
+#define AIR_PHY_MCU_CMD_2 0x800d
+#define AIR_PHY_MCU_CMD_2_MODE1 0x0
+#define AIR_PHY_MCU_CMD_3 0x800e
+#define AIR_PHY_MCU_CMD_3_MODE1 0x1101
+#define AIR_PHY_MCU_CMD_3_DOCMD 0x1100
+#define AIR_PHY_MCU_CMD_4 0x800f
+#define AIR_PHY_MCU_CMD_4_MODE1 0x0002
+#define AIR_PHY_MCU_CMD_4_INTCLR 0x00e4
+
+/* Registers on MDIO_MMD_VEND2 */
+#define AIR_PHY_LED_BCR 0x021
+#define AIR_PHY_LED_BCR_MODE_MASK GENMASK(1, 0)
+#define AIR_PHY_LED_BCR_TIME_TEST BIT(2)
+#define AIR_PHY_LED_BCR_CLK_EN BIT(3)
+#define AIR_PHY_LED_BCR_EXT_CTRL BIT(15)
+
+#define AIR_PHY_LED_DUR_ON 0x022
+
+#define AIR_PHY_LED_DUR_BLINK 0x023
+
+#define AIR_PHY_LED_ON(i) (0x024 + ((i) * 2))
+#define AIR_PHY_LED_ON_MASK (GENMASK(6, 0) | BIT(8))
+#define AIR_PHY_LED_ON_LINK1000 BIT(0)
+#define AIR_PHY_LED_ON_LINK100 BIT(1)
+#define AIR_PHY_LED_ON_LINK10 BIT(2)
+#define AIR_PHY_LED_ON_LINKDOWN BIT(3)
+#define AIR_PHY_LED_ON_FDX BIT(4) /* Full duplex */
+#define AIR_PHY_LED_ON_HDX BIT(5) /* Half duplex */
+#define AIR_PHY_LED_ON_FORCE_ON BIT(6)
+#define AIR_PHY_LED_ON_LINK2500 BIT(8)
+#define AIR_PHY_LED_ON_POLARITY BIT(14)
+#define AIR_PHY_LED_ON_ENABLE BIT(15)
+
+#define AIR_PHY_LED_BLINK(i) (0x025 + ((i) * 2))
+#define AIR_PHY_LED_BLINK_1000TX BIT(0)
+#define AIR_PHY_LED_BLINK_1000RX BIT(1)
+#define AIR_PHY_LED_BLINK_100TX BIT(2)
+#define AIR_PHY_LED_BLINK_100RX BIT(3)
+#define AIR_PHY_LED_BLINK_10TX BIT(4)
+#define AIR_PHY_LED_BLINK_10RX BIT(5)
+#define AIR_PHY_LED_BLINK_COLLISION BIT(6)
+#define AIR_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
+#define AIR_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
+#define AIR_PHY_LED_BLINK_FORCE_BLINK BIT(9)
+#define AIR_PHY_LED_BLINK_2500TX BIT(10)
+#define AIR_PHY_LED_BLINK_2500RX BIT(11)
+
+/* Registers on BUCKPBUS */
+#define EN8811H_2P5G_LPA 0x3b30
+#define EN8811H_2P5G_LPA_2P5G BIT(0)
+
+#define EN8811H_FW_VERSION 0x3b3c
+
+#define EN8811H_POLARITY 0xca0f8
+#define EN8811H_POLARITY_TX_NORMAL BIT(0)
+#define EN8811H_POLARITY_RX_REVERSE BIT(1)
+
+#define EN8811H_GPIO_OUTPUT 0xcf8b8
+#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+
+#define EN8811H_FW_CTRL_1 0x0f0018
+#define EN8811H_FW_CTRL_1_START 0x0
+#define EN8811H_FW_CTRL_1_FINISH 0x1
+#define EN8811H_FW_CTRL_2 0x800000
+#define EN8811H_FW_CTRL_2_LOADING BIT(11)
+
+/* Led definitions */
+#define EN8811H_LED_COUNT 3
+
+/* Default LED setup:
+ * GPIO5 <-> LED0 On: Link detected, blink Rx/Tx
+ * GPIO4 <-> LED1 On: Link detected at 2500 or 1000 Mbps
+ * GPIO3 <-> LED2 On: Link detected at 2500 or 100 Mbps
+ */
+#define AIR_DEFAULT_TRIGGER_LED0 (BIT(TRIGGER_NETDEV_LINK) | \
+ BIT(TRIGGER_NETDEV_RX) | \
+ BIT(TRIGGER_NETDEV_TX))
+#define AIR_DEFAULT_TRIGGER_LED1 (BIT(TRIGGER_NETDEV_LINK_2500) | \
+ BIT(TRIGGER_NETDEV_LINK_1000))
+#define AIR_DEFAULT_TRIGGER_LED2 (BIT(TRIGGER_NETDEV_LINK_2500) | \
+ BIT(TRIGGER_NETDEV_LINK_100))
+
+struct led {
+ unsigned long rules;
+ unsigned long state;
+};
+
+struct en8811h_priv {
+ u32 firmware_version;
+ bool mcu_needs_restart;
+ struct led led[EN8811H_LED_COUNT];
+};
+
+enum {
+ AIR_PHY_LED_STATE_FORCE_ON,
+ AIR_PHY_LED_STATE_FORCE_BLINK,
+};
+
+enum {
+ AIR_PHY_LED_DUR_BLINK_32MS,
+ AIR_PHY_LED_DUR_BLINK_64MS,
+ AIR_PHY_LED_DUR_BLINK_128MS,
+ AIR_PHY_LED_DUR_BLINK_256MS,
+ AIR_PHY_LED_DUR_BLINK_512MS,
+ AIR_PHY_LED_DUR_BLINK_1024MS,
+};
+
+enum {
+ AIR_LED_DISABLE,
+ AIR_LED_ENABLE,
+};
+
+enum {
+ AIR_ACTIVE_LOW,
+ AIR_ACTIVE_HIGH,
+};
+
+enum {
+ AIR_LED_MODE_DISABLE,
+ AIR_LED_MODE_USER_DEFINE,
+};
+
+#define AIR_PHY_LED_DUR_UNIT 1024
+#define AIR_PHY_LED_DUR (AIR_PHY_LED_DUR_UNIT << AIR_PHY_LED_DUR_BLINK_64MS)
+
+static const unsigned long en8811h_led_trig = BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+static int air_phy_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, AIR_EXT_PAGE_ACCESS);
+}
+
+static int air_phy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, AIR_EXT_PAGE_ACCESS, page);
+}
+
+static int __air_buckpbus_reg_write(struct phy_device *phydev,
+ u32 pbus_address, u32 pbus_data)
+{
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH,
+ upper_16_bits(pbus_data));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW,
+ lower_16_bits(pbus_data));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int air_buckpbus_reg_write(struct phy_device *phydev,
+ u32 pbus_address, u32 pbus_data)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_write(phydev, pbus_address,
+ pbus_data);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int __air_buckpbus_reg_read(struct phy_device *phydev,
+ u32 pbus_address, u32 *pbus_data)
+{
+ int pbus_data_low, pbus_data_high;
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ pbus_data_high = __phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH);
+ if (pbus_data_high < 0)
+ return pbus_data_high;
+
+ pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW);
+ if (pbus_data_low < 0)
+ return pbus_data_low;
+
+ *pbus_data = pbus_data_low | (pbus_data_high << 16);
+ return 0;
+}
+
+static int air_buckpbus_reg_read(struct phy_device *phydev,
+ u32 pbus_address, u32 *pbus_data)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_read(phydev, pbus_address, pbus_data);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int __air_buckpbus_reg_modify(struct phy_device *phydev,
+ u32 pbus_address, u32 mask, u32 set)
+{
+ int pbus_data_low, pbus_data_high;
+ u32 pbus_data_old, pbus_data_new;
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ pbus_data_high = __phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH);
+ if (pbus_data_high < 0)
+ return pbus_data_high;
+
+ pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW);
+ if (pbus_data_low < 0)
+ return pbus_data_low;
+
+ pbus_data_old = pbus_data_low | (pbus_data_high << 16);
+ pbus_data_new = (pbus_data_old & ~mask) | set;
+ if (pbus_data_new == pbus_data_old)
+ return 0;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH,
+ upper_16_bits(pbus_data_new));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW,
+ lower_16_bits(pbus_data_new));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int air_buckpbus_reg_modify(struct phy_device *phydev,
+ u32 pbus_address, u32 mask, u32 set)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_modify(phydev, pbus_address, mask,
+ set);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int __air_write_buf(struct phy_device *phydev, u32 address,
+ const struct firmware *fw)
+{
+ unsigned int offset;
+ int ret;
+ u16 val;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_INCR);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(address));
+ if (ret < 0)
+ return ret;
+
+ for (offset = 0; offset < fw->size; offset += 4) {
+ val = get_unaligned_le16(&fw->data[offset + 2]);
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_le16(&fw->data[offset]);
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int air_write_buf(struct phy_device *phydev, u32 address,
+ const struct firmware *fw)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_write_buf(phydev, address, fw);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
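
An aside on __air_write_buf() above: it streams the firmware as little-endian 32-bit words, writing the high 16-bit half before the low half; an equivalent way to express the per-word split, shown only as a hedged sketch:

/* Equivalent per-word split to what __air_write_buf() does: the high
 * half goes to AIR_BPBUS_WR_DATA_HIGH, the low half to _LOW.
 */
static void demo_split_fw_word(const u8 *data, u16 *high, u16 *low)
{
	u32 word = get_unaligned_le32(data);

	*high = upper_16_bits(word);
	*low = lower_16_bits(word);
}
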
+static int en8811h_wait_mcu_ready(struct phy_device *phydev)
+{
+ int ret, reg_value;
+
+ /* Because of the MDIO lock, we may have to wait for multiple firmware loads */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ EN8811H_PHY_FW_STATUS, reg_value,
+ reg_value == EN8811H_PHY_READY,
+ 20000, 7500000, true);
+ if (ret) {
+ phydev_err(phydev, "MCU not ready: 0x%x\n", reg_value);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int en8811h_load_firmware(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw1, *fw2;
+ int ret;
+
+ ret = request_firmware_direct(&fw1, EN8811H_MD32_DM, dev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_firmware_direct(&fw2, EN8811H_MD32_DSP, dev);
+ if (ret < 0)
+ goto en8811h_load_firmware_rel1;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_START);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2,
+ EN8811H_FW_CTRL_2_LOADING,
+ EN8811H_FW_CTRL_2_LOADING);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_write_buf(phydev, AIR_FW_ADDR_DM, fw1);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_write_buf(phydev, AIR_FW_ADDR_DSP, fw2);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2,
+ EN8811H_FW_CTRL_2_LOADING, 0);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_FINISH);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = en8811h_wait_mcu_ready(phydev);
+
+ air_buckpbus_reg_read(phydev, EN8811H_FW_VERSION,
+ &priv->firmware_version);
+ phydev_info(phydev, "MD32 firmware version: %08x\n",
+ priv->firmware_version);
+
+en8811h_load_firmware_out:
+ release_firmware(fw2);
+
+en8811h_load_firmware_rel1:
+ release_firmware(fw1);
+
+ if (ret < 0)
+ phydev_err(phydev, "Load firmware failed: %d\n", ret);
+
+ return ret;
+}
+
+static int en8811h_restart_mcu(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_START);
+ if (ret < 0)
+ return ret;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_FINISH);
+ if (ret < 0)
+ return ret;
+
+ return en8811h_wait_mcu_ready(phydev);
+}
+
+static int air_hw_led_on_set(struct phy_device *phydev, u8 index, bool on)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool changed;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (on)
+ changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+ else
+ changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+
+ changed |= (priv->led[index].rules != 0);
+
+ /* clear netdev trigger rules in case LED_OFF has been set */
+ if (!on)
+ priv->led[index].rules = 0;
+
+ if (changed)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_MASK,
+ on ? AIR_PHY_LED_ON_FORCE_ON : 0);
+
+ return 0;
+}
+
+static int air_hw_led_blink_set(struct phy_device *phydev, u8 index,
+ bool blinking)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool changed;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (blinking)
+ changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+ else
+ changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+
+ changed |= (priv->led[index].rules != 0);
+
+ if (changed)
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ AIR_PHY_LED_BLINK(index),
+ blinking ?
+ AIR_PHY_LED_BLINK_FORCE_BLINK : 0);
+ else
+ return 0;
+}
+
+static int air_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool blinking = false;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) {
+ blinking = true;
+ *delay_on = 50;
+ *delay_off = 50;
+ }
+
+ err = air_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ /* led-blink set, so switch led-on off */
+ err = air_hw_led_on_set(phydev, index, false);
+ if (err)
+ return err;
+
+ /* hw-control is off */
+ if (!!test_bit(AIR_PHY_LED_STATE_FORCE_BLINK, &priv->led[index].state))
+ priv->led[index].rules = 0;
+
+ return 0;
+}
+
+static int air_led_brightness_set(struct phy_device *phydev, u8 index,
+ enum led_brightness value)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ /* led-on set, so switch led-blink off */
+ err = air_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ err = air_hw_led_on_set(phydev, index, (value != LED_OFF));
+ if (err)
+ return err;
+
+ /* hw-control is off */
+ if (!!test_bit(AIR_PHY_LED_STATE_FORCE_ON, &priv->led[index].state))
+ priv->led[index].rules = 0;
+
+ return 0;
+}
+
+static int air_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ struct en8811h_priv *priv = phydev->priv;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ *rules = priv->led[index].rules;
+
+ return 0;
+};
+
+static int air_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ u16 on = 0, blink = 0;
+ int ret;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ priv->led[index].rules = rules;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ on |= AIR_PHY_LED_ON_FDX;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK10;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK1000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK2500;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX)) {
+ blink |= AIR_PHY_LED_BLINK_10RX |
+ AIR_PHY_LED_BLINK_100RX |
+ AIR_PHY_LED_BLINK_1000RX |
+ AIR_PHY_LED_BLINK_2500RX;
+ }
+
+ if (rules & BIT(TRIGGER_NETDEV_TX)) {
+ blink |= AIR_PHY_LED_BLINK_10TX |
+ AIR_PHY_LED_BLINK_100TX |
+ AIR_PHY_LED_BLINK_1000TX |
+ AIR_PHY_LED_BLINK_2500TX;
+ }
+
+ if (blink || on) {
+ /* switch hw-control on, so led-on and led-blink are off */
+ clear_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+ clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+ } else {
+ priv->led[index].rules = 0;
+ }
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_MASK, on);
+
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BLINK(index),
+ blink);
+};
+
+static int air_led_init(struct phy_device *phydev, u8 index, u8 state, u8 pol)
+{
+ int val = 0;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (state == AIR_LED_ENABLE)
+ val |= AIR_PHY_LED_ON_ENABLE;
+ else
+ val &= ~AIR_PHY_LED_ON_ENABLE;
+
+ if (pol == AIR_ACTIVE_HIGH)
+ val |= AIR_PHY_LED_ON_POLARITY;
+ else
+ val &= ~AIR_PHY_LED_ON_POLARITY;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_ENABLE |
+ AIR_PHY_LED_ON_POLARITY, val);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int air_leds_init(struct phy_device *phydev, int num, int dur, int mode)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int ret, i;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_BLINK,
+ dur);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_ON,
+ dur >> 1);
+ if (ret < 0)
+ return ret;
+
+ switch (mode) {
+ case AIR_LED_MODE_DISABLE:
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_MODE_MASK, 0);
+ if (ret < 0)
+ return ret;
+ break;
+ case AIR_LED_MODE_USER_DEFINE:
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_CLK_EN,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_CLK_EN);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ phydev_err(phydev, "LED mode %d is not supported\n", mode);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; ++i) {
+ ret = air_led_init(phydev, i, AIR_LED_ENABLE, AIR_ACTIVE_HIGH);
+ if (ret < 0) {
+ phydev_err(phydev, "LED%d init failed: %d\n", i, ret);
+ return ret;
+ }
+ air_led_hw_control_set(phydev, i, priv->led[i].rules);
+ }
+
+ return 0;
+}
+
+static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~en8811h_led_trig)
+ return -EOPNOTSUPP;
+
+ return 0;
+};
+
+static int en8811h_probe(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct en8811h_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = en8811h_load_firmware(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* mcu has just restarted after firmware load */
+ priv->mcu_needs_restart = false;
+
+ priv->led[0].rules = AIR_DEFAULT_TRIGGER_LED0;
+ priv->led[1].rules = AIR_DEFAULT_TRIGGER_LED1;
+ priv->led[2].rules = AIR_DEFAULT_TRIGGER_LED2;
+
+ /* MDIO_DEVS1/2 empty, so set mmds_present bits here */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_DISABLE);
+ if (ret < 0) {
+ phydev_err(phydev, "Failed to disable leds: %d\n", ret);
+ return ret;
+ }
+
+ /* Configure led gpio pins as output */
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT,
+ EN8811H_GPIO_OUTPUT_345,
+ EN8811H_GPIO_OUTPUT_345);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int en8811h_config_init(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 pbus_value;
+ int ret;
+
+ /* If restart happened in .probe(), no need to restart now */
+ if (priv->mcu_needs_restart) {
+ ret = en8811h_restart_mcu(phydev);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* On subsequent calls to .config_init() the MCU needs a restart */
+ priv->mcu_needs_restart = true;
+ }
+
+ /* Select mode 1, the only mode supported.
+ * Configures the SerDes for 2500Base-X with rate adaptation
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_1,
+ AIR_PHY_MCU_CMD_1_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_2,
+ AIR_PHY_MCU_CMD_2_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3,
+ AIR_PHY_MCU_CMD_3_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4,
+ AIR_PHY_MCU_CMD_4_MODE1);
+ if (ret < 0)
+ return ret;
+
+ /* Serdes polarity */
+ pbus_value = 0;
+ if (device_property_read_bool(dev, "airoha,pnswap-rx"))
+ pbus_value |= EN8811H_POLARITY_RX_REVERSE;
+ else
+ pbus_value &= ~EN8811H_POLARITY_RX_REVERSE;
+ if (device_property_read_bool(dev, "airoha,pnswap-tx"))
+ pbus_value &= ~EN8811H_POLARITY_TX_NORMAL;
+ else
+ pbus_value |= EN8811H_POLARITY_TX_NORMAL;
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_POLARITY,
+ EN8811H_POLARITY_RX_REVERSE |
+ EN8811H_POLARITY_TX_NORMAL, pbus_value);
+ if (ret < 0)
+ return ret;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_USER_DEFINE);
+ if (ret < 0) {
+ phydev_err(phydev, "Failed to initialize leds: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int en8811h_get_features(struct phy_device *phydev)
+{
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int en8811h_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ return RATE_MATCH_PAUSE;
+}
+
+static int en8811h_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret;
+ u32 adv;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ phydev_warn(phydev, "Disabling autoneg is not supported\n");
+ return -EINVAL;
+ }
+
+ adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return __genphy_config_aneg(phydev, changed);
+}
+
+static int en8811h_read_status(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ u32 pbus_value;
+ int ret, val;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Get link partner 2.5GBASE-T ability from vendor register */
+ ret = air_buckpbus_reg_read(phydev, EN8811H_2P5G_LPA, &pbus_value);
+ if (ret < 0)
+ return ret;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ pbus_value & EN8811H_2P5G_LPA_2P5G);
+
+ if (phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ if (!phydev->link)
+ return 0;
+
+ /* Get real speed from vendor register */
+ val = phy_read(phydev, AIR_AUX_CTRL_STATUS);
+ if (val < 0)
+ return val;
+ switch (val & AIR_AUX_CTRL_STATUS_SPEED_MASK) {
+ case AIR_AUX_CTRL_STATUS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ case AIR_AUX_CTRL_STATUS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case AIR_AUX_CTRL_STATUS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ }
+
+ /* Firmware before version 24011202 has no vendor register 2P5G_LPA.
+ * Assume link partner advertised it if connected at 2500Mbps.
+ */
+ if (priv->firmware_version < 0x24011202) {
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ phydev->speed == SPEED_2500);
+ }
+
+ /* Only supports full duplex */
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int en8811h_clear_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3,
+ AIR_PHY_MCU_CMD_3_DOCMD);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4,
+ AIR_PHY_MCU_CMD_4_INTCLR);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static irqreturn_t en8811h_handle_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = en8811h_clear_intr(phydev);
+ if (ret < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static struct phy_driver en8811h_driver[] = {
+{
+ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID),
+ .name = "Airoha EN8811H",
+ .probe = en8811h_probe,
+ .get_features = en8811h_get_features,
+ .config_init = en8811h_config_init,
+ .get_rate_matching = en8811h_get_rate_matching,
+ .config_aneg = en8811h_config_aneg,
+ .read_status = en8811h_read_status,
+ .config_intr = en8811h_clear_intr,
+ .handle_interrupt = en8811h_handle_interrupt,
+ .led_hw_is_supported = en8811h_led_hw_is_supported,
+ .read_page = air_phy_read_page,
+ .write_page = air_phy_write_page,
+ .led_blink_set = air_led_blink_set,
+ .led_brightness_set = air_led_brightness_set,
+ .led_hw_control_set = air_led_hw_control_set,
+ .led_hw_control_get = air_led_hw_control_get,
+} };
+
+module_phy_driver(en8811h_driver);
+
+static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+ { PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, en8811h_tbl);
+MODULE_FIRMWARE(EN8811H_MD32_DM);
+MODULE_FIRMWARE(EN8811H_MD32_DSP);
+
+MODULE_DESCRIPTION("Airoha EN8811H PHY drivers");
+MODULE_AUTHOR("Airoha");
+MODULE_AUTHOR("Eric Woudstra <ericwouds@gmail.com>");
+MODULE_LICENSE("GPL");
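
To make the LED plumbing above concrete, here is how the default LED1 trigger (link at 2500 or 1000 Mbps) expands through the same rules-to-bits logic as air_led_hw_control_set(): only the matching AIR_PHY_LED_ON_* bits get set and no blink sources are requested. A hedged sketch reusing the driver's definitions:

/* Illustrative expansion of AIR_DEFAULT_TRIGGER_LED1 using the same
 * checks as air_led_hw_control_set(); the result is the "on" bitmap
 * written to AIR_PHY_LED_ON(1), with an all-zero blink bitmap.
 */
static u16 demo_led1_on_bits(void)
{
	unsigned long rules = AIR_DEFAULT_TRIGGER_LED1;
	u16 on = 0;

	if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
		on |= AIR_PHY_LED_ON_LINK1000;
	if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
		on |= AIR_PHY_LED_ON_LINK2500;

	return on;	/* == AIR_PHY_LED_ON_LINK1000 | AIR_PHY_LED_ON_LINK2500 */
}
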
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 71bfddb8f453..d34cdec47636 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -28,6 +28,7 @@
#define PHY_ID_AQR412 0x03a1b712
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR114C 0x31c31c22
#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
@@ -963,6 +964,25 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
+ .name = "Aquantia AQR114C",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
.name = "Aquantia AQR813",
.probe = aqr107_probe,
@@ -999,6 +1019,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index c3426a17e6d0..efeb643c1373 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -140,10 +140,11 @@ struct dp83822_private {
u16 fx_sd_enable;
u8 cfg_dac_minus;
u8 cfg_dac_plus;
+ struct ethtool_wolinfo wol;
};
-static int dp83822_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
+static int dp83822_config_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
struct net_device *ndev = phydev->attached_dev;
u16 value;
@@ -197,10 +198,25 @@ static int dp83822_set_wol(struct phy_device *phydev,
MII_DP83822_WOL_CFG, value);
} else {
return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_WOL_CFG, DP83822_WOL_EN);
+ MII_DP83822_WOL_CFG,
+ DP83822_WOL_EN |
+ DP83822_WOL_MAGIC_EN |
+ DP83822_WOL_SECURE_ON);
}
}
+static int dp83822_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ ret = dp83822_config_wol(phydev, wol);
+ if (!ret)
+ memcpy(&dp83822->wol, wol, sizeof(*wol));
+ return ret;
+}
+
static void dp83822_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -346,13 +362,6 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int dp8382x_disable_wol(struct phy_device *phydev)
-{
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
- DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
- DP83822_WOL_SECURE_ON);
-}
-
static int dp83822_read_status(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
@@ -496,7 +505,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
}
}
- return dp8382x_disable_wol(phydev);
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp83826_config_rmii_mode(struct phy_device *phydev)
@@ -575,12 +584,14 @@ static int dp83826_config_init(struct phy_device *phydev)
return ret;
}
- return dp8382x_disable_wol(phydev);
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp8382x_config_init(struct phy_device *phydev)
{
- return dp8382x_disable_wol(phydev);
+ struct dp83822_private *dp83822 = phydev->priv;
+
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp83822_phy_reset(struct phy_device *phydev)
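
The dp83822 change above swaps the old unconditional WoL disable for a cached copy of the last accepted ethtool request, which config_init() then replays after every reset. The same cache-and-reapply pattern in isolation, with hypothetical names:

/* Generic sketch of the cache-and-reapply pattern used above; all
 * demo_* names are hypothetical.
 */
struct demo_priv {
	struct ethtool_wolinfo wol;	/* last accepted request */
};

static int demo_apply_wol(struct phy_device *phydev,
			  struct ethtool_wolinfo *wol);	/* hw programming, not shown */

static int demo_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	struct demo_priv *priv = phydev->priv;
	int ret = demo_apply_wol(phydev, wol);

	if (!ret)
		priv->wol = *wol;	/* remember what the user asked for */
	return ret;
}

static int demo_config_init(struct phy_device *phydev)
{
	struct demo_priv *priv = phydev->priv;

	/* reprogram the remembered settings after reset */
	return demo_apply_wol(phydev, &priv->wol);
}
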
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index fa8c6fdcf301..d7aaefb5226b 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
- dp83869->mode == DP83869_RGMII_100_BASE) {
+ dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 6b4bd9883304..c812f16eaa3a 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -12,6 +12,8 @@
#include <linux/hwmon.h>
#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -129,6 +131,49 @@ static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
{ MDIO_MMD_PCS, 0xfe05, 0x755c },
};
+static const struct mmd_val mv88q222x_revb1_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PCS, 0xffe3, 0x7000 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb2_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb1_revb2_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xffe4, 0x0071 },
+ { MDIO_MMD_PCS, 0xffe4, 0x0001 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x0048 },
+ { MDIO_MMD_PMAPMD, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfcad, 0x030c },
+ { MDIO_MMD_PCS, 0x8032, 0x6001 },
+ { MDIO_MMD_PCS, 0xfdff, 0x05a5 },
+ { MDIO_MMD_PCS, 0xfdec, 0xdbaf },
+ { MDIO_MMD_PCS, 0xfcab, 0x1054 },
+ { MDIO_MMD_PCS, 0xfcac, 0x1483 },
+ { MDIO_MMD_PCS, 0x8033, 0xc801 },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xfbba, 0x0cb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0x0c4a },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+ { MDIO_MMD_PCS, 0xfa20, 0x002a },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -687,31 +732,72 @@ static int mv88q222x_soft_reset(struct phy_device *phydev)
return 0;
}
-static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+static int mv88q222x_write_mmd_vals(struct phy_device *phydev,
+ const struct mmd_val *vals, size_t len)
{
- int ret, i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
- mv88q222x_revb0_init_seq0[i].regnum,
- mv88q222x_revb0_init_seq0[i].val);
+ for (; len; vals++, len--) {
+ ret = phy_write_mmd(phydev, vals->devad, vals->regnum,
+ vals->val);
if (ret < 0)
return ret;
}
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq0));
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 10000);
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
- mv88q222x_revb0_init_seq1[i].regnum,
- mv88q222x_revb0_init_seq1[i].val);
- if (ret < 0)
- return ret;
- }
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq1));
+ if (ret < 0)
+ return ret;
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
+{
+ bool is_rev_b1 = phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB1;
+ int ret;
+
+ if (is_rev_b1)
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb1_init_seq0));
+ else
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb2_init_seq0));
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb1_revb2_init_seq1));
+ if (ret < 0)
+ return ret;
return mv88q2xxx_config_init(phydev);
}
+static int mv88q222x_config_init(struct phy_device *phydev)
+{
+ if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
+ return mv88q222x_revb0_config_init(phydev);
+ else
+ return mv88q222x_revb1_revb2_config_init(phydev);
+}
+
static int mv88q222x_cable_test_start(struct phy_device *phydev)
{
int ret;
@@ -810,14 +896,15 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
},
{
- PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .phy_id = MARVELL_PHY_ID_88Q2220,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
.probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
- .config_init = mv88q222x_revb0_config_init,
+ .config_init = mv88q222x_config_init,
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q222x_soft_reset,
.config_intr = mv88q2xxx_config_intr,
@@ -836,7 +923,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
+ { MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 42ed013385bf..b89fbffa6a93 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -279,10 +279,29 @@
#define MII_VCT7_CTRL_METERS BIT(10)
#define MII_VCT7_CTRL_CENTIMETERS 0
+#define MII_VCT_TXPINS 0x1A
+#define MII_VCT_RXPINS 0x1B
+#define MII_VCT_SR 0x1C
+#define MII_VCT_TXPINS_ENVCT BIT(15)
+#define MII_VCT_TXRXPINS_VCTTST GENMASK(14, 13)
+#define MII_VCT_TXRXPINS_VCTTST_SHIFT 13
+#define MII_VCT_TXRXPINS_VCTTST_OK 0
+#define MII_VCT_TXRXPINS_VCTTST_SHORT 1
+#define MII_VCT_TXRXPINS_VCTTST_OPEN 2
+#define MII_VCT_TXRXPINS_VCTTST_FAIL 3
+#define MII_VCT_TXRXPINS_AMPRFLN GENMASK(12, 8)
+#define MII_VCT_TXRXPINS_AMPRFLN_SHIFT 8
+#define MII_VCT_TXRXPINS_DISTRFLN GENMASK(7, 0)
+#define MII_VCT_TXRXPINS_DISTRFLN_MAX 0xff
+
+#define M88E3082_PAIR_A BIT(0)
+#define M88E3082_PAIR_B BIT(1)
+
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
#define NB_FIBER_STATS 1
+#define NB_STAT_MAX 3
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
@@ -295,14 +314,37 @@ struct marvell_hw_stat {
u8 bits;
};
-static struct marvell_hw_stat marvell_hw_stats[] = {
+static const struct marvell_hw_stat marvell_hw_stats[] = {
{ "phy_receive_errors_copper", 0, 21, 16},
{ "phy_idle_errors", 0, 10, 8 },
{ "phy_receive_errors_fiber", 1, 21, 16},
};
+static_assert(ARRAY_SIZE(marvell_hw_stats) <= NB_STAT_MAX);
+
+/* "simple" stat list + corresponding marvell_get_*_simple functions are used
+ * on PHYs without a page register
+ */
+struct marvell_hw_stat_simple {
+ const char *string;
+ u8 reg;
+ u8 bits;
+};
+
+static const struct marvell_hw_stat_simple marvell_hw_stats_simple[] = {
+ { "phy_receive_errors", 21, 16},
+};
+
+static_assert(ARRAY_SIZE(marvell_hw_stats_simple) <= NB_STAT_MAX);
+
+enum {
+ M88E3082_VCT_OFF,
+ M88E3082_VCT_PHASE1,
+ M88E3082_VCT_PHASE2,
+};
+
struct marvell_priv {
- u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+ u64 stats[NB_STAT_MAX];
char *hwmon_name;
struct device *hwmon_dev;
bool cable_test_tdr;
@@ -310,6 +352,7 @@ struct marvell_priv {
u32 last;
u32 step;
s8 pair;
+ u8 vct_phase;
};
static int marvell_read_page(struct phy_device *phydev)
@@ -1953,6 +1996,11 @@ static int marvell_get_sset_count(struct phy_device *phydev)
return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
}
+static int marvell_get_sset_count_simple(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(marvell_hw_stats_simple);
+}
+
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
int count = marvell_get_sset_count(phydev);
@@ -1964,6 +2012,17 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
}
}
+static void marvell_get_strings_simple(struct phy_device *phydev, u8 *data)
+{
+ int count = marvell_get_sset_count_simple(phydev);
+ int i;
+
+ for (i = 0; i < count; i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ marvell_hw_stats_simple[i].string, ETH_GSTRING_LEN);
+ }
+}
+
static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
@@ -1983,6 +2042,25 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
return ret;
}
+static u64 marvell_get_stat_simple(struct phy_device *phydev, int i)
+{
+ struct marvell_hw_stat_simple stat = marvell_hw_stats_simple[i];
+ struct marvell_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1993,6 +2071,16 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static void marvell_get_stats_simple(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int count = marvell_get_sset_count_simple(phydev);
+ int i;
+
+ for (i = 0; i < count; i++)
+ data[i] = marvell_get_stat_simple(phydev, i);
+}
+
static int m88e1510_loopback(struct phy_device *phydev, bool enable)
{
int err;
@@ -2417,6 +2505,274 @@ static int marvell_vct7_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int m88e3082_vct_cable_test_start(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int ret;
+
+ /* This PHY needs some magic workarounds described in the VCT manual.
+ */
+ ret = phy_write(phydev, 29, 0x0003);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x6440);
+ if (ret < 0)
+ return ret;
+
+ if (priv->vct_phase == M88E3082_VCT_PHASE1) {
+ ret = phy_write(phydev, 29, 0x000a);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0002);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write(phydev, MII_BMCR,
+ BMCR_RESET | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, MII_VCT_TXPINS, MII_VCT_TXPINS_ENVCT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 29, 0x0003);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0);
+ if (ret < 0)
+ return ret;
+
+ if (priv->vct_phase == M88E3082_VCT_OFF) {
+ priv->vct_phase = M88E3082_VCT_PHASE1;
+ priv->pair = 0;
+
+ return 0;
+ }
+
+ ret = phy_write(phydev, 29, 0x000a);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0);
+ if (ret < 0)
+ return ret;
+
+ priv->vct_phase = M88E3082_VCT_PHASE2;
+
+ return 0;
+}
+
+static int m88e3082_vct_cable_test_report_trans(int result, u8 distance)
+{
+ switch (result) {
+ case MII_VCT_TXRXPINS_VCTTST_OK:
+ if (distance == MII_VCT_TXRXPINS_DISTRFLN_MAX)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ return ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH;
+ case MII_VCT_TXRXPINS_VCTTST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case MII_VCT_TXRXPINS_VCTTST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static u32 m88e3082_vct_distrfln_2_cm(u8 distrfln)
+{
+ if (distrfln < 24)
+ return 0;
+
+ /* Original function for meters: y = 0.7861x - 18.862 */
+ return (7861 * distrfln - 188620) / 100;
+}
+
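
As a quick sanity check of the conversion above: a raw distrfln reading of 100 gives (7861 * 100 - 188620) / 100 = 5974 cm, matching the floating-point form 0.7861 * 100 - 18.862 ≈ 59.75 m (the reading of 100 is an example value, not a measurement):

/* Worked example for m88e3082_vct_distrfln_2_cm(); 100 is an example
 * raw reading, not a measured value.
 */
static const u32 demo_fault_cm = (7861 * 100 - 188620) / 100;	/* 5974 cm */
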
+static int m88e3082_vct_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ u8 tx_vcttst_res, rx_vcttst_res, tx_distrfln, rx_distrfln;
+ struct marvell_priv *priv = phydev->priv;
+ int ret, tx_result, rx_result;
+ bool done_phase = true;
+
+ *finished = false;
+
+ ret = phy_read(phydev, MII_VCT_TXPINS);
+ if (ret < 0)
+ return ret;
+ else if (ret & MII_VCT_TXPINS_ENVCT)
+ return 0;
+
+ tx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ tx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ ret = phy_read(phydev, MII_VCT_RXPINS);
+ if (ret < 0)
+ return ret;
+
+ rx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ rx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ *finished = true;
+
+ switch (priv->vct_phase) {
+ case M88E3082_VCT_PHASE1:
+ tx_result = m88e3082_vct_cable_test_report_trans(tx_vcttst_res,
+ tx_distrfln);
+ rx_result = m88e3082_vct_cable_test_report_trans(rx_vcttst_res,
+ rx_distrfln);
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ tx_result);
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ rx_result);
+
+ if (tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) {
+ done_phase = false;
+ priv->pair |= M88E3082_PAIR_A;
+ } else if (tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_A;
+ u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ if (rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) {
+ done_phase = false;
+ priv->pair |= M88E3082_PAIR_B;
+ } else if (rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_B;
+ u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ break;
+ case M88E3082_VCT_PHASE2:
+ if (priv->pair & M88E3082_PAIR_A &&
+ tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN &&
+ tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_A;
+ u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+ if (priv->pair & M88E3082_PAIR_B &&
+ rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN &&
+ rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_B;
+ u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!done_phase) {
+ *finished = false;
+ return m88e3082_vct_cable_test_start(phydev);
+ }
+ if (*finished)
+ priv->vct_phase = M88E3082_VCT_OFF;
+ return 0;
+}
+
+static int m88e1111_vct_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = marvell_cable_test_start_common(phydev);
+ if (ret)
+ return ret;
+
+ /* This PHY needs the magic workaround sequence described in the VCT
+ * manual.
+ */
+ ret = phy_write(phydev, 29, 0x0018);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00c2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00ca);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00c2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_paged(phydev, MII_MARVELL_COPPER_PAGE, MII_VCT_SR,
+ MII_VCT_TXPINS_ENVCT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 29, 0x0018);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0042);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u32 m88e1111_vct_distrfln_2_cm(u8 distrfln)
+{
+ if (distrfln < 36)
+ return 0;
+
+ /* Original function for meters: y = 0.8018x - 28.751 */
+ return (8018 * distrfln - 287510) / 100;
+}
+
+static int m88e1111_vct_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ u8 vcttst_res, distrfln;
+ int ret, result;
+
+ *finished = false;
+
+ /* Each pair uses one page: A-0, B-1, C-2, D-3 */
+ for (u8 i = 0; i < 4; i++) {
+ ret = phy_read_paged(phydev, i, MII_VCT_SR);
+ if (ret < 0)
+ return ret;
+ else if (i == 0 && ret & MII_VCT_TXPINS_ENVCT)
+ return 0;
+
+ distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ result = m88e3082_vct_cable_test_report_trans(vcttst_res,
+ distrfln);
+ ethnl_cable_test_result(phydev, i, result);
+
+ if (distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u32 cm = m88e1111_vct_distrfln_2_cm(distrfln);
+
+ ethnl_cable_test_fault_length(phydev, i, cm);
+ }
+ }
+
+ *finished = true;
+ return 0;
+}
+
#ifdef CONFIG_HWMON
struct marvell_hwmon_ops {
int (*config)(struct phy_device *phydev);
@@ -3290,6 +3646,20 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
+ .phy_id = MARVELL_PHY_ID_88E3082,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E308X/88E609X Family",
+ /* PHY_BASIC_FEATURES */
+ .probe = marvell_probe,
+ .config_init = marvell_config_init,
+ .aneg_done = marvell_aneg_done,
+ .read_status = marvell_read_status,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = m88e3082_vct_cable_test_start,
+ .cable_test_get_status = m88e3082_vct_cable_test_get_status,
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1112,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
@@ -3314,6 +3684,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
@@ -3329,6 +3700,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
+ .cable_test_start = m88e1111_vct_cable_test_start,
+ .cable_test_get_status = m88e1111_vct_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1111_FINISAR,
@@ -3422,6 +3795,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
.config_init = m88e1145_config_init,
.config_aneg = m88e1101_config_aneg,
@@ -3436,6 +3810,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
+ .cable_test_start = m88e1111_vct_cable_test_start,
+ .cable_test_get_status = m88e1111_vct_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -3610,6 +3986,21 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
+ .phy_id = MARVELL_PHY_ID_88E6250_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6250 Family",
+ /* PHY_BASIC_FEATURES */
+ .probe = marvell_probe,
+ .aneg_done = marvell_aneg_done,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .get_sset_count = marvell_get_sset_count_simple,
+ .get_strings = marvell_get_strings_simple,
+ .get_stats = marvell_get_stats_simple,
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6341 Family",
@@ -3742,6 +4133,7 @@ module_phy_driver(marvell_drivers);
static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK },
@@ -3756,6 +4148,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6250_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6393_FAMILY, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 0f3a1538a8b8..f4f9412d0cd7 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
+ MTK_PHY_LED_ON_LINK100 |\
+ MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
+ MTK_PHY_LED_BLINK_100RX |\
+ MTK_PHY_LED_BLINK_1000RX)
+#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
+ MTK_PHY_LED_BLINK_100TX |\
+ MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;
- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK10)) ||
- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;
- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);
if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);
- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);
return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;
if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= MTK_PHY_LED_BLINK_10RX |
- MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_1000RX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+ MTK_PHY_LED_BLINK_RX;
}
if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= MTK_PHY_LED_BLINK_10TX |
- MTK_PHY_LED_BLINK_100TX |
- MTK_PHY_LED_BLINK_1000TX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+ MTK_PHY_LED_BLINK_TX;
}
if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK1000,
+ MTK_PHY_LED_ON_LINK,
on);
if (ret)
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index a493ae01b267..54ea64a37ab3 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -23,9 +23,6 @@ static int mtk_gephy_write_page(struct phy_device *phydev, int page)
static void mtk_gephy_config_init(struct phy_device *phydev)
{
- /* Disable EEE */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
-
/* Enable HW auto downshift */
phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8b8634600c51..13e30ea7eec5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -167,6 +167,9 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_COMMON_INT_ENA 0x0204
+#define PTP_COMMON_INT_ENA_GPIO_CAP_EN BIT(2)
+
#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
@@ -179,6 +182,27 @@
#define PTP_CLOCK_READ_NS_HI 0x022C
#define PTP_CLOCK_READ_NS_LO 0x022D
+#define PTP_GPIO_SEL 0x0230
+#define PTP_GPIO_SEL_GPIO_SEL(pin) ((pin) << 8)
+#define PTP_GPIO_CAP_MAP_LO 0x0232
+
+#define PTP_GPIO_CAP_EN 0x0233
+#define PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(gpio) BIT(gpio)
+#define PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(gpio) (BIT(gpio) << 8)
+
+#define PTP_GPIO_RE_LTC_SEC_HI_CAP 0x0235
+#define PTP_GPIO_RE_LTC_SEC_LO_CAP 0x0236
+#define PTP_GPIO_RE_LTC_NS_HI_CAP 0x0237
+#define PTP_GPIO_RE_LTC_NS_LO_CAP 0x0238
+#define PTP_GPIO_FE_LTC_SEC_HI_CAP 0x0239
+#define PTP_GPIO_FE_LTC_SEC_LO_CAP 0x023A
+#define PTP_GPIO_FE_LTC_NS_HI_CAP 0x023B
+#define PTP_GPIO_FE_LTC_NS_LO_CAP 0x023C
+
+#define PTP_GPIO_CAP_STS 0x023D
+#define PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(gpio) BIT(gpio)
+#define PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(gpio) (BIT(gpio) << 8)
+
#define PTP_OPERATING_MODE 0x0241
#define PTP_OPERATING_MODE_STANDALONE_ BIT(0)
@@ -272,6 +296,67 @@
#define PS_TO_REG 200
#define FIFO_SIZE 8
+#define LAN8814_PTP_GPIO_NUM 24
+#define LAN8814_PTP_PEROUT_NUM 2
+#define LAN8814_PTP_EXTTS_NUM 3
+
+#define LAN8814_BUFFER_TIME 2
+
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS 13
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS 12
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS 11
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS 10
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS 9
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS 8
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US 7
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US 6
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US 5
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US 4
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US 3
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US 2
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS 1
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS 0
+
+#define LAN8814_GPIO_EN1 0x20
+#define LAN8814_GPIO_EN2 0x21
+#define LAN8814_GPIO_DIR1 0x22
+#define LAN8814_GPIO_DIR2 0x23
+#define LAN8814_GPIO_BUF1 0x24
+#define LAN8814_GPIO_BUF2 0x25
+
+#define LAN8814_GPIO_EN_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_EN1 : LAN8814_GPIO_EN2)
+#define LAN8814_GPIO_EN_BIT(pin) BIT(pin)
+#define LAN8814_GPIO_DIR_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_DIR1 : LAN8814_GPIO_DIR2)
+#define LAN8814_GPIO_DIR_BIT(pin) BIT(pin)
+#define LAN8814_GPIO_BUF_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_BUF1 : LAN8814_GPIO_BUF2)
+#define LAN8814_GPIO_BUF_BIT(pin) BIT(pin)
+
+#define LAN8814_EVENT_A 0
+#define LAN8814_EVENT_B 1
+
+#define LAN8814_PTP_GENERAL_CONFIG 0x0201
+#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) \
+ ((event) ? GENMASK(11, 8) : GENMASK(7, 4))
+#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, value) \
+ (((value) & GENMASK(3, 0)) << (4 + ((event) << 2)))
+#define LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) \
+ ((event) ? BIT(2) : BIT(0))
+#define LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event) \
+ ((event) ? BIT(3) : BIT(1))
+
+#define LAN8814_PTP_CLOCK_TARGET_SEC_HI(event) ((event) ? 0x21F : 0x215)
+#define LAN8814_PTP_CLOCK_TARGET_SEC_LO(event) ((event) ? 0x220 : 0x216)
+#define LAN8814_PTP_CLOCK_TARGET_NS_HI(event) ((event) ? 0x221 : 0x217)
+#define LAN8814_PTP_CLOCK_TARGET_NS_LO(event) ((event) ? 0x222 : 0x218)
+
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event) ((event) ? 0x223 : 0x219)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event) ((event) ? 0x224 : 0x21A)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event) ((event) ? 0x225 : 0x21B)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event) ((event) ? 0x226 : 0x21C)
+
/* Delay used to get the second part from the LTC */
#define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC)
@@ -304,13 +389,9 @@ struct lan8814_shared_priv {
struct phy_device *phydev;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc *pin_config;
- /* Reference counter to how many ports in the package are enabling the
- * timestamping
- */
- u8 ref;
-
- /* Lock for ptp_clock and ref */
+ /* Lock for ptp_clock */
struct mutex shared_lock;
};
@@ -2426,11 +2507,10 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
{
struct kszphy_ptp_priv *ptp_priv =
container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
- struct phy_device *phydev = ptp_priv->phydev;
- struct lan8814_shared_priv *shared = phydev->shared->priv;
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
+ int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2477,29 +2557,20 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ }
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
lan8814_config_ts_intr(ptp_priv->phydev, true);
else
lan8814_config_ts_intr(ptp_priv->phydev, false);
- mutex_lock(&shared->shared_lock);
- if (config->rx_filter != HWTSTAMP_FILTER_NONE)
- shared->ref++;
- else
- shared->ref--;
-
- if (shared->ref)
- lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_ENABLE_);
- else
- lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_DISABLE_);
- mutex_unlock(&shared->shared_lock);
-
/* In case of multiple starts and stops, these need to be cleared */
list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
list_del(&rx_ts->list);
@@ -2537,7 +2608,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
}
}
-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2547,7 +2618,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
ptp_header = ptp_parse_header(skb, type);
skb_pull_inline(skb, ETH_HLEN);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2559,7 +2634,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
bool ret = false;
u16 skb_sig;
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ return ret;
/* Iterate over all RX timestamps and match it with the received skbs */
spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
@@ -2666,6 +2742,29 @@ static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci,
return 0;
}
+static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
+ s64 start_sec, u32 start_nsec)
+{
+ /* Set the start time */
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
+ lower_16_bits(start_sec));
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
+ upper_16_bits(start_sec));
+
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
+ lower_16_bits(start_nsec));
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
+ upper_16_bits(start_nsec) & 0x3fff);
+}
+
+static void lan8814_ptp_update_target(struct phy_device *phydev, time64_t sec)
+{
+ lan8814_ptp_set_target(phydev, LAN8814_EVENT_A,
+ sec + LAN8814_BUFFER_TIME, 0);
+ lan8814_ptp_set_target(phydev, LAN8814_EVENT_B,
+ sec + LAN8814_BUFFER_TIME, 0);
+}
+
static void lan8814_ptp_clock_step(struct phy_device *phydev,
s64 time_step_ns)
{
@@ -2687,6 +2786,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
nano_seconds -= 1000000000;
}
lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
+ lan8814_ptp_update_target(phydev, set_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
@@ -2702,6 +2802,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
}
nano_seconds -= nano_seconds_step;
lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
+ lan8814_ptp_update_target(phydev, set_seconds);
return;
}
@@ -2738,6 +2839,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
}
while (seconds) {
+ u32 nsec;
+
if (seconds > 0) {
u32 adjustment_value = (u32)seconds;
u16 adjustment_value_lo, adjustment_value_hi;
@@ -2754,6 +2857,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
PTP_LTC_STEP_ADJ_DIR_ |
adjustment_value_hi);
seconds -= ((s32)adjustment_value);
+
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
+ set_seconds -= adjustment_value;
+ lan8814_ptp_update_target(phydev, set_seconds);
} else {
u32 adjustment_value = (u32)(-seconds);
u16 adjustment_value_lo, adjustment_value_hi;
@@ -2769,6 +2876,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
adjustment_value_hi);
seconds += ((s32)adjustment_value);
+
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
+ set_seconds += adjustment_value;
+ lan8814_ptp_update_target(phydev, set_seconds);
}
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
@@ -2834,7 +2945,336 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
return 0;
}
-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
+ s64 period_sec, u32 period_nsec)
+{
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
+ lower_16_bits(period_sec));
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
+ upper_16_bits(period_sec));
+
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
+ lower_16_bits(period_nsec));
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
+ upper_16_bits(period_nsec) & 0x3fff);
+}
+
+static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
+ int pulse_width)
+{
+ u16 val;
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
+ /* Set the pulse width of the event */
+ val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
+ /* Make sure that the target clock is incremented each time the local
+ * time reaches or passes it
+ */
+ val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
+ val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
+ /* Set the polarity high */
+ val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+}
+
+static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
+{
+ u16 val;
+
+ /* Set the target far enough in the future to effectively disable it */
+ lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
+
+ /* And then reload once it reaches the target */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
+ val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+}
+
+static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
+{
+ u16 val;
+
+ /* Disable the pin's alternate function,
+ * 1: plain gpio,
+ * 0: alternate function
+ */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ val |= LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ val &= ~LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
+ val &= ~LAN8814_GPIO_BUF_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+}
+
+static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
+{
+ int val;
+
+ /* Set as gpio output */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ val |= LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+
+ /* Select the alternate function, 0: alternate function, 1: plain gpio */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ val &= ~LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+
+ /* Set buffer type to push pull */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
+ val |= LAN8814_GPIO_BUF_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+}
+
+static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ struct timespec64 ts_on, ts_period;
+ s64 on_nsec, period_nsec;
+ int pulse_width;
+ int pin, event;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&shared->shared_lock);
+ event = rq->perout.index;
+ pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
+ if (pin < 0 || pin >= LAN8814_PTP_PEROUT_NUM) {
+ mutex_unlock(&shared->shared_lock);
+ return -EBUSY;
+ }
+
+ if (!on) {
+ lan8814_ptp_perout_off(phydev, pin);
+ lan8814_ptp_disable_event(phydev, event);
+ mutex_unlock(&shared->shared_lock);
+ return 0;
+ }
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ on_nsec = timespec64_to_ns(&ts_on);
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+ period_nsec = timespec64_to_ns(&ts_period);
+
+ if (period_nsec < 200) {
+ pr_warn_ratelimited("%s: perout period too small, minimum is 200 nsec\n",
+ phydev_name(phydev));
+ mutex_unlock(&shared->shared_lock);
+ return -EOPNOTSUPP;
+ }
+
+ if (on_nsec >= period_nsec) {
+ pr_warn_ratelimited("%s: pulse width must be smaller than period\n",
+ phydev_name(phydev));
+ mutex_unlock(&shared->shared_lock);
+ return -EINVAL;
+ }
+
+ switch (on_nsec) {
+ case 200000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS;
+ break;
+ case 100000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS;
+ break;
+ case 50000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS;
+ break;
+ case 10000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS;
+ break;
+ case 5000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS;
+ break;
+ case 1000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS;
+ break;
+ case 500000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US;
+ break;
+ case 100000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US;
+ break;
+ case 50000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US;
+ break;
+ case 10000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US;
+ break;
+ case 5000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US;
+ break;
+ case 1000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US;
+ break;
+ case 500:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS;
+ break;
+ case 100:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
+ break;
+ default:
+ pr_warn_ratelimited("%s: Use default duty cycle of 100ns\n",
+ phydev_name(phydev));
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
+ break;
+ }
+
+ /* Configure to pulse every period */
+ lan8814_ptp_enable_event(phydev, event, pulse_width);
+ lan8814_ptp_set_target(phydev, event, rq->perout.start.sec,
+ rq->perout.start.nsec);
+ lan8814_ptp_set_reload(phydev, event, rq->perout.period.sec,
+ rq->perout.period.nsec);
+ lan8814_ptp_perout_on(phydev, pin);
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
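For reference only (not part of the patch), the periodic output added here is driven from user space through the PTP character device. A minimal sketch, assuming the LAN8814 PHC is exposed as /dev/ptp1 (the index is an assumption) and using pin 0 / channel 0 (event A) with one of the supported pulse widths:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT, .chan = 0 };
	struct ptp_perout_request req;
	int fd = open("/dev/ptp1", O_RDWR);	/* PHC index is an assumption */

	if (fd < 0)
		return 1;

	/* Route pin 0 to periodic-output channel 0 (event A) */
	if (ioctl(fd, PTP_PIN_SETFUNC2, &pin))
		perror("PTP_PIN_SETFUNC2");

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* channel 0 */
	req.period.sec = 1;		/* one pulse per second */
	req.flags = PTP_PEROUT_DUTY_CYCLE;
	req.on.nsec = 100000000;	/* 100 ms, one of the supported widths */
	/* start.sec/nsec left at 0 for brevity; a real user would pick a
	 * start time in the future
	 */

	if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
		perror("PTP_PEROUT_REQUEST2");

	close(fd);
	return 0;
}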
+static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
+{
+ u16 tmp;
+
+ /* Set as gpio input */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+
+ /* Map the pin to ltc pin 0 of the capture map registers */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
+ tmp |= pin;
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+
+ /* Enable capture on the edges of the ltc pin */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
+ if (flags & PTP_RISING_EDGE)
+ tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
+ if (flags & PTP_FALLING_EDGE)
+ tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+
+ /* Enable the GPIO capture interrupt in the PTP common interrupt enable register */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
+ tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
+ lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+}
+
+static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
+{
+ u16 tmp;
+
+ /* Set as gpio out */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ tmp |= LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+
+ /* Select the alternate function, 0: alternate function, 1: gpio */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ tmp &= ~LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
+
+ /* Clear the mapping of the pin to capture input 0 */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
+ tmp &= ~GENMASK(3, 0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+
+ /* Disable capture on both of the edges */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
+ tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
+ tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+
+ /* Disable the GPIO capture interrupt in the PTP common interrupt enable register */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
+ tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
+ lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+}
+
+static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ int pin;
+
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_EXTTS_EDGES |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
+ return -EINVAL;
+
+ mutex_lock(&shared->shared_lock);
+ if (on)
+ lan8814_ptp_extts_on(phydev, pin, rq->extts.flags);
+ else
+ lan8814_ptp_extts_off(phydev, pin);
+
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
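Likewise illustrative only: external timestamping uses the same character device. The sketch below again assumes /dev/ptp1 and that GPIO pin 3 (LAN8814_PTP_EXTTS_NUM) is routed to EXTTS channel 0, matching the lan8814_ptpci_verify() constraint added in this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 3, .func = PTP_PF_EXTTS, .chan = 0 };
	struct ptp_extts_request req = { .index = 0,
					 .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE };
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp1", O_RDWR);	/* PHC index is an assumption */

	if (fd < 0)
		return 1;

	/* Route GPIO pin 3 to external-timestamp channel 0, then arm it */
	if (ioctl(fd, PTP_PIN_SETFUNC2, &pin) ||
	    ioctl(fd, PTP_EXTTS_REQUEST2, &req)) {
		perror("ioctl");
		return 1;
	}

	/* Each captured edge is delivered as a ptp_extts_event on read() */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event %u at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}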
+static int lan8814_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan8814_ptp_perout(ptpci, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan8814_ptp_extts(ptpci, rq, on);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lan8814_ptpci_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ /* Only pins 0 and 1 can generate perout signals. And for pin 0
+ * there is only chan 0 (event A) and for pin 1 there is only
+ * chan 1 (event B)
+ */
+ if (pin >= LAN8814_PTP_PEROUT_NUM || pin != chan)
+ return -1;
+ break;
+ case PTP_PF_EXTTS:
+ if (pin != LAN8814_PTP_EXTTS_NUM)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2842,7 +3282,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
type = ptp_classify_raw(skb);
ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2856,7 +3300,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
- lan8814_get_sig_tx(skb, &skb_sig);
+ if (!lan8814_get_sig_tx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
continue;
@@ -2910,7 +3355,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
continue;
@@ -2993,6 +3439,64 @@ static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
}
}
+static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
+{
+ struct phy_device *phydev = shared->phydev;
+ struct ptp_clock_event ptp_event = {0};
+ unsigned long nsec;
+ s64 sec;
+ u16 tmp;
+
+ /* Select capture input 0: whichever GPIO pin was used, it was mapped
+ * to LTC capture input 0
+ */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
+ tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
+
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
+ if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
+ !(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
+ return -1;
+
+ if (tmp & BIT(0)) {
+ sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
+ sec <<= 16;
+ sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
+
+ nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec <<= 16;
+ nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ } else {
+ sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
+ sec <<= 16;
+ sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
+
+ nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec <<= 16;
+ nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_LO_CAP);
+ }
+
+ ptp_event.index = 0;
+ ptp_event.timestamp = ktime_set(sec, nsec);
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(shared->ptp_clock, &ptp_event);
+
+ return 0;
+}
+
+static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
+{
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+ int ret;
+
+ mutex_lock(&shared->shared_lock);
+ ret = lan8814_gpio_process_cap(shared);
+ mutex_unlock(&shared->shared_lock);
+
+ return ret;
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -3097,6 +3601,9 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
+ if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
+ ret = IRQ_HANDLED;
+
return ret;
}
@@ -3193,19 +3700,39 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
/* Initialise shared lock for clock */
mutex_init(&shared->shared_lock);
+ shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
+ LAN8814_PTP_GPIO_NUM,
+ sizeof(*shared->pin_config),
+ GFP_KERNEL);
+ if (!shared->pin_config)
+ return -ENOMEM;
+
+ for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
+ struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
+
+ memset(ptp_pin, 0, sizeof(*ptp_pin));
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
shared->ptp_clock_info.owner = THIS_MODULE;
snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
shared->ptp_clock_info.max_adj = 31249999;
shared->ptp_clock_info.n_alarm = 0;
- shared->ptp_clock_info.n_ext_ts = 0;
- shared->ptp_clock_info.n_pins = 0;
+ shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
+ shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
shared->ptp_clock_info.pps = 0;
- shared->ptp_clock_info.pin_config = NULL;
+ shared->ptp_clock_info.pin_config = shared->pin_config;
+ shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
shared->ptp_clock_info.getcrosststamp = NULL;
+ shared->ptp_clock_info.enable = lan8814_ptpci_enable;
+ shared->ptp_clock_info.verify = lan8814_ptpci_verify;
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
@@ -3230,6 +3757,9 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
PTP_OPERATING_MODE_STANDALONE_);
+ /* Enable PTP so the LTC clock runs for PTP and GPIO 1PPS operation */
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+
return 0;
}
@@ -4659,7 +5189,8 @@ static int lan8841_suspend(struct phy_device *phydev)
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+ if (ptp_priv->ptp_clock)
+ ptp_cancel_worker_sync(ptp_priv->ptp_clock);
return genphy_suspend(phydev);
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 503fd7c40523..994471fad833 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1042,6 +1042,21 @@ static void phylink_pcs_poll_start(struct phylink *pl)
mod_timer(&pl->link_poll, jiffies + HZ);
}
+int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs)
+{
+ int ret = 0;
+
+ /* Signal to PCS driver that MAC requires RX clock for init */
+ if (pl->config->mac_requires_rxc)
+ pcs->rxc_always_on = true;
+
+ if (pcs->ops->pcs_pre_init)
+ ret = pcs->ops->pcs_pre_init(pcs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phylink_pcs_pre_init);
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -1823,6 +1838,9 @@ static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
interfaces);
}
+ phylink_dbg(pl, "PHY %s doesn't supply possible interfaces\n",
+ phydev_name(phy));
+
/* Check whether we would use rate matching for the proposed interface
* mode.
*/
@@ -1923,6 +1941,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
phy_interface_t interface)
{
+ u32 flags = 0;
+
if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(interface) && !pl->sfp_bus)))
@@ -1931,7 +1951,10 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
if (pl->phydev)
return -EBUSY;
- return phy_attach_direct(pl->netdev, phy, 0, interface);
+ if (pl->config->mac_requires_rxc)
+ flags |= PHY_F_RXC_ALWAYS_ON;
+
+ return phy_attach_direct(pl->netdev, phy, flags, interface);
}
/**
@@ -2034,6 +2057,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
pl->link_config.interface = pl->link_interface;
}
+ if (pl->config->mac_requires_rxc)
+ flags |= PHY_F_RXC_ALWAYS_ON;
+
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
phy_device_free(phy_dev);
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 4717c59d51d0..c8f83e5f78ab 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -426,7 +426,8 @@ static int at803x_hibernation_mode_config(struct phy_device *phydev)
/* The default after hardware reset is hibernation mode enabled. After
* software reset, the value is retained.
*/
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE) &&
+ !(phydev->dev_flags & PHY_F_RXC_ALWAYS_ON))
return 0;
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
@@ -797,7 +798,7 @@ static int at8031_parse_dt(struct phy_device *phydev)
static int at8031_probe(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
+ struct at803x_priv *priv;
int mode_cfg;
int ccr;
int ret;
@@ -806,6 +807,8 @@ static int at8031_probe(struct phy_device *phydev)
if (ret)
return ret;
+ priv = phydev->priv;
+
/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
* options.
*/
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 1fa70427b2a2..7ab41f95dae5 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -54,6 +54,25 @@
RTL8201F_ISR_LINK)
#define RTL8201F_IER 0x13
+#define RTL822X_VND1_SERDES_OPTION 0x697a
+#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0)
+#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII 0
+#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX 2
+
+#define RTL822X_VND1_SERDES_CTRL3 0x7580
+#define RTL822X_VND1_SERDES_CTRL3_MODE_MASK GENMASK(5, 0)
+#define RTL822X_VND1_SERDES_CTRL3_MODE_SGMII 0x02
+#define RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX 0x16
+
+/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
+ * is set; they cannot be accessed by C45-over-C22.
+ */
+#define RTL822X_VND2_GBCR 0xa412
+
+#define RTL822X_VND2_GANLPAR 0xa414
+
+#define RTL822X_VND2_PHYSR 0xa434
+
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -64,6 +83,9 @@
#define RTL_GENERIC_PHYID 0x001cc800
#define RTL_8211FVD_PHYID 0x001cc878
+#define RTL_8221B_VB_CG 0x001cc849
+#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8251B 0x001cc862
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -531,17 +553,8 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
}
/* get actual speed to cover the downshift case */
-static int rtlgen_get_speed(struct phy_device *phydev)
+static void rtlgen_decode_speed(struct phy_device *phydev, int val)
{
- int val;
-
- if (!phydev->link)
- return 0;
-
- val = phy_read_paged(phydev, 0xa43, 0x12);
- if (val < 0)
- return val;
-
switch (val & RTLGEN_SPEED_MASK) {
case 0x0000:
phydev->speed = SPEED_10;
@@ -564,19 +577,26 @@ static int rtlgen_get_speed(struct phy_device *phydev)
default:
break;
}
-
- return 0;
}
static int rtlgen_read_status(struct phy_device *phydev)
{
- int ret;
+ int ret, val;
ret = genphy_read_status(phydev);
if (ret < 0)
return ret;
- return rtlgen_get_speed(phydev);
+ if (!phydev->link)
+ return 0;
+
+ val = phy_read_paged(phydev, 0xa43, 0x12);
+ if (val < 0)
+ return val;
+
+ rtlgen_decode_speed(phydev, val);
+
+ return 0;
}
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
@@ -659,6 +679,84 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
+static int rtl822xb_config_init(struct phy_device *phydev)
+{
+ bool has_2500, has_sgmii;
+ u16 mode;
+ int ret;
+
+ has_2500 = test_bit(PHY_INTERFACE_MODE_2500BASEX,
+ phydev->host_interfaces) ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX;
+
+ has_sgmii = test_bit(PHY_INTERFACE_MODE_SGMII,
+ phydev->host_interfaces) ||
+ phydev->interface == PHY_INTERFACE_MODE_SGMII;
+
+ /* fill in possible interfaces */
+ __assign_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces,
+ has_2500);
+ __assign_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces,
+ has_sgmii);
+
+ if (!has_2500 && !has_sgmii)
+ return 0;
+
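+ /* Two SerDes option modes are used below: 2500base-X only, where the
+ * SerDes stays at 2500base-X and slower copper speeds are rate matched
+ * with pause frames, and 2500base-X/SGMII, where the SerDes follows
+ * the negotiated copper speed (see rtl822xb_update_interface()).
+ */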
+ /* determine SerDes option mode */
+ if (has_2500 && !has_sgmii) {
+ mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ } else {
+ mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII;
+ phydev->rate_matching = RATE_MATCH_NONE;
+ }
+
+ /* the following sequence with magic numbers sets up the SerDes
+ * option mode
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1,
+ RTL822X_VND1_SERDES_OPTION,
+ RTL822X_VND1_SERDES_OPTION_MODE_MASK,
+ mode);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f10, 0xd455);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
+}
+
+static int rtl822xb_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int val;
+
+ /* Only rate matching at 2500base-x */
+ if (iface != PHY_INTERFACE_MODE_2500BASEX)
+ return RATE_MATCH_NONE;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_OPTION);
+ if (val < 0)
+ return val;
+
+ if ((val & RTL822X_VND1_SERDES_OPTION_MODE_MASK) ==
+ RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX)
+ return RATE_MATCH_PAUSE;
+
+ /* RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII */
+ return RATE_MATCH_NONE;
+}
+
static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
@@ -695,10 +793,30 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
return __genphy_config_aneg(phydev, ret);
}
-static int rtl822x_read_status(struct phy_device *phydev)
+static void rtl822xb_update_interface(struct phy_device *phydev)
{
- int ret;
+ int val;
+
+ if (!phydev->link)
+ return;
+
+ /* Change interface according to serdes mode */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_CTRL3);
+ if (val < 0)
+ return;
+ switch (val & RTL822X_VND1_SERDES_CTRL3_MODE_MASK) {
+ case RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case RTL822X_VND1_SERDES_CTRL3_MODE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ }
+}
+
+static int rtl822x_read_status(struct phy_device *phydev)
+{
if (phydev->autoneg == AUTONEG_ENABLE) {
int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
@@ -709,11 +827,99 @@ static int rtl822x_read_status(struct phy_device *phydev)
lpadv);
}
- ret = genphy_read_status(phydev);
+ return rtlgen_read_status(phydev);
+}
+
+static int rtl822xb_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822x_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ rtl822xb_update_interface(phydev);
+
+ return 0;
+}
+
+static int rtl822x_c45_get_features(struct phy_device *phydev)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int rtl822x_c45_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret, val;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+
+ /* Use the vendor register, as C45 has no standardized support for 1000BaseT */
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR,
+ ADVERTISE_1000FULL, val);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int rtl822x_c45_read_status(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = genphy_c45_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Use the vendor register, as C45 has no standardized support for 1000BaseT */
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_GANLPAR);
+ if (val < 0)
+ return val;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ }
+
+ if (!phydev->link)
+ return 0;
+
+ /* Read actual speed from vendor register. */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_PHYSR);
+ if (val < 0)
+ return val;
+
+ rtlgen_decode_speed(phydev, val);
+
+ return 0;
+}
+
+static int rtl822xb_c45_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822x_c45_read_status(phydev);
if (ret < 0)
return ret;
- return rtlgen_get_speed(phydev);
+ rtl822xb_update_interface(phydev);
+
+ return 0;
}
static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
@@ -739,6 +945,35 @@ static int rtl8226_match_phy_device(struct phy_device *phydev)
rtlgen_supports_2_5gbps(phydev);
}
+static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
+ bool is_c45)
+{
+ if (phydev->is_c45)
+ return is_c45 && (id == phydev->c45_ids.device_ids[1]);
+ else
+ return !is_c45 && (id == phydev->phy_id);
+}
+
+static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
+}
+
+static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
+}
+
+static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+}
+
+static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+}
+
static int rtlgen_resume(struct phy_device *phydev)
{
int ret = genphy_resume(phydev);
@@ -749,6 +984,15 @@ static int rtlgen_resume(struct phy_device *phydev)
return ret;
}
+static int rtlgen_c45_resume(struct phy_device *phydev)
+{
+ int ret = genphy_c45_pma_resume(phydev);
+
+ msleep(20);
+
+ return ret;
+}
+
static int rtl9000a_config_init(struct phy_device *phydev)
{
phydev->autoneg = AUTONEG_DISABLE;
@@ -988,7 +1232,9 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8226B_RTL8221B 2.5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
@@ -1010,32 +1256,58 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc849),
- .name = "RTL8221B-VB-CG 2.5Gbps PHY",
+ .match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc84a),
- .name = "RTL8221B-VM-CG 2.5Gbps PHY",
+ .match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
+ .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
+ .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc862),
.name = "RTL8251B 5Gbps PHY",
.get_features = rtl822x_get_features,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index db39dec7f247..2f44fc51848f 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -355,7 +355,7 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
* modes mask.
*/
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes)
+ const unsigned long *link_modes)
{
if (phylink_test(link_modes, 25000baseCR_Full) ||
phylink_test(link_modes, 25000baseKR_Full) ||
@@ -373,7 +373,8 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
if (phylink_test(link_modes, 5000baseT_Full))
return PHY_INTERFACE_MODE_5GBASER;
- if (phylink_test(link_modes, 2500baseX_Full))
+ if (phylink_test(link_modes, 2500baseX_Full) ||
+ phylink_test(link_modes, 2500baseT_Full))
return PHY_INTERFACE_MODE_2500BASEX;
if (phylink_test(link_modes, 1000baseT_Half) ||
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index f75c9eb3958e..3f9cbd797fd6 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_10gt(struct sfp *sfp)
+static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
{
- sfp_fixup_10gbaset_30m(sfp);
sfp_fixup_rollball(sfp);
- /* The RollBall fixup is not enough for FS modules, the AQR chip inside
+ /* The RollBall fixup is not enough for FS modules, the PHY chip inside
* them does not return 0xffff for the PHY ID registers in all MMDs for a
* while during initialization. They need a 4 second wait before accessing
* the PHY.
*/
sfp->module_t_wait = msecs_to_jiffies(4000);
}
+static void sfp_fixup_fs_10gt(struct sfp *sfp)
+{
+ sfp_fixup_10gbaset_30m(sfp);
+ sfp_fixup_fs_2_5gt(sfp);
+}
+
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
{
/* Ignore the TX_FAULT and LOS signals on this module.
@@ -468,10 +473,15 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
sfp_fixup_nokia),
- // Fiberstore SFP-10G-T doesn't identify as copper, and uses the
- // Rollball protocol to talk to the PHY.
+ // Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball
+ // protocol to talk to the PHY and needs a 4 second wait before probing
+ // the PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ // Fiberstore SFP-2.5G-T uses the Rollball protocol to talk to the PHY
+ // and needs a 4 second wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
// NRZ in their EEPROM
SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
@@ -488,9 +498,6 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
sfp_fixup_ignore_tx_fault),
- // FS 2.5G Base-T
- SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
-
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
// 2500MBd NRZ in their EEPROM
SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
@@ -502,10 +509,14 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
+ SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
+
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball),
SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball),
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fe380fe196e7..0a65b6d690fe 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1357,7 +1357,7 @@ static struct net *ppp_nl_get_link_net(const struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
- return ppp->ppp_net;
+ return READ_ONCE(ppp->ppp_net);
}
static struct rtnl_link_ops ppp_link_ops __read_mostly = {
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 687dec49c1e1..577ea904b3d9 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -5,6 +5,7 @@
menuconfig PSE_CONTROLLER
bool "Ethernet Power Sourcing Equipment Support"
+ depends on REGULATOR
help
Generic Power Sourcing Equipment Controller support.
@@ -14,10 +15,29 @@ if PSE_CONTROLLER
config PSE_REGULATOR
tristate "Regulator based PSE controller"
- depends on REGULATOR || COMPILE_TEST
help
This module provides support for simple regulator based Ethernet Power
Sourcing Equipment without automatic classification support. For
example for basic implementation of PoDL (802.3bu) specification.
+config PSE_PD692X0
+ tristate "PD692X0 PSE controller"
+ depends on I2C
+ select FW_UPLOAD
+ help
+ This module provides support for PD692x0 regulator based Ethernet
+ Power Sourcing Equipment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pd692x0.
+
+config PSE_TPS23881
+ tristate "TPS23881 PSE controller"
+ depends on I2C
+ help
+ This module provides support for TPS23881 regulator based Ethernet
+ Power Sourcing Equipment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps23881.
endif
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
index 1b8aa4c70f0b..9d2898b36737 100644
--- a/drivers/net/pse-pd/Makefile
+++ b/drivers/net/pse-pd/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
+obj-$(CONFIG_PSE_PD692X0) += pd692x0.o
+obj-$(CONFIG_PSE_TPS23881) += tps23881.o
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
new file mode 100644
index 000000000000..6488b941703c
--- /dev/null
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Microchip PD692X0 PoE PSE Controller driver (I2C bus)
+ *
+ * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define PD692X0_PSE_NAME "pd692x0_pse"
+
+#define PD692X0_MAX_PIS 48
+#define PD692X0_MAX_MANAGERS 12
+#define PD692X0_MAX_MANAGER_PORTS 8
+#define PD692X0_MAX_HW_PORTS (PD692X0_MAX_MANAGERS * PD692X0_MAX_MANAGER_PORTS)
+
+#define PD69200_BT_PROD_VER 24
+#define PD69210_BT_PROD_VER 26
+#define PD69220_BT_PROD_VER 29
+
+#define PD692X0_FW_MAJ_VER 3
+#define PD692X0_FW_MIN_VER 5
+#define PD692X0_FW_PATCH_VER 5
+
+enum pd692x0_fw_state {
+ PD692X0_FW_UNKNOWN,
+ PD692X0_FW_OK,
+ PD692X0_FW_BROKEN,
+ PD692X0_FW_NEED_UPDATE,
+ PD692X0_FW_PREPARE,
+ PD692X0_FW_WRITE,
+ PD692X0_FW_COMPLETE,
+};
+
+struct pd692x0_msg {
+ u8 key;
+ u8 echo;
+ u8 sub[3];
+ u8 data[8];
+ __be16 chksum;
+} __packed;
+
+struct pd692x0_msg_ver {
+ u8 prod;
+ u8 maj_sw_ver;
+ u8 min_sw_ver;
+ u8 pa_sw_ver;
+ u8 param;
+ u8 build;
+};
+
+enum {
+ PD692X0_KEY_CMD,
+ PD692X0_KEY_PRG,
+ PD692X0_KEY_REQ,
+ PD692X0_KEY_TLM,
+ PD692X0_KEY_TEST,
+ PD692X0_KEY_REPORT = 0x52
+};
+
+enum {
+ PD692X0_MSG_RESET,
+ PD692X0_MSG_GET_SYS_STATUS,
+ PD692X0_MSG_GET_SW_VER,
+ PD692X0_MSG_SET_TMP_PORT_MATRIX,
+ PD692X0_MSG_PRG_PORT_MATRIX,
+ PD692X0_MSG_SET_PORT_PARAM,
+ PD692X0_MSG_GET_PORT_STATUS,
+ PD692X0_MSG_DOWNLOAD_CMD,
+
+ /* add new message above here */
+ PD692X0_MSG_CNT
+};
+
+struct pd692x0_priv {
+ struct i2c_client *client;
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+
+ enum pd692x0_fw_state fw_state;
+ struct fw_upload *fwl;
+ bool cancel_request;
+
+ u8 msg_id;
+ bool last_cmd_key;
+ unsigned long last_cmd_key_time;
+
+ enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS];
+};
+
+/* Template list of communication messages. The non-null bytes defined here
+ * constitute the fixed portion of the messages. The remaining bytes will
+ * be configured later within the functions. Refer to the "PD692x0 BT Serial
+ * Communication Protocol User Guide" for comprehensive details on messages
+ * content.
+ */
+static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
+ [PD692X0_MSG_RESET] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x55, 0x00},
+ .data = {0x55, 0x00, 0x55, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_SYS_STATUS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0xd0, 0x4e},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_SW_VER] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0x1e, 0x21},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_TMP_PORT_MATRIX] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x05, 0x43},
+ .data = { 0, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_PRG_PORT_MATRIX] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x43, 0x4e},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_PORT_PARAM] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x05, 0xc0},
+ .data = { 0, 0xff, 0xff, 0xff,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_STATUS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc1},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_DOWNLOAD_CMD] = {
+ .key = PD692X0_KEY_PRG,
+ .sub = {0xff, 0x99, 0x15},
+ .data = {0x16, 0x16, 0x99, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+};
+
+static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo)
+{
+ u8 *data = (u8 *)msg;
+ u16 chksum = 0;
+ int i;
+
+ msg->echo = echo++;
+ if (echo == 0xff)
+ echo = 0;
+
+ for (i = 0; i < sizeof(*msg) - sizeof(msg->chksum); i++)
+ chksum += data[i];
+
+ msg->chksum = cpu_to_be16(chksum);
+
+ return echo;
+}
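+
+/* Editor's illustration (not part of the original patch): a worked example
+ * of the wire format implied by struct pd692x0_msg and pd692x0_build_msg().
+ * Every message is 15 bytes: key, echo, sub[3], data[8] plus a big-endian
+ * checksum that is the 16-bit sum of the first 13 bytes. A RESET request
+ * with echo 0x01 would therefore be sent as
+ *
+ *   00 01 07 55 00 55 00 55 4e 4e 4e 4e 4e 02 8d
+ *
+ * since 0x01 + 0x07 + 3 * 0x55 + 5 * 0x4e = 0x028d.
+ */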
+
+static int pd692x0_send_msg(struct pd692x0_priv *priv, struct pd692x0_msg *msg)
+{
+ const struct i2c_client *client = priv->client;
+ int ret;
+
+ if (msg->key == PD692X0_KEY_CMD && priv->last_cmd_key) {
+ int cmd_msleep;
+
+ cmd_msleep = 30 - jiffies_to_msecs(jiffies - priv->last_cmd_key_time);
+ if (cmd_msleep > 0)
+ msleep(cmd_msleep);
+ }
+
+ /* Add echo and checksum bytes to the message */
+ priv->msg_id = pd692x0_build_msg(msg, priv->msg_id);
+
+ ret = i2c_master_send(client, (u8 *)msg, sizeof(*msg));
+ if (ret != sizeof(*msg))
+ return -EIO;
+
+ return 0;
+}
+
+static int pd692x0_reset(struct pd692x0_priv *priv)
+{
+ const struct i2c_client *client = priv->client;
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_RESET];
+ ret = pd692x0_send_msg(priv, &msg);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to reset the controller (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ msleep(30);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ return ret < 0 ? ret : -EIO;
+
+ /* Is the reply a successful report message */
+ if (buf.key != PD692X0_KEY_REPORT || buf.sub[0] || buf.sub[1])
+ return -EIO;
+
+ msleep(300);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ return ret < 0 ? ret : -EIO;
+
+	/* Check that the boot status reports no error */
+ if (buf.key != 0x03 || buf.echo != 0xff || buf.sub[0] & 0x1) {
+ dev_err(&client->dev, "PSE controller error\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static bool pd692x0_try_recv_msg(const struct i2c_client *client,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ /* Wait 30ms before readback as mandated by the protocol */
+ msleep(30);
+
+ memset(buf, 0, sizeof(*buf));
+ i2c_master_recv(client, (u8 *)buf, sizeof(*buf));
+ if (buf->key)
+ return 0;
+
+ msleep(100);
+
+ memset(buf, 0, sizeof(*buf));
+ i2c_master_recv(client, (u8 *)buf, sizeof(*buf));
+ if (buf->key)
+ return 0;
+
+ return 1;
+}
+
+/* Implementation of I2C communication, specifically addressing scenarios
+ * involving communication loss. Refer to the "Synchronization During
+ * Communication Loss" section in the Communication Protocol document for
+ * further details.
+ */
+static int pd692x0_recv_msg(struct pd692x0_priv *priv,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ const struct i2c_client *client = priv->client;
+ int ret;
+
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success;
+
+ dev_warn(&client->dev,
+ "Communication lost, rtnl is locked until communication is back!");
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success2;
+
+ msleep(10000);
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success2;
+
+ return pd692x0_reset(priv);
+
+out_success2:
+ dev_warn(&client->dev, "Communication is back, rtnl is unlocked!");
+out_success:
+ if (msg->key == PD692X0_KEY_CMD) {
+ priv->last_cmd_key = true;
+ priv->last_cmd_key_time = jiffies;
+ } else {
+ priv->last_cmd_key = false;
+ }
+
+ return 0;
+}
+
+static int pd692x0_sendrecv_msg(struct pd692x0_priv *priv,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_recv_msg(priv, msg, buf);
+ if (ret)
+ return ret;
+
+ if (msg->echo != buf->echo) {
+ dev_err(dev,
+ "Wrong match in message ID, expect %d received %d.\n",
+ msg->echo, buf->echo);
+ return -EIO;
+ }
+
+	/* If the reply is a report message, check that it reports success */
+ if (buf->key == PD692X0_KEY_REPORT &&
+ (buf->sub[0] || buf->sub[1])) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct pd692x0_priv *to_pd692x0_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct pd692x0_priv, pcdev);
+}
+
+static int pd692x0_fw_unavailable(struct pd692x0_priv *priv)
+{
+ switch (priv->fw_state) {
+ case PD692X0_FW_OK:
+ return 0;
+ case PD692X0_FW_PREPARE:
+ case PD692X0_FW_WRITE:
+ case PD692X0_FW_COMPLETE:
+ dev_err(&priv->client->dev, "Firmware update in progress!\n");
+ return -EBUSY;
+ case PD692X0_FW_BROKEN:
+ case PD692X0_FW_NEED_UPDATE:
+ default:
+ dev_err(&priv->client->dev,
+ "Firmware issue. Please update it!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int pd692x0_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ return 0;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.data[0] = 0x1;
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+
+ return 0;
+}
+
+static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED)
+ return 0;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.data[0] = 0x0;
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf.sub[1]) {
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ return 1;
+ } else {
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ return 0;
+ }
+}
+
+static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
+ unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ /* Compare Port Status (Communication Protocol Document par. 7.1) */
+ if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ else if (buf.sub[0] == 0x12)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ else
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ if (buf.sub[1])
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ priv->admin_state[id] = status->c33_admin_state;
+
+ return 0;
+}
+
+static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct pd692x0_msg msg, buf = {0};
+ struct pd692x0_msg_ver ver = {0};
+ int ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SW_VER];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get PSE version (%pe)\n", ERR_PTR(ret));
+ return ver;
+ }
+
+ /* Extract version from the message */
+ ver.prod = buf.sub[2];
+ ver.maj_sw_ver = (buf.data[0] << 8 | buf.data[1]) / 100;
+ ver.min_sw_ver = ((buf.data[0] << 8 | buf.data[1]) / 10) % 10;
+ ver.pa_sw_ver = (buf.data[0] << 8 | buf.data[1]) % 10;
+ ver.param = buf.data[2];
+ ver.build = buf.data[3];
+
+ return ver;
+}
+
+struct pd692x0_manager {
+ struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS];
+ int nports;
+};
+
+struct pd692x0_matrix {
+ u8 hw_port_a;
+ u8 hw_port_b;
+};
+
+static int
+pd692x0_of_get_ports_manager(struct pd692x0_priv *priv,
+ struct pd692x0_manager *manager,
+ struct device_node *np)
+{
+ struct device_node *node;
+ int ret, nports, i;
+
+ nports = 0;
+ for_each_child_of_node(np, node) {
+ u32 port;
+
+ if (!of_node_name_eq(node, "port"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &port);
+ if (ret)
+ goto out;
+
+ if (port >= PD692X0_MAX_MANAGER_PORTS || port != nports) {
+ dev_err(&priv->client->dev,
+ "wrong number or order of manager ports (%d)\n",
+ port);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ manager->port_node[port] = node;
+ nports++;
+ }
+
+ manager->nports = nports;
+ return 0;
+
+out:
+ for (i = 0; i < nports; i++) {
+ of_node_put(manager->port_node[i]);
+ manager->port_node[i] = NULL;
+ }
+ of_node_put(node);
+ return ret;
+}
+
+static int
+pd692x0_of_get_managers(struct pd692x0_priv *priv,
+ struct pd692x0_manager manager[PD692X0_MAX_MANAGERS])
+{
+ struct device_node *managers_node, *node;
+ int ret, nmanagers, i, j;
+
+ if (!priv->np)
+ return -EINVAL;
+
+ nmanagers = 0;
+ managers_node = of_get_child_by_name(priv->np, "managers");
+ if (!managers_node)
+ return -EINVAL;
+
+ for_each_child_of_node(managers_node, node) {
+ u32 manager_id;
+
+ if (!of_node_name_eq(node, "manager"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &manager_id);
+ if (ret)
+ goto out;
+
+ if (manager_id >= PD692X0_MAX_MANAGERS ||
+ manager_id != nmanagers) {
+ dev_err(&priv->client->dev,
+ "wrong number or order of managers (%d)\n",
+ manager_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = pd692x0_of_get_ports_manager(priv, &manager[manager_id],
+ node);
+ if (ret)
+ goto out;
+
+ nmanagers++;
+ }
+
+ of_node_put(managers_node);
+ return nmanagers;
+
+out:
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++) {
+ of_node_put(manager[i].port_node[j]);
+ manager[i].port_node[j] = NULL;
+ }
+ }
+
+ of_node_put(node);
+ of_node_put(managers_node);
+ return ret;
+}
+
+static int
+pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset,
+ const struct pd692x0_manager *manager,
+ int nmanagers, struct pd692x0_matrix *port_matrix)
+{
+ int i, j, port_cnt;
+ bool found = false;
+
+ if (!pairset->np)
+ return 0;
+
+	/* Look at every manager */
+ port_cnt = 0;
+ for (i = 0; i < nmanagers; i++) {
+		/* Look at every port of the manager */
+ for (j = 0; j < manager[i].nports; j++) {
+ if (pairset->np == manager[i].port_node[j]) {
+ found = true;
+ break;
+ }
+ }
+ port_cnt += j;
+
+ if (found)
+ break;
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ if (pairset->pinout == ALTERNATIVE_A)
+ port_matrix->hw_port_a = port_cnt;
+ else if (pairset->pinout == ALTERNATIVE_B)
+ port_matrix->hw_port_b = port_cnt;
+
+ return 0;
+}
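+
+/* Editor's illustration (not part of the original patch): the port number
+ * computed above is a flat index across all managers. For a hypothetical
+ * setup where manager 0 exposes 8 ports and the pairset phandle matches
+ * port 2 of manager 1, port_cnt resolves to 8 + 2 = 10, which is then
+ * stored in hw_port_a or hw_port_b depending on the pairset pinout.
+ */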
+
+static int
+pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
+ const struct pd692x0_manager *manager,
+ int nmanagers,
+ struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+{
+ struct pse_controller_dev *pcdev = &priv->pcdev;
+ int i, ret;
+
+ /* Init Matrix */
+ for (i = 0; i < PD692X0_MAX_PIS; i++) {
+ port_matrix[i].hw_port_a = 0xff;
+ port_matrix[i].hw_port_b = 0xff;
+ }
+
+	/* Update with values for every PSE PI */
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[0],
+ manager, nmanagers,
+ &port_matrix[i]);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "unable to configure pi %d pairset 0", i);
+ return ret;
+ }
+
+ ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[1],
+ manager, nmanagers,
+ &port_matrix[i]);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "unable to configure pi %d pairset 1", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
+ const struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+{
+ struct pd692x0_msg msg, buf;
+ int ret, i;
+
+ /* Write temporary Matrix */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_TMP_PORT_MATRIX];
+ for (i = 0; i < PD692X0_MAX_PIS; i++) {
+ msg.sub[2] = i;
+ msg.data[0] = port_matrix[i].hw_port_b;
+ msg.data[1] = port_matrix[i].hw_port_a;
+
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Program Matrix */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_PRG_PORT_MATRIX];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct pd692x0_manager manager[PD692X0_MAX_MANAGERS] = {0};
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS];
+ int ret, i, j, nmanagers;
+
+ /* Should we flash the port matrix */
+ if (priv->fw_state != PD692X0_FW_OK &&
+ priv->fw_state != PD692X0_FW_COMPLETE)
+ return 0;
+
+ ret = pd692x0_of_get_managers(priv, manager);
+ if (ret < 0)
+ return ret;
+
+ nmanagers = ret;
+ ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
+ if (ret)
+ goto out;
+
+ ret = pd692x0_write_ports_matrix(priv, port_matrix);
+ if (ret)
+ goto out;
+
+out:
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++)
+ of_node_put(manager[i].port_node[j]);
+ }
+ return ret;
+}
+
+static const struct pse_controller_ops pd692x0_ops = {
+ .setup_pi_matrix = pd692x0_setup_pi_matrix,
+ .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_enable = pd692x0_pi_enable,
+ .pi_disable = pd692x0_pi_disable,
+ .pi_is_enabled = pd692x0_pi_is_enabled,
+};
+
+#define PD692X0_FW_LINE_MAX_SZ 0xff
+static int pd692x0_fw_get_next_line(const u8 *data,
+ char *line, size_t size)
+{
+ size_t line_size;
+ int i;
+
+ line_size = min_t(size_t, size, PD692X0_FW_LINE_MAX_SZ);
+
+ memset(line, 0, PD692X0_FW_LINE_MAX_SZ);
+ for (i = 0; i < line_size - 1; i++) {
+ if (*data == '\r' && *(data + 1) == '\n') {
+ line[i] = '\r';
+ line[i + 1] = '\n';
+ return i + 2;
+ }
+ line[i] = *data;
+ data++;
+ }
+
+ return -EIO;
+}
+
+static enum fw_upload_err
+pd692x0_fw_recv_resp(const struct i2c_client *client, unsigned long ms_timeout,
+ const char *msg_ok, unsigned int msg_size)
+{
+ /* Maximum controller response size */
+ char fw_msg_buf[5] = {0};
+ unsigned long timeout;
+ int ret;
+
+ if (msg_size > sizeof(fw_msg_buf))
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ /* Read until we get something */
+ timeout = msecs_to_jiffies(ms_timeout) + jiffies;
+ while (true) {
+ if (time_is_before_jiffies(timeout))
+ return FW_UPLOAD_ERR_TIMEOUT;
+
+ ret = i2c_master_recv(client, fw_msg_buf, 1);
+ if (ret < 0 || *fw_msg_buf == 0) {
+ usleep_range(1000, 2000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ /* Read remaining characters */
+ ret = i2c_master_recv(client, fw_msg_buf + 1, msg_size - 1);
+ if (strncmp(fw_msg_buf, msg_ok, msg_size)) {
+ dev_err(&client->dev,
+ "Wrong FW download process answer (%*pE)\n",
+ msg_size, fw_msg_buf);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static int pd692x0_fw_write_line(const struct i2c_client *client,
+ const char line[PD692X0_FW_LINE_MAX_SZ],
+ const bool last_line)
+{
+ int ret;
+
+ while (*line != 0) {
+ ret = i2c_master_send(client, line, 1);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ line++;
+ }
+
+ if (last_line) {
+ ret = pd692x0_fw_recv_resp(client, 100, "TP\r\n",
+ sizeof("TP\r\n") - 1);
+ if (ret)
+ return ret;
+ } else {
+ ret = pd692x0_fw_recv_resp(client, 100, "T*\r\n",
+ sizeof("T*\r\n") - 1);
+ if (ret)
+ return ret;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err pd692x0_fw_reset(const struct i2c_client *client)
+{
+ const struct pd692x0_msg zero = {0};
+ struct pd692x0_msg buf = {0};
+ unsigned long timeout;
+ char cmd[] = "RST";
+ int ret;
+
+ ret = i2c_master_send(client, cmd, strlen(cmd));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to reset the controller (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(10000) + jiffies;
+ while (true) {
+ if (time_is_before_jiffies(timeout))
+ return FW_UPLOAD_ERR_TIMEOUT;
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret < 0 ||
+ !memcmp(&buf, &zero, sizeof(buf)))
+ usleep_range(1000, 2000);
+ else
+ break;
+ }
+
+ /* Is the reply a successful report message */
+ if (buf.key != PD692X0_KEY_TLM || buf.echo != 0xff ||
+ buf.sub[0] & 0x01) {
+ dev_err(&client->dev, "PSE controller error\n");
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ /* Is the firmware operational */
+ if (buf.sub[0] & 0x02) {
+ dev_err(&client->dev,
+ "PSE firmware error. Please update it.\n");
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err pd692x0_fw_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ const struct i2c_client *client = priv->client;
+ enum pd692x0_fw_state last_fw_state;
+ int ret;
+
+ priv->cancel_request = false;
+ last_fw_state = priv->fw_state;
+
+ priv->fw_state = PD692X0_FW_PREPARE;
+
+ /* Enter program mode */
+ if (last_fw_state == PD692X0_FW_BROKEN) {
+ const char *msg = "ENTR";
+ const char *c;
+
+ c = msg;
+ do {
+ ret = i2c_master_send(client, c, 1);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ if (*(c + 1))
+ usleep_range(10000, 20000);
+ } while (*(++c));
+ } else {
+ struct pd692x0_msg msg, buf;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_DOWNLOAD_CMD];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to enter programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1);
+ if (ret)
+ goto err_out;
+
+ if (priv->cancel_request) {
+ ret = FW_UPLOAD_ERR_CANCELED;
+ goto err_out;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+
+err_out:
+ pd692x0_fw_reset(priv->client);
+ priv->fw_state = last_fw_state;
+ return ret;
+}
+
+static enum fw_upload_err pd692x0_fw_write(struct fw_upload *fwl,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ char line[PD692X0_FW_LINE_MAX_SZ];
+ const struct i2c_client *client;
+ int ret, i;
+ char cmd;
+
+ client = priv->client;
+ priv->fw_state = PD692X0_FW_WRITE;
+
+ /* Erase */
+ cmd = 'E';
+ ret = i2c_master_send(client, &cmd, 1);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to boot programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TOE\r\n", sizeof("TOE\r\n") - 1);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_fw_recv_resp(client, 5000, "TE\r\n", sizeof("TE\r\n") - 1);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to erase internal memory, however still try to write Firmware\n");
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to erase internal memory, however still try to write Firmware\n");
+
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ /* Program */
+ cmd = 'P';
+ ret = i2c_master_send(client, &cmd, sizeof(char));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to boot programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TOP\r\n", sizeof("TOP\r\n") - 1);
+ if (ret)
+ return ret;
+
+ i = 0;
+ while (i < size) {
+ ret = pd692x0_fw_get_next_line(data, line, size - i);
+ if (ret < 0) {
+ ret = FW_UPLOAD_ERR_FW_INVALID;
+ goto err;
+ }
+
+ i += ret;
+ data += ret;
+ if (line[0] == 'S' && line[1] == '0') {
+ continue;
+ } else if (line[0] == 'S' && line[1] == '7') {
+ ret = pd692x0_fw_write_line(client, line, true);
+ if (ret)
+ goto err;
+ } else {
+ ret = pd692x0_fw_write_line(client, line, false);
+ if (ret)
+ goto err;
+ }
+
+ if (priv->cancel_request) {
+ ret = FW_UPLOAD_ERR_CANCELED;
+ goto err;
+ }
+ }
+ *written = i;
+
+ msleep(400);
+
+ return FW_UPLOAD_ERR_NONE;
+
+err:
+ strscpy_pad(line, "S7\r\n", sizeof(line));
+ pd692x0_fw_write_line(client, line, true);
+ return ret;
+}
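+
+/* Editor's note (illustration, not part of the original patch): the write
+ * path above suggests the firmware image is a CRLF-terminated, S-record
+ * style text file: "S0" header lines are skipped, an "S7" record terminates
+ * the image, and the controller acknowledges each line with "T*\r\n" and
+ * the final one with "TP\r\n". Treat this as an inference from the code,
+ * not as a format specification.
+ */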
+
+static enum fw_upload_err pd692x0_fw_poll_complete(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ const struct i2c_client *client = priv->client;
+ struct pd692x0_msg_ver ver;
+ int ret;
+
+ priv->fw_state = PD692X0_FW_COMPLETE;
+
+ ret = pd692x0_fw_reset(client);
+ if (ret)
+ return ret;
+
+ ver = pd692x0_get_sw_version(priv);
+ if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) {
+ dev_err(&client->dev,
+ "Too old firmware version. Please update it\n");
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ return FW_UPLOAD_ERR_FW_INVALID;
+ }
+
+ ret = pd692x0_setup_pi_matrix(&priv->pcdev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error configuring ports matrix (%pe)\n",
+ ERR_PTR(ret));
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ priv->fw_state = PD692X0_FW_OK;
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void pd692x0_fw_cancel(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+
+ priv->cancel_request = true;
+}
+
+static void pd692x0_fw_cleanup(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+
+ switch (priv->fw_state) {
+ case PD692X0_FW_WRITE:
+ pd692x0_fw_reset(priv->client);
+ fallthrough;
+ case PD692X0_FW_COMPLETE:
+ priv->fw_state = PD692X0_FW_BROKEN;
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct fw_upload_ops pd692x0_fw_ops = {
+ .prepare = pd692x0_fw_prepare,
+ .write = pd692x0_fw_write,
+ .poll_complete = pd692x0_fw_poll_complete,
+ .cancel = pd692x0_fw_cancel,
+ .cleanup = pd692x0_fw_cleanup,
+};
+
+static int pd692x0_i2c_probe(struct i2c_client *client)
+{
+ struct pd692x0_msg msg, buf = {0}, zero = {0};
+ struct device *dev = &client->dev;
+ struct pd692x0_msg_ver ver;
+ struct pd692x0_priv *priv;
+ struct fw_upload *fwl;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ dev_err(dev, "Failed to get device status\n");
+ return -EIO;
+ }
+
+	/* The boot status has already been read, e.g. by a previous probe */
+	if (!memcmp(&buf, &zero, sizeof(buf))) {
+		/* Ask the controller for its status again */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SYS_STATUS];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get device status\n");
+ return ret;
+ }
+ }
+
+ if (buf.key != 0x03 || buf.sub[0] & 0x01) {
+ dev_err(dev, "PSE controller error\n");
+ return -EIO;
+ }
+ if (buf.sub[0] & 0x02) {
+ dev_err(dev, "PSE firmware error. Please update it.\n");
+ priv->fw_state = PD692X0_FW_BROKEN;
+ } else {
+ ver = pd692x0_get_sw_version(priv);
+ dev_info(&client->dev, "Software version %d.%02d.%d.%d\n",
+ ver.prod, ver.maj_sw_ver, ver.min_sw_ver,
+ ver.pa_sw_ver);
+
+ if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) {
+ dev_err(dev, "Too old firmware version. Please update it\n");
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ } else {
+ priv->fw_state = PD692X0_FW_OK;
+ }
+ }
+
+ priv->np = dev->of_node;
+ priv->pcdev.nr_lines = PD692X0_MAX_PIS;
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &pd692x0_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register PSE controller\n");
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ &pd692x0_fw_ops, priv);
+ if (IS_ERR(fwl))
+ return dev_err_probe(dev, PTR_ERR(fwl),
+ "failed to register to the Firmware Upload API\n");
+ priv->fwl = fwl;
+
+ return 0;
+}
+
+static void pd692x0_i2c_remove(struct i2c_client *client)
+{
+ struct pd692x0_priv *priv = i2c_get_clientdata(client);
+
+ firmware_upload_unregister(priv->fwl);
+}
+
+static const struct i2c_device_id pd692x0_id[] = {
+ { PD692X0_PSE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pd692x0_id);
+
+static const struct of_device_id pd692x0_of_match[] = {
+ { .compatible = "microchip,pd69200", },
+ { .compatible = "microchip,pd69210", },
+ { .compatible = "microchip,pd69220", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pd692x0_of_match);
+
+static struct i2c_driver pd692x0_driver = {
+ .probe = pd692x0_i2c_probe,
+ .remove = pd692x0_i2c_remove,
+ .id_table = pd692x0_id,
+ .driver = {
+ .name = PD692X0_PSE_NAME,
+ .of_match_table = pd692x0_of_match,
+ },
+};
+module_i2c_driver(pd692x0_driver);
+
+MODULE_AUTHOR("Kory Maincent <kory.maincent@bootlin.com>");
+MODULE_DESCRIPTION("Microchip PD692x0 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 146b81f08a89..795ab264eaf2 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -8,6 +8,8 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
static DEFINE_MUTEX(pse_list_mutex);
static LIST_HEAD(pse_controller_list);
@@ -16,67 +18,357 @@ static LIST_HEAD(pse_controller_list);
* struct pse_control - a PSE control
* @pcdev: a pointer to the PSE controller device
* this PSE control belongs to
+ * @ps: PSE PI supply of the PSE control
* @list: list entry for the pcdev's PSE controller list
* @id: ID of the PSE line in the PSE controller device
* @refcnt: Number of gets of this pse_control
*/
struct pse_control {
struct pse_controller_dev *pcdev;
+ struct regulator *ps;
struct list_head list;
unsigned int id;
struct kref refcnt;
};
+static int of_load_single_pse_pi_pairset(struct device_node *node,
+ struct pse_pi *pi,
+ int pairset_num)
+{
+ struct device_node *pairset_np;
+ const char *name;
+ int ret;
+
+ ret = of_property_read_string_index(node, "pairset-names",
+ pairset_num, &name);
+ if (ret)
+ return ret;
+
+ if (!strcmp(name, "alternative-a")) {
+ pi->pairset[pairset_num].pinout = ALTERNATIVE_A;
+ } else if (!strcmp(name, "alternative-b")) {
+ pi->pairset[pairset_num].pinout = ALTERNATIVE_B;
+ } else {
+ pr_err("pse: wrong pairset-names value %s (%pOF)\n",
+ name, node);
+ return -EINVAL;
+ }
+
+ pairset_np = of_parse_phandle(node, "pairsets", pairset_num);
+ if (!pairset_np)
+ return -ENODEV;
+
+ pi->pairset[pairset_num].np = pairset_np;
+
+ return 0;
+}
+
/**
- * of_pse_zero_xlate - dummy function for controllers with one only control
- * @pcdev: a pointer to the PSE controller device
- * @pse_spec: PSE line specifier as found in the device tree
+ * of_load_pse_pi_pairsets - load PSE PI pairsets pinout and polarity
+ * @node: a pointer to the device node
+ * @pi: a pointer to the PSE PI to fill
+ * @npairsets: the number of pairsets (1 or 2) used by the PI
*
- * This static translation function is used by default if of_xlate in
- * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
- * controllers with #pse-cells = <0>.
+ * Return: 0 on success and failure value on error
*/
-static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
- const struct of_phandle_args *pse_spec)
+static int of_load_pse_pi_pairsets(struct device_node *node,
+ struct pse_pi *pi,
+ int npairsets)
{
- return 0;
+ int i, ret;
+
+ ret = of_property_count_strings(node, "pairset-names");
+ if (ret != npairsets) {
+ pr_err("pse: amount of pairsets and pairset-names is not equal %d != %d (%pOF)\n",
+ npairsets, ret, node);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < npairsets; i++) {
+ ret = of_load_single_pse_pi_pairset(node, pi, i);
+ if (ret)
+ goto out;
+ }
+
+ if (npairsets == 2 &&
+ pi->pairset[0].pinout == pi->pairset[1].pinout) {
+ pr_err("pse: two PI pairsets can not have identical pinout (%pOF)",
+ node);
+ ret = -EINVAL;
+ }
+
+out:
+	/* On error, release all the pairset device node references */
+ if (ret) {
+ of_node_put(pi->pairset[0].np);
+ pi->pairset[0].np = NULL;
+ of_node_put(pi->pairset[1].np);
+ pi->pairset[1].np = NULL;
+ }
+
+ return ret;
+}
+
+static void pse_release_pis(struct pse_controller_dev *pcdev)
+{
+ int i;
+
+ for (i = 0; i <= pcdev->nr_lines; i++) {
+ of_node_put(pcdev->pi[i].pairset[0].np);
+ of_node_put(pcdev->pi[i].pairset[1].np);
+ of_node_put(pcdev->pi[i].np);
+ }
+ kfree(pcdev->pi);
}
/**
- * of_pse_simple_xlate - translate pse_spec to the PSE line number
+ * of_load_pse_pis - load all the PSE PIs
* @pcdev: a pointer to the PSE controller device
- * @pse_spec: PSE line specifier as found in the device tree
*
- * This static translation function is used by default if of_xlate in
- * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
- * controllers with 1:1 mapping, where PSE lines can be indexed by number
- * without gaps.
+ * Return: 0 on success and failure value on error
*/
-static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
- const struct of_phandle_args *pse_spec)
+static int of_load_pse_pis(struct pse_controller_dev *pcdev)
{
- if (pse_spec->args[0] >= pcdev->nr_lines)
- return -EINVAL;
+ struct device_node *np = pcdev->dev->of_node;
+ struct device_node *node, *pis;
+ int ret;
- return pse_spec->args[0];
+ if (!np)
+ return -ENODEV;
+
+ pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL);
+ if (!pcdev->pi)
+ return -ENOMEM;
+
+ pis = of_get_child_by_name(np, "pse-pis");
+ if (!pis) {
+ /* no description of PSE PIs */
+ pcdev->no_of_pse_pi = true;
+ return 0;
+ }
+
+ for_each_child_of_node(pis, node) {
+ struct pse_pi pi = {0};
+ u32 id;
+
+ if (!of_node_name_eq(node, "pse-pi"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret) {
+ dev_err(pcdev->dev,
+ "can't get reg property for node '%pOF'",
+ node);
+ goto out;
+ }
+
+ if (id >= pcdev->nr_lines) {
+ dev_err(pcdev->dev,
+ "reg value (%u) is out of range (%u) (%pOF)\n",
+ id, pcdev->nr_lines, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pcdev->pi[id].np) {
+ dev_err(pcdev->dev,
+ "other node with same reg value was already registered. %pOF : %pOF\n",
+ pcdev->pi[id].np, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_count_phandle_with_args(node, "pairsets", NULL);
+ /* npairsets is limited to value one or two */
+ if (ret == 1 || ret == 2) {
+ ret = of_load_pse_pi_pairsets(node, &pi, ret);
+ if (ret)
+ goto out;
+		} else if (ret != -ENOENT) {
+ dev_err(pcdev->dev,
+ "error: wrong number of pairsets. Should be 1 or 2, got %d (%pOF)\n",
+ ret, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ pi.np = node;
+ memcpy(&pcdev->pi[id], &pi, sizeof(pi));
+ }
+
+ of_node_put(pis);
+ return 0;
+
+out:
+ pse_release_pis(pcdev);
+ of_node_put(node);
+ of_node_put(pis);
+ return ret;
+}
+
+static int pse_pi_is_enabled(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_is_enabled)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_is_enabled(pcdev, id);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int pse_pi_enable(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_enable)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_enable(pcdev, id);
+ if (!ret)
+ pcdev->pi[id].admin_state_enabled = 1;
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int pse_pi_disable(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_disable)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_disable(pcdev, id);
+ if (!ret)
+ pcdev->pi[id].admin_state_enabled = 0;
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static const struct regulator_ops pse_pi_ops = {
+ .is_enabled = pse_pi_is_enabled,
+ .enable = pse_pi_enable,
+ .disable = pse_pi_disable,
+};
+
+static int
+devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
+ char *name, int id)
+{
+ struct regulator_init_data *rinit_data;
+ struct regulator_config rconfig = {0};
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+
+ rinit_data = devm_kzalloc(pcdev->dev, sizeof(*rinit_data),
+ GFP_KERNEL);
+ if (!rinit_data)
+ return -ENOMEM;
+
+ rdesc = devm_kzalloc(pcdev->dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+	/* The regulator descriptor id has to match the id of its associated
+	 * PSE PI for the PSE controls to work properly.
+	 */
+ rdesc->id = id;
+ rdesc->name = name;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &pse_pi_ops;
+ rdesc->owner = pcdev->owner;
+
+ rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ rinit_data->supply_regulator = "vpwr";
+
+ rconfig.dev = pcdev->dev;
+ rconfig.driver_data = pcdev;
+ rconfig.init_data = rinit_data;
+
+ rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
+ if (IS_ERR(rdev)) {
+ dev_err_probe(pcdev->dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ pcdev->pi[id].rdev = rdev;
+
+ return 0;
}
/**
* pse_controller_register - register a PSE controller device
* @pcdev: a pointer to the initialized PSE controller device
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_controller_register(struct pse_controller_dev *pcdev)
{
- if (!pcdev->of_xlate) {
- if (pcdev->of_pse_n_cells == 0)
- pcdev->of_xlate = of_pse_zero_xlate;
- else if (pcdev->of_pse_n_cells == 1)
- pcdev->of_xlate = of_pse_simple_xlate;
- }
+ size_t reg_name_len;
+ int ret, i;
mutex_init(&pcdev->lock);
INIT_LIST_HEAD(&pcdev->pse_control_head);
+ if (!pcdev->nr_lines)
+ pcdev->nr_lines = 1;
+
+ ret = of_load_pse_pis(pcdev);
+ if (ret)
+ return ret;
+
+ if (pcdev->ops->setup_pi_matrix) {
+ ret = pcdev->ops->setup_pi_matrix(pcdev);
+ if (ret)
+ return ret;
+ }
+
+	/* Each regulator name is "pse-" + dev name + "_pi" + up to 10 digits
+	 * for the PI index + the trailing NUL, i.e. dev name length + 7 + 10 + 1
+	 */
+ reg_name_len = strlen(dev_name(pcdev->dev)) + 18;
+
+ /* Register PI regulators */
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ char *reg_name;
+
+ /* Do not register regulator for PIs not described */
+ if (!pcdev->no_of_pse_pi && !pcdev->pi[i].np)
+ continue;
+
+ reg_name = devm_kzalloc(pcdev->dev, reg_name_len, GFP_KERNEL);
+ if (!reg_name)
+ return -ENOMEM;
+
+ snprintf(reg_name, reg_name_len, "pse-%s_pi%d",
+ dev_name(pcdev->dev), i);
+
+ ret = devm_pse_pi_regulator_register(pcdev, reg_name, i);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&pse_list_mutex);
list_add(&pcdev->list, &pse_controller_list);
mutex_unlock(&pse_list_mutex);
@@ -91,6 +383,7 @@ EXPORT_SYMBOL_GPL(pse_controller_register);
*/
void pse_controller_unregister(struct pse_controller_dev *pcdev)
{
+ pse_release_pis(pcdev);
mutex_lock(&pse_list_mutex);
list_del(&pcdev->list);
mutex_unlock(&pse_list_mutex);
@@ -110,6 +403,8 @@ static void devm_pse_controller_release(struct device *dev, void *res)
* Managed pse_controller_register(). For PSE controllers registered by
* this function, pse_controller_unregister() is automatically called on
* driver detach. See pse_controller_register() for more information.
+ *
+ * Return: 0 on success and failure value on error
*/
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev)
@@ -144,6 +439,10 @@ static void __pse_control_release(struct kref *kref)
lockdep_assert_held(&pse_list_mutex);
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ regulator_disable(psec->ps);
+ devm_regulator_put(psec->ps);
+
module_put(psec->pcdev->owner);
list_del(&psec->list);
@@ -176,6 +475,7 @@ static struct pse_control *
pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
{
struct pse_control *psec;
+ int ret;
lockdep_assert_held(&pse_list_mutex);
@@ -191,20 +491,82 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
return ERR_PTR(-ENOMEM);
if (!try_module_get(pcdev->owner)) {
- kfree(psec);
- return ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto free_psec;
}
+ psec->ps = devm_regulator_get_exclusive(pcdev->dev,
+ rdev_get_name(pcdev->pi[index].rdev));
+ if (IS_ERR(psec->ps)) {
+ ret = PTR_ERR(psec->ps);
+ goto put_module;
+ }
+
+ ret = regulator_is_enabled(psec->ps);
+ if (ret < 0)
+ goto regulator_put;
+
+ pcdev->pi[index].admin_state_enabled = ret;
+
psec->pcdev = pcdev;
list_add(&psec->list, &pcdev->pse_control_head);
psec->id = index;
kref_init(&psec->refcnt);
return psec;
+
+regulator_put:
+ devm_regulator_put(psec->ps);
+put_module:
+ module_put(pcdev->owner);
+free_psec:
+ kfree(psec);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * of_pse_match_pi - Find the PSE PI id matching the device node phandle
+ * @pcdev: a pointer to the PSE controller device
+ * @np: a pointer to the device node
+ *
+ * Return: id of the PSE PI, -EINVAL if not found
+ */
+static int of_pse_match_pi(struct pse_controller_dev *pcdev,
+ struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i <= pcdev->nr_lines; i++) {
+ if (pcdev->pi[i].np == np)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * psec_id_xlate - translate pse_spec to the PSE line number according
+ * to the number of pse-cells in case of no pse_pi node
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * Return: 0 if #pse-cells = <0>. Return PSE line number otherwise.
+ */
+static int psec_id_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ if (!pcdev->of_pse_n_cells)
+ return 0;
+
+ if (pcdev->of_pse_n_cells > 1 ||
+ pse_spec->args[0] >= pcdev->nr_lines)
+ return -EINVAL;
+
+ return pse_spec->args[0];
}
-struct pse_control *
-of_pse_control_get(struct device_node *node)
+struct pse_control *of_pse_control_get(struct device_node *node)
{
struct pse_controller_dev *r, *pcdev;
struct of_phandle_args args;
@@ -222,7 +584,14 @@ of_pse_control_get(struct device_node *node)
mutex_lock(&pse_list_mutex);
pcdev = NULL;
list_for_each_entry(r, &pse_controller_list, list) {
- if (args.np == r->dev->of_node) {
+ if (!r->no_of_pse_pi) {
+ ret = of_pse_match_pi(r, args.np);
+ if (ret >= 0) {
+ pcdev = r;
+ psec_id = ret;
+ break;
+ }
+ } else if (args.np == r->dev->of_node) {
pcdev = r;
break;
}
@@ -238,10 +607,12 @@ of_pse_control_get(struct device_node *node)
goto out;
}
- psec_id = pcdev->of_xlate(pcdev, &args);
- if (psec_id < 0) {
- psec = ERR_PTR(psec_id);
- goto out;
+ if (pcdev->no_of_pse_pi) {
+ psec_id = psec_id_xlate(pcdev, &args);
+ if (psec_id < 0) {
+ psec = ERR_PTR(psec_id);
+ goto out;
+ }
}
/* pse_list_mutex also protects the pcdev's pse_control list */
@@ -260,6 +631,8 @@ EXPORT_SYMBOL_GPL(of_pse_control_get);
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @status: struct to store PSE status
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
@@ -284,31 +657,89 @@ int pse_ethtool_get_status(struct pse_control *psec,
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
+static int pse_ethtool_c33_set_config(struct pse_control *psec,
+ const struct pse_control_config *config)
+{
+ int err = 0;
+
+	/* Check admin_state_enabled so that regulator_enable() or
+	 * regulator_disable() is not called twice, which would unbalance the
+	 * regulator use count.
+	 */
+ switch (config->c33_admin_control) {
+ case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED:
+ if (!psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_enable(psec->ps);
+ break;
+ case ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED:
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_disable(psec->ps);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int pse_ethtool_podl_set_config(struct pse_control *psec,
+ const struct pse_control_config *config)
+{
+ int err = 0;
+
+	/* Check admin_state_enabled so that regulator_enable() or
+	 * regulator_disable() is not called twice, which would unbalance the
+	 * regulator use count.
+	 */
+ switch (config->podl_admin_control) {
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
+ if (!psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_enable(psec->ps);
+ break;
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_disable(psec->ps);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
/**
* pse_ethtool_set_config - set PSE control configuration
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @config: Configuration of the test to run
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- const struct pse_controller_ops *ops;
- int err;
-
- ops = psec->pcdev->ops;
+ int err = 0;
- if (!ops->ethtool_set_config) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not configuration");
- return -EOPNOTSUPP;
+ if (pse_has_c33(psec)) {
+ err = pse_ethtool_c33_set_config(psec, config);
+ if (err)
+ return err;
}
- mutex_lock(&psec->pcdev->lock);
- err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
- mutex_unlock(&psec->pcdev->lock);
+ if (pse_has_podl(psec))
+ err = pse_ethtool_podl_set_config(psec, config);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
+
+bool pse_has_podl(struct pse_control *psec)
+{
+ return psec->pcdev->types & ETHTOOL_PSE_PODL;
+}
+EXPORT_SYMBOL_GPL(pse_has_podl);
+
+bool pse_has_c33(struct pse_control *psec)
+{
+ return psec->pcdev->types & ETHTOOL_PSE_C33;
+}
+EXPORT_SYMBOL_GPL(pse_has_c33);
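+
+/* Editor's illustration (not part of the original patch): a minimal sketch
+ * of how a consumer could drive the reworked API, assuming a valid psec
+ * obtained from of_pse_control_get(). The function name is hypothetical and
+ * error handling is elided.
+ */
+static int example_pse_admin_enable(struct pse_control *psec,
+				    struct netlink_ext_ack *extack)
+{
+	struct pse_control_config config = {};
+
+	if (pse_has_c33(psec))
+		config.c33_admin_control = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+	if (pse_has_podl(psec))
+		config.podl_admin_control = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+
+	/* Internally this maps to regulator_enable() on the PI regulator */
+	return pse_ethtool_set_config(psec, extack, &config);
+}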
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index e2bf8306ca90..64ab36974fe0 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -24,38 +24,42 @@ static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
}
static int
-pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- const struct pse_control_config *config)
+pse_reg_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
- if (priv->admin_state == config->admin_cotrol)
- return 0;
-
- switch (config->admin_cotrol) {
- case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
- ret = regulator_enable(priv->ps);
- break;
- case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
- ret = regulator_disable(priv->ps);
- break;
- default:
- dev_err(pcdev->dev, "Unknown admin state %i\n",
- config->admin_cotrol);
- ret = -ENOTSUPP;
- }
-
+ ret = regulator_enable(priv->ps);
if (ret)
return ret;
- priv->admin_state = config->admin_cotrol;
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ return 0;
+}
+
+static int
+pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+ ret = regulator_disable(priv->ps);
+ if (ret)
+ return ret;
+
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
return 0;
}
static int
+pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+
+ return regulator_is_enabled(priv->ps);
+}
+
+static int
pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
@@ -80,7 +84,9 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
static const struct pse_controller_ops pse_reg_ops = {
.ethtool_get_status = pse_reg_ethtool_get_status,
- .ethtool_set_config = pse_reg_ethtool_set_config,
+ .pi_enable = pse_reg_pi_enable,
+ .pi_is_enabled = pse_reg_pi_is_enabled,
+ .pi_disable = pse_reg_pi_disable,
};
static int
@@ -116,6 +122,7 @@ pse_reg_probe(struct platform_device *pdev)
priv->pcdev.owner = THIS_MODULE;
priv->pcdev.ops = &pse_reg_ops;
priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_PODL;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret) {
dev_err(dev, "failed to register PSE controller (%pe)\n",
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
new file mode 100644
index 000000000000..98ffbb1bbf13
--- /dev/null
+++ b/drivers/net/pse-pd/tps23881.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the TI TPS23881 PoE PSE Controller driver (I2C bus)
+ *
+ * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define TPS23881_MAX_CHANS 8
+
+#define TPS23881_REG_PW_STATUS 0x10
+#define TPS23881_REG_OP_MODE 0x12
+#define TPS23881_OP_MODE_SEMIAUTO 0xaaaa
+#define TPS23881_REG_DIS_EN 0x13
+#define TPS23881_REG_DET_CLA_EN 0x14
+#define TPS23881_REG_GEN_MASK 0x17
+#define TPS23881_REG_NBITACC BIT(5)
+#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_PORT_MAP 0x26
+#define TPS23881_REG_PORT_POWER 0x29
+#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_TPON BIT(0)
+#define TPS23881_REG_FWREV 0x41
+#define TPS23881_REG_DEVID 0x43
+#define TPS23881_REG_SRAM_CTRL 0x60
+#define TPS23881_REG_SRAM_DATA 0x61
+
+struct tps23881_port_desc {
+ u8 chan[2];
+ bool is_4p;
+};
+
+struct tps23881_priv {
+ struct i2c_client *client;
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+ struct tps23881_port_desc port[TPS23881_MAX_CHANS];
+};
+
+static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct tps23881_priv, pcdev);
+}
+
+static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 chan;
+ u16 val;
+ int ret;
+
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ val = (u16)(ret | BIT(chan));
+ else
+ val = (u16)(ret | BIT(chan + 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ val |= BIT(chan);
+ else
+ val |= BIT(chan + 4);
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 chan;
+ u16 val;
+ int ret;
+
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ val = (u16)(ret | BIT(chan + 4));
+ else
+ val = (u16)(ret | BIT(chan + 8));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ val |= BIT(chan + 4);
+ else
+ val |= BIT(chan + 8);
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
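+
+/* Editor's note (illustration, not part of the original patch): the bit
+ * arithmetic in the helpers above implies a 16-bit layout split into two
+ * 4-channel groups: bits 0-7 cover channels 0-3 and bits 8-15 cover
+ * channels 4-7. Within each group the lower nibble carries the power-on
+ * (or "enabled") bits and the upper nibble the power-off (or "delivering")
+ * bits. This is inferred from the code; the TPS23881 datasheet remains
+ * authoritative.
+ */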
+
+static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ bool enabled;
+ u8 chan;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ enabled = ret & BIT(chan);
+ else
+ enabled = ret & BIT(chan + 4);
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ enabled &= !!(ret & BIT(chan));
+ else
+ enabled &= !!(ret & BIT(chan + 4));
+ }
+
+	/* Report the PI as enabled only if both channels are in that state */
+ return enabled;
+}
+
+static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
+ unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ bool enabled, delivering;
+ u8 chan;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ if (chan < 4) {
+ enabled = ret & BIT(chan);
+ delivering = ret & BIT(chan + 4);
+ } else {
+ enabled = ret & BIT(chan + 4);
+ delivering = ret & BIT(chan + 8);
+ }
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4) {
+ enabled &= !!(ret & BIT(chan));
+ delivering &= !!(ret & BIT(chan + 4));
+ } else {
+ enabled &= !!(ret & BIT(chan + 4));
+ delivering &= !!(ret & BIT(chan + 8));
+ }
+ }
+
+	/* Report the PI as delivering only if both channels are in that state */
+ if (delivering)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+	/* Report the PI as enabled only if both channels are in that state */
+ if (enabled)
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+/* Parse the channels subnode into an array of device nodes */
+static int
+tps23881_get_of_channels(struct tps23881_priv *priv,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ struct device_node *channels_node, *node;
+ int i, ret;
+
+ if (!priv->np)
+ return -EINVAL;
+
+ channels_node = of_find_node_by_name(priv->np, "channels");
+ if (!channels_node)
+ return -EINVAL;
+
+ for_each_child_of_node(channels_node, node) {
+ u32 chan_id;
+
+ if (!of_node_name_eq(node, "channel"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &chan_id);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (chan_id >= TPS23881_MAX_CHANS || chan_node[chan_id]) {
+ dev_err(&priv->client->dev,
+ "wrong number of port (%d)\n", chan_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ chan_node[chan_id] = node;
+ }
+
+ of_node_put(channels_node);
+ return 0;
+
+out:
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ of_node_put(chan_node[i]);
+ chan_node[i] = NULL;
+ }
+
+ of_node_put(node);
+ of_node_put(channels_node);
+ return ret;
+}
+
+struct tps23881_port_matrix {
+ u8 pi_id;
+ u8 lgcl_chan[2];
+ u8 hw_chan[2];
+ bool is_4p;
+ bool exist;
+};
+
+static int
+tps23881_match_channel(const struct pse_pi_pairset *pairset,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ int i;
+
+	/* Look at every channel */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (pairset->np == chan_node[i])
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool
+tps23881_is_chan_free(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int chan)
+{
+ int i;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (port_matrix[i].exist &&
+ (port_matrix[i].hw_chan[0] == chan ||
+ port_matrix[i].hw_chan[1] == chan))
+ return false;
+ }
+
+ return true;
+}
+
+/* Fill port matrix with the matching channels */
+static int
+tps23881_match_port_matrix(struct pse_pi *pi, int pi_id,
+ struct device_node *chan_node[TPS23881_MAX_CHANS],
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ int ret;
+
+ if (!pi->pairset[0].np)
+ return 0;
+
+ ret = tps23881_match_channel(&pi->pairset[0], chan_node);
+ if (ret < 0)
+ return ret;
+
+ if (!tps23881_is_chan_free(port_matrix, ret)) {
+ pr_err("tps23881: channel %d already used\n", ret);
+ return -ENODEV;
+ }
+
+ port_matrix[pi_id].hw_chan[0] = ret;
+ port_matrix[pi_id].exist = true;
+
+ if (!pi->pairset[1].np)
+ return 0;
+
+ ret = tps23881_match_channel(&pi->pairset[1], chan_node);
+ if (ret < 0)
+ return ret;
+
+ if (!tps23881_is_chan_free(port_matrix, ret)) {
+ pr_err("tps23881: channel %d already used\n", ret);
+ return -ENODEV;
+ }
+
+ if (port_matrix[pi_id].hw_chan[0] / 4 != ret / 4) {
+ pr_err("tps23881: 4-pair PSE can only be set within the same 4 ports group");
+ return -ENODEV;
+ }
+
+ port_matrix[pi_id].hw_chan[1] = ret;
+ port_matrix[pi_id].is_4p = true;
+
+ return 0;
+}
+
+static int
+tps23881_get_unused_chan(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int port_cnt)
+{
+ bool used;
+ int i, j;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ used = false;
+
+ for (j = 0; j < port_cnt; j++) {
+ if (port_matrix[j].hw_chan[0] == i) {
+ used = true;
+ break;
+ }
+
+ if (port_matrix[j].is_4p &&
+ port_matrix[j].hw_chan[1] == i) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+/* Sort the port matrix to follow the particular hardware port matrix
+ * specification of the tps23881. The device has two 4-port groups and
+ * each 4-pair powered device has to be configured to use two consecutive
+ * logical channels in each 4-port group (1 and 2, or 3 and 4). Also the
+ * hardware matrix has to be fully configured, even with unused channels,
+ * to be valid.
+ */
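+/* Illustrative example: with a 4-pair PI on hardware channels 2 and 3 and a
+ * 2-pair PI on hardware channel 0, the 4-pair PI gets logical channels 0 and
+ * 1, the 2-pair PI gets logical channel 2, and the unused hardware channel 1
+ * is mapped to logical channel 3 to complete the first group.
+ */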
+static int
+tps23881_sort_port_matrix(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ struct tps23881_port_matrix tmp_port_matrix[TPS23881_MAX_CHANS] = {0};
+ int i, ret, port_cnt = 0, cnt_4ch_grp1 = 0, cnt_4ch_grp2 = 4;
+
+ /* Configure 4p port matrix */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ int *cnt;
+
+ if (!port_matrix[i].exist || !port_matrix[i].is_4p)
+ continue;
+
+ if (port_matrix[i].hw_chan[0] < 4)
+ cnt = &cnt_4ch_grp1;
+ else
+ cnt = &cnt_4ch_grp2;
+
+ tmp_port_matrix[port_cnt].exist = true;
+ tmp_port_matrix[port_cnt].is_4p = true;
+ tmp_port_matrix[port_cnt].pi_id = i;
+ tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0];
+ tmp_port_matrix[port_cnt].hw_chan[1] = port_matrix[i].hw_chan[1];
+
+ /* 4-pair ports have to be configured with consecutive
+ * logical channels (0 and 1, or 2 and 3).
+ */
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++;
+ tmp_port_matrix[port_cnt].lgcl_chan[1] = (*cnt)++;
+
+ port_cnt++;
+ }
+
+ /* Configure 2p port matrix */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ int *cnt;
+
+ if (!port_matrix[i].exist || port_matrix[i].is_4p)
+ continue;
+
+ if (port_matrix[i].hw_chan[0] < 4)
+ cnt = &cnt_4ch_grp1;
+ else
+ cnt = &cnt_4ch_grp2;
+
+ tmp_port_matrix[port_cnt].exist = true;
+ tmp_port_matrix[port_cnt].pi_id = i;
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++;
+ tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0];
+
+ port_cnt++;
+ }
+
+ /* Complete the rest of the first 4-port group matrix even if
+ * some channels are unused.
+ */
+ while (cnt_4ch_grp1 < 4) {
+ ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt);
+ if (ret < 0) {
+ pr_err("tps23881: port matrix issue, no chan available\n");
+ return ret;
+ }
+
+ if (port_cnt >= TPS23881_MAX_CHANS) {
+ pr_err("tps23881: wrong number of channels\n");
+ return -ENODEV;
+ }
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp1;
+ tmp_port_matrix[port_cnt].hw_chan[0] = ret;
+ cnt_4ch_grp1++;
+ port_cnt++;
+ }
+
+ /* Complete the rest of the second 4-port group matrix even if
+ * some channels are unused.
+ */
+ while (cnt_4ch_grp2 < 8) {
+ ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt);
+ if (ret < 0) {
+ pr_err("tps23881: port matrix issue, no chan available\n");
+ return -ENODEV;
+ }
+
+ if (port_cnt >= TPS23881_MAX_CHANS) {
+ pr_err("tps23881: wrong number of channels\n");
+ return -ENODEV;
+ }
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp2;
+ tmp_port_matrix[port_cnt].hw_chan[0] = ret;
+ cnt_4ch_grp2++;
+ port_cnt++;
+ }
+
+ memcpy(port_matrix, tmp_port_matrix, sizeof(tmp_port_matrix));
+
+ return port_cnt;
+}
+
+/* Program the hardware port matrix and mirror the result in the software
+ * port matrix.
+ */
+static int
+tps23881_write_port_matrix(struct tps23881_priv *priv,
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int port_cnt)
+{
+ struct i2c_client *client = priv->client;
+ u8 pi_id, lgcl_chan, hw_chan;
+ u16 val = 0;
+ int i, ret;
+
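+ /* TPS23881_REG_PORT_MAP holds one 2-bit field per logical channel; it is
+ * filled with the physical channel index within its 4-channel group
+ * (hw_chan % 4) at bit position lgcl_chan * 2, as done below.
+ */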
+ for (i = 0; i < port_cnt; i++) {
+ pi_id = port_matrix[i].pi_id;
+ lgcl_chan = port_matrix[i].lgcl_chan[0];
+ hw_chan = port_matrix[i].hw_chan[0] % 4;
+
+ /* Set software port matrix for existing ports */
+ if (port_matrix[i].exist)
+ priv->port[pi_id].chan[0] = lgcl_chan;
+
+ /* Set hardware port matrix for all ports */
+ val |= hw_chan << (lgcl_chan * 2);
+
+ if (!port_matrix[i].is_4p)
+ continue;
+
+ lgcl_chan = port_matrix[i].lgcl_chan[1];
+ hw_chan = port_matrix[i].hw_chan[1] % 4;
+
+ /* Set software port matrix for existing ports */
+ if (port_matrix[i].exist) {
+ priv->port[pi_id].is_4p = true;
+ priv->port[pi_id].chan[1] = lgcl_chan;
+ }
+
+ /* Set hardware port matrix for all ports */
+ val |= hw_chan << (lgcl_chan * 2);
+ }
+
+ /* Write the hardware port matrix */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+tps23881_set_ports_conf(struct tps23881_priv *priv,
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ struct i2c_client *client = priv->client;
+ int i, ret;
+ u16 val;
+
+ /* Set operating mode */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_OP_MODE,
+ TPS23881_OP_MODE_SEMIAUTO);
+ if (ret)
+ return ret;
+
+ /* Disable DC disconnect */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DIS_EN, 0x0);
+ if (ret)
+ return ret;
+
+ /* Set port power allocation */
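+ /* Each pair of consecutive logical channels (0-1, 2-3, ...) shares one
+ * 4-bit field of TPS23881_REG_PORT_POWER: a 4-pair port sets the full
+ * nibble (0xf), a 2-pair port only its low half (0x3).
+ */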
+ val = 0;
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!port_matrix[i].exist)
+ continue;
+
+ if (port_matrix[i].is_4p)
+ val |= 0xf << ((port_matrix[i].lgcl_chan[0] / 2) * 4);
+ else
+ val |= 0x3 << ((port_matrix[i].lgcl_chan[0] / 2) * 4);
+ }
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_POWER, val);
+ if (ret)
+ return ret;
+
+ /* Enable detection and classification */
+ val = 0;
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!port_matrix[i].exist)
+ continue;
+
+ val |= BIT(port_matrix[i].lgcl_chan[0]) |
+ BIT(port_matrix[i].lgcl_chan[0] + 4);
+ if (port_matrix[i].is_4p)
+ val |= BIT(port_matrix[i].lgcl_chan[1]) |
+ BIT(port_matrix[i].lgcl_chan[1] + 4);
+ }
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+tps23881_set_ports_matrix(struct tps23881_priv *priv,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS] = {0};
+ int i, ret;
+
+ /* Update the matrix with values for every PSE PI */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ ret = tps23881_match_port_matrix(&priv->pcdev.pi[i], i,
+ chan_node, port_matrix);
+ if (ret)
+ return ret;
+ }
+
+ ret = tps23881_sort_port_matrix(port_matrix);
+ if (ret < 0)
+ return ret;
+
+ ret = tps23881_write_port_matrix(priv, port_matrix, ret);
+ if (ret)
+ return ret;
+
+ ret = tps23881_set_ports_conf(priv, port_matrix);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct device_node *chan_node[TPS23881_MAX_CHANS] = {NULL};
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, i;
+
+ ret = tps23881_get_of_channels(priv, chan_node);
+ if (ret < 0) {
+ dev_warn(&priv->client->dev,
+ "Unable to parse port-matrix, default matrix will be used\n");
+ return 0;
+ }
+
+ ret = tps23881_set_ports_matrix(priv, chan_node);
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++)
+ of_node_put(chan_node[i]);
+
+ return ret;
+}
+
+static const struct pse_controller_ops tps23881_ops = {
+ .setup_pi_matrix = tps23881_setup_pi_matrix,
+ .pi_enable = tps23881_pi_enable,
+ .pi_disable = tps23881_pi_disable,
+ .pi_is_enabled = tps23881_pi_is_enabled,
+ .ethtool_get_status = tps23881_ethtool_get_status,
+};
+
+static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
+static const char fw_sram_name[] = "ti/tps23881/tps23881-sram-14.bin";
+
+struct tps23881_fw_conf {
+ u8 reg;
+ u8 val;
+};
+
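+/* Register write sequences used to prepare the device before streaming the
+ * parity and SRAM firmware images; a zero register acts as the sentinel that
+ * terminates the sequence in tps23881_flash_sram_fw_part().
+ */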
+static const struct tps23881_fw_conf tps23881_fw_parity_conf[] = {
+ {.reg = 0x60, .val = 0x01},
+ {.reg = 0x62, .val = 0x00},
+ {.reg = 0x63, .val = 0x80},
+ {.reg = 0x60, .val = 0xC4},
+ {.reg = 0x1D, .val = 0xBC},
+ {.reg = 0xD7, .val = 0x02},
+ {.reg = 0x91, .val = 0x00},
+ {.reg = 0x90, .val = 0x00},
+ {.reg = 0xD7, .val = 0x00},
+ {.reg = 0x1D, .val = 0x00},
+ { /* sentinel */ }
+};
+
+static const struct tps23881_fw_conf tps23881_fw_sram_conf[] = {
+ {.reg = 0x60, .val = 0xC5},
+ {.reg = 0x62, .val = 0x00},
+ {.reg = 0x63, .val = 0x80},
+ {.reg = 0x60, .val = 0xC0},
+ {.reg = 0x1D, .val = 0xBC},
+ {.reg = 0xD7, .val = 0x02},
+ {.reg = 0x91, .val = 0x00},
+ {.reg = 0x90, .val = 0x00},
+ {.reg = 0xD7, .val = 0x00},
+ {.reg = 0x1D, .val = 0x00},
+ { /* sentinel */ }
+};
+
+static int tps23881_flash_sram_fw_part(struct i2c_client *client,
+ const char *fw_name,
+ const struct tps23881_fw_conf *fw_conf)
+{
+ const struct firmware *fw = NULL;
+ int i, ret;
+
+ ret = request_firmware(&fw, fw_name, &client->dev);
+ if (ret)
+ return ret;
+
+ dev_dbg(&client->dev, "Flashing %s\n", fw_name);
+
+ /* Prepare device for RAM download */
+ while (fw_conf->reg) {
+ ret = i2c_smbus_write_byte_data(client, fw_conf->reg,
+ fw_conf->val);
+ if (ret)
+ goto out;
+
+ fw_conf++;
+ }
+
+ /* Flash the firmware file */
+ for (i = 0; i < fw->size; i++) {
+ ret = i2c_smbus_write_byte_data(client,
+ TPS23881_REG_SRAM_DATA,
+ fw->data[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int tps23881_flash_sram_fw(struct i2c_client *client)
+{
+ int ret;
+
+ ret = tps23881_flash_sram_fw_part(client, fw_parity_name,
+ tps23881_fw_parity_conf);
+ if (ret)
+ return ret;
+
+ ret = tps23881_flash_sram_fw_part(client, fw_sram_name,
+ tps23881_fw_sram_conf);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, TPS23881_REG_SRAM_CTRL, 0x18);
+ if (ret)
+ return ret;
+
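+ /* Give the controller time to start executing the freshly loaded SRAM
+ * code (settle delay, assumed) before talking to it again.
+ */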
+ mdelay(12);
+
+ return 0;
+}
+
+static int tps23881_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tps23881_priv *priv;
+ int ret;
+ u8 val;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 0x22) {
+ dev_err(dev, "Wrong device ID\n");
+ return -ENXIO;
+ }
+
+ ret = tps23881_flash_sram_fw(client);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FWREV);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&client->dev, "Firmware revision 0x%x\n", ret);
+
+ /* Set configuration B, 16 bit access on a single device address */
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_GEN_MASK);
+ if (ret < 0)
+ return ret;
+
+ val = ret | TPS23881_REG_NBITACC;
+ ret = i2c_smbus_write_byte_data(client, TPS23881_REG_GEN_MASK, val);
+ if (ret)
+ return ret;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+ priv->np = dev->of_node;
+
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &tps23881_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.nr_lines = TPS23881_MAX_CHANS;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "failed to register PSE controller\n");
+ }
+
+ return ret;
+}
+
+static const struct i2c_device_id tps23881_id[] = {
+ { "tps23881", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps23881_id);
+
+static const struct of_device_id tps23881_of_match[] = {
+ { .compatible = "ti,tps23881", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps23881_of_match);
+
+static struct i2c_driver tps23881_driver = {
+ .probe = tps23881_i2c_probe,
+ .id_table = tps23881_id,
+ .driver = {
+ .name = "tps23881",
+ .of_match_table = tps23881_of_match,
+ },
+};
+module_i2c_driver(tps23881_driver);
+
+MODULE_AUTHOR("Kory Maincent <kory.maincent@bootlin.com>");
+MODULE_DESCRIPTION("TI TPS23881 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 0aba3569ccc0..fb362ee248ff 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -286,7 +286,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
}
}
sl->mtu = mtu;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
sl->buffsize = len;
err = 0;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 9f0495e8df4d..bfdd3875fe86 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
dev_queue_xmit(skb);
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index f582d81a5091..7a5aa20d286b 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
# Makefile for the network team driver
#
+team-y:= team_core.o team_nl.o
obj-$(CONFIG_NET_TEAM) += team.o
obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team_core.c
index 0a44bbdcfb7b..ab1935a4aa2c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team_core.c
@@ -27,6 +27,8 @@
#include <net/sch_generic.h>
#include <linux/if_team.h>
+#include "team_nl.h"
+
#define DRV_NAME "team"
@@ -1829,7 +1831,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
@@ -2254,28 +2256,7 @@ static struct rtnl_link_ops team_link_ops __read_mostly = {
static struct genl_family team_nl_family;
-static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
- [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
- [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
- [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
- [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
-};
-
-static const struct nla_policy
-team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
- [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
- [TEAM_ATTR_OPTION_NAME] = {
- .type = NLA_STRING,
- .len = TEAM_STRING_MAX_LEN,
- },
- [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
- [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
- [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
- [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
- [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
-};
-
-static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *hdr;
@@ -2513,7 +2494,7 @@ errout:
return err;
}
-static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
struct team_option_inst *opt_inst;
@@ -2538,7 +2519,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
static int team_nl_send_event_options_get(struct team *team,
struct list_head *sel_opt_inst_list);
-static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
int err = 0;
@@ -2579,7 +2560,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = nla_parse_nested_deprecated(opt_attrs,
TEAM_ATTR_OPTION_MAX,
nl_option,
- team_nl_option_policy,
+ team_attr_option_nl_policy,
info->extack);
if (err)
goto team_put;
@@ -2802,8 +2783,8 @@ errout:
return err;
}
-static int team_nl_cmd_port_list_get(struct sk_buff *skb,
- struct genl_info *info)
+int team_nl_port_list_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct team *team;
int err;
@@ -2820,32 +2801,6 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static const struct genl_small_ops team_nl_ops[] = {
- {
- .cmd = TEAM_CMD_NOOP,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_noop,
- },
- {
- .cmd = TEAM_CMD_OPTIONS_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_options_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = TEAM_CMD_OPTIONS_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_options_get,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = TEAM_CMD_PORT_LIST_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_port_list_get,
- .flags = GENL_ADMIN_PERM,
- },
-};
-
static const struct genl_multicast_group team_nl_mcgrps[] = {
{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
@@ -2853,7 +2808,7 @@ static const struct genl_multicast_group team_nl_mcgrps[] = {
static struct genl_family team_nl_family __ro_after_init = {
.name = TEAM_GENL_NAME,
.version = TEAM_GENL_VERSION,
- .maxattr = TEAM_ATTR_MAX,
+ .maxattr = ARRAY_SIZE(team_nl_policy) - 1,
.policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
diff --git a/drivers/net/team/team_nl.c b/drivers/net/team/team_nl.c
new file mode 100644
index 000000000000..208424ab78f5
--- /dev/null
+++ b/drivers/net/team/team_nl.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/team.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "team_nl.h"
+
+#include <uapi/linux/if_team.h>
+
+/* Common nested types */
+const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1] = {
+ [TEAM_ATTR_OPTION_NAME] = { .type = NLA_STRING, .len = TEAM_STRING_MAX_LEN, },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG, },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8, },
+ [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY, },
+ [TEAM_ATTR_OPTION_REMOVED] = { .type = NLA_FLAG, },
+ [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32, },
+ [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32, },
+};
+
+const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1] = {
+ [TEAM_ATTR_ITEM_OPTION] = NLA_POLICY_NESTED(team_attr_option_nl_policy),
+};
+
+/* Global operation policy for team */
+const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1] = {
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32, },
+ [TEAM_ATTR_LIST_OPTION] = NLA_POLICY_NESTED(team_item_option_nl_policy),
+};
+
+/* Ops table for team */
+const struct genl_small_ops team_nl_ops[4] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_noop_doit,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_options_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_options_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_port_list_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
diff --git a/drivers/net/team/team_nl.h b/drivers/net/team/team_nl.h
new file mode 100644
index 000000000000..c9ec1b22ac4d
--- /dev/null
+++ b/drivers/net/team/team_nl.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/team.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_TEAM_GEN_H
+#define _LINUX_TEAM_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/if_team.h>
+
+/* Common nested types */
+extern const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1];
+extern const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1];
+
+/* Global operation policy for team */
+extern const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1];
+
+/* Ops table for team */
+extern const struct genl_small_ops team_nl_ops[4];
+
+int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_port_list_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+#endif /* _LINUX_TEAM_GEN_H */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0b3f21cba552..9254bca2813d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1906,7 +1906,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 7b8afa589a53..ff5be2cbf17b 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -424,7 +424,7 @@ static int aqc111_change_mtu(struct net_device *net, int new_mtu)
u16 reg16 = 0;
u8 buf[5];
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
continue;
}
- /* Clone SKB */
- new_skb = skb_clone(skb, GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!new_skb)
goto err;
- new_skb->len = pkt_len;
+ skb_put(new_skb, pkt_len);
+ memcpy(new_skb->data, skb->data, pkt_len);
skb_pull(new_skb, AQ_RX_HW_PAD);
- skb_set_tail_pointer(new_skb, new_skb->len);
- new_skb->truesize = SKB_TRUESIZE(new_skb->len);
if (aqc111_data->rx_checksum)
aqc111_rx_checksum(new_skb, pkt_desc);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index f7cff58fe044..57d6e5abc30e 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1230,7 +1230,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
ax88178_set_mfb(dev);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 88e084534853..51c295e1e823 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -174,6 +174,7 @@ struct ax88179_data {
u32 wol_supported;
u32 wolopts;
u8 disconnecting;
+ u8 initialized;
};
struct ax88179_int_data {
@@ -943,7 +944,7 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
struct usbnet *dev = netdev_priv(net);
u16 tmp16;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (net->mtu > 1500) {
@@ -1273,9 +1274,10 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev->net, mac);
+ if (!is_local_ether_addr(mac))
+ dev->net->addr_assign_type = NET_ADDR_PERM;
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
- eth_hw_addr_random(dev->net);
}
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
@@ -1285,8 +1287,11 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct ax88179_data *ax179_data;
+ int ret;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret < 0)
+ return ret;
ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL);
if (!ax179_data)
@@ -1315,6 +1320,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1452,21 +1459,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
-
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1674,6 +1676,18 @@ static int ax88179_reset(struct usbnet *dev)
return 0;
}
+static int ax88179_net_reset(struct usbnet *dev)
+{
+ struct ax88179_data *ax179_data = dev->driver_priv;
+
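+ /* ax88179_bind() now resets the device itself, so skip the first .reset()
+ * call and only perform a full reset on later invocations (e.g. resume).
+ */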
+ if (ax179_data->initialized)
+ ax88179_reset(dev);
+ else
+ ax179_data->initialized = 1;
+
+ return 0;
+}
+
static int ax88179_stop(struct usbnet *dev)
{
u16 tmp16;
@@ -1693,7 +1707,7 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1706,7 +1720,7 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1719,7 +1733,7 @@ static const struct driver_info cypress_GX3_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1732,7 +1746,7 @@ static const struct driver_info dlink_dub1312_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1745,7 +1759,7 @@ static const struct driver_info sitecom_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1758,7 +1772,7 @@ static const struct driver_info samsung_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1771,7 +1785,7 @@ static const struct driver_info lenovo_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1784,7 +1798,7 @@ static const struct driver_info belkin_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1797,7 +1811,7 @@ static const struct driver_info toshiba_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1810,7 +1824,7 @@ static const struct driver_info mct_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1823,7 +1837,7 @@ static const struct driver_info at_umc2000_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1836,7 +1850,7 @@ static const struct driver_info at_umc200_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1849,7 +1863,7 @@ static const struct driver_info at_umc2000sp_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index db05622f1f70..bf76ecccc2e6 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -798,7 +798,7 @@ int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
return 0;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 80ee4fcdfb36..5a2c38b63012 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1692,15 +1692,10 @@ static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
if (buf & MAC_CR_EEE_EN_) {
- edata->eee_enabled = true;
- edata->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
edata->tx_lpi_timer = buf;
} else {
- edata->eee_enabled = false;
- edata->eee_active = false;
- edata->tx_lpi_enabled = false;
edata->tx_lpi_timer = 0;
}
@@ -1721,24 +1716,16 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
if (ret < 0)
return ret;
- if (edata->eee_enabled) {
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- buf |= MAC_CR_EEE_EN_;
- ret = lan78xx_write_reg(dev, MAC_CR, buf);
-
- phy_ethtool_set_eee(net->phydev, edata);
-
- buf = (u32)edata->tx_lpi_timer;
- ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
- } else {
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- buf &= ~MAC_CR_EEE_EN_;
- ret = lan78xx_write_reg(dev, MAC_CR, buf);
- }
+ ret = phy_ethtool_set_eee(net->phydev, edata);
+ if (ret < 0)
+ goto out;
+ buf = (u32)edata->tx_lpi_timer;
+ ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
+out:
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static u32 lan78xx_get_link(struct net_device *net)
@@ -2114,7 +2101,20 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
+ struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
+ u32 data;
+ int ret;
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &data);
+ if (ret < 0)
+ return;
+
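+ /* Mirror the phylib EEE/LPI state into the MAC on every link change */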
+ if (phydev->enable_tx_lpi)
+ data |= MAC_CR_EEE_EN_;
+ else
+ data &= ~MAC_CR_EEE_EN_;
+ lan78xx_write_reg(dev, MAC_CR, data);
phy_print_status(phydev);
}
@@ -2408,6 +2408,8 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
mii_adv_to_linkmode_adv_t(fc, mii_adv);
linkmode_or(phydev->advertising, fc, phydev->advertising);
+ phy_support_eee(phydev);
+
if (phydev->mdio.dev.of_node) {
u32 reg;
int len;
@@ -2526,7 +2528,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e2e181378f41..663e46348ce3 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -121,7 +121,6 @@ static const struct net_device_ops qmimux_netdev_ops = {
.ndo_open = qmimux_open,
.ndo_stop = qmimux_stop,
.ndo_start_xmit = qmimux_start_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static void qmimux_setup(struct net_device *dev)
@@ -133,6 +132,7 @@ static void qmimux_setup(struct net_device *dev)
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
dev->mtu = 1500;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->needs_free_netdev = true;
}
@@ -257,12 +257,6 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
priv->mux_id = mux_id;
priv->real_dev = real_dev;
- new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!new_dev->tstats) {
- err = -ENOBUFS;
- goto out_free_newdev;
- }
-
new_dev->sysfs_groups[0] = &qmi_wwan_sysfs_qmimux_attr_group;
err = register_netdevice(new_dev);
@@ -295,7 +289,6 @@ static void qmimux_unregister_device(struct net_device *dev,
struct qmimux_priv *priv = netdev_priv(dev);
struct net_device *real_dev = priv->real_dev;
- free_percpu(dev->tstats);
netdev_upper_dev_unlink(real_dev, dev);
unregister_netdevice_queue(dev, head);
@@ -644,7 +637,6 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qmi_wwan_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1368,6 +1360,9 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1380,6 +1375,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+ {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b05, 4)}, /* Longsung U8300 */
+ {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b3c, 4)}, /* Longsung U9300 */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
@@ -1431,6 +1428,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+ {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5d6aeb086fc7..19df1cd9f072 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5274,7 +5274,7 @@ post_fw:
rtl_reset_ocp_base(tp);
strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
- dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
+ dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
}
static void rtl8152_release_firmware(struct r8152 *tp)
@@ -9365,7 +9365,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
case RTL_VER_01:
case RTL_VER_02:
case RTL_VER_07:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
default:
break;
@@ -9377,7 +9377,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
mutex_lock(&tp->control);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
if (tp->rtl_ops.change_mtu)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 78ad2da3ee29..0726e18bee6f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2234,27 +2234,23 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
rx_cmd_b);
skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ /* Use "size - 4" to remove fcs */
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size - 4);
if (unlikely(!ax_skb)) {
netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
- ax_skb->len = size;
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
+ skb_put(ax_skb, size - 4);
+ memcpy(ax_skb->data, packet, size - 4);
smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
rx_cmd_b);
- skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
- ax_skb->truesize = size + sizeof(struct sk_buff);
-
usbnet_skb_return(dev, ax_skb);
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2fa46baa589e..cbea24666479 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1810,9 +1810,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
- skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
+ u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
+
+ skb->csum = (__force __wsum)get_unaligned(csum_ptr);
skb->ip_summed = CHECKSUM_COMPLETE;
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - 2); /* remove csum */
}
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
@@ -1870,25 +1872,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(skb);
skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
if (unlikely(!ax_skb)) {
netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
- ax_skb->len = size;
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, packet, size);
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(ax_skb);
skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
- ax_skb->truesize = size + sizeof(struct sk_buff);
usbnet_skb_return(dev, ax_skb);
}
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 3164451e1010..0a662e42ed96 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 3);
skb->len = len;
skb_set_tail_pointer(skb, len);
- skb->truesize = len + sizeof(struct sk_buff);
return 2;
}
- /* skb_clone is used for address align */
- sr_skb = skb_clone(skb, GFP_ATOMIC);
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!sr_skb)
return 0;
- sr_skb->len = len;
- sr_skb->data = skb->data + 3;
- skb_set_tail_pointer(sr_skb, len);
- sr_skb->truesize = len + sizeof(struct sk_buff);
+ skb_put(sr_skb, len);
+ memcpy(sr_skb->data, skb->data + 3, len);
usbnet_skb_return(dev, sr_skb);
skb_pull(skb, len + SR_RX_OVERHEAD);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index e84efa661589..9fd516e8bb10 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -398,7 +398,7 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
// no second zero-length packet read wanted after mtu-sized packets
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
@@ -1733,6 +1733,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->hard_mtu = net->mtu + net->hard_header_len;
net->min_mtu = 0;
net->max_mtu = ETH_MAX_MTU;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index bcdfbf61eb66..426e68a95067 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -26,6 +26,7 @@
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
+#include <linux/skbuff_ref.h>
#include <net/page_pool/helpers.h>
#define DRV_NAME "veth"
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c22d1118a133..4e1a0fc0d555 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,7 @@
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -78,6 +79,7 @@ static const unsigned long guest_offloads[] = {
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
size_t offset;
+ size_t qstat_offset;
};
struct virtnet_sq_free_stats {
@@ -93,6 +95,8 @@ struct virtnet_sq_stats {
u64_stats_t xdp_tx_drops;
u64_stats_t kicks;
u64_stats_t tx_timeouts;
+ u64_stats_t stop;
+ u64_stats_t wake;
};
struct virtnet_rq_stats {
@@ -107,31 +111,159 @@ struct virtnet_rq_stats {
u64_stats_t kicks;
};
-#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
+#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
+
+#define VIRTNET_SQ_STAT_QSTAT(name, m) \
+ { \
+ name, \
+ offsetof(struct virtnet_sq_stats, m), \
+ offsetof(struct netdev_queue_stats_tx, m), \
+ }
+
+#define VIRTNET_RQ_STAT_QSTAT(name, m) \
+ { \
+ name, \
+ offsetof(struct virtnet_rq_stats, m), \
+ offsetof(struct netdev_queue_stats_rx, m), \
+ }
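+
+/* A qstat_offset of -1 marks counters that have no per-queue netdev stat
+ * counterpart and are only reported through the ethtool stats strings.
+ */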
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
- { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
- { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
- { "kicks", VIRTNET_SQ_STAT(kicks) },
- { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
+ VIRTNET_SQ_STAT("xdp_tx", xdp_tx),
+ VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
+ VIRTNET_SQ_STAT("kicks", kicks),
+ VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts),
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
- { "drops", VIRTNET_RQ_STAT(drops) },
- { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
- { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
- { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
- { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
- { "kicks", VIRTNET_RQ_STAT(kicks) },
+ VIRTNET_RQ_STAT("drops", drops),
+ VIRTNET_RQ_STAT("xdp_packets", xdp_packets),
+ VIRTNET_RQ_STAT("xdp_tx", xdp_tx),
+ VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
+ VIRTNET_RQ_STAT("xdp_drops", xdp_drops),
+ VIRTNET_RQ_STAT("kicks", kicks),
+};
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
+ VIRTNET_SQ_STAT_QSTAT("packets", packets),
+ VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
+ VIRTNET_SQ_STAT_QSTAT("stop", stop),
+ VIRTNET_SQ_STAT_QSTAT("wake", wake),
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
+ VIRTNET_RQ_STAT_QSTAT("packets", packets),
+ VIRTNET_RQ_STAT_QSTAT("bytes", bytes),
+};
+
+#define VIRTNET_STATS_DESC_CQ(name) \
+ {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
+
+#define VIRTNET_STATS_DESC_RX(class, name) \
+ {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
+
+#define VIRTNET_STATS_DESC_TX(class, name) \
+ {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
+
+
+static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
+ VIRTNET_STATS_DESC_CQ(command_num),
+ VIRTNET_STATS_DESC_CQ(ok_num),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
+ VIRTNET_STATS_DESC_RX(basic, packets),
+ VIRTNET_STATS_DESC_RX(basic, bytes),
+
+ VIRTNET_STATS_DESC_RX(basic, notifications),
+ VIRTNET_STATS_DESC_RX(basic, interrupts),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
+ VIRTNET_STATS_DESC_TX(basic, packets),
+ VIRTNET_STATS_DESC_TX(basic, bytes),
+
+ VIRTNET_STATS_DESC_TX(basic, notifications),
+ VIRTNET_STATS_DESC_TX(basic, interrupts),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
+ VIRTNET_STATS_DESC_RX(csum, needs_csum),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
+ VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
+ VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
+ VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
+ VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
+};
+
+#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field) \
+ { \
+ #name, \
+ offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), \
+ offsetof(struct netdev_queue_stats_rx, qstat_field), \
+ }
+
+#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field) \
+ { \
+ #name, \
+ offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), \
+ offsetof(struct netdev_queue_stats_tx, qstat_field), \
+ }
+
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
+ VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
+ VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
+ VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
-#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
-#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+#define VIRTNET_Q_TYPE_RX 0
+#define VIRTNET_Q_TYPE_TX 1
+#define VIRTNET_Q_TYPE_CQ 2
struct virtnet_interrupt_coalesce {
u32 max_packets;
@@ -184,6 +316,9 @@ struct receive_queue {
/* Is dynamic interrupt moderation enabled? */
bool dim_enabled;
+ /* Used to protect dim_enabled and inter_coal */
+ struct mutex dim_lock;
+
/* Dynamic Interrupt Moderation */
struct dim dim;
@@ -213,9 +348,6 @@ struct receive_queue {
/* Record the last dma info to free after new pages is allocated. */
struct virtnet_rq_dma *last_dma;
-
- /* Do dma by self */
- bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -240,15 +372,6 @@ struct virtio_net_ctrl_rss {
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
- struct virtio_net_ctrl_mq mq;
- u8 promisc;
- u8 allmulti;
- __virtio16 vid;
- __virtio64 offloads;
- struct virtio_net_ctrl_rss rss;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- struct virtio_net_ctrl_coal_vq coal_vq;
};
struct virtnet_info {
@@ -287,10 +410,14 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
+ struct virtio_net_ctrl_rss rss;
/* Has control virtqueue */
bool has_cvq;
+ /* Lock to protect the control VQ */
+ struct mutex cvq_lock;
+
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -340,6 +467,8 @@ struct virtnet_info {
/* failover when STANDBY feature enabled */
struct failover *failover;
+
+ u64 device_stats_cap;
};
struct padded_vnet_hdr {
@@ -425,6 +554,17 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
+static int vq_type(struct virtnet_info *vi, int qid)
+{
+ if (qid == vi->max_queue_pairs * 2)
+ return VIRTNET_Q_TYPE_CQ;
+
+ if (qid % 2)
+ return VIRTNET_Q_TYPE_TX;
+
+ return VIRTNET_Q_TYPE_RX;
+}
+
static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
@@ -603,7 +743,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- /* copy small packet so we can reuse these pages */
if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
skb = virtnet_build_skb(buf, truesize, p - buf, len);
if (unlikely(!skb))
@@ -707,7 +846,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -720,11 +859,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
- if (!rq->do_dma) {
- sg_init_one(rq->sg, buf, len);
- return;
- }
-
head = page_address(rq->alloc_frag.page);
offset = buf - head;
@@ -750,44 +884,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
- dma = head;
-
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma = head;
- dma->len = alloc_frag->size - sizeof(*dma);
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
- rq->last_dma = dma;
- }
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- ++dma->ref;
+ rq->last_dma = dma;
}
+ ++dma->ref;
+
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -804,12 +936,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
if (!vi->mergeable_rx_bufs && vi->big_packets)
return;
- for (i = 0; i < vi->max_queue_pairs; i++) {
- if (virtqueue_set_dma_premapped(vi->rq[i].vq))
- continue;
-
- vi->rq[i].do_dma = true;
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ /* error should never happen */
+ BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
}
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -820,7 +949,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
rq = &vi->rq[i];
- if (rq->do_dma)
+ if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -875,6 +1004,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.stop);
+ u64_stats_update_end(&sq->stats.syncp);
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
@@ -883,6 +1015,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
free_old_xmit(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
virtqueue_disable_cb(sq->vq);
}
}
@@ -1881,8 +2016,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -1996,8 +2130,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2128,7 +2261,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
} else {
while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
packets++;
}
@@ -2145,7 +2278,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
- for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
u64_stats_t *item, *src;
@@ -2153,6 +2286,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
src = (u64_stats_t *)((u8 *)&stats + offset);
u64_stats_add(item, u64_stats_read(src));
}
+
+ u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
+ u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
+
u64_stats_update_end(&rq->stats.syncp);
return packets;
@@ -2179,8 +2316,14 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
free_old_xmit(sq, true);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
netif_tx_wake_queue(txq);
+ }
__netif_tx_unlock(txq);
}
@@ -2225,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+		/* Intentionally not taking dim_lock here. This may result in a
+		 * spurious net_dim call, but if that happens, virtnet_rx_dim_work
+		 * will not act on the scheduled work.
+		 */
if (napi_complete && rq->dim_enabled)
virtnet_rx_dim_update(vi, rq);
}
@@ -2326,8 +2473,14 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
virtqueue_disable_cb(sq->vq);
free_old_xmit(sq, true);
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
netif_tx_wake_queue(txq);
+ }
opaque = virtqueue_enable_cb_prepare(sq->vq);
@@ -2527,16 +2680,18 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
* supported by the hypervisor, as indicated by feature bits, should
* never fail unless improperly formatted.
*/
-static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *out)
+static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *out,
+ struct scatterlist *in)
{
- struct scatterlist *sgs[4], hdr, stat;
- unsigned out_num = 0, tmp;
+ struct scatterlist *sgs[5], hdr, stat;
+ u32 out_num = 0, tmp, in_num = 0;
int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+ mutex_lock(&vi->cvq_lock);
vi->ctrl->status = ~0;
vi->ctrl->hdr.class = class;
vi->ctrl->hdr.cmd = cmd;
@@ -2549,18 +2704,22 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Add return status. */
sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
- sgs[out_num] = &stat;
+ sgs[out_num + in_num++] = &stat;
+
+ if (in)
+ sgs[out_num + in_num++] = in;
- BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+ ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
if (ret < 0) {
dev_warn(&vi->vdev->dev,
"Failed to add sgs for command vq: %d\n.", ret);
+ mutex_unlock(&vi->cvq_lock);
return false;
}
if (unlikely(!virtqueue_kick(vi->cvq)))
- return vi->ctrl->status == VIRTIO_NET_OK;
+ goto unlock;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -2571,9 +2730,17 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
cpu_relax();
}
+unlock:
+ mutex_unlock(&vi->cvq_lock);
return vi->ctrl->status == VIRTIO_NET_OK;
}
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *out)
+{
+ return virtnet_send_command_reply(vi, class, cmd, out, NULL);
+}
+
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2663,23 +2830,26 @@ static void virtnet_stats(struct net_device *dev,
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
- rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
- rtnl_unlock();
}
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
+ struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
struct net_device *dev = vi->dev;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, mq, sizeof(*mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2696,16 +2866,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
- int err;
-
- rtnl_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
- rtnl_unlock();
- return err;
-}
-
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2728,6 +2888,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
{
struct virtnet_info *vi =
container_of(work, struct virtnet_info, rx_mode_work);
+ u8 *promisc_allmulti __free(kfree) = NULL;
struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
@@ -2741,24 +2902,29 @@ static void virtnet_rx_mode_work(struct work_struct *work)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
- rtnl_lock();
+ promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL);
+ if (!promisc_allmulti) {
+ dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+ return;
+ }
- vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ rtnl_lock();
- sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl->promisc ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl->allmulti ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
netif_addr_lock_bh(dev);
@@ -2819,10 +2985,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2834,10 +3005,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2950,12 +3126,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
+ struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
struct scatterlist sgs;
- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+ if (!coal_vq)
+ return -ENOMEM;
+
+ coal_vq->vqn = cpu_to_le16(vqn);
+ coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+ coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3066,9 +3247,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
+ mutex_lock(&vi->rq[i].dim_lock);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets);
+ mutex_unlock(&vi->rq[i].dim_lock);
if (err)
return err;
}
@@ -3087,25 +3270,29 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
sg_init_table(sgs, 4);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
- sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+ sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
- sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+ sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+ sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+ sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
- : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
- dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
- return false;
- }
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+ goto err;
+
return true;
+
+err:
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+
}
static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3113,21 +3300,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
u32 indir_val = 0;
int i = 0;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss.hash_types = vi->rss_hash_types_supported;
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ vi->rss.indirection_table_mask = vi->rss_indir_table_size
? vi->rss_indir_table_size - 1 : 0;
- vi->ctrl->rss.unclassified_queue = 0;
+ vi->rss.unclassified_queue = 0;
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->ctrl->rss.indirection_table[i] = indir_val;
+ vi->rss.indirection_table[i] = indir_val;
}
- vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
- vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+ vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->rss.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3238,7 +3425,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -3283,7 +3470,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
cpus_read_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
+ err = virtnet_set_queues(vi, queue_pairs);
if (err) {
cpus_read_unlock();
goto err;
@@ -3297,25 +3484,654 @@ static int virtnet_set_channels(struct net_device *dev,
return err;
}
+static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
+ int num, int qid, const struct virtnet_stat_desc *desc)
+{
+ int i;
+
+ if (qid < 0) {
+ for (i = 0; i < num; ++i)
+ ethtool_sprintf(p, noq_fmt, desc[i].desc);
+ } else {
+ for (i = 0; i < num; ++i)
+ ethtool_sprintf(p, fmt, qid, desc[i].desc);
+ }
+}
+
+/* qid == -1: generate the rx/tx total field names instead of per-queue names */
+static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+{
+ const struct virtnet_stat_desc *desc;
+ const char *fmt, *noq_fmt;
+ u8 *p = *data;
+ u32 num;
+
+ if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
+ noq_fmt = "cq_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ desc = &virtnet_stats_cvq_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_cvq_desc);
+
+ virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
+ }
+ }
+
+ if (type == VIRTNET_Q_TYPE_RX) {
+ fmt = "rx%u_%s";
+ noq_fmt = "rx_%s";
+
+ desc = &virtnet_rq_stats_desc[0];
+ num = ARRAY_SIZE(virtnet_rq_stats_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+
+ fmt = "rx%u_hw_%s";
+ noq_fmt = "rx_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+ }
+
+ if (type == VIRTNET_Q_TYPE_TX) {
+ fmt = "tx%u_%s";
+ noq_fmt = "tx_%s";
+
+ desc = &virtnet_sq_stats_desc[0];
+ num = ARRAY_SIZE(virtnet_sq_stats_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+
+ fmt = "tx%u_hw_%s";
+ noq_fmt = "tx_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+ }
+
+ *data = p;
+}
+
+struct virtnet_stats_ctx {
+	/* True if the stats are written to qstats rather than to ethtool -S */
+ bool to_qstat;
+
+ /* Used to calculate the offset inside the output buffer. */
+ u32 desc_num[3];
+
+ /* The actual supported stat types. */
+ u32 bitmap[3];
+
+ /* Used to calculate the reply buffer size. */
+ u32 size[3];
+
+ /* Record the output buffer. */
+ u64 *data;
+};
+
+static void virtnet_stats_ctx_init(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ u64 *data, bool to_qstat)
+{
+ u32 queue_type;
+
+ ctx->data = data;
+ ctx->to_qstat = to_qstat;
+
+ if (to_qstat) {
+ ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
+ ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
+
+ queue_type = VIRTNET_Q_TYPE_RX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_TX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
+ }
+
+ return;
+ }
+
+ ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
+ ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ queue_type = VIRTNET_Q_TYPE_CQ;
+
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_RX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_TX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
+ }
+}
+
+/* stats_sum_queue - Sum the same fields across all sq or rq instances.
+ * @sum: where to store the summed values
+ * @num: number of fields per queue
+ * @q_value: pointer to the fields of the first queue
+ * @q_num: number of queues
+ */
+static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
+{
+ u32 step = num;
+ int i, j;
+ u64 *p;
+
+ for (i = 0; i < num; ++i) {
+ p = sum + i;
+ *p = 0;
+
+ for (j = 0; j < q_num; ++j)
+ *p += *(q_value + i + j * step);
+ }
+}
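
The layout assumed by stats_sum_queue() is a flat array with <num> u64 fields per queue placed back to back, so field i of queue j lives at q_value[i + j * num]. A minimal standalone sketch of the same strided summation (not part of the patch; the field and queue counts are illustrative assumptions):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* 2 fields (packets, bytes) for 3 queues, flattened queue by queue */
		uint64_t q_value[] = { 1, 100,  2, 200,  3, 300 };
		uint64_t sum[2];
		unsigned int num = 2, q_num = 3, i, j;

		for (i = 0; i < num; i++) {
			sum[i] = 0;
			for (j = 0; j < q_num; j++)
				sum[i] += q_value[i + j * num];
		}

		/* prints: packets=6 bytes=600 */
		printf("packets=%llu bytes=%llu\n",
		       (unsigned long long)sum[0], (unsigned long long)sum[1]);
		return 0;
	}
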
+
+static void virtnet_fill_total_fields(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx)
+{
+ u64 *data, *first_rx_q, *first_tx_q;
+ u32 num_cq, num_rx, num_tx;
+
+ num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+ num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+ num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
+
+ first_rx_q = ctx->data + num_rx + num_tx + num_cq;
+ first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
+
+ data = ctx->data;
+
+ stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
+
+ data = ctx->data + num_rx;
+
+ stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
+}
+
+static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
+ struct virtnet_stats_ctx *ctx,
+ const u8 *base, bool drv_stats, u8 reply_type)
+{
+ const struct virtnet_stat_desc *desc;
+ const u64_stats_t *v_stat;
+ u64 offset, bitmap;
+ const __le64 *v;
+ u32 queue_type;
+ int i, num;
+
+ queue_type = vq_type(vi, qid);
+ bitmap = ctx->bitmap[queue_type];
+
+ if (drv_stats) {
+ if (queue_type == VIRTNET_Q_TYPE_RX) {
+ desc = &virtnet_rq_stats_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
+ } else {
+ desc = &virtnet_sq_stats_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
+ }
+
+ for (i = 0; i < num; ++i) {
+ offset = desc[i].qstat_offset / sizeof(*ctx->data);
+ v_stat = (const u64_stats_t *)(base + desc[i].offset);
+ ctx->data[offset] = u64_stats_read(v_stat);
+ }
+ return;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ desc = &virtnet_stats_rx_gso_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ desc = &virtnet_stats_tx_csum_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
+ goto found;
+ }
+
+ return;
+
+found:
+ for (i = 0; i < num; ++i) {
+ offset = desc[i].qstat_offset / sizeof(*ctx->data);
+ v = (const __le64 *)(base + desc[i].offset);
+ ctx->data[offset] = le64_to_cpu(*v);
+ }
+}
+
+/* virtnet_fill_stats - copy the stats to qstats or ethtool -S
+ * The stats source is either the device or the driver.
+ *
+ * @vi: virtio net info
+ * @qid: the vq id
+ * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
+ * @base: pointer to the device reply or the driver stats structure
+ * @drv_stats: true if @base points to driver stats, false if it is a device reply
+ * @reply_type: the type of the device reply (must be zero when @drv_stats is true)
+ */
+static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
+ struct virtnet_stats_ctx *ctx,
+ const u8 *base, bool drv_stats, u8 reply_type)
+{
+ u32 queue_type, num_rx, num_tx, num_cq;
+ const struct virtnet_stat_desc *desc;
+ const u64_stats_t *v_stat;
+ u64 offset, bitmap;
+ const __le64 *v;
+ int i, num;
+
+ if (ctx->to_qstat)
+ return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
+
+ num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+ num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+ num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
+
+ queue_type = vq_type(vi, qid);
+ bitmap = ctx->bitmap[queue_type];
+
+	/* Skip the rx/tx total fields at the start of the output buffer. */
+ offset = num_rx + num_tx;
+
+ if (queue_type == VIRTNET_Q_TYPE_TX) {
+ offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
+
+ num = ARRAY_SIZE(virtnet_sq_stats_desc);
+ if (drv_stats) {
+ desc = &virtnet_sq_stats_desc[0];
+ goto drv_stats;
+ }
+
+ offset += num;
+
+ } else if (queue_type == VIRTNET_Q_TYPE_RX) {
+ offset += num_cq + num_rx * (qid / 2);
+
+ num = ARRAY_SIZE(virtnet_rq_stats_desc);
+ if (drv_stats) {
+ desc = &virtnet_rq_stats_desc[0];
+ goto drv_stats;
+ }
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ desc = &virtnet_stats_cvq_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_cvq_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
+ goto found;
+
+ offset += num;
+ }
+
+ return;
+
+found:
+ for (i = 0; i < num; ++i) {
+ v = (const __le64 *)(base + desc[i].offset);
+ ctx->data[offset + i] = le64_to_cpu(*v);
+ }
+
+ return;
+
+drv_stats:
+ for (i = 0; i < num; ++i) {
+ v_stat = (const u64_stats_t *)(base + desc[i].offset);
+ ctx->data[offset + i] = u64_stats_read(v_stat);
+ }
+}
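
For ethtool -S, virtnet_fill_stats() writes into one flat u64 buffer laid out as: rx totals, tx totals, cq stats, then one block per rx queue followed by one block per tx queue, with driver stats placed before device stats inside each block. A standalone sketch of the same offset arithmetic (not part of the patch; the descriptor counts and queue numbers are illustrative assumptions):

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_rx = 13, num_tx = 9, num_cq = 1; /* fields per type (assumed) */
		unsigned int curr_queue_pairs = 4;
		unsigned int qid = 5;               /* vq index: odd => tx queue qid / 2 */
		unsigned int off = num_rx + num_tx; /* skip the rx/tx total fields */

		if (qid & 1)	/* tx vq */
			off += num_cq + num_rx * curr_queue_pairs + num_tx * (qid / 2);
		else		/* rx vq */
			off += num_cq + num_rx * (qid / 2);

		printf("driver stats for vq %u start at data[%u]\n", qid, off);
		return 0;
	}
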
+
+static int __virtnet_get_hw_stats(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ struct virtio_net_ctrl_queue_stats *req,
+ int req_size, void *reply, int res_size)
+{
+ struct virtio_net_stats_reply_hdr *hdr;
+ struct scatterlist sgs_in, sgs_out;
+ void *p;
+ u32 qid;
+ int ok;
+
+ sg_init_one(&sgs_out, req, req_size);
+ sg_init_one(&sgs_in, reply, res_size);
+
+ ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
+ VIRTIO_NET_CTRL_STATS_GET,
+ &sgs_out, &sgs_in);
+
+ if (!ok)
+ return ok;
+
+ for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
+ hdr = p;
+ qid = le16_to_cpu(hdr->vq_index);
+ virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
+ }
+
+ return 0;
+}
+
+static void virtnet_make_stat_req(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ struct virtio_net_ctrl_queue_stats *req,
+ int qid, int *idx)
+{
+ int qtype = vq_type(vi, qid);
+ u64 bitmap = ctx->bitmap[qtype];
+
+ if (!bitmap)
+ return;
+
+ req->stats[*idx].vq_index = cpu_to_le16(qid);
+ req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
+ *idx += 1;
+}
+
+/* qid == -1: get the stats of all vqs.
+ * qid >= 0: get the stats of the specified vq; this must not be the cvq.
+ */
+static int virtnet_get_hw_stats(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx, int qid)
+{
+ int qnum, i, j, res_size, qtype, last_vq, first_vq;
+ struct virtio_net_ctrl_queue_stats *req;
+ bool enable_cvq;
+ void *reply;
+ int ok;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+ return 0;
+
+ if (qid == -1) {
+ last_vq = vi->curr_queue_pairs * 2 - 1;
+ first_vq = 0;
+ enable_cvq = true;
+ } else {
+ last_vq = qid;
+ first_vq = qid;
+ enable_cvq = false;
+ }
+
+ qnum = 0;
+ res_size = 0;
+ for (i = first_vq; i <= last_vq ; ++i) {
+ qtype = vq_type(vi, i);
+ if (ctx->bitmap[qtype]) {
+ ++qnum;
+ res_size += ctx->size[qtype];
+ }
+ }
+
+ if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
+ res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
+ qnum += 1;
+ }
+
+ req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ reply = kmalloc(res_size, GFP_KERNEL);
+ if (!reply) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ j = 0;
+ for (i = first_vq; i <= last_vq ; ++i)
+ virtnet_make_stat_req(vi, ctx, req, i, &j);
+
+ if (enable_cvq)
+ virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
+
+ ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
+
+ kfree(req);
+ kfree(reply);
+
+ return ok;
+}
+
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct virtnet_info *vi = netdev_priv(dev);
- unsigned int i, j;
+ unsigned int i;
u8 *p = data;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
- ethtool_sprintf(&p, "rx_queue_%u_%s", i,
- virtnet_rq_stats_desc[j].desc);
- }
+ /* Generate the total field names. */
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
- ethtool_sprintf(&p, "tx_queue_%u_%s", i,
- virtnet_sq_stats_desc[j].desc);
- }
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
+
+ for (i = 0; i < vi->curr_queue_pairs; ++i)
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
+
+ for (i = 0; i < vi->curr_queue_pairs; ++i)
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
break;
}
}
@@ -3323,11 +4139,17 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_stats_ctx ctx = {0};
+ u32 pair_count;
switch (sset) {
case ETH_SS_STATS:
- return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
- VIRTNET_SQ_STATS_LEN);
+ virtnet_stats_ctx_init(vi, &ctx, NULL, false);
+
+ pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
+
+ return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
+ vi->curr_queue_pairs * pair_count;
default:
return -EOPNOTSUPP;
}
@@ -3337,40 +4159,32 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct virtnet_info *vi = netdev_priv(dev);
- unsigned int idx = 0, start, i, j;
+ struct virtnet_stats_ctx ctx = {0};
+ unsigned int start, i;
const u8 *stats_base;
- const u64_stats_t *p;
- size_t offset;
+
+ virtnet_stats_ctx_init(vi, &ctx, data, false);
+ if (virtnet_get_hw_stats(vi, &ctx, -1))
+ dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ struct send_queue *sq = &vi->sq[i];
stats_base = (const u8 *)&rq->stats;
do {
start = u64_stats_fetch_begin(&rq->stats.syncp);
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
- offset = virtnet_rq_stats_desc[j].offset;
- p = (const u64_stats_t *)(stats_base + offset);
- data[idx + j] = u64_stats_read(p);
- }
+ virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
- idx += VIRTNET_RQ_STATS_LEN;
- }
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct send_queue *sq = &vi->sq[i];
stats_base = (const u8 *)&sq->stats;
do {
start = u64_stats_fetch_begin(&sq->stats.syncp);
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
- offset = virtnet_sq_stats_desc[j].offset;
- p = (const u64_stats_t *)(stats_base + offset);
- data[idx + j] = u64_stats_read(p);
- }
+ virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
- idx += VIRTNET_SQ_STATS_LEN;
}
+
+ virtnet_fill_total_fields(vi, &ctx);
}
static void virtnet_get_channels(struct net_device *dev,
@@ -3410,12 +4224,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
struct scatterlist sgs_tx;
int i;
- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+ if (!coal_tx)
+ return -ENOMEM;
+
+ coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3435,8 +4254,10 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
+ int ret = 0;
int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -3446,11 +4267,21 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;
+	/* Acquire every rx queue's dim_lock */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ mutex_lock(&vi->rq[i].dim_lock);
+
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = true;
- return 0;
+ goto unlock;
+ }
+
+ coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+ if (!coal_rx) {
+ ret = -ENOMEM;
+ goto unlock;
}
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
@@ -3463,14 +4294,16 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
* we need apply the global new params even if they
* are not updated.
*/
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
- &sgs_rx))
- return -EINVAL;
+ &sgs_rx)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
@@ -3478,8 +4311,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
}
+unlock:
+ for (i = vi->max_queue_pairs - 1; i >= 0; i--)
+ mutex_unlock(&vi->rq[i].dim_lock);
- return 0;
+ return ret;
}
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -3503,19 +4339,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u16 queue)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
- bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets;
+ bool cur_rx_dim;
int err;
+ mutex_lock(&vi->rq[queue].dim_lock);
+ cur_rx_dim = vi->rq[queue].dim_enabled;
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
- ec->rx_max_coalesced_frames != max_packets))
+ ec->rx_max_coalesced_frames != max_packets)) {
+ mutex_unlock(&vi->rq[queue].dim_lock);
return -EINVAL;
+ }
if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true;
+ mutex_unlock(&vi->rq[queue].dim_lock);
return 0;
}
@@ -3528,10 +4369,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames);
- if (err)
- return err;
-
- return 0;
+ mutex_unlock(&vi->rq[queue].dim_lock);
+ return err;
}
static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
@@ -3561,39 +4400,27 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct dim_cq_moder update_moder;
- int i, qnum, err;
+ int qnum, err;
- if (!rtnl_trylock())
- return;
+ qnum = rq - vi->rq;
- /* Each rxq's work is queued by "net_dim()->schedule_work()"
- * in response to NAPI traffic changes. Note that dim->profile_ix
- * for each rxq is updated prior to the queuing action.
- * So we only need to traverse and update profiles for all rxqs
- * in the work which is holding rtnl_lock.
- */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- rq = &vi->rq[i];
- dim = &rq->dim;
- qnum = rq - vi->rq;
-
- if (!rq->dim_enabled)
- continue;
-
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- if (update_moder.usec != rq->intr_coal.max_usecs ||
- update_moder.pkts != rq->intr_coal.max_packets) {
- err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
- update_moder.usec,
- update_moder.pkts);
- if (err)
- pr_debug("%s: Failed to send dim parameters on rxq%d\n",
- dev->name, qnum);
- dim->state = DIM_START_MEASURE;
- }
- }
+ mutex_lock(&rq->dim_lock);
+ if (!rq->dim_enabled)
+ goto out;
- rtnl_unlock();
+ update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ if (update_moder.usec != rq->intr_coal.max_usecs ||
+ update_moder.pkts != rq->intr_coal.max_packets) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+ update_moder.usec,
+ update_moder.pkts);
+ if (err)
+ pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ dev->name, qnum);
+ dim->state = DIM_START_MEASURE;
+ }
+out:
+ mutex_unlock(&rq->dim_lock);
}
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -3731,11 +4558,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ mutex_lock(&vi->rq[queue].dim_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
+ mutex_unlock(&vi->rq[queue].dim_lock);
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3791,11 +4620,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+ rxfh->indir[i] = vi->rss.indirection_table[i];
}
if (rxfh->key)
- memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3807,6 +4636,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -3814,13 +4644,28 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
if (rxfh->indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss.indirection_table[i] = rxfh->indir[i];
+ update = true;
}
- if (rxfh->key)
- memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
- virtnet_commit_rss_command(vi);
+ if (rxfh->key) {
+		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
+		 * device provides hash calculation capabilities, that is,
+		 * hash_key is configured.
+		 */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
+ memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
+ update = true;
+ }
+
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
@@ -3889,6 +4734,97 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_rxnfc = virtnet_set_rxnfc,
};
+static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct receive_queue *rq = &vi->rq[i];
+ struct virtnet_stats_ctx ctx = {0};
+
+ virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+ virtnet_get_hw_stats(vi, &ctx, i * 2);
+ virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
+}
+
+static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq = &vi->sq[i];
+ struct virtnet_stats_ctx ctx = {0};
+
+ virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+ virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
+ virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
+}
+
+static void virtnet_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+	/* The queue stats of virtio-net are never reset, so we report
+	 * a base of 0 here.
+	 */
+ rx->bytes = 0;
+ rx->packets = 0;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ rx->csum_unnecessary = 0;
+ rx->csum_none = 0;
+ rx->csum_bad = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ rx->hw_gro_packets = 0;
+ rx->hw_gro_bytes = 0;
+ rx->hw_gro_wire_packets = 0;
+ rx->hw_gro_wire_bytes = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
+ rx->hw_drop_ratelimits = 0;
+
+ tx->bytes = 0;
+ tx->packets = 0;
+ tx->stop = 0;
+ tx->wake = 0;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ tx->hw_drops = 0;
+ tx->hw_drop_errors = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ tx->csum_none = 0;
+ tx->needs_csum = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ tx->hw_gso_packets = 0;
+ tx->hw_gso_bytes = 0;
+ tx->hw_gso_wire_packets = 0;
+ tx->hw_gso_wire_bytes = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
+ tx->hw_drop_ratelimits = 0;
+}
+
+static const struct netdev_stat_ops virtnet_stat_ops = {
+ .get_queue_stats_rx = virtnet_get_queue_stats_rx,
+ .get_queue_stats_tx = virtnet_get_queue_stats_tx,
+ .get_base_stats = virtnet_get_base_stats,
+};
+
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
@@ -3935,10 +4871,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
+ __virtio64 *_offloads __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+ if (!_offloads)
+ return -ENOMEM;
+
+ *_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+ sg_init_one(&sg, _offloads, sizeof(*_offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -4038,7 +4980,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
synchronize_net();
}
- err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -4140,9 +5082,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
else
- vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -4271,7 +5213,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -4454,6 +5396,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp);
+ mutex_init(&vi->rq[i].dim_lock);
}
return 0;
@@ -4621,6 +5564,48 @@ static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
}
}
+#define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10
+static enum xdp_rss_hash_type
+virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
+ [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
+ [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
+ [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
+ [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
+ [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+ [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
+};
+
+static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct xdp_buff *xdp = (void *)_ctx;
+ struct virtio_net_hdr_v1_hash *hdr_hash;
+ struct virtnet_info *vi;
+ u16 hash_report;
+
+ if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
+ return -ENODATA;
+
+ vi = netdev_priv(xdp->rxq->dev);
+ hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
+ hash_report = __le16_to_cpu(hdr_hash->hash_report);
+
+ if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
+ hash_report = VIRTIO_NET_HASH_REPORT_NONE;
+
+ *rss_type = virtnet_xdp_rss_type[hash_report];
+ *hash = __le32_to_cpu(hdr_hash->hash_value);
+ return 0;
+}
+
+static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
+ .xmo_rx_hash = virtnet_xdp_rx_hash,
+};
+
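With xmo_rx_hash wired up, an XDP program can read the device-supplied hash through the generic bpf_xdp_metadata_rx_hash() kfunc. An illustrative consumer sketch (not part of the patch; assumes a libbpf build with a generated vmlinux.h providing struct xdp_md and enum xdp_rss_hash_type):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
					    enum xdp_rss_hash_type *rss_type) __ksym;

	SEC("xdp")
	int read_rx_hash(struct xdp_md *ctx)
	{
		enum xdp_rss_hash_type type = 0;
		__u32 hash = 0;

		/* Fails with -ENODATA when NETIF_F_RXHASH is disabled,
		 * mirroring the check in virtnet_xdp_rx_hash() above.
		 */
		if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &type))
			bpf_printk("rx hash 0x%x type %d", hash, type);

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
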
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
@@ -4650,6 +5635,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
IFF_TX_SKB_NO_LINEAR;
dev->netdev_ops = &virtnet_netdev;
+ dev->stat_ops = &virtnet_stat_ops;
dev->features = NETIF_F_HIGHDMA;
dev->ethtool_ops = &virtnet_ethtool_ops;
@@ -4729,13 +5715,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
@@ -4747,6 +5735,7 @@ static int virtnet_probe(struct virtio_device *vdev)
VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
dev->hw_features |= NETIF_F_RXHASH;
+ dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
}
if (vi->has_rss_hash_report)
@@ -4764,6 +5753,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ mutex_init(&vi->cvq_lock);
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
mtu = virtio_cread16(vdev,
offsetof(struct virtio_net_config,
@@ -4855,7 +5846,7 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
* We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
@@ -4875,6 +5866,33 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
+ struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
+ struct scatterlist sg;
+ __le64 v;
+
+ stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
+ if (!stats_cap) {
+ rtnl_unlock();
+ err = -ENOMEM;
+ goto free_unregister_netdev;
+ }
+
+ sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
+
+ if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
+ VIRTIO_NET_CTRL_STATS_QUERY,
+ NULL, &sg)) {
+ pr_debug("virtio_net: fail to get stats capability\n");
+ rtnl_unlock();
+ err = -EINVAL;
+ goto free_unregister_netdev;
+ }
+
+ v = stats_cap->supported_stats_types[0];
+ vi->device_stats_cap = le64_to_cpu(v);
+ }
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
@@ -5003,7 +6021,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
VIRTIO_NET_F_VQ_NOTF_COAL, \
- VIRTIO_NET_F_GUEST_HDRLEN
+ VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0578864792b6..89ca6e75fcc6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3457,7 +3457,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/*
* Reset_work may be in the middle of resetting the device, wait for its
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bb95ce43cd97..3a252ac5dd28 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -653,7 +653,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
skb->dev = dev;
rcu_read_lock();
- nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev)
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = (struct rtable *)dst;
+ struct rtable *rt = dst_rtable(dst);
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
@@ -1971,7 +1971,7 @@ static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
static void vrf_netns_exit_sysctl(struct net *net)
{
struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
- struct ctl_table *table;
+ const struct ctl_table *table;
table = nn_vrf->ctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(nn_vrf->ctl_hdr);
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index a1ba5169ed5d..4c260074c091 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -58,7 +58,7 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
if (!vsockmon_is_valid_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 3495591a5c29..f78dd0438843 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1584,7 +1584,8 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) {
- tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT,
+ tun_dst->u.tun_info.key.tun_flags);
tun_dst->u.tun_info.options_len = sizeof(*md);
}
if (gbp->dont_learn)
@@ -1615,6 +1616,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -1670,6 +1675,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
+ int nh;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1716,9 +1722,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (vxlan_collect_metadata(vs)) {
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst;
- tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags,
key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
@@ -1758,12 +1766,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
@@ -1833,7 +1857,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
}
parp = arp_hdr(skb);
@@ -1889,7 +1915,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2048,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2259,7 +2285,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
len);
} else {
drop:
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2291,7 +2317,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
@@ -2403,14 +2429,14 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
if (info->options_len < sizeof(*md))
goto drop;
md = ip_tunnel_info_opts(info);
}
ttl = info->key.ttl;
tos = info->key.tos;
- udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
@@ -2451,7 +2477,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
old_iph->frag_off & htons(IP_DF)))
df = htons(IP_DF);
}
- } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
+ } else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
+ info->key.tun_flags)) {
df = htons(IP_DF);
}
@@ -2505,7 +2532,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (!info) {
- u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6,
dst_port, ifindex, vni,
@@ -2555,7 +2582,7 @@ out_unlock:
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2563,11 +2590,11 @@ drop:
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(ndst);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2600,7 +2627,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2638,7 +2665,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2735,7 +2762,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
@@ -3154,7 +3181,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -4542,7 +4569,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- return vxlan->net;
+ return READ_ONCE(vxlan->net);
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 31ab2136cdf1..67be9857c86c 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -180,7 +180,7 @@ config C101
config FARSYNC
tristate "FarSync T-Series support"
- depends on HDLC && PCI
+ depends on HDLC && PCI && HAS_IOPORT
help
Support for the FarSync T-Series X.21 (and V.35/V.24) cards by
FarSite Communications Ltd.
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index f69b1f579a0c..c5e7ca793c43 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -765,15 +765,13 @@ framer_exit:
return ret;
}
-static int qmc_hdlc_remove(struct platform_device *pdev)
+static void qmc_hdlc_remove(struct platform_device *pdev)
{
struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
unregister_hdlc_device(qmc_hdlc->netdev);
free_netdev(qmc_hdlc->netdev);
qmc_hdlc_framer_exit(qmc_hdlc);
-
- return 0;
}
static const struct of_device_id qmc_hdlc_id_table[] = {
@@ -788,7 +786,7 @@ static struct platform_driver qmc_hdlc_driver = {
.of_match_table = qmc_hdlc_id_table,
},
.probe = qmc_hdlc_probe,
- .remove = qmc_hdlc_remove,
+ .remove_new = qmc_hdlc_remove,
};
module_platform_driver(qmc_hdlc_driver);
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index ee4da9ab8013..a00671b58701 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/genetlink.h>
+#include <net/genetlink.h>
#include <net/rtnetlink.h>
static int __init wg_mod_init(void)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 815f8f599f5d..5a55db349cb5 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf,
struct ar5523 *ar;
int error = -ENOMEM;
+ static const u8 bulk_ep_addr[] = {
+ AR5523_CMD_TX_PIPE | USB_DIR_OUT,
+ AR5523_DATA_TX_PIPE | USB_DIR_OUT,
+ AR5523_CMD_RX_PIPE | USB_DIR_IN,
+ AR5523_DATA_RX_PIPE | USB_DIR_IN,
+ 0};
+
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
+ dev_err(&dev->dev,
+ "Could not find all expected endpoints\n");
+ error = -ENODEV;
+ goto out;
+ }
+
/*
* Load firmware if the device requires it. This will return
 * -ENXIO on success and we'll get called back after the usb
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index f02a308a9ffc..34654f710d8a 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -171,8 +171,10 @@ struct ath_common {
unsigned int clockrate;
spinlock_t cc_lock;
- struct ath_cycle_counters cc_ani;
- struct ath_cycle_counters cc_survey;
+ struct_group(cc,
+ struct ath_cycle_counters cc_ani;
+ struct ath_cycle_counters cc_survey;
+ );
struct ath_regulatory regulatory;
struct ath_regulatory reg_world_copy;
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index a378bc48b1d2..f0441b3d7dcb 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -394,14 +394,14 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
-static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
+static int ath10k_ahb_request_irq_intx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
@@ -415,12 +415,12 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
ar_ahb->irq, ret);
return ret;
}
- ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
return 0;
}
-static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
+static void ath10k_ahb_release_irq_intx(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
@@ -430,7 +430,7 @@ static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
static void ath10k_ahb_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
}
static int ath10k_ahb_resource_init(struct ath10k *ar)
@@ -621,7 +621,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
ath10k_core_napi_enable(ar);
ath10k_ce_enable_interrupts(ar);
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_rx_post(ar);
@@ -775,7 +775,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ath10k_pci_init_napi(ar);
- ret = ath10k_ahb_request_irq_legacy(ar);
+ ret = ath10k_ahb_request_irq_intx(ar);
if (ret)
goto err_free_pipes;
@@ -806,7 +806,7 @@ err_halt_device:
ath10k_ahb_clock_disable(ar);
err_free_irq:
- ath10k_ahb_release_irq_legacy(ar);
+ ath10k_ahb_release_irq_intx(ar);
err_free_pipes:
ath10k_pci_release_resource(ar);
@@ -828,7 +828,7 @@ static void ath10k_ahb_remove(struct platform_device *pdev)
ath10k_core_unregister(ar);
ath10k_ahb_irq_disable(ar);
- ath10k_ahb_release_irq_legacy(ar);
+ ath10k_ahb_release_irq_intx(ar);
ath10k_pci_release_resource(ar);
ath10k_ahb_halt_chip(ar);
ath10k_ahb_clock_disable(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 9ce6f49ab261..bdf0552cd1c3 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -75,7 +75,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
@@ -116,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
@@ -158,7 +156,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA9887_HW_1_0_FW_DIR,
- .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
@@ -199,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 0,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -236,7 +232,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -277,7 +272,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -318,7 +312,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -360,7 +353,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -409,7 +401,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
- .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
@@ -457,8 +448,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA9984_HW_1_0_FW_DIR,
- .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
- .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
@@ -510,7 +499,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA9888_HW_2_0_FW_DIR,
- .board = QCA9888_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
@@ -556,7 +544,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -597,7 +584,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -640,7 +626,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -680,7 +665,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA4019_HW_1_0_FW_DIR,
- .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
@@ -720,6 +704,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.max_spatial_stream = 4,
.fw = {
.dir = WCN3990_HW_1_0_FW_DIR,
+ .board_size = WCN3990_BOARD_DATA_SZ,
+ .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.rx_desc_ops = &wcn3990_rx_desc_ops,
@@ -942,11 +928,20 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
if (dir == NULL)
dir = ".";
+ if (ar->board_name) {
+ snprintf(filename, sizeof(filename), "%s/%s/%s",
+ dir, ar->board_name, file);
+ ret = firmware_request_nowarn(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+ if (!ret)
+ return fw;
+ }
+
snprintf(filename, sizeof(filename), "%s/%s", dir, file);
ret = firmware_request_nowarn(&fw, filename, ar->dev);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
filename, ret);
-
if (ret)
return ERR_PTR(ret);
@@ -1288,11 +1283,6 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
char boardname[100];
if (bd_ie_type == ATH10K_BD_IE_BOARD) {
- if (!ar->hw_params.fw.board) {
- ath10k_err(ar, "failed to find board file fw entry\n");
- return -EINVAL;
- }
-
scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -1302,7 +1292,7 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
if (IS_ERR(ar->normal_mode_fw.board)) {
fw = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
+ ATH10K_BOARD_DATA_FILE);
ar->normal_mode_fw.board = fw;
}
@@ -1312,13 +1302,8 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
} else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
- if (!ar->hw_params.fw.eboard) {
- ath10k_err(ar, "failed to find eboard file fw entry\n");
- return -EINVAL;
- }
-
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.eboard);
+ ATH10K_EBOARD_DATA_FILE);
ar->normal_mode_fw.ext_board = fw;
if (IS_ERR(ar->normal_mode_fw.ext_board))
return PTR_ERR(ar->normal_mode_fw.ext_board);
@@ -3673,11 +3658,13 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->set_coverage_class_work,
ath10k_core_set_coverage_class_work);
- init_dummy_netdev(&ar->napi_dev);
+ ar->napi_dev = alloc_netdev_dummy(0);
+ if (!ar->napi_dev)
+ goto err_free_tx_complete;
ret = ath10k_coredump_create(ar);
if (ret)
- goto err_free_tx_complete;
+ goto err_free_netdev;
ret = ath10k_debug_create(ar);
if (ret)
@@ -3687,6 +3674,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
err_free_coredump:
ath10k_coredump_destroy(ar);
+err_free_netdev:
+ free_netdev(ar->napi_dev);
err_free_tx_complete:
destroy_workqueue(ar->workqueue_tx_complete);
err_free_aux_wq:
@@ -3708,6 +3697,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_tx_complete);
+ free_netdev(ar->napi_dev);
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index c110d15528bd..b00099f0b24e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1081,6 +1081,8 @@ struct ath10k {
*/
const struct ath10k_fw_components *running_fw;
+ const char *board_name;
+
const struct firmware *pre_cal_file;
const struct firmware *cal_file;
@@ -1269,7 +1271,7 @@ struct ath10k {
struct ath10k_per_peer_tx_stats peer_tx_stats;
/* NAPI */
- struct net_device napi_dev;
+ struct net_device *napi_dev;
struct napi_struct napi;
struct work_struct set_coverage_class_work;
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 394bf3c32abf..0f6de862c3a9 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
out:
mutex_unlock(&ar->conf_mutex);
- return count;
+ return ret ?: count;
}
static const struct file_operations fops_peer_debug_trigger = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 93c073091996..48897e5eca06 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -39,14 +39,12 @@ enum ath10k_bus {
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9887 1.0 definitions */
#define QCA9887_HW_1_0_VERSION 0x4100016d
#define QCA9887_HW_1_0_CHIP_ID_REV 0
#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
-#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA6174 target BMI version signatures */
@@ -85,11 +83,9 @@ enum qca9377_chip_id_rev {
};
#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
/* QCA99X0 1.0 definitions (unsupported) */
@@ -99,7 +95,6 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9984 1.0 defines */
@@ -107,8 +102,6 @@ enum qca9377_chip_id_rev {
#define QCA9984_HW_DEV_TYPE 0xa
#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
-#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA9888 2.0 defines */
@@ -116,18 +109,15 @@ enum qca9377_chip_id_rev {
#define QCA9888_HW_DEV_TYPE 0xc
#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
-#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA4019 1.0 definitions */
#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* WCN3990 1.0 definitions */
@@ -159,7 +149,9 @@ enum qca9377_chip_id_rev {
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
+#define ATH10K_BOARD_DATA_FILE "board.bin"
#define ATH10K_BOARD_API2_FILE "board-2.bin"
+#define ATH10K_EBOARD_DATA_FILE "eboard.bin"
#define REG_DUMP_COUNT_QCA988X 60
@@ -553,9 +545,7 @@ struct ath10k_hw_params {
struct ath10k_hw_params_fw {
const char *dir;
- const char *board;
size_t board_size;
- const char *eboard;
size_t ext_board_size;
size_t board_ext_size;
} fw;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 5c34b156b4ff..c52a16f8078f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -721,7 +721,7 @@ bool ath10k_pci_irq_pending(struct ath10k *ar)
return false;
}
-void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
@@ -739,7 +739,7 @@ void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
PCIE_INTR_ENABLE_ADDRESS);
}
-void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+void ath10k_pci_enable_intx_irq(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS,
@@ -1935,7 +1935,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
}
@@ -1949,7 +1949,7 @@ static void ath10k_pci_irq_sync(struct ath10k *ar)
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}
@@ -3111,11 +3111,11 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) &&
!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
@@ -3152,7 +3152,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
napi_schedule(ctx);
goto out;
}
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}
@@ -3177,7 +3177,7 @@ static int ath10k_pci_request_irq_msi(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_intx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -3199,8 +3199,8 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->oper_irq_mode) {
- case ATH10K_PCI_IRQ_LEGACY:
- return ath10k_pci_request_irq_legacy(ar);
+ case ATH10K_PCI_IRQ_INTX:
+ return ath10k_pci_request_irq_intx(ar);
case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
@@ -3217,7 +3217,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -3232,7 +3232,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
ath10k_pci_irq_mode);
/* Try MSI */
- if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) {
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
@@ -3250,7 +3250,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* For now, fix the race by repeating the write in below
* synchronization checking.
*/
- ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -3258,7 +3258,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+static void ath10k_pci_deinit_irq_intx(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0);
@@ -3269,8 +3269,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->oper_irq_mode) {
- case ATH10K_PCI_IRQ_LEGACY:
- ath10k_pci_deinit_irq_legacy(ar);
+ case ATH10K_PCI_IRQ_INTX:
+ ath10k_pci_deinit_irq_intx(ar);
break;
default:
pci_disable_msi(ar_pci->pdev);
@@ -3307,14 +3307,14 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX)
/* Fix potential race by repeating CORE_BASE writes */
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
mdelay(10);
} while (time_before(jiffies, timeout));
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
if (val == 0xffffffff) {
@@ -3826,28 +3826,28 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
-MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 27bb4cf2dfea..4c3f536f2ea1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -101,7 +101,7 @@ struct ath10k_pci_supp_chip {
enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_INTX = 1,
ATH10K_PCI_IRQ_MSI = 2,
};
@@ -243,9 +243,9 @@ int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
void ath10k_pci_flush(struct ath10k *ar);
-void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+void ath10k_pci_enable_intx_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
-void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar);
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 0ab5433f6cf6..08a6f36a6be9 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2532,7 +2532,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
@@ -2667,29 +2667,10 @@ static struct sdio_driver ath10k_sdio_driver = {
.probe = ath10k_sdio_probe,
.remove = ath10k_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.pm = ATH10K_SDIO_PM_OPS,
},
};
-
-static int __init ath10k_sdio_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&ath10k_sdio_driver);
- if (ret)
- pr_err("sdio driver registration failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit ath10k_sdio_exit(void)
-{
- sdio_unregister_driver(&ath10k_sdio_driver);
-}
-
-module_init(ath10k_sdio_init);
-module_exit(ath10k_sdio_exit);
+module_sdio_driver(ath10k_sdio_driver);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 2c39bad7ebfb..8530550cf5df 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -935,7 +935,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
- dev_set_threaded(&ar->napi_dev, true);
+ dev_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1253,7 +1253,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
@@ -1338,6 +1338,9 @@ static void ath10k_snoc_quirks_init(struct ath10k *ar)
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct device *dev = &ar_snoc->dev->dev;
+ /* ignore errors, keep NULL if there is no property */
+ of_property_read_string(dev->of_node, "firmware-name", &ar->board_name);
+
if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
}
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index ec556bb88d65..ba37e6c7ced0 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -491,4 +491,7 @@ struct host_interest {
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0
+#define WCN3990_BOARD_DATA_SZ 26328
+#define WCN3990_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 31c8d7fbb095..8b15ec07b107 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -100,7 +100,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
spin_unlock_bh(&ar->data_lock);
/* display in millidegree celsius */
- ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3c482baacec1..3b51b7f52130 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1014,7 +1014,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2e9661f4bea8..80d255aaff1b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- unsigned long time_left;
+ unsigned long time_left, i;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left)
- return -ETIMEDOUT;
+ if (!time_left) {
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that this is
+ * because the corresponding CE ring doesn't fire it.
+ * Work around this by polling the CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(ar, i, 1);
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 2c94d50ae36f..43d2d8ddcdc0 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -18,7 +18,8 @@ ath11k-y += core.o \
dbring.o \
hw.o \
pcic.o \
- fw.o
+ fw.o \
+ p2p.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 7c0a23517949..ca0f17ddebba 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -442,6 +442,7 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -533,8 +534,12 @@ static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_ahb_ext_grp_napi_poll);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
@@ -1256,7 +1261,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
+ ath11k_ahb_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c78bce19bd75..3cc817a3b4a4 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -247,7 +247,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -416,7 +419,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -501,7 +507,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
@@ -750,7 +759,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -894,12 +906,6 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_wow_enable(ab);
- if (ret) {
- ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
- return ret;
- }
-
ret = ath11k_dp_rx_pktlog_stop(ab, false);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
@@ -910,29 +916,85 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_ce_stop_shadow_timers(ab);
ath11k_dp_stop_shadow_timers(ab);
+ /* PM framework skips suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However, ath11k_core_resume() would still be called because
+ * we return success here, so the kernel puts us on dpm_suspended_list.
+ * Since we won't go through a power down/up cycle, there is
+ * no chance to call complete(&ab->restart_completed) in
+ * ath11k_core_restart(), making ath11k_core_resume() time out.
+ * So call it here to avoid this issue. This also works when
+ * no error happens and suspend_late/resume_early do get called,
+ * because it will be reinitialized in ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend);
+
+int ath11k_core_suspend_late(struct ath11k_base *ab)
+{
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ret = ath11k_hif_suspend(ab);
- if (ret) {
- ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
- return ret;
- }
+ ath11k_hif_power_down(ab, true);
return 0;
}
-EXPORT_SYMBOL(ath11k_core_suspend);
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
+ long time_left;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
- /* so far signle_pdev_only chips have supports_suspend as true
+ /* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
@@ -940,29 +1002,19 @@ int ath11k_core_resume(struct ath11k_base *ab)
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
- ret = ath11k_hif_resume(ab);
- if (ret) {
- ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
- return ret;
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
}
- ath11k_hif_ce_irq_enable(ab);
- ath11k_hif_irq_enable(ab);
-
ret = ath11k_dp_rx_pktlog_start(ab);
- if (ret) {
+ if (ret)
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
ret);
- return ret;
- }
- ret = ath11k_wow_wakeup(ab);
- if (ret) {
- ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ath11k_core_resume);
@@ -2060,6 +2112,8 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2129,7 +2183,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2202,7 +2256,7 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
ath11k_fw_destroy(ab);
@@ -2255,6 +2309,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index b3fb74a226fb..205f40ee6b66 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -174,7 +174,7 @@ struct ath11k_ext_irq_grp {
u64 timestamp;
bool napi_enabled;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
enum ath11k_smbios_cc_type {
@@ -1033,6 +1033,8 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
+ struct completion restart_completed;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1232,8 +1234,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index a48e737ef35d..414a5ce279f7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -980,7 +980,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
- debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+ debugfs_create_file("soc_dp_stats", 0400, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
if (ab->hw_params.sram_dump.start != 0)
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 65e8f244ebb9..e453c137385e 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -664,7 +664,7 @@ struct hal_srng_config {
};
/**
- * enum hal_rx_buf_return_buf_manager
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
* @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 877a4073fed6..c4c6cc09c7c1 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -67,12 +67,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index a6a37d67a50a..4f62e38ba48b 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
enable_ps = arvif->ps;
- if (!arvif->is_started) {
- /* mac80211 can update vif powersave state while disconnected.
- * Firmware doesn't behave nicely and consumes more power than
- * necessary if PS is disabled on a non-started vdev. Hence
- * force-enable PS for non-running vdevs.
- */
- psmode = WMI_STA_PS_MODE_ENABLED;
- } else if (enable_ps) {
+ if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -1430,10 +1423,67 @@ static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif,
return false;
}
-static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
- struct sk_buff *bcn)
+static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
{
+ size_t len;
+ const u8 *next, *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath11k_base *ab = arvif->ar->ab;
struct ieee80211_mgmt *mgmt;
+ int ret = 0;
u8 *ies;
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
@@ -1451,6 +1501,32 @@ static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
arvif->wpaie_present = true;
else
arvif->wpaie_present = false;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return ret;
+
+ ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
}
static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
@@ -1472,10 +1548,12 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return -EPERM;
}
- if (tx_arvif == arvif)
- ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb);
- else
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb))
+ return -EINVAL;
+ } else {
arvif->wpaie_present = tx_arvif->wpaie_present;
+ }
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_vif_params_set)
@@ -1534,10 +1612,12 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
return -EPERM;
}
- if (tx_arvif == arvif)
- ath11k_mac_set_vif_params(tx_arvif, bcn);
- else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn))
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, bcn))
+ return -EINVAL;
+ } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) {
return -EINVAL;
+ }
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0);
kfree_skb(bcn);
@@ -1579,7 +1659,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
if (vif->bss_conf.color_change_active &&
ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
return;
}
@@ -3996,6 +4076,9 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
arg->vdev_id = arvif->vdev_id;
arg->scan_id = ATH11K_SCAN_ID;
+ if (ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
if (req->ie_len) {
arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
if (!arg->extraie.ptr) {
@@ -6570,17 +6653,26 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
+
default:
WARN_ON(1);
break;
@@ -9020,6 +9112,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
+ /* Note: read_lock_bh() calls rcu_read_lock() */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9050,7 +9143,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
}
/* get anycast address */
- for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
@@ -9253,9 +9347,11 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
arg->scan_f_passive = 1;
- arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
+ if (!ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
ret = ath11k_start_scan(ar, arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
@@ -9857,12 +9953,18 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits;
+ bool p2p;
+
+ p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- n_limits = 2;
+ if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 2;
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
if (!limits) {
@@ -9870,39 +9972,29 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
limits[1].max = 1;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = 2;
combinations[0].max_interfaces = ab->hw_params.num_vdevs;
combinations[0].num_different_channels = 2;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
} else {
limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
-
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = 2;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
@@ -9911,6 +10003,13 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
BIT(NL80211_CHAN_WIDTH_160);
}
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -10027,6 +10126,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (ret)
goto err;
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath11k_mac_setup_he_cap(ar, cap);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index fb4ecf9a103e..ab182690aed3 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -19,6 +19,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@@ -158,9 +159,8 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
- /* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
- * has SYSERR bit set and thus need to set MHICTRL_RESET
- * to clear SYSERR.
+ /* After SOC_GLOBAL_RESET, MHISTATUS may still have SYSERR bit set
+ * and thus need to set MHICTRL_RESET to clear SYSERR.
*/
ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
@@ -269,6 +269,7 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
ath11k_mhi_op_callback_to_str(cb));
@@ -279,12 +280,21 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
break;
case MHI_CB_EE_RDDM:
ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n");
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
+
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -397,6 +407,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
}
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
@@ -442,9 +453,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
{
- mhi_power_down(ab_pci->mhi_ctrl, true);
+ /* During suspend we need to use the mhi_power_down_keep_dev()
+ * workaround, otherwise ath11k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
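For orientation, a sketch of how the new is_suspend flag is expected to reach ath11k_mhi_stop(); the ath11k_core_suspend_late() step is assumed from the pci.c hunk below and is not itself shown in this excerpt:

/* Illustration only, not part of the patch:
 *
 *   system suspend (late)  -> ath11k_pci_pm_suspend_late()
 *                          -> ath11k_core_suspend_late()           (assumed)
 *                          -> ath11k_hif_power_down(ab, true)
 *                          -> ath11k_pci_power_down(ab, true)
 *                          -> ath11k_mhi_stop(ab_pci, true)
 *                          -> mhi_power_down_keep_dev()
 *
 *   driver remove/shutdown -> ath11k_pci_power_down(ab, false)
 *                          -> ath11k_mhi_stop(ab_pci, false)
 *                          -> mhi_power_down()
 */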
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index f81fba2644a4..2d567705e732 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
@@ -26,5 +26,4 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
-
#endif
diff --git a/drivers/net/wireless/ath/ath11k/p2p.c b/drivers/net/wireless/ath/ath11k/p2p.c
new file mode 100644
index 000000000000..01e14523f1fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath11k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 noa_descriptors, ctwindow;
+ bool oppps;
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ int i;
+
+ ctwindow = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ oppps = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count = noa->descriptors[i].type_count;
+ noa_attr->desc[i].duration =
+ cpu_to_le32(noa->descriptors[i].duration);
+ noa_attr->desc[i].interval =
+ cpu_to_le32(noa->descriptors[i].interval);
+ noa_attr->desc[i].start_time =
+ cpu_to_le32(noa->descriptors[i].start_time);
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t
+ath11k_p2p_noa_ie_len_compute(const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+ u8 noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ if (!(noa_descriptors) &&
+ !(u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa_descriptors *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath11k_p2p_noa_ie_assign(struct ath11k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath11k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath11k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath11k_p2p_noa_ie_fill(ie, len, noa);
+ ath11k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath11k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_p2p_noa_update_vdev_iter,
+ &arg);
+}
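A worked example of the sizing done by ath11k_p2p_noa_ie_len_compute() above, assuming sizeof(struct ieee80211_p2p_noa_desc) is 13 bytes as in mainline ieee80211.h:

/* Firmware advertises two NoA descriptors:
 *   len = (1 + 1 + 4)   EID + IE length + WFA OUI + OUI type
 *       + (1 + 2)       NoA attribute ID + attribute length
 *       + (1 + 1)       index + oppps_ctwindow
 *       + 2 * 13        descriptors
 *       = 37 bytes
 * ath11k_p2p_noa_ie_fill() then writes data[1] = 37 - 2 = 35 and a NoA
 * attribute length of 2 + 2 * 13 = 28.
 */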
diff --git a/drivers/net/wireless/ath/ath11k/p2p.h b/drivers/net/wireless/ath/ath11k/p2p.h
new file mode 100644
index 000000000000..d907940a9b09
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_P2P_H
+#define ATH11K_P2P_H
+
+#include "wmi.h"
+
+struct ath11k_wmi_p2p_noa_info;
+
+struct ath11k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+};
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..8d63b84d1261 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -638,7 +638,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab)
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -649,7 +649,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci);
+ ath11k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -970,7 +970,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
@@ -998,7 +998,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1035,9 +1035,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
- ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume);
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* Similar to ath11k_pci_pm_suspend(), we return success here
+ * even if an error happens, to allow system suspend/hibernation
+ * to survive.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 6be73333d90b..c33c7865145c 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -64,6 +64,7 @@ struct ath11k_pci {
char amss_path[100];
struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index add4db4c50bc..79eb3f9c902f 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -316,6 +316,7 @@ static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -558,7 +559,7 @@ ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
unsigned long irq_flags;
@@ -578,8 +579,11 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
@@ -601,8 +605,13 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pcic_get_msi_irq(ab, vector);
- if (irq < 0)
+ if (irq < 0) {
+ for (n = 0; n <= i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
return irq;
+ }
ab->irq_num[irq_idx] = irq;
@@ -615,6 +624,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
+ for (n = 0; n <= i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
return ret;
}
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 5006f81f779b..d4a243b64f6c 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2877,7 +2877,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
}
/* reset the firmware */
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 41e7499f075f..18d6eab5cce3 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -101,7 +101,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
spin_unlock_bh(&ar->data_lock);
/* display in millidegree Celsius */
- ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 34ab9631ff36..6ff01c45f165 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -20,6 +20,7 @@
#include "hw.h"
#include "peer.h"
#include "testmode.h"
+#include "p2p.h"
struct wmi_tlv_policy {
size_t min_len;
@@ -154,6 +155,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_add_dialog_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -981,7 +986,7 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
FIELD_PREP(WMI_TLV_LEN, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1704,6 +1709,45 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, 4);
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->ie_buf_len = p2p_ie_len;
+
+ tlv = (struct wmi_tlv *)cmd->tlv;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn, u32 ema_params)
@@ -4020,7 +4064,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ 0);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -8606,6 +8651,58 @@ exit:
kfree(tb);
}
+static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+ struct ath11k *ar;
+ int vdev_id;
+ u8 noa_descriptors;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb));
+ return;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa)
+ goto out;
+
+ vdev_id = ev->vdev_id;
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) {
+ ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n",
+ noa_descriptors);
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, noa_descriptors);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ goto unlock;
+ }
+
+ ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -8733,6 +8830,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
+ case WMI_P2P_NOA_EVENTID:
+ ath11k_wmi_p2p_noa_event(ab, skb);
+ break;
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
break;
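A hedged caller sketch for the ath11k_wmi_p2p_go_bcn_ie() helper added above; mainline hooks this up from the beacon-template path in mac.c, and the function name used here is hypothetical:

static int ath11k_example_set_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
					    struct sk_buff *bcn)
{
	size_t ies_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	const u8 *p2p_ie;

	/* Locate the WFA P2P vendor IE inside the beacon template. */
	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
					 bcn->data + ies_offset,
					 bcn->len - ies_offset);
	if (!p2p_ie)
		return -ENOENT;

	return ath11k_wmi_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
}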
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index bb419e3abb00..8982b909c821 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@@ -60,13 +60,9 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_MAX_HINT_S_SSID 10
-#define WLAN_SCAN_MAX_HINT_BSSID 10
-#define MAX_RNR_BSS 5
-
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
@@ -3444,34 +3440,6 @@ struct wmi_bssid_arg {
const u8 *bssid;
};
-struct wmi_start_scan_arg {
- u32 scan_id;
- u32 scan_req_id;
- u32 vdev_id;
- u32 scan_priority;
- u32 notify_scan_events;
- u32 dwell_time_active;
- u32 dwell_time_passive;
- u32 min_rest_time;
- u32 max_rest_time;
- u32 repeat_probe_time;
- u32 probe_spacing_time;
- u32 idle_time;
- u32 max_scan_time;
- u32 probe_delay;
- u32 scan_ctrl_flags;
-
- u32 ie_len;
- u32 n_channels;
- u32 n_ssids;
- u32 n_bssids;
-
- u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
- u32 channels[64];
- struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
- struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
-};
-
#define WMI_SCAN_STOP_ONE 0x00000000
#define WMI_SCN_STOP_VAP_ALL 0x01000000
#define WMI_SCAN_STOP_ALL 0x04000000
@@ -3630,6 +3598,37 @@ struct wmi_ftm_event_msg {
u8 data[];
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_wmi_p2p_noa_descriptor {
+ u32 type_count; /* 255: continuous schedule, 0: reserved */
+ u32 duration; /* Absent period duration in micro seconds */
+ u32 interval; /* Absent period interval in micro seconds */
+ u32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath11k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ u32 noa_attr;
+ struct ath11k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
#define WMI_EMA_TMPL_IDX_SHIFT 8
@@ -3653,6 +3652,13 @@ struct wmi_bcn_tmpl_cmd {
u32 ema_params;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 ie_buf_len;
+ u8 tlv[];
+} __packed;
+
struct wmi_key_seq_counter {
u32 key_seq_counter_l;
u32 key_seq_counter_h;
@@ -5740,8 +5746,6 @@ struct wmi_debug_log_config_cmd_fixed_param {
u32 value;
} __packed;
-#define WMI_MAX_MEM_REQS 32
-
#define MAX_RADIOS 3
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
@@ -6349,6 +6353,8 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn, u32 ema_param);
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index e135d2b1b61d..eceab9153e98 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -24,6 +24,15 @@ config ATH12K_DEBUG
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
+config ATH12K_DEBUGFS
+ bool "QTI ath12k debugfs support"
+ depends on ATH12K && MAC80211_DEBUGFS
+ help
+ Enable ath12k debugfs support
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
config ATH12K_TRACING
bool "ath12k tracing support"
depends on ATH12K && EVENT_TRACING
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 71669f94ff75..d42480db7463 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,6 +23,8 @@ ath12k-y += core.o \
fw.o \
p2p.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o
+ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
new file mode 100644
index 000000000000..443ba12e01f3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "acpi.h"
+#include "debug.h"
+
+static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
+{
+ union acpi_object *obj;
+ acpi_handle root_handle;
+ int ret;
+
+ root_handle = ACPI_HANDLE(ab->dev);
+ if (!root_handle) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n");
+ return -EOPNOTSUPP;
+ }
+
+ obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func,
+ NULL);
+
+ if (!obj) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n");
+ return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ ab->acpi.func_bit = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_TAS_DATA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_BIOS_SAR:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_CCA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.cca_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ }
+ } else {
+ ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+static int ath12k_acpi_set_power_limit(struct ath12k_base *ab)
+{
+ const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table;
+ int ret;
+
+ if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION ||
+ tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) {
+ ath12k_warn(ab, "latest ACPI TAS data is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION ||
+ ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) {
+ ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data)
+{
+ int ret;
+ struct ath12k_base *ab = data;
+
+ if (event != ATH12K_ACPI_NOTIFY_EVENT) {
+ ath12k_warn(ab, "unknown acpi notify %u\n", event);
+ return;
+ }
+
+ if (!ab->acpi.acpi_tas_enable) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n");
+ return;
+ }
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_power_limit(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret);
+ return;
+ }
+
+ if (!ab->acpi.acpi_bios_sar_enable)
+ return;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_bios_sar_power(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE,
+ ab->acpi.tas_cfg,
+ ATH12K_ACPI_DSM_TAS_CFG_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ ab->acpi.tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ acpi_status status;
+ u8 *buf;
+ int ret;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return 0;
+
+ ab->acpi.acpi_tas_enable = false;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) &&
+ ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION &&
+ ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE)
+ ab->acpi.acpi_tas_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) &&
+ ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION &&
+ ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG &&
+ !ab->acpi.acpi_tas_enable)
+ ab->acpi.acpi_bios_sar_enable = true;
+ }
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret)
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
+ ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify, ab);
+ if (ACPI_FAILURE(status)) {
+ ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status);
+ return -EIO;
+ }
+
+ ab->acpi.started = true;
+
+ return 0;
+}
+
+void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+ if (!ab->acpi.started)
+ return;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify);
+}
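A worked illustration of the DSM function-bitmap gating used in ath12k_acpi_start() above; the sample value is hypothetical:

/* Suppose DSM function 0 returned the integer 0x188 (bits 3, 7 and 8 set):
 *   ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) -> true
 *   ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)  -> true
 *   ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA) -> true
 *   ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)      -> false
 * so only the BIOS SAR and TAS tables would be fetched with further DSM calls.
 */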
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
new file mode 100644
index 000000000000..39e003d86a48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_ACPI_H
+#define ATH12K_ACPI_H
+
+#include <linux/acpi.h>
+
+#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
+#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
+#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
+#define ATH12K_ACPI_DSM_FUNC_TAS_CFG 8
+#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
+#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+
+#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
+#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
+#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
+#define ATH12K_ACPI_FUNC_BIT_TAS_CFG BIT(7)
+#define ATH12K_ACPI_FUNC_BIT_TAS_DATA BIT(8)
+#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER BIT(9)
+
+#define ATH12K_ACPI_NOTIFY_EVENT 0x86
+#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+
+#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
+#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
+#define ATH12K_ACPI_POWER_LIMIT_VERSION 0x1
+#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_CCA_THR_VERSION 0x1
+#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_BAND_EDGE_VERSION 0x1
+#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG 0x1
+
+#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET 1
+#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET 2
+#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET 5
+#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN 10
+#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET 12
+#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN 18
+#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN 22
+#define ATH12K_ACPI_CCA_THR_OFFSET_LEN 36
+
+#define ATH12K_ACPI_DSM_TAS_DATA_SIZE 69
+#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
+#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+
+#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
+#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_TABLE_LEN)
+#define ATH12K_ACPI_DSM_CCA_DATA_SIZE (ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN)
+
+#ifdef CONFIG_ACPI
+
+int ath12k_acpi_start(struct ath12k_base *ab);
+void ath12k_acpi_stop(struct ath12k_base *ab);
+
+#else
+
+static inline int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /* ATH12K_ACPI_H */
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 391b6fb2bd42..6663f4e1792d 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -15,6 +15,7 @@
#include "debug.h"
#include "hif.h"
#include "fw.h"
+#include "debugfs.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -43,67 +44,90 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
int ath12k_core_suspend(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k *ar;
+ int ret, i;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- /* TODO: there can frames in queues so for now add delay as a hack.
- * Need to implement to handle and remove this delay.
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, i);
+ if (!ar)
+ continue;
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ /* The PM framework skips the suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However, ath12k_core_resume() would still be called because we
+ * return success here, so the kernel puts us on dpm_suspended_list.
+ * Since we then won't go through a power down/up cycle, there is
+ * no chance to call complete(&ab->restart_completed) in
+ * ath12k_core_restart(), making ath12k_core_resume() time out.
+ * So call it here to avoid this issue. This also works when no
+ * error happens and suspend_late/resume_early do get called,
+ * because it is reinitialized in ath12k_core_resume_early().
+ */
- msleep(500);
+ complete(&ab->restart_completed);
- ret = ath12k_dp_rx_pktlog_stop(ab, true);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
- ret);
- return ret;
- }
+ return 0;
+}
+EXPORT_SYMBOL(ath12k_core_suspend);
- ret = ath12k_dp_rx_pktlog_stop(ab, false);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
- ret);
- return ret;
- }
+int ath12k_core_suspend_late(struct ath12k_base *ab)
+{
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
- ret = ath12k_hif_suspend(ab);
- if (ret) {
- ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
- return ret;
- }
+ ath12k_hif_power_down(ab, true);
return 0;
}
+EXPORT_SYMBOL(ath12k_core_suspend_late);
-int ath12k_core_resume(struct ath12k_base *ab)
+int ath12k_core_resume_early(struct ath12k_base *ab)
{
int ret;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- ret = ath12k_hif_resume(ab);
- if (ret) {
- ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
- return ret;
- }
+ reinit_completion(&ab->restart_completed);
+ ret = ath12k_hif_power_up(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
- ath12k_hif_ce_irq_enable(ab);
- ath12k_hif_irq_enable(ab);
+ return ret;
+}
+EXPORT_SYMBOL(ath12k_core_resume_early);
- ret = ath12k_dp_rx_pktlog_start(ab);
- if (ret) {
- ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
- ret);
- return ret;
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+ long time_left;
+
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
+
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH12K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
}
return 0;
}
+EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
@@ -542,6 +566,8 @@ static void ath12k_core_stop(struct ath12k_base *ab)
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
+ ath12k_acpi_stop(ab);
+
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
@@ -628,6 +654,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return ret;
}
+ ath12k_debugfs_soc_create(ab);
+
ret = ath12k_hif_power_up(ab);
if (ret) {
ath12k_err(ab, "failed to power up :%d\n", ret);
@@ -637,6 +665,7 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return 0;
err_qmi_deinit:
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
return ret;
}
@@ -645,6 +674,7 @@ static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
ath12k_dp_free(ab);
ath12k_reg_free(ab);
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
}
@@ -779,6 +809,11 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
+ ret = ath12k_acpi_start(ab);
+ if (ret)
+ /* ACPI is optional so continue in case of an error */
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+
return 0;
err_reo_cleanup:
@@ -874,9 +909,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
int ret;
mutex_lock(&ab->core_lock);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
- ath12k_hif_stop(ab);
+ ath12k_ce_cleanup_pipes(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -1052,9 +1086,6 @@ static void ath12k_core_restart(struct work_struct *work)
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
int ret;
- if (!ab->is_reset)
- ath12k_core_pre_reconfigure_recovery(ab);
-
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
@@ -1064,8 +1095,7 @@ static void ath12k_core_restart(struct work_struct *work)
if (ab->is_reset)
complete_all(&ab->reconfigure_complete);
- if (!ab->is_reset)
- ath12k_core_post_reconfigure_recovery(ab);
+ complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
@@ -1131,8 +1161,10 @@ static void ath12k_core_reset(struct work_struct *work)
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH12K_RECOVER_START_TIMEOUT_HZ);
- ath12k_hif_power_down(ab);
- ath12k_qmi_free_resource(ab);
+ ath12k_hif_irq_disable(ab);
+ ath12k_hif_ce_irq_disable(ab);
+
+ ath12k_hif_power_down(ab, false);
ath12k_hif_power_up(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
@@ -1175,7 +1207,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
mutex_unlock(&ab->core_lock);
- ath12k_hif_power_down(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
ath12k_fw_unmap(ab);
@@ -1223,11 +1255,12 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->slo_capable = true;
+ ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
return ab;
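The ath12k_core_suspend()/suspend_late()/resume_early()/resume() split above mirrors the ath11k PCI changes earlier in this patch. A hedged sketch of the PCI-side wiring it is designed for (the ath12k pci.c hunk is not part of this excerpt, so names here are illustrative):

static int ath12k_example_pm_suspend_late(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);

	/* Powers the HIF down with is_suspend = true, see
	 * ath12k_core_suspend_late() above.
	 */
	return ath12k_core_suspend_late(ab);
}

static int ath12k_example_pm_resume_early(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);

	/* Powers the HIF back up; ath12k_core_resume() later waits on
	 * restart_completed, which ath12k_core_restart() completes.
	 */
	return ath12k_core_resume_early(ab);
}

static const struct dev_pm_ops ath12k_example_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_example_pm_suspend_late,
				     ath12k_example_pm_resume_early)
};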
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 97e5a0ccd233..47dde4401210 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -26,6 +26,7 @@
#include "reg.h"
#include "dbring.h"
#include "fw.h"
+#include "acpi.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -46,6 +47,7 @@
#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_"
#define ATH12K_INVALID_HW_MAC_ID 0xFF
+#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH12K_RX_RATE_TABLE_NUM 320
#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
@@ -214,6 +216,24 @@ enum ath12k_monitor_flags {
ATH12K_FLAG_MONITOR_ENABLED,
};
+struct ath12k_tx_conf {
+ bool changed;
+ u16 ac;
+ struct ieee80211_tx_queue_params tx_queue_params;
+};
+
+struct ath12k_key_conf {
+ bool changed;
+ enum set_key_cmd cmd;
+ struct ieee80211_key_conf *key;
+};
+
+struct ath12k_vif_cache {
+ struct ath12k_tx_conf tx_conf;
+ struct ath12k_key_conf key_conf;
+ u32 bss_conf_changed;
+};
+
struct ath12k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -251,11 +271,13 @@ struct ath12k_vif {
} ap;
} u;
+ bool is_created;
bool is_started;
bool is_up;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
@@ -267,10 +289,12 @@ struct ath12k_vif {
u8 vdev_stats_id;
u32 punct_bitmap;
bool ps;
+ struct ath12k_vif_cache *cache;
};
struct ath12k_vif_iter {
u32 vdev_id;
+ struct ath12k *ar;
struct ath12k_vif *arvif;
};
@@ -453,6 +477,10 @@ struct ath12k_fw_stats {
struct list_head bcn;
};
+struct ath12k_debug {
+ struct dentry *debugfs_pdev;
+};
+
struct ath12k_per_peer_tx_stats {
u32 succ_bytes;
u32 retry_bytes;
@@ -592,16 +620,24 @@ struct ath12k {
struct ath12k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct ath12k_debug debug;
+#endif
bool dfs_block_radar_events;
bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
+
+ u32 freq_low;
+ u32 freq_high;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ bool regd_updated;
+ bool use_6ghz_regd;
u8 num_radio;
struct ath12k radio[] __aligned(sizeof(void *));
@@ -688,6 +724,21 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
+/**
+ * enum ath12k_link_capable_flags - link capable flags
+ *
+ * Single/Multi link capability information
+ *
+ * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO formed between the radios, where
+ * all the links (radios) are present within a single device.
+ * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO formed between the radios, where
+ * the links (radios) are spread across devices.
+ */
+enum ath12k_link_capable_flags {
+ ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
+ ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -782,6 +833,9 @@ struct ath12k_base {
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
struct ath12k_soc_dp_stats soc_stats;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
unsigned long dev_flags;
struct completion driver_recovery;
@@ -843,10 +897,31 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* slo_capable denotes if the single/multi link operation
- * is supported within the same chip (SoC).
+ /* mlo_capable_flags denotes the single/multi link operation
+ * capabilities of the Device.
+ *
+ * See enum ath12k_link_capable_flags
*/
- bool slo_capable;
+ u8 mlo_capable_flags;
+
+ struct completion restart_completed;
+
+#ifdef CONFIG_ACPI
+
+ struct {
+ bool started;
+ u32 func_bit;
+ bool acpi_tas_enable;
+ bool acpi_bios_sar_enable;
+ u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
+ u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
+ u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
+ u8 geo_offset_data[ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE];
+ u8 cca_data[ATH12K_ACPI_DSM_CCA_DATA_SIZE];
+ u8 band_edge_power[ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE];
+ } acpi;
+
+#endif /* CONFIG_ACPI */
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -874,8 +949,10 @@ int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
+int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
+int ath12k_core_suspend_late(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -951,13 +1028,21 @@ static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
return hw->priv;
}
-static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id)
{
- return ah->radio;
+ if (WARN(hw_link_id >= ah->num_radio,
+ "bad hw link id %d, so switch to default link\n", hw_link_id))
+ hw_link_id = 0;
+
+ return &ah->radio[hw_link_id];
}
static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
{
return ar->ah->hw;
}
+
+#define for_each_ar(ah, ar, index) \
+ for ((index) = 0; ((index) < (ah)->num_radio && \
+ ((ar) = &(ah)->radio[(index)])); (index)++)
#endif /* _CORE_H_ */
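A small, hypothetical usage example for the for_each_ar() iterator added above:

/* Walk every radio under an ath12k_hw; 'i' receives the radio index. */
static void ath12k_example_dump_radios(struct ath12k_hw *ah)
{
	struct ath12k *ar;
	int i;

	for_each_ar(ah, ar, i)
		ath12k_dbg(ar->ab, ATH12K_DBG_BOOT, "radio %d pdev_idx %d\n",
			   i, ar->pdev_idx);
}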
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
new file mode 100644
index 000000000000..8d8ba951093b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "debugfs.h"
+
+static ssize_t ath12k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_wmi_simulate_radar(ar);
+ if (ret)
+ goto exit;
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath12k_write_simulate_radar,
+ .open = simple_open
+};
+
+void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+ bool dput_needed;
+ char soc_name[64] = { 0 };
+ struct dentry *debugfs_ath12k;
+
+ debugfs_ath12k = debugfs_lookup("ath12k", NULL);
+ if (debugfs_ath12k) {
+ /* a dentry from lookup() needs a dput() once we are done with it */
+ dput_needed = true;
+ } else {
+ debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+ if (IS_ERR_OR_NULL(debugfs_ath12k))
+ return;
+ dput_needed = false;
+ }
+
+ scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+
+ if (dput_needed)
+ dput(debugfs_ath12k);
+}
+
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+ /* We are not removing ath12k directory on purpose, even if it
+ * would be empty. This simplifies the directory handling and it's
+ * a minor cosmetic issue to leave an empty ath12k directory to
+ * debugfs.
+ */
+}
+
+void ath12k_debugfs_register(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->ah->hw;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ /* Create a symlink under ieee80211/phy* */
+ scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf);
+
+ if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
new file mode 100644
index 000000000000..a62f2a550b23
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_H_
+#define _ATH12K_DEBUGFS_H_
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_soc_create(struct ath12k_base *ab);
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
+void ath12k_debugfs_register(struct ath12k *ar);
+
+#else
+static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_register(struct ath12k *ar)
+{
+}
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c8e1b244b69e..7843c76a82c1 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -14,6 +14,11 @@
#include "peer.h"
#include "dp_mon.h"
+enum ath12k_dp_desc_type {
+ ATH12K_DP_TX_DESC,
+ ATH12K_DP_RX_DESC,
+};
+
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -960,8 +965,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ LIST_HEAD(list);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
}
/* TODO: Implement handler for other interrupts */
@@ -1146,11 +1152,13 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
- struct ath12k_rx_desc_info *desc_info, *tmp;
+ struct ath12k_rx_desc_info *desc_info;
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_skb_cb *skb_cb;
struct sk_buff *skb;
- int i;
+ struct ath12k *ar;
+ int i, j;
u32 pool_id, tx_spt_page;
if (!dp->spt_info)
@@ -1159,16 +1167,23 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
- list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
- list_del(&desc_info->list);
- skb = desc_info->skb;
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ desc_info = dp->spt_info->rxbaddr[i];
- if (!skb)
- continue;
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ if (!desc_info[j].in_use) {
+ list_del(&desc_info[j].list);
+ continue;
+ }
+
+ skb = desc_info[j].skb;
+ if (!skb)
+ continue;
- dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
- skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
}
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
@@ -1193,6 +1208,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
+ skb_cb = ATH12K_SKB_CB(skb);
+ ar = skb_cb->ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -1336,12 +1356,16 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_rx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+ start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
- if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
@@ -1354,13 +1378,17 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
- ppt_idx > ab->dp.num_spt_pages ||
+ start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx +
+ (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
@@ -1389,15 +1417,16 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
+ ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
- rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
*rx_desc_addr = &rx_descs[j];
}
}
@@ -1417,10 +1446,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
+
dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
tx_descs[j].pool_id = pool_id;
list_add_tail(&tx_descs[j].list,
@@ -1437,14 +1467,43 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_dp_cmem_init(struct ath12k_base *ab,
+ struct ath12k_dp *dp,
+ enum ath12k_dp_desc_type type)
+{
+ u32 cmem_base;
+ int i, start, end;
+
+ cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+ switch (type) {
+ case ATH12K_DP_TX_DESC:
+ start = ATH12K_TX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_TX_SPT_PAGES;
+ break;
+ case ATH12K_DP_RX_DESC:
+ start = ATH12K_RX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_RX_SPT_PAGES;
+ break;
+ default:
+ ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
+ return -EINVAL;
+ }
+
+ /* Write to PPT in CMEM */
+ for (i = start; i < end; i++)
+ ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+ dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+
+ return 0;
+}
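ath12k_dp_cmem_init() above programs one PPT slot in CMEM per SPT page, writing the page's DMA address shifted right by the 4 KiB alignment (ATH12K_SPT_4K_ALIGN_OFFSET), so the TX and RX page ranges can now be programmed independently. A small user-space model of that loop follows; cmem[], the page count and the 12-bit shift are assumptions made for illustration only.

#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES	8
#define ALIGN_SHIFT	12		/* assuming 4 KiB aligned pages */

int main(void)
{
	uint64_t page_paddr[NUM_PAGES];	/* stand-in for spt_info[i].paddr */
	uint32_t cmem[NUM_PAGES];	/* stand-in for the PPT region in CMEM */
	int i;

	for (i = 0; i < NUM_PAGES; i++)
		page_paddr[i] = 0x80000000ull + ((uint64_t)i << ALIGN_SHIFT);

	/* Each PPT slot holds the page frame number of its SPT page. */
	for (i = 0; i < NUM_PAGES; i++)
		cmem[i] = (uint32_t)(page_paddr[i] >> ALIGN_SHIFT);

	printf("slot 3 -> 0x%x\n", (unsigned int)cmem[3]);
	return 0;
}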
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i, ret = 0;
- u32 cmem_base;
INIT_LIST_HEAD(&dp->rx_desc_free_list);
- INIT_LIST_HEAD(&dp->rx_desc_used_list);
spin_lock_init(&dp->rx_desc_lock);
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
@@ -1465,8 +1524,6 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
-
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
ATH12K_PAGE_SIZE,
@@ -1483,10 +1540,18 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
ret = -EINVAL;
goto free;
}
+ }
- /* Write to PPT in CMEM */
- ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
- dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
+ goto free;
+ }
+
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
+ goto free;
}
ret = ath12k_dp_cc_desc_init(ab);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index eb2dd408e081..5cf0d21ef184 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -223,6 +223,9 @@ struct ath12k_pdev_dp {
#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+#define ATH12K_TX_SPT_PAGE_OFFSET 0
+#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES
+
/* The SPT pages are divided for RX and TX, first block for RX
* and remaining for TX
*/
@@ -245,7 +248,7 @@ struct ath12k_pdev_dp {
#define ATH12K_CC_SPT_MSB 8
#define ATH12K_CC_PPT_MSB 19
#define ATH12K_CC_PPT_SHIFT 9
-#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_SPT GENMASK(8, 0)
#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
#define DP_REO_QREF_NUM GENMASK(31, 16)
@@ -282,6 +285,8 @@ struct ath12k_rx_desc_info {
struct sk_buff *skb;
u32 cookie;
u32 magic;
+ u8 in_use : 1,
+ reserved : 7;
};
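The new in_use bit is what lets the driver drop the separate rx_desc_used_list: a descriptor is flagged when it is handed to the RX refill ring and cleared when it returns to the free list, so the cleanup path can walk the SPT pages directly and only unmap buffers that are still outstanding. A toy sketch of that lifecycle, with a minimal struct standing in for ath12k_rx_desc_info:

#include <stdbool.h>
#include <stdio.h>

struct rx_desc {		/* toy stand-in for ath12k_rx_desc_info */
	void *skb;
	bool in_use;
};

static void desc_take(struct rx_desc *d, void *buf)
{
	d->skb = buf;
	d->in_use = true;	/* now owned by the hardware ring */
}

static void desc_put(struct rx_desc *d)
{
	d->skb = NULL;
	d->in_use = false;	/* back on the free list */
}

int main(void)
{
	struct rx_desc pool[4] = { 0 };
	int buf = 42;
	int i;

	desc_take(&pool[1], &buf);

	/* Cleanup path: only descriptors still marked in_use hold buffers. */
	for (i = 0; i < 4; i++)
		if (pool[i].in_use)
			printf("desc %d still holds a buffer\n", i);

	desc_put(&pool[1]);
	return 0;
}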
struct ath12k_tx_desc_info {
@@ -347,8 +352,7 @@ struct ath12k_dp {
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
struct list_head rx_desc_free_list;
- struct list_head rx_desc_used_list;
- /* protects the free and used desc list */
+ /* protects the free desc list */
spinlock_t rx_desc_lock;
struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
@@ -377,8 +381,6 @@ struct ath12k_dp {
/* peer meta data */
#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
-#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 2d56913a75d0..6b0b72477540 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -944,7 +944,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
goto err_merge_fail;
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "mpdu_buf %pK mpdu_buf->len %u",
+ "mpdu_buf %p mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath12k_dbg(ab, ATH12K_DBG_DATA,
@@ -958,7 +958,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
err_merge_fail:
if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ "err_merge_fail mpdu_buf %p", mpdu_buf);
/* Free the head buffer */
dev_kfree_skb_any(mpdu_buf);
}
@@ -1092,7 +1092,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index ca76c018dd0c..75df622f25d8 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -239,31 +239,61 @@ static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
-static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
- int i, reaped = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skb_list)))
+ dev_kfree_skb_any(skb);
+}
+
+static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
+ struct list_head *head,
+ size_t count)
+{
+ struct list_head *cur;
+ struct ath12k_rx_desc_info *rx_desc;
+ size_t nodes = 0;
+
+ if (!count) {
+ INIT_LIST_HEAD(list);
+ goto out;
+ }
+
+ list_for_each(cur, head) {
+ if (!count)
+ break;
+
+ rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+ rx_desc->in_use = true;
- do {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
- reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
- DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
+ count--;
+ nodes++;
+ }
- /* nothing more to reap */
- if (reaped < DP_MON_SERVICE_BUDGET)
- return 0;
+ list_cut_before(list, head, cur);
+out:
+ return nodes;
+}
- } while (time_before(jiffies, timeout));
+static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
+ struct list_head *used_list)
+{
+ struct ath12k_rx_desc_info *rx_desc, *safe;
- ath12k_warn(ab, "dp mon ring purge timeout");
+ /* Reset the use flag */
+ list_for_each_entry_safe(rx_desc, safe, used_list, list)
+ rx_desc->in_use = false;
- return -ETIMEDOUT;
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_splice_tail(used_list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
}
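ath12k_dp_list_cut_nodes() and ath12k_dp_rx_enqueue_free() turn the per-buffer lock/unlock of the old replenish path into two batched operations: cut up to count descriptors off the head of the free list under a single lock hold, work on them privately, then splice any leftovers back. The kernel does the cut with list_cut_before(); the self-contained model below shows the same idea with a plain singly linked list instead of list_head.

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

/* Move up to count nodes from *head onto *batch; return how many moved. */
static size_t cut_nodes(struct node **head, struct node **batch, size_t count)
{
	struct node *first = *head, *cur = *head, *prev = NULL;
	size_t n = 0;

	while (cur && n < count) {
		prev = cur;
		cur = cur->next;
		n++;
	}

	if (!n) {		/* nothing to cut */
		*batch = NULL;
		return 0;
	}

	prev->next = NULL;	/* terminate the private batch */
	*batch = first;
	*head = cur;		/* remainder stays on the free list */
	return n;
}

int main(void)
{
	struct node pool[5], *head = &pool[0], *batch;
	size_t got;
	int i;

	for (i = 0; i < 5; i++) {
		pool[i].id = i;
		pool[i].next = (i < 4) ? &pool[i + 1] : NULL;
	}

	got = cut_nodes(&head, &batch, 3);
	printf("cut %zu nodes, free list now starts at %d\n", got, head->id);
	return 0;
}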
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries)
{
struct ath12k_buffer_addr *desc;
@@ -292,6 +322,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
req_entries = min(num_free, req_entries);
num_remain = req_entries;
+ if (!num_remain)
+ goto out;
+
+ /* Get the descriptor from free list */
+ if (list_empty(used_list)) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ req_entries = ath12k_dp_list_cut_nodes(used_list,
+ &dp->rx_desc_free_list,
+ num_remain);
+ spin_unlock_bh(&dp->rx_desc_lock);
+ num_remain = req_entries;
+ }
+
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
@@ -311,33 +354,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
- spin_lock_bh(&dp->rx_desc_lock);
-
- /* Get desc from free list and store in used list
- * for cleanup purposes
- *
- * TODO: pass the removed descs rather than
- * add/read to optimize
- */
- rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ rx_desc = list_first_entry_or_null(used_list,
struct ath12k_rx_desc_info,
list);
- if (!rx_desc) {
- spin_unlock_bh(&dp->rx_desc_lock);
+ if (!rx_desc)
goto fail_dma_unmap;
- }
rx_desc->skb = skb;
cookie = rx_desc->cookie;
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
- spin_unlock_bh(&dp->rx_desc_lock);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
- goto fail_buf_unassign;
+ goto fail_dma_unmap;
+ list_del(&rx_desc->list);
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
@@ -345,26 +375,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
+ goto out;
- return req_entries - num_remain;
-
-fail_buf_unassign:
- spin_lock_bh(&dp->rx_desc_lock);
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
- rx_desc->skb = NULL;
- spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
-
+out:
ath12k_hal_srng_access_end(ab, srng);
+ if (!list_empty(used_list))
+ ath12k_dp_rx_enqueue_free(dp, used_list);
+
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
@@ -422,13 +445,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring)
{
- int num_entries;
+ LIST_HEAD(list);
- num_entries = rx_ring->refill_buf_ring.size /
- ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
+ rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- rx_ring->bufs_max = num_entries;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
return 0;
}
@@ -2423,7 +2445,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2585,6 +2607,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
+ LIST_HEAD(rx_desc_used_list);
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
@@ -2637,9 +2660,7 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
@@ -2700,7 +2721,8 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ num_buffs_reaped);
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@@ -3021,9 +3043,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
}
desc_info->skb = defrag_skb;
+ desc_info->in_use = true;
list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
@@ -3085,9 +3107,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
err_free_desc:
spin_lock_bh(&dp->rx_desc_lock);
- list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->in_use = false;
desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
@@ -3304,6 +3326,7 @@ out_unlock:
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ struct list_head *used_list,
bool drop, u32 cookie)
{
struct ath12k_base *ab = ar->ab;
@@ -3333,9 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&ab->dp.rx_desc_lock);
- list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
- spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ list_add_tail(&desc_info->list, used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
@@ -3391,6 +3413,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
+ LIST_HEAD(rx_desc_used_list);
u32 desc_bank, num_msdus;
struct hal_srng *srng;
struct ath12k_dp *dp;
@@ -3458,7 +3481,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
- if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
+ &rx_desc_used_list,
+ drop,
msdu_cookies[i]))
tot_n_bufs_reaped++;
}
@@ -3478,7 +3503,8 @@ exit:
rx_ring = &dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ tot_n_bufs_reaped);
return tot_n_bufs_reaped;
}
@@ -3695,25 +3721,27 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
+ LIST_HEAD(rx_desc_used_list);
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
int ret, pdev_id;
+ struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
+ __skb_queue_head_init(&scatter_msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
-
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3748,9 +3776,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
@@ -3768,17 +3794,53 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
+ msdu_data = (struct hal_rx_desc *)msdu->data;
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
- rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
-
- __skb_queue_tail(&msdu_list, msdu);
-
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
rxcb->is_continuation = err_info.continuation;
+ rxcb->rx_desc = msdu_data;
+
+ if (err_info.continuation) {
+ __skb_queue_tail(&scatter_msdu_list, msdu);
+ continue;
+ }
+
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ msdu_data);
+ if (mac_id >= MAX_RADIOS) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set in the
+ * previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ if (!skb_queue_empty(&scatter_msdu_list)) {
+ struct sk_buff *msdu;
+
+ skb_queue_walk(&scatter_msdu_list, msdu) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->mac_id = mac_id;
+ }
+
+ skb_queue_splice_tail_init(&scatter_msdu_list,
+ &msdu_list);
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list, msdu);
}
+ /* If the continuation bit was still set in the
+ * last record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
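For MSDUs scattered across several WBM release entries, the source link (mac_id) is resolved from the last, non-continuation fragment, so the loop above parks continuation fragments on scatter_msdu_list and stamps every parked fragment with that mac_id once the final piece arrives. A small user-space model of that buffering, with plain arrays standing in for the sk_buff queues:

#include <stdio.h>

struct frag { int mac_id; int cont; };	/* cont mirrors the continuation bit */

int main(void)
{
	/* Three fragments of one MSDU; only the last carries the link id. */
	struct frag in[] = { { -1, 1 }, { -1, 1 }, { 2, 0 } };
	struct frag pending[8];
	int npending = 0;
	int i, j;

	for (i = 0; i < 3; i++) {
		if (in[i].cont) {
			pending[npending++] = in[i];	/* park until last frag */
			continue;
		}

		/* Last fragment: propagate its mac_id to the parked ones. */
		for (j = 0; j < npending; j++)
			pending[j].mac_id = in[i].mac_id;

		printf("deliver %d fragments on mac_id %d\n",
		       npending + 1, in[i].mac_id);
		npending = 0;
	}

	return 0;
}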
@@ -3786,12 +3848,14 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ num_buffs_reaped);
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- (struct hal_rx_desc *)msdu->data);
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
@@ -4224,29 +4288,3 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return 0;
}
-
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
-{
- /* start reap timer */
- mod_timer(&ab->mon_reap_timer,
- jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-
- return 0;
-}
-
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
-{
- int ret;
-
- if (stop_timer)
- del_timer_sync(&ab->mon_reap_timer);
-
- /* reap all the monitor related rings */
- ret = ath12k_dp_purge_mon_ring(ab);
- if (ret) {
- ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 05b3d5581dbe..2ff421160181 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -118,12 +118,11 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
int budget);
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries);
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
struct ath12k_peer *
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 572b87153647..9b6d7d72f57c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -414,7 +414,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
struct ath12k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
- status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+ status_desc = desc;
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 107927d64bbb..dbb9205bfa10 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -767,7 +767,7 @@ struct hal_srng_config {
};
/**
- * enum hal_rx_buf_return_buf_manager
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
* @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index c653ca1f59b2..7f0926fe751d 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HIF_H
@@ -17,7 +17,7 @@ struct ath12k_hif_ops {
int (*start)(struct ath12k_base *ab);
void (*stop)(struct ath12k_base *ab);
int (*power_up)(struct ath12k_base *ab);
- void (*power_down)(struct ath12k_base *ab);
+ void (*power_down)(struct ath12k_base *ab, bool is_suspend);
int (*suspend)(struct ath12k_base *ab);
int (*resume)(struct ath12k_base *ab);
int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id,
@@ -133,12 +133,18 @@ static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address,
static inline int ath12k_hif_power_up(struct ath12k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath12k_hif_power_down(struct ath12k_base *ab)
+static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
#endif /* ATH12K_HIF_H */
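The hif wrappers above now tolerate a bus backend that leaves power_up()/power_down() unimplemented: power-up reports -EOPNOTSUPP and power-down becomes a no-op instead of dereferencing a NULL function pointer. The same guarded-optional-ops pattern in a standalone sketch; the struct and function names are illustrative, not the driver's real ops table.

#include <errno.h>
#include <stdio.h>

struct dev;

struct dev_ops {
	int  (*power_up)(struct dev *d);		/* may be NULL */
	void (*power_down)(struct dev *d, int suspend);	/* may be NULL */
};

struct dev { const struct dev_ops *ops; };

static int dev_power_up(struct dev *d)
{
	if (!d->ops->power_up)
		return -EOPNOTSUPP;		/* backend opted out */
	return d->ops->power_up(d);
}

static void dev_power_down(struct dev *d, int suspend)
{
	if (d->ops->power_down)
		d->ops->power_down(d, suspend);
}

static const struct dev_ops minimal_ops = { .power_up = NULL };

int main(void)
{
	struct dev d = { .ops = &minimal_ops };

	printf("power_up -> %d\n", dev_power_up(&d));	/* prints -EOPNOTSUPP */
	dev_power_down(&d, 1);				/* harmless no-op */
	return 0;
}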
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index 23f7428abd95..2f2230f565bb 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -358,7 +358,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
goto out;
}
- ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 0b17dfd47856..f4c827015821 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -15,6 +15,10 @@
#include "mhi.h"
#include "dp_rx.h"
+static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
+ 0x90, 0xd6, 0x02, 0x42,
+ 0xac, 0x12, 0x00, 0x03);
+
static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
@@ -920,6 +924,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
},
{
.name = "wcn7850 hw2.0",
@@ -964,7 +970,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.idle_ps = true,
.download_calib = false,
- .supports_suspend = false,
+ .supports_suspend = true,
.tcl_ring_retry = false,
.reoq_lut_support = false,
.supports_shadow_regs = true,
@@ -993,6 +999,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = 0,
.supports_sta_ps = true,
+
+ .acpi_guid = &wcn7850_uuid,
},
{
.name = "qcn9274 hw2.0",
@@ -1061,6 +1069,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 87965980b938..3f450ee93f34 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -8,6 +8,7 @@
#define ATH12K_HW_H
#include <linux/mhi.h>
+#include <linux/uuid.h>
#include "wmi.h"
#include "hal.h"
@@ -80,6 +81,7 @@
#define TARGET_RX_PEER_METADATA_VER_V1A 2
#define TARGET_RX_PEER_METADATA_VER_V1B 3
+#define ATH12K_HW_DEFAULT_QUEUE 0
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -211,6 +213,8 @@ struct ath12k_hw_params {
u32 otp_board_id_register;
bool supports_sta_ps;
+
+ const guid_t *acpi_guid;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 52a5fb8b03e9..805cb084484a 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -14,6 +14,7 @@
#include "dp_tx.h"
#include "dp_rx.h"
#include "peer.h"
+#include "debugfs.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -243,6 +244,9 @@ static const u32 ath12k_smps_map[] = {
static int ath12k_start_vdev_delay(struct ath12k *ar,
struct ath12k_vif *arvif);
+static void ath12k_mac_stop(struct ath12k *ar);
+static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif);
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -530,7 +534,8 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac,
struct ath12k_vif_iter *arvif_iter = data;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- if (arvif->vdev_id == arvif_iter->vdev_id)
+ if (arvif->vdev_id == arvif_iter->vdev_id &&
+ arvif->ar == arvif_iter->ar)
arvif_iter->arvif = arvif;
}
@@ -540,6 +545,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
u32 flags;
arvif_iter.vdev_id = vdev_id;
+ arvif_iter.ar = ar;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
@@ -613,6 +619,53 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel)
+{
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar;
+ int i;
+
+ ar = ah->radio;
+
+ if (ah->num_radio == 1)
+ return ar;
+
+ for_each_ar(ah, ar, i) {
+ if (channel->center_freq >= ar->freq_low &&
+ channel->center_freq <= ar->freq_high)
+ return ar;
+ }
+ return NULL;
+}
+
+static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (!ctx)
+ return NULL;
+
+ return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
+}
+
+static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+
+ /* If there is one pdev within ah, then we return
+ * ar directly.
+ */
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+ if (arvif->is_created)
+ return arvif->ar;
+
+ return NULL;
+}
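With several radios grouped behind one ieee80211_hw, these helpers resolve which ar owns an operation: trivially when there is a single radio, by matching the channel's centre frequency against each radio's [freq_low, freq_high] window, or by following an already-created arvif back to its radio. A minimal model of the frequency-window lookup; the ranges below are made-up examples, not real hardware limits.

#include <stddef.h>
#include <stdio.h>

struct radio { const char *name; int freq_low; int freq_high; };

static const struct radio radios[] = {
	{ "2 GHz radio", 2412, 2484 },
	{ "5 GHz radio", 5180, 5885 },
	{ "6 GHz radio", 5955, 7115 },
};

static const struct radio *radio_for_freq(int center_freq)
{
	size_t i;

	for (i = 0; i < sizeof(radios) / sizeof(radios[0]); i++)
		if (center_freq >= radios[i].freq_low &&
		    center_freq <= radios[i].freq_high)
			return &radios[i];

	return NULL;	/* no radio covers this channel */
}

int main(void)
{
	const struct radio *r = radio_for_freq(5500);

	printf("%s\n", r ? r->name : "none");
	return 0;
}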
+
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -1169,7 +1222,7 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
struct ath12k *ar;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
ret = ath12k_mac_config(ar, changed);
if (ret)
@@ -1345,6 +1398,75 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
+static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH12K_CONNECTION_LOSS_HZ);
+}
+
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath12k_vif *arvif = container_of(work, struct ath12k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2517,7 +2639,7 @@ static void ath12k_bss_disassoc(struct ath12k *ar,
arvif->is_up = false;
- /* TODO: cancel connection_loss_work */
+ cancel_delayed_work(&arvif->connection_loss_work);
}
static u32 ath12k_mac_get_rate_hw_value(int bitrate)
@@ -2961,16 +3083,42 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
}
+static struct ath12k_vif_cache *ath12k_arvif_get_cache(struct ath12k_vif *arvif)
+{
+ if (!arvif->cache)
+ arvif->cache = kzalloc(sizeof(*arvif->cache), GFP_KERNEL);
+
+ return arvif->cache;
+}
+
+static void ath12k_arvif_put_cache(struct ath12k_vif *arvif)
+{
+ kfree(arvif->cache);
+ arvif->cache = NULL;
+}
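Because a vdev may not exist yet when mac80211 pushes configuration (on multi-radio wiphys the vdev is only created once a channel is assigned), the driver now parks bss_conf, tx-queue and key updates in an on-demand arvif->cache and replays them later from ath12k_mac_vif_cache_flush(). A stripped-down model of that "record now, apply later" cache; the types and fields are toy stand-ins for ath12k_vif_cache.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cfg_cache {		/* toy version of ath12k_vif_cache */
	bool key_changed;
	int key_idx;
};

struct vif {
	bool created;		/* vdev exists on some radio */
	struct cfg_cache *cache;/* allocated only when something is deferred */
};

static struct cfg_cache *vif_get_cache(struct vif *v)
{
	if (!v->cache)
		v->cache = calloc(1, sizeof(*v->cache));
	return v->cache;
}

static void vif_set_key(struct vif *v, int key_idx)
{
	struct cfg_cache *c;

	if (v->created) {
		printf("apply key %d immediately\n", key_idx);
		return;
	}

	c = vif_get_cache(v);	/* no vdev yet: remember the request */
	if (!c)
		return;
	c->key_changed = true;
	c->key_idx = key_idx;
}

static void vif_cache_flush(struct vif *v)
{
	if (v->cache && v->cache->key_changed)
		printf("apply cached key %d after vdev create\n",
		       v->cache->key_idx);
	free(v->cache);
	v->cache = NULL;
}

int main(void)
{
	struct vif v = { 0 };

	vif_set_key(&v, 3);	/* cached: vdev not created yet */
	v.created = true;	/* channel assigned, vdev created */
	vif_cache_flush(&v);
	return 0;
}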
+
static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u64 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+
+ /* if the vdev is not created on a certain radio,
+ * cache the info to be updated later on vdev creation
+ */
+
+ if (!ar) {
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return;
+ arvif->cache->bss_conf_changed |= changed;
+ return;
+ }
mutex_lock(&ar->conf_mutex);
@@ -2979,6 +3127,42 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static struct ath12k*
+ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct ath12k_hw *ah = hw->priv;
+ enum nl80211_band band;
+ struct ath12k *ar;
+ int i;
+
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+ /* Currently mac80211 supports splitting scan requests into
+ * multiple scan requests per band.
+ * Loop through the first channel and determine the scan radio.
+ * TODO: There could be 5 GHz low/high channels; in that case,
+ * split the hw request and perform multiple scans.
+ */
+
+ if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ)
+ band = NL80211_BAND_2GHZ;
+ else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ)
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_6GHZ;
+
+ for_each_ar(ah, ar, i) {
+ /* TODO 5 GHz low high split changes */
+ if (ar->mac.sbands[band].channels)
+ return ar;
+ }
+
+ return NULL;
+}
+
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -3148,15 +3332,68 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
+ struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ bool create = true;
+
+ if (ah->num_radio == 1) {
+ WARN_ON(!arvif->is_created);
+ ar = ath12k_ah_to_ar(ah, 0);
+ goto scan;
+ }
+
+ /* Since the targeted scan device could depend on the frequency
+ * requested in the hw_req, select the corresponding radio
+ */
+ ar = ath12k_mac_select_scan_device(hw, vif, hw_req);
+ if (!ar)
+ return -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ /* If the vif is already assigned to a specific vdev of an ar,
+ * check whether it is already started; a vdev which is started
+ * is not allowed to switch to a new radio.
+ * If the vdev is not started, but was earlier created on a
+ * different ar, delete that vdev and create a new one. We don't
+ * delete at scan stop, as an optimization to avoid redundant
+ * delete/create of vdevs for the same ar in case the request is
+ * always on the same band for the vif.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+ if (ar != arvif->ar && arvif->is_started)
+ return -EINVAL;
+
+ if (ar != arvif->ar) {
+ /* backup the previously used ar ptr, since the vdev delete
+ * would assign the arvif->ar to NULL after the call
+ */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+ mutex_unlock(&prev_ar->conf_mutex);
+ if (ret)
+ ath12k_warn(prev_ar->ab,
+ "unable to delete scan vdev %d\n", ret);
+ } else {
+ create = false;
+ }
+ }
+ if (create) {
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_vdev_create(ar, vif);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
+ return -EINVAL;
+ }
+ }
+scan:
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -3242,10 +3479,13 @@ exit:
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ if (!arvif->is_created)
+ return;
+
+ ar = arvif->ar;
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3369,13 +3609,11 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
return first_errno;
}
-static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
+ struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3383,24 +3621,11 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int ret = 0;
u32 flags = 0;
- /* BIP needs to be done in software */
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
- return 1;
-
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ lockdep_assert_held(&ar->conf_mutex);
- if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- if (key->keyidx > WMI_MAX_KEY_INDEX)
- return -ENOSPC;
-
- mutex_lock(&ar->conf_mutex);
-
if (sta)
peer_addr = sta->addr;
else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
@@ -3492,6 +3717,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
spin_unlock_bh(&ab->base_lock);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache;
+ struct ath12k *ar;
+ int ret;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ /* ar is expected to be valid when sta ptr is available */
+ if (sta) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return -ENOSPC;
+ cache->key_conf.cmd = cmd;
+ cache->key_conf.key = key;
+ cache->key_conf.changed = true;
+ return 0;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_set_key(ar, cmd, vif, sta, key);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -3968,7 +4234,6 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
@@ -3980,7 +4245,11 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
mutex_lock(&ar->conf_mutex);
@@ -4109,7 +4378,7 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -4131,14 +4400,17 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ WARN_ON_ONCE(1);
+ return;
+ }
spin_lock_bh(&ar->ab->base_lock);
@@ -4314,12 +4586,22 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache = arvif->cache;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ /* cache the info and apply after vdev is created */
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return -ENOSPC;
+ cache->tx_conf.changed = true;
+ cache->tx_conf.ac = ac;
+ cache->tx_conf.tx_queue_params = *params;
+ return 0;
+ }
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
@@ -4997,6 +5279,13 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
if (ath12k_check_chain_mask(ar, rx_ant, false))
return -EINVAL;
+ /* Since we advertised the max cap of all radios combined during wiphy
+ * registration, ensure we don't set the antenna config higher than the
+ * limits
+ */
+ tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask);
+ rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask);
+
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@@ -5443,23 +5732,39 @@ err:
return ret;
}
+static void ath12k_drain_tx(struct ath12k_hw *ah)
+{
+ struct ath12k *ar;
+ int i;
+
+ for_each_ar(ah, ar, i)
+ ath12k_mac_drain_tx(ar);
+}
+
static int ath12k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- struct ath12k_base *ab = ar->ab;
- int ret;
+ struct ath12k *ar;
+ int ret, i;
- ath12k_mac_drain_tx(ar);
+ ath12k_drain_tx(ah);
- ret = ath12k_mac_start(ar);
- if (ret) {
- ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
- ar->pdev_idx, ret);
- return ret;
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ goto fail_start;
+ }
}
return 0;
+fail_start:
+ for (; i > 0; i--) {
+ ar = ath12k_ah_to_ar(ah, i - 1);
+ ath12k_mac_stop(ar);
+ }
+ return ret;
}
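ath12k_mac_op_start() now starts every radio under the hw and, if any of them fails, unwinds by stopping the radios that already started before propagating the error. The same start-all-or-roll-back pattern in a self-contained sketch; start_radio()/stop_radio() are placeholders for ath12k_mac_start()/ath12k_mac_stop().

#include <stdio.h>

#define NUM_RADIOS 3

static int start_radio(int i)
{
	if (i == 2)			/* simulate a failure on the last radio */
		return -1;
	printf("radio %d started\n", i);
	return 0;
}

static void stop_radio(int i)
{
	printf("radio %d stopped\n", i);
}

static int start_all(void)
{
	int i, ret;

	for (i = 0; i < NUM_RADIOS; i++) {
		ret = start_radio(i);
		if (ret)
			goto fail;	/* unwind whatever already started */
	}
	return 0;

fail:
	for (; i > 0; i--)
		stop_radio(i - 1);
	return ret;
}

int main(void)
{
	printf("start_all -> %d\n", start_all());
	return 0;
}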
int ath12k_mac_rfkill_config(struct ath12k *ar)
@@ -5555,11 +5860,13 @@ static void ath12k_mac_stop(struct ath12k *ar)
static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- ath12k_mac_drain_tx(ar);
+ ath12k_drain_tx(ah);
- ath12k_mac_stop(ar);
+ for_each_ar(ah, ar, i)
+ ath12k_mac_stop(ar);
}
static u8
@@ -5732,64 +6039,24 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
ath12k_mac_update_vif_offload(arvif);
}
-static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ah->hw;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
u32 param_id, param_value;
u16 nss;
int i;
- int ret;
- int bit;
-
- vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
+ int ret, vdev_id;
- if (vif->type == NL80211_IFTYPE_AP &&
- ar->num_peers > (ar->max_num_peers - 1)) {
- ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
- ret = -ENOBUFS;
- goto err;
- }
-
- if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
- ret = -EBUSY;
- goto err;
- }
-
- memset(arvif, 0, sizeof(*arvif));
+ lockdep_assert_held(&ar->conf_mutex);
arvif->ar = ar;
- arvif->vif = vif;
-
- INIT_LIST_HEAD(&arvif->list);
-
- /* Should we initialize any worker to handle connection loss indication
- * from firmware in sta mode?
- */
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
-
- bit = __ffs64(ab->free_vdev_map);
-
- arvif->vdev_id = bit;
+ vdev_id = __ffs64(ab->free_vdev_map);
+ arvif->vdev_id = vdev_id;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
@@ -5813,7 +6080,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
- ar->monitor_vdev_id = bit;
+ ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -5824,7 +6091,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
}
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
ab->free_vdev_map);
@@ -5842,6 +6109,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
+ arvif->is_created = true;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
@@ -5942,8 +6210,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
ath12k_mac_monitor_vdev_create(ar);
- mutex_unlock(&ar->conf_mutex);
-
+ arvif->ar = ar;
return ret;
err_peer_del:
@@ -5969,6 +6236,8 @@ err_peer_del:
err_vdev_del:
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
+ arvif->is_created = false;
+ arvif->ar = NULL;
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
@@ -5977,9 +6246,171 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
+ arvif->ar = NULL;
+ return ret;
+}
+
+static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache = arvif->cache;
+ struct ath12k_base *ab = ar->ab;
+
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!cache)
+ return;
+
+ if (cache->tx_conf.changed) {
+ ret = ath12k_mac_conf_tx(arvif, 0, cache->tx_conf.ac,
+ &cache->tx_conf.tx_queue_params);
+ if (ret)
+ ath12k_warn(ab,
+ "unable to apply tx config parameters to vdev %d\n",
+ ret);
+ }
+
+ if (cache->bss_conf_changed) {
+ ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+ cache->bss_conf_changed);
+ }
+
+ if (cache->key_conf.changed) {
+ ret = ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL,
+ cache->key_conf.key);
+ if (ret)
+ ath12k_warn(ab, "unable to apply set key param to vdev %d ret %d\n",
+ arvif->vdev_id, ret);
+ }
+ ath12k_arvif_put_cache(arvif);
+}
+
+static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar, *prev_ar;
+ struct ath12k_base *ab;
+ int ret;
+
+ if (ah->num_radio == 1)
+ ar = ah->radio;
+ else if (ctx)
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ else
+ return NULL;
+
+ if (!ar)
+ return NULL;
+
+ if (arvif->ar) {
+ /* This is not expected really */
+ if (WARN_ON(!arvif->is_created)) {
+ arvif->ar = NULL;
+ return NULL;
+ }
+
+ if (ah->num_radio == 1)
+ return arvif->ar;
+
+ /* This can happen as scan vdev gets created during multiple scans
+ * across different radios before a vdev is brought up in
+ * a certain radio.
+ */
+ if (ar != arvif->ar) {
+ if (WARN_ON(arvif->is_started))
+ return NULL;
+
+ /* backup the previously used ar ptr since arvif->ar would
+ * be set to NULL after vdev delete is done
+ */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+
+ if (ret)
+ ath12k_warn(prev_ar->ab, "unable to delete vdev %d\n",
+ ret);
+ mutex_unlock(&prev_ar->conf_mutex);
+ }
+ }
+
+ ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->is_created)
+ goto flush;
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ goto unlock;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ goto unlock;
+ }
+
+ ret = ath12k_mac_vdev_create(ar, vif);
+ if (ret) {
+ ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret);
+ goto unlock;
+ }
+
+flush:
+ /* If the vdev is created during channel assign and not during
+ * add_interface(), apply any parameters for the vdev that were received
+ * after add_interface(), corresponding to this vif.
+ */
+ ath12k_mac_vif_cache_flush(ar, vif);
+unlock:
mutex_unlock(&ar->conf_mutex);
+ return arvif->ar;
+}
- return ret;
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int i;
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Allocate Default Queue now and reassign during actual vdev create */
+ vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ /* For a single radio wiphy (i.e. ah->num_radio is 1), create the vdev
+ * during add_interface itself; for a multi radio wiphy, defer the vdev
+ * creation until channel_assign to determine the radio on which the
+ * vdev needs to be created.
+ */
+ ath12k_mac_assign_vif_to_vdev(hw, vif, NULL);
+ return 0;
}
static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
@@ -6007,31 +6438,14 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
}
}
-static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab;
+ struct ath12k_base *ab = ar->ab;
unsigned long time_left;
int ret;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
- arvif->vdev_id);
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
- if (ret)
- ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
- arvif->vdev_id, ret);
- }
-
+ lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_delete_done);
ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
@@ -6048,6 +6462,10 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
goto err_vdev_del;
}
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
@@ -6055,11 +6473,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
ret = ath12k_mac_monitor_vdev_delete(ar);
}
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
- ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
- ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
- ar->num_created_vdevs--;
-
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
@@ -6069,6 +6482,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
ath12k_peer_cleanup(ar, arvif->vdev_id);
+ ath12k_arvif_put_cache(arvif);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_vif_txmgmt_idr_remove, vif);
@@ -6081,6 +6495,46 @@ err_vdev_del:
clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
+ arvif->is_created = false;
+ arvif->ar = NULL;
+
+ return ret;
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_base *ab;
+ struct ath12k *ar;
+ int ret;
+
+ if (!arvif->is_created) {
+ /* if we cached some config but never received assign chanctx,
+ * free the allocated cache.
+ */
+ ath12k_arvif_put_cache(arvif);
+ return;
+ }
+
+ ar = arvif->ar;
+ ab = ar->ab;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath12k_mac_vdev_delete(ar, vif);
mutex_unlock(&ar->conf_mutex);
}
@@ -6132,7 +6586,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -6145,16 +6599,19 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ int antennas_rx = 0, antennas_tx = 0;
struct ath12k *ar;
+ int i;
- ar = ath12k_ah_to_ar(ah);
-
- mutex_lock(&ar->conf_mutex);
-
- *tx_ant = ar->cfg_tx_chainmask;
- *rx_ant = ar->cfg_rx_chainmask;
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+ antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask);
+ antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask);
+ mutex_unlock(&ar->conf_mutex);
+ }
- mutex_unlock(&ar->conf_mutex);
+ *tx_ant = antennas_tx;
+ *rx_ant = antennas_rx;
return 0;
}
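Since the wiphy advertised the combined capability of all radios, get_antenna() now reports the largest configured chainmask among them, while __ath12k_set_antenna() clamps the requested masks to each radio's own tx/rx capability before applying them. Both halves in one small sketch; the per-radio capability values are examples only.

#include <stdint.h>
#include <stdio.h>

#define NUM_RADIOS 2

struct radio_caps { uint32_t tx_cap; uint32_t rx_cap; };

static const struct radio_caps caps[NUM_RADIOS] = {
	{ 0x3, 0x3 },	/* 2x2 radio */
	{ 0xf, 0xf },	/* 4x4 radio */
};

int main(void)
{
	uint32_t report_tx = 0, report_rx = 0;
	uint32_t want = 0xf;	/* user asks for four chains everywhere */
	int i;

	/* get_antenna(): advertise the largest mask any radio is set to use. */
	for (i = 0; i < NUM_RADIOS; i++) {
		if (caps[i].tx_cap > report_tx)
			report_tx = caps[i].tx_cap;
		if (caps[i].rx_cap > report_rx)
			report_rx = caps[i].rx_cap;
	}
	printf("reported tx 0x%x rx 0x%x\n",
	       (unsigned int)report_tx, (unsigned int)report_rx);

	/* set_antenna(): clamp the request to what each radio can do. */
	for (i = 0; i < NUM_RADIOS; i++)
		printf("radio %d uses tx 0x%x\n", i,
		       (unsigned int)(want < caps[i].tx_cap ? want : caps[i].tx_cap));

	return 0;
}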
@@ -6163,13 +6620,16 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int ret;
-
- ar = ath12k_ah_to_ar(ah);
+ int ret = 0;
+ int i;
- mutex_lock(&ar->conf_mutex);
- ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
- mutex_unlock(&ar->conf_mutex);
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ break;
+ }
return ret;
}
@@ -6213,7 +6673,11 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret = -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar)
+ return -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_ampdu_action(arvif, params);
@@ -6229,15 +6693,17 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return -EINVAL;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6257,15 +6723,17 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6286,14 +6754,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
enum nl80211_band band,
enum nl80211_iftype type)
{
- struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = NULL;
enum wmi_phy_mode down_mode;
+ int n = ar->mac.sbands[band].n_iftype_data;
+ int i;
+ struct ieee80211_sband_iftype_data *data;
if (mode < MODE_11BE_EHT20)
return mode;
- eht_cap = &ar->mac.iftype[band][type].eht_cap;
- if (eht_cap->has_eht)
+ data = ar->mac.iftype[band];
+ for (i = 0; i < n; i++) {
+ if (data[i].types_mask & BIT(type)) {
+ eht_cap = &data[i].eht_cap;
+ break;
+ }
+ }
+
+ if (eht_cap && eht_cap->has_eht)
return mode;
switch (mode) {
@@ -6468,14 +6946,19 @@ struct ath12k_mac_change_chanctx_arg {
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
+ struct ath12k *ar;
};
static void
ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ if (arvif->ar != arg->ar)
+ return;
+
if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
@@ -6486,9 +6969,13 @@ static void
ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
+ if (arvif->ar != arg->ar)
+ return;
+
ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -6502,6 +6989,57 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
arg->next_vif++;
}
+static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20:
+ return WMI_CHAN_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return WMI_CHAN_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return WMI_CHAN_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return WMI_CHAN_WIDTH_160;
+ case NL80211_CHAN_WIDTH_80P80:
+ return WMI_CHAN_WIDTH_80P80;
+ case NL80211_CHAN_WIDTH_5:
+ return WMI_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return WMI_CHAN_WIDTH_10;
+ case NL80211_CHAN_WIDTH_320:
+ return WMI_CHAN_WIDTH_320;
+ default:
+ WARN_ON(1);
+ return WMI_CHAN_WIDTH_20;
+ }
+}
+
+static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct cfg80211_chan_def def)
+{
+ u32 param_id, param_value;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP;
+ param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) |
+ u32_encode_bits((~def.punctured),
+ WMI_PEER_PUNCTURE_BITMAP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "punctured bitmap %02x width %d vdev %d\n",
+ def.punctured, def.width, arvif->vdev_id);
+
+ ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id, param_id,
+ param_value);
+
+ return ret;
+}
+
static void
ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
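
The two new helpers above pack the peer parameter the firmware expects for puncturing: the WMI channel width goes into the low bits and the inverted cfg80211 puncture bitmap into bits 8..23 (WMI_PEER_PUNCTURE_BITMAP, GENMASK(23, 8) per the wmi.h hunk below). The standalone sketch that follows reproduces only that packing, with userspace stand-ins for GENMASK()/u32_encode_bits(); the reading that the firmware wants set bits for usable subchannels is inferred from the inversion in the code, not stated by the patch.

--- 8< --- standalone sketch (not part of the patch) ---
#include <stdio.h>
#include <stdint.h>

/* Userspace equivalents of the kernel helpers used in the hunk above. */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* From the wmi.h hunk in this patch. */
#define WMI_PEER_PUNCTURE_BITMAP	GENMASK(23, 8)
#define WMI_CHAN_WIDTH_320		9

int main(void)
{
	/* cfg80211-style puncture bitmap: bit set = 20 MHz subchannel punctured.
	 * Here subchannels 2 and 3 of a 320 MHz channel are punctured.
	 */
	uint16_t punctured = 0x000c;

	/* Same packing as ath12k_mac_update_peer_puncturing_width(): WMI channel
	 * width in the low bits, the *inverted* puncture bitmap in bits 8..23.
	 */
	uint32_t param_value = WMI_CHAN_WIDTH_320 |
			       FIELD_PREP(WMI_PEER_PUNCTURE_BITMAP,
					  (uint16_t)~punctured);

	printf("punctured %#06x -> param_value %#010x\n",
	       (unsigned int)punctured, (unsigned int)param_value);
	return 0;
}
--- >8 ---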
@@ -6594,6 +7132,16 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
arvif->vdev_id, ret);
continue;
}
+
+ ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif,
+ vifs[i].new_ctx->def);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to update puncturing bitmap %02x and width %d: %d\n",
+ vifs[i].new_ctx->def.punctured,
+ vifs[i].new_ctx->def.width, ret);
+ continue;
+ }
}
/* Restart the internal monitor vdev on new channel */
@@ -6607,7 +7155,7 @@ static void
ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+ struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar };
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6637,17 +7185,19 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
+
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -6705,20 +7255,27 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
struct ath12k_wmi_peer_create_arg param;
- ar = ath12k_ah_to_ar(ah);
+ /* For a multi radio wiphy, the vdev was not created during add_interface();
+ * create it now since we have a channel ctx to assign it to a specific ar/fw.
+ */
+ ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx);
+ if (!ar) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
arvif->punct_bitmap = ctx->def.punctured;
@@ -6788,19 +7345,28 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
- ar = ath12k_ah_to_ar(ah);
+ /* The vif is expected to be attached to an ar's vdev.
+ * Unlike assign_vif_chanctx(), we leave the vif/vdev
+ * as is here and do not delete the vdev; it will be
+ * deleted and unassigned either during
+ * remove_interface() or when a channel change
+ * moves the vif to a new ar.
+ */
+ if (!arvif->is_created)
+ return;
+
+ ar = arvif->ar;
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
@@ -6846,13 +7412,20 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx);
+ if (!ar)
+ return -EINVAL;
mutex_lock(&ar->conf_mutex);
+ /* Switching channels across radios is not allowed */
+ if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EINVAL;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac chanctx switch n_vifs %d mode %d\n",
n_vifs, mode);
@@ -6893,11 +7466,21 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
- ar = ath12k_ah_to_ar(ah);
-
- ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ /* Currently we set the rts threshold value to all the vifs across
+ * all radios of the single wiphy.
+ * TODO Once support for vif specific RTS threshold in mac80211 is
+ * available, ath12k can make use of it.
+ */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ break;
+ }
+ }
return ret;
}
@@ -6917,33 +7500,62 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_flush(struct ath12k *ar)
+static int ath12k_mac_flush(struct ath12k *ar)
{
long time_left;
+ int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
- time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int ath12k_mac_wait_tx_complete(struct ath12k *ar)
+{
+ ath12k_mac_drain_tx(ar);
+ return ath12k_mac_flush(ar);
}
static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
if (drop)
return;
+ /* vif can be NULL when flush() is invoked for the hw as a whole */
+ if (!vif) {
+ for_each_ar(ah, ar, i)
+ ath12k_mac_flush(ar);
+ return;
+ }
+
+ ar = ath12k_get_ar_by_vif(hw, vif);
+
+ if (!ar)
+ return;
+
ath12k_mac_flush(ar);
}
@@ -7145,6 +7757,9 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k *ar = arvif->ar;
+ if (arsta->arvif != arvif)
+ return;
+
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
@@ -7155,10 +7770,14 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
static void ath12k_mac_disable_peer_fixed_rate(void *data,
struct ieee80211_sta *sta)
{
+ struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = data;
struct ath12k *ar = arvif->ar;
int ret;
+ if (arsta->arvif != arvif)
+ return;
+
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
@@ -7306,7 +7925,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
@@ -7392,21 +8011,13 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
- int ret = 0;
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
- ar = ath12k_ah_to_ar(ah);
-
- ar_survey = &ar->survey[idx];
-
- mutex_lock(&ar->conf_mutex);
-
sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -7416,11 +8027,22 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
- if (!sband || idx >= sband->n_channels) {
- ret = -ENOENT;
- goto exit;
+ if (!sband || idx >= sband->n_channels)
+ return -ENOENT;
+
+ ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]);
+ if (!ar) {
+ if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) {
+ memset(survey, 0, sizeof(*survey));
+ return 0;
+ }
+ return -ENOENT;
}
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
@@ -7432,10 +8054,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
-exit:
mutex_unlock(&ar->conf_mutex);
-
- return ret;
+ return 0;
}
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
@@ -7478,7 +8098,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -7508,7 +8128,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
u32 scan_time_msec;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -7643,6 +8263,9 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
+
+ ar->freq_low = freq_low;
+ ar->freq_high = freq_high;
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -7667,6 +8290,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ struct ath12k_hw *ah = ar->ah;
void *channels;
u32 phy_id;
@@ -7721,6 +8345,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+ ah->use_6ghz_regd = true;
}
if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
@@ -7757,10 +8382,12 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
u16 interface_modes = U16_MAX;
- interface_modes &= ar->ab->hw_params->interface_modes;
+ for_each_ar(ah, ar, i)
+ interface_modes &= ar->ab->hw_params->interface_modes;
return interface_modes == U16_MAX ? 0 : interface_modes;
}
@@ -7768,15 +8395,19 @@ static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
enum nl80211_iftype type)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
u16 interface_modes, mode;
bool is_enable = true;
mode = BIT(type);
-
- interface_modes = ar->ab->hw_params->interface_modes;
- if (!(interface_modes & mode))
- is_enable = false;
+ for_each_ar(ah, ar, i) {
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode)) {
+ is_enable = false;
+ break;
+ }
+ }
return is_enable;
}
@@ -7906,13 +8537,16 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- cancel_work_sync(&ar->regd_update_work);
+ for_each_ar(ah, ar, i)
+ cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(hw);
- ath12k_mac_cleanup_unregister(ar);
+ for_each_ar(ah, ar, i)
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7952,7 +8586,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev;
struct ath12k_pdev_cap *cap;
@@ -7967,39 +8601,71 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
WLAN_CIPHER_SUITE_GCMP_256,
WLAN_CIPHER_SUITE_CCMP_256,
};
- int ret;
- u32 ht_cap = 0;
+ int ret, i, j;
+ u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0;
+ bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
+ u8 *mac_addr = NULL;
- pdev = ar->pdev;
+ wiphy->max_ap_assoc_sta = 0;
- if (ab->pdevs_macaddr_valid)
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- else
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ for_each_ar(ah, ar, i) {
+ u32 ht_cap_info = 0;
- ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
- if (ret)
- goto out;
+ pdev = ar->pdev;
+ if (ar->ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ar->ab->mac_addr);
+ ar->mac_addr[4] += ar->pdev_idx;
+ }
+
+ ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands);
+ if (ret)
+ goto err_cleanup_unregister;
+
+ ht_cap &= ht_cap_info;
+ wiphy->max_ap_assoc_sta += ar->max_num_stations;
+
+ /* Advertise the maximum antenna support across all radios; the driver
+ * can handle per-pdev antenna settings based on the pdev capability
+ * when antenna changes are made.
+ */
+ cap = &pdev->cap;
+
+ antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask);
+ antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask);
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
+ if (ar->supports_6ghz)
+ is_6ghz = true;
- cap = &pdev->cap;
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ is_raw_mode = true;
+
+ if (!ar->ab->hw_params->supports_monitor)
+ is_monitor_disable = true;
+
+ if (i == 0)
+ mac_addr = ar->mac_addr;
+ else
+ mac_addr = ab->mac_addr;
+ }
- wiphy->available_antennas_rx = cap->rx_chain_mask;
- wiphy->available_antennas_tx = cap->tx_chain_mask;
+ wiphy->available_antennas_rx = antennas_rx;
+ wiphy->available_antennas_tx = antennas_tx;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ SET_IEEE80211_PERM_ADDR(hw, mac_addr);
SET_IEEE80211_DEV(hw, ab->dev);
ret = ath12k_mac_setup_iface_combinations(ah);
if (ret) {
ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
- goto err_cleanup_unregister;
+ goto err_complete_cleanup_unregister;
}
wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
- if (wiphy->bands[NL80211_BAND_2GHZ] &&
+ if (ah->num_radio == 1 &&
+ wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
wiphy->bands[NL80211_BAND_6GHZ])
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
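
The hw_register() rework above folds per-radio values into one wiphy: HT capability bits are AND-ed so only features common to every radio are advertised, max_ap_assoc_sta is summed, antenna masks take the maximum, and boolean properties (6 GHz support, raw mode, monitor) are OR-folded. A standalone sketch of that fold, with hypothetical capability values in place of real pdev data:

--- 8< --- standalone sketch (not part of the patch) ---
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical per-radio capabilities, standing in for ar/pdev fields. */
struct radio_caps {
	uint32_t ht_cap_info;
	uint32_t rx_chain_mask;
	uint32_t tx_chain_mask;
	unsigned int max_num_stations;
	bool supports_6ghz;
};

int main(void)
{
	const struct radio_caps radios[] = {
		{ .ht_cap_info = 0x01ef, .rx_chain_mask = 0x3, .tx_chain_mask = 0x3,
		  .max_num_stations = 512, .supports_6ghz = false },
		{ .ht_cap_info = 0x016f, .rx_chain_mask = 0xf, .tx_chain_mask = 0xf,
		  .max_num_stations = 512, .supports_6ghz = true },
	};
	uint32_t ht_cap = UINT32_MAX, antennas_rx = 0, antennas_tx = 0;
	unsigned int max_ap_assoc_sta = 0;
	bool is_6ghz = false;

	/* Same folding as the hw_register() hunk: AND for feature bits that
	 * must hold on every radio, max for antenna masks, sum for station
	 * limits, OR for "any radio has it" properties.
	 */
	for (size_t i = 0; i < sizeof(radios) / sizeof(radios[0]); i++) {
		ht_cap &= radios[i].ht_cap_info;
		if (radios[i].rx_chain_mask > antennas_rx)
			antennas_rx = radios[i].rx_chain_mask;
		if (radios[i].tx_chain_mask > antennas_tx)
			antennas_tx = radios[i].tx_chain_mask;
		max_ap_assoc_sta += radios[i].max_num_stations;
		is_6ghz |= radios[i].supports_6ghz;
	}

	printf("ht_cap %#x rx %#x tx %#x max_sta %u 6ghz %d\n",
	       (unsigned int)ht_cap, (unsigned int)antennas_rx,
	       (unsigned int)antennas_tx, max_ap_assoc_sta, is_6ghz);
	return 0;
}
--- >8 ---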
@@ -8050,6 +8716,12 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
+ /* MLO is not yet supported so disable Wireless Extensions for now
+ * to make sure ath12k users don't use it. This flag can be removed
+ * once WIPHY_FLAG_SUPPORTS_MLO is enabled.
+ */
+ wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
@@ -8067,7 +8739,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
- if (ar->supports_6ghz) {
+ if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(wiphy,
@@ -8078,7 +8750,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_reg_init(hw);
- if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ if (!is_raw_mode) {
hw->netdev_features = NETIF_F_HW_CSUM;
ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
@@ -8090,7 +8762,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
goto err_free_if_combs;
}
- if (!ab->hw_params->supports_monitor)
+ if (is_monitor_disable)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
* while. But that time is so short and in practise it make
@@ -8098,13 +8770,17 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
*/
wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
- /* Apply the regd received during initialization */
- ret = ath12k_regd_update(ar, true);
- if (ret) {
- ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
- goto err_unregister_hw;
+ for_each_ar(ah, ar, i) {
+ /* Apply the regd received during initialization */
+ ret = ath12k_regd_update(ar, true);
+ if (ret) {
+ ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
}
+ ath12k_debugfs_register(ar);
+
return 0;
err_unregister_hw:
@@ -8114,10 +8790,15 @@ err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
+err_complete_cleanup_unregister:
+ i = ah->num_radio;
+
err_cleanup_unregister:
- ath12k_mac_cleanup_unregister(ar);
+ for (j = 0; j < i; j++) {
+ ar = ath12k_ah_to_ar(ah, j);
+ ath12k_mac_cleanup_unregister(ar);
+ }
-out:
SET_IEEE80211_DEV(hw, NULL);
return ret;
@@ -8243,7 +8924,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
pdev_idx = pdev_map[i].pdev_idx;
pdev = &ab->pdevs[pdev_idx];
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, i);
ar->ah = ah;
ar->ab = ab;
ar->hw_link_id = i;
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3f5e1be0dff9..69fd282b9dd3 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -78,4 +78,8 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
+int ath12k_mac_wait_tx_complete(struct ath12k *ar);
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index adb8c3ec1950..fef2f7622033 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -19,34 +19,6 @@
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 32,
@@ -112,34 +84,6 @@ const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 64,
@@ -196,7 +140,7 @@ const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
.ch_cfg = ath12k_mhi_channels_wcn7850,
.num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
@@ -385,7 +329,6 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
"failed to read board id\n");
} else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
dualmac = true;
- ab->slo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"dualmac fw selected for board id: %x\n", board_id);
}
@@ -470,6 +413,8 @@ static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
return "POWER_ON";
case ATH12K_MHI_POWER_OFF:
return "POWER_OFF";
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ return "POWER_OFF_KEEP_DEV";
case ATH12K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH12K_MHI_SUSPEND:
@@ -501,6 +446,7 @@ static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_FORCE_POWER_OFF:
clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
@@ -544,6 +490,7 @@ static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
return 0;
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_SUSPEND:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
@@ -594,12 +541,27 @@ static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
ret = 0;
break;
case ATH12K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ /* In case of resume, QRTR's resume_early() is called
+ * right after ath12k's resume_early(). Since QRTR requires
+ * MHI mission mode state when preparing IPCR channels
+ * (see ee_mask of that channel), we need to use the 'sync'
+ * version here to make sure MHI is in that state when we
+ * return. Or QRTR might resume before that state comes,
+ * and as a result it fails.
+ *
+ * The 'sync' version works for non-resume (normal power on)
+ * case as well.
+ */
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
case ATH12K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
@@ -653,9 +615,17 @@ out:
return ret;
}
-void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci, bool is_suspend)
{
- ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+ /* During suspend we need to use mhi_power_down_keep_dev()
+ * workaround, otherwise ath12k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF_KEEP_DEV);
+ else
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
}
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
index ebc23640ce7a..9362ad1958c3 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.h
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_MHI_H
#define _ATH12K_MHI_H
@@ -22,6 +22,7 @@ enum ath12k_mhi_state {
ATH12K_MHI_DEINIT,
ATH12K_MHI_POWER_ON,
ATH12K_MHI_POWER_OFF,
+ ATH12K_MHI_POWER_OFF_KEEP_DEV,
ATH12K_MHI_FORCE_POWER_OFF,
ATH12K_MHI_SUSPEND,
ATH12K_MHI_RESUME,
@@ -34,7 +35,7 @@ extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
int ath12k_mhi_start(struct ath12k_pci *ar_pci);
-void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend);
int ath12k_mhi_register(struct ath12k_pci *ar_pci);
void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
index d334df720032..3a851ee15b2f 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.c
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -121,7 +121,7 @@ static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_p2p_noa_arg *arg = data;
- if (arvif->vdev_id != arg->vdev_id)
+ if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;
ath12k_p2p_noa_update(arvif, arg->noa);
@@ -132,6 +132,7 @@ void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
{
struct ath12k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
+ .ar = ar,
.noa = noa,
};
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
index 5768139a7844..b2eec51a9900 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.h
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -12,6 +12,7 @@ struct ath12k_wmi_p2p_noa_info;
struct ath12k_p2p_noa_arg {
u32 vdev_id;
+ struct ath12k *ar;
const struct ath12k_wmi_p2p_noa_info *noa;
};
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 14954bc05144..16af046c33d9 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -872,7 +872,7 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
goto release_region;
}
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
return 0;
release_region:
@@ -1271,7 +1271,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
return 0;
}
-void ath12k_pci_power_down(struct ath12k_base *ab)
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1280,7 +1280,7 @@ void ath12k_pci_power_down(struct ath12k_base *ab)
ath12k_pci_force_wake(ab_pci->ab);
ath12k_pci_msi_disable(ab_pci);
- ath12k_mhi_stop(ab_pci);
+ ath12k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_sw_reset(ab_pci->ab, false);
}
@@ -1503,7 +1503,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
goto qmi_fail;
}
@@ -1531,7 +1531,7 @@ static void ath12k_pci_shutdown(struct pci_dev *pdev)
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1558,9 +1558,36 @@ static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
- ath12k_pci_pm_suspend,
- ath12k_pci_pm_resume);
+static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_suspend_late(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_resume_early(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
+ ath12k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
+ ath12k_pci_pm_resume_early)
+};
static struct pci_driver ath12k_pci_driver = {
.name = "ath12k_pci",
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index ca93693ba4e9..6186a78038cf 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -143,5 +143,5 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab);
void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
-void ath12k_pci_power_down(struct ath12k_base *ab);
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 92845ffff44a..5484112859a6 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -583,6 +583,24 @@ static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
board_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2005,7 +2023,15 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
u8 hw_link_id = 0;
int i;
+ if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "intra device MLO is disabled hence skip QMI MLO cap");
+ return;
+ }
+
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ab->mlo_capable_flags = 0;
+
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
@@ -2124,9 +2150,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret;
- if (!ab->slo_capable)
- goto out;
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2151,6 +2174,13 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
+ if (resp.single_chip_mlo_support_valid) {
+ if (resp.single_chip_mlo_support)
+ ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ else
+ ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ }
+
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2158,9 +2188,11 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
- ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
resp.num_phy_valid, resp.num_phy,
- resp.board_id_valid, resp.board_id);
+ resp.board_id_valid, resp.board_id,
+ resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
return;
@@ -2325,8 +2357,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
+
dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].v.addr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].v.addr = NULL;
@@ -2352,6 +2385,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
chunk->v.addr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
@@ -2370,6 +2417,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
chunk->type, chunk->size);
return -ENOMEM;
}
+
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2666,6 +2717,19 @@ out:
return ret;
}
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -2675,10 +2739,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
size_t m3_len;
int ret;
- if (m3_mem->vaddr)
- /* m3 firmware buffer is already available in the DMA buffer */
- return 0;
-
if (ab->fw.m3_data && ab->fw.m3_len > 0) {
/* firmware-N.bin had a m3 firmware file so use that */
m3_data = ab->fw.m3_data;
@@ -2700,6 +2760,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
m3_len = fw->size;
}
+ /* In recovery/resume cases, the M3 buffer is not freed; try to reuse it */
+ if (m3_mem->vaddr) {
+ if (m3_mem->size >= m3_len)
+ goto skip_m3_alloc;
+
+ /* Old buffer is too small, free and reallocate */
+ ath12k_qmi_m3_free(ab);
+ }
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
m3_len, &m3_mem->paddr,
GFP_KERNEL);
@@ -2710,6 +2779,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
goto out;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, m3_data, m3_len);
m3_mem->size = m3_len;
@@ -2721,19 +2791,6 @@ out:
return ret;
}
-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
-{
- struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
-
- if (!m3_mem->vaddr)
- return;
-
- dma_free_coherent(ab->dev, m3_mem->size,
- m3_mem->vaddr, m3_mem->paddr);
- m3_mem->vaddr = NULL;
- m3_mem->size = 0;
-}
-
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -3178,6 +3235,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_fw_ready_cb,
},
+
+ /* end of list */
+ {},
};
static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3261,7 +3321,8 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath12k_hal_dump_srng_stats(ab);
+ if (ab->is_reset)
+ ath12k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 6ee33c9851c6..0dfcbd8cb59b 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
union {
void __iomem *ioaddr;
@@ -265,6 +267,8 @@ struct qmi_wlanfw_phy_cap_resp_msg_v01 {
u8 num_phy;
u8 board_id_valid;
u32 board_id;
+ u8 single_chip_mlo_support_valid;
+ u8 single_chip_mlo_support;
};
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f308e9a6ed55..fbf38044938c 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -49,8 +49,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- int ret;
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret, i;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
@@ -85,10 +85,16 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
arg.cc_info.alpha2[2] = 0;
- ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
- if (ret)
- ath12k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ /* Allow fresh updates to wiphy regd */
+ ah->regd_updated = false;
+
+ /* Send the reg change request to all the radios */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
}
int ath12k_reg_update_chan_list(struct ath12k *ar)
@@ -202,10 +208,32 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
{
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ struct ath12k_hw *ah = ar->ah;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
+ int i;
ab = ar->ab;
+
+ /* If one of the radios within ah has already updated the regd for
+ * the wiphy, then avoid setting regd again
+ */
+ if (ah->regd_updated)
+ return 0;
+
+ /* The firmware provides reg rules which are similar for the 2 GHz and
+ * 5 GHz pdevs, but the 6 GHz pdev has a superset of all rules,
+ * including rules for all bands, so we prefer the 6 GHz pdev's rules
+ * for setting up the wiphy regd.
+ * If a 6 GHz pdev is part of the ath12k_hw, wait for that pdev;
+ * otherwise pick the first pdev which calls this function and use its
+ * regd to update the global hw regd.
+ * The regd_updated flag set at the end will not allow any further
+ * updates.
+ */
+ if (ah->use_6ghz_regd && !ar->supports_6ghz)
+ return 0;
+
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
@@ -258,10 +286,17 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
if (ret)
goto err;
- if (ar->state == ATH12K_STATE_ON) {
- ret = ath12k_reg_update_chan_list(ar);
- if (ret)
- goto err;
+ ah->regd_updated = true;
+ /* Apply the new regd to all the radios, this is expected to be received only once
+ * since we check for ah->regd_updated and allow here only once.
+ */
+ for_each_ar(ah, ar, i) {
+ if (ar->state == ATH12K_STATE_ON) {
+ ab = ar->ab;
+ ret = ath12k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
}
return 0;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9d69a1769926..7a52d2082b79 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -858,20 +858,20 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
len = sizeof(*txrx_streams);
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_2GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_2GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
txrx_streams++;
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_5GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_5GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
@@ -1900,7 +1900,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well
@@ -2723,6 +2723,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_interface_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ u32 len, len_aligned;
+ int ret;
+
+ len_aligned = roundup(buf_len, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->param_type_id = cpu_to_le32(param_id);
+ cmd->length = cpu_to_le32(buf_len);
+
+ ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
+ ptr += TLV_HDR_SIZE;
+ memcpy(ptr, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
+ param_id, ret);
+ dev_kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
+ const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
+ const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
+
+ sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
+ sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
+ sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
+ TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+ cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_table_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_table_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_dbs_backoff_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_geo_len_aligned;
+ const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
+
+ sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
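
All three new BIOS commands above share one framing: a fixed command struct, a TLV header declaring a byte array whose length is rounded up to a 4-byte multiple, then the payload, with the total skb length being the sum of those parts. The standalone sketch below mirrors only that framing; the TLV header layout and tag value are simplified and hypothetical, the real encoding is produced by ath12k_wmi_tlv_hdr().

--- 8< --- standalone sketch (not part of the patch) ---
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Round a payload length up to a multiple of 4 bytes, as the hunk does with
 * roundup(buf_len, sizeof(u32)) before sizing the TLV.
 */
static uint32_t roundup_u32(uint32_t len)
{
	return (len + 3) & ~3u;
}

/* Hypothetical, simplified TLV header: a tag word and a length word. */
struct tlv_hdr {
	uint32_t tag;
	uint32_t len;
};

/* Fixed part, loosely modelled on wmi_pdev_set_bios_interface_cmd. */
struct bios_if_cmd {
	uint32_t pdev_id;
	uint32_t param_type_id;
	uint32_t length;	/* unpadded payload length */
};

int main(void)
{
	const uint8_t payload[] = { 0x11, 0x22, 0x33, 0x44, 0x55 }; /* 5 bytes */
	uint32_t padded = roundup_u32(sizeof(payload));		     /* -> 8 */
	size_t total = sizeof(struct bios_if_cmd) + sizeof(struct tlv_hdr) + padded;

	uint8_t *buf = calloc(1, total);
	if (!buf)
		return 1;

	struct bios_if_cmd *cmd = (struct bios_if_cmd *)buf;
	cmd->pdev_id = 0;		/* WMI_PDEV_ID_SOC in the driver */
	cmd->param_type_id = 1;
	cmd->length = sizeof(payload);	/* real length, padding excluded */

	struct tlv_hdr *tlv = (struct tlv_hdr *)(buf + sizeof(*cmd));
	tlv->tag = 0xc0de;		/* hypothetical tag value */
	tlv->len = padded;

	memcpy(buf + sizeof(*cmd) + sizeof(*tlv), payload, sizeof(payload));

	printf("payload %zu -> padded %u, total message %zu bytes\n",
	       sizeof(payload), (unsigned int)padded, total);
	free(buf);
	return 0;
}
--- >8 ---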
@@ -3324,7 +3467,8 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
- wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
+ WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
@@ -4041,6 +4185,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
@@ -5927,13 +6072,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
}
}
- /* TODO: Pending handle beacon implementation
- *if (ieee80211_is_beacon(hdr->frame_control))
- * ath12k_mac_handle_beacon(ar, skb);
- */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath12k_mac_handle_beacon(ar, skb);
ath12k_dbg(ab, ATH12K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -6137,42 +6280,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
+ u32 vdev_id;
+ u8 roam_reason;
if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath12k_warn(ab, "failed to extract roam event");
return;
}
+ vdev_id = le32_to_cpu(roam_ev.vdev_id);
+ roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
+ WMI_ROAM_REASON_MASK);
+
ath12k_dbg(ab, ATH12K_DBG_WMI,
- "wmi roam event vdev %u reason 0x%08x rssi %d\n",
- roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+ "wmi roam event vdev %u reason %d rssi %d\n",
+ vdev_id, roam_reason, roam_ev.rssi);
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid vdev id in roam ev %d",
- roam_ev.vdev_id);
+ ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
rcu_read_unlock();
return;
}
- if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
- switch (le32_to_cpu(roam_ev.reason)) {
+ switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- /* TODO: Pending beacon miss and connection_loss_work
- * implementation
- * ath12k_mac_handle_beacon_miss(ar, vdev_id);
- */
+ ath12k_mac_handle_beacon_miss(ar, vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
break;
}
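
The roam-event hunk above stops treating the firmware's reason word as a plain enum and instead extracts the reason from the low nibble (WMI_ROAM_REASON_MASK, GENMASK(3, 0) per the wmi.h hunk below), leaving bits 4..5 for the subnet status. A tiny standalone sketch of that extraction with userspace GENMASK()/field helpers:

--- 8< --- standalone sketch (not part of the patch) ---
#include <stdio.h>
#include <stdint.h>

/* Userspace GENMASK()/u32_get_bits() equivalents. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* From the wmi.h hunk in this patch. */
#define WMI_ROAM_REASON_MASK		GENMASK(3, 0)
#define WMI_ROAM_SUBNET_STATUS_MASK	GENMASK(5, 4)

int main(void)
{
	/* Example firmware value: reason 2 (beacon miss) in bits 0..3,
	 * subnet status 1 in bits 4..5.
	 */
	uint32_t reason_word = 0x12;

	printf("roam reason %u subnet status %u\n",
	       (unsigned int)FIELD_GET(WMI_ROAM_REASON_MASK, reason_word),
	       (unsigned int)FIELD_GET(WMI_ROAM_SUBNET_STATUS_MASK, reason_word));
	return 0;
}
--- >8 ---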
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 103462feb935..496866673aea 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -164,10 +164,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_MAX_HINT_S_SSID 10
-#define WLAN_SCAN_MAX_HINT_BSSID 10
-#define MAX_RNR_BSS 5
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -357,6 +353,9 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -1929,6 +1928,9 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
+ WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
WMI_TAG_MAX
};
@@ -2199,8 +2201,11 @@ enum wmi_peer_param {
WMI_PEER_SET_MAX_TX_RATE = 17,
WMI_PEER_SET_MIN_TX_RATE = 18,
WMI_PEER_SET_DEFAULT_ROUTING = 19,
+ WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP = 39,
};
+#define WMI_PEER_PUNCTURE_BITMAP GENMASK(23, 8)
+
enum wmi_slot_time {
WMI_VDEV_SLOT_TIME_LONG = 1,
WMI_VDEV_SLOT_TIME_SHORT = 2,
@@ -2404,6 +2409,7 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2604,6 +2610,19 @@ struct ath12k_wmi_soc_hal_reg_caps_params {
__le32 num_phy;
} __packed;
+enum wmi_channel_width {
+ WMI_CHAN_WIDTH_20 = 0,
+ WMI_CHAN_WIDTH_40 = 1,
+ WMI_CHAN_WIDTH_80 = 2,
+ WMI_CHAN_WIDTH_160 = 3,
+ WMI_CHAN_WIDTH_80P80 = 4,
+ WMI_CHAN_WIDTH_5 = 5,
+ WMI_CHAN_WIDTH_10 = 6,
+ WMI_CHAN_WIDTH_165 = 7,
+ WMI_CHAN_WIDTH_160P160 = 8,
+ WMI_CHAN_WIDTH_320 = 9,
+};
+
#define WMI_MAX_EHTCAP_MAC_SIZE 2
#define WMI_MAX_EHTCAP_PHY_SIZE 3
#define WMI_MAX_EHTCAP_RATE_SET 3
@@ -2728,9 +2747,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_vdev_txrx_streams_params {
__le32 tlv_header;
- u32 band;
- u32 supported_tx_streams;
- u32 supported_rx_streams;
+ __le32 band;
+ __le32 supported_tx_streams;
+ __le32 supported_rx_streams;
} __packed;
struct wmi_vdev_delete_cmd {
@@ -3357,34 +3376,6 @@ struct wmi_bssid_arg {
const u8 *bssid;
};
-struct wmi_start_scan_arg {
- u32 scan_id;
- u32 scan_req_id;
- u32 vdev_id;
- u32 scan_priority;
- u32 notify_scan_events;
- u32 dwell_time_active;
- u32 dwell_time_passive;
- u32 min_rest_time;
- u32 max_rest_time;
- u32 repeat_probe_time;
- u32 probe_spacing_time;
- u32 idle_time;
- u32 max_scan_time;
- u32 probe_delay;
- u32 scan_ctrl_flags;
-
- u32 ie_len;
- u32 n_channels;
- u32 n_ssids;
- u32 n_bssids;
-
- u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
- u32 channels[64];
- struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
- struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
-};
-
#define WMI_SCAN_STOP_ONE 0x00000000
#define WMI_SCAN_STOP_VAP_ALL 0x01000000
#define WMI_SCAN_STOP_ALL 0x04000000
@@ -4214,6 +4205,9 @@ struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
} __packed;
+#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
+#define WMI_ROAM_SUBNET_STATUS_MASK GENMASK(5, 4)
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -4776,8 +4770,6 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define WMI_MAX_MEM_REQS 32
-
#define MAX_RADIOS 3
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
@@ -4808,6 +4800,37 @@ struct ath12k_wmi_base {
struct ath12k_wmi_target_cap_arg *targ_cap;
};
+struct wmi_pdev_set_bios_interface_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 param_type_id;
+ __le32 length;
+} __packed;
+
+enum wmi_bios_param_type {
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE = 0,
+ WMI_BIOS_PARAM_TAS_CONFIG_TYPE = 1,
+ WMI_BIOS_PARAM_TAS_DATA_TYPE = 2,
+
+ /* bandedge control power */
+ WMI_BIOS_PARAM_TYPE_BANDEDGE = 3,
+
+ WMI_BIOS_PARAM_TYPE_MAX,
+};
+
+struct wmi_pdev_set_bios_sar_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 sar_len;
+ __le32 dbs_backoff_len;
+} __packed;
+
+struct wmi_pdev_set_bios_geo_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 geo_len;
+} __packed;
+
#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
enum wmi_sys_cap_info_flags {
@@ -4966,6 +4989,10 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
struct sk_buff *tmpl);
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len);
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 1963d3145481..fb5144e2d86c 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -364,8 +364,7 @@ static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
packet->buf -= HTC_HDR_LENGTH;
hdr = (struct htc_frame_hdr *)packet->buf;
- /* Endianess? */
- put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ put_unaligned_le16(packet->act_len, &hdr->payld_len);
hdr->flags = flags;
hdr->eid = packet->endpoint;
hdr->ctrl[0] = ctrl0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 9b88d96bfe96..2f2edfe43761 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -237,8 +237,7 @@ static int htc_issue_packets(struct htc_target *target,
packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
- /* Endianess? */
- put_unaligned((u16) payload_len, &htc_hdr->payld_len);
+ put_unaligned_le16(payload_len, &htc_hdr->payld_len);
htc_hdr->flags = packet->info.tx.flags;
htc_hdr->eid = (u8) packet->endpoint;
htc_hdr->ctrl[0] = 0;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 8a43c48ec1cf..9ab091044706 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1427,25 +1427,7 @@ static struct sdio_driver ath6kl_sdio_driver = {
.remove = ath6kl_sdio_remove,
.drv.pm = ATH6KL_SDIO_PM_OPS,
};
-
-static int __init ath6kl_sdio_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&ath6kl_sdio_driver);
- if (ret)
- ath6kl_err("sdio driver registration failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit ath6kl_sdio_exit(void)
-{
- sdio_unregister_driver(&ath6kl_sdio_driver);
-}
-
-module_init(ath6kl_sdio_init);
-module_exit(ath6kl_sdio_exit);
+module_sdio_driver(ath6kl_sdio_driver);
MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
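module_sdio_driver() removes the hand-rolled init/exit pair shown in the hunk above; roughly, it expands via module_driver() to the equivalent of the sketch below (the only thing lost is the custom error message, since the registration error is still returned to the module loader):

#include <linux/mmc/sdio_func.h>
#include <linux/module.h>

/* Sketch: approximate expansion of module_sdio_driver(ath6kl_sdio_driver). */
static int __init ath6kl_sdio_driver_init(void)
{
	return sdio_register_driver(&ath6kl_sdio_driver);
}
module_init(ath6kl_sdio_driver_init);

static void __exit ath6kl_sdio_driver_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
module_exit(ath6kl_sdio_driver_exit);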
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 668fc07b3073..29ca65a732a6 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -39,6 +39,7 @@ extern int ath9k_modparam_nohwcrypt;
extern int ath9k_led_blink;
extern bool is_ath9k_unloaded;
extern int ath9k_use_chanctx;
+extern int ath9k_use_msi;
/*************************/
/* Descriptor Management */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index e8c2cc03be0c..27b860b0c769 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -76,7 +76,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_4k_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Switch Settle", modal_hdr->switchSettling);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index fd5312c2a7e3..d85472ee4d85 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -79,8 +79,8 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_ar9287_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
- PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 7685f8ab371e..84b31caf8ca6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -135,9 +135,9 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
- PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
- PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain2 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[2]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a2943aaecb20..01173aac3045 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
if (power_mode != ATH9K_PM_AWAKE) {
spin_lock(&common->cc_lock);
ath_hw_cycle_counters_update(common);
- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
- memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ memset(&common->cc, 0, sizeof(common->cc));
spin_unlock(&common->cc_lock);
}
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index e655cd8bbf94..1ff53520f0a3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -21,8 +21,6 @@
#include <linux/module.h>
#include "ath9k.h"
-extern int ath9k_use_msi;
-
static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d519b676a109..35aa47a9db90 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1674,8 +1674,14 @@ static void
ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
{
struct ieee80211_hdr *hdr;
- u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- u16 mask_val = mask * val;
+ __le16 mask, mask_val;
+
+ mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (val)
+ mask_val = mask;
+ else
+ mask_val = 0;
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
if ((hdr->frame_control & mask) != mask_val) {
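The ath9k change above is needed because ieee80211_hdr::frame_control is a __le16, so the mask and the compared value must stay in little-endian space instead of being built as host-order u16. A minimal sketch of the endian-clean set/clear pattern:

#include <linux/ieee80211.h>

/* Sketch: set or clear the MoreData bit without leaving __le16 space;
 * no byte swap of frame_control is ever required.
 */
static void sketch_set_moredata(struct ieee80211_hdr *hdr, bool val)
{
	if (val)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	else
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}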
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e902ca80eba7..0226c31a6cae 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
* carl9170_tx_fill_rateinfo() has filled the rate information
* before we get to this point.
*/
- memset_after(&txinfo->status, 0, rates);
+ memset(&txinfo->pad, 0, sizeof(txinfo->pad));
+ memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c4edf8355941..a3e03580cd9f 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
ar->usb_ep_cmd_is_bulk = true;
}
+ /* Verify that all expected endpoints are present */
+ if (ar->usb_ep_cmd_is_bulk) {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ } else {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ }
+
+ if (err) {
+ carl9170_free(ar);
+ return err;
+ }
+
usb_set_intfdata(intf, ar);
SET_IEEE80211_DEV(ar->hw, &intf->dev);
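usb_check_bulk_endpoints() and usb_check_int_endpoints() take a zero-terminated list of endpoint addresses and return true only if the interface's current altsetting provides each one with the matching transfer type, which is what lets the probe above reject devices with malformed descriptors before any URBs are submitted. A minimal sketch with hypothetical endpoint numbers:

#include <linux/errno.h>
#include <linux/usb.h>

/* Sketch: fail probe early unless the interface exposes bulk-IN 1,
 * bulk-OUT 2 and interrupt-IN 3 (endpoint numbers are made up).
 */
static int sketch_check_eps(struct usb_interface *intf)
{
	static const u8 bulk_eps[] = { 1 | USB_DIR_IN, 2 | USB_DIR_OUT, 0 };
	static const u8 int_eps[] = { 3 | USB_DIR_IN, 0 };

	if (!usb_check_bulk_endpoints(intf, bulk_eps) ||
	    !usb_check_int_endpoints(intf, int_eps))
		return -ENODEV;

	return 0;
}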
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index bfbd3c7a70b3..e760d8002e09 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -756,9 +756,9 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
if (sta->deflink.vht_cap.vht_supported) {
sta_priv->supported_rates.op_rate_mode = STA_11ac;
sta_priv->supported_rates.vht_rx_mcs_map =
- sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
sta_priv->supported_rates.vht_tx_mcs_map =
- sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 0802ed728824..8826998797d6 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -318,7 +318,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
memset(&status, 0, sizeof(status));
bd = (struct wcn36xx_rx_bd *)skb->data;
- buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ buff_to_be(bd, sizeof(*bd)/sizeof(u32));
wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
"BD <<< ", (char *)bd,
sizeof(struct wcn36xx_rx_bd));
@@ -692,7 +692,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
/* MGMT and CTRL frames are handled here */
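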
wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast);
- buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32));
+ buff_to_be(&bd, sizeof(bd)/sizeof(u32));
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index ff4a8e5d7209..bccc27de848d 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -100,11 +100,14 @@ enum wcn36xx_ampdu_state {
#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
-static inline void buff_to_be(u32 *buf, size_t len)
+static inline void buff_to_be(void *buf, size_t len)
{
+ __be32 *to = buf;
+ u32 *from = buf;
int i;
+
for (i = 0; i < len; i++)
- buf[i] = cpu_to_be32(buf[i]);
+ to[i] = cpu_to_be32(from[i]);
}
struct nv_data {
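The reworked buff_to_be() above keeps the in-place swap but gives the helper separate u32 (source) and __be32 (destination) views of the buffer, so callers can pass a descriptor pointer without a cast and sparse no longer sees big-endian values stored into CPU-order words. A usage sketch with a hypothetical descriptor; for out-of-place conversion the generic cpu_to_be32_array() helper could be used instead:

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: hypothetical four-word descriptor swapped to big-endian
 * before being handed to the firmware.
 */
struct sketch_bd {
	u32 word[4];
};

static void sketch_bd_to_fw(struct sketch_bd *bd)
{
	buff_to_be(bd, sizeof(*bd) / sizeof(u32));		/* in place */
}

static void sketch_bd_copy_to_fw(__be32 *dst, const struct sketch_bd *bd)
{
	cpu_to_be32_array(dst, bd->word, ARRAY_SIZE(bd->word));	/* copy */
}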
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index dbe4b3478f03..e8f1d30a8d73 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -892,10 +892,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = request->wdev;
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
- struct {
- struct wmi_start_scan_cmd cmd;
- u16 chnl[4];
- } __packed cmd;
+ DEFINE_FLEX(struct wmi_start_scan_cmd, cmd,
+ channel_list, num_channels, 4);
uint i, n;
int rc;
@@ -977,9 +975,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
vif->scan_request = request;
mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
- cmd.cmd.num_channels = 0;
+ cmd->scan_type = WMI_ACTIVE_SCAN;
+ cmd->num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
int ch = request->channels[i]->hw_value;
@@ -991,7 +988,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
continue;
}
/* 0-based channel indexes */
- cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ cmd->num_channels++;
+ cmd->channel_list[cmd->num_channels - 1].channel = ch - 1;
wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
request->channels[i]->center_freq);
}
@@ -1007,16 +1005,15 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc)
goto out_restore;
- if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
- cmd.cmd.discovery_mode = 1;
+ if (wil->discovery_mode && cmd->scan_type == WMI_ACTIVE_SCAN) {
+ cmd->discovery_mode = 1;
wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
}
if (vif->mid == 0)
wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid,
- &cmd, sizeof(cmd.cmd) +
- cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+ cmd, struct_size(cmd, channel_list, cmd->num_channels));
out_restore:
if (rc) {
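DEFINE_FLEX() (include/linux/overflow.h) replaces the old hand-rolled "struct cmd plus trailing array" wrapper used above: it declares a zero-initialized, stack-backed instance of a flexible-array struct with room for a fixed number of trailing elements and pre-sets the __counted_by() counter, and struct_size()/__struct_size() then give the length to send. A generic sketch with hypothetical struct and field names:

#include <linux/overflow.h>
#include <linux/types.h>

struct sketch_chan_cmd {
	u8 scan_type;
	u8 num_channels;
	struct {
		u8 channel;
		u8 reserved;
	} channel_list[] __counted_by(num_channels);
} __packed;

static void sketch_build_cmd(void)
{
	/* 'cmd' points at stack storage sized for 4 channel_list entries;
	 * num_channels is pre-set to 4 by DEFINE_FLEX().
	 */
	DEFINE_FLEX(struct sketch_chan_cmd, cmd, channel_list, num_channels, 4);

	cmd->scan_type = 1;
	cmd->channel_list[0].channel = 36;

	/* Size actually occupied: header plus num_channels elements. */
	(void)struct_size(cmd, channel_list, cmd->num_channels);
}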
@@ -2735,7 +2732,7 @@ int wil_cfg80211_iface_combinations_from_fw(
return 0;
}
- combo = conc->combos;
+ combo = (const struct wil_fw_concurrency_combo *)(conc + 1);
n_combos = le16_to_cpu(conc->n_combos);
for (i = 0; i < n_combos; i++) {
total_limits += combo->n_limits;
@@ -2751,7 +2748,7 @@ int wil_cfg80211_iface_combinations_from_fw(
return -ENOMEM;
iface_limit = (struct ieee80211_iface_limit *)(iface_combinations +
n_combos);
- combo = conc->combos;
+ combo = (const struct wil_fw_concurrency_combo *)(conc + 1);
for (i = 0; i < n_combos; i++) {
iface_combinations[i].max_interfaces = combo->max_interfaces;
iface_combinations[i].num_different_channels =
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index aa1620e0d24f..2079a90ec260 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -93,7 +93,6 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
/* number of concurrency combinations that follow */
__le16 n_combos;
/* keep last - combinations, variable size by n_combos */
- struct wil_fw_concurrency_combo combos[];
} __packed;
/* brd file info encoded inside a comment record */
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index fbc84c03406b..c3c0b289dcf3 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -212,8 +212,8 @@ fw_handle_concurrency(struct wil6210_priv *wil, const void *data,
}
n_combos = le16_to_cpu(rec->n_combos);
- remain = size - offsetof(struct wil_fw_record_concurrency, combos);
- combo = rec->combos;
+ remain = size - sizeof(struct wil_fw_record_concurrency);
+ combo = (const struct wil_fw_concurrency_combo *)(rec + 1);
for (i = 0; i < n_combos; i++) {
if (remain < sizeof(*combo))
goto out_short;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 6fdb77d4c59e..8ff69dc72fb9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -4015,27 +4015,22 @@ int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
struct {
- struct wmi_set_link_monitor_cmd cmd;
- s8 rssi_thold;
- } __packed cmd = {
- .cmd = {
- .rssi_hyst = rssi_hyst,
- .rssi_thresholds_list_size = 1,
- },
- .rssi_thold = rssi_thold,
- };
- struct {
struct wmi_cmd_hdr hdr;
struct wmi_set_link_monitor_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
+ DEFINE_FLEX(struct wmi_set_link_monitor_cmd, cmd,
+ rssi_thresholds_list, rssi_thresholds_list_size, 1);
+
+ cmd->rssi_hyst = rssi_hyst;
+ cmd->rssi_thresholds_list[0] = rssi_thold;
if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
return -EINVAL;
- rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
- sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, cmd,
+ __struct_size(cmd), WMI_SET_LINK_MONITOR_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 71bf2ae27a98..38f64524019e 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -474,7 +474,7 @@ struct wmi_start_scan_cmd {
struct {
u8 channel;
u8 reserved;
- } channel_list[];
+ } channel_list[] __counted_by(num_channels);
} __packed;
#define WMI_MAX_PNO_SSID_NUM (16)
@@ -3320,7 +3320,7 @@ struct wmi_set_link_monitor_cmd {
u8 rssi_hyst;
u8 reserved[12];
u8 rssi_thresholds_list_size;
- s8 rssi_thresholds_list[];
+ s8 rssi_thresholds_list[] __counted_by(rssi_thresholds_list_size);
} __packed;
/* wmi_link_monitor_event_type */
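__counted_by() ties a flexible array to the field holding its element count, so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can bounds-check accesses at run time; the one rule is that the counter must be assigned before the array is indexed. A minimal heap-side sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sketch_thresholds {
	u8 count;
	s8 values[] __counted_by(count);
};

static struct sketch_thresholds *sketch_alloc(u8 n)
{
	struct sketch_thresholds *t;

	t = kzalloc(struct_size(t, values, n), GFP_KERNEL);
	if (!t)
		return NULL;

	/* Set the counter before touching values[] so the
	 * bounds-checking instrumentation sees the right size.
	 */
	t->count = n;
	return t;
}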
diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c
index 0679d132968f..261b2b746a9c 100644
--- a/drivers/net/wireless/broadcom/b43/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43/sysfs.c
@@ -53,19 +53,14 @@ static ssize_t b43_attr_interfmode_show(struct device *dev,
switch (wldev->phy.g->interfmode) {
case B43_INTERFMODE_NONE:
- count =
- snprintf(buf, PAGE_SIZE,
- "0 (No Interference Mitigation)\n");
+ count = sysfs_emit(buf, "0 (No Interference Mitigation)\n");
break;
case B43_INTERFMODE_NONWLAN:
- count =
- snprintf(buf, PAGE_SIZE,
- "1 (Non-WLAN Interference Mitigation)\n");
+ count = sysfs_emit(buf,
+ "1 (Non-WLAN Interference Mitigation)\n");
break;
case B43_INTERFMODE_MANUALWLAN:
- count =
- snprintf(buf, PAGE_SIZE,
- "2 (WLAN Interference Mitigation)\n");
+ count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n");
break;
default:
B43_WARN_ON(1);
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index eec087ca30e6..d988fe541bf7 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -75,16 +75,14 @@ static ssize_t b43legacy_attr_interfmode_show(struct device *dev,
switch (wldev->phy.interfmode) {
case B43legacy_INTERFMODE_NONE:
- count = snprintf(buf, PAGE_SIZE, "0 (No Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf, "0 (No Interference Mitigation)\n");
break;
case B43legacy_INTERFMODE_NONWLAN:
- count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf,
+ "1 (Non-WLAN Interference Mitigation)\n");
break;
case B43legacy_INTERFMODE_MANUALWLAN:
- count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n");
break;
default:
B43legacy_WARN_ON(1);
@@ -155,11 +153,9 @@ static ssize_t b43legacy_attr_preamble_show(struct device *dev,
mutex_lock(&wldev->wl->mutex);
if (wldev->short_preamble)
- count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble"
- " enabled)\n");
+ count = sysfs_emit(buf, "1 (Short Preamble enabled)\n");
else
- count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble"
- " disabled)\n");
+ count = sysfs_emit(buf, "0 (Short Preamble disabled)\n");
mutex_unlock(&wldev->wl->mutex);
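sysfs_emit() is the preferred way to format show() output: it knows the buffer handed in by the sysfs core is a full page, warns if it is not page-aligned, and clamps output to PAGE_SIZE, so the explicit snprintf(buf, PAGE_SIZE, ...) calls above can simply be dropped. A minimal sketch of a show() handler (attribute and state names are made up; it would be wired up with DEVICE_ATTR_RO() as usual):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: report a boolean driver setting through sysfs. */
static bool sketch_short_preamble;

static ssize_t sketch_preamble_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d (Short Preamble %s)\n",
			  sketch_short_preamble,
			  sketch_short_preamble ? "enabled" : "disabled");
}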
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index dc6d27a36faa..e5ca0f511822 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -6,8 +6,8 @@
#
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/../include
+ -I $(src) \
+ -I $(src)/../include
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
index 46098705e236..5e37c638f966 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-bca.o
brcmfmac-bca-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 00679a990e3d..13391c2d82aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1238,7 +1238,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.name = KBUILD_MODNAME,
.id_table = brcmf_sdmmc_ids,
.drv = {
- .owner = THIS_MODULE,
.pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
.coredump = brcmf_dev_coredump,
},
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b99aa66dc5a9..5fe0e671ecb3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4549,7 +4549,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -4588,7 +4588,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -4622,7 +4622,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
index 5e1fddaff79e..33e86724ba14 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-cyw.o
brcmfmac-cyw-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index d7fb88bb6ae1..06698a714b52 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer {
#define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de
#define BRCMF_RANDOM_SEED_LENGTH 0x100
+static noinline_for_stack void
+brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
+{
+ u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
+
+ get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
+ memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
+}
+
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
const struct firmware *fw, void *nvram,
u32 nvram_len)
@@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
.length = cpu_to_le32(rand_len),
.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
};
- void *randbuf;
/* Some Apple chips/firmwares expect a buffer of random
* data to be present before NVRAM
@@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
sizeof(footer));
address -= rand_len;
- randbuf = kzalloc(rand_len, GFP_KERNEL);
- get_random_bytes(randbuf, rand_len);
- memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
- kfree(randbuf);
+ brcmf_pcie_provide_random_bytes(devinfo, address);
}
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
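Hoisting the 256-byte buffer into a noinline_for_stack helper keeps the caller's stack frame small, because the compiler is not allowed to inline the helper and merge the array into brcmf_pcie_download_fw_nvram(), and it also drops the old kzalloc()/kfree() round trip. A minimal sketch of the pattern with hypothetical names:

#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/random.h>

#define SKETCH_SEED_LEN 0x100

/* Sketch: fill a device memory window with random bytes using a
 * temporary buffer that lives only in this helper's stack frame.
 */
static noinline_for_stack void sketch_seed_window(void __iomem *dst)
{
	u8 buf[SKETCH_SEED_LEN];

	get_random_bytes(buf, sizeof(buf));
	memcpy_toio(dst, buf, sizeof(buf));
}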
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 0ccf735316c2..9a105e6debe1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -117,13 +117,6 @@ struct bootrom_id_le {
__le32 boardrev; /* Board revision */
};
-struct brcmf_usb_image {
- struct list_head list;
- s8 *fwname;
- u8 *image;
- int image_len;
-};
-
struct brcmf_usbdev_info {
struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
spinlock_t qlock;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
index 7f455a19a2b1..3db4cce44f42 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-wcc.o
brcmfmac-wcc-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 090757730ba6..ffdd17eabe1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,9 +16,9 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/phy \
- -I $(srctree)/$(src)/../include
+ -I $(src) \
+ -I $(src)/phy \
+ -I $(src)/../include
brcmsmac-y := \
mac80211_if.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index e859075db716..c3376f887114 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -143,12 +143,6 @@ struct ampdu_info {
struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
-/* used for flushing ampdu packets */
-struct cb_del_ampdu_pars {
- struct ieee80211_sta *sta;
- u16 tid;
-};
-
static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
u32 rate, mcs;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
index 7a82d615ba2a..f9b40cab766c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
@@ -4,7 +4,7 @@
#
# Copyright (c) 2011 Broadcom Corporation
#
-ccflags-y := -I $(srctree)/$(src)/../include
+ccflags-y := -I $(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
brcmutil-objs = utils.o d11.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 072b0a5827d1..bc98b87cf2a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -149,6 +149,8 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 0486b17d7c41..6109d64006db 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -I $(srctree)/$(src)/../
+ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 4caf2e25a297..fa339791223b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2023 Intel Corporation
+ * Copyright (C) 2019-2024 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -960,3 +960,37 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
+
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret = -ENOENT;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WBEM_METHOD);
+ if (IS_ERR(data))
+ return ret;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WBEM_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != IWL_ACPI_WBEM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI WBEM revision:%d\n",
+ tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
+
+ *value = wifi_pkg->package.elements[1].integer.value &
+ IWL_ACPI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from ACPI\n");
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 1d32b82f73db..bb88398a6987 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -27,6 +27,7 @@
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
#define ACPI_GLAI_METHOD "GLAI"
+#define ACPI_WBEM_METHOD "WBEM"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -67,6 +68,12 @@
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
+
+/*
+ * One element for domain type,
+ * and one for enablement of Wi-Fi 320MHz per MCC
+ */
+#define ACPI_WBEM_WIFI_DATA_SIZE 2
/*
* One element for domain type,
* and one for the status
@@ -94,6 +101,9 @@
#define ACPI_DSM_REV 0
+#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+#define IWL_ACPI_WBEM_REVISION 0
+
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
@@ -142,6 +152,7 @@ void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value);
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -205,6 +216,11 @@ static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
{
return -ENOENT;
}
+
+static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index d2a74beed3a1..bbaaf3c73115 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -843,6 +843,52 @@ struct iwl_wowlan_info_notif_v2 {
u8 reserved2[2];
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */
+/* MAX MLO keys of non-active links that can arrive in the notification */
+#define WOWLAN_MAX_MLO_KEYS 18
+
+/**
+ * enum iwl_wowlan_mlo_gtk_type - GTK types
+ * @WOWLAN_MLO_GTK_KEY_TYPE_GTK: GTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_IGTK: IGTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_BIGTK: BIGTK
+ * @WOWLAN_MLO_GTK_KEY_NUM_TYPES: number of key types
+ */
+enum iwl_wowlan_mlo_gtk_type {
+ WOWLAN_MLO_GTK_KEY_TYPE_GTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_IGTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_BIGTK,
+ WOWLAN_MLO_GTK_KEY_NUM_TYPES
+}; /* WOWLAN_MLO_GTK_KEY_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_wowlan_mlo_gtk_flag - MLO GTK flags
+ * @WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK: 0 for len 16, 1 for len 32
+ * @WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK: key id (ranges from 0 to 7)
+ * @WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK: spec link id of the key
+ * @WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK: &enum iwl_wowlan_mlo_gtk_type
+ * @WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK: is this the last given key per
+ * key-type / link-id - the currently used key
+ */
+enum iwl_wowlan_mlo_gtk_flag {
+ WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 0x0001,
+ WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 0x000E,
+ WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 0x00F0,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 0x0300,
+ WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 0x0400
+}; /* WOWLAN_MLO_GTK_FLAG_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_mlo_gtk - MLO GTK info
+ * @key: key material
+ * @flags: &enum iwl_wowlan_mlo_gtk_flag
+ * @pn: packet number
+ */
+struct iwl_wowlan_mlo_gtk {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ __le16 flags;
+ u8 pn[6];
+} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */
+
/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
@@ -859,7 +905,10 @@ struct iwl_wowlan_info_notif_v2 {
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif, or reserved in version < 4
* @reserved2: reserved
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4
*/
struct iwl_wowlan_info_notif {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
@@ -875,8 +924,10 @@ struct iwl_wowlan_info_notif {
__le32 received_beacons;
u8 tid_tear_down;
u8 station_id;
- u8 reserved2[2];
-} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */
+ u8 num_mlo_link_keys;
+ u8 reserved2;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 0f7903c5a4df..f272b6a4e72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2024 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@@ -90,6 +91,12 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
+ * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
+ * uses &struct iwl_mvm_esr_mode_notif
+ */
+ ESR_MODE_NOTIF = 0xF3,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index c6d1f5644638..754c5d655ad0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -642,4 +642,25 @@ struct iwl_mvm_sta_disable_tx_cmd {
__le32 disable;
} __packed; /* STA_DISABLE_TX_API_S_VER_1 */
+/**
+ * enum iwl_mvm_fw_esr_recommendation - FW recommendation code
+ * @ESR_RECOMMEND_LEAVE: recommendation to leave esr
+ * @ESR_FORCE_LEAVE: force exiting esr
+ * @ESR_RECOMMEND_ENTER: recommendation to enter esr
+ */
+enum iwl_mvm_fw_esr_recommendation {
+ ESR_RECOMMEND_LEAVE,
+ ESR_FORCE_LEAVE,
+ ESR_RECOMMEND_ENTER,
+}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode
+ *
+ * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
+ */
+struct iwl_mvm_esr_mode_notif {
+ __le32 action;
+} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 58034dfa7e70..a08497a04733 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -7,7 +7,6 @@
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
-#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -23,8 +22,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* &struct iwl_lari_config_change_cmd_v3,
* &struct iwl_lari_config_change_cmd_v4,
* &struct iwl_lari_config_change_cmd_v5,
- * &struct iwl_lari_config_change_cmd_v6 or
- * &struct iwl_lari_config_change_cmd_v7
+ * &struct iwl_lari_config_change_cmd_v6,
+ * &struct iwl_lari_config_change_cmd_v7 or
+ * &struct iwl_lari_config_change_cmd
*/
LARI_CONFIG_CHANGE = 0x1,
@@ -46,9 +46,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
/**
- * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd
+ * @MCC_ALLOWED_AP_TYPE_CMD: &struct iwl_mcc_allowed_ap_type_cmd
*/
- UATS_TABLE_CMD = 0x5,
+ MCC_ALLOWED_AP_TYPE_CMD = 0x5,
/**
* @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
@@ -439,6 +439,7 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
+#define IWL_WTAS_BLACK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd_common - configures the TAS.
* This is also the v2 structure.
@@ -609,7 +610,7 @@ struct iwl_lari_config_change_cmd_v6 {
/**
* struct iwl_lari_config_change_cmd_v7 - change LARI configuration
- * This structure is used also for lari cmd version 8.
+ * This structure is also used for LARI cmd versions 8 and 9.
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
@@ -619,6 +620,8 @@ struct iwl_lari_config_change_cmd_v6 {
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
+ * For LARI cmd version 4 to 8 - bits 0:3 are supported.
+ * For LARI cmd version 9 - bits 0:5 are supported.
* @chan_state_active_bitmap: Bitmap to enable different bands per country
* or region.
* Each bit represents a country or region, and a band to activate
@@ -642,6 +645,46 @@ struct iwl_lari_config_change_cmd_v7 {
} __packed;
/* LARI_CHANGE_CONF_CMD_S_VER_7 */
/* LARI_CHANGE_CONF_CMD_S_VER_8 */
+/* LARI_CHANGE_CONF_CMD_S_VER_9 */
+
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 10 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 10 - bits 0:4 are supported.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different band.
+ * @oem_320mhz_allow_bitmap: 320MHz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320MHz in Japan.
+ * bit1: enable 320MHz in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_10 */
/* Activate UNII-1 (5.2GHz) for World Wide */
#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
@@ -658,13 +701,13 @@ struct iwl_pnvm_init_complete_ntfy {
#define UATS_TABLE_COL_SIZE 13
/**
- * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD
+ * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
* @offset_map: mapping a mcc to UHB AP type support (UATS) allowed
* @reserved: reserved
*/
-struct iwl_uats_table_cmd {
+struct iwl_mcc_allowed_ap_type_cmd {
u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
__le16 reserved;
-} __packed; /* UATS_TABLE_CMD_S_VER_1 */
+} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2d2b9c8c36ea..2ed7acc09e5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @WOWLAN_INFO_NOTIFICATION: Notification in
* &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2,
- * or iwl_wowlan_info_notif
+ * or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index 5a3f30e5e06d..92e4b62c119f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,11 @@ enum iwl_phy_ops_subcmd_ids {
PER_PLATFORM_ANT_GAIN_CMD = 0x07,
/**
+ * @AP_TX_POWER_CONSTRAINTS_CMD: &struct iwl_txpower_constraints_cmd
+ */
+ AP_TX_POWER_CONSTRAINTS_CMD = 0x0C,
+
+ /**
* @CT_KILL_NOTIFICATION: &struct ct_kill_notif
*/
CT_KILL_NOTIFICATION = 0xFE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 0bf38243f88a..532d5cfa9162 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 {
__le32 timer_period;
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: indicates whether per_chain_restriction
+ * has changed since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA.
+ * Not in use.
+ */
+struct iwl_dev_tx_power_cmd_v8 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+ __le32 tpc_vlp_backoff_level;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
@@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 {
* @v4: version 4 part of the command
* @v5: version 5 part of the command
* @v6: version 6 part of the command
+ * @v7: version 7 part of the command
+ * @v8: version 8 part of the command
*/
struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_common common;
@@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
struct iwl_dev_tx_power_cmd_v7 v7;
+ struct iwl_dev_tx_power_cmd_v8 v8;
};
};
@@ -537,7 +567,7 @@ enum iwl_ppag_flags {
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- * version 3, 4 and 5 are the same structure as v2,
+ * version 3, 4, 5 and 6 are the same structure as v2,
* but has a different format of the flags bitmap
* @flags: values from &enum iwl_ppag_flags
* @gain: table of antenna gain values per chain and sub-band
@@ -702,4 +732,44 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
+
+#define DEFAULT_TPE_TX_POWER 0x7F
+
+/*
+ * Bandwidth: 20/40/80/(160/80+80)/320
+ */
+#define IWL_MAX_TX_EIRP_PWR_MAX_SIZE 5
+#define IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE 16
+
+enum iwl_6ghz_ap_type {
+ IWL_6GHZ_AP_TYPE_LPI,
+ IWL_6GHZ_AP_TYPE_SP,
+ IWL_6GHZ_AP_TYPE_VLP,
+}; /* PHY_AP_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_txpower_constraints_cmd
+ * AP_TX_POWER_CONSTRAINTS_CMD
+ * Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels
+ * @link_id: linkId
+ * @ap_type: see &enum iwl_6ghz_ap_type
+ * @eirp_pwr: 8-bit 2s complement signed integer in the range
+ * -64 dBm to 63 dBm with a 0.5 dB step
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @psd_pwr: 8-bit 2s complement signed integer in the range
+ * -63.5 to +63 dBm/MHz with a 0.5 step
+ * value - 128 indicates that the corresponding 20
+ * MHz channel cannot be used for transmission.
+ * value +127 indicates that no maximum PSD limit
+ * is specified for the corresponding 20 MHz channel
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @reserved: reserved (padding)
+ */
+struct iwl_txpower_constraints_cmd {
+ __le16 link_id;
+ __le16 ap_type;
+ __s8 eirp_pwr[IWL_MAX_TX_EIRP_PWR_MAX_SIZE];
+ __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE];
+ u8 reserved[3];
+} __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 93078f8cc08c..6684506f4fc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -14,6 +14,10 @@
*/
enum iwl_scan_subcmd_ids {
/**
+ * @CHANNEL_SURVEY_NOTIF: &struct iwl_umac_scan_channel_survey_notif
+ */
+ CHANNEL_SURVEY_NOTIF = 0xFB,
+ /**
* @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
*/
OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
@@ -62,6 +66,8 @@ struct iwl_ssid_ie {
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
#define IWL_MAX_SCHED_SCAN_PLANS 2
+#define IWL_MAX_NUM_NOISE_RESULTS 22
+
enum scan_framework_client {
SCAN_CLIENT_SCHED_SCAN = BIT(0),
SCAN_CLIENT_NETDETECT = BIT(1),
@@ -642,10 +648,13 @@ enum iwl_umac_scan_general_flags {
* notification per channel or not.
* @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
* reorder optimization or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS: Enable channel statistics
+ * collection when #IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE is set.
*/
enum iwl_umac_scan_general_flags2 {
IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = BIT(3),
};
/**
@@ -1258,4 +1267,26 @@ struct iwl_umac_scan_iter_complete_notif {
struct iwl_scan_results_notif results[];
} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
+/**
+ * struct iwl_umac_scan_channel_survey_notif - data for survey
+ * @channel: the channel scanned
+ * @band: band of channel
+ * @noise: noise floor measurements in negative dBm, invalid 0xff
+ * @reserved: for future use and alignment
+ * @active_time: time in ms the radio was turned on (on the channel)
+ * @busy_time: time in ms the channel was sensed busy, 0 for a clean channel
+ * @tx_time: time the radio spent transmitting data
+ * @rx_time: time the radio spent receiving data
+ */
+struct iwl_umac_scan_channel_survey_notif {
+ __le32 channel;
+ __le32 band;
+ u8 noise[IWL_MAX_NUM_NOISE_RESULTS];
+ u8 reserved[2];
+ __le32 active_time;
+ __le32 busy_time;
+ __le32 tx_time;
+ __le32 rx_time;
+} __packed; /* SCAN_CHANNEL_SURVEY_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index d9e4c75403b8..bbd176d88820 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -793,7 +793,8 @@ enum iwl_mac_beacon_flags {
* @reserved: reserved
* @link_id: the firmware id of the link that will use this beacon
* @tim_idx: the offset of the tim IE in the beacon
- * @tim_size: the length of the tim IE
+ * @tim_size: the length of the tim IE (version < 14)
+ * @btwt_offset: offset to the broadcast TWT IE if present (version >= 14)
* @ecsa_offset: offset to the ECSA IE if present
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
@@ -805,14 +806,18 @@ struct iwl_mac_beacon_cmd {
__le32 reserved;
__le32 link_id;
__le32 tim_idx;
- __le32 tim_size;
+ union {
+ __le32 tim_size;
+ __le32 btwt_offset;
+ };
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[];
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10,
* BEACON_TEMPLATE_CMD_API_S_VER_11,
* BEACON_TEMPLATE_CMD_API_S_VER_12,
- * BEACON_TEMPLATE_CMD_API_S_VER_13
+ * BEACON_TEMPLATE_CMD_API_S_VER_13,
+ * BEACON_TEMPLATE_CMD_API_S_VER_14
*/
struct iwl_beacon_notif {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index db6d7013df66..945ffc083d25 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1026,17 +1026,12 @@ static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 prph_val;
int i;
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = size;
- for (i = 0; i < le32_to_cpu(size); i += 4) {
- prph_val = iwl_read_prph(fwrt->trans, addr + i);
- if (iwl_trans_is_hw_error_value(prph_val))
- return -EBUSY;
- *val++ = cpu_to_le32(prph_val);
- }
+ for (i = 0; i < le32_to_cpu(size); i += 4)
+ *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -3081,11 +3076,10 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
- u32 policy;
- u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ /* also checks 'desc' for pre-ini mode, since that shadows in union */
if (!dump_data->trig) {
IWL_ERR(fwrt, "dump trigger data is not set\n");
goto out;
@@ -3113,13 +3107,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
- policy = le32_to_cpu(dump_data->trig->apply_policy);
- time_point = le32_to_cpu(dump_data->trig->time_point);
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
+ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
}
+
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index f69d29e531c8..ae05227b6153 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -395,6 +395,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
* protected) A-MSDU.
* @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
+ * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise
+ * passive channels
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -494,6 +496,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 36d506463e0e..b9bb3636e88f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -38,6 +38,7 @@ IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
@@ -347,7 +348,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_ver);
}
- } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ } else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
@@ -443,7 +444,7 @@ int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
return enabled;
}
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
@@ -490,7 +491,127 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
-IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
+static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
+{
+ size_t cmd_size;
+
+ switch (cmd_ver) {
+ case 10:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd);
+ break;
+ case 9:
+ case 8:
+ case 7:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
+ break;
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
+ break;
+ case 4:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+ break;
+ case 3:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
+ break;
+ case 2:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
+ break;
+ default:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
+ return cmd_size;
+}
+
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size)
+{
+ int ret;
+ u32 value;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), 1);
+
+ memset(cmd, 0, sizeof(*cmd));
+ *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
+
+ cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret)
+ cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret) {
+ if (cmd_ver < 9)
+ value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
+ else
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret) {
+ if (cmd_ver < 8)
+ value &= ~ACTIVATE_5G2_IN_WW_MASK;
+ cmd->chan_state_active_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret)
+ cmd->force_disable_channels_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret)
+ cmd->edt_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ if (cmd->config_bitmap ||
+ cmd->oem_uhb_allow_bitmap ||
+ cmd->oem_11ax_allow_bitmap ||
+ cmd->oem_unii4_allow_bitmap ||
+ cmd->chan_state_active_bitmap ||
+ cmd->force_disable_channels_bitmap ||
+ cmd->edt_bitmap ||
+ cmd->oem_320mhz_allow_bitmap) {
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->config_bitmap),
+ le32_to_cpu(cmd->oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
+ le32_to_cpu(cmd->oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd->chan_state_active_bitmap),
+ cmd_ver);
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd->force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->edt_bitmap),
+ le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
+ } else {
+ return 1;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
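The new iwl_fill_lari_config() helper fills the command and returns 0 when at least one bitmap is non-zero (with *cmd_size set for the detected command version), or 1 when there is nothing to configure. A minimal caller sketch, mirroring the mvm/fw.c hunk further below (error handling trimmed, purely illustrative):

	struct iwl_lari_config_change_cmd cmd;
	size_t cmd_size;
	int ret;

	ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
	if (ret)
		return;		/* 1 means "nothing to send", not an error */

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(REGULATORY_AND_NVM_GROUP,
					   LARI_CONFIG_CHANGE),
				   0, cmd_size, &cmd);
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"Failed to send LARI_CONFIG_CHANGE (%d)\n", ret);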
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 28e774766847..633c9ad9af84 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#ifndef __fw_regulatory_h__
@@ -11,6 +11,7 @@
#include "fw/api/power.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
+#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@@ -39,7 +40,6 @@
#define IWL_PPAG_ETSI_CHINA_MASK 3
#define IWL_PPAG_REV3_MASK 0x7FF
-#define IWL_WTAS_BLACK_LIST_MAX 16
#define IWL_WTAS_ENABLED_MSK 0x1
#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
#define IWL_WTAS_ENABLE_IEC_MSK 0x4
@@ -132,6 +132,23 @@ enum iwl_dsm_values_indonesia {
DSM_VALUE_INDONESIA_MAX
};
+enum iwl_dsm_unii4_bitmap {
+ DSM_VALUE_UNII4_US_OVERRIDE_MSK = BIT(0),
+ DSM_VALUE_UNII4_US_EN_MSK = BIT(1),
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = BIT(2),
+ DSM_VALUE_UNII4_ETSI_EN_MSK = BIT(3),
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = BIT(4),
+ DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5),
+};
+
+#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_US_EN_MSK | \
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_ETSI_EN_MSK)
+#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_CANADA_EN_MSK)
+
enum iwl_dsm_values_rfi {
DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
@@ -184,8 +201,11 @@ int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size);
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index b2bc4fd37abf..9122f9a1260a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -46,6 +46,10 @@ struct iwl_fwrt_shared_mem_cfg {
* struct iwl_fwrt_dump_data - dump data
* @trig: trigger the worker was scheduled upon
* @fw_pkt: packet received from FW
+ *
+ * Note that which part of the union is used is decided by
+ * iwl_trans_dbg_ini_valid(): the 'trig' part is used if it
+ * returns %true, the 'desc' part otherwise.
*/
struct iwl_fwrt_dump_data {
union {
@@ -54,6 +58,7 @@ struct iwl_fwrt_dump_data {
struct iwl_rx_packet *fw_pkt;
};
struct {
+ /* must be first so it aliases 'trig' in the union */
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
};
@@ -177,7 +182,7 @@ struct iwl_fw_runtime {
u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- struct iwl_uats_table_cmd uats_table;
+ struct iwl_mcc_allowed_ap_type_cmd uats_table;
u8 uefi_tables_lock_status;
bool uats_enabled;
};
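A minimal sketch of the access rule described by the new kernel-doc note on struct iwl_fwrt_dump_data; iwl_trans_dbg_ini_valid() is the helper named in that note, while use_ini_trigger() and use_legacy_desc() are placeholders for whatever the caller does with the members:

	static void handle_dump_data(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data)
	{
		if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
			/* ini debug mode: 'trig' (and 'fw_pkt') are the live members */
			use_ini_trigger(dump_data->trig);
		} else {
			/* legacy debug mode: 'desc' and 'monitor_only' are the live members */
			use_legacy_desc(dump_data->desc, dump_data->monitor_only);
		}
	}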
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index e81fc0129b9d..fb982d4fe851 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -674,6 +674,29 @@ out:
return ret;
}
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ struct uefi_cnv_wlan_wbem_data *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WBEM_NAME,
+ "WBEM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WBEM_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WBEM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from UEFI\n");
+out:
+ kfree(data);
+ return ret;
+}
+
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 303cc299d1bc..1f8884ca8997 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -21,6 +21,7 @@
#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_SGOM_MAP_SIZE 339
@@ -35,6 +36,7 @@
#define IWL_UEFI_SPLC_REVISION 0
#define IWL_UEFI_WRDD_REVISION 0
#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
struct pnvm_sku_package {
@@ -178,6 +180,20 @@ struct uefi_cnv_var_general_cfg {
u32 functions[UEFI_MAX_DSM_FUNCS];
} __packed;
+#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined
+ * in UEFI
+ * @revision: the revision of the table
+ * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC
+ * bit 0 - if set, 320MHz is enabled for Japan
+ * bit 1 - if set, 320MHz is enabled for South Korea
+ * bits 2-31 - reserved
+ */
+struct uefi_cnv_wlan_wbem_data {
+ u8 revision;
+ u32 wbem_320mhz_per_mcc;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -202,6 +218,7 @@ int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
u64 *dflt_pwr_limit);
int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
@@ -281,6 +298,11 @@ static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
return -ENOENT;
}
+static inline int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value)
{
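For reference, a self-contained sketch of how a revision-0 WBEM value decodes; the helper name and the literal masks are illustrative, the bit meanings (bit 0: Japan, bit 1: South Korea) come from the struct comment above:

	/* mirrors IWL_UEFI_WBEM_REV0_MASK = BIT(0) | BIT(1) */
	static bool wbem_320mhz_allowed(u32 wbem_320mhz_per_mcc, bool japan)
	{
		u32 wbem = wbem_320mhz_per_mcc & 0x3;

		return japan ? !!(wbem & 0x1) : !!(wbem & 0x2);
	}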
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 6aa4f7f9c708..732889f96ca2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
#include "iwl-drv.h"
@@ -421,6 +422,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SC 0x48
#define IWL_CFG_MAC_TYPE_SC2 0x49
#define IWL_CFG_MAC_TYPE_SC2F 0x4A
+#define IWL_CFG_MAC_TYPE_BZ_W 0x4B
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -429,8 +431,6 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
-#define IWL_CFG_RF_TYPE_MR 0x110
-#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_TYPE_WH 0x113
@@ -484,6 +484,7 @@ const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+extern const struct pci_device_id iwl_hw_card_ids[];
#endif
/*
@@ -541,6 +542,8 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_fm_name[];
+extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
extern const char iwl_sc2_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 1379dc2d231b..5b62933134cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2023 Intel Corporation
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -56,6 +56,8 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: tell the firmware to set
+ * SCU_FORCE_ACTIVE upon reset.
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
@@ -71,6 +73,7 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4696d73c8971..33654f228ee8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -192,12 +192,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
break;
- case IWL_CFG_RF_TYPE_MR:
- rf = "mr";
- break;
- case IWL_CFG_RF_TYPE_MS:
- rf = "ms";
- break;
case IWL_CFG_RF_TYPE_FM:
rf = "fm";
break;
@@ -1484,7 +1478,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
- bool load_module = false;
bool usniffer_images = false;
bool failure = true;
@@ -1732,19 +1725,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
}
} else {
- load_module = true;
+ request_module_nowait("%s", op->name);
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
complete(&drv->request_firmware_complete);
- /*
- * Load the module last so we don't block anything
- * else from proceeding if the module fails to load
- * or hangs loading.
- */
- if (load_module)
- request_module("%s", op->name);
failure = false;
goto free;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index baa39a18087a..149903f52567 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -392,11 +392,14 @@ static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
return NL80211_BAND_2GHZ;
}
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+static int iwl_init_channel_map(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
u32 sbands_flags, bool v4)
{
+ const struct iwl_cfg *cfg = trans->cfg;
+ struct device *dev = trans->dev;
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
@@ -478,11 +481,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags = 0;
- /* TODO: Don't put limitations on UHB devices as we still don't
- * have NVM for them
- */
- if (cfg->uhb_supported)
- channel->flags = 0;
+ if (fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS))
+ channel->flags |= IEEE80211_CHAN_CAN_MONITOR;
+
iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
channel->hw_value, ch_flags);
IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
@@ -597,7 +599,8 @@ static const u8 iwl_vendor_caps[] = {
static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -753,7 +756,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -906,7 +910,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
u8 tx_chains, u8 rx_chains,
const struct iwl_fw *fw)
{
- bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+ bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO));
bool no_320;
no_320 = (!trans->trans_cfg->integrated &&
@@ -1023,8 +1028,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
- case IWL_CFG_RF_TYPE_MR:
- case IWL_CFG_RF_TYPE_MS:
case IWL_CFG_RF_TYPE_FM:
case IWL_CFG_RF_TYPE_WH:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
@@ -1176,12 +1179,11 @@ static void iwl_init_sbands(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct device *dev = trans->dev;
- const struct iwl_cfg *cfg = trans->cfg;
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
- n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+ n_channels = iwl_init_channel_map(trans, fw, data, nvm_ch_flags,
sbands_flags, v4);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index a7d44df06eab..898e22e0d1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -371,7 +371,10 @@ enum {
#define CNVI_AUX_MISC_CHIP 0xA200B0
#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_GL 0x910
#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_I 0x900
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_W 0x901
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
@@ -453,11 +456,7 @@ enum {
#define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501
#define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532
#define REG_CRF_ID_TYPE_GF 0x410
-#define REG_CRF_ID_TYPE_GF_TC 0xF08
-#define REG_CRF_ID_TYPE_MR 0x810
#define REG_CRF_ID_TYPE_FM 0x910
-#define REG_CRF_ID_TYPE_FMI 0x930
-#define REG_CRF_ID_TYPE_FMR 0x900
#define REG_CRF_ID_TYPE_WHP 0xA10
#define HPM_DEBUG 0xA03440
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
index 8e3ef0347db7..e05f9421be4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
@@ -5,4 +5,4 @@ iwlmei-y += net.o
iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o
CFLAGS_trace.o := -I$(src)
-ccflags-y += -I $(srctree)/$(src)/../
+ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 593fe28d89cf..8873904f51ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IWLMVM) += iwlmvm.o
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
@@ -15,4 +16,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
-ccflags-y += -I $(srctree)/$(src)/../
+subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 535edb51d1c0..ad3e14a0d043 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -219,15 +219,13 @@ struct iwl_bt_iterator_data {
static inline
void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link_info,
bool enable, int rssi)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->bf_data.last_bt_coex_event = rssi;
- mvmvif->bf_data.bt_coex_max_thold =
+ link_info->bf_data.last_bt_coex_event = rssi;
+ link_info->bf_data.bt_coex_max_thold =
enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
- mvmvif->bf_data.bt_coex_min_thold =
+ link_info->bf_data.bt_coex_min_thold =
enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
@@ -255,44 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
-static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return;
-
- /* Done already */
- if (mvmvif->bt_coex_esr_disabled == !enable)
- return;
-
- mvmvif->bt_coex_esr_disabled = !enable;
-
- /* Nothing to do */
- if (mvmvif->esr_active == enable)
- return;
-
- if (enable) {
- /* Try to re-enable eSR*/
- iwl_mvm_mld_select_links(mvm, vif, false);
- return;
- }
-
- /*
- * Find the primary link, as we want to switch to it and drop the
- * secondary one.
- */
- link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
- WARN_ON(link_id < 0);
-
- ieee80211_set_active_links_async(vif,
- vif->active_links & BIT(link_id));
-}
-
/*
* This function receives the LB link id and checks if eSR should be
* enabled or disabled (due to BT coex)
@@ -300,35 +260,25 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
bool
iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- int link_id, int primary_link)
+ s32 link_rssi,
+ bool primary)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
bool have_wifi_loss_rate =
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
BT_PROFILE_NOTIFICATION, 0) > 4;
- s8 link_rssi = 0;
u8 wifi_loss_rate;
- lockdep_assert_held(&mvm->mutex);
-
if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
return true;
- /* If LB link is the primary one we should always disable eSR */
- if (link_id == primary_link)
+ if (primary)
return false;
/* The feature is not supported */
if (!have_wifi_loss_rate)
return true;
- /*
- * We might not have a link_info when checking whether we can
- * (re)enable eSR - the LB link might not exist yet
- */
- if (link_info)
- link_rssi = (s8)link_info->beacon_stats.avg_signal;
/*
* In case we don't know the RSSI - take the lower wifi loss,
@@ -338,7 +288,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
if (!link_rssi)
wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- else if (!mvmvif->bt_coex_esr_disabled)
+ else if (mvmvif->esr_active)
/* RSSI needs to get really low to disable eSR... */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
@@ -354,23 +304,24 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
{
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- usable_links);
- bool enable;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
- /* Not assoc, not MLD vif or only one usable link */
- if (primary_link < 0)
+ if (!ieee80211_vif_is_mld(vif) ||
+ !iwl_mvm_vif_from_mac80211(vif)->authorized ||
+ WARN_ON(!link))
return;
- enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
- primary_link);
-
- iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+ if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
+ (s8)link->beacon_stats.avg_signal,
+ link_id == iwl_mvm_get_primary_link(vif)))
+ /* In case we decided to exit eSR - stay with the primary */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
+ iwl_mvm_get_primary_link(vif));
}
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
@@ -412,13 +363,13 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
smps_mode, link_id);
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false,
+ 0);
}
return;
}
- iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
@@ -508,13 +459,12 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
!vif->cfg.assoc) {
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0);
return;
}
/* try to get the avg rssi from fw */
- ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+ ave_rssi = link_info->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -530,7 +480,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi);
}
/* must be called under rcu_read_lock */
@@ -555,10 +505,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
- /* When BT is off this will be 0 */
- if (data->notif->wifi_loss_low_rssi == BT_OFF)
- iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
-
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index f5122c4678a1..3cbeaddf4358 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -14,6 +14,8 @@
#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -123,5 +125,16 @@
#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
#define IWL_MVM_AUTO_EML_ENABLE true
+#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
+#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
+#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
+#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
+#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
+
+#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 553c6fffc7c6..71e6b06481a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1152,7 +1152,8 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0,
+ mvm_link->ap_sta_id);
}
static int
@@ -1242,7 +1243,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret, primary_link;
+ int ret;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1260,26 +1261,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
- /*
- * Select the 'best' link. May need to revisit, it seems
- * better to not optimize for throughput but rather range,
- * reliability and power here - and select 2.4 GHz ...
- */
- primary_link =
- iwl_mvm_mld_get_primary_link(mvm, vif,
- vif->active_links);
-
- if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
- vif->active_links))
- primary_link = __ffs(vif->active_links);
-
- ret = ieee80211_set_active_links(vif, BIT(primary_link));
- if (ret)
- return ret;
- } else {
- primary_link = 0;
- }
+ ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+ if (ret)
+ return ret;
mutex_lock(&mvm->mutex);
@@ -1289,7 +1273,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- mvm_link = mvmvif->link[primary_link];
+ mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)];
if (WARN_ON_ONCE(!mvm_link)) {
ret = -EINVAL;
goto out_noreset;
@@ -1470,6 +1454,9 @@ struct iwl_wowlan_status_data {
struct iwl_multicast_key_data igtk;
struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ int num_mlo_keys;
+ struct iwl_wowlan_mlo_gtk mlo_keys[WOWLAN_MAX_MLO_KEYS];
+
u8 *wake_packet;
};
@@ -1828,6 +1815,10 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
void *_data)
{
struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1916,6 +1907,10 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
struct iwl_mvm_d3_gtk_iter_data *data = _data;
struct iwl_wowlan_status_data *status = data->status;
s8 keyidx;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1971,6 +1966,169 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
}
}
+struct iwl_mvm_d3_mlo_old_keys {
+ u32 cipher[IEEE80211_MLD_MAX_NUM_LINKS][WOWLAN_MLO_GTK_KEY_NUM_TYPES];
+ struct ieee80211_key_conf *key[IEEE80211_MLD_MAX_NUM_LINKS][8];
+};
+
+static void iwl_mvm_mlo_key_ciphers(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_d3_mlo_old_keys *old_keys = data;
+ enum iwl_wowlan_mlo_gtk_type key_type;
+
+ if (key->link_id < 0)
+ return;
+
+ if (WARN_ON(key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key->keyidx >= 8))
+ return;
+
+ if (WARN_ON(old_keys->key[key->link_id][key->keyidx]))
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_GTK;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_IGTK;
+ break;
+ } else if (key->keyidx == 6 || key->keyidx == 7) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_BIGTK;
+ break;
+ }
+ return;
+ default:
+ /* ignore WEP/TKIP or unknown ciphers */
+ return;
+ }
+
+ old_keys->cipher[key->link_id][key_type] = key->cipher;
+ old_keys->key[key->link_id][key->keyidx] = key;
+}
+
+static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_mvm_d3_mlo_old_keys *old_keys;
+ bool ret = true;
+
+ IWL_DEBUG_WOWLAN(mvm, "Num of MLO Keys: %d\n", status->num_mlo_keys);
+ if (!status->num_mlo_keys)
+ return true;
+
+ old_keys = kzalloc(sizeof(*old_keys), GFP_KERNEL);
+ if (!old_keys)
+ return false;
+
+ /* find the cipher for each mlo key */
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mlo_key_ciphers, old_keys);
+
+ for (i = 0; i < status->num_mlo_keys; i++) {
+ struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i];
+ struct ieee80211_key_conf *key, *old_key;
+ struct ieee80211_key_seq seq;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {};
+ u16 flags = le16_to_cpu(mlo_key->flags);
+ int j, link_id, key_id, key_type;
+
+ link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+ key_type = u16_get_bits(flags,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+
+ if (!(vif->valid_links & BIT(link_id)))
+ continue;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key_id >= 8 ||
+ key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES))
+ continue;
+
+ conf.conf.cipher = old_keys->cipher[link_id][key_type];
+ /* WARN_ON? */
+ if (!conf.conf.cipher)
+ continue;
+
+ conf.conf.keylen = 0;
+ switch (conf.conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ }
+
+ if (WARN_ON(!conf.conf.keylen ||
+ conf.conf.keylen > sizeof(conf.key)))
+ continue;
+
+ memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen);
+ conf.conf.keyidx = key_id;
+
+ old_key = old_keys->key[link_id][key_id];
+ if (old_key) {
+ IWL_DEBUG_WOWLAN(mvm,
+ "Remove MLO key id %d, link id %d\n",
+ key_id, link_id);
+ ieee80211_remove_key(old_key);
+ }
+
+ IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n",
+ key_id, link_id);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ if (WARN_ON(IS_ERR(key))) {
+ ret = false;
+ goto out;
+ }
+
+ /*
+ * mac80211 expects the PN in big-endian. Note that seq is a
+ * union of all cipher types (ccmp, gcmp, cmac, gmac) and they
+ * all have the same 6-byte pn field, so just copy it to ccmp.pn.
+ */
+ for (j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
+
+out:
+ kfree(old_keys);
+ return ret;
+}
+
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
@@ -2174,6 +2332,9 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
return false;
}
+ if (!iwl_mvm_mlo_gtk_rekey(status, vif, mvm))
+ return false;
+
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
}
@@ -2301,9 +2462,10 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
struct iwl_wowlan_info_notif *data,
struct iwl_wowlan_status_data *status,
- u32 len)
+ u32 len, bool has_mlo_keys)
{
u32 i;
+ u32 expected_len = sizeof(*data);
if (!data) {
IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
@@ -2311,7 +2473,11 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
return;
}
- if (len < sizeof(*data)) {
+ if (has_mlo_keys)
+ expected_len += (data->num_mlo_link_keys *
+ sizeof(status->mlo_keys[0]));
+
+ if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
return;
@@ -2331,6 +2497,17 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
le32_to_cpu(data->num_of_gtk_rekeys);
status->received_beacons = le32_to_cpu(data->received_beacons);
status->tid_tear_down = data->tid_tear_down;
+
+ if (has_mlo_keys && data->num_mlo_link_keys) {
+ status->num_mlo_keys = data->num_mlo_link_keys;
+ if (IWL_FW_CHECK(mvm,
+ status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many mlo keys: %d, max %d\n",
+ status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+ memcpy(status->mlo_keys, data->mlo_gtks,
+ status->num_mlo_keys * sizeof(status->mlo_keys[0]));
+ }
}
static void
@@ -2551,6 +2728,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
int i;
bool keep = false;
struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+
+ if (WARN_ON(!mvm_link))
+ goto out_unlock;
if (!status)
goto out_unlock;
@@ -2558,8 +2741,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
status->wakeup_reasons);
- /* still at hard-coded place 0 for D3 image */
- mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id);
if (!mvm_ap_sta)
goto out_unlock;
@@ -3072,7 +3254,8 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
- d3_data->status, len);
+ d3_data->status, len,
+ wowlan_info_ver > 3);
}
d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
@@ -3270,6 +3453,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
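The PN handling at the end of iwl_mvm_mlo_gtk_rekey() is easy to get backwards; a standalone userspace illustration of what the copy loop does (purely illustrative, not driver code):

	#include <stdio.h>

	int main(void)
	{
		/* six PN bytes as found in mlo_key->pn[] */
		unsigned char fw_pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
		unsigned char seq_pn[6];
		int j;

		/* same loop as the patch: reverse into mac80211's big-endian order */
		for (j = 5; j >= 0; j--)
			seq_pn[5 - j] = fw_pn[j];

		for (j = 0; j < 6; j++)
			printf("%02x", seq_pn[j]);	/* prints 060504030201 */
		printf("\n");
		return 0;
	}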
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 51b01f7528be..17c97dfbc62a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -407,7 +407,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
+ if (mvmvif->bf_enabled)
cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;
@@ -692,6 +692,96 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!action) {
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ ret = iwl_mvm_int_mlo_scan(mvm, vif);
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned long esr_mask;
+ char *buf;
+ int bufsz, pos, i;
+ ssize_t rv;
+
+ mutex_lock(&mvm->mutex);
+ esr_mask = mvmvif->esr_disable_reason;
+ mutex_unlock(&mvm->mutex);
+
+ bufsz = hweight32(esr_mask) * 32 + 40;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
+ esr_mask);
+ for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
+ pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
+ iwl_get_esr_state_string(BIT(i)));
+
+ rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return rv;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 reason;
+ u8 block;
+ int ret;
+
+ ret = sscanf(buf, "%u %hhu", &reason, &block);
+ if (ret < 0)
+ return ret;
+
+ if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (block)
+ iwl_mvm_block_esr(mvm, vif, reason,
+ iwl_mvm_get_primary_link(vif));
+ else
+ iwl_mvm_unblock_esr(mvm, vif, reason);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -711,6 +801,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -738,6 +830,10 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+ debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
+ &mvmvif->ftm_unprotected);
+ MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
@@ -748,7 +844,9 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct dentry *dbgfs_dir = vif->debugfs_dir;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
/* this will happen in monitor mode */
if (!dbgfs_dir)
@@ -761,10 +859,11 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir);
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
}
void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 4863a3c74640..72a3d71f46f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
@@ -551,6 +553,15 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
break;
}
rcu_read_unlock();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->ftm_unprotected) {
+ target->sta_id = IWL_MVM_INVALID_STA;
+ target->initiator_ap_flags &=
+ ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
+ }
+
+#endif
} else {
target->sta_id = IWL_MVM_INVALID_STA;
}
@@ -713,6 +724,12 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_ftm_pasn_entry *entry;
u32 flags = le32_to_cpu(target->initiator_ap_flags);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ftm_unprotected)
+ return;
+#endif
if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
IWL_INITIATOR_AP_FLAGS_TB)))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e1c2b7fc92ab..e7f5978ef2d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -494,7 +494,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
- UATS_TABLE_CMD),
+ MCC_ALLOWED_AP_TYPE_CMD),
.flags = 0,
.data[0] = &mvm->fwrt.uats_table,
.len[0] = sizeof(mvm->fwrt.uats_table),
@@ -516,7 +516,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 1) {
IWL_DEBUG_RADIO(mvm,
- "UATS_TABLE_CMD ver %d not supported\n",
+ "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
cmd_ver);
return;
}
@@ -529,9 +529,10 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
- IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret);
+ IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
else
- IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n");
+ IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
}
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
@@ -666,7 +667,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
CNVI_PMU_STEP_FLOW) &
CNVI_PMU_STEP_FLOW_FORCE_URM);
@@ -896,11 +897,13 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
u32 n_subbands;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 7) {
+ if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
} else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
@@ -1223,94 +1226,12 @@ static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
+ struct iwl_lari_config_change_cmd cmd;
+ size_t cmd_size;
int ret;
- u32 value;
- struct iwl_lari_config_change_cmd_v7 cmd = {};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE), 1);
-
- cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
- if (!ret)
- cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
- if (!ret)
- cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
if (!ret) {
- if (cmd_ver < 8)
- value &= ~ACTIVATE_5G2_IN_WW_MASK;
- cmd.chan_state_active_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
- if (!ret)
- cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &value);
- if (!ret)
- cmd.force_disable_channels_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &value);
- if (!ret)
- cmd.edt_bitmap = cpu_to_le32(value);
-
- if (cmd.config_bitmap ||
- cmd.oem_uhb_allow_bitmap ||
- cmd.oem_11ax_allow_bitmap ||
- cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap ||
- cmd.force_disable_channels_bitmap ||
- cmd.edt_bitmap) {
- size_t cmd_size;
-
- switch (cmd_ver) {
- case 8:
- case 7:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
- break;
- case 6:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
- break;
- case 5:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- break;
- case 4:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- break;
- case 3:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- break;
- case 2:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- break;
- default:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
- break;
- }
-
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap),
- le32_to_cpu(cmd.oem_11ax_allow_bitmap));
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
- le32_to_cpu(cmd.oem_unii4_allow_bitmap),
- le32_to_cpu(cmd.chan_state_active_bitmap),
- cmd_ver);
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap),
- le32_to_cpu(cmd.force_disable_channels_bitmap));
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n",
- le32_to_cpu(cmd.edt_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index f13f13e6b71a..6ec9a8e21a34 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -5,6 +5,48 @@
#include "mvm.h"
#include "time-event.h"
+#define HANDLE_ESR_REASONS(HOW) \
+ HOW(BLOCKED_PREVENTION) \
+ HOW(BLOCKED_WOWLAN) \
+ HOW(BLOCKED_TPT) \
+ HOW(BLOCKED_FW) \
+ HOW(BLOCKED_NON_BSS) \
+ HOW(EXIT_MISSED_BEACON) \
+ HOW(EXIT_LOW_RSSI) \
+ HOW(EXIT_COEX) \
+ HOW(EXIT_BANDWIDTH) \
+ HOW(EXIT_CSA) \
+ HOW(EXIT_LINK_USAGE)
+
+static const char *const iwl_mvm_esr_states_names[] = {
+#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
+ HANDLE_ESR_REASONS(NAME_ENTRY)
+};
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
+{
+ int offs = ilog2(state);
+
+ if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
+ !iwl_mvm_esr_states_names[offs])
+ return "UNKNOWN";
+
+ return iwl_mvm_esr_states_names[offs];
+}
+
+static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mvm,
+ "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_ESR_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvm_vif)
{
@@ -46,6 +88,27 @@ static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+ if (link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ link_conf);
+ }
+
+ return 0;
+}
+
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -55,19 +118,14 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+ int ret;
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
- link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
- mvmvif);
- if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
- return -EINVAL;
-
- rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
- link_conf);
- }
+ ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf);
+ if (ret)
+ return ret;
/* Update SF - Disable if needed. if this fails, SF might still be on
* while many macs are bound, which is forbidden - so fail the binding.
@@ -92,6 +150,65 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
+struct iwl_mvm_esr_iter_data {
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ bool lift_block;
+};
+
+static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_esr_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_id];
+ if (vif == data->vif && link_id == data->link_id)
+ continue;
+ if (link_info->active)
+ data->lift_block = false;
+ }
+}
+
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active)
+{
+ /* An active link of a non-station vif blocks EMLSR. Upon activation,
+ * block EMLSR on the bss vif. Upon deactivation, check whether this
+ * link was the last active non-station link, and if so unblock the
+ * bss vif.
+ */
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_esr_iter_data data = {
+ .vif = vif,
+ .link_id = link_id,
+ .lift_block = true,
+ };
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return 0;
+
+ if (active)
+ return iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_NON_BSS);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_esr_vif_iterator, &data);
+ if (data.lift_block) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ return 0;
+}
+
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -248,6 +365,25 @@ send_cmd:
return ret;
}
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ /* mac80211 thought we had the link, but it was never configured */
+ if (WARN_ON(!link_info ||
+ link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ NULL);
+ iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
+ return 0;
+}
+
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -257,15 +393,11 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
int ret;
- /* mac80211 thought we have the link, but it was never configured */
- if (WARN_ON(!link_info ||
- link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf);
+ if (ret)
return 0;
- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
- NULL);
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
@@ -298,3 +430,702 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
+
+struct iwl_mvm_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 177),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1750),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_LB 30
+#define DEFAULT_CHAN_LOAD_HB 15
+#define DEFAULT_CHAN_LOAD_UHB 0
+
+/* Factor calculations use fixed-point arithmetic with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
+
+static unsigned int
+iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels, n_punctured, puncturing_penalty;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width : (%d)\n", mhz))
+ return SCALE_FACTOR;
+
+ /* No puncturing, no penalty */
+ if (mhz < 80)
+ return SCALE_FACTOR;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+ /* how many of these are punctured */
+ n_punctured = hweight16(link_conf->chanreq.oper.punctured);
+
+ puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
+ return SCALE_FACTOR - puncturing_penalty;
+}
+
+static unsigned int
+iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif_link_info *mvm_link =
+ iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
+ const struct element *bss_load_elem;
+ const struct ieee80211_bss_load_elem *bss_load;
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+ unsigned int chan_load;
+ u32 chan_load_by_us;
+
+ rcu_read_lock();
+ bss_load_elem = ieee80211_bss_get_elem(link_conf->bss,
+ WLAN_EID_QBSS_LOAD);
+
+	/* If there is no BSS Load element, take the defaults */
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load)) {
+ rcu_read_unlock();
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_LB;
+ break;
+ case NL80211_BAND_5GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_HB;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_UHB;
+ break;
+ default:
+ chan_load = 0;
+ break;
+ }
+		/* The defaults are given as percentages */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+ }
+
+ bss_load = (const void *)bss_load_elem->data;
+ /* Channel util is in range 0-255 */
+ chan_load = bss_load->channel_util;
+ rcu_read_unlock();
+
+ if (!mvm_link || !mvm_link->active)
+ return chan_load;
+
+ if (WARN_ONCE(!mvm_link->phy_ctxt,
+ "Active link (%u) without phy ctxt assigned!\n",
+ link_conf->link_id))
+ return chan_load;
+
+	/* The channel load caused by us is given as a percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
+
+	/* Only use firmware values that can possibly be valid */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
+
+static unsigned int
+iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
+{
+ return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 on error */
+VISIBLE_IF_IWLWIFI_KUNIT
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int i, rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ)
+ link_rssi += 4;
+
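+	/* rssi[0] is the 2.4 GHz (LB) column, rssi[1] the 5/6 GHz (HB/UHB) column */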
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ /* Get grade based on RSSI */
+ for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mvm_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* apply the channel load and puncturing factors */
+ grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
+
+static
+u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /* TODO: don't select links that weren't discovered in the last scan */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
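+		/* cfg80211 reports the BSS signal in mBm, convert to dBm */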
+ data[n_data].signal = link_conf->bss->signal / 100;
+ data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
+
+ if (data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+struct iwl_mvm_bw_to_rssi_threshs {
+ s8 low;
+ s8 high;
+};
+
+#define BW_TO_RSSI_THRESHOLDS(_bw) \
+ [IWL_PHY_CHANNEL_MODE ## _bw] = { \
+ .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
+ .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
+ }
+
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
+ BW_TO_RSSI_THRESHOLDS(20),
+ BW_TO_RSSI_THRESHOLDS(40),
+ BW_TO_RSSI_THRESHOLDS(80),
+ BW_TO_RSSI_THRESHOLDS(160)
+ /* 320 MHz has the same thresholds as 20 MHz */
+ };
+ const struct iwl_mvm_bw_to_rssi_threshs *threshs;
+ u8 chan_width = iwl_mvm_get_channel_width(chandef);
+
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+	/* 320 MHz uses the same thresholds as 20 MHz (see the map above) */
+ if (chan_width == IWL_PHY_CHANNEL_MODE320)
+ chan_width = IWL_PHY_CHANNEL_MODE20;
+
+ threshs = &bw_to_rssi_threshs_map[chan_width];
+
+ return low ? threshs->low : threshs->high;
+}
+
+static u32
+iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mvm->hw->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mvm_esr_state ret = 0;
+ s8 thresh;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+ return false;
+
+	/* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
+ primary)))
+ ret |= IWL_MVM_ESR_EXIT_COEX;
+
+ thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
+ false);
+
+ if (link->signal < thresh)
+ ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MVM_ESR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Link %d is not allowed for esr\n",
+ link->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ }
+ return ret;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_mvm_esr_state ret = 0;
+
+ /* Per-link considerations */
+ if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
+ iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
+ return false;
+
+ if (a->chandef->width != b->chandef->width ||
+ !(a->chandef->chan->band == NL80211_BAND_6GHZ &&
+ b->chandef->chan->band == NL80211_BAND_5GHZ))
+ ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ return false;
+ }
+
+	return true;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
+
+/*
+ * Returns the combined eSR grade of two given links.
+ * Returns 0 if eSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mvm_get_chan_load(primary_conf);
+
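+	/* The busier the primary channel, the more the secondary link adds to the combined grade */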
+ return a->grade +
+ ((b->grade * primary_load) / SCALE_FACTOR);
+}
+
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mvm_link_sel_data *best_link;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u8 best, primary_link, best_in_pair, n_data;
+ u16 max_esr_grade = 0, new_active_links;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+
+ if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
+ return;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+	/* The logic below is a simplified version that does not handle more
+	 * than 2 links
+	 */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
+ &best);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ best_link = &data[best];
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+
+	/* eSR is not supported, is blocked, or there is only one usable link */
+ if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
+ mvmvif->esr_disable_reason || n_data == 1)
+ goto set_active;
+
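+	/* Check all link pairs and keep the one with the highest combined EMLSR grade */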
+ for (u8 a = 0; a < n_data; a++)
+ for (u8 b = a + 1; b < n_data; b++) {
+ u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
+ &data[b],
+ &best_in_pair);
+
+ if (esr_grade <= max_esr_grade)
+ continue;
+
+ max_esr_grade = esr_grade;
+ primary_link = best_in_pair;
+ new_active_links = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+
+ /* No valid pair was found, go with the best link */
+ if (hweight16(new_active_links) <= 1)
+ goto set_active;
+
+ /* For equal grade - prefer EMLSR */
+ if (best_link->grade > max_esr_grade) {
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+ }
+set_active:
+ IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
+ new_active_links, primary_link);
+ ieee80211_set_active_links_async(vif, new_active_links);
+ mvmvif->link_selection_res = new_active_links;
+ mvmvif->link_selection_primary = primary_link;
+}
+
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* relevant data is written with both locks held, so read with either */
+ lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) ||
+ lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx));
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (mvmvif->esr_active &&
+ !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links)))
+ return mvmvif->primary_link;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
+
+/* Reasons that can cause esr prevention */
+#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
+#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
+#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
+#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
+
+static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ enum iwl_mvm_esr_state reason)
+{
+ bool timeout_expired = time_after(jiffies,
+ mvmvif->last_esr_exit.ts +
+ IWL_MVM_PREVENT_ESR_TIMEOUT);
+ unsigned long delay;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Only handle reasons that can cause prevention */
+ if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
+ return false;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
+ mvmvif->exit_same_reason_count = 1;
+ return false;
+ }
+
+ mvmvif->exit_same_reason_count++;
+ if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
+ mvmvif->exit_same_reason_count > 3))
+ return false;
+
+ mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
+
+ /*
+ * For the second exit, use a short prevention, and for the third one,
+ * use a long prevention.
+ */
+ delay = mvmvif->exit_same_reason_count == 2 ?
+ IWL_MVM_ESR_PREVENT_SHORT :
+ IWL_MVM_ESR_PREVENT_LONG;
+
+ IWL_DEBUG_INFO(mvm,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mvmvif->exit_same_reason_count,
+ iwl_get_esr_state_string(reason), reason);
+
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk, delay);
+ return true;
+}
+
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
+
+/* API to exit eSR mode */
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 new_active_links;
+ bool prevented;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Nothing to do */
+ if (!mvmvif->esr_active)
+ return;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
+ return;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mvm,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_get_esr_state_string(reason), reason,
+ vif->active_links, new_active_links);
+
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Prevent EMLSR if needed */
+ prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
+
+ /* Remember why and when we exited EMLSR */
+ mvmvif->last_esr_exit.ts = jiffies;
+ mvmvif->last_esr_exit.reason = reason;
+
+ /*
+ * If EMLSR is prevented now - don't try to get back to EMLSR.
+	 * If we exited due to a blocking event, we will try to get back to
+	 * EMLSR when the corresponding unblocking event happens.
+ */
+ if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
+ return;
+
+ /* If EMLSR is not blocked - try enabling it again in 30 seconds */
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk,
+ round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
+}
+
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ if (!(mvmvif->esr_disable_reason & reason)) {
+ IWL_DEBUG_INFO(mvm,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+ }
+
+ mvmvif->esr_disable_reason |= reason;
+
+ iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
+}
+
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ int primary_link = iwl_mvm_get_primary_link(vif);
+ int ret;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* This should be called only with blocking reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return 0;
+
+	/* leave eSR immediately, not just asynchronously via iwl_mvm_block_esr() */
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+	/* additionally block here only for consistency and to avoid concurrency issues */
+ iwl_mvm_block_esr(mvm, vif, reason, primary_link);
+ mutex_unlock(&mvm->mutex);
+
+ return 0;
+}
+
+static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
+ IWL_MVM_TRIGGER_LINK_SEL_TIME);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
+ mvmvif->esr_active)
+ return;
+
+ IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+
+ /*
+ * If EMLSR was blocked for more than 30 seconds, or the last link
+ * selection decided to not enter EMLSR, trigger a new scan.
+ */
+ if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
+ IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
+ /*
+ * If EMLSR was blocked for less than 30 seconds, and the last link
+ * selection decided to use EMLSR, activate EMLSR using the previous
+ * link selection result.
+ */
+ } else {
+ IWL_DEBUG_INFO(mvm,
+ "Use the latest link selection result: 0x%x\n",
+ mvmvif->link_selection_res);
+ ieee80211_set_active_links_async(vif,
+ mvmvif->link_selection_res);
+ }
+}
+
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ /* No Change */
+ if (!(mvmvif->esr_disable_reason & reason))
+ return;
+
+ mvmvif->esr_disable_reason &= ~reason;
+
+ IWL_DEBUG_INFO(mvm,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ if (!mvmvif->esr_disable_reason)
+ iwl_mvm_esr_unblocked(mvm, vif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 228ede7b8957..5a06f887769a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1163,6 +1163,13 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon->len));
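+	/* Also point the FW at the broadcast TWT element, if present in the beacon */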
+ if (vif->type == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
+ beacon_cmd.btwt_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
@@ -1591,23 +1598,23 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
+ int link_id = -1;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
- rcu_read_lock();
-
/* before version four the ID in the notification refers to mac ID */
if (notif_ver < 4) {
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
} else {
struct ieee80211_bss_conf *bss_conf =
- iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
+ iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, false);
if (!bss_conf)
- goto out;
+ return;
vif = bss_conf->vif;
+ link_id = bss_conf->link_id;
}
IWL_DEBUG_INFO(mvm,
@@ -1620,7 +1627,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
le32_to_cpu(mb->num_expected_beacons));
if (!vif)
- goto out;
+ return;
mac_type = iwl_mvm_get_mac_type(vif);
@@ -1647,6 +1654,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
+ } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH &&
+ link_id >= 0 && hweight16(vif->active_links) > 1) {
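+		/* Too many missed beacons on this link while in EMLSR - drop to the other link */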
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_other_link(vif, link_id));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
@@ -1660,7 +1671,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
- goto out;
+ return;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@@ -1672,9 +1683,6 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-
-out:
- rcu_read_unlock();
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1935630d3def..486a6b8f3c97 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -359,8 +359,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* Set this early since we need to have it for the check below */
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
- !iwlwifi_mod_params.disable_11be)
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ !iwlwifi_mod_params.disable_11be) {
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ /* we handle this already earlier, but need it for MLO */
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ }
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -720,6 +723,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
mvm->trans->ops->d3_suspend &&
@@ -903,6 +908,8 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
&mvmtxq->state) &&
!test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
&mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA,
+ &mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
@@ -1097,15 +1104,21 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
- memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+ mvmvif->bf_enabled = false;
+ mvmvif->ba_enabled = false;
mvmvif->ap_sta = NULL;
+ mvmvif->esr_active = false;
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
for_each_mvm_vif_valid_link(mvmvif, link_id) {
mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA;
mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
mvmvif->link[link_id]->phy_ctxt = NULL;
mvmvif->link[link_id]->active = 0;
mvmvif->link[link_id]->igtk = NULL;
+ memset(&mvmvif->link[link_id]->bf_data, 0,
+ sizeof(mvmvif->link[link_id]->bf_data));
}
probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
@@ -1332,6 +1345,13 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ /* Stop internal MLO scan, if running */
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ mutex_unlock(&mvm->mutex);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
+ wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1399,7 +1419,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 7)
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
+ else if (cmd_ver == 7)
len = sizeof(cmd.v7);
else if (cmd_ver == 6)
len = sizeof(cmd.v6);
@@ -1418,6 +1440,20 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
+static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]);
+ }
+}
+
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
@@ -1434,6 +1470,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id;
mvmvif->csa_bcn_pending = false;
+ mvmvif->csa_blocks_tx = false;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
if (WARN_ON(!mvmsta)) {
@@ -1455,6 +1492,18 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
iwl_mvm_stop_session_protection(mvm, vif);
}
+ } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(vif->txq);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+
+ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(hw, vif->txq);
+ ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw);
+ local_bh_enable();
+
+ mvmvif->csa_blocks_tx = false;
}
mvmvif->ps_disabled = false;
@@ -1564,6 +1613,65 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
+static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
+ mlo_int_scan_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ mutex_lock(&mvmvif->mvm->mutex);
+
+ iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
+
+ mutex_unlock(&mvmvif->mvm->mutex);
+}
+
+static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
+ mutex_unlock(&mvm->mutex);
+}
+
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
+
+ wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
+ iwl_mvm_prevent_esr_done_wk);
+
+ wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
+ iwl_mvm_mlo_int_scan_wk);
+
+ wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
+ iwl_mvm_unblock_esr_tpt);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1574,11 +1682,19 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* the first link always points to the default one */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out;
+
/*
* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
@@ -1645,15 +1761,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
- vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
-
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -1691,6 +1802,8 @@ out:
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
/*
* Flush the ROC worker which will flush the OFFCHANNEL queue.
@@ -1699,6 +1812,16 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
*/
flush_work(&mvm->roc_done_wk);
}
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ cancel_delayed_work_sync(&mvmvif->csa_work);
}
/* This function is doing the common part of removing the interface for
@@ -1783,6 +1906,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
}
+ iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
@@ -2538,6 +2662,7 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int link_id;
/* The firmware tracks the MU-MIMO group on its own.
* However, on HW restart we should restore this data.
@@ -2553,7 +2678,8 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
iwl_mvm_recalc_multicast(mvm);
/* reset rssi values */
- mvmvif->bf_data.ave_beacon_signal = 0;
+ for_each_mvm_vif_valid_link(mvmvif, link_id)
+ mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0;
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT,
@@ -2594,10 +2720,14 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_CQM) {
- IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
- /* reset cqm events tracking */
- mvmvif->bf_data.last_cqm_event = 0;
- if (mvmvif->bf_data.bf_enabled) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n");
+ if (link_info)
+ link_info->bf_data.last_cqm_event = 0;
+
+ if (mvmvif->bf_enabled) {
/* FIXME: need to update per link when FW API will
* support it
*/
@@ -3809,15 +3939,29 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvmvif->authorized = 1;
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ mvmvif->link_selection_res = vif->active_links;
+ mvmvif->link_selection_primary =
+ vif->active_links ? __ffs(vif->active_links) : 0;
+ }
+
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+ memset(&mvmvif->last_esr_exit, 0,
+ sizeof(mvmvif->last_esr_exit));
+
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
+
+	/* Block until the FW notification arrives */
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
+
/* when client is authorized (AP station marked as such),
- * try to enable more links
+ * try to enable the best link(s).
*/
if (vif->type == NL80211_IFTYPE_STATION &&
!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- iwl_mvm_mld_select_links(mvm, vif, false);
+ iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
@@ -3861,9 +4005,22 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
* time.
*/
mvmvif->authorized = 0;
+ mvmvif->link_selection_res = 0;
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ /* No need for the periodic statistics anymore */
+ if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
}
return 0;
@@ -4677,6 +4834,10 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
+ ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true);
+ if (ret)
+ return ret;
+
mutex_lock(&mvm->mutex);
switch (vif->type) {
@@ -4720,9 +4881,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
- mutex_lock(&mvm->mutex);
iwl_mvm_stop_roc(mvm, vif);
- mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
return 0;
@@ -5391,7 +5550,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
if (chsw->block_tx)
iwl_mvm_csa_client_absent(mvm, vif);
- if (mvmvif->bf_data.bf_enabled) {
+ if (mvmvif->bf_enabled) {
int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
@@ -5404,19 +5563,32 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
return 0;
}
+static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ }
+}
+
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_txq *mvmtxq;
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
mvmvif->csa_failed = false;
+ mvmvif->csa_blocks_tx = false;
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
@@ -5431,30 +5603,38 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
- "Another CSA is already in progress")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Another CSA is already in progress"))
+ return -EBUSY;
/* we still didn't unblock tx. prevent new CS meanwhile */
if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
- lockdep_is_held(&mvm->mutex))) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ lockdep_is_held(&mvm->mutex)))
+ return -EBUSY;
rcu_assign_pointer(mvm->csa_vif, vif);
if (WARN_ONCE(mvmvif->csa_countdown,
- "Previous CSA countdown didn't complete")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Previous CSA countdown didn't complete"))
+ return -EBUSY;
mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+ if (!chsw->block_tx)
+ break;
+		/* no need to block in the driver otherwise - mac80211 will do it */
+ if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA))
+ break;
+
+ mvmvif->csa_blocks_tx = true;
+ mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq);
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_csa_block_txqs,
+ NULL);
break;
case NL80211_IFTYPE_STATION:
+ mvmvif->csa_blocks_tx = chsw->block_tx;
+
/*
* In the new flow FW is in charge of timing the switch so there
* is no need for all of this
@@ -5469,10 +5649,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
*/
- if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period)
+ return -EBUSY;
if (chsw->delay > IWL_MAX_CSA_BLOCK_TX &&
hweight16(vif->valid_links) <= 1)
@@ -5494,7 +5672,7 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
if (ret)
- goto out_unlock;
+ return ret;
} else {
iwl_mvm_schedule_client_csa(mvm, vif, chsw);
}
@@ -5510,12 +5688,23 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
ret = iwl_mvm_power_update_ps(mvm);
if (ret)
- goto out_unlock;
+ return ret;
/* we won't be on this channel any longer */
iwl_mvm_teardown_tdls_peers(mvm);
-out_unlock:
+ return ret;
+}
+
+static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
mutex_unlock(&mvm->mutex);
return ret;
@@ -5619,8 +5808,8 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_sta *sta;
bool ap_sta_done = false;
@@ -5632,11 +5821,22 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
}
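+	/* If CSA blocks TX on the (single) active link, flushing cannot make progress - drop instead */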
+ if (!drop && hweight16(vif->active_links) <= 1) {
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+ if (link_conf->csa_active && mvmvif->csa_blocks_tx)
+ drop = true;
+ }
+
/* Make sure we're done with the deferred traffic before flushing */
flush_work(&mvm->add_stream_wk);
mutex_lock(&mvm->mutex);
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
@@ -5699,6 +5899,65 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&mvm->mutex);
}
+static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx,
+ struct survey_info *survey)
+{
+ int chan_idx;
+ enum nl80211_band band;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Find and return the next entry that has a non-zero active time */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct iwl_mvm_acs_survey_channel *info =
+ &mvm->acs_survey->bands[band][chan_idx];
+
+ if (!info->time)
+ continue;
+
+ /* Found (the next) channel to report */
+ survey->channel = &sband->channels[chan_idx];
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time = info->time;
+ survey->time_busy = info->time_busy;
+ survey->time_rx = info->time_rx;
+ survey->time_tx = info->time_tx;
+ survey->noise = info->noise;
+ if (survey->noise < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ /* Clear time so that channel is only reported once */
+ info->time = 0;
+
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = -ENOENT;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
@@ -5711,14 +5970,18 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
memset(survey, 0, sizeof(*survey));
- /* only support global statistics right now */
- if (idx != 0)
- return -ENOENT;
-
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
+ /*
+ * Return the beacon stats at index zero and pass on following indices
+ * to the function returning the full survey, most likely for ACS
+ * (Automatic Channel Selection).
+ */
+ if (idx > 0)
+ return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey);
+
mutex_lock(&mvm->mutex);
if (iwl_mvm_firmware_running(mvm)) {
@@ -6290,7 +6553,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 084314bf6f36..0a3b7284eedd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -14,6 +14,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* Not much to do here. The stack will not allow interface
@@ -92,6 +94,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvm->csme_vif = vif;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
goto out_unlock;
out_free_bf:
@@ -189,23 +194,43 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif)
+static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
{
unsigned int n_active = 0;
int i;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- struct ieee80211_bss_conf *link_conf;
-
- link_conf = link_conf_dereference_protected(vif, i);
- if (link_conf &&
- rcu_access_pointer(link_conf->chanctx_conf))
+ if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
n_active++;
}
return n_active;
}
+static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct ieee80211_sta *ap_sta = mvmvif->ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+ mvmsta->mpdu_counters[q].window_start = jiffies;
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+}
+
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -233,6 +258,22 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
link->phy_ctxt->rlc_disabled = true;
}
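+	/* Keep the primary chosen by the last link selection if it is still
+	 * active, otherwise fall back to the lowest active link ID
+	 */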
+ if (vif->active_links == mvmvif->link_selection_res &&
+ !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary))))
+ mvmvif->primary_link = mvmvif->link_selection_primary;
+ else
+ mvmvif->primary_link = __ffs(vif->active_links);
+
+ /* Needed for tracking RSSI */
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
+
+ /*
+ * Restart the MPDU counters and the counting window, so when the
+ * statistics arrive (which is where we look at the counters) we
+ * will be at the end of the window.
+ */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -245,18 +286,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
int ret;
- /* if the assigned one was not counted yet, count it now */
- if (!rcu_access_pointer(link_conf->chanctx_conf))
- n_active++;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
+ /* if the assigned one was not counted yet, count it now */
+ if (!mvmvif->link[link_id]->phy_ctxt)
+ n_active++;
+
/* mac parameters such as HE support can change at this stage
* For sta, need first to configure correct state from drv_sta_state
* and only after that update mac config.
@@ -276,6 +317,7 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
goto out;
}
}
@@ -296,13 +338,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
* this needs the phy context assigned (and in FW?), and we cannot
* do it later because it needs to be initialized as soon as we're
* able to TX on the link, i.e. when active.
- *
- * Firmware restart isn't quite correct yet for MLO, but we don't
- * need to do it in that case anyway since it will happen from the
- * normal station state callback.
*/
- if (mvmvif->ap_sta &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (mvmvif->ap_sta) {
struct ieee80211_link_sta *link_sta;
rcu_read_lock();
@@ -354,6 +391,18 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
+ true);
+ /*
+		 * Don't activate this link if we failed to exit EMLSR on
+		 * the BSS interface
+ */
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&mvm->mutex);
ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
mutex_unlock(&mvm->mutex);
@@ -404,6 +453,11 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
break;
}
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
+ /* Start a new counting window */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -416,7 +470,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
/* shouldn't happen, but verify link_id is valid before accessing */
@@ -472,6 +526,56 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_add_link(mvm, vif, link_conf);
}
mutex_unlock(&mvm->mutex);
+
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
+}
+
+static void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[bss_conf->link_id];
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD);
+ u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return;
+
+ if (!link_info->active ||
+ link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ return;
+
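+	/* Only relevant for 6 GHz channels on which VLP clients are allowed */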
+ if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ ||
+ bss_conf->chanreq.oper.chan->flags &
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT)
+ return;
+
+ cmd.link_id = cpu_to_le16(link_info->fw_link_id);
+ /*
+ * Currently supporting VLP Soft AP only.
+ */
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
}
static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
@@ -483,6 +587,10 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, link_conf);
+
/* Send the beacon template */
ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
if (ret)
@@ -601,126 +709,23 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
-struct iwl_mvm_link_sel_data {
- u8 link_id;
- enum nl80211_band band;
- enum nl80211_chan_width width;
- bool active;
-};
-
-static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a,
- struct iwl_mvm_link_sel_data *b)
-{
- return a->band != b->band;
-}
-
-void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- bool valid_links_changed)
+static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
- u16 new_active_links;
- u8 link_id, n_data = 0, i, j;
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- if (!ieee80211_vif_is_mld(vif) || usable_links == 1)
- return;
-
- /* The logic below is a simple version that doesn't suit more than 2
- * links
- */
- WARN_ON_ONCE(max_active_links > 2);
-
- /* if only a single active link is supported, assume that the one
- * selected by higher layer for connection establishment is the best.
- */
- if (max_active_links == 1 && !valid_links_changed)
- return;
-
- /* If we are already using the maximal number of active links, don't do
- * any change. This can later be optimized to pick a 'better' link pair.
- */
- if (hweight16(vif->active_links) == max_active_links)
- return;
-
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
+ struct ieee80211_bss_conf *other_link;
+ int link_id;
- if (WARN_ON_ONCE(!link_conf))
+ /* Exit EMLSR if links don't have equal bandwidths */
+ for_each_vif_active_link(vif, other_link, link_id) {
+ if (link_id == link_conf->link_id)
continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- data[n_data].width = link_conf->chanreq.oper.width;
- data[n_data].active = vif->active_links & BIT(link_id);
- n_data++;
- }
-
- rcu_read_unlock();
-
- /* this is expected to be the current active link */
- if (n_data == 1)
- return;
-
- new_active_links = 0;
-
- /* Assume that after association only a single link is active, thus,
- * select only the 2nd link
- */
- if (!valid_links_changed) {
- for (i = 0; i < n_data; i++) {
- if (data[i].active)
- break;
- }
-
- if (WARN_ON_ONCE(i == n_data))
- return;
-
- for (j = 0; j < n_data; j++) {
- if (i == j)
- continue;
-
- if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j]))
- break;
- }
-
- if (j != n_data)
- new_active_links = BIT(data[i].link_id) |
- BIT(data[j].link_id);
- } else {
- /* Try to find a valid link pair for EMLSR operation. If a pair
- * is not found continue using the current active link.
- */
- for (i = 0; i < n_data; i++) {
- for (j = 0; j < n_data; j++) {
- if (i == j)
- continue;
-
- if (iwl_mvm_mld_valid_link_pair(&data[i],
- &data[j]))
- break;
- }
-
- /* found a valid pair for EMLSR, use it */
- if (j != n_data) {
- new_active_links = BIT(data[i].link_id) |
- BIT(data[j].link_id);
- break;
- }
- }
+ if (link_conf->chanreq.oper.width ==
+ other_link->chanreq.oper.width)
+ return true;
}
- if (!new_active_links)
- return;
-
- if (vif->active_links != new_active_links)
- ieee80211_set_active_links_async(vif, new_active_links);
+ return false;
}
static void
@@ -752,6 +757,14 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
+ if ((changes & BSS_CHANGED_BANDWIDTH) &&
+ ieee80211_vif_link_active(vif, link_conf->link_id) &&
+ mvmvif->esr_active &&
+ !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_BANDWIDTH,
+ iwl_mvm_get_primary_link(vif));
+
/* if associated, maybe puncturing changed - we'll check later */
if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
@@ -767,9 +780,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- if (changes & BSS_CHANGED_MLD_VALID_LINKS)
- iwl_mvm_mld_select_links(mvm, vif, true);
-
memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid,
ETH_ALEN);
@@ -912,6 +922,11 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
+
+ if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
+ ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -1190,6 +1205,14 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
if (new_links == 0) {
mvmvif->link[0] = &mvmvif->deflink;
err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (err == 0)
+ mvmvif->primary_link = 0;
+ } else if (!(new_links & BIT(mvmvif->primary_link))) {
+ /*
+ * Ensure we always have a valid primary_link, the real
+ * decision happens later when PHY is activated.
+ */
+ mvmvif->primary_link = __ffs(new_links);
}
out_err:
@@ -1218,68 +1241,14 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return ret;
}
-/*
- * This function receives a subset of the usable links bitmap and
- * returns the primary link id, and -1 if such link doesn't exist
- * (e.g. non-MLO connection) or wasn't found.
- */
-int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long usable_links)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- u8 link_id, n_data = 0;
-
- if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
- return -1;
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- data[n_data].width = link_conf->chanreq.oper.width;
- data[n_data].active = true;
- n_data++;
- }
-
- if (n_data <= 1)
- return -1;
-
- /* The logic should be modified to handle more than 2 links */
- WARN_ON_ONCE(n_data > 2);
-
- /* Primary link is the link with the wider bandwidth or higher band */
- if (data[0].width > data[1].width)
- return data[0].link_id;
- if (data[0].width < data[1].width)
- return data[1].link_id;
- if (data[0].band >= data[1].band)
- return data[0].link_id;
-
- return data[1].link_id;
-}
-
-/*
- * This function receives a bitmap of usable links and check if we can enter
- * eSR on those links.
- */
-static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long desired_links)
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- desired_links);
const struct wiphy_iftype_ext_capab *ext_capa;
- bool ret = true;
- int link_id;
- if (primary_link < 0)
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
+ hweight16(ieee80211_vif_usable_links(vif)) == 1)
return false;
if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
@@ -1287,30 +1256,8 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
ieee80211_vif_type_p2p(vif));
- if (!ext_capa ||
- !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
- return false;
-
- for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- /* BT Coex effects eSR mode only if one of the link is on LB */
- if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
- continue;
-
- ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
- primary_link);
- // Mark eSR as disabled for the next time
- if (!ret)
- mvmvif->bt_coex_esr_disabled = true;
- break;
- }
-
- return ret;
+ return (ext_capa &&
+ (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP));
}
static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
@@ -1333,8 +1280,9 @@ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
}
/* If it is an eSR device, check that we can enter eSR */
- if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
- ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
+ ret = iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
+ iwl_mvm_vif_has_esr_cap(mvm, vif);
+
unlock:
mutex_unlock(&mvm->mutex);
return ret;
@@ -1358,6 +1306,45 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return NEG_TTLM_RES_ACCEPT;
}
+static int
+iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ if (mvmvif->esr_active) {
+ u8 primary = iwl_mvm_get_primary_link(vif);
+ int selected;
+
+ /* prefer primary unless quiet CSA on it */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mvm_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * If we've not kept the link active that's doing the CSA
+ * then we don't need to do anything else, just return.
+ */
+ if (selected != chsw->link_id)
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ }
+
+ ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1411,7 +1398,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 1628bf55458f..b7a461dba41e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
#include "time-sync.h"
@@ -9,7 +9,9 @@
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id)
{
+ struct ieee80211_link_sta *link_sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
unsigned int link_id;
u32 result = 0;
@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
/* it's easy when the STA is not an MLD */
if (!sta->valid_links)
return BIT(mvmsta->deflink.sta_id);
/* but if it is an MLD, get the mask of all the FW STAs it has ... */
- for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
- struct iwl_mvm_link_sta *link_sta;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
/* unless we have a specific link in mind */
if (filter_link_id >= 0 && link_id != filter_link_id)
continue;
- link_sta =
+ mvm_link_sta =
rcu_dereference_check(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (!link_sta)
+ if (!mvm_link_sta)
continue;
- result |= BIT(link_sta->sta_id);
+ result |= BIT(mvm_link_sta->sta_id);
}
return result;
@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
unsigned int link_id;
int ret;
lockdep_assert_held(&mvm->mutex);
- for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
- if (!rcu_access_pointer(sta->link[link_id]) ||
- mvm_sta->link[link_id])
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON(mvm_sta->link[link_id]))
continue;
ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
@@ -616,9 +619,6 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
}
}
-/* FIXME: consider waiting for mac80211 to add the STA instead of allocating
- * queues here
- */
static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -723,7 +723,6 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id],
mvm_link_sta);
}
-
return 0;
err:
@@ -849,16 +848,23 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
link_id, stay_in_fw);
}
+ kfree(mvm_sta->mpdu_counters);
+ mvm_sta->mpdu_counters = NULL;
return ret;
}
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
{
- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+ int ret;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+ return 0;
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
return ret;
@@ -984,6 +990,10 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
int baid;
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
@@ -1117,10 +1127,21 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- if (WARN_ON(!mvm_sta->link[link_id])) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ u32 sta_id;
+
+ if (WARN_ON(!mvm_link_sta)) {
ret = -EINVAL;
goto err;
}
+
+ sta_id = mvm_link_sta->sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id],
+ link_sta);
} else {
if (WARN_ON(mvm_sta->link[link_id])) {
ret = -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 44571114fb15..1f58c727fa63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -255,18 +255,14 @@ enum iwl_mvm_low_latency_cause {
};
/**
-* struct iwl_mvm_vif_bf_data - beacon filtering related data
-* @bf_enabled: indicates if beacon filtering is enabled
-* @ba_enabled: indicated if beacon abort is enabled
+* struct iwl_mvm_link_bf_data - beacon filtering related data
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
* @bt_coex_max_thold: maximum threshold for BT coex
* @last_bt_coex_event: rssi of the last BT coex event
*/
-struct iwl_mvm_vif_bf_data {
- bool bf_enabled;
- bool ba_enabled;
+struct iwl_mvm_link_bf_data {
int ave_beacon_signal;
int last_cqm_event;
int bt_coex_min_thold;
@@ -309,6 +305,7 @@ struct iwl_probe_resp_data {
* @listen_lmac: indicates this link is allocated to the listen LMAC
* @mcast_sta: multicast station
* @phy_ctxt: phy context allocated to this link, if any
+ * @bf_data: beacon filtering data
*/
struct iwl_mvm_vif_link_info {
u8 bssid[ETH_ALEN];
@@ -344,6 +341,63 @@ struct iwl_mvm_vif_link_info {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
u16 mgmt_queue;
+
+ struct iwl_mvm_link_bf_data bf_data;
+};
+
+/**
+ * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
+ * blocked.
+ * The low 16 bits are used for blocking reasons, and the high 16 bits
+ * are used for exit reasons.
+ * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
+ * reasons - use iwl_mvm_exit_esr().
+ *
+ * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
+ *
+ * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
+ * in a loop.
+ * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
+ * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from it
+ * @IWL_MVM_ESR_BLOCKED_NON_BSS: an active non-BSS link is preventing EMLSR
+ * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
+ * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
+ * due to low RSSI.
+ * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
+ * due to BT Coex.
+ * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
+ * preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
+ * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ */
+enum iwl_mvm_esr_state {
+ IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
+ IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
+ IWL_MVM_ESR_BLOCKED_TPT = 0x4,
+ IWL_MVM_ESR_BLOCKED_FW = 0x8,
+ IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
+ IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
+ IWL_MVM_ESR_EXIT_COEX = 0x40000,
+ IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
+ IWL_MVM_ESR_EXIT_CSA = 0x100000,
+ IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+};
+
+#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
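
As a quick aside, the low/high 16-bit split above means a single mask test is enough to tell a blocking reason from an exit reason. A minimal sketch, not part of the patch, relying only on the enum and mask defined above:

static inline bool iwl_mvm_esr_reason_is_blocking(enum iwl_mvm_esr_state reason)
{
	/* blocking reasons occupy bits 0..15, exit reasons bits 16 and up */
	return !!(reason & IWL_MVM_BLOCK_ESR_REASONS);
}
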
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
+
+/**
+ * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
+ * @reason: The reason for the last exit from EMLSR, see
+ *	&iwl_mvm_prevent_esr_reasons. Will be 0 before exiting EMLSR.
+ * @ts: the time stamp of the last time we exited EMLSR.
+ */
+struct iwl_mvm_esr_exit {
+ unsigned long ts;
+ enum iwl_mvm_esr_state reason;
};
/**
@@ -361,7 +415,6 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -371,14 +424,30 @@ struct iwl_mvm_vif_link_info {
* @csa_countdown: indicates that CSA countdown may be started
* @csa_failed: CSA failed to schedule time event, report an error later
* @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel
+ * @csa_blocks_tx: CSA is blocking TX
* @features: hw features active for this vif
* @ap_beacon_time: AP beacon time for synchronisation (on older FW)
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicates if beacon abort is enabled
* @bcn_prot: beacon protection data (keys; FIXME: needs to be per link)
- * @bf_data: beacon filtering data
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
+ * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
* @pm_enabled: indicates powersave is enabled
+ * @link_selection_res: bitmap of active links as decided in the last
+ *	link selection. Valid only for an MLO vif after assoc; 0 if no link
+ *	selection has happened yet.
+ * @link_selection_primary: primary link selected by link selection
+ * @primary_link: primary link in eSR. Valid only for an associated MLD vif,
+ * and in eSR mode. Valid only for a STA.
+ * @last_esr_exit: Details of the last exit from EMLSR.
+ * @exit_same_reason_count: The number of times we exited due to the specified
+ * @last_esr_exit::reason, only counting exits due to
+ * &IWL_MVM_ESR_PREVENT_REASONS.
+ * @prevent_esr_done_wk: work that should be done when esr prevention ends.
+ * @mlo_int_scan_wk: work for the internal MLO scan.
+ * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -392,7 +461,6 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
- bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -400,8 +468,10 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
+ u32 esr_disable_reason;
u32 ap_beacon_time;
- struct iwl_mvm_vif_bf_data bf_data;
+ bool bf_enabled;
+ bool ba_enabled;
#ifdef CONFIG_PM
/* WoWLAN GTK rekey data */
@@ -435,6 +505,7 @@ struct iwl_mvm_vif {
struct iwl_dbgfs_bf dbgfs_bf;
struct iwl_mac_power_cmd mac_pwr_cmd;
int dbgfs_quota_min;
+ bool ftm_unprotected;
#endif
/* FW identified misbehaving AP */
@@ -444,6 +515,7 @@ struct iwl_mvm_vif {
bool csa_countdown;
bool csa_failed;
bool csa_bcn_pending;
+ bool csa_blocks_tx;
u16 csa_target_freq;
u16 csa_count;
u16 csa_misbehave;
@@ -466,6 +538,15 @@ struct iwl_mvm_vif {
struct ieee80211_key_conf __rcu *keys[2];
} bcn_prot;
+ u16 link_selection_res;
+ u8 link_selection_primary;
+ u8 primary_link;
+ struct iwl_mvm_esr_exit last_esr_exit;
+ u8 exit_same_reason_count;
+ struct wiphy_delayed_work prevent_esr_done_wk;
+ struct wiphy_delayed_work mlo_int_scan_wk;
+ struct wiphy_work unblock_esr_tpt_wk;
+
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
};
@@ -490,10 +571,12 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
+ IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+ IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11),
IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
IWL_MVM_SCAN_STOPPING_REGULAR,
@@ -501,6 +584,8 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
+ IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
+ IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
@@ -754,9 +839,10 @@ struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
-#define IWL_MVM_TXQ_STATE_STOP_FULL 0
-#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
-#define IWL_MVM_TXQ_STATE_READY 2
+#define IWL_MVM_TXQ_STATE_READY 0
+#define IWL_MVM_TXQ_STATE_STOP_FULL 1
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2
+#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3
unsigned long state;
};
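
A hedged sketch of how the renumbered txq state bits are typically consumed; the helper name and the exact set of bits checked are assumptions, not taken from this hunk:

static bool iwl_mvm_txq_tx_blocked(struct iwl_mvm_txq *mvmtxq)
{
	/* the state field is a bitmap driven by the atomic bitops helpers */
	return test_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state) ||
	       test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &mvmtxq->state) ||
	       test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
}
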
@@ -834,6 +920,35 @@ struct iwl_mei_scan_filter {
struct work_struct scan_work;
};
+/**
+ * struct iwl_mvm_acs_survey_channel - per-channel survey information
+ *
+ * Stripped down version of &struct survey_info.
+ *
+ * @time: time in ms the radio was on the channel
+ * @time_busy: time in ms the channel was sensed busy
+ * @time_tx: time in ms spent transmitting data
+ * @time_rx: time in ms spent receiving data
+ * @noise: channel noise in dBm
+ */
+struct iwl_mvm_acs_survey_channel {
+ u32 time;
+ u32 time_busy;
+ u32 time_tx;
+ u32 time_rx;
+ s8 noise;
+};
+
+struct iwl_mvm_acs_survey {
+ struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS];
+
+ /* Overall number of channels */
+ int n_channels;
+
+ /* Storage space for per-channel information follows */
+ struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels);
+};
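
Since channels[] is a __counted_by() flexible array, the usual allocation pattern is struct_size() with n_channels set before the array is touched. A sketch under that assumption; the helper name is hypothetical:

static struct iwl_mvm_acs_survey *iwl_mvm_alloc_acs_survey(int n_channels)
{
	struct iwl_mvm_acs_survey *survey;

	/* allocate the header plus n_channels trailing entries */
	survey = kzalloc(struct_size(survey, channels, n_channels),
			 GFP_KERNEL);
	if (survey)
		survey->n_channels = n_channels;

	return survey;
}
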
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -853,6 +968,8 @@ struct iwl_mvm {
/* For async rx handlers that require the wiphy lock */
struct wiphy_work async_handlers_wiphy_wk;
+ struct wiphy_work trig_link_selection_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1201,6 +1318,8 @@ struct iwl_mvm {
struct iwl_mei_scan_filter mei_scan_filter;
+ struct iwl_mvm_acs_survey *acs_survey;
+
bool statistics_clear;
};
@@ -1570,17 +1689,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if ((iwl_mvm_is_esr_supported(trans) &&
- !mvmvif->bt_coex_esr_disabled) ||
- ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
+ /* Check if HW supports eSR or STR */
+ if (iwl_mvm_is_esr_supported(trans) ||
+ (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1720,6 +1836,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm,
+ bool enable);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
@@ -1776,6 +1894,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
+
/*
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
@@ -1916,16 +2036,40 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
/* Links */
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active);
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
+
+struct iwl_mvm_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b);
+
+s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+#endif
+
/* AP and IBSS */
bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int *ret);
@@ -2001,9 +2145,13 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies);
size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -2109,7 +2257,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags);
+ u32 cmd_flags,
+ u8 sta_id);
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -2129,12 +2278,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id, int primary_link);
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2446,6 +2589,21 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
}
}
+static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
/* Channel Switch */
void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk);
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -2707,7 +2865,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
@@ -2749,12 +2907,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
-void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- bool valid_links_changed);
-int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long usable_links);
-
bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
@@ -2775,4 +2927,30 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
int duration, u32 activity);
+
+/* EMLSR */
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary);
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active);
#endif /* __IWL_MVM_H__ */
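
The EMLSR prototypes above pair block/unblock calls for the low 16-bit blocking reasons, while exit reasons are one-shot. A hedged usage sketch built only from these declarations; the flow itself is illustrative, not the driver's actual call sites:

static void example_emlsr_flow(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	u8 primary = iwl_mvm_get_primary_link(vif);

	/* temporarily forbid EMLSR, keeping only the primary link active */
	iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, primary);

	/* ... later, once traffic is high enough again ... */
	iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);

	/* exit reasons are not paired; they simply leave EMLSR now */
	iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, primary);
}
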
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index dfb16ca5b438..1eb21fe861e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -30,7 +30,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags)
+ u32 cmd_flags,
+ u8 sta_id)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
@@ -205,6 +206,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
if (!disable_offloading)
common->enabled = cpu_to_le32(enabled);
+ if (ver >= 4)
+ cmd.v4.sta_id = cpu_to_le32(sta_id);
+
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index a93981cb9714..53283d052e18 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -145,6 +145,24 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
+static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+ /* FW recommendations are only for entering EMLSR */
+ if (!vif || iwl_mvm_vif_from_mac80211(vif)->esr_active)
+ return;
+
+ if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
+ else
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
+ iwl_mvm_get_primary_link(vif));
+}
+
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -365,13 +383,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_match_found,
RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_complete),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
- RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
@@ -423,6 +443,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_channel_switch_error_notif,
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ iwl_mvm_rx_esr_mode_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_mvm_esr_mode_notif),
+
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -447,6 +473,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
struct iwl_roc_notif),
+ RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_channel_survey_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -586,6 +615,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -604,6 +634,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
+ HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -623,6 +654,7 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(CHANNEL_SURVEY_NOTIF),
HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
};
@@ -1143,6 +1175,27 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
+static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ iwl_mvm_select_links(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, trig_link_selection_wk);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_find_link_selection_vif,
+ NULL);
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1274,6 +1327,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
wiphy_work_init(&mvm->async_handlers_wiphy_wk,
iwl_mvm_async_handlers_wiphy_wk);
+
+ wiphy_work_init(&mvm->trig_link_selection_wk,
+ iwl_mvm_trig_link_selection);
+
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1528,6 +1585,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ kfree(mvm->acs_survey);
cancel_delayed_work_sync(&mvm->tcm.work);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 41e68aa6bec8..568f53c56199 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -79,7 +79,7 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
cmd->bf_roaming_state =
cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
}
- cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -826,7 +826,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = true;
+ mvmvif->bf_enabled = true;
return ret;
}
@@ -855,7 +855,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = false;
+ mvmvif->bf_enabled = false;
return ret;
}
@@ -903,16 +903,16 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- if (!mvmvif->bf_data.bf_enabled)
+ if (!mvmvif->bf_enabled)
return 0;
if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
- mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
- mvm->ps_disabled ||
- !vif->cfg.ps ||
- iwl_mvm_vif_low_latency(mvmvif));
+ mvmvif->ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->cfg.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 2ecd32bed752..045c862a8fc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (ret)
return ERR_PTR(ret);
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
return ERR_PTR(-EIO);
+ }
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
if (!resp)
return ERR_PTR(-ENOMEM);
- iwl_free_resp(&cmd);
return resp;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 00860feefa7a..3ba62fb2c85e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -654,10 +654,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
*/
sta->deflink.agg.max_amsdu_len = max_amsdu_len;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- 0);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
@@ -693,9 +690,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD), 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index b1add7942c5b..4fa8066a89b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -556,36 +556,40 @@ struct iwl_mvm_stat_data_all_macs {
struct iwl_stats_ntfy_per_mac *per_mac;
};
-static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
+static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct iwl_mvm_vif_link_info *link_info)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- int thold = vif->bss_conf.cqm_rssi_thold;
- int hyst = vif->bss_conf.cqm_rssi_hyst;
+ struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm;
+ struct ieee80211_bss_conf *bss_conf =
+ iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_info->fw_link_id,
+ false);
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
+ s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
return;
}
- mvmvif->bf_data.ave_beacon_signal = sig;
+ link_info->bf_data.ave_beacon_signal = sig;
/* BT Coex */
- if (mvmvif->bf_data.bt_coex_min_thold !=
- mvmvif->bf_data.bt_coex_max_thold) {
- last_event = mvmvif->bf_data.last_bt_coex_event;
- if (sig > mvmvif->bf_data.bt_coex_max_thold &&
- (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ if (link_info->bf_data.bt_coex_min_thold !=
+ link_info->bf_data.bt_coex_max_thold) {
+ last_event = link_info->bf_data.last_bt_coex_event;
+ if (sig > link_info->bf_data.bt_coex_max_thold &&
+ (last_event <= link_info->bf_data.bt_coex_min_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
- } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
- (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ } else if (sig < link_info->bf_data.bt_coex_min_thold &&
+ (last_event >= link_info->bf_data.bt_coex_max_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
@@ -596,10 +600,10 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
return;
/* CQM Notification */
- last_event = mvmvif->bf_data.last_cqm_event;
+ last_event = link_info->bf_data.last_cqm_event;
if (thold && sig < thold && (last_event == 0 ||
sig < last_event - hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -609,7 +613,7 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -618,6 +622,20 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
sig,
GFP_KERNEL);
}
+
+ /* ESR recalculation */
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ exit_esr_thresh =
+ iwl_mvm_get_esr_rssi_thresh(mvm,
+ &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_esr_thresh)
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
+ iwl_mvm_get_other_link(vif,
+ bss_conf->link_id));
}
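
To make the CQM hysteresis above concrete, here is a small self-contained C rendition of the same condition with illustrative numbers; none of them come from the patch:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int thold = -70, hyst = 4;	/* cqm_rssi_thold / cqm_rssi_hyst */
	int last_event = 0;		/* no CQM event reported yet */
	int sig = -72;			/* measured beacon signal, dBm */

	bool low = thold && sig < thold &&
		   (last_event == 0 || sig < last_event - hyst);
	bool high = sig > thold &&
		    (last_event == 0 || sig > last_event + hyst);

	/* prints low=1 high=0: the first -72 dBm beacon fires a "low" event */
	printf("low=%d high=%d\n", low, high);
	return 0;
}
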
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -651,7 +669,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.accu_num_beacons +=
mvmvif->deflink.beacon_stats.num_beacons;
- iwl_mvm_update_vif_sig(vif, sig);
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink);
}
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
@@ -684,7 +703,9 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.num_beacons;
sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
- iwl_mvm_update_vif_sig(vif, sig);
+
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink);
}
static inline void
@@ -889,8 +910,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
if (link_info->phy_ctxt &&
link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
- link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif,
+ link_id);
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
@@ -900,7 +921,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
mvmvif->link[link_id]->beacon_stats.num_beacons;
sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
- iwl_mvm_update_vif_sig(bss_conf->vif, sig);
+ iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info);
if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX,
"invalid mvmvif id: %d", mvmvif->id))
@@ -930,6 +951,89 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
}
}
+#define SEC_LINK_MIN_PERC 10
+#define SEC_LINK_MIN_TX 3000
+#define SEC_LINK_MIN_RX 400
+
+static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ u8 sec_link;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!bss_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
+
+ if (!mvmvif->esr_active || !mvmvif->ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
+ /* We only count MPDUs for the AP STA in an MLO connection */
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ /* Get the FW ID of the secondary link */
+ sec_link = iwl_mvm_get_other_link(bss_vif,
+ iwl_mvm_get_primary_link(bss_vif));
+ if (WARN_ON(!mvmvif->link[sec_link]))
+ return;
+ sec_link = mvmvif->link[sec_link]->fw_link_id;
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+
+ /* Link IDs that don't exist will contain 0 */
+ for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) {
+ total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
+ total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
+ }
+
+ sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
+ sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
+
+ /*
+ * In EMLSR we have statistics every 5 seconds, so we can reset
+ * the counters upon every statistics notification.
+ */
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ /* If we don't have enough MPDUs - exit EMLSR */
+ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
+ total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
+ iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
+ iwl_mvm_get_primary_link(bss_vif));
+ return;
+ }
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+ * The secondary link TX/RX share is checked only when the total TX/RX
+ * exceeds the required minimum; RX is only checked if the TX check did
+ * not already trigger an exit.
+ */
+ if ((total_tx > SEC_LINK_MIN_TX &&
+ sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
+ (total_rx > SEC_LINK_MIN_RX &&
+ sec_link_rx_perc < SEC_LINK_MIN_PERC))
+ iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
+ iwl_mvm_get_primary_link(bss_vif));
+}
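
A worked example of the secondary-link usage check above, with made-up traffic numbers; only SEC_LINK_MIN_PERC and SEC_LINK_MIN_TX from the patch are mirrored here:

#include <stdio.h>

int main(void)
{
	unsigned long total_tx = 4000, sec_link_tx = 300;

	/* total_tx > SEC_LINK_MIN_TX (3000), so the TX share is evaluated */
	unsigned long sec_link_tx_perc = sec_link_tx * 100 / total_tx;

	/* 300 * 100 / 4000 = 7%, under SEC_LINK_MIN_PERC (10%), so the
	 * driver above would exit EMLSR with IWL_MVM_ESR_EXIT_LINK_USAGE
	 */
	printf("secondary link TX share: %lu%%\n", sec_link_tx_perc);
	return 0;
}
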
+
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -957,6 +1061,8 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
+
+ iwl_mvm_update_esr_mode_tpt(mvm);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1484eaedf452..d78af2928152 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta)
+ struct ieee80211_sta *sta)
{
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
kfree_skb(skb);
return;
}
- if (sta && sta->valid_links && link_sta) {
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
-
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
@@ -588,7 +580,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, NULL /* FIXME */);
+ sta);
reorder_buf->num_stored--;
}
}
@@ -1898,21 +1890,6 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
-static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
-{
- switch (phy_band) {
- case PHY_BAND_24:
- return NL80211_BAND_2GHZ;
- case PHY_BAND_5:
- return NL80211_BAND_5GHZ;
- case PHY_BAND_6:
- return NL80211_BAND_6GHZ;
- default:
- WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
- return NL80211_BAND_5GHZ;
- }
-}
-
struct iwl_rx_sta_csa {
bool all_sta_unblocked;
struct ieee80211_vif *vif;
@@ -2058,6 +2035,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_link_sta *link_sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
+ u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2176,7 +2154,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
- rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
} else {
rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
@@ -2206,13 +2184,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
- u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
-
- if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
- sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR(sta))
sta = NULL;
- link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
@@ -2328,6 +2309,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
+ queue);
+ }
}
/* management stuff on default queue */
@@ -2356,8 +2347,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
- link_sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
}
out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f3e3986b4c72..a7ec172eeade 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -226,6 +226,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
.global_cnt = 0,
};
+ /*
+ * A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_iterator,
@@ -852,11 +860,13 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
* 4. it's not a p2p find operation.
* 5. we are not in low latency mode,
* or if fragmented ebs is supported by the FW
+ * 6. the VIF is not an AP interface (scan wants survey results)
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
+ (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
}
static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
@@ -1377,11 +1387,14 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
-static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
- return iwl_mvm_is_regular_scan(params) ?
- IWL_SCAN_PRIORITY_EXT_6 :
- IWL_SCAN_PRIORITY_EXT_2;
+ if (type == IWL_MVM_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (type == IWL_MVM_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
}
static void
@@ -1747,8 +1760,9 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
&cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
- u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
- bool force_passive, found = false, allow_passive = true,
+ u8 j, k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
@@ -1771,20 +1785,15 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
- /*
- * The optimize the scan time, i.e., reduce the scan dwell time
- * on each channel, the below logic tries to set 3 direct BSSID
- * probe requests for each broadcast probe request with a short
- * SSID.
- * TODO: improve this logic
- */
- n_used_bssid_entries = 3;
for (j = 0; j < params->n_6ghz_params; j++) {
s8 tmp_psd_20;
if (!(scan_6ghz_params[j].channel_idx == i))
continue;
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
/* Use the highest PSD value allowed as advertised by
* APs for this channel
*/
@@ -1796,12 +1805,69 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
psd_20 < tmp_psd_20))
psd_20 = tmp_psd_20;
- found = false;
- unsolicited_probe_on_chan |=
- scan_6ghz_params[j].unsolicited_probe;
psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
- for (k = 0; k < pp->short_ssid_num; k++) {
+ /*
+ * In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+ * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mvm_is_scan_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+ /*
+ * To optimize the scan time, i.e., reduce the scan dwell time
+ * on each channel, the below logic tries to set 3 direct BSSID
+ * probe requests for each broadcast probe request with a short
+ * SSID.
+ * TODO: improve this logic
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ if (!(scan_6ghz_params[j].channel_idx == i))
+ continue;
+
+ found = false;
+
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
if (!scan_6ghz_params[j].unsolicited_probe &&
le32_to_cpu(pp->short_ssid[k]) ==
scan_6ghz_params[j].short_ssid) {
@@ -1812,25 +1878,25 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
}
/*
- * Use short SSID only to create a new
- * iteration during channel dwell or in
- * case that the short SSID has a
- * matching SSID, i.e., scan for hidden
- * APs.
+ * Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
*/
- if (n_used_bssid_entries >= 3) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- n_used_bssid_entries -= 3;
- found = true;
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
break;
- } else if (pp->direct_scan[k].len) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- found = true;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
allow_passive = false;
- break;
- }
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
}
}
@@ -1842,9 +1908,12 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
scan_6ghz_params[j].bssid,
ETH_ALEN)) {
if (!(bssid_bitmap & BIT(k))) {
- bssid_bitmap |= BIT(k);
- b_max++;
- n_used_bssid_entries++;
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+ force_passive = true;
+ }
}
break;
}
@@ -1858,39 +1927,6 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
if (unsolicited_probe_on_chan)
flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
- /*
- * In the following cases apply passive scan:
- * 1. Non fragmented scan:
- * - PSC channel with NO_LISTEN_FLAG on should be treated
- * like non PSC channel
- * - Non PSC channel with more than 3 short SSIDs or more
- * than 9 BSSIDs.
- * - Non PSC Channel with unsolicited probe response and
- * more than 2 short SSIDs or more than 6 BSSIDs.
- * - PSC channel with more than 2 short SSIDs or more than
- * 6 BSSIDs.
- * 3. Fragmented scan:
- * - PSC channel with more than 1 SSID or 3 BSSIDs.
- * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
- * - Non PSC channel with unsolicited probe response and
- * more than 1 SSID or more than 3 BSSIDs.
- */
- if (!iwl_mvm_is_scan_fragmented(params->type)) {
- if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
- force_passive = (s_max > 3 || b_max > 9);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 2 || b_max > 6));
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- }
- } else if (cfg80211_channel_is_psc(params->channels[i])) {
- force_passive = (s_max > 1 || b_max > 3);
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 1 || b_max > 3));
- }
if ((allow_passive && force_passive) ||
(!(bssid_bitmap | s_ssid_bitmap) &&
!cfg80211_channel_is_psc(params->channels[i])))
@@ -2098,7 +2134,8 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif, int type)
+ struct ieee80211_vif *vif, int type,
+ u16 gen_flags)
{
u8 flags = 0;
@@ -2118,6 +2155,13 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+ /* Passive and AP interface -> ACS (automatic channel selection) */
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
+ ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ 0) >= 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
+
return flags;
}
@@ -2255,8 +2299,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
- mvm->scan_uid_status[uid] = type;
-
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
cmd->general_flags = cpu_to_le16(gen_flags);
@@ -2297,10 +2339,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
&tail_v2->delay);
- if (ret) {
- mvm->scan_uid_status[uid] = 0;
+ if (ret)
return ret;
- }
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -2450,9 +2490,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int ret;
u16 gen_flags;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
@@ -2487,15 +2525,14 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
u8 gen_flags2;
u32 bitmap_ssid = 0;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
if (version >= 15)
- gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type,
+ gen_flags);
else
gen_flags2 = 0;
@@ -2532,10 +2569,8 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
params->n_channels,
pb, cp, vif->type,
version);
- if (!cp->count) {
- mvm->scan_uid_status[uid] = 0;
+ if (!cp->count)
return -EINVAL;
- }
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
@@ -2813,7 +2848,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
@@ -2914,9 +2950,11 @@ static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
}
}
-int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req,
- struct ieee80211_scan_ies *ies)
+static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
{
struct iwl_host_cmd hcmd = {
.len = { iwl_mvm_scan_size(mvm), },
@@ -2934,7 +2972,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
- ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
@@ -2983,8 +3021,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
- uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
- IWL_MVM_SCAN_REGULAR);
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (uid < 0)
return uid;
@@ -2999,23 +3036,35 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
iwl_mvm_resume_tcm(mvm);
- mvm->scan_uid_status[uid] = 0;
return ret;
}
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
- mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n",
+ type, uid);
+
+ mvm->scan_uid_status[uid] = type;
+ mvm->scan_status |= type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
+ }
if (params.enable_6ghz_passive)
mvm->last_6ghz_passive_scan_jiffies = jiffies;
- schedule_delayed_work(&mvm->scan_timeout_dwork,
- msecs_to_jiffies(SCAN_TIMEOUT));
-
return 0;
}
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mvm_single_scan_start(mvm, vif, req, ies,
+ IWL_MVM_SCAN_REGULAR);
+}
+
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -3132,7 +3181,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
- "Sched scan request was sent successfully\n");
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mvm->scan_uid_status[uid] = type;
mvm->scan_status |= type;
} else {
/* If the scan failed, it usually means that the FW was unable
@@ -3140,7 +3191,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
- mvm->scan_uid_status[uid] = 0;
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
@@ -3154,9 +3204,25 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mvm->scan_uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mvm_ebs_status_str(notif->ebs_status));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n",
+ mvm->scan_status);
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return;
@@ -3170,8 +3236,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_mvm_vif_link_info *link_info =
scan_vif->link[mvm->scan_link_id];
- if (!WARN_ON(!link_info))
+ /* It is possible that by the time the scan is complete the link
+ * was already removed and is not valid.
+ */
+ if (link_info)
memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
+ else
+ IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_vif = NULL;
@@ -3180,25 +3251,28 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
+ /*
+ * Other scan types won't necessarily scan for the MLD links' channels.
+ * Therefore, only select links after a successful internal scan.
+ */
+ select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
- IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %u, status %s, EBS status %s\n",
- uid, mvm->scan_uid_status[uid],
- notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted",
- iwl_mvm_ebs_status_str(notif->ebs_status));
- IWL_DEBUG_SCAN(mvm,
- "Last line %d, Last iteration %d, Time from last iteration %d\n",
- notif->last_schedule, notif->last_iter,
- __le32_to_cpu(notif->time_from_last_iter));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n",
+ mvm->scan_status);
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
+
+ if (select_links)
+ wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3366,6 +3440,12 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
+ mvm->scan_uid_status[uid] = 0;
+ }
+
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
if (uid >= 0)
@@ -3376,6 +3456,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
if (uid >= 0)
mvm->scan_uid_status[uid] = 0;
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_INT_MLO);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
* any is found.
@@ -3412,6 +3497,10 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
int ret;
+ IWL_DEBUG_SCAN(mvm,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mvm->scan_status);
+
if (!(mvm->scan_status & type))
return 0;
@@ -3423,6 +3512,9 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
ret = iwl_mvm_scan_stop_wait(mvm, type);
if (!ret)
mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ else
+ IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n");
+
out:
/* Clear the scan status so the next scan requests will
* succeed and mark the scan as stopping, so that the Rx
@@ -3447,3 +3539,276 @@ out:
return ret;
}
+
+static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size, i;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* set the requested channels */
+ for (i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mvm->hw->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mvm->hw->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
+ IWL_MVM_SCAN_INT_MLO);
+ kfree(req);
+
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
+ return ret;
+}
+
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
+ return -EBUSY;
+ }
+
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ rcu_read_unlock();
+
+ if (!n_channels)
+ return -EINVAL;
+
+ return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
+}
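
For reference, the internal MLO scan above builds a throwaway cfg80211_scan_request whose trailing channels[] array is sized with struct_size(). A minimal userspace sketch of that flexible-array allocation pattern follows; the names are illustrative only, and the kernel's struct_size() additionally saturates on overflow, which this sketch omits:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustration of the flexible-array allocation pattern used above */
struct chan_list {
	size_t n_channels;
	int channels[];		/* flexible array member */
};

static struct chan_list *chan_list_alloc(const int *chans, size_t n)
{
	struct chan_list *list;

	/* stands in for kzalloc(struct_size(req, channels, n), GFP_KERNEL) */
	list = calloc(1, sizeof(*list) + n * sizeof(list->channels[0]));
	if (!list)
		return NULL;

	list->n_channels = n;
	memcpy(list->channels, chans, n * sizeof(list->channels[0]));
	return list;
}

int main(void)
{
	int chans[] = { 36, 149, 1 };	/* illustrative channel numbers */
	struct chan_list *list = chan_list_alloc(chans, 3);

	if (!list)
		return 1;
	for (size_t i = 0; i < list->n_channels; i++)
		printf("channel %d\n", list->channels[i]);
	free(list);
	return 0;
}
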
+
+static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
+ enum nl80211_band band,
+ u16 phy_chan_num)
+{
+ struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band];
+ int chan_idx;
+
+ if (WARN_ON_ONCE(!sband))
+ return -EINVAL;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel = &sband->channels[chan_idx];
+
+ if (channel->hw_value == phy_chan_num)
+ return chan_idx;
+ }
+
+ return -EINVAL;
+}
+
+static u32 iwl_mvm_div_by_db(u32 value, u8 db)
+{
+ /*
+ * 2^32 * 10**(-i / 10) for i = [1, 10], skipping 0 and simply stopping
+ * at 10 dB and looping instead of using a much larger table.
+ *
+ * Using 64 bit math is overkill, but means the helper does not require
+ * a limit on the input range.
+ */
+ static const u32 db_to_val[] = {
+ 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
+ 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
+ };
+
+ while (value && db > 0) {
+ u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val));
+
+ value = (((u64)value) * db_to_val[change - 1]) >> 32;
+
+ db -= change;
+ }
+
+ return value;
+}
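
The table-driven divider above relies on each db_to_val[i-1] entry being 2^32 * 10^(-i/10), so one 32x32->64 multiply plus a 32-bit right shift attenuates a value by i dB, and larger attenuations are handled 10 dB at a time. Below is a small userspace sketch (not driver code, names illustrative) that mirrors the arithmetic and compares it with a floating-point reference:

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Same constants as above: round(2^32 * 10^(-i/10)) for i = 1..10 */
static const uint32_t db_to_val[] = {
	0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
	0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
};

/* Mirror of the kernel helper: divide 'value' by 10^(db/10) */
static uint32_t div_by_db(uint32_t value, uint8_t db)
{
	while (value && db > 0) {
		uint8_t step = db < 10 ? db : 10;

		value = ((uint64_t)value * db_to_val[step - 1]) >> 32;
		db -= step;
	}
	return value;
}

int main(void)
{
	for (int db = 1; db <= 25; db++) {
		uint32_t approx = div_by_db(0x10000, db);	/* 1.0 in 16.16 */
		double exact = 0x10000 * pow(10.0, -db / 10.0);

		printf("%2d dB: fixed=%u exact=%.1f\n", db, approx, exact);
	}
	return 0;
}
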
+
+VISIBLE_IF_IWLWIFI_KUNIT s8
+iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif)
+{
+ s8 average_magnitude;
+ u32 average_factor;
+ s8 sum_magnitude = -128;
+ u32 sum_factor = 0;
+ int i, count = 0;
+
+ /*
+ * To properly average the decibel values (signal values given in dBm)
+ * we need to do the math in linear space. Doing a linear average of
+ * dB (dBm) values is a bit annoying though due to the large range of
+ * at least -10 to -110 dBm that will not fit into a 32 bit integer.
+ *
+ * A 64 bit integer should be sufficient, but then we still have the
+ * problem that there are no directly usable utility functions
+ * available.
+ *
+ * So, let's not deal with that and instead do much of the calculation
+ * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit
+ * gives us plenty of head-room for adding up a few values and even
+ * doing some math on it. And the tail should be accurate enough too
+ * (1/2^16 is somewhere around -48 dB, so effectively zero).
+ *
+ * i.e. the real value of sum is:
+ * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW
+ *
+ * However, that does mean we need to be able to bring two values to
+ * a common base, so we need a helper for that.
+ *
+ * Note that this function takes an input with unsigned negative dBm
+ * values but returns a signed dBm (i.e. a negative value).
+ */
+
+ for (i = 0; i < ARRAY_SIZE(notif->noise); i++) {
+ s8 val_magnitude;
+ u32 val_factor;
+
+ if (notif->noise[i] == 0xff)
+ continue;
+
+ val_factor = 0x10000;
+ val_magnitude = -notif->noise[i];
+
+ if (val_magnitude <= sum_magnitude) {
+ u8 div_db = sum_magnitude - val_magnitude;
+
+ val_factor = iwl_mvm_div_by_db(val_factor, div_db);
+ val_magnitude = sum_magnitude;
+ } else {
+ u8 div_db = val_magnitude - sum_magnitude;
+
+ sum_factor = iwl_mvm_div_by_db(sum_factor, div_db);
+ sum_magnitude = val_magnitude;
+ }
+
+ sum_factor += val_factor;
+ count++;
+ }
+
+ /* No valid noise measurement, return a very high noise level */
+ if (count == 0)
+ return 0;
+
+ average_magnitude = sum_magnitude;
+ average_factor = sum_factor / count;
+
+ /*
+ * average_factor will be a number smaller than 1.0 (0x10000) at this
+ * point. What we need to do now is to adjust average_magnitude so that
+ * average_factor is between -0.5 dB and 0.5 dB.
+ *
+ * Just do -1 dB steps and find the point where
+ *   0x10000 * 10^(-(i + 0.5) / 10) = div_by_db(0xe429, i)
+ * is smaller than average_factor.
+ */
+ for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) {
+ /* nothing */
+ }
+
+ return average_magnitude - i;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values);
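
To see what the fixed-point averaging above computes, here is a plain floating-point cross-check that does the same thing the long way: convert each dBm sample to linear milliwatts, average, and convert back. It is only a sketch, not the driver's code; the sample values are chosen to mirror the KUnit cases added in tests/scan.c further below:

#include <stdio.h>
#include <math.h>

/* Floating-point reference for the fixed-point averaging above */
static double average_dbm(const double *dbm, int n)
{
	double sum_mw = 0.0;
	int i;

	for (i = 0; i < n; i++)
		sum_mw += pow(10.0, dbm[i] / 10.0);	/* dBm -> mW */

	return 10.0 * log10(sum_mw / n);		/* mean mW -> dBm */
}

int main(void)
{
	/*
	 * One -75 dBm sample plus seventeen -80 dBm samples averages to
	 * about -79.5 dBm; the driver's -0.5 dB rounding step maps that
	 * to -80, matching the KUnit case further below.
	 */
	double samples[18] = { -75 };
	int i;

	for (i = 1; i < 18; i++)
		samples[i] = -80;

	printf("average = %.2f dBm\n", average_dbm(samples, 18));
	return 0;
}
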
+
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_umac_scan_channel_survey_notif *notif =
+ (void *)pkt->data;
+ struct iwl_mvm_acs_survey_channel *info;
+ enum nl80211_band band;
+ int chan_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ size_t n_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+
+ mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey,
+ channels, n_channels),
+ GFP_KERNEL);
+
+ if (!mvm->acs_survey)
+ return;
+
+ mvm->acs_survey->n_channels = n_channels;
+ n_channels = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ mvm->acs_survey->bands[band] =
+ &mvm->acs_survey->channels[n_channels];
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+ }
+
+ band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band));
+ chan_idx = iwl_mvm_chanidx_from_phy(mvm, band,
+ le32_to_cpu(notif->channel));
+ if (WARN_ON_ONCE(chan_idx < 0))
+ return;
+
+ IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n",
+ mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq);
+
+ info = &mvm->acs_survey->bands[band][chan_idx];
+
+ /* Times are all in ms */
+ info->time = le32_to_cpu(notif->active_time);
+ info->time_busy = le32_to_cpu(notif->busy_time);
+ info->time_rx = le32_to_cpu(notif->rx_time);
+ info->time_tx = le32_to_cpu(notif->tx_time);
+ info->noise = iwl_mvm_average_dbm_values(notif);
+}
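
The lazy acs_survey allocation above carves one flexible channels[] array into per-band slices, so a (band, channel index) pair maps to a single entry. A compact userspace sketch of that layout; the per-band channel counts are only illustrative:

#include <stdio.h>
#include <stdlib.h>

#define NUM_BANDS 3

/* One allocation for all channels, bands[] just points into it */
struct survey {
	int *bands[NUM_BANDS];
	size_t n_channels;
	int channels[];
};

int main(void)
{
	size_t per_band[NUM_BANDS] = { 14, 32, 59 };	/* rough 2.4/5/6 GHz counts */
	size_t total = 0, off = 0;
	struct survey *s;

	for (int b = 0; b < NUM_BANDS; b++)
		total += per_band[b];

	s = calloc(1, sizeof(*s) + total * sizeof(s->channels[0]));
	if (!s)
		return 1;
	s->n_channels = total;

	for (int b = 0; b < NUM_BANDS; b++) {
		s->bands[b] = &s->channels[off];
		off += per_band[b];
	}

	/* e.g. third channel of band 1 aliases channels[14 + 2] */
	s->bands[1][2] = 7;
	printf("%d\n", s->channels[16]);
	free(s);
	return 0;
}
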
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 491c449fd431..20d4968d692a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1847,6 +1847,18 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+ /* MPDUs are counted only when EMLSR is possible */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !sta->tdls && ieee80211_vif_is_mld(vif)) {
+ mvm_sta->mpdu_counters =
+ kcalloc(mvm->trans->num_rx_queues,
+ sizeof(*mvm_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (mvm_sta->mpdu_counters)
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++)
+ spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
+ }
+
return 0;
}
@@ -4392,3 +4404,77 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
+
+static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
+ u8 fw_sta_id)
+{
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
+ struct iwl_mvm_vif_link_info *link;
+
+ if (WARN_ON_ONCE(!link_sta))
+ return -EINVAL;
+
+ link = mvmvif->link[link_sta->link_id];
+
+ if (WARN_ON_ONCE(!link))
+ return -EINVAL;
+
+ return link->fw_link_id;
+}
+
+#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
+
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
+ struct iwl_mvm_tpt_counter *queue_counter;
+ struct iwl_mvm_mpdu_counter *link_counter;
+ u32 total_mpdus = 0;
+ int fw_link_id;
+
+ /* Count only for a BSS sta, and only when EMLSR is possible */
+ if (!mvm_sta->mpdu_counters)
+ return;
+
+ /* Map sta id to link id */
+ fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
+ if (fw_link_id < 0)
+ return;
+
+ queue_counter = &mvm_sta->mpdu_counters[queue];
+ link_counter = &queue_counter->per_link[fw_link_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+ /*
+ * When not in EMLSR, the counting window and the decision to enter
+ * EMLSR are handled here, during counting; when in EMLSR, they are
+ * handled in the statistics flow.
+ */
+ if (mvmvif->esr_active)
+ goto out;
+
+ if (time_is_before_jiffies(queue_counter->window_start +
+ IWL_MVM_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start = jiffies;
+ }
+
+ for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
+ wiphy_work_queue(mvmvif->mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tpt_wk);
+
+out:
+ spin_unlock_bh(&queue_counter->lock);
+}
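
iwl_mvm_count_mpdu() above keeps a per-queue counting window: once the window is older than IWL_MVM_TPT_COUNT_WINDOW the counters restart, and crossing IWL_MVM_ENTER_ESR_TPT_THRESH queues the unblock work. A standalone sketch of that window-and-threshold logic follows, with purely illustrative constants and a printf standing in for the work item:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define WINDOW_SEC	5	/* illustrative; stands in for IWL_MVM_TPT_COUNT_WINDOW_SEC */
#define THRESH		400	/* illustrative; stands in for IWL_MVM_ENTER_ESR_TPT_THRESH */

struct tpt_counter {
	uint64_t tx, rx;
	time_t window_start;
};

static void count_mpdu(struct tpt_counter *c, uint32_t count, int tx)
{
	time_t now = time(NULL);

	/* Window expired: start a fresh counting window */
	if (now - c->window_start > WINDOW_SEC) {
		c->tx = c->rx = 0;
		c->window_start = now;
	}

	if (tx)
		c->tx += count;
	else
		c->rx += count;

	/* Crossing the threshold would queue the unblock work item */
	if (c->tx + c->rx > THRESH)
		printf("threshold crossed in this window\n");
}

int main(void)
{
	struct tpt_counter c = { .window_start = time(NULL) };

	for (int i = 0; i < 50; i++)
		count_mpdu(&c, 10, i & 1);
	return 0;
}
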
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b3450569864e..264f1f9394b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -347,6 +347,24 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
+struct iwl_mvm_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mvm_tpt_counter - per-queue MPDU counter
+ *
+ * @lock: protects the counters, which may also be updated from the statistics flow.
+ * @per_link: per-link counters.
+ * @window_start: timestamp (in jiffies) of the counting-window start.
+ */
+struct iwl_mvm_tpt_counter {
+ spinlock_t lock;
+ struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
+ unsigned long window_start;
+} ____cacheline_aligned_in_smp;
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @vif: the interface the station belongs to
@@ -394,6 +412,7 @@ struct iwl_mvm_link_sta {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
+ * @mpdu_counters: RX/TX MPDU counters, one set per Rx queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -433,6 +452,8 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -514,6 +535,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
new file mode 100644
index 000000000000..6bd56a28cffd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -0,0 +1,3 @@
+iwlmvm-tests-y += module.o links.o scan.o
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
new file mode 100644
index 000000000000..f49e3c98b1ba
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for link grading and link-pair validation helpers
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/mac80211.h>
+#include "../mvm.h"
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct wiphy wiphy = {
+ .mtx = __MUTEX_INITIALIZER(wiphy.mtx),
+};
+
+static struct ieee80211_hw hw = {
+ .wiphy = &wiphy,
+};
+
+static struct ieee80211_channel chan_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+};
+
+static struct ieee80211_channel chan_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+};
+
+static struct ieee80211_channel chan_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+};
+
+static struct cfg80211_chan_def chandef_a = {};
+
+static struct cfg80211_chan_def chandef_b = {};
+
+static struct iwl_mvm_phy_ctxt ctx = {};
+
+static struct iwl_mvm_vif_link_info mvm_link = {
+ .phy_ctxt = &ctx,
+ .active = true
+};
+
+static struct cfg80211_bss bss = {};
+
+static struct ieee80211_bss_conf link_conf = {.bss = &bss};
+
+static const struct iwl_fw_cmd_version entry = {
+ .group = LEGACY_GROUP,
+ .cmd = BT_PROFILE_NOTIFICATION,
+ .notif_ver = 4
+};
+
+static struct iwl_fw fw = {
+ .ucode_capa = {
+ .n_cmd_versions = 1,
+ .cmd_versions = &entry,
+ },
+};
+
+static struct iwl_mvm mvm = {
+ .hw = &hw,
+ .fw = &fw,
+};
+
+static const struct link_grading_case {
+ const char *desc;
+ const struct cfg80211_chan_def chandef;
+ s32 signal;
+ s16 channel_util;
+ int chan_load_by_us;
+ unsigned int grade;
+} link_grading_cases[] = {
+ {
+ .desc = "UHB, RSSI below range, no factors",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -100,
+ .grade = 177,
+ },
+ {
+ .desc = "LB, RSSI in range, no factors",
+ .chandef = {
+ .chan = &chan_2ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -84,
+ .grade = 344,
+ },
+ {
+ .desc = "HB, RSSI above range, no factors",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -50,
+ .grade = 3442,
+ },
+ {
+ .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -66,
+ .channel_util = 51,
+ .grade = 1836,
+ },
+ {
+ .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor",
+ .chandef = {
+ .chan = &chan_2ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -61,
+ .channel_util = 51,
+ .chan_load_by_us = 10,
+ .grade = 2061,
+ },
+ {
+ .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -66,
+ .channel_util = 102,
+ .chan_load_by_us = 50,
+ .grade = 1552,
+ },
+ { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_80,
+ .punctured = 0x0000
+ },
+ .signal = -72,
+ .grade = 1750,
+ },
+ { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_160,
+ .punctured = 0x3
+ },
+ .signal = -72,
+ .grade = 1312,
+ },
+ { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_320,
+ .punctured = 0x3
+ },
+ .signal = -72,
+ .grade = 1806,
+ },
+ { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_160,
+ .punctured = 0x3
+ },
+ .channel_util = 51,
+ .chan_load_by_us = 10,
+ .signal = -72,
+ .grade = 1179,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
+
+static void setup_link_conf(struct kunit *test)
+{
+ const struct link_grading_case *params = test->param_value;
+ size_t vif_size = sizeof(struct ieee80211_vif) +
+ sizeof(struct iwl_mvm_vif);
+ struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
+ struct ieee80211_bss_load_elem *bss_load;
+ struct element *element;
+ size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element);
+ struct cfg80211_bss_ies *ies;
+ struct iwl_mvm_vif *mvmvif;
+
+ KUNIT_ASSERT_NOT_NULL(test, vif);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (params->chan_load_by_us > 0) {
+ ctx.channel_load_by_us = params->chan_load_by_us;
+ mvmvif->link[0] = &mvm_link;
+ }
+
+ link_conf.vif = vif;
+ link_conf.chanreq.oper = params->chandef;
+ bss.signal = DBM_TO_MBM(params->signal);
+
+ ies = kunit_kzalloc(test, ies_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ies);
+ ies->len = sizeof(*bss_load) + sizeof(struct element);
+
+ element = (void *)ies->data;
+ element->datalen = sizeof(*bss_load);
+ element->id = 11; /* WLAN_EID_QBSS_LOAD */
+
+ bss_load = (void *)element->data;
+ bss_load->channel_util = params->channel_util;
+
+ rcu_assign_pointer(bss.ies, ies);
+}
+
+static void test_link_grading(struct kunit *test)
+{
+ const struct link_grading_case *params = test->param_value;
+ unsigned int ret;
+
+ setup_link_conf(test);
+
+ rcu_read_lock();
+ ret = iwl_mvm_get_link_grade(&link_conf);
+ rcu_read_unlock();
+
+ KUNIT_EXPECT_EQ(test, ret, params->grade);
+
+ kunit_kfree(test, link_conf.vif);
+ RCU_INIT_POINTER(bss.ies, NULL);
+}
+
+static struct kunit_case link_grading_test_cases[] = {
+ KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
+ {}
+};
+
+static struct kunit_suite link_grading = {
+ .name = "iwlmvm-link-grading",
+ .test_cases = link_grading_test_cases,
+};
+
+kunit_test_suite(link_grading);
+
+static const struct valid_link_pair_case {
+ const char *desc;
+ bool bt;
+ struct ieee80211_channel *chan_a;
+ struct ieee80211_channel *chan_b;
+ enum nl80211_chan_width cw_a;
+ enum nl80211_chan_width cw_b;
+ s32 sig_a;
+ s32 sig_b;
+ bool csa_a;
+ bool valid;
+} valid_link_pair_cases[] = {
+ {
+ .desc = "HB + UHB, valid.",
+ .chan_a = &chan_6ghz,
+ .chan_b = &chan_5ghz,
+ .valid = true,
+ },
+ {
+ .desc = "LB + HB, no BT.",
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "LB + HB, with BT.",
+ .bt = true,
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "Same band",
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_2ghz,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: LB, 20 MHz, low",
+ .chan_a = &chan_2ghz,
+ .cw_a = NL80211_CHAN_WIDTH_20,
+ .sig_a = -68,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 20 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_20,
+ .sig_a = -66,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_20,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 40 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_40,
+ .sig_a = -65,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_40,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 40 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_40,
+ .sig_a = -63,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_40,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 80 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_80,
+ .sig_a = -62,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_80,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 80 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_80,
+ .sig_a = -60,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_80,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 160 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -59,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: HB, 160 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -5,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = true,
+ },
+ {
+ .desc = "CSA active",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -5,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = false,
+ /* same as previous entry with valid=true except for CSA */
+ .csa_a = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc)
+
+static void test_valid_link_pair(struct kunit *test)
+{
+ const struct valid_link_pair_case *params = test->param_value;
+ size_t vif_size = sizeof(struct ieee80211_vif) +
+ sizeof(struct iwl_mvm_vif);
+ struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
+ struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans),
+ GFP_KERNEL);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_link_sel_data link_a = {
+ .chandef = &chandef_a,
+ .link_id = 1,
+ .signal = params->sig_a,
+ };
+ struct iwl_mvm_link_sel_data link_b = {
+ .chandef = &chandef_b,
+ .link_id = 5,
+ .signal = params->sig_b,
+ };
+ struct ieee80211_bss_conf *conf;
+ bool result;
+
+ KUNIT_ASSERT_NOT_NULL(test, vif);
+ KUNIT_ASSERT_NOT_NULL(test, trans);
+
+ chandef_a.chan = params->chan_a;
+ chandef_b.chan = params->chan_b;
+
+ chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
+ chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
+
+#ifdef CONFIG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
+ trans->dbg_cfg = default_dbg_config;
+#endif
+ mvm.trans = trans;
+
+ mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
+ mvmvif->mvm = &mvm;
+
+ conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, conf);
+ conf->chanreq.oper = chandef_a;
+ conf->csa_active = params->csa_a;
+ vif->link_conf[link_a.link_id] = (void __rcu *)conf;
+
+ conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, conf);
+ conf->chanreq.oper = chandef_b;
+ vif->link_conf[link_b.link_id] = (void __rcu *)conf;
+
+ wiphy_lock(&wiphy);
+ result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b);
+ wiphy_unlock(&wiphy);
+
+ KUNIT_EXPECT_EQ(test, result, params->valid);
+
+ kunit_kfree(test, vif);
+ kunit_kfree(test, trans);
+}
+
+static struct kunit_case valid_link_pair_test_cases[] = {
+ KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params),
+ {},
+};
+
+static struct kunit_suite valid_link_pair = {
+ .name = "iwlmvm-valid-link-pair",
+ .test_cases = valid_link_pair_test_cases,
+};
+
+kunit_test_suite(valid_link_pair);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c
new file mode 100644
index 000000000000..f556acbac77f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is just module boilerplate for the iwlmvm kunit module.
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlmvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c
new file mode 100644
index 000000000000..d3b6a57c3ebe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for scan-related helper functions
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/mac80211.h>
+#include "../mvm.h"
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static const struct acs_average_db_case {
+ const char *desc;
+ u8 neg_dbm[22];
+ s8 result;
+} acs_average_db_cases[] = {
+ {
+ .desc = "Smallest possible value, all filled",
+ .neg_dbm = {
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128
+ },
+ .result = -128,
+ },
+ {
+ .desc = "Biggest possible value, all filled",
+ .neg_dbm = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ },
+ .result = 0,
+ },
+ {
+ .desc = "Smallest possible value, partial filled",
+ .neg_dbm = {
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -128,
+ },
+ {
+ .desc = "Biggest possible value, partial filled",
+ .neg_dbm = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = 0,
+ },
+ {
+ .desc = "Adding -80dBm to -75dBm until it is still rounded to -79dBm",
+ .neg_dbm = {
+ 75, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -79,
+ },
+ {
+ .desc = "Adding -80dBm to -75dBm until it is just rounded to -80dBm",
+ .neg_dbm = {
+ 75, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -80,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(acs_average_db, acs_average_db_cases, desc)
+
+static void test_acs_average_db(struct kunit *test)
+{
+ const struct acs_average_db_case *params = test->param_value;
+ struct iwl_umac_scan_channel_survey_notif notif;
+ int i;
+
+ /* Test the values in the given order */
+ for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
+ notif.noise[i] = params->neg_dbm[i];
+ KUNIT_ASSERT_EQ(test,
+ iwl_mvm_average_dbm_values(&notif),
+ params->result);
+
+ /* Test in reverse order */
+ for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
+ notif.noise[ARRAY_SIZE(params->neg_dbm) - i - 1] =
+ params->neg_dbm[i];
+ KUNIT_ASSERT_EQ(test,
+ iwl_mvm_average_dbm_values(&notif),
+ params->result);
+}
+
+static struct kunit_case acs_average_db_case[] = {
+ KUNIT_CASE_PARAM(test_acs_average_db, acs_average_db_gen_params),
+ {}
+};
+
+static struct kunit_suite acs_average_db = {
+ .name = "iwlmvm-acs-average-db",
+ .test_cases = acs_average_db_case,
+};
+
+kunit_test_suite(acs_average_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a59d264a11c5..8ee4498f4245 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,6 +47,10 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
+ struct ieee80211_vif *vif = mvm->p2p_device_vif;
+
+ lockdep_assert_held(&mvm->mutex);
+
/*
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
@@ -70,9 +74,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
* not really racy.
*/
- if (!WARN_ON(!mvm->p2p_device_vif)) {
- struct ieee80211_vif *vif = mvm->p2p_device_vif;
-
+ if (!WARN_ON(!vif)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
mvmvif->deflink.bcast_sta.tfd_queue_msk);
@@ -106,6 +108,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
+ mutex_unlock(&mvm->mutex);
return;
}
@@ -115,6 +118,10 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+
+ mutex_unlock(&mvm->mutex);
+ if (vif)
+ iwl_mvm_esr_non_bss_link(mvm, vif, 0, false);
}
void iwl_mvm_roc_done_wk(struct work_struct *wk)
@@ -122,8 +129,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
mutex_lock(&mvm->mutex);
+ /* The mutex is released inside iwl_mvm_cleanup_roc() */
iwl_mvm_cleanup_roc(mvm);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -879,9 +886,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
- iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(MAC_CONF_GROUP,
- SESSION_PROTECTION_CMD), 2);
+ iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ SESSION_PROTECTION_NOTIF, 2);
int id = le32_to_cpu(notif->mac_link_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
@@ -1221,6 +1227,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ mutex_lock(&mvm->mutex);
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1264,6 +1272,8 @@ cleanup_roc:
set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
&mvm->status);
+
+ /* The mutex is released inside iwl_mvm_cleanup_roc() */
iwl_mvm_cleanup_roc(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 782ddc8c296b..1d695ece93e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1870,6 +1870,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -2247,9 +2248,13 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate), false);
}
- if (mvmsta)
+ if (mvmsta) {
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
+
+ iwl_mvm_count_mpdu(mvmsta, sta_id,
+ le16_to_cpu(ba_res->txed), true, 0);
+ }
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index ab56ff87c6f9..47283a358ffd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -344,6 +344,26 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
return true;
}
+#define PERIODIC_STAT_RATE 5
+
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
+{
+ u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = cpu_to_le32(flags),
+ .config_time_sec = cpu_to_le32(enable ?
+ PERIODIC_STAT_RATE : 0),
+ .type_id_mask = cpu_to_le32(type),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ 0, sizeof(system_cmd), &system_cmd);
+}
+
static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
u8 cmd_ver)
{
@@ -415,6 +435,13 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
IWL_FW_CMD_VER_UNKNOWN);
int ret;
+ /*
+ * Don't request statistics during restart: right after a restart they
+ * won't contain any useful information, and clearing isn't needed either.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index c8fc8b4fd85c..ebf11f276b20 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,13 +1,34 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
+#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"
+static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+static bool iwl_is_force_scu_active_approved(void)
+{
+ return !!dmi_check_system(dmi_force_scu_active_approved_list);
+}
+
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
@@ -128,6 +149,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
if (trans->trans_cfg->imr_enabled)
control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iwl_is_force_scu_active_approved()) {
+ control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
+ IWL_DEBUG_FW(trans,
+ "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
+ }
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4a657036b9d6..fed2754be680 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -33,7 +33,7 @@ extern int _invalid_type;
.driver_data = _ASSIGN_CFG(cfg)
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static const struct pci_device_id iwl_hw_card_ids[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CONFIG_IWLDVM)
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
@@ -492,7 +492,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
@@ -506,6 +505,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
@@ -517,6 +517,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
_rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
@@ -946,11 +947,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax221_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
@@ -1002,18 +998,25 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* Bz */
+/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_bz_name),
+ iwl_cfg_bz, iwl_fm_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_bz_name),
+ iwl_cfg_gl, iwl_gl_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1100,24 +1103,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-/* MsP */
-/* For now we use the same FW as MR, but this will change in the future. */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax204_name),
-
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
@@ -1150,6 +1135,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
{
u32 sd_reg_ver_addr;
u32 val = 0;
+ u8 step;
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
@@ -1168,16 +1154,23 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* For BZ-W, take B step also when A step is indicated */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+ step = SILICON_B_STEP;
+
/* In BZ, the MAC step must be read from the CNVI aux register */
if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
- u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+ step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
/* For BZ-U, take B step also when A step is indicated */
if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
step == SILICON_A_STEP)
step = SILICON_B_STEP;
+ }
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
iwl_trans->hw_rev_step = step;
iwl_trans->hw_rev |= step;
}
@@ -1224,12 +1217,7 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
case REG_CRF_ID_TYPE_GF:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
break;
- case REG_CRF_ID_TYPE_MR:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12);
- break;
case REG_CRF_ID_TYPE_FM:
- case REG_CRF_ID_TYPE_FMI:
- case REG_CRF_ID_TYPE_FMR:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
break;
case REG_CRF_ID_TYPE_WHP:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 7805a42948af..a7eebe400b5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -386,7 +386,7 @@ struct iwl_trans_pcie {
dma_addr_t iml_dma_addr;
struct iwl_trans *trans;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
/* INT ICT Table */
__le32 *ict_tbl;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 9c2461ba13c5..984d7bcd381f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1000,6 +1000,11 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
+{
+ return *(struct iwl_trans_pcie **)netdev_priv(dev);
+}
+
static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
@@ -1007,7 +1012,7 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1034,7 +1039,7 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1131,7 +1136,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
if (trans_pcie->msix_enabled)
poll = iwl_pcie_napi_poll_msix;
- netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+ netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
poll);
napi_enable(&rxq->napi);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6c76b2dd6878..d5a887b3a4bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1986,13 +1986,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
- /* Initialize NAPI here - it should be before registering to mac80211
- * in the opmode but after the HW struct is allocated.
- * As this function may be called again in some corner cases don't
- * do anything if NAPI was already initialized.
- */
- if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
- init_dummy_netdev(&trans_pcie->napi_dev);
trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
@@ -2074,6 +2067,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_ict(trans);
}
+ free_netdev(trans_pcie->napi_dev);
+
iwl_pcie_free_invalid_tx_cmd(trans);
iwl_pcie_free_fw_monitor(trans);
@@ -3594,7 +3589,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans)
{
- struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
@@ -3623,6 +3618,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ */
+ trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
+ if (!trans_pcie->napi_dev) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+ /* The private struct in netdev is a pointer to struct iwl_trans_pcie */
+ priv = netdev_priv(trans_pcie->napi_dev);
+ *priv = trans_pcie;
+
trans_pcie->trans = trans;
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
@@ -3637,7 +3644,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!trans_pcie->rba.alloc_wq) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_ndev;
}
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
@@ -3757,6 +3764,8 @@ out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_ndev:
+ free_netdev(trans_pcie->napi_dev);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
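
The dummy netdev is now heap-allocated with a private area just large enough for one pointer, and the NAPI poll routines recover the iwl_trans_pcie through iwl_netdev_to_trans_pcie() (see the rx.c hunk above). A userspace sketch of that back-pointer arrangement, with all names illustrative:

#include <stdio.h>
#include <stdlib.h>

struct fake_netdev {
	int ifindex;			/* placeholder for the real fields */
	unsigned long priv_area[];	/* opaque, suitably aligned private data */
};

struct owner {
	int id;
	struct fake_netdev *napi_dev;
};

static void *fake_netdev_priv(struct fake_netdev *dev)
{
	return dev->priv_area;
}

static struct owner *netdev_to_owner(struct fake_netdev *dev)
{
	/* The private area holds exactly one pointer back to the owner */
	return *(struct owner **)fake_netdev_priv(dev);
}

int main(void)
{
	struct owner *own = calloc(1, sizeof(*own));
	struct owner **priv;

	if (!own)
		return 1;
	own->id = 42;
	own->napi_dev = calloc(1, sizeof(struct fake_netdev) + sizeof(struct owner *));
	if (!own->napi_dev)
		return 1;

	priv = fake_netdev_priv(own->napi_dev);
	*priv = own;			/* mirrors "*priv = trans_pcie" above */

	printf("recovered owner id=%d\n", netdev_to_owner(own->napi_dev)->id);

	free(own->napi_dev);
	free(own);
	return 0;
}
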
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 33973a60d0bf..6229c785c845 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1589,9 +1589,9 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
return;
tfd_num = iwl_txq_get_cmd_index(txq, ssn);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
spin_lock_bh(&txq->lock);
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 5658471bdf0a..84491488f589 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -2,6 +2,6 @@
iwlwifi-tests-y += module.o devinfo.o
-ccflags-y += -I$(srctree)/$(src)/../
+ccflags-y += -I$(src)/../
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index 7aa47fce6e2d..7361b6d0cdb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,9 +2,10 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
+#include <linux/pci.h>
#include "iwl-drv.h"
#include "iwl-config.h"
@@ -41,8 +42,31 @@ static void devinfo_table_order(struct kunit *test)
}
}
+static void devinfo_pci_ids(struct kunit *test)
+{
+ struct pci_dev *dev;
+
+ dev = kunit_kmalloc(test, sizeof(*dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dev);
+
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct pci_device_id *s, *t;
+
+ s = &iwl_hw_card_ids[i];
+ dev->vendor = s->vendor;
+ dev->device = s->device;
+ dev->subsystem_vendor = s->subvendor;
+ dev->subsystem_device = s->subdevice;
+ dev->class = s->class;
+
+ t = pci_match_id(iwl_hw_card_ids, dev);
+ KUNIT_EXPECT_PTR_EQ(test, t, s);
+ }
+}
+
static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_table_order),
+ KUNIT_CASE(devinfo_pci_ids),
{}
};
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 75f53c2f1e1f..bda9b2b8a1f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -979,7 +979,6 @@ static struct sdio_driver mwifiex_sdio = {
.probe = mwifiex_sdio_probe,
.remove = mwifiex_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.coredump = mwifiex_sdio_coredump,
.pm = &mwifiex_sdio_pm_ops,
}
@@ -3183,7 +3182,7 @@ static struct mwifiex_if_ops sdio_ops = {
.up_dev = mwifiex_sdio_up_dev,
};
-module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver);
+module_sdio_driver(mwifiex_sdio);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
@@ -3192,6 +3191,7 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8801_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index ce8fea76dbb2..d3d07bb26335 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -587,12 +587,14 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
}
struct mwl8k_cmd_pkt {
- __le16 code;
- __le16 length;
- __u8 seq_num;
- __u8 macid;
- __le16 result;
- char payload[];
+ __struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed,
+ __le16 code;
+ __le16 length;
+ __u8 seq_num;
+ __u8 macid;
+ __le16 result;
+ );
+ char payload[];
} __packed;
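
The __struct_group() change above lets the fixed command header be referenced either inline in struct mwl8k_cmd_pkt or as the standalone struct mwl8k_cmd_pkt_hdr that the per-command structs now embed. Conceptually (simplified, not the exact kernel macro expansion) it behaves like this sketch:

#include <stdio.h>
#include <stdint.h>

/* Named, reusable header type */
struct cmd_pkt_hdr {
	uint16_t code;
	uint16_t length;
	uint8_t  seq_num;
	uint8_t  macid;
	uint16_t result;
} __attribute__((packed));

/* The same members exist anonymously and as the named 'hdr' view */
struct cmd_pkt {
	union {
		struct {
			uint16_t code;
			uint16_t length;
			uint8_t  seq_num;
			uint8_t  macid;
			uint16_t result;
		} __attribute__((packed));
		struct cmd_pkt_hdr hdr;
	};
	char payload[];
} __attribute__((packed));

int main(void)
{
	struct cmd_pkt pkt = { .hdr = { .code = 0x1234 } };

	/* Both views alias the same bytes */
	printf("code via outer struct: 0x%04x\n", pkt.code);
	printf("header size: %zu\n", sizeof(struct cmd_pkt_hdr));
	return 0;
}
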
/*
@@ -2201,7 +2203,7 @@ static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable,
/* Timeout firmware commands after 10s */
#define MWL8K_CMD_TIMEOUT_MS 10000
-static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
+static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd)
{
DECLARE_COMPLETION_ONSTACK(cmd_wait);
struct mwl8k_priv *priv = hw->priv;
@@ -2298,7 +2300,7 @@ exit:
static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct mwl8k_cmd_pkt *cmd)
+ struct mwl8k_cmd_pkt_hdr *cmd)
{
if (vif != NULL)
cmd->macid = MWL8K_VIF(vif)->macid;
@@ -2350,7 +2352,7 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
* CMD_GET_HW_SPEC (STA version).
*/
struct mwl8k_cmd_get_hw_spec_sta {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
@@ -2499,7 +2501,7 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
* CMD_GET_HW_SPEC (AP version).
*/
struct mwl8k_cmd_get_hw_spec_ap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_wcb;
@@ -2593,7 +2595,7 @@ done:
* CMD_SET_HW_SPEC.
*/
struct mwl8k_cmd_set_hw_spec {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
@@ -2670,7 +2672,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
* CMD_MAC_MULTICAST_ADR.
*/
struct mwl8k_cmd_mac_multicast_adr {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 numaddr;
__u8 addr[][ETH_ALEN];
@@ -2681,7 +2683,7 @@ struct mwl8k_cmd_mac_multicast_adr {
#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004
#define MWL8K_ENABLE_RX_BROADCAST 0x0008
-static struct mwl8k_cmd_pkt *
+static struct mwl8k_cmd_pkt_hdr *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
struct netdev_hw_addr_list *mc_list)
{
@@ -2718,7 +2720,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
netdev_hw_addr_list_for_each(ha, mc_list) {
- memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
+ memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
}
}
@@ -2729,7 +2731,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
* CMD_GET_STAT.
*/
struct mwl8k_cmd_get_stat {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 stats[64];
} __packed;
@@ -2771,7 +2773,7 @@ static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
* CMD_RADIO_CONTROL.
*/
struct mwl8k_cmd_radio_control {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 control;
__le16 radio_on;
@@ -2832,7 +2834,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
struct mwl8k_cmd_rf_tx_power {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 support_level;
__le16 current_level;
@@ -2866,7 +2868,7 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
#define MWL8K_TX_POWER_LEVEL_TOTAL 12
struct mwl8k_cmd_tx_power {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 band;
__le16 channel;
@@ -2925,7 +2927,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
* CMD_RF_ANTENNA.
*/
struct mwl8k_cmd_rf_antenna {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 antenna;
__le16 mode;
} __packed;
@@ -2958,7 +2960,7 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
* CMD_SET_BEACON.
*/
struct mwl8k_cmd_set_beacon {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 beacon_len;
__u8 beacon[];
};
@@ -2988,7 +2990,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
} __packed;
static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
@@ -3013,7 +3015,7 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
* CMD_BBP_REG_ACCESS.
*/
struct mwl8k_cmd_bbp_reg_access {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 offset;
u8 value;
@@ -3054,7 +3056,7 @@ mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
* CMD_SET_POST_SCAN.
*/
struct mwl8k_cmd_set_post_scan {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 isibss;
__u8 bssid[ETH_ALEN];
} __packed;
@@ -3142,7 +3144,7 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
* CMD_SET_RF_CHANNEL.
*/
struct mwl8k_cmd_set_rf_channel {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__u8 current_channel;
__le32 channel_flags;
@@ -3211,7 +3213,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 aid;
/* AP's MAC address (BSSID) */
@@ -3283,7 +3285,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
* CMD_SET_RATE.
*/
struct mwl8k_cmd_set_rate {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 legacy_rates[14];
/* Bitmap for supported MCS codes. */
@@ -3319,7 +3321,7 @@ mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
#define MWL8K_FJ_BEACON_MAXLEN 128
struct mwl8k_cmd_finalize_join {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 sleep_interval; /* Number of beacon periods to sleep */
__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __packed;
@@ -3358,7 +3360,7 @@ static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
* CMD_SET_RTS_THRESHOLD.
*/
struct mwl8k_cmd_set_rts_threshold {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 threshold;
} __packed;
@@ -3388,7 +3390,7 @@ mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
* CMD_SET_SLOT.
*/
struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__u8 short_slot;
} __packed;
@@ -3417,7 +3419,7 @@ static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
* CMD_SET_EDCA_PARAMS.
*/
struct mwl8k_cmd_set_edca_params {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
/* See MWL8K_SET_EDCA_XXX below */
__le16 action;
@@ -3502,7 +3504,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
* CMD_SET_WMM_MODE.
*/
struct mwl8k_cmd_set_wmm_mode {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
} __packed;
@@ -3533,7 +3535,7 @@ static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
* CMD_MIMO_CONFIG.
*/
struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__u8 rx_antenna_map;
__u8 tx_antenna_map;
@@ -3564,7 +3566,7 @@ static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
* CMD_USE_FIXED_RATE (STA version).
*/
struct mwl8k_cmd_use_fixed_rate_sta {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
@@ -3606,7 +3608,7 @@ static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
* CMD_USE_FIXED_RATE (AP version).
*/
struct mwl8k_cmd_use_fixed_rate_ap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
@@ -3647,7 +3649,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
* CMD_ENABLE_SNIFFER.
*/
struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
} __packed;
@@ -3671,7 +3673,7 @@ static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
struct mwl8k_cmd_update_mac_addr {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
union {
struct {
__le16 mac_type;
@@ -3756,7 +3758,7 @@ static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw,
* CMD_SET_RATEADAPT_MODE.
*/
struct mwl8k_cmd_set_rate_adapt_mode {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 mode;
} __packed;
@@ -3785,7 +3787,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
* CMD_GET_WATCHDOG_BITMAP.
*/
struct mwl8k_cmd_get_watchdog_bitmap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
u8 bitmap;
} __packed;
@@ -3865,7 +3867,7 @@ done:
* CMD_BSS_START.
*/
struct mwl8k_cmd_bss_start {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 enable;
} __packed;
@@ -3960,7 +3962,7 @@ struct mwl8k_destroy_ba_stream {
} __packed;
struct mwl8k_cmd_bastream {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
union {
struct mwl8k_create_ba_stream create_params;
@@ -4070,7 +4072,7 @@ static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
* CMD_SET_NEW_STN.
*/
struct mwl8k_cmd_set_new_stn {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 aid;
__u8 mac_addr[6];
__le16 stn_id;
@@ -4206,7 +4208,7 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
#define MIC_KEY_LENGTH 8
struct mwl8k_cmd_update_encryption {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 reserved;
@@ -4216,7 +4218,7 @@ struct mwl8k_cmd_update_encryption {
} __packed;
struct mwl8k_cmd_set_key {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 reserved;
@@ -4504,7 +4506,7 @@ struct peer_capability_info {
} __packed;
struct mwl8k_cmd_update_stadb {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
/* See STADB_ACTION_TYPE */
__le32 action;
@@ -5174,7 +5176,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
- struct mwl8k_cmd_pkt *cmd;
+ struct mwl8k_cmd_pkt_hdr *cmd;
/*
* Synthesize and return a command packet that programs the
@@ -5234,7 +5236,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
+ struct mwl8k_cmd_pkt_hdr *cmd = (void *)(unsigned long)multicast;
/*
* AP firmware doesn't allow fine-grained control over
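The mwl8k.c hunks above rely on __struct_group() from <linux/stddef.h>: the leading command fields are wrapped in a tagged struct, mwl8k_cmd_pkt_hdr, that can be used on its own, while the same fields stay directly addressable from the full packet type, so existing call sites keep working. A minimal kernel-context sketch of the pattern, with invented names:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_cmd {
	/* Declares struct example_cmd_hdr and a member named hdr; the same
	 * fields remain visible as direct members of struct example_cmd.
	 */
	__struct_group(example_cmd_hdr, hdr, __packed,
		__le16 code;
		__le16 length;
	);
	char payload[];
} __packed;

Here cmd->code and cmd->hdr.code name the same storage, and helpers that only need the fixed header can take a struct example_cmd_hdr *, which keeps the flexible payload[] out of sizeof() and out of fortified memcpy() bounds calculations, which is the point of converting mwl8k_post_cmd() and the per-command structures to mwl8k_cmd_pkt_hdr.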
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 72a7bd5a8576..f4f88c444e21 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -532,7 +532,7 @@ error:
}
static int
-mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
@@ -542,6 +542,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_hw *hw;
int len, n = 0, ret = -ENOMEM;
struct mt76_txwi_cache *t;
@@ -549,7 +550,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
dma_addr_t addr;
u8 *txwi;
- if (test_bit(MT76_RESET, &dev->phy.state))
+ if (test_bit(MT76_RESET, &phy->state))
goto free_skb;
t = mt76_get_txwi(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 068206e48aec..e8ba2e4e8484 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -465,6 +465,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
hw->max_tx_fragments > 1) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index a91f6ddacbd9..11b9f22ca7f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -256,7 +256,7 @@ struct mt76_queue_ops {
int idx, int n_desc, int bufsize,
u32 ring_base);
- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta);
@@ -1127,7 +1127,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
-#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 7a2f5d38562b..14304b063715 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -4,6 +4,13 @@
#include "mac.h"
#include "../dma.h"
+static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+};
+
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
@@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
+ u8 tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
- u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
goto free;
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
- val = le32_to_cpu(txd[0]);
- val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
- val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
- txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- if (ieee80211_is_data_qos(hdr->frame_control))
+
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_TAG1D_MASK;
- skb_set_queue_mapping(skb, tid_to_ac[tid]);
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ u8 qid = tid_to_ac[tid];
+ hwq = wmm_queue_map[qid];
+ skb_set_queue_mapping(skb, qid);
+ } else if (ieee80211_is_data(hdr->frame_control)) {
+ skb_set_queue_mapping(skb, IEEE80211_AC_BE);
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ } else {
+ skb_pull(skb, MT_TXD_SIZE);
+ if (!ieee80211_is_bufferable_mmpdu(skb))
+ goto free;
+ skb_push(skb, MT_TXD_SIZE);
+ skb_set_queue_mapping(skb, MT_TXQ_PSD);
+ hwq = MT_TX_HW_QUEUE_MGMT;
+ }
+
ieee80211_sta_set_buffered(sta, tid, true);
+ val = le32_to_cpu(txd[0]);
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
+ txd[0] = cpu_to_le32(val);
+
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
@@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
int mt7603_dma_init(struct mt7603_dev *dev)
{
- static const u8 wmm_queue_map[] = {
- [IEEE80211_AC_BK] = 0,
- [IEEE80211_AC_BE] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
int ret;
int i;
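Hoisting wmm_queue_map to file scope lets the powersave loopback path in mt7603_rx_loopback_skb() pick the same per-AC hardware queue that mt7603_dma_init() programs, instead of forcing every requeued frame onto the management queue. A minimal kernel-context sketch of the selection for a QoS data frame, using the names from the hunk above:

u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TAG1D_MASK; /* 0..7 */
u8 ac  = tid_to_ac[tid];	/* mac80211 TID to access category    */
u8 hwq = wmm_queue_map[ac];	/* access category to MT7603 TX queue */

hwq is then written into the MT_TXD0_Q_IDX field of the descriptor, while non-QoS data defaults to the best-effort queue and bufferable management frames keep using MT_TX_HW_QUEUE_MGMT.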
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index cf21d06257e5..dc8a77f0a1cc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
MT_CLIENT_RESET_TX_R_E_2_S);
/* Start PSE client TX abort */
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
MT_CLIENT_RESET_TX_R_E_1_S, 500);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 98d64d3d2993..445d0f0ab779 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -177,6 +177,11 @@ static inline bool is_mt7925(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7925;
}
+static inline bool is_mt7920(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7920;
+}
+
static inline bool is_mt7922(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7922;
@@ -184,7 +189,7 @@ static inline bool is_mt7922(struct mt76_dev *dev)
static inline bool is_mt7921(struct mt76_dev *dev)
{
- return mt76_chip(dev) == 0x7961 || is_mt7922(dev);
+ return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev);
}
static inline bool is_mt7663(struct mt76_dev *dev)
@@ -259,6 +264,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev)
{
switch (mt76_chip(dev)) {
case 0x7961:
+ case 0x7920:
case 0x7922:
case 0x7925:
case 0x7663:
@@ -445,4 +451,6 @@ void mt76_connac2_tx_token_put(struct mt76_dev *dev);
/* connac3 */
void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv,
u8 mode);
+void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
+ u8 mode);
#endif /* __MT76_CONNAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
index 73e9f283d0ae..92ad1ecf6c9d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
@@ -6,8 +6,11 @@
#include "dma.h"
#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define EHT_BITS(f) cpu_to_le32(IEEE80211_RADIOTAP_EHT_##f)
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
IEEE80211_RADIOTAP_HE_##f)
+#define EHT_PREP(f, m, v) le32_encode_bits(le32_get_bits(v, MT_CRXV_EHT_##m),\
+ IEEE80211_RADIOTAP_EHT_##f)
static void
mt76_connac3_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
@@ -180,3 +183,85 @@ void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv,
}
}
EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_he_radiotap);
+
+static void *
+mt76_connac3_mac_radiotap_push_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+ struct ieee80211_radiotap_tlv *tlv;
+
+ tlv = skb_push(skb, sizeof(*tlv) + len);
+ tlv->type = cpu_to_le16(type);
+ tlv->len = cpu_to_le16(len);
+ memset(tlv->data, 0, len);
+
+ return tlv->data;
+}
+
+void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
+ u8 mode)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_radiotap_eht_usig *usig;
+ struct ieee80211_radiotap_eht *eht;
+ u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1;
+ u8 bw = FIELD_GET(MT_PRXV_FRAME_MODE, le32_to_cpu(rxv[2]));
+
+ if (WARN_ONCE(skb_mac_header(skb) != skb->data,
+ "Should push tlv at the top of mac hdr"))
+ return;
+
+ eht = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT,
+ sizeof(*eht) + sizeof(u32));
+ usig = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+ sizeof(*usig));
+
+ status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+
+ eht->known |= EHT_BITS(KNOWN_SPATIAL_REUSE) |
+ EHT_BITS(KNOWN_GI) |
+ EHT_BITS(KNOWN_EHT_LTF) |
+ EHT_BITS(KNOWN_LDPC_EXTRA_SYM_OM) |
+ EHT_BITS(KNOWN_PE_DISAMBIGUITY_OM) |
+ EHT_BITS(KNOWN_NSS_S);
+
+ eht->data[0] |=
+ EHT_PREP(DATA0_SPATIAL_REUSE, SR_MASK, rxv[13]) |
+ cpu_to_le32(FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, status->eht.gi) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, ltf_size)) |
+ EHT_PREP(DATA0_PE_DISAMBIGUITY_OM, PE_DISAMBIG, rxv[5]) |
+ EHT_PREP(DATA0_LDPC_EXTRA_SYM_OM, LDPC_EXT_SYM, rxv[4]);
+
+ eht->data[7] |= le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+ eht->user_info[0] |=
+ EHT_BITS(USER_INFO_MCS_KNOWN) |
+ EHT_BITS(USER_INFO_CODING_KNOWN) |
+ EHT_BITS(USER_INFO_NSS_KNOWN_O) |
+ EHT_BITS(USER_INFO_BEAMFORMING_KNOWN_O) |
+ EHT_BITS(USER_INFO_DATA_FOR_USER) |
+ le32_encode_bits(status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_BEAMFORMING_O);
+
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_HT_AD_CODE)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_CODING);
+
+ if (mode == MT_PHY_TYPE_EHT_MU)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_STA_ID_KNOWN) |
+ EHT_PREP(USER_INFO_STA_ID, MU_AID, rxv[8]);
+
+ usig->common |=
+ EHT_BITS(USIG_COMMON_PHY_VER_KNOWN) |
+ EHT_BITS(USIG_COMMON_BW_KNOWN) |
+ EHT_BITS(USIG_COMMON_UL_DL_KNOWN) |
+ EHT_BITS(USIG_COMMON_BSS_COLOR_KNOWN) |
+ EHT_BITS(USIG_COMMON_TXOP_KNOWN) |
+ le32_encode_bits(0, IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+ le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+ EHT_PREP(USIG_COMMON_UL_DL, UPLINK, rxv[5]) |
+ EHT_PREP(USIG_COMMON_BSS_COLOR, BSS_COLOR, rxv[9]) |
+ EHT_PREP(USIG_COMMON_TXOP, TXOP_DUR, rxv[9]);
+}
+EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_eht_radiotap);
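The EHT_PREP() helper introduced above is compact; expanding one use makes the pattern easier to follow. EHT_PREP(DATA0_SPATIAL_REUSE, SR_MASK, rxv[13]) is a straight expansion of the macro defined in this file and becomes:

/* le32_get_bits() extracts the MT_CRXV_EHT_SR_MASK field from the
 * little-endian RX vector word; le32_encode_bits() places that value at
 * the IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE bit position of a new
 * little-endian word.
 */
__le32 data0 = le32_encode_bits(le32_get_bits(rxv[13], MT_CRXV_EHT_SR_MASK),
				IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);

The same shape applies to every EHT_PREP()/HE_PREP() site: the second argument selects the source field mask (MT_CRXV_EHT_*), the first selects the destination radiotap field (IEEE80211_RADIOTAP_EHT_*).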
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 83dcd964bfd0..353e66069840 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -142,6 +142,28 @@ enum {
#define MT_CRXV_HE_RU3_L GENMASK(31, 27)
#define MT_CRXV_HE_RU3_H GENMASK(3, 0)
+#define MT_CRXV_EHT_NUM_USER GENMASK(26, 20)
+#define MT_CRXV_EHT_LTF_SIZE GENMASK(28, 27)
+#define MT_CRXV_EHT_LDPC_EXT_SYM BIT(30)
+#define MT_CRXV_EHT_PE_DISAMBIG BIT(1)
+#define MT_CRXV_EHT_UPLINK BIT(2)
+#define MT_CRXV_EHT_MU_AID GENMASK(27, 17)
+#define MT_CRXV_EHT_BEAM_CHNG BIT(29)
+#define MT_CRXV_EHT_DOPPLER BIT(0)
+#define MT_CRXV_EHT_BSS_COLOR GENMASK(15, 10)
+#define MT_CRXV_EHT_TXOP_DUR GENMASK(23, 17)
+#define MT_CRXV_EHT_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_EHT_SR1_MASK GENMASK(15, 12)
+#define MT_CRXV_EHT_SR2_MASK GENMASK(19, 16)
+#define MT_CRXV_EHT_SR3_MASK GENMASK(23, 20)
+#define MT_CRXV_EHT_RU0 GENMASK(8, 0)
+#define MT_CRXV_EHT_RU1 GENMASK(17, 9)
+#define MT_CRXV_EHT_RU2 GENMASK(26, 18)
+#define MT_CRXV_EHT_RU3_L GENMASK(31, 27)
+#define MT_CRXV_EHT_RU3_H GENMASK(3, 0)
+#define MT_CRXV_EHT_SIG_MCS GENMASK(19, 18)
+#define MT_CRXV_EHT_LTF_SYM GENMASK(22, 20)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index af0c2b2aacb0..162c57fb7954 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -257,7 +257,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
};
u16 ntlv;
- ptlv = skb_put(skb, len);
+ ptlv = skb_put_zero(skb, len);
memcpy(ptlv, &tlv, sizeof(tlv));
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
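The skb_put() to skb_put_zero() conversions in this file matter because skb_put() only advances the tail pointer and returns whatever bytes already sit in the buffer; when the caller then copies in less than the reserved length (as above, where only the tlv header is memcpy()d into a len-byte area), the remainder would go to the firmware uninitialised. A minimal sketch of the difference:

void *ptlv = skb_put(skb, len);		/* tail advanced, contents undefined      */
memset(ptlv, 0, len);			/* the extra step skb_put_zero() performs */
memcpy(ptlv, &tlv, sizeof(tlv));	/* only the header portion is filled in   */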
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (is_mt799x(dev) && !wcid->sta)
+ if (wcid && !wcid->sta)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -392,7 +392,14 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
if (!sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !is_zero_ether_addr(vif->bss_conf.bssid)) {
+ memcpy(basic->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ basic->aid = cpu_to_le16(vif->cfg.aid);
+ } else {
+ eth_broadcast_addr(basic->peer_addr);
+ }
return;
}
@@ -1670,7 +1677,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
set_bit(MT76_HW_SCANNING, &phy->state);
mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
- req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
+ req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
req->bss_idx = mvif->idx;
@@ -1798,7 +1805,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
- req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
+ req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
req->version = 1;
req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
@@ -2321,7 +2328,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
return -ENOMEM;
skb_put_data(skb, &hdr, sizeof(hdr));
- gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
+ gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
sizeof(*gtk_tlv));
gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
@@ -2446,7 +2453,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
return -ENOMEM;
skb_put_data(skb, &hdr, sizeof(hdr));
- ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+ ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
ptlv->len = cpu_to_le16(sizeof(*ptlv));
ptlv->data_len = pattern->pattern_len;
@@ -2527,6 +2534,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
__le16 tag;
__le16 len;
u8 suspend;
+ u8 pad[7];
} __packed hif_suspend;
} req = {
.hif_suspend = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 657a4d1f856b..6873ce14d299 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -610,6 +610,12 @@ struct sta_rec_ra_fixed {
u8 mmps_mode;
} __packed;
+struct sta_rec_tx_proc {
+ __le16 tag;
+ __le16 len;
+ __le32 flag;
+} __packed;
+
/* wtbl_rec */
struct wtbl_req_hdr {
@@ -777,6 +783,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_pn_info) + \
+ sizeof(struct sta_rec_tx_proc) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@@ -1004,6 +1011,7 @@ enum {
MCU_EVENT_CH_PRIVILEGE = 0x18,
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_DBG_MSG = 0x27,
+ MCU_EVENT_RSSI_NOTIFY = 0x96,
MCU_EVENT_TXPWR = 0xd0,
MCU_EVENT_EXT = 0xed,
MCU_EVENT_RESTART_DL = 0xef,
@@ -1182,6 +1190,7 @@ enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
MCU_EXT_CMD_RF_TEST = 0x04,
+ MCU_EXT_CMD_ID_RADIO_ON_OFF_CTRL = 0x05,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
@@ -1213,6 +1222,7 @@ enum {
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_CAL_CACHE = 0x67,
MCU_EXT_CMD_RED_ENABLE = 0x68,
+ MCU_EXT_CMD_CP_SUPPORT = 0x75,
MCU_EXT_CMD_SET_RADAR_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
@@ -1303,6 +1313,7 @@ enum {
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
MCU_CE_CMD_GET_NIC_CAPAB = 0x8a,
+ MCU_CE_CMD_RSSI_MONITOR = 0xa1,
MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0,
MCU_CE_CMD_REG_WRITE = 0xc0,
MCU_CE_CMD_REG_READ = 0xc0,
@@ -1448,6 +1459,10 @@ struct mt76_connac_beacon_loss_event {
u8 pad[2];
} __packed;
+struct mt76_connac_rssi_notify_event {
+ __le32 rssi[4];
+} __packed;
+
struct mt76_connac_mcu_bss_event {
u8 bss_idx;
u8 is_absent;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 6c3696c8c700..578013884e43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -523,7 +523,8 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
/* WM CPU info record control */
mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
- mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) |
+ (dev->fw.debug_wm ? 0 : BIT(0)));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
@@ -1049,6 +1050,7 @@ static ssize_t
mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
+ int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
@@ -1057,7 +1059,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
.band_idx = phy->mt76->band_idx,
};
char buf[100];
- int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
enum mac80211_rx_encoding mode;
u32 offs = 0, len = 0;
@@ -1130,8 +1131,8 @@ skip:
if (ret)
goto out;
- mphy->txpower_cur = max(mphy->txpower_cur,
- max(pwr160, max(pwr80, max(pwr40, pwr20))));
+ pwr = max3(pwr80, pwr40, pwr20);
+ mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr);
out:
mutex_unlock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 3bb2643d1b26..bfdbc15abaa9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -9,28 +9,34 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
- u32 val = eeprom[MT_EE_DO_PRE_CAL];
- u32 offs;
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
+ u32 size, val = eeprom[offs];
int ret;
- if (!dev->flash_mode)
+ if (!dev->flash_mode || !val)
return 0;
- if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
- return 0;
+ size = mt7915_get_cal_group_size(dev) + mt7915_get_cal_dpd_size(dev);
- val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE;
- dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL);
+ dev->cal = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!dev->cal)
return -ENOMEM;
offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
- ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, val);
+ ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, size);
if (!ret)
return ret;
- return mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", val);
+ ret = mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", size);
+ if (!ret)
+ return ret;
+
+ dev_warn(mdev->dev, "missing precal data, size=%d\n", size);
+ devm_kfree(mdev->dev, dev->cal);
+ dev->cal = NULL;
+
+ return ret;
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -256,10 +262,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
return ret;
}
- ret = mt7915_eeprom_load_precal(dev);
- if (ret)
- return ret;
-
+ mt7915_eeprom_load_precal(dev);
mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index adc26a222823..509fb43d8a68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -19,6 +19,7 @@ enum mt7915_eeprom_field {
MT_EE_DDIE_FT_VERSION = 0x050,
MT_EE_DO_PRE_CAL = 0x062,
MT_EE_WIFI_CONF = 0x190,
+ MT_EE_DO_PRE_CAL_V2 = 0x19a,
MT_EE_RATE_DELTA_2G = 0x252,
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
@@ -39,10 +40,19 @@ enum mt7915_eeprom_field {
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
-#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
+#define MT_EE_WIFI_CAL_DPD_2G BIT(2)
+#define MT_EE_WIFI_CAL_DPD_5G BIT(1)
+#define MT_EE_WIFI_CAL_DPD_6G BIT(3)
+#define MT_EE_WIFI_CAL_DPD GENMASK(3, 1)
#define MT_EE_CAL_UNIT 1024
-#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
-#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_GROUP_SIZE_7915 (49 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7916 (54 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
@@ -156,6 +166,37 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
return val & MT_EE_WIFI_CONF7_TSSI0_5G;
}
+static inline u32
+mt7915_get_cal_group_size(struct mt7915_dev *dev)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+ u32 val;
+
+ if (is_mt7915(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7915;
+ } else if (is_mt7916(&dev->mt76)) {
+ val = eep[MT_EE_WIFI_CONF + 1];
+ val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+ return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
+ MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (mt7915_check_adie(dev, false)) {
+ return MT_EE_CAL_GROUP_SIZE_7976;
+ } else {
+ return MT_EE_CAL_GROUP_SIZE_7975;
+ }
+}
+
+static inline u32
+mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
+{
+ if (is_mt7915(&dev->mt76))
+ return MT_EE_CAL_DPD_SIZE_V1;
+ else if (is_mt7981(&dev->mt76))
+ return MT_EE_CAL_DPD_SIZE_V2_7981;
+ else
+ return MT_EE_CAL_DPD_SIZE_V2;
+}
+
extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index cea2f6d9050a..a978f434dc5e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -823,7 +823,7 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (ret < 0)
return ret;
- if (dev->flash_mode) {
+ if (dev->cal) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
return ret;
@@ -934,11 +934,10 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (vif != NL80211_IFTYPE_AP && vif != NL80211_IFTYPE_STATION)
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1);
@@ -947,6 +946,11 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
sts_160 - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index e45361111f9b..8008ce3fa6c7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -140,8 +140,15 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
msta->airtime_ac[i] = mt76_rr(dev, addr);
msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ if (msta->airtime_ac[i] <= tx_last)
+ tx_time[i] = 0;
+ else
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+
+ if (msta->airtime_ac[i + 4] <= rx_last)
+ rx_time[i] = 0;
+ else
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
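The airtime accounting change above guards against unsigned wrap-around: the airtime_ac[] values are u32 register snapshots, and if the hardware counter was reset or otherwise fell behind the cached value, the plain subtraction underflows into an enormous delta that then gets reported to mac80211. A minimal sketch of the failure mode:

u32 cur = 5, last = 10;
u32 delta = cur - last;		/* wraps to 4294967291 rather than a small value */

With the patch, such a case now reports a delta of 0 for that access category instead.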
@@ -1338,10 +1345,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- if (ext_phy) {
+ if (ext_phy)
set_bit(MT76_RESET, &ext_phy->state);
- set_bit(MT76_MCU_RESET, &ext_phy->state);
- }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 3709d18da0e6..2624edbb59a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -329,7 +329,7 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mt76_set_channel(phy->mt76);
- if (dev->flash_mode) {
+ if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
goto out;
@@ -744,6 +744,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
int ret, idx;
+ u32 addr;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -767,6 +768,9 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (ret)
return ret;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
}
@@ -1657,6 +1661,10 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7915_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7915_tx,
.start = mt7915_start,
.stop = mt7915_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index d90f98c50039..9599adf104b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -331,7 +331,7 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
}
static void
@@ -424,7 +424,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
.len = cpu_to_le16(sub_len),
};
- ptlv = skb_put(skb, sub_len);
+ ptlv = skb_put_zero(skb, sub_len);
memcpy(ptlv, &tlv, sizeof(tlv));
le16_add_cpu(sub_ntlv, 1);
@@ -1193,7 +1193,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- const u8 matrix[4][4] = {
+ static const u8 matrix[4][4] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1919,8 +1919,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = true;
- if (changed & BSS_CHANGED_FILS_DISCOVERY &&
- vif->bss_conf.fils_discovery.max_interval) {
+ if (changed & BSS_CHANGED_FILS_DISCOVERY) {
interval = vif->bss_conf.fils_discovery.max_interval;
skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
} else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
@@ -1957,7 +1956,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
discov->tx_interval = interval;
discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
- discov->enable = true;
+ discov->enable = !!interval;
buf = (u8 *)sub_tlv + sizeof(*discov);
@@ -2904,9 +2903,10 @@ static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
{
u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
- u32 total = MT_EE_CAL_GROUP_SIZE;
+ u32 total = mt7915_get_cal_group_size(dev);
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
- if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP))
+ if (!(eep[offs] & MT_EE_WIFI_CAL_GROUP))
return 0;
/*
@@ -2942,9 +2942,9 @@ static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
return -1;
}
-static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
+static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
{
- static const u16 freq_list[] = {
+ static const u16 freq_list_v1[] = {
5180, 5200, 5220, 5240,
5260, 5280, 5300, 5320,
5500, 5520, 5540, 5560,
@@ -2952,65 +2952,135 @@ static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
5660, 5680, 5700, 5745,
5765, 5785, 5805, 5825
};
- int offset_2g = ARRAY_SIZE(freq_list);
+ static const u16 freq_list_v2[] = {
+ /* 6G BW20*/
+ 5955, 5975, 5995, 6015,
+ 6035, 6055, 6075, 6095,
+ 6115, 6135, 6155, 6175,
+ 6195, 6215, 6235, 6255,
+ 6275, 6295, 6315, 6335,
+ 6355, 6375, 6395, 6415,
+ 6435, 6455, 6475, 6495,
+ 6515, 6535, 6555, 6575,
+ 6595, 6615, 6635, 6655,
+ 6675, 6695, 6715, 6735,
+ 6755, 6775, 6795, 6815,
+ 6835, 6855, 6875, 6895,
+ 6915, 6935, 6955, 6975,
+ 6995, 7015, 7035, 7055,
+ 7075, 7095, 7115,
+ /* 6G BW160 */
+ 6025, 6185, 6345, 6505,
+ 6665, 6825, 6985,
+ /* 5G BW20 */
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5720,
+ 5745, 5765, 5785, 5805,
+ 5825, 5845, 5865, 5885,
+ /* 5G BW160 */
+ 5250, 5570, 5815
+ };
+ static const u16 freq_list_v2_7981[] = {
+ /* 5G BW20 */
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5720,
+ 5745, 5765, 5785, 5805,
+ 5825, 5845, 5865, 5885,
+ /* 5G BW160 */
+ 5250, 5570, 5815
+ };
+ const u16 *freq_list = freq_list_v1;
+ int n_freqs = ARRAY_SIZE(freq_list_v1);
int idx;
+ if (!is_mt7915(&dev->mt76)) {
+ if (is_mt7981(&dev->mt76)) {
+ freq_list = freq_list_v2_7981;
+ n_freqs = ARRAY_SIZE(freq_list_v2_7981);
+ } else {
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ }
+ }
+
if (freq < 4000) {
if (freq < 2432)
- return offset_2g;
+ return n_freqs;
if (freq < 2457)
- return offset_2g + 1;
+ return n_freqs + 1;
- return offset_2g + 2;
+ return n_freqs + 2;
}
- if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160)
+ if (bw == NL80211_CHAN_WIDTH_80P80)
return -1;
if (bw != NL80211_CHAN_WIDTH_20) {
- idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
- freq + 10);
+ idx = mt7915_find_freq_idx(freq_list, n_freqs, freq + 10);
if (idx >= 0)
return idx;
- idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
- freq - 10);
+ idx = mt7915_find_freq_idx(freq_list, n_freqs, freq - 10);
if (idx >= 0)
return idx;
}
- return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+ return mt7915_find_freq_idx(freq_list, n_freqs, freq);
}
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- u16 total = 2, center_freq = chandef->center_freq1;
+ enum nl80211_band band = chandef->chan->band;
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
+ u16 center_freq = chandef->center_freq1;
u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ u8 dpd_mask, cal_num = is_mt7915(&dev->mt76) ? 2 : 3;
int idx;
- if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_6G;
+ break;
+ default:
+ dpd_mask = 0;
+ break;
+ }
+
+ if (!(eep[offs] & dpd_mask))
return 0;
- idx = mt7915_dpd_freq_idx(center_freq, chandef->width);
+ idx = mt7915_dpd_freq_idx(dev, center_freq, chandef->width);
if (idx < 0)
return -EINVAL;
/* Items: Tx DPD, Tx Flatness */
- idx = idx * 2;
- cal += MT_EE_CAL_GROUP_SIZE;
+ idx = idx * cal_num;
+ cal += mt7915_get_cal_group_size(dev) + (idx * MT_EE_CAL_UNIT);
- while (total--) {
+ while (cal_num--) {
int ret;
- cal += (idx * MT_EE_CAL_UNIT);
ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
MCU_EXT_CMD(DPD_PRE_CAL_INFO));
if (ret)
return ret;
idx++;
+ cal += MT_EE_CAL_UNIT;
}
return 0;
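To make the reworked DPD indexing concrete, consider a non-MT7915 (and non-MT7981) radio on a 20 MHz 5 GHz channel with center_freq1 = 5180 and the 5 GHz DPD bit set in the EEPROM: mt7915_dpd_freq_idx() finds 5180 at position 66 of freq_list_v2 (59 six-GHz BW20 entries plus 7 six-GHz BW160 entries precede the 5 GHz block), cal_num is 3, so idx becomes 66 * 3 = 198 and the upload starts mt7915_get_cal_group_size(dev) + 198 * MT_EE_CAL_UNIT bytes into dev->cal, pushing three consecutive MT_EE_CAL_UNIT-sized chunks (indices 198, 199 and 200) via MCU_EXT_CMD(DPD_PRE_CAL_INFO).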
@@ -3801,30 +3871,38 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
{
struct {
__le32 cmd;
- __le32 num;
- __le32 __rsv;
- __le16 wlan_idx;
- } req = {
+ __le32 arg0;
+ __le32 arg1;
+ __le16 arg2;
+ } __packed req = {
.cmd = cpu_to_le32(0x15),
- .num = cpu_to_le32(1),
- .wlan_idx = cpu_to_le16(wlan_idx),
};
struct mt7915_mcu_wa_tx_stat {
- __le16 wlan_idx;
- u8 __rsv[2];
+ __le16 wcid;
+ u8 __rsv2[2];
/* tx_bytes is deprecated since WA byte counter uses u32,
* which easily leads to overflow.
*/
__le32 tx_bytes;
__le32 tx_packets;
- } *res;
+ } __packed *res;
struct mt76_wcid *wcid;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ u16 ret_wcid;
+
+ if (is_mt7915(&dev->mt76)) {
+ req.arg0 = cpu_to_le32(wlan_idx);
+ len = sizeof(req) - sizeof(req.arg2);
+ } else {
+ req.arg0 = cpu_to_le32(1);
+ req.arg2 = cpu_to_le16(wlan_idx);
+ len = sizeof(req);
+ }
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY),
- &req, sizeof(req), true, &skb);
+ &req, len, true, &skb);
if (ret)
return ret;
@@ -3833,7 +3911,11 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
res = (struct mt7915_mcu_wa_tx_stat *)skb->data;
- if (le16_to_cpu(res->wlan_idx) != wlan_idx) {
+ ret_wcid = le16_to_cpu(res->wcid);
+ if (is_mt7915(&dev->mt76))
+ ret_wcid &= 0xff;
+
+ if (ret_wcid != wlan_idx) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 6e79bc65f5a5..a30d08eb0656 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -302,6 +302,10 @@ struct mt7915_dev {
struct rchan *relay_fwlog;
void *cal;
+ u32 cur_prek_offset;
+ u8 dpd_chan_num_2g;
+ u8 dpd_chan_num_5g;
+ u8 dpd_chan_num_6g;
struct {
u8 debug_wm;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index f5b99917c08e..90a6f61d1089 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -7,7 +7,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_gpio.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
#include <linux/of_net.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 867e14f6b93a..73e42ef42983 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -663,6 +663,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
int i, ret;
dev_dbg(dev->mt76.dev, "chip reset\n");
+ set_bit(MT76_RESET, &dev->mphy.state);
dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
@@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
}
dev->hw_full_reset = false;
+ clear_bit(MT76_RESET, &dev->mphy.state);
pm->suspended = false;
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index ca36de34171b..3e3ad3518d85 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -242,6 +242,15 @@ int __mt7921_start(struct mt792x_phy *phy)
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
+ if (mt76_is_mmio(mphy->dev)) {
+ err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_LED_CTRL_ENABLE);
+ if (err)
+ return err;
+
+ err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_ON_LED);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -259,6 +268,22 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
+static void mt7921_stop(struct ieee80211_hw *hw)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ int err = 0;
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ mt792x_mutex_acquire(dev);
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED);
+ mt792x_mutex_release(dev);
+ if (err)
+ return;
+ }
+
+ mt792x_stop(hw);
+}
+
static int
mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -310,6 +335,8 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN)
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI;
out:
mt792x_mutex_release(dev);
@@ -679,6 +706,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt7921_mcu_uni_bss_ps(dev, vif);
+ if (changed & BSS_CHANGED_CQM)
+ mt7921_mcu_set_rssimonitor(dev, vif);
+
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
@@ -1372,7 +1402,7 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
const struct ieee80211_ops mt7921_ops = {
.tx = mt792x_tx,
.start = mt7921_start,
- .stop = mt792x_stop,
+ .stop = mt7921_stop,
.add_interface = mt7921_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7921_config,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 8b4ce32a2cd1..bdd8b5f19b24 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -254,6 +254,42 @@ mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static void
+mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt76_connac_rssi_notify_event *event = priv;
+ enum nl80211_cqm_rssi_threshold_event nl_event;
+ s32 rssi = le32_to_cpu(event->rssi[mvif->mt76.idx]);
+
+ if (!rssi)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ if (rssi > vif->bss_conf.cqm_rssi_thold)
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ else
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+
+ ieee80211_cqm_rssi_notify(vif, nl_event, rssi, GFP_KERNEL);
+}
+
+static void
+mt7921_mcu_rssi_monitor_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac_rssi_notify_event *event;
+
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+ event = (struct mt76_connac_rssi_notify_event *)skb->data;
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_mcu_rssi_monitor_iter, event);
+}
+
+static void
mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_connac2_mcu_rxd *rxd;
@@ -281,6 +317,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
case MCU_EVENT_TX_DONE:
mt7921_mcu_tx_done_event(dev, skb);
break;
+ case MCU_EVENT_RSSI_NOTIFY:
+ mt7921_mcu_rssi_monitor_event(dev, skb);
+ break;
default:
break;
}
@@ -327,6 +366,7 @@ void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_RSSI_NOTIFY ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
rxd->eid == MCU_EVENT_TX_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
@@ -606,6 +646,20 @@ int mt7921_run_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
+int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value)
+{
+ struct {
+ u8 ctrlid;
+ u8 rsv[3];
+ } __packed req = {
+ .ctrlid = value,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ID_RADIO_ON_OFF_CTRL),
+ &req, sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt7921_mcu_radio_led_ctrl);
+
int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
@@ -1100,12 +1154,12 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
{
struct cfg80211_chan_def *chandef = &ctx->def;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
- const u8 ch_band[] = {
+ static const u8 ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
[NL80211_BAND_5GHZ] = 2,
[NL80211_BAND_6GHZ] = 3,
};
- const u8 ch_width[] = {
+ static const u8 ch_width[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = 0,
[NL80211_CHAN_WIDTH_20] = 0,
[NL80211_CHAN_WIDTH_40] = 0,
@@ -1391,3 +1445,24 @@ int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
&data, sizeof(data), false);
}
+
+int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct {
+ u8 enable;
+ s8 cqm_rssi_high;
+ s8 cqm_rssi_low;
+ u8 bss_idx;
+ u16 duration;
+ u8 rsv2[2];
+ } __packed data = {
+ .enable = vif->cfg.assoc,
+ .cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst,
+ .cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst,
+ .bss_idx = mvif->mt76.idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR),
+ &data, sizeof(data), false);
+}
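The RSSI-monitor additions in this file wire mac80211's CQM (connection quality monitoring) interface to the firmware: mt7921_mcu_set_rssimonitor() programs a window of cqm_rssi_thold plus or minus cqm_rssi_hyst while associated, and MCU_EVENT_RSSI_NOTIFY crossings come back through mt7921_mcu_rssi_monitor_iter() as NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH or _LOW. As a usage illustration (hypothetical interface name, and assuming the standard iw cqm subcommand syntax), a command such as iw dev wlan0 cqm rssi -70 3 would request a -70 dBm threshold with 3 dB hysteresis and exercise this path on devices whose firmware advertises MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN.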
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 3016636d18c6..6c5392c5d207 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -27,6 +27,10 @@
#define MCU_UNI_EVENT_ROC 0x27
#define MCU_UNI_EVENT_CLC 0x80
+#define EXT_CMD_RADIO_LED_CTRL_ENABLE 0x1
+#define EXT_CMD_RADIO_ON_LED 0x2
+#define EXT_CMD_RADIO_OFF_LED 0x3
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -196,6 +200,7 @@ int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb);
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value);
static inline u32
mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr)
@@ -323,4 +328,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
void mt7921_roc_abort_sync(struct mt792x_dev *dev);
+int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index cda853e86676..f768e9389ac6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -24,6 +24,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7920),
+ .driver_data = (kernel_ulong_t)MT7920_FIRMWARE_WM },
{ },
};
@@ -269,9 +271,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
struct mt76_bus_ops *bus_ops;
struct mt792x_dev *dev;
struct mt76_dev *mdev;
+ u16 cmd, chipid;
u8 features;
int ret;
- u16 cmd;
ret = pcim_enable_device(pdev);
if (ret)
@@ -345,7 +347,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
- mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
+ chipid = mt7921_l1_rr(dev, MT_HW_CHIPID);
+ if (chipid == 0x7961 && (mt7921_l1_rr(dev, MT_HW_BOUND) & BIT(7)))
+ chipid = 0x7920;
+ mdev->rev = (chipid << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
@@ -422,6 +427,10 @@ static int mt7921_pci_suspend(struct device *device)
wait_event_timeout(dev->wait,
!dev->regd_in_progress, 5 * HZ);
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED);
+ if (err < 0)
+ goto restore_suspend;
+
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;
@@ -520,9 +529,11 @@ static int mt7921_pci_resume(struct device *device)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ if (err < 0)
+ goto failed;
mt7921_regd_update(dev);
-
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_ON_LED);
failed:
pm->suspended = false;
@@ -551,6 +562,8 @@ static struct pci_driver mt7921_pci_driver = {
module_pci_driver(mt7921_pci_driver);
MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
+MODULE_FIRMWARE(MT7920_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7920_ROM_PATCH);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index c866144ff061..031ba9aaa4e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 389eb0903807..1f77cf71ca70 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&dev->mt76.tx_worker);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
mt76_worker_enable(&dev->mt76.tx_worker);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index 1b9fbd9a140d..c2460ef4993d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -590,14 +590,25 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
qos_ctl = *ieee80211_get_qos_ctl(hdr);
}
+ skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
} else {
status->flag |= RX_FLAG_8023;
}
mt792x_mac_assoc_rssi(dev, skb);
- if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ if (rxv && !(status->flag & RX_FLAG_8023)) {
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
+ break;
+ case RX_ENC_HE:
+ mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ break;
+ default:
+ break;
+ }
+ }
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index bd37cb8d734b..652a9accc43c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -1768,12 +1768,12 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &mphy->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
- const u8 ch_band[] = {
+ static const u8 ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
[NL80211_BAND_5GHZ] = 2,
[NL80211_BAND_6GHZ] = 3,
};
- const u8 ch_width[] = {
+ static const u8 ch_width[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = 0,
[NL80211_CHAN_WIDTH_20] = 0,
[NL80211_CHAN_WIDTH_40] = 0,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 2a0bbfe7bfa5..b8315a89f4a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -535,7 +535,7 @@ struct mt7925_wow_pattern_tlv {
u8 offset;
u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
- u8 rsv[4];
+ u8 rsv[7];
} __packed;
static inline enum connac3_mcu_cipher_type
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index a8556de3d480..20578497a405 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -26,6 +26,7 @@
#define MT792x_FW_CAP_CNM BIT(7)
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
+#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -36,10 +37,12 @@
#define MT792x_MCU_INIT_RETRY_COUNT 10
#define MT792x_WFSYS_INIT_RETRY_COUNT 2
+#define MT7920_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1a.bin"
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin"
#define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin"
+#define MT7920_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1a_2_hdr.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin"
#define MT7925_ROM_PATCH "mediatek/mt7925/WIFI_MT7925_PATCH_MCU_1_1_hdr.bin"
@@ -326,6 +329,8 @@ int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
static inline char *mt792x_ram_name(struct mt792x_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
+ case 0x7920:
+ return MT7920_FIRMWARE_WM;
case 0x7922:
return MT7922_FIRMWARE_WM;
case 0x7925:
@@ -338,6 +343,8 @@ static inline char *mt792x_ram_name(struct mt792x_dev *dev)
static inline char *mt792x_patch_name(struct mt792x_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
+ case 0x7920:
+ return MT7920_ROM_PATCH;
case 0x7922:
return MT7922_ROM_PATCH;
case 0x7925:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 9bd953586b04..62c03d088925 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val)
if (val > MT_RX_SEL2)
return -EINVAL;
+ if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
+ dev_err(dev->mt76.dev, "Background radar is not enabled\n");
+ return -EINVAL;
+ }
+
return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
val, 0, 0);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0384fb059ddf..bc7111a71f98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1655,14 +1655,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2) {
+ if (phy2)
set_bit(MT76_RESET, &phy2->mt76->state);
- set_bit(MT76_MCU_RESET, &phy2->mt76->state);
- }
- if (phy3) {
+ if (phy3)
set_bit(MT76_RESET, &phy3->mt76->state);
- set_bit(MT76_MCU_RESET, &phy3->mt76->state);
- }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index f7da8d6dd903..7c97140d8255 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -35,6 +35,14 @@ int mt7996_run(struct ieee80211_hw *hw)
ret = mt7996_mcu_set_hdr_trans(dev, true);
if (ret)
goto out;
+
+ if (is_mt7992(&dev->mt76)) {
+ u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
+
+ ret = mt7996_mcu_cp_support(dev, queue);
+ if (ret)
+ goto out;
+ }
}
mt7996_mac_enable_nf(dev, phy->mt76->band_idx);
@@ -238,7 +246,11 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mt7996_init_bitrate_mask(vif);
mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true);
+ /* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
+ * interface, since firmware only records BSSID when the entry is new
+ */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ mt7996_mcu_add_sta(dev, vif, NULL, true, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -256,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
struct mt7996_phy *phy = mt7996_hw_phy(hw);
int idx = msta->wcid.idx;
- mt7996_mcu_add_sta(dev, vif, NULL, false);
+ mt7996_mcu_add_sta(dev, vif, NULL, false, false);
mt7996_mcu_add_bss_info(phy, vif, false);
if (vif == phy->monitor_vif)
@@ -340,10 +352,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -351,6 +359,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (key->keyidx == 6 || key->keyidx == 7)
@@ -438,7 +451,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct ieee80211_tx_queue_params *params)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- const u8 mq_to_aci[] = {
+ static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
[IEEE80211_AC_BE] = 0,
@@ -599,7 +612,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true);
+ mt7996_mcu_add_sta(dev, vif, NULL, true,
+ !!(changed & BSS_CHANGED_BSSID));
}
if (changed & BSS_CHANGED_ERP_CTS_PROT)
@@ -688,7 +702,7 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ret = mt7996_mcu_add_sta(dev, vif, sta, true);
+ ret = mt7996_mcu_add_sta(dev, vif, sta, true, true);
if (ret)
return ret;
@@ -702,7 +716,7 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
int i;
- mt7996_mcu_add_sta(dev, vif, sta, false);
+ mt7996_mcu_add_sta(dev, vif, sta, false, false);
mt7996_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index b44abe2acc81..2c8578677800 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -355,7 +355,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
return;
- if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
+ if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
+ return;
+
+ if (r->band_idx == MT_RX_SEL2)
mphy = dev->rdd2_phy->mt76;
else
mphy = dev->mt76.phys[r->band_idx];
@@ -418,7 +421,7 @@ mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
}
static void
@@ -819,11 +822,14 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
mbssid = (struct bss_info_uni_mbssid *)tlv;
- if (enable && vif->bss_conf.bssid_indicator) {
+ if (enable) {
mbssid->max_indicator = vif->bss_conf.bssid_indicator;
mbssid->mbss_idx = vif->bss_conf.bssid_index;
mbssid->tx_bss_omac_idx = 0;
@@ -1650,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- const u8 matrix[4][4] = {
+ static const u8 matrix[4][4] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1749,6 +1755,18 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
}
static void
+mt7996_mcu_sta_tx_proc_tlv(struct sk_buff *skb)
+{
+ struct sta_rec_tx_proc *tx_proc;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_TX_PROC, sizeof(*tx_proc));
+
+ tx_proc = (struct sta_rec_tx_proc *)tlv;
+ tx_proc->flag = cpu_to_le32(0);
+}
+
+static void
mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct sta_rec_hdrt *hdrt;
@@ -1778,10 +1796,10 @@ mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
- if (!wcid)
+ if (!sta)
return;
+ wcid = (struct mt76_wcid *)sta->drv_priv;
hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
hdr_trans->to_ds = true;
@@ -1968,6 +1986,7 @@ static void
mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+#define INIT_RCPI 180
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt76_phy *mphy = mvif->phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
@@ -2065,6 +2084,8 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
+
+ memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi));
}
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
@@ -2133,7 +2154,7 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
}
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, bool enable, bool newly)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta;
@@ -2149,11 +2170,16 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, newly);
+
if (!enable)
goto out;
+ /* starec hdr trans */
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ /* starec tx proc */
+ mt7996_mcu_sta_tx_proc_tlv(skb);
+
/* tag order is in accordance with firmware dependency. */
if (sta) {
/* starec hdrt mode */
@@ -2178,8 +2204,6 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta);
/* starec bfee */
mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
- /* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
}
ret = mt7996_mcu_add_group(dev, vif, sta);
@@ -3729,6 +3753,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
} __packed * res;
struct sk_buff *skb;
int ret;
+ u32 temp;
ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
&req, sizeof(req), true, &skb);
@@ -3736,8 +3761,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
return ret;
res = (void *)skb->data;
+ temp = le32_to_cpu(res->temperature);
+ dev_kfree_skb(skb);
- return le32_to_cpu(res->temperature);
+ return temp;
}
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state)
@@ -4464,7 +4491,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
u8 band_idx;
} __packed req = {
.tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL),
- .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4),
+ .len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4),
.power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL,
.power_limit_type = TX_POWER_LIMIT_TABLE_RATE,
.band_idx = phy->mt76->band_idx,
@@ -4479,7 +4506,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
mphy->txpower_cur = tx_power;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(req) + MT7996_SKU_RATE_NUM);
+ sizeof(req) + MT7996_SKU_PATH_NUM);
if (!skb)
return -ENOMEM;
@@ -4503,6 +4530,22 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
/* eht */
skb_put_data(skb, &la.eht[0], sizeof(la.eht));
+ /* padding */
+ skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM);
+
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WM_UNI_CMD(TXPOWER), true);
}
+
+int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode)
+{
+ __le32 cp_mode;
+
+ if (mode < mt76_connac_lmac_mapping(IEEE80211_AC_BE) ||
+ mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO))
+ return -EINVAL;
+
+ cp_mode = cpu_to_le32(mode);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT),
+ &cp_mode, sizeof(cp_mode), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 304e5fd14803..928a9663b49e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
- u32 i, intr, mask, intr1;
+ u32 i, intr, mask, intr1 = 0;
if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
mtk_wed_device_irq_set_mask(wed_hif2, 0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 36d1f247d55a..177cfff31120 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -50,6 +50,7 @@
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7996_SKU_RATE_NUM 417
+#define MT7996_SKU_PATH_NUM 494
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
@@ -450,7 +451,7 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, bool enable, bool newly);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -612,6 +613,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 3e88798df017..8e9576747052 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
dev = container_of(sdio, struct mt76_dev, sdio);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->phy.state))
+ if (test_bit(MT76_RESET, &dev->phy.state) ||
+ test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -514,13 +515,14 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
}
static int
-mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
int err, len = skb->len;
u16 idx = q->head;
@@ -548,10 +550,7 @@ static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info)
{
- int ret = -ENOSPC, len = skb->len, pad;
-
- if (q->queued == q->ndesc)
- goto error;
+ int ret, len = skb->len, pad;
pad = round_up(skb->len, 4) - skb->len;
ret = mt76_skb_adjust_pad(skb, pad);
@@ -560,6 +559,12 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
spin_lock_bh(&q->lock);
+ if (q->queued == q->ndesc) {
+ ret = -ENOSPC;
+ spin_unlock_bh(&q->lock);
+ goto error;
+ }
+
q->entry[q->head].buf_sz = len;
q->entry[q->head].skb = skb;
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 4644dace9bb3..ca4feccf38ca 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -53,7 +53,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
q->queued < q->ndesc / 2) {
int ret;
- ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb),
+ ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb),
wcid, NULL);
if (ret < 0)
break;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 1809b03292c3..5cf6edee4d13 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -308,7 +308,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
int idx;
non_aql = !info->tx_time_est;
- idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
+ idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
if (idx < 0 || !sta)
return idx;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 342c3aea549d..58ff06823389 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -850,13 +850,14 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
}
static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
u16 idx = q->head;
int err;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 089102ed9ae5..7d9fb9f2d527 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -237,12 +237,11 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
- int srcu_idx;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return PTR_ERR(vif);
}
@@ -253,7 +252,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return result;
}
@@ -806,9 +805,8 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
- int srcu_idx;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif))
goto out;
@@ -863,7 +861,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
out:
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return ret;
}
@@ -1539,20 +1537,19 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
- int srcu_idx;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
if (!vif) {
vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
if (!vif) {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
goto validate_interface;
}
}
if (vif->monitor_flag) {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
goto validate_interface;
}
@@ -1560,12 +1557,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (ndev) {
vif->monitor_flag = 1;
} else {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return ERR_PTR(-EINVAL);
}
wdev = &vif->priv.wdev;
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return wdev;
}
@@ -1613,7 +1610,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
- synchronize_srcu(&wl->srcu);
+ synchronize_rcu();
return 0;
}
@@ -1638,25 +1635,23 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
- int srcu_idx;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
wilc_set_wowlan_trigger(vif, enabled);
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
- int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
@@ -1664,10 +1659,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!wl->initialized)
return -EIO;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return -EINVAL;
}
@@ -1679,7 +1674,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return ret;
}
@@ -1762,7 +1757,6 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
- init_srcu_struct(&wl->srcu);
}
void wlan_deinit_locks(struct wilc *wilc)
@@ -1773,7 +1767,6 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
mutex_destroy(&wilc->deinit_lock);
- cleanup_srcu_struct(&wilc->srcu);
}
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index f1085ccb7eed..919de6ffb821 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -1570,12 +1570,11 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
- int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1594,7 +1593,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
msg->body.net_info.rssi = buffer[8];
msg->body.net_info.mgmt = kmemdup(&buffer[9],
msg->body.net_info.frame_len,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!msg->body.net_info.mgmt) {
kfree(msg);
goto out;
@@ -1607,7 +1606,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -1615,14 +1614,13 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
- int srcu_idx;
int result;
int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1649,7 +1647,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
mutex_unlock(&wilc->deinit_lock);
}
@@ -1657,12 +1655,11 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
- int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1687,7 +1684,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
}
}
out:
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 710e29bea560..73f56f7b002b 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -127,30 +127,28 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
- int srcu_idx;
u8 ret_val = 0;
struct wilc_vif *vif;
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
return ret_val;
}
static void wilc_wake_tx_queues(struct wilc *wl)
{
- int srcu_idx;
struct wilc_vif *ifc;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wl, ifc) {
if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
netif_wake_queue(ifc->ndev);
}
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
}
static int wilc_txq_task(void *vp)
@@ -655,7 +653,6 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
unsigned char mac_addr[ETH_ALEN];
struct wilc_vif *tmp_vif;
- int srcu_idx;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -667,19 +664,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
return 0;
}
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
if (result)
@@ -767,15 +764,14 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
- int srcu_idx;
struct wilc_vif *vif;
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
}
return NETDEV_TX_OK;
@@ -819,13 +815,12 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
unsigned int frame_len = 0;
struct wilc_vif *vif;
struct sk_buff *skb;
- int srcu_idx;
int stats;
if (!wilc)
return;
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
goto out;
@@ -853,15 +848,14 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
out:
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
{
- int srcu_idx;
struct wilc_vif *vif;
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
@@ -882,7 +876,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -912,7 +906,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
list_del_rcu(&vif->list);
wilc->vif_num--;
mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
+ synchronize_rcu();
if (vif->ndev)
unregister_netdev(vif->ndev);
}
@@ -931,16 +925,15 @@ static u8 wilc_get_available_idx(struct wilc *wl)
{
int idx = 0;
struct wilc_vif *vif;
- int srcu_idx;
- srcu_idx = srcu_read_lock(&wl->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
idx = 0;
}
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ rcu_read_unlock();
return idx;
}
@@ -990,7 +983,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
list_add_tail_rcu(&vif->list, &wl->vif_list);
wl->vif_num += 1;
mutex_unlock(&wl->vif_mutex);
- synchronize_srcu(&wl->srcu);
+ synchronize_rcu();
return vif;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 5937d6d45695..eecee3973d6a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -32,8 +32,8 @@
#define wilc_for_each_vif(w, v) \
struct wilc *_w = w; \
- list_for_each_entry_srcu(v, &_w->vif_list, list, \
- srcu_read_lock_held(&_w->srcu))
+ list_for_each_entry_rcu(v, &_w->vif_list, list, \
+ rcu_read_lock_held())
struct wilc_wfi_stats {
unsigned long rx_packets;
@@ -220,7 +220,6 @@ struct wilc {
/* protect vif list */
struct mutex vif_mutex;
- struct srcu_struct srcu;
u8 open_ifcs;
/* protect head of transmit queue */
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index d6d394693090..52a770c5e76f 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -981,8 +981,7 @@ static struct sdio_driver wilc_sdio_driver = {
.of_match_table = wilc_of_match,
}
};
-module_driver(wilc_sdio_driver,
- sdio_register_driver,
- sdio_unregister_driver);
+module_sdio_driver(wilc_sdio_driver);
+
MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index a9e872a7b2c3..37c32d17856e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -712,7 +712,6 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
- int srcu_idx;
u8 *txb = wilc->tx_buffer;
struct wilc_vif *vif;
@@ -724,10 +723,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
- srcu_idx = srcu_read_lock(&wilc->srcu);
+ rcu_read_lock();
wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
- srcu_read_unlock(&wilc->srcu, srcu_idx);
+ rcu_read_unlock();
for (ac = 0; ac < NQUEUES; ac++)
tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 3334c45aac13..7f8646e77ee0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -59,7 +59,7 @@ struct qtnf_bus {
struct qtnf_qlink_transport trans;
struct qtnf_hw_info hw_info;
struct napi_struct mux_napi;
- struct net_device mux_dev;
+ struct net_device *mux_dev;
struct workqueue_struct *workqueue;
struct workqueue_struct *hprio_workqueue;
struct work_struct fw_work;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 677bac835330..825b05dd3271 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -196,27 +196,12 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
return 0;
}
-static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev)
-{
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-
- return dev->tstats ? 0 : -ENOMEM;
-}
-
-static void qtnf_netdev_free_pcpu_stats(struct net_device *dev)
-{
- free_percpu(dev->tstats);
-}
-
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
- .ndo_init = qtnf_netdev_alloc_pcpu_stats,
- .ndo_uninit = qtnf_netdev_free_pcpu_stats,
.ndo_open = qtnf_netdev_open,
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
.ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
@@ -483,6 +468,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE))
dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 9ad4c120fa28..f66eb43094d4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -372,7 +372,12 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto error;
}
- init_dummy_netdev(&bus->mux_dev);
+ bus->mux_dev = alloc_netdev_dummy(0);
+ if (!bus->mux_dev) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
qtnf_pcie_init_irq(pcie_priv, use_msi);
pcie_priv->sysctl_bar = sysctl_bar;
pcie_priv->dmareg_bar = dmareg_bar;
@@ -381,11 +386,13 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param);
if (ret)
- goto error;
+ goto error_free;
qtnf_pcie_bringup_fw_async(bus);
return 0;
+error_free:
+ free_netdev(bus->mux_dev);
error:
destroy_workqueue(pcie_priv->workqueue);
pci_set_drvdata(pdev, NULL);
@@ -417,6 +424,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
netif_napi_del(&bus->mux_napi);
destroy_workqueue(priv->workqueue);
tasklet_kill(&priv->reclaim_tq);
+ free_netdev(bus->mux_dev);
qtnf_pcie_free_shm_ipc(priv);
qtnf_debugfs_remove(bus);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8c23a77d1671..c1a53e1ba3be 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -761,12 +761,12 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(napi, skb);
} else {
pr_debug("drop untagged skb\n");
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
} else {
if (skb) {
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -1146,7 +1146,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
}
tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
- netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ netif_napi_add_weight(bus->mux_dev, &bus->mux_napi,
qtnf_pcie_pearl_rx_poll, 10);
ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index d83362578374..ef5c069542d4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -667,12 +667,12 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
netif_receive_skb(skb);
} else {
pr_debug("drop untagged skb\n");
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
} else {
if (skb) {
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -1159,7 +1159,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
}
tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
- netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ netif_napi_add_weight(bus->mux_dev, &bus->mux_napi,
qtnf_topaz_rx_poll, 10);
ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 565a9a114134..1044105c2557 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -3,4 +3,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl818x_pci.o
-ccflags-y += -I $(srctree)/$(src)/..
+ccflags-y += -I $(src)/..
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 0bf64dfb233a..3deded2c05b6 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -3,4 +3,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
obj-$(CONFIG_RTL8187) += rtl8187.o
-ccflags-y += -I $(srctree)/$(src)/..
+ccflags-y += -I $(src)/..
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
index afe9cc1b49dc..3d04df0f5bf4 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8188e_mac_init_table[] = {
{0x026, 0x41}, {0x027, 0x35}, {0x040, 0x00}, {0x421, 0x0f},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
index 464216d007ce..bd5a0603b4a2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8188f_mac_init_table[] = {
{0x024, 0xDF}, {0x025, 0x07}, {0x02B, 0x1C}, {0x283, 0x20},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
index 3ee7d8f87da6..0abb1b092bc2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
#ifdef CONFIG_RTL8XXXU_UNTESTED
static struct rtl8xxxu_power_base rtl8192c_power_base = {
@@ -77,6 +61,32 @@ static struct rtl8xxxu_power_base rtl8188r_power_base = {
.reg_0868 = 0x00020204,
};
+static const struct rtl8xxxu_reg8val rtl8192cu_mac_init_table[] = {
+ {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+ {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+ {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x652, 0x20}, {0x652, 0x20}, {0x63c, 0x08}, {0x63d, 0x08},
+ {0x63e, 0x0c}, {0x63f, 0x0c}, {0x66e, 0x05}, {0x700, 0x21},
+ {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21},
+ {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
static const struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
@@ -583,6 +593,26 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
return 0;
}
+static int rtl8192cu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG0);
+
+ if (brightness == LED_OFF)
+ ledcfg = LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ else if (brightness == LED_ON)
+ ledcfg = LEDCFG2_SW_LED_CONTROL;
+ else if (brightness == RTL8XXXU_HW_LED_CONTROL)
+ ledcfg = LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+
+ rtl8xxxu_write8(priv, REG_LEDCFG0, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8192cu_fops = {
.identify_chip = rtl8192cu_identify_chip,
.parse_efuse = rtl8192cu_parse_efuse,
@@ -609,6 +639,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.cck_rssi = rtl8723a_cck_rssi,
+ .led_classdev_brightness_set = rtl8192cu_led_brightness_set,
.writeN_block_size = 128,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -621,7 +652,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.trxff_boundary = 0x27ff,
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
- .mactable = rtl8xxxu_gen1_mac_init_table,
+ .mactable = rtl8192cu_mac_init_table,
.total_page_num = TX_TOTAL_PAGE_NUM,
.page_num_hi = TX_PAGE_NUM_HI_PQ,
.page_num_lo = TX_PAGE_NUM_LO_PQ,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c
index 63b73ace27ec..8e123bbfc665 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
{0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c
index 21e4204769d0..cd2156b7a57a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8192f_mac_init_table[] = {
{0x420, 0x00}, {0x422, 0x78}, {0x428, 0x0a}, {0x429, 0x10},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c
index 46d57510e9fc..11c63c320eae 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8710b_mac_init_table[] = {
{0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c
index ad1bb9377ca2..ecbc324e4609 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static struct rtl8xxxu_power_base rtl8723a_power_base = {
.reg_0e00 = 0x0a0c0c0c,
@@ -54,6 +38,31 @@ static struct rtl8xxxu_power_base rtl8723a_power_base = {
.reg_0868 = 0x02040608,
};
+static const struct rtl8xxxu_reg8val rtl8723au_mac_init_table[] = {
+ {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+ {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+ {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
static const struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00039c63},
@@ -518,7 +527,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.trxff_boundary = 0x27ff,
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
- .mactable = rtl8xxxu_gen1_mac_init_table,
+ .mactable = rtl8723au_mac_init_table,
.total_page_num = TX_TOTAL_PAGE_NUM,
.page_num_hi = TX_PAGE_NUM_HI_PQ,
.page_num_lo = TX_PAGE_NUM_LO_PQ,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c
index 9640c841d20a..cc2e60b06f64 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
{0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
@@ -1701,6 +1685,28 @@ static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_sta
return rx_pwr_all;
}
+static int rtl8723bu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ ledcfg &= LEDCFG2_DPDT_SELECT;
+
+ if (brightness == LED_OFF)
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ else if (brightness == LED_ON)
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ else if (brightness == RTL8XXXU_HW_LED_CONTROL)
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8723bu_fops = {
.identify_chip = rtl8723bu_identify_chip,
.parse_efuse = rtl8723bu_parse_efuse,
@@ -1731,6 +1737,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723b_cck_rssi,
+ .led_classdev_brightness_set = rtl8723bu_led_brightness_set,
.writeN_block_size = 1024,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index fa466589eccb..580a2fa675ee 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
-rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
- rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o \
- rtl8xxxu_8188e.o rtl8xxxu_8710b.o rtl8xxxu_8192f.o
+rtl8xxxu-y := core.o 8192e.o 8723b.o \
+ 8723a.o 8192c.o 8188f.o \
+ 8188e.o 8710b.o 8192f.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 4a49f8f9d80f..89a841b4e8d5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -13,24 +13,9 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
#define DRIVER_NAME "rtl8xxxu"
@@ -132,31 +117,6 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
};
-const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
- {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
- {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
- {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
- {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
- {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
- {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
- {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
- {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
- {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
- {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
- {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
- {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
- {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
- {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
- {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
- {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
- {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
- {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
- {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
- {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
- {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
- {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
-};
-
static const struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -1505,13 +1465,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
- u8 val8;
+ u8 val8, base;
int group, i;
group = rtl8xxxu_gen1_channel_to_group(channel);
- cck[0] = priv->cck_tx_power_index_A[group] - 1;
- cck[1] = priv->cck_tx_power_index_B[group] - 1;
+ cck[0] = priv->cck_tx_power_index_A[group];
+ cck[1] = priv->cck_tx_power_index_B[group];
if (priv->hi_pa) {
if (cck[0] > 0x20)
@@ -1522,10 +1482,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
- if (ofdm[0])
- ofdm[0] -= 1;
- if (ofdm[1])
- ofdm[1] -= 1;
ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -1614,20 +1570,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
mcs_a + power_base->reg_0e1c);
+ val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
- else
- val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
}
+
rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
mcs_b + power_base->reg_0868);
+ val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
- else
- val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
}
}
@@ -4385,7 +4340,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Let the 8051 take control of antenna setting */
if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188F &&
- priv->rtl_chip != RTL8710B) {
+ priv->rtl_chip != RTL8710B && priv->rtl_chip != RTL8192C) {
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
val8 |= LEDCFG2_DPDT_SELECT;
rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
@@ -6473,7 +6428,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -6578,7 +6534,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -7998,6 +7955,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
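
A minimal standalone sketch of the clamping pattern the reworked rtl8xxxu_gen1_set_tx_power() loop earlier in this file now relies on. This is not driver code: max_t() and u32_get_bits() are modelled with plain C stand-ins, and the values are illustrative only. The point is that the per-register subtraction is done in a signed type and anything negative is clamped to zero before the byte is written.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for max_t(int, val - base, 0): subtract in a signed type so
 * the result cannot wrap, then clamp anything negative to zero. */
static uint8_t clamped_sub(uint8_t val, uint8_t base)
{
	int tmp = (int)val - (int)base;

	return tmp > 0 ? (uint8_t)tmp : 0;
}

int main(void)
{
	printf("0x20 - 8 -> 0x%02x\n", clamped_sub(0x20, 8));
	printf("0x04 - 6 -> 0x%02x\n", clamped_sub(0x04, 6));	/* clamps to 0 */
	return 0;
}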
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/regs.h
index 61c0c0ec07b3..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/regs.h
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index fd92d23c43d9..f42463e595cc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -5,8 +5,9 @@
* Register definitions taken from original Realtek rtl8723au driver
*/
-#include <asm/byteorder.h>
#include <linux/average.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -122,6 +123,15 @@ enum rtl8xxxu_rx_type {
RX_TYPE_ERROR = -1
};
+enum rtl8xxxu_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
struct rtl8xxxu_rxdesc16 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
@@ -2022,7 +2032,6 @@ struct rtl8xxxu_fileops {
extern int rtl8xxxu_debug;
-extern const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[];
u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr);
u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr);
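
The rxdesc changes above gate RX_FLAG_DECRYPTED on the new security field: a frame is only reported as hardware-decrypted when software decryption is not in use and the descriptor reports an actual cipher. Below is a standalone sketch of just that check, assuming local stand-ins for the mac80211 flag (the bit value is a placeholder, not mac80211's real one); the enum values mirror the rtl8xxxu_rx_desc_enc definition added above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_FLAG_DECRYPTED (1u << 0)	/* placeholder bit, not mac80211's value */

enum rx_desc_enc {
	RX_DESC_ENC_NONE = 0,
	RX_DESC_ENC_WEP40 = 1,
	RX_DESC_ENC_TKIP_WO_MIC = 2,
	RX_DESC_ENC_TKIP_MIC = 3,
	RX_DESC_ENC_AES = 4,
	RX_DESC_ENC_WEP104 = 5,
};

static uint32_t rx_flags(bool swdec, enum rx_desc_enc security)
{
	uint32_t flag = 0;

	/* Only hardware-decrypted frames that actually carried a cipher
	 * are reported as decrypted; open frames are left untouched. */
	if (!swdec && security != RX_DESC_ENC_NONE)
		flag |= RX_FLAG_DECRYPTED;
	return flag;
}

int main(void)
{
	printf("open frame:        0x%x\n", rx_flags(false, RX_DESC_ENC_NONE));
	printf("AES, hw decrypted: 0x%x\n", rx_flags(false, RX_DESC_ENC_AES));
	printf("AES, sw decrypted: 0x%x\n", rx_flags(true, RX_DESC_ENC_AES));
	return 0;
}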
diff --git a/drivers/net/wireless/realtek/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
index 9f6a4e35543c..cfe63f7b28d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Kconfig
+++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig
@@ -37,6 +37,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
depends on PCI
+ select RTL8192D_COMMON
select RTLWIFI
select RTLWIFI_PCI
help
@@ -142,6 +143,9 @@ config RTL8192C_COMMON
depends on RTL8192CE || RTL8192CU
default y
+config RTL8192D_COMMON
+ tristate
+
config RTL8723_COMMON
tristate
depends on RTL8723AE || RTL8723BE
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index 09c30e428375..423981b148df 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
+obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d/
obj-$(CONFIG_RTL8192DE) += rtl8192de/
obj-$(CONFIG_RTL8723AE) += rtl8723ae/
obj-$(CONFIG_RTL8723BE) += rtl8723be/
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 32970ea4b4e7..f9d0d1394442 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -18,7 +18,8 @@ void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
}
static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
- u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+ const u8 *mac_addr, u8 *key_cont_128,
+ u16 us_config)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -94,7 +95,7 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
"after set key, usconfig:%x\n", us_config);
}
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr,
u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
u32 ul_default_key, u8 *key_content)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
index 2461fa9afda0..144807a405b7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.h
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.h
@@ -14,9 +14,9 @@
#define CAM_CONFIG_NO_USEDK 0
void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 ul_key_id);
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index c1fbc29d5ca1..82cf5fb5175f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1211,7 +1211,7 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
}
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params)
+ int max_size, u8 *hwinfo, const int *params)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 4821625ad1e5..e250ffb0f4b2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -89,7 +89,7 @@ void efuse_force_write_vendor_id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params);
+ int max_size, u8 *hwinfo, const int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 4217c9a08d01..0195c9a3e9e8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -489,7 +489,6 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
}
static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
@@ -505,66 +504,39 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
u8 value8;
u32 txqpagenum, txqpageunit, txqremaininpage;
- if (!wmm_enable) {
- numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ :
- CHIP_A_PAGE_NUM_PUBQ;
- txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq;
-
- txqpageunit = txqpagenum / outepnum;
- txqremaininpage = txqpagenum % outepnum;
- if (queue_sel & TX_SELE_HQ)
- numhq = txqpageunit;
- if (queue_sel & TX_SELE_LQ)
- numlq = txqpageunit;
- /* HIGH priority queue always present in the configuration of
- * 2 out-ep. Remainder pages have assigned to High queue */
- if (outepnum > 1 && txqremaininpage)
- numhq += txqremaininpage;
- /* NOTE: This step done before writing REG_RQPN. */
- if (ischipn) {
- if (queue_sel & TX_SELE_NQ)
- numnq = txqpageunit;
- value8 = (u8)_NPQ(numnq);
- rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
- }
- } else {
- /* for WMM ,number of out-ep must more than or equal to 2! */
- numpubq = ischipn ? WMM_CHIP_B_PAGE_NUM_PUBQ :
- WMM_CHIP_A_PAGE_NUM_PUBQ;
- if (queue_sel & TX_SELE_HQ) {
- numhq = ischipn ? WMM_CHIP_B_PAGE_NUM_HPQ :
- WMM_CHIP_A_PAGE_NUM_HPQ;
- }
- if (queue_sel & TX_SELE_LQ) {
- numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ :
- WMM_CHIP_A_PAGE_NUM_LPQ;
- }
- /* NOTE: This step done before writing REG_RQPN. */
- if (ischipn) {
- if (queue_sel & TX_SELE_NQ)
- numnq = WMM_CHIP_B_PAGE_NUM_NPQ;
- value8 = (u8)_NPQ(numnq);
- rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
- }
+ numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ :
+ CHIP_A_PAGE_NUM_PUBQ;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq;
+
+ txqpageunit = txqpagenum / outepnum;
+ txqremaininpage = txqpagenum % outepnum;
+ if (queue_sel & TX_SELE_HQ)
+ numhq = txqpageunit;
+ if (queue_sel & TX_SELE_LQ)
+ numlq = txqpageunit;
+	/* The HIGH priority queue is always present in the 2 out-ep
+	 * configuration. Remainder pages are assigned to the high queue.
+	 */
+ if (outepnum > 1 && txqremaininpage)
+ numhq += txqremaininpage;
+	/* NOTE: This step is done before writing REG_RQPN. */
+ if (ischipn) {
+ if (queue_sel & TX_SELE_NQ)
+ numnq = txqpageunit;
+ value8 = (u8)_NPQ(numnq);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
}
/* TX DMA */
value32 = _HPQ(numhq) | _LPQ(numlq) | _PUBQ(numpubq) | LD_RQPN;
rtl_write_dword(rtlpriv, REG_RQPN, value32);
}
-static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 txpktbuf_bndy;
+ u8 txpktbuf_bndy = TX_PAGE_BOUNDARY;
u8 value8;
- if (!wmm_enable)
- txpktbuf_bndy = TX_PAGE_BOUNDARY;
- else /* for WMM */
- txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
- ? WMM_CHIP_B_TX_PAGE_BOUNDARY
- : WMM_CHIP_A_TX_PAGE_BOUNDARY;
rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
@@ -589,7 +561,6 @@ static void _rtl92c_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq,
}
static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 value;
@@ -614,7 +585,6 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
@@ -638,67 +608,47 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuelow = QUEUE_NORMAL;
break;
}
- if (!wmm_enable) {
- beq = valuelow;
- bkq = valuelow;
- viq = valuehi;
- voq = valuehi;
- mgtq = valuehi;
- hiq = valuehi;
- } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- beq = valuehi;
- bkq = valuelow;
- viq = valuelow;
- voq = valuehi;
- mgtq = valuehi;
- hiq = valuehi;
- }
+
+ beq = valuelow;
+ bkq = valuelow;
+ viq = valuehi;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
+
_rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
static void _rtl92cu_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
- if (!wmm_enable) { /* typical setting */
- beq = QUEUE_LOW;
- bkq = QUEUE_LOW;
- viq = QUEUE_NORMAL;
- voq = QUEUE_HIGH;
- mgtq = QUEUE_HIGH;
- hiq = QUEUE_HIGH;
- } else { /* for WMM */
- beq = QUEUE_LOW;
- bkq = QUEUE_NORMAL;
- viq = QUEUE_NORMAL;
- voq = QUEUE_HIGH;
- mgtq = QUEUE_HIGH;
- hiq = QUEUE_HIGH;
- }
+ beq = QUEUE_LOW;
+ bkq = QUEUE_LOW;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
+
_rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select :0x%02x..\n", queue_sel);
}
static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
switch (out_ep_num) {
case 1:
- _rtl92cu_init_chipn_one_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_one_out_ep_priority(hw, queue_sel);
break;
case 2:
- _rtl92cu_init_chipn_two_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_two_out_ep_priority(hw, queue_sel);
break;
case 3:
- _rtl92cu_init_chipn_three_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_three_out_ep_priority(hw, queue_sel);
break;
default:
WARN_ON(1); /* Shall not reach here! */
@@ -707,7 +657,6 @@ static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
@@ -716,12 +665,7 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
switch (out_ep_num) {
case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
- if (!wmm_enable) /* typical setting */
- hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
- HQSEL_HIQ;
- else /* for WMM */
- hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
- HQSEL_HIQ;
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | HQSEL_HIQ;
break;
case 1:
if (TX_SELE_LQ == queue_sel) {
@@ -742,18 +686,15 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (IS_NORMAL_CHIP(rtlhal->version))
- _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num,
- queue_sel);
+ _rtl92cu_init_chipn_queue_priority(hw, out_ep_num, queue_sel);
else
- _rtl92cu_init_chipt_queue_priority(hw, wmm_enable, out_ep_num,
- queue_sel);
+ _rtl92cu_init_chipt_queue_priority(hw, out_ep_num, queue_sel);
}
static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
@@ -810,8 +751,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
int err = 0;
- u32 boundary = 0;
- u8 wmm_enable = false; /* TODO */
+ u32 boundary = TX_PAGE_BOUNDARY;
u8 out_ep_nums = rtlusb->out_ep_nums;
u8 queue_sel = rtlusb->out_queue_sel;
@@ -821,22 +761,13 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
pr_err("Failed to init power on!\n");
return err;
}
- if (!wmm_enable) {
- boundary = TX_PAGE_BOUNDARY;
- } else { /* for WMM */
- boundary = (IS_NORMAL_CHIP(rtlhal->version))
- ? WMM_CHIP_B_TX_PAGE_BOUNDARY
- : WMM_CHIP_A_TX_PAGE_BOUNDARY;
- }
if (!rtl92c_init_llt_table(hw, boundary)) {
pr_err("Failed to init LLT Table!\n");
return -EINVAL;
}
- _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
- queue_sel);
- _rtl92c_init_trx_buffer(hw, wmm_enable);
- _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
- queue_sel);
+ _rtl92cu_init_queue_reserved_page(hw, out_ep_nums, queue_sel);
+ _rtl92c_init_trx_buffer(hw);
+ _rtl92cu_init_queue_priority(hw, out_ep_nums, queue_sel);
/* Get Rx PHY status in order to report RSSI and others. */
rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
rtl92c_init_interrupt(hw);
@@ -1553,7 +1484,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum wireless_mode wirelessmode = mac->mode;
u8 idx = 0;
switch (variable) {
@@ -1605,36 +1535,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- u8 QOS_MODE = 1;
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
"HW_VAR_SLOT_TIME %x\n", val[0]);
- if (QOS_MODE) {
- for (e_aci = 0; e_aci < AC_MAX; e_aci++)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- &e_aci);
- } else {
- u8 sifstime = 0;
- u8 u1baifs;
- if (IS_WIRELESS_MODE_A(wirelessmode) ||
- IS_WIRELESS_MODE_N_24G(wirelessmode) ||
- IS_WIRELESS_MODE_N_5G(wirelessmode))
- sifstime = 16;
- else
- sifstime = 10;
- u1baifs = sifstime + (2 * val[0]);
- rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
- u1baifs);
- }
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ &e_aci);
break;
}
case HW_VAR_ACK_PREAMBLE:{
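
With the dead WMM paths removed above, the rtl8192cu reserved-page setup always follows the "typical" split: a fixed public-queue allocation, the remaining TX pages divided evenly across the USB out endpoints, and any remainder handed to the high-priority queue. A standalone sketch of that arithmetic, with illustrative page counts rather than the chip's real values:

#include <stdio.h>

/* Page counts below are illustrative only, not the chip's real values. */
static void split_pages(unsigned int total, unsigned int pubq,
			unsigned int out_eps, int use_hq, int use_lq)
{
	unsigned int txq = total - pubq;	/* pages left for TX queues */
	unsigned int unit = txq / out_eps;	/* per-endpoint share */
	unsigned int rem = txq % out_eps;
	unsigned int hq = use_hq ? unit : 0;
	unsigned int lq = use_lq ? unit : 0;

	if (out_eps > 1 && rem)
		hq += rem;	/* leftover pages go to the high queue */

	printf("pub=%u hq=%u lq=%u\n", pubq, hq, lq);
}

int main(void)
{
	split_pages(248, 200, 2, 1, 1);
	return 0;
}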
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile
new file mode 100644
index 000000000000..beebdfa3f7ff
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+rtl8192d-common-objs := \
+ dm_common.o \
+ fw_common.o \
+ hw_common.o \
+ main.o \
+ phy_common.o \
+ rf_common.o \
+ trx_common.o
+
+obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d-common.o
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h
index 21726d9b4aef..21726d9b4aef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c
new file mode 100644
index 000000000000..20373ce998bf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../core.h"
+#include "reg.h"
+#include "def.h"
+#include "phy_common.h"
+#include "dm_common.h"
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+	0x1b00006c, /* 27, -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+ 0x0f00003c, /* 37, -12.5dB */
+ 0x0e400039, /* 38, -13.0dB */
+ 0x0d800036, /* 39, -13.5dB */
+ 0x0cc00033, /* 40, -14.0dB */
+ 0x0c000030, /* 41, -14.5dB */
+ 0x0b40002d, /* 42, -15.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ static const u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
+ 0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
+ 0x0a, 0x09, 0x08, 0x07, 0x06,
+ 0x05, 0x04, 0x04, 0x03, 0x02
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, idx;
+ u32 u4tmp;
+
+ idx = rtlpriv->efuse.eeprom_thermalmeter - rtlpriv->dm.thermalvalue_rxgain;
+ u4tmp = index_mapping[idx] << 12;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===> Rx Gain %x\n", u4tmp);
+
+ for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
+ rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
+ (rtlpriv->phy.reg_rf3c[i] & ~0xF000) | u4tmp);
+}
+
+static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
+ u8 *cck_index_old)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag = 0;
+ const u8 *cckswing;
+ long temp_cck;
+ int i;
+
+ /* Query CCK default setting From 0xa24 */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
+ MASKDWORD) & MASKCCK;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+
+ for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+ if (rtlpriv->dm.cck_inch14)
+ cckswing = &cckswing_table_ch14[i][2];
+ else
+ cckswing = &cckswing_table_ch1ch13[i][2];
+
+ if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
+ *cck_index_old = (u8)i;
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
+ break;
+ }
+ }
+ *temp_cckg = temp_cck;
+}
+
+static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
+ bool *internal_pa, u8 thermalvalue, u8 delta,
+ u8 rf, struct rtl_efuse *rtlefuse,
+ struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
+ const u8 index_mapping[5][INDEX_MAPPING_NUM],
+ const u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
+{
+ u8 offset = 0;
+ u8 index;
+ int i;
+
+ for (i = 0; i < rf; i++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ *internal_pa = rtlefuse->internal_pa_5g[1];
+ else
+ *internal_pa = rtlefuse->internal_pa_5g[i];
+
+ if (*internal_pa) {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 4;
+ else
+ offset = 0;
+ if (rtlphy->current_channel >= 100 &&
+ rtlphy->current_channel <= 165)
+ offset += 2;
+ } else {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 2;
+ else
+ offset = 0;
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter)
+ offset++;
+
+ if (*internal_pa) {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping_pa[offset]
+ [INDEX_MAPPING_NUM - 1];
+ else
+ index =
+ index_mapping_pa[offset][delta];
+ } else {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index =
+ index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ if (*internal_pa && thermalvalue > 0x12) {
+ ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
+ ((delta / 2) * 3 + (delta % 2));
+ } else {
+ ofdm_index[i] -= index;
+ }
+ } else {
+ ofdm_index[i] += index;
+ }
+ }
+}
+
+static void
+rtl92d_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw)
+{
+ static const u8 index_mapping[5][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, decrease power */
+ {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path A/MAC 0, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, decrease power */
+ {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path B/MAC 1, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+		/* 2.4G, decrease power */
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
+ };
+ static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path A/MAC 0, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path A/MAC 0, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
+ /* 5G, path A/MAC 0, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path B/MAC 1, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
+ /* 5G, path B/MAC 1, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_dm *dm = &rtlpriv->dm;
+ u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
+ u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
+ long ele_a = 0, ele_d, temp_cck, val_x, value32;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ u8 offset, thermalvalue_avg_count = 0;
+ u8 ofdm_index_old[2] = {0, 0};
+ u32 thermalvalue_avg = 0;
+ bool internal_pa = false;
+ long val_y, ele_c = 0;
+ s8 cck_index_old = 0;
+ u8 indexforchannel;
+ u8 ofdm_index[2];
+ s8 cck_index = 0;
+ u8 index, swing;
+ int i;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
+
+ dm->txpower_trackinginit = true;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
+
+ thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue,
+ dm->thermalvalue, rtlefuse->eeprom_thermalmeter);
+
+ if (!thermalvalue)
+ goto exit;
+
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (dm->thermalvalue && !rtlhal->reloadtxpowerindex)
+ goto old_index_done;
+
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[0] = (u8)i;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
+ break;
+ }
+ }
+
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD);
+ ele_d &= MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[1] = (u8)i;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
+ break;
+ }
+ }
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
+ } else {
+ temp_cck = 0x090e1317;
+ cck_index_old = 12;
+ }
+
+ if (!dm->thermalvalue) {
+ dm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ dm->thermalvalue_lck = thermalvalue;
+ dm->thermalvalue_iqk = thermalvalue;
+ dm->thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter;
+
+ for (i = 0; i < rf; i++)
+ dm->ofdm_index[i] = ofdm_index_old[i];
+
+ dm->cck_index = cck_index_old;
+ }
+
+ if (rtlhal->reloadtxpowerindex) {
+ for (i = 0; i < rf; i++)
+ dm->ofdm_index[i] = ofdm_index_old[i];
+
+ dm->cck_index = cck_index_old;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
+ }
+
+old_index_done:
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] = dm->ofdm_index[i];
+
+ dm->thermalvalue_avg[dm->thermalvalue_avg_index] = thermalvalue;
+ dm->thermalvalue_avg_index++;
+
+ if (dm->thermalvalue_avg_index == AVG_THERMAL_NUM)
+ dm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM; i++) {
+ if (dm->thermalvalue_avg[i]) {
+ thermalvalue_avg += dm->thermalvalue_avg[i];
+ thermalvalue_avg_count++;
+ }
+ }
+
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+
+ if (rtlhal->reloadtxpowerindex) {
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtlhal->reloadtxpowerindex = false;
+ dm->done_txpower = false;
+ } else if (dm->done_txpower) {
+ delta = abs_diff(thermalvalue, dm->thermalvalue);
+ } else {
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+ }
+
+ delta_lck = abs_diff(thermalvalue, dm->thermalvalue_lck);
+ delta_iqk = abs_diff(thermalvalue, dm->thermalvalue_iqk);
+ delta_rxgain = abs_diff(thermalvalue, dm->thermalvalue_rxgain);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, dm->thermalvalue, rtlefuse->eeprom_thermalmeter,
+ delta, delta_lck, delta_iqk);
+
+ if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
+ dm->thermalvalue_lck = thermalvalue;
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, is2t);
+ }
+
+ if (delta == 0 || !dm->txpower_track_control)
+ goto check_delta;
+
+ dm->done_txpower = true;
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ offset = 4;
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+
+ if (thermalvalue > dm->thermalvalue) {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] -= delta;
+
+ cck_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] += index;
+
+ cck_index += index;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl92d_bandtype_5G(rtlhal, ofdm_index, &internal_pa,
+ thermalvalue, delta, rf, rtlefuse, rtlpriv,
+ rtlphy, index_mapping,
+ index_mapping_internal_pa);
+ }
+
+ if (is2t) {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ dm->ofdm_index[0], dm->ofdm_index[1], dm->cck_index);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, cck_index = 0x%x\n",
+ dm->ofdm_index[0], dm->cck_index);
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) {
+ ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
+ } else if (internal_pa ||
+ rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (ofdm_index[i] < ofdm_min_index_internal_pa)
+ ofdm_index[i] = ofdm_min_index_internal_pa;
+ } else if (ofdm_index[i] < ofdm_min_index) {
+ ofdm_index[i] = ofdm_min_index;
+ }
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (cck_index > CCK_TABLE_SIZE - 1)
+ cck_index = CCK_TABLE_SIZE - 1;
+ else if (cck_index < 0)
+ cck_index = 0;
+ }
+
+ if (is2t) {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1], cck_index);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
+ }
+
+ ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1];
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ /* write new elements A, C, D to regC80 and
+ * regC94, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), value32);
+
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ ofdmswing_table[ofdm_index[0]]);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y);
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* Adjust CCK according to IQK result */
+ for (i = 0; i < 8; i++) {
+ if (dm->cck_inch14)
+ swing = cckswing_table_ch14[cck_index][i];
+ else
+ swing = cckswing_table_ch1ch13[cck_index][i];
+
+ rtl_write_byte(rtlpriv, 0xa22 + i, swing);
+ }
+ }
+
+ if (is2t) {
+ ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5];
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ /* consider minus */
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x00003FF;
+
+ /* write new elements A, C, D to regC88
+ * and regC9C, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, ofdmswing_table[ofdm_index[1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c, ele_d, val_x, val_y);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24, RFREG_OFFSET_MASK));
+
+check_delta:
+ if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
+ rtl92d_phy_reset_iqk_result(hw);
+ dm->thermalvalue_iqk = thermalvalue;
+ rtlpriv->cfg->ops->phy_iq_calibrate(hw);
+ }
+
+ if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G &&
+ thermalvalue <= rtlefuse->eeprom_thermalmeter) {
+ dm->thermalvalue_rxgain = thermalvalue;
+ rtl92d_dm_rxgain_tracking_thermalmeter(hw);
+ }
+
+ if (dm->txpower_track_control)
+ dm->thermalvalue = thermalvalue;
+
+exit:
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+}
+
+void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_trackinginit = false;
+ rtlpriv->dm.txpower_track_control = true;
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_initialize_txpower_tracking);
+
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
+ if (!rtlpriv->dm.tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
+ BIT(16), 0x03);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
+ rtlpriv->dm.tm_trigger = 1;
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
+ rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
+ rtlpriv->dm.tm_trigger = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_check_txpower_tracking_thermal_meter);
+
+void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt;
+ unsigned long flag = 0;
+ u32 ret_value;
+
+ /* hold ofdm counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /* hold page D counter */
+
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
+ falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+ falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
+ falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;
+
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
+
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ falsealm_cnt->cnt_cck_fail = 0;
+ }
+
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail +
+ falsealm_cnt->cnt_cck_fail;
+
+ /* reset false alarm counter registers */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+
+ /* update ofdm counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); /* update page C counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); /* update page D counter */
+
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ /* reset cck counter */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+ /* enable cck counter */
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
+ falsealm_cnt->cnt_fast_fsync_fail,
+ falsealm_cnt->cnt_sb_search_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_false_alarm_counter_statistics);
+
+void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if (mac->link_state < MAC80211_LINKED &&
+ rtlpriv->dm.entry_min_undec_sm_pwdb == 0) {
+ de_digtable->min_undec_pwdb_for_dm = 0;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
+ } else {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
+ }
+ } else {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
+ }
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ de_digtable->min_undec_pwdb_for_dm);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_find_minimum_rssi);
+
+static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ unsigned long flag = 0;
+
+ if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
+ if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ if (de_digtable->min_undec_pwdb_for_dm <= 25)
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ } else {
+ if (de_digtable->min_undec_pwdb_for_dm <= 20)
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ }
+ } else {
+ de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ }
+ if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
+ if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
+ }
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
+ de_digtable->cursta_cstate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
+ de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
+}
+
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ de_digtable->cur_igvalue, de_digtable->pre_igvalue,
+ de_digtable->back_val);
+ if (!de_digtable->dig_enable_flag) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
+ de_digtable->pre_igvalue = 0x17;
+ return;
+ }
+ if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ de_digtable->cur_igvalue);
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+ de_digtable->cur_igvalue);
+ de_digtable->pre_igvalue = de_digtable->cur_igvalue;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_write_dig);
+
+static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
+{
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED &&
+ rtlpriv->mac80211.vendor == PEER_CISCO) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
+ if (de_digtable->last_min_undec_pwdb_for_dm >= 50 &&
+ de_digtable->min_undec_pwdb_for_dm < 50) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode Off\n");
+ } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
+ de_digtable->min_undec_pwdb_for_dm > 55) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode On\n");
+ }
+ } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
+ }
+}
+
+void rtl92d_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ u8 value_igi = de_digtable->cur_igvalue;
+ struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt;
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
+ if (rtlpriv->rtlhal.earlymode_enable) {
+ rtl92d_early_mode_enabled(rtlpriv);
+ de_digtable->last_min_undec_pwdb_for_dm =
+ de_digtable->min_undec_pwdb_for_dm;
+ }
+ if (!rtlpriv->dm.dm_initialgain_enable)
+ return;
+
+	/* Because data packets are still sent while scanning, returning
+	 * here would lower WEP throughput with some APs (e.g. gear-3700).
+	 * This is a difference between the mac80211 and ieee80211 drivers.
+	 */
+ /* if (rtlpriv->mac80211.act_scanning)
+ * return;
+ */
+
+	/* Not STA mode, return for now */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
+ /* Decide the current status and if modify initial gain or not */
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ de_digtable->cursta_cstate = DIG_STA_CONNECT;
+ else
+ de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+
+ /* adjust initial gain according to false alarm counter */
+ if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
+ value_igi--;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
+ value_igi += 0;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
+ value_igi++;
+ else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
+ value_igi += 2;
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
+
+ /* deal with abnormally large false alarm */
+ if (falsealm_cnt->cnt_all > 10000) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+			"dm_DIG(): Abnormally high false alarm case\n");
+
+ de_digtable->large_fa_hit++;
+ if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
+ de_digtable->forbidden_igi = de_digtable->cur_igvalue;
+ de_digtable->large_fa_hit = 1;
+ }
+ if (de_digtable->large_fa_hit >= 3) {
+ if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
+ de_digtable->rx_gain_min = DM_DIG_MAX;
+ else
+ de_digtable->rx_gain_min =
+ (de_digtable->forbidden_igi + 1);
+ de_digtable->recover_cnt = 3600; /* 3600=2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (de_digtable->recover_cnt != 0) {
+ de_digtable->recover_cnt--;
+ } else {
+ if (de_digtable->large_fa_hit == 0) {
+ if ((de_digtable->forbidden_igi - 1) <
+ DM_DIG_FA_LOWER) {
+ de_digtable->forbidden_igi =
+ DM_DIG_FA_LOWER;
+ de_digtable->rx_gain_min =
+ DM_DIG_FA_LOWER;
+
+ } else {
+ de_digtable->forbidden_igi--;
+ de_digtable->rx_gain_min =
+ (de_digtable->forbidden_igi + 1);
+ }
+ } else if (de_digtable->large_fa_hit == 3) {
+ de_digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
+
+ if (value_igi > DM_DIG_MAX)
+ value_igi = DM_DIG_MAX;
+ else if (value_igi < de_digtable->rx_gain_min)
+ value_igi = de_digtable->rx_gain_min;
+ de_digtable->cur_igvalue = value_igi;
+ rtl92d_dm_write_dig(hw);
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
+ rtl92d_dm_cck_packet_detection_thresh(hw);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_dig);
+
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_init_edca_turbo);
+
+void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ const u32 edca_be_ul = 0x5ea42b;
+ const u32 edca_be_dl = 0x5ea42b;
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt;
+ u64 cur_rxok_cnt;
+
+ if (mac->link_state != MAC80211_LINKED) {
+ rtlpriv->dm.current_turbo_edca = false;
+ goto exit;
+ }
+
+ if (!rtlpriv->dm.is_any_nonbepkts &&
+ !rtlpriv->dm.disable_framebursting) {
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+ if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+ if (!rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_dl);
+ rtlpriv->dm.is_cur_rdlstate = true;
+ }
+ } else {
+ if (rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_ul);
+ rtlpriv->dm.is_cur_rdlstate = false;
+ }
+ }
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+ }
+
+exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_check_edca_turbo);
+
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *ra = &rtlpriv->ra;
+
+ ra->ratr_state = DM_RATR_STA_INIT;
+ ra->pre_ratr_state = DM_RATR_STA_INIT;
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_init_rate_adaptive_mask);
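
rtl92d_dm_check_edca_turbo() above classifies the traffic direction from the unicast byte deltas since the last run: when received bytes exceed four times the transmitted bytes the link is treated as download-dominant and the downlink BE EDCA value is programmed, otherwise the uplink value is used. A standalone sketch of just that decision (not driver code, no register writes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when traffic since the last check looks download-dominant. */
static bool downlink_dominant(uint64_t tx_bytes_delta, uint64_t rx_bytes_delta)
{
	return rx_bytes_delta > 4 * tx_bytes_delta;
}

int main(void)
{
	printf("%s\n", downlink_dominant(1000, 5000) ? "use DL BE param" : "use UL BE param");
	printf("%s\n", downlink_dominant(5000, 1000) ? "use DL BE param" : "use UL BE param");
	return 0;
}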
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h
new file mode 100644
index 000000000000..a146fc975421
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_DM_COMMON_H__
+#define __RTL92D_DM_COMMON_H__
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_LENGTH 33
+
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x100
+#define DM_DIG_FA_TH1 0x400
+#define DM_DIG_FA_TH2 0x600
+
+#define RXPATHSELECTION_SS_TH_LOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVAL 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define INDEX_MAPPING_NUM 13
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw);
+void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw);
+void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw);
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92d_dm_dig(struct ieee80211_hw *hw);
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw);
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c
new file mode 100644
index 000000000000..aa54dbde6ea8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../efuse.h"
+#include "def.h"
+#include "reg.h"
+#include "fw_common.h"
+
+bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv)
+{
+ return !!(rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY);
+}
+EXPORT_SYMBOL_GPL(rtl92d_is_fw_downloaded);
+
+void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+ /* Reserved for fw extension.
+ * 0x81[7] is used for MAC0 status,
+ * so don't write this register here:
+ * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+ */
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_enable_fw_download);
+
+void rtl92d_write_fw(struct ieee80211_hw *hw,
+ enum version_8192d version, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 *bufferptr = buffer;
+ u32 pagenums, remainsize;
+ u32 page, offset;
+
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+ rtl_fill_dummy(bufferptr, &size);
+
+ pagenums = size / FW_8192D_PAGE_SIZE;
+ remainsize = size % FW_8192D_PAGE_SIZE;
+
+ if (pagenums > 8)
+ pr_err("Page numbers should not greater then 8\n");
+
+ for (page = 0; page < pagenums; page++) {
+ offset = page * FW_8192D_PAGE_SIZE;
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192D_PAGE_SIZE);
+ }
+
+ if (remainsize) {
+ offset = pagenums * FW_8192D_PAGE_SIZE;
+ page = pagenums;
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_write_fw);
+
+int rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
+ pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
+ value32);
+ return -EIO;
+ }
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl92d_fw_free_to_go);
+
+#define RTL_USB_DELAY_FACTOR 60
+
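+/* Force an 8051 MCU reset by writing 0x20 to (REG_HMETFR + 3) and wait
+ * for FEN_CPUEN to clear. On USB the firmware interrupt masks are
+ * adjusted first and the wait is extended by RTL_USB_DELAY_FACTOR.
+ */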
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 u1b_tmp;
+ u8 delay = 100;
+
+ if (rtlhal->interface == INTF_USB) {
+ delay *= RTL_USB_DELAY_FACTOR;
+
+ rtl_write_byte(rtlpriv, REG_FSIMR, 0);
+
+ /* Disable the other HRCV interrupts so they don't interfere with the 8051 reset. */
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0x20);
+
+ /* Close mask to prevent incorrect FW write operation. */
+ rtl_write_byte(rtlpriv, REG_FTIMR, 0);
+ }
+
+ /* Writing 0x20 to (REG_HMETFR + 3) resets the 8051 */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+ while (u1b_tmp & (FEN_CPUEN >> 8)) {
+ delay--;
+ if (delay == 0)
+ break;
+ udelay(50);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+
+ if (rtlhal->interface == INTF_USB) {
+ if ((u1b_tmp & (FEN_CPUEN >> 8)) && delay == 0)
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0);
+ }
+
+ WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "=====> 8051 reset success (%d)\n", delay);
+}
+EXPORT_SYMBOL_GPL(rtl92d_firmware_selfreset);
+
+int rtl92d_fw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 counter;
+
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW has already been downloaded\n");
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ if (rtlhal->interfaceindex == 0) {
+ if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
+ MAC0_READY) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC0_READY));
+ return 0;
+ }
+ udelay(5);
+ } else {
+ if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
+ MAC1_READY) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC1_READY));
+ return 0;
+ }
+ udelay(5);
+ }
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (rtlhal->interfaceindex == 0) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY));
+ } else {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY));
+ }
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL));
+ return -1;
+}
+EXPORT_SYMBOL_GPL(rtl92d_fw_init);
+
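+/* Return true when the firmware has read H2C message box 'boxnum', i.e.
+ * the corresponding bit in REG_HMETFR is clear and the box can be reused.
+ */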
+static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
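+/* Write a host-to-card (H2C) command into one of the four HMEBOX register
+ * sets, used in round-robin order. Commands longer than three bytes also
+ * use the HMEBOX_EXT registers and set BIT(7) of the element ID. The box
+ * is only reused once the firmware has read its previous contents.
+ */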
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *cmdbuffer)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 boxcontent[4], boxextcontent[2];
+ u16 box_reg = 0, box_extreg = 0;
+ u8 wait_writeh2c_limmit = 100;
+ bool bwrite_success = false;
+ u8 wait_h2c_limmit = 100;
+ u32 h2c_waitcounter = 0;
+ bool isfw_read = false;
+ unsigned long flag;
+ u8 u1b_tmp;
+ u8 boxnum;
+ u8 idx;
+
+ if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Return as RF is off!!!\n");
+ return;
+ }
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+
+ while (!bwrite_success) {
+ wait_writeh2c_limmit--;
+ if (wait_writeh2c_limmit == 0) {
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
+ break;
+ }
+
+ boxnum = rtlhal->last_hmeboxnum;
+ if (boxnum > 3) {
+ pr_err("boxnum %#x too big\n", boxnum);
+ break;
+ }
+
+ box_reg = REG_HMEBOX_0 + boxnum * SIZE_OF_REG_HMEBOX;
+ box_extreg = REG_HMEBOX_EXT_0 + boxnum * SIZE_OF_REG_HMEBOX_EXT;
+
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ wait_h2c_limmit--;
+ if (wait_h2c_limmit == 0) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
+ break;
+ }
+
+ udelay(10);
+
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
+ }
+
+ if (!isfw_read) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
+ break;
+ }
+
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
+
+ switch (cmd_len) {
+ case 1 ... 3:
+ /* BOX: | ID | A0 | A1 | A2 |
+ * BOX_EXT: --- N/A ------
+ */
+ boxcontent[0] &= ~BIT(7);
+ memcpy(boxcontent + 1, cmdbuffer, cmd_len);
+
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 4 ... 5:
+ /* ID ext = ID | BIT(7)
+ * BOX: | ID ext | A2 | A3 | A4 |
+ * BOX_EXT: | A0 | A1 |
+ */
+ boxcontent[0] |= BIT(7);
+ memcpy(boxextcontent, cmdbuffer, 2);
+ memcpy(boxcontent + 1, cmdbuffer + 2, cmd_len - 2);
+
+ for (idx = 0; idx < 2; idx++)
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n", cmd_len);
+ break;
+ }
+
+ bwrite_success = true;
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_fill_h2c_cmd);
+
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = {0};
+
+ u1_joinbssrpt_parm[0] = mstatus;
+ rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h
new file mode 100644
index 000000000000..4b73e0bd4ac4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_FW_COMMON_H__
+#define __RTL92D_FW_COMMON_H__
+
+#define FW_8192D_START_ADDRESS 0x1000
+#define FW_8192D_PAGE_SIZE 4096
+#define FW_8192D_POLLING_TIMEOUT_COUNT 1000
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
+
+/* Firmware header (8-byte alignment required) */
+/* --- LONG WORD 0 ---- */
+#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
+ le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
+#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
+ le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(15, 0))
+#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
+ le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(23, 16))
+
+#define RAID_MASK GENMASK(31, 28)
+#define RATE_MASK_MASK GENMASK(27, 0)
+#define SHORT_GI_MASK BIT(5)
+#define MACID_MASK GENMASK(4, 0)
+
+struct rtl92d_rate_mask_h2c {
+ __le32 rate_mask_and_raid;
+ u8 macid_and_short_gi;
+} __packed;
+
+bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv);
+void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable);
+void rtl92d_write_fw(struct ieee80211_hw *hw,
+ enum version_8192d version, u8 *buffer, u32 size);
+int rtl92d_fw_free_to_go(struct ieee80211_hw *hw);
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
+int rtl92d_fw_init(struct ieee80211_hw *hw);
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
new file mode 100644
index 000000000000..6570d5e168e9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../efuse.h"
+#include "../pci.h"
+#include "../regd.h"
+#include "def.h"
+#include "reg.h"
+#include "dm_common.h"
+#include "fw_common.h"
+#include "hw_common.h"
+#include "phy_common.h"
+
+void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+EXPORT_SYMBOL_GPL(rtl92de_stop_tx_beacon);
+
+void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+EXPORT_SYMBOL_GPL(rtl92de_resume_tx_beacon);
+
+void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfstate;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
+ *((bool *)(val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *)(val)) = false;
+ else
+ *((bool *)(val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *)(val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+ *((u64 *)(val)) = tsf;
+ break;
+ }
+ case HW_VAR_INT_MIGRATION:
+ *((bool *)(val)) = rtlpriv->dm.interrupt_migration;
+ break;
+ case HW_VAR_INT_AC:
+ *((bool *)(val)) = rtlpriv->dm.disable_tx_int;
+ break;
+ case HAL_DEF_WOWLAN:
+ break;
+ default:
+ pr_err("switch case %#x not processed\n", variable);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_hw_reg);
+
+void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_BASIC_RATE: {
+ u16 rate_cfg = ((u16 *)val)[0];
+ u8 rate_index = 0;
+
+ rate_cfg = rate_cfg & 0x15f;
+ if (mac->vendor == PEER_CISCO &&
+ ((rate_cfg & 0x150) == 0))
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8) & 0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ if (rtlhal->fw_version > 0xe)
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_SIFS:
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME: {
+ u8 e_aci;
+
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (&e_aci));
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE: {
+ u8 reg_tmp;
+ u8 short_preamble = (bool)(*val);
+
+ reg_tmp = (mac->cur_40_prime_sc) << 5;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE: {
+ u8 min_spacing_to_set;
+
+ min_spacing_to_set = *val;
+ if (min_spacing_to_set <= 7) {
+ mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+ min_spacing_to_set);
+ *val = min_spacing_to_set;
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY: {
+ u8 density_to_set;
+
+ density_to_set = *val;
+ mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
+ mac->min_space_cfg |= (density_to_set << 3);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR: {
+ u8 factor_toset;
+ u32 regtoset;
+ u8 *ptmp_byte = NULL;
+ u8 index;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ regtoset = 0xb9726641;
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ regtoset = 0x66626641;
+ else
+ regtoset = 0xb972a841;
+ factor_toset = *val;
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+ for (index = 0; index < 4; index++) {
+ ptmp_byte = (u8 *)(&regtoset) + index;
+ if ((*ptmp_byte & 0xf0) >
+ (factor_toset << 4))
+ *ptmp_byte = (*ptmp_byte & 0x0f)
+ | (factor_toset << 4);
+ if ((*ptmp_byte & 0x0f) > factor_toset)
+ *ptmp_byte = (*ptmp_byte & 0xf0)
+ | (factor_toset);
+ }
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
+ }
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT: {
+ u8 retry_limit = val[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *)val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *val;
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
+ break;
+ case HW_VAR_SET_RPWM:
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
+ break;
+ case HW_VAR_H2C_FW_PWRMODE:
+ break;
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *)val);
+ break;
+ case HW_VAR_AID: {
+ u16 u2btmp;
+
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+ break;
+ }
+ default:
+ pr_err("switch case %#x not processed\n", variable);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_hw_reg);
+
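+/* Write one link list table (LLT) entry and poll until the hardware
+ * reports the write as done or POLLING_LLT_THRESHOLD is exceeded.
+ */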
+bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
+ break;
+ if (count > POLLING_LLT_THRESHOLD) {
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
+ status = false;
+ break;
+ }
+ } while (++count);
+ return status;
+}
+EXPORT_SYMBOL_GPL(rtl92de_llt_write);
+
+void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
+ return;
+ }
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
+ }
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+EXPORT_SYMBOL_GPL(rtl92de_enable_hw_security_config);
+
+/* Don't set REG_EDCA_BE_PARAM here because mac80211
+ * will send packets while scanning.
+ */
+void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ rtl92d_dm_init_edca_turbo(hw);
+}
+EXPORT_SYMBOL_GPL(rtl92de_set_qos);
+
+static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (!(value32 & 0x000f0000)) {
+ version = VERSION_TEST_CHIP_92D_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
+ } else {
+ version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
+ }
+ return version;
+}
+
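+/* Fill the txpower_info structure from the efuse contents, falling back
+ * to the EEPROM default values when autoload failed or an entry reads
+ * back as 0xFF.
+ */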
+static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
+ u8 *efuse, bool autoloadfail)
+{
+ u32 rfpath, eeaddr, group, offset, offset1, offset2;
+ u8 i, val8;
+
+ memset(pwrinfo, 0, sizeof(struct txpower_info));
+ if (autoloadfail) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ if (group < CHANNEL_GROUP_MAX_2G) {
+ pwrinfo->cck_index[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ } else {
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G;
+ }
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ return;
+ }
+
+ /* Autoload may be OK, but the TX power index values may not be filled.
+ * If we find an unfilled value, set it to the default.
+ */
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) {
+ eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3) + group;
+
+ pwrinfo->cck_index[rfpath][group] =
+ efuse[eeaddr] == 0xFF ?
+ (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ efuse[eeaddr];
+ }
+ }
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ offset1 = group / 3;
+ offset2 = group % 3;
+ eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3);
+ eeaddr += offset2 + offset1 * 21;
+
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ efuse[eeaddr] == 0xFF ?
+ (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ efuse[eeaddr];
+ }
+ }
+
+ /* These are only for the 92D efuse offsets. */
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ offset1 = group / 3;
+ offset2 = group % 3;
+ offset = offset2 + offset1 * 21;
+
+ val8 = efuse[EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+
+ val8 = efuse[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+
+ val8 = efuse[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+
+ val8 = efuse[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+
+ val8 = efuse[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+
+ if (efuse[EEPROM_TSSI_A_5G] != 0xFF) {
+ /* 5GL */
+ pwrinfo->tssi_a[0] = efuse[EEPROM_TSSI_A_5G] & 0x3F;
+ pwrinfo->tssi_b[0] = efuse[EEPROM_TSSI_B_5G] & 0x3F;
+ /* 5GM */
+ pwrinfo->tssi_a[1] = efuse[EEPROM_TSSI_AB_5G] & 0x3F;
+ pwrinfo->tssi_b[1] = (efuse[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 |
+ (efuse[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2;
+ /* 5GH */
+ pwrinfo->tssi_a[2] = (efuse[EEPROM_TSSI_AB_5G + 1] & 0xF0) >> 4 |
+ (efuse[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4;
+ pwrinfo->tssi_b[2] = (efuse[EEPROM_TSSI_AB_5G + 2] & 0xFC) >> 2;
+ } else {
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ }
+}
+
+static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info pwrinfo;
+ u8 tempval[2], i, pwr, diff;
+ u32 ch, rfpath, group;
+
+ _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
+ if (!autoload_fail) {
+ /* bit0~2 */
+ rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
+ rtlefuse->eeprom_thermalmeter =
+ hwinfo[EEPROM_THERMAL_METER] & 0x1f;
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K];
+ tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
+ tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
+ rtlefuse->txpwr_fromeprom = true;
+ if (IS_92D_D_CUT(rtlpriv->rtlhal.version) ||
+ IS_92D_E_CUT(rtlpriv->rtlhal.version)) {
+ rtlefuse->internal_pa_5g[0] =
+ !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
+ rtlefuse->internal_pa_5g[1] =
+ !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Is D cut,Internal PA0 %d Internal PA1 %d\n",
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]);
+ }
+ rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
+ rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP;
+ tempval[0] = 3;
+ tempval[1] = tempval[0];
+ }
+
+ /* Use default values to fill in parameters wherever the efuse is
+ * not populated.
+ */
+
+ /* ThermalMeter from EEPROM */
+ if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+ rtlefuse->eeprom_thermalmeter > 0x1c)
+ rtlefuse->eeprom_thermalmeter = 0x12;
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+
+ /* check XTAL_K */
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0;
+ if (rtlefuse->eeprom_regulatory > 3)
+ rtlefuse->eeprom_regulatory = 0;
+
+ for (i = 0; i < 2; i++) {
+ switch (tempval[i]) {
+ case 0:
+ tempval[i] = 5;
+ break;
+ case 1:
+ tempval[i] = 4;
+ break;
+ case 2:
+ tempval[i] = 3;
+ break;
+ case 3:
+ default:
+ tempval[i] = 0;
+ break;
+ }
+ }
+
+ rtlefuse->delta_iqk = tempval[0];
+ if (tempval[1] > 0)
+ rtlefuse->delta_lck = tempval[1] - 1;
+ if (rtlefuse->eeprom_c9 == 0xFF)
+ rtlefuse->eeprom_c9 = 0x00;
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
+ rtlefuse->delta_iqk, rtlefuse->delta_lck);
+
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ group = rtl92d_get_chnlgroup_fromarray((u8)ch);
+ if (ch < CHANNEL_MAX_NUMBER_2G)
+ rtlefuse->txpwrlevel_cck[rfpath][ch] =
+ pwrinfo.cck_index[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] =
+ pwrinfo.ht40_1sindex[rfpath][group];
+ rtlefuse->txpwr_ht20diff[rfpath][ch] =
+ pwrinfo.ht20indexdiff[rfpath][group];
+ rtlefuse->txpwr_legacyhtdiff[rfpath][ch] =
+ pwrinfo.ofdmindexdiff[rfpath][group];
+ rtlefuse->pwrgroup_ht20[rfpath][ch] =
+ pwrinfo.ht20maxoffset[rfpath][group];
+ rtlefuse->pwrgroup_ht40[rfpath][ch] =
+ pwrinfo.ht40maxoffset[rfpath][group];
+ pwr = pwrinfo.ht40_1sindex[rfpath][group];
+ diff = pwrinfo.ht40_2sindexdiff[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] =
+ (pwr > diff) ? (pwr - diff) : 0;
+ }
+ }
+}
+
+static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool is_single_mac = true;
+
+ if (rtlhal->interface == INTF_PCI)
+ is_single_mac = !!(content[EEPROM_MAC_FUNCTION] & BIT(3));
+ else if (rtlhal->interface == INTF_USB)
+ is_single_mac = !(content[EEPROM_ENDPOINT_SETTING] & BIT(0));
+
+ if (is_single_mac) {
+ rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode SINGLEMAC_SINGLEPHY\n");
+ } else {
+ rtlhal->macphymode = DUALMAC_DUALPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode DUALMAC_DUALPHY\n");
+ }
+}
+
+static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ _rtl92de_read_macphymode_from_prom(hw, content);
+ rtl92d_phy_config_macphymode(hw);
+ rtl92d_phy_config_macphymode_info(hw);
+}
+
+static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum version_8192d chipver = rtlpriv->rtlhal.version;
+ u8 cutvalue[2];
+ u16 chipvalue;
+
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
+ chipvalue = (cutvalue[1] << 8) | cutvalue[0];
+ switch (chipvalue) {
+ case 0xAA55:
+ chipver |= CHIP_92D_C_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
+ break;
+ case 0x9966:
+ chipver |= CHIP_92D_D_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
+ break;
+ case 0xCC33:
+ case 0x33CC:
+ chipver |= CHIP_92D_E_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
+ break;
+ default:
+ chipver |= CHIP_92D_D_CUT;
+ pr_err("Unknown CUT!\n");
+ break;
+ }
+ rtlpriv->rtlhal.version = chipver;
+}
+
+static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
+{
+ static const int params_pci[] = {
+ RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D,
+ EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13
+ };
+ static const int params_usb[] = {
+ RTL8190_EEPROM_ID, EEPROM_VID_USB, EEPROM_PID_USB,
+ EEPROM_VID_USB, EEPROM_PID_USB, EEPROM_MAC_ADDR_MAC0_92DU,
+ EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ const int *params = params_pci;
+ u8 *hwinfo;
+
+ if (rtlhal->interface == INTF_USB)
+ params = params_usb;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
+ return;
+
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
+
+ _rtl92de_efuse_update_chip_version(hw);
+ _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
+
+ /* Read Permanent MAC address for 2nd interface */
+ if (rtlhal->interfaceindex != 0)
+ ether_addr_copy(rtlefuse->dev_addr,
+ &hwinfo[EEPROM_MAC_ADDR_MAC1_92D]);
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
+ rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
+
+ /* Read Channel Plan */
+ switch (rtlhal->bandset) {
+ case BAND_ON_2_4G:
+ rtlefuse->channel_plan = COUNTRY_CODE_TELEC;
+ break;
+ case BAND_ON_5G:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ case BAND_ON_BOTH:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ default:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ }
+ rtlefuse->txpwr_fromeprom = true;
+exit:
+ kfree(hwinfo);
+}
+
+void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl92d_read_chip_version(hw);
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ rtlefuse->autoload_status = tmp_u1b;
+ if (tmp_u1b & BIT(4)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+
+ rtlefuse->autoload_failflag = false;
+ _rtl92de_read_adapter_info(hw);
+ } else {
+ pr_err("Autoload ERR!!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92de_read_eeprom_info);
+
+static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ enum wireless_mode wirelessmode;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 nmode = mac->ht_enable;
+ u8 curshortgi_40mhz;
+ u8 curshortgi_20mhz;
+ u32 tmp_ratr_value;
+ u8 ratr_index = 0;
+ u16 shortgi_rate;
+ u32 ratr_value;
+
+ curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->deflink.supp_rates[1] << 4;
+ else
+ ratr_value = sta->deflink.supp_rates[0];
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_A:
+ ratr_value &= 0x00000FF0;
+ break;
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R) {
+ ratr_mask = 0x000ff005;
+ } else {
+ ratr_mask = 0x0f0ff005;
+ }
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
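+/* Build a rate mask H2C command for the station from its supported rates,
+ * HT capabilities, bandwidth and RSSI level, and hand it to the firmware
+ * (directly on PCI, via a work item on USB to avoid sleeping in atomic
+ * context).
+ */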
+static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl92d_rate_mask_h2c rate_mask = {};
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_sta_info *sta_entry = NULL;
+ enum wireless_mode wirelessmode;
+ bool shortgi = false;
+ u8 curshortgi_40mhz;
+ u8 curshortgi_20mhz;
+ u8 curtxbw_40mhz;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 macid = 0;
+ u8 mimo_ps;
+
+ curtxbw_40mhz = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
+ curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ mimo_ps = sta_entry->mimo_ps;
+ wirelessmode = sta_entry->wireless_mode;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
+ else
+ ratr_bitmap = sta->deflink.supp_rates[0];
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_G;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ if (wirelessmode == WIRELESS_MODE_N_24G)
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ else
+ ratr_index = RATR_INX_WIRELESS_NG;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+
+ le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_bitmap, RATE_MASK_MASK);
+ le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_index, RAID_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, macid, MACID_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, shortgi, SHORT_GI_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, 1, BIT(7));
+
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, &rate_mask);
+
+ if (rtlhal->interface == INTF_PCI) {
+ rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, sizeof(rate_mask),
+ (u8 *)&rate_mask);
+ } else {
+ /* rtl92d_fill_h2c_cmd() does USB I/O and will result in a
+ * "scheduled while atomic" if called directly
+ */
+ memcpy(rtlpriv->rate_mask, &rate_mask,
+ sizeof(rtlpriv->rate_mask));
+ schedule_work(&rtlpriv->works.fill_h2c_cmd);
+ }
+
+ if (macid != 0)
+ sta_entry->ratr_index = ratr_index;
+}
+
+void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.useramask)
+ rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
+ else
+ rtl92de_update_hal_rate_table(hw, sta);
+}
+EXPORT_SYMBOL_GPL(rtl92de_update_hal_rate_tbl);
+
+void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ &mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x1010;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+EXPORT_SYMBOL_GPL(rtl92de_update_channel_access_setting);
+
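+/* Poll the RF-kill GPIO and update the software radio state. Returns
+ * true when the radio is on; *valid is set once the state has been
+ * evaluated.
+ */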
+bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset;
+ u8 u1tmp;
+ bool actuallyset = false;
+ unsigned long flag;
+
+ if (rtlpriv->rtlhal.interface == INTF_PCI &&
+ rtlpci->being_init_adapter)
+ return false;
+ if (ppsc->swrf_processing)
+ return false;
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ }
+
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+ rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
+ if (ppsc->hwradiooff && e_rfpowerstate_toset == ERFON) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if (!ppsc->hwradiooff && e_rfpowerstate_toset == ERFOFF) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+ if (actuallyset) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
+EXPORT_SYMBOL_GPL(rtl92de_gpio_radio_on_off_checking);
+
+void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ static const u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static const u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u8 *macaddr = p_macaddr;
+ bool is_pairwise = false;
+ u32 entry_id;
+
+ if (clear_all) {
+ u8 idx;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+
+ return;
+ }
+
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
+ enc_algo = CAM_TKIP;
+ break;
+ }
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ pr_err("Can not find free hw security cam entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+ "Pairwise Key content",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo, CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92de_set_key);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
new file mode 100644
index 000000000000..2c07f5cc5766
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_HW_COMMON_H__
+#define __RTL92D_HW_COMMON_H__
+
+void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw);
+void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw);
+void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl92de_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw);
+void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c
new file mode 100644
index 000000000000..e58dc4000c19
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192D 802.11n common routines");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c
new file mode 100644
index 000000000000..228c84ab5b90
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "def.h"
+#include "reg.h"
+#include "dm_common.h"
+#include "phy_common.h"
+#include "rf_common.h"
+
+static const u8 channel_all[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 149, 151, 153, 155,
+ 157, 159, 161, 163, 165
+};
+
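+/* Read an RF register through the HSSI/LSSI 3-wire serial interface.
+ * The result is taken from the PI or SI readback register depending on
+ * bit 8 of the HSSI parameter 1 register.
+ */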
+static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ newoffset = offset;
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ udelay(10);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ udelay(100);
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);
+ udelay(10);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+ BLSSIREADBACKDATA);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
+ return retvalue;
+}
+
+static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 data_and_addr;
+ u32 newoffset;
+
+ newoffset = offset;
+ /* T65 RF */
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
+}
+
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
+ rtl92d_pci_lock(rtlpriv);
+ original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ rtl92d_pci_unlock(rtlpriv);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
+ return readback_value;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_query_rf_reg);
+
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 original_value, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
+ if (bitmask == 0)
+ return;
+ rtl92d_pci_lock(rtlpriv);
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92d_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+ rtl92d_pci_unlock(rtlpriv);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_rf_reg);
+
+void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ /* RF Interface Software Control */
+ /* 16 LSBs if read 32-bit from 0x870 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 LSBs if read 32-bit from 0x874 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ /* RF Interface Readback Value */
+ /* 16 LSBs if read 32-bit from 0x8E0 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 LSBs if read 32-bit from 0x8E4 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ /* RF Interface Output (and Enable) */
+ /* 16 LSBs if read 32-bit from 0x860 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 LSBs if read 32-bit from 0x864 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ /* RF Interface (Output and) Enable */
+ /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ /* Addr of LSSI. Write RF register by driver */
+ /* LSSI Parameter */
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ /* RF parameter */
+ /* BB Band Select */
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+
+	/* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ /* Transceiver A~D HSSI Parameter-1 */
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ /* Transceiver A~D HSSI Parameter-2 */
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ /* RF switch Control */
+ /* TR/Ant switch control */
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+ /* AGC control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ /* AGC control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ /* RX AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+	/* RX AFE control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ /* Tx AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+ /* Tx AFE control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ /* Transceiver LSSI Readback SI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+ /* Transceiver LSSI Readback PI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_init_bb_rf_register_definition);
+
+void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ int index;
+
+ if (regaddr == RTXAGC_A_RATE18_06)
+ index = 0;
+ else if (regaddr == RTXAGC_A_RATE54_24)
+ index = 1;
+ else if (regaddr == RTXAGC_A_CCK1_MCS32)
+ index = 6;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
+ index = 7;
+ else if (regaddr == RTXAGC_A_MCS03_MCS00)
+ index = 2;
+ else if (regaddr == RTXAGC_A_MCS07_MCS04)
+ index = 3;
+ else if (regaddr == RTXAGC_A_MCS11_MCS08)
+ index = 4;
+ else if (regaddr == RTXAGC_A_MCS15_MCS12)
+ index = 5;
+ else if (regaddr == RTXAGC_B_RATE18_06)
+ index = 8;
+ else if (regaddr == RTXAGC_B_RATE54_24)
+ index = 9;
+ else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
+ index = 14;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
+ index = 15;
+ else if (regaddr == RTXAGC_B_MCS03_MCS00)
+ index = 10;
+ else if (regaddr == RTXAGC_B_MCS07_MCS04)
+ index = 11;
+ else if (regaddr == RTXAGC_B_MCS11_MCS08)
+ index = 12;
+ else if (regaddr == RTXAGC_B_MCS15_MCS12)
+ index = 13;
+ else
+ return;
+
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
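+	/* Index 13 (path B, MCS15-MCS12) is written last, so it closes
+	 * the current power group.
+	 */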
+ if (index == 13)
+ rtlphy->pwrgroup_cnt++;
+}
+EXPORT_SYMBOL_GPL(rtl92d_store_pwrindex_diffrate_offset);
+
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtlphy->default_initialgain[0] =
+ rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+		"Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
+ rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, MASKDWORD);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_get_hw_reg_originalvalue);
+
+static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = channel - 1;
+
+ /* 1. CCK */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* RF-A */
+ cckpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+ /* RF-B */
+ cckpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+ } else {
+ cckpowerlevel[RF90_PATH_A] = 0;
+ cckpowerlevel[RF90_PATH_B] = 0;
+ }
+ /* 2. OFDM for 1S or 2S */
+ if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+ } else if (rtlphy->rf_type == RF_2T2R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+ }
+}
+
+static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
+ u8 channel, u8 *cckpowerlevel,
+ u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+ rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+{
+ u8 place = chnl;
+
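+	/* Channels 1-14 map to themselves; 5 GHz channels map to one
+	 * past their index in channel_all[].
+	 */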
+ if (chnl > 14) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
+ if (channel_all[place] == chnl) {
+ place++;
+ break;
+ }
+ }
+ }
+ return place;
+}
+
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+ channel = _rtl92c_phy_get_rightchnlplace(channel);
+ _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+	if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+		_rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
+					     &ofdmpowerlevel[0]);
+		rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+	}
+ rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_txpower_level);
+
+void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ /*----Store original RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ *pu4_regval =
+ rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16);
+ break;
+ }
+ /*----Set RF_ENV enable----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ /*----Set RF_ENV output high----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ /* Set bit number of Address and Data for RF register */
+ /* Set 1 to 4 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+	/* Set 0 to 12 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_enable_rf_env);
+
+void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
+ /*----Restore RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ *pu4_regval);
+ break;
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_restore_rf_env);
+
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
+{
+ u8 place;
+
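+	/* 5 GHz channels get IQK slots 1..n following their order in
+	 * channel_all[]; 2.4 GHz channels all use slot 0.
+	 */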
+ if (chnl > 14) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place - 13;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_rightchnlplace_for_iqk);
+
+void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
+ for (i = 0; i < regnum; i++)
+ adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_save_adda_registers);
+
+void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n");
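+	/* All but the last MAC register are single bytes; the final
+	 * entry is a 32-bit register.
+	 */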
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_save_mac_registers);
+
+void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
+ const u32 *adda_reg, bool patha_on, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 pathon;
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n");
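+	/* 0x04db25a4 is used only when path A is on and this is MAC0
+	 * (interfaceindex 0); all other cases use 0x0b1b25a4.
+	 */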
+ pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (patha_on)
+ pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
+ 0x04db25a4 : 0x0b1b25a4;
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_path_adda_on);
+
+void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n");
+ rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
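+	/* Clear BIT(3) in each backed-up byte register and BIT(5) in
+	 * the final one while calibration runs.
+	 */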
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] &
+ (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & (~BIT(5))));
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_mac_setting_calibration);
+
+static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
+{
+ u32 ret;
+
+ if (val1 >= val2)
+ ret = val1 - val2;
+ else
+ ret = val2 - val1;
+ return ret;
+}
+
+static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel5g); i++)
+ if (channel == channel5g[i])
+ return true;
+ return false;
+}
+
+void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
+ const u32 *targetchnl, u32 *curvecount_val,
+ bool is5g, u32 *curveindex)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 smallest_abs_val = 0xffffffff, u4tmp;
+ u8 i, j;
+ u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G;
+
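+	/* Pick the curve entry closest to each channel's target value. */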
+ for (i = 0; i < chnl_num; i++) {
+ if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1))
+ continue;
+ curveindex[i] = 0;
+ for (j = 0; j < (CV_CURVE_CNT * 2); j++) {
+ u4tmp = _rtl92d_phy_get_abs(targetchnl[i],
+ curvecount_val[j]);
+
+ if (u4tmp < smallest_abs_val) {
+ curveindex[i] = j;
+ smallest_abs_val = u4tmp;
+ }
+ }
+ smallest_abs_val = 0xffffffff;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n",
+ i, curveindex[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_calc_curvindex);
+
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "settings regs %zu default regs %d\n",
+ ARRAY_SIZE(rtlphy->iqk_matrix),
+ IQK_MATRIX_REG_NUM);
+ /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
+ for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ rtlphy->iqk_matrix[i].value[0][0] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][2] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][4] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][6] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][1] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][3] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][5] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][7] = 0x0;
+ rtlphy->iqk_matrix[i].iqk_done = false;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_reset_iqk_result);
+
+static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
+
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+ rtl92d_dm_write_dig(hw);
+ rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
+ de_digtable->cur_igvalue = 0x37;
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ de_digtable->cur_igvalue = 0x17;
+ rtl92d_dm_write_dig(hw);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ rtlphy->current_io_type);
+ break;
+ }
+
+ rtlphy->set_io_inprogress = false;
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
+}
+
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ bool postprocessing = false;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
+
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan\n");
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan\n");
+ postprocessing = true;
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ iotype);
+ break;
+ }
+ } while (false);
+
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+
+ rtl92d_phy_set_io(hw);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_io_cmd);
+
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 offset = REG_MAC_PHY_CTRL_NORMAL;
+ u8 phy_ctrl = 0xf0;
+
+ if (rtlhal->interface == INTF_USB) {
+ phy_ctrl = rtl_read_byte(rtlpriv, offset);
+ phy_ctrl &= ~(BIT(0) | BIT(1) | BIT(2));
+ }
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_DUALPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_DUALPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0) | BIT(1));
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(2));
+ break;
+ case DUALMAC_SINGLEPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_SINGLEPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0));
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode);
+
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+ rtlhal->version &= RF_TYPE_1T1R;
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+ rtlhal->current_bandtype = BAND_ON_5G;
+ } else {
+ rtlhal->bandset = BAND_ON_2_4G;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ }
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode_info);
+
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
+{
+ u8 group;
+
+ if (channel_all[chnl] <= 3)
+ group = 0;
+ else if (channel_all[chnl] <= 9)
+ group = 1;
+ else if (channel_all[chnl] <= 14)
+ group = 2;
+ else if (channel_all[chnl] <= 44)
+ group = 3;
+ else if (channel_all[chnl] <= 54)
+ group = 4;
+ else if (channel_all[chnl] <= 64)
+ group = 5;
+ else if (channel_all[chnl] <= 112)
+ group = 6;
+ else if (channel_all[chnl] <= 126)
+ group = 7;
+ else if (channel_all[chnl] <= 140)
+ group = 8;
+ else if (channel_all[chnl] <= 153)
+ group = 9;
+ else if (channel_all[chnl] <= 159)
+ group = 10;
+ else
+ group = 11;
+ return group;
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_chnlgroup_fromarray);
+
+u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex)
+{
+ u8 group;
+
+ if (channel_all[chnlindex] <= 3) /* Chanel 1-3 */
+ group = 0;
+ else if (channel_all[chnlindex] <= 9) /* Channel 4-9 */
+ group = 1;
+ else if (channel_all[chnlindex] <= 14) /* Channel 10-14 */
+ group = 2;
+ else if (channel_all[chnlindex] <= 64)
+ group = 6;
+ else if (channel_all[chnlindex] <= 140)
+ group = 7;
+ else
+ group = 8;
+ return group;
+}
+
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (rtlpriv->rtlhal.macphymode) {
+ case DUALMAC_DUALPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case DUALMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0xf8);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10);
+ rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_maccoexist_rfpage);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h
new file mode 100644
index 000000000000..0f794557af47
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_PHY_COMMON_H__
+#define __RTL92D_PHY_COMMON_H__
+
+#define TARGET_CHNL_NUM_5G 221
+#define TARGET_CHNL_NUM_2G 14
+#define CV_CURVE_CNT 64
+#define RT_CANNOT_IO(hw) false
+#define RX_INDEX_MAPPING_NUM 15
+#define IQK_BB_REG_NUM 10
+
+#define IQK_DELAY_TIME 1
+#define MAX_TOLERANCE 5
+#define MAX_TOLERANCE_92D 3
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum rf_content {
+ radioa_txt = 0,
+ radiob_txt = 1,
+ radioc_txt = 2,
+ radiod_txt = 3
+};
+
+static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ return;
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag);
+}
+
+static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ return;
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock,
+ *flag);
+}
+
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval);
+void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval);
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
+void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum);
+void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup);
+void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
+ const u32 *adda_reg, bool patha_on, bool is2t);
+void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup);
+void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
+ const u32 *targetchnl, u32 *curvecount_val,
+ bool is5g, u32 *curveindex);
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl);
+u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex);
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
+/* Without these declarations sparse warns about context imbalance. */
+void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+
+/* Without these helpers and the declarations sparse warns about
+ * context imbalance.
+ */
+static inline void rtl92d_pci_lock(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->rtlhal.interface == INTF_PCI)
+ spin_lock(&rtlpriv->locks.rf_lock);
+}
+
+static inline void rtl92d_pci_unlock(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->rtlhal.interface == INTF_PCI)
+ spin_unlock(&rtlpriv->locks.rf_lock);
+}
+
+void rtl92d_pci_lock(struct rtl_priv *rtlpriv);
+void rtl92d_pci_unlock(struct rtl_priv *rtlpriv);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h
index 2783d7e7b227..b5b906b799cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h
@@ -50,6 +50,9 @@
#define REG_HMEBOX_EXT_1 0x008A
#define REG_HMEBOX_EXT_2 0x008C
#define REG_HMEBOX_EXT_3 0x008E
+#define SIZE_OF_REG_HMEBOX_EXT 2
+
+#define REG_EFUSE_ACCESS 0x00CF
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
@@ -86,6 +89,7 @@
#define REG_CPWM 0x012F
#define REG_FWIMR 0x0130
#define REG_FWISR 0x0134
+#define REG_FTIMR 0x0138
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_PKTBUF_DBG_DATA_L 0x0144
#define REG_PKTBUF_DBG_DATA_H 0x0148
@@ -109,6 +113,7 @@
#define REG_HMEBOX_1 0x01D4
#define REG_HMEBOX_2 0x01D8
#define REG_HMEBOX_3 0x01DC
+#define SIZE_OF_REG_HMEBOX 4
#define REG_LLT_INIT 0x01E0
#define REG_BB_ACCEESS_CTRL 0x01E8
@@ -197,6 +202,8 @@
#define REG_POWER_STAGE1 0x04B4
#define REG_POWER_STAGE2 0x04B8
#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
#define REG_STBC_SETTING 0x04C4
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_MAX_AGGR_NUM 0x04CA
@@ -233,6 +240,7 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -319,6 +327,8 @@
#define REG_BT_COEX_TABLE 0x06C0
#define REG_WMAC_RESP_TXINFO 0x06D8
+#define REG_USB_Queue_Select_MAC0 0xFE44
+#define REG_USB_Queue_Select_MAC1 0xFE47
/* ----------------------------------------------------- */
 /* Redefine 8192C register definition for compatibility */
@@ -355,27 +365,27 @@
#define RRSR_RSC_UPSUBCHNL 0x400000
#define RRSR_RSC_LOWSUBCHNL 0x200000
#define RRSR_SHORT 0x800000
-#define RRSR_1M BIT0
-#define RRSR_2M BIT1
-#define RRSR_5_5M BIT2
-#define RRSR_11M BIT3
-#define RRSR_6M BIT4
-#define RRSR_9M BIT5
-#define RRSR_12M BIT6
-#define RRSR_18M BIT7
-#define RRSR_24M BIT8
-#define RRSR_36M BIT9
-#define RRSR_48M BIT10
-#define RRSR_54M BIT11
-#define RRSR_MCS0 BIT12
-#define RRSR_MCS1 BIT13
-#define RRSR_MCS2 BIT14
-#define RRSR_MCS3 BIT15
-#define RRSR_MCS4 BIT16
-#define RRSR_MCS5 BIT17
-#define RRSR_MCS6 BIT18
-#define RRSR_MCS7 BIT19
-#define BRSR_ACKSHORTPMB BIT23
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
/* ----------------------------------------------------- */
/* 8192C Rate Definition */
@@ -600,7 +610,11 @@
#define EEPROM_SVID 0x2C /* SE Vendor ID.E-F */
#define EEPROM_SMID 0x2E /* SE PCI Subsystem ID. 10-11 */
+#define EEPROM_VID_USB 0xC
+#define EEPROM_PID_USB 0xE
+#define EEPROM_ENDPOINT_SETTING 0x10
#define EEPROM_MAC_ADDR 0x16 /* SEMAC Address. 12-17 */
+#define EEPROM_MAC_ADDR_MAC0_92DU 0x19
#define EEPROM_MAC_ADDR_MAC0_92D 0x55
#define EEPROM_MAC_ADDR_MAC1_92D 0x5B
@@ -915,6 +929,42 @@
#define BD_HCI_SEL BIT(26)
#define TYPE_ID BIT(27)
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define TXDMA_HIQ_MAP GENMASK(15, 14)
+#define TXDMA_MGQ_MAP GENMASK(13, 12)
+#define TXDMA_BKQ_MAP GENMASK(11, 10)
+#define TXDMA_BEQ_MAP GENMASK(9, 8)
+#define TXDMA_VIQ_MAP GENMASK(7, 6)
+#define TXDMA_VOQ_MAP GENMASK(5, 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define HPQ_MASK GENMASK(7, 0)
+#define LPQ_MASK GENMASK(15, 8)
+#define PUBQ_MASK GENMASK(23, 16)
+#define LD_RQPN BIT(31)
+
+#define DROP_DATA_EN BIT(9)
+
/* LLT_INIT */
#define _LLT_NO_ACTIVE 0x0
#define _LLT_WRITE_ACCESS 0x1
@@ -929,6 +979,10 @@
/* ----------------------------------------------------- */
/* 0x0400h ~ 0x047Fh Protocol Configuration */
/* ----------------------------------------------------- */
+/* FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+#define EN_BCNQ_DL BIT(22)
+
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
@@ -942,6 +996,13 @@
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
+/* REG_RD_CTRL */
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+/* REG_BCN_CTRL */
+#define EN_BCN_FUNCTION BIT(3)
+#define DIS_TSF_UDT BIT(4)
+
/* ACMHWCTRL */
#define ACMHW_HWEN BIT(0)
#define ACMHW_BEQEN BIT(1)
@@ -1073,6 +1134,11 @@
#define RCCK0_FACOUNTERLOWER 0xa5c
#define RCCK0_FACOUNTERUPPER 0xa58
+#define RPDP_ANTA 0xb00
+#define RCONFIG_ANTA 0xb68
+#define RCONFIG_ANTB 0xb6c
+#define RPDP_ANTB 0xb70
+
/* 6. PageC(0xC00) */
#define ROFDM0_LSTF 0xc00
@@ -1126,6 +1192,7 @@
#define ROFDM0_TXPSEUDONOISEWGT 0xce4
#define ROFDM0_FRAMESYNC 0xcf0
#define ROFDM0_DFSREPORT 0xcf4
+#define ROFDM0_RXIQEXTANTA 0xca0
#define ROFDM0_TXCOEFF1 0xca4
#define ROFDM0_TXCOEFF2 0xca8
#define ROFDM0_TXCOEFF3 0xcac
@@ -1184,17 +1251,70 @@
#define RTXAGC_B_MCS15_MCS12 0x868
#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+#define RFPGA0_IQK 0xe28
+#define RTX_IQK_TONE_A 0xe30
+#define RRX_IQK_TONE_A 0xe34
+#define RTX_IQK_PI_A 0xe38
+#define RRX_IQK_PI_A 0xe3c
+
+#define RTX_IQK 0xe40
+#define RRX_IQK 0xe44
+#define RIQK_AGC_PTS 0xe48
+#define RIQK_AGC_RSP 0xe4c
+#define RTX_IQK_TONE_B 0xe50
+#define RRX_IQK_TONE_B 0xe54
+#define RTX_IQK_PI_B 0xe58
+#define RRX_IQK_PI_B 0xe5c
+#define RIQK_AGC_CONT 0xe60
+
+#define RBLUE_TOOTH 0xe6c
+#define RRX_WAIT_CCA 0xe70
+#define RTX_CCK_RFON 0xe74
+#define RTX_CCK_BBON 0xe78
+#define RTX_OFDM_RFON 0xe7c
+#define RTX_OFDM_BBON 0xe80
+#define RTX_TO_RX 0xe84
+#define RTX_TO_TX 0xe88
+#define RRX_CCK 0xe8c
+
+#define RTX_POWER_BEFORE_IQK_A 0xe94
+#define RTX_POWER_AFTER_IQK_A 0xe9c
+
+#define RRX_POWER_BEFORE_IQK_A 0xea0
+#define RRX_POWER_BEFORE_IQK_A_2 0xea4
+#define RRX_POWER_AFTER_IQK_A 0xea8
+#define RRX_POWER_AFTER_IQK_A_2 0xeac
+
+#define RTX_POWER_BEFORE_IQK_B 0xeb4
+#define RTX_POWER_AFTER_IQK_B 0xebc
+
+#define RRX_POWER_BEFORE_IQK_B 0xec0
+#define RRX_POWER_BEFORE_IQK_B_2 0xec4
+#define RRX_POWER_AFTER_IQK_B 0xec8
+#define RRX_POWER_AFTER_IQK_B_2 0xecc
+
+#define MASK_IQK_RESULT 0x03ff0000
+
+#define RRX_OFDM 0xed0
+#define RRX_WAIT_RIFS 0xed4
+#define RRX_TO_RX 0xed8
+#define RSTANDBY 0xedc
+#define RSLEEP 0xee0
+#define RPMPD_ANAEN 0xeec
+
/* RL6052 Register definition */
#define RF_AC 0x00
#define RF_IQADJ_G1 0x01
#define RF_IQADJ_G2 0x02
+#define RF_BS_PA_APSET_G1_G4 0x03
#define RF_POW_TRSW 0x05
#define RF_GAIN_RX 0x06
#define RF_GAIN_TX 0x07
#define RF_TXM_IDAC 0x08
+#define RF_TXPA_AG 0x0B
#define RF_BS_IQGEN 0x0F
#define RF_MODE1 0x10
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c
new file mode 100644
index 000000000000..427d1877f431
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "def.h"
+#include "reg.h"
+#include "phy_common.h"
+#include "rf_common.h"
+
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath;
+
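+	/* Bits 10-11 of RF_CHNLBW select the bandwidth: 0x01 for 20 MHz,
+	 * 0x00 for 40 MHz; rfreg_chnlval caches the programmed value.
+	 */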
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff;
+ rtlphy->rfreg_chnlval[rfpath] |= 0x0400;
+
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW,
+ BIT(10) | BIT(11), 0x01);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff;
+
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW,
+ BIT(10) | BIT(11), 0x00);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_bandwidth);
+
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval = (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] << 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *)(&tx_agc[idx1]);
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_A_CCK1_MCS32);
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK1_55_MCS32);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_cck_txpower);
+
+static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 powerbase0, powerbase1;
+ u8 legacy_pwrdiff, ht20_pwrdiff;
+ u8 i, powerlevel[2];
+
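+	/* Build each 32-bit power base by replicating one byte: OFDM adds
+	 * the legacy diff, MCS adds the HT20 diff when on 20 MHz.
+	 */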
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerbase0 = powerlevel[i] + legacy_pwrdiff;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(ofdmbase + i));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
+ *(mcsbase + i) = powerbase1;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(mcsbase + i));
+ }
+}
+
+static void _rtl92d_get_pwr_diff_limit(struct ieee80211_hw *hw, u8 channel,
+ u8 index, u8 rf, u8 pwr_diff_limit[4])
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 mcs_offset;
+ u8 limit;
+ int i;
+
+ mcs_offset = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] = (mcs_offset >> (i * 8)) & 0x7f;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ limit = rtlefuse->pwrgroup_ht40[rf][channel - 1];
+ else
+ limit = rtlefuse->pwrgroup_ht20[rf][channel - 1];
+
+ if (pwr_diff_limit[i] > limit)
+ pwr_diff_limit[i] = limit;
+ }
+}
+
+static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerbase0,
+ u32 *powerbase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 writeval = 0, customer_limit, rf;
+ u8 chnlgroup = 0, pwr_diff_limit[4];
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance\n");
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+
+ if (rtlphy->pwrgroup_cnt < MAX_PG_GROUP)
+ break;
+
+ chnlgroup = rtl92d_phy_get_chnlgroup_bypg(channel - 1);
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ chnlgroup++;
+ else
+ chnlgroup += 4;
+
+ writeval = rtlphy->mcs_offset
+ [chnlgroup][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Realtek regulatory, 20MHz\n");
+ break;
+ case 2:
+ writeval = 0;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory\n");
+ break;
+ case 3:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "customer's limit, 40MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht40[rf][channel - 1]);
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "customer's limit, 20MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht20[rf][channel - 1]);
+ }
+
+ _rtl92d_get_pwr_diff_limit(hw, channel, index, rf,
+ pwr_diff_limit);
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
+ (pwr_diff_limit[0]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Customer's limit rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', customer_limit);
+
+ writeval = customer_limit;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer\n");
+ break;
+ default:
+ writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance\n");
+ break;
+ }
+
+ if (index < 2)
+ writeval += powerbase0[rf];
+ else
+ writeval += powerbase1[rf];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "writeval rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
+
+ *(p_outwriteval + rf) = writeval;
+ }
+}
+
+static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pvalue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ static const u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ static const u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeval;
+ u16 regoffset;
+
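+	/* Clamp every per-rate byte to RF6052_MAX_TX_PWR before writing
+	 * the TX AGC register of this rate group.
+	 */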
+ for (rf = 0; rf < 2; rf++) {
+ writeval = pvalue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeval & (0x7f <<
+ (i * 8))) >> (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Set 0x%x = %08x\n", regoffset, writeval);
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
+ writeval = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeval = (writeval > 8) ?
+ (writeval - 8) : 0;
+ else
+ writeval = (writeval > 6) ?
+ (writeval - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeval);
+ }
+ }
+ }
+}
+
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeval[2], powerbase0[2], powerbase1[2];
+ u8 index;
+
+ _rtl92d_phy_get_power_base(hw, ppowerlevel, channel,
+ &powerbase0[0], &powerbase1[0]);
+ for (index = 0; index < 6; index++) {
+ _rtl92d_get_txpower_writeval_by_regulatory(hw, channel, index,
+ &powerbase0[0],
+ &powerbase1[0],
+ &writeval[0]);
+ _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_ofdm_txpower);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h
new file mode 100644
index 000000000000..c243ec08369b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_RF_COMMON_H__
+#define __RTL92D_RF_COMMON_H__
+
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
new file mode 100644
index 000000000000..72d2b7426d82
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../stats.h"
+#include "def.h"
+#include "trx_common.h"
+
+static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8192d *cck_buf;
+ s8 rx_pwr_all, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck_rate;
+ u8 rxmcs;
+
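+	/* Rates up to DESC_RATE11M are CCK; anything above is OFDM/HT. */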
+ rxmcs = get_rx_desc_rxmcs(pdesc);
+ is_cck_rate = rxmcs <= DESC_RATE11M;
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->rx_mimo_sig_qual[0] = -1;
+ pstats->rx_mimo_sig_qual[1] = -1;
+
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+
+ cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = rtlphy->cck_high_power;
+ else
+ cck_highpwr = false;
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		/* CCK gain is smaller than OFDM/MCS gain, so add an */
+		/* empirically determined difference of 6 */
+ pwdb_all += 6;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+		/* Modify the offset so the gain index matches OFDM. */
+ if (pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if (pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if (pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if (pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (packet_match_bssid) {
+ u8 sq;
+
+ if (pstats->rx_pwdb_all > 40) {
+ sq = 100;
+ } else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+ pstats->signalquality = sq;
+ pstats->rx_mimo_sig_qual[0] = sq;
+ pstats->rx_mimo_sig_qual[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
+ - 110;
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8)rssi;
+ }
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
+ rxmcs <= DESC_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8)(evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] =
+ (u8)(evm & 0xff);
+ }
+ }
+ }
+ if (is_cck_rate)
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+}
+
+static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath;
+
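+	/* Exponentially smooth the per-path RSSI with weight
+	 * 1/RX_SMOOTH_FACTOR, rounding up when the new sample is higher.
+	 */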
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+ }
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rt_smooth_data *ui_rssi;
+ u32 last_rssi, tmpval;
+
+ if (!pstats->packet_toself && !pstats->packet_beacon)
+ return;
+
+ ui_rssi = &rtlpriv->stats.ui_rssi;
+
+ rtlpriv->stats.rssi_calculate_cnt++;
+ if (ui_rssi->total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+ ui_rssi->total_num = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = ui_rssi->elements[ui_rssi->index];
+ ui_rssi->total_val -= last_rssi;
+ }
+ ui_rssi->total_val += pstats->signalstrength;
+ ui_rssi->elements[ui_rssi->index++] = pstats->signalstrength;
+ if (ui_rssi->index >= PHY_RSSI_SLID_WIN_MAX)
+ ui_rssi->index = 0;
+ tmpval = ui_rssi->total_val / ui_rssi->total_num;
+ rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw, (u8)tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+
+ if (!pstats->is_cck && pstats->packet_toself)
+ rtl92d_loop_over_paths(hw, pstats);
+}
+
+static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+ 5 + pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undec_sm_pwdb;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_AP)
+ return;
+
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32)undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undec_sm_pwdb = undec_sm_pwdb + 1;
+ } else {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
+ _rtl92de_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int stream;
+
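+	/* Smooth the per-stream EVM the same way, seeding with the
+	 * first reported value.
+	 */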
+ for (stream = 0; stream < 2; stream++) {
+ if (pstats->rx_mimo_sig_qual[stream] != -1) {
+ if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ pstats->rx_mimo_sig_qual[stream];
+ }
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ ((rtlpriv->stats.rx_evm_percentage[stream]
+ * (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_sig_qual[stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rt_smooth_data *ui_link_quality;
+ u32 last_evm, tmpval;
+
+ if (pstats->signalquality == 0)
+ return;
+ if (!pstats->packet_toself && !pstats->packet_beacon)
+ return;
+
+ ui_link_quality = &rtlpriv->stats.ui_link_quality;
+
+ if (ui_link_quality->total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) {
+ ui_link_quality->total_num = PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = ui_link_quality->elements[ui_link_quality->index];
+ ui_link_quality->total_val -= last_evm;
+ }
+ ui_link_quality->total_val += pstats->signalquality;
+ ui_link_quality->elements[ui_link_quality->index++] = pstats->signalquality;
+ if (ui_link_quality->index >= PHY_LINKQUALITY_SLID_WIN_MAX)
+ ui_link_quality->index = 0;
+ tmpval = ui_link_quality->total_val / ui_link_quality->total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ rtl92d_loop_over_streams(hw, pstats);
+}
+
+static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+
+ _rtl92de_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92de_process_pwdb(hw, pcurrent_stats);
+ _rtl92de_process_ui_link_quality(hw, pcurrent_stats);
+}
+
+static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ bool packet_matchbssid;
+ bool packet_beacon;
+ bool packet_toself;
+ u16 type, cfc;
+ u8 *tmp_buf;
+ u8 *praddr;
+ __le16 fc;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ cfc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ packet_matchbssid = ((type != IEEE80211_FTYPE_CTL) &&
+ ether_addr_equal(mac->bssid,
+ (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3) &&
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+ packet_toself = packet_matchbssid &&
+ ether_addr_equal(praddr, rtlefuse->dev_addr);
+ packet_beacon = ieee80211_is_beacon(fc);
+ _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92de_process_phyinfo(hw, tmp_buf, pstats);
+}
+
+bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc8, struct sk_buff *skb)
+{
+ __le32 *pdesc = (__le32 *)pdesc8;
+ struct rx_fwinfo_92d *p_drvinfo;
+ u32 phystatus = get_rx_desc_physt(pdesc);
+
+ stats->length = (u16)get_rx_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !get_rx_desc_swdec(pdesc) &&
+ get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+ if (get_rx_desc_crc32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (get_rx_desc_bw(pdesc))
+ rx_status->bw = RATE_INFO_BW_40;
+ if (get_rx_desc_rxht(pdesc))
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
+ rx_status->mactime = get_rx_desc_tsfl(pdesc);
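+ /* When the descriptor reports PHY status, a struct rx_fwinfo_92d sits
+  * at the start of the RX buffer (after rx_bufshift) and provides the
+  * RSSI/EVM data used to update the signal statistics.
+  */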
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ stats->rx_bufshift);
+ _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->recvsignalpower + 10;
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl92de_rx_query_desc);
+
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
+ u8 desc_name, u8 *val)
+{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
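+ /* Ensure all other descriptor fields are written before the
+  * OWN bit hands this descriptor over to the hardware.
+  */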
+ wmb();
+ set_tx_desc_own(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ wmb();
+ set_rx_desc_own(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXERO:
+ set_rx_desc_eor(pdesc, 1);
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
+ desc_name);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92de_set_desc);
+
+u64 rtl92de_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc8, bool istx, u8 desc_name)
+{
+ __le32 *p_desc = (__le32 *)p_desc8;
+ u32 ret = 0;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = get_tx_desc_own(p_desc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = get_tx_desc_tx_buffer_address(p_desc);
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = get_rx_desc_own(p_desc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = get_rx_desc_pkt_len(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = get_rx_desc_buff_addr(p_desc);
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
+ desc_name);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl92de_get_desc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
new file mode 100644
index 000000000000..87d956d771eb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_TRX_COMMON_H__
+#define __RTL92D_TRX_COMMON_H__
+
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+enum rtl92d_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
+/* Inline helpers to read/write various fields in RX or TX descriptors */
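+/* Each helper reads or writes one 32-bit little-endian dword of the
+ * descriptor: __pdesc + n addresses dword n, e.g. (__pdesc + 4) is
+ * TX descriptor dword 4.
+ */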
+
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(22, 20));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(27));
+}
+
+static inline u32 get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+/* For 92D early mode */
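+/* The early-mode header packs the number of aggregated frames and the
+ * length of each one (len0..len4) into the two dwords at __paddr.
+ */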
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
+
+struct rx_fwinfo_92d {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+#ifdef __LITTLE_ENDIAN
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+#else
+ u8 reserve:4;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 ex_intf_flag:1;
+#endif
+} __packed;
+
+bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+u64 rtl92de_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc, bool istx, u8 desc_name);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index cf4aca83bd05..c6a2e8b22fa0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -4,455 +4,16 @@
#include "../wifi.h"
#include "../base.h"
#include "../core.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/fw_common.h"
#include "phy.h"
#include "dm.h"
-#include "fw.h"
#define UNDEC_SM_PWDB entry_min_undec_sm_pwdb
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
- 0x1b00006c, /* 27. -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c, /* 37, -12.5dB */
- 0x0e400039, /* 38, -13.0dB */
- 0x0d800036, /* 39, -13.5dB */
- 0x0cc00033, /* 40, -14.0dB */
- 0x0c000030, /* 41, -14.5dB */
- 0x0b40002d, /* 42, -15.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
-static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
-{
- u32 ret_value;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
- unsigned long flag = 0;
-
- /* hold ofdm counter */
- rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
-
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
- falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
- falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
- falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
- falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
- falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
- falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
- falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail +
- falsealm_cnt->cnt_mcs_fail +
- falsealm_cnt->cnt_fast_fsync_fail +
- falsealm_cnt->cnt_sb_search_fail;
-
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
- /* hold cck counter */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
- falsealm_cnt->cnt_cck_fail = ret_value;
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
- falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- } else {
- falsealm_cnt->cnt_cck_fail = 0;
- }
-
- /* reset false alarm counter registers */
- falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
- falsealm_cnt->cnt_sb_search_fail +
- falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail +
- falsealm_cnt->cnt_mcs_fail +
- falsealm_cnt->cnt_cck_fail;
-
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
- /* update ofdm counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
- /* update page C counter */
- rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
- /* update page D counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
- /* reset cck counter */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
- /* enable cck counter */
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
- falsealm_cnt->cnt_fast_fsync_fail,
- falsealm_cnt->cnt_sb_search_fail);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
-}
-
-static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-
- /* Determine the minimum RSSI */
- if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
- de_digtable->min_undec_pwdb_for_dm = 0;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
- }
- if (mac->link_state >= MAC80211_LINKED) {
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- de_digtable->min_undec_pwdb_for_dm =
- rtlpriv->dm.UNDEC_SM_PWDB;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.UNDEC_SM_PWDB);
- } else {
- de_digtable->min_undec_pwdb_for_dm =
- rtlpriv->dm.undec_sm_pwdb;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
- }
- } else {
- de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
- }
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- de_digtable->min_undec_pwdb_for_dm);
-}
-
-static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- unsigned long flag = 0;
-
- if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
- if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
- if (de_digtable->min_undec_pwdb_for_dm <= 25)
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LOWRSSI;
- else
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HIGHRSSI;
- } else {
- if (de_digtable->min_undec_pwdb_for_dm <= 20)
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LOWRSSI;
- else
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HIGHRSSI;
- }
- } else {
- de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
- }
- if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
- if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- } else {
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- }
- de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_cstate == DIG_STA_CONNECT ?
- "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
- de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
- "Low RSSI " : "High RSSI ");
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
- IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
-
-}
-
-void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- de_digtable->cur_igvalue, de_digtable->pre_igvalue,
- de_digtable->back_val);
- if (de_digtable->dig_enable_flag == false) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
- de_digtable->pre_igvalue = 0x17;
- return;
- }
- if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
- rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
- de_digtable->cur_igvalue);
- rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
- de_digtable->cur_igvalue);
- de_digtable->pre_igvalue = de_digtable->cur_igvalue;
- }
-}
-
-static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
-{
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
-
- if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
- (rtlpriv->mac80211.vendor == PEER_CISCO)) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
- if (de_digtable->last_min_undec_pwdb_for_dm >= 50
- && de_digtable->min_undec_pwdb_for_dm < 50) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode Off\n");
- } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
- de_digtable->min_undec_pwdb_for_dm > 55) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode On\n");
- }
- } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
- }
-}
-
-static void rtl92d_dm_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- u8 value_igi = de_digtable->cur_igvalue;
- struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
- if (rtlpriv->rtlhal.earlymode_enable) {
- rtl92d_early_mode_enabled(rtlpriv);
- de_digtable->last_min_undec_pwdb_for_dm =
- de_digtable->min_undec_pwdb_for_dm;
- }
- if (!rtlpriv->dm.dm_initialgain_enable)
- return;
-
- /* because we will send data pkt when scanning
- * this will cause some ap like gear-3700 wep TP
- * lower if we return here, this is the diff of
- * mac80211 driver vs ieee80211 driver */
- /* if (rtlpriv->mac80211.act_scanning)
- * return; */
-
- /* Not STA mode return tmp */
- if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
- return;
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
- /* Decide the current status and if modify initial gain or not */
- if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- de_digtable->cursta_cstate = DIG_STA_CONNECT;
- else
- de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
-
- /* adjust initial gain according to false alarm counter */
- if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
- value_igi--;
- else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
- value_igi += 0;
- else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
- value_igi++;
- else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
- value_igi += 2;
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
-
- /* deal with abnormally large false alarm */
- if (falsealm_cnt->cnt_all > 10000) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG(): Abnormally false alarm case\n");
-
- de_digtable->large_fa_hit++;
- if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
- de_digtable->forbidden_igi = de_digtable->cur_igvalue;
- de_digtable->large_fa_hit = 1;
- }
- if (de_digtable->large_fa_hit >= 3) {
- if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
- de_digtable->rx_gain_min = DM_DIG_MAX;
- else
- de_digtable->rx_gain_min =
- (de_digtable->forbidden_igi + 1);
- de_digtable->recover_cnt = 3600; /* 3600=2hr */
- }
- } else {
- /* Recovery mechanism for IGI lower bound */
- if (de_digtable->recover_cnt != 0) {
- de_digtable->recover_cnt--;
- } else {
- if (de_digtable->large_fa_hit == 0) {
- if ((de_digtable->forbidden_igi - 1) <
- DM_DIG_FA_LOWER) {
- de_digtable->forbidden_igi =
- DM_DIG_FA_LOWER;
- de_digtable->rx_gain_min =
- DM_DIG_FA_LOWER;
-
- } else {
- de_digtable->forbidden_igi--;
- de_digtable->rx_gain_min =
- (de_digtable->forbidden_igi + 1);
- }
- } else if (de_digtable->large_fa_hit == 3) {
- de_digtable->large_fa_hit = 0;
- }
- }
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
-
- if (value_igi > DM_DIG_MAX)
- value_igi = DM_DIG_MAX;
- else if (value_igi < de_digtable->rx_gain_min)
- value_igi = de_digtable->rx_gain_min;
- de_digtable->cur_igvalue = value_igi;
- rtl92d_dm_write_dig(hw);
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
- rtl92d_dm_cck_packet_detection_thresh(hw);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
-}
-
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -579,626 +140,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
}
}
-void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.current_turbo_edca = false;
- rtlpriv->dm.is_any_nonbepkts = false;
- rtlpriv->dm.is_cur_rdlstate = false;
-}
-
-static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- const u32 edca_be_ul = 0x5ea42b;
- const u32 edca_be_dl = 0x5ea42b;
- static u64 last_txok_cnt;
- static u64 last_rxok_cnt;
- u64 cur_txok_cnt;
- u64 cur_rxok_cnt;
-
- if (mac->link_state != MAC80211_LINKED) {
- rtlpriv->dm.current_turbo_edca = false;
- goto exit;
- }
-
- if ((!rtlpriv->dm.is_any_nonbepkts) &&
- (!rtlpriv->dm.disable_framebursting)) {
- cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
- cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
- if (cur_rxok_cnt > 4 * cur_txok_cnt) {
- if (!rtlpriv->dm.is_cur_rdlstate ||
- !rtlpriv->dm.current_turbo_edca) {
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
- edca_be_dl);
- rtlpriv->dm.is_cur_rdlstate = true;
- }
- } else {
- if (rtlpriv->dm.is_cur_rdlstate ||
- !rtlpriv->dm.current_turbo_edca) {
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
- edca_be_ul);
- rtlpriv->dm.is_cur_rdlstate = false;
- }
- }
- rtlpriv->dm.current_turbo_edca = true;
- } else {
- if (rtlpriv->dm.current_turbo_edca) {
- u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- &tmp);
- rtlpriv->dm.current_turbo_edca = false;
- }
- }
-
-exit:
- rtlpriv->dm.is_any_nonbepkts = false;
- last_txok_cnt = rtlpriv->stats.txbytesunicast;
- last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
-}
-
-static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
- 0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
- 0x0a, 0x09, 0x08, 0x07, 0x06,
- 0x05, 0x04, 0x04, 0x03, 0x02
- };
- int i;
- u32 u4tmp;
-
- u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
- rtlpriv->dm.thermalvalue_rxgain)]) << 12;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===> Rx Gain %x\n", u4tmp);
- for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
- rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
- (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
-}
-
-static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
- u8 *cck_index_old)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int i;
- unsigned long flag = 0;
- long temp_cck;
- const u8 *cckswing;
-
- /* Query CCK default setting From 0xa24 */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
- MASKDWORD) & MASKCCK;
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- for (i = 0; i < CCK_TABLE_LENGTH; i++) {
- if (rtlpriv->dm.cck_inch14)
- cckswing = &cckswing_table_ch14[i][2];
- else
- cckswing = &cckswing_table_ch1ch13[i][2];
-
- if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
- *cck_index_old = (u8)i;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
- break;
- }
- }
- *temp_cckg = temp_cck;
-}
-
-static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
- bool *internal_pa, u8 thermalvalue, u8 delta,
- u8 rf, struct rtl_efuse *rtlefuse,
- struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
- const u8 index_mapping[5][INDEX_MAPPING_NUM],
- const u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
-{
- int i;
- u8 index;
- u8 offset = 0;
-
- for (i = 0; i < rf; i++) {
- if (rtlhal->macphymode == DUALMAC_DUALPHY &&
- rtlhal->interfaceindex == 1) /* MAC 1 5G */
- *internal_pa = rtlefuse->internal_pa_5g[1];
- else
- *internal_pa = rtlefuse->internal_pa_5g[i];
- if (*internal_pa) {
- if (rtlhal->interfaceindex == 1 || i == rf)
- offset = 4;
- else
- offset = 0;
- if (rtlphy->current_channel >= 100 &&
- rtlphy->current_channel <= 165)
- offset += 2;
- } else {
- if (rtlhal->interfaceindex == 1 || i == rf)
- offset = 2;
- else
- offset = 0;
- }
- if (thermalvalue > rtlefuse->eeprom_thermalmeter)
- offset++;
- if (*internal_pa) {
- if (delta > INDEX_MAPPING_NUM - 1)
- index = index_mapping_pa[offset]
- [INDEX_MAPPING_NUM - 1];
- else
- index =
- index_mapping_pa[offset][delta];
- } else {
- if (delta > INDEX_MAPPING_NUM - 1)
- index =
- index_mapping[offset][INDEX_MAPPING_NUM - 1];
- else
- index = index_mapping[offset][delta];
- }
- if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
- if (*internal_pa && thermalvalue > 0x12) {
- ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
- ((delta / 2) * 3 + (delta % 2));
- } else {
- ofdm_index[i] -= index;
- }
- } else {
- ofdm_index[i] += index;
- }
- }
-}
-
-static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
- u8 offset, thermalvalue_avg_count = 0;
- u32 thermalvalue_avg = 0;
- bool internal_pa = false;
- long ele_a = 0, ele_d, temp_cck, val_x, value32;
- long val_y, ele_c = 0;
- u8 ofdm_index[2];
- s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
- s8 cck_index_old = 0;
- u8 index;
- int i;
- bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
- u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
- u8 indexforchannel =
- rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
- static const u8 index_mapping[5][INDEX_MAPPING_NUM] = {
- /* 5G, path A/MAC 0, decrease power */
- {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
- /* 5G, path A/MAC 0, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, decrease power */
- {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
- /* 5G, path B/MAC 1, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
- /* 2.4G, for decreas power */
- {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
- };
- static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
- /* 5G, path A/MAC 0, ch36-64, decrease power */
- {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
- /* 5G, path A/MAC 0, ch36-64, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path A/MAC 0, ch100-165, decrease power */
- {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
- /* 5G, path A/MAC 0, ch100-165, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, ch36-64, decrease power */
- {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
- /* 5G, path B/MAC 1, ch36-64, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, ch100-165, decrease power */
- {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
- /* 5G, path B/MAC 1, ch100-165, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
- };
-
- rtlpriv->dm.txpower_trackinginit = true;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
- thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
- rtl92d_phy_ap_calibrate(hw, (thermalvalue -
- rtlefuse->eeprom_thermalmeter));
-
- if (!thermalvalue)
- goto exit;
-
- if (is2t)
- rf = 2;
- else
- rf = 1;
-
- if (rtlpriv->dm.thermalvalue && !rtlhal->reloadtxpowerindex)
- goto old_index_done;
-
- ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[0] = (u8)i;
-
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
- break;
- }
- }
- if (is2t) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD) & MASKOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d ==
- (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[1] = (u8)i;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
- break;
- }
- }
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
- } else {
- temp_cck = 0x090e1317;
- cck_index_old = 12;
- }
-
- if (!rtlpriv->dm.thermalvalue) {
- rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter;
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtlpriv->dm.thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter;
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- }
- if (rtlhal->reloadtxpowerindex) {
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
- }
-old_index_done:
- for (i = 0; i < rf; i++)
- ofdm_index[i] = rtlpriv->dm.ofdm_index[i];
-
- rtlpriv->dm.thermalvalue_avg
- [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
- rtlpriv->dm.thermalvalue_avg_index++;
- if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
- rtlpriv->dm.thermalvalue_avg_index = 0;
- for (i = 0; i < AVG_THERMAL_NUM; i++) {
- if (rtlpriv->dm.thermalvalue_avg[i]) {
- thermalvalue_avg += rtlpriv->dm.thermalvalue_avg[i];
- thermalvalue_avg_count++;
- }
- }
- if (thermalvalue_avg_count)
- thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
- if (rtlhal->reloadtxpowerindex) {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- rtlhal->reloadtxpowerindex = false;
- rtlpriv->dm.done_txpower = false;
- } else if (rtlpriv->dm.done_txpower) {
- delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
- (thermalvalue - rtlpriv->dm.thermalvalue) :
- (rtlpriv->dm.thermalvalue - thermalvalue);
- } else {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- }
- delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
- (rtlpriv->dm.thermalvalue_lck - thermalvalue);
- delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
- (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- delta_rxgain =
- (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
- (rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
- if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtl92d_phy_lc_calibrate(hw);
- }
-
- if (delta == 0 || !rtlpriv->dm.txpower_track_control)
- goto check_delta;
-
- rtlpriv->dm.done_txpower = true;
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- offset = 4;
- if (delta > INDEX_MAPPING_NUM - 1)
- index = index_mapping[offset][INDEX_MAPPING_NUM - 1];
- else
- index = index_mapping[offset][delta];
- if (thermalvalue > rtlpriv->dm.thermalvalue) {
- for (i = 0; i < rf; i++)
- ofdm_index[i] -= delta;
- cck_index -= delta;
- } else {
- for (i = 0; i < rf; i++)
- ofdm_index[i] += index;
- cck_index += index;
- }
- } else if (rtlhal->current_bandtype == BAND_ON_5G) {
- rtl92d_bandtype_5G(rtlhal, ofdm_index,
- &internal_pa, thermalvalue,
- delta, rf, rtlefuse, rtlpriv,
- rtlphy, index_mapping,
- index_mapping_internal_pa);
- }
- if (is2t) {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
- }
- for (i = 0; i < rf; i++) {
- if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) {
- ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
- } else if (internal_pa ||
- rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (ofdm_index[i] < ofdm_min_index_internal_pa)
- ofdm_index[i] = ofdm_min_index_internal_pa;
- } else if (ofdm_index[i] < ofdm_min_index) {
- ofdm_index[i] = ofdm_min_index;
- }
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (cck_index > CCK_TABLE_SIZE - 1) {
- cck_index = CCK_TABLE_SIZE - 1;
- } else if (cck_index < 0) {
- cck_index = 0;
- }
- }
- if (is2t) {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
- ofdm_index[0], cck_index);
- }
- ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
- val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
-
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
- /* write new elements A, C, D to regC80 and
- * regC94, element B is always 0
- */
- value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD, value32);
-
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- value32);
-
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
- value32);
-
- } else {
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(24), 0x00);
- }
-
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
- rtlhal->interfaceindex,
- val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y);
-
- if (cck_index >= CCK_TABLE_SIZE)
- cck_index = CCK_TABLE_SIZE - 1;
- if (cck_index < 0)
- cck_index = 0;
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* Adjust CCK according to IQK result */
- if (!rtlpriv->dm.cck_inch14) {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch1ch13[cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch1ch13[cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch1ch13[cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch1ch13[cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch1ch13[cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch1ch13[cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch1ch13[cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch1ch13[cck_index][7]);
- } else {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch14[cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch14[cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch14[cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch14[cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch14[cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch14[cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch14[cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch14[cck_index][7]);
- }
- }
- if (is2t) {
- ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4];
- val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- /* consider minus */
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x00003FF;
- /* write new elements A, C, D to regC88
- * and regC9C, element B is always 0
- */
- value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD, value32);
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, value32);
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), value32);
- } else {
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[ofdm_index[1]]);
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), 0x00);
- }
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y);
- }
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, MASKDWORD),
- rtl_get_bbreg(hw, 0xc94, MASKDWORD),
- rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- RFREG_OFFSET_MASK));
-
-check_delta:
- if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
- rtl92d_phy_reset_iqk_result(hw);
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtl92d_phy_iq_calibrate(hw);
- }
- if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G &&
- thermalvalue <= rtlefuse->eeprom_thermalmeter) {
- rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
- rtl92d_dm_rxgain_tracking_thermalmeter(hw);
- }
- if (rtlpriv->dm.txpower_track_control)
- rtlpriv->dm.thermalvalue = thermalvalue;
-
-exit:
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
-}
-
-static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.txpower_tracking = true;
- rtlpriv->dm.txpower_trackinginit = false;
- rtlpriv->dm.txpower_track_control = true;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
-}
-
-void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (!rtlpriv->dm.txpower_tracking)
- return;
-
- if (!rtlpriv->dm.tm_trigger) {
- rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
- BIT(16), 0x03);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
- rtlpriv->dm.tm_trigger = 1;
- return;
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
- rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
- rtlpriv->dm.tm_trigger = 0;
- }
-}
-
-void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rate_adaptive *ra = &(rtlpriv->ra);
-
- ra->ratr_state = DM_RATR_STA_INIT;
- ra->pre_ratr_state = DM_RATR_STA_INIT;
- if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
- rtlpriv->dm.useramask = true;
- else
- rtlpriv->dm.useramask = false;
-}
-
-void rtl92d_dm_init(struct ieee80211_hw *hw)
+void rtl92de_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1212,7 +154,7 @@ void rtl92d_dm_init(struct ieee80211_hw *hw)
rtl92d_dm_initialize_txpower_tracking(hw);
}
-void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
+void rtl92de_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
index 939cc45bfebd..beade227b442 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
@@ -4,94 +4,7 @@
#ifndef __RTL92C_DM_H__
#define __RTL92C_DM_H__
-#define HAL_DM_DIG_DISABLE BIT(0)
-#define HAL_DM_HIPWR_DISABLE BIT(1)
-
-#define OFDM_TABLE_LENGTH 37
-#define OFDM_TABLE_SIZE_92D 43
-#define CCK_TABLE_LENGTH 33
-
-#define CCK_TABLE_SIZE 33
-
-#define BW_AUTO_SWITCH_HIGH_LOW 25
-#define BW_AUTO_SWITCH_LOW_HIGH 30
-
-#define DM_DIG_FA_UPPER 0x32
-#define DM_DIG_FA_LOWER 0x20
-#define DM_DIG_FA_TH0 0x100
-#define DM_DIG_FA_TH1 0x400
-#define DM_DIG_FA_TH2 0x600
-
-#define RXPATHSELECTION_SS_TH_LOW 30
-#define RXPATHSELECTION_DIFF_TH 18
-
-#define DM_RATR_STA_INIT 0
-#define DM_RATR_STA_HIGH 1
-#define DM_RATR_STA_MIDDLE 2
-#define DM_RATR_STA_LOW 3
-
-#define CTS2SELF_THVAL 30
-#define REGC38_TH 20
-
-#define WAIOTTHVAL 25
-
-#define TXHIGHPWRLEVEL_NORMAL 0
-#define TXHIGHPWRLEVEL_LEVEL1 1
-#define TXHIGHPWRLEVEL_LEVEL2 2
-#define TXHIGHPWRLEVEL_BT1 3
-#define TXHIGHPWRLEVEL_BT2 4
-
-#define DM_TYPE_BYFW 0
-#define DM_TYPE_BYDRIVER 1
-
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-#define INDEX_MAPPING_NUM 13
-
-struct swat {
- u8 failure_cnt;
- u8 try_flag;
- u8 stop_trying;
- long pre_rssi;
- long trying_threshold;
- u8 cur_antenna;
- u8 pre_antenna;
-};
-
-enum tag_dynamic_init_gain_operation_type_definition {
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_BACKOFF = 2,
- DIG_TYPE_RX_GAIN_MIN = 3,
- DIG_TYPE_RX_GAIN_MAX = 4,
- DIG_TYPE_ENABLE = 5,
- DIG_TYPE_DISABLE = 6,
- DIG_OP_TYPE_MAX
-};
-
-enum dm_1r_cca {
- CCA_1R = 0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
-enum dm_rf {
- RF_SAVE = 0,
- RF_NORMAL = 1,
- RF_MAX = 2,
-};
-
-enum dm_sw_ant_switch {
- ANS_ANTENNA_B = 1,
- ANS_ANTENNA_A = 2,
- ANS_ANTENNA_MAX = 3,
-};
-
-void rtl92d_dm_init(struct ieee80211_hw *hw);
-void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
-void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
-void rtl92d_dm_write_dig(struct ieee80211_hw *hw);
-void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw);
-void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92de_dm_init(struct ieee80211_hw *hw);
+void rtl92de_dm_watchdog(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index e1fb29962801..c8444a72ff69 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -5,157 +5,12 @@
#include "../pci.h"
#include "../base.h"
#include "../efuse.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
#include "fw.h"
#include "sw.h"
-static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv)
-{
- return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ?
- true : false;
-}
-
-static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
-
- if (enable) {
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
- /* Reserved for fw extension.
- * 0x81[7] is used for mac0 status ,
- * so don't write this reg here
- * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/
- }
-}
-
-static void _rtl92d_write_fw(struct ieee80211_hw *hw,
- enum version_8192d version, u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferptr = buffer;
- u32 pagenums, remainsize;
- u32 page, offset;
-
- rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
- rtl_fill_dummy(bufferptr, &size);
- pagenums = size / FW_8192D_PAGE_SIZE;
- remainsize = size % FW_8192D_PAGE_SIZE;
- if (pagenums > 8)
- pr_err("Page numbers should not greater then 8\n");
- for (page = 0; page < pagenums; page++) {
- offset = page * FW_8192D_PAGE_SIZE;
- rtl_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192D_PAGE_SIZE);
- }
- if (remainsize) {
- offset = pagenums * FW_8192D_PAGE_SIZE;
- page = pagenums;
- rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
- }
-}
-
-static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 counter = 0;
- u32 value32;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_CHKSUM_RPT)));
- if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
- pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
- value32);
- return -EIO;
- }
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- value32 |= MCUFWDL_RDY;
- rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
- return 0;
-}
-
-void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1b_tmp;
- u8 delay = 100;
-
- /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */
- rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
- u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- while (u1b_tmp & BIT(2)) {
- delay--;
- if (delay == 0)
- break;
- udelay(50);
- u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- }
- WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "=====> 8051 reset success (%d)\n", delay);
-}
-
-static int _rtl92d_fw_init(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u32 counter;
-
-	rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already downloaded\n");
- /* polling for FW ready */
- counter = 0;
- do {
- if (rtlhal->interfaceindex == 0) {
- if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
- MAC0_READY) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC0_READY));
- return 0;
- }
- udelay(5);
- } else {
- if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
- MAC1_READY) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC1_READY));
- return 0;
- }
- udelay(5);
- }
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- if (rtlhal->interfaceindex == 0) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC0_READY));
- } else {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC1_READY));
- }
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
- rtl_read_dword(rtlpriv, REG_MCUFWDL));
- return -1;
-}
-
int rtl92d_download_fw(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -189,7 +44,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
}
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv);
if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
fwdl_in_process = true;
else
@@ -202,7 +57,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
for (count = 0; count < 5000; count++) {
udelay(500);
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv);
if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
fwdl_in_process = true;
else
@@ -237,11 +92,11 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
rtl92d_firmware_selfreset(hw);
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
}
- _rtl92d_enable_fw_download(hw, true);
- _rtl92d_write_fw(hw, version, pfwdata, fwsize);
- _rtl92d_enable_fw_download(hw, false);
+ rtl92d_enable_fw_download(hw, true);
+ rtl92d_write_fw(hw, version, pfwdata, fwsize);
+ rtl92d_enable_fw_download(hw, false);
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- err = _rtl92d_fw_free_to_go(hw);
+ err = rtl92d_fw_free_to_go(hw);
/* download fw over,clear 0x1f[5] */
value = rtl_read_byte(rtlpriv, 0x1f);
value &= (~BIT(5));
@@ -250,207 +105,10 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
if (err)
pr_err("fw is not ready to run!\n");
exit:
- err = _rtl92d_fw_init(hw);
+ err = rtl92d_fw_init(hw);
return err;
}
-static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 val_hmetfr;
- bool result = false;
-
- val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
- if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
- result = true;
- return result;
-}
-
-static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
- u8 element_id, u32 cmd_len, u8 *cmdbuffer)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 boxnum;
- u16 box_reg = 0, box_extreg = 0;
- u8 u1b_tmp;
- bool isfw_read = false;
- u8 buf_index = 0;
- bool bwrite_success = false;
- u8 wait_h2c_limmit = 100;
- u8 wait_writeh2c_limmit = 100;
- u8 boxcontent[4], boxextcontent[2];
- u32 h2c_waitcounter = 0;
- unsigned long flag;
- u8 idx;
-
- if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Return as RF is off!!!\n");
- return;
- }
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
- while (true) {
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- if (rtlhal->h2c_setinprogress) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d)\n",
- element_id);
-
- while (rtlhal->h2c_setinprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
- flag);
- h2c_waitcounter++;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
- udelay(100);
-
- if (h2c_waitcounter > 1000)
- return;
-
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
- flag);
- }
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- } else {
- rtlhal->h2c_setinprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- break;
- }
- }
- while (!bwrite_success) {
- wait_writeh2c_limmit--;
- if (wait_writeh2c_limmit == 0) {
- pr_err("Write H2C fail because no trigger for FW INT!\n");
- break;
- }
- boxnum = rtlhal->last_hmeboxnum;
- switch (boxnum) {
- case 0:
- box_reg = REG_HMEBOX_0;
- box_extreg = REG_HMEBOX_EXT_0;
- break;
- case 1:
- box_reg = REG_HMEBOX_1;
- box_extreg = REG_HMEBOX_EXT_1;
- break;
- case 2:
- box_reg = REG_HMEBOX_2;
- box_extreg = REG_HMEBOX_EXT_2;
- break;
- case 3:
- box_reg = REG_HMEBOX_3;
- box_extreg = REG_HMEBOX_EXT_3;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- boxnum);
- break;
- }
- isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
- while (!isfw_read) {
- wait_h2c_limmit--;
- if (wait_h2c_limmit == 0) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
- break;
- }
- udelay(10);
- isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
- u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
- }
- if (!isfw_read) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
- break;
- }
- memset(boxcontent, 0, sizeof(boxcontent));
- memset(boxextcontent, 0, sizeof(boxextcontent));
- boxcontent[0] = element_id;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
- switch (cmd_len) {
- case 1:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 1);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 2:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 2);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 3:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 3);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 4:
- boxcontent[0] |= (BIT(7));
- memcpy(boxextcontent, cmdbuffer + buf_index, 2);
- memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2);
- for (idx = 0; idx < 2; idx++)
- rtl_write_byte(rtlpriv, box_extreg + idx,
- boxextcontent[idx]);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 5:
- boxcontent[0] |= (BIT(7));
- memcpy(boxextcontent, cmdbuffer + buf_index, 2);
- memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3);
- for (idx = 0; idx < 2; idx++)
- rtl_write_byte(rtlpriv, box_extreg + idx,
- boxextcontent[idx]);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- default:
- pr_err("switch case %#x not processed\n",
- cmd_len);
- break;
- }
- bwrite_success = true;
- rtlhal->last_hmeboxnum = boxnum + 1;
- if (rtlhal->last_hmeboxnum == 4)
- rtlhal->last_hmeboxnum = 0;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
- }
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- rtlhal->h2c_setinprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
-}
-
-void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
- u8 element_id, u32 cmd_len, u8 *cmdbuffer)
-{
- u32 tmp_cmdbuf[2];
-
- memset(tmp_cmdbuf, 0, 8);
- memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
- _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
- return;
-}
-
static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -599,7 +257,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
struct sk_buff *skb = NULL;
u32 totalpacketlen;
bool rtstatus;
- u8 u1rsvdpageloc[3] = { 0 };
+ u8 u1rsvdpageloc[3] = { PROBERSP_PG, PSPOLL_PG, NULL_PG };
bool dlok = false;
u8 *beacon;
u8 *p_pspoll;
@@ -618,7 +276,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
/*--------------------------------------------------------
(3) null data
---------------------------------------------------------*/
@@ -626,7 +283,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
/*---------------------------------------------------------
(4) probe response
----------------------------------------------------------*/
@@ -634,7 +290,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
@@ -663,11 +318,3 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
"Set RSVD page location to Fw FAIL!!!!!!\n");
}
-
-void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
-{
- u8 u1_joinbssrpt_parm[1] = {0};
-
- SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
- rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
-}
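
For context on the download path kept above: the firmware image is written to the MCU in FW_8192D_PAGE_SIZE (4 KiB) pages, at most eight of them plus a remainder, using the page-write helpers now pulled in from ../rtl8192d/fw_common.h. A minimal standalone sketch of that split, with write_page() as a hypothetical stand-in for rtl_fw_page_write():

#include <stdio.h>
#include <stdint.h>

#define FW_PAGE_SIZE	4096u

/* Hypothetical stand-in for rtl_fw_page_write(); the real code writes the
 * chunk into the REG_MCUFWDL download window for the given page. */
static void write_page(size_t page, const uint8_t *buf, size_t len)
{
	(void)buf;
	printf("page %zu: %zu bytes\n", page, len);
}

static void write_fw(const uint8_t *fw, size_t size)
{
	size_t pagenums = size / FW_PAGE_SIZE;
	size_t remainsize = size % FW_PAGE_SIZE;
	size_t page;

	for (page = 0; page < pagenums; page++)
		write_page(page, fw + page * FW_PAGE_SIZE, FW_PAGE_SIZE);
	if (remainsize)
		write_page(pagenums, fw + pagenums * FW_PAGE_SIZE, remainsize);
}

int main(void)
{
	static uint8_t fw[3 * FW_PAGE_SIZE + 100];

	write_fw(fw, sizeof(fw));
	return 0;
}
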
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index 7f0a17c1a9ea..9e1385ac17b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -4,44 +4,7 @@
#ifndef __RTL92D__FW__H__
#define __RTL92D__FW__H__
-#define FW_8192D_START_ADDRESS 0x1000
-#define FW_8192D_PAGE_SIZE 4096
-#define FW_8192D_POLLING_TIMEOUT_COUNT 1000
-
-#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
-
-/* Firmware Header (8-byte alignment required) */
-/* --- LONG WORD 0 ---- */
-#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
- le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
-#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
- le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0))
-#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
- le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16))
-
-#define pagenum_128(_len) \
- (u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
-
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- *(u8 *)__ph2ccmd = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- *(u8 *)__ph2ccmd = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- *(u8 *)(__ph2ccmd + 1) = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- *(u8 *)(__ph2ccmd + 2) = __val;
-
int rtl92d_download_fw(struct ieee80211_hw *hw);
-void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *p_cmdbuffer);
-void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
-void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
#endif
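
The GET_FIRMWARE_HDR_* macros dropped above extract little-endian fields from the 8-byte-aligned firmware header: a 16-bit signature in the first long word, and version plus sub-version in the second. A self-contained sketch of the equivalent extraction, with get_le32() standing in for le32_get_bits() and an example header as the input:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for le32_get_bits(): assemble a little-endian 32-bit word. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Example header bytes: signature 0x92D0, version 0x0021, sub-version 0x05 */
	const uint8_t hdr[8] = { 0xd0, 0x92, 0x00, 0x00, 0x21, 0x00, 0x05, 0x00 };
	unsigned int signature = get_le32(hdr) & 0xffff;	  /* GENMASK(15, 0) of LW0 */
	unsigned int version = get_le32(hdr + 4) & 0xffff;	  /* GENMASK(15, 0) of LW1 */
	unsigned int sub_ver = (get_le32(hdr + 4) >> 16) & 0xff;  /* GENMASK(23, 16) of LW1 */

	printf("sig=0x%04x ver=0x%04x sub=0x%02x\n", signature, version, sub_ver);
	return 0;
}
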
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 4ba42f6be3f2..73b81e60cfa9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -8,8 +8,12 @@
#include "../cam.h"
#include "../ps.h"
#include "../pci.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
@@ -50,34 +54,6 @@ static void _rtl92de_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
}
-static void _rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp1byte;
-
- tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
- tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
- tmp1byte &= ~(BIT(0));
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
-}
-
-static void _rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp1byte;
-
- tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
- tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
- tmp1byte |= BIT(0);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
-}
-
static void _rtl92de_enable_bcn_sub_func(struct ieee80211_hw *hw)
{
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(1));
@@ -90,58 +66,14 @@ static void _rtl92de_disable_bcn_sub_func(struct ieee80211_hw *hw)
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
switch (variable) {
case HW_VAR_RCR:
*((u32 *) (val)) = rtlpci->receive_config;
break;
- case HW_VAR_RF_STATE:
- *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
- break;
- case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfstate;
- u32 val_rcr;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *)(&rfstate));
- if (rfstate == ERFOFF) {
- *((bool *) (val)) = true;
- } else {
- val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
- val_rcr &= 0x00070000;
- if (val_rcr)
- *((bool *) (val)) = false;
- else
- *((bool *) (val)) = true;
- }
- break;
- }
- case HW_VAR_FW_PSMODE_STATUS:
- *((bool *) (val)) = ppsc->fw_current_inpsmode;
- break;
- case HW_VAR_CORRECT_TSF:{
- u64 tsf;
- u32 *ptsf_low = (u32 *)&tsf;
- u32 *ptsf_high = ((u32 *)&tsf) + 1;
-
- *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
- *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
- *((u64 *) (val)) = tsf;
- break;
- }
- case HW_VAR_INT_MIGRATION:
- *((bool *)(val)) = rtlpriv->dm.interrupt_migration;
- break;
- case HW_VAR_INT_AC:
- *((bool *)(val)) = rtlpriv->dm.disable_tx_int;
- break;
- case HAL_DEF_WOWLAN:
- break;
default:
- pr_err("switch case %#x not processed\n", variable);
+ rtl92d_get_hw_reg(hw, variable, val);
break;
}
}
@@ -151,141 +83,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 idx;
switch (variable) {
- case HW_VAR_ETHER_ADDR:
- for (idx = 0; idx < ETH_ALEN; idx++) {
- rtl_write_byte(rtlpriv, (REG_MACID + idx),
- val[idx]);
- }
- break;
- case HW_VAR_BASIC_RATE: {
- u16 rate_cfg = ((u16 *) val)[0];
- u8 rate_index = 0;
-
- rate_cfg = rate_cfg & 0x15f;
- if (mac->vendor == PEER_CISCO &&
- ((rate_cfg & 0x150) == 0))
- rate_cfg |= 0x01;
- rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
- rtl_write_byte(rtlpriv, REG_RRSR + 1,
- (rate_cfg >> 8) & 0xff);
- while (rate_cfg > 0x1) {
- rate_cfg = (rate_cfg >> 1);
- rate_index++;
- }
- if (rtlhal->fw_version > 0xe)
- rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
- rate_index);
- break;
- }
- case HW_VAR_BSSID:
- for (idx = 0; idx < ETH_ALEN; idx++) {
- rtl_write_byte(rtlpriv, (REG_BSSID + idx),
- val[idx]);
- }
- break;
- case HW_VAR_SIFS:
- rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
- rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
- if (!mac->ht_enable)
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
- 0x0e0e);
- else
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
- *((u16 *) val));
- break;
- case HW_VAR_SLOT_TIME: {
- u8 e_aci;
-
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
- rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
- for (e_aci = 0; e_aci < AC_MAX; e_aci++)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (&e_aci));
- break;
- }
- case HW_VAR_ACK_PREAMBLE: {
- u8 reg_tmp;
- u8 short_preamble = (bool) (*val);
-
- reg_tmp = (mac->cur_40_prime_sc) << 5;
- if (short_preamble)
- reg_tmp |= 0x80;
- rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
- break;
- }
- case HW_VAR_AMPDU_MIN_SPACE: {
- u8 min_spacing_to_set;
-
- min_spacing_to_set = *val;
- if (min_spacing_to_set <= 7) {
- mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
- min_spacing_to_set);
- *val = min_spacing_to_set;
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
- rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
- mac->min_space_cfg);
- }
- break;
- }
- case HW_VAR_SHORTGI_DENSITY: {
- u8 density_to_set;
-
- density_to_set = *val;
- mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
- mac->min_space_cfg |= (density_to_set << 3);
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
- rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
- mac->min_space_cfg);
- break;
- }
- case HW_VAR_AMPDU_FACTOR: {
- u8 factor_toset;
- u32 regtoset;
- u8 *ptmp_byte = NULL;
- u8 index;
-
- if (rtlhal->macphymode == DUALMAC_DUALPHY)
- regtoset = 0xb9726641;
- else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
- regtoset = 0x66626641;
- else
- regtoset = 0xb972a841;
- factor_toset = *val;
- if (factor_toset <= 3) {
- factor_toset = (1 << (factor_toset + 2));
- if (factor_toset > 0xf)
- factor_toset = 0xf;
- for (index = 0; index < 4; index++) {
- ptmp_byte = (u8 *)(&regtoset) + index;
- if ((*ptmp_byte & 0xf0) >
- (factor_toset << 4))
- *ptmp_byte = (*ptmp_byte & 0x0f)
- | (factor_toset << 4);
- if ((*ptmp_byte & 0x0f) > factor_toset)
- *ptmp_byte = (*ptmp_byte & 0xf0)
- | (factor_toset);
- }
- rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
- }
- break;
- }
case HW_VAR_AC_PARAM: {
u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
@@ -346,37 +145,6 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
rtlpci->receive_config = ((u32 *) (val))[0];
break;
- case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = val[0];
-
- rtl_write_word(rtlpriv, REG_RL,
- retry_limit << RETRY_LIMIT_SHORT_SHIFT |
- retry_limit << RETRY_LIMIT_LONG_SHIFT);
- break;
- }
- case HW_VAR_DUAL_TSF_RST:
- rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
- break;
- case HW_VAR_EFUSE_BYTES:
- rtlefuse->efuse_usedbytes = *((u16 *) val);
- break;
- case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *val;
- break;
- case HW_VAR_IO_CMD:
- rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
- break;
- case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *val);
- break;
- case HW_VAR_SET_RPWM:
- rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
- break;
- case HW_VAR_H2C_FW_PWRMODE:
- break;
- case HW_VAR_FW_PSMODE_STATUS:
- ppsc->fw_current_inpsmode = *((bool *) val);
- break;
case HW_VAR_H2C_FW_JOINBSSRPT: {
u8 mstatus = (*val);
u8 tmp_regcr, tmp_reg422;
@@ -409,19 +177,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
- case HW_VAR_AID: {
- u16 u2btmp;
- u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
- u2btmp &= 0xC000;
- rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
- mac->assoc_id));
- break;
- }
case HW_VAR_CORRECT_TSF: {
u8 btype_ibss = val[0];
if (btype_ibss)
- _rtl92de_stop_tx_beacon(hw);
+ rtl92de_stop_tx_beacon(hw);
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));
rtl_write_dword(rtlpriv, REG_TSFTR,
(u32) (mac->tsf & 0xffffffff));
@@ -429,7 +189,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u32) ((mac->tsf >> 32) & 0xffffffff));
_rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);
if (btype_ibss)
- _rtl92de_resume_tx_beacon(hw);
+ rtl92de_resume_tx_beacon(hw);
break;
}
@@ -472,34 +232,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- pr_err("switch case %#x not processed\n", variable);
+ rtl92d_set_hw_reg(hw, variable, val);
break;
}
}
-static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool status = true;
- long count = 0;
- u32 value = _LLT_INIT_ADDR(address) |
- _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
-
- rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
- do {
- value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
- break;
- if (count > POLLING_LLT_THRESHOLD) {
- pr_err("Failed to polling write LLT done at address %d!\n",
- address);
- status = false;
- break;
- }
- } while (++count);
- return status;
-}
-
static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -558,13 +295,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* 18. LLT_table_init(Adapter); */
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
- status = _rtl92de_llt_write(hw, i, i + 1);
+ status = rtl92de_llt_write(hw, i, i + 1);
if (!status)
return status;
}
/* end of list */
- status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ status = rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
if (!status)
return status;
@@ -573,13 +310,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* config this MAC as two MAC transfer. */
/* Otherwise used as local loopback buffer. */
for (i = txpktbuf_bndy; i < maxpage; i++) {
- status = _rtl92de_llt_write(hw, i, (i + 1));
+ status = rtl92de_llt_write(hw, i, (i + 1));
if (!status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
- status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
+ status = rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
if (!status)
return status;
@@ -842,32 +579,6 @@ static void _rtl92de_enable_aspm_back_door(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x352, 0x1);
}
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 sec_reg_value;
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
- if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
- return;
- }
- sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
- if (rtlpriv->sec.use_defaultkey) {
- sec_reg_value |= SCR_TXUSEDK;
- sec_reg_value |= SCR_RXUSEDK;
- }
- sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
- rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
-}
-
int rtl92de_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -991,11 +702,11 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
_rtl92de_enable_aspm_back_door(hw);
/* rtlpriv->intf_ops->enable_aspm(hw); */
- rtl92d_dm_init(hw);
+ rtl92de_dm_init(hw);
rtlpci->being_init_adapter = false;
if (ppsc->rfpwr_state == ERFON) {
- rtl92d_phy_lc_calibrate(hw);
+ rtl92d_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version));
/* 5G and 2.4G must wait sometime to let RF LO ready */
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
u32 tmp_rega;
@@ -1020,23 +731,6 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
return err;
}
-static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- enum version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- u32 value32;
-
- value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
- if (!(value32 & 0x000f0000)) {
- version = VERSION_TEST_CHIP_92D_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
- } else {
- version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
- }
- return version;
-}
-
static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
@@ -1048,11 +742,11 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
if (type == NL80211_IFTYPE_UNSPECIFIED ||
type == NL80211_IFTYPE_STATION) {
- _rtl92de_stop_tx_beacon(hw);
+ rtl92de_stop_tx_beacon(hw);
_rtl92de_enable_bcn_sub_func(hw);
} else if (type == NL80211_IFTYPE_ADHOC ||
type == NL80211_IFTYPE_AP) {
- _rtl92de_resume_tx_beacon(hw);
+ rtl92de_resume_tx_beacon(hw);
_rtl92de_disable_bcn_sub_func(hw);
} else {
rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -1152,13 +846,6 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
}
}
-/* don't set REG_EDCA_BE_PARAM here because
- * mac80211 will send pkt when scan */
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
-{
- rtl92d_dm_init_edca_turbo(hw);
-}
-
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1383,825 +1070,6 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92de_enable_interrupt(hw);
}
-static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
- u8 *rom_content, bool autoloadfail)
-{
- u32 rfpath, eeaddr, group, offset1, offset2;
- u8 i;
-
- memset(pwrinfo, 0, sizeof(struct txpower_info));
- if (autoloadfail) {
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- if (group < CHANNEL_GROUP_MAX_2G) {
- pwrinfo->cck_index[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_2G;
- pwrinfo->ht40_1sindex[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_2G;
- } else {
- pwrinfo->ht40_1sindex[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_5G;
- }
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT40_2SDIFF;
- pwrinfo->ht20indexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT20_DIFF;
- pwrinfo->ofdmindexdiff[rfpath][group] =
- EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
- pwrinfo->ht40maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
- pwrinfo->ht20maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
- }
- }
- for (i = 0; i < 3; i++) {
- pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
- pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
- }
- return;
- }
-
-	/* Maybe autoload OK, but the tx power index value is not filled.
-	 * If we find it, we set it to the default value. */
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) {
- eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3)
- + group;
- pwrinfo->cck_index[rfpath][group] =
- (rom_content[eeaddr] == 0xFF) ?
- (eeaddr > 0x7B ?
- EEPROM_DEFAULT_TXPOWERLEVEL_5G :
- EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
- rom_content[eeaddr];
- }
- }
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- offset1 = group / 3;
- offset2 = group % 3;
- eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3) +
- offset2 + offset1 * 21;
- pwrinfo->ht40_1sindex[rfpath][group] =
- (rom_content[eeaddr] == 0xFF) ? (eeaddr > 0x7B ?
- EEPROM_DEFAULT_TXPOWERLEVEL_5G :
- EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
- rom_content[eeaddr];
- }
- }
- /* These just for 92D efuse offset. */
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- int base1 = EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G;
-
- offset1 = group / 3;
- offset2 = group % 3;
-
- if (rom_content[base1 + offset2 + offset1 * 21] != 0xFF)
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- (rom_content[base1 +
- offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT40_2SDIFF;
- if (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht20indexdiff[rfpath][group] =
- (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht20indexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT20_DIFF;
- if (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ofdmindexdiff[rfpath][group] =
- (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ofdmindexdiff[rfpath][group] =
- EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
- if (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht40maxoffset[rfpath][group] =
- (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht40maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
- if (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht20maxoffset[rfpath][group] =
- (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G +
- offset2 + offset1 * 21] >> (rfpath * 4)) &
- 0xF;
- else
- pwrinfo->ht20maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
- }
- }
- if (rom_content[EEPROM_TSSI_A_5G] != 0xFF) {
- /* 5GL */
- pwrinfo->tssi_a[0] = rom_content[EEPROM_TSSI_A_5G] & 0x3F;
- pwrinfo->tssi_b[0] = rom_content[EEPROM_TSSI_B_5G] & 0x3F;
- /* 5GM */
- pwrinfo->tssi_a[1] = rom_content[EEPROM_TSSI_AB_5G] & 0x3F;
- pwrinfo->tssi_b[1] =
- (rom_content[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 |
- (rom_content[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2;
- /* 5GH */
- pwrinfo->tssi_a[2] = (rom_content[EEPROM_TSSI_AB_5G + 1] &
- 0xF0) >> 4 |
- (rom_content[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4;
- pwrinfo->tssi_b[2] = (rom_content[EEPROM_TSSI_AB_5G + 2] &
- 0xFC) >> 2;
- } else {
- for (i = 0; i < 3; i++) {
- pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
- pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
- }
- }
-}
-
-static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
- bool autoload_fail, u8 *hwinfo)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct txpower_info pwrinfo;
- u8 tempval[2], i, pwr, diff;
- u32 ch, rfpath, group;
-
- _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
- if (!autoload_fail) {
- /* bit0~2 */
- rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
- rtlefuse->eeprom_thermalmeter =
- hwinfo[EEPROM_THERMAL_METER] & 0x1f;
- rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K];
- tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
- tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
- rtlefuse->txpwr_fromeprom = true;
- if (IS_92D_D_CUT(rtlpriv->rtlhal.version) ||
- IS_92D_E_CUT(rtlpriv->rtlhal.version)) {
- rtlefuse->internal_pa_5g[0] =
- !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
- rtlefuse->internal_pa_5g[1] =
- !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
- "Is D cut,Internal PA0 %d Internal PA1 %d\n",
- rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]);
- }
- rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
- rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
- } else {
- rtlefuse->eeprom_regulatory = 0;
- rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
- rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP;
- tempval[0] = tempval[1] = 3;
- }
-
-	/* Use default values to fill parameters if the
-	 * efuse is not filled in some places. */
-
- /* ThermalMeter from EEPROM */
- if (rtlefuse->eeprom_thermalmeter < 0x06 ||
- rtlefuse->eeprom_thermalmeter > 0x1c)
- rtlefuse->eeprom_thermalmeter = 0x12;
- rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
-
- /* check XTAL_K */
- if (rtlefuse->crystalcap == 0xFF)
- rtlefuse->crystalcap = 0;
- if (rtlefuse->eeprom_regulatory > 3)
- rtlefuse->eeprom_regulatory = 0;
-
- for (i = 0; i < 2; i++) {
- switch (tempval[i]) {
- case 0:
- tempval[i] = 5;
- break;
- case 1:
- tempval[i] = 4;
- break;
- case 2:
- tempval[i] = 3;
- break;
- case 3:
- default:
- tempval[i] = 0;
- break;
- }
- }
-
- rtlefuse->delta_iqk = tempval[0];
- if (tempval[1] > 0)
- rtlefuse->delta_lck = tempval[1] - 1;
- if (rtlefuse->eeprom_c9 == 0xFF)
- rtlefuse->eeprom_c9 = 0x00;
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
- rtlefuse->delta_iqk, rtlefuse->delta_lck);
-
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- group = rtl92d_get_chnlgroup_fromarray((u8) ch);
- if (ch < CHANNEL_MAX_NUMBER_2G)
- rtlefuse->txpwrlevel_cck[rfpath][ch] =
- pwrinfo.cck_index[rfpath][group];
- rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] =
- pwrinfo.ht40_1sindex[rfpath][group];
- rtlefuse->txpwr_ht20diff[rfpath][ch] =
- pwrinfo.ht20indexdiff[rfpath][group];
- rtlefuse->txpwr_legacyhtdiff[rfpath][ch] =
- pwrinfo.ofdmindexdiff[rfpath][group];
- rtlefuse->pwrgroup_ht20[rfpath][ch] =
- pwrinfo.ht20maxoffset[rfpath][group];
- rtlefuse->pwrgroup_ht40[rfpath][ch] =
- pwrinfo.ht40maxoffset[rfpath][group];
- pwr = pwrinfo.ht40_1sindex[rfpath][group];
- diff = pwrinfo.ht40_2sindexdiff[rfpath][group];
- rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] =
- (pwr > diff) ? (pwr - diff) : 0;
- }
- }
-}
-
-static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
- u8 *content)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 macphy_crvalue = content[EEPROM_MAC_FUNCTION];
-
- if (macphy_crvalue & BIT(3)) {
- rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode SINGLEMAC_SINGLEPHY\n");
- } else {
- rtlhal->macphymode = DUALMAC_DUALPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode DUALMAC_DUALPHY\n");
- }
-}
-
-static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
- u8 *content)
-{
- _rtl92de_read_macphymode_from_prom(hw, content);
- rtl92d_phy_config_macphymode(hw);
- rtl92d_phy_config_macphymode_info(hw);
-}
-
-static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- enum version_8192d chipver = rtlpriv->rtlhal.version;
- u8 cutvalue[2];
- u16 chipvalue;
-
- read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
- read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
- chipvalue = (cutvalue[1] << 8) | cutvalue[0];
- switch (chipvalue) {
- case 0xAA55:
- chipver |= CHIP_92D_C_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
- break;
- case 0x9966:
- chipver |= CHIP_92D_D_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
- break;
- case 0xCC33:
- chipver |= CHIP_92D_E_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
- break;
- default:
- chipver |= CHIP_92D_D_CUT;
- pr_err("Unknown CUT!\n");
- break;
- }
- rtlpriv->rtlhal.version = chipver;
-}
-
-static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
- EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D,
- EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
- COUNTRY_CODE_WORLD_WIDE_13};
- int i;
- u16 usvalue;
- u8 *hwinfo;
-
- hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
- if (!hwinfo)
- return;
-
- if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
- goto exit;
-
- _rtl92de_efuse_update_chip_version(hw);
- _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
-
- /* Read Permanent MAC address for 2nd interface */
- if (rtlhal->interfaceindex != 0) {
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i];
- *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
- }
- }
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
- rtlefuse->dev_addr);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
- _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
-
- /* Read Channel Plan */
- switch (rtlhal->bandset) {
- case BAND_ON_2_4G:
- rtlefuse->channel_plan = COUNTRY_CODE_TELEC;
- break;
- case BAND_ON_5G:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- case BAND_ON_BOTH:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- default:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- }
- rtlefuse->txpwr_fromeprom = true;
-exit:
- kfree(hwinfo);
-}
-
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 tmp_u1b;
-
- rtlhal->version = _rtl92de_read_chip_version(hw);
- tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
- rtlefuse->autoload_status = tmp_u1b;
- if (tmp_u1b & BIT(4)) {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
- rtlefuse->epromtype = EEPROM_93C46;
- } else {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
- rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
- }
- if (tmp_u1b & BIT(5)) {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
-
- rtlefuse->autoload_failflag = false;
- _rtl92de_read_adapter_info(hw);
- } else {
- pr_err("Autoload ERR!!\n");
- }
- return;
-}
-
-static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u32 ratr_value;
- u8 ratr_index = 0;
- u8 nmode = mac->ht_enable;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
- u16 shortgi_rate;
- u32 tmp_ratr_value;
- u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
- u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
- 1 : 0;
- enum wireless_mode wirelessmode = mac->mode;
-
- if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->deflink.supp_rates[1] << 4;
- else
- ratr_value = sta->deflink.supp_rates[0];
- ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
- sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
- switch (wirelessmode) {
- case WIRELESS_MODE_A:
- ratr_value &= 0x00000FF0;
- break;
- case WIRELESS_MODE_B:
- if (ratr_value & 0x0000000c)
- ratr_value &= 0x0000000d;
- else
- ratr_value &= 0x0000000f;
- break;
- case WIRELESS_MODE_G:
- ratr_value &= 0x00000FF5;
- break;
- case WIRELESS_MODE_N_24G:
- case WIRELESS_MODE_N_5G:
- nmode = 1;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- ratr_value &= 0x0007F005;
- } else {
- u32 ratr_mask;
-
- if (get_rf_type(rtlphy) == RF_1T2R ||
- get_rf_type(rtlphy) == RF_1T1R) {
- ratr_mask = 0x000ff005;
- } else {
- ratr_mask = 0x0f0ff005;
- }
-
- ratr_value &= ratr_mask;
- }
- break;
- default:
- if (rtlphy->rf_type == RF_1T2R)
- ratr_value &= 0x000ff0ff;
- else
- ratr_value &= 0x0f0ff0ff;
-
- break;
- }
- ratr_value &= 0x0FFFFFFF;
- if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz))) {
- ratr_value |= 0x10000000;
- tmp_ratr_value = (ratr_value >> 12);
- for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
- if ((1 << shortgi_rate) & tmp_ratr_value)
- break;
- }
- shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
- (shortgi_rate << 4) | (shortgi_rate);
- }
- rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
-}
-
-static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_sta_info *sta_entry = NULL;
- u32 ratr_bitmap;
- u8 ratr_index;
- u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
- u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
- 1 : 0;
- enum wireless_mode wirelessmode = 0;
- bool shortgi = false;
- u32 value[2];
- u8 macid = 0;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
-
- sta_entry = (struct rtl_sta_info *) sta->drv_priv;
- mimo_ps = sta_entry->mimo_ps;
- wirelessmode = sta_entry->wireless_mode;
- if (mac->opmode == NL80211_IFTYPE_STATION)
- curtxbw_40mhz = mac->bw_40;
- else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- macid = sta->aid + 1;
-
- if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->deflink.supp_rates[1] << 4;
- else
- ratr_bitmap = sta->deflink.supp_rates[0];
- ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
- sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- ratr_index = RATR_INX_WIRELESS_B;
- if (ratr_bitmap & 0x0000000c)
- ratr_bitmap &= 0x0000000d;
- else
- ratr_bitmap &= 0x0000000f;
- break;
- case WIRELESS_MODE_G:
- ratr_index = RATR_INX_WIRELESS_GB;
-
- if (rssi_level == 1)
- ratr_bitmap &= 0x00000f00;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x00000ff0;
- else
- ratr_bitmap &= 0x00000ff5;
- break;
- case WIRELESS_MODE_A:
- ratr_index = RATR_INX_WIRELESS_G;
- ratr_bitmap &= 0x00000ff0;
- break;
- case WIRELESS_MODE_N_24G:
- case WIRELESS_MODE_N_5G:
- if (wirelessmode == WIRELESS_MODE_N_24G)
- ratr_index = RATR_INX_WIRELESS_NGB;
- else
- ratr_index = RATR_INX_WIRELESS_NG;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x00070000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0007f000;
- else
- ratr_bitmap &= 0x0007f005;
- } else {
- if (rtlphy->rf_type == RF_1T2R ||
- rtlphy->rf_type == RF_1T1R) {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff005;
- }
- } else {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff005;
- }
- }
- }
- if ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz)) {
-
- if (macid == 0)
- shortgi = true;
- else if (macid == 1)
- shortgi = false;
- }
- break;
- default:
- ratr_index = RATR_INX_WIRELESS_NGB;
-
- if (rtlphy->rf_type == RF_1T2R)
- ratr_bitmap &= 0x000ff0ff;
- else
- ratr_bitmap &= 0x0f0ff0ff;
- break;
- }
-
- value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
- value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x value0:%x value1:%x\n",
- ratr_bitmap, value[0], value[1]);
- rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
- if (macid != 0)
- sta_entry->ratr_index = ratr_index;
-}
-
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->dm.useramask)
- rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
- else
- rtl92de_update_hal_rate_table(hw, sta);
-}
-
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u16 sifs_timer;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- &mac->slot_time);
- if (!mac->ht_enable)
- sifs_timer = 0x0a0a;
- else
- sifs_timer = 0x1010;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
-}
-
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- enum rf_pwrstate e_rfpowerstate_toset;
- u8 u1tmp;
- bool actuallyset = false;
- unsigned long flag;
-
- if (rtlpci->being_init_adapter)
- return false;
- if (ppsc->swrf_processing)
- return false;
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- if (ppsc->rfchange_inprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- return false;
- } else {
- ppsc->rfchange_inprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- }
- rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
- REG_MAC_PINMUX_CFG) & ~(BIT(3)));
- u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
- e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
- if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
- e_rfpowerstate_toset = ERFON;
- ppsc->hwradiooff = false;
- actuallyset = true;
- } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
- e_rfpowerstate_toset = ERFOFF;
- ppsc->hwradiooff = true;
- actuallyset = true;
- }
- if (actuallyset) {
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- } else {
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
- RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- }
- *valid = 1;
- return !ppsc->hwradiooff;
-}
-
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 *macaddr = p_macaddr;
- u32 entry_id;
- bool is_pairwise = false;
- static u8 cam_const_addr[4][6] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
- };
- static u8 cam_const_broad[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
- if (clear_all) {
- u8 idx;
- u8 cam_offset = 0;
- u8 clear_number = 5;
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
- for (idx = 0; idx < clear_number; idx++) {
- rtl_cam_mark_invalid(hw, cam_offset + idx);
- rtl_cam_empty_entry(hw, cam_offset + idx);
-
- if (idx < 5) {
- memset(rtlpriv->sec.key_buf[idx], 0,
- MAX_KEY_LEN);
- rtlpriv->sec.key_len[idx] = 0;
- }
- }
- } else {
- switch (enc_algo) {
- case WEP40_ENCRYPTION:
- enc_algo = CAM_WEP40;
- break;
- case WEP104_ENCRYPTION:
- enc_algo = CAM_WEP104;
- break;
- case TKIP_ENCRYPTION:
- enc_algo = CAM_TKIP;
- break;
- case AESCCMP_ENCRYPTION:
- enc_algo = CAM_AES;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- enc_algo);
- enc_algo = CAM_TKIP;
- break;
- }
- if (is_wepkey || rtlpriv->sec.use_defaultkey) {
- macaddr = cam_const_addr[key_index];
- entry_id = key_index;
- } else {
- if (is_group) {
- macaddr = cam_const_broad;
- entry_id = key_index;
- } else {
- if (mac->opmode == NL80211_IFTYPE_AP) {
- entry_id = rtl_cam_get_free_entry(hw,
- p_macaddr);
- if (entry_id >= TOTAL_CAM_ENTRY) {
- pr_err("Can not find free hw security cam entry\n");
- return;
- }
- } else {
- entry_id = CAM_PAIRWISE_KEY_POSITION;
- }
- key_index = PAIRWISE_KEYIDX;
- is_pairwise = true;
- }
- }
- if (rtlpriv->sec.key_len[key_index] == 0) {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
- if (mac->opmode == NL80211_IFTYPE_AP)
- rtl_cam_del_entry(hw, p_macaddr);
- rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
- } else {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
- if (is_pairwise) {
- RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwise Key content",
- rtlpriv->sec.pairwise_key,
- rtlpriv->
- sec.key_len[PAIRWISE_KEYIDX]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
- rtl_cam_add_one_entry(hw, macaddr, key_index,
- entry_id, enc_algo,
- CAM_CONFIG_NO_USEDK,
- rtlpriv->
- sec.key_buf[key_index]);
- } else {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- rtl_cam_add_one_entry(hw,
- rtlefuse->dev_addr,
- PAIRWISE_KEYIDX,
- CAM_PAIRWISE_KEY_POSITION,
- enc_algo, CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf[entry_id]);
- }
- rtl_cam_add_one_entry(hw, macaddr, key_index,
- entry_id, enc_algo,
- CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf
- [entry_id]);
- }
- }
- }
-}
-
void rtl92de_suspend(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
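
The LLT setup kept in _rtl92de_llt_table_init() programs two structures through the shared rtl92de_llt_write(): a terminated list covering the TX packet buffer pages below txpktbuf_bndy, and a ring over the remaining pages used as the local loopback buffer. A standalone model of that layout, with example boundary values (not necessarily the driver's actual numbers):

#include <stdio.h>

#define MAXPAGE		255
#define TXPKTBUF_BNDY	246

static unsigned char llt[MAXPAGE + 1];

int main(void)
{
	int i;

	for (i = 0; i < TXPKTBUF_BNDY - 1; i++)		/* TX buffer pages: simple chain */
		llt[i] = i + 1;
	llt[TXPKTBUF_BNDY - 1] = 0xFF;			/* end of list */

	for (i = TXPKTBUF_BNDY; i < MAXPAGE; i++)	/* loopback buffer pages */
		llt[i] = i + 1;
	llt[MAXPAGE] = TXPKTBUF_BNDY;			/* last entry wraps to ring start */

	printf("end marker %#x, ring tail -> %u\n",
	       (unsigned int)llt[TXPKTBUF_BNDY - 1], (unsigned int)llt[MAXPAGE]);
	return 0;
}
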
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index ea495216d394..bda4a1a7c91d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -5,7 +5,6 @@
#define __RTL92DE_HW_H__
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
struct rtl_int *int_vec);
int rtl92de_hw_init(struct ieee80211_hw *hw);
@@ -14,21 +13,11 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
void rtl92de_disable_interrupt(struct ieee80211_hw *hw);
int rtl92de_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci);
void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw);
void rtl92de_set_beacon_interval(struct ieee80211_hw *hw);
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level,
- bool update_bw);
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
u8 direct);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 4bd708570992..33aede56c81b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -3,7 +3,7 @@
#include "../wifi.h"
#include "../pci.h"
-#include "reg.h"
+#include "../rtl8192d/reg.h"
#include "led.h"
void rtl92de_sw_led_on(struct ieee80211_hw *hw, enum rtl_led_pin pin)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d835a27429f0..d429560009bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -5,8 +5,11 @@
#include "../pci.h"
#include "../ps.h"
#include "../core.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/rf_common.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
@@ -21,9 +24,6 @@
#define RF_REG_NUM_FOR_C_CUT_2G 5
#define RF_CHNL_NUM_5G 19
#define RF_CHNL_NUM_5G_40M 17
-#define TARGET_CHNL_NUM_5G 221
-#define TARGET_CHNL_NUM_2G 14
-#define CV_CURVE_CNT 64
static u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = {
0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0
@@ -160,15 +160,6 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
25711, 25658, 25606, 25554, 25502, 25451, 25328
};
-static const u8 channel_all[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130,
- 132, 134, 136, 138, 140, 149, 151, 153, 155,
- 157, 159, 161, 163, 165
-};
-
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -235,119 +226,6 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
regaddr, bitmask, data);
}
-static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
-
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- u32 newoffset;
- u32 tmplong, tmplong2;
- u8 rfpi_enable = 0;
- u32 retvalue;
-
- newoffset = offset;
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
- if (rfpath == RF90_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
- tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
- (newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong & (~BLSSIREADEDGE));
- udelay(10);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- udelay(50);
- udelay(50);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong | BLSSIREADEDGE);
- udelay(10);
- if (rfpath == RF90_PATH_A)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- else if (rfpath == RF90_PATH_B)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
- BIT(8));
- if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
- BLSSIREADBACKDATA);
- else
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
- BLSSIREADBACKDATA);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
- return retvalue;
-}
-
-static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath,
- u32 offset, u32 data)
-{
- u32 data_and_addr;
- u32 newoffset;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- newoffset = offset;
- /* T65 RF */
- data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr, u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 original_value, readback_value, bitshift;
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
- spin_lock(&rtlpriv->locks.rf_lock);
- original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = calculate_bit_shift(bitmask);
- readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock(&rtlpriv->locks.rf_lock);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
- return readback_value;
-}
-
-void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u32 original_value, bitshift;
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
- if (bitmask == 0)
- return;
- spin_lock(&rtlpriv->locks.rf_lock);
- if (rtlphy->rf_mode != RF_OP_BY_FW) {
- if (bitmask != RFREG_OFFSET_MASK) {
- original_value = _rtl92d_phy_rf_serial_read(hw,
- rfpath, regaddr);
- bitshift = calculate_bit_shift(bitmask);
- data = ((original_value & (~bitmask)) |
- (data << bitshift));
- }
- _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
- }
- spin_unlock(&rtlpriv->locks.rf_lock);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
-}
-
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -374,133 +252,6 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
return true;
}
-static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-	/* RF Interface Software Control */
- /* 16 LSBs if read 32-bit from 0x870 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- /* 16 LSBs if read 32-bit from 0x874 */
- rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
-
- rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- /* RF Interface Readback Value */
- /* 16 LSBs if read 32-bit from 0x8E0 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- /* 16 LSBs if read 32-bit from 0x8E4 */
- rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
- /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
- rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
- /* RF Interface Output (and Enable) */
- /* 16 LSBs if read 32-bit from 0x860 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- /* 16 LSBs if read 32-bit from 0x864 */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- /* RF Interface (Output and) Enable */
- /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
-	/* Addr of LSSI. Write RF register by driver */
- /* LSSI Parameter */
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
- RFPGA0_XA_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
- RFPGA0_XB_LSSIPARAMETER;
-
- /* RF parameter */
- /* BB Band Select */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
-
-	/* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
-	/* Transceiver A~D HSSI Parameter-1 */
- /* wire control parameter1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
- /* wire control parameter1 */
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
-	/* Transceiver A~D HSSI Parameter-2 */
- /* wire control parameter2 */
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
- /* wire control parameter2 */
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
- /* RF switch Control */
- /* TR/Ant switch control */
- rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-
- /* AGC control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
- /* AGC control 2 */
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
- /* RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
-
- /*RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
- /* Tx AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
-
- /* Tx AFE control 2 */
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
-	/* Transceiver LSSI Readback SI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
-
-	/* Transceiver LSSI Readback PI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
-}
-
static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
u8 configtype)
{
@@ -601,58 +352,6 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- int index;
-
- if (regaddr == RTXAGC_A_RATE18_06)
- index = 0;
- else if (regaddr == RTXAGC_A_RATE54_24)
- index = 1;
- else if (regaddr == RTXAGC_A_CCK1_MCS32)
- index = 6;
- else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
- index = 7;
- else if (regaddr == RTXAGC_A_MCS03_MCS00)
- index = 2;
- else if (regaddr == RTXAGC_A_MCS07_MCS04)
- index = 3;
- else if (regaddr == RTXAGC_A_MCS11_MCS08)
- index = 4;
- else if (regaddr == RTXAGC_A_MCS15_MCS12)
- index = 5;
- else if (regaddr == RTXAGC_B_RATE18_06)
- index = 8;
- else if (regaddr == RTXAGC_B_RATE54_24)
- index = 9;
- else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
- index = 14;
- else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
- index = 15;
- else if (regaddr == RTXAGC_B_MCS03_MCS00)
- index = 10;
- else if (regaddr == RTXAGC_B_MCS07_MCS04)
- index = 11;
- else if (regaddr == RTXAGC_B_MCS11_MCS08)
- index = 12;
- else if (regaddr == RTXAGC_B_MCS15_MCS12)
- index = 13;
- else
- return;
-
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
- rtlphy->pwrgroup_cnt, index,
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
- if (index == 13)
- rtlphy->pwrgroup_cnt++;
-}
-
static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
u8 configtype)
{
@@ -666,7 +365,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
rtl_addr_delay(phy_regarray_table_pg[i]);
- _rtl92d_store_pwrindex_diffrate_offset(hw,
+ rtl92d_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
phy_regarray_table_pg[i + 2]);
@@ -726,7 +425,7 @@ bool rtl92d_phy_bb_config(struct ieee80211_hw *hw)
u32 regvaldw;
u8 value;
- _rtl92d_phy_init_bb_rf_register_definition(hw);
+ rtl92d_phy_init_bb_rf_register_definition(hw);
regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
regval | BIT(13) | BIT(0) | BIT(1));
@@ -812,115 +511,6 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->default_initialgain[0] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[1] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[2] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[3] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
- rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
- MASKBYTE0);
- rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
- MASKDWORD);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
-}
-
-static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
- u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 index = (channel - 1);
-
- /* 1. CCK */
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* RF-A */
- cckpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
- /* RF-B */
- cckpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
- } else {
- cckpowerlevel[RF90_PATH_A] = 0;
- cckpowerlevel[RF90_PATH_B] = 0;
- }
- /* 2. OFDM for 1S or 2S */
- if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
- /* Read HT 40 OFDM TX power */
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
- } else if (rtlphy->rf_type == RF_2T2R) {
- /* Read HT 40 OFDM TX power */
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
- }
-}
-
-static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
- u8 channel, u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
- rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
-}
-
-static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
-{
- u8 place = chnl;
-
- if (chnl > 14) {
- for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
- if (channel5g[place] == chnl) {
- place++;
- break;
- }
- }
- }
- return place;
-}
-
-void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 cckpowerlevel[2], ofdmpowerlevel[2];
-
- if (!rtlefuse->txpwr_fromeprom)
- return;
- channel = _rtl92c_phy_get_rightchnlplace(channel);
- _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
- &ofdmpowerlevel[0]);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
- &ofdmpowerlevel[0]);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
- rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
@@ -1122,65 +712,6 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
- u8 rfpath, u32 *pu4_regval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
- /*----Store original RFENV control type----*/
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- *pu4_regval =
- rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16);
- break;
- }
- /*----Set RF_ENV enable----*/
- rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
- udelay(1);
- /*----Set RF_ENV output high----*/
- rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
- udelay(1);
- /* Set bit number of Address and Data for RF register */
- /* Set 1 to 4 bits for 8255 */
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
- udelay(1);
- /*Set 0 to 12 bits for 8255 */
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
- udelay(1);
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
-}
-
-static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
- u32 *pu4_regval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
- /*----Restore RFENV control type----*/
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
- *pu4_regval);
- break;
- }
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
-}
-
static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1221,8 +752,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rtlhal->during_mac1init_radioa = true;
			/* assume this case does not occur */
if (need_pwr_down)
- _rtl92d_phy_enable_rf_env(hw, path,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
}
for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
@@ -1253,7 +784,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK));
}
if (need_pwr_down)
- _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
if (rtlhal->during_mac1init_radioa)
rtl92d_phy_powerdown_anotherphy(hw, false);
if (channel < 149)
@@ -1313,8 +844,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rtlhal->during_mac0init_radiob = true;
if (need_pwr_down)
- _rtl92d_phy_enable_rf_env(hw, path,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
}
}
for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
@@ -1347,31 +878,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
if (need_pwr_down)
- _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
if (rtlhal->during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
-{
- u8 place;
-
- if (chnl > 14) {
- for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
- if (channel_all[place] == chnl)
- return place - 13;
- }
- }
-
- return 0;
-}
-
-#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1 /* ms */
-#define MAX_TOLERANCE_92D 3
-
/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
{
@@ -1636,30 +1149,6 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
return result;
}
-static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
- u32 *adda_reg, u32 *adda_backup,
- u32 regnum)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
- for (i = 0; i < regnum; i++)
- adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
-}
-
-static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n");
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
- macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
u32 *adda_reg, u32 *adda_backup,
u32 regnum)
@@ -1685,37 +1174,6 @@ static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, macreg[i], macbackup[i]);
}
-static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
- u32 *adda_reg, bool patha_on, bool is2t)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 pathon;
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n");
- pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (patha_on)
- pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
- 0x04db25a4 : 0x0b1b25a4;
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
-}
-
-static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n");
- rtl_write_byte(rtlpriv, macreg[0], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] &
- (~BIT(3))));
- rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1772,14 +1230,16 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
- _rtl92d_phy_save_adda_registers(hw, adda_reg,
- rtlphy->adda_backup, IQK_ADDA_REG_NUM);
- _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup, IQK_BB_REG_NUM);
- }
- _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0)
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1, BIT(8));
@@ -1800,8 +1260,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
0x00010000);
}
/* MAC settings */
- _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
/* Page B init */
rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
@@ -1841,7 +1301,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
if (is2t) {
_rtl92d_phy_patha_standby(hw);
/* Turn Path B ADDA on */
- _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl92d_phy_pathb_iqk(hw);
if (pathb_ok == 0x03) {
@@ -1938,24 +1398,24 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
- _rtl92d_phy_save_adda_registers(hw, adda_reg,
- rtlphy->adda_backup,
- IQK_ADDA_REG_NUM);
- _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
if (is2t)
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup,
- IQK_BB_REG_NUM);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
else
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup,
- IQK_BB_REG_NUM - 1);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
}
- _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
/* MAC settings */
- _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
if (t == 0)
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1, BIT(8));
@@ -2002,7 +1462,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (is2t) {
/* _rtl92d_phy_patha_standby(hw); */
/* Turn Path B ADDA on */
- _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
pathb_ok = _rtl92d_phy_pathb_iqk_5g_normal(hw);
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2401,56 +1861,6 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
-{
- u32 ret;
-
- if (val1 >= val2)
- ret = val1 - val2;
- else
- ret = val2 - val1;
- return ret;
-}
-
-static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
-{
-
- int i;
-
- for (i = 0; i < ARRAY_SIZE(channel5g); i++)
- if (channel == channel5g[i])
- return true;
- return false;
-}
-
-static void _rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
- u32 *targetchnl, u32 * curvecount_val,
- bool is5g, u32 *curveindex)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 smallest_abs_val = 0xffffffff, u4tmp;
- u8 i, j;
- u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G;
-
- for (i = 0; i < chnl_num; i++) {
- if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1))
- continue;
- curveindex[i] = 0;
- for (j = 0; j < (CV_CURVE_CNT * 2); j++) {
- u4tmp = _rtl92d_phy_get_abs(targetchnl[i],
- curvecount_val[j]);
-
- if (u4tmp < smallest_abs_val) {
- curveindex[i] = j;
- smallest_abs_val = u4tmp;
- }
- }
- smallest_abs_val = 0xffffffff;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n",
- i, curveindex[i]);
- }
-}
-
static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
u8 channel)
{
@@ -2477,12 +1887,12 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
rtlpriv->rtlhal.during_mac1init_radioa = true;
			/* assume this case does not occur */
if (bneed_powerdown_radio)
- _rtl92d_phy_enable_rf_env(hw, erfpath,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
}
rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
if (bneed_powerdown_radio)
- _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
if (rtlpriv->rtlhal.during_mac1init_radioa)
rtl92d_phy_powerdown_anotherphy(hw, false);
} else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
@@ -2495,15 +1905,15 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
rtl92d_phy_enable_anotherphy(hw, true);
rtlpriv->rtlhal.during_mac0init_radiob = true;
if (bneed_powerdown_radio)
- _rtl92d_phy_enable_rf_env(hw, erfpath,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
}
rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
if (bneed_powerdown_radio)
- _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
if (rtlpriv->rtlhal.during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
@@ -2588,13 +1998,13 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
readval2);
}
if (index == 0 && rtlhal->interfaceindex == 0)
- _rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
- curvecount_val,
- true, curveindex_5g);
+ rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+ curvecount_val,
+ true, curveindex_5g);
else
- _rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
- curvecount_val,
- false, curveindex_2g);
+ rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+ curvecount_val,
+ false, curveindex_2g);
/* switch CV-curve control mode */
rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
BIT(17), 0x1);
@@ -2622,7 +2032,7 @@ static void _rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
_rtl92d_phy_lc_calibrate_sw(hw, is2t);
}
-void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2638,12 +2048,9 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"LCK:Start!!! currentband %x delay %d ms\n",
rtlhal->current_bandtype, timecount);
- if (IS_92D_SINGLEPHY(rtlhal->version)) {
- _rtl92d_phy_lc_calibrate(hw, true);
- } else {
- /* For 1T1R */
- _rtl92d_phy_lc_calibrate(hw, false);
- }
+
+ _rtl92d_phy_lc_calibrate(hw, is2t);
+
rtlphy->lck_inprogress = false;
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
@@ -2674,30 +2081,6 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
return true;
}
-void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 i;
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "settings regs %zu default regs %d\n",
- ARRAY_SIZE(rtlphy->iqk_matrix),
- IQK_MATRIX_REG_NUM);
- /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
- for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
- rtlphy->iqk_matrix[i].value[0][0] = 0x100;
- rtlphy->iqk_matrix[i].value[0][2] = 0x100;
- rtlphy->iqk_matrix[i].value[0][4] = 0x100;
- rtlphy->iqk_matrix[i].value[0][6] = 0x100;
- rtlphy->iqk_matrix[i].value[0][1] = 0x0;
- rtlphy->iqk_matrix[i].value[0][3] = 0x0;
- rtlphy->iqk_matrix[i].value[0][5] = 0x0;
- rtlphy->iqk_matrix[i].value[0][7] = 0x0;
- rtlphy->iqk_matrix[i].iqk_done = false;
- }
-}
-
static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
u8 channel, u8 *stage, u8 *step,
u32 *delay)
@@ -2891,74 +2274,6 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
return 1;
}
-static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
- switch (rtlphy->current_io_type) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
- rtl92d_dm_write_dig(hw);
- rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
- de_digtable->cur_igvalue = 0x37;
- rtl92d_dm_write_dig(hw);
- break;
- default:
- pr_err("switch case %#x not processed\n",
- rtlphy->current_io_type);
- break;
- }
- rtlphy->set_io_inprogress = false;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
- rtlphy->current_io_type);
-}
-
-bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool postprocessing = false;
-
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
- do {
- switch (iotype) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan\n");
- postprocessing = true;
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan\n");
- postprocessing = true;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- iotype);
- break;
- }
- } while (false);
- if (postprocessing && !rtlphy->set_io_inprogress) {
- rtlphy->set_io_inprogress = true;
- rtlphy->current_io_type = iotype;
- } else {
- return false;
- }
- rtl92d_phy_set_io(hw);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
- return true;
-}
-
static void _rtl92d_phy_set_rfon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -3141,100 +2456,6 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
return bresult;
}
-void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 offset = REG_MAC_PHY_CTRL_NORMAL;
-
- switch (rtlhal->macphymode) {
- case DUALMAC_DUALPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_DUALPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF3);
- break;
- case SINGLEMAC_SINGLEPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF4);
- break;
- case DUALMAC_SINGLEPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_SINGLEPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF1);
- break;
- }
-}
-
-void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- switch (rtlhal->macphymode) {
- case DUALMAC_SINGLEPHY:
- rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= RF_TYPE_2T2R;
- rtlhal->bandset = BAND_ON_BOTH;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- break;
-
- case SINGLEMAC_SINGLEPHY:
- rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= RF_TYPE_2T2R;
- rtlhal->bandset = BAND_ON_BOTH;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- break;
-
- case DUALMAC_DUALPHY:
- rtlphy->rf_type = RF_1T1R;
- rtlhal->version &= RF_TYPE_1T1R;
- /* Now we let MAC0 run on 5G band. */
- if (rtlhal->interfaceindex == 0) {
- rtlhal->bandset = BAND_ON_5G;
- rtlhal->current_bandtype = BAND_ON_5G;
- } else {
- rtlhal->bandset = BAND_ON_2_4G;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- }
- break;
- default:
- break;
- }
-}
-
-u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
-{
- u8 group;
-
- if (channel_all[chnl] <= 3)
- group = 0;
- else if (channel_all[chnl] <= 9)
- group = 1;
- else if (channel_all[chnl] <= 14)
- group = 2;
- else if (channel_all[chnl] <= 44)
- group = 3;
- else if (channel_all[chnl] <= 54)
- group = 4;
- else if (channel_all[chnl] <= 64)
- group = 5;
- else if (channel_all[chnl] <= 112)
- group = 6;
- else if (channel_all[chnl] <= 126)
- group = 7;
- else if (channel_all[chnl] <= 140)
- group = 8;
- else if (channel_all[chnl] <= 153)
- group = 9;
- else if (channel_all[chnl] <= 159)
- group = 10;
- else
- group = 11;
- return group;
-}
-
void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -3286,31 +2507,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
}
}
-void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- switch (rtlpriv->rtlhal.macphymode) {
- case DUALMAC_DUALPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0x0);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
- rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
- break;
- case DUALMAC_SINGLEPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0xf8);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
- rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
- break;
- case SINGLEMAC_SINGLEPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0x0);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10);
- rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
- break;
- default:
- break;
- }
-}
-
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index 8d07c783a023..bbe9ef77225e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -10,11 +10,8 @@
#define MAX_DOZE_WAITING_TIMES_9x 64
-#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define MAX_TOLERANCE 5
-
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
#define APK_CURVE_REG_NUM 4
@@ -27,12 +24,8 @@
#define RESET_CNT_LIMIT 3
#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 10
#define IQK_BB_REG_NUM_test 6
#define IQK_MAC_REG_NUM 4
-#define RX_INDEX_MAPPING_NUM 15
-
-#define IQK_DELAY_TIME 1
#define CT_OFFSET_MAC_ADDR 0X16
@@ -68,80 +61,30 @@ struct swchnlcmd {
u32 msdelay;
};
-enum baseband_config_type {
- BASEBAND_CONFIG_PHY_REG = 0,
- BASEBAND_CONFIG_AGC_TAB = 1,
-};
-
-enum rf_content {
- radioa_txt = 0,
- radiob_txt = 1,
- radioc_txt = 2,
- radiod_txt = 3
-};
-
-static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->rtlhal.interfaceindex == 1)
- spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag);
-}
-
-static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->rtlhal.interfaceindex == 1)
- spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock,
- *flag);
-}
-
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask);
void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
-void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum rf_content content,
enum radio_path rfpath);
-bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state);
-void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
-void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
-u8 rtl92d_get_chnlgroup_fromarray(u8 chnl);
void rtl92d_phy_set_poweron(struct ieee80211_hw *hw);
-void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
-void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
-void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
-void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag);
-void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag);
-u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 83787fd293de..eb7d8b070cc7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -2,383 +2,14 @@
/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "hw.h"
-void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
-
- switch (bandwidth) {
- case HT_CHANNEL_WIDTH_20:
- for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
- rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval
- [rfpath] & 0xfffff3ff) | 0x0400);
- rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) |
- BIT(11), 0x01);
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
- "20M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
- }
-
- break;
- case HT_CHANNEL_WIDTH_20_40:
- for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
- rtlphy->rfreg_chnlval[rfpath] =
- ((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff));
- rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
- 0x00);
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
- "40M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
- }
- break;
- default:
- pr_err("unknown bandwidth: %#X\n", bandwidth);
- break;
- }
-}
-
-void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 tx_agc[2] = {0, 0}, tmpval;
- bool turbo_scanoff = false;
- u8 idx1, idx2;
- u8 *ptr;
-
- if (rtlefuse->eeprom_regulatory != 0)
- turbo_scanoff = true;
- if (mac->act_scanning) {
- tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
- tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
- if (turbo_scanoff) {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- }
- }
- } else {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- }
- if (rtlefuse->eeprom_regulatory == 0) {
- tmpval = (rtlphy->mcs_offset[0][6]) +
- (rtlphy->mcs_offset[0][7] << 8);
- tx_agc[RF90_PATH_A] += tmpval;
- tmpval = (rtlphy->mcs_offset[0][14]) +
- (rtlphy->mcs_offset[0][15] << 24);
- tx_agc[RF90_PATH_B] += tmpval;
- }
- }
-
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- ptr = (u8 *) (&(tx_agc[idx1]));
- for (idx2 = 0; idx2 < 4; idx2++) {
- if (*ptr > RF6052_MAX_TX_PWR)
- *ptr = RF6052_MAX_TX_PWR;
- ptr++;
- }
- }
-
- tmpval = tx_agc[RF90_PATH_A] & 0xff;
- rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_A_CCK1_MCS32);
- tmpval = tx_agc[RF90_PATH_A] >> 8;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK11_A_CCK2_11);
- tmpval = tx_agc[RF90_PATH_B] >> 24;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK11_A_CCK2_11);
- tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
- rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK1_55_MCS32);
-}
-
-static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel,
- u32 *ofdmbase, u32 *mcsbase)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 powerbase0, powerbase1;
- u8 legacy_pwrdiff, ht20_pwrdiff;
- u8 i, powerlevel[2];
-
- for (i = 0; i < 2; i++) {
- powerlevel[i] = ppowerlevel[i];
- legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
- powerbase0 = powerlevel[i] + legacy_pwrdiff;
- powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
- (powerbase0 << 8) | powerbase0;
- *(ofdmbase + i) = powerbase0;
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- " [OFDM power base index rf(%c) = 0x%x]\n",
- i == 0 ? 'A' : 'B', *(ofdmbase + i));
- }
-
- for (i = 0; i < 2; i++) {
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
- ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
- powerlevel[i] += ht20_pwrdiff;
- }
- powerbase1 = powerlevel[i];
- powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
- (powerbase1 << 8) | powerbase1;
- *(mcsbase + i) = powerbase1;
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- " [MCS power base index rf(%c) = 0x%x]\n",
- i == 0 ? 'A' : 'B', *(mcsbase + i));
- }
-}
-
-static u8 _rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex)
-{
- u8 group;
- u8 channel_info[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165
- };
-
-	if (channel_info[chnlindex] <= 3)	/* Channel 1-3 */
- group = 0;
- else if (channel_info[chnlindex] <= 9) /* Channel 4-9 */
- group = 1;
- else if (channel_info[chnlindex] <= 14) /* Channel 10-14 */
- group = 2;
- else if (channel_info[chnlindex] <= 64)
- group = 6;
- else if (channel_info[chnlindex] <= 140)
- group = 7;
- else
- group = 8;
- return group;
-}
-
-static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
- u8 channel, u8 index,
- u32 *powerbase0,
- u32 *powerbase1,
- u32 *p_outwriteval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 i, chnlgroup = 0, pwr_diff_limit[4];
- u32 writeval = 0, customer_limit, rf;
-
- for (rf = 0; rf < 2; rf++) {
- switch (rtlefuse->eeprom_regulatory) {
- case 0:
- chnlgroup = 0;
- writeval = rtlphy->mcs_offset
- [chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- case 1:
- if (rtlphy->pwrgroup_cnt == 1)
- chnlgroup = 0;
- if (rtlphy->pwrgroup_cnt >= MAX_PG_GROUP) {
- chnlgroup = _rtl92d_phy_get_chnlgroup_bypg(
- channel - 1);
- if (rtlphy->current_chan_bw ==
- HT_CHANNEL_WIDTH_20)
- chnlgroup++;
- else
- chnlgroup += 4;
- writeval = rtlphy->mcs_offset
- [chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- }
- break;
- case 2:
- writeval = ((index < 2) ? powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Better regulatory, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- case 3:
- chnlgroup = 0;
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "customer's limit, 40MHz rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B',
- rtlefuse->pwrgroup_ht40[rf]
- [channel - 1]);
- } else {
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "customer's limit, 20MHz rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B',
- rtlefuse->pwrgroup_ht20[rf]
- [channel - 1]);
- }
- for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset
- [chnlgroup][index + (rf ? 8 : 0)] &
- (0x7f << (i * 8))) >> (i * 8));
- if (rtlphy->current_chan_bw ==
- HT_CHANNEL_WIDTH_20_40) {
- if (pwr_diff_limit[i] >
- rtlefuse->pwrgroup_ht40[rf]
- [channel - 1])
- pwr_diff_limit[i] =
- rtlefuse->pwrgroup_ht40
- [rf][channel - 1];
- } else {
- if (pwr_diff_limit[i] >
- rtlefuse->pwrgroup_ht20[rf][
- channel - 1])
- pwr_diff_limit[i] =
- rtlefuse->pwrgroup_ht20[rf]
- [channel - 1];
- }
- }
- customer_limit = (pwr_diff_limit[3] << 24) |
- (pwr_diff_limit[2] << 16) |
- (pwr_diff_limit[1] << 8) |
- (pwr_diff_limit[0]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer's limit rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', customer_limit);
- writeval = customer_limit + ((index < 2) ?
- powerbase0[rf] : powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer, writeval rf(%c)= 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- default:
- chnlgroup = 0;
- writeval = rtlphy->mcs_offset[chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] : powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeval rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- }
- *(p_outwriteval + rf) = writeval;
- }
-}
-
-static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
- u8 index, u32 *pvalue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- static u16 regoffset_a[6] = {
- RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
- RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
- RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
- };
- static u16 regoffset_b[6] = {
- RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
- RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
- RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
- };
- u8 i, rf, pwr_val[4];
- u32 writeval;
- u16 regoffset;
-
- for (rf = 0; rf < 2; rf++) {
- writeval = pvalue[rf];
- for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8) ((writeval & (0x7f <<
- (i * 8))) >> (i * 8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
- pwr_val[i] = RF6052_MAX_TX_PWR;
- }
- writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
- (pwr_val[1] << 8) | pwr_val[0];
- if (rf == 0)
- regoffset = regoffset_a[index];
- else
- regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Set 0x%x = %08x\n", regoffset, writeval);
- if (((get_rf_type(rtlphy) == RF_2T2R) &&
- (regoffset == RTXAGC_A_MCS15_MCS12 ||
- regoffset == RTXAGC_B_MCS15_MCS12)) ||
- ((get_rf_type(rtlphy) != RF_2T2R) &&
- (regoffset == RTXAGC_A_MCS07_MCS04 ||
- regoffset == RTXAGC_B_MCS07_MCS04))) {
- writeval = pwr_val[3];
- if (regoffset == RTXAGC_A_MCS15_MCS12 ||
- regoffset == RTXAGC_A_MCS07_MCS04)
- regoffset = 0xc90;
- if (regoffset == RTXAGC_B_MCS15_MCS12 ||
- regoffset == RTXAGC_B_MCS07_MCS04)
- regoffset = 0xc98;
- for (i = 0; i < 3; i++) {
- if (i != 2)
- writeval = (writeval > 8) ?
- (writeval - 8) : 0;
- else
- writeval = (writeval > 6) ?
- (writeval - 6) : 0;
- rtl_write_byte(rtlpriv, (u32) (regoffset + i),
- (u8) writeval);
- }
- }
- }
-}
-
-void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel)
-{
- u32 writeval[2], powerbase0[2], powerbase1[2];
- u8 index;
-
- _rtl92d_phy_get_power_base(hw, ppowerlevel, channel,
- &powerbase0[0], &powerbase1[0]);
- for (index = 0; index < 6; index++) {
- _rtl92d_get_txpower_writeval_by_regulatory(hw,
- channel, index, &powerbase0[0],
- &powerbase1[0], &writeval[0]);
- _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]);
- }
-}
-
bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
index 4e646cc9ebc0..c097d90cc99c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
@@ -4,11 +4,6 @@
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
-void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
-void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index afd685ed460a..5f6311c2aac4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -5,8 +5,12 @@
#include "../core.h"
#include "../pci.h"
#include "../base.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
#include "phy.h"
#include "dm.h"
#include "hw.h"
@@ -207,7 +211,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.radio_onoff_checking = rtl92de_gpio_radio_on_off_checking,
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
- .dm_watchdog = rtl92d_dm_watchdog,
+ .dm_watchdog = rtl92de_dm_watchdog,
.scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
@@ -223,6 +227,8 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.set_rfreg = rtl92d_phy_set_rf_reg,
.linked_set_reg = rtl92d_linked_set_reg,
.get_btc_status = rtl_btc_status_false,
+ .phy_iq_calibrate = rtl92d_phy_iq_calibrate,
+ .phy_lc_calibrate = rtl92d_phy_lc_calibrate,
};
static struct rtl_mod_params rtl92de_mod_params = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 192982ec8152..2b9b352f7783 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -5,8 +5,10 @@
#include "../pci.h"
#include "../base.h"
#include "../stats.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
#include "phy.h"
#include "trx.h"
#include "led.h"
@@ -23,434 +25,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
-static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
- struct rx_fwinfo_92d *p_drvinfo,
- bool packet_match_bssid,
- bool packet_toself,
- bool packet_beacon)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
- struct phy_sts_cck_8192d *cck_buf;
- s8 rx_pwr_all, rx_pwr[4];
- u8 rf_rx_num = 0, evm, pwdb_all;
- u8 i, max_spatial_stream;
- u32 rssi, total_rssi = 0;
- bool is_cck_rate;
-
- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
- pstats->packet_matchbssid = packet_match_bssid;
- pstats->packet_toself = packet_toself;
- pstats->packet_beacon = packet_beacon;
- pstats->is_cck = is_cck_rate;
- pstats->rx_mimo_sig_qual[0] = -1;
- pstats->rx_mimo_sig_qual[1] = -1;
-
- if (is_cck_rate) {
- u8 report, cck_highpwr;
- cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
- if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = rtlphy->cck_high_power;
- else
- cck_highpwr = false;
- if (!cck_highpwr) {
- u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
- report = cck_buf->cck_agc_rpt & 0xc0;
- report = report >> 6;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
- report = p_drvinfo->cfosho[0] & 0x60;
- report = report >> 5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- }
- }
- pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
- /* CCK gain is smaller than OFDM/MCS gain, */
- /* so we add gain diff by experiences, the val is 6 */
- pwdb_all += 6;
- if (pwdb_all > 100)
- pwdb_all = 100;
- /* modify the offset to make the same gain index with OFDM. */
- if (pwdb_all > 34 && pwdb_all <= 42)
- pwdb_all -= 2;
- else if (pwdb_all > 26 && pwdb_all <= 34)
- pwdb_all -= 6;
- else if (pwdb_all > 14 && pwdb_all <= 26)
- pwdb_all -= 8;
- else if (pwdb_all > 4 && pwdb_all <= 14)
- pwdb_all -= 4;
- pstats->rx_pwdb_all = pwdb_all;
- pstats->recvsignalpower = rx_pwr_all;
- if (packet_match_bssid) {
- u8 sq;
- if (pstats->rx_pwdb_all > 40) {
- sq = 100;
- } else {
- sq = cck_buf->sq_rpt;
- if (sq > 64)
- sq = 0;
- else if (sq < 20)
- sq = 100;
- else
- sq = ((64 - sq) * 100) / 44;
- }
- pstats->signalquality = sq;
- pstats->rx_mimo_sig_qual[0] = sq;
- pstats->rx_mimo_sig_qual[1] = -1;
- }
- } else {
- rtlpriv->dm.rfpath_rxenable[0] = true;
- rtlpriv->dm.rfpath_rxenable[1] = true;
- for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
- if (rtlpriv->dm.rfpath_rxenable[i])
- rf_rx_num++;
- rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
- - 110;
- rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
- total_rssi += rssi;
- rtlpriv->stats.rx_snr_db[i] =
- (long)(p_drvinfo->rxsnr[i] / 2);
- if (packet_match_bssid)
- pstats->rx_mimo_signalstrength[i] = (u8) rssi;
- }
- rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
- pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
- pstats->rx_pwdb_all = pwdb_all;
- pstats->rxpower = rx_pwr_all;
- pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
- pdesc->rxmcs <= DESC_RATEMCS15)
- max_spatial_stream = 2;
- else
- max_spatial_stream = 1;
- for (i = 0; i < max_spatial_stream; i++) {
- evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
- if (packet_match_bssid) {
- if (i == 0)
- pstats->signalquality =
- (u8)(evm & 0xff);
- pstats->rx_mimo_sig_qual[i] =
- (u8)(evm & 0xff);
- }
- }
- }
- if (is_cck_rate)
- pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
- pwdb_all));
- else if (rf_rx_num != 0)
- pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
- total_rssi /= rf_rx_num));
-}
-
-static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
-
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
-
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
-}
-
-static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi = rtlpriv->stats.ui_rssi.elements[
- rtlpriv->stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements
- [rtlpriv->stats.ui_rssi.index++] =
- pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw,
- (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself)
- rtl92d_loop_over_paths(hw, pstats);
-}
-
-static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
- 5 + pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_AP)
- return;
- else
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb = undec_sm_pwdb + 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92de_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int stream;
-
- for (stream = 0; stream < 2; stream++) {
- if (pstats->rx_mimo_sig_qual[stream] != -1) {
- if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
- rtlpriv->stats.rx_evm_percentage[stream] =
- pstats->rx_mimo_sig_qual[stream];
- }
- rtlpriv->stats.rx_evm_percentage[stream] =
- ((rtlpriv->stats.rx_evm_percentage[stream]
- * (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_sig_qual[stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
-}
-
-static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm, tmpval;
-
- if (pstats->signalquality == 0)
- return;
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.ui_link_quality.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.ui_link_quality.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm = rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index];
- rtlpriv->stats.ui_link_quality.total_val -= last_evm;
- }
- rtlpriv->stats.ui_link_quality.total_val +=
- pstats->signalquality;
- rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.ui_link_quality.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.ui_link_quality.index = 0;
- tmpval = rtlpriv->stats.ui_link_quality.total_val /
- rtlpriv->stats.ui_link_quality.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- rtl92d_loop_over_streams(hw, pstats);
- }
-}
-
-static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
-
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
-
- _rtl92de_process_ui_rssi(hw, pcurrent_stats);
- _rtl92de_process_pwdb(hw, pcurrent_stats);
- _rtl92de_process_ui_link_quality(hw, pcurrent_stats);
-}
-
-static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
- struct rx_fwinfo_92d *p_drvinfo)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct ieee80211_hdr *hdr;
- u8 *tmp_buf;
- u8 *praddr;
- u16 type, cfc;
- __le16 fc;
- bool packet_matchbssid, packet_toself, packet_beacon = false;
-
- tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
- hdr = (struct ieee80211_hdr *)tmp_buf;
- fc = hdr->frame_control;
- cfc = le16_to_cpu(fc);
- type = WLAN_FC_GET_TYPE(fc);
- praddr = hdr->addr1;
- packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
- ether_addr_equal(mac->bssid,
- (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
- (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
- hdr->addr3) &&
- (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
- packet_toself = packet_matchbssid &&
- ether_addr_equal(praddr, rtlefuse->dev_addr);
- if (ieee80211_is_beacon(fc))
- packet_beacon = true;
- _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
- packet_matchbssid, packet_toself,
- packet_beacon);
- _rtl92de_process_phyinfo(hw, tmp_buf, pstats);
-}
-
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc8, struct sk_buff *skb)
-{
- __le32 *pdesc = (__le32 *)pdesc8;
- struct rx_fwinfo_92d *p_drvinfo;
- u32 phystatus = get_rx_desc_physt(pdesc);
-
- stats->length = (u16)get_rx_desc_pkt_len(pdesc);
- stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
- RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
- stats->icv = (u16)get_rx_desc_icv(pdesc);
- stats->crc = (u16)get_rx_desc_crc32(pdesc);
- stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !get_rx_desc_swdec(pdesc);
- stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
- stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
- stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
- stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
- (get_rx_desc_faggr(pdesc) == 1));
- stats->timestamp_low = get_rx_desc_tsfl(pdesc);
- stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
- stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
- if (get_rx_desc_crc32(pdesc))
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!get_rx_desc_swdec(pdesc))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (get_rx_desc_bw(pdesc))
- rx_status->bw = RATE_INFO_BW_40;
- if (get_rx_desc_rxht(pdesc))
- rx_status->encoding = RX_ENC_HT;
- rx_status->flag |= RX_FLAG_MACTIME_START;
- if (stats->decrypted)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
- false, stats->rate);
- rx_status->mactime = get_rx_desc_tsfl(pdesc);
- if (phystatus) {
- p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
- stats->rx_bufshift);
- _rtl92de_translate_rx_signal_stuff(hw,
- skb, stats,
- (struct rx_desc_92d *)pdesc,
- p_drvinfo);
- }
- /*rx_status->qual = stats->signal; */
- rx_status->signal = stats->recvsignalpower + 10;
- return true;
-}
-
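For reference, the RX-status helpers removed above turn the hardware's signal readings into dBm: the 0-100 strength index maps to roughly -95..-45 dBm, and each OFDM path's gain byte maps to (gain & 0x3f) * 2 - 110 dBm. A minimal standalone sketch of those two conversions, illustrative only and not part of the patch:

#include <stdio.h>

/* Same arithmetic as the removed _rtl92de_translate_todbm() and the
 * OFDM branch of _rtl92de_query_rxphystatus(); plain C types are used
 * here only so the sketch compiles on its own.
 */
static long translate_todbm(unsigned char signal_strength_index)
{
	return (long)((signal_strength_index + 1) >> 1) - 95;
}

static int ofdm_path_power_dbm(unsigned char gain_trsw)
{
	return (gain_trsw & 0x3f) * 2 - 110;
}

int main(void)
{
	/* index 60 -> -65 dBm, gain byte 0x2a -> -26 dBm */
	printf("%ld %d\n", translate_todbm(60), ofdm_path_power_dbm(0x2a));
	return 0;
}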
static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
u8 *virtualaddress8)
{
@@ -712,87 +286,6 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
set_tx_desc_own(pdesc, 1);
}
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
- u8 desc_name, u8 *val)
-{
- __le32 *pdesc = (__le32 *)pdesc8;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_OWN:
- wmb();
- set_tx_desc_own(pdesc, 1);
- break;
- case HW_DESC_TX_NEXTDESC_ADDR:
- set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
- desc_name);
- break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_RXOWN:
- wmb();
- set_rx_desc_own(pdesc, 1);
- break;
- case HW_DESC_RXBUFF_ADDR:
- set_rx_desc_buff_addr(pdesc, *(u32 *)val);
- break;
- case HW_DESC_RXPKT_LEN:
- set_rx_desc_pkt_len(pdesc, *(u32 *)val);
- break;
- case HW_DESC_RXERO:
- set_rx_desc_eor(pdesc, 1);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
- desc_name);
- break;
- }
- }
-}
-
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc8, bool istx, u8 desc_name)
-{
- __le32 *p_desc = (__le32 *)p_desc8;
- u32 ret = 0;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_OWN:
- ret = get_tx_desc_own(p_desc);
- break;
- case HW_DESC_TXBUFF_ADDR:
- ret = get_tx_desc_tx_buffer_address(p_desc);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
- desc_name);
- break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_OWN:
- ret = get_rx_desc_own(p_desc);
- break;
- case HW_DESC_RXPKT_LEN:
- ret = get_rx_desc_pkt_len(p_desc);
- break;
- case HW_DESC_RXBUFF_ADDR:
- ret = get_rx_desc_buff_addr(p_desc);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
- desc_name);
- break;
- }
- }
- return ret;
-}
-
bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 2992668c156c..d3c480c75678 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -8,384 +8,17 @@
#define TX_DESC_AGGR_SUBFRAME_SIZE 32
#define RX_DESC_SIZE 32
-#define RX_DRV_INFO_SIZE_UNIT 8
#define TX_DESC_NEXT_DESC_OFFSET 40
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* macros to read/write various fields in RX or TX descriptors */
-
-static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
-}
-
-static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
-}
-
-static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(25));
-}
-
-static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(26));
-}
-
-static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(27));
-}
-
-static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(28));
-}
-
-static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(31));
-}
-
-static inline u32 get_tx_desc_own(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(31));
-}
-
-static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
-}
-
-static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, BIT(5));
-}
-
-static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, BIT(7));
-}
-
-static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
-}
-
-static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
-}
-
-static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
-}
-
-static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
-}
-
-static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 2), __val, BIT(17));
-}
-
-static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
-}
-
-static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
-}
-
-static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
-}
-
-static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
-}
-
-static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(6));
-}
-
-static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(7));
-}
-
-static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(8));
-}
-
-static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(10));
-}
-
-static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(11));
-}
-
-static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(12));
-}
-
-static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(13));
-}
-
-static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
-}
-
-static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(25));
-}
-
-static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(26));
-}
-
-static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(27));
-}
-
-static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
-}
-
-static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
-}
-
-static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
-}
-
-static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, BIT(6));
-}
-
-static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
-}
-
-static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
-}
-
-static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
-}
-
-static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
-}
-
-static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 8) = cpu_to_le32(__val);
-}
-
-static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 8));
-}
-
-static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 10) = cpu_to_le32(__val);
-}
-
-static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(13, 0));
-}
-
-static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(14));
-}
-
-static inline u32 get_rx_desc_icv(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(15));
-}
-
-static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(19, 16));
-}
-
-static inline u32 get_rx_desc_shift(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(25, 24));
-}
-
-static inline u32 get_rx_desc_physt(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(26));
-}
-
-static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(27));
-}
-
-static inline u32 get_rx_desc_own(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(31));
-}
-
-static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
-}
-
-static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(30));
-}
-
-static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(31));
-}
-
-static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 1), BIT(14));
-}
-
-static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 1), BIT(15));
-}
-
-static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
-}
-
-static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(6));
-}
-
-static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(8));
-}
-
-static inline u32 get_rx_desc_bw(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(9));
-}
-
-static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 5));
-}
-
-static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 6));
-}
-
-static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 6) = cpu_to_le32(__val);
-}
-
static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
{
memset((void *)__pdesc, 0,
min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
}
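The accessors removed from this header all follow one pattern: the descriptor is treated as an array of little-endian 32-bit words, and individual fields are read or updated with the bitfield helpers from <linux/bitfield.h>. A short sketch of that pattern for the packet-length field shown above (kernel build context assumed, illustrative only):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* RX descriptor dword 0, bits 13:0, as in the removed get/set helpers */
static inline u32 example_get_rx_pkt_len(const __le32 *desc)
{
	return le32_get_bits(desc[0], GENMASK(13, 0));
}

static inline void example_set_rx_pkt_len(__le32 *desc, u32 len)
{
	le32p_replace_bits(&desc[0], len, GENMASK(13, 0));
}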
-/* For 92D early mode */
-static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
-}
-
-static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
-}
-
-static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
-}
-
-static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
-}
-
-static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
-}
-
-static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
-}
-
-static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
-}
-
-struct rx_fwinfo_92d {
- u8 gain_trsw[4];
- u8 pwdb_all;
- u8 cfosho[4];
- u8 cfotail[4];
- s8 rxevm[2];
- s8 rxsnr[4];
- u8 pdsnr[2];
- u8 csi_current[2];
- u8 csi_target[2];
- u8 sigevm;
- u8 max_ex_pwr;
- u8 ex_intf_flag:1;
- u8 sgi_en:1;
- u8 rxsc:2;
- u8 reserve:4;
-} __packed;
-
struct tx_desc_92d {
u32 pktsize:16;
u32 offset:8;
@@ -488,78 +121,12 @@ struct tx_desc_92d {
u32 reserve_pass_pcie_mm_limit[4];
} __packed;
-struct rx_desc_92d {
- u32 length:14;
- u32 crc32:1;
- u32 icverror:1;
- u32 drv_infosize:4;
- u32 security:3;
- u32 qos:1;
- u32 shift:2;
- u32 phystatus:1;
- u32 swdec:1;
- u32 lastseg:1;
- u32 firstseg:1;
- u32 eor:1;
- u32 own:1;
-
- u32 macid:5;
- u32 tid:4;
- u32 hwrsvd:5;
- u32 paggr:1;
- u32 faggr:1;
- u32 a1_fit:4;
- u32 a2_fit:4;
- u32 pam:1;
- u32 pwr:1;
- u32 moredata:1;
- u32 morefrag:1;
- u32 type:2;
- u32 mc:1;
- u32 bc:1;
-
- u32 seq:12;
- u32 frag:4;
- u32 nextpktlen:14;
- u32 nextind:1;
- u32 rsvd:1;
-
- u32 rxmcs:6;
- u32 rxht:1;
- u32 amsdu:1;
- u32 splcp:1;
- u32 bandwidth:1;
- u32 htc:1;
- u32 tcpchk_rpt:1;
- u32 ipcchk_rpt:1;
- u32 tcpchk_valid:1;
- u32 hwpcerr:1;
- u32 hwpcind:1;
- u32 iv0:16;
-
- u32 iv1;
-
- u32 tsfl;
-
- u32 bufferaddress;
- u32 bufferaddress64;
-
-} __packed;
-
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
- struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val);
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name);
bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 094cb36153f5..13e689037acc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1110,16 +1110,22 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M,
- DESC92C_RATE5_5M, DESC92C_RATE11M};
- u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M,
- DESC92C_RATE12M, DESC92C_RATE18M,
- DESC92C_RATE24M, DESC92C_RATE36M,
- DESC92C_RATE48M, DESC92C_RATE54M};
- u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
- DESC92C_RATEMCS2, DESC92C_RATEMCS3,
- DESC92C_RATEMCS4, DESC92C_RATEMCS5,
- DESC92C_RATEMCS6, DESC92C_RATEMCS7};
+ static const u8 cck_rates[] = {
+ DESC92C_RATE1M, DESC92C_RATE2M,
+ DESC92C_RATE5_5M, DESC92C_RATE11M
+ };
+ static const u8 ofdm_rates[] = {
+ DESC92C_RATE6M, DESC92C_RATE9M,
+ DESC92C_RATE12M, DESC92C_RATE18M,
+ DESC92C_RATE24M, DESC92C_RATE36M,
+ DESC92C_RATE48M, DESC92C_RATE54M
+ };
+ static const u8 ht_rates_1t[] = {
+ DESC92C_RATEMCS0, DESC92C_RATEMCS1,
+ DESC92C_RATEMCS2, DESC92C_RATEMCS3,
+ DESC92C_RATEMCS4, DESC92C_RATEMCS5,
+ DESC92C_RATEMCS6, DESC92C_RATEMCS7
+ };
u8 i;
u8 power_index;
@@ -2155,15 +2161,16 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
static u8 _get_right_chnl_place_for_iqk(u8 chnl)
{
- u8 channel_all[TARGET_CHNL_NUM_2G_5G] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 36, 38, 40, 42, 44, 46,
- 48, 50, 52, 54, 56, 58, 60, 62, 64,
- 100, 102, 104, 106, 108, 110,
- 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136,
- 138, 140, 149, 151, 153, 155, 157,
- 159, 161, 163, 165};
+ static const u8 channel_all[TARGET_CHNL_NUM_2G_5G] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 36, 38, 40, 42, 44, 46,
+ 48, 50, 52, 54, 56, 58, 60, 62, 64,
+ 100, 102, 104, 106, 108, 110,
+ 112, 114, 116, 118, 120, 122,
+ 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 149, 151, 153, 155, 157,
+ 159, 161, 163, 165
+ };
u8 place = chnl;
if (chnl > 14) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 6e8c87a2fae4..2ea72d9e3957 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -979,6 +979,9 @@ int rtl_usb_probe(struct usb_interface *intf,
usb_priv->dev.intf = intf;
usb_priv->dev.udev = udev;
usb_set_intfdata(intf, hw);
+ /* For dual MAC RTL8192DU, which has two interfaces. */
+ rtlpriv->rtlhal.interfaceindex =
+ intf->altsetting[0].desc.bInterfaceNumber;
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_USB;
rtlpriv->cfg = rtl_hal_cfg;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 9fabf597cfd6..442419568734 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -20,6 +20,7 @@
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
#define MASKBYTE3 0xff000000
+#define MASKH3BYTES 0xffffff00
#define MASKHWORD 0xffff0000
#define MASKLWORD 0x0000ffff
#define MASKDWORD 0xffffffff
@@ -48,6 +49,10 @@
#define MASK20BITS 0xfffff
#define RFREG_OFFSET_MASK 0xfffff
+/* For dual MAC RTL8192DU */
+#define MAC0_ACCESS_PHY1 0x4000
+#define MAC1_ACCESS_PHY0 0x2000
+
#define RF_CHANGE_BY_INIT 0
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_PS BIT(29)
@@ -1043,33 +1048,6 @@ struct octet_string {
u16 length;
};
-struct rtl_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[];
-} __packed;
-
-struct rtl_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct rtl_probe_rsp {
- struct rtl_hdr_3addr header;
- u32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /*SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN
- */
- struct rtl_info_element info_element[];
-} __packed;
-
struct rtl_led_ctl {
bool led_opendrain;
enum rtl_led_pin sw_led0;
@@ -2268,6 +2246,7 @@ struct rtl_hal_ops {
bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw,
u8 configtype);
void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t);
+ void (*phy_iq_calibrate)(struct ieee80211_hw *hw);
void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw);
void (*dm_dynamic_txpower)(struct ieee80211_hw *hw);
void (*c2h_command_handle)(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index cffad1c01249..22838ede03cd 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -28,8 +28,16 @@ config RTW88_8822B
config RTW88_8822C
tristate
+config RTW88_8723X
+ tristate
+
+config RTW88_8703B
+ tristate
+ select RTW88_8723X
+
config RTW88_8723D
tristate
+ select RTW88_8723X
config RTW88_8821C
tristate
@@ -122,6 +130,20 @@ config RTW88_8723DS
802.11n SDIO wireless network adapter
+config RTW88_8723CS
+ tristate "Realtek 8723CS SDIO wireless network adapter"
+ depends on MMC
+ select RTW88_CORE
+ select RTW88_SDIO
+ select RTW88_8703B
+ help
+	  Select this option to enable support for the 8723CS chipset (EXPERIMENTAL)
+
+ This module adds support for the 8723CS 802.11n SDIO
+ wireless network adapter.
+
+ If you choose to build a module, it'll be called rtw88_8723cs.
+
config RTW88_8723DU
tristate "Realtek 8723DU USB wireless network adapter"
depends on USB
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index fd212c09d88a..8f47359b4380 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -44,6 +44,15 @@ rtw88_8822cs-objs := rtw8822cs.o
obj-$(CONFIG_RTW88_8822CU) += rtw88_8822cu.o
rtw88_8822cu-objs := rtw8822cu.o
+obj-$(CONFIG_RTW88_8723X) += rtw88_8723x.o
+rtw88_8723x-objs := rtw8723x.o
+
+obj-$(CONFIG_RTW88_8703B) += rtw88_8703b.o
+rtw88_8703b-objs := rtw8703b.o rtw8703b_tables.o
+
+obj-$(CONFIG_RTW88_8723CS) += rtw88_8723cs.o
+rtw88_8723cs-objs := rtw8723cs.o
+
obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o
rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 86467d2f8888..de3332eb7a22 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -3937,7 +3937,9 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
- if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
+ if (!coex_stat->wl_under_ips &&
+ (!coex_stat->wl_under_lps || coex_stat->wl_force_lps_ctrl) &&
+ !coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
rtw_coex_get_bt_supported_version(rtwdev,
&coex_stat->bt_supported_version);
rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver);
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index f20c0471c82a..eb69006c463e 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -26,6 +26,7 @@ enum rtw_debug_mask {
RTW_DBG_STATE = 0x00020000,
RTW_DBG_SDIO = 0x00040000,
+ RTW_DBG_UNEXP = 0x80000000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 3f037ddcecf1..ab7d414d0ba6 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -783,12 +783,18 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
struct rtw_sta_info *si =
sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
- s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
+ s32 thold = RTW_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW_DEFAULT_CQM_HYST;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
return;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+
if (!connect) {
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
@@ -805,15 +811,15 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
memset(h2c_pkt, 0, sizeof(h2c_pkt));
- threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
+ thold = clamp_t(s32, thold + rssi_offset, rssi_min, rssi_max);
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
BCN_FILTER_OFFLOAD_MODE_DEFAULT);
- SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
+ SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, thold);
SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
- SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
+ SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, hyst);
SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
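For reference, the threshold handed to the firmware above is an offset percentage rather than raw dBm; with the new defaults, -70 dBm becomes 30 after adding rssi_offset (100) and clamping to 0..100. A standalone sketch of that derivation, using a hypothetical helper name, illustrative only:

static unsigned char bcn_filter_thold(int cqm_rssi_thold_dbm)
{
	/* 0 means "not configured", so fall back to the -70 dBm default */
	int thold = cqm_rssi_thold_dbm ? cqm_rssi_thold_dbm : -70;

	thold += 100;			/* dBm -> offset percentage */
	if (thold < 0)
		thold = 0;
	if (thold > 100)
		thold = 100;
	return (unsigned char)thold;	/* -70 dBm -> 30 */
}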
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 84e47c71ea12..e999c24e4634 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -29,6 +29,8 @@
#define BCN_FILTER_CONNECTION_LOSS 1
#define BCN_FILTER_CONNECTED 2
#define BCN_FILTER_NOTIFY_BEACON_LOSS 3
+#define RTW_DEFAULT_CQM_THOLD -70
+#define RTW_DEFAULT_CQM_HYST 4
#define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10)
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0c1c1ff31085..0dba8aae7716 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -943,6 +943,12 @@ static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
{
int ret = 0;
+ /* reset firmware if still present */
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8703B &&
+ rtw_read8_mask(rtwdev, REG_MCUFW_CTRL, BIT_RAM_DL_SEL)) {
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, 0x00);
+ }
+
en_download_firmware_legacy(rtwdev, true);
ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
en_download_firmware_legacy(rtwdev, false);
@@ -1033,14 +1039,15 @@ static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
msleep(20);
}
- /* priority queue is still not empty, throw a warning,
+	/* priority queue is still not empty, print a debug message
*
* Note that if we want to flush the tx queue when having a lot of
* traffic (ex, 100Mbps up), some of the packets could be dropped.
* And it requires like ~2secs to flush the full priority queue.
*/
if (!drop)
- rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
+ rtw_dbg(rtwdev, RTW_DBG_UNEXP,
+ "timed out to flush queue %d\n", prio_queue);
}
static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 7af5bf7fe5b6..0acebbfa13c4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -386,6 +386,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
+
+ rtw_fw_beacon_filter_config(rtwdev, true, vif);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index ffba6b88f392..7ab7a988b123 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -227,9 +227,6 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
- rtw_coex_wl_status_check(rtwdev);
- rtw_coex_query_bt_hid_list(rtwdev);
-
if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev, 0);
@@ -257,6 +254,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
/* make sure BB/RF is working for dynamic mech */
rtw_leave_lps(rtwdev);
+ rtw_coex_wl_status_check(rtwdev);
+ rtw_coex_query_bt_hid_list(rtwdev);
rtw_phy_dynamic_mechanism(rtwdev);
@@ -2204,6 +2203,7 @@ EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
+ bool sta_mode_only = rtwdev->hci.type == RTW_HCI_TYPE_SDIO;
struct rtw_hal *hal = &rtwdev->hal;
int max_tx_headroom = 0;
int ret;
@@ -2232,10 +2232,12 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ if (sta_mode_only)
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ else
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->available_antennas_tx = hal->antenna_tx;
hw->wiphy->available_antennas_rx = hal->antenna_rx;
@@ -2246,7 +2248,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
- if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
+ if (!sta_mode_only && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
hw->wiphy->iface_combinations = rtw_iface_combs;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw_iface_combs);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index e14d1da43940..49894331f7b4 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -187,6 +187,7 @@ enum rtw_chip_type {
RTW_CHIP_TYPE_8822C,
RTW_CHIP_TYPE_8723D,
RTW_CHIP_TYPE_8821C,
+ RTW_CHIP_TYPE_8703B,
};
enum rtw_tx_queue_type {
@@ -1700,11 +1701,13 @@ struct rtw_dm_info {
s8 delta_power_index[RTW_RF_PATH_MAX];
s8 delta_power_index_last[RTW_RF_PATH_MAX];
u8 default_ofdm_index;
+ u8 default_cck_index;
bool pwr_trk_triggered;
bool pwr_trk_init_trigger;
struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
s8 txagc_remnant_cck;
s8 txagc_remnant_ofdm;
+ u8 rx_cck_agc_report_type;
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 9986a4cb37eb..30232f7e3ec5 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -729,7 +729,8 @@ static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
}
if (!drop)
- rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
+ rtw_dbg(rtwdev, RTW_DBG_UNEXP,
+ "timed out to flush pci tx ring[%d]\n", pci_q);
}
static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
@@ -1612,7 +1613,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
- unsigned int flags = PCI_IRQ_LEGACY;
+ unsigned int flags = PCI_IRQ_INTX;
int ret;
if (!rtw_disable_msi)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
new file mode 100644
index 000000000000..8919f9e11f03
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -0,0 +1,2109 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include <linux/of_net.h>
+#include "main.h"
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rx.h"
+#include "rtw8703b.h"
+#include "rtw8703b_tables.h"
+#include "rtw8723x.h"
+
+#define BIT_MASK_TXQ_INIT (BIT(7))
+#define WLAN_RL_VAL 0x3030
+/* disable BAR */
+#define WLAN_BAR_VAL 0x0201ffff
+#define WLAN_PIFS_VAL 0
+#define WLAN_RX_PKT_LIMIT 0x18
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_SPEC_SIFS 0x100a
+#define WLAN_MAX_AGG_NR 0x1f
+#define WLAN_AMPDU_MAX_TIME 0x70
+
+/* unit is 32us */
+#define TBTT_PROHIBIT_SETUP_TIME 0x04
+#define TBTT_PROHIBIT_HOLD_TIME 0x80
+#define TBTT_PROHIBIT_HOLD_TIME_STOP_BCN 0x64
+
+/* raw pkt_stat->drv_info_sz is in units of 8 bytes */
+#define RX_DRV_INFO_SZ_UNIT_8703B 8
+
+#define TRANS_SEQ_END \
+ 0xFFFF, \
+ RTW_PWR_CUT_ALL_MSK, \
+ RTW_PWR_INTF_ALL_MSK, \
+ 0, \
+ RTW_PWR_CMD_END, 0, 0
+
+/* RSSI in percent (dBm = percent - 100) */
+/* These are used to select simple signal quality levels and might need
+ * tweaking. Same for the rf_para tables below.
+ */
+static const u8 wl_rssi_step_8703b[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8703b[] = {30, 30, 30, 30};
+static const struct coex_5g_afh_map afh_5g_8703b[] = { {0, 0, 0} };
+
+/* Actually decreasing wifi TX power/RX gain isn't implemented in
+ * rtw8703b, but hopefully adjusting the BT side helps.
+ */
+static const struct coex_rf_para rf_para_tx_8703b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 4},
+ {1, 2, true, 4},
+ {1, 10, true, 4},
+ {1, 15, true, 4}
+};
+
+static const struct coex_rf_para rf_para_rx_8703b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 5},
+ {1, 2, true, 5},
+ {1, 10, true, 5},
+ {1, 15, true, 5}
+};
+
+static const u32 rtw8703b_ofdm_swing_table[] = {
+ 0x0b40002d, /* 0, -15.0dB */
+ 0x0c000030, /* 1, -14.5dB */
+ 0x0cc00033, /* 2, -14.0dB */
+ 0x0d800036, /* 3, -13.5dB */
+ 0x0e400039, /* 4, -13.0dB */
+ 0x0f00003c, /* 5, -12.5dB */
+ 0x10000040, /* 6, -12.0dB */
+ 0x11000044, /* 7, -11.5dB */
+ 0x12000048, /* 8, -11.0dB */
+ 0x1300004c, /* 9, -10.5dB */
+ 0x14400051, /* 10, -10.0dB */
+ 0x15800056, /* 11, -9.5dB */
+ 0x16c0005b, /* 12, -9.0dB */
+ 0x18000060, /* 13, -8.5dB */
+ 0x19800066, /* 14, -8.0dB */
+ 0x1b00006c, /* 15, -7.5dB */
+ 0x1c800072, /* 16, -7.0dB */
+ 0x1e400079, /* 17, -6.5dB */
+ 0x20000080, /* 18, -6.0dB */
+ 0x22000088, /* 19, -5.5dB */
+ 0x24000090, /* 20, -5.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x288000a2, /* 22, -4.0dB */
+ 0x2ac000ab, /* 23, -3.5dB */
+ 0x2d4000b5, /* 24, -3.0dB */
+ 0x300000c0, /* 25, -2.5dB */
+ 0x32c000cb, /* 26, -2.0dB */
+ 0x35c000d7, /* 27, -1.5dB */
+ 0x390000e4, /* 28, -1.0dB */
+ 0x3c8000f2, /* 29, -0.5dB */
+ 0x40000100, /* 30, +0dB */
+ 0x43c0010f, /* 31, +0.5dB */
+ 0x47c0011f, /* 32, +1.0dB */
+ 0x4c000130, /* 33, +1.5dB */
+ 0x50800142, /* 34, +2.0dB */
+ 0x55400155, /* 35, +2.5dB */
+ 0x5a400169, /* 36, +3.0dB */
+ 0x5fc0017f, /* 37, +3.5dB */
+ 0x65400195, /* 38, +4.0dB */
+ 0x6b8001ae, /* 39, +4.5dB */
+ 0x71c001c7, /* 40, +5.0dB */
+ 0x788001e2, /* 41, +5.5dB */
+ 0x7f8001fe /* 42, +6.0dB */
+};
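The comments above encode a fixed mapping: the table covers -15.0 dB to +6.0 dB in 0.5 dB steps with index 30 at 0 dB, i.e. index = 30 + 2 * dB. A tiny standalone sketch of that relation (hypothetical helper, illustrative only):

/* half_db_steps is the desired swing in 0.5 dB units, e.g. -4 for -2.0 dB */
static int ofdm_swing_index(int half_db_steps)
{
	int idx = 30 + half_db_steps;

	if (idx < 0)
		idx = 0;
	if (idx > 42)
		idx = 42;
	return idx;	/* 0 = -15.0 dB, 30 = 0 dB, 42 = +6.0 dB */
}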
+
+static const u32 rtw8703b_cck_pwr_regs[] = {
+ 0x0a22, 0x0a23, 0x0a24, 0x0a25, 0x0a26, 0x0a27, 0x0a28, 0x0a29,
+ 0x0a9a, 0x0a9b, 0x0a9c, 0x0a9d, 0x0aa0, 0x0aa1, 0x0aa2, 0x0aa3,
+};
+
+static const u8 rtw8703b_cck_swing_table[][16] = {
+ {0x44, 0x42, 0x3C, 0x33, 0x28, 0x1C, 0x13, 0x0B, 0x05, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-16dB*/
+ {0x48, 0x46, 0x3F, 0x36, 0x2A, 0x1E, 0x14, 0x0B, 0x05, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/
+ {0x4D, 0x4A, 0x43, 0x39, 0x2C, 0x20, 0x15, 0x0C, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15dB*/
+ {0x51, 0x4F, 0x47, 0x3C, 0x2F, 0x22, 0x16, 0x0D, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/
+ {0x56, 0x53, 0x4B, 0x40, 0x32, 0x24, 0x17, 0x0E, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14dB*/
+ {0x5B, 0x58, 0x50, 0x43, 0x35, 0x26, 0x19, 0x0E, 0x07, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/
+ {0x60, 0x5D, 0x54, 0x47, 0x38, 0x28, 0x1A, 0x0F, 0x07, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13dB*/
+ {0x66, 0x63, 0x59, 0x4C, 0x3B, 0x2B, 0x1C, 0x10, 0x08, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/
+ {0x6C, 0x69, 0x5F, 0x50, 0x3F, 0x2D, 0x1E, 0x11, 0x08, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12dB*/
+ {0x73, 0x6F, 0x64, 0x55, 0x42, 0x30, 0x1F, 0x12, 0x08, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/
+ {0x79, 0x76, 0x6A, 0x5A, 0x46, 0x33, 0x21, 0x13, 0x09, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11dB*/
+ {0x81, 0x7C, 0x71, 0x5F, 0x4A, 0x36, 0x23, 0x14, 0x0A, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/
+ {0x88, 0x84, 0x77, 0x65, 0x4F, 0x39, 0x25, 0x15, 0x0A, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10dB*/
+ {0x90, 0x8C, 0x7E, 0x6B, 0x54, 0x3C, 0x27, 0x17, 0x0B, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/
+ {0x99, 0x94, 0x86, 0x71, 0x58, 0x40, 0x2A, 0x18, 0x0B, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9dB*/
+ {0xA2, 0x9D, 0x8E, 0x78, 0x5E, 0x43, 0x2C, 0x19, 0x0C, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/
+ {0xAC, 0xA6, 0x96, 0x7F, 0x63, 0x47, 0x2F, 0x1B, 0x0D, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8dB*/
+ {0xB6, 0xB0, 0x9F, 0x87, 0x69, 0x4C, 0x32, 0x1D, 0x0D, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/
+ {0xC1, 0xBA, 0xA8, 0x8F, 0x6F, 0x50, 0x35, 0x1E, 0x0E, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7dB*/
+ {0xCC, 0xC5, 0xB2, 0x97, 0x76, 0x55, 0x38, 0x20, 0x0F, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/
+ {0xD8, 0xD1, 0xBD, 0xA0, 0x7D, 0x5A, 0x3B, 0x22, 0x10, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} /*-6dB*/
+};
+
+#define RTW_OFDM_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_ofdm_swing_table)
+#define RTW_CCK_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_cck_swing_table)
+
+static const struct rtw_pwr_seq_cmd trans_pre_enable_8703b[] = {
+ /* set up external crystal (XTAL) */
+ {REG_PAD_CTRL1 + 2,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ /* set CLK_REQ to high active */
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ /* unlock ISO/CLK/power control register */
+ {REG_RSV_CTRL,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8703b[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8703b[] = {
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK | RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8703b[] = {
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0004,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0004,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ /* wait for power ready */
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0063,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0058,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x005A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0068,
+ RTW_PWR_CUT_TEST_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8703b[] = {
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_reset_mcu_8703b[] = {
+ {REG_SYS_FUNC_EN + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT_FEN_CPUEN, 0},
+ /* reset MCU ready */
+ {REG_MCUFW_CTRL,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ /* reset MCU IO wrapper */
+ {REG_RSV_CTRL + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {REG_RSV_CTRL + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 1},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_lps_8703b[] = {
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0xff},
+ {0x0522,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0xff},
+ {0x05f8,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05f9,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05fa,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05fb,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0100,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0x03},
+ {0x0101,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {0x0553,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8703b[] = {
+ trans_pre_enable_8703b,
+ trans_carddis_to_cardemu_8703b,
+ trans_cardemu_to_act_8703b,
+ NULL
+};
+
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8703b[] = {
+ trans_act_to_lps_8703b,
+ trans_act_to_reset_mcu_8703b,
+ trans_act_to_cardemu_8703b,
+ trans_cardemu_to_carddis_8703b,
+ NULL
+};
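For orientation, each table above is a NULL-terminated list of register commands executed in order and closed by TRANS_SEQ_END. How one entry reads, with the field meaning inferred from the values used here and from other rtw88 chips, so treat it as an assumption rather than a definition:

/* Assumed interpretation, for illustration only:
 *   {0x0005, RTW_PWR_CUT_ALL_MSK, RTW_PWR_INTF_ALL_MSK, RTW_PWR_ADDR_MAC,
 *    RTW_PWR_CMD_WRITE, BIT(7), 0}
 * means: on every chip cut and host interface, read MAC register 0x0005,
 * clear bit 7 (mask BIT(7), value 0) and write it back -- here, leaving
 * card-disable. The card-disable sequence sets the same bit again.
 * RTW_PWR_CMD_POLLING entries wait until the masked bits reach the given
 * value, and RTW_PWR_CMD_DELAY entries pause for the given time.
 */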
+
+static const struct rtw_rfe_def rtw8703b_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8703b_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8703b_txpwr_lmt_tbl,},
+};
+
+static const struct rtw_page_table page_table_8703b[] = {
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+};
+
+static const struct rtw_rqpn rqpn_table_8703b[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+/* Default power index table for RTL8703B, used if EFUSE does not
+ * contain valid data. Replaces EFUSE data from offset 0x10 (start of
+ * txpwr_idx_table).
+ */
+static const u8 rtw8703b_txpwr_idx_table[] = {
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
+};
+
+static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
+{
+ struct device_node *node = rtwdev->dev->of_node;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int ret;
+
+ if (node) {
+ ret = of_get_mac_address(node, efuse->addr);
+ if (ret == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "got wifi mac address from DT: %pM\n",
+ efuse->addr);
+ }
+ }
+}
+
+#define DBG_EFUSE_FIX(rtwdev, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
+ # name "=0x%x\n", rtwdev->efuse.name)
+
+static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 *pwr = (u8 *)efuse->txpwr_idx_table;
+ bool valid = false;
+ int ret;
+
+ ret = rtw8723x_read_efuse(rtwdev, log_map);
+ if (ret != 0)
+ return ret;
+
+ if (!is_valid_ether_addr(efuse->addr))
+ try_mac_from_devicetree(rtwdev);
+
+ /* If TX power index table in EFUSE is invalid, fall back to
+ * built-in table.
+ */
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
+ if (pwr[i] != 0xff) {
+ valid = true;
+ break;
+ }
+ if (!valid) {
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
+ pwr[i] = rtw8703b_txpwr_idx_table[i];
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "Replaced invalid EFUSE TX power index table.");
+ rtw8723x_debug_txpwr_limit(rtwdev,
+ efuse->txpwr_idx_table, 2);
+ }
+
+ /* Override invalid antenna settings. */
+ if (efuse->bt_setting == 0xff) {
+ /* shared antenna */
+ efuse->bt_setting |= BIT(0);
+ /* RF path A */
+ efuse->bt_setting &= ~BIT(6);
+ DBG_EFUSE_FIX(rtwdev, bt_setting);
+ }
+
+ /* Override invalid board options: The coex code incorrectly
+ * assumes that if bits 6 & 7 are set the board doesn't
+ * support coex. Regd is also derived from rf_board_option and
+ * should be 0 if there's no valid data.
+ */
+ if (efuse->rf_board_option == 0xff) {
+ efuse->regd = 0;
+ efuse->rf_board_option &= GENMASK(5, 0);
+ DBG_EFUSE_FIX(rtwdev, rf_board_option);
+ }
+
+	/* Override invalid crystal cap setting; the default comes from
+	 * the vendor driver and is chip specific.
+	 */
+ if (efuse->crystal_cap == 0xff) {
+ efuse->crystal_cap = 0x20;
+ DBG_EFUSE_FIX(rtwdev, crystal_cap);
+ }
+
+ return 0;
+}
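Unprogrammed EFUSE bytes read back as 0xff, which is why the loop in rtw8703b_read_efuse() above accepts the table as soon as any byte differs from 0xff. The same test as a standalone sketch (hypothetical helper, illustrative only):

#include <stdbool.h>

static bool txpwr_table_valid(const unsigned char *pwr, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		if (pwr[i] != 0xff)	/* any programmed byte makes it valid */
			return true;
	return false;
}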
+
+static void rtw8703b_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+	/* TODO: The vendor driver selects these using tables in
+	 * halrf_powertracking_ce.c; the functions are called
+	 * get_swing_index and get_cck_swing_index. There, the fixed
+	 * values used here are only the defaults applied when no
+	 * match is found.
+ */
+ dm_info->default_ofdm_index = 30;
+ dm_info->default_cck_index = 20;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+ dm_info->txagc_remnant_cck = 0;
+ dm_info->txagc_remnant_ofdm = 0;
+}
+
+static void rtw8703b_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u8 xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+
+ /* power on BB/RF domain */
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_EN_25_1 | BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, 0x0780);
+ rtw_write8(rtwdev, REG_AFE_CTRL1 + 1, 0x80);
+
+ rtw_phy_load_tables(rtwdev);
+
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_RCR_ADF);
+	/* 0xff is from the vendor driver; rtw8723d uses
+	 * BIT_HIQ_NO_LMT_EN_ROOT. The vendor driver comment says
+	 * "Packet in Hi Queue Tx immediately". It's unclear whether
+	 * setting all bits is really necessary.
+	 */
+ rtw_write8_set(rtwdev, REG_HIQ_NO_LMT_EN, 0xff);
+ rtw_write16_set(rtwdev, REG_AFE_CTRL_4, BIT_CK320M_AFE_EN | BIT_EN_SYN);
+
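+	/* Note: the same crystal cap value appears to be written to
+	 * both halves of the XTAL field (bits 5:0 and 11:6).
+	 */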
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+ rtw_write32_set(rtwdev, REG_FPGA0_RFMOD, BIT_CCKEN | BIT_OFDMEN);
+
+ /* Init EDCA */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS, WLAN_SPEC_SIFS); /* CCK */
+ rtw_write16(rtwdev, REG_SIFS + 2, WLAN_SPEC_SIFS); /* OFDM */
+ /* TXOP */
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F);
+
+ /* Init retry */
+ rtw_write8(rtwdev, REG_ACKTO, 0x40);
+
+ /* Set up RX aggregation. sdio.c also sets DMA mode, but not
+ * the burst parameters.
+ */
+ rtw_write8(rtwdev, REG_RXDMA_MODE,
+ BIT_DMA_MODE |
+ FIELD_PREP_CONST(BIT_MASK_AGG_BURST_NUM, AGG_BURST_NUM) |
+ FIELD_PREP_CONST(BIT_MASK_AGG_BURST_SIZE, AGG_BURST_SIZE));
+
+ /* Init beacon parameters */
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | BIT_EN_BCN_FUNCTION | BIT_EN_TXBCN_RPT);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT, TBTT_PROHIBIT_SETUP_TIME);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 1,
+ TBTT_PROHIBIT_HOLD_TIME_STOP_BCN & 0xFF);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 2,
+ (rtw_read8(rtwdev, REG_TBTT_PROHIBIT + 2) & 0xF0)
+ | (TBTT_PROHIBIT_HOLD_TIME_STOP_BCN >> 8));
+
+ /* configure packet burst */
+ rtw_write8_set(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RX_PKT_LIMIT);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, WLAN_MAX_AGG_NR);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_VAL);
+ rtw_write8_clr(rtwdev, REG_FWHW_TXQ_CTRL, BIT_MASK_TXQ_INIT);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, WLAN_AMPDU_MAX_TIME);
+
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, WLAN_RL_VAL);
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, WLAN_BAR_VAL);
+ rtw_write16(rtwdev, REG_ATIMWND, 0x2);
+
+ rtw_phy_init(rtwdev);
+
+ if (rtw_read32_mask(rtwdev, REG_BB_AMP, BIT_MASK_RX_LNA) != 0) {
+ rtwdev->dm_info.rx_cck_agc_report_type = 1;
+ } else {
+ rtwdev->dm_info.rx_cck_agc_report_type = 0;
+ rtw_warn(rtwdev, "unexpected cck agc report type");
+ }
+
+ rtw8723x_lck(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
+
+ rtw8703b_pwrtrack_init(rtwdev);
+}
+
+static bool rtw8703b_check_spur_ov_thres(struct rtw_dev *rtwdev,
+ u32 freq, u32 thres)
+{
+ bool ret = false;
+
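+	/* Kick off a PSD (power spectral density) measurement at the
+	 * given frequency; a spur is assumed to be present if the
+	 * report exceeds the threshold.
+	 */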
+ rtw_write32(rtwdev, REG_ANALOG_P4, DIS_3WIRE);
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_PSDFN, START_PSD | freq);
+
+ msleep(30);
+ if (rtw_read32(rtwdev, REG_PSDRPT) >= thres)
+ ret = true;
+
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_ANALOG_P4, EN_3WIRE);
+
+ return ret;
+}
+
+static void rtw8703b_cfg_notch(struct rtw_dev *rtwdev, u8 channel, bool notch)
+{
+ if (!notch) {
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x1f);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ return;
+ }
+
+ switch (channel) {
+ case 5:
+ fallthrough;
+ case 13:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xb);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x06000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 6:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x4);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000600);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 7:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x3);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x06000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 8:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xa);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000380);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 14:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x5);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00180000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ default:
+ rtw_warn(rtwdev,
+ "Bug: Notch filter enable called for channel %u!",
+ channel);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ break;
+ }
+}
+
+static void rtw8703b_spur_cal(struct rtw_dev *rtwdev, u8 channel)
+{
+ bool notch;
+ u32 freq;
+
+ if (channel == 5) {
+ freq = FREQ_CH5;
+ } else if (channel == 6) {
+ freq = FREQ_CH6;
+ } else if (channel == 7) {
+ freq = FREQ_CH7;
+ } else if (channel == 8) {
+ freq = FREQ_CH8;
+ } else if (channel == 13) {
+ freq = FREQ_CH13;
+ } else if (channel == 14) {
+ freq = FREQ_CH14;
+ } else {
+ rtw8703b_cfg_notch(rtwdev, channel, false);
+ return;
+ }
+
+ notch = rtw8703b_check_spur_ov_thres(rtwdev, freq, SPUR_THRES);
+ rtw8703b_cfg_notch(rtwdev, channel, notch);
+}
+
+static void rtw8703b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u32 rf_cfgch_a;
+ u32 rf_cfgch_b;
+ /* default value for 20M */
+ u32 rf_rck = 0x00000C08;
+
+ rf_cfgch_a = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+ rf_cfgch_b = rtw_read_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_b &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_a |= (channel & RFCFGCH_CHANNEL_MASK);
+ rf_cfgch_b |= (channel & RFCFGCH_CHANNEL_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_BW_MASK;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rf_cfgch_a |= RFCFGCH_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_cfgch_a |= RFCFGCH_BW_40M;
+ rf_rck = 0x00000C4C;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_cfgch_a);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_cfgch_b);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK1, RFREG_MASK, rf_rck);
+ rtw8703b_spur_cal(rtwdev, channel);
+}
+
+#define CCK_DFIR_NR_8703B 2
+static const struct rtw_backup_info cck_dfir_cfg[][CCK_DFIR_NR_8703B] = {
+ [0] = {
+ { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x5A7DA0BD },
+ { .len = 4, .reg = REG_CCK_DBG, .val = 0x0000223B },
+ },
+ [1] = {
+ { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x00000000 },
+ { .len = 4, .reg = REG_CCK_DBG, .val = 0x00000000 },
+ },
+};
+
+static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ const struct rtw_backup_info *cck_dfir;
+ int i;
+
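+	/* Channels 1-13 use the first CCK DFIR entry, channel 14 the
+	 * zeroed second entry.
+	 */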
+ cck_dfir = channel <= 13 ? cck_dfir_cfg[0] : cck_dfir_cfg[1];
+
+ for (i = 0; i < CCK_DFIR_NR_8703B; i++, cck_dfir++)
+ rtw_write32(rtwdev, cck_dfir->reg, cck_dfir->val);
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
+ GENMASK(31, 20), 0x0);
+ rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
+ rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32(rtwdev, REG_BBRX_DFIR, 0x40100000);
+ rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x51F60000);
+ rtw_write32_mask(rtwdev, REG_CCK0_SYS, BIT_CCK_SIDE_BAND,
+ primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, 0xC00,
+ primary_ch_idx == RTW_SC_20_UPPER ? 2 : 1);
+
+ rtw_write32_mask(rtwdev, REG_BB_PWR_SAV5_11N, GENMASK(27, 26),
+ primary_ch_idx == RTW_SC_20_UPPER ? 1 : 2);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8703b_set_channel(struct rtw_dev *rtwdev, u8 channel,
+ u8 bw, u8 primary_chan_idx)
+{
+ rtw8703b_set_channel_rf(rtwdev, channel, bw);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8703b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+}
+
+/* Not all indices are valid, based on available data. None of the
+ * known valid values are positive, so use 0x7f as "invalid".
+ */
+#define LNA_IDX_INVALID 0x7f
+static const s8 lna_gain_table[16] = {
+ -2, LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID,
+ -6, LNA_IDX_INVALID, LNA_IDX_INVALID, -19,
+ -32, LNA_IDX_INVALID, -36, -42,
+ LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID, -48,
+};
+
+static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
+{
+ s8 lna_gain = 0;
+
+ if (lna_idx < ARRAY_SIZE(lna_gain_table))
+ lna_gain = lna_gain_table[lna_idx];
+
+ if (lna_gain >= 0) {
+ rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ return -120;
+ }
+
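+	/* Each VGA step adds 2 dB of attenuation, e.g. lna_idx 8
+	 * (-32 dB) with vga_idx 10 yields -52 dBm.
+	 */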
+ return lna_gain - 2 * vga_idx;
+}
+
+static void query_phy_status_cck(struct rtw_dev *rtwdev, u8 *phy_raw,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw;
+ u8 vga_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & VGA_BITS;
+ u8 lna_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & LNA_L_BITS;
+ s8 rx_power;
+
+ if (rtwdev->dm_info.rx_cck_agc_report_type == 1)
+ lna_idx = FIELD_PREP(BIT_LNA_H_MASK,
+ phy_status->cck_rpt_b_ofdm_cfosho_b & LNA_H_BIT)
+ | FIELD_PREP(BIT_LNA_L_MASK, lna_idx);
+ else
+ lna_idx = FIELD_PREP(BIT_LNA_L_MASK, lna_idx);
+ rx_power = get_cck_rx_pwr(rtwdev, lna_idx, vga_idx);
+
+ pkt_stat->rx_power[RF_PATH_A] = rx_power;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ rtwdev->dm_info.rssi[RF_PATH_A] = pkt_stat->rssi;
+ pkt_stat->signal_power = rx_power;
+}
+
+static void query_phy_status_ofdm(struct rtw_dev *rtwdev, u8 *phy_raw,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 val_s8;
+
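+	/* The AGC gain report is in 2 dB steps; convert it to dBm
+	 * with a -110 offset.
+	 */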
+ val_s8 = phy_status->path_agc[RF_PATH_A].gain & 0x3F;
+ pkt_stat->rx_power[RF_PATH_A] = (val_s8 * 2) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->rx_snr[RF_PATH_A] = (s8)(phy_status->path_rxsnr[RF_PATH_A] / 2);
+
+ /* signal power reported by HW */
+ val_s8 = phy_status->cck_sig_qual_ofdm_pwdb_all >> 1;
+ pkt_stat->signal_power = (val_s8 & 0x7f) - 110;
+
+ pkt_stat->rx_evm[RF_PATH_A] = phy_status->stream_rxevm[RF_PATH_A];
+ pkt_stat->cfo_tail[RF_PATH_A] = phy_status->path_cfotail[RF_PATH_A];
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ dm_info->rx_snr[RF_PATH_A] = pkt_stat->rx_snr[RF_PATH_A] >> 1;
+ /* convert to KHz (used only for debugfs) */
+ dm_info->cfo_tail[RF_PATH_A] = (pkt_stat->cfo_tail[RF_PATH_A] * 5) >> 1;
+
+ /* (EVM value as s8 / 2) is dbm, should usually be in -33 to 0
+ * range. rx_evm_dbm needs the absolute (positive) value.
+ */
+ val_s8 = (s8)pkt_stat->rx_evm[RF_PATH_A];
+ val_s8 = clamp_t(s8, -val_s8 >> 1, 0, 64);
+ val_s8 &= 0x3F; /* 64->0: second path of 1SS rate is 64 */
+ dm_info->rx_evm_dbm[RF_PATH_A] = val_s8;
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ if (pkt_stat->rate <= DESC_RATE11M)
+ query_phy_status_cck(rtwdev, phy_status, pkt_stat);
+ else
+ query_phy_status_ofdm(rtwdev, phy_status, pkt_stat);
+}
+
+static void rtw8703b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = 0;
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ pkt_stat->drv_info_sz *= RX_DRV_INFO_SZ_UNIT_8703B;
+
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+
+ pkt_stat->bw = GET_RX_DESC_BW(rx_desc);
+
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+
+ /* Rtl8723cs driver checks for size < 14 or size > 8192 and
+ * simply drops the packet. Maybe this should go into
+ * rtw_rx_fill_rx_status()?
+ */
+ if (pkt_stat->pkt_len == 0) {
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+ rtw_dbg(rtwdev, RTW_DBG_RX, "zero length packet");
+ }
+}
+
+#define ADDA_ON_VAL_8703B 0x03c00014
+
+static
+void rtw8703b_iqk_config_mac(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[0], 0x3F);
+ for (int i = 1; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i],
+ backup->mac8[i] & (~BIT(3)));
+}
+
+#define IQK_LTE_WRITE_VAL_8703B 0x00007700
+#define IQK_DELAY_TIME_8703B 4
+
+static void rtw8703b_iqk_one_shot(struct rtw_dev *rtwdev, bool tx)
+{
+ u32 regval;
+ ktime_t t;
+ s64 dur;
+ int ret;
+
+ /* enter IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B);
+
+ /* One shot, LOK & IQK */
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf9000000);
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf8000000);
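+	/* The 0xf9... -> 0xf8... sequence toggles bit 24, presumably
+	 * to latch the one-shot trigger.
+	 */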
+
+ t = ktime_get();
+ msleep(IQK_DELAY_TIME_8703B);
+ ret = read_poll_timeout(rtw_read32, regval, regval != 0, 1000,
+ 100000, false, rtwdev,
+ REG_IQK_RDY);
+ dur = ktime_us_delta(ktime_get(), t);
+
+ if (ret)
+ rtw_warn(rtwdev, "[IQK] %s timed out after %lldus!\n",
+ tx ? "TX" : "RX", dur);
+ else
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] %s done after %lldus\n",
+ tx ? "TX" : "RX", dur);
+}
+
+static void rtw8703b_iqk_txrx_path_post(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup);
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x0);
+}
+
+static u8 rtw8703b_iqk_check_tx_failed(struct rtw_dev *rtwdev)
+{
+ s32 tx_x, tx_y;
+ u32 tx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe94 = 0x%x, 0xe9c = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_TX),
+ rtw_read32(rtwdev, REG_IQK_RES_TY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RDY),
+ rtw_read32(rtwdev, 0xe98));
+
+ tx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_TX_FAIL);
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ if (!tx_fail && tx_x != IQK_TX_X_ERR && tx_y != IQK_TX_Y_ERR)
+ return IQK_TX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A TX IQK failed\n");
+
+ return 0;
+}
+
+static u8 rtw8703b_iqk_check_rx_failed(struct rtw_dev *rtwdev)
+{
+ s32 rx_x, rx_y;
+ u32 rx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xea4 = 0x%x, 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RX),
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xea0(before IQK) = 0x%x, 0xea8(after IQK) = 0x%x\n",
+ rtw_read32(rtwdev, 0xea0),
+ rtw_read32(rtwdev, 0xea8));
+
+ rx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_RX_FAIL);
+ rx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX);
+ rx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY);
+ rx_y = abs(iqkxy_to_s32(rx_y));
+
+ if (!rx_fail && rx_x != IQK_RX_X_ERR && rx_y != IQK_RX_Y_ERR &&
+ rx_x < IQK_RX_X_UPPER && rx_x > IQK_RX_X_LOWER &&
+ rx_y < IQK_RX_Y_LMT)
+ return IQK_RX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A RX IQK failed\n");
+
+ return 0;
+}
+
+static u8 rtw8703b_iqk_tx_path(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u8 status;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK!\n");
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x00462911);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+
+ /* PA, PAD setting */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x7);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x7f, RFREG_MASK, 0xd400);
+
+ rtw8703b_iqk_one_shot(rtwdev, true);
+ status = rtw8703b_iqk_check_tx_failed(rtwdev);
+
+ rtw8703b_iqk_txrx_path_post(rtwdev, backup);
+
+ return status;
+}
+
+static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u8 status;
+ u32 tx_x, tx_y;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 1!\n");
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK1 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, 0x99000000);
+
+ /* disable IQC mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+
+ /* path IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LOK setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a911);
+
+ /* RX IQK mode */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0x57db7);
+
+ rtw8703b_iqk_one_shot(rtwdev, true);
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+ status = rtw8703b_iqk_check_tx_failed(rtwdev);
+
+ if (!status)
+ goto restore;
+
+ /* second round */
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ rtw_write32(rtwdev, REG_TXIQK_11N, BIT_SET_TXIQK_11N(tx_x, tx_y));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe40 = 0x%x u4tmp = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQK_11N),
+ BIT_SET_TXIQK_11N(tx_x, tx_y));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 2!\n");
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK 2 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160c1f);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a8d1);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+ /* modify RX IQK mode table */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1);
+ /* RF_RCK_OS, RF_TXPA_G1, RF_TXPA_G2 */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0xf7d77);
+
+ /* PA, PAD setting */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x5);
+
+ rtw8703b_iqk_one_shot(rtwdev, false);
+ status |= rtw8703b_iqk_check_rx_failed(rtwdev);
+
+restore:
+ rtw8703b_iqk_txrx_path_post(rtwdev, backup);
+
+ return status;
+}
+
+static
+void rtw8703b_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u32 i;
+ u8 a_ok;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t);
+
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8703B);
+ rtw8703b_iqk_config_mac(rtwdev, backup);
+ rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf);
+ rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05600);
+ rtw_write32(rtwdev, REG_TRMUX_11N, 0x000800e4);
+ rtw_write32(rtwdev, REG_BB_PWR_SAV1_11N, 0x25204000);
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ a_ok = rtw8703b_iqk_tx_path(rtwdev, backup);
+ if (a_ok == IQK_TX_OK) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path A TX IQK success!\n");
+ result[t][IQK_S1_TX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TX,
+ BIT_MASK_RES_TX);
+ result[t][IQK_S1_TX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TY,
+ BIT_MASK_RES_TY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK fail!\n");
+ result[t][IQK_S1_TX_X] = 0x100;
+ result[t][IQK_S1_TX_Y] = 0x0;
+ }
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ a_ok = rtw8703b_iqk_rx_path(rtwdev, backup);
+ if (a_ok == (IQK_TX_OK | IQK_RX_OK)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path A RX IQK success!\n");
+ result[t][IQK_S1_RX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RX,
+ BIT_MASK_RES_RX);
+ result[t][IQK_S1_RX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RY,
+ BIT_MASK_RES_RY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK fail!\n");
+ result[t][IQK_S1_RX_X] = 0x100;
+ result[t][IQK_S1_RX_Y] = 0x0;
+ }
+
+ if (a_ok == 0x0)
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A IQK fail!\n");
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+}
+
+static
+void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
+{
+ u32 tmp_rx_iqi = 0x40000100 & GENMASK(31, 16);
+ s32 tx1_a, tx1_a_ext;
+ s32 tx1_c, tx1_c_ext;
+ s32 oldval_1;
+ s32 x, y;
+
+ if (result[IQK_S1_TX_X] == 0)
+ return;
+
+ oldval_1 = rtw_read32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_D);
+
+ x = iqkxy_to_s32(result[IQK_S1_TX_X]);
+ tx1_a = iqk_mult(x, oldval_1, &tx1_a_ext);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_A, tx1_a);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_A, tx1_a_ext);
+
+ y = iqkxy_to_s32(result[IQK_S1_TX_Y]);
+ tx1_c = iqk_mult(y, oldval_1, &tx1_c_ext);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ BIT_SET_TXIQ_ELM_C1(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_C, BIT_SET_TXIQ_ELM_C2(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_C, tx1_c_ext);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] X = 0x%x, TX1_A = 0x%x, oldval_1 0x%x\n",
+ x, tx1_a, oldval_1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Y = 0x%x, TX1_C = 0x%x\n", y, tx1_c);
+
+ if (result[IQK_S1_RX_X] == 0)
+ return;
+
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]);
+ rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
+ rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
+ BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
+}
+
+static void rtw8703b_phy_calibration(struct rtw_dev *rtwdev)
+{
+	/* For some reason path A is called S1 and path B is called S0
+	 * in the shared rtw88 calibration data.
+	 */
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw8723x_iqk_backup_regs backup;
+ u8 final_candidate = IQK_ROUND_INVALID;
+ s32 result[IQK_ROUND_SIZE][IQK_NR];
+ bool good;
+ u8 i, j;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Start!\n");
+
+ memset(result, 0, sizeof(result));
+
+ rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_backup_regs(rtwdev, &backup);
+
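+	/* Run up to three calibration rounds; the first pair of rounds
+	 * with matching results selects the final candidate, otherwise
+	 * the hybrid result is used as a fallback if it is non-zero.
+	 */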
+ for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) {
+ rtw8723x_iqk_config_path_ctrl(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B);
+
+ rtw8703b_iqk_one_round(rtwdev, result, i, &backup);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] back to BB mode, load original values!\n");
+ if (i > IQK_ROUND_0)
+ rtw8723x_iqk_restore_regs(rtwdev, &backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup);
+
+ for (j = IQK_ROUND_0; j < i; j++) {
+ good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i);
+
+ if (good) {
+ final_candidate = j;
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] cmp %d:%d final_candidate is %x\n",
+ j, i, final_candidate);
+ goto iqk_done;
+ }
+ }
+ }
+
+ if (final_candidate == IQK_ROUND_INVALID) {
+ s32 reg_tmp = 0;
+
+ for (i = 0; i < IQK_NR; i++)
+ reg_tmp += result[IQK_ROUND_HYBRID][i];
+
+ if (reg_tmp != 0) {
+ final_candidate = IQK_ROUND_HYBRID;
+ } else {
+ WARN(1, "IQK failed\n");
+ goto out;
+ }
+ }
+
+iqk_done:
+ /* only path A is calibrated in rtl8703b */
+ rtw8703b_iqk_fill_a_matrix(rtwdev, result[final_candidate]);
+
+ dm_info->iqk.result.s1_x = result[final_candidate][IQK_S1_TX_X];
+ dm_info->iqk.result.s1_y = result[final_candidate][IQK_S1_TX_Y];
+ dm_info->iqk.result.s0_x = result[final_candidate][IQK_S0_TX_X];
+ dm_info->iqk.result.s0_y = result[final_candidate][IQK_S0_TX_Y];
+ dm_info->iqk.done = true;
+
+out:
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup.bb_sel_btg);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] final_candidate is %x\n",
+ final_candidate);
+
+ for (i = IQK_ROUND_0; i < IQK_ROUND_SIZE; i++)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Result %u: rege94_s1=%x rege9c_s1=%x regea4_s1=%x regeac_s1=%x rege94_s0=%x rege9c_s0=%x regea4_s0=%x regeac_s0=%x %s\n",
+ i,
+ result[i][0], result[i][1], result[i][2], result[i][3],
+ result[i][4], result[i][5], result[i][6], result[i][7],
+ final_candidate == i ? "(final candidate)" : "");
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xc80 = 0x%x 0xc94 = 0x%x 0xc14 = 0x%x 0xca0 = 0x%x\n",
+ rtw_read32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE),
+ rtw_read32(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N),
+ rtw_read32(rtwdev, REG_A_RXIQI),
+ rtw_read32(rtwdev, REG_RXIQK_MATRIX_LSB_11N));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xcd0 = 0x%x 0xcd4 = 0x%x 0xcd8 = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQ_AB_S0),
+ rtw_read32(rtwdev, REG_TXIQ_CD_S0),
+ rtw_read32(rtwdev, REG_RXIQ_AB_S0));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Finished.\n");
+}
+
+static void rtw8703b_set_iqk_matrix_by_result(struct rtw_dev *rtwdev,
+ u32 ofdm_swing, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 ele_A, ele_D, ele_C;
+ s32 ele_A_ext, ele_C_ext, ele_D_ext;
+ s32 iqk_result_x;
+ s32 iqk_result_y;
+ s32 value32;
+
+ switch (rf_path) {
+ default:
+ case RF_PATH_A:
+ iqk_result_x = dm_info->iqk.result.s1_x;
+ iqk_result_y = dm_info->iqk.result.s1_y;
+ break;
+ case RF_PATH_B:
+ iqk_result_x = dm_info->iqk.result.s0_x;
+ iqk_result_y = dm_info->iqk.result.s0_y;
+ break;
+ }
+
+ /* new element D */
+ ele_D = OFDM_SWING_D(ofdm_swing);
+ iqk_mult(iqk_result_x, ele_D, &ele_D_ext);
+ /* new element A */
+ iqk_result_x = iqkxy_to_s32(iqk_result_x);
+ ele_A = iqk_mult(iqk_result_x, ele_D, &ele_A_ext);
+ /* new element C */
+ iqk_result_y = iqkxy_to_s32(iqk_result_y);
+ ele_C = iqk_mult(iqk_result_y, ele_D, &ele_C_ext);
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ /* write new elements A, C, D, and element B is always 0 */
+ value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D);
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, value32);
+ value32 = BIT_SET_TXIQ_ELM_C1(ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ value32);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ value32 |= BIT_SET_OFDM0_EXTS(ele_A_ext, ele_C_ext, ele_D_ext);
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ /* write new elements A, C, D, and element B is always 0 */
+ value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D);
+ rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, value32);
+ value32 = BIT_SET_TXIQ_ELM_C1(ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS,
+ value32);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS_B;
+ value32 |= BIT_SET_OFDM0_EXTS_B(ele_A_ext, ele_C_ext, ele_D_ext);
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+ }
+}
+
+static void rtw8703b_set_iqk_matrix(struct rtw_dev *rtwdev, s8 ofdm_index,
+ u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 value32;
+ u32 ofdm_swing;
+
+ ofdm_index = clamp_t(s8, ofdm_index, 0, RTW_OFDM_SWING_TABLE_SIZE - 1);
+
+ ofdm_swing = rtw8703b_ofdm_swing_table[ofdm_index];
+
+ if (dm_info->iqk.done) {
+ rtw8703b_set_iqk_matrix_by_result(rtwdev, ofdm_swing, rf_path);
+ return;
+ }
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, ofdm_swing);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ 0x00);
+
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, ofdm_swing);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS,
+ 0x00);
+
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS_B;
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+ }
+}
+
+static void rtw8703b_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_ofdm = txagc_idx;
+
+ /* Only path A is calibrated for rtl8703b */
+ rtw8703b_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A);
+}
+
+static void rtw8703b_pwrtrack_set_cck_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_cck = txagc_idx;
+
+ swing_idx = clamp_t(s8, swing_idx, 0, RTW_CCK_SWING_TABLE_SIZE - 1);
+
+ BUILD_BUG_ON(ARRAY_SIZE(rtw8703b_cck_pwr_regs)
+ != ARRAY_SIZE(rtw8703b_cck_swing_table[0]));
+
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_cck_pwr_regs); i++)
+ rtw_write8(rtwdev, rtw8703b_cck_pwr_regs[i],
+ rtw8703b_cck_swing_table[swing_idx][i]);
+}
+
+static void rtw8703b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 limit_ofdm;
+ u8 limit_cck = 21;
+ s8 final_ofdm_swing_index;
+ s8 final_cck_swing_index;
+
+ limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev);
+
+ final_ofdm_swing_index = dm_info->default_ofdm_index +
+ dm_info->delta_power_index[path];
+ final_cck_swing_index = dm_info->default_cck_index +
+ dm_info->delta_power_index[path];
+
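+	/* Swing indices beyond the table limits are clamped, with the
+	 * overflow carried over as a TX AGC remnant.
+	 */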
+ if (final_ofdm_swing_index > limit_ofdm)
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, limit_ofdm,
+ final_ofdm_swing_index - limit_ofdm);
+ else if (final_ofdm_swing_index < 0)
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, 0,
+ final_ofdm_swing_index);
+ else
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, final_ofdm_swing_index, 0);
+
+ if (final_cck_swing_index > limit_cck)
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, limit_cck,
+ final_cck_swing_index - limit_cck);
+ else if (final_cck_swing_index < 0)
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, 0,
+ final_cck_swing_index);
+ else
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, final_cck_swing_index, 0);
+
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+}
+
+static void rtw8703b_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, delta, path;
+ bool do_iqk = false;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[0] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev);
+
+ if (do_iqk)
+ rtw8723x_lck(rtwdev);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ s8 delta_cur, delta_last;
+
+ delta_last = dm_info->delta_power_index[path];
+ delta_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table,
+ path, RF_PATH_A, delta);
+ if (delta_last == delta_cur)
+ continue;
+
+ dm_info->delta_power_index[path] = delta_cur;
+ rtw8703b_pwrtrack_set(rtwdev, path);
+ }
+
+ rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
+
+iqk:
+ if (do_iqk)
+ rtw8703b_phy_calibration(rtwdev);
+}
+
+static void rtw8703b_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0) {
+ rtw_warn(rtwdev, "unsupported power track type");
+ return;
+ }
+
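+	/* The first call only arms the thermal meter; the measurement
+	 * is evaluated on the next call.
+	 */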
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8703b_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8703b_coex_set_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8703b_coex_set_gnt_debug(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8703b_coex_set_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_exist = false;
+ coex_rfe->ant_switch_with_bt = false;
+ coex_rfe->ant_switch_diversity = false;
+ coex_rfe->wlg_at_btg = true;
+
+ /* disable LTE coex on wifi side */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff);
+}
+
+static void rtw8703b_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+}
+
+static void rtw8703b_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+}
+
+static const u8 rtw8703b_pwrtrk_2gb_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2gb_p[] = {
+ 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15
+};
+
+static const u8 rtw8703b_pwrtrk_2ga_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2ga_p[] = {
+ 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_b_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_b_p[] = {
+ 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_a_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_a_p[] = {
+ 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const s8 rtw8703b_pwrtrk_xtal_n[] = {
+ 0, 0, 0, -1, -1, -1, -1, -2, -2, -2, -3, -3, -3, -3, -3,
+ -4, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1
+};
+
+static const s8 rtw8703b_pwrtrk_xtal_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 1, 0, -1, -1, -1,
+ -2, -3, -7, -9, -10, -11, -14, -16, -18, -20, -22, -24, -26, -28, -30
+};
+
+static const struct rtw_pwr_track_tbl rtw8703b_rtw_pwr_track_tbl = {
+ .pwrtrk_2gb_n = rtw8703b_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8703b_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8703b_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8703b_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8703b_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8703b_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8703b_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8703b_pwrtrk_2g_cck_a_p,
+ .pwrtrk_xtal_n = rtw8703b_pwrtrk_xtal_n,
+ .pwrtrk_xtal_p = rtw8703b_pwrtrk_xtal_p,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8703b[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a5555, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5a5a5aaa},
+ {0x66555555, 0x6aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x5a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655},
+ {0x66556aaa, 0x6a5a6aaa}, /* case-30 */
+ {0xffffffff, 0x5aaa5aaa},
+ {0x56555555, 0x5a5a5aaa},
+};
+
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8703b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x10} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} },
+ { {0x61, 0x08, 0x03, 0x11, 0x11} },
+};
+
+static struct rtw_chip_ops rtw8703b_ops = {
+ .mac_init = rtw8723x_mac_init,
+ .dump_fw_crash = NULL,
+ .shutdown = NULL,
+ .read_efuse = rtw8703b_read_efuse,
+ .phy_set_param = rtw8703b_phy_set_param,
+ .set_channel = rtw8703b_set_channel,
+ .query_rx_desc = rtw8703b_query_rx_desc,
+ .read_rf = rtw_phy_read_rf_sipi,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8723x_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8723x_cfg_ldo25,
+ .efuse_grant = rtw8723x_efuse_grant,
+ .false_alarm_statistics = rtw8723x_false_alarm_statistics,
+ .phy_calibration = rtw8703b_phy_calibration,
+ .dpk_track = NULL,
+	/* 8723d uses REG_CSRATIO to set dm_info.cck_pd_default, which
+	 * is used in its cck_pd_set function. According to comments in
+	 * the vendor driver code that register doesn't exist in this
+	 * chip generation; there is only 0xa0a ("ODM_CCK_PD_THRESH"),
+	 * which is only *written* to.
+	 */
+ .cck_pd_set = NULL,
+ .pwr_track = rtw8703b_pwr_track,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+ .adaptivity_init = NULL,
+ .adaptivity = NULL,
+ .cfo_init = NULL,
+ .cfo_track = NULL,
+ .config_tx_path = NULL,
+ .config_txrx_mode = NULL,
+ .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum,
+
+ /* for coex */
+ .coex_set_init = rtw8723x_coex_cfg_init,
+ .coex_set_ant_switch = NULL,
+ .coex_set_gnt_fix = rtw8703b_coex_set_gnt_fix,
+ .coex_set_gnt_debug = rtw8703b_coex_set_gnt_debug,
+ .coex_set_rfe_type = rtw8703b_coex_set_rfe_type,
+ .coex_set_wl_tx_power = rtw8703b_coex_set_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8703b_coex_set_wl_rx_gain,
+};
+
+const struct rtw_chip_info rtw8703b_hw_spec = {
+ .ops = &rtw8703b_ops,
+ .id = RTW_CHIP_TYPE_8703B,
+
+ .fw_name = "rtw88/rtw8703b_fw.bin",
+ .wlan_cpu = RTW_WCPU_11N,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 256,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 15,
+ .txff_size = 32768,
+ .rxff_size = 16384,
+ .rsvd_drv_pg_num = 8,
+ .band = RTW_BAND_2G,
+ .page_size = TX_PAGE_SIZE,
+ .csi_buf_pg_num = 0,
+ .dig_min = 0x20,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .rx_ldpc = false,
+ .tx_stbc = false,
+ .max_power_index = 0x3f,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+
+ .path_div_supported = false,
+ .ht_supported = true,
+ .vht_supported = false,
+ .lps_deep_mode_supported = 0,
+
+ .sys_func_en = 0xFD,
+ .pwr_on_seq = card_enable_flow_8703b,
+ .pwr_off_seq = card_disable_flow_8703b,
+ .rqpn_table = rqpn_table_8703b,
+ .prioq_addrs = &rtw8723x_common.prioq_addrs,
+ .page_table = page_table_8703b,
+ /* used only in pci.c, not needed for SDIO devices */
+ .intf_table = NULL,
+
+ .dig = rtw8723x_common.dig,
+ .dig_cck = rtw8723x_common.dig_cck,
+
+ .rf_sipi_addr = {0x840, 0x844},
+ .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr,
+ .fix_rf_phy_num = 2,
+ .ltecoex_addr = &rtw8723x_common.ltecoex_addr,
+
+ .mac_tbl = &rtw8703b_mac_tbl,
+ .agc_tbl = &rtw8703b_agc_tbl,
+ .bb_tbl = &rtw8703b_bb_tbl,
+ .rf_tbl = {&rtw8703b_rf_a_tbl},
+
+ .rfe_defs = rtw8703b_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8703b_rfe_defs),
+
+ .iqk_threshold = 8,
+ .pwr_track_tbl = &rtw8703b_rtw_pwr_track_tbl,
+
+	/* WOWLAN firmware exists, but is not implemented yet */
+ .wow_fw_name = "rtw88/rtw8703b_wow_fw.bin",
+ .wowlan_stub = NULL,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
+
+	/* The vendor driver uses a date-based version format; this
+	 * value is 20180330 converted to hex.
+	 */
+ .coex_para_ver = 0x0133ed6a,
+ .bt_desired_ver = 0x1c,
+ .scbd_support = true,
+ .new_scbd10_def = true,
+ .ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .bt_rssi_step = bt_rssi_step_8703b,
+ .wl_rssi_step = wl_rssi_step_8703b,
+ /* sant -> shared antenna, nsant -> non-shared antenna
+	 * Not sure if 8703b versions with a non-shared antenna even exist.
+ */
+ .table_sant_num = ARRAY_SIZE(table_sant_8703b),
+ .table_sant = table_sant_8703b,
+ .table_nsant_num = 0,
+ .table_nsant = NULL,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8703b),
+ .tdma_sant = tdma_sant_8703b,
+ .tdma_nsant_num = 0,
+ .tdma_nsant = NULL,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8703b),
+ .wl_rf_para_tx = rf_para_tx_8703b,
+ .wl_rf_para_rx = rf_para_rx_8703b,
+ .bt_afh_span_bw20 = 0x20,
+ .bt_afh_span_bw40 = 0x30,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8703b),
+ .afh_5g = afh_5g_8703b,
+ /* REG_BTG_SEL doesn't seem to have a counterpart in the
+ * vendor driver. Mathematically it's REG_PAD_CTRL1 + 3.
+ *
+	 * It is used in the cardemu_to_act power sequence though (by
+	 * address, 0x0067), with the comment "0x67[0] = 0 to disable
+	 * BT_GPS_SEL pins". That seems to fit.
+ */
+ .btg_reg = NULL,
+	/* These registers are read (and their values printed) when
+	 * CONFIG_RTW88_DEBUGFS is enabled.
+	 */
+ .coex_info_hw_regs_num = 0,
+ .coex_info_hw_regs = NULL,
+};
+EXPORT_SYMBOL(rtw8703b_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8703b_fw.bin");
+MODULE_FIRMWARE("rtw88/rtw8703b_wow_fw.bin");
+
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8703b driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.h b/drivers/net/wireless/realtek/rtw88/rtw8703b.h
new file mode 100644
index 000000000000..3e2da2e6739d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#ifndef __RTW8703B_H__
+#define __RTW8703B_H__
+
+#include "rtw8723x.h"
+
+extern const struct rtw_chip_info rtw8703b_hw_spec;
+
+/* phy status parsing */
+#define VGA_BITS GENMASK(4, 0)
+#define LNA_L_BITS GENMASK(7, 5)
+#define LNA_H_BIT BIT(7)
+/* masks for assembling LNA index from high and low bits */
+#define BIT_LNA_H_MASK BIT(3)
+#define BIT_LNA_L_MASK GENMASK(2, 0)
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+ u8 gain: 7;
+ u8 trsw: 1;
+#else
+ u8 trsw: 1;
+ u8 gain: 7;
+#endif
+} __packed;
+
+/* This struct is called phy_status_rpt_8192cd in the vendor driver;
+ * it could potentially be shared with drivers for other chips of the
+ * same generation.
+ */
+struct phy_status_8703b {
+ struct phy_rx_agc_info path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ /* for CCK: bits 0:4: VGA index, bits 5:7: LNA index (low) */
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ /* for CCK: bit 7 is high bit of LNA index if long report type */
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 reserved_1;
+ u8 noise_power_db_msb;
+ s8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 reserved_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ s8 sig_evm;
+ u8 reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2: 1;
+ u8 sgi_en: 1;
+ u8 rxsc: 2;
+ u8 idle_long: 1;
+ u8 r_ant_train_en: 1;
+ u8 ant_sel_b: 1;
+ u8 ant_sel: 1;
+#else /* __BIG_ENDIAN */
+ u8 ant_sel: 1;
+ u8 ant_sel_b: 1;
+ u8 r_ant_train_en: 1;
+ u8 idle_long: 1;
+ u8 rxsc: 2;
+ u8 sgi_en: 1;
+ u8 antsel_rx_keep_2: 1;
+#endif
+} __packed;
+
+/* Baseband registers */
+#define REG_BB_PWR_SAV5_11N 0x0818
+/* BIT(11) should be 1 for 8703B *and* 8723D, which means the LNA uses
+ * 4 bits for CCK rates in the report instead of 3. The vendor driver
+ * logs a warning if it's 0, but handles the case.
+ *
+ * The purpose of the other parts of this register is unknown; 8723cs
+ * driver code indicates some other chips use certain bits for antenna
+ * diversity.
+ */
+#define REG_BB_AMP 0x0950
+#define BIT_MASK_RX_LNA (BIT(11))
+
+/* 0xaXX: 40MHz channel settings */
+#define REG_CCK_TXSF2 0x0a24 /* CCK TX filter 2 */
+#define REG_CCK_DBG 0x0a28 /* debug port */
+#define REG_OFDM0_A_TX_AFE 0x0c84
+#define REG_TXIQK_MATRIXB_LSB2_11N 0x0c9c
+#define REG_OFDM0_TX_PSD_NOISE 0x0ce4 /* TX pseudo noise weighting */
+#define REG_IQK_RDY 0x0e90 /* is != 0 when IQK is done */
+
+/* RF registers */
+#define RF_RCK1 0x1E
+
+#define AGG_BURST_NUM 3
+#define AGG_BURST_SIZE 0 /* 1K */
+#define BIT_MASK_AGG_BURST_NUM (GENMASK(3, 2))
+#define BIT_MASK_AGG_BURST_SIZE (GENMASK(5, 4))
+
+#endif /* __RTW8703B_H__ */
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c
new file mode 100644
index 000000000000..81020fd907aa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8703b_tables.h"
+
+static const struct rtw_phy_pg_cfg_pair rtw8703b_bb_pg[] = {
+ { 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, },
+ { 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, },
+ { 0, 0, 0, 0x00000e00, 0xffffffff, 0x34363636, },
+ { 0, 0, 0, 0x00000e04, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000e10, 0xffffffff, 0x30343434, },
+ { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26262830, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8703b_bb_pg);
+
+/* Regd: FCC -> 0, ETSI -> 2, MKK -> 1
+ * Band: 2.4G -> 0, 5G -> 1
+ * Bandwidth (bw): 20M -> 0, 40M -> 1, 80M -> 2, 160M -> 3
+ * Rate Section (rs): CCK -> 0, OFDM -> 1, HT -> 2, VHT -> 3
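+ * The last two columns are the channel number and the TX power limit
+ * value; 63 apparently marks entries without a specified limit.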
+ */
+static const struct rtw_txpwr_lmt_cfg_pair rtw8703b_txpwr_lmt[] = {
+ {0, 0, 0, 0, 1, 30},
+ {2, 0, 0, 0, 1, 26},
+ {1, 0, 0, 0, 1, 32},
+ {0, 0, 0, 0, 2, 30},
+ {2, 0, 0, 0, 2, 26},
+ {1, 0, 0, 0, 2, 32},
+ {0, 0, 0, 0, 3, 30},
+ {2, 0, 0, 0, 3, 26},
+ {1, 0, 0, 0, 3, 32},
+ {0, 0, 0, 0, 4, 30},
+ {2, 0, 0, 0, 4, 26},
+ {1, 0, 0, 0, 4, 32},
+ {0, 0, 0, 0, 5, 30},
+ {2, 0, 0, 0, 5, 26},
+ {1, 0, 0, 0, 5, 32},
+ {0, 0, 0, 0, 6, 30},
+ {2, 0, 0, 0, 6, 26},
+ {1, 0, 0, 0, 6, 32},
+ {0, 0, 0, 0, 7, 30},
+ {2, 0, 0, 0, 7, 26},
+ {1, 0, 0, 0, 7, 32},
+ {0, 0, 0, 0, 8, 30},
+ {2, 0, 0, 0, 8, 26},
+ {1, 0, 0, 0, 8, 32},
+ {0, 0, 0, 0, 9, 30},
+ {2, 0, 0, 0, 9, 26},
+ {1, 0, 0, 0, 9, 32},
+ {0, 0, 0, 0, 10, 30},
+ {2, 0, 0, 0, 10, 26},
+ {1, 0, 0, 0, 10, 32},
+ {0, 0, 0, 0, 11, 30},
+ {2, 0, 0, 0, 11, 26},
+ {1, 0, 0, 0, 11, 32},
+ {0, 0, 0, 0, 12, 63},
+ {2, 0, 0, 0, 12, 26},
+ {1, 0, 0, 0, 12, 32},
+ {0, 0, 0, 0, 13, 63},
+ {2, 0, 0, 0, 13, 26},
+ {1, 0, 0, 0, 13, 32},
+ {0, 0, 0, 0, 14, 63},
+ {2, 0, 0, 0, 14, 63},
+ {1, 0, 0, 0, 14, 32},
+ {0, 0, 0, 1, 1, 28},
+ {2, 0, 0, 1, 1, 28},
+ {1, 0, 0, 1, 1, 28},
+ {0, 0, 0, 1, 2, 28},
+ {2, 0, 0, 1, 2, 32},
+ {1, 0, 0, 1, 2, 32},
+ {0, 0, 0, 1, 3, 32},
+ {2, 0, 0, 1, 3, 32},
+ {1, 0, 0, 1, 3, 32},
+ {0, 0, 0, 1, 4, 32},
+ {2, 0, 0, 1, 4, 32},
+ {1, 0, 0, 1, 4, 32},
+ {0, 0, 0, 1, 5, 32},
+ {2, 0, 0, 1, 5, 32},
+ {1, 0, 0, 1, 5, 32},
+ {0, 0, 0, 1, 6, 32},
+ {2, 0, 0, 1, 6, 32},
+ {1, 0, 0, 1, 6, 32},
+ {0, 0, 0, 1, 7, 32},
+ {2, 0, 0, 1, 7, 32},
+ {1, 0, 0, 1, 7, 32},
+ {0, 0, 0, 1, 8, 32},
+ {2, 0, 0, 1, 8, 32},
+ {1, 0, 0, 1, 8, 32},
+ {0, 0, 0, 1, 9, 32},
+ {2, 0, 0, 1, 9, 32},
+ {1, 0, 0, 1, 9, 32},
+ {0, 0, 0, 1, 10, 28},
+ {2, 0, 0, 1, 10, 32},
+ {1, 0, 0, 1, 10, 32},
+ {0, 0, 0, 1, 11, 28},
+ {2, 0, 0, 1, 11, 32},
+ {1, 0, 0, 1, 11, 32},
+ {0, 0, 0, 1, 12, 63},
+ {2, 0, 0, 1, 12, 32},
+ {1, 0, 0, 1, 12, 32},
+ {0, 0, 0, 1, 13, 63},
+ {2, 0, 0, 1, 13, 28},
+ {1, 0, 0, 1, 13, 28},
+ {0, 0, 0, 1, 14, 63},
+ {2, 0, 0, 1, 14, 63},
+ {1, 0, 0, 1, 14, 63},
+ {0, 0, 0, 2, 1, 26},
+ {2, 0, 0, 2, 1, 26},
+ {1, 0, 0, 2, 1, 28},
+ {0, 0, 0, 2, 2, 26},
+ {2, 0, 0, 2, 2, 32},
+ {1, 0, 0, 2, 2, 32},
+ {0, 0, 0, 2, 3, 32},
+ {2, 0, 0, 2, 3, 32},
+ {1, 0, 0, 2, 3, 32},
+ {0, 0, 0, 2, 4, 32},
+ {2, 0, 0, 2, 4, 32},
+ {1, 0, 0, 2, 4, 32},
+ {0, 0, 0, 2, 5, 32},
+ {2, 0, 0, 2, 5, 32},
+ {1, 0, 0, 2, 5, 32},
+ {0, 0, 0, 2, 6, 32},
+ {2, 0, 0, 2, 6, 32},
+ {1, 0, 0, 2, 6, 32},
+ {0, 0, 0, 2, 7, 32},
+ {2, 0, 0, 2, 7, 32},
+ {1, 0, 0, 2, 7, 32},
+ {0, 0, 0, 2, 8, 32},
+ {2, 0, 0, 2, 8, 32},
+ {1, 0, 0, 2, 8, 32},
+ {0, 0, 0, 2, 9, 32},
+ {2, 0, 0, 2, 9, 32},
+ {1, 0, 0, 2, 9, 32},
+ {0, 0, 0, 2, 10, 26},
+ {2, 0, 0, 2, 10, 32},
+ {1, 0, 0, 2, 10, 32},
+ {0, 0, 0, 2, 11, 26},
+ {2, 0, 0, 2, 11, 32},
+ {1, 0, 0, 2, 11, 32},
+ {0, 0, 0, 2, 12, 63},
+ {2, 0, 0, 2, 12, 32},
+ {1, 0, 0, 2, 12, 32},
+ {0, 0, 0, 2, 13, 63},
+ {2, 0, 0, 2, 13, 26},
+ {1, 0, 0, 2, 13, 28},
+ {0, 0, 0, 2, 14, 63},
+ {2, 0, 0, 2, 14, 63},
+ {1, 0, 0, 2, 14, 63},
+ {0, 0, 1, 2, 1, 63},
+ {2, 0, 1, 2, 1, 63},
+ {1, 0, 1, 2, 1, 63},
+ {0, 0, 1, 2, 2, 63},
+ {2, 0, 1, 2, 2, 63},
+ {1, 0, 1, 2, 2, 63},
+ {0, 0, 1, 2, 3, 26},
+ {2, 0, 1, 2, 3, 26},
+ {1, 0, 1, 2, 3, 26},
+ {0, 0, 1, 2, 4, 26},
+ {2, 0, 1, 2, 4, 28},
+ {1, 0, 1, 2, 4, 26},
+ {0, 0, 1, 2, 5, 28},
+ {2, 0, 1, 2, 5, 28},
+ {1, 0, 1, 2, 5, 26},
+ {0, 0, 1, 2, 6, 28},
+ {2, 0, 1, 2, 6, 28},
+ {1, 0, 1, 2, 6, 26},
+ {0, 0, 1, 2, 7, 28},
+ {2, 0, 1, 2, 7, 28},
+ {1, 0, 1, 2, 7, 26},
+ {0, 0, 1, 2, 8, 26},
+ {2, 0, 1, 2, 8, 28},
+ {1, 0, 1, 2, 8, 26},
+ {0, 0, 1, 2, 9, 26},
+ {2, 0, 1, 2, 9, 28},
+ {1, 0, 1, 2, 9, 26},
+ {0, 0, 1, 2, 10, 26},
+ {2, 0, 1, 2, 10, 28},
+ {1, 0, 1, 2, 10, 26},
+ {0, 0, 1, 2, 11, 26},
+ {2, 0, 1, 2, 11, 26},
+ {1, 0, 1, 2, 11, 26},
+ {0, 0, 1, 2, 12, 63},
+ {2, 0, 1, 2, 12, 26},
+ {1, 0, 1, 2, 12, 26},
+ {0, 0, 1, 2, 13, 63},
+ {2, 0, 1, 2, 13, 26},
+ {1, 0, 1, 2, 13, 26},
+ {0, 0, 1, 2, 14, 63},
+ {2, 0, 1, 2, 14, 63},
+ {1, 0, 1, 2, 14, 63},
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8703b_txpwr_lmt);
+
+static const u32 rtw8703b_mac[] = {
+ 0x02F, 0x00000030,
+ 0x035, 0x00000000,
+ 0x067, 0x00000002,
+ 0x092, 0x00000080,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000002,
+ 0x435, 0x00000003,
+ 0x436, 0x00000005,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000000,
+ 0x43B, 0x00000001,
+ 0x43C, 0x00000002,
+ 0x43D, 0x00000003,
+ 0x43E, 0x00000005,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000028,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000028,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000C,
+ 0x63F, 0x0000000C,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66A, 0x000000B0,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x765, 0x00000018,
+ 0x76E, 0x00000004,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_mac, rtw_phy_cfg_mac);
+
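+/* AGC gain table: address/value pairs applied via rtw_phy_cfg_agc() (mostly reg 0xC78, final entries 0xC50) */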
+static const u32 rtw8703b_agc[] = {
+ 0xC78, 0xFC000101,
+ 0xC78, 0xFB010101,
+ 0xC78, 0xFA020101,
+ 0xC78, 0xF9030101,
+ 0xC78, 0xF8040101,
+ 0xC78, 0xF7050101,
+ 0xC78, 0xF6060101,
+ 0xC78, 0xF5070101,
+ 0xC78, 0xF4080101,
+ 0xC78, 0xF3090101,
+ 0xC78, 0xF20A0101,
+ 0xC78, 0xF10B0101,
+ 0xC78, 0xF00C0101,
+ 0xC78, 0xEF0D0101,
+ 0xC78, 0xEE0E0101,
+ 0xC78, 0xED0F0101,
+ 0xC78, 0xEC100101,
+ 0xC78, 0xEB110101,
+ 0xC78, 0xEA120101,
+ 0xC78, 0xE9130101,
+ 0xC78, 0xE8140101,
+ 0xC78, 0xE7150101,
+ 0xC78, 0xE6160101,
+ 0xC78, 0xE5170101,
+ 0xC78, 0xE4180101,
+ 0xC78, 0xE3190101,
+ 0xC78, 0x661A0101,
+ 0xC78, 0x651B0101,
+ 0xC78, 0x641C0101,
+ 0xC78, 0x631D0101,
+ 0xC78, 0x071E0101,
+ 0xC78, 0x061F0101,
+ 0xC78, 0x05200101,
+ 0xC78, 0x04210101,
+ 0xC78, 0x03220101,
+ 0xC78, 0xE8230001,
+ 0xC78, 0xE7240001,
+ 0xC78, 0xE6250001,
+ 0xC78, 0xE5260001,
+ 0xC78, 0xE4270001,
+ 0xC78, 0x89280001,
+ 0xC78, 0x88290001,
+ 0xC78, 0x872A0001,
+ 0xC78, 0x862B0001,
+ 0xC78, 0x852C0001,
+ 0xC78, 0x482D0001,
+ 0xC78, 0x472E0001,
+ 0xC78, 0x462F0001,
+ 0xC78, 0x45300001,
+ 0xC78, 0x44310001,
+ 0xC78, 0x07320001,
+ 0xC78, 0x06330001,
+ 0xC78, 0x05340001,
+ 0xC78, 0x04350001,
+ 0xC78, 0x03360001,
+ 0xC78, 0x02370001,
+ 0xC78, 0x01380001,
+ 0xC78, 0x00390001,
+ 0xC78, 0x003A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0x7F002001,
+ 0xC78, 0x7F012001,
+ 0xC78, 0x7F022001,
+ 0xC78, 0x7F032001,
+ 0xC78, 0x7F042001,
+ 0xC78, 0x7F052001,
+ 0xC78, 0x7F062001,
+ 0xC78, 0x7F072001,
+ 0xC78, 0x7F082001,
+ 0xC78, 0x7F092001,
+ 0xC78, 0x7F0A2001,
+ 0xC78, 0x7F0B2001,
+ 0xC78, 0x7F0C2001,
+ 0xC78, 0x7F0D2001,
+ 0xC78, 0x7F0E2001,
+ 0xC78, 0x7F0F2001,
+ 0xC78, 0x7F102001,
+ 0xC78, 0x7F112001,
+ 0xC78, 0x7E122001,
+ 0xC78, 0x7D132001,
+ 0xC78, 0x7C142001,
+ 0xC78, 0x7B152001,
+ 0xC78, 0x7A162001,
+ 0xC78, 0x79172001,
+ 0xC78, 0x78182001,
+ 0xC78, 0x77192001,
+ 0xC78, 0x761A2001,
+ 0xC78, 0x751B2001,
+ 0xC78, 0x741C2001,
+ 0xC78, 0x731D2001,
+ 0xC78, 0x721E2001,
+ 0xC78, 0x711F2001,
+ 0xC78, 0x70202001,
+ 0xC78, 0x6F212001,
+ 0xC78, 0x6E222001,
+ 0xC78, 0x6D232001,
+ 0xC78, 0x6C242001,
+ 0xC78, 0x6B252001,
+ 0xC78, 0x6A262001,
+ 0xC78, 0x69272001,
+ 0xC78, 0x68282001,
+ 0xC78, 0x67292001,
+ 0xC78, 0x662A2001,
+ 0xC78, 0x652B2001,
+ 0xC78, 0x642C2001,
+ 0xC78, 0x632D2001,
+ 0xC78, 0x622E2001,
+ 0xC78, 0x612F2001,
+ 0xC78, 0x60302001,
+ 0xC78, 0x42312001,
+ 0xC78, 0x41322001,
+ 0xC78, 0x40332001,
+ 0xC78, 0x23342001,
+ 0xC78, 0x22352001,
+ 0xC78, 0x21362001,
+ 0xC78, 0x20372001,
+ 0xC78, 0x00382001,
+ 0xC78, 0x02392001,
+ 0xC78, 0x013A2001,
+ 0xC78, 0x003B2001,
+ 0xC78, 0x003C2001,
+ 0xC78, 0x003D2001,
+ 0xC78, 0x003E2001,
+ 0xC78, 0x003F2001,
+ 0xC78, 0x7F003101,
+ 0xC78, 0x7F013101,
+ 0xC78, 0x7F023101,
+ 0xC78, 0x7F033101,
+ 0xC78, 0x7F043101,
+ 0xC78, 0x7F053101,
+ 0xC78, 0x7F063101,
+ 0xC78, 0x7E073101,
+ 0xC78, 0x7D083101,
+ 0xC78, 0x7C093101,
+ 0xC78, 0x7B0A3101,
+ 0xC78, 0x7A0B3101,
+ 0xC78, 0x790C3101,
+ 0xC78, 0x780D3101,
+ 0xC78, 0x770E3101,
+ 0xC78, 0x760F3101,
+ 0xC78, 0x75103101,
+ 0xC78, 0x74113101,
+ 0xC78, 0x73123101,
+ 0xC78, 0x72133101,
+ 0xC78, 0x71143101,
+ 0xC78, 0x70153101,
+ 0xC78, 0x6F163101,
+ 0xC78, 0x69173101,
+ 0xC78, 0x68183101,
+ 0xC78, 0x67193101,
+ 0xC78, 0x661A3101,
+ 0xC78, 0x651B3101,
+ 0xC78, 0x641C3101,
+ 0xC78, 0x631D3101,
+ 0xC78, 0x621E3101,
+ 0xC78, 0x611F3101,
+ 0xC78, 0x60203101,
+ 0xC78, 0x42213101,
+ 0xC78, 0x41223101,
+ 0xC78, 0x40233101,
+ 0xC78, 0x22243101,
+ 0xC78, 0x21253101,
+ 0xC78, 0x20263101,
+ 0xC78, 0x00273101,
+ 0xC78, 0x00283101,
+ 0xC78, 0x00293101,
+ 0xC78, 0x002A3101,
+ 0xC78, 0x002B3101,
+ 0xC78, 0x002C3101,
+ 0xC78, 0x002D3101,
+ 0xC78, 0x002E3101,
+ 0xC78, 0x002F3101,
+ 0xC78, 0x00303101,
+ 0xC78, 0x00313101,
+ 0xC78, 0x00323101,
+ 0xC78, 0x00333101,
+ 0xC78, 0x00343101,
+ 0xC78, 0x00353101,
+ 0xC78, 0x00363101,
+ 0xC78, 0x00373101,
+ 0xC78, 0x00383101,
+ 0xC78, 0x00393101,
+ 0xC78, 0x003A3101,
+ 0xC78, 0x003B3101,
+ 0xC78, 0x003C3101,
+ 0xC78, 0x003D3101,
+ 0xC78, 0x003E3101,
+ 0xC78, 0x003F3101,
+ 0xC78, 0xFA403101,
+ 0xC78, 0xF9413101,
+ 0xC78, 0xF8423101,
+ 0xC78, 0xF7433101,
+ 0xC78, 0xF6443101,
+ 0xC78, 0xF5453101,
+ 0xC78, 0xF4463101,
+ 0xC78, 0xF3473101,
+ 0xC78, 0xF2483101,
+ 0xC78, 0xE1493101,
+ 0xC78, 0xE04A3101,
+ 0xC78, 0xEF4B3101,
+ 0xC78, 0xEE4C3101,
+ 0xC78, 0xED4D3101,
+ 0xC78, 0xEC4E3101,
+ 0xC78, 0xEB4F3101,
+ 0xC78, 0xEA503101,
+ 0xC78, 0xE9513101,
+ 0xC78, 0xE8523101,
+ 0xC78, 0xE7533101,
+ 0xC78, 0xE6543101,
+ 0xC78, 0xE5553101,
+ 0xC78, 0xE4563101,
+ 0xC78, 0xE3573101,
+ 0xC78, 0xE2583101,
+ 0xC78, 0xE1593101,
+ 0xC78, 0xE05A3101,
+ 0xC78, 0xC25B3101,
+ 0xC78, 0xC15C3101,
+ 0xC78, 0xC05D3101,
+ 0xC78, 0x825E3101,
+ 0xC78, 0x815F3101,
+ 0xC78, 0x80603101,
+ 0xC78, 0x80613101,
+ 0xC78, 0x80623101,
+ 0xC78, 0x80633101,
+ 0xC78, 0x80643101,
+ 0xC78, 0x80653101,
+ 0xC78, 0x80663101,
+ 0xC78, 0x80673101,
+ 0xC78, 0x80683101,
+ 0xC78, 0x80693101,
+ 0xC78, 0x806A3101,
+ 0xC78, 0x806B3101,
+ 0xC78, 0x806C3101,
+ 0xC78, 0x806D3101,
+ 0xC78, 0x806E3101,
+ 0xC78, 0x806F3101,
+ 0xC78, 0x80703101,
+ 0xC78, 0x80713101,
+ 0xC78, 0x80723101,
+ 0xC78, 0x80733101,
+ 0xC78, 0x80743101,
+ 0xC78, 0x80753101,
+ 0xC78, 0x80763101,
+ 0xC78, 0x80773101,
+ 0xC78, 0x80783101,
+ 0xC78, 0x80793101,
+ 0xC78, 0x807A3101,
+ 0xC78, 0x807B3101,
+ 0xC78, 0x807C3101,
+ 0xC78, 0x807D3101,
+ 0xC78, 0x807E3101,
+ 0xC78, 0x807F3101,
+ 0xC78, 0xFF402001,
+ 0xC78, 0xFF412001,
+ 0xC78, 0xFF422001,
+ 0xC78, 0xFF432001,
+ 0xC78, 0xFF442001,
+ 0xC78, 0xFF452001,
+ 0xC78, 0xFF462001,
+ 0xC78, 0xFF472001,
+ 0xC78, 0xFF482001,
+ 0xC78, 0xFF492001,
+ 0xC78, 0xFF4A2001,
+ 0xC78, 0xFF4B2001,
+ 0xC78, 0xFF4C2001,
+ 0xC78, 0xFE4D2001,
+ 0xC78, 0xFD4E2001,
+ 0xC78, 0xFC4F2001,
+ 0xC78, 0xFB502001,
+ 0xC78, 0xFA512001,
+ 0xC78, 0xF9522001,
+ 0xC78, 0xF8532001,
+ 0xC78, 0xF7542001,
+ 0xC78, 0xF6552001,
+ 0xC78, 0xF5562001,
+ 0xC78, 0xF4572001,
+ 0xC78, 0xF3582001,
+ 0xC78, 0xF2592001,
+ 0xC78, 0xF15A2001,
+ 0xC78, 0xF05B2001,
+ 0xC78, 0xEF5C2001,
+ 0xC78, 0xEE5D2001,
+ 0xC78, 0xED5E2001,
+ 0xC78, 0xEC5F2001,
+ 0xC78, 0xEB602001,
+ 0xC78, 0xEA612001,
+ 0xC78, 0xE9622001,
+ 0xC78, 0xE8632001,
+ 0xC78, 0xE7642001,
+ 0xC78, 0xE6652001,
+ 0xC78, 0xE5662001,
+ 0xC78, 0xE4672001,
+ 0xC78, 0xE3682001,
+ 0xC78, 0xC5692001,
+ 0xC78, 0xC46A2001,
+ 0xC78, 0xC36B2001,
+ 0xC78, 0xA46C2001,
+ 0xC78, 0x846D2001,
+ 0xC78, 0x836E2001,
+ 0xC78, 0x826F2001,
+ 0xC78, 0x81702001,
+ 0xC78, 0x80712001,
+ 0xC78, 0x80722001,
+ 0xC78, 0x80732001,
+ 0xC78, 0x80742001,
+ 0xC78, 0x80752001,
+ 0xC78, 0x80762001,
+ 0xC78, 0x80772001,
+ 0xC78, 0x80782001,
+ 0xC78, 0x80792001,
+ 0xC78, 0x807A2001,
+ 0xC78, 0x807B2001,
+ 0xC78, 0x807C2001,
+ 0xC78, 0x807D2001,
+ 0xC78, 0x807E2001,
+ 0xC78, 0x807F2001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_agc, rtw_phy_cfg_agc);
+
+/* init values for BB registers */
+static const u32 rtw8703b_bb[] = {
+ 0x800, 0x83045700,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x004F0201,
+ 0x880, 0xB0000B1E,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0x948, 0x99000000,
+ 0x94C, 0x00000010,
+ 0x950, 0x20003800,
+ 0x954, 0x4A880000,
+ 0x958, 0x4BC5D87A,
+ 0x95C, 0x04EB9B79,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0xD1D80000,
+ 0xA24, 0x5A7DA0BD,
+ 0xA28, 0x0000223B,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00008900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x2180FA74,
+ 0xA84, 0x00120000,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x05100000,
+ 0xA9C, 0x3F000000,
+ 0xAA0, 0x00000000,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x18800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC4B,
+ 0xC34, 0x31000040,
+ 0xC38, 0x21688080,
+ 0xC3C, 0x000016CC,
+ 0xC40, 0x1F78403F,
+ 0xC44, 0x00010036,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00015967,
+ 0xC5C, 0x18250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C07BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020600DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x19F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x10000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00030740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCB979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x98000000,
+ 0xD3C, 0x40127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000048,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x01C00014,
+ 0xE70, 0x01C00014,
+ 0xE74, 0x02000014,
+ 0xE78, 0x02000014,
+ 0xE7C, 0x02000014,
+ 0xE80, 0x02000014,
+ 0xE84, 0x01C00014,
+ 0xE88, 0x02000014,
+ 0xE8C, 0x01C00014,
+ 0xED0, 0x01C00014,
+ 0xED4, 0x01C00014,
+ 0xED8, 0x01C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEE8, 0x21555448,
+ 0xEEC, 0x03C00014,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_bb, rtw_phy_cfg_bb);
+
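+/* init values for RF path A registers */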
+static const u32 rtw8703b_rf_a[] = {
+ 0x018, 0x00008C01,
+ 0x0B5, 0x0008C050,
+ 0x0B1, 0x00054258,
+ 0x0B2, 0x00054C00,
+ 0x030, 0x00018000,
+ 0x031, 0x00000027,
+ 0x032, 0x000A7F07,
+ 0x030, 0x00020000,
+ 0x031, 0x00000027,
+ 0x032, 0x000E7D87,
+ 0x01C, 0x000F8635,
+ 0x0EF, 0x00080000,
+ 0x030, 0x00008000,
+ 0x031, 0x00000004,
+ 0x032, 0x00006105,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x041, 0x0000BD54,
+ 0x041, 0x00003DD4,
+ 0x041, 0x0000FDD4,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000600,
+ 0x050, 0x0000C6DB,
+ 0x051, 0x00004505,
+ 0x052, 0x0000E31D,
+ 0x053, 0x00040579,
+ 0x054, 0x00000000,
+ 0x055, 0x0008206E,
+ 0x056, 0x00040000,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADD7,
+ 0x034, 0x00009DD4,
+ 0x034, 0x00008DD1,
+ 0x034, 0x00007DCE,
+ 0x034, 0x00006DCB,
+ 0x034, 0x00005CCE,
+ 0x034, 0x000048CD,
+ 0x034, 0x000034CC,
+ 0x034, 0x0000244F,
+ 0x034, 0x0000144C,
+ 0x034, 0x0000004E,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x0003801F,
+ 0x03B, 0x00030002,
+ 0x03B, 0x00028001,
+ 0x03B, 0x00020000,
+ 0x03B, 0x00018003,
+ 0x03B, 0x00010002,
+ 0x03B, 0x00008001,
+ 0x03B, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x082, 0x000C0000,
+ 0x083, 0x000AF025,
+ 0x01E, 0x00000C08,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8703b_rf_a, A);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h
new file mode 100644
index 000000000000..98bd399bddbf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#ifndef __RTW8703B_TABLES_H__
+#define __RTW8703B_TABLES_H__
+
+extern const struct rtw_table rtw8703b_bb_pg_tbl;
+extern const struct rtw_table rtw8703b_txpwr_lmt_tbl;
+extern const struct rtw_table rtw8703b_mac_tbl;
+extern const struct rtw_table rtw8703b_agc_tbl;
+extern const struct rtw_table rtw8703b_bb_tbl;
+extern const struct rtw_table rtw8703b_rf_a_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
new file mode 100644
index 000000000000..8d38d36be8c0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/module.h>
+#include "main.h"
+#include "rtw8703b.h"
+#include "sdio.h"
+
+static const struct sdio_device_id rtw_8723cs_id_table[] = {
+ {
+ SDIO_DEVICE(SDIO_VENDOR_ID_REALTEK,
+ SDIO_DEVICE_ID_REALTEK_RTW8723CS),
+ .driver_data = (kernel_ulong_t)&rtw8703b_hw_spec,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table);
+
+static struct sdio_driver rtw_8723cs_driver = {
+ .name = "rtw8723cs",
+ .id_table = rtw_8723cs_id_table,
+ .probe = rtw_sdio_probe,
+ .remove = rtw_sdio_remove,
+ .drv = {
+ .pm = &rtw_sdio_pm_ops,
+ .shutdown = rtw_sdio_shutdown
+ }};
+module_sdio_driver(rtw_8723cs_driver);
+
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8723cs driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index c575476a0020..f8df4c84d39f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -9,36 +9,13 @@
#include "tx.h"
#include "rx.h"
#include "phy.h"
+#include "rtw8723x.h"
#include "rtw8723d.h"
#include "rtw8723d_table.h"
#include "mac.h"
#include "reg.h"
#include "debug.h"
-static const struct rtw_hw_reg rtw8723d_txagc[] = {
- [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
- [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
- [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
- [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
- [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
- [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
- [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
- [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
- [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
- [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
- [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
- [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
- [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
- [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
- [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
- [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
- [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
- [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
- [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
- [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
-};
-
-#define WLAN_TXQ_RPT_EN 0x1F
#define WLAN_SLOT_TIME 0x09
#define WLAN_RL_VAL 0x3030
#define WLAN_BAR_VAL 0x0201ffff
@@ -65,34 +42,6 @@ static const struct rtw_hw_reg rtw8723d_txagc[] = {
#define WLAN_LTR_CTRL1 0xCB004010
#define WLAN_LTR_CTRL2 0x01233425
-static void rtw8723d_lck(struct rtw_dev *rtwdev)
-{
- u32 lc_cal;
- u8 val_ctx, rf_val;
- int ret;
-
- val_ctx = rtw_read8(rtwdev, REG_CTX);
- if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
- rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE);
- else
- rtw_write8(rtwdev, REG_TXPAUSE, 0xFF);
- lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
-
- rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK);
-
- ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1,
- 10000, 1000000, false,
- rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK);
- if (ret)
- rtw_warn(rtwdev, "failed to poll LCK status bit\n");
-
- rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal);
- if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
- rtw_write8(rtwdev, REG_CTX, val_ctx);
- else
- rtw_write8(rtwdev, REG_TXPAUSE, 0x00);
-}
-
static const u32 rtw8723d_ofdm_swing_table[] = {
0x0b40002d, 0x0c000030, 0x0cc00033, 0x0d800036, 0x0e400039, 0x0f00003c,
0x10000040, 0x11000044, 0x12000048, 0x1300004c, 0x14400051, 0x15800056,
@@ -196,7 +145,7 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
- rtw8723d_lck(rtwdev);
+ rtw8723x_lck(rtwdev);
rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
@@ -204,67 +153,6 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
rtw8723d_pwrtrack_init(rtwdev);
}
-static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
-}
-
-static void rtw8723du_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->u.mac_addr);
-}
-
-static void rtw8723ds_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->s.mac_addr);
-}
-
-static int rtw8723d_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
-{
- struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw8723d_efuse *map;
- int i;
-
- map = (struct rtw8723d_efuse *)log_map;
-
- efuse->rfe_option = 0;
- efuse->rf_board_option = map->rf_board_option;
- efuse->crystal_cap = map->xtal_k;
- efuse->pa_type_2g = map->pa_type;
- efuse->lna_type_2g = map->lna_type_2g[0];
- efuse->channel_plan = map->channel_plan;
- efuse->country_code[0] = map->country_code[0];
- efuse->country_code[1] = map->country_code[1];
- efuse->bt_setting = map->rf_bt_setting;
- efuse->regd = map->rf_board_option & 0x7;
- efuse->thermal_meter[0] = map->thermal_meter;
- efuse->thermal_meter_k = map->thermal_meter;
- efuse->afe = map->afe;
-
- for (i = 0; i < 4; i++)
- efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
-
- switch (rtw_hci_type(rtwdev)) {
- case RTW_HCI_TYPE_PCIE:
- rtw8723de_efuse_parsing(efuse, map);
- break;
- case RTW_HCI_TYPE_USB:
- rtw8723du_efuse_parsing(efuse, map);
- break;
- case RTW_HCI_TYPE_SDIO:
- rtw8723ds_efuse_parsing(efuse, map);
- break;
- default:
- /* unsupported now */
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
@@ -540,297 +428,11 @@ static void rtw8723d_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw8723d_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
}
-#define BIT_CFENDFORM BIT(9)
-#define BIT_WMAC_TCR_ERR0 BIT(12)
-#define BIT_WMAC_TCR_ERR1 BIT(13)
-#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
- BIT_WMAC_TCR_ERR1)
-#define WLAN_RX_FILTER0 0xFFFF
-#define WLAN_RX_FILTER1 0x400
-#define WLAN_RX_FILTER2 0xFFFF
-#define WLAN_RCR_CFG 0x700060CE
-
-static int rtw8723d_mac_init(struct rtw_dev *rtwdev)
-{
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
- rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
-
- rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
- rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
- rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
- rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
-
- rtw_write32(rtwdev, REG_INT_MIG, 0);
- rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
-
- rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
- rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
-
- return 0;
-}
-
static void rtw8723d_shutdown(struct rtw_dev *rtwdev)
{
rtw_write16_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
}
-static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
-{
- u8 ldo_pwr;
-
- ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
- if (enable) {
- ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
- ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
- } else {
- ldo_pwr &= ~BIT_LDO25_EN;
- }
- rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
-}
-
-static void
-rtw8723d_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
-{
- struct rtw_hal *hal = &rtwdev->hal;
- const struct rtw_hw_reg *txagc;
- u8 rate, pwr_index;
- int j;
-
- for (j = 0; j < rtw_rate_size[rs]; j++) {
- rate = rtw_rate_section[rs][j];
- pwr_index = hal->tx_pwr_tbl[path][rate];
-
- if (rate >= ARRAY_SIZE(rtw8723d_txagc)) {
- rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
- continue;
- }
- txagc = &rtw8723d_txagc[rate];
- if (!txagc->addr) {
- rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
- continue;
- }
-
- rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
- }
-}
-
-static void rtw8723d_set_tx_power_index(struct rtw_dev *rtwdev)
-{
- struct rtw_hal *hal = &rtwdev->hal;
- int rs, path;
-
- for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
- rtw8723d_set_tx_power_index_by_rate(rtwdev, path, rs);
- }
-}
-
-static void rtw8723d_efuse_grant(struct rtw_dev *rtwdev, bool on)
-{
- if (on) {
- rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
-
- rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
- rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
- } else {
- rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
- }
-}
-
-static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- u32 cck_fa_cnt;
- u32 ofdm_fa_cnt;
- u32 crc32_cnt;
- u32 val32;
-
- /* hold counter */
- rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
-
- cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
- cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
-
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
- ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
- dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
-
- dm_info->cck_fa_cnt = cck_fa_cnt;
- dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
- dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
-
- dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
- dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
- crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
- dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
- dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
- crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
- dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
- dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
- dm_info->vht_err_cnt = 0;
- dm_info->vht_ok_cnt = 0;
-
- val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
- dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
- u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
- dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
-
- /* reset counter */
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
- rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
- rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
-}
-
-static const u32 iqk_adda_regs[] = {
- 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
- 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec
-};
-
-static const u32 iqk_mac8_regs[] = {0x522, 0x550, 0x551};
-static const u32 iqk_mac32_regs[] = {0x40};
-
-static const u32 iqk_bb_regs[] = {
- 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04
-};
-
-#define IQK_ADDA_REG_NUM ARRAY_SIZE(iqk_adda_regs)
-#define IQK_MAC8_REG_NUM ARRAY_SIZE(iqk_mac8_regs)
-#define IQK_MAC32_REG_NUM ARRAY_SIZE(iqk_mac32_regs)
-#define IQK_BB_REG_NUM ARRAY_SIZE(iqk_bb_regs)
-
-struct iqk_backup_regs {
- u32 adda[IQK_ADDA_REG_NUM];
- u8 mac8[IQK_MAC8_REG_NUM];
- u32 mac32[IQK_MAC32_REG_NUM];
- u32 bb[IQK_BB_REG_NUM];
-
- u32 lte_path;
- u32 lte_gnt;
-
- u32 bb_sel_btg;
- u8 btg_sel;
-
- u8 igia;
- u8 igib;
-};
-
-static void rtw8723d_iqk_backup_regs(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- backup->adda[i] = rtw_read32(rtwdev, iqk_adda_regs[i]);
-
- for (i = 0; i < IQK_MAC8_REG_NUM; i++)
- backup->mac8[i] = rtw_read8(rtwdev, iqk_mac8_regs[i]);
- for (i = 0; i < IQK_MAC32_REG_NUM; i++)
- backup->mac32[i] = rtw_read32(rtwdev, iqk_mac32_regs[i]);
-
- for (i = 0; i < IQK_BB_REG_NUM; i++)
- backup->bb[i] = rtw_read32(rtwdev, iqk_bb_regs[i]);
-
- backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0);
- backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0);
-
- backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG);
-}
-
-static void rtw8723d_iqk_restore_regs(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *backup)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_adda_regs[i], backup->adda[i]);
-
- for (i = 0; i < IQK_MAC8_REG_NUM; i++)
- rtw_write8(rtwdev, iqk_mac8_regs[i], backup->mac8[i]);
- for (i = 0; i < IQK_MAC32_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_mac32_regs[i], backup->mac32[i]);
-
- for (i = 0; i < IQK_BB_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_bb_regs[i], backup->bb[i]);
-
- rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
- rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia);
-
- rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50);
- rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib);
-
- rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00);
- rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00);
-}
-
-static void rtw8723d_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
- backup->btg_sel);
-}
-
-static void rtw8723d_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
-{
- rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
- rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
-}
-
-static void rtw8723d_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *backup)
-{
- rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
- rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
-}
-
-static void rtw8723d_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
- mdelay(1);
- backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
- backup->lte_gnt);
-}
-
-static void rtw8723d_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev)
-{
- rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, 0x0000ff00);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
- rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, BIT_LTE_MUX_CTRL_PATH, 0x1);
-}
-
-static void rtw8723d_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *bak)
-{
- rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
- rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
-}
-
struct rtw_8723d_iqk_cfg {
const char *name;
u32 val_bb_sel_btg;
@@ -930,6 +532,8 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
return 0;
}
+#define IQK_LTE_WRITE_VAL_8723D 0x0000ff00
+
static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
const struct rtw_8723d_iqk_cfg *iqk_cfg)
{
@@ -937,7 +541,7 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
/* enter IQK mode */
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
- rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D);
rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0054);
mdelay(1);
@@ -959,9 +563,9 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
- rtw8723d_iqk_restore_lte_path_gnt(rtwdev, backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup);
rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg);
/* leave IQK mode */
@@ -974,7 +578,7 @@ static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev,
static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u8 status;
@@ -1033,7 +637,7 @@ static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev,
static u8 rtw8723d_iqk_rx_path(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u32 tx_x, tx_y;
u8 status;
@@ -1220,14 +824,6 @@ void rtw8723d_iqk_fill_s0_matrix(struct rtw_dev *rtwdev, const s32 result[])
result[IQK_S0_RX_Y]);
}
-static void rtw8723d_iqk_path_adda_on(struct rtw_dev *rtwdev)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_adda_regs[i], 0x03c00016);
-}
-
static void rtw8723d_iqk_config_mac(struct rtw_dev *rtwdev)
{
rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
@@ -1245,70 +841,14 @@ void rtw8723d_iqk_rf_standby(struct rtw_dev *rtwdev, enum rtw_rf_path path)
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
}
-static
-bool rtw8723d_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
- u8 c1, u8 c2)
-{
- u32 i, j, diff;
- u32 bitmap = 0;
- u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID};
- bool ret = true;
-
- s32 tmp1, tmp2;
-
- for (i = 0; i < IQK_NR; i++) {
- tmp1 = iqkxy_to_s32(result[c1][i]);
- tmp2 = iqkxy_to_s32(result[c2][i]);
-
- diff = abs(tmp1 - tmp2);
-
- if (diff <= MAX_TOLERANCE)
- continue;
-
- if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) {
- if (result[c1][i] + result[c1][i + 1] == 0)
- candidate[i / IQK_SX_NR] = c2;
- else if (result[c2][i] + result[c2][i + 1] == 0)
- candidate[i / IQK_SX_NR] = c1;
- else
- bitmap |= BIT(i);
- } else {
- bitmap |= BIT(i);
- }
- }
-
- if (bitmap != 0)
- goto check_sim;
-
- for (i = 0; i < PATH_NR; i++) {
- if (candidate[i] == IQK_ROUND_INVALID)
- continue;
-
- for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++)
- result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j];
- ret = false;
- }
-
- return ret;
-
-check_sim:
- for (i = 0; i < IQK_NR; i++) {
- j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */
- if (bitmap & GENMASK(j + 1, j))
- continue;
-
- result[IQK_ROUND_HYBRID][i] = result[c1][i];
- }
-
- return false;
-}
+#define ADDA_ON_VAL_8723D 0x03c00016
static
-void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path)
+void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723x_path path)
{
if (path == PATH_S0) {
rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_A);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
}
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
@@ -1317,13 +857,13 @@ void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path)
if (path == PATH_S1) {
rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_B);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
}
}
static
void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u32 i;
u8 s1_ok, s0_ok;
@@ -1331,7 +871,7 @@ void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
rtw8723d_iqk_config_mac(rtwdev);
rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf);
rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05611);
@@ -1427,7 +967,7 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s32 result[IQK_ROUND_SIZE][IQK_NR];
- struct iqk_backup_regs backup;
+ struct rtw8723x_iqk_backup_regs backup;
u8 i, j;
u8 final_candidate = IQK_ROUND_INVALID;
bool good;
@@ -1436,23 +976,23 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev)
memset(result, 0, sizeof(result));
- rtw8723d_iqk_backup_path_ctrl(rtwdev, &backup);
- rtw8723d_iqk_backup_lte_path_gnt(rtwdev, &backup);
- rtw8723d_iqk_backup_regs(rtwdev, &backup);
+ rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_backup_regs(rtwdev, &backup);
for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) {
- rtw8723d_iqk_config_path_ctrl(rtwdev);
- rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+ rtw8723x_iqk_config_path_ctrl(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D);
rtw8723d_iqk_one_round(rtwdev, result, i, &backup);
if (i > IQK_ROUND_0)
- rtw8723d_iqk_restore_regs(rtwdev, &backup);
- rtw8723d_iqk_restore_lte_path_gnt(rtwdev, &backup);
- rtw8723d_iqk_restore_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_restore_regs(rtwdev, &backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup);
for (j = IQK_ROUND_0; j < i; j++) {
- good = rtw8723d_iqk_similarity_cmp(rtwdev, result, j, i);
+ good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i);
if (good) {
final_candidate = j;
@@ -1546,26 +1086,6 @@ static void rtw8723d_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
}
/* for coex */
-static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev)
-{
- /* enable TBTT nterrupt */
- rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
-
- /* BT report packet sample rate */
- /* 0x790[5:0]=0x5 */
- rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5);
-
- /* enable BT counter statistics */
- rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
-
- /* enable PTA (3-wire function form BT side) */
- rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
- rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS);
-
- /* enable PTA (tx/rx signal form WiFi side) */
- rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
-}
-
static void rtw8723d_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
{
}
@@ -1671,39 +1191,6 @@ static void rtw8723d_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
-static u8 rtw8723d_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- u8 tx_rate = dm_info->tx_rate;
- u8 limit_ofdm = 30;
-
- switch (tx_rate) {
- case DESC_RATE1M...DESC_RATE5_5M:
- case DESC_RATE11M:
- break;
- case DESC_RATE6M...DESC_RATE48M:
- limit_ofdm = 36;
- break;
- case DESC_RATE54M:
- limit_ofdm = 34;
- break;
- case DESC_RATEMCS0...DESC_RATEMCS2:
- limit_ofdm = 38;
- break;
- case DESC_RATEMCS3...DESC_RATEMCS4:
- limit_ofdm = 36;
- break;
- case DESC_RATEMCS5...DESC_RATEMCS7:
- limit_ofdm = 34;
- break;
- default:
- rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate);
- break;
- }
-
- return limit_ofdm;
-}
-
static void rtw8723d_set_iqk_matrix_by_result(struct rtw_dev *rtwdev,
u32 ofdm_swing, u8 rf_path)
{
@@ -1845,7 +1332,7 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
s8 final_ofdm_swing_index;
s8 final_cck_swing_index;
- limit_ofdm = rtw8723d_pwrtrack_get_limit_ofdm(rtwdev);
+ limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev);
final_ofdm_swing_index = RTW_DEF_OFDM_SWING_INDEX +
dm_info->delta_power_index[path];
@@ -1873,26 +1360,6 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
-static void rtw8723d_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
- u8 delta)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
- const s8 *pwrtrk_xtal;
- s8 xtal_cap;
-
- if (dm_info->thermal_avg[therm_path] >
- rtwdev->efuse.thermal_meter[therm_path])
- pwrtrk_xtal = tbl->pwrtrk_xtal_p;
- else
- pwrtrk_xtal = tbl->pwrtrk_xtal_n;
-
- xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
- xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F);
- rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
- xtal_cap | (xtal_cap << 6));
-}
-
static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -1912,7 +1379,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev);
if (do_iqk)
- rtw8723d_lck(rtwdev);
+ rtw8723x_lck(rtwdev);
if (dm_info->pwr_trk_init_trigger)
dm_info->pwr_trk_init_trigger = false;
@@ -1937,7 +1404,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
rtw8723d_pwrtrack_set(rtwdev, path);
}
- rtw8723d_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
+ rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
iqk:
if (do_iqk)
@@ -1963,49 +1430,29 @@ static void rtw8723d_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
-static void rtw8723d_fill_txdesc_checksum(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- u8 *txdesc)
-{
- size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */
- __le16 chksum = 0;
- __le16 *data = (__le16 *)(txdesc);
- struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc;
-
- le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM);
-
- while (words--)
- chksum ^= *data++;
-
- chksum = ~chksum;
-
- le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum),
- RTW_TX_DESC_W7_TXDESC_CHECKSUM);
-}
-
static struct rtw_chip_ops rtw8723d_ops = {
.phy_set_param = rtw8723d_phy_set_param,
- .read_efuse = rtw8723d_read_efuse,
+ .read_efuse = rtw8723x_read_efuse,
.query_rx_desc = rtw8723d_query_rx_desc,
.set_channel = rtw8723d_set_channel,
- .mac_init = rtw8723d_mac_init,
+ .mac_init = rtw8723x_mac_init,
.shutdown = rtw8723d_shutdown,
.read_rf = rtw_phy_read_rf_sipi,
.write_rf = rtw_phy_write_rf_reg_sipi,
- .set_tx_power_index = rtw8723d_set_tx_power_index,
+ .set_tx_power_index = rtw8723x_set_tx_power_index,
.set_antenna = NULL,
- .cfg_ldo25 = rtw8723d_cfg_ldo25,
- .efuse_grant = rtw8723d_efuse_grant,
- .false_alarm_statistics = rtw8723d_false_alarm_statistics,
+ .cfg_ldo25 = rtw8723x_cfg_ldo25,
+ .efuse_grant = rtw8723x_efuse_grant,
+ .false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8723d_phy_calibration,
.cck_pd_set = rtw8723d_phy_cck_pd_set,
.pwr_track = rtw8723d_pwr_track,
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
- .fill_txdesc_checksum = rtw8723d_fill_txdesc_checksum,
+ .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum,
- .coex_set_init = rtw8723d_coex_cfg_init,
+ .coex_set_init = rtw8723x_coex_cfg_init,
.coex_set_ant_switch = NULL,
.coex_set_gnt_fix = rtw8723d_coex_cfg_gnt_fix,
.coex_set_gnt_debug = rtw8723d_coex_cfg_gnt_debug,
@@ -2592,22 +2039,6 @@ static const struct rtw_rqpn rqpn_table_8723d[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
-static const struct rtw_prioq_addrs prioq_addrs_8723d = {
- .prio[RTW_DMA_MAPPING_EXTRA] = {
- .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3,
- },
- .prio[RTW_DMA_MAPPING_LOW] = {
- .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1,
- },
- .prio[RTW_DMA_MAPPING_NORMAL] = {
- .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1,
- },
- .prio[RTW_DMA_MAPPING_HIGH] = {
- .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2,
- },
- .wsize = false,
-};
-
static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = {
{0x0008, 0x4a22,
RTW_IP_SEL_PHY,
@@ -2628,28 +2059,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8723d = {
.n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d),
};
-static const struct rtw_hw_reg rtw8723d_dig[] = {
- [0] = { .addr = 0xc50, .mask = 0x7f },
- [1] = { .addr = 0xc50, .mask = 0x7f },
-};
-
-static const struct rtw_hw_reg rtw8723d_dig_cck[] = {
- [0] = { .addr = 0xa0c, .mask = 0x3f00 },
-};
-
-static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = {
- [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
- .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
- [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
- .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
-};
-
-static const struct rtw_ltecoex_addr rtw8723d_ltecoex_addr = {
- .ctrl = REG_LTECOEX_CTRL,
- .wdata = REG_LTECOEX_WRITE_DATA,
- .rdata = REG_LTECOEX_READ_DATA,
-};
-
static const struct rtw_rfe_def rtw8723d_rfe_defs[] = {
[0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl,
.txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,},
@@ -2770,14 +2179,14 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.pwr_off_seq = card_disable_flow_8723d,
.page_table = page_table_8723d,
.rqpn_table = rqpn_table_8723d,
- .prioq_addrs = &prioq_addrs_8723d,
+ .prioq_addrs = &rtw8723x_common.prioq_addrs,
.intf_table = &phy_para_table_8723d,
- .dig = rtw8723d_dig,
- .dig_cck = rtw8723d_dig_cck,
+ .dig = rtw8723x_common.dig,
+ .dig_cck = rtw8723x_common.dig_cck,
.rf_sipi_addr = {0x840, 0x844},
- .rf_sipi_read_addr = rtw8723d_rf_sipi_addr,
+ .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr,
.fix_rf_phy_num = 2,
- .ltecoex_addr = &rtw8723d_ltecoex_addr,
+ .ltecoex_addr = &rtw8723x_common.ltecoex_addr,
.mac_tbl = &rtw8723d_mac_tbl,
.agc_tbl = &rtw8723d_agc_tbl,
.bb_tbl = &rtw8723d_bb_tbl,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 2434e2480cbe..fba06c9f480e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -5,90 +5,7 @@
#ifndef __RTW8723D_H__
#define __RTW8723D_H__
-enum rtw8723d_path {
- PATH_S1,
- PATH_S0,
- PATH_NR,
-};
-
-enum rtw8723d_iqk_round {
- IQK_ROUND_0,
- IQK_ROUND_1,
- IQK_ROUND_2,
- IQK_ROUND_HYBRID,
- IQK_ROUND_SIZE,
- IQK_ROUND_INVALID = 0xff,
-};
-
-enum rtw8723d_iqk_result {
- IQK_S1_TX_X,
- IQK_S1_TX_Y,
- IQK_S1_RX_X,
- IQK_S1_RX_Y,
- IQK_S0_TX_X,
- IQK_S0_TX_Y,
- IQK_S0_RX_X,
- IQK_S0_RX_Y,
- IQK_NR,
- IQK_SX_NR = IQK_NR / PATH_NR,
-};
-
-struct rtw8723de_efuse {
- u8 mac_addr[ETH_ALEN]; /* 0xd0 */
- u8 vender_id[2];
- u8 device_id[2];
- u8 sub_vender_id[2];
- u8 sub_device_id[2];
-};
-
-struct rtw8723du_efuse {
- u8 res4[48]; /* 0xd0 */
- u8 vender_id[2]; /* 0x100 */
- u8 product_id[2]; /* 0x102 */
- u8 usb_option; /* 0x104 */
- u8 res5[2]; /* 0x105 */
- u8 mac_addr[ETH_ALEN]; /* 0x107 */
-};
-
-struct rtw8723ds_efuse {
- u8 res4[0x4a]; /* 0xd0 */
- u8 mac_addr[ETH_ALEN]; /* 0x11a */
-};
-
-struct rtw8723d_efuse {
- __le16 rtl_id;
- u8 rsvd[2];
- u8 afe;
- u8 rsvd1[11];
-
- /* power index for four RF paths */
- struct rtw_txpwr_idx txpwr_idx_table[4];
-
- u8 channel_plan; /* 0xb8 */
- u8 xtal_k;
- u8 thermal_meter;
- u8 iqk_lck;
- u8 pa_type; /* 0xbc */
- u8 lna_type_2g[2]; /* 0xbd */
- u8 lna_type_5g[2];
- u8 rf_board_option;
- u8 rf_feature_option;
- u8 rf_bt_setting;
- u8 eeprom_version;
- u8 eeprom_customer_id;
- u8 tx_bb_swing_setting_2g;
- u8 res_c7;
- u8 tx_pwr_calibrate_rate;
- u8 rf_antenna_option; /* 0xc9 */
- u8 rfe_option;
- u8 country_code[2];
- u8 res[3];
- union {
- struct rtw8723de_efuse e;
- struct rtw8723du_efuse u;
- struct rtw8723ds_efuse s;
- };
-};
+#include "rtw8723x.h"
extern const struct rtw_chip_info rtw8723d_hw_spec;
@@ -114,193 +31,9 @@ extern const struct rtw_chip_info rtw8723d_hw_spec;
#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
-static inline s32 iqkxy_to_s32(s32 val)
-{
- /* val is Q10.8 */
- return sign_extend32(val, 9);
-}
-
-static inline s32 iqk_mult(s32 x, s32 y, s32 *ext)
-{
- /* x, y and return value are Q10.8 */
- s32 t;
-
- t = x * y;
- if (ext)
- *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */
-
- return (t >> 8); /* Q.16 --> Q.8 */
-}
-
-#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing)
-#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing)
-#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing)
-#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing)
#define RTW_DEF_OFDM_SWING_INDEX 28
#define RTW_DEF_CCK_SWING_INDEX 28
-#define MAX_TOLERANCE 5
-#define IQK_TX_X_ERR 0x142
-#define IQK_TX_Y_ERR 0x42
-#define IQK_RX_X_UPPER 0x11a
-#define IQK_RX_X_LOWER 0xe6
-#define IQK_RX_Y_LMT 0x1a
-#define IQK_TX_OK BIT(0)
-#define IQK_RX_OK BIT(1)
-#define PATH_IQK_RETRY 2
-
-#define SPUR_THRES 0x16
#define CCK_DFIR_NR 3
-#define DIS_3WIRE 0xccf000c0
-#define EN_3WIRE 0xccc000c0
-#define START_PSD 0x400000
-#define FREQ_CH13 0xfccd
-#define FREQ_CH14 0xff9a
-#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
-#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
-#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
-#define RFCFGCH_BW_40M BIT(10)
-#define BIT_MASK_RFMOD BIT(0)
-#define BIT_LCK BIT(15)
-
-#define REG_GPIO_INTM 0x0048
-#define REG_BTG_SEL 0x0067
-#define BIT_MASK_BTG_WL BIT(7)
-#define REG_LTECOEX_PATH_CONTROL 0x0070
-#define REG_LTECOEX_CTRL 0x07c0
-#define REG_LTECOEX_WRITE_DATA 0x07c4
-#define REG_LTECOEX_READ_DATA 0x07c8
-#define REG_PSDFN 0x0808
-#define REG_BB_PWR_SAV1_11N 0x0874
-#define REG_ANA_PARAM1 0x0880
-#define REG_ANALOG_P4 0x088c
-#define REG_PSDRPT 0x08b4
-#define REG_FPGA1_RFMOD 0x0900
-#define REG_BB_SEL_BTG 0x0948
-#define REG_BBRX_DFIR 0x0954
-#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
-#define BIT_RXBB_DFIR_EN BIT(19)
-#define REG_CCK0_SYS 0x0a00
-#define BIT_CCK_SIDE_BAND BIT(4)
-#define REG_CCK_ANT_SEL_11N 0x0a04
-#define REG_PWRTH 0x0a08
-#define REG_CCK_FA_RST_11N 0x0a2c
-#define BIT_MASK_CCK_CNT_KEEP BIT(12)
-#define BIT_MASK_CCK_CNT_EN BIT(13)
-#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
-#define BIT_MASK_CCK_FA_KEEP BIT(14)
-#define BIT_MASK_CCK_FA_EN BIT(15)
-#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
-#define REG_CCK_FA_LSB_11N 0x0a5c
-#define REG_CCK_FA_MSB_11N 0x0a58
-#define REG_CCK_CCA_CNT_11N 0x0a60
-#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
-#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
-#define REG_PWRTH2 0x0aa8
-#define REG_CSRATIO 0x0aaa
-#define REG_OFDM_FA_HOLDC_11N 0x0c00
-#define BIT_MASK_OFDM_FA_KEEP BIT(31)
-#define REG_BB_RX_PATH_11N 0x0c04
-#define REG_TRMUX_11N 0x0c08
-#define REG_OFDM_FA_RSTC_11N 0x0c0c
-#define BIT_MASK_OFDM_FA_RST BIT(31)
-#define REG_A_RXIQI 0x0c14
-#define BIT_MASK_RXIQ_S1_X 0x000003FF
-#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00
-#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F)
-#define REG_OFDM0_RXDSP 0x0c40
-#define BIT_MASK_RXDSP GENMASK(28, 24)
-#define BIT_EN_RXDSP BIT(9)
-#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c
-#define BIT_MASK_OFDM0_EXT_A BIT(31)
-#define BIT_MASK_OFDM0_EXT_C BIT(29)
-#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28))
-#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28))
-#define REG_OFDM0_XAAGC1 0x0c50
-#define REG_OFDM0_XBAGC1 0x0c58
-#define REG_AGCRSSI 0x0c78
-#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80
-#define BIT_MASK_TXIQ_ELM_A 0x03ff
-#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \
- ((a) & 0x03ff))
-#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16)
-#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F)
-#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22)
-#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94
-#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6)
-#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0
-#define BIT_MASK_RXIQ_S1_Y2 0xF0000000
-#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF)
-#define REG_TXIQ_AB_S0 0x0cd0
-#define BIT_MASK_TXIQ_A_S0 0x000007FE
-#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0)
-#define BIT_MASK_TXIQ_B_S0 0x0007E000
-#define REG_TXIQ_CD_S0 0x0cd4
-#define BIT_MASK_TXIQ_C_S0 0x000007FE
-#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0)
-#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13)
-#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12)
-#define REG_RXIQ_AB_S0 0x0cd8
-#define BIT_MASK_RXIQ_X_S0 0x000003FF
-#define BIT_MASK_RXIQ_Y_S0 0x003FF000
-#define REG_OFDM_FA_TYPE1_11N 0x0cf0
-#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_RSTD_11N 0x0d00
-#define BIT_MASK_OFDM_FA_RST1 BIT(27)
-#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
-#define REG_CTX 0x0d03
-#define BIT_MASK_CTX_TYPE GENMASK(6, 4)
-#define REG_OFDM1_CFOTRK 0x0d2c
-#define BIT_EN_CFOTRK BIT(28)
-#define REG_OFDM1_CSI1 0x0d40
-#define REG_OFDM1_CSI2 0x0d44
-#define REG_OFDM1_CSI3 0x0d48
-#define REG_OFDM1_CSI4 0x0d4c
-#define REG_OFDM_FA_TYPE2_11N 0x0da0
-#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_TYPE3_11N 0x0da4
-#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_TYPE4_11N 0x0da8
-#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
-#define REG_FPGA0_IQK_11N 0x0e28
-#define BIT_MASK_IQK_MOD 0xffffff00
-#define EN_IQK 0x808000
-#define RST_IQK 0x000000
-#define REG_TXIQK_TONE_A_11N 0x0e30
-#define REG_RXIQK_TONE_A_11N 0x0e34
-#define REG_TXIQK_PI_A_11N 0x0e38
-#define REG_RXIQK_PI_A_11N 0x0e3c
-#define REG_TXIQK_11N 0x0e40
-#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y))
-#define REG_RXIQK_11N 0x0e44
-#define REG_IQK_AGC_PTS_11N 0x0e48
-#define REG_IQK_AGC_RSP_11N 0x0e4c
-#define REG_TX_IQK_TONE_B 0x0e50
-#define REG_RX_IQK_TONE_B 0x0e54
-#define REG_IQK_RES_TX 0x0e94
-#define BIT_MASK_RES_TX GENMASK(25, 16)
-#define REG_IQK_RES_TY 0x0e9c
-#define BIT_MASK_RES_TY GENMASK(25, 16)
-#define REG_IQK_RES_RX 0x0ea4
-#define BIT_MASK_RES_RX GENMASK(25, 16)
-#define REG_IQK_RES_RY 0x0eac
-#define BIT_IQK_TX_FAIL BIT(28)
-#define BIT_IQK_RX_FAIL BIT(27)
-#define BIT_IQK_DONE BIT(26)
-#define BIT_MASK_RES_RY GENMASK(25, 16)
-#define REG_PAGE_F_RST_11N 0x0f14
-#define BIT_MASK_F_RST_ALL BIT(16)
-#define REG_IGI_C_11N 0x0f84
-#define REG_IGI_D_11N 0x0f88
-#define REG_HT_CRC32_CNT_11N 0x0f90
-#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
-#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
-#define REG_OFDM_CRC32_CNT_11N 0x0f94
-#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
-#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
-#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
new file mode 100644
index 000000000000..0d0b6c2cb9aa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright 2024 Fiona Klute
+ *
+ * Based on code originally in rtw8723d.[ch],
+ * Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+#include "tx.h"
+#include "rtw8723x.h"
+
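+/* TX AGC: per-rate power index register address and bitmask */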
+static const struct rtw_hw_reg rtw8723x_txagc[] = {
+ [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
+ [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
+ [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
+ [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
+ [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
+ [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
+ [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
+ [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
+ [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
+ [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
+ [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
+ [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
+ [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
+ [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
+ [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
+ [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
+ [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
+ [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
+ [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
+ [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
+};
+
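+/* LCK calibration: set BIT_LCK in RF_CFGCH, then poll until the RF clears it */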
+static void __rtw8723x_lck(struct rtw_dev *rtwdev)
+{
+ u32 lc_cal;
+ u8 val_ctx, rf_val;
+ int ret;
+
+ val_ctx = rtw_read8(rtwdev, REG_CTX);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xFF);
+ lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK);
+
+ ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1,
+ 10000, 1000000, false,
+ rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK);
+ if (ret)
+ rtw_warn(rtwdev, "failed to poll LCK status bit\n");
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x00);
+}
+
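+/* helpers to log one- and two-byte efuse fields by name */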
+#define DBG_EFUSE_VAL(rtwdev, map, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x\n", \
+ (map)->name)
+#define DBG_EFUSE_2BYTE(rtwdev, map, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \
+ (map)->name[0], (map)->name[1])
+
+static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->e.mac_addr);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.device_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.sub_vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.sub_device_id);
+}
+
+static void rtw8723xu_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ DBG_EFUSE_2BYTE(rtwdev, map, u.vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, u.product_id);
+ DBG_EFUSE_VAL(rtwdev, map, u.usb_option);
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->u.mac_addr);
+}
+
+static void rtw8723xs_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->s.mac_addr);
+}
+
+static void __rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count)
+{
+ if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "Power index table (2.4G):\n");
+ /* CCK base */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK base\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4 G5\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %3u %3u %3u %3u %3u %3u\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.cck_base[0],
+ table[i].pwr_idx_2g.cck_base[1],
+ table[i].pwr_idx_2g.cck_base[2],
+ table[i].pwr_idx_2g.cck_base[3],
+ table[i].pwr_idx_2g.cck_base[4],
+ table[i].pwr_idx_2g.cck_base[5]);
+ /* CCK diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i, 0 /* no diff for 1S */,
+ table[i].pwr_idx_2g.ht_2s_diff.cck,
+ table[i].pwr_idx_2g.ht_3s_diff.cck,
+ table[i].pwr_idx_2g.ht_4s_diff.cck);
+ /* BW40-1S base */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40-1S base\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %3u %3u %3u %3u %3u\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.bw40_base[0],
+ table[i].pwr_idx_2g.bw40_base[1],
+ table[i].pwr_idx_2g.bw40_base[2],
+ table[i].pwr_idx_2g.bw40_base[3],
+ table[i].pwr_idx_2g.bw40_base[4]);
+ /* OFDM diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "OFDM diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.ht_1s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_2s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_3s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_4s_diff.ofdm);
+ /* BW20 diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW20 diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.ht_1s_diff.bw20,
+ table[i].pwr_idx_2g.ht_2s_diff.bw20,
+ table[i].pwr_idx_2g.ht_3s_diff.bw20,
+ table[i].pwr_idx_2g.ht_4s_diff.bw20);
+ /* BW40 diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40 diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i, 0 /* no diff for 1S */,
+ table[i].pwr_idx_2g.ht_2s_diff.bw40,
+ table[i].pwr_idx_2g.ht_3s_diff.bw40,
+ table[i].pwr_idx_2g.ht_4s_diff.bw40);
+}
+
+static void efuse_debug_dump(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "EFUSE raw logical map:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
+ (u8 *)map, sizeof(struct rtw8723x_efuse), false);
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Parsed rtw8723x EFUSE data:\n");
+ DBG_EFUSE_VAL(rtwdev, map, rtl_id);
+ DBG_EFUSE_VAL(rtwdev, map, afe);
+ rtw8723x_debug_txpwr_limit(rtwdev, map->txpwr_idx_table, 4);
+ DBG_EFUSE_VAL(rtwdev, map, channel_plan);
+ DBG_EFUSE_VAL(rtwdev, map, xtal_k);
+ DBG_EFUSE_VAL(rtwdev, map, thermal_meter);
+ DBG_EFUSE_VAL(rtwdev, map, iqk_lck);
+ DBG_EFUSE_VAL(rtwdev, map, pa_type);
+ DBG_EFUSE_2BYTE(rtwdev, map, lna_type_2g);
+ DBG_EFUSE_2BYTE(rtwdev, map, lna_type_5g);
+ DBG_EFUSE_VAL(rtwdev, map, rf_board_option);
+ DBG_EFUSE_VAL(rtwdev, map, rf_feature_option);
+ DBG_EFUSE_VAL(rtwdev, map, rf_bt_setting);
+ DBG_EFUSE_VAL(rtwdev, map, eeprom_version);
+ DBG_EFUSE_VAL(rtwdev, map, eeprom_customer_id);
+ DBG_EFUSE_VAL(rtwdev, map, tx_bb_swing_setting_2g);
+ DBG_EFUSE_VAL(rtwdev, map, tx_pwr_calibrate_rate);
+ DBG_EFUSE_VAL(rtwdev, map, rf_antenna_option);
+ DBG_EFUSE_VAL(rtwdev, map, rfe_option);
+ DBG_EFUSE_2BYTE(rtwdev, map, country_code);
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723xe_efuse_debug(rtwdev, map);
+ break;
+ case RTW_HCI_TYPE_USB:
+ rtw8723xu_efuse_debug(rtwdev, map);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ rtw8723xs_efuse_debug(rtwdev, map);
+ break;
+ default:
+ /* unsupported now */
+ break;
+ }
+}
+
+static void rtw8723xe_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static void rtw8723xu_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
+}
+
+static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->s.mac_addr);
+}
+
+static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8723x_efuse *map;
+ int i;
+
+ map = (struct rtw8723x_efuse *)log_map;
+ efuse_debug_dump(rtwdev, map);
+
+ efuse->rfe_option = 0;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[0] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->afe = map->afe;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723xe_efuse_parsing(efuse, map);
+ break;
+ case RTW_HCI_TYPE_USB:
+ rtw8723xu_efuse_parsing(efuse, map);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ rtw8723xs_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#define BIT_CFENDFORM BIT(9)
+#define BIT_WMAC_TCR_ERR0 BIT(12)
+#define BIT_WMAC_TCR_ERR1 BIT(13)
+#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
+ BIT_WMAC_TCR_ERR1)
+#define WLAN_RX_FILTER0 0xFFFF
+#define WLAN_RX_FILTER1 0x400
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0x700060CE
+
+static int __rtw8723x_mac_init(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+
+ rtw_write32(rtwdev, REG_INT_MIG, 0);
+ rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+ rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
+
+ return 0;
+}
+
+static void __rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ if (enable) {
+ ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
+ ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
+ } else {
+ ldo_pwr &= ~BIT_LDO25_EN;
+ }
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
+static void
+rtw8723x_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *txagc;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+
+ if (rate >= ARRAY_SIZE(rtw8723x_txagc)) {
+ rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
+ continue;
+ }
+ txagc = &rtw8723x_txagc[rate];
+ if (!txagc->addr) {
+ rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
+ continue;
+ }
+
+ rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
+ }
+}
+
+static void __rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
+ rtw8723x_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static void __rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
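+ /* enable the eFuse loader (ELDR) block and its clocks */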
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
+static void __rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
+ u32 val32;
+
+ /* hold counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
+
+ cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
+ cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
+
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
+ ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
+ dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
+
+ dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
+ dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
+ crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
+ crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
+ dm_info->vht_err_cnt = 0;
+ dm_info->vht_ok_cnt = 0;
+
+ val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
+ dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
+ u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
+ dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
+
+ /* reset counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
+}
+
+/* IQK (IQ calibration) */
+
+static
+void __rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ backup->adda[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_adda_regs[i]);
+
+ for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ backup->mac8[i] = rtw_read8(rtwdev,
+ rtw8723x_common.iqk_mac8_regs[i]);
+ for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++)
+ backup->mac32[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_mac32_regs[i]);
+
+ for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++)
+ backup->bb[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_bb_regs[i]);
+
+ backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0);
+ backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0);
+
+ backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG);
+}
+
+static
+void __rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i],
+ backup->adda[i]);
+
+ for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i],
+ backup->mac8[i]);
+ for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_mac32_regs[i],
+ backup->mac32[i]);
+
+ for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_bb_regs[i],
+ backup->bb[i]);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib);
+
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00);
+}
+
+static
+bool __rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev,
+ s32 result[][IQK_NR],
+ u8 c1, u8 c2)
+{
+ u32 i, j, diff;
+ u32 bitmap = 0;
+ u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID};
+ bool ret = true;
+
+ s32 tmp1, tmp2;
+
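+ /* Flag entries of rounds c1 and c2 that differ by more than
+ * MAX_TOLERANCE; for the RX X entries, a round whose X/Y pair sums
+ * to zero is dropped and the other round becomes the hybrid candidate.
+ */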
+ for (i = 0; i < IQK_NR; i++) {
+ tmp1 = iqkxy_to_s32(result[c1][i]);
+ tmp2 = iqkxy_to_s32(result[c2][i]);
+
+ diff = abs(tmp1 - tmp2);
+
+ if (diff <= MAX_TOLERANCE)
+ continue;
+
+ if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c1;
+ else
+ bitmap |= BIT(i);
+ } else {
+ bitmap |= BIT(i);
+ }
+ }
+
+ if (bitmap != 0)
+ goto check_sim;
+
+ for (i = 0; i < PATH_NR; i++) {
+ if (candidate[i] == IQK_ROUND_INVALID)
+ continue;
+
+ for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++)
+ result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j];
+ ret = false;
+ }
+
+ return ret;
+
+check_sim:
+ for (i = 0; i < IQK_NR; i++) {
+ j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */
+ if (bitmap & GENMASK(j + 1, j))
+ continue;
+
+ result[IQK_ROUND_HYBRID][i] = result[c1][i];
+ }
+
+ return false;
+}
+
+static u8 __rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 limit_ofdm = 30;
+
+ switch (tx_rate) {
+ case DESC_RATE1M...DESC_RATE5_5M:
+ case DESC_RATE11M:
+ break;
+ case DESC_RATE6M...DESC_RATE48M:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATE54M:
+ limit_ofdm = 34;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS2:
+ limit_ofdm = 38;
+ break;
+ case DESC_RATEMCS3...DESC_RATEMCS4:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATEMCS5...DESC_RATEMCS7:
+ limit_ofdm = 34;
+ break;
+ default:
+ rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate);
+ break;
+ }
+
+ return limit_ofdm;
+}
+
+static
+void __rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ const s8 *pwrtrk_xtal;
+ s8 xtal_cap;
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ pwrtrk_xtal = tbl->pwrtrk_xtal_p;
+ else
+ pwrtrk_xtal = tbl->pwrtrk_xtal_n;
+
+ xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F);
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+}
+
+static
+void __rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+ size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */
+ __le16 chksum = 0;
+ __le16 *data = (__le16 *)(txdesc);
+ struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc;
+
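+ /* zero the checksum field so it is not included in the XOR below */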
+ le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM);
+
+ while (words--)
+ chksum ^= *data++;
+
+ chksum = ~chksum;
+
+ le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum),
+ RTW_TX_DESC_W7_TXDESC_CHECKSUM);
+}
+
+static void __rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS);
+
+ /* enable PTA (tx/rx signal from WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+}
+
+const struct rtw8723x_common rtw8723x_common = {
+ .iqk_adda_regs = {
+ 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec
+ },
+ .iqk_mac8_regs = {0x522, 0x550, 0x551},
+ .iqk_mac32_regs = {0x40},
+ .iqk_bb_regs = {
+ 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04
+ },
+
+ .ltecoex_addr = {
+ .ctrl = REG_LTECOEX_CTRL,
+ .wdata = REG_LTECOEX_WRITE_DATA,
+ .rdata = REG_LTECOEX_READ_DATA,
+ },
+ .rf_sipi_addr = {
+ [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
+ .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
+ [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
+ .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
+ },
+ .dig = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xc50, .mask = 0x7f },
+ },
+ .dig_cck = {
+ [0] = { .addr = 0xa0c, .mask = 0x3f00 },
+ },
+ .prioq_addrs = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2,
+ },
+ .wsize = false,
+ },
+
+ .lck = __rtw8723x_lck,
+ .read_efuse = __rtw8723x_read_efuse,
+ .mac_init = __rtw8723x_mac_init,
+ .cfg_ldo25 = __rtw8723x_cfg_ldo25,
+ .set_tx_power_index = __rtw8723x_set_tx_power_index,
+ .efuse_grant = __rtw8723x_efuse_grant,
+ .false_alarm_statistics = __rtw8723x_false_alarm_statistics,
+ .iqk_backup_regs = __rtw8723x_iqk_backup_regs,
+ .iqk_restore_regs = __rtw8723x_iqk_restore_regs,
+ .iqk_similarity_cmp = __rtw8723x_iqk_similarity_cmp,
+ .pwrtrack_get_limit_ofdm = __rtw8723x_pwrtrack_get_limit_ofdm,
+ .pwrtrack_set_xtal = __rtw8723x_pwrtrack_set_xtal,
+ .coex_cfg_init = __rtw8723x_coex_cfg_init,
+ .fill_txdesc_checksum = __rtw8723x_fill_txdesc_checksum,
+ .debug_txpwr_limit = __rtw8723x_debug_txpwr_limit,
+};
+EXPORT_SYMBOL(rtw8723x_common);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Common functions for Realtek 802.11n wireless 8723x drivers");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
new file mode 100644
index 000000000000..e93bfce994bf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright 2024 Fiona Klute
+ *
+ * Based on code originally in rtw8723d.[ch],
+ * Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723X_H__
+#define __RTW8723X_H__
+
+#include "main.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+
+enum rtw8723x_path {
+ PATH_S1,
+ PATH_S0,
+ PATH_NR,
+};
+
+enum rtw8723x_iqk_round {
+ IQK_ROUND_0,
+ IQK_ROUND_1,
+ IQK_ROUND_2,
+ IQK_ROUND_HYBRID,
+ IQK_ROUND_SIZE,
+ IQK_ROUND_INVALID = 0xff,
+};
+
+enum rtw8723x_iqk_result {
+ IQK_S1_TX_X,
+ IQK_S1_TX_Y,
+ IQK_S1_RX_X,
+ IQK_S1_RX_Y,
+ IQK_S0_TX_X,
+ IQK_S0_TX_Y,
+ IQK_S0_RX_X,
+ IQK_S0_RX_Y,
+ IQK_NR,
+ IQK_SX_NR = IQK_NR / PATH_NR,
+};
+
+struct rtw8723xe_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vendor_id[2];
+ u8 device_id[2];
+ u8 sub_vendor_id[2];
+ u8 sub_device_id[2];
+};
+
+struct rtw8723xu_efuse {
+ u8 res4[48]; /* 0xd0 */
+ u8 vendor_id[2]; /* 0x100 */
+ u8 product_id[2]; /* 0x102 */
+ u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
+ u8 mac_addr[ETH_ALEN]; /* 0x107 */
+};
+
+struct rtw8723xs_efuse {
+ u8 res4[0x4a]; /* 0xd0 */
+ u8 mac_addr[ETH_ALEN]; /* 0x11a */
+};
+
+struct rtw8723x_efuse {
+ __le16 rtl_id;
+ u8 rsvd[2];
+ u8 afe;
+ u8 rsvd1[11];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 res_c7;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ union {
+ struct rtw8723xe_efuse e;
+ struct rtw8723xu_efuse u;
+ struct rtw8723xs_efuse s;
+ };
+};
+
+#define RTW8723X_IQK_ADDA_REG_NUM 16
+#define RTW8723X_IQK_MAC8_REG_NUM 3
+#define RTW8723X_IQK_MAC32_REG_NUM 1
+#define RTW8723X_IQK_BB_REG_NUM 9
+
+struct rtw8723x_iqk_backup_regs {
+ u32 adda[RTW8723X_IQK_ADDA_REG_NUM];
+ u8 mac8[RTW8723X_IQK_MAC8_REG_NUM];
+ u32 mac32[RTW8723X_IQK_MAC32_REG_NUM];
+ u32 bb[RTW8723X_IQK_BB_REG_NUM];
+
+ u32 lte_path;
+ u32 lte_gnt;
+
+ u32 bb_sel_btg;
+ u8 btg_sel;
+
+ u8 igia;
+ u8 igib;
+};
+
+struct rtw8723x_common {
+ /* registers that must be backed up before IQK and restored after */
+ u32 iqk_adda_regs[RTW8723X_IQK_ADDA_REG_NUM];
+ u32 iqk_mac8_regs[RTW8723X_IQK_MAC8_REG_NUM];
+ u32 iqk_mac32_regs[RTW8723X_IQK_MAC32_REG_NUM];
+ u32 iqk_bb_regs[RTW8723X_IQK_BB_REG_NUM];
+
+ /* chip register definitions */
+ struct rtw_ltecoex_addr ltecoex_addr;
+ struct rtw_rf_sipi_addr rf_sipi_addr[2];
+ struct rtw_hw_reg dig[2];
+ struct rtw_hw_reg dig_cck[1];
+ struct rtw_prioq_addrs prioq_addrs;
+
+ /* common functions */
+ void (*lck)(struct rtw_dev *rtwdev);
+ int (*read_efuse)(struct rtw_dev *rtwdev, u8 *log_map);
+ int (*mac_init)(struct rtw_dev *rtwdev);
+ void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*set_tx_power_index)(struct rtw_dev *rtwdev);
+ void (*efuse_grant)(struct rtw_dev *rtwdev, bool on);
+ void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
+ void (*iqk_backup_regs)(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup);
+ void (*iqk_restore_regs)(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup);
+ bool (*iqk_similarity_cmp)(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
+ u8 c1, u8 c2);
+ u8 (*pwrtrack_get_limit_ofdm)(struct rtw_dev *rtwdev);
+ void (*pwrtrack_set_xtal)(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta);
+ void (*coex_cfg_init)(struct rtw_dev *rtwdev);
+ void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc);
+ void (*debug_txpwr_limit)(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count);
+};
+
+extern const struct rtw8723x_common rtw8723x_common;
+
+#define PATH_IQK_RETRY 2
+#define MAX_TOLERANCE 5
+#define IQK_TX_X_ERR 0x142
+#define IQK_TX_Y_ERR 0x42
+#define IQK_RX_X_ERR 0x132
+#define IQK_RX_Y_ERR 0x36
+#define IQK_RX_X_UPPER 0x11a
+#define IQK_RX_X_LOWER 0xe6
+#define IQK_RX_Y_LMT 0x1a
+#define IQK_TX_OK BIT(0)
+#define IQK_RX_OK BIT(1)
+
+#define WLAN_TXQ_RPT_EN 0x1F
+
+#define SPUR_THRES 0x16
+#define DIS_3WIRE 0xccf000c0
+#define EN_3WIRE 0xccc000c0
+#define START_PSD 0x400000
+#define FREQ_CH5 0xfccd
+#define FREQ_CH6 0xfc4d
+#define FREQ_CH7 0xffcd
+#define FREQ_CH8 0xff4d
+#define FREQ_CH13 0xfccd
+#define FREQ_CH14 0xff9a
+#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
+#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
+#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
+#define RFCFGCH_BW_40M BIT(10)
+#define BIT_MASK_RFMOD BIT(0)
+#define BIT_LCK BIT(15)
+
+#define REG_GPIO_INTM 0x0048
+#define REG_BTG_SEL 0x0067
+#define BIT_MASK_BTG_WL BIT(7)
+#define REG_LTECOEX_PATH_CONTROL 0x0070
+#define REG_LTECOEX_CTRL 0x07c0
+#define REG_LTECOEX_WRITE_DATA 0x07c4
+#define REG_LTECOEX_READ_DATA 0x07c8
+#define REG_PSDFN 0x0808
+#define REG_BB_PWR_SAV1_11N 0x0874
+#define REG_ANA_PARAM1 0x0880
+#define REG_ANALOG_P4 0x088c
+#define REG_PSDRPT 0x08b4
+#define REG_FPGA1_RFMOD 0x0900
+#define REG_BB_SEL_BTG 0x0948
+#define REG_BBRX_DFIR 0x0954
+#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
+#define BIT_RXBB_DFIR_EN BIT(19)
+#define REG_CCK0_SYS 0x0a00
+#define BIT_CCK_SIDE_BAND BIT(4)
+#define REG_CCK_ANT_SEL_11N 0x0a04
+#define REG_PWRTH 0x0a08
+#define REG_CCK_FA_RST_11N 0x0a2c
+#define BIT_MASK_CCK_CNT_KEEP BIT(12)
+#define BIT_MASK_CCK_CNT_EN BIT(13)
+#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
+#define BIT_MASK_CCK_FA_KEEP BIT(14)
+#define BIT_MASK_CCK_FA_EN BIT(15)
+#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
+#define REG_CCK_FA_LSB_11N 0x0a5c
+#define REG_CCK_FA_MSB_11N 0x0a58
+#define REG_CCK_CCA_CNT_11N 0x0a60
+#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
+#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
+#define REG_PWRTH2 0x0aa8
+#define REG_CSRATIO 0x0aaa
+#define REG_OFDM_FA_HOLDC_11N 0x0c00
+#define BIT_MASK_OFDM_FA_KEEP BIT(31)
+#define REG_BB_RX_PATH_11N 0x0c04
+#define REG_TRMUX_11N 0x0c08
+#define REG_OFDM_FA_RSTC_11N 0x0c0c
+#define BIT_MASK_OFDM_FA_RST BIT(31)
+#define REG_A_RXIQI 0x0c14
+#define BIT_MASK_RXIQ_S1_X 0x000003FF
+#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00
+#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F)
+#define REG_OFDM0_RXDSP 0x0c40
+#define BIT_MASK_RXDSP GENMASK(28, 24)
+#define BIT_EN_RXDSP BIT(9)
+#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c
+#define BIT_MASK_OFDM0_EXT_A BIT(31)
+#define BIT_MASK_OFDM0_EXT_C BIT(29)
+#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28))
+#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28))
+#define BIT_MASK_OFDM0_EXTS_B (BIT(27) | BIT(25) | BIT(24))
+#define BIT_SET_OFDM0_EXTS_B(a, c, d) (((a) << 27) | ((c) << 25) | ((d) << 24))
+#define REG_OFDM0_XAAGC1 0x0c50
+#define REG_OFDM0_XBAGC1 0x0c58
+#define REG_AGCRSSI 0x0c78
+#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80
+#define REG_OFDM_0_XB_TX_IQ_IMBALANCE 0x0c88
+#define BIT_MASK_TXIQ_ELM_A 0x03ff
+#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \
+ ((a) & 0x03ff))
+#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16)
+#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F)
+#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22)
+#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94
+#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6)
+#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0
+#define BIT_MASK_RXIQ_S1_Y2 0xF0000000
+#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF)
+#define REG_TXIQ_AB_S0 0x0cd0
+#define BIT_MASK_TXIQ_A_S0 0x000007FE
+#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_B_S0 0x0007E000
+#define REG_TXIQ_CD_S0 0x0cd4
+#define BIT_MASK_TXIQ_C_S0 0x000007FE
+#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13)
+#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12)
+#define REG_RXIQ_AB_S0 0x0cd8
+#define BIT_MASK_RXIQ_X_S0 0x000003FF
+#define BIT_MASK_RXIQ_Y_S0 0x003FF000
+#define REG_OFDM_FA_TYPE1_11N 0x0cf0
+#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_RSTD_11N 0x0d00
+#define BIT_MASK_OFDM_FA_RST1 BIT(27)
+#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
+#define REG_CTX 0x0d03
+#define BIT_MASK_CTX_TYPE GENMASK(6, 4)
+#define REG_OFDM1_CFOTRK 0x0d2c
+#define BIT_EN_CFOTRK BIT(28)
+#define REG_OFDM1_CSI1 0x0d40
+#define REG_OFDM1_CSI2 0x0d44
+#define REG_OFDM1_CSI3 0x0d48
+#define REG_OFDM1_CSI4 0x0d4c
+#define REG_OFDM_FA_TYPE2_11N 0x0da0
+#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE3_11N 0x0da4
+#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE4_11N 0x0da8
+#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
+#define REG_FPGA0_IQK_11N 0x0e28
+#define BIT_MASK_IQK_MOD 0xffffff00
+#define EN_IQK 0x808000
+#define RST_IQK 0x000000
+#define REG_TXIQK_TONE_A_11N 0x0e30
+#define REG_RXIQK_TONE_A_11N 0x0e34
+#define REG_TXIQK_PI_A_11N 0x0e38
+#define REG_RXIQK_PI_A_11N 0x0e3c
+#define REG_TXIQK_11N 0x0e40
+#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y))
+#define REG_RXIQK_11N 0x0e44
+#define REG_IQK_AGC_PTS_11N 0x0e48
+#define REG_IQK_AGC_RSP_11N 0x0e4c
+#define REG_TX_IQK_TONE_B 0x0e50
+#define REG_RX_IQK_TONE_B 0x0e54
+#define REG_TXIQK_PI_B 0x0e58
+#define REG_RXIQK_PI_B 0x0e5c
+#define REG_IQK_RES_TX 0x0e94
+#define BIT_MASK_RES_TX GENMASK(25, 16)
+#define REG_IQK_RES_TY 0x0e9c
+#define BIT_MASK_RES_TY GENMASK(25, 16)
+#define REG_IQK_RES_RX 0x0ea4
+#define BIT_MASK_RES_RX GENMASK(25, 16)
+#define REG_IQK_RES_RY 0x0eac
+#define BIT_IQK_TX_FAIL BIT(28)
+#define BIT_IQK_RX_FAIL BIT(27)
+#define BIT_IQK_DONE BIT(26)
+#define BIT_MASK_RES_RY GENMASK(25, 16)
+#define REG_PAGE_F_RST_11N 0x0f14
+#define BIT_MASK_F_RST_ALL BIT(16)
+#define REG_IGI_C_11N 0x0f84
+#define REG_IGI_D_11N 0x0f88
+#define REG_HT_CRC32_CNT_11N 0x0f90
+#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
+#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
+#define REG_OFDM_CRC32_CNT_11N 0x0f94
+#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
+#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
+#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
+
+#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing)
+#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing)
+#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing)
+#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing)
+
+static inline s32 iqkxy_to_s32(s32 val)
+{
+ /* val is Q10.8 */
+ return sign_extend32(val, 9);
+}
+
+static inline s32 iqk_mult(s32 x, s32 y, s32 *ext)
+{
+ /* x, y and return value are Q10.8 */
+ s32 t;
+
+ t = x * y;
+ if (ext)
+ *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */
+
+ return (t >> 8); /* Q.16 --> Q.8 */
+}
+
+static inline
+void rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count)
+{
+ rtw8723x_common.debug_txpwr_limit(rtwdev, table, tx_path_count);
+}
+
+static inline void rtw8723x_lck(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.lck(rtwdev);
+}
+
+static inline int rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ return rtw8723x_common.read_efuse(rtwdev, log_map);
+}
+
+static inline int rtw8723x_mac_init(struct rtw_dev *rtwdev)
+{
+ return rtw8723x_common.mac_init(rtwdev);
+}
+
+static inline void rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ rtw8723x_common.cfg_ldo25(rtwdev, enable);
+}
+
+static inline void rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.set_tx_power_index(rtwdev);
+}
+
+static inline void rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ rtw8723x_common.efuse_grant(rtwdev, on);
+}
+
+static inline void rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.false_alarm_statistics(rtwdev);
+}
+
+static inline
+void rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_common.iqk_backup_regs(rtwdev, backup);
+}
+
+static inline
+void rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_common.iqk_restore_regs(rtwdev, backup);
+}
+
+static inline
+bool rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
+ u8 c1, u8 c2)
+{
+ return rtw8723x_common.iqk_similarity_cmp(rtwdev, result, c1, c2);
+}
+
+static inline u8 rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
+{
+ return rtw8723x_common.pwrtrack_get_limit_ofdm(rtwdev);
+}
+
+static inline
+void rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta)
+{
+ rtw8723x_common.pwrtrack_set_xtal(rtwdev, therm_path, delta);
+}
+
+static inline void rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.coex_cfg_init(rtwdev);
+}
+
+static inline
+void rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+ rtw8723x_common.fill_txdesc_checksum(rtwdev, pkt_info, txdesc);
+}
+
+/* IQK helper functions, defined as inline so they can be shared
+ * without needing an EXPORT_SYMBOL each.
+ */
+static inline void
+rtw8723x_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
+ backup->btg_sel);
+}
+
+static inline void rtw8723x_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static inline void
+rtw8723x_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static inline void
+rtw8723x_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
+ mdelay(1);
+ backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
+ backup->lte_gnt);
+}
+
+static inline void
+rtw8723x_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev,
+ u32 write_data)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, write_data);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
+ rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL,
+ BIT_LTE_MUX_CTRL_PATH, 0x1);
+}
+
+static inline void
+rtw8723x_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *bak)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
+ rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
+}
+
+/* set all ADDA registers to the given value */
+static inline void rtw8723x_iqk_path_adda_on(struct rtw_dev *rtwdev, u32 value)
+{
+ for (int i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i], value);
+}
+
+#endif /* __RTW8723X_H__ */
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index 3342e3761281..d3668c4efc24 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -40,6 +40,8 @@ enum rtw_rx_desc_enc {
le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29))
#define GET_RX_DESC_TSFL(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
+#define GET_RX_DESC_BW(rxdesc) \
+ (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24)))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 90ffbab7cc4c..eaea4eaeb361 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -28,6 +28,9 @@ config RTW89_8852B
config RTW89_8852C
tristate
+config RTW89_8922A
+ tristate
+
config RTW89_8851BE
tristate "Realtek 8851BE PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
@@ -72,6 +75,18 @@ config RTW89_8852CE
802.11ax PCIe wireless network (Wi-Fi 6E) adapter
+config RTW89_8922AE
+ tristate "Realtek 8922AE PCI wireless network (Wi-Fi 7) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8922A
+ help
+	  Select this option to enable support for the 8922AE chipset
+
+ 802.11be PCIe wireless network (Wi-Fi 7) adapter
+ supporting 2x2 2GHz/5GHz/6GHz 4096-QAM 160MHz channels.
+
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 41940099af1b..86a553fb0136 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -4,10 +4,13 @@ obj-$(CONFIG_RTW89_CORE) += rtw89_core.o
rtw89_core-y += core.o \
mac80211.o \
mac.o \
+ mac_be.o \
phy.o \
+ phy_be.o \
fw.o \
cam.o \
efuse.o \
+ efuse_be.o \
regd.o \
sar.o \
coex.o \
@@ -54,8 +57,15 @@ rtw89_8852c-objs := rtw8852c.o \
obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o
rtw89_8852ce-objs := rtw8852ce.o
+obj-$(CONFIG_RTW89_8922A) += rtw89_8922a.o
+rtw89_8922a-objs := rtw8922a.o \
+ rtw8922a_rfk.o
+
+obj-$(CONFIG_RTW89_8922AE) += rtw89_8922ae.o
+rtw89_8922ae-objs := rtw8922ae.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
-rtw89_pci-y := pci.o
+rtw89_pci-y := pci.o pci_be.o
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 2e7326a8e3e4..908e980a4b72 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -77,6 +77,50 @@ int rtw89_acpi_dsm_get_policy_6ghz(struct rtw89_dev *rtwdev,
return 0;
}
+static bool chk_acpi_policy_6ghz_sp_sig(const struct rtw89_acpi_policy_6ghz_sp *p)
+{
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x07;
+}
+
+static
+int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_6ghz_sp **policy)
+{
+ const struct rtw89_acpi_policy_6ghz_sp *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_6ghz_sp_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_6ghz_sp: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res)
@@ -95,6 +139,9 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
if (func == RTW89_ACPI_DSM_FUNC_6G_BP)
ret = rtw89_acpi_dsm_get_policy_6ghz(rtwdev, obj,
&res->u.policy_6ghz);
+ else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP)
+ ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj,
+ &res->u.policy_6ghz_sp);
else
ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index fe85b40cf076..d274be1775bf 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -12,7 +12,13 @@ enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_6G_DIS = 3,
RTW89_ACPI_DSM_FUNC_6G_BP = 4,
RTW89_ACPI_DSM_FUNC_TAS_EN = 5,
- RTW89_ACPI_DSM_FUNC_59G_EN = 6,
+ RTW89_ACPI_DSM_FUNC_UNII4_SUP = 6,
+ RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP = 7,
+};
+
+enum rtw89_acpi_conf_unii4 {
+ RTW89_ACPI_CONF_UNII4_FCC = BIT(0),
+ RTW89_ACPI_CONF_UNII4_IC = BIT(1),
};
enum rtw89_acpi_policy_mode {
@@ -36,11 +42,24 @@ struct rtw89_acpi_policy_6ghz {
struct rtw89_acpi_country_code country_list[] __counted_by(country_count);
} __packed;
+enum rtw89_acpi_conf_6ghz_sp {
+ RTW89_ACPI_CONF_6GHZ_SP_US = BIT(0),
+};
+
+struct rtw89_acpi_policy_6ghz_sp {
+ u8 signature[4];
+ u8 revision;
+ u8 override;
+ u8 conf;
+ u8 rsvd;
+} __packed;
+
struct rtw89_acpi_dsm_result {
union {
u8 value;
/* caller needs to free it after using */
struct rtw89_acpi_policy_6ghz *policy_6ghz;
+ struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp;
} u;
};
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 11fbdd142162..1864f543a6c6 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -150,8 +150,6 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
case RTW89_ADDR_CAM_SEC_NONE:
return -EINVAL;
case RTW89_ADDR_CAM_SEC_ALL_UNI:
- if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EINVAL;
idx = find_first_zero_bit(addr_cam->sec_cam_map,
RTW89_SEC_CAM_IN_ADDR_CAM);
if (idx >= RTW89_SEC_CAM_IN_ADDR_CAM)
@@ -232,6 +230,11 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = (struct rtw89_vif *)vif->drv_priv;
addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_ALL_UNI;
+
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
@@ -356,6 +359,9 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ hw_key_type = RTW89_SEC_KEY_TYPE_BIP_CCMP128;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -753,29 +759,80 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
- u8 *cmd)
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c)
{
struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V1_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V1_C0_OP);
- SET_DCTL_MACID_V1(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
- SET_DCTL_OPERATION_V1(cmd, 1);
-
- SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]);
- SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]);
- SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]);
- SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]);
- SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]);
- SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]);
- SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]);
-
- SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff);
- SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]);
- SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]);
- SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]);
- SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]);
- SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]);
- SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
- SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V1_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V1_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V1_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V1_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V1_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V1_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V1_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V1_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0] & 0xff,
+ DCTLINFO_V1_W5_SEC_ENT_VALID) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V1_W5_SEC_ENT0) |
+ le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V1_W5_SEC_ENT1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V1_W5_SEC_ENT2);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V1_W5_SEC_ENT_VALID |
+ DCTLINFO_V1_W5_SEC_ENT0 |
+ DCTLINFO_V1_W5_SEC_ENT1 |
+ DCTLINFO_V1_W5_SEC_ENT2);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V1_W6_SEC_ENT3) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V1_W6_SEC_ENT4) |
+ le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V1_W6_SEC_ENT5) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V1_W6_SEC_ENT6);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V1_W6_SEC_ENT3 |
+ DCTLINFO_V1_W6_SEC_ENT4 |
+ DCTLINFO_V1_W6_SEC_ENT5 |
+ DCTLINFO_V1_W6_SEC_ENT6);
+
+ if (rtw_wow->ptk_alg) {
+ h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8,
+ DCTLINFO_V1_W0_AES_IV_L);
+ h2c->m0 = cpu_to_le32(DCTLINFO_V1_W0_AES_IV_L);
+
+ h2c->w1 = le32_encode_bits(ptk_tx_iv[4] |
+ ptk_tx_iv[5] << 8 |
+ ptk_tx_iv[6] << 16 |
+ ptk_tx_iv[7] << 24,
+ DCTLINFO_V1_W1_AES_IV_H);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V1_W1_AES_IV_H);
+
+ h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx,
+ DCTLINFO_V1_W4_SEC_KEY_ID);
+ h2c->m4 |= cpu_to_le32(DCTLINFO_V1_W4_SEC_KEY_ID);
+ }
}
void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
@@ -784,6 +841,8 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
{
struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
DCTLINFO_V2_C0_MACID) |
@@ -837,4 +896,21 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
DCTLINFO_V2_W7_SEC_ENT6_V1);
h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
DCTLINFO_V2_W7_SEC_ENT6_V1);
+
+ if (rtw_wow->ptk_alg) {
+ h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8,
+ DCTLINFO_V2_W0_AES_IV_L);
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_AES_IV_L);
+
+ h2c->w1 = le32_encode_bits(ptk_tx_iv[4] |
+ ptk_tx_iv[5] << 8 |
+ ptk_tx_iv[6] << 16 |
+ ptk_tx_iv[7] << 24,
+ DCTLINFO_V2_W1_AES_IV_H);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_AES_IV_H);
+
+ h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx,
+ DCTLINFO_V2_W4_SEC_KEY_ID);
+ h2c->m4 |= cpu_to_le32(DCTLINFO_V2_W4_SEC_KEY_ID);
+ }
}
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index fa09d11c345c..5d7b624c2dd4 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,75 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v1 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+} __packed;
+
+#define DCTLINFO_V1_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V1_C0_OP BIT(7)
+
+#define DCTLINFO_V1_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V1_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V1_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V1_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V1_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V1_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V1_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V1_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V1_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V1_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V1_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V1_W2_ALL GENMASK(29, 0)
+#define DCTLINFO_V1_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V1_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V1_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V1_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V1_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V1_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W4_MHDR_LEN GENMASK(4, 0)
+#define DCTLINFO_V1_W4_VLAN_TAG_VALID BIT(5)
+#define DCTLINFO_V1_W4_VLAN_TAG_SEL GENMASK(7, 6)
+#define DCTLINFO_V1_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V1_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V1_W4_WAPI BIT(15)
+#define DCTLINFO_V1_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V1_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V1_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V1_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V1_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V1_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V1_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V1_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V1_W4_ALL (GENMASK(31, 15) | GENMASK(10, 0))
+#define DCTLINFO_V1_W5_SEC_ENT_VALID GENMASK(7, 0)
+#define DCTLINFO_V1_W5_SEC_ENT0 GENMASK(15, 8)
+#define DCTLINFO_V1_W5_SEC_ENT1 GENMASK(23, 16)
+#define DCTLINFO_V1_W5_SEC_ENT2 GENMASK(31, 24)
+#define DCTLINFO_V1_W5_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W6_SEC_ENT3 GENMASK(7, 0)
+#define DCTLINFO_V1_W6_SEC_ENT4 GENMASK(15, 8)
+#define DCTLINFO_V1_W6_SEC_ENT5 GENMASK(23, 16)
+#define DCTLINFO_V1_W6_SEC_ENT6 GENMASK(31, 24)
+#define DCTLINFO_V1_W6_ALL GENMASK(31, 0)
+
struct rtw89_h2c_dctlinfo_ud_v2 {
__le32 c0;
__le32 w0;
@@ -477,7 +546,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
- u8 *cmd);
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c);
void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index d9b66d43f32e..c443b39ab3c6 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -12,6 +12,7 @@
#define RTW89_COEX_VERSION 0x07000113
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
+#define BTC_E2G_LIMIT_DEF 80
enum btc_fbtc_tdma_template {
CXTD_OFF = 0x0,
@@ -54,7 +55,6 @@ enum btc_mlme_state {
MLME_LINKED,
};
-#define FCXONESLOT_VER 1
struct btc_fbtc_1slot {
u8 fver;
u8 sid; /* slot id */
@@ -133,71 +133,71 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
.fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
- .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7,
- .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 8, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
},
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
/* keep it to be the last as default entry */
@@ -206,18 +206,51 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
};
#define RTW89_DEFAULT_BTC_VER_IDX (ARRAY_SIZE(rtw89_btc_ver_defs) - 1)
+static const union rtw89_btc_wl_state_map btc_scanning_map = {
+ .map = {
+ .scan = 1,
+ .connecting = 1,
+ .roaming = 1,
+ .transacting = 1,
+ ._4way = 1,
+ },
+};
+
+static u32 chip_id_to_bt_rom_code_id(u32 id)
+{
+ switch (id) {
+ case RTL8852A:
+ case RTL8852B:
+ case RTL8852C:
+ return 0x8852;
+ case RTL8851B:
+ return 0x8851;
+ case RTL8922A:
+ return 0x8922;
+ default:
+ return 0;
+ }
+}
+
struct rtw89_btc_btf_tlv {
u8 type;
u8 len;
u8 val[];
} __packed;
+struct rtw89_btc_btf_tlv_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+ u8 val[];
+} __packed;
+
enum btc_btf_set_report_en {
RPT_EN_TDMA,
RPT_EN_CYCLE,
@@ -235,13 +268,24 @@ enum btc_btf_set_report_en {
RPT_EN_MONITER,
};
-#define BTF_SET_REPORT_VER 1
-struct rtw89_btc_btf_set_report {
+struct rtw89_btc_btf_set_report_v1 {
u8 fver;
__le32 enable;
__le32 para;
} __packed;
+struct rtw89_btc_btf_set_report_v8 {
+ u8 type;
+ u8 fver;
+ u8 len;
+ __le32 map;
+} __packed;
+
+union rtw89_fbtc_rtp_ctrl {
+ struct rtw89_btc_btf_set_report_v1 v1;
+ struct rtw89_btc_btf_set_report_v8 v8;
+};
+
#define BTF_SET_SLOT_TABLE_VER 1
struct rtw89_btc_btf_set_slot_table {
u8 fver;
@@ -249,12 +293,31 @@ struct rtw89_btc_btf_set_slot_table {
struct rtw89_btc_fbtc_slot tbls[] __counted_by(tbl_num);
} __packed;
-struct rtw89_btc_btf_set_mon_reg {
+struct rtw89_btc_btf_set_slot_table_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+ struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX];
+} __packed;
+
+struct rtw89_btc_btf_set_mon_reg_v1 {
u8 fver;
u8 reg_num;
struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num);
} __packed;
+struct rtw89_btc_btf_set_mon_reg_v7 {
+ u8 type;
+ u8 fver;
+ u8 len;
+ struct rtw89_btc_fbtc_mreg regs[] __counted_by(len);
+} __packed;
+
+union rtw89_fbtc_set_mon_reg {
+ struct rtw89_btc_btf_set_mon_reg_v1 v1;
+ struct rtw89_btc_btf_set_mon_reg_v7 v7;
+} __packed;
+
struct _wl_rinfo_now {
u8 link_mode;
u32 dbcc_2g_phy: 2;
@@ -310,6 +373,7 @@ enum btc_ant_phase {
BTC_ANT_W25G,
BTC_ANT_FREERUN,
BTC_ANT_WRFK,
+ BTC_ANT_WRFK2,
BTC_ANT_BRFK,
BTC_ANT_MAX
};
@@ -614,6 +678,13 @@ enum btc_ctr_path {
BTC_CTRL_BY_WL
};
+enum btc_wlact_state {
+ BTC_WLACT_HW = 0,
+ BTC_WLACT_SW_LO,
+ BTC_WLACT_SW_HI,
+ BTC_WLACT_MAX,
+};
+
enum btc_wl_max_tx_time {
BTC_MAX_TX_TIME_L1 = 500,
BTC_MAX_TX_TIME_L2 = 1000,
@@ -739,7 +810,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
- struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_link_info *wl_linfo;
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__);
@@ -761,16 +832,31 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_DM) {
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
-
- for (i = 0; i < RTW89_PORT_NUM; i++)
- memset(wl_linfo[i].rssi_state, 0,
- sizeof(wl_linfo[i].rssi_state));
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (btc->ver->fwlrole == 8)
+ wl_linfo = &wl->rlink_info[i][0];
+ else
+ wl_linfo = &wl->link_info[i];
+ memset(wl_linfo->rssi_state, 0, sizeof(wl_linfo->rssi_state));
+ }
/* set the slot_now table to original */
btc->dm.tdma_now = t_def[CXTD_OFF];
btc->dm.tdma = t_def[CXTD_OFF];
- memcpy(&btc->dm.slot_now, s_def, sizeof(btc->dm.slot_now));
- memcpy(&btc->dm.slot, s_def, sizeof(btc->dm.slot));
+ if (ver->fcxslots >= 7) {
+ for (i = 0; i < ARRAY_SIZE(s_def); i++) {
+ btc->dm.slot.v7[i].dur = s_def[i].dur;
+ btc->dm.slot.v7[i].cxtype = s_def[i].cxtype;
+ btc->dm.slot.v7[i].cxtbl = s_def[i].cxtbl;
+ }
+ memcpy(&btc->dm.slot_now.v7, &btc->dm.slot.v7,
+ sizeof(btc->dm.slot_now.v7));
+ } else {
+ memcpy(&btc->dm.slot_now.v1, s_def,
+ sizeof(btc->dm.slot_now.v1));
+ memcpy(&btc->dm.slot.v1, s_def,
+ sizeof(btc->dm.slot.v1));
+ }
btc->policy_len = 0;
btc->bt_req_len = 0;
@@ -901,14 +987,25 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
- struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): type:%d cnt:%d\n",
__func__, type, cnt);
switch (type) {
+ case BTC_DCNT_WL_FW_VER_MATCH:
+ if ((wl->ver_info.fw_coex & 0xffff0000) !=
+ rtwdev->chip->wlcx_desired) {
+ wl->fw_ver_mismatch = true;
+ dm->error.map.wl_ver_mismatch = true;
+ } else {
+ wl->fw_ver_mismatch = false;
+ dm->error.map.wl_ver_mismatch = false;
+ }
+ break;
case BTC_DCNT_RPT_HANG:
if (dm->cnt_dm[BTC_DCNT_RPT] == cnt && btc->fwinfo.rpt_en_map)
dm->cnt_dm[BTC_DCNT_RPT_HANG]++;
@@ -1001,6 +1098,19 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
else
dm->error.map.slot_no_sync = false;
break;
+ case BTC_DCNT_BTTX_HANG:
+ cnt = cx->cnt_bt[BTC_BCNT_LOPRI_TX];
+
+ if (cnt == 0 && bt->link_info.slave_role)
+ dm->cnt_dm[BTC_DCNT_BTTX_HANG]++;
+ else
+ dm->cnt_dm[BTC_DCNT_BTTX_HANG] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_BTTX_HANG] >= BTC_CHK_HANG_MAX)
+ dm->error.map.bt_tx_hang = true;
+ else
+ dm->error.map.bt_tx_hang = false;
+ break;
case BTC_DCNT_BTCNT_HANG:
cnt = cx->cnt_bt[BTC_BCNT_HIPRI_RX] +
cx->cnt_bt[BTC_BCNT_HIPRI_TX] +
@@ -1050,27 +1160,36 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc;
- struct rtw89_btc_fbtc_btver *pver = NULL;
- struct rtw89_btc_fbtc_btscan_v1 *pscan_v1;
- struct rtw89_btc_fbtc_btscan_v2 *pscan_v2;
- struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ union rtw89_btc_fbtc_btver *pver = &btc->fwinfo.rpt_fbtc_btver.finfo;
struct rtw89_btc_fbtc_btafh_v2 *pafh_v2 = NULL;
+ struct rtw89_btc_fbtc_btafh_v7 *pafh_v7 = NULL;
struct rtw89_btc_fbtc_btdevinfo *pdev = NULL;
+ struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ struct rtw89_btc_fbtc_btscan_v1 *pscan_v1;
+ struct rtw89_btc_fbtc_btscan_v2 *pscan_v2;
+ struct rtw89_btc_fbtc_btscan_v7 *pscan_v7;
bool scan_update = true;
int i;
- pver = (struct rtw89_btc_fbtc_btver *)pfinfo;
- pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
-
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): rpt_type:%d\n",
__func__, rpt_type);
switch (rpt_type) {
case BTC_RPT_TYPE_BT_VER:
- bt->ver_info.fw = le32_to_cpu(pver->fw_ver);
- bt->ver_info.fw_coex = le32_get_bits(pver->coex_ver, GENMASK(7, 0));
- bt->feature = le32_to_cpu(pver->feature);
+ if (ver->fcxbtver == 7) {
+ pver->v7 = *(struct rtw89_btc_fbtc_btver_v7 *)pfinfo;
+ bt->ver_info.fw = le32_to_cpu(pver->v7.fw_ver);
+ bt->ver_info.fw_coex = le32_get_bits(pver->v7.coex_ver,
+ GENMASK(7, 0));
+ bt->feature = le32_to_cpu(pver->v7.feature);
+ } else {
+ pver->v1 = *(struct rtw89_btc_fbtc_btver_v1 *)pfinfo;
+ bt->ver_info.fw = le32_to_cpu(pver->v1.fw_ver);
+ bt->ver_info.fw_coex = le32_get_bits(pver->v1.coex_ver,
+ GENMASK(7, 0));
+ bt->feature = le32_to_cpu(pver->v1.feature);
+ }
break;
case BTC_RPT_TYPE_BT_SCAN:
if (ver->fcxbtscan == 1) {
@@ -1090,6 +1209,15 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
pscan_v2->para[i].intvl == 0)
scan_update = false;
}
+ } else if (ver->fcxbtscan == 7) {
+ pscan_v7 = (struct rtw89_btc_fbtc_btscan_v7 *)pfinfo;
+ for (i = 0; i < CXSCAN_MAX; i++) {
+ bt->scan_info_v2[i] = pscan_v7->para[i];
+ if ((pscan_v7->type & BIT(i)) &&
+ pscan_v7->para[i].win == 0 &&
+ pscan_v7->para[i].intvl == 0)
+ scan_update = false;
+ }
}
if (scan_update)
bt->scan_info_update = 1;
@@ -1106,6 +1234,17 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
memcpy(&bt_linfo->afh_map_le[0], pafh_v2->afh_le_a, 4);
memcpy(&bt_linfo->afh_map_le[4], pafh_v2->afh_le_b, 1);
}
+ } else if (ver->fcxbtafh == 7) {
+ pafh_v7 = (struct rtw89_btc_fbtc_btafh_v7 *)pfinfo;
+ if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LEGACY) {
+ memcpy(&bt_linfo->afh_map[0], pafh_v7->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v7->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v7->afh_h, 2);
+ }
+ if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LE) {
+ memcpy(&bt_linfo->afh_map_le[0], pafh_v7->afh_le_a, 4);
+ memcpy(&bt_linfo->afh_map_le[4], pafh_v7->afh_le_b, 1);
+ }
} else if (ver->fcxbtafh == 1) {
pafh_v1 = (struct rtw89_btc_fbtc_btafh *)pfinfo;
memcpy(&bt_linfo->afh_map[0], pafh_v1->afh_l, 4);
@@ -1114,6 +1253,7 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
break;
case BTC_RPT_TYPE_BT_DEVICE:
+ pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
a2dp->device_name = le32_to_cpu(pdev->dev_name);
a2dp->vendor_id = le16_to_cpu(pdev->vendor_id);
a2dp->flush_time = le32_to_cpu(pdev->flush_time);
@@ -1123,6 +1263,22 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
}
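+/* Remap a received C2H report index when the firmware does not use the v1
+ * event-report list (fwevntrptl != 1): indexes above __BTC_RPT_TYPE_V0_SAME
+ * are shifted up by one, and anything past __BTC_RPT_TYPE_V0_MAX becomes
+ * BTC_RPT_TYPE_MAX.
+ */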
+static void rtw89_btc_fw_rpt_evnt_ver(struct rtw89_dev *rtwdev, u8 *index)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+
+ if (ver->fwevntrptl == 1)
+ return;
+
+ if (*index <= __BTC_RPT_TYPE_V0_SAME)
+ return;
+ else if (*index <= __BTC_RPT_TYPE_V0_MAX)
+ (*index)++;
+ else
+ *index = BTC_RPT_TYPE_MAX;
+}
+
#define BTC_LEAK_AP_TH 10
#define BTC_CYSTA_CHK_PERIOD 100
@@ -1147,10 +1303,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_prpt *btc_prpt = NULL;
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
- u16 wl_slot_set = 0, wl_slot_real = 0;
+ u16 wl_slot_set = 0, wl_slot_real = 0, val16;
u32 trace_step = 0, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
- u8 i, val = 0;
+ u8 i, val = 0, val1, val2;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): index:%d\n",
@@ -1170,6 +1326,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
"[BTC], %s(): rpt_type:%d\n",
__func__, rpt_type);
+ rtw89_btc_fw_rpt_evnt_ver(rtwdev, &rpt_type);
+
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
@@ -1188,6 +1346,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v105);
pcinfo->req_fver = 5;
break;
+ } else if (ver->fcxbtcrpt == 8) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
+ break;
} else {
goto err;
}
@@ -1198,7 +1360,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxtdma == 1) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1);
- } else if (ver->fcxtdma == 3) {
+ } else if (ver->fcxtdma == 3 || ver->fcxtdma == 7) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3);
} else {
@@ -1208,8 +1370,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
+ if (ver->fcxslots == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v1);
+ } else if (ver->fcxslots == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v7);
+ } else {
+ goto err;
+ }
pcinfo->req_fver = ver->fcxslots;
break;
case BTC_RPT_TYPE_CYSTA:
@@ -1231,6 +1400,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v5);
+ } else if (ver->fcxcysta == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v7);
} else {
goto err;
}
@@ -1264,6 +1437,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxnullsta == 2) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2);
+ } else if (ver->fcxnullsta == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v7);
} else {
goto err;
}
@@ -1277,6 +1453,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxmreg == 2) {
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v2);
+ } else if (ver->fcxmreg == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v7);
} else {
goto err;
}
@@ -1284,14 +1463,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ if (ver->fcxgpiodbg == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1);
+ }
pcinfo->req_fver = ver->fcxgpiodbg;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
+ if (ver->fcxbtver == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v1);
+ } else if (ver->fcxbtver == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v7);
+ }
pcinfo->req_fver = ver->fcxbtver;
break;
case BTC_RPT_TYPE_BT_SCAN:
@@ -1302,6 +1491,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtscan == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v2);
+ } else if (ver->fcxbtscan == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v7);
+ } else {
+ goto err;
}
pcinfo->req_fver = ver->fcxbtscan;
break;
@@ -1460,6 +1654,44 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 8) {
+ prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ if (((prpt->v8.rpt_len_max_h << 8) +
+ prpt->v8.rpt_len_max_l) != ver->info_buf)
+ dm->error.map.h2c_c2h_buffer_mismatch = true;
+ else
+ dm->error.map.h2c_c2h_buffer_mismatch = false;
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else {
goto err;
}
@@ -1474,7 +1706,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo.v1,
sizeof(dm->tdma_now)));
- else if (ver->fcxtdma == 3)
+ else if (ver->fcxtdma == 3 || ver->fcxtdma == 7)
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma,
@@ -1483,14 +1715,25 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
goto err;
break;
case BTC_RPT_TYPE_SLOT:
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n",
- __func__, BTC_DCNT_SLOT_NONSYNC,
- sizeof(dm->slot_now));
- _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
- memcmp(dm->slot_now,
- pfwinfo->rpt_fbtc_slots.finfo.slot,
- sizeof(dm->slot_now)));
+ if (ver->fcxslots == 7) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n",
+ __func__, BTC_DCNT_SLOT_NONSYNC,
+ sizeof(dm->slot_now.v7));
+ _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
+ memcmp(dm->slot_now.v7,
+ pfwinfo->rpt_fbtc_slots.finfo.v7.slot,
+ sizeof(dm->slot_now.v7)));
+ } else if (ver->fcxslots == 1) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n",
+ __func__, BTC_DCNT_SLOT_NONSYNC,
+ sizeof(dm->slot_now.v1));
+ _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
+ memcmp(dm->slot_now.v1,
+ pfwinfo->rpt_fbtc_slots.finfo.v1.slot,
+ sizeof(dm->slot_now.v1)));
+ }
break;
case BTC_RPT_TYPE_CYSTA:
if (ver->fcxcysta == 2) {
@@ -1506,10 +1749,17 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between WL slot and W1/E2G slot */
if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT)
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
- else
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ dm->tdma_now.ext_ctrl == CXECTL_EXT) {
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_E2G].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_E2G].dur);
+ } else {
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
+ }
if (le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) > wl_slot_set) {
diff_t = le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) - wl_slot_set;
@@ -1539,7 +1789,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set) {
diff_t = wl_slot_real - wl_slot_set;
@@ -1580,7 +1833,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set) {
diff_t = wl_slot_real - wl_slot_set;
@@ -1622,7 +1878,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v5.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set)
@@ -1654,11 +1913,62 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
le16_to_cpu(pcysta->v5.slot_cnt[CXST_B1]));
_chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG,
le16_to_cpu(pcysta->v5.cycles));
+ } else if (ver->fcxcysta == 7) {
+ if (dm->fddt_train == BTC_FDDT_ENABLE)
+ break;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+
+ if (dm->tdma_now.type != CXTDMA_OFF) {
+ /* Check diff time between real WL slot and W1 slot */
+ val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_WL]);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, val16);
+
+ /* Check Leak-AP */
+ val1 = le32_to_cpu(pcysta->v7.leak_slot.cnt_rximr) *
+ BTC_LEAK_AP_TH;
+ val2 = le16_to_cpu(pcysta->v7.slot_cnt[CXST_LK]);
+
+ val16 = le16_to_cpu(pcysta->v7.cycles);
+ if (dm->tdma_now.rxflctrl &&
+ val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2)
+ dm->leak_ap = 1;
+ } else if (dm->tdma_now.ext_ctrl == CXECTL_EXT) {
+ val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_BT]);
+ /* Check diff between real BT slot and EBT/E5G slot */
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, val16);
+
+			/* Check BT slot length for P2P mode */
+ val1 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt_timeout) *
+ BTC_SLOT_REQ_TH;
+ val2 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt);
+
+ val16 = le16_to_cpu(pcysta->v7.cycles);
+ if (val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2)
+ dm->slot_req_more = 1;
+ else if (bt->link_info.status.map.connect == 0)
+ dm->slot_req_more = 0;
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_E2G_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_E2G]));
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_B1]));
+
+		/* "BT_SLOT_FLOOD" error-check MUST be done before "CYCLE_HANG" */
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_FLOOD,
+ le16_to_cpu(pcysta->v7.cycles));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG,
+ le16_to_cpu(pcysta->v7.cycles));
} else {
goto err;
}
break;
case BTC_RPT_TYPE_MREG:
+ if (ver->fcxmreg == 7)
+ break;
_get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX_MON, &val);
if (dm->wl_btg_rx == BTC_BTGCTRL_BB_GNT_FWCTRL)
dm->wl_btg_rx_rb = BTC_BTGCTRL_BB_GNT_FWCTRL;
@@ -1715,6 +2025,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
}
#define BTC_TLV_HDR_LEN 2
+#define BTC_TLV_HDR_LEN_V7 3
static void _append_tdma(struct rtw89_dev *rtwdev)
{
@@ -1722,6 +2033,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_btf_tlv *tlv;
+ struct rtw89_btc_btf_tlv_v7 *tlv_v7;
struct rtw89_btc_fbtc_tdma *v;
struct rtw89_btc_fbtc_tdma_v3 *v3;
u16 len = btc->policy_len;
@@ -1741,6 +2053,13 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
tlv->len = sizeof(*v);
*v = dm->tdma;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ } else if (ver->fcxtdma == 7) {
+ tlv_v7 = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len];
+ tlv_v7->len = sizeof(dm->tdma);
+ tlv_v7->ver = ver->fcxtdma;
+ tlv_v7->type = CXPOLICY_TDMA;
+ memcpy(tlv_v7->val, &dm->tdma, tlv_v7->len);
+ btc->policy_len += BTC_TLV_HDR_LEN_V7 + tlv_v7->len;
} else {
tlv->len = sizeof(*v3);
v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0];
@@ -1756,7 +2075,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
dm->tdma.ext_ctrl);
}
-static void _append_slot(struct rtw89_dev *rtwdev)
+static void _append_slot_v1(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -1771,8 +2090,8 @@ static void _append_slot(struct rtw89_dev *rtwdev)
for (i = 0; i < CXST_MAX; i++) {
if (!btc->update_policy_force &&
- !memcmp(&dm->slot[i], &dm->slot_now[i],
- sizeof(dm->slot[i])))
+ !memcmp(&dm->slot.v1[i], &dm->slot_now.v1[i],
+ sizeof(dm->slot.v1[i])))
continue;
len = btc->policy_len;
@@ -1782,14 +2101,14 @@ static void _append_slot(struct rtw89_dev *rtwdev)
tlv->type = CXPOLICY_SLOT;
tlv->len = sizeof(*v);
- v->fver = FCXONESLOT_VER;
+ v->fver = btc->ver->fcxslots;
v->sid = i;
- v->slot = dm->slot[i];
+ v->slot = dm->slot.v1[i];
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): slot-%d: dur=%d, table=0x%08x, type=%d\n",
- __func__, i, dm->slot[i].dur, dm->slot[i].cxtbl,
- dm->slot[i].cxtype);
+ __func__, i, dm->slot.v1[i].dur, dm->slot.v1[i].cxtbl,
+ dm->slot.v1[i].cxtype);
cnt++;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
@@ -1801,6 +2120,71 @@ static void _append_slot(struct rtw89_dev *rtwdev)
__func__, cnt);
}
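+/* v7 slot policy encoding: one 3-byte TLV header (type/ver/len) followed by
+ * [slot-id][slot struct] pairs, appended only for slots whose setting differs
+ * from slot_now (unless update_policy_force is set).
+ */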
+static void _append_slot_v7(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc_btf_tlv_v7 *tlv = NULL;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 i, cnt = 0;
+ u16 len;
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!btc->update_policy_force &&
+ !memcmp(&dm->slot.v7[i], &dm->slot_now.v7[i],
+ sizeof(dm->slot.v7[i])))
+ continue;
+
+ len = btc->policy_len;
+
+ if (!tlv) {
+ if ((len + BTC_TLV_HDR_LEN_V7) > RTW89_BTC_POLICY_MAXLEN) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): buff overflow!\n", __func__);
+ break;
+ }
+
+ tlv = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len];
+ tlv->type = CXPOLICY_SLOT;
+ tlv->ver = btc->ver->fcxslots;
+ tlv->len = sizeof(dm->slot.v7[0]) + BTC_TLV_SLOT_ID_LEN_V7;
+ len += BTC_TLV_HDR_LEN_V7;
+ }
+
+ if ((len + (u16)tlv->len) > RTW89_BTC_POLICY_MAXLEN) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): buff overflow!\n", __func__);
+ break;
+ }
+
+ btc->policy[len] = i; /* slot-id */
+ memcpy(&btc->policy[len + 1], &dm->slot.v7[i],
+ sizeof(dm->slot.v7[0]));
+ len += tlv->len;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s: policy_len=%d, slot-%d: dur=%d, type=%d, table=0x%08x\n",
+ __func__, btc->policy_len, i, dm->slot.v7[i].dur,
+ dm->slot.v7[i].cxtype, dm->slot.v7[i].cxtbl);
+ cnt++;
+ btc->policy_len = len; /* update total length */
+ }
+
+ if (cnt > 0)
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s: slot update (cnt=%d, len=%d)!!\n",
+ __func__, cnt, btc->policy_len);
+}
+
+static void _append_slot(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ if (btc->ver->fcxslots == 7)
+ _append_slot_v7(rtwdev);
+ else
+ _append_slot_v1(rtwdev);
+}
+
static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1946,13 +2330,52 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
return bit_map;
}
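+/* Download the whole slot table to firmware: fcxslots == 7 wraps dm->slot.v7
+ * in a type/ver/len TLV, older versions keep the fver/tbl_num layout.
+ */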
+static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ struct rtw89_btc_btf_tlv_v7 *tlv_v7 = NULL;
+ struct rtw89_btc_btf_set_slot_table *tbl;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u16 n, len;
+
+ if (ver->fcxslots == 7) {
+ len = sizeof(*tlv_v7) + sizeof(dm->slot.v7);
+ tlv_v7 = kmalloc(len, GFP_KERNEL);
+ if (!tlv_v7)
+ return;
+
+ tlv_v7->type = SET_SLOT_TABLE;
+ tlv_v7->ver = ver->fcxslots;
+ tlv_v7->len = ARRAY_SIZE(dm->slot.v7);
+ memcpy(tlv_v7->val, dm->slot.v7, sizeof(dm->slot.v7));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, (u8 *)tlv_v7, len);
+
+ kfree(tlv_v7);
+ } else {
+ n = struct_size(tbl, tbls, CXST_MAX);
+ tbl = kmalloc(n, GFP_KERNEL);
+ if (!tbl)
+ return;
+
+ tbl->fver = BTF_SET_SLOT_TABLE_VER;
+ tbl->tbl_num = CXST_MAX;
+ memcpy(tbl->tbls, dm->slot.v1, flex_array_size(tbl, tbls, CXST_MAX));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
+
+ kfree(tbl);
+ }
+}
+
static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
u32 rpt_map, bool rpt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_smap *wl_smap = &btc->cx.wl.status.map;
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
- struct rtw89_btc_btf_set_report r = {0};
+ union rtw89_fbtc_rtp_ctrl r;
u32 val, bit_map;
int ret;
@@ -1973,41 +2396,35 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- r.fver = BTF_SET_REPORT_VER;
- r.enable = cpu_to_le32(val);
- r.para = cpu_to_le32(rpt_state);
+ if (btc->ver->fcxbtcrpt == 8) {
+ r.v8.type = SET_REPORT_EN;
+ r.v8.fver = btc->ver->fcxbtcrpt;
+ r.v8.len = sizeof(r.v8.map);
+ r.v8.map = cpu_to_le32(val);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v8,
+ sizeof(r.v8));
+ } else {
+ if (btc->ver->fcxbtcrpt == 105)
+ r.v1.fver = 5;
+ else
+ r.v1.fver = btc->ver->fcxbtcrpt;
+ r.v1.enable = cpu_to_le32(val);
+ r.v1.para = cpu_to_le32(rpt_state);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v1,
+ sizeof(r.v1));
+ }
- ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
if (!ret)
fwinfo->rpt_en_map = val;
}
-static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
- struct rtw89_btc_fbtc_slot *s)
-{
- struct rtw89_btc_btf_set_slot_table *tbl;
- u16 n;
-
- n = struct_size(tbl, tbls, num);
- tbl = kmalloc(n, GFP_KERNEL);
- if (!tbl)
- return;
-
- tbl->fver = BTF_SET_SLOT_TABLE_VER;
- tbl->tbl_num = num;
- memcpy(tbl->tbls, s, flex_array_size(tbl, tbls, num));
-
- _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
-
- kfree(tbl);
-}
-
static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
- struct rtw89_btc_btf_set_mon_reg *monreg = NULL;
- u8 n, ulen, cxmreg_max;
+ struct rtw89_btc_btf_set_mon_reg_v1 *v1 = NULL;
+ struct rtw89_btc_btf_set_mon_reg_v7 *v7 = NULL;
+ u8 i, n, ulen, cxmreg_max;
u16 sz = 0;
n = chip->mon_reg_num;
@@ -2016,10 +2433,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
if (ver->fcxmreg == 1)
cxmreg_max = CXMREG_MAX;
- else if (ver->fcxmreg == 2)
- cxmreg_max = CXMREG_MAX_V2;
else
- return;
+ cxmreg_max = CXMREG_MAX_V2;
if (n > cxmreg_max) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -2028,21 +2443,37 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
return;
}
- ulen = sizeof(monreg->regs[0]);
- sz = struct_size(monreg, regs, n);
- monreg = kmalloc(sz, GFP_KERNEL);
- if (!monreg)
- return;
+ ulen = sizeof(struct rtw89_btc_fbtc_mreg);
+
+ if (ver->fcxmreg == 7) {
+ sz = struct_size(v7, regs, n);
+		v7 = kmalloc(sz, GFP_KERNEL);
+		if (!v7)
+			return;
+
+ v7->type = RPT_EN_MREG;
+ v7->fver = ver->fcxmreg;
+ v7->len = n;
+ for (i = 0; i < n; i++) {
+ v7->regs[i].type = chip->mon_reg[i].type;
+ v7->regs[i].bytes = chip->mon_reg[i].bytes;
+ v7->regs[i].offset = chip->mon_reg[i].offset;
+ }
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v7, sz);
+ kfree(v7);
+ } else {
+ sz = struct_size(v1, regs, n);
+		v1 = kmalloc(sz, GFP_KERNEL);
+		if (!v1)
+			return;
+
+ v1->fver = ver->fcxmreg;
+ v1->reg_num = n;
+ memcpy(v1->regs, chip->mon_reg, flex_array_size(v1, regs, n));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v1, sz);
+ kfree(v1);
+ }
- monreg->fver = ver->fcxmreg;
- monreg->reg_num = n;
- memcpy(monreg->regs, chip->mon_reg, flex_array_size(monreg, regs, n));
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): sz=%d ulen=%d n=%d\n",
__func__, sz, ulen, n);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, (u8 *)monreg, sz);
- kfree(monreg);
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1);
}
@@ -2100,7 +2531,10 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
btc->policy, btc->policy_len);
if (!ret) {
memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
- memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ if (btc->ver->fcxslots == 7)
+ memcpy(&dm->slot_now.v7, &dm->slot.v7, sizeof(dm->slot_now.v7));
+ else
+ memcpy(&dm->slot_now.v1, &dm->slot.v1, sizeof(dm->slot_now.v1));
}
if (btc->update_policy_force)
@@ -2243,6 +2677,76 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st
rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
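+/* GNT programming for chips with the v2 MAC interface: force per-PHY
+ * GNT_WL/GNT_BT to HW or SW levels, optionally drive the WLAN_ACT mux of the
+ * selected BT side, then apply the result via rtw89_mac_cfg_gnt_v2().
+ */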
+static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
+ u8 wl_state, u8 bt_state, u8 wlact_state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ u8 i, bt_idx = dm->bt_select + 1;
+
+ if (phy_map > BTC_PHY_ALL)
+ return;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (!(phy_map & BIT(i)))
+ continue;
+
+ switch (wl_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_wl_sw_en = 0;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 1;
+ break;
+ }
+
+ switch (bt_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
+ }
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_WLAN_ACT_MUX) {
+ for (i = 0; i < 2; i++) {
+ if (!(bt_idx & BIT(i)))
+ continue;
+
+ switch (wlact_state) {
+ case BTC_WLACT_HW:
+ dm->gnt.bt[i].wlan_act_en = 0;
+ dm->gnt.bt[i].wlan_act = 0;
+ break;
+ case BTC_WLACT_SW_LO:
+ dm->gnt.bt[i].wlan_act_en = 1;
+ dm->gnt.bt[i].wlan_act = 0;
+ break;
+ case BTC_WLACT_SW_HI:
+ dm->gnt.bt[i].wlan_act_en = 1;
+ dm->gnt.bt[i].wlan_act = 1;
+ break;
+ }
+ }
+ }
+ rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+}
+
#define BTC_TDMA_WLROLE_MAX 2
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
@@ -2493,9 +2997,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -2511,6 +3017,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 8) {
+ mode = wl_rinfo_v8->link_mode;
+ connect_cnt = wl_rinfo_v8->connect_cnt;
} else {
return;
}
@@ -2526,6 +3035,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
(r->role == RTW89_WIFI_ROLE_P2P_GO ||
@@ -2545,6 +3055,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 8 &&
+ (rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = rlink->ch;
+ bw = rlink->bw;
+ break;
}
}
} else {
@@ -2554,6 +3070,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
r->connected && r->band == RTW89_BAND_2G) {
@@ -2570,6 +3087,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 8 &&
+ rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
+ ch = rlink->ch;
+ bw = rlink->bw;
+ break;
}
}
}
@@ -2622,25 +3144,35 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
union rtw89_btc_module_info *md = &btc->mdinfo;
const struct rtw89_btc_ver *ver = btc->ver;
- u8 isolation;
+ u8 isolation, connect_cnt = 0;
if (ver->fcxinit == 7)
isolation = md->md_v7.ant.isolation;
else
isolation = md->md.ant.isolation;
+ if (ver->fwlrole == 0)
+ connect_cnt = wl_rinfo->connect_cnt;
+ else if (ver->fwlrole == 1)
+ connect_cnt = wl_rinfo_v1->connect_cnt;
+ else if (ver->fwlrole == 2)
+ connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 8)
+ connect_cnt = wl_rinfo_v8->connect_cnt;
+
if (btc->ant_type == BTC_ANT_SHARED) {
btc->dm.trx_para_level = 0;
return false;
}
/* The below is dedicated antenna case */
- if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX ||
- wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ if (connect_cnt > BTC_TDMA_WLROLE_MAX) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -2693,19 +3225,6 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
-#define _slot_set(btc, sid, dura, tbl, type) \
- do { \
- typeof(sid) _sid = (sid); \
- typeof(btc) _btc = (btc); \
- _btc->dm.slot[_sid].dur = cpu_to_le16(dura);\
- _btc->dm.slot[_sid].cxtbl = cpu_to_le32(tbl); \
- _btc->dm.slot[_sid].cxtype = cpu_to_le16(type); \
- } while (0)
-
-#define _slot_set_dur(btc, sid, dura) (btc)->dm.slot[sid].dur = cpu_to_le16(dura)
-#define _slot_set_tbl(btc, sid, tbl) (btc)->dm.slot[sid].cxtbl = cpu_to_le32(tbl)
-#define _slot_set_type(btc, sid, type) (btc)->dm.slot[sid].cxtype = cpu_to_le16(type)
-
struct btc_btinfo_lb2 {
u8 connect: 1;
u8 sco_busy: 1;
@@ -2780,7 +3299,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
- struct rtw89_btc_fbtc_slot *s = dm->slot;
+ struct rtw89_btc_fbtc_slot *s = dm->slot.v1;
u8 type;
u32 tbl_w1, tbl_b1, tbl_b4;
@@ -3091,7 +3610,6 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
- struct rtw89_btc_fbtc_slot *s = dm->slot;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
@@ -3139,13 +3657,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
case BTC_CXP_USERDEF0:
btc->update_policy_force = true;
*t = t_def[CXTD_OFF];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
_slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
break;
case BTC_CXP_OFF: /* TDMA off */
_write_scbd(rtwdev, BTC_WSCB_TDMA, false);
*t = t_def[CXTD_OFF];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
switch (policy_type) {
case BTC_CXP_OFF_BT:
@@ -3186,7 +3706,8 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
case BTC_CXP_OFFB: /* TDMA off + beacon protect */
_write_scbd(rtwdev, BTC_WSCB_TDMA, false);
*t = t_def[CXTD_OFF_B2];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
switch (policy_type) {
case BTC_CXP_OFFB_BWB0:
@@ -3207,21 +3728,29 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
switch (policy_type) {
case BTC_CXP_OFFE_DEF:
- s[CXST_E2G] = s_def[CXST_E2G];
- s[CXST_E5G] = s_def[CXST_E5G];
- s[CXST_EBT] = s_def[CXST_EBT];
- s[CXST_ENULL] = s_def[CXST_ENULL];
+ _slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur,
+ s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype);
+ _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur,
+ s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
+ s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
break;
case BTC_CXP_OFFE_DEF2:
_slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
- s[CXST_E5G] = s_def[CXST_E5G];
- s[CXST_EBT] = s_def[CXST_EBT];
- s[CXST_ENULL] = s_def[CXST_ENULL];
+ _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur,
+ s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
+ s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
break;
default:
break;
}
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
break;
case BTC_CXP_FIX: /* TDMA Fix-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
@@ -3481,25 +4010,36 @@ EXPORT_SYMBOL(rtw89_btc_set_policy_v1);
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
u8 tx_val, u8 rx_val)
{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
struct rtw89_mac_ax_plt plt;
- plt.band = RTW89_MAC_0;
plt.tx = tx_val;
plt.rx = rx_val;
- if (phy_map & BTC_PHY_0)
+ if (rtwdev->btc.ver->fwlrole == 8) {
+ plt.band = wl->pta_req_mac;
+ if (wl->bt_polut_type[plt.band] == tx_val)
+ return;
+
+ wl->bt_polut_type[plt.band] = tx_val;
rtw89_mac_cfg_plt(rtwdev, &plt);
+ } else {
+ plt.band = RTW89_MAC_0;
- if (!rtwdev->dbcc_en)
- return;
+ if (phy_map & BTC_PHY_0)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
- plt.band = RTW89_MAC_1;
- if (phy_map & BTC_PHY_1)
- rtw89_mac_cfg_plt(rtwdev, &plt);
+ if (!rtwdev->dbcc_en)
+ return;
+
+ plt.band = RTW89_MAC_1;
+ if (phy_map & BTC_PHY_1)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
+ }
}
-static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
- u8 phy_map, u8 type)
+static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -3508,13 +4048,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
u8 gnt_wl_ctrl, gnt_bt_ctrl, plt_ctrl, i, b2g = 0;
+ bool dbcc_chg = false;
u32 ant_path_type;
ant_path_type = ((phy_map << 8) + type);
+ if (btc->ver->fwlrole == 1)
+ dbcc_chg = wl->role_info_v1.dbcc_chg;
+ else if (btc->ver->fwlrole == 2)
+ dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 8)
+ dbcc_chg = wl->role_info_v8.dbcc_chg;
+
if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF ||
btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE ||
- btc->dm.run_reason == BTC_RSN_CMD_SET_COEX)
+ btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || dbcc_chg)
force_exec = FC_EXEC;
if (!force_exec && ant_path_type == dm->set_ant_path) {
@@ -3617,6 +4165,117 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
}
}
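+/* Antenna/GNT control used on RTL8922A (see _set_ant() below): a similar
+ * decision flow to _set_ant_v0(), but expressed through _set_gnt_v1() with
+ * the extra WLAN_ACT states and the new BTC_ANT_WRFK2 case (WL RFK while
+ * BT-Tx is blocked).
+ */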
+static void _set_ant_v1(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ u32 ant_path_type = rtw89_get_antpath_type(phy_map, type);
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 gwl = BTC_GNT_HW;
+
+ if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF ||
+ btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE ||
+ btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || wl_rinfo->dbcc_chg)
+ force_exec = FC_EXEC;
+
+ if (wl_rinfo->link_mode != BTC_WLINK_25G_MCC &&
+ btc->dm.wl_btg_rx == 2)
+ force_exec = FC_EXEC;
+
+ if (!force_exec && ant_path_type == dm->set_ant_path) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by no change!!\n",
+ __func__);
+ return;
+ } else if (bt->rfk_info.map.run) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by bt rfk!!\n", __func__);
+ return;
+ } else if (btc->dm.run_reason != BTC_RSN_NTFY_WL_RFK &&
+ wl->rfk_info.state != BTC_WRFK_STOP) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by wl rfk!!\n", __func__);
+ return;
+ }
+
+ dm->set_ant_path = ant_path_type;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): path=0x%x, set_type=0x%x\n",
+ __func__, phy_map, dm->set_ant_path & 0xff);
+
+ switch (type) {
+ case BTC_ANT_WINIT:
+ /* To avoid BT MP driver case (bt_enable but no mailbox) */
+ if (bt->enable.now && bt->run_patch_code)
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ else
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI);
+ break;
+ case BTC_ANT_WONLY:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI);
+ break;
+ case BTC_ANT_WOFF:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ break;
+ case BTC_ANT_W2G:
+ case BTC_ANT_W25G:
+ if (wl_rinfo->dbcc_en) {
+ if (wl_dinfo->real_band[RTW89_PHY_0] == RTW89_BAND_2G)
+ gwl = BTC_GNT_HW;
+ else
+ gwl = BTC_GNT_SW_HI;
+ _set_gnt_v1(rtwdev, BTC_PHY_0, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+
+ if (wl_dinfo->real_band[RTW89_PHY_1] == RTW89_BAND_2G)
+ gwl = BTC_GNT_HW;
+ else
+ gwl = BTC_GNT_SW_HI;
+ _set_gnt_v1(rtwdev, BTC_PHY_1, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+ } else {
+ gwl = BTC_GNT_HW;
+ _set_gnt_v1(rtwdev, phy_map, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+ }
+ break;
+ case BTC_ANT_W5G:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW, BTC_WLACT_HW);
+ break;
+ case BTC_ANT_FREERUN:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ break;
+ case BTC_ANT_WRFK:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_HW);
+ break;
+ case BTC_ANT_WRFK2:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI); /* no BT-Tx */
+ break;
+ default:
+ return;
+ }
+
+ _set_bt_plut(rtwdev, phy_map, BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
+}
+
+static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
+{
+ if (rtwdev->chip->chip_id == RTL8922A)
+ _set_ant_v1(rtwdev, force_exec, phy_map, type);
+ else
+ _set_ant_v0(rtwdev, force_exec, phy_map, type);
+}
+
static void _action_wl_only(struct rtw89_dev *rtwdev)
{
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY);
@@ -3641,7 +4300,7 @@ static void _action_wl_off(struct rtw89_dev *rtwdev, u8 mode)
if (wl->status.map.rf_off || btc->dm.bt_only) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF);
} else if (wl->status.map.lps == BTC_LPS_RF_ON) {
- if (wl->role_info.link_mode == BTC_WLINK_5G)
+ if (mode == BTC_WLINK_5G)
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W5G);
else
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
@@ -4070,6 +4729,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -4090,6 +4750,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4103,6 +4765,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 8) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
return;
}
@@ -4223,7 +4887,10 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
u16 enable = iter_data->enable;
bool reenable = iter_data->reenable;
- plink = &wl->link_info[port];
+ if (btc->ver->fwlrole == 8)
+ plink = &wl->rlink_info[port][0];
+ else
+ plink = &wl->link_info[port];
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): port = %d\n", __func__, port);
@@ -4276,6 +4943,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
u32 tx_time;
@@ -4291,6 +4959,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4348,6 +5018,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
u8 mode;
@@ -4358,6 +5029,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4378,11 +5051,14 @@ static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri));
}
-/* TODO add these functions */
static void _action_common(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u32 bt_rom_code_id, bt_fw_ver;
_set_btg_ctrl(rtwdev);
_set_wl_preagc_ctrl(rtwdev);
@@ -4392,6 +5068,26 @@ static void _action_common(struct rtw89_dev *rtwdev)
_set_rf_trx_para(rtwdev);
_set_bt_rx_scan_pri(rtwdev);
+ bt_rom_code_id = chip_id_to_bt_rom_code_id(rtwdev->btc.ver->chip_id);
+ bt_fw_ver = bt->ver_info.fw & 0xffff;
+ if (bt->enable.now &&
+ (bt_fw_ver == 0 ||
+ (bt_fw_ver == bt_rom_code_id && bt->run_patch_code && rtwdev->chip->scbd)))
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 1);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 0);
+
+ if (dm->run_reason == BTC_RSN_NTFY_INIT ||
+ dm->run_reason == BTC_RSN_NTFY_RADIO_STATE ||
+ dm->run_reason == BTC_RSN_NTFY_POWEROFF) {
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+
+ if (wl_smap->rf_off == 1 || wl_smap->lps != BTC_LPS_OFF)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, 0);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1);
+ }
+
if (wl->scbd_change) {
rtw89_mac_cfg_sb(rtwdev, wl->scbd);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
@@ -4488,6 +5184,30 @@ static void _action_wl_2g_sta(struct rtw89_dev *rtwdev)
_action_by_bt(rtwdev);
}
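+/* 2.4G+5G concurrency action, moved ahead of the scan handler so it can be
+ * reused there: a shared antenna picks an ext-ctrl policy from the 4-way/
+ * scanning state and the BT profile count, a dedicated antenna falls back to
+ * BTC_CXP_OFF_EQ0.
+ */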
+static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ u16 policy_type = BTC_CXP_OFF_BT;
+
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ policy_type = BTC_CXP_OFFE_WL;
+ else if (btc->cx.wl.status.val & btc_scanning_map.val)
+ policy_type = BTC_CXP_OFFE_2GBWMIXB;
+ else if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ else
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else { /* dedicated-antenna */
+ policy_type = BTC_CXP_OFF_EQ0;
+ }
+
+ btc->dm.e2g_slot_limit = BTC_E2G_LIMIT_DEF;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_25G_MCC);
+}
+
static void _action_wl_scan(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4495,14 +5215,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->ant_type == BTC_ANT_SHARED)
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
- BTC_RSN_NTFY_SCAN_START);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_EQ0,
- BTC_RSN_NTFY_SCAN_START);
-
+ _action_wl_25g_mcc(rtwdev);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
} else if (rtwdev->dbcc_en) {
if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G &&
@@ -4518,24 +5231,6 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
}
}
-static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
-
- _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
-
- if (btc->ant_type == BTC_ANT_SHARED) {
- if (btc->cx.bt.link_info.profile_cnt.now == 0)
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
- BTC_ACT_WL_25G_MCC);
- else
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
- BTC_ACT_WL_25G_MCC);
- } else { /* dedicated-antenna */
- _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_25G_MCC);
- }
-}
-
static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
@@ -4695,6 +5390,31 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
}
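+/* 2G SCC action for fwlrole v8: a shared antenna selects an ext-ctrl policy
+ * from the 4-way handshake and BT connect state, a dedicated antenna uses
+ * BTC_CXP_OFF_EQ0.
+ */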
+static void _action_wl_2g_scc_v8(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u16 policy_type = BTC_CXP_OFF_BT;
+
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ if (wl->status.map._4way)
+ policy_type = BTC_CXP_OFFE_WL;
+ else if (bt->link_info.status.map.connect == 0)
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ else
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else {
+ policy_type = BTC_CXP_OFF_EQ0;
+ }
+
+ dm->e2g_slot_limit = BTC_E2G_LIMIT_DEF;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
+}
+
static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -5287,6 +6007,312 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev)
#define BTC_CHK_HANG_MAX 3
#define BTC_SCB_INV_VALUE GENMASK(31, 0)
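+/* Helpers for the v8 role info below: map a single WiFi role to its 2G link
+ * mode, and check whether two channel definitions belong to the same group
+ * (same primary channel and a compatible 40 MHz offset).
+ */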
+static u8 _get_role_link_mode(u8 role)
+{
+ switch (role) {
+ case RTW89_WIFI_ROLE_STATION:
+ return BTC_WLINK_2G_STA;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ return BTC_WLINK_2G_GO;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ return BTC_WLINK_2G_GC;
+ case RTW89_WIFI_ROLE_AP:
+ return BTC_WLINK_2G_AP;
+ default:
+ return BTC_WLINK_OTHER;
+ }
+}
+
+static bool _chk_role_ch_group(const struct rtw89_btc_chdef *r1,
+ const struct rtw89_btc_chdef *r2)
+{
+ if (r1->chan != r2->chan) { /* primary ch is different */
+ return false;
+ } else if (r1->bw == RTW89_CHANNEL_WIDTH_40 &&
+ r2->bw == RTW89_CHANNEL_WIDTH_40) {
+ if (r1->offset != r2->offset)
+ return false;
+ }
+ return true;
+}
+
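+/* For DBCC: locate the PHY carrying a 2G channel and classify the resulting
+ * link mode from how many roles share that 2G PHY.
+ */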
+static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
+ u8 *phy, u8 *role, u8 *dbcc_2g_phy)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+
+ /* find out the 2G-PHY by connect-id ->ch */
+ for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ if (ch[j].center_ch <= 14) {
+ is_2g_ch_exist = true;
+ break;
+ }
+ }
+
+	/* If no 2G port exists, it's impossible since 5G-only was excluded earlier */
+ if (!is_2g_ch_exist)
+ return BTC_WLINK_OTHER;
+
+ dbcc_2g_cid = j;
+ *dbcc_2g_phy = phy[dbcc_2g_cid];
+
+ /* connect_cnt <= 2 */
+ if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ return (_get_role_link_mode((role[dbcc_2g_cid])));
+
+	/* find the other port in the 2G PHY, ex: PHY-0: 6G, PHY-1: mcc/scc */
+ for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ if (k == dbcc_2g_cid)
+ continue;
+
+ if (phy[k] == *dbcc_2g_phy) {
+ is_multi_role_in_2g_phy = true;
+ dbcc_2g_cid2 = k;
+ break;
+ }
+ }
+
+ /* Single-role in 2G-PHY */
+ if (!is_multi_role_in_2g_phy)
+ return (_get_role_link_mode(role[dbcc_2g_cid]));
+
+ /* 2-role in 2G-PHY */
+ if (ch[dbcc_2g_cid2].center_ch > 14)
+ return BTC_WLINK_25G_MCC;
+ else if (_chk_role_ch_group(&ch[dbcc_2g_cid], &ch[dbcc_2g_cid2]))
+ return BTC_WLINK_2G_SCC;
+ else
+ return BTC_WLINK_2G_MCC;
+}
+
+static void _update_role_link_mode(struct rtw89_dev *rtwdev,
+ bool client_joined, u32 noa)
+{
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ u32 type = BTC_WLMROLE_NONE, dur = 0;
+ u32 wl_role = wl_rinfo->role_map;
+
+ /* if no client_joined, don't care P2P-GO/AP role */
+ if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt--;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt--;
+ }
+ }
+
+ /* Identify 2-Role type */
+ if (wl_rinfo->connect_cnt >= 2 &&
+ (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
+ wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
+ wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (wl_role & BIT(RTW89_WIFI_ROLE_AP)))
+ type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
+ else if (wl_role & BIT(RTW89_WIFI_ROLE_P2P_CLIENT))
+ type = noa ? BTC_WLMROLE_STA_GC_NOA : BTC_WLMROLE_STA_GC;
+ else
+ type = BTC_WLMROLE_STA_STA;
+
+ dur = noa;
+ }
+
+ wl_rinfo->mrole_type = type;
+ wl_rinfo->mrole_noa_duration = dur;
+}
+
+static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
+ enum btc_role_state state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ bool client_joined = false, b2g = false, b5g = false;
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 dbcc_en = 0, pta_req_band = RTW89_MAC_0;
+ u8 i, j, cnt = 0, cnt_2g = 0, cnt_5g = 0;
+ struct rtw89_btc_wl_link_info *wl_linfo;
+ struct rtw89_btc_wl_rlink *rlink = NULL;
+ u8 dbcc_2g_phy = RTW89_PHY_0;
+ u8 mode = BTC_WLINK_NOLINK;
+ u32 noa_dur = 0;
+
+ if (role_id >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER || rlink_id > RTW89_MAC_1)
+ return;
+
+ /* Extract wl->rlink_info[role_id][rlink_id] to wl->role_info
+ * role_id: role index
+ * rlink_id: rlink index (= HW-band index)
+ * pid: port_index
+ */
+
+ wl_linfo = &wl->rlink_info[role_id][rlink_id];
+ if (wl_linfo->connected == MLME_LINKING)
+ return;
+
+ rlink = &wl_rinfo->rlink[role_id][rlink_id];
+ rlink->role = wl_linfo->role;
+ rlink->active = wl_linfo->active; /* Doze or not */
+ rlink->pid = wl_linfo->pid;
+ rlink->phy = wl_linfo->phy;
+ rlink->rf_band = wl_linfo->band;
+ rlink->ch = wl_linfo->ch;
+ rlink->bw = wl_linfo->bw;
+ rlink->noa = wl_linfo->noa;
+ rlink->noa_dur = wl_linfo->noa_duration / 1000;
+ rlink->client_cnt = wl_linfo->client_cnt;
+ rlink->mode = wl_linfo->mode;
+
+ switch (wl_linfo->connected) {
+ case MLME_NO_LINK:
+ rlink->connected = 0;
+ if (rlink->role == RTW89_WIFI_ROLE_STATION)
+ btc->dm.leak_ap = 0;
+ break;
+ case MLME_LINKED:
+ rlink->connected = 1;
+ break;
+ default:
+ return;
+ }
+
+ wl->is_5g_hi_channel = false;
+ wl->bg_mode = false;
+ wl_rinfo->role_map = 0;
+ wl_rinfo->p2p_2g = 0;
+ memset(cid_ch, 0, sizeof(cid_ch));
+
+ for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) {
+ for (j = RTW89_MAC_0; j <= RTW89_MAC_1; j++) {
+ rlink = &wl_rinfo->rlink[i][j];
+
+ if (!rlink->active || !rlink->connected)
+ continue;
+
+ cnt++;
+ wl_rinfo->role_map |= BIT(rlink->role);
+
+ /* client_joined is set only when a client connects to the P2P-GO/AP */
+ if ((rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_AP) &&
+ rlink->client_cnt > 1)
+ client_joined = true;
+
+ /* Identify if a P2P role (GO/GC/AP) exists on the 2G band */
+ if (rlink->rf_band == RTW89_BAND_2G &&
+ (client_joined || rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT))
+ wl_rinfo->p2p_2g = 1;
+
+ /* only one NoA role exists */
+ if (rlink->noa && rlink->noa_dur > 0)
+ noa_dur = rlink->noa_dur;
+
+ /* for the WL 5G-Rx interfered-by-BT issue */
+ if (rlink->rf_band == RTW89_BAND_5G && rlink->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+
+ if ((rlink->mode & BIT(BTC_WL_MODE_11B)) ||
+ (rlink->mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+
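+ /* the per-connect-id bookkeeping below is used by the non-MLO flow only */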
+ if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT)
+ continue;
+
+ cid_ch[cnt - 1] = wl_linfo->chdef;
+ cid_phy[cnt - 1] = rlink->phy;
+ cid_role[cnt - 1] = rlink->role;
+
+ if (rlink->rf_band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+ }
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g);
+ rtw89_warn(rtwdev, "not support MLO feature yet");
+ } else {
+ dbcc_en = rtwdev->dbcc_en;
+
+ /* Be careful when changing the following sequence!! */
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role,
+ &dbcc_2g_phy);
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ }
+ }
+
+ wl_rinfo->link_mode = mode;
+ wl_rinfo->connect_cnt = cnt;
+ if (wl_rinfo->connect_cnt == 0)
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ _update_role_link_mode(rtwdev, client_joined, noa_dur);
+
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+ if (wl_rinfo->dbcc_en != dbcc_en) {
+ wl_rinfo->dbcc_en = dbcc_en;
+ wl_rinfo->dbcc_chg = 1;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ } else {
+ wl_rinfo->dbcc_chg = 0;
+ }
+
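+ /* with DBCC enabled, record which PHY serves 2G/5G and pick the PTA request band accordingly */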
+ if (wl_rinfo->dbcc_en) {
+ memset(wl_dinfo, 0, sizeof(struct rtw89_btc_wl_dbcc_info));
+
+ if (mode == BTC_WLINK_5G) {
+ pta_req_band = RTW89_PHY_0;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
+ } else if (wl_rinfo->dbcc_2g_phy == RTW89_PHY_1) {
+ pta_req_band = RTW89_PHY_1;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
+ } else {
+ pta_req_band = RTW89_PHY_0;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_2G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_5G;
+ }
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+
+ wl_rinfo->pta_req_band = pta_req_band;
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
void rtw89_coex_act1_work(struct work_struct *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -5445,6 +6471,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
lockdep_assert_held(&rtwdev->mutex);
@@ -5459,6 +6486,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -5605,6 +6634,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc_v1(rtwdev);
else if (ver->fwlrole == 2)
_action_wl_2g_scc_v2(rtwdev);
+ else if (ver->fwlrole == 8)
+ _action_wl_2g_scc_v8(rtwdev);
break;
case BTC_WLINK_2G_MCC:
bt->scan_rx_low_pri = true;
@@ -5736,8 +6767,8 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_set_init_info(rtwdev);
_set_wl_tx_power(rtwdev, RTW89_BTC_WL_DEF_TX_PWR);
- rtw89_btc_fw_set_slots(rtwdev, CXST_MAX, dm->slot);
btc_fw_set_monreg(rtwdev);
+ rtw89_btc_fw_set_slots(rtwdev);
_fw_set_drv_info(rtwdev, CXDRVINFO_INIT);
_fw_set_drv_info(rtwdev, CXDRVINFO_CTRL);
@@ -5956,6 +6987,22 @@ static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi)
return rssi_level;
}
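+/* Program the Zigbee coexistence tables according to the WL link mode:
+ * fully open them for 5G or freerun, split them per E5G/E2G slot for
+ * 2.4G+5G MCC, otherwise keep the default table.
+ */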
+static void _update_zb_coex_tbl(struct rtw89_dev *rtwdev)
+{
+ u8 mode = rtwdev->btc.cx.wl.role_info.link_mode;
+ u32 zb_tbl0 = 0xda5a5a5a, zb_tbl1 = 0xda5a5a5a;
+
+ if (mode == BTC_WLINK_5G || rtwdev->btc.dm.freerun) {
+ zb_tbl0 = 0xffffffff;
+ zb_tbl1 = 0xffffffff;
+ } else if (mode == BTC_WLINK_25G_MCC) {
+ zb_tbl0 = 0xffffffff; /* for E5G slot */
+ zb_tbl1 = 0xda5a5a5a; /* for E2G slot */
+ }
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, zb_tbl0);
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, zb_tbl1);
+}
+
#define BT_PROFILE_PROTOCOL_MASK GENMASK(7, 4)
static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
@@ -6093,13 +7140,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
}
-enum btc_wl_mode {
- BTC_WL_MODE_HT = 0,
- BTC_WL_MODE_VHT = 1,
- BTC_WL_MODE_HE = 2,
- BTC_WL_MODE_NUM,
-};
-
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
@@ -6112,7 +7152,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_link_info r = {0};
struct rtw89_btc_wl_link_info *wlinfo = NULL;
- u8 mode = 0;
+ u8 mode = 0, rlink_id, link_mode_ori, pta_req_mac_ori, wa_type;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -6162,6 +7202,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
r.band = chan->band_type;
r.ch = chan->channel;
r.bw = chan->band_width;
+ r.chdef.band = chan->band_type;
+ r.chdef.center_ch = chan->channel;
+ r.chdef.bw = chan->band_width;
+ r.chdef.chan = chan->primary_channel;
ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
@@ -6171,13 +7215,37 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
- memcpy(wlinfo, &r, sizeof(*wlinfo));
- if (ver->fwlrole == 0)
+ rlink_id = 0; /* to do */
+ if (ver->fwlrole == 0) {
+ *wlinfo = r;
_update_wl_info(rtwdev);
- else if (ver->fwlrole == 1)
+ } else if (ver->fwlrole == 1) {
+ *wlinfo = r;
_update_wl_info_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ } else if (ver->fwlrole == 2) {
+ *wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 8) {
+ wlinfo = &wl->rlink_info[r.pid][rlink_id];
+ *wlinfo = r;
+ link_mode_ori = wl->role_info_v8.link_mode;
+ pta_req_mac_ori = wl->pta_req_mac;
+ _update_wl_info_v8(rtwdev, r.pid, rlink_id, state);
+
+ if (wl->role_info_v8.link_mode != link_mode_ori) {
+ wl->role_info_v8.link_mode_chg = 1;
+ if (ver->fcxinit == 7)
+ wa_type = btc->mdinfo.md_v7.wa_type;
+ else
+ wa_type = btc->mdinfo.md.wa_type;
+
+ if (wa_type & BTC_WA_HFP_ZB)
+ _update_zb_coex_tbl(rtwdev);
+ }
+
+ if (wl->pta_req_mac != pta_req_mac_ori)
+ wl->pta_reg_mac_chg = 1;
+ }
if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
wlinfo->connected == MLME_NO_LINK)
@@ -6715,7 +7783,10 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
}
for (i = 0; i < RTW89_PORT_NUM; i++) {
- plink = &btc->cx.wl.link_info[i];
+ if (btc->ver->fwlrole == 8)
+ plink = &btc->cx.wl.rlink_info[i][0];
+ else
+ plink = &btc->cx.wl.link_info[i];
if (!plink->active)
continue;
@@ -6760,6 +7831,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
@@ -6773,6 +7845,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -6985,11 +8059,6 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "\n");
}
- if (bt->enable.now && bt->ver_info.fw == 0)
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false);
-
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true);
else
@@ -7017,6 +8086,79 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e
#define CASE_BTC_INIT(e) case BTC_MODE_## e: return #e
#define CASE_BTC_ANTPATH_STR(e) case BTC_ANT_##e: return #e
+#define CASE_BTC_POLUT_STR(e) case BTC_PLT_## e: return #e
+#define CASE_BTC_REGTYPE_STR(e) case REG_## e: return #e
+#define CASE_BTC_GDBG_STR(e) case BTC_DBG_## e: return #e
+
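+/* enum-to-string helpers for the debugfs dump below */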
+static const char *id_to_polut(u32 id)
+{
+ switch (id) {
+ CASE_BTC_POLUT_STR(NONE);
+ CASE_BTC_POLUT_STR(GNT_BT_TX);
+ CASE_BTC_POLUT_STR(GNT_BT_RX);
+ CASE_BTC_POLUT_STR(GNT_WL);
+ CASE_BTC_POLUT_STR(BT);
+ CASE_BTC_POLUT_STR(ALL);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *id_to_regtype(u32 id)
+{
+ switch (id) {
+ CASE_BTC_REGTYPE_STR(MAC);
+ CASE_BTC_REGTYPE_STR(BB);
+ CASE_BTC_REGTYPE_STR(RF);
+ CASE_BTC_REGTYPE_STR(BT_RF);
+ CASE_BTC_REGTYPE_STR(BT_MODEM);
+ CASE_BTC_REGTYPE_STR(BT_BLUEWIZE);
+ CASE_BTC_REGTYPE_STR(BT_VENDOR);
+ CASE_BTC_REGTYPE_STR(BT_LE);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *id_to_gdbg(u32 id)
+{
+ switch (id) {
+ CASE_BTC_GDBG_STR(GNT_BT);
+ CASE_BTC_GDBG_STR(GNT_WL);
+ CASE_BTC_GDBG_STR(BCN_EARLY);
+ CASE_BTC_GDBG_STR(WL_NULL0);
+ CASE_BTC_GDBG_STR(WL_NULL1);
+ CASE_BTC_GDBG_STR(WL_RXISR);
+ CASE_BTC_GDBG_STR(TDMA_ENTRY);
+ CASE_BTC_GDBG_STR(A2DP_EMPTY);
+ CASE_BTC_GDBG_STR(BT_RETRY);
+ CASE_BTC_GDBG_STR(BT_RELINK);
+ CASE_BTC_GDBG_STR(SLOT_WL);
+ CASE_BTC_GDBG_STR(SLOT_BT);
+ CASE_BTC_GDBG_STR(WL_ERR);
+ CASE_BTC_GDBG_STR(WL_OK);
+ CASE_BTC_GDBG_STR(SLOT_B2W);
+ CASE_BTC_GDBG_STR(SLOT_W1);
+ CASE_BTC_GDBG_STR(SLOT_W2);
+ CASE_BTC_GDBG_STR(SLOT_W2B);
+ CASE_BTC_GDBG_STR(SLOT_B1);
+ CASE_BTC_GDBG_STR(SLOT_B2);
+ CASE_BTC_GDBG_STR(SLOT_B3);
+ CASE_BTC_GDBG_STR(SLOT_B4);
+ CASE_BTC_GDBG_STR(SLOT_LK);
+ CASE_BTC_GDBG_STR(SLOT_E2G);
+ CASE_BTC_GDBG_STR(SLOT_E5G);
+ CASE_BTC_GDBG_STR(SLOT_EBT);
+ CASE_BTC_GDBG_STR(SLOT_WLK);
+ CASE_BTC_GDBG_STR(SLOT_B1FDD);
+ CASE_BTC_GDBG_STR(BT_CHANGE);
+ CASE_BTC_GDBG_STR(WL_CCA);
+ CASE_BTC_GDBG_STR(BT_LEAUDIO);
+ CASE_BTC_GDBG_STR(USER_DEF);
+ default:
+ return "unknown";
+ }
+}
static const char *steps_to_str(u16 step)
{
@@ -7354,6 +8496,10 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5;
except_cnt = pcysta->v5.except_cnt;
exception_map = le32_to_cpu(pcysta->v5.except_map);
+ } else if (ver->fcxcysta == 7) {
+ pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ except_cnt = pcysta->v7.except_cnt;
+ exception_map = le32_to_cpu(pcysta->v7.except_map);
} else {
return;
}
@@ -7430,22 +8576,35 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_fbtc_slot *s;
+ u16 dur, cxtype;
+ u32 tbl;
u8 i = 0;
for (i = 0; i < CXST_MAX; i++) {
- s = &dm->slot_now[i];
+ if (btc->ver->fcxslots == 1) {
+ dur = le16_to_cpu(dm->slot_now.v1[i].dur);
+ tbl = le32_to_cpu(dm->slot_now.v1[i].cxtbl);
+ cxtype = le16_to_cpu(dm->slot_now.v1[i].cxtype);
+ } else if (btc->ver->fcxslots == 7) {
+ dur = le16_to_cpu(dm->slot_now.v7[i].dur);
+ tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl);
+ cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype);
+ } else {
+ return;
+ }
+
if (i % 5 == 0)
seq_printf(m,
" %-15s : %5s[%03d/0x%x/%d]",
"[slot_list]",
id_to_slot((u32)i),
- s->dur, s->cxtbl, s->cxtype);
+ dur, tbl, cxtype);
else
seq_printf(m,
", %5s[%03d/0x%x/%d]",
id_to_slot((u32)i),
- s->dur, s->cxtbl, s->cxtype);
+ dur, tbl, cxtype);
+
if (i % 5 == 4)
seq_puts(m, "\n");
}
@@ -7973,6 +9132,136 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
}
}
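+/* Cycle statistics dump for fcxcysta v7: per-slot counters, cycle timing,
+ * A2DP empty statistics and a trace of the most recent cycles.
+ */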
+static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u16 cycle, c_begin, c_end, s_id;
+ u8 i, cnt = 0, divide_cnt;
+ u8 slot_pair;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]",
+ le16_to_cpu(pcysta->cycles));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le16_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+ seq_printf(m, ", %s:%d",
+ id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (pcysta->collision_cnt)
+ seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+
+ if (pcysta->skip_cnt)
+ seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt));
+
+ seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_stat]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
+ le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
+ seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ if (a2dp->exist) {
+ seq_printf(m,
+ "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
+ "[a2dp_stat]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
+ a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max);
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+ }
+
+ if (le16_to_cpu(pcysta->cycles) <= 1)
+ return;
+
+ /* 1 cycle = 1 wl-slot + 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (le16_to_cpu(pcysta->cycles) <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = le16_to_cpu(pcysta->cycles) - slot_pair + 1;
+
+ c_end = le16_to_cpu(pcysta->cycles);
+
+ if (a2dp->exist)
+ divide_cnt = 2;
+ else
+ divide_cnt = 6;
+
+ if (c_begin > c_end)
+ return;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ s_id = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1) {
+ if (a2dp->exist)
+ seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]");
+ else
+ seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]");
+ }
+
+ seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id]));
+
+ if (a2dp->exist)
+ seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id],
+ pcysta->a2dp_trx[s_id].empty_cnt,
+ pcysta->a2dp_trx[s_id].retry_cnt,
+ (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id].tx_cnt,
+ pcysta->a2dp_trx[s_id].ack_cnt,
+ pcysta->a2dp_trx[s_id].nack_cnt);
+ else
+ seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]);
+
+ seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
+
+ if (a2dp->exist)
+ seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1],
+ pcysta->a2dp_trx[s_id + 1].empty_cnt,
+ pcysta->a2dp_trx[s_id + 1].retry_cnt,
+ (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id + 1].tx_cnt,
+ pcysta->a2dp_trx[s_id + 1].ack_cnt,
+ pcysta->a2dp_trx[s_id + 1].nack_cnt);
+ else
+ seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]);
+ }
+}
+
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -8009,6 +9298,27 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
le32_to_cpu(ns->v1.max_t[i]) / 1000,
le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
+ } else if (ver->fcxnullsta == 7) {
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[Tx:%d/",
+ le32_to_cpu(ns->v7.result[i][4]));
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns->v7.result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns->v7.result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns->v7.result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns->v7.result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v7.tavg[i]) / 1000,
+ le32_to_cpu(ns->v7.tavg[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v7.tmax[i]) / 1000,
+ le32_to_cpu(ns->v7.tmax[i]) % 1000);
+ }
} else {
for (i = 0; i < 2; i++) {
seq_printf(m, " %-15s : ", "[NULL-STA]");
@@ -8185,6 +9495,8 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_fbtc_cysta_v4(rtwdev, m);
else if (ver->fcxcysta == 5)
_show_fbtc_cysta_v5(rtwdev, m);
+ else if (ver->fcxcysta == 7)
+ _show_fbtc_cysta_v7(rtwdev, m);
_show_fbtc_nullsta(rtwdev, m);
@@ -8237,6 +9549,47 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
}
}
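+/* Dump the firmware GPIO debug-signal mapping; the report layout differs
+ * between fcxgpiodbg v1 and v7.
+ */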
+static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
+ u8 *gpio_map, i;
+ u32 en_map;
+
+ pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
+ gdbg = &rtwdev->btc.fwinfo.rpt_fbtc_gpio_dbg.finfo;
+ if (!pcinfo->valid) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
+ __func__);
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (ver->fcxgpiodbg == 7) {
+ en_map = le32_to_cpu(gdbg->v7.en_map);
+ gpio_map = gdbg->v7.gpio_map;
+ } else {
+ en_map = le32_to_cpu(gdbg->v1.en_map);
+ gpio_map = gdbg->v1.gpio_map;
+ }
+
+ if (!en_map)
+ return;
+
+ seq_printf(m, " %-15s : enable_map:0x%08x",
+ "[gpio_dbg]", en_map);
+
+ for (i = 0; i < BTC_DBG_MAX1; i++) {
+ if (!(en_map & BIT(i)))
+ continue;
+ seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]);
+ }
+ seq_puts(m, "\n");
+}
+
static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -8244,7 +9597,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_mreg_val_v1 *pmreg = NULL;
- struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -8314,29 +9666,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
if (i >= pmreg->reg_num)
seq_puts(m, "\n");
}
-
- pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- if (!pcinfo->valid) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
- __func__);
- seq_puts(m, "\n");
- return;
- }
-
- gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- if (!gdbg->en_map)
- return;
-
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", gdbg->en_map);
-
- for (i = 0; i < BTC_DBG_MAX1; i++) {
- if (!(gdbg->en_map & BIT(i)))
- continue;
- seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]);
- }
- seq_puts(m, "\n");
}
static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
@@ -8346,7 +9675,6 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_mreg_val_v2 *pmreg = NULL;
- struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -8371,12 +9699,13 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
gnt = gnt_cfg.band[0];
seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
"[gnt_status]",
chip->chip_id == RTL8852C ? "HW" :
btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = gnt_cfg.band[1];
seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
@@ -8416,27 +9745,74 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
if (i >= pmreg->reg_num)
seq_puts(m, "\n");
}
+}
- pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- if (!pcinfo->valid) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
- __func__);
- seq_puts(m, "\n");
+static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_mreg_val_v7 *pmreg = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_mac_ax_gnt *gnt = NULL;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 i, type, cnt = 0;
+ u32 val, offset;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_MREG))
return;
+
+ seq_puts(m, "\n\r========== [HW Status] ==========");
+
+ seq_printf(m,
+ "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
+ "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+
+ /* To avoid I/O if WL LPS or power-off */
+ dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+
+ seq_printf(m,
+ "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
+ "[gnt_status]",
+ rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
+ dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+
+ gnt = &dm->gnt.band[RTW89_PHY_0];
+
+ seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+
+ if (rtwdev->dbcc_en) {
+ gnt = &dm->gnt.band[RTW89_PHY_1];
+ seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
}
- gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- if (!gdbg->en_map)
+ pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
+ if (!pcinfo->valid)
return;
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", gdbg->en_map);
+ pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
- for (i = 0; i < BTC_DBG_MAX1; i++) {
- if (!(gdbg->en_map & BIT(i)))
- continue;
- seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]);
+ for (i = 0; i < pmreg->reg_num; i++) {
+ type = (u8)le16_to_cpu(rtwdev->chip->mon_reg[i].type);
+ offset = le32_to_cpu(rtwdev->chip->mon_reg[i].offset);
+ val = le32_to_cpu(pmreg->mreg_val[i]);
+
+ if (cnt % 6 == 0)
+ seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
+ id_to_regtype(type), offset, val);
+ else
+ seq_printf(m, ", %s_0x%x=0x%x",
+ id_to_regtype(type), offset, val);
+ cnt++;
}
seq_puts(m, "\n");
}
@@ -8887,6 +10263,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
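+/* Summary block for fcxbtcrpt v8: H2C/C2H counters, BT mailbox statistics,
+ * RFK/LPS counters and the per-event notify counts.
+ */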
+static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_rpt_ctrl_v8 *prptctrl = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v8;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
@@ -8924,6 +10402,10 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_mreg_v1(rtwdev, m);
else if (ver->fcxmreg == 2)
_show_mreg_v2(rtwdev, m);
+ else if (ver->fcxmreg == 7)
+ _show_mreg_v7(rtwdev, m);
+
+ _show_gpio_dbg(rtwdev, m);
if (ver->fcxbtcrpt == 1)
_show_summary_v1(rtwdev, m);
@@ -8933,6 +10415,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 8)
+ _show_summary_v8(rtwdev, m);
}
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 13303830684e..0e5f268616f7 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -8,6 +8,8 @@
#include "core.h"
#define BTC_H2C_MAXLEN 2020
+#define BTC_TLV_SLOT_ID_LEN_V7 1
+#define BTC_SLOT_REQ_TH 2
enum btc_mode {
BTC_MODE_NORMAL,
@@ -201,6 +203,60 @@ enum btc_3cx_type {
BTC_3CX_MAX,
};
+enum btc_chip_feature {
+ BTC_FEAT_PTA_ONOFF_CTRL = BIT(0), /* on/off ctrl by HW (not 0x73[2]) */
+ BTC_FEAT_NONBTG_GWL_THRU = BIT(1), /* non-BTG GNT_WL!=0 if GNT_BT = 1 */
+ BTC_FEAT_WLAN_ACT_MUX = BIT(2), /* separate wlan_act/gnt mux */
+ BTC_FEAT_NEW_BBAPI_FLOW = BIT(3), /* new btg_ctrl/pre_agc_ctrl */
+ BTC_FEAT_MLO_SUPPORT = BIT(4),
+ BTC_FEAT_H2C_MACRO = BIT(5),
+};
+
+enum btc_wl_mode {
+ BTC_WL_MODE_11B = 0,
+ BTC_WL_MODE_11A = 1,
+ BTC_WL_MODE_11G = 2,
+ BTC_WL_MODE_HT = 3,
+ BTC_WL_MODE_VHT = 4,
+ BTC_WL_MODE_HE = 5,
+ BTC_WL_MODE_NUM,
+};
+
+enum btc_wl_gpio_debug {
+ BTC_DBG_GNT_BT = 0,
+ BTC_DBG_GNT_WL = 1,
+ BTC_DBG_BCN_EARLY = 2,
+ BTC_DBG_WL_NULL0 = 3,
+ BTC_DBG_WL_NULL1 = 4,
+ BTC_DBG_WL_RXISR = 5,
+ BTC_DBG_TDMA_ENTRY = 6,
+ BTC_DBG_A2DP_EMPTY = 7,
+ BTC_DBG_BT_RETRY = 8,
+ BTC_DBG_BT_RELINK = 9,
+ BTC_DBG_SLOT_WL = 10,
+ BTC_DBG_SLOT_BT = 11,
+ BTC_DBG_WL_ERR = 12,
+ BTC_DBG_WL_OK = 13,
+ BTC_DBG_SLOT_B2W = 14,
+ BTC_DBG_SLOT_W1 = 15,
+ BTC_DBG_SLOT_W2 = 16,
+ BTC_DBG_SLOT_W2B = 17,
+ BTC_DBG_SLOT_B1 = 18,
+ BTC_DBG_SLOT_B2 = 19,
+ BTC_DBG_SLOT_B3 = 20,
+ BTC_DBG_SLOT_B4 = 21,
+ BTC_DBG_SLOT_LK = 22,
+ BTC_DBG_SLOT_E2G = 23,
+ BTC_DBG_SLOT_E5G = 24,
+ BTC_DBG_SLOT_EBT = 25,
+ BTC_DBG_SLOT_WLK = 26,
+ BTC_DBG_SLOT_B1FDD = 27,
+ BTC_DBG_BT_CHANGE = 28,
+ BTC_DBG_WL_CCA = 29,
+ BTC_DBG_BT_LEAUDIO = 30,
+ BTC_DBG_USER_DEF = 31,
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
@@ -261,4 +317,56 @@ static inline u16 rtw89_coex_query_bt_req_len(struct rtw89_dev *rtwdev,
return btc->bt_req_len;
}
+static inline u32 rtw89_get_antpath_type(u8 phy_map, u8 type)
+{
+ return ((phy_map << 8) + type);
+}
+
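+/* Slot accessors that hide the fcxslots version difference: v1 and v7
+ * carry the same dur/cxtype/cxtbl fields, only the struct layout differs.
+ */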
+static inline
+void _slot_set_le(struct rtw89_btc *btc, u8 sid, __le16 dura, __le32 tbl, __le16 type)
+{
+ if (btc->ver->fcxslots == 1) {
+ btc->dm.slot.v1[sid].dur = dura;
+ btc->dm.slot.v1[sid].cxtbl = tbl;
+ btc->dm.slot.v1[sid].cxtype = type;
+ } else if (btc->ver->fcxslots == 7) {
+ btc->dm.slot.v7[sid].dur = dura;
+ btc->dm.slot.v7[sid].cxtype = type;
+ btc->dm.slot.v7[sid].cxtbl = tbl;
+ }
+}
+
+static inline
+void _slot_set(struct rtw89_btc *btc, u8 sid, u16 dura, u32 tbl, u16 type)
+{
+ _slot_set_le(btc, sid, cpu_to_le16(dura), cpu_to_le32(tbl), cpu_to_le16(type));
+}
+
+static inline
+void _slot_set_dur(struct rtw89_btc *btc, u8 sid, u16 dura)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].dur = cpu_to_le16(dura);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].dur = cpu_to_le16(dura);
+}
+
+static inline
+void _slot_set_type(struct rtw89_btc *btc, u8 sid, u16 type)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].cxtype = cpu_to_le16(type);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].cxtype = cpu_to_le16(type);
+}
+
+static inline
+void _slot_set_tbl(struct rtw89_btc *btc, u8 sid, u32 tbl)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].cxtbl = cpu_to_le32(tbl);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].cxtbl = cpu_to_le32(tbl);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index d474b8d5df3d..ddc390d24ec1 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -18,6 +18,7 @@
#include "ser.h"
#include "txrx.h"
#include "util.h"
+#include "wow.h"
static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
@@ -82,6 +83,9 @@ static struct ieee80211_channel rtw89_channels_5ghz[] = {
RTW89_DEF_CHAN_5G(5885, 177),
};
+static_assert(RTW89_5GHZ_UNII4_START_INDEX + RTW89_5GHZ_UNII4_CHANNEL_NUM ==
+ ARRAY_SIZE(rtw89_channels_5ghz));
+
static struct ieee80211_channel rtw89_channels_6ghz[] = {
RTW89_DEF_CHAN_6G(5955, 1),
RTW89_DEF_CHAN_6G(5975, 5),
@@ -252,6 +256,9 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ if (tx && ieee80211_is_assoc_req(hdr->frame_control))
+ rtw89_wow_parse_akm(rtwdev, skb);
+
if (!ieee80211_is_data(hdr->frame_control))
return;
@@ -4069,6 +4076,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve
}
}
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
+{
+ const struct dmi_system_id *match;
+ enum rtw89_quirks quirk;
+
+ if (!quirks)
+ return;
+
+ for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
+ quirk = (uintptr_t)match->driver_data;
+ if (quirk >= NUM_OF_RTW89_QUIRKS)
+ continue;
+
+ set_bit(quirk, rtwdev->quirks);
+ }
+}
+EXPORT_SYMBOL(rtw89_check_quirks);
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -4486,7 +4511,15 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
- WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (!chip->support_rnr)
+ hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+
+ if (chip->chip_gen == RTW89_CHIP_BE)
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2e854c9af709..112bdd95fc6e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -7,6 +7,7 @@
#include <linux/average.h>
#include <linux/bitfield.h>
+#include <linux/dmi.h>
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/workqueue.h>
@@ -799,6 +800,7 @@ struct rtw89_rx_phy_ppdu {
enum rtw89_mac_idx {
RTW89_MAC_0 = 0,
RTW89_MAC_1 = 1,
+ RTW89_MAC_NUM,
};
enum rtw89_phy_idx {
@@ -1178,9 +1180,13 @@ enum rtw89_btc_ncnt {
BTC_NCNT_CUSTOMERIZE,
BTC_NCNT_WL_RFK,
BTC_NCNT_WL_STA,
+ BTC_NCNT_WL_STA_LAST,
BTC_NCNT_FWINFO,
BTC_NCNT_TIMER,
- BTC_NCNT_NUM
+ BTC_NCNT_SWITCH_CHBW,
+ BTC_NCNT_RESUME_DL_FW,
+ BTC_NCNT_COUNTRYCODE,
+ BTC_NCNT_NUM,
};
enum rtw89_btc_btinfo {
@@ -1209,6 +1215,7 @@ enum rtw89_btc_dcnt {
BTC_DCNT_TDMA_NONSYNC,
BTC_DCNT_SLOT_NONSYNC,
BTC_DCNT_BTCNT_HANG,
+ BTC_DCNT_BTTX_HANG,
BTC_DCNT_WL_SLOT_DRIFT,
BTC_DCNT_WL_STA_LAST,
BTC_DCNT_BT_SLOT_DRIFT,
@@ -1216,7 +1223,10 @@ enum rtw89_btc_dcnt {
BTC_DCNT_FDDT_TRIG,
BTC_DCNT_E2G,
BTC_DCNT_E2G_HANG,
- BTC_DCNT_NUM
+ BTC_DCNT_WL_FW_VER_MATCH,
+ BTC_DCNT_NULL_TX_FAIL,
+ BTC_DCNT_WL_STA_NTFY,
+ BTC_DCNT_NUM,
};
enum rtw89_btc_wl_state_cnt {
@@ -1230,6 +1240,13 @@ enum rtw89_btc_wl_state_cnt {
BTC_WCNT_RFK_REJECT,
BTC_WCNT_RFK_TIMEOUT,
BTC_WCNT_CH_UPDATE,
+ BTC_WCNT_DBCC_ALL_2G,
+ BTC_WCNT_DBCC_CHG,
+ BTC_WCNT_RX_OK_LAST,
+ BTC_WCNT_RX_OK_LAST2S,
+ BTC_WCNT_RX_ERR_LAST,
+ BTC_WCNT_RX_ERR_LAST2S,
+ BTC_WCNT_RX_LAST,
BTC_WCNT_NUM
};
@@ -1253,8 +1270,10 @@ enum rtw89_btc_bt_state_cnt {
BTC_BCNT_LOPRI_TX,
BTC_BCNT_LOPRI_RX,
BTC_BCNT_POLUT,
+ BTC_BCNT_POLUT_NOW,
+ BTC_BCNT_POLUT_DIFF,
BTC_BCNT_RATECHG,
- BTC_BCNT_NUM
+ BTC_BCNT_NUM,
};
enum rtw89_btc_bt_profile {
@@ -1299,6 +1318,7 @@ struct rtw89_btc_wl_smap {
u32 scan: 1;
u32 connecting: 1;
u32 roaming: 1;
+ u32 transacting: 1;
u32 _4way: 1;
u32 rf_off: 1;
u32 lps: 2;
@@ -1307,6 +1327,8 @@ struct rtw89_btc_wl_smap {
u32 traffic_dir : 2;
u32 rf_off_pre: 1;
u32 lps_pre: 2;
+ u32 lps_exiting: 1;
+ u32 emlsr: 1;
};
enum rtw89_tfc_lv {
@@ -1349,6 +1371,14 @@ struct rtw89_traffic_stats {
u16 rx_rate;
};
+struct rtw89_btc_chdef {
+ u8 center_ch;
+ u8 band;
+ u8 chan;
+ enum rtw89_sc_offset offset;
+ enum rtw89_bandwidth bw;
+};
+
struct rtw89_btc_statistic {
u8 rssi; /* 0%~110% (dBm = rssi -110) */
struct rtw89_traffic_stats traffic;
@@ -1357,6 +1387,7 @@ struct rtw89_btc_statistic {
#define BTC_WL_RSSI_THMAX 4
struct rtw89_btc_wl_link_info {
+ struct rtw89_btc_chdef chdef;
struct rtw89_btc_statistic stat;
enum rtw89_tfc_dir dir;
u8 rssi_state[BTC_WL_RSSI_THMAX];
@@ -1370,6 +1401,7 @@ struct rtw89_btc_wl_link_info {
u8 phy;
u8 dtim_period;
u8 mode;
+ u8 tx_1ss_limit;
u8 mac_id;
u8 tx_retry;
@@ -1379,6 +1411,7 @@ struct rtw89_btc_wl_link_info {
u32 tx_time;
u32 client_cnt;
u32 rx_rate_drop_cnt;
+ u32 noa_duration;
u32 active: 1;
u32 noa: 1;
@@ -1412,6 +1445,11 @@ struct rtw89_btc_bt_a2dp_desc {
u8 type: 3;
u8 active: 1;
u8 sink: 1;
+ u32 handle_update: 1;
+ u32 devinfo_query: 1;
+ u32 no_empty_streak_2s: 8;
+ u32 no_empty_streak_max: 8;
+ u32 rsvd: 6;
u8 bitpool;
u16 vendor_id;
@@ -1589,6 +1627,42 @@ struct rtw89_btc_wl_role_info_v2 { /* struct size must be n*4 bytes */
u32 rsvd: 27;
};
+struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 rf_band; /* enum band_type RF band: 2.4G/5G/6G */
+ u8 active; /* 0:rlink is under doze */
+ u8 bw; /* enum channel_width */
+ u8 role; /* enum role_type */
+
+ u8 ch;
+ u8 noa_dur; /* ms */
+ u8 client_cnt; /* for Role = P2P-Go/AP */
+ u8 mode; /* wifi protocol */
+} __packed;
+
+#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ u8 pta_req_band;
+ u8 dbcc_en; /* 1+1 and 2.4G-included */
+ u8 dbcc_chg;
+ u8 dbcc_2g_phy; /* which PHY operates in 2G, HW_PHY_0 or HW_PHY_1 */
+
+ struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+} __packed;
+
struct rtw89_btc_wl_ver_info {
u32 fw_coex; /* match with which coex_ver */
u32 fw;
@@ -1611,6 +1685,9 @@ struct rtw89_btc_wl_rfk_info {
u32 band: 2;
u32 type: 8;
u32 rsvd: 14;
+
+ u32 start_time;
+ u32 proc_time;
};
struct rtw89_btc_bt_smap {
@@ -1724,12 +1801,14 @@ struct rtw89_btc_wl_nhm {
struct rtw89_btc_wl_info {
struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
+ struct rtw89_btc_wl_link_info rlink_info[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
struct rtw89_btc_rf_para rf_para;
@@ -1740,9 +1819,14 @@ struct rtw89_btc_wl_info {
u8 rssi_level;
u8 cn_report;
u8 coex_mode;
+ u8 pta_req_mac;
+ u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */
+ bool is_5g_hi_channel;
+ bool pta_reg_mac_chg;
bool bg_mode;
bool scbd_change;
+ bool fw_ver_mismatch;
u32 scbd;
};
@@ -1870,9 +1954,18 @@ struct rtw89_btc_fbtc_btscan_v2 {
struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX];
} __packed;
+struct rtw89_btc_fbtc_btscan_v7 {
+ u8 fver; /* btc_ver::fcxbtscan */
+ u8 type;
+ u8 rsvd0;
+ u8 rsvd1;
+ struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX];
+} __packed;
+
union rtw89_btc_fbtc_btscan {
struct rtw89_btc_fbtc_btscan_v1 v1;
struct rtw89_btc_fbtc_btscan_v2 v2;
+ struct rtw89_btc_fbtc_btscan_v7 v7;
};
struct rtw89_btc_bt_info {
@@ -2014,6 +2107,20 @@ struct rtw89_btc_fbtc_rpt_ctrl_info_v5 {
__le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info_v8 {
+ __le16 cnt; /* fw report counter */
+ __le16 cnt_c2h; /* fw send c2h counter */
+ __le16 cnt_h2c; /* fw recv h2c counter */
+ __le16 len_c2h; /* The total length of the last C2H */
+
+ __le16 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 fw_ver;
+ __le32 en; /* report map */
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
__le32 cx_ver; /* match which driver's coex version */
__le32 cx_offload;
@@ -2070,11 +2177,25 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v8 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */
+ u8 rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v1 v1;
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
enum rtw89_fbtc_ext_ctrl_type {
@@ -2181,15 +2302,32 @@ enum rtw89_btc_afh_map_type { /*AFH MAP TYPE */
};
#define BTC_DBG_MAX1 32
-struct rtw89_btc_fbtc_gpio_dbg {
+struct rtw89_btc_fbtc_gpio_dbg_v1 {
u8 fver; /* btc_ver::fcxgpiodbg */
u8 rsvd;
- u16 rsvd2;
- u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
- u32 pre_state; /* the debug signal is 1 or 0 */
+ __le16 rsvd2;
+ __le32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enabled */
+ __le32 pre_state; /* the debug signal is 1 or 0 */
u8 gpio_map[BTC_DBG_MAX1]; /*the debug signals to GPIO-Position */
} __packed;
+struct rtw89_btc_fbtc_gpio_dbg_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gpio_map[BTC_DBG_MAX1];
+
+ __le32 en_map;
+ __le32 pre_state;
+} __packed;
+
+union rtw89_btc_fbtc_gpio_dbg {
+ struct rtw89_btc_fbtc_gpio_dbg_v1 v1;
+ struct rtw89_btc_fbtc_gpio_dbg_v7 v7;
+};
+
struct rtw89_btc_fbtc_mreg_val_v1 {
u8 fver; /* btc_ver::fcxmreg */
u8 reg_num;
@@ -2204,9 +2342,18 @@ struct rtw89_btc_fbtc_mreg_val_v2 {
__le32 mreg_val[CXMREG_MAX_V2];
} __packed;
+struct rtw89_btc_fbtc_mreg_val_v7 {
+ u8 fver;
+ u8 reg_num;
+ u8 rsvd0;
+ u8 rsvd1;
+ __le32 mreg_val[CXMREG_MAX_V2];
+} __packed;
+
union rtw89_btc_fbtc_mreg_val {
struct rtw89_btc_fbtc_mreg_val_v1 v1;
struct rtw89_btc_fbtc_mreg_val_v2 v2;
+ struct rtw89_btc_fbtc_mreg_val_v7 v7;
};
#define RTW89_DEF_FBTC_MREG(__type, __bytes, __offset) \
@@ -2233,6 +2380,40 @@ struct rtw89_btc_fbtc_slots {
struct rtw89_btc_fbtc_slot slot[CXST_MAX];
} __packed;
+struct rtw89_btc_fbtc_slot_v7 {
+ __le16 dur; /* slot duration */
+ __le16 cxtype;
+ __le32 cxtbl;
+} __packed;
+
+struct rtw89_btc_fbtc_slot_u16 {
+ __le16 dur; /* slot duration */
+ __le16 cxtype;
+ __le16 cxtbl_l16; /* coex table [15:0] */
+ __le16 cxtbl_h16; /* coex table [31:16] */
+} __packed;
+
+struct rtw89_btc_fbtc_1slot_v7 {
+ u8 fver;
+ u8 sid; /* slot id */
+ __le16 rsvd;
+ struct rtw89_btc_fbtc_slot_v7 slot;
+} __packed;
+
+struct rtw89_btc_fbtc_slots_v7 {
+ u8 fver;
+ u8 slot_cnt;
+ u8 rsvd0;
+ u8 rsvd1;
+ struct rtw89_btc_fbtc_slot_u16 slot[CXST_MAX];
+ __le32 update_map;
+} __packed;
+
+union rtw89_btc_fbtc_slots_info {
+ struct rtw89_btc_fbtc_slots v1;
+ struct rtw89_btc_fbtc_slots_v7 v7;
+} __packed;
+
struct rtw89_btc_fbtc_step {
u8 type;
u8 val;
@@ -2339,6 +2520,12 @@ struct rtw89_btc_fbtc_cycle_leak_info {
__le16 tmax; /* max leak-slot time */
} __packed;
+struct rtw89_btc_fbtc_cycle_leak_info_v7 {
+ __le16 tavg;
+ __le16 tamx;
+ __le32 cnt_rximr;
+} __packed;
+
#define RTW89_BTC_FDDT_PHASE_CYCLE GENMASK(9, 0)
#define RTW89_BTC_FDDT_TRAIN_STEP GENMASK(15, 10)
@@ -2451,11 +2638,36 @@ struct rtw89_btc_fbtc_cysta_v5 { /* statistics for cycles */
__le32 except_map;
} __packed;
+struct rtw89_btc_fbtc_cysta_v7 { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+ u8 collision_cnt; /* counter for events/timers occurring at the same time */
+ u8 except_cnt;
+
+ u8 wl_rx_err_ratio[BTC_CYCLE_SLOT_MAX];
+
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX];
+
+ __le16 skip_cnt;
+ __le16 cycles; /* total cycle number */
+
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX]; /* record the wl/bt slot time */
+ __le16 slot_cnt[CXST_MAX]; /* slot count */
+ __le16 bcn_cnt[CXBCN_MAX];
+
+ struct rtw89_btc_fbtc_cycle_time_info_v5 cycle_time;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_cycle_leak_info_v7 leak_slot;
+
+ __le32 except_map;
+} __packed;
+
union rtw89_btc_fbtc_cysta_info {
struct rtw89_btc_fbtc_cysta_v2 v2;
struct rtw89_btc_fbtc_cysta_v3 v3;
struct rtw89_btc_fbtc_cysta_v4 v4;
struct rtw89_btc_fbtc_cysta_v5 v5;
+ struct rtw89_btc_fbtc_cysta_v7 v7;
};
struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
@@ -2476,12 +2688,24 @@ struct rtw89_btc_fbtc_cynullsta_v2 { /* cycle null statistics */
__le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
} __packed;
+struct rtw89_btc_fbtc_cynullsta_v7 { /* cycle null statistics */
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ __le32 tmax[2];
+ __le32 tavg[2];
+ __le32 result[2][5];
+} __packed;
+
union rtw89_btc_fbtc_cynullsta_info {
struct rtw89_btc_fbtc_cynullsta_v1 v1; /* info from fw */
struct rtw89_btc_fbtc_cynullsta_v2 v2;
+ struct rtw89_btc_fbtc_cynullsta_v7 v7;
};
-struct rtw89_btc_fbtc_btver {
+struct rtw89_btc_fbtc_btver_v1 {
u8 fver; /* btc_ver::fcxbtver */
u8 rsvd;
__le16 rsvd2;
@@ -2490,6 +2714,22 @@ struct rtw89_btc_fbtc_btver {
__le32 feature;
} __packed;
+struct rtw89_btc_fbtc_btver_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ __le32 coex_ver; /* bit[15:8]->shared, bit[7:0]->non-shared */
+ __le32 fw_ver;
+ __le32 feature;
+} __packed;
+
+union rtw89_btc_fbtc_btver {
+ struct rtw89_btc_fbtc_btver_v1 v1;
+ struct rtw89_btc_fbtc_btver_v7 v7;
+} __packed;
+
struct rtw89_btc_fbtc_btafh {
u8 fver; /* btc_ver::fcxbtafh */
u8 rsvd;
@@ -2511,6 +2751,18 @@ struct rtw89_btc_fbtc_btafh_v2 {
u8 afh_le_b[4];
} __packed;
+struct rtw89_btc_fbtc_btafh_v7 {
+ u8 fver;
+ u8 map_type;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 afh_l[4]; /*bit0:2402, bit1:2403.... bit31:2433 */
+ u8 afh_m[4]; /*bit0:2434, bit1:2435.... bit31:2465 */
+ u8 afh_h[4]; /*bit0:2466, bit1:2467.....bit14:2480 */
+ u8 afh_le_a[4];
+ u8 afh_le_b[4];
+} __packed;
+
struct rtw89_btc_fbtc_btdevinfo {
u8 fver; /* btc_ver::fcxbtdevinfo */
u8 rsvd;
@@ -2551,9 +2803,14 @@ struct rtw89_btc_trx_info {
u32 rx_err_ratio;
};
+union rtw89_btc_fbtc_slot_u {
+ struct rtw89_btc_fbtc_slot v1[CXST_MAX];
+ struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX];
+};
+
struct rtw89_btc_dm {
- struct rtw89_btc_fbtc_slot slot[CXST_MAX];
- struct rtw89_btc_fbtc_slot slot_now[CXST_MAX];
+ union rtw89_btc_fbtc_slot_u slot;
+ union rtw89_btc_fbtc_slot_u slot_now;
struct rtw89_btc_fbtc_tdma tdma;
struct rtw89_btc_fbtc_tdma tdma_now;
struct rtw89_mac_ax_coex_gnt gnt;
@@ -2569,6 +2826,8 @@ struct rtw89_btc_dm {
u32 update_slot_map;
u32 set_ant_path;
+ u32 e2g_slot_limit;
+ u32 e2g_slot_nulltx_time;
u32 wl_only: 1;
u32 wl_fw_cx_offload: 1;
@@ -2589,6 +2848,7 @@ struct rtw89_btc_dm {
u32 wl_btg_rx_rb: 2;
u16 slot_dur[CXST_MAX];
+ u16 bt_slot_flood;
u8 run_reason;
u8 run_action;
@@ -2596,6 +2856,8 @@ struct rtw89_btc_dm {
u8 wl_pre_agc: 2;
u8 wl_lna2: 1;
u8 wl_pre_agc_rb: 2;
+ u8 bt_select: 2; /* 0:s0, 1:s1, 2:s0 & s1, refer to enum btc_bt_index */
+ u8 slot_req_more: 1;
};
struct rtw89_btc_ctrl {
@@ -2643,6 +2905,7 @@ enum btf_fw_event_report {
BTC_RPT_TYPE_CYSTA,
BTC_RPT_TYPE_STEP,
BTC_RPT_TYPE_NULLSTA,
+ BTC_RPT_TYPE_FDDT, /* added when ver->fwevntrptl == 1 */
BTC_RPT_TYPE_MREG,
BTC_RPT_TYPE_GPIO_DBG,
BTC_RPT_TYPE_BT_VER,
@@ -2650,7 +2913,10 @@ enum btf_fw_event_report {
BTC_RPT_TYPE_BT_AFH,
BTC_RPT_TYPE_BT_DEVICE,
BTC_RPT_TYPE_TEST,
- BTC_RPT_TYPE_MAX = 31
+ BTC_RPT_TYPE_MAX = 31,
+
+ __BTC_RPT_TYPE_V0_SAME = BTC_RPT_TYPE_NULLSTA,
+ __BTC_RPT_TYPE_V0_MAX = 12,
};
enum rtw_btc_btf_reg_type {
@@ -2691,7 +2957,7 @@ struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_slots finfo; /* info from fw */
+ union rtw89_btc_fbtc_slots_info finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_cysta {
@@ -2716,12 +2982,12 @@ struct rtw89_btc_rpt_fbtc_mreg {
struct rtw89_btc_rpt_fbtc_gpio_dbg {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */
+ union rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_btver {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_btver finfo; /* info from fw */
+ union rtw89_btc_fbtc_btver finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_btscan {
@@ -2792,6 +3058,7 @@ struct rtw89_btc_ver {
u8 fcxctrl;
u8 fcxinit;
+ u8 fwevntrptl;
u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
@@ -2821,6 +3088,7 @@ struct rtw89_btc {
u8 btg_pos;
u16 policy_len;
u16 policy_type;
+ u32 hubmsg_cnt;
bool bt_req_en;
bool update_policy_force;
bool lps;
@@ -3122,6 +3390,7 @@ struct rtw89_vif {
u8 port;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
+ __be32 ip_addr;
u8 phy_idx;
u8 mac_idx;
u8 net_type;
@@ -3879,6 +4148,7 @@ struct rtw89_chip_info {
u8 support_bands;
u16 support_bandwidths;
bool support_unii4;
+ bool support_rnr;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@@ -3977,6 +4247,7 @@ union rtw89_bus_info {
struct rtw89_driver_info {
const struct rtw89_chip_info *chip;
+ const struct dmi_system_id *quirks;
union rtw89_bus_info bus;
};
@@ -4324,6 +4595,12 @@ enum rtw89_flags {
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_quirks {
+ RTW89_QUIRK_PCI_BER,
+
+ NUM_OF_RTW89_QUIRKS,
+};
+
enum rtw89_pkt_drop_sel {
RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
@@ -4641,11 +4918,15 @@ struct rtw89_regd {
};
#define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX
+#define RTW89_5GHZ_UNII4_CHANNEL_NUM 3
+#define RTW89_5GHZ_UNII4_START_INDEX 25
struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
+ DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
};
enum rtw89_ifs_clm_application {
@@ -4925,11 +5206,61 @@ struct rtw89_wow_cam_info {
bool valid;
};
+struct rtw89_wow_key_info {
+ u8 ptk_tx_iv[8];
+ u8 valid_check;
+ u8 symbol_check_en;
+ u8 gtk_keyidx;
+ u8 rsvd[5];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+} __packed;
+
+struct rtw89_wow_gtk_info {
+ u8 kck[32];
+ u8 kek[32];
+ u8 tk1[16];
+ u8 txmickey[8];
+ u8 rxmickey[8];
+ __le32 igtk_keyid;
+ __le64 ipn;
+ u8 igtk[2][32];
+ u8 psk[32];
+} __packed;
+
+struct rtw89_wow_aoac_report {
+ u8 rpt_ver;
+ u8 sec_type;
+ u8 key_idx;
+ u8 pattern_idx;
+ u8 rekey_ok;
+ u8 ptk_tx_iv[8];
+ u8 eapol_key_replay_count[8];
+ u8 gtk[32];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+ u64 igtk_key_id;
+ u64 igtk_ipn;
+ u8 igtk[32];
+ u8 csa_pri_ch;
+ u8 csa_bw;
+ u8 csa_ch_offset;
+ u8 csa_chsw_failed;
+ u8 csa_ch_band;
+};
+
struct rtw89_wow_param {
struct ieee80211_vif *wow_vif;
DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
+ struct rtw89_wow_key_info key_info;
+ struct rtw89_wow_gtk_info gtk_info;
+ struct rtw89_wow_aoac_report aoac_rpt;
u8 pattern_cnt;
+ u8 ptk_alg;
+ u8 gtk_alg;
+ u8 ptk_keyidx;
+ u8 akm;
};
struct rtw89_mcc_limit {
@@ -5084,6 +5415,7 @@ struct rtw89_dev {
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
+ DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS);
struct rtw89_phy_stat phystat;
struct rtw89_rfk_wait_info rfk_wait;
@@ -6129,6 +6461,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct cfg80211_tid_config *tid_config);
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 185cd339c085..044a5b90c7f4 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
@@ -13,6 +14,30 @@
#include "reg.h"
#include "util.h"
+struct rtw89_eapol_2_of_2 {
+ struct ieee80211_hdr_3addr hdr;
+ u8 gtkbody[14];
+ u8 key_des_ver;
+ u8 rsvd[92];
+} __packed __aligned(2);
+
+struct rtw89_sa_query {
+ struct ieee80211_hdr_3addr hdr;
+ u8 category;
+ u8 action;
+} __packed __aligned(2);
+
+struct rtw89_arp_rsp {
+ struct ieee80211_hdr_3addr addr;
+ u8 llc_hdr[sizeof(rfc1042_header)];
+ __be16 llc_type;
+ struct arphdr arp_hdr;
+ u8 sender_hw[ETH_ALEN];
+ __be32 sender_ip;
+ u8 target_hw[ETH_ALEN];
+ __be32 target_ip;
+} __packed __aligned(2);
+
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
union rtw89_fw_element_arg {
@@ -637,6 +662,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1349,13 +1375,12 @@ dump:
static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
u32 val32;
- u16 val16;
val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
- val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
- rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
+ val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
+ rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
rtw89_fw_prog_cnt_dump(rtwdev);
}
@@ -1394,8 +1419,9 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
- bool include_bb)
+static
+int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool include_bb)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
@@ -1433,7 +1459,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
if (ret) {
rtw89_warn(rtwdev, "download firmware fail\n");
- return ret;
+ goto fwdl_err;
}
return ret;
@@ -1443,6 +1469,21 @@ fwdl_err:
return ret;
}
+int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool include_bb)
+{
+ int retry;
+ int ret;
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = __rtw89_fw_download(rtwdev, type, include_bb);
+ if (!ret)
+ return 0;
+ }
+
+ return ret;
+}
+
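The download entry point above is now a thin wrapper: it retries __rtw89_fw_download() up to five times and, if every attempt fails, propagates the error from the final attempt. A minimal stand-alone sketch of that shape, assuming a hypothetical do_download() in place of the real download routine (not driver code):

#include <stdio.h>

static int attempts;

/* Stand-in for __rtw89_fw_download(): fail twice, then succeed. */
static int do_download(void)
{
	return ++attempts < 3 ? -1 : 0;
}

static int download_with_retry(void)
{
	int ret = 0;
	int retry;

	for (retry = 0; retry < 5; retry++) {
		ret = do_download();
		if (!ret)
			return 0;	/* stop on the first success */
	}

	return ret;	/* error from the final attempt */
}

int main(void)
{
	printf("result=%d after %d attempts\n", download_with_retry(), attempts);
	return 0;
}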
int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
struct rtw89_fw_info *fw = &rtwdev->fw;
@@ -1710,28 +1751,30 @@ fail:
return ret;
}
-#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta)
{
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
- rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
+ rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_FR_EXCHG,
H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
- H2C_DCTL_SEC_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2129,6 +2172,111 @@ fail:
return ret;
}
+static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
+ 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_eapol_2_of_2 *eapol_pkt;
+ struct sk_buff *skb;
+ u8 key_des_ver;
+
+ if (rtw_wow->ptk_alg == 3)
+ key_des_ver = 1;
+ else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
+ key_des_ver = 2;
+ else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
+ key_des_ver = 3;
+ else
+ key_des_ver = 0;
+
+ skb = dev_alloc_skb(sizeof(*eapol_pkt));
+ if (!skb)
+ return NULL;
+
+ eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
+ eapol_pkt->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(eapol_pkt->hdr.addr1, bss_conf->bssid);
+ ether_addr_copy(eapol_pkt->hdr.addr2, vif->addr);
+ ether_addr_copy(eapol_pkt->hdr.addr3, bss_conf->bssid);
+ memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
+ eapol_pkt->key_des_ver = key_des_ver;
+
+ return skb;
+}
+
+static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct rtw89_sa_query *sa_query;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(sizeof(*sa_query));
+ if (!skb)
+ return NULL;
+
+ sa_query = skb_put_zero(skb, sizeof(*sa_query));
+ sa_query->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(sa_query->hdr.addr1, bss_conf->bssid);
+ ether_addr_copy(sa_query->hdr.addr2, vif->addr);
+ ether_addr_copy(sa_query->hdr.addr3, bss_conf->bssid);
+ sa_query->category = WLAN_CATEGORY_SA_QUERY;
+ sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
+
+ return skb;
+}
+
+static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_arp_rsp *arp_skb;
+ struct arphdr *arp_hdr;
+ struct sk_buff *skb;
+ __le16 fc;
+
+ skb = dev_alloc_skb(sizeof(struct rtw89_arp_rsp));
+ if (!skb)
+ return NULL;
+
+ arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
+
+ if (rtw_wow->ptk_alg)
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_PROTECTED);
+ else
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
+
+ arp_skb->addr.frame_control = fc;
+ ether_addr_copy(arp_skb->addr.addr1, rtwvif->bssid);
+ ether_addr_copy(arp_skb->addr.addr2, rtwvif->mac_addr);
+ ether_addr_copy(arp_skb->addr.addr3, rtwvif->bssid);
+
+ memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
+ arp_skb->llc_type = htons(ETH_P_ARP);
+
+ arp_hdr = &arp_skb->arp_hdr;
+ arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
+ arp_hdr->ar_pro = htons(ETH_P_IP);
+ arp_hdr->ar_hln = ETH_ALEN;
+ arp_hdr->ar_pln = 4;
+ arp_hdr->ar_op = htons(ARPOP_REPLY);
+
+ ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr);
+ arp_skb->sender_ip = rtwvif->ip_addr;
+
+ return skb;
+}
+
static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
enum rtw89_fw_pkt_ofld_type type,
@@ -2156,6 +2304,15 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
break;
+ case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
+ skb = rtw89_eapol_get(rtwdev, rtwvif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_SA_QUERY:
+ skb = rtw89_sa_query_get(rtwdev, rtwvif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_ARP_RSP:
+ skb = rtw89_arp_response_get(rtwdev, rtwvif);
+ break;
default:
goto err;
}
@@ -4120,6 +4277,48 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
+ struct rtw89_h2c_cxrole_v8 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
@@ -4631,6 +4830,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
u8 i, idx;
sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband) {
+ option->prohib_chan = U64_MAX;
+ return;
+ }
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
@@ -4650,6 +4853,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
struct rtw89_h2c_scanofld_be_macc_role *macc_role;
struct rtw89_chan *op = &scan_info->op_chan;
struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_pktofld_info *pkt_info;
struct rtw89_h2c_scanofld_be *h2c;
struct sk_buff *skb;
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
@@ -4674,6 +4878,16 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
ptr = skb->data;
+ memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+ list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
+ if (pkt_info->wildcard_6ghz) {
+ /* Provide wildcard as template */
+ probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
+ break;
+ }
+ }
+
h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
le32_encode_bits(option->scan_mode,
RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
@@ -5511,6 +5725,7 @@ static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
info->ssid_len = req->ssids[ssid_idx].ssid_len;
return false;
} else {
+ info->wildcard_6ghz = true;
return true;
}
}
@@ -5545,12 +5760,8 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
goto out;
}
- if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
- ssid_idx)) {
- kfree_skb(new);
- kfree(info);
- goto out;
- }
+ rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
+ ssid_idx);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
if (ret) {
@@ -5708,6 +5919,10 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
continue;
else if (info->channel_6ghz && probe_count != 0)
ch_info->period += RTW89_CHANNEL_TIME_6G;
+
+ if (info->wildcard_6ghz)
+ continue;
+
ch_info->pkt_id[probe_count++] = info->id;
if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
break;
@@ -5762,6 +5977,10 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
if (info->channel_6ghz &&
ch_info->pri_ch != info->channel_6ghz)
continue;
+
+ if (info->wildcard_6ghz)
+ continue;
+
ch_info->pkt_id[probe_count++] = info->id;
if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
break;
@@ -6228,6 +6447,57 @@ fail:
return ret;
}
+int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_h2c_arp_offload *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 pkt_id = 0;
+ int ret;
+
+ if (enable) {
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_ARP_RSP,
+ &pkt_id);
+ if (ret)
+ return ret;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_arp_offload *)skb->data;
+
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
+ le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
+ le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_ARP_OFLD, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
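The w0 assembly above follows the le32_encode_bits()/GENMASK() convention used throughout the driver: each field mask is declared once in fw.h (here RTW89_H2C_ARP_OFFLOAD_W0_*) and the host-endian values are packed into a little-endian word. A minimal sketch of that packing, with made-up SKETCH_* names standing in for the real masks:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SKETCH_W0_ENABLE	BIT(0)
#define SKETCH_W0_ACTION	BIT(1)
#define SKETCH_W0_MACID		GENMASK(23, 16)
#define SKETCH_W0_PKT_ID	GENMASK(31, 24)

/* Pack host-endian fields into the little-endian word the firmware parses.
 * For enable = true, macid = 1, pkt_id = 5 this encodes 0x05010001
 * regardless of host endianness.
 */
static __le32 sketch_pack_w0(bool enable, u8 macid, u8 pkt_id)
{
	return le32_encode_bits(enable, SKETCH_W0_ENABLE) |
	       le32_encode_bits(0, SKETCH_W0_ACTION) |
	       le32_encode_bits(macid, SKETCH_W0_MACID) |
	       le32_encode_bits(pkt_id, SKETCH_W0_PKT_ID);
}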
#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable)
@@ -6273,30 +6543,38 @@ fail:
return ret;
}
-#define H2C_WOW_GLOBAL_LEN 8
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
- struct sk_buff *skb;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_h2c_wow_global *h2c;
u8 macid = rtwvif->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
return -ENOMEM;
}
- skb_put(skb, H2C_WOW_GLOBAL_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_global *)skb->data;
- RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
- RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
+ le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
+ le32_encode_bits(rtw_wow->ptk_alg,
+ RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
+ le32_encode_bits(rtw_wow->gtk_alg,
+ RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
+ h2c->key_info = rtw_wow->key_info;
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_WOW,
H2C_FUNC_WOW_GLOBAL, 0, 1,
- H2C_WOW_GLOBAL_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -6324,7 +6602,7 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
return -ENOMEM;
}
@@ -6411,6 +6689,112 @@ fail:
return ret;
}
+int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ struct rtw89_h2c_wow_gtk_ofld *h2c;
+ u8 macid = rtwvif->mac_id;
+ u32 len = sizeof(*h2c);
+ u8 pkt_id_sa_query = 0;
+ struct sk_buff *skb;
+ u8 pkt_id_eapol = 0;
+ int ret;
+
+ if (!rtw_wow->gtk_alg)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
+
+ if (!enable) {
+ skb_put_zero(skb, sizeof(*gtk_info));
+ goto hdr;
+ }
+
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
+ &pkt_id_eapol);
+ if (ret)
+ goto fail;
+
+ if (gtk_info->igtk_keyid) {
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_SA_QUERY,
+ &pkt_id_sa_query);
+ if (ret)
+ goto fail;
+ }
+
+ /* TKIP is not supported yet */
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
+ le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
+ le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
+ RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
+ le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
+ le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
+ h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
+ RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
+ le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
+ h2c->gtk_info = rtw_wow->gtk_info;
+
+hdr:
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_GTK_OFLD, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_wow_aoac *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+
+ /* This H2C only notifies the firmware to generate the AOAC report
+ * C2H; it carries no parameters.
+ */
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
+ len);
+
+ cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
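rtw89_h2c_tx_and_wait() blocks the caller until the matching AOAC report C2H arrives; the pairing key is RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ), which the C2H handler added in mac.c later completes. As a rough analogy only (the driver uses its own rtw89_wait_info machinery, not a bare completion, and the 100 ms timeout below is an arbitrary example), the request/report handshake reduces to this pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct sketch_aoac_ctx {
	struct completion done;		/* signalled by the C2H handler */
};

/* Requester side: send the H2C, then block until the report shows up. */
static int sketch_request_report(struct sketch_aoac_ctx *ctx)
{
	init_completion(&ctx->done);
	/* ...queue the AOAC request H2C here... */
	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}

/* C2H side: parse the report into the driver state, then wake the waiter. */
static void sketch_report_arrived(struct sketch_aoac_ctx *ctx)
{
	/* ...copy fields into the driver's aoac_rpt... */
	complete(&ctx->done);
}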
/* Return < 0, if failures happen during waiting for the condition.
* Return 0, when waiting for the condition succeeds.
* Return > 0, if the wait is considered unreachable due to driver/FW design,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 44311f65b4fa..4151c9d566bd 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -48,6 +48,32 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM GENMASK(15, 8)
#define RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_0 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_1 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_2 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_3 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_4 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_5 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_6 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_7 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7 GENMASK(15, 8)
+
struct rtw89_h2creg_hdr {
u32 w0;
};
@@ -98,8 +124,11 @@ enum rtw89_mac_h2c_type {
RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN,
- RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6,
- RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_3_REQ,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL,
};
enum rtw89_mac_c2h_type {
@@ -340,8 +369,9 @@ struct rtw89_mac_chinfo_be {
struct rtw89_pktofld_info {
struct list_head list;
u8 id;
+ bool wildcard_6ghz;
- /* Below fields are for 6 GHz RNR use only */
+ /* Below fields are for 6 GHz RNR use on WiFi 6 chips only */
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
u8 bssid[ETH_ALEN];
@@ -1432,308 +1462,6 @@ struct rtw89_h2c_cctlinfo_ud_g7 {
#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
-static inline void SET_DCTL_MACID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
-}
-
-static inline void SET_DCTL_OPERATION_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
-}
-
-#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0)
-static inline void SET_DCTL_QOS_FIELD_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1,
- GENMASK(7, 0));
-}
-
-#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID GENMASK(6, 0)
-static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID,
- GENMASK(14, 8));
-}
-
-#define SET_DCTL_MASK_QOS_DATA BIT(0)
-static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA,
- BIT(15));
-}
-
-#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0)
-static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L,
- GENMASK(31, 16));
-}
-
-#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0)
-static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0));
- le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H,
- GENMASK(31, 0));
-}
-
-#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ0_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0,
- GENMASK(11, 0));
-}
-
-#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ1_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1,
- GENMASK(23, 12));
-}
-
-#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0)
-static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN,
- GENMASK(26, 24));
-}
-
-#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0)
-static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN,
- BIT(27));
-}
-
-#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0)
-static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN,
- BIT(28));
-}
-
-#define SET_DCTL_MASK_WITH_LLC BIT(0)
-static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC,
- BIT(29));
-}
-
-#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ2_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2,
- GENMASK(11, 0));
-}
-
-#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ3_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3,
- GENMASK(23, 12));
-}
-
-#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0)
-static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND,
- GENMASK(27, 24));
-}
-
-#define SET_DCTL_MASK_TGT_IND_EN BIT(0)
-static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN,
- BIT(28));
-}
-
-#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0)
-static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB,
- GENMASK(31, 29));
-}
-
-#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0)
-static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN,
- GENMASK(4, 0));
-}
-
-#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0)
-static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID,
- BIT(5));
-}
-
-#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0)
-static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL,
- GENMASK(7, 6));
-}
-
-#define SET_DCTL_MASK_HTC_ORDER BIT(0)
-static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER,
- BIT(8));
-}
-
-#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0)
-static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID,
- GENMASK(10, 9));
-}
-
-#define SET_DCTL_MASK_WAPI BIT(0)
-static inline void SET_DCTL_WAPI_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI,
- BIT(15));
-}
-
-#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0)
-static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE,
- GENMASK(17, 16));
-}
-
-#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0)
-static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(19, 18));
-}
-
-static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(21, 20));
-}
-
-static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(23, 22));
-}
-
-static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(25, 24));
-}
-
-static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(27, 26));
-}
-
-static inline void SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(29, 28));
-}
-
-static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(31, 30));
-}
-
-#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0)
-static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID,
- GENMASK(7, 0));
-}
-
-#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0)
-static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(15, 8));
-}
-
-static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(23, 16));
-}
-
-static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(31, 24));
-}
-
-static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(7, 0));
-}
-
-static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(15, 8));
-}
-
-static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(23, 16));
-}
-
-static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(31, 24));
-}
-
struct rtw89_h2c_bcn_upd {
__le32 w0;
__le32 w1;
@@ -2157,45 +1885,18 @@ static inline void RTW89_SET_DISCONNECT_DETECT_TRYOK_BCNFAIL_COUNT_LIMIT(void *h
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
}
-static inline void RTW89_SET_WOW_GLOBAL_ENABLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_DROP_ALL_PKT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_RX_PARSE_AFTER_WAKE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(2));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_WAKE_BAR_PULLED(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(3));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_MAC_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_PAIRWISE_SEC_ALGO(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_GROUP_SEC_ALGO(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
-}
+struct rtw89_h2c_wow_global {
+ __le32 w0;
+ struct rtw89_wow_key_info key_info;
+} __packed;
-static inline void RTW89_SET_WOW_GLOBAL_REMOTECTRL_INFO_CONTENT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0));
-}
+#define RTW89_H2C_WOW_GLOBAL_W0_ENABLE BIT(0)
+#define RTW89_H2C_WOW_GLOBAL_W0_DROP_ALL_PKT BIT(1)
+#define RTW89_H2C_WOW_GLOBAL_W0_RX_PARSE_AFTER_WAKE BIT(2)
+#define RTW89_H2C_WOW_GLOBAL_W0_WAKE_BAR_PULLED BIT(3)
+#define RTW89_H2C_WOW_GLOBAL_W0_MAC_ID GENMASK(15, 8)
+#define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16)
+#define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24)
static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val)
{
@@ -2307,6 +2008,34 @@ static inline void RTW89_SET_WOW_CAM_UPD_VALID(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c + 5, val, BIT(31));
}
+struct rtw89_h2c_wow_gtk_ofld {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_wow_gtk_info gtk_info;
+} __packed;
+
+#define RTW89_H2C_WOW_GTK_OFLD_W0_EN BIT(0)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN BIT(1)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN BIT(2)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_PAIRWISE_WAKEUP BIT(3)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_NOREKEY_WAKEUP BIT(4)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID GENMASK(23, 16)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID GENMASK(31, 24)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID GENMASK(7, 0)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_BIP_SEC_ALGO GENMASK(9, 8)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT GENMASK(17, 10)
+
+struct rtw89_h2c_arp_offload {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_ARP_OFFLOAD_W0_ENABLE BIT(0)
+#define RTW89_H2C_ARP_OFFLOAD_W0_ACTION BIT(1)
+#define RTW89_H2C_ARP_OFFLOAD_W0_MACID GENMASK(23, 16)
+#define RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID GENMASK(31, 24)
+#define RTW89_H2C_ARP_OFFLOAD_W1_CONTENT GENMASK(31, 0)
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2395,6 +2124,32 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v8_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ u8 pta_req_band;
+ u8 dbcc_en;
+ u8 dbcc_chg;
+ u8 dbcc_2g_phy;
+
+ struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v8_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+} __packed;
+
+struct rtw89_h2c_cxrole_v8 {
+ struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_btc_wl_role_info_v8_u8 _u8;
+ struct rtw89_btc_wl_role_info_v8_u32 _u32;
+} __packed;
+
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
u8 ant_type;
@@ -2955,6 +2710,7 @@ struct rtw89_h2c_scanofld_be {
__le32 w5;
__le32 w6;
__le32 w7;
+ __le32 w8;
struct rtw89_h2c_scanofld_be_macc_role role[];
} __packed;
@@ -3636,6 +3392,10 @@ struct rtw89_h2c_mrc_upd_duration {
#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+struct rtw89_h2c_wow_aoac {
+ __le32 w0;
+} __packed;
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3864,6 +3624,30 @@ struct rtw89_c2h_pkt_ofld_rsp {
#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP GENMASK(10, 8)
#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN GENMASK(31, 16)
+struct rtw89_c2h_wow_aoac_report {
+ struct rtw89_c2h_hdr c2h_hdr;
+ u8 rpt_ver;
+ u8 sec_type;
+ u8 key_idx;
+ u8 pattern_idx;
+ u8 rekey_ok;
+ u8 rsvd1[3];
+ u8 ptk_tx_iv[8];
+ u8 eapol_key_replay_count[8];
+ u8 gtk[32];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+ __le64 igtk_key_id;
+ __le64 igtk_ipn;
+ u8 igtk[32];
+ u8 csa_pri_ch;
+ u8 csa_bw_ch_offset;
+ u8 csa_ch_band_chsw_failed;
+ u8 csa_rsvd1;
+} __packed;
+
+#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
+
struct rtw89_h2c_bcnfltr {
__le32 w0;
} __packed;
@@ -4141,11 +3925,21 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 1 - WOW */
#define H2C_CL_MAC_WOW 0x1
-#define H2C_FUNC_KEEP_ALIVE 0x0
-#define H2C_FUNC_DISCONNECT_DETECT 0x1
-#define H2C_FUNC_WOW_GLOBAL 0x2
-#define H2C_FUNC_WAKEUP_CTRL 0x8
-#define H2C_FUNC_WOW_CAM_UPD 0xC
+enum rtw89_wow_h2c_func {
+ H2C_FUNC_KEEP_ALIVE = 0x0,
+ H2C_FUNC_DISCONNECT_DETECT = 0x1,
+ H2C_FUNC_WOW_GLOBAL = 0x2,
+ H2C_FUNC_GTK_OFLD = 0x3,
+ H2C_FUNC_ARP_OFLD = 0x4,
+ H2C_FUNC_WAKEUP_CTRL = 0x8,
+ H2C_FUNC_WOW_CAM_UPD = 0xC,
+ H2C_FUNC_AOAC_REPORT_REQ = 0xD,
+
+ NUM_OF_RTW89_WOW_H2C_FUNC,
+};
+
+#define RTW89_WOW_WAIT_COND(func) \
+ (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
@@ -4568,6 +4362,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
@@ -4653,6 +4448,8 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
+int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -4661,6 +4458,10 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
struct rtw89_wow_cam_info *cam_info);
+int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ bool enable);
+int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_add_req *p);
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index aa5b396b5d2b..3fe0046f6eaa 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -3644,6 +3644,7 @@ static int set_host_rpr_ax(struct rtw89_dev *rtwdev)
static int trx_init_ax(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
int ret;
@@ -3687,6 +3688,10 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
+ if (chip_id == RTL8852C)
+ rtw89_write32_clr(rtwdev, R_AX_RSP_CHK_SIG,
+ B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN);
+
return 0;
}
@@ -5132,6 +5137,37 @@ rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
}
static void
+rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ const struct rtw89_c2h_wow_aoac_report *c2h =
+ (const struct rtw89_c2h_wow_aoac_report *)skb->data;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+
+ aoac_rpt->rpt_ver = c2h->rpt_ver;
+ aoac_rpt->sec_type = c2h->sec_type;
+ aoac_rpt->key_idx = c2h->key_idx;
+ aoac_rpt->pattern_idx = c2h->pattern_idx;
+ aoac_rpt->rekey_ok = u8_get_bits(c2h->rekey_ok,
+ RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX);
+ memcpy(aoac_rpt->ptk_tx_iv, c2h->ptk_tx_iv, sizeof(aoac_rpt->ptk_tx_iv));
+ memcpy(aoac_rpt->eapol_key_replay_count, c2h->eapol_key_replay_count,
+ sizeof(aoac_rpt->eapol_key_replay_count));
+ memcpy(aoac_rpt->gtk, c2h->gtk, sizeof(aoac_rpt->gtk));
+ memcpy(aoac_rpt->ptk_rx_iv, c2h->ptk_rx_iv, sizeof(aoac_rpt->ptk_rx_iv));
+ memcpy(aoac_rpt->gtk_rx_iv, c2h->gtk_rx_iv, sizeof(aoac_rpt->gtk_rx_iv));
+ aoac_rpt->igtk_key_id = le64_to_cpu(c2h->igtk_key_id);
+ aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
+ memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
+
+ cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
+static void
rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
@@ -5218,6 +5254,12 @@ void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
};
+static
+void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
+};
+
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -5270,6 +5312,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_MRC:
return true;
+ case RTW89_MAC_C2H_CLASS_WOW:
+ return true;
}
}
@@ -5296,6 +5340,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
handler = rtw89_mac_c2h_mrc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_WOW:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
+ handler = rtw89_mac_c2h_wow_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5705,7 +5753,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 val = 0;
- if (chip->chip_id == RTL8852C)
+ if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A)
return false;
else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
chip->chip_id == RTL8851B)
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 6fb457153a11..a580cb719233 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -419,6 +419,13 @@ enum rtw89_mac_c2h_mrc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
};
+enum rtw89_mac_c2h_wow_func {
+ RTW89_MAC_C2H_FUNC_AOAC_REPORT,
+ RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 31d1ffb16e83..1ec97250e88e 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
return aifsn * slot_time + sifs;
}
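The one-line fix above changes the SIFS selection from "16 us only on 5 GHz" to "10 us only on 2.4 GHz", so 6 GHz channels now correctly get 16 us as well; AIFS itself is still AIFSN * slot_time + SIFS. A stand-alone arithmetic check of that formula (the 9/20/10/16 us constants are the usual 802.11 values):

#include <stdbool.h>
#include <stdio.h>

static unsigned int aifs_us(unsigned int aifsn, bool short_slot, bool band_2g)
{
	unsigned int slot_time = short_slot ? 9 : 20;
	unsigned int sifs = band_2g ? 10 : 16;

	return aifsn * slot_time + sifs;
}

int main(void)
{
	/* AIFSN = 2, short slot, 5/6 GHz band: 2 * 9 + 16 = 34 us */
	printf("%u\n", aifs_us(2, true, false));
	return 0;
}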
@@ -473,6 +473,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
rtw89_recalc_lps(rtwdev);
+ if (changed & BSS_CHANGED_ARP_FILTER)
+ rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
+
mutex_unlock(&rtwdev->mutex);
}
@@ -1106,6 +1109,28 @@ static void rtw89_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(rtwdev->dev, enabled);
}
+
+static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+
+ if (data->kek_len > sizeof(gtk_info->kek) ||
+ data->kck_len > sizeof(gtk_info->kck)) {
+ rtw89_warn(rtwdev, "kek or kck length over fw limit\n");
+ return;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+
+ memcpy(gtk_info->kek, data->kek, data->kek_len);
+ memcpy(gtk_info->kck, data->kck, data->kck_len);
+
+ mutex_unlock(&rtwdev->mutex);
+}
#endif
const struct ieee80211_ops rtw89_ops = {
@@ -1151,6 +1176,7 @@ const struct ieee80211_ops rtw89_ops = {
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,
.set_wakeup = rtw89_ops_set_wakeup,
+ .set_rekey_data = rtw89_set_rekey_data,
#endif
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f16467377eab..934bdf3b398f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -1751,6 +1751,7 @@ static int set_host_rpr_be(struct rtw89_dev *rtwdev)
static int trx_init_be(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
int ret;
@@ -1794,6 +1795,10 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return ret;
}
+ if (chip_id == RTL8922A)
+ rtw89_write32_clr(rtwdev, R_BE_RSP_CHK_SIG,
+ B_BE_RSP_STATIC_RTS_CHK_SERV_BW_EN);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 19001130ad94..03bbcf9b6737 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -19,6 +19,31 @@ MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
+static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
+ u32 *phy_offset)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u32 val;
+ int ret;
+
+ ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
+ if (ret)
+ return ret;
+
+ val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
+ if (val == RTW89_PCIE_GEN1_SPEED) {
+ *phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ } else if (val == RTW89_PCIE_GEN2_SPEED) {
+ *phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ } else {
+ rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
u32 val;
@@ -1089,7 +1114,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
- cnt = min(cnt, wd_ring->curr_num);
+ if (txch != RTW89_TXCH_CH12)
+ cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
@@ -2298,6 +2324,68 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
+{
+ u16 g1_oobs, g2_oobs;
+ u32 backup_aspm;
+ u32 phy_offset;
+ u16 oobs_val;
+ u16 val16;
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
+
+ g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
+ RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
+ g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
+ RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
+ if (g1_oobs && g2_oobs)
+ goto out;
+
+ ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
+ if (ret)
+ goto out;
+
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
+
+ val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
+ OOBS_LEVEL_MASK);
+ oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK);
+
+ rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val);
+ rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
+ BAC_OOBS_SEL);
+
+ rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val);
+ rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
+ BAC_OOBS_SEL);
+
+out:
+ rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
+}
+
+static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
+{
+ u32 phy_offset;
+
+ if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
+ return;
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+}
+
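RTW89_QUIRK_PCI_BER is only honoured when rtw89_check_quirks() has matched the running platform against the per-chip DMI table wired up in rtw89_pci_probe() below (info->quirks). The table contents are not part of this hunk; the following is a hedged sketch of the usual DMI-quirk shape, with hypothetical vendor and product strings:

#include <linux/dmi.h>
#include <linux/types.h>

/* Hypothetical quirk table; the empty terminating entry is mandatory. */
static const struct dmi_system_id sketch_ber_quirk_tbl[] = {
	{
		.ident = "Example platform needing the PCI BER workaround",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBoard"),
		},
	},
	{}
};

/* dmi_check_system() returns how many entries matched, so gating a
 * workaround bit on "this platform is listed" is a one-liner.
 */
static bool sketch_needs_pci_ber(void)
{
	return dmi_check_system(sketch_ber_quirk_tbl) > 0;
}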
static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
{
if (rtwdev->chip->chip_id != RTL8852A)
@@ -2695,6 +2783,8 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
+ rtw89_pci_disable_eq(rtwdev);
+ rtw89_pci_ber(rtwdev);
rtw89_pci_rxdma_prefth(rtwdev);
rtw89_pci_l1off_pwroff(rtwdev);
rtw89_pci_deglitch_setting(rtwdev);
@@ -3547,7 +3637,7 @@ static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
unsigned long flags = 0;
int ret;
- flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+ flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
if (ret < 0) {
rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
@@ -4171,6 +4261,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
+ rtw89_check_quirks(rtwdev, info->quirks);
+
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
ret = rtw89_core_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index a63b6b7c9bfa..7666753ae983 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -12,11 +12,18 @@
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
#define RAC_CTRL_PPR 0x00
+#define RAC_ANA03 0x03
+#define OOBS_SEN_MASK GENMASK(5, 1)
+#define RAC_ANA09 0x09
+#define BAC_OOBS_SEL BIT(4)
#define RAC_ANA0A 0x0A
#define B_BAC_EQ_SEL BIT(5)
#define RAC_ANA0C 0x0C
#define B_PCIE_BIT_PSAVE BIT(15)
+#define RAC_ANA0D 0x0D
+#define BAC_RX_TEST_EN BIT(6)
#define RAC_ANA10 0x10
+#define ADDR_SEL_PINOUT_DIS_VAL 0x3C4
#define B_PCIE_BIT_PINOUT_DIS BIT(3)
#define RAC_REG_REV2 0x1B
#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
@@ -26,11 +33,17 @@
#define RAC_REG_FLD_0 0x1D
#define BAC_AUTOK_N_MASK GENMASK(3, 2)
#define PCIE_AUTOK_4 0x3
+#define RAC_ANA1E 0x1E
+#define RAC_ANA1E_G1_VAL 0x66EA
+#define RAC_ANA1E_G2_VAL 0x6EEA
#define RAC_ANA1F 0x1F
+#define OOBS_LEVEL_MASK GENMASK(12, 8)
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
#define RAC_ANA26 0x26
#define B_AX_RXEN GENMASK(15, 14)
+#define RAC_ANA2E 0x2E
+#define RAC_ANA2E_VAL 0xFFFE
#define RAC_CTRL_PPR_V1 0x30
#define B_AX_CLK_CALIB_EN BIT(12)
#define B_AX_CALIB_EN BIT(13)
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 12da63d64307..a82b4c56a6f4 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -122,6 +122,7 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
+ u8 *he_phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
@@ -132,15 +133,19 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
/* MCS 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
+ case IEEE80211_STA_RX_BW_20:
+ if (!(he_phy_cap[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
+ /* MCS 7, 9, 11, 13 */
+ return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
+ }
+ fallthrough;
case IEEE80211_STA_RX_BW_80:
default:
mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
/* MCS 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
- case IEEE80211_STA_RX_BW_20:
- mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
- /* MCS 7, 9, 11, 13 */
- return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
}
}
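The reordered switch above treats a station as "20 MHz-only" for EHT rate-mask purposes only when none of the HE channel-width-set bits in PHY capability byte 0 are set; otherwise a 20 MHz bandwidth falls through to the 80 MHz MCS/NSS set. The predicate in isolation:

#include <linux/ieee80211.h>
#include <linux/types.h>

/* True when the HE PHY capabilities advertise no channel-width support at
 * all, i.e. the station is a 20 MHz-only client and the EHT-MCS map for
 * 20 MHz-only stations applies.
 */
static bool sketch_is_20mhz_only_sta(const u8 *he_phy_cap_info)
{
	return !(he_phy_cap_info[0] &
		 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL);
}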
@@ -6398,10 +6403,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
return RF_D;
case MLO_0_PLUS_2_1RF:
case MLO_2_PLUS_0_1RF:
- if (phy_idx == RTW89_PHY_0)
- return RF_AB;
- else
- return RF_AB;
+ /* for both PHY 0/1 */
+ return RF_AB;
case MLO_0_PLUS_2_2RF:
case MLO_2_PLUS_0_2RF:
case MLO_2_PLUS_2_2RF:
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index be0148f2b96f..72eda9bbd3ae 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -381,6 +381,23 @@ static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, addr, 0x7, 0);
}
+static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 mac_idx;
+ u32 addr;
+
+ if (chip_id != RTL8922A)
+ return;
+
+ for (mac_idx = 0; mac_idx < RTW89_MAC_NUM; mac_idx++) {
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RSSI_TARGET_LMT, mac_idx);
+ rtw89_write32(rtwdev, addr, 0x0201FE00);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_TH, mac_idx);
+ rtw89_write32(rtwdev, addr, 0x00FFEC7E);
+ }
+}
+
static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
{
enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
@@ -391,6 +408,7 @@ static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ul_pwr(rtwdev);
}
static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 31290d8cb7f7..92074b73ebeb 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
- if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
+ if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) &&
+ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags))
rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
else
rtw89_mac_power_mode_change(rtwdev, enter);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 72e448e91b6f..01cbd0312102 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -235,6 +235,9 @@
#define R_AX_SPSANA_ON_CTRL1 0x0224
+#define R_AX_SPS_ANA_ON_CTRL2 0x0228
+#define RTL8852B_RFE_05_SPS_ANA 0x4A82
+
#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
@@ -1891,7 +1894,6 @@
B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
- B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
@@ -7497,6 +7499,9 @@
#define B_BE_PWR_BT_VAL GENMASK(8, 0)
#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+#define R_BE_PWR_TH 0x11A78
+#define R_BE_PWR_RSSI_TARGET_LMT 0x11A84
+
#define R_BE_PWR_OFST_SW 0x11AE8
#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index d0857ef60ea6..1a133914f673 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -341,51 +341,60 @@ do { \
static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
struct wiphy *wiphy)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_chip_info *chip = rtwdev->chip;
- bool regd_allow_unii_4 = chip->support_unii4;
struct ieee80211_supported_band *sband;
struct rtw89_acpi_dsm_result res = {};
+ bool enable_by_fcc;
+ bool enable_by_ic;
int ret;
u8 val;
+ int i;
- if (!chip->support_unii4)
- goto bottom;
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ if (!chip->support_unii4) {
+ sband->n_channels -= RTW89_5GHZ_UNII4_CHANNEL_NUM;
+ return;
+ }
+
+ bitmap_fill(regulatory->block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
- ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_59G_EN, &res);
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_UNII4_SUP, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: cannot eval unii 4: %d\n", ret);
+ enable_by_fcc = true;
+ enable_by_ic = false;
goto bottom;
}
val = res.u.value;
+ enable_by_fcc = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_FCC);
+ enable_by_ic = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_IC);
rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "acpi: eval if allow unii 4: %d\n", val);
-
- switch (val) {
- case 0:
- regd_allow_unii_4 = false;
- break;
- case 1:
- regd_allow_unii_4 = true;
- break;
- default:
- break;
- }
+ "acpi: eval if allow unii-4: 0x%x\n", val);
bottom:
- rtw89_debug(rtwdev, RTW89_DBG_REGD, "regd: allow unii 4: %d\n",
- regd_allow_unii_4);
-
- if (regd_allow_unii_4)
- return;
-
- sband = wiphy->bands[NL80211_BAND_5GHZ];
- if (!sband)
- return;
+ for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
+ const struct rtw89_regd *regd = &rtw89_regd_map[i];
- sband->n_channels -= 3;
+ switch (regd->txpwr_regd[RTW89_BAND_5G]) {
+ case RTW89_FCC:
+ if (enable_by_fcc)
+ clear_bit(i, regulatory->block_unii4);
+ break;
+ case RTW89_IC:
+ if (enable_by_ic)
+ clear_bit(i, regulatory->block_unii4);
+ break;
+ default:
+ break;
+ }
+ }
}
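rtw89_regd_setup_unii4() now starts from "every country blocked" (bitmap_fill) and clears the bit only for regulatory entries whose 5 GHz body (FCC or IC) the ACPI evaluation enabled; rtw89_regd_apply_policy_unii4() later consults the same bitmap with test_bit(). A stripped-down sketch of that block-by-default idiom, with invented names and an arbitrary table size:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define SKETCH_MAX_COUNTRY	8

static DECLARE_BITMAP(sketch_block_unii4, SKETCH_MAX_COUNTRY);

/* Setup: block everything, then punch holes for the allowed entries. */
static void sketch_setup(const bool *allowed, unsigned int n)
{
	unsigned int i;

	bitmap_fill(sketch_block_unii4, SKETCH_MAX_COUNTRY);
	for (i = 0; i < n && i < SKETCH_MAX_COUNTRY; i++)
		if (allowed[i])
			clear_bit(i, sketch_block_unii4);
}

/* Apply time: a bit still set (or an unknown index) means "blocked". */
static bool sketch_is_blocked(unsigned int idx)
{
	return idx >= SKETCH_MAX_COUNTRY || test_bit(idx, sketch_block_unii4);
}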
static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
@@ -459,6 +468,51 @@ out:
kfree(ptr);
}
+static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_acpi_policy_6ghz_sp *ptr;
+ struct rtw89_acpi_dsm_result res = {};
+ bool enable_by_us;
+ int ret;
+ int i;
+
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "acpi: cannot eval policy 6ghz-sp: %d\n", ret);
+ return;
+ }
+
+ ptr = res.u.policy_6ghz_sp;
+
+ switch (ptr->override) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%s: unknown override case: %d\n", __func__,
+ ptr->override);
+ fallthrough;
+ case 0:
+ goto out;
+ case 1:
+ break;
+ }
+
+ bitmap_fill(regulatory->block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
+
+ enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
+ const struct rtw89_regd *tmp = &rtw89_regd_map[i];
+
+ if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0)
+ clear_bit(i, regulatory->block_6ghz_sp);
+ }
+
+out:
+ kfree(ptr);
+}
+
static void rtw89_regd_setup_6ghz(struct rtw89_dev *rtwdev, struct wiphy *wiphy)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -501,6 +555,7 @@ bottom:
if (regd_allow_6ghz) {
rtw89_regd_setup_policy_6ghz(rtwdev);
+ rtw89_regd_setup_policy_6ghz_sp(rtwdev);
return;
}
@@ -562,20 +617,47 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
return 0;
}
-static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
- struct wiphy *wiphy)
+static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_regd *regd = regulatory->regd;
struct ieee80211_supported_band *sband;
u8 index;
int i;
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ if (!chip->support_unii4)
+ return;
+
index = rtw89_regd_get_index(regd);
- if (index == RTW89_REGD_MAX_COUNTRY_NUM)
+ if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
+ !test_bit(index, regulatory->block_unii4))
return;
- if (!test_bit(index, regulatory->block_6ghz))
+ rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c unii-4 is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1]);
+
+ for (i = RTW89_5GHZ_UNII4_START_INDEX; i < sband->n_channels; i++)
+ sband->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+}
+
+static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ struct ieee80211_supported_band *sband;
+ u8 index;
+ int i;
+
+ index = rtw89_regd_get_index(regd);
+ if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
+ !test_bit(index, regulatory->block_6ghz))
return;
rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is blocked by policy\n",
@@ -604,6 +686,7 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
else
wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ rtw89_regd_apply_policy_unii4(rtwdev, wiphy);
rtw89_regd_apply_policy_6ghz(rtwdev, wiphy);
}
@@ -634,10 +717,12 @@ exit:
static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
enum rtw89_reg_6ghz_power sel;
const struct rtw89_chan *chan;
struct rtw89_vif *rtwvif;
int count = 0;
+ u8 index;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
@@ -654,6 +739,17 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
if (count != 1)
sel = RTW89_REG_6GHZ_POWER_DFLT;
+ if (sel == RTW89_REG_6GHZ_POWER_STD) {
+ index = rtw89_regd_get_index(regd);
+ if (index == RTW89_REGD_MAX_COUNTRY_NUM ||
+ test_bit(index, regulatory->block_6ghz_sp)) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%c%c 6 GHz SP is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1]);
+ sel = RTW89_REG_6GHZ_POWER_DFLT;
+ }
+ }
+
if (regulatory->reg_6ghz_power == sel)
return;
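
Note on the regd.c change above: the single boolean "allow UNII-4" decision is replaced by a per-country bitmap (block_unii4, and likewise block_6ghz_sp) that starts fully blocked and is selectively cleared according to the ACPI flags for each regulatory body. The following is a minimal stand-alone sketch of that block-by-default bitmap pattern in plain C; the table, sizes, and names (MAX_COUNTRY, country_regd, setup_unii4) are illustrative stand-ins, not the driver's own code or constants.

/*
 * Sketch of the "block by default, selectively allow" policy used in the
 * patch above. Hypothetical country table; compile and run as ordinary C.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_COUNTRY 8                 /* stand-in for RTW89_REGD_MAX_COUNTRY_NUM */

enum reg_body { REG_FCC, REG_IC, REG_ETSI };

/* stand-in for the 5 GHz regulatory body of each entry in rtw89_regd_map */
static const enum reg_body country_regd[MAX_COUNTRY] = {
	REG_FCC, REG_IC, REG_ETSI, REG_FCC, REG_ETSI, REG_IC, REG_ETSI, REG_FCC,
};

static unsigned char block_unii4[MAX_COUNTRY];   /* 1 = UNII-4 blocked */

static void setup_unii4(bool enable_by_fcc, bool enable_by_ic)
{
	int i;

	/* start fully blocked, mirroring bitmap_fill() in the patch */
	memset(block_unii4, 1, sizeof(block_unii4));

	/* clear the block only for domains the ACPI flags allow */
	for (i = 0; i < MAX_COUNTRY; i++) {
		if ((country_regd[i] == REG_FCC && enable_by_fcc) ||
		    (country_regd[i] == REG_IC && enable_by_ic))
			block_unii4[i] = 0;
	}
}

int main(void)
{
	int i;

	setup_unii4(true, false);     /* e.g. ACPI allows FCC only */
	for (i = 0; i < MAX_COUNTRY; i++)
		printf("country %d: unii-4 %s\n", i,
		       block_unii4[i] ? "blocked" : "allowed");
	return 0;
}

At apply time the driver then only has to test one bit for the current country (as rtw89_regd_apply_policy_unii4() does with test_bit()) instead of re-evaluating the ACPI result on every regulatory notification.
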
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 51d3e61eaa1d..87b51823244d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2320,6 +2320,7 @@ static int rtw8851b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
u8 wl_rfc_s1;
int ret;
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2447,6 +2448,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
.support_chanctx_num = 0,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ca1374a71727..ec3629d95fda 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
static const struct rtw89_driver_info rtw89_8851be_info = {
.chip = &rtw8851b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8851b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 2deadec715cf..e93cee1456bd 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2163,6 +2163,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
.support_chanctx_num = 1,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 7c6ffedb77e2..fdee5dd4ba14 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
static const struct rtw89_driver_info rtw89_8852ae_info = {
.chip = &rtw8852a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index d025c4135e1c..d351096fa4b4 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -390,6 +390,14 @@ static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852b_mon_reg[] = {
static const u8 rtw89_btc_8852b_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40};
static const u8 rtw89_btc_8852b_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+static void rtw8852b_pwr_sps_ana(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ if (efuse->rfe_type == 0x5)
+ rtw89_write16(rtwdev, R_AX_SPS_ANA_ON_CTRL2, RTL8852B_RFE_05_SPS_ANA);
+}
+
static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
{
u32 val32;
@@ -522,6 +530,10 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
u32 val32;
u32 ret;
+ /* Only do once during probe stage after reading efuse */
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ rtw8852b_pwr_sps_ana(rtwdev);
+
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
XTAL_SI_RFC2RF);
if (ret)
@@ -550,6 +562,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
return ret;
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
@@ -2469,6 +2482,7 @@ static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
u8 wl_rfc_s1;
int ret;
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2597,6 +2611,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
.support_chanctx_num = 0,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index ed71364e6437..5f941122655c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
static const struct rtw89_driver_info rtw89_8852be_info = {
.chip = &rtw8852b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 17e6164855fa..3571b41786d7 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -203,6 +203,9 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
+ B_AX_OCP_L1_MASK, 0x7);
+
ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
if (ret)
@@ -266,7 +269,7 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
if (ret)
return ret;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0x10, XTAL_SI_LDO_LPS);
if (ret)
return ret;
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
@@ -338,6 +341,7 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
return ret;
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
@@ -360,8 +364,11 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
+ B_AX_REG_ZCDC_H_MASK, 0x3);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
return 0;
@@ -2816,6 +2823,7 @@ static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2934,6 +2942,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
.support_chanctx_num = 2,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index ab1a0aadc869..24c390b6f3d3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -34521,7 +34521,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -34534,7 +34534,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -34547,7 +34547,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -34885,7 +34885,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 48,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 48,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CN][48] = 127,
@@ -34898,7 +34898,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 48,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 48,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CN][50] = 127,
@@ -34911,7 +34911,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 48,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 48,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CN][52] = 127,
@@ -35249,7 +35249,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 72,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 72,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -35262,7 +35262,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 72,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 72,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -35275,7 +35275,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 72,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 72,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -35613,7 +35613,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 48,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 48,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CN][48] = 127,
@@ -35626,7 +35626,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 50,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 50,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CN][50] = 127,
@@ -35639,7 +35639,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 48,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 48,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CN][52] = 127,
@@ -35977,7 +35977,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 48,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 48,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CN][48] = 127,
@@ -35990,7 +35990,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 50,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 50,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CN][50] = 127,
@@ -36003,7 +36003,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 48,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 48,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CN][52] = 127,
@@ -36172,7 +36172,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 68,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 68,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -36185,7 +36185,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 68,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 68,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -36354,7 +36354,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 62,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CN][47] = 127,
@@ -36367,7 +36367,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 60,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 60,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CN][51] = 127,
@@ -36536,7 +36536,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 62,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CN][47] = 127,
@@ -36549,7 +36549,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 60,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 60,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CN][51] = 127,
@@ -36640,7 +36640,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 62,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 62,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -36731,7 +36731,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 62,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 62,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CN][49] = 127,
@@ -36822,7 +36822,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 62,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 62,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CN][49] = 127,
@@ -36861,7 +36861,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_FCC][45] = 52,
[3][0][2][0][RTW89_ETSI][45] = 127,
[3][0][2][0][RTW89_MKK][45] = 127,
- [3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_IC][45] = 52,
[3][0][2][0][RTW89_KCC][45] = 127,
[3][0][2][0][RTW89_ACMA][45] = 127,
[3][0][2][0][RTW89_CN][45] = 127,
@@ -36900,7 +36900,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_FCC][45] = 46,
[3][1][2][0][RTW89_ETSI][45] = 127,
[3][1][2][0][RTW89_MKK][45] = 127,
- [3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_IC][45] = 46,
[3][1][2][0][RTW89_KCC][45] = 127,
[3][1][2][0][RTW89_ACMA][45] = 127,
[3][1][2][0][RTW89_CN][45] = 127,
@@ -36939,7 +36939,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_FCC][45] = 46,
[3][1][2][1][RTW89_ETSI][45] = 127,
[3][1][2][1][RTW89_MKK][45] = 127,
- [3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_IC][45] = 46,
[3][1][2][1][RTW89_KCC][45] = 127,
[3][1][2][1][RTW89_ACMA][45] = 127,
[3][1][2][1][RTW89_CN][45] = 127,
@@ -36958,1476 +36958,986 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[RTW89_6G_CH_NUM] = {
[0][0][1][0][RTW89_WW][0][0] = 24,
[0][0][1][0][RTW89_WW][1][0] = 24,
- [0][0][1][0][RTW89_WW][2][0] = 56,
[0][0][1][0][RTW89_WW][0][2] = 22,
[0][0][1][0][RTW89_WW][1][2] = 22,
- [0][0][1][0][RTW89_WW][2][2] = 56,
[0][0][1][0][RTW89_WW][0][4] = 22,
[0][0][1][0][RTW89_WW][1][4] = 22,
- [0][0][1][0][RTW89_WW][2][4] = 56,
[0][0][1][0][RTW89_WW][0][6] = 22,
[0][0][1][0][RTW89_WW][1][6] = 22,
- [0][0][1][0][RTW89_WW][2][6] = 56,
[0][0][1][0][RTW89_WW][0][8] = 22,
[0][0][1][0][RTW89_WW][1][8] = 22,
- [0][0][1][0][RTW89_WW][2][8] = 56,
[0][0][1][0][RTW89_WW][0][10] = 22,
[0][0][1][0][RTW89_WW][1][10] = 22,
- [0][0][1][0][RTW89_WW][2][10] = 56,
[0][0][1][0][RTW89_WW][0][12] = 22,
[0][0][1][0][RTW89_WW][1][12] = 22,
- [0][0][1][0][RTW89_WW][2][12] = 56,
[0][0][1][0][RTW89_WW][0][14] = 22,
[0][0][1][0][RTW89_WW][1][14] = 22,
- [0][0][1][0][RTW89_WW][2][14] = 56,
[0][0][1][0][RTW89_WW][0][15] = 22,
[0][0][1][0][RTW89_WW][1][15] = 22,
- [0][0][1][0][RTW89_WW][2][15] = 56,
[0][0][1][0][RTW89_WW][0][17] = 22,
[0][0][1][0][RTW89_WW][1][17] = 22,
- [0][0][1][0][RTW89_WW][2][17] = 56,
[0][0][1][0][RTW89_WW][0][19] = 22,
[0][0][1][0][RTW89_WW][1][19] = 22,
- [0][0][1][0][RTW89_WW][2][19] = 56,
[0][0][1][0][RTW89_WW][0][21] = 22,
[0][0][1][0][RTW89_WW][1][21] = 22,
- [0][0][1][0][RTW89_WW][2][21] = 56,
[0][0][1][0][RTW89_WW][0][23] = 22,
[0][0][1][0][RTW89_WW][1][23] = 22,
- [0][0][1][0][RTW89_WW][2][23] = 70,
[0][0][1][0][RTW89_WW][0][25] = 22,
[0][0][1][0][RTW89_WW][1][25] = 22,
- [0][0][1][0][RTW89_WW][2][25] = 70,
[0][0][1][0][RTW89_WW][0][27] = 22,
[0][0][1][0][RTW89_WW][1][27] = 22,
- [0][0][1][0][RTW89_WW][2][27] = 70,
[0][0][1][0][RTW89_WW][0][29] = 22,
[0][0][1][0][RTW89_WW][1][29] = 22,
- [0][0][1][0][RTW89_WW][2][29] = 70,
[0][0][1][0][RTW89_WW][0][30] = 22,
[0][0][1][0][RTW89_WW][1][30] = 22,
- [0][0][1][0][RTW89_WW][2][30] = 70,
[0][0][1][0][RTW89_WW][0][32] = 22,
[0][0][1][0][RTW89_WW][1][32] = 22,
- [0][0][1][0][RTW89_WW][2][32] = 70,
[0][0][1][0][RTW89_WW][0][34] = 22,
[0][0][1][0][RTW89_WW][1][34] = 22,
- [0][0][1][0][RTW89_WW][2][34] = 70,
[0][0][1][0][RTW89_WW][0][36] = 22,
[0][0][1][0][RTW89_WW][1][36] = 22,
- [0][0][1][0][RTW89_WW][2][36] = 70,
[0][0][1][0][RTW89_WW][0][38] = 22,
[0][0][1][0][RTW89_WW][1][38] = 22,
- [0][0][1][0][RTW89_WW][2][38] = 70,
[0][0][1][0][RTW89_WW][0][40] = 22,
[0][0][1][0][RTW89_WW][1][40] = 22,
- [0][0][1][0][RTW89_WW][2][40] = 70,
[0][0][1][0][RTW89_WW][0][42] = 22,
[0][0][1][0][RTW89_WW][1][42] = 22,
- [0][0][1][0][RTW89_WW][2][42] = 70,
[0][0][1][0][RTW89_WW][0][44] = 22,
[0][0][1][0][RTW89_WW][1][44] = 22,
- [0][0][1][0][RTW89_WW][2][44] = 70,
[0][0][1][0][RTW89_WW][0][45] = 22,
[0][0][1][0][RTW89_WW][1][45] = 22,
- [0][0][1][0][RTW89_WW][2][45] = 70,
[0][0][1][0][RTW89_WW][0][47] = 22,
[0][0][1][0][RTW89_WW][1][47] = 22,
- [0][0][1][0][RTW89_WW][2][47] = 70,
[0][0][1][0][RTW89_WW][0][49] = 24,
[0][0][1][0][RTW89_WW][1][49] = 24,
- [0][0][1][0][RTW89_WW][2][49] = 70,
[0][0][1][0][RTW89_WW][0][51] = 22,
[0][0][1][0][RTW89_WW][1][51] = 22,
- [0][0][1][0][RTW89_WW][2][51] = 70,
[0][0][1][0][RTW89_WW][0][53] = 22,
[0][0][1][0][RTW89_WW][1][53] = 22,
- [0][0][1][0][RTW89_WW][2][53] = 70,
[0][0][1][0][RTW89_WW][0][55] = 22,
[0][0][1][0][RTW89_WW][1][55] = 22,
- [0][0][1][0][RTW89_WW][2][55] = 68,
[0][0][1][0][RTW89_WW][0][57] = 22,
[0][0][1][0][RTW89_WW][1][57] = 22,
- [0][0][1][0][RTW89_WW][2][57] = 68,
[0][0][1][0][RTW89_WW][0][59] = 22,
[0][0][1][0][RTW89_WW][1][59] = 22,
- [0][0][1][0][RTW89_WW][2][59] = 68,
[0][0][1][0][RTW89_WW][0][60] = 22,
[0][0][1][0][RTW89_WW][1][60] = 22,
- [0][0][1][0][RTW89_WW][2][60] = 68,
[0][0][1][0][RTW89_WW][0][62] = 22,
[0][0][1][0][RTW89_WW][1][62] = 22,
- [0][0][1][0][RTW89_WW][2][62] = 68,
[0][0][1][0][RTW89_WW][0][64] = 22,
[0][0][1][0][RTW89_WW][1][64] = 22,
- [0][0][1][0][RTW89_WW][2][64] = 68,
[0][0][1][0][RTW89_WW][0][66] = 22,
[0][0][1][0][RTW89_WW][1][66] = 22,
- [0][0][1][0][RTW89_WW][2][66] = 68,
[0][0][1][0][RTW89_WW][0][68] = 22,
[0][0][1][0][RTW89_WW][1][68] = 22,
- [0][0][1][0][RTW89_WW][2][68] = 68,
[0][0][1][0][RTW89_WW][0][70] = 24,
[0][0][1][0][RTW89_WW][1][70] = 24,
- [0][0][1][0][RTW89_WW][2][70] = 68,
[0][0][1][0][RTW89_WW][0][72] = 22,
[0][0][1][0][RTW89_WW][1][72] = 22,
- [0][0][1][0][RTW89_WW][2][72] = 68,
[0][0][1][0][RTW89_WW][0][74] = 22,
[0][0][1][0][RTW89_WW][1][74] = 22,
- [0][0][1][0][RTW89_WW][2][74] = 68,
[0][0][1][0][RTW89_WW][0][75] = 22,
[0][0][1][0][RTW89_WW][1][75] = 22,
- [0][0][1][0][RTW89_WW][2][75] = 68,
[0][0][1][0][RTW89_WW][0][77] = 22,
[0][0][1][0][RTW89_WW][1][77] = 22,
- [0][0][1][0][RTW89_WW][2][77] = 68,
[0][0][1][0][RTW89_WW][0][79] = 22,
[0][0][1][0][RTW89_WW][1][79] = 22,
- [0][0][1][0][RTW89_WW][2][79] = 68,
[0][0][1][0][RTW89_WW][0][81] = 22,
[0][0][1][0][RTW89_WW][1][81] = 22,
- [0][0][1][0][RTW89_WW][2][81] = 68,
[0][0][1][0][RTW89_WW][0][83] = 22,
[0][0][1][0][RTW89_WW][1][83] = 22,
- [0][0][1][0][RTW89_WW][2][83] = 68,
[0][0][1][0][RTW89_WW][0][85] = 22,
[0][0][1][0][RTW89_WW][1][85] = 22,
- [0][0][1][0][RTW89_WW][2][85] = 68,
[0][0][1][0][RTW89_WW][0][87] = 22,
[0][0][1][0][RTW89_WW][1][87] = 22,
- [0][0][1][0][RTW89_WW][2][87] = 0,
[0][0][1][0][RTW89_WW][0][89] = 22,
[0][0][1][0][RTW89_WW][1][89] = 22,
- [0][0][1][0][RTW89_WW][2][89] = 0,
[0][0][1][0][RTW89_WW][0][90] = 22,
[0][0][1][0][RTW89_WW][1][90] = 22,
- [0][0][1][0][RTW89_WW][2][90] = 0,
[0][0][1][0][RTW89_WW][0][92] = 22,
[0][0][1][0][RTW89_WW][1][92] = 22,
- [0][0][1][0][RTW89_WW][2][92] = 0,
[0][0][1][0][RTW89_WW][0][94] = 22,
[0][0][1][0][RTW89_WW][1][94] = 22,
- [0][0][1][0][RTW89_WW][2][94] = 0,
[0][0][1][0][RTW89_WW][0][96] = 22,
[0][0][1][0][RTW89_WW][1][96] = 22,
- [0][0][1][0][RTW89_WW][2][96] = 0,
[0][0][1][0][RTW89_WW][0][98] = 22,
[0][0][1][0][RTW89_WW][1][98] = 22,
- [0][0][1][0][RTW89_WW][2][98] = 0,
[0][0][1][0][RTW89_WW][0][100] = 22,
[0][0][1][0][RTW89_WW][1][100] = 22,
- [0][0][1][0][RTW89_WW][2][100] = 0,
[0][0][1][0][RTW89_WW][0][102] = 22,
[0][0][1][0][RTW89_WW][1][102] = 22,
- [0][0][1][0][RTW89_WW][2][102] = 0,
[0][0][1][0][RTW89_WW][0][104] = 22,
[0][0][1][0][RTW89_WW][1][104] = 22,
- [0][0][1][0][RTW89_WW][2][104] = 0,
[0][0][1][0][RTW89_WW][0][105] = 22,
[0][0][1][0][RTW89_WW][1][105] = 22,
- [0][0][1][0][RTW89_WW][2][105] = 0,
[0][0][1][0][RTW89_WW][0][107] = 24,
[0][0][1][0][RTW89_WW][1][107] = 24,
- [0][0][1][0][RTW89_WW][2][107] = 0,
[0][0][1][0][RTW89_WW][0][109] = 24,
[0][0][1][0][RTW89_WW][1][109] = 24,
- [0][0][1][0][RTW89_WW][2][109] = 0,
[0][0][1][0][RTW89_WW][0][111] = 0,
[0][0][1][0][RTW89_WW][1][111] = 0,
- [0][0][1][0][RTW89_WW][2][111] = 0,
[0][0][1][0][RTW89_WW][0][113] = 0,
[0][0][1][0][RTW89_WW][1][113] = 0,
- [0][0][1][0][RTW89_WW][2][113] = 0,
[0][0][1][0][RTW89_WW][0][115] = 0,
[0][0][1][0][RTW89_WW][1][115] = 0,
- [0][0][1][0][RTW89_WW][2][115] = 0,
[0][0][1][0][RTW89_WW][0][117] = 0,
[0][0][1][0][RTW89_WW][1][117] = 0,
- [0][0][1][0][RTW89_WW][2][117] = 0,
[0][0][1][0][RTW89_WW][0][119] = 0,
[0][0][1][0][RTW89_WW][1][119] = 0,
- [0][0][1][0][RTW89_WW][2][119] = 0,
[0][1][1][0][RTW89_WW][0][0] = -2,
[0][1][1][0][RTW89_WW][1][0] = -2,
- [0][1][1][0][RTW89_WW][2][0] = 54,
[0][1][1][0][RTW89_WW][0][2] = -4,
[0][1][1][0][RTW89_WW][1][2] = -4,
- [0][1][1][0][RTW89_WW][2][2] = 54,
[0][1][1][0][RTW89_WW][0][4] = -4,
[0][1][1][0][RTW89_WW][1][4] = -4,
- [0][1][1][0][RTW89_WW][2][4] = 54,
[0][1][1][0][RTW89_WW][0][6] = -4,
[0][1][1][0][RTW89_WW][1][6] = -4,
- [0][1][1][0][RTW89_WW][2][6] = 54,
[0][1][1][0][RTW89_WW][0][8] = -4,
[0][1][1][0][RTW89_WW][1][8] = -4,
- [0][1][1][0][RTW89_WW][2][8] = 54,
[0][1][1][0][RTW89_WW][0][10] = -4,
[0][1][1][0][RTW89_WW][1][10] = -4,
- [0][1][1][0][RTW89_WW][2][10] = 54,
[0][1][1][0][RTW89_WW][0][12] = -4,
[0][1][1][0][RTW89_WW][1][12] = -4,
- [0][1][1][0][RTW89_WW][2][12] = 54,
[0][1][1][0][RTW89_WW][0][14] = -4,
[0][1][1][0][RTW89_WW][1][14] = -4,
- [0][1][1][0][RTW89_WW][2][14] = 54,
[0][1][1][0][RTW89_WW][0][15] = -4,
[0][1][1][0][RTW89_WW][1][15] = -4,
- [0][1][1][0][RTW89_WW][2][15] = 54,
[0][1][1][0][RTW89_WW][0][17] = -4,
[0][1][1][0][RTW89_WW][1][17] = -4,
- [0][1][1][0][RTW89_WW][2][17] = 54,
[0][1][1][0][RTW89_WW][0][19] = -4,
[0][1][1][0][RTW89_WW][1][19] = -4,
- [0][1][1][0][RTW89_WW][2][19] = 54,
[0][1][1][0][RTW89_WW][0][21] = -4,
[0][1][1][0][RTW89_WW][1][21] = -4,
- [0][1][1][0][RTW89_WW][2][21] = 54,
[0][1][1][0][RTW89_WW][0][23] = -4,
[0][1][1][0][RTW89_WW][1][23] = -4,
- [0][1][1][0][RTW89_WW][2][23] = 68,
[0][1][1][0][RTW89_WW][0][25] = -4,
[0][1][1][0][RTW89_WW][1][25] = -4,
- [0][1][1][0][RTW89_WW][2][25] = 68,
[0][1][1][0][RTW89_WW][0][27] = -4,
[0][1][1][0][RTW89_WW][1][27] = -4,
- [0][1][1][0][RTW89_WW][2][27] = 68,
[0][1][1][0][RTW89_WW][0][29] = -4,
[0][1][1][0][RTW89_WW][1][29] = -4,
- [0][1][1][0][RTW89_WW][2][29] = 68,
[0][1][1][0][RTW89_WW][0][30] = -4,
[0][1][1][0][RTW89_WW][1][30] = -4,
- [0][1][1][0][RTW89_WW][2][30] = 68,
[0][1][1][0][RTW89_WW][0][32] = -4,
[0][1][1][0][RTW89_WW][1][32] = -4,
- [0][1][1][0][RTW89_WW][2][32] = 68,
[0][1][1][0][RTW89_WW][0][34] = -4,
[0][1][1][0][RTW89_WW][1][34] = -4,
- [0][1][1][0][RTW89_WW][2][34] = 68,
[0][1][1][0][RTW89_WW][0][36] = -4,
[0][1][1][0][RTW89_WW][1][36] = -4,
- [0][1][1][0][RTW89_WW][2][36] = 68,
[0][1][1][0][RTW89_WW][0][38] = -4,
[0][1][1][0][RTW89_WW][1][38] = -4,
- [0][1][1][0][RTW89_WW][2][38] = 68,
[0][1][1][0][RTW89_WW][0][40] = -4,
[0][1][1][0][RTW89_WW][1][40] = -4,
- [0][1][1][0][RTW89_WW][2][40] = 68,
[0][1][1][0][RTW89_WW][0][42] = -4,
[0][1][1][0][RTW89_WW][1][42] = -4,
- [0][1][1][0][RTW89_WW][2][42] = 68,
[0][1][1][0][RTW89_WW][0][44] = -2,
[0][1][1][0][RTW89_WW][1][44] = -2,
- [0][1][1][0][RTW89_WW][2][44] = 68,
[0][1][1][0][RTW89_WW][0][45] = -2,
[0][1][1][0][RTW89_WW][1][45] = -2,
- [0][1][1][0][RTW89_WW][2][45] = 70,
[0][1][1][0][RTW89_WW][0][47] = -2,
[0][1][1][0][RTW89_WW][1][47] = -2,
- [0][1][1][0][RTW89_WW][2][47] = 68,
[0][1][1][0][RTW89_WW][0][49] = -2,
[0][1][1][0][RTW89_WW][1][49] = -2,
- [0][1][1][0][RTW89_WW][2][49] = 68,
[0][1][1][0][RTW89_WW][0][51] = -2,
[0][1][1][0][RTW89_WW][1][51] = -2,
- [0][1][1][0][RTW89_WW][2][51] = 68,
[0][1][1][0][RTW89_WW][0][53] = -2,
[0][1][1][0][RTW89_WW][1][53] = -2,
- [0][1][1][0][RTW89_WW][2][53] = 68,
[0][1][1][0][RTW89_WW][0][55] = -2,
[0][1][1][0][RTW89_WW][1][55] = -2,
- [0][1][1][0][RTW89_WW][2][55] = 68,
[0][1][1][0][RTW89_WW][0][57] = -2,
[0][1][1][0][RTW89_WW][1][57] = -2,
- [0][1][1][0][RTW89_WW][2][57] = 68,
[0][1][1][0][RTW89_WW][0][59] = -2,
[0][1][1][0][RTW89_WW][1][59] = -2,
- [0][1][1][0][RTW89_WW][2][59] = 68,
[0][1][1][0][RTW89_WW][0][60] = -2,
[0][1][1][0][RTW89_WW][1][60] = -2,
- [0][1][1][0][RTW89_WW][2][60] = 68,
[0][1][1][0][RTW89_WW][0][62] = -2,
[0][1][1][0][RTW89_WW][1][62] = -2,
- [0][1][1][0][RTW89_WW][2][62] = 68,
[0][1][1][0][RTW89_WW][0][64] = -2,
[0][1][1][0][RTW89_WW][1][64] = -2,
- [0][1][1][0][RTW89_WW][2][64] = 68,
[0][1][1][0][RTW89_WW][0][66] = -2,
[0][1][1][0][RTW89_WW][1][66] = -2,
- [0][1][1][0][RTW89_WW][2][66] = 68,
[0][1][1][0][RTW89_WW][0][68] = -2,
[0][1][1][0][RTW89_WW][1][68] = -2,
- [0][1][1][0][RTW89_WW][2][68] = 68,
[0][1][1][0][RTW89_WW][0][70] = -2,
[0][1][1][0][RTW89_WW][1][70] = -2,
- [0][1][1][0][RTW89_WW][2][70] = 68,
[0][1][1][0][RTW89_WW][0][72] = -2,
[0][1][1][0][RTW89_WW][1][72] = -2,
- [0][1][1][0][RTW89_WW][2][72] = 68,
[0][1][1][0][RTW89_WW][0][74] = -2,
[0][1][1][0][RTW89_WW][1][74] = -2,
- [0][1][1][0][RTW89_WW][2][74] = 68,
[0][1][1][0][RTW89_WW][0][75] = -2,
[0][1][1][0][RTW89_WW][1][75] = -2,
- [0][1][1][0][RTW89_WW][2][75] = 68,
[0][1][1][0][RTW89_WW][0][77] = -2,
[0][1][1][0][RTW89_WW][1][77] = -2,
- [0][1][1][0][RTW89_WW][2][77] = 68,
[0][1][1][0][RTW89_WW][0][79] = -2,
[0][1][1][0][RTW89_WW][1][79] = -2,
- [0][1][1][0][RTW89_WW][2][79] = 68,
[0][1][1][0][RTW89_WW][0][81] = -2,
[0][1][1][0][RTW89_WW][1][81] = -2,
- [0][1][1][0][RTW89_WW][2][81] = 68,
[0][1][1][0][RTW89_WW][0][83] = -2,
[0][1][1][0][RTW89_WW][1][83] = -2,
- [0][1][1][0][RTW89_WW][2][83] = 68,
[0][1][1][0][RTW89_WW][0][85] = -2,
[0][1][1][0][RTW89_WW][1][85] = -2,
- [0][1][1][0][RTW89_WW][2][85] = 68,
[0][1][1][0][RTW89_WW][0][87] = -2,
[0][1][1][0][RTW89_WW][1][87] = -2,
- [0][1][1][0][RTW89_WW][2][87] = 0,
[0][1][1][0][RTW89_WW][0][89] = -2,
[0][1][1][0][RTW89_WW][1][89] = -2,
- [0][1][1][0][RTW89_WW][2][89] = 0,
[0][1][1][0][RTW89_WW][0][90] = -2,
[0][1][1][0][RTW89_WW][1][90] = -2,
- [0][1][1][0][RTW89_WW][2][90] = 0,
[0][1][1][0][RTW89_WW][0][92] = -2,
[0][1][1][0][RTW89_WW][1][92] = -2,
- [0][1][1][0][RTW89_WW][2][92] = 0,
[0][1][1][0][RTW89_WW][0][94] = -2,
[0][1][1][0][RTW89_WW][1][94] = -2,
- [0][1][1][0][RTW89_WW][2][94] = 0,
[0][1][1][0][RTW89_WW][0][96] = -2,
[0][1][1][0][RTW89_WW][1][96] = -2,
- [0][1][1][0][RTW89_WW][2][96] = 0,
[0][1][1][0][RTW89_WW][0][98] = -2,
[0][1][1][0][RTW89_WW][1][98] = -2,
- [0][1][1][0][RTW89_WW][2][98] = 0,
[0][1][1][0][RTW89_WW][0][100] = -2,
[0][1][1][0][RTW89_WW][1][100] = -2,
- [0][1][1][0][RTW89_WW][2][100] = 0,
[0][1][1][0][RTW89_WW][0][102] = -2,
[0][1][1][0][RTW89_WW][1][102] = -2,
- [0][1][1][0][RTW89_WW][2][102] = 0,
[0][1][1][0][RTW89_WW][0][104] = -2,
[0][1][1][0][RTW89_WW][1][104] = -2,
- [0][1][1][0][RTW89_WW][2][104] = 0,
[0][1][1][0][RTW89_WW][0][105] = -2,
[0][1][1][0][RTW89_WW][1][105] = -2,
- [0][1][1][0][RTW89_WW][2][105] = 0,
[0][1][1][0][RTW89_WW][0][107] = 1,
[0][1][1][0][RTW89_WW][1][107] = 1,
- [0][1][1][0][RTW89_WW][2][107] = 0,
[0][1][1][0][RTW89_WW][0][109] = 1,
[0][1][1][0][RTW89_WW][1][109] = 1,
- [0][1][1][0][RTW89_WW][2][109] = 0,
[0][1][1][0][RTW89_WW][0][111] = 0,
[0][1][1][0][RTW89_WW][1][111] = 0,
- [0][1][1][0][RTW89_WW][2][111] = 0,
[0][1][1][0][RTW89_WW][0][113] = 0,
[0][1][1][0][RTW89_WW][1][113] = 0,
- [0][1][1][0][RTW89_WW][2][113] = 0,
[0][1][1][0][RTW89_WW][0][115] = 0,
[0][1][1][0][RTW89_WW][1][115] = 0,
- [0][1][1][0][RTW89_WW][2][115] = 0,
[0][1][1][0][RTW89_WW][0][117] = 0,
[0][1][1][0][RTW89_WW][1][117] = 0,
- [0][1][1][0][RTW89_WW][2][117] = 0,
[0][1][1][0][RTW89_WW][0][119] = 0,
[0][1][1][0][RTW89_WW][1][119] = 0,
- [0][1][1][0][RTW89_WW][2][119] = 0,
[0][0][2][0][RTW89_WW][0][0] = 24,
[0][0][2][0][RTW89_WW][1][0] = 24,
- [0][0][2][0][RTW89_WW][2][0] = 56,
[0][0][2][0][RTW89_WW][0][2] = 22,
[0][0][2][0][RTW89_WW][1][2] = 22,
- [0][0][2][0][RTW89_WW][2][2] = 56,
[0][0][2][0][RTW89_WW][0][4] = 22,
[0][0][2][0][RTW89_WW][1][4] = 22,
- [0][0][2][0][RTW89_WW][2][4] = 56,
[0][0][2][0][RTW89_WW][0][6] = 22,
[0][0][2][0][RTW89_WW][1][6] = 22,
- [0][0][2][0][RTW89_WW][2][6] = 56,
[0][0][2][0][RTW89_WW][0][8] = 22,
[0][0][2][0][RTW89_WW][1][8] = 22,
- [0][0][2][0][RTW89_WW][2][8] = 56,
[0][0][2][0][RTW89_WW][0][10] = 22,
[0][0][2][0][RTW89_WW][1][10] = 22,
- [0][0][2][0][RTW89_WW][2][10] = 56,
[0][0][2][0][RTW89_WW][0][12] = 22,
[0][0][2][0][RTW89_WW][1][12] = 22,
- [0][0][2][0][RTW89_WW][2][12] = 56,
[0][0][2][0][RTW89_WW][0][14] = 22,
[0][0][2][0][RTW89_WW][1][14] = 22,
- [0][0][2][0][RTW89_WW][2][14] = 56,
[0][0][2][0][RTW89_WW][0][15] = 22,
[0][0][2][0][RTW89_WW][1][15] = 22,
- [0][0][2][0][RTW89_WW][2][15] = 56,
[0][0][2][0][RTW89_WW][0][17] = 22,
[0][0][2][0][RTW89_WW][1][17] = 22,
- [0][0][2][0][RTW89_WW][2][17] = 56,
[0][0][2][0][RTW89_WW][0][19] = 22,
[0][0][2][0][RTW89_WW][1][19] = 22,
- [0][0][2][0][RTW89_WW][2][19] = 56,
[0][0][2][0][RTW89_WW][0][21] = 22,
[0][0][2][0][RTW89_WW][1][21] = 22,
- [0][0][2][0][RTW89_WW][2][21] = 56,
[0][0][2][0][RTW89_WW][0][23] = 22,
[0][0][2][0][RTW89_WW][1][23] = 22,
- [0][0][2][0][RTW89_WW][2][23] = 70,
[0][0][2][0][RTW89_WW][0][25] = 22,
[0][0][2][0][RTW89_WW][1][25] = 22,
- [0][0][2][0][RTW89_WW][2][25] = 70,
[0][0][2][0][RTW89_WW][0][27] = 22,
[0][0][2][0][RTW89_WW][1][27] = 22,
- [0][0][2][0][RTW89_WW][2][27] = 70,
[0][0][2][0][RTW89_WW][0][29] = 22,
[0][0][2][0][RTW89_WW][1][29] = 22,
- [0][0][2][0][RTW89_WW][2][29] = 70,
[0][0][2][0][RTW89_WW][0][30] = 22,
[0][0][2][0][RTW89_WW][1][30] = 22,
- [0][0][2][0][RTW89_WW][2][30] = 70,
[0][0][2][0][RTW89_WW][0][32] = 22,
[0][0][2][0][RTW89_WW][1][32] = 22,
- [0][0][2][0][RTW89_WW][2][32] = 70,
[0][0][2][0][RTW89_WW][0][34] = 22,
[0][0][2][0][RTW89_WW][1][34] = 22,
- [0][0][2][0][RTW89_WW][2][34] = 70,
[0][0][2][0][RTW89_WW][0][36] = 22,
[0][0][2][0][RTW89_WW][1][36] = 22,
- [0][0][2][0][RTW89_WW][2][36] = 70,
[0][0][2][0][RTW89_WW][0][38] = 22,
[0][0][2][0][RTW89_WW][1][38] = 22,
- [0][0][2][0][RTW89_WW][2][38] = 70,
[0][0][2][0][RTW89_WW][0][40] = 22,
[0][0][2][0][RTW89_WW][1][40] = 22,
- [0][0][2][0][RTW89_WW][2][40] = 70,
[0][0][2][0][RTW89_WW][0][42] = 22,
[0][0][2][0][RTW89_WW][1][42] = 22,
- [0][0][2][0][RTW89_WW][2][42] = 70,
[0][0][2][0][RTW89_WW][0][44] = 22,
[0][0][2][0][RTW89_WW][1][44] = 22,
- [0][0][2][0][RTW89_WW][2][44] = 70,
[0][0][2][0][RTW89_WW][0][45] = 22,
[0][0][2][0][RTW89_WW][1][45] = 22,
- [0][0][2][0][RTW89_WW][2][45] = 70,
[0][0][2][0][RTW89_WW][0][47] = 22,
[0][0][2][0][RTW89_WW][1][47] = 22,
- [0][0][2][0][RTW89_WW][2][47] = 70,
[0][0][2][0][RTW89_WW][0][49] = 24,
[0][0][2][0][RTW89_WW][1][49] = 24,
- [0][0][2][0][RTW89_WW][2][49] = 70,
[0][0][2][0][RTW89_WW][0][51] = 22,
[0][0][2][0][RTW89_WW][1][51] = 22,
- [0][0][2][0][RTW89_WW][2][51] = 70,
[0][0][2][0][RTW89_WW][0][53] = 22,
[0][0][2][0][RTW89_WW][1][53] = 22,
- [0][0][2][0][RTW89_WW][2][53] = 70,
[0][0][2][0][RTW89_WW][0][55] = 22,
[0][0][2][0][RTW89_WW][1][55] = 22,
- [0][0][2][0][RTW89_WW][2][55] = 68,
[0][0][2][0][RTW89_WW][0][57] = 22,
[0][0][2][0][RTW89_WW][1][57] = 22,
- [0][0][2][0][RTW89_WW][2][57] = 68,
[0][0][2][0][RTW89_WW][0][59] = 22,
[0][0][2][0][RTW89_WW][1][59] = 22,
- [0][0][2][0][RTW89_WW][2][59] = 68,
[0][0][2][0][RTW89_WW][0][60] = 22,
[0][0][2][0][RTW89_WW][1][60] = 22,
- [0][0][2][0][RTW89_WW][2][60] = 68,
[0][0][2][0][RTW89_WW][0][62] = 22,
[0][0][2][0][RTW89_WW][1][62] = 22,
- [0][0][2][0][RTW89_WW][2][62] = 68,
[0][0][2][0][RTW89_WW][0][64] = 22,
[0][0][2][0][RTW89_WW][1][64] = 22,
- [0][0][2][0][RTW89_WW][2][64] = 68,
[0][0][2][0][RTW89_WW][0][66] = 22,
[0][0][2][0][RTW89_WW][1][66] = 22,
- [0][0][2][0][RTW89_WW][2][66] = 68,
[0][0][2][0][RTW89_WW][0][68] = 22,
[0][0][2][0][RTW89_WW][1][68] = 22,
- [0][0][2][0][RTW89_WW][2][68] = 68,
[0][0][2][0][RTW89_WW][0][70] = 24,
[0][0][2][0][RTW89_WW][1][70] = 24,
- [0][0][2][0][RTW89_WW][2][70] = 68,
[0][0][2][0][RTW89_WW][0][72] = 22,
[0][0][2][0][RTW89_WW][1][72] = 22,
- [0][0][2][0][RTW89_WW][2][72] = 68,
[0][0][2][0][RTW89_WW][0][74] = 22,
[0][0][2][0][RTW89_WW][1][74] = 22,
- [0][0][2][0][RTW89_WW][2][74] = 68,
[0][0][2][0][RTW89_WW][0][75] = 22,
[0][0][2][0][RTW89_WW][1][75] = 22,
- [0][0][2][0][RTW89_WW][2][75] = 68,
[0][0][2][0][RTW89_WW][0][77] = 22,
[0][0][2][0][RTW89_WW][1][77] = 22,
- [0][0][2][0][RTW89_WW][2][77] = 68,
[0][0][2][0][RTW89_WW][0][79] = 22,
[0][0][2][0][RTW89_WW][1][79] = 22,
- [0][0][2][0][RTW89_WW][2][79] = 68,
[0][0][2][0][RTW89_WW][0][81] = 22,
[0][0][2][0][RTW89_WW][1][81] = 22,
- [0][0][2][0][RTW89_WW][2][81] = 68,
[0][0][2][0][RTW89_WW][0][83] = 22,
[0][0][2][0][RTW89_WW][1][83] = 22,
- [0][0][2][0][RTW89_WW][2][83] = 68,
[0][0][2][0][RTW89_WW][0][85] = 22,
[0][0][2][0][RTW89_WW][1][85] = 22,
- [0][0][2][0][RTW89_WW][2][85] = 68,
[0][0][2][0][RTW89_WW][0][87] = 22,
[0][0][2][0][RTW89_WW][1][87] = 22,
- [0][0][2][0][RTW89_WW][2][87] = 0,
[0][0][2][0][RTW89_WW][0][89] = 22,
[0][0][2][0][RTW89_WW][1][89] = 22,
- [0][0][2][0][RTW89_WW][2][89] = 0,
[0][0][2][0][RTW89_WW][0][90] = 22,
[0][0][2][0][RTW89_WW][1][90] = 22,
- [0][0][2][0][RTW89_WW][2][90] = 0,
[0][0][2][0][RTW89_WW][0][92] = 22,
[0][0][2][0][RTW89_WW][1][92] = 22,
- [0][0][2][0][RTW89_WW][2][92] = 0,
[0][0][2][0][RTW89_WW][0][94] = 22,
[0][0][2][0][RTW89_WW][1][94] = 22,
- [0][0][2][0][RTW89_WW][2][94] = 0,
[0][0][2][0][RTW89_WW][0][96] = 22,
[0][0][2][0][RTW89_WW][1][96] = 22,
- [0][0][2][0][RTW89_WW][2][96] = 0,
[0][0][2][0][RTW89_WW][0][98] = 22,
[0][0][2][0][RTW89_WW][1][98] = 22,
- [0][0][2][0][RTW89_WW][2][98] = 0,
[0][0][2][0][RTW89_WW][0][100] = 22,
[0][0][2][0][RTW89_WW][1][100] = 22,
- [0][0][2][0][RTW89_WW][2][100] = 0,
[0][0][2][0][RTW89_WW][0][102] = 22,
[0][0][2][0][RTW89_WW][1][102] = 22,
- [0][0][2][0][RTW89_WW][2][102] = 0,
[0][0][2][0][RTW89_WW][0][104] = 22,
[0][0][2][0][RTW89_WW][1][104] = 22,
- [0][0][2][0][RTW89_WW][2][104] = 0,
[0][0][2][0][RTW89_WW][0][105] = 22,
[0][0][2][0][RTW89_WW][1][105] = 22,
- [0][0][2][0][RTW89_WW][2][105] = 0,
[0][0][2][0][RTW89_WW][0][107] = 24,
[0][0][2][0][RTW89_WW][1][107] = 24,
- [0][0][2][0][RTW89_WW][2][107] = 0,
[0][0][2][0][RTW89_WW][0][109] = 24,
[0][0][2][0][RTW89_WW][1][109] = 24,
- [0][0][2][0][RTW89_WW][2][109] = 0,
[0][0][2][0][RTW89_WW][0][111] = 0,
[0][0][2][0][RTW89_WW][1][111] = 0,
- [0][0][2][0][RTW89_WW][2][111] = 0,
[0][0][2][0][RTW89_WW][0][113] = 0,
[0][0][2][0][RTW89_WW][1][113] = 0,
- [0][0][2][0][RTW89_WW][2][113] = 0,
[0][0][2][0][RTW89_WW][0][115] = 0,
[0][0][2][0][RTW89_WW][1][115] = 0,
- [0][0][2][0][RTW89_WW][2][115] = 0,
[0][0][2][0][RTW89_WW][0][117] = 0,
[0][0][2][0][RTW89_WW][1][117] = 0,
- [0][0][2][0][RTW89_WW][2][117] = 0,
[0][0][2][0][RTW89_WW][0][119] = 0,
[0][0][2][0][RTW89_WW][1][119] = 0,
- [0][0][2][0][RTW89_WW][2][119] = 0,
[0][1][2][0][RTW89_WW][0][0] = -2,
[0][1][2][0][RTW89_WW][1][0] = -2,
- [0][1][2][0][RTW89_WW][2][0] = 54,
[0][1][2][0][RTW89_WW][0][2] = -4,
[0][1][2][0][RTW89_WW][1][2] = -4,
- [0][1][2][0][RTW89_WW][2][2] = 54,
[0][1][2][0][RTW89_WW][0][4] = -4,
[0][1][2][0][RTW89_WW][1][4] = -4,
- [0][1][2][0][RTW89_WW][2][4] = 54,
[0][1][2][0][RTW89_WW][0][6] = -4,
[0][1][2][0][RTW89_WW][1][6] = -4,
- [0][1][2][0][RTW89_WW][2][6] = 54,
[0][1][2][0][RTW89_WW][0][8] = -4,
[0][1][2][0][RTW89_WW][1][8] = -4,
- [0][1][2][0][RTW89_WW][2][8] = 54,
[0][1][2][0][RTW89_WW][0][10] = -4,
[0][1][2][0][RTW89_WW][1][10] = -4,
- [0][1][2][0][RTW89_WW][2][10] = 54,
[0][1][2][0][RTW89_WW][0][12] = -4,
[0][1][2][0][RTW89_WW][1][12] = -4,
- [0][1][2][0][RTW89_WW][2][12] = 54,
[0][1][2][0][RTW89_WW][0][14] = -4,
[0][1][2][0][RTW89_WW][1][14] = -4,
- [0][1][2][0][RTW89_WW][2][14] = 54,
[0][1][2][0][RTW89_WW][0][15] = -4,
[0][1][2][0][RTW89_WW][1][15] = -4,
- [0][1][2][0][RTW89_WW][2][15] = 54,
[0][1][2][0][RTW89_WW][0][17] = -4,
[0][1][2][0][RTW89_WW][1][17] = -4,
- [0][1][2][0][RTW89_WW][2][17] = 54,
[0][1][2][0][RTW89_WW][0][19] = -4,
[0][1][2][0][RTW89_WW][1][19] = -4,
- [0][1][2][0][RTW89_WW][2][19] = 54,
[0][1][2][0][RTW89_WW][0][21] = -4,
[0][1][2][0][RTW89_WW][1][21] = -4,
- [0][1][2][0][RTW89_WW][2][21] = 54,
[0][1][2][0][RTW89_WW][0][23] = -4,
[0][1][2][0][RTW89_WW][1][23] = -4,
- [0][1][2][0][RTW89_WW][2][23] = 68,
[0][1][2][0][RTW89_WW][0][25] = -4,
[0][1][2][0][RTW89_WW][1][25] = -4,
- [0][1][2][0][RTW89_WW][2][25] = 68,
[0][1][2][0][RTW89_WW][0][27] = -4,
[0][1][2][0][RTW89_WW][1][27] = -4,
- [0][1][2][0][RTW89_WW][2][27] = 68,
[0][1][2][0][RTW89_WW][0][29] = -4,
[0][1][2][0][RTW89_WW][1][29] = -4,
- [0][1][2][0][RTW89_WW][2][29] = 68,
[0][1][2][0][RTW89_WW][0][30] = -4,
[0][1][2][0][RTW89_WW][1][30] = -4,
- [0][1][2][0][RTW89_WW][2][30] = 68,
[0][1][2][0][RTW89_WW][0][32] = -4,
[0][1][2][0][RTW89_WW][1][32] = -4,
- [0][1][2][0][RTW89_WW][2][32] = 68,
[0][1][2][0][RTW89_WW][0][34] = -4,
[0][1][2][0][RTW89_WW][1][34] = -4,
- [0][1][2][0][RTW89_WW][2][34] = 68,
[0][1][2][0][RTW89_WW][0][36] = -4,
[0][1][2][0][RTW89_WW][1][36] = -4,
- [0][1][2][0][RTW89_WW][2][36] = 68,
[0][1][2][0][RTW89_WW][0][38] = -4,
[0][1][2][0][RTW89_WW][1][38] = -4,
- [0][1][2][0][RTW89_WW][2][38] = 68,
[0][1][2][0][RTW89_WW][0][40] = -4,
[0][1][2][0][RTW89_WW][1][40] = -4,
- [0][1][2][0][RTW89_WW][2][40] = 68,
[0][1][2][0][RTW89_WW][0][42] = -4,
[0][1][2][0][RTW89_WW][1][42] = -4,
- [0][1][2][0][RTW89_WW][2][42] = 68,
[0][1][2][0][RTW89_WW][0][44] = -2,
[0][1][2][0][RTW89_WW][1][44] = -2,
- [0][1][2][0][RTW89_WW][2][44] = 68,
[0][1][2][0][RTW89_WW][0][45] = -2,
[0][1][2][0][RTW89_WW][1][45] = -2,
- [0][1][2][0][RTW89_WW][2][45] = 70,
[0][1][2][0][RTW89_WW][0][47] = -2,
[0][1][2][0][RTW89_WW][1][47] = -2,
- [0][1][2][0][RTW89_WW][2][47] = 68,
[0][1][2][0][RTW89_WW][0][49] = -2,
[0][1][2][0][RTW89_WW][1][49] = -2,
- [0][1][2][0][RTW89_WW][2][49] = 68,
[0][1][2][0][RTW89_WW][0][51] = -2,
[0][1][2][0][RTW89_WW][1][51] = -2,
- [0][1][2][0][RTW89_WW][2][51] = 68,
[0][1][2][0][RTW89_WW][0][53] = -2,
[0][1][2][0][RTW89_WW][1][53] = -2,
- [0][1][2][0][RTW89_WW][2][53] = 68,
[0][1][2][0][RTW89_WW][0][55] = -2,
[0][1][2][0][RTW89_WW][1][55] = -2,
- [0][1][2][0][RTW89_WW][2][55] = 68,
[0][1][2][0][RTW89_WW][0][57] = -2,
[0][1][2][0][RTW89_WW][1][57] = -2,
- [0][1][2][0][RTW89_WW][2][57] = 68,
[0][1][2][0][RTW89_WW][0][59] = -2,
[0][1][2][0][RTW89_WW][1][59] = -2,
- [0][1][2][0][RTW89_WW][2][59] = 68,
[0][1][2][0][RTW89_WW][0][60] = -2,
[0][1][2][0][RTW89_WW][1][60] = -2,
- [0][1][2][0][RTW89_WW][2][60] = 68,
[0][1][2][0][RTW89_WW][0][62] = -2,
[0][1][2][0][RTW89_WW][1][62] = -2,
- [0][1][2][0][RTW89_WW][2][62] = 68,
[0][1][2][0][RTW89_WW][0][64] = -2,
[0][1][2][0][RTW89_WW][1][64] = -2,
- [0][1][2][0][RTW89_WW][2][64] = 68,
[0][1][2][0][RTW89_WW][0][66] = -2,
[0][1][2][0][RTW89_WW][1][66] = -2,
- [0][1][2][0][RTW89_WW][2][66] = 68,
[0][1][2][0][RTW89_WW][0][68] = -2,
[0][1][2][0][RTW89_WW][1][68] = -2,
- [0][1][2][0][RTW89_WW][2][68] = 68,
[0][1][2][0][RTW89_WW][0][70] = -2,
[0][1][2][0][RTW89_WW][1][70] = -2,
- [0][1][2][0][RTW89_WW][2][70] = 68,
[0][1][2][0][RTW89_WW][0][72] = -2,
[0][1][2][0][RTW89_WW][1][72] = -2,
- [0][1][2][0][RTW89_WW][2][72] = 68,
[0][1][2][0][RTW89_WW][0][74] = -2,
[0][1][2][0][RTW89_WW][1][74] = -2,
- [0][1][2][0][RTW89_WW][2][74] = 68,
[0][1][2][0][RTW89_WW][0][75] = -2,
[0][1][2][0][RTW89_WW][1][75] = -2,
- [0][1][2][0][RTW89_WW][2][75] = 68,
[0][1][2][0][RTW89_WW][0][77] = -2,
[0][1][2][0][RTW89_WW][1][77] = -2,
- [0][1][2][0][RTW89_WW][2][77] = 68,
[0][1][2][0][RTW89_WW][0][79] = -2,
[0][1][2][0][RTW89_WW][1][79] = -2,
- [0][1][2][0][RTW89_WW][2][79] = 68,
[0][1][2][0][RTW89_WW][0][81] = -2,
[0][1][2][0][RTW89_WW][1][81] = -2,
- [0][1][2][0][RTW89_WW][2][81] = 68,
[0][1][2][0][RTW89_WW][0][83] = -2,
[0][1][2][0][RTW89_WW][1][83] = -2,
- [0][1][2][0][RTW89_WW][2][83] = 68,
[0][1][2][0][RTW89_WW][0][85] = -2,
[0][1][2][0][RTW89_WW][1][85] = -2,
- [0][1][2][0][RTW89_WW][2][85] = 68,
[0][1][2][0][RTW89_WW][0][87] = -2,
[0][1][2][0][RTW89_WW][1][87] = -2,
- [0][1][2][0][RTW89_WW][2][87] = 0,
[0][1][2][0][RTW89_WW][0][89] = -2,
[0][1][2][0][RTW89_WW][1][89] = -2,
- [0][1][2][0][RTW89_WW][2][89] = 0,
[0][1][2][0][RTW89_WW][0][90] = -2,
[0][1][2][0][RTW89_WW][1][90] = -2,
- [0][1][2][0][RTW89_WW][2][90] = 0,
[0][1][2][0][RTW89_WW][0][92] = -2,
[0][1][2][0][RTW89_WW][1][92] = -2,
- [0][1][2][0][RTW89_WW][2][92] = 0,
[0][1][2][0][RTW89_WW][0][94] = -2,
[0][1][2][0][RTW89_WW][1][94] = -2,
- [0][1][2][0][RTW89_WW][2][94] = 0,
[0][1][2][0][RTW89_WW][0][96] = -2,
[0][1][2][0][RTW89_WW][1][96] = -2,
- [0][1][2][0][RTW89_WW][2][96] = 0,
[0][1][2][0][RTW89_WW][0][98] = -2,
[0][1][2][0][RTW89_WW][1][98] = -2,
- [0][1][2][0][RTW89_WW][2][98] = 0,
[0][1][2][0][RTW89_WW][0][100] = -2,
[0][1][2][0][RTW89_WW][1][100] = -2,
- [0][1][2][0][RTW89_WW][2][100] = 0,
[0][1][2][0][RTW89_WW][0][102] = -2,
[0][1][2][0][RTW89_WW][1][102] = -2,
- [0][1][2][0][RTW89_WW][2][102] = 0,
[0][1][2][0][RTW89_WW][0][104] = -2,
[0][1][2][0][RTW89_WW][1][104] = -2,
- [0][1][2][0][RTW89_WW][2][104] = 0,
[0][1][2][0][RTW89_WW][0][105] = -2,
[0][1][2][0][RTW89_WW][1][105] = -2,
- [0][1][2][0][RTW89_WW][2][105] = 0,
[0][1][2][0][RTW89_WW][0][107] = 1,
[0][1][2][0][RTW89_WW][1][107] = 1,
- [0][1][2][0][RTW89_WW][2][107] = 0,
[0][1][2][0][RTW89_WW][0][109] = 1,
[0][1][2][0][RTW89_WW][1][109] = 1,
- [0][1][2][0][RTW89_WW][2][109] = 0,
[0][1][2][0][RTW89_WW][0][111] = 0,
[0][1][2][0][RTW89_WW][1][111] = 0,
- [0][1][2][0][RTW89_WW][2][111] = 0,
[0][1][2][0][RTW89_WW][0][113] = 0,
[0][1][2][0][RTW89_WW][1][113] = 0,
- [0][1][2][0][RTW89_WW][2][113] = 0,
[0][1][2][0][RTW89_WW][0][115] = 0,
[0][1][2][0][RTW89_WW][1][115] = 0,
- [0][1][2][0][RTW89_WW][2][115] = 0,
[0][1][2][0][RTW89_WW][0][117] = 0,
[0][1][2][0][RTW89_WW][1][117] = 0,
- [0][1][2][0][RTW89_WW][2][117] = 0,
[0][1][2][0][RTW89_WW][0][119] = 0,
[0][1][2][0][RTW89_WW][1][119] = 0,
- [0][1][2][0][RTW89_WW][2][119] = 0,
[0][1][2][1][RTW89_WW][0][0] = -2,
[0][1][2][1][RTW89_WW][1][0] = -2,
- [0][1][2][1][RTW89_WW][2][0] = 54,
[0][1][2][1][RTW89_WW][0][2] = -4,
[0][1][2][1][RTW89_WW][1][2] = -4,
- [0][1][2][1][RTW89_WW][2][2] = 54,
[0][1][2][1][RTW89_WW][0][4] = -4,
[0][1][2][1][RTW89_WW][1][4] = -4,
- [0][1][2][1][RTW89_WW][2][4] = 54,
[0][1][2][1][RTW89_WW][0][6] = -4,
[0][1][2][1][RTW89_WW][1][6] = -4,
- [0][1][2][1][RTW89_WW][2][6] = 54,
[0][1][2][1][RTW89_WW][0][8] = -4,
[0][1][2][1][RTW89_WW][1][8] = -4,
- [0][1][2][1][RTW89_WW][2][8] = 54,
[0][1][2][1][RTW89_WW][0][10] = -4,
[0][1][2][1][RTW89_WW][1][10] = -4,
- [0][1][2][1][RTW89_WW][2][10] = 54,
[0][1][2][1][RTW89_WW][0][12] = -4,
[0][1][2][1][RTW89_WW][1][12] = -4,
- [0][1][2][1][RTW89_WW][2][12] = 54,
[0][1][2][1][RTW89_WW][0][14] = -4,
[0][1][2][1][RTW89_WW][1][14] = -4,
- [0][1][2][1][RTW89_WW][2][14] = 54,
[0][1][2][1][RTW89_WW][0][15] = -4,
[0][1][2][1][RTW89_WW][1][15] = -4,
- [0][1][2][1][RTW89_WW][2][15] = 54,
[0][1][2][1][RTW89_WW][0][17] = -4,
[0][1][2][1][RTW89_WW][1][17] = -4,
- [0][1][2][1][RTW89_WW][2][17] = 54,
[0][1][2][1][RTW89_WW][0][19] = -4,
[0][1][2][1][RTW89_WW][1][19] = -4,
- [0][1][2][1][RTW89_WW][2][19] = 54,
[0][1][2][1][RTW89_WW][0][21] = -4,
[0][1][2][1][RTW89_WW][1][21] = -4,
- [0][1][2][1][RTW89_WW][2][21] = 54,
[0][1][2][1][RTW89_WW][0][23] = -4,
[0][1][2][1][RTW89_WW][1][23] = -4,
- [0][1][2][1][RTW89_WW][2][23] = 68,
[0][1][2][1][RTW89_WW][0][25] = -4,
[0][1][2][1][RTW89_WW][1][25] = -4,
- [0][1][2][1][RTW89_WW][2][25] = 68,
[0][1][2][1][RTW89_WW][0][27] = -4,
[0][1][2][1][RTW89_WW][1][27] = -4,
- [0][1][2][1][RTW89_WW][2][27] = 68,
[0][1][2][1][RTW89_WW][0][29] = -4,
[0][1][2][1][RTW89_WW][1][29] = -4,
- [0][1][2][1][RTW89_WW][2][29] = 68,
[0][1][2][1][RTW89_WW][0][30] = -4,
[0][1][2][1][RTW89_WW][1][30] = -4,
- [0][1][2][1][RTW89_WW][2][30] = 68,
[0][1][2][1][RTW89_WW][0][32] = -4,
[0][1][2][1][RTW89_WW][1][32] = -4,
- [0][1][2][1][RTW89_WW][2][32] = 68,
[0][1][2][1][RTW89_WW][0][34] = -4,
[0][1][2][1][RTW89_WW][1][34] = -4,
- [0][1][2][1][RTW89_WW][2][34] = 68,
[0][1][2][1][RTW89_WW][0][36] = -4,
[0][1][2][1][RTW89_WW][1][36] = -4,
- [0][1][2][1][RTW89_WW][2][36] = 68,
[0][1][2][1][RTW89_WW][0][38] = -4,
[0][1][2][1][RTW89_WW][1][38] = -4,
- [0][1][2][1][RTW89_WW][2][38] = 68,
[0][1][2][1][RTW89_WW][0][40] = -4,
[0][1][2][1][RTW89_WW][1][40] = -4,
- [0][1][2][1][RTW89_WW][2][40] = 68,
[0][1][2][1][RTW89_WW][0][42] = -4,
[0][1][2][1][RTW89_WW][1][42] = -4,
- [0][1][2][1][RTW89_WW][2][42] = 68,
[0][1][2][1][RTW89_WW][0][44] = -2,
[0][1][2][1][RTW89_WW][1][44] = -2,
- [0][1][2][1][RTW89_WW][2][44] = 68,
[0][1][2][1][RTW89_WW][0][45] = -2,
[0][1][2][1][RTW89_WW][1][45] = -2,
- [0][1][2][1][RTW89_WW][2][45] = 70,
[0][1][2][1][RTW89_WW][0][47] = -2,
[0][1][2][1][RTW89_WW][1][47] = -2,
- [0][1][2][1][RTW89_WW][2][47] = 68,
[0][1][2][1][RTW89_WW][0][49] = -2,
[0][1][2][1][RTW89_WW][1][49] = -2,
- [0][1][2][1][RTW89_WW][2][49] = 68,
[0][1][2][1][RTW89_WW][0][51] = -2,
[0][1][2][1][RTW89_WW][1][51] = -2,
- [0][1][2][1][RTW89_WW][2][51] = 68,
[0][1][2][1][RTW89_WW][0][53] = -2,
[0][1][2][1][RTW89_WW][1][53] = -2,
- [0][1][2][1][RTW89_WW][2][53] = 68,
[0][1][2][1][RTW89_WW][0][55] = -2,
[0][1][2][1][RTW89_WW][1][55] = -2,
- [0][1][2][1][RTW89_WW][2][55] = 68,
[0][1][2][1][RTW89_WW][0][57] = -2,
[0][1][2][1][RTW89_WW][1][57] = -2,
- [0][1][2][1][RTW89_WW][2][57] = 68,
[0][1][2][1][RTW89_WW][0][59] = -2,
[0][1][2][1][RTW89_WW][1][59] = -2,
- [0][1][2][1][RTW89_WW][2][59] = 68,
[0][1][2][1][RTW89_WW][0][60] = -2,
[0][1][2][1][RTW89_WW][1][60] = -2,
- [0][1][2][1][RTW89_WW][2][60] = 68,
[0][1][2][1][RTW89_WW][0][62] = -2,
[0][1][2][1][RTW89_WW][1][62] = -2,
- [0][1][2][1][RTW89_WW][2][62] = 68,
[0][1][2][1][RTW89_WW][0][64] = -2,
[0][1][2][1][RTW89_WW][1][64] = -2,
- [0][1][2][1][RTW89_WW][2][64] = 68,
[0][1][2][1][RTW89_WW][0][66] = -2,
[0][1][2][1][RTW89_WW][1][66] = -2,
- [0][1][2][1][RTW89_WW][2][66] = 68,
[0][1][2][1][RTW89_WW][0][68] = -2,
[0][1][2][1][RTW89_WW][1][68] = -2,
- [0][1][2][1][RTW89_WW][2][68] = 68,
[0][1][2][1][RTW89_WW][0][70] = -2,
[0][1][2][1][RTW89_WW][1][70] = -2,
- [0][1][2][1][RTW89_WW][2][70] = 68,
[0][1][2][1][RTW89_WW][0][72] = -2,
[0][1][2][1][RTW89_WW][1][72] = -2,
- [0][1][2][1][RTW89_WW][2][72] = 68,
[0][1][2][1][RTW89_WW][0][74] = -2,
[0][1][2][1][RTW89_WW][1][74] = -2,
- [0][1][2][1][RTW89_WW][2][74] = 68,
[0][1][2][1][RTW89_WW][0][75] = -2,
[0][1][2][1][RTW89_WW][1][75] = -2,
- [0][1][2][1][RTW89_WW][2][75] = 68,
[0][1][2][1][RTW89_WW][0][77] = -2,
[0][1][2][1][RTW89_WW][1][77] = -2,
- [0][1][2][1][RTW89_WW][2][77] = 68,
[0][1][2][1][RTW89_WW][0][79] = -2,
[0][1][2][1][RTW89_WW][1][79] = -2,
- [0][1][2][1][RTW89_WW][2][79] = 68,
[0][1][2][1][RTW89_WW][0][81] = -2,
[0][1][2][1][RTW89_WW][1][81] = -2,
- [0][1][2][1][RTW89_WW][2][81] = 68,
[0][1][2][1][RTW89_WW][0][83] = -2,
[0][1][2][1][RTW89_WW][1][83] = -2,
- [0][1][2][1][RTW89_WW][2][83] = 68,
[0][1][2][1][RTW89_WW][0][85] = -2,
[0][1][2][1][RTW89_WW][1][85] = -2,
- [0][1][2][1][RTW89_WW][2][85] = 68,
[0][1][2][1][RTW89_WW][0][87] = -2,
[0][1][2][1][RTW89_WW][1][87] = -2,
- [0][1][2][1][RTW89_WW][2][87] = 0,
[0][1][2][1][RTW89_WW][0][89] = -2,
[0][1][2][1][RTW89_WW][1][89] = -2,
- [0][1][2][1][RTW89_WW][2][89] = 0,
[0][1][2][1][RTW89_WW][0][90] = -2,
[0][1][2][1][RTW89_WW][1][90] = -2,
- [0][1][2][1][RTW89_WW][2][90] = 0,
[0][1][2][1][RTW89_WW][0][92] = -2,
[0][1][2][1][RTW89_WW][1][92] = -2,
- [0][1][2][1][RTW89_WW][2][92] = 0,
[0][1][2][1][RTW89_WW][0][94] = -2,
[0][1][2][1][RTW89_WW][1][94] = -2,
- [0][1][2][1][RTW89_WW][2][94] = 0,
[0][1][2][1][RTW89_WW][0][96] = -2,
[0][1][2][1][RTW89_WW][1][96] = -2,
- [0][1][2][1][RTW89_WW][2][96] = 0,
[0][1][2][1][RTW89_WW][0][98] = -2,
[0][1][2][1][RTW89_WW][1][98] = -2,
- [0][1][2][1][RTW89_WW][2][98] = 0,
[0][1][2][1][RTW89_WW][0][100] = -2,
[0][1][2][1][RTW89_WW][1][100] = -2,
- [0][1][2][1][RTW89_WW][2][100] = 0,
[0][1][2][1][RTW89_WW][0][102] = -2,
[0][1][2][1][RTW89_WW][1][102] = -2,
- [0][1][2][1][RTW89_WW][2][102] = 0,
[0][1][2][1][RTW89_WW][0][104] = -2,
[0][1][2][1][RTW89_WW][1][104] = -2,
- [0][1][2][1][RTW89_WW][2][104] = 0,
[0][1][2][1][RTW89_WW][0][105] = -2,
[0][1][2][1][RTW89_WW][1][105] = -2,
- [0][1][2][1][RTW89_WW][2][105] = 0,
[0][1][2][1][RTW89_WW][0][107] = 1,
[0][1][2][1][RTW89_WW][1][107] = 1,
- [0][1][2][1][RTW89_WW][2][107] = 0,
[0][1][2][1][RTW89_WW][0][109] = 1,
[0][1][2][1][RTW89_WW][1][109] = 1,
- [0][1][2][1][RTW89_WW][2][109] = 0,
[0][1][2][1][RTW89_WW][0][111] = 0,
[0][1][2][1][RTW89_WW][1][111] = 0,
- [0][1][2][1][RTW89_WW][2][111] = 0,
[0][1][2][1][RTW89_WW][0][113] = 0,
[0][1][2][1][RTW89_WW][1][113] = 0,
- [0][1][2][1][RTW89_WW][2][113] = 0,
[0][1][2][1][RTW89_WW][0][115] = 0,
[0][1][2][1][RTW89_WW][1][115] = 0,
- [0][1][2][1][RTW89_WW][2][115] = 0,
[0][1][2][1][RTW89_WW][0][117] = 0,
[0][1][2][1][RTW89_WW][1][117] = 0,
- [0][1][2][1][RTW89_WW][2][117] = 0,
[0][1][2][1][RTW89_WW][0][119] = 0,
[0][1][2][1][RTW89_WW][1][119] = 0,
- [0][1][2][1][RTW89_WW][2][119] = 0,
[1][0][2][0][RTW89_WW][0][1] = 24,
[1][0][2][0][RTW89_WW][1][1] = 34,
- [1][0][2][0][RTW89_WW][2][1] = 70,
[1][0][2][0][RTW89_WW][0][5] = 24,
[1][0][2][0][RTW89_WW][1][5] = 34,
- [1][0][2][0][RTW89_WW][2][5] = 70,
[1][0][2][0][RTW89_WW][0][9] = 24,
[1][0][2][0][RTW89_WW][1][9] = 34,
- [1][0][2][0][RTW89_WW][2][9] = 70,
[1][0][2][0][RTW89_WW][0][13] = 24,
[1][0][2][0][RTW89_WW][1][13] = 34,
- [1][0][2][0][RTW89_WW][2][13] = 70,
[1][0][2][0][RTW89_WW][0][16] = 24,
[1][0][2][0][RTW89_WW][1][16] = 34,
- [1][0][2][0][RTW89_WW][2][16] = 70,
[1][0][2][0][RTW89_WW][0][20] = 24,
[1][0][2][0][RTW89_WW][1][20] = 34,
- [1][0][2][0][RTW89_WW][2][20] = 70,
[1][0][2][0][RTW89_WW][0][24] = 26,
[1][0][2][0][RTW89_WW][1][24] = 36,
- [1][0][2][0][RTW89_WW][2][24] = 70,
[1][0][2][0][RTW89_WW][0][28] = 26,
[1][0][2][0][RTW89_WW][1][28] = 34,
- [1][0][2][0][RTW89_WW][2][28] = 70,
[1][0][2][0][RTW89_WW][0][31] = 26,
[1][0][2][0][RTW89_WW][1][31] = 34,
- [1][0][2][0][RTW89_WW][2][31] = 70,
[1][0][2][0][RTW89_WW][0][35] = 26,
[1][0][2][0][RTW89_WW][1][35] = 34,
- [1][0][2][0][RTW89_WW][2][35] = 70,
[1][0][2][0][RTW89_WW][0][39] = 26,
[1][0][2][0][RTW89_WW][1][39] = 34,
- [1][0][2][0][RTW89_WW][2][39] = 70,
[1][0][2][0][RTW89_WW][0][43] = 26,
[1][0][2][0][RTW89_WW][1][43] = 34,
- [1][0][2][0][RTW89_WW][2][43] = 70,
[1][0][2][0][RTW89_WW][0][46] = 34,
[1][0][2][0][RTW89_WW][1][46] = 34,
- [1][0][2][0][RTW89_WW][2][46] = 68,
[1][0][2][0][RTW89_WW][0][50] = 34,
[1][0][2][0][RTW89_WW][1][50] = 34,
- [1][0][2][0][RTW89_WW][2][50] = 68,
[1][0][2][0][RTW89_WW][0][54] = 36,
[1][0][2][0][RTW89_WW][1][54] = 36,
- [1][0][2][0][RTW89_WW][2][54] = 0,
[1][0][2][0][RTW89_WW][0][58] = 36,
[1][0][2][0][RTW89_WW][1][58] = 36,
- [1][0][2][0][RTW89_WW][2][58] = 66,
[1][0][2][0][RTW89_WW][0][61] = 34,
[1][0][2][0][RTW89_WW][1][61] = 34,
- [1][0][2][0][RTW89_WW][2][61] = 66,
[1][0][2][0][RTW89_WW][0][65] = 34,
[1][0][2][0][RTW89_WW][1][65] = 34,
- [1][0][2][0][RTW89_WW][2][65] = 66,
[1][0][2][0][RTW89_WW][0][69] = 34,
[1][0][2][0][RTW89_WW][1][69] = 34,
- [1][0][2][0][RTW89_WW][2][69] = 66,
[1][0][2][0][RTW89_WW][0][73] = 34,
[1][0][2][0][RTW89_WW][1][73] = 34,
- [1][0][2][0][RTW89_WW][2][73] = 66,
[1][0][2][0][RTW89_WW][0][76] = 34,
[1][0][2][0][RTW89_WW][1][76] = 34,
- [1][0][2][0][RTW89_WW][2][76] = 66,
[1][0][2][0][RTW89_WW][0][80] = 34,
[1][0][2][0][RTW89_WW][1][80] = 34,
- [1][0][2][0][RTW89_WW][2][80] = 66,
[1][0][2][0][RTW89_WW][0][84] = 34,
[1][0][2][0][RTW89_WW][1][84] = 34,
- [1][0][2][0][RTW89_WW][2][84] = 66,
[1][0][2][0][RTW89_WW][0][88] = 34,
[1][0][2][0][RTW89_WW][1][88] = 34,
- [1][0][2][0][RTW89_WW][2][88] = 0,
[1][0][2][0][RTW89_WW][0][91] = 36,
[1][0][2][0][RTW89_WW][1][91] = 36,
- [1][0][2][0][RTW89_WW][2][91] = 0,
[1][0][2][0][RTW89_WW][0][95] = 34,
[1][0][2][0][RTW89_WW][1][95] = 34,
- [1][0][2][0][RTW89_WW][2][95] = 0,
[1][0][2][0][RTW89_WW][0][99] = 34,
[1][0][2][0][RTW89_WW][1][99] = 34,
- [1][0][2][0][RTW89_WW][2][99] = 0,
[1][0][2][0][RTW89_WW][0][103] = 34,
[1][0][2][0][RTW89_WW][1][103] = 34,
- [1][0][2][0][RTW89_WW][2][103] = 0,
[1][0][2][0][RTW89_WW][0][106] = 36,
[1][0][2][0][RTW89_WW][1][106] = 36,
- [1][0][2][0][RTW89_WW][2][106] = 0,
[1][0][2][0][RTW89_WW][0][110] = 0,
[1][0][2][0][RTW89_WW][1][110] = 0,
- [1][0][2][0][RTW89_WW][2][110] = 0,
[1][0][2][0][RTW89_WW][0][114] = 0,
[1][0][2][0][RTW89_WW][1][114] = 0,
- [1][0][2][0][RTW89_WW][2][114] = 0,
[1][0][2][0][RTW89_WW][0][118] = 0,
[1][0][2][0][RTW89_WW][1][118] = 0,
- [1][0][2][0][RTW89_WW][2][118] = 0,
[1][1][2][0][RTW89_WW][0][1] = 10,
[1][1][2][0][RTW89_WW][1][1] = 10,
- [1][1][2][0][RTW89_WW][2][1] = 58,
[1][1][2][0][RTW89_WW][0][5] = 10,
[1][1][2][0][RTW89_WW][1][5] = 10,
- [1][1][2][0][RTW89_WW][2][5] = 58,
[1][1][2][0][RTW89_WW][0][9] = 10,
[1][1][2][0][RTW89_WW][1][9] = 10,
- [1][1][2][0][RTW89_WW][2][9] = 58,
[1][1][2][0][RTW89_WW][0][13] = 10,
[1][1][2][0][RTW89_WW][1][13] = 10,
- [1][1][2][0][RTW89_WW][2][13] = 58,
[1][1][2][0][RTW89_WW][0][16] = 10,
[1][1][2][0][RTW89_WW][1][16] = 10,
- [1][1][2][0][RTW89_WW][2][16] = 58,
[1][1][2][0][RTW89_WW][0][20] = 10,
[1][1][2][0][RTW89_WW][1][20] = 10,
- [1][1][2][0][RTW89_WW][2][20] = 58,
[1][1][2][0][RTW89_WW][0][24] = 10,
[1][1][2][0][RTW89_WW][1][24] = 10,
- [1][1][2][0][RTW89_WW][2][24] = 70,
[1][1][2][0][RTW89_WW][0][28] = 10,
[1][1][2][0][RTW89_WW][1][28] = 10,
- [1][1][2][0][RTW89_WW][2][28] = 70,
[1][1][2][0][RTW89_WW][0][31] = 10,
[1][1][2][0][RTW89_WW][1][31] = 10,
- [1][1][2][0][RTW89_WW][2][31] = 70,
[1][1][2][0][RTW89_WW][0][35] = 10,
[1][1][2][0][RTW89_WW][1][35] = 10,
- [1][1][2][0][RTW89_WW][2][35] = 70,
[1][1][2][0][RTW89_WW][0][39] = 10,
[1][1][2][0][RTW89_WW][1][39] = 10,
- [1][1][2][0][RTW89_WW][2][39] = 70,
[1][1][2][0][RTW89_WW][0][43] = 10,
[1][1][2][0][RTW89_WW][1][43] = 10,
- [1][1][2][0][RTW89_WW][2][43] = 70,
[1][1][2][0][RTW89_WW][0][46] = 12,
[1][1][2][0][RTW89_WW][1][46] = 12,
- [1][1][2][0][RTW89_WW][2][46] = 68,
[1][1][2][0][RTW89_WW][0][50] = 12,
[1][1][2][0][RTW89_WW][1][50] = 12,
- [1][1][2][0][RTW89_WW][2][50] = 68,
[1][1][2][0][RTW89_WW][0][54] = 10,
[1][1][2][0][RTW89_WW][1][54] = 10,
- [1][1][2][0][RTW89_WW][2][54] = 0,
[1][1][2][0][RTW89_WW][0][58] = 10,
[1][1][2][0][RTW89_WW][1][58] = 10,
- [1][1][2][0][RTW89_WW][2][58] = 66,
[1][1][2][0][RTW89_WW][0][61] = 10,
[1][1][2][0][RTW89_WW][1][61] = 10,
- [1][1][2][0][RTW89_WW][2][61] = 66,
[1][1][2][0][RTW89_WW][0][65] = 10,
[1][1][2][0][RTW89_WW][1][65] = 10,
- [1][1][2][0][RTW89_WW][2][65] = 66,
[1][1][2][0][RTW89_WW][0][69] = 10,
[1][1][2][0][RTW89_WW][1][69] = 10,
- [1][1][2][0][RTW89_WW][2][69] = 66,
[1][1][2][0][RTW89_WW][0][73] = 10,
[1][1][2][0][RTW89_WW][1][73] = 10,
- [1][1][2][0][RTW89_WW][2][73] = 66,
[1][1][2][0][RTW89_WW][0][76] = 10,
[1][1][2][0][RTW89_WW][1][76] = 10,
- [1][1][2][0][RTW89_WW][2][76] = 66,
[1][1][2][0][RTW89_WW][0][80] = 10,
[1][1][2][0][RTW89_WW][1][80] = 10,
- [1][1][2][0][RTW89_WW][2][80] = 66,
[1][1][2][0][RTW89_WW][0][84] = 10,
[1][1][2][0][RTW89_WW][1][84] = 10,
- [1][1][2][0][RTW89_WW][2][84] = 66,
[1][1][2][0][RTW89_WW][0][88] = 10,
[1][1][2][0][RTW89_WW][1][88] = 10,
- [1][1][2][0][RTW89_WW][2][88] = 0,
[1][1][2][0][RTW89_WW][0][91] = 12,
[1][1][2][0][RTW89_WW][1][91] = 12,
- [1][1][2][0][RTW89_WW][2][91] = 0,
[1][1][2][0][RTW89_WW][0][95] = 10,
[1][1][2][0][RTW89_WW][1][95] = 10,
- [1][1][2][0][RTW89_WW][2][95] = 0,
[1][1][2][0][RTW89_WW][0][99] = 10,
[1][1][2][0][RTW89_WW][1][99] = 10,
- [1][1][2][0][RTW89_WW][2][99] = 0,
[1][1][2][0][RTW89_WW][0][103] = 10,
[1][1][2][0][RTW89_WW][1][103] = 10,
- [1][1][2][0][RTW89_WW][2][103] = 0,
[1][1][2][0][RTW89_WW][0][106] = 12,
[1][1][2][0][RTW89_WW][1][106] = 12,
- [1][1][2][0][RTW89_WW][2][106] = 0,
[1][1][2][0][RTW89_WW][0][110] = 0,
[1][1][2][0][RTW89_WW][1][110] = 0,
- [1][1][2][0][RTW89_WW][2][110] = 0,
[1][1][2][0][RTW89_WW][0][114] = 0,
[1][1][2][0][RTW89_WW][1][114] = 0,
- [1][1][2][0][RTW89_WW][2][114] = 0,
[1][1][2][0][RTW89_WW][0][118] = 0,
[1][1][2][0][RTW89_WW][1][118] = 0,
- [1][1][2][0][RTW89_WW][2][118] = 0,
[1][1][2][1][RTW89_WW][0][1] = 6,
[1][1][2][1][RTW89_WW][1][1] = 10,
- [1][1][2][1][RTW89_WW][2][1] = 58,
[1][1][2][1][RTW89_WW][0][5] = 6,
[1][1][2][1][RTW89_WW][1][5] = 10,
- [1][1][2][1][RTW89_WW][2][5] = 58,
[1][1][2][1][RTW89_WW][0][9] = 6,
[1][1][2][1][RTW89_WW][1][9] = 10,
- [1][1][2][1][RTW89_WW][2][9] = 58,
[1][1][2][1][RTW89_WW][0][13] = 6,
[1][1][2][1][RTW89_WW][1][13] = 10,
- [1][1][2][1][RTW89_WW][2][13] = 58,
[1][1][2][1][RTW89_WW][0][16] = 6,
[1][1][2][1][RTW89_WW][1][16] = 10,
- [1][1][2][1][RTW89_WW][2][16] = 58,
[1][1][2][1][RTW89_WW][0][20] = 6,
[1][1][2][1][RTW89_WW][1][20] = 10,
- [1][1][2][1][RTW89_WW][2][20] = 58,
[1][1][2][1][RTW89_WW][0][24] = 6,
[1][1][2][1][RTW89_WW][1][24] = 10,
- [1][1][2][1][RTW89_WW][2][24] = 70,
[1][1][2][1][RTW89_WW][0][28] = 6,
[1][1][2][1][RTW89_WW][1][28] = 10,
- [1][1][2][1][RTW89_WW][2][28] = 70,
[1][1][2][1][RTW89_WW][0][31] = 6,
[1][1][2][1][RTW89_WW][1][31] = 10,
- [1][1][2][1][RTW89_WW][2][31] = 70,
[1][1][2][1][RTW89_WW][0][35] = 6,
[1][1][2][1][RTW89_WW][1][35] = 10,
- [1][1][2][1][RTW89_WW][2][35] = 70,
[1][1][2][1][RTW89_WW][0][39] = 6,
[1][1][2][1][RTW89_WW][1][39] = 10,
- [1][1][2][1][RTW89_WW][2][39] = 70,
[1][1][2][1][RTW89_WW][0][43] = 6,
[1][1][2][1][RTW89_WW][1][43] = 10,
- [1][1][2][1][RTW89_WW][2][43] = 70,
[1][1][2][1][RTW89_WW][0][46] = 12,
[1][1][2][1][RTW89_WW][1][46] = 12,
- [1][1][2][1][RTW89_WW][2][46] = 68,
[1][1][2][1][RTW89_WW][0][50] = 12,
[1][1][2][1][RTW89_WW][1][50] = 12,
- [1][1][2][1][RTW89_WW][2][50] = 68,
[1][1][2][1][RTW89_WW][0][54] = 10,
[1][1][2][1][RTW89_WW][1][54] = 10,
- [1][1][2][1][RTW89_WW][2][54] = 0,
[1][1][2][1][RTW89_WW][0][58] = 10,
[1][1][2][1][RTW89_WW][1][58] = 10,
- [1][1][2][1][RTW89_WW][2][58] = 66,
[1][1][2][1][RTW89_WW][0][61] = 10,
[1][1][2][1][RTW89_WW][1][61] = 10,
- [1][1][2][1][RTW89_WW][2][61] = 66,
[1][1][2][1][RTW89_WW][0][65] = 10,
[1][1][2][1][RTW89_WW][1][65] = 10,
- [1][1][2][1][RTW89_WW][2][65] = 66,
[1][1][2][1][RTW89_WW][0][69] = 10,
[1][1][2][1][RTW89_WW][1][69] = 10,
- [1][1][2][1][RTW89_WW][2][69] = 66,
[1][1][2][1][RTW89_WW][0][73] = 10,
[1][1][2][1][RTW89_WW][1][73] = 10,
- [1][1][2][1][RTW89_WW][2][73] = 66,
[1][1][2][1][RTW89_WW][0][76] = 10,
[1][1][2][1][RTW89_WW][1][76] = 10,
- [1][1][2][1][RTW89_WW][2][76] = 66,
[1][1][2][1][RTW89_WW][0][80] = 10,
[1][1][2][1][RTW89_WW][1][80] = 10,
- [1][1][2][1][RTW89_WW][2][80] = 66,
[1][1][2][1][RTW89_WW][0][84] = 10,
[1][1][2][1][RTW89_WW][1][84] = 10,
- [1][1][2][1][RTW89_WW][2][84] = 66,
[1][1][2][1][RTW89_WW][0][88] = 10,
[1][1][2][1][RTW89_WW][1][88] = 10,
- [1][1][2][1][RTW89_WW][2][88] = 0,
[1][1][2][1][RTW89_WW][0][91] = 12,
[1][1][2][1][RTW89_WW][1][91] = 12,
- [1][1][2][1][RTW89_WW][2][91] = 0,
[1][1][2][1][RTW89_WW][0][95] = 10,
[1][1][2][1][RTW89_WW][1][95] = 10,
- [1][1][2][1][RTW89_WW][2][95] = 0,
[1][1][2][1][RTW89_WW][0][99] = 10,
[1][1][2][1][RTW89_WW][1][99] = 10,
- [1][1][2][1][RTW89_WW][2][99] = 0,
[1][1][2][1][RTW89_WW][0][103] = 10,
[1][1][2][1][RTW89_WW][1][103] = 10,
- [1][1][2][1][RTW89_WW][2][103] = 0,
[1][1][2][1][RTW89_WW][0][106] = 12,
[1][1][2][1][RTW89_WW][1][106] = 12,
- [1][1][2][1][RTW89_WW][2][106] = 0,
[1][1][2][1][RTW89_WW][0][110] = 0,
[1][1][2][1][RTW89_WW][1][110] = 0,
- [1][1][2][1][RTW89_WW][2][110] = 0,
[1][1][2][1][RTW89_WW][0][114] = 0,
[1][1][2][1][RTW89_WW][1][114] = 0,
- [1][1][2][1][RTW89_WW][2][114] = 0,
[1][1][2][1][RTW89_WW][0][118] = 0,
[1][1][2][1][RTW89_WW][1][118] = 0,
- [1][1][2][1][RTW89_WW][2][118] = 0,
[2][0][2][0][RTW89_WW][0][3] = 24,
[2][0][2][0][RTW89_WW][1][3] = 46,
- [2][0][2][0][RTW89_WW][2][3] = 60,
[2][0][2][0][RTW89_WW][0][11] = 24,
[2][0][2][0][RTW89_WW][1][11] = 46,
- [2][0][2][0][RTW89_WW][2][11] = 60,
[2][0][2][0][RTW89_WW][0][18] = 24,
[2][0][2][0][RTW89_WW][1][18] = 46,
- [2][0][2][0][RTW89_WW][2][18] = 60,
[2][0][2][0][RTW89_WW][0][26] = 24,
[2][0][2][0][RTW89_WW][1][26] = 46,
- [2][0][2][0][RTW89_WW][2][26] = 60,
[2][0][2][0][RTW89_WW][0][33] = 24,
[2][0][2][0][RTW89_WW][1][33] = 46,
- [2][0][2][0][RTW89_WW][2][33] = 60,
[2][0][2][0][RTW89_WW][0][41] = 24,
[2][0][2][0][RTW89_WW][1][41] = 46,
- [2][0][2][0][RTW89_WW][2][41] = 60,
[2][0][2][0][RTW89_WW][0][48] = 46,
[2][0][2][0][RTW89_WW][1][48] = 46,
- [2][0][2][0][RTW89_WW][2][48] = 60,
[2][0][2][0][RTW89_WW][0][56] = 46,
[2][0][2][0][RTW89_WW][1][56] = 46,
- [2][0][2][0][RTW89_WW][2][56] = 58,
[2][0][2][0][RTW89_WW][0][63] = 46,
[2][0][2][0][RTW89_WW][1][63] = 46,
- [2][0][2][0][RTW89_WW][2][63] = 58,
[2][0][2][0][RTW89_WW][0][71] = 46,
[2][0][2][0][RTW89_WW][1][71] = 46,
- [2][0][2][0][RTW89_WW][2][71] = 58,
[2][0][2][0][RTW89_WW][0][78] = 46,
[2][0][2][0][RTW89_WW][1][78] = 46,
- [2][0][2][0][RTW89_WW][2][78] = 58,
[2][0][2][0][RTW89_WW][0][86] = 46,
[2][0][2][0][RTW89_WW][1][86] = 46,
- [2][0][2][0][RTW89_WW][2][86] = 0,
[2][0][2][0][RTW89_WW][0][93] = 46,
[2][0][2][0][RTW89_WW][1][93] = 46,
- [2][0][2][0][RTW89_WW][2][93] = 0,
[2][0][2][0][RTW89_WW][0][101] = 44,
[2][0][2][0][RTW89_WW][1][101] = 44,
- [2][0][2][0][RTW89_WW][2][101] = 0,
[2][0][2][0][RTW89_WW][0][108] = 0,
[2][0][2][0][RTW89_WW][1][108] = 0,
- [2][0][2][0][RTW89_WW][2][108] = 0,
[2][0][2][0][RTW89_WW][0][116] = 0,
[2][0][2][0][RTW89_WW][1][116] = 0,
- [2][0][2][0][RTW89_WW][2][116] = 0,
[2][1][2][0][RTW89_WW][0][3] = 12,
[2][1][2][0][RTW89_WW][1][3] = 22,
- [2][1][2][0][RTW89_WW][2][3] = 50,
[2][1][2][0][RTW89_WW][0][11] = 12,
[2][1][2][0][RTW89_WW][1][11] = 20,
- [2][1][2][0][RTW89_WW][2][11] = 50,
[2][1][2][0][RTW89_WW][0][18] = 12,
[2][1][2][0][RTW89_WW][1][18] = 20,
- [2][1][2][0][RTW89_WW][2][18] = 50,
[2][1][2][0][RTW89_WW][0][26] = 12,
[2][1][2][0][RTW89_WW][1][26] = 20,
- [2][1][2][0][RTW89_WW][2][26] = 60,
[2][1][2][0][RTW89_WW][0][33] = 12,
[2][1][2][0][RTW89_WW][1][33] = 20,
- [2][1][2][0][RTW89_WW][2][33] = 60,
[2][1][2][0][RTW89_WW][0][41] = 12,
[2][1][2][0][RTW89_WW][1][41] = 22,
- [2][1][2][0][RTW89_WW][2][41] = 60,
[2][1][2][0][RTW89_WW][0][48] = 22,
[2][1][2][0][RTW89_WW][1][48] = 22,
- [2][1][2][0][RTW89_WW][2][48] = 60,
[2][1][2][0][RTW89_WW][0][56] = 20,
[2][1][2][0][RTW89_WW][1][56] = 20,
- [2][1][2][0][RTW89_WW][2][56] = 56,
[2][1][2][0][RTW89_WW][0][63] = 22,
[2][1][2][0][RTW89_WW][1][63] = 22,
- [2][1][2][0][RTW89_WW][2][63] = 58,
[2][1][2][0][RTW89_WW][0][71] = 20,
[2][1][2][0][RTW89_WW][1][71] = 20,
- [2][1][2][0][RTW89_WW][2][71] = 58,
[2][1][2][0][RTW89_WW][0][78] = 20,
[2][1][2][0][RTW89_WW][1][78] = 20,
- [2][1][2][0][RTW89_WW][2][78] = 58,
[2][1][2][0][RTW89_WW][0][86] = 20,
[2][1][2][0][RTW89_WW][1][86] = 20,
- [2][1][2][0][RTW89_WW][2][86] = 0,
[2][1][2][0][RTW89_WW][0][93] = 22,
[2][1][2][0][RTW89_WW][1][93] = 22,
- [2][1][2][0][RTW89_WW][2][93] = 0,
[2][1][2][0][RTW89_WW][0][101] = 22,
[2][1][2][0][RTW89_WW][1][101] = 22,
- [2][1][2][0][RTW89_WW][2][101] = 0,
[2][1][2][0][RTW89_WW][0][108] = 0,
[2][1][2][0][RTW89_WW][1][108] = 0,
- [2][1][2][0][RTW89_WW][2][108] = 0,
[2][1][2][0][RTW89_WW][0][116] = 0,
[2][1][2][0][RTW89_WW][1][116] = 0,
- [2][1][2][0][RTW89_WW][2][116] = 0,
[2][1][2][1][RTW89_WW][0][3] = 6,
[2][1][2][1][RTW89_WW][1][3] = 22,
- [2][1][2][1][RTW89_WW][2][3] = 50,
[2][1][2][1][RTW89_WW][0][11] = 6,
[2][1][2][1][RTW89_WW][1][11] = 20,
- [2][1][2][1][RTW89_WW][2][11] = 50,
[2][1][2][1][RTW89_WW][0][18] = 6,
[2][1][2][1][RTW89_WW][1][18] = 20,
- [2][1][2][1][RTW89_WW][2][18] = 50,
[2][1][2][1][RTW89_WW][0][26] = 6,
[2][1][2][1][RTW89_WW][1][26] = 20,
- [2][1][2][1][RTW89_WW][2][26] = 60,
[2][1][2][1][RTW89_WW][0][33] = 6,
[2][1][2][1][RTW89_WW][1][33] = 20,
- [2][1][2][1][RTW89_WW][2][33] = 60,
[2][1][2][1][RTW89_WW][0][41] = 6,
[2][1][2][1][RTW89_WW][1][41] = 22,
- [2][1][2][1][RTW89_WW][2][41] = 60,
[2][1][2][1][RTW89_WW][0][48] = 22,
[2][1][2][1][RTW89_WW][1][48] = 22,
- [2][1][2][1][RTW89_WW][2][48] = 60,
[2][1][2][1][RTW89_WW][0][56] = 20,
[2][1][2][1][RTW89_WW][1][56] = 20,
- [2][1][2][1][RTW89_WW][2][56] = 56,
[2][1][2][1][RTW89_WW][0][63] = 22,
[2][1][2][1][RTW89_WW][1][63] = 22,
- [2][1][2][1][RTW89_WW][2][63] = 58,
[2][1][2][1][RTW89_WW][0][71] = 20,
[2][1][2][1][RTW89_WW][1][71] = 20,
- [2][1][2][1][RTW89_WW][2][71] = 58,
[2][1][2][1][RTW89_WW][0][78] = 20,
[2][1][2][1][RTW89_WW][1][78] = 20,
- [2][1][2][1][RTW89_WW][2][78] = 58,
[2][1][2][1][RTW89_WW][0][86] = 20,
[2][1][2][1][RTW89_WW][1][86] = 20,
- [2][1][2][1][RTW89_WW][2][86] = 0,
[2][1][2][1][RTW89_WW][0][93] = 22,
[2][1][2][1][RTW89_WW][1][93] = 22,
- [2][1][2][1][RTW89_WW][2][93] = 0,
[2][1][2][1][RTW89_WW][0][101] = 22,
[2][1][2][1][RTW89_WW][1][101] = 22,
- [2][1][2][1][RTW89_WW][2][101] = 0,
[2][1][2][1][RTW89_WW][0][108] = 0,
[2][1][2][1][RTW89_WW][1][108] = 0,
- [2][1][2][1][RTW89_WW][2][108] = 0,
[2][1][2][1][RTW89_WW][0][116] = 0,
[2][1][2][1][RTW89_WW][1][116] = 0,
- [2][1][2][1][RTW89_WW][2][116] = 0,
[3][0][2][0][RTW89_WW][0][7] = 22,
[3][0][2][0][RTW89_WW][1][7] = 42,
- [3][0][2][0][RTW89_WW][2][7] = 52,
[3][0][2][0][RTW89_WW][0][22] = 20,
[3][0][2][0][RTW89_WW][1][22] = 42,
- [3][0][2][0][RTW89_WW][2][22] = 52,
[3][0][2][0][RTW89_WW][0][37] = 20,
[3][0][2][0][RTW89_WW][1][37] = 42,
- [3][0][2][0][RTW89_WW][2][37] = 52,
[3][0][2][0][RTW89_WW][0][52] = 54,
[3][0][2][0][RTW89_WW][1][52] = 54,
- [3][0][2][0][RTW89_WW][2][52] = 56,
[3][0][2][0][RTW89_WW][0][67] = 54,
[3][0][2][0][RTW89_WW][1][67] = 54,
- [3][0][2][0][RTW89_WW][2][67] = 54,
[3][0][2][0][RTW89_WW][0][82] = 26,
[3][0][2][0][RTW89_WW][1][82] = 26,
- [3][0][2][0][RTW89_WW][2][82] = 0,
[3][0][2][0][RTW89_WW][0][97] = 26,
[3][0][2][0][RTW89_WW][1][97] = 26,
- [3][0][2][0][RTW89_WW][2][97] = 0,
[3][0][2][0][RTW89_WW][0][112] = 0,
[3][0][2][0][RTW89_WW][1][112] = 0,
- [3][0][2][0][RTW89_WW][2][112] = 0,
[3][1][2][0][RTW89_WW][0][7] = 10,
[3][1][2][0][RTW89_WW][1][7] = 32,
- [3][1][2][0][RTW89_WW][2][7] = 46,
[3][1][2][0][RTW89_WW][0][22] = 8,
[3][1][2][0][RTW89_WW][1][22] = 30,
- [3][1][2][0][RTW89_WW][2][22] = 52,
[3][1][2][0][RTW89_WW][0][37] = 8,
[3][1][2][0][RTW89_WW][1][37] = 30,
- [3][1][2][0][RTW89_WW][2][37] = 52,
[3][1][2][0][RTW89_WW][0][52] = 30,
[3][1][2][0][RTW89_WW][1][52] = 30,
- [3][1][2][0][RTW89_WW][2][52] = 56,
[3][1][2][0][RTW89_WW][0][67] = 32,
[3][1][2][0][RTW89_WW][1][67] = 32,
- [3][1][2][0][RTW89_WW][2][67] = 54,
[3][1][2][0][RTW89_WW][0][82] = 24,
[3][1][2][0][RTW89_WW][1][82] = 24,
- [3][1][2][0][RTW89_WW][2][82] = 0,
[3][1][2][0][RTW89_WW][0][97] = 24,
[3][1][2][0][RTW89_WW][1][97] = 24,
- [3][1][2][0][RTW89_WW][2][97] = 0,
[3][1][2][0][RTW89_WW][0][112] = 0,
[3][1][2][0][RTW89_WW][1][112] = 0,
- [3][1][2][0][RTW89_WW][2][112] = 0,
[3][1][2][1][RTW89_WW][0][7] = 6,
[3][1][2][1][RTW89_WW][1][7] = 32,
- [3][1][2][1][RTW89_WW][2][7] = 46,
[3][1][2][1][RTW89_WW][0][22] = 6,
[3][1][2][1][RTW89_WW][1][22] = 30,
- [3][1][2][1][RTW89_WW][2][22] = 52,
[3][1][2][1][RTW89_WW][0][37] = 6,
[3][1][2][1][RTW89_WW][1][37] = 30,
- [3][1][2][1][RTW89_WW][2][37] = 52,
[3][1][2][1][RTW89_WW][0][52] = 30,
[3][1][2][1][RTW89_WW][1][52] = 30,
- [3][1][2][1][RTW89_WW][2][52] = 56,
[3][1][2][1][RTW89_WW][0][67] = 32,
[3][1][2][1][RTW89_WW][1][67] = 32,
- [3][1][2][1][RTW89_WW][2][67] = 54,
[3][1][2][1][RTW89_WW][0][82] = 24,
[3][1][2][1][RTW89_WW][1][82] = 24,
- [3][1][2][1][RTW89_WW][2][82] = 0,
[3][1][2][1][RTW89_WW][0][97] = 24,
[3][1][2][1][RTW89_WW][1][97] = 24,
- [3][1][2][1][RTW89_WW][2][97] = 0,
[3][1][2][1][RTW89_WW][0][112] = 0,
[3][1][2][1][RTW89_WW][1][112] = 0,
- [3][1][2][1][RTW89_WW][2][112] = 0,
[0][0][1][0][RTW89_FCC][1][0] = 24,
- [0][0][1][0][RTW89_FCC][2][0] = 56,
[0][0][1][0][RTW89_ETSI][1][0] = 66,
[0][0][1][0][RTW89_ETSI][0][0] = 28,
[0][0][1][0][RTW89_MKK][1][0] = 66,
[0][0][1][0][RTW89_MKK][0][0] = 26,
[0][0][1][0][RTW89_IC][1][0] = 24,
- [0][0][1][0][RTW89_IC][2][0] = 56,
[0][0][1][0][RTW89_KCC][1][0] = 24,
[0][0][1][0][RTW89_KCC][0][0] = 24,
[0][0][1][0][RTW89_ACMA][1][0] = 66,
@@ -38440,13 +37950,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][0] = 56,
[0][0][1][0][RTW89_THAILAND][0][0] = 24,
[0][0][1][0][RTW89_FCC][1][2] = 22,
- [0][0][1][0][RTW89_FCC][2][2] = 56,
[0][0][1][0][RTW89_ETSI][1][2] = 66,
[0][0][1][0][RTW89_ETSI][0][2] = 28,
[0][0][1][0][RTW89_MKK][1][2] = 66,
[0][0][1][0][RTW89_MKK][0][2] = 26,
[0][0][1][0][RTW89_IC][1][2] = 22,
- [0][0][1][0][RTW89_IC][2][2] = 56,
[0][0][1][0][RTW89_KCC][1][2] = 24,
[0][0][1][0][RTW89_KCC][0][2] = 24,
[0][0][1][0][RTW89_ACMA][1][2] = 66,
@@ -38459,13 +37967,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][2] = 56,
[0][0][1][0][RTW89_THAILAND][0][2] = 22,
[0][0][1][0][RTW89_FCC][1][4] = 22,
- [0][0][1][0][RTW89_FCC][2][4] = 56,
[0][0][1][0][RTW89_ETSI][1][4] = 66,
[0][0][1][0][RTW89_ETSI][0][4] = 28,
[0][0][1][0][RTW89_MKK][1][4] = 66,
[0][0][1][0][RTW89_MKK][0][4] = 26,
[0][0][1][0][RTW89_IC][1][4] = 22,
- [0][0][1][0][RTW89_IC][2][4] = 56,
[0][0][1][0][RTW89_KCC][1][4] = 24,
[0][0][1][0][RTW89_KCC][0][4] = 24,
[0][0][1][0][RTW89_ACMA][1][4] = 66,
@@ -38478,13 +37984,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][4] = 56,
[0][0][1][0][RTW89_THAILAND][0][4] = 22,
[0][0][1][0][RTW89_FCC][1][6] = 22,
- [0][0][1][0][RTW89_FCC][2][6] = 56,
[0][0][1][0][RTW89_ETSI][1][6] = 66,
[0][0][1][0][RTW89_ETSI][0][6] = 28,
[0][0][1][0][RTW89_MKK][1][6] = 66,
[0][0][1][0][RTW89_MKK][0][6] = 26,
[0][0][1][0][RTW89_IC][1][6] = 22,
- [0][0][1][0][RTW89_IC][2][6] = 56,
[0][0][1][0][RTW89_KCC][1][6] = 24,
[0][0][1][0][RTW89_KCC][0][6] = 24,
[0][0][1][0][RTW89_ACMA][1][6] = 66,
@@ -38497,13 +38001,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][6] = 56,
[0][0][1][0][RTW89_THAILAND][0][6] = 22,
[0][0][1][0][RTW89_FCC][1][8] = 22,
- [0][0][1][0][RTW89_FCC][2][8] = 56,
[0][0][1][0][RTW89_ETSI][1][8] = 66,
[0][0][1][0][RTW89_ETSI][0][8] = 28,
[0][0][1][0][RTW89_MKK][1][8] = 66,
[0][0][1][0][RTW89_MKK][0][8] = 26,
[0][0][1][0][RTW89_IC][1][8] = 22,
- [0][0][1][0][RTW89_IC][2][8] = 56,
[0][0][1][0][RTW89_KCC][1][8] = 24,
[0][0][1][0][RTW89_KCC][0][8] = 24,
[0][0][1][0][RTW89_ACMA][1][8] = 66,
@@ -38516,13 +38018,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][8] = 56,
[0][0][1][0][RTW89_THAILAND][0][8] = 22,
[0][0][1][0][RTW89_FCC][1][10] = 22,
- [0][0][1][0][RTW89_FCC][2][10] = 56,
[0][0][1][0][RTW89_ETSI][1][10] = 66,
[0][0][1][0][RTW89_ETSI][0][10] = 28,
[0][0][1][0][RTW89_MKK][1][10] = 66,
[0][0][1][0][RTW89_MKK][0][10] = 26,
[0][0][1][0][RTW89_IC][1][10] = 22,
- [0][0][1][0][RTW89_IC][2][10] = 56,
[0][0][1][0][RTW89_KCC][1][10] = 24,
[0][0][1][0][RTW89_KCC][0][10] = 24,
[0][0][1][0][RTW89_ACMA][1][10] = 66,
@@ -38535,13 +38035,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][10] = 56,
[0][0][1][0][RTW89_THAILAND][0][10] = 22,
[0][0][1][0][RTW89_FCC][1][12] = 22,
- [0][0][1][0][RTW89_FCC][2][12] = 56,
[0][0][1][0][RTW89_ETSI][1][12] = 66,
[0][0][1][0][RTW89_ETSI][0][12] = 28,
[0][0][1][0][RTW89_MKK][1][12] = 66,
[0][0][1][0][RTW89_MKK][0][12] = 26,
[0][0][1][0][RTW89_IC][1][12] = 22,
- [0][0][1][0][RTW89_IC][2][12] = 56,
[0][0][1][0][RTW89_KCC][1][12] = 24,
[0][0][1][0][RTW89_KCC][0][12] = 24,
[0][0][1][0][RTW89_ACMA][1][12] = 66,
@@ -38554,13 +38052,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][12] = 56,
[0][0][1][0][RTW89_THAILAND][0][12] = 22,
[0][0][1][0][RTW89_FCC][1][14] = 22,
- [0][0][1][0][RTW89_FCC][2][14] = 56,
[0][0][1][0][RTW89_ETSI][1][14] = 66,
[0][0][1][0][RTW89_ETSI][0][14] = 28,
[0][0][1][0][RTW89_MKK][1][14] = 66,
[0][0][1][0][RTW89_MKK][0][14] = 26,
[0][0][1][0][RTW89_IC][1][14] = 22,
- [0][0][1][0][RTW89_IC][2][14] = 56,
[0][0][1][0][RTW89_KCC][1][14] = 24,
[0][0][1][0][RTW89_KCC][0][14] = 24,
[0][0][1][0][RTW89_ACMA][1][14] = 66,
@@ -38573,13 +38069,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][14] = 56,
[0][0][1][0][RTW89_THAILAND][0][14] = 22,
[0][0][1][0][RTW89_FCC][1][15] = 22,
- [0][0][1][0][RTW89_FCC][2][15] = 56,
[0][0][1][0][RTW89_ETSI][1][15] = 66,
[0][0][1][0][RTW89_ETSI][0][15] = 28,
[0][0][1][0][RTW89_MKK][1][15] = 66,
[0][0][1][0][RTW89_MKK][0][15] = 26,
[0][0][1][0][RTW89_IC][1][15] = 22,
- [0][0][1][0][RTW89_IC][2][15] = 56,
[0][0][1][0][RTW89_KCC][1][15] = 24,
[0][0][1][0][RTW89_KCC][0][15] = 24,
[0][0][1][0][RTW89_ACMA][1][15] = 66,
@@ -38592,13 +38086,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][15] = 56,
[0][0][1][0][RTW89_THAILAND][0][15] = 22,
[0][0][1][0][RTW89_FCC][1][17] = 22,
- [0][0][1][0][RTW89_FCC][2][17] = 56,
[0][0][1][0][RTW89_ETSI][1][17] = 66,
[0][0][1][0][RTW89_ETSI][0][17] = 28,
[0][0][1][0][RTW89_MKK][1][17] = 66,
[0][0][1][0][RTW89_MKK][0][17] = 26,
[0][0][1][0][RTW89_IC][1][17] = 22,
- [0][0][1][0][RTW89_IC][2][17] = 56,
[0][0][1][0][RTW89_KCC][1][17] = 24,
[0][0][1][0][RTW89_KCC][0][17] = 24,
[0][0][1][0][RTW89_ACMA][1][17] = 66,
@@ -38611,13 +38103,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][17] = 56,
[0][0][1][0][RTW89_THAILAND][0][17] = 22,
[0][0][1][0][RTW89_FCC][1][19] = 22,
- [0][0][1][0][RTW89_FCC][2][19] = 56,
[0][0][1][0][RTW89_ETSI][1][19] = 66,
[0][0][1][0][RTW89_ETSI][0][19] = 28,
[0][0][1][0][RTW89_MKK][1][19] = 66,
[0][0][1][0][RTW89_MKK][0][19] = 26,
[0][0][1][0][RTW89_IC][1][19] = 22,
- [0][0][1][0][RTW89_IC][2][19] = 56,
[0][0][1][0][RTW89_KCC][1][19] = 24,
[0][0][1][0][RTW89_KCC][0][19] = 24,
[0][0][1][0][RTW89_ACMA][1][19] = 66,
@@ -38630,13 +38120,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][19] = 56,
[0][0][1][0][RTW89_THAILAND][0][19] = 22,
[0][0][1][0][RTW89_FCC][1][21] = 22,
- [0][0][1][0][RTW89_FCC][2][21] = 56,
[0][0][1][0][RTW89_ETSI][1][21] = 66,
[0][0][1][0][RTW89_ETSI][0][21] = 28,
[0][0][1][0][RTW89_MKK][1][21] = 66,
[0][0][1][0][RTW89_MKK][0][21] = 26,
[0][0][1][0][RTW89_IC][1][21] = 22,
- [0][0][1][0][RTW89_IC][2][21] = 56,
[0][0][1][0][RTW89_KCC][1][21] = 24,
[0][0][1][0][RTW89_KCC][0][21] = 24,
[0][0][1][0][RTW89_ACMA][1][21] = 66,
@@ -38649,13 +38137,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][21] = 56,
[0][0][1][0][RTW89_THAILAND][0][21] = 22,
[0][0][1][0][RTW89_FCC][1][23] = 22,
- [0][0][1][0][RTW89_FCC][2][23] = 70,
[0][0][1][0][RTW89_ETSI][1][23] = 66,
[0][0][1][0][RTW89_ETSI][0][23] = 28,
[0][0][1][0][RTW89_MKK][1][23] = 66,
[0][0][1][0][RTW89_MKK][0][23] = 26,
[0][0][1][0][RTW89_IC][1][23] = 22,
- [0][0][1][0][RTW89_IC][2][23] = 70,
[0][0][1][0][RTW89_KCC][1][23] = 24,
[0][0][1][0][RTW89_KCC][0][23] = 26,
[0][0][1][0][RTW89_ACMA][1][23] = 66,
@@ -38668,13 +38154,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][23] = 66,
[0][0][1][0][RTW89_THAILAND][0][23] = 22,
[0][0][1][0][RTW89_FCC][1][25] = 22,
- [0][0][1][0][RTW89_FCC][2][25] = 70,
[0][0][1][0][RTW89_ETSI][1][25] = 66,
[0][0][1][0][RTW89_ETSI][0][25] = 28,
[0][0][1][0][RTW89_MKK][1][25] = 66,
[0][0][1][0][RTW89_MKK][0][25] = 26,
[0][0][1][0][RTW89_IC][1][25] = 22,
- [0][0][1][0][RTW89_IC][2][25] = 70,
[0][0][1][0][RTW89_KCC][1][25] = 24,
[0][0][1][0][RTW89_KCC][0][25] = 26,
[0][0][1][0][RTW89_ACMA][1][25] = 66,
@@ -38687,13 +38171,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][25] = 66,
[0][0][1][0][RTW89_THAILAND][0][25] = 22,
[0][0][1][0][RTW89_FCC][1][27] = 22,
- [0][0][1][0][RTW89_FCC][2][27] = 70,
[0][0][1][0][RTW89_ETSI][1][27] = 66,
[0][0][1][0][RTW89_ETSI][0][27] = 28,
[0][0][1][0][RTW89_MKK][1][27] = 66,
[0][0][1][0][RTW89_MKK][0][27] = 26,
[0][0][1][0][RTW89_IC][1][27] = 22,
- [0][0][1][0][RTW89_IC][2][27] = 70,
[0][0][1][0][RTW89_KCC][1][27] = 24,
[0][0][1][0][RTW89_KCC][0][27] = 26,
[0][0][1][0][RTW89_ACMA][1][27] = 66,
@@ -38706,13 +38188,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][27] = 66,
[0][0][1][0][RTW89_THAILAND][0][27] = 22,
[0][0][1][0][RTW89_FCC][1][29] = 22,
- [0][0][1][0][RTW89_FCC][2][29] = 70,
[0][0][1][0][RTW89_ETSI][1][29] = 66,
[0][0][1][0][RTW89_ETSI][0][29] = 28,
[0][0][1][0][RTW89_MKK][1][29] = 66,
[0][0][1][0][RTW89_MKK][0][29] = 26,
[0][0][1][0][RTW89_IC][1][29] = 22,
- [0][0][1][0][RTW89_IC][2][29] = 70,
[0][0][1][0][RTW89_KCC][1][29] = 24,
[0][0][1][0][RTW89_KCC][0][29] = 26,
[0][0][1][0][RTW89_ACMA][1][29] = 66,
@@ -38725,13 +38205,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][29] = 66,
[0][0][1][0][RTW89_THAILAND][0][29] = 22,
[0][0][1][0][RTW89_FCC][1][30] = 22,
- [0][0][1][0][RTW89_FCC][2][30] = 70,
[0][0][1][0][RTW89_ETSI][1][30] = 66,
[0][0][1][0][RTW89_ETSI][0][30] = 28,
[0][0][1][0][RTW89_MKK][1][30] = 66,
[0][0][1][0][RTW89_MKK][0][30] = 26,
[0][0][1][0][RTW89_IC][1][30] = 22,
- [0][0][1][0][RTW89_IC][2][30] = 70,
[0][0][1][0][RTW89_KCC][1][30] = 24,
[0][0][1][0][RTW89_KCC][0][30] = 26,
[0][0][1][0][RTW89_ACMA][1][30] = 66,
@@ -38744,13 +38222,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][30] = 66,
[0][0][1][0][RTW89_THAILAND][0][30] = 22,
[0][0][1][0][RTW89_FCC][1][32] = 22,
- [0][0][1][0][RTW89_FCC][2][32] = 70,
[0][0][1][0][RTW89_ETSI][1][32] = 66,
[0][0][1][0][RTW89_ETSI][0][32] = 28,
[0][0][1][0][RTW89_MKK][1][32] = 66,
[0][0][1][0][RTW89_MKK][0][32] = 26,
[0][0][1][0][RTW89_IC][1][32] = 22,
- [0][0][1][0][RTW89_IC][2][32] = 70,
[0][0][1][0][RTW89_KCC][1][32] = 24,
[0][0][1][0][RTW89_KCC][0][32] = 26,
[0][0][1][0][RTW89_ACMA][1][32] = 66,
@@ -38763,13 +38239,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][32] = 66,
[0][0][1][0][RTW89_THAILAND][0][32] = 22,
[0][0][1][0][RTW89_FCC][1][34] = 22,
- [0][0][1][0][RTW89_FCC][2][34] = 70,
[0][0][1][0][RTW89_ETSI][1][34] = 66,
[0][0][1][0][RTW89_ETSI][0][34] = 28,
[0][0][1][0][RTW89_MKK][1][34] = 66,
[0][0][1][0][RTW89_MKK][0][34] = 26,
[0][0][1][0][RTW89_IC][1][34] = 22,
- [0][0][1][0][RTW89_IC][2][34] = 70,
[0][0][1][0][RTW89_KCC][1][34] = 24,
[0][0][1][0][RTW89_KCC][0][34] = 26,
[0][0][1][0][RTW89_ACMA][1][34] = 66,
@@ -38782,13 +38256,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][34] = 66,
[0][0][1][0][RTW89_THAILAND][0][34] = 22,
[0][0][1][0][RTW89_FCC][1][36] = 22,
- [0][0][1][0][RTW89_FCC][2][36] = 70,
[0][0][1][0][RTW89_ETSI][1][36] = 66,
[0][0][1][0][RTW89_ETSI][0][36] = 28,
[0][0][1][0][RTW89_MKK][1][36] = 66,
[0][0][1][0][RTW89_MKK][0][36] = 26,
[0][0][1][0][RTW89_IC][1][36] = 22,
- [0][0][1][0][RTW89_IC][2][36] = 70,
[0][0][1][0][RTW89_KCC][1][36] = 24,
[0][0][1][0][RTW89_KCC][0][36] = 26,
[0][0][1][0][RTW89_ACMA][1][36] = 66,
@@ -38801,13 +38273,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][36] = 66,
[0][0][1][0][RTW89_THAILAND][0][36] = 22,
[0][0][1][0][RTW89_FCC][1][38] = 22,
- [0][0][1][0][RTW89_FCC][2][38] = 70,
[0][0][1][0][RTW89_ETSI][1][38] = 66,
[0][0][1][0][RTW89_ETSI][0][38] = 28,
[0][0][1][0][RTW89_MKK][1][38] = 66,
[0][0][1][0][RTW89_MKK][0][38] = 26,
[0][0][1][0][RTW89_IC][1][38] = 22,
- [0][0][1][0][RTW89_IC][2][38] = 70,
[0][0][1][0][RTW89_KCC][1][38] = 24,
[0][0][1][0][RTW89_KCC][0][38] = 26,
[0][0][1][0][RTW89_ACMA][1][38] = 66,
@@ -38820,13 +38290,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][38] = 66,
[0][0][1][0][RTW89_THAILAND][0][38] = 22,
[0][0][1][0][RTW89_FCC][1][40] = 22,
- [0][0][1][0][RTW89_FCC][2][40] = 70,
[0][0][1][0][RTW89_ETSI][1][40] = 66,
[0][0][1][0][RTW89_ETSI][0][40] = 28,
[0][0][1][0][RTW89_MKK][1][40] = 66,
[0][0][1][0][RTW89_MKK][0][40] = 26,
[0][0][1][0][RTW89_IC][1][40] = 22,
- [0][0][1][0][RTW89_IC][2][40] = 70,
[0][0][1][0][RTW89_KCC][1][40] = 24,
[0][0][1][0][RTW89_KCC][0][40] = 26,
[0][0][1][0][RTW89_ACMA][1][40] = 66,
@@ -38839,13 +38307,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][40] = 66,
[0][0][1][0][RTW89_THAILAND][0][40] = 22,
[0][0][1][0][RTW89_FCC][1][42] = 22,
- [0][0][1][0][RTW89_FCC][2][42] = 70,
[0][0][1][0][RTW89_ETSI][1][42] = 66,
[0][0][1][0][RTW89_ETSI][0][42] = 28,
[0][0][1][0][RTW89_MKK][1][42] = 66,
[0][0][1][0][RTW89_MKK][0][42] = 26,
[0][0][1][0][RTW89_IC][1][42] = 22,
- [0][0][1][0][RTW89_IC][2][42] = 70,
[0][0][1][0][RTW89_KCC][1][42] = 24,
[0][0][1][0][RTW89_KCC][0][42] = 26,
[0][0][1][0][RTW89_ACMA][1][42] = 66,
@@ -38858,13 +38324,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][42] = 66,
[0][0][1][0][RTW89_THAILAND][0][42] = 22,
[0][0][1][0][RTW89_FCC][1][44] = 22,
- [0][0][1][0][RTW89_FCC][2][44] = 70,
[0][0][1][0][RTW89_ETSI][1][44] = 66,
[0][0][1][0][RTW89_ETSI][0][44] = 30,
[0][0][1][0][RTW89_MKK][1][44] = 44,
[0][0][1][0][RTW89_MKK][0][44] = 28,
[0][0][1][0][RTW89_IC][1][44] = 22,
- [0][0][1][0][RTW89_IC][2][44] = 70,
[0][0][1][0][RTW89_KCC][1][44] = 24,
[0][0][1][0][RTW89_KCC][0][44] = 26,
[0][0][1][0][RTW89_ACMA][1][44] = 66,
@@ -38877,13 +38341,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][44] = 68,
[0][0][1][0][RTW89_THAILAND][0][44] = 22,
[0][0][1][0][RTW89_FCC][1][45] = 22,
- [0][0][1][0][RTW89_FCC][2][45] = 127,
[0][0][1][0][RTW89_ETSI][1][45] = 127,
[0][0][1][0][RTW89_ETSI][0][45] = 127,
[0][0][1][0][RTW89_MKK][1][45] = 127,
[0][0][1][0][RTW89_MKK][0][45] = 127,
[0][0][1][0][RTW89_IC][1][45] = 22,
- [0][0][1][0][RTW89_IC][2][45] = 70,
[0][0][1][0][RTW89_KCC][1][45] = 24,
[0][0][1][0][RTW89_KCC][0][45] = 127,
[0][0][1][0][RTW89_ACMA][1][45] = 127,
@@ -38896,13 +38358,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][45] = 127,
[0][0][1][0][RTW89_THAILAND][0][45] = 127,
[0][0][1][0][RTW89_FCC][1][47] = 22,
- [0][0][1][0][RTW89_FCC][2][47] = 127,
[0][0][1][0][RTW89_ETSI][1][47] = 127,
[0][0][1][0][RTW89_ETSI][0][47] = 127,
[0][0][1][0][RTW89_MKK][1][47] = 127,
[0][0][1][0][RTW89_MKK][0][47] = 127,
[0][0][1][0][RTW89_IC][1][47] = 22,
- [0][0][1][0][RTW89_IC][2][47] = 70,
[0][0][1][0][RTW89_KCC][1][47] = 24,
[0][0][1][0][RTW89_KCC][0][47] = 127,
[0][0][1][0][RTW89_ACMA][1][47] = 127,
@@ -38915,13 +38375,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][47] = 127,
[0][0][1][0][RTW89_THAILAND][0][47] = 127,
[0][0][1][0][RTW89_FCC][1][49] = 24,
- [0][0][1][0][RTW89_FCC][2][49] = 127,
[0][0][1][0][RTW89_ETSI][1][49] = 127,
[0][0][1][0][RTW89_ETSI][0][49] = 127,
[0][0][1][0][RTW89_MKK][1][49] = 127,
[0][0][1][0][RTW89_MKK][0][49] = 127,
[0][0][1][0][RTW89_IC][1][49] = 24,
- [0][0][1][0][RTW89_IC][2][49] = 70,
[0][0][1][0][RTW89_KCC][1][49] = 24,
[0][0][1][0][RTW89_KCC][0][49] = 127,
[0][0][1][0][RTW89_ACMA][1][49] = 127,
@@ -38934,13 +38392,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][49] = 127,
[0][0][1][0][RTW89_THAILAND][0][49] = 127,
[0][0][1][0][RTW89_FCC][1][51] = 22,
- [0][0][1][0][RTW89_FCC][2][51] = 127,
[0][0][1][0][RTW89_ETSI][1][51] = 127,
[0][0][1][0][RTW89_ETSI][0][51] = 127,
[0][0][1][0][RTW89_MKK][1][51] = 127,
[0][0][1][0][RTW89_MKK][0][51] = 127,
[0][0][1][0][RTW89_IC][1][51] = 22,
- [0][0][1][0][RTW89_IC][2][51] = 70,
[0][0][1][0][RTW89_KCC][1][51] = 24,
[0][0][1][0][RTW89_KCC][0][51] = 127,
[0][0][1][0][RTW89_ACMA][1][51] = 127,
@@ -38953,13 +38409,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][51] = 127,
[0][0][1][0][RTW89_THAILAND][0][51] = 127,
[0][0][1][0][RTW89_FCC][1][53] = 22,
- [0][0][1][0][RTW89_FCC][2][53] = 127,
[0][0][1][0][RTW89_ETSI][1][53] = 127,
[0][0][1][0][RTW89_ETSI][0][53] = 127,
[0][0][1][0][RTW89_MKK][1][53] = 127,
[0][0][1][0][RTW89_MKK][0][53] = 127,
[0][0][1][0][RTW89_IC][1][53] = 22,
- [0][0][1][0][RTW89_IC][2][53] = 70,
[0][0][1][0][RTW89_KCC][1][53] = 24,
[0][0][1][0][RTW89_KCC][0][53] = 127,
[0][0][1][0][RTW89_ACMA][1][53] = 127,
@@ -38972,13 +38426,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][53] = 127,
[0][0][1][0][RTW89_THAILAND][0][53] = 127,
[0][0][1][0][RTW89_FCC][1][55] = 22,
- [0][0][1][0][RTW89_FCC][2][55] = 68,
[0][0][1][0][RTW89_ETSI][1][55] = 127,
[0][0][1][0][RTW89_ETSI][0][55] = 127,
[0][0][1][0][RTW89_MKK][1][55] = 127,
[0][0][1][0][RTW89_MKK][0][55] = 127,
[0][0][1][0][RTW89_IC][1][55] = 22,
- [0][0][1][0][RTW89_IC][2][55] = 68,
[0][0][1][0][RTW89_KCC][1][55] = 26,
[0][0][1][0][RTW89_KCC][0][55] = 127,
[0][0][1][0][RTW89_ACMA][1][55] = 127,
@@ -38991,13 +38443,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][55] = 127,
[0][0][1][0][RTW89_THAILAND][0][55] = 127,
[0][0][1][0][RTW89_FCC][1][57] = 22,
- [0][0][1][0][RTW89_FCC][2][57] = 68,
[0][0][1][0][RTW89_ETSI][1][57] = 127,
[0][0][1][0][RTW89_ETSI][0][57] = 127,
[0][0][1][0][RTW89_MKK][1][57] = 127,
[0][0][1][0][RTW89_MKK][0][57] = 127,
[0][0][1][0][RTW89_IC][1][57] = 22,
- [0][0][1][0][RTW89_IC][2][57] = 68,
[0][0][1][0][RTW89_KCC][1][57] = 26,
[0][0][1][0][RTW89_KCC][0][57] = 127,
[0][0][1][0][RTW89_ACMA][1][57] = 127,
@@ -39010,13 +38460,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][57] = 127,
[0][0][1][0][RTW89_THAILAND][0][57] = 127,
[0][0][1][0][RTW89_FCC][1][59] = 22,
- [0][0][1][0][RTW89_FCC][2][59] = 68,
[0][0][1][0][RTW89_ETSI][1][59] = 127,
[0][0][1][0][RTW89_ETSI][0][59] = 127,
[0][0][1][0][RTW89_MKK][1][59] = 127,
[0][0][1][0][RTW89_MKK][0][59] = 127,
[0][0][1][0][RTW89_IC][1][59] = 22,
- [0][0][1][0][RTW89_IC][2][59] = 68,
[0][0][1][0][RTW89_KCC][1][59] = 26,
[0][0][1][0][RTW89_KCC][0][59] = 127,
[0][0][1][0][RTW89_ACMA][1][59] = 127,
@@ -39029,13 +38477,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][59] = 127,
[0][0][1][0][RTW89_THAILAND][0][59] = 127,
[0][0][1][0][RTW89_FCC][1][60] = 22,
- [0][0][1][0][RTW89_FCC][2][60] = 68,
[0][0][1][0][RTW89_ETSI][1][60] = 127,
[0][0][1][0][RTW89_ETSI][0][60] = 127,
[0][0][1][0][RTW89_MKK][1][60] = 127,
[0][0][1][0][RTW89_MKK][0][60] = 127,
[0][0][1][0][RTW89_IC][1][60] = 22,
- [0][0][1][0][RTW89_IC][2][60] = 68,
[0][0][1][0][RTW89_KCC][1][60] = 26,
[0][0][1][0][RTW89_KCC][0][60] = 127,
[0][0][1][0][RTW89_ACMA][1][60] = 127,
@@ -39048,13 +38494,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][60] = 127,
[0][0][1][0][RTW89_THAILAND][0][60] = 127,
[0][0][1][0][RTW89_FCC][1][62] = 22,
- [0][0][1][0][RTW89_FCC][2][62] = 68,
[0][0][1][0][RTW89_ETSI][1][62] = 127,
[0][0][1][0][RTW89_ETSI][0][62] = 127,
[0][0][1][0][RTW89_MKK][1][62] = 127,
[0][0][1][0][RTW89_MKK][0][62] = 127,
[0][0][1][0][RTW89_IC][1][62] = 22,
- [0][0][1][0][RTW89_IC][2][62] = 68,
[0][0][1][0][RTW89_KCC][1][62] = 26,
[0][0][1][0][RTW89_KCC][0][62] = 127,
[0][0][1][0][RTW89_ACMA][1][62] = 127,
@@ -39067,13 +38511,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][62] = 127,
[0][0][1][0][RTW89_THAILAND][0][62] = 127,
[0][0][1][0][RTW89_FCC][1][64] = 22,
- [0][0][1][0][RTW89_FCC][2][64] = 68,
[0][0][1][0][RTW89_ETSI][1][64] = 127,
[0][0][1][0][RTW89_ETSI][0][64] = 127,
[0][0][1][0][RTW89_MKK][1][64] = 127,
[0][0][1][0][RTW89_MKK][0][64] = 127,
[0][0][1][0][RTW89_IC][1][64] = 22,
- [0][0][1][0][RTW89_IC][2][64] = 68,
[0][0][1][0][RTW89_KCC][1][64] = 26,
[0][0][1][0][RTW89_KCC][0][64] = 127,
[0][0][1][0][RTW89_ACMA][1][64] = 127,
@@ -39086,13 +38528,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][64] = 127,
[0][0][1][0][RTW89_THAILAND][0][64] = 127,
[0][0][1][0][RTW89_FCC][1][66] = 22,
- [0][0][1][0][RTW89_FCC][2][66] = 68,
[0][0][1][0][RTW89_ETSI][1][66] = 127,
[0][0][1][0][RTW89_ETSI][0][66] = 127,
[0][0][1][0][RTW89_MKK][1][66] = 127,
[0][0][1][0][RTW89_MKK][0][66] = 127,
[0][0][1][0][RTW89_IC][1][66] = 22,
- [0][0][1][0][RTW89_IC][2][66] = 68,
[0][0][1][0][RTW89_KCC][1][66] = 26,
[0][0][1][0][RTW89_KCC][0][66] = 127,
[0][0][1][0][RTW89_ACMA][1][66] = 127,
@@ -39105,13 +38545,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][66] = 127,
[0][0][1][0][RTW89_THAILAND][0][66] = 127,
[0][0][1][0][RTW89_FCC][1][68] = 22,
- [0][0][1][0][RTW89_FCC][2][68] = 68,
[0][0][1][0][RTW89_ETSI][1][68] = 127,
[0][0][1][0][RTW89_ETSI][0][68] = 127,
[0][0][1][0][RTW89_MKK][1][68] = 127,
[0][0][1][0][RTW89_MKK][0][68] = 127,
[0][0][1][0][RTW89_IC][1][68] = 22,
- [0][0][1][0][RTW89_IC][2][68] = 68,
[0][0][1][0][RTW89_KCC][1][68] = 26,
[0][0][1][0][RTW89_KCC][0][68] = 127,
[0][0][1][0][RTW89_ACMA][1][68] = 127,
@@ -39124,13 +38562,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][68] = 127,
[0][0][1][0][RTW89_THAILAND][0][68] = 127,
[0][0][1][0][RTW89_FCC][1][70] = 24,
- [0][0][1][0][RTW89_FCC][2][70] = 68,
[0][0][1][0][RTW89_ETSI][1][70] = 127,
[0][0][1][0][RTW89_ETSI][0][70] = 127,
[0][0][1][0][RTW89_MKK][1][70] = 127,
[0][0][1][0][RTW89_MKK][0][70] = 127,
[0][0][1][0][RTW89_IC][1][70] = 24,
- [0][0][1][0][RTW89_IC][2][70] = 68,
[0][0][1][0][RTW89_KCC][1][70] = 26,
[0][0][1][0][RTW89_KCC][0][70] = 127,
[0][0][1][0][RTW89_ACMA][1][70] = 127,
@@ -39143,13 +38579,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][70] = 127,
[0][0][1][0][RTW89_THAILAND][0][70] = 127,
[0][0][1][0][RTW89_FCC][1][72] = 22,
- [0][0][1][0][RTW89_FCC][2][72] = 68,
[0][0][1][0][RTW89_ETSI][1][72] = 127,
[0][0][1][0][RTW89_ETSI][0][72] = 127,
[0][0][1][0][RTW89_MKK][1][72] = 127,
[0][0][1][0][RTW89_MKK][0][72] = 127,
[0][0][1][0][RTW89_IC][1][72] = 22,
- [0][0][1][0][RTW89_IC][2][72] = 68,
[0][0][1][0][RTW89_KCC][1][72] = 26,
[0][0][1][0][RTW89_KCC][0][72] = 127,
[0][0][1][0][RTW89_ACMA][1][72] = 127,
@@ -39162,13 +38596,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][72] = 127,
[0][0][1][0][RTW89_THAILAND][0][72] = 127,
[0][0][1][0][RTW89_FCC][1][74] = 22,
- [0][0][1][0][RTW89_FCC][2][74] = 68,
[0][0][1][0][RTW89_ETSI][1][74] = 127,
[0][0][1][0][RTW89_ETSI][0][74] = 127,
[0][0][1][0][RTW89_MKK][1][74] = 127,
[0][0][1][0][RTW89_MKK][0][74] = 127,
[0][0][1][0][RTW89_IC][1][74] = 22,
- [0][0][1][0][RTW89_IC][2][74] = 68,
[0][0][1][0][RTW89_KCC][1][74] = 26,
[0][0][1][0][RTW89_KCC][0][74] = 127,
[0][0][1][0][RTW89_ACMA][1][74] = 127,
@@ -39181,13 +38613,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][74] = 127,
[0][0][1][0][RTW89_THAILAND][0][74] = 127,
[0][0][1][0][RTW89_FCC][1][75] = 22,
- [0][0][1][0][RTW89_FCC][2][75] = 68,
[0][0][1][0][RTW89_ETSI][1][75] = 127,
[0][0][1][0][RTW89_ETSI][0][75] = 127,
[0][0][1][0][RTW89_MKK][1][75] = 127,
[0][0][1][0][RTW89_MKK][0][75] = 127,
[0][0][1][0][RTW89_IC][1][75] = 22,
- [0][0][1][0][RTW89_IC][2][75] = 68,
[0][0][1][0][RTW89_KCC][1][75] = 26,
[0][0][1][0][RTW89_KCC][0][75] = 127,
[0][0][1][0][RTW89_ACMA][1][75] = 127,
@@ -39200,13 +38630,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][75] = 127,
[0][0][1][0][RTW89_THAILAND][0][75] = 127,
[0][0][1][0][RTW89_FCC][1][77] = 22,
- [0][0][1][0][RTW89_FCC][2][77] = 68,
[0][0][1][0][RTW89_ETSI][1][77] = 127,
[0][0][1][0][RTW89_ETSI][0][77] = 127,
[0][0][1][0][RTW89_MKK][1][77] = 127,
[0][0][1][0][RTW89_MKK][0][77] = 127,
[0][0][1][0][RTW89_IC][1][77] = 22,
- [0][0][1][0][RTW89_IC][2][77] = 68,
[0][0][1][0][RTW89_KCC][1][77] = 26,
[0][0][1][0][RTW89_KCC][0][77] = 127,
[0][0][1][0][RTW89_ACMA][1][77] = 127,
@@ -39219,13 +38647,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][77] = 127,
[0][0][1][0][RTW89_THAILAND][0][77] = 127,
[0][0][1][0][RTW89_FCC][1][79] = 22,
- [0][0][1][0][RTW89_FCC][2][79] = 68,
[0][0][1][0][RTW89_ETSI][1][79] = 127,
[0][0][1][0][RTW89_ETSI][0][79] = 127,
[0][0][1][0][RTW89_MKK][1][79] = 127,
[0][0][1][0][RTW89_MKK][0][79] = 127,
[0][0][1][0][RTW89_IC][1][79] = 22,
- [0][0][1][0][RTW89_IC][2][79] = 68,
[0][0][1][0][RTW89_KCC][1][79] = 26,
[0][0][1][0][RTW89_KCC][0][79] = 127,
[0][0][1][0][RTW89_ACMA][1][79] = 127,
@@ -39238,13 +38664,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][79] = 127,
[0][0][1][0][RTW89_THAILAND][0][79] = 127,
[0][0][1][0][RTW89_FCC][1][81] = 22,
- [0][0][1][0][RTW89_FCC][2][81] = 68,
[0][0][1][0][RTW89_ETSI][1][81] = 127,
[0][0][1][0][RTW89_ETSI][0][81] = 127,
[0][0][1][0][RTW89_MKK][1][81] = 127,
[0][0][1][0][RTW89_MKK][0][81] = 127,
[0][0][1][0][RTW89_IC][1][81] = 22,
- [0][0][1][0][RTW89_IC][2][81] = 68,
[0][0][1][0][RTW89_KCC][1][81] = 26,
[0][0][1][0][RTW89_KCC][0][81] = 127,
[0][0][1][0][RTW89_ACMA][1][81] = 127,
@@ -39257,13 +38681,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][81] = 127,
[0][0][1][0][RTW89_THAILAND][0][81] = 127,
[0][0][1][0][RTW89_FCC][1][83] = 22,
- [0][0][1][0][RTW89_FCC][2][83] = 68,
[0][0][1][0][RTW89_ETSI][1][83] = 127,
[0][0][1][0][RTW89_ETSI][0][83] = 127,
[0][0][1][0][RTW89_MKK][1][83] = 127,
[0][0][1][0][RTW89_MKK][0][83] = 127,
[0][0][1][0][RTW89_IC][1][83] = 22,
- [0][0][1][0][RTW89_IC][2][83] = 68,
[0][0][1][0][RTW89_KCC][1][83] = 32,
[0][0][1][0][RTW89_KCC][0][83] = 127,
[0][0][1][0][RTW89_ACMA][1][83] = 127,
@@ -39276,13 +38698,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][83] = 127,
[0][0][1][0][RTW89_THAILAND][0][83] = 127,
[0][0][1][0][RTW89_FCC][1][85] = 22,
- [0][0][1][0][RTW89_FCC][2][85] = 68,
[0][0][1][0][RTW89_ETSI][1][85] = 127,
[0][0][1][0][RTW89_ETSI][0][85] = 127,
[0][0][1][0][RTW89_MKK][1][85] = 127,
[0][0][1][0][RTW89_MKK][0][85] = 127,
[0][0][1][0][RTW89_IC][1][85] = 22,
- [0][0][1][0][RTW89_IC][2][85] = 68,
[0][0][1][0][RTW89_KCC][1][85] = 32,
[0][0][1][0][RTW89_KCC][0][85] = 127,
[0][0][1][0][RTW89_ACMA][1][85] = 127,
@@ -39295,13 +38715,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][85] = 127,
[0][0][1][0][RTW89_THAILAND][0][85] = 127,
[0][0][1][0][RTW89_FCC][1][87] = 22,
- [0][0][1][0][RTW89_FCC][2][87] = 127,
[0][0][1][0][RTW89_ETSI][1][87] = 127,
[0][0][1][0][RTW89_ETSI][0][87] = 127,
[0][0][1][0][RTW89_MKK][1][87] = 127,
[0][0][1][0][RTW89_MKK][0][87] = 127,
[0][0][1][0][RTW89_IC][1][87] = 22,
- [0][0][1][0][RTW89_IC][2][87] = 127,
[0][0][1][0][RTW89_KCC][1][87] = 32,
[0][0][1][0][RTW89_KCC][0][87] = 127,
[0][0][1][0][RTW89_ACMA][1][87] = 127,
@@ -39314,13 +38732,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][87] = 127,
[0][0][1][0][RTW89_THAILAND][0][87] = 127,
[0][0][1][0][RTW89_FCC][1][89] = 22,
- [0][0][1][0][RTW89_FCC][2][89] = 127,
[0][0][1][0][RTW89_ETSI][1][89] = 127,
[0][0][1][0][RTW89_ETSI][0][89] = 127,
[0][0][1][0][RTW89_MKK][1][89] = 127,
[0][0][1][0][RTW89_MKK][0][89] = 127,
[0][0][1][0][RTW89_IC][1][89] = 22,
- [0][0][1][0][RTW89_IC][2][89] = 127,
[0][0][1][0][RTW89_KCC][1][89] = 32,
[0][0][1][0][RTW89_KCC][0][89] = 127,
[0][0][1][0][RTW89_ACMA][1][89] = 127,
@@ -39333,13 +38749,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][89] = 127,
[0][0][1][0][RTW89_THAILAND][0][89] = 127,
[0][0][1][0][RTW89_FCC][1][90] = 22,
- [0][0][1][0][RTW89_FCC][2][90] = 127,
[0][0][1][0][RTW89_ETSI][1][90] = 127,
[0][0][1][0][RTW89_ETSI][0][90] = 127,
[0][0][1][0][RTW89_MKK][1][90] = 127,
[0][0][1][0][RTW89_MKK][0][90] = 127,
[0][0][1][0][RTW89_IC][1][90] = 22,
- [0][0][1][0][RTW89_IC][2][90] = 127,
[0][0][1][0][RTW89_KCC][1][90] = 32,
[0][0][1][0][RTW89_KCC][0][90] = 127,
[0][0][1][0][RTW89_ACMA][1][90] = 127,
@@ -39352,13 +38766,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][90] = 127,
[0][0][1][0][RTW89_THAILAND][0][90] = 127,
[0][0][1][0][RTW89_FCC][1][92] = 22,
- [0][0][1][0][RTW89_FCC][2][92] = 127,
[0][0][1][0][RTW89_ETSI][1][92] = 127,
[0][0][1][0][RTW89_ETSI][0][92] = 127,
[0][0][1][0][RTW89_MKK][1][92] = 127,
[0][0][1][0][RTW89_MKK][0][92] = 127,
[0][0][1][0][RTW89_IC][1][92] = 22,
- [0][0][1][0][RTW89_IC][2][92] = 127,
[0][0][1][0][RTW89_KCC][1][92] = 32,
[0][0][1][0][RTW89_KCC][0][92] = 127,
[0][0][1][0][RTW89_ACMA][1][92] = 127,
@@ -39371,13 +38783,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][92] = 127,
[0][0][1][0][RTW89_THAILAND][0][92] = 127,
[0][0][1][0][RTW89_FCC][1][94] = 22,
- [0][0][1][0][RTW89_FCC][2][94] = 127,
[0][0][1][0][RTW89_ETSI][1][94] = 127,
[0][0][1][0][RTW89_ETSI][0][94] = 127,
[0][0][1][0][RTW89_MKK][1][94] = 127,
[0][0][1][0][RTW89_MKK][0][94] = 127,
[0][0][1][0][RTW89_IC][1][94] = 22,
- [0][0][1][0][RTW89_IC][2][94] = 127,
[0][0][1][0][RTW89_KCC][1][94] = 32,
[0][0][1][0][RTW89_KCC][0][94] = 127,
[0][0][1][0][RTW89_ACMA][1][94] = 127,
@@ -39390,13 +38800,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][94] = 127,
[0][0][1][0][RTW89_THAILAND][0][94] = 127,
[0][0][1][0][RTW89_FCC][1][96] = 22,
- [0][0][1][0][RTW89_FCC][2][96] = 127,
[0][0][1][0][RTW89_ETSI][1][96] = 127,
[0][0][1][0][RTW89_ETSI][0][96] = 127,
[0][0][1][0][RTW89_MKK][1][96] = 127,
[0][0][1][0][RTW89_MKK][0][96] = 127,
[0][0][1][0][RTW89_IC][1][96] = 22,
- [0][0][1][0][RTW89_IC][2][96] = 127,
[0][0][1][0][RTW89_KCC][1][96] = 32,
[0][0][1][0][RTW89_KCC][0][96] = 127,
[0][0][1][0][RTW89_ACMA][1][96] = 127,
@@ -39409,13 +38817,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][96] = 127,
[0][0][1][0][RTW89_THAILAND][0][96] = 127,
[0][0][1][0][RTW89_FCC][1][98] = 22,
- [0][0][1][0][RTW89_FCC][2][98] = 127,
[0][0][1][0][RTW89_ETSI][1][98] = 127,
[0][0][1][0][RTW89_ETSI][0][98] = 127,
[0][0][1][0][RTW89_MKK][1][98] = 127,
[0][0][1][0][RTW89_MKK][0][98] = 127,
[0][0][1][0][RTW89_IC][1][98] = 22,
- [0][0][1][0][RTW89_IC][2][98] = 127,
[0][0][1][0][RTW89_KCC][1][98] = 32,
[0][0][1][0][RTW89_KCC][0][98] = 127,
[0][0][1][0][RTW89_ACMA][1][98] = 127,
@@ -39428,13 +38834,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][98] = 127,
[0][0][1][0][RTW89_THAILAND][0][98] = 127,
[0][0][1][0][RTW89_FCC][1][100] = 22,
- [0][0][1][0][RTW89_FCC][2][100] = 127,
[0][0][1][0][RTW89_ETSI][1][100] = 127,
[0][0][1][0][RTW89_ETSI][0][100] = 127,
[0][0][1][0][RTW89_MKK][1][100] = 127,
[0][0][1][0][RTW89_MKK][0][100] = 127,
[0][0][1][0][RTW89_IC][1][100] = 22,
- [0][0][1][0][RTW89_IC][2][100] = 127,
[0][0][1][0][RTW89_KCC][1][100] = 32,
[0][0][1][0][RTW89_KCC][0][100] = 127,
[0][0][1][0][RTW89_ACMA][1][100] = 127,
@@ -39447,13 +38851,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][100] = 127,
[0][0][1][0][RTW89_THAILAND][0][100] = 127,
[0][0][1][0][RTW89_FCC][1][102] = 22,
- [0][0][1][0][RTW89_FCC][2][102] = 127,
[0][0][1][0][RTW89_ETSI][1][102] = 127,
[0][0][1][0][RTW89_ETSI][0][102] = 127,
[0][0][1][0][RTW89_MKK][1][102] = 127,
[0][0][1][0][RTW89_MKK][0][102] = 127,
[0][0][1][0][RTW89_IC][1][102] = 22,
- [0][0][1][0][RTW89_IC][2][102] = 127,
[0][0][1][0][RTW89_KCC][1][102] = 32,
[0][0][1][0][RTW89_KCC][0][102] = 127,
[0][0][1][0][RTW89_ACMA][1][102] = 127,
@@ -39466,13 +38868,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][102] = 127,
[0][0][1][0][RTW89_THAILAND][0][102] = 127,
[0][0][1][0][RTW89_FCC][1][104] = 22,
- [0][0][1][0][RTW89_FCC][2][104] = 127,
[0][0][1][0][RTW89_ETSI][1][104] = 127,
[0][0][1][0][RTW89_ETSI][0][104] = 127,
[0][0][1][0][RTW89_MKK][1][104] = 127,
[0][0][1][0][RTW89_MKK][0][104] = 127,
[0][0][1][0][RTW89_IC][1][104] = 22,
- [0][0][1][0][RTW89_IC][2][104] = 127,
[0][0][1][0][RTW89_KCC][1][104] = 32,
[0][0][1][0][RTW89_KCC][0][104] = 127,
[0][0][1][0][RTW89_ACMA][1][104] = 127,
@@ -39485,13 +38885,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][104] = 127,
[0][0][1][0][RTW89_THAILAND][0][104] = 127,
[0][0][1][0][RTW89_FCC][1][105] = 22,
- [0][0][1][0][RTW89_FCC][2][105] = 127,
[0][0][1][0][RTW89_ETSI][1][105] = 127,
[0][0][1][0][RTW89_ETSI][0][105] = 127,
[0][0][1][0][RTW89_MKK][1][105] = 127,
[0][0][1][0][RTW89_MKK][0][105] = 127,
[0][0][1][0][RTW89_IC][1][105] = 22,
- [0][0][1][0][RTW89_IC][2][105] = 127,
[0][0][1][0][RTW89_KCC][1][105] = 32,
[0][0][1][0][RTW89_KCC][0][105] = 127,
[0][0][1][0][RTW89_ACMA][1][105] = 127,
@@ -39504,13 +38902,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][105] = 127,
[0][0][1][0][RTW89_THAILAND][0][105] = 127,
[0][0][1][0][RTW89_FCC][1][107] = 24,
- [0][0][1][0][RTW89_FCC][2][107] = 127,
[0][0][1][0][RTW89_ETSI][1][107] = 127,
[0][0][1][0][RTW89_ETSI][0][107] = 127,
[0][0][1][0][RTW89_MKK][1][107] = 127,
[0][0][1][0][RTW89_MKK][0][107] = 127,
[0][0][1][0][RTW89_IC][1][107] = 24,
- [0][0][1][0][RTW89_IC][2][107] = 127,
[0][0][1][0][RTW89_KCC][1][107] = 32,
[0][0][1][0][RTW89_KCC][0][107] = 127,
[0][0][1][0][RTW89_ACMA][1][107] = 127,
@@ -39523,13 +38919,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][107] = 127,
[0][0][1][0][RTW89_THAILAND][0][107] = 127,
[0][0][1][0][RTW89_FCC][1][109] = 24,
- [0][0][1][0][RTW89_FCC][2][109] = 127,
[0][0][1][0][RTW89_ETSI][1][109] = 127,
[0][0][1][0][RTW89_ETSI][0][109] = 127,
[0][0][1][0][RTW89_MKK][1][109] = 127,
[0][0][1][0][RTW89_MKK][0][109] = 127,
[0][0][1][0][RTW89_IC][1][109] = 24,
- [0][0][1][0][RTW89_IC][2][109] = 127,
[0][0][1][0][RTW89_KCC][1][109] = 32,
[0][0][1][0][RTW89_KCC][0][109] = 127,
[0][0][1][0][RTW89_ACMA][1][109] = 127,
@@ -39542,13 +38936,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][109] = 127,
[0][0][1][0][RTW89_THAILAND][0][109] = 127,
[0][0][1][0][RTW89_FCC][1][111] = 127,
- [0][0][1][0][RTW89_FCC][2][111] = 127,
[0][0][1][0][RTW89_ETSI][1][111] = 127,
[0][0][1][0][RTW89_ETSI][0][111] = 127,
[0][0][1][0][RTW89_MKK][1][111] = 127,
[0][0][1][0][RTW89_MKK][0][111] = 127,
[0][0][1][0][RTW89_IC][1][111] = 127,
- [0][0][1][0][RTW89_IC][2][111] = 127,
[0][0][1][0][RTW89_KCC][1][111] = 127,
[0][0][1][0][RTW89_KCC][0][111] = 127,
[0][0][1][0][RTW89_ACMA][1][111] = 127,
@@ -39561,13 +38953,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][111] = 127,
[0][0][1][0][RTW89_THAILAND][0][111] = 127,
[0][0][1][0][RTW89_FCC][1][113] = 127,
- [0][0][1][0][RTW89_FCC][2][113] = 127,
[0][0][1][0][RTW89_ETSI][1][113] = 127,
[0][0][1][0][RTW89_ETSI][0][113] = 127,
[0][0][1][0][RTW89_MKK][1][113] = 127,
[0][0][1][0][RTW89_MKK][0][113] = 127,
[0][0][1][0][RTW89_IC][1][113] = 127,
- [0][0][1][0][RTW89_IC][2][113] = 127,
[0][0][1][0][RTW89_KCC][1][113] = 127,
[0][0][1][0][RTW89_KCC][0][113] = 127,
[0][0][1][0][RTW89_ACMA][1][113] = 127,
@@ -39580,13 +38970,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][113] = 127,
[0][0][1][0][RTW89_THAILAND][0][113] = 127,
[0][0][1][0][RTW89_FCC][1][115] = 127,
- [0][0][1][0][RTW89_FCC][2][115] = 127,
[0][0][1][0][RTW89_ETSI][1][115] = 127,
[0][0][1][0][RTW89_ETSI][0][115] = 127,
[0][0][1][0][RTW89_MKK][1][115] = 127,
[0][0][1][0][RTW89_MKK][0][115] = 127,
[0][0][1][0][RTW89_IC][1][115] = 127,
- [0][0][1][0][RTW89_IC][2][115] = 127,
[0][0][1][0][RTW89_KCC][1][115] = 127,
[0][0][1][0][RTW89_KCC][0][115] = 127,
[0][0][1][0][RTW89_ACMA][1][115] = 127,
@@ -39599,13 +38987,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][115] = 127,
[0][0][1][0][RTW89_THAILAND][0][115] = 127,
[0][0][1][0][RTW89_FCC][1][117] = 127,
- [0][0][1][0][RTW89_FCC][2][117] = 127,
[0][0][1][0][RTW89_ETSI][1][117] = 127,
[0][0][1][0][RTW89_ETSI][0][117] = 127,
[0][0][1][0][RTW89_MKK][1][117] = 127,
[0][0][1][0][RTW89_MKK][0][117] = 127,
[0][0][1][0][RTW89_IC][1][117] = 127,
- [0][0][1][0][RTW89_IC][2][117] = 127,
[0][0][1][0][RTW89_KCC][1][117] = 127,
[0][0][1][0][RTW89_KCC][0][117] = 127,
[0][0][1][0][RTW89_ACMA][1][117] = 127,
@@ -39618,13 +39004,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][117] = 127,
[0][0][1][0][RTW89_THAILAND][0][117] = 127,
[0][0][1][0][RTW89_FCC][1][119] = 127,
- [0][0][1][0][RTW89_FCC][2][119] = 127,
[0][0][1][0][RTW89_ETSI][1][119] = 127,
[0][0][1][0][RTW89_ETSI][0][119] = 127,
[0][0][1][0][RTW89_MKK][1][119] = 127,
[0][0][1][0][RTW89_MKK][0][119] = 127,
[0][0][1][0][RTW89_IC][1][119] = 127,
- [0][0][1][0][RTW89_IC][2][119] = 127,
[0][0][1][0][RTW89_KCC][1][119] = 127,
[0][0][1][0][RTW89_KCC][0][119] = 127,
[0][0][1][0][RTW89_ACMA][1][119] = 127,
@@ -39637,13 +39021,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][119] = 127,
[0][0][1][0][RTW89_THAILAND][0][119] = 127,
[0][1][1][0][RTW89_FCC][1][0] = -2,
- [0][1][1][0][RTW89_FCC][2][0] = 54,
[0][1][1][0][RTW89_ETSI][1][0] = 54,
[0][1][1][0][RTW89_ETSI][0][0] = 18,
[0][1][1][0][RTW89_MKK][1][0] = 56,
[0][1][1][0][RTW89_MKK][0][0] = 16,
[0][1][1][0][RTW89_IC][1][0] = -2,
- [0][1][1][0][RTW89_IC][2][0] = 54,
[0][1][1][0][RTW89_KCC][1][0] = 12,
[0][1][1][0][RTW89_KCC][0][0] = 10,
[0][1][1][0][RTW89_ACMA][1][0] = 54,
@@ -39656,13 +39038,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][0] = 44,
[0][1][1][0][RTW89_THAILAND][0][0] = -2,
[0][1][1][0][RTW89_FCC][1][2] = -4,
- [0][1][1][0][RTW89_FCC][2][2] = 54,
[0][1][1][0][RTW89_ETSI][1][2] = 54,
[0][1][1][0][RTW89_ETSI][0][2] = 18,
[0][1][1][0][RTW89_MKK][1][2] = 54,
[0][1][1][0][RTW89_MKK][0][2] = 16,
[0][1][1][0][RTW89_IC][1][2] = -4,
- [0][1][1][0][RTW89_IC][2][2] = 54,
[0][1][1][0][RTW89_KCC][1][2] = 12,
[0][1][1][0][RTW89_KCC][0][2] = 12,
[0][1][1][0][RTW89_ACMA][1][2] = 54,
@@ -39675,13 +39055,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][2] = 44,
[0][1][1][0][RTW89_THAILAND][0][2] = -4,
[0][1][1][0][RTW89_FCC][1][4] = -4,
- [0][1][1][0][RTW89_FCC][2][4] = 54,
[0][1][1][0][RTW89_ETSI][1][4] = 54,
[0][1][1][0][RTW89_ETSI][0][4] = 18,
[0][1][1][0][RTW89_MKK][1][4] = 54,
[0][1][1][0][RTW89_MKK][0][4] = 16,
[0][1][1][0][RTW89_IC][1][4] = -4,
- [0][1][1][0][RTW89_IC][2][4] = 54,
[0][1][1][0][RTW89_KCC][1][4] = 12,
[0][1][1][0][RTW89_KCC][0][4] = 12,
[0][1][1][0][RTW89_ACMA][1][4] = 54,
@@ -39694,13 +39072,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][4] = 44,
[0][1][1][0][RTW89_THAILAND][0][4] = -4,
[0][1][1][0][RTW89_FCC][1][6] = -4,
- [0][1][1][0][RTW89_FCC][2][6] = 54,
[0][1][1][0][RTW89_ETSI][1][6] = 54,
[0][1][1][0][RTW89_ETSI][0][6] = 18,
[0][1][1][0][RTW89_MKK][1][6] = 54,
[0][1][1][0][RTW89_MKK][0][6] = 16,
[0][1][1][0][RTW89_IC][1][6] = -4,
- [0][1][1][0][RTW89_IC][2][6] = 54,
[0][1][1][0][RTW89_KCC][1][6] = 12,
[0][1][1][0][RTW89_KCC][0][6] = 12,
[0][1][1][0][RTW89_ACMA][1][6] = 54,
@@ -39713,13 +39089,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][6] = 44,
[0][1][1][0][RTW89_THAILAND][0][6] = -4,
[0][1][1][0][RTW89_FCC][1][8] = -4,
- [0][1][1][0][RTW89_FCC][2][8] = 54,
[0][1][1][0][RTW89_ETSI][1][8] = 54,
[0][1][1][0][RTW89_ETSI][0][8] = 18,
[0][1][1][0][RTW89_MKK][1][8] = 54,
[0][1][1][0][RTW89_MKK][0][8] = 16,
[0][1][1][0][RTW89_IC][1][8] = -4,
- [0][1][1][0][RTW89_IC][2][8] = 54,
[0][1][1][0][RTW89_KCC][1][8] = 12,
[0][1][1][0][RTW89_KCC][0][8] = 12,
[0][1][1][0][RTW89_ACMA][1][8] = 54,
@@ -39732,13 +39106,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][8] = 44,
[0][1][1][0][RTW89_THAILAND][0][8] = -4,
[0][1][1][0][RTW89_FCC][1][10] = -4,
- [0][1][1][0][RTW89_FCC][2][10] = 54,
[0][1][1][0][RTW89_ETSI][1][10] = 54,
[0][1][1][0][RTW89_ETSI][0][10] = 18,
[0][1][1][0][RTW89_MKK][1][10] = 54,
[0][1][1][0][RTW89_MKK][0][10] = 16,
[0][1][1][0][RTW89_IC][1][10] = -4,
- [0][1][1][0][RTW89_IC][2][10] = 54,
[0][1][1][0][RTW89_KCC][1][10] = 12,
[0][1][1][0][RTW89_KCC][0][10] = 12,
[0][1][1][0][RTW89_ACMA][1][10] = 54,
@@ -39751,13 +39123,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][10] = 44,
[0][1][1][0][RTW89_THAILAND][0][10] = -4,
[0][1][1][0][RTW89_FCC][1][12] = -4,
- [0][1][1][0][RTW89_FCC][2][12] = 54,
[0][1][1][0][RTW89_ETSI][1][12] = 54,
[0][1][1][0][RTW89_ETSI][0][12] = 18,
[0][1][1][0][RTW89_MKK][1][12] = 54,
[0][1][1][0][RTW89_MKK][0][12] = 16,
[0][1][1][0][RTW89_IC][1][12] = -4,
- [0][1][1][0][RTW89_IC][2][12] = 54,
[0][1][1][0][RTW89_KCC][1][12] = 12,
[0][1][1][0][RTW89_KCC][0][12] = 12,
[0][1][1][0][RTW89_ACMA][1][12] = 54,
@@ -39770,13 +39140,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][12] = 44,
[0][1][1][0][RTW89_THAILAND][0][12] = -4,
[0][1][1][0][RTW89_FCC][1][14] = -4,
- [0][1][1][0][RTW89_FCC][2][14] = 54,
[0][1][1][0][RTW89_ETSI][1][14] = 54,
[0][1][1][0][RTW89_ETSI][0][14] = 18,
[0][1][1][0][RTW89_MKK][1][14] = 54,
[0][1][1][0][RTW89_MKK][0][14] = 16,
[0][1][1][0][RTW89_IC][1][14] = -4,
- [0][1][1][0][RTW89_IC][2][14] = 54,
[0][1][1][0][RTW89_KCC][1][14] = 12,
[0][1][1][0][RTW89_KCC][0][14] = 12,
[0][1][1][0][RTW89_ACMA][1][14] = 54,
@@ -39789,13 +39157,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][14] = 44,
[0][1][1][0][RTW89_THAILAND][0][14] = -4,
[0][1][1][0][RTW89_FCC][1][15] = -4,
- [0][1][1][0][RTW89_FCC][2][15] = 54,
[0][1][1][0][RTW89_ETSI][1][15] = 54,
[0][1][1][0][RTW89_ETSI][0][15] = 18,
[0][1][1][0][RTW89_MKK][1][15] = 54,
[0][1][1][0][RTW89_MKK][0][15] = 16,
[0][1][1][0][RTW89_IC][1][15] = -4,
- [0][1][1][0][RTW89_IC][2][15] = 54,
[0][1][1][0][RTW89_KCC][1][15] = 12,
[0][1][1][0][RTW89_KCC][0][15] = 12,
[0][1][1][0][RTW89_ACMA][1][15] = 54,
@@ -39808,13 +39174,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][15] = 44,
[0][1][1][0][RTW89_THAILAND][0][15] = -4,
[0][1][1][0][RTW89_FCC][1][17] = -4,
- [0][1][1][0][RTW89_FCC][2][17] = 54,
[0][1][1][0][RTW89_ETSI][1][17] = 54,
[0][1][1][0][RTW89_ETSI][0][17] = 18,
[0][1][1][0][RTW89_MKK][1][17] = 54,
[0][1][1][0][RTW89_MKK][0][17] = 16,
[0][1][1][0][RTW89_IC][1][17] = -4,
- [0][1][1][0][RTW89_IC][2][17] = 54,
[0][1][1][0][RTW89_KCC][1][17] = 12,
[0][1][1][0][RTW89_KCC][0][17] = 12,
[0][1][1][0][RTW89_ACMA][1][17] = 54,
@@ -39827,13 +39191,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][17] = 44,
[0][1][1][0][RTW89_THAILAND][0][17] = -4,
[0][1][1][0][RTW89_FCC][1][19] = -4,
- [0][1][1][0][RTW89_FCC][2][19] = 54,
[0][1][1][0][RTW89_ETSI][1][19] = 54,
[0][1][1][0][RTW89_ETSI][0][19] = 18,
[0][1][1][0][RTW89_MKK][1][19] = 54,
[0][1][1][0][RTW89_MKK][0][19] = 16,
[0][1][1][0][RTW89_IC][1][19] = -4,
- [0][1][1][0][RTW89_IC][2][19] = 54,
[0][1][1][0][RTW89_KCC][1][19] = 12,
[0][1][1][0][RTW89_KCC][0][19] = 12,
[0][1][1][0][RTW89_ACMA][1][19] = 54,
@@ -39846,13 +39208,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][19] = 44,
[0][1][1][0][RTW89_THAILAND][0][19] = -4,
[0][1][1][0][RTW89_FCC][1][21] = -4,
- [0][1][1][0][RTW89_FCC][2][21] = 54,
[0][1][1][0][RTW89_ETSI][1][21] = 54,
[0][1][1][0][RTW89_ETSI][0][21] = 18,
[0][1][1][0][RTW89_MKK][1][21] = 54,
[0][1][1][0][RTW89_MKK][0][21] = 16,
[0][1][1][0][RTW89_IC][1][21] = -4,
- [0][1][1][0][RTW89_IC][2][21] = 54,
[0][1][1][0][RTW89_KCC][1][21] = 12,
[0][1][1][0][RTW89_KCC][0][21] = 12,
[0][1][1][0][RTW89_ACMA][1][21] = 54,
@@ -39865,13 +39225,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][21] = 44,
[0][1][1][0][RTW89_THAILAND][0][21] = -4,
[0][1][1][0][RTW89_FCC][1][23] = -4,
- [0][1][1][0][RTW89_FCC][2][23] = 68,
[0][1][1][0][RTW89_ETSI][1][23] = 54,
[0][1][1][0][RTW89_ETSI][0][23] = 18,
[0][1][1][0][RTW89_MKK][1][23] = 54,
[0][1][1][0][RTW89_MKK][0][23] = 16,
[0][1][1][0][RTW89_IC][1][23] = -4,
- [0][1][1][0][RTW89_IC][2][23] = 68,
[0][1][1][0][RTW89_KCC][1][23] = 12,
[0][1][1][0][RTW89_KCC][0][23] = 10,
[0][1][1][0][RTW89_ACMA][1][23] = 54,
@@ -39884,13 +39242,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][23] = 44,
[0][1][1][0][RTW89_THAILAND][0][23] = -4,
[0][1][1][0][RTW89_FCC][1][25] = -4,
- [0][1][1][0][RTW89_FCC][2][25] = 68,
[0][1][1][0][RTW89_ETSI][1][25] = 54,
[0][1][1][0][RTW89_ETSI][0][25] = 18,
[0][1][1][0][RTW89_MKK][1][25] = 54,
[0][1][1][0][RTW89_MKK][0][25] = 16,
[0][1][1][0][RTW89_IC][1][25] = -4,
- [0][1][1][0][RTW89_IC][2][25] = 68,
[0][1][1][0][RTW89_KCC][1][25] = 12,
[0][1][1][0][RTW89_KCC][0][25] = 14,
[0][1][1][0][RTW89_ACMA][1][25] = 54,
@@ -39903,13 +39259,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][25] = 42,
[0][1][1][0][RTW89_THAILAND][0][25] = -4,
[0][1][1][0][RTW89_FCC][1][27] = -4,
- [0][1][1][0][RTW89_FCC][2][27] = 68,
[0][1][1][0][RTW89_ETSI][1][27] = 54,
[0][1][1][0][RTW89_ETSI][0][27] = 18,
[0][1][1][0][RTW89_MKK][1][27] = 54,
[0][1][1][0][RTW89_MKK][0][27] = 16,
[0][1][1][0][RTW89_IC][1][27] = -4,
- [0][1][1][0][RTW89_IC][2][27] = 68,
[0][1][1][0][RTW89_KCC][1][27] = 12,
[0][1][1][0][RTW89_KCC][0][27] = 14,
[0][1][1][0][RTW89_ACMA][1][27] = 54,
@@ -39922,13 +39276,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][27] = 42,
[0][1][1][0][RTW89_THAILAND][0][27] = -4,
[0][1][1][0][RTW89_FCC][1][29] = -4,
- [0][1][1][0][RTW89_FCC][2][29] = 68,
[0][1][1][0][RTW89_ETSI][1][29] = 54,
[0][1][1][0][RTW89_ETSI][0][29] = 18,
[0][1][1][0][RTW89_MKK][1][29] = 54,
[0][1][1][0][RTW89_MKK][0][29] = 16,
[0][1][1][0][RTW89_IC][1][29] = -4,
- [0][1][1][0][RTW89_IC][2][29] = 68,
[0][1][1][0][RTW89_KCC][1][29] = 12,
[0][1][1][0][RTW89_KCC][0][29] = 14,
[0][1][1][0][RTW89_ACMA][1][29] = 54,
@@ -39941,13 +39293,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][29] = 42,
[0][1][1][0][RTW89_THAILAND][0][29] = -4,
[0][1][1][0][RTW89_FCC][1][30] = -4,
- [0][1][1][0][RTW89_FCC][2][30] = 68,
[0][1][1][0][RTW89_ETSI][1][30] = 54,
[0][1][1][0][RTW89_ETSI][0][30] = 18,
[0][1][1][0][RTW89_MKK][1][30] = 54,
[0][1][1][0][RTW89_MKK][0][30] = 16,
[0][1][1][0][RTW89_IC][1][30] = -4,
- [0][1][1][0][RTW89_IC][2][30] = 68,
[0][1][1][0][RTW89_KCC][1][30] = 12,
[0][1][1][0][RTW89_KCC][0][30] = 14,
[0][1][1][0][RTW89_ACMA][1][30] = 54,
@@ -39960,13 +39310,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][30] = 42,
[0][1][1][0][RTW89_THAILAND][0][30] = -4,
[0][1][1][0][RTW89_FCC][1][32] = -4,
- [0][1][1][0][RTW89_FCC][2][32] = 68,
[0][1][1][0][RTW89_ETSI][1][32] = 54,
[0][1][1][0][RTW89_ETSI][0][32] = 18,
[0][1][1][0][RTW89_MKK][1][32] = 54,
[0][1][1][0][RTW89_MKK][0][32] = 16,
[0][1][1][0][RTW89_IC][1][32] = -4,
- [0][1][1][0][RTW89_IC][2][32] = 68,
[0][1][1][0][RTW89_KCC][1][32] = 12,
[0][1][1][0][RTW89_KCC][0][32] = 14,
[0][1][1][0][RTW89_ACMA][1][32] = 54,
@@ -39979,13 +39327,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][32] = 42,
[0][1][1][0][RTW89_THAILAND][0][32] = -4,
[0][1][1][0][RTW89_FCC][1][34] = -4,
- [0][1][1][0][RTW89_FCC][2][34] = 68,
[0][1][1][0][RTW89_ETSI][1][34] = 54,
[0][1][1][0][RTW89_ETSI][0][34] = 18,
[0][1][1][0][RTW89_MKK][1][34] = 54,
[0][1][1][0][RTW89_MKK][0][34] = 16,
[0][1][1][0][RTW89_IC][1][34] = -4,
- [0][1][1][0][RTW89_IC][2][34] = 68,
[0][1][1][0][RTW89_KCC][1][34] = 12,
[0][1][1][0][RTW89_KCC][0][34] = 14,
[0][1][1][0][RTW89_ACMA][1][34] = 54,
@@ -39998,13 +39344,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][34] = 42,
[0][1][1][0][RTW89_THAILAND][0][34] = -4,
[0][1][1][0][RTW89_FCC][1][36] = -4,
- [0][1][1][0][RTW89_FCC][2][36] = 68,
[0][1][1][0][RTW89_ETSI][1][36] = 54,
[0][1][1][0][RTW89_ETSI][0][36] = 18,
[0][1][1][0][RTW89_MKK][1][36] = 54,
[0][1][1][0][RTW89_MKK][0][36] = 16,
[0][1][1][0][RTW89_IC][1][36] = -4,
- [0][1][1][0][RTW89_IC][2][36] = 68,
[0][1][1][0][RTW89_KCC][1][36] = 12,
[0][1][1][0][RTW89_KCC][0][36] = 14,
[0][1][1][0][RTW89_ACMA][1][36] = 54,
@@ -40017,13 +39361,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][36] = 42,
[0][1][1][0][RTW89_THAILAND][0][36] = -4,
[0][1][1][0][RTW89_FCC][1][38] = -4,
- [0][1][1][0][RTW89_FCC][2][38] = 68,
[0][1][1][0][RTW89_ETSI][1][38] = 54,
[0][1][1][0][RTW89_ETSI][0][38] = 18,
[0][1][1][0][RTW89_MKK][1][38] = 54,
[0][1][1][0][RTW89_MKK][0][38] = 16,
[0][1][1][0][RTW89_IC][1][38] = -4,
- [0][1][1][0][RTW89_IC][2][38] = 68,
[0][1][1][0][RTW89_KCC][1][38] = 12,
[0][1][1][0][RTW89_KCC][0][38] = 14,
[0][1][1][0][RTW89_ACMA][1][38] = 54,
@@ -40036,13 +39378,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][38] = 42,
[0][1][1][0][RTW89_THAILAND][0][38] = -4,
[0][1][1][0][RTW89_FCC][1][40] = -4,
- [0][1][1][0][RTW89_FCC][2][40] = 68,
[0][1][1][0][RTW89_ETSI][1][40] = 54,
[0][1][1][0][RTW89_ETSI][0][40] = 18,
[0][1][1][0][RTW89_MKK][1][40] = 54,
[0][1][1][0][RTW89_MKK][0][40] = 16,
[0][1][1][0][RTW89_IC][1][40] = -4,
- [0][1][1][0][RTW89_IC][2][40] = 68,
[0][1][1][0][RTW89_KCC][1][40] = 12,
[0][1][1][0][RTW89_KCC][0][40] = 14,
[0][1][1][0][RTW89_ACMA][1][40] = 54,
@@ -40055,13 +39395,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][40] = 42,
[0][1][1][0][RTW89_THAILAND][0][40] = -4,
[0][1][1][0][RTW89_FCC][1][42] = -4,
- [0][1][1][0][RTW89_FCC][2][42] = 68,
[0][1][1][0][RTW89_ETSI][1][42] = 54,
[0][1][1][0][RTW89_ETSI][0][42] = 18,
[0][1][1][0][RTW89_MKK][1][42] = 54,
[0][1][1][0][RTW89_MKK][0][42] = 16,
[0][1][1][0][RTW89_IC][1][42] = -4,
- [0][1][1][0][RTW89_IC][2][42] = 68,
[0][1][1][0][RTW89_KCC][1][42] = 12,
[0][1][1][0][RTW89_KCC][0][42] = 14,
[0][1][1][0][RTW89_ACMA][1][42] = 54,
@@ -40074,13 +39412,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][42] = 42,
[0][1][1][0][RTW89_THAILAND][0][42] = -4,
[0][1][1][0][RTW89_FCC][1][44] = -2,
- [0][1][1][0][RTW89_FCC][2][44] = 68,
[0][1][1][0][RTW89_ETSI][1][44] = 54,
[0][1][1][0][RTW89_ETSI][0][44] = 18,
[0][1][1][0][RTW89_MKK][1][44] = 34,
[0][1][1][0][RTW89_MKK][0][44] = 16,
[0][1][1][0][RTW89_IC][1][44] = -2,
- [0][1][1][0][RTW89_IC][2][44] = 68,
[0][1][1][0][RTW89_KCC][1][44] = 12,
[0][1][1][0][RTW89_KCC][0][44] = 12,
[0][1][1][0][RTW89_ACMA][1][44] = 54,
@@ -40093,13 +39429,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][44] = 42,
[0][1][1][0][RTW89_THAILAND][0][44] = -2,
[0][1][1][0][RTW89_FCC][1][45] = -2,
- [0][1][1][0][RTW89_FCC][2][45] = 127,
[0][1][1][0][RTW89_ETSI][1][45] = 127,
[0][1][1][0][RTW89_ETSI][0][45] = 127,
[0][1][1][0][RTW89_MKK][1][45] = 127,
[0][1][1][0][RTW89_MKK][0][45] = 127,
[0][1][1][0][RTW89_IC][1][45] = -2,
- [0][1][1][0][RTW89_IC][2][45] = 70,
[0][1][1][0][RTW89_KCC][1][45] = 12,
[0][1][1][0][RTW89_KCC][0][45] = 127,
[0][1][1][0][RTW89_ACMA][1][45] = 127,
@@ -40112,13 +39446,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][45] = 127,
[0][1][1][0][RTW89_THAILAND][0][45] = 127,
[0][1][1][0][RTW89_FCC][1][47] = -2,
- [0][1][1][0][RTW89_FCC][2][47] = 127,
[0][1][1][0][RTW89_ETSI][1][47] = 127,
[0][1][1][0][RTW89_ETSI][0][47] = 127,
[0][1][1][0][RTW89_MKK][1][47] = 127,
[0][1][1][0][RTW89_MKK][0][47] = 127,
[0][1][1][0][RTW89_IC][1][47] = -2,
- [0][1][1][0][RTW89_IC][2][47] = 68,
[0][1][1][0][RTW89_KCC][1][47] = 12,
[0][1][1][0][RTW89_KCC][0][47] = 127,
[0][1][1][0][RTW89_ACMA][1][47] = 127,
@@ -40131,13 +39463,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][47] = 127,
[0][1][1][0][RTW89_THAILAND][0][47] = 127,
[0][1][1][0][RTW89_FCC][1][49] = -2,
- [0][1][1][0][RTW89_FCC][2][49] = 127,
[0][1][1][0][RTW89_ETSI][1][49] = 127,
[0][1][1][0][RTW89_ETSI][0][49] = 127,
[0][1][1][0][RTW89_MKK][1][49] = 127,
[0][1][1][0][RTW89_MKK][0][49] = 127,
[0][1][1][0][RTW89_IC][1][49] = -2,
- [0][1][1][0][RTW89_IC][2][49] = 68,
[0][1][1][0][RTW89_KCC][1][49] = 12,
[0][1][1][0][RTW89_KCC][0][49] = 127,
[0][1][1][0][RTW89_ACMA][1][49] = 127,
@@ -40150,13 +39480,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][49] = 127,
[0][1][1][0][RTW89_THAILAND][0][49] = 127,
[0][1][1][0][RTW89_FCC][1][51] = -2,
- [0][1][1][0][RTW89_FCC][2][51] = 127,
[0][1][1][0][RTW89_ETSI][1][51] = 127,
[0][1][1][0][RTW89_ETSI][0][51] = 127,
[0][1][1][0][RTW89_MKK][1][51] = 127,
[0][1][1][0][RTW89_MKK][0][51] = 127,
[0][1][1][0][RTW89_IC][1][51] = -2,
- [0][1][1][0][RTW89_IC][2][51] = 68,
[0][1][1][0][RTW89_KCC][1][51] = 12,
[0][1][1][0][RTW89_KCC][0][51] = 127,
[0][1][1][0][RTW89_ACMA][1][51] = 127,
@@ -40169,13 +39497,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][51] = 127,
[0][1][1][0][RTW89_THAILAND][0][51] = 127,
[0][1][1][0][RTW89_FCC][1][53] = -2,
- [0][1][1][0][RTW89_FCC][2][53] = 127,
[0][1][1][0][RTW89_ETSI][1][53] = 127,
[0][1][1][0][RTW89_ETSI][0][53] = 127,
[0][1][1][0][RTW89_MKK][1][53] = 127,
[0][1][1][0][RTW89_MKK][0][53] = 127,
[0][1][1][0][RTW89_IC][1][53] = -2,
- [0][1][1][0][RTW89_IC][2][53] = 68,
[0][1][1][0][RTW89_KCC][1][53] = 12,
[0][1][1][0][RTW89_KCC][0][53] = 127,
[0][1][1][0][RTW89_ACMA][1][53] = 127,
@@ -40188,13 +39514,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][53] = 127,
[0][1][1][0][RTW89_THAILAND][0][53] = 127,
[0][1][1][0][RTW89_FCC][1][55] = -2,
- [0][1][1][0][RTW89_FCC][2][55] = 68,
[0][1][1][0][RTW89_ETSI][1][55] = 127,
[0][1][1][0][RTW89_ETSI][0][55] = 127,
[0][1][1][0][RTW89_MKK][1][55] = 127,
[0][1][1][0][RTW89_MKK][0][55] = 127,
[0][1][1][0][RTW89_IC][1][55] = -2,
- [0][1][1][0][RTW89_IC][2][55] = 68,
[0][1][1][0][RTW89_KCC][1][55] = 12,
[0][1][1][0][RTW89_KCC][0][55] = 127,
[0][1][1][0][RTW89_ACMA][1][55] = 127,
@@ -40207,13 +39531,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][55] = 127,
[0][1][1][0][RTW89_THAILAND][0][55] = 127,
[0][1][1][0][RTW89_FCC][1][57] = -2,
- [0][1][1][0][RTW89_FCC][2][57] = 68,
[0][1][1][0][RTW89_ETSI][1][57] = 127,
[0][1][1][0][RTW89_ETSI][0][57] = 127,
[0][1][1][0][RTW89_MKK][1][57] = 127,
[0][1][1][0][RTW89_MKK][0][57] = 127,
[0][1][1][0][RTW89_IC][1][57] = -2,
- [0][1][1][0][RTW89_IC][2][57] = 68,
[0][1][1][0][RTW89_KCC][1][57] = 12,
[0][1][1][0][RTW89_KCC][0][57] = 127,
[0][1][1][0][RTW89_ACMA][1][57] = 127,
@@ -40226,13 +39548,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][57] = 127,
[0][1][1][0][RTW89_THAILAND][0][57] = 127,
[0][1][1][0][RTW89_FCC][1][59] = -2,
- [0][1][1][0][RTW89_FCC][2][59] = 68,
[0][1][1][0][RTW89_ETSI][1][59] = 127,
[0][1][1][0][RTW89_ETSI][0][59] = 127,
[0][1][1][0][RTW89_MKK][1][59] = 127,
[0][1][1][0][RTW89_MKK][0][59] = 127,
[0][1][1][0][RTW89_IC][1][59] = -2,
- [0][1][1][0][RTW89_IC][2][59] = 68,
[0][1][1][0][RTW89_KCC][1][59] = 12,
[0][1][1][0][RTW89_KCC][0][59] = 127,
[0][1][1][0][RTW89_ACMA][1][59] = 127,
@@ -40245,13 +39565,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][59] = 127,
[0][1][1][0][RTW89_THAILAND][0][59] = 127,
[0][1][1][0][RTW89_FCC][1][60] = -2,
- [0][1][1][0][RTW89_FCC][2][60] = 68,
[0][1][1][0][RTW89_ETSI][1][60] = 127,
[0][1][1][0][RTW89_ETSI][0][60] = 127,
[0][1][1][0][RTW89_MKK][1][60] = 127,
[0][1][1][0][RTW89_MKK][0][60] = 127,
[0][1][1][0][RTW89_IC][1][60] = -2,
- [0][1][1][0][RTW89_IC][2][60] = 68,
[0][1][1][0][RTW89_KCC][1][60] = 12,
[0][1][1][0][RTW89_KCC][0][60] = 127,
[0][1][1][0][RTW89_ACMA][1][60] = 127,
@@ -40264,13 +39582,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][60] = 127,
[0][1][1][0][RTW89_THAILAND][0][60] = 127,
[0][1][1][0][RTW89_FCC][1][62] = -2,
- [0][1][1][0][RTW89_FCC][2][62] = 68,
[0][1][1][0][RTW89_ETSI][1][62] = 127,
[0][1][1][0][RTW89_ETSI][0][62] = 127,
[0][1][1][0][RTW89_MKK][1][62] = 127,
[0][1][1][0][RTW89_MKK][0][62] = 127,
[0][1][1][0][RTW89_IC][1][62] = -2,
- [0][1][1][0][RTW89_IC][2][62] = 68,
[0][1][1][0][RTW89_KCC][1][62] = 12,
[0][1][1][0][RTW89_KCC][0][62] = 127,
[0][1][1][0][RTW89_ACMA][1][62] = 127,
@@ -40283,13 +39599,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][62] = 127,
[0][1][1][0][RTW89_THAILAND][0][62] = 127,
[0][1][1][0][RTW89_FCC][1][64] = -2,
- [0][1][1][0][RTW89_FCC][2][64] = 68,
[0][1][1][0][RTW89_ETSI][1][64] = 127,
[0][1][1][0][RTW89_ETSI][0][64] = 127,
[0][1][1][0][RTW89_MKK][1][64] = 127,
[0][1][1][0][RTW89_MKK][0][64] = 127,
[0][1][1][0][RTW89_IC][1][64] = -2,
- [0][1][1][0][RTW89_IC][2][64] = 68,
[0][1][1][0][RTW89_KCC][1][64] = 12,
[0][1][1][0][RTW89_KCC][0][64] = 127,
[0][1][1][0][RTW89_ACMA][1][64] = 127,
@@ -40302,13 +39616,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][64] = 127,
[0][1][1][0][RTW89_THAILAND][0][64] = 127,
[0][1][1][0][RTW89_FCC][1][66] = -2,
- [0][1][1][0][RTW89_FCC][2][66] = 68,
[0][1][1][0][RTW89_ETSI][1][66] = 127,
[0][1][1][0][RTW89_ETSI][0][66] = 127,
[0][1][1][0][RTW89_MKK][1][66] = 127,
[0][1][1][0][RTW89_MKK][0][66] = 127,
[0][1][1][0][RTW89_IC][1][66] = -2,
- [0][1][1][0][RTW89_IC][2][66] = 68,
[0][1][1][0][RTW89_KCC][1][66] = 12,
[0][1][1][0][RTW89_KCC][0][66] = 127,
[0][1][1][0][RTW89_ACMA][1][66] = 127,
@@ -40321,13 +39633,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][66] = 127,
[0][1][1][0][RTW89_THAILAND][0][66] = 127,
[0][1][1][0][RTW89_FCC][1][68] = -2,
- [0][1][1][0][RTW89_FCC][2][68] = 68,
[0][1][1][0][RTW89_ETSI][1][68] = 127,
[0][1][1][0][RTW89_ETSI][0][68] = 127,
[0][1][1][0][RTW89_MKK][1][68] = 127,
[0][1][1][0][RTW89_MKK][0][68] = 127,
[0][1][1][0][RTW89_IC][1][68] = -2,
- [0][1][1][0][RTW89_IC][2][68] = 68,
[0][1][1][0][RTW89_KCC][1][68] = 12,
[0][1][1][0][RTW89_KCC][0][68] = 127,
[0][1][1][0][RTW89_ACMA][1][68] = 127,
@@ -40340,13 +39650,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][68] = 127,
[0][1][1][0][RTW89_THAILAND][0][68] = 127,
[0][1][1][0][RTW89_FCC][1][70] = -2,
- [0][1][1][0][RTW89_FCC][2][70] = 68,
[0][1][1][0][RTW89_ETSI][1][70] = 127,
[0][1][1][0][RTW89_ETSI][0][70] = 127,
[0][1][1][0][RTW89_MKK][1][70] = 127,
[0][1][1][0][RTW89_MKK][0][70] = 127,
[0][1][1][0][RTW89_IC][1][70] = -2,
- [0][1][1][0][RTW89_IC][2][70] = 68,
[0][1][1][0][RTW89_KCC][1][70] = 12,
[0][1][1][0][RTW89_KCC][0][70] = 127,
[0][1][1][0][RTW89_ACMA][1][70] = 127,
@@ -40359,13 +39667,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][70] = 127,
[0][1][1][0][RTW89_THAILAND][0][70] = 127,
[0][1][1][0][RTW89_FCC][1][72] = -2,
- [0][1][1][0][RTW89_FCC][2][72] = 68,
[0][1][1][0][RTW89_ETSI][1][72] = 127,
[0][1][1][0][RTW89_ETSI][0][72] = 127,
[0][1][1][0][RTW89_MKK][1][72] = 127,
[0][1][1][0][RTW89_MKK][0][72] = 127,
[0][1][1][0][RTW89_IC][1][72] = -2,
- [0][1][1][0][RTW89_IC][2][72] = 68,
[0][1][1][0][RTW89_KCC][1][72] = 12,
[0][1][1][0][RTW89_KCC][0][72] = 127,
[0][1][1][0][RTW89_ACMA][1][72] = 127,
@@ -40378,13 +39684,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][72] = 127,
[0][1][1][0][RTW89_THAILAND][0][72] = 127,
[0][1][1][0][RTW89_FCC][1][74] = -2,
- [0][1][1][0][RTW89_FCC][2][74] = 68,
[0][1][1][0][RTW89_ETSI][1][74] = 127,
[0][1][1][0][RTW89_ETSI][0][74] = 127,
[0][1][1][0][RTW89_MKK][1][74] = 127,
[0][1][1][0][RTW89_MKK][0][74] = 127,
[0][1][1][0][RTW89_IC][1][74] = -2,
- [0][1][1][0][RTW89_IC][2][74] = 68,
[0][1][1][0][RTW89_KCC][1][74] = 12,
[0][1][1][0][RTW89_KCC][0][74] = 127,
[0][1][1][0][RTW89_ACMA][1][74] = 127,
@@ -40397,13 +39701,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][74] = 127,
[0][1][1][0][RTW89_THAILAND][0][74] = 127,
[0][1][1][0][RTW89_FCC][1][75] = -2,
- [0][1][1][0][RTW89_FCC][2][75] = 68,
[0][1][1][0][RTW89_ETSI][1][75] = 127,
[0][1][1][0][RTW89_ETSI][0][75] = 127,
[0][1][1][0][RTW89_MKK][1][75] = 127,
[0][1][1][0][RTW89_MKK][0][75] = 127,
[0][1][1][0][RTW89_IC][1][75] = -2,
- [0][1][1][0][RTW89_IC][2][75] = 68,
[0][1][1][0][RTW89_KCC][1][75] = 12,
[0][1][1][0][RTW89_KCC][0][75] = 127,
[0][1][1][0][RTW89_ACMA][1][75] = 127,
@@ -40416,13 +39718,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][75] = 127,
[0][1][1][0][RTW89_THAILAND][0][75] = 127,
[0][1][1][0][RTW89_FCC][1][77] = -2,
- [0][1][1][0][RTW89_FCC][2][77] = 68,
[0][1][1][0][RTW89_ETSI][1][77] = 127,
[0][1][1][0][RTW89_ETSI][0][77] = 127,
[0][1][1][0][RTW89_MKK][1][77] = 127,
[0][1][1][0][RTW89_MKK][0][77] = 127,
[0][1][1][0][RTW89_IC][1][77] = -2,
- [0][1][1][0][RTW89_IC][2][77] = 68,
[0][1][1][0][RTW89_KCC][1][77] = 12,
[0][1][1][0][RTW89_KCC][0][77] = 127,
[0][1][1][0][RTW89_ACMA][1][77] = 127,
@@ -40435,13 +39735,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][77] = 127,
[0][1][1][0][RTW89_THAILAND][0][77] = 127,
[0][1][1][0][RTW89_FCC][1][79] = -2,
- [0][1][1][0][RTW89_FCC][2][79] = 68,
[0][1][1][0][RTW89_ETSI][1][79] = 127,
[0][1][1][0][RTW89_ETSI][0][79] = 127,
[0][1][1][0][RTW89_MKK][1][79] = 127,
[0][1][1][0][RTW89_MKK][0][79] = 127,
[0][1][1][0][RTW89_IC][1][79] = -2,
- [0][1][1][0][RTW89_IC][2][79] = 68,
[0][1][1][0][RTW89_KCC][1][79] = 12,
[0][1][1][0][RTW89_KCC][0][79] = 127,
[0][1][1][0][RTW89_ACMA][1][79] = 127,
@@ -40454,13 +39752,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][79] = 127,
[0][1][1][0][RTW89_THAILAND][0][79] = 127,
[0][1][1][0][RTW89_FCC][1][81] = -2,
- [0][1][1][0][RTW89_FCC][2][81] = 68,
[0][1][1][0][RTW89_ETSI][1][81] = 127,
[0][1][1][0][RTW89_ETSI][0][81] = 127,
[0][1][1][0][RTW89_MKK][1][81] = 127,
[0][1][1][0][RTW89_MKK][0][81] = 127,
[0][1][1][0][RTW89_IC][1][81] = -2,
- [0][1][1][0][RTW89_IC][2][81] = 68,
[0][1][1][0][RTW89_KCC][1][81] = 12,
[0][1][1][0][RTW89_KCC][0][81] = 127,
[0][1][1][0][RTW89_ACMA][1][81] = 127,
@@ -40473,13 +39769,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][81] = 127,
[0][1][1][0][RTW89_THAILAND][0][81] = 127,
[0][1][1][0][RTW89_FCC][1][83] = -2,
- [0][1][1][0][RTW89_FCC][2][83] = 68,
[0][1][1][0][RTW89_ETSI][1][83] = 127,
[0][1][1][0][RTW89_ETSI][0][83] = 127,
[0][1][1][0][RTW89_MKK][1][83] = 127,
[0][1][1][0][RTW89_MKK][0][83] = 127,
[0][1][1][0][RTW89_IC][1][83] = -2,
- [0][1][1][0][RTW89_IC][2][83] = 68,
[0][1][1][0][RTW89_KCC][1][83] = 20,
[0][1][1][0][RTW89_KCC][0][83] = 127,
[0][1][1][0][RTW89_ACMA][1][83] = 127,
@@ -40492,13 +39786,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][83] = 127,
[0][1][1][0][RTW89_THAILAND][0][83] = 127,
[0][1][1][0][RTW89_FCC][1][85] = -2,
- [0][1][1][0][RTW89_FCC][2][85] = 68,
[0][1][1][0][RTW89_ETSI][1][85] = 127,
[0][1][1][0][RTW89_ETSI][0][85] = 127,
[0][1][1][0][RTW89_MKK][1][85] = 127,
[0][1][1][0][RTW89_MKK][0][85] = 127,
[0][1][1][0][RTW89_IC][1][85] = -2,
- [0][1][1][0][RTW89_IC][2][85] = 68,
[0][1][1][0][RTW89_KCC][1][85] = 20,
[0][1][1][0][RTW89_KCC][0][85] = 127,
[0][1][1][0][RTW89_ACMA][1][85] = 127,
@@ -40511,13 +39803,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][85] = 127,
[0][1][1][0][RTW89_THAILAND][0][85] = 127,
[0][1][1][0][RTW89_FCC][1][87] = -2,
- [0][1][1][0][RTW89_FCC][2][87] = 127,
[0][1][1][0][RTW89_ETSI][1][87] = 127,
[0][1][1][0][RTW89_ETSI][0][87] = 127,
[0][1][1][0][RTW89_MKK][1][87] = 127,
[0][1][1][0][RTW89_MKK][0][87] = 127,
[0][1][1][0][RTW89_IC][1][87] = -2,
- [0][1][1][0][RTW89_IC][2][87] = 127,
[0][1][1][0][RTW89_KCC][1][87] = 20,
[0][1][1][0][RTW89_KCC][0][87] = 127,
[0][1][1][0][RTW89_ACMA][1][87] = 127,
@@ -40530,13 +39820,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][87] = 127,
[0][1][1][0][RTW89_THAILAND][0][87] = 127,
[0][1][1][0][RTW89_FCC][1][89] = -2,
- [0][1][1][0][RTW89_FCC][2][89] = 127,
[0][1][1][0][RTW89_ETSI][1][89] = 127,
[0][1][1][0][RTW89_ETSI][0][89] = 127,
[0][1][1][0][RTW89_MKK][1][89] = 127,
[0][1][1][0][RTW89_MKK][0][89] = 127,
[0][1][1][0][RTW89_IC][1][89] = -2,
- [0][1][1][0][RTW89_IC][2][89] = 127,
[0][1][1][0][RTW89_KCC][1][89] = 20,
[0][1][1][0][RTW89_KCC][0][89] = 127,
[0][1][1][0][RTW89_ACMA][1][89] = 127,
@@ -40549,13 +39837,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][89] = 127,
[0][1][1][0][RTW89_THAILAND][0][89] = 127,
[0][1][1][0][RTW89_FCC][1][90] = -2,
- [0][1][1][0][RTW89_FCC][2][90] = 127,
[0][1][1][0][RTW89_ETSI][1][90] = 127,
[0][1][1][0][RTW89_ETSI][0][90] = 127,
[0][1][1][0][RTW89_MKK][1][90] = 127,
[0][1][1][0][RTW89_MKK][0][90] = 127,
[0][1][1][0][RTW89_IC][1][90] = -2,
- [0][1][1][0][RTW89_IC][2][90] = 127,
[0][1][1][0][RTW89_KCC][1][90] = 20,
[0][1][1][0][RTW89_KCC][0][90] = 127,
[0][1][1][0][RTW89_ACMA][1][90] = 127,
@@ -40568,13 +39854,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][90] = 127,
[0][1][1][0][RTW89_THAILAND][0][90] = 127,
[0][1][1][0][RTW89_FCC][1][92] = -2,
- [0][1][1][0][RTW89_FCC][2][92] = 127,
[0][1][1][0][RTW89_ETSI][1][92] = 127,
[0][1][1][0][RTW89_ETSI][0][92] = 127,
[0][1][1][0][RTW89_MKK][1][92] = 127,
[0][1][1][0][RTW89_MKK][0][92] = 127,
[0][1][1][0][RTW89_IC][1][92] = -2,
- [0][1][1][0][RTW89_IC][2][92] = 127,
[0][1][1][0][RTW89_KCC][1][92] = 20,
[0][1][1][0][RTW89_KCC][0][92] = 127,
[0][1][1][0][RTW89_ACMA][1][92] = 127,
@@ -40587,13 +39871,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][92] = 127,
[0][1][1][0][RTW89_THAILAND][0][92] = 127,
[0][1][1][0][RTW89_FCC][1][94] = -2,
- [0][1][1][0][RTW89_FCC][2][94] = 127,
[0][1][1][0][RTW89_ETSI][1][94] = 127,
[0][1][1][0][RTW89_ETSI][0][94] = 127,
[0][1][1][0][RTW89_MKK][1][94] = 127,
[0][1][1][0][RTW89_MKK][0][94] = 127,
[0][1][1][0][RTW89_IC][1][94] = -2,
- [0][1][1][0][RTW89_IC][2][94] = 127,
[0][1][1][0][RTW89_KCC][1][94] = 20,
[0][1][1][0][RTW89_KCC][0][94] = 127,
[0][1][1][0][RTW89_ACMA][1][94] = 127,
@@ -40606,13 +39888,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][94] = 127,
[0][1][1][0][RTW89_THAILAND][0][94] = 127,
[0][1][1][0][RTW89_FCC][1][96] = -2,
- [0][1][1][0][RTW89_FCC][2][96] = 127,
[0][1][1][0][RTW89_ETSI][1][96] = 127,
[0][1][1][0][RTW89_ETSI][0][96] = 127,
[0][1][1][0][RTW89_MKK][1][96] = 127,
[0][1][1][0][RTW89_MKK][0][96] = 127,
[0][1][1][0][RTW89_IC][1][96] = -2,
- [0][1][1][0][RTW89_IC][2][96] = 127,
[0][1][1][0][RTW89_KCC][1][96] = 20,
[0][1][1][0][RTW89_KCC][0][96] = 127,
[0][1][1][0][RTW89_ACMA][1][96] = 127,
@@ -40625,13 +39905,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][96] = 127,
[0][1][1][0][RTW89_THAILAND][0][96] = 127,
[0][1][1][0][RTW89_FCC][1][98] = -2,
- [0][1][1][0][RTW89_FCC][2][98] = 127,
[0][1][1][0][RTW89_ETSI][1][98] = 127,
[0][1][1][0][RTW89_ETSI][0][98] = 127,
[0][1][1][0][RTW89_MKK][1][98] = 127,
[0][1][1][0][RTW89_MKK][0][98] = 127,
[0][1][1][0][RTW89_IC][1][98] = -2,
- [0][1][1][0][RTW89_IC][2][98] = 127,
[0][1][1][0][RTW89_KCC][1][98] = 20,
[0][1][1][0][RTW89_KCC][0][98] = 127,
[0][1][1][0][RTW89_ACMA][1][98] = 127,
@@ -40644,13 +39922,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][98] = 127,
[0][1][1][0][RTW89_THAILAND][0][98] = 127,
[0][1][1][0][RTW89_FCC][1][100] = -2,
- [0][1][1][0][RTW89_FCC][2][100] = 127,
[0][1][1][0][RTW89_ETSI][1][100] = 127,
[0][1][1][0][RTW89_ETSI][0][100] = 127,
[0][1][1][0][RTW89_MKK][1][100] = 127,
[0][1][1][0][RTW89_MKK][0][100] = 127,
[0][1][1][0][RTW89_IC][1][100] = -2,
- [0][1][1][0][RTW89_IC][2][100] = 127,
[0][1][1][0][RTW89_KCC][1][100] = 20,
[0][1][1][0][RTW89_KCC][0][100] = 127,
[0][1][1][0][RTW89_ACMA][1][100] = 127,
@@ -40663,13 +39939,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][100] = 127,
[0][1][1][0][RTW89_THAILAND][0][100] = 127,
[0][1][1][0][RTW89_FCC][1][102] = -2,
- [0][1][1][0][RTW89_FCC][2][102] = 127,
[0][1][1][0][RTW89_ETSI][1][102] = 127,
[0][1][1][0][RTW89_ETSI][0][102] = 127,
[0][1][1][0][RTW89_MKK][1][102] = 127,
[0][1][1][0][RTW89_MKK][0][102] = 127,
[0][1][1][0][RTW89_IC][1][102] = -2,
- [0][1][1][0][RTW89_IC][2][102] = 127,
[0][1][1][0][RTW89_KCC][1][102] = 20,
[0][1][1][0][RTW89_KCC][0][102] = 127,
[0][1][1][0][RTW89_ACMA][1][102] = 127,
@@ -40682,13 +39956,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][102] = 127,
[0][1][1][0][RTW89_THAILAND][0][102] = 127,
[0][1][1][0][RTW89_FCC][1][104] = -2,
- [0][1][1][0][RTW89_FCC][2][104] = 127,
[0][1][1][0][RTW89_ETSI][1][104] = 127,
[0][1][1][0][RTW89_ETSI][0][104] = 127,
[0][1][1][0][RTW89_MKK][1][104] = 127,
[0][1][1][0][RTW89_MKK][0][104] = 127,
[0][1][1][0][RTW89_IC][1][104] = -2,
- [0][1][1][0][RTW89_IC][2][104] = 127,
[0][1][1][0][RTW89_KCC][1][104] = 20,
[0][1][1][0][RTW89_KCC][0][104] = 127,
[0][1][1][0][RTW89_ACMA][1][104] = 127,
@@ -40701,13 +39973,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][104] = 127,
[0][1][1][0][RTW89_THAILAND][0][104] = 127,
[0][1][1][0][RTW89_FCC][1][105] = -2,
- [0][1][1][0][RTW89_FCC][2][105] = 127,
[0][1][1][0][RTW89_ETSI][1][105] = 127,
[0][1][1][0][RTW89_ETSI][0][105] = 127,
[0][1][1][0][RTW89_MKK][1][105] = 127,
[0][1][1][0][RTW89_MKK][0][105] = 127,
[0][1][1][0][RTW89_IC][1][105] = -2,
- [0][1][1][0][RTW89_IC][2][105] = 127,
[0][1][1][0][RTW89_KCC][1][105] = 20,
[0][1][1][0][RTW89_KCC][0][105] = 127,
[0][1][1][0][RTW89_ACMA][1][105] = 127,
@@ -40720,13 +39990,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][105] = 127,
[0][1][1][0][RTW89_THAILAND][0][105] = 127,
[0][1][1][0][RTW89_FCC][1][107] = 1,
- [0][1][1][0][RTW89_FCC][2][107] = 127,
[0][1][1][0][RTW89_ETSI][1][107] = 127,
[0][1][1][0][RTW89_ETSI][0][107] = 127,
[0][1][1][0][RTW89_MKK][1][107] = 127,
[0][1][1][0][RTW89_MKK][0][107] = 127,
[0][1][1][0][RTW89_IC][1][107] = 1,
- [0][1][1][0][RTW89_IC][2][107] = 127,
[0][1][1][0][RTW89_KCC][1][107] = 20,
[0][1][1][0][RTW89_KCC][0][107] = 127,
[0][1][1][0][RTW89_ACMA][1][107] = 127,
@@ -40739,13 +40007,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][107] = 127,
[0][1][1][0][RTW89_THAILAND][0][107] = 127,
[0][1][1][0][RTW89_FCC][1][109] = 1,
- [0][1][1][0][RTW89_FCC][2][109] = 127,
[0][1][1][0][RTW89_ETSI][1][109] = 127,
[0][1][1][0][RTW89_ETSI][0][109] = 127,
[0][1][1][0][RTW89_MKK][1][109] = 127,
[0][1][1][0][RTW89_MKK][0][109] = 127,
[0][1][1][0][RTW89_IC][1][109] = 1,
- [0][1][1][0][RTW89_IC][2][109] = 127,
[0][1][1][0][RTW89_KCC][1][109] = 20,
[0][1][1][0][RTW89_KCC][0][109] = 127,
[0][1][1][0][RTW89_ACMA][1][109] = 127,
@@ -40758,13 +40024,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][109] = 127,
[0][1][1][0][RTW89_THAILAND][0][109] = 127,
[0][1][1][0][RTW89_FCC][1][111] = 127,
- [0][1][1][0][RTW89_FCC][2][111] = 127,
[0][1][1][0][RTW89_ETSI][1][111] = 127,
[0][1][1][0][RTW89_ETSI][0][111] = 127,
[0][1][1][0][RTW89_MKK][1][111] = 127,
[0][1][1][0][RTW89_MKK][0][111] = 127,
[0][1][1][0][RTW89_IC][1][111] = 127,
- [0][1][1][0][RTW89_IC][2][111] = 127,
[0][1][1][0][RTW89_KCC][1][111] = 127,
[0][1][1][0][RTW89_KCC][0][111] = 127,
[0][1][1][0][RTW89_ACMA][1][111] = 127,
@@ -40777,13 +40041,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][111] = 127,
[0][1][1][0][RTW89_THAILAND][0][111] = 127,
[0][1][1][0][RTW89_FCC][1][113] = 127,
- [0][1][1][0][RTW89_FCC][2][113] = 127,
[0][1][1][0][RTW89_ETSI][1][113] = 127,
[0][1][1][0][RTW89_ETSI][0][113] = 127,
[0][1][1][0][RTW89_MKK][1][113] = 127,
[0][1][1][0][RTW89_MKK][0][113] = 127,
[0][1][1][0][RTW89_IC][1][113] = 127,
- [0][1][1][0][RTW89_IC][2][113] = 127,
[0][1][1][0][RTW89_KCC][1][113] = 127,
[0][1][1][0][RTW89_KCC][0][113] = 127,
[0][1][1][0][RTW89_ACMA][1][113] = 127,
@@ -40796,13 +40058,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][113] = 127,
[0][1][1][0][RTW89_THAILAND][0][113] = 127,
[0][1][1][0][RTW89_FCC][1][115] = 127,
- [0][1][1][0][RTW89_FCC][2][115] = 127,
[0][1][1][0][RTW89_ETSI][1][115] = 127,
[0][1][1][0][RTW89_ETSI][0][115] = 127,
[0][1][1][0][RTW89_MKK][1][115] = 127,
[0][1][1][0][RTW89_MKK][0][115] = 127,
[0][1][1][0][RTW89_IC][1][115] = 127,
- [0][1][1][0][RTW89_IC][2][115] = 127,
[0][1][1][0][RTW89_KCC][1][115] = 127,
[0][1][1][0][RTW89_KCC][0][115] = 127,
[0][1][1][0][RTW89_ACMA][1][115] = 127,
@@ -40815,13 +40075,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][115] = 127,
[0][1][1][0][RTW89_THAILAND][0][115] = 127,
[0][1][1][0][RTW89_FCC][1][117] = 127,
- [0][1][1][0][RTW89_FCC][2][117] = 127,
[0][1][1][0][RTW89_ETSI][1][117] = 127,
[0][1][1][0][RTW89_ETSI][0][117] = 127,
[0][1][1][0][RTW89_MKK][1][117] = 127,
[0][1][1][0][RTW89_MKK][0][117] = 127,
[0][1][1][0][RTW89_IC][1][117] = 127,
- [0][1][1][0][RTW89_IC][2][117] = 127,
[0][1][1][0][RTW89_KCC][1][117] = 127,
[0][1][1][0][RTW89_KCC][0][117] = 127,
[0][1][1][0][RTW89_ACMA][1][117] = 127,
@@ -40834,13 +40092,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][117] = 127,
[0][1][1][0][RTW89_THAILAND][0][117] = 127,
[0][1][1][0][RTW89_FCC][1][119] = 127,
- [0][1][1][0][RTW89_FCC][2][119] = 127,
[0][1][1][0][RTW89_ETSI][1][119] = 127,
[0][1][1][0][RTW89_ETSI][0][119] = 127,
[0][1][1][0][RTW89_MKK][1][119] = 127,
[0][1][1][0][RTW89_MKK][0][119] = 127,
[0][1][1][0][RTW89_IC][1][119] = 127,
- [0][1][1][0][RTW89_IC][2][119] = 127,
[0][1][1][0][RTW89_KCC][1][119] = 127,
[0][1][1][0][RTW89_KCC][0][119] = 127,
[0][1][1][0][RTW89_ACMA][1][119] = 127,
@@ -40853,13 +40109,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][119] = 127,
[0][1][1][0][RTW89_THAILAND][0][119] = 127,
[0][0][2][0][RTW89_FCC][1][0] = 24,
- [0][0][2][0][RTW89_FCC][2][0] = 56,
[0][0][2][0][RTW89_ETSI][1][0] = 66,
[0][0][2][0][RTW89_ETSI][0][0] = 28,
[0][0][2][0][RTW89_MKK][1][0] = 66,
[0][0][2][0][RTW89_MKK][0][0] = 26,
[0][0][2][0][RTW89_IC][1][0] = 24,
- [0][0][2][0][RTW89_IC][2][0] = 56,
[0][0][2][0][RTW89_KCC][1][0] = 24,
[0][0][2][0][RTW89_KCC][0][0] = 24,
[0][0][2][0][RTW89_ACMA][1][0] = 66,
@@ -40872,13 +40126,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][0] = 56,
[0][0][2][0][RTW89_THAILAND][0][0] = 24,
[0][0][2][0][RTW89_FCC][1][2] = 22,
- [0][0][2][0][RTW89_FCC][2][2] = 56,
[0][0][2][0][RTW89_ETSI][1][2] = 66,
[0][0][2][0][RTW89_ETSI][0][2] = 28,
[0][0][2][0][RTW89_MKK][1][2] = 66,
[0][0][2][0][RTW89_MKK][0][2] = 26,
[0][0][2][0][RTW89_IC][1][2] = 22,
- [0][0][2][0][RTW89_IC][2][2] = 56,
[0][0][2][0][RTW89_KCC][1][2] = 24,
[0][0][2][0][RTW89_KCC][0][2] = 24,
[0][0][2][0][RTW89_ACMA][1][2] = 66,
@@ -40891,13 +40143,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][2] = 56,
[0][0][2][0][RTW89_THAILAND][0][2] = 22,
[0][0][2][0][RTW89_FCC][1][4] = 22,
- [0][0][2][0][RTW89_FCC][2][4] = 56,
[0][0][2][0][RTW89_ETSI][1][4] = 66,
[0][0][2][0][RTW89_ETSI][0][4] = 28,
[0][0][2][0][RTW89_MKK][1][4] = 66,
[0][0][2][0][RTW89_MKK][0][4] = 26,
[0][0][2][0][RTW89_IC][1][4] = 22,
- [0][0][2][0][RTW89_IC][2][4] = 56,
[0][0][2][0][RTW89_KCC][1][4] = 24,
[0][0][2][0][RTW89_KCC][0][4] = 24,
[0][0][2][0][RTW89_ACMA][1][4] = 66,
@@ -40910,13 +40160,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][4] = 56,
[0][0][2][0][RTW89_THAILAND][0][4] = 22,
[0][0][2][0][RTW89_FCC][1][6] = 22,
- [0][0][2][0][RTW89_FCC][2][6] = 56,
[0][0][2][0][RTW89_ETSI][1][6] = 66,
[0][0][2][0][RTW89_ETSI][0][6] = 28,
[0][0][2][0][RTW89_MKK][1][6] = 66,
[0][0][2][0][RTW89_MKK][0][6] = 26,
[0][0][2][0][RTW89_IC][1][6] = 22,
- [0][0][2][0][RTW89_IC][2][6] = 56,
[0][0][2][0][RTW89_KCC][1][6] = 24,
[0][0][2][0][RTW89_KCC][0][6] = 24,
[0][0][2][0][RTW89_ACMA][1][6] = 66,
@@ -40929,13 +40177,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][6] = 56,
[0][0][2][0][RTW89_THAILAND][0][6] = 22,
[0][0][2][0][RTW89_FCC][1][8] = 22,
- [0][0][2][0][RTW89_FCC][2][8] = 56,
[0][0][2][0][RTW89_ETSI][1][8] = 66,
[0][0][2][0][RTW89_ETSI][0][8] = 28,
[0][0][2][0][RTW89_MKK][1][8] = 66,
[0][0][2][0][RTW89_MKK][0][8] = 26,
[0][0][2][0][RTW89_IC][1][8] = 22,
- [0][0][2][0][RTW89_IC][2][8] = 56,
[0][0][2][0][RTW89_KCC][1][8] = 24,
[0][0][2][0][RTW89_KCC][0][8] = 24,
[0][0][2][0][RTW89_ACMA][1][8] = 66,
@@ -40948,13 +40194,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][8] = 56,
[0][0][2][0][RTW89_THAILAND][0][8] = 22,
[0][0][2][0][RTW89_FCC][1][10] = 22,
- [0][0][2][0][RTW89_FCC][2][10] = 56,
[0][0][2][0][RTW89_ETSI][1][10] = 66,
[0][0][2][0][RTW89_ETSI][0][10] = 28,
[0][0][2][0][RTW89_MKK][1][10] = 66,
[0][0][2][0][RTW89_MKK][0][10] = 26,
[0][0][2][0][RTW89_IC][1][10] = 22,
- [0][0][2][0][RTW89_IC][2][10] = 56,
[0][0][2][0][RTW89_KCC][1][10] = 24,
[0][0][2][0][RTW89_KCC][0][10] = 24,
[0][0][2][0][RTW89_ACMA][1][10] = 66,
@@ -40967,13 +40211,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][10] = 56,
[0][0][2][0][RTW89_THAILAND][0][10] = 22,
[0][0][2][0][RTW89_FCC][1][12] = 22,
- [0][0][2][0][RTW89_FCC][2][12] = 56,
[0][0][2][0][RTW89_ETSI][1][12] = 66,
[0][0][2][0][RTW89_ETSI][0][12] = 28,
[0][0][2][0][RTW89_MKK][1][12] = 66,
[0][0][2][0][RTW89_MKK][0][12] = 26,
[0][0][2][0][RTW89_IC][1][12] = 22,
- [0][0][2][0][RTW89_IC][2][12] = 56,
[0][0][2][0][RTW89_KCC][1][12] = 24,
[0][0][2][0][RTW89_KCC][0][12] = 24,
[0][0][2][0][RTW89_ACMA][1][12] = 66,
@@ -40986,13 +40228,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][12] = 56,
[0][0][2][0][RTW89_THAILAND][0][12] = 22,
[0][0][2][0][RTW89_FCC][1][14] = 22,
- [0][0][2][0][RTW89_FCC][2][14] = 56,
[0][0][2][0][RTW89_ETSI][1][14] = 66,
[0][0][2][0][RTW89_ETSI][0][14] = 28,
[0][0][2][0][RTW89_MKK][1][14] = 66,
[0][0][2][0][RTW89_MKK][0][14] = 26,
[0][0][2][0][RTW89_IC][1][14] = 22,
- [0][0][2][0][RTW89_IC][2][14] = 56,
[0][0][2][0][RTW89_KCC][1][14] = 24,
[0][0][2][0][RTW89_KCC][0][14] = 24,
[0][0][2][0][RTW89_ACMA][1][14] = 66,
@@ -41005,13 +40245,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][14] = 56,
[0][0][2][0][RTW89_THAILAND][0][14] = 22,
[0][0][2][0][RTW89_FCC][1][15] = 22,
- [0][0][2][0][RTW89_FCC][2][15] = 56,
[0][0][2][0][RTW89_ETSI][1][15] = 66,
[0][0][2][0][RTW89_ETSI][0][15] = 28,
[0][0][2][0][RTW89_MKK][1][15] = 66,
[0][0][2][0][RTW89_MKK][0][15] = 26,
[0][0][2][0][RTW89_IC][1][15] = 22,
- [0][0][2][0][RTW89_IC][2][15] = 56,
[0][0][2][0][RTW89_KCC][1][15] = 24,
[0][0][2][0][RTW89_KCC][0][15] = 24,
[0][0][2][0][RTW89_ACMA][1][15] = 66,
@@ -41024,13 +40262,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][15] = 56,
[0][0][2][0][RTW89_THAILAND][0][15] = 22,
[0][0][2][0][RTW89_FCC][1][17] = 22,
- [0][0][2][0][RTW89_FCC][2][17] = 56,
[0][0][2][0][RTW89_ETSI][1][17] = 66,
[0][0][2][0][RTW89_ETSI][0][17] = 28,
[0][0][2][0][RTW89_MKK][1][17] = 66,
[0][0][2][0][RTW89_MKK][0][17] = 26,
[0][0][2][0][RTW89_IC][1][17] = 22,
- [0][0][2][0][RTW89_IC][2][17] = 56,
[0][0][2][0][RTW89_KCC][1][17] = 24,
[0][0][2][0][RTW89_KCC][0][17] = 24,
[0][0][2][0][RTW89_ACMA][1][17] = 66,
@@ -41043,13 +40279,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][17] = 56,
[0][0][2][0][RTW89_THAILAND][0][17] = 22,
[0][0][2][0][RTW89_FCC][1][19] = 22,
- [0][0][2][0][RTW89_FCC][2][19] = 56,
[0][0][2][0][RTW89_ETSI][1][19] = 66,
[0][0][2][0][RTW89_ETSI][0][19] = 28,
[0][0][2][0][RTW89_MKK][1][19] = 66,
[0][0][2][0][RTW89_MKK][0][19] = 26,
[0][0][2][0][RTW89_IC][1][19] = 22,
- [0][0][2][0][RTW89_IC][2][19] = 56,
[0][0][2][0][RTW89_KCC][1][19] = 24,
[0][0][2][0][RTW89_KCC][0][19] = 24,
[0][0][2][0][RTW89_ACMA][1][19] = 66,
@@ -41062,13 +40296,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][19] = 56,
[0][0][2][0][RTW89_THAILAND][0][19] = 22,
[0][0][2][0][RTW89_FCC][1][21] = 22,
- [0][0][2][0][RTW89_FCC][2][21] = 56,
[0][0][2][0][RTW89_ETSI][1][21] = 66,
[0][0][2][0][RTW89_ETSI][0][21] = 28,
[0][0][2][0][RTW89_MKK][1][21] = 66,
[0][0][2][0][RTW89_MKK][0][21] = 26,
[0][0][2][0][RTW89_IC][1][21] = 22,
- [0][0][2][0][RTW89_IC][2][21] = 56,
[0][0][2][0][RTW89_KCC][1][21] = 24,
[0][0][2][0][RTW89_KCC][0][21] = 24,
[0][0][2][0][RTW89_ACMA][1][21] = 66,
@@ -41081,13 +40313,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][21] = 56,
[0][0][2][0][RTW89_THAILAND][0][21] = 22,
[0][0][2][0][RTW89_FCC][1][23] = 22,
- [0][0][2][0][RTW89_FCC][2][23] = 70,
[0][0][2][0][RTW89_ETSI][1][23] = 66,
[0][0][2][0][RTW89_ETSI][0][23] = 28,
[0][0][2][0][RTW89_MKK][1][23] = 66,
[0][0][2][0][RTW89_MKK][0][23] = 26,
[0][0][2][0][RTW89_IC][1][23] = 22,
- [0][0][2][0][RTW89_IC][2][23] = 70,
[0][0][2][0][RTW89_KCC][1][23] = 24,
[0][0][2][0][RTW89_KCC][0][23] = 26,
[0][0][2][0][RTW89_ACMA][1][23] = 66,
@@ -41100,13 +40330,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][23] = 66,
[0][0][2][0][RTW89_THAILAND][0][23] = 22,
[0][0][2][0][RTW89_FCC][1][25] = 22,
- [0][0][2][0][RTW89_FCC][2][25] = 70,
[0][0][2][0][RTW89_ETSI][1][25] = 66,
[0][0][2][0][RTW89_ETSI][0][25] = 28,
[0][0][2][0][RTW89_MKK][1][25] = 66,
[0][0][2][0][RTW89_MKK][0][25] = 26,
[0][0][2][0][RTW89_IC][1][25] = 22,
- [0][0][2][0][RTW89_IC][2][25] = 70,
[0][0][2][0][RTW89_KCC][1][25] = 24,
[0][0][2][0][RTW89_KCC][0][25] = 26,
[0][0][2][0][RTW89_ACMA][1][25] = 66,
@@ -41119,13 +40347,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][25] = 66,
[0][0][2][0][RTW89_THAILAND][0][25] = 22,
[0][0][2][0][RTW89_FCC][1][27] = 22,
- [0][0][2][0][RTW89_FCC][2][27] = 70,
[0][0][2][0][RTW89_ETSI][1][27] = 66,
[0][0][2][0][RTW89_ETSI][0][27] = 28,
[0][0][2][0][RTW89_MKK][1][27] = 66,
[0][0][2][0][RTW89_MKK][0][27] = 26,
[0][0][2][0][RTW89_IC][1][27] = 22,
- [0][0][2][0][RTW89_IC][2][27] = 70,
[0][0][2][0][RTW89_KCC][1][27] = 24,
[0][0][2][0][RTW89_KCC][0][27] = 26,
[0][0][2][0][RTW89_ACMA][1][27] = 66,
@@ -41138,13 +40364,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][27] = 66,
[0][0][2][0][RTW89_THAILAND][0][27] = 22,
[0][0][2][0][RTW89_FCC][1][29] = 22,
- [0][0][2][0][RTW89_FCC][2][29] = 70,
[0][0][2][0][RTW89_ETSI][1][29] = 66,
[0][0][2][0][RTW89_ETSI][0][29] = 28,
[0][0][2][0][RTW89_MKK][1][29] = 66,
[0][0][2][0][RTW89_MKK][0][29] = 26,
[0][0][2][0][RTW89_IC][1][29] = 22,
- [0][0][2][0][RTW89_IC][2][29] = 70,
[0][0][2][0][RTW89_KCC][1][29] = 24,
[0][0][2][0][RTW89_KCC][0][29] = 26,
[0][0][2][0][RTW89_ACMA][1][29] = 66,
@@ -41157,13 +40381,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][29] = 66,
[0][0][2][0][RTW89_THAILAND][0][29] = 22,
[0][0][2][0][RTW89_FCC][1][30] = 22,
- [0][0][2][0][RTW89_FCC][2][30] = 70,
[0][0][2][0][RTW89_ETSI][1][30] = 66,
[0][0][2][0][RTW89_ETSI][0][30] = 28,
[0][0][2][0][RTW89_MKK][1][30] = 66,
[0][0][2][0][RTW89_MKK][0][30] = 26,
[0][0][2][0][RTW89_IC][1][30] = 22,
- [0][0][2][0][RTW89_IC][2][30] = 70,
[0][0][2][0][RTW89_KCC][1][30] = 24,
[0][0][2][0][RTW89_KCC][0][30] = 26,
[0][0][2][0][RTW89_ACMA][1][30] = 66,
@@ -41176,13 +40398,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][30] = 66,
[0][0][2][0][RTW89_THAILAND][0][30] = 22,
[0][0][2][0][RTW89_FCC][1][32] = 22,
- [0][0][2][0][RTW89_FCC][2][32] = 70,
[0][0][2][0][RTW89_ETSI][1][32] = 66,
[0][0][2][0][RTW89_ETSI][0][32] = 28,
[0][0][2][0][RTW89_MKK][1][32] = 66,
[0][0][2][0][RTW89_MKK][0][32] = 26,
[0][0][2][0][RTW89_IC][1][32] = 22,
- [0][0][2][0][RTW89_IC][2][32] = 70,
[0][0][2][0][RTW89_KCC][1][32] = 24,
[0][0][2][0][RTW89_KCC][0][32] = 26,
[0][0][2][0][RTW89_ACMA][1][32] = 66,
@@ -41195,13 +40415,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][32] = 66,
[0][0][2][0][RTW89_THAILAND][0][32] = 22,
[0][0][2][0][RTW89_FCC][1][34] = 22,
- [0][0][2][0][RTW89_FCC][2][34] = 70,
[0][0][2][0][RTW89_ETSI][1][34] = 66,
[0][0][2][0][RTW89_ETSI][0][34] = 28,
[0][0][2][0][RTW89_MKK][1][34] = 66,
[0][0][2][0][RTW89_MKK][0][34] = 26,
[0][0][2][0][RTW89_IC][1][34] = 22,
- [0][0][2][0][RTW89_IC][2][34] = 70,
[0][0][2][0][RTW89_KCC][1][34] = 24,
[0][0][2][0][RTW89_KCC][0][34] = 26,
[0][0][2][0][RTW89_ACMA][1][34] = 66,
@@ -41214,13 +40432,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][34] = 66,
[0][0][2][0][RTW89_THAILAND][0][34] = 22,
[0][0][2][0][RTW89_FCC][1][36] = 22,
- [0][0][2][0][RTW89_FCC][2][36] = 70,
[0][0][2][0][RTW89_ETSI][1][36] = 66,
[0][0][2][0][RTW89_ETSI][0][36] = 28,
[0][0][2][0][RTW89_MKK][1][36] = 66,
[0][0][2][0][RTW89_MKK][0][36] = 26,
[0][0][2][0][RTW89_IC][1][36] = 22,
- [0][0][2][0][RTW89_IC][2][36] = 70,
[0][0][2][0][RTW89_KCC][1][36] = 24,
[0][0][2][0][RTW89_KCC][0][36] = 26,
[0][0][2][0][RTW89_ACMA][1][36] = 66,
@@ -41233,13 +40449,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][36] = 66,
[0][0][2][0][RTW89_THAILAND][0][36] = 22,
[0][0][2][0][RTW89_FCC][1][38] = 22,
- [0][0][2][0][RTW89_FCC][2][38] = 70,
[0][0][2][0][RTW89_ETSI][1][38] = 66,
[0][0][2][0][RTW89_ETSI][0][38] = 28,
[0][0][2][0][RTW89_MKK][1][38] = 66,
[0][0][2][0][RTW89_MKK][0][38] = 26,
[0][0][2][0][RTW89_IC][1][38] = 22,
- [0][0][2][0][RTW89_IC][2][38] = 70,
[0][0][2][0][RTW89_KCC][1][38] = 24,
[0][0][2][0][RTW89_KCC][0][38] = 26,
[0][0][2][0][RTW89_ACMA][1][38] = 66,
@@ -41252,13 +40466,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][38] = 66,
[0][0][2][0][RTW89_THAILAND][0][38] = 22,
[0][0][2][0][RTW89_FCC][1][40] = 22,
- [0][0][2][0][RTW89_FCC][2][40] = 70,
[0][0][2][0][RTW89_ETSI][1][40] = 66,
[0][0][2][0][RTW89_ETSI][0][40] = 28,
[0][0][2][0][RTW89_MKK][1][40] = 66,
[0][0][2][0][RTW89_MKK][0][40] = 26,
[0][0][2][0][RTW89_IC][1][40] = 22,
- [0][0][2][0][RTW89_IC][2][40] = 70,
[0][0][2][0][RTW89_KCC][1][40] = 24,
[0][0][2][0][RTW89_KCC][0][40] = 26,
[0][0][2][0][RTW89_ACMA][1][40] = 66,
@@ -41271,13 +40483,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][40] = 66,
[0][0][2][0][RTW89_THAILAND][0][40] = 22,
[0][0][2][0][RTW89_FCC][1][42] = 22,
- [0][0][2][0][RTW89_FCC][2][42] = 70,
[0][0][2][0][RTW89_ETSI][1][42] = 66,
[0][0][2][0][RTW89_ETSI][0][42] = 28,
[0][0][2][0][RTW89_MKK][1][42] = 66,
[0][0][2][0][RTW89_MKK][0][42] = 26,
[0][0][2][0][RTW89_IC][1][42] = 22,
- [0][0][2][0][RTW89_IC][2][42] = 70,
[0][0][2][0][RTW89_KCC][1][42] = 24,
[0][0][2][0][RTW89_KCC][0][42] = 26,
[0][0][2][0][RTW89_ACMA][1][42] = 66,
@@ -41290,13 +40500,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][42] = 66,
[0][0][2][0][RTW89_THAILAND][0][42] = 22,
[0][0][2][0][RTW89_FCC][1][44] = 22,
- [0][0][2][0][RTW89_FCC][2][44] = 70,
[0][0][2][0][RTW89_ETSI][1][44] = 66,
[0][0][2][0][RTW89_ETSI][0][44] = 30,
[0][0][2][0][RTW89_MKK][1][44] = 44,
[0][0][2][0][RTW89_MKK][0][44] = 28,
[0][0][2][0][RTW89_IC][1][44] = 22,
- [0][0][2][0][RTW89_IC][2][44] = 70,
[0][0][2][0][RTW89_KCC][1][44] = 24,
[0][0][2][0][RTW89_KCC][0][44] = 26,
[0][0][2][0][RTW89_ACMA][1][44] = 66,
@@ -41309,13 +40517,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][44] = 68,
[0][0][2][0][RTW89_THAILAND][0][44] = 22,
[0][0][2][0][RTW89_FCC][1][45] = 22,
- [0][0][2][0][RTW89_FCC][2][45] = 127,
[0][0][2][0][RTW89_ETSI][1][45] = 127,
[0][0][2][0][RTW89_ETSI][0][45] = 127,
[0][0][2][0][RTW89_MKK][1][45] = 127,
[0][0][2][0][RTW89_MKK][0][45] = 127,
[0][0][2][0][RTW89_IC][1][45] = 22,
- [0][0][2][0][RTW89_IC][2][45] = 70,
[0][0][2][0][RTW89_KCC][1][45] = 24,
[0][0][2][0][RTW89_KCC][0][45] = 127,
[0][0][2][0][RTW89_ACMA][1][45] = 127,
@@ -41328,13 +40534,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][45] = 127,
[0][0][2][0][RTW89_THAILAND][0][45] = 127,
[0][0][2][0][RTW89_FCC][1][47] = 22,
- [0][0][2][0][RTW89_FCC][2][47] = 127,
[0][0][2][0][RTW89_ETSI][1][47] = 127,
[0][0][2][0][RTW89_ETSI][0][47] = 127,
[0][0][2][0][RTW89_MKK][1][47] = 127,
[0][0][2][0][RTW89_MKK][0][47] = 127,
[0][0][2][0][RTW89_IC][1][47] = 22,
- [0][0][2][0][RTW89_IC][2][47] = 70,
[0][0][2][0][RTW89_KCC][1][47] = 24,
[0][0][2][0][RTW89_KCC][0][47] = 127,
[0][0][2][0][RTW89_ACMA][1][47] = 127,
@@ -41347,13 +40551,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][47] = 127,
[0][0][2][0][RTW89_THAILAND][0][47] = 127,
[0][0][2][0][RTW89_FCC][1][49] = 24,
- [0][0][2][0][RTW89_FCC][2][49] = 127,
[0][0][2][0][RTW89_ETSI][1][49] = 127,
[0][0][2][0][RTW89_ETSI][0][49] = 127,
[0][0][2][0][RTW89_MKK][1][49] = 127,
[0][0][2][0][RTW89_MKK][0][49] = 127,
[0][0][2][0][RTW89_IC][1][49] = 24,
- [0][0][2][0][RTW89_IC][2][49] = 70,
[0][0][2][0][RTW89_KCC][1][49] = 24,
[0][0][2][0][RTW89_KCC][0][49] = 127,
[0][0][2][0][RTW89_ACMA][1][49] = 127,
@@ -41366,13 +40568,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][49] = 127,
[0][0][2][0][RTW89_THAILAND][0][49] = 127,
[0][0][2][0][RTW89_FCC][1][51] = 22,
- [0][0][2][0][RTW89_FCC][2][51] = 127,
[0][0][2][0][RTW89_ETSI][1][51] = 127,
[0][0][2][0][RTW89_ETSI][0][51] = 127,
[0][0][2][0][RTW89_MKK][1][51] = 127,
[0][0][2][0][RTW89_MKK][0][51] = 127,
[0][0][2][0][RTW89_IC][1][51] = 22,
- [0][0][2][0][RTW89_IC][2][51] = 70,
[0][0][2][0][RTW89_KCC][1][51] = 24,
[0][0][2][0][RTW89_KCC][0][51] = 127,
[0][0][2][0][RTW89_ACMA][1][51] = 127,
@@ -41385,13 +40585,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][51] = 127,
[0][0][2][0][RTW89_THAILAND][0][51] = 127,
[0][0][2][0][RTW89_FCC][1][53] = 22,
- [0][0][2][0][RTW89_FCC][2][53] = 127,
[0][0][2][0][RTW89_ETSI][1][53] = 127,
[0][0][2][0][RTW89_ETSI][0][53] = 127,
[0][0][2][0][RTW89_MKK][1][53] = 127,
[0][0][2][0][RTW89_MKK][0][53] = 127,
[0][0][2][0][RTW89_IC][1][53] = 22,
- [0][0][2][0][RTW89_IC][2][53] = 70,
[0][0][2][0][RTW89_KCC][1][53] = 24,
[0][0][2][0][RTW89_KCC][0][53] = 127,
[0][0][2][0][RTW89_ACMA][1][53] = 127,
@@ -41404,13 +40602,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][53] = 127,
[0][0][2][0][RTW89_THAILAND][0][53] = 127,
[0][0][2][0][RTW89_FCC][1][55] = 22,
- [0][0][2][0][RTW89_FCC][2][55] = 68,
[0][0][2][0][RTW89_ETSI][1][55] = 127,
[0][0][2][0][RTW89_ETSI][0][55] = 127,
[0][0][2][0][RTW89_MKK][1][55] = 127,
[0][0][2][0][RTW89_MKK][0][55] = 127,
[0][0][2][0][RTW89_IC][1][55] = 22,
- [0][0][2][0][RTW89_IC][2][55] = 68,
[0][0][2][0][RTW89_KCC][1][55] = 26,
[0][0][2][0][RTW89_KCC][0][55] = 127,
[0][0][2][0][RTW89_ACMA][1][55] = 127,
@@ -41423,13 +40619,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][55] = 127,
[0][0][2][0][RTW89_THAILAND][0][55] = 127,
[0][0][2][0][RTW89_FCC][1][57] = 22,
- [0][0][2][0][RTW89_FCC][2][57] = 68,
[0][0][2][0][RTW89_ETSI][1][57] = 127,
[0][0][2][0][RTW89_ETSI][0][57] = 127,
[0][0][2][0][RTW89_MKK][1][57] = 127,
[0][0][2][0][RTW89_MKK][0][57] = 127,
[0][0][2][0][RTW89_IC][1][57] = 22,
- [0][0][2][0][RTW89_IC][2][57] = 68,
[0][0][2][0][RTW89_KCC][1][57] = 26,
[0][0][2][0][RTW89_KCC][0][57] = 127,
[0][0][2][0][RTW89_ACMA][1][57] = 127,
@@ -41442,13 +40636,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][57] = 127,
[0][0][2][0][RTW89_THAILAND][0][57] = 127,
[0][0][2][0][RTW89_FCC][1][59] = 22,
- [0][0][2][0][RTW89_FCC][2][59] = 68,
[0][0][2][0][RTW89_ETSI][1][59] = 127,
[0][0][2][0][RTW89_ETSI][0][59] = 127,
[0][0][2][0][RTW89_MKK][1][59] = 127,
[0][0][2][0][RTW89_MKK][0][59] = 127,
[0][0][2][0][RTW89_IC][1][59] = 22,
- [0][0][2][0][RTW89_IC][2][59] = 68,
[0][0][2][0][RTW89_KCC][1][59] = 26,
[0][0][2][0][RTW89_KCC][0][59] = 127,
[0][0][2][0][RTW89_ACMA][1][59] = 127,
@@ -41461,13 +40653,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][59] = 127,
[0][0][2][0][RTW89_THAILAND][0][59] = 127,
[0][0][2][0][RTW89_FCC][1][60] = 22,
- [0][0][2][0][RTW89_FCC][2][60] = 68,
[0][0][2][0][RTW89_ETSI][1][60] = 127,
[0][0][2][0][RTW89_ETSI][0][60] = 127,
[0][0][2][0][RTW89_MKK][1][60] = 127,
[0][0][2][0][RTW89_MKK][0][60] = 127,
[0][0][2][0][RTW89_IC][1][60] = 22,
- [0][0][2][0][RTW89_IC][2][60] = 68,
[0][0][2][0][RTW89_KCC][1][60] = 26,
[0][0][2][0][RTW89_KCC][0][60] = 127,
[0][0][2][0][RTW89_ACMA][1][60] = 127,
@@ -41480,13 +40670,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][60] = 127,
[0][0][2][0][RTW89_THAILAND][0][60] = 127,
[0][0][2][0][RTW89_FCC][1][62] = 22,
- [0][0][2][0][RTW89_FCC][2][62] = 68,
[0][0][2][0][RTW89_ETSI][1][62] = 127,
[0][0][2][0][RTW89_ETSI][0][62] = 127,
[0][0][2][0][RTW89_MKK][1][62] = 127,
[0][0][2][0][RTW89_MKK][0][62] = 127,
[0][0][2][0][RTW89_IC][1][62] = 22,
- [0][0][2][0][RTW89_IC][2][62] = 68,
[0][0][2][0][RTW89_KCC][1][62] = 26,
[0][0][2][0][RTW89_KCC][0][62] = 127,
[0][0][2][0][RTW89_ACMA][1][62] = 127,
@@ -41499,13 +40687,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][62] = 127,
[0][0][2][0][RTW89_THAILAND][0][62] = 127,
[0][0][2][0][RTW89_FCC][1][64] = 22,
- [0][0][2][0][RTW89_FCC][2][64] = 68,
[0][0][2][0][RTW89_ETSI][1][64] = 127,
[0][0][2][0][RTW89_ETSI][0][64] = 127,
[0][0][2][0][RTW89_MKK][1][64] = 127,
[0][0][2][0][RTW89_MKK][0][64] = 127,
[0][0][2][0][RTW89_IC][1][64] = 22,
- [0][0][2][0][RTW89_IC][2][64] = 68,
[0][0][2][0][RTW89_KCC][1][64] = 26,
[0][0][2][0][RTW89_KCC][0][64] = 127,
[0][0][2][0][RTW89_ACMA][1][64] = 127,
@@ -41518,13 +40704,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][64] = 127,
[0][0][2][0][RTW89_THAILAND][0][64] = 127,
[0][0][2][0][RTW89_FCC][1][66] = 22,
- [0][0][2][0][RTW89_FCC][2][66] = 68,
[0][0][2][0][RTW89_ETSI][1][66] = 127,
[0][0][2][0][RTW89_ETSI][0][66] = 127,
[0][0][2][0][RTW89_MKK][1][66] = 127,
[0][0][2][0][RTW89_MKK][0][66] = 127,
[0][0][2][0][RTW89_IC][1][66] = 22,
- [0][0][2][0][RTW89_IC][2][66] = 68,
[0][0][2][0][RTW89_KCC][1][66] = 26,
[0][0][2][0][RTW89_KCC][0][66] = 127,
[0][0][2][0][RTW89_ACMA][1][66] = 127,
@@ -41537,13 +40721,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][66] = 127,
[0][0][2][0][RTW89_THAILAND][0][66] = 127,
[0][0][2][0][RTW89_FCC][1][68] = 22,
- [0][0][2][0][RTW89_FCC][2][68] = 68,
[0][0][2][0][RTW89_ETSI][1][68] = 127,
[0][0][2][0][RTW89_ETSI][0][68] = 127,
[0][0][2][0][RTW89_MKK][1][68] = 127,
[0][0][2][0][RTW89_MKK][0][68] = 127,
[0][0][2][0][RTW89_IC][1][68] = 22,
- [0][0][2][0][RTW89_IC][2][68] = 68,
[0][0][2][0][RTW89_KCC][1][68] = 26,
[0][0][2][0][RTW89_KCC][0][68] = 127,
[0][0][2][0][RTW89_ACMA][1][68] = 127,
@@ -41556,13 +40738,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][68] = 127,
[0][0][2][0][RTW89_THAILAND][0][68] = 127,
[0][0][2][0][RTW89_FCC][1][70] = 24,
- [0][0][2][0][RTW89_FCC][2][70] = 68,
[0][0][2][0][RTW89_ETSI][1][70] = 127,
[0][0][2][0][RTW89_ETSI][0][70] = 127,
[0][0][2][0][RTW89_MKK][1][70] = 127,
[0][0][2][0][RTW89_MKK][0][70] = 127,
[0][0][2][0][RTW89_IC][1][70] = 24,
- [0][0][2][0][RTW89_IC][2][70] = 68,
[0][0][2][0][RTW89_KCC][1][70] = 26,
[0][0][2][0][RTW89_KCC][0][70] = 127,
[0][0][2][0][RTW89_ACMA][1][70] = 127,
@@ -41575,13 +40755,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][70] = 127,
[0][0][2][0][RTW89_THAILAND][0][70] = 127,
[0][0][2][0][RTW89_FCC][1][72] = 22,
- [0][0][2][0][RTW89_FCC][2][72] = 68,
[0][0][2][0][RTW89_ETSI][1][72] = 127,
[0][0][2][0][RTW89_ETSI][0][72] = 127,
[0][0][2][0][RTW89_MKK][1][72] = 127,
[0][0][2][0][RTW89_MKK][0][72] = 127,
[0][0][2][0][RTW89_IC][1][72] = 22,
- [0][0][2][0][RTW89_IC][2][72] = 68,
[0][0][2][0][RTW89_KCC][1][72] = 26,
[0][0][2][0][RTW89_KCC][0][72] = 127,
[0][0][2][0][RTW89_ACMA][1][72] = 127,
@@ -41594,13 +40772,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][72] = 127,
[0][0][2][0][RTW89_THAILAND][0][72] = 127,
[0][0][2][0][RTW89_FCC][1][74] = 22,
- [0][0][2][0][RTW89_FCC][2][74] = 68,
[0][0][2][0][RTW89_ETSI][1][74] = 127,
[0][0][2][0][RTW89_ETSI][0][74] = 127,
[0][0][2][0][RTW89_MKK][1][74] = 127,
[0][0][2][0][RTW89_MKK][0][74] = 127,
[0][0][2][0][RTW89_IC][1][74] = 22,
- [0][0][2][0][RTW89_IC][2][74] = 68,
[0][0][2][0][RTW89_KCC][1][74] = 26,
[0][0][2][0][RTW89_KCC][0][74] = 127,
[0][0][2][0][RTW89_ACMA][1][74] = 127,
@@ -41613,13 +40789,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][74] = 127,
[0][0][2][0][RTW89_THAILAND][0][74] = 127,
[0][0][2][0][RTW89_FCC][1][75] = 22,
- [0][0][2][0][RTW89_FCC][2][75] = 68,
[0][0][2][0][RTW89_ETSI][1][75] = 127,
[0][0][2][0][RTW89_ETSI][0][75] = 127,
[0][0][2][0][RTW89_MKK][1][75] = 127,
[0][0][2][0][RTW89_MKK][0][75] = 127,
[0][0][2][0][RTW89_IC][1][75] = 22,
- [0][0][2][0][RTW89_IC][2][75] = 68,
[0][0][2][0][RTW89_KCC][1][75] = 26,
[0][0][2][0][RTW89_KCC][0][75] = 127,
[0][0][2][0][RTW89_ACMA][1][75] = 127,
@@ -41632,13 +40806,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][75] = 127,
[0][0][2][0][RTW89_THAILAND][0][75] = 127,
[0][0][2][0][RTW89_FCC][1][77] = 22,
- [0][0][2][0][RTW89_FCC][2][77] = 68,
[0][0][2][0][RTW89_ETSI][1][77] = 127,
[0][0][2][0][RTW89_ETSI][0][77] = 127,
[0][0][2][0][RTW89_MKK][1][77] = 127,
[0][0][2][0][RTW89_MKK][0][77] = 127,
[0][0][2][0][RTW89_IC][1][77] = 22,
- [0][0][2][0][RTW89_IC][2][77] = 68,
[0][0][2][0][RTW89_KCC][1][77] = 26,
[0][0][2][0][RTW89_KCC][0][77] = 127,
[0][0][2][0][RTW89_ACMA][1][77] = 127,
@@ -41651,13 +40823,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][77] = 127,
[0][0][2][0][RTW89_THAILAND][0][77] = 127,
[0][0][2][0][RTW89_FCC][1][79] = 22,
- [0][0][2][0][RTW89_FCC][2][79] = 68,
[0][0][2][0][RTW89_ETSI][1][79] = 127,
[0][0][2][0][RTW89_ETSI][0][79] = 127,
[0][0][2][0][RTW89_MKK][1][79] = 127,
[0][0][2][0][RTW89_MKK][0][79] = 127,
[0][0][2][0][RTW89_IC][1][79] = 22,
- [0][0][2][0][RTW89_IC][2][79] = 68,
[0][0][2][0][RTW89_KCC][1][79] = 26,
[0][0][2][0][RTW89_KCC][0][79] = 127,
[0][0][2][0][RTW89_ACMA][1][79] = 127,
@@ -41670,13 +40840,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][79] = 127,
[0][0][2][0][RTW89_THAILAND][0][79] = 127,
[0][0][2][0][RTW89_FCC][1][81] = 22,
- [0][0][2][0][RTW89_FCC][2][81] = 68,
[0][0][2][0][RTW89_ETSI][1][81] = 127,
[0][0][2][0][RTW89_ETSI][0][81] = 127,
[0][0][2][0][RTW89_MKK][1][81] = 127,
[0][0][2][0][RTW89_MKK][0][81] = 127,
[0][0][2][0][RTW89_IC][1][81] = 22,
- [0][0][2][0][RTW89_IC][2][81] = 68,
[0][0][2][0][RTW89_KCC][1][81] = 26,
[0][0][2][0][RTW89_KCC][0][81] = 127,
[0][0][2][0][RTW89_ACMA][1][81] = 127,
@@ -41689,13 +40857,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][81] = 127,
[0][0][2][0][RTW89_THAILAND][0][81] = 127,
[0][0][2][0][RTW89_FCC][1][83] = 22,
- [0][0][2][0][RTW89_FCC][2][83] = 68,
[0][0][2][0][RTW89_ETSI][1][83] = 127,
[0][0][2][0][RTW89_ETSI][0][83] = 127,
[0][0][2][0][RTW89_MKK][1][83] = 127,
[0][0][2][0][RTW89_MKK][0][83] = 127,
[0][0][2][0][RTW89_IC][1][83] = 22,
- [0][0][2][0][RTW89_IC][2][83] = 68,
[0][0][2][0][RTW89_KCC][1][83] = 32,
[0][0][2][0][RTW89_KCC][0][83] = 127,
[0][0][2][0][RTW89_ACMA][1][83] = 127,
@@ -41708,13 +40874,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][83] = 127,
[0][0][2][0][RTW89_THAILAND][0][83] = 127,
[0][0][2][0][RTW89_FCC][1][85] = 22,
- [0][0][2][0][RTW89_FCC][2][85] = 68,
[0][0][2][0][RTW89_ETSI][1][85] = 127,
[0][0][2][0][RTW89_ETSI][0][85] = 127,
[0][0][2][0][RTW89_MKK][1][85] = 127,
[0][0][2][0][RTW89_MKK][0][85] = 127,
[0][0][2][0][RTW89_IC][1][85] = 22,
- [0][0][2][0][RTW89_IC][2][85] = 68,
[0][0][2][0][RTW89_KCC][1][85] = 32,
[0][0][2][0][RTW89_KCC][0][85] = 127,
[0][0][2][0][RTW89_ACMA][1][85] = 127,
@@ -41727,13 +40891,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][85] = 127,
[0][0][2][0][RTW89_THAILAND][0][85] = 127,
[0][0][2][0][RTW89_FCC][1][87] = 22,
- [0][0][2][0][RTW89_FCC][2][87] = 127,
[0][0][2][0][RTW89_ETSI][1][87] = 127,
[0][0][2][0][RTW89_ETSI][0][87] = 127,
[0][0][2][0][RTW89_MKK][1][87] = 127,
[0][0][2][0][RTW89_MKK][0][87] = 127,
[0][0][2][0][RTW89_IC][1][87] = 22,
- [0][0][2][0][RTW89_IC][2][87] = 127,
[0][0][2][0][RTW89_KCC][1][87] = 32,
[0][0][2][0][RTW89_KCC][0][87] = 127,
[0][0][2][0][RTW89_ACMA][1][87] = 127,
@@ -41746,13 +40908,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][87] = 127,
[0][0][2][0][RTW89_THAILAND][0][87] = 127,
[0][0][2][0][RTW89_FCC][1][89] = 22,
- [0][0][2][0][RTW89_FCC][2][89] = 127,
[0][0][2][0][RTW89_ETSI][1][89] = 127,
[0][0][2][0][RTW89_ETSI][0][89] = 127,
[0][0][2][0][RTW89_MKK][1][89] = 127,
[0][0][2][0][RTW89_MKK][0][89] = 127,
[0][0][2][0][RTW89_IC][1][89] = 22,
- [0][0][2][0][RTW89_IC][2][89] = 127,
[0][0][2][0][RTW89_KCC][1][89] = 32,
[0][0][2][0][RTW89_KCC][0][89] = 127,
[0][0][2][0][RTW89_ACMA][1][89] = 127,
@@ -41765,13 +40925,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][89] = 127,
[0][0][2][0][RTW89_THAILAND][0][89] = 127,
[0][0][2][0][RTW89_FCC][1][90] = 22,
- [0][0][2][0][RTW89_FCC][2][90] = 127,
[0][0][2][0][RTW89_ETSI][1][90] = 127,
[0][0][2][0][RTW89_ETSI][0][90] = 127,
[0][0][2][0][RTW89_MKK][1][90] = 127,
[0][0][2][0][RTW89_MKK][0][90] = 127,
[0][0][2][0][RTW89_IC][1][90] = 22,
- [0][0][2][0][RTW89_IC][2][90] = 127,
[0][0][2][0][RTW89_KCC][1][90] = 32,
[0][0][2][0][RTW89_KCC][0][90] = 127,
[0][0][2][0][RTW89_ACMA][1][90] = 127,
@@ -41784,13 +40942,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][90] = 127,
[0][0][2][0][RTW89_THAILAND][0][90] = 127,
[0][0][2][0][RTW89_FCC][1][92] = 22,
- [0][0][2][0][RTW89_FCC][2][92] = 127,
[0][0][2][0][RTW89_ETSI][1][92] = 127,
[0][0][2][0][RTW89_ETSI][0][92] = 127,
[0][0][2][0][RTW89_MKK][1][92] = 127,
[0][0][2][0][RTW89_MKK][0][92] = 127,
[0][0][2][0][RTW89_IC][1][92] = 22,
- [0][0][2][0][RTW89_IC][2][92] = 127,
[0][0][2][0][RTW89_KCC][1][92] = 32,
[0][0][2][0][RTW89_KCC][0][92] = 127,
[0][0][2][0][RTW89_ACMA][1][92] = 127,
@@ -41803,13 +40959,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][92] = 127,
[0][0][2][0][RTW89_THAILAND][0][92] = 127,
[0][0][2][0][RTW89_FCC][1][94] = 22,
- [0][0][2][0][RTW89_FCC][2][94] = 127,
[0][0][2][0][RTW89_ETSI][1][94] = 127,
[0][0][2][0][RTW89_ETSI][0][94] = 127,
[0][0][2][0][RTW89_MKK][1][94] = 127,
[0][0][2][0][RTW89_MKK][0][94] = 127,
[0][0][2][0][RTW89_IC][1][94] = 22,
- [0][0][2][0][RTW89_IC][2][94] = 127,
[0][0][2][0][RTW89_KCC][1][94] = 32,
[0][0][2][0][RTW89_KCC][0][94] = 127,
[0][0][2][0][RTW89_ACMA][1][94] = 127,
@@ -41822,13 +40976,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][94] = 127,
[0][0][2][0][RTW89_THAILAND][0][94] = 127,
[0][0][2][0][RTW89_FCC][1][96] = 22,
- [0][0][2][0][RTW89_FCC][2][96] = 127,
[0][0][2][0][RTW89_ETSI][1][96] = 127,
[0][0][2][0][RTW89_ETSI][0][96] = 127,
[0][0][2][0][RTW89_MKK][1][96] = 127,
[0][0][2][0][RTW89_MKK][0][96] = 127,
[0][0][2][0][RTW89_IC][1][96] = 22,
- [0][0][2][0][RTW89_IC][2][96] = 127,
[0][0][2][0][RTW89_KCC][1][96] = 32,
[0][0][2][0][RTW89_KCC][0][96] = 127,
[0][0][2][0][RTW89_ACMA][1][96] = 127,
@@ -41841,13 +40993,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][96] = 127,
[0][0][2][0][RTW89_THAILAND][0][96] = 127,
[0][0][2][0][RTW89_FCC][1][98] = 22,
- [0][0][2][0][RTW89_FCC][2][98] = 127,
[0][0][2][0][RTW89_ETSI][1][98] = 127,
[0][0][2][0][RTW89_ETSI][0][98] = 127,
[0][0][2][0][RTW89_MKK][1][98] = 127,
[0][0][2][0][RTW89_MKK][0][98] = 127,
[0][0][2][0][RTW89_IC][1][98] = 22,
- [0][0][2][0][RTW89_IC][2][98] = 127,
[0][0][2][0][RTW89_KCC][1][98] = 32,
[0][0][2][0][RTW89_KCC][0][98] = 127,
[0][0][2][0][RTW89_ACMA][1][98] = 127,
@@ -41860,13 +41010,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][98] = 127,
[0][0][2][0][RTW89_THAILAND][0][98] = 127,
[0][0][2][0][RTW89_FCC][1][100] = 22,
- [0][0][2][0][RTW89_FCC][2][100] = 127,
[0][0][2][0][RTW89_ETSI][1][100] = 127,
[0][0][2][0][RTW89_ETSI][0][100] = 127,
[0][0][2][0][RTW89_MKK][1][100] = 127,
[0][0][2][0][RTW89_MKK][0][100] = 127,
[0][0][2][0][RTW89_IC][1][100] = 22,
- [0][0][2][0][RTW89_IC][2][100] = 127,
[0][0][2][0][RTW89_KCC][1][100] = 32,
[0][0][2][0][RTW89_KCC][0][100] = 127,
[0][0][2][0][RTW89_ACMA][1][100] = 127,
@@ -41879,13 +41027,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][100] = 127,
[0][0][2][0][RTW89_THAILAND][0][100] = 127,
[0][0][2][0][RTW89_FCC][1][102] = 22,
- [0][0][2][0][RTW89_FCC][2][102] = 127,
[0][0][2][0][RTW89_ETSI][1][102] = 127,
[0][0][2][0][RTW89_ETSI][0][102] = 127,
[0][0][2][0][RTW89_MKK][1][102] = 127,
[0][0][2][0][RTW89_MKK][0][102] = 127,
[0][0][2][0][RTW89_IC][1][102] = 22,
- [0][0][2][0][RTW89_IC][2][102] = 127,
[0][0][2][0][RTW89_KCC][1][102] = 32,
[0][0][2][0][RTW89_KCC][0][102] = 127,
[0][0][2][0][RTW89_ACMA][1][102] = 127,
@@ -41898,13 +41044,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][102] = 127,
[0][0][2][0][RTW89_THAILAND][0][102] = 127,
[0][0][2][0][RTW89_FCC][1][104] = 22,
- [0][0][2][0][RTW89_FCC][2][104] = 127,
[0][0][2][0][RTW89_ETSI][1][104] = 127,
[0][0][2][0][RTW89_ETSI][0][104] = 127,
[0][0][2][0][RTW89_MKK][1][104] = 127,
[0][0][2][0][RTW89_MKK][0][104] = 127,
[0][0][2][0][RTW89_IC][1][104] = 22,
- [0][0][2][0][RTW89_IC][2][104] = 127,
[0][0][2][0][RTW89_KCC][1][104] = 32,
[0][0][2][0][RTW89_KCC][0][104] = 127,
[0][0][2][0][RTW89_ACMA][1][104] = 127,
@@ -41917,13 +41061,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][104] = 127,
[0][0][2][0][RTW89_THAILAND][0][104] = 127,
[0][0][2][0][RTW89_FCC][1][105] = 22,
- [0][0][2][0][RTW89_FCC][2][105] = 127,
[0][0][2][0][RTW89_ETSI][1][105] = 127,
[0][0][2][0][RTW89_ETSI][0][105] = 127,
[0][0][2][0][RTW89_MKK][1][105] = 127,
[0][0][2][0][RTW89_MKK][0][105] = 127,
[0][0][2][0][RTW89_IC][1][105] = 22,
- [0][0][2][0][RTW89_IC][2][105] = 127,
[0][0][2][0][RTW89_KCC][1][105] = 32,
[0][0][2][0][RTW89_KCC][0][105] = 127,
[0][0][2][0][RTW89_ACMA][1][105] = 127,
@@ -41936,13 +41078,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][105] = 127,
[0][0][2][0][RTW89_THAILAND][0][105] = 127,
[0][0][2][0][RTW89_FCC][1][107] = 24,
- [0][0][2][0][RTW89_FCC][2][107] = 127,
[0][0][2][0][RTW89_ETSI][1][107] = 127,
[0][0][2][0][RTW89_ETSI][0][107] = 127,
[0][0][2][0][RTW89_MKK][1][107] = 127,
[0][0][2][0][RTW89_MKK][0][107] = 127,
[0][0][2][0][RTW89_IC][1][107] = 24,
- [0][0][2][0][RTW89_IC][2][107] = 127,
[0][0][2][0][RTW89_KCC][1][107] = 32,
[0][0][2][0][RTW89_KCC][0][107] = 127,
[0][0][2][0][RTW89_ACMA][1][107] = 127,
@@ -41955,13 +41095,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][107] = 127,
[0][0][2][0][RTW89_THAILAND][0][107] = 127,
[0][0][2][0][RTW89_FCC][1][109] = 24,
- [0][0][2][0][RTW89_FCC][2][109] = 127,
[0][0][2][0][RTW89_ETSI][1][109] = 127,
[0][0][2][0][RTW89_ETSI][0][109] = 127,
[0][0][2][0][RTW89_MKK][1][109] = 127,
[0][0][2][0][RTW89_MKK][0][109] = 127,
[0][0][2][0][RTW89_IC][1][109] = 24,
- [0][0][2][0][RTW89_IC][2][109] = 127,
[0][0][2][0][RTW89_KCC][1][109] = 32,
[0][0][2][0][RTW89_KCC][0][109] = 127,
[0][0][2][0][RTW89_ACMA][1][109] = 127,
@@ -41974,13 +41112,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][109] = 127,
[0][0][2][0][RTW89_THAILAND][0][109] = 127,
[0][0][2][0][RTW89_FCC][1][111] = 127,
- [0][0][2][0][RTW89_FCC][2][111] = 127,
[0][0][2][0][RTW89_ETSI][1][111] = 127,
[0][0][2][0][RTW89_ETSI][0][111] = 127,
[0][0][2][0][RTW89_MKK][1][111] = 127,
[0][0][2][0][RTW89_MKK][0][111] = 127,
[0][0][2][0][RTW89_IC][1][111] = 127,
- [0][0][2][0][RTW89_IC][2][111] = 127,
[0][0][2][0][RTW89_KCC][1][111] = 127,
[0][0][2][0][RTW89_KCC][0][111] = 127,
[0][0][2][0][RTW89_ACMA][1][111] = 127,
@@ -41993,13 +41129,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][111] = 127,
[0][0][2][0][RTW89_THAILAND][0][111] = 127,
[0][0][2][0][RTW89_FCC][1][113] = 127,
- [0][0][2][0][RTW89_FCC][2][113] = 127,
[0][0][2][0][RTW89_ETSI][1][113] = 127,
[0][0][2][0][RTW89_ETSI][0][113] = 127,
[0][0][2][0][RTW89_MKK][1][113] = 127,
[0][0][2][0][RTW89_MKK][0][113] = 127,
[0][0][2][0][RTW89_IC][1][113] = 127,
- [0][0][2][0][RTW89_IC][2][113] = 127,
[0][0][2][0][RTW89_KCC][1][113] = 127,
[0][0][2][0][RTW89_KCC][0][113] = 127,
[0][0][2][0][RTW89_ACMA][1][113] = 127,
@@ -42012,13 +41146,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][113] = 127,
[0][0][2][0][RTW89_THAILAND][0][113] = 127,
[0][0][2][0][RTW89_FCC][1][115] = 127,
- [0][0][2][0][RTW89_FCC][2][115] = 127,
[0][0][2][0][RTW89_ETSI][1][115] = 127,
[0][0][2][0][RTW89_ETSI][0][115] = 127,
[0][0][2][0][RTW89_MKK][1][115] = 127,
[0][0][2][0][RTW89_MKK][0][115] = 127,
[0][0][2][0][RTW89_IC][1][115] = 127,
- [0][0][2][0][RTW89_IC][2][115] = 127,
[0][0][2][0][RTW89_KCC][1][115] = 127,
[0][0][2][0][RTW89_KCC][0][115] = 127,
[0][0][2][0][RTW89_ACMA][1][115] = 127,
@@ -42031,13 +41163,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][115] = 127,
[0][0][2][0][RTW89_THAILAND][0][115] = 127,
[0][0][2][0][RTW89_FCC][1][117] = 127,
- [0][0][2][0][RTW89_FCC][2][117] = 127,
[0][0][2][0][RTW89_ETSI][1][117] = 127,
[0][0][2][0][RTW89_ETSI][0][117] = 127,
[0][0][2][0][RTW89_MKK][1][117] = 127,
[0][0][2][0][RTW89_MKK][0][117] = 127,
[0][0][2][0][RTW89_IC][1][117] = 127,
- [0][0][2][0][RTW89_IC][2][117] = 127,
[0][0][2][0][RTW89_KCC][1][117] = 127,
[0][0][2][0][RTW89_KCC][0][117] = 127,
[0][0][2][0][RTW89_ACMA][1][117] = 127,
@@ -42050,13 +41180,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][117] = 127,
[0][0][2][0][RTW89_THAILAND][0][117] = 127,
[0][0][2][0][RTW89_FCC][1][119] = 127,
- [0][0][2][0][RTW89_FCC][2][119] = 127,
[0][0][2][0][RTW89_ETSI][1][119] = 127,
[0][0][2][0][RTW89_ETSI][0][119] = 127,
[0][0][2][0][RTW89_MKK][1][119] = 127,
[0][0][2][0][RTW89_MKK][0][119] = 127,
[0][0][2][0][RTW89_IC][1][119] = 127,
- [0][0][2][0][RTW89_IC][2][119] = 127,
[0][0][2][0][RTW89_KCC][1][119] = 127,
[0][0][2][0][RTW89_KCC][0][119] = 127,
[0][0][2][0][RTW89_ACMA][1][119] = 127,
@@ -42069,13 +41197,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][119] = 127,
[0][0][2][0][RTW89_THAILAND][0][119] = 127,
[0][1][2][0][RTW89_FCC][1][0] = -2,
- [0][1][2][0][RTW89_FCC][2][0] = 54,
[0][1][2][0][RTW89_ETSI][1][0] = 54,
[0][1][2][0][RTW89_ETSI][0][0] = 18,
[0][1][2][0][RTW89_MKK][1][0] = 56,
[0][1][2][0][RTW89_MKK][0][0] = 16,
[0][1][2][0][RTW89_IC][1][0] = -2,
- [0][1][2][0][RTW89_IC][2][0] = 54,
[0][1][2][0][RTW89_KCC][1][0] = 12,
[0][1][2][0][RTW89_KCC][0][0] = 10,
[0][1][2][0][RTW89_ACMA][1][0] = 54,
@@ -42088,13 +41214,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][0] = 44,
[0][1][2][0][RTW89_THAILAND][0][0] = -2,
[0][1][2][0][RTW89_FCC][1][2] = -4,
- [0][1][2][0][RTW89_FCC][2][2] = 54,
[0][1][2][0][RTW89_ETSI][1][2] = 54,
[0][1][2][0][RTW89_ETSI][0][2] = 18,
[0][1][2][0][RTW89_MKK][1][2] = 54,
[0][1][2][0][RTW89_MKK][0][2] = 16,
[0][1][2][0][RTW89_IC][1][2] = -4,
- [0][1][2][0][RTW89_IC][2][2] = 54,
[0][1][2][0][RTW89_KCC][1][2] = 12,
[0][1][2][0][RTW89_KCC][0][2] = 12,
[0][1][2][0][RTW89_ACMA][1][2] = 54,
@@ -42107,13 +41231,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][2] = 44,
[0][1][2][0][RTW89_THAILAND][0][2] = -4,
[0][1][2][0][RTW89_FCC][1][4] = -4,
- [0][1][2][0][RTW89_FCC][2][4] = 54,
[0][1][2][0][RTW89_ETSI][1][4] = 54,
[0][1][2][0][RTW89_ETSI][0][4] = 18,
[0][1][2][0][RTW89_MKK][1][4] = 54,
[0][1][2][0][RTW89_MKK][0][4] = 16,
[0][1][2][0][RTW89_IC][1][4] = -4,
- [0][1][2][0][RTW89_IC][2][4] = 54,
[0][1][2][0][RTW89_KCC][1][4] = 12,
[0][1][2][0][RTW89_KCC][0][4] = 12,
[0][1][2][0][RTW89_ACMA][1][4] = 54,
@@ -42126,13 +41248,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][4] = 44,
[0][1][2][0][RTW89_THAILAND][0][4] = -4,
[0][1][2][0][RTW89_FCC][1][6] = -4,
- [0][1][2][0][RTW89_FCC][2][6] = 54,
[0][1][2][0][RTW89_ETSI][1][6] = 54,
[0][1][2][0][RTW89_ETSI][0][6] = 18,
[0][1][2][0][RTW89_MKK][1][6] = 54,
[0][1][2][0][RTW89_MKK][0][6] = 16,
[0][1][2][0][RTW89_IC][1][6] = -4,
- [0][1][2][0][RTW89_IC][2][6] = 54,
[0][1][2][0][RTW89_KCC][1][6] = 12,
[0][1][2][0][RTW89_KCC][0][6] = 12,
[0][1][2][0][RTW89_ACMA][1][6] = 54,
@@ -42145,13 +41265,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][6] = 44,
[0][1][2][0][RTW89_THAILAND][0][6] = -4,
[0][1][2][0][RTW89_FCC][1][8] = -4,
- [0][1][2][0][RTW89_FCC][2][8] = 54,
[0][1][2][0][RTW89_ETSI][1][8] = 54,
[0][1][2][0][RTW89_ETSI][0][8] = 18,
[0][1][2][0][RTW89_MKK][1][8] = 54,
[0][1][2][0][RTW89_MKK][0][8] = 16,
[0][1][2][0][RTW89_IC][1][8] = -4,
- [0][1][2][0][RTW89_IC][2][8] = 54,
[0][1][2][0][RTW89_KCC][1][8] = 12,
[0][1][2][0][RTW89_KCC][0][8] = 12,
[0][1][2][0][RTW89_ACMA][1][8] = 54,
@@ -42164,13 +41282,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][8] = 44,
[0][1][2][0][RTW89_THAILAND][0][8] = -4,
[0][1][2][0][RTW89_FCC][1][10] = -4,
- [0][1][2][0][RTW89_FCC][2][10] = 54,
[0][1][2][0][RTW89_ETSI][1][10] = 54,
[0][1][2][0][RTW89_ETSI][0][10] = 18,
[0][1][2][0][RTW89_MKK][1][10] = 54,
[0][1][2][0][RTW89_MKK][0][10] = 16,
[0][1][2][0][RTW89_IC][1][10] = -4,
- [0][1][2][0][RTW89_IC][2][10] = 54,
[0][1][2][0][RTW89_KCC][1][10] = 12,
[0][1][2][0][RTW89_KCC][0][10] = 12,
[0][1][2][0][RTW89_ACMA][1][10] = 54,
@@ -42183,13 +41299,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][10] = 44,
[0][1][2][0][RTW89_THAILAND][0][10] = -4,
[0][1][2][0][RTW89_FCC][1][12] = -4,
- [0][1][2][0][RTW89_FCC][2][12] = 54,
[0][1][2][0][RTW89_ETSI][1][12] = 54,
[0][1][2][0][RTW89_ETSI][0][12] = 18,
[0][1][2][0][RTW89_MKK][1][12] = 54,
[0][1][2][0][RTW89_MKK][0][12] = 16,
[0][1][2][0][RTW89_IC][1][12] = -4,
- [0][1][2][0][RTW89_IC][2][12] = 54,
[0][1][2][0][RTW89_KCC][1][12] = 12,
[0][1][2][0][RTW89_KCC][0][12] = 12,
[0][1][2][0][RTW89_ACMA][1][12] = 54,
@@ -42202,13 +41316,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][12] = 44,
[0][1][2][0][RTW89_THAILAND][0][12] = -4,
[0][1][2][0][RTW89_FCC][1][14] = -4,
- [0][1][2][0][RTW89_FCC][2][14] = 54,
[0][1][2][0][RTW89_ETSI][1][14] = 54,
[0][1][2][0][RTW89_ETSI][0][14] = 18,
[0][1][2][0][RTW89_MKK][1][14] = 54,
[0][1][2][0][RTW89_MKK][0][14] = 16,
[0][1][2][0][RTW89_IC][1][14] = -4,
- [0][1][2][0][RTW89_IC][2][14] = 54,
[0][1][2][0][RTW89_KCC][1][14] = 12,
[0][1][2][0][RTW89_KCC][0][14] = 12,
[0][1][2][0][RTW89_ACMA][1][14] = 54,
@@ -42221,13 +41333,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][14] = 44,
[0][1][2][0][RTW89_THAILAND][0][14] = -4,
[0][1][2][0][RTW89_FCC][1][15] = -4,
- [0][1][2][0][RTW89_FCC][2][15] = 54,
[0][1][2][0][RTW89_ETSI][1][15] = 54,
[0][1][2][0][RTW89_ETSI][0][15] = 18,
[0][1][2][0][RTW89_MKK][1][15] = 54,
[0][1][2][0][RTW89_MKK][0][15] = 16,
[0][1][2][0][RTW89_IC][1][15] = -4,
- [0][1][2][0][RTW89_IC][2][15] = 54,
[0][1][2][0][RTW89_KCC][1][15] = 12,
[0][1][2][0][RTW89_KCC][0][15] = 12,
[0][1][2][0][RTW89_ACMA][1][15] = 54,
@@ -42240,13 +41350,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][15] = 44,
[0][1][2][0][RTW89_THAILAND][0][15] = -4,
[0][1][2][0][RTW89_FCC][1][17] = -4,
- [0][1][2][0][RTW89_FCC][2][17] = 54,
[0][1][2][0][RTW89_ETSI][1][17] = 54,
[0][1][2][0][RTW89_ETSI][0][17] = 18,
[0][1][2][0][RTW89_MKK][1][17] = 54,
[0][1][2][0][RTW89_MKK][0][17] = 16,
[0][1][2][0][RTW89_IC][1][17] = -4,
- [0][1][2][0][RTW89_IC][2][17] = 54,
[0][1][2][0][RTW89_KCC][1][17] = 12,
[0][1][2][0][RTW89_KCC][0][17] = 12,
[0][1][2][0][RTW89_ACMA][1][17] = 54,
@@ -42259,13 +41367,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][17] = 44,
[0][1][2][0][RTW89_THAILAND][0][17] = -4,
[0][1][2][0][RTW89_FCC][1][19] = -4,
- [0][1][2][0][RTW89_FCC][2][19] = 54,
[0][1][2][0][RTW89_ETSI][1][19] = 54,
[0][1][2][0][RTW89_ETSI][0][19] = 18,
[0][1][2][0][RTW89_MKK][1][19] = 54,
[0][1][2][0][RTW89_MKK][0][19] = 16,
[0][1][2][0][RTW89_IC][1][19] = -4,
- [0][1][2][0][RTW89_IC][2][19] = 54,
[0][1][2][0][RTW89_KCC][1][19] = 12,
[0][1][2][0][RTW89_KCC][0][19] = 12,
[0][1][2][0][RTW89_ACMA][1][19] = 54,
@@ -42278,13 +41384,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][19] = 44,
[0][1][2][0][RTW89_THAILAND][0][19] = -4,
[0][1][2][0][RTW89_FCC][1][21] = -4,
- [0][1][2][0][RTW89_FCC][2][21] = 54,
[0][1][2][0][RTW89_ETSI][1][21] = 54,
[0][1][2][0][RTW89_ETSI][0][21] = 18,
[0][1][2][0][RTW89_MKK][1][21] = 54,
[0][1][2][0][RTW89_MKK][0][21] = 16,
[0][1][2][0][RTW89_IC][1][21] = -4,
- [0][1][2][0][RTW89_IC][2][21] = 54,
[0][1][2][0][RTW89_KCC][1][21] = 12,
[0][1][2][0][RTW89_KCC][0][21] = 12,
[0][1][2][0][RTW89_ACMA][1][21] = 54,
@@ -42297,13 +41401,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][21] = 44,
[0][1][2][0][RTW89_THAILAND][0][21] = -4,
[0][1][2][0][RTW89_FCC][1][23] = -4,
- [0][1][2][0][RTW89_FCC][2][23] = 68,
[0][1][2][0][RTW89_ETSI][1][23] = 54,
[0][1][2][0][RTW89_ETSI][0][23] = 18,
[0][1][2][0][RTW89_MKK][1][23] = 54,
[0][1][2][0][RTW89_MKK][0][23] = 16,
[0][1][2][0][RTW89_IC][1][23] = -4,
- [0][1][2][0][RTW89_IC][2][23] = 68,
[0][1][2][0][RTW89_KCC][1][23] = 12,
[0][1][2][0][RTW89_KCC][0][23] = 10,
[0][1][2][0][RTW89_ACMA][1][23] = 54,
@@ -42316,13 +41418,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][23] = 44,
[0][1][2][0][RTW89_THAILAND][0][23] = -4,
[0][1][2][0][RTW89_FCC][1][25] = -4,
- [0][1][2][0][RTW89_FCC][2][25] = 68,
[0][1][2][0][RTW89_ETSI][1][25] = 54,
[0][1][2][0][RTW89_ETSI][0][25] = 18,
[0][1][2][0][RTW89_MKK][1][25] = 54,
[0][1][2][0][RTW89_MKK][0][25] = 16,
[0][1][2][0][RTW89_IC][1][25] = -4,
- [0][1][2][0][RTW89_IC][2][25] = 68,
[0][1][2][0][RTW89_KCC][1][25] = 12,
[0][1][2][0][RTW89_KCC][0][25] = 14,
[0][1][2][0][RTW89_ACMA][1][25] = 54,
@@ -42335,13 +41435,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][25] = 42,
[0][1][2][0][RTW89_THAILAND][0][25] = -4,
[0][1][2][0][RTW89_FCC][1][27] = -4,
- [0][1][2][0][RTW89_FCC][2][27] = 68,
[0][1][2][0][RTW89_ETSI][1][27] = 54,
[0][1][2][0][RTW89_ETSI][0][27] = 18,
[0][1][2][0][RTW89_MKK][1][27] = 54,
[0][1][2][0][RTW89_MKK][0][27] = 16,
[0][1][2][0][RTW89_IC][1][27] = -4,
- [0][1][2][0][RTW89_IC][2][27] = 68,
[0][1][2][0][RTW89_KCC][1][27] = 12,
[0][1][2][0][RTW89_KCC][0][27] = 14,
[0][1][2][0][RTW89_ACMA][1][27] = 54,
@@ -42354,13 +41452,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][27] = 42,
[0][1][2][0][RTW89_THAILAND][0][27] = -4,
[0][1][2][0][RTW89_FCC][1][29] = -4,
- [0][1][2][0][RTW89_FCC][2][29] = 68,
[0][1][2][0][RTW89_ETSI][1][29] = 54,
[0][1][2][0][RTW89_ETSI][0][29] = 18,
[0][1][2][0][RTW89_MKK][1][29] = 54,
[0][1][2][0][RTW89_MKK][0][29] = 16,
[0][1][2][0][RTW89_IC][1][29] = -4,
- [0][1][2][0][RTW89_IC][2][29] = 68,
[0][1][2][0][RTW89_KCC][1][29] = 12,
[0][1][2][0][RTW89_KCC][0][29] = 14,
[0][1][2][0][RTW89_ACMA][1][29] = 54,
@@ -42373,13 +41469,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][29] = 42,
[0][1][2][0][RTW89_THAILAND][0][29] = -4,
[0][1][2][0][RTW89_FCC][1][30] = -4,
- [0][1][2][0][RTW89_FCC][2][30] = 68,
[0][1][2][0][RTW89_ETSI][1][30] = 54,
[0][1][2][0][RTW89_ETSI][0][30] = 18,
[0][1][2][0][RTW89_MKK][1][30] = 54,
[0][1][2][0][RTW89_MKK][0][30] = 16,
[0][1][2][0][RTW89_IC][1][30] = -4,
- [0][1][2][0][RTW89_IC][2][30] = 68,
[0][1][2][0][RTW89_KCC][1][30] = 12,
[0][1][2][0][RTW89_KCC][0][30] = 14,
[0][1][2][0][RTW89_ACMA][1][30] = 54,
@@ -42392,13 +41486,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][30] = 42,
[0][1][2][0][RTW89_THAILAND][0][30] = -4,
[0][1][2][0][RTW89_FCC][1][32] = -4,
- [0][1][2][0][RTW89_FCC][2][32] = 68,
[0][1][2][0][RTW89_ETSI][1][32] = 54,
[0][1][2][0][RTW89_ETSI][0][32] = 18,
[0][1][2][0][RTW89_MKK][1][32] = 54,
[0][1][2][0][RTW89_MKK][0][32] = 16,
[0][1][2][0][RTW89_IC][1][32] = -4,
- [0][1][2][0][RTW89_IC][2][32] = 68,
[0][1][2][0][RTW89_KCC][1][32] = 12,
[0][1][2][0][RTW89_KCC][0][32] = 14,
[0][1][2][0][RTW89_ACMA][1][32] = 54,
@@ -42411,13 +41503,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][32] = 42,
[0][1][2][0][RTW89_THAILAND][0][32] = -4,
[0][1][2][0][RTW89_FCC][1][34] = -4,
- [0][1][2][0][RTW89_FCC][2][34] = 68,
[0][1][2][0][RTW89_ETSI][1][34] = 54,
[0][1][2][0][RTW89_ETSI][0][34] = 18,
[0][1][2][0][RTW89_MKK][1][34] = 54,
[0][1][2][0][RTW89_MKK][0][34] = 16,
[0][1][2][0][RTW89_IC][1][34] = -4,
- [0][1][2][0][RTW89_IC][2][34] = 68,
[0][1][2][0][RTW89_KCC][1][34] = 12,
[0][1][2][0][RTW89_KCC][0][34] = 14,
[0][1][2][0][RTW89_ACMA][1][34] = 54,
@@ -42430,13 +41520,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][34] = 42,
[0][1][2][0][RTW89_THAILAND][0][34] = -4,
[0][1][2][0][RTW89_FCC][1][36] = -4,
- [0][1][2][0][RTW89_FCC][2][36] = 68,
[0][1][2][0][RTW89_ETSI][1][36] = 54,
[0][1][2][0][RTW89_ETSI][0][36] = 18,
[0][1][2][0][RTW89_MKK][1][36] = 54,
[0][1][2][0][RTW89_MKK][0][36] = 16,
[0][1][2][0][RTW89_IC][1][36] = -4,
- [0][1][2][0][RTW89_IC][2][36] = 68,
[0][1][2][0][RTW89_KCC][1][36] = 12,
[0][1][2][0][RTW89_KCC][0][36] = 14,
[0][1][2][0][RTW89_ACMA][1][36] = 54,
@@ -42449,13 +41537,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][36] = 42,
[0][1][2][0][RTW89_THAILAND][0][36] = -4,
[0][1][2][0][RTW89_FCC][1][38] = -4,
- [0][1][2][0][RTW89_FCC][2][38] = 68,
[0][1][2][0][RTW89_ETSI][1][38] = 54,
[0][1][2][0][RTW89_ETSI][0][38] = 18,
[0][1][2][0][RTW89_MKK][1][38] = 54,
[0][1][2][0][RTW89_MKK][0][38] = 16,
[0][1][2][0][RTW89_IC][1][38] = -4,
- [0][1][2][0][RTW89_IC][2][38] = 68,
[0][1][2][0][RTW89_KCC][1][38] = 12,
[0][1][2][0][RTW89_KCC][0][38] = 14,
[0][1][2][0][RTW89_ACMA][1][38] = 54,
@@ -42468,13 +41554,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][38] = 42,
[0][1][2][0][RTW89_THAILAND][0][38] = -4,
[0][1][2][0][RTW89_FCC][1][40] = -4,
- [0][1][2][0][RTW89_FCC][2][40] = 68,
[0][1][2][0][RTW89_ETSI][1][40] = 54,
[0][1][2][0][RTW89_ETSI][0][40] = 18,
[0][1][2][0][RTW89_MKK][1][40] = 54,
[0][1][2][0][RTW89_MKK][0][40] = 16,
[0][1][2][0][RTW89_IC][1][40] = -4,
- [0][1][2][0][RTW89_IC][2][40] = 68,
[0][1][2][0][RTW89_KCC][1][40] = 12,
[0][1][2][0][RTW89_KCC][0][40] = 14,
[0][1][2][0][RTW89_ACMA][1][40] = 54,
@@ -42487,13 +41571,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][40] = 42,
[0][1][2][0][RTW89_THAILAND][0][40] = -4,
[0][1][2][0][RTW89_FCC][1][42] = -4,
- [0][1][2][0][RTW89_FCC][2][42] = 68,
[0][1][2][0][RTW89_ETSI][1][42] = 54,
[0][1][2][0][RTW89_ETSI][0][42] = 18,
[0][1][2][0][RTW89_MKK][1][42] = 54,
[0][1][2][0][RTW89_MKK][0][42] = 16,
[0][1][2][0][RTW89_IC][1][42] = -4,
- [0][1][2][0][RTW89_IC][2][42] = 68,
[0][1][2][0][RTW89_KCC][1][42] = 12,
[0][1][2][0][RTW89_KCC][0][42] = 14,
[0][1][2][0][RTW89_ACMA][1][42] = 54,
@@ -42506,13 +41588,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][42] = 42,
[0][1][2][0][RTW89_THAILAND][0][42] = -4,
[0][1][2][0][RTW89_FCC][1][44] = -2,
- [0][1][2][0][RTW89_FCC][2][44] = 68,
[0][1][2][0][RTW89_ETSI][1][44] = 54,
[0][1][2][0][RTW89_ETSI][0][44] = 18,
[0][1][2][0][RTW89_MKK][1][44] = 34,
[0][1][2][0][RTW89_MKK][0][44] = 16,
[0][1][2][0][RTW89_IC][1][44] = -2,
- [0][1][2][0][RTW89_IC][2][44] = 68,
[0][1][2][0][RTW89_KCC][1][44] = 12,
[0][1][2][0][RTW89_KCC][0][44] = 12,
[0][1][2][0][RTW89_ACMA][1][44] = 54,
@@ -42525,13 +41605,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][44] = 42,
[0][1][2][0][RTW89_THAILAND][0][44] = -2,
[0][1][2][0][RTW89_FCC][1][45] = -2,
- [0][1][2][0][RTW89_FCC][2][45] = 127,
[0][1][2][0][RTW89_ETSI][1][45] = 127,
[0][1][2][0][RTW89_ETSI][0][45] = 127,
[0][1][2][0][RTW89_MKK][1][45] = 127,
[0][1][2][0][RTW89_MKK][0][45] = 127,
[0][1][2][0][RTW89_IC][1][45] = -2,
- [0][1][2][0][RTW89_IC][2][45] = 70,
[0][1][2][0][RTW89_KCC][1][45] = 12,
[0][1][2][0][RTW89_KCC][0][45] = 127,
[0][1][2][0][RTW89_ACMA][1][45] = 127,
@@ -42544,13 +41622,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][45] = 127,
[0][1][2][0][RTW89_THAILAND][0][45] = 127,
[0][1][2][0][RTW89_FCC][1][47] = -2,
- [0][1][2][0][RTW89_FCC][2][47] = 127,
[0][1][2][0][RTW89_ETSI][1][47] = 127,
[0][1][2][0][RTW89_ETSI][0][47] = 127,
[0][1][2][0][RTW89_MKK][1][47] = 127,
[0][1][2][0][RTW89_MKK][0][47] = 127,
[0][1][2][0][RTW89_IC][1][47] = -2,
- [0][1][2][0][RTW89_IC][2][47] = 68,
[0][1][2][0][RTW89_KCC][1][47] = 12,
[0][1][2][0][RTW89_KCC][0][47] = 127,
[0][1][2][0][RTW89_ACMA][1][47] = 127,
@@ -42563,13 +41639,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][47] = 127,
[0][1][2][0][RTW89_THAILAND][0][47] = 127,
[0][1][2][0][RTW89_FCC][1][49] = -2,
- [0][1][2][0][RTW89_FCC][2][49] = 127,
[0][1][2][0][RTW89_ETSI][1][49] = 127,
[0][1][2][0][RTW89_ETSI][0][49] = 127,
[0][1][2][0][RTW89_MKK][1][49] = 127,
[0][1][2][0][RTW89_MKK][0][49] = 127,
[0][1][2][0][RTW89_IC][1][49] = -2,
- [0][1][2][0][RTW89_IC][2][49] = 68,
[0][1][2][0][RTW89_KCC][1][49] = 12,
[0][1][2][0][RTW89_KCC][0][49] = 127,
[0][1][2][0][RTW89_ACMA][1][49] = 127,
@@ -42582,13 +41656,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][49] = 127,
[0][1][2][0][RTW89_THAILAND][0][49] = 127,
[0][1][2][0][RTW89_FCC][1][51] = -2,
- [0][1][2][0][RTW89_FCC][2][51] = 127,
[0][1][2][0][RTW89_ETSI][1][51] = 127,
[0][1][2][0][RTW89_ETSI][0][51] = 127,
[0][1][2][0][RTW89_MKK][1][51] = 127,
[0][1][2][0][RTW89_MKK][0][51] = 127,
[0][1][2][0][RTW89_IC][1][51] = -2,
- [0][1][2][0][RTW89_IC][2][51] = 68,
[0][1][2][0][RTW89_KCC][1][51] = 12,
[0][1][2][0][RTW89_KCC][0][51] = 127,
[0][1][2][0][RTW89_ACMA][1][51] = 127,
@@ -42601,13 +41673,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][51] = 127,
[0][1][2][0][RTW89_THAILAND][0][51] = 127,
[0][1][2][0][RTW89_FCC][1][53] = -2,
- [0][1][2][0][RTW89_FCC][2][53] = 127,
[0][1][2][0][RTW89_ETSI][1][53] = 127,
[0][1][2][0][RTW89_ETSI][0][53] = 127,
[0][1][2][0][RTW89_MKK][1][53] = 127,
[0][1][2][0][RTW89_MKK][0][53] = 127,
[0][1][2][0][RTW89_IC][1][53] = -2,
- [0][1][2][0][RTW89_IC][2][53] = 68,
[0][1][2][0][RTW89_KCC][1][53] = 12,
[0][1][2][0][RTW89_KCC][0][53] = 127,
[0][1][2][0][RTW89_ACMA][1][53] = 127,
@@ -42620,13 +41690,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][53] = 127,
[0][1][2][0][RTW89_THAILAND][0][53] = 127,
[0][1][2][0][RTW89_FCC][1][55] = -2,
- [0][1][2][0][RTW89_FCC][2][55] = 68,
[0][1][2][0][RTW89_ETSI][1][55] = 127,
[0][1][2][0][RTW89_ETSI][0][55] = 127,
[0][1][2][0][RTW89_MKK][1][55] = 127,
[0][1][2][0][RTW89_MKK][0][55] = 127,
[0][1][2][0][RTW89_IC][1][55] = -2,
- [0][1][2][0][RTW89_IC][2][55] = 68,
[0][1][2][0][RTW89_KCC][1][55] = 12,
[0][1][2][0][RTW89_KCC][0][55] = 127,
[0][1][2][0][RTW89_ACMA][1][55] = 127,
@@ -42639,13 +41707,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][55] = 127,
[0][1][2][0][RTW89_THAILAND][0][55] = 127,
[0][1][2][0][RTW89_FCC][1][57] = -2,
- [0][1][2][0][RTW89_FCC][2][57] = 68,
[0][1][2][0][RTW89_ETSI][1][57] = 127,
[0][1][2][0][RTW89_ETSI][0][57] = 127,
[0][1][2][0][RTW89_MKK][1][57] = 127,
[0][1][2][0][RTW89_MKK][0][57] = 127,
[0][1][2][0][RTW89_IC][1][57] = -2,
- [0][1][2][0][RTW89_IC][2][57] = 68,
[0][1][2][0][RTW89_KCC][1][57] = 12,
[0][1][2][0][RTW89_KCC][0][57] = 127,
[0][1][2][0][RTW89_ACMA][1][57] = 127,
@@ -42658,13 +41724,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][57] = 127,
[0][1][2][0][RTW89_THAILAND][0][57] = 127,
[0][1][2][0][RTW89_FCC][1][59] = -2,
- [0][1][2][0][RTW89_FCC][2][59] = 68,
[0][1][2][0][RTW89_ETSI][1][59] = 127,
[0][1][2][0][RTW89_ETSI][0][59] = 127,
[0][1][2][0][RTW89_MKK][1][59] = 127,
[0][1][2][0][RTW89_MKK][0][59] = 127,
[0][1][2][0][RTW89_IC][1][59] = -2,
- [0][1][2][0][RTW89_IC][2][59] = 68,
[0][1][2][0][RTW89_KCC][1][59] = 12,
[0][1][2][0][RTW89_KCC][0][59] = 127,
[0][1][2][0][RTW89_ACMA][1][59] = 127,
@@ -42677,13 +41741,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][59] = 127,
[0][1][2][0][RTW89_THAILAND][0][59] = 127,
[0][1][2][0][RTW89_FCC][1][60] = -2,
- [0][1][2][0][RTW89_FCC][2][60] = 68,
[0][1][2][0][RTW89_ETSI][1][60] = 127,
[0][1][2][0][RTW89_ETSI][0][60] = 127,
[0][1][2][0][RTW89_MKK][1][60] = 127,
[0][1][2][0][RTW89_MKK][0][60] = 127,
[0][1][2][0][RTW89_IC][1][60] = -2,
- [0][1][2][0][RTW89_IC][2][60] = 68,
[0][1][2][0][RTW89_KCC][1][60] = 12,
[0][1][2][0][RTW89_KCC][0][60] = 127,
[0][1][2][0][RTW89_ACMA][1][60] = 127,
@@ -42696,13 +41758,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][60] = 127,
[0][1][2][0][RTW89_THAILAND][0][60] = 127,
[0][1][2][0][RTW89_FCC][1][62] = -2,
- [0][1][2][0][RTW89_FCC][2][62] = 68,
[0][1][2][0][RTW89_ETSI][1][62] = 127,
[0][1][2][0][RTW89_ETSI][0][62] = 127,
[0][1][2][0][RTW89_MKK][1][62] = 127,
[0][1][2][0][RTW89_MKK][0][62] = 127,
[0][1][2][0][RTW89_IC][1][62] = -2,
- [0][1][2][0][RTW89_IC][2][62] = 68,
[0][1][2][0][RTW89_KCC][1][62] = 12,
[0][1][2][0][RTW89_KCC][0][62] = 127,
[0][1][2][0][RTW89_ACMA][1][62] = 127,
@@ -42715,13 +41775,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][62] = 127,
[0][1][2][0][RTW89_THAILAND][0][62] = 127,
[0][1][2][0][RTW89_FCC][1][64] = -2,
- [0][1][2][0][RTW89_FCC][2][64] = 68,
[0][1][2][0][RTW89_ETSI][1][64] = 127,
[0][1][2][0][RTW89_ETSI][0][64] = 127,
[0][1][2][0][RTW89_MKK][1][64] = 127,
[0][1][2][0][RTW89_MKK][0][64] = 127,
[0][1][2][0][RTW89_IC][1][64] = -2,
- [0][1][2][0][RTW89_IC][2][64] = 68,
[0][1][2][0][RTW89_KCC][1][64] = 12,
[0][1][2][0][RTW89_KCC][0][64] = 127,
[0][1][2][0][RTW89_ACMA][1][64] = 127,
@@ -42734,13 +41792,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][64] = 127,
[0][1][2][0][RTW89_THAILAND][0][64] = 127,
[0][1][2][0][RTW89_FCC][1][66] = -2,
- [0][1][2][0][RTW89_FCC][2][66] = 68,
[0][1][2][0][RTW89_ETSI][1][66] = 127,
[0][1][2][0][RTW89_ETSI][0][66] = 127,
[0][1][2][0][RTW89_MKK][1][66] = 127,
[0][1][2][0][RTW89_MKK][0][66] = 127,
[0][1][2][0][RTW89_IC][1][66] = -2,
- [0][1][2][0][RTW89_IC][2][66] = 68,
[0][1][2][0][RTW89_KCC][1][66] = 12,
[0][1][2][0][RTW89_KCC][0][66] = 127,
[0][1][2][0][RTW89_ACMA][1][66] = 127,
@@ -42753,13 +41809,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][66] = 127,
[0][1][2][0][RTW89_THAILAND][0][66] = 127,
[0][1][2][0][RTW89_FCC][1][68] = -2,
- [0][1][2][0][RTW89_FCC][2][68] = 68,
[0][1][2][0][RTW89_ETSI][1][68] = 127,
[0][1][2][0][RTW89_ETSI][0][68] = 127,
[0][1][2][0][RTW89_MKK][1][68] = 127,
[0][1][2][0][RTW89_MKK][0][68] = 127,
[0][1][2][0][RTW89_IC][1][68] = -2,
- [0][1][2][0][RTW89_IC][2][68] = 68,
[0][1][2][0][RTW89_KCC][1][68] = 12,
[0][1][2][0][RTW89_KCC][0][68] = 127,
[0][1][2][0][RTW89_ACMA][1][68] = 127,
@@ -42772,13 +41826,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][68] = 127,
[0][1][2][0][RTW89_THAILAND][0][68] = 127,
[0][1][2][0][RTW89_FCC][1][70] = -2,
- [0][1][2][0][RTW89_FCC][2][70] = 68,
[0][1][2][0][RTW89_ETSI][1][70] = 127,
[0][1][2][0][RTW89_ETSI][0][70] = 127,
[0][1][2][0][RTW89_MKK][1][70] = 127,
[0][1][2][0][RTW89_MKK][0][70] = 127,
[0][1][2][0][RTW89_IC][1][70] = -2,
- [0][1][2][0][RTW89_IC][2][70] = 68,
[0][1][2][0][RTW89_KCC][1][70] = 12,
[0][1][2][0][RTW89_KCC][0][70] = 127,
[0][1][2][0][RTW89_ACMA][1][70] = 127,
@@ -42791,13 +41843,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][70] = 127,
[0][1][2][0][RTW89_THAILAND][0][70] = 127,
[0][1][2][0][RTW89_FCC][1][72] = -2,
- [0][1][2][0][RTW89_FCC][2][72] = 68,
[0][1][2][0][RTW89_ETSI][1][72] = 127,
[0][1][2][0][RTW89_ETSI][0][72] = 127,
[0][1][2][0][RTW89_MKK][1][72] = 127,
[0][1][2][0][RTW89_MKK][0][72] = 127,
[0][1][2][0][RTW89_IC][1][72] = -2,
- [0][1][2][0][RTW89_IC][2][72] = 68,
[0][1][2][0][RTW89_KCC][1][72] = 12,
[0][1][2][0][RTW89_KCC][0][72] = 127,
[0][1][2][0][RTW89_ACMA][1][72] = 127,
@@ -42810,13 +41860,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][72] = 127,
[0][1][2][0][RTW89_THAILAND][0][72] = 127,
[0][1][2][0][RTW89_FCC][1][74] = -2,
- [0][1][2][0][RTW89_FCC][2][74] = 68,
[0][1][2][0][RTW89_ETSI][1][74] = 127,
[0][1][2][0][RTW89_ETSI][0][74] = 127,
[0][1][2][0][RTW89_MKK][1][74] = 127,
[0][1][2][0][RTW89_MKK][0][74] = 127,
[0][1][2][0][RTW89_IC][1][74] = -2,
- [0][1][2][0][RTW89_IC][2][74] = 68,
[0][1][2][0][RTW89_KCC][1][74] = 12,
[0][1][2][0][RTW89_KCC][0][74] = 127,
[0][1][2][0][RTW89_ACMA][1][74] = 127,
@@ -42829,13 +41877,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][74] = 127,
[0][1][2][0][RTW89_THAILAND][0][74] = 127,
[0][1][2][0][RTW89_FCC][1][75] = -2,
- [0][1][2][0][RTW89_FCC][2][75] = 68,
[0][1][2][0][RTW89_ETSI][1][75] = 127,
[0][1][2][0][RTW89_ETSI][0][75] = 127,
[0][1][2][0][RTW89_MKK][1][75] = 127,
[0][1][2][0][RTW89_MKK][0][75] = 127,
[0][1][2][0][RTW89_IC][1][75] = -2,
- [0][1][2][0][RTW89_IC][2][75] = 68,
[0][1][2][0][RTW89_KCC][1][75] = 12,
[0][1][2][0][RTW89_KCC][0][75] = 127,
[0][1][2][0][RTW89_ACMA][1][75] = 127,
@@ -42848,13 +41894,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][75] = 127,
[0][1][2][0][RTW89_THAILAND][0][75] = 127,
[0][1][2][0][RTW89_FCC][1][77] = -2,
- [0][1][2][0][RTW89_FCC][2][77] = 68,
[0][1][2][0][RTW89_ETSI][1][77] = 127,
[0][1][2][0][RTW89_ETSI][0][77] = 127,
[0][1][2][0][RTW89_MKK][1][77] = 127,
[0][1][2][0][RTW89_MKK][0][77] = 127,
[0][1][2][0][RTW89_IC][1][77] = -2,
- [0][1][2][0][RTW89_IC][2][77] = 68,
[0][1][2][0][RTW89_KCC][1][77] = 12,
[0][1][2][0][RTW89_KCC][0][77] = 127,
[0][1][2][0][RTW89_ACMA][1][77] = 127,
@@ -42867,13 +41911,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][77] = 127,
[0][1][2][0][RTW89_THAILAND][0][77] = 127,
[0][1][2][0][RTW89_FCC][1][79] = -2,
- [0][1][2][0][RTW89_FCC][2][79] = 68,
[0][1][2][0][RTW89_ETSI][1][79] = 127,
[0][1][2][0][RTW89_ETSI][0][79] = 127,
[0][1][2][0][RTW89_MKK][1][79] = 127,
[0][1][2][0][RTW89_MKK][0][79] = 127,
[0][1][2][0][RTW89_IC][1][79] = -2,
- [0][1][2][0][RTW89_IC][2][79] = 68,
[0][1][2][0][RTW89_KCC][1][79] = 12,
[0][1][2][0][RTW89_KCC][0][79] = 127,
[0][1][2][0][RTW89_ACMA][1][79] = 127,
@@ -42886,13 +41928,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][79] = 127,
[0][1][2][0][RTW89_THAILAND][0][79] = 127,
[0][1][2][0][RTW89_FCC][1][81] = -2,
- [0][1][2][0][RTW89_FCC][2][81] = 68,
[0][1][2][0][RTW89_ETSI][1][81] = 127,
[0][1][2][0][RTW89_ETSI][0][81] = 127,
[0][1][2][0][RTW89_MKK][1][81] = 127,
[0][1][2][0][RTW89_MKK][0][81] = 127,
[0][1][2][0][RTW89_IC][1][81] = -2,
- [0][1][2][0][RTW89_IC][2][81] = 68,
[0][1][2][0][RTW89_KCC][1][81] = 12,
[0][1][2][0][RTW89_KCC][0][81] = 127,
[0][1][2][0][RTW89_ACMA][1][81] = 127,
@@ -42905,13 +41945,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][81] = 127,
[0][1][2][0][RTW89_THAILAND][0][81] = 127,
[0][1][2][0][RTW89_FCC][1][83] = -2,
- [0][1][2][0][RTW89_FCC][2][83] = 68,
[0][1][2][0][RTW89_ETSI][1][83] = 127,
[0][1][2][0][RTW89_ETSI][0][83] = 127,
[0][1][2][0][RTW89_MKK][1][83] = 127,
[0][1][2][0][RTW89_MKK][0][83] = 127,
[0][1][2][0][RTW89_IC][1][83] = -2,
- [0][1][2][0][RTW89_IC][2][83] = 68,
[0][1][2][0][RTW89_KCC][1][83] = 20,
[0][1][2][0][RTW89_KCC][0][83] = 127,
[0][1][2][0][RTW89_ACMA][1][83] = 127,
@@ -42924,13 +41962,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][83] = 127,
[0][1][2][0][RTW89_THAILAND][0][83] = 127,
[0][1][2][0][RTW89_FCC][1][85] = -2,
- [0][1][2][0][RTW89_FCC][2][85] = 68,
[0][1][2][0][RTW89_ETSI][1][85] = 127,
[0][1][2][0][RTW89_ETSI][0][85] = 127,
[0][1][2][0][RTW89_MKK][1][85] = 127,
[0][1][2][0][RTW89_MKK][0][85] = 127,
[0][1][2][0][RTW89_IC][1][85] = -2,
- [0][1][2][0][RTW89_IC][2][85] = 68,
[0][1][2][0][RTW89_KCC][1][85] = 20,
[0][1][2][0][RTW89_KCC][0][85] = 127,
[0][1][2][0][RTW89_ACMA][1][85] = 127,
@@ -42943,13 +41979,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][85] = 127,
[0][1][2][0][RTW89_THAILAND][0][85] = 127,
[0][1][2][0][RTW89_FCC][1][87] = -2,
- [0][1][2][0][RTW89_FCC][2][87] = 127,
[0][1][2][0][RTW89_ETSI][1][87] = 127,
[0][1][2][0][RTW89_ETSI][0][87] = 127,
[0][1][2][0][RTW89_MKK][1][87] = 127,
[0][1][2][0][RTW89_MKK][0][87] = 127,
[0][1][2][0][RTW89_IC][1][87] = -2,
- [0][1][2][0][RTW89_IC][2][87] = 127,
[0][1][2][0][RTW89_KCC][1][87] = 20,
[0][1][2][0][RTW89_KCC][0][87] = 127,
[0][1][2][0][RTW89_ACMA][1][87] = 127,
@@ -42962,13 +41996,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][87] = 127,
[0][1][2][0][RTW89_THAILAND][0][87] = 127,
[0][1][2][0][RTW89_FCC][1][89] = -2,
- [0][1][2][0][RTW89_FCC][2][89] = 127,
[0][1][2][0][RTW89_ETSI][1][89] = 127,
[0][1][2][0][RTW89_ETSI][0][89] = 127,
[0][1][2][0][RTW89_MKK][1][89] = 127,
[0][1][2][0][RTW89_MKK][0][89] = 127,
[0][1][2][0][RTW89_IC][1][89] = -2,
- [0][1][2][0][RTW89_IC][2][89] = 127,
[0][1][2][0][RTW89_KCC][1][89] = 20,
[0][1][2][0][RTW89_KCC][0][89] = 127,
[0][1][2][0][RTW89_ACMA][1][89] = 127,
@@ -42981,13 +42013,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][89] = 127,
[0][1][2][0][RTW89_THAILAND][0][89] = 127,
[0][1][2][0][RTW89_FCC][1][90] = -2,
- [0][1][2][0][RTW89_FCC][2][90] = 127,
[0][1][2][0][RTW89_ETSI][1][90] = 127,
[0][1][2][0][RTW89_ETSI][0][90] = 127,
[0][1][2][0][RTW89_MKK][1][90] = 127,
[0][1][2][0][RTW89_MKK][0][90] = 127,
[0][1][2][0][RTW89_IC][1][90] = -2,
- [0][1][2][0][RTW89_IC][2][90] = 127,
[0][1][2][0][RTW89_KCC][1][90] = 20,
[0][1][2][0][RTW89_KCC][0][90] = 127,
[0][1][2][0][RTW89_ACMA][1][90] = 127,
@@ -43000,13 +42030,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][90] = 127,
[0][1][2][0][RTW89_THAILAND][0][90] = 127,
[0][1][2][0][RTW89_FCC][1][92] = -2,
- [0][1][2][0][RTW89_FCC][2][92] = 127,
[0][1][2][0][RTW89_ETSI][1][92] = 127,
[0][1][2][0][RTW89_ETSI][0][92] = 127,
[0][1][2][0][RTW89_MKK][1][92] = 127,
[0][1][2][0][RTW89_MKK][0][92] = 127,
[0][1][2][0][RTW89_IC][1][92] = -2,
- [0][1][2][0][RTW89_IC][2][92] = 127,
[0][1][2][0][RTW89_KCC][1][92] = 20,
[0][1][2][0][RTW89_KCC][0][92] = 127,
[0][1][2][0][RTW89_ACMA][1][92] = 127,
@@ -43019,13 +42047,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][92] = 127,
[0][1][2][0][RTW89_THAILAND][0][92] = 127,
[0][1][2][0][RTW89_FCC][1][94] = -2,
- [0][1][2][0][RTW89_FCC][2][94] = 127,
[0][1][2][0][RTW89_ETSI][1][94] = 127,
[0][1][2][0][RTW89_ETSI][0][94] = 127,
[0][1][2][0][RTW89_MKK][1][94] = 127,
[0][1][2][0][RTW89_MKK][0][94] = 127,
[0][1][2][0][RTW89_IC][1][94] = -2,
- [0][1][2][0][RTW89_IC][2][94] = 127,
[0][1][2][0][RTW89_KCC][1][94] = 20,
[0][1][2][0][RTW89_KCC][0][94] = 127,
[0][1][2][0][RTW89_ACMA][1][94] = 127,
@@ -43038,13 +42064,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][94] = 127,
[0][1][2][0][RTW89_THAILAND][0][94] = 127,
[0][1][2][0][RTW89_FCC][1][96] = -2,
- [0][1][2][0][RTW89_FCC][2][96] = 127,
[0][1][2][0][RTW89_ETSI][1][96] = 127,
[0][1][2][0][RTW89_ETSI][0][96] = 127,
[0][1][2][0][RTW89_MKK][1][96] = 127,
[0][1][2][0][RTW89_MKK][0][96] = 127,
[0][1][2][0][RTW89_IC][1][96] = -2,
- [0][1][2][0][RTW89_IC][2][96] = 127,
[0][1][2][0][RTW89_KCC][1][96] = 20,
[0][1][2][0][RTW89_KCC][0][96] = 127,
[0][1][2][0][RTW89_ACMA][1][96] = 127,
@@ -43057,13 +42081,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][96] = 127,
[0][1][2][0][RTW89_THAILAND][0][96] = 127,
[0][1][2][0][RTW89_FCC][1][98] = -2,
- [0][1][2][0][RTW89_FCC][2][98] = 127,
[0][1][2][0][RTW89_ETSI][1][98] = 127,
[0][1][2][0][RTW89_ETSI][0][98] = 127,
[0][1][2][0][RTW89_MKK][1][98] = 127,
[0][1][2][0][RTW89_MKK][0][98] = 127,
[0][1][2][0][RTW89_IC][1][98] = -2,
- [0][1][2][0][RTW89_IC][2][98] = 127,
[0][1][2][0][RTW89_KCC][1][98] = 20,
[0][1][2][0][RTW89_KCC][0][98] = 127,
[0][1][2][0][RTW89_ACMA][1][98] = 127,
@@ -43076,13 +42098,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][98] = 127,
[0][1][2][0][RTW89_THAILAND][0][98] = 127,
[0][1][2][0][RTW89_FCC][1][100] = -2,
- [0][1][2][0][RTW89_FCC][2][100] = 127,
[0][1][2][0][RTW89_ETSI][1][100] = 127,
[0][1][2][0][RTW89_ETSI][0][100] = 127,
[0][1][2][0][RTW89_MKK][1][100] = 127,
[0][1][2][0][RTW89_MKK][0][100] = 127,
[0][1][2][0][RTW89_IC][1][100] = -2,
- [0][1][2][0][RTW89_IC][2][100] = 127,
[0][1][2][0][RTW89_KCC][1][100] = 20,
[0][1][2][0][RTW89_KCC][0][100] = 127,
[0][1][2][0][RTW89_ACMA][1][100] = 127,
@@ -43095,13 +42115,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][100] = 127,
[0][1][2][0][RTW89_THAILAND][0][100] = 127,
[0][1][2][0][RTW89_FCC][1][102] = -2,
- [0][1][2][0][RTW89_FCC][2][102] = 127,
[0][1][2][0][RTW89_ETSI][1][102] = 127,
[0][1][2][0][RTW89_ETSI][0][102] = 127,
[0][1][2][0][RTW89_MKK][1][102] = 127,
[0][1][2][0][RTW89_MKK][0][102] = 127,
[0][1][2][0][RTW89_IC][1][102] = -2,
- [0][1][2][0][RTW89_IC][2][102] = 127,
[0][1][2][0][RTW89_KCC][1][102] = 20,
[0][1][2][0][RTW89_KCC][0][102] = 127,
[0][1][2][0][RTW89_ACMA][1][102] = 127,
@@ -43114,13 +42132,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][102] = 127,
[0][1][2][0][RTW89_THAILAND][0][102] = 127,
[0][1][2][0][RTW89_FCC][1][104] = -2,
- [0][1][2][0][RTW89_FCC][2][104] = 127,
[0][1][2][0][RTW89_ETSI][1][104] = 127,
[0][1][2][0][RTW89_ETSI][0][104] = 127,
[0][1][2][0][RTW89_MKK][1][104] = 127,
[0][1][2][0][RTW89_MKK][0][104] = 127,
[0][1][2][0][RTW89_IC][1][104] = -2,
- [0][1][2][0][RTW89_IC][2][104] = 127,
[0][1][2][0][RTW89_KCC][1][104] = 20,
[0][1][2][0][RTW89_KCC][0][104] = 127,
[0][1][2][0][RTW89_ACMA][1][104] = 127,
@@ -43133,13 +42149,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][104] = 127,
[0][1][2][0][RTW89_THAILAND][0][104] = 127,
[0][1][2][0][RTW89_FCC][1][105] = -2,
- [0][1][2][0][RTW89_FCC][2][105] = 127,
[0][1][2][0][RTW89_ETSI][1][105] = 127,
[0][1][2][0][RTW89_ETSI][0][105] = 127,
[0][1][2][0][RTW89_MKK][1][105] = 127,
[0][1][2][0][RTW89_MKK][0][105] = 127,
[0][1][2][0][RTW89_IC][1][105] = -2,
- [0][1][2][0][RTW89_IC][2][105] = 127,
[0][1][2][0][RTW89_KCC][1][105] = 20,
[0][1][2][0][RTW89_KCC][0][105] = 127,
[0][1][2][0][RTW89_ACMA][1][105] = 127,
@@ -43152,13 +42166,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][105] = 127,
[0][1][2][0][RTW89_THAILAND][0][105] = 127,
[0][1][2][0][RTW89_FCC][1][107] = 1,
- [0][1][2][0][RTW89_FCC][2][107] = 127,
[0][1][2][0][RTW89_ETSI][1][107] = 127,
[0][1][2][0][RTW89_ETSI][0][107] = 127,
[0][1][2][0][RTW89_MKK][1][107] = 127,
[0][1][2][0][RTW89_MKK][0][107] = 127,
[0][1][2][0][RTW89_IC][1][107] = 1,
- [0][1][2][0][RTW89_IC][2][107] = 127,
[0][1][2][0][RTW89_KCC][1][107] = 20,
[0][1][2][0][RTW89_KCC][0][107] = 127,
[0][1][2][0][RTW89_ACMA][1][107] = 127,
@@ -43171,13 +42183,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][107] = 127,
[0][1][2][0][RTW89_THAILAND][0][107] = 127,
[0][1][2][0][RTW89_FCC][1][109] = 1,
- [0][1][2][0][RTW89_FCC][2][109] = 127,
[0][1][2][0][RTW89_ETSI][1][109] = 127,
[0][1][2][0][RTW89_ETSI][0][109] = 127,
[0][1][2][0][RTW89_MKK][1][109] = 127,
[0][1][2][0][RTW89_MKK][0][109] = 127,
[0][1][2][0][RTW89_IC][1][109] = 1,
- [0][1][2][0][RTW89_IC][2][109] = 127,
[0][1][2][0][RTW89_KCC][1][109] = 20,
[0][1][2][0][RTW89_KCC][0][109] = 127,
[0][1][2][0][RTW89_ACMA][1][109] = 127,
@@ -43190,13 +42200,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][109] = 127,
[0][1][2][0][RTW89_THAILAND][0][109] = 127,
[0][1][2][0][RTW89_FCC][1][111] = 127,
- [0][1][2][0][RTW89_FCC][2][111] = 127,
[0][1][2][0][RTW89_ETSI][1][111] = 127,
[0][1][2][0][RTW89_ETSI][0][111] = 127,
[0][1][2][0][RTW89_MKK][1][111] = 127,
[0][1][2][0][RTW89_MKK][0][111] = 127,
[0][1][2][0][RTW89_IC][1][111] = 127,
- [0][1][2][0][RTW89_IC][2][111] = 127,
[0][1][2][0][RTW89_KCC][1][111] = 127,
[0][1][2][0][RTW89_KCC][0][111] = 127,
[0][1][2][0][RTW89_ACMA][1][111] = 127,
@@ -43209,13 +42217,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][111] = 127,
[0][1][2][0][RTW89_THAILAND][0][111] = 127,
[0][1][2][0][RTW89_FCC][1][113] = 127,
- [0][1][2][0][RTW89_FCC][2][113] = 127,
[0][1][2][0][RTW89_ETSI][1][113] = 127,
[0][1][2][0][RTW89_ETSI][0][113] = 127,
[0][1][2][0][RTW89_MKK][1][113] = 127,
[0][1][2][0][RTW89_MKK][0][113] = 127,
[0][1][2][0][RTW89_IC][1][113] = 127,
- [0][1][2][0][RTW89_IC][2][113] = 127,
[0][1][2][0][RTW89_KCC][1][113] = 127,
[0][1][2][0][RTW89_KCC][0][113] = 127,
[0][1][2][0][RTW89_ACMA][1][113] = 127,
@@ -43228,13 +42234,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][113] = 127,
[0][1][2][0][RTW89_THAILAND][0][113] = 127,
[0][1][2][0][RTW89_FCC][1][115] = 127,
- [0][1][2][0][RTW89_FCC][2][115] = 127,
[0][1][2][0][RTW89_ETSI][1][115] = 127,
[0][1][2][0][RTW89_ETSI][0][115] = 127,
[0][1][2][0][RTW89_MKK][1][115] = 127,
[0][1][2][0][RTW89_MKK][0][115] = 127,
[0][1][2][0][RTW89_IC][1][115] = 127,
- [0][1][2][0][RTW89_IC][2][115] = 127,
[0][1][2][0][RTW89_KCC][1][115] = 127,
[0][1][2][0][RTW89_KCC][0][115] = 127,
[0][1][2][0][RTW89_ACMA][1][115] = 127,
@@ -43247,13 +42251,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][115] = 127,
[0][1][2][0][RTW89_THAILAND][0][115] = 127,
[0][1][2][0][RTW89_FCC][1][117] = 127,
- [0][1][2][0][RTW89_FCC][2][117] = 127,
[0][1][2][0][RTW89_ETSI][1][117] = 127,
[0][1][2][0][RTW89_ETSI][0][117] = 127,
[0][1][2][0][RTW89_MKK][1][117] = 127,
[0][1][2][0][RTW89_MKK][0][117] = 127,
[0][1][2][0][RTW89_IC][1][117] = 127,
- [0][1][2][0][RTW89_IC][2][117] = 127,
[0][1][2][0][RTW89_KCC][1][117] = 127,
[0][1][2][0][RTW89_KCC][0][117] = 127,
[0][1][2][0][RTW89_ACMA][1][117] = 127,
@@ -43266,13 +42268,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][117] = 127,
[0][1][2][0][RTW89_THAILAND][0][117] = 127,
[0][1][2][0][RTW89_FCC][1][119] = 127,
- [0][1][2][0][RTW89_FCC][2][119] = 127,
[0][1][2][0][RTW89_ETSI][1][119] = 127,
[0][1][2][0][RTW89_ETSI][0][119] = 127,
[0][1][2][0][RTW89_MKK][1][119] = 127,
[0][1][2][0][RTW89_MKK][0][119] = 127,
[0][1][2][0][RTW89_IC][1][119] = 127,
- [0][1][2][0][RTW89_IC][2][119] = 127,
[0][1][2][0][RTW89_KCC][1][119] = 127,
[0][1][2][0][RTW89_KCC][0][119] = 127,
[0][1][2][0][RTW89_ACMA][1][119] = 127,
@@ -43285,13 +42285,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][119] = 127,
[0][1][2][0][RTW89_THAILAND][0][119] = 127,
[0][1][2][1][RTW89_FCC][1][0] = -2,
- [0][1][2][1][RTW89_FCC][2][0] = 54,
[0][1][2][1][RTW89_ETSI][1][0] = 42,
[0][1][2][1][RTW89_ETSI][0][0] = 6,
[0][1][2][1][RTW89_MKK][1][0] = 56,
[0][1][2][1][RTW89_MKK][0][0] = 16,
[0][1][2][1][RTW89_IC][1][0] = -2,
- [0][1][2][1][RTW89_IC][2][0] = 54,
[0][1][2][1][RTW89_KCC][1][0] = 12,
[0][1][2][1][RTW89_KCC][0][0] = 10,
[0][1][2][1][RTW89_ACMA][1][0] = 42,
@@ -43304,13 +42302,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][0] = 44,
[0][1][2][1][RTW89_THAILAND][0][0] = -2,
[0][1][2][1][RTW89_FCC][1][2] = -4,
- [0][1][2][1][RTW89_FCC][2][2] = 54,
[0][1][2][1][RTW89_ETSI][1][2] = 42,
[0][1][2][1][RTW89_ETSI][0][2] = 6,
[0][1][2][1][RTW89_MKK][1][2] = 54,
[0][1][2][1][RTW89_MKK][0][2] = 16,
[0][1][2][1][RTW89_IC][1][2] = -4,
- [0][1][2][1][RTW89_IC][2][2] = 54,
[0][1][2][1][RTW89_KCC][1][2] = 12,
[0][1][2][1][RTW89_KCC][0][2] = 12,
[0][1][2][1][RTW89_ACMA][1][2] = 42,
@@ -43323,13 +42319,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][2] = 44,
[0][1][2][1][RTW89_THAILAND][0][2] = -4,
[0][1][2][1][RTW89_FCC][1][4] = -4,
- [0][1][2][1][RTW89_FCC][2][4] = 54,
[0][1][2][1][RTW89_ETSI][1][4] = 42,
[0][1][2][1][RTW89_ETSI][0][4] = 6,
[0][1][2][1][RTW89_MKK][1][4] = 54,
[0][1][2][1][RTW89_MKK][0][4] = 16,
[0][1][2][1][RTW89_IC][1][4] = -4,
- [0][1][2][1][RTW89_IC][2][4] = 54,
[0][1][2][1][RTW89_KCC][1][4] = 12,
[0][1][2][1][RTW89_KCC][0][4] = 12,
[0][1][2][1][RTW89_ACMA][1][4] = 42,
@@ -43342,13 +42336,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][4] = 44,
[0][1][2][1][RTW89_THAILAND][0][4] = -4,
[0][1][2][1][RTW89_FCC][1][6] = -4,
- [0][1][2][1][RTW89_FCC][2][6] = 54,
[0][1][2][1][RTW89_ETSI][1][6] = 42,
[0][1][2][1][RTW89_ETSI][0][6] = 6,
[0][1][2][1][RTW89_MKK][1][6] = 54,
[0][1][2][1][RTW89_MKK][0][6] = 16,
[0][1][2][1][RTW89_IC][1][6] = -4,
- [0][1][2][1][RTW89_IC][2][6] = 54,
[0][1][2][1][RTW89_KCC][1][6] = 12,
[0][1][2][1][RTW89_KCC][0][6] = 12,
[0][1][2][1][RTW89_ACMA][1][6] = 42,
@@ -43361,13 +42353,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][6] = 44,
[0][1][2][1][RTW89_THAILAND][0][6] = -4,
[0][1][2][1][RTW89_FCC][1][8] = -4,
- [0][1][2][1][RTW89_FCC][2][8] = 54,
[0][1][2][1][RTW89_ETSI][1][8] = 42,
[0][1][2][1][RTW89_ETSI][0][8] = 6,
[0][1][2][1][RTW89_MKK][1][8] = 54,
[0][1][2][1][RTW89_MKK][0][8] = 16,
[0][1][2][1][RTW89_IC][1][8] = -4,
- [0][1][2][1][RTW89_IC][2][8] = 54,
[0][1][2][1][RTW89_KCC][1][8] = 12,
[0][1][2][1][RTW89_KCC][0][8] = 12,
[0][1][2][1][RTW89_ACMA][1][8] = 42,
@@ -43380,13 +42370,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][8] = 44,
[0][1][2][1][RTW89_THAILAND][0][8] = -4,
[0][1][2][1][RTW89_FCC][1][10] = -4,
- [0][1][2][1][RTW89_FCC][2][10] = 54,
[0][1][2][1][RTW89_ETSI][1][10] = 42,
[0][1][2][1][RTW89_ETSI][0][10] = 6,
[0][1][2][1][RTW89_MKK][1][10] = 54,
[0][1][2][1][RTW89_MKK][0][10] = 16,
[0][1][2][1][RTW89_IC][1][10] = -4,
- [0][1][2][1][RTW89_IC][2][10] = 54,
[0][1][2][1][RTW89_KCC][1][10] = 12,
[0][1][2][1][RTW89_KCC][0][10] = 12,
[0][1][2][1][RTW89_ACMA][1][10] = 42,
@@ -43399,13 +42387,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][10] = 44,
[0][1][2][1][RTW89_THAILAND][0][10] = -4,
[0][1][2][1][RTW89_FCC][1][12] = -4,
- [0][1][2][1][RTW89_FCC][2][12] = 54,
[0][1][2][1][RTW89_ETSI][1][12] = 42,
[0][1][2][1][RTW89_ETSI][0][12] = 6,
[0][1][2][1][RTW89_MKK][1][12] = 54,
[0][1][2][1][RTW89_MKK][0][12] = 16,
[0][1][2][1][RTW89_IC][1][12] = -4,
- [0][1][2][1][RTW89_IC][2][12] = 54,
[0][1][2][1][RTW89_KCC][1][12] = 12,
[0][1][2][1][RTW89_KCC][0][12] = 12,
[0][1][2][1][RTW89_ACMA][1][12] = 42,
@@ -43418,13 +42404,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][12] = 44,
[0][1][2][1][RTW89_THAILAND][0][12] = -4,
[0][1][2][1][RTW89_FCC][1][14] = -4,
- [0][1][2][1][RTW89_FCC][2][14] = 54,
[0][1][2][1][RTW89_ETSI][1][14] = 42,
[0][1][2][1][RTW89_ETSI][0][14] = 6,
[0][1][2][1][RTW89_MKK][1][14] = 54,
[0][1][2][1][RTW89_MKK][0][14] = 16,
[0][1][2][1][RTW89_IC][1][14] = -4,
- [0][1][2][1][RTW89_IC][2][14] = 54,
[0][1][2][1][RTW89_KCC][1][14] = 12,
[0][1][2][1][RTW89_KCC][0][14] = 12,
[0][1][2][1][RTW89_ACMA][1][14] = 42,
@@ -43437,13 +42421,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][14] = 44,
[0][1][2][1][RTW89_THAILAND][0][14] = -4,
[0][1][2][1][RTW89_FCC][1][15] = -4,
- [0][1][2][1][RTW89_FCC][2][15] = 54,
[0][1][2][1][RTW89_ETSI][1][15] = 42,
[0][1][2][1][RTW89_ETSI][0][15] = 6,
[0][1][2][1][RTW89_MKK][1][15] = 54,
[0][1][2][1][RTW89_MKK][0][15] = 16,
[0][1][2][1][RTW89_IC][1][15] = -4,
- [0][1][2][1][RTW89_IC][2][15] = 54,
[0][1][2][1][RTW89_KCC][1][15] = 12,
[0][1][2][1][RTW89_KCC][0][15] = 12,
[0][1][2][1][RTW89_ACMA][1][15] = 42,
@@ -43456,13 +42438,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][15] = 44,
[0][1][2][1][RTW89_THAILAND][0][15] = -4,
[0][1][2][1][RTW89_FCC][1][17] = -4,
- [0][1][2][1][RTW89_FCC][2][17] = 54,
[0][1][2][1][RTW89_ETSI][1][17] = 42,
[0][1][2][1][RTW89_ETSI][0][17] = 6,
[0][1][2][1][RTW89_MKK][1][17] = 54,
[0][1][2][1][RTW89_MKK][0][17] = 16,
[0][1][2][1][RTW89_IC][1][17] = -4,
- [0][1][2][1][RTW89_IC][2][17] = 54,
[0][1][2][1][RTW89_KCC][1][17] = 12,
[0][1][2][1][RTW89_KCC][0][17] = 12,
[0][1][2][1][RTW89_ACMA][1][17] = 42,
@@ -43475,13 +42455,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][17] = 44,
[0][1][2][1][RTW89_THAILAND][0][17] = -4,
[0][1][2][1][RTW89_FCC][1][19] = -4,
- [0][1][2][1][RTW89_FCC][2][19] = 54,
[0][1][2][1][RTW89_ETSI][1][19] = 42,
[0][1][2][1][RTW89_ETSI][0][19] = 6,
[0][1][2][1][RTW89_MKK][1][19] = 54,
[0][1][2][1][RTW89_MKK][0][19] = 16,
[0][1][2][1][RTW89_IC][1][19] = -4,
- [0][1][2][1][RTW89_IC][2][19] = 54,
[0][1][2][1][RTW89_KCC][1][19] = 12,
[0][1][2][1][RTW89_KCC][0][19] = 12,
[0][1][2][1][RTW89_ACMA][1][19] = 42,
@@ -43494,13 +42472,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][19] = 44,
[0][1][2][1][RTW89_THAILAND][0][19] = -4,
[0][1][2][1][RTW89_FCC][1][21] = -4,
- [0][1][2][1][RTW89_FCC][2][21] = 54,
[0][1][2][1][RTW89_ETSI][1][21] = 42,
[0][1][2][1][RTW89_ETSI][0][21] = 6,
[0][1][2][1][RTW89_MKK][1][21] = 54,
[0][1][2][1][RTW89_MKK][0][21] = 16,
[0][1][2][1][RTW89_IC][1][21] = -4,
- [0][1][2][1][RTW89_IC][2][21] = 54,
[0][1][2][1][RTW89_KCC][1][21] = 12,
[0][1][2][1][RTW89_KCC][0][21] = 12,
[0][1][2][1][RTW89_ACMA][1][21] = 42,
@@ -43513,13 +42489,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][21] = 44,
[0][1][2][1][RTW89_THAILAND][0][21] = -4,
[0][1][2][1][RTW89_FCC][1][23] = -4,
- [0][1][2][1][RTW89_FCC][2][23] = 68,
[0][1][2][1][RTW89_ETSI][1][23] = 42,
[0][1][2][1][RTW89_ETSI][0][23] = 6,
[0][1][2][1][RTW89_MKK][1][23] = 54,
[0][1][2][1][RTW89_MKK][0][23] = 16,
[0][1][2][1][RTW89_IC][1][23] = -4,
- [0][1][2][1][RTW89_IC][2][23] = 68,
[0][1][2][1][RTW89_KCC][1][23] = 12,
[0][1][2][1][RTW89_KCC][0][23] = 10,
[0][1][2][1][RTW89_ACMA][1][23] = 42,
@@ -43532,13 +42506,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][23] = 44,
[0][1][2][1][RTW89_THAILAND][0][23] = -4,
[0][1][2][1][RTW89_FCC][1][25] = -4,
- [0][1][2][1][RTW89_FCC][2][25] = 68,
[0][1][2][1][RTW89_ETSI][1][25] = 42,
[0][1][2][1][RTW89_ETSI][0][25] = 6,
[0][1][2][1][RTW89_MKK][1][25] = 54,
[0][1][2][1][RTW89_MKK][0][25] = 16,
[0][1][2][1][RTW89_IC][1][25] = -4,
- [0][1][2][1][RTW89_IC][2][25] = 68,
[0][1][2][1][RTW89_KCC][1][25] = 12,
[0][1][2][1][RTW89_KCC][0][25] = 14,
[0][1][2][1][RTW89_ACMA][1][25] = 42,
@@ -43551,13 +42523,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][25] = 42,
[0][1][2][1][RTW89_THAILAND][0][25] = -4,
[0][1][2][1][RTW89_FCC][1][27] = -4,
- [0][1][2][1][RTW89_FCC][2][27] = 68,
[0][1][2][1][RTW89_ETSI][1][27] = 42,
[0][1][2][1][RTW89_ETSI][0][27] = 6,
[0][1][2][1][RTW89_MKK][1][27] = 54,
[0][1][2][1][RTW89_MKK][0][27] = 16,
[0][1][2][1][RTW89_IC][1][27] = -4,
- [0][1][2][1][RTW89_IC][2][27] = 68,
[0][1][2][1][RTW89_KCC][1][27] = 12,
[0][1][2][1][RTW89_KCC][0][27] = 14,
[0][1][2][1][RTW89_ACMA][1][27] = 42,
@@ -43570,13 +42540,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][27] = 42,
[0][1][2][1][RTW89_THAILAND][0][27] = -4,
[0][1][2][1][RTW89_FCC][1][29] = -4,
- [0][1][2][1][RTW89_FCC][2][29] = 68,
[0][1][2][1][RTW89_ETSI][1][29] = 42,
[0][1][2][1][RTW89_ETSI][0][29] = 6,
[0][1][2][1][RTW89_MKK][1][29] = 54,
[0][1][2][1][RTW89_MKK][0][29] = 16,
[0][1][2][1][RTW89_IC][1][29] = -4,
- [0][1][2][1][RTW89_IC][2][29] = 68,
[0][1][2][1][RTW89_KCC][1][29] = 12,
[0][1][2][1][RTW89_KCC][0][29] = 14,
[0][1][2][1][RTW89_ACMA][1][29] = 42,
@@ -43589,13 +42557,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][29] = 42,
[0][1][2][1][RTW89_THAILAND][0][29] = -4,
[0][1][2][1][RTW89_FCC][1][30] = -4,
- [0][1][2][1][RTW89_FCC][2][30] = 68,
[0][1][2][1][RTW89_ETSI][1][30] = 42,
[0][1][2][1][RTW89_ETSI][0][30] = 6,
[0][1][2][1][RTW89_MKK][1][30] = 54,
[0][1][2][1][RTW89_MKK][0][30] = 16,
[0][1][2][1][RTW89_IC][1][30] = -4,
- [0][1][2][1][RTW89_IC][2][30] = 68,
[0][1][2][1][RTW89_KCC][1][30] = 12,
[0][1][2][1][RTW89_KCC][0][30] = 14,
[0][1][2][1][RTW89_ACMA][1][30] = 42,
@@ -43608,13 +42574,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][30] = 42,
[0][1][2][1][RTW89_THAILAND][0][30] = -4,
[0][1][2][1][RTW89_FCC][1][32] = -4,
- [0][1][2][1][RTW89_FCC][2][32] = 68,
[0][1][2][1][RTW89_ETSI][1][32] = 42,
[0][1][2][1][RTW89_ETSI][0][32] = 6,
[0][1][2][1][RTW89_MKK][1][32] = 54,
[0][1][2][1][RTW89_MKK][0][32] = 16,
[0][1][2][1][RTW89_IC][1][32] = -4,
- [0][1][2][1][RTW89_IC][2][32] = 68,
[0][1][2][1][RTW89_KCC][1][32] = 12,
[0][1][2][1][RTW89_KCC][0][32] = 14,
[0][1][2][1][RTW89_ACMA][1][32] = 42,
@@ -43627,13 +42591,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][32] = 42,
[0][1][2][1][RTW89_THAILAND][0][32] = -4,
[0][1][2][1][RTW89_FCC][1][34] = -4,
- [0][1][2][1][RTW89_FCC][2][34] = 68,
[0][1][2][1][RTW89_ETSI][1][34] = 42,
[0][1][2][1][RTW89_ETSI][0][34] = 6,
[0][1][2][1][RTW89_MKK][1][34] = 54,
[0][1][2][1][RTW89_MKK][0][34] = 16,
[0][1][2][1][RTW89_IC][1][34] = -4,
- [0][1][2][1][RTW89_IC][2][34] = 68,
[0][1][2][1][RTW89_KCC][1][34] = 12,
[0][1][2][1][RTW89_KCC][0][34] = 14,
[0][1][2][1][RTW89_ACMA][1][34] = 42,
@@ -43646,13 +42608,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][34] = 42,
[0][1][2][1][RTW89_THAILAND][0][34] = -4,
[0][1][2][1][RTW89_FCC][1][36] = -4,
- [0][1][2][1][RTW89_FCC][2][36] = 68,
[0][1][2][1][RTW89_ETSI][1][36] = 42,
[0][1][2][1][RTW89_ETSI][0][36] = 6,
[0][1][2][1][RTW89_MKK][1][36] = 54,
[0][1][2][1][RTW89_MKK][0][36] = 16,
[0][1][2][1][RTW89_IC][1][36] = -4,
- [0][1][2][1][RTW89_IC][2][36] = 68,
[0][1][2][1][RTW89_KCC][1][36] = 12,
[0][1][2][1][RTW89_KCC][0][36] = 14,
[0][1][2][1][RTW89_ACMA][1][36] = 42,
@@ -43665,13 +42625,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][36] = 42,
[0][1][2][1][RTW89_THAILAND][0][36] = -4,
[0][1][2][1][RTW89_FCC][1][38] = -4,
- [0][1][2][1][RTW89_FCC][2][38] = 68,
[0][1][2][1][RTW89_ETSI][1][38] = 42,
[0][1][2][1][RTW89_ETSI][0][38] = 6,
[0][1][2][1][RTW89_MKK][1][38] = 54,
[0][1][2][1][RTW89_MKK][0][38] = 16,
[0][1][2][1][RTW89_IC][1][38] = -4,
- [0][1][2][1][RTW89_IC][2][38] = 68,
[0][1][2][1][RTW89_KCC][1][38] = 12,
[0][1][2][1][RTW89_KCC][0][38] = 14,
[0][1][2][1][RTW89_ACMA][1][38] = 42,
@@ -43684,13 +42642,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][38] = 42,
[0][1][2][1][RTW89_THAILAND][0][38] = -4,
[0][1][2][1][RTW89_FCC][1][40] = -4,
- [0][1][2][1][RTW89_FCC][2][40] = 68,
[0][1][2][1][RTW89_ETSI][1][40] = 42,
[0][1][2][1][RTW89_ETSI][0][40] = 6,
[0][1][2][1][RTW89_MKK][1][40] = 54,
[0][1][2][1][RTW89_MKK][0][40] = 16,
[0][1][2][1][RTW89_IC][1][40] = -4,
- [0][1][2][1][RTW89_IC][2][40] = 68,
[0][1][2][1][RTW89_KCC][1][40] = 12,
[0][1][2][1][RTW89_KCC][0][40] = 14,
[0][1][2][1][RTW89_ACMA][1][40] = 42,
@@ -43703,13 +42659,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][40] = 42,
[0][1][2][1][RTW89_THAILAND][0][40] = -4,
[0][1][2][1][RTW89_FCC][1][42] = -4,
- [0][1][2][1][RTW89_FCC][2][42] = 68,
[0][1][2][1][RTW89_ETSI][1][42] = 42,
[0][1][2][1][RTW89_ETSI][0][42] = 6,
[0][1][2][1][RTW89_MKK][1][42] = 54,
[0][1][2][1][RTW89_MKK][0][42] = 16,
[0][1][2][1][RTW89_IC][1][42] = -4,
- [0][1][2][1][RTW89_IC][2][42] = 68,
[0][1][2][1][RTW89_KCC][1][42] = 12,
[0][1][2][1][RTW89_KCC][0][42] = 14,
[0][1][2][1][RTW89_ACMA][1][42] = 42,
@@ -43722,13 +42676,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][42] = 42,
[0][1][2][1][RTW89_THAILAND][0][42] = -4,
[0][1][2][1][RTW89_FCC][1][44] = -2,
- [0][1][2][1][RTW89_FCC][2][44] = 68,
[0][1][2][1][RTW89_ETSI][1][44] = 42,
[0][1][2][1][RTW89_ETSI][0][44] = 6,
[0][1][2][1][RTW89_MKK][1][44] = 34,
[0][1][2][1][RTW89_MKK][0][44] = 16,
[0][1][2][1][RTW89_IC][1][44] = -2,
- [0][1][2][1][RTW89_IC][2][44] = 68,
[0][1][2][1][RTW89_KCC][1][44] = 12,
[0][1][2][1][RTW89_KCC][0][44] = 12,
[0][1][2][1][RTW89_ACMA][1][44] = 42,
@@ -43741,13 +42693,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][44] = 42,
[0][1][2][1][RTW89_THAILAND][0][44] = -2,
[0][1][2][1][RTW89_FCC][1][45] = -2,
- [0][1][2][1][RTW89_FCC][2][45] = 127,
[0][1][2][1][RTW89_ETSI][1][45] = 127,
[0][1][2][1][RTW89_ETSI][0][45] = 127,
[0][1][2][1][RTW89_MKK][1][45] = 127,
[0][1][2][1][RTW89_MKK][0][45] = 127,
[0][1][2][1][RTW89_IC][1][45] = -2,
- [0][1][2][1][RTW89_IC][2][45] = 70,
[0][1][2][1][RTW89_KCC][1][45] = 12,
[0][1][2][1][RTW89_KCC][0][45] = 127,
[0][1][2][1][RTW89_ACMA][1][45] = 127,
@@ -43760,13 +42710,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][45] = 127,
[0][1][2][1][RTW89_THAILAND][0][45] = 127,
[0][1][2][1][RTW89_FCC][1][47] = -2,
- [0][1][2][1][RTW89_FCC][2][47] = 127,
[0][1][2][1][RTW89_ETSI][1][47] = 127,
[0][1][2][1][RTW89_ETSI][0][47] = 127,
[0][1][2][1][RTW89_MKK][1][47] = 127,
[0][1][2][1][RTW89_MKK][0][47] = 127,
[0][1][2][1][RTW89_IC][1][47] = -2,
- [0][1][2][1][RTW89_IC][2][47] = 68,
[0][1][2][1][RTW89_KCC][1][47] = 12,
[0][1][2][1][RTW89_KCC][0][47] = 127,
[0][1][2][1][RTW89_ACMA][1][47] = 127,
@@ -43779,13 +42727,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][47] = 127,
[0][1][2][1][RTW89_THAILAND][0][47] = 127,
[0][1][2][1][RTW89_FCC][1][49] = -2,
- [0][1][2][1][RTW89_FCC][2][49] = 127,
[0][1][2][1][RTW89_ETSI][1][49] = 127,
[0][1][2][1][RTW89_ETSI][0][49] = 127,
[0][1][2][1][RTW89_MKK][1][49] = 127,
[0][1][2][1][RTW89_MKK][0][49] = 127,
[0][1][2][1][RTW89_IC][1][49] = -2,
- [0][1][2][1][RTW89_IC][2][49] = 68,
[0][1][2][1][RTW89_KCC][1][49] = 12,
[0][1][2][1][RTW89_KCC][0][49] = 127,
[0][1][2][1][RTW89_ACMA][1][49] = 127,
@@ -43798,13 +42744,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][49] = 127,
[0][1][2][1][RTW89_THAILAND][0][49] = 127,
[0][1][2][1][RTW89_FCC][1][51] = -2,
- [0][1][2][1][RTW89_FCC][2][51] = 127,
[0][1][2][1][RTW89_ETSI][1][51] = 127,
[0][1][2][1][RTW89_ETSI][0][51] = 127,
[0][1][2][1][RTW89_MKK][1][51] = 127,
[0][1][2][1][RTW89_MKK][0][51] = 127,
[0][1][2][1][RTW89_IC][1][51] = -2,
- [0][1][2][1][RTW89_IC][2][51] = 68,
[0][1][2][1][RTW89_KCC][1][51] = 12,
[0][1][2][1][RTW89_KCC][0][51] = 127,
[0][1][2][1][RTW89_ACMA][1][51] = 127,
@@ -43817,13 +42761,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][51] = 127,
[0][1][2][1][RTW89_THAILAND][0][51] = 127,
[0][1][2][1][RTW89_FCC][1][53] = -2,
- [0][1][2][1][RTW89_FCC][2][53] = 127,
[0][1][2][1][RTW89_ETSI][1][53] = 127,
[0][1][2][1][RTW89_ETSI][0][53] = 127,
[0][1][2][1][RTW89_MKK][1][53] = 127,
[0][1][2][1][RTW89_MKK][0][53] = 127,
[0][1][2][1][RTW89_IC][1][53] = -2,
- [0][1][2][1][RTW89_IC][2][53] = 68,
[0][1][2][1][RTW89_KCC][1][53] = 12,
[0][1][2][1][RTW89_KCC][0][53] = 127,
[0][1][2][1][RTW89_ACMA][1][53] = 127,
@@ -43836,13 +42778,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][53] = 127,
[0][1][2][1][RTW89_THAILAND][0][53] = 127,
[0][1][2][1][RTW89_FCC][1][55] = -2,
- [0][1][2][1][RTW89_FCC][2][55] = 68,
[0][1][2][1][RTW89_ETSI][1][55] = 127,
[0][1][2][1][RTW89_ETSI][0][55] = 127,
[0][1][2][1][RTW89_MKK][1][55] = 127,
[0][1][2][1][RTW89_MKK][0][55] = 127,
[0][1][2][1][RTW89_IC][1][55] = -2,
- [0][1][2][1][RTW89_IC][2][55] = 68,
[0][1][2][1][RTW89_KCC][1][55] = 12,
[0][1][2][1][RTW89_KCC][0][55] = 127,
[0][1][2][1][RTW89_ACMA][1][55] = 127,
@@ -43855,13 +42795,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][55] = 127,
[0][1][2][1][RTW89_THAILAND][0][55] = 127,
[0][1][2][1][RTW89_FCC][1][57] = -2,
- [0][1][2][1][RTW89_FCC][2][57] = 68,
[0][1][2][1][RTW89_ETSI][1][57] = 127,
[0][1][2][1][RTW89_ETSI][0][57] = 127,
[0][1][2][1][RTW89_MKK][1][57] = 127,
[0][1][2][1][RTW89_MKK][0][57] = 127,
[0][1][2][1][RTW89_IC][1][57] = -2,
- [0][1][2][1][RTW89_IC][2][57] = 68,
[0][1][2][1][RTW89_KCC][1][57] = 12,
[0][1][2][1][RTW89_KCC][0][57] = 127,
[0][1][2][1][RTW89_ACMA][1][57] = 127,
@@ -43874,13 +42812,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][57] = 127,
[0][1][2][1][RTW89_THAILAND][0][57] = 127,
[0][1][2][1][RTW89_FCC][1][59] = -2,
- [0][1][2][1][RTW89_FCC][2][59] = 68,
[0][1][2][1][RTW89_ETSI][1][59] = 127,
[0][1][2][1][RTW89_ETSI][0][59] = 127,
[0][1][2][1][RTW89_MKK][1][59] = 127,
[0][1][2][1][RTW89_MKK][0][59] = 127,
[0][1][2][1][RTW89_IC][1][59] = -2,
- [0][1][2][1][RTW89_IC][2][59] = 68,
[0][1][2][1][RTW89_KCC][1][59] = 12,
[0][1][2][1][RTW89_KCC][0][59] = 127,
[0][1][2][1][RTW89_ACMA][1][59] = 127,
@@ -43893,13 +42829,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][59] = 127,
[0][1][2][1][RTW89_THAILAND][0][59] = 127,
[0][1][2][1][RTW89_FCC][1][60] = -2,
- [0][1][2][1][RTW89_FCC][2][60] = 68,
[0][1][2][1][RTW89_ETSI][1][60] = 127,
[0][1][2][1][RTW89_ETSI][0][60] = 127,
[0][1][2][1][RTW89_MKK][1][60] = 127,
[0][1][2][1][RTW89_MKK][0][60] = 127,
[0][1][2][1][RTW89_IC][1][60] = -2,
- [0][1][2][1][RTW89_IC][2][60] = 68,
[0][1][2][1][RTW89_KCC][1][60] = 12,
[0][1][2][1][RTW89_KCC][0][60] = 127,
[0][1][2][1][RTW89_ACMA][1][60] = 127,
@@ -43912,13 +42846,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][60] = 127,
[0][1][2][1][RTW89_THAILAND][0][60] = 127,
[0][1][2][1][RTW89_FCC][1][62] = -2,
- [0][1][2][1][RTW89_FCC][2][62] = 68,
[0][1][2][1][RTW89_ETSI][1][62] = 127,
[0][1][2][1][RTW89_ETSI][0][62] = 127,
[0][1][2][1][RTW89_MKK][1][62] = 127,
[0][1][2][1][RTW89_MKK][0][62] = 127,
[0][1][2][1][RTW89_IC][1][62] = -2,
- [0][1][2][1][RTW89_IC][2][62] = 68,
[0][1][2][1][RTW89_KCC][1][62] = 12,
[0][1][2][1][RTW89_KCC][0][62] = 127,
[0][1][2][1][RTW89_ACMA][1][62] = 127,
@@ -43931,13 +42863,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][62] = 127,
[0][1][2][1][RTW89_THAILAND][0][62] = 127,
[0][1][2][1][RTW89_FCC][1][64] = -2,
- [0][1][2][1][RTW89_FCC][2][64] = 68,
[0][1][2][1][RTW89_ETSI][1][64] = 127,
[0][1][2][1][RTW89_ETSI][0][64] = 127,
[0][1][2][1][RTW89_MKK][1][64] = 127,
[0][1][2][1][RTW89_MKK][0][64] = 127,
[0][1][2][1][RTW89_IC][1][64] = -2,
- [0][1][2][1][RTW89_IC][2][64] = 68,
[0][1][2][1][RTW89_KCC][1][64] = 12,
[0][1][2][1][RTW89_KCC][0][64] = 127,
[0][1][2][1][RTW89_ACMA][1][64] = 127,
@@ -43950,13 +42880,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][64] = 127,
[0][1][2][1][RTW89_THAILAND][0][64] = 127,
[0][1][2][1][RTW89_FCC][1][66] = -2,
- [0][1][2][1][RTW89_FCC][2][66] = 68,
[0][1][2][1][RTW89_ETSI][1][66] = 127,
[0][1][2][1][RTW89_ETSI][0][66] = 127,
[0][1][2][1][RTW89_MKK][1][66] = 127,
[0][1][2][1][RTW89_MKK][0][66] = 127,
[0][1][2][1][RTW89_IC][1][66] = -2,
- [0][1][2][1][RTW89_IC][2][66] = 68,
[0][1][2][1][RTW89_KCC][1][66] = 12,
[0][1][2][1][RTW89_KCC][0][66] = 127,
[0][1][2][1][RTW89_ACMA][1][66] = 127,
@@ -43969,13 +42897,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][66] = 127,
[0][1][2][1][RTW89_THAILAND][0][66] = 127,
[0][1][2][1][RTW89_FCC][1][68] = -2,
- [0][1][2][1][RTW89_FCC][2][68] = 68,
[0][1][2][1][RTW89_ETSI][1][68] = 127,
[0][1][2][1][RTW89_ETSI][0][68] = 127,
[0][1][2][1][RTW89_MKK][1][68] = 127,
[0][1][2][1][RTW89_MKK][0][68] = 127,
[0][1][2][1][RTW89_IC][1][68] = -2,
- [0][1][2][1][RTW89_IC][2][68] = 68,
[0][1][2][1][RTW89_KCC][1][68] = 12,
[0][1][2][1][RTW89_KCC][0][68] = 127,
[0][1][2][1][RTW89_ACMA][1][68] = 127,
@@ -43988,13 +42914,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][68] = 127,
[0][1][2][1][RTW89_THAILAND][0][68] = 127,
[0][1][2][1][RTW89_FCC][1][70] = -2,
- [0][1][2][1][RTW89_FCC][2][70] = 68,
[0][1][2][1][RTW89_ETSI][1][70] = 127,
[0][1][2][1][RTW89_ETSI][0][70] = 127,
[0][1][2][1][RTW89_MKK][1][70] = 127,
[0][1][2][1][RTW89_MKK][0][70] = 127,
[0][1][2][1][RTW89_IC][1][70] = -2,
- [0][1][2][1][RTW89_IC][2][70] = 68,
[0][1][2][1][RTW89_KCC][1][70] = 12,
[0][1][2][1][RTW89_KCC][0][70] = 127,
[0][1][2][1][RTW89_ACMA][1][70] = 127,
@@ -44007,13 +42931,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][70] = 127,
[0][1][2][1][RTW89_THAILAND][0][70] = 127,
[0][1][2][1][RTW89_FCC][1][72] = -2,
- [0][1][2][1][RTW89_FCC][2][72] = 68,
[0][1][2][1][RTW89_ETSI][1][72] = 127,
[0][1][2][1][RTW89_ETSI][0][72] = 127,
[0][1][2][1][RTW89_MKK][1][72] = 127,
[0][1][2][1][RTW89_MKK][0][72] = 127,
[0][1][2][1][RTW89_IC][1][72] = -2,
- [0][1][2][1][RTW89_IC][2][72] = 68,
[0][1][2][1][RTW89_KCC][1][72] = 12,
[0][1][2][1][RTW89_KCC][0][72] = 127,
[0][1][2][1][RTW89_ACMA][1][72] = 127,
@@ -44026,13 +42948,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][72] = 127,
[0][1][2][1][RTW89_THAILAND][0][72] = 127,
[0][1][2][1][RTW89_FCC][1][74] = -2,
- [0][1][2][1][RTW89_FCC][2][74] = 68,
[0][1][2][1][RTW89_ETSI][1][74] = 127,
[0][1][2][1][RTW89_ETSI][0][74] = 127,
[0][1][2][1][RTW89_MKK][1][74] = 127,
[0][1][2][1][RTW89_MKK][0][74] = 127,
[0][1][2][1][RTW89_IC][1][74] = -2,
- [0][1][2][1][RTW89_IC][2][74] = 68,
[0][1][2][1][RTW89_KCC][1][74] = 12,
[0][1][2][1][RTW89_KCC][0][74] = 127,
[0][1][2][1][RTW89_ACMA][1][74] = 127,
@@ -44045,13 +42965,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][74] = 127,
[0][1][2][1][RTW89_THAILAND][0][74] = 127,
[0][1][2][1][RTW89_FCC][1][75] = -2,
- [0][1][2][1][RTW89_FCC][2][75] = 68,
[0][1][2][1][RTW89_ETSI][1][75] = 127,
[0][1][2][1][RTW89_ETSI][0][75] = 127,
[0][1][2][1][RTW89_MKK][1][75] = 127,
[0][1][2][1][RTW89_MKK][0][75] = 127,
[0][1][2][1][RTW89_IC][1][75] = -2,
- [0][1][2][1][RTW89_IC][2][75] = 68,
[0][1][2][1][RTW89_KCC][1][75] = 12,
[0][1][2][1][RTW89_KCC][0][75] = 127,
[0][1][2][1][RTW89_ACMA][1][75] = 127,
@@ -44064,13 +42982,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][75] = 127,
[0][1][2][1][RTW89_THAILAND][0][75] = 127,
[0][1][2][1][RTW89_FCC][1][77] = -2,
- [0][1][2][1][RTW89_FCC][2][77] = 68,
[0][1][2][1][RTW89_ETSI][1][77] = 127,
[0][1][2][1][RTW89_ETSI][0][77] = 127,
[0][1][2][1][RTW89_MKK][1][77] = 127,
[0][1][2][1][RTW89_MKK][0][77] = 127,
[0][1][2][1][RTW89_IC][1][77] = -2,
- [0][1][2][1][RTW89_IC][2][77] = 68,
[0][1][2][1][RTW89_KCC][1][77] = 12,
[0][1][2][1][RTW89_KCC][0][77] = 127,
[0][1][2][1][RTW89_ACMA][1][77] = 127,
@@ -44083,13 +42999,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][77] = 127,
[0][1][2][1][RTW89_THAILAND][0][77] = 127,
[0][1][2][1][RTW89_FCC][1][79] = -2,
- [0][1][2][1][RTW89_FCC][2][79] = 68,
[0][1][2][1][RTW89_ETSI][1][79] = 127,
[0][1][2][1][RTW89_ETSI][0][79] = 127,
[0][1][2][1][RTW89_MKK][1][79] = 127,
[0][1][2][1][RTW89_MKK][0][79] = 127,
[0][1][2][1][RTW89_IC][1][79] = -2,
- [0][1][2][1][RTW89_IC][2][79] = 68,
[0][1][2][1][RTW89_KCC][1][79] = 12,
[0][1][2][1][RTW89_KCC][0][79] = 127,
[0][1][2][1][RTW89_ACMA][1][79] = 127,
@@ -44102,13 +43016,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][79] = 127,
[0][1][2][1][RTW89_THAILAND][0][79] = 127,
[0][1][2][1][RTW89_FCC][1][81] = -2,
- [0][1][2][1][RTW89_FCC][2][81] = 68,
[0][1][2][1][RTW89_ETSI][1][81] = 127,
[0][1][2][1][RTW89_ETSI][0][81] = 127,
[0][1][2][1][RTW89_MKK][1][81] = 127,
[0][1][2][1][RTW89_MKK][0][81] = 127,
[0][1][2][1][RTW89_IC][1][81] = -2,
- [0][1][2][1][RTW89_IC][2][81] = 68,
[0][1][2][1][RTW89_KCC][1][81] = 12,
[0][1][2][1][RTW89_KCC][0][81] = 127,
[0][1][2][1][RTW89_ACMA][1][81] = 127,
@@ -44121,13 +43033,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][81] = 127,
[0][1][2][1][RTW89_THAILAND][0][81] = 127,
[0][1][2][1][RTW89_FCC][1][83] = -2,
- [0][1][2][1][RTW89_FCC][2][83] = 68,
[0][1][2][1][RTW89_ETSI][1][83] = 127,
[0][1][2][1][RTW89_ETSI][0][83] = 127,
[0][1][2][1][RTW89_MKK][1][83] = 127,
[0][1][2][1][RTW89_MKK][0][83] = 127,
[0][1][2][1][RTW89_IC][1][83] = -2,
- [0][1][2][1][RTW89_IC][2][83] = 68,
[0][1][2][1][RTW89_KCC][1][83] = 20,
[0][1][2][1][RTW89_KCC][0][83] = 127,
[0][1][2][1][RTW89_ACMA][1][83] = 127,
@@ -44140,13 +43050,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][83] = 127,
[0][1][2][1][RTW89_THAILAND][0][83] = 127,
[0][1][2][1][RTW89_FCC][1][85] = -2,
- [0][1][2][1][RTW89_FCC][2][85] = 68,
[0][1][2][1][RTW89_ETSI][1][85] = 127,
[0][1][2][1][RTW89_ETSI][0][85] = 127,
[0][1][2][1][RTW89_MKK][1][85] = 127,
[0][1][2][1][RTW89_MKK][0][85] = 127,
[0][1][2][1][RTW89_IC][1][85] = -2,
- [0][1][2][1][RTW89_IC][2][85] = 68,
[0][1][2][1][RTW89_KCC][1][85] = 20,
[0][1][2][1][RTW89_KCC][0][85] = 127,
[0][1][2][1][RTW89_ACMA][1][85] = 127,
@@ -44159,13 +43067,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][85] = 127,
[0][1][2][1][RTW89_THAILAND][0][85] = 127,
[0][1][2][1][RTW89_FCC][1][87] = -2,
- [0][1][2][1][RTW89_FCC][2][87] = 127,
[0][1][2][1][RTW89_ETSI][1][87] = 127,
[0][1][2][1][RTW89_ETSI][0][87] = 127,
[0][1][2][1][RTW89_MKK][1][87] = 127,
[0][1][2][1][RTW89_MKK][0][87] = 127,
[0][1][2][1][RTW89_IC][1][87] = -2,
- [0][1][2][1][RTW89_IC][2][87] = 127,
[0][1][2][1][RTW89_KCC][1][87] = 20,
[0][1][2][1][RTW89_KCC][0][87] = 127,
[0][1][2][1][RTW89_ACMA][1][87] = 127,
@@ -44178,13 +43084,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][87] = 127,
[0][1][2][1][RTW89_THAILAND][0][87] = 127,
[0][1][2][1][RTW89_FCC][1][89] = -2,
- [0][1][2][1][RTW89_FCC][2][89] = 127,
[0][1][2][1][RTW89_ETSI][1][89] = 127,
[0][1][2][1][RTW89_ETSI][0][89] = 127,
[0][1][2][1][RTW89_MKK][1][89] = 127,
[0][1][2][1][RTW89_MKK][0][89] = 127,
[0][1][2][1][RTW89_IC][1][89] = -2,
- [0][1][2][1][RTW89_IC][2][89] = 127,
[0][1][2][1][RTW89_KCC][1][89] = 20,
[0][1][2][1][RTW89_KCC][0][89] = 127,
[0][1][2][1][RTW89_ACMA][1][89] = 127,
@@ -44197,13 +43101,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][89] = 127,
[0][1][2][1][RTW89_THAILAND][0][89] = 127,
[0][1][2][1][RTW89_FCC][1][90] = -2,
- [0][1][2][1][RTW89_FCC][2][90] = 127,
[0][1][2][1][RTW89_ETSI][1][90] = 127,
[0][1][2][1][RTW89_ETSI][0][90] = 127,
[0][1][2][1][RTW89_MKK][1][90] = 127,
[0][1][2][1][RTW89_MKK][0][90] = 127,
[0][1][2][1][RTW89_IC][1][90] = -2,
- [0][1][2][1][RTW89_IC][2][90] = 127,
[0][1][2][1][RTW89_KCC][1][90] = 20,
[0][1][2][1][RTW89_KCC][0][90] = 127,
[0][1][2][1][RTW89_ACMA][1][90] = 127,
@@ -44216,13 +43118,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][90] = 127,
[0][1][2][1][RTW89_THAILAND][0][90] = 127,
[0][1][2][1][RTW89_FCC][1][92] = -2,
- [0][1][2][1][RTW89_FCC][2][92] = 127,
[0][1][2][1][RTW89_ETSI][1][92] = 127,
[0][1][2][1][RTW89_ETSI][0][92] = 127,
[0][1][2][1][RTW89_MKK][1][92] = 127,
[0][1][2][1][RTW89_MKK][0][92] = 127,
[0][1][2][1][RTW89_IC][1][92] = -2,
- [0][1][2][1][RTW89_IC][2][92] = 127,
[0][1][2][1][RTW89_KCC][1][92] = 20,
[0][1][2][1][RTW89_KCC][0][92] = 127,
[0][1][2][1][RTW89_ACMA][1][92] = 127,
@@ -44235,13 +43135,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][92] = 127,
[0][1][2][1][RTW89_THAILAND][0][92] = 127,
[0][1][2][1][RTW89_FCC][1][94] = -2,
- [0][1][2][1][RTW89_FCC][2][94] = 127,
[0][1][2][1][RTW89_ETSI][1][94] = 127,
[0][1][2][1][RTW89_ETSI][0][94] = 127,
[0][1][2][1][RTW89_MKK][1][94] = 127,
[0][1][2][1][RTW89_MKK][0][94] = 127,
[0][1][2][1][RTW89_IC][1][94] = -2,
- [0][1][2][1][RTW89_IC][2][94] = 127,
[0][1][2][1][RTW89_KCC][1][94] = 20,
[0][1][2][1][RTW89_KCC][0][94] = 127,
[0][1][2][1][RTW89_ACMA][1][94] = 127,
@@ -44254,13 +43152,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][94] = 127,
[0][1][2][1][RTW89_THAILAND][0][94] = 127,
[0][1][2][1][RTW89_FCC][1][96] = -2,
- [0][1][2][1][RTW89_FCC][2][96] = 127,
[0][1][2][1][RTW89_ETSI][1][96] = 127,
[0][1][2][1][RTW89_ETSI][0][96] = 127,
[0][1][2][1][RTW89_MKK][1][96] = 127,
[0][1][2][1][RTW89_MKK][0][96] = 127,
[0][1][2][1][RTW89_IC][1][96] = -2,
- [0][1][2][1][RTW89_IC][2][96] = 127,
[0][1][2][1][RTW89_KCC][1][96] = 20,
[0][1][2][1][RTW89_KCC][0][96] = 127,
[0][1][2][1][RTW89_ACMA][1][96] = 127,
@@ -44273,13 +43169,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][96] = 127,
[0][1][2][1][RTW89_THAILAND][0][96] = 127,
[0][1][2][1][RTW89_FCC][1][98] = -2,
- [0][1][2][1][RTW89_FCC][2][98] = 127,
[0][1][2][1][RTW89_ETSI][1][98] = 127,
[0][1][2][1][RTW89_ETSI][0][98] = 127,
[0][1][2][1][RTW89_MKK][1][98] = 127,
[0][1][2][1][RTW89_MKK][0][98] = 127,
[0][1][2][1][RTW89_IC][1][98] = -2,
- [0][1][2][1][RTW89_IC][2][98] = 127,
[0][1][2][1][RTW89_KCC][1][98] = 20,
[0][1][2][1][RTW89_KCC][0][98] = 127,
[0][1][2][1][RTW89_ACMA][1][98] = 127,
@@ -44292,13 +43186,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][98] = 127,
[0][1][2][1][RTW89_THAILAND][0][98] = 127,
[0][1][2][1][RTW89_FCC][1][100] = -2,
- [0][1][2][1][RTW89_FCC][2][100] = 127,
[0][1][2][1][RTW89_ETSI][1][100] = 127,
[0][1][2][1][RTW89_ETSI][0][100] = 127,
[0][1][2][1][RTW89_MKK][1][100] = 127,
[0][1][2][1][RTW89_MKK][0][100] = 127,
[0][1][2][1][RTW89_IC][1][100] = -2,
- [0][1][2][1][RTW89_IC][2][100] = 127,
[0][1][2][1][RTW89_KCC][1][100] = 20,
[0][1][2][1][RTW89_KCC][0][100] = 127,
[0][1][2][1][RTW89_ACMA][1][100] = 127,
@@ -44311,13 +43203,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][100] = 127,
[0][1][2][1][RTW89_THAILAND][0][100] = 127,
[0][1][2][1][RTW89_FCC][1][102] = -2,
- [0][1][2][1][RTW89_FCC][2][102] = 127,
[0][1][2][1][RTW89_ETSI][1][102] = 127,
[0][1][2][1][RTW89_ETSI][0][102] = 127,
[0][1][2][1][RTW89_MKK][1][102] = 127,
[0][1][2][1][RTW89_MKK][0][102] = 127,
[0][1][2][1][RTW89_IC][1][102] = -2,
- [0][1][2][1][RTW89_IC][2][102] = 127,
[0][1][2][1][RTW89_KCC][1][102] = 20,
[0][1][2][1][RTW89_KCC][0][102] = 127,
[0][1][2][1][RTW89_ACMA][1][102] = 127,
@@ -44330,13 +43220,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][102] = 127,
[0][1][2][1][RTW89_THAILAND][0][102] = 127,
[0][1][2][1][RTW89_FCC][1][104] = -2,
- [0][1][2][1][RTW89_FCC][2][104] = 127,
[0][1][2][1][RTW89_ETSI][1][104] = 127,
[0][1][2][1][RTW89_ETSI][0][104] = 127,
[0][1][2][1][RTW89_MKK][1][104] = 127,
[0][1][2][1][RTW89_MKK][0][104] = 127,
[0][1][2][1][RTW89_IC][1][104] = -2,
- [0][1][2][1][RTW89_IC][2][104] = 127,
[0][1][2][1][RTW89_KCC][1][104] = 20,
[0][1][2][1][RTW89_KCC][0][104] = 127,
[0][1][2][1][RTW89_ACMA][1][104] = 127,
@@ -44349,13 +43237,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][104] = 127,
[0][1][2][1][RTW89_THAILAND][0][104] = 127,
[0][1][2][1][RTW89_FCC][1][105] = -2,
- [0][1][2][1][RTW89_FCC][2][105] = 127,
[0][1][2][1][RTW89_ETSI][1][105] = 127,
[0][1][2][1][RTW89_ETSI][0][105] = 127,
[0][1][2][1][RTW89_MKK][1][105] = 127,
[0][1][2][1][RTW89_MKK][0][105] = 127,
[0][1][2][1][RTW89_IC][1][105] = -2,
- [0][1][2][1][RTW89_IC][2][105] = 127,
[0][1][2][1][RTW89_KCC][1][105] = 20,
[0][1][2][1][RTW89_KCC][0][105] = 127,
[0][1][2][1][RTW89_ACMA][1][105] = 127,
@@ -44368,13 +43254,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][105] = 127,
[0][1][2][1][RTW89_THAILAND][0][105] = 127,
[0][1][2][1][RTW89_FCC][1][107] = 1,
- [0][1][2][1][RTW89_FCC][2][107] = 127,
[0][1][2][1][RTW89_ETSI][1][107] = 127,
[0][1][2][1][RTW89_ETSI][0][107] = 127,
[0][1][2][1][RTW89_MKK][1][107] = 127,
[0][1][2][1][RTW89_MKK][0][107] = 127,
[0][1][2][1][RTW89_IC][1][107] = 1,
- [0][1][2][1][RTW89_IC][2][107] = 127,
[0][1][2][1][RTW89_KCC][1][107] = 20,
[0][1][2][1][RTW89_KCC][0][107] = 127,
[0][1][2][1][RTW89_ACMA][1][107] = 127,
@@ -44387,13 +43271,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][107] = 127,
[0][1][2][1][RTW89_THAILAND][0][107] = 127,
[0][1][2][1][RTW89_FCC][1][109] = 1,
- [0][1][2][1][RTW89_FCC][2][109] = 127,
[0][1][2][1][RTW89_ETSI][1][109] = 127,
[0][1][2][1][RTW89_ETSI][0][109] = 127,
[0][1][2][1][RTW89_MKK][1][109] = 127,
[0][1][2][1][RTW89_MKK][0][109] = 127,
[0][1][2][1][RTW89_IC][1][109] = 1,
- [0][1][2][1][RTW89_IC][2][109] = 127,
[0][1][2][1][RTW89_KCC][1][109] = 20,
[0][1][2][1][RTW89_KCC][0][109] = 127,
[0][1][2][1][RTW89_ACMA][1][109] = 127,
@@ -44406,13 +43288,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][109] = 127,
[0][1][2][1][RTW89_THAILAND][0][109] = 127,
[0][1][2][1][RTW89_FCC][1][111] = 127,
- [0][1][2][1][RTW89_FCC][2][111] = 127,
[0][1][2][1][RTW89_ETSI][1][111] = 127,
[0][1][2][1][RTW89_ETSI][0][111] = 127,
[0][1][2][1][RTW89_MKK][1][111] = 127,
[0][1][2][1][RTW89_MKK][0][111] = 127,
[0][1][2][1][RTW89_IC][1][111] = 127,
- [0][1][2][1][RTW89_IC][2][111] = 127,
[0][1][2][1][RTW89_KCC][1][111] = 127,
[0][1][2][1][RTW89_KCC][0][111] = 127,
[0][1][2][1][RTW89_ACMA][1][111] = 127,
@@ -44425,13 +43305,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][111] = 127,
[0][1][2][1][RTW89_THAILAND][0][111] = 127,
[0][1][2][1][RTW89_FCC][1][113] = 127,
- [0][1][2][1][RTW89_FCC][2][113] = 127,
[0][1][2][1][RTW89_ETSI][1][113] = 127,
[0][1][2][1][RTW89_ETSI][0][113] = 127,
[0][1][2][1][RTW89_MKK][1][113] = 127,
[0][1][2][1][RTW89_MKK][0][113] = 127,
[0][1][2][1][RTW89_IC][1][113] = 127,
- [0][1][2][1][RTW89_IC][2][113] = 127,
[0][1][2][1][RTW89_KCC][1][113] = 127,
[0][1][2][1][RTW89_KCC][0][113] = 127,
[0][1][2][1][RTW89_ACMA][1][113] = 127,
@@ -44444,13 +43322,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][113] = 127,
[0][1][2][1][RTW89_THAILAND][0][113] = 127,
[0][1][2][1][RTW89_FCC][1][115] = 127,
- [0][1][2][1][RTW89_FCC][2][115] = 127,
[0][1][2][1][RTW89_ETSI][1][115] = 127,
[0][1][2][1][RTW89_ETSI][0][115] = 127,
[0][1][2][1][RTW89_MKK][1][115] = 127,
[0][1][2][1][RTW89_MKK][0][115] = 127,
[0][1][2][1][RTW89_IC][1][115] = 127,
- [0][1][2][1][RTW89_IC][2][115] = 127,
[0][1][2][1][RTW89_KCC][1][115] = 127,
[0][1][2][1][RTW89_KCC][0][115] = 127,
[0][1][2][1][RTW89_ACMA][1][115] = 127,
@@ -44463,13 +43339,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][115] = 127,
[0][1][2][1][RTW89_THAILAND][0][115] = 127,
[0][1][2][1][RTW89_FCC][1][117] = 127,
- [0][1][2][1][RTW89_FCC][2][117] = 127,
[0][1][2][1][RTW89_ETSI][1][117] = 127,
[0][1][2][1][RTW89_ETSI][0][117] = 127,
[0][1][2][1][RTW89_MKK][1][117] = 127,
[0][1][2][1][RTW89_MKK][0][117] = 127,
[0][1][2][1][RTW89_IC][1][117] = 127,
- [0][1][2][1][RTW89_IC][2][117] = 127,
[0][1][2][1][RTW89_KCC][1][117] = 127,
[0][1][2][1][RTW89_KCC][0][117] = 127,
[0][1][2][1][RTW89_ACMA][1][117] = 127,
@@ -44482,13 +43356,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][117] = 127,
[0][1][2][1][RTW89_THAILAND][0][117] = 127,
[0][1][2][1][RTW89_FCC][1][119] = 127,
- [0][1][2][1][RTW89_FCC][2][119] = 127,
[0][1][2][1][RTW89_ETSI][1][119] = 127,
[0][1][2][1][RTW89_ETSI][0][119] = 127,
[0][1][2][1][RTW89_MKK][1][119] = 127,
[0][1][2][1][RTW89_MKK][0][119] = 127,
[0][1][2][1][RTW89_IC][1][119] = 127,
- [0][1][2][1][RTW89_IC][2][119] = 127,
[0][1][2][1][RTW89_KCC][1][119] = 127,
[0][1][2][1][RTW89_KCC][0][119] = 127,
[0][1][2][1][RTW89_ACMA][1][119] = 127,
@@ -44501,13 +43373,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][119] = 127,
[0][1][2][1][RTW89_THAILAND][0][119] = 127,
[1][0][2][0][RTW89_FCC][1][1] = 34,
- [1][0][2][0][RTW89_FCC][2][1] = 70,
[1][0][2][0][RTW89_ETSI][1][1] = 66,
[1][0][2][0][RTW89_ETSI][0][1] = 30,
[1][0][2][0][RTW89_MKK][1][1] = 62,
[1][0][2][0][RTW89_MKK][0][1] = 26,
[1][0][2][0][RTW89_IC][1][1] = 34,
- [1][0][2][0][RTW89_IC][2][1] = 70,
[1][0][2][0][RTW89_KCC][1][1] = 40,
[1][0][2][0][RTW89_KCC][0][1] = 24,
[1][0][2][0][RTW89_ACMA][1][1] = 66,
@@ -44520,13 +43390,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][1] = 68,
[1][0][2][0][RTW89_THAILAND][0][1] = 30,
[1][0][2][0][RTW89_FCC][1][5] = 34,
- [1][0][2][0][RTW89_FCC][2][5] = 70,
[1][0][2][0][RTW89_ETSI][1][5] = 66,
[1][0][2][0][RTW89_ETSI][0][5] = 30,
[1][0][2][0][RTW89_MKK][1][5] = 62,
[1][0][2][0][RTW89_MKK][0][5] = 26,
[1][0][2][0][RTW89_IC][1][5] = 34,
- [1][0][2][0][RTW89_IC][2][5] = 70,
[1][0][2][0][RTW89_KCC][1][5] = 40,
[1][0][2][0][RTW89_KCC][0][5] = 24,
[1][0][2][0][RTW89_ACMA][1][5] = 66,
@@ -44539,13 +43407,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][5] = 68,
[1][0][2][0][RTW89_THAILAND][0][5] = 30,
[1][0][2][0][RTW89_FCC][1][9] = 34,
- [1][0][2][0][RTW89_FCC][2][9] = 70,
[1][0][2][0][RTW89_ETSI][1][9] = 66,
[1][0][2][0][RTW89_ETSI][0][9] = 30,
[1][0][2][0][RTW89_MKK][1][9] = 62,
[1][0][2][0][RTW89_MKK][0][9] = 26,
[1][0][2][0][RTW89_IC][1][9] = 34,
- [1][0][2][0][RTW89_IC][2][9] = 70,
[1][0][2][0][RTW89_KCC][1][9] = 40,
[1][0][2][0][RTW89_KCC][0][9] = 24,
[1][0][2][0][RTW89_ACMA][1][9] = 66,
@@ -44558,13 +43424,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][9] = 68,
[1][0][2][0][RTW89_THAILAND][0][9] = 30,
[1][0][2][0][RTW89_FCC][1][13] = 34,
- [1][0][2][0][RTW89_FCC][2][13] = 70,
[1][0][2][0][RTW89_ETSI][1][13] = 66,
[1][0][2][0][RTW89_ETSI][0][13] = 30,
[1][0][2][0][RTW89_MKK][1][13] = 62,
[1][0][2][0][RTW89_MKK][0][13] = 26,
[1][0][2][0][RTW89_IC][1][13] = 34,
- [1][0][2][0][RTW89_IC][2][13] = 70,
[1][0][2][0][RTW89_KCC][1][13] = 40,
[1][0][2][0][RTW89_KCC][0][13] = 24,
[1][0][2][0][RTW89_ACMA][1][13] = 66,
@@ -44577,13 +43441,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][13] = 68,
[1][0][2][0][RTW89_THAILAND][0][13] = 30,
[1][0][2][0][RTW89_FCC][1][16] = 34,
- [1][0][2][0][RTW89_FCC][2][16] = 70,
[1][0][2][0][RTW89_ETSI][1][16] = 66,
[1][0][2][0][RTW89_ETSI][0][16] = 30,
[1][0][2][0][RTW89_MKK][1][16] = 62,
[1][0][2][0][RTW89_MKK][0][16] = 26,
[1][0][2][0][RTW89_IC][1][16] = 34,
- [1][0][2][0][RTW89_IC][2][16] = 70,
[1][0][2][0][RTW89_KCC][1][16] = 40,
[1][0][2][0][RTW89_KCC][0][16] = 24,
[1][0][2][0][RTW89_ACMA][1][16] = 66,
@@ -44596,13 +43458,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][16] = 68,
[1][0][2][0][RTW89_THAILAND][0][16] = 30,
[1][0][2][0][RTW89_FCC][1][20] = 34,
- [1][0][2][0][RTW89_FCC][2][20] = 70,
[1][0][2][0][RTW89_ETSI][1][20] = 66,
[1][0][2][0][RTW89_ETSI][0][20] = 30,
[1][0][2][0][RTW89_MKK][1][20] = 62,
[1][0][2][0][RTW89_MKK][0][20] = 26,
[1][0][2][0][RTW89_IC][1][20] = 34,
- [1][0][2][0][RTW89_IC][2][20] = 70,
[1][0][2][0][RTW89_KCC][1][20] = 40,
[1][0][2][0][RTW89_KCC][0][20] = 24,
[1][0][2][0][RTW89_ACMA][1][20] = 66,
@@ -44615,13 +43475,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][20] = 68,
[1][0][2][0][RTW89_THAILAND][0][20] = 30,
[1][0][2][0][RTW89_FCC][1][24] = 36,
- [1][0][2][0][RTW89_FCC][2][24] = 70,
[1][0][2][0][RTW89_ETSI][1][24] = 66,
[1][0][2][0][RTW89_ETSI][0][24] = 30,
[1][0][2][0][RTW89_MKK][1][24] = 64,
[1][0][2][0][RTW89_MKK][0][24] = 28,
[1][0][2][0][RTW89_IC][1][24] = 36,
- [1][0][2][0][RTW89_IC][2][24] = 70,
[1][0][2][0][RTW89_KCC][1][24] = 40,
[1][0][2][0][RTW89_KCC][0][24] = 26,
[1][0][2][0][RTW89_ACMA][1][24] = 66,
@@ -44634,13 +43492,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][24] = 68,
[1][0][2][0][RTW89_THAILAND][0][24] = 30,
[1][0][2][0][RTW89_FCC][1][28] = 34,
- [1][0][2][0][RTW89_FCC][2][28] = 70,
[1][0][2][0][RTW89_ETSI][1][28] = 66,
[1][0][2][0][RTW89_ETSI][0][28] = 30,
[1][0][2][0][RTW89_MKK][1][28] = 64,
[1][0][2][0][RTW89_MKK][0][28] = 26,
[1][0][2][0][RTW89_IC][1][28] = 34,
- [1][0][2][0][RTW89_IC][2][28] = 70,
[1][0][2][0][RTW89_KCC][1][28] = 40,
[1][0][2][0][RTW89_KCC][0][28] = 26,
[1][0][2][0][RTW89_ACMA][1][28] = 66,
@@ -44653,13 +43509,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][28] = 68,
[1][0][2][0][RTW89_THAILAND][0][28] = 30,
[1][0][2][0][RTW89_FCC][1][31] = 34,
- [1][0][2][0][RTW89_FCC][2][31] = 70,
[1][0][2][0][RTW89_ETSI][1][31] = 66,
[1][0][2][0][RTW89_ETSI][0][31] = 30,
[1][0][2][0][RTW89_MKK][1][31] = 64,
[1][0][2][0][RTW89_MKK][0][31] = 26,
[1][0][2][0][RTW89_IC][1][31] = 34,
- [1][0][2][0][RTW89_IC][2][31] = 70,
[1][0][2][0][RTW89_KCC][1][31] = 40,
[1][0][2][0][RTW89_KCC][0][31] = 26,
[1][0][2][0][RTW89_ACMA][1][31] = 66,
@@ -44672,13 +43526,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][31] = 68,
[1][0][2][0][RTW89_THAILAND][0][31] = 30,
[1][0][2][0][RTW89_FCC][1][35] = 34,
- [1][0][2][0][RTW89_FCC][2][35] = 70,
[1][0][2][0][RTW89_ETSI][1][35] = 66,
[1][0][2][0][RTW89_ETSI][0][35] = 30,
[1][0][2][0][RTW89_MKK][1][35] = 64,
[1][0][2][0][RTW89_MKK][0][35] = 26,
[1][0][2][0][RTW89_IC][1][35] = 34,
- [1][0][2][0][RTW89_IC][2][35] = 70,
[1][0][2][0][RTW89_KCC][1][35] = 40,
[1][0][2][0][RTW89_KCC][0][35] = 26,
[1][0][2][0][RTW89_ACMA][1][35] = 66,
@@ -44691,13 +43543,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][35] = 68,
[1][0][2][0][RTW89_THAILAND][0][35] = 30,
[1][0][2][0][RTW89_FCC][1][39] = 34,
- [1][0][2][0][RTW89_FCC][2][39] = 70,
[1][0][2][0][RTW89_ETSI][1][39] = 66,
[1][0][2][0][RTW89_ETSI][0][39] = 30,
[1][0][2][0][RTW89_MKK][1][39] = 64,
[1][0][2][0][RTW89_MKK][0][39] = 26,
[1][0][2][0][RTW89_IC][1][39] = 34,
- [1][0][2][0][RTW89_IC][2][39] = 70,
[1][0][2][0][RTW89_KCC][1][39] = 40,
[1][0][2][0][RTW89_KCC][0][39] = 26,
[1][0][2][0][RTW89_ACMA][1][39] = 66,
@@ -44710,13 +43560,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][39] = 68,
[1][0][2][0][RTW89_THAILAND][0][39] = 30,
[1][0][2][0][RTW89_FCC][1][43] = 34,
- [1][0][2][0][RTW89_FCC][2][43] = 70,
[1][0][2][0][RTW89_ETSI][1][43] = 66,
[1][0][2][0][RTW89_ETSI][0][43] = 30,
[1][0][2][0][RTW89_MKK][1][43] = 64,
[1][0][2][0][RTW89_MKK][0][43] = 26,
[1][0][2][0][RTW89_IC][1][43] = 34,
- [1][0][2][0][RTW89_IC][2][43] = 70,
[1][0][2][0][RTW89_KCC][1][43] = 40,
[1][0][2][0][RTW89_KCC][0][43] = 26,
[1][0][2][0][RTW89_ACMA][1][43] = 66,
@@ -44729,13 +43577,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][43] = 68,
[1][0][2][0][RTW89_THAILAND][0][43] = 30,
[1][0][2][0][RTW89_FCC][1][46] = 34,
- [1][0][2][0][RTW89_FCC][2][46] = 127,
[1][0][2][0][RTW89_ETSI][1][46] = 127,
[1][0][2][0][RTW89_ETSI][0][46] = 127,
[1][0][2][0][RTW89_MKK][1][46] = 127,
[1][0][2][0][RTW89_MKK][0][46] = 127,
[1][0][2][0][RTW89_IC][1][46] = 34,
- [1][0][2][0][RTW89_IC][2][46] = 68,
[1][0][2][0][RTW89_KCC][1][46] = 40,
[1][0][2][0][RTW89_KCC][0][46] = 127,
[1][0][2][0][RTW89_ACMA][1][46] = 127,
@@ -44748,13 +43594,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][46] = 127,
[1][0][2][0][RTW89_THAILAND][0][46] = 127,
[1][0][2][0][RTW89_FCC][1][50] = 34,
- [1][0][2][0][RTW89_FCC][2][50] = 127,
[1][0][2][0][RTW89_ETSI][1][50] = 127,
[1][0][2][0][RTW89_ETSI][0][50] = 127,
[1][0][2][0][RTW89_MKK][1][50] = 127,
[1][0][2][0][RTW89_MKK][0][50] = 127,
[1][0][2][0][RTW89_IC][1][50] = 34,
- [1][0][2][0][RTW89_IC][2][50] = 68,
[1][0][2][0][RTW89_KCC][1][50] = 40,
[1][0][2][0][RTW89_KCC][0][50] = 127,
[1][0][2][0][RTW89_ACMA][1][50] = 127,
@@ -44767,13 +43611,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][50] = 127,
[1][0][2][0][RTW89_THAILAND][0][50] = 127,
[1][0][2][0][RTW89_FCC][1][54] = 36,
- [1][0][2][0][RTW89_FCC][2][54] = 127,
[1][0][2][0][RTW89_ETSI][1][54] = 127,
[1][0][2][0][RTW89_ETSI][0][54] = 127,
[1][0][2][0][RTW89_MKK][1][54] = 127,
[1][0][2][0][RTW89_MKK][0][54] = 127,
[1][0][2][0][RTW89_IC][1][54] = 36,
- [1][0][2][0][RTW89_IC][2][54] = 127,
[1][0][2][0][RTW89_KCC][1][54] = 40,
[1][0][2][0][RTW89_KCC][0][54] = 127,
[1][0][2][0][RTW89_ACMA][1][54] = 127,
@@ -44786,13 +43628,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][54] = 127,
[1][0][2][0][RTW89_THAILAND][0][54] = 127,
[1][0][2][0][RTW89_FCC][1][58] = 36,
- [1][0][2][0][RTW89_FCC][2][58] = 66,
[1][0][2][0][RTW89_ETSI][1][58] = 127,
[1][0][2][0][RTW89_ETSI][0][58] = 127,
[1][0][2][0][RTW89_MKK][1][58] = 127,
[1][0][2][0][RTW89_MKK][0][58] = 127,
[1][0][2][0][RTW89_IC][1][58] = 36,
- [1][0][2][0][RTW89_IC][2][58] = 66,
[1][0][2][0][RTW89_KCC][1][58] = 40,
[1][0][2][0][RTW89_KCC][0][58] = 127,
[1][0][2][0][RTW89_ACMA][1][58] = 127,
@@ -44805,13 +43645,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][58] = 127,
[1][0][2][0][RTW89_THAILAND][0][58] = 127,
[1][0][2][0][RTW89_FCC][1][61] = 34,
- [1][0][2][0][RTW89_FCC][2][61] = 66,
[1][0][2][0][RTW89_ETSI][1][61] = 127,
[1][0][2][0][RTW89_ETSI][0][61] = 127,
[1][0][2][0][RTW89_MKK][1][61] = 127,
[1][0][2][0][RTW89_MKK][0][61] = 127,
[1][0][2][0][RTW89_IC][1][61] = 34,
- [1][0][2][0][RTW89_IC][2][61] = 66,
[1][0][2][0][RTW89_KCC][1][61] = 40,
[1][0][2][0][RTW89_KCC][0][61] = 127,
[1][0][2][0][RTW89_ACMA][1][61] = 127,
@@ -44824,13 +43662,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][61] = 127,
[1][0][2][0][RTW89_THAILAND][0][61] = 127,
[1][0][2][0][RTW89_FCC][1][65] = 34,
- [1][0][2][0][RTW89_FCC][2][65] = 66,
[1][0][2][0][RTW89_ETSI][1][65] = 127,
[1][0][2][0][RTW89_ETSI][0][65] = 127,
[1][0][2][0][RTW89_MKK][1][65] = 127,
[1][0][2][0][RTW89_MKK][0][65] = 127,
[1][0][2][0][RTW89_IC][1][65] = 34,
- [1][0][2][0][RTW89_IC][2][65] = 66,
[1][0][2][0][RTW89_KCC][1][65] = 40,
[1][0][2][0][RTW89_KCC][0][65] = 127,
[1][0][2][0][RTW89_ACMA][1][65] = 127,
@@ -44843,13 +43679,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][65] = 127,
[1][0][2][0][RTW89_THAILAND][0][65] = 127,
[1][0][2][0][RTW89_FCC][1][69] = 34,
- [1][0][2][0][RTW89_FCC][2][69] = 66,
[1][0][2][0][RTW89_ETSI][1][69] = 127,
[1][0][2][0][RTW89_ETSI][0][69] = 127,
[1][0][2][0][RTW89_MKK][1][69] = 127,
[1][0][2][0][RTW89_MKK][0][69] = 127,
[1][0][2][0][RTW89_IC][1][69] = 34,
- [1][0][2][0][RTW89_IC][2][69] = 66,
[1][0][2][0][RTW89_KCC][1][69] = 40,
[1][0][2][0][RTW89_KCC][0][69] = 127,
[1][0][2][0][RTW89_ACMA][1][69] = 127,
@@ -44862,13 +43696,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][69] = 127,
[1][0][2][0][RTW89_THAILAND][0][69] = 127,
[1][0][2][0][RTW89_FCC][1][73] = 34,
- [1][0][2][0][RTW89_FCC][2][73] = 66,
[1][0][2][0][RTW89_ETSI][1][73] = 127,
[1][0][2][0][RTW89_ETSI][0][73] = 127,
[1][0][2][0][RTW89_MKK][1][73] = 127,
[1][0][2][0][RTW89_MKK][0][73] = 127,
[1][0][2][0][RTW89_IC][1][73] = 34,
- [1][0][2][0][RTW89_IC][2][73] = 66,
[1][0][2][0][RTW89_KCC][1][73] = 40,
[1][0][2][0][RTW89_KCC][0][73] = 127,
[1][0][2][0][RTW89_ACMA][1][73] = 127,
@@ -44881,13 +43713,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][73] = 127,
[1][0][2][0][RTW89_THAILAND][0][73] = 127,
[1][0][2][0][RTW89_FCC][1][76] = 34,
- [1][0][2][0][RTW89_FCC][2][76] = 66,
[1][0][2][0][RTW89_ETSI][1][76] = 127,
[1][0][2][0][RTW89_ETSI][0][76] = 127,
[1][0][2][0][RTW89_MKK][1][76] = 127,
[1][0][2][0][RTW89_MKK][0][76] = 127,
[1][0][2][0][RTW89_IC][1][76] = 34,
- [1][0][2][0][RTW89_IC][2][76] = 66,
[1][0][2][0][RTW89_KCC][1][76] = 40,
[1][0][2][0][RTW89_KCC][0][76] = 127,
[1][0][2][0][RTW89_ACMA][1][76] = 127,
@@ -44900,13 +43730,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][76] = 127,
[1][0][2][0][RTW89_THAILAND][0][76] = 127,
[1][0][2][0][RTW89_FCC][1][80] = 34,
- [1][0][2][0][RTW89_FCC][2][80] = 66,
[1][0][2][0][RTW89_ETSI][1][80] = 127,
[1][0][2][0][RTW89_ETSI][0][80] = 127,
[1][0][2][0][RTW89_MKK][1][80] = 127,
[1][0][2][0][RTW89_MKK][0][80] = 127,
[1][0][2][0][RTW89_IC][1][80] = 34,
- [1][0][2][0][RTW89_IC][2][80] = 66,
[1][0][2][0][RTW89_KCC][1][80] = 42,
[1][0][2][0][RTW89_KCC][0][80] = 127,
[1][0][2][0][RTW89_ACMA][1][80] = 127,
@@ -44919,13 +43747,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][80] = 127,
[1][0][2][0][RTW89_THAILAND][0][80] = 127,
[1][0][2][0][RTW89_FCC][1][84] = 34,
- [1][0][2][0][RTW89_FCC][2][84] = 66,
[1][0][2][0][RTW89_ETSI][1][84] = 127,
[1][0][2][0][RTW89_ETSI][0][84] = 127,
[1][0][2][0][RTW89_MKK][1][84] = 127,
[1][0][2][0][RTW89_MKK][0][84] = 127,
[1][0][2][0][RTW89_IC][1][84] = 34,
- [1][0][2][0][RTW89_IC][2][84] = 66,
[1][0][2][0][RTW89_KCC][1][84] = 42,
[1][0][2][0][RTW89_KCC][0][84] = 127,
[1][0][2][0][RTW89_ACMA][1][84] = 127,
@@ -44938,13 +43764,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][84] = 127,
[1][0][2][0][RTW89_THAILAND][0][84] = 127,
[1][0][2][0][RTW89_FCC][1][88] = 34,
- [1][0][2][0][RTW89_FCC][2][88] = 127,
[1][0][2][0][RTW89_ETSI][1][88] = 127,
[1][0][2][0][RTW89_ETSI][0][88] = 127,
[1][0][2][0][RTW89_MKK][1][88] = 127,
[1][0][2][0][RTW89_MKK][0][88] = 127,
[1][0][2][0][RTW89_IC][1][88] = 34,
- [1][0][2][0][RTW89_IC][2][88] = 127,
[1][0][2][0][RTW89_KCC][1][88] = 42,
[1][0][2][0][RTW89_KCC][0][88] = 127,
[1][0][2][0][RTW89_ACMA][1][88] = 127,
@@ -44957,13 +43781,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][88] = 127,
[1][0][2][0][RTW89_THAILAND][0][88] = 127,
[1][0][2][0][RTW89_FCC][1][91] = 36,
- [1][0][2][0][RTW89_FCC][2][91] = 127,
[1][0][2][0][RTW89_ETSI][1][91] = 127,
[1][0][2][0][RTW89_ETSI][0][91] = 127,
[1][0][2][0][RTW89_MKK][1][91] = 127,
[1][0][2][0][RTW89_MKK][0][91] = 127,
[1][0][2][0][RTW89_IC][1][91] = 36,
- [1][0][2][0][RTW89_IC][2][91] = 127,
[1][0][2][0][RTW89_KCC][1][91] = 42,
[1][0][2][0][RTW89_KCC][0][91] = 127,
[1][0][2][0][RTW89_ACMA][1][91] = 127,
@@ -44976,13 +43798,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][91] = 127,
[1][0][2][0][RTW89_THAILAND][0][91] = 127,
[1][0][2][0][RTW89_FCC][1][95] = 34,
- [1][0][2][0][RTW89_FCC][2][95] = 127,
[1][0][2][0][RTW89_ETSI][1][95] = 127,
[1][0][2][0][RTW89_ETSI][0][95] = 127,
[1][0][2][0][RTW89_MKK][1][95] = 127,
[1][0][2][0][RTW89_MKK][0][95] = 127,
[1][0][2][0][RTW89_IC][1][95] = 34,
- [1][0][2][0][RTW89_IC][2][95] = 127,
[1][0][2][0][RTW89_KCC][1][95] = 42,
[1][0][2][0][RTW89_KCC][0][95] = 127,
[1][0][2][0][RTW89_ACMA][1][95] = 127,
@@ -44995,13 +43815,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][95] = 127,
[1][0][2][0][RTW89_THAILAND][0][95] = 127,
[1][0][2][0][RTW89_FCC][1][99] = 34,
- [1][0][2][0][RTW89_FCC][2][99] = 127,
[1][0][2][0][RTW89_ETSI][1][99] = 127,
[1][0][2][0][RTW89_ETSI][0][99] = 127,
[1][0][2][0][RTW89_MKK][1][99] = 127,
[1][0][2][0][RTW89_MKK][0][99] = 127,
[1][0][2][0][RTW89_IC][1][99] = 34,
- [1][0][2][0][RTW89_IC][2][99] = 127,
[1][0][2][0][RTW89_KCC][1][99] = 42,
[1][0][2][0][RTW89_KCC][0][99] = 127,
[1][0][2][0][RTW89_ACMA][1][99] = 127,
@@ -45014,13 +43832,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][99] = 127,
[1][0][2][0][RTW89_THAILAND][0][99] = 127,
[1][0][2][0][RTW89_FCC][1][103] = 34,
- [1][0][2][0][RTW89_FCC][2][103] = 127,
[1][0][2][0][RTW89_ETSI][1][103] = 127,
[1][0][2][0][RTW89_ETSI][0][103] = 127,
[1][0][2][0][RTW89_MKK][1][103] = 127,
[1][0][2][0][RTW89_MKK][0][103] = 127,
[1][0][2][0][RTW89_IC][1][103] = 34,
- [1][0][2][0][RTW89_IC][2][103] = 127,
[1][0][2][0][RTW89_KCC][1][103] = 42,
[1][0][2][0][RTW89_KCC][0][103] = 127,
[1][0][2][0][RTW89_ACMA][1][103] = 127,
@@ -45033,13 +43849,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][103] = 127,
[1][0][2][0][RTW89_THAILAND][0][103] = 127,
[1][0][2][0][RTW89_FCC][1][106] = 36,
- [1][0][2][0][RTW89_FCC][2][106] = 127,
[1][0][2][0][RTW89_ETSI][1][106] = 127,
[1][0][2][0][RTW89_ETSI][0][106] = 127,
[1][0][2][0][RTW89_MKK][1][106] = 127,
[1][0][2][0][RTW89_MKK][0][106] = 127,
[1][0][2][0][RTW89_IC][1][106] = 36,
- [1][0][2][0][RTW89_IC][2][106] = 127,
[1][0][2][0][RTW89_KCC][1][106] = 42,
[1][0][2][0][RTW89_KCC][0][106] = 127,
[1][0][2][0][RTW89_ACMA][1][106] = 127,
@@ -45052,13 +43866,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][106] = 127,
[1][0][2][0][RTW89_THAILAND][0][106] = 127,
[1][0][2][0][RTW89_FCC][1][110] = 127,
- [1][0][2][0][RTW89_FCC][2][110] = 127,
[1][0][2][0][RTW89_ETSI][1][110] = 127,
[1][0][2][0][RTW89_ETSI][0][110] = 127,
[1][0][2][0][RTW89_MKK][1][110] = 127,
[1][0][2][0][RTW89_MKK][0][110] = 127,
[1][0][2][0][RTW89_IC][1][110] = 127,
- [1][0][2][0][RTW89_IC][2][110] = 127,
[1][0][2][0][RTW89_KCC][1][110] = 127,
[1][0][2][0][RTW89_KCC][0][110] = 127,
[1][0][2][0][RTW89_ACMA][1][110] = 127,
@@ -45071,13 +43883,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][110] = 127,
[1][0][2][0][RTW89_THAILAND][0][110] = 127,
[1][0][2][0][RTW89_FCC][1][114] = 127,
- [1][0][2][0][RTW89_FCC][2][114] = 127,
[1][0][2][0][RTW89_ETSI][1][114] = 127,
[1][0][2][0][RTW89_ETSI][0][114] = 127,
[1][0][2][0][RTW89_MKK][1][114] = 127,
[1][0][2][0][RTW89_MKK][0][114] = 127,
[1][0][2][0][RTW89_IC][1][114] = 127,
- [1][0][2][0][RTW89_IC][2][114] = 127,
[1][0][2][0][RTW89_KCC][1][114] = 127,
[1][0][2][0][RTW89_KCC][0][114] = 127,
[1][0][2][0][RTW89_ACMA][1][114] = 127,
@@ -45090,13 +43900,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][114] = 127,
[1][0][2][0][RTW89_THAILAND][0][114] = 127,
[1][0][2][0][RTW89_FCC][1][118] = 127,
- [1][0][2][0][RTW89_FCC][2][118] = 127,
[1][0][2][0][RTW89_ETSI][1][118] = 127,
[1][0][2][0][RTW89_ETSI][0][118] = 127,
[1][0][2][0][RTW89_MKK][1][118] = 127,
[1][0][2][0][RTW89_MKK][0][118] = 127,
[1][0][2][0][RTW89_IC][1][118] = 127,
- [1][0][2][0][RTW89_IC][2][118] = 127,
[1][0][2][0][RTW89_KCC][1][118] = 127,
[1][0][2][0][RTW89_KCC][0][118] = 127,
[1][0][2][0][RTW89_ACMA][1][118] = 127,
@@ -45109,13 +43917,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][118] = 127,
[1][0][2][0][RTW89_THAILAND][0][118] = 127,
[1][1][2][0][RTW89_FCC][1][1] = 10,
- [1][1][2][0][RTW89_FCC][2][1] = 58,
[1][1][2][0][RTW89_ETSI][1][1] = 54,
[1][1][2][0][RTW89_ETSI][0][1] = 18,
[1][1][2][0][RTW89_MKK][1][1] = 52,
[1][1][2][0][RTW89_MKK][0][1] = 12,
[1][1][2][0][RTW89_IC][1][1] = 10,
- [1][1][2][0][RTW89_IC][2][1] = 58,
[1][1][2][0][RTW89_KCC][1][1] = 28,
[1][1][2][0][RTW89_KCC][0][1] = 12,
[1][1][2][0][RTW89_ACMA][1][1] = 54,
@@ -45128,13 +43934,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][1] = 46,
[1][1][2][0][RTW89_THAILAND][0][1] = 10,
[1][1][2][0][RTW89_FCC][1][5] = 10,
- [1][1][2][0][RTW89_FCC][2][5] = 58,
[1][1][2][0][RTW89_ETSI][1][5] = 54,
[1][1][2][0][RTW89_ETSI][0][5] = 16,
[1][1][2][0][RTW89_MKK][1][5] = 52,
[1][1][2][0][RTW89_MKK][0][5] = 12,
[1][1][2][0][RTW89_IC][1][5] = 10,
- [1][1][2][0][RTW89_IC][2][5] = 58,
[1][1][2][0][RTW89_KCC][1][5] = 28,
[1][1][2][0][RTW89_KCC][0][5] = 12,
[1][1][2][0][RTW89_ACMA][1][5] = 54,
@@ -45147,13 +43951,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][5] = 46,
[1][1][2][0][RTW89_THAILAND][0][5] = 10,
[1][1][2][0][RTW89_FCC][1][9] = 10,
- [1][1][2][0][RTW89_FCC][2][9] = 58,
[1][1][2][0][RTW89_ETSI][1][9] = 54,
[1][1][2][0][RTW89_ETSI][0][9] = 16,
[1][1][2][0][RTW89_MKK][1][9] = 52,
[1][1][2][0][RTW89_MKK][0][9] = 12,
[1][1][2][0][RTW89_IC][1][9] = 10,
- [1][1][2][0][RTW89_IC][2][9] = 58,
[1][1][2][0][RTW89_KCC][1][9] = 28,
[1][1][2][0][RTW89_KCC][0][9] = 12,
[1][1][2][0][RTW89_ACMA][1][9] = 54,
@@ -45166,13 +43968,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][9] = 46,
[1][1][2][0][RTW89_THAILAND][0][9] = 10,
[1][1][2][0][RTW89_FCC][1][13] = 10,
- [1][1][2][0][RTW89_FCC][2][13] = 58,
[1][1][2][0][RTW89_ETSI][1][13] = 54,
[1][1][2][0][RTW89_ETSI][0][13] = 16,
[1][1][2][0][RTW89_MKK][1][13] = 52,
[1][1][2][0][RTW89_MKK][0][13] = 12,
[1][1][2][0][RTW89_IC][1][13] = 10,
- [1][1][2][0][RTW89_IC][2][13] = 58,
[1][1][2][0][RTW89_KCC][1][13] = 28,
[1][1][2][0][RTW89_KCC][0][13] = 12,
[1][1][2][0][RTW89_ACMA][1][13] = 54,
@@ -45185,13 +43985,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][13] = 46,
[1][1][2][0][RTW89_THAILAND][0][13] = 10,
[1][1][2][0][RTW89_FCC][1][16] = 10,
- [1][1][2][0][RTW89_FCC][2][16] = 58,
[1][1][2][0][RTW89_ETSI][1][16] = 54,
[1][1][2][0][RTW89_ETSI][0][16] = 16,
[1][1][2][0][RTW89_MKK][1][16] = 52,
[1][1][2][0][RTW89_MKK][0][16] = 12,
[1][1][2][0][RTW89_IC][1][16] = 10,
- [1][1][2][0][RTW89_IC][2][16] = 58,
[1][1][2][0][RTW89_KCC][1][16] = 28,
[1][1][2][0][RTW89_KCC][0][16] = 12,
[1][1][2][0][RTW89_ACMA][1][16] = 54,
@@ -45204,13 +44002,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][16] = 46,
[1][1][2][0][RTW89_THAILAND][0][16] = 10,
[1][1][2][0][RTW89_FCC][1][20] = 10,
- [1][1][2][0][RTW89_FCC][2][20] = 58,
[1][1][2][0][RTW89_ETSI][1][20] = 54,
[1][1][2][0][RTW89_ETSI][0][20] = 16,
[1][1][2][0][RTW89_MKK][1][20] = 52,
[1][1][2][0][RTW89_MKK][0][20] = 12,
[1][1][2][0][RTW89_IC][1][20] = 10,
- [1][1][2][0][RTW89_IC][2][20] = 58,
[1][1][2][0][RTW89_KCC][1][20] = 28,
[1][1][2][0][RTW89_KCC][0][20] = 12,
[1][1][2][0][RTW89_ACMA][1][20] = 54,
@@ -45223,13 +44019,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][20] = 46,
[1][1][2][0][RTW89_THAILAND][0][20] = 10,
[1][1][2][0][RTW89_FCC][1][24] = 10,
- [1][1][2][0][RTW89_FCC][2][24] = 70,
[1][1][2][0][RTW89_ETSI][1][24] = 54,
[1][1][2][0][RTW89_ETSI][0][24] = 16,
[1][1][2][0][RTW89_MKK][1][24] = 54,
[1][1][2][0][RTW89_MKK][0][24] = 14,
[1][1][2][0][RTW89_IC][1][24] = 10,
- [1][1][2][0][RTW89_IC][2][24] = 70,
[1][1][2][0][RTW89_KCC][1][24] = 28,
[1][1][2][0][RTW89_KCC][0][24] = 12,
[1][1][2][0][RTW89_ACMA][1][24] = 54,
@@ -45242,13 +44036,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][24] = 46,
[1][1][2][0][RTW89_THAILAND][0][24] = 10,
[1][1][2][0][RTW89_FCC][1][28] = 10,
- [1][1][2][0][RTW89_FCC][2][28] = 70,
[1][1][2][0][RTW89_ETSI][1][28] = 54,
[1][1][2][0][RTW89_ETSI][0][28] = 16,
[1][1][2][0][RTW89_MKK][1][28] = 52,
[1][1][2][0][RTW89_MKK][0][28] = 14,
[1][1][2][0][RTW89_IC][1][28] = 10,
- [1][1][2][0][RTW89_IC][2][28] = 70,
[1][1][2][0][RTW89_KCC][1][28] = 28,
[1][1][2][0][RTW89_KCC][0][28] = 14,
[1][1][2][0][RTW89_ACMA][1][28] = 54,
@@ -45261,13 +44053,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][28] = 46,
[1][1][2][0][RTW89_THAILAND][0][28] = 10,
[1][1][2][0][RTW89_FCC][1][31] = 10,
- [1][1][2][0][RTW89_FCC][2][31] = 70,
[1][1][2][0][RTW89_ETSI][1][31] = 54,
[1][1][2][0][RTW89_ETSI][0][31] = 16,
[1][1][2][0][RTW89_MKK][1][31] = 52,
[1][1][2][0][RTW89_MKK][0][31] = 14,
[1][1][2][0][RTW89_IC][1][31] = 10,
- [1][1][2][0][RTW89_IC][2][31] = 70,
[1][1][2][0][RTW89_KCC][1][31] = 28,
[1][1][2][0][RTW89_KCC][0][31] = 14,
[1][1][2][0][RTW89_ACMA][1][31] = 54,
@@ -45280,13 +44070,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][31] = 46,
[1][1][2][0][RTW89_THAILAND][0][31] = 10,
[1][1][2][0][RTW89_FCC][1][35] = 10,
- [1][1][2][0][RTW89_FCC][2][35] = 70,
[1][1][2][0][RTW89_ETSI][1][35] = 54,
[1][1][2][0][RTW89_ETSI][0][35] = 16,
[1][1][2][0][RTW89_MKK][1][35] = 52,
[1][1][2][0][RTW89_MKK][0][35] = 14,
[1][1][2][0][RTW89_IC][1][35] = 10,
- [1][1][2][0][RTW89_IC][2][35] = 70,
[1][1][2][0][RTW89_KCC][1][35] = 28,
[1][1][2][0][RTW89_KCC][0][35] = 14,
[1][1][2][0][RTW89_ACMA][1][35] = 54,
@@ -45299,13 +44087,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][35] = 46,
[1][1][2][0][RTW89_THAILAND][0][35] = 10,
[1][1][2][0][RTW89_FCC][1][39] = 10,
- [1][1][2][0][RTW89_FCC][2][39] = 70,
[1][1][2][0][RTW89_ETSI][1][39] = 54,
[1][1][2][0][RTW89_ETSI][0][39] = 16,
[1][1][2][0][RTW89_MKK][1][39] = 52,
[1][1][2][0][RTW89_MKK][0][39] = 14,
[1][1][2][0][RTW89_IC][1][39] = 10,
- [1][1][2][0][RTW89_IC][2][39] = 70,
[1][1][2][0][RTW89_KCC][1][39] = 28,
[1][1][2][0][RTW89_KCC][0][39] = 14,
[1][1][2][0][RTW89_ACMA][1][39] = 54,
@@ -45318,13 +44104,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][39] = 46,
[1][1][2][0][RTW89_THAILAND][0][39] = 10,
[1][1][2][0][RTW89_FCC][1][43] = 10,
- [1][1][2][0][RTW89_FCC][2][43] = 70,
[1][1][2][0][RTW89_ETSI][1][43] = 54,
[1][1][2][0][RTW89_ETSI][0][43] = 16,
[1][1][2][0][RTW89_MKK][1][43] = 52,
[1][1][2][0][RTW89_MKK][0][43] = 14,
[1][1][2][0][RTW89_IC][1][43] = 10,
- [1][1][2][0][RTW89_IC][2][43] = 70,
[1][1][2][0][RTW89_KCC][1][43] = 28,
[1][1][2][0][RTW89_KCC][0][43] = 14,
[1][1][2][0][RTW89_ACMA][1][43] = 54,
@@ -45337,13 +44121,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][43] = 46,
[1][1][2][0][RTW89_THAILAND][0][43] = 10,
[1][1][2][0][RTW89_FCC][1][46] = 12,
- [1][1][2][0][RTW89_FCC][2][46] = 127,
[1][1][2][0][RTW89_ETSI][1][46] = 127,
[1][1][2][0][RTW89_ETSI][0][46] = 127,
[1][1][2][0][RTW89_MKK][1][46] = 127,
[1][1][2][0][RTW89_MKK][0][46] = 127,
[1][1][2][0][RTW89_IC][1][46] = 12,
- [1][1][2][0][RTW89_IC][2][46] = 68,
[1][1][2][0][RTW89_KCC][1][46] = 28,
[1][1][2][0][RTW89_KCC][0][46] = 127,
[1][1][2][0][RTW89_ACMA][1][46] = 127,
@@ -45356,13 +44138,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][46] = 127,
[1][1][2][0][RTW89_THAILAND][0][46] = 127,
[1][1][2][0][RTW89_FCC][1][50] = 12,
- [1][1][2][0][RTW89_FCC][2][50] = 127,
[1][1][2][0][RTW89_ETSI][1][50] = 127,
[1][1][2][0][RTW89_ETSI][0][50] = 127,
[1][1][2][0][RTW89_MKK][1][50] = 127,
[1][1][2][0][RTW89_MKK][0][50] = 127,
[1][1][2][0][RTW89_IC][1][50] = 12,
- [1][1][2][0][RTW89_IC][2][50] = 68,
[1][1][2][0][RTW89_KCC][1][50] = 28,
[1][1][2][0][RTW89_KCC][0][50] = 127,
[1][1][2][0][RTW89_ACMA][1][50] = 127,
@@ -45375,13 +44155,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][50] = 127,
[1][1][2][0][RTW89_THAILAND][0][50] = 127,
[1][1][2][0][RTW89_FCC][1][54] = 10,
- [1][1][2][0][RTW89_FCC][2][54] = 127,
[1][1][2][0][RTW89_ETSI][1][54] = 127,
[1][1][2][0][RTW89_ETSI][0][54] = 127,
[1][1][2][0][RTW89_MKK][1][54] = 127,
[1][1][2][0][RTW89_MKK][0][54] = 127,
[1][1][2][0][RTW89_IC][1][54] = 10,
- [1][1][2][0][RTW89_IC][2][54] = 127,
[1][1][2][0][RTW89_KCC][1][54] = 28,
[1][1][2][0][RTW89_KCC][0][54] = 127,
[1][1][2][0][RTW89_ACMA][1][54] = 127,
@@ -45394,13 +44172,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][54] = 127,
[1][1][2][0][RTW89_THAILAND][0][54] = 127,
[1][1][2][0][RTW89_FCC][1][58] = 10,
- [1][1][2][0][RTW89_FCC][2][58] = 66,
[1][1][2][0][RTW89_ETSI][1][58] = 127,
[1][1][2][0][RTW89_ETSI][0][58] = 127,
[1][1][2][0][RTW89_MKK][1][58] = 127,
[1][1][2][0][RTW89_MKK][0][58] = 127,
[1][1][2][0][RTW89_IC][1][58] = 10,
- [1][1][2][0][RTW89_IC][2][58] = 66,
[1][1][2][0][RTW89_KCC][1][58] = 28,
[1][1][2][0][RTW89_KCC][0][58] = 127,
[1][1][2][0][RTW89_ACMA][1][58] = 127,
@@ -45413,13 +44189,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][58] = 127,
[1][1][2][0][RTW89_THAILAND][0][58] = 127,
[1][1][2][0][RTW89_FCC][1][61] = 10,
- [1][1][2][0][RTW89_FCC][2][61] = 66,
[1][1][2][0][RTW89_ETSI][1][61] = 127,
[1][1][2][0][RTW89_ETSI][0][61] = 127,
[1][1][2][0][RTW89_MKK][1][61] = 127,
[1][1][2][0][RTW89_MKK][0][61] = 127,
[1][1][2][0][RTW89_IC][1][61] = 10,
- [1][1][2][0][RTW89_IC][2][61] = 66,
[1][1][2][0][RTW89_KCC][1][61] = 28,
[1][1][2][0][RTW89_KCC][0][61] = 127,
[1][1][2][0][RTW89_ACMA][1][61] = 127,
@@ -45432,13 +44206,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][61] = 127,
[1][1][2][0][RTW89_THAILAND][0][61] = 127,
[1][1][2][0][RTW89_FCC][1][65] = 10,
- [1][1][2][0][RTW89_FCC][2][65] = 66,
[1][1][2][0][RTW89_ETSI][1][65] = 127,
[1][1][2][0][RTW89_ETSI][0][65] = 127,
[1][1][2][0][RTW89_MKK][1][65] = 127,
[1][1][2][0][RTW89_MKK][0][65] = 127,
[1][1][2][0][RTW89_IC][1][65] = 10,
- [1][1][2][0][RTW89_IC][2][65] = 66,
[1][1][2][0][RTW89_KCC][1][65] = 28,
[1][1][2][0][RTW89_KCC][0][65] = 127,
[1][1][2][0][RTW89_ACMA][1][65] = 127,
@@ -45451,13 +44223,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][65] = 127,
[1][1][2][0][RTW89_THAILAND][0][65] = 127,
[1][1][2][0][RTW89_FCC][1][69] = 10,
- [1][1][2][0][RTW89_FCC][2][69] = 66,
[1][1][2][0][RTW89_ETSI][1][69] = 127,
[1][1][2][0][RTW89_ETSI][0][69] = 127,
[1][1][2][0][RTW89_MKK][1][69] = 127,
[1][1][2][0][RTW89_MKK][0][69] = 127,
[1][1][2][0][RTW89_IC][1][69] = 10,
- [1][1][2][0][RTW89_IC][2][69] = 66,
[1][1][2][0][RTW89_KCC][1][69] = 28,
[1][1][2][0][RTW89_KCC][0][69] = 127,
[1][1][2][0][RTW89_ACMA][1][69] = 127,
@@ -45470,13 +44240,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][69] = 127,
[1][1][2][0][RTW89_THAILAND][0][69] = 127,
[1][1][2][0][RTW89_FCC][1][73] = 10,
- [1][1][2][0][RTW89_FCC][2][73] = 66,
[1][1][2][0][RTW89_ETSI][1][73] = 127,
[1][1][2][0][RTW89_ETSI][0][73] = 127,
[1][1][2][0][RTW89_MKK][1][73] = 127,
[1][1][2][0][RTW89_MKK][0][73] = 127,
[1][1][2][0][RTW89_IC][1][73] = 10,
- [1][1][2][0][RTW89_IC][2][73] = 66,
[1][1][2][0][RTW89_KCC][1][73] = 28,
[1][1][2][0][RTW89_KCC][0][73] = 127,
[1][1][2][0][RTW89_ACMA][1][73] = 127,
@@ -45489,13 +44257,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][73] = 127,
[1][1][2][0][RTW89_THAILAND][0][73] = 127,
[1][1][2][0][RTW89_FCC][1][76] = 10,
- [1][1][2][0][RTW89_FCC][2][76] = 66,
[1][1][2][0][RTW89_ETSI][1][76] = 127,
[1][1][2][0][RTW89_ETSI][0][76] = 127,
[1][1][2][0][RTW89_MKK][1][76] = 127,
[1][1][2][0][RTW89_MKK][0][76] = 127,
[1][1][2][0][RTW89_IC][1][76] = 10,
- [1][1][2][0][RTW89_IC][2][76] = 66,
[1][1][2][0][RTW89_KCC][1][76] = 28,
[1][1][2][0][RTW89_KCC][0][76] = 127,
[1][1][2][0][RTW89_ACMA][1][76] = 127,
@@ -45508,13 +44274,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][76] = 127,
[1][1][2][0][RTW89_THAILAND][0][76] = 127,
[1][1][2][0][RTW89_FCC][1][80] = 10,
- [1][1][2][0][RTW89_FCC][2][80] = 66,
[1][1][2][0][RTW89_ETSI][1][80] = 127,
[1][1][2][0][RTW89_ETSI][0][80] = 127,
[1][1][2][0][RTW89_MKK][1][80] = 127,
[1][1][2][0][RTW89_MKK][0][80] = 127,
[1][1][2][0][RTW89_IC][1][80] = 10,
- [1][1][2][0][RTW89_IC][2][80] = 66,
[1][1][2][0][RTW89_KCC][1][80] = 32,
[1][1][2][0][RTW89_KCC][0][80] = 127,
[1][1][2][0][RTW89_ACMA][1][80] = 127,
@@ -45527,13 +44291,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][80] = 127,
[1][1][2][0][RTW89_THAILAND][0][80] = 127,
[1][1][2][0][RTW89_FCC][1][84] = 10,
- [1][1][2][0][RTW89_FCC][2][84] = 66,
[1][1][2][0][RTW89_ETSI][1][84] = 127,
[1][1][2][0][RTW89_ETSI][0][84] = 127,
[1][1][2][0][RTW89_MKK][1][84] = 127,
[1][1][2][0][RTW89_MKK][0][84] = 127,
[1][1][2][0][RTW89_IC][1][84] = 10,
- [1][1][2][0][RTW89_IC][2][84] = 66,
[1][1][2][0][RTW89_KCC][1][84] = 32,
[1][1][2][0][RTW89_KCC][0][84] = 127,
[1][1][2][0][RTW89_ACMA][1][84] = 127,
@@ -45546,13 +44308,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][84] = 127,
[1][1][2][0][RTW89_THAILAND][0][84] = 127,
[1][1][2][0][RTW89_FCC][1][88] = 10,
- [1][1][2][0][RTW89_FCC][2][88] = 127,
[1][1][2][0][RTW89_ETSI][1][88] = 127,
[1][1][2][0][RTW89_ETSI][0][88] = 127,
[1][1][2][0][RTW89_MKK][1][88] = 127,
[1][1][2][0][RTW89_MKK][0][88] = 127,
[1][1][2][0][RTW89_IC][1][88] = 10,
- [1][1][2][0][RTW89_IC][2][88] = 127,
[1][1][2][0][RTW89_KCC][1][88] = 32,
[1][1][2][0][RTW89_KCC][0][88] = 127,
[1][1][2][0][RTW89_ACMA][1][88] = 127,
@@ -45565,13 +44325,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][88] = 127,
[1][1][2][0][RTW89_THAILAND][0][88] = 127,
[1][1][2][0][RTW89_FCC][1][91] = 12,
- [1][1][2][0][RTW89_FCC][2][91] = 127,
[1][1][2][0][RTW89_ETSI][1][91] = 127,
[1][1][2][0][RTW89_ETSI][0][91] = 127,
[1][1][2][0][RTW89_MKK][1][91] = 127,
[1][1][2][0][RTW89_MKK][0][91] = 127,
[1][1][2][0][RTW89_IC][1][91] = 12,
- [1][1][2][0][RTW89_IC][2][91] = 127,
[1][1][2][0][RTW89_KCC][1][91] = 32,
[1][1][2][0][RTW89_KCC][0][91] = 127,
[1][1][2][0][RTW89_ACMA][1][91] = 127,
@@ -45584,13 +44342,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][91] = 127,
[1][1][2][0][RTW89_THAILAND][0][91] = 127,
[1][1][2][0][RTW89_FCC][1][95] = 10,
- [1][1][2][0][RTW89_FCC][2][95] = 127,
[1][1][2][0][RTW89_ETSI][1][95] = 127,
[1][1][2][0][RTW89_ETSI][0][95] = 127,
[1][1][2][0][RTW89_MKK][1][95] = 127,
[1][1][2][0][RTW89_MKK][0][95] = 127,
[1][1][2][0][RTW89_IC][1][95] = 10,
- [1][1][2][0][RTW89_IC][2][95] = 127,
[1][1][2][0][RTW89_KCC][1][95] = 32,
[1][1][2][0][RTW89_KCC][0][95] = 127,
[1][1][2][0][RTW89_ACMA][1][95] = 127,
@@ -45603,13 +44359,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][95] = 127,
[1][1][2][0][RTW89_THAILAND][0][95] = 127,
[1][1][2][0][RTW89_FCC][1][99] = 10,
- [1][1][2][0][RTW89_FCC][2][99] = 127,
[1][1][2][0][RTW89_ETSI][1][99] = 127,
[1][1][2][0][RTW89_ETSI][0][99] = 127,
[1][1][2][0][RTW89_MKK][1][99] = 127,
[1][1][2][0][RTW89_MKK][0][99] = 127,
[1][1][2][0][RTW89_IC][1][99] = 10,
- [1][1][2][0][RTW89_IC][2][99] = 127,
[1][1][2][0][RTW89_KCC][1][99] = 32,
[1][1][2][0][RTW89_KCC][0][99] = 127,
[1][1][2][0][RTW89_ACMA][1][99] = 127,
@@ -45622,13 +44376,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][99] = 127,
[1][1][2][0][RTW89_THAILAND][0][99] = 127,
[1][1][2][0][RTW89_FCC][1][103] = 10,
- [1][1][2][0][RTW89_FCC][2][103] = 127,
[1][1][2][0][RTW89_ETSI][1][103] = 127,
[1][1][2][0][RTW89_ETSI][0][103] = 127,
[1][1][2][0][RTW89_MKK][1][103] = 127,
[1][1][2][0][RTW89_MKK][0][103] = 127,
[1][1][2][0][RTW89_IC][1][103] = 10,
- [1][1][2][0][RTW89_IC][2][103] = 127,
[1][1][2][0][RTW89_KCC][1][103] = 32,
[1][1][2][0][RTW89_KCC][0][103] = 127,
[1][1][2][0][RTW89_ACMA][1][103] = 127,
@@ -45641,13 +44393,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][103] = 127,
[1][1][2][0][RTW89_THAILAND][0][103] = 127,
[1][1][2][0][RTW89_FCC][1][106] = 12,
- [1][1][2][0][RTW89_FCC][2][106] = 127,
[1][1][2][0][RTW89_ETSI][1][106] = 127,
[1][1][2][0][RTW89_ETSI][0][106] = 127,
[1][1][2][0][RTW89_MKK][1][106] = 127,
[1][1][2][0][RTW89_MKK][0][106] = 127,
[1][1][2][0][RTW89_IC][1][106] = 12,
- [1][1][2][0][RTW89_IC][2][106] = 127,
[1][1][2][0][RTW89_KCC][1][106] = 32,
[1][1][2][0][RTW89_KCC][0][106] = 127,
[1][1][2][0][RTW89_ACMA][1][106] = 127,
@@ -45660,13 +44410,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][106] = 127,
[1][1][2][0][RTW89_THAILAND][0][106] = 127,
[1][1][2][0][RTW89_FCC][1][110] = 127,
- [1][1][2][0][RTW89_FCC][2][110] = 127,
[1][1][2][0][RTW89_ETSI][1][110] = 127,
[1][1][2][0][RTW89_ETSI][0][110] = 127,
[1][1][2][0][RTW89_MKK][1][110] = 127,
[1][1][2][0][RTW89_MKK][0][110] = 127,
[1][1][2][0][RTW89_IC][1][110] = 127,
- [1][1][2][0][RTW89_IC][2][110] = 127,
[1][1][2][0][RTW89_KCC][1][110] = 127,
[1][1][2][0][RTW89_KCC][0][110] = 127,
[1][1][2][0][RTW89_ACMA][1][110] = 127,
@@ -45679,13 +44427,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][110] = 127,
[1][1][2][0][RTW89_THAILAND][0][110] = 127,
[1][1][2][0][RTW89_FCC][1][114] = 127,
- [1][1][2][0][RTW89_FCC][2][114] = 127,
[1][1][2][0][RTW89_ETSI][1][114] = 127,
[1][1][2][0][RTW89_ETSI][0][114] = 127,
[1][1][2][0][RTW89_MKK][1][114] = 127,
[1][1][2][0][RTW89_MKK][0][114] = 127,
[1][1][2][0][RTW89_IC][1][114] = 127,
- [1][1][2][0][RTW89_IC][2][114] = 127,
[1][1][2][0][RTW89_KCC][1][114] = 127,
[1][1][2][0][RTW89_KCC][0][114] = 127,
[1][1][2][0][RTW89_ACMA][1][114] = 127,
@@ -45698,13 +44444,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][114] = 127,
[1][1][2][0][RTW89_THAILAND][0][114] = 127,
[1][1][2][0][RTW89_FCC][1][118] = 127,
- [1][1][2][0][RTW89_FCC][2][118] = 127,
[1][1][2][0][RTW89_ETSI][1][118] = 127,
[1][1][2][0][RTW89_ETSI][0][118] = 127,
[1][1][2][0][RTW89_MKK][1][118] = 127,
[1][1][2][0][RTW89_MKK][0][118] = 127,
[1][1][2][0][RTW89_IC][1][118] = 127,
- [1][1][2][0][RTW89_IC][2][118] = 127,
[1][1][2][0][RTW89_KCC][1][118] = 127,
[1][1][2][0][RTW89_KCC][0][118] = 127,
[1][1][2][0][RTW89_ACMA][1][118] = 127,
@@ -45717,13 +44461,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][118] = 127,
[1][1][2][0][RTW89_THAILAND][0][118] = 127,
[1][1][2][1][RTW89_FCC][1][1] = 10,
- [1][1][2][1][RTW89_FCC][2][1] = 58,
[1][1][2][1][RTW89_ETSI][1][1] = 42,
[1][1][2][1][RTW89_ETSI][0][1] = 6,
[1][1][2][1][RTW89_MKK][1][1] = 52,
[1][1][2][1][RTW89_MKK][0][1] = 12,
[1][1][2][1][RTW89_IC][1][1] = 10,
- [1][1][2][1][RTW89_IC][2][1] = 58,
[1][1][2][1][RTW89_KCC][1][1] = 28,
[1][1][2][1][RTW89_KCC][0][1] = 12,
[1][1][2][1][RTW89_ACMA][1][1] = 42,
@@ -45736,13 +44478,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][1] = 46,
[1][1][2][1][RTW89_THAILAND][0][1] = 6,
[1][1][2][1][RTW89_FCC][1][5] = 10,
- [1][1][2][1][RTW89_FCC][2][5] = 58,
[1][1][2][1][RTW89_ETSI][1][5] = 42,
[1][1][2][1][RTW89_ETSI][0][5] = 6,
[1][1][2][1][RTW89_MKK][1][5] = 52,
[1][1][2][1][RTW89_MKK][0][5] = 12,
[1][1][2][1][RTW89_IC][1][5] = 10,
- [1][1][2][1][RTW89_IC][2][5] = 58,
[1][1][2][1][RTW89_KCC][1][5] = 28,
[1][1][2][1][RTW89_KCC][0][5] = 12,
[1][1][2][1][RTW89_ACMA][1][5] = 42,
@@ -45755,13 +44495,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][5] = 46,
[1][1][2][1][RTW89_THAILAND][0][5] = 6,
[1][1][2][1][RTW89_FCC][1][9] = 10,
- [1][1][2][1][RTW89_FCC][2][9] = 58,
[1][1][2][1][RTW89_ETSI][1][9] = 42,
[1][1][2][1][RTW89_ETSI][0][9] = 6,
[1][1][2][1][RTW89_MKK][1][9] = 52,
[1][1][2][1][RTW89_MKK][0][9] = 12,
[1][1][2][1][RTW89_IC][1][9] = 10,
- [1][1][2][1][RTW89_IC][2][9] = 58,
[1][1][2][1][RTW89_KCC][1][9] = 28,
[1][1][2][1][RTW89_KCC][0][9] = 12,
[1][1][2][1][RTW89_ACMA][1][9] = 42,
@@ -45774,13 +44512,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][9] = 46,
[1][1][2][1][RTW89_THAILAND][0][9] = 6,
[1][1][2][1][RTW89_FCC][1][13] = 10,
- [1][1][2][1][RTW89_FCC][2][13] = 58,
[1][1][2][1][RTW89_ETSI][1][13] = 42,
[1][1][2][1][RTW89_ETSI][0][13] = 6,
[1][1][2][1][RTW89_MKK][1][13] = 52,
[1][1][2][1][RTW89_MKK][0][13] = 12,
[1][1][2][1][RTW89_IC][1][13] = 10,
- [1][1][2][1][RTW89_IC][2][13] = 58,
[1][1][2][1][RTW89_KCC][1][13] = 28,
[1][1][2][1][RTW89_KCC][0][13] = 12,
[1][1][2][1][RTW89_ACMA][1][13] = 42,
@@ -45793,13 +44529,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][13] = 46,
[1][1][2][1][RTW89_THAILAND][0][13] = 6,
[1][1][2][1][RTW89_FCC][1][16] = 10,
- [1][1][2][1][RTW89_FCC][2][16] = 58,
[1][1][2][1][RTW89_ETSI][1][16] = 42,
[1][1][2][1][RTW89_ETSI][0][16] = 6,
[1][1][2][1][RTW89_MKK][1][16] = 52,
[1][1][2][1][RTW89_MKK][0][16] = 12,
[1][1][2][1][RTW89_IC][1][16] = 10,
- [1][1][2][1][RTW89_IC][2][16] = 58,
[1][1][2][1][RTW89_KCC][1][16] = 28,
[1][1][2][1][RTW89_KCC][0][16] = 12,
[1][1][2][1][RTW89_ACMA][1][16] = 42,
@@ -45812,13 +44546,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][16] = 46,
[1][1][2][1][RTW89_THAILAND][0][16] = 6,
[1][1][2][1][RTW89_FCC][1][20] = 10,
- [1][1][2][1][RTW89_FCC][2][20] = 58,
[1][1][2][1][RTW89_ETSI][1][20] = 42,
[1][1][2][1][RTW89_ETSI][0][20] = 6,
[1][1][2][1][RTW89_MKK][1][20] = 52,
[1][1][2][1][RTW89_MKK][0][20] = 12,
[1][1][2][1][RTW89_IC][1][20] = 10,
- [1][1][2][1][RTW89_IC][2][20] = 58,
[1][1][2][1][RTW89_KCC][1][20] = 28,
[1][1][2][1][RTW89_KCC][0][20] = 12,
[1][1][2][1][RTW89_ACMA][1][20] = 42,
@@ -45831,13 +44563,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][20] = 46,
[1][1][2][1][RTW89_THAILAND][0][20] = 6,
[1][1][2][1][RTW89_FCC][1][24] = 10,
- [1][1][2][1][RTW89_FCC][2][24] = 70,
[1][1][2][1][RTW89_ETSI][1][24] = 42,
[1][1][2][1][RTW89_ETSI][0][24] = 6,
[1][1][2][1][RTW89_MKK][1][24] = 54,
[1][1][2][1][RTW89_MKK][0][24] = 14,
[1][1][2][1][RTW89_IC][1][24] = 10,
- [1][1][2][1][RTW89_IC][2][24] = 70,
[1][1][2][1][RTW89_KCC][1][24] = 28,
[1][1][2][1][RTW89_KCC][0][24] = 12,
[1][1][2][1][RTW89_ACMA][1][24] = 42,
@@ -45850,13 +44580,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][24] = 46,
[1][1][2][1][RTW89_THAILAND][0][24] = 6,
[1][1][2][1][RTW89_FCC][1][28] = 10,
- [1][1][2][1][RTW89_FCC][2][28] = 70,
[1][1][2][1][RTW89_ETSI][1][28] = 42,
[1][1][2][1][RTW89_ETSI][0][28] = 6,
[1][1][2][1][RTW89_MKK][1][28] = 52,
[1][1][2][1][RTW89_MKK][0][28] = 14,
[1][1][2][1][RTW89_IC][1][28] = 10,
- [1][1][2][1][RTW89_IC][2][28] = 70,
[1][1][2][1][RTW89_KCC][1][28] = 28,
[1][1][2][1][RTW89_KCC][0][28] = 14,
[1][1][2][1][RTW89_ACMA][1][28] = 42,
@@ -45869,13 +44597,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][28] = 46,
[1][1][2][1][RTW89_THAILAND][0][28] = 6,
[1][1][2][1][RTW89_FCC][1][31] = 10,
- [1][1][2][1][RTW89_FCC][2][31] = 70,
[1][1][2][1][RTW89_ETSI][1][31] = 42,
[1][1][2][1][RTW89_ETSI][0][31] = 6,
[1][1][2][1][RTW89_MKK][1][31] = 52,
[1][1][2][1][RTW89_MKK][0][31] = 14,
[1][1][2][1][RTW89_IC][1][31] = 10,
- [1][1][2][1][RTW89_IC][2][31] = 70,
[1][1][2][1][RTW89_KCC][1][31] = 28,
[1][1][2][1][RTW89_KCC][0][31] = 14,
[1][1][2][1][RTW89_ACMA][1][31] = 42,
@@ -45888,13 +44614,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][31] = 46,
[1][1][2][1][RTW89_THAILAND][0][31] = 6,
[1][1][2][1][RTW89_FCC][1][35] = 10,
- [1][1][2][1][RTW89_FCC][2][35] = 70,
[1][1][2][1][RTW89_ETSI][1][35] = 42,
[1][1][2][1][RTW89_ETSI][0][35] = 6,
[1][1][2][1][RTW89_MKK][1][35] = 52,
[1][1][2][1][RTW89_MKK][0][35] = 14,
[1][1][2][1][RTW89_IC][1][35] = 10,
- [1][1][2][1][RTW89_IC][2][35] = 70,
[1][1][2][1][RTW89_KCC][1][35] = 28,
[1][1][2][1][RTW89_KCC][0][35] = 14,
[1][1][2][1][RTW89_ACMA][1][35] = 42,
@@ -45907,13 +44631,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][35] = 46,
[1][1][2][1][RTW89_THAILAND][0][35] = 6,
[1][1][2][1][RTW89_FCC][1][39] = 10,
- [1][1][2][1][RTW89_FCC][2][39] = 70,
[1][1][2][1][RTW89_ETSI][1][39] = 42,
[1][1][2][1][RTW89_ETSI][0][39] = 6,
[1][1][2][1][RTW89_MKK][1][39] = 52,
[1][1][2][1][RTW89_MKK][0][39] = 14,
[1][1][2][1][RTW89_IC][1][39] = 10,
- [1][1][2][1][RTW89_IC][2][39] = 70,
[1][1][2][1][RTW89_KCC][1][39] = 28,
[1][1][2][1][RTW89_KCC][0][39] = 14,
[1][1][2][1][RTW89_ACMA][1][39] = 42,
@@ -45926,13 +44648,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][39] = 46,
[1][1][2][1][RTW89_THAILAND][0][39] = 6,
[1][1][2][1][RTW89_FCC][1][43] = 10,
- [1][1][2][1][RTW89_FCC][2][43] = 70,
[1][1][2][1][RTW89_ETSI][1][43] = 42,
[1][1][2][1][RTW89_ETSI][0][43] = 6,
[1][1][2][1][RTW89_MKK][1][43] = 52,
[1][1][2][1][RTW89_MKK][0][43] = 14,
[1][1][2][1][RTW89_IC][1][43] = 10,
- [1][1][2][1][RTW89_IC][2][43] = 70,
[1][1][2][1][RTW89_KCC][1][43] = 28,
[1][1][2][1][RTW89_KCC][0][43] = 14,
[1][1][2][1][RTW89_ACMA][1][43] = 42,
@@ -45945,13 +44665,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][43] = 46,
[1][1][2][1][RTW89_THAILAND][0][43] = 6,
[1][1][2][1][RTW89_FCC][1][46] = 12,
- [1][1][2][1][RTW89_FCC][2][46] = 127,
[1][1][2][1][RTW89_ETSI][1][46] = 127,
[1][1][2][1][RTW89_ETSI][0][46] = 127,
[1][1][2][1][RTW89_MKK][1][46] = 127,
[1][1][2][1][RTW89_MKK][0][46] = 127,
[1][1][2][1][RTW89_IC][1][46] = 12,
- [1][1][2][1][RTW89_IC][2][46] = 68,
[1][1][2][1][RTW89_KCC][1][46] = 28,
[1][1][2][1][RTW89_KCC][0][46] = 127,
[1][1][2][1][RTW89_ACMA][1][46] = 127,
@@ -45964,13 +44682,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][46] = 127,
[1][1][2][1][RTW89_THAILAND][0][46] = 127,
[1][1][2][1][RTW89_FCC][1][50] = 12,
- [1][1][2][1][RTW89_FCC][2][50] = 127,
[1][1][2][1][RTW89_ETSI][1][50] = 127,
[1][1][2][1][RTW89_ETSI][0][50] = 127,
[1][1][2][1][RTW89_MKK][1][50] = 127,
[1][1][2][1][RTW89_MKK][0][50] = 127,
[1][1][2][1][RTW89_IC][1][50] = 12,
- [1][1][2][1][RTW89_IC][2][50] = 68,
[1][1][2][1][RTW89_KCC][1][50] = 28,
[1][1][2][1][RTW89_KCC][0][50] = 127,
[1][1][2][1][RTW89_ACMA][1][50] = 127,
@@ -45983,13 +44699,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][50] = 127,
[1][1][2][1][RTW89_THAILAND][0][50] = 127,
[1][1][2][1][RTW89_FCC][1][54] = 10,
- [1][1][2][1][RTW89_FCC][2][54] = 127,
[1][1][2][1][RTW89_ETSI][1][54] = 127,
[1][1][2][1][RTW89_ETSI][0][54] = 127,
[1][1][2][1][RTW89_MKK][1][54] = 127,
[1][1][2][1][RTW89_MKK][0][54] = 127,
[1][1][2][1][RTW89_IC][1][54] = 10,
- [1][1][2][1][RTW89_IC][2][54] = 127,
[1][1][2][1][RTW89_KCC][1][54] = 28,
[1][1][2][1][RTW89_KCC][0][54] = 127,
[1][1][2][1][RTW89_ACMA][1][54] = 127,
@@ -46002,13 +44716,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][54] = 127,
[1][1][2][1][RTW89_THAILAND][0][54] = 127,
[1][1][2][1][RTW89_FCC][1][58] = 10,
- [1][1][2][1][RTW89_FCC][2][58] = 66,
[1][1][2][1][RTW89_ETSI][1][58] = 127,
[1][1][2][1][RTW89_ETSI][0][58] = 127,
[1][1][2][1][RTW89_MKK][1][58] = 127,
[1][1][2][1][RTW89_MKK][0][58] = 127,
[1][1][2][1][RTW89_IC][1][58] = 10,
- [1][1][2][1][RTW89_IC][2][58] = 66,
[1][1][2][1][RTW89_KCC][1][58] = 28,
[1][1][2][1][RTW89_KCC][0][58] = 127,
[1][1][2][1][RTW89_ACMA][1][58] = 127,
@@ -46021,13 +44733,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][58] = 127,
[1][1][2][1][RTW89_THAILAND][0][58] = 127,
[1][1][2][1][RTW89_FCC][1][61] = 10,
- [1][1][2][1][RTW89_FCC][2][61] = 66,
[1][1][2][1][RTW89_ETSI][1][61] = 127,
[1][1][2][1][RTW89_ETSI][0][61] = 127,
[1][1][2][1][RTW89_MKK][1][61] = 127,
[1][1][2][1][RTW89_MKK][0][61] = 127,
[1][1][2][1][RTW89_IC][1][61] = 10,
- [1][1][2][1][RTW89_IC][2][61] = 66,
[1][1][2][1][RTW89_KCC][1][61] = 28,
[1][1][2][1][RTW89_KCC][0][61] = 127,
[1][1][2][1][RTW89_ACMA][1][61] = 127,
@@ -46040,13 +44750,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][61] = 127,
[1][1][2][1][RTW89_THAILAND][0][61] = 127,
[1][1][2][1][RTW89_FCC][1][65] = 10,
- [1][1][2][1][RTW89_FCC][2][65] = 66,
[1][1][2][1][RTW89_ETSI][1][65] = 127,
[1][1][2][1][RTW89_ETSI][0][65] = 127,
[1][1][2][1][RTW89_MKK][1][65] = 127,
[1][1][2][1][RTW89_MKK][0][65] = 127,
[1][1][2][1][RTW89_IC][1][65] = 10,
- [1][1][2][1][RTW89_IC][2][65] = 66,
[1][1][2][1][RTW89_KCC][1][65] = 28,
[1][1][2][1][RTW89_KCC][0][65] = 127,
[1][1][2][1][RTW89_ACMA][1][65] = 127,
@@ -46059,13 +44767,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][65] = 127,
[1][1][2][1][RTW89_THAILAND][0][65] = 127,
[1][1][2][1][RTW89_FCC][1][69] = 10,
- [1][1][2][1][RTW89_FCC][2][69] = 66,
[1][1][2][1][RTW89_ETSI][1][69] = 127,
[1][1][2][1][RTW89_ETSI][0][69] = 127,
[1][1][2][1][RTW89_MKK][1][69] = 127,
[1][1][2][1][RTW89_MKK][0][69] = 127,
[1][1][2][1][RTW89_IC][1][69] = 10,
- [1][1][2][1][RTW89_IC][2][69] = 66,
[1][1][2][1][RTW89_KCC][1][69] = 28,
[1][1][2][1][RTW89_KCC][0][69] = 127,
[1][1][2][1][RTW89_ACMA][1][69] = 127,
@@ -46078,13 +44784,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][69] = 127,
[1][1][2][1][RTW89_THAILAND][0][69] = 127,
[1][1][2][1][RTW89_FCC][1][73] = 10,
- [1][1][2][1][RTW89_FCC][2][73] = 66,
[1][1][2][1][RTW89_ETSI][1][73] = 127,
[1][1][2][1][RTW89_ETSI][0][73] = 127,
[1][1][2][1][RTW89_MKK][1][73] = 127,
[1][1][2][1][RTW89_MKK][0][73] = 127,
[1][1][2][1][RTW89_IC][1][73] = 10,
- [1][1][2][1][RTW89_IC][2][73] = 66,
[1][1][2][1][RTW89_KCC][1][73] = 28,
[1][1][2][1][RTW89_KCC][0][73] = 127,
[1][1][2][1][RTW89_ACMA][1][73] = 127,
@@ -46097,13 +44801,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][73] = 127,
[1][1][2][1][RTW89_THAILAND][0][73] = 127,
[1][1][2][1][RTW89_FCC][1][76] = 10,
- [1][1][2][1][RTW89_FCC][2][76] = 66,
[1][1][2][1][RTW89_ETSI][1][76] = 127,
[1][1][2][1][RTW89_ETSI][0][76] = 127,
[1][1][2][1][RTW89_MKK][1][76] = 127,
[1][1][2][1][RTW89_MKK][0][76] = 127,
[1][1][2][1][RTW89_IC][1][76] = 10,
- [1][1][2][1][RTW89_IC][2][76] = 66,
[1][1][2][1][RTW89_KCC][1][76] = 28,
[1][1][2][1][RTW89_KCC][0][76] = 127,
[1][1][2][1][RTW89_ACMA][1][76] = 127,
@@ -46116,13 +44818,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][76] = 127,
[1][1][2][1][RTW89_THAILAND][0][76] = 127,
[1][1][2][1][RTW89_FCC][1][80] = 10,
- [1][1][2][1][RTW89_FCC][2][80] = 66,
[1][1][2][1][RTW89_ETSI][1][80] = 127,
[1][1][2][1][RTW89_ETSI][0][80] = 127,
[1][1][2][1][RTW89_MKK][1][80] = 127,
[1][1][2][1][RTW89_MKK][0][80] = 127,
[1][1][2][1][RTW89_IC][1][80] = 10,
- [1][1][2][1][RTW89_IC][2][80] = 66,
[1][1][2][1][RTW89_KCC][1][80] = 32,
[1][1][2][1][RTW89_KCC][0][80] = 127,
[1][1][2][1][RTW89_ACMA][1][80] = 127,
@@ -46135,13 +44835,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][80] = 127,
[1][1][2][1][RTW89_THAILAND][0][80] = 127,
[1][1][2][1][RTW89_FCC][1][84] = 10,
- [1][1][2][1][RTW89_FCC][2][84] = 66,
[1][1][2][1][RTW89_ETSI][1][84] = 127,
[1][1][2][1][RTW89_ETSI][0][84] = 127,
[1][1][2][1][RTW89_MKK][1][84] = 127,
[1][1][2][1][RTW89_MKK][0][84] = 127,
[1][1][2][1][RTW89_IC][1][84] = 10,
- [1][1][2][1][RTW89_IC][2][84] = 66,
[1][1][2][1][RTW89_KCC][1][84] = 32,
[1][1][2][1][RTW89_KCC][0][84] = 127,
[1][1][2][1][RTW89_ACMA][1][84] = 127,
@@ -46154,13 +44852,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][84] = 127,
[1][1][2][1][RTW89_THAILAND][0][84] = 127,
[1][1][2][1][RTW89_FCC][1][88] = 10,
- [1][1][2][1][RTW89_FCC][2][88] = 127,
[1][1][2][1][RTW89_ETSI][1][88] = 127,
[1][1][2][1][RTW89_ETSI][0][88] = 127,
[1][1][2][1][RTW89_MKK][1][88] = 127,
[1][1][2][1][RTW89_MKK][0][88] = 127,
[1][1][2][1][RTW89_IC][1][88] = 10,
- [1][1][2][1][RTW89_IC][2][88] = 127,
[1][1][2][1][RTW89_KCC][1][88] = 32,
[1][1][2][1][RTW89_KCC][0][88] = 127,
[1][1][2][1][RTW89_ACMA][1][88] = 127,
@@ -46173,13 +44869,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][88] = 127,
[1][1][2][1][RTW89_THAILAND][0][88] = 127,
[1][1][2][1][RTW89_FCC][1][91] = 12,
- [1][1][2][1][RTW89_FCC][2][91] = 127,
[1][1][2][1][RTW89_ETSI][1][91] = 127,
[1][1][2][1][RTW89_ETSI][0][91] = 127,
[1][1][2][1][RTW89_MKK][1][91] = 127,
[1][1][2][1][RTW89_MKK][0][91] = 127,
[1][1][2][1][RTW89_IC][1][91] = 12,
- [1][1][2][1][RTW89_IC][2][91] = 127,
[1][1][2][1][RTW89_KCC][1][91] = 32,
[1][1][2][1][RTW89_KCC][0][91] = 127,
[1][1][2][1][RTW89_ACMA][1][91] = 127,
@@ -46192,13 +44886,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][91] = 127,
[1][1][2][1][RTW89_THAILAND][0][91] = 127,
[1][1][2][1][RTW89_FCC][1][95] = 10,
- [1][1][2][1][RTW89_FCC][2][95] = 127,
[1][1][2][1][RTW89_ETSI][1][95] = 127,
[1][1][2][1][RTW89_ETSI][0][95] = 127,
[1][1][2][1][RTW89_MKK][1][95] = 127,
[1][1][2][1][RTW89_MKK][0][95] = 127,
[1][1][2][1][RTW89_IC][1][95] = 10,
- [1][1][2][1][RTW89_IC][2][95] = 127,
[1][1][2][1][RTW89_KCC][1][95] = 32,
[1][1][2][1][RTW89_KCC][0][95] = 127,
[1][1][2][1][RTW89_ACMA][1][95] = 127,
@@ -46211,13 +44903,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][95] = 127,
[1][1][2][1][RTW89_THAILAND][0][95] = 127,
[1][1][2][1][RTW89_FCC][1][99] = 10,
- [1][1][2][1][RTW89_FCC][2][99] = 127,
[1][1][2][1][RTW89_ETSI][1][99] = 127,
[1][1][2][1][RTW89_ETSI][0][99] = 127,
[1][1][2][1][RTW89_MKK][1][99] = 127,
[1][1][2][1][RTW89_MKK][0][99] = 127,
[1][1][2][1][RTW89_IC][1][99] = 10,
- [1][1][2][1][RTW89_IC][2][99] = 127,
[1][1][2][1][RTW89_KCC][1][99] = 32,
[1][1][2][1][RTW89_KCC][0][99] = 127,
[1][1][2][1][RTW89_ACMA][1][99] = 127,
@@ -46230,13 +44920,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][99] = 127,
[1][1][2][1][RTW89_THAILAND][0][99] = 127,
[1][1][2][1][RTW89_FCC][1][103] = 10,
- [1][1][2][1][RTW89_FCC][2][103] = 127,
[1][1][2][1][RTW89_ETSI][1][103] = 127,
[1][1][2][1][RTW89_ETSI][0][103] = 127,
[1][1][2][1][RTW89_MKK][1][103] = 127,
[1][1][2][1][RTW89_MKK][0][103] = 127,
[1][1][2][1][RTW89_IC][1][103] = 10,
- [1][1][2][1][RTW89_IC][2][103] = 127,
[1][1][2][1][RTW89_KCC][1][103] = 32,
[1][1][2][1][RTW89_KCC][0][103] = 127,
[1][1][2][1][RTW89_ACMA][1][103] = 127,
@@ -46249,13 +44937,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][103] = 127,
[1][1][2][1][RTW89_THAILAND][0][103] = 127,
[1][1][2][1][RTW89_FCC][1][106] = 12,
- [1][1][2][1][RTW89_FCC][2][106] = 127,
[1][1][2][1][RTW89_ETSI][1][106] = 127,
[1][1][2][1][RTW89_ETSI][0][106] = 127,
[1][1][2][1][RTW89_MKK][1][106] = 127,
[1][1][2][1][RTW89_MKK][0][106] = 127,
[1][1][2][1][RTW89_IC][1][106] = 12,
- [1][1][2][1][RTW89_IC][2][106] = 127,
[1][1][2][1][RTW89_KCC][1][106] = 32,
[1][1][2][1][RTW89_KCC][0][106] = 127,
[1][1][2][1][RTW89_ACMA][1][106] = 127,
@@ -46268,13 +44954,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][106] = 127,
[1][1][2][1][RTW89_THAILAND][0][106] = 127,
[1][1][2][1][RTW89_FCC][1][110] = 127,
- [1][1][2][1][RTW89_FCC][2][110] = 127,
[1][1][2][1][RTW89_ETSI][1][110] = 127,
[1][1][2][1][RTW89_ETSI][0][110] = 127,
[1][1][2][1][RTW89_MKK][1][110] = 127,
[1][1][2][1][RTW89_MKK][0][110] = 127,
[1][1][2][1][RTW89_IC][1][110] = 127,
- [1][1][2][1][RTW89_IC][2][110] = 127,
[1][1][2][1][RTW89_KCC][1][110] = 127,
[1][1][2][1][RTW89_KCC][0][110] = 127,
[1][1][2][1][RTW89_ACMA][1][110] = 127,
@@ -46287,13 +44971,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][110] = 127,
[1][1][2][1][RTW89_THAILAND][0][110] = 127,
[1][1][2][1][RTW89_FCC][1][114] = 127,
- [1][1][2][1][RTW89_FCC][2][114] = 127,
[1][1][2][1][RTW89_ETSI][1][114] = 127,
[1][1][2][1][RTW89_ETSI][0][114] = 127,
[1][1][2][1][RTW89_MKK][1][114] = 127,
[1][1][2][1][RTW89_MKK][0][114] = 127,
[1][1][2][1][RTW89_IC][1][114] = 127,
- [1][1][2][1][RTW89_IC][2][114] = 127,
[1][1][2][1][RTW89_KCC][1][114] = 127,
[1][1][2][1][RTW89_KCC][0][114] = 127,
[1][1][2][1][RTW89_ACMA][1][114] = 127,
@@ -46306,13 +44988,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][114] = 127,
[1][1][2][1][RTW89_THAILAND][0][114] = 127,
[1][1][2][1][RTW89_FCC][1][118] = 127,
- [1][1][2][1][RTW89_FCC][2][118] = 127,
[1][1][2][1][RTW89_ETSI][1][118] = 127,
[1][1][2][1][RTW89_ETSI][0][118] = 127,
[1][1][2][1][RTW89_MKK][1][118] = 127,
[1][1][2][1][RTW89_MKK][0][118] = 127,
[1][1][2][1][RTW89_IC][1][118] = 127,
- [1][1][2][1][RTW89_IC][2][118] = 127,
[1][1][2][1][RTW89_KCC][1][118] = 127,
[1][1][2][1][RTW89_KCC][0][118] = 127,
[1][1][2][1][RTW89_ACMA][1][118] = 127,
@@ -46325,13 +45005,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][118] = 127,
[1][1][2][1][RTW89_THAILAND][0][118] = 127,
[2][0][2][0][RTW89_FCC][1][3] = 46,
- [2][0][2][0][RTW89_FCC][2][3] = 60,
[2][0][2][0][RTW89_ETSI][1][3] = 58,
[2][0][2][0][RTW89_ETSI][0][3] = 30,
[2][0][2][0][RTW89_MKK][1][3] = 58,
[2][0][2][0][RTW89_MKK][0][3] = 26,
[2][0][2][0][RTW89_IC][1][3] = 46,
- [2][0][2][0][RTW89_IC][2][3] = 60,
[2][0][2][0][RTW89_KCC][1][3] = 50,
[2][0][2][0][RTW89_KCC][0][3] = 24,
[2][0][2][0][RTW89_ACMA][1][3] = 58,
@@ -46344,13 +45022,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][3] = 58,
[2][0][2][0][RTW89_THAILAND][0][3] = 30,
[2][0][2][0][RTW89_FCC][1][11] = 46,
- [2][0][2][0][RTW89_FCC][2][11] = 60,
[2][0][2][0][RTW89_ETSI][1][11] = 58,
[2][0][2][0][RTW89_ETSI][0][11] = 30,
[2][0][2][0][RTW89_MKK][1][11] = 58,
[2][0][2][0][RTW89_MKK][0][11] = 24,
[2][0][2][0][RTW89_IC][1][11] = 46,
- [2][0][2][0][RTW89_IC][2][11] = 60,
[2][0][2][0][RTW89_KCC][1][11] = 50,
[2][0][2][0][RTW89_KCC][0][11] = 24,
[2][0][2][0][RTW89_ACMA][1][11] = 58,
@@ -46363,13 +45039,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][11] = 58,
[2][0][2][0][RTW89_THAILAND][0][11] = 30,
[2][0][2][0][RTW89_FCC][1][18] = 46,
- [2][0][2][0][RTW89_FCC][2][18] = 60,
[2][0][2][0][RTW89_ETSI][1][18] = 58,
[2][0][2][0][RTW89_ETSI][0][18] = 30,
[2][0][2][0][RTW89_MKK][1][18] = 58,
[2][0][2][0][RTW89_MKK][0][18] = 24,
[2][0][2][0][RTW89_IC][1][18] = 46,
- [2][0][2][0][RTW89_IC][2][18] = 60,
[2][0][2][0][RTW89_KCC][1][18] = 50,
[2][0][2][0][RTW89_KCC][0][18] = 24,
[2][0][2][0][RTW89_ACMA][1][18] = 58,
@@ -46382,13 +45056,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][18] = 58,
[2][0][2][0][RTW89_THAILAND][0][18] = 30,
[2][0][2][0][RTW89_FCC][1][26] = 46,
- [2][0][2][0][RTW89_FCC][2][26] = 60,
[2][0][2][0][RTW89_ETSI][1][26] = 58,
[2][0][2][0][RTW89_ETSI][0][26] = 30,
[2][0][2][0][RTW89_MKK][1][26] = 58,
[2][0][2][0][RTW89_MKK][0][26] = 24,
[2][0][2][0][RTW89_IC][1][26] = 46,
- [2][0][2][0][RTW89_IC][2][26] = 60,
[2][0][2][0][RTW89_KCC][1][26] = 50,
[2][0][2][0][RTW89_KCC][0][26] = 26,
[2][0][2][0][RTW89_ACMA][1][26] = 58,
@@ -46401,13 +45073,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][26] = 58,
[2][0][2][0][RTW89_THAILAND][0][26] = 30,
[2][0][2][0][RTW89_FCC][1][33] = 46,
- [2][0][2][0][RTW89_FCC][2][33] = 60,
[2][0][2][0][RTW89_ETSI][1][33] = 58,
[2][0][2][0][RTW89_ETSI][0][33] = 30,
[2][0][2][0][RTW89_MKK][1][33] = 58,
[2][0][2][0][RTW89_MKK][0][33] = 24,
[2][0][2][0][RTW89_IC][1][33] = 46,
- [2][0][2][0][RTW89_IC][2][33] = 60,
[2][0][2][0][RTW89_KCC][1][33] = 50,
[2][0][2][0][RTW89_KCC][0][33] = 24,
[2][0][2][0][RTW89_ACMA][1][33] = 58,
@@ -46420,13 +45090,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][33] = 58,
[2][0][2][0][RTW89_THAILAND][0][33] = 30,
[2][0][2][0][RTW89_FCC][1][41] = 46,
- [2][0][2][0][RTW89_FCC][2][41] = 60,
[2][0][2][0][RTW89_ETSI][1][41] = 58,
[2][0][2][0][RTW89_ETSI][0][41] = 30,
[2][0][2][0][RTW89_MKK][1][41] = 58,
[2][0][2][0][RTW89_MKK][0][41] = 24,
[2][0][2][0][RTW89_IC][1][41] = 46,
- [2][0][2][0][RTW89_IC][2][41] = 60,
[2][0][2][0][RTW89_KCC][1][41] = 50,
[2][0][2][0][RTW89_KCC][0][41] = 24,
[2][0][2][0][RTW89_ACMA][1][41] = 58,
@@ -46439,13 +45107,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][41] = 58,
[2][0][2][0][RTW89_THAILAND][0][41] = 30,
[2][0][2][0][RTW89_FCC][1][48] = 46,
- [2][0][2][0][RTW89_FCC][2][48] = 127,
[2][0][2][0][RTW89_ETSI][1][48] = 127,
[2][0][2][0][RTW89_ETSI][0][48] = 127,
[2][0][2][0][RTW89_MKK][1][48] = 127,
[2][0][2][0][RTW89_MKK][0][48] = 127,
[2][0][2][0][RTW89_IC][1][48] = 46,
- [2][0][2][0][RTW89_IC][2][48] = 60,
[2][0][2][0][RTW89_KCC][1][48] = 48,
[2][0][2][0][RTW89_KCC][0][48] = 127,
[2][0][2][0][RTW89_ACMA][1][48] = 127,
@@ -46458,13 +45124,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][48] = 127,
[2][0][2][0][RTW89_THAILAND][0][48] = 127,
[2][0][2][0][RTW89_FCC][1][56] = 46,
- [2][0][2][0][RTW89_FCC][2][56] = 127,
[2][0][2][0][RTW89_ETSI][1][56] = 127,
[2][0][2][0][RTW89_ETSI][0][56] = 127,
[2][0][2][0][RTW89_MKK][1][56] = 127,
[2][0][2][0][RTW89_MKK][0][56] = 127,
[2][0][2][0][RTW89_IC][1][56] = 46,
- [2][0][2][0][RTW89_IC][2][56] = 58,
[2][0][2][0][RTW89_KCC][1][56] = 48,
[2][0][2][0][RTW89_KCC][0][56] = 127,
[2][0][2][0][RTW89_ACMA][1][56] = 127,
@@ -46477,13 +45141,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][56] = 127,
[2][0][2][0][RTW89_THAILAND][0][56] = 127,
[2][0][2][0][RTW89_FCC][1][63] = 46,
- [2][0][2][0][RTW89_FCC][2][63] = 58,
[2][0][2][0][RTW89_ETSI][1][63] = 127,
[2][0][2][0][RTW89_ETSI][0][63] = 127,
[2][0][2][0][RTW89_MKK][1][63] = 127,
[2][0][2][0][RTW89_MKK][0][63] = 127,
[2][0][2][0][RTW89_IC][1][63] = 46,
- [2][0][2][0][RTW89_IC][2][63] = 58,
[2][0][2][0][RTW89_KCC][1][63] = 48,
[2][0][2][0][RTW89_KCC][0][63] = 127,
[2][0][2][0][RTW89_ACMA][1][63] = 127,
@@ -46496,13 +45158,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][63] = 127,
[2][0][2][0][RTW89_THAILAND][0][63] = 127,
[2][0][2][0][RTW89_FCC][1][71] = 46,
- [2][0][2][0][RTW89_FCC][2][71] = 58,
[2][0][2][0][RTW89_ETSI][1][71] = 127,
[2][0][2][0][RTW89_ETSI][0][71] = 127,
[2][0][2][0][RTW89_MKK][1][71] = 127,
[2][0][2][0][RTW89_MKK][0][71] = 127,
[2][0][2][0][RTW89_IC][1][71] = 46,
- [2][0][2][0][RTW89_IC][2][71] = 58,
[2][0][2][0][RTW89_KCC][1][71] = 48,
[2][0][2][0][RTW89_KCC][0][71] = 127,
[2][0][2][0][RTW89_ACMA][1][71] = 127,
@@ -46515,13 +45175,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][71] = 127,
[2][0][2][0][RTW89_THAILAND][0][71] = 127,
[2][0][2][0][RTW89_FCC][1][78] = 46,
- [2][0][2][0][RTW89_FCC][2][78] = 58,
[2][0][2][0][RTW89_ETSI][1][78] = 127,
[2][0][2][0][RTW89_ETSI][0][78] = 127,
[2][0][2][0][RTW89_MKK][1][78] = 127,
[2][0][2][0][RTW89_MKK][0][78] = 127,
[2][0][2][0][RTW89_IC][1][78] = 46,
- [2][0][2][0][RTW89_IC][2][78] = 58,
[2][0][2][0][RTW89_KCC][1][78] = 52,
[2][0][2][0][RTW89_KCC][0][78] = 127,
[2][0][2][0][RTW89_ACMA][1][78] = 127,
@@ -46534,13 +45192,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][78] = 127,
[2][0][2][0][RTW89_THAILAND][0][78] = 127,
[2][0][2][0][RTW89_FCC][1][86] = 46,
- [2][0][2][0][RTW89_FCC][2][86] = 127,
[2][0][2][0][RTW89_ETSI][1][86] = 127,
[2][0][2][0][RTW89_ETSI][0][86] = 127,
[2][0][2][0][RTW89_MKK][1][86] = 127,
[2][0][2][0][RTW89_MKK][0][86] = 127,
[2][0][2][0][RTW89_IC][1][86] = 46,
- [2][0][2][0][RTW89_IC][2][86] = 127,
[2][0][2][0][RTW89_KCC][1][86] = 52,
[2][0][2][0][RTW89_KCC][0][86] = 127,
[2][0][2][0][RTW89_ACMA][1][86] = 127,
@@ -46553,13 +45209,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][86] = 127,
[2][0][2][0][RTW89_THAILAND][0][86] = 127,
[2][0][2][0][RTW89_FCC][1][93] = 46,
- [2][0][2][0][RTW89_FCC][2][93] = 127,
[2][0][2][0][RTW89_ETSI][1][93] = 127,
[2][0][2][0][RTW89_ETSI][0][93] = 127,
[2][0][2][0][RTW89_MKK][1][93] = 127,
[2][0][2][0][RTW89_MKK][0][93] = 127,
[2][0][2][0][RTW89_IC][1][93] = 46,
- [2][0][2][0][RTW89_IC][2][93] = 127,
[2][0][2][0][RTW89_KCC][1][93] = 50,
[2][0][2][0][RTW89_KCC][0][93] = 127,
[2][0][2][0][RTW89_ACMA][1][93] = 127,
@@ -46572,13 +45226,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][93] = 127,
[2][0][2][0][RTW89_THAILAND][0][93] = 127,
[2][0][2][0][RTW89_FCC][1][101] = 44,
- [2][0][2][0][RTW89_FCC][2][101] = 127,
[2][0][2][0][RTW89_ETSI][1][101] = 127,
[2][0][2][0][RTW89_ETSI][0][101] = 127,
[2][0][2][0][RTW89_MKK][1][101] = 127,
[2][0][2][0][RTW89_MKK][0][101] = 127,
[2][0][2][0][RTW89_IC][1][101] = 44,
- [2][0][2][0][RTW89_IC][2][101] = 127,
[2][0][2][0][RTW89_KCC][1][101] = 50,
[2][0][2][0][RTW89_KCC][0][101] = 127,
[2][0][2][0][RTW89_ACMA][1][101] = 127,
@@ -46591,13 +45243,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][101] = 127,
[2][0][2][0][RTW89_THAILAND][0][101] = 127,
[2][0][2][0][RTW89_FCC][1][108] = 127,
- [2][0][2][0][RTW89_FCC][2][108] = 127,
[2][0][2][0][RTW89_ETSI][1][108] = 127,
[2][0][2][0][RTW89_ETSI][0][108] = 127,
[2][0][2][0][RTW89_MKK][1][108] = 127,
[2][0][2][0][RTW89_MKK][0][108] = 127,
[2][0][2][0][RTW89_IC][1][108] = 127,
- [2][0][2][0][RTW89_IC][2][108] = 127,
[2][0][2][0][RTW89_KCC][1][108] = 127,
[2][0][2][0][RTW89_KCC][0][108] = 127,
[2][0][2][0][RTW89_ACMA][1][108] = 127,
@@ -46610,13 +45260,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][108] = 127,
[2][0][2][0][RTW89_THAILAND][0][108] = 127,
[2][0][2][0][RTW89_FCC][1][116] = 127,
- [2][0][2][0][RTW89_FCC][2][116] = 127,
[2][0][2][0][RTW89_ETSI][1][116] = 127,
[2][0][2][0][RTW89_ETSI][0][116] = 127,
[2][0][2][0][RTW89_MKK][1][116] = 127,
[2][0][2][0][RTW89_MKK][0][116] = 127,
[2][0][2][0][RTW89_IC][1][116] = 127,
- [2][0][2][0][RTW89_IC][2][116] = 127,
[2][0][2][0][RTW89_KCC][1][116] = 127,
[2][0][2][0][RTW89_KCC][0][116] = 127,
[2][0][2][0][RTW89_ACMA][1][116] = 127,
@@ -46629,13 +45277,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][116] = 127,
[2][0][2][0][RTW89_THAILAND][0][116] = 127,
[2][1][2][0][RTW89_FCC][1][3] = 22,
- [2][1][2][0][RTW89_FCC][2][3] = 50,
[2][1][2][0][RTW89_ETSI][1][3] = 54,
[2][1][2][0][RTW89_ETSI][0][3] = 16,
[2][1][2][0][RTW89_MKK][1][3] = 52,
[2][1][2][0][RTW89_MKK][0][3] = 14,
[2][1][2][0][RTW89_IC][1][3] = 22,
- [2][1][2][0][RTW89_IC][2][3] = 50,
[2][1][2][0][RTW89_KCC][1][3] = 38,
[2][1][2][0][RTW89_KCC][0][3] = 12,
[2][1][2][0][RTW89_ACMA][1][3] = 54,
@@ -46648,13 +45294,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][3] = 46,
[2][1][2][0][RTW89_THAILAND][0][3] = 18,
[2][1][2][0][RTW89_FCC][1][11] = 20,
- [2][1][2][0][RTW89_FCC][2][11] = 50,
[2][1][2][0][RTW89_ETSI][1][11] = 54,
[2][1][2][0][RTW89_ETSI][0][11] = 16,
[2][1][2][0][RTW89_MKK][1][11] = 52,
[2][1][2][0][RTW89_MKK][0][11] = 12,
[2][1][2][0][RTW89_IC][1][11] = 20,
- [2][1][2][0][RTW89_IC][2][11] = 50,
[2][1][2][0][RTW89_KCC][1][11] = 38,
[2][1][2][0][RTW89_KCC][0][11] = 12,
[2][1][2][0][RTW89_ACMA][1][11] = 54,
@@ -46667,13 +45311,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][11] = 46,
[2][1][2][0][RTW89_THAILAND][0][11] = 18,
[2][1][2][0][RTW89_FCC][1][18] = 20,
- [2][1][2][0][RTW89_FCC][2][18] = 50,
[2][1][2][0][RTW89_ETSI][1][18] = 54,
[2][1][2][0][RTW89_ETSI][0][18] = 16,
[2][1][2][0][RTW89_MKK][1][18] = 52,
[2][1][2][0][RTW89_MKK][0][18] = 12,
[2][1][2][0][RTW89_IC][1][18] = 20,
- [2][1][2][0][RTW89_IC][2][18] = 50,
[2][1][2][0][RTW89_KCC][1][18] = 38,
[2][1][2][0][RTW89_KCC][0][18] = 12,
[2][1][2][0][RTW89_ACMA][1][18] = 54,
@@ -46686,13 +45328,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][18] = 46,
[2][1][2][0][RTW89_THAILAND][0][18] = 18,
[2][1][2][0][RTW89_FCC][1][26] = 20,
- [2][1][2][0][RTW89_FCC][2][26] = 60,
[2][1][2][0][RTW89_ETSI][1][26] = 54,
[2][1][2][0][RTW89_ETSI][0][26] = 16,
[2][1][2][0][RTW89_MKK][1][26] = 52,
[2][1][2][0][RTW89_MKK][0][26] = 12,
[2][1][2][0][RTW89_IC][1][26] = 20,
- [2][1][2][0][RTW89_IC][2][26] = 60,
[2][1][2][0][RTW89_KCC][1][26] = 38,
[2][1][2][0][RTW89_KCC][0][26] = 12,
[2][1][2][0][RTW89_ACMA][1][26] = 54,
@@ -46705,13 +45345,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][26] = 46,
[2][1][2][0][RTW89_THAILAND][0][26] = 18,
[2][1][2][0][RTW89_FCC][1][33] = 20,
- [2][1][2][0][RTW89_FCC][2][33] = 60,
[2][1][2][0][RTW89_ETSI][1][33] = 54,
[2][1][2][0][RTW89_ETSI][0][33] = 16,
[2][1][2][0][RTW89_MKK][1][33] = 48,
[2][1][2][0][RTW89_MKK][0][33] = 12,
[2][1][2][0][RTW89_IC][1][33] = 20,
- [2][1][2][0][RTW89_IC][2][33] = 60,
[2][1][2][0][RTW89_KCC][1][33] = 38,
[2][1][2][0][RTW89_KCC][0][33] = 12,
[2][1][2][0][RTW89_ACMA][1][33] = 54,
@@ -46724,13 +45362,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][33] = 46,
[2][1][2][0][RTW89_THAILAND][0][33] = 18,
[2][1][2][0][RTW89_FCC][1][41] = 22,
- [2][1][2][0][RTW89_FCC][2][41] = 60,
[2][1][2][0][RTW89_ETSI][1][41] = 54,
[2][1][2][0][RTW89_ETSI][0][41] = 18,
[2][1][2][0][RTW89_MKK][1][41] = 48,
[2][1][2][0][RTW89_MKK][0][41] = 12,
[2][1][2][0][RTW89_IC][1][41] = 22,
- [2][1][2][0][RTW89_IC][2][41] = 60,
[2][1][2][0][RTW89_KCC][1][41] = 38,
[2][1][2][0][RTW89_KCC][0][41] = 12,
[2][1][2][0][RTW89_ACMA][1][41] = 54,
@@ -46743,13 +45379,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][41] = 46,
[2][1][2][0][RTW89_THAILAND][0][41] = 18,
[2][1][2][0][RTW89_FCC][1][48] = 22,
- [2][1][2][0][RTW89_FCC][2][48] = 127,
[2][1][2][0][RTW89_ETSI][1][48] = 127,
[2][1][2][0][RTW89_ETSI][0][48] = 127,
[2][1][2][0][RTW89_MKK][1][48] = 127,
[2][1][2][0][RTW89_MKK][0][48] = 127,
[2][1][2][0][RTW89_IC][1][48] = 22,
- [2][1][2][0][RTW89_IC][2][48] = 60,
[2][1][2][0][RTW89_KCC][1][48] = 38,
[2][1][2][0][RTW89_KCC][0][48] = 127,
[2][1][2][0][RTW89_ACMA][1][48] = 127,
@@ -46762,13 +45396,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][48] = 127,
[2][1][2][0][RTW89_THAILAND][0][48] = 127,
[2][1][2][0][RTW89_FCC][1][56] = 20,
- [2][1][2][0][RTW89_FCC][2][56] = 127,
[2][1][2][0][RTW89_ETSI][1][56] = 127,
[2][1][2][0][RTW89_ETSI][0][56] = 127,
[2][1][2][0][RTW89_MKK][1][56] = 127,
[2][1][2][0][RTW89_MKK][0][56] = 127,
[2][1][2][0][RTW89_IC][1][56] = 20,
- [2][1][2][0][RTW89_IC][2][56] = 56,
[2][1][2][0][RTW89_KCC][1][56] = 38,
[2][1][2][0][RTW89_KCC][0][56] = 127,
[2][1][2][0][RTW89_ACMA][1][56] = 127,
@@ -46781,13 +45413,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][56] = 127,
[2][1][2][0][RTW89_THAILAND][0][56] = 127,
[2][1][2][0][RTW89_FCC][1][63] = 22,
- [2][1][2][0][RTW89_FCC][2][63] = 58,
[2][1][2][0][RTW89_ETSI][1][63] = 127,
[2][1][2][0][RTW89_ETSI][0][63] = 127,
[2][1][2][0][RTW89_MKK][1][63] = 127,
[2][1][2][0][RTW89_MKK][0][63] = 127,
[2][1][2][0][RTW89_IC][1][63] = 22,
- [2][1][2][0][RTW89_IC][2][63] = 58,
[2][1][2][0][RTW89_KCC][1][63] = 38,
[2][1][2][0][RTW89_KCC][0][63] = 127,
[2][1][2][0][RTW89_ACMA][1][63] = 127,
@@ -46800,13 +45430,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][63] = 127,
[2][1][2][0][RTW89_THAILAND][0][63] = 127,
[2][1][2][0][RTW89_FCC][1][71] = 20,
- [2][1][2][0][RTW89_FCC][2][71] = 58,
[2][1][2][0][RTW89_ETSI][1][71] = 127,
[2][1][2][0][RTW89_ETSI][0][71] = 127,
[2][1][2][0][RTW89_MKK][1][71] = 127,
[2][1][2][0][RTW89_MKK][0][71] = 127,
[2][1][2][0][RTW89_IC][1][71] = 20,
- [2][1][2][0][RTW89_IC][2][71] = 58,
[2][1][2][0][RTW89_KCC][1][71] = 38,
[2][1][2][0][RTW89_KCC][0][71] = 127,
[2][1][2][0][RTW89_ACMA][1][71] = 127,
@@ -46819,13 +45447,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][71] = 127,
[2][1][2][0][RTW89_THAILAND][0][71] = 127,
[2][1][2][0][RTW89_FCC][1][78] = 20,
- [2][1][2][0][RTW89_FCC][2][78] = 58,
[2][1][2][0][RTW89_ETSI][1][78] = 127,
[2][1][2][0][RTW89_ETSI][0][78] = 127,
[2][1][2][0][RTW89_MKK][1][78] = 127,
[2][1][2][0][RTW89_MKK][0][78] = 127,
[2][1][2][0][RTW89_IC][1][78] = 20,
- [2][1][2][0][RTW89_IC][2][78] = 58,
[2][1][2][0][RTW89_KCC][1][78] = 38,
[2][1][2][0][RTW89_KCC][0][78] = 127,
[2][1][2][0][RTW89_ACMA][1][78] = 127,
@@ -46838,13 +45464,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][78] = 127,
[2][1][2][0][RTW89_THAILAND][0][78] = 127,
[2][1][2][0][RTW89_FCC][1][86] = 20,
- [2][1][2][0][RTW89_FCC][2][86] = 127,
[2][1][2][0][RTW89_ETSI][1][86] = 127,
[2][1][2][0][RTW89_ETSI][0][86] = 127,
[2][1][2][0][RTW89_MKK][1][86] = 127,
[2][1][2][0][RTW89_MKK][0][86] = 127,
[2][1][2][0][RTW89_IC][1][86] = 20,
- [2][1][2][0][RTW89_IC][2][86] = 127,
[2][1][2][0][RTW89_KCC][1][86] = 38,
[2][1][2][0][RTW89_KCC][0][86] = 127,
[2][1][2][0][RTW89_ACMA][1][86] = 127,
@@ -46857,13 +45481,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][86] = 127,
[2][1][2][0][RTW89_THAILAND][0][86] = 127,
[2][1][2][0][RTW89_FCC][1][93] = 22,
- [2][1][2][0][RTW89_FCC][2][93] = 127,
[2][1][2][0][RTW89_ETSI][1][93] = 127,
[2][1][2][0][RTW89_ETSI][0][93] = 127,
[2][1][2][0][RTW89_MKK][1][93] = 127,
[2][1][2][0][RTW89_MKK][0][93] = 127,
[2][1][2][0][RTW89_IC][1][93] = 22,
- [2][1][2][0][RTW89_IC][2][93] = 127,
[2][1][2][0][RTW89_KCC][1][93] = 38,
[2][1][2][0][RTW89_KCC][0][93] = 127,
[2][1][2][0][RTW89_ACMA][1][93] = 127,
@@ -46876,13 +45498,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][93] = 127,
[2][1][2][0][RTW89_THAILAND][0][93] = 127,
[2][1][2][0][RTW89_FCC][1][101] = 22,
- [2][1][2][0][RTW89_FCC][2][101] = 127,
[2][1][2][0][RTW89_ETSI][1][101] = 127,
[2][1][2][0][RTW89_ETSI][0][101] = 127,
[2][1][2][0][RTW89_MKK][1][101] = 127,
[2][1][2][0][RTW89_MKK][0][101] = 127,
[2][1][2][0][RTW89_IC][1][101] = 22,
- [2][1][2][0][RTW89_IC][2][101] = 127,
[2][1][2][0][RTW89_KCC][1][101] = 38,
[2][1][2][0][RTW89_KCC][0][101] = 127,
[2][1][2][0][RTW89_ACMA][1][101] = 127,
@@ -46895,13 +45515,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][101] = 127,
[2][1][2][0][RTW89_THAILAND][0][101] = 127,
[2][1][2][0][RTW89_FCC][1][108] = 127,
- [2][1][2][0][RTW89_FCC][2][108] = 127,
[2][1][2][0][RTW89_ETSI][1][108] = 127,
[2][1][2][0][RTW89_ETSI][0][108] = 127,
[2][1][2][0][RTW89_MKK][1][108] = 127,
[2][1][2][0][RTW89_MKK][0][108] = 127,
[2][1][2][0][RTW89_IC][1][108] = 127,
- [2][1][2][0][RTW89_IC][2][108] = 127,
[2][1][2][0][RTW89_KCC][1][108] = 127,
[2][1][2][0][RTW89_KCC][0][108] = 127,
[2][1][2][0][RTW89_ACMA][1][108] = 127,
@@ -46914,13 +45532,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][108] = 127,
[2][1][2][0][RTW89_THAILAND][0][108] = 127,
[2][1][2][0][RTW89_FCC][1][116] = 127,
- [2][1][2][0][RTW89_FCC][2][116] = 127,
[2][1][2][0][RTW89_ETSI][1][116] = 127,
[2][1][2][0][RTW89_ETSI][0][116] = 127,
[2][1][2][0][RTW89_MKK][1][116] = 127,
[2][1][2][0][RTW89_MKK][0][116] = 127,
[2][1][2][0][RTW89_IC][1][116] = 127,
- [2][1][2][0][RTW89_IC][2][116] = 127,
[2][1][2][0][RTW89_KCC][1][116] = 127,
[2][1][2][0][RTW89_KCC][0][116] = 127,
[2][1][2][0][RTW89_ACMA][1][116] = 127,
@@ -46933,13 +45549,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][116] = 127,
[2][1][2][0][RTW89_THAILAND][0][116] = 127,
[2][1][2][1][RTW89_FCC][1][3] = 22,
- [2][1][2][1][RTW89_FCC][2][3] = 50,
[2][1][2][1][RTW89_ETSI][1][3] = 42,
[2][1][2][1][RTW89_ETSI][0][3] = 6,
[2][1][2][1][RTW89_MKK][1][3] = 52,
[2][1][2][1][RTW89_MKK][0][3] = 14,
[2][1][2][1][RTW89_IC][1][3] = 22,
- [2][1][2][1][RTW89_IC][2][3] = 50,
[2][1][2][1][RTW89_KCC][1][3] = 38,
[2][1][2][1][RTW89_KCC][0][3] = 12,
[2][1][2][1][RTW89_ACMA][1][3] = 42,
@@ -46952,13 +45566,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][3] = 46,
[2][1][2][1][RTW89_THAILAND][0][3] = 6,
[2][1][2][1][RTW89_FCC][1][11] = 20,
- [2][1][2][1][RTW89_FCC][2][11] = 50,
[2][1][2][1][RTW89_ETSI][1][11] = 42,
[2][1][2][1][RTW89_ETSI][0][11] = 6,
[2][1][2][1][RTW89_MKK][1][11] = 52,
[2][1][2][1][RTW89_MKK][0][11] = 12,
[2][1][2][1][RTW89_IC][1][11] = 20,
- [2][1][2][1][RTW89_IC][2][11] = 50,
[2][1][2][1][RTW89_KCC][1][11] = 38,
[2][1][2][1][RTW89_KCC][0][11] = 12,
[2][1][2][1][RTW89_ACMA][1][11] = 42,
@@ -46971,13 +45583,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][11] = 46,
[2][1][2][1][RTW89_THAILAND][0][11] = 6,
[2][1][2][1][RTW89_FCC][1][18] = 20,
- [2][1][2][1][RTW89_FCC][2][18] = 50,
[2][1][2][1][RTW89_ETSI][1][18] = 42,
[2][1][2][1][RTW89_ETSI][0][18] = 6,
[2][1][2][1][RTW89_MKK][1][18] = 52,
[2][1][2][1][RTW89_MKK][0][18] = 12,
[2][1][2][1][RTW89_IC][1][18] = 20,
- [2][1][2][1][RTW89_IC][2][18] = 50,
[2][1][2][1][RTW89_KCC][1][18] = 38,
[2][1][2][1][RTW89_KCC][0][18] = 12,
[2][1][2][1][RTW89_ACMA][1][18] = 42,
@@ -46990,13 +45600,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][18] = 46,
[2][1][2][1][RTW89_THAILAND][0][18] = 6,
[2][1][2][1][RTW89_FCC][1][26] = 20,
- [2][1][2][1][RTW89_FCC][2][26] = 60,
[2][1][2][1][RTW89_ETSI][1][26] = 42,
[2][1][2][1][RTW89_ETSI][0][26] = 6,
[2][1][2][1][RTW89_MKK][1][26] = 52,
[2][1][2][1][RTW89_MKK][0][26] = 12,
[2][1][2][1][RTW89_IC][1][26] = 20,
- [2][1][2][1][RTW89_IC][2][26] = 60,
[2][1][2][1][RTW89_KCC][1][26] = 38,
[2][1][2][1][RTW89_KCC][0][26] = 12,
[2][1][2][1][RTW89_ACMA][1][26] = 42,
@@ -47009,13 +45617,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][26] = 46,
[2][1][2][1][RTW89_THAILAND][0][26] = 6,
[2][1][2][1][RTW89_FCC][1][33] = 20,
- [2][1][2][1][RTW89_FCC][2][33] = 60,
[2][1][2][1][RTW89_ETSI][1][33] = 42,
[2][1][2][1][RTW89_ETSI][0][33] = 6,
[2][1][2][1][RTW89_MKK][1][33] = 48,
[2][1][2][1][RTW89_MKK][0][33] = 12,
[2][1][2][1][RTW89_IC][1][33] = 20,
- [2][1][2][1][RTW89_IC][2][33] = 60,
[2][1][2][1][RTW89_KCC][1][33] = 38,
[2][1][2][1][RTW89_KCC][0][33] = 12,
[2][1][2][1][RTW89_ACMA][1][33] = 42,
@@ -47028,13 +45634,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][33] = 46,
[2][1][2][1][RTW89_THAILAND][0][33] = 6,
[2][1][2][1][RTW89_FCC][1][41] = 22,
- [2][1][2][1][RTW89_FCC][2][41] = 60,
[2][1][2][1][RTW89_ETSI][1][41] = 42,
[2][1][2][1][RTW89_ETSI][0][41] = 6,
[2][1][2][1][RTW89_MKK][1][41] = 48,
[2][1][2][1][RTW89_MKK][0][41] = 12,
[2][1][2][1][RTW89_IC][1][41] = 22,
- [2][1][2][1][RTW89_IC][2][41] = 60,
[2][1][2][1][RTW89_KCC][1][41] = 38,
[2][1][2][1][RTW89_KCC][0][41] = 12,
[2][1][2][1][RTW89_ACMA][1][41] = 42,
@@ -47047,13 +45651,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][41] = 46,
[2][1][2][1][RTW89_THAILAND][0][41] = 6,
[2][1][2][1][RTW89_FCC][1][48] = 22,
- [2][1][2][1][RTW89_FCC][2][48] = 127,
[2][1][2][1][RTW89_ETSI][1][48] = 127,
[2][1][2][1][RTW89_ETSI][0][48] = 127,
[2][1][2][1][RTW89_MKK][1][48] = 127,
[2][1][2][1][RTW89_MKK][0][48] = 127,
[2][1][2][1][RTW89_IC][1][48] = 22,
- [2][1][2][1][RTW89_IC][2][48] = 60,
[2][1][2][1][RTW89_KCC][1][48] = 38,
[2][1][2][1][RTW89_KCC][0][48] = 127,
[2][1][2][1][RTW89_ACMA][1][48] = 127,
@@ -47066,13 +45668,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][48] = 127,
[2][1][2][1][RTW89_THAILAND][0][48] = 127,
[2][1][2][1][RTW89_FCC][1][56] = 20,
- [2][1][2][1][RTW89_FCC][2][56] = 127,
[2][1][2][1][RTW89_ETSI][1][56] = 127,
[2][1][2][1][RTW89_ETSI][0][56] = 127,
[2][1][2][1][RTW89_MKK][1][56] = 127,
[2][1][2][1][RTW89_MKK][0][56] = 127,
[2][1][2][1][RTW89_IC][1][56] = 20,
- [2][1][2][1][RTW89_IC][2][56] = 56,
[2][1][2][1][RTW89_KCC][1][56] = 38,
[2][1][2][1][RTW89_KCC][0][56] = 127,
[2][1][2][1][RTW89_ACMA][1][56] = 127,
@@ -47085,13 +45685,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][56] = 127,
[2][1][2][1][RTW89_THAILAND][0][56] = 127,
[2][1][2][1][RTW89_FCC][1][63] = 22,
- [2][1][2][1][RTW89_FCC][2][63] = 58,
[2][1][2][1][RTW89_ETSI][1][63] = 127,
[2][1][2][1][RTW89_ETSI][0][63] = 127,
[2][1][2][1][RTW89_MKK][1][63] = 127,
[2][1][2][1][RTW89_MKK][0][63] = 127,
[2][1][2][1][RTW89_IC][1][63] = 22,
- [2][1][2][1][RTW89_IC][2][63] = 58,
[2][1][2][1][RTW89_KCC][1][63] = 38,
[2][1][2][1][RTW89_KCC][0][63] = 127,
[2][1][2][1][RTW89_ACMA][1][63] = 127,
@@ -47104,13 +45702,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][63] = 127,
[2][1][2][1][RTW89_THAILAND][0][63] = 127,
[2][1][2][1][RTW89_FCC][1][71] = 20,
- [2][1][2][1][RTW89_FCC][2][71] = 58,
[2][1][2][1][RTW89_ETSI][1][71] = 127,
[2][1][2][1][RTW89_ETSI][0][71] = 127,
[2][1][2][1][RTW89_MKK][1][71] = 127,
[2][1][2][1][RTW89_MKK][0][71] = 127,
[2][1][2][1][RTW89_IC][1][71] = 20,
- [2][1][2][1][RTW89_IC][2][71] = 58,
[2][1][2][1][RTW89_KCC][1][71] = 38,
[2][1][2][1][RTW89_KCC][0][71] = 127,
[2][1][2][1][RTW89_ACMA][1][71] = 127,
@@ -47123,13 +45719,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][71] = 127,
[2][1][2][1][RTW89_THAILAND][0][71] = 127,
[2][1][2][1][RTW89_FCC][1][78] = 20,
- [2][1][2][1][RTW89_FCC][2][78] = 58,
[2][1][2][1][RTW89_ETSI][1][78] = 127,
[2][1][2][1][RTW89_ETSI][0][78] = 127,
[2][1][2][1][RTW89_MKK][1][78] = 127,
[2][1][2][1][RTW89_MKK][0][78] = 127,
[2][1][2][1][RTW89_IC][1][78] = 20,
- [2][1][2][1][RTW89_IC][2][78] = 58,
[2][1][2][1][RTW89_KCC][1][78] = 38,
[2][1][2][1][RTW89_KCC][0][78] = 127,
[2][1][2][1][RTW89_ACMA][1][78] = 127,
@@ -47142,13 +45736,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][78] = 127,
[2][1][2][1][RTW89_THAILAND][0][78] = 127,
[2][1][2][1][RTW89_FCC][1][86] = 20,
- [2][1][2][1][RTW89_FCC][2][86] = 127,
[2][1][2][1][RTW89_ETSI][1][86] = 127,
[2][1][2][1][RTW89_ETSI][0][86] = 127,
[2][1][2][1][RTW89_MKK][1][86] = 127,
[2][1][2][1][RTW89_MKK][0][86] = 127,
[2][1][2][1][RTW89_IC][1][86] = 20,
- [2][1][2][1][RTW89_IC][2][86] = 127,
[2][1][2][1][RTW89_KCC][1][86] = 38,
[2][1][2][1][RTW89_KCC][0][86] = 127,
[2][1][2][1][RTW89_ACMA][1][86] = 127,
@@ -47161,13 +45753,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][86] = 127,
[2][1][2][1][RTW89_THAILAND][0][86] = 127,
[2][1][2][1][RTW89_FCC][1][93] = 22,
- [2][1][2][1][RTW89_FCC][2][93] = 127,
[2][1][2][1][RTW89_ETSI][1][93] = 127,
[2][1][2][1][RTW89_ETSI][0][93] = 127,
[2][1][2][1][RTW89_MKK][1][93] = 127,
[2][1][2][1][RTW89_MKK][0][93] = 127,
[2][1][2][1][RTW89_IC][1][93] = 22,
- [2][1][2][1][RTW89_IC][2][93] = 127,
[2][1][2][1][RTW89_KCC][1][93] = 38,
[2][1][2][1][RTW89_KCC][0][93] = 127,
[2][1][2][1][RTW89_ACMA][1][93] = 127,
@@ -47180,13 +45770,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][93] = 127,
[2][1][2][1][RTW89_THAILAND][0][93] = 127,
[2][1][2][1][RTW89_FCC][1][101] = 22,
- [2][1][2][1][RTW89_FCC][2][101] = 127,
[2][1][2][1][RTW89_ETSI][1][101] = 127,
[2][1][2][1][RTW89_ETSI][0][101] = 127,
[2][1][2][1][RTW89_MKK][1][101] = 127,
[2][1][2][1][RTW89_MKK][0][101] = 127,
[2][1][2][1][RTW89_IC][1][101] = 22,
- [2][1][2][1][RTW89_IC][2][101] = 127,
[2][1][2][1][RTW89_KCC][1][101] = 38,
[2][1][2][1][RTW89_KCC][0][101] = 127,
[2][1][2][1][RTW89_ACMA][1][101] = 127,
@@ -47199,13 +45787,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][101] = 127,
[2][1][2][1][RTW89_THAILAND][0][101] = 127,
[2][1][2][1][RTW89_FCC][1][108] = 127,
- [2][1][2][1][RTW89_FCC][2][108] = 127,
[2][1][2][1][RTW89_ETSI][1][108] = 127,
[2][1][2][1][RTW89_ETSI][0][108] = 127,
[2][1][2][1][RTW89_MKK][1][108] = 127,
[2][1][2][1][RTW89_MKK][0][108] = 127,
[2][1][2][1][RTW89_IC][1][108] = 127,
- [2][1][2][1][RTW89_IC][2][108] = 127,
[2][1][2][1][RTW89_KCC][1][108] = 127,
[2][1][2][1][RTW89_KCC][0][108] = 127,
[2][1][2][1][RTW89_ACMA][1][108] = 127,
@@ -47218,13 +45804,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][108] = 127,
[2][1][2][1][RTW89_THAILAND][0][108] = 127,
[2][1][2][1][RTW89_FCC][1][116] = 127,
- [2][1][2][1][RTW89_FCC][2][116] = 127,
[2][1][2][1][RTW89_ETSI][1][116] = 127,
[2][1][2][1][RTW89_ETSI][0][116] = 127,
[2][1][2][1][RTW89_MKK][1][116] = 127,
[2][1][2][1][RTW89_MKK][0][116] = 127,
[2][1][2][1][RTW89_IC][1][116] = 127,
- [2][1][2][1][RTW89_IC][2][116] = 127,
[2][1][2][1][RTW89_KCC][1][116] = 127,
[2][1][2][1][RTW89_KCC][0][116] = 127,
[2][1][2][1][RTW89_ACMA][1][116] = 127,
@@ -47237,13 +45821,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][116] = 127,
[2][1][2][1][RTW89_THAILAND][0][116] = 127,
[3][0][2][0][RTW89_FCC][1][7] = 52,
- [3][0][2][0][RTW89_FCC][2][7] = 52,
[3][0][2][0][RTW89_ETSI][1][7] = 50,
[3][0][2][0][RTW89_ETSI][0][7] = 30,
[3][0][2][0][RTW89_MKK][1][7] = 50,
[3][0][2][0][RTW89_MKK][0][7] = 22,
[3][0][2][0][RTW89_IC][1][7] = 52,
- [3][0][2][0][RTW89_IC][2][7] = 52,
[3][0][2][0][RTW89_KCC][1][7] = 42,
[3][0][2][0][RTW89_KCC][0][7] = 24,
[3][0][2][0][RTW89_ACMA][1][7] = 50,
@@ -47256,13 +45838,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][7] = 50,
[3][0][2][0][RTW89_THAILAND][0][7] = 30,
[3][0][2][0][RTW89_FCC][1][22] = 52,
- [3][0][2][0][RTW89_FCC][2][22] = 52,
[3][0][2][0][RTW89_ETSI][1][22] = 50,
[3][0][2][0][RTW89_ETSI][0][22] = 30,
[3][0][2][0][RTW89_MKK][1][22] = 50,
[3][0][2][0][RTW89_MKK][0][22] = 20,
[3][0][2][0][RTW89_IC][1][22] = 52,
- [3][0][2][0][RTW89_IC][2][22] = 52,
[3][0][2][0][RTW89_KCC][1][22] = 42,
[3][0][2][0][RTW89_KCC][0][22] = 24,
[3][0][2][0][RTW89_ACMA][1][22] = 50,
@@ -47275,13 +45855,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][22] = 50,
[3][0][2][0][RTW89_THAILAND][0][22] = 30,
[3][0][2][0][RTW89_FCC][1][37] = 52,
- [3][0][2][0][RTW89_FCC][2][37] = 52,
[3][0][2][0][RTW89_ETSI][1][37] = 50,
[3][0][2][0][RTW89_ETSI][0][37] = 30,
[3][0][2][0][RTW89_MKK][1][37] = 50,
[3][0][2][0][RTW89_MKK][0][37] = 20,
[3][0][2][0][RTW89_IC][1][37] = 52,
- [3][0][2][0][RTW89_IC][2][37] = 52,
[3][0][2][0][RTW89_KCC][1][37] = 42,
[3][0][2][0][RTW89_KCC][0][37] = 24,
[3][0][2][0][RTW89_ACMA][1][37] = 50,
@@ -47294,13 +45872,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][37] = 50,
[3][0][2][0][RTW89_THAILAND][0][37] = 30,
[3][0][2][0][RTW89_FCC][1][52] = 54,
- [3][0][2][0][RTW89_FCC][2][52] = 127,
[3][0][2][0][RTW89_ETSI][1][52] = 127,
[3][0][2][0][RTW89_ETSI][0][52] = 127,
[3][0][2][0][RTW89_MKK][1][52] = 127,
[3][0][2][0][RTW89_MKK][0][52] = 127,
[3][0][2][0][RTW89_IC][1][52] = 54,
- [3][0][2][0][RTW89_IC][2][52] = 56,
[3][0][2][0][RTW89_KCC][1][52] = 56,
[3][0][2][0][RTW89_KCC][0][52] = 127,
[3][0][2][0][RTW89_ACMA][1][52] = 127,
@@ -47313,13 +45889,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][52] = 127,
[3][0][2][0][RTW89_THAILAND][0][52] = 127,
[3][0][2][0][RTW89_FCC][1][67] = 54,
- [3][0][2][0][RTW89_FCC][2][67] = 54,
[3][0][2][0][RTW89_ETSI][1][67] = 127,
[3][0][2][0][RTW89_ETSI][0][67] = 127,
[3][0][2][0][RTW89_MKK][1][67] = 127,
[3][0][2][0][RTW89_MKK][0][67] = 127,
[3][0][2][0][RTW89_IC][1][67] = 54,
- [3][0][2][0][RTW89_IC][2][67] = 54,
[3][0][2][0][RTW89_KCC][1][67] = 54,
[3][0][2][0][RTW89_KCC][0][67] = 127,
[3][0][2][0][RTW89_ACMA][1][67] = 127,
@@ -47332,13 +45906,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][67] = 127,
[3][0][2][0][RTW89_THAILAND][0][67] = 127,
[3][0][2][0][RTW89_FCC][1][82] = 46,
- [3][0][2][0][RTW89_FCC][2][82] = 127,
[3][0][2][0][RTW89_ETSI][1][82] = 127,
[3][0][2][0][RTW89_ETSI][0][82] = 127,
[3][0][2][0][RTW89_MKK][1][82] = 127,
[3][0][2][0][RTW89_MKK][0][82] = 127,
[3][0][2][0][RTW89_IC][1][82] = 46,
- [3][0][2][0][RTW89_IC][2][82] = 127,
[3][0][2][0][RTW89_KCC][1][82] = 26,
[3][0][2][0][RTW89_KCC][0][82] = 127,
[3][0][2][0][RTW89_ACMA][1][82] = 127,
@@ -47351,13 +45923,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][82] = 127,
[3][0][2][0][RTW89_THAILAND][0][82] = 127,
[3][0][2][0][RTW89_FCC][1][97] = 40,
- [3][0][2][0][RTW89_FCC][2][97] = 127,
[3][0][2][0][RTW89_ETSI][1][97] = 127,
[3][0][2][0][RTW89_ETSI][0][97] = 127,
[3][0][2][0][RTW89_MKK][1][97] = 127,
[3][0][2][0][RTW89_MKK][0][97] = 127,
[3][0][2][0][RTW89_IC][1][97] = 40,
- [3][0][2][0][RTW89_IC][2][97] = 127,
[3][0][2][0][RTW89_KCC][1][97] = 26,
[3][0][2][0][RTW89_KCC][0][97] = 127,
[3][0][2][0][RTW89_ACMA][1][97] = 127,
@@ -47370,13 +45940,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][97] = 127,
[3][0][2][0][RTW89_THAILAND][0][97] = 127,
[3][0][2][0][RTW89_FCC][1][112] = 127,
- [3][0][2][0][RTW89_FCC][2][112] = 127,
[3][0][2][0][RTW89_ETSI][1][112] = 127,
[3][0][2][0][RTW89_ETSI][0][112] = 127,
[3][0][2][0][RTW89_MKK][1][112] = 127,
[3][0][2][0][RTW89_MKK][0][112] = 127,
[3][0][2][0][RTW89_IC][1][112] = 127,
- [3][0][2][0][RTW89_IC][2][112] = 127,
[3][0][2][0][RTW89_KCC][1][112] = 127,
[3][0][2][0][RTW89_KCC][0][112] = 127,
[3][0][2][0][RTW89_ACMA][1][112] = 127,
@@ -47389,13 +45957,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][112] = 127,
[3][0][2][0][RTW89_THAILAND][0][112] = 127,
[3][1][2][0][RTW89_FCC][1][7] = 32,
- [3][1][2][0][RTW89_FCC][2][7] = 46,
[3][1][2][0][RTW89_ETSI][1][7] = 50,
[3][1][2][0][RTW89_ETSI][0][7] = 18,
[3][1][2][0][RTW89_MKK][1][7] = 38,
[3][1][2][0][RTW89_MKK][0][7] = 10,
[3][1][2][0][RTW89_IC][1][7] = 32,
- [3][1][2][0][RTW89_IC][2][7] = 46,
[3][1][2][0][RTW89_KCC][1][7] = 40,
[3][1][2][0][RTW89_KCC][0][7] = 12,
[3][1][2][0][RTW89_ACMA][1][7] = 50,
@@ -47408,13 +45974,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][7] = 46,
[3][1][2][0][RTW89_THAILAND][0][7] = 18,
[3][1][2][0][RTW89_FCC][1][22] = 30,
- [3][1][2][0][RTW89_FCC][2][22] = 52,
[3][1][2][0][RTW89_ETSI][1][22] = 46,
[3][1][2][0][RTW89_ETSI][0][22] = 16,
[3][1][2][0][RTW89_MKK][1][22] = 48,
[3][1][2][0][RTW89_MKK][0][22] = 8,
[3][1][2][0][RTW89_IC][1][22] = 30,
- [3][1][2][0][RTW89_IC][2][22] = 52,
[3][1][2][0][RTW89_KCC][1][22] = 40,
[3][1][2][0][RTW89_KCC][0][22] = 12,
[3][1][2][0][RTW89_ACMA][1][22] = 46,
@@ -47427,13 +45991,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][22] = 46,
[3][1][2][0][RTW89_THAILAND][0][22] = 18,
[3][1][2][0][RTW89_FCC][1][37] = 30,
- [3][1][2][0][RTW89_FCC][2][37] = 52,
[3][1][2][0][RTW89_ETSI][1][37] = 46,
[3][1][2][0][RTW89_ETSI][0][37] = 16,
[3][1][2][0][RTW89_MKK][1][37] = 48,
[3][1][2][0][RTW89_MKK][0][37] = 8,
[3][1][2][0][RTW89_IC][1][37] = 30,
- [3][1][2][0][RTW89_IC][2][37] = 52,
[3][1][2][0][RTW89_KCC][1][37] = 40,
[3][1][2][0][RTW89_KCC][0][37] = 12,
[3][1][2][0][RTW89_ACMA][1][37] = 46,
@@ -47446,13 +46008,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][37] = 46,
[3][1][2][0][RTW89_THAILAND][0][37] = 18,
[3][1][2][0][RTW89_FCC][1][52] = 30,
- [3][1][2][0][RTW89_FCC][2][52] = 127,
[3][1][2][0][RTW89_ETSI][1][52] = 127,
[3][1][2][0][RTW89_ETSI][0][52] = 127,
[3][1][2][0][RTW89_MKK][1][52] = 127,
[3][1][2][0][RTW89_MKK][0][52] = 127,
[3][1][2][0][RTW89_IC][1][52] = 30,
- [3][1][2][0][RTW89_IC][2][52] = 56,
[3][1][2][0][RTW89_KCC][1][52] = 48,
[3][1][2][0][RTW89_KCC][0][52] = 127,
[3][1][2][0][RTW89_ACMA][1][52] = 127,
@@ -47465,13 +46025,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][52] = 127,
[3][1][2][0][RTW89_THAILAND][0][52] = 127,
[3][1][2][0][RTW89_FCC][1][67] = 32,
- [3][1][2][0][RTW89_FCC][2][67] = 54,
[3][1][2][0][RTW89_ETSI][1][67] = 127,
[3][1][2][0][RTW89_ETSI][0][67] = 127,
[3][1][2][0][RTW89_MKK][1][67] = 127,
[3][1][2][0][RTW89_MKK][0][67] = 127,
[3][1][2][0][RTW89_IC][1][67] = 32,
- [3][1][2][0][RTW89_IC][2][67] = 54,
[3][1][2][0][RTW89_KCC][1][67] = 48,
[3][1][2][0][RTW89_KCC][0][67] = 127,
[3][1][2][0][RTW89_ACMA][1][67] = 127,
@@ -47484,13 +46042,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][67] = 127,
[3][1][2][0][RTW89_THAILAND][0][67] = 127,
[3][1][2][0][RTW89_FCC][1][82] = 32,
- [3][1][2][0][RTW89_FCC][2][82] = 127,
[3][1][2][0][RTW89_ETSI][1][82] = 127,
[3][1][2][0][RTW89_ETSI][0][82] = 127,
[3][1][2][0][RTW89_MKK][1][82] = 127,
[3][1][2][0][RTW89_MKK][0][82] = 127,
[3][1][2][0][RTW89_IC][1][82] = 32,
- [3][1][2][0][RTW89_IC][2][82] = 127,
[3][1][2][0][RTW89_KCC][1][82] = 24,
[3][1][2][0][RTW89_KCC][0][82] = 127,
[3][1][2][0][RTW89_ACMA][1][82] = 127,
@@ -47503,13 +46059,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][82] = 127,
[3][1][2][0][RTW89_THAILAND][0][82] = 127,
[3][1][2][0][RTW89_FCC][1][97] = 32,
- [3][1][2][0][RTW89_FCC][2][97] = 127,
[3][1][2][0][RTW89_ETSI][1][97] = 127,
[3][1][2][0][RTW89_ETSI][0][97] = 127,
[3][1][2][0][RTW89_MKK][1][97] = 127,
[3][1][2][0][RTW89_MKK][0][97] = 127,
[3][1][2][0][RTW89_IC][1][97] = 32,
- [3][1][2][0][RTW89_IC][2][97] = 127,
[3][1][2][0][RTW89_KCC][1][97] = 24,
[3][1][2][0][RTW89_KCC][0][97] = 127,
[3][1][2][0][RTW89_ACMA][1][97] = 127,
@@ -47522,13 +46076,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][97] = 127,
[3][1][2][0][RTW89_THAILAND][0][97] = 127,
[3][1][2][0][RTW89_FCC][1][112] = 127,
- [3][1][2][0][RTW89_FCC][2][112] = 127,
[3][1][2][0][RTW89_ETSI][1][112] = 127,
[3][1][2][0][RTW89_ETSI][0][112] = 127,
[3][1][2][0][RTW89_MKK][1][112] = 127,
[3][1][2][0][RTW89_MKK][0][112] = 127,
[3][1][2][0][RTW89_IC][1][112] = 127,
- [3][1][2][0][RTW89_IC][2][112] = 127,
[3][1][2][0][RTW89_KCC][1][112] = 127,
[3][1][2][0][RTW89_KCC][0][112] = 127,
[3][1][2][0][RTW89_ACMA][1][112] = 127,
@@ -47541,13 +46093,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][112] = 127,
[3][1][2][0][RTW89_THAILAND][0][112] = 127,
[3][1][2][1][RTW89_FCC][1][7] = 32,
- [3][1][2][1][RTW89_FCC][2][7] = 46,
[3][1][2][1][RTW89_ETSI][1][7] = 42,
[3][1][2][1][RTW89_ETSI][0][7] = 6,
[3][1][2][1][RTW89_MKK][1][7] = 38,
[3][1][2][1][RTW89_MKK][0][7] = 10,
[3][1][2][1][RTW89_IC][1][7] = 32,
- [3][1][2][1][RTW89_IC][2][7] = 46,
[3][1][2][1][RTW89_KCC][1][7] = 40,
[3][1][2][1][RTW89_KCC][0][7] = 12,
[3][1][2][1][RTW89_ACMA][1][7] = 42,
@@ -47560,13 +46110,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][7] = 46,
[3][1][2][1][RTW89_THAILAND][0][7] = 6,
[3][1][2][1][RTW89_FCC][1][22] = 30,
- [3][1][2][1][RTW89_FCC][2][22] = 52,
[3][1][2][1][RTW89_ETSI][1][22] = 42,
[3][1][2][1][RTW89_ETSI][0][22] = 6,
[3][1][2][1][RTW89_MKK][1][22] = 48,
[3][1][2][1][RTW89_MKK][0][22] = 8,
[3][1][2][1][RTW89_IC][1][22] = 30,
- [3][1][2][1][RTW89_IC][2][22] = 52,
[3][1][2][1][RTW89_KCC][1][22] = 40,
[3][1][2][1][RTW89_KCC][0][22] = 12,
[3][1][2][1][RTW89_ACMA][1][22] = 42,
@@ -47579,13 +46127,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][22] = 46,
[3][1][2][1][RTW89_THAILAND][0][22] = 6,
[3][1][2][1][RTW89_FCC][1][37] = 30,
- [3][1][2][1][RTW89_FCC][2][37] = 52,
[3][1][2][1][RTW89_ETSI][1][37] = 42,
[3][1][2][1][RTW89_ETSI][0][37] = 6,
[3][1][2][1][RTW89_MKK][1][37] = 48,
[3][1][2][1][RTW89_MKK][0][37] = 8,
[3][1][2][1][RTW89_IC][1][37] = 30,
- [3][1][2][1][RTW89_IC][2][37] = 52,
[3][1][2][1][RTW89_KCC][1][37] = 40,
[3][1][2][1][RTW89_KCC][0][37] = 12,
[3][1][2][1][RTW89_ACMA][1][37] = 42,
@@ -47598,13 +46144,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][37] = 46,
[3][1][2][1][RTW89_THAILAND][0][37] = 6,
[3][1][2][1][RTW89_FCC][1][52] = 30,
- [3][1][2][1][RTW89_FCC][2][52] = 127,
[3][1][2][1][RTW89_ETSI][1][52] = 127,
[3][1][2][1][RTW89_ETSI][0][52] = 127,
[3][1][2][1][RTW89_MKK][1][52] = 127,
[3][1][2][1][RTW89_MKK][0][52] = 127,
[3][1][2][1][RTW89_IC][1][52] = 30,
- [3][1][2][1][RTW89_IC][2][52] = 56,
[3][1][2][1][RTW89_KCC][1][52] = 48,
[3][1][2][1][RTW89_KCC][0][52] = 127,
[3][1][2][1][RTW89_ACMA][1][52] = 127,
@@ -47617,13 +46161,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][52] = 127,
[3][1][2][1][RTW89_THAILAND][0][52] = 127,
[3][1][2][1][RTW89_FCC][1][67] = 32,
- [3][1][2][1][RTW89_FCC][2][67] = 54,
[3][1][2][1][RTW89_ETSI][1][67] = 127,
[3][1][2][1][RTW89_ETSI][0][67] = 127,
[3][1][2][1][RTW89_MKK][1][67] = 127,
[3][1][2][1][RTW89_MKK][0][67] = 127,
[3][1][2][1][RTW89_IC][1][67] = 32,
- [3][1][2][1][RTW89_IC][2][67] = 54,
[3][1][2][1][RTW89_KCC][1][67] = 48,
[3][1][2][1][RTW89_KCC][0][67] = 127,
[3][1][2][1][RTW89_ACMA][1][67] = 127,
@@ -47636,13 +46178,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][67] = 127,
[3][1][2][1][RTW89_THAILAND][0][67] = 127,
[3][1][2][1][RTW89_FCC][1][82] = 32,
- [3][1][2][1][RTW89_FCC][2][82] = 127,
[3][1][2][1][RTW89_ETSI][1][82] = 127,
[3][1][2][1][RTW89_ETSI][0][82] = 127,
[3][1][2][1][RTW89_MKK][1][82] = 127,
[3][1][2][1][RTW89_MKK][0][82] = 127,
[3][1][2][1][RTW89_IC][1][82] = 32,
- [3][1][2][1][RTW89_IC][2][82] = 127,
[3][1][2][1][RTW89_KCC][1][82] = 24,
[3][1][2][1][RTW89_KCC][0][82] = 127,
[3][1][2][1][RTW89_ACMA][1][82] = 127,
@@ -47655,13 +46195,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][82] = 127,
[3][1][2][1][RTW89_THAILAND][0][82] = 127,
[3][1][2][1][RTW89_FCC][1][97] = 32,
- [3][1][2][1][RTW89_FCC][2][97] = 127,
[3][1][2][1][RTW89_ETSI][1][97] = 127,
[3][1][2][1][RTW89_ETSI][0][97] = 127,
[3][1][2][1][RTW89_MKK][1][97] = 127,
[3][1][2][1][RTW89_MKK][0][97] = 127,
[3][1][2][1][RTW89_IC][1][97] = 32,
- [3][1][2][1][RTW89_IC][2][97] = 127,
[3][1][2][1][RTW89_KCC][1][97] = 24,
[3][1][2][1][RTW89_KCC][0][97] = 127,
[3][1][2][1][RTW89_ACMA][1][97] = 127,
@@ -47674,13 +46212,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][97] = 127,
[3][1][2][1][RTW89_THAILAND][0][97] = 127,
[3][1][2][1][RTW89_FCC][1][112] = 127,
- [3][1][2][1][RTW89_FCC][2][112] = 127,
[3][1][2][1][RTW89_ETSI][1][112] = 127,
[3][1][2][1][RTW89_ETSI][0][112] = 127,
[3][1][2][1][RTW89_MKK][1][112] = 127,
[3][1][2][1][RTW89_MKK][0][112] = 127,
[3][1][2][1][RTW89_IC][1][112] = 127,
- [3][1][2][1][RTW89_IC][2][112] = 127,
[3][1][2][1][RTW89_KCC][1][112] = 127,
[3][1][2][1][RTW89_KCC][0][112] = 127,
[3][1][2][1][RTW89_ACMA][1][112] = 127,
@@ -49374,7 +47910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 46,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 46,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -49387,7 +47923,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 44,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 44,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -49400,7 +47936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 34,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 34,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -49738,7 +48274,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 20,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CN][48] = 127,
@@ -49751,7 +48287,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 20,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CN][50] = 127,
@@ -49764,7 +48300,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 8,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 8,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CN][52] = 127,
@@ -50102,7 +48638,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 56,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 56,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -50115,7 +48651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 58,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 58,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -50128,7 +48664,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 56,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 56,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -50466,7 +49002,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 34,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 34,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CN][48] = 127,
@@ -50479,7 +49015,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 34,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 34,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CN][50] = 127,
@@ -50492,7 +49028,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 30,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CN][52] = 127,
@@ -50830,7 +49366,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -50843,7 +49379,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -50856,7 +49392,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 64,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 64,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -51194,7 +49730,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 40,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CN][48] = 127,
@@ -51207,7 +49743,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 40,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CN][50] = 127,
@@ -51220,7 +49756,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 40,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CN][52] = 127,
@@ -51238,1164 +49774,778 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_6G_CH_NUM] = {
[0][0][RTW89_WW][0][0] = -16,
[0][0][RTW89_WW][1][0] = -16,
- [0][0][RTW89_WW][2][0] = 44,
[0][0][RTW89_WW][0][2] = -18,
[0][0][RTW89_WW][1][2] = -18,
- [0][0][RTW89_WW][2][2] = 44,
[0][0][RTW89_WW][0][4] = -18,
[0][0][RTW89_WW][1][4] = -18,
- [0][0][RTW89_WW][2][4] = 44,
[0][0][RTW89_WW][0][6] = -18,
[0][0][RTW89_WW][1][6] = -18,
- [0][0][RTW89_WW][2][6] = 44,
[0][0][RTW89_WW][0][8] = -18,
[0][0][RTW89_WW][1][8] = -18,
- [0][0][RTW89_WW][2][8] = 44,
[0][0][RTW89_WW][0][10] = -18,
[0][0][RTW89_WW][1][10] = -18,
- [0][0][RTW89_WW][2][10] = 44,
[0][0][RTW89_WW][0][12] = -18,
[0][0][RTW89_WW][1][12] = -18,
- [0][0][RTW89_WW][2][12] = 44,
[0][0][RTW89_WW][0][14] = -18,
[0][0][RTW89_WW][1][14] = -18,
- [0][0][RTW89_WW][2][14] = 44,
[0][0][RTW89_WW][0][15] = -18,
[0][0][RTW89_WW][1][15] = -18,
- [0][0][RTW89_WW][2][15] = 44,
[0][0][RTW89_WW][0][17] = -18,
[0][0][RTW89_WW][1][17] = -18,
- [0][0][RTW89_WW][2][17] = 44,
[0][0][RTW89_WW][0][19] = -18,
[0][0][RTW89_WW][1][19] = -18,
- [0][0][RTW89_WW][2][19] = 44,
[0][0][RTW89_WW][0][21] = -18,
[0][0][RTW89_WW][1][21] = -18,
- [0][0][RTW89_WW][2][21] = 44,
[0][0][RTW89_WW][0][23] = -18,
[0][0][RTW89_WW][1][23] = -18,
- [0][0][RTW89_WW][2][23] = 54,
[0][0][RTW89_WW][0][25] = -18,
[0][0][RTW89_WW][1][25] = -18,
- [0][0][RTW89_WW][2][25] = 54,
[0][0][RTW89_WW][0][27] = -18,
[0][0][RTW89_WW][1][27] = -18,
- [0][0][RTW89_WW][2][27] = 54,
[0][0][RTW89_WW][0][29] = -18,
[0][0][RTW89_WW][1][29] = -18,
- [0][0][RTW89_WW][2][29] = 54,
[0][0][RTW89_WW][0][30] = -18,
[0][0][RTW89_WW][1][30] = -18,
- [0][0][RTW89_WW][2][30] = 54,
[0][0][RTW89_WW][0][32] = -18,
[0][0][RTW89_WW][1][32] = -18,
- [0][0][RTW89_WW][2][32] = 54,
[0][0][RTW89_WW][0][34] = -18,
[0][0][RTW89_WW][1][34] = -18,
- [0][0][RTW89_WW][2][34] = 54,
[0][0][RTW89_WW][0][36] = -18,
[0][0][RTW89_WW][1][36] = -18,
- [0][0][RTW89_WW][2][36] = 54,
[0][0][RTW89_WW][0][38] = -18,
[0][0][RTW89_WW][1][38] = -18,
- [0][0][RTW89_WW][2][38] = 54,
[0][0][RTW89_WW][0][40] = -18,
[0][0][RTW89_WW][1][40] = -18,
- [0][0][RTW89_WW][2][40] = 54,
[0][0][RTW89_WW][0][42] = -18,
[0][0][RTW89_WW][1][42] = -18,
- [0][0][RTW89_WW][2][42] = 54,
[0][0][RTW89_WW][0][44] = -16,
[0][0][RTW89_WW][1][44] = -16,
- [0][0][RTW89_WW][2][44] = 56,
[0][0][RTW89_WW][0][45] = -16,
[0][0][RTW89_WW][1][45] = -16,
- [0][0][RTW89_WW][2][45] = 56,
[0][0][RTW89_WW][0][47] = -18,
[0][0][RTW89_WW][1][47] = -18,
- [0][0][RTW89_WW][2][47] = 56,
[0][0][RTW89_WW][0][49] = -18,
[0][0][RTW89_WW][1][49] = -18,
- [0][0][RTW89_WW][2][49] = 56,
[0][0][RTW89_WW][0][51] = -18,
[0][0][RTW89_WW][1][51] = -18,
- [0][0][RTW89_WW][2][51] = 56,
[0][0][RTW89_WW][0][53] = -16,
[0][0][RTW89_WW][1][53] = -16,
- [0][0][RTW89_WW][2][53] = 56,
[0][0][RTW89_WW][0][55] = -18,
[0][0][RTW89_WW][1][55] = -18,
- [0][0][RTW89_WW][2][55] = 56,
[0][0][RTW89_WW][0][57] = -18,
[0][0][RTW89_WW][1][57] = -18,
- [0][0][RTW89_WW][2][57] = 56,
[0][0][RTW89_WW][0][59] = -18,
[0][0][RTW89_WW][1][59] = -18,
- [0][0][RTW89_WW][2][59] = 56,
[0][0][RTW89_WW][0][60] = -18,
[0][0][RTW89_WW][1][60] = -18,
- [0][0][RTW89_WW][2][60] = 56,
[0][0][RTW89_WW][0][62] = -18,
[0][0][RTW89_WW][1][62] = -18,
- [0][0][RTW89_WW][2][62] = 56,
[0][0][RTW89_WW][0][64] = -18,
[0][0][RTW89_WW][1][64] = -18,
- [0][0][RTW89_WW][2][64] = 56,
[0][0][RTW89_WW][0][66] = -18,
[0][0][RTW89_WW][1][66] = -18,
- [0][0][RTW89_WW][2][66] = 56,
[0][0][RTW89_WW][0][68] = -18,
[0][0][RTW89_WW][1][68] = -18,
- [0][0][RTW89_WW][2][68] = 56,
[0][0][RTW89_WW][0][70] = -16,
[0][0][RTW89_WW][1][70] = -16,
- [0][0][RTW89_WW][2][70] = 56,
[0][0][RTW89_WW][0][72] = -18,
[0][0][RTW89_WW][1][72] = -18,
- [0][0][RTW89_WW][2][72] = 56,
[0][0][RTW89_WW][0][74] = -18,
[0][0][RTW89_WW][1][74] = -18,
- [0][0][RTW89_WW][2][74] = 56,
[0][0][RTW89_WW][0][75] = -18,
[0][0][RTW89_WW][1][75] = -18,
- [0][0][RTW89_WW][2][75] = 56,
[0][0][RTW89_WW][0][77] = -18,
[0][0][RTW89_WW][1][77] = -18,
- [0][0][RTW89_WW][2][77] = 56,
[0][0][RTW89_WW][0][79] = -18,
[0][0][RTW89_WW][1][79] = -18,
- [0][0][RTW89_WW][2][79] = 56,
[0][0][RTW89_WW][0][81] = -18,
[0][0][RTW89_WW][1][81] = -18,
- [0][0][RTW89_WW][2][81] = 56,
[0][0][RTW89_WW][0][83] = -18,
[0][0][RTW89_WW][1][83] = -18,
- [0][0][RTW89_WW][2][83] = 56,
[0][0][RTW89_WW][0][85] = -18,
[0][0][RTW89_WW][1][85] = -18,
- [0][0][RTW89_WW][2][85] = 56,
[0][0][RTW89_WW][0][87] = -16,
[0][0][RTW89_WW][1][87] = -16,
- [0][0][RTW89_WW][2][87] = 0,
[0][0][RTW89_WW][0][89] = -16,
[0][0][RTW89_WW][1][89] = -16,
- [0][0][RTW89_WW][2][89] = 0,
[0][0][RTW89_WW][0][90] = -16,
[0][0][RTW89_WW][1][90] = -16,
- [0][0][RTW89_WW][2][90] = 0,
[0][0][RTW89_WW][0][92] = -16,
[0][0][RTW89_WW][1][92] = -16,
- [0][0][RTW89_WW][2][92] = 0,
[0][0][RTW89_WW][0][94] = -16,
[0][0][RTW89_WW][1][94] = -16,
- [0][0][RTW89_WW][2][94] = 0,
[0][0][RTW89_WW][0][96] = -16,
[0][0][RTW89_WW][1][96] = -16,
- [0][0][RTW89_WW][2][96] = 0,
[0][0][RTW89_WW][0][98] = -16,
[0][0][RTW89_WW][1][98] = -16,
- [0][0][RTW89_WW][2][98] = 0,
[0][0][RTW89_WW][0][100] = -16,
[0][0][RTW89_WW][1][100] = -16,
- [0][0][RTW89_WW][2][100] = 0,
[0][0][RTW89_WW][0][102] = -16,
[0][0][RTW89_WW][1][102] = -16,
- [0][0][RTW89_WW][2][102] = 0,
[0][0][RTW89_WW][0][104] = -16,
[0][0][RTW89_WW][1][104] = -16,
- [0][0][RTW89_WW][2][104] = 0,
[0][0][RTW89_WW][0][105] = -16,
[0][0][RTW89_WW][1][105] = -16,
- [0][0][RTW89_WW][2][105] = 0,
[0][0][RTW89_WW][0][107] = -12,
[0][0][RTW89_WW][1][107] = -12,
- [0][0][RTW89_WW][2][107] = 0,
[0][0][RTW89_WW][0][109] = -12,
[0][0][RTW89_WW][1][109] = -12,
- [0][0][RTW89_WW][2][109] = 0,
[0][0][RTW89_WW][0][111] = 0,
[0][0][RTW89_WW][1][111] = 0,
- [0][0][RTW89_WW][2][111] = 0,
[0][0][RTW89_WW][0][113] = 0,
[0][0][RTW89_WW][1][113] = 0,
- [0][0][RTW89_WW][2][113] = 0,
[0][0][RTW89_WW][0][115] = 0,
[0][0][RTW89_WW][1][115] = 0,
- [0][0][RTW89_WW][2][115] = 0,
[0][0][RTW89_WW][0][117] = 0,
[0][0][RTW89_WW][1][117] = 0,
- [0][0][RTW89_WW][2][117] = 0,
[0][0][RTW89_WW][0][119] = 0,
[0][0][RTW89_WW][1][119] = 0,
- [0][0][RTW89_WW][2][119] = 0,
[0][1][RTW89_WW][0][0] = -40,
[0][1][RTW89_WW][1][0] = -40,
- [0][1][RTW89_WW][2][0] = 32,
[0][1][RTW89_WW][0][2] = -40,
[0][1][RTW89_WW][1][2] = -40,
- [0][1][RTW89_WW][2][2] = 32,
[0][1][RTW89_WW][0][4] = -40,
[0][1][RTW89_WW][1][4] = -40,
- [0][1][RTW89_WW][2][4] = 32,
[0][1][RTW89_WW][0][6] = -40,
[0][1][RTW89_WW][1][6] = -40,
- [0][1][RTW89_WW][2][6] = 32,
[0][1][RTW89_WW][0][8] = -40,
[0][1][RTW89_WW][1][8] = -40,
- [0][1][RTW89_WW][2][8] = 32,
[0][1][RTW89_WW][0][10] = -40,
[0][1][RTW89_WW][1][10] = -40,
- [0][1][RTW89_WW][2][10] = 32,
[0][1][RTW89_WW][0][12] = -40,
[0][1][RTW89_WW][1][12] = -40,
- [0][1][RTW89_WW][2][12] = 32,
[0][1][RTW89_WW][0][14] = -40,
[0][1][RTW89_WW][1][14] = -40,
- [0][1][RTW89_WW][2][14] = 32,
[0][1][RTW89_WW][0][15] = -40,
[0][1][RTW89_WW][1][15] = -40,
- [0][1][RTW89_WW][2][15] = 32,
[0][1][RTW89_WW][0][17] = -40,
[0][1][RTW89_WW][1][17] = -40,
- [0][1][RTW89_WW][2][17] = 32,
[0][1][RTW89_WW][0][19] = -40,
[0][1][RTW89_WW][1][19] = -40,
- [0][1][RTW89_WW][2][19] = 32,
[0][1][RTW89_WW][0][21] = -40,
[0][1][RTW89_WW][1][21] = -40,
- [0][1][RTW89_WW][2][21] = 32,
[0][1][RTW89_WW][0][23] = -40,
[0][1][RTW89_WW][1][23] = -40,
- [0][1][RTW89_WW][2][23] = 32,
[0][1][RTW89_WW][0][25] = -40,
[0][1][RTW89_WW][1][25] = -40,
- [0][1][RTW89_WW][2][25] = 32,
[0][1][RTW89_WW][0][27] = -40,
[0][1][RTW89_WW][1][27] = -40,
- [0][1][RTW89_WW][2][27] = 32,
[0][1][RTW89_WW][0][29] = -40,
[0][1][RTW89_WW][1][29] = -40,
- [0][1][RTW89_WW][2][29] = 32,
[0][1][RTW89_WW][0][30] = -40,
[0][1][RTW89_WW][1][30] = -40,
- [0][1][RTW89_WW][2][30] = 32,
[0][1][RTW89_WW][0][32] = -40,
[0][1][RTW89_WW][1][32] = -40,
- [0][1][RTW89_WW][2][32] = 32,
[0][1][RTW89_WW][0][34] = -40,
[0][1][RTW89_WW][1][34] = -40,
- [0][1][RTW89_WW][2][34] = 32,
[0][1][RTW89_WW][0][36] = -40,
[0][1][RTW89_WW][1][36] = -40,
- [0][1][RTW89_WW][2][36] = 32,
[0][1][RTW89_WW][0][38] = -40,
[0][1][RTW89_WW][1][38] = -40,
- [0][1][RTW89_WW][2][38] = 32,
[0][1][RTW89_WW][0][40] = -40,
[0][1][RTW89_WW][1][40] = -40,
- [0][1][RTW89_WW][2][40] = 32,
[0][1][RTW89_WW][0][42] = -40,
[0][1][RTW89_WW][1][42] = -40,
- [0][1][RTW89_WW][2][42] = 32,
[0][1][RTW89_WW][0][44] = -40,
[0][1][RTW89_WW][1][44] = -40,
- [0][1][RTW89_WW][2][44] = 32,
[0][1][RTW89_WW][0][45] = -40,
[0][1][RTW89_WW][1][45] = -40,
- [0][1][RTW89_WW][2][45] = 32,
[0][1][RTW89_WW][0][47] = -40,
[0][1][RTW89_WW][1][47] = -40,
- [0][1][RTW89_WW][2][47] = 32,
[0][1][RTW89_WW][0][49] = -40,
[0][1][RTW89_WW][1][49] = -40,
- [0][1][RTW89_WW][2][49] = 32,
[0][1][RTW89_WW][0][51] = -40,
[0][1][RTW89_WW][1][51] = -40,
- [0][1][RTW89_WW][2][51] = 32,
[0][1][RTW89_WW][0][53] = -40,
[0][1][RTW89_WW][1][53] = -40,
- [0][1][RTW89_WW][2][53] = 32,
[0][1][RTW89_WW][0][55] = -40,
[0][1][RTW89_WW][1][55] = -40,
- [0][1][RTW89_WW][2][55] = 30,
[0][1][RTW89_WW][0][57] = -40,
[0][1][RTW89_WW][1][57] = -40,
- [0][1][RTW89_WW][2][57] = 30,
[0][1][RTW89_WW][0][59] = -40,
[0][1][RTW89_WW][1][59] = -40,
- [0][1][RTW89_WW][2][59] = 30,
[0][1][RTW89_WW][0][60] = -40,
[0][1][RTW89_WW][1][60] = -40,
- [0][1][RTW89_WW][2][60] = 30,
[0][1][RTW89_WW][0][62] = -40,
[0][1][RTW89_WW][1][62] = -40,
- [0][1][RTW89_WW][2][62] = 30,
[0][1][RTW89_WW][0][64] = -40,
[0][1][RTW89_WW][1][64] = -40,
- [0][1][RTW89_WW][2][64] = 30,
[0][1][RTW89_WW][0][66] = -40,
[0][1][RTW89_WW][1][66] = -40,
- [0][1][RTW89_WW][2][66] = 30,
[0][1][RTW89_WW][0][68] = -40,
[0][1][RTW89_WW][1][68] = -40,
- [0][1][RTW89_WW][2][68] = 30,
[0][1][RTW89_WW][0][70] = -38,
[0][1][RTW89_WW][1][70] = -38,
- [0][1][RTW89_WW][2][70] = 30,
[0][1][RTW89_WW][0][72] = -38,
[0][1][RTW89_WW][1][72] = -38,
- [0][1][RTW89_WW][2][72] = 30,
[0][1][RTW89_WW][0][74] = -38,
[0][1][RTW89_WW][1][74] = -38,
- [0][1][RTW89_WW][2][74] = 30,
[0][1][RTW89_WW][0][75] = -38,
[0][1][RTW89_WW][1][75] = -38,
- [0][1][RTW89_WW][2][75] = 30,
[0][1][RTW89_WW][0][77] = -38,
[0][1][RTW89_WW][1][77] = -38,
- [0][1][RTW89_WW][2][77] = 30,
[0][1][RTW89_WW][0][79] = -38,
[0][1][RTW89_WW][1][79] = -38,
- [0][1][RTW89_WW][2][79] = 30,
[0][1][RTW89_WW][0][81] = -38,
[0][1][RTW89_WW][1][81] = -38,
- [0][1][RTW89_WW][2][81] = 30,
[0][1][RTW89_WW][0][83] = -38,
[0][1][RTW89_WW][1][83] = -38,
- [0][1][RTW89_WW][2][83] = 30,
[0][1][RTW89_WW][0][85] = -38,
[0][1][RTW89_WW][1][85] = -38,
- [0][1][RTW89_WW][2][85] = 30,
[0][1][RTW89_WW][0][87] = -40,
[0][1][RTW89_WW][1][87] = -40,
- [0][1][RTW89_WW][2][87] = 0,
[0][1][RTW89_WW][0][89] = -38,
[0][1][RTW89_WW][1][89] = -38,
- [0][1][RTW89_WW][2][89] = 0,
[0][1][RTW89_WW][0][90] = -38,
[0][1][RTW89_WW][1][90] = -38,
- [0][1][RTW89_WW][2][90] = 0,
[0][1][RTW89_WW][0][92] = -38,
[0][1][RTW89_WW][1][92] = -38,
- [0][1][RTW89_WW][2][92] = 0,
[0][1][RTW89_WW][0][94] = -38,
[0][1][RTW89_WW][1][94] = -38,
- [0][1][RTW89_WW][2][94] = 0,
[0][1][RTW89_WW][0][96] = -38,
[0][1][RTW89_WW][1][96] = -38,
- [0][1][RTW89_WW][2][96] = 0,
[0][1][RTW89_WW][0][98] = -38,
[0][1][RTW89_WW][1][98] = -38,
- [0][1][RTW89_WW][2][98] = 0,
[0][1][RTW89_WW][0][100] = -38,
[0][1][RTW89_WW][1][100] = -38,
- [0][1][RTW89_WW][2][100] = 0,
[0][1][RTW89_WW][0][102] = -38,
[0][1][RTW89_WW][1][102] = -38,
- [0][1][RTW89_WW][2][102] = 0,
[0][1][RTW89_WW][0][104] = -38,
[0][1][RTW89_WW][1][104] = -38,
- [0][1][RTW89_WW][2][104] = 0,
[0][1][RTW89_WW][0][105] = -38,
[0][1][RTW89_WW][1][105] = -38,
- [0][1][RTW89_WW][2][105] = 0,
[0][1][RTW89_WW][0][107] = -34,
[0][1][RTW89_WW][1][107] = -34,
- [0][1][RTW89_WW][2][107] = 0,
[0][1][RTW89_WW][0][109] = -34,
[0][1][RTW89_WW][1][109] = -34,
- [0][1][RTW89_WW][2][109] = 0,
[0][1][RTW89_WW][0][111] = 0,
[0][1][RTW89_WW][1][111] = 0,
- [0][1][RTW89_WW][2][111] = 0,
[0][1][RTW89_WW][0][113] = 0,
[0][1][RTW89_WW][1][113] = 0,
- [0][1][RTW89_WW][2][113] = 0,
[0][1][RTW89_WW][0][115] = 0,
[0][1][RTW89_WW][1][115] = 0,
- [0][1][RTW89_WW][2][115] = 0,
[0][1][RTW89_WW][0][117] = 0,
[0][1][RTW89_WW][1][117] = 0,
- [0][1][RTW89_WW][2][117] = 0,
[0][1][RTW89_WW][0][119] = 0,
[0][1][RTW89_WW][1][119] = 0,
- [0][1][RTW89_WW][2][119] = 0,
[1][0][RTW89_WW][0][0] = -4,
[1][0][RTW89_WW][1][0] = -4,
- [1][0][RTW89_WW][2][0] = 52,
[1][0][RTW89_WW][0][2] = -4,
[1][0][RTW89_WW][1][2] = -4,
- [1][0][RTW89_WW][2][2] = 52,
[1][0][RTW89_WW][0][4] = -4,
[1][0][RTW89_WW][1][4] = -4,
- [1][0][RTW89_WW][2][4] = 52,
[1][0][RTW89_WW][0][6] = -4,
[1][0][RTW89_WW][1][6] = -4,
- [1][0][RTW89_WW][2][6] = 52,
[1][0][RTW89_WW][0][8] = -4,
[1][0][RTW89_WW][1][8] = -4,
- [1][0][RTW89_WW][2][8] = 52,
[1][0][RTW89_WW][0][10] = -4,
[1][0][RTW89_WW][1][10] = -4,
- [1][0][RTW89_WW][2][10] = 52,
[1][0][RTW89_WW][0][12] = -4,
[1][0][RTW89_WW][1][12] = -4,
- [1][0][RTW89_WW][2][12] = 52,
[1][0][RTW89_WW][0][14] = -4,
[1][0][RTW89_WW][1][14] = -4,
- [1][0][RTW89_WW][2][14] = 52,
[1][0][RTW89_WW][0][15] = -4,
[1][0][RTW89_WW][1][15] = -4,
- [1][0][RTW89_WW][2][15] = 52,
[1][0][RTW89_WW][0][17] = -4,
[1][0][RTW89_WW][1][17] = -4,
- [1][0][RTW89_WW][2][17] = 52,
[1][0][RTW89_WW][0][19] = -4,
[1][0][RTW89_WW][1][19] = -4,
- [1][0][RTW89_WW][2][19] = 52,
[1][0][RTW89_WW][0][21] = -4,
[1][0][RTW89_WW][1][21] = -4,
- [1][0][RTW89_WW][2][21] = 52,
[1][0][RTW89_WW][0][23] = -4,
[1][0][RTW89_WW][1][23] = -4,
- [1][0][RTW89_WW][2][23] = 66,
[1][0][RTW89_WW][0][25] = -4,
[1][0][RTW89_WW][1][25] = -4,
- [1][0][RTW89_WW][2][25] = 66,
[1][0][RTW89_WW][0][27] = -4,
[1][0][RTW89_WW][1][27] = -4,
- [1][0][RTW89_WW][2][27] = 66,
[1][0][RTW89_WW][0][29] = -4,
[1][0][RTW89_WW][1][29] = -4,
- [1][0][RTW89_WW][2][29] = 66,
[1][0][RTW89_WW][0][30] = -4,
[1][0][RTW89_WW][1][30] = -4,
- [1][0][RTW89_WW][2][30] = 66,
[1][0][RTW89_WW][0][32] = -4,
[1][0][RTW89_WW][1][32] = -4,
- [1][0][RTW89_WW][2][32] = 66,
[1][0][RTW89_WW][0][34] = -4,
[1][0][RTW89_WW][1][34] = -4,
- [1][0][RTW89_WW][2][34] = 66,
[1][0][RTW89_WW][0][36] = -4,
[1][0][RTW89_WW][1][36] = -4,
- [1][0][RTW89_WW][2][36] = 66,
[1][0][RTW89_WW][0][38] = -4,
[1][0][RTW89_WW][1][38] = -4,
- [1][0][RTW89_WW][2][38] = 66,
[1][0][RTW89_WW][0][40] = -4,
[1][0][RTW89_WW][1][40] = -4,
- [1][0][RTW89_WW][2][40] = 66,
[1][0][RTW89_WW][0][42] = -4,
[1][0][RTW89_WW][1][42] = -4,
- [1][0][RTW89_WW][2][42] = 66,
[1][0][RTW89_WW][0][44] = -4,
[1][0][RTW89_WW][1][44] = -4,
- [1][0][RTW89_WW][2][44] = 66,
[1][0][RTW89_WW][0][45] = -4,
[1][0][RTW89_WW][1][45] = -4,
- [1][0][RTW89_WW][2][45] = 68,
[1][0][RTW89_WW][0][47] = -4,
[1][0][RTW89_WW][1][47] = -4,
- [1][0][RTW89_WW][2][47] = 68,
[1][0][RTW89_WW][0][49] = -4,
[1][0][RTW89_WW][1][49] = -4,
- [1][0][RTW89_WW][2][49] = 68,
[1][0][RTW89_WW][0][51] = -4,
[1][0][RTW89_WW][1][51] = -4,
- [1][0][RTW89_WW][2][51] = 68,
[1][0][RTW89_WW][0][53] = -4,
[1][0][RTW89_WW][1][53] = -4,
- [1][0][RTW89_WW][2][53] = 68,
[1][0][RTW89_WW][0][55] = -4,
[1][0][RTW89_WW][1][55] = -4,
- [1][0][RTW89_WW][2][55] = 68,
[1][0][RTW89_WW][0][57] = -4,
[1][0][RTW89_WW][1][57] = -4,
- [1][0][RTW89_WW][2][57] = 68,
[1][0][RTW89_WW][0][59] = -4,
[1][0][RTW89_WW][1][59] = -4,
- [1][0][RTW89_WW][2][59] = 68,
[1][0][RTW89_WW][0][60] = -4,
[1][0][RTW89_WW][1][60] = -4,
- [1][0][RTW89_WW][2][60] = 68,
[1][0][RTW89_WW][0][62] = -4,
[1][0][RTW89_WW][1][62] = -4,
- [1][0][RTW89_WW][2][62] = 68,
[1][0][RTW89_WW][0][64] = -4,
[1][0][RTW89_WW][1][64] = -4,
- [1][0][RTW89_WW][2][64] = 68,
[1][0][RTW89_WW][0][66] = -4,
[1][0][RTW89_WW][1][66] = -4,
- [1][0][RTW89_WW][2][66] = 68,
[1][0][RTW89_WW][0][68] = -4,
[1][0][RTW89_WW][1][68] = -4,
- [1][0][RTW89_WW][2][68] = 68,
[1][0][RTW89_WW][0][70] = -4,
[1][0][RTW89_WW][1][70] = -4,
- [1][0][RTW89_WW][2][70] = 68,
[1][0][RTW89_WW][0][72] = -4,
[1][0][RTW89_WW][1][72] = -4,
- [1][0][RTW89_WW][2][72] = 68,
[1][0][RTW89_WW][0][74] = -4,
[1][0][RTW89_WW][1][74] = -4,
- [1][0][RTW89_WW][2][74] = 68,
[1][0][RTW89_WW][0][75] = -4,
[1][0][RTW89_WW][1][75] = -4,
- [1][0][RTW89_WW][2][75] = 68,
[1][0][RTW89_WW][0][77] = -4,
[1][0][RTW89_WW][1][77] = -4,
- [1][0][RTW89_WW][2][77] = 68,
[1][0][RTW89_WW][0][79] = -4,
[1][0][RTW89_WW][1][79] = -4,
- [1][0][RTW89_WW][2][79] = 68,
[1][0][RTW89_WW][0][81] = -4,
[1][0][RTW89_WW][1][81] = -4,
- [1][0][RTW89_WW][2][81] = 68,
[1][0][RTW89_WW][0][83] = -4,
[1][0][RTW89_WW][1][83] = -4,
- [1][0][RTW89_WW][2][83] = 68,
[1][0][RTW89_WW][0][85] = -4,
[1][0][RTW89_WW][1][85] = -4,
- [1][0][RTW89_WW][2][85] = 68,
[1][0][RTW89_WW][0][87] = -4,
[1][0][RTW89_WW][1][87] = -4,
- [1][0][RTW89_WW][2][87] = 0,
[1][0][RTW89_WW][0][89] = -4,
[1][0][RTW89_WW][1][89] = -4,
- [1][0][RTW89_WW][2][89] = 0,
[1][0][RTW89_WW][0][90] = -4,
[1][0][RTW89_WW][1][90] = -4,
- [1][0][RTW89_WW][2][90] = 0,
[1][0][RTW89_WW][0][92] = -4,
[1][0][RTW89_WW][1][92] = -4,
- [1][0][RTW89_WW][2][92] = 0,
[1][0][RTW89_WW][0][94] = -4,
[1][0][RTW89_WW][1][94] = -4,
- [1][0][RTW89_WW][2][94] = 0,
[1][0][RTW89_WW][0][96] = -4,
[1][0][RTW89_WW][1][96] = -4,
- [1][0][RTW89_WW][2][96] = 0,
[1][0][RTW89_WW][0][98] = -4,
[1][0][RTW89_WW][1][98] = -4,
- [1][0][RTW89_WW][2][98] = 0,
[1][0][RTW89_WW][0][100] = -4,
[1][0][RTW89_WW][1][100] = -4,
- [1][0][RTW89_WW][2][100] = 0,
[1][0][RTW89_WW][0][102] = -4,
[1][0][RTW89_WW][1][102] = -4,
- [1][0][RTW89_WW][2][102] = 0,
[1][0][RTW89_WW][0][104] = -4,
[1][0][RTW89_WW][1][104] = -4,
- [1][0][RTW89_WW][2][104] = 0,
[1][0][RTW89_WW][0][105] = -4,
[1][0][RTW89_WW][1][105] = -4,
- [1][0][RTW89_WW][2][105] = 0,
[1][0][RTW89_WW][0][107] = -2,
[1][0][RTW89_WW][1][107] = -2,
- [1][0][RTW89_WW][2][107] = 0,
[1][0][RTW89_WW][0][109] = 2,
[1][0][RTW89_WW][1][109] = 2,
- [1][0][RTW89_WW][2][109] = 0,
[1][0][RTW89_WW][0][111] = 0,
[1][0][RTW89_WW][1][111] = 0,
- [1][0][RTW89_WW][2][111] = 0,
[1][0][RTW89_WW][0][113] = 0,
[1][0][RTW89_WW][1][113] = 0,
- [1][0][RTW89_WW][2][113] = 0,
[1][0][RTW89_WW][0][115] = 0,
[1][0][RTW89_WW][1][115] = 0,
- [1][0][RTW89_WW][2][115] = 0,
[1][0][RTW89_WW][0][117] = 0,
[1][0][RTW89_WW][1][117] = 0,
- [1][0][RTW89_WW][2][117] = 0,
[1][0][RTW89_WW][0][119] = 0,
[1][0][RTW89_WW][1][119] = 0,
- [1][0][RTW89_WW][2][119] = 0,
[1][1][RTW89_WW][0][0] = -26,
[1][1][RTW89_WW][1][0] = -26,
- [1][1][RTW89_WW][2][0] = 44,
[1][1][RTW89_WW][0][2] = -28,
[1][1][RTW89_WW][1][2] = -28,
- [1][1][RTW89_WW][2][2] = 44,
[1][1][RTW89_WW][0][4] = -28,
[1][1][RTW89_WW][1][4] = -28,
- [1][1][RTW89_WW][2][4] = 44,
[1][1][RTW89_WW][0][6] = -28,
[1][1][RTW89_WW][1][6] = -28,
- [1][1][RTW89_WW][2][6] = 44,
[1][1][RTW89_WW][0][8] = -28,
[1][1][RTW89_WW][1][8] = -28,
- [1][1][RTW89_WW][2][8] = 44,
[1][1][RTW89_WW][0][10] = -28,
[1][1][RTW89_WW][1][10] = -28,
- [1][1][RTW89_WW][2][10] = 44,
[1][1][RTW89_WW][0][12] = -28,
[1][1][RTW89_WW][1][12] = -28,
- [1][1][RTW89_WW][2][12] = 44,
[1][1][RTW89_WW][0][14] = -28,
[1][1][RTW89_WW][1][14] = -28,
- [1][1][RTW89_WW][2][14] = 44,
[1][1][RTW89_WW][0][15] = -28,
[1][1][RTW89_WW][1][15] = -28,
- [1][1][RTW89_WW][2][15] = 44,
[1][1][RTW89_WW][0][17] = -28,
[1][1][RTW89_WW][1][17] = -28,
- [1][1][RTW89_WW][2][17] = 44,
[1][1][RTW89_WW][0][19] = -28,
[1][1][RTW89_WW][1][19] = -28,
- [1][1][RTW89_WW][2][19] = 44,
[1][1][RTW89_WW][0][21] = -28,
[1][1][RTW89_WW][1][21] = -28,
- [1][1][RTW89_WW][2][21] = 44,
[1][1][RTW89_WW][0][23] = -28,
[1][1][RTW89_WW][1][23] = -28,
- [1][1][RTW89_WW][2][23] = 44,
[1][1][RTW89_WW][0][25] = -28,
[1][1][RTW89_WW][1][25] = -28,
- [1][1][RTW89_WW][2][25] = 44,
[1][1][RTW89_WW][0][27] = -28,
[1][1][RTW89_WW][1][27] = -28,
- [1][1][RTW89_WW][2][27] = 44,
[1][1][RTW89_WW][0][29] = -28,
[1][1][RTW89_WW][1][29] = -28,
- [1][1][RTW89_WW][2][29] = 44,
[1][1][RTW89_WW][0][30] = -28,
[1][1][RTW89_WW][1][30] = -28,
- [1][1][RTW89_WW][2][30] = 44,
[1][1][RTW89_WW][0][32] = -28,
[1][1][RTW89_WW][1][32] = -28,
- [1][1][RTW89_WW][2][32] = 44,
[1][1][RTW89_WW][0][34] = -28,
[1][1][RTW89_WW][1][34] = -28,
- [1][1][RTW89_WW][2][34] = 44,
[1][1][RTW89_WW][0][36] = -28,
[1][1][RTW89_WW][1][36] = -28,
- [1][1][RTW89_WW][2][36] = 44,
[1][1][RTW89_WW][0][38] = -28,
[1][1][RTW89_WW][1][38] = -28,
- [1][1][RTW89_WW][2][38] = 44,
[1][1][RTW89_WW][0][40] = -28,
[1][1][RTW89_WW][1][40] = -28,
- [1][1][RTW89_WW][2][40] = 44,
[1][1][RTW89_WW][0][42] = -28,
[1][1][RTW89_WW][1][42] = -28,
- [1][1][RTW89_WW][2][42] = 44,
[1][1][RTW89_WW][0][44] = -28,
[1][1][RTW89_WW][1][44] = -28,
- [1][1][RTW89_WW][2][44] = 44,
[1][1][RTW89_WW][0][45] = -26,
[1][1][RTW89_WW][1][45] = -26,
- [1][1][RTW89_WW][2][45] = 44,
[1][1][RTW89_WW][0][47] = -28,
[1][1][RTW89_WW][1][47] = -28,
- [1][1][RTW89_WW][2][47] = 44,
[1][1][RTW89_WW][0][49] = -28,
[1][1][RTW89_WW][1][49] = -28,
- [1][1][RTW89_WW][2][49] = 44,
[1][1][RTW89_WW][0][51] = -28,
[1][1][RTW89_WW][1][51] = -28,
- [1][1][RTW89_WW][2][51] = 44,
[1][1][RTW89_WW][0][53] = -26,
[1][1][RTW89_WW][1][53] = -26,
- [1][1][RTW89_WW][2][53] = 44,
[1][1][RTW89_WW][0][55] = -28,
[1][1][RTW89_WW][1][55] = -28,
- [1][1][RTW89_WW][2][55] = 44,
[1][1][RTW89_WW][0][57] = -28,
[1][1][RTW89_WW][1][57] = -28,
- [1][1][RTW89_WW][2][57] = 44,
[1][1][RTW89_WW][0][59] = -28,
[1][1][RTW89_WW][1][59] = -28,
- [1][1][RTW89_WW][2][59] = 44,
[1][1][RTW89_WW][0][60] = -28,
[1][1][RTW89_WW][1][60] = -28,
- [1][1][RTW89_WW][2][60] = 44,
[1][1][RTW89_WW][0][62] = -28,
[1][1][RTW89_WW][1][62] = -28,
- [1][1][RTW89_WW][2][62] = 44,
[1][1][RTW89_WW][0][64] = -28,
[1][1][RTW89_WW][1][64] = -28,
- [1][1][RTW89_WW][2][64] = 44,
[1][1][RTW89_WW][0][66] = -28,
[1][1][RTW89_WW][1][66] = -28,
- [1][1][RTW89_WW][2][66] = 44,
[1][1][RTW89_WW][0][68] = -28,
[1][1][RTW89_WW][1][68] = -28,
- [1][1][RTW89_WW][2][68] = 44,
[1][1][RTW89_WW][0][70] = -26,
[1][1][RTW89_WW][1][70] = -26,
- [1][1][RTW89_WW][2][70] = 44,
[1][1][RTW89_WW][0][72] = -28,
[1][1][RTW89_WW][1][72] = -28,
- [1][1][RTW89_WW][2][72] = 44,
[1][1][RTW89_WW][0][74] = -28,
[1][1][RTW89_WW][1][74] = -28,
- [1][1][RTW89_WW][2][74] = 44,
[1][1][RTW89_WW][0][75] = -28,
[1][1][RTW89_WW][1][75] = -28,
- [1][1][RTW89_WW][2][75] = 44,
[1][1][RTW89_WW][0][77] = -28,
[1][1][RTW89_WW][1][77] = -28,
- [1][1][RTW89_WW][2][77] = 44,
[1][1][RTW89_WW][0][79] = -28,
[1][1][RTW89_WW][1][79] = -28,
- [1][1][RTW89_WW][2][79] = 44,
[1][1][RTW89_WW][0][81] = -28,
[1][1][RTW89_WW][1][81] = -28,
- [1][1][RTW89_WW][2][81] = 44,
[1][1][RTW89_WW][0][83] = -28,
[1][1][RTW89_WW][1][83] = -28,
- [1][1][RTW89_WW][2][83] = 44,
[1][1][RTW89_WW][0][85] = -28,
[1][1][RTW89_WW][1][85] = -28,
- [1][1][RTW89_WW][2][85] = 44,
[1][1][RTW89_WW][0][87] = -28,
[1][1][RTW89_WW][1][87] = -28,
- [1][1][RTW89_WW][2][87] = 0,
[1][1][RTW89_WW][0][89] = -26,
[1][1][RTW89_WW][1][89] = -26,
- [1][1][RTW89_WW][2][89] = 0,
[1][1][RTW89_WW][0][90] = -26,
[1][1][RTW89_WW][1][90] = -26,
- [1][1][RTW89_WW][2][90] = 0,
[1][1][RTW89_WW][0][92] = -26,
[1][1][RTW89_WW][1][92] = -26,
- [1][1][RTW89_WW][2][92] = 0,
[1][1][RTW89_WW][0][94] = -26,
[1][1][RTW89_WW][1][94] = -26,
- [1][1][RTW89_WW][2][94] = 0,
[1][1][RTW89_WW][0][96] = -26,
[1][1][RTW89_WW][1][96] = -26,
- [1][1][RTW89_WW][2][96] = 0,
[1][1][RTW89_WW][0][98] = -26,
[1][1][RTW89_WW][1][98] = -26,
- [1][1][RTW89_WW][2][98] = 0,
[1][1][RTW89_WW][0][100] = -26,
[1][1][RTW89_WW][1][100] = -26,
- [1][1][RTW89_WW][2][100] = 0,
[1][1][RTW89_WW][0][102] = -26,
[1][1][RTW89_WW][1][102] = -26,
- [1][1][RTW89_WW][2][102] = 0,
[1][1][RTW89_WW][0][104] = -26,
[1][1][RTW89_WW][1][104] = -26,
- [1][1][RTW89_WW][2][104] = 0,
[1][1][RTW89_WW][0][105] = -26,
[1][1][RTW89_WW][1][105] = -26,
- [1][1][RTW89_WW][2][105] = 0,
[1][1][RTW89_WW][0][107] = -22,
[1][1][RTW89_WW][1][107] = -22,
- [1][1][RTW89_WW][2][107] = 0,
[1][1][RTW89_WW][0][109] = -22,
[1][1][RTW89_WW][1][109] = -22,
- [1][1][RTW89_WW][2][109] = 0,
[1][1][RTW89_WW][0][111] = 0,
[1][1][RTW89_WW][1][111] = 0,
- [1][1][RTW89_WW][2][111] = 0,
[1][1][RTW89_WW][0][113] = 0,
[1][1][RTW89_WW][1][113] = 0,
- [1][1][RTW89_WW][2][113] = 0,
[1][1][RTW89_WW][0][115] = 0,
[1][1][RTW89_WW][1][115] = 0,
- [1][1][RTW89_WW][2][115] = 0,
[1][1][RTW89_WW][0][117] = 0,
[1][1][RTW89_WW][1][117] = 0,
- [1][1][RTW89_WW][2][117] = 0,
[1][1][RTW89_WW][0][119] = 0,
[1][1][RTW89_WW][1][119] = 0,
- [1][1][RTW89_WW][2][119] = 0,
[2][0][RTW89_WW][0][0] = -2,
[2][0][RTW89_WW][1][0] = -2,
- [2][0][RTW89_WW][2][0] = 60,
[2][0][RTW89_WW][0][2] = -2,
[2][0][RTW89_WW][1][2] = -2,
- [2][0][RTW89_WW][2][2] = 60,
[2][0][RTW89_WW][0][4] = -2,
[2][0][RTW89_WW][1][4] = -2,
- [2][0][RTW89_WW][2][4] = 60,
[2][0][RTW89_WW][0][6] = -2,
[2][0][RTW89_WW][1][6] = -2,
- [2][0][RTW89_WW][2][6] = 60,
[2][0][RTW89_WW][0][8] = -2,
[2][0][RTW89_WW][1][8] = -2,
- [2][0][RTW89_WW][2][8] = 60,
[2][0][RTW89_WW][0][10] = -2,
[2][0][RTW89_WW][1][10] = -2,
- [2][0][RTW89_WW][2][10] = 60,
[2][0][RTW89_WW][0][12] = -2,
[2][0][RTW89_WW][1][12] = -2,
- [2][0][RTW89_WW][2][12] = 60,
[2][0][RTW89_WW][0][14] = -2,
[2][0][RTW89_WW][1][14] = -2,
- [2][0][RTW89_WW][2][14] = 60,
[2][0][RTW89_WW][0][15] = -2,
[2][0][RTW89_WW][1][15] = -2,
- [2][0][RTW89_WW][2][15] = 60,
[2][0][RTW89_WW][0][17] = -2,
[2][0][RTW89_WW][1][17] = -2,
- [2][0][RTW89_WW][2][17] = 60,
[2][0][RTW89_WW][0][19] = -2,
[2][0][RTW89_WW][1][19] = -2,
- [2][0][RTW89_WW][2][19] = 60,
[2][0][RTW89_WW][0][21] = -2,
[2][0][RTW89_WW][1][21] = -2,
- [2][0][RTW89_WW][2][21] = 60,
[2][0][RTW89_WW][0][23] = -2,
[2][0][RTW89_WW][1][23] = -2,
- [2][0][RTW89_WW][2][23] = 70,
[2][0][RTW89_WW][0][25] = -2,
[2][0][RTW89_WW][1][25] = -2,
- [2][0][RTW89_WW][2][25] = 70,
[2][0][RTW89_WW][0][27] = -2,
[2][0][RTW89_WW][1][27] = -2,
- [2][0][RTW89_WW][2][27] = 70,
[2][0][RTW89_WW][0][29] = -2,
[2][0][RTW89_WW][1][29] = -2,
- [2][0][RTW89_WW][2][29] = 70,
[2][0][RTW89_WW][0][30] = -2,
[2][0][RTW89_WW][1][30] = -2,
- [2][0][RTW89_WW][2][30] = 70,
[2][0][RTW89_WW][0][32] = -2,
[2][0][RTW89_WW][1][32] = -2,
- [2][0][RTW89_WW][2][32] = 70,
[2][0][RTW89_WW][0][34] = -2,
[2][0][RTW89_WW][1][34] = -2,
- [2][0][RTW89_WW][2][34] = 70,
[2][0][RTW89_WW][0][36] = -2,
[2][0][RTW89_WW][1][36] = -2,
- [2][0][RTW89_WW][2][36] = 70,
[2][0][RTW89_WW][0][38] = -2,
[2][0][RTW89_WW][1][38] = -2,
- [2][0][RTW89_WW][2][38] = 70,
[2][0][RTW89_WW][0][40] = -2,
[2][0][RTW89_WW][1][40] = -2,
- [2][0][RTW89_WW][2][40] = 70,
[2][0][RTW89_WW][0][42] = -2,
[2][0][RTW89_WW][1][42] = -2,
- [2][0][RTW89_WW][2][42] = 70,
[2][0][RTW89_WW][0][44] = -2,
[2][0][RTW89_WW][1][44] = -2,
- [2][0][RTW89_WW][2][44] = 70,
[2][0][RTW89_WW][0][45] = -2,
[2][0][RTW89_WW][1][45] = -2,
- [2][0][RTW89_WW][2][45] = 70,
[2][0][RTW89_WW][0][47] = -2,
[2][0][RTW89_WW][1][47] = -2,
- [2][0][RTW89_WW][2][47] = 70,
[2][0][RTW89_WW][0][49] = -2,
[2][0][RTW89_WW][1][49] = -2,
- [2][0][RTW89_WW][2][49] = 70,
[2][0][RTW89_WW][0][51] = -2,
[2][0][RTW89_WW][1][51] = -2,
- [2][0][RTW89_WW][2][51] = 70,
[2][0][RTW89_WW][0][53] = -2,
[2][0][RTW89_WW][1][53] = -2,
- [2][0][RTW89_WW][2][53] = 70,
[2][0][RTW89_WW][0][55] = -2,
[2][0][RTW89_WW][1][55] = -2,
- [2][0][RTW89_WW][2][55] = 68,
[2][0][RTW89_WW][0][57] = -2,
[2][0][RTW89_WW][1][57] = -2,
- [2][0][RTW89_WW][2][57] = 68,
[2][0][RTW89_WW][0][59] = -2,
[2][0][RTW89_WW][1][59] = -2,
- [2][0][RTW89_WW][2][59] = 68,
[2][0][RTW89_WW][0][60] = -2,
[2][0][RTW89_WW][1][60] = -2,
- [2][0][RTW89_WW][2][60] = 68,
[2][0][RTW89_WW][0][62] = -2,
[2][0][RTW89_WW][1][62] = -2,
- [2][0][RTW89_WW][2][62] = 68,
[2][0][RTW89_WW][0][64] = -2,
[2][0][RTW89_WW][1][64] = -2,
- [2][0][RTW89_WW][2][64] = 68,
[2][0][RTW89_WW][0][66] = -2,
[2][0][RTW89_WW][1][66] = -2,
- [2][0][RTW89_WW][2][66] = 68,
[2][0][RTW89_WW][0][68] = -2,
[2][0][RTW89_WW][1][68] = -2,
- [2][0][RTW89_WW][2][68] = 68,
[2][0][RTW89_WW][0][70] = -2,
[2][0][RTW89_WW][1][70] = -2,
- [2][0][RTW89_WW][2][70] = 68,
[2][0][RTW89_WW][0][72] = -2,
[2][0][RTW89_WW][1][72] = -2,
- [2][0][RTW89_WW][2][72] = 68,
[2][0][RTW89_WW][0][74] = -2,
[2][0][RTW89_WW][1][74] = -2,
- [2][0][RTW89_WW][2][74] = 68,
[2][0][RTW89_WW][0][75] = -2,
[2][0][RTW89_WW][1][75] = -2,
- [2][0][RTW89_WW][2][75] = 68,
[2][0][RTW89_WW][0][77] = -2,
[2][0][RTW89_WW][1][77] = -2,
- [2][0][RTW89_WW][2][77] = 68,
[2][0][RTW89_WW][0][79] = -2,
[2][0][RTW89_WW][1][79] = -2,
- [2][0][RTW89_WW][2][79] = 68,
[2][0][RTW89_WW][0][81] = -2,
[2][0][RTW89_WW][1][81] = -2,
- [2][0][RTW89_WW][2][81] = 68,
[2][0][RTW89_WW][0][83] = -2,
[2][0][RTW89_WW][1][83] = -2,
- [2][0][RTW89_WW][2][83] = 68,
[2][0][RTW89_WW][0][85] = -2,
[2][0][RTW89_WW][1][85] = -2,
- [2][0][RTW89_WW][2][85] = 68,
[2][0][RTW89_WW][0][87] = -2,
[2][0][RTW89_WW][1][87] = -2,
- [2][0][RTW89_WW][2][87] = 0,
[2][0][RTW89_WW][0][89] = -2,
[2][0][RTW89_WW][1][89] = -2,
- [2][0][RTW89_WW][2][89] = 0,
[2][0][RTW89_WW][0][90] = -2,
[2][0][RTW89_WW][1][90] = -2,
- [2][0][RTW89_WW][2][90] = 0,
[2][0][RTW89_WW][0][92] = -2,
[2][0][RTW89_WW][1][92] = -2,
- [2][0][RTW89_WW][2][92] = 0,
[2][0][RTW89_WW][0][94] = -2,
[2][0][RTW89_WW][1][94] = -2,
- [2][0][RTW89_WW][2][94] = 0,
[2][0][RTW89_WW][0][96] = -2,
[2][0][RTW89_WW][1][96] = -2,
- [2][0][RTW89_WW][2][96] = 0,
[2][0][RTW89_WW][0][98] = -2,
[2][0][RTW89_WW][1][98] = -2,
- [2][0][RTW89_WW][2][98] = 0,
[2][0][RTW89_WW][0][100] = -2,
[2][0][RTW89_WW][1][100] = -2,
- [2][0][RTW89_WW][2][100] = 0,
[2][0][RTW89_WW][0][102] = -2,
[2][0][RTW89_WW][1][102] = -2,
- [2][0][RTW89_WW][2][102] = 0,
[2][0][RTW89_WW][0][104] = -2,
[2][0][RTW89_WW][1][104] = -2,
- [2][0][RTW89_WW][2][104] = 0,
[2][0][RTW89_WW][0][105] = -2,
[2][0][RTW89_WW][1][105] = -2,
- [2][0][RTW89_WW][2][105] = 0,
[2][0][RTW89_WW][0][107] = -2,
[2][0][RTW89_WW][1][107] = -2,
- [2][0][RTW89_WW][2][107] = 0,
[2][0][RTW89_WW][0][109] = 12,
[2][0][RTW89_WW][1][109] = 12,
- [2][0][RTW89_WW][2][109] = 0,
[2][0][RTW89_WW][0][111] = 0,
[2][0][RTW89_WW][1][111] = 0,
- [2][0][RTW89_WW][2][111] = 0,
[2][0][RTW89_WW][0][113] = 0,
[2][0][RTW89_WW][1][113] = 0,
- [2][0][RTW89_WW][2][113] = 0,
[2][0][RTW89_WW][0][115] = 0,
[2][0][RTW89_WW][1][115] = 0,
- [2][0][RTW89_WW][2][115] = 0,
[2][0][RTW89_WW][0][117] = 0,
[2][0][RTW89_WW][1][117] = 0,
- [2][0][RTW89_WW][2][117] = 0,
[2][0][RTW89_WW][0][119] = 0,
[2][0][RTW89_WW][1][119] = 0,
- [2][0][RTW89_WW][2][119] = 0,
[2][1][RTW89_WW][0][0] = -16,
[2][1][RTW89_WW][1][0] = -16,
- [2][1][RTW89_WW][2][0] = 54,
[2][1][RTW89_WW][0][2] = -16,
[2][1][RTW89_WW][1][2] = -16,
- [2][1][RTW89_WW][2][2] = 54,
[2][1][RTW89_WW][0][4] = -16,
[2][1][RTW89_WW][1][4] = -16,
- [2][1][RTW89_WW][2][4] = 54,
[2][1][RTW89_WW][0][6] = -16,
[2][1][RTW89_WW][1][6] = -16,
- [2][1][RTW89_WW][2][6] = 54,
[2][1][RTW89_WW][0][8] = -16,
[2][1][RTW89_WW][1][8] = -16,
- [2][1][RTW89_WW][2][8] = 54,
[2][1][RTW89_WW][0][10] = -16,
[2][1][RTW89_WW][1][10] = -16,
- [2][1][RTW89_WW][2][10] = 54,
[2][1][RTW89_WW][0][12] = -16,
[2][1][RTW89_WW][1][12] = -16,
- [2][1][RTW89_WW][2][12] = 54,
[2][1][RTW89_WW][0][14] = -16,
[2][1][RTW89_WW][1][14] = -16,
- [2][1][RTW89_WW][2][14] = 54,
[2][1][RTW89_WW][0][15] = -16,
[2][1][RTW89_WW][1][15] = -16,
- [2][1][RTW89_WW][2][15] = 54,
[2][1][RTW89_WW][0][17] = -16,
[2][1][RTW89_WW][1][17] = -16,
- [2][1][RTW89_WW][2][17] = 54,
[2][1][RTW89_WW][0][19] = -16,
[2][1][RTW89_WW][1][19] = -16,
- [2][1][RTW89_WW][2][19] = 54,
[2][1][RTW89_WW][0][21] = -16,
[2][1][RTW89_WW][1][21] = -16,
- [2][1][RTW89_WW][2][21] = 54,
[2][1][RTW89_WW][0][23] = -16,
[2][1][RTW89_WW][1][23] = -16,
- [2][1][RTW89_WW][2][23] = 54,
[2][1][RTW89_WW][0][25] = -16,
[2][1][RTW89_WW][1][25] = -16,
- [2][1][RTW89_WW][2][25] = 54,
[2][1][RTW89_WW][0][27] = -16,
[2][1][RTW89_WW][1][27] = -16,
- [2][1][RTW89_WW][2][27] = 54,
[2][1][RTW89_WW][0][29] = -16,
[2][1][RTW89_WW][1][29] = -16,
- [2][1][RTW89_WW][2][29] = 54,
[2][1][RTW89_WW][0][30] = -16,
[2][1][RTW89_WW][1][30] = -16,
- [2][1][RTW89_WW][2][30] = 54,
[2][1][RTW89_WW][0][32] = -16,
[2][1][RTW89_WW][1][32] = -16,
- [2][1][RTW89_WW][2][32] = 54,
[2][1][RTW89_WW][0][34] = -16,
[2][1][RTW89_WW][1][34] = -16,
- [2][1][RTW89_WW][2][34] = 54,
[2][1][RTW89_WW][0][36] = -16,
[2][1][RTW89_WW][1][36] = -16,
- [2][1][RTW89_WW][2][36] = 54,
[2][1][RTW89_WW][0][38] = -16,
[2][1][RTW89_WW][1][38] = -16,
- [2][1][RTW89_WW][2][38] = 54,
[2][1][RTW89_WW][0][40] = -16,
[2][1][RTW89_WW][1][40] = -16,
- [2][1][RTW89_WW][2][40] = 54,
[2][1][RTW89_WW][0][42] = -16,
[2][1][RTW89_WW][1][42] = -16,
- [2][1][RTW89_WW][2][42] = 54,
[2][1][RTW89_WW][0][44] = -16,
[2][1][RTW89_WW][1][44] = -16,
- [2][1][RTW89_WW][2][44] = 54,
[2][1][RTW89_WW][0][45] = -16,
[2][1][RTW89_WW][1][45] = -16,
- [2][1][RTW89_WW][2][45] = 56,
[2][1][RTW89_WW][0][47] = -16,
[2][1][RTW89_WW][1][47] = -16,
- [2][1][RTW89_WW][2][47] = 56,
[2][1][RTW89_WW][0][49] = -16,
[2][1][RTW89_WW][1][49] = -16,
- [2][1][RTW89_WW][2][49] = 56,
[2][1][RTW89_WW][0][51] = -16,
[2][1][RTW89_WW][1][51] = -16,
- [2][1][RTW89_WW][2][51] = 56,
[2][1][RTW89_WW][0][53] = -16,
[2][1][RTW89_WW][1][53] = -16,
- [2][1][RTW89_WW][2][53] = 56,
[2][1][RTW89_WW][0][55] = -16,
[2][1][RTW89_WW][1][55] = -16,
- [2][1][RTW89_WW][2][55] = 54,
[2][1][RTW89_WW][0][57] = -16,
[2][1][RTW89_WW][1][57] = -16,
- [2][1][RTW89_WW][2][57] = 54,
[2][1][RTW89_WW][0][59] = -16,
[2][1][RTW89_WW][1][59] = -16,
- [2][1][RTW89_WW][2][59] = 54,
[2][1][RTW89_WW][0][60] = -16,
[2][1][RTW89_WW][1][60] = -16,
- [2][1][RTW89_WW][2][60] = 54,
[2][1][RTW89_WW][0][62] = -16,
[2][1][RTW89_WW][1][62] = -16,
- [2][1][RTW89_WW][2][62] = 54,
[2][1][RTW89_WW][0][64] = -16,
[2][1][RTW89_WW][1][64] = -16,
- [2][1][RTW89_WW][2][64] = 54,
[2][1][RTW89_WW][0][66] = -16,
[2][1][RTW89_WW][1][66] = -16,
- [2][1][RTW89_WW][2][66] = 54,
[2][1][RTW89_WW][0][68] = -16,
[2][1][RTW89_WW][1][68] = -16,
- [2][1][RTW89_WW][2][68] = 54,
[2][1][RTW89_WW][0][70] = -16,
[2][1][RTW89_WW][1][70] = -16,
- [2][1][RTW89_WW][2][70] = 56,
[2][1][RTW89_WW][0][72] = -16,
[2][1][RTW89_WW][1][72] = -16,
- [2][1][RTW89_WW][2][72] = 56,
[2][1][RTW89_WW][0][74] = -16,
[2][1][RTW89_WW][1][74] = -16,
- [2][1][RTW89_WW][2][74] = 56,
[2][1][RTW89_WW][0][75] = -16,
[2][1][RTW89_WW][1][75] = -16,
- [2][1][RTW89_WW][2][75] = 56,
[2][1][RTW89_WW][0][77] = -16,
[2][1][RTW89_WW][1][77] = -16,
- [2][1][RTW89_WW][2][77] = 56,
[2][1][RTW89_WW][0][79] = -16,
[2][1][RTW89_WW][1][79] = -16,
- [2][1][RTW89_WW][2][79] = 56,
[2][1][RTW89_WW][0][81] = -16,
[2][1][RTW89_WW][1][81] = -16,
- [2][1][RTW89_WW][2][81] = 56,
[2][1][RTW89_WW][0][83] = -16,
[2][1][RTW89_WW][1][83] = -16,
- [2][1][RTW89_WW][2][83] = 56,
[2][1][RTW89_WW][0][85] = -18,
[2][1][RTW89_WW][1][85] = -18,
- [2][1][RTW89_WW][2][85] = 56,
[2][1][RTW89_WW][0][87] = -16,
[2][1][RTW89_WW][1][87] = -16,
- [2][1][RTW89_WW][2][87] = 0,
[2][1][RTW89_WW][0][89] = -16,
[2][1][RTW89_WW][1][89] = -16,
- [2][1][RTW89_WW][2][89] = 0,
[2][1][RTW89_WW][0][90] = -16,
[2][1][RTW89_WW][1][90] = -16,
- [2][1][RTW89_WW][2][90] = 0,
[2][1][RTW89_WW][0][92] = -16,
[2][1][RTW89_WW][1][92] = -16,
- [2][1][RTW89_WW][2][92] = 0,
[2][1][RTW89_WW][0][94] = -16,
[2][1][RTW89_WW][1][94] = -16,
- [2][1][RTW89_WW][2][94] = 0,
[2][1][RTW89_WW][0][96] = -16,
[2][1][RTW89_WW][1][96] = -16,
- [2][1][RTW89_WW][2][96] = 0,
[2][1][RTW89_WW][0][98] = -16,
[2][1][RTW89_WW][1][98] = -16,
- [2][1][RTW89_WW][2][98] = 0,
[2][1][RTW89_WW][0][100] = -16,
[2][1][RTW89_WW][1][100] = -16,
- [2][1][RTW89_WW][2][100] = 0,
[2][1][RTW89_WW][0][102] = -16,
[2][1][RTW89_WW][1][102] = -16,
- [2][1][RTW89_WW][2][102] = 0,
[2][1][RTW89_WW][0][104] = -16,
[2][1][RTW89_WW][1][104] = -16,
- [2][1][RTW89_WW][2][104] = 0,
[2][1][RTW89_WW][0][105] = -16,
[2][1][RTW89_WW][1][105] = -16,
- [2][1][RTW89_WW][2][105] = 0,
[2][1][RTW89_WW][0][107] = -14,
[2][1][RTW89_WW][1][107] = -14,
- [2][1][RTW89_WW][2][107] = 0,
[2][1][RTW89_WW][0][109] = -10,
[2][1][RTW89_WW][1][109] = -10,
- [2][1][RTW89_WW][2][109] = 0,
[2][1][RTW89_WW][0][111] = 0,
[2][1][RTW89_WW][1][111] = 0,
- [2][1][RTW89_WW][2][111] = 0,
[2][1][RTW89_WW][0][113] = 0,
[2][1][RTW89_WW][1][113] = 0,
- [2][1][RTW89_WW][2][113] = 0,
[2][1][RTW89_WW][0][115] = 0,
[2][1][RTW89_WW][1][115] = 0,
- [2][1][RTW89_WW][2][115] = 0,
[2][1][RTW89_WW][0][117] = 0,
[2][1][RTW89_WW][1][117] = 0,
- [2][1][RTW89_WW][2][117] = 0,
[2][1][RTW89_WW][0][119] = 0,
[2][1][RTW89_WW][1][119] = 0,
- [2][1][RTW89_WW][2][119] = 0,
[0][0][RTW89_FCC][1][0] = -16,
- [0][0][RTW89_FCC][2][0] = 44,
[0][0][RTW89_ETSI][1][0] = 32,
[0][0][RTW89_ETSI][0][0] = -8,
[0][0][RTW89_MKK][1][0] = 30,
[0][0][RTW89_MKK][0][0] = -8,
[0][0][RTW89_IC][1][0] = -16,
- [0][0][RTW89_IC][2][0] = 44,
[0][0][RTW89_KCC][1][0] = -2,
[0][0][RTW89_KCC][0][0] = -2,
[0][0][RTW89_ACMA][1][0] = 32,
@@ -52408,13 +50558,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][0] = 30,
[0][0][RTW89_THAILAND][0][0] = -16,
[0][0][RTW89_FCC][1][2] = -18,
- [0][0][RTW89_FCC][2][2] = 44,
[0][0][RTW89_ETSI][1][2] = 32,
[0][0][RTW89_ETSI][0][2] = -8,
[0][0][RTW89_MKK][1][2] = 30,
[0][0][RTW89_MKK][0][2] = -8,
[0][0][RTW89_IC][1][2] = -18,
- [0][0][RTW89_IC][2][2] = 44,
[0][0][RTW89_KCC][1][2] = -2,
[0][0][RTW89_KCC][0][2] = -2,
[0][0][RTW89_ACMA][1][2] = 32,
@@ -52427,13 +50575,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][2] = 30,
[0][0][RTW89_THAILAND][0][2] = -18,
[0][0][RTW89_FCC][1][4] = -18,
- [0][0][RTW89_FCC][2][4] = 44,
[0][0][RTW89_ETSI][1][4] = 32,
[0][0][RTW89_ETSI][0][4] = -8,
[0][0][RTW89_MKK][1][4] = 30,
[0][0][RTW89_MKK][0][4] = -8,
[0][0][RTW89_IC][1][4] = -18,
- [0][0][RTW89_IC][2][4] = 44,
[0][0][RTW89_KCC][1][4] = -2,
[0][0][RTW89_KCC][0][4] = -2,
[0][0][RTW89_ACMA][1][4] = 32,
@@ -52446,13 +50592,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][4] = 30,
[0][0][RTW89_THAILAND][0][4] = -18,
[0][0][RTW89_FCC][1][6] = -18,
- [0][0][RTW89_FCC][2][6] = 44,
[0][0][RTW89_ETSI][1][6] = 32,
[0][0][RTW89_ETSI][0][6] = -8,
[0][0][RTW89_MKK][1][6] = 30,
[0][0][RTW89_MKK][0][6] = -8,
[0][0][RTW89_IC][1][6] = -18,
- [0][0][RTW89_IC][2][6] = 44,
[0][0][RTW89_KCC][1][6] = -2,
[0][0][RTW89_KCC][0][6] = -2,
[0][0][RTW89_ACMA][1][6] = 32,
@@ -52465,13 +50609,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][6] = 30,
[0][0][RTW89_THAILAND][0][6] = -18,
[0][0][RTW89_FCC][1][8] = -18,
- [0][0][RTW89_FCC][2][8] = 44,
[0][0][RTW89_ETSI][1][8] = 32,
[0][0][RTW89_ETSI][0][8] = -8,
[0][0][RTW89_MKK][1][8] = 30,
[0][0][RTW89_MKK][0][8] = -8,
[0][0][RTW89_IC][1][8] = -18,
- [0][0][RTW89_IC][2][8] = 44,
[0][0][RTW89_KCC][1][8] = -2,
[0][0][RTW89_KCC][0][8] = -2,
[0][0][RTW89_ACMA][1][8] = 32,
@@ -52484,13 +50626,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][8] = 30,
[0][0][RTW89_THAILAND][0][8] = -18,
[0][0][RTW89_FCC][1][10] = -18,
- [0][0][RTW89_FCC][2][10] = 44,
[0][0][RTW89_ETSI][1][10] = 32,
[0][0][RTW89_ETSI][0][10] = -8,
[0][0][RTW89_MKK][1][10] = 30,
[0][0][RTW89_MKK][0][10] = -8,
[0][0][RTW89_IC][1][10] = -18,
- [0][0][RTW89_IC][2][10] = 44,
[0][0][RTW89_KCC][1][10] = -2,
[0][0][RTW89_KCC][0][10] = -2,
[0][0][RTW89_ACMA][1][10] = 32,
@@ -52503,13 +50643,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][10] = 30,
[0][0][RTW89_THAILAND][0][10] = -18,
[0][0][RTW89_FCC][1][12] = -18,
- [0][0][RTW89_FCC][2][12] = 44,
[0][0][RTW89_ETSI][1][12] = 32,
[0][0][RTW89_ETSI][0][12] = -8,
[0][0][RTW89_MKK][1][12] = 30,
[0][0][RTW89_MKK][0][12] = -8,
[0][0][RTW89_IC][1][12] = -18,
- [0][0][RTW89_IC][2][12] = 44,
[0][0][RTW89_KCC][1][12] = -2,
[0][0][RTW89_KCC][0][12] = -2,
[0][0][RTW89_ACMA][1][12] = 32,
@@ -52522,13 +50660,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][12] = 30,
[0][0][RTW89_THAILAND][0][12] = -18,
[0][0][RTW89_FCC][1][14] = -18,
- [0][0][RTW89_FCC][2][14] = 44,
[0][0][RTW89_ETSI][1][14] = 32,
[0][0][RTW89_ETSI][0][14] = -8,
[0][0][RTW89_MKK][1][14] = 30,
[0][0][RTW89_MKK][0][14] = -8,
[0][0][RTW89_IC][1][14] = -18,
- [0][0][RTW89_IC][2][14] = 44,
[0][0][RTW89_KCC][1][14] = -2,
[0][0][RTW89_KCC][0][14] = -2,
[0][0][RTW89_ACMA][1][14] = 32,
@@ -52541,13 +50677,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][14] = 30,
[0][0][RTW89_THAILAND][0][14] = -18,
[0][0][RTW89_FCC][1][15] = -18,
- [0][0][RTW89_FCC][2][15] = 44,
[0][0][RTW89_ETSI][1][15] = 32,
[0][0][RTW89_ETSI][0][15] = -8,
[0][0][RTW89_MKK][1][15] = 30,
[0][0][RTW89_MKK][0][15] = -8,
[0][0][RTW89_IC][1][15] = -18,
- [0][0][RTW89_IC][2][15] = 44,
[0][0][RTW89_KCC][1][15] = -2,
[0][0][RTW89_KCC][0][15] = -2,
[0][0][RTW89_ACMA][1][15] = 32,
@@ -52560,13 +50694,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][15] = 30,
[0][0][RTW89_THAILAND][0][15] = -18,
[0][0][RTW89_FCC][1][17] = -18,
- [0][0][RTW89_FCC][2][17] = 44,
[0][0][RTW89_ETSI][1][17] = 32,
[0][0][RTW89_ETSI][0][17] = -8,
[0][0][RTW89_MKK][1][17] = 30,
[0][0][RTW89_MKK][0][17] = -8,
[0][0][RTW89_IC][1][17] = -18,
- [0][0][RTW89_IC][2][17] = 44,
[0][0][RTW89_KCC][1][17] = -2,
[0][0][RTW89_KCC][0][17] = -2,
[0][0][RTW89_ACMA][1][17] = 32,
@@ -52579,13 +50711,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][17] = 30,
[0][0][RTW89_THAILAND][0][17] = -18,
[0][0][RTW89_FCC][1][19] = -18,
- [0][0][RTW89_FCC][2][19] = 44,
[0][0][RTW89_ETSI][1][19] = 32,
[0][0][RTW89_ETSI][0][19] = -8,
[0][0][RTW89_MKK][1][19] = 30,
[0][0][RTW89_MKK][0][19] = -8,
[0][0][RTW89_IC][1][19] = -18,
- [0][0][RTW89_IC][2][19] = 44,
[0][0][RTW89_KCC][1][19] = -2,
[0][0][RTW89_KCC][0][19] = -2,
[0][0][RTW89_ACMA][1][19] = 32,
@@ -52598,13 +50728,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][19] = 30,
[0][0][RTW89_THAILAND][0][19] = -18,
[0][0][RTW89_FCC][1][21] = -18,
- [0][0][RTW89_FCC][2][21] = 44,
[0][0][RTW89_ETSI][1][21] = 32,
[0][0][RTW89_ETSI][0][21] = -8,
[0][0][RTW89_MKK][1][21] = 30,
[0][0][RTW89_MKK][0][21] = -8,
[0][0][RTW89_IC][1][21] = -18,
- [0][0][RTW89_IC][2][21] = 44,
[0][0][RTW89_KCC][1][21] = -2,
[0][0][RTW89_KCC][0][21] = -2,
[0][0][RTW89_ACMA][1][21] = 32,
@@ -52617,13 +50745,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][21] = 30,
[0][0][RTW89_THAILAND][0][21] = -18,
[0][0][RTW89_FCC][1][23] = -18,
- [0][0][RTW89_FCC][2][23] = 54,
[0][0][RTW89_ETSI][1][23] = 32,
[0][0][RTW89_ETSI][0][23] = -8,
[0][0][RTW89_MKK][1][23] = 30,
[0][0][RTW89_MKK][0][23] = -8,
[0][0][RTW89_IC][1][23] = -18,
- [0][0][RTW89_IC][2][23] = 54,
[0][0][RTW89_KCC][1][23] = -2,
[0][0][RTW89_KCC][0][23] = -2,
[0][0][RTW89_ACMA][1][23] = 32,
@@ -52636,13 +50762,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][23] = 30,
[0][0][RTW89_THAILAND][0][23] = -18,
[0][0][RTW89_FCC][1][25] = -18,
- [0][0][RTW89_FCC][2][25] = 54,
[0][0][RTW89_ETSI][1][25] = 32,
[0][0][RTW89_ETSI][0][25] = -8,
[0][0][RTW89_MKK][1][25] = 30,
[0][0][RTW89_MKK][0][25] = -8,
[0][0][RTW89_IC][1][25] = -18,
- [0][0][RTW89_IC][2][25] = 54,
[0][0][RTW89_KCC][1][25] = -2,
[0][0][RTW89_KCC][0][25] = -2,
[0][0][RTW89_ACMA][1][25] = 32,
@@ -52655,13 +50779,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][25] = 30,
[0][0][RTW89_THAILAND][0][25] = -18,
[0][0][RTW89_FCC][1][27] = -18,
- [0][0][RTW89_FCC][2][27] = 54,
[0][0][RTW89_ETSI][1][27] = 32,
[0][0][RTW89_ETSI][0][27] = -8,
[0][0][RTW89_MKK][1][27] = 30,
[0][0][RTW89_MKK][0][27] = -8,
[0][0][RTW89_IC][1][27] = -18,
- [0][0][RTW89_IC][2][27] = 54,
[0][0][RTW89_KCC][1][27] = -2,
[0][0][RTW89_KCC][0][27] = -2,
[0][0][RTW89_ACMA][1][27] = 32,
@@ -52674,13 +50796,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][27] = 30,
[0][0][RTW89_THAILAND][0][27] = -18,
[0][0][RTW89_FCC][1][29] = -18,
- [0][0][RTW89_FCC][2][29] = 54,
[0][0][RTW89_ETSI][1][29] = 32,
[0][0][RTW89_ETSI][0][29] = -8,
[0][0][RTW89_MKK][1][29] = 30,
[0][0][RTW89_MKK][0][29] = -8,
[0][0][RTW89_IC][1][29] = -18,
- [0][0][RTW89_IC][2][29] = 54,
[0][0][RTW89_KCC][1][29] = -2,
[0][0][RTW89_KCC][0][29] = -2,
[0][0][RTW89_ACMA][1][29] = 32,
@@ -52693,13 +50813,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][29] = 30,
[0][0][RTW89_THAILAND][0][29] = -18,
[0][0][RTW89_FCC][1][30] = -18,
- [0][0][RTW89_FCC][2][30] = 54,
[0][0][RTW89_ETSI][1][30] = 32,
[0][0][RTW89_ETSI][0][30] = -8,
[0][0][RTW89_MKK][1][30] = 30,
[0][0][RTW89_MKK][0][30] = -8,
[0][0][RTW89_IC][1][30] = -18,
- [0][0][RTW89_IC][2][30] = 54,
[0][0][RTW89_KCC][1][30] = -2,
[0][0][RTW89_KCC][0][30] = -2,
[0][0][RTW89_ACMA][1][30] = 32,
@@ -52712,13 +50830,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][30] = 30,
[0][0][RTW89_THAILAND][0][30] = -18,
[0][0][RTW89_FCC][1][32] = -18,
- [0][0][RTW89_FCC][2][32] = 54,
[0][0][RTW89_ETSI][1][32] = 32,
[0][0][RTW89_ETSI][0][32] = -8,
[0][0][RTW89_MKK][1][32] = 30,
[0][0][RTW89_MKK][0][32] = -8,
[0][0][RTW89_IC][1][32] = -18,
- [0][0][RTW89_IC][2][32] = 54,
[0][0][RTW89_KCC][1][32] = -2,
[0][0][RTW89_KCC][0][32] = -2,
[0][0][RTW89_ACMA][1][32] = 32,
@@ -52731,13 +50847,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][32] = 30,
[0][0][RTW89_THAILAND][0][32] = -18,
[0][0][RTW89_FCC][1][34] = -18,
- [0][0][RTW89_FCC][2][34] = 54,
[0][0][RTW89_ETSI][1][34] = 32,
[0][0][RTW89_ETSI][0][34] = -8,
[0][0][RTW89_MKK][1][34] = 30,
[0][0][RTW89_MKK][0][34] = -8,
[0][0][RTW89_IC][1][34] = -18,
- [0][0][RTW89_IC][2][34] = 54,
[0][0][RTW89_KCC][1][34] = -2,
[0][0][RTW89_KCC][0][34] = -2,
[0][0][RTW89_ACMA][1][34] = 32,
@@ -52750,13 +50864,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][34] = 30,
[0][0][RTW89_THAILAND][0][34] = -18,
[0][0][RTW89_FCC][1][36] = -18,
- [0][0][RTW89_FCC][2][36] = 54,
[0][0][RTW89_ETSI][1][36] = 32,
[0][0][RTW89_ETSI][0][36] = -8,
[0][0][RTW89_MKK][1][36] = 30,
[0][0][RTW89_MKK][0][36] = -8,
[0][0][RTW89_IC][1][36] = -18,
- [0][0][RTW89_IC][2][36] = 54,
[0][0][RTW89_KCC][1][36] = -2,
[0][0][RTW89_KCC][0][36] = -2,
[0][0][RTW89_ACMA][1][36] = 32,
@@ -52769,13 +50881,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][36] = 30,
[0][0][RTW89_THAILAND][0][36] = -18,
[0][0][RTW89_FCC][1][38] = -18,
- [0][0][RTW89_FCC][2][38] = 54,
[0][0][RTW89_ETSI][1][38] = 32,
[0][0][RTW89_ETSI][0][38] = -8,
[0][0][RTW89_MKK][1][38] = 30,
[0][0][RTW89_MKK][0][38] = -8,
[0][0][RTW89_IC][1][38] = -18,
- [0][0][RTW89_IC][2][38] = 54,
[0][0][RTW89_KCC][1][38] = -2,
[0][0][RTW89_KCC][0][38] = -2,
[0][0][RTW89_ACMA][1][38] = 32,
@@ -52788,13 +50898,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][38] = 30,
[0][0][RTW89_THAILAND][0][38] = -18,
[0][0][RTW89_FCC][1][40] = -18,
- [0][0][RTW89_FCC][2][40] = 54,
[0][0][RTW89_ETSI][1][40] = 32,
[0][0][RTW89_ETSI][0][40] = -8,
[0][0][RTW89_MKK][1][40] = 30,
[0][0][RTW89_MKK][0][40] = -8,
[0][0][RTW89_IC][1][40] = -18,
- [0][0][RTW89_IC][2][40] = 54,
[0][0][RTW89_KCC][1][40] = -2,
[0][0][RTW89_KCC][0][40] = -2,
[0][0][RTW89_ACMA][1][40] = 32,
@@ -52807,13 +50915,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][40] = 30,
[0][0][RTW89_THAILAND][0][40] = -18,
[0][0][RTW89_FCC][1][42] = -18,
- [0][0][RTW89_FCC][2][42] = 54,
[0][0][RTW89_ETSI][1][42] = 32,
[0][0][RTW89_ETSI][0][42] = -8,
[0][0][RTW89_MKK][1][42] = 30,
[0][0][RTW89_MKK][0][42] = -8,
[0][0][RTW89_IC][1][42] = -18,
- [0][0][RTW89_IC][2][42] = 54,
[0][0][RTW89_KCC][1][42] = -2,
[0][0][RTW89_KCC][0][42] = -2,
[0][0][RTW89_ACMA][1][42] = 32,
@@ -52826,13 +50932,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][42] = 30,
[0][0][RTW89_THAILAND][0][42] = -18,
[0][0][RTW89_FCC][1][44] = -16,
- [0][0][RTW89_FCC][2][44] = 56,
[0][0][RTW89_ETSI][1][44] = 32,
[0][0][RTW89_ETSI][0][44] = -6,
[0][0][RTW89_MKK][1][44] = 8,
[0][0][RTW89_MKK][0][44] = -10,
[0][0][RTW89_IC][1][44] = -16,
- [0][0][RTW89_IC][2][44] = 56,
[0][0][RTW89_KCC][1][44] = -2,
[0][0][RTW89_KCC][0][44] = -2,
[0][0][RTW89_ACMA][1][44] = 32,
@@ -52845,13 +50949,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][44] = 30,
[0][0][RTW89_THAILAND][0][44] = -16,
[0][0][RTW89_FCC][1][45] = -16,
- [0][0][RTW89_FCC][2][45] = 127,
[0][0][RTW89_ETSI][1][45] = 127,
[0][0][RTW89_ETSI][0][45] = 127,
[0][0][RTW89_MKK][1][45] = 127,
[0][0][RTW89_MKK][0][45] = 127,
[0][0][RTW89_IC][1][45] = -16,
- [0][0][RTW89_IC][2][45] = 56,
[0][0][RTW89_KCC][1][45] = -2,
[0][0][RTW89_KCC][0][45] = 127,
[0][0][RTW89_ACMA][1][45] = 127,
@@ -52864,13 +50966,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][45] = 127,
[0][0][RTW89_THAILAND][0][45] = 127,
[0][0][RTW89_FCC][1][47] = -18,
- [0][0][RTW89_FCC][2][47] = 127,
[0][0][RTW89_ETSI][1][47] = 127,
[0][0][RTW89_ETSI][0][47] = 127,
[0][0][RTW89_MKK][1][47] = 127,
[0][0][RTW89_MKK][0][47] = 127,
[0][0][RTW89_IC][1][47] = -18,
- [0][0][RTW89_IC][2][47] = 56,
[0][0][RTW89_KCC][1][47] = -2,
[0][0][RTW89_KCC][0][47] = 127,
[0][0][RTW89_ACMA][1][47] = 127,
@@ -52883,13 +50983,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][47] = 127,
[0][0][RTW89_THAILAND][0][47] = 127,
[0][0][RTW89_FCC][1][49] = -18,
- [0][0][RTW89_FCC][2][49] = 127,
[0][0][RTW89_ETSI][1][49] = 127,
[0][0][RTW89_ETSI][0][49] = 127,
[0][0][RTW89_MKK][1][49] = 127,
[0][0][RTW89_MKK][0][49] = 127,
[0][0][RTW89_IC][1][49] = -18,
- [0][0][RTW89_IC][2][49] = 56,
[0][0][RTW89_KCC][1][49] = -2,
[0][0][RTW89_KCC][0][49] = 127,
[0][0][RTW89_ACMA][1][49] = 127,
@@ -52902,13 +51000,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][49] = 127,
[0][0][RTW89_THAILAND][0][49] = 127,
[0][0][RTW89_FCC][1][51] = -18,
- [0][0][RTW89_FCC][2][51] = 127,
[0][0][RTW89_ETSI][1][51] = 127,
[0][0][RTW89_ETSI][0][51] = 127,
[0][0][RTW89_MKK][1][51] = 127,
[0][0][RTW89_MKK][0][51] = 127,
[0][0][RTW89_IC][1][51] = -18,
- [0][0][RTW89_IC][2][51] = 56,
[0][0][RTW89_KCC][1][51] = -2,
[0][0][RTW89_KCC][0][51] = 127,
[0][0][RTW89_ACMA][1][51] = 127,
@@ -52921,13 +51017,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][51] = 127,
[0][0][RTW89_THAILAND][0][51] = 127,
[0][0][RTW89_FCC][1][53] = -16,
- [0][0][RTW89_FCC][2][53] = 127,
[0][0][RTW89_ETSI][1][53] = 127,
[0][0][RTW89_ETSI][0][53] = 127,
[0][0][RTW89_MKK][1][53] = 127,
[0][0][RTW89_MKK][0][53] = 127,
[0][0][RTW89_IC][1][53] = -16,
- [0][0][RTW89_IC][2][53] = 56,
[0][0][RTW89_KCC][1][53] = -2,
[0][0][RTW89_KCC][0][53] = 127,
[0][0][RTW89_ACMA][1][53] = 127,
@@ -52940,13 +51034,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][53] = 127,
[0][0][RTW89_THAILAND][0][53] = 127,
[0][0][RTW89_FCC][1][55] = -18,
- [0][0][RTW89_FCC][2][55] = 56,
[0][0][RTW89_ETSI][1][55] = 127,
[0][0][RTW89_ETSI][0][55] = 127,
[0][0][RTW89_MKK][1][55] = 127,
[0][0][RTW89_MKK][0][55] = 127,
[0][0][RTW89_IC][1][55] = -18,
- [0][0][RTW89_IC][2][55] = 56,
[0][0][RTW89_KCC][1][55] = -2,
[0][0][RTW89_KCC][0][55] = 127,
[0][0][RTW89_ACMA][1][55] = 127,
@@ -52959,13 +51051,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][55] = 127,
[0][0][RTW89_THAILAND][0][55] = 127,
[0][0][RTW89_FCC][1][57] = -18,
- [0][0][RTW89_FCC][2][57] = 56,
[0][0][RTW89_ETSI][1][57] = 127,
[0][0][RTW89_ETSI][0][57] = 127,
[0][0][RTW89_MKK][1][57] = 127,
[0][0][RTW89_MKK][0][57] = 127,
[0][0][RTW89_IC][1][57] = -18,
- [0][0][RTW89_IC][2][57] = 56,
[0][0][RTW89_KCC][1][57] = -2,
[0][0][RTW89_KCC][0][57] = 127,
[0][0][RTW89_ACMA][1][57] = 127,
@@ -52978,13 +51068,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][57] = 127,
[0][0][RTW89_THAILAND][0][57] = 127,
[0][0][RTW89_FCC][1][59] = -18,
- [0][0][RTW89_FCC][2][59] = 56,
[0][0][RTW89_ETSI][1][59] = 127,
[0][0][RTW89_ETSI][0][59] = 127,
[0][0][RTW89_MKK][1][59] = 127,
[0][0][RTW89_MKK][0][59] = 127,
[0][0][RTW89_IC][1][59] = -18,
- [0][0][RTW89_IC][2][59] = 56,
[0][0][RTW89_KCC][1][59] = -2,
[0][0][RTW89_KCC][0][59] = 127,
[0][0][RTW89_ACMA][1][59] = 127,
@@ -52997,13 +51085,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][59] = 127,
[0][0][RTW89_THAILAND][0][59] = 127,
[0][0][RTW89_FCC][1][60] = -18,
- [0][0][RTW89_FCC][2][60] = 56,
[0][0][RTW89_ETSI][1][60] = 127,
[0][0][RTW89_ETSI][0][60] = 127,
[0][0][RTW89_MKK][1][60] = 127,
[0][0][RTW89_MKK][0][60] = 127,
[0][0][RTW89_IC][1][60] = -18,
- [0][0][RTW89_IC][2][60] = 56,
[0][0][RTW89_KCC][1][60] = -2,
[0][0][RTW89_KCC][0][60] = 127,
[0][0][RTW89_ACMA][1][60] = 127,
@@ -53016,13 +51102,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][60] = 127,
[0][0][RTW89_THAILAND][0][60] = 127,
[0][0][RTW89_FCC][1][62] = -18,
- [0][0][RTW89_FCC][2][62] = 56,
[0][0][RTW89_ETSI][1][62] = 127,
[0][0][RTW89_ETSI][0][62] = 127,
[0][0][RTW89_MKK][1][62] = 127,
[0][0][RTW89_MKK][0][62] = 127,
[0][0][RTW89_IC][1][62] = -18,
- [0][0][RTW89_IC][2][62] = 56,
[0][0][RTW89_KCC][1][62] = -2,
[0][0][RTW89_KCC][0][62] = 127,
[0][0][RTW89_ACMA][1][62] = 127,
@@ -53035,13 +51119,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][62] = 127,
[0][0][RTW89_THAILAND][0][62] = 127,
[0][0][RTW89_FCC][1][64] = -18,
- [0][0][RTW89_FCC][2][64] = 56,
[0][0][RTW89_ETSI][1][64] = 127,
[0][0][RTW89_ETSI][0][64] = 127,
[0][0][RTW89_MKK][1][64] = 127,
[0][0][RTW89_MKK][0][64] = 127,
[0][0][RTW89_IC][1][64] = -18,
- [0][0][RTW89_IC][2][64] = 56,
[0][0][RTW89_KCC][1][64] = -2,
[0][0][RTW89_KCC][0][64] = 127,
[0][0][RTW89_ACMA][1][64] = 127,
@@ -53054,13 +51136,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][64] = 127,
[0][0][RTW89_THAILAND][0][64] = 127,
[0][0][RTW89_FCC][1][66] = -18,
- [0][0][RTW89_FCC][2][66] = 56,
[0][0][RTW89_ETSI][1][66] = 127,
[0][0][RTW89_ETSI][0][66] = 127,
[0][0][RTW89_MKK][1][66] = 127,
[0][0][RTW89_MKK][0][66] = 127,
[0][0][RTW89_IC][1][66] = -18,
- [0][0][RTW89_IC][2][66] = 56,
[0][0][RTW89_KCC][1][66] = -2,
[0][0][RTW89_KCC][0][66] = 127,
[0][0][RTW89_ACMA][1][66] = 127,
@@ -53073,13 +51153,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][66] = 127,
[0][0][RTW89_THAILAND][0][66] = 127,
[0][0][RTW89_FCC][1][68] = -18,
- [0][0][RTW89_FCC][2][68] = 56,
[0][0][RTW89_ETSI][1][68] = 127,
[0][0][RTW89_ETSI][0][68] = 127,
[0][0][RTW89_MKK][1][68] = 127,
[0][0][RTW89_MKK][0][68] = 127,
[0][0][RTW89_IC][1][68] = -18,
- [0][0][RTW89_IC][2][68] = 56,
[0][0][RTW89_KCC][1][68] = -2,
[0][0][RTW89_KCC][0][68] = 127,
[0][0][RTW89_ACMA][1][68] = 127,
@@ -53092,13 +51170,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][68] = 127,
[0][0][RTW89_THAILAND][0][68] = 127,
[0][0][RTW89_FCC][1][70] = -16,
- [0][0][RTW89_FCC][2][70] = 56,
[0][0][RTW89_ETSI][1][70] = 127,
[0][0][RTW89_ETSI][0][70] = 127,
[0][0][RTW89_MKK][1][70] = 127,
[0][0][RTW89_MKK][0][70] = 127,
[0][0][RTW89_IC][1][70] = -16,
- [0][0][RTW89_IC][2][70] = 56,
[0][0][RTW89_KCC][1][70] = -2,
[0][0][RTW89_KCC][0][70] = 127,
[0][0][RTW89_ACMA][1][70] = 127,
@@ -53111,13 +51187,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][70] = 127,
[0][0][RTW89_THAILAND][0][70] = 127,
[0][0][RTW89_FCC][1][72] = -18,
- [0][0][RTW89_FCC][2][72] = 56,
[0][0][RTW89_ETSI][1][72] = 127,
[0][0][RTW89_ETSI][0][72] = 127,
[0][0][RTW89_MKK][1][72] = 127,
[0][0][RTW89_MKK][0][72] = 127,
[0][0][RTW89_IC][1][72] = -18,
- [0][0][RTW89_IC][2][72] = 56,
[0][0][RTW89_KCC][1][72] = -2,
[0][0][RTW89_KCC][0][72] = 127,
[0][0][RTW89_ACMA][1][72] = 127,
@@ -53130,13 +51204,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][72] = 127,
[0][0][RTW89_THAILAND][0][72] = 127,
[0][0][RTW89_FCC][1][74] = -18,
- [0][0][RTW89_FCC][2][74] = 56,
[0][0][RTW89_ETSI][1][74] = 127,
[0][0][RTW89_ETSI][0][74] = 127,
[0][0][RTW89_MKK][1][74] = 127,
[0][0][RTW89_MKK][0][74] = 127,
[0][0][RTW89_IC][1][74] = -18,
- [0][0][RTW89_IC][2][74] = 56,
[0][0][RTW89_KCC][1][74] = -2,
[0][0][RTW89_KCC][0][74] = 127,
[0][0][RTW89_ACMA][1][74] = 127,
@@ -53149,13 +51221,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][74] = 127,
[0][0][RTW89_THAILAND][0][74] = 127,
[0][0][RTW89_FCC][1][75] = -18,
- [0][0][RTW89_FCC][2][75] = 56,
[0][0][RTW89_ETSI][1][75] = 127,
[0][0][RTW89_ETSI][0][75] = 127,
[0][0][RTW89_MKK][1][75] = 127,
[0][0][RTW89_MKK][0][75] = 127,
[0][0][RTW89_IC][1][75] = -18,
- [0][0][RTW89_IC][2][75] = 56,
[0][0][RTW89_KCC][1][75] = -2,
[0][0][RTW89_KCC][0][75] = 127,
[0][0][RTW89_ACMA][1][75] = 127,
@@ -53168,13 +51238,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][75] = 127,
[0][0][RTW89_THAILAND][0][75] = 127,
[0][0][RTW89_FCC][1][77] = -18,
- [0][0][RTW89_FCC][2][77] = 56,
[0][0][RTW89_ETSI][1][77] = 127,
[0][0][RTW89_ETSI][0][77] = 127,
[0][0][RTW89_MKK][1][77] = 127,
[0][0][RTW89_MKK][0][77] = 127,
[0][0][RTW89_IC][1][77] = -18,
- [0][0][RTW89_IC][2][77] = 56,
[0][0][RTW89_KCC][1][77] = -2,
[0][0][RTW89_KCC][0][77] = 127,
[0][0][RTW89_ACMA][1][77] = 127,
@@ -53187,13 +51255,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][77] = 127,
[0][0][RTW89_THAILAND][0][77] = 127,
[0][0][RTW89_FCC][1][79] = -18,
- [0][0][RTW89_FCC][2][79] = 56,
[0][0][RTW89_ETSI][1][79] = 127,
[0][0][RTW89_ETSI][0][79] = 127,
[0][0][RTW89_MKK][1][79] = 127,
[0][0][RTW89_MKK][0][79] = 127,
[0][0][RTW89_IC][1][79] = -18,
- [0][0][RTW89_IC][2][79] = 56,
[0][0][RTW89_KCC][1][79] = -2,
[0][0][RTW89_KCC][0][79] = 127,
[0][0][RTW89_ACMA][1][79] = 127,
@@ -53206,13 +51272,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][79] = 127,
[0][0][RTW89_THAILAND][0][79] = 127,
[0][0][RTW89_FCC][1][81] = -18,
- [0][0][RTW89_FCC][2][81] = 56,
[0][0][RTW89_ETSI][1][81] = 127,
[0][0][RTW89_ETSI][0][81] = 127,
[0][0][RTW89_MKK][1][81] = 127,
[0][0][RTW89_MKK][0][81] = 127,
[0][0][RTW89_IC][1][81] = -18,
- [0][0][RTW89_IC][2][81] = 56,
[0][0][RTW89_KCC][1][81] = -2,
[0][0][RTW89_KCC][0][81] = 127,
[0][0][RTW89_ACMA][1][81] = 127,
@@ -53225,13 +51289,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][81] = 127,
[0][0][RTW89_THAILAND][0][81] = 127,
[0][0][RTW89_FCC][1][83] = -18,
- [0][0][RTW89_FCC][2][83] = 56,
[0][0][RTW89_ETSI][1][83] = 127,
[0][0][RTW89_ETSI][0][83] = 127,
[0][0][RTW89_MKK][1][83] = 127,
[0][0][RTW89_MKK][0][83] = 127,
[0][0][RTW89_IC][1][83] = -18,
- [0][0][RTW89_IC][2][83] = 56,
[0][0][RTW89_KCC][1][83] = -2,
[0][0][RTW89_KCC][0][83] = 127,
[0][0][RTW89_ACMA][1][83] = 127,
@@ -53244,13 +51306,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][83] = 127,
[0][0][RTW89_THAILAND][0][83] = 127,
[0][0][RTW89_FCC][1][85] = -18,
- [0][0][RTW89_FCC][2][85] = 56,
[0][0][RTW89_ETSI][1][85] = 127,
[0][0][RTW89_ETSI][0][85] = 127,
[0][0][RTW89_MKK][1][85] = 127,
[0][0][RTW89_MKK][0][85] = 127,
[0][0][RTW89_IC][1][85] = -18,
- [0][0][RTW89_IC][2][85] = 56,
[0][0][RTW89_KCC][1][85] = -2,
[0][0][RTW89_KCC][0][85] = 127,
[0][0][RTW89_ACMA][1][85] = 127,
@@ -53263,13 +51323,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][85] = 127,
[0][0][RTW89_THAILAND][0][85] = 127,
[0][0][RTW89_FCC][1][87] = -16,
- [0][0][RTW89_FCC][2][87] = 127,
[0][0][RTW89_ETSI][1][87] = 127,
[0][0][RTW89_ETSI][0][87] = 127,
[0][0][RTW89_MKK][1][87] = 127,
[0][0][RTW89_MKK][0][87] = 127,
[0][0][RTW89_IC][1][87] = -16,
- [0][0][RTW89_IC][2][87] = 127,
[0][0][RTW89_KCC][1][87] = -2,
[0][0][RTW89_KCC][0][87] = 127,
[0][0][RTW89_ACMA][1][87] = 127,
@@ -53282,13 +51340,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][87] = 127,
[0][0][RTW89_THAILAND][0][87] = 127,
[0][0][RTW89_FCC][1][89] = -16,
- [0][0][RTW89_FCC][2][89] = 127,
[0][0][RTW89_ETSI][1][89] = 127,
[0][0][RTW89_ETSI][0][89] = 127,
[0][0][RTW89_MKK][1][89] = 127,
[0][0][RTW89_MKK][0][89] = 127,
[0][0][RTW89_IC][1][89] = -16,
- [0][0][RTW89_IC][2][89] = 127,
[0][0][RTW89_KCC][1][89] = -2,
[0][0][RTW89_KCC][0][89] = 127,
[0][0][RTW89_ACMA][1][89] = 127,
@@ -53301,13 +51357,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][89] = 127,
[0][0][RTW89_THAILAND][0][89] = 127,
[0][0][RTW89_FCC][1][90] = -16,
- [0][0][RTW89_FCC][2][90] = 127,
[0][0][RTW89_ETSI][1][90] = 127,
[0][0][RTW89_ETSI][0][90] = 127,
[0][0][RTW89_MKK][1][90] = 127,
[0][0][RTW89_MKK][0][90] = 127,
[0][0][RTW89_IC][1][90] = -16,
- [0][0][RTW89_IC][2][90] = 127,
[0][0][RTW89_KCC][1][90] = -2,
[0][0][RTW89_KCC][0][90] = 127,
[0][0][RTW89_ACMA][1][90] = 127,
@@ -53320,13 +51374,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][90] = 127,
[0][0][RTW89_THAILAND][0][90] = 127,
[0][0][RTW89_FCC][1][92] = -16,
- [0][0][RTW89_FCC][2][92] = 127,
[0][0][RTW89_ETSI][1][92] = 127,
[0][0][RTW89_ETSI][0][92] = 127,
[0][0][RTW89_MKK][1][92] = 127,
[0][0][RTW89_MKK][0][92] = 127,
[0][0][RTW89_IC][1][92] = -16,
- [0][0][RTW89_IC][2][92] = 127,
[0][0][RTW89_KCC][1][92] = -2,
[0][0][RTW89_KCC][0][92] = 127,
[0][0][RTW89_ACMA][1][92] = 127,
@@ -53339,13 +51391,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][92] = 127,
[0][0][RTW89_THAILAND][0][92] = 127,
[0][0][RTW89_FCC][1][94] = -16,
- [0][0][RTW89_FCC][2][94] = 127,
[0][0][RTW89_ETSI][1][94] = 127,
[0][0][RTW89_ETSI][0][94] = 127,
[0][0][RTW89_MKK][1][94] = 127,
[0][0][RTW89_MKK][0][94] = 127,
[0][0][RTW89_IC][1][94] = -16,
- [0][0][RTW89_IC][2][94] = 127,
[0][0][RTW89_KCC][1][94] = -2,
[0][0][RTW89_KCC][0][94] = 127,
[0][0][RTW89_ACMA][1][94] = 127,
@@ -53358,13 +51408,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][94] = 127,
[0][0][RTW89_THAILAND][0][94] = 127,
[0][0][RTW89_FCC][1][96] = -16,
- [0][0][RTW89_FCC][2][96] = 127,
[0][0][RTW89_ETSI][1][96] = 127,
[0][0][RTW89_ETSI][0][96] = 127,
[0][0][RTW89_MKK][1][96] = 127,
[0][0][RTW89_MKK][0][96] = 127,
[0][0][RTW89_IC][1][96] = -16,
- [0][0][RTW89_IC][2][96] = 127,
[0][0][RTW89_KCC][1][96] = -2,
[0][0][RTW89_KCC][0][96] = 127,
[0][0][RTW89_ACMA][1][96] = 127,
@@ -53377,13 +51425,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][96] = 127,
[0][0][RTW89_THAILAND][0][96] = 127,
[0][0][RTW89_FCC][1][98] = -16,
- [0][0][RTW89_FCC][2][98] = 127,
[0][0][RTW89_ETSI][1][98] = 127,
[0][0][RTW89_ETSI][0][98] = 127,
[0][0][RTW89_MKK][1][98] = 127,
[0][0][RTW89_MKK][0][98] = 127,
[0][0][RTW89_IC][1][98] = -16,
- [0][0][RTW89_IC][2][98] = 127,
[0][0][RTW89_KCC][1][98] = -2,
[0][0][RTW89_KCC][0][98] = 127,
[0][0][RTW89_ACMA][1][98] = 127,
@@ -53396,13 +51442,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][98] = 127,
[0][0][RTW89_THAILAND][0][98] = 127,
[0][0][RTW89_FCC][1][100] = -16,
- [0][0][RTW89_FCC][2][100] = 127,
[0][0][RTW89_ETSI][1][100] = 127,
[0][0][RTW89_ETSI][0][100] = 127,
[0][0][RTW89_MKK][1][100] = 127,
[0][0][RTW89_MKK][0][100] = 127,
[0][0][RTW89_IC][1][100] = -16,
- [0][0][RTW89_IC][2][100] = 127,
[0][0][RTW89_KCC][1][100] = -2,
[0][0][RTW89_KCC][0][100] = 127,
[0][0][RTW89_ACMA][1][100] = 127,
@@ -53415,13 +51459,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][100] = 127,
[0][0][RTW89_THAILAND][0][100] = 127,
[0][0][RTW89_FCC][1][102] = -16,
- [0][0][RTW89_FCC][2][102] = 127,
[0][0][RTW89_ETSI][1][102] = 127,
[0][0][RTW89_ETSI][0][102] = 127,
[0][0][RTW89_MKK][1][102] = 127,
[0][0][RTW89_MKK][0][102] = 127,
[0][0][RTW89_IC][1][102] = -16,
- [0][0][RTW89_IC][2][102] = 127,
[0][0][RTW89_KCC][1][102] = -2,
[0][0][RTW89_KCC][0][102] = 127,
[0][0][RTW89_ACMA][1][102] = 127,
@@ -53434,13 +51476,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][102] = 127,
[0][0][RTW89_THAILAND][0][102] = 127,
[0][0][RTW89_FCC][1][104] = -16,
- [0][0][RTW89_FCC][2][104] = 127,
[0][0][RTW89_ETSI][1][104] = 127,
[0][0][RTW89_ETSI][0][104] = 127,
[0][0][RTW89_MKK][1][104] = 127,
[0][0][RTW89_MKK][0][104] = 127,
[0][0][RTW89_IC][1][104] = -16,
- [0][0][RTW89_IC][2][104] = 127,
[0][0][RTW89_KCC][1][104] = -2,
[0][0][RTW89_KCC][0][104] = 127,
[0][0][RTW89_ACMA][1][104] = 127,
@@ -53453,13 +51493,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][104] = 127,
[0][0][RTW89_THAILAND][0][104] = 127,
[0][0][RTW89_FCC][1][105] = -16,
- [0][0][RTW89_FCC][2][105] = 127,
[0][0][RTW89_ETSI][1][105] = 127,
[0][0][RTW89_ETSI][0][105] = 127,
[0][0][RTW89_MKK][1][105] = 127,
[0][0][RTW89_MKK][0][105] = 127,
[0][0][RTW89_IC][1][105] = -16,
- [0][0][RTW89_IC][2][105] = 127,
[0][0][RTW89_KCC][1][105] = -2,
[0][0][RTW89_KCC][0][105] = 127,
[0][0][RTW89_ACMA][1][105] = 127,
@@ -53472,13 +51510,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][105] = 127,
[0][0][RTW89_THAILAND][0][105] = 127,
[0][0][RTW89_FCC][1][107] = -12,
- [0][0][RTW89_FCC][2][107] = 127,
[0][0][RTW89_ETSI][1][107] = 127,
[0][0][RTW89_ETSI][0][107] = 127,
[0][0][RTW89_MKK][1][107] = 127,
[0][0][RTW89_MKK][0][107] = 127,
[0][0][RTW89_IC][1][107] = -12,
- [0][0][RTW89_IC][2][107] = 127,
[0][0][RTW89_KCC][1][107] = -2,
[0][0][RTW89_KCC][0][107] = 127,
[0][0][RTW89_ACMA][1][107] = 127,
@@ -53491,13 +51527,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][107] = 127,
[0][0][RTW89_THAILAND][0][107] = 127,
[0][0][RTW89_FCC][1][109] = -12,
- [0][0][RTW89_FCC][2][109] = 127,
[0][0][RTW89_ETSI][1][109] = 127,
[0][0][RTW89_ETSI][0][109] = 127,
[0][0][RTW89_MKK][1][109] = 127,
[0][0][RTW89_MKK][0][109] = 127,
[0][0][RTW89_IC][1][109] = -12,
- [0][0][RTW89_IC][2][109] = 127,
[0][0][RTW89_KCC][1][109] = 127,
[0][0][RTW89_KCC][0][109] = 127,
[0][0][RTW89_ACMA][1][109] = 127,
@@ -53510,13 +51544,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][109] = 127,
[0][0][RTW89_THAILAND][0][109] = 127,
[0][0][RTW89_FCC][1][111] = 127,
- [0][0][RTW89_FCC][2][111] = 127,
[0][0][RTW89_ETSI][1][111] = 127,
[0][0][RTW89_ETSI][0][111] = 127,
[0][0][RTW89_MKK][1][111] = 127,
[0][0][RTW89_MKK][0][111] = 127,
[0][0][RTW89_IC][1][111] = 127,
- [0][0][RTW89_IC][2][111] = 127,
[0][0][RTW89_KCC][1][111] = 127,
[0][0][RTW89_KCC][0][111] = 127,
[0][0][RTW89_ACMA][1][111] = 127,
@@ -53529,13 +51561,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][111] = 127,
[0][0][RTW89_THAILAND][0][111] = 127,
[0][0][RTW89_FCC][1][113] = 127,
- [0][0][RTW89_FCC][2][113] = 127,
[0][0][RTW89_ETSI][1][113] = 127,
[0][0][RTW89_ETSI][0][113] = 127,
[0][0][RTW89_MKK][1][113] = 127,
[0][0][RTW89_MKK][0][113] = 127,
[0][0][RTW89_IC][1][113] = 127,
- [0][0][RTW89_IC][2][113] = 127,
[0][0][RTW89_KCC][1][113] = 127,
[0][0][RTW89_KCC][0][113] = 127,
[0][0][RTW89_ACMA][1][113] = 127,
@@ -53548,13 +51578,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][113] = 127,
[0][0][RTW89_THAILAND][0][113] = 127,
[0][0][RTW89_FCC][1][115] = 127,
- [0][0][RTW89_FCC][2][115] = 127,
[0][0][RTW89_ETSI][1][115] = 127,
[0][0][RTW89_ETSI][0][115] = 127,
[0][0][RTW89_MKK][1][115] = 127,
[0][0][RTW89_MKK][0][115] = 127,
[0][0][RTW89_IC][1][115] = 127,
- [0][0][RTW89_IC][2][115] = 127,
[0][0][RTW89_KCC][1][115] = 127,
[0][0][RTW89_KCC][0][115] = 127,
[0][0][RTW89_ACMA][1][115] = 127,
@@ -53567,13 +51595,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][115] = 127,
[0][0][RTW89_THAILAND][0][115] = 127,
[0][0][RTW89_FCC][1][117] = 127,
- [0][0][RTW89_FCC][2][117] = 127,
[0][0][RTW89_ETSI][1][117] = 127,
[0][0][RTW89_ETSI][0][117] = 127,
[0][0][RTW89_MKK][1][117] = 127,
[0][0][RTW89_MKK][0][117] = 127,
[0][0][RTW89_IC][1][117] = 127,
- [0][0][RTW89_IC][2][117] = 127,
[0][0][RTW89_KCC][1][117] = 127,
[0][0][RTW89_KCC][0][117] = 127,
[0][0][RTW89_ACMA][1][117] = 127,
@@ -53586,13 +51612,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][117] = 127,
[0][0][RTW89_THAILAND][0][117] = 127,
[0][0][RTW89_FCC][1][119] = 127,
- [0][0][RTW89_FCC][2][119] = 127,
[0][0][RTW89_ETSI][1][119] = 127,
[0][0][RTW89_ETSI][0][119] = 127,
[0][0][RTW89_MKK][1][119] = 127,
[0][0][RTW89_MKK][0][119] = 127,
[0][0][RTW89_IC][1][119] = 127,
- [0][0][RTW89_IC][2][119] = 127,
[0][0][RTW89_KCC][1][119] = 127,
[0][0][RTW89_KCC][0][119] = 127,
[0][0][RTW89_ACMA][1][119] = 127,
@@ -53605,13 +51629,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][119] = 127,
[0][0][RTW89_THAILAND][0][119] = 127,
[0][1][RTW89_FCC][1][0] = -40,
- [0][1][RTW89_FCC][2][0] = 32,
[0][1][RTW89_ETSI][1][0] = 20,
[0][1][RTW89_ETSI][0][0] = -18,
[0][1][RTW89_MKK][1][0] = 18,
[0][1][RTW89_MKK][0][0] = -20,
[0][1][RTW89_IC][1][0] = -40,
- [0][1][RTW89_IC][2][0] = 32,
[0][1][RTW89_KCC][1][0] = -14,
[0][1][RTW89_KCC][0][0] = -14,
[0][1][RTW89_ACMA][1][0] = 20,
@@ -53624,13 +51646,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][0] = 6,
[0][1][RTW89_THAILAND][0][0] = -40,
[0][1][RTW89_FCC][1][2] = -40,
- [0][1][RTW89_FCC][2][2] = 32,
[0][1][RTW89_ETSI][1][2] = 20,
[0][1][RTW89_ETSI][0][2] = -18,
[0][1][RTW89_MKK][1][2] = 18,
[0][1][RTW89_MKK][0][2] = -22,
[0][1][RTW89_IC][1][2] = -40,
- [0][1][RTW89_IC][2][2] = 32,
[0][1][RTW89_KCC][1][2] = -14,
[0][1][RTW89_KCC][0][2] = -14,
[0][1][RTW89_ACMA][1][2] = 20,
@@ -53643,13 +51663,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][2] = 6,
[0][1][RTW89_THAILAND][0][2] = -40,
[0][1][RTW89_FCC][1][4] = -40,
- [0][1][RTW89_FCC][2][4] = 32,
[0][1][RTW89_ETSI][1][4] = 20,
[0][1][RTW89_ETSI][0][4] = -18,
[0][1][RTW89_MKK][1][4] = 18,
[0][1][RTW89_MKK][0][4] = -22,
[0][1][RTW89_IC][1][4] = -40,
- [0][1][RTW89_IC][2][4] = 32,
[0][1][RTW89_KCC][1][4] = -14,
[0][1][RTW89_KCC][0][4] = -14,
[0][1][RTW89_ACMA][1][4] = 20,
@@ -53662,13 +51680,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][4] = 6,
[0][1][RTW89_THAILAND][0][4] = -40,
[0][1][RTW89_FCC][1][6] = -40,
- [0][1][RTW89_FCC][2][6] = 32,
[0][1][RTW89_ETSI][1][6] = 20,
[0][1][RTW89_ETSI][0][6] = -18,
[0][1][RTW89_MKK][1][6] = 18,
[0][1][RTW89_MKK][0][6] = -22,
[0][1][RTW89_IC][1][6] = -40,
- [0][1][RTW89_IC][2][6] = 32,
[0][1][RTW89_KCC][1][6] = -14,
[0][1][RTW89_KCC][0][6] = -14,
[0][1][RTW89_ACMA][1][6] = 20,
@@ -53681,13 +51697,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][6] = 6,
[0][1][RTW89_THAILAND][0][6] = -40,
[0][1][RTW89_FCC][1][8] = -40,
- [0][1][RTW89_FCC][2][8] = 32,
[0][1][RTW89_ETSI][1][8] = 20,
[0][1][RTW89_ETSI][0][8] = -18,
[0][1][RTW89_MKK][1][8] = 18,
[0][1][RTW89_MKK][0][8] = -22,
[0][1][RTW89_IC][1][8] = -40,
- [0][1][RTW89_IC][2][8] = 32,
[0][1][RTW89_KCC][1][8] = -14,
[0][1][RTW89_KCC][0][8] = -14,
[0][1][RTW89_ACMA][1][8] = 20,
@@ -53700,13 +51714,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][8] = 6,
[0][1][RTW89_THAILAND][0][8] = -40,
[0][1][RTW89_FCC][1][10] = -40,
- [0][1][RTW89_FCC][2][10] = 32,
[0][1][RTW89_ETSI][1][10] = 20,
[0][1][RTW89_ETSI][0][10] = -18,
[0][1][RTW89_MKK][1][10] = 18,
[0][1][RTW89_MKK][0][10] = -22,
[0][1][RTW89_IC][1][10] = -40,
- [0][1][RTW89_IC][2][10] = 32,
[0][1][RTW89_KCC][1][10] = -14,
[0][1][RTW89_KCC][0][10] = -14,
[0][1][RTW89_ACMA][1][10] = 20,
@@ -53719,13 +51731,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][10] = 6,
[0][1][RTW89_THAILAND][0][10] = -40,
[0][1][RTW89_FCC][1][12] = -40,
- [0][1][RTW89_FCC][2][12] = 32,
[0][1][RTW89_ETSI][1][12] = 20,
[0][1][RTW89_ETSI][0][12] = -18,
[0][1][RTW89_MKK][1][12] = 18,
[0][1][RTW89_MKK][0][12] = -22,
[0][1][RTW89_IC][1][12] = -40,
- [0][1][RTW89_IC][2][12] = 32,
[0][1][RTW89_KCC][1][12] = -14,
[0][1][RTW89_KCC][0][12] = -14,
[0][1][RTW89_ACMA][1][12] = 20,
@@ -53738,13 +51748,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][12] = 6,
[0][1][RTW89_THAILAND][0][12] = -40,
[0][1][RTW89_FCC][1][14] = -40,
- [0][1][RTW89_FCC][2][14] = 32,
[0][1][RTW89_ETSI][1][14] = 20,
[0][1][RTW89_ETSI][0][14] = -18,
[0][1][RTW89_MKK][1][14] = 18,
[0][1][RTW89_MKK][0][14] = -22,
[0][1][RTW89_IC][1][14] = -40,
- [0][1][RTW89_IC][2][14] = 32,
[0][1][RTW89_KCC][1][14] = -14,
[0][1][RTW89_KCC][0][14] = -14,
[0][1][RTW89_ACMA][1][14] = 20,
@@ -53757,13 +51765,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][14] = 6,
[0][1][RTW89_THAILAND][0][14] = -40,
[0][1][RTW89_FCC][1][15] = -40,
- [0][1][RTW89_FCC][2][15] = 32,
[0][1][RTW89_ETSI][1][15] = 20,
[0][1][RTW89_ETSI][0][15] = -18,
[0][1][RTW89_MKK][1][15] = 18,
[0][1][RTW89_MKK][0][15] = -22,
[0][1][RTW89_IC][1][15] = -40,
- [0][1][RTW89_IC][2][15] = 32,
[0][1][RTW89_KCC][1][15] = -14,
[0][1][RTW89_KCC][0][15] = -14,
[0][1][RTW89_ACMA][1][15] = 20,
@@ -53776,13 +51782,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][15] = 6,
[0][1][RTW89_THAILAND][0][15] = -40,
[0][1][RTW89_FCC][1][17] = -40,
- [0][1][RTW89_FCC][2][17] = 32,
[0][1][RTW89_ETSI][1][17] = 20,
[0][1][RTW89_ETSI][0][17] = -18,
[0][1][RTW89_MKK][1][17] = 18,
[0][1][RTW89_MKK][0][17] = -22,
[0][1][RTW89_IC][1][17] = -40,
- [0][1][RTW89_IC][2][17] = 32,
[0][1][RTW89_KCC][1][17] = -14,
[0][1][RTW89_KCC][0][17] = -14,
[0][1][RTW89_ACMA][1][17] = 20,
@@ -53795,13 +51799,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][17] = 6,
[0][1][RTW89_THAILAND][0][17] = -40,
[0][1][RTW89_FCC][1][19] = -40,
- [0][1][RTW89_FCC][2][19] = 32,
[0][1][RTW89_ETSI][1][19] = 20,
[0][1][RTW89_ETSI][0][19] = -18,
[0][1][RTW89_MKK][1][19] = 18,
[0][1][RTW89_MKK][0][19] = -22,
[0][1][RTW89_IC][1][19] = -40,
- [0][1][RTW89_IC][2][19] = 32,
[0][1][RTW89_KCC][1][19] = -14,
[0][1][RTW89_KCC][0][19] = -14,
[0][1][RTW89_ACMA][1][19] = 20,
@@ -53814,13 +51816,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][19] = 6,
[0][1][RTW89_THAILAND][0][19] = -40,
[0][1][RTW89_FCC][1][21] = -40,
- [0][1][RTW89_FCC][2][21] = 32,
[0][1][RTW89_ETSI][1][21] = 20,
[0][1][RTW89_ETSI][0][21] = -18,
[0][1][RTW89_MKK][1][21] = 18,
[0][1][RTW89_MKK][0][21] = -22,
[0][1][RTW89_IC][1][21] = -40,
- [0][1][RTW89_IC][2][21] = 32,
[0][1][RTW89_KCC][1][21] = -14,
[0][1][RTW89_KCC][0][21] = -14,
[0][1][RTW89_ACMA][1][21] = 20,
@@ -53833,13 +51833,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][21] = 6,
[0][1][RTW89_THAILAND][0][21] = -40,
[0][1][RTW89_FCC][1][23] = -40,
- [0][1][RTW89_FCC][2][23] = 32,
[0][1][RTW89_ETSI][1][23] = 20,
[0][1][RTW89_ETSI][0][23] = -18,
[0][1][RTW89_MKK][1][23] = 18,
[0][1][RTW89_MKK][0][23] = -22,
[0][1][RTW89_IC][1][23] = -40,
- [0][1][RTW89_IC][2][23] = 32,
[0][1][RTW89_KCC][1][23] = -14,
[0][1][RTW89_KCC][0][23] = -14,
[0][1][RTW89_ACMA][1][23] = 20,
@@ -53852,13 +51850,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][23] = 6,
[0][1][RTW89_THAILAND][0][23] = -40,
[0][1][RTW89_FCC][1][25] = -40,
- [0][1][RTW89_FCC][2][25] = 32,
[0][1][RTW89_ETSI][1][25] = 20,
[0][1][RTW89_ETSI][0][25] = -18,
[0][1][RTW89_MKK][1][25] = -4,
[0][1][RTW89_MKK][0][25] = -22,
[0][1][RTW89_IC][1][25] = -40,
- [0][1][RTW89_IC][2][25] = 32,
[0][1][RTW89_KCC][1][25] = -14,
[0][1][RTW89_KCC][0][25] = -14,
[0][1][RTW89_ACMA][1][25] = 20,
@@ -53871,13 +51867,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][25] = 6,
[0][1][RTW89_THAILAND][0][25] = -40,
[0][1][RTW89_FCC][1][27] = -40,
- [0][1][RTW89_FCC][2][27] = 32,
[0][1][RTW89_ETSI][1][27] = 20,
[0][1][RTW89_ETSI][0][27] = -18,
[0][1][RTW89_MKK][1][27] = -4,
[0][1][RTW89_MKK][0][27] = -22,
[0][1][RTW89_IC][1][27] = -40,
- [0][1][RTW89_IC][2][27] = 32,
[0][1][RTW89_KCC][1][27] = -14,
[0][1][RTW89_KCC][0][27] = -14,
[0][1][RTW89_ACMA][1][27] = 20,
@@ -53890,13 +51884,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][27] = 6,
[0][1][RTW89_THAILAND][0][27] = -40,
[0][1][RTW89_FCC][1][29] = -40,
- [0][1][RTW89_FCC][2][29] = 32,
[0][1][RTW89_ETSI][1][29] = 20,
[0][1][RTW89_ETSI][0][29] = -18,
[0][1][RTW89_MKK][1][29] = -4,
[0][1][RTW89_MKK][0][29] = -22,
[0][1][RTW89_IC][1][29] = -40,
- [0][1][RTW89_IC][2][29] = 32,
[0][1][RTW89_KCC][1][29] = -14,
[0][1][RTW89_KCC][0][29] = -14,
[0][1][RTW89_ACMA][1][29] = 20,
@@ -53909,13 +51901,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][29] = 6,
[0][1][RTW89_THAILAND][0][29] = -40,
[0][1][RTW89_FCC][1][30] = -40,
- [0][1][RTW89_FCC][2][30] = 32,
[0][1][RTW89_ETSI][1][30] = 20,
[0][1][RTW89_ETSI][0][30] = -18,
[0][1][RTW89_MKK][1][30] = -4,
[0][1][RTW89_MKK][0][30] = -22,
[0][1][RTW89_IC][1][30] = -40,
- [0][1][RTW89_IC][2][30] = 32,
[0][1][RTW89_KCC][1][30] = -14,
[0][1][RTW89_KCC][0][30] = -14,
[0][1][RTW89_ACMA][1][30] = 20,
@@ -53928,13 +51918,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][30] = 6,
[0][1][RTW89_THAILAND][0][30] = -40,
[0][1][RTW89_FCC][1][32] = -40,
- [0][1][RTW89_FCC][2][32] = 32,
[0][1][RTW89_ETSI][1][32] = 20,
[0][1][RTW89_ETSI][0][32] = -18,
[0][1][RTW89_MKK][1][32] = -4,
[0][1][RTW89_MKK][0][32] = -22,
[0][1][RTW89_IC][1][32] = -40,
- [0][1][RTW89_IC][2][32] = 32,
[0][1][RTW89_KCC][1][32] = -14,
[0][1][RTW89_KCC][0][32] = -14,
[0][1][RTW89_ACMA][1][32] = 20,
@@ -53947,13 +51935,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][32] = 6,
[0][1][RTW89_THAILAND][0][32] = -40,
[0][1][RTW89_FCC][1][34] = -40,
- [0][1][RTW89_FCC][2][34] = 32,
[0][1][RTW89_ETSI][1][34] = 20,
[0][1][RTW89_ETSI][0][34] = -18,
[0][1][RTW89_MKK][1][34] = -4,
[0][1][RTW89_MKK][0][34] = -22,
[0][1][RTW89_IC][1][34] = -40,
- [0][1][RTW89_IC][2][34] = 32,
[0][1][RTW89_KCC][1][34] = -14,
[0][1][RTW89_KCC][0][34] = -14,
[0][1][RTW89_ACMA][1][34] = 20,
@@ -53966,13 +51952,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][34] = 6,
[0][1][RTW89_THAILAND][0][34] = -40,
[0][1][RTW89_FCC][1][36] = -40,
- [0][1][RTW89_FCC][2][36] = 32,
[0][1][RTW89_ETSI][1][36] = 20,
[0][1][RTW89_ETSI][0][36] = -18,
[0][1][RTW89_MKK][1][36] = -4,
[0][1][RTW89_MKK][0][36] = -22,
[0][1][RTW89_IC][1][36] = -40,
- [0][1][RTW89_IC][2][36] = 32,
[0][1][RTW89_KCC][1][36] = -14,
[0][1][RTW89_KCC][0][36] = -14,
[0][1][RTW89_ACMA][1][36] = 20,
@@ -53985,13 +51969,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][36] = 6,
[0][1][RTW89_THAILAND][0][36] = -40,
[0][1][RTW89_FCC][1][38] = -40,
- [0][1][RTW89_FCC][2][38] = 32,
[0][1][RTW89_ETSI][1][38] = 20,
[0][1][RTW89_ETSI][0][38] = -18,
[0][1][RTW89_MKK][1][38] = -4,
[0][1][RTW89_MKK][0][38] = -22,
[0][1][RTW89_IC][1][38] = -40,
- [0][1][RTW89_IC][2][38] = 32,
[0][1][RTW89_KCC][1][38] = -14,
[0][1][RTW89_KCC][0][38] = -14,
[0][1][RTW89_ACMA][1][38] = 20,
@@ -54004,13 +51986,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][38] = 6,
[0][1][RTW89_THAILAND][0][38] = -40,
[0][1][RTW89_FCC][1][40] = -40,
- [0][1][RTW89_FCC][2][40] = 32,
[0][1][RTW89_ETSI][1][40] = 20,
[0][1][RTW89_ETSI][0][40] = -18,
[0][1][RTW89_MKK][1][40] = -4,
[0][1][RTW89_MKK][0][40] = -22,
[0][1][RTW89_IC][1][40] = -40,
- [0][1][RTW89_IC][2][40] = 32,
[0][1][RTW89_KCC][1][40] = -14,
[0][1][RTW89_KCC][0][40] = -14,
[0][1][RTW89_ACMA][1][40] = 20,
@@ -54023,13 +52003,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][40] = 6,
[0][1][RTW89_THAILAND][0][40] = -40,
[0][1][RTW89_FCC][1][42] = -40,
- [0][1][RTW89_FCC][2][42] = 32,
[0][1][RTW89_ETSI][1][42] = 20,
[0][1][RTW89_ETSI][0][42] = -18,
[0][1][RTW89_MKK][1][42] = -4,
[0][1][RTW89_MKK][0][42] = -22,
[0][1][RTW89_IC][1][42] = -40,
- [0][1][RTW89_IC][2][42] = 32,
[0][1][RTW89_KCC][1][42] = -14,
[0][1][RTW89_KCC][0][42] = -14,
[0][1][RTW89_ACMA][1][42] = 20,
@@ -54042,13 +52020,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][42] = 6,
[0][1][RTW89_THAILAND][0][42] = -40,
[0][1][RTW89_FCC][1][44] = -40,
- [0][1][RTW89_FCC][2][44] = 32,
[0][1][RTW89_ETSI][1][44] = 20,
[0][1][RTW89_ETSI][0][44] = -18,
[0][1][RTW89_MKK][1][44] = -4,
[0][1][RTW89_MKK][0][44] = -22,
[0][1][RTW89_IC][1][44] = -40,
- [0][1][RTW89_IC][2][44] = 32,
[0][1][RTW89_KCC][1][44] = -14,
[0][1][RTW89_KCC][0][44] = -14,
[0][1][RTW89_ACMA][1][44] = 20,
@@ -54061,13 +52037,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][44] = 6,
[0][1][RTW89_THAILAND][0][44] = -40,
[0][1][RTW89_FCC][1][45] = -40,
- [0][1][RTW89_FCC][2][45] = 127,
[0][1][RTW89_ETSI][1][45] = 127,
[0][1][RTW89_ETSI][0][45] = 127,
[0][1][RTW89_MKK][1][45] = 127,
[0][1][RTW89_MKK][0][45] = 127,
[0][1][RTW89_IC][1][45] = -40,
- [0][1][RTW89_IC][2][45] = 32,
[0][1][RTW89_KCC][1][45] = -14,
[0][1][RTW89_KCC][0][45] = 127,
[0][1][RTW89_ACMA][1][45] = 127,
@@ -54080,13 +52054,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][45] = 127,
[0][1][RTW89_THAILAND][0][45] = 127,
[0][1][RTW89_FCC][1][47] = -40,
- [0][1][RTW89_FCC][2][47] = 127,
[0][1][RTW89_ETSI][1][47] = 127,
[0][1][RTW89_ETSI][0][47] = 127,
[0][1][RTW89_MKK][1][47] = 127,
[0][1][RTW89_MKK][0][47] = 127,
[0][1][RTW89_IC][1][47] = -40,
- [0][1][RTW89_IC][2][47] = 32,
[0][1][RTW89_KCC][1][47] = -14,
[0][1][RTW89_KCC][0][47] = 127,
[0][1][RTW89_ACMA][1][47] = 127,
@@ -54099,13 +52071,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][47] = 127,
[0][1][RTW89_THAILAND][0][47] = 127,
[0][1][RTW89_FCC][1][49] = -40,
- [0][1][RTW89_FCC][2][49] = 127,
[0][1][RTW89_ETSI][1][49] = 127,
[0][1][RTW89_ETSI][0][49] = 127,
[0][1][RTW89_MKK][1][49] = 127,
[0][1][RTW89_MKK][0][49] = 127,
[0][1][RTW89_IC][1][49] = -40,
- [0][1][RTW89_IC][2][49] = 32,
[0][1][RTW89_KCC][1][49] = -14,
[0][1][RTW89_KCC][0][49] = 127,
[0][1][RTW89_ACMA][1][49] = 127,
@@ -54118,13 +52088,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][49] = 127,
[0][1][RTW89_THAILAND][0][49] = 127,
[0][1][RTW89_FCC][1][51] = -40,
- [0][1][RTW89_FCC][2][51] = 127,
[0][1][RTW89_ETSI][1][51] = 127,
[0][1][RTW89_ETSI][0][51] = 127,
[0][1][RTW89_MKK][1][51] = 127,
[0][1][RTW89_MKK][0][51] = 127,
[0][1][RTW89_IC][1][51] = -40,
- [0][1][RTW89_IC][2][51] = 32,
[0][1][RTW89_KCC][1][51] = -14,
[0][1][RTW89_KCC][0][51] = 127,
[0][1][RTW89_ACMA][1][51] = 127,
@@ -54137,13 +52105,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][51] = 127,
[0][1][RTW89_THAILAND][0][51] = 127,
[0][1][RTW89_FCC][1][53] = -40,
- [0][1][RTW89_FCC][2][53] = 127,
[0][1][RTW89_ETSI][1][53] = 127,
[0][1][RTW89_ETSI][0][53] = 127,
[0][1][RTW89_MKK][1][53] = 127,
[0][1][RTW89_MKK][0][53] = 127,
[0][1][RTW89_IC][1][53] = -40,
- [0][1][RTW89_IC][2][53] = 32,
[0][1][RTW89_KCC][1][53] = -14,
[0][1][RTW89_KCC][0][53] = 127,
[0][1][RTW89_ACMA][1][53] = 127,
@@ -54156,13 +52122,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][53] = 127,
[0][1][RTW89_THAILAND][0][53] = 127,
[0][1][RTW89_FCC][1][55] = -40,
- [0][1][RTW89_FCC][2][55] = 30,
[0][1][RTW89_ETSI][1][55] = 127,
[0][1][RTW89_ETSI][0][55] = 127,
[0][1][RTW89_MKK][1][55] = 127,
[0][1][RTW89_MKK][0][55] = 127,
[0][1][RTW89_IC][1][55] = -40,
- [0][1][RTW89_IC][2][55] = 30,
[0][1][RTW89_KCC][1][55] = -14,
[0][1][RTW89_KCC][0][55] = 127,
[0][1][RTW89_ACMA][1][55] = 127,
@@ -54175,13 +52139,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][55] = 127,
[0][1][RTW89_THAILAND][0][55] = 127,
[0][1][RTW89_FCC][1][57] = -40,
- [0][1][RTW89_FCC][2][57] = 30,
[0][1][RTW89_ETSI][1][57] = 127,
[0][1][RTW89_ETSI][0][57] = 127,
[0][1][RTW89_MKK][1][57] = 127,
[0][1][RTW89_MKK][0][57] = 127,
[0][1][RTW89_IC][1][57] = -40,
- [0][1][RTW89_IC][2][57] = 30,
[0][1][RTW89_KCC][1][57] = -14,
[0][1][RTW89_KCC][0][57] = 127,
[0][1][RTW89_ACMA][1][57] = 127,
@@ -54194,13 +52156,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][57] = 127,
[0][1][RTW89_THAILAND][0][57] = 127,
[0][1][RTW89_FCC][1][59] = -40,
- [0][1][RTW89_FCC][2][59] = 30,
[0][1][RTW89_ETSI][1][59] = 127,
[0][1][RTW89_ETSI][0][59] = 127,
[0][1][RTW89_MKK][1][59] = 127,
[0][1][RTW89_MKK][0][59] = 127,
[0][1][RTW89_IC][1][59] = -40,
- [0][1][RTW89_IC][2][59] = 30,
[0][1][RTW89_KCC][1][59] = -14,
[0][1][RTW89_KCC][0][59] = 127,
[0][1][RTW89_ACMA][1][59] = 127,
@@ -54213,13 +52173,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][59] = 127,
[0][1][RTW89_THAILAND][0][59] = 127,
[0][1][RTW89_FCC][1][60] = -40,
- [0][1][RTW89_FCC][2][60] = 30,
[0][1][RTW89_ETSI][1][60] = 127,
[0][1][RTW89_ETSI][0][60] = 127,
[0][1][RTW89_MKK][1][60] = 127,
[0][1][RTW89_MKK][0][60] = 127,
[0][1][RTW89_IC][1][60] = -40,
- [0][1][RTW89_IC][2][60] = 30,
[0][1][RTW89_KCC][1][60] = -14,
[0][1][RTW89_KCC][0][60] = 127,
[0][1][RTW89_ACMA][1][60] = 127,
@@ -54232,13 +52190,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][60] = 127,
[0][1][RTW89_THAILAND][0][60] = 127,
[0][1][RTW89_FCC][1][62] = -40,
- [0][1][RTW89_FCC][2][62] = 30,
[0][1][RTW89_ETSI][1][62] = 127,
[0][1][RTW89_ETSI][0][62] = 127,
[0][1][RTW89_MKK][1][62] = 127,
[0][1][RTW89_MKK][0][62] = 127,
[0][1][RTW89_IC][1][62] = -40,
- [0][1][RTW89_IC][2][62] = 30,
[0][1][RTW89_KCC][1][62] = -14,
[0][1][RTW89_KCC][0][62] = 127,
[0][1][RTW89_ACMA][1][62] = 127,
@@ -54251,13 +52207,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][62] = 127,
[0][1][RTW89_THAILAND][0][62] = 127,
[0][1][RTW89_FCC][1][64] = -40,
- [0][1][RTW89_FCC][2][64] = 30,
[0][1][RTW89_ETSI][1][64] = 127,
[0][1][RTW89_ETSI][0][64] = 127,
[0][1][RTW89_MKK][1][64] = 127,
[0][1][RTW89_MKK][0][64] = 127,
[0][1][RTW89_IC][1][64] = -40,
- [0][1][RTW89_IC][2][64] = 30,
[0][1][RTW89_KCC][1][64] = -14,
[0][1][RTW89_KCC][0][64] = 127,
[0][1][RTW89_ACMA][1][64] = 127,
@@ -54270,13 +52224,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][64] = 127,
[0][1][RTW89_THAILAND][0][64] = 127,
[0][1][RTW89_FCC][1][66] = -40,
- [0][1][RTW89_FCC][2][66] = 30,
[0][1][RTW89_ETSI][1][66] = 127,
[0][1][RTW89_ETSI][0][66] = 127,
[0][1][RTW89_MKK][1][66] = 127,
[0][1][RTW89_MKK][0][66] = 127,
[0][1][RTW89_IC][1][66] = -40,
- [0][1][RTW89_IC][2][66] = 30,
[0][1][RTW89_KCC][1][66] = -14,
[0][1][RTW89_KCC][0][66] = 127,
[0][1][RTW89_ACMA][1][66] = 127,
@@ -54289,13 +52241,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][66] = 127,
[0][1][RTW89_THAILAND][0][66] = 127,
[0][1][RTW89_FCC][1][68] = -40,
- [0][1][RTW89_FCC][2][68] = 30,
[0][1][RTW89_ETSI][1][68] = 127,
[0][1][RTW89_ETSI][0][68] = 127,
[0][1][RTW89_MKK][1][68] = 127,
[0][1][RTW89_MKK][0][68] = 127,
[0][1][RTW89_IC][1][68] = -40,
- [0][1][RTW89_IC][2][68] = 30,
[0][1][RTW89_KCC][1][68] = -14,
[0][1][RTW89_KCC][0][68] = 127,
[0][1][RTW89_ACMA][1][68] = 127,
@@ -54308,13 +52258,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][68] = 127,
[0][1][RTW89_THAILAND][0][68] = 127,
[0][1][RTW89_FCC][1][70] = -38,
- [0][1][RTW89_FCC][2][70] = 30,
[0][1][RTW89_ETSI][1][70] = 127,
[0][1][RTW89_ETSI][0][70] = 127,
[0][1][RTW89_MKK][1][70] = 127,
[0][1][RTW89_MKK][0][70] = 127,
[0][1][RTW89_IC][1][70] = -38,
- [0][1][RTW89_IC][2][70] = 30,
[0][1][RTW89_KCC][1][70] = -14,
[0][1][RTW89_KCC][0][70] = 127,
[0][1][RTW89_ACMA][1][70] = 127,
@@ -54327,13 +52275,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][70] = 127,
[0][1][RTW89_THAILAND][0][70] = 127,
[0][1][RTW89_FCC][1][72] = -38,
- [0][1][RTW89_FCC][2][72] = 30,
[0][1][RTW89_ETSI][1][72] = 127,
[0][1][RTW89_ETSI][0][72] = 127,
[0][1][RTW89_MKK][1][72] = 127,
[0][1][RTW89_MKK][0][72] = 127,
[0][1][RTW89_IC][1][72] = -38,
- [0][1][RTW89_IC][2][72] = 30,
[0][1][RTW89_KCC][1][72] = -14,
[0][1][RTW89_KCC][0][72] = 127,
[0][1][RTW89_ACMA][1][72] = 127,
@@ -54346,13 +52292,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][72] = 127,
[0][1][RTW89_THAILAND][0][72] = 127,
[0][1][RTW89_FCC][1][74] = -38,
- [0][1][RTW89_FCC][2][74] = 30,
[0][1][RTW89_ETSI][1][74] = 127,
[0][1][RTW89_ETSI][0][74] = 127,
[0][1][RTW89_MKK][1][74] = 127,
[0][1][RTW89_MKK][0][74] = 127,
[0][1][RTW89_IC][1][74] = -38,
- [0][1][RTW89_IC][2][74] = 30,
[0][1][RTW89_KCC][1][74] = -14,
[0][1][RTW89_KCC][0][74] = 127,
[0][1][RTW89_ACMA][1][74] = 127,
@@ -54365,13 +52309,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][74] = 127,
[0][1][RTW89_THAILAND][0][74] = 127,
[0][1][RTW89_FCC][1][75] = -38,
- [0][1][RTW89_FCC][2][75] = 30,
[0][1][RTW89_ETSI][1][75] = 127,
[0][1][RTW89_ETSI][0][75] = 127,
[0][1][RTW89_MKK][1][75] = 127,
[0][1][RTW89_MKK][0][75] = 127,
[0][1][RTW89_IC][1][75] = -38,
- [0][1][RTW89_IC][2][75] = 30,
[0][1][RTW89_KCC][1][75] = -14,
[0][1][RTW89_KCC][0][75] = 127,
[0][1][RTW89_ACMA][1][75] = 127,
@@ -54384,13 +52326,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][75] = 127,
[0][1][RTW89_THAILAND][0][75] = 127,
[0][1][RTW89_FCC][1][77] = -38,
- [0][1][RTW89_FCC][2][77] = 30,
[0][1][RTW89_ETSI][1][77] = 127,
[0][1][RTW89_ETSI][0][77] = 127,
[0][1][RTW89_MKK][1][77] = 127,
[0][1][RTW89_MKK][0][77] = 127,
[0][1][RTW89_IC][1][77] = -38,
- [0][1][RTW89_IC][2][77] = 30,
[0][1][RTW89_KCC][1][77] = -14,
[0][1][RTW89_KCC][0][77] = 127,
[0][1][RTW89_ACMA][1][77] = 127,
@@ -54403,13 +52343,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][77] = 127,
[0][1][RTW89_THAILAND][0][77] = 127,
[0][1][RTW89_FCC][1][79] = -38,
- [0][1][RTW89_FCC][2][79] = 30,
[0][1][RTW89_ETSI][1][79] = 127,
[0][1][RTW89_ETSI][0][79] = 127,
[0][1][RTW89_MKK][1][79] = 127,
[0][1][RTW89_MKK][0][79] = 127,
[0][1][RTW89_IC][1][79] = -38,
- [0][1][RTW89_IC][2][79] = 30,
[0][1][RTW89_KCC][1][79] = -14,
[0][1][RTW89_KCC][0][79] = 127,
[0][1][RTW89_ACMA][1][79] = 127,
@@ -54422,13 +52360,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][79] = 127,
[0][1][RTW89_THAILAND][0][79] = 127,
[0][1][RTW89_FCC][1][81] = -38,
- [0][1][RTW89_FCC][2][81] = 30,
[0][1][RTW89_ETSI][1][81] = 127,
[0][1][RTW89_ETSI][0][81] = 127,
[0][1][RTW89_MKK][1][81] = 127,
[0][1][RTW89_MKK][0][81] = 127,
[0][1][RTW89_IC][1][81] = -38,
- [0][1][RTW89_IC][2][81] = 30,
[0][1][RTW89_KCC][1][81] = -14,
[0][1][RTW89_KCC][0][81] = 127,
[0][1][RTW89_ACMA][1][81] = 127,
@@ -54441,13 +52377,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][81] = 127,
[0][1][RTW89_THAILAND][0][81] = 127,
[0][1][RTW89_FCC][1][83] = -38,
- [0][1][RTW89_FCC][2][83] = 30,
[0][1][RTW89_ETSI][1][83] = 127,
[0][1][RTW89_ETSI][0][83] = 127,
[0][1][RTW89_MKK][1][83] = 127,
[0][1][RTW89_MKK][0][83] = 127,
[0][1][RTW89_IC][1][83] = -38,
- [0][1][RTW89_IC][2][83] = 30,
[0][1][RTW89_KCC][1][83] = -14,
[0][1][RTW89_KCC][0][83] = 127,
[0][1][RTW89_ACMA][1][83] = 127,
@@ -54460,13 +52394,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][83] = 127,
[0][1][RTW89_THAILAND][0][83] = 127,
[0][1][RTW89_FCC][1][85] = -38,
- [0][1][RTW89_FCC][2][85] = 30,
[0][1][RTW89_ETSI][1][85] = 127,
[0][1][RTW89_ETSI][0][85] = 127,
[0][1][RTW89_MKK][1][85] = 127,
[0][1][RTW89_MKK][0][85] = 127,
[0][1][RTW89_IC][1][85] = -38,
- [0][1][RTW89_IC][2][85] = 30,
[0][1][RTW89_KCC][1][85] = -14,
[0][1][RTW89_KCC][0][85] = 127,
[0][1][RTW89_ACMA][1][85] = 127,
@@ -54479,13 +52411,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][85] = 127,
[0][1][RTW89_THAILAND][0][85] = 127,
[0][1][RTW89_FCC][1][87] = -40,
- [0][1][RTW89_FCC][2][87] = 127,
[0][1][RTW89_ETSI][1][87] = 127,
[0][1][RTW89_ETSI][0][87] = 127,
[0][1][RTW89_MKK][1][87] = 127,
[0][1][RTW89_MKK][0][87] = 127,
[0][1][RTW89_IC][1][87] = -40,
- [0][1][RTW89_IC][2][87] = 127,
[0][1][RTW89_KCC][1][87] = -14,
[0][1][RTW89_KCC][0][87] = 127,
[0][1][RTW89_ACMA][1][87] = 127,
@@ -54498,13 +52428,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][87] = 127,
[0][1][RTW89_THAILAND][0][87] = 127,
[0][1][RTW89_FCC][1][89] = -38,
- [0][1][RTW89_FCC][2][89] = 127,
[0][1][RTW89_ETSI][1][89] = 127,
[0][1][RTW89_ETSI][0][89] = 127,
[0][1][RTW89_MKK][1][89] = 127,
[0][1][RTW89_MKK][0][89] = 127,
[0][1][RTW89_IC][1][89] = -38,
- [0][1][RTW89_IC][2][89] = 127,
[0][1][RTW89_KCC][1][89] = -14,
[0][1][RTW89_KCC][0][89] = 127,
[0][1][RTW89_ACMA][1][89] = 127,
@@ -54517,13 +52445,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][89] = 127,
[0][1][RTW89_THAILAND][0][89] = 127,
[0][1][RTW89_FCC][1][90] = -38,
- [0][1][RTW89_FCC][2][90] = 127,
[0][1][RTW89_ETSI][1][90] = 127,
[0][1][RTW89_ETSI][0][90] = 127,
[0][1][RTW89_MKK][1][90] = 127,
[0][1][RTW89_MKK][0][90] = 127,
[0][1][RTW89_IC][1][90] = -38,
- [0][1][RTW89_IC][2][90] = 127,
[0][1][RTW89_KCC][1][90] = -14,
[0][1][RTW89_KCC][0][90] = 127,
[0][1][RTW89_ACMA][1][90] = 127,
@@ -54536,13 +52462,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][90] = 127,
[0][1][RTW89_THAILAND][0][90] = 127,
[0][1][RTW89_FCC][1][92] = -38,
- [0][1][RTW89_FCC][2][92] = 127,
[0][1][RTW89_ETSI][1][92] = 127,
[0][1][RTW89_ETSI][0][92] = 127,
[0][1][RTW89_MKK][1][92] = 127,
[0][1][RTW89_MKK][0][92] = 127,
[0][1][RTW89_IC][1][92] = -38,
- [0][1][RTW89_IC][2][92] = 127,
[0][1][RTW89_KCC][1][92] = -14,
[0][1][RTW89_KCC][0][92] = 127,
[0][1][RTW89_ACMA][1][92] = 127,
@@ -54555,13 +52479,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][92] = 127,
[0][1][RTW89_THAILAND][0][92] = 127,
[0][1][RTW89_FCC][1][94] = -38,
- [0][1][RTW89_FCC][2][94] = 127,
[0][1][RTW89_ETSI][1][94] = 127,
[0][1][RTW89_ETSI][0][94] = 127,
[0][1][RTW89_MKK][1][94] = 127,
[0][1][RTW89_MKK][0][94] = 127,
[0][1][RTW89_IC][1][94] = -38,
- [0][1][RTW89_IC][2][94] = 127,
[0][1][RTW89_KCC][1][94] = -14,
[0][1][RTW89_KCC][0][94] = 127,
[0][1][RTW89_ACMA][1][94] = 127,
@@ -54574,13 +52496,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][94] = 127,
[0][1][RTW89_THAILAND][0][94] = 127,
[0][1][RTW89_FCC][1][96] = -38,
- [0][1][RTW89_FCC][2][96] = 127,
[0][1][RTW89_ETSI][1][96] = 127,
[0][1][RTW89_ETSI][0][96] = 127,
[0][1][RTW89_MKK][1][96] = 127,
[0][1][RTW89_MKK][0][96] = 127,
[0][1][RTW89_IC][1][96] = -38,
- [0][1][RTW89_IC][2][96] = 127,
[0][1][RTW89_KCC][1][96] = -14,
[0][1][RTW89_KCC][0][96] = 127,
[0][1][RTW89_ACMA][1][96] = 127,
@@ -54593,13 +52513,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][96] = 127,
[0][1][RTW89_THAILAND][0][96] = 127,
[0][1][RTW89_FCC][1][98] = -38,
- [0][1][RTW89_FCC][2][98] = 127,
[0][1][RTW89_ETSI][1][98] = 127,
[0][1][RTW89_ETSI][0][98] = 127,
[0][1][RTW89_MKK][1][98] = 127,
[0][1][RTW89_MKK][0][98] = 127,
[0][1][RTW89_IC][1][98] = -38,
- [0][1][RTW89_IC][2][98] = 127,
[0][1][RTW89_KCC][1][98] = -14,
[0][1][RTW89_KCC][0][98] = 127,
[0][1][RTW89_ACMA][1][98] = 127,
@@ -54612,13 +52530,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][98] = 127,
[0][1][RTW89_THAILAND][0][98] = 127,
[0][1][RTW89_FCC][1][100] = -38,
- [0][1][RTW89_FCC][2][100] = 127,
[0][1][RTW89_ETSI][1][100] = 127,
[0][1][RTW89_ETSI][0][100] = 127,
[0][1][RTW89_MKK][1][100] = 127,
[0][1][RTW89_MKK][0][100] = 127,
[0][1][RTW89_IC][1][100] = -38,
- [0][1][RTW89_IC][2][100] = 127,
[0][1][RTW89_KCC][1][100] = -14,
[0][1][RTW89_KCC][0][100] = 127,
[0][1][RTW89_ACMA][1][100] = 127,
@@ -54631,13 +52547,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][100] = 127,
[0][1][RTW89_THAILAND][0][100] = 127,
[0][1][RTW89_FCC][1][102] = -38,
- [0][1][RTW89_FCC][2][102] = 127,
[0][1][RTW89_ETSI][1][102] = 127,
[0][1][RTW89_ETSI][0][102] = 127,
[0][1][RTW89_MKK][1][102] = 127,
[0][1][RTW89_MKK][0][102] = 127,
[0][1][RTW89_IC][1][102] = -38,
- [0][1][RTW89_IC][2][102] = 127,
[0][1][RTW89_KCC][1][102] = -14,
[0][1][RTW89_KCC][0][102] = 127,
[0][1][RTW89_ACMA][1][102] = 127,
@@ -54650,13 +52564,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][102] = 127,
[0][1][RTW89_THAILAND][0][102] = 127,
[0][1][RTW89_FCC][1][104] = -38,
- [0][1][RTW89_FCC][2][104] = 127,
[0][1][RTW89_ETSI][1][104] = 127,
[0][1][RTW89_ETSI][0][104] = 127,
[0][1][RTW89_MKK][1][104] = 127,
[0][1][RTW89_MKK][0][104] = 127,
[0][1][RTW89_IC][1][104] = -38,
- [0][1][RTW89_IC][2][104] = 127,
[0][1][RTW89_KCC][1][104] = -14,
[0][1][RTW89_KCC][0][104] = 127,
[0][1][RTW89_ACMA][1][104] = 127,
@@ -54669,13 +52581,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][104] = 127,
[0][1][RTW89_THAILAND][0][104] = 127,
[0][1][RTW89_FCC][1][105] = -38,
- [0][1][RTW89_FCC][2][105] = 127,
[0][1][RTW89_ETSI][1][105] = 127,
[0][1][RTW89_ETSI][0][105] = 127,
[0][1][RTW89_MKK][1][105] = 127,
[0][1][RTW89_MKK][0][105] = 127,
[0][1][RTW89_IC][1][105] = -38,
- [0][1][RTW89_IC][2][105] = 127,
[0][1][RTW89_KCC][1][105] = -14,
[0][1][RTW89_KCC][0][105] = 127,
[0][1][RTW89_ACMA][1][105] = 127,
@@ -54688,13 +52598,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][105] = 127,
[0][1][RTW89_THAILAND][0][105] = 127,
[0][1][RTW89_FCC][1][107] = -34,
- [0][1][RTW89_FCC][2][107] = 127,
[0][1][RTW89_ETSI][1][107] = 127,
[0][1][RTW89_ETSI][0][107] = 127,
[0][1][RTW89_MKK][1][107] = 127,
[0][1][RTW89_MKK][0][107] = 127,
[0][1][RTW89_IC][1][107] = -34,
- [0][1][RTW89_IC][2][107] = 127,
[0][1][RTW89_KCC][1][107] = -14,
[0][1][RTW89_KCC][0][107] = 127,
[0][1][RTW89_ACMA][1][107] = 127,
@@ -54707,13 +52615,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][107] = 127,
[0][1][RTW89_THAILAND][0][107] = 127,
[0][1][RTW89_FCC][1][109] = -34,
- [0][1][RTW89_FCC][2][109] = 127,
[0][1][RTW89_ETSI][1][109] = 127,
[0][1][RTW89_ETSI][0][109] = 127,
[0][1][RTW89_MKK][1][109] = 127,
[0][1][RTW89_MKK][0][109] = 127,
[0][1][RTW89_IC][1][109] = -34,
- [0][1][RTW89_IC][2][109] = 127,
[0][1][RTW89_KCC][1][109] = 127,
[0][1][RTW89_KCC][0][109] = 127,
[0][1][RTW89_ACMA][1][109] = 127,
@@ -54726,13 +52632,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][109] = 127,
[0][1][RTW89_THAILAND][0][109] = 127,
[0][1][RTW89_FCC][1][111] = 127,
- [0][1][RTW89_FCC][2][111] = 127,
[0][1][RTW89_ETSI][1][111] = 127,
[0][1][RTW89_ETSI][0][111] = 127,
[0][1][RTW89_MKK][1][111] = 127,
[0][1][RTW89_MKK][0][111] = 127,
[0][1][RTW89_IC][1][111] = 127,
- [0][1][RTW89_IC][2][111] = 127,
[0][1][RTW89_KCC][1][111] = 127,
[0][1][RTW89_KCC][0][111] = 127,
[0][1][RTW89_ACMA][1][111] = 127,
@@ -54745,13 +52649,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][111] = 127,
[0][1][RTW89_THAILAND][0][111] = 127,
[0][1][RTW89_FCC][1][113] = 127,
- [0][1][RTW89_FCC][2][113] = 127,
[0][1][RTW89_ETSI][1][113] = 127,
[0][1][RTW89_ETSI][0][113] = 127,
[0][1][RTW89_MKK][1][113] = 127,
[0][1][RTW89_MKK][0][113] = 127,
[0][1][RTW89_IC][1][113] = 127,
- [0][1][RTW89_IC][2][113] = 127,
[0][1][RTW89_KCC][1][113] = 127,
[0][1][RTW89_KCC][0][113] = 127,
[0][1][RTW89_ACMA][1][113] = 127,
@@ -54764,13 +52666,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][113] = 127,
[0][1][RTW89_THAILAND][0][113] = 127,
[0][1][RTW89_FCC][1][115] = 127,
- [0][1][RTW89_FCC][2][115] = 127,
[0][1][RTW89_ETSI][1][115] = 127,
[0][1][RTW89_ETSI][0][115] = 127,
[0][1][RTW89_MKK][1][115] = 127,
[0][1][RTW89_MKK][0][115] = 127,
[0][1][RTW89_IC][1][115] = 127,
- [0][1][RTW89_IC][2][115] = 127,
[0][1][RTW89_KCC][1][115] = 127,
[0][1][RTW89_KCC][0][115] = 127,
[0][1][RTW89_ACMA][1][115] = 127,
@@ -54783,13 +52683,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][115] = 127,
[0][1][RTW89_THAILAND][0][115] = 127,
[0][1][RTW89_FCC][1][117] = 127,
- [0][1][RTW89_FCC][2][117] = 127,
[0][1][RTW89_ETSI][1][117] = 127,
[0][1][RTW89_ETSI][0][117] = 127,
[0][1][RTW89_MKK][1][117] = 127,
[0][1][RTW89_MKK][0][117] = 127,
[0][1][RTW89_IC][1][117] = 127,
- [0][1][RTW89_IC][2][117] = 127,
[0][1][RTW89_KCC][1][117] = 127,
[0][1][RTW89_KCC][0][117] = 127,
[0][1][RTW89_ACMA][1][117] = 127,
@@ -54802,13 +52700,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][117] = 127,
[0][1][RTW89_THAILAND][0][117] = 127,
[0][1][RTW89_FCC][1][119] = 127,
- [0][1][RTW89_FCC][2][119] = 127,
[0][1][RTW89_ETSI][1][119] = 127,
[0][1][RTW89_ETSI][0][119] = 127,
[0][1][RTW89_MKK][1][119] = 127,
[0][1][RTW89_MKK][0][119] = 127,
[0][1][RTW89_IC][1][119] = 127,
- [0][1][RTW89_IC][2][119] = 127,
[0][1][RTW89_KCC][1][119] = 127,
[0][1][RTW89_KCC][0][119] = 127,
[0][1][RTW89_ACMA][1][119] = 127,
@@ -54821,13 +52717,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][119] = 127,
[0][1][RTW89_THAILAND][0][119] = 127,
[1][0][RTW89_FCC][1][0] = -4,
- [1][0][RTW89_FCC][2][0] = 52,
[1][0][RTW89_ETSI][1][0] = 46,
[1][0][RTW89_ETSI][0][0] = 6,
[1][0][RTW89_MKK][1][0] = 42,
[1][0][RTW89_MKK][0][0] = 2,
[1][0][RTW89_IC][1][0] = -4,
- [1][0][RTW89_IC][2][0] = 52,
[1][0][RTW89_KCC][1][0] = -2,
[1][0][RTW89_KCC][0][0] = -2,
[1][0][RTW89_ACMA][1][0] = 46,
@@ -54840,13 +52734,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][0] = 42,
[1][0][RTW89_THAILAND][0][0] = -4,
[1][0][RTW89_FCC][1][2] = -4,
- [1][0][RTW89_FCC][2][2] = 52,
[1][0][RTW89_ETSI][1][2] = 46,
[1][0][RTW89_ETSI][0][2] = 6,
[1][0][RTW89_MKK][1][2] = 42,
[1][0][RTW89_MKK][0][2] = 2,
[1][0][RTW89_IC][1][2] = -4,
- [1][0][RTW89_IC][2][2] = 52,
[1][0][RTW89_KCC][1][2] = -2,
[1][0][RTW89_KCC][0][2] = -2,
[1][0][RTW89_ACMA][1][2] = 46,
@@ -54859,13 +52751,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][2] = 42,
[1][0][RTW89_THAILAND][0][2] = -4,
[1][0][RTW89_FCC][1][4] = -4,
- [1][0][RTW89_FCC][2][4] = 52,
[1][0][RTW89_ETSI][1][4] = 46,
[1][0][RTW89_ETSI][0][4] = 6,
[1][0][RTW89_MKK][1][4] = 42,
[1][0][RTW89_MKK][0][4] = 2,
[1][0][RTW89_IC][1][4] = -4,
- [1][0][RTW89_IC][2][4] = 52,
[1][0][RTW89_KCC][1][4] = -2,
[1][0][RTW89_KCC][0][4] = -2,
[1][0][RTW89_ACMA][1][4] = 46,
@@ -54878,13 +52768,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][4] = 42,
[1][0][RTW89_THAILAND][0][4] = -4,
[1][0][RTW89_FCC][1][6] = -4,
- [1][0][RTW89_FCC][2][6] = 52,
[1][0][RTW89_ETSI][1][6] = 46,
[1][0][RTW89_ETSI][0][6] = 6,
[1][0][RTW89_MKK][1][6] = 42,
[1][0][RTW89_MKK][0][6] = 2,
[1][0][RTW89_IC][1][6] = -4,
- [1][0][RTW89_IC][2][6] = 52,
[1][0][RTW89_KCC][1][6] = -2,
[1][0][RTW89_KCC][0][6] = -2,
[1][0][RTW89_ACMA][1][6] = 46,
@@ -54897,13 +52785,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][6] = 42,
[1][0][RTW89_THAILAND][0][6] = -4,
[1][0][RTW89_FCC][1][8] = -4,
- [1][0][RTW89_FCC][2][8] = 52,
[1][0][RTW89_ETSI][1][8] = 46,
[1][0][RTW89_ETSI][0][8] = 6,
[1][0][RTW89_MKK][1][8] = 42,
[1][0][RTW89_MKK][0][8] = 2,
[1][0][RTW89_IC][1][8] = -4,
- [1][0][RTW89_IC][2][8] = 52,
[1][0][RTW89_KCC][1][8] = -2,
[1][0][RTW89_KCC][0][8] = -2,
[1][0][RTW89_ACMA][1][8] = 46,
@@ -54916,13 +52802,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][8] = 42,
[1][0][RTW89_THAILAND][0][8] = -4,
[1][0][RTW89_FCC][1][10] = -4,
- [1][0][RTW89_FCC][2][10] = 52,
[1][0][RTW89_ETSI][1][10] = 46,
[1][0][RTW89_ETSI][0][10] = 6,
[1][0][RTW89_MKK][1][10] = 42,
[1][0][RTW89_MKK][0][10] = 2,
[1][0][RTW89_IC][1][10] = -4,
- [1][0][RTW89_IC][2][10] = 52,
[1][0][RTW89_KCC][1][10] = -2,
[1][0][RTW89_KCC][0][10] = -2,
[1][0][RTW89_ACMA][1][10] = 46,
@@ -54935,13 +52819,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][10] = 42,
[1][0][RTW89_THAILAND][0][10] = -4,
[1][0][RTW89_FCC][1][12] = -4,
- [1][0][RTW89_FCC][2][12] = 52,
[1][0][RTW89_ETSI][1][12] = 46,
[1][0][RTW89_ETSI][0][12] = 6,
[1][0][RTW89_MKK][1][12] = 42,
[1][0][RTW89_MKK][0][12] = 2,
[1][0][RTW89_IC][1][12] = -4,
- [1][0][RTW89_IC][2][12] = 52,
[1][0][RTW89_KCC][1][12] = -2,
[1][0][RTW89_KCC][0][12] = -2,
[1][0][RTW89_ACMA][1][12] = 46,
@@ -54954,13 +52836,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][12] = 42,
[1][0][RTW89_THAILAND][0][12] = -4,
[1][0][RTW89_FCC][1][14] = -4,
- [1][0][RTW89_FCC][2][14] = 52,
[1][0][RTW89_ETSI][1][14] = 46,
[1][0][RTW89_ETSI][0][14] = 6,
[1][0][RTW89_MKK][1][14] = 42,
[1][0][RTW89_MKK][0][14] = 2,
[1][0][RTW89_IC][1][14] = -4,
- [1][0][RTW89_IC][2][14] = 52,
[1][0][RTW89_KCC][1][14] = -2,
[1][0][RTW89_KCC][0][14] = -2,
[1][0][RTW89_ACMA][1][14] = 46,
@@ -54973,13 +52853,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][14] = 42,
[1][0][RTW89_THAILAND][0][14] = -4,
[1][0][RTW89_FCC][1][15] = -4,
- [1][0][RTW89_FCC][2][15] = 52,
[1][0][RTW89_ETSI][1][15] = 46,
[1][0][RTW89_ETSI][0][15] = 6,
[1][0][RTW89_MKK][1][15] = 42,
[1][0][RTW89_MKK][0][15] = 2,
[1][0][RTW89_IC][1][15] = -4,
- [1][0][RTW89_IC][2][15] = 52,
[1][0][RTW89_KCC][1][15] = -2,
[1][0][RTW89_KCC][0][15] = -2,
[1][0][RTW89_ACMA][1][15] = 46,
@@ -54992,13 +52870,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][15] = 42,
[1][0][RTW89_THAILAND][0][15] = -4,
[1][0][RTW89_FCC][1][17] = -4,
- [1][0][RTW89_FCC][2][17] = 52,
[1][0][RTW89_ETSI][1][17] = 46,
[1][0][RTW89_ETSI][0][17] = 6,
[1][0][RTW89_MKK][1][17] = 42,
[1][0][RTW89_MKK][0][17] = 2,
[1][0][RTW89_IC][1][17] = -4,
- [1][0][RTW89_IC][2][17] = 52,
[1][0][RTW89_KCC][1][17] = -2,
[1][0][RTW89_KCC][0][17] = -2,
[1][0][RTW89_ACMA][1][17] = 46,
@@ -55011,13 +52887,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][17] = 42,
[1][0][RTW89_THAILAND][0][17] = -4,
[1][0][RTW89_FCC][1][19] = -4,
- [1][0][RTW89_FCC][2][19] = 52,
[1][0][RTW89_ETSI][1][19] = 46,
[1][0][RTW89_ETSI][0][19] = 6,
[1][0][RTW89_MKK][1][19] = 42,
[1][0][RTW89_MKK][0][19] = 2,
[1][0][RTW89_IC][1][19] = -4,
- [1][0][RTW89_IC][2][19] = 52,
[1][0][RTW89_KCC][1][19] = -2,
[1][0][RTW89_KCC][0][19] = -2,
[1][0][RTW89_ACMA][1][19] = 46,
@@ -55030,13 +52904,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][19] = 42,
[1][0][RTW89_THAILAND][0][19] = -4,
[1][0][RTW89_FCC][1][21] = -4,
- [1][0][RTW89_FCC][2][21] = 52,
[1][0][RTW89_ETSI][1][21] = 46,
[1][0][RTW89_ETSI][0][21] = 6,
[1][0][RTW89_MKK][1][21] = 42,
[1][0][RTW89_MKK][0][21] = 2,
[1][0][RTW89_IC][1][21] = -4,
- [1][0][RTW89_IC][2][21] = 52,
[1][0][RTW89_KCC][1][21] = -2,
[1][0][RTW89_KCC][0][21] = -2,
[1][0][RTW89_ACMA][1][21] = 46,
@@ -55049,13 +52921,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][21] = 42,
[1][0][RTW89_THAILAND][0][21] = -4,
[1][0][RTW89_FCC][1][23] = -4,
- [1][0][RTW89_FCC][2][23] = 66,
[1][0][RTW89_ETSI][1][23] = 46,
[1][0][RTW89_ETSI][0][23] = 6,
[1][0][RTW89_MKK][1][23] = 42,
[1][0][RTW89_MKK][0][23] = 2,
[1][0][RTW89_IC][1][23] = -4,
- [1][0][RTW89_IC][2][23] = 66,
[1][0][RTW89_KCC][1][23] = -2,
[1][0][RTW89_KCC][0][23] = -2,
[1][0][RTW89_ACMA][1][23] = 46,
@@ -55068,13 +52938,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][23] = 42,
[1][0][RTW89_THAILAND][0][23] = -4,
[1][0][RTW89_FCC][1][25] = -4,
- [1][0][RTW89_FCC][2][25] = 66,
[1][0][RTW89_ETSI][1][25] = 46,
[1][0][RTW89_ETSI][0][25] = 6,
[1][0][RTW89_MKK][1][25] = 42,
[1][0][RTW89_MKK][0][25] = 2,
[1][0][RTW89_IC][1][25] = -4,
- [1][0][RTW89_IC][2][25] = 66,
[1][0][RTW89_KCC][1][25] = -2,
[1][0][RTW89_KCC][0][25] = -2,
[1][0][RTW89_ACMA][1][25] = 46,
@@ -55087,13 +52955,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][25] = 42,
[1][0][RTW89_THAILAND][0][25] = -4,
[1][0][RTW89_FCC][1][27] = -4,
- [1][0][RTW89_FCC][2][27] = 66,
[1][0][RTW89_ETSI][1][27] = 46,
[1][0][RTW89_ETSI][0][27] = 6,
[1][0][RTW89_MKK][1][27] = 42,
[1][0][RTW89_MKK][0][27] = 2,
[1][0][RTW89_IC][1][27] = -4,
- [1][0][RTW89_IC][2][27] = 66,
[1][0][RTW89_KCC][1][27] = -2,
[1][0][RTW89_KCC][0][27] = -2,
[1][0][RTW89_ACMA][1][27] = 46,
@@ -55106,13 +52972,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][27] = 42,
[1][0][RTW89_THAILAND][0][27] = -4,
[1][0][RTW89_FCC][1][29] = -4,
- [1][0][RTW89_FCC][2][29] = 66,
[1][0][RTW89_ETSI][1][29] = 46,
[1][0][RTW89_ETSI][0][29] = 6,
[1][0][RTW89_MKK][1][29] = 42,
[1][0][RTW89_MKK][0][29] = 2,
[1][0][RTW89_IC][1][29] = -4,
- [1][0][RTW89_IC][2][29] = 66,
[1][0][RTW89_KCC][1][29] = -2,
[1][0][RTW89_KCC][0][29] = -2,
[1][0][RTW89_ACMA][1][29] = 46,
@@ -55125,13 +52989,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][29] = 42,
[1][0][RTW89_THAILAND][0][29] = -4,
[1][0][RTW89_FCC][1][30] = -4,
- [1][0][RTW89_FCC][2][30] = 66,
[1][0][RTW89_ETSI][1][30] = 46,
[1][0][RTW89_ETSI][0][30] = 6,
[1][0][RTW89_MKK][1][30] = 42,
[1][0][RTW89_MKK][0][30] = 2,
[1][0][RTW89_IC][1][30] = -4,
- [1][0][RTW89_IC][2][30] = 66,
[1][0][RTW89_KCC][1][30] = -2,
[1][0][RTW89_KCC][0][30] = -2,
[1][0][RTW89_ACMA][1][30] = 46,
@@ -55144,13 +53006,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][30] = 42,
[1][0][RTW89_THAILAND][0][30] = -4,
[1][0][RTW89_FCC][1][32] = -4,
- [1][0][RTW89_FCC][2][32] = 66,
[1][0][RTW89_ETSI][1][32] = 46,
[1][0][RTW89_ETSI][0][32] = 6,
[1][0][RTW89_MKK][1][32] = 42,
[1][0][RTW89_MKK][0][32] = 2,
[1][0][RTW89_IC][1][32] = -4,
- [1][0][RTW89_IC][2][32] = 66,
[1][0][RTW89_KCC][1][32] = -2,
[1][0][RTW89_KCC][0][32] = -2,
[1][0][RTW89_ACMA][1][32] = 46,
@@ -55163,13 +53023,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][32] = 42,
[1][0][RTW89_THAILAND][0][32] = -4,
[1][0][RTW89_FCC][1][34] = -4,
- [1][0][RTW89_FCC][2][34] = 66,
[1][0][RTW89_ETSI][1][34] = 46,
[1][0][RTW89_ETSI][0][34] = 6,
[1][0][RTW89_MKK][1][34] = 42,
[1][0][RTW89_MKK][0][34] = 2,
[1][0][RTW89_IC][1][34] = -4,
- [1][0][RTW89_IC][2][34] = 66,
[1][0][RTW89_KCC][1][34] = -2,
[1][0][RTW89_KCC][0][34] = -2,
[1][0][RTW89_ACMA][1][34] = 46,
@@ -55182,13 +53040,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][34] = 42,
[1][0][RTW89_THAILAND][0][34] = -4,
[1][0][RTW89_FCC][1][36] = -4,
- [1][0][RTW89_FCC][2][36] = 66,
[1][0][RTW89_ETSI][1][36] = 46,
[1][0][RTW89_ETSI][0][36] = 6,
[1][0][RTW89_MKK][1][36] = 42,
[1][0][RTW89_MKK][0][36] = 2,
[1][0][RTW89_IC][1][36] = -4,
- [1][0][RTW89_IC][2][36] = 66,
[1][0][RTW89_KCC][1][36] = -2,
[1][0][RTW89_KCC][0][36] = -2,
[1][0][RTW89_ACMA][1][36] = 46,
@@ -55201,13 +53057,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][36] = 42,
[1][0][RTW89_THAILAND][0][36] = -4,
[1][0][RTW89_FCC][1][38] = -4,
- [1][0][RTW89_FCC][2][38] = 66,
[1][0][RTW89_ETSI][1][38] = 46,
[1][0][RTW89_ETSI][0][38] = 6,
[1][0][RTW89_MKK][1][38] = 42,
[1][0][RTW89_MKK][0][38] = 2,
[1][0][RTW89_IC][1][38] = -4,
- [1][0][RTW89_IC][2][38] = 66,
[1][0][RTW89_KCC][1][38] = -2,
[1][0][RTW89_KCC][0][38] = -2,
[1][0][RTW89_ACMA][1][38] = 46,
@@ -55220,13 +53074,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][38] = 42,
[1][0][RTW89_THAILAND][0][38] = -4,
[1][0][RTW89_FCC][1][40] = -4,
- [1][0][RTW89_FCC][2][40] = 66,
[1][0][RTW89_ETSI][1][40] = 46,
[1][0][RTW89_ETSI][0][40] = 6,
[1][0][RTW89_MKK][1][40] = 42,
[1][0][RTW89_MKK][0][40] = 2,
[1][0][RTW89_IC][1][40] = -4,
- [1][0][RTW89_IC][2][40] = 66,
[1][0][RTW89_KCC][1][40] = -2,
[1][0][RTW89_KCC][0][40] = -2,
[1][0][RTW89_ACMA][1][40] = 46,
@@ -55239,13 +53091,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][40] = 42,
[1][0][RTW89_THAILAND][0][40] = -4,
[1][0][RTW89_FCC][1][42] = -4,
- [1][0][RTW89_FCC][2][42] = 66,
[1][0][RTW89_ETSI][1][42] = 46,
[1][0][RTW89_ETSI][0][42] = 6,
[1][0][RTW89_MKK][1][42] = 42,
[1][0][RTW89_MKK][0][42] = 2,
[1][0][RTW89_IC][1][42] = -4,
- [1][0][RTW89_IC][2][42] = 66,
[1][0][RTW89_KCC][1][42] = -2,
[1][0][RTW89_KCC][0][42] = -2,
[1][0][RTW89_ACMA][1][42] = 46,
@@ -55258,13 +53108,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][42] = 42,
[1][0][RTW89_THAILAND][0][42] = -4,
[1][0][RTW89_FCC][1][44] = -4,
- [1][0][RTW89_FCC][2][44] = 66,
[1][0][RTW89_ETSI][1][44] = 46,
[1][0][RTW89_ETSI][0][44] = 8,
[1][0][RTW89_MKK][1][44] = 22,
[1][0][RTW89_MKK][0][44] = 4,
[1][0][RTW89_IC][1][44] = -4,
- [1][0][RTW89_IC][2][44] = 66,
[1][0][RTW89_KCC][1][44] = -2,
[1][0][RTW89_KCC][0][44] = -2,
[1][0][RTW89_ACMA][1][44] = 46,
@@ -55277,13 +53125,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][44] = 42,
[1][0][RTW89_THAILAND][0][44] = -4,
[1][0][RTW89_FCC][1][45] = -4,
- [1][0][RTW89_FCC][2][45] = 127,
[1][0][RTW89_ETSI][1][45] = 127,
[1][0][RTW89_ETSI][0][45] = 127,
[1][0][RTW89_MKK][1][45] = 127,
[1][0][RTW89_MKK][0][45] = 127,
[1][0][RTW89_IC][1][45] = -4,
- [1][0][RTW89_IC][2][45] = 68,
[1][0][RTW89_KCC][1][45] = -2,
[1][0][RTW89_KCC][0][45] = 127,
[1][0][RTW89_ACMA][1][45] = 127,
@@ -55296,13 +53142,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][45] = 127,
[1][0][RTW89_THAILAND][0][45] = 127,
[1][0][RTW89_FCC][1][47] = -4,
- [1][0][RTW89_FCC][2][47] = 127,
[1][0][RTW89_ETSI][1][47] = 127,
[1][0][RTW89_ETSI][0][47] = 127,
[1][0][RTW89_MKK][1][47] = 127,
[1][0][RTW89_MKK][0][47] = 127,
[1][0][RTW89_IC][1][47] = -4,
- [1][0][RTW89_IC][2][47] = 68,
[1][0][RTW89_KCC][1][47] = -2,
[1][0][RTW89_KCC][0][47] = 127,
[1][0][RTW89_ACMA][1][47] = 127,
@@ -55315,13 +53159,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][47] = 127,
[1][0][RTW89_THAILAND][0][47] = 127,
[1][0][RTW89_FCC][1][49] = -4,
- [1][0][RTW89_FCC][2][49] = 127,
[1][0][RTW89_ETSI][1][49] = 127,
[1][0][RTW89_ETSI][0][49] = 127,
[1][0][RTW89_MKK][1][49] = 127,
[1][0][RTW89_MKK][0][49] = 127,
[1][0][RTW89_IC][1][49] = -4,
- [1][0][RTW89_IC][2][49] = 68,
[1][0][RTW89_KCC][1][49] = -2,
[1][0][RTW89_KCC][0][49] = 127,
[1][0][RTW89_ACMA][1][49] = 127,
@@ -55334,13 +53176,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][49] = 127,
[1][0][RTW89_THAILAND][0][49] = 127,
[1][0][RTW89_FCC][1][51] = -4,
- [1][0][RTW89_FCC][2][51] = 127,
[1][0][RTW89_ETSI][1][51] = 127,
[1][0][RTW89_ETSI][0][51] = 127,
[1][0][RTW89_MKK][1][51] = 127,
[1][0][RTW89_MKK][0][51] = 127,
[1][0][RTW89_IC][1][51] = -4,
- [1][0][RTW89_IC][2][51] = 68,
[1][0][RTW89_KCC][1][51] = -2,
[1][0][RTW89_KCC][0][51] = 127,
[1][0][RTW89_ACMA][1][51] = 127,
@@ -55353,13 +53193,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][51] = 127,
[1][0][RTW89_THAILAND][0][51] = 127,
[1][0][RTW89_FCC][1][53] = -4,
- [1][0][RTW89_FCC][2][53] = 127,
[1][0][RTW89_ETSI][1][53] = 127,
[1][0][RTW89_ETSI][0][53] = 127,
[1][0][RTW89_MKK][1][53] = 127,
[1][0][RTW89_MKK][0][53] = 127,
[1][0][RTW89_IC][1][53] = -4,
- [1][0][RTW89_IC][2][53] = 68,
[1][0][RTW89_KCC][1][53] = -2,
[1][0][RTW89_KCC][0][53] = 127,
[1][0][RTW89_ACMA][1][53] = 127,
@@ -55372,13 +53210,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][53] = 127,
[1][0][RTW89_THAILAND][0][53] = 127,
[1][0][RTW89_FCC][1][55] = -4,
- [1][0][RTW89_FCC][2][55] = 68,
[1][0][RTW89_ETSI][1][55] = 127,
[1][0][RTW89_ETSI][0][55] = 127,
[1][0][RTW89_MKK][1][55] = 127,
[1][0][RTW89_MKK][0][55] = 127,
[1][0][RTW89_IC][1][55] = -4,
- [1][0][RTW89_IC][2][55] = 68,
[1][0][RTW89_KCC][1][55] = -2,
[1][0][RTW89_KCC][0][55] = 127,
[1][0][RTW89_ACMA][1][55] = 127,
@@ -55391,13 +53227,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][55] = 127,
[1][0][RTW89_THAILAND][0][55] = 127,
[1][0][RTW89_FCC][1][57] = -4,
- [1][0][RTW89_FCC][2][57] = 68,
[1][0][RTW89_ETSI][1][57] = 127,
[1][0][RTW89_ETSI][0][57] = 127,
[1][0][RTW89_MKK][1][57] = 127,
[1][0][RTW89_MKK][0][57] = 127,
[1][0][RTW89_IC][1][57] = -4,
- [1][0][RTW89_IC][2][57] = 68,
[1][0][RTW89_KCC][1][57] = -2,
[1][0][RTW89_KCC][0][57] = 127,
[1][0][RTW89_ACMA][1][57] = 127,
@@ -55410,13 +53244,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][57] = 127,
[1][0][RTW89_THAILAND][0][57] = 127,
[1][0][RTW89_FCC][1][59] = -4,
- [1][0][RTW89_FCC][2][59] = 68,
[1][0][RTW89_ETSI][1][59] = 127,
[1][0][RTW89_ETSI][0][59] = 127,
[1][0][RTW89_MKK][1][59] = 127,
[1][0][RTW89_MKK][0][59] = 127,
[1][0][RTW89_IC][1][59] = -4,
- [1][0][RTW89_IC][2][59] = 68,
[1][0][RTW89_KCC][1][59] = -2,
[1][0][RTW89_KCC][0][59] = 127,
[1][0][RTW89_ACMA][1][59] = 127,
@@ -55429,13 +53261,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][59] = 127,
[1][0][RTW89_THAILAND][0][59] = 127,
[1][0][RTW89_FCC][1][60] = -4,
- [1][0][RTW89_FCC][2][60] = 68,
[1][0][RTW89_ETSI][1][60] = 127,
[1][0][RTW89_ETSI][0][60] = 127,
[1][0][RTW89_MKK][1][60] = 127,
[1][0][RTW89_MKK][0][60] = 127,
[1][0][RTW89_IC][1][60] = -4,
- [1][0][RTW89_IC][2][60] = 68,
[1][0][RTW89_KCC][1][60] = -2,
[1][0][RTW89_KCC][0][60] = 127,
[1][0][RTW89_ACMA][1][60] = 127,
@@ -55448,13 +53278,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][60] = 127,
[1][0][RTW89_THAILAND][0][60] = 127,
[1][0][RTW89_FCC][1][62] = -4,
- [1][0][RTW89_FCC][2][62] = 68,
[1][0][RTW89_ETSI][1][62] = 127,
[1][0][RTW89_ETSI][0][62] = 127,
[1][0][RTW89_MKK][1][62] = 127,
[1][0][RTW89_MKK][0][62] = 127,
[1][0][RTW89_IC][1][62] = -4,
- [1][0][RTW89_IC][2][62] = 68,
[1][0][RTW89_KCC][1][62] = -2,
[1][0][RTW89_KCC][0][62] = 127,
[1][0][RTW89_ACMA][1][62] = 127,
@@ -55467,13 +53295,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][62] = 127,
[1][0][RTW89_THAILAND][0][62] = 127,
[1][0][RTW89_FCC][1][64] = -4,
- [1][0][RTW89_FCC][2][64] = 68,
[1][0][RTW89_ETSI][1][64] = 127,
[1][0][RTW89_ETSI][0][64] = 127,
[1][0][RTW89_MKK][1][64] = 127,
[1][0][RTW89_MKK][0][64] = 127,
[1][0][RTW89_IC][1][64] = -4,
- [1][0][RTW89_IC][2][64] = 68,
[1][0][RTW89_KCC][1][64] = -2,
[1][0][RTW89_KCC][0][64] = 127,
[1][0][RTW89_ACMA][1][64] = 127,
@@ -55486,13 +53312,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][64] = 127,
[1][0][RTW89_THAILAND][0][64] = 127,
[1][0][RTW89_FCC][1][66] = -4,
- [1][0][RTW89_FCC][2][66] = 68,
[1][0][RTW89_ETSI][1][66] = 127,
[1][0][RTW89_ETSI][0][66] = 127,
[1][0][RTW89_MKK][1][66] = 127,
[1][0][RTW89_MKK][0][66] = 127,
[1][0][RTW89_IC][1][66] = -4,
- [1][0][RTW89_IC][2][66] = 68,
[1][0][RTW89_KCC][1][66] = -2,
[1][0][RTW89_KCC][0][66] = 127,
[1][0][RTW89_ACMA][1][66] = 127,
@@ -55505,13 +53329,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][66] = 127,
[1][0][RTW89_THAILAND][0][66] = 127,
[1][0][RTW89_FCC][1][68] = -4,
- [1][0][RTW89_FCC][2][68] = 68,
[1][0][RTW89_ETSI][1][68] = 127,
[1][0][RTW89_ETSI][0][68] = 127,
[1][0][RTW89_MKK][1][68] = 127,
[1][0][RTW89_MKK][0][68] = 127,
[1][0][RTW89_IC][1][68] = -4,
- [1][0][RTW89_IC][2][68] = 68,
[1][0][RTW89_KCC][1][68] = -2,
[1][0][RTW89_KCC][0][68] = 127,
[1][0][RTW89_ACMA][1][68] = 127,
@@ -55524,13 +53346,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][68] = 127,
[1][0][RTW89_THAILAND][0][68] = 127,
[1][0][RTW89_FCC][1][70] = -4,
- [1][0][RTW89_FCC][2][70] = 68,
[1][0][RTW89_ETSI][1][70] = 127,
[1][0][RTW89_ETSI][0][70] = 127,
[1][0][RTW89_MKK][1][70] = 127,
[1][0][RTW89_MKK][0][70] = 127,
[1][0][RTW89_IC][1][70] = -4,
- [1][0][RTW89_IC][2][70] = 68,
[1][0][RTW89_KCC][1][70] = -2,
[1][0][RTW89_KCC][0][70] = 127,
[1][0][RTW89_ACMA][1][70] = 127,
@@ -55543,13 +53363,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][70] = 127,
[1][0][RTW89_THAILAND][0][70] = 127,
[1][0][RTW89_FCC][1][72] = -4,
- [1][0][RTW89_FCC][2][72] = 68,
[1][0][RTW89_ETSI][1][72] = 127,
[1][0][RTW89_ETSI][0][72] = 127,
[1][0][RTW89_MKK][1][72] = 127,
[1][0][RTW89_MKK][0][72] = 127,
[1][0][RTW89_IC][1][72] = -4,
- [1][0][RTW89_IC][2][72] = 68,
[1][0][RTW89_KCC][1][72] = -2,
[1][0][RTW89_KCC][0][72] = 127,
[1][0][RTW89_ACMA][1][72] = 127,
@@ -55562,13 +53380,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][72] = 127,
[1][0][RTW89_THAILAND][0][72] = 127,
[1][0][RTW89_FCC][1][74] = -4,
- [1][0][RTW89_FCC][2][74] = 68,
[1][0][RTW89_ETSI][1][74] = 127,
[1][0][RTW89_ETSI][0][74] = 127,
[1][0][RTW89_MKK][1][74] = 127,
[1][0][RTW89_MKK][0][74] = 127,
[1][0][RTW89_IC][1][74] = -4,
- [1][0][RTW89_IC][2][74] = 68,
[1][0][RTW89_KCC][1][74] = -2,
[1][0][RTW89_KCC][0][74] = 127,
[1][0][RTW89_ACMA][1][74] = 127,
@@ -55581,13 +53397,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][74] = 127,
[1][0][RTW89_THAILAND][0][74] = 127,
[1][0][RTW89_FCC][1][75] = -4,
- [1][0][RTW89_FCC][2][75] = 68,
[1][0][RTW89_ETSI][1][75] = 127,
[1][0][RTW89_ETSI][0][75] = 127,
[1][0][RTW89_MKK][1][75] = 127,
[1][0][RTW89_MKK][0][75] = 127,
[1][0][RTW89_IC][1][75] = -4,
- [1][0][RTW89_IC][2][75] = 68,
[1][0][RTW89_KCC][1][75] = -2,
[1][0][RTW89_KCC][0][75] = 127,
[1][0][RTW89_ACMA][1][75] = 127,
@@ -55600,13 +53414,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][75] = 127,
[1][0][RTW89_THAILAND][0][75] = 127,
[1][0][RTW89_FCC][1][77] = -4,
- [1][0][RTW89_FCC][2][77] = 68,
[1][0][RTW89_ETSI][1][77] = 127,
[1][0][RTW89_ETSI][0][77] = 127,
[1][0][RTW89_MKK][1][77] = 127,
[1][0][RTW89_MKK][0][77] = 127,
[1][0][RTW89_IC][1][77] = -4,
- [1][0][RTW89_IC][2][77] = 68,
[1][0][RTW89_KCC][1][77] = -2,
[1][0][RTW89_KCC][0][77] = 127,
[1][0][RTW89_ACMA][1][77] = 127,
@@ -55619,13 +53431,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][77] = 127,
[1][0][RTW89_THAILAND][0][77] = 127,
[1][0][RTW89_FCC][1][79] = -4,
- [1][0][RTW89_FCC][2][79] = 68,
[1][0][RTW89_ETSI][1][79] = 127,
[1][0][RTW89_ETSI][0][79] = 127,
[1][0][RTW89_MKK][1][79] = 127,
[1][0][RTW89_MKK][0][79] = 127,
[1][0][RTW89_IC][1][79] = -4,
- [1][0][RTW89_IC][2][79] = 68,
[1][0][RTW89_KCC][1][79] = -2,
[1][0][RTW89_KCC][0][79] = 127,
[1][0][RTW89_ACMA][1][79] = 127,
@@ -55638,13 +53448,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][79] = 127,
[1][0][RTW89_THAILAND][0][79] = 127,
[1][0][RTW89_FCC][1][81] = -4,
- [1][0][RTW89_FCC][2][81] = 68,
[1][0][RTW89_ETSI][1][81] = 127,
[1][0][RTW89_ETSI][0][81] = 127,
[1][0][RTW89_MKK][1][81] = 127,
[1][0][RTW89_MKK][0][81] = 127,
[1][0][RTW89_IC][1][81] = -4,
- [1][0][RTW89_IC][2][81] = 68,
[1][0][RTW89_KCC][1][81] = -2,
[1][0][RTW89_KCC][0][81] = 127,
[1][0][RTW89_ACMA][1][81] = 127,
@@ -55657,13 +53465,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][81] = 127,
[1][0][RTW89_THAILAND][0][81] = 127,
[1][0][RTW89_FCC][1][83] = -4,
- [1][0][RTW89_FCC][2][83] = 68,
[1][0][RTW89_ETSI][1][83] = 127,
[1][0][RTW89_ETSI][0][83] = 127,
[1][0][RTW89_MKK][1][83] = 127,
[1][0][RTW89_MKK][0][83] = 127,
[1][0][RTW89_IC][1][83] = -4,
- [1][0][RTW89_IC][2][83] = 68,
[1][0][RTW89_KCC][1][83] = -2,
[1][0][RTW89_KCC][0][83] = 127,
[1][0][RTW89_ACMA][1][83] = 127,
@@ -55676,13 +53482,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][83] = 127,
[1][0][RTW89_THAILAND][0][83] = 127,
[1][0][RTW89_FCC][1][85] = -4,
- [1][0][RTW89_FCC][2][85] = 68,
[1][0][RTW89_ETSI][1][85] = 127,
[1][0][RTW89_ETSI][0][85] = 127,
[1][0][RTW89_MKK][1][85] = 127,
[1][0][RTW89_MKK][0][85] = 127,
[1][0][RTW89_IC][1][85] = -4,
- [1][0][RTW89_IC][2][85] = 68,
[1][0][RTW89_KCC][1][85] = -2,
[1][0][RTW89_KCC][0][85] = 127,
[1][0][RTW89_ACMA][1][85] = 127,
@@ -55695,13 +53499,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][85] = 127,
[1][0][RTW89_THAILAND][0][85] = 127,
[1][0][RTW89_FCC][1][87] = -4,
- [1][0][RTW89_FCC][2][87] = 127,
[1][0][RTW89_ETSI][1][87] = 127,
[1][0][RTW89_ETSI][0][87] = 127,
[1][0][RTW89_MKK][1][87] = 127,
[1][0][RTW89_MKK][0][87] = 127,
[1][0][RTW89_IC][1][87] = -4,
- [1][0][RTW89_IC][2][87] = 127,
[1][0][RTW89_KCC][1][87] = -2,
[1][0][RTW89_KCC][0][87] = 127,
[1][0][RTW89_ACMA][1][87] = 127,
@@ -55714,13 +53516,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][87] = 127,
[1][0][RTW89_THAILAND][0][87] = 127,
[1][0][RTW89_FCC][1][89] = -4,
- [1][0][RTW89_FCC][2][89] = 127,
[1][0][RTW89_ETSI][1][89] = 127,
[1][0][RTW89_ETSI][0][89] = 127,
[1][0][RTW89_MKK][1][89] = 127,
[1][0][RTW89_MKK][0][89] = 127,
[1][0][RTW89_IC][1][89] = -4,
- [1][0][RTW89_IC][2][89] = 127,
[1][0][RTW89_KCC][1][89] = -2,
[1][0][RTW89_KCC][0][89] = 127,
[1][0][RTW89_ACMA][1][89] = 127,
@@ -55733,13 +53533,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][89] = 127,
[1][0][RTW89_THAILAND][0][89] = 127,
[1][0][RTW89_FCC][1][90] = -4,
- [1][0][RTW89_FCC][2][90] = 127,
[1][0][RTW89_ETSI][1][90] = 127,
[1][0][RTW89_ETSI][0][90] = 127,
[1][0][RTW89_MKK][1][90] = 127,
[1][0][RTW89_MKK][0][90] = 127,
[1][0][RTW89_IC][1][90] = -4,
- [1][0][RTW89_IC][2][90] = 127,
[1][0][RTW89_KCC][1][90] = -2,
[1][0][RTW89_KCC][0][90] = 127,
[1][0][RTW89_ACMA][1][90] = 127,
@@ -55752,13 +53550,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][90] = 127,
[1][0][RTW89_THAILAND][0][90] = 127,
[1][0][RTW89_FCC][1][92] = -4,
- [1][0][RTW89_FCC][2][92] = 127,
[1][0][RTW89_ETSI][1][92] = 127,
[1][0][RTW89_ETSI][0][92] = 127,
[1][0][RTW89_MKK][1][92] = 127,
[1][0][RTW89_MKK][0][92] = 127,
[1][0][RTW89_IC][1][92] = -4,
- [1][0][RTW89_IC][2][92] = 127,
[1][0][RTW89_KCC][1][92] = -2,
[1][0][RTW89_KCC][0][92] = 127,
[1][0][RTW89_ACMA][1][92] = 127,
@@ -55771,13 +53567,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][92] = 127,
[1][0][RTW89_THAILAND][0][92] = 127,
[1][0][RTW89_FCC][1][94] = -4,
- [1][0][RTW89_FCC][2][94] = 127,
[1][0][RTW89_ETSI][1][94] = 127,
[1][0][RTW89_ETSI][0][94] = 127,
[1][0][RTW89_MKK][1][94] = 127,
[1][0][RTW89_MKK][0][94] = 127,
[1][0][RTW89_IC][1][94] = -4,
- [1][0][RTW89_IC][2][94] = 127,
[1][0][RTW89_KCC][1][94] = -2,
[1][0][RTW89_KCC][0][94] = 127,
[1][0][RTW89_ACMA][1][94] = 127,
@@ -55790,13 +53584,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][94] = 127,
[1][0][RTW89_THAILAND][0][94] = 127,
[1][0][RTW89_FCC][1][96] = -4,
- [1][0][RTW89_FCC][2][96] = 127,
[1][0][RTW89_ETSI][1][96] = 127,
[1][0][RTW89_ETSI][0][96] = 127,
[1][0][RTW89_MKK][1][96] = 127,
[1][0][RTW89_MKK][0][96] = 127,
[1][0][RTW89_IC][1][96] = -4,
- [1][0][RTW89_IC][2][96] = 127,
[1][0][RTW89_KCC][1][96] = -2,
[1][0][RTW89_KCC][0][96] = 127,
[1][0][RTW89_ACMA][1][96] = 127,
@@ -55809,13 +53601,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][96] = 127,
[1][0][RTW89_THAILAND][0][96] = 127,
[1][0][RTW89_FCC][1][98] = -4,
- [1][0][RTW89_FCC][2][98] = 127,
[1][0][RTW89_ETSI][1][98] = 127,
[1][0][RTW89_ETSI][0][98] = 127,
[1][0][RTW89_MKK][1][98] = 127,
[1][0][RTW89_MKK][0][98] = 127,
[1][0][RTW89_IC][1][98] = -4,
- [1][0][RTW89_IC][2][98] = 127,
[1][0][RTW89_KCC][1][98] = -2,
[1][0][RTW89_KCC][0][98] = 127,
[1][0][RTW89_ACMA][1][98] = 127,
@@ -55828,13 +53618,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][98] = 127,
[1][0][RTW89_THAILAND][0][98] = 127,
[1][0][RTW89_FCC][1][100] = -4,
- [1][0][RTW89_FCC][2][100] = 127,
[1][0][RTW89_ETSI][1][100] = 127,
[1][0][RTW89_ETSI][0][100] = 127,
[1][0][RTW89_MKK][1][100] = 127,
[1][0][RTW89_MKK][0][100] = 127,
[1][0][RTW89_IC][1][100] = -4,
- [1][0][RTW89_IC][2][100] = 127,
[1][0][RTW89_KCC][1][100] = -2,
[1][0][RTW89_KCC][0][100] = 127,
[1][0][RTW89_ACMA][1][100] = 127,
@@ -55847,13 +53635,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][100] = 127,
[1][0][RTW89_THAILAND][0][100] = 127,
[1][0][RTW89_FCC][1][102] = -4,
- [1][0][RTW89_FCC][2][102] = 127,
[1][0][RTW89_ETSI][1][102] = 127,
[1][0][RTW89_ETSI][0][102] = 127,
[1][0][RTW89_MKK][1][102] = 127,
[1][0][RTW89_MKK][0][102] = 127,
[1][0][RTW89_IC][1][102] = -4,
- [1][0][RTW89_IC][2][102] = 127,
[1][0][RTW89_KCC][1][102] = -2,
[1][0][RTW89_KCC][0][102] = 127,
[1][0][RTW89_ACMA][1][102] = 127,
@@ -55866,13 +53652,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][102] = 127,
[1][0][RTW89_THAILAND][0][102] = 127,
[1][0][RTW89_FCC][1][104] = -4,
- [1][0][RTW89_FCC][2][104] = 127,
[1][0][RTW89_ETSI][1][104] = 127,
[1][0][RTW89_ETSI][0][104] = 127,
[1][0][RTW89_MKK][1][104] = 127,
[1][0][RTW89_MKK][0][104] = 127,
[1][0][RTW89_IC][1][104] = -4,
- [1][0][RTW89_IC][2][104] = 127,
[1][0][RTW89_KCC][1][104] = -2,
[1][0][RTW89_KCC][0][104] = 127,
[1][0][RTW89_ACMA][1][104] = 127,
@@ -55885,13 +53669,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][104] = 127,
[1][0][RTW89_THAILAND][0][104] = 127,
[1][0][RTW89_FCC][1][105] = -4,
- [1][0][RTW89_FCC][2][105] = 127,
[1][0][RTW89_ETSI][1][105] = 127,
[1][0][RTW89_ETSI][0][105] = 127,
[1][0][RTW89_MKK][1][105] = 127,
[1][0][RTW89_MKK][0][105] = 127,
[1][0][RTW89_IC][1][105] = -4,
- [1][0][RTW89_IC][2][105] = 127,
[1][0][RTW89_KCC][1][105] = -2,
[1][0][RTW89_KCC][0][105] = 127,
[1][0][RTW89_ACMA][1][105] = 127,
@@ -55904,13 +53686,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][105] = 127,
[1][0][RTW89_THAILAND][0][105] = 127,
[1][0][RTW89_FCC][1][107] = 1,
- [1][0][RTW89_FCC][2][107] = 127,
[1][0][RTW89_ETSI][1][107] = 127,
[1][0][RTW89_ETSI][0][107] = 127,
[1][0][RTW89_MKK][1][107] = 127,
[1][0][RTW89_MKK][0][107] = 127,
[1][0][RTW89_IC][1][107] = 1,
- [1][0][RTW89_IC][2][107] = 127,
[1][0][RTW89_KCC][1][107] = -2,
[1][0][RTW89_KCC][0][107] = 127,
[1][0][RTW89_ACMA][1][107] = 127,
@@ -55923,13 +53703,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][107] = 127,
[1][0][RTW89_THAILAND][0][107] = 127,
[1][0][RTW89_FCC][1][109] = 2,
- [1][0][RTW89_FCC][2][109] = 127,
[1][0][RTW89_ETSI][1][109] = 127,
[1][0][RTW89_ETSI][0][109] = 127,
[1][0][RTW89_MKK][1][109] = 127,
[1][0][RTW89_MKK][0][109] = 127,
[1][0][RTW89_IC][1][109] = 2,
- [1][0][RTW89_IC][2][109] = 127,
[1][0][RTW89_KCC][1][109] = 127,
[1][0][RTW89_KCC][0][109] = 127,
[1][0][RTW89_ACMA][1][109] = 127,
@@ -55942,13 +53720,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][109] = 127,
[1][0][RTW89_THAILAND][0][109] = 127,
[1][0][RTW89_FCC][1][111] = 127,
- [1][0][RTW89_FCC][2][111] = 127,
[1][0][RTW89_ETSI][1][111] = 127,
[1][0][RTW89_ETSI][0][111] = 127,
[1][0][RTW89_MKK][1][111] = 127,
[1][0][RTW89_MKK][0][111] = 127,
[1][0][RTW89_IC][1][111] = 127,
- [1][0][RTW89_IC][2][111] = 127,
[1][0][RTW89_KCC][1][111] = 127,
[1][0][RTW89_KCC][0][111] = 127,
[1][0][RTW89_ACMA][1][111] = 127,
@@ -55961,13 +53737,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][111] = 127,
[1][0][RTW89_THAILAND][0][111] = 127,
[1][0][RTW89_FCC][1][113] = 127,
- [1][0][RTW89_FCC][2][113] = 127,
[1][0][RTW89_ETSI][1][113] = 127,
[1][0][RTW89_ETSI][0][113] = 127,
[1][0][RTW89_MKK][1][113] = 127,
[1][0][RTW89_MKK][0][113] = 127,
[1][0][RTW89_IC][1][113] = 127,
- [1][0][RTW89_IC][2][113] = 127,
[1][0][RTW89_KCC][1][113] = 127,
[1][0][RTW89_KCC][0][113] = 127,
[1][0][RTW89_ACMA][1][113] = 127,
@@ -55980,13 +53754,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][113] = 127,
[1][0][RTW89_THAILAND][0][113] = 127,
[1][0][RTW89_FCC][1][115] = 127,
- [1][0][RTW89_FCC][2][115] = 127,
[1][0][RTW89_ETSI][1][115] = 127,
[1][0][RTW89_ETSI][0][115] = 127,
[1][0][RTW89_MKK][1][115] = 127,
[1][0][RTW89_MKK][0][115] = 127,
[1][0][RTW89_IC][1][115] = 127,
- [1][0][RTW89_IC][2][115] = 127,
[1][0][RTW89_KCC][1][115] = 127,
[1][0][RTW89_KCC][0][115] = 127,
[1][0][RTW89_ACMA][1][115] = 127,
@@ -55999,13 +53771,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][115] = 127,
[1][0][RTW89_THAILAND][0][115] = 127,
[1][0][RTW89_FCC][1][117] = 127,
- [1][0][RTW89_FCC][2][117] = 127,
[1][0][RTW89_ETSI][1][117] = 127,
[1][0][RTW89_ETSI][0][117] = 127,
[1][0][RTW89_MKK][1][117] = 127,
[1][0][RTW89_MKK][0][117] = 127,
[1][0][RTW89_IC][1][117] = 127,
- [1][0][RTW89_IC][2][117] = 127,
[1][0][RTW89_KCC][1][117] = 127,
[1][0][RTW89_KCC][0][117] = 127,
[1][0][RTW89_ACMA][1][117] = 127,
@@ -56018,13 +53788,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][117] = 127,
[1][0][RTW89_THAILAND][0][117] = 127,
[1][0][RTW89_FCC][1][119] = 127,
- [1][0][RTW89_FCC][2][119] = 127,
[1][0][RTW89_ETSI][1][119] = 127,
[1][0][RTW89_ETSI][0][119] = 127,
[1][0][RTW89_MKK][1][119] = 127,
[1][0][RTW89_MKK][0][119] = 127,
[1][0][RTW89_IC][1][119] = 127,
- [1][0][RTW89_IC][2][119] = 127,
[1][0][RTW89_KCC][1][119] = 127,
[1][0][RTW89_KCC][0][119] = 127,
[1][0][RTW89_ACMA][1][119] = 127,
@@ -56037,13 +53805,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][119] = 127,
[1][0][RTW89_THAILAND][0][119] = 127,
[1][1][RTW89_FCC][1][0] = -26,
- [1][1][RTW89_FCC][2][0] = 44,
[1][1][RTW89_ETSI][1][0] = 32,
[1][1][RTW89_ETSI][0][0] = -6,
[1][1][RTW89_MKK][1][0] = 30,
[1][1][RTW89_MKK][0][0] = -10,
[1][1][RTW89_IC][1][0] = -26,
- [1][1][RTW89_IC][2][0] = 44,
[1][1][RTW89_KCC][1][0] = -14,
[1][1][RTW89_KCC][0][0] = -14,
[1][1][RTW89_ACMA][1][0] = 32,
@@ -56056,13 +53822,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][0] = 18,
[1][1][RTW89_THAILAND][0][0] = -26,
[1][1][RTW89_FCC][1][2] = -28,
- [1][1][RTW89_FCC][2][2] = 44,
[1][1][RTW89_ETSI][1][2] = 32,
[1][1][RTW89_ETSI][0][2] = -6,
[1][1][RTW89_MKK][1][2] = 30,
[1][1][RTW89_MKK][0][2] = -10,
[1][1][RTW89_IC][1][2] = -28,
- [1][1][RTW89_IC][2][2] = 44,
[1][1][RTW89_KCC][1][2] = -14,
[1][1][RTW89_KCC][0][2] = -14,
[1][1][RTW89_ACMA][1][2] = 32,
@@ -56075,13 +53839,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][2] = 18,
[1][1][RTW89_THAILAND][0][2] = -28,
[1][1][RTW89_FCC][1][4] = -28,
- [1][1][RTW89_FCC][2][4] = 44,
[1][1][RTW89_ETSI][1][4] = 32,
[1][1][RTW89_ETSI][0][4] = -6,
[1][1][RTW89_MKK][1][4] = 30,
[1][1][RTW89_MKK][0][4] = -10,
[1][1][RTW89_IC][1][4] = -28,
- [1][1][RTW89_IC][2][4] = 44,
[1][1][RTW89_KCC][1][4] = -14,
[1][1][RTW89_KCC][0][4] = -14,
[1][1][RTW89_ACMA][1][4] = 32,
@@ -56094,13 +53856,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][4] = 18,
[1][1][RTW89_THAILAND][0][4] = -28,
[1][1][RTW89_FCC][1][6] = -28,
- [1][1][RTW89_FCC][2][6] = 44,
[1][1][RTW89_ETSI][1][6] = 32,
[1][1][RTW89_ETSI][0][6] = -6,
[1][1][RTW89_MKK][1][6] = 30,
[1][1][RTW89_MKK][0][6] = -10,
[1][1][RTW89_IC][1][6] = -28,
- [1][1][RTW89_IC][2][6] = 44,
[1][1][RTW89_KCC][1][6] = -14,
[1][1][RTW89_KCC][0][6] = -14,
[1][1][RTW89_ACMA][1][6] = 32,
@@ -56113,13 +53873,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][6] = 18,
[1][1][RTW89_THAILAND][0][6] = -28,
[1][1][RTW89_FCC][1][8] = -28,
- [1][1][RTW89_FCC][2][8] = 44,
[1][1][RTW89_ETSI][1][8] = 32,
[1][1][RTW89_ETSI][0][8] = -6,
[1][1][RTW89_MKK][1][8] = 30,
[1][1][RTW89_MKK][0][8] = -10,
[1][1][RTW89_IC][1][8] = -28,
- [1][1][RTW89_IC][2][8] = 44,
[1][1][RTW89_KCC][1][8] = -14,
[1][1][RTW89_KCC][0][8] = -14,
[1][1][RTW89_ACMA][1][8] = 32,
@@ -56132,13 +53890,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][8] = 18,
[1][1][RTW89_THAILAND][0][8] = -28,
[1][1][RTW89_FCC][1][10] = -28,
- [1][1][RTW89_FCC][2][10] = 44,
[1][1][RTW89_ETSI][1][10] = 32,
[1][1][RTW89_ETSI][0][10] = -6,
[1][1][RTW89_MKK][1][10] = 30,
[1][1][RTW89_MKK][0][10] = -10,
[1][1][RTW89_IC][1][10] = -28,
- [1][1][RTW89_IC][2][10] = 44,
[1][1][RTW89_KCC][1][10] = -14,
[1][1][RTW89_KCC][0][10] = -14,
[1][1][RTW89_ACMA][1][10] = 32,
@@ -56151,13 +53907,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][10] = 18,
[1][1][RTW89_THAILAND][0][10] = -28,
[1][1][RTW89_FCC][1][12] = -28,
- [1][1][RTW89_FCC][2][12] = 44,
[1][1][RTW89_ETSI][1][12] = 32,
[1][1][RTW89_ETSI][0][12] = -6,
[1][1][RTW89_MKK][1][12] = 30,
[1][1][RTW89_MKK][0][12] = -10,
[1][1][RTW89_IC][1][12] = -28,
- [1][1][RTW89_IC][2][12] = 44,
[1][1][RTW89_KCC][1][12] = -14,
[1][1][RTW89_KCC][0][12] = -14,
[1][1][RTW89_ACMA][1][12] = 32,
@@ -56170,13 +53924,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][12] = 18,
[1][1][RTW89_THAILAND][0][12] = -28,
[1][1][RTW89_FCC][1][14] = -28,
- [1][1][RTW89_FCC][2][14] = 44,
[1][1][RTW89_ETSI][1][14] = 32,
[1][1][RTW89_ETSI][0][14] = -6,
[1][1][RTW89_MKK][1][14] = 30,
[1][1][RTW89_MKK][0][14] = -10,
[1][1][RTW89_IC][1][14] = -28,
- [1][1][RTW89_IC][2][14] = 44,
[1][1][RTW89_KCC][1][14] = -14,
[1][1][RTW89_KCC][0][14] = -14,
[1][1][RTW89_ACMA][1][14] = 32,
@@ -56189,13 +53941,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][14] = 18,
[1][1][RTW89_THAILAND][0][14] = -28,
[1][1][RTW89_FCC][1][15] = -28,
- [1][1][RTW89_FCC][2][15] = 44,
[1][1][RTW89_ETSI][1][15] = 32,
[1][1][RTW89_ETSI][0][15] = -6,
[1][1][RTW89_MKK][1][15] = 30,
[1][1][RTW89_MKK][0][15] = -10,
[1][1][RTW89_IC][1][15] = -28,
- [1][1][RTW89_IC][2][15] = 44,
[1][1][RTW89_KCC][1][15] = -14,
[1][1][RTW89_KCC][0][15] = -14,
[1][1][RTW89_ACMA][1][15] = 32,
@@ -56208,13 +53958,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][15] = 18,
[1][1][RTW89_THAILAND][0][15] = -28,
[1][1][RTW89_FCC][1][17] = -28,
- [1][1][RTW89_FCC][2][17] = 44,
[1][1][RTW89_ETSI][1][17] = 32,
[1][1][RTW89_ETSI][0][17] = -6,
[1][1][RTW89_MKK][1][17] = 30,
[1][1][RTW89_MKK][0][17] = -10,
[1][1][RTW89_IC][1][17] = -28,
- [1][1][RTW89_IC][2][17] = 44,
[1][1][RTW89_KCC][1][17] = -14,
[1][1][RTW89_KCC][0][17] = -14,
[1][1][RTW89_ACMA][1][17] = 32,
@@ -56227,13 +53975,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][17] = 18,
[1][1][RTW89_THAILAND][0][17] = -28,
[1][1][RTW89_FCC][1][19] = -28,
- [1][1][RTW89_FCC][2][19] = 44,
[1][1][RTW89_ETSI][1][19] = 32,
[1][1][RTW89_ETSI][0][19] = -6,
[1][1][RTW89_MKK][1][19] = 30,
[1][1][RTW89_MKK][0][19] = -10,
[1][1][RTW89_IC][1][19] = -28,
- [1][1][RTW89_IC][2][19] = 44,
[1][1][RTW89_KCC][1][19] = -14,
[1][1][RTW89_KCC][0][19] = -14,
[1][1][RTW89_ACMA][1][19] = 32,
@@ -56246,13 +53992,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][19] = 18,
[1][1][RTW89_THAILAND][0][19] = -28,
[1][1][RTW89_FCC][1][21] = -28,
- [1][1][RTW89_FCC][2][21] = 44,
[1][1][RTW89_ETSI][1][21] = 32,
[1][1][RTW89_ETSI][0][21] = -6,
[1][1][RTW89_MKK][1][21] = 30,
[1][1][RTW89_MKK][0][21] = -10,
[1][1][RTW89_IC][1][21] = -28,
- [1][1][RTW89_IC][2][21] = 44,
[1][1][RTW89_KCC][1][21] = -14,
[1][1][RTW89_KCC][0][21] = -14,
[1][1][RTW89_ACMA][1][21] = 32,
@@ -56265,13 +54009,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][21] = 18,
[1][1][RTW89_THAILAND][0][21] = -28,
[1][1][RTW89_FCC][1][23] = -28,
- [1][1][RTW89_FCC][2][23] = 44,
[1][1][RTW89_ETSI][1][23] = 32,
[1][1][RTW89_ETSI][0][23] = -6,
[1][1][RTW89_MKK][1][23] = 32,
[1][1][RTW89_MKK][0][23] = -10,
[1][1][RTW89_IC][1][23] = -28,
- [1][1][RTW89_IC][2][23] = 44,
[1][1][RTW89_KCC][1][23] = -14,
[1][1][RTW89_KCC][0][23] = -14,
[1][1][RTW89_ACMA][1][23] = 32,
@@ -56284,13 +54026,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][23] = 18,
[1][1][RTW89_THAILAND][0][23] = -28,
[1][1][RTW89_FCC][1][25] = -28,
- [1][1][RTW89_FCC][2][25] = 44,
[1][1][RTW89_ETSI][1][25] = 32,
[1][1][RTW89_ETSI][0][25] = -6,
[1][1][RTW89_MKK][1][25] = 32,
[1][1][RTW89_MKK][0][25] = -10,
[1][1][RTW89_IC][1][25] = -28,
- [1][1][RTW89_IC][2][25] = 44,
[1][1][RTW89_KCC][1][25] = -14,
[1][1][RTW89_KCC][0][25] = -14,
[1][1][RTW89_ACMA][1][25] = 32,
@@ -56303,13 +54043,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][25] = 18,
[1][1][RTW89_THAILAND][0][25] = -28,
[1][1][RTW89_FCC][1][27] = -28,
- [1][1][RTW89_FCC][2][27] = 44,
[1][1][RTW89_ETSI][1][27] = 32,
[1][1][RTW89_ETSI][0][27] = -6,
[1][1][RTW89_MKK][1][27] = 32,
[1][1][RTW89_MKK][0][27] = -10,
[1][1][RTW89_IC][1][27] = -28,
- [1][1][RTW89_IC][2][27] = 44,
[1][1][RTW89_KCC][1][27] = -14,
[1][1][RTW89_KCC][0][27] = -14,
[1][1][RTW89_ACMA][1][27] = 32,
@@ -56322,13 +54060,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][27] = 18,
[1][1][RTW89_THAILAND][0][27] = -28,
[1][1][RTW89_FCC][1][29] = -28,
- [1][1][RTW89_FCC][2][29] = 44,
[1][1][RTW89_ETSI][1][29] = 32,
[1][1][RTW89_ETSI][0][29] = -6,
[1][1][RTW89_MKK][1][29] = 32,
[1][1][RTW89_MKK][0][29] = -10,
[1][1][RTW89_IC][1][29] = -28,
- [1][1][RTW89_IC][2][29] = 44,
[1][1][RTW89_KCC][1][29] = -14,
[1][1][RTW89_KCC][0][29] = -14,
[1][1][RTW89_ACMA][1][29] = 32,
@@ -56341,13 +54077,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][29] = 18,
[1][1][RTW89_THAILAND][0][29] = -28,
[1][1][RTW89_FCC][1][30] = -28,
- [1][1][RTW89_FCC][2][30] = 44,
[1][1][RTW89_ETSI][1][30] = 32,
[1][1][RTW89_ETSI][0][30] = -6,
[1][1][RTW89_MKK][1][30] = 32,
[1][1][RTW89_MKK][0][30] = -10,
[1][1][RTW89_IC][1][30] = -28,
- [1][1][RTW89_IC][2][30] = 44,
[1][1][RTW89_KCC][1][30] = -14,
[1][1][RTW89_KCC][0][30] = -14,
[1][1][RTW89_ACMA][1][30] = 32,
@@ -56360,13 +54094,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][30] = 18,
[1][1][RTW89_THAILAND][0][30] = -28,
[1][1][RTW89_FCC][1][32] = -28,
- [1][1][RTW89_FCC][2][32] = 44,
[1][1][RTW89_ETSI][1][32] = 32,
[1][1][RTW89_ETSI][0][32] = -6,
[1][1][RTW89_MKK][1][32] = 32,
[1][1][RTW89_MKK][0][32] = -10,
[1][1][RTW89_IC][1][32] = -28,
- [1][1][RTW89_IC][2][32] = 44,
[1][1][RTW89_KCC][1][32] = -14,
[1][1][RTW89_KCC][0][32] = -14,
[1][1][RTW89_ACMA][1][32] = 32,
@@ -56379,13 +54111,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][32] = 18,
[1][1][RTW89_THAILAND][0][32] = -28,
[1][1][RTW89_FCC][1][34] = -28,
- [1][1][RTW89_FCC][2][34] = 44,
[1][1][RTW89_ETSI][1][34] = 32,
[1][1][RTW89_ETSI][0][34] = -6,
[1][1][RTW89_MKK][1][34] = 32,
[1][1][RTW89_MKK][0][34] = -10,
[1][1][RTW89_IC][1][34] = -28,
- [1][1][RTW89_IC][2][34] = 44,
[1][1][RTW89_KCC][1][34] = -14,
[1][1][RTW89_KCC][0][34] = -14,
[1][1][RTW89_ACMA][1][34] = 32,
@@ -56398,13 +54128,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][34] = 18,
[1][1][RTW89_THAILAND][0][34] = -28,
[1][1][RTW89_FCC][1][36] = -28,
- [1][1][RTW89_FCC][2][36] = 44,
[1][1][RTW89_ETSI][1][36] = 32,
[1][1][RTW89_ETSI][0][36] = -6,
[1][1][RTW89_MKK][1][36] = 32,
[1][1][RTW89_MKK][0][36] = -10,
[1][1][RTW89_IC][1][36] = -28,
- [1][1][RTW89_IC][2][36] = 44,
[1][1][RTW89_KCC][1][36] = -14,
[1][1][RTW89_KCC][0][36] = -14,
[1][1][RTW89_ACMA][1][36] = 32,
@@ -56417,13 +54145,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][36] = 18,
[1][1][RTW89_THAILAND][0][36] = -28,
[1][1][RTW89_FCC][1][38] = -28,
- [1][1][RTW89_FCC][2][38] = 44,
[1][1][RTW89_ETSI][1][38] = 32,
[1][1][RTW89_ETSI][0][38] = -6,
[1][1][RTW89_MKK][1][38] = 32,
[1][1][RTW89_MKK][0][38] = -10,
[1][1][RTW89_IC][1][38] = -28,
- [1][1][RTW89_IC][2][38] = 44,
[1][1][RTW89_KCC][1][38] = -14,
[1][1][RTW89_KCC][0][38] = -14,
[1][1][RTW89_ACMA][1][38] = 32,
@@ -56436,13 +54162,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][38] = 18,
[1][1][RTW89_THAILAND][0][38] = -28,
[1][1][RTW89_FCC][1][40] = -28,
- [1][1][RTW89_FCC][2][40] = 44,
[1][1][RTW89_ETSI][1][40] = 32,
[1][1][RTW89_ETSI][0][40] = -6,
[1][1][RTW89_MKK][1][40] = 32,
[1][1][RTW89_MKK][0][40] = -10,
[1][1][RTW89_IC][1][40] = -28,
- [1][1][RTW89_IC][2][40] = 44,
[1][1][RTW89_KCC][1][40] = -14,
[1][1][RTW89_KCC][0][40] = -14,
[1][1][RTW89_ACMA][1][40] = 32,
@@ -56455,13 +54179,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][40] = 18,
[1][1][RTW89_THAILAND][0][40] = -28,
[1][1][RTW89_FCC][1][42] = -28,
- [1][1][RTW89_FCC][2][42] = 44,
[1][1][RTW89_ETSI][1][42] = 32,
[1][1][RTW89_ETSI][0][42] = -6,
[1][1][RTW89_MKK][1][42] = 32,
[1][1][RTW89_MKK][0][42] = -10,
[1][1][RTW89_IC][1][42] = -28,
- [1][1][RTW89_IC][2][42] = 44,
[1][1][RTW89_KCC][1][42] = -14,
[1][1][RTW89_KCC][0][42] = -14,
[1][1][RTW89_ACMA][1][42] = 32,
@@ -56474,13 +54196,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][42] = 18,
[1][1][RTW89_THAILAND][0][42] = -28,
[1][1][RTW89_FCC][1][44] = -28,
- [1][1][RTW89_FCC][2][44] = 44,
[1][1][RTW89_ETSI][1][44] = 34,
[1][1][RTW89_ETSI][0][44] = -4,
[1][1][RTW89_MKK][1][44] = 4,
[1][1][RTW89_MKK][0][44] = -8,
[1][1][RTW89_IC][1][44] = -28,
- [1][1][RTW89_IC][2][44] = 44,
[1][1][RTW89_KCC][1][44] = -14,
[1][1][RTW89_KCC][0][44] = -14,
[1][1][RTW89_ACMA][1][44] = 34,
@@ -56493,13 +54213,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][44] = 18,
[1][1][RTW89_THAILAND][0][44] = -28,
[1][1][RTW89_FCC][1][45] = -26,
- [1][1][RTW89_FCC][2][45] = 127,
[1][1][RTW89_ETSI][1][45] = 127,
[1][1][RTW89_ETSI][0][45] = 127,
[1][1][RTW89_MKK][1][45] = 127,
[1][1][RTW89_MKK][0][45] = 127,
[1][1][RTW89_IC][1][45] = -26,
- [1][1][RTW89_IC][2][45] = 44,
[1][1][RTW89_KCC][1][45] = -14,
[1][1][RTW89_KCC][0][45] = 127,
[1][1][RTW89_ACMA][1][45] = 127,
@@ -56512,13 +54230,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][45] = 127,
[1][1][RTW89_THAILAND][0][45] = 127,
[1][1][RTW89_FCC][1][47] = -28,
- [1][1][RTW89_FCC][2][47] = 127,
[1][1][RTW89_ETSI][1][47] = 127,
[1][1][RTW89_ETSI][0][47] = 127,
[1][1][RTW89_MKK][1][47] = 127,
[1][1][RTW89_MKK][0][47] = 127,
[1][1][RTW89_IC][1][47] = -28,
- [1][1][RTW89_IC][2][47] = 44,
[1][1][RTW89_KCC][1][47] = -14,
[1][1][RTW89_KCC][0][47] = 127,
[1][1][RTW89_ACMA][1][47] = 127,
@@ -56531,13 +54247,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][47] = 127,
[1][1][RTW89_THAILAND][0][47] = 127,
[1][1][RTW89_FCC][1][49] = -28,
- [1][1][RTW89_FCC][2][49] = 127,
[1][1][RTW89_ETSI][1][49] = 127,
[1][1][RTW89_ETSI][0][49] = 127,
[1][1][RTW89_MKK][1][49] = 127,
[1][1][RTW89_MKK][0][49] = 127,
[1][1][RTW89_IC][1][49] = -28,
- [1][1][RTW89_IC][2][49] = 44,
[1][1][RTW89_KCC][1][49] = -14,
[1][1][RTW89_KCC][0][49] = 127,
[1][1][RTW89_ACMA][1][49] = 127,
@@ -56550,13 +54264,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][49] = 127,
[1][1][RTW89_THAILAND][0][49] = 127,
[1][1][RTW89_FCC][1][51] = -28,
- [1][1][RTW89_FCC][2][51] = 127,
[1][1][RTW89_ETSI][1][51] = 127,
[1][1][RTW89_ETSI][0][51] = 127,
[1][1][RTW89_MKK][1][51] = 127,
[1][1][RTW89_MKK][0][51] = 127,
[1][1][RTW89_IC][1][51] = -28,
- [1][1][RTW89_IC][2][51] = 44,
[1][1][RTW89_KCC][1][51] = -14,
[1][1][RTW89_KCC][0][51] = 127,
[1][1][RTW89_ACMA][1][51] = 127,
@@ -56569,13 +54281,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][51] = 127,
[1][1][RTW89_THAILAND][0][51] = 127,
[1][1][RTW89_FCC][1][53] = -26,
- [1][1][RTW89_FCC][2][53] = 127,
[1][1][RTW89_ETSI][1][53] = 127,
[1][1][RTW89_ETSI][0][53] = 127,
[1][1][RTW89_MKK][1][53] = 127,
[1][1][RTW89_MKK][0][53] = 127,
[1][1][RTW89_IC][1][53] = -26,
- [1][1][RTW89_IC][2][53] = 44,
[1][1][RTW89_KCC][1][53] = -14,
[1][1][RTW89_KCC][0][53] = 127,
[1][1][RTW89_ACMA][1][53] = 127,
@@ -56588,13 +54298,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][53] = 127,
[1][1][RTW89_THAILAND][0][53] = 127,
[1][1][RTW89_FCC][1][55] = -28,
- [1][1][RTW89_FCC][2][55] = 44,
[1][1][RTW89_ETSI][1][55] = 127,
[1][1][RTW89_ETSI][0][55] = 127,
[1][1][RTW89_MKK][1][55] = 127,
[1][1][RTW89_MKK][0][55] = 127,
[1][1][RTW89_IC][1][55] = -28,
- [1][1][RTW89_IC][2][55] = 44,
[1][1][RTW89_KCC][1][55] = -14,
[1][1][RTW89_KCC][0][55] = 127,
[1][1][RTW89_ACMA][1][55] = 127,
@@ -56607,13 +54315,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][55] = 127,
[1][1][RTW89_THAILAND][0][55] = 127,
[1][1][RTW89_FCC][1][57] = -28,
- [1][1][RTW89_FCC][2][57] = 44,
[1][1][RTW89_ETSI][1][57] = 127,
[1][1][RTW89_ETSI][0][57] = 127,
[1][1][RTW89_MKK][1][57] = 127,
[1][1][RTW89_MKK][0][57] = 127,
[1][1][RTW89_IC][1][57] = -28,
- [1][1][RTW89_IC][2][57] = 44,
[1][1][RTW89_KCC][1][57] = -14,
[1][1][RTW89_KCC][0][57] = 127,
[1][1][RTW89_ACMA][1][57] = 127,
@@ -56626,13 +54332,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][57] = 127,
[1][1][RTW89_THAILAND][0][57] = 127,
[1][1][RTW89_FCC][1][59] = -28,
- [1][1][RTW89_FCC][2][59] = 44,
[1][1][RTW89_ETSI][1][59] = 127,
[1][1][RTW89_ETSI][0][59] = 127,
[1][1][RTW89_MKK][1][59] = 127,
[1][1][RTW89_MKK][0][59] = 127,
[1][1][RTW89_IC][1][59] = -28,
- [1][1][RTW89_IC][2][59] = 44,
[1][1][RTW89_KCC][1][59] = -14,
[1][1][RTW89_KCC][0][59] = 127,
[1][1][RTW89_ACMA][1][59] = 127,
@@ -56645,13 +54349,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][59] = 127,
[1][1][RTW89_THAILAND][0][59] = 127,
[1][1][RTW89_FCC][1][60] = -28,
- [1][1][RTW89_FCC][2][60] = 44,
[1][1][RTW89_ETSI][1][60] = 127,
[1][1][RTW89_ETSI][0][60] = 127,
[1][1][RTW89_MKK][1][60] = 127,
[1][1][RTW89_MKK][0][60] = 127,
[1][1][RTW89_IC][1][60] = -28,
- [1][1][RTW89_IC][2][60] = 44,
[1][1][RTW89_KCC][1][60] = -14,
[1][1][RTW89_KCC][0][60] = 127,
[1][1][RTW89_ACMA][1][60] = 127,
@@ -56664,13 +54366,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][60] = 127,
[1][1][RTW89_THAILAND][0][60] = 127,
[1][1][RTW89_FCC][1][62] = -28,
- [1][1][RTW89_FCC][2][62] = 44,
[1][1][RTW89_ETSI][1][62] = 127,
[1][1][RTW89_ETSI][0][62] = 127,
[1][1][RTW89_MKK][1][62] = 127,
[1][1][RTW89_MKK][0][62] = 127,
[1][1][RTW89_IC][1][62] = -28,
- [1][1][RTW89_IC][2][62] = 44,
[1][1][RTW89_KCC][1][62] = -14,
[1][1][RTW89_KCC][0][62] = 127,
[1][1][RTW89_ACMA][1][62] = 127,
@@ -56683,13 +54383,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][62] = 127,
[1][1][RTW89_THAILAND][0][62] = 127,
[1][1][RTW89_FCC][1][64] = -28,
- [1][1][RTW89_FCC][2][64] = 44,
[1][1][RTW89_ETSI][1][64] = 127,
[1][1][RTW89_ETSI][0][64] = 127,
[1][1][RTW89_MKK][1][64] = 127,
[1][1][RTW89_MKK][0][64] = 127,
[1][1][RTW89_IC][1][64] = -28,
- [1][1][RTW89_IC][2][64] = 44,
[1][1][RTW89_KCC][1][64] = -14,
[1][1][RTW89_KCC][0][64] = 127,
[1][1][RTW89_ACMA][1][64] = 127,
@@ -56702,13 +54400,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][64] = 127,
[1][1][RTW89_THAILAND][0][64] = 127,
[1][1][RTW89_FCC][1][66] = -28,
- [1][1][RTW89_FCC][2][66] = 44,
[1][1][RTW89_ETSI][1][66] = 127,
[1][1][RTW89_ETSI][0][66] = 127,
[1][1][RTW89_MKK][1][66] = 127,
[1][1][RTW89_MKK][0][66] = 127,
[1][1][RTW89_IC][1][66] = -28,
- [1][1][RTW89_IC][2][66] = 44,
[1][1][RTW89_KCC][1][66] = -14,
[1][1][RTW89_KCC][0][66] = 127,
[1][1][RTW89_ACMA][1][66] = 127,
@@ -56721,13 +54417,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][66] = 127,
[1][1][RTW89_THAILAND][0][66] = 127,
[1][1][RTW89_FCC][1][68] = -28,
- [1][1][RTW89_FCC][2][68] = 44,
[1][1][RTW89_ETSI][1][68] = 127,
[1][1][RTW89_ETSI][0][68] = 127,
[1][1][RTW89_MKK][1][68] = 127,
[1][1][RTW89_MKK][0][68] = 127,
[1][1][RTW89_IC][1][68] = -28,
- [1][1][RTW89_IC][2][68] = 44,
[1][1][RTW89_KCC][1][68] = -14,
[1][1][RTW89_KCC][0][68] = 127,
[1][1][RTW89_ACMA][1][68] = 127,
@@ -56740,13 +54434,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][68] = 127,
[1][1][RTW89_THAILAND][0][68] = 127,
[1][1][RTW89_FCC][1][70] = -26,
- [1][1][RTW89_FCC][2][70] = 44,
[1][1][RTW89_ETSI][1][70] = 127,
[1][1][RTW89_ETSI][0][70] = 127,
[1][1][RTW89_MKK][1][70] = 127,
[1][1][RTW89_MKK][0][70] = 127,
[1][1][RTW89_IC][1][70] = -26,
- [1][1][RTW89_IC][2][70] = 44,
[1][1][RTW89_KCC][1][70] = -14,
[1][1][RTW89_KCC][0][70] = 127,
[1][1][RTW89_ACMA][1][70] = 127,
@@ -56759,13 +54451,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][70] = 127,
[1][1][RTW89_THAILAND][0][70] = 127,
[1][1][RTW89_FCC][1][72] = -28,
- [1][1][RTW89_FCC][2][72] = 44,
[1][1][RTW89_ETSI][1][72] = 127,
[1][1][RTW89_ETSI][0][72] = 127,
[1][1][RTW89_MKK][1][72] = 127,
[1][1][RTW89_MKK][0][72] = 127,
[1][1][RTW89_IC][1][72] = -28,
- [1][1][RTW89_IC][2][72] = 44,
[1][1][RTW89_KCC][1][72] = -14,
[1][1][RTW89_KCC][0][72] = 127,
[1][1][RTW89_ACMA][1][72] = 127,
@@ -56778,13 +54468,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][72] = 127,
[1][1][RTW89_THAILAND][0][72] = 127,
[1][1][RTW89_FCC][1][74] = -28,
- [1][1][RTW89_FCC][2][74] = 44,
[1][1][RTW89_ETSI][1][74] = 127,
[1][1][RTW89_ETSI][0][74] = 127,
[1][1][RTW89_MKK][1][74] = 127,
[1][1][RTW89_MKK][0][74] = 127,
[1][1][RTW89_IC][1][74] = -28,
- [1][1][RTW89_IC][2][74] = 44,
[1][1][RTW89_KCC][1][74] = -14,
[1][1][RTW89_KCC][0][74] = 127,
[1][1][RTW89_ACMA][1][74] = 127,
@@ -56797,13 +54485,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][74] = 127,
[1][1][RTW89_THAILAND][0][74] = 127,
[1][1][RTW89_FCC][1][75] = -28,
- [1][1][RTW89_FCC][2][75] = 44,
[1][1][RTW89_ETSI][1][75] = 127,
[1][1][RTW89_ETSI][0][75] = 127,
[1][1][RTW89_MKK][1][75] = 127,
[1][1][RTW89_MKK][0][75] = 127,
[1][1][RTW89_IC][1][75] = -28,
- [1][1][RTW89_IC][2][75] = 44,
[1][1][RTW89_KCC][1][75] = -14,
[1][1][RTW89_KCC][0][75] = 127,
[1][1][RTW89_ACMA][1][75] = 127,
@@ -56816,13 +54502,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][75] = 127,
[1][1][RTW89_THAILAND][0][75] = 127,
[1][1][RTW89_FCC][1][77] = -28,
- [1][1][RTW89_FCC][2][77] = 44,
[1][1][RTW89_ETSI][1][77] = 127,
[1][1][RTW89_ETSI][0][77] = 127,
[1][1][RTW89_MKK][1][77] = 127,
[1][1][RTW89_MKK][0][77] = 127,
[1][1][RTW89_IC][1][77] = -28,
- [1][1][RTW89_IC][2][77] = 44,
[1][1][RTW89_KCC][1][77] = -14,
[1][1][RTW89_KCC][0][77] = 127,
[1][1][RTW89_ACMA][1][77] = 127,
@@ -56835,13 +54519,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][77] = 127,
[1][1][RTW89_THAILAND][0][77] = 127,
[1][1][RTW89_FCC][1][79] = -28,
- [1][1][RTW89_FCC][2][79] = 44,
[1][1][RTW89_ETSI][1][79] = 127,
[1][1][RTW89_ETSI][0][79] = 127,
[1][1][RTW89_MKK][1][79] = 127,
[1][1][RTW89_MKK][0][79] = 127,
[1][1][RTW89_IC][1][79] = -28,
- [1][1][RTW89_IC][2][79] = 44,
[1][1][RTW89_KCC][1][79] = -14,
[1][1][RTW89_KCC][0][79] = 127,
[1][1][RTW89_ACMA][1][79] = 127,
@@ -56854,13 +54536,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][79] = 127,
[1][1][RTW89_THAILAND][0][79] = 127,
[1][1][RTW89_FCC][1][81] = -28,
- [1][1][RTW89_FCC][2][81] = 44,
[1][1][RTW89_ETSI][1][81] = 127,
[1][1][RTW89_ETSI][0][81] = 127,
[1][1][RTW89_MKK][1][81] = 127,
[1][1][RTW89_MKK][0][81] = 127,
[1][1][RTW89_IC][1][81] = -28,
- [1][1][RTW89_IC][2][81] = 44,
[1][1][RTW89_KCC][1][81] = -14,
[1][1][RTW89_KCC][0][81] = 127,
[1][1][RTW89_ACMA][1][81] = 127,
@@ -56873,13 +54553,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][81] = 127,
[1][1][RTW89_THAILAND][0][81] = 127,
[1][1][RTW89_FCC][1][83] = -28,
- [1][1][RTW89_FCC][2][83] = 44,
[1][1][RTW89_ETSI][1][83] = 127,
[1][1][RTW89_ETSI][0][83] = 127,
[1][1][RTW89_MKK][1][83] = 127,
[1][1][RTW89_MKK][0][83] = 127,
[1][1][RTW89_IC][1][83] = -28,
- [1][1][RTW89_IC][2][83] = 44,
[1][1][RTW89_KCC][1][83] = -14,
[1][1][RTW89_KCC][0][83] = 127,
[1][1][RTW89_ACMA][1][83] = 127,
@@ -56892,13 +54570,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][83] = 127,
[1][1][RTW89_THAILAND][0][83] = 127,
[1][1][RTW89_FCC][1][85] = -28,
- [1][1][RTW89_FCC][2][85] = 44,
[1][1][RTW89_ETSI][1][85] = 127,
[1][1][RTW89_ETSI][0][85] = 127,
[1][1][RTW89_MKK][1][85] = 127,
[1][1][RTW89_MKK][0][85] = 127,
[1][1][RTW89_IC][1][85] = -28,
- [1][1][RTW89_IC][2][85] = 44,
[1][1][RTW89_KCC][1][85] = -14,
[1][1][RTW89_KCC][0][85] = 127,
[1][1][RTW89_ACMA][1][85] = 127,
@@ -56911,13 +54587,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][85] = 127,
[1][1][RTW89_THAILAND][0][85] = 127,
[1][1][RTW89_FCC][1][87] = -28,
- [1][1][RTW89_FCC][2][87] = 127,
[1][1][RTW89_ETSI][1][87] = 127,
[1][1][RTW89_ETSI][0][87] = 127,
[1][1][RTW89_MKK][1][87] = 127,
[1][1][RTW89_MKK][0][87] = 127,
[1][1][RTW89_IC][1][87] = -28,
- [1][1][RTW89_IC][2][87] = 127,
[1][1][RTW89_KCC][1][87] = -14,
[1][1][RTW89_KCC][0][87] = 127,
[1][1][RTW89_ACMA][1][87] = 127,
@@ -56930,13 +54604,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][87] = 127,
[1][1][RTW89_THAILAND][0][87] = 127,
[1][1][RTW89_FCC][1][89] = -26,
- [1][1][RTW89_FCC][2][89] = 127,
[1][1][RTW89_ETSI][1][89] = 127,
[1][1][RTW89_ETSI][0][89] = 127,
[1][1][RTW89_MKK][1][89] = 127,
[1][1][RTW89_MKK][0][89] = 127,
[1][1][RTW89_IC][1][89] = -26,
- [1][1][RTW89_IC][2][89] = 127,
[1][1][RTW89_KCC][1][89] = -14,
[1][1][RTW89_KCC][0][89] = 127,
[1][1][RTW89_ACMA][1][89] = 127,
@@ -56949,13 +54621,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][89] = 127,
[1][1][RTW89_THAILAND][0][89] = 127,
[1][1][RTW89_FCC][1][90] = -26,
- [1][1][RTW89_FCC][2][90] = 127,
[1][1][RTW89_ETSI][1][90] = 127,
[1][1][RTW89_ETSI][0][90] = 127,
[1][1][RTW89_MKK][1][90] = 127,
[1][1][RTW89_MKK][0][90] = 127,
[1][1][RTW89_IC][1][90] = -26,
- [1][1][RTW89_IC][2][90] = 127,
[1][1][RTW89_KCC][1][90] = -14,
[1][1][RTW89_KCC][0][90] = 127,
[1][1][RTW89_ACMA][1][90] = 127,
@@ -56968,13 +54638,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][90] = 127,
[1][1][RTW89_THAILAND][0][90] = 127,
[1][1][RTW89_FCC][1][92] = -26,
- [1][1][RTW89_FCC][2][92] = 127,
[1][1][RTW89_ETSI][1][92] = 127,
[1][1][RTW89_ETSI][0][92] = 127,
[1][1][RTW89_MKK][1][92] = 127,
[1][1][RTW89_MKK][0][92] = 127,
[1][1][RTW89_IC][1][92] = -26,
- [1][1][RTW89_IC][2][92] = 127,
[1][1][RTW89_KCC][1][92] = -14,
[1][1][RTW89_KCC][0][92] = 127,
[1][1][RTW89_ACMA][1][92] = 127,
@@ -56987,13 +54655,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][92] = 127,
[1][1][RTW89_THAILAND][0][92] = 127,
[1][1][RTW89_FCC][1][94] = -26,
- [1][1][RTW89_FCC][2][94] = 127,
[1][1][RTW89_ETSI][1][94] = 127,
[1][1][RTW89_ETSI][0][94] = 127,
[1][1][RTW89_MKK][1][94] = 127,
[1][1][RTW89_MKK][0][94] = 127,
[1][1][RTW89_IC][1][94] = -26,
- [1][1][RTW89_IC][2][94] = 127,
[1][1][RTW89_KCC][1][94] = -14,
[1][1][RTW89_KCC][0][94] = 127,
[1][1][RTW89_ACMA][1][94] = 127,
@@ -57006,13 +54672,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][94] = 127,
[1][1][RTW89_THAILAND][0][94] = 127,
[1][1][RTW89_FCC][1][96] = -26,
- [1][1][RTW89_FCC][2][96] = 127,
[1][1][RTW89_ETSI][1][96] = 127,
[1][1][RTW89_ETSI][0][96] = 127,
[1][1][RTW89_MKK][1][96] = 127,
[1][1][RTW89_MKK][0][96] = 127,
[1][1][RTW89_IC][1][96] = -26,
- [1][1][RTW89_IC][2][96] = 127,
[1][1][RTW89_KCC][1][96] = -14,
[1][1][RTW89_KCC][0][96] = 127,
[1][1][RTW89_ACMA][1][96] = 127,
@@ -57025,13 +54689,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][96] = 127,
[1][1][RTW89_THAILAND][0][96] = 127,
[1][1][RTW89_FCC][1][98] = -26,
- [1][1][RTW89_FCC][2][98] = 127,
[1][1][RTW89_ETSI][1][98] = 127,
[1][1][RTW89_ETSI][0][98] = 127,
[1][1][RTW89_MKK][1][98] = 127,
[1][1][RTW89_MKK][0][98] = 127,
[1][1][RTW89_IC][1][98] = -26,
- [1][1][RTW89_IC][2][98] = 127,
[1][1][RTW89_KCC][1][98] = -14,
[1][1][RTW89_KCC][0][98] = 127,
[1][1][RTW89_ACMA][1][98] = 127,
@@ -57044,13 +54706,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][98] = 127,
[1][1][RTW89_THAILAND][0][98] = 127,
[1][1][RTW89_FCC][1][100] = -26,
- [1][1][RTW89_FCC][2][100] = 127,
[1][1][RTW89_ETSI][1][100] = 127,
[1][1][RTW89_ETSI][0][100] = 127,
[1][1][RTW89_MKK][1][100] = 127,
[1][1][RTW89_MKK][0][100] = 127,
[1][1][RTW89_IC][1][100] = -26,
- [1][1][RTW89_IC][2][100] = 127,
[1][1][RTW89_KCC][1][100] = -14,
[1][1][RTW89_KCC][0][100] = 127,
[1][1][RTW89_ACMA][1][100] = 127,
@@ -57063,13 +54723,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][100] = 127,
[1][1][RTW89_THAILAND][0][100] = 127,
[1][1][RTW89_FCC][1][102] = -26,
- [1][1][RTW89_FCC][2][102] = 127,
[1][1][RTW89_ETSI][1][102] = 127,
[1][1][RTW89_ETSI][0][102] = 127,
[1][1][RTW89_MKK][1][102] = 127,
[1][1][RTW89_MKK][0][102] = 127,
[1][1][RTW89_IC][1][102] = -26,
- [1][1][RTW89_IC][2][102] = 127,
[1][1][RTW89_KCC][1][102] = -14,
[1][1][RTW89_KCC][0][102] = 127,
[1][1][RTW89_ACMA][1][102] = 127,
@@ -57082,13 +54740,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][102] = 127,
[1][1][RTW89_THAILAND][0][102] = 127,
[1][1][RTW89_FCC][1][104] = -26,
- [1][1][RTW89_FCC][2][104] = 127,
[1][1][RTW89_ETSI][1][104] = 127,
[1][1][RTW89_ETSI][0][104] = 127,
[1][1][RTW89_MKK][1][104] = 127,
[1][1][RTW89_MKK][0][104] = 127,
[1][1][RTW89_IC][1][104] = -26,
- [1][1][RTW89_IC][2][104] = 127,
[1][1][RTW89_KCC][1][104] = -14,
[1][1][RTW89_KCC][0][104] = 127,
[1][1][RTW89_ACMA][1][104] = 127,
@@ -57101,13 +54757,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][104] = 127,
[1][1][RTW89_THAILAND][0][104] = 127,
[1][1][RTW89_FCC][1][105] = -26,
- [1][1][RTW89_FCC][2][105] = 127,
[1][1][RTW89_ETSI][1][105] = 127,
[1][1][RTW89_ETSI][0][105] = 127,
[1][1][RTW89_MKK][1][105] = 127,
[1][1][RTW89_MKK][0][105] = 127,
[1][1][RTW89_IC][1][105] = -26,
- [1][1][RTW89_IC][2][105] = 127,
[1][1][RTW89_KCC][1][105] = -14,
[1][1][RTW89_KCC][0][105] = 127,
[1][1][RTW89_ACMA][1][105] = 127,
@@ -57120,13 +54774,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][105] = 127,
[1][1][RTW89_THAILAND][0][105] = 127,
[1][1][RTW89_FCC][1][107] = -22,
- [1][1][RTW89_FCC][2][107] = 127,
[1][1][RTW89_ETSI][1][107] = 127,
[1][1][RTW89_ETSI][0][107] = 127,
[1][1][RTW89_MKK][1][107] = 127,
[1][1][RTW89_MKK][0][107] = 127,
[1][1][RTW89_IC][1][107] = -22,
- [1][1][RTW89_IC][2][107] = 127,
[1][1][RTW89_KCC][1][107] = -14,
[1][1][RTW89_KCC][0][107] = 127,
[1][1][RTW89_ACMA][1][107] = 127,
@@ -57139,13 +54791,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][107] = 127,
[1][1][RTW89_THAILAND][0][107] = 127,
[1][1][RTW89_FCC][1][109] = -22,
- [1][1][RTW89_FCC][2][109] = 127,
[1][1][RTW89_ETSI][1][109] = 127,
[1][1][RTW89_ETSI][0][109] = 127,
[1][1][RTW89_MKK][1][109] = 127,
[1][1][RTW89_MKK][0][109] = 127,
[1][1][RTW89_IC][1][109] = -22,
- [1][1][RTW89_IC][2][109] = 127,
[1][1][RTW89_KCC][1][109] = 127,
[1][1][RTW89_KCC][0][109] = 127,
[1][1][RTW89_ACMA][1][109] = 127,
@@ -57158,13 +54808,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][109] = 127,
[1][1][RTW89_THAILAND][0][109] = 127,
[1][1][RTW89_FCC][1][111] = 127,
- [1][1][RTW89_FCC][2][111] = 127,
[1][1][RTW89_ETSI][1][111] = 127,
[1][1][RTW89_ETSI][0][111] = 127,
[1][1][RTW89_MKK][1][111] = 127,
[1][1][RTW89_MKK][0][111] = 127,
[1][1][RTW89_IC][1][111] = 127,
- [1][1][RTW89_IC][2][111] = 127,
[1][1][RTW89_KCC][1][111] = 127,
[1][1][RTW89_KCC][0][111] = 127,
[1][1][RTW89_ACMA][1][111] = 127,
@@ -57177,13 +54825,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][111] = 127,
[1][1][RTW89_THAILAND][0][111] = 127,
[1][1][RTW89_FCC][1][113] = 127,
- [1][1][RTW89_FCC][2][113] = 127,
[1][1][RTW89_ETSI][1][113] = 127,
[1][1][RTW89_ETSI][0][113] = 127,
[1][1][RTW89_MKK][1][113] = 127,
[1][1][RTW89_MKK][0][113] = 127,
[1][1][RTW89_IC][1][113] = 127,
- [1][1][RTW89_IC][2][113] = 127,
[1][1][RTW89_KCC][1][113] = 127,
[1][1][RTW89_KCC][0][113] = 127,
[1][1][RTW89_ACMA][1][113] = 127,
@@ -57196,13 +54842,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][113] = 127,
[1][1][RTW89_THAILAND][0][113] = 127,
[1][1][RTW89_FCC][1][115] = 127,
- [1][1][RTW89_FCC][2][115] = 127,
[1][1][RTW89_ETSI][1][115] = 127,
[1][1][RTW89_ETSI][0][115] = 127,
[1][1][RTW89_MKK][1][115] = 127,
[1][1][RTW89_MKK][0][115] = 127,
[1][1][RTW89_IC][1][115] = 127,
- [1][1][RTW89_IC][2][115] = 127,
[1][1][RTW89_KCC][1][115] = 127,
[1][1][RTW89_KCC][0][115] = 127,
[1][1][RTW89_ACMA][1][115] = 127,
@@ -57215,13 +54859,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][115] = 127,
[1][1][RTW89_THAILAND][0][115] = 127,
[1][1][RTW89_FCC][1][117] = 127,
- [1][1][RTW89_FCC][2][117] = 127,
[1][1][RTW89_ETSI][1][117] = 127,
[1][1][RTW89_ETSI][0][117] = 127,
[1][1][RTW89_MKK][1][117] = 127,
[1][1][RTW89_MKK][0][117] = 127,
[1][1][RTW89_IC][1][117] = 127,
- [1][1][RTW89_IC][2][117] = 127,
[1][1][RTW89_KCC][1][117] = 127,
[1][1][RTW89_KCC][0][117] = 127,
[1][1][RTW89_ACMA][1][117] = 127,
@@ -57234,13 +54876,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][117] = 127,
[1][1][RTW89_THAILAND][0][117] = 127,
[1][1][RTW89_FCC][1][119] = 127,
- [1][1][RTW89_FCC][2][119] = 127,
[1][1][RTW89_ETSI][1][119] = 127,
[1][1][RTW89_ETSI][0][119] = 127,
[1][1][RTW89_MKK][1][119] = 127,
[1][1][RTW89_MKK][0][119] = 127,
[1][1][RTW89_IC][1][119] = 127,
- [1][1][RTW89_IC][2][119] = 127,
[1][1][RTW89_KCC][1][119] = 127,
[1][1][RTW89_KCC][0][119] = 127,
[1][1][RTW89_ACMA][1][119] = 127,
@@ -57253,13 +54893,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][119] = 127,
[1][1][RTW89_THAILAND][0][119] = 127,
[2][0][RTW89_FCC][1][0] = 8,
- [2][0][RTW89_FCC][2][0] = 60,
[2][0][RTW89_ETSI][1][0] = 56,
[2][0][RTW89_ETSI][0][0] = 18,
[2][0][RTW89_MKK][1][0] = 54,
[2][0][RTW89_MKK][0][0] = 14,
[2][0][RTW89_IC][1][0] = 8,
- [2][0][RTW89_IC][2][0] = 60,
[2][0][RTW89_KCC][1][0] = -2,
[2][0][RTW89_KCC][0][0] = -2,
[2][0][RTW89_ACMA][1][0] = 56,
@@ -57272,13 +54910,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][0] = 52,
[2][0][RTW89_THAILAND][0][0] = 8,
[2][0][RTW89_FCC][1][2] = 8,
- [2][0][RTW89_FCC][2][2] = 60,
[2][0][RTW89_ETSI][1][2] = 56,
[2][0][RTW89_ETSI][0][2] = 18,
[2][0][RTW89_MKK][1][2] = 54,
[2][0][RTW89_MKK][0][2] = 14,
[2][0][RTW89_IC][1][2] = 8,
- [2][0][RTW89_IC][2][2] = 60,
[2][0][RTW89_KCC][1][2] = -2,
[2][0][RTW89_KCC][0][2] = -2,
[2][0][RTW89_ACMA][1][2] = 56,
@@ -57291,13 +54927,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][2] = 52,
[2][0][RTW89_THAILAND][0][2] = 8,
[2][0][RTW89_FCC][1][4] = 8,
- [2][0][RTW89_FCC][2][4] = 60,
[2][0][RTW89_ETSI][1][4] = 56,
[2][0][RTW89_ETSI][0][4] = 18,
[2][0][RTW89_MKK][1][4] = 54,
[2][0][RTW89_MKK][0][4] = 14,
[2][0][RTW89_IC][1][4] = 8,
- [2][0][RTW89_IC][2][4] = 60,
[2][0][RTW89_KCC][1][4] = -2,
[2][0][RTW89_KCC][0][4] = -2,
[2][0][RTW89_ACMA][1][4] = 56,
@@ -57310,13 +54944,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][4] = 52,
[2][0][RTW89_THAILAND][0][4] = 8,
[2][0][RTW89_FCC][1][6] = 8,
- [2][0][RTW89_FCC][2][6] = 60,
[2][0][RTW89_ETSI][1][6] = 56,
[2][0][RTW89_ETSI][0][6] = 18,
[2][0][RTW89_MKK][1][6] = 54,
[2][0][RTW89_MKK][0][6] = 14,
[2][0][RTW89_IC][1][6] = 8,
- [2][0][RTW89_IC][2][6] = 60,
[2][0][RTW89_KCC][1][6] = -2,
[2][0][RTW89_KCC][0][6] = -2,
[2][0][RTW89_ACMA][1][6] = 56,
@@ -57329,13 +54961,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][6] = 52,
[2][0][RTW89_THAILAND][0][6] = 8,
[2][0][RTW89_FCC][1][8] = 8,
- [2][0][RTW89_FCC][2][8] = 60,
[2][0][RTW89_ETSI][1][8] = 56,
[2][0][RTW89_ETSI][0][8] = 18,
[2][0][RTW89_MKK][1][8] = 54,
[2][0][RTW89_MKK][0][8] = 14,
[2][0][RTW89_IC][1][8] = 8,
- [2][0][RTW89_IC][2][8] = 60,
[2][0][RTW89_KCC][1][8] = -2,
[2][0][RTW89_KCC][0][8] = -2,
[2][0][RTW89_ACMA][1][8] = 56,
@@ -57348,13 +54978,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][8] = 52,
[2][0][RTW89_THAILAND][0][8] = 8,
[2][0][RTW89_FCC][1][10] = 8,
- [2][0][RTW89_FCC][2][10] = 60,
[2][0][RTW89_ETSI][1][10] = 56,
[2][0][RTW89_ETSI][0][10] = 18,
[2][0][RTW89_MKK][1][10] = 54,
[2][0][RTW89_MKK][0][10] = 14,
[2][0][RTW89_IC][1][10] = 8,
- [2][0][RTW89_IC][2][10] = 60,
[2][0][RTW89_KCC][1][10] = -2,
[2][0][RTW89_KCC][0][10] = -2,
[2][0][RTW89_ACMA][1][10] = 56,
@@ -57367,13 +54995,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][10] = 52,
[2][0][RTW89_THAILAND][0][10] = 8,
[2][0][RTW89_FCC][1][12] = 8,
- [2][0][RTW89_FCC][2][12] = 60,
[2][0][RTW89_ETSI][1][12] = 56,
[2][0][RTW89_ETSI][0][12] = 18,
[2][0][RTW89_MKK][1][12] = 54,
[2][0][RTW89_MKK][0][12] = 14,
[2][0][RTW89_IC][1][12] = 8,
- [2][0][RTW89_IC][2][12] = 60,
[2][0][RTW89_KCC][1][12] = -2,
[2][0][RTW89_KCC][0][12] = -2,
[2][0][RTW89_ACMA][1][12] = 56,
@@ -57386,13 +55012,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][12] = 52,
[2][0][RTW89_THAILAND][0][12] = 8,
[2][0][RTW89_FCC][1][14] = 8,
- [2][0][RTW89_FCC][2][14] = 60,
[2][0][RTW89_ETSI][1][14] = 56,
[2][0][RTW89_ETSI][0][14] = 18,
[2][0][RTW89_MKK][1][14] = 54,
[2][0][RTW89_MKK][0][14] = 14,
[2][0][RTW89_IC][1][14] = 8,
- [2][0][RTW89_IC][2][14] = 60,
[2][0][RTW89_KCC][1][14] = -2,
[2][0][RTW89_KCC][0][14] = -2,
[2][0][RTW89_ACMA][1][14] = 56,
@@ -57405,13 +55029,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][14] = 52,
[2][0][RTW89_THAILAND][0][14] = 8,
[2][0][RTW89_FCC][1][15] = 8,
- [2][0][RTW89_FCC][2][15] = 60,
[2][0][RTW89_ETSI][1][15] = 56,
[2][0][RTW89_ETSI][0][15] = 18,
[2][0][RTW89_MKK][1][15] = 54,
[2][0][RTW89_MKK][0][15] = 14,
[2][0][RTW89_IC][1][15] = 8,
- [2][0][RTW89_IC][2][15] = 60,
[2][0][RTW89_KCC][1][15] = -2,
[2][0][RTW89_KCC][0][15] = -2,
[2][0][RTW89_ACMA][1][15] = 56,
@@ -57424,13 +55046,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][15] = 52,
[2][0][RTW89_THAILAND][0][15] = 8,
[2][0][RTW89_FCC][1][17] = 8,
- [2][0][RTW89_FCC][2][17] = 60,
[2][0][RTW89_ETSI][1][17] = 56,
[2][0][RTW89_ETSI][0][17] = 18,
[2][0][RTW89_MKK][1][17] = 54,
[2][0][RTW89_MKK][0][17] = 14,
[2][0][RTW89_IC][1][17] = 8,
- [2][0][RTW89_IC][2][17] = 60,
[2][0][RTW89_KCC][1][17] = -2,
[2][0][RTW89_KCC][0][17] = -2,
[2][0][RTW89_ACMA][1][17] = 56,
@@ -57443,13 +55063,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][17] = 52,
[2][0][RTW89_THAILAND][0][17] = 8,
[2][0][RTW89_FCC][1][19] = 8,
- [2][0][RTW89_FCC][2][19] = 60,
[2][0][RTW89_ETSI][1][19] = 56,
[2][0][RTW89_ETSI][0][19] = 18,
[2][0][RTW89_MKK][1][19] = 54,
[2][0][RTW89_MKK][0][19] = 14,
[2][0][RTW89_IC][1][19] = 8,
- [2][0][RTW89_IC][2][19] = 60,
[2][0][RTW89_KCC][1][19] = -2,
[2][0][RTW89_KCC][0][19] = -2,
[2][0][RTW89_ACMA][1][19] = 56,
@@ -57462,13 +55080,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][19] = 52,
[2][0][RTW89_THAILAND][0][19] = 8,
[2][0][RTW89_FCC][1][21] = 8,
- [2][0][RTW89_FCC][2][21] = 60,
[2][0][RTW89_ETSI][1][21] = 56,
[2][0][RTW89_ETSI][0][21] = 18,
[2][0][RTW89_MKK][1][21] = 54,
[2][0][RTW89_MKK][0][21] = 14,
[2][0][RTW89_IC][1][21] = 8,
- [2][0][RTW89_IC][2][21] = 60,
[2][0][RTW89_KCC][1][21] = -2,
[2][0][RTW89_KCC][0][21] = -2,
[2][0][RTW89_ACMA][1][21] = 56,
@@ -57481,13 +55097,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][21] = 52,
[2][0][RTW89_THAILAND][0][21] = 8,
[2][0][RTW89_FCC][1][23] = 8,
- [2][0][RTW89_FCC][2][23] = 70,
[2][0][RTW89_ETSI][1][23] = 56,
[2][0][RTW89_ETSI][0][23] = 18,
[2][0][RTW89_MKK][1][23] = 56,
[2][0][RTW89_MKK][0][23] = 14,
[2][0][RTW89_IC][1][23] = 8,
- [2][0][RTW89_IC][2][23] = 70,
[2][0][RTW89_KCC][1][23] = -2,
[2][0][RTW89_KCC][0][23] = -2,
[2][0][RTW89_ACMA][1][23] = 56,
@@ -57500,13 +55114,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][23] = 52,
[2][0][RTW89_THAILAND][0][23] = 8,
[2][0][RTW89_FCC][1][25] = 8,
- [2][0][RTW89_FCC][2][25] = 70,
[2][0][RTW89_ETSI][1][25] = 56,
[2][0][RTW89_ETSI][0][25] = 18,
[2][0][RTW89_MKK][1][25] = 56,
[2][0][RTW89_MKK][0][25] = 14,
[2][0][RTW89_IC][1][25] = 8,
- [2][0][RTW89_IC][2][25] = 70,
[2][0][RTW89_KCC][1][25] = -2,
[2][0][RTW89_KCC][0][25] = -2,
[2][0][RTW89_ACMA][1][25] = 56,
@@ -57519,13 +55131,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][25] = 52,
[2][0][RTW89_THAILAND][0][25] = 8,
[2][0][RTW89_FCC][1][27] = 8,
- [2][0][RTW89_FCC][2][27] = 70,
[2][0][RTW89_ETSI][1][27] = 56,
[2][0][RTW89_ETSI][0][27] = 18,
[2][0][RTW89_MKK][1][27] = 56,
[2][0][RTW89_MKK][0][27] = 14,
[2][0][RTW89_IC][1][27] = 8,
- [2][0][RTW89_IC][2][27] = 70,
[2][0][RTW89_KCC][1][27] = -2,
[2][0][RTW89_KCC][0][27] = -2,
[2][0][RTW89_ACMA][1][27] = 56,
@@ -57538,13 +55148,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][27] = 52,
[2][0][RTW89_THAILAND][0][27] = 8,
[2][0][RTW89_FCC][1][29] = 8,
- [2][0][RTW89_FCC][2][29] = 70,
[2][0][RTW89_ETSI][1][29] = 56,
[2][0][RTW89_ETSI][0][29] = 18,
[2][0][RTW89_MKK][1][29] = 56,
[2][0][RTW89_MKK][0][29] = 14,
[2][0][RTW89_IC][1][29] = 8,
- [2][0][RTW89_IC][2][29] = 70,
[2][0][RTW89_KCC][1][29] = -2,
[2][0][RTW89_KCC][0][29] = -2,
[2][0][RTW89_ACMA][1][29] = 56,
@@ -57557,13 +55165,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][29] = 52,
[2][0][RTW89_THAILAND][0][29] = 8,
[2][0][RTW89_FCC][1][30] = 8,
- [2][0][RTW89_FCC][2][30] = 70,
[2][0][RTW89_ETSI][1][30] = 56,
[2][0][RTW89_ETSI][0][30] = 18,
[2][0][RTW89_MKK][1][30] = 56,
[2][0][RTW89_MKK][0][30] = 14,
[2][0][RTW89_IC][1][30] = 8,
- [2][0][RTW89_IC][2][30] = 70,
[2][0][RTW89_KCC][1][30] = -2,
[2][0][RTW89_KCC][0][30] = -2,
[2][0][RTW89_ACMA][1][30] = 56,
@@ -57576,13 +55182,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][30] = 52,
[2][0][RTW89_THAILAND][0][30] = 8,
[2][0][RTW89_FCC][1][32] = 8,
- [2][0][RTW89_FCC][2][32] = 70,
[2][0][RTW89_ETSI][1][32] = 56,
[2][0][RTW89_ETSI][0][32] = 18,
[2][0][RTW89_MKK][1][32] = 56,
[2][0][RTW89_MKK][0][32] = 14,
[2][0][RTW89_IC][1][32] = 8,
- [2][0][RTW89_IC][2][32] = 70,
[2][0][RTW89_KCC][1][32] = -2,
[2][0][RTW89_KCC][0][32] = -2,
[2][0][RTW89_ACMA][1][32] = 56,
@@ -57595,13 +55199,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][32] = 52,
[2][0][RTW89_THAILAND][0][32] = 8,
[2][0][RTW89_FCC][1][34] = 8,
- [2][0][RTW89_FCC][2][34] = 70,
[2][0][RTW89_ETSI][1][34] = 56,
[2][0][RTW89_ETSI][0][34] = 18,
[2][0][RTW89_MKK][1][34] = 56,
[2][0][RTW89_MKK][0][34] = 14,
[2][0][RTW89_IC][1][34] = 8,
- [2][0][RTW89_IC][2][34] = 70,
[2][0][RTW89_KCC][1][34] = -2,
[2][0][RTW89_KCC][0][34] = -2,
[2][0][RTW89_ACMA][1][34] = 56,
@@ -57614,13 +55216,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][34] = 52,
[2][0][RTW89_THAILAND][0][34] = 8,
[2][0][RTW89_FCC][1][36] = 8,
- [2][0][RTW89_FCC][2][36] = 70,
[2][0][RTW89_ETSI][1][36] = 56,
[2][0][RTW89_ETSI][0][36] = 18,
[2][0][RTW89_MKK][1][36] = 56,
[2][0][RTW89_MKK][0][36] = 14,
[2][0][RTW89_IC][1][36] = 8,
- [2][0][RTW89_IC][2][36] = 70,
[2][0][RTW89_KCC][1][36] = -2,
[2][0][RTW89_KCC][0][36] = -2,
[2][0][RTW89_ACMA][1][36] = 56,
@@ -57633,13 +55233,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][36] = 52,
[2][0][RTW89_THAILAND][0][36] = 8,
[2][0][RTW89_FCC][1][38] = 8,
- [2][0][RTW89_FCC][2][38] = 70,
[2][0][RTW89_ETSI][1][38] = 56,
[2][0][RTW89_ETSI][0][38] = 18,
[2][0][RTW89_MKK][1][38] = 56,
[2][0][RTW89_MKK][0][38] = 14,
[2][0][RTW89_IC][1][38] = 8,
- [2][0][RTW89_IC][2][38] = 70,
[2][0][RTW89_KCC][1][38] = -2,
[2][0][RTW89_KCC][0][38] = -2,
[2][0][RTW89_ACMA][1][38] = 56,
@@ -57652,13 +55250,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][38] = 52,
[2][0][RTW89_THAILAND][0][38] = 8,
[2][0][RTW89_FCC][1][40] = 8,
- [2][0][RTW89_FCC][2][40] = 70,
[2][0][RTW89_ETSI][1][40] = 56,
[2][0][RTW89_ETSI][0][40] = 18,
[2][0][RTW89_MKK][1][40] = 56,
[2][0][RTW89_MKK][0][40] = 14,
[2][0][RTW89_IC][1][40] = 8,
- [2][0][RTW89_IC][2][40] = 70,
[2][0][RTW89_KCC][1][40] = -2,
[2][0][RTW89_KCC][0][40] = -2,
[2][0][RTW89_ACMA][1][40] = 56,
@@ -57671,13 +55267,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][40] = 52,
[2][0][RTW89_THAILAND][0][40] = 8,
[2][0][RTW89_FCC][1][42] = 8,
- [2][0][RTW89_FCC][2][42] = 70,
[2][0][RTW89_ETSI][1][42] = 56,
[2][0][RTW89_ETSI][0][42] = 18,
[2][0][RTW89_MKK][1][42] = 56,
[2][0][RTW89_MKK][0][42] = 14,
[2][0][RTW89_IC][1][42] = 8,
- [2][0][RTW89_IC][2][42] = 70,
[2][0][RTW89_KCC][1][42] = -2,
[2][0][RTW89_KCC][0][42] = -2,
[2][0][RTW89_ACMA][1][42] = 56,
@@ -57690,13 +55284,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][42] = 52,
[2][0][RTW89_THAILAND][0][42] = 8,
[2][0][RTW89_FCC][1][44] = 8,
- [2][0][RTW89_FCC][2][44] = 70,
[2][0][RTW89_ETSI][1][44] = 56,
[2][0][RTW89_ETSI][0][44] = 18,
[2][0][RTW89_MKK][1][44] = 32,
[2][0][RTW89_MKK][0][44] = 14,
[2][0][RTW89_IC][1][44] = 8,
- [2][0][RTW89_IC][2][44] = 70,
[2][0][RTW89_KCC][1][44] = -2,
[2][0][RTW89_KCC][0][44] = -2,
[2][0][RTW89_ACMA][1][44] = 56,
@@ -57709,13 +55301,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][44] = 52,
[2][0][RTW89_THAILAND][0][44] = 8,
[2][0][RTW89_FCC][1][45] = 8,
- [2][0][RTW89_FCC][2][45] = 127,
[2][0][RTW89_ETSI][1][45] = 127,
[2][0][RTW89_ETSI][0][45] = 127,
[2][0][RTW89_MKK][1][45] = 127,
[2][0][RTW89_MKK][0][45] = 127,
[2][0][RTW89_IC][1][45] = 8,
- [2][0][RTW89_IC][2][45] = 70,
[2][0][RTW89_KCC][1][45] = -2,
[2][0][RTW89_KCC][0][45] = 127,
[2][0][RTW89_ACMA][1][45] = 127,
@@ -57728,13 +55318,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][45] = 127,
[2][0][RTW89_THAILAND][0][45] = 127,
[2][0][RTW89_FCC][1][47] = 8,
- [2][0][RTW89_FCC][2][47] = 127,
[2][0][RTW89_ETSI][1][47] = 127,
[2][0][RTW89_ETSI][0][47] = 127,
[2][0][RTW89_MKK][1][47] = 127,
[2][0][RTW89_MKK][0][47] = 127,
[2][0][RTW89_IC][1][47] = 8,
- [2][0][RTW89_IC][2][47] = 70,
[2][0][RTW89_KCC][1][47] = -2,
[2][0][RTW89_KCC][0][47] = 127,
[2][0][RTW89_ACMA][1][47] = 127,
@@ -57747,13 +55335,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][47] = 127,
[2][0][RTW89_THAILAND][0][47] = 127,
[2][0][RTW89_FCC][1][49] = 8,
- [2][0][RTW89_FCC][2][49] = 127,
[2][0][RTW89_ETSI][1][49] = 127,
[2][0][RTW89_ETSI][0][49] = 127,
[2][0][RTW89_MKK][1][49] = 127,
[2][0][RTW89_MKK][0][49] = 127,
[2][0][RTW89_IC][1][49] = 8,
- [2][0][RTW89_IC][2][49] = 70,
[2][0][RTW89_KCC][1][49] = -2,
[2][0][RTW89_KCC][0][49] = 127,
[2][0][RTW89_ACMA][1][49] = 127,
@@ -57766,13 +55352,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][49] = 127,
[2][0][RTW89_THAILAND][0][49] = 127,
[2][0][RTW89_FCC][1][51] = 8,
- [2][0][RTW89_FCC][2][51] = 127,
[2][0][RTW89_ETSI][1][51] = 127,
[2][0][RTW89_ETSI][0][51] = 127,
[2][0][RTW89_MKK][1][51] = 127,
[2][0][RTW89_MKK][0][51] = 127,
[2][0][RTW89_IC][1][51] = 8,
- [2][0][RTW89_IC][2][51] = 70,
[2][0][RTW89_KCC][1][51] = -2,
[2][0][RTW89_KCC][0][51] = 127,
[2][0][RTW89_ACMA][1][51] = 127,
@@ -57785,13 +55369,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][51] = 127,
[2][0][RTW89_THAILAND][0][51] = 127,
[2][0][RTW89_FCC][1][53] = 8,
- [2][0][RTW89_FCC][2][53] = 127,
[2][0][RTW89_ETSI][1][53] = 127,
[2][0][RTW89_ETSI][0][53] = 127,
[2][0][RTW89_MKK][1][53] = 127,
[2][0][RTW89_MKK][0][53] = 127,
[2][0][RTW89_IC][1][53] = 8,
- [2][0][RTW89_IC][2][53] = 70,
[2][0][RTW89_KCC][1][53] = -2,
[2][0][RTW89_KCC][0][53] = 127,
[2][0][RTW89_ACMA][1][53] = 127,
@@ -57804,13 +55386,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][53] = 127,
[2][0][RTW89_THAILAND][0][53] = 127,
[2][0][RTW89_FCC][1][55] = 8,
- [2][0][RTW89_FCC][2][55] = 68,
[2][0][RTW89_ETSI][1][55] = 127,
[2][0][RTW89_ETSI][0][55] = 127,
[2][0][RTW89_MKK][1][55] = 127,
[2][0][RTW89_MKK][0][55] = 127,
[2][0][RTW89_IC][1][55] = 8,
- [2][0][RTW89_IC][2][55] = 68,
[2][0][RTW89_KCC][1][55] = -2,
[2][0][RTW89_KCC][0][55] = 127,
[2][0][RTW89_ACMA][1][55] = 127,
@@ -57823,13 +55403,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][55] = 127,
[2][0][RTW89_THAILAND][0][55] = 127,
[2][0][RTW89_FCC][1][57] = 8,
- [2][0][RTW89_FCC][2][57] = 68,
[2][0][RTW89_ETSI][1][57] = 127,
[2][0][RTW89_ETSI][0][57] = 127,
[2][0][RTW89_MKK][1][57] = 127,
[2][0][RTW89_MKK][0][57] = 127,
[2][0][RTW89_IC][1][57] = 8,
- [2][0][RTW89_IC][2][57] = 68,
[2][0][RTW89_KCC][1][57] = -2,
[2][0][RTW89_KCC][0][57] = 127,
[2][0][RTW89_ACMA][1][57] = 127,
@@ -57842,13 +55420,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][57] = 127,
[2][0][RTW89_THAILAND][0][57] = 127,
[2][0][RTW89_FCC][1][59] = 8,
- [2][0][RTW89_FCC][2][59] = 68,
[2][0][RTW89_ETSI][1][59] = 127,
[2][0][RTW89_ETSI][0][59] = 127,
[2][0][RTW89_MKK][1][59] = 127,
[2][0][RTW89_MKK][0][59] = 127,
[2][0][RTW89_IC][1][59] = 8,
- [2][0][RTW89_IC][2][59] = 68,
[2][0][RTW89_KCC][1][59] = -2,
[2][0][RTW89_KCC][0][59] = 127,
[2][0][RTW89_ACMA][1][59] = 127,
@@ -57861,13 +55437,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][59] = 127,
[2][0][RTW89_THAILAND][0][59] = 127,
[2][0][RTW89_FCC][1][60] = 8,
- [2][0][RTW89_FCC][2][60] = 68,
[2][0][RTW89_ETSI][1][60] = 127,
[2][0][RTW89_ETSI][0][60] = 127,
[2][0][RTW89_MKK][1][60] = 127,
[2][0][RTW89_MKK][0][60] = 127,
[2][0][RTW89_IC][1][60] = 8,
- [2][0][RTW89_IC][2][60] = 68,
[2][0][RTW89_KCC][1][60] = -2,
[2][0][RTW89_KCC][0][60] = 127,
[2][0][RTW89_ACMA][1][60] = 127,
@@ -57880,13 +55454,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][60] = 127,
[2][0][RTW89_THAILAND][0][60] = 127,
[2][0][RTW89_FCC][1][62] = 8,
- [2][0][RTW89_FCC][2][62] = 68,
[2][0][RTW89_ETSI][1][62] = 127,
[2][0][RTW89_ETSI][0][62] = 127,
[2][0][RTW89_MKK][1][62] = 127,
[2][0][RTW89_MKK][0][62] = 127,
[2][0][RTW89_IC][1][62] = 8,
- [2][0][RTW89_IC][2][62] = 68,
[2][0][RTW89_KCC][1][62] = -2,
[2][0][RTW89_KCC][0][62] = 127,
[2][0][RTW89_ACMA][1][62] = 127,
@@ -57899,13 +55471,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][62] = 127,
[2][0][RTW89_THAILAND][0][62] = 127,
[2][0][RTW89_FCC][1][64] = 8,
- [2][0][RTW89_FCC][2][64] = 68,
[2][0][RTW89_ETSI][1][64] = 127,
[2][0][RTW89_ETSI][0][64] = 127,
[2][0][RTW89_MKK][1][64] = 127,
[2][0][RTW89_MKK][0][64] = 127,
[2][0][RTW89_IC][1][64] = 8,
- [2][0][RTW89_IC][2][64] = 68,
[2][0][RTW89_KCC][1][64] = -2,
[2][0][RTW89_KCC][0][64] = 127,
[2][0][RTW89_ACMA][1][64] = 127,
@@ -57918,13 +55488,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][64] = 127,
[2][0][RTW89_THAILAND][0][64] = 127,
[2][0][RTW89_FCC][1][66] = 8,
- [2][0][RTW89_FCC][2][66] = 68,
[2][0][RTW89_ETSI][1][66] = 127,
[2][0][RTW89_ETSI][0][66] = 127,
[2][0][RTW89_MKK][1][66] = 127,
[2][0][RTW89_MKK][0][66] = 127,
[2][0][RTW89_IC][1][66] = 8,
- [2][0][RTW89_IC][2][66] = 68,
[2][0][RTW89_KCC][1][66] = -2,
[2][0][RTW89_KCC][0][66] = 127,
[2][0][RTW89_ACMA][1][66] = 127,
@@ -57937,13 +55505,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][66] = 127,
[2][0][RTW89_THAILAND][0][66] = 127,
[2][0][RTW89_FCC][1][68] = 8,
- [2][0][RTW89_FCC][2][68] = 68,
[2][0][RTW89_ETSI][1][68] = 127,
[2][0][RTW89_ETSI][0][68] = 127,
[2][0][RTW89_MKK][1][68] = 127,
[2][0][RTW89_MKK][0][68] = 127,
[2][0][RTW89_IC][1][68] = 8,
- [2][0][RTW89_IC][2][68] = 68,
[2][0][RTW89_KCC][1][68] = -2,
[2][0][RTW89_KCC][0][68] = 127,
[2][0][RTW89_ACMA][1][68] = 127,
@@ -57956,13 +55522,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][68] = 127,
[2][0][RTW89_THAILAND][0][68] = 127,
[2][0][RTW89_FCC][1][70] = 8,
- [2][0][RTW89_FCC][2][70] = 68,
[2][0][RTW89_ETSI][1][70] = 127,
[2][0][RTW89_ETSI][0][70] = 127,
[2][0][RTW89_MKK][1][70] = 127,
[2][0][RTW89_MKK][0][70] = 127,
[2][0][RTW89_IC][1][70] = 8,
- [2][0][RTW89_IC][2][70] = 68,
[2][0][RTW89_KCC][1][70] = -2,
[2][0][RTW89_KCC][0][70] = 127,
[2][0][RTW89_ACMA][1][70] = 127,
@@ -57975,13 +55539,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][70] = 127,
[2][0][RTW89_THAILAND][0][70] = 127,
[2][0][RTW89_FCC][1][72] = 8,
- [2][0][RTW89_FCC][2][72] = 68,
[2][0][RTW89_ETSI][1][72] = 127,
[2][0][RTW89_ETSI][0][72] = 127,
[2][0][RTW89_MKK][1][72] = 127,
[2][0][RTW89_MKK][0][72] = 127,
[2][0][RTW89_IC][1][72] = 8,
- [2][0][RTW89_IC][2][72] = 68,
[2][0][RTW89_KCC][1][72] = -2,
[2][0][RTW89_KCC][0][72] = 127,
[2][0][RTW89_ACMA][1][72] = 127,
@@ -57994,13 +55556,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][72] = 127,
[2][0][RTW89_THAILAND][0][72] = 127,
[2][0][RTW89_FCC][1][74] = 8,
- [2][0][RTW89_FCC][2][74] = 68,
[2][0][RTW89_ETSI][1][74] = 127,
[2][0][RTW89_ETSI][0][74] = 127,
[2][0][RTW89_MKK][1][74] = 127,
[2][0][RTW89_MKK][0][74] = 127,
[2][0][RTW89_IC][1][74] = 8,
- [2][0][RTW89_IC][2][74] = 68,
[2][0][RTW89_KCC][1][74] = -2,
[2][0][RTW89_KCC][0][74] = 127,
[2][0][RTW89_ACMA][1][74] = 127,
@@ -58013,13 +55573,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][74] = 127,
[2][0][RTW89_THAILAND][0][74] = 127,
[2][0][RTW89_FCC][1][75] = 8,
- [2][0][RTW89_FCC][2][75] = 68,
[2][0][RTW89_ETSI][1][75] = 127,
[2][0][RTW89_ETSI][0][75] = 127,
[2][0][RTW89_MKK][1][75] = 127,
[2][0][RTW89_MKK][0][75] = 127,
[2][0][RTW89_IC][1][75] = 8,
- [2][0][RTW89_IC][2][75] = 68,
[2][0][RTW89_KCC][1][75] = -2,
[2][0][RTW89_KCC][0][75] = 127,
[2][0][RTW89_ACMA][1][75] = 127,
@@ -58032,13 +55590,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][75] = 127,
[2][0][RTW89_THAILAND][0][75] = 127,
[2][0][RTW89_FCC][1][77] = 8,
- [2][0][RTW89_FCC][2][77] = 68,
[2][0][RTW89_ETSI][1][77] = 127,
[2][0][RTW89_ETSI][0][77] = 127,
[2][0][RTW89_MKK][1][77] = 127,
[2][0][RTW89_MKK][0][77] = 127,
[2][0][RTW89_IC][1][77] = 8,
- [2][0][RTW89_IC][2][77] = 68,
[2][0][RTW89_KCC][1][77] = -2,
[2][0][RTW89_KCC][0][77] = 127,
[2][0][RTW89_ACMA][1][77] = 127,
@@ -58051,13 +55607,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][77] = 127,
[2][0][RTW89_THAILAND][0][77] = 127,
[2][0][RTW89_FCC][1][79] = 8,
- [2][0][RTW89_FCC][2][79] = 68,
[2][0][RTW89_ETSI][1][79] = 127,
[2][0][RTW89_ETSI][0][79] = 127,
[2][0][RTW89_MKK][1][79] = 127,
[2][0][RTW89_MKK][0][79] = 127,
[2][0][RTW89_IC][1][79] = 8,
- [2][0][RTW89_IC][2][79] = 68,
[2][0][RTW89_KCC][1][79] = -2,
[2][0][RTW89_KCC][0][79] = 127,
[2][0][RTW89_ACMA][1][79] = 127,
@@ -58070,13 +55624,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][79] = 127,
[2][0][RTW89_THAILAND][0][79] = 127,
[2][0][RTW89_FCC][1][81] = 8,
- [2][0][RTW89_FCC][2][81] = 68,
[2][0][RTW89_ETSI][1][81] = 127,
[2][0][RTW89_ETSI][0][81] = 127,
[2][0][RTW89_MKK][1][81] = 127,
[2][0][RTW89_MKK][0][81] = 127,
[2][0][RTW89_IC][1][81] = 8,
- [2][0][RTW89_IC][2][81] = 68,
[2][0][RTW89_KCC][1][81] = -2,
[2][0][RTW89_KCC][0][81] = 127,
[2][0][RTW89_ACMA][1][81] = 127,
@@ -58089,13 +55641,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][81] = 127,
[2][0][RTW89_THAILAND][0][81] = 127,
[2][0][RTW89_FCC][1][83] = 8,
- [2][0][RTW89_FCC][2][83] = 68,
[2][0][RTW89_ETSI][1][83] = 127,
[2][0][RTW89_ETSI][0][83] = 127,
[2][0][RTW89_MKK][1][83] = 127,
[2][0][RTW89_MKK][0][83] = 127,
[2][0][RTW89_IC][1][83] = 8,
- [2][0][RTW89_IC][2][83] = 68,
[2][0][RTW89_KCC][1][83] = -2,
[2][0][RTW89_KCC][0][83] = 127,
[2][0][RTW89_ACMA][1][83] = 127,
@@ -58108,13 +55658,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][83] = 127,
[2][0][RTW89_THAILAND][0][83] = 127,
[2][0][RTW89_FCC][1][85] = 8,
- [2][0][RTW89_FCC][2][85] = 68,
[2][0][RTW89_ETSI][1][85] = 127,
[2][0][RTW89_ETSI][0][85] = 127,
[2][0][RTW89_MKK][1][85] = 127,
[2][0][RTW89_MKK][0][85] = 127,
[2][0][RTW89_IC][1][85] = 8,
- [2][0][RTW89_IC][2][85] = 68,
[2][0][RTW89_KCC][1][85] = -2,
[2][0][RTW89_KCC][0][85] = 127,
[2][0][RTW89_ACMA][1][85] = 127,
@@ -58127,13 +55675,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][85] = 127,
[2][0][RTW89_THAILAND][0][85] = 127,
[2][0][RTW89_FCC][1][87] = 8,
- [2][0][RTW89_FCC][2][87] = 127,
[2][0][RTW89_ETSI][1][87] = 127,
[2][0][RTW89_ETSI][0][87] = 127,
[2][0][RTW89_MKK][1][87] = 127,
[2][0][RTW89_MKK][0][87] = 127,
[2][0][RTW89_IC][1][87] = 8,
- [2][0][RTW89_IC][2][87] = 127,
[2][0][RTW89_KCC][1][87] = -2,
[2][0][RTW89_KCC][0][87] = 127,
[2][0][RTW89_ACMA][1][87] = 127,
@@ -58146,13 +55692,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][87] = 127,
[2][0][RTW89_THAILAND][0][87] = 127,
[2][0][RTW89_FCC][1][89] = 8,
- [2][0][RTW89_FCC][2][89] = 127,
[2][0][RTW89_ETSI][1][89] = 127,
[2][0][RTW89_ETSI][0][89] = 127,
[2][0][RTW89_MKK][1][89] = 127,
[2][0][RTW89_MKK][0][89] = 127,
[2][0][RTW89_IC][1][89] = 8,
- [2][0][RTW89_IC][2][89] = 127,
[2][0][RTW89_KCC][1][89] = -2,
[2][0][RTW89_KCC][0][89] = 127,
[2][0][RTW89_ACMA][1][89] = 127,
@@ -58165,13 +55709,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][89] = 127,
[2][0][RTW89_THAILAND][0][89] = 127,
[2][0][RTW89_FCC][1][90] = 8,
- [2][0][RTW89_FCC][2][90] = 127,
[2][0][RTW89_ETSI][1][90] = 127,
[2][0][RTW89_ETSI][0][90] = 127,
[2][0][RTW89_MKK][1][90] = 127,
[2][0][RTW89_MKK][0][90] = 127,
[2][0][RTW89_IC][1][90] = 8,
- [2][0][RTW89_IC][2][90] = 127,
[2][0][RTW89_KCC][1][90] = -2,
[2][0][RTW89_KCC][0][90] = 127,
[2][0][RTW89_ACMA][1][90] = 127,
@@ -58184,13 +55726,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][90] = 127,
[2][0][RTW89_THAILAND][0][90] = 127,
[2][0][RTW89_FCC][1][92] = 8,
- [2][0][RTW89_FCC][2][92] = 127,
[2][0][RTW89_ETSI][1][92] = 127,
[2][0][RTW89_ETSI][0][92] = 127,
[2][0][RTW89_MKK][1][92] = 127,
[2][0][RTW89_MKK][0][92] = 127,
[2][0][RTW89_IC][1][92] = 8,
- [2][0][RTW89_IC][2][92] = 127,
[2][0][RTW89_KCC][1][92] = -2,
[2][0][RTW89_KCC][0][92] = 127,
[2][0][RTW89_ACMA][1][92] = 127,
@@ -58203,13 +55743,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][92] = 127,
[2][0][RTW89_THAILAND][0][92] = 127,
[2][0][RTW89_FCC][1][94] = 8,
- [2][0][RTW89_FCC][2][94] = 127,
[2][0][RTW89_ETSI][1][94] = 127,
[2][0][RTW89_ETSI][0][94] = 127,
[2][0][RTW89_MKK][1][94] = 127,
[2][0][RTW89_MKK][0][94] = 127,
[2][0][RTW89_IC][1][94] = 8,
- [2][0][RTW89_IC][2][94] = 127,
[2][0][RTW89_KCC][1][94] = -2,
[2][0][RTW89_KCC][0][94] = 127,
[2][0][RTW89_ACMA][1][94] = 127,
@@ -58222,13 +55760,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][94] = 127,
[2][0][RTW89_THAILAND][0][94] = 127,
[2][0][RTW89_FCC][1][96] = 8,
- [2][0][RTW89_FCC][2][96] = 127,
[2][0][RTW89_ETSI][1][96] = 127,
[2][0][RTW89_ETSI][0][96] = 127,
[2][0][RTW89_MKK][1][96] = 127,
[2][0][RTW89_MKK][0][96] = 127,
[2][0][RTW89_IC][1][96] = 8,
- [2][0][RTW89_IC][2][96] = 127,
[2][0][RTW89_KCC][1][96] = -2,
[2][0][RTW89_KCC][0][96] = 127,
[2][0][RTW89_ACMA][1][96] = 127,
@@ -58241,13 +55777,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][96] = 127,
[2][0][RTW89_THAILAND][0][96] = 127,
[2][0][RTW89_FCC][1][98] = 8,
- [2][0][RTW89_FCC][2][98] = 127,
[2][0][RTW89_ETSI][1][98] = 127,
[2][0][RTW89_ETSI][0][98] = 127,
[2][0][RTW89_MKK][1][98] = 127,
[2][0][RTW89_MKK][0][98] = 127,
[2][0][RTW89_IC][1][98] = 8,
- [2][0][RTW89_IC][2][98] = 127,
[2][0][RTW89_KCC][1][98] = -2,
[2][0][RTW89_KCC][0][98] = 127,
[2][0][RTW89_ACMA][1][98] = 127,
@@ -58260,13 +55794,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][98] = 127,
[2][0][RTW89_THAILAND][0][98] = 127,
[2][0][RTW89_FCC][1][100] = 8,
- [2][0][RTW89_FCC][2][100] = 127,
[2][0][RTW89_ETSI][1][100] = 127,
[2][0][RTW89_ETSI][0][100] = 127,
[2][0][RTW89_MKK][1][100] = 127,
[2][0][RTW89_MKK][0][100] = 127,
[2][0][RTW89_IC][1][100] = 8,
- [2][0][RTW89_IC][2][100] = 127,
[2][0][RTW89_KCC][1][100] = -2,
[2][0][RTW89_KCC][0][100] = 127,
[2][0][RTW89_ACMA][1][100] = 127,
@@ -58279,13 +55811,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][100] = 127,
[2][0][RTW89_THAILAND][0][100] = 127,
[2][0][RTW89_FCC][1][102] = 8,
- [2][0][RTW89_FCC][2][102] = 127,
[2][0][RTW89_ETSI][1][102] = 127,
[2][0][RTW89_ETSI][0][102] = 127,
[2][0][RTW89_MKK][1][102] = 127,
[2][0][RTW89_MKK][0][102] = 127,
[2][0][RTW89_IC][1][102] = 8,
- [2][0][RTW89_IC][2][102] = 127,
[2][0][RTW89_KCC][1][102] = -2,
[2][0][RTW89_KCC][0][102] = 127,
[2][0][RTW89_ACMA][1][102] = 127,
@@ -58298,13 +55828,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][102] = 127,
[2][0][RTW89_THAILAND][0][102] = 127,
[2][0][RTW89_FCC][1][104] = 8,
- [2][0][RTW89_FCC][2][104] = 127,
[2][0][RTW89_ETSI][1][104] = 127,
[2][0][RTW89_ETSI][0][104] = 127,
[2][0][RTW89_MKK][1][104] = 127,
[2][0][RTW89_MKK][0][104] = 127,
[2][0][RTW89_IC][1][104] = 8,
- [2][0][RTW89_IC][2][104] = 127,
[2][0][RTW89_KCC][1][104] = -2,
[2][0][RTW89_KCC][0][104] = 127,
[2][0][RTW89_ACMA][1][104] = 127,
@@ -58317,13 +55845,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][104] = 127,
[2][0][RTW89_THAILAND][0][104] = 127,
[2][0][RTW89_FCC][1][105] = 8,
- [2][0][RTW89_FCC][2][105] = 127,
[2][0][RTW89_ETSI][1][105] = 127,
[2][0][RTW89_ETSI][0][105] = 127,
[2][0][RTW89_MKK][1][105] = 127,
[2][0][RTW89_MKK][0][105] = 127,
[2][0][RTW89_IC][1][105] = 8,
- [2][0][RTW89_IC][2][105] = 127,
[2][0][RTW89_KCC][1][105] = -2,
[2][0][RTW89_KCC][0][105] = 127,
[2][0][RTW89_ACMA][1][105] = 127,
@@ -58336,13 +55862,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][105] = 127,
[2][0][RTW89_THAILAND][0][105] = 127,
[2][0][RTW89_FCC][1][107] = 10,
- [2][0][RTW89_FCC][2][107] = 127,
[2][0][RTW89_ETSI][1][107] = 127,
[2][0][RTW89_ETSI][0][107] = 127,
[2][0][RTW89_MKK][1][107] = 127,
[2][0][RTW89_MKK][0][107] = 127,
[2][0][RTW89_IC][1][107] = 10,
- [2][0][RTW89_IC][2][107] = 127,
[2][0][RTW89_KCC][1][107] = -2,
[2][0][RTW89_KCC][0][107] = 127,
[2][0][RTW89_ACMA][1][107] = 127,
@@ -58355,13 +55879,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][107] = 127,
[2][0][RTW89_THAILAND][0][107] = 127,
[2][0][RTW89_FCC][1][109] = 12,
- [2][0][RTW89_FCC][2][109] = 127,
[2][0][RTW89_ETSI][1][109] = 127,
[2][0][RTW89_ETSI][0][109] = 127,
[2][0][RTW89_MKK][1][109] = 127,
[2][0][RTW89_MKK][0][109] = 127,
[2][0][RTW89_IC][1][109] = 12,
- [2][0][RTW89_IC][2][109] = 127,
[2][0][RTW89_KCC][1][109] = 127,
[2][0][RTW89_KCC][0][109] = 127,
[2][0][RTW89_ACMA][1][109] = 127,
@@ -58374,13 +55896,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][109] = 127,
[2][0][RTW89_THAILAND][0][109] = 127,
[2][0][RTW89_FCC][1][111] = 127,
- [2][0][RTW89_FCC][2][111] = 127,
[2][0][RTW89_ETSI][1][111] = 127,
[2][0][RTW89_ETSI][0][111] = 127,
[2][0][RTW89_MKK][1][111] = 127,
[2][0][RTW89_MKK][0][111] = 127,
[2][0][RTW89_IC][1][111] = 127,
- [2][0][RTW89_IC][2][111] = 127,
[2][0][RTW89_KCC][1][111] = 127,
[2][0][RTW89_KCC][0][111] = 127,
[2][0][RTW89_ACMA][1][111] = 127,
@@ -58393,13 +55913,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][111] = 127,
[2][0][RTW89_THAILAND][0][111] = 127,
[2][0][RTW89_FCC][1][113] = 127,
- [2][0][RTW89_FCC][2][113] = 127,
[2][0][RTW89_ETSI][1][113] = 127,
[2][0][RTW89_ETSI][0][113] = 127,
[2][0][RTW89_MKK][1][113] = 127,
[2][0][RTW89_MKK][0][113] = 127,
[2][0][RTW89_IC][1][113] = 127,
- [2][0][RTW89_IC][2][113] = 127,
[2][0][RTW89_KCC][1][113] = 127,
[2][0][RTW89_KCC][0][113] = 127,
[2][0][RTW89_ACMA][1][113] = 127,
@@ -58412,13 +55930,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][113] = 127,
[2][0][RTW89_THAILAND][0][113] = 127,
[2][0][RTW89_FCC][1][115] = 127,
- [2][0][RTW89_FCC][2][115] = 127,
[2][0][RTW89_ETSI][1][115] = 127,
[2][0][RTW89_ETSI][0][115] = 127,
[2][0][RTW89_MKK][1][115] = 127,
[2][0][RTW89_MKK][0][115] = 127,
[2][0][RTW89_IC][1][115] = 127,
- [2][0][RTW89_IC][2][115] = 127,
[2][0][RTW89_KCC][1][115] = 127,
[2][0][RTW89_KCC][0][115] = 127,
[2][0][RTW89_ACMA][1][115] = 127,
@@ -58431,13 +55947,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][115] = 127,
[2][0][RTW89_THAILAND][0][115] = 127,
[2][0][RTW89_FCC][1][117] = 127,
- [2][0][RTW89_FCC][2][117] = 127,
[2][0][RTW89_ETSI][1][117] = 127,
[2][0][RTW89_ETSI][0][117] = 127,
[2][0][RTW89_MKK][1][117] = 127,
[2][0][RTW89_MKK][0][117] = 127,
[2][0][RTW89_IC][1][117] = 127,
- [2][0][RTW89_IC][2][117] = 127,
[2][0][RTW89_KCC][1][117] = 127,
[2][0][RTW89_KCC][0][117] = 127,
[2][0][RTW89_ACMA][1][117] = 127,
@@ -58450,13 +55964,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][117] = 127,
[2][0][RTW89_THAILAND][0][117] = 127,
[2][0][RTW89_FCC][1][119] = 127,
- [2][0][RTW89_FCC][2][119] = 127,
[2][0][RTW89_ETSI][1][119] = 127,
[2][0][RTW89_ETSI][0][119] = 127,
[2][0][RTW89_MKK][1][119] = 127,
[2][0][RTW89_MKK][0][119] = 127,
[2][0][RTW89_IC][1][119] = 127,
- [2][0][RTW89_IC][2][119] = 127,
[2][0][RTW89_KCC][1][119] = 127,
[2][0][RTW89_KCC][0][119] = 127,
[2][0][RTW89_ACMA][1][119] = 127,
@@ -58469,13 +55981,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][119] = 127,
[2][0][RTW89_THAILAND][0][119] = 127,
[2][1][RTW89_FCC][1][0] = -16,
- [2][1][RTW89_FCC][2][0] = 54,
[2][1][RTW89_ETSI][1][0] = 44,
[2][1][RTW89_ETSI][0][0] = 6,
[2][1][RTW89_MKK][1][0] = 42,
[2][1][RTW89_MKK][0][0] = 2,
[2][1][RTW89_IC][1][0] = -16,
- [2][1][RTW89_IC][2][0] = 54,
[2][1][RTW89_KCC][1][0] = -14,
[2][1][RTW89_KCC][0][0] = -14,
[2][1][RTW89_ACMA][1][0] = 44,
@@ -58488,13 +55998,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][0] = 28,
[2][1][RTW89_THAILAND][0][0] = -16,
[2][1][RTW89_FCC][1][2] = -16,
- [2][1][RTW89_FCC][2][2] = 54,
[2][1][RTW89_ETSI][1][2] = 44,
[2][1][RTW89_ETSI][0][2] = 6,
[2][1][RTW89_MKK][1][2] = 40,
[2][1][RTW89_MKK][0][2] = 2,
[2][1][RTW89_IC][1][2] = -16,
- [2][1][RTW89_IC][2][2] = 54,
[2][1][RTW89_KCC][1][2] = -14,
[2][1][RTW89_KCC][0][2] = -14,
[2][1][RTW89_ACMA][1][2] = 44,
@@ -58507,13 +56015,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][2] = 28,
[2][1][RTW89_THAILAND][0][2] = -16,
[2][1][RTW89_FCC][1][4] = -16,
- [2][1][RTW89_FCC][2][4] = 54,
[2][1][RTW89_ETSI][1][4] = 44,
[2][1][RTW89_ETSI][0][4] = 6,
[2][1][RTW89_MKK][1][4] = 40,
[2][1][RTW89_MKK][0][4] = 2,
[2][1][RTW89_IC][1][4] = -16,
- [2][1][RTW89_IC][2][4] = 54,
[2][1][RTW89_KCC][1][4] = -14,
[2][1][RTW89_KCC][0][4] = -14,
[2][1][RTW89_ACMA][1][4] = 44,
@@ -58526,13 +56032,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][4] = 28,
[2][1][RTW89_THAILAND][0][4] = -16,
[2][1][RTW89_FCC][1][6] = -16,
- [2][1][RTW89_FCC][2][6] = 54,
[2][1][RTW89_ETSI][1][6] = 44,
[2][1][RTW89_ETSI][0][6] = 6,
[2][1][RTW89_MKK][1][6] = 40,
[2][1][RTW89_MKK][0][6] = 2,
[2][1][RTW89_IC][1][6] = -16,
- [2][1][RTW89_IC][2][6] = 54,
[2][1][RTW89_KCC][1][6] = -14,
[2][1][RTW89_KCC][0][6] = -14,
[2][1][RTW89_ACMA][1][6] = 44,
@@ -58545,13 +56049,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][6] = 28,
[2][1][RTW89_THAILAND][0][6] = -16,
[2][1][RTW89_FCC][1][8] = -16,
- [2][1][RTW89_FCC][2][8] = 54,
[2][1][RTW89_ETSI][1][8] = 44,
[2][1][RTW89_ETSI][0][8] = 6,
[2][1][RTW89_MKK][1][8] = 40,
[2][1][RTW89_MKK][0][8] = 2,
[2][1][RTW89_IC][1][8] = -16,
- [2][1][RTW89_IC][2][8] = 54,
[2][1][RTW89_KCC][1][8] = -14,
[2][1][RTW89_KCC][0][8] = -14,
[2][1][RTW89_ACMA][1][8] = 44,
@@ -58564,13 +56066,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][8] = 28,
[2][1][RTW89_THAILAND][0][8] = -16,
[2][1][RTW89_FCC][1][10] = -16,
- [2][1][RTW89_FCC][2][10] = 54,
[2][1][RTW89_ETSI][1][10] = 44,
[2][1][RTW89_ETSI][0][10] = 6,
[2][1][RTW89_MKK][1][10] = 40,
[2][1][RTW89_MKK][0][10] = 2,
[2][1][RTW89_IC][1][10] = -16,
- [2][1][RTW89_IC][2][10] = 54,
[2][1][RTW89_KCC][1][10] = -14,
[2][1][RTW89_KCC][0][10] = -14,
[2][1][RTW89_ACMA][1][10] = 44,
@@ -58583,13 +56083,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][10] = 28,
[2][1][RTW89_THAILAND][0][10] = -16,
[2][1][RTW89_FCC][1][12] = -16,
- [2][1][RTW89_FCC][2][12] = 54,
[2][1][RTW89_ETSI][1][12] = 44,
[2][1][RTW89_ETSI][0][12] = 6,
[2][1][RTW89_MKK][1][12] = 40,
[2][1][RTW89_MKK][0][12] = 2,
[2][1][RTW89_IC][1][12] = -16,
- [2][1][RTW89_IC][2][12] = 54,
[2][1][RTW89_KCC][1][12] = -14,
[2][1][RTW89_KCC][0][12] = -14,
[2][1][RTW89_ACMA][1][12] = 44,
@@ -58602,13 +56100,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][12] = 28,
[2][1][RTW89_THAILAND][0][12] = -16,
[2][1][RTW89_FCC][1][14] = -16,
- [2][1][RTW89_FCC][2][14] = 54,
[2][1][RTW89_ETSI][1][14] = 44,
[2][1][RTW89_ETSI][0][14] = 6,
[2][1][RTW89_MKK][1][14] = 40,
[2][1][RTW89_MKK][0][14] = 2,
[2][1][RTW89_IC][1][14] = -16,
- [2][1][RTW89_IC][2][14] = 54,
[2][1][RTW89_KCC][1][14] = -14,
[2][1][RTW89_KCC][0][14] = -14,
[2][1][RTW89_ACMA][1][14] = 44,
@@ -58621,13 +56117,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][14] = 28,
[2][1][RTW89_THAILAND][0][14] = -16,
[2][1][RTW89_FCC][1][15] = -16,
- [2][1][RTW89_FCC][2][15] = 54,
[2][1][RTW89_ETSI][1][15] = 44,
[2][1][RTW89_ETSI][0][15] = 6,
[2][1][RTW89_MKK][1][15] = 40,
[2][1][RTW89_MKK][0][15] = 2,
[2][1][RTW89_IC][1][15] = -16,
- [2][1][RTW89_IC][2][15] = 54,
[2][1][RTW89_KCC][1][15] = -14,
[2][1][RTW89_KCC][0][15] = -14,
[2][1][RTW89_ACMA][1][15] = 44,
@@ -58640,13 +56134,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][15] = 28,
[2][1][RTW89_THAILAND][0][15] = -16,
[2][1][RTW89_FCC][1][17] = -16,
- [2][1][RTW89_FCC][2][17] = 54,
[2][1][RTW89_ETSI][1][17] = 44,
[2][1][RTW89_ETSI][0][17] = 6,
[2][1][RTW89_MKK][1][17] = 40,
[2][1][RTW89_MKK][0][17] = 2,
[2][1][RTW89_IC][1][17] = -16,
- [2][1][RTW89_IC][2][17] = 54,
[2][1][RTW89_KCC][1][17] = -14,
[2][1][RTW89_KCC][0][17] = -14,
[2][1][RTW89_ACMA][1][17] = 44,
@@ -58659,13 +56151,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][17] = 28,
[2][1][RTW89_THAILAND][0][17] = -16,
[2][1][RTW89_FCC][1][19] = -16,
- [2][1][RTW89_FCC][2][19] = 54,
[2][1][RTW89_ETSI][1][19] = 44,
[2][1][RTW89_ETSI][0][19] = 6,
[2][1][RTW89_MKK][1][19] = 40,
[2][1][RTW89_MKK][0][19] = 2,
[2][1][RTW89_IC][1][19] = -16,
- [2][1][RTW89_IC][2][19] = 54,
[2][1][RTW89_KCC][1][19] = -14,
[2][1][RTW89_KCC][0][19] = -14,
[2][1][RTW89_ACMA][1][19] = 44,
@@ -58678,13 +56168,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][19] = 28,
[2][1][RTW89_THAILAND][0][19] = -16,
[2][1][RTW89_FCC][1][21] = -16,
- [2][1][RTW89_FCC][2][21] = 54,
[2][1][RTW89_ETSI][1][21] = 44,
[2][1][RTW89_ETSI][0][21] = 6,
[2][1][RTW89_MKK][1][21] = 40,
[2][1][RTW89_MKK][0][21] = 2,
[2][1][RTW89_IC][1][21] = -16,
- [2][1][RTW89_IC][2][21] = 54,
[2][1][RTW89_KCC][1][21] = -14,
[2][1][RTW89_KCC][0][21] = -14,
[2][1][RTW89_ACMA][1][21] = 44,
@@ -58697,13 +56185,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][21] = 28,
[2][1][RTW89_THAILAND][0][21] = -16,
[2][1][RTW89_FCC][1][23] = -16,
- [2][1][RTW89_FCC][2][23] = 54,
[2][1][RTW89_ETSI][1][23] = 44,
[2][1][RTW89_ETSI][0][23] = 6,
[2][1][RTW89_MKK][1][23] = 40,
[2][1][RTW89_MKK][0][23] = 2,
[2][1][RTW89_IC][1][23] = -16,
- [2][1][RTW89_IC][2][23] = 54,
[2][1][RTW89_KCC][1][23] = -14,
[2][1][RTW89_KCC][0][23] = -14,
[2][1][RTW89_ACMA][1][23] = 44,
@@ -58716,13 +56202,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][23] = 30,
[2][1][RTW89_THAILAND][0][23] = -16,
[2][1][RTW89_FCC][1][25] = -16,
- [2][1][RTW89_FCC][2][25] = 54,
[2][1][RTW89_ETSI][1][25] = 44,
[2][1][RTW89_ETSI][0][25] = 6,
[2][1][RTW89_MKK][1][25] = 40,
[2][1][RTW89_MKK][0][25] = 2,
[2][1][RTW89_IC][1][25] = -16,
- [2][1][RTW89_IC][2][25] = 54,
[2][1][RTW89_KCC][1][25] = -14,
[2][1][RTW89_KCC][0][25] = -14,
[2][1][RTW89_ACMA][1][25] = 44,
@@ -58735,13 +56219,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][25] = 28,
[2][1][RTW89_THAILAND][0][25] = -16,
[2][1][RTW89_FCC][1][27] = -16,
- [2][1][RTW89_FCC][2][27] = 54,
[2][1][RTW89_ETSI][1][27] = 44,
[2][1][RTW89_ETSI][0][27] = 6,
[2][1][RTW89_MKK][1][27] = 40,
[2][1][RTW89_MKK][0][27] = 2,
[2][1][RTW89_IC][1][27] = -16,
- [2][1][RTW89_IC][2][27] = 54,
[2][1][RTW89_KCC][1][27] = -14,
[2][1][RTW89_KCC][0][27] = -14,
[2][1][RTW89_ACMA][1][27] = 44,
@@ -58754,13 +56236,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][27] = 28,
[2][1][RTW89_THAILAND][0][27] = -16,
[2][1][RTW89_FCC][1][29] = -16,
- [2][1][RTW89_FCC][2][29] = 54,
[2][1][RTW89_ETSI][1][29] = 44,
[2][1][RTW89_ETSI][0][29] = 6,
[2][1][RTW89_MKK][1][29] = 40,
[2][1][RTW89_MKK][0][29] = 2,
[2][1][RTW89_IC][1][29] = -16,
- [2][1][RTW89_IC][2][29] = 54,
[2][1][RTW89_KCC][1][29] = -14,
[2][1][RTW89_KCC][0][29] = -14,
[2][1][RTW89_ACMA][1][29] = 44,
@@ -58773,13 +56253,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][29] = 28,
[2][1][RTW89_THAILAND][0][29] = -16,
[2][1][RTW89_FCC][1][30] = -16,
- [2][1][RTW89_FCC][2][30] = 54,
[2][1][RTW89_ETSI][1][30] = 44,
[2][1][RTW89_ETSI][0][30] = 6,
[2][1][RTW89_MKK][1][30] = 40,
[2][1][RTW89_MKK][0][30] = 2,
[2][1][RTW89_IC][1][30] = -16,
- [2][1][RTW89_IC][2][30] = 54,
[2][1][RTW89_KCC][1][30] = -14,
[2][1][RTW89_KCC][0][30] = -14,
[2][1][RTW89_ACMA][1][30] = 44,
@@ -58792,13 +56270,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][30] = 28,
[2][1][RTW89_THAILAND][0][30] = -16,
[2][1][RTW89_FCC][1][32] = -16,
- [2][1][RTW89_FCC][2][32] = 54,
[2][1][RTW89_ETSI][1][32] = 44,
[2][1][RTW89_ETSI][0][32] = 6,
[2][1][RTW89_MKK][1][32] = 40,
[2][1][RTW89_MKK][0][32] = 2,
[2][1][RTW89_IC][1][32] = -16,
- [2][1][RTW89_IC][2][32] = 54,
[2][1][RTW89_KCC][1][32] = -14,
[2][1][RTW89_KCC][0][32] = -14,
[2][1][RTW89_ACMA][1][32] = 44,
@@ -58811,13 +56287,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][32] = 28,
[2][1][RTW89_THAILAND][0][32] = -16,
[2][1][RTW89_FCC][1][34] = -16,
- [2][1][RTW89_FCC][2][34] = 54,
[2][1][RTW89_ETSI][1][34] = 44,
[2][1][RTW89_ETSI][0][34] = 6,
[2][1][RTW89_MKK][1][34] = 40,
[2][1][RTW89_MKK][0][34] = 2,
[2][1][RTW89_IC][1][34] = -16,
- [2][1][RTW89_IC][2][34] = 54,
[2][1][RTW89_KCC][1][34] = -14,
[2][1][RTW89_KCC][0][34] = -14,
[2][1][RTW89_ACMA][1][34] = 44,
@@ -58830,13 +56304,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][34] = 28,
[2][1][RTW89_THAILAND][0][34] = -16,
[2][1][RTW89_FCC][1][36] = -16,
- [2][1][RTW89_FCC][2][36] = 54,
[2][1][RTW89_ETSI][1][36] = 44,
[2][1][RTW89_ETSI][0][36] = 6,
[2][1][RTW89_MKK][1][36] = 40,
[2][1][RTW89_MKK][0][36] = 2,
[2][1][RTW89_IC][1][36] = -16,
- [2][1][RTW89_IC][2][36] = 54,
[2][1][RTW89_KCC][1][36] = -14,
[2][1][RTW89_KCC][0][36] = -14,
[2][1][RTW89_ACMA][1][36] = 44,
@@ -58849,13 +56321,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][36] = 28,
[2][1][RTW89_THAILAND][0][36] = -16,
[2][1][RTW89_FCC][1][38] = -16,
- [2][1][RTW89_FCC][2][38] = 54,
[2][1][RTW89_ETSI][1][38] = 44,
[2][1][RTW89_ETSI][0][38] = 6,
[2][1][RTW89_MKK][1][38] = 40,
[2][1][RTW89_MKK][0][38] = 2,
[2][1][RTW89_IC][1][38] = -16,
- [2][1][RTW89_IC][2][38] = 54,
[2][1][RTW89_KCC][1][38] = -14,
[2][1][RTW89_KCC][0][38] = -14,
[2][1][RTW89_ACMA][1][38] = 44,
@@ -58868,13 +56338,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][38] = 28,
[2][1][RTW89_THAILAND][0][38] = -16,
[2][1][RTW89_FCC][1][40] = -16,
- [2][1][RTW89_FCC][2][40] = 54,
[2][1][RTW89_ETSI][1][40] = 44,
[2][1][RTW89_ETSI][0][40] = 6,
[2][1][RTW89_MKK][1][40] = 40,
[2][1][RTW89_MKK][0][40] = 2,
[2][1][RTW89_IC][1][40] = -16,
- [2][1][RTW89_IC][2][40] = 54,
[2][1][RTW89_KCC][1][40] = -14,
[2][1][RTW89_KCC][0][40] = -14,
[2][1][RTW89_ACMA][1][40] = 44,
@@ -58887,13 +56355,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][40] = 28,
[2][1][RTW89_THAILAND][0][40] = -16,
[2][1][RTW89_FCC][1][42] = -16,
- [2][1][RTW89_FCC][2][42] = 54,
[2][1][RTW89_ETSI][1][42] = 44,
[2][1][RTW89_ETSI][0][42] = 6,
[2][1][RTW89_MKK][1][42] = 40,
[2][1][RTW89_MKK][0][42] = 2,
[2][1][RTW89_IC][1][42] = -16,
- [2][1][RTW89_IC][2][42] = 54,
[2][1][RTW89_KCC][1][42] = -14,
[2][1][RTW89_KCC][0][42] = -14,
[2][1][RTW89_ACMA][1][42] = 44,
@@ -58906,13 +56372,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][42] = 28,
[2][1][RTW89_THAILAND][0][42] = -16,
[2][1][RTW89_FCC][1][44] = -16,
- [2][1][RTW89_FCC][2][44] = 54,
[2][1][RTW89_ETSI][1][44] = 44,
[2][1][RTW89_ETSI][0][44] = 6,
[2][1][RTW89_MKK][1][44] = 16,
[2][1][RTW89_MKK][0][44] = 2,
[2][1][RTW89_IC][1][44] = -16,
- [2][1][RTW89_IC][2][44] = 54,
[2][1][RTW89_KCC][1][44] = -14,
[2][1][RTW89_KCC][0][44] = -14,
[2][1][RTW89_ACMA][1][44] = 44,
@@ -58925,13 +56389,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][44] = 28,
[2][1][RTW89_THAILAND][0][44] = -16,
[2][1][RTW89_FCC][1][45] = -16,
- [2][1][RTW89_FCC][2][45] = 127,
[2][1][RTW89_ETSI][1][45] = 127,
[2][1][RTW89_ETSI][0][45] = 127,
[2][1][RTW89_MKK][1][45] = 127,
[2][1][RTW89_MKK][0][45] = 127,
[2][1][RTW89_IC][1][45] = -16,
- [2][1][RTW89_IC][2][45] = 56,
[2][1][RTW89_KCC][1][45] = -14,
[2][1][RTW89_KCC][0][45] = 127,
[2][1][RTW89_ACMA][1][45] = 127,
@@ -58944,13 +56406,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][45] = 127,
[2][1][RTW89_THAILAND][0][45] = 127,
[2][1][RTW89_FCC][1][47] = -16,
- [2][1][RTW89_FCC][2][47] = 127,
[2][1][RTW89_ETSI][1][47] = 127,
[2][1][RTW89_ETSI][0][47] = 127,
[2][1][RTW89_MKK][1][47] = 127,
[2][1][RTW89_MKK][0][47] = 127,
[2][1][RTW89_IC][1][47] = -16,
- [2][1][RTW89_IC][2][47] = 56,
[2][1][RTW89_KCC][1][47] = -14,
[2][1][RTW89_KCC][0][47] = 127,
[2][1][RTW89_ACMA][1][47] = 127,
@@ -58963,13 +56423,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][47] = 127,
[2][1][RTW89_THAILAND][0][47] = 127,
[2][1][RTW89_FCC][1][49] = -16,
- [2][1][RTW89_FCC][2][49] = 127,
[2][1][RTW89_ETSI][1][49] = 127,
[2][1][RTW89_ETSI][0][49] = 127,
[2][1][RTW89_MKK][1][49] = 127,
[2][1][RTW89_MKK][0][49] = 127,
[2][1][RTW89_IC][1][49] = -16,
- [2][1][RTW89_IC][2][49] = 56,
[2][1][RTW89_KCC][1][49] = -14,
[2][1][RTW89_KCC][0][49] = 127,
[2][1][RTW89_ACMA][1][49] = 127,
@@ -58982,13 +56440,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][49] = 127,
[2][1][RTW89_THAILAND][0][49] = 127,
[2][1][RTW89_FCC][1][51] = -16,
- [2][1][RTW89_FCC][2][51] = 127,
[2][1][RTW89_ETSI][1][51] = 127,
[2][1][RTW89_ETSI][0][51] = 127,
[2][1][RTW89_MKK][1][51] = 127,
[2][1][RTW89_MKK][0][51] = 127,
[2][1][RTW89_IC][1][51] = -16,
- [2][1][RTW89_IC][2][51] = 56,
[2][1][RTW89_KCC][1][51] = -14,
[2][1][RTW89_KCC][0][51] = 127,
[2][1][RTW89_ACMA][1][51] = 127,
@@ -59001,13 +56457,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][51] = 127,
[2][1][RTW89_THAILAND][0][51] = 127,
[2][1][RTW89_FCC][1][53] = -16,
- [2][1][RTW89_FCC][2][53] = 127,
[2][1][RTW89_ETSI][1][53] = 127,
[2][1][RTW89_ETSI][0][53] = 127,
[2][1][RTW89_MKK][1][53] = 127,
[2][1][RTW89_MKK][0][53] = 127,
[2][1][RTW89_IC][1][53] = -16,
- [2][1][RTW89_IC][2][53] = 56,
[2][1][RTW89_KCC][1][53] = -14,
[2][1][RTW89_KCC][0][53] = 127,
[2][1][RTW89_ACMA][1][53] = 127,
@@ -59020,13 +56474,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][53] = 127,
[2][1][RTW89_THAILAND][0][53] = 127,
[2][1][RTW89_FCC][1][55] = -16,
- [2][1][RTW89_FCC][2][55] = 54,
[2][1][RTW89_ETSI][1][55] = 127,
[2][1][RTW89_ETSI][0][55] = 127,
[2][1][RTW89_MKK][1][55] = 127,
[2][1][RTW89_MKK][0][55] = 127,
[2][1][RTW89_IC][1][55] = -16,
- [2][1][RTW89_IC][2][55] = 54,
[2][1][RTW89_KCC][1][55] = -14,
[2][1][RTW89_KCC][0][55] = 127,
[2][1][RTW89_ACMA][1][55] = 127,
@@ -59039,13 +56491,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][55] = 127,
[2][1][RTW89_THAILAND][0][55] = 127,
[2][1][RTW89_FCC][1][57] = -16,
- [2][1][RTW89_FCC][2][57] = 54,
[2][1][RTW89_ETSI][1][57] = 127,
[2][1][RTW89_ETSI][0][57] = 127,
[2][1][RTW89_MKK][1][57] = 127,
[2][1][RTW89_MKK][0][57] = 127,
[2][1][RTW89_IC][1][57] = -16,
- [2][1][RTW89_IC][2][57] = 54,
[2][1][RTW89_KCC][1][57] = -14,
[2][1][RTW89_KCC][0][57] = 127,
[2][1][RTW89_ACMA][1][57] = 127,
@@ -59058,13 +56508,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][57] = 127,
[2][1][RTW89_THAILAND][0][57] = 127,
[2][1][RTW89_FCC][1][59] = -16,
- [2][1][RTW89_FCC][2][59] = 54,
[2][1][RTW89_ETSI][1][59] = 127,
[2][1][RTW89_ETSI][0][59] = 127,
[2][1][RTW89_MKK][1][59] = 127,
[2][1][RTW89_MKK][0][59] = 127,
[2][1][RTW89_IC][1][59] = -16,
- [2][1][RTW89_IC][2][59] = 54,
[2][1][RTW89_KCC][1][59] = -14,
[2][1][RTW89_KCC][0][59] = 127,
[2][1][RTW89_ACMA][1][59] = 127,
@@ -59077,13 +56525,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][59] = 127,
[2][1][RTW89_THAILAND][0][59] = 127,
[2][1][RTW89_FCC][1][60] = -16,
- [2][1][RTW89_FCC][2][60] = 54,
[2][1][RTW89_ETSI][1][60] = 127,
[2][1][RTW89_ETSI][0][60] = 127,
[2][1][RTW89_MKK][1][60] = 127,
[2][1][RTW89_MKK][0][60] = 127,
[2][1][RTW89_IC][1][60] = -16,
- [2][1][RTW89_IC][2][60] = 54,
[2][1][RTW89_KCC][1][60] = -14,
[2][1][RTW89_KCC][0][60] = 127,
[2][1][RTW89_ACMA][1][60] = 127,
@@ -59096,13 +56542,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][60] = 127,
[2][1][RTW89_THAILAND][0][60] = 127,
[2][1][RTW89_FCC][1][62] = -16,
- [2][1][RTW89_FCC][2][62] = 54,
[2][1][RTW89_ETSI][1][62] = 127,
[2][1][RTW89_ETSI][0][62] = 127,
[2][1][RTW89_MKK][1][62] = 127,
[2][1][RTW89_MKK][0][62] = 127,
[2][1][RTW89_IC][1][62] = -16,
- [2][1][RTW89_IC][2][62] = 54,
[2][1][RTW89_KCC][1][62] = -14,
[2][1][RTW89_KCC][0][62] = 127,
[2][1][RTW89_ACMA][1][62] = 127,
@@ -59115,13 +56559,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][62] = 127,
[2][1][RTW89_THAILAND][0][62] = 127,
[2][1][RTW89_FCC][1][64] = -16,
- [2][1][RTW89_FCC][2][64] = 54,
[2][1][RTW89_ETSI][1][64] = 127,
[2][1][RTW89_ETSI][0][64] = 127,
[2][1][RTW89_MKK][1][64] = 127,
[2][1][RTW89_MKK][0][64] = 127,
[2][1][RTW89_IC][1][64] = -16,
- [2][1][RTW89_IC][2][64] = 54,
[2][1][RTW89_KCC][1][64] = -14,
[2][1][RTW89_KCC][0][64] = 127,
[2][1][RTW89_ACMA][1][64] = 127,
@@ -59134,13 +56576,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][64] = 127,
[2][1][RTW89_THAILAND][0][64] = 127,
[2][1][RTW89_FCC][1][66] = -16,
- [2][1][RTW89_FCC][2][66] = 54,
[2][1][RTW89_ETSI][1][66] = 127,
[2][1][RTW89_ETSI][0][66] = 127,
[2][1][RTW89_MKK][1][66] = 127,
[2][1][RTW89_MKK][0][66] = 127,
[2][1][RTW89_IC][1][66] = -16,
- [2][1][RTW89_IC][2][66] = 54,
[2][1][RTW89_KCC][1][66] = -14,
[2][1][RTW89_KCC][0][66] = 127,
[2][1][RTW89_ACMA][1][66] = 127,
@@ -59153,13 +56593,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][66] = 127,
[2][1][RTW89_THAILAND][0][66] = 127,
[2][1][RTW89_FCC][1][68] = -16,
- [2][1][RTW89_FCC][2][68] = 54,
[2][1][RTW89_ETSI][1][68] = 127,
[2][1][RTW89_ETSI][0][68] = 127,
[2][1][RTW89_MKK][1][68] = 127,
[2][1][RTW89_MKK][0][68] = 127,
[2][1][RTW89_IC][1][68] = -16,
- [2][1][RTW89_IC][2][68] = 54,
[2][1][RTW89_KCC][1][68] = -14,
[2][1][RTW89_KCC][0][68] = 127,
[2][1][RTW89_ACMA][1][68] = 127,
@@ -59172,13 +56610,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][68] = 127,
[2][1][RTW89_THAILAND][0][68] = 127,
[2][1][RTW89_FCC][1][70] = -16,
- [2][1][RTW89_FCC][2][70] = 56,
[2][1][RTW89_ETSI][1][70] = 127,
[2][1][RTW89_ETSI][0][70] = 127,
[2][1][RTW89_MKK][1][70] = 127,
[2][1][RTW89_MKK][0][70] = 127,
[2][1][RTW89_IC][1][70] = -16,
- [2][1][RTW89_IC][2][70] = 56,
[2][1][RTW89_KCC][1][70] = -14,
[2][1][RTW89_KCC][0][70] = 127,
[2][1][RTW89_ACMA][1][70] = 127,
@@ -59191,13 +56627,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][70] = 127,
[2][1][RTW89_THAILAND][0][70] = 127,
[2][1][RTW89_FCC][1][72] = -16,
- [2][1][RTW89_FCC][2][72] = 56,
[2][1][RTW89_ETSI][1][72] = 127,
[2][1][RTW89_ETSI][0][72] = 127,
[2][1][RTW89_MKK][1][72] = 127,
[2][1][RTW89_MKK][0][72] = 127,
[2][1][RTW89_IC][1][72] = -16,
- [2][1][RTW89_IC][2][72] = 56,
[2][1][RTW89_KCC][1][72] = -14,
[2][1][RTW89_KCC][0][72] = 127,
[2][1][RTW89_ACMA][1][72] = 127,
@@ -59210,13 +56644,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][72] = 127,
[2][1][RTW89_THAILAND][0][72] = 127,
[2][1][RTW89_FCC][1][74] = -16,
- [2][1][RTW89_FCC][2][74] = 56,
[2][1][RTW89_ETSI][1][74] = 127,
[2][1][RTW89_ETSI][0][74] = 127,
[2][1][RTW89_MKK][1][74] = 127,
[2][1][RTW89_MKK][0][74] = 127,
[2][1][RTW89_IC][1][74] = -16,
- [2][1][RTW89_IC][2][74] = 56,
[2][1][RTW89_KCC][1][74] = -14,
[2][1][RTW89_KCC][0][74] = 127,
[2][1][RTW89_ACMA][1][74] = 127,
@@ -59229,13 +56661,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][74] = 127,
[2][1][RTW89_THAILAND][0][74] = 127,
[2][1][RTW89_FCC][1][75] = -16,
- [2][1][RTW89_FCC][2][75] = 56,
[2][1][RTW89_ETSI][1][75] = 127,
[2][1][RTW89_ETSI][0][75] = 127,
[2][1][RTW89_MKK][1][75] = 127,
[2][1][RTW89_MKK][0][75] = 127,
[2][1][RTW89_IC][1][75] = -16,
- [2][1][RTW89_IC][2][75] = 56,
[2][1][RTW89_KCC][1][75] = -14,
[2][1][RTW89_KCC][0][75] = 127,
[2][1][RTW89_ACMA][1][75] = 127,
@@ -59248,13 +56678,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][75] = 127,
[2][1][RTW89_THAILAND][0][75] = 127,
[2][1][RTW89_FCC][1][77] = -16,
- [2][1][RTW89_FCC][2][77] = 56,
[2][1][RTW89_ETSI][1][77] = 127,
[2][1][RTW89_ETSI][0][77] = 127,
[2][1][RTW89_MKK][1][77] = 127,
[2][1][RTW89_MKK][0][77] = 127,
[2][1][RTW89_IC][1][77] = -16,
- [2][1][RTW89_IC][2][77] = 56,
[2][1][RTW89_KCC][1][77] = -14,
[2][1][RTW89_KCC][0][77] = 127,
[2][1][RTW89_ACMA][1][77] = 127,
@@ -59267,13 +56695,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][77] = 127,
[2][1][RTW89_THAILAND][0][77] = 127,
[2][1][RTW89_FCC][1][79] = -16,
- [2][1][RTW89_FCC][2][79] = 56,
[2][1][RTW89_ETSI][1][79] = 127,
[2][1][RTW89_ETSI][0][79] = 127,
[2][1][RTW89_MKK][1][79] = 127,
[2][1][RTW89_MKK][0][79] = 127,
[2][1][RTW89_IC][1][79] = -16,
- [2][1][RTW89_IC][2][79] = 56,
[2][1][RTW89_KCC][1][79] = -14,
[2][1][RTW89_KCC][0][79] = 127,
[2][1][RTW89_ACMA][1][79] = 127,
@@ -59286,13 +56712,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][79] = 127,
[2][1][RTW89_THAILAND][0][79] = 127,
[2][1][RTW89_FCC][1][81] = -16,
- [2][1][RTW89_FCC][2][81] = 56,
[2][1][RTW89_ETSI][1][81] = 127,
[2][1][RTW89_ETSI][0][81] = 127,
[2][1][RTW89_MKK][1][81] = 127,
[2][1][RTW89_MKK][0][81] = 127,
[2][1][RTW89_IC][1][81] = -16,
- [2][1][RTW89_IC][2][81] = 56,
[2][1][RTW89_KCC][1][81] = -14,
[2][1][RTW89_KCC][0][81] = 127,
[2][1][RTW89_ACMA][1][81] = 127,
@@ -59305,13 +56729,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][81] = 127,
[2][1][RTW89_THAILAND][0][81] = 127,
[2][1][RTW89_FCC][1][83] = -16,
- [2][1][RTW89_FCC][2][83] = 56,
[2][1][RTW89_ETSI][1][83] = 127,
[2][1][RTW89_ETSI][0][83] = 127,
[2][1][RTW89_MKK][1][83] = 127,
[2][1][RTW89_MKK][0][83] = 127,
[2][1][RTW89_IC][1][83] = -16,
- [2][1][RTW89_IC][2][83] = 56,
[2][1][RTW89_KCC][1][83] = -14,
[2][1][RTW89_KCC][0][83] = 127,
[2][1][RTW89_ACMA][1][83] = 127,
@@ -59324,13 +56746,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][83] = 127,
[2][1][RTW89_THAILAND][0][83] = 127,
[2][1][RTW89_FCC][1][85] = -18,
- [2][1][RTW89_FCC][2][85] = 56,
[2][1][RTW89_ETSI][1][85] = 127,
[2][1][RTW89_ETSI][0][85] = 127,
[2][1][RTW89_MKK][1][85] = 127,
[2][1][RTW89_MKK][0][85] = 127,
[2][1][RTW89_IC][1][85] = -18,
- [2][1][RTW89_IC][2][85] = 56,
[2][1][RTW89_KCC][1][85] = -14,
[2][1][RTW89_KCC][0][85] = 127,
[2][1][RTW89_ACMA][1][85] = 127,
@@ -59343,13 +56763,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][85] = 127,
[2][1][RTW89_THAILAND][0][85] = 127,
[2][1][RTW89_FCC][1][87] = -16,
- [2][1][RTW89_FCC][2][87] = 127,
[2][1][RTW89_ETSI][1][87] = 127,
[2][1][RTW89_ETSI][0][87] = 127,
[2][1][RTW89_MKK][1][87] = 127,
[2][1][RTW89_MKK][0][87] = 127,
[2][1][RTW89_IC][1][87] = -16,
- [2][1][RTW89_IC][2][87] = 127,
[2][1][RTW89_KCC][1][87] = -14,
[2][1][RTW89_KCC][0][87] = 127,
[2][1][RTW89_ACMA][1][87] = 127,
@@ -59362,13 +56780,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][87] = 127,
[2][1][RTW89_THAILAND][0][87] = 127,
[2][1][RTW89_FCC][1][89] = -16,
- [2][1][RTW89_FCC][2][89] = 127,
[2][1][RTW89_ETSI][1][89] = 127,
[2][1][RTW89_ETSI][0][89] = 127,
[2][1][RTW89_MKK][1][89] = 127,
[2][1][RTW89_MKK][0][89] = 127,
[2][1][RTW89_IC][1][89] = -16,
- [2][1][RTW89_IC][2][89] = 127,
[2][1][RTW89_KCC][1][89] = -14,
[2][1][RTW89_KCC][0][89] = 127,
[2][1][RTW89_ACMA][1][89] = 127,
@@ -59381,13 +56797,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][89] = 127,
[2][1][RTW89_THAILAND][0][89] = 127,
[2][1][RTW89_FCC][1][90] = -16,
- [2][1][RTW89_FCC][2][90] = 127,
[2][1][RTW89_ETSI][1][90] = 127,
[2][1][RTW89_ETSI][0][90] = 127,
[2][1][RTW89_MKK][1][90] = 127,
[2][1][RTW89_MKK][0][90] = 127,
[2][1][RTW89_IC][1][90] = -16,
- [2][1][RTW89_IC][2][90] = 127,
[2][1][RTW89_KCC][1][90] = -14,
[2][1][RTW89_KCC][0][90] = 127,
[2][1][RTW89_ACMA][1][90] = 127,
@@ -59400,13 +56814,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][90] = 127,
[2][1][RTW89_THAILAND][0][90] = 127,
[2][1][RTW89_FCC][1][92] = -16,
- [2][1][RTW89_FCC][2][92] = 127,
[2][1][RTW89_ETSI][1][92] = 127,
[2][1][RTW89_ETSI][0][92] = 127,
[2][1][RTW89_MKK][1][92] = 127,
[2][1][RTW89_MKK][0][92] = 127,
[2][1][RTW89_IC][1][92] = -16,
- [2][1][RTW89_IC][2][92] = 127,
[2][1][RTW89_KCC][1][92] = -14,
[2][1][RTW89_KCC][0][92] = 127,
[2][1][RTW89_ACMA][1][92] = 127,
@@ -59419,13 +56831,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][92] = 127,
[2][1][RTW89_THAILAND][0][92] = 127,
[2][1][RTW89_FCC][1][94] = -16,
- [2][1][RTW89_FCC][2][94] = 127,
[2][1][RTW89_ETSI][1][94] = 127,
[2][1][RTW89_ETSI][0][94] = 127,
[2][1][RTW89_MKK][1][94] = 127,
[2][1][RTW89_MKK][0][94] = 127,
[2][1][RTW89_IC][1][94] = -16,
- [2][1][RTW89_IC][2][94] = 127,
[2][1][RTW89_KCC][1][94] = -14,
[2][1][RTW89_KCC][0][94] = 127,
[2][1][RTW89_ACMA][1][94] = 127,
@@ -59438,13 +56848,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][94] = 127,
[2][1][RTW89_THAILAND][0][94] = 127,
[2][1][RTW89_FCC][1][96] = -16,
- [2][1][RTW89_FCC][2][96] = 127,
[2][1][RTW89_ETSI][1][96] = 127,
[2][1][RTW89_ETSI][0][96] = 127,
[2][1][RTW89_MKK][1][96] = 127,
[2][1][RTW89_MKK][0][96] = 127,
[2][1][RTW89_IC][1][96] = -16,
- [2][1][RTW89_IC][2][96] = 127,
[2][1][RTW89_KCC][1][96] = -14,
[2][1][RTW89_KCC][0][96] = 127,
[2][1][RTW89_ACMA][1][96] = 127,
@@ -59457,13 +56865,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][96] = 127,
[2][1][RTW89_THAILAND][0][96] = 127,
[2][1][RTW89_FCC][1][98] = -16,
- [2][1][RTW89_FCC][2][98] = 127,
[2][1][RTW89_ETSI][1][98] = 127,
[2][1][RTW89_ETSI][0][98] = 127,
[2][1][RTW89_MKK][1][98] = 127,
[2][1][RTW89_MKK][0][98] = 127,
[2][1][RTW89_IC][1][98] = -16,
- [2][1][RTW89_IC][2][98] = 127,
[2][1][RTW89_KCC][1][98] = -14,
[2][1][RTW89_KCC][0][98] = 127,
[2][1][RTW89_ACMA][1][98] = 127,
@@ -59476,13 +56882,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][98] = 127,
[2][1][RTW89_THAILAND][0][98] = 127,
[2][1][RTW89_FCC][1][100] = -16,
- [2][1][RTW89_FCC][2][100] = 127,
[2][1][RTW89_ETSI][1][100] = 127,
[2][1][RTW89_ETSI][0][100] = 127,
[2][1][RTW89_MKK][1][100] = 127,
[2][1][RTW89_MKK][0][100] = 127,
[2][1][RTW89_IC][1][100] = -16,
- [2][1][RTW89_IC][2][100] = 127,
[2][1][RTW89_KCC][1][100] = -14,
[2][1][RTW89_KCC][0][100] = 127,
[2][1][RTW89_ACMA][1][100] = 127,
@@ -59495,13 +56899,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][100] = 127,
[2][1][RTW89_THAILAND][0][100] = 127,
[2][1][RTW89_FCC][1][102] = -16,
- [2][1][RTW89_FCC][2][102] = 127,
[2][1][RTW89_ETSI][1][102] = 127,
[2][1][RTW89_ETSI][0][102] = 127,
[2][1][RTW89_MKK][1][102] = 127,
[2][1][RTW89_MKK][0][102] = 127,
[2][1][RTW89_IC][1][102] = -16,
- [2][1][RTW89_IC][2][102] = 127,
[2][1][RTW89_KCC][1][102] = -14,
[2][1][RTW89_KCC][0][102] = 127,
[2][1][RTW89_ACMA][1][102] = 127,
@@ -59514,13 +56916,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][102] = 127,
[2][1][RTW89_THAILAND][0][102] = 127,
[2][1][RTW89_FCC][1][104] = -16,
- [2][1][RTW89_FCC][2][104] = 127,
[2][1][RTW89_ETSI][1][104] = 127,
[2][1][RTW89_ETSI][0][104] = 127,
[2][1][RTW89_MKK][1][104] = 127,
[2][1][RTW89_MKK][0][104] = 127,
[2][1][RTW89_IC][1][104] = -16,
- [2][1][RTW89_IC][2][104] = 127,
[2][1][RTW89_KCC][1][104] = -14,
[2][1][RTW89_KCC][0][104] = 127,
[2][1][RTW89_ACMA][1][104] = 127,
@@ -59533,13 +56933,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][104] = 127,
[2][1][RTW89_THAILAND][0][104] = 127,
[2][1][RTW89_FCC][1][105] = -16,
- [2][1][RTW89_FCC][2][105] = 127,
[2][1][RTW89_ETSI][1][105] = 127,
[2][1][RTW89_ETSI][0][105] = 127,
[2][1][RTW89_MKK][1][105] = 127,
[2][1][RTW89_MKK][0][105] = 127,
[2][1][RTW89_IC][1][105] = -16,
- [2][1][RTW89_IC][2][105] = 127,
[2][1][RTW89_KCC][1][105] = -14,
[2][1][RTW89_KCC][0][105] = 127,
[2][1][RTW89_ACMA][1][105] = 127,
@@ -59552,13 +56950,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][105] = 127,
[2][1][RTW89_THAILAND][0][105] = 127,
[2][1][RTW89_FCC][1][107] = -12,
- [2][1][RTW89_FCC][2][107] = 127,
[2][1][RTW89_ETSI][1][107] = 127,
[2][1][RTW89_ETSI][0][107] = 127,
[2][1][RTW89_MKK][1][107] = 127,
[2][1][RTW89_MKK][0][107] = 127,
[2][1][RTW89_IC][1][107] = -12,
- [2][1][RTW89_IC][2][107] = 127,
[2][1][RTW89_KCC][1][107] = -14,
[2][1][RTW89_KCC][0][107] = 127,
[2][1][RTW89_ACMA][1][107] = 127,
@@ -59571,13 +56967,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][107] = 127,
[2][1][RTW89_THAILAND][0][107] = 127,
[2][1][RTW89_FCC][1][109] = -10,
- [2][1][RTW89_FCC][2][109] = 127,
[2][1][RTW89_ETSI][1][109] = 127,
[2][1][RTW89_ETSI][0][109] = 127,
[2][1][RTW89_MKK][1][109] = 127,
[2][1][RTW89_MKK][0][109] = 127,
[2][1][RTW89_IC][1][109] = -10,
- [2][1][RTW89_IC][2][109] = 127,
[2][1][RTW89_KCC][1][109] = 127,
[2][1][RTW89_KCC][0][109] = 127,
[2][1][RTW89_ACMA][1][109] = 127,
@@ -59590,13 +56984,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][109] = 127,
[2][1][RTW89_THAILAND][0][109] = 127,
[2][1][RTW89_FCC][1][111] = 127,
- [2][1][RTW89_FCC][2][111] = 127,
[2][1][RTW89_ETSI][1][111] = 127,
[2][1][RTW89_ETSI][0][111] = 127,
[2][1][RTW89_MKK][1][111] = 127,
[2][1][RTW89_MKK][0][111] = 127,
[2][1][RTW89_IC][1][111] = 127,
- [2][1][RTW89_IC][2][111] = 127,
[2][1][RTW89_KCC][1][111] = 127,
[2][1][RTW89_KCC][0][111] = 127,
[2][1][RTW89_ACMA][1][111] = 127,
@@ -59609,13 +57001,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][111] = 127,
[2][1][RTW89_THAILAND][0][111] = 127,
[2][1][RTW89_FCC][1][113] = 127,
- [2][1][RTW89_FCC][2][113] = 127,
[2][1][RTW89_ETSI][1][113] = 127,
[2][1][RTW89_ETSI][0][113] = 127,
[2][1][RTW89_MKK][1][113] = 127,
[2][1][RTW89_MKK][0][113] = 127,
[2][1][RTW89_IC][1][113] = 127,
- [2][1][RTW89_IC][2][113] = 127,
[2][1][RTW89_KCC][1][113] = 127,
[2][1][RTW89_KCC][0][113] = 127,
[2][1][RTW89_ACMA][1][113] = 127,
@@ -59628,13 +57018,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][113] = 127,
[2][1][RTW89_THAILAND][0][113] = 127,
[2][1][RTW89_FCC][1][115] = 127,
- [2][1][RTW89_FCC][2][115] = 127,
[2][1][RTW89_ETSI][1][115] = 127,
[2][1][RTW89_ETSI][0][115] = 127,
[2][1][RTW89_MKK][1][115] = 127,
[2][1][RTW89_MKK][0][115] = 127,
[2][1][RTW89_IC][1][115] = 127,
- [2][1][RTW89_IC][2][115] = 127,
[2][1][RTW89_KCC][1][115] = 127,
[2][1][RTW89_KCC][0][115] = 127,
[2][1][RTW89_ACMA][1][115] = 127,
@@ -59647,13 +57035,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][115] = 127,
[2][1][RTW89_THAILAND][0][115] = 127,
[2][1][RTW89_FCC][1][117] = 127,
- [2][1][RTW89_FCC][2][117] = 127,
[2][1][RTW89_ETSI][1][117] = 127,
[2][1][RTW89_ETSI][0][117] = 127,
[2][1][RTW89_MKK][1][117] = 127,
[2][1][RTW89_MKK][0][117] = 127,
[2][1][RTW89_IC][1][117] = 127,
- [2][1][RTW89_IC][2][117] = 127,
[2][1][RTW89_KCC][1][117] = 127,
[2][1][RTW89_KCC][0][117] = 127,
[2][1][RTW89_ACMA][1][117] = 127,
@@ -59666,13 +57052,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][117] = 127,
[2][1][RTW89_THAILAND][0][117] = 127,
[2][1][RTW89_FCC][1][119] = 127,
- [2][1][RTW89_FCC][2][119] = 127,
[2][1][RTW89_ETSI][1][119] = 127,
[2][1][RTW89_ETSI][0][119] = 127,
[2][1][RTW89_MKK][1][119] = 127,
[2][1][RTW89_MKK][0][119] = 127,
[2][1][RTW89_IC][1][119] = 127,
- [2][1][RTW89_IC][2][119] = 127,
[2][1][RTW89_KCC][1][119] = 127,
[2][1][RTW89_KCC][0][119] = 127,
[2][1][RTW89_ACMA][1][119] = 127,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 583ea673a4f5..e07c7f3ade41 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -68,8 +68,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.recognize_intrs = rtw89_pci_recognize_intrs_v1,
};
+static const struct dmi_system_id rtw8852c_pci_quirks[] = {
+ {
+ .ident = "Dell Inc. Vostro 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {
+ .ident = "Dell Inc. Inspiron 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {},
+};
+
static const struct rtw89_driver_info rtw89_8852ce_info = {
.chip = &rtw8852c_chip_info,
+ .quirks = rtw8852c_pci_quirks,
.bus = {
.pci = &rtw8852c_pci_info,
},
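The two DMI entries added above are matched against the platform's SMBIOS vendor, product-name and SKU strings; an entry applies only when every DMI_MATCH() field matches, and its .driver_data carries the quirk to raise (RTW89_QUIRK_PCI_BER for these Dell models). A minimal sketch of how such a table can be consumed, assuming the core only needs the matched entry's flag (the helper name is hypothetical and the real core-side handling may differ):

#include <linux/dmi.h>

/* Return the quirk flag of the first table entry matching this platform,
 * or 0 when the table is absent or nothing matches.
 */
static unsigned long example_lookup_pci_quirks(const struct dmi_system_id *quirks)
{
	const struct dmi_system_id *match;

	if (!quirks)
		return 0;

	match = dmi_first_match(quirks);
	return match ? (unsigned long)(uintptr_t)match->driver_data : 0;
}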
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 367459bd1345..3b3ea3a7c19a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2126,7 +2126,7 @@ static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
if (rtwdev->dbcc_en)
rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
- &tx_en0, false);
+ &tx_en1, false);
}
static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
@@ -2233,7 +2233,7 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
- rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
@@ -2257,6 +2257,138 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
btc->cx.wl.status.map.init_ok = true;
}
+static void
+rtw8922a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0));
+ u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16));
+
+ switch (ctrl_all_time) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_VAL, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_VAL, ctrl_all_time);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_EN, 0x1);
+ break;
+ }
+
+ switch (ctrl_gnt_bt) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL,
+ B_BE_PWR_BT_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL,
+ B_BE_PWR_BT_VAL, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL,
+ B_BE_PWR_BT_VAL, ctrl_gnt_bt);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL,
+ B_BE_PWR_BT_EN, 0x1);
+ break;
+ }
+}
+
+static
+s8 rtw8922a_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 1, 0, 7}, /* the below id is for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 1, 0, 7}, /* the below id is for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const u8 rtw89_btc_8922a_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30};
+static const u8 rtw89_btc_8922a_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8922a_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe300),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe320),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe324),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe328),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe32c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe330),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe334),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe338),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe344),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe348),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe34c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe350),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a50),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x660),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x1660),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x418c),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x518c),
+};
+
+static
+void rtw8922a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+	/* Feature moved to firmware */
+}
+
+static
+void rtw8922a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ if (!state) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x01018);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x01018);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x09018);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x09018);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+static void rtw8922a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+}
+
static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2367,6 +2499,13 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.btc_set_rfe = rtw8922a_btc_set_rfe,
.btc_init_cfg = rtw8922a_btc_init_cfg,
+ .btc_set_wl_pri = NULL,
+ .btc_set_wl_txpwr_ctrl = rtw8922a_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8922a_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8922a_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8922a_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8922a_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -2406,6 +2545,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
.support_chanctx_num = 2,
+ .support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
@@ -2436,7 +2576,22 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.efuse_blocks = rtw8922a_efuse_blocks,
.phycap_addr = 0x1700,
.phycap_size = 0x38,
-
+ .para_ver = 0xf,
+ .wlcx_desired = 0x07110000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8922a_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8922a_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8922a_mon_reg),
+ .mon_reg = rtw89_btc_8922a_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_ul),
+ .rf_para_ulink = rtw89_btc_8922a_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_dl),
+ .rf_para_dlink = rtw89_btc_8922a_rf_dl,
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
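Among the coexistence hooks added above, rtw8922a_btc_get_bt_rssi() converts a BT RSSI reported in dBm to a 0..100 scale, which is presumably the scale the rtw89_btc_8922a_bt_rssi_thres entries (50/40/30/20) are expressed in, i.e. roughly -50/-60/-70/-80 dBm boundaries. A minimal sketch of the same arithmetic (the threshold interpretation is an assumption, not something stated by the patch):

#include <linux/minmax.h>
#include <linux/types.h>

/* Same mapping as rtw8922a_btc_get_bt_rssi():
 *   -70 dBm -> 30, anything <= -100 dBm -> 0, anything >= 0 dBm -> 100.
 */
static s8 example_bt_rssi_to_scale(s8 val_dbm)
{
	return clamp_t(s8, val_dbm, -100, 0) + 100;
}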
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 4981b657bd7b..ce8aaa9501e1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
static const struct rtw89_driver_info rtw89_8922ae_info = {
.chip = &rtw8922a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8922a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index bd7a657188d9..4ae081d2d3b4 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -7,8 +7,8 @@
#include "core.h"
-#define RTW89_SAR_TXPWR_MAC_MAX S8_MAX
-#define RTW89_SAR_TXPWR_MAC_MIN S8_MIN
+#define RTW89_SAR_TXPWR_MAC_MAX 63
+#define RTW89_SAR_TXPWR_MAC_MIN -64
struct rtw89_sar_handler {
const char *descr_sar_source;
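The sar.h hunk narrows the MAC-level TX power bounds from the full s8 range to [-64, 63]. Wherever a SAR-derived power is folded into the MAC power value it can be clamped against these macros; a minimal sketch, with the variable name and surrounding flow assumed rather than taken from this file:

#include <linux/minmax.h>
#include <linux/types.h>
#include "sar.h"	/* RTW89_SAR_TXPWR_MAC_MIN/MAX as redefined above */

static s32 example_clamp_sar_txpwr(s32 sar_txpwr)
{
	return clamp_t(s32, sar_txpwr,
		       RTW89_SAR_TXPWR_MAC_MIN, RTW89_SAR_TXPWR_MAC_MAX);
}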
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index ccad026defb5..fa61484c3839 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -12,6 +12,662 @@
#include "util.h"
#include "wow.h"
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ const u8 *rsn, *ies = mgmt->u.assoc_req.variable;
+ struct rtw89_rsn_ie *rsn_ie;
+
+ rsn = cfg80211_find_ie(WLAN_EID_RSN, ies, skb->len);
+ if (!rsn)
+ return;
+
+ rsn_ie = (struct rtw89_rsn_ie *)rsn;
+ rtw_wow->akm = rsn_ie->akm_cipher_suite.type;
+}
+
+static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = {
+ {WLAN_CIPHER_SUITE_WEP40, .fw_alg = 1, .len = WLAN_KEY_LEN_WEP40,},
+ {WLAN_CIPHER_SUITE_WEP104, .fw_alg = 2, .len = WLAN_KEY_LEN_WEP104,},
+ {WLAN_CIPHER_SUITE_TKIP, .fw_alg = 3, .len = WLAN_KEY_LEN_TKIP,},
+ {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,},
+ {WLAN_CIPHER_SUITE_GCMP, .fw_alg = 8, .len = WLAN_KEY_LEN_GCMP,},
+ {WLAN_CIPHER_SUITE_CCMP_256, .fw_alg = 7, .len = WLAN_KEY_LEN_CCMP_256,},
+ {WLAN_CIPHER_SUITE_GCMP_256, .fw_alg = 23, .len = WLAN_KEY_LEN_GCMP_256,},
+ {WLAN_CIPHER_SUITE_AES_CMAC, .fw_alg = 32, .len = WLAN_KEY_LEN_AES_CMAC,},
+};
+
+static const
+struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher)
+{
+ const struct rtw89_cipher_info *cipher_info_defs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_cipher_info_defs); i++) {
+ cipher_info_defs = &rtw89_cipher_info_defs[i];
+ if (cipher_info_defs->cipher == cipher)
+ return cipher_info_defs;
+ }
+
+ return NULL;
+}
+
+static int _pn_to_iv(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key,
+ u8 *iv, u64 pn, u8 key_idx)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ iv[0] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ iv[1] = (u64_get_bits(pn, RTW89_KEY_PN_1) | 0x20) & 0x7f;
+ iv[2] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iv[0] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ iv[1] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ iv[2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iv[3] = BIT(5) | ((key_idx & 0x3) << 6);
+ iv[4] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ iv[5] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ iv[6] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ iv[7] = u64_get_bits(pn, RTW89_KEY_PN_5);
+
+ return 0;
+}
+
+static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ struct ieee80211_key_seq seq;
+ int err;
+ u64 pn;
+
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+
+ err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
+ if (err)
+ return err;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n",
+ __func__, key->keyidx, pn, 8, iv);
+
+ return 0;
+}
+
+static int rtw89_tx_pn_to_iv(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ int err;
+ u64 pn;
+
+ pn = atomic64_inc_return(&key->tx_pn);
+ err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
+ if (err)
+ return err;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n",
+ __func__, key->keyidx, pn, 8, iv);
+
+ return 0;
+}
+
+static int _iv_to_pn(struct rtw89_dev *rtwdev, u8 *iv, u64 *pn, u8 *key_id,
+ struct ieee80211_key_conf *key)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ *pn = u64_encode_bits(iv[2], RTW89_KEY_PN_0) |
+ u64_encode_bits(iv[0], RTW89_KEY_PN_1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ *pn = u64_encode_bits(iv[0], RTW89_KEY_PN_0) |
+ u64_encode_bits(iv[1], RTW89_KEY_PN_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *pn |= u64_encode_bits(iv[4], RTW89_KEY_PN_2) |
+ u64_encode_bits(iv[5], RTW89_KEY_PN_3) |
+ u64_encode_bits(iv[6], RTW89_KEY_PN_4) |
+ u64_encode_bits(iv[7], RTW89_KEY_PN_5);
+
+ if (key_id)
+ *key_id = *(iv + 3) >> 6;
+
+ return 0;
+}
+
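/* Worked round-trip example (illustration only, not part of this patch) of
 * the _pn_to_iv()/_iv_to_pn() packing above for CCMP/GCMP, assuming
 * RTW89_KEY_PN_0..RTW89_KEY_PN_5 are the six consecutive 8-bit fields of
 * the 48-bit packet number with PN_0 the least significant byte:
 *
 *   pn = 0x112233445566, key_idx = 1
 *   _pn_to_iv():  iv[0]=0x66  iv[1]=0x55  iv[2]=0x00
 *                 iv[3]=BIT(5) | (1 << 6) = 0x60
 *                 iv[4]=0x44  iv[5]=0x33  iv[6]=0x22  iv[7]=0x11
 *   _iv_to_pn():  recovers pn = 0x112233445566 and key_id = iv[3] >> 6 = 1
 *
 * This matches the CCMP header layout (PN0, PN1, reserved, key-ID octet
 * with the Ext IV bit set, PN2..PN5).
 */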
+static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ struct ieee80211_key_seq seq;
+ int err;
+ u64 pn;
+
+ err = _iv_to_pn(rtwdev, iv, &pn, NULL, key);
+ if (err)
+ return err;
+
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n",
+ __func__, key->keyidx, 8, iv, 6, seq.ccmp.pn);
+
+ return 0;
+}
+
+static int rtw89_tx_iv_to_pn(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ int err;
+ u64 pn;
+
+ err = _iv_to_pn(rtwdev, iv, &pn, NULL, key);
+ if (err)
+ return err;
+
+ atomic64_set(&key->tx_pn, pn);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%llx\n",
+ __func__, key->keyidx, 8, iv, pn);
+
+ return 0;
+}
+
+static int rtw89_rx_pn_get_pmf(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ struct rtw89_wow_gtk_info *gtk_info)
+{
+ struct ieee80211_key_seq seq;
+ u64 pn;
+
+ if (key->keyidx == 4)
+ memcpy(gtk_info->igtk[0], key->key, key->keylen);
+ else if (key->keyidx == 5)
+ memcpy(gtk_info->igtk[1], key->key, key->keylen);
+ else
+ return -EINVAL;
+
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ gtk_info->ipn = cpu_to_le64(pn);
+ gtk_info->igtk_keyid = cpu_to_le32(key->keyidx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx\n",
+ __func__, key->keyidx, pn);
+
+ return 0;
+}
+
+static int rtw89_rx_pn_set_pmf(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u64 pn)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct ieee80211_key_seq seq;
+
+ if (key->keyidx != aoac_rpt->igtk_key_id)
+ return 0;
+
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%*ph\n",
+ __func__, key->keyidx, 6, seq.ccmp.pn);
+
+ return 0;
+}
+
+static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_key_info *key_info = &rtw_wow->key_info;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ const struct rtw89_cipher_info *cipher_info;
+ bool *err = data;
+ int ret;
+
+ cipher_info = rtw89_cipher_alg_recognize(key->cipher);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta) {
+ ret = rtw89_tx_pn_to_iv(rtwdev, key,
+ key_info->ptk_tx_iv);
+ if (ret)
+ goto err;
+ ret = rtw89_rx_pn_to_iv(rtwdev, key,
+ key_info->ptk_rx_iv);
+ if (ret)
+ goto err;
+
+ rtw_wow->ptk_alg = cipher_info->fw_alg;
+ rtw_wow->ptk_keyidx = key->keyidx;
+ } else {
+ ret = rtw89_rx_pn_to_iv(rtwdev, key,
+ key_info->gtk_rx_iv[key->keyidx]);
+ if (ret)
+ goto err;
+
+ rtw_wow->gtk_alg = cipher_info->fw_alg;
+ key_info->gtk_keyidx = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ ret = rtw89_rx_pn_get_pmf(rtwdev, key, gtk_info);
+ if (ret)
+ goto err;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+		/* WEP only sets a group key in mac80211, but the firmware
+		 * needs both the pairwise key and the group key to be set.
+		 */
+ rtw_wow->ptk_alg = cipher_info->fw_alg;
+ rtw_wow->ptk_keyidx = key->keyidx;
+ rtw_wow->gtk_alg = cipher_info->fw_alg;
+ key_info->gtk_keyidx = key->keyidx;
+ break;
+ default:
+		rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupported cipher %x\n",
+ key->cipher);
+ goto err;
+ }
+
+ return;
+err:
+ *err = true;
+}
+
+static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_set_key_info_iter_data *iter_data = data;
+ bool update_tx_key_info = iter_data->rx_ready;
+ int ret;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta && !update_tx_key_info) {
+ ret = rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->ptk_rx_iv);
+ if (ret)
+ goto err;
+ }
+
+ if (sta && update_tx_key_info) {
+ ret = rtw89_tx_iv_to_pn(rtwdev, key,
+ aoac_rpt->ptk_tx_iv);
+ if (ret)
+ goto err;
+ }
+
+ if (!sta && !update_tx_key_info) {
+ ret = rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->gtk_rx_iv[key->keyidx]);
+ if (ret)
+ goto err;
+ }
+
+ if (!sta && update_tx_key_info && aoac_rpt->rekey_ok)
+ iter_data->gtk_cipher = key->cipher;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (update_tx_key_info) {
+ if (aoac_rpt->rekey_ok)
+ iter_data->igtk_cipher = key->cipher;
+ } else {
+ ret = rtw89_rx_pn_set_pmf(rtwdev, key,
+ aoac_rpt->igtk_ipn);
+ if (ret)
+ goto err;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ default:
+		rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupported cipher %x\n",
+ key->cipher);
+ goto err;
+ }
+
+ return;
+
+err:
+ iter_data->error = true;
+}
+
+static void rtw89_wow_key_clear(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ memset(&rtw_wow->aoac_rpt, 0, sizeof(rtw_wow->aoac_rpt));
+ memset(&rtw_wow->gtk_info, 0, sizeof(rtw_wow->gtk_info));
+ memset(&rtw_wow->key_info, 0, sizeof(rtw_wow->key_info));
+ rtw_wow->ptk_alg = 0;
+ rtw_wow->gtk_alg = 0;
+}
+
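+/* Collect the key material needed by the WoWLAN firmware; on any iterator
+ * error the partially filled key info is cleared so stale data is not used.
+ */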
+static void rtw89_wow_construct_key_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_key_info *key_info = &rtw_wow->key_info;
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ bool err = false;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif,
+ rtw89_wow_get_key_info_iter, &err);
+ rcu_read_unlock();
+
+ if (err) {
+ rtw89_wow_key_clear(rtwdev);
+ return;
+ }
+
+ key_info->valid_check = RTW89_WOW_VALID_CHECK;
+ key_info->symbol_check_en = RTW89_WOW_SYMBOL_CHK_PTK |
+ RTW89_WOW_SYMBOL_CHK_GTK;
+}
+
+static void rtw89_wow_debug_aoac_rpt(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_WOW))
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rpt_ver = %d\n",
+ aoac_rpt->rpt_ver);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] sec_type = %d\n",
+ aoac_rpt->sec_type);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] key_idx = %d\n",
+ aoac_rpt->key_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] pattern_idx = %d\n",
+ aoac_rpt->pattern_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rekey_ok = %d\n",
+ aoac_rpt->rekey_ok);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_tx_iv = %*ph\n",
+ 8, aoac_rpt->ptk_tx_iv);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW,
+ "[aoac_rpt] eapol_key_replay_count = %*ph\n",
+ 8, aoac_rpt->eapol_key_replay_count);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_rx_iv = %*ph\n",
+ 8, aoac_rpt->ptk_rx_iv);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[0] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[0]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[1] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[1]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[2] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[2]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[3] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[3]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_key_id = %llu\n",
+ aoac_rpt->igtk_key_id);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_ipn = %llu\n",
+ aoac_rpt->igtk_ipn);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk = %*ph\n",
+ 32, aoac_rpt->igtk);
+}
+
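+/* Read the AOAC report through H2C/C2H register exchanges. This path is used
+ * while RX is still disabled, so only the fields carried in the c2h registers
+ * (key index, GTK/PTK RX IVs and the IGTK IPN) are filled in here.
+ */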
+static int rtw89_wow_get_aoac_rpt_reg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_mac_h2c_info h2c_info = {};
+ u8 igtk_ipn[8];
+ u8 key_idx;
+ int ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1;
+ h2c_info.content_len = 2;
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ aoac_rpt->key_idx =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX);
+ key_idx = aoac_rpt->key_idx;
+ aoac_rpt->gtk_rx_iv[key_idx][0] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_0);
+ aoac_rpt->gtk_rx_iv[key_idx][1] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_1);
+ aoac_rpt->gtk_rx_iv[key_idx][2] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_2);
+ aoac_rpt->gtk_rx_iv[key_idx][3] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_3);
+ aoac_rpt->gtk_rx_iv[key_idx][4] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_4);
+ aoac_rpt->gtk_rx_iv[key_idx][5] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_5);
+ aoac_rpt->gtk_rx_iv[key_idx][6] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_6);
+ aoac_rpt->gtk_rx_iv[key_idx][7] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_7);
+ aoac_rpt->ptk_rx_iv[0] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0);
+ aoac_rpt->ptk_rx_iv[1] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1);
+ aoac_rpt->ptk_rx_iv[2] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2);
+ aoac_rpt->ptk_rx_iv[3] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3);
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2;
+ h2c_info.content_len = 2;
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ aoac_rpt->ptk_rx_iv[4] =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4);
+ aoac_rpt->ptk_rx_iv[5] =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5);
+ aoac_rpt->ptk_rx_iv[6] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6);
+ aoac_rpt->ptk_rx_iv[7] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7);
+ igtk_ipn[0] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0);
+ igtk_ipn[1] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1);
+ igtk_ipn[2] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2);
+ igtk_ipn[3] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3);
+ igtk_ipn[4] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4);
+ igtk_ipn[5] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5);
+ igtk_ipn[6] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6);
+ igtk_ipn[7] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7);
+ aoac_rpt->igtk_ipn = u64_encode_bits(igtk_ipn[0], RTW89_IGTK_IPN_0) |
+ u64_encode_bits(igtk_ipn[1], RTW89_IGTK_IPN_1) |
+ u64_encode_bits(igtk_ipn[2], RTW89_IGTK_IPN_2) |
+ u64_encode_bits(igtk_ipn[3], RTW89_IGTK_IPN_3) |
+ u64_encode_bits(igtk_ipn[4], RTW89_IGTK_IPN_4) |
+ u64_encode_bits(igtk_ipn[5], RTW89_IGTK_IPN_5) |
+ u64_encode_bits(igtk_ipn[6], RTW89_IGTK_IPN_6) |
+ u64_encode_bits(igtk_ipn[7], RTW89_IGTK_IPN_7);
+
+ return 0;
+}
+
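+/* Fetch the AOAC report either via registers (before RX is enabled) or via
+ * the H2C request / C2H event path once RX is ready.
+ */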
+static int rtw89_wow_get_aoac_rpt(struct rtw89_dev *rtwdev, bool rx_ready)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ int ret;
+
+ if (!rtw_wow->ptk_alg)
+ return -EPERM;
+
+ if (!rx_ready) {
+ ret = rtw89_wow_get_aoac_rpt_reg(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to get aoac rpt by reg\n");
+ return ret;
+ }
+ } else {
+ ret = rtw89_fw_h2c_wow_request_aoac(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to get aoac rpt by pkt\n");
+ return ret;
+ }
+ }
+
+ rtw89_wow_debug_aoac_rpt(rtwdev);
+
+ return 0;
+}
+
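+/* Install a GTK/IGTK received from the firmware into mac80211 via
+ * ieee80211_gtk_rekey_add(); returns the new key or NULL on failure.
+ */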
+static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
+ u32 cipher, u8 keyidx, u8 *gtk)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ const struct rtw89_cipher_info *cipher_info;
+ struct ieee80211_key_conf *rekey_conf;
+ struct ieee80211_key_conf *key;
+ u8 sz;
+
+ cipher_info = rtw89_cipher_alg_recognize(cipher);
+ sz = struct_size(rekey_conf, key, cipher_info->len);
+ rekey_conf = kmalloc(sz, GFP_KERNEL);
+ if (!rekey_conf)
+ return NULL;
+
+ rekey_conf->cipher = cipher;
+ rekey_conf->keyidx = keyidx;
+ rekey_conf->keylen = cipher_info->len;
+ memcpy(rekey_conf->key, gtk,
+ flex_array_size(rekey_conf, key, cipher_info->len));
+
+ /* ieee80211_gtk_rekey_add() will call set_key(), therefore we
+ * need to unlock the mutex
+ */
+ mutex_unlock(&rtwdev->mutex);
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ mutex_lock(&rtwdev->mutex);
+
+ kfree(rekey_conf);
+ if (IS_ERR(key)) {
+ rtw89_err(rtwdev, "ieee80211_gtk_rekey_add failed\n");
+ return NULL;
+ }
+
+ return key;
+}
+
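+/* Apply the AOAC report to mac80211: restore the PN/IV state and, if the
+ * firmware completed a GTK rekey, add the new keys and send the rekey
+ * notification.
+ */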
+static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_set_key_info_iter_data data = {.error = false,
+ .rx_ready = rx_ready};
+ struct ieee80211_key_conf *key;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif,
+ rtw89_wow_set_key_info_iter, &data);
+ rcu_read_unlock();
+
+ if (data.error) {
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s error\n", __func__);
+ return;
+ }
+
+ if (!data.gtk_cipher)
+ return;
+
+ key = rtw89_wow_gtk_rekey(rtwdev, data.gtk_cipher, aoac_rpt->key_idx,
+ aoac_rpt->gtk);
+ if (!key)
+ return;
+
+ rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->gtk_rx_iv[key->keyidx]);
+
+ if (!data.igtk_cipher)
+ return;
+
+ key = rtw89_wow_gtk_rekey(rtwdev, data.igtk_cipher, aoac_rpt->igtk_key_id,
+ aoac_rpt->igtk);
+ if (!key)
+ return;
+
+ rtw89_rx_pn_set_pmf(rtwdev, key, aoac_rpt->igtk_ipn);
+ ieee80211_gtk_rekey_notify(wow_vif, wow_vif->bss_conf.bssid,
+ aoac_rpt->eapol_key_replay_count,
+ GFP_KERNEL);
+}
+
static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
{
__rtw89_leave_ps_mode(rtwdev);
@@ -59,6 +715,8 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
@@ -85,10 +743,7 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx gtk rekey\n");
break;
case RTW89_WOW_RSN_RX_PATTERN_MATCH:
- /* Current firmware and driver don't report pattern index
- * Use pattern_idx to 0 defaultly.
- */
- wakeup.pattern_idx = 0;
+ wakeup.pattern_idx = aoac_rpt->pattern_idx;
rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx pattern match packet\n");
break;
case RTW89_WOW_RSN_RX_NLO:
@@ -454,17 +1109,21 @@ static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
{
enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_chip_info *chip = rtwdev->chip;
bool include_bb = !!chip->bbmcu_nr;
+ bool disable_intr_for_dlfw = false;
struct ieee80211_sta *wow_sta;
struct rtw89_sta *rtwsta = NULL;
bool is_conn = true;
int ret;
- rtw89_hci_disable_intr(rtwdev);
+ if (chip_id == RTL8852C || chip_id == RTL8922A)
+ disable_intr_for_dlfw = true;
wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
if (wow_sta)
@@ -472,12 +1131,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
else
is_conn = false;
+ if (disable_intr_for_dlfw)
+ rtw89_hci_disable_intr(rtwdev);
+
ret = rtw89_fw_download(rtwdev, fw_type, include_bb);
if (ret) {
rtw89_warn(rtwdev, "download fw failed\n");
return ret;
}
+ if (disable_intr_for_dlfw)
+ rtw89_hci_enable_intr(rtwdev);
+
rtw89_phy_init_rf_reg(rtwdev, true);
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
@@ -519,8 +1184,10 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
}
+ if (chip_gen == RTW89_CHIP_BE)
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
rtw89_mac_hw_mgnt_sec(rtwdev, wow);
- rtw89_hci_enable_intr(rtwdev);
return 0;
}
@@ -595,7 +1262,22 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev)
rtw89_err(rtwdev, "failed to config mac\n");
return ret;
}
+
+ /* Before enabling interrupts, get the AOAC report via registers because
+ * RX is not enabled yet, and sync the RX-related IVs from firmware to
+ * mac80211 before the driver starts receiving RX packets.
+ * After enabling interrupts, the AOAC report can be fetched via H2C/C2H
+ * packets, which also carry the TX IV and the complete rekey info, so the
+ * TX-related IV and the new GTK info are updated if a rekey happened.
+ */
+ ret = rtw89_wow_get_aoac_rpt(rtwdev, false);
+ if (!ret)
+ rtw89_wow_update_key_info(rtwdev, false);
+
rtw89_hci_enable_intr(rtwdev);
+ ret = rtw89_wow_get_aoac_rpt(rtwdev, true);
+ if (!ret)
+ rtw89_wow_update_key_info(rtwdev, true);
return 0;
}
@@ -618,6 +1300,7 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
int ret;
rtw89_wow_pattern_write(rtwdev);
+ rtw89_wow_construct_key_info(rtwdev);
ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
if (ret) {
@@ -631,6 +1314,16 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
goto out;
}
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
+ goto out;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+
ret = rtw89_wow_cfg_wake(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to config wake\n");
@@ -667,6 +1360,17 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
+ goto out;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+
+ rtw89_wow_key_clear(rtwdev);
rtw89_fw_release_general_pkt_list(rtwdev, true);
ret = rtw89_wow_cfg_wake(rtwdev, false);
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index a2f7b2e3cdb4..e595aee0196d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -5,6 +5,26 @@
#ifndef __RTW89_WOW_H__
#define __RTW89_WOW_H__
+#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0)
+#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8)
+#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16)
+#define RTW89_KEY_PN_3 GENMASK_ULL(31, 24)
+#define RTW89_KEY_PN_4 GENMASK_ULL(39, 32)
+#define RTW89_KEY_PN_5 GENMASK_ULL(47, 40)
+
+#define RTW89_IGTK_IPN_0 GENMASK_ULL(7, 0)
+#define RTW89_IGTK_IPN_1 GENMASK_ULL(15, 8)
+#define RTW89_IGTK_IPN_2 GENMASK_ULL(23, 16)
+#define RTW89_IGTK_IPN_3 GENMASK_ULL(31, 24)
+#define RTW89_IGTK_IPN_4 GENMASK_ULL(39, 32)
+#define RTW89_IGTK_IPN_5 GENMASK_ULL(47, 40)
+#define RTW89_IGTK_IPN_6 GENMASK_ULL(55, 48)
+#define RTW89_IGTK_IPN_7 GENMASK_ULL(63, 56)
+
+#define RTW89_WOW_VALID_CHECK 0xDD
+#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0)
+#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1)
+
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
@@ -15,7 +35,44 @@ enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_NLO = 0x55,
};
+struct rtw89_cipher_suite {
+ u8 oui[3];
+ u8 type;
+} __packed;
+
+struct rtw89_rsn_ie {
+ u8 tag_number;
+ u8 tag_length;
+ __le16 rsn_version;
+ struct rtw89_cipher_suite group_cipher_suite;
+ __le16 pairwise_cipher_suite_cnt;
+ struct rtw89_cipher_suite pairwise_cipher_suite;
+ __le16 akm_cipher_suite_cnt;
+ struct rtw89_cipher_suite akm_cipher_suite;
+} __packed;
+
+struct rtw89_cipher_info {
+ u32 cipher;
+ u8 fw_alg;
+ enum ieee80211_key_len len;
+};
+
+struct rtw89_set_key_info_iter_data {
+ u32 gtk_cipher;
+ u32 igtk_cipher;
+ bool rx_ready;
+ bool error;
+};
+
+#ifdef CONFIG_PM
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+#else
+static inline
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+{
+}
+#endif
#endif
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8e7b757475d2..1e578533e473 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1519,36 +1519,7 @@ static struct sdio_driver rsi_driver = {
}
#endif
};
-
-/**
- * rsi_module_init() - This function registers the sdio module.
- * @void: Void.
- *
- * Return: 0 on success.
- */
-static int rsi_module_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&rsi_driver);
- rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
- return ret;
-}
-
-/**
- * rsi_module_exit() - This function unregisters the sdio module.
- * @void: Void.
- *
- * Return: None.
- */
-static void rsi_module_exit(void)
-{
- sdio_unregister_driver(&rsi_driver);
- rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
-}
-
-module_init(rsi_module_init);
-module_exit(rsi_module_exit);
+module_sdio_driver(rsi_driver);
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
index 909d5f346a01..f290eecde773 100644
--- a/drivers/net/wireless/silabs/wfx/bus_sdio.c
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -267,7 +267,6 @@ struct sdio_driver wfx_sdio_driver = {
.probe = wfx_sdio_probe,
.remove = wfx_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.of_match_table = wfx_sdio_of_match,
}
};
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index e5874186f9d7..39159201b97e 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -89,8 +89,6 @@ enum wl1251_commands {
struct wl1251_cmd_header {
u16 id;
u16 status;
- /* payload */
- u8 data[];
} __packed;
struct wl1251_command {
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 4e5b351f80f0..c705081249d6 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -323,25 +323,7 @@ static struct sdio_driver wl1251_sdio_driver = {
.remove = wl1251_sdio_remove,
.drv.pm = &wl1251_sdio_pm_ops,
};
-
-static int __init wl1251_sdio_init(void)
-{
- int err;
-
- err = sdio_register_driver(&wl1251_sdio_driver);
- if (err)
- wl1251_error("failed to register sdio driver: %d", err);
- return err;
-}
-
-static void __exit wl1251_sdio_exit(void)
-{
- sdio_unregister_driver(&wl1251_sdio_driver);
- wl1251_notice("unloaded");
-}
-
-module_init(wl1251_sdio_init);
-module_exit(wl1251_sdio_exit);
+module_sdio_driver(wl1251_sdio_driver);
MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 7e28fe435b43..3d5b0df5b231 100644
--- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
@@ -65,7 +65,6 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index f2609d5b6bf7..4c2f2608ef3b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -208,8 +208,6 @@ enum cmd_templ {
struct wl1271_cmd_header {
__le16 id;
__le16 status;
- /* payload */
- u8 data[];
} __packed;
#define WL1271_CMD_MAX_PARAMS 572
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index f0c7e09b314d..c07acfcbbd9c 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,11 +19,8 @@ static ssize_t bt_coex_state_show(struct device *dev,
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
- len = PAGE_SIZE;
-
mutex_lock(&wl->mutex);
- len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
- wl->sg_enabled);
+ len = sysfs_emit(buf, "%d\n\n0 - off\n1 - on\n", wl->sg_enabled);
mutex_unlock(&wl->mutex);
return len;
@@ -78,13 +75,11 @@ static ssize_t hw_pg_ver_show(struct device *dev,
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
- len = PAGE_SIZE;
-
mutex_lock(&wl->mutex);
if (wl->hw_pg_ver >= 0)
- len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ len = sysfs_emit(buf, "%d\n", wl->hw_pg_ver);
else
- len = snprintf(buf, len, "n/a\n");
+ len = sysfs_emit(buf, "n/a\n");
mutex_unlock(&wl->mutex);
return len;
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 1dd7ecc11f86..602915c4da26 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
@@ -66,7 +66,6 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index b55fe320633c..b5afaec61827 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -216,7 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
- u32 skip_beacons;
+ u32 skip_beacons[IEEE80211_MLD_MAX_NUM_LINKS];
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -1721,6 +1721,9 @@ static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data,
sp->active_links_rx &= ~BIT(link_id);
else
sp->active_links_rx |= BIT(link_id);
+
+ rx_status->link_valid = true;
+ rx_status->link_id = link_id;
}
rcu_read_unlock();
}
@@ -2133,13 +2136,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void
+mac80211_hwsim_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
- &vp->skip_beacons);
+ debugfs_create_u32("skip_beacons", 0600, dir,
+ &vp->skip_beacons[link_conf->link_id]);
}
#endif
@@ -2214,8 +2220,8 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
/* TODO: get MCS */
int bitrate = 100;
- if (vp->skip_beacons) {
- vp->skip_beacons--;
+ if (vp->skip_beacons[link_conf->link_id]) {
+ vp->skip_beacons[link_conf->link_id]--;
dev_kfree_skb(skb);
return;
}
@@ -2307,6 +2313,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
ieee80211_csa_finish(vif, link_id);
+
+ if (link_conf->color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_color_change_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -3899,7 +3909,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}
nla_for_each_nested(peer, peers, rem) {
- struct cfg80211_pmsr_result result;
+ struct cfg80211_pmsr_result result = {};
err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)
@@ -3922,7 +3932,7 @@ out:
#ifdef CONFIG_MAC80211_DEBUGFS
#define HWSIM_DEBUGFS_OPS \
- .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+ .link_add_debugfs = mac80211_hwsim_link_add_debugfs,
#else
#define HWSIM_DEBUGFS_OPS
#endif
@@ -4122,7 +4132,8 @@ out_err:
static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4229,7 +4240,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4380,8 +4392,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P? */
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4505,7 +4517,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4676,8 +4689,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P? */
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -4822,7 +4835,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -5313,6 +5327,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR);
hw->wiphy->interface_modes = param->iftypes;
@@ -6751,11 +6767,11 @@ static int __init init_mac80211_hwsim(void)
param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
- param.regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_03;
break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
if (i == 0)
- param.regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_03;
else if (i == 1)
param.regd = &hwsim_world_regdom_custom_02;
break;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 2fe724d623c0..bef6819986e9 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -33,7 +33,8 @@ static int ipc_devlink_get_param(struct devlink *dl, u32 id,
/* Set the param values for the specific param ID's */
static int ipc_devlink_set_param(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 3f72ae943b29..f2aef84fc08d 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -648,7 +648,6 @@ static struct mhi_driver mhi_mbim_driver = {
.id_table = mhi_mbim_id_table,
.driver = {
.name = "mhi_wwan_mbim",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
index 9f43f256db1d..f0a4783baf1f 100644
--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
- return ioread64(hw_info->ap_pdn_base + offset);
+ return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
- iowrite64(address, reg + offset);
+ iowrite64_lo_hi(address, reg + offset);
}
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index abc41a7089fa..97163e1e5783 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -137,8 +137,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
return -ENODEV;
}
- gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
+ REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
return 0;
@@ -316,8 +317,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
/* Check current processing TGPD, 64-bit address is in a table by Q index */
- ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr != ul_curr_addr) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 3ef4a8a4f8fd..91fa082e9cab 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -253,22 +253,27 @@ static void t7xx_ccmni_wwan_setup(struct net_device *dev)
dev->netdev_ops = &ccmni_netdev_ops;
}
-static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+static int t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
int i;
/* one HW, but shared with multiple net devices,
* so add a dummy device for NAPI.
*/
- init_dummy_netdev(&ctlb->dummy_dev);
+ ctlb->dummy_dev = alloc_netdev_dummy(0);
+ if (!ctlb->dummy_dev)
+ return -ENOMEM;
+
atomic_set(&ctlb->napi_usr_refcnt, 0);
ctlb->is_napi_en = false;
for (i = 0; i < RXQ_NUM; i++) {
ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
- netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ netif_napi_add_weight(ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
NIC_NAPI_POLL_BUDGET);
}
+
+ return 0;
}
static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
@@ -279,6 +284,7 @@ static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
netif_napi_del(ctlb->napi[i]);
ctlb->napi[i] = NULL;
}
+ free_netdev(ctlb->dummy_dev);
}
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
@@ -480,6 +486,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct t7xx_ccmni_ctrl *ctlb;
+ int ret;
ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
if (!ctlb)
@@ -495,7 +502,12 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
if (!ctlb->hif_ctrl)
return -ENOMEM;
- t7xx_init_netdev_napi(ctlb);
+ ret = t7xx_init_netdev_napi(ctlb);
+ if (ret) {
+ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
+ return ret;
+ }
+
init_md_status_notifier(t7xx_dev);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
index f5ed6f99a145..b18312f49844 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.h
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -48,7 +48,7 @@ struct t7xx_ccmni_ctrl {
unsigned int md_sta;
struct t7xx_fsm_notifier md_status_notify;
bool wwan_is_registered;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct napi_struct *napi[RXQ_NUM];
atomic_t napi_usr_refcnt;
bool is_napi_en;
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
index 76da4c15e3de..f071ec7ff23d 100644
--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
- iowrite64(0, reg);
+ iowrite64_lo_hi(0, reg);
}
}
@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
iowrite32(cfg->trsl_id, reg);
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
/* Ensure ATR is set */
- ioread64(reg);
+ ioread64_lo_hi(reg);
return 0;
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 1fcbd83f7ff2..17421da139f2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
- bool zerocopy_success);
+/* Callbacks from stack when TX packet can be released */
+extern const struct ubuf_info_ops xenvif_ubuf_ops;
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7cff90aa8d24..325fcb3d1075 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -358,7 +358,7 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
for (i = 0; i < MAX_PENDING_REQS; i++) {
queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
- { { .callback = xenvif_zerocopy_callback },
+ { { .ops = &xenvif_ubuf_ops },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index ef76850d9bcd..5836995d6774 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -38,6 +38,7 @@
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>
+#include <linux/skbuff_ref.h>
#include <net/tcp.h>
@@ -1156,7 +1157,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1278,8 +1279,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
- bool zerocopy_success)
+static void xenvif_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf_base,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
@@ -1312,6 +1314,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
xenvif_skb_zerocopy_complete(queue);
}
+const struct ubuf_info_ops xenvif_ubuf_ops = {
+ .complete = xenvif_zerocopy_callback,
+};
+
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ad29f370034e..4265c1cd0ff7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
return NULL;
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+ skb_mark_for_recycle(skb);
/* Align ip header to a 16 bytes boundary */
skb_reserve(skb, NET_IP_ALIGN);
@@ -1375,7 +1376,7 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index ad3359a4942c..9c8cde1250fb 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -199,7 +199,6 @@ static struct spi_driver nfcmrvl_spi_driver = {
.id_table = nfcmrvl_spi_id_table,
.driver = {
.name = "nfcmrvl_spi",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_nfcmrvl_spi_match),
},
};
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index ed704bb77226..ffe5b4eab457 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -7,14 +7,13 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/nfc.h>
-#include <linux/of_gpio.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -196,7 +195,7 @@ struct st95_digital_cmd_complete_arg {
* for spi communication between st95hf and host.
* @ddev: nfc digital device object.
* @nfcdev: nfc device object.
- * @enable_gpio: gpio used to enable st95hf transceiver.
+ * @enable_gpiod: gpio used to enable st95hf transceiver.
* @complete_cb_arg: structure to store various context information
* that is passed from nfc requesting thread to the threaded ISR.
* @st95hf_supply: regulator "consumer" for NFC device.
@@ -219,7 +218,7 @@ struct st95hf_context {
struct st95hf_spi_context spicontext;
struct nfc_digital_dev *ddev;
struct nfc_dev *nfcdev;
- unsigned int enable_gpio;
+ struct gpio_desc *enable_gpiod;
struct st95_digital_cmd_complete_arg complete_cb_arg;
struct regulator *st95hf_supply;
unsigned char sendrcv_trflag;
@@ -451,19 +450,19 @@ static int st95hf_select_protocol(struct st95hf_context *stcontext, int type)
static void st95hf_send_st95enable_negativepulse(struct st95hf_context *st95con)
{
/* First make irq_in pin high */
- gpio_set_value(st95con->enable_gpio, HIGH);
+ gpiod_set_value(st95con->enable_gpiod, HIGH);
/* wait for 1 milisecond */
usleep_range(1000, 2000);
/* Make irq_in pin low */
- gpio_set_value(st95con->enable_gpio, LOW);
+ gpiod_set_value(st95con->enable_gpiod, LOW);
/* wait for minimum interrupt pulse to make st95 active */
usleep_range(1000, 2000);
/* At end make it high */
- gpio_set_value(st95con->enable_gpio, HIGH);
+ gpiod_set_value(st95con->enable_gpiod, HIGH);
}
/*
@@ -1063,6 +1062,7 @@ MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
static int st95hf_probe(struct spi_device *nfc_spi_dev)
{
+ struct device *dev = &nfc_spi_dev->dev;
int ret;
struct st95hf_context *st95context;
@@ -1108,19 +1108,14 @@ static int st95hf_probe(struct spi_device *nfc_spi_dev)
*/
dev_set_drvdata(&nfc_spi_dev->dev, spicontext);
- st95context->enable_gpio =
- of_get_named_gpio(nfc_spi_dev->dev.of_node,
- "enable-gpio",
- 0);
- if (!gpio_is_valid(st95context->enable_gpio)) {
+ st95context->enable_gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(st95context->enable_gpiod)) {
+ ret = PTR_ERR(st95context->enable_gpiod);
dev_err(&nfc_spi_dev->dev, "No valid enable gpio\n");
- ret = st95context->enable_gpio;
goto err_disable_regulator;
}
- ret = devm_gpio_request_one(&nfc_spi_dev->dev, st95context->enable_gpio,
- GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
- "enable_gpio");
+ ret = gpiod_set_consumer_name(st95context->enable_gpiod, "enable_gpio");
if (ret)
goto err_disable_regulator;
@@ -1242,7 +1237,6 @@ static void st95hf_remove(struct spi_device *nfc_spi_dev)
static struct spi_driver st95hf_driver = {
.driver = {
.name = "st95hf",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(st95hf_spi_of_match),
},
.id_table = st95hf_id,
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 7eb17f46a815..9e1a34e23af2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 48823b53ede3..48dfb1a69a77 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2129,7 +2129,7 @@ static int idt_init_isr(struct idt_ntb_dev *ndev)
int ret;
/* Allocate just one interrupt vector for the ISR */
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret != 1) {
dev_err(&pdev->dev, "Failed to allocate IRQ vector");
return ret;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 4d0c527e8576..1e5aedaf8c7b 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -6,6 +6,7 @@
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -1499,6 +1500,7 @@ static int btt_blk_init(struct btt *btt)
struct queue_limits lim = {
.logical_block_size = btt->sector_size,
.max_hw_sectors = UINT_MAX,
+ .max_integrity_segments = 1,
};
int rc;
@@ -1514,10 +1516,12 @@ static int btt_blk_init(struct btt *btt)
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
- if (btt_meta_size(btt)) {
- rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
- if (rc)
- goto out_cleanup_disk;
+ if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
+ struct blk_integrity bi = {
+ .tuple_size = btt_meta_size(btt),
+ .tag_size = btt_meta_size(btt),
+ };
+ blk_integrity_register(btt->btt_disk, &bi);
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index d91799b71d23..2023a661bbb0 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
-#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
@@ -508,35 +507,6 @@ int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
-{
- struct blk_integrity bi;
-
- if (meta_size == 0)
- return 0;
-
- memset(&bi, 0, sizeof(bi));
-
- bi.tuple_size = meta_size;
- bi.tag_size = meta_size;
-
- blk_integrity_register(disk, &bi);
- blk_queue_max_integrity_segments(disk->queue, 1);
-
- return 0;
-}
-EXPORT_SYMBOL(nd_integrity_init);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
-{
- return 0;
-}
-EXPORT_SYMBOL(nd_integrity_init);
-
-#endif
-
static __init int libnvdimm_init(void)
{
int rc;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ae2078eb6a62..2dbb1dca17b5 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -489,7 +489,6 @@ enum nd_async_mode {
ND_ASYNC,
};
-int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 943d72bdd794..bf7615cb36ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -629,27 +629,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
- switch (nvme_ctrl_state(ctrl)) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- case NVME_CTRL_RESETTING:
- case NVME_CTRL_CONNECTING:
- return false;
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- case NVME_CTRL_DEAD:
- return true;
- default:
- WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
- return true;
- }
-}
-
-/*
* Waits for the controller state to be resetting, or returns false if it is
* not possible to ever transition to that state.
*/
@@ -2076,6 +2055,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
struct queue_limits lim;
struct nvme_id_ns_nvm *nvm = NULL;
+ struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
sector_t capacity;
unsigned lbaf;
@@ -2088,9 +2068,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- ret = -ENODEV;
+ ret = -ENXIO;
goto out;
}
+ lbaf = nvme_lbaf_index(id->flbas);
if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2098,8 +2079,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_query_zone_info(ns, lbaf, &zi);
+ if (ret < 0)
+ goto out;
+ }
+
blk_mq_freeze_queue(ns->disk->queue);
- lbaf = nvme_lbaf_index(id->flbas);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2112,13 +2099,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
capacity = 0;
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf, &lim);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
- }
+ ns->head->ids.csi == NVME_CSI_ZNS)
+ nvme_update_zone_info(ns, &lim, &zi);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2150,7 +2132,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ ret = blk_revalidate_disk_zones(ns->disk);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
@@ -2201,6 +2183,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
}
if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
blk_mq_freeze_queue(ns->head->disk->queue);
@@ -2212,7 +2195,26 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
+ /*
+ * queue_limits mixes values that are the hardware limitations
+ * for bio splitting with what is the device configuration.
+ *
+ * For NVMe the device configuration can change after e.g. a
+ * Format command, and we really want to pick up the new format
+ * value here. But we must still stack the queue limits to the
+ * least common denominator for multipathing to split the bios
+ * properly.
+ *
+ * To work around this, we explicitly set the device
+ * configuration to those that we just queried, but only stack
+ * the splitting limits in to make sure we still obey possibly
+ * lower limitations of other controllers.
+ */
lim = queue_limits_start_update(ns->head->disk->queue);
+ lim.logical_block_size = ns_lim->logical_block_size;
+ lim.physical_block_size = ns_lim->physical_block_size;
+ lim.io_min = ns_lim->io_min;
+ lim.io_opt = ns_lim->io_opt;
queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
ns->head->disk->disk_name);
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
@@ -3658,7 +3660,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
dev_warn_once(ctrl->device,
- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
}
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 68a5d971657b..a5b29e9ad342 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2428,7 +2428,7 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
* controller. Called after last nvme_put_ctrl() call
*/
static void
-nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -3384,7 +3384,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
- .free_ctrl = nvme_fc_nvme_ctrl_freed,
+ .free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3dfd5ae99ae0..499a8bb7cac7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly.
+ * For iopoll, complete it directly. Note that using the uring_cmd
+ * helper for this is safe only because we check blk_rq_is_poll().
+ * As that returns false if we're NOT on a polled queue, then it's
+ * safe to use the polled completion helper.
+ *
* Otherwise, move the completion to task work.
*/
- if (blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
- else
+ if (blk_rq_is_poll(req)) {
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+ io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+ } else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ }
return RQ_END_IO_FREE;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5397fb428b24..d16e976ae1a4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -247,7 +247,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
if (nvme_path_is_disabled(ns))
continue;
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+ READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
distance = node_distance(node, ns->ctrl->numa_node);
else
distance = LOCAL_DISTANCE;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 24193fcb8bd5..05532c281177 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -162,6 +162,11 @@ enum nvme_quirks {
* Disables simple suspend/resume path.
*/
NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+
+ /*
+ * MSI (but not MSI-X) interrupts are broken and never fire.
+ */
+ NVME_QUIRK_BROKEN_MSI = (1 << 21),
};
/*
@@ -741,6 +746,27 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
@@ -1036,10 +1062,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
+struct nvme_zone_info {
+ u64 zone_size;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+};
+
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8e0bb9692685..710043086dff 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1286,6 +1286,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ if (nvme_state_terminal(&dev->ctrl))
+ goto disable;
+
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
*/
@@ -1390,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
return BLK_EH_RESET_TIMER;
disable:
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+ if (nvme_state_terminal(&dev->ctrl))
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
+ }
nvme_dev_disable(dev, false);
if (nvme_try_sched_reset(&dev->ctrl))
@@ -2218,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, poll_queues;
+ unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
/*
* Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2241,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
irq_queues = 1;
if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
irq_queues += (nr_io_queues - poll_queues);
- return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+ &affd);
}
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2471,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned int flags = PCI_IRQ_ALL_TYPES;
if (pci_enable_device_mem(pdev))
return result;
@@ -2487,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/
- result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
if (result < 0)
goto disable;
@@ -3384,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
+ .driver_data = NVME_QUIRK_BROKEN_MSI },
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
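Kernel-style sketch of how the new quirk is consumed: start from PCI_IRQ_ALL_TYPES and clear the MSI bit before allocating vectors, so the device falls back to MSI-X or INTx. example_setup_irqs() and EXAMPLE_QUIRK_BROKEN_MSI are made-up stand-ins; the PCI_IRQ_* flags and pci_alloc_irq_vectors() are the regular PCI core API.

#define EXAMPLE_QUIRK_BROKEN_MSI        BIT(21)         /* illustrative quirk bit */

static int example_setup_irqs(struct pci_dev *pdev, unsigned long quirks,
                              unsigned int nvecs)
{
        unsigned int flags = PCI_IRQ_ALL_TYPES;

        if (quirks & EXAMPLE_QUIRK_BROKEN_MSI)
                flags &= ~PCI_IRQ_MSI;                  /* never try plain MSI */

        return pci_alloc_irq_vectors(pdev, 1, nvecs, flags);
}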
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index fdbcdcedcee9..28bc2f373cfa 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -360,12 +360,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
!llist_empty(&queue->req_list);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+ nvme_tcp_queue_has_pending(queue);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -386,7 +392,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
mutex_unlock(&queue->send_mutex);
}
- if (last && nvme_tcp_queue_more(queue))
+ if (last && nvme_tcp_queue_has_pending(queue))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 722384bcc765..77aa0f440a6d 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi)
{
struct nvme_effects_log *log = ns->head->effects;
struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
goto free_data;
}
- ns->head->zsze =
- nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
- if (!is_power_of_2(ns->head->zsze)) {
+ zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+ if (!is_power_of_2(zi->zone_size)) {
dev_warn(ns->ctrl->device,
- "invalid zone size:%llu for namespace:%u\n",
- ns->head->zsze, ns->head->ns_id);
+ "invalid zone size: %llu for namespace: %u\n",
+ zi->zone_size, ns->head->ns_id);
status = -ENODEV;
goto free_data;
}
+ zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+ zi->max_active_zones = le32_to_cpu(id->mar) + 1;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
- lim->zoned = 1;
- lim->max_open_zones = le32_to_cpu(id->mor) + 1;
- lim->max_active_zones = le32_to_cpu(id->mar) + 1;
- lim->chunk_sectors = ns->head->zsze;
- lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
free_data:
kfree(id);
return status;
}
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi)
+{
+ lim->zoned = 1;
+ lim->max_open_zones = zi->max_open_zones;
+ lim->max_active_zones = zi->max_active_zones;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->chunk_sectors = ns->head->zsze =
+ nvme_lba_to_sect(ns->head, zi->zone_size);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
unsigned int nr_zones, size_t *buflen)
{
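Sketch of how the split introduced above is meant to be used, matching the declarations added to nvme.h: the Identify-based query runs once up front, and the pure bookkeeping step is applied later while the queue_limits are being assembled. example_configure_zns() is a made-up wrapper, not a function in the driver.

static int example_configure_zns(struct nvme_ns *ns, unsigned lbaf,
                                 struct queue_limits *lim)
{
        struct nvme_zone_info zi;
        int ret;

        ret = nvme_query_zone_info(ns, lbaf, &zi);      /* issues Identify once */
        if (ret)
                return ret;

        nvme_update_zone_info(ns, lim, &zi);            /* limit bookkeeping only */
        return 0;
}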
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 3ddbc3880cac..4f08362aee8b 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -285,9 +285,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
}
if (shash_len != crypto_shash_digestsize(shash_tfm)) {
- pr_debug("%s: hash len mismatch (len %d digest %d)\n",
- __func__, shash_len,
- crypto_shash_digestsize(shash_tfm));
+ pr_err("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
ret = -EINVAL;
goto out_free_tfm;
}
@@ -370,7 +370,7 @@ out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
@@ -480,7 +480,7 @@ out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 77a6e817b315..7fda69395c1e 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -754,6 +754,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct config_item *ns_item;
+ char name[12];
+
+ snprintf(name, sizeof(name), "%u", nsid);
+ mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ ns_item = config_group_find_item(&subsys->namespaces_group, name);
+ mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ return ns_item != NULL;
+}
+
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -1613,6 +1625,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
return ERR_PTR(-EINVAL);
}
+ if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
+ pr_err("can't create subsystem using unique discovery NQN\n");
+ return ERR_PTR(-EINVAL);
+ }
+
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
if (IS_ERR(subsys))
return ERR_CAST(subsys);
@@ -2159,7 +2176,49 @@ static const struct config_item_type nvmet_hosts_type = {
static struct config_group nvmet_hosts_group;
+static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct list_head *entry;
+ size_t len;
+
+ len = strcspn(page, "\n");
+ if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ struct config_item *item =
+ container_of(entry, struct config_item, ci_entry);
+
+ if (!strncmp(config_item_name(item), page, len)) {
+ pr_err("duplicate NQN %s\n", config_item_name(item));
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+ }
+ }
+ memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+ memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ up_write(&nvmet_config_sem);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
+
+static struct configfs_attribute *nvmet_root_attrs[] = {
+ &nvmet_root_attr_discovery_nqn,
+ NULL,
+};
+
static const struct config_item_type nvmet_root_type = {
+ .ct_attrs = nvmet_root_attrs,
.ct_owner = THIS_MODULE,
};
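Standalone sketch of the input validation used by the discovery_nqn store handler above: accept everything up to the first newline and enforce the NQN field limit. NQN_FIELD_LEN stands in for NVMF_NQN_FIELD_LEN and parse_nqn() is illustrative only.

#include <stdio.h>
#include <string.h>

#define NQN_FIELD_LEN   256

static int parse_nqn(const char *page, char *out)
{
        size_t len = strcspn(page, "\n");       /* length up to newline or NUL */

        if (!len || len > NQN_FIELD_LEN - 1)
                return -1;

        memset(out, 0, NQN_FIELD_LEN);
        memcpy(out, page, len);
        return (int)len;
}

int main(void)
{
        char nqn[NQN_FIELD_LEN];

        if (parse_nqn("nqn.2014-08.org.nvmexpress.discovery\n", nqn) > 0)
                printf("%s\n", nqn);
        return 0;
}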
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6bbe4df0166c..2fde22323622 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -437,10 +437,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
- req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ req->ns = xa_load(&subsys->namespaces, nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
+ if (nvmet_subsys_nsid_exists(subsys, nsid))
+ return NVME_SC_INTERNAL_PATH_ERROR;
return NVME_SC_INVALID_NS | NVME_SC_DNR;
}
@@ -1541,6 +1544,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
}
down_read(&nvmet_config_sem);
+ if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
+ up_read(&nvmet_config_sem);
+ return nvmet_disc_subsys;
+ }
+ }
list_for_each_entry(p, &port->subsystems, entry) {
if (!strncmp(p->subsys->subsysnqn, subsysnqn,
NVMF_NQN_SIZE)) {
@@ -1676,7 +1686,8 @@ static int __init nvmet_init(void)
if (!buffered_io_wq)
goto out_free_zbd_work_queue;
- nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ nvmet_wq = alloc_workqueue("nvmet-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index fd229f310c93..337ee1cb09ae 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,16 +1115,21 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
}
static bool
-nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
struct nvmet_fc_tgt_assoc *a;
+ bool found = false;
+ rcu_read_lock();
list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
- if (association_id == a->association_id)
- return true;
+ if (association_id == a->association_id) {
+ found = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return found;
}
static struct nvmet_fc_tgt_assoc *
@@ -1164,13 +1169,11 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- rcu_read_lock();
- if (!nvmet_fc_assoc_exits(tgtport, ran)) {
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
done = true;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
} while (!done);
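Kernel-style sketch of the lookup pattern the FC fix moves to: take rcu_read_lock() around the whole list walk and record the hit in a local flag, rather than returning from inside a traversal that was not protected. struct example_entry and example_id_exists() are hypothetical; the list and RCU primitives are the standard kernel ones.

struct example_entry {
        struct list_head list;
        u64 id;
};

static bool example_id_exists(struct list_head *head, u64 id)
{
        struct example_entry *e;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(e, head, list) {
                if (e->id == id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}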
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f460728e1df1..c1306de1f4dd 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -543,6 +543,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5b8c63e74639..6e1b4140cde0 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
return 0;
out_free:
- while (--i >= 0) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ while (--i >= 0)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
out:
return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
struct nvmet_rdma_device *ndev = queue->dev;
int i, nr_rsps = queue->recv_queue_size * 2;
- for (i = 0; i < nr_rsps; i++) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ for (i = 0; i < nr_rsps; i++)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a5422e2c979a..380f22ee3ebb 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
+/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
kfree(cmd->iov);
@@ -1581,13 +1582,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmd = queue->cmds;
int i;
- for (i = 0; i < queue->nr_cmds; i++, cmd++) {
- if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_free_cmd_buffers(cmd);
- }
-
- if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
- nvmet_tcp_free_cmd_buffers(&queue->connect);
+ for (i = 0; i < queue->nr_cmds; i++, cmd++)
+ nvmet_tcp_free_cmd_buffers(cmd);
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
}
static void nvmet_tcp_release_queue_work(struct work_struct *w)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3148d9f1bde6..0021d06041c1 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
return false;
/*
- * ZNS does not define a conventional zone type. If the underlying
- * device has a bitmap set indicating the existence of conventional
- * zones, reject the device. Otherwise, use report zones to detect if
- * the device has conventional zones.
+ * ZNS does not define a conventional zone type. Use report zones
+ * to detect if the device has conventional zones and reject it if
+ * it does.
*/
- if (ns->bdev->bd_disk->conv_zones_bitmap)
- return false;
-
ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
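For reference, a kernel-style sketch of the report-zones callback the comment above now relies on; the callback name matches the caller in the hunk, but the body shown here is an assumption about how "reject conventional zones" is implemented.

static int validate_conv_zones_cb(struct blk_zone *z, unsigned int idx,
                                  void *data)
{
        if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return -EOPNOTSUPP;     /* conventional zone found: reject bdev */
        return 0;
}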
diff --git a/drivers/of/address.c b/drivers/of/address.c
index ae46a3605904..d669ce25b5f9 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -486,34 +486,30 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* device that had registered logical PIO mapping, and the return code is
* relative to that node.
*/
-static u64 __of_translate_address(struct device_node *dev,
+static u64 __of_translate_address(struct device_node *node,
struct device_node *(*get_parent)(const struct device_node *),
const __be32 *in_addr, const char *rprop,
struct device_node **host)
{
- struct device_node *parent = NULL;
+ struct device_node *dev __free(device_node) = of_node_get(node);
+ struct device_node *parent __free(device_node) = get_parent(dev);
struct of_bus *bus, *pbus;
__be32 addr[OF_MAX_ADDR_CELLS];
int na, ns, pna, pns;
- u64 result = OF_BAD_ADDR;
pr_debug("** translation for device %pOF **\n", dev);
- /* Increase refcount at current level */
- of_node_get(dev);
-
*host = NULL;
- /* Get parent & match bus type */
- parent = get_parent(dev);
+
if (parent == NULL)
- goto bail;
+ return OF_BAD_ADDR;
bus = of_match_bus(parent);
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
pr_debug("Bad cell count for %pOF\n", dev);
- goto bail;
+ return OF_BAD_ADDR;
}
memcpy(addr, in_addr, na * 4);
@@ -533,8 +529,7 @@ static u64 __of_translate_address(struct device_node *dev,
/* If root, we have finished */
if (parent == NULL) {
pr_debug("reached root node\n");
- result = of_read_number(addr, na);
- break;
+ return of_read_number(addr, na);
}
/*
@@ -543,11 +538,11 @@ static u64 __of_translate_address(struct device_node *dev,
*/
iorange = find_io_range_by_fwnode(&dev->fwnode);
if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
- result = of_read_number(addr + 1, na - 1);
+ u64 result = of_read_number(addr + 1, na - 1);
pr_debug("indirectIO matched(%pOF) 0x%llx\n",
dev, result);
- *host = of_node_get(dev);
- break;
+ *host = no_free_ptr(dev);
+ return result;
}
/* Get new parent bus and counts */
@@ -555,7 +550,7 @@ static u64 __of_translate_address(struct device_node *dev,
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
- break;
+ return OF_BAD_ADDR;
}
pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
@@ -563,7 +558,7 @@ static u64 __of_translate_address(struct device_node *dev,
/* Apply bus translation */
if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
- break;
+ return OF_BAD_ADDR;
/* Complete the move up one level */
na = pna;
@@ -572,11 +567,8 @@ static u64 __of_translate_address(struct device_node *dev,
of_dump_addr("one level translation:", addr, na);
}
- bail:
- of_node_put(parent);
- of_node_put(dev);
- return result;
+ unreachable();
}
u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
@@ -654,19 +646,16 @@ EXPORT_SYMBOL(of_translate_dma_address);
const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
phys_addr_t *start, size_t *length)
{
- struct device_node *parent;
+ struct device_node *parent __free(device_node) = __of_get_dma_parent(dev);
u64 address, size;
int na, ns;
- parent = __of_get_dma_parent(dev);
if (!parent)
return NULL;
na = of_bus_n_addr_cells(parent);
ns = of_bus_n_size_cells(parent);
- of_node_put(parent);
-
address = of_translate_dma_address(dev, prop);
if (address == OF_BAD_ADDR)
return NULL;
@@ -688,21 +677,19 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
{
const __be32 *prop;
unsigned int psize;
- struct device_node *parent;
+ struct device_node *parent __free(device_node) = of_get_parent(dev);
struct of_bus *bus;
int onesize, i, na, ns;
- /* Get parent & match bus type */
- parent = of_get_parent(dev);
if (parent == NULL)
return NULL;
+
+ /* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0)) {
- of_node_put(parent);
+ if (strcmp(bus->name, "pci") && (bar_no >= 0))
return NULL;
- }
+
bus->count_cells(dev, &na, &ns);
- of_node_put(parent);
if (!OF_CHECK_ADDR_COUNT(na))
return NULL;
@@ -888,14 +875,13 @@ static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
*/
int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
{
- struct device_node *node = of_node_get(np);
+ struct device_node *node __free(device_node) = of_node_get(np);
const __be32 *ranges = NULL;
bool found_dma_ranges = false;
struct of_range_parser parser;
struct of_range range;
struct bus_dma_region *r;
int len, num_ranges = 0;
- int ret = 0;
while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
@@ -905,10 +891,9 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
break;
/* Once we find 'dma-ranges', then a missing one is an error */
- if (found_dma_ranges && !ranges) {
- ret = -ENODEV;
- goto out;
- }
+ if (found_dma_ranges && !ranges)
+ return -ENODEV;
+
found_dma_ranges = true;
node = of_get_next_dma_parent(node);
@@ -916,10 +901,8 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
if (!node || !ranges) {
pr_debug("no dma-ranges found for node(%pOF)\n", np);
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
-
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
if (range.cpu_addr == OF_BAD_ADDR) {
@@ -930,16 +913,12 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
num_ranges++;
}
- if (!num_ranges) {
- ret = -EINVAL;
- goto out;
- }
+ if (!num_ranges)
+ return -EINVAL;
r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
- if (!r) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!r)
+ return -ENOMEM;
/*
* Record all info in the generic DMA ranges array for struct device,
@@ -957,9 +936,7 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
r->size = range.size;
r++;
}
-out:
- of_node_put(node);
- return ret;
+ return 0;
}
#endif /* CONFIG_HAS_DMA */
@@ -1016,24 +993,18 @@ phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
*/
bool of_dma_is_coherent(struct device_node *np)
{
- struct device_node *node;
- bool is_coherent = dma_default_coherent;
-
- node = of_node_get(np);
+ struct device_node *node __free(device_node) = of_node_get(np);
while (node) {
- if (of_property_read_bool(node, "dma-coherent")) {
- is_coherent = true;
- break;
- }
- if (of_property_read_bool(node, "dma-noncoherent")) {
- is_coherent = false;
- break;
- }
+ if (of_property_read_bool(node, "dma-coherent"))
+ return true;
+
+ if (of_property_read_bool(node, "dma-noncoherent"))
+ return false;
+
node = of_get_next_dma_parent(node);
}
- of_node_put(node);
- return is_coherent;
+ return dma_default_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
@@ -1049,20 +1020,14 @@ EXPORT_SYMBOL_GPL(of_dma_is_coherent);
*/
static bool of_mmio_is_nonposted(struct device_node *np)
{
- struct device_node *parent;
- bool nonposted;
-
if (!IS_ENABLED(CONFIG_ARCH_APPLE))
return false;
- parent = of_get_parent(np);
+ struct device_node *parent __free(device_node) = of_get_parent(np);
if (!parent)
return false;
- nonposted = of_property_read_bool(parent, "nonposted-mmio");
-
- of_node_put(parent);
- return nonposted;
+ return of_property_read_bool(parent, "nonposted-mmio");
}
static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
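Kernel-style sketch of the scope-based cleanup pattern this of/address.c rework adopts: the __free(device_node) annotation drops the reference on every exit path, and no_free_ptr()/return_ptr() hand a reference out when the caller should keep it. The DEFINE_FREE() line only shows what the annotation relies on and is assumed to already exist in <linux/of.h> in this tree; example_parent_has_prop() is made up.

DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))

static bool example_parent_has_prop(struct device_node *np, const char *prop)
{
        struct device_node *parent __free(device_node) = of_get_parent(np);

        if (!parent)
                return false;

        /* every return path puts the parent reference automatically */
        return of_property_read_bool(parent, prop);
}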
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8856c67c466a..20603d3c9931 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
@@ -1393,8 +1394,10 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
const char *stem_name,
int index, struct of_phandle_args *out_args)
{
- char *cells_name, *map_name = NULL, *mask_name = NULL;
- char *pass_name = NULL;
+ char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
+ char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
+ char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
+ char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
@@ -1407,27 +1410,13 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
if (index < 0)
return -EINVAL;
- cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
- if (!cells_name)
+ if (!cells_name || !map_name || !mask_name || !pass_name)
return -ENOMEM;
- ret = -ENOMEM;
- map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
- if (!map_name)
- goto free;
-
- mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
- if (!mask_name)
- goto free;
-
- pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
- if (!pass_name)
- goto free;
-
ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
out_args);
if (ret)
- goto free;
+ return ret;
/* Get the #<list>-cells property */
cur = out_args->np;
@@ -1444,8 +1433,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
/* Get the <list>-map property */
map = of_get_property(cur, map_name, &map_len);
if (!map) {
- ret = 0;
- goto free;
+ return 0;
}
map_len /= sizeof(u32);
@@ -1521,12 +1509,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
put:
of_node_put(cur);
of_node_put(new);
-free:
- kfree(mask_name);
- kfree(map_name);
- kfree(cells_name);
- kfree(pass_name);
-
return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index de89f9906375..312c63361211 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -95,8 +95,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
{
const struct bus_dma_region *map = NULL;
struct device_node *bus_np;
- u64 dma_start = 0;
- u64 mask, end, size = 0;
+ u64 mask, end = 0;
bool coherent;
int iommu_ret;
int ret;
@@ -117,34 +116,8 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
if (!force_dma)
return ret == -ENODEV ? 0 : ret;
} else {
- const struct bus_dma_region *r = map;
- u64 dma_end = 0;
-
/* Determine the overall bounds of all DMA regions */
- for (dma_start = ~0; r->size; r++) {
- /* Take lower and upper limits */
- if (r->dma_start < dma_start)
- dma_start = r->dma_start;
- if (r->dma_start + r->size > dma_end)
- dma_end = r->dma_start + r->size;
- }
- size = dma_end - dma_start;
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- kfree(map);
- return -EINVAL;
- }
+ end = dma_range_map_max(map);
}
/*
@@ -158,16 +131,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
dev->dma_mask = &dev->coherent_dma_mask;
}
- if (!size && dev->coherent_dma_mask)
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
- else if (!size)
- size = 1ULL << 32;
+ if (!end && dev->coherent_dma_mask)
+ end = dev->coherent_dma_mask;
+ else if (!end)
+ end = (1ULL << 32) - 1;
/*
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- end = dma_start + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
@@ -201,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
} else
dev_dbg(dev, "device is behind an iommu\n");
- arch_setup_dma_ops(dev, dma_start, size, coherent);
+ arch_setup_dma_ops(dev, coherent);
if (iommu_ret)
of_dma_set_restricted_buffer(dev, np);
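Userspace sketch of the bound the rewritten of_dma_configure_id() needs from the range map, i.e. what the removed open-coded loop computed: the highest inclusive DMA address covered by any entry. The struct and function names are illustrative and only mirror the shape of the kernel's bus_dma_region array.

#include <stdint.h>
#include <stdio.h>

struct example_dma_region {
        uint64_t dma_start;
        uint64_t size;          /* size == 0 terminates the array */
};

static uint64_t example_range_map_end(const struct example_dma_region *r)
{
        uint64_t end = 0;

        for (; r->size; r++)
                if (r->dma_start + r->size > end)
                        end = r->dma_start + r->size;

        return end ? end - 1 : 0;       /* inclusive limit, like a DMA mask */
}

int main(void)
{
        const struct example_dma_region map[] = {
                { .dma_start = 0x00000000, .size = 0x40000000 },
                { .dma_start = 0x80000000, .size = 0x40000000 },
                { }
        };

        printf("0x%llx\n", (unsigned long long)example_range_map_end(map));
        return 0;
}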
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3bf27052832f..dda6092e6d3a 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -9,6 +9,8 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/cleanup.h>
+#include <linux/device.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -305,15 +307,20 @@ int of_detach_node(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_detach_node);
+void __of_prop_free(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
static void property_list_free(struct property *prop_list)
{
struct property *prop, *next;
for (prop = prop_list; prop != NULL; prop = next) {
next = prop->next;
- kfree(prop->name);
- kfree(prop->value);
- kfree(prop);
+ __of_prop_free(prop);
}
}
@@ -426,9 +433,7 @@ struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
return new;
err_free:
- kfree(new->name);
- kfree(new->value);
- kfree(new);
+ __of_prop_free(new);
return NULL;
}
@@ -470,9 +475,7 @@ struct device_node *__of_node_dup(const struct device_node *np,
if (!new_pp)
goto err_prop;
if (__of_add_property(node, new_pp)) {
- kfree(new_pp->name);
- kfree(new_pp->value);
- kfree(new_pp);
+ __of_prop_free(new_pp);
goto err_prop;
}
}
@@ -667,6 +670,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
{
struct of_changeset_entry *ce, *cen;
+ /*
+ * When a device is deleted, the device links to/from it are also queued
+ * for deletion. Until these device links are freed, the devices
+ * themselves aren't freed. If the device being deleted is due to an
+ * overlay change, this device might be holding a reference to a device
+ * node that will be freed. So, wait until all already pending device
+ * links are deleted before freeing a device node. This ensures we don't
+ * free any device node that has a non-zero reference count.
+ */
+ device_link_wait_removal();
+
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
__of_changeset_entry_destroy(ce);
}
@@ -921,11 +935,8 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs,
return -ENOMEM;
ret = of_changeset_add_property(ocs, np, new_pp);
- if (ret) {
- kfree(new_pp->name);
- kfree(new_pp->value);
- kfree(new_pp);
- }
+ if (ret)
+ __of_prop_free(new_pp);
return ret;
}
@@ -1021,10 +1032,9 @@ int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
const u32 *array, size_t sz)
{
struct property prop;
- __be32 *val;
- int i, ret;
+ __be32 *val __free(kfree) = kcalloc(sz, sizeof(__be32), GFP_KERNEL);
+ int i;
- val = kcalloc(sz, sizeof(__be32), GFP_KERNEL);
if (!val)
return -ENOMEM;
@@ -1034,9 +1044,6 @@ int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
prop.length = sizeof(u32) * sz;
prop.value = (void *)val;
- ret = of_changeset_add_prop_helper(ocs, np, &prop);
- kfree(val);
-
- return ret;
+ return of_changeset_add_prop_helper(ocs, np, &prop);
}
EXPORT_SYMBOL_GPL(of_changeset_add_prop_u32_array);
diff --git a/drivers/of/module.c b/drivers/of/module.c
index 0e8aa974f0f2..780fd82a7ecc 100644
--- a/drivers/of/module.c
+++ b/drivers/of/module.c
@@ -16,19 +16,28 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
ssize_t csize;
ssize_t tsize;
+ /*
+ * Prevent a kernel oops in vsnprintf() -- it only allows passing a
+ * NULL ptr when the length is also 0. Also filter out the negative
+ * lengths...
+ */
+ if ((len > 0 && !str) || len < 0)
+ return -EINVAL;
+
/* Name & Type */
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
of_node_get_device_type(np));
tsize = csize;
+ if (csize >= len)
+ csize = len > 0 ? len - 1 : 0;
len -= csize;
- if (str)
- str += csize;
+ str += csize;
of_property_for_each_string(np, "compatible", p, compat) {
csize = strlen(compat) + 1;
tsize += csize;
- if (csize > len)
+ if (csize >= len)
continue;
csize = snprintf(str, len, "C%s", compat);
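Standalone sketch of the truncation handling of_modalias() gains above: snprintf() returns the length it would have written, so the cursor advance has to be clamped to the space that was actually available while the untruncated total keeps growing. emit() is illustrative only.

#include <stdio.h>
#include <sys/types.h>

static ssize_t emit(char *str, ssize_t len, ssize_t *total, const char *part)
{
        ssize_t csize = snprintf(str, len, "%s", part);

        *total += csize;                /* length the full output would need */
        if (csize >= len)               /* snprintf() reports the would-be size */
                csize = len > 0 ? len - 1 : 0;
        return csize;                   /* safe amount to advance the cursor */
}

int main(void)
{
        char buf[8];
        ssize_t total = 0, used = emit(buf, sizeof(buf), &total, "0123456789");

        printf("buf=\"%s\" used=%zd needed=%zd\n", buf, used, total);
        return 0;
}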
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 485483524b7f..94fc0aa07af9 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -123,6 +123,7 @@ extern void *__unflatten_device_tree(const void *blob,
* own the devtree lock or work on detached trees only.
*/
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
+void __of_prop_free(struct property *prop);
struct device_node *__of_node_dup(const struct device_node *np,
const char *full_name);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 8236ecae2953..46e1c3fbc769 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -437,17 +437,10 @@ void __init fdt_init_reserved_mem(void)
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
- int len;
- const __be32 *prop;
int err = 0;
bool nomap;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
- prop = of_get_flat_dt_prop(node, "phandle", &len);
- if (!prop)
- prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
- if (prop)
- rmem->phandle = of_read_number(prop, len/4);
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
@@ -477,19 +470,6 @@ void __init fdt_init_reserved_mem(void)
}
}
-static inline struct reserved_mem *__find_rmem(struct device_node *node)
-{
- unsigned int i;
-
- if (!node->phandle)
- return NULL;
-
- for (i = 0; i < reserved_mem_count; i++)
- if (reserved_mem[i].phandle == node->phandle)
- return &reserved_mem[i];
- return NULL;
-}
-
struct rmem_assigned_device {
struct device *dev;
struct reserved_mem *rmem;
@@ -534,7 +514,7 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
return 0;
}
- rmem = __find_rmem(target);
+ rmem = of_reserved_mem_lookup(target);
of_node_put(target);
if (!rmem || !rmem->ops || !rmem->ops->device_init)
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2ae7e9d24a64..4d861a75d694 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -262,9 +262,7 @@ static struct property *dup_and_fixup_symbol_prop(
return new_prop;
err_free_new_prop:
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
+ __of_prop_free(new_prop);
err_free_target_path:
kfree(target_path);
@@ -361,11 +359,8 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
target->np, new_prop->name);
- if (ret) {
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
- }
+ if (ret)
+ __of_prop_free(new_prop);
return ret;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index a6358ee99b74..1c83e68f805b 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -40,15 +40,12 @@
*/
bool of_graph_is_present(const struct device_node *node)
{
- struct device_node *ports, *port;
+ struct device_node *ports __free(device_node) = of_get_child_by_name(node, "ports");
- ports = of_get_child_by_name(node, "ports");
if (ports)
node = ports;
- port = of_get_child_by_name(node, "port");
- of_node_put(ports);
- of_node_put(port);
+ struct device_node *port __free(device_node) = of_get_child_by_name(node, "port");
return !!port;
}
@@ -579,7 +576,8 @@ EXPORT_SYMBOL_GPL(of_prop_next_string);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint)
{
- struct device_node *port_node = of_get_parent(node);
+ struct device_node *port_node __free(device_node) =
+ of_get_parent(node);
WARN_ONCE(!port_node, "%s(): endpoint %pOF has no parent node\n",
__func__, node);
@@ -594,8 +592,6 @@ int of_graph_parse_endpoint(const struct device_node *node,
of_property_read_u32(port_node, "reg", &endpoint->port);
of_property_read_u32(node, "reg", &endpoint->id);
- of_node_put(port_node);
-
return 0;
}
EXPORT_SYMBOL(of_graph_parse_endpoint);
@@ -610,25 +606,22 @@ EXPORT_SYMBOL(of_graph_parse_endpoint);
*/
struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
{
- struct device_node *node, *port;
+ struct device_node *node __free(device_node) = of_get_child_by_name(parent, "ports");
- node = of_get_child_by_name(parent, "ports");
if (node)
parent = node;
- for_each_child_of_node(parent, port) {
+ for_each_child_of_node_scoped(parent, port) {
u32 port_id = 0;
if (!of_node_name_eq(port, "port"))
continue;
of_property_read_u32(port, "reg", &port_id);
if (id == port_id)
- break;
+ return_ptr(port);
}
- of_node_put(node);
-
- return port;
+ return NULL;
}
EXPORT_SYMBOL(of_graph_get_port_by_id);
@@ -655,15 +648,13 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
* parent port node.
*/
if (!prev) {
- struct device_node *node;
+ struct device_node *node __free(device_node) =
+ of_get_child_by_name(parent, "ports");
- node = of_get_child_by_name(parent, "ports");
if (node)
parent = node;
port = of_get_child_by_name(parent, "port");
- of_node_put(node);
-
if (!port) {
pr_debug("graph: no port node found in %pOF\n", parent);
return NULL;
@@ -1052,15 +1043,13 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
const struct device_node *node = to_of_node(fwnode);
- struct device_node *port_node = of_get_parent(node);
+ struct device_node *port_node __free(device_node) = of_get_parent(node);
endpoint->local_fwnode = fwnode;
of_property_read_u32(port_node, "reg", &endpoint->port);
of_property_read_u32(node, "reg", &endpoint->id);
- of_node_put(port_node);
-
return 0;
}
@@ -1252,6 +1241,9 @@ DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
DEFINE_SIMPLE_PROP(post_init_providers, "post-init-providers", NULL)
+DEFINE_SIMPLE_PROP(access_controllers, "access-controllers", "#access-controller-cells")
+DEFINE_SIMPLE_PROP(pses, "pses", "#pse-cells")
+DEFINE_SIMPLE_PROP(power_supplies, "power-supplies", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1311,6 +1303,57 @@ static struct device_node *parse_interrupts(struct device_node *np,
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
+static struct device_node *parse_interrupt_map(struct device_node *np,
+ const char *prop_name, int index)
+{
+ const __be32 *imap, *imap_end, *addr;
+ struct of_phandle_args sup_args;
+ u32 addrcells, intcells;
+ int i, imaplen;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return NULL;
+
+ if (strcmp(prop_name, "interrupt-map"))
+ return NULL;
+
+ if (of_property_read_u32(np, "#interrupt-cells", &intcells))
+ return NULL;
+ addrcells = of_bus_n_addr_cells(np);
+
+ imap = of_get_property(np, "interrupt-map", &imaplen);
+ if (!imap || imaplen <= (addrcells + intcells))
+ return NULL;
+ imap_end = imap + imaplen;
+
+ while (imap < imap_end) {
+ addr = imap;
+ imap += addrcells;
+
+ sup_args.np = np;
+ sup_args.args_count = intcells;
+ for (i = 0; i < intcells; i++)
+ sup_args.args[i] = be32_to_cpu(imap[i]);
+ imap += intcells;
+
+ /*
+ * Upon success, the function of_irq_parse_raw() returns
+ * interrupt controller DT node pointer in sup_args.np.
+ */
+ if (of_irq_parse_raw(addr, &sup_args))
+ return NULL;
+
+ if (!index)
+ return sup_args.np;
+
+ of_node_put(sup_args.np);
+ imap += sup_args.args_count + 1;
+ index--;
+ }
+
+ return NULL;
+}
+
static struct device_node *parse_remote_endpoint(struct device_node *np,
const char *prop_name,
int index)
@@ -1357,8 +1400,12 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_backlight, },
{ .parse_prop = parse_panel, },
{ .parse_prop = parse_msi_parent, },
+ { .parse_prop = parse_pses, },
+ { .parse_prop = parse_power_supplies, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
+ { .parse_prop = parse_interrupt_map, },
+ { .parse_prop = parse_access_controllers, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
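A small kernel-style sketch of the scoped child iteration of_graph_get_port_by_id() now uses: for_each_child_of_node_scoped() drops each child reference automatically, and return_ptr() transfers ownership of the one child the caller is meant to keep. example_first_available() is hypothetical and simply assumes the scoped helpers used in the hunk above.

static struct device_node *example_first_available(struct device_node *parent)
{
        for_each_child_of_node_scoped(parent, child) {
                if (of_device_is_available(child))
                        return_ptr(child);      /* hand this reference to the caller */
        }
        return NULL;                            /* all other references auto-dropped */
}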
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index b278ab4338ce..2780928764a4 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "OF: resolver: " fmt
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -74,11 +75,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
{
struct device_node *refnode;
struct property *prop;
- char *value, *cur, *end, *node_path, *prop_name, *s;
+ char *value __free(kfree) = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL);
+ char *cur, *end, *node_path, *prop_name, *s;
int offset, len;
int err = 0;
- value = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL);
if (!value)
return -ENOMEM;
@@ -89,23 +90,19 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
node_path = cur;
s = strchr(cur, ':');
- if (!s) {
- err = -EINVAL;
- goto err_fail;
- }
+ if (!s)
+ return -EINVAL;
*s++ = '\0';
prop_name = s;
s = strchr(s, ':');
- if (!s) {
- err = -EINVAL;
- goto err_fail;
- }
+ if (!s)
+ return -EINVAL;
*s++ = '\0';
err = kstrtoint(s, 10, &offset);
if (err)
- goto err_fail;
+ return err;
refnode = __of_find_node_by_full_path(of_node_get(overlay), node_path);
if (!refnode)
@@ -117,22 +114,16 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
}
of_node_put(refnode);
- if (!prop) {
- err = -ENOENT;
- goto err_fail;
- }
+ if (!prop)
+ return -ENOENT;
- if (offset < 0 || offset + sizeof(__be32) > prop->length) {
- err = -EINVAL;
- goto err_fail;
- }
+ if (offset < 0 || offset + sizeof(__be32) > prop->length)
+ return -EINVAL;
*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
-err_fail:
- kfree(value);
- return err;
+ return 0;
}
/* compare nodes taking into account that 'name' strips out the @ part */
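Standalone sketch of the "<node path>:<property>:<offset>" parsing that update_usages_of_a_phandle_reference() performs on its scratch copy, using the same split-and-terminate strchr() approach; parse_fixup() is illustrative and atoi() stands in for kstrtoint().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_fixup(char *cur, char **path, char **prop, int *offset)
{
        char *s;

        *path = cur;
        s = strchr(cur, ':');
        if (!s)
                return -1;
        *s++ = '\0';                    /* terminate the node path */

        *prop = s;
        s = strchr(s, ':');
        if (!s)
                return -1;
        *s++ = '\0';                    /* terminate the property name */

        *offset = atoi(s);
        return 0;
}

int main(void)
{
        char buf[] = "/fragment@0/__overlay__/node:clocks:4";
        char *path, *prop;
        int offset;

        if (!parse_fixup(buf, &path, &prop, &offset))
                printf("%s %s %d\n", path, prop, offset);
        return 0;
}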
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 6b5c36b6a758..445ad13dab98 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -795,15 +795,11 @@ static void __init of_unittest_property_copy(void)
new = __of_prop_dup(&p1, GFP_KERNEL);
unittest(new && propcmp(&p1, new), "empty property didn't copy correctly\n");
- kfree(new->value);
- kfree(new->name);
- kfree(new);
+ __of_prop_free(new);
new = __of_prop_dup(&p2, GFP_KERNEL);
unittest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n");
- kfree(new->value);
- kfree(new->name);
- kfree(new);
+ __of_prop_free(new);
#endif
}
@@ -2815,7 +2811,7 @@ static int unittest_i2c_mux_probe(struct i2c_client *client)
if (!muxc)
return -ENOMEM;
for (i = 0; i < nchans; i++) {
- if (i2c_mux_add_adapter(muxc, 0, i, 0)) {
+ if (i2c_mux_add_adapter(muxc, 0, i)) {
dev_err(dev, "Failed to register mux #%d\n", i);
i2c_mux_del_adapters(muxc);
return -ENODEV;
@@ -3718,9 +3714,7 @@ static __init void of_unittest_overlay_high_level(void)
goto err_unlock;
}
if (__of_add_property(of_symbols, new_prop)) {
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
+ __of_prop_free(new_prop);
/* "name" auto-generated by unflatten */
if (!strcmp(prop->name, "name"))
continue;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e233734b7220..cb4611fe1b5b 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2394,7 +2394,8 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
const char * const *names, struct device ***virt_devs)
{
- struct device *virt_dev;
+ struct device *virt_dev, *gdev;
+ struct opp_table *genpd_table;
int index = 0, ret = -EINVAL;
const char * const *name = names;
@@ -2428,6 +2429,34 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
}
/*
+ * The required_opp_tables parsing is not perfect, as the OPP
+ * core does the parsing solely based on the DT node pointers.
+ * The core sets the required_opp_tables entry to the first OPP
+ * table in the "opp_tables" list that matches the node pointer.
+ *
+ * If the target DT OPP table is used by multiple devices and
+ * they all create separate instances of 'struct opp_table' from
+ * it, then it is possible that the required_opp_tables entry
+ * may be set to the incorrect sibling device.
+ *
+ * Cross check it again and fix if required.
+ */
+ gdev = dev_to_genpd_dev(virt_dev);
+ if (IS_ERR(gdev))
+ return PTR_ERR(gdev);
+
+ genpd_table = _find_opp_table(gdev);
+ if (!IS_ERR(genpd_table)) {
+ if (genpd_table != opp_table->required_opp_tables[index]) {
+ dev_pm_opp_put_opp_table(opp_table->required_opp_tables[index]);
+ opp_table->required_opp_tables[index] = genpd_table;
+ } else {
+ dev_pm_opp_put_opp_table(genpd_table);
+ }
+ }
+
+ /*
* Add the virtual genpd device as a user of the OPP table, so
* we can call dev_pm_opp_set_opp() on it directly.
*
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index f9f0b22bccbb..282eb5966fd0 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1494,20 +1494,26 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
return 0;
}
-/*
- * Callback function provided to the Energy Model framework upon registration.
+/**
+ * dev_pm_opp_calc_power() - Calculate power value for device with EM
+ * @dev : Device for which an Energy Model has to be registered
+ * @uW : New power value that is calculated
+ * @kHz : Frequency for which the new power is calculated
+ *
* This computes the power estimated by @dev at @kHz if it is the frequency
* of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
* (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
* frequency and @uW to the associated power. The power is estimated as
* P = C * V^2 * f with C being the device's capacitance and V and f
* respectively the voltage and frequency of the OPP.
+ * It is also used as a callback function provided to the Energy Model
+ * framework upon registration.
*
* Returns -EINVAL if the power calculation failed because of missing
* parameters, 0 otherwise.
*/
-static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
- unsigned long *kHz)
+int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz)
{
struct dev_pm_opp *opp;
struct device_node *np;
@@ -1544,6 +1550,7 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
return 0;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
static bool _of_has_opp_microwatt_property(struct device *dev)
{
@@ -1619,7 +1626,7 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
goto failed;
}
- EM_SET_ACTIVE_POWER_CB(em_cb, _get_power);
+ EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power);
register_em:
ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
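A worked example of the P = C * V^2 * f estimate described in the new kernel-doc, in plain SI units (farads, volts, hertz giving watts); the exported helper does the same arithmetic in fixed-point micro/milli units, so this is only a numeric illustration.

#include <stdio.h>

static double est_power_w(double cap_f, double volt_v, double freq_hz)
{
        return cap_f * volt_v * volt_v * freq_hz;
}

int main(void)
{
        /* e.g. 1.5 nF effective capacitance at 0.95 V and 1.8 GHz: ~2.44 W */
        printf("%.3f W\n", est_power_w(1.5e-9, 0.95, 1.8e9));
        return 0;
}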
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9ce0d20a6c58..feef537257d0 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1022,7 +1022,7 @@ static const struct dma_map_ops ccio_ops = {
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 784037837f65..fc3863c09f83 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1090,7 +1090,7 @@ static const struct dma_map_ops sba_ops = {
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6449056b57dd..30f031de9cfe 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -36,10 +36,13 @@ DEFINE_RAW_SPINLOCK(pci_lock);
int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
- int res; \
unsigned long flags; \
u32 data = 0; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
if (res) \
@@ -47,6 +50,7 @@ int noinline pci_bus_read_config_##size \
else \
*value = (type)data; \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -54,12 +58,16 @@ int noinline pci_bus_read_config_##size \
int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
- int res; \
unsigned long flags; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -216,24 +224,27 @@ static noinline void pci_wait_cfg(struct pci_dev *dev)
}
/* Returns 0 on success, negative values indicate error. */
-#define PCI_USER_READ_CONFIG(size, type) \
+#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
- pos, sizeof(type), &data); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), &data); \
+ raw_spin_unlock_irq(&pci_lock); \
if (ret) \
PCI_SET_ERROR_RESPONSE(val); \
else \
*val = (type)data; \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -243,15 +254,18 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
- pos, sizeof(type), val); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), val); \
+ raw_spin_unlock_irq(&pci_lock); \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
@@ -275,6 +289,8 @@ void pci_cfg_access_lock(struct pci_dev *dev)
{
might_sleep();
+ lock_map_acquire(&dev->cfg_access_lock);
+
raw_spin_lock_irq(&pci_lock);
if (dev->block_cfg_access)
pci_wait_cfg(dev);
@@ -329,6 +345,8 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
raw_spin_unlock_irqrestore(&pci_lock, flags);
wake_up_all(&pci_cfg_wait);
+
+ lock_map_release(&dev->cfg_access_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 81c50dc64da9..e0cc4560dfde 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -99,14 +99,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -746,6 +743,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 0e406677060d..d2d17d37d3e0 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -467,6 +467,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
return 0;
}
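Kernel-style sketch of the endpoint bring-up sequence the glue drivers converge on in this series: allocate and parse with dw_pcie_ep_init(), program the DWC CSRs once a stable refclk is guaranteed with dw_pcie_ep_init_registers(), then notify the EPF core, unwinding with dw_pcie_ep_deinit() on failure. example_add_pcie_ep() is a made-up helper mirroring the dra7xx change above.

static int example_add_pcie_ep(struct device *dev, struct dw_pcie_ep *ep)
{
        int ret;

        ret = dw_pcie_ep_init(ep);
        if (ret)
                return ret;

        ret = dw_pcie_ep_init_registers(ep);
        if (ret) {
                dev_err(dev, "Failed to initialize DWC endpoint registers\n");
                dw_pcie_ep_deinit(ep);
                return ret;
        }

        dw_pcie_ep_init_notify(ep);
        return 0;
}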
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 99a60270b26c..917c69edee1d 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1123,6 +1123,16 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 844de4418724..d3a7d14ee685 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -1286,6 +1286,15 @@ static int ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
@@ -1295,6 +1304,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 1f6ee1460ec2..7dde6d5fa4d8 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -279,6 +279,15 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9ed0a9ba7619..a4630b92489b 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -441,7 +441,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 746a11dcb67f..47391d7d3a73 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -15,6 +15,10 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -23,6 +27,10 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+/**
+ * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete
+ * @ep: DWC EP device
+ */
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -31,6 +39,14 @@ void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -61,6 +77,11 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -440,6 +461,13 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -451,6 +479,14 @@ int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
@@ -500,6 +536,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -519,6 +564,14 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -566,22 +619,42 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources (such as eDMA) after a fundamental
+ * reset like PERST#. Note that this API is only applicable for drivers
+ * supporting PERST# or any other method of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
dw_pcie_edma_remove(pci);
+ ep->epc->init_complete = false;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
+
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed since that
+ * will be taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_ep_cleanup(ep);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
-
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_exit);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
{
@@ -601,14 +674,27 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from the host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
unsigned int offset, ptm_cap_base;
unsigned int nbars;
u8 hdr_type;
+ u8 func_no;
+ int i, ret;
+ void *addr;
u32 reg;
- int i;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -619,6 +705,58 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
+
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
@@ -658,22 +796,32 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
- void *addr;
- u8 func_no;
struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
INIT_LIST_HEAD(&ep->func_list);
@@ -691,26 +839,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->pre_init)
ep->ops->pre_init(ep);
- dw_pcie_version_detect(pci);
-
- dw_pcie_iatu_detect(pci);
-
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -724,28 +852,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
-
- if (ep->ops->init)
- ep->ops->init(ep);
-
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
- goto err_ep_deinit;
+ return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
@@ -756,36 +867,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_remove_edma;
-
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
-err_ep_deinit:
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
-
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
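
The split of dw_pcie_ep_init() into resource allocation, register programming, and notification shows up the same way in each controller conversion that follows. A minimal probe-time sketch, assuming the driver sits next to pcie-designware.h; the foo_* name is illustrative and not part of this series:

#include "pcie-designware.h"

/* Probe-time sketch of the split EP bring-up introduced by this series */
static int foo_pcie_ep_setup(struct dw_pcie *pci, struct device *dev)
{
        int ret;

        /* Allocate EP resources and create the EPC device */
        ret = dw_pcie_ep_init(&pci->ep);
        if (ret)
                return ret;

        /* Program the DWC EP registers; needs an active refclk */
        ret = dw_pcie_ep_init_registers(&pci->ep);
        if (ret) {
                dev_err(dev, "Failed to initialize DWC endpoint registers\n");
                dw_pcie_ep_deinit(&pci->ep);
                return ret;
        }

        /* Tell already-bound EPF drivers to run their core_init handlers */
        dw_pcie_ep_init_notify(&pci->ep);

        return 0;
}

dw_pcie_ep_deinit() only tears down what dw_pcie_ep_init() created, so it is the matching undo step when register programming fails.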
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 778588b4be70..8490c5d6ff9f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -145,6 +145,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 26dae4837462..f8e5431a207b 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -333,7 +333,6 @@ struct dw_pcie_rp {
struct dw_pcie_ep_ops {
void (*pre_init)(struct dw_pcie_ep *ep);
void (*init)(struct dw_pcie_ep *ep);
- void (*deinit)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
@@ -670,9 +669,10 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
@@ -693,7 +693,7 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
@@ -702,7 +702,11 @@ static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 5e8e54f597dd..98bbc83182b4 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -396,6 +396,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -430,11 +431,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 36e5e80cd22f..2fb8c15e7a91 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -463,7 +463,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
@@ -507,6 +507,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
return;
}
+ dw_pcie_ep_cleanup(&pci->ep);
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -774,7 +775,6 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
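
Controllers that handle a fundamental reset such as PERST# (the Qualcomm and Tegra patches in this series) pair the new dw_pcie_ep_cleanup() with a rerun of the register programming once the reset is released. A hedged sketch of the two callbacks; the foo_* names are illustrative and the exact notification point is driver specific:

static void foo_pcie_ep_perst_assert(struct dw_pcie *pci)
{
        /* PERST# invalidates eDMA state, so release the EP resources */
        dw_pcie_ep_cleanup(&pci->ep);
}

static int foo_pcie_ep_perst_deassert(struct dw_pcie *pci)
{
        int ret;

        /* refclk is active again, so the CSRs can be reprogrammed */
        ret = dw_pcie_ep_init_registers(&pci->ep);
        if (ret)
                return ret;

        /* Re-deliver the initialization complete event to EPF drivers */
        dw_pcie_ep_init_notify(&pci->ep);

        return 0;
}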
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 0be760ed420b..cfeccc2f9ee1 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -352,11 +352,8 @@ static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
{
- struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
- struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
-
writel(0, rcar->base + PCIEDMAINTSTSEN);
rcar_gen4_pcie_common_deinit(rcar);
}
@@ -410,7 +407,6 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
.init = rcar_gen4_pcie_ep_init,
- .deinit = rcar_gen4_pcie_ep_deinit,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
.get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
@@ -420,18 +416,36 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
return -ENODEV;
ep->ops = &pcie_ep_ops;
- return dw_pcie_ep_init(ep);
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ return ret;
}
static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
- dw_pcie_ep_exit(&rcar->dw.ep);
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
}
/* Common */
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 1f7b662cb8e1..93f5433c5c55 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1715,6 +1715,8 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
if (ret)
dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
reset_control_assert(pcie->core_rst);
tegra_pcie_disable_phy(pcie);
@@ -1895,7 +1897,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
@@ -2004,7 +2006,6 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
@@ -2273,11 +2274,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 639bc2e12476..a2b844268e28 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -399,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&priv->pci.ep);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 79e225edb42a..d97b956e6e57 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -202,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 05967c6c0b42..047e2cef5afc 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -542,6 +542,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index c9046e97a1d2..136274533656 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
@@ -153,7 +151,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -609,6 +607,8 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
PCIE_CLIENT_CONFIG);
+ pci_epc_init_notify(epc);
+
return 0;
err_epc_mem_exit:
pci_epc_mem_exit(epc);
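
The Rockchip set_bar() change stops deriving the BAR width from its size and trusts the flags prepared by the EPF core instead. As a one-line illustration (the example_* helper is hypothetical, not part of the patch):

#include <linux/pci.h>
#include <linux/pci-epf.h>

/* The BAR width now follows the flags set by pci_epf_alloc_space() */
static bool example_bar_is_64bit(const struct pci_epf_bar *epf_bar)
{
        return epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64;
}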
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e3aab5edaf70..652d63df9d22 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -383,11 +383,13 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
complete(task->private);
}
-static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
u8 *protocol)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
- *index);
+ *index) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
+ (capver >= 2) ? 2 : 0);
__le32 request_pl_le = cpu_to_le32(request_pl);
__le32 response_pl_le;
u32 response_pl;
@@ -421,13 +423,17 @@ static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
+ u32 hdr = 0;
+
+ pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);
do {
int rc;
u16 vid;
u8 prot;
- rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
+ &vid, &prot);
if (rc)
return rc;
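
The discovery request dword built by the updated pci_doe_discovery() now carries a version field gated on the DOE Extended Capability version (PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER is the define this series relies on). A small sketch of that computation; the wrapper function is illustrative:

#include <linux/bitfield.h>
#include <linux/pci.h>

/* Dword 3 of a DOE discovery request: index plus, for DOE r2+, version 2 */
static u32 example_doe_disc_req(u8 capver, u8 index)
{
        return FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX, index) |
               FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
                          capver >= 2 ? 2 : 0);
}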
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index cd4ffb39dcdc..977fb79c1567 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -690,50 +690,35 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
- struct pci_epf_bar *epf_bar;
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- epf_bar = &epf->bar[bar];
+ if (!epf_test->reg[bar])
+ continue;
- if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
}
}
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar, add;
- int ret;
- struct pci_epf_bar *epf_bar;
+ int bar, ret;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
-
- epc_features = epf_test->epc_features;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
-
- if (epc_features->bar[bar].type == BAR_RESERVED)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
+ &epf->bar[bar]);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -753,6 +738,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
+ bool linkup_notifier = false;
bool msix_capable = false;
bool msi_capable = true;
int ret;
@@ -795,6 +781,10 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ linkup_notifier = epc_features->linkup_notifier;
+ if (!linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
return 0;
}
@@ -817,14 +807,13 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
- struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
bool msix_capable;
void *base;
- int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ enum pci_barno bar;
const struct pci_epc_features *epc_features;
size_t test_reg_size;
@@ -849,16 +838,14 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+ for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar = pci_epc_get_next_free_bar(epc_features, bar);
+ if (bar == NO_BAR)
+ break;
if (bar == test_reg_bar)
continue;
- if (epc_features->bar[bar].type == BAR_RESERVED)
- continue;
-
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
@@ -870,19 +857,6 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
-static void pci_epf_configure_bar(struct pci_epf *epf,
- const struct pci_epc_features *epc_features)
-{
- struct pci_epf_bar *epf_bar;
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- epf_bar = &epf->bar[i];
- if (epc_features->bar[i].only_64bit)
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- }
-}
-
static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
@@ -890,8 +864,6 @@ static int pci_epf_test_bind(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- bool linkup_notifier = false;
- bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -902,12 +874,9 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EOPNOTSUPP;
}
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -916,21 +885,12 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- if (!core_init_notifier) {
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return ret;
- }
-
epf_test->dma_supported = true;
ret = pci_epf_test_init_dma_chan(epf_test);
if (ret)
epf_test->dma_supported = false;
- if (!linkup_notifier && !core_init_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
-
return 0;
}
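
pci-epf-test now walks only the BARs the controller actually exposes instead of skipping reserved ones by hand. The same iteration pattern, reduced to a self-contained sketch (the example_* wrapper is illustrative):

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Visit every BAR the controller reports as usable for this function */
static void example_for_each_free_bar(struct pci_epf *epf,
                                      const struct pci_epc_features *epc_features)
{
        enum pci_barno bar;

        for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
                bar = pci_epc_get_next_free_bar(epc_features, bar);
                if (bar == NO_BAR)
                        break;

                dev_dbg(&epf->dev, "BAR%d is usable\n", bar);
        }
}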
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 0ea64e24ed61..3b21e28f9b59 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -64,6 +64,9 @@ static int pci_secondary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization complete event */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -125,6 +128,9 @@ static int pci_primary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization complete event */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -230,6 +236,9 @@ static int pci_epc_epf_link(struct config_item *epc_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization complete event */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index da3fc0795b0b..47d27ec7439d 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -748,11 +748,33 @@ void pci_epc_init_notify(struct pci_epc *epc)
epf->event_ops->core_init(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = true;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_notify_pending_init() - Notify the EPF device of a pending EPC
+ * initialization complete event
+ * @epc: the EPC device whose core initialization is pending to be notified
+ * @epf: the EPF device to be notified
+ *
+ * Invoke to notify the EPF device that the EPC device's initialization is
+ * complete. This is used to deliver the notification if the EPC initialization
+ * completed before the EPF driver was bound.
+ */
+void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
+{
+ if (epc->init_complete) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->core_init)
+ epf->event_ops->core_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
+
+/**
* pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
* the BME event from the Root complex
* @epc: the EPC device that received the BME event
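
Together, the new init_complete flag and pci_epc_notify_pending_init() make the core-init notification order-independent: the controller signals completion once, and a function bound later still receives core_init(). A condensed sketch of the two call sites; the example_* wrappers are illustrative, the real callers being the DWC core and the configfs code in this series:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Controller side: registers are programmed, mark the EPC initialized and
 * run core_init() for every EPF that is already bound. */
static void example_epc_ready(struct pci_epc *epc)
{
        pci_epc_init_notify(epc);
}

/* Link time: if the EPC finished initializing before this EPF was bound,
 * replay the core_init() notification for it alone. */
static void example_epf_linked(struct pci_epc *epc, struct pci_epf *epf)
{
        pci_epc_notify_pending_init(epc, epf);
}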
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 0a28a0b0911b..323f2a60ab16 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -255,6 +255,8 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
+ * Flag PCI_BASE_ADDRESS_MEM_TYPE_64 will automatically get set if the BAR
+ * can only be a 64-bit BAR, or if the requested size is larger than 2 GB.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
const struct pci_epc_features *epc_features,
@@ -304,9 +306,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
epf_bar[bar].barno = bar;
- epf_bar[bar].flags |= upper_32_bits(size) ?
- PCI_BASE_ADDRESS_MEM_TYPE_64 :
- PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ else
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_32;
return space;
}
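
With this change an EPF driver no longer sets the BAR type bits itself; pci_epf_alloc_space() picks 64-bit addressing for 64-bit-only BARs or large requests. A hedged usage sketch, with the BAR number and size chosen arbitrarily:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>

static int example_alloc_bar(struct pci_epf *epf,
                             const struct pci_epc_features *epc_features)
{
        void *base;

        base = pci_epf_alloc_space(epf, SZ_1M, BAR_2, epc_features,
                                   PRIMARY_INTERFACE);
        if (!base)
                return -ENOMEM;

        /*
         * epf->bar[BAR_2].flags now carries PCI_BASE_ADDRESS_MEM_TYPE_64 for
         * 64-bit-only BARs (or requests larger than 2 GB), otherwise
         * PCI_BASE_ADDRESS_MEM_TYPE_32; no manual flag handling is needed.
         */
        return 0;
}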
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index fdb8dd6ea24d..9d428b0ea524 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -6,6 +6,8 @@ cpcihp:
->set_power callbacks in struct cpci_hp_controller_ops. Why were they
introduced? Can they be removed from the struct?
+* The return code from pci_hp_add_bridge() is not checked.
+
cpqphp:
* The driver spawns a kthread cpqhp_event_thread() which is woken by the
@@ -16,6 +18,8 @@ cpqphp:
* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
+* The return code from pci_hp_add_bridge() is not checked.
+
ibmphp:
* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
@@ -43,13 +47,7 @@ ibmphp:
* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
-sgi_hotplug:
-
-* Several functions access the pci_slot member in struct hotplug_slot even
- though pci_hotplug.h declares it private. See sn_hp_destroy() for an
- example. Either the pci_slot member should no longer be declared private
- or sgi_hotplug should store a pointer to it in struct slot. Probably the
- former.
+* The return code from pci_hp_add_bridge() is not checked.
shpchp:
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index be679aa5db64..b956ce591f96 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -213,8 +213,8 @@ EXPORT_SYMBOL(pci_disable_msix);
* * %PCI_IRQ_MSIX Allow trying MSI-X vector allocations
* * %PCI_IRQ_MSI Allow trying MSI vector allocations
*
- * * %PCI_IRQ_LEGACY Allow trying legacy INTx interrupts, if
- * and only if @min_vecs == 1
+ * * %PCI_IRQ_INTX Allow trying INTx interrupts, if and
+ * only if @min_vecs == 1
*
* * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading
* the vectors around available CPUs
@@ -279,8 +279,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return nvecs;
}
- /* use legacy IRQ if allowed */
- if (flags & PCI_IRQ_LEGACY) {
+ /* use INTx IRQ if allowed */
+ if (flags & PCI_IRQ_INTX) {
if (min_vecs == 1 && dev->irq) {
/*
* Invoke the affinity spreading logic to ensure that
@@ -366,56 +366,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
- * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * @dev: The PCI device to operate on
- * @icookie: Pointer to an IMS implementation specific cookie for this
- * IMS instance (PASID, queue ID, pointer...).
- * The cookie content is copied into the MSI descriptor for the
- * interrupt chip callbacks or domain specific setup functions.
- * @affdesc: Optional pointer to an interrupt affinity descriptor
- *
- * There is no index for IMS allocations as IMS is an implementation
- * specific storage and does not have any direct associations between
- * index, which might be a pure software construct, and device
- * functionality. This association is established by the driver either via
- * the index - if there is a hardware table - or in case of purely software
- * managed IMS implementation the association happens via the
- * irq_write_msi_msg() callback of the implementation specific interrupt
- * chip, which utilizes the provided @icookie to store the MSI message in
- * the appropriate place.
- *
- * Return: A struct msi_map
- *
- * On success msi_map::index contains the allocated index (>= 0) and
- * msi_map::virq the allocated Linux interrupt number (> 0).
- *
- * On fail msi_map::index contains the error code and msi_map::virq
- * is set to 0.
- */
-struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc)
-{
- return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
- affdesc, icookie);
-}
-EXPORT_SYMBOL_GPL(pci_ims_alloc_irq);
-
-/**
- * pci_ims_free_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * which was allocated via pci_ims_alloc_irq()
- * @dev: The PCI device to operate on
- * @map: A struct msi_map describing the interrupt to free as
- * returned from pci_ims_alloc_irq()
- */
-void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map)
-{
- if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
- return;
- msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index);
-}
-EXPORT_SYMBOL_GPL(pci_ims_free_irq);
-
-/**
* pci_free_irq_vectors() - Free previously allocated IRQs for a device
* @dev: the PCI device to operate on
*
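
For callers, the renamed flag changes nothing in the calling convention; a minimal sketch requesting MSI-X or MSI with an INTx fallback (the vector counts are arbitrary):

#include <linux/pci.h>

static int example_request_vectors(struct pci_dev *pdev)
{
        int nvecs;

        nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
        if (nvecs < 0)
                return nvecs;

        /* pci_irq_vector(pdev, 0) now returns the first Linux IRQ number */
        return 0;
}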
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index cfd84a899c82..03d2dd25790d 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -355,65 +355,6 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
return (supported & feature_mask) == feature_mask;
}
-/**
- * pci_create_ims_domain - Create a secondary IMS domain for a PCI device
- * @pdev: The PCI device to operate on
- * @template: The MSI info template which describes the domain
- * @hwsize: The size of the hardware entry table or 0 if the domain
- * is purely software managed
- * @data: Optional pointer to domain specific data to be stored
- * in msi_domain_info::data
- *
- * Return: True on success, false otherwise
- *
- * An IMS domain is expected to have the following constraints:
- * - The index space is managed by the core code
- *
- * - There is no requirement for consecutive index ranges
- *
- * - The interrupt chip must provide the following callbacks:
- * - irq_mask()
- * - irq_unmask()
- * - irq_write_msi_msg()
- *
- * - The interrupt chip must provide the following optional callbacks
- * when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks
- * cannot operate directly on hardware, e.g. in the case that the
- * interrupt message store is in queue memory:
- * - irq_bus_lock()
- * - irq_bus_unlock()
- *
- * These callbacks are invoked from preemptible task context and are
- * allowed to sleep. In this case the mandatory callbacks above just
- * store the information. The irq_bus_unlock() callback is supposed
- * to make the change effective before returning.
- *
- * - Interrupt affinity setting is handled by the underlying parent
- * interrupt domain and communicated to the IMS domain via
- * irq_write_msi_msg().
- *
- * The domain is automatically destroyed when the PCI device is removed.
- */
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data)
-{
- struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
-
- if (!domain || !irq_domain_is_msi_parent(domain))
- return false;
-
- if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS ||
- !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) ||
- !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) ||
- !template->chip.irq_mask || !template->chip.irq_unmask ||
- !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity)
- return false;
-
- return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template,
- hwsize, data, NULL);
-}
-EXPORT_SYMBOL_GPL(pci_create_ims_domain);
-
/*
* Users of the generic MSI infrastructure expect a device to have a single ID,
* so with DMA aliases we have to pick the least-worst compromise. Devices with
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 682fa877478f..c5625dd9bf49 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -86,9 +86,11 @@ static int pcim_setup_msi_release(struct pci_dev *dev)
return 0;
ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
- if (!ret)
- dev->is_msi_managed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ dev->is_msi_managed = true;
+ return 0;
}
/*
@@ -99,9 +101,10 @@ static int pci_setup_msi_context(struct pci_dev *dev)
{
int ret = msi_setup_device_data(&dev->dev);
- if (!ret)
- ret = pcim_setup_msi_release(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ return pcim_setup_msi_release(dev);
}
/*
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index c2c7334152bc..03539e505372 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
+ if (!int_map)
+ return -ENOMEM;
mapp = int_map;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e5f243dd4288..59e0949fb079 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -142,8 +142,8 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
-u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
-u8 pci_cache_line_size;
+u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size __ro_after_init;
/*
* If we set up a device for bus mastering, we need to check the latency
@@ -1277,6 +1277,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
for (;;) {
u32 id;
+ if (pci_dev_is_disconnected(dev)) {
+ pci_dbg(dev, "disconnected; not waiting\n");
+ return -ENOTTY;
+ }
+
pci_read_config_dword(dev, PCI_COMMAND, &id);
if (!PCI_POSSIBLE_ERROR(id))
break;
@@ -2111,20 +2116,6 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
}
/**
- * pci_enable_device_io - Initialize a device for use with IO space
- * @dev: PCI device to be initialized
- *
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
- */
-int pci_enable_device_io(struct pci_dev *dev)
-{
- return pci_enable_device_flags(dev, IORESOURCE_IO);
-}
-EXPORT_SYMBOL(pci_enable_device_io);
-
-/**
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
@@ -2962,6 +2953,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
+ {
+ /*
+ * Changing the power state of the root port the dGPU is connected to fails
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
+ */
+ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "1972"),
+ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
+ },
+ },
#endif
{ }
};
@@ -4625,11 +4628,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
/*
* Ensure the updated LNKCTL parameters are used during link
- * training by checking that there is no ongoing link training to
- * avoid LTSSM race as recommended in Implementation Note at the
- * end of PCIe r6.0.1 sec 7.5.3.7.
+ * training by checking that there is no ongoing link training that
+ * may have started before link parameters were changed, so as to
+ * avoid LTSSM race as recommended in Implementation Note at the end
+ * of PCIe r6.1 sec 7.5.3.7.
*/
- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
return rc;
@@ -4879,6 +4883,7 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
+ lock_map_assert_held(&dev->cfg_access_lock);
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
@@ -4927,16 +4932,96 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
+static u16 cxl_port_dvsec(struct pci_dev *dev)
+{
+ return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
+ PCI_DVSEC_CXL_PORT);
+}
+
+static bool cxl_sbr_masked(struct pci_dev *dev)
+{
+ u16 dvsec, reg;
+ int rc;
+
+ dvsec = cxl_port_dvsec(dev);
+ if (!dvsec)
+ return false;
+
+ rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc || PCI_POSSIBLE_ERROR(reg))
+ return false;
+
+ /*
+ * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
+ * bit in Bridge Control has no effect. When 1, the Port generates
+ * hot reset when the SBR bit is set to 1.
+ */
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
+ return false;
+
+ return true;
+}
+
static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
int rc;
+ /*
+ * If "dev" is below a CXL port that has SBR control masked, SBR
+ * won't do anything, so return error.
+ */
+ if (bridge && cxl_sbr_masked(bridge)) {
+ if (probe)
+ return 0;
+
+ return -ENOTTY;
+ }
+
rc = pci_dev_reset_slot_function(dev, probe);
if (rc != -ENOTTY)
return rc;
return pci_parent_bus_reset(dev, probe);
}
+static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
+{
+ struct pci_dev *bridge;
+ u16 dvsec, reg, val;
+ int rc;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge)
+ return -ENOTTY;
+
+ dvsec = cxl_port_dvsec(bridge);
+ if (!dvsec)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc)
+ return -ENOTTY;
+
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
+ val = reg;
+ } else {
+ val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ val);
+ }
+
+ rc = pci_reset_bus_function(dev, probe);
+
+ if (reg != val)
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ reg);
+
+ return rc;
+}
+
void pci_dev_lock(struct pci_dev *dev)
{
/* block PM suspend, driver probe, etc. */
@@ -5021,6 +5106,7 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ pci_af_flr, .name = "af_flr" },
{ pci_pm_reset, .name = "pm" },
{ pci_reset_bus_function, .name = "bus" },
+ { cxl_reset_bus_function, .name = "cxl_bus" },
};
static ssize_t reset_method_show(struct device *dev,
@@ -5245,11 +5331,20 @@ void pci_init_reset_methods(struct pci_dev *dev)
*/
int pci_reset_function(struct pci_dev *dev)
{
+ struct pci_dev *bridge;
int rc;
if (!pci_reset_supported(dev))
return -ENOTTY;
+ /*
+ * If there's no upstream bridge, no locking is needed since there is
+ * no upstream bridge configuration to hold consistent.
+ */
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_dev_lock(bridge);
+
pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
@@ -5258,6 +5353,9 @@ int pci_reset_function(struct pci_dev *dev)
pci_dev_restore(dev);
pci_dev_unlock(dev);
+ if (bridge)
+ pci_dev_unlock(bridge);
+
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -6065,8 +6163,9 @@ EXPORT_SYMBOL(pcie_get_width_cap);
* and width, multiplying them, and applying encoding overhead. The result
* is in Mb/s, i.e., megabits/second of raw bandwidth.
*/
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
+static u32 pcie_bandwidth_capable(struct pci_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
{
*speed = pcie_get_speed_cap(dev);
*width = pcie_get_width_cap(dev);
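
The CXL-aware reset path above hinges on the Port DVSEC "Unmask SBR" bit; the check below mirrors cxl_sbr_masked() purely as an illustration of how the upstream bridge is queried (the example_* helper is not part of the patch):

#include <linux/pci.h>

/* True if a Secondary Bus Reset on this CXL port would actually do anything */
static bool example_cxl_sbr_usable(struct pci_dev *bridge)
{
        u16 dvsec, ctl;

        dvsec = pci_find_dvsec_capability(bridge, PCI_VENDOR_ID_CXL,
                                          PCI_DVSEC_CXL_PORT);
        if (!dvsec)
                return true;    /* not a CXL port, plain SBR semantics apply */

        if (pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &ctl))
                return false;

        return ctl & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
}

cxl_reset_bus_function() goes one step further: it temporarily sets the bit itself so the reset can take effect and restores the original value afterwards.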
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 17fed1846847..fd44565c4756 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -293,8 +293,6 @@ void pci_bus_put(struct pci_bus *bus);
const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 8999fcebde6a..17919b99fa66 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -47,7 +47,7 @@ config PCIEAER_INJECT
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
gotten from:
- https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ https://github.com/intel/aer-inject.git
config PCIEAER_CXL
bool "PCI Express CXL RAS support"
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 2dab275d252f..f81b2303bf6a 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -6,7 +6,7 @@
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
- * https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ * https://github.com/intel/aer-inject.git
*
* Copyright 2009 Intel Corporation.
* Huang Ying <ying.huang@intel.com>
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2428d278e015..cee2365e54b8 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,8 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
@@ -177,8 +179,8 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
/* Restore L0s/L1 if they were enabled */
if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, clnkctl);
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
}
}
@@ -189,21 +191,18 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-/* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
-#define ASPM_STATE_L1 (4) /* L1 state */
-#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
-#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
-#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
-#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
-#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
- ASPM_STATE_L1_2_MASK)
-#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
-#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
- ASPM_STATE_L1SS)
+/* Note: these are not register definitions */
+#define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
+#define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
+static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
+
+#define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_MASK)
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
@@ -275,10 +274,10 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return (ASPM_STATE_L0S | ASPM_STATE_L1);
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_POWER_SUPERSAVE:
/* Enable Everything */
- return ASPM_STATE_ALL;
+ return PCIE_LINK_STATE_ASPM_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -581,14 +580,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
latency_dw_l1 = calc_l1_latency(lnkcap_dw);
/* Check upstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
(latency_up_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
/* Check downstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
(latency_dw_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
/*
* Check L1 latency.
* Every switch on the path to root complex need 1
@@ -603,9 +602,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* substate latencies (and hence do not do any check).
*/
latency = max_t(u32, latency_up_l1, latency_dw_l1);
- if ((link->aspm_capable & ASPM_STATE_L1) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
(latency + l1_switch_latency > acceptable_l1))
- link->aspm_capable &= ~ASPM_STATE_L1;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L1;
l1_switch_latency += NSEC_PER_USEC;
link = link->parent;
@@ -741,13 +740,13 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
if (parent_l1ss_cap)
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
@@ -757,15 +756,15 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
&child_l1ss_ctl1);
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
- if (link->aspm_support & ASPM_STATE_L1_2_MASK)
+ if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
@@ -778,8 +777,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
- link->aspm_enabled = ASPM_STATE_ALL;
- link->aspm_disable = ASPM_STATE_ALL;
+ link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
+ link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
return;
}
@@ -814,19 +813,19 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* support L0s.
*/
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
- link->aspm_support |= ASPM_STATE_L0S;
+ link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
- link->aspm_support |= ASPM_STATE_L1;
+ link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
- link->aspm_enabled |= ASPM_STATE_L1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1;
aspm_l1ss_init(link);
@@ -876,7 +875,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
* If needed, disable L1, and it gets enabled later
* in pcie_config_aspm_link().
*/
- if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
+ if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) {
pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPM_L1);
pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
@@ -884,13 +883,13 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
}
val = 0;
- if (state & ASPM_STATE_L1_1)
+ if (state & PCIE_LINK_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
- if (state & ASPM_STATE_L1_2)
+ if (state & PCIE_LINK_STATE_L1_2)
val |= PCI_L1SS_CTL1_ASPM_L1_2;
- if (state & ASPM_STATE_L1_1_PCIPM)
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_1;
- if (state & ASPM_STATE_L1_2_PCIPM)
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
@@ -916,29 +915,29 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
state &= (link->aspm_capable & ~link->aspm_disable);
/* Can't enable any substates if L1 is not enabled */
- if (!(state & ASPM_STATE_L1))
- state &= ~ASPM_STATE_L1SS;
+ if (!(state & PCIE_LINK_STATE_L1))
+ state &= ~PCIE_LINK_STATE_L1SS;
/* Spec says both ports must be in D0 before enabling PCI PM substates*/
if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
- state &= ~ASPM_STATE_L1_SS_PCIPM;
- state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
+ state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
+ state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
}
/* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
- if (state & ASPM_STATE_L0S_UP)
+ if (state & PCIE_LINK_STATE_L0S_UP)
dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L0S_DW)
+ if (state & PCIE_LINK_STATE_L0S_DW)
upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L1) {
+ if (state & PCIE_LINK_STATE_L1) {
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
- if (link->aspm_capable & ASPM_STATE_L1SS)
+ if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
pcie_config_aspm_l1ss(link, state);
/*
@@ -947,11 +946,11 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
* upstream component first and then downstream, and vice
* versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
- if (state & ASPM_STATE_L1)
+ if (state & PCIE_LINK_STATE_L1)
pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_config_aspm_dev(child, dwstream);
- if (!(state & ASPM_STATE_L1))
+ if (!(state & PCIE_LINK_STATE_L1))
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
@@ -1324,6 +1323,28 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
+static u8 pci_calc_aspm_disable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1)
+ state |= PCIE_LINK_STATE_L1SS;
+
+ return state;
+}
+
+static u8 pci_calc_aspm_enable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1SS)
+ state |= PCIE_LINK_STATE_L1;
+
+ return state;
+}
+
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1346,19 +1367,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_disable |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- /* L1 PM substates require L1 */
- link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_disable |= ASPM_STATE_L1_1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_disable |= ASPM_STATE_L1_2;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_disable |= pci_calc_aspm_disable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
if (state & PCIE_LINK_STATE_CLKPM)
@@ -1414,20 +1423,7 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link->aspm_default = 0;
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_default |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- link->aspm_default |= ASPM_STATE_L1;
- /* L1 PM substates require L1 */
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1;
+ link->aspm_default = pci_calc_aspm_enable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
@@ -1563,12 +1559,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
if (state_enable) {
link->aspm_disable &= ~state;
/* need to enable L1 for substates */
- if (state & ASPM_STATE_L1SS)
- link->aspm_disable &= ~ASPM_STATE_L1;
+ if (state & PCIE_LINK_STATE_L1SS)
+ link->aspm_disable &= ~PCIE_LINK_STATE_L1;
} else {
link->aspm_disable |= state;
- if (state & ASPM_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= PCIE_LINK_STATE_L1SS;
}
pcie_config_aspm_link(link, policy_to_aspm_state(link));
@@ -1582,12 +1578,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
#define ASPM_ATTR(_f, _s) \
static ssize_t _f##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
\
static ssize_t _f##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t len) \
-{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
@@ -1654,12 +1650,12 @@ static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
static const u8 aspm_state_map[] = {
- ASPM_STATE_L0S,
- ASPM_STATE_L1,
- ASPM_STATE_L1_1,
- ASPM_STATE_L1_2,
- ASPM_STATE_L1_1_PCIPM,
- ASPM_STATE_L1_2_PCIPM,
+ PCIE_LINK_STATE_L0S,
+ PCIE_LINK_STATE_L1,
+ PCIE_LINK_STATE_L1_1,
+ PCIE_LINK_STATE_L1_2,
+ PCIE_LINK_STATE_L1_1_PCIPM,
+ PCIE_LINK_STATE_L1_2_PCIPM,
};
if (aspm_disabled || !link)
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 5f4914d313a1..e86298dbbcff 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
int status = 0;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
*/
- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
argv4.package.count = 1;
argv4.package.elements = &req;
- /*
- * Per Downstream Port Containment Related Enhancements ECN to PCI
- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
- * optional. Return success if it's not implemented.
- */
- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
u16 port;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
+ * EDR, the target is the port that experienced the containment
+ * event (PCI Firmware r3.3, sec 4.6.13).
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
@@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
}
/*
+ * Bit 31 represents the success/failure of the operation. If bit
+ * 31 is set, the operation failed.
+ */
+ if (obj->integer.value & BIT(31)) {
+ ACPI_FREE(obj);
+ pci_err(pdev, "Locate Port _DSM failed\n");
+ return NULL;
+ }
+
+ /*
* Firmware returns DPC port BDF details in following format:
* 15:8 = bus
* 7:3 = device
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 705893b5f7b0..31090770fffc 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -116,9 +116,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->mmio_enabled)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->mmio_enabled)
goto out;
err_handler = pdrv->err_handler;
@@ -137,9 +135,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->slot_reset)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -158,9 +154,7 @@ static int report_resume(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->resume)
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->resume)
goto out;
err_handler = pdrv->err_handler;
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 14a4b89a3b83..bb65dfe43409 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -187,15 +187,15 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
* interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
- goto legacy_irq;
+ goto intx_irq;
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
-legacy_irq:
- /* fall back to legacy IRQ */
- ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+intx_irq:
+ /* fall back to INTX IRQ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_INTX);
if (ret < 0)
return -ENODEV;
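/*
 * A usage sketch of the renamed flag (PCI_IRQ_INTX is the current name for
 * PCI_IRQ_LEGACY; request_any_irqs() is a hypothetical helper): a driver
 * that accepts any interrupt type can ask for all of them in one call and
 * let the core fall back from MSI-X to MSI to INTx.
 */
static int request_any_irqs(struct pci_dev *dev, int max_vecs)
{
	return pci_alloc_irq_vectors(dev, 1, max_vecs,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
}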
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 1325fbae2f28..8e696e547565 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -95,7 +95,7 @@ static void release_pcibus_dev(struct device *dev)
kfree(pci_bus);
}
-static struct class pcibus_class = {
+static const struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
.dev_groups = pcibus_groups,
@@ -1482,6 +1482,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
}
out:
+ /* Clear errors in the Secondary Status Register */
+ pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
+
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
pm_runtime_put(&dev->dev);
@@ -2543,6 +2546,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
+ lockdep_register_key(&dev->cfg_access_key);
+ lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev),
+ &dev->cfg_access_key, 0);
dma_set_max_seg_size(&dev->dev, 65536);
dma_set_seg_boundary(&dev->dev, 0xffffffff);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bf4833221816..568410e64ce6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3766,14 +3766,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
/*
- * Apparently the LSI / Agere FW643 can't recover after a Secondary Bus
- * Reset and requires a power-off or suspend/resume and rescan. Prevent
- * use of that reset.
- */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5900, quirk_no_bus_reset);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5901, quirk_no_bus_reset);
-
-/*
* Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
* automatically disables LTSSM when Secondary Bus Reset is received and
* the device stops working. Prevent bus reset for these devices. With
@@ -6261,3 +6253,23 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
pdev->d3cold_delay = 1000;
}
DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+
+#ifdef CONFIG_PCIEAER
+static void pci_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(pdev);
+ u32 val;
+
+ if (!parent || !parent->aer_cap)
+ return;
+
+ pci_info(parent, "mask Replay Timer Timeout Correctable Errors due to %s hardware defect",
+ pci_name(pdev));
+
+ pci_read_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, &val);
+ val |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9750, pci_mask_replay_timer_timeout);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9755, pci_mask_replay_timer_timeout);
+#endif
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index a9277dcf90ce..89dd38343f93 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -709,6 +709,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
drw_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.event_init = ali_drw_pmu_event_init,
.add = ali_drw_pmu_add,
@@ -746,18 +747,14 @@ static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct ali_drw_pmu_irq *irq;
struct ali_drw_pmu *drw_pmu;
unsigned int target;
- int ret;
- cpumask_t node_online_cpus;
irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
- ret = cpumask_and(&node_online_cpus,
- cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
- if (ret)
- target = cpumask_any_but(&node_online_cpus, cpu);
- else
+ target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
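/*
 * A minimal sketch of the migration pattern shared by the cpumask
 * conversions in this diff (pick_migration_target() is a hypothetical
 * name; cpumask_any_and_but() and cpumask_any_but() are the helpers used
 * above): prefer another online CPU on the same node, otherwise fall back
 * to any other online CPU.
 */
static unsigned int pick_migration_target(int node, unsigned int dying_cpu)
{
	unsigned int target;

	target = cpumask_any_and_but(cpumask_of_node(node),
				     cpu_online_mask, dying_cpu);
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, dying_cpu);

	return target;	/* >= nr_cpu_ids means no other CPU is online */
}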
diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
index bbc7285fd934..07446d784a1a 100644
--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
+++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
@@ -492,6 +492,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
*pmu = (struct ddr_pmu) {
.pmu = {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 6be03f81ae5d..a7fd80677919 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1409,6 +1409,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
cci_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 641471bd5eff..f4495ff6525f 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1265,6 +1265,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = ccn->dev,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 7ef9c7e4836b..e26ad1d3ed0b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1950,20 +1950,20 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_no
struct arm_cmn *cmn;
unsigned int target;
int node;
- cpumask_t mask;
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
if (cpu != cmn->cpu)
return 0;
node = dev_to_node(cmn->dev);
- if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
+
if (target < nr_cpu_ids)
arm_cmn_migrate(cmn, target);
+
return 0;
}
@@ -2482,6 +2482,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = cmn->dev,
.attr_groups = arm_cmn_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index b9a252272f1e..ba0cf2f466ef 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1206,6 +1206,7 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
cspmu->pmu = (struct pmu){
.task_ctx_nr = perf_invalid_context,
.module = cspmu->impl.module,
+ .parent = cspmu->dev,
.pmu_enable = arm_cspmu_enable,
.pmu_disable = arm_cspmu_disable,
.event_init = arm_cspmu_event_init,
@@ -1322,8 +1323,7 @@ static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct cpumask online_supported;
+ unsigned int dst;
struct arm_cspmu *cspmu =
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
@@ -1333,9 +1333,8 @@ static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
return 0;
/* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&online_supported, &cspmu->associated_cpus,
- cpu_online_mask);
- dst = cpumask_any_but(&online_supported, cpu);
+ dst = cpumask_any_and_but(&cspmu->associated_cpus,
+ cpu_online_mask, cpu);
if (dst >= nr_cpu_ids)
return 0;
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 8a81be2dd5ec..2ec96e204c40 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -673,6 +673,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
dmc620_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.event_init = dmc620_pmu_event_init,
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index bae3ca37f846..92248a24a1aa 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -230,15 +230,6 @@ static const struct attribute_group *dsu_pmu_attr_groups[] = {
NULL,
};
-static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
-{
- struct cpumask online_supported;
-
- cpumask_and(&online_supported,
- &dsu_pmu->associated_cpus, cpu_online_mask);
- return cpumask_any_but(&online_supported, cpu);
-}
-
static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
{
return (idx < dsu_pmu->num_counters) ||
@@ -751,6 +742,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
dsu_pmu->pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
+ .parent = &pdev->dev,
.module = THIS_MODULE,
.pmu_enable = dsu_pmu_enable,
.pmu_disable = dsu_pmu_disable,
@@ -827,14 +819,16 @@ static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
- cpuhp_node);
+ struct dsu_pmu *dsu_pmu;
+ unsigned int dst;
+
+ dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, cpuhp_node);
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
return 0;
- dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ dst = cpumask_any_and_but(&dsu_pmu->associated_cpus,
+ cpu_online_mask, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
if (dst >= nr_cpu_ids)
return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 3596db36cbff..4b1a9a92ea11 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -196,6 +196,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu->plat_device = pdev;
ret = pmu_parse_irqs(pmu);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 719aa953a1c4..d5fa92ba8373 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -860,6 +860,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 35f0de03416f..9100d82bfabc 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -932,6 +932,7 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
spe_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &spe_pmu->pdev->dev,
.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
.attr_groups = arm_spe_pmu_attr_groups,
/*
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 308c9969642e..a1b742b1a735 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -345,7 +345,7 @@ static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk) \
- CXL_PMU_EVENT_ATTR(_name, PCI_DVSEC_VENDOR_ID_CXL, _gid, _msk)
+ CXL_PMU_EVENT_ATTR(_name, PCI_VENDOR_ID_CXL, _gid, _msk)
static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(clock_ticks, CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 957058ad0099..c5e328f23841 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -690,9 +690,8 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
{
struct dwc_pcie_pmu *pcie_pmu;
struct pci_dev *pdev;
- int node;
- cpumask_t mask;
unsigned int target;
+ int node;
pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
/* Nothing to do if this CPU doesn't own the PMU */
@@ -702,10 +701,9 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
pcie_pmu->on_cpu = -1;
pdev = pcie_pmu->pdev;
node = dev_to_node(&pdev->dev);
- if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 4e8fa5a48fcf..1bbdb29743c4 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -651,6 +651,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index 5d1f0e9fdb08..03c506aa3853 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -350,15 +350,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HISI_PCIE_MAX_COUNTERS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HISI_PCIE_MAX_COUNTERS;
+ return true;
}
static int hisi_pcie_pmu_event_init(struct perf_event *event)
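/*
 * A sketch of the validation pattern used above (and in hns3_pmu further
 * down); group_fits_in_counters() is a hypothetical helper, and comparing
 * attr.config stands in for the driver's cmp_event callback: count each
 * distinct event once and fail as soon as the distinct count would exceed
 * the available hardware counters.
 */
static bool group_fits_in_counters(struct perf_event **slots, int nr_slots,
				   struct perf_event *leader)
{
	struct perf_event *sibling;
	int used = 0, i;

	slots[used++] = leader;
	for_each_sibling_event(sibling, leader) {
		for (i = 0; i < used; i++)
			if (slots[i]->attr.config == sibling->attr.config)
				break;
		if (i < used)
			continue;	/* duplicate, shares a counter */
		if (used == nr_slots)
			return false;	/* no hardware counter left */
		slots[used++] = sibling;
	}
	return true;
}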
@@ -673,7 +685,6 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
unsigned int target;
- cpumask_t mask;
int numa_node;
/* Nothing to do if this CPU doesn't own the PMU */
@@ -684,10 +695,10 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Choose a local CPU from all online cpus. */
numa_node = dev_to_node(&pcie_pmu->pdev->dev);
- if (cpumask_and(&mask, cpumask_of_node(numa_node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(numa_node),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
@@ -807,6 +818,7 @@ static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_
pcie_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hisi_pcie_pmu_event_init,
.pmu_enable = hisi_pcie_pmu_enable,
.pmu_disable = hisi_pcie_pmu_disable,
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 04031450d5fe..a60e4c966098 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -504,7 +504,6 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- cpumask_t pmu_online_cpus;
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
@@ -518,9 +517,8 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
hisi_pmu->on_cpu = -1;
/* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&pmu_online_cpus, cpu);
+ target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
+ cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
@@ -538,6 +536,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
struct pmu *pmu = &hisi_pmu->pmu;
pmu->module = module;
+ pmu->parent = hisi_pmu->dev;
pmu->task_ctx_nr = perf_invalid_context;
pmu->event_init = hisi_uncore_pmu_event_init;
pmu->pmu_enable = hisi_uncore_pmu_enable;
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
index 16869bf5bf4c..e900f8e00b18 100644
--- a/drivers/perf/hisilicon/hns3_pmu.c
+++ b/drivers/perf/hisilicon/hns3_pmu.c
@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hns3_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HNS3_PMU_MAX_HW_EVENTS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HNS3_PMU_MAX_HW_EVENTS;
+ return true;
}
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
@@ -1419,6 +1431,7 @@ static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
hns3_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hns3_pmu_event_init,
.pmu_enable = hns3_pmu_enable,
.pmu_disable = hns3_pmu_disable,
@@ -1515,7 +1528,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
return ret;
}
- ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+ ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
if (ret) {
pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
return ret;
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 148df5ae8ef8..980e3051edd7 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -801,9 +801,8 @@ static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct cluster_pmu *cluster;
struct l2cache_pmu *l2cache_pmu;
- cpumask_t cluster_online_cpus;
+ struct cluster_pmu *cluster;
unsigned int target;
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
@@ -820,9 +819,8 @@ static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cluster->on_cpu = -1;
/* Any other CPU for this cluster which is still online */
- cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&cluster_online_cpus, cpu);
+ target = cpumask_any_and_but(&cluster->cluster_cpus,
+ cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
disable_irq(cluster->irq);
return 0;
@@ -904,6 +902,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
l2cache_pmu->pmu = (struct pmu) {
/* suffix is instance id for future use with multiple sockets */
.name = "l2cache_0",
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = l2_cache_pmu_enable,
.pmu_disable = l2_cache_pmu_disable,
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index f16783d03db7..37786e88514e 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -748,6 +748,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
return -ENOMEM;
l3pmu->pmu = (struct pmu) {
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = qcom_l3_cache__pmu_enable,
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index c78a6fd6c57f..78c490e0505a 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -191,8 +191,6 @@ void riscv_pmu_stop(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
-
if (!(hwc->state & PERF_HES_STOPPED)) {
if (rvpmu->ctr_stop) {
rvpmu->ctr_stop(event, 0);
@@ -313,6 +311,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
u64 event_config = 0;
uint64_t cmask;
+ /* driver does not support branch stack sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
@@ -404,6 +406,7 @@ struct riscv_pmu *riscv_pmu_alloc(void)
cpuc->n_events = 0;
for (i = 0; i < RISCV_MAX_COUNTERS; i++)
cpuc->events[i] = NULL;
+ cpuc->snapshot_addr = NULL;
}
pmu->pmu = (struct pmu) {
.event_init = riscv_pmu_event_init,
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index fa0bccf4edf2..04487ad7fba0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -136,6 +136,7 @@ static int pmu_legacy_device_probe(struct platform_device *pdev)
pmu = riscv_pmu_alloc();
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu_legacy_init(pmu);
return 0;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 8cbe6e5f9c39..a2e4005e1fd0 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -27,7 +27,7 @@
#define ALT_SBI_PMU_OVERFLOW(__ovl) \
asm volatile(ALTERNATIVE_2( \
- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(CSR_SCOUNTOVF), \
"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
CONFIG_ERRATA_THEAD_PMU, \
@@ -57,6 +57,11 @@ asm volatile(ALTERNATIVE( \
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
+static bool sbi_v2_available;
+static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
+#define sbi_pmu_snapshot_available() \
+ static_branch_unlikely(&sbi_pmu_snapshot_available)
+
static struct attribute *riscv_arch_formats_attr[] = {
&format_attr_event.attr,
&format_attr_firmware.attr,
@@ -384,7 +389,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
cmask = 1;
} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
- cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
+ cmask = BIT(CSR_INSTRET - CSR_CYCLE);
}
}
@@ -506,24 +511,126 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
return ret;
}
+static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+
+ if (!cpu_hw_evt->snapshot_addr)
+ continue;
+
+ free_page((unsigned long)cpu_hw_evt->snapshot_addr);
+ cpu_hw_evt->snapshot_addr = NULL;
+ cpu_hw_evt->snapshot_addr_phys = 0;
+ }
+}
+
+static int pmu_sbi_snapshot_alloc(struct riscv_pmu *pmu)
+{
+ int cpu;
+ struct page *snapshot_page;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+
+ snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!snapshot_page) {
+ pmu_sbi_snapshot_free(pmu);
+ return -ENOMEM;
+ }
+ cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page);
+ cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page);
+ }
+
+ return 0;
+}
+
+static int pmu_sbi_snapshot_disable(void)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, SBI_SHMEM_DISABLE,
+ SBI_SHMEM_DISABLE, 0, 0, 0, 0);
+ if (ret.error) {
+ pr_warn("failed to disable snapshot shared memory\n");
+ return sbi_err_map_linux_errno(ret.error);
+ }
+
+ return 0;
+}
+
+static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu)
+{
+ struct cpu_hw_events *cpu_hw_evt;
+ struct sbiret ret = {0};
+
+ cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+ if (!cpu_hw_evt->snapshot_addr_phys)
+ return -EINVAL;
+
+ if (cpu_hw_evt->snapshot_set_done)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ cpu_hw_evt->snapshot_addr_phys,
+ (u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0);
+
+ /* Free up the snapshot area memory and fall back to SBI PMU calls without snapshot */
+ if (ret.error) {
+ if (ret.error != SBI_ERR_NOT_SUPPORTED)
+ pr_warn("pmu snapshot setup failed with error %ld\n", ret.error);
+ return sbi_err_map_linux_errno(ret.error);
+ }
+
+ memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
+ cpu_hw_evt->snapshot_set_done = true;
+
+ return 0;
+}
+
static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
struct sbiret ret;
- union sbi_pmu_ctr_info info;
u64 val = 0;
+ struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+ union sbi_pmu_ctr_info info = pmu_ctr_list[idx];
+
+ /* Read the value from the shared memory directly only if the counter is stopped */
+ if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) {
+ val = sdata->ctr_values[idx];
+ return val;
+ }
if (pmu_sbi_is_fw_event(event)) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
hwc->idx, 0, 0, 0, 0, 0);
- if (!ret.error)
- val = ret.value;
+ if (ret.error)
+ return 0;
+
+ val = ret.value;
+ if (IS_ENABLED(CONFIG_32BIT) && sbi_v2_available && info.width >= 32) {
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ_HI,
+ hwc->idx, 0, 0, 0, 0, 0);
+ if (!ret.error)
+ val |= ((u64)ret.value << 32);
+ else
+ WARN_ONCE(1, "Unable to read upper 32 bits of firmware counter error: %ld\n",
+ ret.error);
+ }
} else {
- info = pmu_ctr_list[idx];
val = riscv_pmu_ctr_read_csr(info.csr);
if (IS_ENABLED(CONFIG_32BIT))
- val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val;
+ val |= ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32;
}
return val;
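/*
 * Note on the rv32 read above: the 64-bit counter is split across two
 * 32-bit CSRs (e.g. cycle and cycleh, at info.csr and info.csr + 0x80),
 * so the halves combine as
 *
 *	val = lo | ((u64)hi << 32);
 *
 * The previous << 31 shift misplaced the upper word and let it collide
 * with bit 31 of the lower word.
 */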
@@ -553,6 +660,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+ /* There is no benefit in setting the SNAPSHOT flag for a single counter */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
@@ -573,16 +681,36 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
+ struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
pmu_sbi_reset_scounteren((void *)event);
+ if (sbi_pmu_snapshot_available())
+ flag |= SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
+
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
- if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
- flag != SBI_PMU_STOP_FLAG_RESET)
+ if (!ret.error && sbi_pmu_snapshot_available()) {
+ /*
+ * The counter snapshot is based on the index base specified by hwc->idx.
+ * The actual counter value is updated in shared memory at index 0 when counter
+ * mask is 0x01. To ensure accurate counter values, it's necessary to transfer
+ * the counter value to shared memory. However, if hwc->idx is zero, the counter
+ * value is already correctly updated in shared memory, requiring no further
+ * adjustment.
+ */
+ if (hwc->idx > 0) {
+ sdata->ctr_values[hwc->idx] = sdata->ctr_values[0];
+ sdata->ctr_values[0] = 0;
+ }
+ } else if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
+ flag != SBI_PMU_STOP_FLAG_RESET) {
pr_err("Stopping counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
+ }
}
static int pmu_sbi_find_num_ctrs(void)
@@ -640,10 +768,39 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+ unsigned long flag = 0;
+ int i, idx;
+ struct sbiret ret;
+ u64 temp_ctr_overflow_mask = 0;
+
+ if (sbi_pmu_snapshot_available())
+ flag = SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
+
+ /* Reset the shadow copy to avoid saving/restoring stale values from a previous overflow */
+ memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
+
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ /* No need to check the error here as we can't do anything about the error */
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG,
+ cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
+ if (!ret.error && sbi_pmu_snapshot_available()) {
+ /* Save the counter values to avoid clobbering */
+ for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
+ cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] =
+ sdata->ctr_values[idx];
+ /* Save the overflow mask to avoid clobbering */
+ temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG);
+ }
+ }
- /* No need to check the error here as we can't do anything about the error */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
- cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
+ /* Restore the counter values to the shared memory for used hw counters */
+ if (sbi_pmu_snapshot_available()) {
+ for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS)
+ sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx];
+ if (temp_ctr_overflow_mask)
+ sdata->ctr_overflow_mask = temp_ctr_overflow_mask;
+ }
}
/*
@@ -652,11 +809,10 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
* while the overflowed counters need to be started with updated initialization
* value.
*/
-static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
- unsigned long ctr_ovf_mask)
+static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
+ u64 ctr_ovf_mask)
{
- int idx = 0;
- struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ int idx = 0, i;
struct perf_event *event;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
unsigned long ctr_start_mask = 0;
@@ -664,11 +820,12 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
struct hw_perf_event *hwc;
u64 init_val = 0;
- ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;
-
- /* Start all the counters that did not overflow in a single shot */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
- 0, 0, 0, 0);
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
+ /* Start all the counters that did not overflow in a single shot */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG, ctr_start_mask,
+ 0, 0, 0, 0);
+ }
/* Reinitialize and start all the counter that overflowed */
while (ctr_ovf_mask) {
@@ -691,6 +848,52 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
}
}
+static inline void pmu_sbi_start_ovf_ctrs_snapshot(struct cpu_hw_events *cpu_hw_evt,
+ u64 ctr_ovf_mask)
+{
+ int i, idx = 0;
+ struct perf_event *event;
+ unsigned long flag = SBI_PMU_START_FLAG_INIT_SNAPSHOT;
+ u64 max_period, init_val = 0;
+ struct hw_perf_event *hwc;
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+
+ for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
+ if (ctr_ovf_mask & BIT(idx)) {
+ event = cpu_hw_evt->events[idx];
+ hwc = &event->hw;
+ max_period = riscv_pmu_ctr_get_width_mask(event);
+ init_val = local64_read(&hwc->prev_count) & max_period;
+ cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val;
+ }
+ /*
+ * We do not need to update the non-overflow counters; the previous
+ * value should already be there.
+ */
+ }
+
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ /* Restore the counter values to relative indices for used hw counters */
+ for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
+ sdata->ctr_values[idx] =
+ cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG];
+ /* Start all the counters in a single shot */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx * BITS_PER_LONG,
+ cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
+ }
+}
+
+static void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
+ u64 ctr_ovf_mask)
+{
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+
+ if (sbi_pmu_snapshot_available())
+ pmu_sbi_start_ovf_ctrs_snapshot(cpu_hw_evt, ctr_ovf_mask);
+ else
+ pmu_sbi_start_ovf_ctrs_sbi(cpu_hw_evt, ctr_ovf_mask);
+}
+
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
struct perf_sample_data data;
@@ -700,10 +903,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
int lidx, hidx, fidx;
struct riscv_pmu *pmu;
struct perf_event *event;
- unsigned long overflow;
- unsigned long overflowed_ctrs = 0;
+ u64 overflow;
+ u64 overflowed_ctrs = 0;
struct cpu_hw_events *cpu_hw_evt = dev;
u64 start_clock = sched_clock();
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
if (WARN_ON_ONCE(!cpu_hw_evt))
return IRQ_NONE;
@@ -725,7 +929,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
pmu_sbi_stop_hw_ctrs(pmu);
/* Overflow status register should only be read after counter are stopped */
- ALT_SBI_PMU_OVERFLOW(overflow);
+ if (sbi_pmu_snapshot_available())
+ overflow = sdata->ctr_overflow_mask;
+ else
+ ALT_SBI_PMU_OVERFLOW(overflow);
/*
* Overflow interrupt pending bit should only be cleared after stopping
@@ -751,9 +958,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
continue;
- /* compute hardware counter index */
- hidx = info->csr - CSR_CYCLE;
- /* check if the corresponding bit is set in sscountovf */
+ if (sbi_pmu_snapshot_available())
+ /* SBI implementation already updated the logical indices */
+ hidx = lidx;
+ else
+ /* compute hardware counter index */
+ hidx = info->csr - CSR_CYCLE;
+
+ /* check if the corresponding bit is set in sscountovf or in the shmem overflow mask */
if (!(overflow & BIT(hidx)))
continue;
@@ -763,7 +975,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
*/
overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
+ /* Update the event states here so that we know the state while reading */
+ hw_evt->state |= PERF_HES_STOPPED;
riscv_pmu_event_update(event);
+ hw_evt->state |= PERF_HES_UPTODATE;
perf_sample_data_init(&data, 0, hw_evt->last_period);
if (riscv_pmu_event_set_period(event)) {
/*
@@ -776,6 +991,8 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
*/
perf_event_overflow(event, &data, regs);
}
+ /* Reset the state as we are going to start the counter after the loop */
+ hw_evt->state = 0;
}
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
@@ -807,6 +1024,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
+ if (sbi_pmu_snapshot_available())
+ return pmu_sbi_snapshot_setup(pmu, cpu);
+
return 0;
}
@@ -819,6 +1039,9 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
/* Disable all counters access for user mode now */
csr_write(CSR_SCOUNTEREN, 0x0);
+ if (sbi_pmu_snapshot_available())
+ return pmu_sbi_snapshot_disable();
+
return 0;
}
@@ -927,6 +1150,12 @@ static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
+ if (sbi_v2_available) {
+ if (sbi_pmu_snapshot_available()) {
+ pmu_sbi_snapshot_disable();
+ pmu_sbi_snapshot_free(pmu);
+ }
+ }
riscv_pm_pmu_unregister(pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
@@ -1043,7 +1272,6 @@ static struct ctl_table sbi_pmu_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
- { }
};
static int pmu_sbi_device_probe(struct platform_device *pdev)
@@ -1081,6 +1309,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
}
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
+ pmu->pmu.parent = &pdev->dev;
pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -1094,10 +1323,6 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->event_unmapped = pmu_sbi_event_unmapped;
pmu->csr_index = pmu_sbi_csr_index;
- ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
- if (ret)
- return ret;
-
ret = riscv_pm_pmu_register(pmu);
if (ret)
goto out_unregister;
@@ -1106,8 +1331,34 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
+ /* SBI PMU Snapshot is only available in SBI v2.0 */
+ if (sbi_v2_available) {
+ ret = pmu_sbi_snapshot_alloc(pmu);
+ if (ret)
+ goto out_unregister;
+
+ ret = pmu_sbi_snapshot_setup(pmu, smp_processor_id());
+ if (ret) {
+ /* Snapshot is an optional feature. Continue if not available */
+ pmu_sbi_snapshot_free(pmu);
+ } else {
+ pr_info("SBI PMU snapshot detected\n");
+ /*
+ * We enable it once here for the boot cpu. If snapshot shmem setup
+ * fails during the cpu hotplug process, it will fail to start the cpu
+ * as we cannot handle heterogeneous PMUs with different snapshot
+ * capability.
+ */
+ static_branch_enable(&sbi_pmu_snapshot_available);
+ }
+ }
+
register_sysctl("kernel", sbi_pmu_sysctl_table);
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+ if (ret)
+ goto out_unregister;
+
return 0;
out_unregister:
@@ -1135,6 +1386,9 @@ static int __init pmu_sbi_devinit(void)
return 0;
}
+ if (sbi_spec_version >= sbi_mk_version(2, 0))
+ sbi_v2_available = true;
+
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
"perf/riscv/pmu:starting",
pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index e16d10c763de..faf763d2c95c 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -504,24 +504,19 @@ static void tx2_uncore_event_update(struct perf_event *event)
static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
- int i = 0;
- struct acpi_tx2_pmu_device {
- __u8 id[ACPI_ID_LEN];
- enum tx2_uncore_type type;
- } devices[] = {
+ struct acpi_device_id devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
{"CAV901E", PMU_TYPE_CCPI2},
- {"", PMU_TYPE_INVALID}
+ {}
};
+ const struct acpi_device_id *id;
- while (devices[i].type != PMU_TYPE_INVALID) {
- if (!strcmp(acpi_device_hid(adev), devices[i].id))
- break;
- i++;
- }
+ id = acpi_match_acpi_device(devices, adev);
+ if (!id)
+ return PMU_TYPE_INVALID;
- return devices[i].type;
+ return (enum tx2_uncore_type)id->driver_data;
}
static bool tx2_uncore_validate_event(struct pmu *pmu,
@@ -729,6 +724,7 @@ static int tx2_uncore_pmu_register(
/* Perf event registration */
tx2_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = tx2_pmu->dev,
.attr_groups = tx2_pmu->attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = tx2_uncore_event_init,
@@ -932,9 +928,8 @@ static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
struct hlist_node *hpnode)
{
- int new_cpu;
struct tx2_uncore_pmu *tx2_pmu;
- struct cpumask cpu_online_mask_temp;
+ unsigned int new_cpu;
tx2_pmu = hlist_entry_safe(hpnode,
struct tx2_uncore_pmu, hpnode);
@@ -945,11 +940,8 @@ static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
if (tx2_pmu->hrtimer_callback)
hrtimer_cancel(&tx2_pmu->hrtimer);
- cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
- cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
- new_cpu = cpumask_any_and(
- cpumask_of_node(tx2_pmu->node),
- &cpu_online_mask_temp);
+ new_cpu = cpumask_any_and_but(cpumask_of_node(tx2_pmu->node),
+ cpu_online_mask, cpu);
tx2_pmu->cpu = new_cpu;
if (new_cpu >= nr_cpu_ids)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0d49343d704b..8823b4c6b556 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1102,6 +1102,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
/* Perf driver registration */
pmu_dev->pmu = (struct pmu) {
+ .parent = pmu_dev->parent->dev,
.attr_groups = pmu_dev->attr_groups,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = xgene_perf_pmu_enable,
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index 853958fb2c06..45aaaea14fb4 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -35,6 +35,12 @@ config PHY_FSL_IMX8M_PCIE
Enable this to add support for the PCIE PHY as found on
i.MX8M family of SOCs.
+config PHY_FSL_SAMSUNG_HDMI_PHY
+ tristate "Samsung HDMI PHY support"
+ depends on OF && HAS_IOMEM && COMMON_CLK
+ help
+ Enable this to add support for the Samsung HDMI PHY in i.MX8MP.
+
endif
config PHY_FSL_LYNX_28G
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index cedb328bc4d2..c4386bfdb853 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_PHY_MIXEL_LVDS_PHY) += phy-fsl-imx8qm-lvds-phy.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o
+obj-$(CONFIG_PHY_FSL_SAMSUNG_HDMI_PHY) += phy-fsl-samsung-hdmi.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7b67..11fcb1867118 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
new file mode 100644
index 000000000000..9048cdc760c2
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ * Copyright 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define PHY_REG_00 0x00
+#define PHY_REG_01 0x04
+#define PHY_REG_02 0x08
+#define PHY_REG_08 0x20
+#define PHY_REG_09 0x24
+#define PHY_REG_10 0x28
+#define PHY_REG_11 0x2c
+
+#define PHY_REG_12 0x30
+#define REG12_CK_DIV_MASK GENMASK(5, 4)
+
+#define PHY_REG_13 0x34
+#define REG13_TG_CODE_LOW_MASK GENMASK(7, 0)
+
+#define PHY_REG_14 0x38
+#define REG14_TOL_MASK GENMASK(7, 4)
+#define REG14_RP_CODE_MASK GENMASK(3, 1)
+#define REG14_TG_CODE_HIGH_MASK GENMASK(0, 0)
+
+#define PHY_REG_15 0x3c
+#define PHY_REG_16 0x40
+#define PHY_REG_17 0x44
+#define PHY_REG_18 0x48
+#define PHY_REG_19 0x4c
+#define PHY_REG_20 0x50
+
+#define PHY_REG_21 0x54
+#define REG21_SEL_TX_CK_INV BIT(7)
+#define REG21_PMS_S_MASK GENMASK(3, 0)
+
+#define PHY_REG_22 0x58
+#define PHY_REG_23 0x5c
+#define PHY_REG_24 0x60
+#define PHY_REG_25 0x64
+#define PHY_REG_26 0x68
+#define PHY_REG_27 0x6c
+#define PHY_REG_28 0x70
+#define PHY_REG_29 0x74
+#define PHY_REG_30 0x78
+#define PHY_REG_31 0x7c
+#define PHY_REG_32 0x80
+
+/*
+ * REG33 does not match the ref manual. According to Sandor Yu from NXP,
+ * "There is a doc issue on the i.MX8MP latest RM"
+ * REG33 is being used per guidance from Sandor
+ */
+
+#define PHY_REG_33 0x84
+#define REG33_MODE_SET_DONE BIT(7)
+#define REG33_FIX_DA BIT(1)
+
+#define PHY_REG_34 0x88
+#define REG34_PHY_READY BIT(7)
+#define REG34_PLL_LOCK BIT(6)
+#define REG34_PHY_CLK_READY BIT(5)
+
+#define PHY_REG_35 0x8c
+#define PHY_REG_36 0x90
+#define PHY_REG_37 0x94
+#define PHY_REG_38 0x98
+#define PHY_REG_39 0x9c
+#define PHY_REG_40 0xa0
+#define PHY_REG_41 0xa4
+#define PHY_REG_42 0xa8
+#define PHY_REG_43 0xac
+#define PHY_REG_44 0xb0
+#define PHY_REG_45 0xb4
+#define PHY_REG_46 0xb8
+#define PHY_REG_47 0xbc
+
+#define PHY_PLL_DIV_REGS_NUM 6
+
+struct phy_config {
+ u32 pixclk;
+ u8 pll_div_regs[PHY_PLL_DIV_REGS_NUM];
+};
+
+static const struct phy_config phy_pll_cfg[] = {
+ {
+ .pixclk = 22250000,
+ .pll_div_regs = { 0x4b, 0xf1, 0x89, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 23750000,
+ .pll_div_regs = { 0x50, 0xf1, 0x86, 0x85, 0x80, 0x40 },
+ }, {
+ .pixclk = 24000000,
+ .pll_div_regs = { 0x50, 0xf0, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 24024000,
+ .pll_div_regs = { 0x50, 0xf1, 0x99, 0x02, 0x80, 0x40 },
+ }, {
+ .pixclk = 25175000,
+ .pll_div_regs = { 0x54, 0xfc, 0xcc, 0x91, 0x80, 0x40 },
+ }, {
+ .pixclk = 25200000,
+ .pll_div_regs = { 0x54, 0xf0, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 26750000,
+ .pll_div_regs = { 0x5a, 0xf2, 0x89, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 27000000,
+ .pll_div_regs = { 0x5a, 0xf0, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 27027000,
+ .pll_div_regs = { 0x5a, 0xf2, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 29500000,
+ .pll_div_regs = { 0x62, 0xf4, 0x95, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 30750000,
+ .pll_div_regs = { 0x66, 0xf4, 0x82, 0x01, 0x88, 0x45 },
+ }, {
+ .pixclk = 30888000,
+ .pll_div_regs = { 0x66, 0xf4, 0x99, 0x18, 0x88, 0x45 },
+ }, {
+ .pixclk = 33750000,
+ .pll_div_regs = { 0x70, 0xf4, 0x82, 0x01, 0x80, 0x40 },
+ }, {
+ .pixclk = 35000000,
+ .pll_div_regs = { 0x58, 0xb8, 0x8b, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 36000000,
+ .pll_div_regs = { 0x5a, 0xb0, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 36036000,
+ .pll_div_regs = { 0x5a, 0xb2, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 40000000,
+ .pll_div_regs = { 0x64, 0xb0, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 43200000,
+ .pll_div_regs = { 0x5a, 0x90, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 43243200,
+ .pll_div_regs = { 0x5a, 0x92, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 44500000,
+ .pll_div_regs = { 0x5c, 0x92, 0x98, 0x11, 0x84, 0x41 },
+ }, {
+ .pixclk = 47000000,
+ .pll_div_regs = { 0x62, 0x94, 0x95, 0x82, 0x80, 0x40 },
+ }, {
+ .pixclk = 47500000,
+ .pll_div_regs = { 0x63, 0x96, 0xa1, 0x82, 0x80, 0x40 },
+ }, {
+ .pixclk = 50349650,
+ .pll_div_regs = { 0x54, 0x7c, 0xc3, 0x8f, 0x80, 0x40 },
+ }, {
+ .pixclk = 50400000,
+ .pll_div_regs = { 0x54, 0x70, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 53250000,
+ .pll_div_regs = { 0x58, 0x72, 0x84, 0x03, 0x82, 0x41 },
+ }, {
+ .pixclk = 53500000,
+ .pll_div_regs = { 0x5a, 0x72, 0x89, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 54000000,
+ .pll_div_regs = { 0x5a, 0x70, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 54054000,
+ .pll_div_regs = { 0x5a, 0x72, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 59000000,
+ .pll_div_regs = { 0x62, 0x74, 0x95, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 59340659,
+ .pll_div_regs = { 0x62, 0x74, 0xdb, 0x52, 0x88, 0x47 },
+ }, {
+ .pixclk = 59400000,
+ .pll_div_regs = { 0x63, 0x70, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 61500000,
+ .pll_div_regs = { 0x66, 0x74, 0x82, 0x01, 0x88, 0x45 },
+ }, {
+ .pixclk = 63500000,
+ .pll_div_regs = { 0x69, 0x74, 0x89, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 67500000,
+ .pll_div_regs = { 0x54, 0x52, 0x87, 0x03, 0x80, 0x40 },
+ }, {
+ .pixclk = 70000000,
+ .pll_div_regs = { 0x58, 0x58, 0x8b, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 72000000,
+ .pll_div_regs = { 0x5a, 0x50, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 72072000,
+ .pll_div_regs = { 0x5a, 0x52, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 74176000,
+ .pll_div_regs = { 0x5d, 0x58, 0xdb, 0xA2, 0x88, 0x41 },
+ }, {
+ .pixclk = 74250000,
+ .pll_div_regs = { 0x5c, 0x52, 0x90, 0x0d, 0x84, 0x41 },
+ }, {
+ .pixclk = 78500000,
+ .pll_div_regs = { 0x62, 0x54, 0x87, 0x01, 0x80, 0x40 },
+ }, {
+ .pixclk = 80000000,
+ .pll_div_regs = { 0x64, 0x50, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 82000000,
+ .pll_div_regs = { 0x66, 0x54, 0x82, 0x01, 0x88, 0x45 },
+ }, {
+ .pixclk = 82500000,
+ .pll_div_regs = { 0x67, 0x54, 0x88, 0x01, 0x90, 0x49 },
+ }, {
+ .pixclk = 89000000,
+ .pll_div_regs = { 0x70, 0x54, 0x84, 0x83, 0x80, 0x40 },
+ }, {
+ .pixclk = 90000000,
+ .pll_div_regs = { 0x70, 0x54, 0x82, 0x01, 0x80, 0x40 },
+ }, {
+ .pixclk = 94000000,
+ .pll_div_regs = { 0x4e, 0x32, 0xa7, 0x10, 0x80, 0x40 },
+ }, {
+ .pixclk = 95000000,
+ .pll_div_regs = { 0x50, 0x31, 0x86, 0x85, 0x80, 0x40 },
+ }, {
+ .pixclk = 98901099,
+ .pll_div_regs = { 0x52, 0x3a, 0xdb, 0x4c, 0x88, 0x47 },
+ }, {
+ .pixclk = 99000000,
+ .pll_div_regs = { 0x52, 0x32, 0x82, 0x01, 0x88, 0x47 },
+ }, {
+ .pixclk = 100699300,
+ .pll_div_regs = { 0x54, 0x3c, 0xc3, 0x8f, 0x80, 0x40 },
+ }, {
+ .pixclk = 100800000,
+ .pll_div_regs = { 0x54, 0x30, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 102500000,
+ .pll_div_regs = { 0x55, 0x32, 0x8c, 0x05, 0x90, 0x4b },
+ }, {
+ .pixclk = 104750000,
+ .pll_div_regs = { 0x57, 0x32, 0x98, 0x07, 0x90, 0x49 },
+ }, {
+ .pixclk = 106500000,
+ .pll_div_regs = { 0x58, 0x32, 0x84, 0x03, 0x82, 0x41 },
+ }, {
+ .pixclk = 107000000,
+ .pll_div_regs = { 0x5a, 0x32, 0x89, 0x88, 0x80, 0x40 },
+ }, {
+ .pixclk = 108000000,
+ .pll_div_regs = { 0x5a, 0x30, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 108108000,
+ .pll_div_regs = { 0x5a, 0x32, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 118000000,
+ .pll_div_regs = { 0x62, 0x34, 0x95, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 118800000,
+ .pll_div_regs = { 0x63, 0x30, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 123000000,
+ .pll_div_regs = { 0x66, 0x34, 0x82, 0x01, 0x88, 0x45 },
+ }, {
+ .pixclk = 127000000,
+ .pll_div_regs = { 0x69, 0x34, 0x89, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 135000000,
+ .pll_div_regs = { 0x70, 0x34, 0x82, 0x01, 0x80, 0x40 },
+ }, {
+ .pixclk = 135580000,
+ .pll_div_regs = { 0x71, 0x39, 0xe9, 0x82, 0x9c, 0x5b },
+ }, {
+ .pixclk = 137520000,
+ .pll_div_regs = { 0x72, 0x38, 0x99, 0x10, 0x85, 0x41 },
+ }, {
+ .pixclk = 138750000,
+ .pll_div_regs = { 0x73, 0x35, 0x88, 0x05, 0x90, 0x4d },
+ }, {
+ .pixclk = 140000000,
+ .pll_div_regs = { 0x75, 0x36, 0xa7, 0x90, 0x80, 0x40 },
+ }, {
+ .pixclk = 144000000,
+ .pll_div_regs = { 0x78, 0x30, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 148352000,
+ .pll_div_regs = { 0x7b, 0x35, 0xdb, 0x39, 0x90, 0x45 },
+ }, {
+ .pixclk = 148500000,
+ .pll_div_regs = { 0x7b, 0x35, 0x84, 0x03, 0x90, 0x45 },
+ }, {
+ .pixclk = 154000000,
+ .pll_div_regs = { 0x40, 0x18, 0x83, 0x01, 0x00, 0x40 },
+ }, {
+ .pixclk = 157000000,
+ .pll_div_regs = { 0x41, 0x11, 0xa7, 0x14, 0x80, 0x40 },
+ }, {
+ .pixclk = 160000000,
+ .pll_div_regs = { 0x42, 0x12, 0xa1, 0x20, 0x80, 0x40 },
+ }, {
+ .pixclk = 162000000,
+ .pll_div_regs = { 0x43, 0x18, 0x8b, 0x08, 0x96, 0x55 },
+ }, {
+ .pixclk = 164000000,
+ .pll_div_regs = { 0x45, 0x11, 0x83, 0x82, 0x90, 0x4b },
+ }, {
+ .pixclk = 165000000,
+ .pll_div_regs = { 0x45, 0x11, 0x84, 0x81, 0x90, 0x4b },
+ }, {
+ .pixclk = 180000000,
+ .pll_div_regs = { 0x4b, 0x10, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 185625000,
+ .pll_div_regs = { 0x4e, 0x12, 0x9a, 0x95, 0x80, 0x40 },
+ }, {
+ .pixclk = 188000000,
+ .pll_div_regs = { 0x4e, 0x12, 0xa7, 0x10, 0x80, 0x40 },
+ }, {
+ .pixclk = 198000000,
+ .pll_div_regs = { 0x52, 0x12, 0x82, 0x01, 0x88, 0x47 },
+ }, {
+ .pixclk = 205000000,
+ .pll_div_regs = { 0x55, 0x12, 0x8c, 0x05, 0x90, 0x4b },
+ }, {
+ .pixclk = 209500000,
+ .pll_div_regs = { 0x57, 0x12, 0x98, 0x07, 0x90, 0x49 },
+ }, {
+ .pixclk = 213000000,
+ .pll_div_regs = { 0x58, 0x12, 0x84, 0x03, 0x82, 0x41 },
+ }, {
+ .pixclk = 216000000,
+ .pll_div_regs = { 0x5a, 0x10, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 216216000,
+ .pll_div_regs = { 0x5a, 0x12, 0xfd, 0x0c, 0x80, 0x40 },
+ }, {
+ .pixclk = 237600000,
+ .pll_div_regs = { 0x63, 0x10, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 254000000,
+ .pll_div_regs = { 0x69, 0x14, 0x89, 0x08, 0x80, 0x40 },
+ }, {
+ .pixclk = 277500000,
+ .pll_div_regs = { 0x73, 0x15, 0x88, 0x05, 0x90, 0x4d },
+ }, {
+ .pixclk = 288000000,
+ .pll_div_regs = { 0x78, 0x10, 0x00, 0x00, 0x80, 0x00 },
+ }, {
+ .pixclk = 297000000,
+ .pll_div_regs = { 0x7b, 0x15, 0x84, 0x03, 0x90, 0x45 },
+ },
+};
+
+struct reg_settings {
+ u8 reg;
+ u8 val;
+};
+
+static const struct reg_settings common_phy_cfg[] = {
+ { PHY_REG_00, 0x00 }, { PHY_REG_01, 0xd1 },
+ { PHY_REG_08, 0x4f }, { PHY_REG_09, 0x30 },
+ { PHY_REG_10, 0x33 }, { PHY_REG_11, 0x65 },
+ /* REG12 pixclk specific */
+ /* REG13 pixclk specific */
+ /* REG14 pixclk specific */
+ { PHY_REG_15, 0x80 }, { PHY_REG_16, 0x6c },
+ { PHY_REG_17, 0xf2 }, { PHY_REG_18, 0x67 },
+ { PHY_REG_19, 0x00 }, { PHY_REG_20, 0x10 },
+ /* REG21 pixclk specific */
+ { PHY_REG_22, 0x30 }, { PHY_REG_23, 0x32 },
+ { PHY_REG_24, 0x60 }, { PHY_REG_25, 0x8f },
+ { PHY_REG_26, 0x00 }, { PHY_REG_27, 0x00 },
+ { PHY_REG_28, 0x08 }, { PHY_REG_29, 0x00 },
+ { PHY_REG_30, 0x00 }, { PHY_REG_31, 0x00 },
+ { PHY_REG_32, 0x00 }, { PHY_REG_33, 0x80 },
+ { PHY_REG_34, 0x00 }, { PHY_REG_35, 0x00 },
+ { PHY_REG_36, 0x00 }, { PHY_REG_37, 0x00 },
+ { PHY_REG_38, 0x00 }, { PHY_REG_39, 0x00 },
+ { PHY_REG_40, 0x00 }, { PHY_REG_41, 0xe0 },
+ { PHY_REG_42, 0x83 }, { PHY_REG_43, 0x0f },
+	{ PHY_REG_44, 0x3e }, { PHY_REG_45, 0xf8 },
+ { PHY_REG_46, 0x00 }, { PHY_REG_47, 0x00 }
+};
+
+struct fsl_samsung_hdmi_phy {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *apbclk;
+ struct clk *refclk;
+
+ /* clk provider */
+ struct clk_hw hw;
+ const struct phy_config *cur_cfg;
+};
+
+static inline struct fsl_samsung_hdmi_phy *
+to_fsl_samsung_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct fsl_samsung_hdmi_phy, hw);
+}
+
+static void
+fsl_samsung_hdmi_phy_configure_pixclk(struct fsl_samsung_hdmi_phy *phy,
+ const struct phy_config *cfg)
+{
+ u8 div = 0x1;
+
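+	/* Select the PMS "S" divider value (written to REG21) for the pixel clock range */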
+ switch (cfg->pixclk) {
+ case 22250000 ... 33750000:
+ div = 0xf;
+ break;
+ case 35000000 ... 40000000:
+ div = 0xb;
+ break;
+ case 43200000 ... 47500000:
+ div = 0x9;
+ break;
+ case 50349650 ... 63500000:
+ div = 0x7;
+ break;
+ case 67500000 ... 90000000:
+ div = 0x5;
+ break;
+ case 94000000 ... 148500000:
+ div = 0x3;
+ break;
+ case 154000000 ... 297000000:
+ div = 0x1;
+ break;
+ }
+
+ writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, div),
+ phy->regs + PHY_REG_21);
+}
+
+static void
+fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
+ const struct phy_config *cfg)
+{
+ u32 pclk = cfg->pixclk;
+ u32 fld_tg_code;
+ u32 pclk_khz;
+ u8 div = 1;
+
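+	/* Choose the frequency lock detector clock divider (FLD_CK_DIV) for this pixel clock range */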
+ switch (cfg->pixclk) {
+ case 22250000 ... 47500000:
+ div = 1;
+ break;
+ case 50349650 ... 99000000:
+ div = 2;
+ break;
+ case 100699300 ... 198000000:
+ div = 4;
+ break;
+ case 205000000 ... 297000000:
+ div = 8;
+ break;
+ }
+
+ writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG_12);
+
+	/*
+	 * The calculation of the frequency lock detector target code (fld_tg_code)
+	 * is based on the reference manual register description of PHY_REG13
+	 * (13.10.3.1.14.2):
+	 * 1st) Calculate int_pllclk, which is determined by FLD_CK_DIV
+	 * 2nd) Increase the resolution to avoid rounding issues
+	 * 3rd) Do the division (256 / Freq. of int_pllclk) * 24
+	 * 4th) Reduce the resolution and always round up, since the NXP
+	 *      settings always round up too. TODO: Check if that is
+	 *      correct.
+	 */
+ pclk /= div;
+ pclk_khz = pclk / 1000;
+ fld_tg_code = 256 * 1000 * 1000 / pclk_khz * 24;
+ fld_tg_code = DIV_ROUND_UP(fld_tg_code, 1000);
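+
+	/*
+	 * Worked example (illustrative only): for a 74.25 MHz pixel clock,
+	 * div = 2, so pclk = 37125000 and pclk_khz = 37125;
+	 * 256 * 1000 * 1000 / 37125 = 6895, 6895 * 24 = 165480, and
+	 * DIV_ROUND_UP(165480, 1000) yields fld_tg_code = 166.
+	 */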
+
+ /* FLD_TOL and FLD_RP_CODE taken from downstream driver */
+ writeb(FIELD_PREP(REG13_TG_CODE_LOW_MASK, fld_tg_code),
+ phy->regs + PHY_REG_13);
+ writeb(FIELD_PREP(REG14_TOL_MASK, 2) |
+ FIELD_PREP(REG14_RP_CODE_MASK, 2) |
+ FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8),
+ phy->regs + PHY_REG_14);
+}
+
+static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy,
+ const struct phy_config *cfg)
+{
+ int i, ret;
+ u8 val;
+
+ /* HDMI PHY init */
+ writeb(REG33_FIX_DA, phy->regs + PHY_REG_33);
+
+ /* common PHY registers */
+ for (i = 0; i < ARRAY_SIZE(common_phy_cfg); i++)
+ writeb(common_phy_cfg[i].val, phy->regs + common_phy_cfg[i].reg);
+
+ /* set individual PLL registers PHY_REG2 ... PHY_REG7 */
+ for (i = 0; i < PHY_PLL_DIV_REGS_NUM; i++)
+ writeb(cfg->pll_div_regs[i], phy->regs + PHY_REG_02 + i * 4);
+
+ fsl_samsung_hdmi_phy_configure_pixclk(phy, cfg);
+ fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+
+ writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG_33);
+
+ ret = readb_poll_timeout(phy->regs + PHY_REG_34, val,
+ val & REG34_PLL_LOCK, 50, 20000);
+ if (ret)
+ dev_err(phy->dev, "PLL failed to lock\n");
+
+ return ret;
+}
+
+static unsigned long phy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw);
+
+ if (!phy->cur_cfg)
+ return 74250000;
+
+ return phy->cur_cfg->pixclk;
+}
+
+static long phy_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ int i;
+
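+	/* phy_pll_cfg[] is sorted by ascending pixclk; pick the largest entry not above the requested rate */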
+ for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--)
+ if (phy_pll_cfg[i].pixclk <= rate)
+ return phy_pll_cfg[i].pixclk;
+
+ return -EINVAL;
+}
+
+static int phy_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw);
+ int i;
+
+ for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--)
+ if (phy_pll_cfg[i].pixclk <= rate)
+ break;
+
+ if (i < 0)
+ return -EINVAL;
+
+ phy->cur_cfg = &phy_pll_cfg[i];
+
+ return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg);
+}
+
+static const struct clk_ops phy_clk_ops = {
+ .recalc_rate = phy_clk_recalc_rate,
+ .round_rate = phy_clk_round_rate,
+ .set_rate = phy_clk_set_rate,
+};
+
+static int phy_clk_register(struct fsl_samsung_hdmi_phy *phy)
+{
+ struct device *dev = phy->dev;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ const char *parent_name;
+ struct clk *phyclk;
+ int ret;
+
+ parent_name = __clk_get_name(phy->refclk);
+
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+ init.name = "hdmi_pclk";
+ init.ops = &phy_clk_ops;
+
+ phy->hw.init = &init;
+
+ phyclk = devm_clk_register(dev, &phy->hw);
+ if (IS_ERR(phyclk))
+ return dev_err_probe(dev, PTR_ERR(phyclk),
+ "failed to register clock\n");
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, phyclk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register clock provider\n");
+
+ return 0;
+}
+
+static int fsl_samsung_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct fsl_samsung_hdmi_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, phy);
+ phy->dev = &pdev->dev;
+
+ phy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->apbclk = devm_clk_get(phy->dev, "apb");
+ if (IS_ERR(phy->apbclk))
+ return dev_err_probe(phy->dev, PTR_ERR(phy->apbclk),
+ "failed to get apb clk\n");
+
+ phy->refclk = devm_clk_get(phy->dev, "ref");
+ if (IS_ERR(phy->refclk))
+ return dev_err_probe(phy->dev, PTR_ERR(phy->refclk),
+ "failed to get ref clk\n");
+
+ ret = clk_prepare_enable(phy->apbclk);
+ if (ret) {
+ dev_err(phy->dev, "failed to enable apbclk\n");
+ return ret;
+ }
+
+ pm_runtime_get_noresume(phy->dev);
+ pm_runtime_set_active(phy->dev);
+ pm_runtime_enable(phy->dev);
+
+ ret = phy_clk_register(phy);
+ if (ret) {
+ dev_err(&pdev->dev, "register clk failed\n");
+ goto register_clk_failed;
+ }
+
+ pm_runtime_put(phy->dev);
+
+ return 0;
+
+register_clk_failed:
+ clk_disable_unprepare(phy->apbclk);
+
+ return ret;
+}
+
+static void fsl_samsung_hdmi_phy_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+}
+
+static int __maybe_unused fsl_samsung_hdmi_phy_suspend(struct device *dev)
+{
+ struct fsl_samsung_hdmi_phy *phy = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(phy->apbclk);
+
+ return 0;
+}
+
+static int __maybe_unused fsl_samsung_hdmi_phy_resume(struct device *dev)
+{
+ struct fsl_samsung_hdmi_phy *phy = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(phy->apbclk);
+ if (ret) {
+ dev_err(phy->dev, "failed to enable apbclk\n");
+ return ret;
+ }
+
+ if (phy->cur_cfg)
+ ret = fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg);
+
+	return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(fsl_samsung_hdmi_phy_pm_ops,
+ fsl_samsung_hdmi_phy_suspend,
+ fsl_samsung_hdmi_phy_resume, NULL);
+
+static const struct of_device_id fsl_samsung_hdmi_phy_of_match[] = {
+ {
+ .compatible = "fsl,imx8mp-hdmi-phy",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, fsl_samsung_hdmi_phy_of_match);
+
+static struct platform_driver fsl_samsung_hdmi_phy_driver = {
+ .probe = fsl_samsung_hdmi_phy_probe,
+ .remove_new = fsl_samsung_hdmi_phy_remove,
+ .driver = {
+ .name = "fsl-samsung-hdmi-phy",
+ .of_match_table = fsl_samsung_hdmi_phy_of_match,
+ .pm = pm_ptr(&fsl_samsung_hdmi_phy_pm_ops),
+ },
+};
+module_platform_driver(fsl_samsung_hdmi_phy_driver);
+
+MODULE_AUTHOR("Sandor Yu <Sandor.yu@nxp.com>");
+MODULE_DESCRIPTION("SAMSUNG HDMI 2.0 Transmitter PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 41162d7228c9..1d1db1737422 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 3849b7c87d28..60e00057e8bc 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -13,6 +13,17 @@ config PHY_MTK_PCIE
callback for PCIe GEN3 port, it supports software efuse
initialization.
+config PHY_MTK_XFI_TPHY
+ tristate "MediaTek 10GE SerDes XFI T-PHY driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+	  Say 'Y' here to add support for the MediaTek XFI T-PHY driver.
+ The driver provides access to the Ethernet SerDes T-PHY supporting
+ 1GE and 2.5GE modes via the LynxI PCS, and 5GE and 10GE modes
+ via the USXGMII PCS found in MediaTek SoCs with 10G Ethernet.
+
config PHY_MTK_TPHY
tristate "MediaTek T-PHY Driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index f6e24a47e081..1b8088df71e8 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PHY_MTK_PCIE) += phy-mtk-pcie.o
obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o
obj-$(CONFIG_PHY_MTK_UFS) += phy-mtk-ufs.o
obj-$(CONFIG_PHY_MTK_XSPHY) += phy-mtk-xsphy.o
+obj-$(CONFIG_PHY_MTK_XFI_TPHY) += phy-mtk-xfi-tphy.o
phy-mtk-hdmi-drv-y := phy-mtk-hdmi.o
phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt2701.o
diff --git a/drivers/phy/mediatek/phy-mtk-xfi-tphy.c b/drivers/phy/mediatek/phy-mtk-xfi-tphy.c
new file mode 100644
index 000000000000..1a0b7484f525
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-xfi-tphy.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MediaTek 10GE SerDes XFI T-PHY driver
+ *
+ * Copyright (c) 2024 Daniel Golle <daniel@makrotopia.org>
+ * Bc-bocun Chen <bc-bocun.chen@mediatek.com>
+ * based on mtk_usxgmii.c and mtk_sgmii.c found in MediaTek's SDK (GPL-2.0)
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+
+#include "phy-mtk-io.h"
+
+#define MTK_XFI_TPHY_NUM_CLOCKS 2
+
+#define REG_DIG_GLB_70 0x0070
+#define XTP_PCS_RX_EQ_IN_PROGRESS(x) FIELD_PREP(GENMASK(25, 24), (x))
+#define XTP_PCS_MODE_MASK GENMASK(17, 16)
+#define XTP_PCS_MODE(x) FIELD_PREP(GENMASK(17, 16), (x))
+#define XTP_PCS_RST_B BIT(15)
+#define XTP_FRC_PCS_RST_B BIT(14)
+#define XTP_PCS_PWD_SYNC_MASK GENMASK(13, 12)
+#define XTP_PCS_PWD_SYNC(x) FIELD_PREP(XTP_PCS_PWD_SYNC_MASK, (x))
+#define XTP_PCS_PWD_ASYNC_MASK GENMASK(11, 10)
+#define XTP_PCS_PWD_ASYNC(x) FIELD_PREP(XTP_PCS_PWD_ASYNC_MASK, (x))
+#define XTP_FRC_PCS_PWD_ASYNC BIT(8)
+#define XTP_PCS_UPDT BIT(4)
+#define XTP_PCS_IN_FR_RG BIT(0)
+
+#define REG_DIG_GLB_F4 0x00f4
+#define XFI_DPHY_PCS_SEL BIT(0)
+#define XFI_DPHY_PCS_SEL_SGMII FIELD_PREP(XFI_DPHY_PCS_SEL, 1)
+#define XFI_DPHY_PCS_SEL_USXGMII FIELD_PREP(XFI_DPHY_PCS_SEL, 0)
+#define XFI_DPHY_AD_SGDT_FRC_EN BIT(5)
+
+#define REG_DIG_LN_TRX_40 0x3040
+#define XTP_LN_FRC_TX_DATA_EN BIT(29)
+#define XTP_LN_TX_DATA_EN BIT(28)
+
+#define REG_DIG_LN_TRX_B0 0x30b0
+#define XTP_LN_FRC_TX_MACCK_EN BIT(5)
+#define XTP_LN_TX_MACCK_EN BIT(4)
+
+#define REG_ANA_GLB_D0 0x90d0
+#define XTP_GLB_USXGMII_SEL_MASK GENMASK(3, 1)
+#define XTP_GLB_USXGMII_SEL(x) FIELD_PREP(GENMASK(3, 1), (x))
+#define XTP_GLB_USXGMII_EN BIT(0)
+
+/**
+ * struct mtk_xfi_tphy - run-time data of the XFI phy instance
+ * @base: IO memory area to access phy registers.
+ * @dev: Kernel device used to output prefixed debug info.
+ * @reset: Reset control corresponding to the phy instance.
+ * @clocks: All clocks required for the phy to operate.
+ * @da_war: Enables work-around for 10GBase-R mode.
+ */
+struct mtk_xfi_tphy {
+ void __iomem *base;
+ struct device *dev;
+ struct reset_control *reset;
+ struct clk_bulk_data clocks[MTK_XFI_TPHY_NUM_CLOCKS];
+ bool da_war;
+};
+
+/**
+ * mtk_xfi_tphy_setup() - Setup phy for specified interface mode.
+ * @xfi_tphy: XFI phy instance.
+ * @interface: Ethernet interface mode
+ *
+ * The setup function is the condensed result of combining the 5 functions that
+ * set up the phy in MediaTek's GPL-licensed public SDK sources. They can be found
+ * in mtk_sgmii.c[1] as well as mtk_usxgmii.c[2].
+ *
+ * Many magic values have been replaced by register and bit definitions; however,
+ * that has not been possible in all cases. While the vendor driver uses a
+ * sequence of 32-bit writes, here we try to modify only the bits that are
+ * actually required.
+ *
+ * [1]: https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/b72d6cba92bf9e29fb035c03052fa1e86664a25b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
+ *
+ * [2]: https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/dec96a1d9b82cdcda4a56453fd0b453d4cab4b85/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+ */
+static void mtk_xfi_tphy_setup(struct mtk_xfi_tphy *xfi_tphy,
+ phy_interface_t interface)
+{
+ bool is_1g, is_2p5g, is_5g, is_10g, da_war, use_lynxi_pcs;
+
+ /* shorthands for specific clock speeds depending on interface mode */
+ is_1g = interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII;
+ is_2p5g = interface == PHY_INTERFACE_MODE_2500BASEX;
+ is_5g = interface == PHY_INTERFACE_MODE_5GBASER;
+ is_10g = interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_USXGMII;
+
+ /* Is overriding 10GBase-R tuning value required? */
+ da_war = xfi_tphy->da_war && (interface == PHY_INTERFACE_MODE_10GBASER);
+
+ /* configure input mux to either
+ * - USXGMII PCS (64b/66b coding) for 5G/10G
+ * - LynxI PCS (8b/10b coding) for 1G/2.5G
+ */
+ use_lynxi_pcs = is_1g || is_2p5g;
+
+ dev_dbg(xfi_tphy->dev, "setting up for mode %s\n", phy_modes(interface));
+
+ /* Setup PLL setting */
+ mtk_phy_update_bits(xfi_tphy->base + 0x9024, 0x100000, is_10g ? 0x0 : 0x100000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x2020, 0x202000, is_5g ? 0x202000 : 0x0);
+ mtk_phy_update_bits(xfi_tphy->base + 0x2030, 0x500, is_1g ? 0x0 : 0x500);
+ mtk_phy_update_bits(xfi_tphy->base + 0x2034, 0xa00, is_1g ? 0x0 : 0xa00);
+ mtk_phy_update_bits(xfi_tphy->base + 0x2040, 0x340000, is_1g ? 0x200000 : 0x140000);
+
+ /* Setup RXFE BW setting */
+ mtk_phy_update_bits(xfi_tphy->base + 0x50f0, 0xc10, is_1g ? 0x410 : is_5g ? 0x800 : 0x400);
+ mtk_phy_update_bits(xfi_tphy->base + 0x50e0, 0x4000, is_5g ? 0x0 : 0x4000);
+
+ /* Setup RX CDR setting */
+ mtk_phy_update_bits(xfi_tphy->base + 0x506c, 0x30000, is_5g ? 0x0 : 0x30000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5070, 0x670000, is_5g ? 0x620000 : 0x50000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5074, 0x180000, is_5g ? 0x180000 : 0x0);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5078, 0xf000400, is_5g ? 0x8000000 :
+ 0x7000400);
+ mtk_phy_update_bits(xfi_tphy->base + 0x507c, 0x5000500, is_5g ? 0x4000400 :
+ 0x1000100);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5080, 0x1410, is_1g ? 0x400 : is_5g ? 0x1010 : 0x0);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5084, 0x30300, is_1g ? 0x30300 :
+ is_5g ? 0x30100 :
+ 0x100);
+ mtk_phy_update_bits(xfi_tphy->base + 0x5088, 0x60200, is_1g ? 0x20200 :
+ is_5g ? 0x40000 :
+ 0x20000);
+
+	/* Set the RXFE adaptation range */
+ mtk_phy_update_bits(xfi_tphy->base + 0x50e4, 0xc0000, is_5g ? 0x0 : 0xc0000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x50e8, 0x40000, is_5g ? 0x0 : 0x40000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x50ec, 0xa00, is_1g ? 0x200 : 0x800);
+ mtk_phy_update_bits(xfi_tphy->base + 0x50a8, 0xee0000, is_5g ? 0x800000 :
+ 0x6e0000);
+ mtk_phy_update_bits(xfi_tphy->base + 0x6004, 0x190000, is_5g ? 0x0 : 0x190000);
+
+ if (is_10g)
+ writel(0x01423342, xfi_tphy->base + 0x00f8);
+ else if (is_5g)
+ writel(0x00a132a1, xfi_tphy->base + 0x00f8);
+ else if (is_2p5g)
+ writel(0x009c329c, xfi_tphy->base + 0x00f8);
+ else
+ writel(0x00fa32fa, xfi_tphy->base + 0x00f8);
+
+ /* Force SGDT_OUT off and select PCS */
+ mtk_phy_update_bits(xfi_tphy->base + REG_DIG_GLB_F4,
+ XFI_DPHY_AD_SGDT_FRC_EN | XFI_DPHY_PCS_SEL,
+ XFI_DPHY_AD_SGDT_FRC_EN |
+ (use_lynxi_pcs ? XFI_DPHY_PCS_SEL_SGMII :
+ XFI_DPHY_PCS_SEL_USXGMII));
+
+ /* Force GLB_CKDET_OUT */
+ mtk_phy_set_bits(xfi_tphy->base + 0x0030, 0xc00);
+
+ /* Force AEQ on */
+ writel(XTP_PCS_RX_EQ_IN_PROGRESS(2) | XTP_PCS_PWD_SYNC(2) | XTP_PCS_PWD_ASYNC(2),
+ xfi_tphy->base + REG_DIG_GLB_70);
+
+ usleep_range(1, 5);
+ writel(XTP_LN_FRC_TX_DATA_EN, xfi_tphy->base + REG_DIG_LN_TRX_40);
+
+ /* Setup TX DA default value */
+ mtk_phy_update_bits(xfi_tphy->base + 0x30b0, 0x30, 0x20);
+ writel(0x00008a01, xfi_tphy->base + 0x3028);
+ writel(0x0000a884, xfi_tphy->base + 0x302c);
+ writel(0x00083002, xfi_tphy->base + 0x3024);
+
+ /* Setup RG default value */
+ if (use_lynxi_pcs) {
+ writel(0x00011110, xfi_tphy->base + 0x3010);
+ writel(0x40704000, xfi_tphy->base + 0x3048);
+ } else {
+ writel(0x00022220, xfi_tphy->base + 0x3010);
+ writel(0x0f020a01, xfi_tphy->base + 0x5064);
+ writel(0x06100600, xfi_tphy->base + 0x50b4);
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ writel(0x40704000, xfi_tphy->base + 0x3048);
+ else
+ writel(0x47684100, xfi_tphy->base + 0x3048);
+ }
+
+ if (is_1g)
+ writel(0x0000c000, xfi_tphy->base + 0x3064);
+
+ /* Setup RX EQ initial value */
+ mtk_phy_update_bits(xfi_tphy->base + 0x3050, 0xa8000000,
+ (interface != PHY_INTERFACE_MODE_10GBASER) ? 0xa8000000 : 0x0);
+ mtk_phy_update_bits(xfi_tphy->base + 0x3054, 0xaa,
+ (interface != PHY_INTERFACE_MODE_10GBASER) ? 0xaa : 0x0);
+
+ if (!use_lynxi_pcs)
+ writel(0x00000f00, xfi_tphy->base + 0x306c);
+ else if (is_2p5g)
+ writel(0x22000f00, xfi_tphy->base + 0x306c);
+ else
+ writel(0x20200f00, xfi_tphy->base + 0x306c);
+
+ mtk_phy_update_bits(xfi_tphy->base + 0xa008, 0x10000, da_war ? 0x10000 : 0x0);
+
+ mtk_phy_update_bits(xfi_tphy->base + 0xa060, 0x50000, use_lynxi_pcs ? 0x50000 : 0x40000);
+
+ /* Setup PHYA speed */
+ mtk_phy_update_bits(xfi_tphy->base + REG_ANA_GLB_D0,
+ XTP_GLB_USXGMII_SEL_MASK | XTP_GLB_USXGMII_EN,
+ is_10g ? XTP_GLB_USXGMII_SEL(0) :
+ is_5g ? XTP_GLB_USXGMII_SEL(1) :
+ is_2p5g ? XTP_GLB_USXGMII_SEL(2) :
+ XTP_GLB_USXGMII_SEL(3));
+ mtk_phy_set_bits(xfi_tphy->base + REG_ANA_GLB_D0, XTP_GLB_USXGMII_EN);
+
+ /* Release reset */
+ mtk_phy_set_bits(xfi_tphy->base + REG_DIG_GLB_70,
+ XTP_PCS_RST_B | XTP_FRC_PCS_RST_B);
+ usleep_range(150, 500);
+
+ /* Switch to P0 */
+ mtk_phy_update_bits(xfi_tphy->base + REG_DIG_GLB_70,
+ XTP_PCS_IN_FR_RG |
+ XTP_FRC_PCS_PWD_ASYNC |
+ XTP_PCS_PWD_ASYNC_MASK |
+ XTP_PCS_PWD_SYNC_MASK |
+ XTP_PCS_UPDT,
+ XTP_PCS_IN_FR_RG |
+ XTP_FRC_PCS_PWD_ASYNC |
+ XTP_PCS_UPDT);
+ usleep_range(1, 5);
+
+ mtk_phy_clear_bits(xfi_tphy->base + REG_DIG_GLB_70, XTP_PCS_UPDT);
+ usleep_range(15, 50);
+
+ if (use_lynxi_pcs) {
+ /* Switch to Gen2 */
+ mtk_phy_update_bits(xfi_tphy->base + REG_DIG_GLB_70,
+ XTP_PCS_MODE_MASK | XTP_PCS_UPDT,
+ XTP_PCS_MODE(1) | XTP_PCS_UPDT);
+ } else {
+ /* Switch to Gen3 */
+ mtk_phy_update_bits(xfi_tphy->base + REG_DIG_GLB_70,
+ XTP_PCS_MODE_MASK | XTP_PCS_UPDT,
+ XTP_PCS_MODE(2) | XTP_PCS_UPDT);
+ }
+ usleep_range(1, 5);
+
+ mtk_phy_clear_bits(xfi_tphy->base + REG_DIG_GLB_70, XTP_PCS_UPDT);
+
+ usleep_range(100, 500);
+
+ /* Enable MAC CK */
+ mtk_phy_set_bits(xfi_tphy->base + REG_DIG_LN_TRX_B0, XTP_LN_TX_MACCK_EN);
+ mtk_phy_clear_bits(xfi_tphy->base + REG_DIG_GLB_F4, XFI_DPHY_AD_SGDT_FRC_EN);
+
+ /* Enable TX data */
+ mtk_phy_set_bits(xfi_tphy->base + REG_DIG_LN_TRX_40,
+ XTP_LN_FRC_TX_DATA_EN | XTP_LN_TX_DATA_EN);
+ usleep_range(400, 1000);
+}
+
+/**
+ * mtk_xfi_tphy_set_mode() - Set the phy to the specified interface mode.
+ *
+ * @phy: Phy instance.
+ * @mode: Only PHY_MODE_ETHERNET is supported.
+ * @submode: An Ethernet interface mode.
+ *
+ * Validate selected mode and call function mtk_xfi_tphy_setup().
+ *
+ * Return:
+ * * %0 - OK
+ * * %-EINVAL - invalid mode
+ */
+static int mtk_xfi_tphy_set_mode(struct phy *phy, enum phy_mode mode,
+				 int submode)
+{
+ struct mtk_xfi_tphy *xfi_tphy = phy_get_drvdata(phy);
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EINVAL;
+
+ switch (submode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ mtk_xfi_tphy_setup(xfi_tphy, submode);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
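+
+/*
+ * Illustrative usage (not part of this driver): a consumer such as the
+ * Ethernet MAC driver would typically select the line mode with
+ *
+ *	phy_set_mode_ext(phy, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_10GBASER);
+ *
+ * which the generic phy core routes to mtk_xfi_tphy_set_mode() above.
+ */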
+
+/**
+ * mtk_xfi_tphy_reset() - Reset the phy.
+ *
+ * @phy: Phy instance.
+ *
+ * Reset the phy using the external reset controller.
+ *
+ * Return:
+ * %0 - OK
+ */
+static int mtk_xfi_tphy_reset(struct phy *phy)
+{
+ struct mtk_xfi_tphy *xfi_tphy = phy_get_drvdata(phy);
+
+ reset_control_assert(xfi_tphy->reset);
+ usleep_range(100, 500);
+ reset_control_deassert(xfi_tphy->reset);
+ usleep_range(1, 10);
+
+ return 0;
+}
+
+/**
+ * mtk_xfi_tphy_power_on() - Power-on the phy.
+ *
+ * @phy: Phy instance.
+ *
+ * Prepare and enable all clocks required for the phy to operate.
+ *
+ * Return:
+ * See clk_bulk_prepare_enable().
+ */
+static int mtk_xfi_tphy_power_on(struct phy *phy)
+{
+ struct mtk_xfi_tphy *xfi_tphy = phy_get_drvdata(phy);
+
+ return clk_bulk_prepare_enable(MTK_XFI_TPHY_NUM_CLOCKS, xfi_tphy->clocks);
+}
+
+/**
+ * mtk_xfi_tphy_power_off() - Power-off the phy.
+ *
+ * @phy: Phy instance.
+ *
+ * Disable and unprepare all clocks previously enabled.
+ *
+ * Return:
+ * See clk_bulk_prepare_disable().
+ */
+static int mtk_xfi_tphy_power_off(struct phy *phy)
+{
+ struct mtk_xfi_tphy *xfi_tphy = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(MTK_XFI_TPHY_NUM_CLOCKS, xfi_tphy->clocks);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_xfi_tphy_ops = {
+ .power_on = mtk_xfi_tphy_power_on,
+ .power_off = mtk_xfi_tphy_power_off,
+ .set_mode = mtk_xfi_tphy_set_mode,
+ .reset = mtk_xfi_tphy_reset,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * mtk_xfi_tphy_probe() - Probe phy instance from Device Tree.
+ * @pdev: Matching platform device.
+ *
+ * The probe function gets IO resource, clocks, reset controller and
+ * whether the DA work-around for 10GBase-R is required from Device Tree and
+ * allocates memory for holding that information in a struct mtk_xfi_tphy.
+ *
+ * Return:
+ * * %0 - OK
+ * * %-ENODEV - Missing associated Device Tree node (should never happen).
+ * * %-ENOMEM - Out of memory.
+ * * Any error value which devm_platform_ioremap_resource(),
+ * devm_clk_bulk_get(), devm_reset_control_get_exclusive(),
+ * devm_phy_create() or devm_of_phy_provider_register() may return.
+ */
+static int mtk_xfi_tphy_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_provider *phy_provider;
+ struct mtk_xfi_tphy *xfi_tphy;
+ struct phy *phy;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ xfi_tphy = devm_kzalloc(&pdev->dev, sizeof(*xfi_tphy), GFP_KERNEL);
+ if (!xfi_tphy)
+ return -ENOMEM;
+
+ xfi_tphy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xfi_tphy->base))
+ return PTR_ERR(xfi_tphy->base);
+
+ xfi_tphy->dev = &pdev->dev;
+ xfi_tphy->clocks[0].id = "topxtal";
+ xfi_tphy->clocks[1].id = "xfipll";
+ ret = devm_clk_bulk_get(&pdev->dev, MTK_XFI_TPHY_NUM_CLOCKS, xfi_tphy->clocks);
+ if (ret)
+ return ret;
+
+ xfi_tphy->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(xfi_tphy->reset))
+ return PTR_ERR(xfi_tphy->reset);
+
+ xfi_tphy->da_war = of_property_read_bool(np, "mediatek,usxgmii-performance-errata");
+
+ phy = devm_phy_create(&pdev->dev, NULL, &mtk_xfi_tphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, xfi_tphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id mtk_xfi_tphy_match[] = {
+ { .compatible = "mediatek,mt7988-xfi-tphy", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_xfi_tphy_match);
+
+static struct platform_driver mtk_xfi_tphy_driver = {
+ .probe = mtk_xfi_tphy_probe,
+ .driver = {
+ .name = "mtk-xfi-tphy",
+ .of_match_table = mtk_xfi_tphy_match,
+ },
+};
+module_platform_driver(mtk_xfi_tphy_driver);
+
+MODULE_DESCRIPTION("MediaTek 10GE SerDes XFI T-PHY driver");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_AUTHOR("Bc-bocun Chen <bc-bocun.chen@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index c5c8d70bc853..bf6a07590321 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -20,7 +20,12 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
-static struct class *phy_class;
+static void phy_release(struct device *dev);
+static const struct class phy_class = {
+ .name = "phy",
+ .dev_release = phy_release,
+};
+
static struct dentry *phy_debugfs_root;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
@@ -753,7 +758,7 @@ struct phy *of_phy_simple_xlate(struct device *dev,
struct phy *phy;
struct class_dev_iter iter;
- class_dev_iter_init(&iter, phy_class, NULL, NULL);
+ class_dev_iter_init(&iter, &phy_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
phy = to_phy(dev);
if (args->np != phy->dev.of_node)
@@ -1016,7 +1021,7 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
device_initialize(&phy->dev);
mutex_init(&phy->mutex);
- phy->dev.class = phy_class;
+ phy->dev.class = &phy_class;
phy->dev.parent = dev;
phy->dev.of_node = node ?: dev->of_node;
phy->id = id;
@@ -1285,14 +1290,13 @@ static void phy_release(struct device *dev)
static int __init phy_core_init(void)
{
- phy_class = class_create("phy");
- if (IS_ERR(phy_class)) {
- pr_err("failed to create phy class --> %ld\n",
- PTR_ERR(phy_class));
- return PTR_ERR(phy_class);
- }
+ int err;
- phy_class->dev_release = phy_release;
+ err = class_register(&phy_class);
+ if (err) {
+ pr_err("failed to register phy class");
+ return err;
+ }
phy_debugfs_root = debugfs_create_dir("phy", NULL);
@@ -1303,6 +1307,6 @@ device_initcall(phy_core_init);
static void __exit phy_core_exit(void)
{
debugfs_remove_recursive(phy_debugfs_root);
- class_destroy(phy_class);
+ class_unregister(&phy_class);
}
module_exit(phy_core_exit);
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 9818d994c68b..da2b32fb5b45 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -23,6 +24,7 @@
#include "phy-qcom-qmp-dp-phy.h"
#include "phy-qcom-qmp-qserdes-com-v4.h"
+#include "phy-qcom-qmp-qserdes-com-v6.h"
/* EDP_PHY registers */
#define DP_PHY_CFG 0x0010
@@ -69,19 +71,32 @@
#define TXn_TRAN_DRVR_EMP_EN 0x0078
-struct qcom_edp_cfg {
- bool is_dp;
-
- /* DP PHY swing and pre_emphasis tables */
+struct qcom_edp_swing_pre_emph_cfg {
const u8 (*swing_hbr_rbr)[4][4];
const u8 (*swing_hbr3_hbr2)[4][4];
const u8 (*pre_emphasis_hbr_rbr)[4][4];
const u8 (*pre_emphasis_hbr3_hbr2)[4][4];
};
+struct qcom_edp;
+
+struct phy_ver_ops {
+ int (*com_power_on)(const struct qcom_edp *edp);
+ int (*com_resetsm_cntrl)(const struct qcom_edp *edp);
+ int (*com_bias_en_clkbuflr)(const struct qcom_edp *edp);
+ int (*com_configure_pll)(const struct qcom_edp *edp);
+ int (*com_configure_ssc)(const struct qcom_edp *edp);
+};
+
+struct qcom_edp_phy_cfg {
+ bool is_edp;
+ const struct qcom_edp_swing_pre_emph_cfg *swing_pre_emph_cfg;
+ const struct phy_ver_ops *ver_ops;
+};
+
struct qcom_edp {
struct device *dev;
- const struct qcom_edp_cfg *cfg;
+ const struct qcom_edp_phy_cfg *cfg;
struct phy *phy;
@@ -97,6 +112,8 @@ struct qcom_edp {
struct clk_bulk_data clks[2];
struct regulator_bulk_data supplies[2];
+
+ bool is_edp;
};
static const u8 dp_swing_hbr_rbr[4][4] = {
@@ -127,8 +144,7 @@ static const u8 dp_pre_emp_hbr2_hbr3[4][4] = {
{ 0x04, 0xff, 0xff, 0xff }
};
-static const struct qcom_edp_cfg dp_phy_cfg = {
- .is_dp = true,
+static const struct qcom_edp_swing_pre_emph_cfg dp_phy_swing_pre_emph_cfg = {
.swing_hbr_rbr = &dp_swing_hbr_rbr,
.swing_hbr3_hbr2 = &dp_swing_hbr2_hbr3,
.pre_emphasis_hbr_rbr = &dp_pre_emp_hbr_rbr,
@@ -163,8 +179,7 @@ static const u8 edp_pre_emp_hbr2_hbr3[4][4] = {
{ 0x00, 0xff, 0xff, 0xff }
};
-static const struct qcom_edp_cfg edp_phy_cfg = {
- .is_dp = false,
+static const struct qcom_edp_swing_pre_emph_cfg edp_phy_swing_pre_emph_cfg = {
.swing_hbr_rbr = &edp_swing_hbr_rbr,
.swing_hbr3_hbr2 = &edp_swing_hbr2_hbr3,
.pre_emphasis_hbr_rbr = &edp_pre_emp_hbr_rbr,
@@ -174,7 +189,6 @@ static const struct qcom_edp_cfg edp_phy_cfg = {
static int qcom_edp_phy_init(struct phy *phy)
{
struct qcom_edp *edp = phy_get_drvdata(phy);
- const struct qcom_edp_cfg *cfg = edp->cfg;
int ret;
u8 cfg8;
@@ -190,8 +204,9 @@ static int qcom_edp_phy_init(struct phy *phy)
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
edp->edp + DP_PHY_PD_CTL);
- /* Turn on BIAS current for PHY/PLL */
- writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+ ret = edp->cfg->ver_ops->com_bias_en_clkbuflr(edp);
+ if (ret)
+ return ret;
writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL);
msleep(20);
@@ -201,7 +216,12 @@ static int qcom_edp_phy_init(struct phy *phy)
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
edp->edp + DP_PHY_PD_CTL);
- if (cfg && cfg->is_dp)
+ /*
+ * TODO: Re-work the conditions around setting the cfg8 value
+ * when more information becomes available about why this is
+ * even needed.
+ */
+ if (edp->cfg->swing_pre_emph_cfg && !edp->is_edp)
cfg8 = 0xb7;
else
cfg8 = 0x37;
@@ -235,7 +255,7 @@ out_disable_supplies:
static int qcom_edp_set_voltages(struct qcom_edp *edp, const struct phy_configure_opts_dp *dp_opts)
{
- const struct qcom_edp_cfg *cfg = edp->cfg;
+ const struct qcom_edp_swing_pre_emph_cfg *cfg = edp->cfg->swing_pre_emph_cfg;
unsigned int v_level = 0;
unsigned int p_level = 0;
u8 ldo_config;
@@ -246,6 +266,9 @@ static int qcom_edp_set_voltages(struct qcom_edp *edp, const struct phy_configur
if (!cfg)
return 0;
+ if (edp->is_edp)
+ cfg = &edp_phy_swing_pre_emph_cfg;
+
for (i = 0; i < dp_opts->lanes; i++) {
v_level = max(v_level, dp_opts->voltage[i]);
p_level = max(p_level, dp_opts->pre[i]);
@@ -262,7 +285,7 @@ static int qcom_edp_set_voltages(struct qcom_edp *edp, const struct phy_configur
if (swing == 0xff || emph == 0xff)
return -EINVAL;
- ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+ ldo_config = edp->is_edp ? 0x0 : 0x1;
writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
writel(swing, edp->tx0 + TXn_TX_DRV_LVL);
@@ -291,6 +314,84 @@ static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opt
static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
{
+ return edp->cfg->ver_ops->com_configure_ssc(edp);
+}
+
+static int qcom_edp_configure_pll(const struct qcom_edp *edp)
+{
+ return edp->cfg->ver_ops->com_configure_pll(edp);
+}
+
+static int qcom_edp_set_vco_div(const struct qcom_edp *edp, unsigned long *pixel_freq)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 vco_div;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ vco_div = 0x1;
+ *pixel_freq = 1620000000UL / 2;
+ break;
+
+ case 2700:
+ vco_div = 0x1;
+ *pixel_freq = 2700000000UL / 2;
+ break;
+
+ case 5400:
+ vco_div = 0x2;
+ *pixel_freq = 5400000000UL / 4;
+ break;
+
+ case 8100:
+ vco_div = 0x0;
+ *pixel_freq = 8100000000UL / 6;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
+
+ return 0;
+}
+
+static int qcom_edp_phy_power_on_v4(const struct qcom_edp *edp)
+{
+ u32 val;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
+ return readl_poll_timeout(edp->pll + QSERDES_V4_COM_CMN_STATUS,
+ val, val & BIT(7), 5, 200);
+}
+
+static int qcom_edp_phy_com_resetsm_cntrl_v4(const struct qcom_edp *edp)
+{
+ u32 val;
+
+ writel(0x20, edp->pll + QSERDES_V4_COM_RESETSM_CNTRL);
+
+ return readl_poll_timeout(edp->pll + QSERDES_V4_COM_C_READY_STATUS,
+ val, val & BIT(0), 500, 10000);
+}
+
+static int qcom_edp_com_bias_en_clkbuflr_v4(const struct qcom_edp *edp)
+{
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+ return 0;
+}
+
+static int qcom_edp_com_configure_ssc_v4(const struct qcom_edp *edp)
+{
const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
u32 step1;
u32 step2;
@@ -323,7 +424,7 @@ static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
return 0;
}
-static int qcom_edp_configure_pll(const struct qcom_edp *edp)
+static int qcom_edp_com_configure_pll_v4(const struct qcom_edp *edp)
{
const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
u32 div_frac_start2_mode0;
@@ -409,30 +510,150 @@ static int qcom_edp_configure_pll(const struct qcom_edp *edp)
return 0;
}
-static int qcom_edp_set_vco_div(const struct qcom_edp *edp, unsigned long *pixel_freq)
+static const struct phy_ver_ops qcom_edp_phy_ops_v4 = {
+ .com_power_on = qcom_edp_phy_power_on_v4,
+ .com_resetsm_cntrl = qcom_edp_phy_com_resetsm_cntrl_v4,
+ .com_bias_en_clkbuflr = qcom_edp_com_bias_en_clkbuflr_v4,
+ .com_configure_pll = qcom_edp_com_configure_pll_v4,
+ .com_configure_ssc = qcom_edp_com_configure_ssc_v4,
+};
+
+static const struct qcom_edp_phy_cfg sc7280_dp_phy_cfg = {
+ .ver_ops = &qcom_edp_phy_ops_v4,
+};
+
+static const struct qcom_edp_phy_cfg sc8280xp_dp_phy_cfg = {
+ .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg,
+ .ver_ops = &qcom_edp_phy_ops_v4,
+};
+
+static const struct qcom_edp_phy_cfg sc8280xp_edp_phy_cfg = {
+ .is_edp = true,
+ .swing_pre_emph_cfg = &edp_phy_swing_pre_emph_cfg,
+ .ver_ops = &qcom_edp_phy_ops_v4,
+};
+
+static int qcom_edp_phy_power_on_v6(const struct qcom_edp *edp)
+{
+ u32 val;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
+ return readl_poll_timeout(edp->pll + QSERDES_V6_COM_CMN_STATUS,
+ val, val & BIT(7), 5, 200);
+}
+
+static int qcom_edp_phy_com_resetsm_cntrl_v6(const struct qcom_edp *edp)
+{
+ u32 val;
+
+ writel(0x20, edp->pll + QSERDES_V6_COM_RESETSM_CNTRL);
+
+ return readl_poll_timeout(edp->pll + QSERDES_V6_COM_C_READY_STATUS,
+ val, val & BIT(0), 500, 10000);
+}
+
+static int qcom_edp_com_bias_en_clkbuflr_v6(const struct qcom_edp *edp)
+{
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x1f, edp->pll + QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN);
+
+ return 0;
+}
+
+static int qcom_edp_com_configure_ssc_v6(const struct qcom_edp *edp)
{
const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
- u32 vco_div;
+ u32 step1;
+ u32 step2;
switch (dp_opts->link_rate) {
case 1620:
- vco_div = 0x1;
- *pixel_freq = 1620000000UL / 2;
+ case 2700:
+ case 8100:
+ step1 = 0x92;
+ step2 = 0x01;
+ break;
+
+ case 5400:
+ step1 = 0x18;
+ step2 = 0x02;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(0x01, edp->pll + QSERDES_V6_COM_SSC_EN_CENTER);
+ writel(0x00, edp->pll + QSERDES_V6_COM_SSC_ADJ_PER1);
+ writel(0x36, edp->pll + QSERDES_V6_COM_SSC_PER1);
+ writel(0x01, edp->pll + QSERDES_V6_COM_SSC_PER2);
+ writel(step1, edp->pll + QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0);
+ writel(step2, edp->pll + QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0);
+
+ return 0;
+}
+
+static int qcom_edp_com_configure_pll_v6(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 dec_start_mode0;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 code1_mode0;
+ u32 code2_mode0;
+ u32 hsclk_sel;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ hsclk_sel = 0x5;
+ dec_start_mode0 = 0x34;
+ div_frac_start2_mode0 = 0xc0;
+ div_frac_start3_mode0 = 0x0b;
+ lock_cmp1_mode0 = 0x37;
+ lock_cmp2_mode0 = 0x04;
+ code1_mode0 = 0x71;
+ code2_mode0 = 0x0c;
break;
case 2700:
- vco_div = 0x1;
- *pixel_freq = 2700000000UL / 2;
+ hsclk_sel = 0x3;
+ dec_start_mode0 = 0x34;
+ div_frac_start2_mode0 = 0xc0;
+ div_frac_start3_mode0 = 0x0b;
+ lock_cmp1_mode0 = 0x07;
+ lock_cmp2_mode0 = 0x07;
+ code1_mode0 = 0x71;
+ code2_mode0 = 0x0c;
break;
case 5400:
- vco_div = 0x2;
- *pixel_freq = 5400000000UL / 4;
+ hsclk_sel = 0x1;
+ dec_start_mode0 = 0x46;
+ div_frac_start2_mode0 = 0x00;
+ div_frac_start3_mode0 = 0x05;
+ lock_cmp1_mode0 = 0x0f;
+ lock_cmp2_mode0 = 0x0e;
+ code1_mode0 = 0x97;
+ code2_mode0 = 0x10;
break;
case 8100:
- vco_div = 0x0;
- *pixel_freq = 8100000000UL / 6;
+ hsclk_sel = 0x0;
+ dec_start_mode0 = 0x34;
+ div_frac_start2_mode0 = 0xc0;
+ div_frac_start3_mode0 = 0x0b;
+ lock_cmp1_mode0 = 0x17;
+ lock_cmp2_mode0 = 0x15;
+ code1_mode0 = 0x71;
+ code2_mode0 = 0x0c;
break;
default:
@@ -440,36 +661,72 @@ static int qcom_edp_set_vco_div(const struct qcom_edp *edp, unsigned long *pixel
return -EINVAL;
}
- writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
+ writel(0x01, edp->pll + QSERDES_V6_COM_SVS_MODE_CLK_SEL);
+ writel(0x0b, edp->pll + QSERDES_V6_COM_SYSCLK_EN_SEL);
+ writel(0x02, edp->pll + QSERDES_V6_COM_SYS_CLK_CTRL);
+ writel(0x0c, edp->pll + QSERDES_V6_COM_CLK_ENABLE1);
+ writel(0x06, edp->pll + QSERDES_V6_COM_SYSCLK_BUF_ENABLE);
+ writel(0x30, edp->pll + QSERDES_V6_COM_CLK_SELECT);
+ writel(hsclk_sel, edp->pll + QSERDES_V6_COM_HSCLK_SEL_1);
+ writel(0x07, edp->pll + QSERDES_V6_COM_PLL_IVCO);
+ writel(0x08, edp->pll + QSERDES_V6_COM_LOCK_CMP_EN);
+ writel(0x36, edp->pll + QSERDES_V6_COM_PLL_CCTRL_MODE0);
+ writel(0x16, edp->pll + QSERDES_V6_COM_PLL_RCTRL_MODE0);
+ writel(0x06, edp->pll + QSERDES_V6_COM_CP_CTRL_MODE0);
+ writel(dec_start_mode0, edp->pll + QSERDES_V6_COM_DEC_START_MODE0);
+ writel(0x00, edp->pll + QSERDES_V6_COM_DIV_FRAC_START1_MODE0);
+ writel(div_frac_start2_mode0, edp->pll + QSERDES_V6_COM_DIV_FRAC_START2_MODE0);
+ writel(div_frac_start3_mode0, edp->pll + QSERDES_V6_COM_DIV_FRAC_START3_MODE0);
+ writel(0x12, edp->pll + QSERDES_V6_COM_CMN_CONFIG_1);
+ writel(0x3f, edp->pll + QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0);
+ writel(0x00, edp->pll + QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0);
+ writel(0x00, edp->pll + QSERDES_V6_COM_VCO_TUNE_MAP);
+ writel(lock_cmp1_mode0, edp->pll + QSERDES_V6_COM_LOCK_CMP1_MODE0);
+ writel(lock_cmp2_mode0, edp->pll + QSERDES_V6_COM_LOCK_CMP2_MODE0);
+
+ writel(0x0a, edp->pll + QSERDES_V6_COM_BG_TIMER);
+ writel(0x14, edp->pll + QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0);
+ writel(0x00, edp->pll + QSERDES_V6_COM_VCO_TUNE_CTRL);
+ writel(0x1f, edp->pll + QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN);
+ writel(0x0f, edp->pll + QSERDES_V6_COM_CORE_CLK_EN);
+ writel(0xa0, edp->pll + QSERDES_V6_COM_VCO_TUNE1_MODE0);
+ writel(0x03, edp->pll + QSERDES_V6_COM_VCO_TUNE2_MODE0);
+
+ writel(code1_mode0, edp->pll + QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0);
+ writel(code2_mode0, edp->pll + QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0);
return 0;
}
+static const struct phy_ver_ops qcom_edp_phy_ops_v6 = {
+ .com_power_on = qcom_edp_phy_power_on_v6,
+ .com_resetsm_cntrl = qcom_edp_phy_com_resetsm_cntrl_v6,
+ .com_bias_en_clkbuflr = qcom_edp_com_bias_en_clkbuflr_v6,
+ .com_configure_pll = qcom_edp_com_configure_pll_v6,
+ .com_configure_ssc = qcom_edp_com_configure_ssc_v6,
+};
+
+static const struct qcom_edp_phy_cfg x1e80100_phy_cfg = {
+ .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg,
+ .ver_ops = &qcom_edp_phy_ops_v6,
+};
+
static int qcom_edp_phy_power_on(struct phy *phy)
{
const struct qcom_edp *edp = phy_get_drvdata(phy);
- const struct qcom_edp_cfg *cfg = edp->cfg;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
unsigned long pixel_freq;
- u8 ldo_config;
- int timeout;
+ u8 ldo_config = 0x0;
int ret;
u32 val;
u8 cfg1;
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- edp->edp + DP_PHY_PD_CTL);
- writel(0xfc, edp->edp + DP_PHY_MODE);
-
- timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_CMN_STATUS,
- val, val & BIT(7), 5, 200);
- if (timeout)
- return timeout;
-
+ ret = edp->cfg->ver_ops->com_power_on(edp);
+ if (ret)
+ return ret;
- ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+ if (edp->cfg->swing_pre_emph_cfg && !edp->is_edp)
+ ldo_config = 0x1;
writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
writel(ldo_config, edp->tx1 + TXn_LDO_CONFIG);
@@ -513,12 +770,9 @@ static int qcom_edp_phy_power_on(struct phy *phy)
writel(0x01, edp->edp + DP_PHY_CFG);
writel(0x09, edp->edp + DP_PHY_CFG);
- writel(0x20, edp->pll + QSERDES_V4_COM_RESETSM_CNTRL);
-
- timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_C_READY_STATUS,
- val, val & BIT(0), 500, 10000);
- if (timeout)
- return timeout;
+ ret = edp->cfg->ver_ops->com_resetsm_cntrl(edp);
+ if (ret)
+ return ret;
writel(0x19, edp->edp + DP_PHY_CFG);
writel(0x1f, edp->tx0 + TXn_HIGHZ_DRVR_EN);
@@ -590,6 +844,18 @@ static int qcom_edp_phy_power_off(struct phy *phy)
return 0;
}
+static int qcom_edp_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ if (mode != PHY_MODE_DP)
+ return -EINVAL;
+
+ edp->is_edp = submode == PHY_SUBMODE_EDP;
+
+ return 0;
+}
+
static int qcom_edp_phy_exit(struct phy *phy)
{
struct qcom_edp *edp = phy_get_drvdata(phy);
@@ -605,6 +871,7 @@ static const struct phy_ops qcom_edp_ops = {
.configure = qcom_edp_phy_configure,
.power_on = qcom_edp_phy_power_on,
.power_off = qcom_edp_phy_power_off,
+ .set_mode = qcom_edp_phy_set_mode,
.exit = qcom_edp_phy_exit,
.owner = THIS_MODULE,
};
@@ -782,6 +1049,7 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
edp->dev = dev;
edp->cfg = of_device_get_match_data(&pdev->dev);
+ edp->is_edp = edp->cfg->is_edp;
edp->edp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(edp->edp))
@@ -840,10 +1108,11 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_edp_phy_match_table[] = {
- { .compatible = "qcom,sc7280-edp-phy" },
- { .compatible = "qcom,sc8180x-edp-phy" },
- { .compatible = "qcom,sc8280xp-dp-phy", .data = &dp_phy_cfg },
- { .compatible = "qcom,sc8280xp-edp-phy", .data = &edp_phy_cfg },
+ { .compatible = "qcom,sc7280-edp-phy", .data = &sc7280_dp_phy_cfg, },
+ { .compatible = "qcom,sc8180x-edp-phy", .data = &sc7280_dp_phy_cfg, },
+ { .compatible = "qcom,sc8280xp-dp-phy", .data = &sc8280xp_dp_phy_cfg, },
+ { .compatible = "qcom,sc8280xp-edp-phy", .data = &sc8280xp_edp_phy_cfg, },
+ { .compatible = "qcom,x1e80100-dp-phy", .data = &x1e80100_phy_cfg, },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a43e20abb10d..68cc8e24f383 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -88,6 +88,12 @@ static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
[TUNE_USB2_PREEM] = 0x5,
};
+static const u32 smb2360_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x5,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x2,
+};
+
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
.init_tbl = pm8550b_init_tbl,
.init_tbl_num = ARRAY_SIZE(pm8550b_init_tbl),
@@ -95,6 +101,13 @@ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
.num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
};
+static const struct eusb2_repeater_cfg smb2360_eusb2_cfg = {
+ .init_tbl = smb2360_init_tbl,
+ .init_tbl_num = ARRAY_SIZE(smb2360_init_tbl),
+ .vreg_list = pm8550b_vreg_l,
+ .num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
+};
+
static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
{
int num = rptr->cfg->num_vregs;
@@ -271,6 +284,10 @@ static const struct of_device_id eusb2_repeater_of_match_table[] = {
.compatible = "qcom,pm8550b-eusb2-repeater",
.data = &pm8550b_eusb2_cfg,
},
+ {
+ .compatible = "qcom,smb2360-eusb2-repeater",
+ .data = &smb2360_eusb2_cfg,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, eusb2_repeater_of_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b75d7..20d4c020a83c 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7d585a4bbbba..7f999e8a433d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -77,6 +77,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -102,6 +103,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -126,6 +128,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -150,6 +153,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -174,6 +178,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -1377,6 +1382,13 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
{ 0x3f, 0xff, 0xff, 0xff }
};
+static const u8 qmp_dp_v6_voltage_swing_hbr_rbr[4][4] = {
+ { 0x27, 0x2f, 0x36, 0x3f },
+ { 0x31, 0x3e, 0x3f, 0xff },
+ { 0x36, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
static const u8 qmp_dp_v6_pre_emphasis_hbr_rbr[4][4] = {
{ 0x20, 0x2d, 0x34, 0x3a },
{ 0x20, 0x2e, 0x35, 0xff },
@@ -1996,6 +2008,51 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
};
+static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = sm8550_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8550_usb3_serdes_tbl),
+ .tx_tbl = sm8550_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl),
+ .rx_tbl = sm8550_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl),
+ .pcs_tbl = sm8550_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v6_usb3phy_regs_layout,
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -2150,9 +2207,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2162,6 +2219,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2184,7 +2242,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
@@ -2431,8 +2489,6 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
return 0;
-
- return 0;
}
/*
@@ -3625,7 +3681,7 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
},
{
.compatible = "qcom,sm8650-qmp-usb3-dp-phy",
- .data = &sm8550_usb3dpphy_cfg,
+ .data = &sm8650_usb3dpphy_cfg,
},
{
.compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
index f5cfacf9be96..181057421c11 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V5_H_
/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
index 01a20d3be4b8..fa967a1af058 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V6_H_
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 8836bb1ff0cc..6c796723c8f5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -22,6 +22,8 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
+
#include "phy-qcom-qmp-common.h"
#include "phy-qcom-qmp.h"
@@ -2246,6 +2248,7 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl[] = {
};
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
@@ -2272,7 +2275,6 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl[]
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
@@ -2389,6 +2391,9 @@ struct qmp_phy_cfg {
/* QMP PHY pipe clock interface rate */
unsigned long pipe_clock_rate;
+
+ /* QMP PHY AUX clock interface rate */
+ unsigned long aux_clock_rate;
};
struct qmp_pcie {
@@ -2420,6 +2425,7 @@ struct qmp_pcie {
int mode;
struct clk_fixed_rate pipe_clk_fixed;
+ struct clk_fixed_rate aux_clk_fixed;
};
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
@@ -3135,6 +3141,9 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
+
+ /* 20MHz PHY AUX Clock */
+ .aux_clock_rate = 20000000,
};
static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
@@ -3192,6 +3201,9 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
.has_nocsr_reset = true,
+
+ /* 20MHz PHY AUX Clock */
+ .aux_clock_rate = 20000000,
};
static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
@@ -3222,6 +3234,9 @@ static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
.has_nocsr_reset = true,
+
+ /* 20MHz PHY AUX Clock */
+ .aux_clock_rate = 20000000,
};
static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
@@ -3291,6 +3306,13 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
},
+ .tbls_ep = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x2_pcie_ep_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_ep_serdes_alt_tbl),
+ .pcs_misc = sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
+ },
+
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -3664,7 +3686,7 @@ static int phy_pipe_clk_register(struct qmp_pcie *qmp, struct device_node *np)
struct clk_init_data init = { };
int ret;
- ret = of_property_read_string(np, "clock-output-names", &init.name);
+ ret = of_property_read_string_index(np, "clock-output-names", 0, &init.name);
if (ret) {
dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
return ret;
@@ -3683,14 +3705,87 @@ static int phy_pipe_clk_register(struct qmp_pcie *qmp, struct device_node *np)
fixed->hw.init = &init;
- ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
- if (ret)
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
+
+/*
+ * Register a fixed rate PHY aux clock.
+ *
+ * The <s>_phy_aux_clksrc generated by the PHY feeds the GCC, which
+ * gate-controls it. The resulting <s>_phy_aux_clk output of the GCC is
+ * then requested by the PHY driver for its own operations.
+ * We register the <s>_phy_aux_clksrc here; the GCC driver takes care
+ * of assigning <s>_phy_aux_clksrc as the parent of <s>_phy_aux_clk.
+ * The diagram below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->phy_aux_clksrc--->| GCC |--->phy_aux_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_aux_clk_register(struct qmp_pcie *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed = &qmp->aux_clk_fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string_index(np, "clock-output-names", 1, &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names index 1\n", np);
return ret;
+ }
+
+ init.ops = &clk_fixed_rate_ops;
+
+ fixed->fixed_rate = qmp->cfg->aux_clock_rate;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
+
+static struct clk_hw *qmp_pcie_clk_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct qmp_pcie *qmp = data;
+
+ /* Support legacy bindings */
+ if (!clkspec->args_count)
+ return &qmp->pipe_clk_fixed.hw;
- ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ switch (clkspec->args[0]) {
+ case QMP_PCIE_PIPE_CLK:
+ return &qmp->pipe_clk_fixed.hw;
+ case QMP_PCIE_PHY_AUX_CLK:
+ return &qmp->aux_clk_fixed.hw;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int qmp_pcie_register_clocks(struct qmp_pcie *qmp, struct device_node *np)
+{
+ int ret;
+
+ ret = phy_pipe_clk_register(qmp, np);
if (ret)
return ret;
+ if (qmp->cfg->aux_clock_rate) {
+ ret = phy_aux_clk_register(qmp, np);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, qmp_pcie_clk_hw_get, qmp);
+ if (ret)
+ return ret;
+ } else {
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &qmp->pipe_clk_fixed.hw);
+ if (ret)
+ return ret;
+ }
+
/*
* Roll a devm action because the clock provider is the child node, but
* the child node is not actually a device.
@@ -3899,7 +3994,7 @@ static int qmp_pcie_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
- ret = phy_pipe_clk_register(qmp, np);
+ ret = qmp_pcie_register_clocks(qmp, np);
if (ret)
goto err_node_put;
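
The hunks above turn the PCIe PHY into a two-output clock provider: index 0
of clock-output-names names the pipe clock and index 1 the PHY AUX clock,
and consumers select one via the specifier cell handled in
qmp_pcie_clk_hw_get() (with a no-cell fallback for the legacy binding).
The sketch below restates that pattern in isolation; it is illustrative
only, the names my_phy and my_phy_clk_xlate are hypothetical, and the
specifier values 0/1 are assumed to match QMP_PCIE_PIPE_CLK and
QMP_PCIE_PHY_AUX_CLK.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>

struct my_phy {
	struct clk_fixed_rate pipe_clk;	/* named by clock-output-names[0] */
	struct clk_fixed_rate aux_clk;	/* named by clock-output-names[1] */
};

static struct clk_hw *my_phy_clk_xlate(struct of_phandle_args *clkspec, void *data)
{
	struct my_phy *phy = data;

	/* Legacy binding: a single, unindexed clock output (the pipe clock) */
	if (!clkspec->args_count)
		return &phy->pipe_clk.hw;

	switch (clkspec->args[0]) {
	case 0:		/* assumed QMP_PCIE_PIPE_CLK */
		return &phy->pipe_clk.hw;
	case 1:		/* assumed QMP_PCIE_PHY_AUX_CLK */
		return &phy->aux_clk.hw;
	}

	return ERR_PTR(-EINVAL);
}

/* Registered with: of_clk_add_hw_provider(np, my_phy_clk_xlate, phy); */
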
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
index 970cc0667809..f19f9892ed7b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
@@ -30,5 +30,9 @@
#define QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1 0x1f4
#define QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1 0x1fc
#define QPHY_V6_PCS_UFS_RX_HSG5_SYNC_WAIT_TIME 0x220
+#define QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S4 0x240
+#define QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S5 0x244
+#define QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S6 0x248
+#define QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S7 0x24c
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
index d9a87bd95590..d17a52357965 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
@@ -25,12 +25,15 @@
#define QSERDES_UFS_V6_RX_UCDR_SO_GAIN_RATE4 0xf0
#define QSERDES_UFS_V6_RX_UCDR_PI_CONTROLS 0xf4
#define QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL 0x178
+#define QSERDES_UFS_V6_RX_RX_EQU_ADAPTOR_CNTRL4 0x1ac
#define QSERDES_UFS_V6_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1bc
#define QSERDES_UFS_V6_RX_INTERFACE_MODE 0x1e0
#define QSERDES_UFS_V6_RX_OFFSET_ADAPTOR_CNTRL3 0x1c4
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0 0x208
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1 0x20c
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B2 0x210
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3 0x214
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B4 0x218
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6 0x220
#define QSERDES_UFS_V6_RX_MODE_RATE2_B3 0x238
#define QSERDES_UFS_V6_RX_MODE_RATE2_B6 0x244
@@ -38,6 +41,9 @@
#define QSERDES_UFS_V6_RX_MODE_RATE3_B4 0x260
#define QSERDES_UFS_V6_RX_MODE_RATE3_B5 0x264
#define QSERDES_UFS_V6_RX_MODE_RATE3_B8 0x270
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B0 0x274
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B1 0x278
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B2 0x27c
#define QSERDES_UFS_V6_RX_MODE_RATE4_B3 0x280
#define QSERDES_UFS_V6_RX_MODE_RATE4_B4 0x284
#define QSERDES_UFS_V6_RX_MODE_RATE4_B6 0x28c
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 590432d581f9..a57e8a4657f4 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -722,6 +722,38 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_g4_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_BIST_FIXED_PAT_CTRL, 0x0a),
};
+static const struct qmp_phy_init_tbl sm8475_ufsphy_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8475_ufsphy_g4_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8475_ufsphy_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+};
+
static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
@@ -830,17 +862,20 @@ static const struct qmp_phy_init_tbl sm8650_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO_MODE1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IETRIM, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IPTRIM, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
@@ -848,17 +883,28 @@ static const struct qmp_phy_init_tbl sm8650_ufsphy_serdes[] = {
};
static const struct qmp_phy_init_tbl sm8650_ufsphy_tx[] = {
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x01),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
};
static const struct qmp_phy_init_tbl sm8650_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_SO_GAIN_RATE4, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_PI_CONTROLS, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_OFFSET_ADAPTOR_CNTRL3, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE4, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_SO_GAIN_RATE4, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x3e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B2, 0x18),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B4, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6, 0x60),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B3, 0x9e),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B6, 0x60),
@@ -866,23 +912,41 @@ static const struct qmp_phy_init_tbl sm8650_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B4, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B5, 0x36),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B8, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B2, 0x20),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B3, 0xb9),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B6, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B4, 0x4f),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_SO_SATURATION, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_PI_CTRL1, 0x94),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0, 0xfa),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_DLL0_FTUNE_CTRL, 0x30),
};
static const struct qmp_phy_init_tbl sm8650_ufsphy_pcs[] = {
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PCS_CTRL1, 0xc1),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x33),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x68),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S4, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S5, 0x12),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S6, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S7, 0x19),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x13),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x69),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_g5_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x33),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HS_G5_SYNC_LENGTH_CAPABILITY, 0x4d),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSG5_SYNC_WAIT_TIME, 0x9e),
};
struct qmp_ufs_offsets {
@@ -1346,6 +1410,42 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.regs = ufsphy_v5_regs_layout,
};
+static const struct qmp_phy_cfg sm8475_ufsphy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_ufs_offsets_v6,
+ .max_supported_gear = UFS_HS_G4,
+
+ .tbls = {
+ .serdes = sm8475_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8475_ufsphy_serdes),
+ .tx = sm8550_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8550_ufsphy_tx),
+ .rx = sm8550_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8550_ufsphy_rx),
+ .pcs = sm8550_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8550_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8550_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8550_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_overlay[0] = {
+ .serdes = sm8475_ufsphy_g4_serdes,
+ .serdes_num = ARRAY_SIZE(sm8475_ufsphy_g4_serdes),
+ .tx = sm8550_ufsphy_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8550_ufsphy_g4_tx),
+ .rx = sm8550_ufsphy_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8550_ufsphy_g4_rx),
+ .pcs = sm8475_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8475_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
+ },
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v6_regs_layout,
+};
+
static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.lanes = 2,
@@ -1407,6 +1507,17 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.pcs = sm8650_ufsphy_pcs,
.pcs_num = ARRAY_SIZE(sm8650_ufsphy_pcs),
},
+ .tbls_hs_overlay[0] = {
+ .pcs = sm8650_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8650_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
+ },
+ .tbls_hs_overlay[1] = {
+ .pcs = sm8650_ufsphy_g5_pcs,
+ .pcs_num = ARRAY_SIZE(sm8650_ufsphy_g5_pcs),
+ .max_gear = UFS_HS_G5,
+ },
+
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v6_regs_layout,
@@ -1942,6 +2053,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
.compatible = "qcom,sm8450-qmp-ufs-phy",
.data = &sm8450_ufsphy_cfg,
}, {
+ .compatible = "qcom,sm8475-qmp-ufs-phy",
+ .data = &sm8475_ufsphy_cfg,
+ }, {
.compatible = "qcom,sm8550-qmp-ufs-phy",
.data = &sm8550_ufsphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 85253936fac3..c174463c58a3 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -337,6 +337,29 @@ static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG2, 0x08),
};
+static const struct qmp_phy_init_tbl qdu1000_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
+static const struct qmp_phy_init_tbl qdu1000_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -1400,6 +1423,27 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.regs = qmp_v2_usb3phy_regs_layout,
};
+static const struct qmp_phy_cfg qdu1000_usb3_uniphy_cfg = {
+ .offsets = &qmp_usb_offsets_v5,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8350_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8350_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_rx_tbl),
+ .pcs_tbl = qdu1000_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qdu1000_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = qdu1000_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(qdu1000_usb3_uniphy_pcs_usb_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+ .has_pwrdn_delay = true,
+};
+
static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.offsets = &qmp_usb_offsets_v5,
@@ -2203,6 +2247,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
+ .compatible = "qcom,qdu1000-qmp-usb3-uni-phy",
+ .data = &qdu1000_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index a34f67bb7e61..08b0f4345760 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -87,6 +87,7 @@ config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
with Samsung IP block.
@@ -115,3 +116,15 @@ config PHY_ROCKCHIP_USB
select GENERIC_PHY
help
Enable this to support the Rockchip USB 2.0 PHY.
+
+config PHY_ROCKCHIP_USBDP
+ tristate "Rockchip USBDP COMBO PHY Driver"
+ depends on ARCH_ROCKCHIP && OF
+ depends on TYPEC
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip USB3.0/DP combo PHY with
+ Samsung IP block. This is required for USB3 support on RK3588.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-rockchip-usbdp.
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 3d911304e654..010a824e32ce 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX) += phy-rockchip-samsung-hdptx.o
obj-$(CONFIG_PHY_ROCKCHIP_SNPS_PCIE3) += phy-rockchip-snps-pcie3.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
+obj-$(CONFIG_PHY_ROCKCHIP_USBDP) += phy-rockchip-usbdp.o
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 76b9cf417591..0a9989e41237 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -245,7 +248,7 @@ static int rockchip_combphy_exit(struct phy *phy)
return 0;
}
-static const struct phy_ops rochchip_combphy_ops = {
+static const struct phy_ops rockchip_combphy_ops = {
.init = rockchip_combphy_init,
.exit = rockchip_combphy_exit,
.owner = THIS_MODULE,
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* find the phy-id from the io address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -352,7 +364,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
- priv->phy = devm_phy_create(dev, NULL, &rochchip_combphy_ops);
+ priv->phy = devm_phy_create(dev, NULL, &rockchip_combphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create combphy\n");
return PTR_ERR(priv->phy);
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961ce11..4e8ffd173096 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -35,11 +35,19 @@
#define RK3588_PCIE3PHY_GRF_CMN_CON0 0x0
#define RK3588_PCIE3PHY_GRF_PHY0_STATUS1 0x904
#define RK3588_PCIE3PHY_GRF_PHY1_STATUS1 0xa04
+#define RK3588_PCIE3PHY_GRF_PHY0_LN0_CON1 0x1004
+#define RK3588_PCIE3PHY_GRF_PHY0_LN1_CON1 0x1104
+#define RK3588_PCIE3PHY_GRF_PHY1_LN0_CON1 0x2004
+#define RK3588_PCIE3PHY_GRF_PHY1_LN1_CON1 0x2104
#define RK3588_SRAM_INIT_DONE(reg) (reg & BIT(0))
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_RX_CMN_REFCLK_MODE_EN ((BIT(7) << 16) | BIT(7))
+#define RK3588_RX_CMN_REFCLK_MODE_DIS (BIT(7) << 16)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -58,6 +66,7 @@ struct rockchip_p3phy_priv {
int num_clks;
int num_lanes;
u32 lanes[4];
+ u32 rx_cmn_refclk_mode[4];
};
struct rockchip_p3phy_ops {
@@ -132,39 +141,45 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_PHY0_LN0_CON1,
+ priv->rx_cmn_refclk_mode[0] ? RK3588_RX_CMN_REFCLK_MODE_EN :
+ RK3588_RX_CMN_REFCLK_MODE_DIS);
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_PHY0_LN1_CON1,
+ priv->rx_cmn_refclk_mode[1] ? RK3588_RX_CMN_REFCLK_MODE_EN :
+ RK3588_RX_CMN_REFCLK_MODE_DIS);
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_PHY1_LN0_CON1,
+ priv->rx_cmn_refclk_mode[2] ? RK3588_RX_CMN_REFCLK_MODE_EN :
+ RK3588_RX_CMN_REFCLK_MODE_DIS);
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_PHY1_LN1_CON1,
+ priv->rx_cmn_refclk_mode[3] ? RK3588_RX_CMN_REFCLK_MODE_EN :
+ RK3588_RX_CMN_REFCLK_MODE_DIS);
+
/* Deassert PCIe PMA output clamp mode */
regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, BIT(8) | BIT(24));
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
@@ -187,7 +202,7 @@ static const struct rockchip_p3phy_ops rk3588_ops = {
.phy_init = rockchip_p3phy_rk3588_init,
};
-static int rochchip_p3phy_init(struct phy *phy)
+static int rockchip_p3phy_init(struct phy *phy)
{
struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
int ret;
@@ -210,7 +225,7 @@ static int rochchip_p3phy_init(struct phy *phy)
return ret;
}
-static int rochchip_p3phy_exit(struct phy *phy)
+static int rockchip_p3phy_exit(struct phy *phy)
{
struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
@@ -219,9 +234,9 @@ static int rochchip_p3phy_exit(struct phy *phy)
return 0;
}
-static const struct phy_ops rochchip_p3phy_ops = {
- .init = rochchip_p3phy_init,
- .exit = rochchip_p3phy_exit,
+static const struct phy_ops rockchip_p3phy_ops = {
+ .init = rockchip_p3phy_init,
+ .exit = rockchip_p3phy_exit,
.set_mode = rockchip_p3phy_set_mode,
.owner = THIS_MODULE,
};
@@ -280,7 +295,24 @@ static int rockchip_p3phy_probe(struct platform_device *pdev)
return priv->num_lanes;
}
- priv->phy = devm_phy_create(dev, NULL, &rochchip_p3phy_ops);
+ ret = of_property_read_variable_u32_array(dev->of_node,
+ "rockchip,rx-common-refclk-mode",
+ priv->rx_cmn_refclk_mode, 1,
+ ARRAY_SIZE(priv->rx_cmn_refclk_mode));
+ /*
+ * If the rockchip,rx-common-refclk-mode property is absent, assume the
+ * mode is enabled for all lanes to stay backwards compatible with
+ * existing DTs (the hardware reset value is "enabled").
+ */
+ if (ret == -EINVAL) {
+ for (int i = 0; i < ARRAY_SIZE(priv->rx_cmn_refclk_mode); i++)
+ priv->rx_cmn_refclk_mode[i] = 1;
+ } else if (ret < 0) {
+ dev_err(dev, "failed to read rockchip,rx-common-refclk-mode property %d\n",
+ ret);
+ return ret;
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &rockchip_p3phy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create combphy\n");
return PTR_ERR(priv->phy);
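
Both the snps-pcie3 hunks above and the new USBDP driver below rely on the
Rockchip GRF write-mask convention: the upper 16 bits of a 32-bit write act
as a per-bit write-enable for the lower 16 bits, which is why
RK3588_RX_CMN_REFCLK_MODE_EN is (BIT(7) << 16) | BIT(7) while the _DIS
variant is only (BIT(7) << 16), and why RK_UDPHY_GEN_GRF_REG shifts the
mask by BIT_WRITEABLE_SHIFT. The helper below is an illustrative sketch
only; the name grf_field_val is hypothetical.

#include <linux/bits.h>
#include <linux/types.h>

/* Pack a GRF update: high half enables the bits, low half carries the value */
static inline u32 grf_field_val(u32 mask, u32 val)
{
	return (mask << 16) | (val & mask);
}

/*
 * grf_field_val(BIT(7), BIT(7)) -> 0x00800080  (set bit 7)
 * grf_field_val(BIT(7), 0)      -> 0x00800000  (clear bit 7)
 */
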
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
new file mode 100644
index 000000000000..2c51e5c62d3e
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -0,0 +1,1608 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Rockchip USBDP Combo PHY with Samsung IP block driver
+ *
+ * Copyright (C) 2021-2024 Rockchip Electronics Co., Ltd
+ * Copyright (C) 2024 Collabora Ltd
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+/* USBDP PHY Register Definitions */
+#define UDPHY_PCS 0x4000
+#define UDPHY_PMA 0x8000
+
+/* VO0 GRF Registers */
+#define DP_SINK_HPD_CFG BIT(11)
+#define DP_SINK_HPD_SEL BIT(10)
+#define DP_AUX_DIN_SEL BIT(9)
+#define DP_AUX_DOUT_SEL BIT(8)
+#define DP_LANE_SEL_N(n) GENMASK(2 * (n) + 1, 2 * (n))
+#define DP_LANE_SEL_ALL GENMASK(7, 0)
+
+/* PMA CMN Registers */
+#define CMN_LANE_MUX_AND_EN_OFFSET 0x0288 /* cmn_reg00A2 */
+#define CMN_DP_LANE_MUX_N(n) BIT((n) + 4)
+#define CMN_DP_LANE_EN_N(n) BIT(n)
+#define CMN_DP_LANE_MUX_ALL GENMASK(7, 4)
+#define CMN_DP_LANE_EN_ALL GENMASK(3, 0)
+
+#define CMN_DP_LINK_OFFSET 0x28c /* cmn_reg00A3 */
+#define CMN_DP_TX_LINK_BW GENMASK(6, 5)
+#define CMN_DP_TX_LANE_SWAP_EN BIT(2)
+
+#define CMN_SSC_EN_OFFSET 0x2d0 /* cmn_reg00B4 */
+#define CMN_ROPLL_SSC_EN BIT(1)
+#define CMN_LCPLL_SSC_EN BIT(0)
+
+#define CMN_ANA_LCPLL_DONE_OFFSET 0x0350 /* cmn_reg00D4 */
+#define CMN_ANA_LCPLL_LOCK_DONE BIT(7)
+#define CMN_ANA_LCPLL_AFC_DONE BIT(6)
+
+#define CMN_ANA_ROPLL_DONE_OFFSET 0x0354 /* cmn_reg00D5 */
+#define CMN_ANA_ROPLL_LOCK_DONE BIT(1)
+#define CMN_ANA_ROPLL_AFC_DONE BIT(0)
+
+#define CMN_DP_RSTN_OFFSET 0x038c /* cmn_reg00E3 */
+#define CMN_DP_INIT_RSTN BIT(3)
+#define CMN_DP_CMN_RSTN BIT(2)
+#define CMN_CDR_WTCHDG_EN BIT(1)
+#define CMN_CDR_WTCHDG_MSK_CDR_EN BIT(0)
+
+#define TRSV_ANA_TX_CLK_OFFSET_N(n) (0x854 + (n) * 0x800) /* trsv_reg0215 */
+#define LN_ANA_TX_SER_TXCLK_INV BIT(1)
+
+#define TRSV_LN0_MON_RX_CDR_DONE_OFFSET 0x0b84 /* trsv_reg02E1 */
+#define TRSV_LN0_MON_RX_CDR_LOCK_DONE BIT(0)
+
+#define TRSV_LN2_MON_RX_CDR_DONE_OFFSET 0x1b84 /* trsv_reg06E1 */
+#define TRSV_LN2_MON_RX_CDR_LOCK_DONE BIT(0)
+
+#define BIT_WRITEABLE_SHIFT 16
+#define PHY_AUX_DP_DATA_POL_NORMAL 0
+#define PHY_AUX_DP_DATA_POL_INVERT 1
+#define PHY_LANE_MUX_USB 0
+#define PHY_LANE_MUX_DP 1
+
+enum {
+ DP_BW_RBR,
+ DP_BW_HBR,
+ DP_BW_HBR2,
+ DP_BW_HBR3,
+};
+
+enum {
+ UDPHY_MODE_NONE = 0,
+ UDPHY_MODE_USB = BIT(0),
+ UDPHY_MODE_DP = BIT(1),
+ UDPHY_MODE_DP_USB = BIT(1) | BIT(0),
+};
+
+struct rk_udphy_grf_reg {
+ unsigned int offset;
+ unsigned int disable;
+ unsigned int enable;
+};
+
+#define _RK_UDPHY_GEN_GRF_REG(offset, mask, disable, enable) \
+{\
+ offset, \
+ FIELD_PREP_CONST(mask, disable) | (mask << BIT_WRITEABLE_SHIFT), \
+ FIELD_PREP_CONST(mask, enable) | (mask << BIT_WRITEABLE_SHIFT), \
+}
+
+#define RK_UDPHY_GEN_GRF_REG(offset, bitend, bitstart, disable, enable) \
+ _RK_UDPHY_GEN_GRF_REG(offset, GENMASK(bitend, bitstart), disable, enable)
+
+struct rk_udphy_grf_cfg {
+ /* u2phy-grf */
+ struct rk_udphy_grf_reg bvalid_phy_con;
+ struct rk_udphy_grf_reg bvalid_grf_con;
+
+ /* usb-grf */
+ struct rk_udphy_grf_reg usb3otg0_cfg;
+ struct rk_udphy_grf_reg usb3otg1_cfg;
+
+ /* usbdpphy-grf */
+ struct rk_udphy_grf_reg low_pwrn;
+ struct rk_udphy_grf_reg rx_lfps;
+};
+
+struct rk_udphy_vogrf_cfg {
+ /* vo-grf */
+ struct rk_udphy_grf_reg hpd_trigger;
+ u32 dp_lane_reg;
+};
+
+struct rk_udphy_dp_tx_drv_ctrl {
+ u32 trsv_reg0204;
+ u32 trsv_reg0205;
+ u32 trsv_reg0206;
+ u32 trsv_reg0207;
+};
+
+struct rk_udphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[2];
+ /* resets to be requested */
+ const char * const *rst_list;
+ int num_rsts;
+
+ struct rk_udphy_grf_cfg grfcfg;
+ struct rk_udphy_vogrf_cfg vogrfcfg[2];
+ const struct rk_udphy_dp_tx_drv_ctrl (*dp_tx_ctrl_cfg[4])[4];
+ const struct rk_udphy_dp_tx_drv_ctrl (*dp_tx_ctrl_cfg_typec[4])[4];
+};
+
+struct rk_udphy {
+ struct device *dev;
+ struct regmap *pma_regmap;
+ struct regmap *u2phygrf;
+ struct regmap *udphygrf;
+ struct regmap *usbgrf;
+ struct regmap *vogrf;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+ struct mutex mutex; /* mutex to protect access to individual PHYs */
+
+ /* clocks and resets */
+ int num_clks;
+ struct clk_bulk_data *clks;
+ struct clk *refclk;
+ int num_rsts;
+ struct reset_control_bulk_data *rsts;
+
+ /* PHY status management */
+ bool flip;
+ bool mode_change;
+ u8 mode;
+ u8 status;
+
+ /* utilized for USB */
+ bool hs; /* flag for high-speed */
+
+ /* utilized for DP */
+ struct gpio_desc *sbu1_dc_gpio;
+ struct gpio_desc *sbu2_dc_gpio;
+ u32 lane_mux_sel[4];
+ u32 dp_lane_sel[4];
+ u32 dp_aux_dout_sel;
+ u32 dp_aux_din_sel;
+ bool dp_sink_hpd_sel;
+ bool dp_sink_hpd_cfg;
+ u8 bw;
+ int id;
+
+ bool dp_in_use;
+
+ /* PHY const config */
+ const struct rk_udphy_cfg *cfgs;
+
+ /* PHY devices */
+ struct phy *phy_dp;
+ struct phy *phy_u3;
+};
+
+static const struct rk_udphy_dp_tx_drv_ctrl rk3588_dp_tx_drv_ctrl_rbr_hbr[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x20, 0x10, 0x42, 0xe5 },
+ { 0x26, 0x14, 0x42, 0xe5 },
+ { 0x29, 0x18, 0x42, 0xe5 },
+ { 0x2b, 0x1c, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x23, 0x10, 0x42, 0xe7 },
+ { 0x2a, 0x17, 0x43, 0xe7 },
+ { 0x2b, 0x1a, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x27, 0x10, 0x42, 0xe7 },
+ { 0x2b, 0x17, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0x29, 0x10, 0x43, 0xe7 },
+ },
+};
+
+static const struct rk_udphy_dp_tx_drv_ctrl rk3588_dp_tx_drv_ctrl_rbr_hbr_typec[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x20, 0x10, 0x42, 0xe5 },
+ { 0x26, 0x14, 0x42, 0xe5 },
+ { 0x29, 0x18, 0x42, 0xe5 },
+ { 0x2b, 0x1c, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x23, 0x10, 0x42, 0xe7 },
+ { 0x2a, 0x17, 0x43, 0xe7 },
+ { 0x2b, 0x1a, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x27, 0x10, 0x43, 0x67 },
+ { 0x2b, 0x17, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0x29, 0x10, 0x43, 0xe7 },
+ },
+};
+
+static const struct rk_udphy_dp_tx_drv_ctrl rk3588_dp_tx_drv_ctrl_hbr2[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x21, 0x10, 0x42, 0xe5 },
+ { 0x26, 0x14, 0x42, 0xe5 },
+ { 0x26, 0x16, 0x43, 0xe5 },
+ { 0x2a, 0x19, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x24, 0x10, 0x42, 0xe7 },
+ { 0x2a, 0x17, 0x43, 0xe7 },
+ { 0x2b, 0x1a, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x28, 0x10, 0x42, 0xe7 },
+ { 0x2b, 0x17, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0x28, 0x10, 0x43, 0xe7 },
+ },
+};
+
+static const struct rk_udphy_dp_tx_drv_ctrl rk3588_dp_tx_drv_ctrl_hbr3[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x21, 0x10, 0x42, 0xe5 },
+ { 0x26, 0x14, 0x42, 0xe5 },
+ { 0x26, 0x16, 0x43, 0xe5 },
+ { 0x29, 0x18, 0x43, 0xe7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x24, 0x10, 0x42, 0xe7 },
+ { 0x2a, 0x18, 0x43, 0xe7 },
+ { 0x2b, 0x1b, 0x43, 0xe7 }
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x27, 0x10, 0x42, 0xe7 },
+ { 0x2b, 0x18, 0x43, 0xe7 }
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0x28, 0x10, 0x43, 0xe7 },
+ },
+};
+
+static const struct reg_sequence rk_udphy_24m_refclk_cfg[] = {
+ {0x0090, 0x68}, {0x0094, 0x68},
+ {0x0128, 0x24}, {0x012c, 0x44},
+ {0x0130, 0x3f}, {0x0134, 0x44},
+ {0x015c, 0xa9}, {0x0160, 0x71},
+ {0x0164, 0x71}, {0x0168, 0xa9},
+ {0x0174, 0xa9}, {0x0178, 0x71},
+ {0x017c, 0x71}, {0x0180, 0xa9},
+ {0x018c, 0x41}, {0x0190, 0x00},
+ {0x0194, 0x05}, {0x01ac, 0x2a},
+ {0x01b0, 0x17}, {0x01b4, 0x17},
+ {0x01b8, 0x2a}, {0x01c8, 0x04},
+ {0x01cc, 0x08}, {0x01d0, 0x08},
+ {0x01d4, 0x04}, {0x01d8, 0x20},
+ {0x01dc, 0x01}, {0x01e0, 0x09},
+ {0x01e4, 0x03}, {0x01f0, 0x29},
+ {0x01f4, 0x02}, {0x01f8, 0x02},
+ {0x01fc, 0x29}, {0x0208, 0x2a},
+ {0x020c, 0x17}, {0x0210, 0x17},
+ {0x0214, 0x2a}, {0x0224, 0x20},
+ {0x03f0, 0x0a}, {0x03f4, 0x07},
+ {0x03f8, 0x07}, {0x03fc, 0x0c},
+ {0x0404, 0x12}, {0x0408, 0x1a},
+ {0x040c, 0x1a}, {0x0410, 0x3f},
+ {0x0ce0, 0x68}, {0x0ce8, 0xd0},
+ {0x0cf0, 0x87}, {0x0cf8, 0x70},
+ {0x0d00, 0x70}, {0x0d08, 0xa9},
+ {0x1ce0, 0x68}, {0x1ce8, 0xd0},
+ {0x1cf0, 0x87}, {0x1cf8, 0x70},
+ {0x1d00, 0x70}, {0x1d08, 0xa9},
+ {0x0a3c, 0xd0}, {0x0a44, 0xd0},
+ {0x0a48, 0x01}, {0x0a4c, 0x0d},
+ {0x0a54, 0xe0}, {0x0a5c, 0xe0},
+ {0x0a64, 0xa8}, {0x1a3c, 0xd0},
+ {0x1a44, 0xd0}, {0x1a48, 0x01},
+ {0x1a4c, 0x0d}, {0x1a54, 0xe0},
+ {0x1a5c, 0xe0}, {0x1a64, 0xa8}
+};
+
+static const struct reg_sequence rk_udphy_26m_refclk_cfg[] = {
+ {0x0830, 0x07}, {0x085c, 0x80},
+ {0x1030, 0x07}, {0x105c, 0x80},
+ {0x1830, 0x07}, {0x185c, 0x80},
+ {0x2030, 0x07}, {0x205c, 0x80},
+ {0x0228, 0x38}, {0x0104, 0x44},
+ {0x0248, 0x44}, {0x038c, 0x02},
+ {0x0878, 0x04}, {0x1878, 0x04},
+ {0x0898, 0x77}, {0x1898, 0x77},
+ {0x0054, 0x01}, {0x00e0, 0x38},
+ {0x0060, 0x24}, {0x0064, 0x77},
+ {0x0070, 0x76}, {0x0234, 0xe8},
+ {0x0af4, 0x15}, {0x1af4, 0x15},
+ {0x081c, 0xe5}, {0x181c, 0xe5},
+ {0x099c, 0x48}, {0x199c, 0x48},
+ {0x09a4, 0x07}, {0x09a8, 0x22},
+ {0x19a4, 0x07}, {0x19a8, 0x22},
+ {0x09b8, 0x3e}, {0x19b8, 0x3e},
+ {0x09e4, 0x02}, {0x19e4, 0x02},
+ {0x0a34, 0x1e}, {0x1a34, 0x1e},
+ {0x0a98, 0x2f}, {0x1a98, 0x2f},
+ {0x0c30, 0x0e}, {0x0c48, 0x06},
+ {0x1c30, 0x0e}, {0x1c48, 0x06},
+ {0x028c, 0x18}, {0x0af0, 0x00},
+ {0x1af0, 0x00}
+};
+
+static const struct reg_sequence rk_udphy_init_sequence[] = {
+ {0x0104, 0x44}, {0x0234, 0xe8},
+ {0x0248, 0x44}, {0x028c, 0x18},
+ {0x081c, 0xe5}, {0x0878, 0x00},
+ {0x0994, 0x1c}, {0x0af0, 0x00},
+ {0x181c, 0xe5}, {0x1878, 0x00},
+ {0x1994, 0x1c}, {0x1af0, 0x00},
+ {0x0428, 0x60}, {0x0d58, 0x33},
+ {0x1d58, 0x33}, {0x0990, 0x74},
+ {0x0d64, 0x17}, {0x08c8, 0x13},
+ {0x1990, 0x74}, {0x1d64, 0x17},
+ {0x18c8, 0x13}, {0x0d90, 0x40},
+ {0x0da8, 0x40}, {0x0dc0, 0x40},
+ {0x0dd8, 0x40}, {0x1d90, 0x40},
+ {0x1da8, 0x40}, {0x1dc0, 0x40},
+ {0x1dd8, 0x40}, {0x03c0, 0x30},
+ {0x03c4, 0x06}, {0x0e10, 0x00},
+ {0x1e10, 0x00}, {0x043c, 0x0f},
+ {0x0d2c, 0xff}, {0x1d2c, 0xff},
+ {0x0d34, 0x0f}, {0x1d34, 0x0f},
+ {0x08fc, 0x2a}, {0x0914, 0x28},
+ {0x0a30, 0x03}, {0x0e38, 0x03},
+ {0x0ecc, 0x27}, {0x0ed0, 0x22},
+ {0x0ed4, 0x26}, {0x18fc, 0x2a},
+ {0x1914, 0x28}, {0x1a30, 0x03},
+ {0x1e38, 0x03}, {0x1ecc, 0x27},
+ {0x1ed0, 0x22}, {0x1ed4, 0x26},
+ {0x0048, 0x0f}, {0x0060, 0x3c},
+ {0x0064, 0xf7}, {0x006c, 0x20},
+ {0x0070, 0x7d}, {0x0074, 0x68},
+ {0x0af4, 0x1a}, {0x1af4, 0x1a},
+ {0x0440, 0x3f}, {0x10d4, 0x08},
+ {0x20d4, 0x08}, {0x00d4, 0x30},
+ {0x0024, 0x6e},
+};
+
+static inline int rk_udphy_grfreg_write(struct regmap *base,
+ const struct rk_udphy_grf_reg *reg, bool en)
+{
+ return regmap_write(base, reg->offset, en ? reg->enable : reg->disable);
+}
+
+static int rk_udphy_clk_init(struct rk_udphy *udphy, struct device *dev)
+{
+ int i;
+
+ udphy->num_clks = devm_clk_bulk_get_all(dev, &udphy->clks);
+ if (udphy->num_clks < 1)
+ return -ENODEV;
+
+ /* used to configure the PHY reference clock frequency */
+ for (i = 0; i < udphy->num_clks; i++) {
+ if (!strncmp(udphy->clks[i].id, "refclk", 6)) {
+ udphy->refclk = udphy->clks[i].clk;
+ break;
+ }
+ }
+
+ if (!udphy->refclk)
+ return dev_err_probe(udphy->dev, -EINVAL, "no refclk found\n");
+
+ return 0;
+}
+
+static int rk_udphy_reset_assert_all(struct rk_udphy *udphy)
+{
+ return reset_control_bulk_assert(udphy->num_rsts, udphy->rsts);
+}
+
+static int rk_udphy_reset_deassert_all(struct rk_udphy *udphy)
+{
+ return reset_control_bulk_deassert(udphy->num_rsts, udphy->rsts);
+}
+
+static int rk_udphy_reset_deassert(struct rk_udphy *udphy, char *name)
+{
+ struct reset_control_bulk_data *list = udphy->rsts;
+ int idx;
+
+ for (idx = 0; idx < udphy->num_rsts; idx++) {
+ if (!strcmp(list[idx].id, name))
+ return reset_control_deassert(list[idx].rstc);
+ }
+
+ return -EINVAL;
+}
+
+static int rk_udphy_reset_init(struct rk_udphy *udphy, struct device *dev)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+ int idx;
+
+ udphy->num_rsts = cfg->num_rsts;
+ udphy->rsts = devm_kcalloc(dev, udphy->num_rsts,
+ sizeof(*udphy->rsts), GFP_KERNEL);
+ if (!udphy->rsts)
+ return -ENOMEM;
+
+ for (idx = 0; idx < cfg->num_rsts; idx++)
+ udphy->rsts[idx].id = cfg->rst_list[idx];
+
+ return devm_reset_control_bulk_get_exclusive(dev, cfg->num_rsts,
+ udphy->rsts);
+}
+
+static void rk_udphy_u3_port_disable(struct rk_udphy *udphy, u8 disable)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+ const struct rk_udphy_grf_reg *preg;
+
+ preg = udphy->id ? &cfg->grfcfg.usb3otg1_cfg : &cfg->grfcfg.usb3otg0_cfg;
+ rk_udphy_grfreg_write(udphy->usbgrf, preg, disable);
+}
+
+static void rk_udphy_usb_bvalid_enable(struct rk_udphy *udphy, u8 enable)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+
+ rk_udphy_grfreg_write(udphy->u2phygrf, &cfg->grfcfg.bvalid_phy_con, enable);
+ rk_udphy_grfreg_write(udphy->u2phygrf, &cfg->grfcfg.bvalid_grf_con, enable);
+}
+
+/*
+ * In the USB/DP combo PHY driver, there are two ways to map the lanes.
+ *
+ * 1 Type-C mapping table (DP_Alt_Mode V1.0b, ABF pin mapping removed)
+ * ---------------------------------------------------------------------------
+ * Type-C Pin B11-B10 A2-A3 A11-A10 B2-B3
+ * PHY Pad ln0(tx/rx) ln1(tx) ln2(tx/rx) ln3(tx)
+ * C/E(Normal) dpln3 dpln2 dpln0 dpln1
+ * C/E(Flip ) dpln0 dpln1 dpln3 dpln2
+ * D/F(Normal) usbrx usbtx dpln0 dpln1
+ * D/F(Flip ) dpln0 dpln1 usbrx usbtx
+ * A(Normal ) dpln3 dpln1 dpln2 dpln0
+ * A(Flip ) dpln2 dpln0 dpln3 dpln1
+ * B(Normal ) usbrx usbtx dpln1 dpln0
+ * B(Flip ) dpln1 dpln0 usbrx usbtx
+ * ---------------------------------------------------------------------------
+ *
+ * 2 Mapping the lanes in the dtsi
+ * If all 4 lanes are assigned to the DP function, define rockchip,dp-lane-mux = <x x x x>;
+ * for example:
+ * ---------------------------------------------------------------------------
+ * B11-B10 A2-A3 A11-A10 B2-B3
+ * rockchip,dp-lane-mux ln0(tx/rx) ln1(tx) ln2(tx/rx) ln3(tx)
+ * <0 1 2 3> dpln0 dpln1 dpln2 dpln3
+ * <2 3 0 1> dpln2 dpln3 dpln0 dpln1
+ * ---------------------------------------------------------------------------
+ * If 2 lanes are used for the DP function and 2 for the USB function, define rockchip,dp-lane-mux = <x x>;
+ * for example:
+ * ---------------------------------------------------------------------------
+ * B11-B10 A2-A3 A11-A10 B2-B3
+ * rockchip,dp-lane-mux ln0(tx/rx) ln1(tx) ln2(tx/rx) ln3(tx)
+ * <0 1> dpln0 dpln1 usbrx usbtx
+ * <2 3> usbrx usbtx dpln0 dpln1
+ * ---------------------------------------------------------------------------
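+ *
+ * For example, pin assignment D/F in normal orientation (mapping table 1
+ * above) is programmed by rk_udphy_set_typec_default_mapping() as
+ * lane_mux_sel = { USB, USB, DP, DP } with dp_lane_sel[0] = 2 and
+ * dp_lane_sel[1] = 3, i.e. usbrx/usbtx on PHY pads ln0/ln1 and
+ * dpln0/dpln1 on ln2/ln3.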
+ */
+
+static void rk_udphy_dplane_select(struct rk_udphy *udphy)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+ u32 value = 0;
+
+ switch (udphy->mode) {
+ case UDPHY_MODE_DP:
+ value |= 2 << udphy->dp_lane_sel[2] * 2;
+ value |= 3 << udphy->dp_lane_sel[3] * 2;
+ fallthrough;
+
+ case UDPHY_MODE_DP_USB:
+ value |= 0 << udphy->dp_lane_sel[0] * 2;
+ value |= 1 << udphy->dp_lane_sel[1] * 2;
+ break;
+
+ case UDPHY_MODE_USB:
+ break;
+
+ default:
+ break;
+ }
+
+ regmap_write(udphy->vogrf, cfg->vogrfcfg[udphy->id].dp_lane_reg,
+ ((DP_AUX_DIN_SEL | DP_AUX_DOUT_SEL | DP_LANE_SEL_ALL) << 16) |
+ FIELD_PREP(DP_AUX_DIN_SEL, udphy->dp_aux_din_sel) |
+ FIELD_PREP(DP_AUX_DOUT_SEL, udphy->dp_aux_dout_sel) | value);
+}
+
+static int rk_udphy_dplane_get(struct rk_udphy *udphy)
+{
+ int dp_lanes;
+
+ switch (udphy->mode) {
+ case UDPHY_MODE_DP:
+ dp_lanes = 4;
+ break;
+
+ case UDPHY_MODE_DP_USB:
+ dp_lanes = 2;
+ break;
+
+ case UDPHY_MODE_USB:
+ default:
+ dp_lanes = 0;
+ break;
+ }
+
+ return dp_lanes;
+}
+
+static void rk_udphy_dplane_enable(struct rk_udphy *udphy, int dp_lanes)
+{
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < dp_lanes; i++)
+ val |= BIT(udphy->dp_lane_sel[i]);
+
+ regmap_update_bits(udphy->pma_regmap, CMN_LANE_MUX_AND_EN_OFFSET, CMN_DP_LANE_EN_ALL,
+ FIELD_PREP(CMN_DP_LANE_EN_ALL, val));
+
+ if (!dp_lanes)
+ regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
+ CMN_DP_CMN_RSTN, FIELD_PREP(CMN_DP_CMN_RSTN, 0x0));
+}
+
+static void rk_udphy_dp_hpd_event_trigger(struct rk_udphy *udphy, bool hpd)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+
+ udphy->dp_sink_hpd_sel = true;
+ udphy->dp_sink_hpd_cfg = hpd;
+
+ if (!udphy->dp_in_use)
+ return;
+
+ rk_udphy_grfreg_write(udphy->vogrf, &cfg->vogrfcfg[udphy->id].hpd_trigger, hpd);
+}
+
+static void rk_udphy_set_typec_default_mapping(struct rk_udphy *udphy)
+{
+ if (udphy->flip) {
+ udphy->dp_lane_sel[0] = 0;
+ udphy->dp_lane_sel[1] = 1;
+ udphy->dp_lane_sel[2] = 3;
+ udphy->dp_lane_sel[3] = 2;
+ udphy->lane_mux_sel[0] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[1] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[2] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[3] = PHY_LANE_MUX_USB;
+ udphy->dp_aux_dout_sel = PHY_AUX_DP_DATA_POL_INVERT;
+ udphy->dp_aux_din_sel = PHY_AUX_DP_DATA_POL_INVERT;
+ gpiod_set_value_cansleep(udphy->sbu1_dc_gpio, 1);
+ gpiod_set_value_cansleep(udphy->sbu2_dc_gpio, 0);
+ } else {
+ udphy->dp_lane_sel[0] = 2;
+ udphy->dp_lane_sel[1] = 3;
+ udphy->dp_lane_sel[2] = 1;
+ udphy->dp_lane_sel[3] = 0;
+ udphy->lane_mux_sel[0] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[1] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[2] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[3] = PHY_LANE_MUX_DP;
+ udphy->dp_aux_dout_sel = PHY_AUX_DP_DATA_POL_NORMAL;
+ udphy->dp_aux_din_sel = PHY_AUX_DP_DATA_POL_NORMAL;
+ gpiod_set_value_cansleep(udphy->sbu1_dc_gpio, 0);
+ gpiod_set_value_cansleep(udphy->sbu2_dc_gpio, 1);
+ }
+
+ udphy->mode = UDPHY_MODE_DP_USB;
+}
+
+static int rk_udphy_orien_sw_set(struct typec_switch_dev *sw,
+ enum typec_orientation orien)
+{
+ struct rk_udphy *udphy = typec_switch_get_drvdata(sw);
+
+ mutex_lock(&udphy->mutex);
+
+ if (orien == TYPEC_ORIENTATION_NONE) {
+ gpiod_set_value_cansleep(udphy->sbu1_dc_gpio, 0);
+ gpiod_set_value_cansleep(udphy->sbu2_dc_gpio, 0);
+ /* unattached */
+ rk_udphy_usb_bvalid_enable(udphy, false);
+ goto unlock_ret;
+ }
+
+ udphy->flip = (orien == TYPEC_ORIENTATION_REVERSE) ? true : false;
+ rk_udphy_set_typec_default_mapping(udphy);
+ rk_udphy_usb_bvalid_enable(udphy, true);
+
+unlock_ret:
+ mutex_unlock(&udphy->mutex);
+ return 0;
+}
+
+static void rk_udphy_orien_switch_unregister(void *data)
+{
+ struct rk_udphy *udphy = data;
+
+ typec_switch_unregister(udphy->sw);
+}
+
+static int rk_udphy_setup_orien_switch(struct rk_udphy *udphy)
+{
+ struct typec_switch_desc sw_desc = { };
+
+ sw_desc.drvdata = udphy;
+ sw_desc.fwnode = dev_fwnode(udphy->dev);
+ sw_desc.set = rk_udphy_orien_sw_set;
+
+ udphy->sw = typec_switch_register(udphy->dev, &sw_desc);
+ if (IS_ERR(udphy->sw)) {
+ dev_err(udphy->dev, "Error register typec orientation switch: %ld\n",
+ PTR_ERR(udphy->sw));
+ return PTR_ERR(udphy->sw);
+ }
+
+ return devm_add_action_or_reset(udphy->dev,
+ rk_udphy_orien_switch_unregister, udphy);
+}
+
+static int rk_udphy_refclk_set(struct rk_udphy *udphy)
+{
+ unsigned long rate;
+ int ret;
+
+ /* configure phy reference clock */
+ rate = clk_get_rate(udphy->refclk);
+ dev_dbg(udphy->dev, "refclk freq %ld\n", rate);
+
+ switch (rate) {
+ case 24000000:
+ ret = regmap_multi_reg_write(udphy->pma_regmap, rk_udphy_24m_refclk_cfg,
+ ARRAY_SIZE(rk_udphy_24m_refclk_cfg));
+ if (ret)
+ return ret;
+ break;
+
+ case 26000000:
+ /* register default is 26MHz */
+ ret = regmap_multi_reg_write(udphy->pma_regmap, rk_udphy_26m_refclk_cfg,
+ ARRAY_SIZE(rk_udphy_26m_refclk_cfg));
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ dev_err(udphy->dev, "unsupported refclk freq %ld\n", rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rk_udphy_status_check(struct rk_udphy *udphy)
+{
+ unsigned int val;
+ int ret;
+
+ /* LCPLL check */
+ if (udphy->mode & UDPHY_MODE_USB) {
+ ret = regmap_read_poll_timeout(udphy->pma_regmap, CMN_ANA_LCPLL_DONE_OFFSET,
+ val, (val & CMN_ANA_LCPLL_AFC_DONE) &&
+ (val & CMN_ANA_LCPLL_LOCK_DONE), 200, 100000);
+ if (ret) {
+ dev_err(udphy->dev, "cmn ana lcpll lock timeout\n");
+ /*
+ * If earlier software (U-Boot) already enabled USB once,
+ * the PLL may have problems locking on the first try.
+ * It will succeed on the second try, so for the time
+ * being returning -EPROBE_DEFER works around the issue.
+ *
+ * This requires further investigation to understand the
+ * root cause, especially considering that the driver is
+ * asserting all reset lines at probe time.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ if (!udphy->flip) {
+ ret = regmap_read_poll_timeout(udphy->pma_regmap,
+ TRSV_LN0_MON_RX_CDR_DONE_OFFSET, val,
+ val & TRSV_LN0_MON_RX_CDR_LOCK_DONE,
+ 200, 100000);
+ if (ret)
+ dev_err(udphy->dev, "trsv ln0 mon rx cdr lock timeout\n");
+ } else {
+ ret = regmap_read_poll_timeout(udphy->pma_regmap,
+ TRSV_LN2_MON_RX_CDR_DONE_OFFSET, val,
+ val & TRSV_LN2_MON_RX_CDR_LOCK_DONE,
+ 200, 100000);
+ if (ret)
+ dev_err(udphy->dev, "trsv ln2 mon rx cdr lock timeout\n");
+ }
+ }
+
+ return 0;
+}
+
+static int rk_udphy_init(struct rk_udphy *udphy)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+ int ret;
+
+ rk_udphy_reset_assert_all(udphy);
+ usleep_range(10000, 11000);
+
+ /* enable rx lfps for usb */
+ if (udphy->mode & UDPHY_MODE_USB)
+ rk_udphy_grfreg_write(udphy->udphygrf, &cfg->grfcfg.rx_lfps, true);
+
+ /* Step 1: power on pma and deassert apb rstn */
+ rk_udphy_grfreg_write(udphy->udphygrf, &cfg->grfcfg.low_pwrn, true);
+
+ rk_udphy_reset_deassert(udphy, "pma_apb");
+ rk_udphy_reset_deassert(udphy, "pcs_apb");
+
+ /* Step 2: set init sequence and phy refclk */
+ ret = regmap_multi_reg_write(udphy->pma_regmap, rk_udphy_init_sequence,
+ ARRAY_SIZE(rk_udphy_init_sequence));
+ if (ret) {
+ dev_err(udphy->dev, "init sequence set error %d\n", ret);
+ goto assert_resets;
+ }
+
+ ret = rk_udphy_refclk_set(udphy);
+ if (ret) {
+ dev_err(udphy->dev, "refclk set error %d\n", ret);
+ goto assert_resets;
+ }
+
+ /* Step 3: configure lane mux */
+ regmap_update_bits(udphy->pma_regmap, CMN_LANE_MUX_AND_EN_OFFSET,
+ CMN_DP_LANE_MUX_ALL | CMN_DP_LANE_EN_ALL,
+ FIELD_PREP(CMN_DP_LANE_MUX_N(3), udphy->lane_mux_sel[3]) |
+ FIELD_PREP(CMN_DP_LANE_MUX_N(2), udphy->lane_mux_sel[2]) |
+ FIELD_PREP(CMN_DP_LANE_MUX_N(1), udphy->lane_mux_sel[1]) |
+ FIELD_PREP(CMN_DP_LANE_MUX_N(0), udphy->lane_mux_sel[0]) |
+ FIELD_PREP(CMN_DP_LANE_EN_ALL, 0));
+
+ /* Step 4: deassert init rstn and wait for 200ns from datasheet */
+ if (udphy->mode & UDPHY_MODE_USB)
+ rk_udphy_reset_deassert(udphy, "init");
+
+ if (udphy->mode & UDPHY_MODE_DP) {
+ regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
+ CMN_DP_INIT_RSTN,
+ FIELD_PREP(CMN_DP_INIT_RSTN, 0x1));
+ }
+
+ udelay(1);
+
+ /* Step 5: deassert cmn/lane rstn */
+ if (udphy->mode & UDPHY_MODE_USB) {
+ rk_udphy_reset_deassert(udphy, "cmn");
+ rk_udphy_reset_deassert(udphy, "lane");
+ }
+
+ /* Step 6: wait for lock done of pll */
+ ret = rk_udphy_status_check(udphy);
+ if (ret)
+ goto assert_resets;
+
+ return 0;
+
+assert_resets:
+ rk_udphy_reset_assert_all(udphy);
+ return ret;
+}
+
+static int rk_udphy_setup(struct rk_udphy *udphy)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(udphy->num_clks, udphy->clks);
+ if (ret) {
+ dev_err(udphy->dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ ret = rk_udphy_init(udphy);
+ if (ret) {
+ dev_err(udphy->dev, "failed to init combophy\n");
+ clk_bulk_disable_unprepare(udphy->num_clks, udphy->clks);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rk_udphy_disable(struct rk_udphy *udphy)
+{
+ clk_bulk_disable_unprepare(udphy->num_clks, udphy->clks);
+ rk_udphy_reset_assert_all(udphy);
+}
+
+static int rk_udphy_parse_lane_mux_data(struct rk_udphy *udphy)
+{
+ int ret, i, num_lanes;
+
+ num_lanes = device_property_count_u32(udphy->dev, "rockchip,dp-lane-mux");
+ if (num_lanes < 0) {
+ dev_dbg(udphy->dev, "no dp-lane-mux, following dp alt mode\n");
+ udphy->mode = UDPHY_MODE_USB;
+ return 0;
+ }
+
+ if (num_lanes != 2 && num_lanes != 4)
+ return dev_err_probe(udphy->dev, -EINVAL,
+ "invalid number of lane mux\n");
+
+ ret = device_property_read_u32_array(udphy->dev, "rockchip,dp-lane-mux",
+ udphy->dp_lane_sel, num_lanes);
+ if (ret)
+ return dev_err_probe(udphy->dev, ret, "get dp lane mux failed\n");
+
+ for (i = 0; i < num_lanes; i++) {
+ int j;
+
+ if (udphy->dp_lane_sel[i] > 3)
+ return dev_err_probe(udphy->dev, -EINVAL,
+ "lane mux between 0 and 3, exceeding the range\n");
+
+ udphy->lane_mux_sel[udphy->dp_lane_sel[i]] = PHY_LANE_MUX_DP;
+
+ for (j = i + 1; j < num_lanes; j++) {
+ if (udphy->dp_lane_sel[i] == udphy->dp_lane_sel[j])
+ return dev_err_probe(udphy->dev, -EINVAL,
+ "set repeat lane mux value\n");
+ }
+ }
+
+ udphy->mode = UDPHY_MODE_DP;
+ if (num_lanes == 2) {
+ udphy->mode |= UDPHY_MODE_USB;
+ udphy->flip = (udphy->lane_mux_sel[0] == PHY_LANE_MUX_DP);
+ }
+
+ return 0;
+}
+
+static int rk_udphy_get_initial_status(struct rk_udphy *udphy)
+{
+ int ret;
+ u32 value;
+
+ ret = clk_bulk_prepare_enable(udphy->num_clks, udphy->clks);
+ if (ret) {
+ dev_err(udphy->dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ rk_udphy_reset_deassert_all(udphy);
+
+ regmap_read(udphy->pma_regmap, CMN_LANE_MUX_AND_EN_OFFSET, &value);
+ if (FIELD_GET(CMN_DP_LANE_MUX_ALL, value) && FIELD_GET(CMN_DP_LANE_EN_ALL, value))
+ udphy->status = UDPHY_MODE_DP;
+ else
+ rk_udphy_disable(udphy);
+
+ return 0;
+}
+
+static int rk_udphy_parse_dt(struct rk_udphy *udphy)
+{
+ struct device *dev = udphy->dev;
+ struct device_node *np = dev_of_node(dev);
+ enum usb_device_speed maximum_speed;
+ int ret;
+
+ udphy->u2phygrf = syscon_regmap_lookup_by_phandle(np, "rockchip,u2phy-grf");
+ if (IS_ERR(udphy->u2phygrf))
+ return dev_err_probe(dev, PTR_ERR(udphy->u2phygrf), "failed to get u2phy-grf\n");
+
+ udphy->udphygrf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbdpphy-grf");
+ if (IS_ERR(udphy->udphygrf))
+ return dev_err_probe(dev, PTR_ERR(udphy->udphygrf), "failed to get usbdpphy-grf\n");
+
+ udphy->usbgrf = syscon_regmap_lookup_by_phandle(np, "rockchip,usb-grf");
+ if (IS_ERR(udphy->usbgrf))
+ return dev_err_probe(dev, PTR_ERR(udphy->usbgrf), "failed to get usb-grf\n");
+
+ udphy->vogrf = syscon_regmap_lookup_by_phandle(np, "rockchip,vo-grf");
+ if (IS_ERR(udphy->vogrf))
+ return dev_err_probe(dev, PTR_ERR(udphy->vogrf), "failed to get vo-grf\n");
+
+ ret = rk_udphy_parse_lane_mux_data(udphy);
+ if (ret)
+ return ret;
+
+ udphy->sbu1_dc_gpio = devm_gpiod_get_optional(dev, "sbu1-dc", GPIOD_OUT_LOW);
+ if (IS_ERR(udphy->sbu1_dc_gpio))
+ return PTR_ERR(udphy->sbu1_dc_gpio);
+
+ udphy->sbu2_dc_gpio = devm_gpiod_get_optional(dev, "sbu2-dc", GPIOD_OUT_LOW);
+ if (IS_ERR(udphy->sbu2_dc_gpio))
+ return PTR_ERR(udphy->sbu2_dc_gpio);
+
+ if (device_property_present(dev, "maximum-speed")) {
+ maximum_speed = usb_get_maximum_speed(dev);
+ udphy->hs = maximum_speed <= USB_SPEED_HIGH ? true : false;
+ }
+
+ ret = rk_udphy_clk_init(udphy, dev);
+ if (ret)
+ return ret;
+
+ return rk_udphy_reset_init(udphy, dev);
+}
+
+static int rk_udphy_power_on(struct rk_udphy *udphy, u8 mode)
+{
+ int ret;
+
+ if (!(udphy->mode & mode)) {
+ dev_info(udphy->dev, "mode 0x%02x is not support\n", mode);
+ return 0;
+ }
+
+ if (udphy->status == UDPHY_MODE_NONE) {
+ udphy->mode_change = false;
+ ret = rk_udphy_setup(udphy);
+ if (ret)
+ return ret;
+
+ if (udphy->mode & UDPHY_MODE_USB)
+ rk_udphy_u3_port_disable(udphy, false);
+ } else if (udphy->mode_change) {
+ udphy->mode_change = false;
+ udphy->status = UDPHY_MODE_NONE;
+ if (udphy->mode == UDPHY_MODE_DP)
+ rk_udphy_u3_port_disable(udphy, true);
+
+ rk_udphy_disable(udphy);
+ ret = rk_udphy_setup(udphy);
+ if (ret)
+ return ret;
+ }
+
+ udphy->status |= mode;
+
+ return 0;
+}
+
+static void rk_udphy_power_off(struct rk_udphy *udphy, u8 mode)
+{
+ if (!(udphy->mode & mode)) {
+ dev_info(udphy->dev, "mode 0x%02x is not supported\n", mode);
+ return;
+ }
+
+ if (!udphy->status)
+ return;
+
+ udphy->status &= ~mode;
+
+ if (udphy->status == UDPHY_MODE_NONE)
+ rk_udphy_disable(udphy);
+}
+
+static int rk_udphy_dp_phy_init(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+
+ mutex_lock(&udphy->mutex);
+
+ udphy->dp_in_use = true;
+ rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
+
+ mutex_unlock(&udphy->mutex);
+
+ return 0;
+}
+
+static int rk_udphy_dp_phy_exit(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+
+ mutex_lock(&udphy->mutex);
+ udphy->dp_in_use = false;
+ mutex_unlock(&udphy->mutex);
+ return 0;
+}
+
+static int rk_udphy_dp_phy_power_on(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+ int ret, dp_lanes;
+
+ mutex_lock(&udphy->mutex);
+
+ dp_lanes = rk_udphy_dplane_get(udphy);
+ phy_set_bus_width(phy, dp_lanes);
+
+ ret = rk_udphy_power_on(udphy, UDPHY_MODE_DP);
+ if (ret)
+ goto unlock;
+
+ rk_udphy_dplane_enable(udphy, dp_lanes);
+
+ rk_udphy_dplane_select(udphy);
+
+unlock:
+ mutex_unlock(&udphy->mutex);
+ /*
+ * If data is sent over the AUX channel too soon after the PHY
+ * powers on, the AUX channel may not be ready yet and AUX errors
+ * result. Add a delay to avoid this.
+ */
+ usleep_range(10000, 11000);
+ return ret;
+}
+
+static int rk_udphy_dp_phy_power_off(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+
+ mutex_lock(&udphy->mutex);
+ rk_udphy_dplane_enable(udphy, 0);
+ rk_udphy_power_off(udphy, UDPHY_MODE_DP);
+ mutex_unlock(&udphy->mutex);
+
+ return 0;
+}
+
+static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
+{
+ switch (link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ case 8100:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rk_udphy_dp_phy_verify_config(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
+{
+ int i, ret;
+
+ /* If changing link rate was required, verify it's supported. */
+ ret = rk_udphy_dp_phy_verify_link_rate(dp->link_rate);
+ if (ret)
+ return ret;
+
+ /* Verify lane count. */
+ switch (dp->lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid lane count. */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * If changing voltages is required, check swing and pre-emphasis
+ * levels, per-lane.
+ */
+ if (dp->set_voltages) {
+ /* Lane count verified previously. */
+ for (i = 0; i < dp->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ /*
+ * Sum of voltage swing and pre-emphasis levels cannot
+ * exceed 3.
+ */
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void rk_udphy_dp_set_voltage(struct rk_udphy *udphy, u8 bw,
+ u32 voltage, u32 pre, u32 lane)
+{
+ const struct rk_udphy_cfg *cfg = udphy->cfgs;
+ const struct rk_udphy_dp_tx_drv_ctrl (*dp_ctrl)[4];
+ u32 offset = 0x800 * lane;
+ u32 val;
+
+ if (udphy->mux)
+ dp_ctrl = cfg->dp_tx_ctrl_cfg_typec[bw];
+ else
+ dp_ctrl = cfg->dp_tx_ctrl_cfg[bw];
+
+ val = dp_ctrl[voltage][pre].trsv_reg0204;
+ regmap_write(udphy->pma_regmap, 0x0810 + offset, val);
+
+ val = dp_ctrl[voltage][pre].trsv_reg0205;
+ regmap_write(udphy->pma_regmap, 0x0814 + offset, val);
+
+ val = dp_ctrl[voltage][pre].trsv_reg0206;
+ regmap_write(udphy->pma_regmap, 0x0818 + offset, val);
+
+ val = dp_ctrl[voltage][pre].trsv_reg0207;
+ regmap_write(udphy->pma_regmap, 0x081c + offset, val);
+}
+
+static int rk_udphy_dp_phy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+ struct phy_configure_opts_dp *dp = &opts->dp;
+ u32 i, val, lane;
+ int ret;
+
+ ret = rk_udphy_dp_phy_verify_config(udphy, dp);
+ if (ret)
+ return ret;
+
+ if (dp->set_rate) {
+ regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
+ CMN_DP_CMN_RSTN, FIELD_PREP(CMN_DP_CMN_RSTN, 0x0));
+
+ switch (dp->link_rate) {
+ case 1620:
+ udphy->bw = DP_BW_RBR;
+ break;
+
+ case 2700:
+ udphy->bw = DP_BW_HBR;
+ break;
+
+ case 5400:
+ udphy->bw = DP_BW_HBR2;
+ break;
+
+ case 8100:
+ udphy->bw = DP_BW_HBR3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(udphy->pma_regmap, CMN_DP_LINK_OFFSET, CMN_DP_TX_LINK_BW,
+ FIELD_PREP(CMN_DP_TX_LINK_BW, udphy->bw));
+ regmap_update_bits(udphy->pma_regmap, CMN_SSC_EN_OFFSET, CMN_ROPLL_SSC_EN,
+ FIELD_PREP(CMN_ROPLL_SSC_EN, dp->ssc));
+ regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET, CMN_DP_CMN_RSTN,
+ FIELD_PREP(CMN_DP_CMN_RSTN, 0x1));
+
+ ret = regmap_read_poll_timeout(udphy->pma_regmap, CMN_ANA_ROPLL_DONE_OFFSET, val,
+ FIELD_GET(CMN_ANA_ROPLL_LOCK_DONE, val) &&
+ FIELD_GET(CMN_ANA_ROPLL_AFC_DONE, val),
+ 0, 1000);
+ if (ret) {
+ dev_err(udphy->dev, "ROPLL is not lock, set_rate failed\n");
+ return ret;
+ }
+ }
+
+ if (dp->set_voltages) {
+ for (i = 0; i < dp->lanes; i++) {
+ lane = udphy->dp_lane_sel[i];
+ switch (dp->link_rate) {
+ case 1620:
+ case 2700:
+ regmap_update_bits(udphy->pma_regmap,
+ TRSV_ANA_TX_CLK_OFFSET_N(lane),
+ LN_ANA_TX_SER_TXCLK_INV,
+ FIELD_PREP(LN_ANA_TX_SER_TXCLK_INV,
+ udphy->lane_mux_sel[lane]));
+ break;
+
+ case 5400:
+ case 8100:
+ regmap_update_bits(udphy->pma_regmap,
+ TRSV_ANA_TX_CLK_OFFSET_N(lane),
+ LN_ANA_TX_SER_TXCLK_INV,
+ FIELD_PREP(LN_ANA_TX_SER_TXCLK_INV, 0x0));
+ break;
+ }
+
+ rk_udphy_dp_set_voltage(udphy, udphy->bw, dp->voltage[i],
+ dp->pre[i], lane);
+ }
+ }
+
+ return 0;
+}
+
+static const struct phy_ops rk_udphy_dp_phy_ops = {
+ .init = rk_udphy_dp_phy_init,
+ .exit = rk_udphy_dp_phy_exit,
+ .power_on = rk_udphy_dp_phy_power_on,
+ .power_off = rk_udphy_dp_phy_power_off,
+ .configure = rk_udphy_dp_phy_configure,
+ .owner = THIS_MODULE,
+};
+
+static int rk_udphy_usb3_phy_init(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+ int ret = 0;
+
+ mutex_lock(&udphy->mutex);
+ /* DP only or high-speed, disable U3 port */
+ if (!(udphy->mode & UDPHY_MODE_USB) || udphy->hs) {
+ rk_udphy_u3_port_disable(udphy, true);
+ goto unlock;
+ }
+
+ ret = rk_udphy_power_on(udphy, UDPHY_MODE_USB);
+
+unlock:
+ mutex_unlock(&udphy->mutex);
+ return ret;
+}
+
+static int rk_udphy_usb3_phy_exit(struct phy *phy)
+{
+ struct rk_udphy *udphy = phy_get_drvdata(phy);
+
+ mutex_lock(&udphy->mutex);
+ /* DP only or high-speed */
+ if (!(udphy->mode & UDPHY_MODE_USB) || udphy->hs)
+ goto unlock;
+
+ rk_udphy_power_off(udphy, UDPHY_MODE_USB);
+
+unlock:
+ mutex_unlock(&udphy->mutex);
+ return 0;
+}
+
+static const struct phy_ops rk_udphy_usb3_phy_ops = {
+ .init = rk_udphy_usb3_phy_init,
+ .exit = rk_udphy_usb3_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int rk_udphy_typec_mux_set(struct typec_mux_dev *mux,
+ struct typec_mux_state *state)
+{
+ struct rk_udphy *udphy = typec_mux_get_drvdata(mux);
+ u8 mode;
+
+ mutex_lock(&udphy->mutex);
+
+ switch (state->mode) {
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ udphy->lane_mux_sel[0] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[1] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[2] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[3] = PHY_LANE_MUX_DP;
+ mode = UDPHY_MODE_DP;
+ break;
+
+ case TYPEC_DP_STATE_D:
+ default:
+ if (udphy->flip) {
+ udphy->lane_mux_sel[0] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[1] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[2] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[3] = PHY_LANE_MUX_USB;
+ } else {
+ udphy->lane_mux_sel[0] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[1] = PHY_LANE_MUX_USB;
+ udphy->lane_mux_sel[2] = PHY_LANE_MUX_DP;
+ udphy->lane_mux_sel[3] = PHY_LANE_MUX_DP;
+ }
+ mode = UDPHY_MODE_DP_USB;
+ break;
+ }
+
+ if (state->alt && state->alt->svid == USB_TYPEC_DP_SID) {
+ struct typec_displayport_data *data = state->data;
+
+ if (!data) {
+ rk_udphy_dp_hpd_event_trigger(udphy, false);
+ } else if (data->status & DP_STATUS_IRQ_HPD) {
+ rk_udphy_dp_hpd_event_trigger(udphy, false);
+ usleep_range(750, 800);
+ rk_udphy_dp_hpd_event_trigger(udphy, true);
+ } else if (data->status & DP_STATUS_HPD_STATE) {
+ if (udphy->mode != mode) {
+ udphy->mode = mode;
+ udphy->mode_change = true;
+ }
+ rk_udphy_dp_hpd_event_trigger(udphy, true);
+ } else {
+ rk_udphy_dp_hpd_event_trigger(udphy, false);
+ }
+ }
+
+ mutex_unlock(&udphy->mutex);
+ return 0;
+}
+
+static void rk_udphy_typec_mux_unregister(void *data)
+{
+ struct rk_udphy *udphy = data;
+
+ typec_mux_unregister(udphy->mux);
+}
+
+static int rk_udphy_setup_typec_mux(struct rk_udphy *udphy)
+{
+ struct typec_mux_desc mux_desc = {};
+
+ mux_desc.drvdata = udphy;
+ mux_desc.fwnode = dev_fwnode(udphy->dev);
+ mux_desc.set = rk_udphy_typec_mux_set;
+
+ udphy->mux = typec_mux_register(udphy->dev, &mux_desc);
+ if (IS_ERR(udphy->mux)) {
+ dev_err(udphy->dev, "Error register typec mux: %ld\n",
+ PTR_ERR(udphy->mux));
+ return PTR_ERR(udphy->mux);
+ }
+
+ return devm_add_action_or_reset(udphy->dev, rk_udphy_typec_mux_unregister,
+ udphy);
+}
+
+static const struct regmap_config rk_udphy_pma_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0x20dc,
+};
+
+static struct phy *rk_udphy_phy_xlate(struct device *dev, const struct of_phandle_args *args)
+{
+ struct rk_udphy *udphy = dev_get_drvdata(dev);
+
+ if (args->args_count == 0)
+ return ERR_PTR(-EINVAL);
+
+ switch (args->args[0]) {
+ case PHY_TYPE_USB3:
+ return udphy->phy_u3;
+ case PHY_TYPE_DP:
+ return udphy->phy_dp;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int rk_udphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct rk_udphy *udphy;
+ void __iomem *base;
+ int id, ret;
+
+ udphy = devm_kzalloc(dev, sizeof(*udphy), GFP_KERNEL);
+ if (!udphy)
+ return -ENOMEM;
+
+ udphy->cfgs = device_get_match_data(dev);
+ if (!udphy->cfgs)
+ return dev_err_probe(dev, -EINVAL, "missing match data\n");
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* find the phy-id from the io address */
+ udphy->id = -ENODEV;
+ for (id = 0; id < udphy->cfgs->num_phys; id++) {
+ if (res->start == udphy->cfgs->phy_ids[id]) {
+ udphy->id = id;
+ break;
+ }
+ }
+
+ if (udphy->id < 0)
+ return dev_err_probe(dev, -ENODEV, "no matching device found\n");
+
+ udphy->pma_regmap = devm_regmap_init_mmio(dev, base + UDPHY_PMA,
+ &rk_udphy_pma_regmap_cfg);
+ if (IS_ERR(udphy->pma_regmap))
+ return PTR_ERR(udphy->pma_regmap);
+
+ udphy->dev = dev;
+ ret = rk_udphy_parse_dt(udphy);
+ if (ret)
+ return ret;
+
+ ret = rk_udphy_get_initial_status(udphy);
+ if (ret)
+ return ret;
+
+ mutex_init(&udphy->mutex);
+ platform_set_drvdata(pdev, udphy);
+
+ if (device_property_present(dev, "orientation-switch")) {
+ ret = rk_udphy_setup_orien_switch(udphy);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_present(dev, "mode-switch")) {
+ ret = rk_udphy_setup_typec_mux(udphy);
+ if (ret)
+ return ret;
+ }
+
+ udphy->phy_u3 = devm_phy_create(dev, dev->of_node, &rk_udphy_usb3_phy_ops);
+ if (IS_ERR(udphy->phy_u3)) {
+ ret = PTR_ERR(udphy->phy_u3);
+ return dev_err_probe(dev, ret, "failed to create USB3 phy\n");
+ }
+ phy_set_drvdata(udphy->phy_u3, udphy);
+
+ udphy->phy_dp = devm_phy_create(dev, dev->of_node, &rk_udphy_dp_phy_ops);
+ if (IS_ERR(udphy->phy_dp)) {
+ ret = PTR_ERR(udphy->phy_dp);
+ return dev_err_probe(dev, ret, "failed to create DP phy\n");
+ }
+ phy_set_bus_width(udphy->phy_dp, rk_udphy_dplane_get(udphy));
+ udphy->phy_dp->attrs.max_link_rate = 8100;
+ phy_set_drvdata(udphy->phy_dp, udphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, rk_udphy_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ return dev_err_probe(dev, ret, "failed to register phy provider\n");
+ }
+
+ return 0;
+}
+
+static int __maybe_unused rk_udphy_resume(struct device *dev)
+{
+ struct rk_udphy *udphy = dev_get_drvdata(dev);
+
+ if (udphy->dp_sink_hpd_sel)
+ rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rk_udphy_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, rk_udphy_resume)
+};
+
+static const char * const rk_udphy_rst_list[] = {
+ "init", "cmn", "lane", "pcs_apb", "pma_apb"
+};
+
+static const struct rk_udphy_cfg rk3588_udphy_cfgs = {
+ .num_phys = 2,
+ .phy_ids = {
+ 0xfed80000,
+ 0xfed90000,
+ },
+ .num_rsts = ARRAY_SIZE(rk_udphy_rst_list),
+ .rst_list = rk_udphy_rst_list,
+ .grfcfg = {
+ /* u2phy-grf */
+ .bvalid_phy_con = RK_UDPHY_GEN_GRF_REG(0x0008, 1, 0, 0x2, 0x3),
+ .bvalid_grf_con = RK_UDPHY_GEN_GRF_REG(0x0010, 3, 2, 0x2, 0x3),
+
+ /* usb-grf */
+ .usb3otg0_cfg = RK_UDPHY_GEN_GRF_REG(0x001c, 15, 0, 0x1100, 0x0188),
+ .usb3otg1_cfg = RK_UDPHY_GEN_GRF_REG(0x0034, 15, 0, 0x1100, 0x0188),
+
+ /* usbdpphy-grf */
+ .low_pwrn = RK_UDPHY_GEN_GRF_REG(0x0004, 13, 13, 0, 1),
+ .rx_lfps = RK_UDPHY_GEN_GRF_REG(0x0004, 14, 14, 0, 1),
+ },
+ .vogrfcfg = {
+ {
+ .hpd_trigger = RK_UDPHY_GEN_GRF_REG(0x0000, 11, 10, 1, 3),
+ .dp_lane_reg = 0x0000,
+ },
+ {
+ .hpd_trigger = RK_UDPHY_GEN_GRF_REG(0x0008, 11, 10, 1, 3),
+ .dp_lane_reg = 0x0008,
+ },
+ },
+ .dp_tx_ctrl_cfg = {
+ rk3588_dp_tx_drv_ctrl_rbr_hbr,
+ rk3588_dp_tx_drv_ctrl_rbr_hbr,
+ rk3588_dp_tx_drv_ctrl_hbr2,
+ rk3588_dp_tx_drv_ctrl_hbr3,
+ },
+ .dp_tx_ctrl_cfg_typec = {
+ rk3588_dp_tx_drv_ctrl_rbr_hbr_typec,
+ rk3588_dp_tx_drv_ctrl_rbr_hbr_typec,
+ rk3588_dp_tx_drv_ctrl_hbr2,
+ rk3588_dp_tx_drv_ctrl_hbr3,
+ },
+};
+
+static const struct of_device_id rk_udphy_dt_match[] = {
+ {
+ .compatible = "rockchip,rk3588-usbdp-phy",
+ .data = &rk3588_udphy_cfgs
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_udphy_dt_match);
+
+static struct platform_driver rk_udphy_driver = {
+ .probe = rk_udphy_probe,
+ .driver = {
+ .name = "rockchip-usbdp-phy",
+ .of_match_table = rk_udphy_dt_match,
+ .pm = &rk_udphy_pm_ops,
+ },
+};
+module_platform_driver(rk_udphy_driver);
+
+MODULE_AUTHOR("Frank Wang <frank.wang@rock-chips.com>");
+MODULE_AUTHOR("Zhang Yubing <yubing.zhang@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip USBDP Combo PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index afb34a153e34..fea1f96d0e43 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o
obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-exynos-ufs.o
+phy-exynos-ufs-y += phy-gs101-ufs.o
phy-exynos-ufs-y += phy-samsung-ufs.o
phy-exynos-ufs-y += phy-exynos7-ufs.o
phy-exynos-ufs-y += phy-exynosautov9-ufs.o
diff --git a/drivers/phy/samsung/phy-exynos7-ufs.c b/drivers/phy/samsung/phy-exynos7-ufs.c
index a982e7c128c5..15eec1d9e0e0 100644
--- a/drivers/phy/samsung/phy-exynos7-ufs.c
+++ b/drivers/phy/samsung/phy-exynos7-ufs.c
@@ -82,4 +82,5 @@ const struct samsung_ufs_phy_drvdata exynos7_ufs_phy = {
.clk_list = exynos7_ufs_phy_clks,
.num_clks = ARRAY_SIZE(exynos7_ufs_phy_clks),
.cdr_lock_status_offset = EXYNOS7_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
+ .wait_for_cdr = samsung_ufs_phy_wait_for_lock_acq,
};
diff --git a/drivers/phy/samsung/phy-exynosautov9-ufs.c b/drivers/phy/samsung/phy-exynosautov9-ufs.c
index 49e2bcbef0b4..9c3e030f07ba 100644
--- a/drivers/phy/samsung/phy-exynosautov9-ufs.c
+++ b/drivers/phy/samsung/phy-exynosautov9-ufs.c
@@ -71,4 +71,5 @@ const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy = {
.clk_list = exynosautov9_ufs_phy_clks,
.num_clks = ARRAY_SIZE(exynosautov9_ufs_phy_clks),
.cdr_lock_status_offset = EXYNOSAUTOV9_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
+ .wait_for_cdr = samsung_ufs_phy_wait_for_lock_acq,
};
diff --git a/drivers/phy/samsung/phy-fsd-ufs.c b/drivers/phy/samsung/phy-fsd-ufs.c
index d36cabd53434..f2361746db0e 100644
--- a/drivers/phy/samsung/phy-fsd-ufs.c
+++ b/drivers/phy/samsung/phy-fsd-ufs.c
@@ -60,4 +60,5 @@ const struct samsung_ufs_phy_drvdata fsd_ufs_phy = {
.clk_list = fsd_ufs_phy_clks,
.num_clks = ARRAY_SIZE(fsd_ufs_phy_clks),
.cdr_lock_status_offset = FSD_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
+ .wait_for_cdr = samsung_ufs_phy_wait_for_lock_acq,
};
diff --git a/drivers/phy/samsung/phy-gs101-ufs.c b/drivers/phy/samsung/phy-gs101-ufs.c
new file mode 100644
index 000000000000..17b798da5b57
--- /dev/null
+++ b/drivers/phy/samsung/phy-gs101-ufs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS PHY driver data for Google Tensor gs101 SoC
+ *
+ * Copyright (C) 2024 Linaro Ltd
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ */
+
+#include "phy-samsung-ufs.h"
+
+#define TENSOR_GS101_PHY_CTRL 0x3ec8
+#define TENSOR_GS101_PHY_CTRL_MASK 0x1
+#define TENSOR_GS101_PHY_CTRL_EN BIT(0)
+#define PHY_GS101_LANE_OFFSET 0x200
+#define TRSV_REG338 0x338
+#define LN0_MON_RX_CAL_DONE BIT(3)
+#define TRSV_REG339 0x339
+#define LN0_MON_RX_CDR_FLD_CK_MODE_DONE BIT(3)
+#define TRSV_REG222 0x222
+#define LN0_OVRD_RX_CDR_EN BIT(4)
+#define LN0_RX_CDR_EN BIT(3)
+
+#define PHY_PMA_TRSV_ADDR(reg, lane) (PHY_APB_ADDR((reg) + \
+ ((lane) * PHY_GS101_LANE_OFFSET)))
+
+#define PHY_TRSV_REG_CFG_GS101(o, v, d) \
+ PHY_TRSV_REG_CFG_OFFSET(o, v, d, PHY_GS101_LANE_OFFSET)
+
+/* Calibration for phy initialization */
+static const struct samsung_ufs_phy_cfg tensor_gs101_pre_init_cfg[] = {
+ PHY_COMN_REG_CFG(0x43, 0x10, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x3C, 0x14, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x46, 0x48, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x200, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x201, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x202, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x203, 0x0a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x204, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x205, 0x11, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x207, 0x0c, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2E1, 0xc0, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x22D, 0xb8, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x234, 0x60, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x238, 0x13, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x239, 0x48, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23A, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23B, 0x25, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23C, 0x2a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23D, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23E, 0x13, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x23F, 0x13, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x240, 0x4a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x243, 0x40, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x244, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x25D, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x25E, 0x3f, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x25F, 0xff, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x273, 0x33, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x274, 0x50, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x284, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x285, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2A2, 0x04, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x25D, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2FA, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x286, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x287, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x288, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x289, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2B3, 0x04, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2B6, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2B7, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2B8, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2B9, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2BA, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2BB, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2BC, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2BD, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x29E, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2E4, 0x1a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2ED, 0x25, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x269, 0x1a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x2F4, 0x2f, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x34B, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x34C, 0x23, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x34D, 0x23, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x34E, 0x45, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x34F, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x350, 0x31, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x351, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x352, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x353, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x354, 0x01, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x43, 0x18, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x43, 0x00, PWR_MODE_ANY),
+ END_UFS_PHY_CFG,
+};
+
+static const struct samsung_ufs_phy_cfg tensor_gs101_pre_pwr_hs_config[] = {
+ PHY_TRSV_REG_CFG_GS101(0x369, 0x11, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x246, 0x03, PWR_MODE_ANY),
+};
+
+/* Calibration for HS mode series A/B */
+static const struct samsung_ufs_phy_cfg tensor_gs101_post_pwr_hs_config[] = {
+ PHY_COMN_REG_CFG(0x8, 0x60, PWR_MODE_PWM_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x222, 0x08, PWR_MODE_PWM_ANY),
+ PHY_TRSV_REG_CFG_GS101(0x246, 0x01, PWR_MODE_ANY),
+ END_UFS_PHY_CFG,
+};
+
+static const struct samsung_ufs_phy_cfg *tensor_gs101_ufs_phy_cfgs[CFG_TAG_MAX] = {
+ [CFG_PRE_INIT] = tensor_gs101_pre_init_cfg,
+ [CFG_PRE_PWR_HS] = tensor_gs101_pre_pwr_hs_config,
+ [CFG_POST_PWR_HS] = tensor_gs101_post_pwr_hs_config,
+};
+
+static const char * const tensor_gs101_ufs_phy_clks[] = {
+ "ref_clk",
+};
+
+static int gs101_phy_wait_for_calibration(struct phy *phy, u8 lane)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ const unsigned int timeout_us = 40000;
+ const unsigned int sleep_us = 40;
+ u32 val;
+ u32 off;
+ int err;
+
+ off = PHY_PMA_TRSV_ADDR(TRSV_REG338, lane);
+
+ err = readl_poll_timeout(ufs_phy->reg_pma + off,
+ val, (val & LN0_MON_RX_CAL_DONE),
+ sleep_us, timeout_us);
+
+ if (err) {
+ dev_err(ufs_phy->dev,
+ "failed to get phy cal done %d\n", err);
+ }
+
+ return err;
+}
+
+#define DELAY_IN_US 40
+#define RETRY_CNT 100
+static int gs101_phy_wait_for_cdr_lock(struct phy *phy, u8 lane)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ u32 val;
+ int i;
+
+ for (i = 0; i < RETRY_CNT; i++) {
+ udelay(DELAY_IN_US);
+ val = readl(ufs_phy->reg_pma +
+ PHY_PMA_TRSV_ADDR(TRSV_REG339, lane));
+
+ if (val & LN0_MON_RX_CDR_FLD_CK_MODE_DONE)
+ return 0;
+
+ udelay(DELAY_IN_US);
+ /* Override and enable clock data recovery */
+ writel(LN0_OVRD_RX_CDR_EN, ufs_phy->reg_pma +
+ PHY_PMA_TRSV_ADDR(TRSV_REG222, lane));
+ writel(LN0_OVRD_RX_CDR_EN | LN0_RX_CDR_EN,
+ ufs_phy->reg_pma + PHY_PMA_TRSV_ADDR(TRSV_REG222, lane));
+ }
+ dev_err(ufs_phy->dev, "failed to get cdr lock\n");
+ return -ETIMEDOUT;
+}
+
+const struct samsung_ufs_phy_drvdata tensor_gs101_ufs_phy = {
+ .cfgs = tensor_gs101_ufs_phy_cfgs,
+ .isol = {
+ .offset = TENSOR_GS101_PHY_CTRL,
+ .mask = TENSOR_GS101_PHY_CTRL_MASK,
+ .en = TENSOR_GS101_PHY_CTRL_EN,
+ },
+ .clk_list = tensor_gs101_ufs_phy_clks,
+ .num_clks = ARRAY_SIZE(tensor_gs101_ufs_phy_clks),
+ .wait_for_cal = gs101_phy_wait_for_calibration,
+ .wait_for_cdr = gs101_phy_wait_for_cdr_lock,
+};
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 183c88e3d1ec..6c5d41552649 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -13,11 +13,11 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-pmu.h>
#include "phy-samsung-ufs.h"
@@ -45,7 +45,7 @@ static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
}
}
-static int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy)
+int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy, u8 lane)
{
struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
const unsigned int timeout_us = 100000;
@@ -97,8 +97,21 @@ static int samsung_ufs_phy_calibrate(struct phy *phy)
}
}
- if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS)
- err = samsung_ufs_phy_wait_for_lock_acq(phy);
+ for_each_phy_lane(ufs_phy, i) {
+ if (ufs_phy->ufs_phy_state == CFG_PRE_INIT &&
+ ufs_phy->drvdata->wait_for_cal) {
+ err = ufs_phy->drvdata->wait_for_cal(phy, i);
+ if (err)
+ goto out;
+ }
+
+ if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS &&
+ ufs_phy->drvdata->wait_for_cdr) {
+ err = ufs_phy->drvdata->wait_for_cdr(phy, i);
+ if (err)
+ goto out;
+ }
+ }
/**
* In Samsung ufshci, PHY need to be calibrated at different
@@ -255,8 +268,8 @@ static int samsung_ufs_phy_probe(struct platform_device *pdev)
goto out;
}
- phy->reg_pmu = syscon_regmap_lookup_by_phandle(
- dev->of_node, "samsung,pmu-syscon");
+ phy->reg_pmu = exynos_get_pmu_regmap_by_phandle(dev->of_node,
+ "samsung,pmu-syscon");
if (IS_ERR(phy->reg_pmu)) {
err = PTR_ERR(phy->reg_pmu);
dev_err(dev, "failed syscon remap for pmu\n");
@@ -302,6 +315,9 @@ out:
static const struct of_device_id samsung_ufs_phy_match[] = {
{
+ .compatible = "google,gs101-ufs-phy",
+ .data = &tensor_gs101_ufs_phy,
+ }, {
.compatible = "samsung,exynos7-ufs-phy",
.data = &exynos7_ufs_phy,
}, {
diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h
index e122960cfee8..9b7deef6e10f 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.h
+++ b/drivers/phy/samsung/phy-samsung-ufs.h
@@ -112,6 +112,9 @@ struct samsung_ufs_phy_drvdata {
const char * const *clk_list;
int num_clks;
u32 cdr_lock_status_offset;
+ /* SoC's specific operations */
+ int (*wait_for_cal)(struct phy *phy, u8 lane);
+ int (*wait_for_cdr)(struct phy *phy, u8 lane);
};
struct samsung_ufs_phy {
@@ -139,8 +142,11 @@ static inline void samsung_ufs_phy_ctrl_isol(
phy->isol.mask, isol ? 0 : phy->isol.en);
}
+int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy, u8 lane);
+
extern const struct samsung_ufs_phy_drvdata exynos7_ufs_phy;
extern const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy;
extern const struct samsung_ufs_phy_drvdata fsd_ufs_phy;
+extern const struct samsung_ufs_phy_drvdata tensor_gs101_ufs_phy;
#endif /* _PHY_SAMSUNG_UFS_ */
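
As a rough illustration of the per-lane hooks added above, a SoC-specific callback only has to match the new wait_for_cal()/wait_for_cdr() prototype; the register offset, stride and done bit below are made up for the sketch:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include "phy-samsung-ufs.h"

#define EXAMPLE_LANE_STRIDE	0x200
#define EXAMPLE_CAL_DONE_REG	0x338
#define EXAMPLE_CAL_DONE_BIT	BIT(3)

/* Hypothetical hook, called once per lane from samsung_ufs_phy_calibrate(). */
static int example_wait_for_cal(struct phy *phy, u8 lane)
{
	struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
	u32 off = PHY_APB_ADDR(EXAMPLE_CAL_DONE_REG + lane * EXAMPLE_LANE_STRIDE);
	u32 val;

	return readl_poll_timeout(ufs_phy->reg_pma + off, val,
				  val & EXAMPLE_CAL_DONE_BIT, 40, 40000);
}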
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 13cd614e12a1..751fecd466e3 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -69,7 +69,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -236,19 +235,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -473,9 +477,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
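
The rework above drops the cached tusb->charger pointer in favour of a get/use/put pattern; a standalone sketch of that pattern (names are illustrative only):

#include <linux/power_supply.h>

/* Illustrative only: take a short-lived power_supply reference. */
static bool example_psy_is_online(const char *name)
{
	union power_supply_propval val;
	struct power_supply *psy;
	bool online = false;

	psy = power_supply_get_by_name(name);
	if (!psy)
		return false;

	if (!power_supply_get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val))
		online = val.intval;

	power_supply_put(psy);	/* drop the reference before returning */
	return online;
}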
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index f72c5257d712..dc8319bda43d 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -995,15 +995,13 @@ static int xpsgtr_probe(struct platform_device *pdev)
return 0;
}
-static int xpsgtr_remove(struct platform_device *pdev)
+static void xpsgtr_remove(struct platform_device *pdev)
{
struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
pm_runtime_disable(gtr_dev->dev);
pm_runtime_put_noidle(gtr_dev->dev);
pm_runtime_set_suspended(gtr_dev->dev);
-
- return 0;
}
static const struct of_device_id xpsgtr_of_match[] = {
@@ -1015,7 +1013,7 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
static struct platform_driver xpsgtr_driver = {
.probe = xpsgtr_probe,
- .remove = xpsgtr_remove,
+ .remove_new = xpsgtr_remove,
.driver = {
.name = "xilinx-psgtr",
.of_match_table = xpsgtr_of_match,
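
For reference, a minimal sketch of the void-returning callback shape that .remove_new expects (driver and function names are placeholders):

#include <linux/platform_device.h>

/* Placeholder driver: .remove_new cannot report errors back to the core. */
static void example_remove(struct platform_device *pdev)
{
	/* release resources; nothing is returned */
}

static struct platform_driver example_driver = {
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};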
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d45657aa986a..7e4f93a3bc7a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -235,13 +235,13 @@ config PINCTRL_INGENIC
config PINCTRL_K210
bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
- depends on RISCV && SOC_CANAAN && OF
+ depends on RISCV && SOC_CANAAN_K210 && OF
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
select GPIOLIB
select OF_GPIO
select REGMAP_MMIO
- default SOC_CANAAN
+ default SOC_CANAAN_K210
help
Add support for the Canaan Kendryte K210 RISC-V SOC Field
Programmable IO Array (FPIOA) controller.
@@ -450,6 +450,17 @@ config PINCTRL_ROCKCHIP
help
This support pinctrl and GPIO driver for Rockchip SoCs.
+config PINCTRL_SCMI
+ tristate "Pinctrl driver using SCMI protocol interface"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This driver provides support for pin controllers that are
+ managed by firmware implementing the SCMI interface.
+ It uses the SCMI Message Protocol to interact with the
+ firmware that provides all the pinctrl controls.
+
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 2152539b53d5..cc809669405a 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
+obj-$(CONFIG_PINCTRL_SCMI) += pinctrl-scmi.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
index 489ea1778353..db2a7600ae2b 100644
--- a/drivers/pinctrl/aspeed/Makefile
+++ b/drivers/pinctrl/aspeed/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Aspeed pinctrl support
-ccflags-y += $(call cc-option,-Woverride-init)
+ccflags-y += -Woverride-init
obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index d376fa7114d1..029efe16f8cc 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -43,7 +43,7 @@
#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU690 0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
/* GPIOS7 */
- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
/* GPIOS6 */
- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
/* GPIOS5 */
- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
/* GPIOS4 */
- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
/* GPIOS3*/
- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
/* GPIOS2 */
- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
/* GPIOS1 */
- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
/* GPIOS0 */
- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
/* GPIOR7 */
- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
/* GPIOR6 */
- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
/* GPIOR5 */
- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
/* GPIOR4 */
- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
/* GPIOR3*/
- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
/* GPIOR2 */
- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
/* GPIOR1 */
- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
/* GPIOR0 */
- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
/* GPIOX7 */
ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 1489191a213f..7178a38475cc 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -244,6 +244,10 @@ static const char * const irq_type_names[] = {
[IRQ_TYPE_LEVEL_LOW] = "level-low",
};
+static bool persist_gpio_outputs;
+module_param(persist_gpio_outputs, bool, 0644);
+MODULE_PARM_DESC(persist_gpio_outputs, "Enable GPIO_OUT persistence when pin is freed");
+
static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl *pc, unsigned reg)
{
return readl(pc->base + reg);
@@ -926,6 +930,13 @@ static int bcm2835_pmx_free(struct pinctrl_dev *pctldev,
unsigned offset)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
+
+ if (fsel == BCM2835_FSEL_GPIO_IN)
+ return 0;
+
+ if (persist_gpio_outputs && fsel == BCM2835_FSEL_GPIO_OUT)
+ return 0;
/* disable by setting to GPIO_IN */
bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
@@ -970,10 +981,7 @@ static void bcm2835_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
{
- struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
-
- /* disable by setting to GPIO_IN */
- bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
+ bcm2835_pmx_free(pctldev, offset);
}
static int bcm2835_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -1003,8 +1011,27 @@ static const struct pinmux_ops bcm2835_pmx_ops = {
static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *config)
{
- /* No way to read back config in HW */
- return -ENOTSUPP;
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, pin);
+ u32 val;
+
+ /* No way to read back bias config in HW */
+
+ switch (param) {
+ case PIN_CONFIG_OUTPUT:
+ if (fsel != BCM2835_FSEL_GPIO_OUT)
+ return -EINVAL;
+
+ val = bcm2835_gpio_get_bit(pc, GPLEV0, pin);
+ *config = pinconf_to_config_packed(param, val);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static void bcm2835_pull_config_set(struct bcm2835_pinctrl *pc,
@@ -1079,6 +1106,45 @@ static const struct pinconf_ops bcm2835_pinconf_ops = {
.pin_config_set = bcm2835_pinconf_set,
};
+static int bcm2711_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 offset, shift, val;
+
+ offset = PUD_2711_REG_OFFSET(pin);
+ shift = PUD_2711_REG_SHIFT(pin);
+ val = bcm2835_gpio_rd(pc, GP_GPIO_PUP_PDN_CNTRL_REG0 + (offset * 4));
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (((val >> shift) & PUD_2711_MASK) != BCM2711_PULL_NONE)
+ return -EINVAL;
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (((val >> shift) & PUD_2711_MASK) != BCM2711_PULL_UP)
+ return -EINVAL;
+
+ *config = pinconf_to_config_packed(param, 50000);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (((val >> shift) & PUD_2711_MASK) != BCM2711_PULL_DOWN)
+ return -EINVAL;
+
+ *config = pinconf_to_config_packed(param, 50000);
+ break;
+
+ default:
+ return bcm2835_pinconf_get(pctldev, pin, config);
+ }
+
+ return 0;
+}
+
static void bcm2711_pull_config_set(struct bcm2835_pinctrl *pc,
unsigned int pin, unsigned int arg)
{
@@ -1146,7 +1212,7 @@ static int bcm2711_pinconf_set(struct pinctrl_dev *pctldev,
static const struct pinconf_ops bcm2711_pinconf_ops = {
.is_generic = true,
- .pin_config_get = bcm2835_pinconf_get,
+ .pin_config_get = bcm2711_pinconf_get,
.pin_config_set = bcm2711_pinconf_set,
};
@@ -1361,6 +1427,9 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
goto out_remove;
}
+ dev_info(dev, "GPIO_OUT persistence: %s\n",
+ persist_gpio_outputs ? "yes" : "no");
+
return 0;
out_remove:
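
The new readback path reports bias settings through the generic packed pinconf encoding; a small round-trip sketch of that encoding (purely illustrative):

#include <linux/bug.h>
#include <linux/pinctrl/pinconf-generic.h>

/* Illustrative round trip of the packed config value returned above. */
static void example_pinconf_pack(void)
{
	unsigned long cfg;

	/* 50000 ohm pull-up, as bcm2711_pinconf_get() reports it */
	cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 50000);

	WARN_ON(pinconf_to_config_param(cfg) != PIN_CONFIG_BIAS_PULL_UP);
	WARN_ON(pinconf_to_config_argument(cfg) != 50000);
}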
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6649357637ff..cffeb869130d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
error = pinctrl_claim_hogs(pctldev);
if (error) {
- dev_err(pctldev->dev, "could not claim hogs: %i\n",
- error);
- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
- pctldev->desc->npins);
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
return error;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index df1efc2e5202..6a94ecd6a8de 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
- if (!propname)
- return -ENOMEM;
+ if (!propname) {
+ ret = -ENOMEM;
+ goto err;
+ }
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
if (state == 0) {
- of_node_put(np);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
break;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8ulp.c b/drivers/pinctrl/freescale/pinctrl-imx8ulp.c
index 2e86ca9fc7ac..5632c7285147 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8ulp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8ulp.c
@@ -252,6 +252,7 @@ static const struct of_device_id imx8ulp_pinctrl_of_match[] = {
{ .compatible = "fsl,imx8ulp-iomuxc1", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx8ulp_pinctrl_of_match);
static int imx8ulp_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index ac97724c59ba..4e87f5b875c0 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
/* SCORE groups */
static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
static const unsigned int byt_score_pwm0_pins[] = { 94 };
static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
static const struct intel_pingroup byt_score_groups[] = {
- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+ PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
};
static const char * const byt_score_uart_groups[] = {
- "uart1_grp", "uart2_grp",
+ "uart1_grp", "uart2_grp", "uart3_grp",
};
static const char * const byt_score_pwm_groups[] = {
"pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
};
static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
static const char * const byt_score_gpio_groups[] = {
- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+ "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+ "plt_clk5_grp_gpio", "smbus_grp_gpio",
};
static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
};
static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
};
static const char * const byt_sus_gpio_groups[] = {
"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
- "pmu_clk1_grp", "pmu_clk2_grp",
+ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
};
static const struct intel_function byt_sus_functions[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fde65e18cd14..6981e2fab93f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -179,6 +179,10 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
+#define PIN_GROUP_GPIO(n, p, m) \
+ PIN_GROUP(n, p, m), \
+ PIN_GROUP(n "_gpio", p, 0)
+
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
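
To illustrate the new helper: each PIN_GROUP_GPIO() invocation emits two group entries, a native-mode group plus a matching "_gpio" group with mode 0, which is why the Baytrail gpio group lists above now use the "*_grp_gpio" names. Expansion sketch:

/*
 *	PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1)
 *
 * expands to
 *
 *	PIN_GROUP("uart3_grp", byt_score_uart3_pins, 1),
 *	PIN_GROUP("uart3_grp_gpio", byt_score_uart3_pins, 0)
 */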
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index f6ec41eb6e0c..72609cf74760 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -1086,6 +1086,7 @@ static const struct of_device_id mt6765_pinctrl_of_match[] = {
{ .compatible = "mediatek,mt6765-pinctrl", .data = &mt6765_data },
{ }
};
+MODULE_DEVICE_TABLE(of, mt6765_pinctrl_of_match);
static struct platform_driver mt6765_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6779.c b/drivers/pinctrl/mediatek/pinctrl-mt6779.c
index 62d4f5ad6737..591905e4132a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6779.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6779.c
@@ -762,6 +762,7 @@ static const struct of_device_id mt6779_pinctrl_of_match[] = {
{ .compatible = "mediatek,mt6779-pinctrl", .data = &mt6779_data },
{ }
};
+MODULE_DEVICE_TABLE(of, mt6779_pinctrl_of_match);
static struct platform_driver mt6779_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b6bc31abd2b0..b19bc391705e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+ if (!ret)
+ err = -EINVAL;
+ break;
+ case PIN_CONFIG_OUTPUT:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
- /* CONFIG Current direction return value
- * ------------- ----------------- ----------------------
- * OUTPUT_ENABLE output 1 (= HW value)
- * input 0 (= HW value)
- * INPUT_ENABLE output 0 (= reverse HW value)
- * input 1 (= reverse HW value)
- */
- if (param == PIN_CONFIG_INPUT_ENABLE)
- ret = !ret;
+ if (!ret) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
}
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+ if (!ret)
+ err = -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
err = hw->soc->bias_set_combo(hw, desc, 0, arg);
break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- /* Keep set direction to consider the case that a GPIO pin
- * does not have SMT control
- */
- if (err != -ENOTSUPP)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- break;
case PIN_CONFIG_INPUT_ENABLE:
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
- if (err)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_INPUT);
break;
case PIN_CONFIG_SLEW_RATE:
/* regard all non-zero value as enable */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 79f5d753d7e1..50a87d9618a8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
/* gen_clk */
static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
GROUP(pdm_din2_a, 3),
GROUP(pdm_din1_a, 3),
GROUP(pdm_din0_a, 3),
- GROUP(pdm_dclk, 3),
+ GROUP(pdm_dclk_a, 3),
GROUP(pwm_c_a, 3),
GROUP(pwm_b_a, 3),
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
static const char * const pdm_groups[] = {
"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
};
static const char * const gen_clk_groups[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index c34719b7506d..4c4ada06423d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -834,8 +834,6 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
static int armada_37xx_add_function(struct armada_37xx_pmx_func *funcs,
int *funcsize, const char *name)
{
- int i = 0;
-
if (*funcsize <= 0)
return -EOVERFLOW;
@@ -847,7 +845,6 @@ static int armada_37xx_add_function(struct armada_37xx_pmx_func *funcs,
return -EEXIST;
}
funcs++;
- i++;
}
/* append new unique function */
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index cada5d18ffae..80de389199bd 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -88,7 +88,7 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
- seq_printf(s, " (%u",
+ seq_printf(s, " (0x%x",
pinconf_to_config_argument(config));
if (items[i].format)
seq_printf(s, " %s)", items[i].format);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 49f89b70dcec..7f66ec73199a 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_COND_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
index 4edd371c469f..b5e1c467625b 100644
--- a/drivers/pinctrl/pinctrl-aw9523.c
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -1,28 +1,29 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Awinic AW9523B i2c pin controller driver
- * Copyright (c) 2020, AngeloGioacchino Del Regno
- * <angelogioacchino.delregno@somainline.org>
+ * Copyright (c) 2020, AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
*/
#include <linux/bitfield.h>
+#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
#define AW9523_MAX_FUNCS 2
#define AW9523_NUM_PORTS 2
#define AW9523_PINS_PER_PORT 8
@@ -56,28 +57,14 @@
/*
* struct aw9523_irq - Interrupt controller structure
* @lock: mutex locking for the irq bus
- * @irqchip: structure holding irqchip params
* @cached_gpio: stores the previous gpio status for bit comparison
*/
struct aw9523_irq {
struct mutex lock;
- struct irq_chip *irqchip;
u16 cached_gpio;
};
/*
- * struct aw9523_pinmux - Pin mux params
- * @name: Name of the mux
- * @grps: Groups of the mux
- * @num_grps: Number of groups (sizeof array grps)
- */
-struct aw9523_pinmux {
- const char *name;
- const char * const *grps;
- const u8 num_grps;
-};
-
-/*
* struct aw9523 - Main driver structure
* @dev: device handle
* @regmap: regmap handle for current device
@@ -151,23 +138,16 @@ static const struct pinctrl_ops aw9523_pinctrl_ops = {
};
static const char * const gpio_pwm_groups[] = {
- "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
- "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
- "gpio12", "gpio13", "gpio14", "gpio15"
+ "gpio0", "gpio1", "gpio2", "gpio3", /* 0-3 */
+ "gpio4", "gpio5", "gpio6", "gpio7", /* 4-7 */
+ "gpio8", "gpio9", "gpio10", "gpio11", /* 8-11 */
+ "gpio12", "gpio13", "gpio14", "gpio15", /* 11-15 */
};
/* Warning: Do NOT reorder this array */
-static const struct aw9523_pinmux aw9523_pmx[] = {
- {
- .name = "pwm",
- .grps = gpio_pwm_groups,
- .num_grps = ARRAY_SIZE(gpio_pwm_groups),
- },
- {
- .name = "gpio",
- .grps = gpio_pwm_groups,
- .num_grps = ARRAY_SIZE(gpio_pwm_groups),
- },
+static const struct pinfunction aw9523_pmx[] = {
+ PINCTRL_PINFUNCTION("pwm", gpio_pwm_groups, ARRAY_SIZE(gpio_pwm_groups)),
+ PINCTRL_PINFUNCTION("gpio", gpio_pwm_groups, ARRAY_SIZE(gpio_pwm_groups)),
};
static int aw9523_pmx_get_funcs_count(struct pinctrl_dev *pctl)
@@ -183,10 +163,10 @@ static const char *aw9523_pmx_get_fname(struct pinctrl_dev *pctl,
static int aw9523_pmx_get_groups(struct pinctrl_dev *pctl, unsigned int sel,
const char * const **groups,
- unsigned int * const num_groups)
+ unsigned int * const ngroups)
{
- *groups = aw9523_pmx[sel].grps;
- *num_groups = aw9523_pmx[sel].num_grps;
+ *groups = aw9523_pmx[sel].groups;
+ *ngroups = aw9523_pmx[sel].ngroups;
return 0;
}
@@ -239,7 +219,7 @@ static int aw9523_pcfg_param_to_reg(enum pin_config_param pcp, int pin, u8 *r)
reg = AW9523_REG_OUT_STATE(pin);
break;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
*r = reg;
@@ -290,7 +270,7 @@ static int aw9523_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
val = FIELD_GET(AW9523_GCR_GPOMD_MASK, val);
break;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
if (val < 1)
return -EINVAL;
@@ -344,7 +324,7 @@ static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
/* Open-Drain is supported only on port 0 */
if (pin >= AW9523_PINS_PER_PORT) {
- rc = -EOPNOTSUPP;
+ rc = -ENOTSUPP;
goto end;
}
mask = AW9523_GCR_GPOMD_MASK;
@@ -361,7 +341,7 @@ static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
val = AW9523_GCR_GPOMD_MASK;
break;
default:
- rc = -EOPNOTSUPP;
+ rc = -ENOTSUPP;
goto end;
}
@@ -408,8 +388,8 @@ static int aw9523_get_pin_direction(struct regmap *regmap, u8 pin, u8 n)
*
* Return: Zero for success or negative number for error
*/
-static int aw9523_get_port_state(struct regmap *regmap, u8 pin,
- u8 regbit, unsigned int *state)
+static int aw9523_get_port_state(struct regmap *regmap, u8 pin, u8 regbit,
+ unsigned int *state)
{
u8 reg;
int dir;
@@ -447,12 +427,12 @@ static int aw9523_gpio_irq_type(struct irq_data *d, unsigned int type)
static void aw9523_irq_mask(struct irq_data *d)
{
struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned int n = hwirq % AW9523_PINS_PER_PORT;
- regmap_update_bits(awi->regmap,
- AW9523_REG_INTR_DIS(d->hwirq),
+ regmap_update_bits(awi->regmap, AW9523_REG_INTR_DIS(hwirq),
BIT(n), BIT(n));
- gpiochip_disable_irq(&awi->gpio, irqd_to_hwirq(d));
+ gpiochip_disable_irq(&awi->gpio, hwirq);
}
/*
@@ -465,11 +445,11 @@ static void aw9523_irq_mask(struct irq_data *d)
static void aw9523_irq_unmask(struct irq_data *d)
{
struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned int n = hwirq % AW9523_PINS_PER_PORT;
- gpiochip_enable_irq(&awi->gpio, irqd_to_hwirq(d));
- regmap_update_bits(awi->regmap,
- AW9523_REG_INTR_DIS(d->hwirq),
+ gpiochip_enable_irq(&awi->gpio, hwirq);
+ regmap_update_bits(awi->regmap, AW9523_REG_INTR_DIS(hwirq),
BIT(n), 0);
}
@@ -622,7 +602,7 @@ static int aw9523_gpio_get_multiple(struct gpio_chip *chip,
mutex_lock(&awi->i2c_lock);
/* Port 0 (gpio 0-7) */
- m = *mask & U8_MAX;
+ m = *mask;
if (m) {
ret = _aw9523_gpio_get_multiple(awi, 0, &state, m);
if (ret)
@@ -631,7 +611,7 @@ static int aw9523_gpio_get_multiple(struct gpio_chip *chip,
*bits = state;
/* Port 1 (gpio 8-15) */
- m = (*mask >> 8) & U8_MAX;
+ m = *mask >> 8;
if (m) {
ret = _aw9523_gpio_get_multiple(awi, AW9523_PINS_PER_PORT,
&state, m);
@@ -652,29 +632,26 @@ static void aw9523_gpio_set_multiple(struct gpio_chip *chip,
struct aw9523 *awi = gpiochip_get_data(chip);
u8 mask_lo, mask_hi, bits_lo, bits_hi;
unsigned int reg;
- int ret = 0;
+ int ret;
+
+ mask_lo = *mask;
+ mask_hi = *mask >> 8;
+ bits_lo = *bits;
+ bits_hi = *bits >> 8;
- mask_lo = *mask & U8_MAX;
- mask_hi = (*mask >> 8) & U8_MAX;
mutex_lock(&awi->i2c_lock);
if (mask_hi) {
reg = AW9523_REG_OUT_STATE(AW9523_PINS_PER_PORT);
- bits_hi = (*bits >> 8) & U8_MAX;
-
ret = regmap_write_bits(awi->regmap, reg, mask_hi, bits_hi);
- if (ret) {
+ if (ret)
dev_warn(awi->dev, "Cannot write port1 out level\n");
- goto out;
- }
}
if (mask_lo) {
reg = AW9523_REG_OUT_STATE(0);
- bits_lo = *bits & U8_MAX;
ret = regmap_write_bits(awi->regmap, reg, mask_lo, bits_lo);
if (ret)
dev_warn(awi->dev, "Cannot write port0 out level\n");
}
-out:
mutex_unlock(&awi->i2c_lock);
}
@@ -827,29 +804,21 @@ static int aw9523_init_irq(struct aw9523 *awi, int irq)
{
struct device *dev = awi->dev;
struct gpio_irq_chip *girq;
- struct irq_chip *irqchip;
int ret;
if (!device_property_read_bool(dev, "interrupt-controller"))
return 0;
- irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL);
- if (!irqchip)
- return -ENOMEM;
-
awi->irq = devm_kzalloc(dev, sizeof(*awi->irq), GFP_KERNEL);
if (!awi->irq)
return -ENOMEM;
- awi->irq->irqchip = irqchip;
mutex_init(&awi->irq->lock);
ret = devm_request_threaded_irq(dev, irq, NULL, aw9523_irq_thread_func,
IRQF_ONESHOT, dev_name(dev), awi);
- if (ret) {
- dev_err(dev, "Failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq);
girq = &awi->gpio.irq;
gpio_irq_chip_set_chip(girq, &aw9523_irq_chip);
@@ -1015,8 +984,7 @@ static int aw9523_probe(struct i2c_client *client)
}
mutex_init(&awi->i2c_lock);
- lockdep_set_subclass(&awi->i2c_lock,
- i2c_adapter_depth(client->adapter));
+ lockdep_set_subclass(&awi->i2c_lock, i2c_adapter_depth(client->adapter));
pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL);
if (!pdesc)
@@ -1046,8 +1014,7 @@ static int aw9523_probe(struct i2c_client *client)
awi->pctl = devm_pinctrl_register(dev, pdesc, awi);
if (IS_ERR(awi->pctl)) {
- ret = PTR_ERR(awi->pctl);
- dev_err(dev, "Cannot register pinctrl: %d", ret);
+ ret = dev_err_probe(dev, PTR_ERR(awi->pctl), "Cannot register pinctrl");
goto err_disable_vregs;
}
@@ -1067,10 +1034,6 @@ err_disable_vregs:
static void aw9523_remove(struct i2c_client *client)
{
struct aw9523 *awi = i2c_get_clientdata(client);
- int ret;
-
- if (!awi)
- return;
/*
* If the chip VIO is connected to a regulator that we can turn
@@ -1082,10 +1045,8 @@ static void aw9523_remove(struct i2c_client *client)
regulator_disable(awi->vio_vreg);
} else {
mutex_lock(&awi->i2c_lock);
- ret = aw9523_hw_init(awi);
+ aw9523_hw_init(awi);
mutex_unlock(&awi->i2c_lock);
- if (ret)
- return;
}
mutex_destroy(&awi->i2c_lock);
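The aw9523 hunks above drop the driver-private struct aw9523_pinmux in favour of the generic struct pinfunction and its PINCTRL_PINFUNCTION() initializer, whose .groups/.ngroups members the pinmux callbacks now return directly. A minimal sketch of that pattern (the demo_* names are illustrative, not from the patch):

/*
 * Minimal sketch, assuming the generic pinfunction helpers from
 * <linux/pinctrl/pinctrl.h>: one array entry per selectable function,
 * each pointing at the list of groups it can be muxed onto.
 */
#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

static const char * const demo_uart_groups[] = { "gpio0", "gpio1" };

static const struct pinfunction demo_functions[] = {
	PINCTRL_PINFUNCTION("uart", demo_uart_groups, ARRAY_SIZE(demo_uart_groups)),
};

/* A get_function_groups() callback then simply forwards the table entries. */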
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 67b5d160c027..981c569bd671 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -95,7 +95,7 @@ static int cy8c95x0_acpi_get_irq(struct device *dev)
if (ret)
dev_warn(dev, "can't add GPIO ACPI mapping\n");
- ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0);
if (ret < 0)
return ret;
diff --git a/drivers/pinctrl/pinctrl-loongson2.c b/drivers/pinctrl/pinctrl-loongson2.c
index a72ffeca26fb..4d4fbeadafb7 100644
--- a/drivers/pinctrl/pinctrl-loongson2.c
+++ b/drivers/pinctrl/pinctrl-loongson2.c
@@ -286,6 +286,7 @@ static const struct of_device_id loongson2_pinctrl_dt_match[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, loongson2_pinctrl_dt_match);
static struct platform_driver loongson2_pinctrl_driver = {
.probe = loongson2_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index ab723ab4ec1d..d236daa7c13e 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -88,7 +88,6 @@ struct max77620_pingroup {
struct max77620_pin_info {
enum max77620_pin_ppdrv drv_type;
- int pull_config;
};
struct max77620_fps_config {
@@ -104,7 +103,6 @@ struct max77620_pctrl_info {
struct device *dev;
struct pinctrl_dev *pctl;
struct regmap *rmap;
- int pins_current_opt[MAX77620_GPIO_NR];
const struct max77620_pin_function *functions;
unsigned int num_functions;
const struct max77620_pingroup *pin_groups;
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index 56d916f2cee6..c42f1bf93404 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -93,6 +93,11 @@ enum rk806_pinmux_option {
RK806_PINMUX_FUN5,
};
+enum rk816_pinmux_option {
+ RK816_PINMUX_THERMISTOR,
+ RK816_PINMUX_GPIO,
+};
+
enum {
RK805_GPIO0,
RK805_GPIO1,
@@ -104,6 +109,10 @@ enum {
RK806_GPIO_DVS3
};
+enum {
+ RK816_GPIO0,
+};
+
static const char *const rk805_gpio_groups[] = {
"gpio0",
"gpio1",
@@ -115,6 +124,10 @@ static const char *const rk806_gpio_groups[] = {
"gpio_pwrctrl3",
};
+static const char *const rk816_gpio_groups[] = {
+ "gpio0",
+};
+
/* RK805: 2 output only GPIOs */
static const struct pinctrl_pin_desc rk805_pins_desc[] = {
PINCTRL_PIN(RK805_GPIO0, "gpio0"),
@@ -128,6 +141,11 @@ static const struct pinctrl_pin_desc rk806_pins_desc[] = {
PINCTRL_PIN(RK806_GPIO_DVS3, "gpio_pwrctrl3"),
};
+/* RK816 */
+static const struct pinctrl_pin_desc rk816_pins_desc[] = {
+ PINCTRL_PIN(RK816_GPIO0, "gpio0"),
+};
+
static const struct rk805_pin_function rk805_pin_functions[] = {
{
.name = "gpio",
@@ -176,6 +194,21 @@ static const struct rk805_pin_function rk806_pin_functions[] = {
},
};
+static const struct rk805_pin_function rk816_pin_functions[] = {
+ {
+ .name = "gpio",
+ .groups = rk816_gpio_groups,
+ .ngroups = ARRAY_SIZE(rk816_gpio_groups),
+ .mux_option = RK816_PINMUX_GPIO,
+ },
+ {
+ .name = "thermistor",
+ .groups = rk816_gpio_groups,
+ .ngroups = ARRAY_SIZE(rk816_gpio_groups),
+ .mux_option = RK816_PINMUX_THERMISTOR,
+ },
+};
+
static const struct rk805_pin_group rk805_pin_groups[] = {
{
.name = "gpio0",
@@ -207,6 +240,14 @@ static const struct rk805_pin_group rk806_pin_groups[] = {
}
};
+static const struct rk805_pin_group rk816_pin_groups[] = {
+ {
+ .name = "gpio0",
+ .pins = { RK816_GPIO0 },
+ .npins = 1,
+ },
+};
+
#define RK805_GPIO0_VAL_MSK BIT(0)
#define RK805_GPIO1_VAL_MSK BIT(1)
@@ -255,6 +296,20 @@ static struct rk805_pin_config rk806_gpio_cfgs[] = {
}
};
+#define RK816_FUN_MASK BIT(2)
+#define RK816_VAL_MASK BIT(3)
+#define RK816_DIR_MASK BIT(4)
+
+static struct rk805_pin_config rk816_gpio_cfgs[] = {
+ {
+ .fun_reg = RK818_IO_POL_REG,
+ .fun_msk = RK816_FUN_MASK,
+ .reg = RK818_IO_POL_REG,
+ .val_msk = RK816_VAL_MASK,
+ .dir_msk = RK816_DIR_MASK,
+ },
+};
+
/* generic gpio chip */
static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
@@ -439,6 +494,8 @@ static int rk805_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
return _rk805_pinctrl_set_mux(pctldev, offset, RK805_PINMUX_GPIO);
case RK806_ID:
return _rk805_pinctrl_set_mux(pctldev, offset, RK806_PINMUX_FUN5);
+ case RK816_ID:
+ return _rk805_pinctrl_set_mux(pctldev, offset, RK816_PINMUX_GPIO);
}
return -ENOTSUPP;
@@ -588,6 +645,18 @@ static int rk805_pinctrl_probe(struct platform_device *pdev)
pci->pin_cfg = rk806_gpio_cfgs;
pci->gpio_chip.ngpio = ARRAY_SIZE(rk806_gpio_cfgs);
break;
+ case RK816_ID:
+ pci->pins = rk816_pins_desc;
+ pci->num_pins = ARRAY_SIZE(rk816_pins_desc);
+ pci->functions = rk816_pin_functions;
+ pci->num_functions = ARRAY_SIZE(rk816_pin_functions);
+ pci->groups = rk816_pin_groups;
+ pci->num_pin_groups = ARRAY_SIZE(rk816_pin_groups);
+ pci->pinctrl_desc.pins = rk816_pins_desc;
+ pci->pinctrl_desc.npins = ARRAY_SIZE(rk816_pins_desc);
+ pci->pin_cfg = rk816_gpio_cfgs;
+ pci->gpio_chip.ngpio = ARRAY_SIZE(rk816_gpio_cfgs);
+ break;
default:
dev_err(&pdev->dev, "unsupported RK805 ID %lu\n",
pci->rk808->variant);
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
new file mode 100644
index 000000000000..036bc1e3fc6c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Power Interface (SCMI) Protocol based pinctrl driver
+ *
+ * Copyright (C) 2024 EPAM
+ * Copyright 2024 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-utils.h"
+#include "core.h"
+#include "pinconf.h"
+
+#define DRV_NAME "scmi-pinctrl"
+
+/* If the number of configs is at most 4 use the stack, else use kcalloc() */
+#define SCMI_NUM_CONFIGS 4
+
+static const struct scmi_pinctrl_proto_ops *pinctrl_ops;
+
+struct scmi_pinctrl {
+ struct device *dev;
+ struct scmi_protocol_handle *ph;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pctl_desc;
+ struct pinfunction *functions;
+ unsigned int nr_functions;
+ struct pinctrl_pin_desc *pins;
+ unsigned int nr_pins;
+};
+
+static int pinctrl_scmi_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->count_get(pmx->ph, GROUP_TYPE);
+}
+
+static const char *pinctrl_scmi_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ int ret;
+ const char *name;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ ret = pinctrl_ops->name_get(pmx->ph, selector, GROUP_TYPE, &name);
+ if (ret) {
+ dev_err(pmx->dev, "get name failed with err %d", ret);
+ return NULL;
+ }
+
+ return name;
+}
+
+static int pinctrl_scmi_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->group_pins_get(pmx->ph, selector, pins, num_pins);
+}
+
+static const struct pinctrl_ops pinctrl_scmi_pinctrl_ops = {
+ .get_groups_count = pinctrl_scmi_get_groups_count,
+ .get_group_name = pinctrl_scmi_get_group_name,
+ .get_group_pins = pinctrl_scmi_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+static int pinctrl_scmi_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->count_get(pmx->ph, FUNCTION_TYPE);
+}
+
+static const char *pinctrl_scmi_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ int ret;
+ const char *name;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ ret = pinctrl_ops->name_get(pmx->ph, selector, FUNCTION_TYPE, &name);
+ if (ret) {
+ dev_err(pmx->dev, "get name failed with err %d", ret);
+ return NULL;
+ }
+
+ return name;
+}
+
+static int pinctrl_scmi_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **p_groups,
+ unsigned int * const p_num_groups)
+{
+ struct pinfunction *func;
+ const unsigned int *group_ids;
+ unsigned int num_groups;
+ const char **groups;
+ int ret, i;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (!p_groups || !p_num_groups)
+ return -EINVAL;
+
+ if (selector >= pmx->nr_functions)
+ return -EINVAL;
+
+ func = &pmx->functions[selector];
+ if (func->ngroups)
+ goto done;
+
+ ret = pinctrl_ops->function_groups_get(pmx->ph, selector, &num_groups,
+ &group_ids);
+ if (ret) {
+ dev_err(pmx->dev, "Unable to get function groups, err %d", ret);
+ return ret;
+ }
+ if (!num_groups)
+ return -EINVAL;
+
+ groups = kcalloc(num_groups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < num_groups; i++) {
+ groups[i] = pinctrl_scmi_get_group_name(pctldev, group_ids[i]);
+ if (!groups[i]) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+ }
+
+ func->ngroups = num_groups;
+ func->groups = groups;
+done:
+ *p_groups = func->groups;
+ *p_num_groups = func->ngroups;
+
+ return 0;
+
+err_free:
+ kfree(groups);
+
+ return ret;
+}
+
+static int pinctrl_scmi_func_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->mux_set(pmx->ph, selector, group);
+}
+
+static int pinctrl_scmi_request(struct pinctrl_dev *pctldev,
+ unsigned int offset)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->pin_request(pmx->ph, offset);
+}
+
+static int pinctrl_scmi_free(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->pin_free(pmx->ph, offset);
+}
+
+static const struct pinmux_ops pinctrl_scmi_pinmux_ops = {
+ .request = pinctrl_scmi_request,
+ .free = pinctrl_scmi_free,
+ .get_functions_count = pinctrl_scmi_get_functions_count,
+ .get_function_name = pinctrl_scmi_get_function_name,
+ .get_function_groups = pinctrl_scmi_get_function_groups,
+ .set_mux = pinctrl_scmi_func_set_mux,
+};
+
+static int pinctrl_scmi_map_pinconf_type(enum pin_config_param param,
+ enum scmi_pinctrl_conf_type *type)
+{
+ u32 arg = param;
+
+ switch (arg) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *type = SCMI_PIN_BIAS_BUS_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ *type = SCMI_PIN_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *type = SCMI_PIN_BIAS_HIGH_IMPEDANCE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *type = SCMI_PIN_BIAS_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ *type = SCMI_PIN_BIAS_PULL_DEFAULT;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *type = SCMI_PIN_BIAS_PULL_UP;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ *type = SCMI_PIN_DRIVE_OPEN_DRAIN;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ *type = SCMI_PIN_DRIVE_OPEN_SOURCE;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ *type = SCMI_PIN_DRIVE_PUSH_PULL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ *type = SCMI_PIN_DRIVE_STRENGTH;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ *type = SCMI_PIN_DRIVE_STRENGTH;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ *type = SCMI_PIN_INPUT_DEBOUNCE;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ *type = SCMI_PIN_INPUT_MODE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT:
+ *type = SCMI_PIN_INPUT_SCHMITT;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *type = SCMI_PIN_INPUT_MODE;
+ break;
+ case PIN_CONFIG_MODE_LOW_POWER:
+ *type = SCMI_PIN_LOW_POWER_MODE;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ *type = SCMI_PIN_OUTPUT_VALUE;
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ *type = SCMI_PIN_OUTPUT_MODE;
+ break;
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
+ *type = SCMI_PIN_OUTPUT_VALUE;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ *type = SCMI_PIN_POWER_SOURCE;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ *type = SCMI_PIN_SLEW_RATE;
+ break;
+ case SCMI_PIN_OEM_START ... SCMI_PIN_OEM_END:
+ *type = arg;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pinctrl_scmi_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ int ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param config_type;
+ enum scmi_pinctrl_conf_type type;
+ u32 config_value;
+
+ if (!config)
+ return -EINVAL;
+
+ config_type = pinconf_to_config_param(*config);
+
+ ret = pinctrl_scmi_map_pinconf_type(config_type, &type);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_ops->settings_get_one(pmx->ph, pin, PIN_TYPE, type,
+ &config_value);
+	/* Convert the SCMI error code into the error code pinctrl expects */
+ if (ret == -EOPNOTSUPP)
+ return -ENOTSUPP;
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(config_type, config_value);
+
+ return 0;
+}
+
+static int
+pinctrl_scmi_alloc_configs(struct pinctrl_dev *pctldev, u32 num_configs,
+ u32 **p_config_value,
+ enum scmi_pinctrl_conf_type **p_config_type)
+{
+ if (num_configs <= SCMI_NUM_CONFIGS)
+ return 0;
+
+ *p_config_value = kcalloc(num_configs, sizeof(**p_config_value), GFP_KERNEL);
+ if (!*p_config_value)
+ return -ENOMEM;
+
+ *p_config_type = kcalloc(num_configs, sizeof(**p_config_type), GFP_KERNEL);
+ if (!*p_config_type) {
+ kfree(*p_config_value);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+pinctrl_scmi_free_configs(struct pinctrl_dev *pctldev, u32 num_configs,
+ u32 **p_config_value,
+ enum scmi_pinctrl_conf_type **p_config_type)
+{
+ if (num_configs <= SCMI_NUM_CONFIGS)
+ return;
+
+ kfree(*p_config_value);
+ kfree(*p_config_type);
+}
+
+static int pinctrl_scmi_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum scmi_pinctrl_conf_type config_type[SCMI_NUM_CONFIGS];
+ u32 config_value[SCMI_NUM_CONFIGS];
+ enum scmi_pinctrl_conf_type *p_config_type = config_type;
+ u32 *p_config_value = config_value;
+ enum pin_config_param param;
+
+ if (!configs || !num_configs)
+ return -EINVAL;
+
+ ret = pinctrl_scmi_alloc_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ ret = pinctrl_scmi_map_pinconf_type(param, &p_config_type[i]);
+ if (ret) {
+			dev_err(pmx->dev, "Error mapping pinconf type: %d\n", ret);
+ goto free_config;
+ }
+ p_config_value[i] = pinconf_to_config_argument(configs[i]);
+ }
+
+ ret = pinctrl_ops->settings_conf(pmx->ph, pin, PIN_TYPE, num_configs,
+ p_config_type, p_config_value);
+ if (ret)
+ dev_err(pmx->dev, "Error parsing config %d\n", ret);
+
+free_config:
+ pinctrl_scmi_free_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ return ret;
+}
+
+static int pinctrl_scmi_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum scmi_pinctrl_conf_type config_type[SCMI_NUM_CONFIGS];
+ u32 config_value[SCMI_NUM_CONFIGS];
+ enum scmi_pinctrl_conf_type *p_config_type = config_type;
+ u32 *p_config_value = config_value;
+ enum pin_config_param param;
+
+ if (!configs || !num_configs)
+ return -EINVAL;
+
+ ret = pinctrl_scmi_alloc_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ ret = pinctrl_scmi_map_pinconf_type(param, &p_config_type[i]);
+ if (ret) {
+			dev_err(pmx->dev, "Error mapping pinconf type: %d\n", ret);
+ goto free_config;
+ }
+
+ p_config_value[i] = pinconf_to_config_argument(configs[i]);
+ }
+
+ ret = pinctrl_ops->settings_conf(pmx->ph, group, GROUP_TYPE,
+ num_configs, p_config_type,
+ p_config_value);
+ if (ret)
+		dev_err(pmx->dev, "Error parsing config %d\n", ret);
+
+free_config:
+ pinctrl_scmi_free_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ return ret;
+};
+
+static int pinctrl_scmi_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ int ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param config_type;
+ enum scmi_pinctrl_conf_type type;
+ u32 config_value;
+
+ if (!config)
+ return -EINVAL;
+
+ config_type = pinconf_to_config_param(*config);
+ ret = pinctrl_scmi_map_pinconf_type(config_type, &type);
+ if (ret) {
+		dev_err(pmx->dev, "Error mapping pinconf type: %d\n", ret);
+ return ret;
+ }
+
+ ret = pinctrl_ops->settings_get_one(pmx->ph, group, GROUP_TYPE, type,
+ &config_value);
+	/* Convert the SCMI error code into the error code pinctrl expects */
+ if (ret == -EOPNOTSUPP)
+ return -ENOTSUPP;
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(config_type, config_value);
+
+ return 0;
+}
+
+static const struct pinconf_ops pinctrl_scmi_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = pinctrl_scmi_pinconf_get,
+ .pin_config_set = pinctrl_scmi_pinconf_set,
+ .pin_config_group_set = pinctrl_scmi_pinconf_group_set,
+ .pin_config_group_get = pinctrl_scmi_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx,
+ struct pinctrl_desc *desc)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ int ret, i;
+
+ npins = pinctrl_ops->count_get(pmx->ph, PIN_TYPE);
+ /*
+	 * npins will never be zero; the SCMI pinctrl driver has already
+	 * bailed out if npins is zero.
+ */
+ pins = devm_kmalloc_array(pmx->dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < npins; i++) {
+ pins[i].number = i;
+ /*
+		 * The memory for the name is owned by the SCMI firmware driver,
+		 * so there is no need to free it here.
+ */
+ ret = pinctrl_ops->name_get(pmx->ph, i, PIN_TYPE, &pins[i].name);
+ if (ret)
+ return dev_err_probe(pmx->dev, ret,
+ "Can't get name for pin %d", i);
+ }
+
+ desc->npins = npins;
+ desc->pins = pins;
+ dev_dbg(pmx->dev, "got pins %u", npins);
+
+ return 0;
+}
+
+static int scmi_pinctrl_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct device *dev = &sdev->dev;
+ struct scmi_pinctrl *pmx;
+ const struct scmi_handle *handle;
+ struct scmi_protocol_handle *ph;
+
+ if (!sdev->handle)
+ return -EINVAL;
+
+ handle = sdev->handle;
+
+ pinctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
+ if (IS_ERR(pinctrl_ops))
+ return PTR_ERR(pinctrl_ops);
+
+ pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->ph = ph;
+
+ pmx->dev = dev;
+ pmx->pctl_desc.name = DRV_NAME;
+ pmx->pctl_desc.owner = THIS_MODULE;
+ pmx->pctl_desc.pctlops = &pinctrl_scmi_pinctrl_ops;
+ pmx->pctl_desc.pmxops = &pinctrl_scmi_pinmux_ops;
+ pmx->pctl_desc.confops = &pinctrl_scmi_pinconf_ops;
+
+ ret = pinctrl_scmi_get_pins(pmx, &pmx->pctl_desc);
+ if (ret)
+ return ret;
+
+ ret = devm_pinctrl_register_and_init(dev, &pmx->pctl_desc, pmx,
+ &pmx->pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pinctrl\n");
+
+ pmx->nr_functions = pinctrl_scmi_get_functions_count(pmx->pctldev);
+ pmx->functions = devm_kcalloc(dev, pmx->nr_functions,
+ sizeof(*pmx->functions), GFP_KERNEL);
+ if (!pmx->functions)
+ return -ENOMEM;
+
+ return pinctrl_enable(pmx->pctldev);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_PINCTRL, "pinctrl" },
+ { }
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_pinctrl_driver = {
+ .name = DRV_NAME,
+ .probe = scmi_pinctrl_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_pinctrl_driver);
+
+MODULE_AUTHOR("Oleksii Moisieiev <oleksii_moisieiev@epam.com>");
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("ARM SCMI pin controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 19cc0db771a5..a798f31d6954 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -81,8 +81,6 @@ struct pcs_conf_type {
* @name: pinctrl function name
* @vals: register and vals array
* @nvals: number of entries in vals array
- * @pgnames: array of pingroup names the function uses
- * @npgnames: number of pingroup names the function uses
* @conf: array of pin configurations
* @nconfs: number of pin configurations available
* @node: list node
@@ -91,8 +89,6 @@ struct pcs_function {
const char *name;
struct pcs_func_vals *vals;
unsigned nvals;
- const char **pgnames;
- int npgnames;
struct pcs_conf_vals *conf;
int nconfs;
struct list_head node;
@@ -554,21 +550,30 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
unsigned offset = 0, shift = 0, i, data, ret;
u32 arg;
int j;
+ enum pin_config_param param;
ret = pcs_get_function(pctldev, pin, &func);
if (ret)
return ret;
for (j = 0; j < num_configs; j++) {
+ param = pinconf_to_config_param(configs[j]);
+
+ /* BIAS_DISABLE has no entry in the func->conf table */
+ if (param == PIN_CONFIG_BIAS_DISABLE) {
+ /* This just disables all bias entries */
+ pcs_pinconf_clear_bias(pctldev, pin);
+ continue;
+ }
+
for (i = 0; i < func->nconfs; i++) {
- if (pinconf_to_config_param(configs[j])
- != func->conf[i].param)
+ if (param != func->conf[i].param)
continue;
offset = pin * (pcs->width / BITS_PER_BYTE);
data = pcs->read(pcs->base + offset);
arg = pinconf_to_config_argument(configs[j]);
- switch (func->conf[i].param) {
+ switch (param) {
/* 2 parameters */
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -580,9 +585,6 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
data |= (arg << shift) & func->conf[i].mask;
break;
/* 4 parameters */
- case PIN_CONFIG_BIAS_DISABLE:
- pcs_pinconf_clear_bias(pctldev, pin);
- break;
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
if (arg)
@@ -1625,7 +1627,6 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
return 0;
}
-#ifdef CONFIG_PM
static int pcs_save_context(struct pcs_device *pcs)
{
int i, mux_bytes;
@@ -1690,14 +1691,9 @@ static void pcs_restore_context(struct pcs_device *pcs)
}
}
-static int pinctrl_single_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int pinctrl_single_suspend_noirq(struct device *dev)
{
- struct pcs_device *pcs;
-
- pcs = platform_get_drvdata(pdev);
- if (!pcs)
- return -EINVAL;
+ struct pcs_device *pcs = dev_get_drvdata(dev);
if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
int ret;
@@ -1710,20 +1706,19 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
return pinctrl_force_sleep(pcs->pctl);
}
-static int pinctrl_single_resume(struct platform_device *pdev)
+static int pinctrl_single_resume_noirq(struct device *dev)
{
- struct pcs_device *pcs;
-
- pcs = platform_get_drvdata(pdev);
- if (!pcs)
- return -EINVAL;
+ struct pcs_device *pcs = dev_get_drvdata(dev);
if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
pcs_restore_context(pcs);
return pinctrl_force_default(pcs->pctl);
}
-#endif
+
+static DEFINE_NOIRQ_DEV_PM_OPS(pinctrl_single_pm_ops,
+ pinctrl_single_suspend_noirq,
+ pinctrl_single_resume_noirq);
/**
* pcs_quirk_missing_pinctrl_cells - handle legacy binding
@@ -1986,11 +1981,8 @@ static struct platform_driver pcs_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = pcs_of_match,
+ .pm = pm_sleep_ptr(&pinctrl_single_pm_ops),
},
-#ifdef CONFIG_PM
- .suspend = pinctrl_single_suspend,
- .resume = pinctrl_single_resume,
-#endif
};
module_platform_driver(pcs_driver);
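The pinctrl-single.c hunks above replace the legacy platform_driver .suspend/.resume callbacks with noirq dev_pm_ops referenced through pm_sleep_ptr(), so the #ifdef CONFIG_PM guards can go. A minimal sketch of that wiring (the demo_* names are illustrative, not from the patch):

/*
 * Minimal sketch, assuming DEFINE_NOIRQ_DEV_PM_OPS() and pm_sleep_ptr()
 * from <linux/pm.h>: the callbacks land in the noirq phase and the ops
 * reference is dropped when sleep support is disabled.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend_noirq(struct device *dev)
{
	return 0;	/* save context, force sleep state, ... */
}

static int demo_resume_noirq(struct device *dev)
{
	return 0;	/* restore context, force default state, ... */
}

static DEFINE_NOIRQ_DEV_PM_OPS(demo_pm_ops, demo_suspend_noirq,
			       demo_resume_noirq);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo-pinctrl",
		.pm = pm_sleep_ptr(&demo_pm_ops),
	},
};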
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
index 66985e54b74a..085047320853 100644
--- a/drivers/pinctrl/pinctrl-tps6594.c
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -14,8 +14,6 @@
#include <linux/mfd/tps6594.h>
-#define TPS6594_PINCTRL_PINS_NB 11
-
#define TPS6594_PINCTRL_GPIO_FUNCTION 0
#define TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION 1
#define TPS6594_PINCTRL_TRIG_WDOG_FUNCTION 1
@@ -40,17 +38,40 @@
#define TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8 3
#define TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9 3
+/* TPS65224 pin muxval */
+#define TPS65224_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION 1
+#define TPS65224_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION 1
+#define TPS65224_PINCTRL_VMON1_FUNCTION 1
+#define TPS65224_PINCTRL_VMON2_FUNCTION 1
+#define TPS65224_PINCTRL_WKUP_FUNCTION 1
+#define TPS65224_PINCTRL_NSLEEP2_FUNCTION 2
+#define TPS65224_PINCTRL_NSLEEP1_FUNCTION 2
+#define TPS65224_PINCTRL_SYNCCLKIN_FUNCTION 2
+#define TPS65224_PINCTRL_NERR_MCU_FUNCTION 2
+#define TPS65224_PINCTRL_NINT_FUNCTION 3
+#define TPS65224_PINCTRL_TRIG_WDOG_FUNCTION 3
+#define TPS65224_PINCTRL_PB_FUNCTION 3
+#define TPS65224_PINCTRL_ADC_IN_FUNCTION 3
+
+/* TPS65224 Special muxval for recalcitrant pins */
+#define TPS65224_PINCTRL_NSLEEP2_FUNCTION_GPIO5 1
+#define TPS65224_PINCTRL_WKUP_FUNCTION_GPIO5 4
+#define TPS65224_PINCTRL_SYNCCLKIN_FUNCTION_GPIO5 3
+
#define TPS6594_OFFSET_GPIO_SEL 5
-#define FUNCTION(fname, v) \
+#define TPS65224_NGPIO_PER_REG 6
+#define TPS6594_NGPIO_PER_REG 8
+
+#define FUNCTION(dev_name, fname, v) \
{ \
.pinfunction = PINCTRL_PINFUNCTION(#fname, \
- tps6594_##fname##_func_group_names, \
- ARRAY_SIZE(tps6594_##fname##_func_group_names)),\
+ dev_name##_##fname##_func_group_names, \
+ ARRAY_SIZE(dev_name##_##fname##_func_group_names)),\
.muxval = v, \
}
-static const struct pinctrl_pin_desc tps6594_pins[TPS6594_PINCTRL_PINS_NB] = {
+static const struct pinctrl_pin_desc tps6594_pins[] = {
PINCTRL_PIN(0, "GPIO0"), PINCTRL_PIN(1, "GPIO1"),
PINCTRL_PIN(2, "GPIO2"), PINCTRL_PIN(3, "GPIO3"),
PINCTRL_PIN(4, "GPIO4"), PINCTRL_PIN(5, "GPIO5"),
@@ -143,30 +164,127 @@ static const char *const tps6594_syncclkin_func_group_names[] = {
"GPIO9",
};
+static const struct pinctrl_pin_desc tps65224_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"), PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"), PINCTRL_PIN(3, "GPIO3"),
+ PINCTRL_PIN(4, "GPIO4"), PINCTRL_PIN(5, "GPIO5"),
+};
+
+static const char *const tps65224_gpio_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+};
+
+static const char *const tps65224_sda_i2c2_sdo_spi_func_group_names[] = {
+ "GPIO0",
+};
+
+static const char *const tps65224_nsleep2_func_group_names[] = {
+ "GPIO0", "GPIO5",
+};
+
+static const char *const tps65224_nint_func_group_names[] = {
+ "GPIO0",
+};
+
+static const char *const tps65224_scl_i2c2_cs_spi_func_group_names[] = {
+ "GPIO1",
+};
+
+static const char *const tps65224_nsleep1_func_group_names[] = {
+ "GPIO1", "GPIO2", "GPIO3",
+};
+
+static const char *const tps65224_trig_wdog_func_group_names[] = {
+ "GPIO1",
+};
+
+static const char *const tps65224_vmon1_func_group_names[] = {
+ "GPIO2",
+};
+
+static const char *const tps65224_pb_func_group_names[] = {
+ "GPIO2",
+};
+
+static const char *const tps65224_vmon2_func_group_names[] = {
+ "GPIO3",
+};
+
+static const char *const tps65224_adc_in_func_group_names[] = {
+ "GPIO3", "GPIO4",
+};
+
+static const char *const tps65224_wkup_func_group_names[] = {
+ "GPIO4", "GPIO5",
+};
+
+static const char *const tps65224_syncclkin_func_group_names[] = {
+ "GPIO4", "GPIO5",
+};
+
+static const char *const tps65224_nerr_mcu_func_group_names[] = {
+ "GPIO5",
+};
+
struct tps6594_pinctrl_function {
struct pinfunction pinfunction;
u8 muxval;
};
+struct muxval_remap {
+ unsigned int group;
+ u8 muxval;
+ u8 remap;
+};
+
+struct muxval_remap tps65224_muxval_remap[] = {
+ {5, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION, TPS65224_PINCTRL_WKUP_FUNCTION_GPIO5},
+ {5, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION_GPIO5},
+ {5, TPS65224_PINCTRL_NSLEEP2_FUNCTION, TPS65224_PINCTRL_NSLEEP2_FUNCTION_GPIO5},
+};
+
+struct muxval_remap tps6594_muxval_remap[] = {
+ {8, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8},
+ {8, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8},
+ {9, TPS6594_PINCTRL_CLK32KOUT_FUNCTION, TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9},
+};
+
static const struct tps6594_pinctrl_function pinctrl_functions[] = {
- FUNCTION(gpio, TPS6594_PINCTRL_GPIO_FUNCTION),
- FUNCTION(nsleep1, TPS6594_PINCTRL_NSLEEP1_FUNCTION),
- FUNCTION(nsleep2, TPS6594_PINCTRL_NSLEEP2_FUNCTION),
- FUNCTION(wkup1, TPS6594_PINCTRL_WKUP1_FUNCTION),
- FUNCTION(wkup2, TPS6594_PINCTRL_WKUP2_FUNCTION),
- FUNCTION(scl_i2c2_cs_spi, TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION),
- FUNCTION(nrstout_soc, TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION),
- FUNCTION(trig_wdog, TPS6594_PINCTRL_TRIG_WDOG_FUNCTION),
- FUNCTION(sda_i2c2_sdo_spi, TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION),
- FUNCTION(clk32kout, TPS6594_PINCTRL_CLK32KOUT_FUNCTION),
- FUNCTION(nerr_soc, TPS6594_PINCTRL_NERR_SOC_FUNCTION),
- FUNCTION(sclk_spmi, TPS6594_PINCTRL_SCLK_SPMI_FUNCTION),
- FUNCTION(sdata_spmi, TPS6594_PINCTRL_SDATA_SPMI_FUNCTION),
- FUNCTION(nerr_mcu, TPS6594_PINCTRL_NERR_MCU_FUNCTION),
- FUNCTION(syncclkout, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION),
- FUNCTION(disable_wdog, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION),
- FUNCTION(pdog, TPS6594_PINCTRL_PDOG_FUNCTION),
- FUNCTION(syncclkin, TPS6594_PINCTRL_SYNCCLKIN_FUNCTION),
+ FUNCTION(tps6594, gpio, TPS6594_PINCTRL_GPIO_FUNCTION),
+ FUNCTION(tps6594, nsleep1, TPS6594_PINCTRL_NSLEEP1_FUNCTION),
+ FUNCTION(tps6594, nsleep2, TPS6594_PINCTRL_NSLEEP2_FUNCTION),
+ FUNCTION(tps6594, wkup1, TPS6594_PINCTRL_WKUP1_FUNCTION),
+ FUNCTION(tps6594, wkup2, TPS6594_PINCTRL_WKUP2_FUNCTION),
+ FUNCTION(tps6594, scl_i2c2_cs_spi, TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION),
+ FUNCTION(tps6594, nrstout_soc, TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION),
+ FUNCTION(tps6594, trig_wdog, TPS6594_PINCTRL_TRIG_WDOG_FUNCTION),
+ FUNCTION(tps6594, sda_i2c2_sdo_spi, TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION),
+ FUNCTION(tps6594, clk32kout, TPS6594_PINCTRL_CLK32KOUT_FUNCTION),
+ FUNCTION(tps6594, nerr_soc, TPS6594_PINCTRL_NERR_SOC_FUNCTION),
+ FUNCTION(tps6594, sclk_spmi, TPS6594_PINCTRL_SCLK_SPMI_FUNCTION),
+ FUNCTION(tps6594, sdata_spmi, TPS6594_PINCTRL_SDATA_SPMI_FUNCTION),
+ FUNCTION(tps6594, nerr_mcu, TPS6594_PINCTRL_NERR_MCU_FUNCTION),
+ FUNCTION(tps6594, syncclkout, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION),
+ FUNCTION(tps6594, disable_wdog, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION),
+ FUNCTION(tps6594, pdog, TPS6594_PINCTRL_PDOG_FUNCTION),
+ FUNCTION(tps6594, syncclkin, TPS6594_PINCTRL_SYNCCLKIN_FUNCTION),
+};
+
+static const struct tps6594_pinctrl_function tps65224_pinctrl_functions[] = {
+ FUNCTION(tps65224, gpio, TPS6594_PINCTRL_GPIO_FUNCTION),
+ FUNCTION(tps65224, sda_i2c2_sdo_spi, TPS65224_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION),
+ FUNCTION(tps65224, nsleep2, TPS65224_PINCTRL_NSLEEP2_FUNCTION),
+ FUNCTION(tps65224, nint, TPS65224_PINCTRL_NINT_FUNCTION),
+ FUNCTION(tps65224, scl_i2c2_cs_spi, TPS65224_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION),
+ FUNCTION(tps65224, nsleep1, TPS65224_PINCTRL_NSLEEP1_FUNCTION),
+ FUNCTION(tps65224, trig_wdog, TPS65224_PINCTRL_TRIG_WDOG_FUNCTION),
+ FUNCTION(tps65224, vmon1, TPS65224_PINCTRL_VMON1_FUNCTION),
+ FUNCTION(tps65224, pb, TPS65224_PINCTRL_PB_FUNCTION),
+ FUNCTION(tps65224, vmon2, TPS65224_PINCTRL_VMON2_FUNCTION),
+ FUNCTION(tps65224, adc_in, TPS65224_PINCTRL_ADC_IN_FUNCTION),
+ FUNCTION(tps65224, wkup, TPS65224_PINCTRL_WKUP_FUNCTION),
+ FUNCTION(tps65224, syncclkin, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION),
+ FUNCTION(tps65224, nerr_mcu, TPS65224_PINCTRL_NERR_MCU_FUNCTION),
};
struct tps6594_pinctrl {
@@ -175,6 +293,31 @@ struct tps6594_pinctrl {
struct pinctrl_dev *pctl_dev;
const struct tps6594_pinctrl_function *funcs;
const struct pinctrl_pin_desc *pins;
+ int func_cnt;
+ int num_pins;
+ u8 mux_sel_mask;
+ unsigned int remap_cnt;
+ struct muxval_remap *remap;
+};
+
+static struct tps6594_pinctrl tps65224_template_pinctrl = {
+ .funcs = tps65224_pinctrl_functions,
+ .func_cnt = ARRAY_SIZE(tps65224_pinctrl_functions),
+ .pins = tps65224_pins,
+ .num_pins = ARRAY_SIZE(tps65224_pins),
+ .mux_sel_mask = TPS65224_MASK_GPIO_SEL,
+ .remap = tps65224_muxval_remap,
+ .remap_cnt = ARRAY_SIZE(tps65224_muxval_remap),
+};
+
+static struct tps6594_pinctrl tps6594_template_pinctrl = {
+ .funcs = pinctrl_functions,
+ .func_cnt = ARRAY_SIZE(pinctrl_functions),
+ .pins = tps6594_pins,
+ .num_pins = ARRAY_SIZE(tps6594_pins),
+ .mux_sel_mask = TPS6594_MASK_GPIO_SEL,
+ .remap = tps6594_muxval_remap,
+ .remap_cnt = ARRAY_SIZE(tps6594_muxval_remap),
};
static int tps6594_gpio_regmap_xlate(struct gpio_regmap *gpio,
@@ -201,7 +344,9 @@ static int tps6594_gpio_regmap_xlate(struct gpio_regmap *gpio,
static int tps6594_pmx_func_cnt(struct pinctrl_dev *pctldev)
{
- return ARRAY_SIZE(pinctrl_functions);
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->func_cnt;
}
static const char *tps6594_pmx_func_name(struct pinctrl_dev *pctldev,
@@ -229,10 +374,16 @@ static int tps6594_pmx_set(struct tps6594_pinctrl *pinctrl, unsigned int pin,
u8 muxval)
{
u8 mux_sel_val = muxval << TPS6594_OFFSET_GPIO_SEL;
+ u8 mux_sel_mask = pinctrl->mux_sel_mask;
+
+ if (pinctrl->tps->chip_id == TPS65224 && pin == 5) {
+		/* GPIO6 has a different mask in TPS65224 */
+ mux_sel_mask = TPS65224_MASK_GPIO_SEL_GPIO6;
+ }
return regmap_update_bits(pinctrl->tps->regmap,
TPS6594_REG_GPIOX_CONF(pin),
- TPS6594_MASK_GPIO_SEL, mux_sel_val);
+ mux_sel_mask, mux_sel_val);
}
static int tps6594_pmx_set_mux(struct pinctrl_dev *pctldev,
@@ -240,16 +391,14 @@ static int tps6594_pmx_set_mux(struct pinctrl_dev *pctldev,
{
struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
u8 muxval = pinctrl->funcs[function].muxval;
-
- /* Some pins don't have the same muxval for the same function... */
- if (group == 8) {
- if (muxval == TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION)
- muxval = TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8;
- else if (muxval == TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION)
- muxval = TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8;
- } else if (group == 9) {
- if (muxval == TPS6594_PINCTRL_CLK32KOUT_FUNCTION)
- muxval = TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9;
+ unsigned int remap_cnt = pinctrl->remap_cnt;
+ struct muxval_remap *remap = pinctrl->remap;
+
+ for (unsigned int i = 0; i < remap_cnt; i++) {
+ if (group == remap[i].group && muxval == remap[i].muxval) {
+ muxval = remap[i].remap;
+ break;
+ }
}
return tps6594_pmx_set(pinctrl, group, muxval);
@@ -276,7 +425,9 @@ static const struct pinmux_ops tps6594_pmx_ops = {
static int tps6594_groups_cnt(struct pinctrl_dev *pctldev)
{
- return ARRAY_SIZE(tps6594_pins);
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->num_pins;
}
static int tps6594_group_pins(struct pinctrl_dev *pctldev,
@@ -318,33 +469,54 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev)
pctrl_desc = devm_kzalloc(dev, sizeof(*pctrl_desc), GFP_KERNEL);
if (!pctrl_desc)
return -ENOMEM;
- pctrl_desc->name = dev_name(dev);
- pctrl_desc->owner = THIS_MODULE;
- pctrl_desc->pins = tps6594_pins;
- pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
- pctrl_desc->pctlops = &tps6594_pctrl_ops;
- pctrl_desc->pmxops = &tps6594_pmx_ops;
pinctrl = devm_kzalloc(dev, sizeof(*pinctrl), GFP_KERNEL);
if (!pinctrl)
return -ENOMEM;
- pinctrl->tps = dev_get_drvdata(dev->parent);
- pinctrl->funcs = pinctrl_functions;
- pinctrl->pins = tps6594_pins;
- pinctrl->pctl_dev = devm_pinctrl_register(dev, pctrl_desc, pinctrl);
- if (IS_ERR(pinctrl->pctl_dev))
- return dev_err_probe(dev, PTR_ERR(pinctrl->pctl_dev),
- "Couldn't register pinctrl driver\n");
+
+ switch (tps->chip_id) {
+ case TPS65224:
+ pctrl_desc->pins = tps65224_pins;
+ pctrl_desc->npins = ARRAY_SIZE(tps65224_pins);
+
+ *pinctrl = tps65224_template_pinctrl;
+
+ config.ngpio = ARRAY_SIZE(tps65224_gpio_func_group_names);
+ config.ngpio_per_reg = TPS65224_NGPIO_PER_REG;
+ break;
+ case TPS6593:
+ case TPS6594:
+ pctrl_desc->pins = tps6594_pins;
+ pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
+
+ *pinctrl = tps6594_template_pinctrl;
+
+ config.ngpio = ARRAY_SIZE(tps6594_gpio_func_group_names);
+ config.ngpio_per_reg = TPS6594_NGPIO_PER_REG;
+ break;
+ default:
+ break;
+ }
+
+ pinctrl->tps = tps;
+
+ pctrl_desc->name = dev_name(dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pctlops = &tps6594_pctrl_ops;
+ pctrl_desc->pmxops = &tps6594_pmx_ops;
config.parent = tps->dev;
config.regmap = tps->regmap;
- config.ngpio = TPS6594_PINCTRL_PINS_NB;
- config.ngpio_per_reg = 8;
config.reg_dat_base = TPS6594_REG_GPIO_IN_1;
config.reg_set_base = TPS6594_REG_GPIO_OUT_1;
config.reg_dir_out_base = TPS6594_REG_GPIOX_CONF(0);
config.reg_mask_xlate = tps6594_gpio_regmap_xlate;
+ pinctrl->pctl_dev = devm_pinctrl_register(dev, pctrl_desc, pinctrl);
+ if (IS_ERR(pinctrl->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(pinctrl->pctl_dev),
+ "Couldn't register pinctrl driver\n");
+
pinctrl->gpio_regmap = devm_gpio_regmap_register(dev, &config);
if (IS_ERR(pinctrl->gpio_regmap))
return dev_err_probe(dev, PTR_ERR(pinctrl->gpio_regmap),
@@ -369,5 +541,6 @@ static struct platform_driver tps6594_pinctrl_driver = {
module_platform_driver(tps6594_pinctrl_driver);
MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_AUTHOR("Nirmala Devi Mal Nadar <m.nirmaladevi@ltts.com>");
MODULE_DESCRIPTION("TPS6594 pinctrl and GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index d924207d629b..addba55334d9 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -578,6 +578,7 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(pinmux_functions);
static int pinmux_pins_show(struct seq_file *s, void *what)
{
@@ -650,6 +651,7 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(pinmux_pins);
void pinmux_show_map(struct seq_file *s, const struct pinctrl_map *map)
{
@@ -672,10 +674,12 @@ void pinmux_show_setting(struct seq_file *s,
setting->data.mux.func);
}
-DEFINE_SHOW_ATTRIBUTE(pinmux_functions);
-DEFINE_SHOW_ATTRIBUTE(pinmux_pins);
+static int pinmux_select_show(struct seq_file *s, void *unused)
+{
+ return -EPERM;
+}
-static ssize_t pinmux_select(struct file *file, const char __user *user_buf,
+static ssize_t pinmux_select_write(struct file *file, const char __user *user_buf,
size_t len, loff_t *ppos)
{
struct seq_file *sfile = file->private_data;
@@ -749,19 +753,7 @@ exit_free_buf:
return ret;
}
-
-static int pinmux_select_open(struct inode *inode, struct file *file)
-{
- return single_open(file, NULL, inode->i_private);
-}
-
-static const struct file_operations pinmux_select_ops = {
- .owner = THIS_MODULE,
- .open = pinmux_select_open,
- .write = pinmux_select,
- .llseek = no_llseek,
- .release = single_release,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(pinmux_select);
void pinmux_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
@@ -771,7 +763,7 @@ void pinmux_init_device_debugfs(struct dentry *devroot,
debugfs_create_file("pinmux-pins", 0444,
devroot, pctldev, &pinmux_pins_fops);
debugfs_create_file("pinmux-select", 0200,
- devroot, pctldev, &pinmux_select_ops);
+ devroot, pctldev, &pinmux_select_fops);
}
#endif /* CONFIG_DEBUG_FS */
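The pinmux.c debugfs changes above drop the hand-rolled pinmux_select_ops in favour of DEFINE_SHOW_STORE_ATTRIBUTE(pinmux_select), which pairs pinmux_select_show()/pinmux_select_write() and emits pinmux_select_fops. A minimal sketch of how such a writable debugfs file is typically wired up (the demo_* names are illustrative, not from the patch):

/*
 * Minimal sketch, assuming DEFINE_SHOW_STORE_ATTRIBUTE() from
 * <linux/seq_file.h>: it expects demo_show()/demo_write() and defines
 * demo_fops, replacing the open/write/release boilerplate.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "demo\n");
	return 0;
}

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	return len;	/* accept and discard the input */
}

DEFINE_SHOW_STORE_ATTRIBUTE(demo);

/* debugfs_create_file("demo", 0600, parent, NULL, &demo_fops); */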
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d2568dab8c78..9e34b92ff5f2 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -32,7 +32,7 @@ static const char *pxa2xx_pctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned tgroup)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_group *group = pctl->groups + tgroup;
+ struct pingroup *group = pctl->groups + tgroup;
return group->name;
}
@@ -43,10 +43,10 @@ static int pxa2xx_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned *num_pins)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_group *group = pctl->groups + tgroup;
+ struct pingroup *group = pctl->groups + tgroup;
- *pins = (unsigned *)&group->pin;
- *num_pins = 1;
+ *pins = group->pins;
+ *num_pins = group->npins;
return 0;
}
@@ -109,7 +109,7 @@ static const char *pxa2xx_pmx_get_func_name(struct pinctrl_dev *pctldev,
unsigned function)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_function *pf = pctl->functions + function;
+ struct pinfunction *pf = pctl->functions + function;
return pf->name;
}
@@ -127,7 +127,7 @@ static int pxa2xx_pmx_get_func_groups(struct pinctrl_dev *pctldev,
unsigned * const num_groups)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_function *pf = pctl->functions + function;
+ struct pinfunction *pf = pctl->functions + function;
*groups = pf->groups;
*num_groups = pf->ngroups;
@@ -139,20 +139,18 @@ static int pxa2xx_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned function,
unsigned tgroup)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_group *group = pctl->groups + tgroup;
+ struct pingroup *g = pctl->groups + tgroup;
+ unsigned int pin = g->pins[0];
struct pxa_desc_function *df;
- int pin, shift;
unsigned long flags;
void __iomem *gafr, *gpdr;
+ int shift;
u32 val;
-
- df = pxa_desc_by_func_group(pctl, group->name,
- (pctl->functions + function)->name);
+ df = pxa_desc_by_func_group(pctl, g->name, (pctl->functions + function)->name);
if (!df)
return -EINVAL;
- pin = group->pin;
gafr = pctl->base_gafr[pin / 16];
gpdr = pctl->base_gpdr[pin / 32];
shift = (pin % 16) << 1;
@@ -186,9 +184,9 @@ static int pxa2xx_pconf_group_get(struct pinctrl_dev *pctldev,
unsigned long *config)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_group *g = pctl->groups + group;
+ struct pingroup *g = pctl->groups + group;
+ unsigned int pin = g->pins[0];
unsigned long flags;
- unsigned pin = g->pin;
void __iomem *pgsr = pctl->base_pgsr[pin / 32];
u32 val;
@@ -208,9 +206,9 @@ static int pxa2xx_pconf_group_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct pxa_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct pxa_pinctrl_group *g = pctl->groups + group;
+ struct pingroup *g = pctl->groups + group;
+ unsigned int pin = g->pins[0];
unsigned long flags;
- unsigned pin = g->pin;
void __iomem *pgsr = pctl->base_pgsr[pin / 32];
int i, is_set = 0;
u32 val;
@@ -249,11 +247,11 @@ static struct pinctrl_desc pxa2xx_pinctrl_desc = {
.pmxops = &pxa2xx_pinmux_ops,
};
-static const struct pxa_pinctrl_function *
-pxa2xx_find_function(struct pxa_pinctrl *pctl, const char *fname,
- const struct pxa_pinctrl_function *functions)
+static const struct pinfunction *pxa2xx_find_function(struct pxa_pinctrl *pctl,
+ const char *fname,
+ const struct pinfunction *functions)
{
- const struct pxa_pinctrl_function *func;
+ const struct pinfunction *func;
for (func = functions; func->name; func++)
if (!strcmp(fname, func->name))
@@ -264,8 +262,8 @@ pxa2xx_find_function(struct pxa_pinctrl *pctl, const char *fname,
static int pxa2xx_build_functions(struct pxa_pinctrl *pctl)
{
+ struct pinfunction *functions;
int i;
- struct pxa_pinctrl_function *functions;
struct pxa_desc_function *df;
/*
@@ -296,9 +294,9 @@ static int pxa2xx_build_functions(struct pxa_pinctrl *pctl)
static int pxa2xx_build_groups(struct pxa_pinctrl *pctl)
{
int i, j, ngroups;
- struct pxa_pinctrl_function *func;
struct pxa_desc_function *df;
- char **gtmp;
+ struct pinfunction *func;
+ const char **gtmp;
gtmp = devm_kmalloc_array(pctl->dev, pctl->npins, sizeof(*gtmp),
GFP_KERNEL);
@@ -316,13 +314,9 @@ static int pxa2xx_build_groups(struct pxa_pinctrl *pctl)
pctl->ppins[j].pin.name;
func = pctl->functions + i;
func->ngroups = ngroups;
- func->groups =
- devm_kmalloc_array(pctl->dev, ngroups,
- sizeof(char *), GFP_KERNEL);
+ func->groups = devm_kmemdup(pctl->dev, gtmp, ngroups * sizeof(*gtmp), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
-
- memcpy(func->groups, gtmp, ngroups * sizeof(*gtmp));
}
devm_kfree(pctl->dev, gtmp);
@@ -332,8 +326,8 @@ static int pxa2xx_build_groups(struct pxa_pinctrl *pctl)
static int pxa2xx_build_state(struct pxa_pinctrl *pctl,
const struct pxa_desc_pin *ppins, int npins)
{
- struct pxa_pinctrl_group *group;
struct pinctrl_pin_desc *pins;
+ struct pingroup *group;
int ret, i;
pctl->npins = npins;
@@ -357,7 +351,8 @@ static int pxa2xx_build_state(struct pxa_pinctrl *pctl,
for (i = 0; i < npins; i++) {
group = pctl->groups + i;
group->name = ppins[i].pin.name;
- group->pin = ppins[i].pin.number;
+ group->pins = &ppins[i].pin.number;
+ group->npins = 1;
}
ret = pxa2xx_build_functions(pctl);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.h b/drivers/pinctrl/pxa/pinctrl-pxa2xx.h
index d86d47dbbc94..b292b79efdf8 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.h
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.h
@@ -52,17 +52,6 @@ struct pxa_desc_pin {
struct pxa_desc_function *functions;
};
-struct pxa_pinctrl_group {
- const char *name;
- unsigned pin;
-};
-
-struct pxa_pinctrl_function {
- const char *name;
- const char **groups;
- unsigned ngroups;
-};
-
struct pxa_pinctrl {
spinlock_t lock;
void __iomem **base_gafr;
@@ -74,9 +63,9 @@ struct pxa_pinctrl {
unsigned npins;
const struct pxa_desc_pin *ppins;
unsigned ngroups;
- struct pxa_pinctrl_group *groups;
+ struct pingroup *groups;
unsigned nfuncs;
- struct pxa_pinctrl_function *functions;
+ struct pinfunction *functions;
char *name;
};
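The pxa2xx conversion above swaps the driver-private group and function structs for the generic struct pingroup and struct pinfunction, with each GPIO forming a one-pin group. A minimal sketch of such a group (the demo_* names are illustrative, not from the patch):

/*
 * Minimal sketch, assuming struct pingroup and PINCTRL_PINGROUP() from
 * <linux/pinctrl/pinctrl.h>: a named group referencing its pin numbers,
 * as the converted code now builds one per pin at runtime.
 */
#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

static const unsigned int demo_gpio5_pins[] = { 5 };

static const struct pingroup demo_gpio5_group =
	PINCTRL_PINGROUP("gpio5", demo_gpio5_pins, ARRAY_SIZE(demo_gpio5_pins));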
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index c25357ca1963..095a1ca75849 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -65,7 +65,7 @@ enum {
.intr_detection_width = 2, \
}
-#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \
{ \
.grp = PINCTRL_PINGROUP(#pg_name, \
pg_name##_pins, \
@@ -75,7 +75,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = SOUTH, \
+ .tile = _tile, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -101,7 +101,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = SOUTH, \
+ .tile = WEST, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1199,13 +1199,13 @@ static const struct msm_pingroup sm7150_groups[] = {
[117] = PINGROUP(117, NORTH, _, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, NORTH, _, _, _, _, _, _, _, _, _),
[119] = UFS_RESET(ufs_reset, 0x9f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0),
};
static const struct msm_gpio_wakeirq_map sm7150_pdc_map[] = {
@@ -1246,6 +1246,7 @@ static const struct of_device_id sm7150_tlmm_of_match[] = {
{ .compatible = "qcom,sm7150-tlmm", },
{ },
};
+MODULE_DEVICE_TABLE(of, sm7150_tlmm_of_match);
static struct platform_driver sm7150_tlmm_driver = {
.driver = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f4e2c88a7c82..4e80c7204e5f 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1202,6 +1202,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm6350-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pm6450-gpio", .data = (void *) 9 },
{ .compatible = "qcom,pm7250b-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
@@ -1234,10 +1235,12 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
{ .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
+ { .compatible = "qcom,pmd8028-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmi632-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
+ { .compatible = "qcom,pmih0108-gpio", .data = (void *) 18 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmk8550-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
@@ -1253,6 +1256,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
{ .compatible = "qcom,pmx65-gpio", .data = (void *) 16 },
{ .compatible = "qcom,pmx75-gpio", .data = (void *) 16 },
+ { .compatible = "qcom,pmxr2230-gpio", .data = (void *) 12 },
{ },
};
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd1315e.c b/drivers/pinctrl/realtek/pinctrl-rtd1315e.c
index 10afc736a52b..86f919122bed 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd1315e.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd1315e.c
@@ -1414,6 +1414,7 @@ static const struct of_device_id rtd1315e_pinctrl_of_match[] = {
{ .compatible = "realtek,rtd1315e-pinctrl", },
{},
};
+MODULE_DEVICE_TABLE(of, rtd1315e_pinctrl_of_match);
static struct platform_driver rtd1315e_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd1319d.c b/drivers/pinctrl/realtek/pinctrl-rtd1319d.c
index b1a654ac30dc..474c337d2d05 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd1319d.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd1319d.c
@@ -1584,6 +1584,7 @@ static const struct of_device_id rtd1319d_pinctrl_of_match[] = {
{ .compatible = "realtek,rtd1319d-pinctrl", },
{},
};
+MODULE_DEVICE_TABLE(of, rtd1319d_pinctrl_of_match);
static struct platform_driver rtd1319d_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
index afa8f06c85cf..438d1f2739dd 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779h0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
@@ -75,10 +75,10 @@
#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
-#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
-#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
-#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
-#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_6 F_(IRQ0_A, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12)
#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
@@ -265,10 +265,10 @@
#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
@@ -672,16 +672,16 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
- PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A),
PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
- PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A),
PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
- PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A),
PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
- PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A),
PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
@@ -1660,6 +1660,90 @@ static const unsigned int i2c3_mux[] = {
SDA3_MARK, SCL3_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_a_pins[] = {
+ /* IRQ0_A */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int intc_ex_irq0_a_mux[] = {
+ IRQ0_A_MARK,
+};
+static const unsigned int intc_ex_irq0_b_pins[] = {
+ /* IRQ0_B */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int intc_ex_irq0_b_mux[] = {
+ IRQ0_B_MARK,
+};
+
+static const unsigned int intc_ex_irq1_a_pins[] = {
+ /* IRQ1_A */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int intc_ex_irq1_a_mux[] = {
+ IRQ1_A_MARK,
+};
+static const unsigned int intc_ex_irq1_b_pins[] = {
+ /* IRQ1_B */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int intc_ex_irq1_b_mux[] = {
+ IRQ1_B_MARK,
+};
+
+static const unsigned int intc_ex_irq2_a_pins[] = {
+ /* IRQ2_A */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int intc_ex_irq2_a_mux[] = {
+ IRQ2_A_MARK,
+};
+static const unsigned int intc_ex_irq2_b_pins[] = {
+ /* IRQ2_B */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int intc_ex_irq2_b_mux[] = {
+ IRQ2_B_MARK,
+};
+
+static const unsigned int intc_ex_irq3_a_pins[] = {
+ /* IRQ3_A */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int intc_ex_irq3_a_mux[] = {
+ IRQ3_A_MARK,
+};
+static const unsigned int intc_ex_irq3_b_pins[] = {
+ /* IRQ3_B */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int intc_ex_irq3_b_mux[] = {
+ IRQ3_B_MARK,
+};
+
+static const unsigned int intc_ex_irq4_a_pins[] = {
+ /* IRQ4_A */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int intc_ex_irq4_a_mux[] = {
+ IRQ4_A_MARK,
+};
+static const unsigned int intc_ex_irq4_b_pins[] = {
+ /* IRQ4_B */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq4_b_mux[] = {
+ IRQ4_B_MARK,
+};
+
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MMC -------------------------------------------------------------------- */
static const unsigned int mmc_data_pins[] = {
/* MMC_SD_D[0:3], MMC_D[4:7] */
@@ -2416,6 +2500,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c2),
SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(intc_ex_irq0_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq0_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq1_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq1_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq2_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq2_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq3_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq3_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq4_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq4_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+
BUS_DATA_PIN_GROUP(mmc_data, 1),
BUS_DATA_PIN_GROUP(mmc_data, 4),
BUS_DATA_PIN_GROUP(mmc_data, 8),
@@ -2629,6 +2725,20 @@ static const char * const i2c3_groups[] = {
"i2c3",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0_a",
+ "intc_ex_irq0_b",
+ "intc_ex_irq1_a",
+ "intc_ex_irq1_b",
+ "intc_ex_irq2_a",
+ "intc_ex_irq2_b",
+ "intc_ex_irq3_a",
+ "intc_ex_irq3_b",
+ "intc_ex_irq4_a",
+ "intc_ex_irq4_b",
+ "intc_ex_irq5",
+};
+
static const char * const mmc_groups[] = {
"mmc_data1",
"mmc_data4",
@@ -2813,6 +2923,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(intc_ex),
+
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
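
The intc_ex groups and function registered above become selectable through ordinary pinctrl states. The consumer-side sketch below uses the generic pinctrl consumer API only; the device, the "default" state name and its binding to a group such as intc_ex_irq0_a are assumptions for illustration, not code from this PFC driver.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    static int example_claim_irq_pin(struct device *dev)
    {
            struct pinctrl *p;
            struct pinctrl_state *s;

            p = devm_pinctrl_get(dev);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            /* "default" would reference a group/function pair like intc_ex_irq0_a. */
            s = pinctrl_lookup_state(p, "default");
            if (IS_ERR(s))
                    return PTR_ERR(s);

            return pinctrl_select_state(p, s);
    }
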
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index eb5a8c654260..c3256bfde502 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -892,6 +892,8 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
val = PVDD_1800;
break;
case 2500:
+ if (!(caps & (PIN_CFG_IO_VMC_ETH0 | PIN_CFG_IO_VMC_ETH1)))
+ return -EINVAL;
val = PVDD_2500;
break;
case 3300:
@@ -2045,7 +2047,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
struct irq_data *data;
+ unsigned long flags;
unsigned int virq;
+ int ret;
if (!pctrl->hwirq[i])
continue;
@@ -2063,8 +2067,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
continue;
}
- if (!irqd_irq_disabled(data))
+ /*
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+ raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
+ raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
}
}
@@ -2502,7 +2516,7 @@ static void rzg2l_pinctrl_pm_setup_dedicated_regs(struct rzg2l_pinctrl *pctrl, b
}
}
-static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
+static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
{
u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 871c1eb46ddf..ce5e6783b5b9 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -13,6 +13,7 @@
// the Samsung pinctrl/gpiolib driver. It also includes the implementation of
// external gpio and wakeup interrupt support.
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -61,6 +62,12 @@ static void exynos_irq_mask(struct irq_data *irqd)
else
reg_mask = our_chip->eint_mask + bank->eint_offset;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for masking IRQ\n");
+ return;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
@@ -68,6 +75,8 @@ static void exynos_irq_mask(struct irq_data *irqd)
writel(mask, bank->eint_base + reg_mask);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(bank->drvdata->pclk);
}
static void exynos_irq_ack(struct irq_data *irqd)
@@ -82,7 +91,15 @@ static void exynos_irq_ack(struct irq_data *irqd)
else
reg_pend = our_chip->eint_pend + bank->eint_offset;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock to ack IRQ\n");
+ return;
+ }
+
writel(1 << irqd->hwirq, bank->eint_base + reg_pend);
+
+ clk_disable(bank->drvdata->pclk);
}
static void exynos_irq_unmask(struct irq_data *irqd)
@@ -110,6 +127,12 @@ static void exynos_irq_unmask(struct irq_data *irqd)
else
reg_mask = our_chip->eint_mask + bank->eint_offset;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for unmasking IRQ\n");
+ return;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
@@ -117,6 +140,8 @@ static void exynos_irq_unmask(struct irq_data *irqd)
writel(mask, bank->eint_base + reg_mask);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(bank->drvdata->pclk);
}
static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -127,6 +152,7 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned int con, trig_type;
unsigned long reg_con;
+ int ret;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -159,11 +185,20 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
else
reg_con = our_chip->eint_con + bank->eint_offset;
+ ret = clk_enable(bank->drvdata->pclk);
+ if (ret) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for configuring IRQ type\n");
+ return ret;
+ }
+
con = readl(bank->eint_base + reg_con);
con &= ~(EXYNOS_EINT_CON_MASK << shift);
con |= trig_type << shift;
writel(con, bank->eint_base + reg_con);
+ clk_disable(bank->drvdata->pclk);
+
return 0;
}
@@ -200,6 +235,14 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+ ret = clk_enable(bank->drvdata->pclk);
+ if (ret) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for configuring pin %s-%lu\n",
+ bank->name, irqd->hwirq);
+ return ret;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
con = readl(bank->pctl_base + reg_con);
@@ -209,6 +252,8 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->drvdata->pclk);
+
return 0;
}
@@ -223,6 +268,13 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for deconfiguring pin %s-%lu\n",
+ bank->name, irqd->hwirq);
+ return;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
con = readl(bank->pctl_base + reg_con);
@@ -232,6 +284,8 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->drvdata->pclk);
+
gpiochip_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
}
@@ -281,10 +335,19 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
unsigned int svc, group, pin;
int ret;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for handling IRQ\n");
+ return IRQ_NONE;
+ }
+
if (bank->eint_con_offset)
svc = readl(bank->eint_base + EXYNOSAUTO_SVC_OFFSET);
else
svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
+
+ clk_disable(bank->drvdata->pclk);
+
group = EXYNOS_SVC_GROUP(svc);
pin = svc & EXYNOS_SVC_NUM_MASK;
@@ -563,6 +626,20 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
chained_irq_enter(chip, desc);
+ /*
+ * just enable the clock once here, to avoid an enable/disable dance for
+ * each bank.
+ */
+ if (eintd->nr_banks) {
+ struct samsung_pin_bank *b = eintd->banks[0];
+
+ if (clk_enable(b->drvdata->pclk)) {
+ dev_err(b->gpio_chip.parent,
+ "unable to enable clock for pending IRQs\n");
+ return;
+ }
+ }
+
for (i = 0; i < eintd->nr_banks; ++i) {
struct samsung_pin_bank *b = eintd->banks[i];
pend = readl(b->eint_base + b->irq_chip->eint_pend
@@ -572,6 +649,9 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
exynos_irq_demux_eint(pend & ~mask, b->irq_domain);
}
+ if (eintd->nr_banks)
+ clk_disable(eintd->banks[0]->drvdata->pclk);
+
chained_irq_exit(chip, desc);
}
@@ -695,6 +775,12 @@ static void exynos_pinctrl_suspend_bank(
struct exynos_eint_gpio_save *save = bank->soc_priv;
const void __iomem *regs = bank->eint_base;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for saving state\n");
+ return;
+ }
+
save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
@@ -704,6 +790,8 @@ static void exynos_pinctrl_suspend_bank(
save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ bank->eint_offset);
+ clk_disable(bank->drvdata->pclk);
+
pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
@@ -716,9 +804,17 @@ static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drv
struct exynos_eint_gpio_save *save = bank->soc_priv;
const void __iomem *regs = bank->eint_base;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for saving state\n");
+ return;
+ }
+
save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
+ clk_disable(bank->drvdata->pclk);
+
pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
@@ -753,6 +849,12 @@ static void exynos_pinctrl_resume_bank(
struct exynos_eint_gpio_save *save = bank->soc_priv;
void __iomem *regs = bank->eint_base;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for restoring state\n");
+ return;
+ }
+
pr_debug("%s: con %#010x => %#010x\n", bank->name,
readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset), save->eint_con);
@@ -774,6 +876,8 @@ static void exynos_pinctrl_resume_bank(
+ 2 * bank->eint_offset + 4);
writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ bank->eint_offset);
+
+ clk_disable(bank->drvdata->pclk);
}
static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
@@ -782,6 +886,12 @@ static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvd
struct exynos_eint_gpio_save *save = bank->soc_priv;
void __iomem *regs = bank->eint_base;
+ if (clk_enable(bank->drvdata->pclk)) {
+ dev_err(bank->gpio_chip.parent,
+ "unable to enable clock for restoring state\n");
+ return;
+ }
+
pr_debug("%s: con %#010x => %#010x\n", bank->name,
readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
pr_debug("%s: mask %#010x => %#010x\n", bank->name,
@@ -789,6 +899,8 @@ static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvd
writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
+
+ clk_disable(bank->drvdata->pclk);
}
void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
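
Every EINT register access in this file is now bracketed by clk_enable()/clk_disable() on the bank's bus clock, because the registers are only reachable while that clock runs. The helper below is a minimal sketch of the guard pattern in isolation, with an illustrative name; it is not part of the driver.

    #include <linux/clk.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* Read a register that is only accessible while the bus clock is running. */
    static int example_read_eint_reg(struct clk *pclk, void __iomem *reg, u32 *val)
    {
            int ret = clk_enable(pclk);      /* clock was prepared at probe time */

            if (ret)
                    return ret;

            *val = readl(reg);
            clk_disable(pclk);

            return 0;
    }
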
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index ed07e23e0912..623df65a5d6f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -15,6 +15,7 @@
// but provides extensions to which platform specific implementation of the gpio
// and wakeup interrupts can be hooked to.
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -371,8 +372,8 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
}
/* enable or disable a pinmux function */
-static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct samsung_pinctrl_drv_data *drvdata;
const struct samsung_pin_bank_type *type;
@@ -382,6 +383,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
unsigned long flags;
const struct samsung_pmx_func *func;
const struct samsung_pin_group *grp;
+ int ret;
drvdata = pinctrl_dev_get_drvdata(pctldev);
func = &drvdata->pmx_functions[selector];
@@ -397,6 +399,12 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
reg += 4;
}
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(pctldev->dev, "failed to enable clock for setup\n");
+ return ret;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
@@ -405,6 +413,10 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(drvdata->pclk);
+
+ return 0;
}
/* enable a specified pinmux by writing to registers */
@@ -412,8 +424,7 @@ static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned selector,
unsigned group)
{
- samsung_pinmux_setup(pctldev, selector, group);
- return 0;
+ return samsung_pinmux_setup(pctldev, selector, group);
}
/* list of pinmux callbacks for the pinmux vertical in pinctrl core */
@@ -436,6 +447,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
u32 data, width, pin_offset, mask, shift;
u32 cfg_value, cfg_reg;
unsigned long flags;
+ int ret;
drvdata = pinctrl_dev_get_drvdata(pctldev);
pin_to_reg_bank(drvdata, pin, &reg_base, &pin_offset, &bank);
@@ -447,6 +459,12 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
width = type->fld_width[cfg_type];
cfg_reg = type->reg_offset[cfg_type];
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
mask = (1 << width) - 1;
@@ -466,6 +484,8 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(drvdata->pclk);
+
return 0;
}
@@ -555,11 +575,19 @@ static void samsung_gpio_set_value(struct gpio_chip *gc,
static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
unsigned long flags;
+ if (clk_enable(drvdata->pclk)) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
samsung_gpio_set_value(gc, offset, value);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(drvdata->pclk);
}
/* gpiolib gpio_get callback function */
@@ -569,12 +597,23 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
u32 data;
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
const struct samsung_pin_bank_type *type = bank->type;
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
+ int ret;
reg = bank->pctl_base + bank->pctl_offset;
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
data >>= offset;
data &= 1;
+
+ clk_disable(drvdata->pclk);
+
return data;
}
@@ -619,12 +658,22 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
unsigned long flags;
int ret;
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
ret = samsung_gpio_set_direction(gc, offset, true);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(drvdata->pclk);
+
return ret;
}
@@ -633,14 +682,23 @@ static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
unsigned long flags;
int ret;
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
raw_spin_lock_irqsave(&bank->slock, flags);
samsung_gpio_set_value(gc, offset, value);
ret = samsung_gpio_set_direction(gc, offset, false);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(drvdata->pclk);
+
return ret;
}
@@ -1164,6 +1222,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
}
+ drvdata->pclk = devm_clk_get_optional_prepared(dev, "pclk");
+ if (IS_ERR(drvdata->pclk)) {
+ ret = PTR_ERR(drvdata->pclk);
+ goto err_put_banks;
+ }
+
ret = samsung_pinctrl_register(pdev, drvdata);
if (ret)
goto err_put_banks;
@@ -1202,6 +1266,13 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
int i;
+ i = clk_enable(drvdata->pclk);
+ if (i) {
+ dev_err(drvdata->dev,
+ "failed to enable clock for saving state\n");
+ return i;
+ }
+
for (i = 0; i < drvdata->nr_banks; i++) {
struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
@@ -1231,6 +1302,8 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
}
}
+ clk_disable(drvdata->pclk);
+
if (drvdata->suspend)
drvdata->suspend(drvdata);
if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
@@ -1250,8 +1323,20 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
{
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
+ int ret;
int i;
+ /*
+ * enable clock before the callback, as we don't want to have to deal
+ * with callback cleanup on clock failures.
+ */
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev,
+ "failed to enable clock for restoring state\n");
+ return ret;
+ }
+
if (drvdata->resume)
drvdata->resume(drvdata);
@@ -1286,6 +1371,8 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
writel(bank->pm_save[type], reg + offs[type]);
}
+ clk_disable(drvdata->pclk);
+
if (drvdata->retention_ctrl && drvdata->retention_ctrl->disable)
drvdata->retention_ctrl->disable(drvdata);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index ab791afaabf5..d50ba6f07d5d 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -274,6 +274,7 @@ struct samsung_pin_ctrl {
* through samsung_pinctrl_drv_data, not samsung_pin_bank).
* @dev: device instance representing the controller.
* @irq: interrupt number used by the controller to notify gpio interrupts.
+ * @pclk: optional bus clock if required for accessing registers
* @ctrl: pin controller instance managed by the driver.
* @pctl: pin controller descriptor registered with the pinctrl subsystem.
* @pctl_dev: cookie representing pinctrl device instance.
@@ -293,6 +294,7 @@ struct samsung_pinctrl_drv_data {
void __iomem *virt_base;
struct device *dev;
int irq;
+ struct clk *pclk;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
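
The pclk member documented above is obtained in pinctrl-samsung.c with devm_clk_get_optional_prepared(), so SoCs that do not describe a gated bus clock keep working. A probe-side sketch with hypothetical names: when no "pclk" is described, the call returns NULL rather than an error, and the later clk_enable()/clk_disable() calls on that NULL clock are no-ops.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *pclk;

            /* NULL (not an error) when the clock is absent; already prepared otherwise. */
            pclk = devm_clk_get_optional_prepared(&pdev->dev, "pclk");
            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);

            return 0;
    }
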
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 919b6a20af83..5b4822f77d2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -169,7 +169,6 @@ static struct platform_driver sun9i_a80_r_pinctrl_driver = {
.probe = sun9i_a80_r_pinctrl_probe,
.driver = {
.name = "sun9i-a80-r-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun9i_a80_r_pinctrl_match,
},
};
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 868b20361769..81a298517df2 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -14,3 +14,5 @@ source "drivers/platform/olpc/Kconfig"
source "drivers/platform/surface/Kconfig"
source "drivers/platform/x86/Kconfig"
+
+source "drivers/platform/arm64/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 41640172975a..fbbe4f77aa5d 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
+obj-$(CONFIG_ARM64) += arm64/
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
new file mode 100644
index 000000000000..8fdca0f8e909
--- /dev/null
+++ b/drivers/platform/arm64/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# EC-like Drivers for aarch64 based devices.
+#
+
+menuconfig ARM64_PLATFORM_DEVICES
+ bool "ARM64 Platform-Specific Device Drivers"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Say Y here to get to see options for platform-specific device drivers
+ for arm64 based devices, primarily EC-like device drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if ARM64_PLATFORM_DEVICES
+
+config EC_ACER_ASPIRE1
+ tristate "Acer Aspire 1 Embedded Controller driver"
+ depends on I2C
+ depends on DRM
+ depends on POWER_SUPPLY
+ depends on INPUT
+ help
+ Say Y here to enable the EC driver for the (Snapdragon-based)
+ Acer Aspire 1 laptop. The EC handles battery and charging
+ monitoring as well as some misc functions like the lid sensor
+ and USB Type-C DP HPD events.
+
+ This driver provides battery and AC status support for the mentioned
+ laptop where this information is not properly exposed via the
+ standard ACPI devices.
+
+endif # ARM64_PLATFORM_DEVICES
diff --git a/drivers/platform/arm64/Makefile b/drivers/platform/arm64/Makefile
new file mode 100644
index 000000000000..4fcc9855579b
--- /dev/null
+++ b/drivers/platform/arm64/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for linux/drivers/platform/arm64
+#
+# This dir should only include drivers for EC-like devices.
+#
+
+obj-$(CONFIG_EC_ACER_ASPIRE1) += acer-aspire1-ec.o
diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c
new file mode 100644
index 000000000000..dbb1cce13965
--- /dev/null
+++ b/drivers/platform/arm64/acer-aspire1-ec.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024, Nikita Travkin <nikita@trvn.ru> */
+
+#include <asm-generic/unaligned.h>
+#include <drm/drm_bridge.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/workqueue_types.h>
+
+#define MILLI_TO_MICRO 1000
+
+#define ASPIRE_EC_EVENT 0x05
+
+#define ASPIRE_EC_EVENT_WATCHDOG 0x20
+#define ASPIRE_EC_EVENT_KBD_BKL_ON 0x57
+#define ASPIRE_EC_EVENT_KBD_BKL_OFF 0x58
+#define ASPIRE_EC_EVENT_LID_CLOSE 0x9b
+#define ASPIRE_EC_EVENT_LID_OPEN 0x9c
+#define ASPIRE_EC_EVENT_BKL_UNBLANKED 0x9d
+#define ASPIRE_EC_EVENT_BKL_BLANKED 0x9e
+#define ASPIRE_EC_EVENT_FG_INF_CHG 0x85
+#define ASPIRE_EC_EVENT_FG_STA_CHG 0xc6
+#define ASPIRE_EC_EVENT_HPD_DIS 0xa3
+#define ASPIRE_EC_EVENT_HPD_CON 0xa4
+
+#define ASPIRE_EC_FG_DYNAMIC 0x07
+#define ASPIRE_EC_FG_STATIC 0x08
+
+#define ASPIRE_EC_FG_FLAG_PRESENT BIT(0)
+#define ASPIRE_EC_FG_FLAG_FULL BIT(1)
+#define ASPIRE_EC_FG_FLAG_DISCHARGING BIT(2)
+#define ASPIRE_EC_FG_FLAG_CHARGING BIT(3)
+
+#define ASPIRE_EC_RAM_READ 0x20
+#define ASPIRE_EC_RAM_WRITE 0x21
+
+#define ASPIRE_EC_RAM_WATCHDOG 0x19
+#define ASPIRE_EC_WATCHDOG_BIT BIT(6)
+
+#define ASPIRE_EC_RAM_KBD_MODE 0x43
+
+#define ASPIRE_EC_RAM_KBD_FN_EN BIT(0)
+#define ASPIRE_EC_RAM_KBD_MEDIA_ON_TOP BIT(5)
+#define ASPIRE_EC_RAM_KBD_ALWAYS_SET BIT(6)
+#define ASPIRE_EC_RAM_KBD_NUM_LAYER_EN BIT(7)
+
+#define ASPIRE_EC_RAM_KBD_MODE_2 0x60
+
+#define ASPIRE_EC_RAM_KBD_MEDIA_NOTIFY BIT(3)
+
+#define ASPIRE_EC_RAM_HPD_STATUS 0xf4
+#define ASPIRE_EC_HPD_CONNECTED 0x03
+
+#define ASPIRE_EC_RAM_LID_STATUS 0x4c
+#define ASPIRE_EC_LID_OPEN BIT(6)
+
+#define ASPIRE_EC_RAM_ADP 0x40
+#define ASPIRE_EC_AC_STATUS BIT(0)
+
+struct aspire_ec {
+ struct i2c_client *client;
+ struct power_supply *bat_psy;
+ struct power_supply *adp_psy;
+ struct input_dev *idev;
+
+ bool bridge_configured;
+ struct drm_bridge bridge;
+ struct work_struct work;
+};
+
+static int aspire_ec_ram_read(struct i2c_client *client, u8 off, u8 *data, u8 data_len)
+{
+ i2c_smbus_write_byte_data(client, ASPIRE_EC_RAM_READ, off);
+ i2c_smbus_read_i2c_block_data(client, ASPIRE_EC_RAM_READ, data_len, data);
+ return 0;
+}
+
+static int aspire_ec_ram_write(struct i2c_client *client, u8 off, u8 data)
+{
+ u8 tmp[2] = {off, data};
+
+ i2c_smbus_write_i2c_block_data(client, ASPIRE_EC_RAM_WRITE, sizeof(tmp), tmp);
+ return 0;
+}
+
+static irqreturn_t aspire_ec_irq_handler(int irq, void *data)
+{
+ struct aspire_ec *ec = data;
+ int id;
+ u8 tmp;
+
+ /*
+ * The original ACPI firmware actually has a small sleep in the handler.
+ *
+ * It seems like in most cases it's not needed but when the device
+ * just exits suspend, our i2c driver has a brief time where data
+ * transfer is not possible yet. So this delay allows us to suppress
+ * quite a bunch of spurious error messages in dmesg. Thus it's kept.
+ */
+ usleep_range(15000, 30000);
+
+ id = i2c_smbus_read_byte_data(ec->client, ASPIRE_EC_EVENT);
+ if (id < 0) {
+ dev_err(&ec->client->dev, "Failed to read event id: %pe\n", ERR_PTR(id));
+ return IRQ_HANDLED;
+ }
+
+ switch (id) {
+ case 0x0: /* No event */
+ break;
+
+ case ASPIRE_EC_EVENT_WATCHDOG:
+ /*
+ * Here acpi responds to the event and clears some bit.
+ * Notify (\_SB.I2C3.BAT1, 0x81) // Information Change
+ * Notify (\_SB.I2C3.ADP1, 0x80) // Status Change
+ */
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_WATCHDOG, &tmp, sizeof(tmp));
+ tmp &= ~ASPIRE_EC_WATCHDOG_BIT;
+ aspire_ec_ram_write(ec->client, ASPIRE_EC_RAM_WATCHDOG, tmp);
+ break;
+
+ case ASPIRE_EC_EVENT_LID_CLOSE:
+ /* Notify (\_SB.LID0, 0x80) // Status Change */
+ input_report_switch(ec->idev, SW_LID, 1);
+ input_sync(ec->idev);
+ break;
+
+ case ASPIRE_EC_EVENT_LID_OPEN:
+ /* Notify (\_SB.LID0, 0x80) // Status Change */
+ input_report_switch(ec->idev, SW_LID, 0);
+ input_sync(ec->idev);
+ break;
+
+ case ASPIRE_EC_EVENT_FG_INF_CHG:
+ /* Notify (\_SB.I2C3.BAT1, 0x81) // Information Change */
+ fallthrough;
+ case ASPIRE_EC_EVENT_FG_STA_CHG:
+ /* Notify (\_SB.I2C3.BAT1, 0x80) // Status Change */
+ power_supply_changed(ec->bat_psy);
+ power_supply_changed(ec->adp_psy);
+ break;
+
+ case ASPIRE_EC_EVENT_HPD_DIS:
+ if (ec->bridge_configured)
+ drm_bridge_hpd_notify(&ec->bridge, connector_status_disconnected);
+ break;
+
+ case ASPIRE_EC_EVENT_HPD_CON:
+ if (ec->bridge_configured)
+ drm_bridge_hpd_notify(&ec->bridge, connector_status_connected);
+ break;
+
+ case ASPIRE_EC_EVENT_BKL_BLANKED:
+ case ASPIRE_EC_EVENT_BKL_UNBLANKED:
+ /* Display backlight blanked on FN+F6. No action needed. */
+ break;
+
+ case ASPIRE_EC_EVENT_KBD_BKL_ON:
+ case ASPIRE_EC_EVENT_KBD_BKL_OFF:
+ /*
+ * There is a keyboard backlight connector on Aspire 1 that is
+ * controlled by FN+F8. There is no kb backlight on the device though.
+ * Seems like this is used on other devices like Acer Spin 7.
+ * No action needed.
+ */
+ break;
+
+ default:
+ dev_warn(&ec->client->dev, "Unknown event id=0x%x\n", id);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Power supply.
+ */
+
+struct aspire_ec_bat_psy_static_data {
+ u8 unk1;
+ u8 flags;
+ __le16 unk2;
+ __le16 voltage_design;
+ __le16 capacity_full;
+ __le16 unk3;
+ __le16 serial;
+ u8 model_id;
+ u8 vendor_id;
+} __packed;
+
+static const char * const aspire_ec_bat_psy_battery_model[] = {
+ "AP18C4K",
+ "AP18C8K",
+ "AP19B8K",
+ "AP16M4J",
+ "AP16M5J",
+};
+
+static const char * const aspire_ec_bat_psy_battery_vendor[] = {
+ "SANYO",
+ "SONY",
+ "PANASONIC",
+ "SAMSUNG",
+ "SIMPLO",
+ "MOTOROLA",
+ "CELXPERT",
+ "LGC",
+ "GETAC",
+ "MURATA",
+};
+
+struct aspire_ec_bat_psy_dynamic_data {
+ u8 unk1;
+ u8 flags;
+ u8 unk2;
+ __le16 capacity_now;
+ __le16 voltage_now;
+ __le16 current_now;
+ __le16 unk3;
+ __le16 unk4;
+} __packed;
+
+static int aspire_ec_bat_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct aspire_ec *ec = power_supply_get_drvdata(psy);
+ struct aspire_ec_bat_psy_static_data sdat;
+ struct aspire_ec_bat_psy_dynamic_data ddat;
+ int str_index = 0;
+
+ i2c_smbus_read_i2c_block_data(ec->client, ASPIRE_EC_FG_STATIC, sizeof(sdat), (u8 *)&sdat);
+ i2c_smbus_read_i2c_block_data(ec->client, ASPIRE_EC_FG_DYNAMIC, sizeof(ddat), (u8 *)&ddat);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ if (ddat.flags & ASPIRE_EC_FG_FLAG_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (ddat.flags & ASPIRE_EC_FG_FLAG_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (ddat.flags & ASPIRE_EC_FG_FLAG_FULL)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_unaligned_le16(&ddat.voltage_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = le16_to_cpu(sdat.voltage_design) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = get_unaligned_le16(&ddat.capacity_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = le16_to_cpu(sdat.capacity_full) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_unaligned_le16(&ddat.capacity_now) * 100;
+ val->intval /= le16_to_cpu(sdat.capacity_full);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = (s16)get_unaligned_le16(&ddat.current_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!(ddat.flags & ASPIRE_EC_FG_FLAG_PRESENT);
+ break;
+
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ str_index = sdat.model_id - 1;
+
+ if (str_index >= 0 && str_index < ARRAY_SIZE(aspire_ec_bat_psy_battery_model))
+ val->strval = aspire_ec_bat_psy_battery_model[str_index];
+ else
+ val->strval = "Unknown";
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ str_index = sdat.vendor_id - 3; /* ACPI uses 3 as an offset here. */
+
+ if (str_index >= 0 && str_index < ARRAY_SIZE(aspire_ec_bat_psy_battery_vendor))
+ val->strval = aspire_ec_bat_psy_battery_vendor[str_index];
+ else
+ val->strval = "Unknown";
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property aspire_ec_bat_psy_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static const struct power_supply_desc aspire_ec_bat_psy_desc = {
+ .name = "aspire-ec-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = aspire_ec_bat_psy_get_property,
+ .properties = aspire_ec_bat_psy_props,
+ .num_properties = ARRAY_SIZE(aspire_ec_bat_psy_props),
+};
+
+static int aspire_ec_adp_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct aspire_ec *ec = power_supply_get_drvdata(psy);
+ u8 tmp;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_ADP, &tmp, sizeof(tmp));
+ val->intval = !!(tmp & ASPIRE_EC_AC_STATUS);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property aspire_ec_adp_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc aspire_ec_adp_psy_desc = {
+ .name = "aspire-ec-adp",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .get_property = aspire_ec_adp_psy_get_property,
+ .properties = aspire_ec_adp_psy_props,
+ .num_properties = ARRAY_SIZE(aspire_ec_adp_psy_props),
+};
+
+/*
+ * USB-C DP Alt mode HPD.
+ */
+
+static int aspire_ec_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static void aspire_ec_bridge_update_hpd_work(struct work_struct *work)
+{
+ struct aspire_ec *ec = container_of(work, struct aspire_ec, work);
+ u8 tmp;
+
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_HPD_STATUS, &tmp, sizeof(tmp));
+ if (tmp == ASPIRE_EC_HPD_CONNECTED)
+ drm_bridge_hpd_notify(&ec->bridge, connector_status_connected);
+ else
+ drm_bridge_hpd_notify(&ec->bridge, connector_status_disconnected);
+}
+
+static void aspire_ec_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct aspire_ec *ec = container_of(bridge, struct aspire_ec, bridge);
+
+ schedule_work(&ec->work);
+}
+
+static const struct drm_bridge_funcs aspire_ec_bridge_funcs = {
+ .hpd_enable = aspire_ec_bridge_hpd_enable,
+ .attach = aspire_ec_bridge_attach,
+};
+
+/*
+ * Sysfs attributes.
+ */
+
+static ssize_t fn_lock_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct aspire_ec *ec = i2c_get_clientdata(to_i2c_client(dev));
+ u8 tmp;
+
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_KBD_MODE, &tmp, sizeof(tmp));
+
+ return sysfs_emit(buf, "%u\n", !(tmp & ASPIRE_EC_RAM_KBD_MEDIA_ON_TOP));
+}
+
+static ssize_t fn_lock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspire_ec *ec = i2c_get_clientdata(to_i2c_client(dev));
+ u8 tmp;
+
+ bool state;
+ int ret;
+
+ ret = kstrtobool(buf, &state);
+ if (ret)
+ return ret;
+
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_KBD_MODE, &tmp, sizeof(tmp));
+
+ if (state)
+ tmp &= ~ASPIRE_EC_RAM_KBD_MEDIA_ON_TOP;
+ else
+ tmp |= ASPIRE_EC_RAM_KBD_MEDIA_ON_TOP;
+
+ aspire_ec_ram_write(ec->client, ASPIRE_EC_RAM_KBD_MODE, tmp);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(fn_lock);
+
+static struct attribute *aspire_ec_attrs[] = {
+ &dev_attr_fn_lock.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(aspire_ec);
+
+static int aspire_ec_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = {0};
+ struct device *dev = &client->dev;
+ struct fwnode_handle *fwnode;
+ struct aspire_ec *ec;
+ int ret;
+ u8 tmp;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ec->client = client;
+ i2c_set_clientdata(client, ec);
+
+ /* Battery status reports */
+ psy_cfg.drv_data = ec;
+ ec->bat_psy = devm_power_supply_register(dev, &aspire_ec_bat_psy_desc, &psy_cfg);
+ if (IS_ERR(ec->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(ec->bat_psy),
+ "Failed to register battery power supply\n");
+
+ ec->adp_psy = devm_power_supply_register(dev, &aspire_ec_adp_psy_desc, &psy_cfg);
+ if (IS_ERR(ec->adp_psy))
+ return dev_err_probe(dev, PTR_ERR(ec->adp_psy),
+ "Failed to register AC power supply\n");
+
+ /* Lid switch */
+ ec->idev = devm_input_allocate_device(dev);
+ if (!ec->idev)
+ return -ENOMEM;
+
+ ec->idev->name = "aspire-ec";
+ ec->idev->phys = "aspire-ec/input0";
+ input_set_capability(ec->idev, EV_SW, SW_LID);
+
+ ret = input_register_device(ec->idev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Input device register failed\n");
+
+ /* Enable the keyboard fn keys */
+ tmp = ASPIRE_EC_RAM_KBD_FN_EN | ASPIRE_EC_RAM_KBD_ALWAYS_SET;
+ tmp |= ASPIRE_EC_RAM_KBD_MEDIA_ON_TOP;
+ aspire_ec_ram_write(client, ASPIRE_EC_RAM_KBD_MODE, tmp);
+
+ aspire_ec_ram_read(client, ASPIRE_EC_RAM_KBD_MODE_2, &tmp, sizeof(tmp));
+ tmp |= ASPIRE_EC_RAM_KBD_MEDIA_NOTIFY;
+ aspire_ec_ram_write(client, ASPIRE_EC_RAM_KBD_MODE_2, tmp);
+
+ /* External Type-C display attach reports */
+ fwnode = device_get_named_child_node(dev, "connector");
+ if (fwnode) {
+ INIT_WORK(&ec->work, aspire_ec_bridge_update_hpd_work);
+ ec->bridge.funcs = &aspire_ec_bridge_funcs;
+ ec->bridge.of_node = to_of_node(fwnode);
+ ec->bridge.ops = DRM_BRIDGE_OP_HPD;
+ ec->bridge.type = DRM_MODE_CONNECTOR_USB;
+
+ ret = devm_drm_bridge_add(dev, &ec->bridge);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, ret, "Failed to register drm bridge\n");
+ }
+
+ ec->bridge_configured = true;
+ }
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ aspire_ec_irq_handler, IRQF_ONESHOT,
+ dev_name(dev), ec);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ return 0;
+}
+
+static int aspire_ec_resume(struct device *dev)
+{
+ struct aspire_ec *ec = i2c_get_clientdata(to_i2c_client(dev));
+ u8 tmp;
+
+ aspire_ec_ram_read(ec->client, ASPIRE_EC_RAM_LID_STATUS, &tmp, sizeof(tmp));
+ input_report_switch(ec->idev, SW_LID, !!(tmp & ASPIRE_EC_LID_OPEN));
+ input_sync(ec->idev);
+
+ return 0;
+}
+
+static const struct i2c_device_id aspire_ec_id[] = {
+ { "aspire1-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aspire_ec_id);
+
+static const struct of_device_id aspire_ec_of_match[] = {
+ { .compatible = "acer,aspire1-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspire_ec_of_match);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(aspire_ec_pm_ops, NULL, aspire_ec_resume);
+
+static struct i2c_driver aspire_ec_driver = {
+ .driver = {
+ .name = "aspire-ec",
+ .of_match_table = aspire_ec_of_match,
+ .pm = pm_sleep_ptr(&aspire_ec_pm_ops),
+ .dev_groups = aspire_ec_groups,
+ },
+ .probe = aspire_ec_probe,
+ .id_table = aspire_ec_id,
+};
+module_i2c_driver(aspire_ec_driver);
+
+MODULE_DESCRIPTION("Acer Aspire 1 embedded controller");
+MODULE_AUTHOR("Nikita Travkin <nikita@trvn.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 7a83346bfa53..073616b5b5a0 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -132,6 +132,7 @@ config CROS_EC_UART
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
depends on CROS_EC && ACPI && (X86 || COMPILE_TEST)
+ depends on HAS_IOPORT
help
If you say Y here, you get support for talking to the ChromeOS EC
over an LPC bus, including the LPC Microchip EC (MEC) variant.
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index badc68bbae8c..47d19f7e295a 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -432,6 +432,12 @@ static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
{
cros_ec_send_resume_event(ec_dev);
+
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
}
EXPORT_SYMBOL(cros_ec_resume_complete);
@@ -442,12 +448,6 @@ static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
if (ec_dev->wake_enabled)
disable_irq_wake(ec_dev->irq);
-
- /*
- * Let the mfd devices know about events that occur during
- * suspend. This way the clients know what to do with them.
- */
- cros_ec_report_events_during_suspend(ec_dev);
}
/**
@@ -475,8 +475,8 @@ EXPORT_SYMBOL(cros_ec_resume_early);
*/
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
- cros_ec_enable_irq(ec_dev);
- cros_ec_send_resume_event(ec_dev);
+ cros_ec_resume_early(ec_dev);
+ cros_ec_resume_complete(ec_dev);
return 0;
}
EXPORT_SYMBOL(cros_ec_resume);
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 81950bb2c6da..7f034ead7ae4 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_data/cros_ec_chardev.h>
@@ -403,17 +404,23 @@ static void cros_ec_chardev_remove(struct platform_device *pdev)
misc_deregister(&data->misc);
}
+static const struct platform_device_id cros_ec_chardev_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_chardev_id);
+
static struct platform_driver cros_ec_chardev_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_chardev_probe,
.remove_new = cros_ec_chardev_remove,
+ .id_table = cros_ec_chardev_id,
};
module_platform_driver(cros_ec_chardev_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
MODULE_DESCRIPTION("ChromeOS EC Miscellaneous Character Driver");
MODULE_LICENSE("GPL");
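
This and the following ChromeOS cells (debugfs, lightbar, sensorhub, sysfs) all swap a hand-written MODULE_ALIAS for a platform_device_id table hooked into .id_table. A hypothetical skeleton of the pattern; the exported table produces the same "platform:<name>" alias the macro used to supply, so module autoloading is unchanged.

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    #define EXAMPLE_DRV_NAME "example-cell"

    static int example_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static const struct platform_device_id example_id[] = {
            { EXAMPLE_DRV_NAME, 0 },
            { }
    };
    /* Generates the "platform:example-cell" alias MODULE_ALIAS used to add. */
    MODULE_DEVICE_TABLE(platform, example_id);

    static struct platform_driver example_driver = {
            .driver = {
                    .name = EXAMPLE_DRV_NAME,
            },
            .probe = example_probe,
            .id_table = example_id,
    };
    module_platform_driver(example_driver);

    MODULE_DESCRIPTION("Illustrative skeleton only");
    MODULE_LICENSE("GPL");
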
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 6bf6f0e7b597..e1d313246beb 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -564,6 +565,12 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops,
cros_ec_debugfs_suspend, cros_ec_debugfs_resume);
+static const struct platform_device_id cros_ec_debugfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_debugfs_id);
+
static struct platform_driver cros_ec_debugfs_driver = {
.driver = {
.name = DRV_NAME,
@@ -572,10 +579,10 @@ static struct platform_driver cros_ec_debugfs_driver = {
},
.probe = cros_ec_debugfs_probe,
.remove_new = cros_ec_debugfs_remove,
+ .id_table = cros_ec_debugfs_id,
};
module_platform_driver(cros_ec_debugfs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Debug logs for ChromeOS EC");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index 6677cc6c4984..1e69f61115a4 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/kstrtox.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -594,6 +595,12 @@ static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops,
cros_ec_lightbar_suspend, cros_ec_lightbar_resume);
+static const struct platform_device_id cros_ec_lightbar_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_lightbar_id);
+
static struct platform_driver cros_ec_lightbar_driver = {
.driver = {
.name = DRV_NAME,
@@ -602,10 +609,10 @@ static struct platform_driver cros_ec_lightbar_driver = {
},
.probe = cros_ec_lightbar_probe,
.remove_new = cros_ec_lightbar_remove,
+ .id_table = cros_ec_lightbar_id,
};
module_platform_driver(cros_ec_lightbar_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index f0f3d3d56157..ddfbfec44f4c 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -34,6 +34,32 @@
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
+/*
+ * Indicates that lpc_driver_data.quirk_mmio_memory_base should
+ * be used as the base port for EC mapped memory.
+ */
+#define CROS_EC_LPC_QUIRK_REMAP_MEMORY BIT(0)
+
+/**
+ * struct lpc_driver_data - driver data attached to a DMI device ID to indicate
+ * hardware quirks.
+ * @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_*
+ * @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used
+ * when quirk ...REMAP_MEMORY is set.)
+ */
+struct lpc_driver_data {
+ u32 quirks;
+ u16 quirk_mmio_memory_base;
+};
+
+/**
+ * struct cros_ec_lpc - LPC device-specific data
+ * @mmio_memory_base: The first I/O port addressing EC mapped memory.
+ */
+struct cros_ec_lpc {
+ u16 mmio_memory_base;
+};
+
/**
* struct lpc_driver_ops - LPC driver operations
* @read: Copy length bytes from EC address offset into buffer dest. Returns
@@ -290,6 +316,7 @@ done:
static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
int i = offset;
char *s = dest;
int cnt = 0;
@@ -299,13 +326,13 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* fixed length */
if (bytes) {
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + offset, bytes, s);
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
return bytes;
}
/* string */
for (; i < EC_MEMMAP_SIZE; i++, s++) {
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + i, 1, s);
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
cnt++;
if (!*s)
break;
@@ -353,8 +380,28 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
struct acpi_device *adev;
acpi_status status;
struct cros_ec_device *ec_dev;
+ struct cros_ec_lpc *ec_lpc;
+ struct lpc_driver_data *driver_data;
u8 buf[2] = {};
int irq, ret;
+ u32 quirks;
+
+ ec_lpc = devm_kzalloc(dev, sizeof(*ec_lpc), GFP_KERNEL);
+ if (!ec_lpc)
+ return -ENOMEM;
+
+ ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
+
+ driver_data = platform_get_drvdata(pdev);
+ if (driver_data) {
+ quirks = driver_data->quirks;
+
+ if (quirks)
+ dev_info(dev, "loaded with quirks %8.08x\n", quirks);
+
+ if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY)
+ ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base;
+ }
/*
* The Framework Laptop (and possibly other non-ChromeOS devices)
@@ -380,7 +427,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
if (buf[0] != 'E' || buf[1] != 'C') {
- if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
+ if (!devm_request_region(dev, ec_lpc->mmio_memory_base, EC_MEMMAP_SIZE,
dev_name(dev))) {
dev_err(dev, "couldn't reserve memmap region\n");
return -EBUSY;
@@ -389,7 +436,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
/* Re-assign read/write operations for the non MEC variant */
cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2,
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
buf);
if (buf[0] != 'E' || buf[1] != 'C') {
dev_err(dev, "EC ID not detected\n");
@@ -423,6 +470,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->priv = ec_lpc;
/*
* Some boards do not have an IRQ allotted for cros_ec_lpc,
@@ -479,6 +527,11 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data __initconst = {
+ .quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
+ .quirk_mmio_memory_base = 0xE00,
+};
+
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -533,7 +586,16 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
},
/* A small number of non-Chromebook/box machines also use the ChromeOS EC */
{
- /* the Framework Laptop */
+ /* the Framework Laptop 13 (AMD Ryzen) and 16 (AMD Ryzen) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMD Ryzen"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"),
+ },
+ .driver_data = (void *)&framework_laptop_amd_lpc_driver_data,
+ },
+ {
+ /* the Framework Laptop (Intel 11th, 12th, 13th Generation) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
@@ -610,14 +672,16 @@ static int __init cros_ec_lpc_init(void)
{
int ret;
acpi_status status;
+ const struct dmi_system_id *dmi_match;
status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
&cros_ec_lpc_acpi_device_found, NULL);
if (ACPI_FAILURE(status))
pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
- if (!cros_ec_lpc_acpi_device_found &&
- !dmi_check_system(cros_ec_lpc_dmi_table)) {
+ dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
+
+ if (!cros_ec_lpc_acpi_device_found && !dmi_match) {
pr_err(DRV_NAME ": unsupported system.\n");
return -ENODEV;
}
@@ -630,6 +694,9 @@ static int __init cros_ec_lpc_init(void)
}
if (!cros_ec_lpc_acpi_device_found) {
+ /* Pass the DMI match's driver data down to the platform device */
+ platform_set_drvdata(&cros_ec_lpc_device, dmi_match->driver_data);
+
/* Register the device, and it'll get hooked up automatically */
ret = platform_device_register(&cros_ec_lpc_device);
if (ret) {
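
The quirk plumbing above is split across two hunks: at init time a DMI table entry carries a driver_data payload, and probe() later reads it back with platform_get_drvdata(). A condensed sketch of the same flow; the struct, vendor string and base address are hypothetical.

    #include <linux/dmi.h>
    #include <linux/platform_device.h>

    struct example_quirk_data {
            unsigned int mmio_base;         /* illustrative quirk payload */
    };

    static const struct example_quirk_data example_quirk = { .mmio_base = 0xe00 };

    static const struct dmi_system_id example_dmi_table[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                    },
                    .driver_data = (void *)&example_quirk,
            },
            { }
    };

    /* Init time: stash the matched entry's payload where probe() can find it. */
    static void example_apply_dmi_quirks(struct platform_device *pdev)
    {
            const struct dmi_system_id *m = dmi_first_match(example_dmi_table);

            if (m)
                    platform_set_drvdata(pdev, m->driver_data);
    }
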
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
index b6169d6f2467..41378c2ee6a0 100644
--- a/drivers/platform/chrome/cros_ec_proto_test.c
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -1543,21 +1543,18 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 2;
- buf.data[0] = 0x55;
- buf.data[1] = 0xaa;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 2;
+ buf->data[0] = 0x55;
+ buf->data[1] = 0xaa;
{
u8 *data;
@@ -1572,7 +1569,7 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
data[3] = 0x33;
}
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, 4);
{
@@ -1590,10 +1587,10 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
KUNIT_EXPECT_EQ(test, data[0], 0x55);
KUNIT_EXPECT_EQ(test, data[1], 0xaa);
- KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
- KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
- KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
- KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ KUNIT_EXPECT_EQ(test, buf->data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf->data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf->data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf->data[3], 0x33);
}
}
@@ -1603,26 +1600,23 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 0xee + 1;
- buf.msg.outsize = 2;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 0xee + 1;
+ buf->outsize = 2;
{
mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
KUNIT_ASSERT_PTR_NE(test, mock, NULL);
}
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, 0xcc);
{
@@ -1641,21 +1635,18 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(stru
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 0xff + 1;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 0xff + 1;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
@@ -1664,21 +1655,18 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 0xdd + 1;
+ buf->version = 0;
+ buf->command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 0xdd + 1;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
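
The conversions above replace an ad-hoc struct-plus-buffer with DEFINE_RAW_FLEX() from <linux/overflow.h>. A minimal sketch, using hypothetical names (demo_msg, demo_fill), of how that macro behaves: it reserves on-stack storage for the struct plus a fixed number of trailing flexible-array elements and yields a pointer, so members are accessed with '->' instead of '.msg'.

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_msg {
	u16 command;
	u16 outsize;
	u8 data[];			/* flexible array member */
};

static void demo_fill(void)
{
	/* Storage for struct demo_msg plus 0x100 bytes of data[]. */
	DEFINE_RAW_FLEX(struct demo_msg, msg, data, 0x100);

	msg->command = 1;		/* 'msg' is a pointer into the buffer */
	msg->outsize = 2;
	msg->data[0] = 0x55;
}
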
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 31fb8bdaad5a..50cdae67fa32 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -247,17 +248,23 @@ static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
cros_ec_sensorhub_suspend,
cros_ec_sensorhub_resume);
+static const struct platform_device_id cros_ec_sensorhub_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensorhub_id);
+
static struct platform_driver cros_ec_sensorhub_driver = {
.driver = {
.name = DRV_NAME,
.pm = &cros_ec_sensorhub_pm_ops,
},
.probe = cros_ec_sensorhub_probe,
+ .id_table = cros_ec_sensorhub_id,
};
module_platform_driver(cros_ec_sensorhub_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 93e67ab4af06..9c944146ee50 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -347,16 +348,22 @@ static void cros_ec_sysfs_remove(struct platform_device *pd)
sysfs_remove_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group);
}
+static const struct platform_device_id cros_ec_sysfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sysfs_id);
+
static struct platform_driver cros_ec_sysfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_sysfs_probe,
.remove_new = cros_ec_sysfs_remove,
+ .id_table = cros_ec_sysfs_id,
};
module_platform_driver(cros_ec_sysfs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 8ea867c2a01a..62bc24f6dcc7 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_dev)
return -ENOMEM;
- ret = devm_serdev_device_open(dev, serdev);
- if (ret) {
- dev_err(dev, "Unable to open UART device");
- return ret;
- }
-
serdev_device_set_drvdata(serdev, ec_dev);
init_waitqueue_head(&ec_uart->response.wait_queue);
@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
return ret;
}
- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
- if (ret < 0) {
- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
- return ret;
- }
-
- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
-
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
ec_dev->dev = dev;
@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
return cros_ec_register(ec_dev);
}
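
The hunks above only reorder the probe sequence: the serdev port is now opened (and its line settings applied) after the client ops are registered, so bytes arriving right after the open already have a receive handler. A condensed sketch of the resulting order, with placeholder names, mirroring the calls used in the hunk:

#include <linux/serdev.h>

static const struct serdev_device_ops demo_client_ops = { };

static int demo_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	serdev_device_set_client_ops(serdev, &demo_client_ops);	/* 1: ops first */

	ret = devm_serdev_device_open(&serdev->dev, serdev);		/* 2: then open */
	if (ret)
		return ret;

	ret = serdev_device_set_baudrate(serdev, 115200);		/* 3: line setup */
	if (ret < 0)
		return ret;

	serdev_device_set_flow_control(serdev, false);

	return 0;
}
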
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 274ea0c64b33..787a19db4911 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -6,6 +6,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -133,16 +134,22 @@ static void cros_ec_vbc_remove(struct platform_device *pd)
&cros_ec_vbc_attr_group);
}
+static const struct platform_device_id cros_ec_vbc_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_vbc_id);
+
static struct platform_driver cros_ec_vbc_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_vbc_probe,
.remove_new = cros_ec_vbc_remove,
+ .id_table = cros_ec_vbc_id,
};
module_platform_driver(cros_ec_vbc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the vboot context nvram to userspace");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_hps_i2c.c b/drivers/platform/chrome/cros_hps_i2c.c
index b31313080332..dd14957ec39f 100644
--- a/drivers/platform/chrome/cros_hps_i2c.c
+++ b/drivers/platform/chrome/cros_hps_i2c.c
@@ -126,7 +126,7 @@ static int hps_resume(struct device *dev)
hps_set_power(hps, true);
return 0;
}
-static UNIVERSAL_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
static const struct i2c_device_id hps_i2c_id[] = {
{ "cros-hps", 0 },
@@ -148,7 +148,7 @@ static struct i2c_driver hps_i2c_driver = {
.id_table = hps_i2c_id,
.driver = {
.name = "cros-hps",
- .pm = &hps_pm_ops,
+ .pm = pm_ptr(&hps_pm_ops),
.acpi_match_table = ACPI_PTR(hps_acpi_id),
},
};
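
UNIVERSAL_DEV_PM_OPS is deprecated; DEFINE_RUNTIME_DEV_PM_OPS wires the given callbacks up as runtime-PM handlers and reuses them for system sleep via pm_runtime_force_suspend()/resume(), while pm_ptr() lets the whole ops table be discarded when CONFIG_PM is disabled. A minimal sketch with hypothetical callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }

/* Runtime-PM ops; system sleep is handled via pm_runtime_force_*(). */
static DEFINE_RUNTIME_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume, NULL);

/*
 * In the driver definition:  .pm = pm_ptr(&demo_pm_ops),
 * pm_ptr() evaluates to NULL when CONFIG_PM is off, so the unreferenced
 * ops table and callbacks can be dropped by the compiler and linker.
 */
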
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index 793fd3f1015d..b83e4f328620 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -247,17 +248,23 @@ static const struct of_device_id keyboard_led_of_match[] = {
MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
#endif
+static const struct platform_device_id keyboard_led_id[] = {
+ { "cros-keyboard-leds", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, keyboard_led_id);
+
static struct platform_driver keyboard_led_driver = {
.driver = {
- .name = "chromeos-keyboard-leds",
+ .name = "cros-keyboard-leds",
.acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
.of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
+ .id_table = keyboard_led_id,
};
module_platform_driver(keyboard_led_driver);
MODULE_AUTHOR("Simon Que <sque@chromium.org>");
MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index f618757f8b32..930c2f47269f 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -7,6 +7,7 @@
#include <linux/ktime.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -249,6 +250,12 @@ static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
cros_usbpd_logger_resume);
+static const struct platform_device_id cros_usbpd_logger_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_logger_id);
+
static struct platform_driver cros_usbpd_logger_driver = {
.driver = {
.name = DRV_NAME,
@@ -256,10 +263,10 @@ static struct platform_driver cros_usbpd_logger_driver = {
},
.probe = cros_usbpd_logger_probe,
.remove_new = cros_usbpd_logger_remove,
+ .id_table = cros_usbpd_logger_id,
};
module_platform_driver(cros_usbpd_logger_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index aacad022f21d..c83f81d86483 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
@@ -218,12 +219,19 @@ static void cros_usbpd_notify_remove_plat(struct platform_device *pdev)
&pdnotify->nb);
}
+static const struct platform_device_id cros_usbpd_notify_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_notify_id);
+
static struct platform_driver cros_usbpd_notify_plat_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_usbpd_notify_probe_plat,
.remove_new = cros_usbpd_notify_remove_plat,
+ .id_table = cros_usbpd_notify_id,
};
static int __init cros_usbpd_notify_init(void)
@@ -258,4 +266,3 @@ module_exit(cros_usbpd_notify_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 49e8530ca0ac..d1648fb099ac 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -3,6 +3,7 @@ config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
depends on X86 || COMPILE_TEST
depends on ACPI && CROS_EC_LPC && LEDS_CLASS
+ depends on HAS_IOPORT
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 9b59a1bed286..3e6b6cd81a9b 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -150,6 +151,12 @@ static const struct acpi_device_id wilco_ec_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids);
+static const struct platform_device_id wilco_ec_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wilco_ec_id);
+
static struct platform_driver wilco_ec_driver = {
.driver = {
.name = DRV_NAME,
@@ -157,6 +164,7 @@ static struct platform_driver wilco_ec_driver = {
},
.probe = wilco_ec_probe,
.remove_new = wilco_ec_remove,
+ .id_table = wilco_ec_id,
};
module_platform_driver(wilco_ec_driver);
@@ -165,4 +173,3 @@ MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_AUTHOR("Duncan Laurie <dlaurie@chromium.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 93c11f81ca45..983f2fa44ba5 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -265,17 +266,23 @@ static void wilco_ec_debugfs_remove(struct platform_device *pdev)
debugfs_remove_recursive(debug_info->dir);
}
+static const struct platform_device_id wilco_ec_debugfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wilco_ec_debugfs_id);
+
static struct platform_driver wilco_ec_debugfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = wilco_ec_debugfs_probe,
.remove_new = wilco_ec_debugfs_remove,
+ .id_table = wilco_ec_debugfs_id,
};
module_platform_driver(wilco_ec_debugfs_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Wilco EC debugfs driver");
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index 13291fb4214e..bd1fb53ba028 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -523,7 +523,6 @@ static struct acpi_driver event_driver = {
.notify = event_device_notify,
.remove = event_device_remove,
},
- .owner = THIS_MODULE,
};
static int __init event_module_init(void)
@@ -575,4 +574,3 @@ module_exit(event_module_exit);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_DESCRIPTION("Wilco EC ACPI event driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 893c59dde32a..d44c43559621 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -192,7 +192,7 @@ static ssize_t usb_charge_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", rs.val);
+ return sysfs_emit(buf, "%d\n", rs.val);
}
static ssize_t usb_charge_store(struct device *dev,
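
sysfs_emit() is the preferred way to format a sysfs show() value: unlike raw sprintf() it checks that the output stays within the PAGE_SIZE sysfs buffer (sysfs_emit_at() does the same when appending at an offset, as the asus-laptop conversion further down uses). A small hedged sketch with a placeholder attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int value = 42;		/* stand-in for a real device readout */

	/* Bounded by PAGE_SIZE; returns the number of bytes written. */
	return sysfs_emit(buf, "%d\n", value);
}
static DEVICE_ATTR_RO(demo_value);
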
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index b7c616f3d179..21d4cbbb009a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -30,6 +30,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -409,12 +410,19 @@ static void telem_device_remove(struct platform_device *pdev)
put_device(&dev_data->dev);
}
+static const struct platform_device_id telem_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, telem_id);
+
static struct platform_driver telem_driver = {
.probe = telem_device_probe,
.remove_new = telem_device_remove,
.driver = {
.name = DRV_NAME,
},
+ .id_table = telem_id,
};
static int __init telem_module_init(void)
@@ -466,4 +474,3 @@ module_exit(telem_module_exit);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_DESCRIPTION("Wilco EC telemetry driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index ba550eaa06fc..797d0645bd77 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -618,15 +618,17 @@ static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
static int ssam_serial_hub_probe(struct serdev_device *serdev)
{
- struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev);
+ struct device *dev = &serdev->dev;
+ struct acpi_device *ssh = ACPI_COMPANION(dev);
struct ssam_controller *ctrl;
acpi_status astatus;
int status;
- if (gpiod_count(&serdev->dev, NULL) < 0)
- return -ENODEV;
+ status = gpiod_count(dev, NULL);
+ if (status < 0)
+ return dev_err_probe(dev, status, "no GPIO found\n");
- status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
+ status = devm_acpi_dev_add_driver_gpios(dev, ssam_acpi_gpios);
if (status)
return status;
@@ -637,8 +639,10 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
/* Initialize controller. */
status = ssam_controller_init(ctrl, serdev);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "failed to initialize ssam controller\n");
goto err_ctrl_init;
+ }
ssam_controller_lock(ctrl);
@@ -646,12 +650,14 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, ctrl);
serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
status = serdev_device_open(serdev);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "failed to open serdev device\n");
goto err_devopen;
+ }
astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
if (ACPI_FAILURE(astatus)) {
- status = -ENXIO;
+ status = dev_err_probe(dev, -ENXIO, "failed to setup serdev\n");
goto err_devinit;
}
@@ -667,25 +673,33 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
* states.
*/
status = ssam_log_firmware_version(ctrl);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "failed to get firmware version\n");
goto err_initrq;
+ }
status = ssam_ctrl_notif_d0_entry(ctrl);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "D0-entry notification failed\n");
goto err_initrq;
+ }
status = ssam_ctrl_notif_display_on(ctrl);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "display-on notification failed\n");
goto err_initrq;
+ }
- status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
+ status = sysfs_create_group(&dev->kobj, &ssam_sam_group);
if (status)
goto err_initrq;
/* Set up IRQ. */
status = ssam_irq_setup(ctrl);
- if (status)
+ if (status) {
+ dev_err_probe(dev, status, "failed to setup IRQ\n");
goto err_irq;
+ }
/* Finally, set main controller reference. */
status = ssam_try_set_controller(ctrl);
@@ -702,7 +716,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
* resumed. In short, this causes some spurious unwanted wake-ups.
* For now let's thus default power/wakeup to false.
*/
- device_set_wakeup_capable(&serdev->dev, true);
+ device_set_wakeup_capable(dev, true);
acpi_dev_clear_dependencies(ssh);
return 0;
@@ -710,7 +724,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
err_mainref:
ssam_irq_free(ctrl);
err_irq:
- sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+ sysfs_remove_group(&dev->kobj, &ssam_sam_group);
err_initrq:
ssam_controller_lock(ctrl);
ssam_controller_shutdown(ctrl);
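
dev_err_probe() both logs and returns the error, which is why each failure branch above collapses to a single call; for -EPROBE_DEFER it records the deferral reason (visible in debugfs devices_deferred) instead of printing. A minimal sketch with a hypothetical helper name:

#include <linux/device.h>
#include <linux/err.h>

static int demo_acquire(struct device *dev)
{
	int ret = -ENXIO;	/* stand-in for a failed resource lookup */

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to acquire resource\n");

	return 0;
}
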
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 035d6b4105cd..1c4d74db08c9 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -68,12 +68,32 @@ static const struct software_node ssam_node_bat_sb3base = {
.parent = &ssam_node_hub_base,
};
-/* Platform profile / performance-mode device. */
-static const struct software_node ssam_node_tmp_pprof = {
+/* Platform profile / performance-mode device without a fan. */
+static const struct software_node ssam_node_tmp_perf_profile = {
.name = "ssam:01:03:01:00:01",
.parent = &ssam_node_root,
};
+/* Platform profile / performance-mode device with a fan, such that
+ * the fan controller profile can also be switched.
+ */
+static const struct property_entry ssam_node_tmp_perf_profile_has_fan[] = {
+ PROPERTY_ENTRY_BOOL("has_fan"),
+ { }
+};
+
+static const struct software_node ssam_node_tmp_perf_profile_with_fan = {
+ .name = "ssam:01:03:01:00:01",
+ .parent = &ssam_node_root,
+ .properties = ssam_node_tmp_perf_profile_has_fan,
+};
+
+/* Thermal sensors. */
+static const struct software_node ssam_node_tmp_sensors = {
+ .name = "ssam:01:03:01:00:02",
+ .parent = &ssam_node_root,
+};
+
/* Fan speed function. */
static const struct software_node ssam_node_fan_speed = {
.name = "ssam:01:05:01:01:01",
@@ -208,7 +228,7 @@ static const struct software_node ssam_node_pos_tablet_switch = {
*/
static const struct software_node *ssam_node_group_gen5[] = {
&ssam_node_root,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
NULL,
};
@@ -219,7 +239,7 @@ static const struct software_node *ssam_node_group_sb3[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_bat_sb3base,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
&ssam_node_bas_dtx,
&ssam_node_hid_base_keyboard,
&ssam_node_hid_base_touchpad,
@@ -233,7 +253,7 @@ static const struct software_node *ssam_node_group_sl3[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
&ssam_node_hid_main_keyboard,
&ssam_node_hid_main_touchpad,
&ssam_node_hid_main_iid5,
@@ -245,7 +265,7 @@ static const struct software_node *ssam_node_group_sl5[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
&ssam_node_hid_main_keyboard,
&ssam_node_hid_main_touchpad,
&ssam_node_hid_main_iid5,
@@ -258,7 +278,7 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_sam_keyboard,
&ssam_node_hid_sam_penstash,
@@ -274,7 +294,7 @@ static const struct software_node *ssam_node_group_slg1[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
NULL,
};
@@ -283,7 +303,7 @@ static const struct software_node *ssam_node_group_sp7[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
NULL,
};
@@ -293,7 +313,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile,
&ssam_node_kip_tablet_switch,
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
@@ -310,7 +330,8 @@ static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
- &ssam_node_tmp_pprof,
+ &ssam_node_tmp_perf_profile_with_fan,
+ &ssam_node_tmp_sensors,
&ssam_node_fan_speed,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_kip_keyboard,
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index a5a3941b3f43..3de864bc6610 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface Platform Profile / Performance Mode driver for Surface System
- * Aggregator Module (thermal subsystem).
+ * Aggregator Module (thermal and fan subsystem).
*
* Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
@@ -14,6 +14,7 @@
#include <linux/surface_aggregator/device.h>
+// Enum for the platform performance profile sent to the TMP module.
enum ssam_tmp_profile {
SSAM_TMP_PROFILE_NORMAL = 1,
SSAM_TMP_PROFILE_BATTERY_SAVER = 2,
@@ -21,15 +22,26 @@ enum ssam_tmp_profile {
SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4,
};
+// Enum for the fan profile sent to the FAN module. This fan profile is
+// only sent to the EC if the 'has_fan' property is set. The integers are
+// not a typo; they differ from the performance profile indices.
+enum ssam_fan_profile {
+ SSAM_FAN_PROFILE_NORMAL = 2,
+ SSAM_FAN_PROFILE_BATTERY_SAVER = 1,
+ SSAM_FAN_PROFILE_BETTER_PERFORMANCE = 3,
+ SSAM_FAN_PROFILE_BEST_PERFORMANCE = 4,
+};
+
struct ssam_tmp_profile_info {
__le32 profile;
__le16 unknown1;
__le16 unknown2;
} __packed;
-struct ssam_tmp_profile_device {
+struct ssam_platform_profile_device {
struct ssam_device *sdev;
struct platform_profile_handler handler;
+ bool has_fan;
};
SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
@@ -42,6 +54,13 @@ SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
.command_id = 0x03,
});
+SSAM_DEFINE_SYNC_REQUEST_W(__ssam_fan_profile_set, u8, {
+ .target_category = SSAM_SSH_TC_FAN,
+ .target_id = SSAM_SSH_TID_SAM,
+ .command_id = 0x0e,
+ .instance_id = 0x01,
+});
+
static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
{
struct ssam_tmp_profile_info info;
@@ -57,12 +76,19 @@ static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile
static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
{
- __le32 profile_le = cpu_to_le32(p);
+ const __le32 profile_le = cpu_to_le32(p);
return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
}
-static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+static int ssam_fan_profile_set(struct ssam_device *sdev, enum ssam_fan_profile p)
+{
+ const u8 profile = p;
+
+ return ssam_retry(__ssam_fan_profile_set, sdev->ctrl, &profile);
+}
+
+static int convert_ssam_tmp_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
{
switch (p) {
case SSAM_TMP_PROFILE_NORMAL:
@@ -83,7 +109,8 @@ static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profi
}
}
-static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+
+static int convert_profile_to_ssam_tmp(struct ssam_device *sdev, enum platform_profile_option p)
{
switch (p) {
case PLATFORM_PROFILE_LOW_POWER:
@@ -105,20 +132,42 @@ static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profi
}
}
+static int convert_profile_to_ssam_fan(struct ssam_device *sdev, enum platform_profile_option p)
+{
+ switch (p) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ return SSAM_FAN_PROFILE_BATTERY_SAVER;
+
+ case PLATFORM_PROFILE_BALANCED:
+ return SSAM_FAN_PROFILE_NORMAL;
+
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ return SSAM_FAN_PROFILE_BETTER_PERFORMANCE;
+
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return SSAM_FAN_PROFILE_BEST_PERFORMANCE;
+
+ default:
+ /* This should have already been caught by platform_profile_store(). */
+ WARN(true, "unsupported platform profile");
+ return -EOPNOTSUPP;
+ }
+}
+
static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
- struct ssam_tmp_profile_device *tpd;
+ struct ssam_platform_profile_device *tpd;
enum ssam_tmp_profile tp;
int status;
- tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+ tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
status = ssam_tmp_profile_get(tpd->sdev, &tp);
if (status)
return status;
- status = convert_ssam_to_profile(tpd->sdev, tp);
+ status = convert_ssam_tmp_to_profile(tpd->sdev, tp);
if (status < 0)
return status;
@@ -129,21 +178,32 @@ static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
- struct ssam_tmp_profile_device *tpd;
+ struct ssam_platform_profile_device *tpd;
int tp;
- tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+ tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+
+ tp = convert_profile_to_ssam_tmp(tpd->sdev, profile);
+ if (tp < 0)
+ return tp;
- tp = convert_profile_to_ssam(tpd->sdev, profile);
+ tp = ssam_tmp_profile_set(tpd->sdev, tp);
if (tp < 0)
return tp;
- return ssam_tmp_profile_set(tpd->sdev, tp);
+ if (tpd->has_fan) {
+ tp = convert_profile_to_ssam_fan(tpd->sdev, profile);
+ if (tp < 0)
+ return tp;
+ tp = ssam_fan_profile_set(tpd->sdev, tp);
+ }
+
+ return tp;
}
static int surface_platform_profile_probe(struct ssam_device *sdev)
{
- struct ssam_tmp_profile_device *tpd;
+ struct ssam_platform_profile_device *tpd;
tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
if (!tpd)
@@ -154,6 +214,8 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
tpd->handler.profile_get = ssam_platform_profile_get;
tpd->handler.profile_set = ssam_platform_profile_set;
+ tpd->has_fan = device_property_read_bool(&sdev->dev, "has_fan");
+
set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
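
The get/set callbacks above recover their driver context with container_of(), walking from the embedded platform_profile_handler back to the wrapping ssam_platform_profile_device. A generic sketch of that idiom with made-up types:

#include <linux/container_of.h>

struct demo_handler {
	int (*profile_set)(struct demo_handler *h, int profile);
};

struct demo_device {
	int fan_profile;
	struct demo_handler handler;	/* embedded member */
};

static int demo_profile_set(struct demo_handler *h, int profile)
{
	/* Walk from the member pointer back to the containing struct. */
	struct demo_device *d = container_of(h, struct demo_device, handler);

	d->fan_profile = profile;
	return 0;
}
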
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7e9251fc3341..0ec952b5d03e 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -133,6 +133,17 @@ config YOGABOOK
To compile this driver as a module, choose M here: the module will
be called lenovo-yogabook.
+config YT2_1380
+ tristate "Lenovo Yoga Tablet 2 1380 fast charge driver"
+ depends on SERIAL_DEV_BUS
+ depends on ACPI
+ help
+ Say Y here to enable support for the custom fast charging protocol
+ found on the Lenovo Yoga Tablet 2 1380F / 1380L models.
+
+ To compile this driver as a module, choose M here: the module will
+ be called lenovo-yoga-tab2-pro-1380-fastcharger.
+
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
depends on ACPI && THERMAL
@@ -642,6 +653,30 @@ config THINKPAD_LMI
source "drivers/platform/x86/intel/Kconfig"
+config ACPI_QUICKSTART
+ tristate "ACPI Quickstart button driver"
+ depends on ACPI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds support for ACPI quickstart button (PNP0C32) devices.
+ The button emits a manufacturer-specific key value when pressed, so
+ userspace has to map this value to a standard key code.
+
+ To compile this driver as a module, choose M here: the module will be
+ called quickstart.
+
+config MEEGOPAD_ANX7428
+ tristate "MeeGoPad ANX7428 Type-C Switch"
+ depends on ACPI && GPIOLIB && I2C
+ help
+ Some MeeGoPad set-top boxes have an ANX7428 Type-C Switch for
+ USB3.1 Gen 1 and DisplayPort over Type-C alternate mode support.
+
+ This driver takes care of powering on the ANX7428 on supported
+ MeeGoPad set-top boxes. After this, the ANX7428 handles Type-C
+ connector orientation and PD alternate mode switching autonomously.
+
config MSI_EC
tristate "MSI EC Extras"
depends on ACPI
@@ -685,6 +720,17 @@ config MSI_WMI
To compile this driver as a module, choose M here: the module will
be called msi-wmi.
+config MSI_WMI_PLATFORM
+ tristate "MSI WMI Platform features"
+ depends on ACPI_WMI
+ depends on HWMON
+ help
+ Say Y here if you want support for WMI-based platform features such
+ as fan sensor access on MSI machines.
+
+ To compile this driver as a module, choose M here: the module will
+ be called msi-wmi-platform.
+
config XO15_EBOOK
tristate "OLPC XO-1.5 ebook switch"
depends on OLPC || COMPILE_TEST
@@ -996,6 +1042,18 @@ config INSPUR_PLATFORM_PROFILE
To compile this driver as a module, choose M here: the module
will be called inspur-platform-profile.
+config LENOVO_WMI_CAMERA
+ tristate "Lenovo WMI Camera Button driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ This driver provides support for the Lenovo camera button. The camera
+ button is a GPIO device. The driver receives ACPI notifications when
+ the camera button is switched on or off.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lenovo-wmi-camera.
+
source "drivers/platform/x86/x86-android-tablets/Kconfig"
config FW_ATTR_CLASS
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 1de432e8861e..e1b142947067 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -66,14 +66,23 @@ obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
obj-$(CONFIG_YOGABOOK) += lenovo-yogabook.o
+obj-$(CONFIG_YT2_1380) += lenovo-yoga-tab2-pro-1380-fastcharger.o
+obj-$(CONFIG_LENOVO_WMI_CAMERA) += lenovo-wmi-camera.o
# Intel
obj-y += intel/
+# Microsoft
+obj-$(CONFIG_ACPI_QUICKSTART) += quickstart.o
+
+# MeeGoPad
+obj-$(CONFIG_MEEGOPAD_ANX7428) += meegopad_anx7428.o
+
# MSI
obj-$(CONFIG_MSI_EC) += msi-ec.o
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
+obj-$(CONFIG_MSI_WMI_PLATFORM) += msi-wmi-platform.o
# OLPC
obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ee2e164f86b9..38c932df6446 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -598,6 +598,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH18-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH18-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index 1927be901108..d84ea66eecc6 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -693,7 +693,7 @@ static int hsmp_create_non_acpi_sysfs_if(struct device *dev)
hsmp_create_attr_list(attr_grp, dev, i);
}
- return devm_device_add_groups(dev, hsmp_attr_grps);
+ return device_add_groups(dev, hsmp_attr_grps);
}
static int hsmp_create_acpi_sysfs_if(struct device *dev)
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
index 883c0a95ac0c..94f9563d8be7 100644
--- a/drivers/platform/x86/amd/pmc/Kconfig
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -18,3 +18,18 @@ config AMD_PMC
If you choose to compile this driver as a module the module will be
called amd-pmc.
+
+config AMD_MP2_STB
+ bool "AMD SoC MP2 STB function"
+ depends on AMD_PMC
+ default AMD_PMC
+ help
+ The AMD MP2 STB function provides a data buffer used to log debug
+ information about system execution during S2Idle suspend/resume.
+ The STB (Smart Trace Buffer) is a circular buffer holding a
+ low-level log for the SoC, which is useful for debugging hangs or
+ stalls during S2Idle suspend/resume.
+
+ This option creates a debugfs file through which a userspace daemon
+ can read the STB log of the last S2Idle suspend/resume cycle to help
+ debug such hangs or stalls.
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index 4aaa29d351c9..f1d9ab19d24c 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -6,3 +6,4 @@
amd-pmc-objs := pmc.o pmc-quirks.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/mp2_stb.c b/drivers/platform/x86/amd/pmc/mp2_stb.c
new file mode 100644
index 000000000000..9775ddc1b27a
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/mp2_stb.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD MP2 STB layer
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/time.h>
+
+#include "pmc.h"
+
+#define VALID_MSG 0xA
+#define VALID_RESPONSE 2
+
+#define AMD_C2P_MSG0 0x10500
+#define AMD_C2P_MSG1 0x10504
+#define AMD_P2C_MSG0 0x10680
+#define AMD_P2C_MSG1 0x10684
+
+#define MP2_RESP_SLEEP_US 500
+#define MP2_RESP_TIMEOUT_US (1600 * USEC_PER_MSEC)
+
+#define MP2_STB_DATA_LEN_2KB 1
+#define MP2_STB_DATA_LEN_16KB 4
+
+#define MP2_MMIO_BAR 2
+
+struct mp2_cmd_base {
+ union {
+ u32 ul;
+ struct {
+ u32 cmd_id : 4;
+ u32 intr_disable : 1;
+ u32 is_dma_used : 1;
+ u32 rsvd : 26;
+ } field;
+ };
+};
+
+struct mp2_cmd_response {
+ union {
+ u32 resp;
+ struct {
+ u32 cmd_id : 4;
+ u32 status : 4;
+ u32 response : 4;
+ u32 rsvd2 : 20;
+ } field;
+ };
+};
+
+struct mp2_stb_data_valid {
+ union {
+ u32 data_valid;
+ struct {
+ u32 valid : 16;
+ u32 length : 16;
+ } val;
+ };
+};
+
+static int amd_mp2_wait_response(struct amd_mp2_dev *mp2, u8 cmd_id, u32 command_sts)
+{
+ struct mp2_cmd_response cmd_resp;
+
+ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG0, cmd_resp.resp,
+ (cmd_resp.field.response == 0x0 &&
+ cmd_resp.field.status == command_sts &&
+ cmd_resp.field.cmd_id == cmd_id), MP2_RESP_SLEEP_US,
+ MP2_RESP_TIMEOUT_US))
+ return cmd_resp.field.status;
+
+ return -ETIMEDOUT;
+}
+
+static void amd_mp2_stb_send_cmd(struct amd_mp2_dev *mp2, u8 cmd_id, bool is_dma_used)
+{
+ struct mp2_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.field.cmd_id = cmd_id;
+ cmd_base.field.intr_disable = 1;
+ cmd_base.field.is_dma_used = is_dma_used;
+
+ writeq(mp2->dma_addr, mp2->mmio + AMD_C2P_MSG1);
+ writel(cmd_base.ul, mp2->mmio + AMD_C2P_MSG0);
+}
+
+static int amd_mp2_stb_region(struct amd_mp2_dev *mp2)
+{
+ struct device *dev = &mp2->pdev->dev;
+ unsigned int len = mp2->stb_len;
+
+ if (!mp2->stbdata) {
+ mp2->vslbase = dmam_alloc_coherent(dev, len, &mp2->dma_addr, GFP_KERNEL);
+ if (!mp2->vslbase)
+ return -ENOMEM;
+
+ mp2->stbdata = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!mp2->stbdata)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int amd_mp2_process_cmd(struct amd_mp2_dev *mp2, struct file *filp)
+{
+ struct device *dev = &mp2->pdev->dev;
+ struct mp2_stb_data_valid stb_dv;
+ int status;
+
+ stb_dv.data_valid = readl(mp2->mmio + AMD_P2C_MSG1);
+
+ if (stb_dv.val.valid != VALID_MSG) {
+ dev_dbg(dev, "Invalid STB data\n");
+ return -EBADMSG;
+ }
+
+ if (stb_dv.val.length != MP2_STB_DATA_LEN_2KB &&
+ stb_dv.val.length != MP2_STB_DATA_LEN_16KB) {
+ dev_dbg(dev, "Unsupported length\n");
+ return -EMSGSIZE;
+ }
+
+ mp2->stb_len = BIT(stb_dv.val.length) * SZ_1K;
+
+ status = amd_mp2_stb_region(mp2);
+ if (status) {
+ dev_err(dev, "Failed to init STB region, status %d\n", status);
+ return status;
+ }
+
+ amd_mp2_stb_send_cmd(mp2, VALID_MSG, true);
+ status = amd_mp2_wait_response(mp2, VALID_MSG, VALID_RESPONSE);
+ if (status == VALID_RESPONSE) {
+ memcpy_fromio(mp2->stbdata, mp2->vslbase, mp2->stb_len);
+ filp->private_data = mp2->stbdata;
+ mp2->is_stb_data = true;
+ } else {
+ dev_err(dev, "Failed to start STB dump, status %d\n", status);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int amd_mp2_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ struct amd_mp2_dev *mp2 = dev->mp2;
+
+ if (mp2) {
+ if (!mp2->is_stb_data)
+ return amd_mp2_process_cmd(mp2, filp);
+
+ filp->private_data = mp2->stbdata;
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static ssize_t amd_mp2_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ struct amd_mp2_dev *mp2 = dev->mp2;
+
+ if (!mp2)
+ return -ENODEV;
+
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data, mp2->stb_len);
+}
+
+static const struct file_operations amd_mp2_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_mp2_stb_debugfs_open,
+ .read = amd_mp2_stb_debugfs_read,
+};
+
+static void amd_mp2_dbgfs_register(struct amd_pmc_dev *dev)
+{
+ if (!dev->dbgfs_dir)
+ return;
+
+ debugfs_create_file("stb_read_previous_boot", 0644, dev->dbgfs_dir, dev,
+ &amd_mp2_stb_debugfs_fops);
+}
+
+void amd_mp2_stb_deinit(struct amd_pmc_dev *dev)
+{
+ struct amd_mp2_dev *mp2 = dev->mp2;
+ struct pci_dev *pdev;
+
+ if (mp2 && mp2->pdev) {
+ pdev = mp2->pdev;
+
+ if (mp2->mmio)
+ pci_clear_master(pdev);
+
+ pci_dev_put(pdev);
+
+ if (mp2->devres_gid)
+ devres_release_group(&pdev->dev, mp2->devres_gid);
+
+ dev->mp2 = NULL;
+ }
+}
+
+void amd_mp2_stb_init(struct amd_pmc_dev *dev)
+{
+ struct amd_mp2_dev *mp2 = NULL;
+ struct pci_dev *pdev;
+ int rc;
+
+ mp2 = devm_kzalloc(dev->dev, sizeof(*mp2), GFP_KERNEL);
+ if (!mp2)
+ return;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MP2_STB, NULL);
+ if (!pdev)
+ return;
+
+ dev->mp2 = mp2;
+ mp2->pdev = pdev;
+
+ mp2->devres_gid = devres_open_group(&pdev->dev, NULL, GFP_KERNEL);
+ if (!mp2->devres_gid) {
+ dev_err(&pdev->dev, "devres_open_group failed\n");
+ goto mp2_error;
+ }
+
+ rc = pcim_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ goto mp2_error;
+ }
+
+ rc = pcim_iomap_regions(pdev, BIT(MP2_MMIO_BAR), "mp2 stb");
+ if (rc) {
+ dev_err(&pdev->dev, "pcim_iomap_regions failed\n");
+ goto mp2_error;
+ }
+
+ mp2->mmio = pcim_iomap_table(pdev)[MP2_MMIO_BAR];
+ if (!mp2->mmio) {
+ dev_err(&pdev->dev, "pcim_iomap_table failed\n");
+ goto mp2_error;
+ }
+
+ pci_set_master(pdev);
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto mp2_error;
+ }
+
+ amd_mp2_dbgfs_register(dev);
+
+ return;
+
+mp2_error:
+ amd_mp2_stb_deinit(dev);
+}
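
amd_mp2_wait_response() above is built on readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A standalone sketch with hypothetical register semantics:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/time.h>

/* Poll 'reg' every 500us until bit 0 is set, giving up after 100ms. */
static int demo_wait_ready(void __iomem *reg)
{
	u32 val;

	return readl_poll_timeout(reg, val, val & BIT(0),
				  500, 100 * USEC_PER_MSEC);
}
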
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b456370166b6..b4f49720c87f 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 108e12fd580f..a3d881f6e5d9 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -1106,6 +1106,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
amd_pmc_dbgfs_register(dev);
+ if (IS_ENABLED(CONFIG_AMD_MP2_STB))
+ amd_mp2_stb_init(dev);
pm_report_max_hw_sleep(U64_MAX);
return 0;
@@ -1122,6 +1124,8 @@ static void amd_pmc_remove(struct platform_device *pdev)
acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
+ if (IS_ENABLED(CONFIG_AMD_MP2_STB))
+ amd_mp2_stb_deinit(dev);
mutex_destroy(&dev->lock);
}
@@ -1132,6 +1136,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0008", 0},
{"AMDI0009", 0},
{"AMDI000A", 0},
+ {"AMDI000B", 0},
{"AMD0004", 0},
{"AMD0005", 0},
{ }
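
The IS_ENABLED(CONFIG_AMD_MP2_STB) guards above rely on the usual kernel idiom: the macro folds to a compile-time 0 or 1, so when the option is off the call is removed by dead-code elimination and the prototypes declared unconditionally in pmc.h never become unresolved symbols. A hedged sketch with a made-up config symbol:

#include <linux/kconfig.h>

void demo_feature_init(void);	/* defined only when CONFIG_DEMO_FEATURE=y */

static void demo_probe_tail(void)
{
	/*
	 * Folds to 'if (0)' when the option is disabled, so the call
	 * (and the reference to demo_feature_init) is compiled out.
	 */
	if (IS_ENABLED(CONFIG_DEMO_FEATURE))
		demo_feature_init();
}
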
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index 827eef65e133..9e32d3128c3a 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,17 @@
#include <linux/types.h>
#include <linux/mutex.h>
+struct amd_mp2_dev {
+ void __iomem *mmio;
+ void __iomem *vslbase;
+ void *stbdata;
+ void *devres_gid;
+ struct pci_dev *pdev;
+ dma_addr_t dma_addr;
+ int stb_len;
+ bool is_stb_data;
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -38,10 +49,13 @@ struct amd_pmc_dev {
struct dentry *dbgfs_dir;
struct quirk_entry *quirks;
bool disable_8042_wakeup;
+ struct amd_mp2_dev *mp2;
};
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
+void amd_mp2_stb_init(struct amd_pmc_dev *dev);
+void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
/* List of supported CPU ids */
#define AMD_CPU_ID_RV 0x15D0
@@ -53,5 +67,6 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
#define AMD_CPU_ID_PS 0x14E8
#define AMD_CPU_ID_SP 0x14A4
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
#endif /* PMC_H */
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad..7d6079b02589 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o
+ tee-if.o spc.o pmf-quirks.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index d0cf46e2fc8e..1157ec148880 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -343,7 +343,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
output.supported_functions, output.notification_mask, output.version);
@@ -437,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
if (ACPI_FAILURE(status)) {
- dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+ dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
return -EINVAL;
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 5d4f80698a8b..2d6e2558863c 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -381,6 +381,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = {
{"AMDI0100", 0x100},
{"AMDI0102", 0},
{"AMDI0103", 0},
+ {"AMDI0105", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
@@ -445,6 +446,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644
index 000000000000..0b2eb0ae85fe
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+ u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+ .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "ROG Zephyrus G14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
+ {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+ struct quirk_entry *quirks;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+
+ quirks = dmi_id->driver_data;
+ if (quirks->supported_func) {
+ dev->supported_func = quirks->supported_func;
+ pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ }
+}
+
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 8c4df5753f40..eeedd0c0395a 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -720,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bf03ea1b1274..ccb33d034e2a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -852,8 +852,8 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
* so we don't set eof to 1
*/
- len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
- len += sprintf(page + len, "Model reference : %s\n", asus->name);
+ len += sysfs_emit_at(page, len, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
+ len += sysfs_emit_at(page, len, "Model reference : %s\n", asus->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, 0x0100 or 0x0800
@@ -862,7 +862,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
*/
rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
if (ACPI_SUCCESS(rv))
- len += sprintf(page + len, "SFUN value : %#x\n",
+ len += sysfs_emit_at(page, len, "SFUN value : %#x\n",
(uint) temp);
/*
* The HWRS method returns information about the hardware.
@@ -874,7 +874,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
*/
rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
if (ACPI_SUCCESS(rv))
- len += sprintf(page + len, "HWRS value : %#x\n",
+ len += sysfs_emit_at(page, len, "HWRS value : %#x\n",
(uint) temp);
/*
* Another value for userspace: the ASYM method returns 0x02 for
@@ -885,25 +885,25 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
*/
rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
if (ACPI_SUCCESS(rv))
- len += sprintf(page + len, "ASYM value : %#x\n",
+ len += sysfs_emit_at(page, len, "ASYM value : %#x\n",
(uint) temp);
if (asus->dsdt_info) {
snprintf(buf, 16, "%d", asus->dsdt_info->length);
- len += sprintf(page + len, "DSDT length : %s\n", buf);
+ len += sysfs_emit_at(page, len, "DSDT length : %s\n", buf);
snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
- len += sprintf(page + len, "DSDT checksum : %s\n", buf);
+ len += sysfs_emit_at(page, len, "DSDT checksum : %s\n", buf);
snprintf(buf, 16, "%d", asus->dsdt_info->revision);
- len += sprintf(page + len, "DSDT revision : %s\n", buf);
+ len += sysfs_emit_at(page, len, "DSDT revision : %s\n", buf);
snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
- len += sprintf(page + len, "OEM id : %s\n", buf);
+ len += sysfs_emit_at(page, len, "OEM id : %s\n", buf);
snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
- len += sprintf(page + len, "OEM table id : %s\n", buf);
+ len += sysfs_emit_at(page, len, "OEM table id : %s\n", buf);
snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
- len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
+ len += sysfs_emit_at(page, len, "OEM revision : 0x%s\n", buf);
snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
- len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
+ len += sysfs_emit_at(page, len, "ASL comp vendor id : %s\n", buf);
snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
- len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
+ len += sysfs_emit_at(page, len, "ASL comp revision : 0x%s\n", buf);
}
return len;
@@ -933,7 +933,7 @@ static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "0x%08x\n", asus->ledd_status);
+ return sysfs_emit(buf, "0x%08x\n", asus->ledd_status);
}
static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
@@ -993,7 +993,7 @@ static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
+ return sysfs_emit(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
}
static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
@@ -1022,7 +1022,7 @@ static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
+ return sysfs_emit(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
}
static ssize_t bluetooth_store(struct device *dev,
@@ -1052,7 +1052,7 @@ static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
+ return sysfs_emit(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
}
static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
@@ -1081,7 +1081,7 @@ static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
+ return sysfs_emit(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
}
static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
@@ -1151,7 +1151,7 @@ static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus->light_switch);
+ return sysfs_emit(buf, "%d\n", asus->light_switch);
}
static ssize_t ls_switch_store(struct device *dev,
@@ -1182,7 +1182,7 @@ static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus->light_level);
+ return sysfs_emit(buf, "%d\n", asus->light_level);
}
static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
@@ -1228,7 +1228,7 @@ static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
if (!err)
err = pega_int_read(asus, PEGA_READ_ALS_L, &lo);
if (!err)
- return sprintf(buf, "%d\n", 10 * hi + lo);
+ return sysfs_emit(buf, "%d\n", 10 * hi + lo);
return err;
}
static DEVICE_ATTR_RO(ls_value);
@@ -1264,7 +1264,7 @@ static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
{
struct asus_laptop *asus = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", asus_gps_status(asus));
+ return sysfs_emit(buf, "%d\n", asus_gps_status(asus));
}
static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
@@ -1925,7 +1925,6 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static struct acpi_driver asus_acpi_driver = {
.name = ASUS_LAPTOP_NAME,
.class = ASUS_LAPTOP_CLASS,
- .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 3f07bbf809ef..727dbdec45f4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -126,10 +126,21 @@ module_param(fnlock_default, bool, 0444);
#define ASUS_SCREENPAD_BRIGHT_MAX 255
#define ASUS_SCREENPAD_BRIGHT_DEFAULT 60
+#define ASUS_MINI_LED_MODE_MASK 0x03
+/* Standard modes for devices with only on/off */
+#define ASUS_MINI_LED_OFF 0x00
+#define ASUS_MINI_LED_ON 0x01
+/* New mode on some devices, define here to clarify remapping later */
+#define ASUS_MINI_LED_STRONG_MODE 0x02
+/* New modes for devices with 3 mini-led mode types */
+#define ASUS_MINI_LED_2024_WEAK 0x00
+#define ASUS_MINI_LED_2024_STRONG 0x01
+#define ASUS_MINI_LED_2024_OFF 0x02
+
/* Controls the power state of the USB0 hub on ROG Ally which input is on */
#define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE"
/* 300ms so far seems to produce a reliable result on AC and battery */
-#define ASUS_USB0_PWR_EC0_CSEE_WAIT 300
+#define ASUS_USB0_PWR_EC0_CSEE_WAIT 1500
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
@@ -243,6 +254,9 @@ struct asus_wmi {
u32 tablet_switch_dev_id;
bool tablet_switch_inverted;
+ /* The ROG Ally device requires the MCU USB device be disconnected before suspend */
+ bool ally_mcu_usb_switch;
+
enum fan_type fan_type;
enum fan_type gpu_fan_type;
enum fan_type mid_fan_type;
@@ -255,22 +269,20 @@ struct asus_wmi {
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
- bool charge_mode_available;
bool egpu_enable_available;
- bool egpu_connect_available;
bool dgpu_disable_available;
- bool gpu_mux_mode_available;
+ u32 gpu_mux_dev;
/* Tunables provided by ASUS for gaming laptops */
- bool ppt_pl2_sppt_available;
- bool ppt_pl1_spl_available;
- bool ppt_apu_sppt_available;
- bool ppt_plat_sppt_available;
- bool ppt_fppt_available;
- bool nv_dyn_boost_available;
- bool nv_temp_tgt_available;
-
- bool kbd_rgb_mode_available;
+ u32 ppt_pl2_sppt;
+ u32 ppt_pl1_spl;
+ u32 ppt_apu_sppt;
+ u32 ppt_platform_sppt;
+ u32 ppt_fppt;
+ u32 nv_dynamic_boost;
+ u32 nv_temp_target;
+
+ u32 kbd_rgb_dev;
bool kbd_rgb_state_available;
bool throttle_thermal_policy_available;
@@ -288,7 +300,7 @@ struct asus_wmi {
bool battery_rsoc_available;
bool panel_overdrive_available;
- bool mini_led_mode_available;
+ u32 mini_led_dev_id;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
@@ -298,9 +310,6 @@ struct asus_wmi {
bool fnlock_locked;
- /* The ROG Ally device requires the MCU USB device be disconnected before suspend */
- bool ally_mcu_usb_switch;
-
struct asus_wmi_debug debug;
struct asus_wmi_driver *driver;
@@ -682,8 +691,8 @@ static ssize_t dgpu_disable_store(struct device *dev,
if (disable > 1)
return -EINVAL;
- if (asus->gpu_mux_mode_available) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (asus->gpu_mux_dev) {
+ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
if (result < 0)
/* An error here may signal greater failure of GPU handling */
return result;
@@ -748,8 +757,8 @@ static ssize_t egpu_enable_store(struct device *dev,
return err;
}
- if (asus->gpu_mux_mode_available) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (asus->gpu_mux_dev) {
+ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
if (result < 0) {
/* An error here may signal greater failure of GPU handling */
pr_warn("Failed to get gpu mux status: %d\n", result);
@@ -802,7 +811,7 @@ static ssize_t gpu_mux_mode_show(struct device *dev,
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
if (result < 0)
return result;
@@ -848,7 +857,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
}
}
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
+ err = asus_wmi_set_devstate(asus->gpu_mux_dev, optimus, &result);
if (err) {
dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
return err;
@@ -870,6 +879,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
u32 cmd, mode, r, g, b, speed;
int err;
@@ -906,7 +916,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
speed = 0xeb;
}
- err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, asus->kbd_rgb_dev,
cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
if (err)
return err;
@@ -996,11 +1006,10 @@ static ssize_t ppt_pl2_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1019,22 +1028,31 @@ static ssize_t ppt_pl2_sppt_store(struct device *dev,
return -EIO;
}
+ asus->ppt_pl2_sppt = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl2_sppt");
return count;
}
-static DEVICE_ATTR_WO(ppt_pl2_sppt);
+
+static ssize_t ppt_pl2_sppt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->ppt_pl2_sppt);
+}
+static DEVICE_ATTR_RW(ppt_pl2_sppt);
/* Tunable: PPT, Intel=PL1, AMD=SPL ******************************************/
static ssize_t ppt_pl1_spl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1053,22 +1071,30 @@ static ssize_t ppt_pl1_spl_store(struct device *dev,
return -EIO;
}
+ asus->ppt_pl1_spl = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl1_spl");
return count;
}
-static DEVICE_ATTR_WO(ppt_pl1_spl);
+static ssize_t ppt_pl1_spl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->ppt_pl1_spl);
+}
+static DEVICE_ATTR_RW(ppt_pl1_spl);
/* Tunable: PPT APU FPPT ******************************************************/
static ssize_t ppt_fppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1087,22 +1113,31 @@ static ssize_t ppt_fppt_store(struct device *dev,
return -EIO;
}
+ asus->ppt_fppt = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_fppt");
return count;
}
-static DEVICE_ATTR_WO(ppt_fppt);
+
+static ssize_t ppt_fppt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->ppt_fppt);
+}
+static DEVICE_ATTR_RW(ppt_fppt);
/* Tunable: PPT APU SPPT *****************************************************/
static ssize_t ppt_apu_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1121,22 +1156,31 @@ static ssize_t ppt_apu_sppt_store(struct device *dev,
return -EIO;
}
+ asus->ppt_apu_sppt = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_apu_sppt");
return count;
}
-static DEVICE_ATTR_WO(ppt_apu_sppt);
+
+static ssize_t ppt_apu_sppt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt);
+}
+static DEVICE_ATTR_RW(ppt_apu_sppt);
/* Tunable: PPT platform SPPT ************************************************/
static ssize_t ppt_platform_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1155,22 +1199,31 @@ static ssize_t ppt_platform_sppt_store(struct device *dev,
return -EIO;
}
+ asus->ppt_platform_sppt = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_platform_sppt");
return count;
}
-static DEVICE_ATTR_WO(ppt_platform_sppt);
+
+static ssize_t ppt_platform_sppt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt);
+}
+static DEVICE_ATTR_RW(ppt_platform_sppt);
/* Tunable: NVIDIA dynamic boost *********************************************/
static ssize_t nv_dynamic_boost_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1189,22 +1242,31 @@ static ssize_t nv_dynamic_boost_store(struct device *dev,
return -EIO;
}
+ asus->nv_dynamic_boost = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_dynamic_boost");
return count;
}
-static DEVICE_ATTR_WO(nv_dynamic_boost);
+
+static ssize_t nv_dynamic_boost_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost);
+}
+static DEVICE_ATTR_RW(nv_dynamic_boost);
/* Tunable: NVIDIA temperature target ****************************************/
static ssize_t nv_temp_target_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 value;
- struct asus_wmi *asus = dev_get_drvdata(dev);
-
result = kstrtou32(buf, 10, &value);
if (result)
return result;
@@ -1223,11 +1285,68 @@ static ssize_t nv_temp_target_store(struct device *dev,
return -EIO;
}
+ asus->nv_temp_target = value;
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_temp_target");
return count;
}
-static DEVICE_ATTR_WO(nv_temp_target);
+
+static ssize_t nv_temp_target_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", asus->nv_temp_target);
+}
+static DEVICE_ATTR_RW(nv_temp_target);
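With the value now cached and exposed through the new _show() callbacks, each of these tunables behaves as an ordinary read/write sysfs attribute. A minimal userspace sketch, assuming the attribute is exposed by the asus-nb-wmi platform device (the exact path depends on which platform driver binds):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path; adjust to the platform device that registered the attribute */
		FILE *f = fopen("/sys/devices/platform/asus-nb-wmi/ppt_pl1_spl", "r+");
		unsigned int watts;

		if (!f)
			return 1;
		if (fscanf(f, "%u", &watts) == 1)
			printf("current ppt_pl1_spl: %u\n", watts);
		rewind(f);		/* reposition before switching from reading to writing */
		fprintf(f, "30\n");	/* example request: a 30 W sustained power limit */
		fclose(f);
		return 0;
	}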
+
+/* Ally MCU Powersave ********************************************************/
+static ssize_t mcu_powersave_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MCU_POWERSAVE);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t mcu_powersave_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 enable;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &enable);
+ if (result)
+ return result;
+
+ if (enable > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MCU_POWERSAVE, enable, &result);
+ if (err) {
+ pr_warn("Failed to set MCU powersave: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set MCU powersave (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "mcu_powersave");
+
+ return count;
+}
+static DEVICE_ATTR_RW(mcu_powersave);
/* Battery ********************************************************************/
@@ -1549,7 +1668,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
{
int rv = 0, num_rgb_groups = 0, led_val;
- if (asus->kbd_rgb_mode_available)
+ if (asus->kbd_rgb_dev)
kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group;
if (asus->kbd_rgb_state_available)
kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group;
@@ -2103,20 +2222,88 @@ static ssize_t panel_od_store(struct device *dev,
}
static DEVICE_ATTR_RW(panel_od);
-/* Mini-LED mode **************************************************************/
-static ssize_t mini_led_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+/* Bootup sound ***************************************************************/
+
+static ssize_t boot_sound_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BOOT_SOUND);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
+static ssize_t boot_sound_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 snd;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &snd);
+ if (result)
+ return result;
+
+ if (snd > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BOOT_SOUND, snd, &result);
+ if (err) {
+ pr_warn("Failed to set boot sound: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set boot sound (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "boot_sound");
+
+ return count;
+}
+static DEVICE_ATTR_RW(boot_sound);
+
+/* Mini-LED mode **************************************************************/
+static ssize_t mini_led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, asus->mini_led_dev_id, &value);
+ if (err < 0)
+ return err;
+ value = value & ASUS_MINI_LED_MODE_MASK;
+
+ /*
+ * Remap the mode values to match the previous-generation mini-LED. On the last
+ * gen, WMI 0 == off, while on this version WMI 2 == off (flipped).
+ */
+ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) {
+ switch (value) {
+ case ASUS_MINI_LED_2024_WEAK:
+ value = ASUS_MINI_LED_ON;
+ break;
+ case ASUS_MINI_LED_2024_STRONG:
+ value = ASUS_MINI_LED_STRONG_MODE;
+ break;
+ case ASUS_MINI_LED_2024_OFF:
+ value = ASUS_MINI_LED_OFF;
+ break;
+ }
+ }
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
static ssize_t mini_led_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -2130,11 +2317,32 @@ static ssize_t mini_led_mode_store(struct device *dev,
if (result)
return result;
- if (mode > 1)
+ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE &&
+ mode > ASUS_MINI_LED_ON)
+ return -EINVAL;
+ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2 &&
+ mode > ASUS_MINI_LED_STRONG_MODE)
return -EINVAL;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MINI_LED_MODE, mode, &result);
+ /*
+ * Remap the mode values so expected behaviour is the same as the last
+ * generation of mini-LED with 0 == off, 1 == on.
+ */
+ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) {
+ switch (mode) {
+ case ASUS_MINI_LED_OFF:
+ mode = ASUS_MINI_LED_2024_OFF;
+ break;
+ case ASUS_MINI_LED_ON:
+ mode = ASUS_MINI_LED_2024_WEAK;
+ break;
+ case ASUS_MINI_LED_STRONG_MODE:
+ mode = ASUS_MINI_LED_2024_STRONG;
+ break;
+ }
+ }
+ err = asus_wmi_set_devstate(asus->mini_led_dev_id, mode, &result);
if (err) {
pr_warn("Failed to set mini-LED: %d\n", err);
return err;
@@ -2151,6 +2359,23 @@ static ssize_t mini_led_mode_store(struct device *dev,
}
static DEVICE_ATTR_RW(mini_led_mode);
+static ssize_t available_mini_led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ switch (asus->mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ return sysfs_emit(buf, "0 1\n");
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ return sysfs_emit(buf, "0 1 2\n");
+ }
+
+ return sysfs_emit(buf, "0\n");
+}
+
+static DEVICE_ATTR_RO(available_mini_led_mode);
+
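Taken together, the show/store remapping above keeps the values seen through sysfs stable across both generations of panel. A summary of the mapping, derived from the switch statements above:

	/* sysfs value    ASUS_WMI_DEVID_MINI_LED_MODE    ASUS_WMI_DEVID_MINI_LED_MODE2 */
	/* 0 (off)        0x00                            0x02                          */
	/* 1 (on/weak)    0x01                            0x00                          */
	/* 2 (strong)     not supported                   0x01                          */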
/* Quirks *********************************************************************/
static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
@@ -2326,7 +2551,7 @@ static ssize_t pwm1_show(struct device *dev,
/* If we already set a value then just return it */
if (asus->agfn_pwm >= 0)
- return sprintf(buf, "%d\n", asus->agfn_pwm);
+ return sysfs_emit(buf, "%d\n", asus->agfn_pwm);
/*
 * If we haven't already set a value through the AGFN interface,
@@ -2512,8 +2737,8 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
if (err < 0)
return err;
- return sprintf(buf, "%ld\n",
- deci_kelvin_to_millicelsius(value & 0xFFFF));
+ return sysfs_emit(buf, "%ld\n",
+ deci_kelvin_to_millicelsius(value & 0xFFFF));
}
/* GPU fan on modern ROG laptops */
@@ -4061,7 +4286,7 @@ static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
@@ -4137,8 +4362,11 @@ static struct attribute *platform_attributes[] = {
&dev_attr_ppt_platform_sppt.attr,
&dev_attr_nv_dynamic_boost.attr,
&dev_attr_nv_temp_target.attr,
+ &dev_attr_mcu_powersave.attr,
+ &dev_attr_boot_sound.attr,
&dev_attr_panel_od.attr,
&dev_attr_mini_led_mode.attr,
+ &dev_attr_available_mini_led_mode.attr,
NULL
};
@@ -4161,37 +4389,43 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_charge_mode.attr)
- ok = asus->charge_mode_available;
+ devid = ASUS_WMI_DEVID_CHARGE_MODE;
else if (attr == &dev_attr_egpu_enable.attr)
ok = asus->egpu_enable_available;
else if (attr == &dev_attr_egpu_connected.attr)
- ok = asus->egpu_connect_available;
+ devid = ASUS_WMI_DEVID_EGPU_CONNECTED;
else if (attr == &dev_attr_dgpu_disable.attr)
ok = asus->dgpu_disable_available;
else if (attr == &dev_attr_gpu_mux_mode.attr)
- ok = asus->gpu_mux_mode_available;
+ ok = asus->gpu_mux_dev != 0;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
ok = asus->throttle_thermal_policy_available;
else if (attr == &dev_attr_ppt_pl2_sppt.attr)
- ok = asus->ppt_pl2_sppt_available;
+ devid = ASUS_WMI_DEVID_PPT_PL2_SPPT;
else if (attr == &dev_attr_ppt_pl1_spl.attr)
- ok = asus->ppt_pl1_spl_available;
+ devid = ASUS_WMI_DEVID_PPT_PL1_SPL;
else if (attr == &dev_attr_ppt_fppt.attr)
- ok = asus->ppt_fppt_available;
+ devid = ASUS_WMI_DEVID_PPT_FPPT;
else if (attr == &dev_attr_ppt_apu_sppt.attr)
- ok = asus->ppt_apu_sppt_available;
+ devid = ASUS_WMI_DEVID_PPT_APU_SPPT;
else if (attr == &dev_attr_ppt_platform_sppt.attr)
- ok = asus->ppt_plat_sppt_available;
+ devid = ASUS_WMI_DEVID_PPT_PLAT_SPPT;
else if (attr == &dev_attr_nv_dynamic_boost.attr)
- ok = asus->nv_dyn_boost_available;
+ devid = ASUS_WMI_DEVID_NV_DYN_BOOST;
else if (attr == &dev_attr_nv_temp_target.attr)
- ok = asus->nv_temp_tgt_available;
+ devid = ASUS_WMI_DEVID_NV_THERM_TARGET;
+ else if (attr == &dev_attr_mcu_powersave.attr)
+ devid = ASUS_WMI_DEVID_MCU_POWERSAVE;
+ else if (attr == &dev_attr_boot_sound.attr)
+ devid = ASUS_WMI_DEVID_BOOT_SOUND;
else if (attr == &dev_attr_panel_od.attr)
- ok = asus->panel_overdrive_available;
+ devid = ASUS_WMI_DEVID_PANEL_OD;
else if (attr == &dev_attr_mini_led_mode.attr)
- ok = asus->mini_led_mode_available;
+ ok = asus->mini_led_dev_id != 0;
+ else if (attr == &dev_attr_available_mini_led_mode.attr)
+ ok = asus->mini_led_dev_id != 0;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -4429,25 +4663,36 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
- asus->charge_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CHARGE_MODE);
+ /* ensure defaults for tunables */
+ asus->ppt_pl2_sppt = 5;
+ asus->ppt_pl1_spl = 5;
+ asus->ppt_apu_sppt = 5;
+ asus->ppt_platform_sppt = 5;
+ asus->ppt_fppt = 5;
+ asus->nv_dynamic_boost = 5;
+ asus->nv_temp_target = 75;
+
asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
- asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
- asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
- asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
- asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT);
- asus->ppt_pl1_spl_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL1_SPL);
- asus->ppt_fppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_FPPT);
- asus->ppt_apu_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_APU_SPPT);
- asus->ppt_plat_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PLAT_SPPT);
- asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
- asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
- asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
- asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
&& dmi_match(DMI_BOARD_NAME, "RC71L");
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE))
+ asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE2))
+ asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
+
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX))
+ asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO))
+ asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO;
+
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
@@ -4629,6 +4874,7 @@ static int asus_hotk_resume_early(struct device *device)
struct asus_wmi *asus = dev_get_drvdata(device);
if (asus->ally_mcu_usb_switch) {
+ /* sleep required to prevent USB0 from being yanked and then reappearing rapidly */
if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8)))
dev_err(device, "ROG Ally MCU failed to connect USB dev\n");
else
@@ -4640,17 +4886,8 @@ static int asus_hotk_resume_early(struct device *device)
static int asus_hotk_prepare(struct device *device)
{
struct asus_wmi *asus = dev_get_drvdata(device);
- int result, err;
if (asus->ally_mcu_usb_switch) {
- /* When powersave is enabled it causes many issues with resume of USB hub */
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MCU_POWERSAVE);
- if (result == 1) {
- dev_warn(device, "MCU powersave enabled, disabling to prevent resume issues");
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MCU_POWERSAVE, 0, &result);
- if (err || result != 1)
- dev_err(device, "Failed to set MCU powersave mode: %d\n", err);
- }
/* sleep required to ensure USB0 is disabled before sleep continues */
if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7)))
dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n");
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2edaea2492df..cb6fce655e35 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -13,8 +13,6 @@
#include <linux/input.h>
#include <linux/rfkill.h>
-MODULE_LICENSE("GPL");
-
struct cmpc_accel {
int sensitivity;
int g_select;
@@ -434,7 +432,6 @@ static const struct acpi_device_id cmpc_accel_device_ids_v4[] = {
};
static struct acpi_driver cmpc_accel_acpi_driver_v4 = {
- .owner = THIS_MODULE,
.name = "cmpc_accel_v4",
.class = "cmpc_accel_v4",
.ids = cmpc_accel_device_ids_v4,
@@ -660,7 +657,6 @@ static const struct acpi_device_id cmpc_accel_device_ids[] = {
};
static struct acpi_driver cmpc_accel_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_accel",
.class = "cmpc_accel",
.ids = cmpc_accel_device_ids,
@@ -754,7 +750,6 @@ static const struct acpi_device_id cmpc_tablet_device_ids[] = {
};
static struct acpi_driver cmpc_tablet_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_tablet",
.class = "cmpc_tablet",
.ids = cmpc_tablet_device_ids,
@@ -996,7 +991,6 @@ static const struct acpi_device_id cmpc_ipml_device_ids[] = {
};
static struct acpi_driver cmpc_ipml_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc",
.class = "cmpc",
.ids = cmpc_ipml_device_ids,
@@ -1064,7 +1058,6 @@ static const struct acpi_device_id cmpc_keys_device_ids[] = {
};
static struct acpi_driver cmpc_keys_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_keys",
.class = "cmpc_keys",
.ids = cmpc_keys_device_ids,
@@ -1144,3 +1137,5 @@ static const struct acpi_device_id cmpc_device_ids[] __maybe_unused = {
};
MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
+MODULE_DESCRIPTION("Support for Intel Classmate PC ACPI devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index bd9f445974cc..195a8bf532cc 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -145,6 +145,21 @@ config DELL_SMO8800
To compile this driver as a module, choose M here: the module will
be called dell-smo8800.
+config DELL_UART_BACKLIGHT
+ tristate "Dell AIO UART Backlight driver"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on SERIAL_DEV_BUS
+ help
+ Say Y here if you want to support the Dell AIO UART backlight interface.
+ The Dell AIO machines released after 2017 come with a UART interface
+ to communicate with the backlight scaler board. This driver creates
+ a standard backlight interface and talks to the scaler board through
+ UART to adjust the AIO screen brightness.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell_uart_backlight.
+
config DELL_WMI
tristate "Dell WMI notifications"
default m
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index 1b8942426622..8176a257d9c3 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -14,6 +14,7 @@ dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
dell-wmi-objs := dell-wmi-base.o
dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
diff --git a/drivers/platform/x86/dell/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
index c8fcb537fd65..a415c432d4c3 100644
--- a/drivers/platform/x86/dell/dell-rbtn.c
+++ b/drivers/platform/x86/dell/dell-rbtn.c
@@ -295,7 +295,6 @@ static struct acpi_driver rbtn_driver = {
.remove = rbtn_remove,
.notify = rbtn_notify,
},
- .owner = THIS_MODULE,
};
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
new file mode 100644
index 000000000000..87d2a20b4cb3
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dell AIO Serial Backlight Driver
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ * Copyright (C) 2017 AceLan Kao <acelan.kao@canonical.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/serdev.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include "../serdev_helpers.h"
+
+/* The backlight controller must respond within 1 second */
+#define DELL_BL_TIMEOUT msecs_to_jiffies(1000)
+#define DELL_BL_MAX_BRIGHTNESS 100
+
+/* Defines for the commands sent to the controller */
+
+/* 1st byte Start Of Frame: cmd-len in the 3 MSB bits, 01010 SOF marker in the low 5 bits */
+#define DELL_SOF(len) (((len) << 5) | 0x0a)
+#define GET_CMD_LEN 3
+#define SET_CMD_LEN 4
+
+/* 2nd byte command */
+#define CMD_GET_VERSION 0x06
+#define CMD_SET_BRIGHTNESS 0x0b
+#define CMD_GET_BRIGHTNESS 0x0c
+#define CMD_SET_BL_POWER 0x0e
+
+/* Indexes and other defines for response received from the controller */
+#define RESP_LEN 0
+#define RESP_CMD 1 /* Echo of CMD byte from command */
+#define RESP_DATA 2 /* Start of received data */
+
+#define SET_RESP_LEN 3
+#define GET_RESP_LEN 4
+#define MIN_RESP_LEN 3
+#define MAX_RESP_LEN 80
+
+struct dell_uart_backlight {
+ struct mutex mutex;
+ wait_queue_head_t wait_queue;
+ struct device *dev;
+ struct backlight_device *bl;
+ u8 *resp;
+ u8 resp_idx;
+ u8 resp_len;
+ u8 resp_max_len;
+ u8 pending_cmd;
+ int status;
+ int power;
+};
+
+/* Checksum: SUM(Length and Cmd and Data) xor 0xFF */
+static u8 dell_uart_checksum(u8 *buf, int len)
+{
+ u8 val = 0;
+
+ while (len-- > 0)
+ val += buf[len];
+
+ return val ^ 0xff;
+}
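For illustration, the three-byte get-brightness request assembled further down follows this checksum scheme; a minimal sketch, assuming only the DELL_SOF(), CMD_GET_BRIGHTNESS and dell_uart_checksum() definitions above:

	u8 frame[GET_CMD_LEN];

	frame[0] = DELL_SOF(GET_CMD_LEN);		/* (3 << 5) | 0x0a = 0x6a */
	frame[1] = CMD_GET_BRIGHTNESS;			/* 0x0c */
	frame[2] = dell_uart_checksum(frame, 2);	/* (0x6a + 0x0c) ^ 0xff = 0x89 */
	/* Bytes on the wire: 0x6a 0x0c 0x89 */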
+
+static int dell_uart_bl_command(struct dell_uart_backlight *dell_bl,
+ const u8 *cmd, int cmd_len,
+ u8 *resp, int resp_max_len)
+{
+ int ret;
+
+ ret = mutex_lock_killable(&dell_bl->mutex);
+ if (ret)
+ return ret;
+
+ dell_bl->status = -EBUSY;
+ dell_bl->resp = resp;
+ dell_bl->resp_idx = 0;
+ dell_bl->resp_len = -1; /* Invalid / unset */
+ dell_bl->resp_max_len = resp_max_len;
+ dell_bl->pending_cmd = cmd[1];
+
+ /* The TTY buffer should be big enough to take the entire cmd in one go */
+ ret = serdev_device_write_buf(to_serdev_device(dell_bl->dev), cmd, cmd_len);
+ if (ret != cmd_len) {
+ dev_err(dell_bl->dev, "Error writing command: %d\n", ret);
+ dell_bl->status = (ret < 0) ? ret : -EIO;
+ goto out;
+ }
+
+ ret = wait_event_timeout(dell_bl->wait_queue, dell_bl->status != -EBUSY,
+ DELL_BL_TIMEOUT);
+ if (ret == 0) {
+ dev_err(dell_bl->dev, "Timed out waiting for response.\n");
+ /* Clear busy status to discard bytes received after this */
+ dell_bl->status = -ETIMEDOUT;
+ }
+
+out:
+ mutex_unlock(&dell_bl->mutex);
+ return dell_bl->status;
+}
+
+static int dell_uart_set_brightness(struct dell_uart_backlight *dell_bl, int brightness)
+{
+ u8 set_brightness[SET_CMD_LEN], resp[SET_RESP_LEN];
+
+ set_brightness[0] = DELL_SOF(SET_CMD_LEN);
+ set_brightness[1] = CMD_SET_BRIGHTNESS;
+ set_brightness[2] = brightness;
+ set_brightness[3] = dell_uart_checksum(set_brightness, 3);
+
+ return dell_uart_bl_command(dell_bl, set_brightness, SET_CMD_LEN, resp, SET_RESP_LEN);
+}
+
+static int dell_uart_get_brightness(struct dell_uart_backlight *dell_bl)
+{
+ struct device *dev = dell_bl->dev;
+ u8 get_brightness[GET_CMD_LEN], resp[GET_RESP_LEN];
+ int ret;
+
+ get_brightness[0] = DELL_SOF(GET_CMD_LEN);
+ get_brightness[1] = CMD_GET_BRIGHTNESS;
+ get_brightness[2] = dell_uart_checksum(get_brightness, 2);
+
+ ret = dell_uart_bl_command(dell_bl, get_brightness, GET_CMD_LEN, resp, GET_RESP_LEN);
+ if (ret)
+ return ret;
+
+ if (resp[RESP_LEN] != GET_RESP_LEN) {
+ dev_err(dev, "Unexpected get brightness response length: %d\n", resp[RESP_LEN]);
+ return -EIO;
+ }
+
+ if (resp[RESP_DATA] > DELL_BL_MAX_BRIGHTNESS) {
+ dev_err(dev, "Unexpected get brightness response: %d\n", resp[RESP_DATA]);
+ return -EIO;
+ }
+
+ return resp[RESP_DATA];
+}
+
+static int dell_uart_set_bl_power(struct dell_uart_backlight *dell_bl, int power)
+{
+ u8 set_power[SET_CMD_LEN], resp[SET_RESP_LEN];
+ int ret;
+
+ set_power[0] = DELL_SOF(SET_CMD_LEN);
+ set_power[1] = CMD_SET_BL_POWER;
+ set_power[2] = (power == FB_BLANK_UNBLANK) ? 1 : 0;
+ set_power[3] = dell_uart_checksum(set_power, 3);
+
+ ret = dell_uart_bl_command(dell_bl, set_power, SET_CMD_LEN, resp, SET_RESP_LEN);
+ if (ret)
+ return ret;
+
+ dell_bl->power = power;
+ return 0;
+}
+
+/*
+ * There is no command to get the backlight power status, so we set the
+ * backlight power to "on" while initializing, and then track and report
+ * its status via the power variable.
+ */
+static int dell_uart_get_bl_power(struct dell_uart_backlight *dell_bl)
+{
+ return dell_bl->power;
+}
+
+static int dell_uart_update_status(struct backlight_device *bd)
+{
+ struct dell_uart_backlight *dell_bl = bl_get_data(bd);
+ int ret;
+
+ ret = dell_uart_set_brightness(dell_bl, bd->props.brightness);
+ if (ret)
+ return ret;
+
+ if (bd->props.power != dell_uart_get_bl_power(dell_bl))
+ return dell_uart_set_bl_power(dell_bl, bd->props.power);
+
+ return 0;
+}
+
+static int dell_uart_get_brightness_op(struct backlight_device *bd)
+{
+ return dell_uart_get_brightness(bl_get_data(bd));
+}
+
+static const struct backlight_ops dell_uart_backlight_ops = {
+ .update_status = dell_uart_update_status,
+ .get_brightness = dell_uart_get_brightness_op,
+};
+
+static size_t dell_uart_bl_receive(struct serdev_device *serdev, const u8 *data, size_t len)
+{
+ struct dell_uart_backlight *dell_bl = serdev_device_get_drvdata(serdev);
+ size_t i;
+ u8 csum;
+
+ dev_dbg(dell_bl->dev, "Recv: %*ph\n", (int)len, data);
+
+ /* Throw away unexpected bytes / remainder of response after an error */
+ if (dell_bl->status != -EBUSY) {
+ dev_warn(dell_bl->dev, "Bytes received out of band, dropping them.\n");
+ return len;
+ }
+
+ i = 0;
+ while (i < len && dell_bl->resp_idx != dell_bl->resp_len) {
+ dell_bl->resp[dell_bl->resp_idx] = data[i++];
+
+ switch (dell_bl->resp_idx) {
+ case RESP_LEN: /* Length byte */
+ dell_bl->resp_len = dell_bl->resp[RESP_LEN];
+ if (dell_bl->resp_len < MIN_RESP_LEN ||
+ dell_bl->resp_len > dell_bl->resp_max_len) {
+ dev_err(dell_bl->dev, "Response length %d out of range %d - %d\n",
+ dell_bl->resp_len, MIN_RESP_LEN, dell_bl->resp_max_len);
+ dell_bl->status = -EIO;
+ goto wakeup;
+ }
+ break;
+ case RESP_CMD: /* CMD byte */
+ if (dell_bl->resp[RESP_CMD] != dell_bl->pending_cmd) {
+ dev_err(dell_bl->dev, "Response cmd 0x%02x != pending 0x%02x\n",
+ dell_bl->resp[RESP_CMD], dell_bl->pending_cmd);
+ dell_bl->status = -EIO;
+ goto wakeup;
+ }
+ break;
+ }
+ dell_bl->resp_idx++;
+ }
+
+ if (dell_bl->resp_idx != dell_bl->resp_len)
+ return len; /* Response not complete yet */
+
+ csum = dell_uart_checksum(dell_bl->resp, dell_bl->resp_len - 1);
+ if (dell_bl->resp[dell_bl->resp_len - 1] == csum) {
+ dell_bl->status = 0; /* Success */
+ } else {
+ dev_err(dell_bl->dev, "Checksum mismatch got 0x%02x expected 0x%02x\n",
+ dell_bl->resp[dell_bl->resp_len - 1], csum);
+ dell_bl->status = -EIO;
+ }
+wakeup:
+ wake_up(&dell_bl->wait_queue);
+ return i;
+}
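To make the parsing above concrete, a get-brightness response is a four-byte frame. A worked example, assuming the controller reports a (hypothetical) brightness of 50:

	/* resp[RESP_LEN]  = 0x04   total frame length (GET_RESP_LEN)     */
	/* resp[RESP_CMD]  = 0x0c   echo of CMD_GET_BRIGHTNESS            */
	/* resp[RESP_DATA] = 0x32   brightness value 50                   */
	/* resp[3]         = 0xbd   checksum: (0x04 + 0x0c + 0x32) ^ 0xff */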
+
+static const struct serdev_device_ops dell_uart_bl_serdev_ops = {
+ .receive_buf = dell_uart_bl_receive,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
+{
+ u8 get_version[GET_CMD_LEN], resp[MAX_RESP_LEN];
+ struct backlight_properties props = {};
+ struct dell_uart_backlight *dell_bl;
+ struct device *dev = &serdev->dev;
+ int ret;
+
+ dell_bl = devm_kzalloc(dev, sizeof(*dell_bl), GFP_KERNEL);
+ if (!dell_bl)
+ return -ENOMEM;
+
+ mutex_init(&dell_bl->mutex);
+ init_waitqueue_head(&dell_bl->wait_queue);
+ dell_bl->dev = dev;
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "opening UART device\n");
+
+ /* 9600 bps, no flow control; these are the defaults, but set them to be sure */
+ serdev_device_set_baudrate(serdev, 9600);
+ serdev_device_set_flow_control(serdev, false);
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
+ get_version[0] = DELL_SOF(GET_CMD_LEN);
+ get_version[1] = CMD_GET_VERSION;
+ get_version[2] = dell_uart_checksum(get_version, 2);
+
+ ret = dell_uart_bl_command(dell_bl, get_version, GET_CMD_LEN, resp, MAX_RESP_LEN);
+ if (ret)
+ return dev_err_probe(dev, ret, "getting firmware version\n");
+
+ dev_dbg(dev, "Firmware version: %.*s\n", resp[RESP_LEN] - 3, resp + RESP_DATA);
+
+ /* Initialize bl_power to a known value */
+ ret = dell_uart_set_bl_power(dell_bl, FB_BLANK_UNBLANK);
+ if (ret)
+ return ret;
+
+ ret = dell_uart_get_brightness(dell_bl);
+ if (ret < 0)
+ return ret;
+
+ props.type = BACKLIGHT_PLATFORM;
+ props.brightness = ret;
+ props.max_brightness = DELL_BL_MAX_BRIGHTNESS;
+ props.power = dell_bl->power;
+
+ dell_bl->bl = devm_backlight_device_register(dev, "dell_uart_backlight",
+ dev, dell_bl,
+ &dell_uart_backlight_ops,
+ &props);
+ return PTR_ERR_OR_ZERO(dell_bl->bl);
+}
+
+struct serdev_device_driver dell_uart_bl_serdev_driver = {
+ .probe = dell_uart_bl_serdev_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int dell_uart_bl_pdev_probe(struct platform_device *pdev)
+{
+ struct serdev_device *serdev;
+ struct device *ctrl_dev;
+ int ret;
+
+ ctrl_dev = get_serdev_controller("DELL0501", NULL, 0, "serial0");
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
+
+ serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
+ put_device(ctrl_dev);
+ if (!serdev)
+ return -ENOMEM;
+
+ ret = serdev_device_add(serdev);
+ if (ret) {
+ dev_err(&pdev->dev, "error %d adding serdev\n", ret);
+ serdev_device_put(serdev);
+ return ret;
+ }
+
+ ret = serdev_device_driver_register(&dell_uart_bl_serdev_driver);
+ if (ret)
+ goto err_remove_serdev;
+
+ /*
+ * serdev device <-> driver matching relies on OF or ACPI matches, and
+ * neither is available here, so manually bind the driver.
+ */
+ ret = device_driver_attach(&dell_uart_bl_serdev_driver.driver, &serdev->dev);
+ if (ret)
+ goto err_unregister_serdev_driver;
+
+ /* So that dell_uart_bl_pdev_remove() can remove the serdev */
+ platform_set_drvdata(pdev, serdev);
+ return 0;
+
+err_unregister_serdev_driver:
+ serdev_device_driver_unregister(&dell_uart_bl_serdev_driver);
+err_remove_serdev:
+ serdev_device_remove(serdev);
+ return ret;
+}
+
+static void dell_uart_bl_pdev_remove(struct platform_device *pdev)
+{
+ struct serdev_device *serdev = platform_get_drvdata(pdev);
+
+ serdev_device_driver_unregister(&dell_uart_bl_serdev_driver);
+ serdev_device_remove(serdev);
+}
+
+static struct platform_driver dell_uart_bl_pdev_driver = {
+ .probe = dell_uart_bl_pdev_probe,
+ .remove_new = dell_uart_bl_pdev_remove,
+ .driver = {
+ .name = "dell-uart-backlight",
+ },
+};
+module_platform_driver(dell_uart_bl_pdev_driver);
+
+MODULE_ALIAS("platform:dell-uart-backlight");
+MODULE_DESCRIPTION("Dell AIO Serial Backlight driver");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ff1b70269ccb..447364bed249 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1463,7 +1463,6 @@ MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
static struct acpi_driver eeepc_acpi_driver = {
.name = EEEPC_LAPTOP_NAME,
.class = EEEPC_ACPI_CLASS,
- .owner = THIS_MODULE,
.ids = eeepc_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 94480af49467..968fc91bd5e4 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -386,11 +386,11 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_LID))
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
if (priv->flags_state & FLAG_LID)
- return sprintf(buf, "open\n");
+ return sysfs_emit(buf, "open\n");
else
- return sprintf(buf, "closed\n");
+ return sysfs_emit(buf, "closed\n");
}
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
@@ -399,11 +399,11 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_DOCK))
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
if (priv->flags_state & FLAG_DOCK)
- return sprintf(buf, "docked\n");
+ return sysfs_emit(buf, "docked\n");
else
- return sprintf(buf, "undocked\n");
+ return sysfs_emit(buf, "undocked\n");
}
static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
@@ -412,11 +412,11 @@ static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_RFKILL))
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
if (priv->flags_state & FLAG_RFKILL)
- return sprintf(buf, "on\n");
+ return sysfs_emit(buf, "on\n");
else
- return sprintf(buf, "killed\n");
+ return sysfs_emit(buf, "killed\n");
}
static DEVICE_ATTR_RO(lid);
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 630519c08617..5fa553023842 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -681,7 +681,7 @@ static ssize_t display_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
@@ -691,7 +691,7 @@ static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t als_show(struct device *dev, struct device_attribute *attr,
@@ -701,7 +701,7 @@ static ssize_t als_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
@@ -711,7 +711,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
@@ -721,7 +721,7 @@ static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
@@ -732,7 +732,7 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
if (value < 0)
return value;
- return sprintf(buf, "0x%x\n", value);
+ return sysfs_emit(buf, "0x%x\n", value);
}
static ssize_t als_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index dde139c69945..09d476dd832e 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -379,7 +379,7 @@ static ssize_t charge_control_start_threshold_show(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%d\n", start);
+ return sysfs_emit(buf, "%d\n", start);
}
static ssize_t charge_control_end_threshold_show(struct device *dev,
@@ -392,7 +392,7 @@ static ssize_t charge_control_end_threshold_show(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%d\n", end);
+ return sysfs_emit(buf, "%d\n", end);
}
static ssize_t charge_control_thresholds_show(struct device *dev,
@@ -405,7 +405,7 @@ static ssize_t charge_control_thresholds_show(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%d %d\n", start, end);
+ return sysfs_emit(buf, "%d %d\n", start, end);
}
static ssize_t charge_control_start_threshold_store(struct device *dev,
@@ -562,7 +562,7 @@ static ssize_t fn_lock_state_show(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%d\n", on);
+ return sysfs_emit(buf, "%d\n", on);
}
static ssize_t fn_lock_state_store(struct device *dev,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 901849810ce2..fcf13d88fd6e 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -152,6 +152,11 @@ struct ideapad_private {
struct led_classdev led;
unsigned int last_brightness;
} kbd_bl;
+ struct {
+ bool initialized;
+ struct led_classdev led;
+ unsigned int last_brightness;
+ } fn_lock;
};
static bool no_bt_rfkill;
@@ -513,11 +518,8 @@ static ssize_t fan_mode_store(struct device *dev,
static DEVICE_ATTR_RW(fan_mode);
-static ssize_t fn_lock_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ideapad_fn_lock_get(struct ideapad_private *priv)
{
- struct ideapad_private *priv = dev_get_drvdata(dev);
unsigned long hals;
int err;
@@ -525,7 +527,40 @@ static ssize_t fn_lock_show(struct device *dev,
if (err)
return err;
- return sysfs_emit(buf, "%d\n", !!test_bit(HALS_FNLOCK_STATE_BIT, &hals));
+ return !!test_bit(HALS_FNLOCK_STATE_BIT, &hals);
+}
+
+static int ideapad_fn_lock_set(struct ideapad_private *priv, bool state)
+{
+ return exec_sals(priv->adev->handle,
+ state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+}
+
+static void ideapad_fn_lock_led_notify(struct ideapad_private *priv, int brightness)
+{
+ if (!priv->fn_lock.initialized)
+ return;
+
+ if (brightness == priv->fn_lock.last_brightness)
+ return;
+
+ priv->fn_lock.last_brightness = brightness;
+
+ led_classdev_notify_brightness_hw_changed(&priv->fn_lock.led, brightness);
+}
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ int brightness;
+
+ brightness = ideapad_fn_lock_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ return sysfs_emit(buf, "%d\n", brightness);
}
static ssize_t fn_lock_store(struct device *dev,
@@ -540,10 +575,12 @@ static ssize_t fn_lock_store(struct device *dev,
if (err)
return err;
- err = exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ err = ideapad_fn_lock_set(priv, state);
if (err)
return err;
+ ideapad_fn_lock_led_notify(priv, state);
+
return count;
}
@@ -1181,8 +1218,11 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
switch (bit) {
case 6: /* Z570 */
case 0: /* Z580 */
- /* Thermal Management button */
- ideapad_input_report(priv, 65);
+ /* Thermal Management / Performance Mode button */
+ if (priv->dytc)
+ platform_profile_cycle();
+ else
+ ideapad_input_report(priv, 65);
break;
case 1:
/* OneKey Theater button */
@@ -1463,6 +1503,65 @@ static void ideapad_kbd_bl_exit(struct ideapad_private *priv)
}
/*
+ * FnLock LED
+ */
+static enum led_brightness ideapad_fn_lock_led_cdev_get(struct led_classdev *led_cdev)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, fn_lock.led);
+
+ return ideapad_fn_lock_get(priv);
+}
+
+static int ideapad_fn_lock_led_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ideapad_private *priv = container_of(led_cdev, struct ideapad_private, fn_lock.led);
+
+ return ideapad_fn_lock_set(priv, brightness);
+}
+
+static int ideapad_fn_lock_led_init(struct ideapad_private *priv)
+{
+ int brightness, err;
+
+ if (!priv->features.fn_lock)
+ return -ENODEV;
+
+ if (WARN_ON(priv->fn_lock.initialized))
+ return -EEXIST;
+
+ priv->fn_lock.led.max_brightness = 1;
+
+ brightness = ideapad_fn_lock_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ priv->fn_lock.last_brightness = brightness;
+ priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK;
+ priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get;
+ priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set;
+ priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED;
+
+ err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led);
+ if (err)
+ return err;
+
+ priv->fn_lock.initialized = true;
+
+ return 0;
+}
+
+static void ideapad_fn_lock_led_exit(struct ideapad_private *priv)
+{
+ if (!priv->fn_lock.initialized)
+ return;
+
+ priv->fn_lock.initialized = false;
+
+ led_classdev_unregister(&priv->fn_lock.led);
+}
+
+/*
* module init/exit
*/
static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_events)
@@ -1709,7 +1808,6 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
{
struct ideapad_wmi_private *wpriv = dev_get_drvdata(&wdev->dev);
struct ideapad_private *priv;
- unsigned long result;
mutex_lock(&ideapad_shared_mutex);
@@ -1722,11 +1820,13 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
ideapad_input_report(priv, 128);
break;
case IDEAPAD_WMI_EVENT_FN_KEYS:
- if (priv->features.set_fn_lock_led &&
- !eval_hals(priv->adev->handle, &result)) {
- bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
+ if (priv->features.set_fn_lock_led) {
+ int brightness = ideapad_fn_lock_get(priv);
- exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+ if (brightness >= 0) {
+ ideapad_fn_lock_set(priv, brightness);
+ ideapad_fn_lock_led_notify(priv, brightness);
+ }
}
if (data->type != ACPI_TYPE_INTEGER) {
@@ -1738,6 +1838,10 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
data->integer.value);
+ /* 0x02 FnLock, 0x03 Esc */
+ if (data->integer.value == 0x02 || data->integer.value == 0x03)
+ ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02);
+
ideapad_input_report(priv,
data->integer.value | IDEAPAD_WMI_KEY);
@@ -1831,6 +1935,14 @@ static int ideapad_acpi_add(struct platform_device *pdev)
dev_info(&pdev->dev, "Keyboard backlight control not available\n");
}
+ err = ideapad_fn_lock_led_init(priv);
+ if (err) {
+ if (err != -ENODEV)
+ dev_warn(&pdev->dev, "Could not set up FnLock LED: %d\n", err);
+ else
+ dev_info(&pdev->dev, "FnLock control not available\n");
+ }
+
/*
* On some models without a hw-switch (the yoga 2 13 at least)
* VPCCMD_W_RF must be explicitly set to 1 for the wifi to work.
@@ -1887,6 +1999,7 @@ backlight_failed:
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(priv, i);
+ ideapad_fn_lock_led_exit(priv);
ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
@@ -1914,6 +2027,7 @@ static void ideapad_acpi_remove(struct platform_device *pdev)
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(priv, i);
+ ideapad_fn_lock_led_exit(priv);
ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
diff --git a/drivers/platform/x86/inspur_platform_profile.c b/drivers/platform/x86/inspur_platform_profile.c
index 743705bddda3..8440defa6788 100644
--- a/drivers/platform/x86/inspur_platform_profile.c
+++ b/drivers/platform/x86/inspur_platform_profile.c
@@ -207,6 +207,7 @@ static struct wmi_driver inspur_wmi_driver = {
.id_table = inspur_wmi_id_table,
.probe = inspur_wmi_probe,
.remove = inspur_wmi_remove,
+ .no_singleton = true,
};
module_wmi_driver(inspur_wmi_driver);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27a6..c7a827645864 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -49,6 +49,8 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1076", 0},
{"INTC1077", 0},
{"INTC1078", 0},
+ {"INTC107B", 0},
+ {"INTC10CB", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -504,6 +506,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +548,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 584c44387e10..39f19cb51749 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -233,7 +233,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
chunk_table[0] = starting_chunk_nr + i;
chunk_table[1] = linear_addr;
do {
+ local_irq_disable();
wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table);
+ local_irq_enable();
rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
err_code = chunk_status.error_code;
} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index 95b4b71fab53..282e4bfe30da 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -69,6 +69,19 @@ static const char * const scan_test_status[] = {
static void message_not_tested(struct device *dev, int cpu, union ifs_status status)
{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ /*
+ * control_error is set when the microcode runs into a problem
+ * loading the image from the reserved BIOS memory, or it has
+ * been corrupted. Reloading the image may fix this issue.
+ */
+ if (status.control_error) {
+ dev_warn(dev, "CPU(s) %*pbl: Scan controller error. Batch: %02x version: 0x%x\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
+ return;
+ }
+
if (status.error_code < ARRAY_SIZE(scan_test_status)) {
dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n",
cpumask_pr_args(cpu_smt_mask(cpu)),
@@ -91,16 +104,6 @@ static void message_fail(struct device *dev, int cpu, union ifs_status status)
struct ifs_data *ifsd = ifs_get_data(dev);
/*
- * control_error is set when the microcode runs into a problem
- * loading the image from the reserved BIOS memory, or it has
- * been corrupted. Reloading the image may fix this issue.
- */
- if (status.control_error) {
- dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n",
- cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
- }
-
- /*
* signature_error is set when the output from the scan chains does not
* match the expected signature. This might be a transient problem (e.g.
* due to a bit flip from an alpha particle or neutron). If the problem
@@ -285,10 +288,10 @@ static void ifs_test_core(int cpu, struct device *dev)
/* Update status for this core */
ifsd->scan_details = status.data;
- if (status.control_error || status.signature_error) {
+ if (status.signature_error) {
ifsd->status = SCAN_TEST_FAIL;
message_fail(dev, cpu, status);
- } else if (status.error_code) {
+ } else if (status.control_error || status.error_code) {
ifsd->status = SCAN_NOT_TESTED;
message_not_tested(dev, cpu, status);
} else {
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 34b4cd23bfe5..e10527c4e3e0 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains platform specific structure definitions
- * and init function used by Meteor Lake PCH.
+ * and init function used by Arrow Lake PCH.
*
* Copyright (c) 2022, Intel Corporation.
* All Rights Reserved.
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 10c96c1a850a..2ad2f8753e5d 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -678,6 +678,41 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ unsigned int pmcidx;
+
+ for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
+ const struct pmc_bit_map **maps;
+ unsigned int arr_size, r_idx;
+ u32 offset, counter;
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[pmcidx];
+ if (!pmc)
+ continue;
+ maps = pmc->map->s0ix_blocker_maps;
+ offset = pmc->map->s0ix_blocker_offset;
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+
+ for (r_idx = 0; r_idx < arr_size; r_idx++) {
+ const struct pmc_bit_map *map;
+
+ for (map = maps[r_idx]; map->name; map++) {
+ if (!map->blk)
+ continue;
+ counter = pmc_core_reg_read(pmc, offset);
+ seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
+ map->name, counter);
+ offset += map->blk * S0IX_BLK_SIZE;
+ }
+ }
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
+
static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
const int lpm_adj_x2)
{
@@ -1197,6 +1232,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+ if (primary_pmc->map->s0ix_blocker_maps)
+ debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);
+
debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
&pmc_core_pkgc_fops);
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 83504c49a0e3..ea04de7eb9e8 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -22,6 +22,7 @@ struct telem_endpoint;
#define PMC_BASE_ADDR_DEFAULT 0xFE000000
#define MAX_NUM_PMC 3
+#define S0IX_BLK_SIZE 4
/* Sunrise Point Power Management Controller PCI Device ID */
#define SPT_PMC_PCI_DEVICE_ID 0x9d21
@@ -282,12 +283,14 @@ enum ppfear_regs {
#define LNL_PMC_LTR_OSSE 0x1B88
#define LNL_NUM_IP_IGN_ALLOWED 27
#define LNL_PPFEAR_NUM_ENTRIES 12
+#define LNL_S0IX_BLOCKER_OFFSET 0x2004
extern const char *pmc_lpm_modes[];
struct pmc_bit_map {
const char *name;
u32 bit_mask;
+ u8 blk;
};
/**
@@ -298,6 +301,7 @@ struct pmc_bit_map {
* @pll_sts: Maps name of PLL to corresponding bit status
* @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
* @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets
+ * @s0ix_blocker_maps: Maps name of IP block to S0ix blocker counter
* @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
* @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
* @regmap_length: Length of memory to map from PWRMBASE address to access
@@ -307,6 +311,7 @@ struct pmc_bit_map {
* @pm_cfg_offset: PWRMBASE offset to PM_CFG register
* @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
* @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
+ * @s0ix_blocker_offset: PWRMBASE offset to S0ix blocker counter
*
* Each PCH has a unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -319,6 +324,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *ltr_show_sts;
const struct pmc_bit_map *msr_sts;
const struct pmc_bit_map **lpm_sts;
+ const struct pmc_bit_map **s0ix_blocker_maps;
const u32 slp_s0_offset;
const int slp_s0_res_counter_step;
const u32 ltr_ignore_offset;
@@ -330,6 +336,7 @@ struct pmc_reg_map {
const u32 slps0_dbg_offset;
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
+ const u32 s0ix_blocker_offset;
/* Low Power Mode registers */
const int lpm_num_maps;
const int lpm_num_modes;
@@ -535,8 +542,10 @@ extern const struct pmc_bit_map lnl_vnn_req_status_2_map[];
extern const struct pmc_bit_map lnl_vnn_req_status_3_map[];
extern const struct pmc_bit_map lnl_vnn_misc_status_map[];
extern const struct pmc_bit_map *lnl_lpm_maps[];
+extern const struct pmc_bit_map *lnl_blk_maps[];
extern const struct pmc_bit_map lnl_pfear_map[];
extern const struct pmc_bit_map *ext_lnl_pfear_map[];
+extern const struct pmc_bit_map lnl_signal_status_map[];
/* ARL */
extern const struct pmc_bit_map arl_socs_ltr_show_map[];
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index 068d72504683..e7a8077d1a3e 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains platform specific structure definitions
- * and init function used by Meteor Lake PCH.
+ * and init function used by Lunar Lake PCH.
*
* Copyright (c) 2022, Intel Corporation.
* All Rights Reserved.
@@ -56,264 +56,296 @@ const struct pmc_bit_map lnl_ltr_show_map[] = {
};
const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
- {"PMC_PGD0_PG_STS", BIT(0)},
- {"FUSE_OSSE_PGD0_PG_STS", BIT(1)},
- {"ESPISPI_PGD0_PG_STS", BIT(2)},
- {"XHCI_PGD0_PG_STS", BIT(3)},
- {"SPA_PGD0_PG_STS", BIT(4)},
- {"SPB_PGD0_PG_STS", BIT(5)},
- {"SPR16B0_PGD0_PG_STS", BIT(6)},
- {"GBE_PGD0_PG_STS", BIT(7)},
- {"SBR8B7_PGD0_PG_STS", BIT(8)},
- {"SBR8B6_PGD0_PG_STS", BIT(9)},
- {"SBR16B1_PGD0_PG_STS", BIT(10)},
- {"SBR8B8_PGD0_PG_STS", BIT(11)},
- {"ESE_PGD3_PG_STS", BIT(12)},
- {"D2D_DISP_PGD0_PG_STS", BIT(13)},
- {"LPSS_PGD0_PG_STS", BIT(14)},
- {"LPC_PGD0_PG_STS", BIT(15)},
- {"SMB_PGD0_PG_STS", BIT(16)},
- {"ISH_PGD0_PG_STS", BIT(17)},
- {"SBR8B2_PGD0_PG_STS", BIT(18)},
- {"NPK_PGD0_PG_STS", BIT(19)},
- {"D2D_NOC_PGD0_PG_STS", BIT(20)},
- {"SAFSS_PGD0_PG_STS", BIT(21)},
- {"FUSE_PGD0_PG_STS", BIT(22)},
- {"D2D_DISP_PGD1_PG_STS", BIT(23)},
- {"MPFPW1_PGD0_PG_STS", BIT(24)},
- {"XDCI_PGD0_PG_STS", BIT(25)},
- {"EXI_PGD0_PG_STS", BIT(26)},
- {"CSE_PGD0_PG_STS", BIT(27)},
- {"KVMCC_PGD0_PG_STS", BIT(28)},
- {"PMT_PGD0_PG_STS", BIT(29)},
- {"CLINK_PGD0_PG_STS", BIT(30)},
- {"PTIO_PGD0_PG_STS", BIT(31)},
+ {"PMC_PGD0_PG_STS", BIT(0), 0},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
+ {"ESPISPI_PGD0_PG_STS", BIT(2), 0},
+ {"XHCI_PGD0_PG_STS", BIT(3), 1},
+ {"SPA_PGD0_PG_STS", BIT(4), 1},
+ {"SPB_PGD0_PG_STS", BIT(5), 1},
+ {"SPR16B0_PGD0_PG_STS", BIT(6), 0},
+ {"GBE_PGD0_PG_STS", BIT(7), 1},
+ {"SBR8B7_PGD0_PG_STS", BIT(8), 0},
+ {"SBR8B6_PGD0_PG_STS", BIT(9), 0},
+ {"SBR16B1_PGD0_PG_STS", BIT(10), 0},
+ {"SBR8B8_PGD0_PG_STS", BIT(11), 0},
+ {"ESE_PGD3_PG_STS", BIT(12), 1},
+ {"D2D_DISP_PGD0_PG_STS", BIT(13), 1},
+ {"LPSS_PGD0_PG_STS", BIT(14), 1},
+ {"LPC_PGD0_PG_STS", BIT(15), 0},
+ {"SMB_PGD0_PG_STS", BIT(16), 0},
+ {"ISH_PGD0_PG_STS", BIT(17), 0},
+ {"SBR8B2_PGD0_PG_STS", BIT(18), 0},
+ {"NPK_PGD0_PG_STS", BIT(19), 0},
+ {"D2D_NOC_PGD0_PG_STS", BIT(20), 0},
+ {"SAFSS_PGD0_PG_STS", BIT(21), 0},
+ {"FUSE_PGD0_PG_STS", BIT(22), 0},
+ {"D2D_DISP_PGD1_PG_STS", BIT(23), 1},
+ {"MPFPW1_PGD0_PG_STS", BIT(24), 0},
+ {"XDCI_PGD0_PG_STS", BIT(25), 1},
+ {"EXI_PGD0_PG_STS", BIT(26), 0},
+ {"CSE_PGD0_PG_STS", BIT(27), 1},
+ {"KVMCC_PGD0_PG_STS", BIT(28), 1},
+ {"PMT_PGD0_PG_STS", BIT(29), 1},
+ {"CLINK_PGD0_PG_STS", BIT(30), 1},
+ {"PTIO_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
- {"USBR0_PGD0_PG_STS", BIT(0)},
- {"SUSRAM_PGD0_PG_STS", BIT(1)},
- {"SMT1_PGD0_PG_STS", BIT(2)},
- {"U3FPW1_PGD0_PG_STS", BIT(3)},
- {"SMS2_PGD0_PG_STS", BIT(4)},
- {"SMS1_PGD0_PG_STS", BIT(5)},
- {"CSMERTC_PGD0_PG_STS", BIT(6)},
- {"CSMEPSF_PGD0_PG_STS", BIT(7)},
- {"FIA_PG_PGD0_PG_STS", BIT(8)},
- {"SBR16B4_PGD0_PG_STS", BIT(9)},
- {"P2SB8B_PGD0_PG_STS", BIT(10)},
- {"DBG_SBR_PGD0_PG_STS", BIT(11)},
- {"SBR8B9_PGD0_PG_STS", BIT(12)},
- {"OSSE_SMT1_PGD0_PG_STS", BIT(13)},
- {"SBR8B10_PGD0_PG_STS", BIT(14)},
- {"SBR16B3_PGD0_PG_STS", BIT(15)},
- {"G5FPW1_PGD0_PG_STS", BIT(16)},
- {"SBRG_PGD0_PG_STS", BIT(17)},
- {"PSF4_PGD0_PG_STS", BIT(18)},
- {"CNVI_PGD0_PG_STS", BIT(19)},
- {"USFX2_PGD0_PG_STS", BIT(20)},
- {"ENDBG_PGD0_PG_STS", BIT(21)},
- {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22)},
- {"SBR8B3_PGD0_PG_STS", BIT(23)},
- {"SBR8B0_PGD0_PG_STS", BIT(24)},
- {"NPK_PGD1_PG_STS", BIT(25)},
- {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26)},
- {"D2D_NOC_PGD2_PG_STS", BIT(27)},
- {"SBR8B1_PGD0_PG_STS", BIT(28)},
- {"PSF6_PGD0_PG_STS", BIT(29)},
- {"PSF7_PGD0_PG_STS", BIT(30)},
- {"FIA_U_PGD0_PG_STS", BIT(31)},
+ {"USBR0_PGD0_PG_STS", BIT(0), 1},
+ {"SUSRAM_PGD0_PG_STS", BIT(1), 1},
+ {"SMT1_PGD0_PG_STS", BIT(2), 1},
+ {"U3FPW1_PGD0_PG_STS", BIT(3), 0},
+ {"SMS2_PGD0_PG_STS", BIT(4), 1},
+ {"SMS1_PGD0_PG_STS", BIT(5), 1},
+ {"CSMERTC_PGD0_PG_STS", BIT(6), 0},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
+ {"FIA_PG_PGD0_PG_STS", BIT(8), 0},
+ {"SBR16B4_PGD0_PG_STS", BIT(9), 0},
+ {"P2SB8B_PGD0_PG_STS", BIT(10), 1},
+ {"DBG_SBR_PGD0_PG_STS", BIT(11), 0},
+ {"SBR8B9_PGD0_PG_STS", BIT(12), 0},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
+ {"SBR8B10_PGD0_PG_STS", BIT(14), 0},
+ {"SBR16B3_PGD0_PG_STS", BIT(15), 0},
+ {"G5FPW1_PGD0_PG_STS", BIT(16), 0},
+ {"SBRG_PGD0_PG_STS", BIT(17), 0},
+ {"PSF4_PGD0_PG_STS", BIT(18), 0},
+ {"CNVI_PGD0_PG_STS", BIT(19), 0},
+ {"USFX2_PGD0_PG_STS", BIT(20), 1},
+ {"ENDBG_PGD0_PG_STS", BIT(21), 0},
+ {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22), 0},
+ {"SBR8B3_PGD0_PG_STS", BIT(23), 0},
+ {"SBR8B0_PGD0_PG_STS", BIT(24), 0},
+ {"NPK_PGD1_PG_STS", BIT(25), 0},
+ {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26), 1},
+ {"D2D_NOC_PGD2_PG_STS", BIT(27), 1},
+ {"SBR8B1_PGD0_PG_STS", BIT(28), 0},
+ {"PSF6_PGD0_PG_STS", BIT(29), 0},
+ {"PSF7_PGD0_PG_STS", BIT(30), 0},
+ {"FIA_U_PGD0_PG_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
- {"PSF8_PGD0_PG_STS", BIT(0)},
- {"SBR16B2_PGD0_PG_STS", BIT(1)},
- {"D2D_IPU_PGD0_PG_STS", BIT(2)},
- {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
- {"TAM_PGD0_PG_STS", BIT(4)},
- {"D2D_NOC_PGD1_PG_STS", BIT(5)},
- {"TBTLSX_PGD0_PG_STS", BIT(6)},
- {"THC0_PGD0_PG_STS", BIT(7)},
- {"THC1_PGD0_PG_STS", BIT(8)},
- {"PMC_PGD0_PG_STS", BIT(9)},
- {"SBR8B5_PGD0_PG_STS", BIT(10)},
- {"UFSPW1_PGD0_PG_STS", BIT(11)},
- {"DBC_PGD0_PG_STS", BIT(12)},
- {"TCSS_PGD0_PG_STS", BIT(13)},
- {"FIA_P5X4_PGD0_PG_STS", BIT(14)},
- {"DISP_PGA_PGD0_PG_STS", BIT(15)},
- {"DISP_PSF_PGD0_PG_STS", BIT(16)},
- {"PSF0_PGD0_PG_STS", BIT(17)},
- {"P2SB16B_PGD0_PG_STS", BIT(18)},
- {"ACE_PGD0_PG_STS", BIT(19)},
- {"ACE_PGD1_PG_STS", BIT(20)},
- {"ACE_PGD2_PG_STS", BIT(21)},
- {"ACE_PGD3_PG_STS", BIT(22)},
- {"ACE_PGD4_PG_STS", BIT(23)},
- {"ACE_PGD5_PG_STS", BIT(24)},
- {"ACE_PGD6_PG_STS", BIT(25)},
- {"ACE_PGD7_PG_STS", BIT(26)},
- {"ACE_PGD8_PG_STS", BIT(27)},
- {"ACE_PGD9_PG_STS", BIT(28)},
- {"ACE_PGD10_PG_STS", BIT(29)},
- {"FIACPCB_PG_PGD0_PG_STS", BIT(30)},
- {"OSSE_PGD0_PG_STS", BIT(31)},
+ {"PSF8_PGD0_PG_STS", BIT(0), 0},
+ {"SBR16B2_PGD0_PG_STS", BIT(1), 0},
+ {"D2D_IPU_PGD0_PG_STS", BIT(2), 1},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
+ {"TAM_PGD0_PG_STS", BIT(4), 1},
+ {"D2D_NOC_PGD1_PG_STS", BIT(5), 1},
+ {"TBTLSX_PGD0_PG_STS", BIT(6), 1},
+ {"THC0_PGD0_PG_STS", BIT(7), 1},
+ {"THC1_PGD0_PG_STS", BIT(8), 1},
+ {"PMC_PGD0_PG_STS", BIT(9), 0},
+ {"SBR8B5_PGD0_PG_STS", BIT(10), 0},
+ {"UFSPW1_PGD0_PG_STS", BIT(11), 0},
+ {"DBC_PGD0_PG_STS", BIT(12), 0},
+ {"TCSS_PGD0_PG_STS", BIT(13), 0},
+ {"FIA_P5X4_PGD0_PG_STS", BIT(14), 0},
+ {"DISP_PGA_PGD0_PG_STS", BIT(15), 0},
+ {"DISP_PSF_PGD0_PG_STS", BIT(16), 0},
+ {"PSF0_PGD0_PG_STS", BIT(17), 0},
+ {"P2SB16B_PGD0_PG_STS", BIT(18), 1},
+ {"ACE_PGD0_PG_STS", BIT(19), 0},
+ {"ACE_PGD1_PG_STS", BIT(20), 0},
+ {"ACE_PGD2_PG_STS", BIT(21), 0},
+ {"ACE_PGD3_PG_STS", BIT(22), 0},
+ {"ACE_PGD4_PG_STS", BIT(23), 0},
+ {"ACE_PGD5_PG_STS", BIT(24), 0},
+ {"ACE_PGD6_PG_STS", BIT(25), 0},
+ {"ACE_PGD7_PG_STS", BIT(26), 0},
+ {"ACE_PGD8_PG_STS", BIT(27), 0},
+ {"ACE_PGD9_PG_STS", BIT(28), 0},
+ {"ACE_PGD10_PG_STS", BIT(29), 0},
+ {"FIACPCB_PG_PGD0_PG_STS", BIT(30), 0},
+ {"OSSE_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_d3_status_0_map[] = {
- {"LPSS_D3_STS", BIT(3)},
- {"XDCI_D3_STS", BIT(4)},
- {"XHCI_D3_STS", BIT(5)},
- {"SPA_D3_STS", BIT(12)},
- {"SPB_D3_STS", BIT(13)},
- {"OSSE_D3_STS", BIT(15)},
- {"ESPISPI_D3_STS", BIT(18)},
- {"PSTH_D3_STS", BIT(21)},
+ {"LPSS_D3_STS", BIT(3), 1},
+ {"XDCI_D3_STS", BIT(4), 1},
+ {"XHCI_D3_STS", BIT(5), 1},
+ {"SPA_D3_STS", BIT(12), 0},
+ {"SPB_D3_STS", BIT(13), 0},
+ {"OSSE_D3_STS", BIT(15), 0},
+ {"ESPISPI_D3_STS", BIT(18), 0},
+ {"PSTH_D3_STS", BIT(21), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_1_map[] = {
- {"OSSE_SMT1_D3_STS", BIT(7)},
- {"GBE_D3_STS", BIT(19)},
- {"ITSS_D3_STS", BIT(23)},
- {"CNVI_D3_STS", BIT(27)},
- {"UFSX2_D3_STS", BIT(28)},
- {"OSSE_HOTHAM_D3_STS", BIT(31)},
+ {"OSSE_SMT1_D3_STS", BIT(7), 0},
+ {"GBE_D3_STS", BIT(19), 0},
+ {"ITSS_D3_STS", BIT(23), 0},
+ {"CNVI_D3_STS", BIT(27), 0},
+ {"UFSX2_D3_STS", BIT(28), 1},
+ {"OSSE_HOTHAM_D3_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_2_map[] = {
- {"ESE_D3_STS", BIT(0)},
- {"CSMERTC_D3_STS", BIT(1)},
- {"SUSRAM_D3_STS", BIT(2)},
- {"CSE_D3_STS", BIT(4)},
- {"KVMCC_D3_STS", BIT(5)},
- {"USBR0_D3_STS", BIT(6)},
- {"ISH_D3_STS", BIT(7)},
- {"SMT1_D3_STS", BIT(8)},
- {"SMT2_D3_STS", BIT(9)},
- {"SMT3_D3_STS", BIT(10)},
- {"OSSE_SMT2_D3_STS", BIT(13)},
- {"CLINK_D3_STS", BIT(14)},
- {"PTIO_D3_STS", BIT(16)},
- {"PMT_D3_STS", BIT(17)},
- {"SMS1_D3_STS", BIT(18)},
- {"SMS2_D3_STS", BIT(19)},
+ {"ESE_D3_STS", BIT(0), 0},
+ {"CSMERTC_D3_STS", BIT(1), 0},
+ {"SUSRAM_D3_STS", BIT(2), 0},
+ {"CSE_D3_STS", BIT(4), 0},
+ {"KVMCC_D3_STS", BIT(5), 0},
+ {"USBR0_D3_STS", BIT(6), 0},
+ {"ISH_D3_STS", BIT(7), 0},
+ {"SMT1_D3_STS", BIT(8), 0},
+ {"SMT2_D3_STS", BIT(9), 0},
+ {"SMT3_D3_STS", BIT(10), 0},
+ {"OSSE_SMT2_D3_STS", BIT(13), 0},
+ {"CLINK_D3_STS", BIT(14), 0},
+ {"PTIO_D3_STS", BIT(16), 0},
+ {"PMT_D3_STS", BIT(17), 0},
+ {"SMS1_D3_STS", BIT(18), 0},
+ {"SMS2_D3_STS", BIT(19), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_3_map[] = {
- {"THC0_D3_STS", BIT(14)},
- {"THC1_D3_STS", BIT(15)},
- {"OSSE_SMT3_D3_STS", BIT(21)},
- {"ACE_D3_STS", BIT(23)},
+ {"THC0_D3_STS", BIT(14), 1},
+ {"THC1_D3_STS", BIT(15), 1},
+ {"OSSE_SMT3_D3_STS", BIT(21), 0},
+ {"ACE_D3_STS", BIT(23), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
- {"LPSS_VNN_REQ_STS", BIT(3)},
- {"OSSE_VNN_REQ_STS", BIT(15)},
- {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {"LPSS_VNN_REQ_STS", BIT(3), 1},
+ {"OSSE_VNN_REQ_STS", BIT(15), 1},
+ {"ESPISPI_VNN_REQ_STS", BIT(18), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
- {"NPK_VNN_REQ_STS", BIT(4)},
- {"OSSE_SMT1_VNN_REQ_STS", BIT(7)},
- {"DFXAGG_VNN_REQ_STS", BIT(8)},
- {"EXI_VNN_REQ_STS", BIT(9)},
- {"P2D_VNN_REQ_STS", BIT(18)},
- {"GBE_VNN_REQ_STS", BIT(19)},
- {"SMB_VNN_REQ_STS", BIT(25)},
- {"LPC_VNN_REQ_STS", BIT(26)},
+ {"NPK_VNN_REQ_STS", BIT(4), 1},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1},
+ {"DFXAGG_VNN_REQ_STS", BIT(8), 0},
+ {"EXI_VNN_REQ_STS", BIT(9), 1},
+ {"P2D_VNN_REQ_STS", BIT(18), 1},
+ {"GBE_VNN_REQ_STS", BIT(19), 1},
+ {"SMB_VNN_REQ_STS", BIT(25), 1},
+ {"LPC_VNN_REQ_STS", BIT(26), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
- {"eSE_VNN_REQ_STS", BIT(0)},
- {"CSMERTC_VNN_REQ_STS", BIT(1)},
- {"CSE_VNN_REQ_STS", BIT(4)},
- {"ISH_VNN_REQ_STS", BIT(7)},
- {"SMT1_VNN_REQ_STS", BIT(8)},
- {"CLINK_VNN_REQ_STS", BIT(14)},
- {"SMS1_VNN_REQ_STS", BIT(18)},
- {"SMS2_VNN_REQ_STS", BIT(19)},
- {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
- {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
- {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
- {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
- {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {"eSE_VNN_REQ_STS", BIT(0), 1},
+ {"CSMERTC_VNN_REQ_STS", BIT(1), 1},
+ {"CSE_VNN_REQ_STS", BIT(4), 1},
+ {"ISH_VNN_REQ_STS", BIT(7), 1},
+ {"SMT1_VNN_REQ_STS", BIT(8), 1},
+ {"CLINK_VNN_REQ_STS", BIT(14), 1},
+ {"SMS1_VNN_REQ_STS", BIT(18), 1},
+ {"SMS2_VNN_REQ_STS", BIT(19), 1},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22), 0},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
- {"DISP_SHIM_VNN_REQ_STS", BIT(2)},
- {"DTS0_VNN_REQ_STS", BIT(7)},
- {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {"DISP_SHIM_VNN_REQ_STS", BIT(2), 0},
+ {"DTS0_VNN_REQ_STS", BIT(7), 0},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11), 2},
{}
};
const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
- {"CPU_C10_REQ_STS", BIT(0)},
- {"TS_OFF_REQ_STS", BIT(1)},
- {"PNDE_MET_REQ_STS", BIT(2)},
- {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
- {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
- {"NPK_VNNAON_REQ_STS", BIT(5)},
- {"VNN_SOC_REQ_STS", BIT(6)},
- {"ISH_VNNAON_REQ_STS", BIT(7)},
- {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8)},
- {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9)},
- {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10)},
- {"PLT_GREATER_REQ_STS", BIT(11)},
- {"PCIE_CLKREQ_REQ_STS", BIT(12)},
- {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
- {"PM_SYNC_STATES_REQ_STS", BIT(14)},
- {"EA_REQ_STS", BIT(15)},
- {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
- {"BRK_EV_EN_REQ_STS", BIT(17)},
- {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
- {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
- {"LPC_CLK_SRC_REQ_STS", BIT(20)},
- {"ARC_IDLE_REQ_STS", BIT(21)},
- {"MPHY_SUS_REQ_STS", BIT(22)},
- {"FIA_DEEP_PM_REQ_STS", BIT(23)},
- {"UXD_CONNECTED_REQ_STS", BIT(24)},
- {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
- {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26)},
- {"PRE_WAKE0_REQ_STS", BIT(27)},
- {"PRE_WAKE1_REQ_STS", BIT(28)},
- {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
- {"WOV_REQ_STS", BIT(30)},
- {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31)},
+ {"CPU_C10_REQ_STS", BIT(0), 0},
+ {"TS_OFF_REQ_STS", BIT(1), 0},
+ {"PNDE_MET_REQ_STS", BIT(2), 1},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3), 0},
+ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4), 0},
+ {"NPK_VNNAON_REQ_STS", BIT(5), 0},
+ {"VNN_SOC_REQ_STS", BIT(6), 1},
+ {"ISH_VNNAON_REQ_STS", BIT(7), 0},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
+ {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10), 1},
+ {"PLT_GREATER_REQ_STS", BIT(11), 1},
+ {"PCIE_CLKREQ_REQ_STS", BIT(12), 0},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
+ {"EA_REQ_STS", BIT(15), 0},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
+ {"BRK_EV_EN_REQ_STS", BIT(17), 0},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
+ {"LPC_CLK_SRC_REQ_STS", BIT(20), 0},
+ {"ARC_IDLE_REQ_STS", BIT(21), 0},
+ {"MPHY_SUS_REQ_STS", BIT(22), 0},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
+ {"UXD_CONNECTED_REQ_STS", BIT(24), 1},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
+ {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
+ {"PRE_WAKE0_REQ_STS", BIT(27), 1},
+ {"PRE_WAKE1_REQ_STS", BIT(28), 1},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29), 1},
+ {"WOV_REQ_STS", BIT(30), 0},
+ {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_clocksource_status_map[] = {
- {"AON2_OFF_STS", BIT(0)},
- {"AON3_OFF_STS", BIT(1)},
- {"AON4_OFF_STS", BIT(2)},
- {"AON5_OFF_STS", BIT(3)},
- {"AON1_OFF_STS", BIT(4)},
- {"MPFPW1_0_PLL_OFF_STS", BIT(6)},
- {"USB3_PLL_OFF_STS", BIT(8)},
- {"AON3_SPL_OFF_STS", BIT(9)},
- {"G5FPW1_PLL_OFF_STS", BIT(15)},
- {"XTAL_AGGR_OFF_STS", BIT(17)},
- {"USB2_PLL_OFF_STS", BIT(18)},
- {"SAF_PLL_OFF_STS", BIT(19)},
- {"SE_TCSS_PLL_OFF_STS", BIT(20)},
- {"DDI_PLL_OFF_STS", BIT(21)},
- {"FILTER_PLL_OFF_STS", BIT(22)},
- {"ACE_PLL_OFF_STS", BIT(24)},
- {"FABRIC_PLL_OFF_STS", BIT(25)},
- {"SOC_PLL_OFF_STS", BIT(26)},
- {"REF_OFF_STS", BIT(28)},
- {"IMG_OFF_STS", BIT(29)},
- {"RTC_PLL_OFF_STS", BIT(31)},
+ {"AON2_OFF_STS", BIT(0), 0},
+ {"AON3_OFF_STS", BIT(1), 1},
+ {"AON4_OFF_STS", BIT(2), 1},
+ {"AON5_OFF_STS", BIT(3), 1},
+ {"AON1_OFF_STS", BIT(4), 0},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1},
+ {"USB3_PLL_OFF_STS", BIT(8), 1},
+ {"AON3_SPL_OFF_STS", BIT(9), 1},
+ {"G5FPW1_PLL_OFF_STS", BIT(15), 1},
+ {"XTAL_AGGR_OFF_STS", BIT(17), 1},
+ {"USB2_PLL_OFF_STS", BIT(18), 0},
+ {"SAF_PLL_OFF_STS", BIT(19), 1},
+ {"SE_TCSS_PLL_OFF_STS", BIT(20), 1},
+ {"DDI_PLL_OFF_STS", BIT(21), 1},
+ {"FILTER_PLL_OFF_STS", BIT(22), 1},
+ {"ACE_PLL_OFF_STS", BIT(24), 0},
+ {"FABRIC_PLL_OFF_STS", BIT(25), 1},
+ {"SOC_PLL_OFF_STS", BIT(26), 1},
+ {"REF_OFF_STS", BIT(28), 1},
+ {"IMG_OFF_STS", BIT(29), 1},
+ {"RTC_PLL_OFF_STS", BIT(31), 0},
+ {}
+};
+
+const struct pmc_bit_map lnl_signal_status_map[] = {
+ {"LSX_Wake0_STS", BIT(0), 0},
+ {"LSX_Wake1_STS", BIT(1), 0},
+ {"LSX_Wake2_STS", BIT(2), 0},
+ {"LSX_Wake3_STS", BIT(3), 0},
+ {"LSX_Wake4_STS", BIT(4), 0},
+ {"LSX_Wake5_STS", BIT(5), 0},
+ {"LSX_Wake6_STS", BIT(6), 0},
+ {"LSX_Wake7_STS", BIT(7), 0},
+ {"LPSS_Wake0_STS", BIT(8), 1},
+ {"LPSS_Wake1_STS", BIT(9), 1},
+ {"Int_Timer_SS_Wake0_STS", BIT(10), 1},
+ {"Int_Timer_SS_Wake1_STS", BIT(11), 1},
+ {"Int_Timer_SS_Wake2_STS", BIT(12), 1},
+ {"Int_Timer_SS_Wake3_STS", BIT(13), 1},
+ {"Int_Timer_SS_Wake4_STS", BIT(14), 1},
+ {"Int_Timer_SS_Wake5_STS", BIT(15), 1},
+ {}
+};
+
+const struct pmc_bit_map lnl_rsc_status_map[] = {
+ {"Memory", 0, 1},
+ {"PSF0", 0, 1},
+ {"PSF4", 0, 1},
+ {"PSF6", 0, 1},
+ {"PSF7", 0, 1},
+ {"PSF8", 0, 1},
+ {"SAF_CFI_LINK", 0, 1},
+ {"SBR", 0, 1},
{}
};
@@ -331,7 +363,26 @@ const struct pmc_bit_map *lnl_lpm_maps[] = {
lnl_vnn_req_status_2_map,
lnl_vnn_req_status_3_map,
lnl_vnn_misc_status_map,
- mtl_socm_signal_status_map,
+ lnl_signal_status_map,
+ NULL
+};
+
+const struct pmc_bit_map *lnl_blk_maps[] = {
+ lnl_power_gating_status_0_map,
+ lnl_power_gating_status_1_map,
+ lnl_power_gating_status_2_map,
+ lnl_rsc_status_map,
+ lnl_vnn_req_status_0_map,
+ lnl_vnn_req_status_1_map,
+ lnl_vnn_req_status_2_map,
+ lnl_vnn_req_status_3_map,
+ lnl_d3_status_0_map,
+ lnl_d3_status_1_map,
+ lnl_d3_status_2_map,
+ lnl_d3_status_3_map,
+ lnl_clocksource_status_map,
+ lnl_vnn_misc_status_map,
+ lnl_signal_status_map,
NULL
};
@@ -475,6 +526,8 @@ const struct pmc_reg_map lnl_socm_reg_map = {
.lpm_sts = lnl_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .s0ix_blocker_maps = lnl_blk_maps,
+ .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
};
#define LNL_NPU_PCI_DEV 0x643e
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 35814a7707af..6bc9c4a603e0 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -125,7 +125,6 @@ static const struct acpi_device_id irst_ids[] = {
};
static struct acpi_driver irst_driver = {
- .owner = THIS_MODULE,
.name = "intel_rapid_start",
.class = "intel_rapid_start",
.ids = irst_ids,
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 556e7c6dbb05..277e4f4b20ac 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -15,6 +15,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -66,6 +67,8 @@
#define CTRL_OWNER GENMASK(5, 4)
#define CTRL_COMPLETE BIT(6)
#define CTRL_READY BIT(7)
+#define CTRL_INBAND_LOCK BIT(32)
+#define CTRL_METER_ENABLE_DRAM BIT(33)
#define CTRL_STATUS GENMASK(15, 8)
#define CTRL_PACKET_SIZE GENMASK(31, 16)
#define CTRL_MSG_SIZE GENMASK(63, 48)
@@ -93,6 +96,7 @@ enum sdsi_command {
struct sdsi_mbox_info {
u64 *payload;
void *buffer;
+ u64 control_flags;
int size;
};
@@ -156,8 +160,8 @@ static int sdsi_status_to_errno(u32 status)
}
}
-static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
- size_t *data_size)
+static int sdsi_mbox_poll(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
struct device *dev = priv->dev;
u32 total, loop, eom, status, message_size;
@@ -166,18 +170,10 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
lockdep_assert_held(&priv->mb_lock);
- /* Format and send the read command */
- control = FIELD_PREP(CTRL_EOM, 1) |
- FIELD_PREP(CTRL_SOM, 1) |
- FIELD_PREP(CTRL_RUN_BUSY, 1) |
- FIELD_PREP(CTRL_PACKET_SIZE, info->size);
- writeq(control, priv->control_addr);
-
/* For reads, data sizes that are larger than the mailbox size are read in packets. */
total = 0;
loop = 0;
do {
- void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
u32 packet_size;
/* Poll on ready bit */
@@ -195,6 +191,11 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
if (ret)
break;
+ if (!packet_size) {
+ sdsi_complete_transaction(priv);
+ break;
+ }
+
/* Only the last packet can be less than the mailbox size. */
if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
dev_err(dev, "Invalid packet size\n");
@@ -208,9 +209,13 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
break;
}
- sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
+ if (info->buffer) {
+ void *buf = info->buffer + array_size(SDSI_SIZE_MAILBOX, loop);
- total += packet_size;
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr,
+ round_up(packet_size, SDSI_SIZE_CMD));
+ total += packet_size;
+ }
sdsi_complete_transaction(priv);
} while (!eom && ++loop < MBOX_MAX_PACKETS);
@@ -230,16 +235,34 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
dev_warn(dev, "Read count %u differs from expected count %u\n",
total, message_size);
- *data_size = total;
+ if (data_size)
+ *data_size = total;
return 0;
}
-static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
+{
+ u64 control;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Format and send the read command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size) |
+ info->control_flags;
+ writeq(control, priv->control_addr);
+
+ return sdsi_mbox_poll(priv, info, data_size);
+}
+
+static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
u64 control;
- u32 status;
- int ret;
lockdep_assert_held(&priv->mb_lock);
@@ -252,23 +275,11 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in
FIELD_PREP(CTRL_SOM, 1) |
FIELD_PREP(CTRL_RUN_BUSY, 1) |
FIELD_PREP(CTRL_READ_WRITE, 1) |
+ FIELD_PREP(CTRL_MSG_SIZE, info->size) |
FIELD_PREP(CTRL_PACKET_SIZE, info->size);
writeq(control, priv->control_addr);
- /* Poll on ready bit */
- ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
- MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
-
- if (ret)
- goto release_mbox;
-
- status = FIELD_GET(CTRL_STATUS, control);
- ret = sdsi_status_to_errno(status);
-
-release_mbox:
- sdsi_complete_transaction(priv);
-
- return ret;
+ return sdsi_mbox_poll(priv, info, data_size);
}
static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
@@ -312,7 +323,8 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
return ret;
}
-static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
int ret;
@@ -322,7 +334,7 @@ static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
if (ret)
return ret;
- return sdsi_mbox_cmd_write(priv, info);
+ return sdsi_mbox_cmd_write(priv, info, data_size);
}
static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
@@ -338,15 +350,24 @@ static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, s
return sdsi_mbox_cmd_read(priv, info, data_size);
}
+static bool sdsi_ib_locked(struct sdsi_priv *priv)
+{
+ return !!FIELD_GET(CTRL_INBAND_LOCK, readq(priv->control_addr));
+}
+
static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
enum sdsi_command command)
{
- struct sdsi_mbox_info info;
+ struct sdsi_mbox_info info = {};
int ret;
if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
return -EOVERFLOW;
+ /* Make sure In-band lock is not set */
+ if (sdsi_ib_locked(priv))
+ return -EPERM;
+
/* Qword aligned message + command qword */
info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
@@ -363,7 +384,9 @@ static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
ret = mutex_lock_interruptible(&priv->mb_lock);
if (ret)
goto free_payload;
- ret = sdsi_mbox_write(priv, &info);
+
+ ret = sdsi_mbox_write(priv, &info, NULL);
+
mutex_unlock(&priv->mb_lock);
free_payload:
@@ -404,10 +427,10 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
static ssize_t
-certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
- size_t count)
+certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
+ char *buf, loff_t off, size_t count)
{
- struct sdsi_mbox_info info;
+ struct sdsi_mbox_info info = {};
size_t size;
int ret;
@@ -421,6 +444,7 @@ certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
info.payload = &command;
info.size = sizeof(command);
+ info.control_flags = control_flags;
ret = mutex_lock_interruptible(&priv->mb_lock);
if (ret)
@@ -452,7 +476,7 @@ state_certificate_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
- return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count);
+ return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
@@ -464,10 +488,23 @@ meter_certificate_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
- return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count);
+ return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+static ssize_t
+meter_current_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
+ priv, buf, off, count);
+}
+static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
+
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
@@ -498,6 +535,7 @@ static struct bin_attribute *sdsi_bin_attrs[] = {
&bin_attr_registers,
&bin_attr_state_certificate,
&bin_attr_meter_certificate,
+ &bin_attr_meter_current,
&bin_attr_provision_akc,
&bin_attr_provision_cap,
NULL
@@ -517,7 +555,7 @@ sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
if (!(priv->features & SDSI_FEATURE_SDSI))
return 0;
- if (attr == &bin_attr_meter_certificate)
+ if (attr == &bin_attr_meter_certificate || attr == &bin_attr_meter_current)
return (priv->features & SDSI_FEATURE_METERING) ?
attr->attr.mode : 0;
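
As a usage illustration: the new meter_current attribute issues the same SDSI_CMD_READ_METER mailbox command as meter_certificate, but with the CTRL_METER_ENABLE_DRAM control flag set. A hedged user-space sketch of reading it; the sysfs path is a guess and depends on how the auxiliary device is named on a particular system:

/*
 * Build: cc -o meter_current meter_current.c
 * The sysfs path is only an example; the auxiliary device name differs
 * from system to system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/bus/auxiliary/devices/intel_vsec.sdsi.0/meter_current";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes of current meter data\n", n);

	close(fd);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
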
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index 64c2dc93472f..cd25d0585324 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -32,7 +32,6 @@ static const struct acpi_device_id smartconnect_ids[] = {
MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
static struct acpi_driver smartconnect_driver = {
- .owner = THIS_MODULE,
.name = "intel_smart_connect",
.class = "intel_smart_connect",
.ids = smartconnect_ids,
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 08df9494603c..713c0d1fa85f 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -719,7 +719,9 @@ static struct miscdevice isst_if_char_driver = {
};
static const struct x86_cpu_id hpm_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
@@ -837,4 +839,5 @@ void isst_if_cdev_unregister(int device_type)
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+MODULE_DESCRIPTION("ISST common interface module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 1d918000d72b..7bac7841ff0a 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -17,12 +17,15 @@
* the hardware mapping.
*/
+#define dev_fmt(fmt) "tpmi_sst: " fmt
+
#include <linux/auxiliary_bus.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <uapi/linux/isst_if.h>
@@ -263,20 +266,33 @@ struct tpmi_per_power_domain_info {
bool write_blocked;
};
+/* Maximum number of supported partitions */
+#define SST_MAX_PARTITIONS 2
+
/**
* struct tpmi_sst_struct - Store sst info for a package
* @package_id: Package id for this aux device instance
* @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer
* @power_domain_info: Pointer to power domains information
+ * @cdie_mask: Mask of compute dies present in a partition, as provided by the hardware.
+ * This mask is not present in the version 1 information header.
+ * @io_dies: Number of IO dies in a partition. This will be 0 for the TPMI
+ * version 1 information header.
+ * @partition_mask: Mask of all partitions.
+ * @partition_mask_current: Current partition mask, as some partitions may have been unbound.
*
* This structure is used to store the full SST information for a package.
- * Each package has a unique OOB PCI device, which enumerates TPMI.
- * Each Package will have multiple power_domains.
+ * Each package has one or multiple OOB PCI devices. Each package can contain multiple
+ * power domains.
*/
struct tpmi_sst_struct {
int package_id;
- int number_of_power_domains;
- struct tpmi_per_power_domain_info *power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
+ u16 cdie_mask[SST_MAX_PARTITIONS];
+ u8 number_of_power_domains[SST_MAX_PARTITIONS];
+ u8 io_dies[SST_MAX_PARTITIONS];
+ u8 partition_mask;
+ u8 partition_mask_current;
};
/**
@@ -313,12 +329,11 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
struct tpmi_per_power_domain_info *pd_info,
int levels)
{
+ struct device *dev = &auxdev->dev;
u64 perf_level_offsets;
int i;
- pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels,
- sizeof(struct perf_level),
- GFP_KERNEL);
+ pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
if (!pd_info->perf_levels)
return 0;
@@ -349,6 +364,7 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
{
+ struct device *dev = &auxdev->dev;
int i, mask, levels;
*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
@@ -359,13 +375,13 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai
return -ENODEV;
if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
- dev_err(&auxdev->dev, "SST: Unsupported major version:%lx\n",
+ dev_err(dev, "SST: Unsupported major version:%lx\n",
TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
return -ENODEV;
}
if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION)
- dev_info(&auxdev->dev, "SST: Ignore: Unsupported minor version:%lx\n",
+ dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
/* Read SST CP Header */
@@ -387,6 +403,126 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai
return 0;
}
+static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
+{
+ u8 i, max_part, count = 0;
+
+ /* Partition mask starts from bit 0 and contains 1s only */
+ max_part = hweight8(sst_inst->partition_mask);
+ for (i = 0; i < max_part; i++)
+ count += sst_inst->number_of_power_domains[i];
+
+ return count;
+}
+
+/**
+ * map_cdies() - Map user domain ID to compute domain ID
+ * @sst_inst: TPMI Instance
+ * @id: User domain ID
+ * @partition: Resolved partition
+ *
+ * Helper function for map_partition_power_domain_id() to resolve the compute
+ * domain ID and partition. Uses the hardware-provided cdie_mask for a partition
+ * as-is to resolve a compute domain ID.
+ *
+ * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
+ */
+static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
+{
+ u8 i, max_part;
+
+ max_part = hweight8(sst_inst->partition_mask);
+ for (i = 0; i < max_part; i++) {
+ if (!(sst_inst->cdie_mask[i] & BIT(id)))
+ continue;
+
+ *partition = i;
+ return id - ffs(sst_inst->cdie_mask[i]) + 1;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * map_partition_power_domain_id() - Map user domain ID to partition domain ID
+ * @sst_inst: TPMI Instance
+ * @id: User domain ID
+ * @partition: Resolved partition
+ *
+ * In a partitioned system a CPU package has two separate MMIO ranges (under
+ * two PCI devices), but the compute die/power domain IDs are unique within
+ * the package. User space can get the compute die/power domain ID of a CPU
+ * from CPUID and MSR 0x54, so those IDs need to be preserved even when they
+ * are spread across two different partitions, each with its own ordering.
+ *
+ * For example, for the ISST_IF_COUNT_TPMI_INSTANCES command the valid_mask
+ * is 111111b for a system with 4 compute and 2 IO dies. In a non-partitioned
+ * system this is presented, as provided by the hardware, in the following
+ * order:
+ * I1-I0-C3-C2-C1-C0
+ * Here "C" stands for a compute die and "I" for an IO die.
+ * Compute dies are always present first in TPMI instances, as they have
+ * to map to the real power domain/die ID of a system. In a non-partitioned
+ * system there is no way to identify compute and IO die boundaries from
+ * this driver without reading each CPU's mapping.
+ *
+ * The same order needs to be preserved even if those compute dies are
+ * distributed among multiple partitions. For example:
+ * Partition 0 can contain: I0-C1-C0
+ * Partition 1 can contain: I1-C3-C2
+ *
+ * This requires converting a user space ID to the actual index into the
+ * array of stored power domains for each partition. For the above example
+ * this function returns the partition and index as follows:
+ *
+ * ============= ========= ===== ========
+ * User space ID Partition Index Die type
+ * ============= ========= ===== ========
+ * 0 0 0 Compute
+ * 1 0 1 Compute
+ * 2 1 0 Compute
+ * 3 1 1 Compute
+ * 4 0 2 IO
+ * 5 1 2 IO
+ * ============= ========= ===== ========
+ *
+ * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
+ */
+static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
+{
+ u8 i, io_start_id, max_part;
+
+ *partition = 0;
+
+ /* If any PCI device for a partition is unbound, treat this as a failure */
+ if (sst_inst->partition_mask != sst_inst->partition_mask_current)
+ return -EINVAL;
+
+ max_part = hweight8(sst_inst->partition_mask);
+
+ /* IO indexes begin here */
+ io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);
+
+ if (id < io_start_id)
+ return map_cdies(sst_inst, id, partition);
+
+ for (i = 0; i < max_part; i++) {
+ u8 io_id;
+
+ io_id = id - io_start_id;
+ if (io_id < sst_inst->io_dies[i]) {
+ u8 cdie_range;
+
+ cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
+ *partition = i;
+ return cdie_range + io_id;
+ }
+ io_start_id += sst_inst->io_dies[i];
+ }
+
+ return -EINVAL;
+}
+
/*
* Map a package and power_domain id to SST information structure unique for a power_domain.
* The caller should call under isst_tpmi_dev_lock.
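
As an illustration of the mapping that map_partition_power_domain_id() documents above, here is a stand-alone sketch fed with the kernel-doc's own example of 4 compute and 2 IO dies split across two partitions (cdie masks 0x3 and 0xc, one IO die each). GCC/Clang builtins stand in for the kernel's ffs()/fls()/hweight helpers, and the data comes from the comment, not from hardware:

#include <stdint.h>
#include <stdio.h>

#define MAX_PART 2

/*
 * Example data matching the kernel-doc table: partition 0 holds C0, C1 and
 * one IO die, partition 1 holds C2, C3 and one IO die.
 */
static const uint16_t cdie_mask[MAX_PART] = { 0x3, 0xc };
static const uint8_t io_dies[MAX_PART] = { 1, 1 };

/* Mirrors map_cdies(): resolve a compute-die user ID to (partition, index). */
static int map_cdies(uint8_t id, uint8_t *partition)
{
	for (uint8_t i = 0; i < MAX_PART; i++) {
		if (!(cdie_mask[i] & (1u << id)))
			continue;
		*partition = i;
		return id - __builtin_ffs(cdie_mask[i]) + 1;
	}
	return -1;
}

/* Mirrors map_partition_power_domain_id() for the example data above. */
static int map_id(uint8_t id, uint8_t *partition)
{
	/* fls(x) == 32 - clz(x) for non-zero x */
	uint8_t io_start_id = 32 - __builtin_clz(cdie_mask[MAX_PART - 1]);

	*partition = 0;
	if (id < io_start_id)
		return map_cdies(id, partition);

	for (uint8_t i = 0; i < MAX_PART; i++) {
		uint8_t io_id = id - io_start_id;

		if (io_id < io_dies[i]) {
			uint8_t cdie_range = (32 - __builtin_clz(cdie_mask[i])) -
					     __builtin_ffs(cdie_mask[i]) + 1;

			*partition = i;
			return cdie_range + io_id;
		}
		io_start_id += io_dies[i];
	}
	return -1;
}

int main(void)
{
	/* Prints the same six rows as the table in the kernel-doc comment. */
	for (uint8_t id = 0; id < 6; id++) {
		uint8_t part = 0;
		int idx = map_id(id, &part);

		printf("user ID %u -> partition %u, index %d\n", id, part, idx);
	}
	return 0;
}
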
@@ -395,19 +531,20 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom
{
struct tpmi_per_power_domain_info *power_domain_info;
struct tpmi_sst_struct *sst_inst;
+ u8 part;
- if (pkg_id < 0 || pkg_id > isst_common.max_index ||
- pkg_id >= topology_max_packages())
+ if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
return NULL;
sst_inst = isst_common.sst_inst[pkg_id];
if (!sst_inst)
return NULL;
- if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains)
+ power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
+ if (power_domain_id < 0)
return NULL;
- power_domain_info = &sst_inst->power_domain_info[power_domain_id];
+ power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];
if (power_domain_info && !power_domain_info->sst_base)
return NULL;
@@ -579,6 +716,7 @@ static long isst_if_clos_assoc(void __user *argp)
struct tpmi_sst_struct *sst_inst;
int offset, shift, cpu;
u64 val, mask, clos;
+ u8 part;
if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
return -EFAULT;
@@ -602,10 +740,11 @@ static long isst_if_clos_assoc(void __user *argp)
sst_inst = isst_common.sst_inst[pkg_id];
- if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains)
+ punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
+ if (punit_id < 0)
return -EINVAL;
- power_domain_info = &sst_inst->power_domain_info[punit_id];
+ power_domain_info = &sst_inst->power_domain_info[part][punit_id];
if (assoc_cmds.get_set && power_domain_info->write_blocked)
return -EPERM;
@@ -708,6 +847,8 @@ static int isst_if_get_perf_level(void __user *argp)
{
struct isst_perf_level_info perf_level;
struct tpmi_per_power_domain_info *power_domain_info;
+ unsigned long level_mask;
+ u8 level, support;
if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
return -EFAULT;
@@ -727,12 +868,34 @@ static int isst_if_get_perf_level(void __user *argp)
SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));
- _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0,
- SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH,
- SST_MUL_FACTOR_NONE);
- _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0,
- SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH,
- SST_MUL_FACTOR_NONE);
+ level_mask = perf_level.level_mask;
+ perf_level.sst_bf_support = 0;
+ for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
+ /*
+ * Read BF support for a level. The result is stored in the
+ * "support" variable by the macro below.
+ */
+ _read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
+ SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);
+
+ /* If supported set the bit for the level */
+ if (support)
+ perf_level.sst_bf_support |= BIT(level);
+ }
+
+ perf_level.sst_tf_support = 0;
+ for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
+ /*
+ * Read TF support for a level. The result is stored in the
+ * "support" variable by the macro below.
+ */
+ _read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
+ SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);
+
+ /* If supported set the bit for the level */
+ if (support)
+ perf_level.sst_tf_support |= BIT(level);
+ }
if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
return -EFAULT;
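
To illustrate the behavioural change above: sst_bf_support and sst_tf_support are now per-level bitmaps rather than level-0 flags, so a consumer is expected to test the bit of the level it cares about. A small sketch using hypothetical values for the fields filled in by isst_if_get_perf_level():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values as copied back by isst_if_get_perf_level(). */
	uint8_t level_mask = 0x0b;	/* levels 0, 1 and 3 are present */
	uint8_t sst_bf_support = 0x09;	/* SST-BF supported at levels 0 and 3 */
	uint8_t sst_tf_support = 0x02;	/* SST-TF supported at level 1 only */

	for (unsigned int level = 0; level < 8; level++) {
		if (!(level_mask & (1u << level)))
			continue;
		printf("level %u: BF %s, TF %s\n", level,
		       sst_bf_support & (1u << level) ? "yes" : "no",
		       sst_tf_support & (1u << level) ? "yes" : "no");
	}
	return 0;
}
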
@@ -1134,18 +1297,28 @@ static int isst_if_get_tpmi_instance_count(void __user *argp)
if (tpmi_inst.socket_id >= topology_max_packages())
return -EINVAL;
- tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains;
-
sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
+
+ tpmi_inst.count = isst_instance_count(sst_inst);
+
tpmi_inst.valid_mask = 0;
- for (i = 0; i < sst_inst->number_of_power_domains; ++i) {
+ for (i = 0; i < tpmi_inst.count; i++) {
struct tpmi_per_power_domain_info *pd_info;
+ u8 part;
+ int pd;
- pd_info = &sst_inst->power_domain_info[i];
+ pd = map_partition_power_domain_id(sst_inst, i, &part);
+ if (pd < 0)
+ continue;
+
+ pd_info = &sst_inst->power_domain_info[part][pd];
if (pd_info->sst_base)
tpmi_inst.valid_mask |= BIT(i);
}
+ if (!tpmi_inst.valid_mask)
+ tpmi_inst.count = 0;
+
if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
return -EFAULT;
@@ -1271,102 +1444,175 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
+ struct tpmi_per_power_domain_info *pd_info;
bool read_blocked = 0, write_blocked = 0;
struct intel_tpmi_plat_info *plat_info;
+ struct device *dev = &auxdev->dev;
struct tpmi_sst_struct *tpmi_sst;
- int i, ret, pkg = 0, inst = 0;
- int num_resources;
+ u8 i, num_resources, io_die_cnt;
+ int ret, pkg = 0, inst = 0;
+ bool first_enum = false;
+ u16 cdie_mask;
+ u8 partition;
ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
if (ret)
- dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n");
+ dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");
if (read_blocked) {
- dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
+ dev_info(dev, "Firmware has blocked reads, exiting\n");
return -ENODEV;
}
plat_info = tpmi_get_platform_data(auxdev);
if (!plat_info) {
- dev_err(&auxdev->dev, "No platform info\n");
+ dev_err(dev, "No platform info\n");
return -EINVAL;
}
pkg = plat_info->package_id;
if (pkg >= topology_max_packages()) {
- dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg);
+ dev_err(dev, "Invalid package id :%x\n", pkg);
return -EINVAL;
}
- if (isst_common.sst_inst[pkg])
- return -EEXIST;
+ partition = plat_info->partition;
+ if (partition >= SST_MAX_PARTITIONS) {
+ dev_err(&auxdev->dev, "Invalid partition :%x\n", partition);
+ return -EINVAL;
+ }
num_resources = tpmi_get_resource_count(auxdev);
if (!num_resources)
return -EINVAL;
- tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL);
- if (!tpmi_sst)
- return -ENOMEM;
+ mutex_lock(&isst_tpmi_dev_lock);
+
+ if (isst_common.sst_inst[pkg]) {
+ tpmi_sst = isst_common.sst_inst[pkg];
+ } else {
+ /*
+ * The tpmi_sst instance is per package, so it needs to be
+ * allocated only once for both partitions. We can't use
+ * devm_* allocation here because each partition is a
+ * different device, which can be unbound.
+ */
+ tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL);
+ if (!tpmi_sst) {
+ ret = -ENOMEM;
+ goto unlock_exit;
+ }
+ first_enum = true;
+ }
- tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources,
- sizeof(*tpmi_sst->power_domain_info),
- GFP_KERNEL);
- if (!tpmi_sst->power_domain_info)
- return -ENOMEM;
+ ret = 0;
- tpmi_sst->number_of_power_domains = num_resources;
+ pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
+ if (!pd_info) {
+ ret = -ENOMEM;
+ goto unlock_free;
+ }
+
+ /* Get the IO die count, if cdie_mask is present */
+ if (plat_info->cdie_mask) {
+ u8 cdie_range;
+
+ cdie_mask = plat_info->cdie_mask;
+ cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
+ io_die_cnt = num_resources - cdie_range;
+ } else {
+ /*
+ * This is a synthetic mask; be careful when assuming that
+ * these are compute dies only.
+ */
+ cdie_mask = (1 << num_resources) - 1;
+ io_die_cnt = 0;
+ }
for (i = 0; i < num_resources; ++i) {
struct resource *res;
res = tpmi_get_resource_at_index(auxdev, i);
if (!res) {
- tpmi_sst->power_domain_info[i].sst_base = NULL;
+ pd_info[i].sst_base = NULL;
continue;
}
- tpmi_sst->power_domain_info[i].package_id = pkg;
- tpmi_sst->power_domain_info[i].power_domain_id = i;
- tpmi_sst->power_domain_info[i].auxdev = auxdev;
- tpmi_sst->power_domain_info[i].write_blocked = write_blocked;
- tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
- if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
- return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
+ pd_info[i].package_id = pkg;
+ pd_info[i].power_domain_id = i;
+ pd_info[i].auxdev = auxdev;
+ pd_info[i].write_blocked = write_blocked;
+ pd_info[i].sst_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd_info[i].sst_base)) {
+ ret = PTR_ERR(pd_info[i].sst_base);
+ goto unlock_free;
+ }
- ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]);
+ ret = sst_main(auxdev, &pd_info[i]);
if (ret) {
- devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base);
- tpmi_sst->power_domain_info[i].sst_base = NULL;
+ /*
+ * This entry is not valid: hardware can partially
+ * populate dies, in which case MMIO reads return 0xFFs.
+ * It is also possible that some pre-production hardware
+ * has invalid data. Don't fail; continue to use the
+ * other dies that have valid data.
+ */
+ devm_iounmap(dev, pd_info[i].sst_base);
+ pd_info[i].sst_base = NULL;
continue;
}
++inst;
}
- if (!inst)
- return -ENODEV;
+ if (!inst) {
+ ret = -ENODEV;
+ goto unlock_free;
+ }
tpmi_sst->package_id = pkg;
+
+ tpmi_sst->power_domain_info[partition] = pd_info;
+ tpmi_sst->number_of_power_domains[partition] = num_resources;
+ tpmi_sst->cdie_mask[partition] = cdie_mask;
+ tpmi_sst->io_dies[partition] = io_die_cnt;
+ tpmi_sst->partition_mask |= BIT(partition);
+ tpmi_sst->partition_mask_current |= BIT(partition);
+
auxiliary_set_drvdata(auxdev, tpmi_sst);
- mutex_lock(&isst_tpmi_dev_lock);
if (isst_common.max_index < pkg)
isst_common.max_index = pkg;
isst_common.sst_inst[pkg] = tpmi_sst;
+
+unlock_free:
+ if (ret && first_enum)
+ kfree(tpmi_sst);
+unlock_exit:
mutex_unlock(&isst_tpmi_dev_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST);
void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+ struct intel_tpmi_plat_info *plat_info;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
mutex_lock(&isst_tpmi_dev_lock);
- isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ tpmi_sst->power_domain_info[plat_info->partition] = NULL;
+ tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
+ /* Free the package instance when all partitions are removed */
+ if (!tpmi_sst->partition_mask_current) {
+ kfree(tpmi_sst);
+ isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ }
mutex_unlock(&isst_tpmi_dev_lock);
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
@@ -1374,9 +1620,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
- struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct intel_tpmi_plat_info *plat_info;
void __iomem *cp_base;
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
+
+ power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
@@ -1395,9 +1648,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST);
void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
- struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct intel_tpmi_plat_info *plat_info;
void __iomem *cp_base;
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
+
+ power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
@@ -1412,7 +1672,7 @@ void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
}
EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST);
-#define ISST_TPMI_API_VERSION 0x02
+#define ISST_TPMI_API_VERSION 0x03
int tpmi_sst_init(void)
{
@@ -1469,4 +1729,5 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST);
MODULE_IMPORT_NS(INTEL_TPMI);
MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
+MODULE_DESCRIPTION("ISST TPMI interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 910df7c654f4..6c0cbccd80bb 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -128,6 +128,9 @@ struct intel_tpmi_info {
* @dev: PCI device number
* @bus: PCI bus number
* @pkg: CPU Package id
+ * @segment: PCI segment id
+ * @partition: Package Partition id
+ * @cdie_mask: Bitmap of compute dies in the current partition
* @reserved: Reserved for future use
* @lock: When set to 1 the register is locked and becomes read-only
* until next reset. Not for use by the OS driver.
@@ -139,7 +142,10 @@ struct tpmi_info_header {
u64 dev:5;
u64 bus:8;
u64 pkg:8;
- u64 reserved:39;
+ u64 segment:8;
+ u64 partition:2;
+ u64 cdie_mask:16;
+ u64 reserved:13;
u64 lock:1;
} __packed;
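
To illustrate the new layout: struct tpmi_info_header above packs segment, partition and cdie_mask into previously reserved bits of the same 64-bit info register. A sketch of pulling those fields out with plain shifts, assuming the LSB-first bitfield allocation the driver relies on and a leading fn:3 field (not visible in this hunk, but implied by the 64-bit total); the header value is arbitrary example data:

#include <stdint.h>
#include <stdio.h>

/* Extract "width" bits starting at bit "shift" from the 64-bit header qword. */
static uint64_t field(uint64_t v, unsigned int shift, unsigned int width)
{
	return (v >> shift) & ((1ull << width) - 1);
}

int main(void)
{
	uint64_t hdr = 0x0123456789abcdefull;	/* arbitrary example value */

	printf("fn        = %llu\n", (unsigned long long)field(hdr, 0, 3));
	printf("dev       = %llu\n", (unsigned long long)field(hdr, 3, 5));
	printf("bus       = %llu\n", (unsigned long long)field(hdr, 8, 8));
	printf("pkg       = %llu\n", (unsigned long long)field(hdr, 16, 8));
	printf("segment   = %llu\n", (unsigned long long)field(hdr, 24, 8));
	printf("partition = %llu\n", (unsigned long long)field(hdr, 32, 2));
	printf("cdie_mask = 0x%llx\n", (unsigned long long)field(hdr, 34, 16));
	printf("lock      = %llu\n", (unsigned long long)field(hdr, 63, 1));
	return 0;
}
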
@@ -666,28 +672,44 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
}
#define TPMI_INFO_BUS_INFO_OFFSET 0x08
+#define TPMI_INFO_MAJOR_VERSION 0x00
+#define TPMI_INFO_MINOR_VERSION 0x02
static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
struct intel_tpmi_pm_feature *pfs)
{
struct tpmi_info_header header;
void __iomem *info_mem;
+ u64 feature_header;
+ int ret = 0;
- info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
- pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET);
+ info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32));
if (!info_mem)
return -ENOMEM;
- memcpy_fromio(&header, info_mem, sizeof(header));
+ feature_header = readq(info_mem);
+ if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) {
+ ret = -ENODEV;
+ goto error_info_header;
+ }
+
+ memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header));
tpmi_info->plat_info.package_id = header.pkg;
tpmi_info->plat_info.bus_number = header.bus;
tpmi_info->plat_info.device_number = header.dev;
tpmi_info->plat_info.function_number = header.fn;
+ if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) {
+ tpmi_info->plat_info.cdie_mask = header.cdie_mask;
+ tpmi_info->plat_info.partition = header.partition;
+ tpmi_info->plat_info.segment = header.segment;
+ }
+
+error_info_header:
iounmap(info_mem);
- return 0;
+ return ret;
}
static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
@@ -763,8 +785,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
* when actual device nodes created outside this
* loop via tpmi_create_devices().
*/
- if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
- tpmi_process_info(tpmi_info, pfs);
+ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
+ ret = tpmi_process_info(tpmi_info, pfs);
+ if (ret)
+ return ret;
+ }
if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
tpmi_set_control_base(auxdev, tpmi_info, pfs);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bd75d61ff8a6..bb8e72deb354 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -29,7 +29,7 @@
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
-#define UNCORE_MINOR_VERSION 1
+#define UNCORE_MINOR_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -240,6 +240,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
bool read_blocked = 0, write_blocked = 0;
struct intel_tpmi_plat_info *plat_info;
struct tpmi_uncore_struct *tpmi_uncore;
+ bool uncore_sysfs_added = false;
int ret, i, pkg = 0;
int num_resources;
@@ -329,7 +330,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}
- if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
@@ -384,9 +385,15 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
}
/* Point to next cluster offset */
cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
+ uncore_sysfs_added = true;
}
}
+ if (!uncore_sysfs_added) {
+ ret = -ENODEV;
+ goto remove_clusters;
+ }
+
auxiliary_set_drvdata(auxdev, tpmi_uncore);
tpmi_uncore->root_cluster.root_domain = true;
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c86f5..84c1353eb12b 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -158,7 +156,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) {
if (!priv->has_buttons) {
- dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n");
+ dev_warn(&device->dev, "Warning: received 0x%02x button event on a device without buttons, please report this.\n",
+ event);
return;
}
input_dev = priv->buttons_dev;
@@ -258,9 +257,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
- unsigned long long vgbs;
- acpi_status status;
-
/* See dual_accel_detect.h for more info */
if (dual_accel)
return false;
@@ -268,8 +264,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
if (!dmi_check_system(dmi_switches_allow_list))
return false;
- status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
- return ACPI_SUCCESS(status);
+ return acpi_has_method(handle, "VGBS");
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -316,6 +311,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index ba38649cc142..73ec4460a151 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1505,7 +1505,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
* IRQ handler for ME interaction
* Note: don't use MSI here as the PCH has bugs.
*/
- ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_INTX);
if (ret < 0)
return ret;
diff --git a/drivers/platform/x86/lenovo-wmi-camera.c b/drivers/platform/x86/lenovo-wmi-camera.c
new file mode 100644
index 000000000000..0c0bedaf7407
--- /dev/null
+++ b/drivers/platform/x86/lenovo-wmi-camera.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lenovo WMI Camera Button Driver
+ *
+ * Author: Ai Chao <aichao@kylinos.cn>
+ * Copyright (C) 2024 KylinSoft Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/wmi.h>
+
+#define WMI_LENOVO_CAMERABUTTON_EVENT_GUID "50C76F1F-D8E4-D895-0A3D-62F4EA400013"
+
+struct lenovo_wmi_priv {
+ struct input_dev *idev;
+ struct mutex notify_lock; /* lenovo WMI camera button notify lock */
+};
+
+enum {
+ SW_CAMERA_OFF = 0,
+ SW_CAMERA_ON = 1,
+};
+
+static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
+{
+ struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ unsigned int keycode;
+ u8 camera_mode;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Bad response type %u\n", obj->type);
+ return;
+ }
+
+ if (obj->buffer.length != 1) {
+ dev_err(&wdev->dev, "Invalid buffer length %u\n", obj->buffer.length);
+ return;
+ }
+
+ /*
+ * obj->buffer.pointer[0] is camera mode:
+ * 0 camera close
+ * 1 camera open
+ */
+ camera_mode = obj->buffer.pointer[0];
+ if (camera_mode > SW_CAMERA_ON) {
+ dev_err(&wdev->dev, "Unknown camera mode %u\n", camera_mode);
+ return;
+ }
+
+ mutex_lock(&priv->notify_lock);
+
+ keycode = camera_mode == SW_CAMERA_ON ?
+ KEY_CAMERA_ACCESS_ENABLE : KEY_CAMERA_ACCESS_DISABLE;
+ input_report_key(priv->idev, keycode, 1);
+ input_sync(priv->idev);
+ input_report_key(priv->idev, keycode, 0);
+ input_sync(priv->idev);
+
+ mutex_unlock(&priv->notify_lock);
+}
+
+static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct lenovo_wmi_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+
+ priv->idev = devm_input_allocate_device(&wdev->dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "Lenovo WMI Camera Button";
+ priv->idev->phys = "wmi/input0";
+ priv->idev->id.bustype = BUS_HOST;
+ priv->idev->dev.parent = &wdev->dev;
+ input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_ENABLE);
+ input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_DISABLE);
+
+ ret = input_register_device(priv->idev);
+ if (ret)
+ return ret;
+
+ mutex_init(&priv->notify_lock);
+
+ return 0;
+}
+
+static void lenovo_wmi_remove(struct wmi_device *wdev)
+{
+ struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_destroy(&priv->notify_lock);
+}
+
+static const struct wmi_device_id lenovo_wmi_id_table[] = {
+ { .guid_string = WMI_LENOVO_CAMERABUTTON_EVENT_GUID },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, lenovo_wmi_id_table);
+
+static struct wmi_driver lenovo_wmi_driver = {
+ .driver = {
+ .name = "lenovo-wmi-camera",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = lenovo_wmi_id_table,
+ .no_singleton = true,
+ .probe = lenovo_wmi_probe,
+ .notify = lenovo_wmi_notify,
+ .remove = lenovo_wmi_remove,
+};
+module_wmi_driver(lenovo_wmi_driver);
+
+MODULE_AUTHOR("Ai Chao <aichao@kylinos.cn>");
+MODULE_DESCRIPTION("Lenovo WMI Camera Button Driver");
+MODULE_LICENSE("GPL");
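For context, the notify handler in the new driver above reports each WMI event as a momentary button: key down, sync, key up, sync. The following is an illustrative sketch of that input-reporting pattern, separate from the patch itself; demo_report_button() is a placeholder name, and the intermediate input_sync() is what splits the press and the release into two evdev frames so userspace reliably sees both edges.

/*
 * Illustrative sketch, not part of the patch: the momentary press/release
 * reporting pattern used by lenovo_wmi_notify() above, shown in isolation.
 * demo_report_button() is a placeholder name.
 */
#include <linux/input.h>

static void demo_report_button(struct input_dev *idev, unsigned int keycode)
{
	input_report_key(idev, keycode, 1);	/* key down */
	input_sync(idev);			/* frame 1: press delivered */
	input_report_key(idev, keycode, 0);	/* key up */
	input_sync(idev);			/* frame 2: release delivered */
}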
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
new file mode 100644
index 000000000000..d525bdc8ca9b
--- /dev/null
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for the custom fast charging protocol found on the Lenovo Yoga
+ * Tablet 2 1380F / 1380L models.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/extcon.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/serdev.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "serdev_helpers.h"
+
+#define YT2_1380_FC_PDEV_NAME "lenovo-yoga-tab2-pro-1380-fastcharger"
+#define YT2_1380_FC_SERDEV_CTRL "serial0"
+#define YT2_1380_FC_SERDEV_NAME "serial0-0"
+#define YT2_1380_FC_EXTCON_NAME "i2c-lc824206xa"
+
+#define YT2_1380_FC_MAX_TRIES 5
+#define YT2_1380_FC_PIN_SW_DELAY_US (10 * USEC_PER_MSEC)
+#define YT2_1380_FC_UART_DRAIN_DELAY_US (50 * USEC_PER_MSEC)
+#define YT2_1380_FC_VOLT_SW_DELAY_US (1000 * USEC_PER_MSEC)
+
+struct yt2_1380_fc {
+ struct device *dev;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state;
+ struct pinctrl_state *uart_state;
+ struct gpio_desc *uart3_txd;
+ struct gpio_desc *uart3_rxd;
+ struct extcon_dev *extcon;
+ struct notifier_block nb;
+ struct work_struct work;
+ bool fast_charging;
+};
+
+static int yt2_1380_fc_set_gpio_mode(struct yt2_1380_fc *fc, bool enable)
+{
+ struct pinctrl_state *state = enable ? fc->gpio_state : fc->uart_state;
+ int ret;
+
+ ret = pinctrl_select_state(fc->pinctrl, state);
+ if (ret) {
+ dev_err(fc->dev, "Error %d setting pinctrl state\n", ret);
+ return ret;
+ }
+
+ fsleep(YT2_1380_FC_PIN_SW_DELAY_US);
+ return 0;
+}
+
+static bool yt2_1380_fc_dedicated_charger_connected(struct yt2_1380_fc *fc)
+{
+ return extcon_get_state(fc->extcon, EXTCON_CHG_USB_DCP) > 0;
+}
+
+static bool yt2_1380_fc_fast_charger_connected(struct yt2_1380_fc *fc)
+{
+ return extcon_get_state(fc->extcon, EXTCON_CHG_USB_FAST) > 0;
+}
+
+static void yt2_1380_fc_worker(struct work_struct *work)
+{
+ struct yt2_1380_fc *fc = container_of(work, struct yt2_1380_fc, work);
+ int i, ret;
+
+ /* Do nothing if already fast charging */
+ if (yt2_1380_fc_fast_charger_connected(fc))
+ return;
+
+ for (i = 0; i < YT2_1380_FC_MAX_TRIES; i++) {
+ /* Set pins to UART mode (for charger disconnect and retries) */
+ ret = yt2_1380_fc_set_gpio_mode(fc, false);
+ if (ret)
+ return;
+
+ /* Only try 12V charging if a dedicated charger is detected */
+ if (!yt2_1380_fc_dedicated_charger_connected(fc))
+ return;
+
+ /* Send the command to switch to 12V charging */
+ ret = serdev_device_write_buf(to_serdev_device(fc->dev), "SC", strlen("SC"));
+ if (ret != strlen("SC")) {
+ dev_err(fc->dev, "Error %d writing to uart\n", ret);
+ return;
+ }
+
+ fsleep(YT2_1380_FC_UART_DRAIN_DELAY_US);
+
+ /* Re-check a charger is still connected */
+ if (!yt2_1380_fc_dedicated_charger_connected(fc))
+ return;
+
+ /*
+ * Now switch the lines to GPIO (output, high). The charger
+ * expects the lines to be driven high after the command.
+ * Presumably this is used to detect the tablet getting
+ * unplugged (to switch back to 5V output on unplug).
+ */
+ ret = yt2_1380_fc_set_gpio_mode(fc, true);
+ if (ret)
+ return;
+
+ fsleep(YT2_1380_FC_VOLT_SW_DELAY_US);
+
+ if (yt2_1380_fc_fast_charger_connected(fc))
+ return; /* Success */
+ }
+
+ dev_dbg(fc->dev, "Failed to switch to 12V charging (not the original charger?)\n");
+ /* Failed to enable 12V fast charging, reset pins to default UART mode */
+ yt2_1380_fc_set_gpio_mode(fc, false);
+}
+
+static int yt2_1380_fc_extcon_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct yt2_1380_fc *fc = container_of(nb, struct yt2_1380_fc, nb);
+
+ schedule_work(&fc->work);
+ return NOTIFY_OK;
+}
+
+static size_t yt2_1380_fc_receive(struct serdev_device *serdev, const u8 *data, size_t len)
+{
+ /*
+ * Since the USB data lines are shorted for DCP detection, echoes of
+ * the "SC" command sent in yt2_1380_fc_worker() will be received.
+ */
+ dev_dbg(&serdev->dev, "recv: %*ph\n", (int)len, data);
+ return len;
+}
+
+static const struct serdev_device_ops yt2_1380_fc_serdev_ops = {
+ .receive_buf = yt2_1380_fc_receive,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct yt2_1380_fc *fc;
+ int ret;
+
+ fc = devm_kzalloc(dev, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return -ENOMEM;
+
+ fc->dev = dev;
+ fc->nb.notifier_call = yt2_1380_fc_extcon_evt;
+ INIT_WORK(&fc->work, yt2_1380_fc_worker);
+
+ /*
+ * Do this first since it may return -EPROBE_DEFER.
+ * There is no extcon_put(), so there is no need to free this.
+ */
+ fc->extcon = extcon_get_extcon_dev(YT2_1380_FC_EXTCON_NAME);
+ if (IS_ERR(fc->extcon))
+ return dev_err_probe(dev, PTR_ERR(fc->extcon), "getting extcon\n");
+
+ fc->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(fc->pinctrl))
+ return dev_err_probe(dev, PTR_ERR(fc->pinctrl), "getting pinctrl\n");
+
+ /*
+ * To switch the UART3 pins connected to the USB data lines between
+ * UART and GPIO modes.
+ */
+ fc->gpio_state = pinctrl_lookup_state(fc->pinctrl, "uart3_gpio");
+ fc->uart_state = pinctrl_lookup_state(fc->pinctrl, "uart3_uart");
+ if (IS_ERR(fc->gpio_state) || IS_ERR(fc->uart_state))
+ return dev_err_probe(dev, -EINVAL, "getting pinctrl states\n");
+
+ ret = yt2_1380_fc_set_gpio_mode(fc, true);
+ if (ret)
+ return ret;
+
+ fc->uart3_txd = devm_gpiod_get(dev, "uart3_txd", GPIOD_OUT_HIGH);
+ if (IS_ERR(fc->uart3_txd))
+ return dev_err_probe(dev, PTR_ERR(fc->uart3_txd), "getting uart3_txd gpio\n");
+
+ fc->uart3_rxd = devm_gpiod_get(dev, "uart3_rxd", GPIOD_OUT_HIGH);
+ if (IS_ERR(fc->uart3_rxd))
+ return dev_err_probe(dev, PTR_ERR(fc->uart3_rxd), "getting uart3_rxd gpio\n");
+
+ ret = yt2_1380_fc_set_gpio_mode(fc, false);
+ if (ret)
+ return ret;
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "opening UART device\n");
+
+ serdev_device_set_baudrate(serdev, 600);
+ serdev_device_set_flow_control(serdev, false);
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
+ ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
+ if (ret)
+ return dev_err_probe(dev, ret, "registering extcon notifier\n");
+
+ /* In case the extcon already has detected a DCP charger */
+ schedule_work(&fc->work);
+
+ return 0;
+}
+
+struct serdev_device_driver yt2_1380_fc_serdev_driver = {
+ .probe = yt2_1380_fc_serdev_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static const struct pinctrl_map yt2_1380_fc_pinctrl_map[] = {
+ PIN_MAP_MUX_GROUP(YT2_1380_FC_SERDEV_NAME, "uart3_uart",
+ "INT33FC:00", "uart3_grp", "uart"),
+ PIN_MAP_MUX_GROUP(YT2_1380_FC_SERDEV_NAME, "uart3_gpio",
+ "INT33FC:00", "uart3_grp_gpio", "gpio"),
+};
+
+static int yt2_1380_fc_pdev_probe(struct platform_device *pdev)
+{
+ struct serdev_device *serdev;
+ struct device *ctrl_dev;
+ int ret;
+
+ /* Register pinctrl mappings for setting the UART3 pins mode */
+ ret = pinctrl_register_mappings(yt2_1380_fc_pinctrl_map,
+ ARRAY_SIZE(yt2_1380_fc_pinctrl_map));
+ if (ret)
+ return ret;
+
+ /* And create the serdev to talk to the charger over the UART3 pins */
+ ctrl_dev = get_serdev_controller("PNP0501", "1", 0, YT2_1380_FC_SERDEV_CTRL);
+ if (IS_ERR(ctrl_dev)) {
+ ret = PTR_ERR(ctrl_dev);
+ goto out_pinctrl_unregister_mappings;
+ }
+
+ serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
+ put_device(ctrl_dev);
+ if (!serdev) {
+ ret = -ENOMEM;
+ goto out_pinctrl_unregister_mappings;
+ }
+
+ ret = serdev_device_add(serdev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "adding serdev\n");
+ serdev_device_put(serdev);
+ goto out_pinctrl_unregister_mappings;
+ }
+
+ /*
+ * serdev device <-> driver matching relies on OF or ACPI matches and
+ * neither is available here, so manually bind the driver.
+ */
+ ret = device_driver_attach(&yt2_1380_fc_serdev_driver.driver, &serdev->dev);
+ if (ret) {
+ /* device_driver_attach() maps EPROBE_DEFER to EAGAIN, map it back */
+ ret = (ret == -EAGAIN) ? -EPROBE_DEFER : ret;
+ dev_err_probe(&pdev->dev, ret, "attaching serdev driver\n");
+ goto out_serdev_device_remove;
+ }
+
+ /* So that yt2_1380_fc_pdev_remove() can remove the serdev */
+ platform_set_drvdata(pdev, serdev);
+ return 0;
+
+out_serdev_device_remove:
+ serdev_device_remove(serdev);
+out_pinctrl_unregister_mappings:
+ pinctrl_unregister_mappings(yt2_1380_fc_pinctrl_map);
+ return ret;
+}
+
+static void yt2_1380_fc_pdev_remove(struct platform_device *pdev)
+{
+ struct serdev_device *serdev = platform_get_drvdata(pdev);
+
+ serdev_device_remove(serdev);
+ pinctrl_unregister_mappings(yt2_1380_fc_pinctrl_map);
+}
+
+static struct platform_driver yt2_1380_fc_pdev_driver = {
+ .probe = yt2_1380_fc_pdev_probe,
+ .remove_new = yt2_1380_fc_pdev_remove,
+ .driver = {
+ .name = YT2_1380_FC_PDEV_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init yt2_1380_fc_module_init(void)
+{
+ int ret;
+
+ /*
+ * serdev driver MUST be registered first because the pdev driver calls
+ * device_driver_attach() on the serdev, serdev-driver pair.
+ */
+ ret = serdev_device_driver_register(&yt2_1380_fc_serdev_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&yt2_1380_fc_pdev_driver);
+ if (ret)
+ serdev_device_driver_unregister(&yt2_1380_fc_serdev_driver);
+
+ return ret;
+}
+module_init(yt2_1380_fc_module_init);
+
+static void __exit yt2_1380_fc_module_exit(void)
+{
+ platform_driver_unregister(&yt2_1380_fc_pdev_driver);
+ serdev_device_driver_unregister(&yt2_1380_fc_serdev_driver);
+}
+module_exit(yt2_1380_fc_module_exit);
+
+MODULE_ALIAS("platform:" YT2_1380_FC_PDEV_NAME);
+MODULE_DESCRIPTION("Lenovo Yoga Tablet 2 1380 fast charge driver");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
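In the fast-charger driver above, the extcon notifier callback only schedules a work item; the actual 12V handshake runs from the workqueue because it sleeps for long stretches (fsleep() delays of up to a second) and writes to the UART. Below is a minimal sketch of that notifier-plus-workqueue pattern, not part of the patch; the demo_* names are placeholders.

/*
 * Illustrative sketch, not part of the patch: the notifier-schedules-work
 * pattern used by yt2_1380_fc_extcon_evt()/yt2_1380_fc_worker() above.
 * The demo_* names are placeholders.
 */
#include <linux/container_of.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct notifier_block nb;
	struct work_struct work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	/* Long-running, sleeping work (UART writes, fsleep()) goes here. */
	(void)ctx;
}

static int demo_notifier_call(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct demo_ctx *ctx = container_of(nb, struct demo_ctx, nb);

	schedule_work(&ctx->work);	/* keep the callback itself short */
	return NOTIFY_OK;
}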
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ad3c39e9e9f5..d0fee5d375d7 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -736,7 +736,7 @@ static int acpi_add(struct acpi_device *device)
default:
year = 2019;
}
- pr_info("product: %s year: %d\n", product, year);
+ pr_info("product: %s year: %d\n", product ?: "unknown", year);
if (year >= 2019)
battery_limit_use_wmbb = 1;
@@ -790,7 +790,6 @@ static struct acpi_driver acpi_driver = {
.remove = acpi_remove,
.notify = acpi_notify,
},
- .owner = THIS_MODULE,
};
static int __init acpi_init(void)
diff --git a/drivers/platform/x86/meegopad_anx7428.c b/drivers/platform/x86/meegopad_anx7428.c
new file mode 100644
index 000000000000..b2c4d4f526c4
--- /dev/null
+++ b/drivers/platform/x86/meegopad_anx7428.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver to power on the Analogix ANX7428 USB Type-C crosspoint switch
+ * on MeeGoPad top-set boxes.
+ *
+ * The MeeGoPad T8 and T9 are Cherry Trail top-set boxes which
+ * use an ANX7428 to provide a Type-C port with USB3.1 Gen 1 and
+ * DisplayPort over Type-C alternate mode support.
+ *
+ * The ANX7428 has a microcontroller which takes care of the PD
+ * negotiation and automatically sets the builtin Crosspoint Switch
+ * to send the right signal to the 4 highspeed pairs of the Type-C
+ * connector. It also takes care of HPD and AUX channel routing for
+ * DP alternate mode.
+ *
+ * IOW the ANX7428 operates fully autonomously and to the x5-Z8350 SoC
+ * things look like there simply is a USB-3 Type-A connector and a
+ * separate DisplayPort connector. Except that the BIOS does not
+ * power on the ANX7428 at boot. This driver takes care of powering
+ * on the ANX7428.
+ *
+ * It should be possible to tell the micro-controller which data- and/or
+ * power-role to negotiate and to swap the role(s) after negotiation
+ * but the MeeGoPad top-set boxes always draw their power from a separate
+ * power-connector and they only support USB host-mode. So this functionality
+ * is unnecessary and due to lack of documentation this is tricky to support.
+ *
+ * For a more complete ANX7428 driver see drivers/usb/misc/anx7418/ of
+ * the LineageOS kernel for the LG G5 (International) aka the LG H850:
+ * https://github.com/LineageOS/android_kernel_lge_msm8996/
+ *
+ * (C) Copyright 2024 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/* Register addresses and fields */
+#define VENDOR_ID 0x00
+#define DEVICE_ID 0x02
+
+#define TX_STATUS 0x16
+#define STATUS_SUCCESS BIT(0)
+#define STATUS_ERROR BIT(1)
+#define OCM_STARTUP BIT(7)
+
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force the driver to probe on unknown boards");
+
+static const struct acpi_gpio_params enable_gpio = { 0, 0, false };
+static const struct acpi_gpio_params reset_gpio = { 1, 0, true };
+
+static const struct acpi_gpio_mapping meegopad_anx7428_gpios[] = {
+ { "enable-gpios", &enable_gpio, 1 },
+ { "reset-gpios", &reset_gpio, 1 },
+ { }
+};
+
+static const struct dmi_system_id meegopad_anx7428_ids[] = {
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ },
+ { }
+};
+
+static int anx7428_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gpio_desc *gpio;
+ int ret, val;
+
+ if (!dmi_check_system(meegopad_anx7428_ids) && !force) {
+ dev_warn(dev, "Not probing unknown board, pass meegopad_anx7428.force=1 to probe\n");
+ return -ENODEV;
+ }
+
+ ret = devm_acpi_dev_add_driver_gpios(dev, meegopad_anx7428_gpios);
+ if (ret)
+ return ret;
+
+ /*
+ * Set GPIOs to desired values while getting them, they are not needed
+ * afterwards. Ordering and delays come from android_kernel_lge_msm8996.
+ */
+ gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return dev_err_probe(dev, PTR_ERR(gpio), "getting enable GPIO\n");
+
+ fsleep(10000);
+
+ gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return dev_err_probe(dev, PTR_ERR(gpio), "getting reset GPIO\n");
+
+ /* Wait for the OCM (On Chip Microcontroller) to start */
+ ret = read_poll_timeout(i2c_smbus_read_byte_data, val,
+ val >= 0 && (val & OCM_STARTUP),
+ 5000, 50000, true, client, TX_STATUS);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "On Chip Microcontroller did not start, status: 0x%02x\n",
+ val);
+
+ ret = i2c_smbus_read_word_data(client, VENDOR_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "reading vendor-id register\n");
+ val = ret;
+
+ ret = i2c_smbus_read_word_data(client, DEVICE_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "reading device-id register\n");
+
+ dev_dbg(dev, "Powered on ANX7428 id %04x:%04x\n", val, ret);
+ return 0;
+}
+
+static const struct acpi_device_id anx7428_acpi_match[] = {
+ { "ANXO7418" }, /* ACPI says 7418 (max 2 DP lanes version) but HW is 7428 */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, anx7428_acpi_match);
+
+static struct i2c_driver anx7428_driver = {
+ .driver = {
+ .name = "meegopad_anx7428",
+ .acpi_match_table = anx7428_acpi_match,
+ },
+ .probe = anx7428_probe,
+};
+module_i2c_driver(anx7428_driver);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("MeeGoPad ANX7428 driver");
+MODULE_LICENSE("GPL");
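The read_poll_timeout() call in anx7428_probe() above polls the TX_STATUS register every 5 ms for up to 50 ms, sleeping before the first read. As a rough illustration only (not part of the patch), the loop below spells out approximately what that macro does, reusing the TX_STATUS and OCM_STARTUP definitions from the file above; the real macro in <linux/iopoll.h> additionally performs one last read after the deadline before giving up.

/*
 * Illustrative sketch, not part of the patch: approximately what the
 * read_poll_timeout() call above expands to, reusing the TX_STATUS and
 * OCM_STARTUP definitions from this file.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>

static int demo_wait_for_ocm(struct i2c_client *client)
{
	unsigned long deadline = jiffies + usecs_to_jiffies(50000);
	int val;

	for (;;) {
		fsleep(5000);			/* sleep_before_read == true */
		val = i2c_smbus_read_byte_data(client, TX_STATUS);
		if (val >= 0 && (val & OCM_STARTUP))
			return 0;		/* condition met */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
	}
}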
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f4c6c36e05a5..e5391a37014d 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -317,7 +317,7 @@ static ssize_t show_wlan(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", enabled);
+ return sysfs_emit(buf, "%i\n", enabled);
}
static ssize_t store_wlan(struct device *dev,
@@ -341,7 +341,7 @@ static ssize_t show_bluetooth(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", enabled);
+ return sysfs_emit(buf, "%i\n", enabled);
}
static ssize_t store_bluetooth(struct device *dev,
@@ -364,7 +364,7 @@ static ssize_t show_threeg(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", threeg_s);
+ return sysfs_emit(buf, "%i\n", threeg_s);
}
static ssize_t store_threeg(struct device *dev,
@@ -383,7 +383,7 @@ static ssize_t show_lcd_level(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", ret);
+ return sysfs_emit(buf, "%i\n", ret);
}
static ssize_t store_lcd_level(struct device *dev,
@@ -413,7 +413,7 @@ static ssize_t show_auto_brightness(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", ret);
+ return sysfs_emit(buf, "%i\n", ret);
}
static ssize_t store_auto_brightness(struct device *dev,
@@ -443,7 +443,7 @@ static ssize_t show_touchpad(struct device *dev,
if (result < 0)
return result;
- return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
+ return sysfs_emit(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
}
static ssize_t show_turbo(struct device *dev,
@@ -457,7 +457,7 @@ static ssize_t show_turbo(struct device *dev,
if (result < 0)
return result;
- return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
+ return sysfs_emit(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
}
static ssize_t show_eco(struct device *dev,
@@ -471,7 +471,7 @@ static ssize_t show_eco(struct device *dev,
if (result < 0)
return result;
- return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
+ return sysfs_emit(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
}
static ssize_t show_turbo_cooldown(struct device *dev,
@@ -485,7 +485,7 @@ static ssize_t show_turbo_cooldown(struct device *dev,
if (result < 0)
return result;
- return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
+ return sysfs_emit(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
(!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
}
@@ -500,7 +500,7 @@ static ssize_t show_auto_fan(struct device *dev,
if (result < 0)
return result;
- return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
+ return sysfs_emit(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
}
static ssize_t store_auto_fan(struct device *dev,
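The msi-laptop hunks above convert sysfs show() callbacks from sprintf() to sysfs_emit(), the recommended helper for sysfs output: it clamps the result to PAGE_SIZE and warns when handed anything other than the page-sized buffer sysfs provides. A minimal illustrative show() callback using it is sketched below; it is separate from the patch, and the "demo" attribute and its value are placeholders.

/*
 * Illustrative sketch, not part of the patch: a minimal sysfs show()
 * callback using sysfs_emit(). The "demo" attribute and its value are
 * placeholders.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int value = 42;		/* placeholder value */

	return sysfs_emit(buf, "%d\n", value);
}
static DEVICE_ATTR_RO(demo);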
diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c
new file mode 100644
index 000000000000..436fb91a47db
--- /dev/null
+++ b/drivers/platform/x86/msi-wmi-platform.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for WMI platform features on MSI notebooks.
+ *
+ * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "msi-wmi-platform"
+
+#define MSI_PLATFORM_GUID "ABBC0F6E-8EA1-11d1-00A0-C90629100000"
+
+#define MSI_WMI_PLATFORM_INTERFACE_VERSION 2
+
+#define MSI_PLATFORM_WMI_MAJOR_OFFSET 1
+#define MSI_PLATFORM_WMI_MINOR_OFFSET 2
+
+#define MSI_PLATFORM_EC_FLAGS_OFFSET 1
+#define MSI_PLATFORM_EC_MINOR_MASK GENMASK(3, 0)
+#define MSI_PLATFORM_EC_MAJOR_MASK GENMASK(5, 4)
+#define MSI_PLATFORM_EC_CHANGED_PAGE BIT(6)
+#define MSI_PLATFORM_EC_IS_TIGERLAKE BIT(7)
+#define MSI_PLATFORM_EC_VERSION_OFFSET 2
+
+static bool force;
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported WMI interface versions");
+
+enum msi_wmi_platform_method {
+ MSI_PLATFORM_GET_PACKAGE = 0x01,
+ MSI_PLATFORM_SET_PACKAGE = 0x02,
+ MSI_PLATFORM_GET_EC = 0x03,
+ MSI_PLATFORM_SET_EC = 0x04,
+ MSI_PLATFORM_GET_BIOS = 0x05,
+ MSI_PLATFORM_SET_BIOS = 0x06,
+ MSI_PLATFORM_GET_SMBUS = 0x07,
+ MSI_PLATFORM_SET_SMBUS = 0x08,
+ MSI_PLATFORM_GET_MASTER_BATTERY = 0x09,
+ MSI_PLATFORM_SET_MASTER_BATTERY = 0x0a,
+ MSI_PLATFORM_GET_SLAVE_BATTERY = 0x0b,
+ MSI_PLATFORM_SET_SLAVE_BATTERY = 0x0c,
+ MSI_PLATFORM_GET_TEMPERATURE = 0x0d,
+ MSI_PLATFORM_SET_TEMPERATURE = 0x0e,
+ MSI_PLATFORM_GET_THERMAL = 0x0f,
+ MSI_PLATFORM_SET_THERMAL = 0x10,
+ MSI_PLATFORM_GET_FAN = 0x11,
+ MSI_PLATFORM_SET_FAN = 0x12,
+ MSI_PLATFORM_GET_DEVICE = 0x13,
+ MSI_PLATFORM_SET_DEVICE = 0x14,
+ MSI_PLATFORM_GET_POWER = 0x15,
+ MSI_PLATFORM_SET_POWER = 0x16,
+ MSI_PLATFORM_GET_DEBUG = 0x17,
+ MSI_PLATFORM_SET_DEBUG = 0x18,
+ MSI_PLATFORM_GET_AP = 0x19,
+ MSI_PLATFORM_SET_AP = 0x1a,
+ MSI_PLATFORM_GET_DATA = 0x1b,
+ MSI_PLATFORM_SET_DATA = 0x1c,
+ MSI_PLATFORM_GET_WMI = 0x1d,
+};
+
+struct msi_wmi_platform_debugfs_data {
+ struct wmi_device *wdev;
+ enum msi_wmi_platform_method method;
+ struct rw_semaphore buffer_lock; /* Protects debugfs buffer */
+ size_t length;
+ u8 buffer[32];
+};
+
+static const char * const msi_wmi_platform_debugfs_names[] = {
+ "get_package",
+ "set_package",
+ "get_ec",
+ "set_ec",
+ "get_bios",
+ "set_bios",
+ "get_smbus",
+ "set_smbus",
+ "get_master_battery",
+ "set_master_battery",
+ "get_slave_battery",
+ "set_slave_battery",
+ "get_temperature",
+ "set_temperature",
+ "get_thermal",
+ "set_thermal",
+ "get_fan",
+ "set_fan",
+ "get_device",
+ "set_device",
+ "get_power",
+ "set_power",
+ "get_debug",
+ "set_debug",
+ "get_ap",
+ "set_ap",
+ "get_data",
+ "set_data",
+ "get_wmi"
+};
+
+static int msi_wmi_platform_parse_buffer(union acpi_object *obj, u8 *output, size_t length)
+{
+ if (obj->type != ACPI_TYPE_BUFFER)
+ return -ENOMSG;
+
+ if (obj->buffer.length != length)
+ return -EPROTO;
+
+ if (!obj->buffer.pointer[0])
+ return -EIO;
+
+ memcpy(output, obj->buffer.pointer, obj->buffer.length);
+
+ return 0;
+}
+
+static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform_method method,
+ u8 *input, size_t input_length, u8 *output, size_t output_length)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in = {
+ .length = input_length,
+ .pointer = input
+ };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret;
+
+ if (!input_length || !output_length)
+ return -EINVAL;
+
+ status = wmidev_evaluate_method(wdev, 0x0, method, &in, &out);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = out.pointer;
+ if (!obj)
+ return -ENODATA;
+
+ ret = msi_wmi_platform_parse_buffer(obj, output, output_length);
+ kfree(obj);
+
+ return ret;
+}
+
+static umode_t msi_wmi_platform_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int msi_wmi_platform_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct wmi_device *wdev = dev_get_drvdata(dev);
+ u8 input[32] = { 0 };
+ u8 output[32];
+ u16 data;
+ int ret;
+
+ ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_FAN, input, sizeof(input), output,
+ sizeof(output));
+ if (ret < 0)
+ return ret;
+
+ data = get_unaligned_be16(&output[channel * 2 + 1]);
+ if (!data)
+ *val = 0;
+ else
+ *val = 480000 / data;
+
+ return 0;
+}
+
+static const struct hwmon_ops msi_wmi_platform_ops = {
+ .is_visible = msi_wmi_platform_is_visible,
+ .read = msi_wmi_platform_read,
+};
+
+static const struct hwmon_channel_info * const msi_wmi_platform_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info msi_wmi_platform_chip_info = {
+ .ops = &msi_wmi_platform_ops,
+ .info = msi_wmi_platform_info,
+};
+
+static ssize_t msi_wmi_platform_write(struct file *fp, const char __user *input, size_t length,
+ loff_t *offset)
+{
+ struct seq_file *seq = fp->private_data;
+ struct msi_wmi_platform_debugfs_data *data = seq->private;
+ u8 payload[32] = { };
+ ssize_t ret;
+
+ /* Do not allow partial writes */
+ if (*offset != 0)
+ return -EINVAL;
+
+ /* Do not allow incomplete command buffers */
+ if (length != data->length)
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(payload, sizeof(payload), offset, input, length);
+ if (ret < 0)
+ return ret;
+
+ down_write(&data->buffer_lock);
+ ret = msi_wmi_platform_query(data->wdev, data->method, payload, data->length, data->buffer,
+ data->length);
+ up_write(&data->buffer_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return length;
+}
+
+static int msi_wmi_platform_show(struct seq_file *seq, void *p)
+{
+ struct msi_wmi_platform_debugfs_data *data = seq->private;
+ int ret;
+
+ down_read(&data->buffer_lock);
+ ret = seq_write(seq, data->buffer, data->length);
+ up_read(&data->buffer_lock);
+
+ return ret;
+}
+
+static int msi_wmi_platform_open(struct inode *inode, struct file *fp)
+{
+ struct msi_wmi_platform_debugfs_data *data = inode->i_private;
+
+ /* The seq_file uses the last byte of the buffer for detecting buffer overflows */
+ return single_open_size(fp, msi_wmi_platform_show, data, data->length + 1);
+}
+
+static const struct file_operations msi_wmi_platform_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = msi_wmi_platform_open,
+ .read = seq_read,
+ .write = msi_wmi_platform_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void msi_wmi_platform_debugfs_remove(void *data)
+{
+ struct dentry *dir = data;
+
+ debugfs_remove_recursive(dir);
+}
+
+static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry *dir,
+ const char *name, enum msi_wmi_platform_method method)
+{
+ struct msi_wmi_platform_debugfs_data *data;
+ struct dentry *entry;
+
+ data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->wdev = wdev;
+ data->method = method;
+ init_rwsem(&data->buffer_lock);
+
+ /* The ACPI firmware for now always requires a 32 byte input buffer due to
+ * a peculiarity in how Windows handles the CreateByteField() ACPI operator.
+ */
+ data->length = 32;
+
+ entry = debugfs_create_file(name, 0600, dir, data, &msi_wmi_platform_debugfs_fops);
+ if (IS_ERR(entry))
+ devm_kfree(&wdev->dev, data);
+}
+
+static void msi_wmi_platform_debugfs_init(struct wmi_device *wdev)
+{
+ struct dentry *dir;
+ char dir_name[64];
+ int ret, method;
+
+ scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev));
+
+ dir = debugfs_create_dir(dir_name, NULL);
+ if (IS_ERR(dir))
+ return;
+
+ ret = devm_add_action_or_reset(&wdev->dev, msi_wmi_platform_debugfs_remove, dir);
+ if (ret < 0)
+ return;
+
+ for (method = MSI_PLATFORM_GET_PACKAGE; method <= MSI_PLATFORM_GET_WMI; method++)
+ msi_wmi_platform_debugfs_add(wdev, dir, msi_wmi_platform_debugfs_names[method - 1],
+ method);
+}
+
+static int msi_wmi_platform_hwmon_init(struct wmi_device *wdev)
+{
+ struct device *hdev;
+
+ hdev = devm_hwmon_device_register_with_info(&wdev->dev, "msi_wmi_platform", wdev,
+ &msi_wmi_platform_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hdev);
+}
+
+static int msi_wmi_platform_ec_init(struct wmi_device *wdev)
+{
+ u8 input[32] = { 0 };
+ u8 output[32];
+ u8 flags;
+ int ret;
+
+ ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_EC, input, sizeof(input), output,
+ sizeof(output));
+ if (ret < 0)
+ return ret;
+
+ flags = output[MSI_PLATFORM_EC_FLAGS_OFFSET];
+
+ dev_dbg(&wdev->dev, "EC RAM version %lu.%lu\n",
+ FIELD_GET(MSI_PLATFORM_EC_MAJOR_MASK, flags),
+ FIELD_GET(MSI_PLATFORM_EC_MINOR_MASK, flags));
+ dev_dbg(&wdev->dev, "EC firmware version %.28s\n",
+ &output[MSI_PLATFORM_EC_VERSION_OFFSET]);
+
+ if (!(flags & MSI_PLATFORM_EC_IS_TIGERLAKE)) {
+ if (!force)
+ return -ENODEV;
+
+ dev_warn(&wdev->dev, "Loading on a non-Tigerlake platform\n");
+ }
+
+ return 0;
+}
+
+static int msi_wmi_platform_init(struct wmi_device *wdev)
+{
+ u8 input[32] = { 0 };
+ u8 output[32];
+ int ret;
+
+ ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_WMI, input, sizeof(input), output,
+ sizeof(output));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&wdev->dev, "WMI interface version %u.%u\n",
+ output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
+ output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
+
+ if (output[MSI_PLATFORM_WMI_MAJOR_OFFSET] != MSI_WMI_PLATFORM_INTERFACE_VERSION) {
+ if (!force)
+ return -ENODEV;
+
+ dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u.%u)\n",
+ output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
+ output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
+ }
+
+ return 0;
+}
+
+static int msi_wmi_platform_probe(struct wmi_device *wdev, const void *context)
+{
+ int ret;
+
+ ret = msi_wmi_platform_init(wdev);
+ if (ret < 0)
+ return ret;
+
+ ret = msi_wmi_platform_ec_init(wdev);
+ if (ret < 0)
+ return ret;
+
+ msi_wmi_platform_debugfs_init(wdev);
+
+ return msi_wmi_platform_hwmon_init(wdev);
+}
+
+static const struct wmi_device_id msi_wmi_platform_id_table[] = {
+ { MSI_PLATFORM_GUID, NULL },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, msi_wmi_platform_id_table);
+
+static struct wmi_driver msi_wmi_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = msi_wmi_platform_id_table,
+ .probe = msi_wmi_platform_probe,
+ .no_singleton = true,
+};
+module_wmi_driver(msi_wmi_platform_driver);
+
+MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>");
+MODULE_DESCRIPTION("MSI WMI platform features");
+MODULE_LICENSE("GPL");
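For reference, msi_wmi_platform_read() above turns the GET_FAN output into a hwmon fan reading by taking a big-endian 16-bit value starting at byte 1 of the channel's slot and reporting 480000 divided by it, so a larger raw value means a slower fan; the patch itself does not spell out the hardware meaning of the 480000 constant. The sketch below, which is not part of the patch and uses hypothetical numbers, simply restates that arithmetic.

/*
 * Illustrative sketch, not part of the patch: the fan conversion done in
 * msi_wmi_platform_read(), restated with hypothetical numbers.
 */
#include <asm/unaligned.h>
#include <linux/types.h>

static long demo_fan_input(const u8 *output, int channel)
{
	u16 raw = get_unaligned_be16(&output[channel * 2 + 1]);

	if (!raw)
		return 0;		/* no reading / fan stopped */

	return 480000 / raw;		/* e.g. raw == 2400 -> 200 */
}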
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 3d66e1d4eb1f..3bf5d2243491 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -43,7 +43,7 @@ struct p2sb_res_cache {
static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
-static int p2sb_get_devfn(unsigned int *devfn)
+static void p2sb_get_devfn(unsigned int *devfn)
{
unsigned int fn = P2SB_DEVFN_DEFAULT;
const struct x86_cpu_id *id;
@@ -53,15 +53,11 @@ static int p2sb_get_devfn(unsigned int *devfn)
fn = (unsigned int)id->driver_data;
*devfn = fn;
- return 0;
}
-static bool p2sb_valid_resource(struct resource *res)
+static bool p2sb_valid_resource(const struct resource *res)
{
- if (res->flags)
- return true;
-
- return false;
+ return res->flags & ~IORESOURCE_UNSET;
}
/* Copy resource from the first BAR of the device in question */
@@ -135,9 +131,7 @@ static int p2sb_cache_resources(void)
int ret;
/* Get devfn for P2SB device itself */
- ret = p2sb_get_devfn(&devfn_p2sb);
- if (ret)
- return ret;
+ p2sb_get_devfn(&devfn_p2sb);
bus = p2sb_get_bus(NULL);
if (!bus)
@@ -194,17 +188,13 @@ static int p2sb_cache_resources(void)
int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
struct p2sb_res_cache *cache;
- int ret;
bus = p2sb_get_bus(bus);
if (!bus)
return -ENODEV;
- if (!devfn) {
- ret = p2sb_get_devfn(&devfn);
- if (ret)
- return ret;
- }
+ if (!devfn)
+ p2sb_get_devfn(&devfn);
cache = &p2sb_resources[PCI_FUNC(devfn)];
if (cache->bus_dev_id != bus->dev.id)
@@ -220,16 +210,20 @@ EXPORT_SYMBOL_GPL(p2sb_bar);
static int __init p2sb_fs_init(void)
{
- p2sb_cache_resources();
- return 0;
+ return p2sb_cache_resources();
}
/*
- * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
- * not be locked in sysfs pci bus rescan path because of deadlock. To
- * avoid the deadlock, access to P2SB devices with the lock at an early
- * step in kernel initialization and cache required resources. This
- * should happen after subsys_initcall which initializes PCI subsystem
- * and before device_initcall which requires P2SB resources.
+ * pci_rescan_remove_lock() can not be locked in sysfs PCI bus rescan path
+ * because of deadlock. To avoid the deadlock, access P2SB devices with the lock
+ * at an early step in kernel initialization and cache required resources.
+ *
+ * We want to run as early as possible. If the P2SB was assigned a bad BAR,
+ * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of
+ * initcall dependencies looks something like this:
+ *
+ * ...
+ * subsys_initcall (pci_subsys_init)
+ * fs_initcall (pcibios_assign_resources)
*/
-fs_initcall(p2sb_fs_init);
+fs_initcall_sync(p2sb_fs_init);
diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c
new file mode 100644
index 000000000000..df496c7e7171
--- /dev/null
+++ b/drivers/platform/x86/quickstart.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ACPI Direct App Launch driver
+ *
+ * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de>
+ * Copyright (C) 2022 Arvid Norlander <lkml@vorpal.se>
+ * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
+ *
+ * Information gathered from disassembled dsdt and from here:
+ * <https://archive.org/details/microsoft-acpi-dirapplaunch>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "quickstart"
+
+/*
+ * There will be two events:
+ * 0x02 - Button was pressed while device was off/sleeping.
+ * 0x80 - Button was pressed while device was up.
+ */
+#define QUICKSTART_EVENT_RUNTIME 0x80
+
+struct quickstart_data {
+ struct device *dev;
+ struct mutex input_lock; /* Protects input sequence during notify */
+ struct input_dev *input_device;
+ char input_name[32];
+ char phys[32];
+ u32 id;
+};
+
+/*
+ * Knowing what these buttons do requires system-specific knowledge.
+ * This could be done by matching on DMI data in a long quirk table.
+ * However, it is easier to leave it up to user space to figure this out.
+ *
+ * Using, for example, a udev hwdb entry, the scancode 0x1 can be remapped suitably.
+ */
+static const struct key_entry quickstart_keymap[] = {
+ { KE_KEY, 0x1, { KEY_UNKNOWN } },
+ { KE_END, 0 },
+};
+
+static ssize_t button_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct quickstart_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", data->id);
+}
+static DEVICE_ATTR_RO(button_id);
+
+static struct attribute *quickstart_attrs[] = {
+ &dev_attr_button_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(quickstart);
+
+static void quickstart_notify(acpi_handle handle, u32 event, void *context)
+{
+ struct quickstart_data *data = context;
+
+ switch (event) {
+ case QUICKSTART_EVENT_RUNTIME:
+ mutex_lock(&data->input_lock);
+ sparse_keymap_report_event(data->input_device, 0x1, 1, true);
+ mutex_unlock(&data->input_lock);
+
+ acpi_bus_generate_netlink_event(DRIVER_NAME, dev_name(data->dev), event, 0);
+ break;
+ default:
+ dev_err(data->dev, FW_INFO "Unexpected ACPI notify event (%u)\n", event);
+ break;
+ }
+}
+
+/*
+ * The GHID ACPI method is used to indicate the "role" of the button.
+ * However, all the meanings of these values are vendor defined.
+ *
+ * We do however expose this value to user space.
+ */
+static int quickstart_get_ghid(struct quickstart_data *data)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle = ACPI_HANDLE(data->dev);
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ /*
+ * This returns a buffer telling the button usage ID,
+ * and triggers pending notify events (The ones before booting).
+ */
+ status = acpi_evaluate_object_typed(handle, "GHID", NULL, &buffer, ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = buffer.pointer;
+ if (!obj)
+ return -ENODATA;
+
+ /*
+ * Quoting the specification:
+ * "The GHID method can return a BYTE, WORD, or DWORD.
+ * The value must be encoded in little-endian byte
+ * order (least significant byte first)."
+ */
+ switch (obj->buffer.length) {
+ case 1:
+ data->id = obj->buffer.pointer[0];
+ break;
+ case 2:
+ data->id = get_unaligned_le16(obj->buffer.pointer);
+ break;
+ case 4:
+ data->id = get_unaligned_le32(obj->buffer.pointer);
+ break;
+ default:
+ dev_err(data->dev,
+ FW_BUG "GHID method returned buffer of unexpected length %u\n",
+ obj->buffer.length);
+ ret = -EIO;
+ break;
+ }
+
+ kfree(obj);
+
+ return ret;
+}
+
+static void quickstart_notify_remove(void *context)
+{
+ struct quickstart_data *data = context;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(data->dev);
+
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, quickstart_notify);
+}
+
+static void quickstart_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
+static int quickstart_probe(struct platform_device *pdev)
+{
+ struct quickstart_data *data;
+ acpi_handle handle;
+ acpi_status status;
+ int ret;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, data);
+
+ mutex_init(&data->input_lock);
+ ret = devm_add_action_or_reset(&pdev->dev, quickstart_mutex_destroy, &data->input_lock);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We have to initialize the device wakeup before evaluating GHID because
+ * doing so will notify the device if the button was used to wake the machine
+ * from S5.
+ */
+ device_init_wakeup(&pdev->dev, true);
+
+ ret = quickstart_get_ghid(data);
+ if (ret < 0)
+ return ret;
+
+ data->input_device = devm_input_allocate_device(&pdev->dev);
+ if (!data->input_device)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(data->input_device, quickstart_keymap, NULL);
+ if (ret < 0)
+ return ret;
+
+ snprintf(data->input_name, sizeof(data->input_name), "Quickstart Button %u", data->id);
+ snprintf(data->phys, sizeof(data->phys), DRIVER_NAME "/input%u", data->id);
+
+ data->input_device->name = data->input_name;
+ data->input_device->phys = data->phys;
+ data->input_device->id.bustype = BUS_HOST;
+
+ ret = input_register_device(data->input_device);
+ if (ret < 0)
+ return ret;
+
+ status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, quickstart_notify, data);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return devm_add_action_or_reset(&pdev->dev, quickstart_notify_remove, data);
+}
+
+static const struct acpi_device_id quickstart_device_ids[] = {
+ { "PNP0C32" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, quickstart_device_ids);
+
+static struct platform_driver quickstart_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .dev_groups = quickstart_groups,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .acpi_match_table = quickstart_device_ids,
+ },
+ .probe = quickstart_probe,
+};
+module_platform_driver(quickstart_platform_driver);
+
+MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>");
+MODULE_AUTHOR("Arvid Norlander <lkml@vorpal.se>");
+MODULE_AUTHOR("Angelo Arrifano");
+MODULE_DESCRIPTION("ACPI Direct App Launch driver");
+MODULE_LICENSE("GPL");
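quickstart_get_ghid() above decodes the GHID buffer as a little-endian BYTE, WORD or DWORD. The sketch below (not part of the patch, using a hypothetical buffer) restates that decode: a 2-byte buffer {0x34, 0x12} yields the usage ID 0x1234.

/*
 * Illustrative sketch, not part of the patch: the little-endian GHID decode
 * from quickstart_get_ghid(), with a hypothetical buffer.
 */
#include <asm/unaligned.h>
#include <linux/types.h>

static u32 demo_decode_ghid(const u8 *buf, u32 len)
{
	switch (len) {
	case 1:
		return buf[0];
	case 2:
		return get_unaligned_le16(buf);	/* {0x34, 0x12} -> 0x1234 */
	case 4:
		return get_unaligned_le32(buf);
	default:
		return 0;			/* unexpected length */
	}
}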
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index b4aa8ba35d2d..3d2f8e758369 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -661,9 +661,9 @@ static ssize_t get_performance_level(struct device *dev,
/* The logic is backwards, yeah, lots of fun... */
for (i = 0; config->performance_levels[i].name; ++i) {
if (sretval.data[0] == config->performance_levels[i].value)
- return sprintf(buf, "%s\n", config->performance_levels[i].name);
+ return sysfs_emit(buf, "%s\n", config->performance_levels[i].name);
}
- return sprintf(buf, "%s\n", "unknown");
+ return sysfs_emit(buf, "%s\n", "unknown");
}
static ssize_t set_performance_level(struct device *dev,
@@ -744,7 +744,7 @@ static ssize_t get_battery_life_extender(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t set_battery_life_extender(struct device *dev,
@@ -813,7 +813,7 @@ static ssize_t get_usb_charge(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t set_usb_charge(struct device *dev,
@@ -878,7 +878,7 @@ static ssize_t get_lid_handling(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t set_lid_handling(struct device *dev,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 40878e327afd..3e94fdd1ea52 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -3303,7 +3303,6 @@ static struct acpi_driver sony_nc_driver = {
.name = SONY_NC_DRIVER_NAME,
.class = SONY_NC_CLASS,
.ids = sony_nc_device_ids,
- .owner = THIS_MODULE,
.ops = {
.add = sony_nc_add,
.remove = sony_nc_remove,
@@ -4844,7 +4843,6 @@ static struct acpi_driver sony_pic_driver = {
.name = SONY_PIC_DRIVER_NAME,
.class = SONY_PIC_CLASS,
.ids = sony_pic_device_ids,
- .owner = THIS_MODULE,
.ops = {
.add = sony_pic_add,
.remove = sony_pic_remove,
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 9345316b45db..0f2264bb7577 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -175,9 +175,6 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
#define TLMI_SMP_PWD BIT(6) /* System Management */
#define TLMI_CERT BIT(7) /* Certificate Based */
-#define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
-#define to_tlmi_attr_setting(kobj) container_of(kobj, struct tlmi_attr_setting, kobj)
-
static const struct tlmi_err_codes tlmi_errs[] = {
{"Success", 0},
{"Not Supported", -EOPNOTSUPP},
@@ -198,6 +195,16 @@ static struct think_lmi tlmi_priv;
static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
+static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj)
+{
+ return container_of(kobj, struct tlmi_pwd_setting, kobj);
+}
+
+static inline struct tlmi_attr_setting *to_tlmi_attr_setting(struct kobject *kobj)
+{
+ return container_of(kobj, struct tlmi_attr_setting, kobj);
+}
+
/* Convert BIOS WMI error string to suitable error code */
static int tlmi_errstr_to_err(const char *errstr)
{
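The think-lmi hunk above replaces the to_tlmi_*() macros with static inline helpers; the inline form gives the compiler a typed struct kobject * parameter to check, where the macro accepted any expression. A minimal sketch of the same pattern on a made-up structure follows; it is illustrative only and not part of the patch.

/*
 * Illustrative sketch, not part of the patch: the macro-to-inline conversion
 * pattern from the hunk above, applied to a made-up structure.
 */
#include <linux/container_of.h>
#include <linux/kobject.h>

struct demo_setting {
	struct kobject kobj;
	int value;
};

static inline struct demo_setting *to_demo_setting(struct kobject *kobj)
{
	return container_of(kobj, struct demo_setting, kobj);
}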
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 82429e59999d..1150a5c434a6 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -45,6 +45,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -157,16 +158,32 @@ enum {
/* HKEY events */
enum tpacpi_hkey_event_t {
- /* Hotkey-related */
- TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
+ /* Original hotkeys */
+ TP_HKEY_EV_ORIG_KEY_START = 0x1001, /* First hotkey (FN+F1) */
TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
TP_HKEY_EV_KBD_LIGHT = 0x1012, /* Thinklight/kbd backlight */
TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
+ TP_HKEY_EV_ORIG_KEY_END = 0x1020, /* Last original hotkey code */
+
+ /* Adaptive keyboard (2014 X1 Carbon) */
+ TP_HKEY_EV_DFR_CHANGE_ROW = 0x1101, /* Change adaptive kbd Fn row mode */
+ TP_HKEY_EV_DFR_S_QUICKVIEW_ROW = 0x1102, /* Set adap. kbd Fn row to function mode */
+ TP_HKEY_EV_ADAPTIVE_KEY_START = 0x1103, /* First hotkey code on adaptive kbd */
+ TP_HKEY_EV_ADAPTIVE_KEY_END = 0x1116, /* Last hotkey code on adaptive kbd */
+
+ /* Extended hotkey events in 2017+ models */
+ TP_HKEY_EV_EXTENDED_KEY_START = 0x1300, /* First extended hotkey code */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
+ TP_HKEY_EV_EXTENDED_KEY_END = 0x1319, /* Last extended hotkey code using
+ * hkey -> scancode translation for
+ * compat. Later codes are entered
+ * directly in the sparse-keymap.
+ */
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+ TP_HKEY_EV_DOUBLETAP_TOGGLE = 0x131c, /* Toggle trackpoint doubletap on/off */
TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
/* Reasons for waking up from S3/S4 */
@@ -232,6 +249,9 @@ enum tpacpi_hkey_event_t {
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
+
+ /* Misc2 */
+ TP_HKEY_EV_TRACK_DOUBLETAP = 0x8036, /* trackpoint doubletap */
};
/****************************************************************************
@@ -353,6 +373,7 @@ static struct {
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
+ u32 trackpoint_doubletap:1;
struct quirk_entry *quirks;
} tp_features;
@@ -1744,15 +1765,15 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_THINKPAD,
TP_ACPI_HOTKEYSCAN_UNK1,
TP_ACPI_HOTKEYSCAN_UNK2,
- TP_ACPI_HOTKEYSCAN_UNK3,
+ TP_ACPI_HOTKEYSCAN_MICMUTE,
TP_ACPI_HOTKEYSCAN_UNK4,
- TP_ACPI_HOTKEYSCAN_UNK5,
- TP_ACPI_HOTKEYSCAN_UNK6,
- TP_ACPI_HOTKEYSCAN_UNK7,
- TP_ACPI_HOTKEYSCAN_UNK8,
+ TP_ACPI_HOTKEYSCAN_CONFIG,
+ TP_ACPI_HOTKEYSCAN_SEARCH,
+ TP_ACPI_HOTKEYSCAN_SCALE,
+ TP_ACPI_HOTKEYSCAN_FILE,
/* Adaptive keyboard keycodes */
- TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START, /* 32 / 0x20 */
TP_ACPI_HOTKEYSCAN_MUTE2 = TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO,
TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL,
@@ -1764,7 +1785,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_UNK11,
TP_ACPI_HOTKEYSCAN_UNK12,
TP_ACPI_HOTKEYSCAN_UNK13,
- TP_ACPI_HOTKEYSCAN_CONFIG,
+ TP_ACPI_HOTKEYSCAN_CONFIG2,
TP_ACPI_HOTKEYSCAN_NEW_TAB,
TP_ACPI_HOTKEYSCAN_RELOAD,
TP_ACPI_HOTKEYSCAN_BACK,
@@ -1775,7 +1796,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY,
/* Lenovo extended keymap, starting at 0x1300 */
- TP_ACPI_HOTKEYSCAN_EXTENDED_START,
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START, /* 52 / 0x34 */
/* first new observed key (star, favorites) is 0x1311 */
TP_ACPI_HOTKEYSCAN_STAR = 69,
TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL2,
@@ -1786,9 +1807,6 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_NOTIFICATION_CENTER,
TP_ACPI_HOTKEYSCAN_PICKUP_PHONE,
TP_ACPI_HOTKEYSCAN_HANGUP_PHONE,
-
- /* Hotkey keymap size */
- TPACPI_HOTKEY_MAP_LEN
};
enum { /* Keys/events available through NVRAM polling */
@@ -1901,10 +1919,7 @@ static u32 hotkey_driver_mask; /* events needed by the driver */
static u32 hotkey_user_mask; /* events visible to userspace */
static u32 hotkey_acpi_mask; /* events enabled in firmware */
-static u16 *hotkey_keycode_map;
-
-static void tpacpi_driver_event(const unsigned int hkey_event);
-static void hotkey_driver_event(const unsigned int scancode);
+static bool tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
@@ -2236,32 +2251,56 @@ static void tpacpi_input_send_tabletsw(void)
}
}
-/* Do NOT call without validating scancode first */
-static void tpacpi_input_send_key(const unsigned int scancode)
+static bool tpacpi_input_send_key(const u32 hkey, bool *send_acpi_ev)
{
- const unsigned int keycode = hotkey_keycode_map[scancode];
-
- if (keycode != KEY_RESERVED) {
- mutex_lock(&tpacpi_inputdev_send_mutex);
+ bool known_ev;
+ u32 scancode;
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
- input_report_key(tpacpi_inputdev, keycode, 1);
- input_sync(tpacpi_inputdev);
+ if (tpacpi_driver_event(hkey))
+ return true;
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
- input_report_key(tpacpi_inputdev, keycode, 0);
- input_sync(tpacpi_inputdev);
+ /*
+ * Before the conversion to using the sparse-keymap helpers the driver used to
+ * map the hkey event codes to 0x00 - 0x4d scancodes so that a straight scancode
+ * indexed array could be used to map scancodes to keycodes:
+ *
+ * 0x1001 - 0x1020 -> 0x00 - 0x1f (Original ThinkPad events)
+ * 0x1103 - 0x1116 -> 0x20 - 0x33 (Adaptive keyboard, 2014 X1 Carbon)
+ * 0x1300 - 0x1319 -> 0x34 - 0x4d (Additional keys sent in 2017+ models)
+ *
+ * The sparse-keymap tables still use these scancodes for these ranges to
+ * preserve userspace API compatibility (e.g. hwdb keymappings).
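+ * For example, hkey 0x1311 (the star/favorites key) maps to scancode 0x45 (69), i.e. TP_ACPI_HOTKEYSCAN_STAR.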
+ */
+ if (hkey >= TP_HKEY_EV_ORIG_KEY_START &&
+ hkey <= TP_HKEY_EV_ORIG_KEY_END) {
+ scancode = hkey - TP_HKEY_EV_ORIG_KEY_START;
+ if (!(hotkey_user_mask & (1 << scancode)))
+ return true; /* Not reported but still a known code */
+ } else if (hkey >= TP_HKEY_EV_ADAPTIVE_KEY_START &&
+ hkey <= TP_HKEY_EV_ADAPTIVE_KEY_END) {
+ scancode = hkey - TP_HKEY_EV_ADAPTIVE_KEY_START +
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START;
+ } else if (hkey >= TP_HKEY_EV_EXTENDED_KEY_START &&
+ hkey <= TP_HKEY_EV_EXTENDED_KEY_END) {
+ scancode = hkey - TP_HKEY_EV_EXTENDED_KEY_START +
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START;
+ } else {
+ /*
+ * Do not send ACPI netlink events for unknown hotkeys, to
+ * avoid userspace starting to rely on them. Instead these
+ * should be added to the keymap to send evdev events.
+ */
+ if (send_acpi_ev)
+ *send_acpi_ev = false;
- mutex_unlock(&tpacpi_inputdev_send_mutex);
+ scancode = hkey;
}
-}
-/* Do NOT call without validating scancode first */
-static void tpacpi_input_send_key_masked(const unsigned int scancode)
-{
- hotkey_driver_event(scancode);
- if (hotkey_user_mask & (1 << scancode))
- tpacpi_input_send_key(scancode);
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+ known_ev = sparse_keymap_report_event(tpacpi_inputdev, scancode, 1, true);
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+
+ return known_ev;
}
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
@@ -2270,7 +2309,7 @@ static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
/* Do NOT call without validating scancode first */
static void tpacpi_hotkey_send_key(unsigned int scancode)
{
- tpacpi_input_send_key_masked(scancode);
+ tpacpi_input_send_key(TP_HKEY_EV_ORIG_KEY_START + scancode, NULL);
}
static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
@@ -2575,6 +2614,9 @@ static void hotkey_poll_setup_safe(const bool __unused)
{
}
+static void hotkey_poll_stop_sync(void)
+{
+}
#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
static int hotkey_inputdev_open(struct input_dev *dev)
@@ -2679,7 +2721,7 @@ static ssize_t hotkey_bios_enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(hotkey_bios_enabled);
@@ -3044,11 +3086,8 @@ static void tpacpi_send_radiosw_update(void)
static void hotkey_exit(void)
{
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
mutex_lock(&hotkey_mutex);
hotkey_poll_stop_sync();
- mutex_unlock(&hotkey_mutex);
-#endif
dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
"restoring original HKEY status and mask\n");
/* yes, there is a bitwise or below, we want the
@@ -3057,15 +3096,8 @@ static void hotkey_exit(void)
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
pr_err("failed to restore hot key mask to BIOS defaults\n");
-}
-static void __init hotkey_unmap(const unsigned int scancode)
-{
- if (hotkey_keycode_map[scancode] != KEY_RESERVED) {
- clear_bit(hotkey_keycode_map[scancode],
- tpacpi_inputdev->keybit);
- hotkey_keycode_map[scancode] = KEY_RESERVED;
- }
+ mutex_unlock(&hotkey_mutex);
}
/*
@@ -3097,9 +3129,6 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
};
-typedef u16 tpacpi_keymap_entry_t;
-typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
-
static int hotkey_init_tablet_mode(void)
{
int in_tablet_mode = 0, res;
@@ -3136,209 +3165,126 @@ static int hotkey_init_tablet_mode(void)
return in_tablet_mode;
}
-static int __init hotkey_init(struct ibm_init_struct *iibm)
-{
- /* Requirements for changing the default keymaps:
- *
- * 1. Many of the keys are mapped to KEY_RESERVED for very
- * good reasons. Do not change them unless you have deep
- * knowledge on the IBM and Lenovo ThinkPad firmware for
- * the various ThinkPad models. The driver behaves
- * differently for KEY_RESERVED: such keys have their
- * hot key mask *unset* in mask_recommended, and also
- * in the initial hot key mask programmed into the
- * firmware at driver load time, which means the firm-
- * ware may react very differently if you change them to
- * something else;
- *
- * 2. You must be subscribed to the linux-thinkpad and
- * ibm-acpi-devel mailing lists, and you should read the
- * list archives since 2007 if you want to change the
- * keymaps. This requirement exists so that you will
- * know the past history of problems with the thinkpad-
- * acpi driver keymaps, and also that you will be
- * listening to any bug reports;
- *
- * 3. Do not send thinkpad-acpi specific patches directly to
- * for merging, *ever*. Send them to the linux-acpi
- * mailinglist for comments. Merging is to be done only
- * through acpi-test and the ACPI maintainer.
- *
- * If the above is too much to ask, don't change the keymap.
- * Ask the thinkpad-acpi maintainer to do it, instead.
+static const struct key_entry keymap_ibm[] __initconst = {
+ /* Original hotkey mappings translated scancodes 0x00 - 0x1f */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF1, { KEY_FN_F1 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF2, { KEY_BATTERY } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF3, { KEY_COFFEE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF4, { KEY_SLEEP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF5, { KEY_WLAN } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF6, { KEY_FN_F6 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF7, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF8, { KEY_FN_F8 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF9, { KEY_FN_F9 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF10, { KEY_FN_F10 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF11, { KEY_FN_F11 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF12, { KEY_SUSPEND } },
+ /* Brightness: firmware always reacts, suppressed through hotkey_reserved_mask. */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNHOME, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNEND, { KEY_BRIGHTNESSDOWN } },
+ /* Thinklight: firmware always reacts, suppressed through hotkey_reserved_mask. */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNPAGEUP, { KEY_KBDILLUMTOGGLE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNSPACE, { KEY_ZOOM } },
+ /*
+ * Volume: firmware always reacts and reprograms the built-in *extra* mixer.
+ * Suppressed by default through hotkey_reserved_mask.
*/
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_VOLUMEUP, { KEY_VOLUMEUP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, { KEY_VOLUMEDOWN } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MUTE, { KEY_MUTE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_THINKPAD, { KEY_VENDOR } },
+ { KE_END }
+};
+
+static const struct key_entry keymap_lenovo[] __initconst = {
+ /* Original hotkey mappings, translated scancodes 0x00 - 0x1f */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF1, { KEY_FN_F1 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF2, { KEY_COFFEE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF3, { KEY_BATTERY } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF4, { KEY_SLEEP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF5, { KEY_WLAN } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF6, { KEY_CAMERA } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF7, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF8, { KEY_FN_F8 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF9, { KEY_FN_F9 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF10, { KEY_FN_F10 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF11, { KEY_FN_F11 } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNF12, { KEY_SUSPEND } },
+ /*
+ * These should be enabled --only-- when ACPI video is disabled and
+ * are handled in a special way by the init code.
+ */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNHOME, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNEND, { KEY_BRIGHTNESSDOWN } },
+ /* Suppressed by default through hotkey_reserved_mask. */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNPAGEUP, { KEY_KBDILLUMTOGGLE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FNSPACE, { KEY_ZOOM } },
+ /*
+ * Volume: z60/z61, T60 (BIOS version?): firmware always reacts and
+ * reprograms the built-in *extra* mixer.
+ * T60?, T61, R60?, R61: the firmware and EC try to send these over
+ * the regular keyboard (not through tpacpi). There are still weird bugs
+ * regarding MUTE, and this may cause the BIOS to interfere with the HDA mixer.
+ * Suppressed by default through hotkey_reserved_mask.
+ */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_VOLUMEUP, { KEY_VOLUMEUP } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, { KEY_VOLUMEDOWN } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MUTE, { KEY_MUTE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_THINKPAD, { KEY_VENDOR } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MICMUTE, { KEY_MICMUTE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CONFIG, { KEY_CONFIG } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_SEARCH, { KEY_SEARCH } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_SCALE, { KEY_SCALE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FILE, { KEY_FILE } },
+ /* Adaptive keyboard mappings for Carbon X1 2014, translated scancodes 0x20 - 0x33 */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MUTE2, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO, { KEY_BRIGHTNESS_MIN } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CLOUD, { KEY_XFER } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_UNK9, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_VOICE, { KEY_VOICECOMMAND } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_UNK10, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_GESTURES, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_UNK11, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_UNK12, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_UNK13, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CONFIG2, { KEY_CONFIG } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_NEW_TAB, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_RELOAD, { KEY_REFRESH } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_BACK, { KEY_BACK } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MIC_DOWN, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MIC_UP, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_MIC_CANCELLATION, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CAMERA_MODE, { KEY_RESERVED } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY, { KEY_RESERVED } },
+ /* Extended hotkey mappings, translated scancodes 0x34 - 0x4d */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_STAR, { KEY_BOOKMARKS } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL2, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_CALCULATOR, { KEY_CALC } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_BLUETOOTH, { KEY_BLUETOOTH } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_KEYBOARD, { KEY_KEYBOARD } },
+ /* Used by "Lenovo Quick Clean" */
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_FN_RIGHT_SHIFT, { KEY_FN_RIGHT_SHIFT } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_NOTIFICATION_CENTER, { KEY_NOTIFICATION_CENTER } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_PICKUP_PHONE, { KEY_PICKUP_PHONE } },
+ { KE_KEY, TP_ACPI_HOTKEYSCAN_HANGUP_PHONE, { KEY_HANGUP_PHONE } },
+ /*
+ * All mappings below are for raw untranslated hkey event codes mapped directly
+ * after switching to sparse keymap support. The mappings above use translated
+ * scancodes to preserve uAPI compatibility, see tpacpi_input_send_key().
+ */
+ { KE_KEY, 0x131d, { KEY_VENDOR } }, /* System debug info, similar to old ThinkPad key */
+ { KE_KEY, TP_HKEY_EV_TRACK_DOUBLETAP /* 0x8036 */, { KEY_PROG4 } },
+ { KE_END }
+};
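The two tables above replace the driver's old flat keycode arrays with sparse keymaps from <linux/input/sparse-keymap.h>: each struct key_entry pairs an event code (here either a translated scancode or a raw HKEY code) with a keycode, and KE_END terminates the table. A minimal sketch of such a table, with made-up codes that are not taken from this driver:

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

/* Illustrative entries only; 0x01/0x02 are placeholder codes. */
static const struct key_entry example_keymap[] __initconst = {
	{ KE_KEY,    0x01, { KEY_SLEEP } },	/* reported as an input key */
	{ KE_IGNORE, 0x02, { KEY_RESERVED } },	/* known code, deliberately dropped */
	{ KE_END }
};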
+static int __init hotkey_init(struct ibm_init_struct *iibm)
+{
enum keymap_index {
TPACPI_KEYMAP_IBM_GENERIC = 0,
TPACPI_KEYMAP_LENOVO_GENERIC,
};
- static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
- /* Generic keymap for IBM ThinkPads */
- [TPACPI_KEYMAP_IBM_GENERIC] = {
- /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
- KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP,
- KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
- KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
-
- /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
- KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
- KEY_UNKNOWN, /* 0x0D: FN+INSERT */
- KEY_UNKNOWN, /* 0x0E: FN+DELETE */
-
- /* brightness: firmware always reacts to them */
- KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- KEY_RESERVED, /* 0x10: FN+END (brightness down) */
-
- /* Thinklight: firmware always react to it */
- KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
-
- KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
- KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
-
- /* Volume: firmware always react to it and reprograms
- * the built-in *extra* mixer. Never map it to control
- * another mixer by default. */
- KEY_RESERVED, /* 0x14: VOLUME UP */
- KEY_RESERVED, /* 0x15: VOLUME DOWN */
- KEY_RESERVED, /* 0x16: MUTE */
-
- KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
-
- /* (assignments unknown, please report if found) */
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-
- /* No assignments, only used for Adaptive keyboards. */
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-
- /* No assignment, used for newer Lenovo models */
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN
-
- },
-
- /* Generic keymap for Lenovo ThinkPads */
- [TPACPI_KEYMAP_LENOVO_GENERIC] = {
- /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
- KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
- KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
- KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
-
- /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
- KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
- KEY_UNKNOWN, /* 0x0D: FN+INSERT */
- KEY_UNKNOWN, /* 0x0E: FN+DELETE */
-
- /* These should be enabled --only-- when ACPI video
- * is disabled (i.e. in "vendor" mode), and are handled
- * in a special way by the init code */
- KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */
- KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */
-
- KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
-
- KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
- KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
-
- /* Volume: z60/z61, T60 (BIOS version?): firmware always
- * react to it and reprograms the built-in *extra* mixer.
- * Never map it to control another mixer by default.
- *
- * T60?, T61, R60?, R61: firmware and EC tries to send
- * these over the regular keyboard, so these are no-ops,
- * but there are still weird bugs re. MUTE, so do not
- * change unless you get test reports from all Lenovo
- * models. May cause the BIOS to interfere with the
- * HDA mixer.
- */
- KEY_RESERVED, /* 0x14: VOLUME UP */
- KEY_RESERVED, /* 0x15: VOLUME DOWN */
- KEY_RESERVED, /* 0x16: MUTE */
-
- KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
-
- /* (assignments unknown, please report if found) */
- KEY_UNKNOWN, KEY_UNKNOWN,
-
- /*
- * The mic mute button only sends 0x1a. It does not
- * automatically mute the mic or change the mute light.
- */
- KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
-
- /* (assignments unknown, please report if found) */
- KEY_UNKNOWN,
-
- /* Extra keys in use since the X240 / T440 / T540 */
- KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE,
-
- /*
- * These are the adaptive keyboard keycodes for Carbon X1 2014.
- * The first item in this list is the Mute button which is
- * emitted with 0x103 through
- * adaptive_keyboard_hotkey_notify_hotkey() when the sound
- * symbol is held.
- * We'll need to offset those by 0x20.
- */
- KEY_RESERVED, /* Mute held, 0x103 */
- KEY_BRIGHTNESS_MIN, /* Backlight off */
- KEY_RESERVED, /* Clipping tool */
- KEY_RESERVED, /* Cloud */
- KEY_RESERVED,
- KEY_VOICECOMMAND, /* Voice */
- KEY_RESERVED,
- KEY_RESERVED, /* Gestures */
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_CONFIG, /* Settings */
- KEY_RESERVED, /* New tab */
- KEY_REFRESH, /* Reload */
- KEY_BACK, /* Back */
- KEY_RESERVED, /* Microphone down */
- KEY_RESERVED, /* Microphone up */
- KEY_RESERVED, /* Microphone cancellation */
- KEY_RESERVED, /* Camera mode */
- KEY_RESERVED, /* Rotate display, 0x116 */
-
- /*
- * These are found in 2017 models (e.g. T470s, X270).
- * The lowest known value is 0x311, which according to
- * the manual should launch a user defined favorite
- * application.
- *
- * The offset for these is TP_ACPI_HOTKEYSCAN_EXTENDED_START,
- * corresponding to 0x34.
- */
-
- /* (assignments unknown, please report if found) */
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN,
-
- KEY_BOOKMARKS, /* Favorite app, 0x311 */
- KEY_SELECTIVE_SCREENSHOT, /* Clipping tool */
- KEY_CALC, /* Calculator (above numpad, P52) */
- KEY_BLUETOOTH, /* Bluetooth */
- KEY_KEYBOARD, /* Keyboard, 0x315 */
- KEY_FN_RIGHT_SHIFT, /* Fn + right Shift */
- KEY_NOTIFICATION_CENTER, /* Notification Center */
- KEY_PICKUP_PHONE, /* Answer incoming call */
- KEY_HANGUP_PHONE, /* Decline incoming call */
- },
- };
-
static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
/* Generic maps (fallback) */
{
@@ -3353,17 +3299,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
},
};
-#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
-
- int res, i;
- int status;
- int hkeyv;
+ unsigned long keymap_id, quirks;
+ const struct key_entry *keymap;
bool radiosw_state = false;
bool tabletsw_state = false;
-
- unsigned long quirks;
- unsigned long keymap_id;
+ int hkeyv, res, status;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"initializing hotkey subdriver\n");
@@ -3503,30 +3443,29 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Set up key map */
keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
ARRAY_SIZE(tpacpi_keymap_qtable));
- BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"using keymap number %lu\n", keymap_id);
- hotkey_keycode_map = kmemdup(&tpacpi_keymaps[keymap_id],
- TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL);
- if (!hotkey_keycode_map) {
- pr_err("failed to allocate memory for key map\n");
- return -ENOMEM;
+ /* Keys which should be reserved on both IBM and Lenovo models */
+ hotkey_reserved_mask = TP_ACPI_HKEY_KBD_LIGHT_MASK |
+ TP_ACPI_HKEY_VOLUP_MASK |
+ TP_ACPI_HKEY_VOLDWN_MASK |
+ TP_ACPI_HKEY_MUTE_MASK;
+ /*
+ * Reserve brightness up/down unconditionally on IBM models; on Lenovo
+ * models these are disabled based on acpi_video_get_backlight_type().
+ */
+ if (keymap_id == TPACPI_KEYMAP_IBM_GENERIC) {
+ hotkey_reserved_mask |= TP_ACPI_HKEY_BRGHTUP_MASK |
+ TP_ACPI_HKEY_BRGHTDWN_MASK;
+ keymap = keymap_ibm;
+ } else {
+ keymap = keymap_lenovo;
}
- input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
- tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
- tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
- tpacpi_inputdev->keycode = hotkey_keycode_map;
- for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
- if (hotkey_keycode_map[i] != KEY_RESERVED) {
- input_set_capability(tpacpi_inputdev, EV_KEY,
- hotkey_keycode_map[i]);
- } else {
- if (i < sizeof(hotkey_reserved_mask)*8)
- hotkey_reserved_mask |= 1 << i;
- }
- }
+ res = sparse_keymap_setup(tpacpi_inputdev, keymap, NULL);
+ if (res)
+ return res;
if (tp_features.hotkey_wlsw) {
input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
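sparse_keymap_setup(), used in the hunk above, copies the chosen table into the input device and sets the EV_KEY capability for every mapped keycode, which is what the removed hand-rolled loop over hotkey_keycode_map did by hand before. A hedged sketch of how a driver typically attaches such a table and later reports an event (example_keymap and the function names are assumptions, not this driver's code):

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

/* 'idev' is assumed to be an allocated, not-yet-registered input device. */
static int __init example_attach_keymap(struct input_dev *idev)
{
	return sparse_keymap_setup(idev, example_keymap, NULL);
}

/* In the event handler: look up 'code' and emit a press plus auto-release. */
static void example_handle_event(struct input_dev *idev, unsigned int code)
{
	if (!sparse_keymap_report_event(idev, code, 1, true))
		dev_dbg(&idev->dev, "unhandled event code 0x%x\n", code);
}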
@@ -3549,11 +3488,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Disable brightness up/down on Lenovo thinkpads when
* ACPI is handling them, otherwise it is plain impossible
* for userspace to do something even remotely sane */
- hotkey_reserved_mask |=
- (1 << TP_ACPI_HOTKEYSCAN_FNHOME)
- | (1 << TP_ACPI_HOTKEYSCAN_FNEND);
- hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNHOME);
- hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNEND);
+ hotkey_reserved_mask |= TP_ACPI_HKEY_BRGHTUP_MASK |
+ TP_ACPI_HKEY_BRGHTDWN_MASK;
}
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
@@ -3593,6 +3529,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_poll_setup_safe(true);
+ /* Enable doubletap by default */
+ tp_features.trackpoint_doubletap = 1;
+
return 0;
}
@@ -3610,10 +3549,6 @@ static const int adaptive_keyboard_modes[] = {
FUNCTION_MODE
};
-#define DFR_CHANGE_ROW 0x101
-#define DFR_SHOW_QUICKVIEW_ROW 0x102
-#define FIRST_ADAPTIVE_KEY 0x103
-
/* Press and hold the Fn key for a second to switch to Function Mode; when the
* Fn key is released, the previous mode is restored.
*/
@@ -3664,153 +3599,67 @@ static int adaptive_keyboard_get_next_mode(int mode)
return adaptive_keyboard_modes[i];
}
-static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
+static void adaptive_keyboard_change_row(void)
{
- int current_mode = 0;
- int new_mode = 0;
- int keycode;
-
- switch (scancode) {
- case DFR_CHANGE_ROW:
- if (adaptive_keyboard_mode_is_saved) {
- new_mode = adaptive_keyboard_prev_mode;
- adaptive_keyboard_mode_is_saved = false;
- } else {
- current_mode = adaptive_keyboard_get_mode();
- if (current_mode < 0)
- return false;
- new_mode = adaptive_keyboard_get_next_mode(
- current_mode);
- }
-
- if (adaptive_keyboard_set_mode(new_mode) < 0)
- return false;
-
- return true;
-
- case DFR_SHOW_QUICKVIEW_ROW:
- current_mode = adaptive_keyboard_get_mode();
- if (current_mode < 0)
- return false;
-
- adaptive_keyboard_prev_mode = current_mode;
- adaptive_keyboard_mode_is_saved = true;
+ int mode;
- if (adaptive_keyboard_set_mode (FUNCTION_MODE) < 0)
- return false;
- return true;
-
- default:
- if (scancode < FIRST_ADAPTIVE_KEY ||
- scancode >= FIRST_ADAPTIVE_KEY +
- TP_ACPI_HOTKEYSCAN_EXTENDED_START -
- TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
- pr_info("Unhandled adaptive keyboard key: 0x%x\n",
- scancode);
- return false;
- }
- keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY +
- TP_ACPI_HOTKEYSCAN_ADAPTIVE_START];
- if (keycode != KEY_RESERVED) {
- mutex_lock(&tpacpi_inputdev_send_mutex);
-
- input_report_key(tpacpi_inputdev, keycode, 1);
- input_sync(tpacpi_inputdev);
-
- input_report_key(tpacpi_inputdev, keycode, 0);
- input_sync(tpacpi_inputdev);
-
- mutex_unlock(&tpacpi_inputdev_send_mutex);
- }
- return true;
+ if (adaptive_keyboard_mode_is_saved) {
+ mode = adaptive_keyboard_prev_mode;
+ adaptive_keyboard_mode_is_saved = false;
+ } else {
+ mode = adaptive_keyboard_get_mode();
+ if (mode < 0)
+ return;
+ mode = adaptive_keyboard_get_next_mode(mode);
}
+
+ adaptive_keyboard_set_mode(mode);
}
-static bool hotkey_notify_extended_hotkey(const u32 hkey)
+static void adaptive_keyboard_s_quickview_row(void)
{
- unsigned int scancode;
+ int mode;
- switch (hkey) {
- case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
- case TP_HKEY_EV_AMT_TOGGLE:
- case TP_HKEY_EV_PROFILE_TOGGLE:
- tpacpi_driver_event(hkey);
- return true;
- }
+ mode = adaptive_keyboard_get_mode();
+ if (mode < 0)
+ return;
- /* Extended keycodes start at 0x300 and our offset into the map
- * TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode
- * will be positive, but might not be in the correct range.
- */
- scancode = (hkey & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START);
- if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START &&
- scancode < TPACPI_HOTKEY_MAP_LEN) {
- tpacpi_input_send_key(scancode);
- return true;
- }
+ adaptive_keyboard_prev_mode = mode;
+ adaptive_keyboard_mode_is_saved = true;
- return false;
+ adaptive_keyboard_set_mode(FUNCTION_MODE);
}
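adaptive_keyboard_change_row() above still relies on adaptive_keyboard_get_next_mode(), which walks the adaptive_keyboard_modes[] table and wraps around at the end. A standalone sketch of that cycle logic with placeholder mode values (the real constants are the driver's *_MODE enums):

#include <stdio.h>

/* Placeholder values standing in for adaptive_keyboard_modes[]. */
static const int modes[] = { 0, 2, 3, 5 };
#define NUM_MODES (sizeof(modes) / sizeof(modes[0]))

static int next_mode(int mode)
{
	size_t i;

	for (i = 0; i < NUM_MODES; i++) {
		if (modes[i] == mode)
			return modes[(i + 1) % NUM_MODES];	/* wrap at the end */
	}
	return modes[0];	/* unknown mode: fall back to the first entry */
}

int main(void)
{
	printf("after mode 2 comes mode %d\n", next_mode(2));	/* prints 3 */
	return 0;
}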
-static bool hotkey_notify_hotkey(const u32 hkey,
- bool *send_acpi_ev,
- bool *ignore_acpi_ev)
+/* 0x1000-0x1FFF: key presses */
+static bool hotkey_notify_hotkey(const u32 hkey, bool *send_acpi_ev)
{
- /* 0x1000-0x1FFF: key presses */
- unsigned int scancode = hkey & 0xfff;
- *send_acpi_ev = true;
- *ignore_acpi_ev = false;
+ /* Never send ACPI netlink events for original hotkeys (hkey: 0x1001 - 0x1020) */
+ if (hkey >= TP_HKEY_EV_ORIG_KEY_START && hkey <= TP_HKEY_EV_ORIG_KEY_END) {
+ *send_acpi_ev = false;
- /*
- * Original events are in the 0x10XX range, the adaptive keyboard
- * found in 2014 X1 Carbon emits events are of 0x11XX. In 2017
- * models, additional keys are emitted through 0x13XX.
- */
- switch ((hkey >> 8) & 0xf) {
- case 0:
- if (scancode > 0 &&
- scancode <= TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
- /* HKEY event 0x1001 is scancode 0x00 */
- scancode--;
- if (!(hotkey_source_mask & (1 << scancode))) {
- tpacpi_input_send_key_masked(scancode);
- *send_acpi_ev = false;
- } else {
- *ignore_acpi_ev = true;
- }
+ /* Original hotkeys may be polled from NVRAM instead */
+ unsigned int scancode = hkey - TP_HKEY_EV_ORIG_KEY_START;
+ if (hotkey_source_mask & (1 << scancode))
return true;
- }
- break;
-
- case 1:
- return adaptive_keyboard_hotkey_notify_hotkey(scancode);
-
- case 3:
- return hotkey_notify_extended_hotkey(hkey);
}
- return false;
+ return tpacpi_input_send_key(hkey, send_acpi_ev);
}
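The rewritten hotkey_notify_hotkey() treats original hotkeys (HKEY events 0x1001 - 0x1020) specially: their scancode is the offset from TP_HKEY_EV_ORIG_KEY_START, and a set bit in hotkey_source_mask means the key is owned by the NVRAM poller rather than the ACPI event path. A standalone sketch of that range-plus-bitmask test (the constant names are placeholders; only the 0x1001 - 0x1020 range comes from the comment above):

#include <stdbool.h>
#include <stdio.h>

#define ORIG_KEY_START 0x1001u
#define ORIG_KEY_END   0x1020u

/* Returns true when 'hkey' is an original hotkey claimed by the poller,
 * i.e. the ACPI event should not be turned into an input key here. */
static bool polled_elsewhere(unsigned int hkey, unsigned int source_mask)
{
	if (hkey < ORIG_KEY_START || hkey > ORIG_KEY_END)
		return false;

	return source_mask & (1u << (hkey - ORIG_KEY_START));
}

int main(void)
{
	/* 0x1003 maps to scancode 2; bit 2 is set, so the poller owns it. */
	printf("%d\n", polled_elsewhere(0x1003, 1u << 2));
	return 0;
}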
-static bool hotkey_notify_wakeup(const u32 hkey,
- bool *send_acpi_ev,
- bool *ignore_acpi_ev)
+/* 0x2000-0x2FFF: Wakeup reason */
+static bool hotkey_notify_wakeup(const u32 hkey, bool *send_acpi_ev)
{
- /* 0x2000-0x2FFF: Wakeup reason */
- *send_acpi_ev = true;
- *ignore_acpi_ev = false;
-
switch (hkey) {
case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
- *ignore_acpi_ev = true;
+ *send_acpi_ev = false;
break;
case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
- *ignore_acpi_ev = true;
+ *send_acpi_ev = false;
break;
case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
@@ -3832,14 +3681,9 @@ static bool hotkey_notify_wakeup(const u32 hkey,
return true;
}
-static bool hotkey_notify_dockevent(const u32 hkey,
- bool *send_acpi_ev,
- bool *ignore_acpi_ev)
+/* 0x4000-0x4FFF: dock-related events */
+static bool hotkey_notify_dockevent(const u32 hkey, bool *send_acpi_ev)
{
- /* 0x4000-0x4FFF: dock-related events */
- *send_acpi_ev = true;
- *ignore_acpi_ev = false;
-
switch (hkey) {
case TP_HKEY_EV_UNDOCK_ACK:
/* ACPI undock operation completed after wakeup */
@@ -3869,7 +3713,6 @@ static bool hotkey_notify_dockevent(const u32 hkey,
case TP_HKEY_EV_KBD_COVER_ATTACH:
case TP_HKEY_EV_KBD_COVER_DETACH:
*send_acpi_ev = false;
- *ignore_acpi_ev = true;
return true;
default:
@@ -3877,14 +3720,9 @@ static bool hotkey_notify_dockevent(const u32 hkey,
}
}
-static bool hotkey_notify_usrevent(const u32 hkey,
- bool *send_acpi_ev,
- bool *ignore_acpi_ev)
+/* 0x5000-0x5FFF: human interface helpers */
+static bool hotkey_notify_usrevent(const u32 hkey, bool *send_acpi_ev)
{
- /* 0x5000-0x5FFF: human interface helpers */
- *send_acpi_ev = true;
- *ignore_acpi_ev = false;
-
switch (hkey) {
case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
@@ -3901,7 +3739,7 @@ static bool hotkey_notify_usrevent(const u32 hkey,
case TP_HKEY_EV_LID_OPEN: /* Lid opened */
case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
/* do not propagate these events */
- *ignore_acpi_ev = true;
+ *send_acpi_ev = false;
return true;
default:
@@ -3912,14 +3750,9 @@ static bool hotkey_notify_usrevent(const u32 hkey,
static void thermal_dump_all_sensors(void);
static void palmsensor_refresh(void);
-static bool hotkey_notify_6xxx(const u32 hkey,
- bool *send_acpi_ev,
- bool *ignore_acpi_ev)
+/* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
+static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev)
{
- /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
- *send_acpi_ev = true;
- *ignore_acpi_ev = false;
-
switch (hkey) {
case TP_HKEY_EV_THM_TABLE_CHANGED:
pr_debug("EC reports: Thermal Table has changed\n");
@@ -3965,14 +3798,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
/* key press events, we just ignore them as long as the EC
* is still reporting them in the normal keyboard stream */
*send_acpi_ev = false;
- *ignore_acpi_ev = true;
return true;
case TP_HKEY_EV_KEY_FN_ESC:
/* Get the media key status to force the status LED to update */
acpi_evalf(hkey_handle, NULL, "GMKS", "v");
*send_acpi_ev = false;
- *ignore_acpi_ev = true;
return true;
case TP_HKEY_EV_TABLET_CHANGED:
@@ -3996,11 +3827,23 @@ static bool hotkey_notify_6xxx(const u32 hkey,
return true;
}
+static bool hotkey_notify_8xxx(const u32 hkey, bool *send_acpi_ev)
+{
+ switch (hkey) {
+ case TP_HKEY_EV_TRACK_DOUBLETAP:
+ if (tp_features.trackpoint_doubletap)
+ tpacpi_input_send_key(hkey, send_acpi_ev);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
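hotkey_notify_8xxx() above plugs into hotkey_notify()'s class dispatch, which routes every HKEY event by its top nibble (hkey >> 12) and lets each handler report both whether the event was recognised and whether it should still be forwarded over ACPI netlink. A standalone sketch of that shape (the handler bodies are stubs, not the driver's logic):

#include <stdbool.h>
#include <stdio.h>

static bool handle_event(unsigned int hkey, bool *send_acpi_ev)
{
	*send_acpi_ev = true;	/* forward to userspace unless a handler says otherwise */

	switch (hkey >> 12) {
	case 1:	/* 0x1000-0x1FFF: key presses */
	case 2:	/* 0x2000-0x2FFF: wakeup reasons */
	case 8:	/* 0x8000-0x8FFF: misc2, e.g. trackpoint doubletap (0x8036) */
		return true;	/* known event class */
	default:
		return false;	/* unknown: caller logs it */
	}
}

int main(void)
{
	bool send = false;
	bool known = handle_event(0x8036, &send);

	printf("known=%d send=%d\n", known, send);
	return 0;
}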
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
{
u32 hkey;
bool send_acpi_ev;
- bool ignore_acpi_ev;
bool known_ev;
if (event != 0x80) {
@@ -4025,18 +3868,16 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
send_acpi_ev = true;
- ignore_acpi_ev = false;
+ known_ev = false;
switch (hkey >> 12) {
case 1:
/* 0x1000-0x1FFF: key presses */
- known_ev = hotkey_notify_hotkey(hkey, &send_acpi_ev,
- &ignore_acpi_ev);
+ known_ev = hotkey_notify_hotkey(hkey, &send_acpi_ev);
break;
case 2:
/* 0x2000-0x2FFF: Wakeup reason */
- known_ev = hotkey_notify_wakeup(hkey, &send_acpi_ev,
- &ignore_acpi_ev);
+ known_ev = hotkey_notify_wakeup(hkey, &send_acpi_ev);
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
@@ -4051,38 +3892,34 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
/* FIXME: kick libata if SATA link offline */
known_ev = true;
break;
- default:
- known_ev = false;
}
break;
case 4:
/* 0x4000-0x4FFF: dock-related events */
- known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
- &ignore_acpi_ev);
+ known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev);
break;
case 5:
/* 0x5000-0x5FFF: human interface helpers */
- known_ev = hotkey_notify_usrevent(hkey, &send_acpi_ev,
- &ignore_acpi_ev);
+ known_ev = hotkey_notify_usrevent(hkey, &send_acpi_ev);
break;
case 6:
/* 0x6000-0x6FFF: thermal alarms/notices and
* keyboard events */
- known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
- &ignore_acpi_ev);
+ known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev);
break;
case 7:
/* 0x7000-0x7FFF: misc */
if (tp_features.hotkey_wlsw &&
hkey == TP_HKEY_EV_RFKILL_CHANGED) {
tpacpi_send_radiosw_update();
- send_acpi_ev = 0;
+ send_acpi_ev = false;
known_ev = true;
- break;
}
- fallthrough; /* to default */
- default:
- known_ev = false;
+ break;
+ case 8:
+ /* 0x8000-0x8FFF: misc2 */
+ known_ev = hotkey_notify_8xxx(hkey, &send_acpi_ev);
+ break;
}
if (!known_ev) {
pr_notice("unhandled HKEY event 0x%04x\n", hkey);
@@ -4091,7 +3928,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
/* netlink events */
- if (!ignore_acpi_ev && send_acpi_ev) {
+ if (send_acpi_ev) {
acpi_bus_generate_netlink_event(
ibm->acpi->device->pnp.device_class,
dev_name(&ibm->acpi->device->dev),
@@ -9789,7 +9626,7 @@ static ssize_t tpacpi_battery_show(int what,
battery = BAT_PRIMARY;
if (tpacpi_battery_get(what, battery, &ret))
return -ENODEV;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t charge_control_start_threshold_show(struct device *device,
@@ -11126,92 +10963,93 @@ static struct platform_driver tpacpi_hwmon_pdriver = {
* HKEY event callouts for other subdrivers go here
* (yes, it is ugly, but it is quick, safe, and gets the job done)
*/
-static void tpacpi_driver_event(const unsigned int hkey_event)
+static bool tpacpi_driver_event(const unsigned int hkey_event)
{
- if (ibm_backlight_device) {
- switch (hkey_event) {
- case TP_HKEY_EV_BRGHT_UP:
- case TP_HKEY_EV_BRGHT_DOWN:
+ switch (hkey_event) {
+ case TP_HKEY_EV_BRGHT_UP:
+ case TP_HKEY_EV_BRGHT_DOWN:
+ if (ibm_backlight_device)
tpacpi_brightness_notify_change();
- }
- }
- if (alsa_card) {
- switch (hkey_event) {
- case TP_HKEY_EV_VOL_UP:
- case TP_HKEY_EV_VOL_DOWN:
- case TP_HKEY_EV_VOL_MUTE:
+ /*
+ * Key press events are suppressed by the default hotkey_user_mask
+ * and should still be reported if explicitly requested.
+ */
+ return false;
+ case TP_HKEY_EV_VOL_UP:
+ case TP_HKEY_EV_VOL_DOWN:
+ case TP_HKEY_EV_VOL_MUTE:
+ if (alsa_card)
volume_alsa_notify_change();
- }
- }
- if (tp_features.kbdlight && hkey_event == TP_HKEY_EV_KBD_LIGHT) {
- enum led_brightness brightness;
- mutex_lock(&kbdlight_mutex);
+ /* Key events are suppressed by the default hotkey_user_mask */
+ return false;
+ case TP_HKEY_EV_KBD_LIGHT:
+ if (tp_features.kbdlight) {
+ enum led_brightness brightness;
- /*
- * Check the brightness actually changed, setting the brightness
- * through kbdlight_set_level() also triggers this event.
- */
- brightness = kbdlight_sysfs_get(NULL);
- if (kbdlight_brightness != brightness) {
- kbdlight_brightness = brightness;
- led_classdev_notify_brightness_hw_changed(
- &tpacpi_led_kbdlight.led_classdev, brightness);
- }
+ mutex_lock(&kbdlight_mutex);
- mutex_unlock(&kbdlight_mutex);
- }
+ /*
+ * Check the brightness actually changed, setting the brightness
+ * through kbdlight_set_level() also triggers this event.
+ */
+ brightness = kbdlight_sysfs_get(NULL);
+ if (kbdlight_brightness != brightness) {
+ kbdlight_brightness = brightness;
+ led_classdev_notify_brightness_hw_changed(
+ &tpacpi_led_kbdlight.led_classdev, brightness);
+ }
- if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED) {
+ mutex_unlock(&kbdlight_mutex);
+ }
+ /* Key events are suppressed by the default hotkey_user_mask */
+ return false;
+ case TP_HKEY_EV_DFR_CHANGE_ROW:
+ adaptive_keyboard_change_row();
+ return true;
+ case TP_HKEY_EV_DFR_S_QUICKVIEW_ROW:
+ adaptive_keyboard_s_quickview_row();
+ return true;
+ case TP_HKEY_EV_THM_CSM_COMPLETED:
lapsensor_refresh();
/* If we are already accessing DYTC then skip dytc update */
if (!atomic_add_unless(&dytc_ignore_event, -1, 0))
dytc_profile_refresh();
- }
-
- if (lcdshadow_dev && hkey_event == TP_HKEY_EV_PRIVACYGUARD_TOGGLE) {
- enum drm_privacy_screen_status old_hw_state;
- bool changed;
-
- mutex_lock(&lcdshadow_dev->lock);
- old_hw_state = lcdshadow_dev->hw_state;
- lcdshadow_get_hw_state(lcdshadow_dev);
- changed = lcdshadow_dev->hw_state != old_hw_state;
- mutex_unlock(&lcdshadow_dev->lock);
- if (changed)
- drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
- }
- if (hkey_event == TP_HKEY_EV_AMT_TOGGLE) {
+ return true;
+ case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ if (lcdshadow_dev) {
+ enum drm_privacy_screen_status old_hw_state;
+ bool changed;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ old_hw_state = lcdshadow_dev->hw_state;
+ lcdshadow_get_hw_state(lcdshadow_dev);
+ changed = lcdshadow_dev->hw_state != old_hw_state;
+ mutex_unlock(&lcdshadow_dev->lock);
+
+ if (changed)
+ drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
+ }
+ return true;
+ case TP_HKEY_EV_AMT_TOGGLE:
/* If we're enabling AMT we need to force balanced mode */
if (!dytc_amt_active)
/* This will also set AMT mode enabled */
dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
else
dytc_control_amt(!dytc_amt_active);
+
+ return true;
+ case TP_HKEY_EV_DOUBLETAP_TOGGLE:
+ tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
+ return true;
+ case TP_HKEY_EV_PROFILE_TOGGLE:
+ platform_profile_cycle();
+ return true;
}
- if (hkey_event == TP_HKEY_EV_PROFILE_TOGGLE) {
- switch (dytc_current_profile) {
- case PLATFORM_PROFILE_LOW_POWER:
- dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
- break;
- case PLATFORM_PROFILE_BALANCED:
- dytc_profile_set(NULL, PLATFORM_PROFILE_PERFORMANCE);
- break;
- case PLATFORM_PROFILE_PERFORMANCE:
- dytc_profile_set(NULL, PLATFORM_PROFILE_LOW_POWER);
- break;
- default:
- pr_warn("Profile HKEY unexpected profile %d", dytc_current_profile);
- }
- /* Notify user space the profile changed */
- platform_profile_notify();
- }
-}
-static void hotkey_driver_event(const unsigned int scancode)
-{
- tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
+ return false;
}
/* --------------------------------------------------------------------- */
@@ -11814,7 +11652,6 @@ static void thinkpad_acpi_module_exit(void)
input_unregister_device(tpacpi_inputdev);
else
input_free_device(tpacpi_inputdev);
- kfree(hotkey_keycode_map);
}
if (tpacpi_sensors_pdev)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 291f14ef6702..f86690aa3d46 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644);
MODULE_PARM_DESC(turn_on_panel_on_resume,
"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes");
+static int hci_hotkey_quickstart = -1;
+module_param(hci_hotkey_quickstart, int, 0644);
+MODULE_PARM_DESC(hci_hotkey_quickstart,
+ "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes");
+
#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
/* Scan code for Fn key on TOS1900 models */
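The new hci_hotkey_quickstart parameter uses the same tri-state convention as turn_on_panel_on_resume: -1 means "decide automatically from DMI quirks at probe time", while 0/1 are explicit user overrides. A hedged sketch of that pattern with a hypothetical parameter name (not part of this driver):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* -1 = auto (resolved from quirks in probe), 0 = force off, 1 = force on. */
static int example_feature = -1;
module_param(example_feature, int, 0644);
MODULE_PARM_DESC(example_feature,
		 "Enable example feature (-1 = auto, 0 = no, 1 = yes)");

/* Later, in probe, only the 'auto' value is resolved:
 *	if (example_feature == -1)
 *		example_feature = !!(quirks & QUIRK_EXAMPLE_FEATURE);
 */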
@@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume,
#define HCI_ACCEL_MASK 0x7fff
#define HCI_ACCEL_DIRECTION_MASK 0x8000
#define HCI_HOTKEY_DISABLE 0x0b
+#define HCI_HOTKEY_ENABLE_QUICKSTART 0x05
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
@@ -264,6 +270,7 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
{ KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_IGNORE, 0x0e00, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
{ KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
@@ -2730,10 +2737,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
return -ENODEV;
/*
+ * Enable quickstart buttons if supported.
+ *
* Enable the "Special Functions" mode only if they are
* supported and if they are activated.
*/
- if (dev->kbd_function_keys_supported && dev->special_functions)
+ if (hci_hotkey_quickstart)
+ result = hci_write(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_ENABLE_QUICKSTART);
+ else if (dev->kbd_function_keys_supported && dev->special_functions)
result = hci_write(dev, HCI_HOTKEY_EVENT,
HCI_HOTKEY_SPECIAL_FUNCTIONS);
else
@@ -3257,7 +3269,14 @@ static const char *find_hci_method(acpi_handle handle)
* works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
* the configured brightness level.
*/
-static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+#define QUIRK_TURN_ON_PANEL_ON_RESUME BIT(0)
+/*
+ * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use
+ * the value HCI_HOTKEY_ENABLE_QUICKSTART.
+ */
+#define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1)
+
+static const struct dmi_system_id toshiba_dmi_quirks[] = {
{
/* Toshiba Portégé R700 */
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
@@ -3265,6 +3284,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
+ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
},
{
/* Toshiba Satellite/Portégé R830 */
@@ -3274,6 +3294,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
},
+ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
},
{
/* Toshiba Satellite/Portégé Z830 */
@@ -3281,6 +3302,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
},
+ .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
},
};
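Folding both quirks into one DMI table works because dmi_first_match() returns the first matching entry (or NULL), and the bit flags stashed in .driver_data say which quirks apply to that machine; the probe hunk below decodes them with !!(quirks & ...). A small sketch of the consuming side, assuming a table laid out like toshiba_dmi_quirks[]:

#include <linux/dmi.h>

/* Returns the quirk bit mask for the running machine, or 0 if none match. */
static long example_get_quirks(const struct dmi_system_id *table)
{
	const struct dmi_system_id *id = dmi_first_match(table);

	return id ? (long)id->driver_data : 0;
}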
@@ -3289,6 +3311,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
struct toshiba_acpi_dev *dev;
const char *hci_method;
u32 dummy;
+ const struct dmi_system_id *dmi_id;
+ long quirks = 0;
int ret = 0;
if (toshiba_acpi)
@@ -3441,8 +3465,15 @@ iio_error:
}
#endif
+ dmi_id = dmi_first_match(toshiba_dmi_quirks);
+ if (dmi_id)
+ quirks = (long)dmi_id->driver_data;
+
if (turn_on_panel_on_resume == -1)
- turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+ turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+
+ if (hci_hotkey_quickstart == -1)
+ hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
toshiba_wwan_available(dev);
if (dev->wwan_supported)
@@ -3523,9 +3554,10 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
(dev->kbd_mode == SCI_KBD_MODE_ON) ?
LED_FULL : LED_OFF);
break;
+ case 0x8e: /* Power button pressed */
+ break;
case 0x85: /* Unknown */
case 0x8d: /* Unknown */
- case 0x8e: /* Unknown */
case 0x94: /* Unknown */
case 0x95: /* Unknown */
default:
@@ -3581,7 +3613,6 @@ static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
static struct acpi_driver toshiba_acpi_driver = {
.name = "Toshiba ACPI driver",
- .owner = THIS_MODULE,
.ids = toshiba_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index d8f81962a240..dad2c3e55904 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -59,7 +59,6 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.remove = toshiba_bt_rfkill_remove,
.notify = toshiba_bt_rfkill_notify,
},
- .owner = THIS_MODULE,
.drv.pm = &toshiba_bt_pm,
};
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index 8c9f76286b08..03dfddeee0c0 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -251,7 +251,6 @@ MODULE_DEVICE_TABLE(acpi, haps_device_ids);
static struct acpi_driver toshiba_haps_driver = {
.name = "Toshiba HAPS",
- .owner = THIS_MODULE,
.ids = haps_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/